diff --git a/.circleci/config.yml b/.circleci/config.yml index ab70c684e4d..c0780cd047f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -11,498 +11,514 @@ version: 2.1 +_check_skip: &check_skip + name: Check-skip + command: | + set -e + export COMMIT_MESSAGE=$(git log --format=oneline -n 1); + if [[ "$CIRCLE_PULL_REQUEST" != "" ]] && ([[ "$COMMIT_MESSAGE" == *"[skip circle]"* ]] || [[ "$COMMIT_MESSAGE" == *"[circle skip]"* ]]); then + echo "Skip detected, exiting job ${CIRCLE_JOB} for PR ${CIRCLE_PULL_REQUEST}." + circleci-agent step halt; + fi + jobs: - build_docs: - parameters: - scheduled: - type: string - default: "false" - docker: - - image: cimg/base:current-22.04 - # medium 2 vCPUs, 4GB mem; medium+ 3vCPUs 6GB mem; large 4 vCPUs 8GB mem - # https://circleci.com/docs/configuration-reference#resourceclass - resource_class: medium+ - steps: - - restore_cache: - keys: - - source-cache - - checkout - - run: - name: Complete checkout - command: | - set -e - if ! git remote -v | grep upstream; then - git remote add upstream https://github.com/mne-tools/mne-python.git + build_docs: + parameters: + scheduled: + type: string + default: "false" + machine: + image: ubuntu-2404:current + # large 4 vCPUs 15GB mem + # https://discuss.circleci.com/t/changes-to-remote-docker-reporting-pricing/47759 + resource_class: large + steps: + - restore_cache: + keys: + - source-cache + - checkout: + method: full + - run: + name: Complete checkout + command: | + set -e + if ! git remote -v | grep upstream; then + git remote add upstream https://github.com/mne-tools/mne-python.git + fi + git remote set-url upstream https://github.com/mne-tools/mne-python.git + git fetch upstream + - save_cache: + key: source-cache + paths: + - ".git" + - run: + <<: *check_skip + - run: + name: Merge with upstream and triage run + command: | + set -e + echo $(git log -1 --pretty=%B) | tee gitlog.txt + echo ${CI_PULL_REQUEST//*pull\//} | tee merge.txt + if [[ $(cat merge.txt) != "" ]]; then + echo "Merging $(cat merge.txt)"; + git pull --ff-only upstream "refs/pull/$(cat merge.txt)/merge"; + else + if [[ "$CIRCLE_BRANCH" == "main" ]]; then + KIND=dev + else + KIND=stable fi - git remote set-url upstream https://github.com/mne-tools/mne-python.git - git fetch upstream - - save_cache: - key: source-cache - paths: - - ".git" - - run: - name: Check-skip - command: | - set -e export COMMIT_MESSAGE=$(git log --format=oneline -n 1); - if [[ -v CIRCLE_PULL_REQUEST ]] && ([[ "$COMMIT_MESSAGE" == *"[skip circle]"* ]] || [[ "$COMMIT_MESSAGE" == *"[circle skip]"* ]]); then - echo "Skip detected, exiting job ${CIRCLE_JOB} for PR ${CIRCLE_PULL_REQUEST}." - circleci-agent step halt; - fi - - run: - name: Merge with upstream and triage run - command: | - set -e - echo $(git log -1 --pretty=%B) | tee gitlog.txt - echo ${CI_PULL_REQUEST//*pull\//} | tee merge.txt - if [[ $(cat merge.txt) != "" ]]; then - echo "Merging $(cat merge.txt)"; - git pull --ff-only upstream "refs/pull/$(cat merge.txt)/merge"; - else - if [[ "$CIRCLE_BRANCH" == "main" ]]; then - KIND=dev + if [[ "<< parameters.scheduled >>" == "true" ]]; then + echo "Scheduled full build detected, checking if it's required." + wget https://mne.tools/${KIND}/_version.txt; + REMOTE_VERSION=$(cat _version.txt) + THIS_VERSION=$(git rev-parse HEAD) + echo "Current ${KIND} SHA: ${REMOTE_VERSION}" + echo "This ${KIND} SHA: ${THIS_VERSION}" + if [[ "${THIS_VERSION}" != "${REMOTE_VERSION}" ]]; then + echo "Rebuild required." 
else - KIND=stable - fi - export COMMIT_MESSAGE=$(git log --format=oneline -n 1); - if [[ "<< parameters.scheduled >>" == "true" ]]; then - echo "Scheduled full build detected, checking if it's required." - wget https://mne.tools/${KIND}/_version.txt; - REMOTE_VERSION=$(cat _version.txt) - THIS_VERSION=$(git rev-parse HEAD) - echo "Current ${KIND} SHA: ${REMOTE_VERSION}" - echo "This ${KIND} SHA: ${THIS_VERSION}" - if [[ "${THIS_VERSION}" != "${REMOTE_VERSION}" ]]; then - echo "Rebuild required." - else - echo "Rebuild skipped." - circleci-agent step halt; - fi - elif [[ "$COMMIT_MESSAGE" == *"[circle deploy]"* ]]; then - echo "Forced deployed build detected, building and deploying docs"; - else - echo "Waiting until scheduled run to build ${KIND} docs, exiting job ${CIRCLE_JOB}." + echo "Rebuild skipped." circleci-agent step halt; fi + elif [[ "$COMMIT_MESSAGE" == *"[circle deploy]"* ]]; then + echo "Forced deployed build detected, building and deploying docs"; + else + echo "Waiting until scheduled run to build ${KIND} docs, exiting job ${CIRCLE_JOB}." + circleci-agent step halt; fi + fi - - run: - name: Set BASH_ENV - command: | - set -e - set -o pipefail - ./tools/setup_xvfb.sh - sudo apt install -qq graphviz optipng python3.10-venv python3-venv libxft2 ffmpeg - python3.10 -m venv ~/python_env - echo "set -e" >> $BASH_ENV - echo "export OPENBLAS_NUM_THREADS=4" >> $BASH_ENV - echo "export XDG_RUNTIME_DIR=/tmp/runtime-circleci" >> $BASH_ENV - echo "export MNE_FULL_DATE=true" >> $BASH_ENV - source tools/get_minimal_commands.sh - echo "export MNE_3D_BACKEND=pyvistaqt" >> $BASH_ENV - echo "export MNE_3D_OPTION_MULTI_SAMPLES=1" >> $BASH_ENV - echo "export MNE_BROWSER_BACKEND=qt" >> $BASH_ENV - echo "export MNE_BROWSER_PRECOMPUTE=false" >> $BASH_ENV - echo "export PATH=~/.local/bin/:$PATH" >> $BASH_ENV - echo "export DISPLAY=:99" >> $BASH_ENV - echo "source ~/python_env/bin/activate" >> $BASH_ENV - mkdir -p ~/.local/bin - ln -s ~/python_env/bin/python ~/.local/bin/python - echo "BASH_ENV:" - cat $BASH_ENV - mkdir -p ~/mne_data - touch pattern.txt - - run: - name: check neuromag2ft - command: | - neuromag2ft --version + - run: + name: Set BASH_ENV + command: ./tools/circleci_bash_env.sh - - run: - name: Install fonts needed for diagrams - command: | - mkdir -p $HOME/.fonts - curl https://codeload.github.com/adobe-fonts/source-code-pro/tar.gz/2.038R-ro/1.058R-it/1.018R-VAR | tar xz -C $HOME/.fonts - curl https://codeload.github.com/adobe-fonts/source-sans-pro/tar.gz/3.028R | tar xz -C $HOME/.fonts - fc-cache -f + - run: + name: Install fonts needed for diagrams + command: | + mkdir -p $HOME/.fonts + echo "Source Code Pro" + curl https://codeload.github.com/adobe-fonts/source-code-pro/tar.gz/2.038R-ro/1.058R-it/1.018R-VAR | tar xz -C $HOME/.fonts + echo "Source Sans Pro" + curl https://codeload.github.com/adobe-fonts/source-sans/tar.gz/3.028R | tar xz -C $HOME/.fonts + fc-cache -f - # Load pip cache - - restore_cache: - keys: - - pip-cache - - restore_cache: - keys: - - user-install-bin-cache-310 + # Load pip cache + - restore_cache: + keys: + - pip-cache-0 + - restore_cache: + keys: + - user-install-bin-cache-310 - # Hack in uninstalls of libraries as necessary if pip doesn't do the right thing in upgrading for us... - - run: - name: Get Python running - command: | - ./tools/circleci_dependencies.sh + # Hack in uninstalls of libraries as necessary if pip doesn't do the right thing in upgrading for us... 
+ - run: + name: Get Python running + command: | + ./tools/circleci_dependencies.sh - - save_cache: - key: pip-cache - paths: - - ~/.cache/pip - - save_cache: - key: user-install-bin-cache-310 - paths: - - ~/.local/lib/python3.10/site-packages - - ~/.local/bin + - save_cache: + key: pip-cache-0 + paths: + - ~/.cache/pip + - save_cache: + key: user-install-bin-cache-310 + paths: + - ~/.local/lib/python3.10/site-packages + - ~/.local/bin - - run: - name: Check Qt - command: | - ./tools/check_qt_import.sh PyQt6 - # Load tiny cache so that ~/.mne does not need to be created below - - restore_cache: - keys: - - data-cache-tiny-0 + - run: + name: Check Qt + command: | + ./tools/check_qt_import.sh PyQt6 + # Load tiny cache so that ~/.mne does not need to be created below + - restore_cache: + keys: + - data-cache-tiny-0 - # Look at what we have and fail early if there is some library conflict - - run: - name: Check installation - command: | - which python - QT_DEBUG_PLUGINS=1 mne sys_info -pd - python -c "import numpy; numpy.show_config()" - LIBGL_DEBUG=verbose python -c "import pyvistaqt; pyvistaqt.BackgroundPlotter(show=True)" - python -c "import mne; mne.set_config('MNE_USE_CUDA', 'false')" # this is needed for the config tutorial - python -c "import mne; mne.set_config('MNE_LOGGING_LEVEL', 'info')" - python -c "import mne; level = mne.get_config('MNE_LOGGING_LEVEL'); assert level.lower() == 'info', repr(level)" - - run: - name: List packages - command: python -m pip list + # Look at what we have and fail early if there is some library conflict + - run: + name: Check installation + command: | + which python + QT_DEBUG_PLUGINS=1 mne sys_info -pd + python -c "import numpy; numpy.show_config()" + python -c "import dipy.align.metrics" + LIBGL_DEBUG=verbose python -c "import pyvistaqt; pyvistaqt.BackgroundPlotter(show=True)" + python -c "import mne; mne.set_config('MNE_USE_CUDA', 'false')" # this is needed for the config tutorial + python -c "import mne; mne.set_config('MNE_LOGGING_LEVEL', 'info')" + python -c "import mne; level = mne.get_config('MNE_LOGGING_LEVEL'); assert level.lower() == 'info', repr(level)" + - run: + name: List packages + command: python -m pip list - # Figure out if we should run a full, pattern, or noplot version - - restore_cache: - keys: - - data-cache-tiny-1 - - restore_cache: - keys: - - data-cache-multimodal - - restore_cache: - keys: - - data-cache-limo - - restore_cache: - keys: - - data-cache-fsaverage - - restore_cache: - keys: - - data-cache-bst-phantom-ctf - - restore_cache: - keys: - - data-cache-bst-raw - - restore_cache: - keys: - - data-cache-bst-phantom-elekta - - restore_cache: - keys: - - data-cache-bst-auditory - - restore_cache: - keys: - - data-cache-bst-resting - - restore_cache: - keys: - - data-cache-fieldtrip - - restore_cache: - keys: - - data-cache-somato - - restore_cache: - keys: - - data-cache-hf-sef - - restore_cache: - keys: - - data-cache-opm - - restore_cache: - keys: - - data-cache-sample - - restore_cache: - keys: - - data-cache-spm-face - - restore_cache: - keys: - - data-cache-testing - - restore_cache: - keys: - - data-cache-visual - - restore_cache: - keys: - - data-cache-ucl-opm-auditory - - run: - name: Get data - command: | - ./tools/circleci_download.sh - - run: - name: Verify build type - command: | - echo "PATTERN=$(cat pattern.txt)" - echo "BUILD=$(cat build.txt)" - ls -al ~/mne_data; + # Figure out if we should run a full build or specify a pattern + - restore_cache: + keys: + - data-cache-tiny-1 + - restore_cache: + keys: + - 
data-cache-multimodal + - restore_cache: + keys: + - data-cache-limo + - restore_cache: + keys: + - data-cache-fsaverage + - restore_cache: + keys: + - data-cache-bst-raw + - restore_cache: + keys: + - data-cache-bst-phantom-ctf + - restore_cache: + keys: + - data-cache-bst-phantom-elekta + - restore_cache: + keys: + - data-cache-bst-phantom-kernel + - restore_cache: + keys: + - data-cache-bst-auditory + - restore_cache: + keys: + - data-cache-bst-resting + - restore_cache: + keys: + - data-cache-fieldtrip + - restore_cache: + keys: + - data-cache-somato + - restore_cache: + keys: + - data-cache-hf-sef + - restore_cache: + keys: + - data-cache-opm + - restore_cache: + keys: + - data-cache-sample + - restore_cache: + keys: + - data-cache-spm-face + - restore_cache: + keys: + - data-cache-testing + - restore_cache: + keys: + - data-cache-visual + - restore_cache: + keys: + - data-cache-ucl-opm-auditory + - restore_cache: + keys: + - data-cache-phantom-kit + - restore_cache: + keys: + - data-cache-ds004388 + - run: + name: Get data + # This limit could be increased, but this is helpful for finding slow ones + # (even ~2GB datasets should be downloadable in this time from good + # providers) + no_output_timeout: 10m + command: | + ./tools/circleci_download.sh + - run: + name: Verify build type + command: | + echo "PATTERN=$(cat pattern.txt)" + echo "BUILD=$(cat build.txt)" + ls -al ~/mne_data; - # Run doctest (if it's full or front) before building the docs - - run: - name: make test-doc - command: | - if [[ $(cat gitlog.txt) == *"[circle front]"* ]] || [[ $(cat build.txt) == "html_dev-memory" ]] || [[ $(cat build.txt) == "html_stable-memory" ]]; then - make test-doc; - mkdir -p doc/_build/test-results/test-doc; - cp junit-results.xml doc/_build/test-results/test-doc/junit.xml; - fi; - # Build docs - - run: - name: make html - command: | - PATTERN=$(cat pattern.txt) make -C doc $(cat build.txt); - - run: - name: Show profiling output - when: always - command: | - if compgen -G "doc/*.dat" > /dev/null; then - mkdir -p doc/generated - mprof plot doc/*.dat --output doc/generated/memory.png - else - echo "No profile data found in doc/" - fi - - run: - name: Sanity check system state - command: | - python -c "import mne; level = mne.get_config('MNE_LOGGING_LEVEL'); assert level.lower() == 'info', repr(level)" + # Run doctest (if it's full or front) before building the docs + - run: + name: make test-doc + command: | + if [[ $(cat gitlog.txt) == *"[circle front]"* ]] || [[ $(cat build.txt) == "html-memory" ]] ; then + make test-doc; + mkdir -p doc/_build/test-results/test-doc; + cp junit-results.xml doc/_build/test-results/test-doc/junit.xml; + cp coverage.xml doc/_build/test-results/test-doc/coverage.xml; + fi; + # Build docs + - run: + name: make html + command: | # we have -o pipefail in #BASH_ENV so we should be okay + set -x + PATTERN=$(cat pattern.txt) make -C doc $(cat build.txt) 2>&1 | tee sphinx_log.txt + - run: + name: Check sphinx log for warnings (which are treated as errors) + when: always + command: | + ! 
grep "^.*\(WARNING\|ERROR\): " sphinx_log.txt + - run: + name: Show profiling output + when: always + command: | + if compgen -G "doc/*.dat" > /dev/null; then + mkdir -p doc/generated + mprof plot doc/*.dat --output doc/generated/memory.png + else + echo "No profile data found in doc/" + fi + - run: + name: Sanity check system state + command: | + python -c "import mne; level = mne.get_config('MNE_LOGGING_LEVEL'); assert level.lower() == 'info', repr(level)" - # Reduce upload time of artifacts we will (almost) never look at - - run: - name: Reduce artifact upload time - command: | - if grep -q html_dev-pattern-memory build.txt || grep -q html_dev-noplot build.txt; then - zip -rm doc/_build/html/_downloads.zip doc/_build/html/_downloads - fi - for NAME in generated auto_tutorials auto_examples; do - zip -rm doc/${NAME}.zip doc/${NAME} - done + # Reduce upload time of artifacts we will (almost) never look at + - run: + name: Reduce artifact upload time + command: | + if grep -q html-pattern-memory build.txt; then + zip -rm doc/_build/html/_downloads.zip doc/_build/html/_downloads + fi + for NAME in generated auto_tutorials auto_examples; do + zip -rm doc/${NAME}.zip doc/${NAME} + done - # Save the JUnit file - - store_test_results: - path: doc/_build/test-results - - store_artifacts: - path: doc/_build/test-results - destination: test-results - # Save the SG RST - - store_artifacts: - path: doc/auto_examples.zip - - store_artifacts: - path: doc/auto_tutorials.zip - - store_artifacts: - path: doc/generated.zip - # Save the HTML - - store_artifacts: - path: doc/_build/html/ - destination: dev - - store_artifacts: - path: doc/_build/html_stable/ - destination: stable - - persist_to_workspace: - root: doc/_build - paths: - - html - - html_stable + # Save the JUnit file + - store_test_results: + path: doc/_build/test-results + - store_artifacts: + path: doc/_build/test-results + destination: test-results + # Upload test results to Codecov + - run: + name: Upload test results to Codecov + environment: + CODECOV_TOKEN: fb4c4a94-72d7-4743-bb08-af25b623a29a + command: | + if [[ -f doc/_build/test-results/test-doc/coverage.xml ]]; then + bash <(curl -s https://codecov.io/bash) -f doc/_build/test-results/test-doc/coverage.xml || true + fi + # Save the SG RST + - store_artifacts: + path: doc/auto_examples.zip + - store_artifacts: + path: doc/auto_tutorials.zip + - store_artifacts: + path: doc/generated.zip + # Save the HTML + - store_artifacts: + path: doc/_build/html/ + destination: html + - persist_to_workspace: + root: doc/_build + paths: + - html - # Keep these separate, maybe better in terms of size limitations (?) 
- - save_cache: - key: data-cache-tiny-0 # < 100 M, might as well combine - paths: - - ~/.mne - - ~/mne_data/MNE-kiloword-data # (28 M) - - ~/mne_data/MNE-eegbci-data # (35 M) - - ~/mne_data/MNE-misc-data # (39 M) - - ~/mne_data/mTRF_1.5 # (56 M) - - ~/mne_data/MNE-phantom-4DBTi # (77 M) - - save_cache: - key: data-cache-tiny-1 # more to combine - paths: - - ~/mne_data/MNE-fNIRS-motor-data # (71 M) - - ~/mne_data/MNE-refmeg-noise-data # (93 M) - - ~/mne_data/physionet-sleep-data # (95 M) - - save_cache: - key: data-cache-multimodal - paths: - - ~/mne_data/MNE-multimodal-data # (240 M) - - save_cache: - key: data-cache-limo - paths: - - ~/mne_data/MNE-limo-data # (244 M) - - save_cache: - key: data-cache-fsaverage - paths: - - ~/mne_data/MNE-fsaverage-data # (762 M) - - save_cache: - key: data-cache-bst-phantom-ctf - paths: - - ~/mne_data/MNE-brainstorm-data/bst_phantom_ctf # (177 M) - - save_cache: - key: data-cache-bst-raw - paths: - - ~/mne_data/MNE-brainstorm-data/bst_raw # (830 M) - - save_cache: - key: data-cache-bst-phantom-elekta - paths: - - ~/mne_data/MNE-brainstorm-data/bst_phantom_elekta # (1.4 G) - - save_cache: - key: data-cache-bst-auditory - paths: - - ~/mne_data/MNE-brainstorm-data/bst_auditory # (2.9 G) - - save_cache: - key: data-cache-bst-resting - paths: - - ~/mne_data/MNE-brainstorm-data/bst_resting # (4.5 G) - - save_cache: - key: data-cache-fieldtrip - paths: - - ~/mne_data/MNE-fieldtrip_cmc-data # (699 M) - - save_cache: - key: data-cache-somato - paths: - - ~/mne_data/MNE-somato-data # (750 M) - - save_cache: - key: data-cache-hf-sef - paths: - - ~/mne_data/HF_SEF # (1.3 G) - - save_cache: - key: data-cache-opm - paths: - - ~/mne_data/MNE-OPM-data # (1.9 G) - - save_cache: - key: data-cache-sample - paths: - - ~/mne_data/MNE-sample-data # (3.2 G) - - save_cache: - key: data-cache-spm-face - paths: - - ~/mne_data/MNE-spm-face # (1.5 G) - - save_cache: - key: data-cache-testing - paths: - - ~/mne_data/MNE-testing-data # (2.5 G) - - save_cache: - key: data-cache-visual - paths: - - ~/mne_data/MNE-visual_92_categories-data # (6 G) - - save_cache: - key: data-cache-ucl-opm-auditory - paths: - - ~/mne_data/auditory_OPM_stationary # (4 G) + # Keep these separate, maybe better in terms of size limitations (?) 
+ - save_cache: + key: data-cache-tiny-0 # < 100 M, might as well combine + paths: + - ~/.mne + - ~/mne_data/MNE-kiloword-data # (28 M) + - ~/mne_data/MNE-eegbci-data # (35 M) + - ~/mne_data/MNE-misc-data # (39 M) + - ~/mne_data/mTRF_1.5 # (56 M) + - ~/mne_data/MNE-phantom-4DBTi # (77 M) + - save_cache: + key: data-cache-tiny-1 # more to combine + paths: + - ~/mne_data/MNE-fNIRS-motor-data # (71 M) + - ~/mne_data/MNE-refmeg-noise-data # (93 M) + - ~/mne_data/physionet-sleep-data # (95 M) + - save_cache: + key: data-cache-multimodal + paths: + - ~/mne_data/MNE-multimodal-data # (240 M) + - save_cache: + key: data-cache-limo + paths: + - ~/mne_data/MNE-limo-data # (244 M) + - save_cache: + key: data-cache-fsaverage + paths: + - ~/mne_data/MNE-fsaverage-data # (762 M) + - save_cache: + key: data-cache-bst-raw + paths: + - ~/mne_data/MNE-brainstorm-data/bst_raw # (830 M) + - save_cache: + key: data-cache-bst-phantom-ctf + paths: + - ~/mne_data/MNE-brainstorm-data/bst_phantom_ctf # (177 M) + - save_cache: + key: data-cache-bst-phantom-elekta + paths: + - ~/mne_data/MNE-brainstorm-data/bst_phantom_elekta # (1.4 G) + - save_cache: + key: data-cache-bst-phantom-kernel + paths: + - ~/mne_data/MNE-phantom-kernel-data # (362 M) + - save_cache: + key: data-cache-bst-auditory + paths: + - ~/mne_data/MNE-brainstorm-data/bst_auditory # (2.9 G) + - save_cache: + key: data-cache-bst-resting + paths: + - ~/mne_data/MNE-brainstorm-data/bst_resting # (4.5 G) + - save_cache: + key: data-cache-fieldtrip + paths: + - ~/mne_data/MNE-fieldtrip_cmc-data # (699 M) + - save_cache: + key: data-cache-somato + paths: + - ~/mne_data/MNE-somato-data # (750 M) + - save_cache: + key: data-cache-hf-sef + paths: + - ~/mne_data/HF_SEF # (1.3 G) + - save_cache: + key: data-cache-opm + paths: + - ~/mne_data/MNE-OPM-data # (1.9 G) + - save_cache: + key: data-cache-sample + paths: + - ~/mne_data/MNE-sample-data # (3.2 G) + - save_cache: + key: data-cache-spm-face + paths: + - ~/mne_data/MNE-spm-face # (1.5 G) + - save_cache: + key: data-cache-testing + paths: + - ~/mne_data/MNE-testing-data # (2.5 G) + - save_cache: + key: data-cache-visual + paths: + - ~/mne_data/MNE-visual_92_categories-data # (6 G) + - save_cache: + key: data-cache-ucl-opm-auditory + paths: + - ~/mne_data/auditory_OPM_stationary # (4 G) + - save_cache: + key: data-cache-phantom-kit + paths: + - ~/mne_data/MNE-phantom-KIT-data # (1 G) + - save_cache: + key: data-cache-ds004388 + paths: + - ~/mne_data/ds004388 # (1.8 G) - linkcheck: - # there are a few files excluded from this for expediency, see Makefile - parameters: - scheduled: - type: string - default: "false" - docker: - - image: circleci/python:3.9.2-buster - steps: - - restore_cache: - keys: - - source-cache - - checkout - - run: - name: Set BASH_ENV - command: | - set -e - echo "set -e" >> $BASH_ENV - echo "export PATH=~/.local/bin/:$PATH" >> $BASH_ENV - - run: - name: Check-skip - command: | - export COMMIT_MESSAGE=$(git log --format=oneline -n 1); - if [[ "$COMMIT_MESSAGE" != *"[circle linkcheck]"* ]] && [ "<< parameters.scheduled >>" != "true" ]; then - echo "Skip detected, exiting job ${CIRCLE_JOB}." 
- circleci-agent step halt; - fi - - restore_cache: - keys: - - pip-cache - - run: - name: Get Python running - command: | - ./tools/circleci_dependencies.sh - - run: - name: Check installation - command: | - mne sys_info -pd - - run: - name: make linkcheck - command: | - make -C doc linkcheck - - run: - name: make linkcheck-grep - when: always - command: | - make -C doc linkcheck-grep - - store_artifacts: - path: doc/_build/linkcheck - destination: linkcheck + linkcheck: + # there are a few files excluded from this for expediency, see Makefile + parameters: + scheduled: + type: string + default: "false" + machine: + image: ubuntu-2404:current + resource_class: large + steps: + - restore_cache: + keys: + - source-cache + - checkout + - run: + name: Check-skip + command: | + export COMMIT_MESSAGE=$(git log --format=oneline -n 1); + if [[ "$COMMIT_MESSAGE" != *"[circle linkcheck]"* ]] && [ "<< parameters.scheduled >>" != "true" ]; then + echo "Skip detected, exiting job ${CIRCLE_JOB}." + circleci-agent step halt; + fi + - run: + name: Set BASH_ENV + command: ./tools/circleci_bash_env.sh + - restore_cache: + keys: + - pip-cache-0 + - run: + name: Get Python running + command: | + ./tools/circleci_dependencies.sh + - run: + name: Check installation + command: | + mne sys_info -pd + - run: + name: make linkcheck + no_output_timeout: 40m + command: | + make -C doc linkcheck + - store_artifacts: + path: doc/_build/linkcheck + destination: linkcheck - deploy: - machine: - image: ubuntu-2004:202111-01 - steps: - - attach_workspace: - at: /tmp/build - - restore_cache: - keys: - - website-cache - - run: - name: Set BASH_ENV - command: | - set -e - echo "set -e" >> $BASH_ENV - # Don't try to deploy if nothing is there or not on the right branch - - run: - name: Check docs - command: | - if [ ! -f /tmp/build/html/index.html ] && [ ! -f /tmp/build/html_stable/index.html ]; then - echo "No files found to upload (build: ${CIRCLE_BRANCH})."; - circleci-agent step halt; - fi; - - run: - name: Fetch docs - command: | - mkdir -p ~/.ssh - echo -e "Host *\nStrictHostKeyChecking no" > ~/.ssh/config - chmod og= ~/.ssh/config - if [ ! 
-d ~/mne-tools.github.io ]; then - git clone git@github.com:/mne-tools/mne-tools.github.io.git ~/mne-tools.github.io --depth=1 - fi - - run: - name: Deploy docs - command: | - git config --global user.email "circle@mne.com"; - git config --global user.name "Circle CI"; - cd ~/mne-tools.github.io; - git checkout main - git remote -v - git fetch origin - git reset --hard origin/main - git clean -xdf - if [ "${CIRCLE_BRANCH}" == "main" ]; then - echo "Deploying dev docs for ${CIRCLE_BRANCH}."; - rm -Rf dev; - cp -a /tmp/build/html dev; - git add -A; - git commit -m "CircleCI update of dev docs (${CIRCLE_BUILD_NUM})."; - else - echo "Deploying stable docs for ${CIRCLE_BRANCH}."; - rm -Rf stable; - cp -a /tmp/build/html_stable stable; - git add -A; - git commit -m "CircleCI update of stable docs (${CIRCLE_BUILD_NUM})."; - fi; - git push origin main; - - save_cache: - key: website-cache - paths: - - ~/mne_data/MNE-visual_92_categories-data + deploy: + machine: + image: ubuntu-2404:current + steps: + - attach_workspace: + at: /tmp/build + - restore_cache: + keys: + - website-cache-1 + - add_ssh_keys: + fingerprints: + # SHA256:N4qvp6MSbXcTz/27xz96VPsNuTDRT92zoRP8EW0I/8I + - "19:fe:1d:c3:c7:af:7e:16:94:4c:e1:e7:0a:56:13:bd" + - run: + name: Set BASH_ENV + command: | + set -e + echo "set -e" >> $BASH_ENV + # Don't try to deploy if nothing is there or not on the right branch + - run: + name: Check docs + command: | + if [ ! -f /tmp/build/html/index.html ] ; then + echo "No files found to upload (build: ${CIRCLE_BRANCH})."; + circleci-agent step halt; + fi; + - run: + name: Fetch docs + command: | + mkdir -p ~/.ssh + echo -e "Host *\nStrictHostKeyChecking no" > ~/.ssh/config + chmod og= ~/.ssh/config + if [ ! -d ~/mne-tools.github.io ]; then + git clone git@github.com:/mne-tools/mne-tools.github.io.git ~/mne-tools.github.io --depth=1 + fi + - run: + name: Deploy docs + command: | + git config --global user.email "circle@mne.tools"; + git config --global user.name "Circle CI"; + ssh-add -D && ssh-add ~/.ssh/id_rsa_19fe1dc3c7af7e16944ce1e70a5613bd + cd ~/mne-tools.github.io; + git checkout main + git remote -v + git fetch origin + git reset --hard origin/main + git clean -xdf + if [ "${CIRCLE_BRANCH}" == "main" ]; then + echo "Deploying dev docs for ${CIRCLE_BRANCH}."; + rm -Rf dev; + cp -a /tmp/build/html dev; + git add -A; + git commit -m "CircleCI update of dev docs (${CIRCLE_BUILD_NUM})."; + else + echo "Deploying stable docs for ${CIRCLE_BRANCH}."; + rm -Rf stable; + cp -a /tmp/build/html stable; + git add -A; + git commit -m "CircleCI update of stable docs (${CIRCLE_BUILD_NUM})."; + fi; + git push origin main; + - save_cache: + key: website-cache-1 + paths: + - ~/mne-tools.github.io workflows: default: @@ -519,7 +535,7 @@ workflows: branches: only: - main - - maint/1.3 + - /maint\/.*/ main: jobs: @@ -539,14 +555,14 @@ workflows: only: - main - weekly: + monthly: jobs: - linkcheck: - name: linkcheck_weekly + name: linkcheck_monthly scheduled: "true" triggers: - schedule: - # "At 6:00 AM GMT on the first day of each month" is often enough + # "At 6:00 AM GMT on the first day of each month" cron: "0 6 1 * *" filters: branches: diff --git a/.cirrus.yml b/.cirrus.yml deleted file mode 100644 index 84386c2aa85..00000000000 --- a/.cirrus.yml +++ /dev/null @@ -1,44 +0,0 @@ -# Test on macOS M1 silicon -# -# cibuildwheel FAQ helps (search "cirrus"): -# https://cibuildwheel.readthedocs.io/en/stable/faq/#apple-silicon -# https://cirrus-ci.org/guide/writing-tasks/#supported-instructions -# In commit messages, 
[skip ci] or [skip cirrus] is supported natively. -macos_arm64_task: - name: macOS arm64 (M1) tests - # Auto-cancel PR builds - auto_cancellation: $CIRRUS_PR != '' - only_if: $CIRRUS_REPO_FULL_NAME == "mne-tools/mne-python" - macos_instance: - image: ghcr.io/cirruslabs/macos-monterey-base:latest - env: - PATH: /opt/homebrew/opt/python@3.10/libexec/bin:$PATH - python_install_script: - - ./tools/cirrus_install_python.sh - pip_cache: - folder: ~/Library/Caches/pip - fingerprint_script: - - echo $PYTHON_VERSION && cat requirements.txt && cat requirements_testing.txt && cat requirements_testing_extra.txt - populate_script: - - ./tools/cirrus_dependencies.sh - pip_install_script: # now we actually run it (possibly another time) to install deps - - ./tools/cirrus_dependencies.sh - mne_install_script: - - pip install -ve . - mne_sys_info_script: - - mne sys_info -pd - testing_cache: - folder: ~/mne_data - fingerprint_script: - - ./tools/get_testing_version.sh - populate_script: - - python -c "import mne; mne.datasets.testing.data_path(force_update=True, verbose=True)" - pytest_script: - - pytest -m "not slowtest" --tb=short --cov=mne --cov-report=xml -vv mne - # https://cirrus-ci.org/guide/writing-tasks/#artifact-parsing - always: - upload_results_artifacts: - path: ./*.xml - format: junit - type: text/xml - codecov_script: bash <(curl -s https://codecov.io/bash) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000000..054b0c65924 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,16 @@ +# PR number should follow the commit number so that our code credit +# can parse this file correctly: +d71e497dcf6f98e19eb81e82e641404a71d2d663 # 1420, split up viz.py +203a96cbba2732d2e349a8f96065e74bbfd2a53b # 5862, split utils.py +ff349f356edb04e1b5f0db13deda8d1a20aca351 # 6767, move around manual parts +31a83063557fbd54d898f00f9527ffc547888395 # 10407, alphabetize docdict +e81ec528a42ac687f3d961ed5cf8e25f236925b0 # 11667, black +12395f9d9cf6ea3c72b225b62e052dd0d17d9889 # 11868, YAML indentation +d6d2f8c6a2ed4a0b27357da9ddf8e0cd14931b59 # 12097, isort +e7dd1588013179013a50d3f6b8e8f9ae0a185783 # 12261, ruff format +940ac9553ce42c15b4c16ecd013824ca3ea7244a # 12533, whitespace +e39995d9be6fc831c7a4a59f09b7a7c0a41ae315 # 12588, percent formatting +1c5b39ff1d99bbcb2fc0e0071a989b3f3845ff30 # 12603, ruff UP028 +b8b168088cb474f27833f5f9db9d60abe00dca83 # 12779, PR JSONs +ee64eba6f345e895e3d5e7d2804fa6aa2dac2e6d # 12781, Header unification +362f9330925fb79a6adc19a42243672676dec63e # 12799, UP038 diff --git a/.git_archival.txt b/.git_archival.txt new file mode 100644 index 00000000000..7c5100942aa --- /dev/null +++ b/.git_archival.txt @@ -0,0 +1,3 @@ +node: $Format:%H$ +node-date: $Format:%cI$ +describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$ diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000000..00a7b00c94e --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +.git_archival.txt export-subst diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index b39bd6b1d3d..e7db9e4f94c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -13,10 +13,115 @@ # This file was adapted from SciPy. # Each line is a file pattern followed by one or more owners. 
+# Syntax follows .gitignore, see https://git-scm.com/docs/gitignore#_pattern_format + +# Headings ideally should correspond to domains of knowledge/expertise, starting with +# algorithms implementations (generally main codebase mne/ and related docs) followed by +# project-level and miscellaneous items (CI, general documentation, etc.). + +################################################# +# Algorithms, implementations, and related docs # +################################################# + +# Artifact regression +/mne/preprocessing/_regress.py @wmvanvliet +/mne/preprocessing/tests/test_regress.py @wmvanvliet + +# Beamforming +/mne/beamformer @britta-wstnr +*dics*.py @britta-wstnr @wmvanvliet # related tutorials and examples +*lcmv*.py @britta-wstnr + +# Channels +/mne/channels @agramfort @mscheltienne @dengemann @jasmainak + +# Core sensor-space classes +/mne/epochs.py @drammock @agramfort @mscheltienne @dengemann +/mne/evoked.py @drammock @agramfort @mscheltienne @dengemann +/mne/io/*.* @drammock @agramfort @mscheltienne @dengemann + +# Current-source density +/mne/preprocessing/_csd.py @alexrockhill @dengemann + +# Decoding +/mne/decoding/csp.py @agramfort @dengemann +/mne/decoding/*.py @jasmainak + +# fNIRS +/mne/preprocessing/nirs @rob-luke +*fnirs*.py @rob-luke + +# forward +/mne/forward/ @agramfort @jasmainak +*forward*.py @agramfort + +# Intracranial +/mne/preprocessing/ieeg @alexrockhill @adam2392 +*seeg*.py @alexrockhill @adam2392 +*ecog*.py @alexrockhill @adam2392 + +# Inverse Sparse +/mne/inverse_sparse @agramfort +*mxne*.py @agramfort # related tutorials and examples +*gamma*.py @agramfort # related tutorials and examples # IO -/mne/io/brainvision @sappelhoff +/mne/io/brainvision @sappelhoff @adam2392 +/mne/io/nirx @rob-luke +/mne/io/snirf @rob-luke /mne/export @sappelhoff +/mne/io/eeglab.py @jasmainak +/mne/io/eeglab/tests/test_eeglab.py @jasmainak -# Beamforming -/mne/beamformer/ @britta-wstnr +# Minimum Norm +/mne/minimum_norm @agramfort + +# Preprocessing +/mne/preprocessing/ica.py @adam2392 @agramfort @mscheltienne @dengemann +/mne/preprocessing/infomax_.py @adam2392 @mscheltienne @dengemann +/mne/preprocessing/*annotate*.py @mscheltienne +/mne/preprocessing/bads.py @mscheltienne +/mne/preprocessing/e*g.py @mscheltienne + +# Report +/mne/report @dengemann @jasmainak + +# Simulation +/mne/simulation/ @agramfort +*simulate*.py @agramfort + +# Surface + transforms +/mne/surface.py @alexrockhill @larsoner +/mne/transforms.py @alexrockhill @larsoner +/mne/_freesurfer.py @alexrockhill @larsoner + +# TFR +/mne/time_frequency @drammock @adam2392 @mscheltienne + +# Viz +/mne/viz @drammock @dengemann +/mne/viz/_brain @larsoner @wmvanvliet +/mne/viz/ui_events.py @wmvanvliet +/tutorials/visualization @larsoner @wmvanvliet @dengemann +/examples/visualization @larsoner @dengemann + +# Datasets +/mne/datasets/brainstorm @jasmainak + +######################### +# Project-level / other # +######################### + +# Examples and tutorials +/examples @drammock @agramfort @dengemann +/tutorials @drammock @agramfort @dengemann + +# Non-tutorial documentation text and infrastructure +/doc @larsoner @drammock @agramfort @dengemann +/logo @drammock + +# Project infrastructure and CIs +/*.* @larsoner @drammock # files in the root directory +/.circleci @larsoner +/.github @larsoner +/tools @larsoner @drammock diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 53e02d49867..b7ab58dc917 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -10,4 +10,4 @@ This 
project and everyone participating in it is governed by the [MNE-Python's C ## How to contribute -Before contributing make sure you are familiar with [our contributing guide](https://mne.tools/dev/install/contributing.html). +Before contributing make sure you are familiar with [our contributing guide](https://mne.tools/dev/development/contributing.html). diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 00000000000..d0c3b59f68d --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,15 @@ +# These are supported funding model platforms + +# github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +# patreon: # Replace with a single Patreon username +open_collective: mne-python +# ko_fi: # Replace with a single Ko-fi username +# tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +# community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +# liberapay: # Replace with a single Liberapay username +# issuehunt: # Replace with a single IssueHunt username +# lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +# polar: # Replace with a single Polar username +# buy_me_a_coffee: # Replace with a single Buy Me a Coffee username +# thanks_dev: # Replace with a single thanks.dev username +# custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 6ec575d28e8..ddd5834e533 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -29,8 +29,8 @@ body: Paste here a code snippet or minimal working example ([MWE](https://en.wikipedia.org/wiki/Minimal_Working_Example)) to replicate your problem, using one of the - [datasets shipped with MNE-Python](https://mne.tools/dev/overview/datasets_index.html), - preferably the one called [sample](https://mne.tools/dev/overview/datasets_index.html#sample). + [datasets shipped with MNE-Python](https://mne.tools/stable/documentation/datasets.html#datasets), + preferably the one called [sample](https://mne.tools/stable/documentation/datasets.html#sample). render: Python validations: required: true diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ea102484a7f..231488d2d47 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,5 +1,7 @@ + + +#### Reference issue (if any) + + #### What does this implement/fix? -Explain your changes. + + #### Additional information -Any additional information you think is important. + + diff --git a/.github/actions/rename_towncrier/rename_towncrier.py b/.github/actions/rename_towncrier/rename_towncrier.py new file mode 100755 index 00000000000..72d4f4be272 --- /dev/null +++ b/.github/actions/rename_towncrier/rename_towncrier.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +# Adapted from action-towncrier-changelog +import json +import os +import re +import subprocess +import sys +from pathlib import Path + +from github import Github +from tomllib import loads + +event_name = os.getenv("GITHUB_EVENT_NAME", "pull_request") +if not event_name.startswith("pull_request"): + print(f"No-op for {event_name}") + sys.exit(0) +if "GITHUB_EVENT_PATH" in os.environ: + with open(os.environ["GITHUB_EVENT_PATH"], encoding="utf-8") as fin: + event = json.load(fin) + pr_num = event["number"] + basereponame = event["pull_request"]["base"]["repo"]["full_name"] + real = True +else: # local testing + pr_num = 12318 # added some towncrier files + basereponame = "mne-tools/mne-python" + real = False + +g = Github(os.environ.get("GITHUB_TOKEN")) +baserepo = g.get_repo(basereponame) + +# Grab config from upstream's default branch +toml_cfg = loads(Path("pyproject.toml").read_text("utf-8")) + +config = toml_cfg["tool"]["towncrier"] +pr = baserepo.get_pull(pr_num) +modified_files = [f.filename for f in pr.get_files()] + +# Get types from config +types = [ent["directory"] for ent in toml_cfg["tool"]["towncrier"]["type"]] +type_pipe = "|".join(types) + +# Get files that potentially match the types +directory = toml_cfg["tool"]["towncrier"]["directory"] +assert directory.endswith("/"), directory + +file_re = re.compile(rf"^{directory}({type_pipe})\.rst$") +found_stubs = [f for f in modified_files if file_re.match(f)] +for stub in found_stubs: + fro = stub + to = file_re.sub(rf"{directory}{pr_num}.\1.rst", fro) + print(f"Renaming {fro} to {to}") + if real: + subprocess.check_call(["mv", fro, to]) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 5ace4600a1f..8b0675e7df0 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,3 +4,11 @@ updates: directory: "/" schedule: interval: "weekly" + groups: + actions: + patterns: + - "*" + labels: + - no-changelog-entry-needed + cooldown: + default-days: 7 diff --git a/.github/release.yaml b/.github/release.yaml new file mode 100644 index 00000000000..386ce981916 --- /dev/null +++ b/.github/release.yaml @@ -0,0 +1,6 @@ +changelog: + exclude: + authors: + - dependabot + - pre-commit-ci + - github-actions diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml new file mode 100644 index 00000000000..b41e636bf5f --- /dev/null +++ b/.github/workflows/autofix.yml @@ -0,0 +1,24 @@ +name: autofix.ci + +on: # yamllint disable-line rule:truthy + pull_request: + types: [opened, synchronize, labeled, unlabeled] + +permissions: + contents: read + +jobs: + autofix: + name: Autoupdate changelog entry and headers + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - uses: actions/setup-python@v6 + with: + python-version: '3.12' + - run: pip install --upgrade towncrier pygithub gitpython numpy + - run: python ./.github/actions/rename_towncrier/rename_towncrier.py + - run: python ./tools/dev/ensure_headers.py + - uses: autofix-ci/action@635ffb0c9798bd160680f18fd73371e355b85f27 diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml new file mode 100644 index 00000000000..68720eaaa34 --- /dev/null +++ b/.github/workflows/automerge.yml @@ -0,0 +1,17 @@ +name: Bot auto-merge +on: pull_request # yamllint disable-line rule:truthy + +jobs: + autobot: + permissions: + contents: write + pull-requests: write + runs-on: ubuntu-latest + # Names can be found with gh api /repos/mne-tools/mne-python/pulls/12998 -q .user.login for example + if: 
(github.event.pull_request.user.login == 'dependabot[bot]' || github.event.pull_request.user.login == 'pre-commit-ci[bot]' || github.event.pull_request.user.login == 'github-actions[bot]') && github.repository == 'mne-tools/mne-python' + steps: + - name: Enable auto-merge for bot PRs + run: gh pr merge --auto --squash "$PR_URL" + env: + PR_URL: ${{github.event.pull_request.html_url}} + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/.github/workflows/check_changelog.yml b/.github/workflows/check_changelog.yml new file mode 100644 index 00000000000..7f8c0d28d7f --- /dev/null +++ b/.github/workflows/check_changelog.yml @@ -0,0 +1,22 @@ +name: Changelog + +on: # yamllint disable-line rule:truthy + pull_request: + types: [opened, synchronize, labeled, unlabeled] + branches: ["main"] + +permissions: + contents: read + +jobs: + changelog_checker: + name: Check towncrier entry in doc/changes/dev/ + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - uses: scientific-python/action-towncrier-changelog@v2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BOT_USERNAME: changelog-bot diff --git a/.github/workflows/circle_artifacts.yml b/.github/workflows/circle_artifacts.yml index c444a1e9bae..7bce0a59688 100644 --- a/.github/workflows/circle_artifacts.yml +++ b/.github/workflows/circle_artifacts.yml @@ -1,14 +1,18 @@ -on: [status] +on: [status] # yamllint disable-line rule:truthy +permissions: + contents: read + statuses: write jobs: circleci_artifacts_redirector_job: if: "${{ startsWith(github.event.context, 'ci/circleci: build_docs') }}" - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest name: Run CircleCI artifacts redirector steps: - name: GitHub Action step - uses: larsoner/circleci-artifacts-redirector-action@master + uses: scientific-python/circleci-artifacts-redirector-action@master with: repo-token: ${{ secrets.GITHUB_TOKEN }} - artifact-path: 0/dev/index.html + api-token: ${{ secrets.CIRCLECI_TOKEN }} + artifact-path: 0/html/sg_execution_times.html circleci-jobs: build_docs,build_docs_main job-title: Check the rendered docs here! diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 843a05e0cdb..c6a8ceee797 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -38,30 +38,32 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v5 + with: + persist-credentials: false # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. - + # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs # queries: security-extended,security-and-quality - + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v4 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - # If the Autobuild fails above, remove it and uncomment the following three lines. + # If the Autobuild fails above, remove it and uncomment the following three lines. # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. # - run: | @@ -69,4 +71,4 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/codespell_and_flake.yml b/.github/workflows/codespell_and_flake.yml deleted file mode 100644 index e191caa25d1..00000000000 --- a/.github/workflows/codespell_and_flake.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: 'codespell_and_flake' -# https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency -# https://docs.github.com/en/developers/webhooks-and-events/events/github-event-types#pullrequestevent -# workflow name, PR number (empty on push), push ref (empty on PR) -concurrency: - group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} - cancel-in-progress: true -on: - push: - branches: - - '*' - pull_request: - branches: - - '*' - -jobs: - style: - name: 'codespell and flake' - runs-on: ubuntu-20.04 - env: - CODESPELL_DIRS: 'mne/ doc/ tutorials/ examples/' - CODESPELL_SKIPS: 'doc/_build,doc/auto_*,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb,searchindex.dat,install_mne_c.rst,plot_*.rst,*.rst.txt,c_EULA.rst*,*.html,gdf_encodes.txt,*.svg,references.bib,*.css,*.edf,*.bdf,*.vhdr' - - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: '3.9' - architecture: 'x64' - - run: | - python -m pip install --upgrade pip setuptools wheel - python -m pip install flake8 - name: 'Install dependencies' - - uses: rbialon/flake8-annotations@v1 - name: 'Setup flake8 annotations' - - run: make flake - name: 'Run flake8' - - uses: codespell-project/actions-codespell@v1.0 - with: - path: ${{ env.CODESPELL_DIRS }} - skip: ${{ env.CODESPELL_SKIPS }} - builtin: 'clear,rare,informal,names' - ignore_words_file: 'ignore_words.txt' - uri_ignore_words_list: 'bu' - name: 'Run codespell' diff --git a/.github/workflows/compat_minimal.yml b/.github/workflows/compat_minimal.yml deleted file mode 100644 index a0027f46397..00000000000 --- a/.github/workflows/compat_minimal.yml +++ /dev/null @@ -1,77 +0,0 @@ -name: 'compat / minimal' -concurrency: - group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} - cancel-in-progress: true -on: - push: - branches: - - '*' - pull_request: - branches: - - '*' - -jobs: - # Minimal (runs with and without testing data) - job: - name: 'minimal 3.8' - runs-on: ubuntu-20.04 - defaults: - run: - shell: bash - env: - CONDA_DEPENDENCIES: 'numpy scipy matplotlib' - DEPS: 'minimal' - DISPLAY: ':99.0' - MNE_DONTWRITE_HOME: true - MNE_FORCE_SERIAL: true - MNE_LOGGING_LEVEL: 'warning' - MNE_SKIP_NETWORK_TEST: 1 - OPENBLAS_NUM_THREADS: '1' - PYTHONUNBUFFERED: '1' - PYTHON_VERSION: '3.8' - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - run: 
./tools/setup_xvfb.sh - name: 'Setup xvfb' - - uses: conda-incubator/setup-miniconda@v2 - with: - python-version: ${{ env.PYTHON_VERSION }} - miniforge-version: latest - miniforge-variant: Mambaforge - use-mamba: true - - shell: bash -el {0} - run: | - ./tools/github_actions_dependencies.sh - source tools/get_minimal_commands.sh - name: 'Install dependencies' - - shell: bash -el {0} - run: ./tools/github_actions_install.sh - name: 'Install MNE' - - shell: bash -el {0} - run: ./tools/github_actions_infos.sh - name: 'Show infos' - - shell: bash -el {0} - run: ./tools/get_testing_version.sh - name: 'Get testing version' - - shell: bash -el {0} - run: MNE_SKIP_TESTING_DATASET_TESTS=true pytest -m "not (ultraslowtest or pgtest)" --tb=short --cov=mne --cov-report xml -vv -rfE mne/ - name: Run tests with no testing data - - uses: actions/cache@v3 - with: - key: ${{ env.TESTING_VERSION }} - path: ~/mne_data - name: 'Cache testing data' - - shell: bash -el {0} - run: ./tools/github_actions_download.sh - name: 'Download testing data' - - shell: bash -el {0} - run: ./tools/github_actions_locale.sh - name: 'Print locale' - - shell: bash -el {0} - run: ./tools/github_actions_test.sh - name: 'Run tests' - - uses: codecov/codecov-action@v3 - if: success() - name: 'Upload coverage to CodeCov' diff --git a/.github/workflows/compat_old.yml b/.github/workflows/compat_old.yml deleted file mode 100644 index 36e47774231..00000000000 --- a/.github/workflows/compat_old.yml +++ /dev/null @@ -1,68 +0,0 @@ -name: 'compat / old' -concurrency: - group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} - cancel-in-progress: true -on: - push: - branches: - - '*' - pull_request: - branches: - - '*' - -jobs: - job: - name: 'old 3.8' - runs-on: ubuntu-20.04 - defaults: - run: - shell: bash - env: - CONDA_DEPENDENCIES: 'numpy=1.20.2 scipy=1.6.3 matplotlib=3.4 pandas=1.2.4 scikit-learn=0.24.2' - DISPLAY: ':99.0' - MNE_LOGGING_LEVEL: 'warning' - OPENBLAS_NUM_THREADS: '1' - PYTHONUNBUFFERED: '1' - PYTHON_VERSION: '3.8' - MNE_IGNORE_WARNINGS_IN_TESTS: 'true' - steps: - - uses: actions/checkout@v3 - - run: ./tools/setup_xvfb.sh - name: 'Setup xvfb' - - uses: conda-incubator/setup-miniconda@v2 - with: - python-version: ${{ env.PYTHON_VERSION }} - miniforge-version: latest - miniforge-variant: Mambaforge - use-mamba: true - - shell: bash -el {0} - run: | - ./tools/github_actions_dependencies.sh - source tools/get_minimal_commands.sh - name: 'Install dependencies' - - shell: bash -el {0} - run: ./tools/github_actions_install.sh - name: 'Install MNE' - - shell: bash -el {0} - run: ./tools/github_actions_infos.sh - name: 'Show infos' - - shell: bash -el {0} - run: ./tools/get_testing_version.sh - name: 'Get testing version' - - uses: actions/cache@v3 - with: - key: ${{ env.TESTING_VERSION }} - path: ~/mne_data - name: 'Cache testing data' - - shell: bash -el {0} - run: ./tools/github_actions_download.sh - name: 'Download testing data' - - shell: bash -el {0} - run: ./tools/github_actions_locale.sh - name: 'Print locale' - - shell: bash -el {0} - run: ./tools/github_actions_test.sh - name: 'Run tests' - - uses: codecov/codecov-action@v3 - if: success() - name: 'Upload coverage to CodeCov' diff --git a/.github/workflows/credit.yml b/.github/workflows/credit.yml new file mode 100644 index 00000000000..3ceb98a185d --- /dev/null +++ b/.github/workflows/credit.yml @@ -0,0 +1,45 @@ +name: Contributor credit + +on: # yamllint disable-line rule:truthy + # Scheduled actions only run on the main repo branch, which is 
exactly what we want + schedule: + - cron: '0 0 1 * *' # At 00:00 on day-of-month 1 + workflow_dispatch: + +jobs: + update_credit: + permissions: + contents: write + pull-requests: write + name: Update + runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} + GITHUB_TOKEN: ${{ github.token }} + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: true + - uses: actions/setup-python@v6 + with: + python-version: '3.12' + - run: pip install pygithub -e . + - run: python tools/dev/update_credit_json.py + - run: git add -f doc/sphinxext/prs/*.json + - run: | + git diff && git status --porcelain + if [[ $(git status --porcelain) ]]; then + echo "dirty=true" >> $GITHUB_OUTPUT + fi + id: status + - name: Create PR + run: | + set -xeo pipefail + git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" + git config --global user.name "github-actions[bot]" + git checkout -b credit + git commit -am "MAINT: Update code credit" + git push origin credit + PR_NUM=$(gh pr create --base main --head credit --title "MAINT: Update code credit" --body "Created by credit [GitHub action](https://github.com/mne-tools/mne-python/actions/runs/${{ github.run_id }}).

*Adjustments may need to be made to \`doc/changes/credit_tools.py\` or \`.mailmap\` etc. to get CircleCI to pass.*" --label "no-changelog-entry-needed") + echo "Opened https://github.com/mne-tools/mne-python/pull/${PR_NUM}" >> $GITHUB_STEP_SUMMARY + if: steps.status.outputs.dirty == 'true' diff --git a/.github/workflows/linux_conda.yml b/.github/workflows/linux_conda.yml deleted file mode 100644 index 9822254ecee..00000000000 --- a/.github/workflows/linux_conda.yml +++ /dev/null @@ -1,125 +0,0 @@ -name: 'linux / conda' -concurrency: - group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} - cancel-in-progress: true -on: - push: - branches: - - '*' - pull_request: - branches: - - '*' - -jobs: - py310: - runs-on: ubuntu-20.04 - name: 'linux conda 3.10' - defaults: - run: - shell: bash - env: - CONDA_ENV: 'environment.yml' - DISPLAY: ':99.0' - MNE_LOGGING_LEVEL: 'warning' - MKL_NUM_THREADS: '1' - PYTHONUNBUFFERED: '1' - PYTHON_VERSION: '3.10' - steps: - - uses: actions/checkout@v3 - - run: ./tools/setup_xvfb.sh - name: 'Setup xvfb' - - uses: conda-incubator/setup-miniconda@v2 - with: - activate-environment: 'mne' - python-version: ${{ env.PYTHON_VERSION }} - environment-file: ${{ env.CONDA_ENV }} - # No mamba for this one job (use conda itself!) - - shell: bash -el {0} - run: | - ./tools/github_actions_dependencies.sh - source tools/get_minimal_commands.sh - name: 'Install dependencies' - - shell: bash -el {0} - run: mne_surf2bem --version - name: 'Check minimal commands' - - shell: bash -el {0} - run: ./tools/github_actions_install.sh - name: 'Install MNE' - - shell: bash -el {0} - run: | - QT_QPA_PLATFORM=xcb LIBGL_DEBUG=verbose LD_DEBUG=libs python -c "import pyvistaqt; pyvistaqt.BackgroundPlotter(show=True)" - name: 'Check Qt GL' - - shell: bash -el {0} - run: ./tools/github_actions_infos.sh - name: 'Show infos' - - shell: bash -el {0} - run: ./tools/get_testing_version.sh - name: 'Get testing version' - - uses: actions/cache@v3 - with: - key: ${{ env.TESTING_VERSION }} - path: ~/mne_data - name: 'Cache testing data' - - shell: bash -el {0} - run: ./tools/github_actions_download.sh - name: 'Download testing data' - - shell: bash -el {0} - run: ./tools/github_actions_locale.sh - name: 'Print locale' - - shell: bash -el {0} - run: ./tools/github_actions_test.sh - name: 'Run tests' - - uses: codecov/codecov-action@v3 - if: success() - name: 'Upload coverage to CodeCov' - - notebook: - timeout-minutes: 90 - runs-on: ubuntu-20.04 - defaults: - run: - shell: bash - env: - CONDA_ENV: 'environment.yml' - PYTHON_VERSION: '3.10' - steps: - - uses: actions/checkout@v3 - - uses: conda-incubator/setup-miniconda@v2 - with: - activate-environment: 'mne' - python-version: ${{ env.PYTHON_VERSION }} - environment-file: ${{ env.CONDA_ENV }} - miniforge-version: latest - miniforge-variant: Mambaforge - use-mamba: true - - shell: bash -el {0} - run: | - # TODO: As of 2023/02/28, notebook tests need a pinned mesalib - mamba install -c conda-forge "vtk>=9.2=*osmesa*" "mesalib=21.2.5" - mamba list - name: 'Install OSMesa VTK variant' - - shell: bash -el {0} - run: | - pip uninstall -yq mne - pip install --progress-bar off -ve .[test] - name: 'Install dependencies' - - shell: bash -el {0} - run: ./tools/github_actions_infos.sh - name: 'Show infos' - - shell: bash -el {0} - run: ./tools/get_testing_version.sh - name: 'Get testing version' - - uses: actions/cache@v3 - with: - key: ${{ env.TESTING_VERSION }} - path: ~/mne_data - name: 'Cache testing data' - - shell: bash -el {0} - run: 
./tools/github_actions_download.sh - name: 'Download testing data' - - shell: bash -el {0} - run: pytest --tb=short -m "not pgtest" --cov=mne --cov-report=xml --cov-report=html -vv mne/viz - name: 'Run viz tests' - - uses: codecov/codecov-action@v3 - if: success() - name: 'Upload coverage to CodeCov' diff --git a/.github/workflows/linux_pip.yml b/.github/workflows/linux_pip.yml deleted file mode 100644 index fff47e31508..00000000000 --- a/.github/workflows/linux_pip.yml +++ /dev/null @@ -1,76 +0,0 @@ -name: 'linux / pip-pre' -concurrency: - group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} - cancel-in-progress: true -on: - push: - branches: - - '*' - pull_request: - branches: - - '*' - -permissions: - contents: read - -jobs: - # PIP-pre + non-default stim channel + log level info - job: - name: 'linux pip 3.10' - runs-on: ubuntu-20.04 - defaults: - run: - shell: bash - env: - DISPLAY: ':99.0' - MNE_LOGGING_LEVEL: 'info' - MNE_STIM_CHANNEL: 'STI101' - OPENBLAS_NUM_THREADS: '1' - PYTHONUNBUFFERED: '1' - PYTHON_VERSION: '3.10' - steps: - - uses: actions/checkout@v3 - - run: ./tools/setup_xvfb.sh - name: 'Setup xvfb' - - uses: actions/setup-python@v4 - with: - python-version: ${{ env.PYTHON_VERSION }} - name: 'Setup python' - - shell: bash -el {0} - run: | - ./tools/github_actions_dependencies.sh - source tools/get_minimal_commands.sh - name: 'Install dependencies' - - shell: bash -el {0} - run: mne_surf2bem --version - name: 'Check minimal commands' - - shell: bash -el {0} - run: ./tools/github_actions_install.sh - name: 'Install MNE' - - shell: bash -el {0} - run: ./tools/github_actions_infos.sh - name: 'Show infos' - - shell: bash -el {0} - run: | - ./tools/check_qt_import.sh PyQt6 - python -c "import matplotlib; matplotlib.use('QtAgg'); import matplotlib.pyplot as plt; plt.figure()" - - shell: bash -el {0} - run: ./tools/get_testing_version.sh - name: 'Get testing version' - - uses: actions/cache@v3 - with: - key: ${{ env.TESTING_VERSION }} - path: ~/mne_data - name: 'Cache testing data' - - shell: bash -el {0} - run: ./tools/github_actions_download.sh - name: 'Download testing data' - - shell: bash -el {0} - run: ./tools/github_actions_locale.sh - name: 'Print locale' - - shell: bash -el {0} - run: ./tools/github_actions_test.sh - name: 'Run tests' - - uses: codecov/codecov-action@v3 - if: success() - name: 'Upload coverage to CodeCov' diff --git a/.github/workflows/macos_conda.yml b/.github/workflows/macos_conda.yml deleted file mode 100644 index 3befcc0b32b..00000000000 --- a/.github/workflows/macos_conda.yml +++ /dev/null @@ -1,76 +0,0 @@ -name: 'macos / conda' -concurrency: - group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} - cancel-in-progress: true -on: - push: - branches: - - '*' - pull_request: - branches: - - '*' - -jobs: - job: - name: 'macos 3.8' - runs-on: macos-latest - defaults: - run: - shell: bash - env: - PYTHON_VERSION: '3.8' - MNE_LOGGING_LEVEL: 'warning' - MNE_3D_OPTION_SMOOTH_SHADING: 'true' - OPENBLAS_NUM_THREADS: '1' - PYTHONUNBUFFERED: '1' - CONDA_ENV: 'environment.yml' - CI_OS_NAME: 'osx' - steps: - - uses: actions/checkout@v3 - - uses: conda-incubator/setup-miniconda@v2 - with: - activate-environment: 'mne' - python-version: ${{ env.PYTHON_VERSION }} - environment-file: ${{ env.CONDA_ENV }} - miniforge-version: latest - miniforge-variant: Mambaforge - use-mamba: true - - shell: bash -el {0} - run: | - ./tools/github_actions_dependencies.sh - name: 'Install dependencies' - # 
https://github.com/mne-tools/mne-python/issues/10805 - # https://github.com/mne-tools/mne-python/runs/7042965701?check_suite_focus=true - #- shell: bash -el {0} - # run: | - # source tools/get_minimal_commands.sh - # name: 'Install minimal commands' - #- shell: bash -el {0} - # run: mne_surf2bem --version - # name: 'Check minimal commands' - - shell: bash -el {0} - run: ./tools/github_actions_install.sh - name: 'Install MNE' - - shell: bash -el {0} - run: ./tools/github_actions_infos.sh - name: 'Show infos' - - shell: bash -el {0} - run: ./tools/get_testing_version.sh - name: 'Get testing version' - - uses: actions/cache@v3 - with: - key: ${{ env.TESTING_VERSION }} - path: ~/mne_data - name: 'Cache testing data' - - shell: bash -el {0} - run: ./tools/github_actions_download.sh - name: 'Download testing data' - - shell: bash -el {0} - run: ./tools/github_actions_locale.sh - name: 'Print locale' - - shell: bash -el {0} - run: ./tools/github_actions_test.sh - name: 'Run tests' - - uses: codecov/codecov-action@v3 - if: success() - name: 'Upload coverage to CodeCov' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000..85e8ea7ebc7 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,51 @@ +# Upload a Python Package using Twine when a release is created + +name: Build +on: # yamllint disable-line rule:truthy + release: + types: [published] + push: + branches: ["main", "maint/*"] + pull_request: + branches: ["main", "maint/*"] + +permissions: + contents: read + +jobs: + package: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - uses: actions/setup-python@v6 + with: + python-version: '3.10' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build twine + - run: python -m build --sdist --wheel + - run: twine check --strict dist/* + - uses: actions/upload-artifact@v5 + with: + name: dist + path: dist + + pypi-upload: + needs: package + runs-on: ubuntu-latest + if: github.event_name == 'release' + permissions: + id-token: write # for trusted publishing + environment: + name: pypi + url: https://pypi.org/p/mne + steps: + - uses: actions/download-artifact@v6 + with: + name: dist + path: dist + - uses: pypa/gh-action-pypi-publish@release/v1 + if: github.event_name == 'release' diff --git a/.github/workflows/spec_zero.yml b/.github/workflows/spec_zero.yml new file mode 100644 index 00000000000..6a45f2876b6 --- /dev/null +++ b/.github/workflows/spec_zero.yml @@ -0,0 +1,62 @@ +name: SPEC0 + +on: # yamllint disable-line rule:truthy + schedule: + - cron: '0 0 * * 1' # At 00:00 every Monday + workflow_dispatch: + inputs: + ssh: + description: 'Enable ssh debugging' + required: false + default: false + type: boolean + +jobs: + update_versions: + permissions: + contents: write + pull-requests: write + name: Update dependency versions + runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} + GITHUB_TOKEN: ${{ github.token }} + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: true + - name: Triage SSH + run: | + if [[ "${{ inputs.ssh }}" == "true" ]] || [[ "$COMMIT_MESSAGE" == *"[actions ssh]"* ]]; then + echo "ENABLE_SSH=true" | tee -a $GITHUB_ENV + else + echo "ENABLE_SSH=false" | tee -a $GITHUB_ENV + fi + - name: Setup Remote SSH Connection + if: env.ENABLE_SSH == 'true' + uses: mxschmitt/action-tmate@v3 + timeout-minutes: 10 + with: + detached: true + - uses: actions/setup-python@v6 + with: + python-version: '3.12' + - 
run: pip install packaging requests tomlkit + - run: python tools/dev/spec_zero_update_versions.py + - run: | + git diff && git status --porcelain + if [[ $(git status --porcelain) ]]; then + echo "dirty=true" >> $GITHUB_OUTPUT + fi + id: status + - name: Create PR + run: | + set -xeo pipefail + git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" + git config --global user.name "github-actions[bot]" + git checkout -b spec_zero + git commit -am "MAINT: Update dependency specifiers" + git push origin spec_zero + PR_NUM=$(gh pr create --base main --head spec_zero --title "MAINT: Update dependency specifiers" --body "Created by spec_zero [GitHub action](https://github.com/mne-tools/mne-python/actions/runs/${{ github.run_id }}).

*Adjustments may need to be made to shims in \`mne/fixes.py\` in this or another PR. \`git grep TODO VERSION\` is a good starting point for finding potential updates.*" --label "no-changelog-entry-needed") + echo "Opened https://github.com/mne-tools/mne-python/pull/${PR_NUM}" >> $GITHUB_STEP_SUMMARY + if: steps.status.outputs.dirty == 'true' diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000000..507bf4ebfb1 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,177 @@ +name: 'Tests' +concurrency: + group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} + cancel-in-progress: true +on: # yamllint disable-line rule:truthy + push: + branches: ["main", "maint/*"] + pull_request: + branches: ["main", "maint/*"] + # adapted from spyder-ide/spyder + workflow_dispatch: + inputs: + ssh: + description: 'Enable ssh debugging' + required: false + default: false + type: boolean + +permissions: + contents: read + +jobs: + style: + name: Style + runs-on: ubuntu-latest + timeout-minutes: 3 + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - uses: actions/setup-python@v6 + with: + python-version: '3.13' + - uses: pre-commit/action@v3.0.1 + - run: pip install mypy numpy scipy vulture + - run: mypy + - run: vulture + + bandit: + name: Bandit + needs: style + runs-on: ubuntu-latest + steps: + - uses: davidslusser/actions_python_bandit@v1.0.1 + with: + src: "mne" + options: "-c pyproject.toml -ll -r" + pip_install_command: "pip install bandit[toml]" + + pytest: + name: '${{ matrix.os }} / ${{ matrix.kind }} / ${{ matrix.python }}' + needs: style + timeout-minutes: 120 + runs-on: ${{ matrix.os }} + defaults: + run: + shell: bash -el {0} + env: + PYTHON_VERSION: '${{ matrix.python }}' + MKL_NUM_THREADS: '1' + OPENBLAS_NUM_THREADS: '1' + OMP_NUM_THREADS: '1' + PYTHONUNBUFFERED: '1' + MNE_CI_KIND: '${{ matrix.kind }}' + CI_OS_NAME: '${{ matrix.os }}' + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-latest + python: '3.13' + kind: pip + - os: ubuntu-latest + python: '3.13' + kind: pip-pre + - os: ubuntu-latest + python: '3.13' + kind: conda + - os: macos-latest # arm64 (Apple Silicon): Sequoia + python: '3.13' + kind: mamba + - os: macos-15-intel # intel: Sequoia + python: '3.13' + kind: mamba + - os: windows-latest + python: '3.11' + kind: mamba + - os: ubuntu-latest + python: '3.12' + kind: minimal + - os: ubuntu-22.04 + python: '3.10' + kind: old + steps: + - uses: actions/checkout@v5 + with: + fetch-depth: 0 + persist-credentials: false + - name: Get commit message + run: echo "COMMIT_MESSAGE=$(git show -s --format=%s ${{ github.event.pull_request.head.sha || github.sha }})" | tee -a ${GITHUB_ENV} + - name: Triage SSH + run: | + if [[ "${{ inputs.ssh }}" == "true" ]] || [[ "$COMMIT_MESSAGE" == *"[actions ssh]"* ]]; then + echo "ENABLE_SSH=true" | tee -a $GITHUB_ENV + else + echo "ENABLE_SSH=false" | tee -a $GITHUB_ENV + fi + - name: Setup Remote SSH Connection + if: env.ENABLE_SSH == 'true' + uses: mxschmitt/action-tmate@v3 + timeout-minutes: 80 + with: + detached: true + - run: ./tools/github_actions_env_vars.sh + # Xvfb/OpenGL + - uses: pyvista/setup-headless-display-action@v4 + with: + qt: true + pyvista: false + wm: false + # Python (if pip) + - uses: actions/setup-python@v6 + with: + python-version: ${{ matrix.python }} + if: startswith(matrix.kind, 'pip') + # Python (if conda) + - name: Fixes for conda + run: | + # For some reason on Linux we get crashes + if [[ "$RUNNER_OS" 
== "Linux" ]]; then + sed -i "/numba/d" environment.yml + fi + # And on Windows and macOS PySide6.9.0 segfaults + if [[ "$RUNNER_OS" == "macOS" ]]; then + sed -i "" "s/ - PySide6 .*/ - PySide6 =6.9.2/g" environment.yml + sed -i "" "s/ - vtk .*/ - vtk =9.5.1/g" environment.yml + + else + sed -i "s/ - PySide6 .*/ - PySide6 =6.9.2/g" environment.yml + sed -i "s/ - vtk .*/ - vtk =9.5.1/g" environment.yml + if [[ "$RUNNER_OS" == "Windows" ]]; then + echo "MNE_IS_OSMESA=true" | tee -a $GITHUB_ENV + fi + fi + if: matrix.kind == 'conda' || matrix.kind == 'mamba' + - uses: mamba-org/setup-micromamba@v2 + with: + environment-file: ${{ env.CONDA_ENV }} + environment-name: mne + log-level: ${{ runner.debug == '1' && 'debug' || 'info' }} + create-args: >- + python=${{ env.PYTHON_VERSION }} + -v + if: ${{ !startswith(matrix.kind, 'pip') }} + timeout-minutes: 20 + - run: bash ./tools/github_actions_dependencies.sh + # Minimal commands on Linux (macOS stalls) + - run: bash ./tools/get_minimal_commands.sh + if: startswith(matrix.os, 'ubuntu') && matrix.kind != 'minimal' && matrix.kind != 'old' + - run: bash ./tools/github_actions_infos.sh + # Check Qt + - run: bash ./tools/check_qt_import.sh $MNE_QT_BACKEND + if: env.MNE_QT_BACKEND != '' + - name: Run tests with no testing data + run: MNE_SKIP_TESTING_DATASET_TESTS=true pytest -m "not (ultraslowtest or pgtest)" --tb=short --cov=mne --cov-report xml -vv -rfE mne/ + if: matrix.kind == 'minimal' + - run: ./tools/get_testing_version.sh + - uses: actions/cache@v4 + with: + key: ${{ env.TESTING_VERSION }} + path: ~/mne_data + - run: bash ./tools/github_actions_download.sh + - run: bash ./tools/github_actions_test.sh # for some reason on macOS we need to run "bash X" in order for a failed test run to show up + - uses: codecov/codecov-action@v5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + if: success() || failure() diff --git a/.github/zizmor.yml b/.github/zizmor.yml new file mode 100644 index 00000000000..00ea2bb64ba --- /dev/null +++ b/.github/zizmor.yml @@ -0,0 +1,5 @@ +rules: + unpinned-uses: + config: + policies: + "*": ref-pin diff --git a/.gitignore b/.gitignore index 40c64c7bb65..d66fbef96de 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,11 @@ junit-results.xml *.tmproj *.png *.dat +# make sure we ship data files +!mne/data/**/*.dat +!mne/data/**/*.fif +!mne/data/**/*.fif.gz +!mne/icons/**/*.png .DS_Store events.eve foo-lh.label @@ -27,7 +32,6 @@ foo.lout bar.lout foobar.lout epochs_data.mat -memmap*.dat tmp-*.w tmtags auto_examples @@ -41,7 +45,6 @@ MNE-brainstorm-data* physionet-sleep-data* MEGSIM* build -mne/_version.py coverage htmlcov .cache/ @@ -63,13 +66,17 @@ tutorials/misc/report.h5 tutorials/io/fnirs.csv pip-log.txt .coverage* +!.coveragerc coverage.xml tags -doc/coverages -doc/samples -doc/*.dat -doc/fil-result -doc/optipng.exe +/doc/coverages +/doc/samples +/doc/fil-result +/doc/optipng.exe +/doc/sphinxext/.joblib +sg_execution_times.rst +sg_api_usage.rst +sg_api_unused.dot cover *.html @@ -90,7 +97,8 @@ cover .venv/ venv/ -*.json +/*.json +!codemeta.json .hypothesis/ - +.ruff_cache/ .ipynb_checkpoints/ diff --git a/.lgtm.yml b/.lgtm.yml deleted file mode 100644 index 4a43aa25c57..00000000000 --- a/.lgtm.yml +++ /dev/null @@ -1,8 +0,0 @@ -extraction: - javascript: - index: - filters: - - exclude: "**/*.js" -queries: - - exclude: py/missing-equals - - exclude: py/import-and-import-from diff --git a/.mailmap b/.mailmap index 0655885fc35..f76582bc657 100644 --- a/.mailmap +++ b/.mailmap @@ -2,8 +2,9 @@ Adam Li Adam Li Adam Li Adam Li 
Alan Leggitt leggitta Alessandro Tonin Lychfindel <58313635+Lychfindel@users.noreply.github.com> -Alex Rockhill Alex +Alex Lepauvre Alex lepauvre Alex Rockhill Alex +Alex Rockhill Alex Alex Rockhill Alex Rockhill Alex Rockhill Alex Rockhill Alexander Rudiuk Alexander Rudiuk @@ -14,16 +15,24 @@ Alexandre Gramfort Alexandre Gramfort Alexandre Gramfort Alexandre Gramfort Alexandre Gramfort Ana Radanovic anaradanovic <79697247+anaradanovic@users.noreply.github.com> +Andres Rodriguez Andrew Dykstra Andrew Quinn AJQuinn -Anna Padee <44297909+apadee@users.noreply.github.com> apadee <44297909+apadee@users.noreply.github.com> +Andy Gilbert <7andy121@gmail.com> Andrew Gilbert +Andy Gilbert <7andy121@gmail.com> Andrew Gilbert +Anna Padee apadee <44297909+apadee@users.noreply.github.com> Anne-Sophie Dubarry annesodub Archit Singhal <43236121+architsinghal-mriirs@users.noreply.github.com> archit singhal -Ashley Drew <33734402+ashdrew@users.noreply.github.com> ashdrew <33734402+ashdrew@users.noreply.github.com> +Arne Pelzer aplzr <7202498+aplzr@users.noreply.github.com> +Arne Pelzer pzr +Ashley Drew ashdrew <33734402+ashdrew@users.noreply.github.com> Asish Panda kaichogami Basile Pinsard Brad Buran Brad Buran Britta Westner britta-wstnr +Bruno Aristimunha +btkcodedev +buildqa Burkhard Maess Burkhard Maess Carina Forster Carina Carlos de la Torre carlos @@ -40,15 +49,18 @@ Christina Zhao ChristinaZhao Christoph Dinh Christoph Dinh Christopher J. Bailey Chris Bailey Claire Braboszcz claire-braboszcz +Clemens Brunner Clément Moutard -Cora Kim <41998428+kimcoco@users.noreply.github.com> kimcoco <41998428+kimcoco@users.noreply.github.com> +Cora Kim kimcoco <41998428+kimcoco@users.noreply.github.com> Cristóbal Moënne-Loccoz Cristóbal Dan G. Wakeman Daniel G. Wakeman Dan G. Wakeman Daniel Wakeman Dan G. Wakeman dgwakeman Dan G. Wakeman dgwakeman -Daniel Carlström Schad Daniel C Schad +Daniel C Schad Daniel C Schad +Daniel C Schad Daniel Carlström Schad Daniel McCloy Daniel McCloy +Daniel McCloy Daniel McCloy Daniel McCloy drammock Daniel Strohmeier Daniel Strohmeier Daniel Strohmeier joewalter @@ -66,6 +78,8 @@ Denis A. Engemann Denis Engemann dmalt Dominik Krzemiński dokato Dominik Welke dominikwelke <33089761+dominikwelke@users.noreply.github.com> +Dominik Welke dominikwelke +Dominik Wetzel Dominik Wetzel Eberhard Eich ebeich Eduard Ort Eduard Ort Eduard Ort eort @@ -73,7 +87,8 @@ Eduard Ort examplename Ellen Lau ellenlau Emily Stephen Emily P. 
Stephen Emily Stephen emilyps14 -Enrico Varano <69973551+enricovara@users.noreply.github.com> enricovara <69973551+enricovara@users.noreply.github.com> +Emma Bailey <93327939+emma-bailey@users.noreply.github.com> emma-bailey <93327939+emma-bailey@users.noreply.github.com> +Enrico Varano enricovara <69973551+enricovara@users.noreply.github.com> Enzo Altamiranda enzo Eric Larson Eric Larson Eric Larson Eric Larson @@ -89,7 +104,7 @@ Erkka Heinila Teekuningas Etienne de Montalivet Evgenii Kalenkovich kalenkovich Evgeny Goldstein <84768107+evgenygoldstein@users.noreply.github.com> evgenygoldstein <84768107+evgenygoldstein@users.noreply.github.com> -Ezequiel Mikulan <39155887+ezemikulan@users.noreply.github.com> ezemikulan <39155887+ezemikulan@users.noreply.github.com> +Ezequiel Mikulan ezemikulan <39155887+ezemikulan@users.noreply.github.com> Fahimeh Mamashli <33672431+fmamashli@users.noreply.github.com> fmamashli <33672431+fmamashli@users.noreply.github.com> Fede Raimondo Fede Fede Raimondo Fede Raimondo @@ -100,32 +115,44 @@ Fede Raimondo Federico Raimondo Federico Zamberlan <44038765+fzamberlan@users.noreply.github.com> Felix Klotzsche eioe Felix Klotzsche eioe -Félix Raimundo Felix Raimundo Frederik D. Weber Frederik-D-Weber Fu-Te Wong foucault Fu-Te Wong zuxfoucault +Félix Raimundo Felix Raimundo Gansheng Tan <49130176+GanshengT@users.noreply.github.com> Gansheng TAN <49130176+GanshengT@users.noreply.github.com> +Gennadiy Belonosov <7503709+Genuster@users.noreply.github.com> Gennadiy <7503709+Genuster@users.noreply.github.com> +Gennadiy Belonosov <7503709+Genuster@users.noreply.github.com> Genuster <7503709+Genuster@users.noreply.github.com> Giorgio Marinato neurogima <76406896+neurogima@users.noreply.github.com> +Giulio Gabrieli Guillaume Dumas deep-introspection Guillaume Dumas Guillaume Dumas +Hakimeh Aslsardroud Hamid Maymandi <46011104+HamidMandi@users.noreply.github.com> Hamid <46011104+HamidMandi@users.noreply.github.com> +Hasrat Ali Arzoo hasrat17 <56307533+hasrat17@users.noreply.github.com> +Hongjiang Ye YE Hongjiang Hongjiang Ye YE Hongjiang Hubert Banville hubertjb +Hyonyoung Shin <55095699+mcvain@users.noreply.github.com> mcvain <55095699+mcvain@users.noreply.github.com> Hüseyin Orkun Elmas Hüseyin Ingoo Lee dlsrnsi +Ivo de Jong ivopascal Jaakko Leppakangas Jaakko Leppakangas Jaakko Leppakangas jaeilepp Jaakko Leppakangas jaeilepp +Jacob Phelan Jair Montoya jmontoyam +Jan Ebert janEbert +Jan Sedivy Jan Sosulski jsosulski Jean-Baptiste Schiratti Jean-Baptiste SCHIRATTI -Jean-Remi King Jean-Rémi KING -Jean-Remi King kingjr -Jean-Remi King kingjr -Jean-Remi King kingjr -Jean-Remi King UMR9752 -Jean-Remi King UMR9752 +Jean-Rémi King Jean-Rémi KING +Jean-Rémi King kingjr +Jean-Rémi King kingjr +Jean-Rémi King kingjr +Jean-Rémi King UMR9752 +Jean-Rémi King UMR9752 Jeff Stout jstout211 +Jennifer Behnke Jesper Duemose Nielsen jdue Jevri Hanna Jeff Hanna Jevri Hanna Jevri Hanna @@ -142,30 +169,36 @@ Jona Sassenhagen jona-sassenhagen jona-sassenhagen@ Jona Sassenhagen jona.sassenhagen@gmail.com Jona Sassenhagen sassenha +Jonathan Kuziek Jordan Drew <39603454+jadrew43@users.noreply.github.com> jadrew43 <39603454+jadrew43@users.noreply.github.com> Joris Van den Bossche Joris Van den Bossche +Joshua Calder-Travis <38797399+jCalderTravis@users.noreply.github.com> jCalderTravis <38797399+jCalderTravis@users.noreply.github.com> +Joshua J Bear +Joshua Teves Joshua Teves José C. García Alanis Jose Alanis José C. García Alanis Jose C. G. 
Alanis <12409129+JoseAlanis@users.noreply.github.com> José C. García Alanis José C. G. Alanis <12409129+JoseAlanis@users.noreply.github.com> José C. García Alanis José C. García Alanis <12409129+JoseAlanis@users.noreply.github.com> -Joshua J Bear -Joshua Teves Joshua Teves Julius Welzel <52565341+JuliusWelzel@users.noreply.github.com> jwelzel <52565341+JuliusWelzel@users.noreply.github.com> +Justus Schwabedal Kaisu Lankinen <41806798+klankinen@users.noreply.github.com> klankinen <41806798+klankinen@users.noreply.github.com> Kambiz Tabavi Kambiz Tavabi Kambiz Tabavi kambysese Katarina Slama katarinaslama +Katia Al-Amir <129207373+katia-sentry@users.noreply.github.com> Katia <129207373+katia-sentry@users.noreply.github.com> Kostiantyn Maksymenko kostiantyn maksymenko Kostiantyn Maksymenko Maksymenko Kostiantyn Kostiantyn Maksymenko Maksymenko Kostiantyn Laetitia Grabot LaetitiaG Larry Eisenman lneisenman +Laurent Lementec Lenny Varghese lennyvarghese +Liberty Hamilton Lorenz Esch Lorenz Esch Lorenzo Alfine lorrandal Louis Thibault = Louis Thibault Louis Thibault -Lukas Gemein gemeinl +Lukas Gemein gemeinl Lukáš Hejtmánek hejtmy Mads Jensen mads jensen Mainak Jas Mainak @@ -175,31 +208,36 @@ Mainak Jas Mainak Jas mainakjas Manoj Kumar MechCoder Manu Sutela MJAS1 -Marian Dovgialo Marian Dovgialo -Marian Dovgialo mdovgialo +Marian Dovgialo Marian Dovgialo +Marian Dovgialo Marian Dovgialo +Marian Dovgialo mdovgialo Marijn van Vliet Marijn van Vliet -Mark Alexander Henney Mark -Mark Alexander Henney Mark Henney <120719655+henneysq@users.noreply.github.com> +Mark Henney Mark +Mark Henney Mark Alexander Henney +Mark Henney Mark Henney <120719655+henneysq@users.noreply.github.com> Mark Wronkiewicz wronk Marmaduke Woodman maedoc -Martin Billinger kazemakase -Martin Billinger Martin -Martin Billinger Martin Billinger -Martin Billinger mbillingr +Martin BaBer +Martin Billinger kazemakase +Martin Billinger kazemakase +Martin Billinger Martin Billinger +Martin Billinger mbillingr Martin Luessi martin Martin Luessi martin Martin Luessi mluessi@nmr.mgh.harvard.edu -Martin Schulz Martin Schulz <46245704+marsipu@users.noreply.github.com> +Martin Perez-Guevara +Martin Schulz Martin Schulz <46245704+marsipu@users.noreply.github.com> +Martin Schulz Martin Schulz Martin van Harmelen <1544429+MPvHarmelen@users.noreply.github.com> Martin <1544429+MPvHarmelen@users.noreply.github.com> Mathieu Scheltienne Mathieu Scheltienne <73893616+mscheltienne@users.noreply.github.com> Mathieu Scheltienne Mathieu Scheltienne Mathurin Massias mathurinm Mathurin Massias mathurinm -Mats van Es Mats Mats Matt Sanderson monkeyman192 Matteo Anelli Matteo Anelli Matteo Visconti di Oleggio Castello Matteo Visconti dOC -Matthias Dold <62005770+matthiasdold@users.noreply.github.com> matthiasdold <62005770+matthiasdold@users.noreply.github.com> +Matthias Dold matthiasdold <62005770+matthiasdold@users.noreply.github.com> Matthias Eberlein <41163089+MatthiasEb@users.noreply.github.com> MatthiasEb <41163089+MatthiasEb@users.noreply.github.com> Matti Hämäläinen Matti Hamalainen Matti Hämäläinen Matti Hamalainen @@ -212,25 +250,33 @@ Mikołaj Magnuski Mikolaj Magnuski mmagnuski Mohamed Sherif mohdsherif Mohammad Daneshzand <55800429+mdaneshzand@users.noreply.github.com> mdaneshzand <55800429+mdaneshzand@users.noreply.github.com> +Mojackhak <23111220065@m.fudan.edu.cn> Ankang Hu <23111220065@m.fudan.edu.cn> +Motofumi Fushimi <30593537+motofumi-fushimi@users.noreply.github.com> motofumi-fushimi <30593537+motofumi-fushimi@users.noreply.github.com> 
Natalie Klein natalieklein Nathalie Gayraud Nathalie Nathalie Gayraud Nathalie Naveen Srinivasan <172697+naveensrinivasan@users.noreply.github.com> Naveen <172697+naveensrinivasan@users.noreply.github.com> Nicolas Barascud nbara Nicolas Barascud Nicolas Barascud <10333715+nbara@users.noreply.github.com> +Nicolas Fourcaud-Trocmé Fourcaud-Trocmé Nicolas Gensollen Gensollen Nicolas Legrand Legrand Nicolas Nicolas Legrand LegrandNico Nicolas Legrand Nicolas Legrand +Niels Focke nmri-nfocke <114056301+nmri-nfocke@users.noreply.github.com> Niklas Wilming Niklas Wilming Nikolai Chapochnikov <23103092+chapochn@users.noreply.github.com> chapochn <23103092+chapochn@users.noreply.github.com> Nikolai Chapochnikov <23103092+chapochn@users.noreply.github.com> Nikolai M Chapochnikov <23103092+chapochn@users.noreply.github.com> Nikolas Chalas Nichalas +Noah Markowitz <34498671+nmarkowitz@users.noreply.github.com> NoahMarkowitz <34498671+nmarkowitz@users.noreply.github.com> Olaf Hauk Olaf Hauk Olaf Hauk olafhauk Omer Shubi Omer S +Pablo Arias Paul Pasler ppasler -Paul Roujansky +Paul Roujansky Paul ROUJANSKY +Paul Roujansky paulroujansky +Pavel Navratil Pedro Silva pbnsilva Phillip Alday Phillip Alday Phillip Alday Phillip Alday @@ -240,57 +286,86 @@ Pierre-Antoine Bannier Pierre-Antoine Bannier Pierre-Antoine Bannier Pierre-Antoine Bannier Pierre-Antoine Bannier Pierre-Antoine Bannier Pierre-Antoine Bannier Pierre-Antoine Bannier +Ping-Keng Jao nafraw Praveen Sripad prav Praveen Sripad prav +Proloy Das pdas6 Ram Pari Ram Ramonapariciog Apariciogarcia ramonapariciog +Rasmus Aagaard roraa Reza Nasri Reza Reza Nasri RezaNasri +Riessarius Stargardsky Roan LaPlante aestrivex -Rob Luke Robert Luke <748691+rob-luke@users.noreply.github.com> +Robert Luke Robert Luke <748691+rob-luke@users.noreply.github.com> +Robert Luke Robert Luke +Robert Seymour Robin Tibor Schirrmeister robintibor +Roeland Hancock +Romain Derollepot Romain Trachel Romain Trachel Romain Trachel Romain Trachel Romain Trachel trachelr Roman Goj +Rongfei Jin <131315c@gmail.com> GreasyCat <131315c@gmail.com> Ross Maddox rkmaddox Ross Maddox Ross Maddox Ross Maddox unknown -Rotem Falach Falach +Rotem Falach Falach +Roy Eric Wieske <139973278+Randomidous@users.noreply.github.com> Roy Eric <139973278+Randomidous@users.noreply.github.com> Ryan Law Ryan Law +Ryan Law Ryan M.C. Law +Sammi Chekroud Samuel Deslauriers-Gauthier Samuel Deslauriers-Gauthier +Santeri Ruuskanen Santeri Ruuskanen <66060772+ruuskas@users.noreply.github.com> Sara Sommariva sarasommariva Sebastien Treguer DataFox Sena Er <2799280+sena-neuro@users.noreply.github.com> Sena <2799280+sena-neuro@users.noreply.github.com> Senwen Deng <36327760+snwnde@users.noreply.github.com> Senwen DENG <36327760+snwnde@users.noreply.github.com> +Shristi Baral shristi Silvia Cotroneo <78911192+sfc-neuro@users.noreply.github.com> sfc-neuro <78911192+sfc-neuro@users.noreply.github.com> Simon Kern Simon Kern <14980558+skjerns@users.noreply.github.com> Simon Kern skjerns <14980558+skjerns@users.noreply.github.com> Simon Kern skjerns Sondre Foslien sondrfos +Sophie Herbst +Steinn Hauser Magnússon Steinn Magnusson Steve Matindi stevemats Steven Bierer Steven Bierer <40672003+NeuroLaunch@users.noreply.github.com> Steven M. Gutstein S. M. Gutstein Steven M. Gutstein smgutstein -T. Wang <81429617+twang5@users.noreply.github.com> twang5 <81429617+twang5@users.noreply.github.com> +sviter +T. 
Wang twang5 <81429617+twang5@users.noreply.github.com> Tanay Gahlot Tanay -Teon Brooks -Teon Brooks -Teon Brooks Teon -Teon Brooks Teon Brooks +Teon L Brooks +Teon L Brooks +Teon L Brooks Teon +Teon L Brooks Teon Brooks Thomas Donoghue Tom Thomas Radman -Timon Merk <38216460+timonmerk@users.noreply.github.com> timonmerk <38216460+timonmerk@users.noreply.github.com> +Timon Merk +Timon Merk Timon Merk <38216460+timonmerk@users.noreply.github.com> +Timon Merk timonmerk <38216460+timonmerk@users.noreply.github.com> Timothy Gates Tim Gates +Timur Sokhin Tod Flak <45362686+todflak@users.noreply.github.com> todflak <45362686+todflak@users.noreply.github.com> Tom Ma myd7349 +Tom Stone Stone +Tom Stone tomdstone <77251489+tomdstone@users.noreply.github.com> Tristan Stenner Tristan Stenner Tziona NessAiver TzionaN +user27182 <89109579+user27182@users.noreply.github.com> Valerii Chirkov Valerii <42982039+vagechirkov@users.noreply.github.com> Valerii Chirkov Valerii +Velu Prabhakar Kumaravel Velu Prabhakar Kumaravel Victoria Peterson vpeterson +Wei Xu Wei +Will Turner Will Turner +Yiping Zuo Frostime Yousra Bekhti Yoursa BEKHTI Yousra Bekhti Yoursa BEKHTI Yousra Bekhti Yousra BEKHTI Yousra Bekhti yousrabk Zhi Zhang <850734033@qq.com> ZHANG Zhi <850734033@qq.com> +Zhi Zhang <850734033@qq.com> ZHANG Zhi +Ziyi ZENG ZIYI ZENG diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..56211f729c9 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,104 @@ +repos: + # Ruff mne + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.14.4 + hooks: + - id: ruff-check + name: ruff lint mne + args: ["--fix"] + files: ^mne/|^tools/ + exclude: vulture_allowlist.py + - id: ruff-check + name: ruff lint mne preview + args: ["--fix", "--preview", "--select=NPY201"] + files: ^mne/|^tools/ + - id: ruff-check + name: ruff lint doc, tutorials, and examples + # D103: missing docstring in public function + # D400: docstring first line must end with period + args: ["--ignore=D103,D400", "--fix"] + files: ^doc/|^tutorials/|^examples/ + - id: ruff-format + files: ^mne/|^doc/|^tutorials/|^examples/|^tools/ + + # Codespell + - repo: https://github.com/codespell-project/codespell + rev: v2.4.1 + hooks: + - id: codespell + additional_dependencies: + - tomli + files: ^mne/|^doc/|^examples/|^tutorials/|^tools/ + types_or: [python, bib, rst, inc] + + # yamllint + - repo: https://github.com/adrienverge/yamllint.git + rev: v1.37.1 + hooks: + - id: yamllint + args: [--strict, -c, .yamllint.yml] + + # rstcheck + - repo: https://github.com/rstcheck/rstcheck.git + rev: v6.2.5 + hooks: + - id: rstcheck + additional_dependencies: + - tomli + files: ^doc/.*\.(rst|inc)$ + # Credit is problematic because we generate an include on the fly + exclude: ^doc/credit.rst$ + + # sorting + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v6.0.0 + hooks: + - id: file-contents-sorter + files: ^doc/changes/names.inc|^.mailmap|^doc/sphinxext/related_software.txt + args: ["--ignore-case"] + + - repo: https://github.com/pappasam/toml-sort + rev: v0.24.3 + hooks: + - id: toml-sort-fix + files: pyproject.toml + + # dependencies + - repo: local + hooks: + - id: update-env-file + name: Copy dependency changes from pyproject.toml to environment.yml + language: python + entry: ./tools/hooks/update_environment_file.py + files: '^(pyproject.toml|tools/hooks/update_environment_file.py)$' + - repo: local + hooks: + - id: dependency-sync + name: Copy core dependencies from pyproject.toml to README.rst + 
language: python + entry: ./tools/hooks/sync_dependencies.py + files: '^(pyproject.toml|tools/hooks/sync_dependencies.py)$' + additional_dependencies: ["mne==1.10.0"] + + # zizmor + - repo: https://github.com/woodruffw/zizmor-pre-commit + rev: v1.16.3 + hooks: + - id: zizmor + args: [--fix] + +# these should *not* be run on CIs: +ci: + skip: [dependency-sync] # needs MNE to work, which exceeds the free tier space alloc. + +# The following are too slow to run on local commits, so let's only run on CIs: +# +# - repo: https://github.com/pre-commit/mirrors-mypy +# rev: v1.9.0 +# hooks: +# - id: mypy +# +# - repo: https://github.com/jendrikseipp/vulture +# rev: 'v2.11' # or any later Vulture version +# hooks: +# - id: vulture diff --git a/.yamllint.yml b/.yamllint.yml new file mode 100644 index 00000000000..3b8f96c0e53 --- /dev/null +++ b/.yamllint.yml @@ -0,0 +1,10 @@ +extends: default + +ignore: | + .github/workflows/codeql-analysis.yml + +rules: + line-length: disable + document-start: disable + new-lines: + type: platform diff --git a/CITATION.cff b/CITATION.cff index 41e3352ed9e..1fbe592e0f5 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,9 +1,9 @@ cff-version: 1.2.0 title: "MNE-Python" message: "If you use this software, please cite both the software itself, and the paper listed in the preferred-citation field." -version: 1.3.0 -date-released: "2022-12-22" -commit: ea0f80614ca01f71333cbdfa4d06ee8932058cfe +version: 1.10.0 +date-released: "2025-07-14" +commit: 0c0e72efe1f062cce5fb2aaa9509cf77036ae6f9 doi: 10.5281/zenodo.592483 keywords: - MEG @@ -32,57 +32,55 @@ authors: - family-names: Jas given-names: Mainak - family-names: Brooks - given-names: Teon + given-names: Teon L - family-names: Sassenhagen given-names: Jona - - family-names: Luessi - given-names: Martin - family-names: McCloy given-names: Daniel + - family-names: Luessi + given-names: Martin - family-names: King - given-names: Jean-Remi - - family-names: Goj - given-names: Roman - - family-names: Favelier - given-names: Guillaume + given-names: Jean-Rémi - family-names: Höchenberger given-names: Richard - family-names: Brunner given-names: Clemens + - family-names: Goj + given-names: Roman + - family-names: Favelier + given-names: Guillaume - family-names: van Vliet given-names: Marijn - family-names: Wronkiewicz given-names: Mark + - family-names: Rockhill + given-names: Alex + - family-names: Appelhoff + given-names: Stefan - family-names: Holdgraf given-names: Chris + - family-names: Scheltienne + given-names: Mathieu - family-names: Massich given-names: Joan - family-names: Bekhti given-names: Yousra - - family-names: Rockhill - given-names: Alex - - family-names: Appelhoff - given-names: Stefan - family-names: Leggitt given-names: Alan - family-names: Dykstra given-names: Andrew - - family-names: Luke - given-names: Rob - family-names: Trachel given-names: Romain + - family-names: Luke + given-names: Robert - family-names: De Santis given-names: Lorenzo - family-names: Panda given-names: Asish - family-names: Magnuski given-names: Mikołaj - - family-names: Scheltienne - given-names: Mathieu - family-names: Westner given-names: Britta - - family-names: Billinger - given-names: Martin - family-names: Wakeman given-names: Dan G - family-names: Strohmeier @@ -105,8 +103,12 @@ authors: given-names: Luke - family-names: Raimondo given-names: Fede + - family-names: Huberty + given-names: Scott - family-names: Nurminen given-names: Jussi + - family-names: Billinger + given-names: Martin - family-names: Montoya given-names: Jair 
- family-names: Woodman @@ -121,6 +123,8 @@ authors: given-names: Cathy - family-names: García Alanis given-names: José C + - family-names: Orfanos + given-names: Dimitri Papadopoulos - family-names: Hauk given-names: Olaf - family-names: Maddox @@ -131,14 +135,19 @@ authors: given-names: Ashley - family-names: Dinh given-names: Christoph + - family-names: Binns + given-names: Thomas S - family-names: Dumas given-names: Guillaume + - name: Martin + - family-names: Benerradi + given-names: Johann - family-names: Hartmann given-names: Thomas - family-names: Ort given-names: Eduard - - family-names: Benerradi - given-names: Johann + - family-names: Billinger + given-names: Martin - family-names: Pasler given-names: Paul - family-names: Repplinger @@ -149,6 +158,8 @@ authors: given-names: Ana - family-names: Buran given-names: Brad + - family-names: Woessner + given-names: Jacob - family-names: Massias given-names: Mathurin - family-names: Hämäläinen @@ -161,6 +172,8 @@ authors: given-names: Christopher - family-names: Raimundo given-names: Félix + - family-names: Kaneda + given-names: Michiru - family-names: Alday given-names: Phillip - family-names: Pari @@ -171,18 +184,28 @@ authors: given-names: Yaroslav - family-names: Luo given-names: Yu-Han + - family-names: Gramfort + given-names: Alexandre - family-names: Kasper given-names: Johannes - family-names: Doelling given-names: Keith - family-names: Jensen given-names: Mads + - family-names: Ruuskanen + given-names: Santeri + - family-names: Kern + given-names: Simon - family-names: Gahlot given-names: Tanay - family-names: Nunes given-names: Adonay - family-names: Gütlin given-names: Dirk + - family-names: Heinila + given-names: Erkka + - family-names: Armeni + given-names: Kristijan - name: kjs - family-names: Weinstein given-names: Alejandro @@ -192,54 +215,60 @@ authors: given-names: Catalina María - family-names: Moënne-Loccoz given-names: Cristóbal - - family-names: Heinila - given-names: Erkka + - family-names: Altukhov + given-names: Dmitrii + - family-names: Peterson + given-names: Erica - family-names: Hanna given-names: Jevri - family-names: Houck given-names: Jon - family-names: Klein given-names: Natalie + - family-names: Roujansky + given-names: Paul + - family-names: Luke + given-names: Rob - family-names: Rantala given-names: Antti - family-names: Maess given-names: Burkhard + - family-names: Forster + given-names: Carina - family-names: O'Reilly given-names: Christian - - family-names: Peterson - given-names: Erica + - family-names: Welke + given-names: Dominik - family-names: Kolkhorst given-names: Henrich - family-names: Banville given-names: Hubert + - family-names: Zhang + given-names: Jack - family-names: Maksymenko given-names: Kostiantyn - family-names: Clarke given-names: Maggie - family-names: Anelli given-names: Matteo - - family-names: Kaneda - given-names: Michiru + - family-names: Chapochnikov + given-names: Nikolai - family-names: Bannier given-names: Pierre-Antoine - family-names: Choudhary given-names: Saket - - family-names: Huberty - given-names: Scott - - family-names: Kern - given-names: Simon - - family-names: Forster - given-names: Carina + - family-names: Férat + given-names: Victor - family-names: Kim given-names: Cora + - family-names: Welke + given-names: Dominik - family-names: Klotzsche given-names: Felix - family-names: Wong given-names: Fu-Te - family-names: Kojcic given-names: Ivana - - family-names: Zhang - given-names: Jack - family-names: Nielsen given-names: Jesper Duemose - family-names: 
Lankinen @@ -248,19 +277,26 @@ authors: given-names: Kambiz - family-names: Thibault given-names: Louis + - family-names: Gerster + given-names: Moritz + - family-names: Alibou + given-names: Nabil - family-names: Gayraud given-names: Nathalie - family-names: Ward given-names: Nick - - name: dependabot[bot] + - family-names: Chu + given-names: Qian + - family-names: Herbst + given-names: Sophie + - family-names: Radanovic + given-names: Ana - family-names: Quinn given-names: Andrew - family-names: Gauthier given-names: Antoine - family-names: Pinsard given-names: Basile - - family-names: Welke - given-names: Dominik - family-names: Stephen given-names: Emily - family-names: Hornberger @@ -271,14 +307,24 @@ authors: given-names: Evgenii - family-names: Mamashli given-names: Fahimeh + - family-names: Belonosov + given-names: Gennadiy + - family-names: O'Neill + given-names: George - family-names: Marinato given-names: Giorgio - family-names: Anevar given-names: Hafeza + - family-names: Abdelhedi + given-names: Hamza - family-names: Sosulski given-names: Jan - family-names: Stout given-names: Jeff + - family-names: Calder-Travis + given-names: Joshua + - family-names: Zhu + given-names: Judy D - family-names: Eisenman given-names: Larry - family-names: Esch @@ -289,6 +335,10 @@ authors: given-names: Nicolas - family-names: Legrand given-names: Nicolas + - family-names: Kapralov + given-names: Nikolai + - family-names: Molfese + given-names: Peter J - family-names: Falach given-names: Rotem - family-names: Deslauriers-Gauthier @@ -299,10 +349,16 @@ authors: given-names: Steve - family-names: Bierer given-names: Steven - - family-names: Férat - given-names: Victor + - family-names: Papadopoulo + given-names: Theodore + - family-names: Binns + given-names: Thomas Samuel + - family-names: Stenner + given-names: Tristan - family-names: Peterson given-names: Victoria + - family-names: Baratz + given-names: Zvi - family-names: Tonin given-names: Alessandro - family-names: Kovrig @@ -319,16 +375,20 @@ authors: given-names: Christina - family-names: Krzemiński given-names: Dominik - - family-names: Welke - given-names: Dominik - family-names: Makowski given-names: Dominique - family-names: Mikulan given-names: Ezequiel + - family-names: Hofer + given-names: Florian + - family-names: Ritz + given-names: Harrison - family-names: Schiratti given-names: Jean-Baptiste - family-names: Evans given-names: Jen + - family-names: Veillette + given-names: John - family-names: Drew given-names: Jordan - family-names: Teves @@ -337,8 +397,12 @@ authors: given-names: Kyle - family-names: Gwilliams given-names: Laura + - family-names: Lementec + given-names: Laurent - family-names: Varghese given-names: Lenny + - family-names: Hamilton + given-names: Liberty - family-names: Gemein given-names: Lukas - family-names: Hecker @@ -350,26 +414,28 @@ authors: given-names: Matt - family-names: Eberlein given-names: Matthias + - family-names: Žák + given-names: Michal - family-names: Sherif given-names: Mohamed - - family-names: Gerster - given-names: Moritz - family-names: Kozhemiako given-names: Nataliia - family-names: Srinivasan given-names: Naveen - family-names: Wilming given-names: Niklas - - family-names: Chapochnikov - given-names: Nikolai - family-names: Kozynets given-names: Oleh - family-names: Ablin given-names: Pierre + - family-names: Das + given-names: Proloy - family-names: Bertrand given-names: Quentin - family-names: Shoorangiz given-names: Reza + - family-names: Scholz + given-names: Richard - 
family-names: Hübner given-names: Rodrigo - family-names: Sommariva @@ -378,10 +444,10 @@ authors: given-names: Sena - family-names: Khan given-names: Sheraz - - family-names: Herbst - given-names: Sophie - family-names: Datta given-names: Sumalyo + - family-names: Donoghue + given-names: Thomas - family-names: Jochmann given-names: Thomas - family-names: Merk @@ -390,12 +456,12 @@ authors: given-names: Tod - family-names: Dupré la Tour given-names: Tom - - family-names: Stenner - given-names: Tristan - family-names: NessAiver given-names: Tziona - name: akshay0724 - name: sviter + - family-names: Earle-Richardson + given-names: Aaron - family-names: Hindle given-names: Abram - family-names: Koutsou @@ -406,24 +472,34 @@ authors: given-names: Adina - family-names: Ciok given-names: Alex - - family-names: Gramfort - given-names: Alexandre + - family-names: Lepauvre + given-names: Alex + - family-names: Kiefer + given-names: Alexander + - family-names: Gilbert + given-names: Andy - family-names: Pradhan given-names: Aniket - family-names: Padee given-names: Anna - family-names: Dubarry given-names: Anne-Sophie + - family-names: Collas + given-names: Antoine - family-names: Waniek given-names: Anton Nikolas - family-names: Singhal given-names: Archit - family-names: Rokem given-names: Ariel + - family-names: Pelzer + given-names: Arne - family-names: Hurst given-names: Austin - family-names: Beasley given-names: Ben + - family-names: Aristimunha + given-names: Bruno - family-names: Nicenboim given-names: Bruno - family-names: de la Torre @@ -437,9 +513,11 @@ authors: - family-names: Braboszcz given-names: Claire - family-names: Schad - given-names: Daniel Carlström + given-names: Daniel C - family-names: Hasegan given-names: Daniel + - family-names: Tse + given-names: Daniel - family-names: Sleiter given-names: Darin Erat - family-names: Haslacher @@ -452,8 +530,8 @@ authors: given-names: Desislava - family-names: Issagaliyeva given-names: Dinara - - family-names: Altukhov - given-names: Dmitrii + - family-names: Das + given-names: Diptyajit - family-names: Wetzel given-names: Dominik - family-names: Eich @@ -474,46 +552,63 @@ authors: given-names: Etienne - family-names: Goldstein given-names: Evgeny + - family-names: Mamashli + given-names: Fahimeh + - family-names: Negahbani + given-names: Farzin - family-names: Zamberlan given-names: Federico + - family-names: Pop + given-names: Florin - family-names: Weber given-names: Frederik D - family-names: Tan given-names: Gansheng - family-names: Brookshire given-names: Geoff + - family-names: O'Neill + given-names: George + - name: Giulio + - family-names: Reina + given-names: Gonzalo - family-names: Maymandi given-names: Hamid + - family-names: Arzoo + given-names: Hasrat Ali - family-names: Sonntag given-names: Hermann - family-names: Ye given-names: Hongjiang + - family-names: Shin + given-names: Hyonyoung - family-names: Elmas given-names: Hüseyin Orkun + - family-names: AZZ + given-names: Ilian - family-names: Machairas given-names: Ilias + - family-names: Zubarev + given-names: Ivan + - family-names: de Jong + given-names: Ivo + - family-names: Phelan + given-names: Jacob - family-names: Kaczmarzyk given-names: Jakub - family-names: Zerfowski given-names: Jan - family-names: van den Bosch given-names: Jasper J F - - family-names: Behnke - given-names: Jennifer - family-names: Van Der Donckt given-names: Jeroen - family-names: van der Meer given-names: Johan - family-names: Niediek given-names: Johannes - - family-names: Veillette - 
given-names: John - family-names: Koen given-names: Josh - family-names: Bear given-names: Joshua J - - family-names: Zhu - given-names: Judy D - family-names: Dammers given-names: Juergen - family-names: Galán @@ -522,20 +617,24 @@ authors: given-names: Julius - family-names: Slama given-names: Katarina + - family-names: Al-Amir + given-names: Katia - family-names: Leinweber given-names: Katrin - family-names: Grabot given-names: Laetitia - family-names: Andersen given-names: Lau Møller + - family-names: Almeida + given-names: Leonardo Rochael - family-names: Barbosa given-names: Leonardo S - - family-names: Hamilton - given-names: Liberty - family-names: Alfine given-names: Lorenzo - family-names: Hejtmánek given-names: Lukáš + - family-names: Balatsko + given-names: Maksym - family-names: Kitzbichler given-names: Manfred - family-names: Kumar @@ -547,10 +646,15 @@ authors: - family-names: Koculak given-names: Marcin - family-names: Henney - given-names: Mark Alexander + given-names: Mark + - family-names: BaBer + given-names: Martin + - family-names: Oberg + given-names: Martin - family-names: van Harmelen given-names: Martin - - name: MartinBaBer + - family-names: Scheltienne + given-names: Mathieu - family-names: Courtemanche given-names: Matt - family-names: Tucker @@ -573,30 +677,41 @@ authors: given-names: Mingjian - family-names: Daneshzand given-names: Mohammad + - name: Mojackhak + - family-names: Fourcaud-Trocmé + given-names: Nicolas - family-names: Gensollen given-names: Nicolas - family-names: Proulx given-names: Nicole + - family-names: Focke + given-names: Niels - family-names: Chalas given-names: Nikolas + - family-names: Markowitz + given-names: Noah - family-names: Shubi given-names: Omer + - family-names: Mainar + given-names: Pablo - family-names: Sundaram given-names: Padma - - family-names: Roujansky - given-names: Paul - family-names: Silva given-names: Pedro - - family-names: Molfese - given-names: Peter J + - family-names: Guetschel + given-names: Pierre - family-names: Li given-names: Quanliang + - family-names: Barthélemy + given-names: Quentin - family-names: Nadkarni given-names: Rahul - family-names: Gatti given-names: Ramiro - family-names: Apariciogarcia given-names: Ramonapariciog + - family-names: Aagaard + given-names: Rasmus - family-names: Nasri given-names: Reza - family-names: Koehler @@ -609,16 +724,24 @@ authors: given-names: Robert - family-names: Schirrmeister given-names: Robin Tibor + - family-names: Jin + given-names: Rongfei + - family-names: Wieske + given-names: Roy Eric - family-names: Law given-names: Ryan - family-names: Pai given-names: Sagun - family-names: Perry given-names: Sam - - family-names: Ruuskanen - given-names: Santeri + - family-names: Louviot + given-names: Samuel + - family-names: Saha + given-names: Sawradip - family-names: Mathot given-names: Sebastiaan + - family-names: Jentschke + given-names: Sebastian - family-names: Major given-names: Sebastian - family-names: Treguer @@ -629,10 +752,18 @@ authors: given-names: Senwen - family-names: Antopolskiy given-names: Sergey + - family-names: Shirazi + given-names: Seyed (Yahya) + - family-names: Keshari + given-names: Shresth + - family-names: Baral + given-names: Shristi - family-names: Wong given-names: Simeon - family-names: Wong given-names: Simeon + - family-names: Hofmann + given-names: Simon M - family-names: Poil given-names: Simon-Shlomo - family-names: Foslien @@ -641,6 +772,8 @@ authors: given-names: Sourav - family-names: Chambon given-names: Stanislas + - 
family-names: Magnússon + given-names: Steinn Hauser - family-names: Bethard given-names: Steven - family-names: Gutstein @@ -649,9 +782,7 @@ authors: given-names: Svea Marie - family-names: Wang given-names: T - - family-names: Papadopoulo - given-names: Theodore - - family-names: Donoghue + - family-names: Moreau given-names: Thomas - family-names: Radman given-names: Thomas @@ -659,16 +790,34 @@ authors: given-names: Timothy - family-names: Ma given-names: Tom + - family-names: Stone + given-names: Tom - family-names: Clausner given-names: Tommy - family-names: Anijärv given-names: Toomas Erik + - family-names: Kumaravel + given-names: Velu Prabhakar + - family-names: Xu + given-names: Wei + - family-names: Turner + given-names: Will + - family-names: Zuazo + given-names: Xabier de - family-names: Xia given-names: Xiaokai + - family-names: Zuo + given-names: Yiping + - family-names: Shen + given-names: Yixiao - family-names: Zhang given-names: Zhi + - family-names: ZENG + given-names: Ziyi + - name: btkcodedev - name: buildqa - name: luzpaz + - name: user27182 preferred-citation: title: "MEG and EEG Data Analysis with MNE-Python" journal: "Frontiers in Neuroscience" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bec834c7fdb..e653797b3ad 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,5 +5,5 @@ MNE-Python is maintained by a community of scientists and research labs. The pro Users and contributors to MNE-Python are expected to follow our [code of conduct](https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md). -The [contributing guide](https://mne.tools/dev/install/contributing.html) has details on the preferred contribution workflow +The [contributing guide](https://mne.tools/dev/development/contributing.html) has details on the preferred contribution workflow and the recommended system configuration for a smooth contribution/development experience. diff --git a/LICENSE.txt b/LICENSE.txt index 6d98ee83925..d30bf7507fb 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,24 +1,11 @@ -Copyright © 2011-2022, authors of MNE-Python -All rights reserved. +Copyright 2011-2024 MNE-Python authors -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 6c1aa9ff47f..00000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,89 +0,0 @@ -include *.rst -include LICENSE.txt -include SECURITY.md -include requirements.txt -include requirements_base.txt -include requirements_hdf5.txt -include requirements_testing.txt -include requirements_testing_extra.txt -include requirements_doc.txt -include mne/__init__.py - -recursive-include examples *.py -recursive-include examples *.txt -recursive-include tutorials *.py -recursive-include tutorials *.txt - -recursive-include mne *.py -recursive-include mne/data * -recursive-include mne/icons * -recursive-include mne/data/helmets * -recursive-include mne/data/image * -recursive-include mne/data/fsaverage * -include mne/datasets/_fsaverage/root.txt -include mne/datasets/_fsaverage/bem.txt -include mne/datasets/_infant/*.txt -include mne/datasets/_phantom/*.txt -include mne/data/dataset_checksums.txt -include mne/data/eegbci_checksums.txt - -recursive-include mne/html_templates *.html.jinja - -recursive-include mne/channels/data/layouts * -recursive-include mne/channels/data/montages * -recursive-include mne/channels/data/neighbors * - -recursive-include mne/gui/help *.json - -recursive-include mne/html *.js -recursive-include mne/html *.css - -recursive-include mne/report * - -recursive-include mne/io/artemis123/resources * - -recursive-include mne mne/datasets *.csv -include mne/io/edf/gdf_encodes.txt -include mne/datasets/sleep_physionet/SHA1SUMS - -### Exclude - -recursive-exclude examples/MNE-sample-data * -recursive-exclude examples/MNE-testing-data * -recursive-exclude examples/MNE-spm-face * -recursive-exclude examples/MNE-somato-data * -recursive-exclude tools * -exclude tools -exclude Makefile -exclude .coveragerc -exclude *.yml -exclude ignore_words.txt -exclude .mailmap -exclude codemeta.json -exclude CITATION.cff -recursive-exclude mne *.pyc - -recursive-exclude doc * -recursive-exclude logo * - -exclude CONTRIBUTING.md -exclude CODE_OF_CONDUCT.md -exclude .github -exclude .github/CONTRIBUTING.md -exclude .github/ISSUE_TEMPLATE -exclude .github/ISSUE_TEMPLATE/blank.md -exclude .github/ISSUE_TEMPLATE/bug_report.md -exclude .github/ISSUE_TEMPLATE/feature_request.md -exclude .github/PULL_REQUEST_TEMPLATE.md - -# Test files - -recursive-exclude mne/io/tests/data * -recursive-exclude mne/io/besa/tests/data * -recursive-exclude mne/io/bti/tests/data * -recursive-exclude mne/io/edf/tests/data * -recursive-exclude mne/io/kit/tests/data * -recursive-exclude mne/io/brainvision/tests/data * -recursive-exclude mne/io/egi/tests/data * -recursive-exclude mne/io/nicolet/tests/data * -recursive-exclude mne/preprocessing/tests/data * diff --git a/Makefile b/Makefile index a162617cd0a..3ce4eda956d 100644 --- a/Makefile +++ b/Makefile @@ -2,10 +2,9 @@ PYTHON ?= python PYTESTS ?= py.test -CTAGS ?= ctags CODESPELL_SKIPS ?= "doc/_build,doc/auto_*,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb,searchindex.dat,install_mne_c.rst,plot_*.rst,*.rst.txt,c_EULA.rst*,*.html,gdf_encodes.txt,*.svg,references.bib,*.css,*.edf,*.bdf,*.vhdr" CODESPELL_DIRS ?= mne/ doc/ tutorials/ examples/ -all: clean inplace test test-doc +all: 
clean test-doc clean-pyc: find . -name "*.pyc" | xargs rm -f @@ -25,15 +24,8 @@ clean-cache: clean: clean-build clean-pyc clean-so clean-ctags clean-cache -in: inplace # just a shortcut -inplace: - $(PYTHON) setup.py build_ext -i - wheel: - $(PYTHON) setup.py sdist bdist_wheel - -wheel_quiet: - $(PYTHON) setup.py -q sdist bdist_wheel + $(PYTHON) -m build -w sample_data: @python -c "import mne; mne.datasets.sample.data_path(verbose=True);" @@ -41,24 +33,6 @@ sample_data: testing_data: @python -c "import mne; mne.datasets.testing.data_path(verbose=True);" -pytest: test - -test: in - rm -f .coverage - $(PYTESTS) -m 'not ultraslowtest' mne - -test-verbose: in - rm -f .coverage - $(PYTESTS) -m 'not ultraslowtest' mne --verbose - -test-fast: in - rm -f .coverage - $(PYTESTS) -m 'not slowtest' mne - -test-full: in - rm -f .coverage - $(PYTESTS) mne - test-no-network: in sudo unshare -n -- sh -c 'MNE_SKIP_NETWORK_TESTS=1 py.test mne' @@ -66,85 +40,23 @@ test-no-testing-data: in @MNE_SKIP_TESTING_DATASET_TESTS=true \ $(PYTESTS) mne -test-no-sample-with-coverage: in testing_data - rm -rf coverage .coverage - $(PYTESTS) --cov=mne --cov-report html:coverage - test-doc: sample_data testing_data - $(PYTESTS) --doctest-modules --doctest-ignore-import-errors --doctest-glob='*.rst' ./doc/ --ignore=./doc/auto_examples --ignore=./doc/auto_tutorials --ignore=./doc/_build --ignore=./doc/conf.py --ignore=doc/sphinxext --fulltrace - -test-coverage: testing_data - rm -rf coverage .coverage - $(PYTESTS) --cov=mne --cov-report html:coverage -# what's the difference with test-no-sample-with-coverage? - -test-mem: in testing_data - ulimit -v 1097152 && $(PYTESTS) mne + $(PYTESTS) --tb=short --cov=mne --cov-report=xml --cov-branch --doctest-modules --doctest-ignore-import-errors --doctest-glob='*.rst' ./doc/ --ignore=./doc/auto_examples --ignore=./doc/auto_tutorials --ignore=./doc/_build --ignore=./doc/conf.py --ignore=doc/sphinxext --fulltrace -trailing-spaces: - find . 
-name "*.py" | xargs perl -pi -e 's/[ \t]*$$//' +pre-commit: + @pre-commit run -a --show-diff-on-failure -ctags: - # make tags for symbol based navigation in emacs and vim - # Install with: sudo apt-get install exuberant-ctags - $(CTAGS) -R * - -upload-pipy: - python setup.py sdist bdist_egg register upload - -flake: - @if command -v flake8 > /dev/null; then \ - echo "Running flake8"; \ - flake8 --count; \ - else \ - echo "flake8 not found, please install it!"; \ - exit 1; \ - fi; - @echo "flake8 passed" +# Aliases for stuff we used to support or users might think of +ruff: pre-commit +flake: pre-commit +pep: pre-commit codespell: # running manually @codespell --builtin clear,rare,informal,names,usage -w -i 3 -q 3 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt --uri-ignore-words-list=bu $(CODESPELL_DIRS) -codespell-error: # running on travis - @codespell --builtin clear,rare,informal,names,usage -i 0 -q 7 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt --uri-ignore-words-list=bu $(CODESPELL_DIRS) - -pydocstyle: - @echo "Running pydocstyle" - @pydocstyle mne - -docstring: - @echo "Running docstring tests" - @$(PYTESTS) --doctest-modules mne/tests/test_docstring_parameters.py - -check-manifest: - check-manifest -q --ignore .circleci/config.yml,doc,logo,mne/io/*/tests/data*,mne/io/tests/data,mne/preprocessing/tests/data,.DS_Store,mne/_version.py - -check-readme: clean wheel_quiet +check-readme: clean wheel twine check dist/* nesting: @echo "Running import nesting tests" @$(PYTESTS) mne/tests/test_import_nesting.py - -pep: - @$(MAKE) -k flake pydocstyle docstring codespell-error check-manifest nesting check-readme - -manpages: - @echo "I: generating manpages" - set -e; mkdir -p _build/manpages && \ - cd bin && for f in mne*; do \ - descr=$$(grep -h -e "^ *'''" -e 'DESCRIP =' $$f -h | sed -e "s,.*' *\([^'][^']*\)'.*,\1,g" | head -n 1); \ - PYTHONPATH=../ \ - help2man -n "$$descr" --no-discard-stderr --no-info --version-string "$(uver)" ./$$f \ - >| ../_build/manpages/$$f.1; \ - done - -build-doc-dev: - cd doc; make clean - cd doc; DISPLAY=:1.0 xvfb-run -n 1 -s "-screen 0 1280x1024x24 -noreset -ac +extension GLX +render" make html_dev - -build-doc-stable: - cd doc; make clean - cd doc; DISPLAY=:1.0 xvfb-run -n 1 -s "-screen 0 1280x1024x24 -noreset -ac +extension GLX +render" make html_stable - -docstyle: pydocstyle diff --git a/README.rst b/README.rst index d94f19ad8a0..150dd3107b8 100644 --- a/README.rst +++ b/README.rst @@ -1,177 +1,144 @@ .. -*- mode: rst -*- -|PyPI|_ |conda-forge|_ |Zenodo|_ |Discourse|_ |Codecov|_ - -|MNE|_ - -.. |PyPI| image:: https://img.shields.io/pypi/dm/mne.svg?label=PyPI -.. _PyPI: https://pypi.org/project/mne/ - -.. |conda-forge| image:: https://img.shields.io/conda/dn/conda-forge/mne.svg?label=Conda -.. _conda-forge: https://anaconda.org/conda-forge/mne - -.. |Zenodo| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.592483.svg -.. _Zenodo: https://doi.org/10.5281/zenodo.592483 - -.. |Discourse| image:: https://img.shields.io/discourse/status?label=Community&server=https%3A%2F%2Fmne.discourse.group%2F -.. _Discourse: https://mne.discourse.group/ - -.. |Codecov| image:: https://img.shields.io/codecov/c/github/mne-tools/mne-python?label=Coverage -.. _Codecov: https://codecov.io/gh/mne-tools/mne-python - -.. |MNE| image:: https://mne.tools/stable/_static/mne_logo.svg -.. 
_MNE: https://mne.tools/dev/
-
+|MNE|
 
 MNE-Python
 ==========
 
-`MNE-Python software`_ is an open-source Python package for exploring,
+MNE-Python is an open-source Python package for exploring,
 visualizing, and analyzing human neurophysiological data such as MEG, EEG, sEEG,
 ECoG, and more. It includes modules for data input/output, preprocessing,
 visualization, source estimation, time-frequency analysis, connectivity analysis,
-machine learning, and statistics.
+machine learning, statistics, and more.
 
 Documentation
 ^^^^^^^^^^^^^
 
-`MNE documentation`_ for MNE-Python is available online.
+`Documentation`_ for MNE-Python encompasses installation instructions, tutorials,
+and examples for a wide variety of topics, contributing guidelines, and an API
+reference.
 
-Installing MNE-Python
-^^^^^^^^^^^^^^^^^^^^^
+Forum
+^^^^^^
 
-To install the latest stable version of MNE-Python, you can use pip_ in a terminal:
+The `user forum`_ is the best place to ask questions about MNE-Python usage or
+the contribution process. The forum also features job opportunities and other
+announcements.
 
-.. code-block:: console
+If you find a bug or have an idea for a new feature that should be added to
+MNE-Python, please use the
+`issue tracker <https://github.com/mne-tools/mne-python/issues>`__ of
+our GitHub repository.
+
+
+Installation
+^^^^^^^^^^^^
+
+To install the latest stable version of MNE-Python with minimal dependencies
+only, use pip_ in a terminal:
 
-    $ pip install -U mne
+.. code-block:: console
 
-- MNE-Python 0.17 was the last release to support Python 2.7
-- MNE-Python 0.18 requires Python 3.5 or higher
-- MNE-Python 0.21 requires Python 3.6 or higher
-- MNE-Python 0.24 requires Python 3.7 or higher
-- MNE-Python 1.4 requires Python 3.8 or higher
+    $ pip install --upgrade mne
 
-For more complete instructions and more advanced installation methods (e.g. for
-the latest development version), see the `installation guide`_.
+For more complete instructions, including our standalone installers and more
+advanced installation methods, please refer to the `installation guide`_.
 
-Get the latest code
-^^^^^^^^^^^^^^^^^^^
+Get the development version
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-To install the latest version of the code using pip_ open a terminal and type:
+To install the latest development version of MNE-Python using pip_, open a
+terminal and type:
 
 .. code-block:: console
 
-    $ pip install -U https://github.com/mne-tools/mne-python/archive/main.zip
+    $ pip install --upgrade https://github.com/mne-tools/mne-python/archive/refs/heads/main.zip
 
-To get the latest code using `git <https://git-scm.com/>`__, open a terminal and type:
+To clone the repository with `git <https://git-scm.com/>`__, open a terminal
+and type:
 
 .. code-block:: console
 
     $ git clone https://github.com/mne-tools/mne-python.git
 
-Alternatively, you can also download a
-`zip file of the latest development version <https://github.com/mne-tools/mne-python/archive/main.zip>`__.
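A quick way to confirm that either route above worked is to import the package and print its
environment summary. This is a minimal sketch added here for illustration only, not a line of the
upstream README diff; it assumes ``mne`` is importable in the active environment and relies on
``mne.sys_info()``, the same report the CI jobs in this patch print via ``mne sys_info -pd``.

.. code-block:: python

    # Post-install sanity check (illustrative): report the installed version
    # and the platform/dependency summary used throughout the CI configs.
    import mne

    print(mne.__version__)
    mne.sys_info()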
- Dependencies ^^^^^^^^^^^^ The minimum required dependencies to run MNE-Python are: -- Python >= 3.8 -- NumPy >= 1.20.2 -- SciPy >= 1.6.3 -- Matplotlib >= 3.4.0 -- pooch >= 1.5 -- tqdm -- Jinja2 -- decorator - -For full functionality, some functions require: - -- Scikit-learn >= 0.24.2 -- joblib >= 0.15 (for parallelization control) -- mne-qt-browser >= 0.1 (for fast raw data visualization) -- Qt5 >= 5.12 via one of the following bindings (for fast raw data visualization and interactive 3D visualization): - - - PyQt6 >= 6.0 - - PySide6 >= 6.0 - - PyQt5 >= 5.12 - - PySide2 >= 5.12 - -- Numba >= 0.53.1 -- NiBabel >= 3.2.1 -- OpenMEEG >= 2.5.5 -- Pandas >= 1.2.4 -- Picard >= 0.3 -- CuPy >= 9.0.0 (for NVIDIA CUDA acceleration) -- DIPY >= 1.4.0 -- Imageio >= 2.8.0 -- PyVista >= 0.32 (for 3D visualization) -- pyvistaqt >= 0.4 (for 3D visualization) -- mffpy >= 0.5.7 -- h5py -- h5io -- pymatreader - -Contributing to MNE-Python -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Please see the documentation on the MNE-Python homepage: - -https://mne.tools/dev/install/contributing.html +.. ↓↓↓ BEGIN CORE DEPS LIST. DO NOT EDIT! HANDLED BY PRE-COMMIT HOOK ↓↓↓ +- `Python `__ ≥ 3.10 +- `NumPy `__ ≥ 1.25 +- `SciPy `__ ≥ 1.11 +- `Matplotlib `__ ≥ 3.7 +- `Pooch `__ ≥ 1.5 +- `tqdm `__ +- `Jinja2 `__ +- `decorator `__ +- `lazy-loader `__ ≥ 0.3 +- `packaging `__ -Forum -^^^^^^ +.. ↑↑↑ END CORE DEPS LIST. DO NOT EDIT! HANDLED BY PRE-COMMIT HOOK ↑↑↑ -https://mne.discourse.group +Contributing +^^^^^^^^^^^^ +Please see the `contributing guidelines `__ on our documentation website. -Licensing -^^^^^^^^^ -MNE-Python is **BSD-licenced** (BSD-3-Clause): +About +^^^^^ - This software is OSI Certified Open Source Software. - OSI Certified is a certification mark of the Open Source Initiative. ++---------+------------+----------------+ +| CI | |Codecov| | |Bandit| | ++---------+------------+----------------+ +| Package | |PyPI| | |conda-forge| | ++---------+------------+----------------+ +| Docs | |Docs| | |Discourse| | ++---------+------------+----------------+ +| Meta | |Zenodo| | |OpenSSF| | ++---------+------------+----------------+ - Copyright (c) 2011-2022, authors of MNE-Python. - All rights reserved. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: +License +^^^^^^^ - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. +MNE-Python is licensed under the BSD-3-Clause license. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the names of MNE-Python authors nor the names of any - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. +.. _Documentation: https://mne.tools/dev/ +.. _user forum: https://mne.discourse.group +.. _installation guide: https://mne.tools/dev/install/index.html +.. _pip: https://pip.pypa.io/en/stable/ - **This software is provided by the copyright holders and contributors - "as is" and any express or implied warranties, including, but not - limited to, the implied warranties of merchantability and fitness for - a particular purpose are disclaimed. 
In no event shall the copyright - owner or contributors be liable for any direct, indirect, incidental, - special, exemplary, or consequential damages (including, but not - limited to, procurement of substitute goods or services; loss of use, - data, or profits; or business interruption) however caused and on any - theory of liability, whether in contract, strict liability, or tort - (including negligence or otherwise) arising in any way out of the use - of this software, even if advised of the possibility of such - damage.** +.. |PyPI| image:: https://img.shields.io/pypi/dm/mne.svg?label=PyPI + :target: https://pypi.org/project/mne/ +.. |conda-forge| image:: https://img.shields.io/conda/dn/conda-forge/mne.svg?label=Conda + :target: https://anaconda.org/conda-forge/mne -.. _MNE-Python software: https://mne.tools/dev/ -.. _MNE documentation: https://mne.tools/dev/overview/index.html -.. _installation guide: https://mne.tools/dev/install/index.html -.. _pip: https://pip.pypa.io/en/stable/ +.. |Docs| image:: https://img.shields.io/badge/Docs-online-green?label=Documentation + :target: https://mne.tools/dev/ + +.. |Zenodo| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.592483.svg + :target: https://doi.org/10.5281/zenodo.592483 + +.. |Discourse| image:: https://img.shields.io/discourse/status?label=Forum&server=https%3A%2F%2Fmne.discourse.group%2F + :target: https://mne.discourse.group/ + +.. |Codecov| image:: https://img.shields.io/codecov/c/github/mne-tools/mne-python?label=Coverage + :target: https://codecov.io/gh/mne-tools/mne-python + +.. |Bandit| image:: https://img.shields.io/badge/Security-Bandit-yellow.svg + :target: https://github.com/PyCQA/bandit + +.. |OpenSSF| image:: https://www.bestpractices.dev/projects/7783/badge + :target: https://www.bestpractices.dev/projects/7783 + +.. |MNE| image:: https://mne.tools/dev/_static/mne_logo_gray.svg + :target: https://mne.tools/dev/ diff --git a/SECURITY.md b/SECURITY.md index 8ca1465708b..f8b0ce3d50a 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -10,9 +10,9 @@ without a proper 6-month deprecation cycle. | Version | Supported | | ------- | ------------------------ | -| 1.4.x | :heavy_check_mark: (dev) | -| 1.3.x | :heavy_check_mark: | -| < 1.3 | :x: | +| 1.11.x | :heavy_check_mark: (dev) | +| 1.10.x | :heavy_check_mark: | +| < 1.10 | :x: | ## Reporting a Vulnerability @@ -21,7 +21,7 @@ recorded with a variety of devices/modalities (EEG, MEG, ECoG, fNIRS, etc). It is not expected that using MNE-Python will lead to security vulnerabilities under normal use cases (i.e., running without administrator privileges). However, if you think you have found a security vulnerability -in MNE-Python, **please do not report it as a GitHub issue**, in order to +in MNE-Python, **please do not report it as a GitHub issue**, in order to keep the vulnerability confidential. Instead, please report it to mne-core-dev-team@groups.io and include a description and proof-of-concept that is [short and self-contained](http://www.sscce.org/). 
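The supported-version table above maps onto a simple programmatic check. The snippet below is an
illustrative sketch, not part of SECURITY.md: it assumes `mne` and `packaging` (listed as a core
dependency in the README) are installed, and the version bounds are taken from the table, so they
would need updating with each release.

```python
# Illustrative check against the support table above:
# 1.10.x is the supported stable series, 1.11.x is the development series.
from importlib.metadata import version
from packaging.version import Version

installed = Version(version("mne"))
supported = Version("1.10") <= installed < Version("1.12")
print(f"MNE-Python {installed}: {'supported' if supported else 'end of life'}")
```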
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index bedd0082506..474cdf326aa 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,6 +1,6 @@ trigger: # start a new build for every push - batch: False + batch: false branches: include: - 'main' @@ -13,367 +13,290 @@ pr: stages: -- stage: Check - jobs: - - job: Skip - pool: - vmImage: 'ubuntu-latest' - variables: - DECODE_PERCENTS: 'false' - RET: 'true' - BUILD_REASON: $(Build.Reason) - steps: - - bash: | - git_log=`git log --format=oneline -n 1 --skip=1` - echo "##vso[task.setvariable variable=log]$git_log" - - bash: echo "##vso[task.setvariable variable=RET]false" - condition: and(eq(variables.BUILD_REASON, 'PullRequest'), or(contains(variables.log, '[skip azp]'), contains(variables.log, '[azp skip]'), contains(variables.log, '[skip ci]'), contains(variables.log, '[ci skip]'))) - - bash: echo "##vso[task.setvariable variable=start_main;isOutput=true]$RET" - name: result + - stage: Check + jobs: + - job: Skip + pool: + vmImage: 'ubuntu-latest' + variables: + DECODE_PERCENTS: 'false' + RET: 'true' + BUILD_REASON: $(Build.Reason) + steps: + - bash: | + git_log=`git log --format=oneline -n 1 --skip=1` + echo "##vso[task.setvariable variable=log]$git_log" + - bash: echo "##vso[task.setvariable variable=RET]false" + condition: and(eq(variables.BUILD_REASON, 'PullRequest'), or(contains(variables.log, '[skip azp]'), contains(variables.log, '[azp skip]'), contains(variables.log, '[skip ci]'), contains(variables.log, '[ci skip]'))) + - bash: echo "##vso[task.setvariable variable=start_main;isOutput=true]$RET" + name: result -- stage: Style - variables: - AZURE_CI: 'true' - jobs: - - job: All - pool: - vmImage: 'ubuntu-latest' + - stage: Style variables: - PYTHON_VERSION: '3.9' - PYTHON_ARCH: 'x64' - steps: - - bash: echo $(COMMIT_MSG) - - task: UsePythonVersion@0 - inputs: - versionSpec: $(PYTHON_VERSION) - architecture: $(PYTHON_ARCH) - addToPath: true - displayName: 'Get Python' - - bash: | - set -e - python -m pip install --progress-bar off --upgrade pip setuptools wheel - python -m pip install --progress-bar off -r requirements_base.txt -r requirements_hdf5.txt -r requirements_testing.txt - displayName: Install dependencies - - bash: | - make flake - displayName: make flake - - bash: | - make codespell-error - displayName: make codespell - - bash: | - make pydocstyle - displayName: make pydocstyle - condition: always() - - bash: | - make docstring - displayName: make docstring - condition: always() - - bash: | - make nesting - displayName: make nesting - condition: always() - - bash: | - make check-manifest - displayName: make check-manifest - condition: always() - - bash: | - make check-readme - displayName: make check-readme - condition: always() + AZURE_CI: 'true' + jobs: + - job: All + pool: + vmImage: 'ubuntu-latest' + variables: + PYTHON_VERSION: '3.11' + PYTHON_ARCH: 'x64' + steps: + - bash: echo $(COMMIT_MSG) + - task: UsePythonVersion@0 + inputs: + versionSpec: $(PYTHON_VERSION) + architecture: $(PYTHON_ARCH) + addToPath: true + displayName: 'Get Python' + - bash: | + set -eo pipefail + python -m pip install --progress-bar off --upgrade pip build + python -m pip install --progress-bar off -ve .[hdf5] --group=test + python -m pip uninstall -yq pytest-qt # don't want to set up display, etc. 
for this + pre-commit install --install-hooks + displayName: Install dependencies + - bash: | + make pre-commit + displayName: make pre-commit + condition: always() + - bash: | + make nesting + displayName: make nesting + condition: always() + - bash: | + make check-readme + displayName: make check-readme + condition: always() + - bash: mypy + displayName: mypy + condition: always() + - bash: vulture + displayName: vulture + condition: always() -- stage: Test - condition: and(succeeded(), eq(dependencies.Check.outputs['Skip.result.start_main'], 'true')) - dependsOn: ['Style', 'Check'] - variables: - AZURE_CI: 'true' - jobs: - - job: Ultraslow_PG - pool: - vmImage: 'ubuntu-20.04' - variables: - DISPLAY: ':99' - OPENBLAS_NUM_THREADS: '1' - steps: - - bash: | - set -e - ./tools/setup_xvfb.sh - sudo apt install -yq tcsh - displayName: 'Install Ubuntu dependencies' - - bash: | - source tools/get_minimal_commands.sh - displayName: 'Install minimal commands' - - bash: | - echo $PATH - mne_surf2bem --version - fsl_rigid_register --version - displayName: 'Test minimal commands' - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.9' - architecture: 'x64' - addToPath: true - displayName: 'Get Python' - - bash: | - set -e - python -m pip install --progress-bar off --upgrade pip setuptools wheel codecov - python -m pip install --progress-bar off mne-qt-browser[opengl] pyvista scikit-learn pytest-error-for-skips python-picard "PySide6!=6.3.0,!=6.4.0,!=6.4.0.1" qtpy - python -m pip uninstall -yq mne - python -m pip install --progress-bar off --upgrade -e .[test] - displayName: 'Install dependencies with pip' - - bash: | - set -e - mne sys_info -pd - mne sys_info -pd | grep "qtpy .*(PySide6=.*)$" - displayName: Print config - # Uncomment if "xcb not found" Qt errors/segfaults come up again - # - bash: | - # set -e - # LD_DEBUG=libs python -c "from PySide6.QtWidgets import QApplication, QWidget; app = QApplication([]); import matplotlib; matplotlib.use('QtAgg'); import matplotlib.pyplot as plt; plt.figure()" - - bash: source tools/get_testing_version.sh - displayName: 'Get testing version' - - task: Cache@2 - inputs: - key: $(testing_version) - path: /home/vsts/mne_data - displayName: 'Cache testing data' - - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" - displayName: 'Get test data' - - script: pytest --error-for-skips -m "ultraslowtest or pgtest" --tb=short --cov=mne --cov-report=xml --cov-report=html -vv mne - displayName: 'slow and mne-qt-browser tests' - # Coverage - - bash: bash <(curl -s https://codecov.io/bash) - displayName: 'Codecov' - condition: succeededOrFailed() - - task: PublishTestResults@2 - inputs: - testResultsFiles: '**/junit-*.xml' - testRunTitle: 'Publish test results for $(Agent.JobName)' - failTaskOnFailedTests: true - condition: succeededOrFailed() - - task: PublishCodeCoverageResults@1 - inputs: - codeCoverageTool: Cobertura - summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml' - reportDirectory: '$(System.DefaultWorkingDirectory)/**/htmlcov' - - job: Qt - pool: - vmImage: 'ubuntu-20.04' + - stage: Test + condition: and(succeeded(), eq(dependencies.Check.outputs['Skip.result.start_main'], 'true')) + dependsOn: ['Style', 'Check'] variables: - DISPLAY: ':99' - OPENBLAS_NUM_THREADS: '1' - TEST_OPTIONS: "--tb=short --cov=mne --cov-report=xml --cov-report=html --cov-append -vv mne/viz/_brain mne/viz/backends mne/viz/tests/test_evoked.py mne/gui mne/report" - steps: - - bash: ./tools/setup_xvfb.sh - displayName: 'Install Ubuntu 
dependencies' - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.10' - architecture: 'x64' - addToPath: true - displayName: 'Get Python' - - bash: | - set -e - python -m pip install --progress-bar off --upgrade pip setuptools wheel - python -m pip install --progress-bar off --upgrade --pre --only-binary=\"numpy,scipy,matplotlib,vtk\" numpy scipy matplotlib vtk - python -c "import vtk" - python -m pip install --progress-bar off --upgrade -r requirements.txt -r requirements_testing.txt -r requirements_testing_extra.txt codecov - python -m pip install -e . - displayName: 'Install dependencies with pip' - - bash: | - set -e - which mne - mne sys_info -pd - python ./tools/check_mne_location.py - displayName: Print config - - bash: source tools/get_testing_version.sh - displayName: 'Get testing version' - - task: Cache@2 - inputs: - key: $(testing_version) - path: /home/vsts/mne_data - displayName: 'Cache testing data' - - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" - displayName: 'Get test data' - - bash: | - set -e - mne sys_info -pd - mne sys_info -pd | grep "qtpy .* (PySide6=.*)$" - pytest -m "not slowtest" ${TEST_OPTIONS} - python -m pip uninstall -yq PySide6 - displayName: 'PySide6' - - bash: | - set -e - python -m pip install PyQt6 - mne sys_info -pd - mne sys_info -pd | grep "qtpy .* (PyQt6=.*)$" - pytest -m "not slowtest" ${TEST_OPTIONS} - python -m pip uninstall -yq PyQt6 PyQt6-sip PyQt6-Qt6 - displayName: 'PyQt6' - - bash: | - set -e - python -m pip install PySide2 - mne sys_info -pd - mne sys_info -pd | grep "qtpy .* (PySide2=.*)$" - pytest -m "not slowtest" ${TEST_OPTIONS} - python -m pip uninstall -yq PySide2 - displayName: 'PySide2' - # PyQt5 leaves cruft behind, so run it last - - bash: | - set -e - python -m pip install PyQt5 - mne sys_info -pd - mne sys_info -pd | grep "qtpy .* (PyQt5=.*)$" - pytest -m "not slowtest" ${TEST_OPTIONS} - python -m pip uninstall -yq PyQt5 PyQt5-sip PyQt5-Qt5 - displayName: 'PyQt5' - # Coverage - - bash: bash <(curl -s https://codecov.io/bash) - displayName: 'Codecov' - condition: succeededOrFailed() - - task: PublishTestResults@2 - inputs: - testResultsFiles: '**/junit-*.xml' - testRunTitle: 'Publish test results for $(Agent.JobName)' - failTaskOnFailedTests: true - condition: succeededOrFailed() - - task: PublishCodeCoverageResults@1 - inputs: - codeCoverageTool: Cobertura - summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml' - reportDirectory: '$(System.DefaultWorkingDirectory)/**/htmlcov' + AZURE_CI: 'true' + jobs: + - job: Ultraslow_PG + pool: + vmImage: 'ubuntu-22.04' + variables: + DISPLAY: ':99' + OPENBLAS_NUM_THREADS: '1' + OMP_NUM_THREADS: '1' + MNE_TEST_ALLOW_SKIP: '^.*(PySide6 causes segfaults).*$' + MNE_BROWSER_PRECOMPUTE: 'false' + steps: + - bash: | + set -e + ./tools/setup_xvfb.sh + sudo apt install -yq tcsh + displayName: 'Install Ubuntu dependencies' + - bash: | + source tools/get_minimal_commands.sh + displayName: 'Install minimal commands' + - bash: | + echo $PATH + mne_surf2bem --version + fsl_rigid_register --version + displayName: 'Test minimal commands' + - task: UsePythonVersion@0 + inputs: + versionSpec: '3.12' + architecture: 'x64' + addToPath: true + displayName: 'Get Python' + - bash: | + set -e + python -m pip install --progress-bar off --upgrade pip + python -m pip install --progress-bar off "mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git" pyvista scikit-learn python-picard qtpy nibabel sphinx-gallery 
"PySide6!=6.8.0,!=6.8.0.1,!=6.8.1.1,!=6.9.1" pandas neo pymatreader antio defusedxml curryreader + python -m pip uninstall -yq mne + python -m pip install --progress-bar off --upgrade -e . --group=test + displayName: 'Install dependencies with pip' + - bash: | + set -e + mne sys_info -pd + mne sys_info -pd | grep "qtpy .*(PySide6=.*)$" + displayName: Print config + - bash: | + set -e + LD_DEBUG=libs python -c "from PySide6.QtWidgets import QApplication, QWidget; app = QApplication([]); import matplotlib; matplotlib.use('QtAgg'); import matplotlib.pyplot as plt; plt.figure()" + - bash: source tools/get_testing_version.sh + displayName: 'Get testing version' + - task: Cache@2 + inputs: + key: $(testing_version) + path: /home/vsts/mne_data + displayName: 'Cache testing data' + - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" + displayName: 'Get test data' + - script: pytest -m "ultraslowtest or pgtest" --tb=short --cov=mne --cov-report=xml -vv mne + displayName: 'slow and mne-qt-browser tests' + # Coverage + - bash: bash <(curl -s https://codecov.io/bash) + displayName: 'Codecov' + condition: succeededOrFailed() + - task: PublishTestResults@2 + inputs: + testResultsFiles: '**/junit-*.xml' + testRunTitle: 'Publish test results for $(Agent.JobName)' + failTaskOnFailedTests: true + condition: succeededOrFailed() + - task: PublishCodeCoverageResults@2 + inputs: + summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml' - - job: Windows - pool: - vmImage: 'windows-latest' - variables: - MNE_LOGGING_LEVEL: 'warning' - MNE_FORCE_SERIAL: 'true' - OPENBLAS_NUM_THREADS: 1 - MKL_NUM_THREADS: 2 - OMP_DYNAMIC: 'false' - MKL_DYNAMIC: 'false' - PYTHONUNBUFFERED: 1 - PYTHONIOENCODING: 'utf-8' - AZURE_CI_WINDOWS: 'true' - PYTHON_ARCH: 'x64' - timeoutInMinutes: 70 - strategy: - maxParallel: 4 - matrix: - 3.10 conda: - PLATFORM: 'x86-64' - TEST_MODE: 'conda' - PYTHON_VERSION: '3.10' - 3.9 pip: - TEST_MODE: 'pip' - PYTHON_VERSION: '3.9' - 3.10 pip pre: - TEST_MODE: 'pip-pre' - PYTHON_VERSION: '3.10' - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: $(PYTHON_VERSION) - architecture: $(PYTHON_ARCH) - addToPath: true - condition: in(variables['TEST_MODE'], 'pip', 'pip-pre') - displayName: 'Get Python' - # https://docs.microsoft.com/en-us/azure/devops/pipelines/ecosystems/anaconda - # https://github.com/MicrosoftDocs/pipelines-anaconda - # https://github.com/ContinuumIO/anaconda-issues/issues/10949 - - script: | - echo "##vso[task.prependpath]%CONDA%;%CONDA%\condabin;%CONDA%\Scripts;%CONDA%\Library\bin;%PROGRAMFILES%\Git\bin;%SystemRoot%\system32;%SystemRoot%;%SystemRoot%\System32\Wbem;%PROGRAMFILES%\Git\usr\bin" - condition: in(variables['TEST_MODE'], 'conda') - displayName: Add conda to PATH, deal with Qt linking bug - - bash: | - set -e - git clone --depth 1 https://github.com/pyvista/gl-ci-helpers.git - powershell gl-ci-helpers/appveyor/install_opengl.ps1 - displayName: Install OpenGL - - bash: | - set -e - ./tools/azure_dependencies.sh - condition: in(variables['TEST_MODE'], 'pip', 'pip-pre') - displayName: Install dependencies with pip - - script: conda install -c conda-forge mamba - condition: eq(variables['TEST_MODE'], 'conda') - displayName: Get mamba - - script: mamba env update --name base --file environment.yml - condition: eq(variables['TEST_MODE'], 'conda') - displayName: Setup MNE environment - # ipympl is not tested on Windows and even its installation interferes - # with basic matplotlib functionality so it must be uninstalled until fixed - 
- bash: | - set -e - mamba remove -c conda-forge --force -yq mne ipympl - rm /c/Miniconda/Scripts/mne.exe - condition: eq(variables['TEST_MODE'], 'conda') - displayName: Remove old MNE - - script: pip install -e . - displayName: 'Install MNE-Python dev' - - script: pip install --progress-bar off -e .[test] codecov - condition: eq(variables['TEST_MODE'], 'conda') - displayName: Install testing requirements - - script: mne sys_info -pd - displayName: 'Print config' - - script: python -c "import numpy; numpy.show_config()" - displayName: Print NumPy config - - bash: source tools/get_testing_version.sh - displayName: 'Get testing version' - - task: Cache@2 - inputs: - key: $(testing_version) - path: C:\Users\VssAdministrator\mne_data - displayName: 'Cache testing data' - - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" - displayName: 'Get test data' - - script: pytest -m "not (slowtest or pgtest)" --tb=short --cov=mne --cov-report=xml --cov-report=html -vv mne - displayName: 'Run tests' - - bash: bash <(curl -s https://codecov.io/bash) - displayName: 'Codecov' - condition: succeededOrFailed() - - task: PublishTestResults@2 - inputs: - testResultsFiles: '**/junit-*.xml' - testRunTitle: 'Publish test results for $(Agent.JobName) $(TEST_MODE) $(PYTHON_VERSION)' - failTaskOnFailedTests: true - condition: succeededOrFailed() - - task: PublishCodeCoverageResults@1 - inputs: - codeCoverageTool: Cobertura - summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml' - reportDirectory: '$(System.DefaultWorkingDirectory)/**/htmlcov' + - job: Qt + pool: + vmImage: 'ubuntu-22.04' + variables: + DISPLAY: ':99' + OPENBLAS_NUM_THREADS: '1' + TEST_OPTIONS: "--tb=short --cov=mne --cov-report=xml --cov-append -vv mne/viz/_brain mne/viz/backends mne/viz/tests/test_evoked.py mne/gui mne/report" + MNE_TEST_ALLOW_SKIP: '^.*(PySide6 causes segfaults).*$' + steps: + - bash: ./tools/setup_xvfb.sh + displayName: 'Install Ubuntu dependencies' + - task: UsePythonVersion@0 + inputs: + versionSpec: '3.10' + architecture: 'x64' + addToPath: true + displayName: 'Get Python' + - bash: | + set -e + python -m pip install --progress-bar off --upgrade pip + python -m pip install --progress-bar off --upgrade --pre --only-binary=\"numpy,scipy,matplotlib,vtk\" numpy scipy matplotlib vtk + python -c "import vtk" + python -m pip install --progress-bar off --upgrade -ve .[full] --group=test_extra + displayName: 'Install dependencies with pip' + - bash: | + set -e + which mne + mne sys_info -pd + python ./tools/check_mne_location.py + displayName: Print config + - bash: source tools/get_testing_version.sh + displayName: 'Get testing version' + - task: Cache@2 + inputs: + key: $(testing_version) + path: /home/vsts/mne_data + displayName: 'Cache testing data' + - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" + displayName: 'Get test data' + - bash: | + set -eo pipefail + python -m pip install PyQt6 + LD_DEBUG=libs python -c "from PyQt6.QtWidgets import QApplication, QWidget; app = QApplication([]); import matplotlib; matplotlib.use('QtAgg'); import matplotlib.pyplot as plt; plt.figure()" + displayName: 'Check Qt import' + - bash: | + set -eo pipefail + mne sys_info -pd + mne sys_info -pd | grep "qtpy .* (PyQt6=.*)$" + PYTEST_QT_API=PyQt6 pytest -m "not ultraslowtest" ${TEST_OPTIONS} + python -m pip uninstall -yq PyQt6 PyQt6-sip PyQt6-Qt6 + displayName: 'PyQt6' + - bash: | + set -eo pipefail + python -m pip install "PySide6!=6.8.0,!=6.8.0.1,!=6.9.1" + mne sys_info 
-pd + mne sys_info -pd | grep "qtpy .* (PySide6=.*)$" + PYTEST_QT_API=PySide6 pytest -m "not ultraslowtest" ${TEST_OPTIONS} + python -m pip uninstall -yq PySide6 + displayName: 'PySide6' + # PyQt5 leaves cruft behind, so run it last + - bash: | + set -eo pipefail + python -m pip install PyQt5 + mne sys_info -pd + mne sys_info -pd | grep "qtpy .* (PyQt5=.*)$" + PYTEST_QT_API=PyQt5 pytest -m "not ultraslowtest" ${TEST_OPTIONS} + python -m pip uninstall -yq PyQt5 PyQt5-sip PyQt5-Qt5 + displayName: 'PyQt5' + # Coverage + - bash: bash <(curl -s https://codecov.io/bash) + displayName: 'Codecov' + condition: succeededOrFailed() + - task: PublishTestResults@2 + inputs: + testResultsFiles: '**/junit-*.xml' + testRunTitle: 'Publish test results for $(Agent.JobName)' + failTaskOnFailedTests: true + condition: succeededOrFailed() + - task: PublishCodeCoverageResults@2 + inputs: + summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml' - - job: SphinxWindows - pool: - vmImage: 'windows-latest' - variables: - AZURE_CI_WINDOWS: 'true' - steps: - - bash: | - set -e - git clone --depth 1 https://github.com/pyvista/gl-ci-helpers.git - powershell gl-ci-helpers/appveyor/install_opengl.ps1 - displayName: Install OpenGL - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.10' - - bash: | - set -eo pipefail - PYTHONUTF8=1 pip install --progress-bar off -r requirements.txt -r requirements_doc.txt - displayName: Install documentation dependencies - - script: pip install -e . - displayName: Install dev MNE - - script: mne sys_info -pd - displayName: Print config and test access to commands - - script: python -c "import numpy; numpy.show_config()" - displayName: Print NumPy config - - bash: | - set -eo pipefail - sed -i 's/.. graphviz::/.. graphviz/g' doc/install/contributing.rst - sed -i 's/.. graphviz::/.. graphviz/g' tutorials/preprocessing/40_artifact_correction_ica.py - sed -i '/sphinx\.ext\.graphviz/d' doc/conf.py - displayName: Skip graph that we cannot render - - bash: make -C doc html_dev-noplot - displayName: 'Build doc' + - job: Windows + pool: + vmImage: 'windows-latest' + variables: + MNE_LOGGING_LEVEL: 'warning' + MNE_FORCE_SERIAL: 'true' + OPENBLAS_NUM_THREADS: '2' + OMP_DYNAMIC: 'false' + PYTHONUNBUFFERED: 1 + PYTHONIOENCODING: 'utf-8' + AZURE_CI_WINDOWS: 'true' + PYTHON_ARCH: 'x64' + timeoutInMinutes: 95 + strategy: + maxParallel: 4 + matrix: + 3.10 pip: + TEST_MODE: 'pip' + PYTHON_VERSION: '3.10' + 3.13 pip pre: + TEST_MODE: 'pip-pre' + PYTHON_VERSION: '3.13' + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: $(PYTHON_VERSION) + architecture: $(PYTHON_ARCH) + addToPath: true + displayName: 'Get Python' + - bash: | + set -eo pipefail + git clone --depth 1 https://github.com/pyvista/setup-headless-display-action.git + MESA3D_VERSION=24.3.0 bash setup-headless-display-action/windows/install_opengl.sh + displayName: Install OpenGL + - bash: ./tools/azure_dependencies.sh + displayName: Install dependencies with pip + - script: pip install -e . 
+ displayName: 'Install MNE-Python dev' + - script: mne sys_info -pd + displayName: 'Print config' + - script: python -c "import numpy; numpy.show_config()" + displayName: Print NumPy config + - script: python -c "import numpy; import scipy.linalg; import sklearn.neighbors; from threadpoolctl import threadpool_info; from pprint import pprint; pprint(threadpool_info())" + displayName: Print threadpoolctl info + - bash: source tools/get_testing_version.sh + displayName: 'Get testing version' + - task: Cache@2 + inputs: + key: $(testing_version) + path: C:\Users\VssAdministrator\mne_data + displayName: 'Cache testing data' + - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" + displayName: 'Get test data' + - script: pytest -m "not (slowtest or pgtest)" --tb=short --cov=mne --cov-report=xml -vv mne + displayName: 'Run tests' + - bash: bash <(curl -s https://codecov.io/bash) + displayName: 'Codecov' + condition: succeededOrFailed() + - task: PublishTestResults@2 + inputs: + testResultsFiles: '**/junit-*.xml' + testRunTitle: 'Publish test results for $(Agent.JobName) $(TEST_MODE) $(PYTHON_VERSION)' + failTaskOnFailedTests: true + condition: succeededOrFailed() + - task: PublishCodeCoverageResults@2 + inputs: + summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml' diff --git a/codecov.yml b/codecov.yml index a011f80f1c6..408f379cd31 100644 --- a/codecov.yml +++ b/codecov.yml @@ -4,7 +4,7 @@ github_checks: # too noisy, even though "a" interactively disables them codecov: notify: - require_ci_to_pass: no + require_ci_to_pass: false coverage: status: diff --git a/codemeta.json b/codemeta.json index fd80dc13fd2..713417ddde4 100644 --- a/codemeta.json +++ b/codemeta.json @@ -5,11 +5,11 @@ "codeRepository": "git+https://github.com/mne-tools/mne-python.git", "dateCreated": "2010-12-26", "datePublished": "2014-08-04", - "dateModified": "2022-12-22", - "downloadUrl": "https://github.com/mne-tools/mne-python/archive/v1.3.0.zip", + "dateModified": "2025-07-14", + "downloadUrl": "https://github.com/mne-tools/mne-python/archive/v1.10.0.zip", "issueTracker": "https://github.com/mne-tools/mne-python/issues", "name": "MNE-Python", - "version": "1.3.0", + "version": "1.10.0", "description": "MNE-Python is an open-source Python package for exploring, visualizing, and analyzing human neurophysiological data. 
It provides methods for data input/output, preprocessing, visualization, source estimation, time-frequency analysis, connectivity analysis, machine learning, and statistics.", "applicationCategory": "Neuroscience", "developmentStatus": "active", @@ -37,9 +37,16 @@ "macOS" ], "softwareRequirements": [ - "python>=3.7", - "numpy>=1.15.4", - "scipy>=1.1.0" + "python>= 3.10", + "decorator", + "jinja2", + "lazy_loader >= 0.3", + "matplotlib >= 3.7", + "numpy >= 1.25,<3", + "packaging", + "pooch >= 1.5", + "scipy >= 1.11", + "tqdm" ], "author": [ { @@ -81,7 +88,7 @@ { "@type":"Person", "email":"teon.brooks@gmail.com", - "givenName":"Teon", + "givenName":"Teon L", "familyName": "Brooks" }, { @@ -90,12 +97,6 @@ "givenName":"Jona", "familyName": "Sassenhagen" }, - { - "@type":"Person", - "email":"mluessi@nmr.mgh.harvard.edu", - "givenName":"Martin", - "familyName": "Luessi" - }, { "@type":"Person", "email":"dan@mccloy.info", @@ -104,21 +105,15 @@ }, { "@type":"Person", - "email":"jeanremi.king+github@gmail.com", - "givenName":"Jean-Remi", - "familyName": "King" - }, - { - "@type":"Person", - "email":"roman.goj@gmail.com", - "givenName":"Roman", - "familyName": "Goj" + "email":"mluessi@nmr.mgh.harvard.edu", + "givenName":"Martin", + "familyName": "Luessi" }, { "@type":"Person", - "email":"guillaume.favelier@gmail.com", - "givenName":"Guillaume", - "familyName": "Favelier" + "email":"jeanremi.king+github@gmail.com", + "givenName":"Jean-Rémi", + "familyName": "King" }, { "@type":"Person", @@ -132,6 +127,18 @@ "givenName":"Clemens", "familyName": "Brunner" }, + { + "@type":"Person", + "email":"roman.goj@gmail.com", + "givenName":"Roman", + "familyName": "Goj" + }, + { + "@type":"Person", + "email":"guillaume.favelier@gmail.com", + "givenName":"Guillaume", + "familyName": "Favelier" + }, { "@type":"Person", "email":"w.m.vanvliet@gmail.com", @@ -144,12 +151,30 @@ "givenName":"Mark", "familyName": "Wronkiewicz" }, + { + "@type":"Person", + "email":"aprockhill206@gmail.com", + "givenName":"Alex", + "familyName": "Rockhill" + }, + { + "@type":"Person", + "email":"stefan.appelhoff@mailbox.org", + "givenName":"Stefan", + "familyName": "Appelhoff" + }, { "@type":"Person", "email":"choldgraf@gmail.com", "givenName":"Chris", "familyName": "Holdgraf" }, + { + "@type":"Person", + "email":"mathieu.scheltienne@gmail.com", + "givenName":"Mathieu", + "familyName": "Scheltienne" + }, { "@type":"Person", "email":"mailsik@gmail.com", @@ -162,18 +187,6 @@ "givenName":"Yousra", "familyName": "Bekhti" }, - { - "@type":"Person", - "email":"aprockhill206@gmail.com", - "givenName":"Alex", - "familyName": "Rockhill" - }, - { - "@type":"Person", - "email":"stefan.appelhoff@mailbox.org", - "givenName":"Stefan", - "familyName": "Appelhoff" - }, { "@type":"Person", "email":"leggitta3@gmail.com", @@ -186,18 +199,18 @@ "givenName":"Andrew", "familyName": "Dykstra" }, - { - "@type":"Person", - "email":"code@robertluke.net", - "givenName":"Rob", - "familyName": "Luke" - }, { "@type":"Person", "email":"romain.trachel@inria.fr", "givenName":"Romain", "familyName": "Trachel" }, + { + "@type":"Person", + "email":"code@robertluke.net", + "givenName":"Robert", + "familyName": "Luke" + }, { "@type":"Person", "email":"desantis.lnz@gmail.com", @@ -216,24 +229,12 @@ "givenName":"Mikołaj", "familyName": "Magnuski" }, - { - "@type":"Person", - "email":"mathieu.scheltienne@gmail.com", - "givenName":"Mathieu", - "familyName": "Scheltienne" - }, { "@type":"Person", "email":"britta.wstnr@gmail.com", "givenName":"Britta", "familyName": "Westner" }, - { 
- "@type":"Person", - "email":"martin.billinger@tugraz.at", - "givenName":"Martin", - "familyName": "Billinger" - }, { "@type":"Person", "email":"dgwakeman@gmail.com", @@ -300,12 +301,24 @@ "givenName":"Fede", "familyName": "Raimondo" }, + { + "@type":"Person", + "email":"", + "givenName":"Scott", + "familyName": "Huberty" + }, { "@type":"Person", "email":"jnu@iki.fi", "givenName":"Jussi", "familyName": "Nurminen" }, + { + "@type":"Person", + "email":"flKazemakase@gmail.com", + "givenName":"Martin", + "familyName": "Billinger" + }, { "@type":"Person", "email":"montoya.jair.m@gmail.com", @@ -326,7 +339,7 @@ }, { "@type":"Person", - "email":"dev@earthman-music.de", + "email":"dev@mgschulz.de", "givenName":"Martin", "familyName": "Schulz" }, @@ -348,6 +361,12 @@ "givenName":"José C", "familyName": "García Alanis" }, + { + "@type":"Person", + "email":"", + "givenName":"Dimitri Papadopoulos", + "familyName": "Orfanos" + }, { "@type":"Person", "email":"olaf.hauk@mrc-cbu.cam.ac.uk", @@ -368,7 +387,7 @@ }, { "@type":"Person", - "email":"", + "email":"ashdrew@uw.edu", "givenName":"Ashley", "familyName": "Drew" }, @@ -378,12 +397,30 @@ "givenName":"Christoph", "familyName": "Dinh" }, + { + "@type":"Person", + "email":"t.s.binns@outlook.com", + "givenName":"Thomas S", + "familyName": "Binns" + }, { "@type":"Person", "email":"deep@introspection.eu", "givenName":"Guillaume", "familyName": "Dumas" }, + { + "@type":"Person", + "email":"martin.billinger@tugraz.at", + "givenName":"", + "familyName": "Martin" + }, + { + "@type":"Person", + "email":"johann.benerradi@gmail.com", + "givenName":"Johann", + "familyName": "Benerradi" + }, { "@type":"Person", "email":"thomas.hartmann@th-ht.de", @@ -398,9 +435,9 @@ }, { "@type":"Person", - "email":"johann.benerradi@gmail.com", - "givenName":"Johann", - "familyName": "Benerradi" + "email":"flkazemakase@gmail.com", + "givenName":"Martin", + "familyName": "Billinger" }, { "@type":"Person", @@ -432,6 +469,12 @@ "givenName":"Brad", "familyName": "Buran" }, + { + "@type":"Person", + "email":"Woessner.jacob@gmail.com", + "givenName":"Jacob", + "familyName": "Woessner" + }, { "@type":"Person", "email":"mathurin.massias@gmail.com", @@ -468,6 +511,12 @@ "givenName":"Félix", "familyName": "Raimundo" }, + { + "@type":"Person", + "email":"rcmdnk@gmail.com", + "givenName":"Michiru", + "familyName": "Kaneda" + }, { "@type":"Person", "email":"phillip.alday@mpi.nl", @@ -498,6 +547,12 @@ "givenName":"Yu-Han", "familyName": "Luo" }, + { + "@type":"Person", + "email":"agramfort@fb.com", + "givenName":"Alexandre", + "familyName": "Gramfort" + }, { "@type":"Person", "email":"jeythekey@tutanota.com", @@ -516,6 +571,18 @@ "givenName":"Mads", "familyName": "Jensen" }, + { + "@type":"Person", + "email":"santeri.ruuskanen@aalto.fi", + "givenName":"Santeri", + "familyName": "Ruuskanen" + }, + { + "@type":"Person", + "email":"simon.kern@online.de", + "givenName":"Simon", + "familyName": "Kern" + }, { "@type":"Person", "email":"tanaygahlot@gmail.com", @@ -534,6 +601,18 @@ "givenName":"Dirk", "familyName": "Gütlin" }, + { + "@type":"Person", + "email":"erkkahe@gmail.com", + "givenName":"Erkka", + "familyName": "Heinila" + }, + { + "@type":"Person", + "email":"kristijan.armeni@gmail.com", + "givenName":"Kristijan", + "familyName": "Armeni" + }, { "@type":"Person", "email":"kjs@llama", @@ -566,9 +645,15 @@ }, { "@type":"Person", - "email":"erkkahe@gmail.com", - "givenName":"Erkka", - "familyName": "Heinila" + "email":"dm.altukhov@ya.ru", + "givenName":"Dmitrii", + "familyName": "Altukhov" + }, + { 
+ "@type":"Person", + "email":"nordme@uw.edu", + "givenName":"Erica", + "familyName": "Peterson" }, { "@type":"Person", @@ -588,6 +673,18 @@ "givenName":"Natalie", "familyName": "Klein" }, + { + "@type":"Person", + "email":"paul@roujansky.eu", + "givenName":"Paul", + "familyName": "Roujansky" + }, + { + "@type":"Person", + "email":"code@robertluke.net", + "givenName":"Rob", + "familyName": "Luke" + }, { "@type":"Person", "email":"antti.rantala90@gmail.com", @@ -600,6 +697,12 @@ "givenName":"Burkhard", "familyName": "Maess" }, + { + "@type":"Person", + "email":"carinaforster0611@gmail.com", + "givenName":"Carina", + "familyName": "Forster" + }, { "@type":"Person", "email":"christian.oreilly@gmail.com", @@ -608,9 +711,9 @@ }, { "@type":"Person", - "email":"nordme@uw.edu", - "givenName":"Erica", - "familyName": "Peterson" + "email":"dominik.welke@ae.mpg.de", + "givenName":"Dominik", + "familyName": "Welke" }, { "@type":"Person", @@ -624,6 +727,12 @@ "givenName":"Hubert", "familyName": "Banville" }, + { + "@type":"Person", + "email":"zhangmengyu10@gmail.com", + "givenName":"Jack", + "familyName": "Zhang" + }, { "@type":"Person", "email":"makkostya@ukr.net", @@ -644,9 +753,9 @@ }, { "@type":"Person", - "email":"rcmdnk@gmail.com", - "givenName":"Michiru", - "familyName": "Kaneda" + "email":"", + "givenName":"Nikolai", + "familyName": "Chapochnikov" }, { "@type":"Person", @@ -662,27 +771,21 @@ }, { "@type":"Person", - "email":"", - "givenName":"Scott", - "familyName": "Huberty" - }, - { - "@type":"Person", - "email":"simon.kern@online.de", - "givenName":"Simon", - "familyName": "Kern" + "email":"victor.ferat@live.Fr", + "givenName":"Victor", + "familyName": "Férat" }, { "@type":"Person", - "email":"carinaforster0611@gmail.com", - "givenName":"Carina", - "familyName": "Forster" + "email":"kimjico@gmail.com", + "givenName":"Cora", + "familyName": "Kim" }, { "@type":"Person", - "email":"", - "givenName":"Cora", - "familyName": "Kim" + "email":"dominik.welke@web.de", + "givenName":"Dominik", + "familyName": "Welke" }, { "@type":"Person", @@ -702,12 +805,6 @@ "givenName":"Ivana", "familyName": "Kojcic" }, - { - "@type":"Person", - "email":"zhangmengyu10@gmail.com", - "givenName":"Jack", - "familyName": "Zhang" - }, { "@type":"Person", "email":"jdue@dtu.dk", @@ -732,6 +829,18 @@ "givenName":"Louis", "familyName": "Thibault" }, + { + "@type":"Person", + "email":"", + "givenName":"Moritz", + "familyName": "Gerster" + }, + { + "@type":"Person", + "email":"", + "givenName":"Nabil", + "familyName": "Alibou" + }, { "@type":"Person", "email":"nathalie.gayraud@inria.fr", @@ -747,8 +856,20 @@ { "@type":"Person", "email":"", - "givenName":"", - "familyName": "dependabot[bot]" + "givenName":"Qian", + "familyName": "Chu" + }, + { + "@type":"Person", + "email":"ksherbst@gmail.com", + "givenName":"Sophie", + "familyName": "Herbst" + }, + { + "@type":"Person", + "email":"", + "givenName":"Ana", + "familyName": "Radanovic" }, { "@type":"Person", @@ -768,12 +889,6 @@ "givenName":"Basile", "familyName": "Pinsard" }, - { - "@type":"Person", - "email":"dominik.welke@web.de", - "givenName":"Dominik", - "familyName": "Welke" - }, { "@type":"Person", "email":"emilyps14@gmail.com", @@ -804,6 +919,18 @@ "givenName":"Fahimeh", "familyName": "Mamashli" }, + { + "@type":"Person", + "email":"", + "givenName":"Gennadiy", + "familyName": "Belonosov" + }, + { + "@type":"Person", + "email":"g.o'neill@ucl.ac.uk", + "givenName":"George", + "familyName": "O'Neill" + }, { "@type":"Person", "email":"giorgio.marinato@unitn.it", @@ -816,6 
+943,12 @@ "givenName":"Hafeza", "familyName": "Anevar" }, + { + "@type":"Person", + "email":"hamza.abdelhedii@gmail.com", + "givenName":"Hamza", + "familyName": "Abdelhedi" + }, { "@type":"Person", "email":"mail@jan-sosulski.de", @@ -828,6 +961,18 @@ "givenName":"Jeff", "familyName": "Stout" }, + { + "@type":"Person", + "email":"", + "givenName":"Joshua", + "familyName": "Calder-Travis" + }, + { + "@type":"Person", + "email":"", + "givenName":"Judy D", + "familyName": "Zhu" + }, { "@type":"Person", "email":"leisenman@wustl.edu", @@ -842,7 +987,7 @@ }, { "@type":"Person", - "email":"mdovgialo@fabrizzio.zfb.fuw.edu.pl", + "email":"marian.dowgialo@gmail.com", "givenName":"Marian", "familyName": "Dovgialo" }, @@ -858,6 +1003,18 @@ "givenName":"Nicolas", "familyName": "Legrand" }, + { + "@type":"Person", + "email":"4dvlup@gmail.com", + "givenName":"Nikolai", + "familyName": "Kapralov" + }, + { + "@type":"Person", + "email":"pmolfese@gmail.com", + "givenName":"Peter J", + "familyName": "Molfese" + }, { "@type":"Person", "email":"falachrotem@gmail.com", @@ -890,9 +1047,21 @@ }, { "@type":"Person", - "email":"victor.ferat@live.Fr", - "givenName":"Victor", - "familyName": "Férat" + "email":"Theodore.Papadopoulo@inria.fr", + "givenName":"Theodore", + "familyName": "Papadopoulo" + }, + { + "@type":"Person", + "email":"t.s.binns@outlook.com", + "givenName":"Thomas Samuel", + "familyName": "Binns" + }, + { + "@type":"Person", + "email":"ttstenner@gmail.com", + "givenName":"Tristan", + "familyName": "Stenner" }, { "@type":"Person", @@ -900,6 +1069,12 @@ "givenName":"Victoria", "familyName": "Peterson" }, + { + "@type":"Person", + "email":"z.baratz@gmail.com", + "givenName":"Zvi", + "familyName": "Baratz" + }, { "@type":"Person", "email":"alessandro.tonin@wysscenter.ch", @@ -948,12 +1123,6 @@ "givenName":"Dominik", "familyName": "Krzemiński" }, - { - "@type":"Person", - "email":"dominik.welke@ae.mpg.de", - "givenName":"Dominik", - "familyName": "Welke" - }, { "@type":"Person", "email":"dom.mak19@gmail.com", @@ -962,10 +1131,22 @@ }, { "@type":"Person", - "email":"", + "email":"e.mikulan@gmail.com", "givenName":"Ezequiel", "familyName": "Mikulan" }, + { + "@type":"Person", + "email":"hofaflo@gmail.com", + "givenName":"Florian", + "familyName": "Hofer" + }, + { + "@type":"Person", + "email":"harrison.ritz@gmail.com", + "givenName":"Harrison", + "familyName": "Ritz" + }, { "@type":"Person", "email":"jean.baptiste.schiratti@gmail.com", @@ -978,6 +1159,12 @@ "givenName":"Jen", "familyName": "Evans" }, + { + "@type":"Person", + "email":"johnv@uchicago.edu", + "givenName":"John", + "familyName": "Veillette" + }, { "@type":"Person", "email":"", @@ -1002,6 +1189,12 @@ "givenName":"Laura", "familyName": "Gwilliams" }, + { + "@type":"Person", + "email":"laurent.lementec@gmail.com", + "givenName":"Laurent", + "familyName": "Lementec" + }, { "@type":"Person", "email":"", @@ -1011,6 +1204,12 @@ { "@type":"Person", "email":"", + "givenName":"Liberty", + "familyName": "Hamilton" + }, + { + "@type":"Person", + "email":"lukas.gemein@gmx.de", "givenName":"Lukas", "familyName": "Gemein" }, @@ -1046,15 +1245,15 @@ }, { "@type":"Person", - "email":"molpsychistb@gmail.com", - "givenName":"Mohamed", - "familyName": "Sherif" + "email":"", + "givenName":"Michal", + "familyName": "Žák" }, { "@type":"Person", - "email":"", - "givenName":"Moritz", - "familyName": "Gerster" + "email":"molpsychistb@gmail.com", + "givenName":"Mohamed", + "familyName": "Sherif" }, { "@type":"Person", @@ -1074,12 +1273,6 @@ "givenName":"Niklas", 
"familyName": "Wilming" }, - { - "@type":"Person", - "email":"", - "givenName":"Nikolai", - "familyName": "Chapochnikov" - }, { "@type":"Person", "email":"", @@ -1092,6 +1285,12 @@ "givenName":"Pierre", "familyName": "Ablin" }, + { + "@type":"Person", + "email":"proloy@umd.edu", + "givenName":"Proloy", + "familyName": "Das" + }, { "@type":"Person", "email":"quentinbertrand54@gmail.com", @@ -1104,6 +1303,12 @@ "givenName":"Reza", "familyName": "Shoorangiz" }, + { + "@type":"Person", + "email":"", + "givenName":"Richard", + "familyName": "Scholz" + }, { "@type":"Person", "email":"rhubner@gmail.com", @@ -1126,13 +1331,7 @@ "@type":"Person", "email":"sheraz@nmr.mgh.harvard.edu", "givenName":"Sheraz", - "familyName": "Khan" - }, - { - "@type":"Person", - "email":"ksherbst@gmail.com", - "givenName":"Sophie", - "familyName": "Herbst" + "familyName": "Khan" }, { "@type":"Person", @@ -1140,6 +1339,12 @@ "givenName":"Sumalyo", "familyName": "Datta" }, + { + "@type":"Person", + "email":"tdonoghue.research@gmail.com", + "givenName":"Thomas", + "familyName": "Donoghue" + }, { "@type":"Person", "email":"", @@ -1148,7 +1353,7 @@ }, { "@type":"Person", - "email":"", + "email":"timon.merk@charite.de", "givenName":"Timon", "familyName": "Merk" }, @@ -1164,12 +1369,6 @@ "givenName":"Tom", "familyName": "Dupré la Tour" }, - { - "@type":"Person", - "email":"ttstenner@gmail.com", - "givenName":"Tristan", - "familyName": "Stenner" - }, { "@type":"Person", "email":"tzionan@mail.tau.ac.il", @@ -1188,6 +1387,12 @@ "givenName":"", "familyName": "sviter" }, + { + "@type":"Person", + "email":"Jakdaxter31@gmail.com", + "givenName":"Aaron", + "familyName": "Earle-Richardson" + }, { "@type":"Person", "email":"abram.hindle@softwareprocess.es", @@ -1220,9 +1425,21 @@ }, { "@type":"Person", - "email":"agramfort@fb.com", - "givenName":"Alexandre", - "familyName": "Gramfort" + "email":"alex.lepauvre@ae.mpg.de", + "givenName":"Alex", + "familyName": "Lepauvre" + }, + { + "@type":"Person", + "email":"", + "givenName":"Alexander", + "familyName": "Kiefer" + }, + { + "@type":"Person", + "email":"7andy121@gmail.com", + "givenName":"Andy", + "familyName": "Gilbert" }, { "@type":"Person", @@ -1232,7 +1449,7 @@ }, { "@type":"Person", - "email":"", + "email":"anna.padee@gmail.com", "givenName":"Anna", "familyName": "Padee" }, @@ -1242,6 +1459,12 @@ "givenName":"Anne-Sophie", "familyName": "Dubarry" }, + { + "@type":"Person", + "email":"contact@antoinecollas.fr", + "givenName":"Antoine", + "familyName": "Collas" + }, { "@type":"Person", "email":"", @@ -1260,6 +1483,12 @@ "givenName":"Ariel", "familyName": "Rokem" }, + { + "@type":"Person", + "email":"arne.pelzer@idmt.fraunhofer.de", + "givenName":"Arne", + "familyName": "Pelzer" + }, { "@type":"Person", "email":"mynameisaustinhurst@gmail.com", @@ -1272,6 +1501,12 @@ "givenName":"Ben", "familyName": "Beasley" }, + { + "@type":"Person", + "email":"b.aristimunha@gmail.com", + "givenName":"Bruno", + "familyName": "Aristimunha" + }, { "@type":"Person", "email":"bruno.nicenboim@gmail.com", @@ -1311,7 +1546,7 @@ { "@type":"Person", "email":"daniel.c.schad@protonmail.com", - "givenName":"Daniel Carlström", + "givenName":"Daniel C", "familyName": "Schad" }, { @@ -1320,6 +1555,12 @@ "givenName":"Daniel", "familyName": "Hasegan" }, + { + "@type":"Person", + "email":"xiezhibin.0.0.superman@gmail.com", + "givenName":"Daniel", + "familyName": "Tse" + }, { "@type":"Person", "email":"darin.sleiter@gmail.com", @@ -1358,9 +1599,9 @@ }, { "@type":"Person", - "email":"dm.altukhov@ya.ru", - 
"givenName":"Dmitrii", - "familyName": "Altukhov" + "email":"", + "givenName":"Diptyajit", + "familyName": "Das" }, { "@type":"Person", @@ -1394,7 +1635,7 @@ }, { "@type":"Person", - "email":"", + "email":"enricovarano@gmail.com", "givenName":"Enrico", "familyName": "Varano" }, @@ -1422,12 +1663,30 @@ "givenName":"Evgeny", "familyName": "Goldstein" }, + { + "@type":"Person", + "email":"fmamashli@gmail.com", + "givenName":"Fahimeh", + "familyName": "Mamashli" + }, + { + "@type":"Person", + "email":"farzin.negahbani@gmail.com", + "givenName":"Farzin", + "familyName": "Negahbani" + }, { "@type":"Person", "email":"", "givenName":"Federico", "familyName": "Zamberlan" }, + { + "@type":"Person", + "email":"florinpop@me.com", + "givenName":"Florin", + "familyName": "Pop" + }, { "@type":"Person", "email":"", @@ -1446,12 +1705,36 @@ "givenName":"Geoff", "familyName": "Brookshire" }, + { + "@type":"Person", + "email":"george.oneill.90@gmail.com", + "givenName":"George", + "familyName": "O'Neill" + }, + { + "@type":"Person", + "email":"gack94@gmail.com", + "givenName":"", + "familyName": "Giulio" + }, + { + "@type":"Person", + "email":"", + "givenName":"Gonzalo", + "familyName": "Reina" + }, { "@type":"Person", "email":"", "givenName":"Hamid", "familyName": "Maymandi" }, + { + "@type":"Person", + "email":"hasrat407@gmail.com", + "givenName":"Hasrat Ali", + "familyName": "Arzoo" + }, { "@type":"Person", "email":"hermann.sonntag@gmail.com", @@ -1464,18 +1747,48 @@ "givenName":"Hongjiang", "familyName": "Ye" }, + { + "@type":"Person", + "email":"", + "givenName":"Hyonyoung", + "familyName": "Shin" + }, { "@type":"Person", "email":"huseyinorkunelmas@gmail.com", "givenName":"Hüseyin Orkun", "familyName": "Elmas" }, + { + "@type":"Person", + "email":"", + "givenName":"Ilian", + "familyName": "AZZ" + }, { "@type":"Person", "email":"", "givenName":"Ilias", "familyName": "Machairas" }, + { + "@type":"Person", + "email":"ivan.zubarev@aalto.fi", + "givenName":"Ivan", + "familyName": "Zubarev" + }, + { + "@type":"Person", + "email":"ivopascal@gmail.com", + "givenName":"Ivo", + "familyName": "de Jong" + }, + { + "@type":"Person", + "email":"jacob.phelan.jp@gmail.com", + "givenName":"Jacob", + "familyName": "Phelan" + }, { "@type":"Person", "email":"", @@ -1494,12 +1807,6 @@ "givenName":"Jasper J F", "familyName": "van den Bosch" }, - { - "@type":"Person", - "email":"jennifer.behnke@localhost", - "givenName":"Jennifer", - "familyName": "Behnke" - }, { "@type":"Person", "email":"", @@ -1518,12 +1825,6 @@ "givenName":"Johannes", "familyName": "Niediek" }, - { - "@type":"Person", - "email":"johnv@uchicago.edu", - "givenName":"John", - "familyName": "Veillette" - }, { "@type":"Person", "email":"koen.joshua@gmail.com", @@ -1536,12 +1837,6 @@ "givenName":"Joshua J", "familyName": "Bear" }, - { - "@type":"Person", - "email":"", - "givenName":"Judy D", - "familyName": "Zhu" - }, { "@type":"Person", "email":"j.dammers@fz-juelich.de", @@ -1566,6 +1861,12 @@ "givenName":"Katarina", "familyName": "Slama" }, + { + "@type":"Person", + "email":"", + "givenName":"Katia", + "familyName": "Al-Amir" + }, { "@type":"Person", "email":"", @@ -1586,15 +1887,15 @@ }, { "@type":"Person", - "email":"lsbarbosa@gmail.com", - "givenName":"Leonardo S", - "familyName": "Barbosa" + "email":"leorochael@gmail.com", + "givenName":"Leonardo Rochael", + "familyName": "Almeida" }, { "@type":"Person", - "email":"", - "givenName":"Liberty", - "familyName": "Hamilton" + "email":"lsbarbosa@gmail.com", + "givenName":"Leonardo S", + "familyName": "Barbosa" 
}, { "@type":"Person", @@ -1608,6 +1909,12 @@ "givenName":"Lukáš", "familyName": "Hejtmánek" }, + { + "@type":"Person", + "email":"mbalatsko@gmail.com", + "givenName":"Maksym", + "familyName": "Balatsko" + }, { "@type":"Person", "email":"manfredg@nmr.mgh.harvard.edu", @@ -1641,9 +1948,21 @@ { "@type":"Person", "email":"mah@optoceutics.com", - "givenName":"Mark Alexander", + "givenName":"Mark", "familyName": "Henney" }, + { + "@type":"Person", + "email":"Martinb.nmb@gmail.com", + "givenName":"Martin", + "familyName": "BaBer" + }, + { + "@type":"Person", + "email":"", + "givenName":"Martin", + "familyName": "Oberg" + }, { "@type":"Person", "email":"", @@ -1652,9 +1971,9 @@ }, { "@type":"Person", - "email":"Martinb.nmb@gmail.com", - "givenName":"", - "familyName": "MartinBaBer" + "email":"mathieu.scheltienne@dandelion.science", + "givenName":"Mathieu", + "familyName": "Scheltienne" }, { "@type":"Person", @@ -1676,7 +1995,7 @@ }, { "@type":"Person", - "email":"", + "email":"matthias.dold@gmx.net", "givenName":"Matthias", "familyName": "Dold" }, @@ -1722,6 +2041,18 @@ "givenName":"Mohammad", "familyName": "Daneshzand" }, + { + "@type":"Person", + "email":"23111220065@m.fudan.edu.cn", + "givenName":"", + "familyName": "Mojackhak" + }, + { + "@type":"Person", + "email":"celicolimmo@free.fr", + "givenName":"Nicolas", + "familyName": "Fourcaud-Trocmé" + }, { "@type":"Person", "email":"nicolas.gensollen@gmail.com", @@ -1734,12 +2065,24 @@ "givenName":"Nicole", "familyName": "Proulx" }, + { + "@type":"Person", + "email":"nfocke@uni-goettingen.de", + "givenName":"Niels", + "familyName": "Focke" + }, { "@type":"Person", "email":"nikos.ch01@gmail.com", "givenName":"Nikolas", "familyName": "Chalas" }, + { + "@type":"Person", + "email":"", + "givenName":"Noah", + "familyName": "Markowitz" + }, { "@type":"Person", "email":"omer.shubi@gmail.com", @@ -1748,15 +2091,15 @@ }, { "@type":"Person", - "email":"tottochan@gmail.com", - "givenName":"Padma", - "familyName": "Sundaram" + "email":"pablomainar.pm@gmail.com", + "givenName":"Pablo", + "familyName": "Mainar" }, { "@type":"Person", - "email":"paul@roujansky.eu", - "givenName":"Paul", - "familyName": "Roujansky" + "email":"tottochan@gmail.com", + "givenName":"Padma", + "familyName": "Sundaram" }, { "@type":"Person", @@ -1766,9 +2109,9 @@ }, { "@type":"Person", - "email":"pmolfese@gmail.com", - "givenName":"Peter J", - "familyName": "Molfese" + "email":"", + "givenName":"Pierre", + "familyName": "Guetschel" }, { "@type":"Person", @@ -1776,6 +2119,12 @@ "givenName":"Quanliang", "familyName": "Li" }, + { + "@type":"Person", + "email":"q.barthelemy@gmail.com", + "givenName":"Quentin", + "familyName": "Barthélemy" + }, { "@type":"Person", "email":"rahuln@cs.washington.edu", @@ -1794,6 +2143,12 @@ "givenName":"Ramonapariciog", "familyName": "Apariciogarcia" }, + { + "@type":"Person", + "email":"raagaard97@gmail.com", + "givenName":"Rasmus", + "familyName": "Aagaard" + }, { "@type":"Person", "email":"reza@ddpo.ir", @@ -1830,6 +2185,18 @@ "givenName":"Robin Tibor", "familyName": "Schirrmeister" }, + { + "@type":"Person", + "email":"131315c@gmail.com", + "givenName":"Rongfei", + "familyName": "Jin" + }, + { + "@type":"Person", + "email":"", + "givenName":"Roy Eric", + "familyName": "Wieske" + }, { "@type":"Person", "email":"ryan.law@mpi.nl", @@ -1851,8 +2218,14 @@ { "@type":"Person", "email":"", - "givenName":"Santeri", - "familyName": "Ruuskanen" + "givenName":"Samuel", + "familyName": "Louviot" + }, + { + "@type":"Person", + "email":"", + "givenName":"Sawradip", + 
"familyName": "Saha" }, { "@type":"Person", @@ -1860,6 +2233,12 @@ "givenName":"Sebastiaan", "familyName": "Mathot" }, + { + "@type":"Person", + "email":"", + "givenName":"Sebastian", + "familyName": "Jentschke" + }, { "@type":"Person", "email":"", @@ -1890,6 +2269,24 @@ "givenName":"Sergey", "familyName": "Antopolskiy" }, + { + "@type":"Person", + "email":"shirazi@ieee.org", + "givenName":"Seyed (Yahya)", + "familyName": "Shirazi" + }, + { + "@type":"Person", + "email":"kesharishresth5@gmail.com", + "givenName":"Shresth", + "familyName": "Keshari" + }, + { + "@type":"Person", + "email":"baral.shristi@gmail.com", + "givenName":"Shristi", + "familyName": "Baral" + }, { "@type":"Person", "email":"", @@ -1902,6 +2299,12 @@ "givenName":"Simeon", "familyName": "Wong" }, + { + "@type":"Person", + "email":"", + "givenName":"Simon M", + "familyName": "Hofmann" + }, { "@type":"Person", "email":"", @@ -1926,6 +2329,12 @@ "givenName":"Stanislas", "familyName": "Chambon" }, + { + "@type":"Person", + "email":"", + "givenName":"Steinn Hauser", + "familyName": "Magnússon" + }, { "@type":"Person", "email":"bethard@email.arizona.edu", @@ -1946,21 +2355,15 @@ }, { "@type":"Person", - "email":"", + "email":"twang5@swarthmore.edu", "givenName":"T", "familyName": "Wang" }, { "@type":"Person", - "email":"Theodore.Papadopoulo@inria.fr", - "givenName":"Theodore", - "familyName": "Papadopoulo" - }, - { - "@type":"Person", - "email":"tdonoghue.research@gmail.com", + "email":"thomas.moreau.2010@gmail.com", "givenName":"Thomas", - "familyName": "Donoghue" + "familyName": "Moreau" }, { "@type":"Person", @@ -1980,6 +2383,12 @@ "givenName":"Tom", "familyName": "Ma" }, + { + "@type":"Person", + "email":"TASTONE@mgh.harvard.edu", + "givenName":"Tom", + "familyName": "Stone" + }, { "@type":"Person", "email":"", @@ -1992,18 +2401,66 @@ "givenName":"Toomas Erik", "familyName": "Anijärv" }, + { + "@type":"Person", + "email":"", + "givenName":"Velu Prabhakar", + "familyName": "Kumaravel" + }, + { + "@type":"Person", + "email":"weixu@mail.bnu.edu.cn", + "givenName":"Wei", + "familyName": "Xu" + }, + { + "@type":"Person", + "email":"williamfrancisturner@gmail.com", + "givenName":"Will", + "familyName": "Turner" + }, + { + "@type":"Person", + "email":"xabier@zuazo.org", + "givenName":"Xabier de", + "familyName": "Zuazo" + }, { "@type":"Person", "email":"xia@xiaokai.me", "givenName":"Xiaokai", "familyName": "Xia" }, + { + "@type":"Person", + "email":"frostime@foxmail.com", + "givenName":"Yiping", + "familyName": "Zuo" + }, + { + "@type":"Person", + "email":"", + "givenName":"Yixiao", + "familyName": "Shen" + }, { "@type":"Person", "email":"850734033@qq.com", "givenName":"Zhi", "familyName": "Zhang" }, + { + "@type":"Person", + "email":"ziyizeng@link.cuhk.edu.cn", + "givenName":"Ziyi", + "familyName": "ZENG" + }, + { + "@type":"Person", + "email":"btk.codedev@gmail.com", + "givenName":"", + "familyName": "btkcodedev" + }, { "@type":"Person", "email":"", @@ -2015,6 +2472,12 @@ "email":"", "givenName":"", "familyName": "luzpaz" + }, + { + "@type":"Person", + "email":"", + "givenName":"", + "familyName": "user27182" } ] } diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 00000000000..e696138e098 --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1 @@ +/code_credit.inc diff --git a/doc/Makefile b/doc/Makefile index 452b392759c..4eb4ee4dd06 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -4,131 +4,68 @@ # You can set these variables from the command line. 
SPHINXOPTS = -nWT --keep-going SPHINXBUILD = sphinx-build -PAPER = MPROF = SG_STAMP_STARTS=true mprof run -E --python sphinx # Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +ALLSPHINXOPTS = -d _build/doctrees $(SPHINXOPTS) . -.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest +.PHONY: help clean html html-noplot html-pattern linkcheck linkcheck-grep doctest # make with no arguments will build the first target by default, i.e., build standalone HTML files -first_target: html_dev-noplot +first_target: html-noplot help: @echo "Please use \`make ' where is one of" - @echo " html_stable to make standalone HTML files (stable version)" - @echo " html_dev to make standalone HTML files (dev version)" - @echo " html_dev-pattern to make standalone HTML files for one example dir (dev version)" - @echo " *-noplot to make standalone HTML files without plotting" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " changes to make an overview of all changed/added/deprecated items" + @echo " html to make standalone HTML files" + @echo " html-memory to make standalone HTML files while monitoring memory usage" + @echo " html-pattern to make standalone HTML files for a specific filename pattern" + @echo " html-front to make standalone HTML files with only the frontpage examples" + @echo " html-noplot to make standalone HTML files without plotting" + @echo " clean to clean HTML files" @echo " linkcheck to check all external links for integrity" + @echo " linkcheck-grep to grep the linkcheck result" @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " view to view the built HTML" clean: - -rm -rf _build auto_examples auto_tutorials generated *.stc *.fif *.nii.gz + -rm -rf _build sg_execution_times.rst auto_examples auto_tutorials generated *.stc *.fif *.nii.gz -html_stable: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html_stable +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html @echo - @echo "Build finished. The HTML pages are in _build/html_stable." - -html_stable-memory: - $(MPROF) -b html $(ALLSPHINXOPTS) _build/html_stable - @echo - @echo "Build finished. The HTML pages are in _build/html_stable." - -html_dev: - BUILD_DEV_HTML=1 $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html - @echo - @echo "Build finished. The HTML pages are in _build/html" + @echo "Build finished. The HTML pages are in _build/html." -html_dev-memory: - BUILD_DEV_HTML=1 $(MPROF) -b html $(ALLSPHINXOPTS) _build/html +html-memory: + $(MPROF) -b html $(ALLSPHINXOPTS) _build/html @echo - @echo "Build finished. The HTML pages are in _build/html" + @echo "Build finished. The HTML pages are in _build/html." -html_dev-pattern: - BUILD_DEV_HTML=1 $(SPHINXBUILD) -D sphinx_gallery_conf.filename_pattern=$(PATTERN) -D sphinx_gallery_conf.run_stale_examples=True -b html $(ALLSPHINXOPTS) _build/html +html-pattern: + $(SPHINXBUILD) -D sphinx_gallery_conf.filename_pattern=$(PATTERN) -D sphinx_gallery_conf.run_stale_examples=True -b html $(ALLSPHINXOPTS) _build/html @echo @echo "Build finished. 
The HTML pages are in _build/html" -html_dev-pattern-memory: - BUILD_DEV_HTML=1 $(MPROF) -D sphinx_gallery_conf.filename_pattern=$(PATTERN) -D sphinx_gallery_conf.run_stale_examples=True -b html $(ALLSPHINXOPTS) _build/html +html-pattern-memory: + $(MPROF) -D sphinx_gallery_conf.filename_pattern=$(PATTERN) -D sphinx_gallery_conf.run_stale_examples=True -b html $(ALLSPHINXOPTS) _build/html @echo @echo "Build finished. The HTML pages are in _build/html" -html_dev-noplot: - BUILD_DEV_HTML=1 $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) _build/html - @echo - @echo "Build finished. The HTML pages are in _build/html." - -html_dev-debug: - BUILD_DEV_HTML=1 $(SPHINXBUILD) -PD plot_gallery=0 -b html $(ALLSPHINXOPTS) _build/html - html-noplot: - $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) _build/html_stable + $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) _build/html @echo - @echo "Build finished. The HTML pages are in _build/html_stable." - -html_dev-front: - @PATTERN="\(30_mne_dspm_loreta.py\|50_decoding.py\|30_strf.py\|20_cluster_1samp_spatiotemporal.py\|20_visualize_evoked.py\)" make html_dev-pattern; - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml - @echo - @echo "Build finished. The HTML pages are in _build/dirhtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in _build/htmlhelp." + @echo "Build finished. The HTML pages are in _build/html." -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) _build/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in _build/qthelp, like this:" - @echo "# qcollectiongenerator _build/qthelp/MNE.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile _build/qthelp/MNE.qhc" - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex - @echo - @echo "Build finished; the LaTeX files are in _build/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." +html-front: + @PATTERN="\(30_mne_dspm_loreta.py\|50_decoding.py\|30_strf.py\|20_cluster_1samp_spatiotemporal.py\|20_visualize_evoked.py\)" make html-pattern -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes - @echo - @echo "The overview file is in _build/changes." +# Aliases for old methods +html_dev-pattern-memory: html-pattern-memory +html_dev-pattern: html-pattern +html_dev-noplot: html-noplot +html_dev-front: html-front linkcheck: - @$(SPHINXBUILD) -b linkcheck -D nitpicky=0 -D plot_gallery=0 -D exclude_patterns="cited.rst,whats_new.rst,configure_git.rst" -d _build/doctrees . _build/linkcheck - -linkcheck-grep: - @! grep -h "^.*:.*: \[\(\(local\)\|\(broken\)\)\]" _build/linkcheck/output.txt + @$(SPHINXBUILD) -b linkcheck -D nitpicky=0 -q -D plot_gallery=0 -D exclude_patterns="cited.rst,whats_new.rst,configure_git.rst,_includes,changes/dev" -d _build/doctrees . _build/linkcheck doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest @@ -136,6 +73,9 @@ doctest: "results in _build/doctest/output.txt." 
view: - @python -c "import webbrowser; webbrowser.open_new_tab('file://$(PWD)/_build/html/index.html')" + @python -c "import webbrowser; webbrowser.open_new_tab('file://$(PWD)/_build/html/sg_execution_times.html')" show: view + +serve: + python -m http.server -d _build/html diff --git a/doc/_includes/bem_model.rst b/doc/_includes/bem_model.rst index 1985d85fc31..9770d36a7fe 100644 --- a/doc/_includes/bem_model.rst +++ b/doc/_includes/bem_model.rst @@ -17,7 +17,7 @@ Using the watershed algorithm The watershed algorithm [Segonne *et al.*, 2004] is part of the FreeSurfer software. -The name of the program is mri_watershed . +The name of the program is ``mri_watershed``. Its use in the MNE environment is facilitated by the script :ref:`mne watershed_bem`. @@ -39,6 +39,7 @@ a file called :file:`bem/watershed/ws.mgz` which contains the brain MRI volume. Furthermore, ``mne watershed_bem`` script converts the scalp surface to fif format and saves the result to :file:`bem/{}-head.fif`. +.. _bem_flash_algorithm: Using FLASH images ~~~~~~~~~~~~~~~~~~ @@ -50,7 +51,7 @@ reconstructions but it is strongly recommended that they are collected at the same time with the MPRAGEs or at least with the same scanner. For easy co-registration, the images should have FOV, matrix, slice thickness, gap, and slice orientation as the MPRAGE data. For information on suitable pulse -sequences, see :footcite:`FischlEtAl2004`. +sequences, see :footcite:t:`FischlEtAl2004`. Creation of the BEM meshes using this method involves the following steps: diff --git a/doc/_includes/channel_interpolation.rst b/doc/_includes/channel_interpolation.rst index da2ae1dc12f..e90a763d214 100644 --- a/doc/_includes/channel_interpolation.rst +++ b/doc/_includes/channel_interpolation.rst @@ -22,12 +22,11 @@ In short, data repair using spherical spline interpolation :footcite:`PerrinEtAl Spherical splines assume that the potential :math:`V(\boldsymbol{r_i})` at any point :math:`\boldsymbol{r_i}` on the surface of the sphere can be represented by: .. math:: V(\boldsymbol{r_i}) = c_0 + \sum_{j=1}^{N}c_{i}g_{m}(cos(\boldsymbol{r_i}, \boldsymbol{r_{j}})) - :label: model + :name: model where the :math:`C = (c_{1}, ..., c_{N})^{T}` are constants which must be estimated. The function :math:`g_{m}(\cdot)` of order :math:`m` is given by: .. math:: g_{m}(x) = \frac{1}{4 \pi}\sum_{n=1}^{\infty} \frac{2n + 1}{(n(n + 1))^m}P_{n}(x) - :label: legendre where :math:`P_{n}(x)` are `Legendre polynomials`_ of order :math:`n`. @@ -36,22 +35,22 @@ where :math:`P_{n}(x)` are `Legendre polynomials`_ of order :math:`n`. To estimate the constants :math:`C`, we must solve the following two equations simultaneously: .. math:: G_{ss}C + T_{s}c_0 = X - :label: matrix_form + :name: matrix_form .. math:: {T_s}^{T}C = 0 - :label: constraint + :name: constraint where :math:`G_{ss} \in R^{N \times N}` is a matrix whose entries are :math:`G_{ss}[i, j] = g_{m}(cos(\boldsymbol{r_i}, \boldsymbol{r_j}))` and :math:`X \in R^{N \times 1}` are the potentials :math:`V(\boldsymbol{r_i})` measured at the good channels. :math:`T_{s} = (1, 1, ..., 1)^\top` is a column vector of dimension :math:`N`. Equation :eq:`matrix_form` is the matrix formulation of Equation :eq:`model` and equation :eq:`constraint` is like applying an average reference to the data. From equation :eq:`matrix_form` and :eq:`constraint`, we get: .. 
math:: \begin{bmatrix} c_0 \\ C \end{bmatrix} = {\begin{bmatrix} {T_s}^{T} && 0 \\ T_s && G_{ss} \end{bmatrix}}^{-1} \begin{bmatrix} 0 \\ X \end{bmatrix} = C_{i}X - :label: estimate_constant + :name: estimate_constant :math:`C_{i}` is the same as matrix :math:`{\begin{bmatrix} {T_s}^{T} && 0 \\ T_s && G_{ss} \end{bmatrix}}^{-1}` but with its first column deleted, therefore giving a matrix of dimension :math:`(N + 1) \times N`. Now, to estimate the potentials :math:`\hat{X} \in R^{M \times 1}` at the bad channels, we have to do: .. math:: \hat{X} = G_{ds}C + T_{d}c_0 - :label: estimate_data + :name: estimate_data where :math:`G_{ds} \in R^{M \times N}` computes :math:`g_{m}(\boldsymbol{r_i}, \boldsymbol{r_j})` between the bad and good channels. :math:`T_{d} = (1, 1, ..., 1)^\top` is a column vector of dimension :math:`M`. Plugging in equation :eq:`estimate_constant` in :eq:`estimate_data`, we get @@ -60,7 +59,7 @@ where :math:`G_{ds} \in R^{M \times N}` computes :math:`g_{m}(\boldsymbol{r_i}, To interpolate bad channels, one can simply do: - >>> evoked.interpolate_bads(reset_bads=False) # doctest: +SKIP + >>> evoked.interpolate_bads(reset_bads=False) # doctest: +SKIP and the bad channel will be fixed. @@ -68,4 +67,4 @@ and the bad channel will be fixed. .. topic:: Examples: - * :ref:`ex-interpolate-bad-channels` + * :ref:`ex-interpolate-bad-channels` diff --git a/doc/_includes/channel_types.rst b/doc/_includes/channel_types.rst index 647dab25ba4..0a2ea0ab007 100644 --- a/doc/_includes/channel_types.rst +++ b/doc/_includes/channel_types.rst @@ -10,6 +10,11 @@ Supported channel types from the include: channel-types-begin-content +.. NOTE: In the future, this table should be automatically synchronized with + the sensor types listed in the glossary. Perhaps a table showing data type + channels as well as non-data type channels should be added to the glossary + and displayed here too. + Channel types are represented in MNE-Python with shortened or abbreviated names. This page lists all supported channel types, their abbreviated names, and the measurement unit used to represent data of that type. Where channel @@ -23,50 +28,77 @@ parentheses. More information about measurement units is given in the .. cssclass:: table-bordered .. rst-class:: midvalign -============= ========================================= ================= -Channel type Description Measurement unit -============= ========================================= ================= -eeg scalp electroencephalography (EEG) Volts +================= ========================================= ================= +Channel type Description Measurement unit +================= ========================================= ================= +eeg scalp electroencephalography (EEG) Volts + +meg (mag) Magnetoencephalography (magnetometers) Teslas + +meg (grad) Magnetoencephalography (gradiometers) Teslas/meter + +ecg Electrocardiography (ECG) Volts + +seeg Stereotactic EEG channels Volts + +dbs Deep brain stimulation (DBS) Volts + +ecog Electrocorticography (ECoG) Volts + +fnirs (hbo) Functional near-infrared spectroscopy Moles/liter + (oxyhemoglobin) + +fnirs (hbr) Functional near-infrared spectroscopy Moles/liter + (deoxyhemoglobin) + +emg Electromyography (EMG) Volts + +eog Electrooculography (EOG) Volts + +bio Miscellaneous biological channels (e.g., Arbitrary units + skin conductance) -meg (mag) Magnetoencephalography (magnetometers) Teslas +stim stimulus (a.k.a. 
trigger) channels Arbitrary units -meg (grad) Magnetoencephalography (gradiometers) Teslas/meter +resp respiration monitoring channel Volts -ecg Electrocardiography (ECG) Volts +chpi continuous head position indicator Teslas + (HPI) coil channels -seeg Stereotactic EEG channels Volts +exci Flux excitation channel -dbs Deep brain stimulation (DBS) Volts +ias Internal Active Shielding data + (Triux systems only?) -ecog Electrocorticography (ECoG) Volts +syst System status channel information + (Triux systems only) -fnirs (hbo) Functional near-infrared spectroscopy Moles/liter - (oxyhemoglobin) +temperature Temperature Degrees Celsius -fnirs (hbr) Functional near-infrared spectroscopy Moles/liter - (deoxyhemoglobin) +gsr Galvanic skin response Siemens -emg Electromyography (EMG) Volts +ref_meg Reference Magnetometers Teslas -bio Miscellaneous biological channels (e.g., Arbitrary units - skin conductance) +dipole Dipole amplitude Amperes -stim stimulus (a.k.a. trigger) channels Arbitrary units +gof Goodness of fit (GOF) Goodness-of-fit -resp respiration monitoring channel Volts +cw-nirs (amp) Continuous-wave functional near-infrared Volts + spectroscopy (CW-fNIRS) (CW amplitude) -chpi continuous head position indicator Teslas - (HPI) coil channels +fd-nirs (ac amp) Frequency-domain near-infrared Volts + spectroscopy (FD-NIRS AC amplitude) -exci Flux excitation channel +fd-nirs (phase) Frequency-domain near-infrared Radians + spectroscopy (FD-NIRS phase) -ias Internal Active Shielding data - (Triux systems only?) +fnirs (od) Functional near-infrared spectroscopy Volts + (optical density) -syst System status channel information - (Triux systems only) +csd Current source density Volts per square + meter -temperature Temperature Degrees Celsius +eyegaze Eye-tracking (gaze position) Arbitrary units -gsr Galvanic skin response Siemens -============= ========================================= ================= +pupil Eye-tracking (pupil size) Arbitrary units +================= ========================================= ================= \ No newline at end of file diff --git a/doc/_includes/data_formats.rst b/doc/_includes/data_formats.rst index 641810c6b63..63dbfcdc98b 100644 --- a/doc/_includes/data_formats.rst +++ b/doc/_includes/data_formats.rst @@ -75,6 +75,8 @@ EEG :ref:`Persyst ` .lay :func:`mn NIRS :ref:`NIRx ` directory :func:`mne.io.read_raw_nirx` NIRS :ref:`BOXY ` directory :func:`mne.io.read_raw_boxy` + +EYETRACK SR eyelink ASCII files .asc :func:`mne.io.read_raw_eyelink` ============ ============================================ ========= =================================== More details are provided in the tutorials in the :ref:`tut-data-formats` diff --git a/doc/_includes/dig_formats.rst b/doc/_includes/dig_formats.rst index c2d3fde4c27..47d99a889bc 100644 --- a/doc/_includes/dig_formats.rst +++ b/doc/_includes/dig_formats.rst @@ -1,4 +1,5 @@ :orphan: + .. _dig-formats: Supported formats for digitized 3D locations @@ -21,21 +22,23 @@ function for more info on reading specific file types. .. cssclass:: table-bordered .. 
rst-class:: midvalign -================= ================ ============================================== -Vendor Extension(s) MNE-Python function -================= ================ ============================================== -Neuromag .fif :func:`mne.channels.read_dig_fif` +===================== ================ ============================================== +Vendor Extension(s) MNE-Python function +===================== ================ ============================================== +Neuromag .fif :func:`mne.channels.read_dig_fif` + +Polhemus ISOTRAK .hsp, .elp, .eeg :func:`mne.channels.read_dig_polhemus_isotrak` -Polhemus ISOTRAK .hsp, .elp, .eeg :func:`mne.channels.read_dig_polhemus_isotrak` +EGI .xml :func:`mne.channels.read_dig_egi` -EGI .xml :func:`mne.channels.read_dig_egi` +MNE-C .hpts :func:`mne.channels.read_dig_hpts` -MNE-C .hpts :func:`mne.channels.read_dig_hpts` +Brain Products .bvct :func:`mne.channels.read_dig_captrak` -Brain Products .bvct :func:`mne.channels.read_dig_captrak` +Compumedics .dat, .cdt :func:`mne.channels.read_dig_curry` -Compumedics .dat :func:`mne.channels.read_dig_dat` -================= ================ ============================================== +Compumedics (legacy) .dat :func:`mne.channels.read_dig_dat` +===================== ================ ============================================== To load Polhemus FastSCAN files you can use :func:`montage `. diff --git a/doc/_includes/forward.rst b/doc/_includes/forward.rst index fecee06d918..40706fb292c 100644 --- a/doc/_includes/forward.rst +++ b/doc/_includes/forward.rst @@ -27,7 +27,7 @@ MEG/EEG and MRI coordinate systems :class:`~mne.SourceSpaces`, etc), information about the coordinate frame is encoded as a constant integer value. The meaning of those integers is determined `in the source code - `__. + `__. The coordinate systems used in MNE software (and FreeSurfer) and their relationships are depicted in :ref:`coordinate_system_figure`. Except for the @@ -130,26 +130,26 @@ transformation symbols (:math:`T_x`) indicate the transformations actually present in the FreeSurfer files. Generally, .. math:: \begin{bmatrix} - x_2 \\ - y_2 \\ - z_2 \\ - 1 - \end{bmatrix} = T_{12} \begin{bmatrix} - x_1 \\ - y_1 \\ - z_1 \\ - 1 - \end{bmatrix} = \begin{bmatrix} - R_{11} & R_{12} & R_{13} & x_0 \\ - R_{21} & R_{22} & R_{23} & y_0 \\ - R_{31} & R_{32} & R_{33} & z_0 \\ - 0 & 0 & 0 & 1 - \end{bmatrix} \begin{bmatrix} - x_1 \\ - y_1 \\ - z_1 \\ - 1 - \end{bmatrix}\ , + x_2 \\ + y_2 \\ + z_2 \\ + 1 + \end{bmatrix} = T_{12} \begin{bmatrix} + x_1 \\ + y_1 \\ + z_1 \\ + 1 + \end{bmatrix} = \begin{bmatrix} + R_{11} & R_{12} & R_{13} & x_0 \\ + R_{21} & R_{22} & R_{23} & y_0 \\ + R_{31} & R_{32} & R_{33} & z_0 \\ + 0 & 0 & 0 & 1 + \end{bmatrix} \begin{bmatrix} + x_1 \\ + y_1 \\ + z_1 \\ + 1 + \end{bmatrix}\ , where :math:`x_k`, :math:`y_k`,and :math:`z_k` are the location coordinates in two coordinate systems, :math:`T_{12}` is the coordinate transformation from @@ -161,20 +161,20 @@ files produced by FreeSurfer and MNE. The fixed transformations :math:`T_-` and :math:`T_+` are: .. math:: T_{-} = \begin{bmatrix} - 0.99 & 0 & 0 & 0 \\ - 0 & 0.9688 & 0.042 & 0 \\ - 0 & -0.0485 & 0.839 & 0 \\ - 0 & 0 & 0 & 1 - \end{bmatrix} + 0.99 & 0 & 0 & 0 \\ + 0 & 0.9688 & 0.042 & 0 \\ + 0 & -0.0485 & 0.839 & 0 \\ + 0 & 0 & 0 & 1 + \end{bmatrix} and .. 
math:: T_{+} = \begin{bmatrix} - 0.99 & 0 & 0 & 0 \\ - 0 & 0.9688 & 0.046 & 0 \\ - 0 & -0.0485 & 0.9189 & 0 \\ - 0 & 0 & 0 & 1 - \end{bmatrix} + 0.99 & 0 & 0 & 0 \\ + 0 & 0.9688 & 0.046 & 0 \\ + 0 & -0.0485 & 0.9189 & 0 \\ + 0 & 0 & 0 & 1 + \end{bmatrix} .. note:: This section does not discuss the transformation between the MRI voxel @@ -186,7 +186,7 @@ and The symbols :math:`T_x` are defined in :ref:`coordinate_system_figure`. .. tabularcolumns:: |p{0.2\linewidth}|p{0.3\linewidth}|p{0.5\linewidth}| -.. table:: Coordinate transformations in FreeSurfer and MNE software packages. +.. table:: Coordinate transformations in FreeSurfer and MNE software packages +------------------------------+-------------------------------+-------------------------------------------------+ | Transformation | FreeSurfer | MNE | @@ -352,11 +352,11 @@ coordinates (:math:`r_D`) by where .. math:: T = \begin{bmatrix} - e_x & 0 \\ - e_y & 0 \\ - e_z & 0 \\ - r_{0D} & 1 - \end{bmatrix}\ . + e_x & 0 \\ + e_y & 0 \\ + e_z & 0 \\ + r_{0D} & 1 + \end{bmatrix}\ . Calculation of the magnetic field --------------------------------- @@ -444,8 +444,8 @@ data: automatically created by the MNE-C utility ``mne_list_coil_def``. .. tabularcolumns:: |p{0.1\linewidth}|p{0.3\linewidth}|p{0.1\linewidth}|p{0.25\linewidth}|p{0.2\linewidth}| -.. _normal_coil_descriptions: -.. table:: Normal coil descriptions. +.. table:: Normal coil descriptions + :name: normal_coil_descriptions +------+-------------------------+----+----------------------------------+----------------------+ | Id | Description | n | r/mm | w | @@ -506,8 +506,8 @@ data: combinations have to be included. .. tabularcolumns:: |p{0.1\linewidth}|p{0.3\linewidth}|p{0.05\linewidth}|p{0.25\linewidth}|p{0.15\linewidth}| -.. _accurate_coil_descriptions: .. table:: Accurate coil descriptions + :name: accurate_coil_descriptions +------+-------------------------+----+----------------------------------+----------------------+ | Id | Description | n | r/mm | w | @@ -602,8 +602,8 @@ description line containing the following fields: .. tabularcolumns:: |p{0.1\linewidth}|p{0.5\linewidth}| -.. _coil_accuracies: -.. table:: Coil representation accuracies. +.. table:: Coil representation accuracies + :name: coil_accuracies ======= ==================================================================================== Value Meaning diff --git a/doc/_includes/ged.rst b/doc/_includes/ged.rst new file mode 100644 index 00000000000..5146fef5ffa --- /dev/null +++ b/doc/_includes/ged.rst @@ -0,0 +1,107 @@ +:orphan: + +Generalized eigendecomposition in decoding +========================================== + +.. NOTE: part of this file is included in doc/overview/implementation.rst. + Changes here are reflected there. If you want to link to this content, link + to :ref:`ged` to link to that section of the implementation.rst page. + The next line is a target for :start-after: so we can omit the title from + the include: + ged-begin-content + +This section describes the mathematical formulation and application of +Generalized Eigendecomposition (GED), often used in spatial filtering +and source separation algorithms, such as :class:`mne.decoding.CSP`, +:class:`mne.decoding.SPoC`, :class:`mne.decoding.SSD` and +:class:`mne.decoding.XdawnTransformer`. + +The core principle of GED is to find a set of channel weights (spatial filter) +that maximizes the ratio of signal power between two data features. 
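For orientation, the spatial filters that maximize such a power ratio are the solutions of a
generalized symmetric eigenvalue problem between the two feature covariances. A minimal,
self-contained sketch with SciPy (random stand-in data; this is not MNE's internal
implementation)::

    # Sketch: generalized eigendecomposition of a "signal" and a "reference"
    # covariance; the eigenvectors are spatial filters, sorted so that the
    # largest signal-to-reference power ratio comes first.
    import numpy as np
    from scipy.linalg import eigh

    rng = np.random.default_rng(0)
    X_S = rng.standard_normal((8, 1000))  # stand-in "signal" data (channels x times)
    X_R = rng.standard_normal((8, 1000))  # stand-in "reference" data
    S, R = np.cov(X_S), np.cov(X_R)       # feature covariances (R is full rank here)

    evals, W = eigh(S, R)                 # solves S w = lambda R w
    order = np.argsort(evals)[::-1]       # sort filters by descending eigenvalue
    W = W[:, order]
    activations = W.T @ X_S               # spatially filtered "source" time series

The decoding classes listed above build on this same decomposition, adding the covariance
estimation and rank handling described in the rest of this section.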
+These features are defined by the researcher and are represented by two covariance matrices: +a "signal" matrix :math:`S` and a "reference" matrix :math:`R`. +For example, :math:`S` could be the covariance of data from a task time interval, +and :math:`S` could be the covariance from a baseline time interval. For more details see :footcite:`Cohen2022`. + +Algebraic formulation of GED +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A few definitions first: +Let :math:`n \in \mathbb{N}^+` be a number of channels. +Let :math:`\text{Symm}_n(\mathbb{R}) \subset M_n(\mathbb{R})` be a vector space of real symmetric matrices. +Let :math:`S^n_+, S^n_{++} \subset \text{Symm}_n(\mathbb{R})` be sets of real positive semidefinite and positive definite matrices, respectively. +Let :math:`S, R \in S^n_+` be covariance matrices estimated from electrophysiological data :math:`X_S \in M_{n \times t_S}(\mathbb{R})` and :math:`X_R \in M_{n \times t_R}(\mathbb{R})`. + +GED (or simultaneous diagonalization by congruence) of :math:`S` and :math:`R` +is possible when :math:`R` is full rank (and thus :math:`R \in S^n_{++}`): + +.. math:: + + SW = RWD, + +where :math:`W \in M_n(\mathbb{R})` is an invertible matrix of eigenvectors +of :math:`(S, R)` and :math:`D` is a diagonal matrix of eigenvalues :math:`\lambda_i`. + +Each eigenvector :math:`\mathbf{w} \in W` is a spatial filter that solves +an optimization problem of the form: + +.. math:: + + \operatorname{argmax}_{\mathbf{w}} \frac{\mathbf{w}^t S \mathbf{w}}{\mathbf{w}^t R \mathbf{w}} + +That is, using spatial filters :math:`W` on time-series :math:`X \in M_{n \times t}(\mathbb{R})`: + +.. math:: + + \mathbf{A} = W^t X, + +results in "activation" time-series :math:`A` of the estimated "sources", +such that the ratio of their variances, +:math:`\frac{\text{Var}(\mathbf{w}^T X_S)}{\text{Var}(\mathbf{w}^T X_R)} = \frac{\mathbf{w}^T S \mathbf{w}}{\mathbf{w}^T R \mathbf{w}}`, +is sequentially maximized spatial filters :math:`\mathbf{w}_i`, sorted according to :math:`\lambda_i`. + +GED in the principal subspace +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Unfortunately, :math:`R` might not be full rank depending on the data :math:`X_R` (for example due to average reference, removed PCA/ICA components, etc.). +In such cases, GED can be performed on :math:`S` and :math:`R` in the principal subspace :math:`Q = \operatorname{Im}(C_{ref}) \subset \mathbb{R}^n` of some reference +covariance :math:`C_{ref}` (in Common Spatial Pattern (CSP) algorithm, for example, :math:`C_{ref}=\frac{1}{2}(S+R)` and GED is performed on S and R'=S+R). + +More formally: +Let :math:`r \leq n` be a rank of :math:`C \in S^n_+`. +Let :math:`Q=\operatorname{Im}(C_{ref})` be a principal subspace of :math:`C_{ref}`. +Let :math:`P \in M_{n \times r}(\mathbb{R})` be an isometry formed by orthonormal basis of :math:`Q`. +Let :math:`f:S^n_+ \to S^r_+`, :math:`A \mapsto P^t A P` be a "restricting" map, that restricts quadratic form +:math:`q_A:\mathbb{R}^n \to \mathbb{R}` to :math:`q_{A|_Q}:\mathbb{R}^n \to \mathbb{R}` (in practical terms, :math:`q_A` maps +spatial filters to variance of the spatially filtered data :math:`X_A`). + +Then, the GED of :math:`S` and :math:`R` in the principal subspace :math:`Q` of :math:`C_{ref}` is performed as follows: + +1. :math:`S` and :math:`R` are transformed to :math:`S_Q = f(S) = P^t S P` and :math:`R_Q = f(R) = P^t R P`, + such that :math:`S_Q` and :math:`R_Q` are matrix representations of restricted :math:`q_{S|_Q}` and :math:`q_{R|_Q}`. +2. 
GED is performed on :math:`S_Q` and :math:`R_Q`: :math:`S_Q W_Q = R_Q W_Q D`. +3. Eigenvectors :math:`W_Q` of :math:`(S_Q, R_Q)` are transformed back to :math:`\mathbb{R}^n` + by :math:`W = P W_Q \in \mathbb{R}^{n \times r}` to obtain :math:`r` spatial filters. + +Note that the solution to the original optimization problem is preserved: + +.. math:: + + \frac{\mathbf{w_Q}^t S_Q \mathbf{w_Q}}{\mathbf{w_Q}^t R_Q \mathbf{w_Q}}= \frac{\mathbf{w_Q}^t (P^t S P) \mathbf{w_Q}}{\mathbf{w_Q}^t (P^t R P) + \mathbf{w_Q}} = \frac{\mathbf{w}^t S \mathbf{w}}{\mathbf{w}^t R \mathbf{w}} = \lambda + + +In addition to restriction, :math:`q_S` and :math:`q_R` can be rescaled based on the whitened :math:`C_{ref}`. +In this case the whitening map :math:`f_{wh}:S^n_+ \to S^r_+`, +:math:`A \mapsto P_{wh}^t A P_{wh}` transforms :math:`A` into matrix representation of :math:`q_{A|Q}` rescaled according to :math:`\Lambda^{-1/2}`, +where :math:`\Lambda` is a diagonal matrix of eigenvalues of :math:`C_{ref}` and so :math:`P_{wh} = P \Lambda^{-1/2}`. + +In MNE-Python, the matrix :math:`P` of the restricting map can be obtained using +:: + + _, ref_evecs, mask = mne.cov._smart_eigh(C_ref, ..., proj_subspace=True, ...) + restr_mat = ref_evecs[mask] + +while :math:`P_{wh}` using: +:: + + restr_mat = compute_whitener(C_ref, ..., pca=True, ...) \ No newline at end of file diff --git a/doc/_includes/institutional-partners.rst b/doc/_includes/institutional-partners.rst index 083be66859d..89586058cf0 100644 --- a/doc/_includes/institutional-partners.rst +++ b/doc/_includes/institutional-partners.rst @@ -14,13 +14,13 @@ Current partners ~~~~~~~~~~~~~~~~ - `Aalto-yliopiston perustieteiden korkeakoulu `_ +- `AE Studio `_ - `Athinoula A. Martinos Center for Biomedical Imaging `_ - `Children’s Hospital of Philadelphia Research Institute `_ - `Donders Institute for Brain, Cognition and Behaviour at Radboud University `_ - `Harvard Medical School `_ - `Institut national de recherche en informatique et en automatique `_ - `Karl-Franzens-Universität Graz `_ -- `Macquarie University `_ - `Massachusetts General Hospital `_ - `Max-Planck-Institut für Bildungsforschung `_ - `SWPS Uniwersytet Humanistycznospołeczny `_ @@ -32,11 +32,13 @@ Former partners - `Aarhus Universitet `_ - `Berkeley Institute for Data Science `_ - `Boston University `_ -- `Commissariat à l’énergie atomique et aux énergies alternatives `_ +- `Commissariat à l’énergie atomique et aux énergies alternatives `_ +- `Fondation Campus Biotech Geneva `_ - `Forschungszentrum Jülich `_ - `Institut du Cerveau et de la Moelle épinière `_ - `Institut national de la santé et de la recherche médicale `_ - `Massachusetts Institute of Technology `_ +- `Macquarie University `_ - `New York University `_ - `Technische Universität Ilmenau `_ - `Télécom ParisTech `_ diff --git a/doc/_includes/inverse.rst b/doc/_includes/inverse.rst index cf0eacffecf..6d0d77ed8bb 100644 --- a/doc/_includes/inverse.rst +++ b/doc/_includes/inverse.rst @@ -70,7 +70,7 @@ this by writing :math:`R' = R/ \lambda^2 = R \lambda^{-2}`, which yields the inverse operator .. math:: - :label: inv_m + :name: inv_m M &= R' G^\top (G R' G^\top + C)^{-1} \\ &= R \lambda^{-2} G^\top (G R \lambda^{-2} G^\top + C)^{-1} \\ @@ -106,12 +106,12 @@ The MNE software employs data whitening so that a 'whitened' inverse operator assumes the form .. math:: \tilde{M} = M C^{^1/_2} = R \tilde{G}^\top (\tilde{G} R \tilde{G}^\top + \lambda^2 I)^{-1}\ , - :label: inv_m_tilde + :name: inv_m_tilde where .. 
math:: \tilde{G} = C^{-^1/_2}G - :label: inv_g_tilde + :name: inv_g_tilde is the spatially whitened gain matrix. We arrive at the whitened inverse operator equation :eq:`inv_m_tilde` by making the substitution for @@ -128,7 +128,7 @@ operator equation :eq:`inv_m_tilde` by making the substitution for The expected current values are .. math:: - :label: inv_j_hat_t + :name: inv_j_hat_t \hat{j}(t) &= Mx(t) \\ &= M C^{^1/_2} C^{-^1/_2} x(t) \\ @@ -137,7 +137,7 @@ The expected current values are knowing :eq:`inv_m_tilde` and taking .. math:: - :label: inv_tilde_x_t + :name: inv_tilde_x_t \tilde{x}(t) = C^{-^1/_2}x(t) @@ -151,7 +151,7 @@ to raw data. To reflect the decrease of noise due to averaging, this matrix, C_0 / L`. .. note:: - When EEG data are included, the gain matrix :math:`G` needs to be average referenced when computing the linear inverse operator :math:`M`. This is incorporated during creating the spatial whitening operator :math:`C^{-^1/_2}`, which includes any projectors on the data. EEG data average reference (using a projector) is mandatory for source modeling and is checked when calculating the inverse operator. + When EEG data are included, the gain matrix :math:`G` needs to be average referenced when computing the linear inverse operator :math:`M`. This is incorporated during creating the spatial whitening operator :math:`C^{-^1/_2}`, which includes any projectors on the data. EEG data average reference (using a projector) is mandatory for source modeling and is checked when calculating the inverse operator. As shown above, regularization of the inverse solution is equivalent to a change in the variance of the current amplitudes in the Bayesian *a priori* @@ -224,7 +224,7 @@ computational convenience we prefer to take another route, which employs the singular-value decomposition (SVD) of the matrix .. math:: - :label: inv_a + :name: inv_a A &= \tilde{G} R^{^1/_2} \\ &= U \Lambda V^\top @@ -238,7 +238,7 @@ Combining the SVD from :eq:`inv_a` with the inverse equation :eq:`inv_m` it is easy to show that .. math:: - :label: inv_m_tilde_svd + :name: inv_m_tilde_svd \tilde{M} &= R \tilde{G}^\top (\tilde{G} R \tilde{G}^\top + \lambda^2 I)^{-1} \\ &= R^{^1/_2} A^\top (A A^\top + \lambda^2 I)^{-1} \\ @@ -253,7 +253,7 @@ where the elements of the diagonal matrix :math:`\Gamma` are simply .. `reginv` in our code: .. math:: - :label: inv_gamma_k + :name: inv_gamma_k \gamma_k = \frac{\lambda_k}{\lambda_k^2 + \lambda^2}\ . @@ -261,7 +261,7 @@ From our expected current equation :eq:`inv_j_hat_t` and our whitened measurement equation :eq:`inv_tilde_x_t`, if we take .. math:: - :label: inv_w_t + :name: inv_w_t w(t) &= U^\top \tilde{x}(t) \\ &= U^\top C^{-^1/_2} x(t)\ , @@ -269,7 +269,7 @@ measurement equation :eq:`inv_tilde_x_t`, if we take we can see that the expression for the expected current is just .. math:: - :label: inv_j_hat_t_svd + :name: inv_j_hat_t_svd \hat{j}(t) &= R^{^1/_2} V \Gamma w(t) \\ &= \sum_k {\bar{v_k} \gamma_k w_k(t)}\ , @@ -314,7 +314,7 @@ normalization factors, it's convenient to reuse our "weighted eigenleads" definition from equation :eq:`inv_j_hat_t` in matrix form as .. math:: - :label: inv_eigenleads_weighted + :name: inv_eigenleads_weighted \bar{V} = R^{^1/_2} V\ . 
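In practice, the regularized, whitened inverse derived above is what MNE-Python's high-level
calls construct and apply. A minimal usage sketch (assuming ``evoked``, ``fwd`` and
``noise_cov`` objects already exist; this is not the library's internal code)::

    # Sketch: build a minimum-norm inverse operator and apply it to evoked data.
    from mne.minimum_norm import make_inverse_operator, apply_inverse

    snr = 3.0                     # assumed signal-to-noise ratio of the evoked data
    lambda2 = 1.0 / snr ** 2      # the regularization parameter lambda^2 used above
    inv = make_inverse_operator(evoked.info, fwd, noise_cov, loose=0.2, depth=0.8)
    stc = apply_inverse(evoked, inv, lambda2=lambda2, method="MNE")

Here ``method="MNE"`` applies the operator :math:`M` directly; the noise covariance supplies
the whitening operator :math:`C^{-^1/_2}` discussed above.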
diff --git a/doc/_includes/ssp.rst b/doc/_includes/ssp.rst index 90b46d99c33..40b25a237db 100644 --- a/doc/_includes/ssp.rst +++ b/doc/_includes/ssp.rst @@ -30,13 +30,13 @@ Without loss of generality we can always decompose any :math:`n`-channel measurement :math:`b(t)` into its signal and noise components as .. math:: b(t) = b_s(t) + b_n(t) - :label: additive_model + :name: additive_model Further, if we know that :math:`b_n(t)` is well characterized by a few field patterns :math:`b_1 \dotso b_m`, we can express the disturbance as .. math:: b_n(t) = Uc_n(t) + e(t)\ , - :label: pca + :name: pca where the columns of :math:`U` constitute an orthonormal basis for :math:`b_1 \dotso b_m`, :math:`c_n(t)` is an :math:`m`-component column vector, and the @@ -48,12 +48,12 @@ such that the conditions described above are satisfied. We can now construct the orthogonal complement operator .. math:: P_{\perp} = I - UU^\top - :label: projector + :name: projector and apply it to :math:`b(t)` in Equation :eq:`additive_model` yielding .. math:: b_{s}(t) \approx P_{\perp}b(t)\ , - :label: result + :name: result since :math:`P_{\perp}b_n(t) = P_{\perp}(Uc_n(t) + e(t)) \approx 0` and :math:`P_{\perp}b_{s}(t) \approx b_{s}(t)`. The projection operator @@ -80,7 +80,7 @@ the brain, it is necessary to apply the projection to the forward solution in the course of inverse computations. For more information on SSP, please consult the references listed in -:footcite:`TescheEtAl1995,UusitaloIlmoniemi1997`. +:footcite:t:`TescheEtAl1995,UusitaloIlmoniemi1997`. Estimation of the noise subspace ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -101,13 +101,13 @@ The EEG average reference is the mean signal over all the sensors. It is typical in EEG analysis to subtract the average reference from all the sensor signals :math:`b^{1}(t), ..., b^{n}(t)`. That is: -.. math:: {b}^{j}_{s}(t) = b^{j}(t) - \frac{1}{n}\sum_{k}{b^k(t)} - :label: eeg_proj +.. math:: {b}^{j}_{s}(t) = b^{j}(t) - \frac{1}{n}\sum_{k}{b^k(t)} + :name: eeg_proj where the noise term :math:`b_{n}^{j}(t)` is given by -.. math:: b_{n}^{j}(t) = \frac{1}{n}\sum_{k}{b^k(t)} - :label: noise_term +.. 
math:: b_{n}^{j}(t) = \frac{1}{n}\sum_{k}{b^k(t)} + :name: noise_term Thus, the projector vector :math:`P_{\perp}` will be given by :math:`P_{\perp}=\frac{1}{n}[1, 1, ..., 1]` diff --git a/doc/_static/default_linux_dock_icon.png b/doc/_static/default_linux_dock_icon.png new file mode 100644 index 00000000000..63156ae5865 Binary files /dev/null and b/doc/_static/default_linux_dock_icon.png differ diff --git a/doc/overview/images/flow_diagram.svg b/doc/_static/flow_diagram.svg similarity index 100% rename from doc/overview/images/flow_diagram.svg rename to doc/_static/flow_diagram.svg diff --git a/doc/_static/funding/cds-dark.svg b/doc/_static/funding/cds-dark.svg new file mode 100644 index 00000000000..940d66b5680 --- /dev/null +++ b/doc/_static/funding/cds-dark.svg @@ -0,0 +1,26 @@ + +image/svg+xml \ No newline at end of file diff --git a/doc/_static/funding/cds.png b/doc/_static/funding/cds.png deleted file mode 100644 index d726b8daeb1..00000000000 Binary files a/doc/_static/funding/cds.png and /dev/null differ diff --git a/doc/_static/funding/cds.svg b/doc/_static/funding/cds.svg new file mode 100644 index 00000000000..07b2482727d --- /dev/null +++ b/doc/_static/funding/cds.svg @@ -0,0 +1,27 @@ + +image/svg+xml diff --git a/doc/_static/institution_logos/AE-Studio-dark.svg b/doc/_static/institution_logos/AE-Studio-dark.svg new file mode 100644 index 00000000000..d3519ed2513 --- /dev/null +++ b/doc/_static/institution_logos/AE-Studio-dark.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/doc/_static/institution_logos/AE-Studio-light.svg b/doc/_static/institution_logos/AE-Studio-light.svg new file mode 100644 index 00000000000..d5c849f1718 --- /dev/null +++ b/doc/_static/institution_logos/AE-Studio-light.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/doc/_static/institution_logos/FCBG.svg b/doc/_static/institution_logos/FCBG.svg new file mode 100644 index 00000000000..c2c930fe5af --- /dev/null +++ b/doc/_static/institution_logos/FCBG.svg @@ -0,0 +1,82 @@ + + + + diff --git a/doc/_static/js/contrib-avatars.js b/doc/_static/js/contrib-avatars.js new file mode 100644 index 00000000000..dde2e8d29f7 --- /dev/null +++ b/doc/_static/js/contrib-avatars.js @@ -0,0 +1,53 @@ +async function getContribs(url) { + result = await fetch(url); + data = await result.json(); + return data; +} + +function addCards(data, container) { + data.forEach((entry) => { + if (!entry.login.endsWith("[bot]")) { + let card = document.createElement("div"); + let anchor = document.createElement("a"); + let image = document.createElement("img"); + card.setAttribute("class", "card my-1 mx-2"); + anchor.setAttribute("href", entry.html_url); + image.setAttribute("class", "card-img contributor-avatar"); + image.setAttribute("src", entry.avatar_url); + image.setAttribute("title", entry.login); + image.setAttribute("alt", `Contributor avatar for ${entry.login}`); + anchor.append(image); + card.append(anchor); + container.append(card); + } + }); +} + +async function putAvatarsInPage() { + // container + let outer = document.createElement("div"); + let title = document.createElement("p"); + let inner = document.createElement("div"); + outer.setAttribute("id", "contributor-avatars"); + outer.setAttribute("class", "container my-4"); + title.setAttribute("class", "h4 text-center font-weight-light"); + title.innerText = "Contributors"; + inner.setAttribute("class", "d-flex flex-wrap flex-row justify-content-center align-items-center"); + // GitHub API returns batches of 100 so we have to loop + var page = 1; + while (true) { + data = await 
getContribs( + `https://api.github.com/repos/mne-tools/mne-python/contributors?per_page=100&page=${page}` + ); + if (!data.length) { + break; + } + addCards(data, container=inner); + page++; + } + // finish + outer.append(title, inner); + document.body.append(outer); +} + +putAvatarsInPage(); diff --git a/doc/_static/js/custom-icons.js b/doc/_static/js/custom-icons.js new file mode 100644 index 00000000000..60b3c3b9276 --- /dev/null +++ b/doc/_static/js/custom-icons.js @@ -0,0 +1,19 @@ +FontAwesome.library.add( + /** + * Custom icon definitions + * + * see https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/header-links.html#svg-image-icons + */ + { + prefix: "fa-custom", + iconName: "opencollective", + icon: [ + 24, + 24, + [], + "e001", + // https://simpleicons.org/icons/opencollective.svg + "M12 0C5.373 0 0 5.373 0 12s5.373 12 12 12c2.54 0 4.894-.79 6.834-2.135l-3.107-3.109a7.715 7.715 0 1 1 0-13.512l3.107-3.109A11.943 11.943 0 0 0 12 0zm9.865 5.166l-3.109 3.107A7.67 7.67 0 0 1 19.715 12a7.682 7.682 0 0 1-.959 3.727l3.109 3.107A11.943 11.943 0 0 0 24 12c0-2.54-.79-4.894-2.135-6.834z", + ], + }, +); diff --git a/doc/_static/js/set_installer_tab.js b/doc/_static/js/set_installer_tab.js new file mode 100644 index 00000000000..ea019d51852 --- /dev/null +++ b/doc/_static/js/set_installer_tab.js @@ -0,0 +1,55 @@ +/* inspired by https://tobiasahlin.com/blog/move-from-jquery-to-vanilla-javascript/ */ + +function documentReady(callback) { + if (document.readyState != "loading") callback(); + else document.addEventListener("DOMContentLoaded", callback); +} + +function setTabs() { + var platform = "linux"; + if (navigator.userAgent.indexOf("Win") !== -1) { + platform = "windows"; + } + if (navigator.userAgent.indexOf("Mac") !== -1) { + // there's no good way to distinguish intel vs M1 in javascript so we + // just default to showing the most modern macOS installer + platform = "macos-apple"; + } + var platform_short = platform.split("-")[0]; + + let tab_label_nodes = [...document.querySelectorAll('.sd-tab-label')]; + + let install_tab_nodes = document.querySelectorAll( + '.install-selector-tabset')[0].children; + let install_input_nodes = [...install_tab_nodes].filter( + child => child.nodeName === "INPUT"); + let install_label = tab_label_nodes.filter( + // label.id is drawn from :name: property in the rST, which must + // be unique across the whole site (*sigh*) + label => label.id.startsWith(`install-${platform}`))[0]; + let install_id = install_label.getAttribute('for'); + let install_input = install_input_nodes.filter(node => node.id === install_id)[0]; + install_input.checked = true; + + let uninstall_tab_nodes = document.querySelectorAll( + '.uninstall-selector-tabset')[0].children; + let uninstall_input_nodes = [...uninstall_tab_nodes].filter( + child => child.nodeName === "INPUT"); + let uninstall_label = tab_label_nodes.filter( + label => label.id.startsWith(`uninstall-${platform_short}`))[0]; + let uninstall_id = uninstall_label.getAttribute('for'); + let uninstall_input = uninstall_input_nodes.filter(node => node.id === uninstall_id)[0]; + uninstall_input.checked = true; +} + +function setAlert() { + for (let button of document.querySelectorAll('.install-download-button')) { + button.addEventListener('click', function() { + alert = document.querySelectorAll('.install-download-alert')[0]; + alert.style.display = 'block'; + }); + } +} + +documentReady(setTabs); +documentReady(setAlert); diff --git a/doc/_static/js/update_installer_version.js 
b/doc/_static/js/update_installer_version.js new file mode 100644 index 00000000000..ad18890caf7 --- /dev/null +++ b/doc/_static/js/update_installer_version.js @@ -0,0 +1,61 @@ +/* inspired by https://tobiasahlin.com/blog/move-from-jquery-to-vanilla-javascript/ */ + +function documentReady(callback) { + if (document.readyState != "loading") callback(); + else document.addEventListener("DOMContentLoaded", callback); +} + +async function getRelease() { + result = await fetch("http://github.com/service/https://api.github.com/repos/mne-tools/mne-installers/releases/latest"); + data = await result.json(); + return data; +} +async function warnVersion() { + data = await getRelease(); + // Take v1.5.1 for example and change to 1.5 + ids = ["linux-installers", "macos-intel-installers", "macos-apple-installers", "windows-installers"]; + warn = false; + ids.forEach((id) => { + label_id = document.getElementById(id); + // tab is immediately after label + children = [].slice.call(label_id.parentNode.children); + div = children[children.indexOf(label_id) + 1]; + a = div.children[0].children[0]; // div->p->a + ending = a.href.split("-").slice(-1)[0]; // Should be one of: ["macOS_Intel.pkg", "macOS_M1.pkg", "Linux.sh", "Windows.exe"] + data["assets"].every((asset) => { + // find the matching asset + if (!asset["browser_download_url"].endsWith(ending)) { + return true; // continue + } + old_stem = a.href.split("/").slice(-1)[0]; + new_stem = asset["browser_download_url"].split("/").slice(-1)[0]; + a.href = asset["browser_download_url"]; + // also replace the command on Linux + if (ending === "Linux.sh") { + code = document.getElementById("codecell0"); + } + if (!warn) { + // MNE-Python-1.5.1_0-Linux.sh to 1.5 for example + old_ver = old_stem.split("-").slice(2)[0].split("_")[0].split(".").slice(0, 2).join("."); + new_ver = new_stem.split("-").slice(2)[0].split("_")[0].split(".").slice(0, 2).join("."); + if (old_ver !== new_ver) { + warn = `The installers below are for version ${new_ver} as ${old_ver} is no longer supported`; + } + } + return false; // do not continue + }); + }); + if (warn) { + let outer = document.createElement("div"); + let title = document.createElement("p"); + let inner = document.createElement("p"); + outer.setAttribute("class", "admonition warning"); + title.setAttribute("class", "admonition-title"); + title.innerText = "Warning"; + inner.innerText = warn; + outer.append(title, inner); + document.querySelectorAll('.install-selector-tabset')[0].before(outer); + } +} + +documentReady(warnVersion); diff --git a/doc/_static/mne_logo.svg b/doc/_static/mne_logo.svg index 470b72f0ab6..dc9ff39d029 100644 --- a/doc/_static/mne_logo.svg +++ b/doc/_static/mne_logo.svg @@ -1,16 +1,16 @@ - + - 2022-04-13T15:28:22.434101 + 2023-11-07T13:26:54.126105 image/svg+xml - Matplotlib v3.5.1, https://matplotlib.org/ + Matplotlib v3.8.0.dev1998+gc5707d9c79, https://matplotlib.org/ @@ -22,843 +22,760 @@ - + - +" clip-path="url(#p379c9ce79c)" style="fill: #ffffff"/> - + 
+[embedded base64 PNG raster data for the regenerated logo omitted]
lAUGf1tUJSD/rJGTzXJerBrKhrk7xQjTb8o+D7vXRrlrvXnzw/XmacC+qo7TAPtMblXIztwxf2qG/XX8mWr3usT4K9du/2uq0sVe8f7CgL1G5k0zgN1wPbaO5vNV/TPcPxXHBH0rA/lXirhomM6fNaZsQMskZpD+KIZzVhGst5yV3muO4YTu9eZV44bLgH19pkIe88M2K6Rgg0b2j/Nnqt3rEuPv2nX8d2Ts6GKv9B25M8eAPTZvJnM2V717vcqz3t1nSLEvTVWB8cD1BasG5j9pN/hjuyaixQBSq0kFoj+K5TrIWLsKYL35nwfXm1+UT+qNzpOH9s3nKZ087F1mO7MYYUuVDPvneK/2V/Ve7c9UO9O62P+aGLvYK30Pj+YYsMfmzWTO5ip3r1eC68HPv7HA/ULgSzGDV4P5dWIBkVaeFAH6o9iu4az13LkOhuuPnnW715tPhIf2Dai72HnymB+4WXPcKZ/r79pj/E9ZF6Y17/VXrn2H/0iGATtnDrBvTj6w97iUGjxmMgHDdYZnvKg6ms/bWn7wN/Jh8bfFYgbyN/WerOw1t6xoqcPzZ7HB9Jsy19lg/ar3Ck93r1/z0b4JPQWwj2YqAP1KD/eMGfbX8Weq/ZR18Zrbf9S/ypiYSt/BwN4OdoXmh5m8mczZ3CzAnv1cw1JDRB3No7uWn6/hDGC4ApB/1swJZzgnVk1Vg+bvxArTgfxzsHttdhyv4Xqkl/RNT3gdpwH2mVyFBznmh27WHMYM++d4r/Znql3Vu9efac17/ZVr3+EPrO9iP/l7azTHgD0/byYTOLt7PTs/ooa7T8gz7/eRMu8KZoK+VxZDEcq/U8RFxHT+rDllA1oWMUP0RzGcr4y1Mlh/51sfrjevCI/8mzAD9vWZCnmsEGEkhzGDEbgw+bv2GH9V717/U9bc/t916pgY1u9fwICdIW8m03A9Lz+ihrvPfC1vroXxGe7sXfHPurqIlcD8J2VAP8brIlIMILWaVCD6o1iug6pQveWsPbYT4Xrz4+lebz75N2LKN6NK/1R318PwzqxKOYwZ9s/zZ6pd1bvXX9W7159pzSv4M46JYfw+GckYzakK2Kvfc9506miY7PyIGqLqALqugzUvTf22GMzgtecknALno8QCIq08KQL0R7Fdw1nruXMdDNcfPSO9uLrXm0/+zZjyDekJDzu7Hoh3ZjF2mFfJOMnfte/37vVX9e71Z1rzXn/l2m9iGxNz8vci4BecZufNZBqu5+RH1BBVx8w18K/WAPdvUgbyj+o9iQb0ViWpw/NnscF0IH+NDdavehuu9/vUuBlTBOwzuSoPWKwP+NVyGMEL2zGoQtNef1XvXn9V717/U9a8gj8bYG/7nJkB8HewK9yPZeQBmoBd+TkmqoaoOgLgOoA/roWfP4pjgMJVgPyzRi8EhnNi1VM2zN0lRpD+qOzzsHt9dhyvUtd68432M2BfVYcBO2cmexZrDmOG/fP8mWq397y/qvdqf+XagRpz2Ct9Jxqwa+YZruvlR9Rw0yK4/kp/dri/Owgm6HvlZKlC+VeKurCYzqE1pmw4yyR2kH4TwznLWCtlsN68DdfHfWJqYrgxOw2wz+QqPGSxPuQz5zBm2D/Hu9df1bvX397z/kznk9GfrYudFX4zd7FXBOxKzRynAXbD9aaNYP2dro+U+XbAbDD36kmuBOa/KQP8sV0XEWIAqFWlAtFvYroWqkL1lrPa/zy43vyifAzYI/LVbsoVAPtoHvODPiO0YMxQ9meq/ZR1sfe8/ynXyg5/NsDe9jkzAzBgz8iayQP2/suDm1S71xn+FXIRuP5L/65J3Az3T4vEDF17Lo6T4HyUmICktV9qAP1ZbNdv1nruXAfD9ZsfJ1xvXvk3Zix1nATYZ3LZH+x2ZbGCfMYM+8f5M9Vu73l/Ve9ef6Y13+HPBtkrfI+MZAD1ALtCo8NMXkb3uuF6bg0RcD0YrL/TnpemqnXHv1PvxWVAb1WROjh/FhtIvylznStB9ZahAdbvvnH1ssH15hPhoX2TmJWtAthH89izWHMYfyxgOwYmf6ba7T3vb+95f6bzOeKvDth3ZBiw898XZeSd1L2eDbez8wEpuP5Ke4D7N105ESpQ/lGzF6iBvRWharD8k1hB+k3Z52L3+iiD9ea9wrN253rziVH2TeJsDQbsXHms4HtXDmMGG+Bhg0eq4FTVu9ff3nu9V/sbsJ+TYcCukQWc1b2eDbez88XB+huff34+mjEB36snUBHMv1M07GE6n9ZrZQNZRrFD9EcxnL+M9VIH681/hScvXG9+PDcl6t3rs/knjIjZnceeVQVcKAM2Nn+m2lW9e/3tvde715/pWhnx7wFIFeawM36HAAbsozmjWTN5u7vXFe/ds7Nn8wEeuL6hSexzh/u3AhgB7olg/qoYYOA3rbimFI77JCkB9EcxXUdVoXrL0QPrzddwvc8n/ybJgH19pkJepQdkxgz75/mrevf629veLP7qXeynAnZmuL4zqzpcB/Yf40ymevZNs3CdsGu9Z/O5kTKKQP6mkYvnREi/W0xQ0/ouVXj+KMZrLmtdd66FwfqjZ7Sf9I3JG4/8OgzYa+QxP7gywgtD/Fh/ptrtbe8V/qdc44ABO0tGJcDOfj80mgW4e505dzYbOKprvUdrZ7hfOVhmKP+s0UU3qLdYVQGYvxMjSAfy17wSVG8ZK70N1/t9OOpRnmNowB6bx/yQzAov1CGPsj9T7fa2d7Z3r78Be67/jgyPh9l772W4Xi93Nhvg6FoXeXbNf2nq1YVSAvPPijqJBvcWkA9sM8UK0R+VfX52r9Gu4zVYj6sxsjZ3r2tmG7DnZLE+jDMeB9MxMNV+yrrY294r/A3Yc/1HMgzY+e+FPBqGN3c2u1LXelJT2M/HYCbA23NgynD+k3aBLabzrqJsyKoqBYB+E9M5zlg39W715r/mGAzXezzy68juClF6SDBgn89iBBiMGcr+rp3bW3W9VSE403r3+huwx/qPZOwA7MzQ293rf0rpvlk1F6jTtc7yr63/1beXpn4ulhXM9i5OVUA/KiawaOlICZ4/i+2az1rLCt3qzV8DrDdPw3X2OhRvnA3Y87IYu793ZNg/zl/Vu9efpW571/Hu9Tdgj/UfyTBg574PMlyvlwsYrkfXAQD/fV3L7EtTvx8gK5R/1MgiG9JbFaUMzV+JDaQDuWu8cz12HOepYL35RXrxdAKc2r0+m10ZsI/mVXpYZsxQ75JnWx+W2u19pvcp1zfQB5oM2OMzTn/BKXMOsG+ED5Bzz6x2nz6bC8zBdYP1P/UGrL/T+hnuVxdHAcw/avaEGdhbkaoGyj+JEaI/Kvtc7F4fZbDevFd4Gq5f88i/eVKcqaj0gGLAvi+HMeMkf6ba7T3nrXqd2Pu1VgJepr97Rn+gDmBnvl8YzarevX4aXM/uWq/ybHhTJ1h/p/yXpt5UFcy/0ypIZZDPq2wQyyp2gP4ohnOYsV7qUL35r/CMr9lwfV0dp3Wvz+QqPBSxgu9dOYwZ9s/x7vU/wVv1XNp73tuAPc8f4ATsrN/jO3MAje51w/VrMly
PqyMIrL9z+flYJCO87TkxVeB8jxiAoHW2lAD6TUx/N1nrt2sNFMF68+XtWm9+PHC9+eTfRBmwr8+sCNhHsliPh/E4WIBsr/dqf6baWbxV15vFm+U89nr3+jPNYWdalxH/kQwDds6cnd3ru+8h1TJnc9XhOsMzIbAcrL/Tt5emfhcjlL9p5OSeCOkt65UUwfkrMcF0IHddd66FwfqjZ7Sf4Xp0HaqjaQzY87IYAQBjxkn+9rZ3tDcLvGfyZgLsbXuOdRnxH8lgW/+2D99330jGaA7g7vXIvJnM2dxMuJ79LBZVQxJYf6f5kTLqUP5ZMxeJYb3FpCrA/JXYIPqjste9ElRvGat819TOOBKmeUX56MP1iBrUbuIVHo7Ysxi7v3dk2D/O39683iyA3d6v5Redxnj3+gM1AHul+wSgdve64fo1uWu9KQKuRz1tv1mTPTPcry6mEph/pSg4YnB/prJBbbaYIfpNDOdo9zrtOuaVx3VS13rzivLh6KTPvqnLgvtK/9TWgH1fDiNoYDoG136uNxPY9A8Dc94G7Hn+Bux8Oe5ez8+byQTOhussYB2Igeud68Hz0lSg72Sow/lPyoB6hvxNDEC1ghTg+bNYzn3G2lWA6s2fH6w3T8P1zx66N5ZZN/IKDyzsWaw5jKCB7RiYQCGLd68/S90s3ixrrerNBHmZPk8Y/Q3YeTIAd68z5M1kArpwPbvBCZAG6+/0c8mIEcaOnMzKkH5WLLDR4pIiOH8U43WdtaaG6le8o/1iazVcj68hM1/pAcKAfV8OY8ZJ/ky123vOm+U6YVkPJm8D9hj/1XAd4PsXBKwZozns3euG6++lPG89+/kL4BkHE/Vc/OZ6uNbhrgrlnzVzYRjWW4pSB+bvxAjSgdz13rkmO45z1fGwd6w3v0ivOnA9og53r9fIY354ZgQBykCJzZ+pdhZv1fX2Wu/zNmDf7w2s714H+NZ/JGNXDnv3euX7xtnMU7vWDdb/VOe1EDdSpucAFOD8syIBiOG99U5VAfk3sQL0mxjOy+41UobqzXuVr+H6NY9z4fpMtkr3+mgeexYj/N6RYf84/xO8VQGk13qf9ymAne2zyuNheDKAfd3r7PdWo1kzeTOZWXBdvWu9Elif/dcLD8qZ4V4dzn/TTnhnuD8uBsiqKHZ4/iimc5yxbupQvfmv8jVYv+6jDddn85W613dnsmexPqQzASX723ulNwtQ9nrMeasCdqbPkhF/NsDO+H23K8NwfS4rIw/QhOsG601MYP1v/N93/9PPHwvOBmhHFrIipB8VE1C0dKQEzV+J7brPWs9d62Co/ugZ7We4Hl2DGlyfyVV4WGLOMmC3v71jvVXhqdf6twzYc/zVAXul721mwG64/lo7R/rcpPyvdRnAetIYmLf6ANbf6c8O9ysnhg3KP2v2xBjYW+pSB+avxAbRb8pc651rsuM4DdajvCI8OEB/dheHSid5RiZ7VqUcNujTm8FWP0vtTOvCUjcLPPV6/JYB+7z/aXC9UkY1uK6QNZPnrvU+Gaz/1gBUf6t/8H/HRsr0nFB2OP9KkeDD8N76popw/IpYAfpNDOdl9xopQ/XmbbA+5pPftR5RhyJcn8mt3I3ECr5Hchgz7J/j3evP4s2y3l6Pcd9ebwP2vd6APmBn/K4byTBc3581k5cB11XBOjAP1xnAemK3+kv989ln/Qz33otCEdB/0k5oZ7g/LwbIqih2eP4olnOcsWa7jl0NqjffaD8+sN68DNdn89VG0lR9QDNgt3+G/wneJwBlr8dvsQD2U/7WgfMA+8nd6xUbFkazZvLURsIoj4OpAtY3QfVPynlp6ieNXFzVIP2oWECipSklaP4stms/ay13rsPqYzwRrDe/KJ+Yuk6H67P5Kg8x7A+DO7MM2OMz2OpnqZ3FmwUor/SuXnOvdw/IWAkXT/lbB3jWvG3P9R2xK6Na97rh+p86Da4brN/qSAfrv/RQz8+lRWLvnJ59GDewt9SlDMvfiQ2i35S51rvXRBWqN+8VnnW71ptPhIfh+s5chQcn5izWB1zG42ACV6q1n+B9AqxWrJkF9rJcH73evf7uXufIWH0eAG64rpAF7Ifravf6N83AdYP131oA1t/pWod7zwKzw/lXigIfBvfWVVUE5N/ECtAflX1eqgH1lqEF1ZsvZ9d686oD1yPqOGk0zEwuexcUK/jelcMGT1ZnMK0P09qz1M1yLquvBwvwNWCf9zdgj/UfyWDtXq94LzWaBRiuX5F61zoLWN8I1T8pfqRM7wlSBPTvtBPWGe7HKhu0qkkBnj+K5fxmrJs6VG/+q3wN1q/7aMP12Xx3r8fmsYLvXTkVugdVwViv/wnehtXj3izn8IQZ7EzeBuy5/oC719nv2YAz4PrJXesG65d88me4j5zoSpB+VCwA0dKWGjh/FNvfQNZa7lwHd6vfPKP9DNeja1CD6zO5Vf+ZcaUHXQP2OO/V/id4G1bv8V3pbcC+15tlvdv2XJ+vOzJYX2zKes+RkaUC10/sWjdYj62j0+vn5YXD3j09c9EY1ltVpAzL34kNot+UudZVgPo9Y5XvOWC9eUV4cIB+w/X1mQp5rA+7p2awQNle715/e895u+Y93izAl2U9VnuzrHfbnueze4c/4O515hzAcP2bTu5arwbWA3xed7j3XGDscP5ZkcDD8N7qUUVA/k2sAP0mhnOye42UoXrzNlgf88nvWo+oIxPuG7BrZp0Iv3dkGLCf6e2a93izAF+W9VjtzTLzvm1/lr+713lzgPpw3WB9NL8OWI/sev/vn17zI2V6L1I1QP9JGbDOkH9eDJBVTezg/Fks5zhj3XYduxpUb77RfnxgvXkZrs/mG67H5lV7CGUDHKsz2AANS+0neLvmPd4swJflh7SV3kzz15U/W0czGLvXK91vjOYAhuuflAXX1cE6C1SP9HkB1T9p/wz30Qu9EqifEQtItPSkBs2fxXbtZ61nBaDe/DWgevOsC9abT34dhuucmexZbGB6V4b9z/ZmAZ2K68Gyzgbs+7xZ/rVA2173s3XEv0r3eqV7mpt2nJublEbCGKyPiQWsJ0H1T/r538ll75yOgBSG9pai1EH5O7EB9Jsy13vnmuw4zpXHY7A+4hPhYbi+M9fd6zVz1CGKKnjr9WfxZllvxfVgWedVgN3Xxp9i+TGjbc9xLnf4A1z/euC+D1/GzhygNlzP6Fo3WM+tIcojEKp/Wpd7h3vvyWcH9K+0AtAY4ltAXSh+Razg/FEM5ydjnQzVX3nG1lsVrDcf3ZEws/mG67F5zA+jbJBjRwaTP1PtLHWzeLvmMV+AA7Cz/K2s9GYCvEyfqzv8mdb+vg/fPcBIxmiOR8K8lmLXujpYZ4HqQBxYH1yT8ZEyIxeQIqT/JgaQd9Mp8J9pzStJAZw/iuk6qArTW87aY1t1HCeA9eYV4aHdtT6brwTXRzMN1/flMD68s4DT1f4neLOATtd8F8vIEsXrudebZa3b9hzeO/xPnbvOep8B7IXrBuufZbA+J6Zu9YgOfuDXMe2d4T5zMVaE9dFiApDWXqnB8ndiu4Yz17UKUG
8Zq3zjazdY56/DcH1NJntWlYdrNpDC5M9UO0vdLN4Mviu9GcbDtDp6ts1fi9Xeimvd693rf+JoGMbv5pGM0RzD9T+lNg4mE6wbqv/WAqj+ST//u8jZu6MjwYXhvcWgKpD8ndjg+aOy13732qh2qTdvbqje/LjAevPJh+vKI2mqw/XRPOYHUsbu9R0Zyv72nvNmgXoMviu9GcbDtO29zo9SXOte7x3+J8L1kRxmuL5zJIwKWAfG4brBep6HKFT/pMcZ7n1iB/SftBo+GehrKxvGsokZnD+K5bxVg+n3nJXe/FC9eRqsr6pDEa7P5FaF6zuzGB/iGTNYwNtqfxZvFrCnuB4MvgAHYK9+/gB3r+/2BvjgetuHZ/1HM0ZzDNd/S6lrfQasq4+BYelWJ4LqAN4e08wM9zEpg/qrYgF/lnWTCjR/FtPfUtYaVgDqzX/NcTB3qze/KJ8aYH22DsVxNApwfTSPFa6P5DBmnOTPVDtL3a55vS+gB9hZYLLiOrftObx3+LMB9grfyyMZwD64zn4PCJzVta7csc7QrS4C1T9p7wx3YP6B/wRgb1k3qYLyd2IC6DdlrvHO9dhxnCpQvXnydas3r5i6GOC68kgaBdi9O48569QMZX97z3m75jFfxa5qRcCuuM693r3+huux/qwZgOH6o3Z3rRus79+fBawnQvW3+hv/9+d/F6XKGJRo8GKAb0WqGiD/JEZ4/qjsc7F7fZSBevNe4cnZrd686oD1iDqyRsLMZKsAffYs1hzGB3mmY2CqncWbZb0ZfFd6M4BfxbVQ7F5vtfRsq+nd688G19s+POuzM4MZrlcfCaM2DiYTrDOMgSkM1T/9z48z3PsuWBVA/007oZzh/l5lA1cVsYPzR7Gc04w123XsalC9+RqsX/cxXN+d6+71+SzD79r+9p7zZvBd6a0G2KtfFwDHOrft63sDfICd7ftmV4bhepMKWAfG4fqpYL1St/pGqP5JMzPcxy78KqB+RCyw0KolJWD+Smx/F1nrWQGoN/9VvvF1M4L15hXhoQ3WM/Orw/XRPFa4PpLDmHGS/wneLHCPwXelNwP4rX5drIS9itdcr3ev/2lwfUeG4fp4lgpcV+xaPx2ss3WrR9XztC4ZM9znIcDJ0N6qI3VQ/k5sAP2mzPXeuSY7jvNUqN78uMB68zFcz+iaN1zPyTo1g8mfqXYWb5b1VvPt9a4M2FmuZcXxMMqfSacBdsbv42pwXWEkzEld68pgvVq3+iKo/kk/+Bt6o05WASWDfOtZVaH4FbGC80cxnJ/d66QM1Jv3mvoN1q965NeR0UGumOvRMPtyKgADFrDX63+CN8u5VPNV7KxW8wU4fsRo29f3NlyP9R/JMFxvGoHrKl3rBuv9YuhWF4bqn9Q63Ef/UNVA/TcxwLtHnfQDANvaV5ECNH8Wy7WQtXa7jt9QPb5Og/XYGtTg+u5Mw/WxHMYMZX+m2lnqds1jvtXBL4Ov4o8YK71Xf5ay/GuBtj2X/44Mw3WNrvWMcTAG62NiAetkUP1TPXMjZWYfpqsB+2ixgEcrR4qw/FmM13DmulaA6fcMDajePPm61ZtXDbAeUYfh+ppM9qwKD/Q7MliAYa93rz+LN8t6M/iu9GYA7F7ju6r/iLHam6l7nek7ZlcGK1zfeR/mrvU/pQjWDdXj6ogC6sBwPftnuD8qGqwY4FtZqgDHv4kRnt+Uvf6710YZqDfvFZ51u9WbT4yUu9Zn81Xg+s7O9dE8d6/b/1RvFsCn5qvYWa3mW32NmbyZ4HrbnmNddmXsgOvM90aVu9YN1q8rG6xXg+pRXfNPXj9//BEpjzHZDd0M+DmVDV+ZxQzNn8VwHjPWa9dxqwH15ssJ1ZuXwTpLfka3/G6Yb7h+bgYTWFEE1Su9XfNdDJ3VDOuw0pthjdv2tdcZ8GiYTH/Dde6udaU564pg3VC9iQ2qd/j82eE++5CsDOx7xQAErXOkBMufxfa3krWWO9dBEag37/pQvXlF+eSC9dkaFEfRGK5r5DBmKPuf4M0C+Bh8e70Z4K/aGjOsb6ujZ9v8tVjtzbLWbfuz/Ff/ywGA84f0m3bB9epd6wbr/ZoF64dD9U+KHykTBSROAvdWTSkD8k9ig+c3Za93FZh+z1jlG197dK1sYD2iHoY61LrWZ3IN1/lz1MEEk/8J3q75LsP19d5qa7zSm+WHIoDnXwpU8GeE6+5abzJYf6/MMTCG6nF1RPoAH9c2d4b7J60GSgb6ZysbzmaLFZo/i+E87V6rXces1KXePKP9uKB68zFYn83fDbozMitB7105jBksMKvXu9f/BG81315vNfir5suwvm37/LVguY4BnrWu4G+43qddXesG6++l2q3OMAKmIlSfWNefSx8cFWeVM4C8XrH9SKC4hlWkAsyfxXTNZKxhBZje/NccxwlQvXkZrM/mq3XLG67vy2HMUPY/wbs6mFSDvwzrwAJ+K6/xam+W0TDKn/8j/mwvlG37rM9gBuvAGFxXAeuKY2DUu9UN1d/raW2udbhHgYaK4H6nmGClNSZVUP5KjNdj5vruXA8D9UfP2FqZutWbT4RHLuBXHEWjANZH83ZlsUJ8Njih7H+CN4PvSm/D37W+DOvbts9fC8UfMVodPdvyfD7v8Ddcv66KXevuWP+uzG71KlCdDagDXWuzd6TMCthiiG9FqxIUvyJGcP6o7POxe312HO/KYzoJqjcvg/WoGty1Hp/HnHVqBgso6/Xu9WfxZllvBl8G+Fv93FVeY5a/adXRMEyfzyP+huvX5K71mDyD9T3ZQP4LW6M8gGVd6jPineF+VZkwzrA/XtlwVVHswPxZLOc4Y912HbsaUG++vFC9+RmsR9ZguB6fx57F2CGvDkBU4RCLd/WaDX/X+qqt70pvxdEwTJ/PTJ/9AB9c33Gf4q71JoP198ocA2OofhcbVP8b/99P//PP1IMr20zx3WIBh5au1GD5K7H9HWSt6c51WH2MKlC9eUZ6xdVnsD6fr9Ytzw68d2ZV6CzfkcECfHv9T/Bm8O31ZgCTamvs9V3r2+t9wmgYpu8VwHD9qpjh+s6udYP1zzq9Wz0CqosB9W+a63CPhBOnw3tLQxUA+TuxgfNHZa97JZjeMlZ6G6r3++SD9Yg6DNe58k6G6zsylP1P8GYBZoa/mr5q67vSm2GNAZ7udVVvwHD9qqrB9Z3g2WB9bS6QD9WjPCLAOglU/6X/4v/3+B95RsrsAmoG+/WUDWOZxAzNn8Vw3jLWy0D9nW+0n8H6qjoU57wrZbJCb+YcNvjdm8FU/wneLMCsMgBW8/X69vv2ejOscduew7vX33D9u3bAdYP1+f1GQavB+t78zBE6j2KD6k9A/Zt+/vggqz6XnAHyWdY7KQHzR7H9XVUF6fesld4aQL158kH15mWwPpuv1LU+mumxMGM5jBlM/vae82bwrQwmGa6JyuvL4uvRMHPevf6G69/lrnWD9U/KAuvqUB3g6VRPAuof9Tf+758d7pHQoTq8tyxVQP5JbPD8psy1rgLTm/+6YzFUH/GJ8NAF67P5lUfCjOZVgusjOUwQhM3/B
G/FmhkAMMMae301fVlGw7D8Pa/2VofrI/cOjHCdGayPZhmsr8udAcoVoLp4l/pHfVnftSNlVoMjA33rkyrC8KtiheaPYjg/u9dpxzGfDNTvnpFeHN3qzcdgfWeu4Tp/jjL8ZvNn8WYBWwxwkgFMqvl6fft9e70Z1rhtn7/Ovd69/qfBdY+E2QPJDdbXZGZ2q1eB6oJA/Zt4ZriPiAHYfVO1HwUU1lxdCrD8WUzXRcb67Tx+NaDefGNrjq6zUrd6RB0G62szPdt9LEd99AwTxOn1ZwF9ijUzwEm1Nfb68vgyrG/bPn8ter17/Q3XP+v0rvWd42BUwLq71ftlqP6nIn6sAIB/Xh/Xz/CDn18+ek1MINJaI0VA/k6s12vWGlcB6feMVb7cXerNjweqN5+zwfpMvkfQxGWx5qjD9V5/pvVh8WZZbzU4ybAOXl9NX8W564qfbQAXXK8wEoa1a91g/bd2g3VD9TGxQHURoP5N4x3uK+GQYb4VqUpA/IpYofmjss/J7jVShunNmx+oN896UL355NaRma8AulXyGKF0lQwmkKMIqld6q8FJtXXw+mr6VofrK72Z4DrTdxXAORKmWte6wXpcHpAH1tWhuoH6Zz3UxDlSJhvGPcs/AFwT23lTlQIwfxTLec9at13HrwbUm2+0X1ydFee8nwjWZ3IV4DorWN+Vw5jBApJ7vXv9WbwZfAED4NW+V9fXa9vvWx2uM30ms6x1254LrjN2rRusN+3sxM8YA2OoPiYWqE4O1L/p5+OHa7X546NiAYoWp9QA+TsxXufVIXrL2pGhAdSbp6E6ex0G6zx5huvawEIRgK/0ZgFnhutrfZXWl+FvgwX6MlxnK71Z1rnV0qfT4HqlcTAG6zF5QM7LWU+H6kxd6klA/Zs+d7ivAkEG+dZOVQHiV8QIzR+VfS52r48yTG/eKzxj62WD6s0nwiMXrGd2y1cfQcOexQi+d2Qw+TPVzlK34a+mr9d3rW/l9V3prQrXTwPrQJ2udYP1mDzFbnVD9bpA/e/va5MzUiYbut1k8L9WLOdZReyw/Fks5zdj3XYd++pjUwDqzTPSiweqNx/dMTCz+dXB+mgec1YVgK/sf4I3A5wEroMzBoiotr5q8JfB1+u7x9tw/bXctb52nxEoa7Aelze7r6E6F1TfCNS/iXOG+y6xAENLV2qQ/JUY/w6y1nXnWijC9Ls3b5d686sF1ZtHbg1qYH0mlxl4j+aMZFUB+Ibr3N4r19uAcq2v567nry2gt74rvQ3XX0sdru/oWjdYbzJY/6wZsD7XnZ8/T91A/bP++/rYfpZBH79o1GJSBTD+TYzg/Kbs9d+9NjuOVwmoN89ov7gaWUbANJ/cOgzWufKYIb7heq4/izcLuK8MKBl8vb5e2x3einCd7XukB65XGAlTaRzMTvhcHawrQnVgHqxXgepCQP2b1nW4ZwA2Q35uZUNXdjED82exnMuMNVOH6c2fH6g3Ty6o3nzyu9Uj6sgE+7tB9+5M9izWHDZoweR/gjfDaJhWx3VlrwXD+lZd25W+Xt+1vsAZnevuWl+TwQzX2cF6xnx1RbCuDtWZutQJgPpL/XOvq9ZIGRYIaJ0lJVD+Smx/N1nruXMdFIF684324xxNw9Kt3jzOA+szuQpd66N5rHCdMeMkfxZvj4bR9c1eX4Y1qLq2bdu6voDh+iupw/XTu9bZwTqg07FuqD6mil3qi4D6N/0MPcD5ZaOWotTB+CexQfNHZa97JZDeMtYdD3uXevOL8qkD1SPqUAPrM7nsI2F2Zhmua/mf4G1AudbX6+u1ZfLt9TZc/1OnwXXWrnX2Oes7wbq71a/JUP3mIQ3Uv2msw303QDPgr6dsCMsmZmD+LIZzl7Fe6jC9+a/w5B5PY7AeV4PBenwecxZjBttoGyZ4z+Lt0TBcvn6xqdd2pe9Kb8P1P2W4Hrs9UK9r3WD9tQzV82poHvMAmxGoX7g2NEbKMAA+y3olJVD+LLa/q6y13LUOijC9+Z4B1JtXhIc2VJ/NVwP6zMC7Wo7hepw/izfL6Ac1SKkEKNXWQGltWw3XtaKDv8e315thfVsdPduu+xxeCdfVwfpIRu/2ButNCmA9o1s9cwRMNlSvBtQDu9Onrwu0kTKvP6D9AlKrkpTB+CexQfNHZa757nXZcaynAvXmxwXVm4/BulIuO1jfmVUBrp/kr+qtBCgVfbPXl2ENqq5t27auL8P6tu054Lq71mO3B3jHwewC69Xnqyt2q6tDdaaxL2RA/X96cz2/73DfDcsM+M9RVfg9ImZg/iiGc5axVruOe+WxnQbUm1+UTz5Uj6jDYJ0rb1cWK8BXht+r/VW9GQAaA0isDChXrIPX9rZtXV+G9W3br/FmGgmjDtZH9jFY3wfIT+lWN1SfExNQ3wDTr4hnpAwD0LOsK1KB5K/E9neWtZYVQHrzX3Mc7LPemYB684nwyIPqs/lKYH00013rnBnK/qreDACNASRWBpSG61pry+Krtr693h4JE+fPOA6mGliv3q2uCNUN1JuKAfU/9LTOP7++aPxyUquKlKH4N7FB85uy13znuuw41pXHww7Um1+UTx2oPluHYqe8wbpGDmMGk7+qtxpAU/P1+nptV/r6fQzj3qojYdTBOrC+a91gfXw/pREwhupjqgjUF8L0K/rd4Z4BzAz5aysbwrKJFZY/i+G8ZazVruNWg+nNlxOoNy9D9cgaTgDro5nsWRW61ndksECdXn8DtDHfHm8D4HW+Xlut8wXUf5mpR8LE+bONgzFYH4e3Kt3qJ0J1hi71qkA9ak79v/rPf9rHf/5IGQawZ1nvpALI34nt7ytrPSuA9Oa/yje+bkag3rwiPPKh+mwdnuuuncWawwSn1f3duT7mXRlSMgDKbLCstLZK5wswXH8Wy0gYg/Xv2jGixmC9SWkETBZUd5d6UwRQT+5Of6cbTL+ifOBuWbNSh+KfxAbMH5W97jvXZsexrjwedqDe/AzVV9RhsL4mjz3LI2Fy/Zm8lQBlr+8qbwZIybC+2WA5e22z13Wlb/batm3rj4RZ2bWuDtaB9cdgsF6/W91QfUzVgHoSTP+qv/B/ftKg2V/4T0qutUbZ8JVRzLD8WQznL2O9dh23GkxvvtF+XEC9+XCMosmE6rP51cH6aB4rWB/JYcxg8mfyrg7RDNfzYa0BsNb5ArTWttcXWAfX3bX+XmzjYAzW94J1pREwqlDdQP23goB6NEy/sllehzsD4LOsRykB8ldi+5vKXM8KIL35a8D05hlbq6F6bA1ZUD8D5rOD9V1Zp2awdFP3+huuj3uv8FVb32xfJQCstK69vtlr27atPRKGadb6aWAd6K/JYL1+t3oWVFfvUjdQf6+LMP2KPFLG0pI6FP8mNmj+qOy137k2O4515fGcBNSbFwdUbx7uVt+Vq9Ah7651+0d4q0G0ld7ZoJJhfbN9lQCw0rr2+mavbds2H657JEzM9j3gjHHOOitYZ5+vrgLVgXGwrgrVGbrUI4A6G0wHQoH6N6+frRDtr41Z1jplg1dWMcPyZ7Gcw91rtuu41WB6860P1JtXhEdu
t/psDQbr8Xm7sqoA/JP8DdfHvZVApdoanP6vApTWFdBa215fRbjONBJGvWt9xzgYd6zrgPWMbnVlqM7SpR4A1Rm70yO89na4s0A+y7pJCZK/EtvfVNZ67lyH1ceoAtSbZ6SXoXp0DaeA9dFM5nEwu3IYM5T9FeF6214L0FUFlUprq7SuPb7Z6wrUXVvA89ZnvU+D6wbrButReW3fnJnq2VDdQD3VyyNlLH6pQ/FPYgPmj8pe991rs+N4Vx4TO1BvfjyjX5pPPlSPqEMNrM/ksnet7zou1uNhgt+r/VngequlZ1stQFcVVFZd2+x1XeWbva6A3tpWh+tMI2FOA+vAnnnxBuu3LEP1b1KH6kxjX6IgeCRMn/D7WQqZ/sJ/lnlb+5QNXlnFDMufxXIOM9Zs17GrwfTmG+3H1aXefAzVZ/Org/XRPFboPZLDmMHSpd3rDXBAtLa9FqAzqNRaW6V17fHNXldAb20Z4LrqvHXlrnXGcTAG6/W71U+E6pW61BmBeqTXf/D/evyPazvcWSCfZQFagPyd2P6mstZ05zqsPkYVmN48+YB68+LonM+G+4qd8gbr/DlsGUzgHuCAaG17LUBnUFl3bbPXtcdXaV1bDddluH7z1RwJwwTWgRrjYAzWb1kjx6TTrZ4F1Qt0qZcE6gth+hV5pIzFowpA/JvYgPmjstd/99rsON6Vx8QO1JtflA8HUG8e+R3zat3qM7nsYH1nlrvWc/17uycN18d8K4PKihDY61r3mq3+MlPVrnWD9fjtgT1g3WNgXu1nqN6XPwXVaYA6Y3f6AEz/qP8H/2+gjZRZp78WeltxygatCmIG5c9iOZ8Za7br2FcfmwJMb56RXobqkTUYrMfnsYL1kRzGDKaueBa4zrImBpV9vkpr63XNv2az1xXQ61pv3msg9Sld6wbr1+Ru9bmstt85UN1APc4j0mcRTL+i1SNlLGu/lOD4O7H97WSu6c61UATpd2/O7vS7Hw9Qbz6G6kq57ljfl6MM1kf8FV9mutJbCVS2Gq7rdAicva7Z56rXV2Vde33V4Lq71vfWMuLP9AJZoN4YmJ3d6obq3zUD1Q3UYz1uigTqHTD9q/7C//FIGWuPKkDwq2KD5c/KPhcZ67PjmJVgevOM9our0VCdI393t/popsH6WA5jhupImFZLz7b5gG6ldzZcZ1jfFb7Z69q2rQfXlda1x9cjYcZ8e7c3WI/zrwLWq3arK0H1k7vUWWA4W3d6JEgHLh/fTwp8+wv/2Z55grJBqrLYIfmzmM511trtWgM1kN58V3jWBOrNJ7cORag+k6swdqYSWB/JUQbrAM9ImLa9DqDr9QXqjtjI9lWCwNnr6h+D3LU+6s0yDsZg/bNY56vv6lY3VH+tU7vUZ0G2YfpnRXbeI+ulqUyw0NKSGhj/JMa/g8z13bkeO47zVJje/KJ8OIB689CF6jP5Sh3y1brjT804oWt9pXfVrvW2rY6v0tpmr6vSNZt9XQGG6yO+quNgDNa/63SwrjIC5iCobqD+rw6A6Vf8PFLGmlMlAH5VjKD8WQznJWOdlEF6817lWxuoN68IjzOh+my2wfp8lsfBfNcJXesrvbNBZavhurLXQWlts9c1+1wBNeetG6yPeZ/QtX4aWGedr+5u9d/aDdVVu9QN1H8rCqhHwvRArx8KMPdJf2UXsFjs619FCpD8lZiuj6w13LUGq4/vNJh+96vTpR5Rh6F6nbxTx8HsyPCLTMd9e72rwnVD4JrrqnS9umu9qXrXOtM4GIP176rWrW6o/inz3C51FqBeHKZf8ePvcGcCjtZaqULxT2K9fjPXeveaqIL05h1fe2Wg3nzyofpsHYoz3Q3W9+cwZjB1rQP14TrLDxnZsJJhfVXgeva6Kv0Y5JEwN998UK34ElOD9fcyWN8D1g3VP0q6S312/6rd6YRgnh+4W/tVEXz3ihWUP4vhXGWs1Y7jXn1cCjC9eXKNfWk++lB9tga/KDU+jxWsj+QwwnuWrvW2PUd3eTb8BfIBsKKv4Xq8Z8VRO8AauG6wfpdi1zrTOBiD9WtihurAGFg3VP+uGeBaAagbpof4/VAAO8ualQogfyW2v8Gstazy0lSlGe+s42iYuuZVofpMdvUO+UpgfSTHXeu1vA3X1/kqrW32umZfs9nHD1yH6+5a79/WXetz3iPbr4brq8G6u9WbVMD6IFSX7VJnGPnCBNRZYXqAlzvcrX1ShuKfxAbMH5W95lUg+j1DB6Y3T06g3rw4utSbTx5Un81X65KvCNZHshjB+kiG4fpebyUArOartLbZ65p9zWYfv0fC9PsywHXVrnWD9SsZdTrWK3erZ3Sqn9ylHgHU2brTyWD6FT8D95OUDV/ZxAzKn8Vy7jLWTB2kN/8VntwjaSp1qUfUodgpr5LJnnXaS0wBj4SJ8FYCwGq+Smur4gnojNrp8VWC65XBOrCuZtWudYP17zJYl4HqkqNfsoE6MA/V2UA4m0+A3w8NyLOsb1IC5K/E9rd2wotTlWe9G6j3+hiqK+UarI/lnNa13rbnAOB+mammr9LaZq9rRbiePW+91RB/rVaG64pd60zjYAzW12QwQ3WgPlg/FaozAHU2CE4E06/4ucPdipE6DP8mNlj+rOz1370+yiC9eXPD9OYXV6OhOke+whiY3XmsYH0k5ySw3uvP4r1yzbNh7SpfpX8VkL2u2ddt9vWaDdc9EqaJAa57HMyfOhGsu1u9yVB93b4G6vV9Jv3uL039C/8JLchaq2zAqiJ2UP4opnOasW67jl8NpDffFZ41gXrzya1DEarP5Bqs780ZyVCG66re2fC3bZv/Q0M2BAZ0ZoNnA/vsa9YvM10D1yuD9d46WLrWmcbBVHh5qcG6ofqqTGAOqlcB6mwQXGiW+73DnQn2WWdJCYp/EuPfUOba+oWpV31XeMbWWg2oNw9D9V25zC8vHc1izVEG673+TN7ZoLJtW9dXpcO6xzd7XSv+YOGRMLXhusH6nzJY/7a9oXrL0oDqil3qBuqxHpE+0V6Dfh4pY/2pKgD8qhhB+bMYzsnudaow5/0kmN686gD1iDoM1bnyDNaviwms9/ordq23Onq21QHLPb5KP1xkr2vFHyw8EsZd6701sIyDMVh/ryrz1dlHwBiqv1fm2JdZ4MsCwqvD9IVgfv6lqX9N7n+CGGBpJSkA8ldiug6y1rACRG/+q3y5Z7szAfXmcy5Un8lWGTvDnlUBrANccJ3JWwn+KvpW7LDu8c1eV4+EiV9Td62vrYFhfXu3N1iP82/79B1DtW716lBdsUvdQD3WI9In2mvQb77DnQkiWhxSBeKfxHqdZ6/1znXxi1JfeUZ61Zzp7nnu+zIV8qqAdUC7a52pI95d6+t9VTqse3yz17XiDxbuWs+H2pXBeq+3wfprnToGxlA9dz/VsS8G8mt8mL2e/DxSpqqyQSyDWCH5s1jOVdUXpa4+LnaQfvc0UF9Vx0lQfXdmtc7408D6av9VMA3Qgr8rvRl+vFCB4NnXbvY166713K71ymAdyF9fwGA90t/d6t21bYXq7lJfv2/E/tU8In2ivQb
9fmhgn3W2VOD4KzH+DWWtZ6UXpZ76klTGrnl1qK6Y7W71uRyD9Vh/lnEwrZaebbWgfTZcz16D7HWt+IPF6S8yZQC/anCdAawDK9fYYP2TDu9WN1R/o1OBOgsIrw7T3eFupUgZgl8VIyy/iWH9K74o1S9JjfQyUGfIV5nlXrEz3mA917961zrDjxjZELhtWw/Yq/xgUfFFpkrw12C935cFrLdaroPdlWC9wnz1Hd3qhurz+2V0qZ8M1CvCdBGQflUG7ixigKsKYgbkr8R0XjPWzi9J/ebLC9ObXx2gHlGHIlSfyVXojjdY791nXcbq+t21vt43G65nr0H2uiqtaeZIGHetr6lVDaz3bn8CWAf64Lq71Xty9oD1ylBdsUs9G4izAPXqMD0azD94/lABQauO1MD4OzH+fWSu7e71MEh/9o304htDY6iuA9VHM9lHzjCC9bYPF7xnGQcD5HdVr/auCIHbtjpwWeVfA6iMhMl+kWl2Z7W71vt9DdZ/S30MzOnd6obqf8pAPWf/SB+mWoj83OF+iqoA8KtiBOXPYjgn1QD6PWelNz9Mb34G6qvqUIPqM7ns3eqjWQbr+f4rwXqrpWdbHajc65sNgdu2HglzLf+6PBLmmlYAYAb4uwLuZ69r77YsLzB1x/p7FQHr26D66H6G6pz7MuzP5BHpE+210O/3S1P/wn9CQ6qKAZRWkwIgfyWma8EvSp311pntzjqKhmUETTbYV4PqM7kG65xgvTeDaRwMkA9+V3tXhesMa5AJ17OvW4+E0RgJ4671/m0VX2BqsP5eO8bAuFu9SQWqG6jrezD6RHsN+P3ucGeChxafVKH4OzFf75lr7Rel9viu8OSE6c2LA6g3j7wu9ex8hW713Xk7wDqwHkzvyGCC64pd6yu9s+E6w/pmd8N73vrV/Dy4ng2As7vWe2owWH+sY03X+kqw3utvsH5J3WDdUP23ToLqFYA6GwRnhemL5rh7pIyaqkHvETGD8kexnCu/LHXUe5Wvgfp1H0P1nbns89UBg3Umf5au9bZ9PgDPBsBAfof1Kl/PW78mj4TRGAnjrvUmhq511Tnr6mC90hgY9hEwJ0D1TCDNAMNZPBh9CPz80lRrrVTg+Csx/m1krefOtfCLUh89o/1qAfXmkVuDoXp83ghUBwzWV/mfMGt9pbfhev7aqvxrAM9b90iYaF+D9cc61nStG6y/VxWwbqieu1/mvtnZ1TwifaK9VvjBL009V8og/IoYYflNDGtf8WWpavPdWWF684ryyQfqEXWoQfWZXIP12/bnZaiC9ZXeDOutBNZX+VZcV4+EifXs2TZ7JIxS13r1l5garL/WarBeaQzMrhEwhuprMmf3d4f7Gh9mr04/A/fVYoCrCmIG5K/EdF4z1m7X8a8+NpXZ7qxjaAzU5/OrQ3WAdwxM24cLeu/KYJmz3rbnAODuWu/zzV7b7HX1SJhrUoHA2V3r2Wvauy1D1zrLnHWD9U/+53arG6rH7ae6b8T+LDUw+kR7rfBDGynDAw4tXqkB8U9ivOaz13fnmqhC9Oa9wpMTpjevGkA9og5D9c9i7lZv+/BB7x3HccKc9ZXeBuvrfN217q71yG0rzlrPXlOgNlg/pWOdDKoDpN3qFaG6gfrafRn2Z/Jg9Fnl1/G37w53NWWD2SwxQvJnsZyb3WvlF6R+8q0N05tPhIc2UJ/N3w3VRzMN1cdyDNbj/FmgvRJcZ1hfd61fU7WudRWwvio/u2vdYP0uxY7107rVWUfAGKrn7pe5b4X9mTzYvYCxv/cvai9N/SvalkQsAPQkKYDxd2K7XvyC1Aj/Vb7cM93Z5rkbqGt1x+8aAQOcDdZ3/OhxAlhf6Z0NgNu2NYG9u9avqVrXejYEPn0cTGWwrtqxbrAeu/3oPrvAuqF63H6Z+zLsz+QR6RPttQCi9/zttw53NtBoxUsZhH8T8/Wbve5+OWqvNzdIb35cML355AP1iDoM1b+LuVt9JItx1AxgsL7b22B9nW/22rpr/Zqqda17HMyq+fX5YB3g6Fhnmq+uDtVH9tkB1dk71U8A4ycD9YownRmkj/69f9Jf+D8eKbNC2ZCVWcxw/JWYzqVfjjrjv+Y4ToDpzctAfTa/OlQHanWrj+SsHgMDrH2BKVNHPwNYB7TgutL6Zq+ru9ZjPfMhsMY4mGywDvTUugasN+81XeuKYN3d6rHbA/U61VWg+mlAnQGGs0HwKB92kN55nH5pqvWn1KD4OzFe25lrW222u9pMd9Z57p7lHpdvqP5uH94s9fnqAFfHeq//yh8Esjur27Y6vtn/IiD7+N21Hpuv0rWePQ4mG6y3GvK71g3Wn2tZBtZLdKsbqmtknbpvxP7VPIByIP2qpzvcmVUFfPeIEZI/i+G8ZKyTX476zpN7DI2BelwNWTBfAaq3/ep0q4/krAbrwBkd673eButrfd21fk3uWv8ula717HEwBuv9vieAdXerf1c1qK7Sba4IxbOBOgsIZ4TpkSB9EUTv2bw2cGcAo5WlAMdfie268MtRI/xX+Z4B05tXhIc2UJ/Nz4DqgLvVd+e4Yz3O3+Ng1vpm/3CRffzuWo/NP7lrPXscjMH6bVuD9SfJg/Ud3erMgFwFjp8E4yvsH+UBxMF0ZpC+Asz/6/tDBx+teamC8G9ivlaz19wvR+315gbpzY8LpjeffKAeUYeh+pX99sDuXVlsLy4F1v84oArWAXetr/Z11/o1uWv9u9y1rtG1brB+1yqwrjxf/dRu9YpQXQmMG8bnegB8MF0BpLvDPVnZ4JVNzJD8WSznLmvNKrwgVeXlqM2zJlBvPvl1GKpf2c/d6gDfKJi2PQ/8VhwH0+ro2TYfgqt0rbcarm7nrvUrctd6rKe71nPheo9n234NXD+hY93d6mfuo5B16r4R+0d5RMB01q50BTD/r69fmmppQfF3Yr2Oq8Pze95qf5257qwvR21eHEC9eeR1qc/mK0H1tm+tbvXRnNPAeq8/C1gH8jurFX1VRsKs6FoHzoXr+SDYXetXdP2864D1Xl+D9V86Cqyf3q2uAMcN43X2B+p2piuAdHe4F1EFCH5FrKD8UQznohpAv+fogPTmWR+mN68Ij/zxM2pQHajbrb4zy2A91l9xHEzbPh+Cr/DNXt9Vx++RMHHbAfFwXaVrvcc3/8eKeLhusH6rwWA9YvvVYH0HiK4IyE+A6gbqHN3prF3pBCD9ouehwJ0BoKpKAZA/i+l8+wWpEf6rfHlhevPj6U5vPtpAHTBUX5G3K2vk3Bmsx3p7HMx6X5Wu9R5fd63H5meOhFHpWl8B1oGaXetqYL3VsWbOusH6e60G66yQ3FA9bj/VfSP2N0xf77PKr3l2yS9NrSRFGP5JzNdm5lr7Bam93n5B6phPPlCfrWMGqAOG6tF5O7rVAb6Xl45kMPlXHwfT460E1lsNV7dz1/oVqcDgal3rKj9UtPyzX2Kq+AJTVbCuDtVH9mEF8Tv3Ucg6dd+bZoE6E0xnhPLRXs1vjf7Cf87scF+harB7RMyA/Fks5ytjzSq8HLX584P05lkTpjefCI+zutTbvobqz6
owBmZHhsH6Pu9suJ69vp61zg+DT5617nEwHgfTalhybF1QHVgLytXBuqE6Pxw3jL8uw/R1PtFezS9ef+E/PZv7pamnSQmKvxPjNeuXo0b5nzvTnXGeuzpQB86B6qOZOyF+lTEwOzJYRsEAnrM+6ltxHAzgrvXI7TLHwfTkV/uhAtAZB2OwfqvBYP1R6lCdNYN9n91ZM/saqI8pAjhXB+kEEP2iJ4BTZ7izqAL8vipGSP4ohnNRDZ7fc7QgevPlnedeCaYDuUAd0ILqCnmsUL3tw9NNvsPfYH2Pb/Y4mLZtXte6ClgH4mFwfpc1f9e6Clhv+XnjYAzWbzXkg3WPgdHZ3vvo7Ze5r2E6r0/zitVCiN6jesCdAZwqix2MvxLbOa/+YlTVl6I2b16Q3vx4YHrzMVDfnasA1QFesK7erT7ib7C+x/d0sA54HMwV+SWmcdsBueNgsuesG6zffA3WH2Wo7n2Y98vcdwaoV4Hp1UE6CUS/4Pm/Ov3SVBUpgvBvYr72std759r4pajvfKP9+MbPnA7U2/6G6u/ECtXbPue9GNVgfY+vEljv8V0B1lu+x8F806njYAzWzwbrii8uPWUMDCMkr5KhkpWx3+y+ykCdBaafAtKJxs7U63CPVDZ0ZRIzHH8W03nzS1Fn/A3S57zyYTpwJlCfyTVUf9yHr1t9R8Yq4Ntq6RMDAGd4UawSWAfqjYOp2WUdC/bP/hcAGi8wVQLrBN3qAAlYd7d63vasGaP7KGRl7AcYprN4NJ84RYJ0Ioj+xfOX/NLUKlIC4u/Eei1mru3ONdlxnGrz3CvDdMBAfTbfUP1PMXaS78hg6lYH1kFfVe/srvWKYL3lX/WsBYOrzVnv8TRYv6br63l8xzrNi0vdrR6z/Y4MZqheGcTP7HsyUGeB6aeAdLKxM+5wj1YF8N0jVkj+KIZzkrFOFWa6+6Wo44qA6cA8UAfO61Kfyd0J1QF3qxus1/LOButt23PnrDffU2Fw3jiYmv8CoNYLTA3WNeerrwTrbFCabXvWjJ37KO2XBdSzYXg1mB4Fq4tD9A+ef9TJDdwZQGkVKYDxV2K7Bqq/ELVl+aWovz0jveLqY+hOB2LWJwuoZ2UbqvPnGKxzeyuB9R5fz1lXgcEeB/NN0V3rKmC9bXut1myw3moI/8GgPFhnGgNzIiRnBuTVu9tHofrJQJ0FprN2pBee3X6TX5qaLVUQ/k3M11X2mu9emwoz3dlBevPjgulADaA+W4Nad/xOqA7wAu+RnB0Zp4D1Xn+D9X5fj4OJzc/sWvc4mCvZGuNgDNbzwTrLi0tPGgPDtj1rxs59MvZTA+rZMLwaTGcF6YQQ/YPvH+LucF+lbODKKGZA/iym8+eXos74a7wUtXnywXTAQD0i31D93T6cUH0kZySDCay37TleMrryBwEluJ49DqbV4K71b3LX+nd5HMyV7eLHwWSD9eYbD9dZurlPAOsnQnJmQG6o/lonA/WKMJ0ZpJOOnfFLU1WkBMQ/ifF6y15bvxj1qu8aVYbpAAdQj6jDUP2zdo63MVi/Lhb4zeStBNZ7fd21HpvvWetx27lr/Zoyu9aVwDqwDg5XB+tsUJpt+137sEN1FaAO5EH1TJjfPGb3rwnTDwHpLzxf1nlmh/ussgHtbjFC8mcxnBO/GHXUe5VvbM2MMB0wUM/Mnrkmds1Vb/vxZjGOgQEM1iO8Ddab3LV+Te5aj9tWoWt9BVjv8VUZB2Ow3rQKrJ/Uga6+PWtGRtYpXeq53fHzigDFLHU0n1gdML8dYALuDMBUXQpg/JXYzn3mOlYA6PeMVb7xtUfCdICrO735aAP12fyZ7N1QHdjXQb4zixGs7/jRgwV+9/ozgPVWR8+29cbBtBquetaCwYC71r+pWte6x8GEw3WD9YEaerdX9d6xPWsG+z5KXeqnAnXD9E9evGNnmue2+e2AX5q6R6og/JuYrx2GNa/4clS1F6OygnSAC6Y3n9w6MvN3j38BDNVncpTHwLD5rwLrQN2u9Wyw3nz5YTDgrvXI7Oi19DiYaxIZB2OwPlBD7/aq3hW2r7iPSpe6IlA3TI+t4+7FC9I3Q/Qe8XS4R4sBuDKJGY6/Esv5y1o3vxj1s6JBOsAJ05tXfnd6RB2KQB3YD9XbfrwjYEazThsDs9qfZRQMUBesAx4Hc1WZXet98DIaWp85DqZ58netG6wbrPdue4r3ju1ZM3buMwLVDdSv7DunWThbDaazgnSV2e3N922tfmkqg9Rg+DuxXkvZ6+uXol4XO0gH+GB684nw0AXqgM7ol7Yff16VbnXAYP2dDNbvUgHrgAYMBjwO5psyx8FUA+tA7jgYg3U9sG5QrrM9a8ZNlaG6IlA3TI+t4+4VJ4XZ7c03pM66He69yoayGWIF5M9iOTd+KeqYVkB0gBukN786MD2iDkWgDhiqR+S4Wz3X32D9ruxxMAbr1+RxMN+yPWf9m0TAOtAB1xngc2Wwbgiftz1rBmCgHp85pxkImt0ZH1HD3YdrdrsCRF81duaL6zxwZ4GhVaUCxV+J8drIWs9q89xXQXTgHJDevKJ8DNRHlfEyVkP1ptO61Vf7q728tNf3dLDe41ttznqPp8IPFZnjYDLnrB8M1qU61ldB9Z4aFLdl8mbcnjUD4IfqWiB+XNnd6dn5ETXcfWLE2iV/94yH6MGOfmlqr5QB+BWxXw/Z65+xPruOWQmiA2vOhWH6ujpmry+lLvXdmYbqXBmr6zdYv8tg3WA9yq9nWwWw3jxju9YzX2AqAtYBoY71yt3qqnUo18KcAeyD6jsht1qHunJ3OgtMPwGkC0D0/+n/+e7MN1ImG6iqiB2MP4vtvFZ/GSqwFqADayA6cA5Ib151xs4oAvW2L3+X+mjeaBbjCJi2D083+Q7/VWCd5ccGg/VcsA7UeoFpj6fnrMf4tW35X2BqsF4XrLPAbDbwrb79yD7uUo/ab1xZQD0bpjN1pbOC9GiIngjQe+WXpkZKDYJfEev1kb3Wu9dlNTwH1gF0YN35ij4PhunvdRpQn8k1VG9ihOqrM1ZCdcBg/XcNa8btZILgnm2rzVk3WL/iabD+PT90vE36KBjPV+fZlsm7wvYj+zB3qVcH6qrd6QwwnQmknwbRFwB0AF3ryNfhPqpsAJstVjD+SkznKmPddsDzmwzRb57Rfobpz8oC6m3/vV3quzOZoTrAB70Z/VnA+sp1MVg3WL8qv8D0myf/C0yLgXUguWPdYF1z29XbM9Uysv3IPtWguoE6Z+5sdlQNUXU0nxhFgnQViL5ivvy/8gz3XVIC4u/Eeq1krm0VeH7TynN8EkhvXlE++kAdOKdLfTTTUJ0zg2UMTKulZ1uD9btvLrD2nPU4v55tFeasn/oCU4P1a1IC6yzQmQVmq3rvyjBUn8ka1yhIPRWos8D0E0C6EkTvXMc6He49qgC/r4oVkj+K4XzsBOc37QDogB5Eb74rPGvCdCB33MtNakB9Jnc3xK8E1UdyGMG9wfpvVQXrPb4G67GeHgcT49e2rfMC0xVgHVgDlivOWK8Ov1nqqLC9obpOl7oiUDdMj63jpkjsr
QLRV7yk9e4dDNwZwKmqFMD4K7Gd8wxwDuyD58Cea+VUkN78onw4YDqQD9Tb/obqnzR6nlnBOmMG0xgYwGD9dw09vvXAOuBxMFfkcTDftuMfB7PgBaYG6wbrNNuu3p6plpt6wbqhuoH66tzZ7Ij8iBqi6gC4QboSRB+o1S9N/SRVCP5NzOc8C5g/aic8B7QB+t1/hScnSL/7Gaj/3l8LqM/kGqrvzTFYj/E2WO/3ddd6rGelrnWPg7mS7XEw35QN1nu2zc4/pQ7G7Q3V60J1RaBumP5bUWg5EqQXh+gd3v/hHSlTFXb3ihmOvxIDML9pNzi/ade1qwjRmy/3LPfI+qL+HtSB+my+ofpr7To2VnhvsP5aq8B6q0MHrlcE6z357lq/kp3Tte5xMF9lsJ64LQtIZtj2FG9gzwgYbkDODdUN1PdkR+RH1ACcAdJXQPTFAL13F780dVZqQPydmED5o7Kg+U07/z52XEtq89xZu9KBWjC9eZwF1Gdyd0J1gBd4j+SshuqAwfo7uWvd42D8EtNvfvxd6x4HE7ddz7YrxsEwAGUDe646dmy/uludG5DvgeoqXeqKQN0w/beiYDo7SCd5AWqnN4BKL02tAr6vihWQPysbmN+U8cPSrmtSDaI33xWeNWE6oA/UZ/MN1d/twwnVR3KUu9Xb9hxgvXLHeq9v5viSVfkeB/Mt2y8x/ZybNg7GYP2isrfNzu/d9oQ6RrZnHAGzA5DvfEmpQpe6gfr+fCAGpjOCdAWIvgGg9+o1cD8NXq+SChR/JRZQ/qisf42x++9h9XGuPB52kA4Ypq+qQw2oA4bqCjnKYL3X22B9zNdgPdbT42DiPD0O5rOqgfWe/GxInJ2/clvVOka2Z+tWZ4Xq7lKPy5vJzMydzb6JBaafBNJFZ7ff/t+zXpqqDMCviBGSPyp7fFHGtV5hnrvKLPfIv28mmN58DNRHpADU236csHtXzsh5Uh0DAxisj/p6zrrHwXz28ziYz9l542AygbHBej54ZthWtQ7AUP36Pv2q3KWu1hWfnQ3Mw3S2rvRIUK0A0clmt9+0ZqRMdbA9I3Yo/krZoPxZWT8SVYDn94xVvvG1R3+eGKbH15CVP3NtVIXqI1m74L1yt3qvP8t89VZLj7fBusH6dxmsf1fWOJhTwToQ/wLTbECbne9t921vqH51n37tguoG6utyZ7MBDpheHaSrQPSVY2ea/y/9GI7/K0UQ/klskPxR2f+qYvfaKAP05r2mflaQDtSC6RF1ZAL93V3qAD9UZ85ZDdUB3W51gOPFpc3bYN1g/ZpOnLNusP5Z7ljn92TYlgV+s9QBrLt+R2phhPBtn97tubvUDdTX5gJ1YHoUCGaG6GoAPaDe/JemVgPdvWIG48/KBuWPqj7PXXWO+4of8BhBevPi6ZQ/EagDGl3qo3m7snZAdUC7Wx2oPwYGMFjvyTdYv5LND9bbtn6B6SdFw1V3rJ+bz7Ltam9lqL4jw13q+7MUM2/KhukG6Ve84iU+t/1RP8cD729SAuLvxATKb8pe151rsuNYVx7Pqn8FE/3ZY5i+rg41oA7UheqsnerAWVAd0ATrPaC6x3sF1AYM1hXAesvPeYGpAlhv2SkvMDVYD9w2GxRn51fftnd79REwhuqcOaNZM3kzmbO5hulxdTQfXoiuBtAD683vcL+ibDi7U4xw/JUYzolfgjouFYgOcIL05hXhoQ3TAQP1FXm7shihetuHB6yvhOqtlh7vfFBtsL6mVoP1OE+D9c8yWN/vqZTvbX9LuVu9AlRnBuqjWaN5GUD9ZJjO1JXOCtJVZrY337SxMz8U4FRNKlD8WYznOnMtd67HjuNUguhAbZDefPJhekQdakC97VsTqrN2qgPuVv9cS693Pqg2WM8F60D8eB2D9c8SAOvAgjnrBuvOV9h2pffKbnU+SG6ovjpnNEspD5gD6hVgukF6j1+8hDrmNTrcv0kVgF8RIyR/FMPaV3wJKrAOoAMaEP3uWROmN5/8OgzUa+Sxvay0bb8+g+Wlpa2WHu+689WBfLDe41sNrLf82DnrfT9U5LzA9CpYB/JeYKoA1nu2jQbrPdknv+Q0O19xW4CnW10dqjPDbuac0ayMPCAPqBumP/vEiB2iCwH0J/+vdfcDdwbAyih2MP4stvNY/SWowFqADqx9AfEpIL15ccD05qEL1AFD9egsj4C5JtUxMIDB+kgNButXsvlfYNoBo9NeYNp3XcaOg8kEtwpgfYVnNijOzlfcVrVbnQ2qt334OtXZu9QN1L8rG6hn50fVcffh7JJvfjojZ5r3Euj/Qwdeo6UGwr+J9Xxlr3PGuqwG6IAeRG++fhnqdZ+8cS+AJlCfyVWA+FVGwIxkMIH1lWNgAIP1kRoywXrPtplz1hXAetv2uHEwEnPWV3QAK4D1ip7Vt3W3+rvt+8QIvCt2qSsBdcP0eTF1yDevWLF3y999181tb/6XtGekTDaMzRIrHH8llnOUtWY74DmwFqADOhC9eUb71YLpgDZQb/vvBdwZmZWgettnfUe8wfq8t8F603VgnXtcCnPWAY+D+ezHPw7m1DnrFSF4dj7LtqvAOhMoZ4PqOzIM1eeygJwO9UygzQCx2brST5vb3nzlRs/4palKUPyVGM9f9prugufAeoAOrD3HJ4H05hXlE1PTqUB9JlslsxJUH8lhguoAz4tLe70N1ps8Duaq53ld630/UuR0rXvOesx29qy7rSJYP61bnRHcj2SM5uzOUgPq2TA7Oz+qjuYRJ3aQLgjQ//W/XDffS1OzYe1qMQLyZ7Gcg53g/KYdAB3Qg+jNd4UnH0xvXvnd6cCZQH0mdzfEHz3HrGBdvVsdMFh/pRWw+nSwDmh0rV8F6z2eHgfzWR4Hs9dTBYIzQG2GbRnAOheE75OhOl/OaBawf+yLKlBngNhsXeknjZy5+67TwvEz9We4j0oBjL8S2/nMgObAPnB+0+rrZeV5ZQfpzS/Si6M7HdAG6jP5Kl3qQD2oPpKzGqoDa8H6yjEwvf7Z8HdlHdlgvWfbamAd8EtMv3tyd60rjIMxWK+V37MtA1TvrYMFwrfte7Y1VK+QA5wD1LNhdnZ+RA13n3NAutrYmbt/l/g63D9JFYJ/Ehsgf1YWML9pNzgH9lxnq8+7yiz3qiAdiLl2VYH6bLYCVB89v1WgOqDdrQ6sg7692zOAdaWO9Z5tDdavbMcP1lu2x8G8k+es7892Pke3eu/27lbP8x/JYM8BxqB6xotJs4C6YfqjD1eXfPM6G6KvHj2Dq8C9IujuFTsYf1Y2KH9UBjS/ade1qwrQm/c5c9wrwfTmkTPyJStboUsd4IXqAN8IGOCMMTCAZ6yP+GaC9ZYfO2c9E6y3bT1n/Z0yQWelOesqwDgbVqvkAxxgnaVbXR16V4Hq7EAd2N+lrgjUq8D06iB9BZRWBegDddd4aaoaDH8nJkj+rBOg+e9Mvwz1T894RdZpmB5bg9r8dvbRL22/GlAd0O5W7/U3WL/LYN0vMP2e7Tnr72Swbs+dnr3bVgbrJ42AYYTqrOAeqA/UDdNza4io4+5zHkQXndv+
P/0ncqRMFeh9Vcxw/FGZoPxZVcH5PWu1/5kvQ438W4v6ezBQ35+r0KXe9jNUv+7PA9V7t2d4cWlvHQbrBuvfZLAes10lsO7s2G2z89Wg+krvldDYUH1NxmjOzrEvBuprc2ezo2qIqqP5cIJ0JYC+Gp5PuNd9aaoKEH8lJkj+qMx/DbH7OlWf484O0YG6IL356ML02fyZbAWoPnp8O6A6wDdXvWXwgHWWbvXeWgzW64H1Hs9osA7kvcD0RLAO5L3AtBrcVshmyFcD6yd0q7NB9UqjX3Z2qasAddXu9Eowna1DvnlpQPSVAH19X/uvz5b1L01VBt9XxQrIH8UwOijrx50Kc9yVxs9E/81Xg+nNI7cGA/Vv+50N1QF3q38SQ7d6r/eKmhleDGuw/l3kYB1IfIHpCoBpsD6/nT1zwTpPB/qabnUm8M3YRV4Jqhuox2bO5s5mM+RH1HD3iRP7y09XAfTV8HzmBcf/6ucIIA5oQPFXYgDlj8r8FxE718Iz3P8UK0gHDNNZ8meuEQWovguoA4bqK7Z3t/qYr8G6wfp3z5wXmBqsO/sETyB3TBHLtm37nm15wPdp/iMZ7GNfTgDqmTC7Ulc646iZ5herFRB9JUAPgOdf9WKN13e4v5MqAP8mNkD+KIbxQZXnuKsBdGDNv0CpCtKbT4SHgXqP3KV+FyNUB7jA+spu9ZW1GKzfajgTrAPxc9YN1j+LHayv8KxUY0VPj4G5bXtdHgGj4w/U7FI/Baird6ZXBensEF0VoAeu609Z8P1NzGD8lRhg+U1Za1fpBagt40yIDsSubyWYHlGHGlAHDNUftQOqtxweMD2yPcsYmN5aDNavg+CWb7D+PbtnPXPmrCtAzBXjNtiBucKxVPQ0WL9te10s3erq0Pt0qK4A1P0S1P35ETVE1XH3ipMKRBcB6G/839ae1+H+Tmog/JOYIPmjstc4Y12UATqw7l0IzCC9+fHA9OaT250+W4OB+mcZqvd3tzOBdZZu9V7vqmC91eCO9W86Eaz3bJsF1nuy2bfLzFaB4BXBemWofoo3oz/QD9zcpZ6bN5OZmTubHVVDVB3NJ0YKEH0VQE+E5yN6D9yzoexuscLxZzGdF78EdU4r359wCkhvXlE+2t3pgB5Qb/vWhOqAR8BcleoYmF5vg/U1YB24Dtf71jV2zvpVsN6y/QLTd8p8SWQlGF2pxmxPg3UeQK3arX7qTHV3qefmzWQyZM/mR9UQUcfdhxukr4DoKwH6qpe1/s74qB8qgPtJKkD8nVjX+ZSXoLa89ceqBNFvMky/6pFfx+z1Zaj+XsxQveWc1a0OaI6BAQzW7zVc7dyuBdYBz1mP2pZ9zroCjFaoUSXbYF0TfjOBbzZ/Q3X+LMXMiGyG/Kg67j4x9ZwM0VcD9AV1z42UUYfg38QKyW9iWf/KL0JdCdABHYjePPlAevPiGTtjoL4v01C9yd3qn2WwfldFsN7j63Ew38Xete4565zZKp3omS/V7ck3WO/3PcV7xP90qF69S11xzMxsdkR+RA13nzNAuiJEXz1+pmV8rP+HBtrOiB2MvxLbumeu4c61WA3QgXUQHeAH6c0v0qsOTAd0gXrbn79LHTBUH80xWH8vg/VbDZ6zfi3f42A+qUrXeiUQrnAsqzwzwXrPtgbr/b5M3jv8V89VN1TfnzWTN5M5mzubzZAfVcdNUUhZAaIrA/QFta99aaoiCP8kNkj+KIa13r0+O+A5sBagA+vOHTNIb348ML355HanA2cBdcBQ/VGMUB3gAuss89V7vQ3W14D1tq3HwXySwviNLLC+wpN9u5Oz3bF+XWpg3d3q77XjZaXMUF1hzMxoXlbmbO5sdkR+RA03nQLSV0H04vPbAaUZ7j1iBuOvxHQOstZuFzwHdAF6846vnRWkN68on/zudEATqM9kjwJ1wFB9JuekbvVef4P1xxoM1q9l84+DyYSeHgezb7vMbIUaAXesqwFwBt9e715/Q/UaOaNZGXkzmZm5s9lRNQCcIF0BoiuOnblnLNHaDvdXUoPh78QEyR+Vvb47wTmwHp7fpAbRm2+0Hx9Ib176MB3QBOqARpc6YKi+GqoDZ4yB6d3eYN1g/YKOnLPek11lu8xshRp7tlW4Htdue3W72vBbtVv9VKhetUvdQH1PdkQ+EAPTWUG6CkQXfPnpi4xLx1Bjhvs3scLxZ7Gci93Q/KYK8Lz5nzvDnQ2kAxwwHTBQ79GuLvWWdS5UB9ytHrHtyh8zDNYN1ndvdxVkAnXmrCvA6ErH0rPtyWCdAYCr+TJ5A3xz1Vf/6DCSwZ4zmpWRN5M5mzubHZHP1pXOCtLVALrg3PZH7e9wfyUVIP5KLJD8UVnA/KZd4BzYd+2cDNGbX6wM05/3z+uOVxn7Ahiq33TSCBjA3ep/1nEVLPfUa7B+zbMOWAf456xXgswnHnPPttXAusfA8Piu9jZUPzNnNGsmbyYzM3c2G+DqSmd7IWvzipfntr/L6NKfwF0Zfn8TIxx/VjYsv2knNL+pAjxv/qt8z5nfHvV3wADTm4eB+hXtBOqAoXqvWEbA9Pqv/AFBqVsdMFgPButA4gtMTwTrmdk+5phtDdavbnsNGqjBb9VudUN1jozRHIUsxcyIbIAHpp8A0lVGzjRf/bEzYHxpqgIUfyUWUH5TBjC/aec1teN68fz22DoN02NrUALqAH+X+mgWI1QH3K3+TpXHwABrwHoHhDZYT9gO4AfrKzzZt8vMNljP8ry6nRYAV/Pt9T4NqjN3j7N3qZ8E1LNhekWQzg7RFQH6Jng+cgzXRsqoQvBPYgPkj8qE5Tdl/BCz6zpTneHOPnom8m+qCkyfrWMGpgP756gDdbvUR/czVI/3ZxgD0+owWDdYj9uuZ1uDdd3tMrMVrsWe7PxZ7Fe30wHVLKNaWEbAGKqvyakK1DPAdmZ3ehWYzgjSFSC64siZe8b60TMth/ylqcxQ/JUYQPmjsv71ws5rascxnj6/nQ2kA3HrptydDmgB9ZZpqA5wQvWRDJZudUBvDEzzjgfrK8bAAAbrkdv1bJsF1nu2PW27zOxK12JPdu45vK5sAJ5d60rfXu+V3eonQnVWcD+aM5qllDeTCRimR9fRfLghuipAJ5zb/qjxl6aqwfB3YoPkj8oe97P7xxjPcP/mywvSAS6Ynt2ZfpMiUAc0utRH83YAdWDfjwTuVn8vBrCe3a3etjVYvyLDzDO2y8xWOOZK12Kf53UZrHP4Ajzd6obqNXJGszLyZjKBOaA+C9MN0j95xUppZnvzLjG3/d+s/wDAjxQ4Z4bjj8oG5Y/K+hcMlea4q81wZwXpAE9nevOYUyZMB7SAOmCofpOh+nd5DMyYr8H6tQ0zwexVmAnwv8DUgHt+u8xsg/Ur2xqsM/i6Wz3PfySjYs5o1kzeTGZ2d3r2S1gjarj71B830zzX6NC57Y8a73B/JxUo/kpMoPxRmWN/dq9JhTnuq47hBJDevPJhOnAmUG+5/F3qo/uNQHVgPZAG+EbAjPgbrP+WwXrfOb54XJfBOpALAD1nXXe7zOwTf+RZ53l1u3pgnaU7exVYP6l
bnRGqMwN1hazZzEygXgWmnwDSlea2N++12jW3vWX9Tz8G5IvFMiO/8ktQW9Zqfw2IDsT/6MXUld58YmSg3i9D9d9Sn6s+4r/6mFeNgWm1GKwD18F6q+GabzWw3rOt56zv2y4zW+GYT7wW27ZXt6sH1nt8GWoF1oF1JvDN+BLRKhkqWTN5quNeDNPf+fCOnGment3+PmNa8R3ur6QAxl+JBZbflLmO1ea5rz6ek0B68+Lqlp+F6YAuUG/Ze7vUZzKrQXVg/QgYwGD9cy0G64AMWAc8DoZ2u8xs9u0ysz0O5tt2NWG1Uq2qY2CYoP0Of9aM0RyFrJtG91QG6iww/QSQrjR2pnlLwPOOrP8dz48sDH8nNkj+KIa1zlifKi9DXfW+A2aQ3vwM019pdl1O6VIf3bcaVB/J2XEsqmNgWi0G68AasN5qOHPOOuBxMIzbZWZ7NNH8dn2e15U5YsVgnQesM4FvQ3W+nNGsmbwMoJ497iU7P6KGuw9Xl/zdj3/sTPOt8fLTgOPY0+H+Tsxw/FkMsPymU2a6qwJ0YM27DKqDdCAGpgO53ek3qQH1mdzR/QzVm07qVgfWzVdv3gbrrQYNsA7kgr1KncIaIPWs7Xq2ZQfrKzxVILjBuuZ8dTbwre4/krEzZ3dWxtiXTKBdBaYzgnSVbnTPb+/J+58+A3clIP5KTJD8UQzrWvVlqGoQHVhzLgzTPyuzO73lawH1mX0N1Zt2vHiVCayv7FZv/gbrrYZzX2Das+2JYD0zm327zGzPWf+2ncF6pGevr+J8dVXvHf6sGTtzRrOA/V3qquNeDNPf+fB3oyvOb98Fz4OO4YcC/t7ECsgf5fWqAc9vOhWiN784VYLpgIF6j3YCdaAWVB/JYYLqAM8YGMBg/V6DwfoV1RrBUWO7zGx2sN6TbbC+37PH12D9ti1HJzyT94g/awZ7DnAOUM/uTmeB6YwgnX3kzN13nQrPbwd6R8ooAPF3YgLlN530EtQd8BxYB9CBdefrBJAO1IHprY4coD6TPVOzofpdjFAdcLd6hLfBusF65HYrPA3C+bZTuBYN1rk9V/lWHwPDBr7V/VkzZrIM1NfmzmZH5EfUcPfhBOkqI2ear2e3P4nzpamMcPxZLOuWsVYV4Dmw9hyuOC/R9TJ1pQM1YDqg16EOaAB1wFDd3eox3lXB+skz1gGDddbsKtsB/HPWDda5PXt83a0+5q0Ovdn8RzJ25gBjUF0JqHtue4zYxs00r3NHzjTv9do9u/1F2txLUxXA+LNYQPlNmWu4C5zftBqgA4boAF9XOlAHpgNnAXWAv0t9NMtQ/ZpWdqsDemNggDVgfUG3OmCwftmTHXyeeCwK1yM7WO/zvOqXB5dVwLrHwKz1ZfJm9GfNGM3Z2aWuBtQ9t50PpJ8M0au8+DQ4hWuG+yuxAfJHMazdbmgO7AHnwPpzv+r8sUN0IA6kAzwwHcjtTp/NPwGoA/u61AFD9R65W/23srvVm6/BemR2z7YagLTGdpnZ7GB9hachOLcnYLA+4svkzeg/kjGSU7lLXQ2oZ3enZ+dH1XH34Rw30/z0APoOeL63t/3tZ9Jch/ujmMH4sxhA+aMyoDmwD5zfpArQm/caVQfpQB2YPluDgXp83q5j29Wt7271T9sbrEMIrAN5MLNnWw1Ayr1dZjY7WO/Zlh2sr/BUgeDZYL0yVD/Fe8R/RwZzl7qBOmfubHZEfkQNdx9OkK40t715r8PbO8H56OfOB3HOcH8lNkh+UxYsf1Q1cH7P0ZzhHg3RAU6QDvDAdCC3O312/wygDhiqz2wPGKpH+/dAdcBg/SZ3rHN7sm+Xmc0O1nuyDdbn/Sp6ulu935fJu9ffUL1PO8e+7Abqhul5NUTVcfeK08kjZ27aAdAXgPO3+nIO4jrcH8UKx5/FAMtv2g3Nb9r5g4sqQAf4ITpQF6QD+d3ps/sbqH/WjtEvo/swQnVgLVhfCdXb9vnd6q0Og/VTO9ZXeJ5YIztYB/jnrEeD9bZtLGCuBsEN1vV8mbwr+I9kANxQXalDXbErfjY7qoaoOppPjE6G6KuxNhE4HxX/DPdXYgLlN2UB85t2/0uFXdeNGkQHuEE6YJi+Yn81oA4Yqj9qB1QH3K3+SQbrTatgeSZY78mvBK0zs9m369mWfc76mrW5up3B+m5PJbDO0lXO0q3OBr2rQPWdo19UutRPBeosMP0UkL4ColcA6Ktf3vpn3sdjWtPh/ixGQP6obFh+U9Z4nwrw/CYViA7wgnSgFkyP8JgB6oBWl/po5s5Z8Ybq18XUrQ5wjIFpdbhjXQGsr/BUyK5UYxWwvsKTHayv8MwGy6eDdRZArdqtXmFuO3OXetuPO2smbyYzM3c2O6qGqDqaD1eX/N1PB6JXgucLx+j8Bu7sYPyVWGD5TZkz8Xf/awVlgA6sgegAN0gHuGA6YKBevUt9Jo8VrLONgAG4wHoPVAcM1nu369222px1Z2tvB/DDdXawvsKzUo09noDOy0sN1jm9Gf2BPZ3q7lKPy5vJnM2dzY7Ij6ghqo67V5yiga4iRC8Az9/kfdRPCmRng+SPYniJbMaYn53HvRKiAzogHeDsSgdqwXTgPKA+k8veqQ7U6VZvGQbrr+uoC9ZXbVsNrK/wVMiuVGMVsN7nedXvvHEwmbAe0AHrPb7V4TcT+D61W50dqhuor82dzY6qIaqO5hMnBZC+CqLvAOi74PnCY7k2UoYZkD+KAZY/Kms+fiV4DqwD6MC6c8TalQ5wwfQoH1WgDuzvUgfcqf6oCt3qvRkrx8AABuurt10B1nvyFYDwqdnsYB3gh+vsYH2FZ6UagTXjYLKBssE6p/cOf9YRMIbqcXkzmbO5s9kR+RE13H1iFAl6DdFv3vLw/E3elxnuDDCdDZTflP1C2Yx12QHQAUP0myJBOmCY/k6KQH0me/fxVutUB87rVgd4wHrvdVEVrAO5c9Z7tq0ErTOzFWqs0rV+4ktMq42DqQjWV/myAGqWHwMY/SuNgGGH6hlw+2SgXhWmR+NjNYi+A6DvhOeLjmfspamsgPxR2bD8UVnrVQGe37TyfJ4E0gEumA7kdqcDmkAd0OhSBwzVV3ert+05XlzaajFYH9m2Glhf4ensvdspvFS3Ste6AgjPrHEFWG/5V7fz7PaV3ixd9r3eI/5VoLpfiBqXN5PJkD2bH1VDRB2PikSv0SBdFaBXneHeMt/qhw6eM4Hym7LXaBc4v0kdoAPrXgDMDNKBejAdMFDfmWuofssZOZ5zutWBdWC98zjLgnXg3DnrKzwVsv0S053bXVOlUSsKNQK5c9azgTIDWGeB30zd6owjYAzV9wN1tcyIbIb8qDpuOg2kr4TPqwE62ctPRzXW4f4sRkj+qGxgDuyH5jftSlUF6EA8RAd4QXqkFwNMBwzUd+w3ukaG6iPHr9mt3moxWL/pZLC+Il8hW6HG08B62/bqdtwgvFoXvMF6rKeiL5P3iP/qbnVWqO4u9bi8mczZ3NnsiPyIGm5iBelKEL3C/PaWtV8fju+HGpYzgPJHZUFzYB84B/b8gLISoAMaEB3gBOmAYXpEft
bceEP1x5z1UH0k55Ru9ea9ZgwMkA+LAR2w3rNtNc8Ta2QH6ys82SGzwfpnqYB1BlCt5rvS21CdI2M0RyFrJm8mczZ3Npsh/6YoVMsM0lcAaXWAXmB+OxDV4f4oNkh+UyYsvymjgl0/qCgC9JsM0vvFANMBA/Ue7QTqgKG6MlQHeMD6Kvjcu73KjPWebat5nlhj5g8+p42DqTS6JfMFpgbr9u317fXu9TdU58tRyJrNzMydzY7IB+qDdCWI7vntM5kfxTfD/ZUYYPlNWZXs/pcIygAd4IfoK/wM02NrUAPqgKH6oxihOqAN1j0G5i6DdX5onZm9okb2rnV2sL7Ck91vhafBep4vS+d39jqMeAN8YP10qK7Spa4I1KvA9FNAutLYmea9XkXmtwMrOtxvYoLkj8quKmOEz2p4DmgCdGBN3ZVBOsAzA14VqM/srwDUgVpQve3jbvU38hiYwRqqQXAVT4UaTwPrbdur29UA4QqAuedR9yoUUTjuHs8eXwZIzQK/T+pWZ4Tq7J3jBuprc2ezAS6YzvZS1rtfvBRntzf/PfA8Y3Z7y/14fHfgzgrIH8VSYdbc+x3g/KbVAB3QgugrfKNAOmCY/kondagD/F3qLctQvUcndKv3bm+wbs8q2VlgvWdb9q51dr8Vngbr+z17fBkAOIPvau8V1+29lj6xAu/VxzGSoZKlmBmRDfDAdEaQrgLRPbu9N2/pMf1sAe0soPym7BfF7gTnwB54DqwD6IAORAc4QTrAA9OB3O70iPzqY18AQ3WgH6oD0t3qAMkYmN7tDdZ18itle876p+2uySB8nx8QPw5GBVireFb37fU+rVvdUH0/4DZQ71dFmH7yC1CrzG5vWfvnt7fcj3o9UoYNkD8rG5jftBucA/vgObAWoANrj4UdpAOG6Z90IlAHDNUjcirMVQfOGAPTu73Buk5+JWDes63HwXzarga4PvEFpirAWsXTvr+1Eqwbqq/JGM1RyFLMvKkKTGftSlcYO9N8Pb+9L2+pfrajaxZYflMGNL+pEjwH1h+PAkQHOEE6UAemR9ShBtQBQ/Xf+6yH6sBZ3epAPvi9qSpY79lWJf/U7Giw3pPtrvU9fis8T3yBaUUIzgCq1XyBs8bAMI6ZYQfdp3Spz+RmA3UmmH4aSPf89itZObo8w/2T2CD5ozKB+U07wflNOwA6oAnRAX6QDvDBdMBAHTBQ/6ZdUL3tpz8CBuAC6yzz1QGD9d5t7bl/O+C8rvVMQGq/98rqWlcB1iqe1X2ZxsCcCNVHcjz6JS5vJhOYA+oM3elsMJ0dpCtC9KovQF14XD+pMJ0Blt+UAc2BfeD8JlWAftNpIB3ggulA/riXCA9D9c8ageqjead2q7cMjjEwvdsbrPNsW83TXeu7t7umSuCavUZ3rZ/nqegLrOtYZwPfbP4jGew5u7NG82YzM4E6S3c6I0xXAOmK42fuGfuUNce9Zb/UtQ73V2KC5TdlQfObqsHzXTkrIDqw5nwwdqUDPDAdMFAflaH6b+2A6sBZY2B6tzdY19y2mqdnrUd5XvWrAdfZ/YAzu9azwXI2rM4+/l5f4AywzthJXg2qn9ClnjnyhaE7nQ2ms4N0VYi+C6BnwPMFx/ZDBc6zgflNu8H5TVUAOrAOogP8IB2oC9MBfaAOGKqvyKsE1QGD9U9iAOu926tA6Ir57lrfvd01uct8n1+lrnUVCJ4Nqw3WH+vo06r16PXe4c+asTNnNCsjD8jrUq8C1BlhugJI94tQe7Ly9OI4xzvcn8UCy2/KgubA/rVQB+jAuvPFDNIBw/R3Og2ot9yaUB3gBeuLoTpgsB5Si9K2FfNVYL271j9tVwNcK3Rbu2v9LM9Vvgbr+7wZ/VkzRnNGszLyTgXqFWE6O0hXhejV4Xnw8f3QgfKbMoH5TRlrsytzNUAHdCA6wAvSAS6YHuETsdYnAXXAUP1RZLPVATKoPrK9O9bXblsxv9I4mJ5sd63P+a3wrNK1rrCGKzyVYHW2L8vLS1VfXMoIvSt1xCtk3TS658lAvTJMVwDp6gC90EtQL+b/T3Ed7o9igOU3Zf6gsDNbGaAD69aKGaQD9WA6kNudDswBdWD/2BdgL1Bv+9WC6sB53eq92/dAdcBgfWTbivkK42AA/q71vnW8ul0NcM3uB/B3rZ8K67M9e3xXQeoTwDob9GYF3js64kdydmcB5wF1lu50ljruXrFi75j/7e057nO5YfoN3JlA+U0MHfi7a9gBz4H151sFogO8IB2oBdMBXaDesmtD9RGgDvBC9ZbjbvVPYnh56Urv7G0r5p84DmaFJztozgKZmbCVHayv8FSoMdtzle8qsN4LDbPXYcT7RP9dGSM5u7Myxr5kvIQ1IjsiP6KGqDqaT5xUutFVx8/8ztkLzzNnubf8/wDAjyx07VVWHbvgObDnB5OV63gaSAe4YDpgoK4E1Nu+huotZy1UB7TBOst89ZXe2dtWzPc4mKjtrqkSdGX3O3EcjErXtopnj6/BOqc3o/9IxkjOznnqJ3SpqwN1w/QrXvFShui74HkmNA86xvcjZVhA+U3Z9ewE58C+f22gBtBvOgWkR3pVgOmAHlCfyd05+gUoBdUBd6t/VTYkZqpDBVivyM8E64DHwdiv36/Hk71rvdIPMhU9e3wZwDoL/GYC34wjYFjHzOzMAfZD9awudeXu+Iga7j4xYgbpq0C08viZe85eZc9xB4D/7HxpajYwB/ZD85sqwPOblCA6cAZIBzhgOpDbnd7ytYB629dQ/Z7DB9VH9mEC6yzwe6V39rYV8yuB9RWe7NC1Usd1FbC+wrNSjdmeq3wZwLrqfHUmaL/DfyRjJEdh9IuB+p7siPyIGu4+Z4H0VQB6B2jeCc+zwHlQ6rWXpjLA8puyoDmwf8a9OkAH1h0DM0iP9Is6PwwwHTBQ79FOoA7UgurAekjOBNUBTfjNUofB+jVlzVnv2TYLrLdta4Brdr9osN6yr27HvYYrPLM7wVVmtxusj/n2evf6M46AqdaprgLVDdTzaoiqo/nEKhrmKkL0qjPcgTBoPqN9He43ZQLzmzJeDrtrnXesrwpEB3hBOsAF04GzgfpstqH6cw4nVB/ZRxmss8Dvld7ZEDp7W89Zj/S86scNXRW6mat0rSuAcIUasz17fNXAuuqoFiZoP+LPmjGaMwKtlLrUFTvjZ7Ojaoiqo/nESQGkK0P0nfA8E5rPvBPiX13rcH8UAzAHcqD5TTt/pFAG6MC688QM0gHD9Pd1zB2PWpc6YKj+pBIjYACD9d3eStuq5HsczLft6oBrdr8ssN62rbGGKzxVxsGsgtUr4Hp1sM4GvtX9RzJGcxSguoH63vyIGu4+MWIH6Yoz3Jv/HqydAc8DoPmMfo4a0fKsjFE56gAdWHveVtTO2JUOxK6jgboeUAf4oXrL4uxW3wHimaD66u1ZvKtum+1ZqWvdLzHd61kFrvuc2POdKnetM/j2evf6M0Jv1jEzhuqxmZm5s9lRNUTV0XziwKgCSF8J0XcA9N0YOxOcT5yr/g73R2UD85uyZszv/LHCEH29r2H6tzoiXsyqN
/YF0OhSB+pB9V37GKzv9/a2azwzu9Z7tnXXen2/zO7gKmu4wlOh+7/Hs8fXYH2tL5M3o/9IxmjOLrCuAtVPBeqG6Ve8dED6aoi+C2dngfNds+qf9EMDzYHcl7Pu7vTfcayrz60KSAdqw3TAQL3tW79LHdgK1QFisK4+Bmb19izeDDUz1Ouu9UjPq378sLAKGPZImH1+KzyzO8wzu9YN1s/y3uG/K8NQPSZvJjMzdzY7qoaoOppPrNjHzzTPdfC5KkBPAucP+V3HO9fh/kqZ0BzImTG/85hVIfoq7+jzXRWmA/pAve1vqP45rxZUH9mnF6oDBuvs3tnbZuef2rXetr26HTckrQTrPRJm3rMSrM/2VOpaZ+n+ZoHf6mB9R7c6+wgYFaiu2qVuoP7O51yQvgNt7wToWfB818x6vALu2cD8pszZ8pUAOqAH0YE1558RpgMG6r/3n1sLJagO8I+AAfZAcsZudeAcsL6yFm/b56nQtb7C05B03m+F52lwXeGcnNwJ7651Dl8m7wr+AHe3uqH6utzZbIb8qDruPtwwXRGk7wLoGfB8Izj/UMNH/SyFsZnQHMj58WDXiJ7Vx3YqSAcM01fWoQjUgdqd6sAeqD66D9sYmJF9DNa97bOqda33eV71qwMg2f08Emaf3wpPhRp7PFd0rTOAajVfJu8K/kA/VGMfAaMC1Q3UZz1ixDrHvfnFg1tliL4bnmeC88XH+n2kTDY0B/K67nfOt99xjGo/rqxY/6owHTBQBwzVP6kSVAe0x8Cwbc+wLUMdq/LdtX5lW24AqdDRzA7XfU7m/VZ4umvdwJ7Ju9efcb56tW51Q/V1ubPZEfkRNUTVcfeKkxJIXwnRdwL0DHiePdf9phfH/rMNqGePqtn9clh1gA6s+7GFHaQDNWE6YKA+KkP1mP1OGwPDtn1lWL5q2xVgHXDXeoZnFb8ez9Pguq9DXs/Tu9Y9v32f9w5/gLdb3VA9Lm8mMyKbIT+qjrtPnKLB7gpMrA7Rd8PzTHC+6Fj7XpqaDc2B/eAc2HfcO3KUIDrAC9KB2PPF0J3ePAzU+3P5ofroflW61Uf2Ydr+BG+lbVeA9Z58d63H+K3wdNf6Ps8qfis8Feb+AzXBenVfJu8d/ju61Q3Vc/NmMmdzZ7Mj8iNquPvEiB2kK0P0XQA9C5wzzHS/6d81WDvD/Z0yoPlNO49XGaDfdCJIB2rC9OajC9QBQ/XofUb32wHVAS7orb49i7fatpnjYHq2XQPrr25XB0AqHDM7XGf3W+GpcN1UGwejBqrVfJm8d/if3q1eGeDPZM7mzmYz5EfVcfeJA6IqIH0lfN4BmE+a536vIUx9He6PyoTmwP5u+115O0b8qEB0oD5Iv4mhOx0wUB+QO9UftAOqj+xz0vYs3mrbehzMVU8DyF1+7GB9hWcVvxWeCj/yADpd62oAnAV+q3oD7lY3VOfLnc2OyI+o4e4TA0oVQPoqCF0NoGfBc5aZ7gB+jgPnuzOVAfpNCiAdMEy/ImWgDtTvUt+93whUBzhHwIzso7w9i7fatiePg2nbXt3OADLCswpcP/GcnHjMQL2udTUAzgKoVb0Bzm51ZqiukDWTN5M5mzubHZEfUUNUHUAsTFcB6auh8y64fNI890dNHvd4h/ujMme7787e9ZJZRYgO8IN0gA+mN5/87nTgSKAOFIfqAG+3OiNUX52hCu1ZjlFlznrPtn6J6X5PdrAOnAfX2f1WeLprPcezx9dgndMbWA/Wd0BodtBtqL42dzY7Ij+ihpuYYboaSK8yy71l5YlgPE0D7gwvQ82qoQpAB9ZBdOA8kA5wwXTAQD0DqAOG6s9yt7re9mqwvHdblXEwPdu6az3Gb4Vnla71q54KP8qw+63wzOxazwbLahBczZfJG+AD68zd6pWhuoF6Xg0AL0xXAunqs9xbTo4IwDmAoeOPf2lqdXB+kzpAB9ZAdGDNNcDYld686sB0IBeot3xD9U/aBdWB9cB41z4nbc/irQTWe2rIXgMVuHciJGWH61X8VnhW+ls5uWudAVSr+TJ5A2eCdfa56obqa3Nns6NqAOKAOjNMXwFt1SH6boCeDc4TfjB4PVImu+N9NzwH9r0EVhWiA/wgHeCE6YCB+j1fC6jP7LsTqgNnd6vvyGDansW7Kljv2Xbdel3drg4IX+FZCWhWAc2G9TGe0T/ytOyr22l49vgyAPCV40gM1j9tzzkGhjlnNCsjbyZzNnc2O6qG6jBdCaRXmefesnLgOctsd+DSGsR3uD8qA5wD++A5sB6gA3oQHYgH6UBtmA5wAHXAUH3Hfobq+/ZR3p7Fe2UdFcfB9GyrAAtXeCoct8IYjiow/MRru9pIGBVP++7zNljnyFDJmsmbyZzNnc2OyGcD6swwXRGk7wDMGfCcAZxvOO5rL03NAufAXnh+kzpEB84F6c2vJkwH5IE6YKj+VbtGwFTb56TtWbyzwXpvDfkd7le30+geVQBx7HC90hqy+63wrPQvKNq29lTxXeltsK6ZMZozmqWUN5M5mzubfVMEFowA6qfBdMV57veMfRA9E55nj6l51tNa/GyF6VXh+U2rITqgBdIBbpgOGKi/riEPqANaUB3g71Yf3Y8Rku/IYNqexbu3jmy4zrAOlWDhCk+FOdeG6/X9VngqwPVqP+r1eNp33NtgnSNjZ85oVkbeTOZs7mw2UA+os8N0VZC+Cy5nAHQWcB587Nc63F8pA54DewE6oA3RAR2Q3jw5YTpQC6i3OgzVe+Rudf59TtqexTsbrDNsq9KVqQAgFeZcs4PmFZ6n+a3wjL4OW/bV7c71XOXLAsBZutbVwToz8GbOGc2ayZvJzMwFDNQ/e8VKaaZ7814PmncD9Ex4zjCq5kE/aeAc2A/PgT0AHVj/4tlVIB0wTB9V1PVsoJ6zrwJUH92PdR/GDKbtWbx7/zay4XrFrvUeXwVoZrg+77fC037zngpwXeVHvR7f7M9dFt9e75Vd6wbrPBkzWUp5M5mzucA8VK8I1NlhuuJM9+a/T1nwnAmcT6zBeIf7syrDc2A9QAf0IHrzjT8HjDAdMFB/lqH6d7FD9V1ZjBnK26u9xLS3huxts6GJAnBd4VlpFAe73wrP0/yAWi8zzQbL2Z+7Pb4rO8tXeRus62XszBnNysibyZzNrQLUGWG6Slf6Svi8CzDvBugM4Dyx4/5P4J4Bzm/aCdABfYgOnAvSgbowHQhZ/9Qu9dn9DdVj92PdhzGDafvqYL1n++yu9bYtPzRT8TyxW7iK3wpPdj+A/19Q2PO6KvsCPONgDNZr5IxmzeRlZQL5UN1A/YpXvJRB+k6YnAnPWWa8P+rCevwsAey7wTmwB57fpArRmzc/SAc4YTpA0Z0OEHSoZ++/G6oD+6B1xSzGDOXtK4+DWbvt1e00wJECIO3xPLFb+DS/FZ7+kecszx5fBgDOMg6Gac46I/SukqGSlZlpoB5Xx92LF6avgrRVIHoGQGcB55uO/fNImQxwDuyF58B6gA6sP6YVIB3g7koH+GA6YKAesf8oUAf2d6qP7ssMyHftw5jBtH2vN0PXOse211UN
HClAZoC/W1hhHU/zW+F54o882Z49vtmfuyy+quNg2EbNsGbszNmdNZo3m5kJ1asB9dNg+koIuwM07wbomfCcYVTNOz2sy89yqL4bngN7ADpgiP4sw/TLOh6oA2dA9dH9mAE+6/Eob68I1nu3rzhrvcf3VBC3ogOTHQwrnOsqfj2ehuv2ZPcF1nWtM8Fvxm7yamD9hDnuM5gvu0u9IlBn7Zi/e67RauC8EypnwXMmcL5gDa6/NDUDnAP74Dmw5xhXQXTgTJAOxMF0oBZQj/BQ61IHakP1nVmMkHxHBgt0BtaB9d5aOLa9LpXuyWqd8JVGcbD7rfCs4lfpRx575kNwhq71U8B6lY51Q/WYPCAPqlcB6ifAdEWQvgsw7wboDOCcZVzNg35KzT5/1q5jWwnRAR2QDhim90gdqAOG6qv2Ywb4rMfDtP1KsA54HMx923O71ld4VhoJs8Kzit8KT8P1eb8Vnio/FPb4Kn1HADxz1ldD49P8RzJ25ihk3XQqVK8I1BVgujJI3wmTM+E5GzSfXIvrHe7P2g3PgToAHVgH0QENkA5QwnTAQP2XZoA6kAPVDeN19mHMWLk9C1jv3V4JrmfDmGzAY7ge43ma3wrPLLhe6V9Q2PO6GHwVu9bVwffpYF2lW30Un2WOfjFQf+UTB0Kjge4qSFsFomcAdBZwntx5/5MCzm/a2V2vDtGBNSAd4O5KB+rC9AifiDqyutQBQ3XVLIP1zzJYf972urKhSXatnrce47fCs4rfCk92uK5w3azw9GfkdTGA9VZHz7aa3oz+rBkqWYBmp3o2VGcZPdN8zoLpKyHsDtC8GyJnw3OGcTWvdGFdxjvcn7V7xvsOgA7oQnSAvysd4ITpAA9Qj/BQ7FLP2ldhv0qjYxgzTpizvtL7dBij4lkJrisATXa/FZ6G6/N+9qwL11Xht8H6mozRnN1Zp0F1li51ljruXnFSgumrofMuqJwFz9mg+aZ1eA3cM16QugugA+shOqAH0gFqmA4U7E6P8pkF6sBZUH1mX2ZAXm0ftu3dtf687XUxQBMVcJQJ1z2K4wy/FZ6G6/N+9sz/nqgO11W9d2UYrN+VMQJGFapXA+rs3ekroO1K8FoVorPA8+zO+2/6z8qXpu4E6IA+RAd0QDrAC9OBOBAe5ZU99gXIG/2iuK/CfpX2Ydt+JVgH1gHwld5q0OTkbvhKcH2FZxW/FZ6G6/v87Jn72as4EsZd63H+rBmjOaNZwH6wbqheo46717kwfQdw3gmTMwE6KzQPqur6SJmKAP0mVZAOSMB0gBSoM/lkd6kDeWB8Zn8VIM+exQjWd2S4a/3V9j3bGq4brsf4rfBk91vhWQWut+yr2533o0dFzx5fd63re4/4n5wxk6XUrT4L3rKA/mx2VA13nxixjqG5e+rB9OoQnQmek1TyU26Uy02rITqgB9KBc2B6pFeUT3aXOmCoXmk/1n0YM9y1/mrbPlUE1qt8Ddf3e54IXdnhOuD5/xF+FT17fA3Xx7zZusrZ/Ecy2HPcrb42dzY7qoaoOpoPL1BfAW4rgPTdED0boJNA87e68Pk599LUnQAd0IfowDqQDvDDdMBA/Zsyu9Sz968O5NmzDNa/iwWW93v3+OZDiGzf7FoN1/f6rfCs4gfkjYY58TxX9OzxrQzXWX7kZvIe8WfN2JmjAtZPheoG6le8DNN/++9TFkBnBOcz/4qnQ3fgvhueA3sAOrAeogNyIB0gh+mRfpWAOnAmVJ/ZV2E/9iy2OesAF1znAvE922pBiGzAowKNFI79xBrZ/QDD9Z1+FT17fLM/J1sN18XwvcbkXcF/JGNnjsE6Z+5sdlQNUXXcveIUDXVXAekKID0DoLPA803AfFY/oaB9F0AH9CE6oAPSAV6YHukVAdSB/C71WQ9FIK+SyQzjWY+HCaz3bq/Ytb7S29DomjI711d4VgKa7H49nobr+/zsabi+0rfXu9efDayzZozmjCAjQ/W1ubPZUTVE1dF8YnQ6TK8I0bOxNSs4DzrX70fK7ITnN+2A6IBB+rNOgOkAT4c6kAu1s/evDtVH9zt9H2Ww3ru9u9Y1favBdQWgucKT3W+Fp+H6vN8KTxUQng3X1cD6Sm82+K3uP5IxmlMZrJ8K1Q3Ur/jFyiD9u7LwNRs43zmq54N+loL1XQAdkIXoN0l0pa/wrNadflN2l/rs/orZlfertM9JYL13e8UHeiXfitBIAcIpAM1Kx8wO11d4Vvp7qejprvWzvHf4s2YA/GA9A3AbqnN0yzefOChqmF4XojPAcxJg/lVfroHrL03dCc+B9QD9poUgfVlHOnAWTAcM1KvtX73LnT3LYJ1h+55ta3fhZQK9Ht8V0EgFmFUCmobrn3Kv6cTzbE/DdcXvYibvXRnVwLq71dfmzmZH5EfUcPc5B6irg/SdSDsToLOC800/lvyUGePyKMVu9JtWgPQVvowwHagF1CM8FLvcVaD66H7MWYwvLwW4YPlKsN621wIFVX0N1+P8Vniy+63wZIfrKzwr1bjCs+LnZMu/ruw1WOnL5F3BfyQDMFiPypvJnM2dzY7Ij6ghqo67V5xUYPpqyLoLa2cBdCZ4vutfF0zqeof7s3YCdGA5RAcM0pd5Gqiv81DeXwGO796PeZ+Rv+OTutZZwPpKbyXfFSNhAMP1DM8TAanh+rzfCk+Fa2eVZzZc93cmp/cO/10ZBuu5eTOZEdkM+RE13H3iZJi+B6TvhugM8Jwdmges0R24FwTowGKIDmiB9BW+jDAd4IDhLB4nQfWMTGZIbrC+avuebTm62Vge5jPBFlAPrhs+xvit8GSH621b7vPi6zvHsypcr/ydOeJ9oj9QE6yf1K1uqP7oEyMFoL4KzFYD6VkQnRGcM/ygAOAnFLRXgejAOuC90psZpgNc3elsPidC9Zl9K+9nsM60fZ+qP8yr+UaDzJZ/XQZ7e/1WeLLD9ZZ9dTvD9Vm/FZ7Zn5WG6zwAXNV7xH9HhsH6XNZM3kzmbO5sdkR+RA1RdTQfbqCuCNN3QfQMaMwEz0mg+SW9WbfPI2U2AXRgE0QHDNJvYoXpQOzxMsBwBg8Deb79TgbrI/swwXWmB1c1AGG4nudpoLnXL/NfU7Cv4QrPU6/vHk/DdZ7vepYf6Cv4A9xw3WCdL3c2O6qGqDqaD1fX/N3PMP2VdoJkBoDODM4Xr8/PSqheAqKv9leA6UBtoB7lo9ylPru/2r7VXnYK7AHrACMo5+la7/Vn8VbzNVzP8VQAmgowkx2ur/CsVOMKz4pwvW2bd/wrfVd6M92jMPobrI9nZeTNZM7mzmZH5EfUcPeJg4KR8HMFrFyFP1eD9FMgOhs8Z/hB4YO6X5q6DaID60H66oxV3szd6YCB+koP5f0Vus5373cyWN+RwdJZrurN4JvZJdy2tWeUZyVAmgXX27Y+L7v8lDxP/iFype9Kbyb4vRqsj2T0IhRmsK6QNZM3kzmbO5vNkB9Vx90nTipAvQpMz4DHLACdHJz/T53r9QMUhOirc1Z6s3enA3WBepRPtociVJ/ZV2G/0axTx8GMZfRsy9OxxeLN4KsC13t
8q3meWGPmdenzstdT4YcPoOZnZY9v9e/NXu9ef6Z7oJsqwfWdsPsksJ4N1SNqiKqj+cQpEnSqdafvgM27QXI2QGcG5wlr8xMG2yuA9NX+p8H0aL9KUD27hqz6qwN5ZrAOsIJynq711f4s3gy+2eMOqoFwFU+FGt29vs9vhafC9d3jeTpcr+7b693rz3QPBOwZCVMRrCvlzWTO5s5mR+RH1BBVx90rFoBG49SV3emrAesuuJwJ0dkAevYPCp16P1JmF0DflaUG0oF4mA4YqO/0yYbqs/ur7auwHytYH9mHrWu9ba/50MribbieCwpVPCvVmDmG40TQXOnaWeWZeU22bQ3tFb+Te713+J8O1w3WOXNns6NqiKrj7sXbpa7YnV4ZpLMAdCVwPrFmP0tBtDpIB86G6dGeLCA80ofBw53ufPtVAusj+yiD9dX+LN5V4Xrblh/qneyp0C2cNRpmhSe73wpPlb+Zit3rDN9Dit/JTN4j/r0ognUkTOVxMBmAW71bnQmqnwbU1WH6bpicDdGZ4Xny2nS/NHULRN+RswqkA2tgOhC/Jsx+huo8+yvB8dF9d46CAeqMgxnZhwl+9/oz1c4AH06H6z2+p8JHj4aJ8ax0rhWuHcP1PE8W31O8R/xXw3V3re/PysycyY3Ins2PqiGijptOBuqr4epOsJwFitngefaPCZNa2+H+rB1ZK0E6oAPTV3gaqHN7qEH1mX1377erYx3YBb35wHrb54wHVhZvw3UdCFUNPmbBdcAvNmX0W+GZ/XcY/YNPy7+6Xf53RnatK71Zat7hb7jOmTOaNZM3kzmbO5vNkB9Vx02RCDIaqBumf8rYD49ZALoSOA9cs/4O91cySP8sw/R8LwYgHuHh8TFr96sG1kf2Gcvo3Z7rgfIEcF8Vrvf42jPH06NhYjwVzkulGns8Ddft2+vb693rzwbWgfXrM5KxM2d31mheVuZs7mx2VA1RdQBxUF0BqK+EsRVBejZEZ4bn2WvzpNfAfWfXu0H6Hl9GCB7pxeJzMpQ3WH8tVrA+sg9b13qvv/LDsOF6n282fKnm6dEwMZ6VzrXCtXPyaJjsz2D7cnoD53atVx4HY7C+Nz+iBoC3S10JqK+GqzvBchYoZoPnZMC8W0/ruWakzGqIDqwF6cC5MD3ajwWER/oweKiB8Zl9FearA7yQfHyf3u15wPpqf1Vvw3UdsFXNUwFmngiaXeN3Vetez/68rO7L5A3UgOusAH80ZzQrI28mMzN3NjuqBuCcLvUV0LYKTM8AyAwQXQmcL1yv6yNldkB0QBekr/Jm92T0YgDiER6K42Nm9jVYz9qnd3u+LiTVh2HD9bW+2bUqwMwVnpkws9K5cY0xfj2e1eB6j2/25yWL70rv0+A64/3iSIZK1kxeVuZMLks+wAnVTwfqO4DzTqCcDdCZ4Xn22nzRTxmQDhimr/A0UOf2UOtWn9nXYH12n34xgW91f8P1tb7ZtdrzuxRgpgIUPrHGFZ6Z/5qibXvuZxuL70pvw3XNjJ05o1kzeWqZEdkR+UB9qK4E1FcD1hNAOhtAJ4fmX/VhPedemroDogN6IH2lt4H6Xh8GD8VO95ncqmB9ZxbjQwwT/O71Z/I2XO/zza61mmel7nWFdXSNMX6Axg8+2Z72HfNd7a0O15mhd2W4brA+pgjsFwXVTwTqVWB6BkBmgOhK4HzTer0G7hVA+mp/BZge7cnoxQDEIzxUobzBem4WY9f6jgymB2GWh2zD9T5fFc8eX4/i4PRb4VmpxhWeJ3evZ38OVfdd7W24zpGhkpWRN5MZkR2Rb6h+1S9eK4HsDoC6GyhnQ3RmgJ69Nhf1swSuK4P0lf7M3enRfkxAPcpHGarP7r97DAywF6yP7rc3q3d7vq71HRknPGQbrq/zted3ZcJMhR8qKtWoAuwVfvDJ9lzly1DrSu+V9yyG6xwZozkKWbOZmbmz2QAPWGeF6kpAvRJMzwTFbABdBJp/1Jc17Rspsxp078hQgekrPFn9mHwYPBRHyFQH66P7sXatj+Qod62z+a+C662Onm3zQYkK1DnZUwFmKsDRSjWu8FQYV9S21eg0z/7MVPRd6W24zpExmqOQNZM3kzmbO5sNzIN1tm71E6H6avBaGaazQHQ1eL5p3X7+DVsP0nfkKMH0Fb7MfobqPPtnjIEBDNZf7zeyD98DDNuDHpP/yodsNZiR7WvPa6oEMyuB5lNrzPwXFW3bWp49vkqf76u9mUbDsN0TsWbszBnNUsqbyYzIBjjA+glQXa1LvSpMzwbp7AA9e30u6ocCPmZnqAD1aM/KXlU8TgLrgAokr9O1PpLD9qDH5M/ygL1qTRhAhqFWrGcmzFTo5HaNMX49ngr/oqKip33HvU/rXmf8V5UjGaM5ClmzmZm5gMH6a59YKXWp7wCrO+FyJihmhOgi4Pyr3qzt55EyFTrf1bxP6FKP8mLpmlfe/wSwProfc9f6rhz1h7zV/itgZaujT2owoyKAUvFUgJmndnKfWCNQ619U9Hj2+GZ/vtn3t1Z997da+sR2X8SaMZqjkDWTN5M5mwvUAeus3erRIFMVqO+CzFngmAmiK8LzxesX0+H+TqojZFZ5s3syejH5GKz3SwGsj+5XCayP5LA95KnC9VZLz7b5wKGqb0XPSjBToZPbNX5W5r+oaNvW8uzxVfos7vVd6c3Uvc52X8SasTNnNCsjbyZzNtdg/dknTobqe4BzBkhmAOkKAJ1hnT6o76WpN3mEzD7fEzremXyUoTpgsL5uv5F96jxUsD3kGa7P+a70VoFPPb4qnpkwUwE0r/B0jZ+l8C8qKnrad8wX4Pn+b9uf5T+SsTNnNEspbyYTMFiPruPuxQ/VV4HZajA9Gw6zAvTsdVmhf9f65+m/9AiZ3d4nAPVILwaoHuGR1a0OGKx/3m9kH95/osqYwebP8nDN0s2n5Jtda7anAsxUAM0rPE+tUeGarOjZ45v9ubnSt9d7Zfe6+r0R4/3jSMZojkJWZiYwB9dnwToLVG8+MWKH6qpA/QSYzgbRVeH5onWMHSnjETJ6nkwgPNKLwUO1Wx3YD7p3Z7LffDN2re/IYILrAM/DNQPIqOqr4pnZvd62NWje6alybqIBu8rfo4qnfe9a+f3faunZluve6HS47q71zzJYjwWIkVDPUH0PbM4AyCwQXQWes6zXC10bKeMRMvt8WYF6pBeTTzZUB/TGwGTty961zgzxGTMM1+e9WboEVTo7e3yzaz21U1gBNCvUuMIz80cflb/x7M+NHl+lWke8Wf71WtueY012ZTDfd1eH66rjYKqB9dOgujJQ3w2Us8EwM0DPXptFWvvS1JsUYfoq7xOAeqQXA1SP8FAcA5O1706w3vbjvcmv0nVkuM7tXdU3u9ZqozhOBc2VauzxVLgmK3r2+GZ/xrH4AusAOxucZvNnzRjNUciazQS0u9YrgnXWDvq75xpV607PAsaMEF0dngeu6dhLU2/yCJn1vsx+hup/SnEMTNa+VcH6zizD9e9S/WfhDBCjqm92rZmdwm1bg+YIz0o1Ah4PE+nZ45v9eaToy9K9zn
Z/xAi+We+HVbJm8gCDdZbO+eYTq2igqQjVd4Hm00G6GjxnWbd/9Rq4u+t9n+8JQD3Ki8Ujs1sdMFj/vh/3TTdrDmOG4fqc70rv08FTte71FZ4qoPnU464E2LM/O5Q+j7Jr7fUFzuheZ7wHY2zSGMlQyZrJA/LgusH6s0+cFKC6OlDfDZMZYLACQGdYpyCtGSmj2vmushZsILyqj8H6zszR/dy1PpLDmMEE19v2HKBBDY4o+ap0r7f8q9udCUd93J+V+a8qVABzxc8jRd+V9wIs39UV/EcyKuaMZs3kqXatG6y/8zkXqlcD6pmQmBWiFwLnAIbWuX+kjCpMX+Wt4MkEsdl8lKH67P45UH40r2Y3CyP43pFxClxf6V0Z3CvVmgky27b8ngbs+z0N2Ot4Vvdd9SNlq6NnW03vXRnM0Jv9Xn8m79SudRawztqtbqj+6L8HNGeBYyaQrgrPk9fwDtw9RmafLytQZ/RigOqAwXr/fuNi71ofzWME3zsyRlb4lH8SrgZGqvpmw6FTobCPO87TgL2OZ3Xf7H8FdN+ew5vRnzVjZ85o1kyeIlw3WH/24e5WXwEeKwD1DJDMAtJVIDrLeg0obqSMaue7AlCP9mT0Mlif3z9v39H9+G+AmW/sGeE64O51du+qvtlwqNr89RWeBuxxnr4u63iu8mWoFeAA7OoAnM1/JGMkh/1+fzQLMFzPyI+qo3nwdqurQfWKQD0bDjND9Oy12aGnI7w2Uka5+13Fl9mPCaoD+WNgAIP1/n0N12eyWI9HGa73+rN4Vwb3DLUaZHJ7ngrt3cFex3OVL0OtgAH7Tu9dGe5en8syXN+fH1HD3YcH9P/24+6m/+29FrLuBMyZwJgRpFcB6AuPogF31e70ld7sXerRftWgOqAP1mf3d9d6bJ7hep+U4XqvP4u3fft9Ddi5PQ3Yv8uAndtzlS9DrSvvC1gAO9O9xg7/kYyRHPaGmtEsYBzcKMJ1hm7xymBdqVu9ClTPAMhMIF0VoBNXHTNSRhGor/I+AapHehmsz+/vrvX4PPYsj4aJ9T/B275NBuzcnirjhlasZfS1qXLc1c55jyeDrwH7Pm9Gf9aMnTmjWYDh+q7sqBqi6mg+cVLpVl8JY6sCdRaYrgDS+Ssc0veRMu5+1/Jk9IqA6oDB+nz26H77b87YgffOLMN1LX93xvP4rugSbvlXt7NnlJ+K56mA3Z71fmC4KftztG3L8Z3d613BfySjYg5guL4rOyI/qo7mEScFsK4O1U8C6swQnbeyGHV8rse9NPWdDNTjPQ3VP4vhX23kgvmZ3L1d67sz2bMqwHWA64GR6WGXpW6GmrO7Lw3Y63ieCthb/tXt+Neyoucq35XfU9mfo21bju+/Xu8T/UcyduaMZgH7Abvhel4Nd58YnQzWVwPonZDZMP0urmrGNfMD6KCuvTT1ndRg+krvE6A6YLDOtL/ajZnCCBr2rJWdaTe5e53fW63m7PEwQG6XcI+vPeP8VngasNuzku/KH+AZ1qHXu9ef7X6GNYM9BzBc35HNkB9Vx92HG6wbqn/L2S8mkM5TyXUlAPNZvQfu7nzX8qwK1QGDdcWbMsP1+SzDdS1/Fu/KvgbsZ3oasF/Zjn8tsz17fBk+77L/FRBgwL7Te4c/a8ZozmiW4fra3NnsqBqi6mg+54H1lTC6KlBngOn5FVyTIDgf1bqRMmpAfYUvs5+h+hqPrDnrbV+drvXRTPaRMLuyGOF620f3YfQEbzVfA3Z7Rnlmdgd7fFGcX49njy/D553S52ir4bpYADvTPQej/0gGew6w5575USrPVBG5s9lRNUTV0Xy4uujvfobqf2bsVSZQZ8bTleF5wDU2NlJGtfudvUs92i8KqgMG6/H7z+yrdUOmMIKGPWtlV9pN6g+Kqg/pBvd3GbDbM8rTgN2euz1ZfA3Y93hX8B/JGMlx9/qr/bSe5SKyI/Ijarj7xEHCSPCrBNZXA+idQN0w/a4KAD1jXNAbvQbuit3pq7zZPatC9Sgfg3WNXHbgvTOrAlxfncH0kMvizbLeKiM42rYaIM+ecX49npk/AFU7P9mePb7ZnyG9vgbse7wr+LNmjOYAe7vXlZqWZjIjsiPyI2qIquPuFSf2MTV33xpQPQuos6BrRYhOBM1nFD9SRg2or/CN9mOE6gAHEGfxyLwpOeEGsCrI3/GgwNi9xOTPVDsDvFm1HkpQKHtt7Rnn1+Op8i8sFM5PtmePb/bfe6+v0mdpry+T94n+Ixk7c6p3r6s+S85mR9UQVUfziZPB+h5wmgHUGRC2CkgvAs9H1D9SxkB9jWdlqB7lw+ChejNUvWt9d57hOk8G0wMuizeDb1UopOLZ41vt2DP/lYUKED7Zs8c3+28TqPtZyua92p/t3mk0Y2eOQvf6Sc91EdkR+VF13H1ipADWDdX7lImymUF6dXgefJ39CdwVgfoqb2aoDtQcARPlowzWZ/MN12PzmB8SGP9pMNMD7mp/lh8GGGquCoWy19aecX6Axhz2auenx7PHt+LfJqD3WbrSm+n+oML9E2sG4O71FZmzubPZEfkRNdx94sQ6pubuuQbWVoPqWUibEaZXgehZo4M+KHakjBpQX+HLCtUBLiAe5ZMN1pvHWd0Nhuu/teshocqDFdMD6AnehkJ9vtlra89YzxXXqUr3fjXPHt/sz5Ee31Wfpa2Gnm3zP6eZvE/0H8kYzdkJ2N29vj43Ij+qhog67j7cXeuqYH0XFDVQ1wTphNB8Rn0jZVYC9VX+KzxPgOqRXgxgvXnM7n9W1/pMrgJcH81z97r9mb0N2Nf5Ztdqz2uK7mJXuZZO9uzxzf4cAWp/nq70Vr5H2OHPmgHUBuyKz3ez2Qz5UXXcfXi71hXBemWozgDUlUB6MYDeqxojZRSgOmCwvs9jXqo3QIbrsXmscL3tY8Ce6c/izQCEgNwRHD2eq3yzaz3ZMxqwt+yr29mT3bPHl+HzlAGwr/Rmgfe93oz+rBlA7fnrioA9G65H1BBVR/M5C66rg/XduDYbqLPD9JMAesC5iB0pA2hBdYC7Wx0wWP/sMa/sGxC1Gy+Vm9rRvEqAnTFD2Z/Fe+WaqEGhkyFexTVVGBPTtrVnhmePb/bfPFD387TX9xTvEf8dGcyAHXAH+4rMmVyW/Iga7j5cnfR3P4P1R+1EuAbqr1UNpJOsc99IGWAd+F7pbaie68UA17O/+BXH0ShlMnffVOle35HB5M/ibcC+1je7VhXPHl+FMTEt/+p29oz07PFV+vus/Hl6incF/10ZBuxxeTOZs7mz2RH5ETXcfQzX531rgPVMqE4Cev8nZZDOtpYDqjFSBuAfAQNwwvBILwaw3jy0bzyUQHdGZsUHA9YHKvUHT0XAvtK7MhDKhm0qsFHF013sZ3r2+Gb/fQL+TN3hzXSfwOg/kjGaw9yo0vbjfx6ZzZzNnc1myI+q4+4Tq2i4qQjWq0N1FgisCNJZ1m6jdEbKrADqADdUj/YzWI+vQ/FmqzpcB7gfClgfphgfClke+FW9DYPW+WbXWtHTXez2zPD1Z+p6b6b7Eab7EOYMgPteuu2n8
Uwyk5mZO5sdVUNUHc0nTgbrdcE6AxRWgekMa5WtL+eKa6SMClQHOEE4oxfLr9jZNxyKN1oqN7LuXq+RwfRA3uvP4t17hpSAEAMIUqlVxdNd7Gd69vhm/y0BtSE7C6hm+j6v4D+SARiwZ+fNZEZkM+RH1HD3iZMCXFcH66dBdWagnr02O7Rx/V8Dd0P1phO61aO8WMB688itwXB9Xabh+r4c9QdOFgi+2tswqM/39FqzPd3FXsezxzf7b6nHt/Jn6kpvpu9FpvuFEf9dGQbsuXkzmbO5s9lRNTDVcfc6F65XA+uZ4JgRqFcC6Yzr+0bxI2VWAXXgPKge6RcL6CM88r9cs3/FN1z/rl0PAwAv+B7JYcxQ9mfyrgyDsn2za614/O5i5/fs8VXx7PX152q/L5N3r3+Fe6qRDAP23LyZzNnc2eyI/Iga7j4xioR2But3nQDV2YCvKkxnW8cF6h8pAxiqR3syejF9kTF8wRuur8909/q+HMYMJn8mb4MgPd/sWrM93cWe49njq+LZ42vAvt6b6buR6Z5hh/9IhgF7bt5M5mzubDZDflQdd5/z4HoVsJ4Bk1lAsBpIZ1k3Ir0G7mpAHeCH6tF+FcF688mvI+vGKjN7980rO1xv+xmwr8xg8meqnQEEte3rAUEG3+xa3cUe69nje7Jnj6+72Hl8T/He4b8rgxmyG7Bz5s5mR9UQVUfz4YXril3rO8D6qVBdAagzrFOmJs/RzxK4bqjO6cf0JcbwxZ4N99Vu6BTgOsD/EMDYBcWYYcD+p3r/JhiAjZJvdq1Kx1+ti73H92TPHt/s8wTkXqdtWw6gzPL9yPTdW8EfMGDPzMvKnMllyY+q4+7DVc/dTwuuVwTr2bCYGahnr81qkaz92EgZQAuqr/Bl9Yu8sKqA9Yg6DNevy93r/DmMGUz+LIAd0ANB9s2vVaWLveVf3c6ekZ49vkp/T9nXads233elN9N3O9N9w4j/SAaw9j7iJvb769GsmbyZzMzc2eyI/Iga7j6c3etqI2FWw/WdsDMTHJNA3T9UCaazrvFFfQbup0P1aM94SB/lw/OrMMMX+mlwfSbX3evzWYzwe0cGk//q2tUg0ErvbN/Ta82GlypAWMWzxzf72s/uYgc4Pl/VfJm8T/QH6nWxG7CvzZ3NjsiPqCGqjrtXnJTgusF6RC6XlGE621pu0I+h+gJPg/UrHvpf5Jk3M0pwHeDvrhnNM2A/w58FsLdaerbNhzVVfavWenXLbHBpz1jPHl+Gaz/7x6C2ff46sIBwVe8R/x0Z1QD7aJZS3kxmZm5EPlsdzSdW0YDTcP1bzn6gzASC1YA609oRanykzE0G65F+UT5cvwYzfIln16B4A6fQvd7244b5jA9/IzlMAJzNv/eMZcPK1d7Zvkq19viu8MzuDlY5/xXPfY9v9vEDOj8GrfRd6c3yg0OvN6P/SEY1yG7Avi53NpshP6qOu0+cDNfrgnUWMKwC1FnWK1uT5+sacFeC6it8WcF68+L6osoG2ww1KI6k2d29DnBD79GckSzW42F7gGXzdxe7fUd9s2s1ZM/z7PH1dXpdhuxj3obsef4A7yx29n8pOpqlmBmRzZAfVcfdJ04KgF0drp8I1pmhOsP67BLBebgD91Xwe5W3gifrlwFL13rzya8jswZ3r3/br+ZNP2N3FWMGC5QA3MVu3z2eq3xXwMvs4z/Zs8c3+zz1+Bqyr/c2ZM/zB2pBdoV7+5m8mczM3NnsqBqY6rh7Ga6v1ElgnQDkvlVFoM683l/0EwaZVbrVV3hGX9QsMPvuw1NP9i/0ijdP1bvXR/OYs1gfltgeXt3FPue70lvJt2qthux5nj2+Kp6rfA3Z+31P8a7gP3KnaciulzeTOZs7mx1VA1Mdd69zAXsluG6w/ltVgDrj2i5Q/wx3g/VIP66u9ebD80WpDNdn8zO61wED9sysHZ1VbA+uqzNUu9jb9loAaNV5ZKg327ciZFdaU5Xjz17TXt+rWxqyn+Vdwd+QXQewz2Rm5s5mR+RH1BBVx90rjq0Yrj/67wGjWSCZDfwqA3W2tSTQa+CuBNVX+J4A1psXx5ckQx3ZNy0ZgH3mS9+AfT6LDU6P+O/IYHowZuliX+lt33W+2eASMGSP9uzxPf34AUP21d4sNa/2Z2wG2AHZK94Lj2Zl5M1kzubOZkfkR9QQVcfdixewG65/y9kvJhisBtWZ1k5McSNlHqUC1ptnpFfshcjSKX73ifDI/6LOvmFx97p+XhXAviODzd9d7Pu87WtwmX2uVDx7fLPXtMd31Q+aSmuw2pul5l5/tnuJkYyV9xM3Md+j7s4azcvKzMydzY6qIaqOu5cBe6x3TbjOAodVoDrLemVr0fnqHylz06lQ/e5Zt2u9+UR4cHxJZ96wqMF1QAN4784zYOfKcBf7nLdrXue50teQvZZnj2/2mvb4GrKv91YF4Ybs18R8n6qQNZM3kzmbO5vNkB9Vx93nLMCuDtdPBOvsUJ1hjXaK8Hx8B+4G6wbr13wM12fzDdjj89izGEe47MhgAuyAZhf7Sm/78vgastfy7PHNXtNeX5VrlcXX3vPeO/wBQ3aF+/2ZvJnMzNzZ7KgaoupoPjF8QwGuA+vgoOF6dDavqgJ15jXvVAPuq6D6Km92sN78Ir0M11fUYbjeuy//DTd7FiP83pGxeq1WPwyzwAjF7kr79vkqdQdnr2v2uTr9+AFD9tXeLN89TN47/AFDdvZ/RTqTl5XJkD2bH1lH8+HsYHf3+rP/Phms/1YloM64vhsUM8N9XRf8Cs/6YL15RflwfCkrw3VAD7Cr3PyyZ7HmMGYwdbEDhjMz3vbt882G7NnH3+ObXevpxw8Ysq/2ZvkcZ/Le4Q9wQnZ28O0u9rW5s9kR+RE13H0M2Od9Ddfnc7mkDtXZ1pNMfTPcDdYj/ep1rTefCI/8G4Os7nVAC7DvvgE1YN+Xw/YwzATY2/YcIKI6UFLyXfWvMgzZDdmjPXt9s6/Xtm38+erx7fVW/I5g8t7hD9SB7Oz3x6NZM3kzmZm5s9kR+RE1RNVxkwH7Cu99yoDKTCBYEaozrZ+wXgP3k8F684z0qtm13nwiPPJvCE7rXm/77r3pVchjzjo1gwmys8Dqld4sYEbJ19Ayf10rHn+PryF7fV8m715/xn+BxwjZ2z61ckazMvJmMmdzZ7Mj8iNquPvEcIYTAXuV7vWT4boSWGdZM1YFncuYkTLPUvkAYu1ab15RPobrkTUodq+3fTVueBXyWB+IGDNWd5sxdbH3+rN4u+Z+36rQMntde3yza80+/h7fqterou8p3oz+huyG7JGZs7mz2RH5ETVE1QFwA3Z3r3/K2QuYGSCxClRnWKtMkZynvpEyzzJYj/KL8uGB680n90Ykooas7nXAgJ0lr9LDEGMGUxd7256ji5DFmwX4KPkqQUuldc2uNfv4V/kqXa8sviu9WT5ze70Z/Q3ZDdkjM2dzZ7MZ8qPquOk0yK4O2A3X+cSwRjvEfh4+6BpwV/pVjxmuR9ZWDa43j9waTuten8ndnckO2HdlVckwZOf2ZgE+Sr6Glvm1
Zh9/jy9Drb5meXxP8d7hP3IHt/oeo+1jyJ6VN5M5m5udPZsfWQdgwB7rvV47IWc2OGYGutlrs0rMax6o38D9ZLDePA3Xr/nU+PI3YN+Tu/PGmj2LscN8VwYTZGcCBCzehuxjviugpdIauFatWg3Z1/rae957xB/guse478N7b6mQNZM3k5mZO5sdVUNUHQAvZGdnSX96r9VJ3euMoLcaVGdc4yT9UILhP32j/Xh/EWWE/lW++A3Y9+QqdMowZ7E+1LF1mSl34Z3gXdlXCVoqra1rzb9ms6/Xlb4rvVlq7vVX/h4F6oyMYW8W2Z01mpeVOZs7mx1VQ1QdQBxkP7mLvRJgN1y/qwJYZ1tTco3PcD+xa735RXoZrq+sQxWwt/11bjQVAPtoHiv83pXD9vCrDAdO8K7sqwTZe3wZ1rZirT2+vmY1fVd6M33XMX2PAh4Zww7ZlfJmMhmyZ/Mj6wA4IbsCW7r7rtUJgJ0NBKvCdbZ1FNd34K70ocIM15tfTH1McL35RHjkdq8DmoB9Jlsps1oX+66cHRmG7Gd7V6/ZncE1gbhSrb2+K0A7wxqs9K5ec683oz9jNzvrvZ9K1kzeTGZm7mx2RH5EDYBHxcT4rtUucJoBlpmgsCJYZ1q/KnpzHfw8bKD1QWK4PuLF8Ut688kF7LNf7AbsazMrdrGPZDF2sQOG7JH+LN6KEEkJWLYariu7OzrbV6nWHl9fs5q+K71P+T4a8Xc3O3c3u9K/qp3JnM2dzY7Ij6gBOAeyq3Gxu78B+2opwXWWNWPWhvP5E3oiVH5RZP5QZgHad58Ij/wv+BMB+0xuZcA+mlfpAcuQPdefxduQ/S4DS61ae3yVau31VfoXGCu9GXxP8d7h72729TmjWUp5M5mzubPZEfkRNQCG7PO+61QVsLOAYhW4zrJe2SI9X2Mz3FW61ptntB/fFwUTXG8++V/uBux1M9kfRnY8YO16iDNkj/M/wVvNF6g7fkPJ17X6x6HVviu9WT5/mbxH/AG+e462D+d94M6c0ayZPLXMiOyI/IgaAEP2Oc91MmBfWQO3GNYoQ+zn5YKuzHBfo+iLhr27PrI+JsDO8qVuwL4vV+VmvdoDz46ckZU2ZD/bWxF6VQWWDGub7atUa69v1etW0fcU7x3+7mZfnzOapZQ3k5mZO5sdVQPACdnZ+c7dc512gNbTADs7xM1enx1iPweBepzhvkYqH2qG61d9OL7QDdj35apksmex5hiy5/qzeCsCfMNKDl+lWhl8fd2u9V3pzfI52evN6O9uds6c0ayMvJnM2dzZbIb8m6IICCtkXwUvldjZnxl7lQ2QWeFu9rqsEut6J+mHEg7/9oxXdbjevCI8OL7II77ADdj35CrcpLNnMf6zbcCQPdL/BG8G35VAx+A631ep1l5fg/a1viu9mb6bmL73AM5u9rZPHZg/mqWUN5OZmTubHVUDwAfZAV6mcvdcp2qQPRMks8LeanCddZ0JNTrDXeOXwug6Wetz9/qfUgXsWdkqmexZux46DNn1/Jlqrw68DCvX+SrVyuBb+bpd6c3ge4r3Dn9G0M58X8h8/5mVN5M5m5udPZt/kyH7iOc6rQawBux5qgDXGddVVFdmuGvA9ebJC9jZutebD8eXtwG7VpeGys0580POrpwKD7kn+Z/gbchuXyZfhlp97fb7rvRm+azs9e71Z/yhn3FsDDv8Vri3nsmbyczMnc2OyAcM2cc818mQPSKXS+pwnW09C+o3cFf60DJc7/WJqcmAfT5f7cZRJZM9y5Dd/hH+LHCHBewYVur5KtW60tfX7lpfe897j/iP3Imx/Wu6kYydOQpZipmzubPZUTUAfKCddUrA3W+NdsDYXcDUgL1JFbCzreNB+qGEw3/6cn+oMq6hAfvz/pqAfSZb6QZZIY/5ge3UB9xTIMYJ3mqgsm1vcG3ffl+G65dhHVg+j1k+L3v9mb6jgBo/9o9k7MxRyJrJm8nMzJ3NjsgH+CA7EMtcTm4M/dN/jzLgMhMYVoTrTOtXTYPXw9gM9xa4Robro14G7L/3171xUrtRNWTPyWGE7G0fLgjA5H+Ct7vZ7XuKb+Vrd6U3g+8p3jv8Ddo5c0azMvKyMmdyWfIBPtDOyDd+e66RIftMJofUADvLuikp4RxfA+5KH0wG7H0yYI+pQbGDXimTvaPHkJ0LACj7n+C9EugwwEr7rvNVqhWofe2u9Fasmcl7h79BO2fOaJZS3kwmQ/Zs/k0sz/A3MXKOu98aGbLPZHJIBbKzrBejyM/ha+C+4oSeBtebX6SXAfufHrpdCWpd7DO57OB7ZxYrZAf4Hv53ZKjCkRO8V4HKVkfPtlpAUclXqdZeX4P2MW/Fmpm8d/gbtHPmjGYp5c1kMmTP5gO1u9mbHy+L+e27DvAZsq8VOZz9nxjWikEq5+uDfqQ+iE75UI+siwWwA7k3SRE1KHaxz+QqZTI/OI1mMT7Utn24Hv6V/ZlqZ6lbDVSu9FbyVap1pa+vXy7fU7x3+DPekzADcIP2uLyZTPXsmyqDdlYe86evIftYZr4UoC3DOmVJ4fxMaHyG+6MU4HrzrP/Pppi+kBk6AbJvkgzZ12WyP8ywdrNXgOw7MlgAV68/i/dKmMMAKu1r30epXb8rvavX3Ovd68/0fQIYtLPmjGYp5c1kzubOZkfkA1zP9QAv/2h+a6QO2k+E7AoAN3uNdkvhnCxSP3BftVindK83LwP29x75dah2QSjdCLOD751ZOyA7wPlAywYWmPxP8VYDlfa176PUrt+V3tVr7vXu9Wf7vmJsAGAG4Oz3p0p5M5mzubPZEfkA1/M9wMtBml+8VsPBiqA9GyCzA93s9dkl9vOQoO/A/cTu9eYX6WXA/t4jv47TuthnclUy2bNYIXvbR/+B+SQowlI7A6RsdfRsmw8T7avpywDa2/b5a6H4GXSK94g/I2hv+/BljOYoZGXkzWTO5s5mR+QDXKNgAV4e0vzi5W723rxcscPd7PVZLfb1J9Fv4K7Svd48eT+0qwJ2gAOyZ9eg2MU+k6uUyd6V5AfZdRkn+at6G7TbV93X1/B631O8Gf1771FOvT8ZzVHImsnLypzJzc6+qSpoZ+Y1v30N2vvy8sQMeSsDduZ1J9ePxAche5e9Afs3j7O72Gfz1W58q0L20SzWbnbGTvMdGUz+TLUrgnaW9bBvfV+D9jHfld6nfDbv8K8A2p0zlzWTN5OpmjubfZNB+1W/NVJqNv0z4xzI3vI5gW/2uqwQ61qLav6lqQqAPbpGA/YrPtpd7LM1KGYrZbI/uLBC9rYP34OyOqxg8mfyZgDtLOthX03flSBSbS1YQPsp3jv8V4N2gPOHevac3VmjeVmZs7mz2RH5VZ//mxffDwB/+hq0X8/LEyv4rQbZWde5iPqAuwJcb57RfnyAHeD6kq3QxT5bgyH72kz2h5bRs1/lAZYxQ9mfqfbqgHKlt301fQG9H4yq/30weff6M323AAbtrDmjWUp5M5mzubPZEfkG7Ve91sig/WpWnljhbyXIzrrGRfUeuCt90Bmw96kKYI+oQ3VUzEy2IXtslrv
Z9TNOAS0sY2MADkC50tu+670ZfjRiuI5ZPjMUr4te79X+OyD1atjO+B0/kjGao5A1k6eWGZEdkQ9wjY9hBe3NL14G7Vez8sQIgatAdsa1PUh34G7AHqOqgL35GLLP5mfdbCrdWCvkVYLsIzmMGfbP8TZoP8d3pTeDrxpoX+mtWDOTN6O/u9o5c3ZnjeZlZWbmzmbfZNB+1S9eBu1Xs/LECIMrgHbGdT1UP9Tw+u4Z7VcbsAMcYPvuk1+HKmSfyVa7uVV4UDFo58uwf463QTuf70rvyr4MoL1tn78WLDUzeff6M30HAHvuWxjvJXbmKGRl5M1kzubOZkfkRz3Vs8H2k0F7814rg/YcGbJbizT30lSVXw2j62SE7Exfoiyw/0TIPpOtlOlu9ts+nDmMGcr+TLWzgPa2vRagZLlG7HsXA2xnWQt7z3kz+q/uaq9037IzRyFrNlM1dzb7Jne1X/WLlwqf+tPfoD1D6qCdbT2tP9T70lSND7ATADtgyL6iDtUbPI+Mic8zaOeDBzsymPyZajdoH/dd6V3Zd6W3Gmhf6a14Lfd69/ozfQ+M+LurfX2OQlZG3kzmbO5sdkS+u9qv+sVLhVO9zqgP29nAsEG7tUmfgbvKBxczYAfqfmkyQPbsGgzZeTN3dbMDtUD7qRnK/gbtc96K4JPl+mComQG0tzp6ts1fi5XeTJ+nTJ/VO/zd1c6ZsztrNC8rMzN3Nvsmd7Vf9YuXCrP609+gfbcM2q3N+g3clT6smCF71S725hPhod3JPpt/0o2sAmQH9nWzA3seuFgfVBkzlP2ZwI0inFzp7ZrX+/Z6u6t9zPcU715/pu8CgLOrve3D970/mjOapXIPnZU5k5udfZNh+xWveCmxqz8z9oBTj4+5Sxm2s62ldVk/S06ewgcqaxc7YMi+qo7sbno10K6UyQ7amR/qWB+GmcCKuv8JoH2lNwtAZPBd6e2u9jHfld4s1waTN6P/6q52gPN7nz1nNEspbyZzNnc2OyI/6umfDba7q32d3NW+XwbtVqLmXpp6k8qHaPUu9ubF84VtyK55A6t0s+6xMdw5jBkn+TNBGwPEPb4rvRl8e73d1T7me4p3BX/Dds6c0SylvJnM2dzZ7Ih8d7Vf9YuXYfuVrByxAWJV2M62jtawxoD7qgvXXeyjXvlg++6TX0t2DaeNq1F5MDBo35fDmGH/9zoBtK/0VgSq1WtmuaZ9Pc/5M32uMq3LTathO+s9BnvOaJZS3kzmbO5sdkS+YftVv1ithJCG7RG5XJDYsN0i0HXgrvLrpCF7j0eMWGpRvnlTg+wzuQqgnXk+O3MOY4b9P2tVB3CrpWdbDm8WeKi4HizrzNDV3rbPX2d7z3sz+u+4r6l0nzGSo5I1mpeVmZk7m32TYftVv1gZtl/NyhETJFYF7QDXOloheg/cDdhjxAS1714RHobss/mK2bsh+2jmzm52gPfBdFcOY8aOtVIGQiygvW1vMDnq7Zp/iwG2K64zk3evP9tnK9Nn902MsJ31fkYlayZvJlM1dzb7JsP2q36xMmy/mpUjJkhs2G6R6Q7cVcbEANyQnfHLjwn6nw7ZZ/PVbpAVutmBfWNj2n4G7eoZyv69q8nSAdzrzwIPWcBh9ZpZfkBSvO5O8a7gv/Lz+ybG7+idOQpZM3kzmaq5s9k3GbZf9YuVYfvVrBwZEsfI61hWPxIfiisuP0P2qz4c9TDAftUbRaV/amrQPp/FCKirZCj7rwY1ipBW1bt6zb3eDF3tbfva55DJ+0R/w/b1OQpZM3kzmaq5s9k3GbZf9dORYXtELhckVrr+HsW2jlaoxl6a+iwFyM46KqZ5cX0BM9XjbvYzcg3a57NYcxgzTvNX7Wo/wZsFeCquB8t1zXIOT/Fe7c/4HWfYvj5HIWsmbyZTNTdKhu1X/eK1CkQatkfkckFihs8Ky3qhMeC+6g/MkH3EJ0ZM9bibXeuGOCPToJ0/hzFDvWueDdKwQKwTvBWB+ErvVV3trY6ebe2907vXn+nzezTDsH19jkLWTN5MZmZuhGbzuZBinE6G7ZZlWRt1HbgrdLEDZ0D25hXlkw+3WepQ7qY3aP8sg3buHEbIwXYMTJBGFZCd4M0CUhVHyLQ6erblWA9V79X+bLWPZBi2r88ZzRpVBkRUe76IyI7IjxJjd/vJcne7ZVkb9R64q3SxA4bs/T48X/zZN2PZNSjeBGc80Bi018xhzFD3P6WrvdefpW4Wb9W1Nmyv4b3an632kQzD9j3KaDrZmaf4nDGbHZEPeJTMdb94sfxYYr0W2/nxDw8WsX4Dd0P2KC9D9s8e7mZXvAE2aH+3X60s1gdqxgw2f9Wu9l5/e895s4BOFtjOsh6neK/23/Gd1yumz/KbqgGKnWBfaZTMqDKvD4Zrkwsn8orhXPVIrV7rmv6G9rn9G//Q/YhhhelnyclVAOxAfcjevPLh9t0nvxaD9n25KqAd4H9QOxmCj+QwwZ8R/94Mpq72tj0HHLM3r7dh+5neO/xXS/mH05tYv7t35oxKCbZn/e1k/81m5990Qnf7CinU+E4eJ2NZ1pPGXpr6SidC9ubHB9rdzR5fh0H7nlyD9vmsSjmMGWz+TLCdaW3sPefN0o0PGLZX8bb/d+1ANZXuEUZzRrNGlQEQs+7/Z8UAEXVxr2VZqnKXe1nNAfdVlwTzuJjmZ8j+3cegfTbfoP27doL2th/vQ+euLNaHaMbjYIMzLDCy159p3Vm8VdfbsJ3Xu1dMf5cV/Ee0ojHpWez3PSNSGe3CAJ97lQ2MsvNv2vG32avotVG7PtXq/abMMSqMcFh9rAzAua7WtPqBuyF7lF+Uj0F7tRoM2r9LAbSP5jE/3DJC6h0ZbMfA1NXetucBtKqwk2VNmLwN2+e043NrlTfT3/2I/4hWf64D+jDiWZVHyczI3e115DWxrLP0N/4BwPPjoTWta8BdBbID3KCdcQYbQxf53Se/DoP2fbkKsJ0dtO/M2gGSKmQod7UDXOtj73O9V8H2XqkCcbbPrZXejEDKo2TW54xKBWIwXtfflL22EfkaV4dlWa9Uocv9JoP3MvoM3Fec3tMge/OL8uGC/wyAm6WOzG6SjOzqoL3tt697jPmhlrHjvErGSbDdIH/eu1csdTPBdtU1YblO2vY8tY+I7XN9RIzjKnZLobvdsiwe7YCxJ40EYT3WStAdMHgvoD+B+6mQvXlyAe27F0cH+d0nH3Cz1HEaaJ/JrQzaR/PYsxg7zkdyGDMM22O8e/1VvXv9meru0QmwvVdMwJfpGmf0H5G72+t2t3ucjGVZK5UNlw3d98ngXVYNuHtkTKRfpJdBO2sdBu3XNXO2FGA7e1alnFMzDNtj/JlgG9Oa99XRu/31ulc+QqiuCdPfz2pVeIh0d/seMV23K6R4fLPXvuIxv5P/Rq2dyobLzNAdqPXZAtzBO1DjvukA/YSfJgXI3jxrd7M3ryiffMDNUkfmDaUaaAfc1a6YxZrDmLHjGJhgO9v6qNZu7z+18jpX7hBfKaa/zxGp1w+4u53p7+GdqsEaa73+i3+OgO7ZoHdEHiuzRszHrH
idXpXhu4SuvTT1m1Z9qbibfdQryicfcLPUoQras7Krd7WP5rmDnnfdmIDsTYbtcf4rvVmg9Slgmeklqcp/Qyul/Pc/Kne3c+uEcTKWVUHM8Paqdh8DA1RmPm9Vu90f9QjfAX8HEWkOuJ/Yzd78Ir3qjY1pPhEeZ4P22Xx3tX/bjz+PFU4z5zBmGLbr+DNBTibvlddLz9arxyWxePeK6W+IUYxAX3tF88T0d8cm9b9Ta62YgehOMcDpFWI4LvZr7ATwfpMBPI36gbu72aP8eMB28+Gpx6D9LNAO1Ibt7FmVchgz1GF7rwzbY/yZAOFK75W3/0xQmel89orpb5RVrMfA/P1uWdZ3MUDUT1pV32pwW3W0DMP1ovByz78f/v/s9dqlZwB/E/N5KqLrwN3d7BFeNcfGNJ8Ij3zQHlHHaeNjgP1d7QB/p3nVLNYcxowKsF0ZQirXvtKb6UeIHjGN02B6QGO6FneI6ZrcKabrP0JVzotlKYm9A7masqA7kP9drwDeAZ71ypJB/HJ9Bu4q3ezN06C934sDcDef3I5yhhrc1X5NCl3to3nsWaw5jBknwnZVqDziv9Kb6YeCld7ubn+3vZUpj5OxVJUJU2ezGTpzAeAfxPx9nvLiVMBd7t9zcsAzy9+UGngHONYtW+9APMB/Lsn0GrirdLM3X4P2fi93tUfWoJzvrnaePPYs1hzWjF6dBtuVAady7Szeq6/3lVJ+EDvpc8CyrLNUtXs7GpyuWCdD9ytZ53a7AzrgHTB8/6ZPMP5RCud6g+7A/eRu9uYZ6WXQ/t0jH7RH1KHa1T6TndHVDmjAdmftzamSYdge679STLUzwU2mm2rldWGqnVFMnwU3nXYOLKu6GLvcDd3XQndgX7d7xneGwfu4/n76zwxrqKKrYP5ZKtfGRf0YtId6cXbbM4H25pMP25VB+2z+KSNk2r7cULpqVhUQfiJsZ5Nhfo6Y1sXd7daomP6mLItd2SMoIvKrdrmrSBW6t4x90B3I+X7K/ht/1COMVfqbNYBfr1FQT6r4S+SvBa5/4T9LYDYrbI+qLXLdWOqJWBvD9jEpwfaZa40dgI/KsH39zRzTDOtRndSRqwzzmdZd+bpnuh7Z5LWxrHEVAwaXVOmYo47kv4Fr8gz65v3WnK/oOu++66+vVbW/zsr5e/kbe4/ziv7GP7KfH38//Z9lPenzS1N7dGJHe/Pz+JhrPvpd7dk1ZGarwfZRKcB2Vgi+M2dHBhPQvMmjZGL9mcRUu3K3jnLtvWL6AWiHv1VL7kR+r5kuVPV1rdjlftpomea7rtMdWHvPtHvETMvKGzPT8jmk2vX+qFfQnWV9rRTNA3eD9givOBm089Zh2H5dhu1xedVg+46blgovSbVixQY3e8T00HLSda98zViW1cQ0gmGHZo+XDXbPKGqWO2DoHu9dZ8RMy8qda8404/2mCvD9JkP4ozV+qleMjmm+3KNjmqdh+zWffMgdUcfsNTQ7Ssew/ep+eWu8I4sZgo+o0vEwjtRQ7z5n81+pk2pXFtO6W9apGv0n+xnjCvJGRmiOZrgpYixD1BqwruTp42Wa954RM7vHzGT+/bKORbmtS/b6ROp5FA3r2lvT6u9wX/XA4a72Ga96oL35RHh4XvuolGD7jDIA/44shR8RVot93a7qtBelqsvrr6GTfiywrJU6rQt8hzLXlKHLnWm0TFSne2SXO7Cm0x2I/25c3ekOrP8+390BztLx3mrgU6Xu92e9g+6M58G6pOvA3aA90i/SiwduN58aXe0RdRi29ynjeHfDdtYO7Zms00fJMHa390q9+5zNX1m+obess7VzJMju8SOjeYoz1WdzDd1/6xTo3jzjr9nVwLo6eG+Zhu+v9NzxXvWe/Vv3O+O5sQBcPTWrRsecOD4mEmwbtvPWYdjeJ8P2WDFDcOYcxgzlGdZWvPxjwXv5YcN61p6RA/0ZI1X9d2Cvqv88vepxPWv2OLNHL2Tn38Q2Xua/+Gfo7/mdVvw9KI6Yaf57rrmM8R8M41QURp88j5/JXrNdejeixiNr0vW5w91d7ZF+kV71QHvzifAwbJ9RBmyfUXXYztoJXlGML0odkYFsrrz+cfKPTVryWJGzNXr+3eW+LndWDB3mUcfP1ukOxL9IFdAZMdN812jnOJaMl40ydL23On6L+fv/FXQ/9f67F7ozn1cRvQbuJ4P25sk5q7358cDt5sNTz+mwfVZZsF1hhvqMFI6Puevc3e1cOm3ci8rnjIK8ktasdsBH/3CQNwalRxnnKevayBxpY+j+p1ihO6AzYqb5Nq0G7y2j1qiZey4HfAf4R888yxD+mqK64hWuiRX6+xm4K4H25hvtV7+rvXlF+Ri2R9aQma8G22ekAMBHxQqn2VXlJqDKcayU18iyrkkBurJoZK1GoN0InNsJiXd3uat01WdlRmWzQHcQ1HFTNHQH4poslLrdmzf+9V6n3TPeW9ZescL3mxTuv9+Nn8lezwo6eJzNXy/+v+iENV3thu2jXlE+hu2RNRi29+7LP9olI2+XTu9uHxFnVZa1T2dM0dyn3rmkjA87rDPWrfrrNnp8M39HM2uaPc894vMj4ppiqQNo32mRfyWRc92BNTObV87E3jFjeudM78y52Yzzy5Vnib+aC8+2vhat/loCr+/u542QaX6RXobtnz0M27Nge5ZYgWqUFF7KerqqXIOrx71Y1ox64YPaA9yz1OuvpJFz4ZenzmVlAPBRGbrn1RBZB9vLVIH4F6oCa/5GqoD30+A7EyBWBvCP+gTjmdbbStOax+e/8B8J2B5dZ/SPF4bt3zwM2zPBn0fJrMnbKb+Ulfd4GOe3W1Ylre4QP/FBa8dD8y4YXk07100FumfBb0N3DujefHi73SPB+yqouQO8V+p6b3m5kJkVCD8DeHUQf5OB/PGKRw0eIRPlZ9j+2SMftqvLo2TqiP34Th8nc6p8PiyLS4w/GrA+cFbrch+VCgQ/DbpnZUfkR9QQVUfzie12P23MTPNdCw8rdr23zHywzA6AK0L4R30D8qznxbqsn++bdEihq715Grb3+xi2R9eh3N1+iirPbvf1s09eacuyrqj35YSMLzbd8ZLOkQzGtZrRzheoqqxdxvHNXO+ZLzONeInk7N961Issoz5zIq9z5peqAndoGf1ZvfrFnbteSprxAtJnkJzVTPUMdxk/+99Bd/YGtBn1QnfG83aoYoD7qhNq2D7rFeXDA9tZpA7bT+put6wTVOnz1bJYVQGI92rHMbCu0y6wHwnnvmnHDyKzWRkA/ETozpAfUcOtDgTV0nzm/yJveOxE8N6849bytT/+9V+rDPjecu/KvMdXAPA3nQji32mmK575HAtqHrgbtkf68cH2KDHBf7a12a3TPkKrn++dx1d9LS2LXauB72r/XrDY28VrIH5le84O9B0ZI+ebGbqPdLkDe/+FgBIAN3Qfz0dADc2Dp9s9oh5AC7wD67reV4P3lrFWDPC9ZefpFchlv4/6NIbGz7Z/aucIG+ZrJ2gd5oD7ybA9WqywnWndWGC7enf7jE7qbq88TsYal8/TdVWAmb06E
fhan1UBiLN2ue/s1t6hndB9RIbuazJvucD4PUY2dI+oIbIOBNTSvM4C74DuuJmWcddO+N7yzgTwgCaEv8kwPlcHzKcfv4xOh+1VofajPErGsixLQ9VeIrRKjC+NtGK1+hwz/q0xvtx010tHd2SwHstNIy9W9EtN12Uqv0yV5YWqTC9VbV5xf88rXqx6+79IrXxB5Y6XQe5+yWbmCy4ZXypa4eWfr17YyrjWFrXGUKhhO6+Ya5tRle72bGlX36+qfw9WntRuFiPFeGN58vm4qtWgtNe/94xFQ4QMMUL9kzOYofvo1W7oHp+ZkXvLnlEE8J5VxN8+G3iPBobR4B3AUvBu+N6TlwuZmcGwOoR/1hUoz7T+1nb1j5RRB46zOmVuu7vb1+jUcTKWZVnWazGOHFEX26z7EVUZ+7Ijw/Pcm0bGy+wevTKTB4zdB2fNV88eMYPE/IgaImtZUU/z4hs1A6wfNwOsfdFq818/dqblrBfDi0dfQV+We8lP0L0Ke+yB7iznxQoRz+lU6W4/QWzrxlIPSx2KUpvfblmRYu3dYOwqYewCYexOXi13uX8X43gixrEv1TrQq3a67xybM9Pxp5Q5mzubfcuf259jzMytlghFj5qJqusf6IybAWp0vrec/V3ILB3eCp3Y70bTMD7DROlq17zC+bM6O9w9Sibaj7O7PUpMNTHVYlmW9U07X4S3UowvBa3QIc7Y/bxavX8TjGtUoZO+7bP+OJg70Hd2ugP7XqQ6elzA3s7zrJepAjkvNc3udo+oIbqO2Vqi6rl7xb1cFVjb9Q648/19zm/tuldkeukocyf8s75Bd/X73hGtgO6s559ZD+fhOnBXgu0rpFKnxavsa+i8rxzLOkuscG61KoBSRiC++hhGx2asVAUgvgsg96oaDN+VA4z9reyE7m2/c0bMzOQiKTsiP6KGqDqiaomsp3mtGTcDGL7f/feNZ8kC8C2bG8IDfM8Dz7rSBc92780od85P6dqfidqFqFAva3c729qx1ZMpr4Vlfdau8QCVVGXMRa8qnHfGkSa9Wj1ahnFkyo7PqSoZzrlrZK/RURInjHvJHPUSkZ2ZH1FDVB1RtQDxYxlWvWR11YtWFcfOtIx9Y0ayR3iwjVWpMNrk08galnW2pJX7uxT7r2LWuHxuLaue9s435NWuebyMc6sBznNT4YcDH8M17YDuvWJcJ0N37pzKc91nIPjYfnOZGbm37BkxAO+K4L15xYN3w/f9AH6HskEzKxyuAOMfdRXMs6y/RaPvI2XUumpPm91uWZbFLsZxGVXFOPIF6L8GWEdcrM6osE6Mo2UYx7JUmYPOPI6FOWc0a/TvRWnEDDZn3nJPHDMTUQNTHY+1zNbTvO5iHDkDaI6dAeLX9nXG/rEsDDPQ30Fflmexb9Cd8TmmR6PQneX8WKHqe2lqpNT/kCwd+VrT1imwNmP+acV1ZYYyjDOrgb2AaWXGjmua8e+mwjr1/m2MwENGkGzo3if2HGzKmnmZKtAPz2aOre2nM9sdg7m37Kz56izAO7KO2Vqi6rl74V+vGKnCd2Dt3Hdg7f3lMwzdcU/HAOEBfhB/U3Ug/05R3fFs5/Nw5QF3i0/+4+TWLCTJhH1ZXbcZuawdxlHaeXzV1/KKdoDFtg8fhN2RsWOtKgDxHetUBbr3auRcAHxAfARsMcPwUVA3ekwjWe52/5zZ9tXJnc2OyI+oIaqOqFoe62leXF3vzVMHvgN7u9+BegC+5f6prGeeT6CXkQ9dGUtz8vNj1lgbxmtlRMHr9xm4V1m0GbH/sbLXxyBDO2tEjJ2symJfz9O73EfE2OVeJYMRiO9Ypx3QvVeM52Ikh7U7XAGGu9v9rqxu95FM1dzZ7Mf8zBqi6nisZbae5hXf9d78YqQM34F6AL7l5UH4lp8nNRh/U8+seHOiGHl+/UvVuryY/+gtS12sL3CspIwXjbFr14vvmDX6wrv+ffwix5UZverN2PEiqh3r1LvHyEtUVx/HjoxbDltG1Rz2rJ0vVAXm7jt23+vccsf3zTne2ezoGrLreK5n3if2JYcrXgoZXSPw+4WrK+5kHl+8uurZcfdLOB/PQ/T5uJb/+v+y9WpdMtZnVu/Wl3HNLTnVAu7WnNQ+HNXkD+lxzT4o7Fb1c10V8O8CPiOrsAu678hghNU7MnasFSOAZYTuQJ21GslgvHZ35zAf02jWKEAbBWMZIFoVfjNA7wgQFgneo2oxfJ9TRQB/AoRvNfBC4U8wnv158JN64DzbObFS9P8HSU8ghENfhXsAAAAASUVORK5CYII=" id="imagecb18c62e3b" transform="scale(1 -1) translate(0 -135.12)" x="0" y="-4.66125" width="360" height="135.12"/> - - - + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - + - - + + - - + diff --git a/doc/_static/mne_logo_dark.svg b/doc/_static/mne_logo_dark.svg index abf08002f93..1afbae82f9a 100644 --- a/doc/_static/mne_logo_dark.svg +++ b/doc/_static/mne_logo_dark.svg @@ -1,16 +1,16 @@ - + - 2022-04-13T15:28:22.830772 + 2023-11-07T13:26:53.926101 image/svg+xml - Matplotlib v3.5.1, https://matplotlib.org/ + Matplotlib v3.8.0.dev1998+gc5707d9c79, https://matplotlib.org/ @@ -22,843 +22,760 @@ - + - +" clip-path="url(#pa1d6d36807)" style="fill: #808080"/> - + 
+iVBORw0KGgoAAAANSUhEUgAABdwAAAIzCAYAAAAagNu9AADrmklEQVR4nO397ZLrOLKsDXrXm3Pm/q92bM5YV80PlFpKLX0QQADhHnA322a7u0n3IMiUyEexgv/55/+D/+Kq/rq8Zbz+wn8S0/9U5lrMim0tLau6/sY/2SUM6e/sAh6UtYY712D1Ma48lhW1R9cbWWNUbRE1RdSSXcdsflZ2Ru5opkIee9auNWQ+nirH4gxnrMgYyWH9G6mYo5A1mnfC/chMpmrubHZE/nVa/Fr/kDzrNJ+YZ8GOen6GjDNg821xWGDx4yKrwffnC41lTS2rglThOsAF2IHctTRov+pt0D7mw3HzyVCH4gPQCQ+2CnnsWaw5rMfiDGeoZozksP4dVsxxVlyeM+tnA/OQHeAB7QmQ/VF9wP05LBO8t3wOUKwM34HXFyHL2loWs5Th+k2G7I/ZO7MM2X97RvvxQfbmVefm89QHEaWHPsOC+azTc1iPxRnrMnxd6Wc4x1kseWqZmbmz2RH5VbrZkyH7o8aA+3MBWZCZresd0IfvNxnCW9ZdFcD6TWyAHTBkj81Y6W3QPu5V5wZUEXZnZitlssPvnVnO4QSJznCGasZIDuvfesUchawT8tQy1bMBQ/Y/fWL0bz1zwP1u1pQN3lsNPFC4Cny/yRDeqq5KYB3ghOtA/jobsvd4r6nfoL3Ho0YdmV0/pzz4KTxQs2c5hxNWsq4X43E4gytjJIf1b71ijkLWaJ5SZ7naPV5m7mw2YMj+p0+MXtQTA9zvAU2ZcJmx6x348yRWAPDA+4ucbf0t66Zs2LtSrIAdyF/33Wtj0P7KM16MoJ3p5s+gXeshTOEBfnceM9zZlcW6bqznhjGjyvlwBleGc/bmKGQp5allzuZmZzPMZWd5tmk+ER5fa4kF7vfgu9z1/lpVAfxNBvFWprLh7i4ZsH+rYXeeIftr32i/2DoN2tfUofpQYtDOkcfecch6TKzH4ox1GVWuK2c4hzVHIeuEvJlM1dzZbEP22DqaT1cta4D7o5i63lsdnMCX4UeKHfp0gbKeG4tPDDB3t5jhOsBxTjLWSB2yN393s895GbRH1qD4QFT9IZcdUBi08x6LM9ZlVLmunLE2h/UzazRnZxb7d8toVkaeWmZm7mw2kD8ypsozzd1nuJb1wP0mBvAO6MF3IH/NdunKhcx6zqw4MYDbbLHDdYDnPBmyj/q7m33eq8SNYFgdqg8mBu0ceexZrDmsx+KMdRlVritnrM1h/cxiz1HIOiFPLZMh25A9ro7mE/IM+oO/8c9WiMnUya0A34HXF0322mXp6oXPfD5PFAucZZQCXAd4zmFVwH7PWumtAdmbJydoZ+pmbz7aN8cG7c5jz2LNYT0WZ6zLqHJdMWb479w5zorLU8tUzQUM2dnqaD6/9PPLfDekNHwf16ld8FfV+wejcM4ZxAJdFaUC1m9iOteG7DPe647D3ewjPhEe+Tem2aBf7cFIAXzvzmPPqpTDeiyMx8GYwXgudmQwnouRjJEc1s8S5+hknZA3k6maCxiys9XRfN7q90iZLPDesvFvdr7U4Dvw+iQzrKWKVkKp/T9k8YDSU6UG1wGu6yZr/apA9ubvbvYYL4P2yBoM2p3HnlUph/VYGI+DMYPxXOzIYDwXrBm7cpiPZWcW+/fXaNZMXkamImSfzTZk56qj+VzS6xnumcCZqesd+POEqAB4wBCeRUwg04qVIli/ie26NGSP8NeB7M03rt6qkL355N+gGrTvyXReTNbpOazHwpjBeD4Y12lHBuO5YM0YyWH9vNqZszOr8vdyRt5MpmquITtXHc2nW99fmsrQ9d7yOaTY/f4oQ3jL6pcyWAf44DqQu6aG7Fe9V3i6m/26T/5NaiZoV8xWeQBVOD5mKMKaw3osjBmM58Pr5AyGDOc4iynzNMgOzIH2CpC90nMUrgD3e1geeG/5+DefR8rd7496dyExrbVlrZY6VH+UAftz9t71MGR/9oyvtypoJ7k5lITdmdkqD70Kx8cMYFhzWI+FMYPxfHideDL8N+4c9s750SylPLXM2VzAkL0YZP/X5x+gB7g/7QjAXe/PqgLgbzKIt6qpElS/iRGuA/lrbcje473Kt343e/OK8OC4QVSE3bPZSg+ElfOqARjWnFMzqkBRr9OaDMZrdiRjVw7zsezMUvhOVuosV8ucyQVyR8aoPzNE1XD3idGLevqB+yvD7K73VgOfqgH4mz5dkIznwTpH2ZB3tVjhOsCx9pVGxdxztEA7M2RvfvQ3ZgMeuTVk5qv9U2MF8L07jxmMVMo5NcNwd43/jgzGdWLNGMlh/azambMzS+H7WClvJjMz15A9v4a7T4TH11rmgPurIAb43urgU1UA/yjDeGulGKDuLjHDdYDjXFTrYr/naEH25ssL2tm62ZuPQbvig5LKg6hCHjOEqZRzagYjsPQ6rfF3xtoM5zgrOk8tczZX/eWn2c8LETVE1dF8umqJAe7vCsgEyuzd78AZAP5RVy5y1nNlrRUDvM0UO1wHOM5RxjoZsn/y5YXszY+nA6L5cNwsZt84Kz4sVQbfu/OqQRjWnFMzDJDX+O/I8Dqty3AOf85o1gl5apmAITvD80pUHc1nuJZ44P6o7JEz9zruYga6r05k9trtVs8fBfO5tDgALZsUwPpNLOevKmRffVwqkL15Rnq5m31VHQbt6zMV8tizTs85NYMRjJ54DDsyGNeJNWMkp9Ln4WiOs/Lz1DIBQ3aGZxWmOgDgv6uB+00sXe/AnyeAHdoawr/X6B8T+zlnEgt0VZMSWAe4znNVwN5yDNnvntF+9brZm09uHcr5Sg9qCg/A7FmVcpyxLoMRWFZYpwrHcHKGc/bmjGaN5imNb1HLVJ7JzgC3GWq4+4RB9kf94G/sBZBM8B3QA/CAIfysdsPFiGuKCYhad6mBdYDvWqoM2FuWIfvdM9rP3eyr6nA3+57cqqCdHSSw5jhjXQYjGD3xGHZkMK7TyRnsOTuzFL7zlfKyMk+G7AzPKEx1AF+vh59/w5p2w+bng2SAxooAHnh/wTCs6eliA5xWnxSh+qOYrr/MtTRkv+JryD7mU+fGMQt2Z2ZXfyBlh987s04GWIwZjOejwjr5GJwRneGc8RyFLKW8mcyZXEP2vPyIGu4+87V0Xgu/R8o8HkgGaGbrfgd0AfxNBvGW9V3qUB3gAus3GbBHZqzyNWQf8+EYGdM8tG+klTrLMzIV8thhwskAizGjAmj3MazJ8PW6LmMkp9Jn4WiOs/LzZjJncg3Z8/IjaoiqA5i6Ft7PcDd8fy11AH/Tp4uPab0tK0IVgPqjGOE6YMAem7HS+yzI3vyifOrcQHpszJ5chQdh9qzTc07NYASKPgaODF+v6zKcszdnNGs074RGhZlMQ3bd/Kg6gPnr4KZ/8M+1l6YywfdWAw8QfnVhqEL4m75dpEzrb1nVYPqzDNff5e/OM2T/0zNe7mb/5nHuzbTS2JiMTPYH72qQhBH47cioAhXZ1qnCua5wDDsyWD+jWD8H2XMUsk7Im8mcgaszgB3Iuy+ezY7Ij6ghqo
5AwP78X10D7o/Khu+tBs7u95uqdMG/05WLmvG8WHrKhro7xQrWb8o+FxUBe8tZ6W3IPu5TB7JH1JHVVT6TrdSNpZDHnsWa44x1GT6GeP8dGRXOw8kZIznsQJp13WayTsibyTRkz8mPqCGqjoWQ/VH9wP1RbPC91cEHeit2wX+Tobz1TtngNlvsYB3gOEcZ66Texd78DdnnvDhuJJuP9k21Wjf7TK7Kw2lFqGCIdV4Gm/+ODDb/HRkVjuHkDOfoZCnlzWRmQXbl+3GG/IgagG2A/VlzwP1RDPAd0ADwwPsLrzqIf1TvHw7ruTxZDGCWVQpgHeA5h1UBe8tZ6b3mGAzZR3wiPPKBv2I3+0y2Uqbhd70c1mNhPA62Y6hwHnyenRGdw/pZO5rjrPy8mcwTIXs2YGepAYiB7JP/oiEOuD+KBb4DOgD+phO74a9q9g+P/dzvFgtoVZYKVL+J6ZwbsM/4nwnZm1+UjyF7ZA3uZj87jxmWsOacmsEI/Nj8d2So+1fJYPx7GMlwznjOaNZonsI9xWymIfve7Ij8iBqAtC72t/p7FXB/CvmfGMCxGoAH3l/ADOupJCbYaOlIDarfxHa9VwbsLWu1vyF7jJdBe2QNBu018tizWHNYj4XxOE6EoyeeBzb/HRmM1+pIxkgO62fgzhyFrNE8pX8BqAjZsyF3dn5EDQBFF/v/9GJN1gP3TwUwAGNFAH+TQbxlxUgVqt9kuP6YXaOLvWUYssd4GbJH1qD4cKH00Fj1gZ8ZmDBCplMzfAx6/jsyKhzDyRnO0clSyjNkPy8foAfsz/r530FngGa27nfg9UWgBOGBzyeeZZ0ta6fUgfqj2OA6cAZgb1mr/dcdiyH7qE+Msm9ws2s4pZt9dyZ7lnPOBMg7Mtj8d2So++/IqHAMJ2dUzFHIUsozZNfJjqqBaUzMwHrcO9wfFyIbvrcaeKTcBf+sbxcJ07pb1hVVgumPYgTrwDlwveXtyDBkj/OqBdmbR24NmfnuZo/NY8+qlHNqBiP0O81/Rwab/46MCudhJGNXDvOx7MxSuJfIyDsNsp9873+TWBf7J70eKcMAmJUAPKAN4R915YJiOhdWbVWF6Y9iBetA/vobsPd6r/I1ZL/uY8jubvY1mc7am+OMdRk+Bj3/HRls/jsyTv78c45OVkaeIfu+bIb8QoD9wecf4OoM9+zu91bDb7FB33cXWRUQ/6irFyHbObLylQ1ws8QM1W9iODcVAXvL0YLsK+plhOzNK8IjH7JH1KH4T1bdzR6bVw1ksOYwZhjAxvvvyDjNf0dGhWMYyRjJYf2MZc9RyMrIy4Dsive+2dkR+QAPZF/cdNX/0lQG+N7q+C1WuHsSiH9W78XLeg6t12IAtExSgOo3MZy7jPUyYP/ka8je58Fxg6d8w63SWZ6Vyf4wzgwzqoAmxgwfQ33/HRnq/s5Ym1ExRyErI28UurqLXSf7pkMA+7P6gfunECYAD3AD3JNB/DtFXPzM55xFDLBVVUpQHeA61wbsM/6rfHlHxTS/epC9+eTWoXrD72722DzmB3/m42E8FkZgxnYMFc6D/fMzKlxHIxkjOZU+x3dnKdxDzOQZsq/Pnc2OyD8UsD9rDrh/KyIbHqt0wT/q04nMXk8FMQFGS09qQP0mtus+ax0N2L/5cnexNz8OqH334agnu6P+pJExM7lVu96YIQMjlKuSwQjl7B/rvyND3X9HBuPf2kjGrhxD9rmsjDwlyK7aSc7wrDEL2QsA9mfFAvdnsQN4QAPC32QYb1lzUgXqN7GBdaA+XG9ZmoC9eXNDdrYu9uZjyB6Rr9ZNpPLQyvzwv+uYWI/n1Aw2/x0Z9tfy35HBeJ2yZozkMH8njWZVvX+4aTdkV7vvnM2dzY7IN2B/9vhDa4H7nwVwAXhAH8Lf9O0CYVhry1opdZj+KEawDuSusQH7VW9uwN78DNlX1qHaXWPIHpvHnsWac2oGIzCzv5b/jgw2/x0ZjOdhJMM54zkKWYBWF3vbdz/Yn8nNzgYM2P/0+Kqf/22UAZkZATzwfuEUQfxNVy4olvW3rEdVAunPMlh/l787b/3xGrIbsn/20O1wUYPsM7lVH5SZs1jByakZbP47Muxf239HBuPfMmuGc3SyAC3IrnbPyZDNMIed6blvoJZ7h/vjzllgmRXA31SlG/6drl6EbOfF0lM20N0pVqh+U/a5qAjXW85Kb37A3jwN2T97nAnZZ7KrQ/bRPPYs1pxTMxjBnP1r++/IYPPfkcF4HkYynKOTBXhUDHPubDZQp4ud4V8N491ImWdjFgAP8MHeit3w39R78bKdMytW2cCWQexQ/SaGc2XAPuK95hhO6GJvXobsDPkeGcORxwwbWHNOzVD335Fh/9r+OzIYQT5rRsUchSzgDMjuLvYxGbC/8vlfLddmuLMAeODPhWSFuZ9OVmUY/0ozFz/r+a0kBgirIBWoDvCc04w1M2D/5LvC05D9u4chu0quAmhnz2LNOTWjAmC0v/13Z7D5s2aM5LB+R+zMUcgCdCC72r1mZi5gwB5dR/N5W8vYS1OZATzAD2lP7Iof1Q54xni9sADTk6QE1G9iuk6y1q8CYG/+huzzXobsUTWc9ACiANlH85iBA2vOqRls/jsy7G//nf7OWJvBnqOQ5Xns6zJnciOysyF79nNNZB3N53ItY8D9z8DfygbHihAe+H4BZK9rVTFBS2udFIH6TWzXaHW43rJW+58J2JtfpJche1QNmZ301R+4FKB+JbjhjHUZ9rd/Jf8dGRX+jkcyRnIqfQ+pZKl0sbd9dTIzcwED9sg6ms9wLTHA/VlsAB7QhfCP8pgay3otZZj+KDawDuSurQH7Ve8Vnu5iv+aTfzNoyM6byf6AfnrOqRk+htr+jGDU/s6IzGH+HlLIMmRfk5mZC8xBdoYxMQzPVHefsOfNNcD9RdAvscDhdwupBuKB6xcXy9pb1hVVAemPYoTqQP5aV4LrLeNcwN78Ir0M2aNrOOlBxJA9L6sKEGLMYIRa9rf/yf47Mhg/i0YyRnJYv4N2Z43em6hAdqWmipnM2VxAv4ud4Xkqqo7m81J7gPuzXhXDBIIrgfhnGcxbmcqGurvECtVvyj4Pu9fHgP2VZ3y9huxr6zBk581UgPrMMKAKrGHMsL/9T/bfkXHiMYxkjOQwfwftzDJkj82byczMNWDneJ66+1zSzx+BWVCZtQv+UZVB/LN6L2bG82WtVTa0zRY7VAc4zlFFuN5ytAB7863fxd68Ijxq3BSe9jCi8LDHnnV6DmOGj8H+O/0ZoSjT+uzw35HB+rnN+t0wmrM7awTKqgB2tczZXAN2jmepu8+Q/uxwZwXwAC/U/XQSK8L4V4r4Y2A9v9XEAGEVpADUb2I5pxlrpt693vxX+fIC9uZXq4u9eeTXoQa7s3KrjoxhBwKMUOjUDPvb3/48/jsyWL+HWL9/duaMZrmLvUbmTZlz2A3YY+toPv98HynDAuCB9wfODGoN468rCxruvn5Y4OjJUoLpNzFdN1nrV6F7vfmv8j1jTEzzivLJvynLHldjyM6Vx57FCI5HMkZyGDPsb
3/76/rvyDj585Q9ZzRrJ2RX6ihXywQM2AsC9uf/qn+G+6sDygbHSt3wj7pycWSv7QliApnWvBRh+k2M16LhekTGKl8D9n4fjq56Q/Z9uYbs81msOadm+Bhy/ZVrt3++f5UM1u+FijlA7VExSoB9NvdkwM4C15tPhMelWmJemqoC4QENEP8oQ3nL0oboz2KE6kDuGu9cE9Xu9ebNDdibX03IznCTaMi+PlMhjxkKsOYwZlQAaPaP81eu/UT/HRkVjmEkwzlNlbvYTxoTY8CeW0NUHc1nqJYY4P5KTKNoHvVpwdVg/E1XTz7LObAsoBZEfxYrVAfOAestT7d7vXkbsI/5yHU/LKvBkL1GHjMUYM05NcP+9t/hfaL/jowKxzCSMZLD+t0zmgO4iz0yLyvTgD23BqY6AOC/K4H7s94VzQSBq3TFv1PvhcN0bixuVYbnr8QM1G/KPieG6yP+ZwH25hflU+cGLQt0Z2ZXf/BjBgO7slhBB+NxsB2D/e0/6s9W+44MNv+TM9hzAH7IrnKflZU5A9iBOciuDtizn5l++8zX8uZa+PlfkVlQmXEczbMqdsVf0cyFx3YOrc/KhrNsUgDqAM95qwjXW85K7zXHYMDe6xPhoX3DasjOk8eexQi3qmSw+a/OUF8f++d478pQ9z85gz3Ho2Ly82YygbwuduXnhYj8iBqi6gAuXwf3Dvfn4jNBsgKEv+nbSa8M5D9pFUxivQ5WiwWsVpAKTH8Uy/nPWLsKcL35awD25sk3JqZ5cdyoNR/tm1ZDdp489ixGOD2SMZJzIkSzf463/e0/639yBnuOu9hj8rIyDdhz8iNqiKpj4hp4P1Lm1cGxQXiAH8AayMeKBTxanFKE6TcxXdtZ67hzDQzYHz3rAvbmE+GRf9OY8YCSma3yAMg8xqVaDmMG4w8S9s/zV67d/nr+OzJYvw9GciqOiql8z5ORBxiwq+ZH1HDT7Kigm/7BP30z3Jm64G9SBfE3XbmwGNbZsjKlDNIfxQTVbzJcj8o4E7A3v0ivfLB999G+cTRkr5FXCXbsyjkRctm/rr9y7faP99+Rwfp9wPqdA9QdFaNyfzWTacCumR9RAxAK11/913MvTWXrgn+UOoh/1NWLkWXtLeuKqkD0RzEC9Zsy13v3uqh2rzfvFZ61AXvzifDIh/2njYqZyVV5CGQGBJVyGDPY/HdkGCTH+CvXbv94/x0ZjJ+hIxk7czwqRjMP2PsDyaOy7tNnsxnyI2oAlgP2Z80B91dihvDA55OkCOMf1XsRM50XS1sV4fk7Gap/VjW43jIM2OO8uMbXGLIbsrPkMWexAg/GDMbzoeyvXDubv3LtJ/rvyGD8DB3J2JlTcVSMyr3VTKZiF3s24FbPvykCsE9cA/HA/ZXeLTYb8K0M419p9I+A7bxZMWKAtdlihumPYjhXGWtluP7O14C9z0e/Q8OQvUYee1YVsMJ4HGzHcJK/cu29/sq1n+i/I4Px820kYySHGbAD+9ZtNEspz4B9b/ZsflQNm7vXv+pv4Ad/4580mMzeDf+obxdARSD/TtHQiPWcs4oBtipKBabfxHKeq4L1lrP22E4E7M2PZ0xM89G/iczMN2SPzWPPOhWsMK6V/eP8lWtf7a9c+4n+OzJO/75xF/tcVkbeaYBd/bkkogYgvXv9l96syc+//+OfIUwQHuCHsgby42IBi5au1GD6TUzXftYaGq5/842v24D9ikd+HYpdMhkPDwoPg+xZjAC8Sob9dfyVa+/1V67d/s5YkcMM2Xfe5yjcU92UMYfdgD0nH5CA66/0fqQME4QHPh8UO4wHrl9kBvOW1aQK0R/FBNRvylzXKnC9ZazyPQewN68IDw7gr3wzq/YAofBAyJ7F2lnICHDYYJr94/yVa+/1V67d/hwZlb5rmAF7289ZzzJg35PNkC8K11+pb4Y7G4S/SR3GP6rn4mRYe8u6qgoA/VmMQB3IX+ud66IM15u3AfuYj7vYZ/OVRsXsznTWvoyRHMYM++v4K9fe669cu/3z/VkzRnOYIbsC9FYZE3MaYFd+FrmJBbAveE6df2nquwVmgcGVYPyzRi5ulvNiaSob5O4WK1C/Kft87F4fw/VXntF+fHC9eXHcRCnf1Ko9RKg8EDI/WFfKYfyxgO0Y7J/jvdpfufbV/v5ciPevlFENsCtkzeQZsK/Pnc2OyC8M119pHrj3BLPB3m+LrA7kX2n2D4TtHFrXlQ1n2cQO0wGec1YRrLecld5rjsGAvcejRh2KN/QK0Ht3XiX4vSvHMM3+u7zZ/F27/XdmMH52jmQA9SC7AfufygDsquNhsuE6MA/YSUbD/OvRXcs64P5KnwpkBLlXTkpFKP9JO0AY47WwUixQtYoUQPqjWM5/1rpVgOvNn797vXkasH/30e0cycpWGRUzmseexZpzaob94/xde46/cu0n+u/IYPyhADBgV8kCDNhX585mR+RX6l4Pemb++VVMJjxmH03zToby8WIBkBaP1CD6o9iu5+pgvWWt9tfoXm+edQF784nw0L65dRe7fh7zg3wFULQjw/5x/q49x1+5dvvH+7NmAP1QjxmwV84yYF+bm51tuP7K5w/9fNsgHRardcW/Us9FkL3elrVDygD9WWxAHchd30pgvWWcCdebX6RXHcAeUYe72NdmKjyIGrJzwagdGfbP8V7tz1Q707rYX89/R4a72GveawD7Abtaw0lm7mw2wDEahuU5sPlc0veRMu+MGMBwBRj/rN6LiOE8WGerEjx/JUagftMpYL3l7chYd0wG7KM+ER75N3iqHSyG7HFZzB3zrMCA8TjYjkHZ37XH+DOtS6+/cu07/HdkMB4DUKuL3YD9t9zBzp0LcHSvszwDNp9hjc9wZwbxwPfFVQXyzxo9+SznyeJQdWj+Tsww/absc5OxRobrrzzj6zVgX1eH8pgapYeZauB7NGcki/V4GI+D7RiY/F17jr9rt/+oP2sGaxc7O/jefc+26zzdpHRPqpoLuHs9uo7m8w+w4qWp7CD+plOA/DtFXUhs5/UUZUNYBSnAdIDnXFYF6y1HC643X97u9ebHA9ibj34XxUk3+Apd7KN5zA/ZjHC6SsZJ/q49x5+pdqZ1sX+8/64Md7Fzw3xAZ0zM7vvDmczZ3Nlsd6/H1tF83tYSD9zfF/FarMD2ygmsDuWviAUW9ujxmlOs/2SpQPRnMV1nlcF6yzJcb56RXnH1sQD25pFbg7vY12Yasu/LOTXjJH/XHuPPtC69/sq12z/efySjUhd7xXsMwIB9RWZmLpDfvX4QXH+lH/yNf1LB8acDZ4XxN11dbIN5LjHBT0sXoj+K7ZrKXNMKXev3jFW+Buz9Phw3Wu5i35Or0PXFCr935TBmMI5WUPZ37TH+Xpccf+Xad2W4i71OzmiWAXt8ZmYuoA/Ymf4VdUAtPx+NskGxMox/VO+Jyl53y5pRBYD+LDagDuSvc5Wu9ea/0psbrjc/A/ZVdSh2sc/kqjxIsT8Asz7UM2YYsud5r/Znqp1pXXr9Xbv9d2ac3sXOfn9hwB6fmZmrDtebR41nvrvPL30eKfMq
lAUGf1tUJSD/rJGTzXJerBrKhrk7xQjTb8o+D7vXRrlrvXnzw/XmacC+qo7TAPtMblXIztwxf2qG/XX8mWr3usT4K9du/2uq0sVe8f7CgL1G5k0zgN1wPbaO5vNV/TPcPxXHBH0rA/lXirhomM6fNaZsQMskZpD+KIZzVhGst5yV3muO4YTu9eZV44bLgH19pkIe88M2K6Rgg0b2j/Nnqt3rEuPv2nX8d2Ts6GKv9B25M8eAPTZvJnM2V717vcqz3t1nSLEvTVWB8cD1BasG5j9pN/hjuyaixQBSq0kFoj+K5TrIWLsKYL35nwfXm1+UT+qNzpOH9s3nKZ087F1mO7MYYUuVDPvneK/2V/Ve7c9UO9O62P+aGLvYK30Pj+YYsMfmzWTO5ip3r1eC68HPv7HA/ULgSzGDV4P5dWIBkVaeFAH6o9iu4az13LkOhuuPnnW715tPhIf2Dai72HnymB+4WXPcKZ/r79pj/E9ZF6Y17/VXrn2H/0iGATtnDrBvTj6w97iUGjxmMgHDdYZnvKg6ms/bWn7wN/Jh8bfFYgbyN/WerOw1t6xoqcPzZ7HB9Jsy19lg/ar3Ck93r1/z0b4JPQWwj2YqAP1KD/eMGfbX8Weq/ZR18Zrbf9S/ypiYSt/BwN4OdoXmh5m8mczZ3CzAnv1cw1JDRB3No7uWn6/hDGC4ApB/1swJZzgnVk1Vg+bvxArTgfxzsHttdhyv4Xqkl/RNT3gdpwH2mVyFBznmh27WHMYM++d4r/Znql3Vu9efac17/ZVr3+EPrO9iP/l7azTHgD0/byYTOLt7PTs/ooa7T8gz7/eRMu8KZoK+VxZDEcq/U8RFxHT+rDllA1oWMUP0RzGcr4y1Mlh/51sfrjevCI/8mzAD9vWZCnmsEGEkhzGDEbgw+bv2GH9V717/U9bc/t916pgY1u9fwICdIW8m03A9Lz+ihrvPfC1vroXxGe7sXfHPurqIlcD8J2VAP8brIlIMILWaVCD6o1iug6pQveWsPbYT4Xrz4+lebz75N2LKN6NK/1R318PwzqxKOYwZ9s/zZ6pd1bvXX9W7159pzSv4M46JYfw+GckYzakK2Kvfc9506miY7PyIGqLqALqugzUvTf22GMzgtecknALno8QCIq08KQL0R7Fdw1nruXMdDNcfPSO9uLrXm0/+zZjyDekJDzu7Hoh3ZjF2mFfJOMnfte/37vVX9e71Z1rzXn/l2m9iGxNz8vci4BecZufNZBqu5+RH1BBVx8w18K/WAPdvUgbyj+o9iQb0ViWpw/NnscF0IH+NDdavehuu9/vUuBlTBOwzuSoPWKwP+NVyGMEL2zGoQtNef1XvXn9V717/U9a8gj8bYG/7nJkB8HewK9yPZeQBmoBd+TkmqoaoOgLgOoA/roWfP4pjgMJVgPyzRi8EhnNi1VM2zN0lRpD+qOzzsHt9dhyvUtd68432M2BfVYcBO2cmexZrDmOG/fP8mWq397y/qvdqf+XagRpz2Ct9Jxqwa+YZruvlR9Rw0yK4/kp/dri/Owgm6HvlZKlC+VeKurCYzqE1pmw4yyR2kH4TwznLWCtlsN68DdfHfWJqYrgxOw2wz+QqPGSxPuQz5zBm2D/Hu9df1bvX397z/kznk9GfrYudFX4zd7FXBOxKzRynAXbD9aaNYP2dro+U+XbAbDD36kmuBOa/KQP8sV0XEWIAqFWlAtFvYroWqkL1lrPa/zy43vyifAzYI/LVbsoVAPtoHvODPiO0YMxQ9meq/ZR1sfe8/ynXyg5/NsDe9jkzAzBgz8iayQP2/suDm1S71xn+FXIRuP5L/65J3Az3T4vEDF17Lo6T4HyUmICktV9qAP1ZbNdv1nruXAfD9ZsfJ1xvXvk3Zix1nATYZ3LZH+x2ZbGCfMYM+8f5M9Vu73l/Ve9ef6Y13+HPBtkrfI+MZAD1ALtCo8NMXkb3uuF6bg0RcD0YrL/TnpemqnXHv1PvxWVAb1WROjh/FhtIvylznStB9ZahAdbvvnH1ssH15hPhoX2TmJWtAthH89izWHMYfyxgOwYmf6ba7T3vb+95f6bzOeKvDth3ZBiw898XZeSd1L2eDbez8wEpuP5Ke4D7N105ESpQ/lGzF6iBvRWharD8k1hB+k3Z52L3+iiD9ea9wrN253rziVH2TeJsDQbsXHms4HtXDmMGG+Bhg0eq4FTVu9ff3nu9V/sbsJ+TYcCukQWc1b2eDbez88XB+huff34+mjEB36snUBHMv1M07GE6n9ZrZQNZRrFD9EcxnL+M9VIH681/hScvXG9+PDcl6t3rs/knjIjZnceeVQVcKAM2Nn+m2lW9e/3tvde715/pWhnx7wFIFeawM36HAAbsozmjWTN5u7vXFe/ds7Nn8wEeuL6hSexzh/u3AhgB7olg/qoYYOA3rbimFI77JCkB9EcxXUdVoXrL0QPrzddwvc8n/ybJgH19pkJepQdkxgz75/mrevf629veLP7qXeynAnZmuL4zqzpcB/Yf40ymevZNs3CdsGu9Z/O5kTKKQP6mkYvnREi/W0xQ0/ouVXj+KMZrLmtdd66FwfqjZ7Sf9I3JG4/8OgzYa+QxP7gywgtD/Fh/ptrtbe8V/qdc44ABO0tGJcDOfj80mgW4e505dzYbOKprvUdrZ7hfOVhmKP+s0UU3qLdYVQGYvxMjSAfy17wSVG8ZK70N1/t9OOpRnmNowB6bx/yQzAov1CGPsj9T7fa2d7Z3r78Be67/jgyPh9l772W4Xi93Nhvg6FoXeXbNf2nq1YVSAvPPijqJBvcWkA9sM8UK0R+VfX52r9Gu4zVYj6sxsjZ3r2tmG7DnZLE+jDMeB9MxMNV+yrrY294r/A3Yc/1HMgzY+e+FPBqGN3c2u1LXelJT2M/HYCbA23NgynD+k3aBLabzrqJsyKoqBYB+E9M5zlg39W715r/mGAzXezzy68juClF6SDBgn89iBBiMGcr+rp3bW3W9VSE403r3+huwx/qPZOwA7MzQ293rf0rpvlk1F6jTtc7yr63/1beXpn4ulhXM9i5OVUA/KiawaOlICZ4/i+2az1rLCt3qzV8DrDdPw3X2OhRvnA3Y87IYu793ZNg/zl/Vu9efpW571/Hu9Tdgj/UfyTBg574PMlyvlwsYrkfXAQD/fV3L7EtTvx8gK5R/1MgiG9JbFaUMzV+JDaQDuWu8cz12HOepYL35RXrxdAKc2r0+m10ZsI/mVXpYZsxQ75JnWx+W2u19pvcp1zfQB5oM2OMzTn/BKXMOsG+ED5Bzz6x2nz6bC8zBdYP1P/UGrL/T+hnuVxdHAcw/avaEGdhbkaoGyj+JEaI/Kvtc7F4fZbDevFd4Gq5f88i/eVKcqaj0gGLAvi+HMeMkf6ba7T3nrXqd2Pu1VgJepr97Rn+gDmBnvl8YzarevX4aXM/uWq/ybHhTJ1h/p/yXpt5UFcy/0ypIZZDPq2wQyyp2gP4ohnOYsV7qUL35r/CMr9lwfV0dp3Wvz+QqPBSxgu9dOYwZ9s/x7vU/wVv1XNp73tuAPc8f4ATsrN/jO3MAje51w/VrMly
PqyMIrL9z+flYJCO87TkxVeB8jxiAoHW2lAD6TUx/N1nrt2sNFMF68+XtWm9+PHC9+eTfRBmwr8+sCNhHsliPh/E4WIBsr/dqf6baWbxV15vFm+U89nr3+jPNYWdalxH/kQwDds6cnd3ru+8h1TJnc9XhOsMzIbAcrL/Tt5emfhcjlL9p5OSeCOkt65UUwfkrMcF0IHddd66FwfqjZ7Sf4Xp0HaqjaQzY87IYAQBjxkn+9rZ3tDcLvGfyZgLsbXuOdRnxH8lgW/+2D99330jGaA7g7vXIvJnM2dxMuJ79LBZVQxJYf6f5kTLqUP5ZMxeJYb3FpCrA/JXYIPqjste9ElRvGat819TOOBKmeUX56MP1iBrUbuIVHo7Ysxi7v3dk2D/O39683iyA3d6v5Redxnj3+gM1AHul+wSgdve64fo1uWu9KQKuRz1tv1mTPTPcry6mEph/pSg4YnB/prJBbbaYIfpNDOdo9zrtOuaVx3VS13rzivLh6KTPvqnLgvtK/9TWgH1fDiNoYDoG136uNxPY9A8Dc94G7Hn+Bux8Oe5ez8+byQTOhussYB2Igeud68Hz0lSg72Sow/lPyoB6hvxNDEC1ghTg+bNYzn3G2lWA6s2fH6w3T8P1zx66N5ZZN/IKDyzsWaw5jKCB7RiYQCGLd68/S90s3ixrrerNBHmZPk8Y/Q3YeTIAd68z5M1kArpwPbvBCZAG6+/0c8mIEcaOnMzKkH5WLLDR4pIiOH8U43WdtaaG6le8o/1iazVcj68hM1/pAcKAfV8OY8ZJ/ky123vOm+U6YVkPJm8D9hj/1XAd4PsXBKwZozns3euG6++lPG89+/kL4BkHE/Vc/OZ6uNbhrgrlnzVzYRjWW4pSB+bvxAjSgdz13rkmO45z1fGwd6w3v0ivOnA9og53r9fIY354ZgQBykCJzZ+pdhZv1fX2Wu/zNmDf7w2s714H+NZ/JGNXDnv3euX7xtnMU7vWDdb/VOe1EDdSpucAFOD8syIBiOG99U5VAfk3sQL0mxjOy+41UobqzXuVr+H6NY9z4fpMtkr3+mgeexYj/N6RYf84/xO8VQGk13qf9ymAne2zyuNheDKAfd3r7PdWo1kzeTOZWXBdvWu9Elif/dcLD8qZ4V4dzn/TTnhnuD8uBsiqKHZ4/iimc5yxbupQvfmv8jVYv+6jDddn85W613dnsmexPqQzASX723ulNwtQ9nrMeasCdqbPkhF/NsDO+H23K8NwfS4rIw/QhOsG601MYP1v/N93/9PPHwvOBmhHFrIipB8VE1C0dKQEzV+J7brPWs9d62Co/ugZ7We4Hl2DGlyfyVV4WGLOMmC3v71jvVXhqdf6twzYc/zVAXul721mwG64/lo7R/rcpPyvdRnAetIYmLf6ANbf6c8O9ysnhg3KP2v2xBjYW+pSB+avxAbRb8pc651rsuM4DdajvCI8OEB/dheHSid5RiZ7VqUcNujTm8FWP0vtTOvCUjcLPPV6/JYB+7z/aXC9UkY1uK6QNZPnrvU+Gaz/1gBUf6t/8H/HRsr0nFB2OP9KkeDD8N76popw/IpYAfpNDOdl9xopQ/XmbbA+5pPftR5RhyJcn8mt3I3ECr5Hchgz7J/j3evP4s2y3l6Pcd9ebwP2vd6APmBn/K4byTBc3581k5cB11XBOjAP1xnAemK3+kv989ln/Qz33otCEdB/0k5oZ7g/LwbIqih2eP4olnOcsWa7jl0NqjffaD8+sN68DNdn89VG0lR9QDNgt3+G/wneJwBlr8dvsQD2U/7WgfMA+8nd6xUbFkazZvLURsIoj4OpAtY3QfVPynlp6ieNXFzVIP2oWECipSklaP4stms/ay13rsPqYzwRrDe/KJ+Yuk6H67P5Kg8x7A+DO7MM2OMz2OpnqZ3FmwUor/SuXnOvdw/IWAkXT/lbB3jWvG3P9R2xK6Na97rh+p86Da4brN/qSAfrv/RQz8+lRWLvnJ59GDewt9SlDMvfiQ2i35S51rvXRBWqN+8VnnW71ptPhIfh+s5chQcn5izWB1zG42ACV6q1n+B9AqxWrJkF9rJcH73evf7uXufIWH0eAG64rpAF7Ifravf6N83AdYP131oA1t/pWod7zwKzw/lXigIfBvfWVVUE5N/ECtAflX1eqgH1lqEF1ZsvZ9d686oD1yPqOGk0zEwuexcUK/jelcMGT1ZnMK0P09qz1M1yLquvBwvwNWCf9zdgj/UfyWDtXq94LzWaBRiuX5F61zoLWN8I1T8pfqRM7wlSBPTvtBPWGe7HKhu0qkkBnj+K5fxmrJs6VG/+q3wN1q/7aMP12Xx3r8fmsYLvXTkVugdVwViv/wnehtXj3izn8IQZ7EzeBuy5/oC719nv2YAz4PrJXesG65d88me4j5zoSpB+VCwA0dKWGjh/FNvfQNZa7lwHd6vfPKP9DNeja1CD6zO5Vf+ZcaUHXQP2OO/V/id4G1bv8V3pbcC+15tlvdv2XJ+vOzJYX2zKes+RkaUC10/sWjdYj62j0+vn5YXD3j09c9EY1ltVpAzL34kNot+UudZVgPo9Y5XvOWC9eUV4cIB+w/X1mQp5rA+7p2awQNle715/e895u+Y93izAl2U9VnuzrHfbnueze4c/4O515hzAcP2bTu5arwbWA3xed7j3XGDscP5ZkcDD8N7qUUVA/k2sAP0mhnOye42UoXrzNlgf88nvWo+oIxPuG7BrZp0Iv3dkGLCf6e2a93izAF+W9VjtzTLzvm1/lr+713lzgPpw3WB9NL8OWI/sev/vn17zI2V6L1I1QP9JGbDOkH9eDJBVTezg/Fks5zhj3XYduxpUb77RfnxgvXkZrs/mG67H5lV7CGUDHKsz2AANS+0neLvmPd4swJflh7SV3kzz15U/W0czGLvXK91vjOYAhuuflAXX1cE6C1SP9HkB1T9p/wz30Qu9EqifEQtItPSkBs2fxXbtZ61nBaDe/DWgevOsC9abT34dhuucmexZbGB6V4b9z/ZmAZ2K68Gyzgbs+7xZ/rVA2173s3XEv0r3eqV7mpt2nJublEbCGKyPiQWsJ0H1T/r538ll75yOgBSG9pai1EH5O7EB9Jsy13vnmuw4zpXHY7A+4hPhYbi+M9fd6zVz1CGKKnjr9WfxZllvxfVgWedVgN3Xxp9i+TGjbc9xLnf4A1z/euC+D1/GzhygNlzP6Fo3WM+tIcojEKp/Wpd7h3vvyWcH9K+0AtAY4ltAXSh+Razg/FEM5ydjnQzVX3nG1lsVrDcf3ZEws/mG67F5zA+jbJBjRwaTP1PtLHWzeLvmMV+AA7Cz/K2s9GYCvEyfqzv8mdb+vg/fPcBIxmiOR8K8lmLXujpYZ4HqQBxYH1yT8ZEyIxeQIqT/JgaQd9Mp8J9pzStJAZw/iuk6qArTW87aY1t1HCeA9eYV4aHdtT6brwTXRzMN1/flMD68s4DT1f4neLOATtd8F8vIEsXrudebZa3b9hzeO/xPnbvOep8B7IXrBuufZbA+J6Zu9YgOfuDXMe2d4T5zMVaE9dFiApDWXqnB8ndiu4Yz17UKUG
8Zq3zjazdY56/DcH1NJntWlYdrNpDC5M9UO0vdLN4Mviu9GcbDtDp6ts1fi9Xeimvd693rf+JoGMbv5pGM0RzD9T+lNg4mE6wbqv/WAqj+ST//u8jZu6MjwYXhvcWgKpD8ndjg+aOy13732qh2qTdvbqje/LjAevPJh+vKI2mqw/XRPOYHUsbu9R0Zyv72nvNmgXoMviu9GcbDtO29zo9SXOte7x3+J8L1kRxmuL5zJIwKWAfG4brBep6HKFT/pMcZ7n1iB/SftBo+GehrKxvGsokZnD+K5bxVg+n3nJXe/FC9eRqsr6pDEa7P5FaF6zuzGB/iGTNYwNtqfxZvFrCnuB4MvgAHYK9+/gB3r+/2BvjgetuHZ/1HM0ZzDNd/S6lrfQasq4+BYelWJ4LqAN4e08wM9zEpg/qrYgF/lnWTCjR/FtPfUtYaVgDqzX/NcTB3qze/KJ8aYH22DsVxNApwfTSPFa6P5DBmnOTPVDtL3a55vS+gB9hZYLLiOrftObx3+LMB9grfyyMZwD64zn4PCJzVta7csc7QrS4C1T9p7wx3YP6B/wRgb1k3qYLyd2IC6DdlrvHO9dhxnCpQvXnydas3r5i6GOC68kgaBdi9O48569QMZX97z3m75jFfxa5qRcCuuM693r3+huux/qwZgOH6o3Z3rRus79+fBawnQvW3+hv/9+d/F6XKGJRo8GKAb0WqGiD/JEZ4/qjsc7F7fZSBevNe4cnZrd686oD1iDqyRsLMZKsAffYs1hzGB3mmY2CqncWbZb0ZfFd6M4BfxbVQ7F5vtfRsq+nd688G19s+POuzM4MZrlcfCaM2DiYTrDOMgSkM1T/9z48z3PsuWBVA/007oZzh/l5lA1cVsYPzR7Gc04w123XsalC9+RqsX/cxXN+d6+71+SzD79r+9p7zZvBd6a0G2KtfFwDHOrft63sDfICd7ftmV4bhepMKWAfG4fqpYL1St/pGqP5JMzPcxy78KqB+RCyw0KolJWD+Smx/F1nrWQGoN/9VvvF1M4L15hXhoQ3WM/Orw/XRPFa4PpLDmHGS/wneLHCPwXelNwP4rX5drIS9itdcr3ev/2lwfUeG4fp4lgpcV+xaPx2ss3WrR9XztC4ZM9znIcDJ0N6qI3VQ/k5sAP2mzPXeuSY7jvNUqN78uMB68zFcz+iaN1zPyTo1g8mfqXYWb5b1VvPt9a4M2FmuZcXxMMqfSacBdsbv42pwXWEkzEld68pgvVq3+iKo/kk/+Bt6o05WASWDfOtZVaH4FbGC80cxnJ/d66QM1Jv3mvoN1q965NeR0UGumOvRMPtyKgADFrDX63+CN8u5VPNV7KxW8wU4fsRo29f3NlyP9R/JMFxvGoHrKl3rBuv9YuhWF4bqn9Q63Ef/UNVA/TcxwLtHnfQDANvaV5ECNH8Wy7WQtXa7jt9QPb5Og/XYGtTg+u5Mw/WxHMYMZX+m2lnqds1jvtXBL4Ov4o8YK71Xf5ay/GuBtj2X/44Mw3WNrvWMcTAG62NiAetkUP1TPXMjZWYfpqsB+2ixgEcrR4qw/FmM13DmulaA6fcMDajePPm61ZtXDbAeUYfh+ppM9qwKD/Q7MliAYa93rz+LN8t6M/iu9GYA7F7ju6r/iLHam6l7nek7ZlcGK1zfeR/mrvU/pQjWDdXj6ogC6sBwPftnuD8qGqwY4FtZqgDHv4kRnt+Uvf6710YZqDfvFZ51u9WbT4yUu9Zn81Xg+s7O9dE8d6/b/1RvFsCn5qvYWa3mW32NmbyZ4HrbnmNddmXsgOvM90aVu9YN1q8rG6xXg+pRXfNPXj9//BEpjzHZDd0M+DmVDV+ZxQzNn8VwHjPWa9dxqwH15ssJ1ZuXwTpLfka3/G6Yb7h+bgYTWFEE1Su9XfNdDJ3VDOuw0pthjdv2tdcZ8GiYTH/Dde6udaU564pg3VC9iQ2qd/j82eE++5CsDOx7xQAErXOkBMufxfa3krWWO9dBEag37/pQvXlF+eSC9dkaFEfRGK5r5DBmKPuf4M0C+Bh8e70Z4K/aGjOsb6ujZ9v8tVjtzbLWbfuz/Ff/ywGA84f0m3bB9epd6wbr/ZoF64dD9U+KHykTBSROAvdWTSkD8k9ig+c3Za93FZh+z1jlG197dK1sYD2iHoY61LrWZ3IN1/lz1MEEk/8J3q75LsP19d5qa7zSm+WHIoDnXwpU8GeE6+5abzJYf6/MMTCG6nF1RPoAH9c2d4b7J60GSgb6ZysbzmaLFZo/i+E87V6rXces1KXePKP9uKB68zFYn83fDbozMitB7105jBksMKvXu9f/BG81315vNfir5suwvm37/LVguY4BnrWu4G+43qddXesG6++l2q3OMAKmIlSfWNefSx8cFWeVM4C8XrH9SKC4hlWkAsyfxXTNZKxhBZje/NccxwlQvXkZrM/mq3XLG67vy2HMUPY/wbs6mFSDvwzrwAJ+K6/xam+W0TDKn/8j/mwvlG37rM9gBuvAGFxXAeuKY2DUu9UN1d/raW2udbhHgYaK4H6nmGClNSZVUP5KjNdj5vruXA8D9UfP2FqZutWbT4RHLuBXHEWjANZH83ZlsUJ8Njih7H+CN4PvSm/D37W+DOvbts9fC8UfMVodPdvyfD7v8Ddcv66KXevuWP+uzG71KlCdDagDXWuzd6TMCthiiG9FqxIUvyJGcP6o7POxe312HO/KYzoJqjcvg/WoGty1Hp/HnHVqBgso6/Xu9WfxZllvBl8G+Fv93FVeY5a/adXRMEyfzyP+huvX5K71mDyD9T3ZQP4LW6M8gGVd6jPineF+VZkwzrA/XtlwVVHswPxZLOc4Y912HbsaUG++vFC9+RmsR9ZguB6fx57F2CGvDkBU4RCLd/WaDX/X+qqt70pvxdEwTJ/PTJ/9AB9c33Gf4q71JoP198ocA2OofhcbVP8b/99P//PP1IMr20zx3WIBh5au1GD5K7H9HWSt6c51WH2MKlC9eUZ6xdVnsD6fr9Ytzw68d2ZV6CzfkcECfHv9T/Bm8O31ZgCTamvs9V3r2+t9wmgYpu8VwHD9qpjh+s6udYP1zzq9Wz0CqosB9W+a63CPhBOnw3tLQxUA+TuxgfNHZa97JZjeMlZ6G6r3++SD9Yg6DNe58k6G6zsylP1P8GYBZoa/mr5q67vSm2GNAZ7udVVvwHD9qqrB9Z3g2WB9bS6QD9WjPCLAOglU/6X/4v/3+B95RsrsAmoG+/WUDWOZxAzNn8Vw3jLWy0D9nW+0n8H6qjoU57wrZbJCb+YcNvjdm8FU/wneLMCsMgBW8/X69vv2ejOscduew7vX33D9u3bAdYP1+f1GQavB+t78zBE6j2KD6k9A/Zt+/vggqz6XnAHyWdY7KQHzR7H9XVUF6fesld4aQL158kH15mWwPpuv1LU+mumxMGM5jBlM/vae82bwrQwmGa6JyuvL4uvRMHPevf6G69/lrnWD9U/KAuvqUB3g6VRPAuof9Tf+758d7pHQoTq8tyxVQP5JbPD8psy1rgLTm/+6YzFUH/GJ8NAF67P5lUfCjOZVgusjOUwQhM3/B
G/FmhkAMMMae301fVlGw7D8Pa/2VofrI/cOjHCdGayPZhmsr8udAcoVoLp4l/pHfVnftSNlVoMjA33rkyrC8KtiheaPYjg/u9dpxzGfDNTvnpFeHN3qzcdgfWeu4Tp/jjL8ZvNn8WYBWwxwkgFMqvl6fft9e70Z1rhtn7/Ovd69/qfBdY+E2QPJDdbXZGZ2q1eB6oJA/Zt4ZriPiAHYfVO1HwUU1lxdCrD8WUzXRcb67Tx+NaDefGNrjq6zUrd6RB0G62szPdt9LEd99AwTxOn1ZwF9ijUzwEm1Nfb68vgyrG/bPn8ter17/Q3XP+v0rvWd42BUwLq71ftlqP6nIn6sAIB/Xh/Xz/CDn18+ek1MINJaI0VA/k6s12vWGlcB6feMVb7cXerNjweqN5+zwfpMvkfQxGWx5qjD9V5/pvVh8WZZbzU4ybAOXl9NX8W564qfbQAXXK8wEoa1a91g/bd2g3VD9TGxQHURoP5N4x3uK+GQYb4VqUpA/IpYofmjss/J7jVShunNmx+oN896UL355NaRma8AulXyGKF0lQwmkKMIqld6q8FJtXXw+mr6VofrK72Z4DrTdxXAORKmWte6wXpcHpAH1tWhuoH6Zz3UxDlSJhvGPcs/AFwT23lTlQIwfxTLec9at13HrwbUm2+0X1ydFee8nwjWZ3IV4DorWN+Vw5jBApJ7vXv9WbwZfAED4NW+V9fXa9vvWx2uM30ms6x1254LrjN2rRusN+3sxM8YA2OoPiYWqE4O1L/p5+OHa7X546NiAYoWp9QA+TsxXufVIXrL2pGhAdSbp6E6ex0G6zx5huvawEIRgK/0ZgFnhutrfZXWl+FvgwX6MlxnK71Z1rnV0qfT4HqlcTAG6zF5QM7LWU+H6kxd6klA/Zs+d7ivAkEG+dZOVQHiV8QIzR+VfS52r48yTG/eKzxj62WD6s0nwiMXrGd2y1cfQcOexQi+d2Qw+TPVzlK34a+mr9d3rW/l9V3prQrXTwPrQJ2udYP1mDzFbnVD9bpA/e/va5MzUiYbut1k8L9WLOdZReyw/Fks5zdj3XYd++pjUwDqzTPSiweqNx/dMTCz+dXB+mgec1YVgK/sf4I3A5wEroMzBoiotr5q8JfB1+u7x9tw/bXctb52nxEoa7Aelze7r6E6F1TfCNS/iXOG+y6xAENLV2qQ/JUY/w6y1nXnWijC9Ls3b5d686sF1ZtHbg1qYH0mlxl4j+aMZFUB+Ibr3N4r19uAcq2v567nry2gt74rvQ3XX0sdru/oWjdYbzJY/6wZsD7XnZ8/T91A/bP++/rYfpZBH79o1GJSBTD+TYzg/Kbs9d+9NjuOVwmoN89ov7gaWUbANJ/cOgzWufKYIb7heq4/izcLuK8MKBl8vb5e2x3einCd7XukB65XGAlTaRzMTvhcHawrQnVgHqxXgepCQP2b1nW4ZwA2Q35uZUNXdjED82exnMuMNVOH6c2fH6g3Ty6o3nzyu9Uj6sgE+7tB9+5M9izWHDZoweR/gjfDaJhWx3VlrwXD+lZd25W+Xt+1vsAZnevuWl+TwQzX2cF6xnx1RbCuDtWZutQJgPpL/XOvq9ZIGRYIaJ0lJVD+Smx/N1nruXMdFIF684324xxNw9Kt3jzOA+szuQpd66N5rHCdMeMkfxZvj4bR9c1eX4Y1qLq2bdu6voDh+iupw/XTu9bZwTqg07FuqD6mil3qi4D6N/0MPcD5ZaOWotTB+CexQfNHZa97JZDeMtYdD3uXevOL8qkD1SPqUAPrM7nsI2F2Zhmua/mf4G1AudbX6+u1ZfLt9TZc/1OnwXXWrnX2Oes7wbq71a/JUP3mIQ3Uv2msw303QDPgr6dsCMsmZmD+LIZzl7Fe6jC9+a/w5B5PY7AeV4PBenwecxZjBttoGyZ4z+Lt0TBcvn6xqdd2pe9Kb8P1P2W4Hrs9UK9r3WD9tQzV82poHvMAmxGoX7g2NEbKMAA+y3olJVD+LLa/q6y13LUOijC9+Z4B1JtXhIc2VJ/NVwP6zMC7Wo7hepw/izfL6Ac1SKkEKNXWQGltWw3XtaKDv8e315thfVsdPduu+xxeCdfVwfpIRu/2ButNCmA9o1s9cwRMNlSvBtQDu9Onrwu0kTKvP6D9AlKrkpTB+CexQfNHZa757nXZcaynAvXmxwXVm4/BulIuO1jfmVUBrp/kr+qtBCgVfbPXl2ENqq5t27auL8P6tu054Lq71mO3B3jHwewC69Xnqyt2q6tDdaaxL2RA/X96cz2/73DfDcsM+M9RVfg9ImZg/iiGc5axVruOe+WxnQbUm1+UTz5Uj6jDYJ0rb1cWK8BXht+r/VW9GQAaA0isDChXrIPX9rZtXV+G9W3br/FmGgmjDtZH9jFY3wfIT+lWN1SfExNQ3wDTr4hnpAwD0LOsK1KB5K/E9neWtZYVQHrzX3Mc7LPemYB684nwyIPqs/lKYH00013rnBnK/qreDACNASRWBpSG61pry+Krtr693h4JE+fPOA6mGliv3q2uCNUN1JuKAfU/9LTOP7++aPxyUquKlKH4N7FB85uy13znuuw41pXHww7Um1+UTx2oPluHYqe8wbpGDmMGk7+qtxpAU/P1+nptV/r6fQzj3qojYdTBOrC+a91gfXw/pREwhupjqgjUF8L0K/rd4Z4BzAz5aysbwrKJFZY/i+G8ZazVruNWg+nNlxOoNy9D9cgaTgDro5nsWRW61ndksECdXn8DtDHfHm8D4HW+Xlut8wXUf5mpR8LE+bONgzFYH4e3Kt3qJ0J1hi71qkA9ak79v/rPf9rHf/5IGQawZ1nvpALI34nt7ytrPSuA9Oa/yje+bkag3rwiPPKh+mwdnuuuncWawwSn1f3duT7mXRlSMgDKbLCstLZK5wswXH8Wy0gYg/Xv2jGixmC9SWkETBZUd5d6UwRQT+5Of6cbTL+ifOBuWbNSh+KfxAbMH5W97jvXZsexrjwedqDe/AzVV9RhsL4mjz3LI2Fy/Zm8lQBlr+8qbwZIybC+2WA5e22z13Wlb/batm3rj4RZ2bWuDtaB9cdgsF6/W91QfUzVgHoSTP+qv/B/ftKg2V/4T0qutUbZ8JVRzLD8WQznL2O9dh23GkxvvtF+XEC9+XCMosmE6rP51cH6aB4rWB/JYcxg8mfyrg7RDNfzYa0BsNb5ArTWttcXWAfX3bX+XmzjYAzW94J1pREwqlDdQP23goB6NEy/sllehzsD4LOsRykB8ldi+5vKXM8KIL35a8D05hlbq6F6bA1ZUD8D5rOD9V1Zp2awdFP3+huuj3uv8FVb32xfJQCstK69vtlr27atPRKGadb6aWAd6K/JYL1+t3oWVFfvUjdQf6+LMP2KPFLG0pI6FP8mNmj+qOy137k2O4515fGcBNSbFwdUbx7uVt+Vq9Ah7651+0d4q0G0ld7ZoJJhfbN9lQCw0rr2+mavbds2H657JEzM9j3gjHHOOitYZ5+vrgLVgXGwrgrVGbrUI4A6G0wHQoH6N6+frRDtr41Z1jplg1dWMcPyZ7Gcw91rtuu41WB6860P1JtXhEdu
t/psDQbr8Xm7sqoA/JP8DdfHvZVApdoanP6vApTWFdBa215fRbjONBJGvWt9xzgYd6zrgPWMbnVlqM7SpR4A1Rm70yO89na4s0A+y7pJCZK/EtvfVNZ67lyH1ceoAtSbZ6SXoXp0DaeA9dFM5nEwu3IYM5T9FeF6214L0FUFlUprq7SuPb7Z6wrUXVvA89ZnvU+D6wbrButReW3fnJnq2VDdQD3VyyNlLH6pQ/FPYgPmj8pe991rs+N4Vx4TO1BvfjyjX5pPPlSPqEMNrM/ksnet7zou1uNhgt+r/VngequlZ1stQFcVVFZd2+x1XeWbva6A3tpWh+tMI2FOA+vAnnnxBuu3LEP1b1KH6kxjX6IgeCRMn/D7WQqZ/sJ/lnlb+5QNXlnFDMufxXIOM9Zs17GrwfTmG+3H1aXefAzVZ/Org/XRPFboPZLDmMHSpd3rDXBAtLa9FqAzqNRaW6V17fHNXldAb20Z4LrqvHXlrnXGcTAG6/W71U+E6pW61BmBeqTXf/D/evyPazvcWSCfZQFagPyd2P6mstZ05zqsPkYVmN48+YB68+LonM+G+4qd8gbr/DlsGUzgHuCAaG17LUBnUFl3bbPXtcdXaV1bDddluH7z1RwJwwTWgRrjYAzWb1kjx6TTrZ4F1Qt0qZcE6gth+hV5pIzFowpA/JvYgPmjstd/99rsON6Vx8QO1JtflA8HUG8e+R3zat3qM7nsYH1nlrvWc/17uycN18d8K4PKihDY61r3mq3+MlPVrnWD9fjtgT1g3WNgXu1nqN6XPwXVaYA6Y3f6AEz/qP8H/2+gjZRZp78WeltxygatCmIG5c9iOZ8Za7br2FcfmwJMb56RXobqkTUYrMfnsYL1kRzGDKaueBa4zrImBpV9vkpr63XNv2az1xXQ61pv3msg9Sld6wbr1+Ru9bmstt85UN1APc4j0mcRTL+i1SNlLGu/lOD4O7H97WSu6c61UATpd2/O7vS7Hw9Qbz6G6kq57ljfl6MM1kf8FV9mutJbCVS2Gq7rdAicva7Z56rXV2Vde33V4Lq71vfWMuLP9AJZoN4YmJ3d6obq3zUD1Q3UYz1uigTqHTD9q/7C//FIGWuPKkDwq2KD5c/KPhcZ67PjmJVgevOM9our0VCdI393t/popsH6WA5jhupImFZLz7b5gG6ldzZcZ1jfFb7Z69q2rQfXlda1x9cjYcZ8e7c3WI/zrwLWq3arK0H1k7vUWWA4W3d6JEgHLh/fTwp8+wv/2Z55grJBqrLYIfmzmM511trtWgM1kN58V3jWBOrNJ7cORag+k6swdqYSWB/JUQbrAM9ImLa9DqDr9QXqjtjI9lWCwNnr6h+D3LU+6s0yDsZg/bNY56vv6lY3VH+tU7vUZ0G2YfpnRXbeI+ulqUyw0NKSGhj/JMa/g8z13bkeO47zVJje/KJ8OIB689CF6jP5Sh3y1brjT804oWt9pXfVrvW2rY6v0tpmr6vSNZt9XQGG6yO+quNgDNa/63SwrjIC5iCobqD+rw6A6Vf8PFLGmlMlAH5VjKD8WQznJWOdlEF6817lWxuoN68IjzOh+my2wfp8lsfBfNcJXesrvbNBZavhurLXQWlts9c1+1wBNeetG6yPeZ/QtX4aWGedr+5u9d/aDdVVu9QN1H8rCqhHwvRArx8KMPdJf2UXsFjs619FCpD8lZiuj6w13LUGq4/vNJh+96vTpR5Rh6F6nbxTx8HsyPCLTMd9e72rwnVD4JrrqnS9umu9qXrXOtM4GIP176rWrW6o/inz3C51FqBeHKZf8ePvcGcCjtZaqULxT2K9fjPXeveaqIL05h1fe2Wg3nzyofpsHYoz3Q3W9+cwZjB1rQP14TrLDxnZsJJhfVXgeva6Kv0Y5JEwN998UK34ElOD9fcyWN8D1g3VP0q6S312/6rd6YRgnh+4W/tVEXz3ihWUP4vhXGWs1Y7jXn1cCjC9eXKNfWk++lB9tga/KDU+jxWsj+QwwnuWrvW2PUd3eTb8BfIBsKKv4Xq8Z8VRO8AauG6wfpdi1zrTOBiD9WtihurAGFg3VP+uGeBaAagbpof4/VAAO8ualQogfyW2v8Gstazy0lSlGe+s42iYuuZVofpMdvUO+UpgfSTHXeu1vA3X1/kqrW32umZfs9nHD1yH6+5a79/WXetz3iPbr4brq8G6u9WbVMD6IFSX7VJnGPnCBNRZYXqAlzvcrX1ShuKfxAbMH5W95lUg+j1DB6Y3T06g3rw4utSbTx5Un81X65KvCNZHshjB+kiG4fpebyUArOartLbZ65p9zWYfv0fC9PsywHXVrnWD9SsZdTrWK3erZ3Sqn9ylHgHU2brTyWD6FT8D95OUDV/ZxAzKn8Vy7jLWTB2kN/8VntwjaSp1qUfUodgpr5LJnnXaS0wBj4SJ8FYCwGq+Smur4gnojNrp8VWC65XBOrCuZtWudYP17zJYl4HqkqNfsoE6MA/V2UA4m0+A3w8NyLOsb1IC5K/E9rd2wotTlWe9G6j3+hiqK+UarI/lnNa13rbnAOB+mammr9LaZq9rRbiePW+91RB/rVaG64pd60zjYAzW12QwQ3WgPlg/FaozAHU2CE4E06/4ucPdipE6DP8mNlj+rOz1370+yiC9eXPD9OYXV6OhOke+whiY3XmsYH0k5ySw3uvP4r1yzbNh7SpfpX8VkL2u2ddt9vWaDdc9EqaJAa57HMyfOhGsu1u9yVB93b4G6vV9Jv3uL039C/8JLchaq2zAqiJ2UP4opnOasW67jl8NpDffFZ41gXrzya1DEarP5Bqs780ZyVCG66re2fC3bZv/Q0M2BAZ0ZoNnA/vsa9YvM10D1yuD9d46WLrWmcbBVHh5qcG6ofqqTGAOqlcB6mwQXGiW+73DnQn2WWdJCYp/EuPfUOba+oWpV31XeMbWWg2oNw9D9V25zC8vHc1izVEG673+TN7ZoLJtW9dXpcO6xzd7XSv+YOGRMLXhusH6nzJY/7a9oXrL0oDqil3qBuqxHpE+0V6Dfh4pY/2pKgD8qhhB+bMYzsnudaow5/0kmN686gD1iDoM1bnyDNaviwms9/ordq23Onq21QHLPb5KP1xkr2vFHyw8EsZd6701sIyDMVh/ryrz1dlHwBiqv1fm2JdZ4MsCwqvD9IVgfv6lqX9N7n+CGGBpJSkA8ldiug6y1rACRG/+q3y5Z7szAfXmcy5Un8lWGTvDnlUBrANccJ3JWwn+KvpW7LDu8c1eV4+EiV9Td62vrYFhfXu3N1iP82/79B1DtW716lBdsUvdQD3WI9In2mvQb77DnQkiWhxSBeKfxHqdZ6/1znXxi1JfeUZ61Zzp7nnu+zIV8qqAdUC7a52pI95d6+t9VTqse3yz17XiDxbuWs+H2pXBeq+3wfprnToGxlA9dz/VsS8G8mt8mL2e/DxSpqqyQSyDWCH5s1jOVdUXpa4+LnaQfvc0UF9Vx0lQfXdmtc7408D6av9VMA3Qgr8rvRl+vFCB4NnXbvY166713K71ymAdyF9fwGA90t/d6t21bYXq7lJfv2/E/tU8In2ivQb
9fmhgn3W2VOD4KzH+DWWtZ6UXpZ76klTGrnl1qK6Y7W71uRyD9Vh/lnEwrZaebbWgfTZcz16D7HWt+IPF6S8yZQC/anCdAawDK9fYYP2TDu9WN1R/o1OBOgsIrw7T3eFupUgZgl8VIyy/iWH9K74o1S9JjfQyUGfIV5nlXrEz3mA917961zrDjxjZELhtWw/Yq/xgUfFFpkrw12C935cFrLdaroPdlWC9wnz1Hd3qhurz+2V0qZ8M1CvCdBGQflUG7ixigKsKYgbkr8R0XjPWzi9J/ebLC9ObXx2gHlGHIlSfyVXojjdY791nXcbq+t21vt43G65nr0H2uiqtaeZIGHetr6lVDaz3bn8CWAf64Lq71Xty9oD1ylBdsUs9G4izAPXqMD0azD94/lABQauO1MD4OzH+fWSu7e71MEh/9o304htDY6iuA9VHM9lHzjCC9bYPF7xnGQcD5HdVr/auCIHbtjpwWeVfA6iMhMl+kWl2Z7W71vt9DdZ/S30MzOnd6obqf8pAPWf/SB+mWoj83OF+iqoA8KtiBOXPYjgn1QD6PWelNz9Mb34G6qvqUIPqM7ns3eqjWQbr+f4rwXqrpWdbHajc65sNgdu2HglzLf+6PBLmmlYAYAb4uwLuZ69r77YsLzB1x/p7FQHr26D66H6G6pz7MuzP5BHpE+210O/3S1P/wn9CQ6qKAZRWkwIgfyWma8EvSp311pntzjqKhmUETTbYV4PqM7kG65xgvTeDaRwMkA9+V3tXhesMa5AJ17OvW4+E0RgJ4671/m0VX2BqsP5eO8bAuFu9SQWqG6jrezD6RHsN+P3ucGeChxafVKH4OzFf75lr7Rel9viu8OSE6c2LA6g3j7wu9ex8hW713Xk7wDqwHkzvyGCC64pd6yu9s+E6w/pmd8N73vrV/Dy4ng2As7vWe2owWH+sY03X+kqw3utvsH5J3WDdUP23ToLqFYA6GwRnhemL5rh7pIyaqkHvETGD8kexnCu/LHXUe5Wvgfp1H0P1nbns89UBg3Umf5au9bZ9PgDPBsBAfof1Kl/PW78mj4TRGAnjrvUmhq511Tnr6mC90hgY9hEwJ0D1TCDNAMNZPBh9CPz80lRrrVTg+Csx/m1krefOtfCLUh89o/1qAfXmkVuDoXp83ghUBwzWV/mfMGt9pbfhev7aqvxrAM9b90iYaF+D9cc61nStG6y/VxWwbqieu1/mvtnZ1TwifaK9VvjBL009V8og/IoYYflNDGtf8WWpavPdWWF684ryyQfqEXWoQfWZXIP12/bnZaiC9ZXeDOutBNZX+VZcV4+EifXs2TZ7JIxS13r1l5garL/WarBeaQzMrhEwhuprMmf3d4f7Gh9mr04/A/fVYoCrCmIG5K/EdF4z1m7X8a8+NpXZ7qxjaAzU5/OrQ3WAdwxM24cLeu/KYJmz3rbnAODuWu/zzV7b7HX1SJhrUoHA2V3r2Wvauy1D1zrLnHWD9U/+53arG6rH7ae6b8T+LDUw+kR7rfBDGynDAw4tXqkB8U9ivOaz13fnmqhC9Oa9wpMTpjevGkA9og5D9c9i7lZv+/BB7x3HccKc9ZXeBuvrfN217q71yG0rzlrPXlOgNlg/pWOdDKoDpN3qFaG6gfrafRn2Z/Jg9Fnl1/G37w53NWWD2SwxQvJnsZyb3WvlF6R+8q0N05tPhIc2UJ/N3w3VRzMN1cdyDNbj/FmgvRJcZ1hfd61fU7WudRWwvio/u2vdYP0uxY7107rVWUfAGKrn7pe5b4X9mTzYvYCxv/cvai9N/SvalkQsAPQkKYDxd2K7XvyC1Aj/Vb7cM93Z5rkbqGt1x+8aAQOcDdZ3/OhxAlhf6Z0NgNu2NYG9u9avqVrXejYEPn0cTGWwrtqxbrAeu/3oPrvAuqF63H6Z+zLsz+QR6RPttQCi9/zttw53NtBoxUsZhH8T8/Wbve5+OWqvNzdIb35cML355AP1iDoM1b+LuVt9JItx1AxgsL7b22B9nW/22rpr/Zqqda17HMyq+fX5YB3g6Fhnmq+uDtVH9tkB1dk71U8A4ycD9YownRmkj/69f9Jf+D8eKbNC2ZCVWcxw/JWYzqVfjjrjv+Y4ToDpzctAfTa/OlQHanWrj+SsHgMDrH2BKVNHPwNYB7TgutL6Zq+ru9ZjPfMhsMY4mGywDvTUugasN+81XeuKYN3d6rHbA/U61VWg+mlAnQGGs0HwKB92kN55nH5pqvWn1KD4OzFe25lrW222u9pMd9Z57p7lHpdvqP5uH94s9fnqAFfHeq//yh8Esjur27Y6vtn/IiD7+N21Hpuv0rWePQ4mG6y3GvK71g3Wn2tZBtZLdKsbqmtknbpvxP7VPIByIP2qpzvcmVUFfPeIEZI/i+G8ZKyTX476zpN7DI2BelwNWTBfAaq3/ep0q4/krAbrwBkd673eButrfd21fk3uWv8ula717HEwBuv9vieAdXerf1c1qK7Sba4IxbOBOgsIZ4TpkSB9EUTv2bw2cGcAo5WlAMdfie268MtRI/xX+Z4B05tXhIc2UJ/Nz4DqgLvVd+e4Yz3O3+Ng1vpm/3CRffzuWo/NP7lrPXscjMH6bVuD9SfJg/Ud3erMgFwFjp8E4yvsH+UBxMF0ZpC+Asz/6/tDBx+teamC8G9ivlaz19wvR+315gbpzY8LpjeffKAeUYeh+pX99sDuXVlsLy4F1v84oArWAXetr/Z11/o1uWv9u9y1rtG1brB+1yqwrjxf/dRu9YpQXQmMG8bnegB8MF0BpLvDPVnZ4JVNzJD8WSznLmvNKrwgVeXlqM2zJlBvPvl1GKpf2c/d6gDfKJi2PQ/8VhwH0+ro2TYfgqt0rbcarm7nrvUrctd6rKe71nPheo9n234NXD+hY93d6mfuo5B16r4R+0d5RMB01q50BTD/r69fmmppQfF3Yr2Oq8Pze95qf5257qwvR21eHEC9eeR1qc/mK0H1tm+tbvXRnNPAeq8/C1gH8jurFX1VRsKs6FoHzoXr+SDYXetXdP2864D1Xl+D9V86Cqyf3q2uAMcN43X2B+p2piuAdHe4F1EFCH5FrKD8UQznohpAv+fogPTmWR+mN68Ij/zxM2pQHajbrb4zy2A91l9xHEzbPh+Cr/DNXt9Vx++RMHHbAfFwXaVrvcc3/8eKeLhusH6rwWA9YvvVYH0HiK4IyE+A6gbqHN3prF3pBCD9ouehwJ0BoKpKAZA/i+l8+wWpEf6rfHlhevPj6U5vPtpAHTBUX5G3K2vk3Bmsx3p7HMx6X5Wu9R5fd63H5meOhFHpWl8B1oGaXetqYL3VsWbOusH6e60G66yQ3FA9bj/VfSP2N0xf77PKr3l2yS9NrSRFGP5JzNdm5lr7Bam93n5B6phPPlCfrWMGqAOG6tF5O7rVAb6Xl45kMPlXHwfT460E1lsNV7dz1/oVqcDgal3rKj9UtPyzX2Kq+AJTVbCuDtVH9mEF8Tv3Ucg6dd+bZoE6E0xnhPLRXs1vjf7Cf87scF+harB7RMyA/Fks5ytjzSq8HLX584P05lkTpjefCI+zutTbvobqz6
owBmZHhsH6Pu9suJ69vp61zg+DT5617nEwHgfTalhybF1QHVgLytXBuqE6Pxw3jL8uw/R1PtFezS9ef+E/PZv7pamnSQmKvxPjNeuXo0b5nzvTnXGeuzpQB86B6qOZOyF+lTEwOzJYRsEAnrM+6ltxHAzgrvXI7TLHwfTkV/uhAtAZB2OwfqvBYP1R6lCdNYN9n91ZM/saqI8pAjhXB+kEEP2iJ4BTZ7izqAL8vipGSP4ohnNRDZ7fc7QgevPlnedeCaYDuUAd0ILqCnmsUL3tw9NNvsPfYH2Pb/Y4mLZtXte6ClgH4mFwfpc1f9e6Clhv+XnjYAzWbzXkg3WPgdHZ3vvo7Ze5r2E6r0/zitVCiN6jesCdAZwqix2MvxLbOa/+YlTVl6I2b16Q3vx4YHrzMVDfnasA1QFesK7erT7ib7C+x/d0sA54HMwV+SWmcdsBueNgsuesG6zffA3WH2Wo7n2Y98vcdwaoV4Hp1UE6CUS/4Pm/Ov3SVBUpgvBvYr72std759r4pajvfKP9+MbPnA7U2/6G6u/ECtXbPue9GNVgfY+vEljv8V0B1lu+x8F806njYAzWzwbrii8uPWUMDCMkr5KhkpWx3+y+ykCdBaafAtKJxs7U63CPVDZ0ZRIzHH8W03nzS1Fn/A3S57zyYTpwJlCfyTVUf9yHr1t9R8Yq4Ntq6RMDAGd4UawSWAfqjYOp2WUdC/bP/hcAGi8wVQLrBN3qAAlYd7d63vasGaP7KGRl7AcYprN4NJ84RYJ0Ioj+xfOX/NLUKlIC4u/Eei1mru3ONdlxnGrz3CvDdMBAfTbfUP1PMXaS78hg6lYH1kFfVe/srvWKYL3lX/WsBYOrzVnv8TRYv6br63l8xzrNi0vdrR6z/Y4MZqheGcTP7HsyUGeB6aeAdLKxM+5wj1YF8N0jVkj+KIZzkrFOFWa6+6Wo44qA6cA8UAfO61Kfyd0J1QF3qxus1/LOButt23PnrDffU2Fw3jiYmv8CoNYLTA3WNeerrwTrbFCabXvWjJ37KO2XBdSzYXg1mB4Fq4tD9A+ef9TJDdwZQGkVKYDxV2K7Bqq/ELVl+aWovz0jveLqY+hOB2LWJwuoZ2UbqvPnGKxzeyuB9R5fz1lXgcEeB/NN0V3rKmC9bXut1myw3moI/8GgPFhnGgNzIiRnBuTVu9tHofrJQJ0FprN2pBee3X6TX5qaLVUQ/k3M11X2mu9emwoz3dlBevPjgulADaA+W4Nad/xOqA7wAu+RnB0Zp4D1Xn+D9X5fj4OJzc/sWvc4mCvZGuNgDNbzwTrLi0tPGgPDtj1rxs59MvZTA+rZMLwaTGcF6YQQ/YPvH+LucF+lbODKKGZA/iym8+eXos74a7wUtXnywXTAQD0i31D93T6cUH0kZySDCay37TleMrryBwEluJ49DqbV4K71b3LX+nd5HMyV7eLHwWSD9eYbD9dZurlPAOsnQnJmQG6o/lonA/WKMJ0ZpJOOnfFLU1WkBMQ/ifF6y15bvxj1qu8aVYbpAAdQj6jDUP2zdo63MVi/Lhb4zeStBNZ7fd21HpvvWetx27lr/Zoyu9aVwDqwDg5XB+tsUJpt+137sEN1FaAO5EH1TJjfPGb3rwnTDwHpLzxf1nlmh/ussgHtbjFC8mcxnBO/GHXUe5VvbM2MMB0wUM/Mnrkmds1Vb/vxZjGOgQEM1iO8Ddab3LV+Te5aj9tWoWt9BVjv8VUZB2Ow3rQKrJ/Uga6+PWtGRtYpXeq53fHzigDFLHU0n1gdML8dYALuDMBUXQpg/JXYzn3mOlYA6PeMVb7xtUfCdICrO735aAP12fyZ7N1QHdjXQb4zixGs7/jRgwV+9/ozgPVWR8+29cbBtBquetaCwYC71r+pWte6x8GEw3WD9YEaerdX9d6xPWsG+z5KXeqnAnXD9E9evGNnmue2+e2AX5q6R6og/JuYrx2GNa/4clS1F6OygnSAC6Y3n9w6MvN3j38BDNVncpTHwLD5rwLrQN2u9Wyw3nz5YTDgrvXI7Oi19DiYaxIZB2OwPlBD7/aq3hW2r7iPSpe6IlA3TI+t4+7FC9I3Q/Qe8XS4R4sBuDKJGY6/Esv5y1o3vxj1s6JBOsAJ05tXfnd6RB2KQB3YD9XbfrwjYEazThsDs9qfZRQMUBesAx4Hc1WZXet98DIaWp85DqZ58netG6wbrPdue4r3ju1ZM3buMwLVDdSv7DunWThbDaazgnSV2e3N922tfmkqg9Rg+DuxXkvZ6+uXol4XO0gH+GB684nw0AXqgM7ol7Yff16VbnXAYP2dDNbvUgHrgAYMBjwO5psyx8FUA+tA7jgYg3U9sG5QrrM9a8ZNlaG6IlA3TI+t4+4VJ4XZ7c03pM66He69yoayGWIF5M9iOTd+KeqYVkB0gBukN786MD2iDkWgDhiqR+S4Wz3X32D9ruxxMAbr1+RxMN+yPWf9m0TAOtAB1xngc2Wwbgiftz1rBmCgHp85pxkImt0ZH1HD3YdrdrsCRF81duaL6zxwZ4GhVaUCxV+J8drIWs9q89xXQXTgHJDevKJ8DNRHlfEyVkP1ptO61Vf7q728tNf3dLDe41ttznqPp8IPFZnjYDLnrB8M1qU61ldB9Z4aFLdl8mbcnjUD4IfqWiB+XNnd6dn5ETXcfWLE2iV/94yH6MGOfmlqr5QB+BWxXw/Z65+xPruOWQmiA2vOhWH6ujpmry+lLvXdmYbqXBmr6zdYv8tg3WA9yq9nWwWw3jxju9YzX2AqAtYBoY71yt3qqnUo18KcAeyD6jsht1qHunJ3OgtMPwGkC0D0/+n/+e7MN1ImG6iqiB2MP4vtvFZ/GSqwFqADayA6cA5Ib151xs4oAvW2L3+X+mjeaBbjCJi2D083+Q7/VWCd5ccGg/VcsA7UeoFpj6fnrMf4tW35X2BqsF4XrLPAbDbwrb79yD7uUo/ab1xZQD0bpjN1pbOC9GiIngjQe+WXpkZKDYJfEev1kb3Wu9dlNTwH1gF0YN35ij4PhunvdRpQn8k1VG9ihOqrM1ZCdcBg/XcNa8btZILgnm2rzVk3WL/iabD+PT90vE36KBjPV+fZlsm7wvYj+zB3qVcH6qrd6QwwnQmknwbRFwB0AF3ryNfhPqpsAJstVjD+SkznKmPddsDzmwzRb57Rfobpz8oC6m3/vV3quzOZoTrAB70Z/VnA+sp1MVg3WL8qv8D0myf/C0yLgXUguWPdYF1z29XbM9Uysv3IPtWguoE6Z+5sdlQNUXU0nxhFgnQViL5ivvy/8gz3XVIC4u/Eeq1krm0VeH7TynN8EkhvXlE++kAdOKdLfTTTUJ0zg2UMTKulZ1uD9btvLrD2nPU4v55tFeasn/oCU4P1a1IC6yzQmQVmq3rvyjBUn8ka1yhIPRWos8D0E0C6EkTvXMc6He49qgC/r4oVkj+K4XzsBOc37QDogB5Eb74rPGvCdCB33MtNakB9Jnc3xK8E1UdyGMG9wfpvVQXrPb4G67GeHgcT49e2rfMC0xVgHVgDlivOWK8Ov1nqqLC9obpOl7oiUDdMj63jpkjsr
QLRV7yk9e4dDNwZwKmqFMD4K7Gd8wxwDuyD58Cea+VUkN78onw4YDqQD9Tb/obqnzR6nlnBOmMG0xgYwGD9dw09vvXAOuBxMFfkcTDftuMfB7PgBaYG6wbrNNuu3p6plpt6wbqhuoH66tzZ7Ij8iBqi6gC4QboSRB+o1S9N/SRVCP5NzOc8C5g/aic8B7QB+t1/hScnSL/7Gaj/3l8LqM/kGqrvzTFYj/E2WO/3ddd6rGelrnWPg7mS7XEw35QN1nu2zc4/pQ7G7Q3V60J1RaBumP5bUWg5EqQXh+gd3v/hHSlTFXb3ihmOvxIDML9pNzi/ade1qwjRmy/3LPfI+qL+HtSB+my+ofpr7To2VnhvsP5aq8B6q0MHrlcE6z357lq/kp3Tte5xMF9lsJ64LQtIZtj2FG9gzwgYbkDODdUN1PdkR+RH1ACcAdJXQPTFAL13F780dVZqQPydmED5o7Kg+U07/z52XEtq89xZu9KBWjC9eZwF1Gdyd0J1gBd4j+SshuqAwfo7uWvd42D8EtNvfvxd6x4HE7ddz7YrxsEwAGUDe646dmy/uludG5DvgeoqXeqKQN0w/beiYDo7SCd5AWqnN4BKL02tAr6vihWQPysbmN+U8cPSrmtSDaI33xWeNWE6oA/UZ/MN1d/twwnVR3KUu9Xb9hxgvXLHeq9v5viSVfkeB/Mt2y8x/ZybNg7GYP2isrfNzu/d9oQ6RrZnHAGzA5DvfEmpQpe6gfr+fCAGpjOCdAWIvgGg9+o1cD8NXq+SChR/JRZQ/qisf42x++9h9XGuPB52kA4Ypq+qQw2oA4bqCjnKYL3X22B9zNdgPdbT42DiPD0O5rOqgfWe/GxInJ2/clvVOka2Z+tWZ4Xq7lKPy5vJzMydzb6JBaafBNJFZ7ff/t+zXpqqDMCviBGSPyp7fFHGtV5hnrvKLPfIv28mmN58DNRHpADU236csHtXzsh5Uh0DAxisj/p6zrrHwXz28ziYz9l542AygbHBej54ZthWtQ7AUP36Pv2q3KWu1hWfnQ3Mw3S2rvRIUK0A0clmt9+0ZqRMdbA9I3Yo/krZoPxZWT8SVYDn94xVvvG1R3+eGKbH15CVP3NtVIXqI1m74L1yt3qvP8t89VZLj7fBusH6dxmsf1fWOJhTwToQ/wLTbECbne9t921vqH51n37tguoG6utyZ7MBDpheHaSrQPSVY2ea/y/9GI7/K0UQ/klskPxR2f+qYvfaKAP05r2mflaQDtSC6RF1ZAL93V3qAD9UZ85ZDdUB3W51gOPFpc3bYN1g/ZpOnLNusP5Z7ljn92TYlgV+s9QBrLt+R2phhPBtn97tubvUDdTX5gJ1YHoUCGaG6GoAPaDe/JemVgPdvWIG48/KBuWPqj7PXXWO+4of8BhBevPi6ZQ/EagDGl3qo3m7snZAdUC7Wx2oPwYGMFjvyTdYv5LND9bbtn6B6SdFw1V3rJ+bz7Ltam9lqL4jw13q+7MUM2/KhukG6Ve84iU+t/1RP8cD729SAuLvxATKb8pe151rsuNYVx7Pqn8FE/3ZY5i+rg41oA7UheqsnerAWVAd0ATrPaC6x3sF1AYM1hXAesvPeYGpAlhv2SkvMDVYD9w2GxRn51fftnd79REwhuqcOaNZM3kzmbO5hulxdTQfXoiuBtAD683vcL+ibDi7U4xw/JUYzolfgjouFYgOcIL05hXhoQ3TAQP1FXm7shihetuHB6yvhOqtlh7vfFBtsL6mVoP1OE+D9c8yWN/vqZTvbX9LuVu9AlRnBuqjWaN5GUD9ZJjO1JXOCtJVZrY337SxMz8U4FRNKlD8WYznOnMtd67HjuNUguhAbZDefPJhekQdakC97VsTqrN2qgPuVv9cS693Pqg2WM8F60D8eB2D9c8SAOvAgjnrBuvOV9h2pffKbnU+SG6ovjpnNEspD5gD6hVgukF6j1+8hDrmNTrcv0kVgF8RIyR/FMPaV3wJKrAOoAMaEP3uWROmN5/8OgzUa+Sxvay0bb8+g+Wlpa2WHu+689WBfLDe41sNrLf82DnrfT9U5LzA9CpYB/JeYKoA1nu2jQbrPdknv+Q0O19xW4CnW10dqjPDbuac0ayMPCAPqBumP/vEiB2iCwH0J/+vdfcDdwbAyih2MP4stvNY/SWowFqADqx9AfEpIL15ccD05qEL1AFD9egsj4C5JtUxMIDB+kgNButXsvlfYNoBo9NeYNp3XcaOg8kEtwpgfYVnNijOzlfcVrVbnQ2qt334OtXZu9QN1L8rG6hn50fVcffh7JJvfjojZ5r3Euj/Qwdeo6UGwr+J9Xxlr3PGuqwG6IAeRG++fhnqdZ+8cS+AJlCfyVWA+FVGwIxkMIH1lWNgAIP1kRoywXrPtplz1hXAetv2uHEwEnPWV3QAK4D1ip7Vt3W3+rvt+8QIvCt2qSsBdcP0eTF1yDevWLF3y999181tb/6XtGekTDaMzRIrHH8llnOUtWY74DmwFqADOhC9eUb71YLpgDZQb/vvBdwZmZWgettnfUe8wfq8t8F603VgnXtcCnPWAY+D+ezHPw7m1DnrFSF4dj7LtqvAOhMoZ4PqOzIM1eeygJwO9UygzQCx2brST5vb3nzlRs/4palKUPyVGM9f9prugufAeoAOrD3HJ4H05hXlE1PTqUB9JlslsxJUH8lhguoAz4tLe70N1ps8Duaq53ld630/UuR0rXvOesx29qy7rSJYP61bnRHcj2SM5uzOUgPq2TA7Oz+qjuYRJ3aQLgjQ//W/XDffS1OzYe1qMQLyZ7Gcg53g/KYdAB3Qg+jNd4UnH0xvXvnd6cCZQH0mdzfEHz3HrGBdvVsdMFh/pRWw+nSwDmh0rV8F6z2eHgfzWR4Hs9dTBYIzQG2GbRnAOheE75OhOl/OaBawf+yLKlBngNhsXeknjZy5+67TwvEz9We4j0oBjL8S2/nMgObAPnB+0+rrZeV5ZQfpzS/Si6M7HdAG6jP5Kl3qQD2oPpKzGqoDa8H6yjEwvf7Z8HdlHdlgvWfbamAd8EtMv3tyd60rjIMxWK+V37MtA1TvrYMFwrfte7Y1VK+QA5wD1LNhdnZ+RA13n3NAutrYmbt/l/g63D9JFYJ/Ehsgf1YWML9pNzgH9lxnq8+7yiz3qiAdiLl2VYH6bLYCVB89v1WgOqDdrQ6sg7692zOAdaWO9Z5tDdavbMcP1lu2x8G8k+es7892Pke3eu/27lbP8x/JYM8BxqB6xotJs4C6YfqjD1eXfPM6G6KvHj2Dq8C9IujuFTsYf1Y2KH9UBjS/ade1qwrQm/c5c9wrwfTmkTPyJStboUsd4IXqAN8IGOCMMTCAZ6yP+GaC9ZYfO2c9E6y3bT1n/Z0yQWelOesqwDgbVqvkAxxgnaVbXR16V4Hq7EAd2N+lrgjUq8D06iB9BZRWBegDddd4aaoaDH8nJkj+rBOg+e9Mvwz1T894RdZpmB5bg9r8dvbRL22/GlAd0O5W7/U3WL/LYN0vMP2e7Tnr72Swbs+dnr3bVgbrJ42AYYTqrOAeqA/UDdNza4io4+5zHkQXndv+
P/0ncqRMFeh9Vcxw/FGZoPxZVcH5PWu1/5kvQ438W4v6ezBQ35+r0KXe9jNUv+7PA9V7t2d4cWlvHQbrBuvfZLAes10lsO7s2G2z89Wg+krvldDYUH1NxmjOzrEvBuprc2ezo2qIqqP5cIJ0JYC+Gp5PuNd9aaoKEH8lJkj+qMx/DbH7OlWf484O0YG6IL356ML02fyZbAWoPnp8O6A6wDdXvWXwgHWWbvXeWgzW64H1Hs9osA7kvcD0RLAO5L3AtBrcVshmyFcD6yd0q7NB9UqjX3Z2qasAddXu9Eowna1DvnlpQPSVAH19X/uvz5b1L01VBt9XxQrIH8UwOijrx50Kc9yVxs9E/81Xg+nNI7cGA/Vv+50N1QF3q38SQ7d6r/eKmhleDGuw/l3kYB1IfIHpCoBpsD6/nT1zwTpPB/qabnUm8M3YRV4Jqhuox2bO5s5mM+RH1HD3iRP7y09XAfTV8HzmBcf/6ucIIA5oQPFXYgDlj8r8FxE718Iz3P8UK0gHDNNZ8meuEQWovguoA4bqK7Z3t/qYr8G6wfp3z5wXmBqsO/sETyB3TBHLtm37nm15wPdp/iMZ7GNfTgDqmTC7Ulc646iZ5herFRB9JUAPgOdf9WKN13e4v5MqAP8mNkD+KIbxQZXnuKsBdGDNv0CpCtKbT4SHgXqP3KV+FyNUB7jA+spu9ZW1GKzfajgTrAPxc9YN1j+LHayv8KxUY0VPj4G5bXtdHgGj4w/U7FI/Baird6ZXBensEF0VoAeu609Z8P1NzGD8lRhg+U1Za1fpBagt40yIDsSubyWYHlGHGlAHDNUftQOqtxweMD2yPcsYmN5aDNavg+CWb7D+PbtnPXPmrCtAzBXjNtiBucKxVPQ0WL9te10s3erq0Pt0qK4A1P0S1P35ETVE1XH3ipMKRBcB6G/839ae1+H+Tmog/JOYIPmjstc4Y12UATqw7l0IzCC9+fHA9OaT250+W4OB+mcZqvd3tzOBdZZu9V7vqmC91eCO9W86Eaz3bJsF1nuy2bfLzFaB4BXBemWofoo3oz/QD9zcpZ6bN5OZmTubHVVDVB3NJ0YKEH0VQE+E5yN6D9yzoexuscLxZzGdF78EdU4r359wCkhvXlE+2t3pgB5Qb/vWhOqAR8BcleoYmF5vg/U1YB24Dtf71jV2zvpVsN6y/QLTd8p8SWQlGF2pxmxPg3UeQK3arX7qTHV3qefmzWQyZM/mR9UQUcfdhxukr4DoKwH6qpe1/s74qB8qgPtJKkD8nVjX+ZSXoLa89ceqBNFvMky/6pFfx+z1Zaj+XsxQveWc1a0OaI6BAQzW7zVc7dyuBdYBz1mP2pZ9zroCjFaoUSXbYF0TfjOBbzZ/Q3X+LMXMiGyG/Kg67j4x9ZwM0VcD9AV1z42UUYfg38QKyW9iWf/KL0JdCdABHYjePPlAevPiGTtjoL4v01C9yd3qn2WwfldFsN7j63Ew38Xete4565zZKp3omS/V7ck3WO/3PcV7xP90qF69S11xzMxsdkR+RA13nzNAuiJEXz1+pmV8rP+HBtrOiB2MvxLbumeu4c61WA3QgXUQHeAH6c0v0qsOTAd0gXrbn79LHTBUH80xWH8vg/VbDZ6zfi3f42A+qUrXeiUQrnAsqzwzwXrPtgbr/b5M3jv8V89VN1TfnzWTN5M5mzubzZAfVcdNUUhZAaIrA/QFta99aaoiCP8kNkj+KIa13r0+O+A5sBagA+vOHTNIb348ML355HanA2cBdcBQ/VGMUB3gAuss89V7vQ3W14D1tq3HwXySwviNLLC+wpN9u5Oz3bF+XWpg3d3q77XjZaXMUF1hzMxoXlbmbO5sdkR+RA03nQLSV0H04vPbAaUZ7j1iBuOvxHQOstZuFzwHdAF6846vnRWkN68on/zudEATqM9kjwJ1wFB9JuekbvVef4P1xxoM1q9l84+DyYSeHgezb7vMbIUaAXesqwFwBt9e715/Q/UaOaNZGXkzmZm5s9lRNQCcIF0BoiuOnblnLNHaDvdXUoPh78QEyR+Vvb47wTmwHp7fpAbRm2+0Hx9Ib176MB3QBOqARpc6YKi+GqoDZ4yB6d3eYN1g/YKOnLPek11lu8xshRp7tlW4Htdue3W72vBbtVv9VKhetUvdQH1PdkQ+EAPTWUG6CkQXfPnpi4xLx1Bjhvs3scLxZ7Gci93Q/KYK8Lz5nzvDnQ2kAxwwHTBQ79GuLvWWdS5UB9ytHrHtyh8zDNYN1ndvdxVkAnXmrCvA6ErH0rPtyWCdAYCr+TJ5A3xz1Vf/6DCSwZ4zmpWRN5M5mzubHZHP1pXOCtLVALrg3PZH7e9wfyUVIP5KLJD8UVnA/KZd4BzYd+2cDNGbX6wM05/3z+uOVxn7Ahiq33TSCBjA3ep/1nEVLPfUa7B+zbMOWAf456xXgswnHnPPttXAusfA8Piu9jZUPzNnNGsmbyYzM3c2G+DqSmd7IWvzipfntr/L6NKfwF0Zfn8TIxx/VjYsv2knNL+pAjxv/qt8z5nfHvV3wADTm4eB+hXtBOqAoXqvWEbA9Pqv/AFBqVsdMFgPButA4gtMTwTrmdk+5phtDdavbnsNGqjBb9VudUN1jozRHIUsxcyIbIAHpp8A0lVGzjRf/bEzYHxpqgIUfyUWUH5TBjC/aec1teN68fz22DoN02NrUALqAH+X+mgWI1QH3K3+TpXHwABrwHoHhDZYT9gO4AfrKzzZt8vMNljP8ry6nRYAV/Pt9T4NqjN3j7N3qZ8E1LNhekWQzg7RFQH6Jng+cgzXRsqoQvBPYgPkj8qE5Tdl/BCz6zpTneHOPnom8m+qCkyfrWMGpgP756gDdbvUR/czVI/3ZxgD0+owWDdYj9uuZ1uDdd3tMrMVrsWe7PxZ7Fe30wHVLKNaWEbAGKqvyakK1DPAdmZ3ehWYzgjSFSC64siZe8b60TMth/ylqcxQ/JUYQPmjsv71ws5rascxnj6/nQ2kA3HrptydDmgB9ZZpqA5wQvWRDJZudUBvDEzzjgfrK8bAAAbrkdv1bJsF1nu2PW27zOxK12JPdu45vK5sAJ5d60rfXu+V3eonQnVWcD+aM5qllDeTCRimR9fRfLghuipAJ5zb/qjxl6aqwfB3YoPkj8oe97P7xxjPcP/mywvSAS6Ynt2ZfpMiUAc0utRH83YAdWDfjwTuVn8vBrCe3a3etjVYvyLDzDO2y8xWOOZK12Kf53UZrHP4Ajzd6obqNXJGszLyZjKBOaA+C9MN0j95xUppZnvzLjG3/d+s/wDAjxQ4Z4bjj8oG5Y/K+hcMlea4q81wZwXpAE9nevOYUyZMB7SAOmCofpOh+nd5DMyYr8H6tQ0zwexVmAnwv8DUgHt+u8xsg/Ur2xqsM/i6Wz3PfySjYs5o1kzeTGZ2d3r2S1gjarj71B830zzX6NC57Y8a73B/JxUo/kpMoPxRmWN/dq9JhTnuq47hBJDevPJhOnAmUG+5/F3qo/uNQHVgPZAG+EbAjPgbrP+WwXrfOb54XJfBOpALAD1nXXe7zOwTf+RZ53l1u3pgnaU7exVYP6l
[... base64-encoded PNG payload (the raster image embedded in the MNE logo SVG) omitted for readability; the encoded data continues on the following line ...]
fhan1UBiLN2ue/s1t6hndB9RIbuazJvucD4PUY2dI+oIbIOBNTSvM4C74DuuJmWcddO+N7yzgTwgCaEv8kwPlcHzKcfv4xOh+1VofajPErGsixLQ9VeIrRKjC+NtGK1+hwz/q0xvtx010tHd2SwHstNIy9W9EtN12Uqv0yV5YWqTC9VbV5xf88rXqx6+79IrXxB5Y6XQe5+yWbmCy4ZXypa4eWfr17YyrjWFrXGUKhhO6+Ya5tRle72bGlX36+qfw9WntRuFiPFeGN58vm4qtWgtNe/94xFQ4QMMUL9kzOYofvo1W7oHp+ZkXvLnlEE8J5VxN8+G3iPBobR4B3AUvBu+N6TlwuZmcGwOoR/1hUoz7T+1nb1j5RRB46zOmVuu7vb1+jUcTKWZVnWazGOHFEX26z7EVUZ+7Ijw/Pcm0bGy+wevTKTB4zdB2fNV88eMYPE/IgaImtZUU/z4hs1A6wfNwOsfdFq818/dqblrBfDi0dfQV+We8lP0L0Ke+yB7iznxQoRz+lU6W4/QWzrxlIPSx2KUpvfblmRYu3dYOwqYewCYexOXi13uX8X43gixrEv1TrQq3a67xybM9Pxp5Q5mzubfcuf259jzMytlghFj5qJqusf6IybAWp0vrec/V3ILB3eCp3Y70bTMD7DROlq17zC+bM6O9w9Sibaj7O7PUpMNTHVYlmW9U07X4S3UowvBa3QIc7Y/bxavX8TjGtUoZO+7bP+OJg70Hd2ugP7XqQ6elzA3s7zrJepAjkvNc3udo+oIbqO2Vqi6rl7xb1cFVjb9Q648/19zm/tuldkeukocyf8s75Bd/X73hGtgO6s559ZD+fhOnBXgu0rpFKnxavsa+i8rxzLOkuscG61KoBSRiC++hhGx2asVAUgvgsg96oaDN+VA4z9reyE7m2/c0bMzOQiKTsiP6KGqDqiaomsp3mtGTcDGL7f/feNZ8kC8C2bG8IDfM8Dz7rSBc92780od85P6dqfidqFqFAva3c729qx1ZMpr4Vlfdau8QCVVGXMRa8qnHfGkSa9Wj1ahnFkyo7PqSoZzrlrZK/RURInjHvJHPUSkZ2ZH1FDVB1RtQDxYxlWvWR11YtWFcfOtIx9Y0ayR3iwjVWpMNrk08galnW2pJX7uxT7r2LWuHxuLaue9s435NWuebyMc6sBznNT4YcDH8M17YDuvWJcJ0N37pzKc91nIPjYfnOZGbm37BkxAO+K4L15xYN3w/f9AH6HskEzKxyuAOMfdRXMs6y/RaPvI2XUumpPm91uWZbFLsZxGVXFOPIF6L8GWEdcrM6osE6Mo2UYx7JUmYPOPI6FOWc0a/TvRWnEDDZn3nJPHDMTUQNTHY+1zNbTvO5iHDkDaI6dAeLX9nXG/rEsDDPQ30Fflmexb9Cd8TmmR6PQneX8WKHqe2lqpNT/kCwd+VrT1imwNmP+acV1ZYYyjDOrgb2AaWXGjmua8e+mwjr1/m2MwENGkGzo3if2HGzKmnmZKtAPz2aOre2nM9sdg7m37Kz56izAO7KO2Vqi6rl74V+vGKnCd2Dt3Hdg7f3lMwzdcU/HAOEBfhB/U3Ug/05R3fFs5/Nw5QF3i0/+4+TWLCTJhH1ZXbcZuawdxlHaeXzV1/KKdoDFtg8fhN2RsWOtKgDxHetUBbr3auRcAHxAfARsMcPwUVA3ekwjWe52/5zZ9tXJnc2OyI+oIaqOqFoe62leXF3vzVMHvgN7u9+BegC+5f6prGeeT6CXkQ9dGUtz8vNj1lgbxmtlRMHr9xm4V1m0GbH/sbLXxyBDO2tEjJ2symJfz9O73EfE2OVeJYMRiO9Ypx3QvVeM52Ikh7U7XAGGu9v9rqxu95FM1dzZ7Mf8zBqi6nisZbae5hXf9d78YqQM34F6AL7l5UH4lp8nNRh/U8+seHOiGHl+/UvVuryY/+gtS12sL3CspIwXjbFr14vvmDX6wrv+ffwix5UZverN2PEiqh3r1LvHyEtUVx/HjoxbDltG1Rz2rJ0vVAXm7jt23+vccsf3zTne2ezoGrLreK5n3if2JYcrXgoZXSPw+4WrK+5kHl+8uurZcfdLOB/PQ/T5uJb/+v+y9WpdMtZnVu/Wl3HNLTnVAu7WnNQ+HNXkD+lxzT4o7Fb1c10V8O8CPiOrsAu678hghNU7MnasFSOAZYTuQJ21GslgvHZ35zAf02jWKEAbBWMZIFoVfjNA7wgQFgneo2oxfJ9TRQB/AoRvNfBC4U8wnv158JN64DzbObFS9P8HSU8ghENfhXsAAAAASUVORK5CYII=" id="image73e6625c3e" transform="scale(1 -1) translate(0 -135.12)" x="0" y="-4.66125" width="360" height="135.12"/> - - - + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - + - - + + - - + diff --git a/doc/_static/mne_logo_gray.svg b/doc/_static/mne_logo_gray.svg new file mode 100644 index 00000000000..1cabd8f5932 --- /dev/null +++ b/doc/_static/mne_logo_gray.svg @@ -0,0 +1,783 @@ + + + + + + + + 2023-11-07T13:26:54.034781 + image/svg+xml + + + Matplotlib v3.8.0.dev1998+gc5707d9c79, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/_static/mne_logo_small.svg b/doc/_static/mne_logo_small.svg index 506d2c0f4c0..0f62fdf3ce5 100644 --- a/doc/_static/mne_logo_small.svg +++ b/doc/_static/mne_logo_small.svg @@ -6,11 +6,11 @@ - 2022-04-13T15:28:23.367808 + 2023-11-07T13:26:54.345596 image/svg+xml - Matplotlib v3.5.1, https://matplotlib.org/ + Matplotlib v3.8.0.dev1998+gc5707d9c79, https://matplotlib.org/ @@ -21,9 +21,9 @@ - + 
+iVBORw0KGgoAAAANSUhEUgAAAEIAAAAQCAYAAACiEqkUAAAAsElEQVR4nO2QW2rEMBAEa8Ri2WLXd8rJcyeH+Bnc+VCHwJ5hCoYeNZI+KiR0r4H2gK2gve86Am2BzoC9/HdODp+P0vN0fwa6vDt1FXQG62PuU2fW8cXaZvbpyac++N4ry9b42hrL2ViuxvLT0BDcQ6Ah0IAz0AM0eq++MwaqoNo7jX3u0e+mt37yH61QSABSxB8pwqQIkyJMijApwqQIkyJMijApwqQIkyJMijApwvwCdAFgHcVQk7IAAAAASUVORK5CYII=" id="imagec02f1fbe38" transform="scale(1 -1) translate(0 -12)" x="86" y="-10.75" width="49.5" height="12"/> +" clip-path="url(#pf7563a16a6)" style="fill: #808080"/> - + diff --git a/doc/_static/mne_python_dock_icon.png b/doc/_static/mne_python_dock_icon.png new file mode 100644 index 00000000000..abd77ff9d82 Binary files /dev/null and b/doc/_static/mne_python_dock_icon.png differ diff --git a/doc/_static/style.css b/doc/_static/style.css index 1d51176c383..16b96ae6c11 100644 --- a/doc/_static/style.css +++ b/doc/_static/style.css @@ -7,74 +7,79 @@ --pst-font-family-monospace: 'Source Code Pro', var(--pst-font-family-monospace-system); /* colors that aren't responsive to light/dark mode */ --mne-color-discord: #5865F2; - --mne-color-twitter: #55acee; - --mne-color-primary: #007bff; - --mne-color-primary-text: #fff; - --mne-color-primary-highlight: #0063cc; /* font weight */ --mne-font-weight-semibold: 600; } html[data-theme="light"] { + /* pydata-sphinx-theme overrides */ + /* ↓↓↓ use default "info" colors for "primary" */ + --pst-color-primary: #276be9; + --pst-color-primary-bg: #dce7fc; + /* ↓↓↓ use default "primary" colors for "info" */ + --pst-color-info: var(--pst-teal-500); + --pst-color-info-bg: var(--pst-teal-200); + /* ↓↓↓ use "warning" colors for "secondary" */ + --pst-color-secondary: var(--pst-color-warning); + --pst-color-secondary-bg: var(--pst-color-warning-bg); + /* ↓↓↓ make sure new primary (link) color propogates to links on code */ + --pst-color-inline-code-links: var(--pst-color-link); + /* ↓↓↓ make sure new secondary (hover) color propogates to hovering on table rows */ + --pst-color-table-row-hover-bg: var(--pst-color-secondary-bg); /* topbar logo links */ --mne-color-github: #000; - --mne-color-discourse: #000; + --mne-color-discourse: #d0232b; --mne-color-mastodon: #2F0C7A; + --mne-color-sponsor: #BF3989; /* from GH sponsor heart, via browser devtools */ + --mne-color-opencollective: #1F87FF; /* scraped from logo SVG */ /* code block copy button */ --copybtn-opacity: 0.75; /* card header bg color */ --mne-color-card-header: rgba(0, 0, 0, 0.05); - /* section headings */ - --mne-color-heading: #003e80; - /* pydata-sphinx-theme overrides */ - --pst-color-primary: var(--mne-color-primary); - --pst-color-primary-text: var(--mne-color-primary-text); - --pst-color-primary-highlight: var(--mne-color-primary-highlight); - --pst-color-info: var(--pst-color-primary); - --pst-color-border: #ccc; - --pst-color-background: #fff; - --pst-color-link: var(--pst-color-primary-highlight); /* sphinx-gallery overrides */ --sg-download-a-background-color: var(--pst-color-primary); --sg-download-a-background-image: unset; --sg-download-a-border-color: var(--pst-color-border); - --sg-download-a-color: #fff; + --sg-download-a-color: var(--sd-color-primary-text); --sg-download-a-hover-background-color: var(--pst-color-primary-highlight); --sg-download-a-hover-box-shadow-1: none; --sg-download-a-hover-box-shadow-2: none; } html[data-theme="dark"] { + /* pydata-sphinx-theme overrides */ + /* ↓↓↓ use default "info" colors for "primary" */ + --pst-color-primary: #79a3f2; + --pst-color-primary-bg: #06245d; + /* ↓↓↓ use default "primary" colors for "info" */ + --pst-color-info: 
var(--pst-teal-400); + --pst-color-info-bg: var(--pst-teal-800); + /* ↓↓↓ use "warning" colors for "secondary" */ + --pst-color-secondary: var(--pst-color-warning); + --pst-color-secondary-bg: var(--pst-color-warning-bg); + /* ↓↓↓ make sure new primary (link) color propogates to links on code */ + --pst-color-inline-code-links: var(--pst-color-link); + /* ↓↓↓ make sure new secondary (hover) color propogates to hovering on table rows */ + --pst-color-table-row-hover-bg: var(--pst-color-secondary-bg); /* topbar logo links */ --mne-color-github: rgb(240, 246, 252); /* from their logo SVG */ --mne-color-discourse: #FFF9AE; /* from their logo SVG */ --mne-color-mastodon: #858AFA; /* www.joinmastodon.org/en/branding */ + --mne-color-sponsor: #DB61A2; /* from GH sponsor heart, via browser devtools */ + --mne-color-opencollective: #99CFFF; /* scraped from logo SVG */ /* code block copy button */ --copybtn-opacity: 0.25; /* card header bg color */ --mne-color-card-header: rgba(255, 255, 255, 0.2); - /* section headings */ - --mne-color-heading: #b8cbe0; - /* pydata-sphinx-theme overrides */ - --pst-color-primary: var(--mne-color-primary); - --pst-color-primary-text: var(--mne-color-primary-text); - --pst-color-primary-highlight: var(--mne-color-primary-highlight); - --pst-color-info: var(--pst-color-primary); - --pst-color-border: #333; - --pst-color-background: #000; - --pst-color-link: #66b0ff; /* sphinx-gallery overrides */ --sg-download-a-background-color: var(--pst-color-primary); --sg-download-a-background-image: unset; --sg-download-a-border-color: var(--pst-color-border); - --sg-download-a-color: #fff; + --sg-download-a-color: var(--sd-color-primary-text); --sg-download-a-hover-background-color: var(--pst-color-primary-highlight); --sg-download-a-hover-box-shadow-1: none; --sg-download-a-hover-box-shadow-2: none; } -h1, h2, h3, h4, h5, h6 { - color: var(--mne-color-heading); -} /* ************************************************************ Sphinx fixes */ @@ -102,9 +107,20 @@ html[data-theme="dark"] img { filter: none; } -/* prev/next links */ -.prev-next-area a p.prev-next-title { - color: var(--pst-color-link); +/* make versionadded smaller and inline with param name */ +/* don't do for deprecated / versionchanged; they have extra info (too long to fit) */ +div.versionadded > p { + margin-top: 0; + margin-bottom: 0; +} +div.versionadded { + margin: 0; + margin-left: 0.5rem; + display: inline-block; +} +/* when FF supports :has(), change to → dd > p:has(+div.versionadded) */ +dd>p { + display: inline; } /* **************************************************** sphinx-gallery fixes */ @@ -115,11 +131,11 @@ a.sphx-glr-backref-instance:hover { } /* backreference links: make non-MNE func/meth calls resemble regular code */ a[class^="sphx-glr-backref-module"] { - color: rgb(var(--pst-color-text-base)); + color: var(--pst-color-text-base); } /* backreference links: make MNE calls bold and colorful */ a[class^="sphx-glr-backref-module-mne"] { - color: rgb(var(--pst-color-link)); + color: var(--pst-color-link); font-weight: var(--mne-font-weight-semibold); } /* suppress redundant note at top of every tutorial and signature at the end */ @@ -135,8 +151,15 @@ p.sphx-glr-signature { border-radius: 0.5rem; /* ↓↓↓↓↓↓↓ these two rules copied from sphinx-design */ box-shadow: 0 .125rem .25rem var(--sd-color-shadow) !important; + text-decoration: none; transition: color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out; } +.sphx-glr-download 
a.download code { + color: var(--sg-download-a-color); +} +.sphx-glr-download a.download::before { + color: var(--sg-download-a-color); +} /* Report embedding */ iframe.sg_report { width: 95%; @@ -151,6 +174,57 @@ iframe.sg_report { display: none; } +/* Make our external thumbnails (e.g., mne-gui-addons) act like standard SG ones */ +.sphx-glr-thumbcontainer a.external { + bottom: 0; + display: block; + left: 0; + box-sizing: border-box; + padding: 150px 10px 0; + position: absolute; + right: 0; + top: 0; +} + +/* ******************************************************** HTML repr tables */ + +/* make table responsive to pydata-sphinx-theme's light/dark mode */ +.table > :not(caption) > * > * { + color: var(--pst-color-text-base); +} +.mne-repr-table tbody tr:hover { + background-color: var(--pst-color-table-row-hover-bg); +} +.mne-repr-section-toggle > button > svg > path { + fill: var(--pst-color-text-base); +} +/* make the expand/collapse button look nicer */ +.mne-repr-section-toggle > button { + padding: 20%; +} +/* make section header rows more distinct (and harmonize with pydata-sphinx-theme table +style in the process). Color copied from pydata-sphinx-theme; 2px copied from bootstrap. +*/ +.mne-repr-table th { + border-bottom: 2px solid var(--pst-color-primary); +} +/* harmonize the channel names buttons with the rest of the table */ +.mne-ch-names-btn { + font-size: inherit; + padding: 0.25rem; + min-width: 1.5rem; + font-weight: bold; +} +/* +.mne-ch-names-btn:hover { + background-color: var(--pst-color-); + text-decoration: underline; +} +.mne-ch-names-btn:focus-visible { + outline: 0.1875rem solid var(--pst-color-accent); + outline-offset: 0.1875rem; +} +*/ /* ***************************************************** sphinx-design fixes */ p.btn a { color: unset; @@ -183,21 +257,24 @@ aside.footnote:last-child { } /* ******************************************************* navbar icon links */ -#navbar-icon-links i.fa-square-github::before { +.navbar-icon-links svg.fa-github { color: var(--mne-color-github); } -#navbar-icon-links i.fa-square-twitter::before { - color: var(--mne-color-twitter); -} -#navbar-icon-links i.fa-discourse::before { +.navbar-icon-links svg.fa-discourse { color: var(--mne-color-discourse); } -#navbar-icon-links i.fa-discord::before { +.navbar-icon-links svg.fa-discord { color: var(--mne-color-discord); } -#navbar-icon-links i.fa-mastodon::before { +.navbar-icon-links svg.fa-mastodon { color: var(--mne-color-mastodon); } +.navbar-icon-links svg.fa-heart { + color: var(--mne-color-sponsor); +} +.navbar-icon-links svg.fa-opencollective { + color: var(--mne-color-opencollective); +} /* ************************************************************ nav elements */ /* topbar nav inactive */ @@ -206,7 +283,6 @@ aside.footnote:last-child { } /* topbar nav active */ .bd-header.navbar-light#navbar-main .navbar-nav > li.active > .nav-link { - color: var(--pst-color-link); font-weight: var(--mne-font-weight-semibold); } /* topbar nav hover */ @@ -214,18 +290,6 @@ aside.footnote:last-child { .bd-header.navbar-light#navbar-main .navbar-nav li a.nav-link:hover { color: var(--pst-color-secondary); } -/* sidebar nav */ -nav.bd-links .active > a, -nav.bd-links .active:hover > a, -.toc-entry a.nav-link.active, -.toc-entry a.nav-link.active:hover { - color: var(--pst-color-link); -} -/* sidebar nav hover */ -nav.bd-links li > a:hover, -.toc-entry a.nav-link:hover { - color: var(--pst-color-secondary); -} /* *********************************************************** homepage logo 
*/ img.logo { @@ -237,10 +301,13 @@ img.logo { ul.quicklinks a { font-weight: var(--mne-font-weight-semibold); color: var(--pst-color-text-base); + text-decoration: none; +} +ul.quicklinks a svg { + color: var(--pst-color-text-muted); } ul.quicklinks a:hover { text-decoration: none; - color: var(--pst-color-secondary); } h5.card-header { margin-top: 0px; @@ -251,7 +318,6 @@ h5.card-header::before { height: 0px; margin-top: 0px; } - /* ******************************************************* homepage carousel */ div.frontpage-gallery { overflow: hidden; @@ -260,7 +326,7 @@ div.frontpage-gallery { } div.frontpage-gallery a { text-decoration: none; - color: rgb(var(--pst-color-text-base)); + color: var(--pst-color-text-base); } div.frontpage-gallery img.card-img { transform: scale(1.8); @@ -285,7 +351,7 @@ div.frontpage-gallery:hover .fadeout { needed for dark mode. */ div.card { border: 1px solid var(--pst-color-border); - background-color: rgb(var(--pst-color-background)); + background-color: var(--pst-color-background); } .card-header { border-bottom-color: var(--pst-color-border); @@ -294,13 +360,21 @@ div.card { /* *************************************** homepage funder/institution logos */ div#funder-logos div.card, -div#institution-logos div.card, div#funder-logos div.card img, -div#institution-logos div.card img { +div#institution-logos div.card, +div#institution-logos div.card img, +div#contributor-avatars div.card, +div#contributor-avatars div.card img { background-color: unset; border: none; border-radius: unset; } +div#contributor-avatars div.card img { + width: 2.5em; +} +.contributor-avatar { + clip-path: circle(closest-side); +} /* ************************************************************ funders page */ ul.funders li { @@ -335,3 +409,17 @@ img.hidden { td.justify { text-align-last: justify; } + +/* Matplotlib HTML5 video embedding */ +div.sphx-glr-animation video { + max-width: 100%; + height: auto; +} + +/* fix sidebar scrollbars */ +.sidebar-primary-items__end { + margin-bottom: 0 !important; + margin-top: 0 !important; + margin-left: 0 !important; + margin-right: 0 !important; +} diff --git a/doc/_static/versions.json b/doc/_static/versions.json index 018fd27236a..644b28f4441 100644 --- a/doc/_static/versions.json +++ b/doc/_static/versions.json @@ -1,14 +1,49 @@ [ { - "name": "1.4 (devel)", + "name": "1.11 (dev)", "version": "dev", "url": "https://mne.tools/dev/" }, { - "name": "1.3 (stable)", + "name": "1.10 (stable)", "version": "stable", "url": "https://mne.tools/stable/" }, + { + "name": "1.9", + "version": "1.9", + "url": "https://mne.tools/1.9/" + }, + { + "name": "1.8", + "version": "1.8", + "url": "https://mne.tools/1.8/" + }, + { + "name": "1.7", + "version": "1.7", + "url": "https://mne.tools/1.7/" + }, + { + "name": "1.6", + "version": "1.6", + "url": "https://mne.tools/1.6/" + }, + { + "name": "1.5", + "version": "1.5", + "url": "https://mne.tools/1.5/" + }, + { + "name": "1.4", + "version": "1.4", + "url": "https://mne.tools/1.4/" + }, + { + "name": "1.3", + "version": "1.3", + "url": "https://mne.tools/1.3/" + }, { "name": "1.2", "version": "1.2", @@ -39,43 +74,8 @@ "url": "https://mne.tools/0.21/" }, { + "name": "≤ 0.20 ...", "version": "0.20", - "url": "https://mne.tools/0.20/" - }, - { - "version": "0.19", - "url": "https://mne.tools/0.19/" - }, - { - "version": "0.18", - "url": "https://mne.tools/0.18/" - }, - { - "version": "0.17", - 
"url": "https://mne.tools/0.17/" - }, - { - "version": "0.16", - "url": "https://mne.tools/0.16/" - }, - { - "version": "0.15", - "url": "https://mne.tools/0.15/" - }, - { - "version": "0.14", - "url": "https://mne.tools/0.14/" - }, - { - "version": "0.13", - "url": "https://mne.tools/0.13/" - }, - { - "version": "0.12", - "url": "https://mne.tools/0.12/" - }, - { - "version": "0.11", - "url": "https://mne.tools/0.11/" + "url": "https://mne.tools/dev/old_versions/" } ] diff --git a/doc/_templates/copyright.html b/doc/_templates/copyright.html index abea05537ff..742b8b7e09c 100644 --- a/doc/_templates/copyright.html +++ b/doc/_templates/copyright.html @@ -1 +1 @@ -

© Copyright {{ copyright }}

+

© Copyright {{ copyright }}

diff --git a/doc/_templates/homepage.html b/doc/_templates/homepage.html index b960416ced5..023e92e4dbc 100644 --- a/doc/_templates/homepage.html +++ b/doc/_templates/homepage.html @@ -1,8 +1,8 @@
-
+ diff --git a/doc/connectivity.rst b/doc/api/connectivity.rst similarity index 100% rename from doc/connectivity.rst rename to doc/api/connectivity.rst diff --git a/doc/covariance.rst b/doc/api/covariance.rst similarity index 91% rename from doc/covariance.rst rename to doc/api/covariance.rst index 1de751f21a8..b5449186b27 100644 --- a/doc/covariance.rst +++ b/doc/api/covariance.rst @@ -5,7 +5,7 @@ Covariance computation .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ Covariance compute_covariance diff --git a/doc/creating_from_arrays.rst b/doc/api/creating_from_arrays.rst similarity index 86% rename from doc/creating_from_arrays.rst rename to doc/api/creating_from_arrays.rst index f580cadfc78..f5771f15ef8 100644 --- a/doc/creating_from_arrays.rst +++ b/doc/api/creating_from_arrays.rst @@ -5,7 +5,7 @@ Creating data objects from arrays .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ EvokedArray EpochsArray diff --git a/doc/api/datasets.rst b/doc/api/datasets.rst new file mode 100644 index 00000000000..87730fbd717 --- /dev/null +++ b/doc/api/datasets.rst @@ -0,0 +1,51 @@ + +Datasets +======== + +.. currentmodule:: mne.datasets + +:py:mod:`mne.datasets`: + +.. automodule:: mne.datasets + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: ../generated/ + + fetch_dataset + has_dataset + brainstorm.bst_auditory.data_path + brainstorm.bst_resting.data_path + brainstorm.bst_raw.data_path + default_path + eegbci.load_data + eegbci.standardize + fetch_aparc_sub_parcellation + fetch_fsaverage + fetch_hcp_mmp_parcellation + fetch_infant_template + fetch_phantom + fnirs_motor.data_path + hf_sef.data_path + kiloword.data_path + limo.load_data + misc.data_path + mtrf.data_path + multimodal.data_path + opm.data_path + sleep_physionet.age.fetch_data + sleep_physionet.temazepam.fetch_data + sample.data_path + somato.data_path + spm_face.data_path + ucl_opm_auditory.data_path + visual_92_categories.data_path + phantom_kit.data_path + phantom_4dbti.data_path + phantom_kernel.data_path + refmeg_noise.data_path + ssvep.data_path + erp_core.data_path + epilepsy_ecog.data_path + eyelink.data_path diff --git a/doc/decoding.rst b/doc/api/decoding.rst similarity index 80% rename from doc/decoding.rst rename to doc/api/decoding.rst index e539629c8c0..f8f2257825f 100644 --- a/doc/decoding.rst +++ b/doc/api/decoding.rst @@ -11,7 +11,7 @@ Decoding :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ CSP EMS @@ -29,12 +29,15 @@ Decoding GeneralizingEstimator SPoC SSD + XdawnTransformer + SpatialFilter Functions that assist with decoding and model fitting: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ compute_ems cross_val_multiscore get_coef + get_spatial_filter_from_estimator diff --git a/doc/events.rst b/doc/api/events.rst similarity index 87% rename from doc/events.rst rename to doc/api/events.rst index 7874185922d..3f7159a22d5 100644 --- a/doc/events.rst +++ b/doc/api/events.rst @@ -5,7 +5,7 @@ Events .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ Annotations AcqParserFIF @@ -24,6 +24,7 @@ Events concatenate_epochs events_from_annotations annotations_from_events + count_annotations :py:mod:`mne.event`: @@ -34,7 +35,7 @@ Events .. currentmodule:: mne.event .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ define_target_events match_event_names @@ -49,9 +50,9 @@ Events .. currentmodule:: mne.epochs .. 
autosummary:: - :toctree: generated/ + :toctree: ../generated/ average_movements combine_event_ids equalize_epoch_counts - make_metadata \ No newline at end of file + make_metadata diff --git a/doc/export.rst b/doc/api/export.rst similarity index 89% rename from doc/export.rst rename to doc/api/export.rst index 32f58f230bf..7c3bfc3f868 100644 --- a/doc/export.rst +++ b/doc/api/export.rst @@ -11,7 +11,7 @@ Exporting .. currentmodule:: mne.export .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ export_epochs export_evokeds diff --git a/doc/file_io.rst b/doc/api/file_io.rst similarity index 91% rename from doc/file_io.rst rename to doc/api/file_io.rst index 4ddcf7d1d01..2da9059deb3 100644 --- a/doc/file_io.rst +++ b/doc/api/file_io.rst @@ -4,7 +4,7 @@ File I/O .. currentmodule:: mne .. autosummary:: - :toctree: generated + :toctree: ../generated/ channel_type channel_indices_by_type @@ -53,12 +53,14 @@ File I/O write_trans what io.read_info + io.write_info io.show_fiff + io.get_channel_type_constants Base class: .. autosummary:: - :toctree: generated + :toctree: ../generated/ :template: autosummary/class_no_members.rst BaseEpochs diff --git a/doc/forward.rst b/doc/api/forward.rst similarity index 93% rename from doc/forward.rst rename to doc/api/forward.rst index 7e554195aa6..5abcd5178fc 100644 --- a/doc/forward.rst +++ b/doc/api/forward.rst @@ -5,14 +5,14 @@ Forward Modeling .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ :template: autosummary/class_no_inherited_members.rst Forward SourceSpaces .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ add_source_space_distances apply_forward @@ -55,7 +55,7 @@ Forward Modeling .. currentmodule:: mne.bem .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ ConductorModel fit_sphere_to_headshape diff --git a/doc/inverse.rst b/doc/api/inverse.rst similarity index 91% rename from doc/inverse.rst rename to doc/api/inverse.rst index 96f1804c69b..754244c17fe 100644 --- a/doc/inverse.rst +++ b/doc/api/inverse.rst @@ -11,7 +11,7 @@ Inverse Solutions .. currentmodule:: mne.minimum_norm .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ InverseOperator apply_inverse @@ -43,7 +43,7 @@ Inverse Solutions .. currentmodule:: mne.inverse_sparse .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ mixed_norm tf_mixed_norm @@ -59,7 +59,7 @@ Inverse Solutions .. currentmodule:: mne.beamformer .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ Beamformer read_beamformer @@ -74,12 +74,13 @@ Inverse Solutions apply_dics_epochs apply_dics_tfr_epochs rap_music + trap_music make_lcmv_resolution_matrix .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ Dipole DipoleFixed @@ -94,6 +95,6 @@ Inverse Solutions .. currentmodule:: mne.dipole .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ get_phantom_dipoles diff --git a/doc/logging.rst b/doc/api/logging.rst similarity index 85% rename from doc/logging.rst rename to doc/api/logging.rst index 1168b3e086f..64cc21759a7 100644 --- a/doc/logging.rst +++ b/doc/api/logging.rst @@ -5,7 +5,7 @@ Logging and Configuration .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ get_config_path get_config @@ -14,6 +14,7 @@ Logging and Configuration set_log_file set_config set_cache_dir + set_memmap_min_size sys_info use_log_level verbose @@ -27,7 +28,7 @@ Logging and Configuration :no-inherited-members: .. 
autosummary:: - :toctree: generated/ + :toctree: ../generated/ deprecated warn @@ -41,7 +42,7 @@ Logging and Configuration :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ get_cuda_memory init_cuda diff --git a/doc/most_used_classes.rst b/doc/api/most_used_classes.rst similarity index 81% rename from doc/most_used_classes.rst rename to doc/api/most_used_classes.rst index 2e4bc1d7dd0..705b4e845b3 100644 --- a/doc/most_used_classes.rst +++ b/doc/api/most_used_classes.rst @@ -4,7 +4,7 @@ Most-used classes .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ io.Raw Epochs diff --git a/doc/mri.rst b/doc/api/mri.rst similarity index 90% rename from doc/mri.rst rename to doc/api/mri.rst index 420711eea89..6cd38cbbeb4 100644 --- a/doc/mri.rst +++ b/doc/api/mri.rst @@ -11,8 +11,12 @@ Step by step instructions for using :func:`gui.coregistration`: - `Scaling a template MRI for subjects for which no MRI is available `_ +See also: + +- :func:`mne-gui-addons:mne_gui_addons.locate_ieeg`. + .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ coreg.get_mni_fiducials coreg.estimate_head_mri_t @@ -20,7 +24,6 @@ Step by step instructions for using :func:`gui.coregistration`: io.write_fiducials get_montage_volume_labels gui.coregistration - gui.locate_ieeg create_default_subject head_to_mni head_to_mri diff --git a/doc/preprocessing.rst b/doc/api/preprocessing.rst similarity index 80% rename from doc/preprocessing.rst rename to doc/api/preprocessing.rst index 98403661a6d..07443e518aa 100644 --- a/doc/preprocessing.rst +++ b/doc/api/preprocessing.rst @@ -7,9 +7,14 @@ Projections: .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ + :template: autosummary/class_no_inherited_members.rst Projection + +.. autosummary:: + :toctree: ../generated/ + compute_proj_epochs compute_proj_evoked compute_proj_raw @@ -25,7 +30,7 @@ Projections: :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ Layout DigMontage @@ -37,12 +42,14 @@ Projections: read_dig_polhemus_isotrak read_dig_captrak read_dig_dat + read_dig_curry read_dig_egi read_dig_fif read_dig_hpts read_dig_localite make_standard_montage read_custom_montage + transform_to_head compute_dev_head_t read_layout find_layout @@ -52,6 +59,7 @@ Projections: get_builtin_ch_adjacencies read_ch_adjacency equalize_channels + unify_bad_channels rename_channels generate_2d_layout make_1020_channel_selections @@ -66,7 +74,7 @@ Projections: :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ ICA Xdawn @@ -83,9 +91,11 @@ Projections: compute_maxwell_basis compute_proj_ecg compute_proj_eog + compute_proj_hfc cortical_signal_suppression create_ecg_epochs create_eog_epochs + find_bad_channels_lof find_bad_channels_maxwell find_ecg_events find_eog_events @@ -107,6 +117,7 @@ Projections: read_ica_eeglab read_fine_calibration write_fine_calibration + apply_pca_obs :py:mod:`mne.preprocessing.nirs`: @@ -117,7 +128,7 @@ Projections: :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ optical_density beer_lambert_law @@ -135,18 +146,36 @@ Projections: :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ project_sensors_onto_brain make_montage_volume warp_montage +:py:mod:`mne.preprocessing.eyetracking`: + +.. currentmodule:: mne.preprocessing.eyetracking + +.. 
automodule:: mne.preprocessing.eyetracking + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: ../generated/ + + Calibration + read_eyelink_calibration + set_channel_types_eyetrack + convert_units + get_screen_visual_angle + interpolate_blinks + EEG referencing: .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ add_reference_channels set_bipolar_reference @@ -161,7 +190,7 @@ EEG referencing: :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ construct_iir_filter create_filter @@ -179,7 +208,7 @@ EEG referencing: :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ compute_chpi_amplitudes compute_chpi_snr @@ -203,7 +232,7 @@ EEG referencing: :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ Transform quat_to_rot diff --git a/doc/python_reference.rst b/doc/api/python_reference.rst similarity index 100% rename from doc/python_reference.rst rename to doc/api/python_reference.rst diff --git a/doc/reading_raw_data.rst b/doc/api/reading_raw_data.rst similarity index 84% rename from doc/reading_raw_data.rst rename to doc/api/reading_raw_data.rst index ad04c0ca91a..50318d3ae91 100644 --- a/doc/reading_raw_data.rst +++ b/doc/api/reading_raw_data.rst @@ -10,39 +10,43 @@ Reading raw data :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ anonymize_info read_raw + read_raw_ant read_raw_artemis123 + read_raw_bdf + read_raw_boxy + read_raw_brainvision read_raw_bti read_raw_cnt read_raw_ctf read_raw_curry read_raw_edf - read_raw_bdf + read_raw_eeglab + read_raw_egi + read_raw_eximia + read_raw_eyelink + read_raw_fieldtrip + read_raw_fif + read_raw_fil read_raw_gdf + read_raw_hitachi read_raw_kit read_raw_nedf read_raw_nicolet - read_raw_hitachi + read_raw_nihon read_raw_nirx - read_raw_snirf - read_raw_eeglab - read_raw_brainvision - read_raw_egi - read_raw_fif - read_raw_eximia - read_raw_fieldtrip - read_raw_boxy + read_raw_nsx + read_raw_neuralynx read_raw_persyst - read_raw_nihon - read_raw_fil + read_raw_snirf Base class: .. autosummary:: - :toctree: generated + :toctree: ../generated/ :template: autosummary/class_no_members.rst BaseRaw @@ -56,6 +60,6 @@ Base class: :no-inherited-members: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ read_mrk diff --git a/doc/api/realtime.rst b/doc/api/realtime.rst new file mode 100644 index 00000000000..0df65ad0d56 --- /dev/null +++ b/doc/api/realtime.rst @@ -0,0 +1,6 @@ +.. include:: ../links.inc + +Realtime +======== + +Realtime functionality has moved to the standalone module `MNE-LSL`_. diff --git a/doc/report.rst b/doc/api/report.rst similarity index 79% rename from doc/report.rst rename to doc/api/report.rst index 5104f68adc6..eab37eae542 100644 --- a/doc/report.rst +++ b/doc/api/report.rst @@ -7,7 +7,7 @@ MNE-Report .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ Report open_report diff --git a/doc/sensor_space.rst b/doc/api/sensor_space.rst similarity index 87% rename from doc/sensor_space.rst rename to doc/api/sensor_space.rst index a1c72b3aa59..8121f63f3d5 100644 --- a/doc/sensor_space.rst +++ b/doc/api/sensor_space.rst @@ -5,12 +5,13 @@ Sensor Space Data .. currentmodule:: mne .. 
autosummary:: - :toctree: generated/ + :toctree: ../generated/ combine_evoked concatenate_raws equalize_channels grand_average + match_channel_orders pick_channels pick_channels_cov pick_channels_forward @@ -32,6 +33,6 @@ Sensor Space Data .. currentmodule:: mne.baseline .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ rescale diff --git a/doc/simulation.rst b/doc/api/simulation.rst similarity index 92% rename from doc/simulation.rst rename to doc/api/simulation.rst index e055729ae48..3416e05ed72 100644 --- a/doc/simulation.rst +++ b/doc/api/simulation.rst @@ -11,7 +11,7 @@ Simulation .. currentmodule:: mne.simulation .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ add_chpi add_ecg @@ -33,7 +33,7 @@ Simulation .. currentmodule:: mne.simulation.metrics .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ cosine_score region_localization_error diff --git a/doc/source_space.rst b/doc/api/source_space.rst similarity index 96% rename from doc/source_space.rst rename to doc/api/source_space.rst index ef6c8861bc2..cb4aee084f2 100644 --- a/doc/source_space.rst +++ b/doc/api/source_space.rst @@ -5,7 +5,7 @@ Source Space Data .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ BiHemiLabel Label diff --git a/doc/statistics.rst b/doc/api/statistics.rst similarity index 85% rename from doc/statistics.rst rename to doc/api/statistics.rst index 2b8313b1f11..f098b7206db 100644 --- a/doc/statistics.rst +++ b/doc/api/statistics.rst @@ -16,7 +16,7 @@ Parametric statistics (see :mod:`scipy.stats` and :mod:`statsmodels` for more options): .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ ttest_1samp_no_p ttest_ind_no_p @@ -29,7 +29,7 @@ options): Mass-univariate multiple comparison correction: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ bonferroni_correction fdr_correction @@ -37,7 +37,7 @@ Mass-univariate multiple comparison correction: Non-parametric (clustering) resampling methods: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ combine_adjacency permutation_cluster_test @@ -48,12 +48,19 @@ Non-parametric (clustering) resampling methods: summarize_clusters_stc bootstrap_confidence_interval +ERP-related statistics: + +.. autosummary:: + :toctree: ../generated/ + + erp.compute_sme + Compute ``adjacency`` matrices for cluster-level statistics: .. currentmodule:: mne .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ channels.find_ch_adjacency channels.read_ch_adjacency diff --git a/doc/time_frequency.rst b/doc/api/time_frequency.rst similarity index 80% rename from doc/time_frequency.rst rename to doc/api/time_frequency.rst index a366dbdecb9..a9ab2c34268 100644 --- a/doc/time_frequency.rst +++ b/doc/api/time_frequency.rst @@ -11,19 +11,28 @@ Time-Frequency .. currentmodule:: mne.time_frequency .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ AverageTFR + AverageTFRArray + BaseTFR EpochsTFR + EpochsTFRArray + RawTFR + RawTFRArray CrossSpectralDensity Spectrum + SpectrumArray EpochsSpectrum + EpochsSpectrumArray Functions that operate on mne-python objects: .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ + combine_spectrum + combine_tfr csd_tfr csd_fourier csd_multitaper @@ -41,7 +50,7 @@ Functions that operate on mne-python objects: Functions that operate on ``np.ndarray`` objects: .. 
autosummary:: - :toctree: generated/ + :toctree: ../generated/ csd_array_fourier csd_array_multitaper @@ -68,7 +77,7 @@ Functions that operate on ``np.ndarray`` objects: .. currentmodule:: mne.time_frequency.tfr .. autosummary:: - :toctree: generated/ + :toctree: ../generated/ cwt morlet diff --git a/doc/api/visualization.rst b/doc/api/visualization.rst new file mode 100644 index 00000000000..280ed51f590 --- /dev/null +++ b/doc/api/visualization.rst @@ -0,0 +1,132 @@ + +Visualization +============= + +.. currentmodule:: mne.viz + +:py:mod:`mne.viz`: + +.. automodule:: mne.viz + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: ../generated/ + + Brain + ClickableImage + EvokedField + Figure3D + add_background_image + centers_to_edges + compare_fiff + circular_layout + iter_topography + mne_analyze_colormap + plot_bem + plot_brain_colorbar + plot_bridged_electrodes + plot_chpi_snr + plot_cov + plot_channel_labels_circle + plot_ch_adjacency + plot_csd + plot_dipole_amplitudes + plot_dipole_locations + plot_drop_log + plot_epochs + plot_epochs_psd_topomap + plot_events + plot_evoked + plot_evoked_image + plot_evoked_topo + plot_evoked_topomap + plot_evoked_joint + plot_evoked_field + plot_evoked_white + plot_filter + plot_head_positions + plot_ideal_filter + plot_compare_evokeds + plot_ica_sources + plot_ica_components + plot_ica_properties + plot_ica_scores + plot_ica_overlay + plot_epochs_image + plot_layout + plot_montage + plot_projs_topomap + plot_projs_joint + plot_raw + plot_raw_psd + plot_regression_weights + plot_sensors + plot_snr_estimate + plot_source_estimates + link_brains + plot_volume_source_estimates + plot_vector_source_estimates + plot_sparse_source_estimates + plot_tfr_topomap + plot_topo_image_epochs + plot_topomap + plot_alignment + snapshot_brain_montage + plot_arrowmap + set_3d_backend + get_3d_backend + use_3d_backend + set_3d_options + set_3d_view + set_3d_title + create_3d_figure + close_3d_figure + close_all_3d_figures + get_brain_class + set_browser_backend + get_browser_backend + use_browser_backend + +Eyetracking +----------- + +.. currentmodule:: mne.viz.eyetracking + +:py:mod:`mne.viz.eyetracking`: + +.. automodule:: mne.viz.eyetracking + :no-members: + :no-inherited-members: +.. autosummary:: + :toctree: ../generated/ + + plot_gaze + +UI Events +--------- + +.. currentmodule:: mne.viz.ui_events + +:py:mod:`mne.viz.ui_events`: + +.. automodule:: mne.viz.ui_events + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: ../generated/ + + subscribe + unsubscribe + publish + link + unlink + disable_ui_events + UIEvent + ColormapRange + Contours + FigureClosing + PlaybackSpeed + TimeChange + VertexSelect diff --git a/doc/changes/0.10.inc b/doc/changes/0.10.inc deleted file mode 100644 index 6a0c3322e88..00000000000 --- a/doc/changes/0.10.inc +++ /dev/null @@ -1,148 +0,0 @@ -.. 
_changes_0_10: - -Version 0.10 (2015-10-26) -------------------------- - -Changelog -~~~~~~~~~ - -- Add support for generalized M-way repeated measures ANOVA for fully balanced designs with :func:`mne.stats.f_mway_rm` by `Denis Engemann`_ - -- Add epochs browser to interactively view and manipulate epochs with :func:`mne.viz.plot_epochs` by `Jaakko Leppakangas`_ - -- Speed up TF-MxNE inverse solver with block coordinate descent by `Daniel Strohmeier`_ and `Yousra Bekhti`_ - -- Speed up zero-phase overlap-add (default) filtering by a factor of up to 2 using linearity by `Ross Maddox`_ and `Eric Larson`_ - -- Add support for scaling and adjusting the number of channels/time per view by `Jaakko Leppakangas`_ - -- Add support to toggle the show/hide state of all sections with a single keypress ('t') in :class:`mne.Report` by `Mainak Jas`_ - -- Add support for BEM model creation :func:`mne.make_bem_model` by `Eric Larson`_ - -- Add support for BEM solution computation :func:`mne.make_bem_solution` by `Eric Larson`_ - -- Add ICA plotters for raw and epoch components by `Jaakko Leppakangas`_ - -- Add new object ``mne.decoding.TimeDecoding`` for decoding sensors' evoked response across time by `Jean-Remi King`_ - -- Add command ``mne freeview_bem_surfaces`` to quickly check BEM surfaces with Freeview by `Alex Gramfort`_. - -- Add support for splitting epochs into multiple files in :func:`mne.Epochs.save` by `Mainak Jas`_ and `Alex Gramfort`_ - -- Add support for jointly resampling a raw object and event matrix to avoid issues with resampling status channels by `Marijn van Vliet`_ - -- Add new method :class:`mne.preprocessing.Xdawn` for denoising and decoding of ERP/ERF by `Alexandre Barachant`_ - -- Add support for plotting patterns/filters in :class:`mne.decoding.CSP` and :class:`mne.decoding.LinearModel` by `Romain Trachel`_ - -- Add new object :class:`mne.decoding.LinearModel` for decoding M/EEG data and interpreting coefficients of linear models with patterns attribute by `Romain Trachel`_ and `Alex Gramfort`_ - -- Add support to append new channels to an object from a list of other objects by `Chris Holdgraf`_ - -- Add interactive plotting of topomap from time-frequency representation by `Jaakko Leppakangas`_ - -- Add ``plot_topo`` method to ``Evoked`` object by `Jaakko Leppakangas`_ - -- Add fetcher :mod:`mne.datasets.brainstorm ` for datasets used by Brainstorm in their tutorials by `Mainak Jas`_ - -- Add interactive plotting of single trials by right clicking on channel name in epochs browser by `Jaakko Leppakangas`_ - -- New logos and logo generation script by `Daniel McCloy`_ - -- Add ability to plot topomap with a "skirt" (channels outside of the head circle) by `Marijn van Vliet`_ - -- Add multiple options to ICA infomax and extended infomax algorithms (number of subgaussian components, computation of bias, iteration status printing), enabling equivalent computations to those performed by EEGLAB by `Jair Montoya Martinez`_ - -- Add :func:`mne.Epochs.apply_baseline` method to ``Epochs`` objects by `Teon Brooks`_ - -- Add ``preload`` argument to :func:`mne.read_epochs` to enable on-demand reads from disk by `Eric Larson`_ - -- Big rewrite of simulation module by `Yousra Bekhti`_, `Mark Wronkiewicz`_, `Eric Larson`_ and `Alex Gramfort`_. Allows to simulate raw with artifacts (ECG, EOG) and evoked data, exploiting the forward solution. 
See :func:`mne.simulation.simulate_raw`, :func:`mne.simulation.simulate_evoked` and :func:`mne.simulation.simulate_sparse_stc` - -- Add :func:`mne.Epochs.load_data` method to :class:`mne.Epochs` by `Teon Brooks`_ - -- Add support for drawing topomaps by selecting an area in :func:`mne.Evoked.plot` by `Jaakko Leppakangas`_ - -- Add support for finding peaks in evoked data in :func:`mne.Evoked.plot_topomap` by `Jona Sassenhagen`_ and `Jaakko Leppakangas`_ - -- Add source space morphing in :func:`morph_source_spaces` and :func:`SourceEstimate.to_original_src` by `Eric Larson`_ and `Denis Engemann`_ - -- Adapt ``corrmap`` function (Viola et al. 2009) to semi-automatically detect similar ICs across data sets by `Jona Sassenhagen`_ and `Denis Engemann`_ and `Eric Larson`_ - -- Clarify docstring for :class:`mne.preprocessing.ICA` by `jeythekey`_ - -- New ``mne flash_bem`` command to compute BEM surfaces from Flash MRI images by `Lorenzo Desantis`_, `Alex Gramfort`_ and `Eric Larson`_. See :func:`mne.bem.make_flash_bem`. - -- New gfp parameter in :func:`mne.Evoked.plot` method to display Global Field Power (GFP) by `Eric Larson`_. - -- Add ``mne.Report.add_slider_to_section`` methods to :class:`mne.Report` by `Teon Brooks`_ - -BUG -~~~ - -- Fix ``mne.io.add_reference_channels`` not setting ``info[nchan]`` correctly by `Federico Raimondo`_ - -- Fix ``mne.stats.bonferroni_correction`` reject mask output to use corrected p-values by `Denis Engemann`_ - -- Fix FFT filter artifacts when using short windows in overlap-add by `Eric Larson`_ - -- Fix picking channels from forward operator could return a channel ordering different from ``info['chs']`` by `Chris Bailey`_ - -- Fix dropping of events after downsampling stim channels by `Marijn van Vliet`_ - -- Fix scaling in :func:``mne.viz.utils._setup_vmin_vmax`` by `Jaakko Leppakangas`_ - -- Fix order of component selection in :class:`mne.decoding.CSP` by `Clemens Brunner`_ - -API -~~~ - -- Rename and deprecate ``mne.viz.plot_topo`` for ``mne.viz.plot_evoked_topo`` by `Jaakko Leppakangas`_ - -- Deprecated ``mne.decoding.transformer.ConcatenateChannels`` and replaced by ``mne.decoding.EpochsVectorizer`` by `Romain Trachel`_ - -- Deprecated ``lws`` and renamed ``ledoit_wolf`` for the ``reg`` argument in :class:`mne.decoding.CSP` by `Romain Trachel`_ - -- Redesigned and rewrote :meth:`mne.Epochs.plot` (no backwards compatibility) during the GSOC 2015 by `Jaakko Leppakangas`_, `Mainak Jas`_, `Federico Raimondo`_ and `Denis Engemann`_ - -- Deprecated and renamed ``mne.viz.plot_image_epochs`` for ``mne.plot.plot_epochs_image`` by `Teon Brooks`_ - -- ``picks`` argument has been added to :func:`mne.time_frequency.tfr_morlet`, :func:`mne.time_frequency.tfr_multitaper` by `Teon Brooks`_ - -- ``mne.io.Raw.preload_data`` has been deprecated for :func:`mne.io.Raw.load_data` by `Teon Brooks`_ - -- ``RawBrainVision`` objects now always have event channel ``'STI 014'``, and recordings with no events will have this channel set to zero by `Eric Larson`_ - -Authors -~~~~~~~ - -The committer list for this release is the following (preceded by number of commits): - -* 273 Eric Larson -* 270 Jaakko Leppakangas -* 194 Alexandre Gramfort -* 128 Denis A. Engemann -* 114 Jona Sassenhagen -* 107 Mark Wronkiewicz -* 97 Teon Brooks -* 81 Lorenzo De Santis -* 55 Yousra Bekhti -* 54 Jean-Remi King -* 48 Romain Trachel -* 45 Mainak Jas -* 40 Alexandre Barachant -* 32 Marijn van Vliet -* 26 Jair Montoya -* 22 Chris Holdgraf -* 16 Christopher J. 
Bailey -* 7 Christian Brodbeck -* 5 Natalie Klein -* 5 Fede Raimondo -* 5 Alan Leggitt -* 5 Roan LaPlante -* 5 Ross Maddox -* 4 Dan G. Wakeman -* 3 Daniel McCloy -* 3 Daniel Strohmeier -* 1 Jussi Nurminen diff --git a/doc/changes/0.11.inc b/doc/changes/0.11.inc deleted file mode 100644 index e3f9a67e3c4..00000000000 --- a/doc/changes/0.11.inc +++ /dev/null @@ -1,92 +0,0 @@ -.. _changes_0_11: - -Version 0.11 (2015-12-24) -------------------------- - -Changelog -~~~~~~~~~ - -- Maxwell filtering (SSS) implemented in :func:`mne.preprocessing.maxwell_filter` by `Mark Wronkiewicz`_ as part of Google Summer of Code, with help from `Samu Taulu`_, `Jukka Nenonen`_, and `Jussi Nurminen`_. Our implementation includes support for: - - - Fine calibration - - - Cross-talk correction - - - Temporal SSS (tSSS) - - - Head position translation - - - Internal component regularization - -- Compensation for movements using Maxwell filtering on epoched data in :func:`mne.epochs.average_movements` by `Eric Larson`_ and `Samu Taulu`_ - -- Add reader for Nicolet files in :func:`mne.io.read_raw_nicolet` by `Jaakko Leppakangas`_ - -- Add FIFF persistence for ICA labels by `Denis Engemann`_ - -- Display ICA labels in :func:`mne.viz.plot_ica_scores` and :func:`mne.viz.plot_ica_sources` (for evoked objects) by `Denis Engemann`_ - -- Plot spatially color coded lines in :func:`mne.Evoked.plot` by `Jona Sassenhagen`_ and `Jaakko Leppakangas`_ - -- Add reader for CTF data in :func:`mne.io.read_raw_ctf` by `Eric Larson`_ - -- Add support for Brainvision v2 in :func:`mne.io.read_raw_brainvision` by `Teon Brooks`_ - -- Improve speed of generalization across time ``mne.decoding.GeneralizationAcrossTime`` decoding up to a factor of seven by `Jean-Remi King`_ and `Federico Raimondo`_ and `Denis Engemann`_. - -- Add the explained variance for each principal component, ``explained_var``, key to the :class:`mne.Projection` by `Teon Brooks`_ - -- Added methods ``mne.Epochs.add_eeg_average_proj``, ``mne.io.Raw.add_eeg_average_proj``, and ``mne.Evoked.add_eeg_average_proj`` to add an average EEG reference. - -- Add reader for EEGLAB data in :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` by `Mainak Jas`_ - -BUG -~~~ - -- Fix bug that prevented homogeneous bem surfaces to be displayed in HTML reports by `Denis Engemann`_ - -- Added safeguards against ``None`` and negative values in reject and flat parameters in :class:`mne.Epochs` by `Eric Larson`_ - -- Fix train and test time window-length in ``mne.decoding.GeneralizationAcrossTime`` by `Jean-Remi King`_ - -- Added lower bound in :func:`mne.stats.linear_regression` on p-values ``p_val`` (and resulting ``mlog10_p_val``) using double floating point arithmetic limits by `Eric Larson`_ - -- Fix channel name pick in :func:`mne.Evoked.get_peak` method by `Alex Gramfort`_ - -- Fix drop percentages to take into account ``ignore`` option in :func:`mne.viz.plot_drop_log` and :func:`mne.Epochs.plot_drop_log` by `Eric Larson`_. - -- :class:`mne.EpochsArray` no longer has an average EEG reference silently added (but not applied to the data) by default. Use ``mne.EpochsArray.add_eeg_ref`` to properly add one. 
- -- Fix :func:`mne.io.read_raw_ctf` to read ``n_samp_tot`` instead of ``n_samp`` by `Jaakko Leppakangas`_ - -API -~~~ - -- :func:`mne.io.read_raw_brainvision` now has ``event_id`` argument to assign non-standard trigger events to a trigger value by `Teon Brooks`_ - -- :func:`mne.read_epochs` now has ``add_eeg_ref=False`` by default, since average EEG reference can be added before writing or after reading using the method ``mne.Epochs.add_eeg_ref``. - -- :class:`mne.EpochsArray` no longer has an average EEG reference silently added (but not applied to the data) by default. Use ``mne.EpochsArray.add_eeg_average_proj`` to properly add one. - -Authors -~~~~~~~ - -The committer list for this release is the following (preceded by number of commits): - -* 171 Eric Larson -* 117 Jaakko Leppakangas -* 58 Jona Sassenhagen -* 52 Mainak Jas -* 46 Alexandre Gramfort -* 33 Denis A. Engemann -* 28 Teon Brooks -* 24 Clemens Brunner -* 23 Christian Brodbeck -* 15 Mark Wronkiewicz -* 10 Jean-Remi King -* 5 Marijn van Vliet -* 3 Fede Raimondo -* 2 Alexander Rudiuk -* 2 emilyps14 -* 2 lennyvarghese -* 1 Marian Dovgialo diff --git a/doc/changes/0.12.inc b/doc/changes/0.12.inc deleted file mode 100644 index cf01f8ff62c..00000000000 --- a/doc/changes/0.12.inc +++ /dev/null @@ -1,211 +0,0 @@ -.. _changes_0_12: - -Version 0.12 (2016-05-10) -------------------------- - -Changelog -~~~~~~~~~ - -- Add ``overlay_times`` parameter to :func:`mne.viz.plot_epochs_image` to be able to display for example reaction times on top of the images, by `Alex Gramfort`_ - -- Animation for evoked topomap in :func:`mne.Evoked.animate_topomap` by `Jaakko Leppakangas`_ - -- Make :func:`mne.channels.find_layout` more robust for KIT systems in the presence of bad or missing channels by `Jaakko Leppakangas`_ - -- Add raw movement compensation to :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ - -- Add :class:`mne.Annotations` for for annotating segments of raw data by `Jaakko Leppakangas`_ - -- Add reading of .fif file montages by `Eric Larson`_ - -- Add system config utility :func:`mne.sys_info` by `Eric Larson`_ - -- Automatic cross-validation and scoring metrics in ``mne.decoding.GeneralizationAcrossTime``, by `Jean-Remi King`_ - -- ``mne.decoding.GeneralizationAcrossTime`` accepts non-deterministic cross-validations, by `Jean-Remi King`_ - -- Add plotting RMS of gradiometer pairs in :func:`mne.viz.plot_evoked_topo` by `Jaakko Leppakangas`_ - -- Add regularization methods to :func:`mne.compute_raw_covariance` by `Eric Larson`_. - -- Add command ``mne show_info`` to quickly show the measurement info from a .fif file from the terminal by `Alex Gramfort`_. - -- Add creating forward operator for dipole object :func:`mne.make_forward_dipole` by `Chris Bailey`_ - -- Add reading and estimation of fixed-position dipole time courses (similar to Elekta ``xfit``) using :func:`mne.read_dipole` and :func:`mne.fit_dipole` by `Eric Larson`_. - -- Accept ``mne.decoding.GeneralizationAcrossTime``'s ``scorer`` parameter to be a string that refers to a scikit-learn_ metric scorer by `Asish Panda`_. - -- Add method :func:`mne.Epochs.plot_image` calling :func:`mne.viz.plot_epochs_image` for better usability by `Asish Panda`_. 
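As an aside to the ``mne.Annotations`` entry above ("Add :class:`mne.Annotations` for annotating segments of raw data"), a minimal sketch of how such annotations attach to a raw object; the channel names, sampling rate, and timing values are made up, while ``create_info``, ``RawArray``, ``Annotations``, and ``set_annotations`` are standard MNE-Python calls::

    import numpy as np
    import mne

    # Build a small synthetic Raw object (values are illustrative only).
    info = mne.create_info(["EEG 001", "EEG 002"], sfreq=100.0, ch_types="eeg")
    raw = mne.io.RawArray(np.zeros((2, 1000)), info)  # 10 s of flat data

    # Mark two segments to ignore later; onsets and durations are in seconds.
    annotations = mne.Annotations(
        onset=[1.0, 4.5],
        duration=[0.5, 1.0],
        description=["bad_blink", "bad_movement"],
    )
    raw.set_annotations(annotations)
    print(raw.annotations)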
- -- Add :func:`mne.io.read_raw_cnt` for reading Neuroscan CNT files by `Jaakko Leppakangas`_ - -- Add ``decim`` parameter to ``mne.time_frequency.cwt_morlet``, by `Jean-Remi King`_ - -- Add method :func:`mne.Epochs.plot_topo_image` by `Jaakko Leppakangas`_ - -- Add the ability to read events when importing raw EEGLAB files, by `Jona Sassenhagen`_. - -- Add function :func:`mne.viz.plot_sensors` and methods :func:`mne.Epochs.plot_sensors`, :func:`mne.io.Raw.plot_sensors` and :func:`mne.Evoked.plot_sensors` for plotting sensor positions and :func:`mne.viz.plot_layout` and :func:`mne.channels.Layout.plot` for plotting layouts by `Jaakko Leppakangas`_ - -- Add epoch rejection based on annotated segments by `Jaakko Leppakangas`_ - -- Add option to use new-style MEG channel names in ``mne.read_selection`` by `Eric Larson`_ - -- Add option for ``proj`` in :class:`mne.EpochsArray` by `Eric Larson`_ - -- Enable the usage of :func:`mne.viz.plot_topomap` with an :class:`mne.Info` instance for location information, by `Jona Sassenhagen`_. - -- Add support for electrocorticography (ECoG) channel type by `Eric Larson`_ - -- Add option for ``first_samp`` in :func:`mne.make_fixed_length_events` by `Jon Houck`_ - -- Add ability to auto-scale channel types for :func:`mne.viz.plot_raw` and :func:`mne.viz.plot_epochs` and corresponding object plotting methods by `Chris Holdgraf`_ - -BUG -~~~ - -- ``mne.time_frequency.compute_raw_psd``, ``mne.time_frequency.compute_epochs_psd``, ``mne.time_frequency.psd_multitaper``, and ``mne.time_frequency.psd_welch`` no longer remove rows/columns of the SSP matrix before applying SSP projectors when picks are provided by `Chris Holdgraf`_. - -- :func:`mne.Epochs.plot_psd` no longer calls a Welch PSD, and instead uses a Multitaper method which is more appropriate for epochs. Flags for this function are passed to ``mne.time_frequency.psd_multitaper`` by `Chris Holdgraf`_ - -- Time-cropping functions (e.g., :func:`mne.Epochs.crop`, :func:`mne.Evoked.crop`, :func:`mne.io.Raw.crop`, :func:`mne.SourceEstimate.crop`) made consistent with behavior of ``tmin`` and ``tmax`` of :class:`mne.Epochs`, where nearest sample is kept. For example, for MGH data acquired with ``sfreq=600.614990234``, constructing ``Epochs(..., tmin=-1, tmax=1)`` has bounds ``+/-1.00064103``, and now ``epochs.crop(-1, 1)`` will also have these bounds (previously they would have been ``+/-0.99897607``). Time cropping functions also no longer use relative tolerances when determining the boundaries. These changes have minor effects on functions that use cropping under the hood, such as :func:`mne.compute_covariance` and ``mne.connectivity.spectral_connectivity``. Changes by `Jaakko Leppakangas`_ and `Eric Larson`_ - -- Fix EEG spherical spline interpolation code to account for average reference by `Mainak Jas`_ - -- MEG projectors are removed after Maxwell filtering by `Eric Larson`_ - -- Fix ``mne.decoding.TimeDecoding`` to allow specifying ``clf`` by `Jean-Remi King`_ - -- Fix bug with units (uV) in 'Brain Vision Data Exchange Header File Version 1.0' by `Federico Raimondo`_ - -- Fix bug where :func:`mne.preprocessing.maxwell_filter` ``destination`` parameter did not properly set device-to-head transform by `Eric Larson`_ - -- Fix bug in rank calculation of ``mne.utils.estimate_rank``, ``mne.io.Raw.estimate_rank``, and covariance functions where the tolerance was set to slightly too small a value, new 'auto' mode uses values from ``scipy.linalg.orth`` by `Eric Larson`_. 
- -- Fix bug when specifying irregular ``train_times['slices']`` in ``mne.decoding.GeneralizationAcrossTime``, by `Jean-Remi King`_ - -- Fix colorbar range on norm data by `Jaakko Leppakangas`_ - -- Fix bug in ``mne.preprocessing.run_ica``, which used the ``ecg_criterion`` parameter for the EOG criterion instead of ``eog_criterion`` by `Christian Brodbeck`_ - -- Fix normals in CTF data reader by `Eric Larson`_ - -- Fix bug in :func:`mne.io.read_raw_ctf`, when omitting samples at the end by `Jaakko Leppakangas`_ - -- Fix ``info['lowpass']`` value for downsampled raw data by `Eric Larson`_ - -- Remove measurement date from :class:`mne.Info` in :func:`mne.io.Raw.anonymize` by `Eric Larson`_ - -- Fix bug that caused synthetic ecg channel creation even if channel was specified for ECG peak detection in :func:`mne.preprocessing.create_ecg_epochs` by `Jaakko Leppakangas`_ - -- Fix bug with vmin and vmax when None is passed in :func:`mne.viz.plot_topo_image_epochs` by `Jaakko Leppakangas`_ - -- Fix bug with :func:`mne.label_sign_flip` (and :func:`mne.extract_label_time_course`) by `Natalie Klein`_ and `Eric Larson`_ - -- Add copy parameter in :func:`mne.Epochs.apply_baseline` and :func:`mne.io.Raw.filter` methods by `Jona Sassenhagen`_ and `Alex Gramfort`_ - -- Fix bug in :func:`mne.merge_events` when using ``replace_events=False`` by `Alex Gramfort`_ - -- Fix bug in :class:`mne.Evoked` type setting in :func:`mne.stats.linear_regression_raw` by `Eric Larson`_ - -- Fix bug in ``mne.io.edf.RawEDF`` highpass filter setting to take max highpass to match warning message by `Teon Brooks`_ - -- Fix bugs with coordinane frame adjustments in ``mne.viz.plot_trans`` by `Eric Larson`_ - -- Fix bug in colormap selection in :func:`mne.Evoked.plot_projs_topomap` by `Jaakko Leppakangas`_ - -- Fix bug in source normal adjustment that occurred when 1) patch information is available (e.g., when distances have been calculated) and 2) points are excluded from the source space (by inner skull distance) by `Eric Larson`_ - -- Fix bug when merging info that has a field with list of dicts by `Jaakko Leppakangas`_ - -- The BTi/4D reader now considers user defined channel labels instead of the hard-ware names, however only for channels other than MEG. By `Denis Engemann`_ and `Alex Gramfort`_. - -- The BTi reader :func:`mne.io.read_raw_bti` can now read 2500 system data, by `Eric Larson`_ - -- Fix bug in :func:`mne.compute_raw_covariance` where rejection by non-data channels (e.g. EOG) was not done properly by `Eric Larson`_. - -- Change default scoring method of ``mne.decoding.GeneralizationAcrossTime`` and ``mne.decoding.TimeDecoding`` to estimate the scores within the cross-validation as in scikit-learn_ as opposed to across all cross-validated ``y_pred``. The method can be changed with the ``score_mode`` parameter by `Jean-Remi King`_ - -- Fix bug in :func:`mne.io.Raw.save` where, in rare cases, automatically split files could end up writing an extra empty file that wouldn't be read properly by `Eric Larson`_ - -- Fix :class:``mne.realtime.StimServer`` by removing superfluous argument ``ip`` used while initializing the object by `Mainak Jas`_. 
- -- Fix removal of projectors in :func:`mne.preprocessing.maxwell_filter` in ``st_only=True`` mode by `Eric Larson`_ - -API -~~~ - -- The default ``picks=None`` in :func:`mne.viz.plot_epochs_image` now only plots the first 5 channels, not all channels, by `Jona Sassenhagen`_ - -- The ``mesh_color`` parameter in :func:`mne.viz.plot_dipole_locations` has been removed (use ``brain_color`` instead), by `Marijn van Vliet`_ - -- Deprecated functions ``mne.time_frequency.compute_raw_psd`` and ``mne.time_frequency.compute_epochs_psd``, replaced by ``mne.time_frequency.psd_welch`` by `Chris Holdgraf`_ - -- Deprecated function ``mne.time_frequency.multitaper_psd`` and replaced by ``mne.time_frequency.psd_multitaper`` by `Chris Holdgraf`_ - -- The ``y_pred`` attribute in ``mne.decoding.GeneralizationAcrossTime`` and ``mne.decoding.TimeDecoding`` is now a numpy array, by `Jean-Remi King`_ - -- The :func:`mne.bem.fit_sphere_to_headshape` function now default to ``dig_kinds='auto'`` which will use extra digitization points, falling back to extra plus eeg digitization points if there not enough extra points are available. - -- The :func:`mne.bem.fit_sphere_to_headshape` now has a ``units`` argument that should be set explicitly. This will default to ``units='mm'`` in 0.12 for backward compatibility but change to ``units='m'`` in 0.13. - -- Added default parameters in Epochs class namely ``event_id=None``, ``tmin=-0.2`` and ``tmax=0.5``. - -- To unify and extend the behavior of :func:`mne.compute_raw_covariance` relative to :func:`mne.compute_covariance`, the default parameter ``tstep=0.2`` now discards any epochs at the end of the :class:`mne.io.Raw` instance that are not the full ``tstep`` duration. This will slightly change the computation of :func:`mne.compute_raw_covariance`, but should only potentially have a big impact if the :class:`mne.io.Raw` instance is short relative to ``tstep`` and the last, too short (now discarded) epoch contained data inconsistent with the epochs that preceded it. - -- The default ``picks=None`` in :func:`mne.io.Raw.filter` now picks eeg, meg, seeg, and ecog channels, by `Jean-Remi King`_ and `Eric Larson`_ - -- EOG, ECG and EMG channels are now plotted by default (if present in data) when using :func:`mne.viz.plot_evoked` by `Marijn van Vliet`_ - -- Replace pseudoinverse-based solver with much faster Cholesky solver in :func:`mne.stats.linear_regression_raw`, by `Jona Sassenhagen`_. - -- CTF data reader now reads EEG locations from .pos file as HPI points by `Jaakko Leppakangas`_ - -- Subselecting channels can now emit a warning if many channels have been subselected from projection vectors. We recommend only computing projection vertors for and applying projectors to channels that will be used in the final analysis. However, after picking a subset of channels, projection vectors can be renormalized with :func:`mne.Info.normalize_proj` if necessary to avoid warnings about subselection. Changes by `Eric Larson`_ and `Alex Gramfort`_. - -- Rename and deprecate ``mne.Epochs.drop_bad_epochs`` to :func:`mne.Epochs.drop_bad`, and ``mne.Epochs.drop_epochs`` to :func:`mne.Epochs.drop` by `Alex Gramfort`_. 
- -- The C wrapper ``mne.do_forward_solution`` has been deprecated in favor of the native Python version :func:`mne.make_forward_solution` by `Eric Larson`_ - -- The ``events`` parameter of :func:`mne.EpochsArray` is set by default to chronological time-samples and event values to 1, by `Jean-Remi King`_ - -Authors -~~~~~~~ - -The committer list for this release is the following (preceded by number of commits): - -* 348 Eric Larson -* 347 Jaakko Leppakangas -* 157 Alexandre Gramfort -* 139 Jona Sassenhagen -* 67 Jean-Remi King -* 32 Chris Holdgraf -* 31 Denis A. Engemann -* 30 Mainak Jas -* 16 Christopher J. Bailey -* 13 Marijn van Vliet -* 10 Mark Wronkiewicz -* 9 Teon Brooks -* 9 kaichogami -* 8 Clément Moutard -* 5 Camilo Lamus -* 5 mmagnuski -* 4 Christian Brodbeck -* 4 Daniel McCloy -* 4 Yousra Bekhti -* 3 Fede Raimondo -* 1 Jussi Nurminen -* 1 MartinBaBer -* 1 Mikolaj Magnuski -* 1 Natalie Klein -* 1 Niklas Wilming -* 1 Richard Höchenberger -* 1 Sagun Pai -* 1 Sourav Singh -* 1 Tom Dupré la Tour -* 1 jona-sassenhagen@ -* 1 kambysese -* 1 pbnsilva -* 1 sviter -* 1 zuxfoucault diff --git a/doc/changes/0.13.inc b/doc/changes/0.13.inc deleted file mode 100644 index 425ba4c76a1..00000000000 --- a/doc/changes/0.13.inc +++ /dev/null @@ -1,248 +0,0 @@ -.. _changes_0_13: - -Version 0.13 (2016-09-27) -------------------------- - -Changelog -~~~~~~~~~ - -- Add new class :class:`AcqParserFIF` to parse Elekta/Neuromag MEG acquisition info, allowing e.g. collecting epochs according to acquisition-defined averaging categories by `Jussi Nurminen`_ - -- Adds automatic determination of FIR filter parameters ``filter_length``, ``l_trans_bandwidth``, and ``h_trans_bandwidth`` and adds ``phase`` argument in e.g. in :meth:`mne.io.Raw.filter` by `Eric Larson`_ - -- Adds faster ``n_fft='auto'`` option to :meth:`mne.io.Raw.apply_hilbert` by `Eric Larson`_ - -- Adds new function ``mne.time_frequency.csd_array`` to compute the cross-spectral density of multivariate signals stored in an array, by `Nick Foti`_ - -- Add order params 'selection' and 'position' for :func:`mne.viz.plot_raw` to allow plotting of specific brain regions by `Jaakko Leppakangas`_ - -- Added the ability to decimate :class:`mne.Evoked` objects with :func:`mne.Evoked.decimate` by `Eric Larson`_ - -- Add generic array-filtering function :func:`mne.filter.filter_data` by `Eric Larson`_ - -- ``mne.viz.plot_trans`` now also shows head position indicators by `Christian Brodbeck`_ - -- Add label center of mass function :func:`mne.Label.center_of_mass` by `Eric Larson`_ - -- Added :func:`mne.viz.plot_ica_properties` that allows plotting of independent component properties similar to ``pop_prop`` in EEGLAB. Also :class:`mne.preprocessing.ICA` has :func:`mne.preprocessing.ICA.plot_properties` method now. 
Added by `Mikołaj Magnuski`_ - -- Add second-order sections (instead of ``(b, a)`` form) IIR filtering for reduced numerical error by `Eric Larson`_ - -- Add interactive colormap option to image plotting functions by `Jaakko Leppakangas`_ - -- Add support for the University of Maryland KIT system by `Christian Brodbeck`_ - -- Add support for \*.elp and \*.hsp files to the KIT2FIFF converter and ``mne.channels.read_dig_montage`` by `Teon Brooks`_ and `Christian Brodbeck`_ - -- Add option to preview events in the KIT2FIFF GUI by `Christian Brodbeck`_ - -- Add approximation of size of :class:`io.Raw`, :class:`Epochs`, and :class:`Evoked` in :func:`repr` by `Eric Larson`_ - -- Add possibility to select a subset of sensors by lasso selector to :func:`mne.viz.plot_sensors` and :func:`mne.viz.plot_raw` when using order='selection' or order='position' by `Jaakko Leppakangas`_ - -- Add the option to plot brain surfaces and source spaces to :func:`viz.plot_bem` by `Christian Brodbeck`_ - -- Add the ``--filterchpi`` option to :ref:`mne browse_raw`, by `Felix Raimundo`_ - -- Add the ``--no-decimate`` option to :ref:`mne make_scalp_surfaces` to skip the high-resolution surface decimation step, by `Eric Larson`_ - -- Add new class :class:`mne.decoding.EMS` to transform epochs with the event-matched spatial filters and add 'cv' parameter to :func:`mne.decoding.compute_ems`, by `Jean-Remi King`_ - -- Added :class:`mne.time_frequency.EpochsTFR` and average parameter in :func:`mne.time_frequency.tfr_morlet` and :func:`mne.time_frequency.tfr_multitaper` to compute time-frequency transforms on single trial epochs without averaging, by `Jean-Remi King`_ and `Alex Gramfort`_ - -- Added :class:`mne.decoding.TimeFrequency` to transform signals in scikit-learn pipelines, by `Jean-Remi King`_ - -- Added :class:`mne.decoding.UnsupervisedSpatialFilter` providing interface for scikit-learn decomposition algorithms to be used with MNE data, by `Jean-Remi King`_ and `Asish Panda`_ - -- Added support for multiclass decoding in :class:`mne.decoding.CSP`, by `Jean-Remi King`_ and `Alexandre Barachant`_ - -- Components obtained from :class:`mne.preprocessing.ICA` are now sorted by explained variance, by `Mikołaj Magnuski`_ - -- Adding an EEG reference channel using :func:`mne.add_reference_channels` will now use its digitized location from the FIFF file, if present, by `Chris Bailey`_ - -- Added interactivity to :func:`mne.preprocessing.ICA.plot_components` - passing an instance of :class:`io.Raw` or :class:`Epochs` in ``inst`` argument allows to open component properties by clicking on component topomaps, by `Mikołaj Magnuski`_ - -- Adds new function :func:`mne.viz.plot_compare_evokeds` to show multiple evoked time courses at a single location, or the mean over a ROI, or the GFP, automatically averaging and calculating a CI if multiple subjects are given, by `Jona Sassenhagen`_ - -- Added ``transform_into`` parameter into :class:`mne.decoding.CSP` to retrieve the average power of each source or the time course of each source, by `Jean-Remi King`_ - -- Added support for reading MaxShield (IAS) evoked data (e.g., from the acquisition machine) in :func:`mne.read_evokeds` by `Eric Larson`_ - -- Added support for functional near-infrared spectroscopy (fNIRS) channels by `Jaakko Leppakangas`_ - -- Added :attr:`mne.io.Raw.acqparser` convenience attribute for :class:`mne.AcqParserFIF` by `Eric Larson`_ - -- Added example of Representational Similarity Analysis, by `Jean-Remi King`_ - -BUG -~~~ - -- Fixed a bug where selecting 
epochs using hierarchical event IDs (HIDs) was *and*-like instead of *or*-like. When doing e.g. ``epochs[('Auditory', 'Left')]``, previously all trials that contain ``'Auditory'`` *and* ``'Left'`` (like ``'Auditory/Left'``) would be selected, but now any conditions matching ``'Auditory'`` *or* ``'Left'`` will be selected (like ``'Auditory/Left'``, ``'Auditory/Right'``, and ``'Visual/Left'``). This is now consistent with how epoch selection was done without HID tags, e.g. ``epochs[['a', 'b']]`` would select all epochs of type ``'a'`` and type ``'b'``. By `Eric Larson`_ - -- Fixed Infomax/Extended Infomax when the user provides an initial weights matrix by `Jair Montoya Martinez`_ - -- Fixed the default raw FIF writing buffer size to be 1 second instead of 10 seconds by `Eric Larson`_ - -- Fixed channel selection order when MEG channels do not come first in :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ - -- Fixed color ranges to correspond to the colorbar when plotting several time instances with :func:`mne.viz.plot_evoked_topomap` by `Jaakko Leppakangas`_ - -- Added units to :func:`mne.io.read_raw_brainvision` for reading non-data channels and enable default behavior of inferring channel type by unit by `Jaakko Leppakangas`_ and `Pablo-Arias`_ - -- Fixed minor bugs with :func:`mne.Epochs.resample` and :func:`mne.Epochs.decimate` by `Eric Larson`_ - -- Fixed a bug where duplicate vertices were not strictly checked by :func:`mne.simulation.simulate_stc` by `Eric Larson`_ - -- Fixed a bug where some FIF files could not be read with :func:`mne.io.show_fiff` by `Christian Brodbeck`_ and `Eric Larson`_ - -- Fixed a bug where ``merge_grads=True`` causes :func:`mne.viz.plot_evoked_topo` to fail when plotting a list of evokeds by `Jaakko Leppakangas`_ - -- Fixed a bug when setting multiple bipolar references with :func:`set_bipolar_reference` by `Marijn van Vliet`_. - -- Fixed image scaling in :func:`mne.viz.plot_epochs_image` when plotting more than one channel by `Jaakko Leppakangas`_ - -- Fixed :class:`mne.preprocessing.Xdawn` to fit shuffled epochs by `Jean-Remi King`_ - -- Fixed a bug with channel order determination that could lead to an ``AssertionError`` when using :class:`mne.Covariance` matrices by `Eric Larson`_ - -- Fixed the check for CTF gradient compensation in :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ - -- Fixed the import of EDF files with encoding characters in :func:`mne.io.read_raw_edf` by `Guillaume Dumas`_ - -- Fixed :class:`mne.Epochs` to ensure that detrend parameter is not a boolean by `Jean-Remi King`_ - -- Fixed bug with ``mne.realtime.FieldTripClient.get_data_as_epoch`` when ``picks=None`` which crashed the function by `Mainak Jas`_ - -- Fixed reading of units in ``.elc`` montage files (from ``UnitsPosition`` field) so that ``mne.channels.Montage`` objects are now returned with the ``pos`` attribute correctly in meters, by `Chris Mullins`_ - -- Fixed reading of BrainVision files by `Phillip Alday`_: - -- Greater support for BVA files, especially older ones: alternate text coding schemes with fallback to Latin-1 as well as units in column headers - -- Use online software filter information when present - -- Fix comparisons of filter settings for determining "strictest"/"weakest" filter - -- Weakest filter is now used for heterogeneous channel filter settings, leading to more consistent behavior with filtering methods applied to a subset of channels (e.g. ``Raw.filter`` with ``picks != None``). 
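The hierarchical event ID (HID) entry above describes the *or*-like tag selection; a minimal sketch with synthetic data and made-up condition names (``create_info``, ``EpochsArray``, and tag-based indexing are standard MNE-Python API)::

    import numpy as np
    import mne

    info = mne.create_info(["EEG 001"], sfreq=100.0, ch_types="eeg")
    data = np.zeros((4, 1, 50))  # (n_epochs, n_channels, n_times)
    events = np.array([[0, 0, 1], [100, 0, 2], [200, 0, 3], [300, 0, 4]])
    event_id = {"Auditory/Left": 1, "Auditory/Right": 2,
                "Visual/Left": 3, "Visual/Right": 4}
    epochs = mne.EpochsArray(data, info, events, event_id=event_id)

    # OR-like selection: keeps Auditory/Left, Auditory/Right, and Visual/Left.
    print(epochs[["Auditory", "Left"]])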
- -- Fixed plotting and timing of :class:`Annotations` and restricted addition of annotations outside data range to prevent problems with cropping and concatenating data by `Jaakko Leppakangas`_ - -- Fixed ICA plotting functions to refer to IC index instead of component number by `Andreas Hojlund`_ and `Jaakko Leppakangas`_ - -- Fixed bug with ``picks`` when interpolating MEG channels by `Mainak Jas`_. - -- Fixed bug in padding of Stockwell transform for signal of length a power of 2 by `Johannes Niediek`_ - -API -~~~ - -- The ``add_eeg_ref`` argument in core functions like :func:`mne.io.read_raw_fif` and :class:`mne.Epochs` has been deprecated in favor of using :func:`mne.set_eeg_reference` and equivalent instance methods like :meth:`raw.set_eeg_reference() `. In functions like :func:`mne.io.read_raw_fif` where the default in 0.13 and older versions is ``add_eeg_ref=True``, the default will change to ``add_eeg_ref=False`` in 0.14, and the argument will be removed in 0.15. - -- Multiple aspects of FIR filtering in MNE-Python has been refactored: - - 1. New recommended defaults for ``l_trans_bandwidth='auto'``, ``h_trans_bandwidth='auto'``, and ``filter_length='auto'``. This should generally reduce filter artifacts at the expense of slight decrease in effective filter stop-band attenuation. For details see :ref:`tut-filtering-in-python`. The default values of ``l_trans_bandwidth=h_trans_bandwidth=0.5`` and ``filter_length='10s'`` will change to ``'auto'`` in 0.14. - - 2. The ``filter_length=None`` option (i.e. use ``len(x)``) has been deprecated. - - 3. An improved ``phase='zero'`` zero-phase FIR filtering has been added. Instead of running the designed filter forward and backward, the filter is applied once and we compensate for the linear phase of the filter. The previous ``phase='zero-double'`` default will change to ``phase='zero'`` in 0.14. - - 4. A warning is provided when the filter is longer than the signal of interest, as this is unlikely to produce desired results. - - 5. Previously, if the filter was as long or longer than the signal of interest, direct FFT-based computations were used. Now a single code path (overlap-add filtering) is used for all FIR filters. This could cause minor changes in how short signals are filtered. - -- Support for Python 2.6 has been dropped, and the minimum supported dependencies are NumPy_ 1.8, SciPy_ 0.12, and Matplotlib_ 1.3 by `Eric Larson`_ - -- When CTF gradient compensation is applied to raw data, it is no longer reverted on save of :meth:`mne.io.Raw.save` by `Eric Larson`_ - -- Adds ``mne.time_frequency.csd_epochs`` to replace ``mne.time_frequency.csd_compute_epochs`` for naming consistency. 
``mne.time_frequency.csd_compute_epochs`` is now deprecated and will be removed in mne 0.14, by `Nick Foti`_ - -- Weighted addition and subtraction of :class:`Evoked` as ``ev1 + ev2`` and ``ev1 - ev2`` have been deprecated, use explicit :func:`mne.combine_evoked(..., weights='nave') ` instead by `Eric Larson`_ - -- Deprecated support for passing a list of filenames to :class:`mne.io.Raw` constructor, use :func:`mne.io.read_raw_fif` and :func:`mne.concatenate_raws` instead by `Eric Larson`_ - -- Added options for setting data and date formats manually in :func:`mne.io.read_raw_cnt` by `Jaakko Leppakangas`_ - -- Now channels with units of 'C', 'µS', 'uS', 'ARU' and 'S' will be turned to misc by default in :func:`mne.io.read_raw_brainvision` by `Jaakko Leppakangas`_ - -- Add :func:`mne.io.anonymize_info` function to anonymize measurements and add methods to :class:`mne.io.Raw`, :class:`mne.Epochs` and :class:`mne.Evoked`, by `Jean-Remi King`_ - -- Now it is possible to plot only a subselection of channels in :func:`mne.viz.plot_raw` by using an array for order parameter by `Jaakko Leppakangas`_ - -- EOG channels can now be included when calling :func:`mne.preprocessing.ICA.fit` and a proper error is raised when trying to include unsupported channels by `Alexander Rudiuk`_ - -- :func:`mne.concatenate_epochs` and :func:`mne.compute_covariance` now check to see if all :class:`Epochs` instances have the same MEG-to-Head transformation, and errors by default if they do not by `Eric Larson`_ - -- Added option to pass a list of axes to :func:`mne.viz.plot_epochs_image` by `Mikołaj Magnuski`_ - -- Constructing IIR filters in :func:`mne.filter.construct_iir_filter` defaults to ``output='ba'`` in 0.13 but this will be changed to ``output='sos'`` by `Eric Larson`_ - -- Add ``zorder`` parameter to :func:`mne.Evoked.plot` and derived functions to sort allow sorting channels by e.g. standard deviation, by `Jona Sassenhagen`_ - -- The ``baseline`` parameter of :func:`mne.Epochs.apply_baseline` is set by default (None, 0), by `Felix Raimundo`_ - -- Adds :func:`mne.Evoked.apply_baseline` to be consistent with :func:`mne.Epochs.apply_baseline`, by `Felix Raimundo`_ - -- Deprecated the ``baseline`` parameter in :class:`mne.Evoked`, by `Felix Raimundo`_ - -- The API of :meth:`mne.SourceEstimate.plot` and :func:`mne.viz.plot_source_estimates` has been updated to reflect current PySurfer 0.6 API. The ``config_opts`` parameter is now deprecated and will be removed in mne 0.14, and the default representation for time will change from ``ms`` to ``s`` in mne 0.14. 
By `Christian Brodbeck`_ - -- The default dataset location has been changed from ``examples/`` in the MNE-Python root directory to ``~/mne_data`` in the user's home directory, by `Eric Larson`_ - -- A new option ``set_env`` has been added to :func:`mne.set_config` that defaults to ``False`` in 0.13 but will change to ``True`` in 0.14, by `Eric Larson`_ - -- The ``compensation`` parameter in :func:`mne.io.read_raw_fif` has been deprecated in favor of the method :meth:`mne.io.Raw.apply_gradient_compensation` by `Eric Larson`_ - -- ``mne.decoding.EpochsVectorizer`` has been deprecated in favor of :class:`mne.decoding.Vectorizer` by `Asish Panda`_ - -- The ``epochs_data`` parameter has been deprecated in :class:`mne.decoding.CSP`, in favour of the ``X`` parameter to comply to scikit-learn API, by `Jean-Remi King`_ - -- Deprecated ``mne.time_frequency.cwt_morlet`` and ``mne.time_frequency.single_trial_power`` in favour of :func:`mne.time_frequency.tfr_morlet` with parameter average=False, by `Jean-Remi King`_ and `Alex Gramfort`_ - -- Add argument ``mask_type`` to func:`mne.read_events` and func:`mne.find_events` to support MNE-C style of trigger masking by `Teon Brooks`_ and `Eric Larson`_ - -- Extended Infomax is now the new default in :func:`mne.preprocessing.infomax` (``extended=True``), by `Clemens Brunner`_ - -- :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` now take additional argument ``uint16_codec`` that allows to define the encoding of character arrays in set file. This helps in rare cases when reading a set file fails with ``TypeError: buffer is too small for requested array``. By `Mikołaj Magnuski`_ - -- Added :class:`mne.decoding.TemporalFilter` to filter data in scikit-learn pipelines, by `Asish Panda`_ - -- :func:`mne.preprocessing.create_ecg_epochs` now includes all the channels when ``picks=None`` by `Jaakko Leppakangas`_ - -- :func:`mne.set_eeg_reference` now allows moving from a custom to an average EEG reference by `Marijn van Vliet`_ - -Authors -~~~~~~~ - -The committer list for this release is the following (sorted by alphabetical order): - -* Alexander Rudiuk -* Alexandre Barachant -* Alexandre Gramfort -* Asish Panda -* Camilo Lamus -* Chris Holdgraf -* Christian Brodbeck -* Christopher J. Bailey -* Christopher Mullins -* Clemens Brunner -* Denis A. Engemann -* Eric Larson -* Federico Raimondo -* Félix Raimundo -* Guillaume Dumas -* Jaakko Leppakangas -* Jair Montoya -* Jean-Remi King -* Johannes Niediek -* Jona Sassenhagen -* Jussi Nurminen -* Keith Doelling -* Mainak Jas -* Marijn van Vliet -* Michael Krause -* Mikolaj Magnuski -* Nick Foti -* Phillip Alday -* Simon-Shlomo Poil -* Teon Brooks -* Yaroslav Halchenko diff --git a/doc/changes/0.14.inc b/doc/changes/0.14.inc deleted file mode 100644 index 7db46d27617..00000000000 --- a/doc/changes/0.14.inc +++ /dev/null @@ -1,250 +0,0 @@ -.. 
_changes_0_14: - -Version 0.14 (2017-03-24) -------------------------- - -Changelog -~~~~~~~~~ - -- Add example of time-frequency decoding with CSP by `Laura Gwilliams`_ - -- Automatically create a legend in :func:`mne.viz.plot_evoked_topo` by `Jussi Nurminen`_ - -- Add I/O support for Artemis123 infant/toddler MEG data by `Luke Bloy`_ - -- Add filter plotting functions :func:`mne.viz.plot_filter` and :func:`mne.viz.plot_ideal_filter` as well as filter creation function :func:`mne.filter.create_filter` by `Eric Larson`_ - -- Add HCP-MMP1.0 parcellation dataset downloader by `Eric Larson`_ - -- Add option to project EEG electrodes onto the scalp in ``mne.viz.plot_trans`` by `Eric Larson`_ - -- Add option to plot individual sensors in :meth:`mne.io.Raw.plot_psd` by `Alex Gramfort`_ and `Eric Larson`_ - -- Add option to plot ECoG electrodes in ``mne.viz.plot_trans`` by `Eric Larson`_ - -- Add convenient default values to :meth:`mne.io.Raw.apply_hilbert` and :meth:`mne.io.Raw.apply_function` by `Denis Engemann`_ - -- Remove MNE-C requirement for :ref:`mne make_scalp_surfaces` by `Eric Larson`_ - -- Add support for FastTrack Polhemus ``.mat`` file outputs in ``hsp`` argument of ``mne.channels.read_dig_montage`` by `Eric Larson`_ - -- Add option to convert 3d electrode plots to a snapshot with 2d electrode positions with :func:`mne.viz.snapshot_brain_montage` by `Chris Holdgraf`_ - -- Add skull surface plotting option to ``mne.viz.plot_trans`` by `Jaakko Leppakangas`_ - -- Add minimum-phase filtering option in :meth:`mne.io.Raw.filter` by `Eric Larson`_ - -- Add support for reading ASCII BrainVision files in :func:`mne.io.read_raw_brainvision` by `Eric Larson`_ - -- Add method of ICA objects for retrieving the component maps :meth:`mne.preprocessing.ICA.get_components` by `Jona Sassenhagen`_ - -- Add option to plot events in :func:`mne.viz.plot_epochs` by `Jaakko Leppakangas`_ - -- Add dipole definitions for older phantom at Otaniemi in :func:`mne.dipole.get_phantom_dipoles` by `Eric Larson`_ - -- Add spatial colors option for :func:`mne.viz.plot_raw_psd` by `Jaakko Leppakangas`_ - -- Add functions like :func:`get_volume_labels_from_src` to handle mixed source spaces by `Annalisa Pascarella`_ - -- Add convenience function for opening MNE documentation :func:`open_docs` by `Eric Larson`_ - -- Add option in :meth:`mne.io.Raw.plot` to display the time axis relative to ``raw.first_samp`` by `Mainak Jas`_ - -- Add new :mod:`mne.datasets.visual_92_categories ` dataset by `Jaakko Leppakangas`_ - -- Add option in :func:`mne.io.read_raw_edf` to allow channel exclusion by `Jaakko Leppakangas`_ - -- Allow integer event codes in :func:`mne.read_epochs_eeglab` by `Jaakko Leppakangas`_ - -- Add ability to match channel names in a case insensitive manner when applying a ``mne.channels.Montage`` by `Marijn van Vliet`_ - -- Add ``yscale`` keyword argument to :meth:`mne.time_frequency.AverageTFR.plot` that allows specifying whether to present the frequency axis in linear (``'linear'``) or log (``'log'``) scale. The default value is ``'auto'`` which detects whether frequencies are log-spaced and sets yscale to log. 
Added by `Mikołaj Magnuski`_ - -- Add :ref:`Representational Similarity Analysis (RSA) ` example on :mod:`mne.datasets.visual_92_categories.data_path` dataset by `Jaakko Leppakangas`_, `Jean-Remi King`_ and `Alex Gramfort`_ - -- Add support for NeuroScan files with event type 3 in :func:`mne.io.read_raw_cnt` by `Marijn van Vliet`_ - -- Add interactive annotation mode to :meth:`mne.io.Raw.plot` (accessed by pressing 'a') by `Jaakko Leppakangas`_ - -- Add support for deleting all projectors or a list of indices in :meth:`mne.io.Raw.del_proj` by `Eric Larson`_ - -- Add source space plotting with :meth:`mne.SourceSpaces.plot` using ``mne.viz.plot_trans`` by `Eric Larson`_ - -- Add :func:`mne.decoding.get_coef` to retrieve and inverse the coefficients of a linear model - typically a spatial filter or pattern, by `Jean-Remi King`_ - -- Add support for reading in EGI MFF digitization coordinate files in ``mne.channels.read_dig_montage`` by `Matt Boggess`_ - -- Add ``n_per_seg`` keyword argument to ``mne.time_frequency.psd_welch`` and :func:`mne.time_frequency.psd_array_welch` that allows to control segment length independently of ``n_fft`` and use zero-padding when ``n_fft > n_per_seg`` by `Mikołaj Magnuski`_ - -- Add annotation aware data getter :meth:`mne.io.Raw.get_data` by `Jaakko Leppakangas`_ - -- Add support of dipole location visualization with MRI slice overlay with matplotlib to :func:`mne.viz.plot_dipole_locations` via mode='orthoview' parameter by `Jaakko Leppakangas`_ and `Alex Gramfort`_ - -- Add plotting of head positions as a function of time in :func:`mne.viz.plot_head_positions` by `Eric Larson`_ - -- Add ``real_filter`` option to ``mne.beamformer.dics``, ``mne.beamformer.dics_source_power``, ``mne.beamformer.tf_dics`` and ``mne.beamformer.dics_epochs`` by `Eric Larson`_, `Alex Gramfort`_ and `Andrea Brovelli`_. - -- Add a demo script showing how to use a custom inverse solver with MNE by `Alex Gramfort`_ - -- Functions :func:`mne.preprocessing.create_ecg_epochs`, :func:`mne.preprocessing.create_eog_epochs`, :func:`mne.compute_raw_covariance` and ICA methods :meth:`mne.preprocessing.ICA.score_sources`, :meth:`mne.preprocessing.ICA.find_bads_ecg`, :meth:`mne.preprocessing.ICA.find_bads_eog` are now annotation aware by `Jaakko Leppakangas`_ - -- Allow using ``spatial_colors`` for non-standard layouts by creating custom layouts from channel locations and add ``to_sphere`` keyword to :func:`mne.viz.plot_sensors` to allow plotting sensors that are not on the head surface by `Jaakko Leppakangas`_ - -- Concatenating raws with :func:`mne.concatenate_raws` now creates boundary annotations automatically by `Jaakko Leppakangas`_ - -- :func:`mne.viz.plot_projs_topomap` now supports plotting EEG topomaps by passing in :class:`mne.Info` by `Eric Larson`_ - -BUG -~~~ - -- Fix bug with DICS and LCMV (e.g., ``mne.beamformer.lcmv``, ``mne.beamformer.dics``) where regularization was done improperly. 
The default ``reg=0.01`` has been changed to ``reg=0.05``, by `Andrea Brovelli`_, `Alex Gramfort`_, and `Eric Larson`_ - -- Fix callback function call in ``mne.viz.topo._plot_topo_onpick`` by `Erkka Heinila`_ - -- Fix reading multi-file CTF recordings in :func:`mne.io.read_raw_ctf` by `Niklas Wilming`_ - -- Fix computation of AR coefficients across channels in :func:`mne.time_frequency.fit_iir_model_raw` by `Eric Larson`_ - -- Fix maxfilter channel names extra space bug in :func:`mne.preprocessing.maxwell_filter` by `Sheraz Khan`_ - -- :func:`mne.channels.find_layout` now leaves out the excluded channels by `Jaakko Leppakangas`_ - -- Array data constructors :class:`mne.io.RawArray` and :class:`EvokedArray` now make a copy of the info structure by `Jaakko Leppakangas`_ - -- Fix bug with finding layouts in :func:`mne.viz.plot_projs_topomap` by `Eric Larson`_ - -- Fix bug :func:`mne.io.anonymize_info` when Info does not contain 'file_id' or 'meas_id' fields by `Jean-Remi King`_ - -- Fix colormap selection in :func:`mne.viz.plot_evoked_topomap` when using positive vmin with negative data by `Jaakko Leppakangas`_ - -- Fix channel name comparison in ``mne.channels.read_montage`` so that if ``ch_names`` is provided, the returned montage will have channel names in the same letter case by `Jaakko Leppakangas`_ - -- Fix :meth:`inst.set_montage(montage) ` to only set ``inst.info['dev_head_t']`` if ``dev_head_t=True`` in ``mne.channels.read_dig_montage`` by `Eric Larson`_ - -- Fix handling of events in ``mne.realtime.RtEpochs`` when the triggers were split between two buffers resulting in missing and/or duplicate epochs by `Mainak Jas`_ and `Antti Rantala`_ - -- Fix bug with automatic decimation in :func:`mne.io.read_raw_kit` by `Keith Doelling`_ - -- Fix bug with :func:`setup_volume_source_space` where arguments ``subject`` and ``subjects_dir`` were ignored by `Jaakko Leppakangas`_ - -- Fix sanity check for incompatible ``threshold`` and ``tail`` values in clustering functions like :func:`mne.stats.spatio_temporal_cluster_1samp_test` by `Eric Larson`_ - -- Fix ``_bad_dropped`` not being set when loading eeglab epoched files via :func:`mne.read_epochs_eeglab` which resulted in :func:`len` not working by `Mikołaj Magnuski`_ - -- Fix a bug in :meth:`mne.time_frequency.AverageTFR.plot` when plotting without a colorbar by `Jaakko Leppakangas`_ - -- Fix ``_filenames`` attribute in creation of :class:`mne.io.RawArray` with :meth:`mne.preprocessing.ICA.get_sources` by `Paul Pasler`_ - -- Fix contour levels in :func:`mne.viz.plot_evoked_topomap` to be uniform across topomaps by `Jaakko Leppakangas`_ - -- Fix bug in :func:`mne.preprocessing.maxwell_filter` where fine calibration indices were mismatched leading to an ``AssertionError`` by `Eric Larson`_ - -- Fix bug in :func:`mne.preprocessing.fix_stim_artifact` where non-data channels were interpolated by `Eric Larson`_ - -- :class:`mne.decoding.Scaler` now scales each channel independently using data from all time points (epochs and times) instead of scaling all channels for each time point. 
It also now accepts parameter ``scalings`` to determine the data scaling method (default is ``None`` to use static channel-type-based scaling), by `Asish Panda`_, `Jean-Remi King`_, and `Eric Larson`_ - -- Raise error if the cv parameter of ``mne.decoding.GeneralizationAcrossTime`` and ``mne.decoding.TimeDecoding`` is not a partition and the predict_mode is "cross-validation" by `Jean-Remi King`_ - -- Fix bug in :func:`mne.io.read_raw_edf` when ``preload=False`` and channels have different sampling rates by `Jaakko Leppakangas`_ - -- Fix :func:`mne.read_labels_from_annot` to set ``label.values[:]=1`` rather than 0 for consistency with the :class:`Label` class by `Jon Houck`_ - -- Fix plotting non-uniform freqs (for example log-spaced) in :meth:`mne.time_frequency.AverageTFR.plot` by `Mikołaj Magnuski`_ - -- Fix :func:`mne.minimum_norm.compute_source_psd` when used with ``pick_ori=None`` by `Annalisa Pascarella`_ and `Alex Gramfort`_ - -- Fix bug in :class:`mne.Annotations` where concatenating two raws where ``orig_time`` of the second run is ``None`` by `Jaakko Leppakangas`_ - -- Fix reading channel location from eeglab ``.set`` files when some of the channels do not provide this information. Previously all channel locations were ignored in such case, now they are read - unless a montage is provided by the user in which case only channel names are read from set file. By `Mikołaj Magnuski`_ - -- Fix reading eeglab ``.set`` files when ``.chanlocs`` structure does not contain ``X``, ``Y`` or ``Z`` fields by `Mikołaj Magnuski`_ - -- Fix bug with :func:`mne.simulation.simulate_raw` when ``interp != 'zero'`` by `Eric Larson`_ - -- Fix :func:`mne.fit_dipole` to handle sphere model rank deficiency properly by `Alex Gramfort`_ - -- Raise error in :func:`mne.concatenate_epochs` when concatenated epochs have conflicting event_id by `Mikołaj Magnuski`_ - -- Fix handling of ``n_components=None`` in :class:`mne.preprocessing.ICA` by `Richard Höchenberger`_ - -- Fix reading of fiducials correctly from CTF data in :func:`mne.io.read_raw_ctf` by `Jaakko Leppakangas`_ - -- Fix :func:`mne.beamformer.rap_music` to return dipoles with amplitudes in Am instead of nAm by `Jaakko Leppakangas`_ - -- Fix computation of duality gap in ``mne.inverse_sparse.mxne_optim.dgap_l21`` by `Mathurin Massias`_ - -API -~~~ - -- The filtering functions ``band_pass_filter``, ``band_stop_filter``, ``low_pass_filter``, and ``high_pass_filter`` have been deprecated in favor of :func:`mne.filter.filter_data` by `Eric Larson`_ - -- :class:`EvokedArray` now has default value ``tmin=0.`` by `Jaakko Leppakangas`_ - -- The ``ch_type`` argument for ``mne.viz.plot_trans`` has been deprecated, use ``eeg_sensors`` and ``meg_sensors`` instead, by `Eric Larson`_ - -- The default ``tmax=60.`` in :meth:`mne.io.Raw.plot_psd` will change to ``tmax=np.inf`` in 0.15, by `Eric Larson`_ - -- Base classes :class:`mne.io.BaseRaw` and :class:`mne.BaseEpochs` are now public to allow easier typechecking, by `Daniel McCloy`_ - -- :func:`mne.io.read_raw_edf` now combines triggers from multiple tal channels to 'STI 014' by `Jaakko Leppakangas`_ - -- The measurement info :class:`Info` no longer contains a potentially misleading ``info['filename']`` entry. 
Use class properties like :attr:`mne.io.Raw.filenames` or :attr:`mne.Epochs.filename` instead by `Eric Larson`_ - -- Default fiducial name change from 'nz' to 'nasion' in ``mne.channels.read_montage``, so that it is the same for both ``mne.channels.Montage`` and :class: `mne.channels.DigMontage` by `Leonardo Barbosa`_ - -- MNE's additional files for the ``fsaverage`` head/brain model are now included in MNE-Python, and the now superfluous ``mne_root`` parameter to :func:`create_default_subject` has been deprecated by `Christian Brodbeck`_ - -- An ``overwrite=False`` default parameter has been added to :func:`write_source_spaces` to protect against accidental overwrites, by `Eric Larson`_ - -- The :class:`mne.decoding.LinearModel` class will no longer support ``plot_filters`` and ``plot_patterns``, use :class:`mne.EvokedArray` with :func:`mne.decoding.get_coef` instead, by `Jean-Remi King`_ - -- Made functions :func:`mne.time_frequency.tfr_array_multitaper`, :func:`mne.time_frequency.tfr_array_morlet`, :func:`mne.time_frequency.tfr_array_stockwell`, :func:`mne.time_frequency.psd_array_multitaper` and :func:`mne.time_frequency.psd_array_welch` public to allow computing TFRs and PSDs on numpy arrays by `Jaakko Leppakangas`_ - -- :meth:`mne.preprocessing.ICA.fit` now rejects data annotated bad by default. Turn off with ``reject_by_annotation=False``, by `Jaakko Leppakangas`_ - -- :func:`mne.io.read_raw_egi` now names channels with pattern 'E'. This behavior can be changed with parameter ``channel_naming`` by `Jaakko Leppakangas`_ - -- the ``name`` parameter in :class:`mne.Epochs` is deprecated, by `Jaakko Leppakangas`_ - -Authors -~~~~~~~ - -People who contributed to this release (in alphabetical order): - -* Alexander Rudiuk -* Alexandre Gramfort -* Annalisa Pascarella -* Antti Rantala -* Asish Panda -* Burkhard Maess -* Chris Holdgraf -* Christian Brodbeck -* Cristóbal Moënne-Loccoz -* Daniel McCloy -* Denis A. Engemann -* Eric Larson -* Erkka Heinila -* Hermann Sonntag -* Jaakko Leppakangas -* Jakub Kaczmarzyk -* Jean-Remi King -* Jon Houck -* Jona Sassenhagen -* Jussi Nurminen -* Keith Doelling -* Leonardo S. Barbosa -* Lorenz Esch -* Lorenzo Alfine -* Luke Bloy -* Mainak Jas -* Marijn van Vliet -* Matt Boggess -* Matteo Visconti -* Mikolaj Magnuski -* Niklas Wilming -* Paul Pasler -* Richard Höchenberger -* Sheraz Khan -* Stefan Repplinger -* Teon Brooks -* Yaroslav Halchenko diff --git a/doc/changes/0.15.inc b/doc/changes/0.15.inc deleted file mode 100644 index ada8180d4ac..00000000000 --- a/doc/changes/0.15.inc +++ /dev/null @@ -1,329 +0,0 @@ -.. 
_changes_0_15: - -Version 0.15 (2017-10-20) -------------------------- - -Changelog -~~~~~~~~~ - -- :meth:`mne.channels.Layout.plot` and :func:`mne.viz.plot_layout` now allows plotting a subset of channels with ``picks`` argument by `Jaakko Leppakangas`_ - -- Add .bvef extension (BrainVision Electrodes File) to ``mne.channels.read_montage`` by `Jean-Baptiste Schiratti`_ - -- Add :func:`mne.decoding.cross_val_multiscore` to allow scoring of multiple tasks, typically used with :class:`mne.decoding.SlidingEstimator`, by `Jean-Remi King`_ - -- Add :class:`mne.decoding.ReceptiveField` module for modeling electrode response to input features by `Chris Holdgraf`_ - -- Add :class:`mne.decoding.TimeDelayingRidge` class, used by default by :class:`mne.decoding.ReceptiveField`, to speed up auto- and cross-correlation computations and enable Laplacian regularization by `Ross Maddox`_ and `Eric Larson`_ - -- Add new :mod:`mne.datasets.mtrf ` dataset by `Chris Holdgraf`_ - -- Add example of time-frequency decoding with CSP by `Laura Gwilliams`_ - -- Add :class:`mne.decoding.SPoC` to fit and apply spatial filters based on continuous target variables, by `Jean-Remi King`_ and `Alexandre Barachant`_ - -- Add Fieldtrip's electromyogram dataset, by `Alexandre Barachant`_ - -- Add ``reject_by_annotation`` option to :func:`mne.preprocessing.find_eog_events` (which is also utilised by :func:`mne.preprocessing.create_eog_epochs`) to omit data that is annotated as bad by `Jaakko Leppakangas`_ - -- Add example for fast screening of event-related dynamics in frequency bands by `Denis Engemann`_ - -- Add :meth:`mne.time_frequency.EpochsTFR.save` by `Jaakko Leppakangas`_ - -- Add butterfly mode (toggled with 'b' key) to :meth:`mne.io.Raw.plot` by `Jaakko Leppakangas`_ - -- Add ``axes`` parameter to plot_topo functions by `Jaakko Leppakangas`_ - -- Add options to change time windowing in :func:`mne.chpi.filter_chpi` by `Eric Larson`_ - -- ``mne.channels.Montage.plot``, :meth:`mne.channels.DigMontage.plot`, and :func:`mne.viz.plot_montage` now allow plotting channel locations as a topomap by `Clemens Brunner`_ - -- Add ``background_color`` parameter to :meth:`mne.Evoked.plot_topo` and :func:`mne.viz.plot_evoked_topo` and improve axes rendering as done in :func:`mne.viz.plot_compare_evokeds` by `Alex Gramfort`_ - -- Add support for GDF files in :func:`mne.io.read_raw_edf` by `Nicolas Barascud`_ - -- Add ``mne.io.find_edf_events`` for getting the events as they are in the EDF/GDF header by `Jaakko Leppakangas`_ - -- Speed up :meth:`mne.io.Raw.plot` and :meth:`mne.Epochs.plot` using (automatic) decimation based on low-passing with ``decim='auto'`` parameter by `Eric Larson`_ and `Jaakko Leppakangas`_ - -- Add ``mne.inverse_sparse.mxne_optim.dgap_l21l1`` for computing the duality gap for TF-MxNE as the new stopping criterion by `Daniel Strohmeier`_ - -- Add option to return a list of :class:`Dipole` objects in sparse source imaging methods by `Daniel Strohmeier`_ - -- Add :func:`mne.inverse_sparse.make_stc_from_dipoles` to generate stc objects from lists of dipoles by `Daniel Strohmeier`_ - -- Add ``mne.channels.find_ch_connectivity`` that tries to infer the correct connectivity template using channel info. 
If no template is found, it computes the connectivity matrix using :class:`Delaunay ` triangulation of the 2d projected channel positions by `Jaakko Leppakangas`_ - -- Add IO support for EGI MFF format by `Jaakko Leppakangas`_ and `ramonapariciog`_ - -- Add option to use matplotlib backend when plotting with :func:`mne.viz.plot_source_estimates` by `Jaakko Leppakangas`_ - -- Add ``mne.channels.Montage.get_pos2d`` to get the 2D positions of channels in a montage by `Clemens Brunner`_ - -- Add MGH 60- and 70-channel standard montages to ``mne.channels.read_montage`` by `Eric Larson`_ - -- Add option for embedding SVG instead of PNG in HTML for :class:`mne.Report` by `Eric Larson`_ - -- Add confidence intervals, number of free parameters, and χ² to :func:`mne.fit_dipole` and :func:`mne.read_dipole` by `Eric Larson`_ - -- :attr:`mne.SourceEstimate.data` is now writable, writing to it will also update :attr:`mne.SourceEstimate.times` by `Marijn van Vliet`_ - -- :meth:`mne.io.Raw.plot` and :meth:`mne.Epochs.plot` now use anti-aliasing to draw signals by `Clemens Brunner`_ - -- Allow using saved ``DigMontage`` to import digitization to :func:`mne.gui.coregistration` by `Jaakko Leppakangas`_ - -- Add function :func:`mne.channels.get_builtin_montages` to list all built-in montages by `Clemens Brunner`_ - -- :class:`mne.decoding.SlidingEstimator` and :class:`mne.decoding.GeneralizingEstimator` now accept ``**fit_params`` at fitting by `Jean-Remi King`_ - -- Add :class:`mne.VectorSourceEstimate` class which enables working with both source power and dipole orientations by `Marijn van Vliet`_ - -- Add option ``pick_ori='vector'`` to :func:`mne.minimum_norm.apply_inverse` to produce :class:`mne.VectorSourceEstimate` by `Marijn van Vliet`_ - -- Add support for :class:`numpy.random.RandomState` argument to ``seed`` in :mod:`statistical clustering functions ` and better documentation of exact 1-sample tests by `Eric Larson`_ - -- Extend :func:`mne.viz.plot_epochs_image`/:meth:`mne.Epochs.plot_image` with regards to grouping by or aggregating over channels. 
See the new example at ``examples/visualization/plot_roi_erpimage_by_rt.py`` by `Jona Sassenhagen`_ - -- Add bootstrapped confidence intervals to :func:`mne.viz.plot_compare_evokeds` by `Jona Sassenhagen`_ and `Denis Engemann`_ - -- Add example on how to plot ERDS maps (also known as ERSP) by `Clemens Brunner`_ - -- Add support for volume source spaces to ``spatial_src_connectivity`` and ``spatio_temporal_src_connectivity`` by `Alex Gramfort`_ - -- Plotting raw data (:func:`mne.viz.plot_raw` or :meth:`mne.io.Raw.plot`) with events now includes event numbers (if there are not more than 50 events on a page) by `Clemens Brunner`_ - -- Add filtering functions :meth:`mne.Epochs.filter` and :meth:`mne.Evoked.filter`, as well as ``pad`` argument to :meth:`mne.io.Raw.filter`, by `Eric Larson`_ (see the usage sketch below) - -- Add high frequency somatosensory MEG dataset by `Jussi Nurminen`_ - -- Add reduced set of labels for HCPMMP-1.0 parcellation in :func:`mne.datasets.fetch_hcp_mmp_parcellation` by `Eric Larson`_ - -- Enable morphing between hemispheres with ``mne.compute_morph_matrix`` by `Christian Brodbeck`_ - -- Add ``return_residual`` to :func:`mne.minimum_norm.apply_inverse` by `Eric Larson`_ - -- Add ``return_drop_log`` to :func:`mne.preprocessing.compute_proj_eog` and :func:`mne.preprocessing.compute_proj_ecg` by `Eric Larson`_ - -- Add time cursor and category/amplitude status message into the single-channel evoked plot by `Jussi Nurminen`_ - -BUG -~~~ -- Fixed a bug when creating spherical volumetric grid source spaces in :func:`setup_volume_source_space` by improving the minimum-distance computations, which in general will decrease the number of used source space points by `Eric Larson`_ - -- Fix bug in :func:`mne.io.read_raw_brainvision` when reading .vhdr files with ANSI codepage by `Okba Bekhelifi`_ and `Alex Gramfort`_ - -- Fix unit scaling when reading in EGI digitization files using ``mne.channels.read_dig_montage`` by `Matt Boggess`_ - -- Fix ``picks`` default in :meth:`mne.io.Raw.filter` to include ``ref_meg`` channels by default by `Eric Larson`_ - -- Fix :class:`mne.decoding.CSP` order of spatial filter in ``patterns_`` by `Alexandre Barachant`_ - -- :func:`mne.concatenate_epochs` now maintains the relative position of events during concatenation by `Alexandre Barachant`_ - -- Fix bug in script ``mne make_scalp_surfaces`` by `Denis Engemann`_ (this bug prevented creation of high-resolution meshes when they were absent in the first place.) 
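A minimal, illustrative sketch for the :meth:`mne.Epochs.filter` / :meth:`mne.Evoked.filter` entry referenced above; it assumes ``epochs``, ``evoked`` and ``raw`` objects with preloaded data already exist, and the variable names are placeholders::

    # Band-pass epoched data in place between 1 and 40 Hz
    epochs.filter(l_freq=1., h_freq=40.)

    # Low-pass an Evoked response at 30 Hz; the same keywords work for Raw,
    # where the new pad argument controls edge padding, e.g.
    # raw.filter(1., 40., pad='reflect_limited')
    evoked.filter(l_freq=None, h_freq=30.)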
- -- Fix writing of raw files with empty set of annotations by `Jaakko Leppakangas`_ - -- Fix bug in :meth:`mne.preprocessing.ICA.plot_properties` where merging gradiometers would fail by `Jaakko Leppakangas`_ - -- Fix :func:`mne.viz.plot_sensors` to maintain proper aspect ratio by `Eric Larson`_ - -- Fix :func:`mne.viz.plot_topomap` to allow 0 contours by `Jaakko Leppakangas`_ - -- Fix :class:`mne.preprocessing.ICA` source-picking to increase threshold for rank estimation to 1e-14 by `Jesper Duemose Nielsen`_ - -- Fix :func:`mne.set_bipolar_reference` to support duplicates in anodes by `Jean-Baptiste Schiratti`_ and `Alex Gramfort`_ - -- Fix visuals of :func:`mne.viz.plot_evoked` and a bug where ylim changes when using interactive topomap plotting by `Jaakko Leppakangas`_ - -- Fix :meth:`mne.Evoked.plot_topomap` when using the ``mask`` argument with paired gradiometers by `Eric Larson`_ - -- Fix bug in :meth:`mne.Label.fill` where an empty label raised an error, by `Eric Larson`_ - -- Fix :func:`mne.io.read_raw_ctf` to also include the samples in the last block by `Jaakko Leppakangas`_ - -- Fix :meth:`mne.preprocessing.ICA.save` to close file before attempting to delete it when write fails by `Jesper Duemose Nielsen`_ - -- Fix :func:`mne.simulation.simulate_evoked` to use nave parameter instead of snr, by `Yousra Bekhti`_ - -- Fix :func:`mne.read_bem_surfaces` for BEM files missing normals by `Christian Brodbeck`_ - -- Fix :func:`mne.transform_surface_to` to actually copy when ``copy=True`` by `Eric Larson`_ - -- Fix :func:`mne.io.read_raw_brainvision` to read vectorized data correctly by `Jaakko Leppakangas`_ and `Phillip Alday`_ - -- Fix ``mne.connectivity.spectral_connectivity`` so that if ``n_jobs > 1`` it does not ignore last ``n_epochs % n_jobs`` epochs by `Mikołaj Magnuski`_ - -- Fix :func:`mne.io.read_raw_edf` to infer sampling rate correctly when reading EDF+ files where tal-channel has a higher sampling frequency by `Jaakko Leppakangas`_ - -- Fix default value of ``kind='topomap'`` in ``mne.channels.Montage.plot`` to be consistent with :func:`mne.viz.plot_montage` by `Clemens Brunner`_ - -- Fix bug in :meth:`to_data_frame ` where non-consecutive picks would make the function crash by `Jaakko Leppakangas`_ - -- Fix channel picking and drop in :class:`mne.time_frequency.EpochsTFR` by `Lukáš Hejtmánek`_ - -- Fix :func:`mne.SourceEstimate.transform` to properly update :attr:`mne.SourceEstimate.times` by `Marijn van Vliet`_ - -- Fix :func:`mne.viz.plot_evoked_joint` to allow custom titles without appending information about the channels by `Jaakko Leppakangas`_ - -- Fix writing a forward solution after being processed by :func:`mne.forward.restrict_forward_to_label` or :func:`mne.forward.restrict_forward_to_stc` by `Marijn van Vliet`_ - -- Fix bug in :func:`mne.viz.plot_compare_evokeds` where ``truncate_yaxis`` was ignored (default is now ``False``), by `Jona Sassenhagen`_ - -- Fix bug in :func:`mne.viz.plot_evoked` where all xlabels were removed when using ``spatial_colors=True``, by `Jesper Duemose Nielsen`_ - -- Fix field mapping :func:`mne.make_field_map` and MEG bad channel interpolation functions (e.g., :meth:`mne.Evoked.interpolate_bads`) to choose a better number of components during pseudoinversion when few channels are available, by `Eric Larson`_ - -- Fix bug in :func:`mne.io.read_raw_brainvision`, changed default to read coordinate information if available and added test, by `Jesper Duemose Nielsen`_ - -- Fix bug in :meth:`mne.SourceEstimate.to_original_src` where 
morphing failed if two vertices map to the same target vertex, by `Marijn van Vliet`_ - -- Fix :class:`mne.preprocessing.Xdawn` to give verbose error messages about rank deficiency and handle transforming :class:`mne.Evoked`, by `Eric Larson`_ - -- Fix bug in DC and Nyquist frequency multitaper PSD computations, e.g. in ``mne.time_frequency.psd_multitaper``, by `Eric Larson`_ - -- Fix padding type for :meth:`mne.Epochs.resample` and :meth:`mne.Evoked.resample` to be ``'edge'`` by default, by `Eric Larson`_ - -- Fix :func:`mne.inverse_sparse.mixed_norm`, :func:`mne.inverse_sparse.tf_mixed_norm` and :func:`mne.inverse_sparse.gamma_map` to work with volume source space and sphere head models in MEG by `Alex Gramfort`_ and `Yousra Bekhti`_ - -- Fix :meth:`mne.Evoked.as_type` channel renaming to append ``'_v'`` instead of ``'_virtual'`` to channel names to comply with shorter naming (15 char) requirements, by `Eric Larson`_ - -- Fix treatment of CTF HPI coils as fiducial points in :func:`mne.gui.coregistration` by `Eric Larson`_ - -- Fix resampling of events along with raw in :class:`mne.io.Raw` to now take into consideration the value of ``first_samp`` by `Chris Bailey`_ - -- Fix labels of PSD plots in :func:`mne.viz.plot_raw_psd` by `Alejandro Weinstein`_ - -- Fix depth weighting of sparse solvers (:func:`mne.inverse_sparse.mixed_norm`, :func:`mne.inverse_sparse.tf_mixed_norm` and :func:`mne.inverse_sparse.gamma_map`) with free orientation source spaces to improve orientation estimation by `Alex Gramfort`_ and `Yousra Bekhti`_ - -- Fix the threshold in :func:`mne.beamformer.rap_music` to properly estimate the rank by `Yousra Bekhti`_ - -- Fix treatment of vector inverse in :func:`mne.minimum_norm.apply_inverse_epochs` by `Emily Stephen`_ - -- Fix :func:`mne.find_events` when passing a list as stim_channel parameter by `Alex Gramfort`_ - -- Fix parallel processing when computing covariance with shrinkage estimators by `Denis Engemann`_ - -API -~~~ -- Removed the ``mne.datasets.megsim`` dataset because it was taken down by its host (MRN). Use the :mod:`mne.simulation` functions instead, by `Eric Larson`_ - -- Add ``skip_by_annotation`` to :meth:`mne.io.Raw.filter` to process data concatenated with e.g. :func:`mne.concatenate_raws` separately. This parameter will default to the old behavior (treating all data as a single block) in 0.15 but will change to ``skip_by_annotation='edge'``, which will filter the concatenated chunks separately, in 0.16. This should help prevent potential problems with filter-induced ringing in concatenated files, by `Eric Larson`_ - -- ICA channel names have now been reformatted to start from zero, e.g. ``"ICA000"``, to match indexing schemes in :class:`mne.preprocessing.ICA` and related functions, by `Stefan Repplinger`_ and `Eric Larson`_ - -- Add :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.apply_lcmv`, :func:`mne.beamformer.apply_lcmv_epochs`, and :func:`mne.beamformer.apply_lcmv_raw` to enable the separate computation and application of LCMV beamformer weights (see the sketch below) by `Britta Westner`_, `Alex Gramfort`_, and `Denis Engemann`_. - -- Add ``weight_norm`` parameter to enable both unit-noise-gain beamformer and neural activity index (weight normalization) and make whitening optional by allowing ``noise_cov=None`` in ``mne.beamformer.lcmv``, ``mne.beamformer.lcmv_epochs``, and ``mne.beamformer.lcmv_raw``, by `Britta Westner`_, `Alex Gramfort`_, and `Denis Engemann`_.
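A minimal sketch of the split LCMV workflow referenced in the two entries above (illustrative only; it assumes ``evoked``, ``forward``, ``data_cov`` and ``noise_cov`` have already been computed, and the variable names are placeholders)::

    from mne.beamformer import make_lcmv, apply_lcmv

    # Compute the spatial filters once from the data covariance...
    filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
                        noise_cov=noise_cov, pick_ori='max-power',
                        weight_norm='unit-noise-gain')

    # ...then apply them to the data; apply_lcmv_epochs and apply_lcmv_raw
    # work analogously for Epochs and Raw objects.
    stc = apply_lcmv(evoked, filters)

Setting ``weight_norm='nai'`` instead yields the neural activity index, and passing ``noise_cov=None`` skips whitening, as described in the ``weight_norm`` entry above.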
- -- Add new filtering mode ``fir_design='firwin'`` (default in the next 0.16 release) that gets improved attenuation using fewer samples compared to ``fir_design='firwin2'`` (default in the current 0.15 release) by `Eric Larson`_ - -- Make the goodness of fit (GOF) of the dipoles returned by :func:`mne.beamformer.rap_music` consistent with the GOF of dipoles returned by :func:`mne.fit_dipole` by `Alex Gramfort`_. - -- :class:`mne.decoding.SlidingEstimator` will now replace ``mne.decoding.TimeDecoding`` to make it generic and fully compatible with scikit-learn, by `Jean-Remi King`_ and `Alex Gramfort`_ - -- :class:`mne.decoding.GeneralizingEstimator` will now replace ``mne.decoding.GeneralizationAcrossTime`` to make it generic and fully compatible with scikit-learn, by `Jean-Remi King`_ and `Alex Gramfort`_ - -- ``mne.viz.decoding.plot_gat_times``, ``mne.viz.decoding.plot_gat_matrix`` are now deprecated. Use matplotlib instead as shown in the examples, by `Jean-Remi King`_ and `Alex Gramfort`_ - -- Add ``norm_trace`` parameter to control single-epoch covariance normalization in :class:mne.decoding.CSP, by `Jean-Remi King`_ - -- Allow passing a list of channel names as ``show_names`` in function :func:`mne.viz.plot_sensors` and methods :meth:`mne.Evoked.plot_sensors`, :meth:`mne.Epochs.plot_sensors` and :meth:`mne.io.Raw.plot_sensors` to show only a subset of channel names by `Jaakko Leppakangas`_ - -- Make function ``mne.io.eeglab.read_events_eeglab`` public to allow loading overlapping events from EEGLAB files, by `Jona Sassenhagen`_. - -- :func:`mne.find_events` ``mask_type`` parameter will change from ``'not_and'`` to ``'and'`` in 0.16. - -- Instead of raising an error, duplicate channel names in the data file are now appended with a running number by `Jaakko Leppakangas`_ - -- :func:`mne.io.read_raw_edf` has now ``'auto'`` option for ``stim_channel`` (default in version 0.16) that automatically detects if EDF annotations or GDF events exist in the header and constructs the stim channel based on these events by `Jaakko Leppakangas`_ - -- :meth:`mne.io.Raw.plot_psd` now rejects data annotated bad by default. Turn off with ``reject_by_annotation=False``, by `Eric Larson`_ - -- :func:`mne.set_eeg_reference` and the related methods (e.g., :meth:`mne.io.Raw.set_eeg_reference`) have a new argument ``projection``, which if set to False directly applies an average reference instead of adding an SSP projector, by `Clemens Brunner`_ - -- Deprecate ``plot_trans`` in favor of :func:`mne.viz.plot_alignment` and add ``bem`` parameter for plotting conductor model by `Jaakko Leppakangas`_ - -- ``mne.beamformer.tf_lcmv`` now has a ``raw`` parameter to accommodate epochs objects that already have data loaded with ``preload=True``, with :meth:`mne.Epochs.load_data`, or that are read from disk, by `Eric Larson`_ - -- ``mne.time_frequency.psd_welch`` and :func:`mne.time_frequency.psd_array_welch` now use a Hamming window (instead of a Hann window) by `Clemens Brunner`_ - -- ``picks`` parameter in ``mne.beamformer.lcmv``, ``mne.beamformer.lcmv_epochs``, ``mne.beamformer.lcmv_raw``, ``mne.beamformer.tf_lcmv`` and :func:`mne.beamformer.rap_music` is now deprecated and will be removed in 0.16, by `Britta Westner`_, `Alex Gramfort`_, and `Denis Engemann`_. - -- The keyword argument ``frequencies`` has been deprecated in favor of ``freqs`` in various time-frequency functions, e.g. 
:func:`mne.time_frequency.tfr_array_morlet`, by `Eric Larson`_ - -- Add ``patterns=False`` parameter in :class:`mne.decoding.ReceptiveField`. Turn on to compute inverse model coefficients, by `Nicolas Barascud`_ - -- The ``scale``, ``scale_time``, and ``unit`` parameters have been deprecated in favor of ``scalings``, ``scalings_time``, and ``units`` in :func:`mne.viz.plot_evoked_topomap` and related functions, by `Eric Larson`_ - -- ``loose`` parameter in inverse solvers has now a default value ``'auto'`` depending if the source space is a surface, volume, or discrete type by `Alex Gramfort`_ and `Yousra Bekhti`_ - -- The behavior of ``'mean_flip'`` label-flipping in :meth:`mne.extract_label_time_course` and related functions has been changed such that the flip, instead of having arbitrary sign, maximally aligns in the positive direction of the normals of the label, by `Eric Larson`_ - -- Deprecate force_fixed and surf_ori in :func:`mne.read_forward_solution` by `Daniel Strohmeier`_ - -- :func:`mne.convert_forward_solution` has a new argument ``use_cps``, which controls whether information on cortical patch statistics is applied while generating surface-oriented forward solutions with free and fixed orientation by `Daniel Strohmeier`_ - -- :func:`mne.write_forward_solution` writes a forward solution as a forward solution with free orientation in X/Y/Z RAS coordinates if it is derived from a forward solution with free orientation and as a forward solution with fixed orientation in surface-based local coordinates otherwise by `Daniel Strohmeier`_ - -- ``loose=None`` in inverse solvers is deprecated, use explicitly ``loose=0`` for fixed constraint and ``loose=1.0`` for free orientations by `Eric Larson`_ - -- Zero-channel-value in PSD calculation in :func:`mne.viz.plot_raw_psd` has been relaxed from error to warning by `Alejandro Weinstein`_ - -- Expose "rank" parameter in :func:`mne.viz.plot_evoked_white` to correct rank estimates on the spot during visualization by `Denis Engemann`_, `Eric Larson`_, `Alex Gramfort`_. - -- Show channel name under mouse cursor on topography plots by `Jussi Nurminen`_ - -- Return maximum response amplitude from :meth:`mne.Evoked.get_peak` - -Authors -~~~~~~~ - -People who contributed to this release (in alphabetical order): - -* akshay0724 -* Alejandro Weinstein -* Alexander Rudiuk -* Alexandre Barachant -* Alexandre Gramfort -* Andrew Dykstra -* Britta Westner -* Chris Bailey -* Chris Holdgraf -* Christian Brodbeck -* Christopher Holdgraf -* Clemens Brunner -* Cristóbal Moënne-Loccoz -* Daniel McCloy -* Daniel Strohmeier -* Denis A. Engemann -* Emily P. Stephen -* Eric Larson -* Fede Raimondo -* Jaakko Leppakangas -* Jean-Baptiste Schiratti -* Jean-Remi King -* Jesper Duemose Nielsen -* Joan Massich -* Jon Houck -* Jona Sassenhagen -* Jussi Nurminen -* Laetitia Grabot -* Laura Gwilliams -* Luke Bloy -* Lukáš Hejtmánek -* Mainak Jas -* Marijn van Vliet -* Mathurin Massias -* Matt Boggess -* Mikolaj Magnuski -* Nicolas Barascud -* Nicole Proulx -* Phillip Alday -* Ramonapariciog Apariciogarcia -* Robin Tibor Schirrmeister -* Rodrigo Hübner -* S. M. Gutstein -* Simon Kern -* Teon Brooks -* Yousra Bekhti diff --git a/doc/changes/0.16.inc b/doc/changes/0.16.inc deleted file mode 100644 index 4726b6d3efc..00000000000 --- a/doc/changes/0.16.inc +++ /dev/null @@ -1,258 +0,0 @@ -.. 
_changes_0_16: - -Version 0.16 (2018-05-01) -------------------------- - -Changelog -~~~~~~~~~ - -- Add possibility to pass dict of floats as argument to :func:`mne.make_ad_hoc_cov` by `Nathalie Gayraud`_ - -- Add support for metadata in :class:`mne.Epochs` by `Chris Holdgraf`_, `Alex Gramfort`_, `Jona Sassenhagen`_, and `Eric Larson`_ - -- Add support for plotting a dense head in :func:`mne.viz.plot_alignment` by `Eric Larson`_ - -- Allow plotting in user-created mayavi Scene in :func:`mne.viz.plot_alignment` by `Daniel McCloy`_ - -- Reduce memory consumption and do not require data to be loaded in :meth:`mne.Epochs.apply_baseline` by `Eric Larson`_ - -- Add option ``render_bem`` to :meth:`mne.Report.parse_folder` by `Eric Larson`_ - -- Add to :func:`mne.viz.plot_alignment` plotting of coordinate frame axes via ``show_axes`` and terrain-style interaction via ``interaction``, by `Eric Larson`_ - -- Add option ``initial_event`` to :func:`mne.find_events` by `Clemens Brunner`_ - -- Left and right arrow keys now scroll by 25% of the visible data, whereas Shift+left/right scroll by a whole page in :meth:`mne.io.Raw.plot` by `Clemens Brunner`_ - -- Add support for gantry tilt angle determination from Elekta FIF file header by `Chris Bailey`_ - -- Add possibility to concatenate :class:`mne.Annotations` objects with ``+`` or ``+=`` operators by `Clemens Brunner`_ - -- Add support for MaxShield raw files in :class:`mne.Report` by `Eric Larson`_ - -- Add ability to plot whitened data in :meth:`mne.io.Raw.plot`, :meth:`mne.Epochs.plot`, :meth:`mne.Evoked.plot`, and :meth:`mne.Evoked.plot_topo` by `Eric Larson`_ - -- Workaround for reading EGI MFF files with physiological signals that also present a bug from the EGI system in :func:`mne.io.read_raw_egi` by `Federico Raimondo`_ - -- Add support for reading subject height and weight in ``info['subject_info']`` by `Eric Larson`_ - -- Improve online filtering of raw data when plotting with :meth:`mne.io.Raw.plot` to filter in segments in accordance with the default ``skip_by_annotation=('edge', 'bad_acq_skip')`` of :meth:`mne.io.Raw.filter` to avoid edge ringing by `Eric Larson`_ - -- Add support for multiple head position files, plotting of sensors, and control of plotting color and axes in :func:`mne.viz.plot_head_positions` by `Eric Larson`_ - -- Add ability to read and write :class:`Annotations` separate from :class:`mne.io.Raw` instances via :meth:`Annotations.save` and :func:`read_annotations` by `Eric Larson`_ - -- Add option to unset a montage by passing `None` to :meth:`mne.io.Raw.set_montage` by `Clemens Brunner`_ - -- Add sensor denoising via :func:`mne.preprocessing.oversampled_temporal_projection` by `Eric Larson`_ - -- Add ``mne.io.pick.get_channel_types`` which returns all available channel types in MNE by `Clemens Brunner`_ - -- Use standard PCA instead of randomized PCA whitening prior to ICA to increase reproducibility by `Clemens Brunner`_ - -- Plot sEEG electrodes in :func:`mne.viz.plot_alignment` by `Alex Gramfort`_ - -- Add support for any data type like sEEG or ECoG in covariance related functions (estimation, whitening and plotting) by `Alex Gramfort`_ and `Eric Larson`_ - -- Add function ``mne.io.read_annotations_eeglab`` to allow loading annotations from EEGLAB files, by `Alex Gramfort`_ - -- :meth:`mne.io.Raw.set_montage` now accepts a string as its ``montage`` argument; this will set a builtin montage, by `Clemens Brunner`_ - -- Add 4D BTi phantom dataset :func:`mne.datasets.phantom_4dbti.data_path`, by `Alex Gramfort`_ - -- 
Changed the background color to grey in :func:`mne.viz.plot_alignment` to make helmet more visible, by `Alex Gramfort`_ - -- Add :meth:`mne.io.Raw.reorder_channels`, :meth:`mne.Evoked.reorder_channels`, etc. to reorder channels, by `Eric Larson`_ - -- Add to ``mne coreg`` and :func:`mne.gui.coregistration` by `Eric Larson`_: - - - Improved visibility of points inside the head - - Projection of EEG electrodes - - Orientation of extra points toward the surface - - Scaling points by distance to the head surface - - Display of HPI points - - ICP fitting with convergence criteria - - Faster display updates - - Scaling of ``mri/*.mgz`` files - - Scaling of ``mri/trainsforms/talairach.xfm`` files for conversion to MNI space - -- Add ability to exclude components interactively by clicking on their labels in :meth:`mne.preprocessing.ICA.plot_components` by `Mikołaj Magnuski`_ - -- Add reader for manual annotations of raw data produced by Brainstorm by `Anne-Sophie Dubarry`_ - -- Add eLORETA noise normalization for minimum-norm solvers by `Eric Larson`_ - -- Tighter duality gap computation in ``mne.inverse_sparse.tf_mxne_optim`` and new parametrization with ``alpha`` and ``l1_ratio`` instead of ``alpha_space`` and ``alpha_time`` by `Mathurin Massias`_ and `Daniel Strohmeier`_ - -- Add ``dgap_freq`` parameter in ``mne.inverse_sparse.mxne_optim`` solvers to control the frequency of duality gap computation by `Mathurin Massias`_ and `Daniel Strohmeier`_ - -- Add support for reading Eximia files by `Eric Larson`_ and `Federico Raimondo`_ - -- Add the Picard algorithm to perform ICA for :class:`mne.preprocessing.ICA`, by `Pierre Ablin`_ and `Alex Gramfort`_ - -- Add ability to supply a mask to the plot in :func:`mne.viz.plot_evoked_image` by `Jona Sassenhagen`_ - -- Add ``connectivity=False`` to cluster-based statistical functions to perform non-clustering stats by `Eric Larson`_ - -- Add :func:`mne.time_frequency.csd_morlet` and :func:`mne.time_frequency.csd_array_morlet` to estimate cross-spectral density using Morlet wavelets, by `Marijn van Vliet`_ - -- Add multidictionary time-frequency support to :func:`mne.inverse_sparse.tf_mixed_norm` by `Mathurin Massias`_ and `Daniel Strohmeier`_ - -- Add new DICS implementation as :func:`mne.beamformer.make_dics`, :func:`mne.beamformer.apply_dics`, :func:`mne.beamformer.apply_dics_csd` and :func:`mne.beamformer.apply_dics_epochs`, by `Marijn van Vliet`_ and `Susanna Aro`_ - -Bug -~~~ - -- Fix bug in EEG interpolation code to do nothing if there is no channel to interpolate by `Mainak Jas`_ - -- Fix bug in ``mne.preprocessing.peak_finder`` to output datatype consistently and added input check for empty vectors by `Tommy Clausner`_ - -- Fix bug in :func:`mne.io.read_raw_brainvision` to use the correct conversion for filters from time constant to frequency by `Stefan Appelhoff`_ - -- Fix bug with events when saving split files using :meth:`mne.Epochs.save` by `Eric Larson`_ - -- Fix bug in :class:`mne.decoding.SlidingEstimator` and :class:`mne.decoding.GeneralizingEstimator` to allow :func:`mne.decoding.cross_val_multiscore` to automatically detect whether the ``base_estimator`` is a classifier and use a ``StratifiedKFold`` instead of a ``KFold`` when ``cv`` is not specified, by `Jean-Remi King`_ - -- Fix bug in :func:`mne.set_eeg_reference` to remove an average reference projector when setting the reference to ``[]`` (i.e. 
do not change the existing reference) by `Clemens Brunner`_ - -- Fix bug in threshold-free cluster enhancement parameter validation (:func:`mne.stats.permutation_cluster_1samp_test` and :func:`mne.stats.permutation_cluster_test`) by `Clemens Brunner`_ - -- Fix bug in :meth:`mne.io.Raw.plot` to correctly display event types when annotations are present by `Clemens Brunner`_ - -- Fix bug in :func:`mne.stats.spatio_temporal_cluster_test` default value for ``threshold`` is now calculated based on the array sizes in ``X``, by `Eric Larson`_ - -- Fix bug in :func:`mne.simulation.simulate_raw` with ``use_cps=True`` where CPS was not actually used by `Eric Larson`_ - -- Fix bug in :func:`mne.simulation.simulate_raw` where 1- and 3-layer BEMs were not properly transformed using ``trans`` by `Eric Larson`_ - -- Fix bug in :func:`mne.viz.plot_alignment` where the head surface file ``-head.fif`` was not used even though present by `Chris Bailey`_ - -- Fix bug when writing compressed sparse column matrices (e.g., Maxwell filtering cross-talk matrices) by `Marijn van Vliet`_ and `Eric Larson`_ - -- Fix bug in :meth:`mne.io.Raw.plot_psd` to correctly deal with ``reject_by_annotation=False`` by `Clemens Brunner`_ - -- Fix bug in :func:`mne.make_fixed_length_events` when hitting corner case problems rounding to sample numbers by `Eric Larson`_ - -- Fix bug in :class:`mne.Epochs` when passing events as list with ``event_id=None`` by `Alex Gramfort`_ - -- Fix bug in ``mne.Report.add_figs_to_section`` when passing :class:`numpy.ndarray` by `Eric Larson`_ - -- Fix bug in CSS class setting in `mne.Report` BEM section by `Eric Larson`_ - -- Fix bug in :class:`Annotations` where annotations that extend to the end of a recording were not extended properly by `Eric Larson`_ - -- Fix bug in :meth:`mne.io.Raw.filter` to properly raw data with acquisition skips in separate segments by `Eric Larson`_ - -- Fix bug in :func:`mne.preprocessing.maxwell_filter` where homogeneous fields were not removed for CTF systems by `Eric Larson`_ - -- Fix computation of average quaternions in :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ - -- Fix bug in writing ``raw.annotations`` where empty annotations could not be written to disk, by `Eric Larson`_ - -- Fix support for writing FIF files with acquisition skips by using empty buffers rather than writing zeros by `Eric Larson`_ - -- Fix bug in the ``mne make_scalp_surfaces`` command where ``--force`` (to bypass topology check failures) was ignored by `Eric Larson`_ - -- Fix bug in :func:`mne.preprocessing.maxwell_filter` when providing ``origin`` in ``'meg'`` coordinate frame for recordings with a MEG to head transform (i.e., non empty-room recordings) by `Eric Larson`_ - -- Fix bug in :func:`mne.viz.plot_cov` that ignored ``colorbar`` argument by `Nathalie Gayraud`_ - -- Fix bug when picking CTF channels that could cause data saved to disk to be unreadable by `Eric Larson`_ - -- Fix bug when reading event latencies (in samples) from eeglab files didn't translate indices to 0-based python indexing by `Mikołaj Magnuski`_ - -- Fix consistency between :class:`mne.Epochs` and :func:`mne.stats.linear_regression_raw` in converting between samples and times (:func:`mne.stats.linear_regression_raw` now rounds, instead of truncating) by `Phillip Alday`_ - -- Fix bug in ``mne coreg`` where sphere surfaces were scaled by `Eric Larson`_ - -- Fix bug in :meth:`mne.Evoked.plot_topomap` when using ``proj='interactive'`` mode by `Eric Larson`_ - -- Fix bug when passing ``show_sensors=1`` 
to :func:`mne.viz.plot_compare_evokeds` resulted in sensors legend placed in lower right of the figure (position 4 in matplotlib), not upper right by `Mikołaj Magnuski`_ - -- Fix handling of annotations when cropping and concatenating raw data by `Alex Gramfort`_ and `Eric Larson`_ - -- Fix bug in :func:`mne.preprocessing.create_ecg_epochs` where ``keep_ecg=False`` was ignored by `Eric Larson`_ - -- Fix bug in :meth:`mne.io.Raw.plot_psd` when ``picks is not None`` and ``picks`` spans more than one channel type by `Eric Larson`_ - -- Fix bug in :class:`mne.make_forward_solution` when passing data with compensation channels (e.g. CTF) that contain bad channels by `Alex Gramfort`_ - -- Fix bug in :meth:`mne.SourceEstimate.get_peak` and :meth:`mne.VolSourceEstimate.get_peak` when there is only a single time point by `Marijn van Vliet`_ - -- Fix bug in :func:`mne.io.read_raw_edf` when reading BDF files stimulus channels are now not scaled anymore by `Clemens Brunner`_ - -API -~~~ - -- Channels with unknown locations are now assigned position ``[np.nan, np.nan, np.nan]`` instead of ``[0., 0., 0.]``, by `Eric Larson`_ - -- Removed unused ``image_mask`` argument from :func:`mne.viz.plot_topomap` by `Eric Larson`_ - -- Unknown measurement dates are now stored as ``info['meas_date'] = None`` rather than using the current date. ``None`` is also now used when anonymizing data and when determining the machine ID for writing files, by `Mainak Jas`_ and `Eric Larson`_ - -- :meth:`mne.Evoked.plot` will now append the number of epochs averaged for the evoked data in the first plot title, by `Eric Larson`_ - -- Changed the line width in :func:`mne.viz.plot_bem` from 2.0 to 1.0 for better visibility of underlying structures, by `Eric Larson`_ - -- Changed the behavior of :meth:`mne.io.Raw.pick_channels` and similar methods to be consistent with :func:`mne.pick_channels` to treat channel list as a set (ignoring order) -- if reordering is necessary use ``inst.reorder_channels``, by `Eric Larson`_ - -- Changed the labeling of some plotting functions to use more standard capitalization and units, e.g. 
"Time (s)" instead of "time [sec]" by `Eric Larson`_ - -- ``mne.time_frequency.csd_epochs`` has been refactored into :func:`mne.time_frequency.csd_fourier` and :func:`mne.time_frequency.csd_multitaper`, by `Marijn van Vliet`_ - -- ``mne.time_frequency.csd_array`` has been refactored into :func:`mne.time_frequency.csd_array_fourier` and :func:`mne.time_frequency.csd_array_multitaper`, by `Marijn van Vliet`_ - -- Added ``clean_names=False`` parameter to :func:`mne.io.read_raw_ctf` for control over cleaning of main channel names and compensation channel names from CTF suffixes by `Oleh Kozynets`_ - -- The functions ``lcmv``, ``lcmv_epochs``, and ``lcmv_raw`` are now deprecated in favor of :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.apply_lcmv`, :func:`mne.beamformer.apply_lcmv_epochs`, and :func:`mne.beamformer.apply_lcmv_raw`, by `Britta Westner`_ - -- The functions ``mne.beamformer.dics``, ``mne.beamformer.dics_epochs`` and ``mne.beamformer.dics_source_power`` are now deprecated in favor of :func:`mne.beamformer.make_dics`, :func:`mne.beamformer.apply_dics`, and :func:`mne.beamformer.apply_dics_csd`, by `Marijn van Vliet`_ - - -Authors -~~~~~~~ - -People who contributed to this release (in alphabetical order): - -* Alejandro Weinstein -* Alexandre Gramfort -* Annalisa Pascarella -* Anne-Sophie Dubarry -* Britta Westner -* Chris Bailey -* Chris Holdgraf -* Christian Brodbeck -* Claire Braboszcz -* Clemens Brunner -* Daniel McCloy -* Denis A. Engemann -* Desislava Petkova -* Dominik Krzemiński -* Eric Larson -* Erik Hornberger -* Fede Raimondo -* Henrich Kolkhorst -* Jean-Remi King -* Jen Evans -* Joan Massich -* Jon Houck -* Jona Sassenhagen -* Juergen Dammers -* Jussi Nurminen -* Kambiz Tavabi -* Katrin Leinweber -* Kostiantyn Maksymenko -* Larry Eisenman -* Luke Bloy -* Mainak Jas -* Marijn van Vliet -* Mathurin Massias -* Mikolaj Magnuski -* Nathalie Gayraud -* Oleh Kozynets -* Phillip Alday -* Pierre Ablin -* Stefan Appelhoff -* Stefan Repplinger -* Tommy Clausner -* Yaroslav Halchenko diff --git a/doc/changes/0.17.inc b/doc/changes/0.17.inc deleted file mode 100644 index fa5a49a5287..00000000000 --- a/doc/changes/0.17.inc +++ /dev/null @@ -1,317 +0,0 @@ -.. _changes_0_17: - -Version 0.17 (2018-11-19) -------------------------- - -Changelog -~~~~~~~~~ - -- Add new tutorial for :class:`mne.Annotations` and ``events`` by `Joan Massich`_ and `Alex Gramfort`_ - -- Add support for saving :class:`mne.Annotations` as CSV and TXT files by `Joan Massich`_ and `Alex Gramfort`_ - -- Add :meth:`mne.Epochs.shift_time` that shifts the time axis of :class:`mne.Epochs` by `Thomas Hartmann`_ - -- Add :func:`mne.viz.plot_arrowmap` computes arrowmaps using Hosaka-Cohen transformation from magnetometer or gradiometer data, these arrows represents an estimation of the current flow underneath the MEG sensors by `Sheraz Khan`_ - -- Add :func:`mne.io.read_raw_fieldtrip`, :func:`mne.read_epochs_fieldtrip` and :func:`mne.read_evoked_fieldtrip` to import FieldTrip data. By `Thomas Hartmann`_ and `Dirk Gütlin`_. - -- Add ``rank`` parameter to :func:`mne.compute_covariance`, :func:`mne.cov.regularize` and related functions to preserve data rank and speed up computation using low-rank computations during regularization by `Eric Larson`_ and `Denis Engemann`_ - -- Add new function :func:`mne.read_annotations` that can read annotations in EEGLAB, BrainVision, EDF and Brainstorm formats by `Joan Massich`_ and `Alex Gramfort`_. - -- Add capability to read and save Epochs containing complex data (e.g. 
after Hilbert-transform) using :meth:`mne.Epochs.save` and :func:`mne.read_epochs`, by `Stefan Repplinger`_, `Eric Larson`_ and `Alex Gramfort`_ - -- Add optically pumped magnetometer dataset and example by `Rasmus Zetter`_ and `Eric Larson`_ - -- Add ``orgin`` parameter to :meth:`mne.io.Raw.time_as_index` to allow ``times`` to be relative to this ``origin`` by `Joan Massich`_ - -- Add ``title`` argument to :meth:`mne.SourceEstimate.plot` by `Eric Larson`_ - -- :func:`mne.io.Raw.set_annotations` now changes ``orig_time`` to match ``meas_date`` and shift ``self.annotations.onset`` accordingly. Previous behavior is deprecated and would be removed in 0.18. Work by `Joan Massich`_ - -- Add :func:`mne.compute_source_morph` which creates a :class:`mne.SourceMorph` object to unify morphing any type of source estimates (surface or volume) from one subject to another for group studies. It is now possible to do group studies when working on the volume with MNE. Work by `Tommy Clausner`_ during GSOC 2018 with the help of `Alex Gramfort`_ and `Eric Larson`_. - -- Add ability to pass threshold for EOG to :func:`mne.preprocessing.find_eog_events` and :func:`mne.preprocessing.create_eog_epochs` by `Peter Molfese`_ - -- Add possibility to save :class:`mne.VolSourceEstimate` and :class:`mne.MixedSourceEstimate` to HDF5 format (file extension .h5) with :meth:`mne.VolSourceEstimate.save` and :meth:`mne.MixedSourceEstimate.save` by `Alex Gramfort`_ - -- Add ``replace`` parameter to :meth:`mne.io.Raw.add_events` to allow adding events while removing the old ones on the stim channel by `Alex Gramfort`_ - -- Add ability to pass ``axes`` to ``ts_args`` and ``topomap_args`` of :meth:`mne.viz.plot_evoked_joint` by `Jona Sassenhagen`_ - -- Add ability to pass a precomputed forward solution to :func:`mne.simulation.simulate_raw` by `Eric Larson`_ - -- Add ability to read and write beamformers with :func:`mne.beamformer.read_beamformer` and :class:`mne.beamformer.Beamformer.save` by `Eric Larson`_ - -- Add resting-state source power spectral estimation example ``sphx_glr_auto_examples_datasets_plot_opm_rest_data.py`` by `Eric Larson`_, `Denis Engemann`_, and `Luke Bloy`_ - -- Add :func:`mne.channels.make_1020_channel_selections` to group 10/20-named EEG channels by location, by `Jona Sassenhagen`_ - -- Add helmet for Artemis123 for :func:`mne.viz.plot_alignment` by `Eric Larson`_ - -- Add support for reading MATLAB ``v7.3+`` files in :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` via `pymatreader`_ by `Steven Gutstein`_, `Eric Larson`_, and `Thomas Hartmann`_ - -- Add support for raw PSD plots in :meth:`mne.Report.parse_folder` via ``raw_psd`` argument of :class:`mne.Report` by `Eric Larson`_ - -- Add ``trig_shift_by_type`` parameter in :func:`mne.io.read_raw_brainvision` to allow to specify offsets for arbitrary marker types by `Henrich Kolkhorst`_ - -- Add progress bar support to :class:`mne.decoding.SlidingEstimator` and :class:`mne.decoding.GeneralizingEstimator` by `Eric Larson`_ - -- Add interactive visualization of volume source estimates using :func:`mne.viz.plot_volume_source_estimates` by `Mainak Jas`_ - -- Add :func:`mne.head_to_mri` to convert positions from head coordinates to MRI RAS coordinates, by `Joan Massich`_ and `Alex Gramfort`_ - -- Add improved CTF helmet for :func:`mne.viz.plot_alignment` by `Eric Larson`_ - -- Add handling in :func:`mne.combine_evoked` and :func:`mne.grand_average` for input with the same channels in different orders, if required, by `Jona Sassenhagen`_ - -- 
Add ``split_naming`` parameter to the `mne.io.Raw.save` method to allow for BIDS-compatible raw file name construction by `Teon Brooks`_ - -- Add ``origin`` parameter to :meth:`mne.Evoked.interpolate_bads` and related methods by `Eric Larson`_ - -- Add automated MEG helmet shape approximation to :func:`mne.viz.plot_alignment` by `Eric Larson`_ - -- Add capability to save a :class:`mne.Report` to an HDF5 file to :meth:`mne.Report.save` by `Marijn van Vliet`_ - -- Add :func:`mne.open_report` to read back a :class:`mne.Report` object that was saved to an HDF5 file by `Marijn van Vliet`_ - -- Add multi-taper estimation to :func:`mne.minimum_norm.compute_source_psd` by `Eric Larson`_ - -- Add support for custom, e.g. robust, averaging methods in :meth:`mne.Epochs.average` by `Jona Sassenhagen`_ - -- Add support for Neuromag 122 system by `Alex Gramfort`_ - -- Add function ``mne.io.read_annotations_brainvision`` for reading directly Brainvision marker files by `Alex Gramfort`_ - -- Add :meth:`mne.Report.remove` method to remove existing figures from a report, by `Marijn van Vliet`_ - -- Add sign to output of max-power orientation for :func:`mne.beamformer.make_dics` by `Eric Larson`_ - -- Add support for ``pick_ori='max-power'`` when ``weight_norm=None`` in :func:`mne.beamformer.make_lcmv` by `Marijn van Vliet`_ - -- Add support for ``weight_norm='nai'`` for all ``pick_ori`` options in :func:`mne.beamformer.make_lcmv` by `Marijn van Vliet`_ - -- Add support for ``weight_norm='nai'`` to :func:`mne.beamformer.make_dics` by `Marijn van Vliet`_ - -- Add parameter ``rank=None`` to :func:`mne.beamformer.make_dics` by `Marijn van Vliet`_ - -- Add parameter ``rank='full'`` to :func:`mne.beamformer.make_lcmv`, which can be set to ``None`` to auto-compute the rank of the covariance matrix before regularization by `Marijn van Vliet`_ - -- Handle different time vectors in topography plots using :func:`mne.viz.plot_evoked_topo` by `Jussi Nurminen`_ - -- Speed up :func:`mne.inverse_sparse.mixed_norm` if the ``solver`` parameter is set to ``bcd`` using :func:`scipy.linalg.get_blas_funcs` by `Quentin Bertrand`_ - -Bug -~~~ - -- Fix bug with scaling of data in ``mne.cov._compute_covariance_auto`` that was affecting the :class:`mne.decoding.SPoC` estimator by `David Sabbagh`_ - -- Fix :func:`mne.io.Raw.plot_projs_topomap` by `Joan Massich`_ - -- Fix bug in :func:`mne.minimum_norm.compute_source_psd` where the ``stc.times`` output was scaled by 1000, by `Eric Larson`_ - -- Fix default values for ``'diagonal_fixed'`` estimation method of :func:`mne.compute_covariance` to be ``0.1`` for all channel types, as in :func:`mne.cov.regularize` by `Eric Larson`_ - -- Fix reading edf file annotations by `Joan Massich`_ - -- Fix bug with reading events from BrainVision files by `Stefan Appelhoff`_ - -- Fix bug where :func:`mne.io.read_raw_eeglab` would warn when the stim channel is populated with an array of zeros by `Joan Massich`_ - -- Fix 2nd column of events in BrainVision to no longer store duration but rather be contained by ``raw.annotations`` by `Alex Gramfort`_ - -- Fix checking of the correctness of the ``prepared=True`` argument in :func:`mne.minimum_norm.apply_inverse` and related functions by `Eric Larson`_ - -- Fix bug of not showing ERD's in baseline rescaled tfr topomaps if grads are combined by `Erkka Heinila`_ - -- Fix bug with FIF I/O where strings were written in UTF-8 format instead of Latin-1 by `Eric Larson`_ - -- Fix bug with reading measurement dates from BrainVision files by `Stefan Appelhoff`_ - -- 
Fix bug with `mne.fit_dipole` where the residual was returned as ``ndarray`` instead of :class:`mne.Evoked` instance, by `Eric Larson`_ - -- Fix bug with ``mne flash_bem`` when ``flash30`` is not used by `Eric Larson`_ - -- Fix bug with :func:`mne.stats.permutation_cluster_test` and :func:`mne.stats.spatio_temporal_cluster_test` where ``threshold=None`` was not calculated properly for a f-oneway test by `Daniel McCloy`_ and `Eric Larson`_ - -- Fix bug with channel names in ``mgh70`` montage in ``mne.channels.read_montage`` by `Eric Larson`_ - -- Fix duplication of ``info['hpi_meas']`` and ``info['hpi_results']`` by `Sara Sommariva`_ - -- Fix bug in :func:`mne.io.read_raw_edf` when reading large files on Windows by `Marcin Koculak`_ - -- Fix check in :func:`mne.viz.plot_sensors` for invalid channel locations by `Eric Larson`_ - -- Fix bug in :func:`mne.io.read_raw_edf` where GDF files had ``info['highpass']`` and ``info['lowpass']`` set to NaN and ``info['meas_date']`` set incorrectly, by `Eric Larson`_ - -- Fix bug in :func:`mne.preprocessing.ICA.apply` to handle arrays as ``exclude`` property by `Joan Massich`_ - -- Fix bug in ``method='eLORETA'`` for :func:`mne.minimum_norm.apply_inverse` when using a sphere model and saved ``inv`` by `Eric Larson`_ - -- Fix bug in :class:`mne.io.Raw` where warnings were emitted when objects were deleted by `Eric Larson`_ - -- Fix vector data support for :class:`mne.VolSourceEstimate` by `Christian Brodbeck`_ - -- Fix bug with IIR filtering axis in :func:`mne.filter.filter_data` by `Eric Larson`_ - -- Fix bug with non-boxcar windows in :meth:`mne.io.Raw.resample` and :func:`mne.filter.resample` by `Eric Larson`_ - -- Fix bug in :func:`mne.minimum_norm.apply_inverse` where applying an MEG-only inverse would raise an error about needing an average EEG reference by `Eric Larson`_ - -- Fix bug in ``inst.apply_proj()`` where an average EEG reference was always added by `Eric Larson`_ - -- Fix bug in :func:`mne.time_frequency.tfr_morlet`, :func:`mne.time_frequency.tfr_multitaper`, and :func:`mne.time_frequency.tfr_stockwell` where not all data channels were picked by `Eric Larson`_ - -- Fix bug in :meth:`mne.preprocessing.ICA.plot_overlay` and :func:`mne.make_field_map` for CTF data with compensation by `Eric Larson`_ - -- Fix bug in :func:`mne.create_info` passing ``int`` as ``ch_names`` on Windows by `Eric Larson`_ - -- Fix bug in ``mne.realtime.RtEpochs`` where events during the buildup of the buffer were not correctly processed when incoming data buffers are smaller than the epochs by `Henrich Kolkhorst`_ - -- Fix bug in :func:`mne.io.read_raw_brainvision` where 1-indexed BrainVision events were not being converted into 0-indexed mne events by `Steven Bethard`_ - -- Fix bug in :func:`mne.viz.plot_snr_estimate` and :func:`mne.minimum_norm.estimate_snr` where the inverse rank was not properly utilized (especially affecting SSS'ed MEG data) by `Eric Larson`_ - -- Fix error when saving stc as nifti image when using volume source space formed by more than one label by `Alex Gramfort`_ - -- Fix error when interpolating MEG channels with compensation using reference channels (like for CTF data) by `Alex Gramfort`_ - -- Fix bug in :func:`mne.make_sphere_model` where EEG sphere model coefficients were not optimized properly by `Eric Larson`_ - -- Fix bug in :func:`mne.io.read_raw_ctf` to read bad channels and segments from CTF ds files by `Luke Bloy`_ - -- Fix problem with :meth:`mne.io.Raw.add_channels` where ``raw.info['bads']`` was replicated by `Eric Larson`_ - 
- -- Fix bug with :class:`mne.Epochs` where an error was thrown when resizing data (e.g., during :meth:`mne.Epochs.drop_bad`) by `Eric Larson`_ - -- Fix naming of ``raw.info['buffer_size_sec']`` to be ``raw.buffer_size_sec`` as it is a writing parameter rather than a measurement parameter by `Eric Larson`_ - -- Fix EGI-MFF parser not to require ``dateutil`` package by `Eric Larson`_ - -- Fix error when running LCMV on MEG channels with compensation using reference channels (like for CTF data) by `Alex Gramfort`_ - -- Fix the use of :func:`sklearn.model_selection.cross_val_predict` with :class:`mne.decoding.SlidingEstimator` by `Alex Gramfort`_ - -- Fix event sample number increase when combining many Epochs objects with :func:`mne.concatenate_epochs` by `Jasper van den Bosch`_ - -- Fix title of custom slider images in :class:`mne.Report` by `Marijn van Vliet`_ - -- Fix missing initialization of ``self._current`` in :class:`mne.Epochs` by `Henrich Kolkhorst`_ - -- Fix processing of data with bad segments and acquisition skips with new ``skip_by_annotation`` parameter in :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ - -- Fix symlinking to use relative paths in ``mne flash_bem`` and ``mne watershed_bem`` by `Eric Larson`_ - -- Fix error in mne coreg when saving with scaled MRI if fiducials haven't been saved by `Ezequiel Mikulan`_ - -- Fix normalization error in :func:`mne.beamformer.make_lcmv` when ``pick_ori='normal', weight_norm='unit_noise_gain'`` by `Marijn van Vliet`_ - -- Fix MNE-C installation instructions by `buildqa`_ - -- Fix computation of max-power orientation in :func:`mne.beamformer.make_dics` when ``pick_ori='max-power', weight_norm='unit_noise_gain'`` by `Marijn van Vliet`_ - -API -~~~ - -- Deprecated separate reading of annotations and synthesis of STI014 channels in readers by `Joan Massich`_: - - - Deprecated ``mne.io.read_annotations_eeglab`` - - Deprecated ``annot`` and ``annotmap`` parameters in :meth:`~mne.io.read_raw_edf` - - Deprecated ``stim_channel`` parameters in :func:`~mne.io.read_raw_edf`, :func:`~mne.io.read_raw_brainvision`, and :func:`~mne.io.read_raw_eeglab` - - Annotations are now added to ``raw`` instances directly upon reading as :attr:`raw.annotations <mne.io.Raw.annotations>`. - They can also be read separately with :func:`mne.read_annotations` for EEGLAB, BrainVision, EDF, and Brainstorm formats. - Use :func:`mne.events_from_annotations(raw.annotations) <mne.events_from_annotations>` - to convert these to events instead of the old way (using STI014 channel synthesis followed by :func:`mne.find_events(raw) <mne.find_events>`). - - In 0.17 (this release) - Use ``read_raw_...(stim_channel=False)`` to disable warnings (and stim channel synthesis), but other arguments for ``stim_channel`` will still be supported. - - In 0.18 - The only supported option will be ``read_raw_...(stim_channel=False)``, and all stim-channel-synthesis arguments will be removed. At this point, ``stim_channel`` should be removed from scripts for future compatibility, but ``stim_channel=False`` will still be acceptable for backward compatibility. - - In 0.19 - The ``stim_channel`` keyword arguments will be removed from ``read_raw_...`` functions. - -- Calling :meth:`mne.io.pick.pick_info` in a way that removes channels needed by compensation matrices (``info['comps']``) no longer raises ``RuntimeException`` but instead logs an info-level message.
By `Luke Bloy`_ - -- :meth:`mne.Epochs.save` now has the parameter ``fmt`` to specify the desired format (precision) saving epoched data, by `Stefan Repplinger`_, `Eric Larson`_ and `Alex Gramfort`_ - -- Deprecated ``mne.SourceEstimate.morph_precomputed``, ``mne.SourceEstimate.morph``, ``mne.compute_morph_matrix``, ``mne.morph_data_precomputed`` and ``mne.morph_data`` in favor of :func:`mne.compute_source_morph`, by `Tommy Clausner`_ - -- Prepare transition to Python 3. This release will be the last release compatible with Python 2. The next version will be Python 3 only. - -- CUDA support now relies on CuPy_ instead of ``PyCUDA`` and ``scikits-cuda``. It can be installed using ``conda install cupy``. By `Eric Larson`_ - -- Functions requiring a color cycle will now default to Matplotlib rcParams colors, by `Stefan Appelhoff`_ - -- :meth:`mne.Evoked.plot_image` has gained the ability to ``show_names``, and if a selection is provided to ``group_by``, ``axes`` can now receive a `dict`, by `Jona Sassenhagen`_ - -- Calling :meth:`mne.Epochs.decimate` with ``decim=1`` no longer copies the data by `Henrich Kolkhorst`_ - -- Removed blocking (waiting for new epochs) in ``mne.realtime.RtEpochs.get_data()`` by `Henrich Kolkhorst`_ - -- Warning messages are now only emitted as :func:`warnings.warn_explicit` rather than also being emitted as ``logging`` messages (unless a logging file is being used) to avoid duplicate warning messages, by `Eric Larson`_ - -- Deprecated save_stc_as_volume function in favor of :meth:`mne.VolSourceEstimate.as_volume` and :meth:`mne.VolSourceEstimate.save_as_volume` by `Alex Gramfort`_ - -- ``src.kind`` now equals to ``'mixed'`` (and not ``'combined'``) for a mixed source space (made of surfaces and volume grids) by `Alex Gramfort`_ - -- Deprecation of :attr:`mne.io.Raw.annotations` property in favor of :meth:`mne.io.Raw.set_annotations` by `Joan Massich`_ - -- The default value of ``stop_receive_thread`` in ``mne.realtime.RtEpochs.stop`` has been changed to ``True`` by `Henrich Kolkhorst`_ - -- Using the :meth:`mne.io.Raw.add_channels` on an instance with memmapped data will now resize the memmap file to append the new channels on Windows and Linux, by `Eric Larson`_ - -- :attr:`mne.io.Raw.annotations` when missing is set to an empty :class:`mne.Annotations` rather than ``None`` by `Joan Massich`_ and `Alex Gramfort`_ - -- Mismatches in CTF compensation grade are now checked in inverse computation by `Eric Larson`_ - - -Authors -~~~~~~~ - -People who contributed to this release (in alphabetical order): - -* Alexandre Gramfort -* Antoine Gauthier -* Britta Westner -* Christian Brodbeck -* Clemens Brunner -* Daniel McCloy -* David Sabbagh -* Denis A. Engemann -* Eric Larson -* Ezequiel Mikulan -* Henrich Kolkhorst -* Hubert Banville -* Jasper J.F. van den Bosch -* Jen Evans -* Joan Massich -* Johan van der Meer -* Jona Sassenhagen -* Kambiz Tavabi -* Lorenz Esch -* Luke Bloy -* Mainak Jas -* Manu Sutela -* Marcin Koculak -* Marijn van Vliet -* Mikolaj Magnuski -* Peter J. Molfese -* Sam Perry -* Sara Sommariva -* Sergey Antopolskiy -* Sheraz Khan -* Stefan Appelhoff -* Stefan Repplinger -* Steven Bethard -* Teekuningas -* Teon Brooks -* Thomas Hartmann -* Thomas Jochmann -* Tom Dupré la Tour -* Tristan Stenner -* buildqa -* jeythekey diff --git a/doc/changes/0.18.inc b/doc/changes/0.18.inc deleted file mode 100644 index e525e8849a9..00000000000 --- a/doc/changes/0.18.inc +++ /dev/null @@ -1,268 +0,0 @@ -.. 
_changes_0_18: - -Version 0.18 (2019-05-19) ------------------------- - -Changelog -~~~~~~~~~ - -- Add ``event_id='auto'`` in :func:`mne.events_from_annotations` to accommodate Brainvision markers by `Jona Sassenhagen`_, `Joan Massich`_ and `Eric Larson`_ - -- Add example on how to simulate raw data using subject anatomy, by `Ivana Kojcic`_, `Eric Larson`_, `Samuel Deslauriers-Gauthier`_ and `Kostiantyn Maksymenko`_ - -- :func:`mne.beamformer.apply_lcmv_cov` returns static source power after supplying a data covariance matrix to the beamformer filter by `Britta Westner`_ and `Marijn van Vliet`_ - -- Add ``butterfly`` and ``order`` arguments to :func:`mne.viz.plot_epochs` and offer separated traces for non-meg data (seeg, eeg, ecog) in butterfly view by `Stefan Repplinger`_ and `Eric Larson`_ - -- :meth:`mne.Epochs.get_data` now takes a ``picks`` parameter by `Jona Sassenhagen`_ - -- :func:`~mne.viz.plot_compare_evokeds` will generate topo plots if ``axes='topo'`` by `Jona Sassenhagen`_ - -- ``mne.viz.iter_topography`` can yield an additional axis, e.g., for plotting legends by `Jona Sassenhagen`_ and `Daniel McCloy`_ - -- Default plot title reflects channel type when ``picks`` is a channel type in :func:`~mne.viz.plot_compare_evokeds` by `Daniel McCloy`_ - -- Color scale limits in :func:`~mne.viz.plot_topo_image_epochs` are now computed separately per channel type in combined mag/grad plots, by `Daniel McCloy`_ - -- :func:`mne.simulation.simulate_stc` now allows for label overlaps by `Nathalie Gayraud`_ and `Ivana Kojcic`_ - -- Add ``long_format`` option to the pandas dataframe exporters, e.g. :meth:`mne.Epochs.to_data_frame` by `Denis Engemann`_ - -- Add example on how to load standard montage :ref:`plot_montage` by `Joan Massich`_ - -- Add new tutorial on :ref:`tut-eeg-fsaverage-source-modeling` by `Alex Gramfort`_ and `Joan Massich`_ - -- Add :meth:`mne.Epochs.apply_hilbert` and :meth:`mne.Evoked.apply_hilbert` by `Eric Larson`_ - -- Add convenience ``fsaverage`` subject dataset fetcher / updater :func:`mne.datasets.fetch_fsaverage` by `Eric Larson`_ - -- Add ``fmin`` and ``fmax`` argument to :meth:`mne.time_frequency.AverageTFR.crop` and to :meth:`mne.time_frequency.EpochsTFR.crop` to crop TFR objects along frequency axis by `Dirk Gütlin`_ - -- Add support to :func:`mne.read_annotations` to read CNT formats by `Joan Massich`_ - -- Add ``reject`` parameter to :meth:`mne.preprocessing.ICA.plot_properties` to visualize rejected epochs by `Antoine Gauthier`_ - -- Add support for picking channels using channel name and type strings to functions with ``picks`` arguments, along with a convenience :meth:`mne.io.Raw.pick`, :meth:`mne.Epochs.pick`, and :meth:`mne.Evoked.pick` method, by `Eric Larson`_ - -- Add new tutorial on :ref:`tut-sleep-stage-classif` by `Alex Gramfort`_, `Stanislas Chambon`_ and `Joan Massich`_ - -- Add data fetchers for polysomnography (PSG) recordings from Physionet (:func:`mne.datasets.sleep_physionet.age.fetch_data` and :func:`mne.datasets.sleep_physionet.temazepam.fetch_data`) by `Alex Gramfort`_ and `Joan Massich`_ - -- Add envelope correlation code in ``mne.connectivity.envelope_correlation`` by `Denis Engemann`_, `Sheraz Khan`_, and `Eric Larson`_ - -- Add option to toggle all projectors in :meth:`mne.io.Raw.plot` and related functions by `Eric Larson`_ - -- Add support for indexing, slicing, and iterating :class:`mne.Annotations` by `Joan Massich`_ - -- :meth:`mne.io.Raw.plot` now uses the lesser of ``n_channels`` and ``raw.ch_names``, by `Joan Massich`_ - -- Add
support for FIR filtering in :meth:`mne.io.Raw.plot` and :ref:`mne browse_raw` by passing ``filtorder=0`` or ``--filtorder 0``, respectively, by `Eric Larson`_ - -- Add ``chunk_duration`` parameter to :func:`mne.events_from_annotations` to allow multiple events from a single annotation by `Joan Massich`_ - -- Add :class:`mne.simulation.SourceSimulator` class to simplify simulating SourceEstimates, by `Samuel Deslauriers-Gauthier`_, `Kostiantyn Maksymenko`_, `Nathalie Gayraud`_, `Ivana Kojcic`_, `Alex Gramfort`_, and `Eric Larson`_ - -- :func:`mne.io.read_raw_edf` now detects analog stim channels labeled ``'STATUS'`` and sets them as stim channel. :func:`mne.io.read_raw_edf` no longer synthesizes TAL annotations into stim channel but stores them in ``raw.annotations`` when reading by `Joan Massich`_ - -- Add `mne.simulation.add_noise` for ad-hoc noise addition to `io.Raw`, `Epochs`, and `Evoked` instances, by `Eric Larson`_ - -- Add ``drop_refs=True`` parameter to :func:`set_bipolar_reference` to drop/keep anode and cathode channels after applying the reference by `Cristóbal Moënne-Loccoz`_. - -- Add processing of reference MEG channels to :class:`mne.preprocessing.ICA` by `Jevri Hanna`_ - -- Add use of :func:`scipy.signal.windows.dpss` for faster multitaper window computations in PSD functions by `Eric Larson`_ - -- Add :func:`mne.morph_labels` to facilitate morphing label sets obtained from parcellations, by `Eric Larson`_ - -- Add :func:`mne.labels_to_stc` to facilitate working with label data, by `Eric Larson`_ - -- Add :func:`mne.label.select_sources` to simplify the selection of sources within a label, by `Samuel Deslauriers-Gauthier`_ - -- Add support for using :class:`mne.Info` in :func:`mne.simulation.simulate_raw` instead of :class:`mne.io.Raw` by `Eric Larson`_ - -- Add support for passing an iterable and stim channel values using ``stc`` parameter of :func:`mne.simulation.simulate_raw` by `Eric Larson`_ - -- Add ``overlap`` argument to :func:`mne.make_fixed_length_events` by `Eric Larson`_ - -- Add approximate distance-based ``spacing`` source space decimation algorithm to :func:`mne.setup_source_space` by `Eric Larson`_ - -- Add 448-labels subdivided aparc cortical parcellation by `Denis Engemann`_ and `Sheraz Khan`_ - -- Add option to improve rendering in :ref:`mne coreg` for modern graphics cards by `Eric Larson`_ - -- Add ``mne.preprocessing.mark_flat`` to automate marking of flat channels and segments of raw data by `Eric Larson`_ - -- Add support for CUDA-based correlation computations and progress bars in :class:`mne.decoding.ReceptiveField` by `Eric Larson`_ - -- Add support for file-like objects in :func:`mne.io.read_raw_fif` as long as preloading is used by `Eric Larson`_ - -- Add keyboard shortcuts to navigate volume source estimates in time using (shift+)left/right arrow keys by `Mainak Jas`_ - -- Add option to SSP preprocessing functions (e.g., :func:`mne.preprocessing.compute_proj_eog` and :func:`mne.compute_proj_epochs`) to process MEG channels jointly with ``meg='combined'`` by `Eric Larson`_ - -- Add Epoch selection and metadata functionality to :class:`mne.time_frequency.EpochsTFR` using new mixin class by `Keith Doelling`_ - -- Add ``reject_by_annotation`` argument to :func:`mne.preprocessing.find_ecg_events` by `Eric Larson`_ - -- Add ``pca`` argument to return the rank-reduced whitener in :func:`mne.cov.compute_whitener` by `Eric Larson`_ - -- Add ``extrapolate`` argument to :func:`mne.viz.plot_topomap` for better control of extrapolation points placement by
`Mikołaj Magnuski`_ - -- Add ``channel_wise`` argument to :func:`mne.io.Raw.apply_function` to allow applying a function on multiple channels at once by `Hubert Banville`_ - -- Add option ``copy='auto'`` to control data copying in :class:`mne.io.RawArray` by `Eric Larson`_ - -- The ``mri`` parameter in :func:`mne.setup_volume_source_space` is now automatically set to ``T1.mgz`` if ``subject`` is provided. This allows to get a :class:`mne.SourceSpaces` of kind ``volume`` more automatically. By `Alex Gramfort`_ - -- Add better ``__repr__`` for constants, and :class:`info['dig'] ` entries via ``DigPoint`` by `Eric Larson`_ - -- Allow string argument in :meth:`mne.io.Raw.drop_channels` to remove a single channel by `Clemens Brunner`_ - -- Add additional depth weighting options for inverse solvers (e.g., :func:`mne.inverse_sparse.gamma_map` and :func:`mne.inverse_sparse.mixed_norm`) by `Eric Larson`_ - -- Add depth weighting to LCMV beamformers via ``depth`` argument in :func:`mne.beamformer.make_lcmv` by `Eric Larson`_ - -- Allow toggling of DC removal in :meth:`mne.io.Raw.plot` by pressing the 'd' key by `Clemens Brunner`_ - -- Improved clicking in :meth:`mne.io.Raw.plot` (left click on trace toggles bad, left click on background sets green line, right click anywhere removes green line) by `Clemens Brunner`_ - -- Add ``mne.realtime.LSLClient`` for realtime data acquisition with LSL streams of data by `Teon Brooks`_ and `Mainak Jas`_ - -- Add partial support for PyVista as a 3D backend using :func:`mne.viz.use_3d_backend` by `Guillaume Favelier`_ - -- Add option ``ids = None`` in :func:`mne.event.shift_time_events` for considering all events by `Nikolas Chalas`_ and `Joan Massich`_ - -- Add ``mne.realtime.MockLSLStream`` to simulate an LSL stream for testing and examples by `Teon Brooks`_ - -- Add support for file-like objects in :func:`mne.read_epochs` as long as preloading is used by `Paul Roujansky`_ - -Bug -~~~ - -- Fix annotations in split fif files :func:`mne.io.read_raw_fif` by `Joan Massich`_ - -- Fix :meth:`mne.Epochs.plot` with ``scalings='auto'`` to properly compute channel-wise scalings by `Stefan Repplinger`_ - -- Fix :func:`mne.gui.coregistration` and :ref:`mne coreg` crashing with segmentation fault when switching subjects by `Eric Larson`_ - -- Fix :func:`mne.io.read_raw_brainvision` to accommodate vmrk files which do not have any annotations by Alexander Kovrig - -- Fix :meth:`mne.io.Raw.plot` and :meth:`mne.Epochs.plot` to auto-scale ``misc`` channel types by default by `Eric Larson`_ - -- Fix filtering functions (e.g., :meth:`mne.io.Raw.filter`) to properly take into account the two elements in ``n_pad`` parameter by `Bruno Nicenboim`_ - -- Fix ``feature_names`` parameter change after fitting in :class:`mne.decoding.ReceptiveField` by `Jean-Remi King`_ - -- Fix index error in :func:`mne.io.read_raw_cnt` when creating stim_channel manually by `Joan Massich`_ - -- Fix bug with ``weight_norm='unit-gain'`` in :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` by `Britta Westner`_ - -- Fix 32bits annotations in :func:`mne.io.read_raw_cnt` by `Joan Massich`_ - -- Fix :func:`mne.events_from_annotations` to ignore ``'BAD_'` and ``'EDGE_'`` annotations by default using a new default ``regexp`` by `Eric Larson`_ - -- Fix bug in ``mne.preprocessing.mark_flat`` where ``raw.first_samp`` was not taken into account by `kalenkovich`_ - -- Fix date parsing in :func:`mne.io.read_raw_cnt` by `Joan Massich`_ - -- Fix topological checks and error messages for BEM surfaces in 
:func:`mne.make_bem_model` by `Eric Larson`_ - -- Fix default HTML language of :class:`mne.Report` to be ``"en-us"`` instead of ``"fr"`` and allow setting via ``report.lang`` property by `Eric Larson`_ - -- Fix bug where loading epochs with ``preload=True`` and subsequently using :meth:`mne.Epochs.drop_bad` with new ``reject`` or ``flat`` entries leads to improper data (and ``epochs.selection``) since v0.16.0 by `Eric Larson`_. - If your code uses ``Epochs(..., preload=True).drop_bad(reject=..., flat=...)``, we recommend regenerating these data. - -- Fix :ref:`mne flash_bem` to properly utilize ``flash30`` images when conversion from DICOM images is used, and to properly deal with non-standard acquisition affines by `Eric Larson`_ - -- Fix :meth:`mne.io.Raw.set_annotations` with ``annotations=None`` to create an empty annotations object with ``orig_time`` that matches the :class:`mne.io.Raw` instance by `Eric Larson`_ - -- Fix :func:`mne.io.read_raw_edf` returning all the annotations with the same name in GDF files by `Joan Massich`_ - -- Fix boundaries during plotting of raw data with :func:`mne.io.Raw.plot` and :ref:`mne browse_raw` on scaled displays (e.g., macOS with HiDPI/Retina screens) by `Clemens Brunner`_ - -- Fix bug where filtering was not performed with ``lowpass`` or ``highpass`` in :meth:`mne.io.Raw.plot` and :ref:`mne browse_raw` by `Eric Larson`_ - -- Fix :func:`mne.simulation.simulate_evoked` that was failing to simulate the noise with heterogeneous sensor types due to poor conditioning of the noise covariance and make sure the projections from the noise covariance are taken into account `Alex Gramfort`_ - -- Fix checking of ``data`` dimensionality in :class:`mne.SourceEstimate` and related constructors by `Eric Larson`_ - -- Fix :meth:`mne.io.Raw.append` annotations miss-alignment by `Joan Massich`_ - -- Fix hash bug in the ``mne.io.edf`` module when installing on Windows by `Eric Larson`_ - -- Fix :func:`mne.io.read_raw_edf` reading duplicate channel names by `Larry Eisenman`_ - -- Fix :func:`set_bipolar_reference` in the case of generating all bipolar combinations and also in the case of repeated channels in both lists (anode and cathode) by `Cristóbal Moënne-Loccoz`_ - -- Fix missing code for computing the median when ``method='median'`` in :meth:`mne.Epochs.average` by `Cristóbal Moënne-Loccoz`_ - -- Fix CTF helmet plotting in :func:`mne.viz.plot_evoked_field` by `Eric Larson`_ - -- Fix saving of rejection parameters in :meth:`mne.Epochs.save` by `Eric Larson`_ - -- Fix orientations returned by :func:`mne.dipole.get_phantom_dipoles` (half were flipped 180 degrees) by `Eric Larson`_ - -- Fix bug in :func:`mne.viz.plot_compare_evokeds` when ``evoked.times[0] >= 0`` would cause a problem with ``vlines='auto'`` mode by `Eric Larson`_ - -- Fix path bugs in :func:`mne.bem.make_flash_bem` and :ref:`mne flash_bem` by `Eric Larson`_ - -- Fix :meth:`mne.time_frequency.AverageTFR.plot_joint` mishandling bad channels, by `David Haslacher`_ and `Jona Sassenhagen`_ - -- Fix :func:`mne.beamformer.make_lcmv` failing when full rank data is used (i.e., when no projection is done) with ``reg=0.``, by `Eric Larson`_ - -- Fix issue with bad channels ignored in :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` by `Alex Gramfort`_ - -- Fix :func:`mne.compute_proj_raw` when ``duration != None`` not to apply existing proj and to avoid using duplicate raw data samples by `Eric Larson`_ - -- Fix ``reject_by_annotation`` not being passed internally by 
:func:`mne.preprocessing.create_ecg_epochs` and :ref:`mne clean_eog_ecg` to :func:`mne.preprocessing.find_ecg_events` by `Eric Larson`_ - -- Fix :func:`mne.io.read_raw_edf` failing when EDF header fields (such as patient name) contained special characters, by `Clemens Brunner`_ - -- Fix :func:`mne.io.read_raw_eeglab` incorrectly parsing event durations by `Clemens Brunner`_ - -- Fix :func:`mne.io.read_raw_egi` when cropping non-preloaded EGI MFF data by `Alex Gramfort`_ - -- Fix :meth:`mne.io.Raw.interpolate_bads` for interpolating CTF MEG channels when reference sensors are present by `jeythekey`_ - -- Fix a bug in :meth:`mne.io.Raw.resample`, where resampling events could result in indices > n_times-1, by `jeythekey`_ - -- Fix :meth:`mne.preprocessing.ICA.score_sources` to use the ``sfreq`` of the raw data to filter rather than the ``sfreq`` when the ICA was fit, by `jeythekey`_ - -- Fix a bug in :class:`mne.preprocessing.ICA`, where manually setting the attribute ``ICA.exclude`` to an np.array resulted in the removal of random components when later also providing the ``exclude`` argument to any ``apply...``-method, by `jeythekey`_ - -- Ascending changed to descending sorting of scores for integer ``..._criterion`` arguments in ``mne.preprocessing.ICA.detect_artifacts`` and ``mne.preprocessing.run_ica``, as it used to be documented; the docstring in these functions was corrected for float ``..._criterion`` arguments, by `jeythekey`_ - -API -~~~ - -- Deprecate ``cov, iir_params, blink, ecg, chpi, random_state`` and support for :class:`mne.io.Raw` instance inputs in :func:`mne.simulation.simulate_raw`; use :func:`mne.simulation.add_noise`, :func:`mne.simulation.add_ecg`, :func:`mne.simulation.add_eog`, and :func:`mne.simulation.add_chpi` by `Eric Larson`_ - -- Add ``overwrite`` parameter in :func:`mne.Epochs.save` by `Katarina Slama`_ - -- Add ``stim_channel`` parameter in :func:`mne.io.read_raw_cnt` to toggle stim channel synthesis by `Joan Massich`_ - -- Python 2 is no longer supported; MNE-Python now requires Python 3.5+, by `Eric Larson`_ - -- A new class :class:`mne.VolVectorSourceEstimate` is returned by :func:`mne.minimum_norm.apply_inverse` (and related functions) when a volume source space and ``pick_ori='vector'`` is used, by `Eric Larson`_ - -- Converting a forward solution with a volume source space to fixed orientation using :func:`mne.convert_forward_solution` now raises an error, by `Eric Larson`_ - -- ``raw.estimate_rank`` has been deprecated and will be removed in 0.19 in favor of :func:`mne.compute_rank` by `Eric Larson`_ - -- :class:`Annotations` are now kept sorted (by onset time) during instantiation and :meth:`~Annotations.append` operations, by `Eric Larson`_ - -- Deprecate ``mne.io.find_edf_events`` by `Joan Massich`_ - -- Deprecate ``limit_depth_chs`` in :func:`mne.minimum_norm.make_inverse_operator` in favor of ``depth=dict(limit_depth_chs=...)`` by `Eric Larson`_ - -- Reading BDF and GDF files with :func:`mne.io.read_raw_edf` is deprecated and replaced by :func:`mne.io.read_raw_bdf` and :func:`mne.io.read_raw_gdf`, by `Clemens Brunner`_ - -- :func:`mne.forward.compute_depth_prior` has been reworked to operate directly on :class:`Forward` instance as ``forward`` rather than a representation scattered across the parameters ``G, is_fixed_ori, patch_info``, by `Eric Larson`_ - -- Deprecate ``method='extended-infomax'`` in :class:`mne.preprocessing.ICA`; Extended Infomax can now be computed with ``method='infomax'`` and ``fit_params=dict(extended=True)`` by `Clemens 
Brunner`_ - -- Fix support for supplying ``extrapolate`` via :meth:`ica.plot_properties(..., topomap_args=dict(extrapolate=...)) ` by `Sebastian Castano`_ - -- The peak finder that was formerly accessible via ``from mne.preprocessing.peak_finder import peak_finder`` should now be imported directly from the enclosing namespace as ``from mne.preprocessing import peak_finder`` by `Eric Larson`_ - -- Deprecate ``mne.realtime`` module to make a standalone module ``mne-realtime`` that will live outside of this package by `Teon Brooks`_ diff --git a/doc/changes/0.19.inc b/doc/changes/0.19.inc deleted file mode 100644 index 5fb5d0306e7..00000000000 --- a/doc/changes/0.19.inc +++ /dev/null @@ -1,241 +0,0 @@ -.. NOTE: we are now using links to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Changelog" for new features - - "Bug" for bug fixes - - "API" for backward-incompatible changes - -.. _changes_0_19: - -Version 0.19 (2019-09-24) -------------------------- - -Changelog -~~~~~~~~~ - -- Add :func:`mne.cuda.set_cuda_device` and config variable ``MNE_CUDA_DEVICE`` to select among multiple GPUs (by numeric device ID) by `Daniel McCloy`_. - -- Add :func:`mne.channels.make_standard_montage` to create :class:`mne.channels.DigMontage` from templates by `Joan Massich`_ and `Alex Gramfort`_. - -- Add :func:`mne.channels.compute_dev_head_t` to compute Device-to-Head transformation from a montage by `Joan Massich`_ and `Alex Gramfort`_. - -- Add :func:`mne.channels.read_dig_fif` to read digitization coordinates from ``.fif`` files by `Joan Massich`_ and `Alex Gramfort`_. - -- Add :func:`mne.channels.read_dig_egi` to read digitization coordinates from EGI ``.xml`` files by `Joan Massich`_ and `Alex Gramfort`_. - -- Add :func:`mne.channels.read_dig_polhemus_isotrak` and :func:`mne.channels.read_polhemus_fastscan` to read Polhemus data by `Joan Massich`_ - -- Add ``mne.channels.read_dig_captrack`` to read BrainVision CapTrak (BVCT) digitization coordinate files by `Stefan Appelhoff`_ and `Joan Massich`_ - -- Add :func:`mne.channels.make_dig_montage` to create :class:`mne.channels.DigMontage` objects out of np.arrays by `Joan Massich`_ - -- Add :func:`mne.channels.read_custom_montage` to read various EEG electrode locations files by `Joan Massich`_ and `Alex Gramfort`_. - -- Add support for making epochs with duplicated events, by allowing three policies: "error" (default), "drop", or "merge" in :class:`mne.Epochs` by `Stefan Appelhoff`_ - -- Allow :meth:`mne.Annotations.crop` to support negative ``tmin`` and ``tmax`` by `Joan Massich`_ - -- Unknown events code in GDF are now visible in the ``event_id`` by `Theodore Papadopoulo`_ - -- Now :func:`mne.io.read_raw_ctf` populates ``raw.annotations`` with the markers in ``MarkerFile.mrk`` if any by `Joan Massich`_ - -- Add options for controlling the use of the ``-T1`` flag and the location of the brainmask output in :ref:`mne watershed_bem` by `Eric Larson`_ - -- Add support to :func:`mne.read_annotations` to read CTF marker files by `Joan Massich`_ - -- Do not convert effective number of averages (``nave`` attribute of :class:`mne.Evoked`) to integer except when saving to FIFF file by `Daniel McCloy`_. 
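To make the Extended Infomax change noted just above concrete, here is a minimal sketch on synthetic data; the channel count, sampling rate, and ``n_components`` are invented for the example.

.. code-block:: python

    import numpy as np
    import mne

    rng = np.random.RandomState(42)
    info = mne.create_info([f"EEG {i:03d}" for i in range(8)], sfreq=250.0,
                           ch_types="eeg")
    raw = mne.io.RawArray(rng.randn(8, 2500) * 1e-6, info)
    # Extended Infomax is now requested via fit_params rather than a separate
    # method name.
    ica = mne.preprocessing.ICA(n_components=5, method="infomax",
                                fit_params=dict(extended=True), random_state=0)
    ica.fit(raw)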
- -- Add automatic fiducial position estimation in :ref:`mne coreg` using MNI Talairach fiducial locations in :func:`mne.coreg.get_mni_fiducials` by `Jon Houck`_ and `Eric Larson`_ - -- Add support for :ref:`mne coreg` scaling surrogate subjects without surface reconstructions, such as those created for volumetric analyses only (e.g., with ``recon-all -autorecon1``) by `Eric Larson`_ - -- Add reader for Curry data in :func:`mne.io.read_raw_curry` by `Dirk Gütlin`_ - -- Butterfly channel plots are now possible for :meth:`mne.Epochs.plot_psd` with ``average=False``. Infrastructure for this function now shared with analogous Raw function, found in ``mne.viz.utils`` by `Jevri Hanna`_ - -- Add option not to orthogonalize power envelopes with ``orthogonalize=False`` in ``mne.connectivity.envelope_correlation`` by `Denis Engemann`_ - -- Accept filenames of raw .fif files that end in ``_meg.fif`` to enable compliance with the Brain Imaging Data Structure by `Stefan Appelhoff`_ - -- Add function to check the type of a FIF file using :func:`mne.what` and :ref:`mne what` by `Eric Larson`_ - -- Add support for specifying the initial time and/or position and providing a :class:`mne.SourceMorph` instead of :class:`mne.SourceSpaces` in :func:`mne.viz.plot_volume_source_estimates` by `Eric Larson`_ - -- Speed up morph map generation in :func:`mne.read_morph_map` by ~5-10x by using :func:`numba.jit` by `Eric Larson`_ - -- Speed up :func:`mne.setup_volume_source_space`, especially when ``volume_label is not None`` by `Eric Larson`_ - -- Speed up :ref:`mne coreg` interactive and automated (ICP) alignment by using nearest-neighbor calculations in the MRI coordinate frame, by `Eric Larson`_ - -- Add :func:`mne.dig_mri_distances` to compute the distances between digitized head points and the MRI head surface by `Alex Gramfort`_ and `Eric Larson`_ - -- Add scale bars for data channels in :func:`mne.io.Raw.plot` by `Eric Larson`_ - -- Add :func:`mne.viz.plot_brain_colorbar` to plot a colorbar appropriately matched to a :func:`mne.viz.plot_source_estimates` plot by `Eric Larson`_ - -- Add support for showing head surface (to visualize digitization fit) while showing a single-layer BEM to :func:`mne.viz.plot_alignment` by `Eric Larson`_ - -- Add option ``include_tmax=True`` to cropping methods :meth:`mne.io.Raw.crop`, :meth:`mne.Epochs.crop`, :meth:`mne.Evoked.crop`, :meth:`mne.SourceEstimate.crop`, :meth:`mne.Dipole.crop`, and :meth:`mne.time_frequency.AverageTFR.crop` by `Eric Larson`_ - -- Change the behavior of :meth:`mne.io.Raw.plot` for ``scalings='auto'`` and ``remove_dc=True`` to compute the scalings on the data with DC removed by `Clemens Brunner`_ - -- Allow creating annotations within existing annotations in :func:`mne.io.Raw.plot` by default (the old snapping behavior can be toggled by pressing 'p') by `Clemens Brunner`_ - -- Add plotting of rank estimates in :func:`mne.viz.plot_cov` by `Eric Larson`_ - -- Add a new ``mne.viz.plot_sensors_connectivity`` function to visualize the sensor connectivity in 3D by `Guillaume Favelier`_ and `Alex Gramfort`_ - -- Add support for ``info['utc_offset']``, ``info['device_info']``, and ``info['helium_info']`` components of :class:`mne.Info` by `Eric Larson`_ - -- Add control over dipole colors in :func:`mne.viz.plot_dipole_locations` when using orthoview mode by `Eric Larson`_ - -- Use second-order-sections filtering in :meth:`mne.io.Raw.plot` and :ref:`mne browse_raw` by `Eric Larson`_ - -- Add re-referencing functionality for ecog and seeg channel types in
:func:`mne.set_eeg_reference` by `Keith Doelling`_ - -- Add support for median averaging and for returning unaggregated segments in ``mne.time_frequency.psd_welch`` by `Richard Höchenberger`_ - -- :func:`io.read_raw_kit`: add support for NYU New York 2019 system update, by `Christian Brodbeck`_ - - -Bug -~~~ - -- Fix setting montage eeg ref position for captrak by `Joan Massich`_ - -- Fix saving raw read from BDF file using ``tmin`` and ``tmax`` using ``preload=False`` by `Alex Gramfort`_ - -- Fix :func:`mne.grand_average` to use equal sum-to-one weights (like it used to, before changes to underlying :func:`mne.combine_evoked`) by `Daniel McCloy`_ - -- Fix :meth:`mne.io.Raw.filter` to deal with instances with no data channels properly by `Eric Larson`_ - -- Fix one-sample baseline issue in :class:`mne.BaseEpochs` when using ``tmin=0`` by `Milan Rybář`_ - -- Fix bug in :func:`mne.viz.plot_volume_source_estimates` where ``'glass_brain'`` MRIs were not transformed to MNI space, by `Eric Larson`_ - -- Fix bug in :func:`mne.viz.plot_volume_source_estimates` where MRIs with voxels not in RAS orientation could not be browsed properly, by `Eric Larson`_ - -- Fix bug in :meth:`mne.SourceMorph.apply` where output STCs had ``stc.vertices`` defined improperly, by `Eric Larson`_ - -- Fix bug in :meth:`mne.SourceMorph.apply` where the default was errantly ``mri_space=False`` instead of ``mri_space=None`` (as documented), by `Eric Larson`_ - -- Fix ``mne.VolVectorSourceEstimate.normal`` and :func:`mne.minimum_norm.apply_inverse` to only allow normal extraction/orientation for surface and discrete source spaces by `Eric Larson`_ - -- Fix :meth:`mne.io.Raw.set_annotations` for ``meas_date`` previous to 1970 by `Joan Massich`_ - -- Fix horizontal spacing issues in :meth:`mne.io.Raw.plot_psd` by `Jevri Hanna`_ - -- Fix reading of dates in BrainVision files if no "New Segment" marker is specified, no date is given, or data is missing, by `Stefan Appelhoff`_ - -- Fix bug with reading one-channel GDF files by `Abram Hindle`_ - -- Fix bug with y-axis labeling in :meth:`mne.io.Raw.plot_psd` by `Eric Larson`_ - -- Fix side-effect where :func:`mne.viz.plot_ica_sources` and :meth:`mne.preprocessing.ICA.plot_sources` changed the ``ICA.exclude`` attribute even when users didn't interact with the plot by `Daniel McCloy`_. - -- Fix scaling of sources in :meth:`ica.plot_sources(epochs) ` by `Eric Larson`_ - -- Fix wrong assumptions about units in BrainVision montages and add test asserting units in "mm" or "auto", by `Stefan Appelhoff`_ - -- Fix scaling issue with signals in mV in EDF files read with :func:`mne.io.read_raw_edf` by `Alex Gramfort`_ - -- Fix :func:`mne.extract_label_time_course` behavior when using STCs generated with ``apply_inverse(..., label=label)`` or ``stc.in_label`` by `Eric Larson`_ - -- Fix bug in :func:`mne.io.read_raw_brainvision` so that recording date timestamps are also recognized if channel reference data is negative, by `Stefan Appelhoff`_ - -- Fix order of ``info['dig']`` that was alphabetical based on channel names and not following the channel order when using :meth:`mne.io.Raw.set_montage` and a ``mne.channels.Montage`` object by `Joan Massich`_ and `Alex Gramfort`_. 
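A hedged sketch of the :func:`mne.grand_average` weighting described above, using two synthetic :class:`mne.EvokedArray` instances (channel names, data values, and ``nave`` are invented): each input contributes an equal, sum-to-one weight regardless of its ``nave``.

.. code-block:: python

    import numpy as np
    import mne

    info = mne.create_info(["EEG 001", "EEG 002"], sfreq=100.0, ch_types="eeg")
    ev1 = mne.EvokedArray(np.ones((2, 50)) * 1e-6, info, tmin=0.0, nave=10)
    ev2 = mne.EvokedArray(np.ones((2, 50)) * 3e-6, info, tmin=0.0, nave=30)
    # Equal (sum-to-one) weights: the result is the plain mean of the inputs.
    ga = mne.grand_average([ev1, ev2])
    print(ga.data.mean())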
- -- Fix reading CNT files larger than 2Gb by `Joan Massich`_ - -- Fix reading of 4D/BTi data with different channel names and onsets in :func:`mne.io.read_raw_bti` by `Joshua Bear`_ and `Eberhard Eich`_ - -- Fix formula for effective number of averages in :func:`mne.combine_evoked` when ``weights='equal'`` by `Daniel McCloy`_. - -- Fix bug in :func:`mne.simulation.simulate_stc` to avoid empty stc if label vertices and source space do not intersect, by `Kostiantyn Maksymenko`_ - -- Fix ``event_id='auto'`` in :func:`mne.events_from_annotations` to recover Brainvision markers after saving it in ``.fif`` by `Joan Massich`_ - -- Fix :func:`mne.read_epochs_eeglab` when epochs are stored as float. By `Thomas Radman`_ - -- Fix :func:`mne.Evoked.resample` and :func:`mne.Epochs.resample` not setting ``inst.info['lowpass']`` properly by `Eric Larson`_ - -- Fix checks when constructing volumetric and surface source spaces with :func:`mne.setup_volume_source_space` and :func:`mne.setup_source_space`, respectively, by `Eric Larson`_ - -- Fix bug in handling of :class:`mne.Evoked` types that were not produced by MNE-Python (e.g., alternating average) by `Eric Larson`_ - -- Fix bug in :func:`mne.read_source_estimate` where vector volumetric source estimates could not be read by `Eric Larson`_ - -- Fix bug in :func:`mne.inverse_sparse.mixed_norm` and :func:`mne.inverse_sparse.tf_mixed_norm` where ``weights`` was supplied but ``weights_min`` was not, by `Eric Larson`_ - -- Fix bug in :func:`mne.set_eeg_reference` where non-EEG channels could be re-referenced by default if there were no EEG channels present, by `Eric Larson`_ - -- Fix bug in :func:`mne.io.Raw.plot` when using HiDPI displays and the MacOSX backend of matplotlib by `Eric Larson`_ - -- Fix bug in :func:`mne.viz.plot_compare_evokeds` when using Neuromag 122 system by `Eric Larson`_ - -- Fix bug in :func:`mne.Epochs.plot_psd` when some channels had zero/infinite ``psd`` values causing erroneous error messages by `Luke Bloy`_ - -- Fix :func:`mne.Evoked.decimate` not setting ``inst.first`` and ``inst.last`` properly by `Marijn van Vliet`_ - -- Fix :func:`mne.io.read_raw_brainvision` not handling ``Event`` markers created by PyCorder correctly by `Richard Höchenberger`_ - -- Fix support for string-like objects (such as :class:`python:pathlib.Path`) by `Eric Larson`_ - -- Fix :class:`mne.Report` silently suppressing exceptions when used as a context manager by `Marijn van Vliet`_ - -API -~~~ - -- Deprecate ``mne.channels.Montage`` class, ``mne.channels.read_montage`` and ``mne.channels.read_dig_montage`` function by `Joan Massich`_. - -- Deprecate passing ``Montage``, ``str`` as montage parameter in :meth:`mne.io.Raw.set_montage` by `Joan Massich`_. - -- Deprecate ``set_dig`` parameter in :meth:`mne.io.Raw.set_montage` and ``update_ch_names`` in ``mne.io.RawEEGLAB.set_montage`` when using :class:`mne.channels.DigMontage` as by `Joan Massich`_. - -- Now :meth:`mne.io.Raw.set_montage` raises an error when :class:`mne.channels.DigMontage` contains only a subset of the channels in ``raw.info``. It also adds ``raise_if_subset`` parameter to ensure backward compatibility (defaults to False in 0.19, to True in 0.20, and will be removed in 0.21) by `Joan Massich`_. 
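The following sketch shows the :func:`mne.events_from_annotations` usage referred to above on a synthetic raw object; the BrainVision-style annotation descriptions are placeholders.

.. code-block:: python

    import numpy as np
    import mne

    info = mne.create_info(["EEG 001"], sfreq=100.0, ch_types="eeg")
    raw = mne.io.RawArray(np.zeros((1, 1000)), info)
    annot = mne.Annotations(onset=[1.0, 4.0], duration=[0.0, 0.0],
                            description=["Stimulus/S  1", "Stimulus/S  2"])
    raw.set_annotations(annot)
    # event_id='auto' (the default) derives integer codes from the descriptions.
    events, event_id = mne.events_from_annotations(raw)
    print(events, event_id)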
- -- Minimum dependency versions for the following libraries have been bumped up (by `Eric Larson`_): - - - NumPy: 1.12.1 - - SciPy: 0.18.1 - - matplotlib: 2.0.2 - - scikit-learn: 0.18.2 - - pandas 0.19.2 - -- New boolean parameter ``show_scrollbars`` for :meth:`mne.io.Raw.plot`, :meth:`mne.Epochs.plot`, and :meth:`mne.preprocessing.ICA.plot_sources` (and associated functions) that allows hiding the scrollbars and buttons for a "zen mode" data browsing experience. When the plot window has focus, zen mode can be toggled by pressing :kbd:`z`, by `Daniel McCloy`_. - -- Deprecate ``mne.evoked.grand_average`` in favor of :func:`mne.grand_average` (which works on both :class:`~mne.Evoked` and :class:`~mne.time_frequency.AverageTFR`) by `Daniel McCloy`_ - -- Deprecate ``exclude`` parameter in :func:`mne.viz.plot_ica_sources` and :meth:`mne.preprocessing.ICA.plot_sources`, instead always use the ``exclude`` attribute of the ICA object by `Daniel McCloy`_. - -- Deprecate ``montage`` parameter in favor of the ``set_montage`` method in all EEG data readers :func:`mne.io.read_raw_cnt`, :func:`mne.io.read_raw_egi`, :func:`mne.io.read_raw_edf`, :func:`mne.io.read_raw_gdf`, :func:`mne.io.read_raw_nicolet`, :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` by `Alex Gramfort`_ - -- New parameter ``clear`` in :func:`mne.viz.plot_epochs_image` for clearing pre-existing axes before plotting into them by `Daniel McCloy`_ - -- :func:`mne.viz.plot_epochs_image` no longer supports ``group_by='type'`` — combining by channel type is now the default when ``picks`` is a channel type string; to get individual plots for each channel, pass ``picks`` as a list of channel names or indices by `Daniel McCloy`_ - -- New parameter ``combine`` in :func:`mne.viz.plot_compare_evokeds` for specifying method to combine information across channels by `Daniel McCloy`_ - -- FIFF constants related to SmartShield (``*_SMSH_*`` and ``*_SMARTSHIELD``) have been renamed to ``IAS`` for consistency with MEGIN, by `Eric Larson`_ - -- The ``gfp`` parameter of :func:`mne.viz.plot_compare_evokeds` is deprecated; use ``combine='gfp'`` instead by `Daniel McCloy`_ - -- The ``truncate_yaxis='max_ticks'`` parameter of :func:`mne.viz.plot_compare_evokeds` is deprecated; use ``truncate_yaxis='auto'`` instead by `Daniel McCloy`_ - -- The ``truncate_xaxis`` and ``truncate_yaxis`` parameters of :func:`mne.viz.plot_compare_evokeds` now perform one-sided truncation unless both are ``True`` by `Daniel McCloy`_ - -- The ``show_legend`` parameter of :func:`mne.viz.plot_compare_evokeds` is renamed to ``legend`` by `Daniel McCloy`_ - -- :func:`mne.viz.plot_compare_evokeds` always returns a list of figures even when a single figure is generated by `Daniel McCloy`_ - -- Deprecate ``average=True`` and ``spatial_colors=False`` for :func:`mne.Epochs.plot_psd` by `Jevri Hanna`_ - -- :func:`mne.io.read_raw_brainvision` no longer raises an error when there are inconsistencies between ``info['chs']`` and ``montage`` but warns instead by `Joan Massich`_ - -- Add ``update_ch_names`` parameter to ``mne.io.RawEEGLAB.set_montage`` to allow updating the channel names based on the montage by `Joan Massich`_ - -- Reading annotations contained in GDF files with :func:`mne.io.read_raw_gdf` now returns numeric event codes as descriptions (instead of textual descriptions) due to restrictive licensing of the GDF event code table from BioSig by `Clemens Brunner`_ - -- ``channels.find_ch_connectivity`` now returns pre-built neighbor maps for KIT systems when available, 
by `Christian Brodbeck`_ diff --git a/doc/changes/0.20.inc b/doc/changes/0.20.inc deleted file mode 100644 index a0cf3356b79..00000000000 --- a/doc/changes/0.20.inc +++ /dev/null @@ -1,416 +0,0 @@ -.. NOTE: we are now using links to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Changelog" for new features - - "Bug" for bug fixes - - "API" for backward-incompatible changes - -.. _changes_0_20: - -Version 0.20 (2020-03-27) -------------------------- - -Changelog -~~~~~~~~~ - -- Improved :func:`mne.viz.plot_epochs` to label epoch counts starting from 0, by `Sophie Herbst`_ - -- Add :func:`minimum_norm.apply_inverse_cov` to compute static power by applying inverse solutions to a data covariance matrix by `Denis Engemann`_, `Luke Bloy`_, and `Eric Larson`_ - -- Add :func:`mne.minimum_norm.resolution_metrics` to compute various resolution metrics for inverse solutions, by `Olaf Hauk`_ - -- Add current source density :func:`mne.preprocessing.compute_current_source_density` to compute the surface Laplacian in order to reduce volume conduction in data by `Alex Rockhill`_ - -- Add :func:`mne.chpi.extract_chpi_locs_ctf` to extract cHPI coil locations from CTF data by `Luke Bloy`_ - -- Add :func:`mne.chpi.compute_chpi_amplitudes`, :func:`mne.chpi.compute_chpi_locs`, and :func:`mne.chpi.compute_head_pos` to compute head positions from cHPI coil locations by `Eric Larson`_ and `Luke Bloy`_ - -- Add ``allow_line_only`` option to :func:`mne.chpi.filter_chpi` to allow filtering line frequencies only in files that do not have cHPI information by `Eric Larson`_ - -- Add :func:`mne.io.Raw.set_meas_date` by `Eric Larson`_ - -- Add :meth:`mne.Epochs.as_type` to allow remapping data in MEG channels to virtual magnetometer or gradiometer channels by `Sophie Herbst`_ and `Alex Gramfort`_ - -- Add ``copy`` parameter to :meth:`mne.Epochs.iter_evoked` by `Alex Gramfort`_ - -- Add command :ref:`mne setup_source_space` to quickly set up bilateral hemisphere surface-based source space with subsampling by `Victor Ferat`_. 
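A minimal sketch of the ``copy`` parameter of :meth:`mne.Epochs.iter_evoked` noted above, using a synthetic :class:`mne.EpochsArray` (shapes and values are arbitrary).

.. code-block:: python

    import numpy as np
    import mne

    info = mne.create_info(["EEG 001", "EEG 002"], sfreq=100.0, ch_types="eeg")
    data = np.random.RandomState(0).randn(3, 2, 50) * 1e-6  # 3 epochs
    epochs = mne.EpochsArray(data, info)
    # copy=False avoids copying the data/info for each yielded Evoked.
    for evoked in epochs.iter_evoked(copy=False):
        print(evoked.comment, evoked.nave)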
- -- Add command :ref:`mne sys_info` to print system information by `Eric Larson`_ - -- Add function :func:`mne.make_fixed_length_epochs` to segment raw into fixed length epochs by `Mohammad Daneshzand`_ - -- Add support for computing patch information only in surface source space creation with ``add_dist='patch'`` in :func:`mne.setup_source_space` and ``dist_limit=0`` in :func:`mne.add_source_space_distances` by `Eric Larson`_ - -- Add :class:`mne.Label.restrict` to restrict a label to vertices within a source space by `Eric Larson`_ - -- Add support for passing a destination source space ``src_to`` in :func:`mne.compute_source_morph` to ensure morphing for multiple subjects results in consistent STCs, by `Eric Larson`_ - -- Add support for ``smooth='nearest'`` for surfaces in :func:`mne.compute_source_morph` by `Eric Larson`_ - -- Add support for plotting fNIRS channels in :func:`mne.viz.plot_alignment`, :func:`mne.viz.plot_evoked_topo` and :func:`mne.Evoked.animate_topomap` by `Eric Larson`_ and `Robert Luke`_ - -- Add command line tool :ref:`mne anonymize` for anonymizing raw fiff files by `Luke Bloy`_ - -- Add :func:`mne.preprocessing.find_bad_channels_maxwell` for automatic bad channel detection via Maxwell filtering by `Eric Larson`_ - -- Add support to :func:`mne.io.anonymize_info` to allow time offset to be applied to dates by `Luke Bloy`_ - -- Add support for computing resolution matrix to get point spread functions (PSF) and cross-talk functions (CTF) in :func:`mne.minimum_norm.make_inverse_resolution_matrix`, :func:`mne.beamformer.make_lcmv_resolution_matrix`, :func:`mne.minimum_norm.get_cross_talk`, :func:`mne.minimum_norm.get_point_spread` by `Olaf Hauk`_ - -- Add :func:`mne.preprocessing.read_ica_eeglab` to read EEGLAB ICA decompositions by `Christian O'Reilly`_ - -- Add keyboard functionality to interactive colorbar plotting TFRs by `Stefan Repplinger`_ - -- Add the ability to ``return_event_id`` in :func:`mne.read_events` for use with MNE-C produced ``-annot.fif`` files, by `Eric Larson`_ - -- Add option to ``mne.connectivity.spectral_connectivity`` to compute corrected imaginary PLV by `Adonay Nunes`_ - -- Add :func:`mne.SourceEstimate.estimate_snr` to estimate source-space SNR, by `Kaisu Lankinen`_ and `Padma Sundaram`_ - -- Add option to specify the coordinate frame in :func:`mne.channels.read_custom_montage` by `Eric Larson`_ - -- Add option to use ``match_case=False`` to :meth:`mne.io.Raw.set_montage` and related functions by `Eric Larson`_ - -- Add reader for NIRx data in :func:`mne.io.read_raw_nirx` by `Robert Luke`_ - -- Add function to convert NIRS data to optical density :func:`mne.preprocessing.nirs.optical_density` by `Robert Luke`_ - -- Add function to convert NIRS data to haemoglobin concentration :func:`mne.preprocessing.nirs.beer_lambert_law` by `Robert Luke`_ - -- Add ``fig`` argument to :func:`mne.viz.plot_evoked_field` by `Eric Larson`_ - -- Add functions to calculate spatial information of NIRS channels :func:`mne.preprocessing.nirs.source_detector_distances` and :func:`mne.preprocessing.nirs.short_channels` by `Robert Luke`_ - -- Add reader for ``*.dat`` electrode position files :func:`mne.channels.read_dig_dat` by `Christian Brodbeck`_ - -- Improved :func:`mne.viz.plot_events` to always show event counts by `Eric Larson`_ - -- Improved :ref:`limo-dataset` usage and :ref:`example ` for usage of :func:`mne.stats.linear_regression` by `Jose Alanis`_ - -- Add support for ``reduce_rank=True`` for vector beamformers by `Eric Larson`_ - -- Add 
``method='sphere'`` support for using Freesurfer spherical inflation in :func:`mne.decimate_surface` to facilitate mesh downsampling for use with :func:`mne.make_bem_model` by `Eric Larson`_ - -- Speed up :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` calculations by vectorizing linear algebra calls by `Dmitrii Altukhov`_ and `Eric Larson`_ - -- Speed up :func:`mne.make_forward_solution` using Numba, by `Eric Larson`_ - -- Speed up :func:`mne.io.read_raw_fif` data reading when the recording is long and there are many data tags repeatedly accessed, by `Eric Larson`_ - -- For KIT systems without built-in layout, :func:`mne.channels.find_layout` now falls back on an automatically generated layout, by `Christian Brodbeck`_ - -- :meth:`mne.Epochs.plot` now takes a ``epochs_colors`` parameter to color specific epoch segments by `Mainak Jas`_ - -- Add command :ref:`mne setup_forward_model` to quickly create a BEM model for a subject by `Victor Ferat`_. - -- Add command :ref:`mne prepare_bem_model` to quickly create a BEM solution using the linear collocation approach by `Victor Ferat`_. - -- Add support for reading ``.bdip`` Xfit binary dipole fit files in :func:`mne.read_dipole` by `Eric Larson`_ - -- Allow returning vector source estimates from sparse inverse solvers through ``pick_ori='vector'`` by `Christian Brodbeck`_ - -- Add NIRS support to :func:`mne.viz.plot_topomap` and :func:`mne.viz.plot_ica_components` by `Robert Luke`_ - -- Add the ability to :func:`mne.channels.equalize_channels` to also re-order the channels and also operate on instances of :class:`mne.Info`, :class:`mne.Forward`, :class:`mne.Covariance` and :class:`mne.time_frequency.CrossSpectralDensity` by `Marijn van Vliet`_ - -- Allow `mne.channels.read_custom_montage` to handle fiducial points for BESA spherical (``.elp``) files by `Richard Höchenberger`_ - -- Add ``rank`` argument to :func:`mne.fit_dipole` by `Eric Larson`_ - -- Add function to convert events to annotations :func:`mne.annotations_from_events` by `Nicolas Barascud`_ - -- Add function to calculate scalp coupling index for fNIRS data :func:`mne.preprocessing.nirs.scalp_coupling_index` by `Robert Luke`_ - -- Add ``item`` argument to :meth:`mne.Epochs.get_data` for faster access to NumPy data arrays compared to :meth:`mne.Epochs.__getitem__` for frequent access on large :class:`mne.Epochs` objects, by `Eric Larson`_ - -- More accurate coordinate system for Easycap montages in :func:`mne.channels.make_standard_montage` by `Christian Brodbeck`_ - -- Add ``border`` argument to :func:`mne.viz.plot_topomap`. ``border`` controls the value of the edge points to which topomap values are extrapolated. ``border='mean'`` sets these points value to the average of their neighbours. By `Mikołaj Magnuski`_ - -- Add function :func:`mne.viz.link_brains` to link time properties of multiple brain objects interactively by `Guillaume Favelier`_ - -- Add function :func:`mne.preprocessing.annotate_movement` to annotate periods with head motion and :func:`mne.preprocessing.compute_average_dev_head_t` to re-estimate the device to head transform with average head position during segments with acceptable head movement. 
by `Adonay Nunes`_ - -- Make selected channels more distinguishable in :meth:`mne.Epochs.plot_sensors` when using ``kind='select'`` by `Mikołaj Magnuski`_ - -- Allow retrieval of the number of Infomax ICA iterations via the new ``return_n_iter`` keyword argument of :func:`mne.preprocessing.infomax` by `Richard Höchenberger`_ - -- Expose the number of ICA iterations during the fitting procedure via the ``n_iter_`` attribute of :class:`mne.preprocessing.ICA` by `Richard Höchenberger`_ - -- :func:`mne.grand_average` now produces a warning when only a single dataset was passed, instead of raising an error by `Richard Höchenberger`_ - -- Add ``nrows`` and ``ncols`` parameters to :func:`mne.viz.plot_evoked_topomap`, which allows to create multiline topomap plots for Evoked. By `Federico Raimondo`_ - -- Improve repr of :class:`mne.Info` (empty items are not shown anymore) by `Clemens Brunner`_ - -Bug -~~~ - -- Fix the ``reduce_rank`` parameter in :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` to reduce the rank of the leadfield first before then reducing the rank of the denominator of the beamformer formula for the inversion by `Britta Westner`_ and `Eric Larson`_. - -- Fix bug with :func:`mne.beamformer.make_lcmv` with data where rank was incorrectly checked by `Eric Larson`_. - -- Allow :func:`mne.channels.read_dig_hpts` to accept point categories (eg. EEG) to be specified in upper case by `Alex Gramfort`_. - -- Fix a bug in ``mne.MixedSourceEstimate.plot_surface`` that prevented plotting in latest PySurfer by `Christian O'Reilly`_. - -- Fix ``method='eLORETA'`` in :func:`mne.minimum_norm.apply_inverse`, by `Eric Larson`_: - - 1. regularization, which was off by roughly a factor of a square root - 2. units, which are now nAm instead of being arbitrary - 3. explained variance and residuals, which were previously unavailable - 4. ``force_equal`` in the case of loose orientations, which previously - defaulted to ``True`` now defaults to ``False``. - Reapplying orientation priors at each iteration of the algorithm - sufficiently preserves a smooth transition between loose=0. to loose=1. - without compromising localization accuracy in testing. - -- Fix bug in ``method='eLORETA'`` for :func:`mne.minimum_norm.apply_inverse` (and variants) to allow restricting source estimation to a label by `Luke Bloy`_ - -- Fix bug in :func:`mne.compute_covariance` and :func:`mne.compute_raw_covariance` where biased normalization (based on degrees of freedom) was used and ``cov.nfree`` was not set properly by `Eric Larson`_ - -- Fix ``mne.VectorSourceEstimate.normal`` to account for cortical patch statistics using ``use_cps=True`` by `Eric Larson`_ - -- Fix ``pick_ori='normal'`` for :func:`mne.minimum_norm.apply_inverse` when the inverse was computed with ``loose=1.`` and the forward solution was not in surface orientation, by `Eric Larson`_ - -- Fix missing xlabel for :func:`mne.io.Raw.plot_psd` and similar functions when passing a subset of axes from a figure by `Alex Gramfort`_ - -- Fix wrong DataFrame index when ``index=None`` in methods :meth:`mne.io.Raw.to_data_frame`, :meth:`mne.Epochs.to_data_frame`, :meth:`mne.Evoked.to_data_frame`, and :meth:`mne.SourceEstimate.to_data_frame`, by `Daniel McCloy`_. - -- Fix incorrect scaling of cluster temporal extent in :func:`mne.stats.summarize_clusters_stc` by `Daniel McCloy`_. 
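To make the :func:`mne.annotations_from_events` entry above concrete, here is a short sketch with an invented events array and description mapping.

.. code-block:: python

    import numpy as np
    import mne

    events = np.array([[100, 0, 1], [400, 0, 2]])  # sample, 0, event code
    annot = mne.annotations_from_events(events, sfreq=100.0,
                                        event_desc={1: "left", 2: "right"})
    print(annot)  # onsets at 1.0 s and 4.0 s, zero duration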
- -- Fix :func:`mne.time_frequency.read_tfrs` to properly read :class:`~mne.Info` transforms by `Eric Larson`_ - -- Fix :func:`mne.viz.plot_sensors` to always plot in head coordinate frame by `Daniel McCloy`_. - -- Fix :func:`mne.io.read_raw_cnt` for version 1 of format in the presence of annotations by `Alex Gramfort`_. - -- Fix :class:`mne.decoding.GeneralizingEstimator` and related classes to support multi-class sklearn scorers such as ``'roc_auc_ovo'`` and ``'roc_auc_ovo_weighted'`` by `Eric Larson`_ - -- Fix :meth:`mne.io.read_raw_ctf` to set measurement date from CTF ds files by `Luke Bloy`_. - -- Fix :meth:`mne.read_epochs_eeglab` that ignored channel locations by `Alex Gramfort`_. - -- Fix :func:`mne.io.read_raw_brainvision` when channel names have spaces by `Sebastian Major`_. - -- Fix :func:`mne.io.read_raw_brainvision` when ``"Core"`` is in the data header by `Eric Larson`_ - -- Fix :meth:`mne.io.Raw.anonymize` to correctly reset ``raw.annotations.orig_time`` by `Luke Bloy`_. - -- Fix :meth:`mne.io.Raw.anonymize` to correctly avoid shifting ``raw.annotations.onset`` relative to ``raw.first_samp`` by `Eric Larson`_ - -- Fix :meth:`mne.io.Raw.set_channel_types` and :meth:`mne.io.Raw.rename_channels` and related methods to return the instance instead of ``None`` by `Eric Larson`_ - -- :meth:`mne.Epochs.iter_evoked` now does not return a copy of info when ``copy=False`` (default parameter) by `Alex Gramfort`_ - -- The attribute :class:`mne.Annotations.orig_time ` is now read-only, and is a :class:`~python:datetime.datetime` object (or None) rather than float, by `Eric Larson`_ - -- Fix bug where MRI distances were not properly initialized in :ref:`mne coreg` by `Eric Larson`_ - -- Fix ``xscale='log'`` in :meth:`mne.io.Raw.plot_psd` and related functions by `Alex Gramfort`_ - -- Unify behavior of ``raw.annotations.append(...)`` when ``raw.info['meas_date']`` is None to make onsets absolute relative to ``first_samp`` as they are when ``raw.info['meas_date']`` is not None; i.e., you might need to do ``raw.annotations.append(old_time + raw.first_time)``, by `Eric Larson`_ - -- The :class:`info['meas_date'] ` entry is now a :class:`~python:datetime.datetime` object (or None) rather than float, by `Eric Larson`_ - -- Fix :meth:`mne.time_frequency.AverageTFR.save` and related functions to deal with :class:`~python:datetime.datetime` objects properly by `Eric Larson`_ - -- Fix date reading before Unix time zero (1970 Jan 1) on Windows by `Alex Rockhill`_. - -- Fix bug running subprocesses (e.g., in :func:`mne.bem.make_watershed_bem`) in Jupyter notebooks, by `Eric Larson`_ - -- Fix :meth:`mne.io.read_raw_artemis123` to add HPI locations to digitization points, by `Luke Bloy`_ - -- Fix :meth:`mne.Epochs.shift_time` and :meth:`mne.Evoked.shift_time` to return the modified :class:`~mne.Epochs` or :class:`~mne.Evoked` instance (instead of ``None``) by `Daniel McCloy`_. - -- Fix :func:`mne.read_annotations` for text files with zero or one annotations, by `Adonay Nunes`_ - -- Fix bug in :class:`~mne.preprocessing.ICA` where requesting extended infomax via ``fit_params={'extended': True}`` was overridden, by `Daniel McCloy`_. - -- Fix bug in :meth:`mne.Epochs.interpolate_bads` where the ``origin`` was not used during MEG or EEG data interpolation by `Eric Larson`_. Old behavior can be achieved using ``origin=(0., 0., 0.)`` for EEG and ``origin=(0., 0., 0.04)`` for MEG, and the new default is ``origin='auto'``, which uses a head-digitization-based fit. 
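The sketch below illustrates, with invented values, the datetime-based ``info['meas_date']`` and the :meth:`mne.io.Raw.anonymize` behaviour described above.

.. code-block:: python

    from datetime import datetime, timezone

    import numpy as np
    import mne

    info = mne.create_info(["EEG 001"], sfreq=100.0, ch_types="eeg")
    raw = mne.io.RawArray(np.zeros((1, 200)), info)
    raw.set_meas_date(datetime(2020, 3, 27, 12, 0, tzinfo=timezone.utc))
    print(raw.info["meas_date"])  # a timezone-aware datetime, not a float
    raw.anonymize()               # shifts the measurement date
    print(raw.info["meas_date"])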
- -- Fix bug in :func:`mne.write_evokeds` where ``evoked.nave`` was not saved properly when multiple :class:`~mne.Evoked` instances were written to a single file, by `Eric Larson`_ - -- Fix bug in ``mne.preprocessing.mark_flat`` where acquisition skips were not handled properly, by `Eric Larson`_ - -- Fix bug in :func:`mne.viz.plot_bem` where some sources were not plotted by `Jean-Remi King`_ and `Eric Larson`_ - -- Fix TAL channel parsing (annotations) for EDF-D files by `Clemens Brunner`_ - -- Fix bug with :func:`mne.viz.plot_dipole_locations` when plotting in head coordinates by `Eric Larson`_ - -- Fix handling in cases where no components are found in :func:`mne.preprocessing.corrmap` by `Eric Larson`_ - -- Fix handling of repeated events in :class:`mne.Epochs` by `Fahimeh Mamashli`_ and `Alex Gramfort`_ - -- Fix many bugs with plotting sensors overlaid on a head outline. All plotting is now done in head coordinates and scaled by ``head_radius``, which defaults to 0.095, by `Eric Larson`_ - -- Fix :func:`mne.io.anonymize_info` to allow shifting dates of service and to match anticipated changes in mne-cpp by `Luke Bloy`_ - -- Fix reading of cardinals in .htps files (identifier are int not strings) by `Alex Gramfort`_ - -- Fix IO of TFRs when event_id contain a / in one of the keys by `Alex Gramfort`_ - -- Fix plotting of TFRs with linear y-scale (misaligned frequency bins), by `Stefan Repplinger`_ and `Eric Larson`_ - -- Fix ``info['sfreq']`` when decimating in :func:`mne.time_frequency.tfr_multitaper` and :func:`mne.time_frequency.tfr_morlet` and make sure an error is raised when exceed Nyquist frequency by `Adonay Nunes`_ - -- Fix bug in EDF(+) loading, filter values ignored by insufficient regex, by `Demetres Kostas`_ - -- Fix missing scaling of tolerance parameter in :func:`mne.inverse_sparse.tf_mixed_norm` and :func:`mne.inverse_sparse.mixed_norm`, by `Mathurin Massias`_ - -- Fix the automatic scaling of the glyphs in :func:`mne.viz.plot_vector_source_estimates` by using 10% of the brain width, by `Guillaume Favelier`_ - -- Fix handling of missing values (NaNs) in ``mne.time_frequency.psd_welch`` by `Clemens Brunner`_ - -- Fix :class:`mne.decoding.LinearModel` to support the refitted estimator of ``GridSearchCV`` in ``sklearn`` by `Chun-Hui Li`_ - -- Fix :func:`mne.viz.plot_topomap` when there are bad channels in the provided :class:`mne.Info` instance by `Eric Larson`_ - -- Fix check for subject definition existing in source space in :func:`mne.compute_source_morph` and related functions by `Eric Larson`_ - -- Fix creation of new figures in :meth:`mne.Epochs.plot_image` and :func:`mne.viz.plot_epochs_image` by `Eric Larson`_ - -- Fix bug in :class:`~mne.preprocessing.Xdawn` where filters where selected along the incorrect axis, by `Henrich Kolkhorst`_ - -- Fix bug in :func:`mne.bem.make_watershed_bem` where some surfaces were saved incorrectly in the working directory by `Yu-Han Luo`_ - -- Fix support for multiple TAL (annotations) channels in BDF reader by `Clemens Brunner`_ - -- Fix missing ``brainmask`` file type in :func:`mne.bem.make_watershed_bem` by changing its default value to ``ws.mgz`` by `Fu-Te Wong`_ - -- Fix bug in :func:`mne.channels.make_standard_montage` which would return ``easycap-M1`` even when requesting ``easycap-M10`` by `Christian Brodbeck`_ - -- Fix the way planar gradiometers are combined in :func:`mne.viz.plot_tfr_topomap` and :meth:`mne.Epochs.plot_psd_topomap` by `Geoff Brookshire`_ - -- Improve error message when trying to load FieldTrip data from a 
cell array by `Thomas Hartmann`_ - -- Fix bug in :func:`mne.read_evoked_fieldtrip` causing it to crash when channels were present in the provided Info object but were not present in the data by `Thomas Hartmann`_ - -- Fix placement of extrapolation points in :meth:`mne.Evoked.plot_topomap` and related functions when exactly three channels were used by `Mikołaj Magnuski`_. - -- Fix bug in reading annotations in :func:`read_annotations`, which would not accept ";" character by `Adam Li`_ - -- Fix bug in :func:`mne.Report.parse_folder`, which would not recognize ``*meg.fif`` files by `Dmitrii Altukhov`_. - -- Include ``fit_params`` when saving an :class:`~mne.preprocessing.ICA` instance to disk by `Richard Höchenberger`_ - -- Update old url link in :func:`mne.datasets.eegbci.load_data` to ``EEGMI_URL = 'https://physionet.org/files/eegmmidb/1.0.0/'`` by `Ramiro Gatti`_ - -- Ensure corrected p-values calculated by :func:`mne.stats.bonferroni_correction` never exceed the value of 1.0 by `Richard Höchenberger`_ - -API -~~~ - -- ``mne.io.pick.get_channel_types`` is now called ``mne.io.pick.get_channel_type_constants`` to better reflect its return values, by `Daniel McCloy`_. - -- :meth:`mne.Evoked.as_type` now returns an instance of :class:`mne.EvokedArray` by `Sophie Herbst`_ and `Alex Gramfort`_ - -- Bumped minimum requirements to fall 2017 versions by `Eric Larson`_: - - - NumPy 1.13.3 - - SciPy 1.0.0 - - matplotlib 2.1 - - scikit-learn 0.19.1 (optional requirement) - - pandas 0.21 (optional requirement) - -- :meth:`mne.Epochs.plot` now accepts an ``event_id`` parameter (useful in tandem with ``event_colors`` for specifying event colors by name) by `Daniel McCloy`_. - -- Passing ``ch_types=None`` has been deprecated in favor of explicitly passing ``ch_types='misc'`` in :func:`mne.create_info` by `Eric Larson`_ - -- New time conversion options for methods :meth:`mne.io.Raw.to_data_frame`, :meth:`mne.Epochs.to_data_frame`, :meth:`mne.Evoked.to_data_frame`, and :meth:`mne.SourceEstimate.to_data_frame`, by `Daniel McCloy`_. - -- :meth:`mne.Epochs.shift_time` and :meth:`mne.Evoked.shift_time` now allow shifting times by arbitrary amounts (previously only by integer multiples of the sampling period), by `Daniel McCloy`_ and `Eric Larson`_. - -- The ``head_pos`` argument of :func:`mne.Evoked.plot_topomap` and related functions has been deprecated in favor of ``head_radius``, by `Eric Larson`_. - -- The ``layout`` argument to topomap-related functions such as :meth:`mne.Evoked.plot_topomap` and :func:`mne.viz.plot_tfr_topomap` has been deprecated in favor of channel-position based flattening based on the ``info`` and ``sphere`` argument, by `Eric Larson`_. - -- The APIs of :meth:`mne.io.Raw.plot_projs_topomap`, :meth:`mne.Epochs.plot_projs_topomap` and :meth:`mne.Evoked.plot_projs_topomap` are now more similar to :func:`mne.viz.plot_projs_topomap` by `Daniel McCloy`_. - -- The function :func:`mne.setup_volume_source_space` has a ``sphere_units`` argument that defaults to ``'mm'`` in 0.20 but will change to ``'m'`` in 0.21, set it to avoid a warning by `Eric Larson`_. - -- :func:`mne.viz.plot_projs_topomap` and the related methods :meth:`mne.io.Raw.plot_projs_topomap`, :meth:`mne.Epochs.plot_projs_topomap` and :meth:`mne.Evoked.plot_projs_topomap` now accept parameter ``vlim`` to control the colormap, with keyword ``'joint'`` computing the colormap jointly across all projectors of a given channel type, by `Daniel McCloy`_. 
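A tiny sketch of the relaxed :meth:`mne.Evoked.shift_time` behavior noted above (shifts no longer need to be integer multiples of the sampling period); the single made-up magnetometer channel and the shift value are arbitrary::

    import numpy as np
    import mne

    info = mne.create_info(["MEG 0111"], sfreq=1000.0, ch_types="mag")
    evoked = mne.EvokedArray(np.zeros((1, 200)), info, tmin=-0.1)

    # 12.3 samples at 1 kHz, i.e. not a whole number of samples
    evoked.shift_time(0.0123, relative=True)
    print(evoked.times[0])  # approximately -0.0877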
- -- New methods :meth:`mne.io.Raw.get_channel_types`, :meth:`mne.Epochs.get_channel_types`, :meth:`mne.Evoked.get_channel_types` by `Daniel McCloy`_. - -- Deprecate ``mne.minimum_norm.point_spread_function`` and ``mne.minimum_norm.cross_talk_function`` by `Alex Gramfort`_ - -- Deprecate ``montage`` parameter of :func:`mne.create_info` in favor of :meth:`mne.io.Raw.set_montage`, :meth:`mne.Info.set_montage`, and related functions by `Eric Larson`_ - -- :func:`mne.channels.equalize_channels` no longer operates in-place by default, but instead makes copies of the objects when necessary (see ``copy`` parameter) by `Marijn van Vliet`_ - -- :func:`mne.channels.equalize_channels` now uses the first object in the list as a template for channel ordering by `Marijn van Vliet`_ - -- :func:`mne.channels.equalize_channels` now also re-orders the channels to match, in addition to dropping channels that are not shared by all objects by `Marijn van Vliet`_ - -- :meth:`mne.io.Raw.pick_channels`, :meth:`mne.Epochs.pick_channels` and :meth:`mne.Evoked.pick_channels` now have an ``ordered`` parameter to enforce the ordering of the picked channels by `Marijn van Vliet`_ - -- New methods :meth:`mne.Forward.pick_channels`, :meth:`mne.Covariance.pick_channels`, ``mne.Info.pick_channels``, :meth:`mne.time_frequency.CrossSpectralDensity.pick_channels` by `Marijn van Vliet`_ - -- New attributes ``mne.Forward.ch_names`` and ``mne.Info.ch_names`` by `Marijn van Vliet`_ - -- In :class:`~mne.preprocessing.Xdawn`, the components are stored in the rows of attributes ``filters_`` and ``patterns_`` to be consistent with :class:`~mne.decoding.CSP` and :class:`~mne.preprocessing.ICA` by `Henrich Kolkhorst`_ - -- Drop ``unit`` keyword argument from :func:`mne.channels.read_custom_montage`, as it was unused by `Richard Höchenberger`_ - -- Deprecate ``mne.preprocessing.run_ica``, use ``mne.preprocessing.ICA.detect_artifacts`` instead by `Richard Höchenberger`_ - -Authors -~~~~~~~ - -People who contributed to this release (in alphabetical order): - -* Adam Li -* Adonay Nunes -* Alex Rockhill -* Alexandre Gramfort -* Ariel Rokem -* Britta Westner -* Carlos de la Torre -* Christian Brodbeck -* Christian O'Reilly -* Chun-Hui Li -* Clemens Brunner -* Daniel McCloy -* Demetres Kostas -* Denis A. Engemann -* Dmitrii Altukhov -* Dominik Welke -* Eric Larson -* Ezequiel Mikulan -* Fahimeh Mamashli -* Fede Raimondo -* Fu-Te Wong -* Geoff Brookshire -* Guillaume Favelier -* Henrich Kolkhorst -* Joan Massich -* José C. García Alanis -* Jussi Nurminen -* Kaisu Lankinen -* Luke Bloy -* Mainak Jas -* Marijn van Vliet -* Mathurin Massias -* Mikolaj Magnuski -* Mohamed Sherif -* Mohammad Daneshzand -* Nataliia Kozhemiako -* Nicolas Barascud -* Nicolas Legrand -* Olaf Hauk -* Padma Sundaram -* Ramiro Gatti -* Richard Höchenberger -* Robert Luke -* Robert Oostenveld -* Sebastian Major -* Sophie Herbst -* Stefan Appelhoff -* Stefan Repplinger -* Thomas Hartmann -* Victor Férat -* Yu-Han Luo diff --git a/doc/changes/0.21.inc b/doc/changes/0.21.inc deleted file mode 100644 index 47298cdd969..00000000000 --- a/doc/changes/0.21.inc +++ /dev/null @@ -1,495 +0,0 @@ -.. NOTE: we are now using links to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. 
NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. _changes_0_21: - -Version 0.21 (2020-09-21) -------------------------- - -.. |Rahul Nadkarni| replace:: **Rahul Nadkarni** -.. |Lau Møller Andersen| replace:: **Lau Møller Andersen** -.. |Kyle Mathewson| replace:: **Kyle Mathewson** -.. |Jan Sedivy| replace:: **Jan Sedivy** -.. |Johann Benerradi| replace:: **Johann Benerradi** -.. |Martin Schulz| replace:: **Martin Schulz** -.. |Jeroen Van Der Donckt| replace:: **Jeroen Van Der Donckt** -.. |Simeon Wong| replace:: **Simeon Wong** -.. |Svea Marie Meyer| replace:: **Svea Marie Meyer** -.. |Lx37| replace:: **Lx37** -.. |Liberty Hamilton| replace:: **Liberty Hamilton** -.. |Steven Bierer| replace:: **Steven Bierer** - -Enhancements -~~~~~~~~~~~~ - -- Modified :meth:`mne.Epochs.pick_types` to remove dropped channel types from ``reject`` and ``flat`` dictionaries **by new contributor** |Rahul Nadkarni|_ - -- Add function :func:`mne.preprocessing.regress_artifact` to remove artifacts using linear regression **by new contributor** |Kyle Mathewson|_ and `Eric Larson`_ - -- Add ``sources`` and ``detectors`` options for fNIRS use of :meth:`mne.viz.plot_alignment` allowing plotting of optode locations in addition to channel midpoint ``channels`` and ``path`` between fNIRS optodes **by new contributor** |Kyle Mathewson|_ - -- BrainVision data format files are now parsed for EEG impedance values in :func:`mne.io.read_raw_brainvision` and provided as a ``.impedances`` attribute of ``raw`` **by new contributor** |Jan Sedivy|_ and `Stefan Appelhoff`_ - -- Add function :func:`mne.channels.combine_channels` to combine channels from Raw, Epochs, or Evoked according to ROIs (combinations including mean, median, or standard deviation; can also use a callable) **by new contributor** |Johann Benerradi|_ - -- Improved documentation building instructions and execution on Windows **by new contributor** |Martin Schulz|_, `kalenkovich`_, and `Eric Larson`_ - -- Speed up reading of annotations in EDF+ files **by new contributor** |Jeroen Van Der Donckt|_ - -- Add head to mri and mri to voxel space transform details to :ref:`tut-source-alignment` tutorial, by `Alex Rockhill`_ - -- Improve memory efficiency of :func:`mne.concatenate_epochs` by `Eric Larson`_ - -- Allow picking channels in raw instances (e.g., :meth:`mne.io.Raw.pick_types`) without preloading data, by `Eric Larson`_ - -- :meth:`mne.preprocessing.ICA.plot_sources` now plots annotation markers similar to :meth:`mne.io.Raw.plot` by `Luke Bloy`_ - -- Add support for scalar multiplication and division of :class:`mne.time_frequency.AverageTFR` instances by `Luke Bloy`_ - -- Add support for signals in mV for :func:`mne.io.read_raw_brainvision` by `Clemens Brunner`_ - -- :meth:`mne.Epochs.plot_psd_topomap` and :func:`mne.viz.plot_epochs_psd_topomap` now allow joint colorbar limits across subplots, by `Daniel McCloy`_. 
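For the :func:`mne.channels.combine_channels` entry above, a minimal sketch (random data, arbitrary ROI names); the ``groups`` dictionary maps new channel names to lists of channel indices, and ``method`` may be ``'mean'``, ``'median'``, ``'std'``, or a callable::

    import numpy as np
    import mne

    info = mne.create_info(["Fz", "Cz", "Pz", "Oz"], sfreq=100.0, ch_types="eeg")
    raw = mne.io.RawArray(np.random.RandomState(0).standard_normal((4, 500)) * 1e-6, info)

    rois = dict(frontocentral=[0, 1], posterior=[2, 3])
    raw_rois = mne.channels.combine_channels(raw, rois, method="mean")
    print(raw_rois.ch_names)  # the two ROI channels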
- -- Add support for using loose-orientation surface priors with free-orientation volume priors in mixed source space inverses by `Eric Larson`_ - -- Add :class:`mne.MixedVectorSourceEstimate` for vector source estimates for mixed source spaces, by `Eric Larson`_ - -- Add mixed and volumetric source estimate plotting using volumetric ray-casting to :meth:`mne.MixedSourceEstimate.plot` and :meth:`mne.VolSourceEstimate.plot_3d` by `Eric Larson`_ - -- Add :meth:`mne.MixedSourceEstimate.surface` and :meth:`mne.MixedSourceEstimate.volume` methods to allow surface and volume extraction by `Eric Larson`_ - -- Add :meth:`mne.VectorSourceEstimate.project` to project vector source estimates onto the direction of maximum source power by `Eric Larson`_ - -- Add support to :func:`mne.extract_label_time_course` for vector-valued and volumetric source estimates by `Eric Larson`_ - -- Add method :meth:`mne.VolSourceEstimate.in_label` by `Eric Larson`_ - -- Add support for mixed source spaces to :func:`mne.compute_source_morph` by `Eric Larson`_ - -- Add support for volume and mixed source spaces to :func:`mne.stats.summarize_clusters_stc` by `Eric Larson`_ - -- Add support for omitting the SDR step in volumetric morphing by passing ``n_iter_sdr=()`` to `mne.compute_source_morph` by `Eric Larson`_ - -- Add ``single_volume`` argument to :func:`mne.setup_volume_source_space` to facilitate creating source spaces with many volumes (e.g., all subvolumes of ``aseg.mgz``) by `Eric Larson`_ - -- Add support for passing a string argument to ``bg_img`` in `mne.viz.plot_volume_source_estimates` by `Eric Larson`_ - -- Add support for providing the destination surface source space in the ``src_to`` argument of :func:`mne.compute_source_morph` by `Eric Larson`_ - -- Add explained variance logging to :func:`mne.inverse_sparse.mixed_norm`, :func:`mne.inverse_sparse.gamma_map`, and :func:`mne.beamformer.rap_music` by `Eric Larson`_ - -- Add ``tol_kind`` option to :func:`mne.compute_rank` by `Eric Larson`_ - -- Allow resampling raw data with :func:`mne.io.Raw.resample` without preloading data, by `Eric Larson`_ - -- Allow using ``pick_ori='vector'`` with a fixed-orientation inverse to facilitate visualization with :func:`mne.viz.plot_vector_source_estimates` by `Eric Larson`_ - -- :func:`mne.viz.plot_dipole_locations` and :meth:`mne.Dipole.plot_locations` gained a ``title`` argument to specify a custom figure title in ``orthoview`` mode by `Richard Höchenberger`_ - -- Add orientations projected onto the MRI slices in :func:`mne.viz.plot_dipole_locations` in ``'orthoview'`` mode by `Eric Larson`_ - -- Add temporal derivative distribution repair (TDDR) :func:`mne.preprocessing.nirs.temporal_derivative_distribution_repair` with shortened alias ``mne.preprocessing.nirs.tddr`` by `Robert Luke`_ - -- Add :func:`mne.read_freesurfer_lut` to make it easier to work with volume atlases by `Eric Larson`_ - -- Add support for overlap-add processing when ``method='spectrum_fit'`` in :func:`mne.io.Raw.notch_filter` by `Eric Larson`_ - -- Add functionality to interpolate bad NIRS channels by `Robert Luke`_ - -- Add ability to interpolate EEG channels using minimum-norm projection in :meth:`mne.io.Raw.interpolate_bads` and related functions with ``method=dict(eeg='MNE')`` by `Eric Larson`_ - -- Added ability of :func:`mne.io.read_raw_nirx` to open data by passing path to header file `Robert Luke`_ - -- Add :meth:`mne.channels.DigMontage.rename_channels` to allow renaming montage channels by `Eric Larson`_ - -- Document 
:meth:`mne.channels.compute_native_head_t` to allow computing the native-to-head transformation that will be applied when doing :meth:`raw.set_montage ` and related functions by `Eric Larson`_ - -- Add support to in :meth:`mne.io.Raw.plot` for passing ``clipping`` as a float to clip to a proportion of the dedicated channel range by `Eric Larson`_ - -- Add function :func:`mne.preprocessing.annotate_muscle_zscore` to annotate periods with muscle artifacts. by `Adonay Nunes`_ - -- Add :func:`mne.stats.ttest_ind_no_p` to mirror :func:`mne.stats.ttest_1samp_no_p` with hat correction by `Eric Larson`_ - -- Add `mne.stats.combine_adjacency` to combine adjacency matrices for independent data dimensions to enable things like spatio-temporal-time-frequency clustering in `mne.stats.permutation_cluster_test` by `Eric Larson`_ - -- Speed up raw data reading without preload in :func:`mne.io.read_raw_nirx` by `Eric Larson`_ - -- Speed up :meth:`mne.Epochs.copy` and :meth:`mne.Epochs.__getitem__` by avoiding copying immutable attributes by `Eric Larson`_ - -- Speed up and reduce memory usage of :meth:`mne.SourceEstimate.plot` and related functions/methods when ``show_traces=True`` by `Eric Larson`_ - -- Reduce memory usage of `~mne.io.Raw.plot_psd`, ``mne.time_frequency.psd_welch``, and `~mne.time_frequency.psd_array_welch` for long segments of data by `Eric Larson`_ - -- Support for saving movies of source time courses (STCs) with ``brain.save_movie`` method and from graphical user interface by `Guillaume Favelier`_ - -- Add ``mri`` and ``show_orientation`` arguments to :func:`mne.viz.plot_bem` by `Eric Larson`_ - -- Add :func:`mne.viz.centers_to_edges` to help when using :meth:`matplotlib.axes.Axes.pcolormesh` with flat shading by `Eric Larson`_ - -- Add ``on_missing='raise'`` to :meth:`mne.io.Raw.set_montage` and related functions to allow ignoring of missing electrode coordinates by `Adam Li`_ - -- Add ``on_missing='raise'`` to :func:`mne.viz.plot_events` to allow ignoring missing events when passing ``event_id`` by `Eric Larson`_ - -- Add REST EEG infinity reference scheme to :meth:`mne.io.Raw.set_eeg_reference` and related functions by `Eric Larson`_ - -- Add better sanity checking of ``max_pca_components`` and ``n_components`` to provide more informative error messages for :class:`mne.preprocessing.ICA` by `Eric Larson`_ - -- Add ``plot`` option to :meth:`mne.viz.plot_filter` allowing selection of which filter properties are plotted and added option for user to supply ``axes`` by `Robert Luke`_ - -- Add estimation method legend to :func:`mne.viz.plot_snr_estimate` by `Eric Larson`_ - -- Add support to `mne.SourceSpaces.export_volume` for ``mri_resolution='sparse'`` to color only the nearest-neighbor voxels instead of entire regions by `Eric Larson`_ - -- Add ``axes`` argument to :func:`mne.viz.plot_evoked_white`, :meth:`mne.Evoked.plot_white`, and :func:`mne.viz.plot_snr_estimate` by `Eric Larson`_ - -- Change default to ``surfaces='auto'`` from ``surfaces='head'`` to allow :func:`mne.viz.plot_alignment` to work when just passing a :class:`mne.Info` as ``plot_alignment(info)`` by `Eric Larson`_ - -- Add ECoG misc EDF dataset to the :ref:`tut-working-with-ecog` tutorial to show snapshots of time-frequency activity by `Adam Li`_ - -- Add :func:`mne.viz.set_3d_options` and ``MNE_3D_OPTION_ANTIALIAS`` environment variable to control full-scene antialiasing (FXAA) in 3D functions like :ref:`mne coreg`, :func:`mne.viz.plot_alignment`, and :func:`mne.viz.plot_source_estimates`; this can be useful on 
systems where FXAA does not work well, such as some with MESA software rendering, by `Eric Larson`_ - -- Add config variable ``MNE_TQDM`` to support explicitly setting the TQDM backend to use, for example ``tqdm`` or ``tqdm.auto`` (default) by `Eric Larson`_ - -- Add better support for reading corrupted FIF files in :func:`mne.io.read_raw_fif` by `Eric Larson`_ - -- BIDS conformity: When saving FIF files to disk and the files are split into parts, the ``split_naming='bids'`` parameter now uses a "_split-%d" naming instead of the previous "_part-%d", by `Stefan Appelhoff`_ - -- Add support for whitening and multiple channel types in :func:`mne.beamformer.make_dics` by `Marijn van Vliet`_ - -- Add better error message when trying to save incompatible `~mne.Evoked` objects to the same file by `Eric Larson`_ - -- Add support for loading complex numbers from mat files by `Thomas Hartmann`_ - -- Add generic reader function :func:`mne.io.read_raw` that loads files based on their extensions (it wraps the underlying specific ``read_raw_xxx`` functions) by `Clemens Brunner`_ - -- Add automatic T3 magnetometer detection and application of :meth:`mne.io.Raw.fix_mag_coil_types` to :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ - -- Add extended SSS (eSSS) support to :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ - -- Add ``'auto'`` option to :meth:`mne.preprocessing.ICA.find_bads_ecg` to automatically determine the threshold for CTPS method by `Yu-Han Luo`_ - -- Add a ``notebook`` 3d backend for visualization in jupyter notebook with :func:`mne.viz.set_3d_backend` by `Guillaume Favelier`_ - -- Add support for reading and writing surfaces in Wavefront .obj format to the :func:`mne.read_surface` and :func:`mne.write_surface` by `Marijn van Vliet`_ - -- Add tutorial on how to manually fix BEM meshes in Blender by `Marijn van Vliet`_ and `Ezequiel Mikulan`_ - -- :func:`mne.write_evokeds` will now accept :class:`mne.Evoked` objects with differing channel orders in ``info['bads']``, which would previously raise an exception by `Richard Höchenberger`_ - -- Add support for reading channel names (e.g., for Ricoh data) to :func:`mne.io.read_raw_kit` by `Alex Gramfort`_ and `Eric Larson`_ - -- The ``reject_tmin`` and ``reject_tmax`` parameters of :class:`mne.Epochs` are now taken into account when using the ``reject_by_annotation`` parameter by `Stefan Appelhoff`_ - -- :func:`mne.preprocessing.find_bad_channels_maxwell` now automatically applies a low-pass filter before running bad channel detection. This can be disabled, restoring previous behavior by `Richard Höchenberger`_ - -- Add ``proj='reconstruct'`` to :meth:`mne.Evoked.plot` and related functions to apply projectors and then undo the signal bias using field mapping by `Eric Larson`_ - -- Add writing BEM surfaces and solutions in H5 format in :func:`mne.write_bem_surfaces` and :func:`mne.write_bem_solution` by `Eric Larson`_ - -- When picking a subset of channels, or when dropping channels from `~mne.io.Raw`, `~mne.Epochs`, or `~mne.Evoked`, projectors that can only be applied to the removed channels will now be dropped automatically by `Richard Höchenberger`_ - -- :class:`mne.Report` now can add topomaps of SSP projectors to the generated report. This behavior can be toggled via the new ``projs`` argument by `Richard Höchenberger`_ - -- When passing a list of `~mne.Evoked` objects to `~mne.viz.plot_compare_evokeds`, each evoked's ``.comment`` attribute will be used to label the trace. 
If ``.comment`` is empty, a 1-based index is assigned as the label by `Richard Höchenberger`_ - -- Speed up :func:`mne.stats.summarize_clusters_stc` using Numba by `Yu-Han Luo`_ - -- Add ``reject_by_annotation=True`` to :func:`mne.make_fixed_length_epochs` and :meth:`mne.preprocessing.ICA.plot_properties` to reject bad data segments based on annotation by `Yu-Han Luo`_ - -- `~mne.Report.parse_folder` now accepts a path-like folder name (it used to work with strings only) by `Alex Gramfort`_ - -- Add ``component_order`` parameter to :class:`mne.decoding.CSP` which allows switching between ``mutual_info`` (default) and ``alternate`` (a simpler and frequently used option) by `Martin Billinger`_ and `Clemens Brunner`_ - -- Add memory size information to the ``repr`` of :class:`mne.SourceSpaces` and :class:`mne.SourceEstimate` and related classes by `Eric Larson`_ - -- Add reader for Persyst (.lay + .dat format) data in :func:`mne.io.read_raw_persyst` by `Adam Li`_ - -- Use PyVista as the default backend for 3D visualization instead of Mayavi by `Guillaume Favelier`_ - -- Add reader for SNIRF NIRS data in :func:`mne.io.read_raw_snirf` by `Robert Luke`_ - -- `~mne.Evoked` has gained ``tmin`` and ``tmax`` attributes for more consistency with `~mne.Epochs` by `Richard Höchenberger`_ - -Bugs -~~~~ - -- Fix bug when writing and reading complex evoked data by modifying :func:`mne.write_evokeds` and :func:`mne.read_evokeds` **by new contributor** |Lau Møller Andersen|_ - -- Fix bug by adding an error message when trying to save complex STC data in a non-HDF5 format with :meth:`mne.VolSourceEstimate.save` **by new contributor** |Lau Møller Andersen|_ - -- Fix bug with logging in :meth:`mne.io.Raw.set_eeg_reference` and related functions **by new contributor** |Simeon Wong|_ - -- Fix bug with :func:`mne.io.read_raw_gdf` where birthdays were not parsed properly, leading to an error **by new contributor** |Svea Marie Meyer|_ - -- Fix bug with :func:`mne.io.read_raw_edf` where recording ID was not read properly for non-ASCII characters **by new contributor** |Lx37|_ - -- Fix bug in :func:`mne.set_eeg_reference` and related functions to set ``info['custom_ref_applied']`` to ``True`` for 'ecog' and 'seeg' channels in addition to 'eeg' **by new contributor** |Liberty Hamilton|_ - -- Fix bug with :func:`mne.chpi.compute_chpi_amplitudes` and :func:`mne.chpi.filter_chpi` to optimize time window length **by new contributor** |Steven Bierer|_ - -- Fix bug with :func:`mne.preprocessing.ICA.find_bads_eog` when more than one EOG component is present by `Christian O'Reilly`_ - -- Fix bug with non-preloaded data when using ``raw.apply_proj().load_data().get_data()`` where projectors were not applied by `Eric Larson`_ - -- Fix bug to permit :meth:`stc.project('nn', src) ` to be applied after ``stc`` was restricted to an :class:`mne.Label` by `Luke Bloy`_ - -- Fix bug with :func:`mne.io.Raw.set_meas_date` to support setting ``meas_date`` to ``None``, by `Luke Bloy`_ - -- Fix bug with :func:`mne.setup_volume_source_space` when ``volume_label`` was supplied where voxels slightly (in the worst case, about 37% of ``pos`` in distance) outside the voxel-grid-based bounds of regions were errantly included, by `Eric Larson`_ - -- Fix bug with :ref:`mne coreg` where reverse scalings were applied to ``src[0]['src_mri_t']`` for volume source spaces, so morphing and plotting did not work correctly by `Eric Larson`_ - -- Fix bug with :func:`mne.io.read_raw_ctf` when reference magnetometers have the compensation grade marked by `Eric Larson`_ -
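The ``reject_by_annotation`` addition to :func:`mne.make_fixed_length_epochs` listed above, sketched with synthetic data; the annotation description is arbitrary but needs to start with ``'bad'`` for rejection to apply::

    import numpy as np
    import mne

    info = mne.create_info(["EEG01", "EEG02"], sfreq=100.0, ch_types="eeg")
    raw = mne.io.RawArray(np.random.RandomState(0).standard_normal((2, 2000)) * 1e-6, info)
    raw.set_annotations(mne.Annotations(onset=[5.0], duration=[2.0],
                                        description=["bad_segment"]))

    # Fixed-length epochs overlapping the "bad_segment" annotation are dropped
    epochs = mne.make_fixed_length_epochs(raw, duration=2.0,
                                          reject_by_annotation=True, preload=True)
    print(len(epochs))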
-- Fix bug with `mne.SourceSpaces.export_volume` with ``use_lut=False`` where no values were written by `Eric Larson`_ - -- Fix bug with :func:`mne.preprocessing.annotate_movement` where bad data segments, specified in ``raw.annotations``, would be handled incorrectly by `Luke Bloy`_ - -- Fix bug with :func:`mne.compute_source_morph` when more than one volume source space was present (e.g., when using labels) where only the first label would be interpolated when ``mri_resolution=True`` by `Eric Larson`_ - -- Fix bug with :func:`mne.compute_source_morph` when morphing to a volume source space when ``src_to`` is used and the destination subject is not ``fsaverage`` by `Eric Larson`_ - -- Fix bug with :func:`mne.compute_source_morph` where outermost voxels in the destination source space could be errantly omitted by `Eric Larson`_ - -- Fix bug with :func:`mne.compute_source_morph` where complex data was cast to real when doing a volumetric morph by `Eric Larson`_ - -- Fix bug with :func:`mne.minimum_norm.compute_source_psd_epochs` and :func:`mne.minimum_norm.source_band_induced_power` raised errors when ``method='eLORETA'`` by `Eric Larson`_ - -- Fix bug with :func:`mne.minimum_norm.apply_inverse` where the explained variance did not work for complex data by `Eric Larson`_ - -- Fix bug with :func:`mne.preprocessing.compute_current_source_density` where values were not properly computed; maps should now be more focal, by `Alex Rockhill`_ and `Eric Larson`_ - -- Fix bug with :func:`mne.combine_evoked` where equal-weighted averages were wrongly computed as equal-weighted sums, by `Daniel McCloy`_ - -- Fix bug with :meth:`mne.channels.DigMontage.plot` when reference channels with no defined position are present, by `Eric Larson`_ - -- Fix bug with :func:`mne.channels.read_custom_montage` where ``"headshape"`` points in SFP format were not read properly, by `Eric Larson`_ - -- Fix bug with setting HTML classes when using ``mne.Report.add_bem_to_section`` by `Eric Larson`_ - -- Fix bug with convex-hull based MEG helmet creation where the face area could be covered up, by `Eric Larson`_ - -- Fix bug with :func:`mne.inverse_sparse.mixed_norm` and :func:`mne.inverse_sparse.gamma_map` with ``return_as_dipoles=True`` and :func:`mne.beamformer.rap_music` where ``dip.gof`` was the same for all dipoles. GOF values / explained variance across all dipoles now gets split among the dipoles using SVD of the lead fields. 
To get the total GOF, ``sum(dip.gof for dip in dipoles)`` can be used, by `Eric Larson`_ - -- Fix bug with :func:`mne.inverse_sparse.gamma_map` where output data was not assigned properly when using a free-orientation forward and ``xyz_same_gamma=False`` by `Eric Larson`_ - -- Fix to enable interactive plotting with no colorbar with :func:`mne.viz.plot_evoked_topomap` by `Daniel McCloy`_ - -- Fix plotting with :func:`mne.viz.plot_evoked_topomap` to pre-existing axes by `Daniel McCloy`_ - -- Fix bug with :func:`mne.viz.plot_vector_source_estimates` using the PyVista backend with ``time_viewer=True`` when updating the arrow colormaps by `Eric Larson`_ - -- Fix bug with :func:`mne.viz.plot_vector_source_estimates` where ``clim='auto'`` and ``clim=dict(..., kind='percent')`` did not take into account the magnitude of the activation, by `Eric Larson`_ - -- The default plotting mode for :func:`mne.io.Raw.plot` and :ref:`mne browse_raw` has been changed to ``clipping=3.`` to facilitate data analysis with large deflections, by `Eric Larson`_ - -- PSD plots will now show non-data channels (e.g., ``misc``) if those channels are explicitly passed to ``picks``, by `Daniel McCloy`_. - -- Fix bug with :func:`mne.time_frequency.read_tfrs` where ``info['meas_date']`` was not parsed correctly, by `Eric Larson`_ - -- Fix bug with :func:`mne.time_frequency.tfr_array_stockwell` where inputs were not properly validated by `Eric Larson`_ - -- Fix handling of NaN when using TFCE in clustering functions such as :func:`mne.stats.spatio_temporal_cluster_1samp_test` by `Eric Larson`_ - -- Fix handling of signs when using TFCE by `Eric Larson`_ - -- The :class:`mne.MixedSourceEstimate` class has been clarified to contain two cortical surface source spaces, plus at least one other source space. 
Creating source estimates in other orderings is not supported, by `Eric Larson`_ - -- Fix bug where :class:`VolSourceEstimate.vertices ` was an instance of :class:`~numpy.ndarray` instead of :class:`python:list` of one :class:`~numpy.ndarray`, by `Eric Larson`_ - -- Fix default to be ``foreground=None`` in :func:`mne.viz.plot_source_estimates` to use white or black text based on the background color by `Eric Larson`_ - -- Fix bug with writing EGI and CTF `mne.Info` to H5 format, e.g., with `mne.time_frequency.AverageTFR.save` by `Eric Larson`_ - -- Fix bug with topomap outlines in :meth:`mne.time_frequency.AverageTFR.plot_joint` and :meth:`mne.time_frequency.AverageTFR.plot_topo` by `Eric Larson`_ - -- Fix bug with :func:`mne.io.Raw.plot` where toggling all projectors did not actually take effect by `Eric Larson`_ - -- Fix bug with :func:`mne.write_proj` where the number of columns was incorrectly written by `Eric Larson`_ - -- Fix bug with :func:`mne.read_epochs` when loading data in complex format with ``preload=False`` by `Eric Larson`_ - -- Fix bug with :meth:`mne.Epochs.save` where the file splitting calculations did not account for the sizes of non-data writes by `Eric Larson`_ - -- Fix bug with :class:`mne.Epochs` when metadata was not subselected properly when ``event_repeated='drop'`` by `Eric Larson`_ - -- Fix bug with :class:`mne.Epochs` where ``epochs.drop_log`` was a list of list of str rather than an immutable tuple of tuple of str (not meant to be changed by the user) by `Eric Larson`_ - -- Fix bug with :class:`mne.Report` where the BEM section could not be toggled by `Eric Larson`_ - -- Fix bug when using :meth:`mne.Epochs.crop` to exclude the baseline period would break :func:`mne.Epochs.save` / :func:`mne.read_epochs` round-trip by `Eric Larson`_ - -- Fix bug with `mne.Epochs.subtract_evoked` where using decimated epochs would lead to an error by `Eric Larson`_ - -- Fix bug with :func:`mne.viz.plot_bem` and :class:`mne.Report` when plotting BEM contours when MRIs are not in standard FreeSurfer orientation by `Eric Larson`_ - -- Fix bug with :func:`mne.minimum_norm.make_inverse_operator` where it would warn even when an explicit ``rank`` was used by `Eric Larson`_ - -- Fix bugs with :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` where: - - - Noise normalization factors ``weight_norm='unit-noise-gain'`` and ``weight_norm='nai'`` were computed incorrectly - - ``pick_ori='max-power'`` computed the max-power orientation incorrectly - - ``pick_ori='normal'`` did not compute power or noise normalization factors correctly - - :func:`mne.beamformer.apply_lcmv_cov` did not apply whitening and projections properly - -- Fix :ref:`mne setup_forward_model` to have it actually compute the BEM solution in addition to creating the BEM model by `Eric Larson`_ - -- Fix bug with :func:`mne.io.read_raw_edf` where null bytes were not properly handled, causing an error when opening a file by `Eric Larson`_ - -- Fix bug with :func:`mne.Report` where unicode characters were not rendered properly (encoding for HTML was not set) by `Eric Larson`_ - -- Fix bug with :func:`mne.preprocessing.nirs.scalp_coupling_index` where filter transition was incorrectly assigned by `Robert Luke`_ - -- Fix bug with :func:`mne.make_forward_dipole` where :func:`mne.write_forward_solution` could not be used by `Eric Larson`_ - -- Fix bug with :meth:`mne.io.Raw.plot` when ``scalings='auto'`` where bad data would prevent channel plotting by `Eric Larson`_ - -- Default ``border`` and 
``extrapolate`` arguments for :func:`mne.Evoked.plot_topomap` and related functions were changed from ``0.`` to ``'mean'`` and ``'box'`` to ``'auto'``, respectively, to help more accurately reflect sensor geometries and boundary conditions. ``extrapolate='auto'`` uses ``extrapolate='local'`` for MEG data and ``extrapolate='head'`` otherwise, by `Eric Larson`_ - -- Fix bug that prevents ``n_jobs`` from being a NumPy integer type, by `Daniel McCloy`_. - -- Fix bug with :func:`mne.epochs.average_movements` where epoch weights were computed using all basis vectors instead of the internal basis only by `Eric Larson`_ - -- Fix bug with :func:`mne.get_volume_labels_from_aseg` where the returned labels were alphabetical instead of reflecting their volumetric ID-based order by `Eric Larson`_ - -- Fix bug with :func:`mne.preprocessing.find_bad_channels_maxwell` where good data of exactly ``step`` duration would lead to an error by `Eric Larson`_ - -- Fix bug with :func:`mne.preprocessing.find_bad_channels_maxwell` where indices were not handled properly when MEG channels were not first in the raw instance, and logging messages incorrectly reported the interval used by `Eric Larson`_ - -- Make :func:`mne.set_config` accept path-like input values by `Richard Höchenberger`_ - -- Fix bug with :func:`mne.write_labels_to_annot` and :func:`mne.datasets.fetch_hcp_mmp_parcellation` where label name strings were not properly terminated, leading to problems loading in FreeSurfer by `Eric Larson`_ - -- Fix bug with :func:`mne.beamformer.make_dics` where complex conjugates were not applied properly by `Britta Westner`_ and `Eric Larson`_ - -- Fix bug with :func:`mne.bem.make_watershed_bem` where the RAS coordinates of watershed bem surfaces were not updated correctly from the volume file by `Yu-Han Luo`_ - -- Fix bug with :meth:`mne.io.Raw.get_channel_types` and related methods where the ordering of ``picks`` was not preserved, by `Eric Larson`_ - -- Fix bug with :meth:`mne.io.Raw.plot_psd` with ``average=False`` and multiple channel types where channel locations were not shown properly by `Eric Larson`_ - -- Fix bug with :func:`mne.viz.plot_sparse_source_estimates` when using ``'sphere'`` mode by `Eric Larson`_ and `Guillaume Favelier`_ - -- Fix bug in FieldTrip reader functions when channels are missing in the ``info`` object by `Thomas Hartmann`_ - -- Throw proper error when trying to import FieldTrip Epochs data with non-uniform time for trials by `Thomas Hartmann`_ - -- Throw proper error when trying to import FieldTrip data saved by an old, incompatible version by `Thomas Hartmann`_ - -- Fix bug in :func:`mne.read_epochs_fieldtrip` when importing data without a ``trialinfo`` field by `Thomas Hartmann`_ - -- Fix bug in :meth:`mne.preprocessing.ICA.plot_properties` where time series plot doesn't start at the proper tmin by `Teon Brooks`_ - -- Fix bug with :meth:`mne.preprocessing.ICA.plot_properties` where a :class:`mne.io.Raw` object with annotations would lead to an error by `Yu-Han Luo`_ - -- Fix bug with :func:`mne.events_from_annotations(raw.annotations) ` when ``orig_time`` of annotations is None and ``raw.first_time > 0``, by `Alex Gramfort`_ - -API changes -~~~~~~~~~~~ - -- Python 3.5 is no longer supported, Python 3.6+ is required, by `Eric Larson`_ - -- ``adjacency`` has replaced ``connectivity`` in the names of: - - 1. Arguments to clustering functions, such as `mne.stats.permutation_cluster_test`, and - 2. 
Function names for defining adjacency, such as `mne.spatio_temporal_src_adjacency` replacing ``mne.spatio_temporal_src_connectivity``. - - The complete list of changed function names is: - - - ``mne.channels.find_ch_connectivity`` → `~mne.channels.find_ch_adjacency` - - ``mne.channels.read_ch_connectivity`` → `~mne.channels.read_ch_adjacency` - - ``mne.spatial_dist_connectivity`` → `~mne.spatial_dist_adjacency` - - ``mne.spatial_inter_hemi_connectivity`` → `~mne.spatial_inter_hemi_adjacency` - - ``mne.spatial_src_connectivity`` → `~mne.spatial_src_adjacency` - - ``mne.spatial_tris_connectivity`` → `~mne.spatial_tris_adjacency` - - ``mne.spatio_temporal_dist_connectivity`` → `~mne.spatio_temporal_dist_adjacency` - - ``mne.spatio_temporal_src_connectivity`` → `~mne.spatio_temporal_src_adjacency` - - ``mne.spatio_temporal_tris_connectivity`` → `~mne.spatio_temporal_tris_adjacency` - - "connectivity" is now reserved for discussions of functional and effective connectivity of the brain, and "adjacency" for source or sensor neighbor definitions for cluster-based analyses, by `Eric Larson`_. - -- The default for the ``standardize_names`` argument of :func:`mne.io.read_raw_kit` will change from ``True`` to ``False`` in 0.22, by `Eric Larson`_ - -- The ``normalize_fwd`` argument of :func:`mne.beamformer.make_dics` has been deprecated in favor of ``depth``, by `Eric Larson`_ - -- The ``overwrite`` parameter of :func:`mne.write_bem_surfaces` and :func:`mne.write_bem_solution` defaults to ``True`` in 0.21 but will change to ``False`` in 0.22, by `Eric Larson`_ - -- Add ``n_cols`` parameter to :meth:`mne.preprocessing.ICA.plot_scores` to allow plotting scores in multiple columns, by `Luke Bloy`_ - -- In :func:`mne.stats.permutation_cluster_test` and :func:`mne.stats.permutation_cluster_1samp_test` the default parameter value ``out_type='mask'`` has changed to ``None``, which in 0.21 means ``'mask'`` but will change to mean ``'indices'`` in the next version, by `Daniel McCloy`_ - -- The default window size set by ``filter_length`` when ``method='spectrum_fit'`` in :meth:`mne.io.Raw.notch_filter` will change from ``None`` (use whole file) to ``'10s'`` in 0.22, by `Eric Larson`_ - -- ``vmin`` and ``vmax`` parameters are deprecated in :meth:`mne.Epochs.plot_psd_topomap` and :func:`mne.viz.plot_epochs_psd_topomap`; use new ``vlim`` parameter instead, by `Daniel McCloy`_. - -- The method ``stc_mixed.plot_surface`` for a :class:`mne.MixedSourceEstimate` has been deprecated in favor of :meth:`stc.surface().plot(...) 
` by `Eric Larson`_ - -- The method ``stc.normal`` for :class:`mne.VectorSourceEstimate` has been deprecated in favor of :meth:`stc.project('nn', src) ` by `Eric Larson`_ - -- Add ``use_dev_head_trans`` parameter to :func:`mne.preprocessing.annotate_movement` to allow choosing the device to head transform is used to define the fixed cHPI coordinates by `Luke Bloy`_ - -- The function ``mne.channels.read_dig_captrack`` will be deprecated in version 0.22 in favor of :func:`mne.channels.read_dig_captrak` to correct the spelling error: "captraCK" -> "captraK", by `Stefan Appelhoff`_ - -- The ``threshold`` argument in :meth:`mne.preprocessing.ICA.find_bads_ecg` defaults to ``None`` in version 0.21 but will change to ``'auto'`` in 0.22 by `Yu-Han Luo`_ - -- The function ``mne.preprocessing.mark_flat`` has been deprecated in favor of ``mne.preprocessing.annotate_flat`` by `Eric Larson`_ - -- The default argument ``meg=True`` in :func:`mne.pick_types` will change to ``meg=False`` in version 0.22 by `Clemens Brunner`_ - -- Added :meth:`mne.io.Raw.get_montage`, which obtains the montage that an instance has, by `Adam Li`_ - - -Authors -~~~~~~~ - -People who contributed to this release in alphabetical order -(people with a + are first time contributors): - -* Adam Li -* Adonay Nunes -* Alejandro Weinstein -* Alex Rockhill -* Alexandre Gramfort -* Anton Nikolas Waniek -* Britta Westner -* Christian O'Reilly -* Clemens Brunner -* Daniel McCloy -* Eric Larson -* Evgenii Kalenkovich -* Fede Raimondo -* Guillaume Favelier -* Hubert Banville -* Jeroen Van Der Donckt + -* Johann Benerradi + -* Kyle Mathewson + -* Lau Møller Andersen + -* Liberty Hamilton + -* Luke Bloy -* Lx37 + -* Mainak Jas -* Marijn van Vliet -* Martin Billinger -* Martin Schulz + -* Martin van Harmelen -* Mikolaj Magnuski -* Olaf Hauk -* Rahul Nadkarni + -* Richard Höchenberger -* Robert Luke -* Sara Sommariva -* Simeon Wong + -* Stefan Appelhoff -* Steven Bierer + -* Svea Marie Meyer + -* Teon Brooks -* Thomas Hartmann -* Yu-Han Luo -* chapochn -* mshader diff --git a/doc/changes/0.22.inc b/doc/changes/0.22.inc deleted file mode 100644 index 90186b62de9..00000000000 --- a/doc/changes/0.22.inc +++ /dev/null @@ -1,272 +0,0 @@ -.. NOTE: we are now using links to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. _changes_0_22: - -Version 0.22.0 (2020-12-17) ---------------------------- - -.. |Austin Hurst| replace:: **Austin Hurst** - -.. |Aniket Pradhan| replace:: **Aniket Pradhan** - -.. |Eduard Ort| replace:: **Eduard Ort** - -.. |Evan Hathaway| replace:: **Evan Hathaway** - -.. |Hongjiang Ye| replace:: **Hongjiang Ye** - -.. |Jeff Stout| replace:: **Jeff Stout** - -.. |Jonathan Kuziek| replace:: **Jonathan Kuziek** - -.. |Qianliang Li| replace:: **Qianliang Li** - -.. |Tod Flak| replace:: **Tod Flak** - -.. 
|Victoria Peterson| replace:: **Victoria Peterson** - - -Enhancements -~~~~~~~~~~~~ -- Add :func:`mne.read_evokeds_mff` to read averaged MFFs (requires mffpy >= 0.5.7) **by new contributor** |Evan Hathaway|_ (:gh:`8354`) - -- Add :class:`mne.decoding.SSD` for spatial filtering with spatio-spectral-decomposition (:gh:`7070` **by new contributor** |Victoria Peterson|_ and `Denis Engemann`_) - -- Add reader for optical imaging data recorded using ISS Imgagent I/II hardware and BOXY recording software in :func:`mne.io.read_raw_boxy` (:gh:`7717` **by new contributor** |Jonathan Kuziek|_ and `Kyle Mathewson`_) - -- Add options to use labels in :func:`mne.minimum_norm.get_point_spread` and :func:`mne.minimum_norm.get_cross_talk` (:gh:`8275` by `Olaf Hauk`_) - -- Update ``surfaces`` argument in :func:`mne.viz.plot_alignment` to allow dict for transparency values, and set default for sEEG data to have transparency (:gh:`8445` by `Keith Doelling`_) - -- Add support for ``mri_fiducials='estimated'`` in :func:`mne.viz.plot_alignment` to allow estimating MRI fiducial locations using :func:`mne.coreg.get_mni_fiducials` (:gh:`8553` by `Eric Larson`_) - -- Update default values in :ref:`mne coreg` and :func:`mne.viz.plot_alignment` for clearer representation of MRI and digitized fiducial points (:gh:`8553` by `Alex Gramfort`_ and `Eric Larson`_) - -- Add ``n_pca_components`` argument to :func:`mne.viz.plot_ica_overlay` (:gh:`8351` by `Eric Larson`_) - -- Add :func:`mne.stc_near_sensors` to facilitate plotting ECoG data (:gh:`8190` by `Eric Larson`_) - -- Add ``proj`` argument to :func:`mne.make_fixed_length_epochs` (:gh:`8351` by `Eric Larson`_) - -- Add :func:`mne.preprocessing.realign_raw` to realign simultaneous raw recordings in the presence of clock drift (:gh:`8539` by `Eric Larson`_) - -- Reduce memory usage of volume source spaces (:gh:`8379` by `Eric Larson`_) - -- Speed up heavy use of :meth:`mne.SourceMorph.apply` for volumetric source spaces by use of the method :meth:`mne.SourceMorph.compute_vol_morph_mat` (:gh:`8366` by `Eric Larson`_) - -- Add support for non-uniform ``zooms`` (e.g., when using a surrogate MRI via :func:`mne.scale_mri`) in volumetric morphing (:gh:`8642` by `Eric Larson`_) - -- In :func:`mne.compute_source_morph` ``zooms`` are no longer required to match the spacing of ``src_to``, which is useful to ensure the morphing is accurate when the ``src_to`` spacing is large (e.g., 1 cm) (:gh:`8642` by `Eric Larson`_) - -- Add volumetric source space support to :func:`mne.labels_to_stc` (:gh:`8447` by `Eric Larson`_) - -- Speed up :class:`mne.decoding.TimeDelayingRidge` with edge correction using Numba (:gh:`8323` by `Eric Larson`_) - -- Add :meth:`mne.Epochs.reset_drop_log_selection` to facilitate writing epochs with many ignored entries in their drop log (:gh:`8449` by `Eric Larson`_) - -- Add sEEG source visualization using :func:`mne.stc_near_sensors` and sEEG working tutorial (:gh:`8402` by `Eric Larson`_ and `Adam Li`_) - -- Add :meth:`mne.channels.DigMontage.get_positions`, which will return a dictionary of channel positions, coordinate frame and fiducial locations (:gh:`8460` by `Adam Li`_) - -- Add support for writing digitization points in a coordinate frame other than head in :meth:`mne.channels.DigMontage.save` (:gh:`8532` by `Eric Larson`_) - -- Add ``picks`` parameter to :func:`mne.preprocessing.fix_stim_artifact` to specify which channel needs to be fixed (:gh:`8482` by `Alex Gramfort`_) - -- Add progress bar support to :func:`mne.time_frequency.csd_morlet` 
(:gh:`8608` by `Eric Larson`_) - -- Further improved documentation building instructions and execution on Windows (:gh:`8502` by `kalenkovich`_ and `Eric Larson`_) - -- Add option to disable TQDM entirely with ``MNE_TQDM='off'`` (:gh:`8515` by `Eric Larson`_) - -- Add option ``on_header_missing`` to :func:`mne.channels.read_polhemus_fastscan` (:gh:`8622` by `Eric Larson`_) - -- Add option ``window`` to ``mne.time_frequency.psd_welch`` and related functions (:gh:`8862` by `Eric Larson`_) - -- `mne.preprocessing.ICA.plot_sources` now displays an `mne.preprocessing.ICA.plot_properties` window when right-clicking on component names on the y-axis (:gh:`8381` by `Daniel McCloy`_) - -- :func:`mne.io.read_raw_edf`, :func:`mne.io.read_raw_bdf`, and :func:`mne.io.read_raw_gdf` now detect and handle invalid highpass/lowpass filter settings (:gh:`8584` by `Clemens Brunner`_) - -- If a ``baseline`` tuple containing one or two ``None`` values – e.g. ``(None, 0)``, ``(0, None)``, or ``(None, None)`` – is passed to `~mne.Epochs` or `~mne.Epochs.apply_baseline`, the ``None`` value(s) will be replaced with the actual time (i.e., :attr:`~mne.Epochs.tmin` and :attr:`~mne.Epochs.tmax`, respectively) when populating ``Epochs.baseline`` (:gh:`8442` by `Richard Höchenberger`_) - -- `~mne.Epochs` will now retain the information about an applied baseline correction, even if the baseline period is partially or completely removed through cropping later on (:gh:`8442` by `Richard Höchenberger`_) - -- Add :func:`mne.source_space.compute_distance_to_sensors` to compute distances between vertices and sensors (:gh:`8534` by `Olaf Hauk`_ and `Marijn van Vliet`_) - -- Annotations can now be shown/hidden interactively in raw plots (:gh:`8624` by `Daniel McCloy`_) - -- Added argument ``colors`` to `mne.grow_labels` (:gh:`8519` by `Olaf Hauk`_) - -- Added `mne.SourceEstimate.apply_baseline` method for baseline-correction of source estimates (:gh:`8452` by `Olaf Hauk`_) - -- New `mne.viz.Brain.set_time` method to set the displayed time in seconds (:gh:`8415` by `Daniel McCloy`_) - -- Update the ``backend`` parameter of :func:`mne.viz.plot_source_estimates` to integrate ``pyvista`` (:gh:`8395` by `Guillaume Favelier`_) - -- Add ``group_by`` parameter to `mne.viz.plot_epochs` and `mne.Epochs.plot` to allow displaying channel data by sensor position (:gh:`8381` by `Daniel McCloy`_) - -Bugs -~~~~ -- Fix orthogonalization of power envelopes in ``mne.connectivity.envelope_correlation`` (:gh:`8658` **by new contributor** |Qianliang Li|_ and `Eric Larson`_) - -- Fix data overwrite of cascading simulation operations :`mne.simulation.simulate_raw` (:gh:`8633` **by new contributor** |Jeff Stout|_) - -- Fix a transpose issue of :func:`mne.decoding.CSP.plot_filters` (:gh:`8580` **by new contributor** |Hongjiang Ye|_) - -- Fix :func:`mne.io.read_raw_curry` to deal with Curry datasets that have channels that are listed in the labels file, but which are absent from the saved data file (e.g. 'Ref' channel). 
Also now populates info['meas_date'] if possible (:gh:`8400` **by new contributor** |Tod Flak|_) - -- Fix bug with mne.io.egi.tests/test_egi.py where it mandatorily downloaded testing data when it was not necessary (:gh:`8474` **by new contributor** |Aniket Pradhan|_) - -- Fix bug with reading split files that have dashes in the filename (:gh:`8339` **by new contributor** |Eduard Ort|_) - -- Fix bug with parsing EDF dates and date integers (:gh:`8558` **by new contributor** |Austin Hurst|_ and `Eric Larson`_) - -- Fix bug with reading EDF and KIT files on big endian architectures such as s390x (:gh:`8618` by `Eric Larson`_) - -- Fix bug with :func:`mne.beamformer.make_dics` where the ``rank`` parameter was not properly handled (:gh:`8594` by `Marijn van Vliet`_ and `Eric Larson`_) - -- Fix bug with :func:`mne.beamformer.apply_dics` where the whitener was not properly applied (:gh:`8610` by `Eric Larson`_) - -- Fix bug with `~mne.viz.plot_epochs_image` when ``order`` is supplied and multiple conditions are plotted (:gh:`8377` by `Daniel McCloy`_ ) - -- Fix bug with :func:`mne.viz.plot_source_estimates` when using the PyVista backend where singleton time points were not handled properly (:gh:`8285` by `Eric Larson`_) - -- Fix bug when passing ``axes`` to plotting functions, :func:`matplotlib.pyplot.tight_layout` will not be called when the figure was created using a constrained layout (:gh:`8344` by `Eric Larson`_) - -- Fix bug with compensated CTF data when picking channels without preload (:gh:`8318` by `Eric Larson`_) - -- Fix bug with plotting MEG topographies where the wrong extrapolation made was used in ICA (:gh:`8637` by `Eric Larson`_) - -- Fix bug when merging fNIRS channels in :func:`mne.viz.plot_evoked_topomap` and related functions (:gh:`8306` by `Robert Luke`_) - -- Fix bug where events could overflow when writing to FIF (:gh:`8448` by `Eric Larson`_) - -- :func:`mne.io.read_raw_edf` now supports EDF files with invalid recording dates (:gh:`8283` by `Clemens Brunner`_) - -- Fix bug with :func:`mne.io.Raw.save` when using ``split_naming='bids'`` where non-split files would still be named ``name_split-01_meg.fif`` instead of the requested ``name_meg.fif`` (:gh:`8464` by `Alex Gramfort`_ and `Eric Larson`_) - -- Fix bug with :class:`mne.preprocessing.ICA` where ``n_pca_components`` as a :class:`python:float` would give the number of components that explained less than or equal to the given variance. It now gives greater than the given number for better usability and consistency with :class:`sklearn.decomposition.PCA`. 
Generally this will mean that one more component will be included (:gh:`8326` by `Eric Larson`_) - -- Fix bug with :class:`mne.preprocessing.ICA` where projections were not tracked properly (:gh:`8343` by `Eric Larson`_) - -- Fix bug where extrapolation points created artifacts in :func:`mne.viz.plot_evoked_topomap` and related functions (:gh:`8425` by `Mikołaj Magnuski`_) - -- Fix bug with :func:`mne.preprocessing.read_ica_eeglab` where full-rank data were not handled properly (:gh:`8326` by `Eric Larson`_) - -- Fix bug with :ref:`somato-dataset` where the BEM was not included (:gh:`8317` by `Eric Larson`_) - -- Fix bug with coordinate frames when performing volumetric morphs via :func:`mne.compute_source_morph` and :meth:`mne.SourceMorph.apply` that could lead to ~5 mm bias (:gh:`8642` by `Eric Larson`_) - -- Fix bug with volumetric rendering alpha in :meth:`mne.VolSourceEstimate.plot_3d` and related functions (:gh:`8663` by `Eric Larson`_) - -- Fix missing documentation of :func:`mne.io.read_raw_nihon` in :ref:`tut-imorting-eeg-data` (:gh:`8320` by `Adam Li`_) - -- Fix bug with :func:`mne.add_reference_channels` when :func:`mne.io.Raw.reorder_channels` or related methods are used afterward (:gh:`8303`, :gh:`8484` by `Eric Larson`_) - -- Fix bug where the ``verbose`` arguments to :meth:`mne.Evoked.apply_baseline` and :meth:`mne.Epochs.apply_baseline` were not keyword-only (:gh:`8349` by `Eric Larson`_) - -- ``ICA.max_pca_components`` will not be altered by calling `~mne.preprocessing.ICA.fit` anymore. Instead, the new attribute ``ICA.max_pca_components_`` will be set (:gh:`8321` by `Richard Höchenberger`_) - -- Fix bug that `~mne.viz.plot_ica_overlay` would sometimes not create red traces (:gh:`8341` by `Richard Höchenberger`_) - -- Fix bug with :class:`~mne.preprocessing.ICA` where ``n_components=None, n_pca_components=None`` could lead to unstable unmixing matrix inversion by making ``n_components=None`` also use the lesser of ``n_components=0.999999`` and ``n_components=n_pca_components`` (:gh:`8351` by `Eric Larson`_) - -- The ``ica.n_pca_components`` property is no longer updated during :meth:`mne.preprocessing.ICA.fit`; instead, ``ica.n_components_`` will be added to the instance (:gh:`8351` by `Eric Larson`_) - -- Pass ``rank`` everywhere in forward preparation for source imaging.
This bug affected sparse solvers when using maxfilter data (:gh:`8368` by `Alex Gramfort`_) - -- Fix bug in :func:`mne.viz.plot_alignment` where ECoG and sEEG channels were not plotted and fNIRS channels were always plotted in the head coordinate frame (:gh:`8393` by `Eric Larson`_) - -- Fix bug in :func:`mne.set_bipolar_reference` where ``ch_info`` could contain invalid channel information keys (:gh:`8416` by `Eric Larson`_) - -- When reading BrainVision raw data, the channel units and types were sometimes not inferred correctly (:gh:`8434` by `Richard Höchenberger`_) - -- Attempting to remove baseline correction from preloaded `~mne.Epochs` will now raise an exception (:gh:`8435` by `Richard Höchenberger`_) - -- :meth:`mne.Report.parse_folder` will now correctly handle split FIFF files (:gh:`8486`, :gh:`8491` by `Richard Höchenberger`_) - -- Fix bug where BrainVision channel names, event types, and event descriptions containing commas were incorrectly parsed (:gh:`8492` by `Stefan Appelhoff`_) - -- Fix bug in :func:`mne.preprocessing.compute_fine_calibration` where the magnetometer calibration coefficients were computed incorrectly (:gh:`8522` by `Eric Larson`_) - -- Fix bug in :func:`mne.io.read_raw_eeglab` where empty event durations led to an error (:gh:`8384` by `Mikołaj Magnuski`_) - -- Fix inset sensor plots to always use equal aspect (:gh:`8545` by `Daniel McCloy`_) - -- Fix bug in `mne.viz.plot_compare_evokeds` where evokeds with identical ``comment`` attributes would not plot properly if passed as a list (:gh:`8590` by `Daniel McCloy`_) - -- Fix bug in ``mne.time_frequency.psd_welch`` and related functions where the window default errantly changed from ``'hamming'`` to ``('tukey', 0.25)`` (:gh:`8862` by `Eric Larson`_) - -- Fix bug in :func:`mne.io.read_raw_kit` where scale factors for EEG channels could be set to zero (:gh:`8542` by `Eric Larson`_) - -- Fix reading GDF files with excluded channels in :func:`mne.io.read_raw_gdf` (:gh:`8520` by `Clemens Brunner`_) - -- Fix automatic selection of extrapolation mask type from channel type when plotting field maps (:gh:`8589` by `Daniel McCloy`_) - -- Fix bug in :func:`mne.viz.set_3d_title` where 3D plot could have multiple titles that overlap (:gh:`8564` by `Guillaume Favelier`_) - -- Fix bug in :func:`mne.viz.set_3d_view` where plotter is not updated properly causing camera issues in the doc (:gh:`8564` by `Guillaume Favelier`_) - -- :func:`mne.preprocessing.find_ecg_events` didn't take the ``tstart`` parameter value into account when calculating the average heart rate (:gh:`8605` by `Richard Höchenberger`_) - -API changes -~~~~~~~~~~~ - -- Minimum required versions were increased for core dependencies NumPy (1.15.4), SciPy (1.1.0), and Matplotlib (3.0) and for the optional dependencies scikit-learn (0.20.2) and pandas (0.23.4) (:gh:`8374` by `Eric Larson`_) - -- The parameter ``on_split_missing`` has been added to :func:`mne.io.read_raw_fif` and its default will change from ``'warn'`` to ``'raise'`` in 0.23, by (:gh:`8357` `Eric Larson`_) - -- The ``max_pca_components`` argument of :class:`~mne.preprocessing.ICA` has been deprecated, use ``n_components`` during initialization and ``n_pca_components`` in :meth:`~mne.preprocessing.ICA.apply` instead (:gh:`8351` by `Eric Larson`_) - -- The ``n_pca_components`` argument of :class:`~mne.preprocessing.ICA` has been deprecated, use ``n_pca_components`` in :meth:`~mne.preprocessing.ICA.apply` (:gh:`8356` by `Eric Larson`_) - -- The ``trans`` argument of 
:func:`mne.extract_label_time_course` is deprecated and will be removed in 0.23 as it is no longer necessary (:gh:`8389` by `Eric Larson`_) - -- Parameter ``event_colors`` in `mne.viz.plot_epochs` and `mne.Epochs.plot` is deprecated, replaced by ``event_color`` which is consistent with `mne.viz.plot_raw` and provides greater flexibility (:gh:`8381` by `Daniel McCloy`_) - -Authors -~~~~~~~ - -People who contributed to this release in alphabetical order -(people with a + are first time contributors): - -* Adam Li -* Alexandre Gramfort -* Aniket Pradhan + -* Austin Hurst + -* Christian Brodbeck -* Clemens Brunner -* Daniel McCloy -* Denis A. Engemann -* Eduard Ort + -* Eric Larson -* Evan Hathaway + -* Evgenii Kalenkovich -* Fede Raimondo -* Guillaume Favelier -* Hongjiang Ye + -* Jean-Remi King -* Jeff Stout + -* Jonathan Kuziek + -* Jussi Nurminen -* Justus Schwabedal -* Keith Doelling -* Kyle Mathewson -* Mads Jensen -* Mainak Jas -* Marijn van Vliet -* Mikolaj Magnuski -* Olaf Hauk -* Qianliang Li + -* Richard Höchenberger -* Robert Luke -* Stefan Appelhoff -* Thomas Hartmann -* Tod Flak + -* Victoria Peterson + diff --git a/doc/changes/0.23.inc b/doc/changes/0.23.inc deleted file mode 100644 index fdcaeb60630..00000000000 --- a/doc/changes/0.23.inc +++ /dev/null @@ -1,402 +0,0 @@ -.. NOTE: we are now using links to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. NOTE: changes from first-time contributors should be added to the TOP of - the relevant section (Enhancements / Bugs / API changes), and should look - like this (where xxxx is the pull request number): - - description of enhancement/bugfix/API change (:gh:`xxxx` **by new contributor** |Firstname Lastname|_) - -.. _changes_0_23_0: - -Version 0.23.0 (2021-04-27) ---------------------------- - -.. |Jack Zhang| replace:: **Jack Zhang** - -.. |Sumalyo Datta| replace:: **Sumalyo Datta** - -.. |Anna Padee| replace:: **Anna Padee** - -.. |Richard Koehler| replace:: **Richard Koehler** - -.. |Zhi Zhang| replace:: **Zhi Zhang** - -.. |Rotem Falach| replace:: **Rotem Falach** - -.. |Andres Rodriguez| replace:: **Andres Rodriguez** - -.. |Matt Sanderson| replace:: **Matt Sanderson** - -.. |Enrico Varano| replace:: **Enrico Varano** - -.. |Dominik Welke| replace:: **Dominik Welke** - -.. |Judy D Zhu| replace:: **Judy D Zhu** - -.. |Valerii Chirkov| replace:: **Valerii Chirkov** - -.. |Matteo Anelli| replace:: **Matteo Anelli** - -.. |Apoorva Karekal| replace:: **Apoorva Karekal** - -.. |Cora Kim| replace:: **Cora Kim** - -.. |Silvia Cotroneo| replace:: **Silvia Cotroneo** - -.. |Ram Pari| replace:: **Ram Pari** - -.. |Erica Peterson| replace:: **Erica Peterson** - -.. |Maggie Clarke| replace:: **Maggie Clarke** - -.. |Felix Klotzsche| replace:: **Felix Klotzsche** - -Enhancements -~~~~~~~~~~~~ -- Add support for exporting to EEGLAB's set format with :mod:`eeglabio` with new methods :meth:`mne.io.Raw.export` and :meth:`mne.Epochs.export`. 
(:gh:`9192` **by new contributor** |Jack Zhang|_) - -- :func:`mne.viz.plot_evoked_topo` now uses a Minimum/Maximum of peaks for each channel type to calculate ylim when input param is None (:gh:`9285` **by new contributor** |Ram Pari|_) - -- Add exclude parameter to :func:`mne.viz.plot_evoked_topo` (:gh:`9278` **by new contributor** |Ram Pari|_) - -- Add :meth:`mne.time_frequency.EpochsTFR.to_data_frame` and :meth:`mne.time_frequency.AverageTFR.to_data_frame` methods (:gh:`9124` **by new contributor** |Felix Klotzsche|_) - -- Add ``selection`` and ``drop_log`` to :class:`mne.time_frequency.EpochsTFR` (:gh:`9124` **by new contributor** |Felix Klotzsche|_) - -- Add CSV, TSV, and XYZ support to :func:`mne.channels.read_custom_montage` (:gh:`9203` **by new contributor** |Jack Zhang|_) - -- Add HTML representation for `~mne.Epochs` in Jupyter Notebooks (:gh:`9174` **by new contributor** |Valerii Chirkov|_) - -- Speed up :func:`mne.viz.plot_ica_properties` by refactoring (:gh:`9174` **by new contributor** |Valerii Chirkov|_) - -- Add ``apply_function`` method to epochs and evoked objects (:gh:`9088` **by new contributor** |Erica Peterson|_ and `Victoria Peterson`_) - -- New tutorial for function :func:`mne.make_fixed_length_epochs` (:gh:`9156` **by new contributor** |Erica Peterson|_) - -- Add different colors for each volume source space in :func:`mne.viz.plot_alignment` (:gh:`9043` **by new contributor** |Valerii Chirkov|_) - -- Add ``overlap`` parameter to :func:`mne.make_fixed_length_epochs` to allow creating overlapping fixed length epochs (:gh:`9096` **by new contributor** |Silvia Cotroneo|_) - -- Add :meth:`mne.Dipole.to_mni` for more convenient dipole.pos to MNI conversion (:gh:`9043` **by new contributor** |Valerii Chirkov|_) - -- Update citations in maxwell.py (:gh:`9043` **by new contributor** |Valerii Chirkov|_) - -- New Tutorial for analyzing frequency-tagging data (:gh:`8867` **by new contributor** |Dominik Welke|_ and `kalenkovich`_) - -- Add dbs as new channel type for deep brain stimulation (DBS) recordings (:gh:`8739` **by new contributor** |Richard Koehler|_) - -- Add some preprocessing functions to the EEGLAB migration guide (:gh:`9169` **by new contributor** |Apoorva Karekal|_) - -- Add :func:`mne.chpi.extract_chpi_locs_kit` to read cHPI coil locations from KIT/Yokogawa data (:gh:`8813` **by new contributor** |Matt Sanderson|_, `Robert Seymour`_, and `Eric Larson`_) - -- Add ``match_alias`` parameter to :meth:`mne.io.Raw.set_montage` and related functions to match unrecognized channel location names to known aliases (:gh:`8799` **by new contributor** |Zhi Zhang|_) - -- Update the ``notebook`` 3d backend to use ``ipyvtk_simple`` for a better integration within ``Jupyter`` (:gh:`8503` by `Guillaume Favelier`_) - -- Remove the 15-character limitation for channel names when writing to FIF format. 
If you need the old 15-character names, you can use something like ``raw.rename_channels({n: n[:13] for n in raw.ch_names}, allow_duplicates=True)``, by `Eric Larson`_ (:gh:`8346`) - -- Add channel-specific annotation support to :class:`mne.Annotations` via ``ch_names`` parameter (:gh:`8896` by `Eric Larson`_) - -- Add toggle-all button to :class:`mne.Report` HTML and ``width`` argument to ``mne.Report.add_bem_to_section`` (:gh:`8723` by `Eric Larson`_) - -- Add infant template MRI dataset downloader :func:`mne.datasets.fetch_infant_template` (:gh:`8738` by `Eric Larson`_ and `Christian O'Reilly`_) - -- Add digitizer information to :func:`mne.io.read_raw_egi` (:gh:`8789` by `Christian Brodbeck`_) - -- Add support for reading some incomplete raw FIF files in :func:`mne.io.read_raw_fif` (:gh:`9268` by `Eric Larson`_) - -- Allow reading digitization from files other than ``*.fif`` in the coregistration GUI (:gh:`8790` by `Christian Brodbeck`_) - -- Speed up :func:`mne.inverse_sparse.tf_mixed_norm` using STFT/ISTFT linearity (:gh:`8697` by `Eric Larson`_) - -- Reduce memory consumption of `mne.io.Raw` and speed up epoching when thousands of events are present for `mne.Epochs` (:gh:`8801` by `Eric Larson`_) - -- Speed up ``import mne`` by reducing function creation overhead (:gh:`8829` by `Eric Larson`_) - -- `mne.Report.parse_folder` now processes supported non-FIFF files by default, too (:gh:`8744` by `Richard Höchenberger`_) - -- `mne.Report` has gained the new methods `~mne.Report.add_custom_js` and `~mne.Report.add_custom_css` for adding user-defined JavaScript and styles (:gh:`8762`, :gh:`9037` by `Richard Höchenberger`_) - -- Add option to control appearance of opaque inside surface of the head to :ref:`mne coreg` (:gh:`8793` by `Eric Larson`_) - -- Add option to disable projection using ``--projoff`` in :ref:`mne browse_raw` (:gh:`9262` by `Eric Larson`_) - -- Add keypress to toggle projection using ``shift+j`` in :meth:`mne.io.Raw.plot` and :ref:`mne browse_raw` (:gh:`9262` by `Eric Larson`_) - -- Add support for non-FIF files in :ref:`mne browse_raw` using :func:`mne.io.read_raw` (:gh:`8806` by `Eric Larson`_) - -- Add :func:`mne.io.read_raw_nedf` for reading StarStim / enobio NEDF files (:gh:`8734` by `Tristan Stenner`_) - -- Add :meth:`raw.describe() <mne.io.Raw.describe>` to display (or return) descriptive statistics for each channel (:gh:`8760` by `Clemens Brunner`_) - -- Add :meth:`annotations.to_data_frame() <mne.Annotations.to_data_frame>` to return annotations as a pandas dataframe (:gh:`8783` by `Robert Luke`_) - -- Add :func:`mne.preprocessing.compute_maxwell_basis` to compute the SSS basis function (:gh:`8822` by `Eric Larson`_) - -- Add the ``silhouette`` parameter to :class:`mne.viz.Brain` to display sharp edges and improve perception (:gh:`8771` by `Guillaume Favelier`_) - -- Add warning to :func:`mne.cov.compute_whitener` when an explicit ``rank`` parameter leads to a large increase in condition number (:gh:`8805` by `Eric Larson`_) - -- Add parameter ``align=True`` to `mne.viz.Brain.show_view` to make views relative to the closest canonical (MNI) axes rather than the native MRI surface RAS coordinates (:gh:`8794` by `Eric Larson`_) - -- Add ``auto_close`` to ``mne.Report.add_figs_to_section`` and ``mne.Report.add_slider_to_section`` to manage closing figures (:gh:`8730` by `Guillaume Favelier`_) - -- Add :func:`mne.write_head_bem` to support writing head surface files (:gh:`8841` by `Yu-Han Luo`_) - -- The signal of ``resp`` (respiratory) channels is now assumed to be in the unit Volt (:gh:`8858` by `Richard
Höchenberger`_) - -- Static type checkers like Pylance (which comes with VS Code) now display the parameters of many more functions correctly, largely improving overall usability for VS Code users (:gh:`8862` by `Richard Höchenberger`_) - -- Support new EEGLAB file format (:gh:`8874` by `Clemens Brunner`_) - -- Reading and writing FIFF files whose filenames end with ``_meg.fif.gz``, ``_eeg.fif(.gz)``, and ``_ieeg.fif(.gz)`` doesn't emit a warning anymore; this improves interoperability with BIDS-formatted datasets (:gh:`8868` by `Richard Höchenberger`_) - -- On macOS, we now set the environment variable ``QT_MAC_WANTS_LAYER`` to ``"1"`` if it hasn't been set explicitly by the user, in order to ensure that `~mne.SourceEstimate` plots work on macOS 11 with older versions of Qt and PyQt (:gh:`8959` by `Richard Höchenberger`_) - -- :func:`mne.time_frequency.EpochsTFR.average` now allows different ways of averaging, such as "median" or callable functions (:gh:`8879` by `Adam Li`_) - -- `~mne.Epochs` metadata can now be generated automatically from events using `mne.epochs.make_metadata` (:gh:`8834` by `Richard Höchenberger`_) - -- Interactions with sliders in `mne.Report` will now continuously update the linked content (it was updated only on mouse button release before) (:gh:`9023` by `Richard Höchenberger`_) - -- `mne.viz.plot_drop_log` and :meth:`mne.Epochs.plot_drop_log` now omit displaying the subject name in the title if ``subject=None`` is passed (:gh:`9015` by `Richard Höchenberger`_) - -- The ECoG plotting tutorial now uses a real epilepsy seizure dataset and visualizes the seizure onset (:gh:`9087` by `Eric Larson`_, `Adam Li`_, `Alex Rockhill`_ and `Liberty Hamilton`_) - -- Improve documentation of the Report class (:gh:`9113` by `Martin Schulz`_) - -- Add :func:`mne.channels.DigMontage.add_estimated_fiducials` which will add LPA, RPA and Nasion fiducial points to the ``DigMontage`` object in the ``mri`` coordinate frame (:gh:`9118` by `Adam Li`_) - -- :func:`mne.io.anonymize_info` now also anonymizes the sex and hand fields when ``keep_his`` is ``False`` (:gh:`9103`, :gh:`9175` **by new contributor** |Rotem Falach|_ and `Richard Höchenberger`_) - -- Add parameter ``theme`` to :class:`mne.viz.Brain` for an optional dark mode (:gh:`9149` by `Martin Schulz`_, `Guillaume Favelier`_) - -- Add ``first_samp`` support for raw simulations with `mne.simulation.simulate_raw` and `mne.simulation.SourceSimulator` (:gh:`9166` by `Steven Bierer`_) - -- `~mne.Evoked` gained a ``baseline`` attribute that is automatically assembled based on the baseline of the averaged `~mne.Epochs` (:gh:`9210` by `Richard Höchenberger`_) - -- Add ``units`` parameter to :meth:`mne.io.Raw.get_data` to return data in the desired unit (:gh:`9136` by `Johann Benerradi`_ and `Stefan Appelhoff`_) - -- Add :func:`mne.preprocessing.equalize_bads` to interpolate bad channels in a list of `~mne.Evoked`, `~mne.Epochs` or `~mne.io.Raw` having different sets of bad channels (:gh:`9241` by `Alex Gramfort`_) - -- :meth:`mne.Epochs.equalize_event_counts` can now be called without providing a list of event names, and will equalize the counts of **all** event types present in the `~mne.Epochs` (:gh:`9261` by `Richard Höchenberger`_) - -- :func:`mne.preprocessing.find_eog_events` and :func:`mne.preprocessing.create_eog_epochs` now accept a list of channel names, allowing you to specify multiple EOG channels at once (:gh:`9269` by `Richard Höchenberger`_) - -- Improve performance of :func:`mne.set_bipolar_reference` (:gh:`9270` by `Martin Schulz`_) - -- Add support for
setting montages on fNIRS data, with built-in standard montages for Artinis OctaMon and Artinis Brite23 devices (:gh:`9141` by `Johann Benerradi`_, `Robert Luke`_ and `Eric Larson`_) - -- Enable support for reading SNIRF files with 2D optode positions (:gh:`9347` by `Robert Luke`_) - - -Bugs -~~~~ -- Fix bug with :func:`mne.time_frequency.tfr` where the plot misbehaved. The plot now always returns a list of figures instead of a single figure handle (:gh:`8013` **by new contributor** |Eduard Ort|_) - -- Fix bug with :func:`mne.viz.plot_evoked_topo` where ``ylim`` parameters got swapped across channel types (:gh:`9207` **by new contributor** |Ram Pari|_) - -- Fix bug with :func:`mne.io.read_raw_edf` where µV was not correctly recognized (:gh:`9187` **by new contributor** |Sumalyo Datta|_) - -- Fix bug where :func:`mne.viz.plot_compare_evokeds` did not check the type of ``combine`` (:gh:`9151` **by new contributor** |Matteo Anelli|_) - -- Fix bug with :func:`mne.viz.plot_evoked_topo` where ``ylim`` was only being applied to the first channel in the dataset (:gh:`9162` **by new contributor** |Ram Pari|_) - -- Fix bug with :func:`mne.Epochs.plot_image` so that interactive zoom works properly (:gh:`9152` **by new contributor** |Maggie Clarke|_ and `Daniel McCloy`_) - -- Fix bug with :func:`mne.Epochs.plot_image` where the ``x_label`` was different depending on the evoked parameter (:gh:`9115` **by new contributor** |Matteo Anelli|_) - -- Fix bug with restricting :func:`mne.io.Raw.save` saving options to the ``.fif`` and ``.fif.gz`` extensions (:gh:`9062` **by new contributor** |Valerii Chirkov|_) - -- Fix bug with :func:`mne.io.read_raw_kit` where missing marker coils were not handled (:gh:`8989` **by new contributor** |Judy D Zhu|_) - -- Fix bug with ``mne.connectivity.spectral_connectivity`` where the time axis in the Epochs data object was dropped.
(:gh:`8839` **by new contributor** |Anna Padee|_) - -- Fix bug with `mne.io.Raw.resample` to allow passing ``stim_picks='misc'`` (:gh:`8844` **by new contributor** |Enrico Varano|_ and `Eric Larson`_) - -- Fix bugs with `mne.io.read_raw_persyst` so that multiple ``Comments`` with the same name are allowed, and ``Comments`` with a "," character are now allowed (:gh:`8311` and :gh:`8806` **by new contributor** |Andres Rodriguez|_ and `Adam Li`_) - -- Fix zen mode and scalebar toggling for :meth:`raw.plot() <mne.io.Raw.plot>` when using the ``macosx`` matplotlib backend (:gh:`8688` by `Daniel McCloy`_) - -- Fix bug with :func:`mne.viz.snapshot_brain_montage` where the positions were incorrect (:gh:`8983` by `Eric Larson`_) - -- Fix bug with :func:`mne.preprocessing.maxwell_filter` where the eSSS basis had to exactly match the good channels instead of being a superset (:gh:`8675` by `Eric Larson`_) - -- Fix bug with ``mne.Report.add_bem_to_section`` where ``n_jobs != 1`` would cause ``n_jobs`` subsets of MRI images in some orientations to be flipped (:gh:`8713` by `Eric Larson`_) - -- Fix bug with :meth:`raw.plot() <mne.io.Raw.plot>` where annotations didn't immediately appear when changing window duration (:gh:`8689` by `Daniel McCloy`_) - -- Fix bug with :meth:`raw.plot() <mne.io.Raw.plot>` where ``scalings='auto'`` did not compute scalings using the full range of data (:gh:`8806` by `Eric Larson`_) - -- Fix bug with :meth:`raw.plot() <mne.io.Raw.plot>` where setting a ``lowpass`` could lead to non-data-channels not plotting (:gh:`8954` by `Eric Larson`_) - -- Fix bug with :meth:`mne.io.Raw.load_data` and :meth:`mne.Epochs.drop_bad` where ``verbose`` logging was not handled properly (:gh:`8884` by `Eric Larson`_) - -- Fix bug with :func:`mne.io.read_raw_nicolet` where header values such as ``num_sample`` and ``duration_in_sec`` were not parsed properly (:gh:`8712` by `Alex Gramfort`_) - -- Fix bug with :func:`mne.preprocessing.read_ica_eeglab` when reading decompositions using PCA dimensionality reduction (:gh:`8780` by `Alex Gramfort`_ and `Eric Larson`_) - -- Fix bug with :func:`mne.minimum_norm.make_inverse_operator` where ``depth`` was errantly restricted to be less than or equal to 1.
(:gh:`8804` by `Eric Larson`_) - -- Fix bug with :func:`mne.stats.permutation_cluster_1samp_test` and related clustering functions when ``adjacency=None`` and ``out_type='indices'`` (:gh:`8842` by `Eric Larson`_) - -- Fix bug with :func:`mne.viz.plot_alignment` where plotting a sphere model could ignore the ``brain`` argument (:gh:`8857` by `Eric Larson`_) - -- Fix bug with :func:`mne.SourceEstimate.plot` where flatmaps were not positioned properly when using ``hemi='both'`` (:gh:`9315` by `Eric Larson`_) - -- Fix bug with :meth:`mne.Annotations.save` where files could be overwritten accidentally; this can now be controlled via the ``overwrite`` argument (:gh:`8896` by `Eric Larson`_) - -- Fix bug with ``replace`` argument of ``mne.Report.add_bem_to_section`` and ``mne.Report.add_slider_to_section`` (:gh:`8723` by `Eric Larson`_) - -- Fix bug with :func:`mne.chpi.compute_chpi_locs` where all cHPI coils being off would lead to an empty array of the wrong dimensionality (:gh:`8956` by `Eric Larson`_) - -- Fix bug with :func:`mne.extract_label_time_course` where labels, STCs, and the source space were not checked for compatible ``subject`` attributes (:gh:`9284` by `Eric Larson`_) - -- Fix bug with :func:`mne.grow_labels` where ``overlap=False`` could run forever or raise an error (:gh:`9317` by `Eric Larson`_) - -- Fix compatibility bugs with :mod:`mne_realtime` (:gh:`8845` by `Eric Larson`_) - -- Fix bug with `mne.viz.Brain` where non-inflated surfaces had an X-offset imposed by default (:gh:`8794` by `Eric Larson`_) - -- Fix bug with :ref:`mne coreg` where nasion values were not updated when clicking (:gh:`8793` by `Eric Larson`_) - -- Fix bug with matplotlib-based 3D plotting where ``Axes3D`` were not properly initialized in :func:`mne.viz.plot_source_estimates` (:gh:`8811` by `Chris Bailey`_) - -- Allow sEEG channel types in :meth:`mne.Evoked.plot_joint` (:gh:`8736` by `Daniel McCloy`_) - -- Fix bug where hidden annotations could be deleted interactively in :meth:`mne.io.Raw.plot` windows (:gh:`8831` by `Daniel McCloy`_) - -- Function :func:`mne.set_bipolar_reference` was not working when passing ``Epochs`` constructed with some ``picks`` (:gh:`8728` by `Alex Gramfort`_) - -- Fix anonymization issue of FIF files after IO round trip (:gh:`8731` by `Alex Gramfort`_) - -- Fix bug in `mne.preprocessing.ICA.plot_sources` where right-clicking component names could yield `~mne.preprocessing.ICA.plot_properties` windows for the wrong component if ``picks`` had been specified (:gh:`8996` by `Daniel McCloy`_) - -- Fix title not shown in :func:`mne.viz.plot_montage` (:gh:`8752` by `Clemens Brunner`_) - -- `mne.io.read_raw_egi` now correctly handles `pathlib.Path` filenames (:gh:`8759` by `Richard Höchenberger`_) - -- `mne.viz.plot_evoked` and `mne.Evoked.plot` now correctly plot global field power (GFP) for EEG data when ``gfp=True`` or ``gfp='only'`` is passed (used to plot RMS).
For MEG data, we continue to plot the RMS, but now label it correctly as such (:gh:`8775` by `Richard Höchenberger`_) - -- Fix bug with :ref:`mne make_scalp_surfaces` where ``--overwrite`` was not functional (:gh:`8800` by `Yu-Han Luo`_) - -- Fix bug with :func:`mne.viz.plot_topomap` when plotting gradiometers with a missing channel in a pair (:gh:`8817` by `Alex Gramfort`_) - -- :meth:`epochs.crop() <mne.Epochs.crop>` now also adjusts the ``reject_tmin`` and ``reject_tmax`` attributes if necessary (:gh:`8821` by `Richard Höchenberger`_) - -- When creating `~mne.Epochs`, we now ensure that ``reject_tmin`` and ``reject_tmax`` cannot fall outside of the epochs' time interval anymore (:gh:`8821` by `Richard Höchenberger`_) - -- `~mne.io.read_raw_bti` erroneously treated response channels as respiratory channels (:gh:`8856` by `Richard Höchenberger`_) - -- The RMS trace shown in the time viewer of `~mne.SourceEstimate` plots is now correctly labeled as ``RMS`` (was ``GFP`` before) (:gh:`8965` by `Richard Höchenberger`_) - -- Fix bug with :meth:`mne.SourceEstimate.plot` and related functions where the scalars were not interactively updated properly (:gh:`8985` by `Eric Larson`_) - -- Fix bug with :func:`mne.channels.find_ch_adjacency` returning the wrong adjacency for Neuromag 122 data (:gh:`8891` by `Martin Schulz`_) - -- Fix :func:`mne.read_dipole` yielding :class:`mne.Dipole` objects that could not be indexed (:gh:`8963` by `Marijn van Vliet`_) - -- Fix bug when setting ``n_jobs > 1`` in :meth:`mne.Report.parse_folder` (:gh:`9109` by `Martin Schulz`_) - -- Fix bug with :meth:`mne.Evoked.plot_image` where an incorrect ``clim`` parameter did not raise any error (:gh:`9115` **by new contributor** |Matteo Anelli|_) - -- Fix bug with ``mne.io.Raw.pick`` where incorrect fNIRS types were returned (:gh:`9178` by `Robert Luke`_) - -- Fix bug when passing both ``axes`` and ``picks`` to `mne.viz.plot_compare_evokeds` (:gh:`9252` by `Daniel McCloy`_) - -- Improved string representation of `~mne.Epochs` containing multiple event types; improved (and more mathematically correct) ``evoked.comment`` in the `mne.combine_evoked` output; and better (and often more concise) legend labels in the figures created via `~mne.viz.plot_compare_evokeds` (:gh:`9027` by `Richard Höchenberger`_) - -- :func:`mne.preprocessing.find_ecg_events` now correctly handles the situation where no ECG activity could be detected, and correctly returns an empty array of ECG events (:gh:`9236` by `Richard Höchenberger`_) - -- Fix bug with ``picks`` attribute for `~mne.Epochs` after calling :meth:`mne.Epochs.add_channels` (:gh:`9246` by `Alex Gramfort`_) - -- Fix bug where ``backend='notebook'`` could not be used in :meth:`mne.SourceEstimate.plot` (:gh:`9305` by `Jean-Remi King`_) - -- `mne.preprocessing.compute_proj_eog` and `mne.preprocessing.compute_proj_ecg` now return empty lists if no EOG or ECG events, respectively, could be found. Previously, we'd return ``None`` in these situations, which does not match the documented behavior of returning a list of projectors (:gh:`9277` by `Richard Höchenberger`_) - -API changes -~~~~~~~~~~~ -- Introduced new ``'auto'`` settings for ``ICA.max_iter``. The old default ``max_iter=200`` will be removed in MNE-Python 0.24 (:gh:`9099` **by new contributor** |Cora Kim|_) - -- ``mne.viz.plot_sensors_connectivity`` now allows setting the colorbar label via the ``cbar_label`` parameter (:gh:`9248` by `Daniel McCloy`_) - -- ``mne.read_selection`` has been deprecated in favor of `mne.read_vectorview_selection`.
``mne.read_selection`` will be removed in MNE-Python 0.24 (:gh:`8870` by `Richard Höchenberger`_) - -- ``mne.beamformer.tf_dics`` has been deprecated and will be removed in MNE-Python 0.24 (:gh:`9122` by `Britta Westner`_) - -- Fitting `~mne.preprocessing.ICA` on baseline-corrected `~mne.Epochs`, and / or applying it on baseline-corrected `~mne.Epochs` or `~mne.Evoked` data will now display a warning. Users are advised to only baseline correct their data after cleaning is completed (:gh:`9033` by `Richard Höchenberger`_) - -- Supplying multiple channel names to `mne.preprocessing.find_eog_events` or `mne.preprocessing.compute_proj_eog` as a string of comma-separated channel names has been deprecated; please pass a list of channel names instead. Support for comma-separated strings will be removed in MNE-Python 0.24 (:gh:`9269` by `Richard Höchenberger`_) - -- The default in :func:`mne.beamformer.make_dics` of ``real_filter=False`` will change to ``real_filter=True`` in 0.24 (:gh:`9340` by `Britta Westner`_) - -Authors -~~~~~~~ - -People who contributed to this release in alphabetical order -(people with a + are first time contributors): - -* Adam Li -* Alex Rockhill -* Alexandre Gramfort -* Ana Radanovic -* Andres Rodriguez+ -* Anna Padee+ -* Apoorva Karekal+ -* Britta Westner -* Catalina Magalvan -* Christian Brodbeck -* Christian Mista -* Christian O'Reilly -* Christina Zhao -* Christopher J. Bailey -* Clemens Brunner -* Cora Kim+ -* Daniel McCloy -* Denis A. Engemann -* Dominik Welke+ -* Eduard Ort -* Enrico Varano+ -* Eric Larson -* Erica Peterson+ -* Evgenii Kalenkovich -* Felix Klotzsche+ -* Giorgio Marinato -* Guillaume Favelier -* Jack Zhang+ -* Jean-Remi King -* Johann Benerradi -* Joris Van den Bossche -* Judy D Zhu+ -* Liberty Hamilton -* Luke Bloy -* Maggie Clarke+ -* Mainak Jas -* Manorama Kadwani -* Marijn van Vliet -* Martin Schulz -* Matt Sanderson+ -* Matteo Anelli+ -* Nicolas Gensollen -* Ram Pari+ -* Richard Höchenberger -* Richard Koehler+ -* Robert Luke -* Rotem Falach+ -* Sebastien Treguer -* Silvia Cotroneo+ -* Stefan Appelhoff -* Steven Bierer -* Sumalyo Datta+ -* Timon Merk -* Tristan Stenner -* Valerii Chirkov+ -* Victoria Peterson -* Yu-Han Luo -* Zhi Zhang+ diff --git a/doc/changes/0.24.inc b/doc/changes/0.24.inc deleted file mode 100644 index 41fb4bcb3b0..00000000000 --- a/doc/changes/0.24.inc +++ /dev/null @@ -1,425 +0,0 @@ -.. NOTE: we are now using links to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. NOTE: changes from first-time contributors should be added to the TOP of - the relevant section (Enhancements / Bugs / API changes), and should look - like this (where xxxx is the pull request number): - - description of enhancement/bugfix/API change (:gh:`xxxx` **by new contributor** |Firstname Lastname|_) - -.. _changes_0_24_0: - -Version 0.24.0 (2021-11-03) ---------------------------- - -.. |David Julien| replace:: **David Julien** - -.. |Dominique Makowski| replace:: **Dominique Makowski** - -.. |Romain Derollepot| replace:: **Romain Derollepot** - -.. |Jan Sosulski| replace:: **Jan Sosulski** - -.. |Xiaokai Xia| replace:: **Xiaokai Xia** - -.. |Marian Dovgialo| replace:: **Marian Dovgialo** - -.. 
|Pierre-Antoine Bannier| replace:: **Pierre-Antoine Bannier** - -.. |Darin Erat Sleiter| replace:: **Darin Erat Sleiter** - -.. |Mathieu Scheltienne| replace:: **Mathieu Scheltienne** - -.. |Timothy Gates| replace:: **Timothy Gates** - -.. |Reza Shoorangiz| replace:: **Reza Shoorangiz** - -.. |Evgeny Goldstein| replace:: **Evgeny Goldstein** - -.. |Joshua Teves| replace:: **Joshua Teves** - -.. |Riessarius Stargardsky| replace:: **Riessarius Stargardsky** - -.. |John Samuelsson| replace:: **John Samuelsson** - -Enhancements -~~~~~~~~~~~~ -.. - Add something cool (:gh:`9192` **by new contributor** |New Contributor|_) - -- Add `pooch` to system information reports (:gh:`9801` **by new contributor** |Joshua Teves|_) - -- Get annotation descriptions from the name field of SNIRF stimulus groups when reading SNIRF files via `mne.io.read_raw_snirf` (:gh:`9575` **by new contributor** |Darin Erat Sleiter|_) - -- Add support for NIRSport and NIRSport2 devices to `mne.io.read_raw_nirx` (:gh:`9348` and :gh:`9401` **by new contributor** |David Julien|_, **new contributor** |Romain Derollepot|_, `Robert Luke`_, and `Eric Larson`_) - -- New function :func:`mne.Dipole.to_volume_labels` to get atlas labels for MRI coordinates (:gh:`9376` by **new contributor** |Marian Dovgialo|_) - -- Add support for SURE parameter selection in :func:`mne.inverse_sparse.mixed_norm` and make the ``alpha`` parameter default to ``'sure'`` (:gh:`9430` by **new contributor** |Pierre-Antoine Bannier|_ and `Alex Gramfort`_) - -- Speed up BCD solver in :func:`mne.inverse_sparse.mixed_norm` by adding Anderson acceleration (:gh:`9481` by **new contributor** |Pierre-Antoine Bannier|_ and `Alex Gramfort`_) - -- Add support for list of channel types for EEG/sEEG/ECoG/DBS referencing (:gh:`9637` **by new contributor** |Mathieu Scheltienne|_) - -- Add the ability to set point size and line width in :func:`mne.viz.plot_sensors` and related functions (:gh:`9872` **by new contributor** |Dominique Makowski|_) - -- Add support for changing the default event id used by :func:`mne.make_fixed_length_epochs` (:gh:`9782` **by new contributor** |Mathieu Scheltienne|_) - -- Improve documentation for arguments ``threshold``, ``method`` and ``measure`` of ICA methods `~mne.preprocessing.ICA.find_bads_ecg`, `~mne.preprocessing.ICA.find_bads_ref` and `~mne.preprocessing.ICA.find_bads_eog` (:gh:`9858` **by new contributor** |Mathieu Scheltienne|_) - -- Add support for more than 3 source estimates in :func:`mne.viz.plot_sparse_source_estimates` (:gh:`9640` **by new contributor** |Pierre-Antoine Bannier|_ and `Alex Gramfort`_) - -- Add cortical signal suppression (CSS) :func:`mne.preprocessing.cortical_signal_suppression` (:gh:`6888`, :gh:`9892` **by new contributor** |John Samuelsson|_ and `Eric Larson`_) - -- Show all good channel types and counts when printing a :class:`mne.Info` in the notebook (:gh:`9725` by `Valerii Chirkov`_ and `Eric Larson`_) - -- Speed up point decimation in :func:`mne.io.read_raw_kit` by vectorization and use of :class:`scipy.spatial.cKDTree` (:gh:`9568` by `Jean-Remi King`_ and `Eric Larson`_) - -- Add ability to export EDF+ files using :func:`mne.export.export_raw` (:gh:`9643` by `Adam Li`_) - -- New function :func:`mne.chpi.get_chpi_info` to retrieve basic information about the cHPI system used when recording MEG data (:gh:`9369` by `Richard Höchenberger`_) - -- New namespace `mne.export` created to contain functions (such as `mne.export.export_raw` and `mne.export.export_epochs`) for exporting data to non-FIF formats
(:gh:`9427` by `Eric Larson`_) - -- Add support for Hitachi fNIRS devices in `mne.io.read_raw_hitachi` (:gh:`9391` by `Eric Larson`_) - -- Add support for ``picks`` in :func:`mne.stc_near_sensors` (:gh:`9396` by `Eric Larson`_) - -- Add label methods `mne.Label.compute_area` and `mne.Label.distances_to_outside` (:gh:`9424` by `Eric Larson`_) - -- Add projections when printing a :class:`mne.Info` in the notebook (:gh:`9403` by `Alex Gramfort`_) - -- Add new function :func:`mne.bem.make_scalp_surfaces` to compute scalp surfaces for coregistration (:gh:`9463` by `Alex Gramfort`_) - -- Add support for interpolating oxy and deoxyhaemoglobin data types (:gh:`9431` by `Robert Luke`_) - -- Add support for SNIRF files in :class:`mne.Report` (:gh:`9443` by `Robert Luke`_) - -- Add support for exporting MFF evoked files using `mne.export.export_evokeds` and `mne.export.export_evokeds_mff` (:gh:`9406` by `Evan Hathaway`_) - -- :func:`mne.concatenate_raws`, :func:`mne.concatenate_epochs`, and :func:`mne.write_evokeds` gained a new parameter ``on_mismatch``, which controls behavior in case not all of the supplied instances share the same device-to-head transformation (:gh:`9438` by `Richard Höchenberger`_) - -- Add support for multiple data blocks (acquisitions with pauses) in :func:`mne.io.read_raw_nihon` (:gh:`9437` by `Federico Raimondo`_) - -- Add new function :func:`mne.preprocessing.annotate_break` to automatically detect and mark "break" periods without any marked experimental events in the continuous data (:gh:`9445` by `Richard Höchenberger`_) - -- Add "time_format" to :meth:`mne.io.Raw.plot` and :meth:`mne.preprocessing.ICA.plot_sources` to display acquisition time on the x-axis (:gh:`9419` by `Martin Schulz`_) - -- Speed up :meth:`mne.decoding.TimeDelayingRidge.predict` by switching to FFT-based convolution (:gh:`9458` by `Ross Maddox`_) - -- Show multiple colors and linestyles for excluded components with :class:`mne.Evoked` in :meth:`mne.preprocessing.ICA.plot_sources` (:gh:`9444` by `Martin Schulz`_) - -- Add functions for aligning MRI and CT data `mne.transforms.compute_volume_registration` and `mne.transforms.apply_volume_registration` (:gh:`9503` by `Alex Rockhill`_ and `Eric Larson`_) - -- Add tutorial on how to process image (CT and MR) files in order to localize electrode contacts for intracranial recordings :ref:`tut-ieeg-localize` (:gh:`9484` by `Alex Rockhill`_) - -- Add vertex number in source space for :meth:`mne.VolSourceEstimate.plot` and :func:`mne.viz.plot_volume_source_estimates` (:gh:`9697` by `Eric Larson`_) - -- Add support for colormap normalization in :func:`mne.viz.plot_topomap` (:gh:`9468` by `Clemens Brunner`_) - -- Update :func:`mne.preprocessing.realign_raw` with the NumPy-recommended polynomial fitting method (:gh:`9514` by `Erica Peterson`_) - -- Add a warning to `mne.preprocessing.nirs.beer_lambert_law` when source-detector distances are zero (:gh:`9541` by `Eric Larson`_) - -- Add ``exclude`` parameter to :meth:`mne.io.Raw.plot_psd` and :meth:`mne.Epochs.plot_psd` (:gh:`9519` by `Clemens Brunner`_) - -- Add `mne.Annotations.rename` to rename annotation descriptions (:gh:`9525` by `Robert Luke`_) - -- Add `mne.Annotations.set_durations` to set annotation durations (:gh:`9529` by `Robert Luke`_) - -- The ``exclude`` parameter in `mne.io.read_raw_edf`, `mne.io.read_raw_bdf`, and `mne.io.read_raw_gdf` now also accepts a regular expression (:gh:`9558` by `Clemens Brunner`_) - -- Add :meth:`mne.Evoked.get_data` method to :class:`mne.Evoked` (:gh:`9555` by `Stefan
Appelhoff`_) - -- The :meth:`mne.Epochs.get_data` method now has a ``units`` parameter (:gh:`9553` by `Stefan Appelhoff`_) - -- Add ``mne.warp_montage_volume`` to use an anatomical volume with channel locations at high or low intensities to apply a symmetric diffeomorphic registration to a template (e.g. fsaverage) space (:gh:`9544` by `Alex Rockhill`_) - -- :meth:`mne.io.Raw.get_data`, :meth:`mne.Epochs.get_data`, and :meth:`mne.Evoked.get_data` methods now have ``tmin`` and ``tmax`` parameters to specify the data window in seconds to get (:gh:`9556` by `Stefan Appelhoff`_) - -- The :meth:`mne.Evoked.get_data` method now has a ``units`` parameter (:gh:`9578` by `Stefan Appelhoff`_) - -- Add `mne.chpi.compute_chpi_snr` and `mne.viz.plot_chpi_snr` for computing and plotting the time-varying SNR of continuously-active HPI coils (:gh:`9570` by `Daniel McCloy`_ and `Jussi Nurminen`_) - -- Add :func:`mne.get_montage_volume_labels` to find the regions of interest in a Freesurfer atlas anatomical segmentation for an intracranial electrode montage and :func:`mne.viz.plot_channel_labels_circle` to plot them (:gh:`9545` by `Alex Rockhill`_) - -- Add :func:`mne.viz.Brain.add_volume_labels` to plot subcortical surfaces and other regions of interest (:gh:`9540` by `Alex Rockhill`_ and `Eric Larson`_) - -- Add custom cortex curvature colors in :class:`mne.viz.Brain` via the ``cortex`` argument (:gh:`9750` by `Eric Larson`_) - -- Add :meth:`mne.channels.DigMontage.apply_trans` to apply a transform directly to a montage (:gh:`9601` by `Alex Rockhill`_) - -- :meth:`mne.preprocessing.ICA.fit` now emits a warning if any of the ``start``, ``stop``, ``reject``, and ``flat`` parameters are passed when performing ICA on `~mne.Epochs`. These parameters only have an effect on `~mne.io.Raw` data and were previously silently ignored in the case of `~mne.Epochs` (:gh:`9605` by `Richard Höchenberger`_) - -- :func:`mne.viz.plot_alignment` now emits a warning (instead of aborting with an exception) if the surfaces contain topological defects (:gh:`9614` by `Richard Höchenberger`_) - -- The coregistration GUI can now display the head surface even if there are topological defects. Previously, a low-resolution standard head unsuitable for individualized coregistration was displayed (:gh:`9614` by `Richard Höchenberger`_) - -- New :class:`mne.coreg.Coregistration` object to allow coregistration scripting without a dependency on Mayavi (:gh:`9516` by `Guillaume Favelier`_).
- -- Add :meth:`mne.viz.Brain.add_head` to plot the head surface (:gh:`9618` by `Alex Rockhill`_) - -- Add :func:`mne.channels.read_dig_localite` to read Localite electrode location files (:gh:`9658` by `Clemens Brunner`_) - -- Add :meth:`mne.viz.Brain.add_sensors` to plot sensor locations (:gh:`9585` by `Alex Rockhill`_) - -- Add :func:`mne.coreg.estimate_head_mri_t` to estimate the head->mri transform from fsaverage fiducials (:gh:`9585` by `Alex Rockhill`_) - -- Add :meth:`mne.channels.DigMontage.add_mni_fiducials` to add fiducials to a montage in MNI coordinates (:gh:`9682` by `Alex Rockhill`_) - -- Add :meth:`mne.channels.DigMontage.remove_fiducials` to remove fiducials under rare circumstances when the coordinate frame should not be set to "head" (:gh:`9684` by `Alex Rockhill`_) - -- Add remove methods for mesh objects in :class:`mne.viz.Brain` (:gh:`9688` by `Alex Rockhill`_) - -- Add ``--show-paths`` and ``--developer`` options to :ref:`mne sys_info` (:gh:`9789` by `Eric Larson`_) - -- Add ability to export EDF+ files using :func:`mne.export.export_raw` (:gh:`9643` by `Adam Li`_) - -- Reading EDF files via :func:`mne.io.read_raw_edf` now can infer channel type from the signal label in the EDF header (:gh:`9694` by `Adam Li`_) - -- Add :func:`mne.gui.locate_ieeg` to locate intracranial electrode contacts from a CT, an MRI (with Freesurfer ``recon-all``) and the channel names from an :class:`mne.Info` object (:gh:`9586` by `Alex Rockhill`_) - -- All data fetching code now relies on ``pooch``, which is an added optional requirement for dataset fetchers (:gh:`9742` by `Adam Li`_ and `Daniel McCloy`_) - -- Add phantom FreeSurfer subject fetcher :func:`mne.datasets.fetch_phantom` to allow fetching the Otaniemi phantom (:gh:`9796` by `Eric Larson`_) - -- Add :func:`mne.datasets.fetch_dataset` to allow packages to fetch datasets in a similar fashion to MNE (:gh:`9763` by `Adam Li`_) - -- Add full ECoG dataset to MNE-misc-data and demonstrate its use in :ref:`ex-electrode-pos-2d` and :ref:`tut-ieeg-localize` (:gh:`9784` by `Alex Rockhill`_) - -- Add options ``tol`` and ``accuracy`` to :func:`mne.fit_dipole` to control optimization (:gh:`9810` by `Eric Larson`_) - -- Completely revamp the `~mne.Report` experience: new HTML layout, many new methods, more flexibility; the functionality is demonstrated in :ref:`tut-report` (:gh:`9754`, :gh:`9828`, :gh:`9847`, :gh:`9860`, :gh:`9861`, :gh:`9862`, :gh:`9863`, :gh:`9871`, :gh:`9881`, :gh:`9891`, :gh:`9901` by `Richard Höchenberger`_, `Eric Larson`_, and `Alex Gramfort`_) - -- Add basic HTML representations of `~mne.Forward` and `~mne.minimum_norm.InverseOperator` instances for a nicer Jupyter experience (:gh:`9754` by `Richard Höchenberger`_) - -- `mne.io.read_raw_brainvision` now handles ASCII data with comma-separated values, as may be exported from BrainVision Analyzer (:gh:`9795` by `Richard Höchenberger`_) - -- Add :func:`mne.preprocessing.ieeg.project_sensors_onto_brain` to project ECoG sensors onto the pial surface to compensate for brain shift (:gh:`9800` by `Alex Rockhill`_) - -- Add support for data acquired with NIRx devices using Aurora software version 2021.9 (:gh:`9800` by `Robert Luke`_, and `Eric Larson`_) - -- All functions for reading and writing files should now automatically handle ``~`` (the tilde character) and expand it to the user's home directory. Should you come across any function that doesn't do it, please do let us know! 
(:gh:`9613`, :gh:`9845` by `Richard Höchenberger`_) - -- All functions accepting a FreeSurfer subjects directory via a ``subjects_dir`` parameter can now consume :py:class:`pathlib.Path` objects too (used to be only strings) (:gh:`9613` by `Richard Höchenberger`_) - -- Add support for colormap normalization in :meth:`mne.time_frequency.AverageTFR.plot` (:gh:`9851` by `Clemens Brunner`_) - -- Add support for BIDS-compatible filenames when splitting big epochs files via the new ``split_naming`` parameter in :meth:`mne.Epochs.save` (:gh:`9869` by `Denis Engemann`_) - -- Add ``by_event_type`` parameter to :meth:`mne.Epochs.average` to create a list containing an :class:`mne.Evoked` object for each event type (:gh:`9859` by `Marijn van Vliet`_) - -- Add pyqtgraph as a new backend for :meth:`mne.io.Raw.plot` (:gh:`9687` by `Martin Schulz`_) - -- Add :func:`mne.viz.set_browser_backend`, :func:`mne.viz.use_browser_backend` and :func:`mne.viz.get_browser_backend` to set matplotlib or pyqtgraph as backend for :meth:`mne.io.Raw.plot` (:gh:`9687` by `Martin Schulz`_) - -Bugs -~~~~ -- Fix bug in :meth:`mne.io.Raw.pick` and related functions when parameter list contains channels which are not in info instance (:gh:`9708` **by new contributor** |Evgeny Goldstein|_) - -- Fix a few typos (:gh:`9706` **by new contributor** |Timothy Gates|_) - -- Fix bug with :meth:`mne.Epochs.crop` and :meth:`mne.Evoked.crop` when ``include_tmax=False``, where the last sample was always cut off, even when ``tmax > epo.times[-1]`` (:gh:`9378` **by new contributor** |Jan Sosulski|_) - -- Fix bug with `mne.io.read_raw_curry` to allow reading Curry 7 and 8 event files with '.ceo' and '.cdt.ceo' extensions (:gh:`9381`, :gh:`9712` by **new contributor** |Xiaokai Xia|_, `Daniel McCloy`_, and **by new contributor** |Reza Shoorangiz|_) - -- Fix bug with `mne.compute_rank`, `mne.cov.compute_whitener` and `mne.preprocessing.ICA.fit` when explicit picks including bad channels are provided (:gh:`9719` **by new contributor** |Mathieu Scheltienne|_) - -- Fix bug with :func:`mne.concatenate_epochs` where the concatenated events could end up in non-chronological order. (:gh:`9765` **by new contributor** |Jan Sosulski|_) - -- Fix bug where :func:`mne.time_frequency.psd_array_welch` and :func:`mne.time_frequency.psd_array_multitaper` failed to handle negative integer for ``n_jobs`` argument (:gh:`9849` **by new contributor** |Riessarius Stargardsky|_) - -- Fix bug where resampling returns empty instances (:gh:`9897` **by new contributor** |Mathieu Scheltienne|_ and `Eric Larson`_) - -- Fix bug with `mne.io.Raw.get_montage` now returning both good and bad channels in the montage. 
(:gh:`9920` **by new contributor** |Mathieu Scheltienne|_) - -- Fix bug with :func:`mne.io.read_raw_nihon` where Latin-1 annotations could not be read (:gh:`9384` by `Alex Gramfort`_) - -- Fix bug when printing a :class:`mne.io.RawArray` in the notebook (:gh:`9404` by `Alex Gramfort`_) - -- Fix bug when computing rank from info for SSS data with only gradiometers or magnetometers (:gh:`9435` by `Alex Gramfort`_) - -- Fix bug with `mne.preprocessing.find_bad_channels_maxwell` where all-flat segments could lead to an error (:gh:`9531` by `Eric Larson`_) - -- Fix bug with `mne.io.Raw.set_montage` and related functions where the channel coordinate frame was not properly set to head (:gh:`9447` by `Eric Larson`_) - -- Fix bug with `mne.io.read_raw_fieldtrip` and `mne.read_epochs_fieldtrip` where channel positions were not set properly (:gh:`9447` by `Eric Larson`_) - -- Fix bug with :func:`mne.io.read_raw_kit` where omitting HPI coils could lead to an :exc:`python:AssertionError` on reading (:gh:`9612` by `Eric Larson`_) - -- Fix bug with :func:`mne.io.read_raw_edf` where reading only stimulus channels would lead to an error (:gh:`9831` by `Eric Larson`_) - -- Fix bug with `mne.preprocessing.nirs.optical_density` where protection against zero values was not guaranteed (:gh:`9522` by `Eric Larson`_) - -- :func:`mne.concatenate_raws` now raises an exception if ``raw.info['dev_head_t']`` differs between files. This behavior can be controlled using the new ``on_mismatch`` parameter (:gh:`9438` by `Richard Höchenberger`_) - -- Fixed bug in :meth:`mne.Epochs.drop_bad` where subsequent rejections failed if they only specified thresholds for a subset of the channel types used in a previous rejection (:gh:`9485` by `Richard Höchenberger`_). - -- Fix bug with `mne.simulation.simulate_evoked`, `mne.apply_forward`, and `mne.apply_forward_raw` where systems with EEG channels that come before MEG channels would have them mixed up in the output evoked or raw object (:gh:`9513` by `Eric Larson`_) - -- In :func:`mne.viz.plot_ica_scores` and :meth:`mne.preprocessing.ICA.plot_scores`, the figure and axis titles no longer overlap when plotting only a single EOG or ECG channel (:gh:`9489` by `Richard Höchenberger`_). - -- Ensure `mne.io.Raw.get_montage` works with SNIRF data (:gh:`9524` by `Robert Luke`_) - -- Fix bug in :func:`mne.setup_volume_source_space` where non-finite positions could be used in a discrete source space (:gh:`9603` by `Eric Larson`_) - -- Fix bug in :func:`mne.viz.plot_topomap` (and related methods like :meth:`mne.Evoked.plot_topomap`) where large distances between electrodes (greater than the head radius) would lead to an error (:gh:`9528` by `Mikołaj Magnuski`_). - -- Fix bug in `mne.viz.plot_topomap` (and related methods) where passing ``axes`` that are part of a matplotlib figure that uses a constrained layout would emit warnings (:gh:`9558` by `Eric Larson`_) - -- Fix bug in :func:`mne.make_forward_solution` where impossible sensor-BEM geometries were not checked (:gh:`9893` by `Eric Larson`_) - -- Fix bug in :func:`mne.concatenate_epochs` when concatenating :class:`mne.Epochs` objects with 0 events (:gh:`9535` by `Marijn van Vliet`_) - -- Fix bug in :func:`mne.viz.Brain.screenshot` where the RGBA mode was not supported (:gh:`9564` by `Guillaume Favelier`_).
 - -- Fix bug in :func:`mne.io.read_raw_egi` where reading data from a data segment that is part of an acquisition skip would lead to an error (:gh:`9565` by `Eric Larson`_) - -- Prevent :meth:`mne.io.Raw.plot` windows from spawning at sizes too small to properly render (:gh:`9629` by `Daniel McCloy`_) - -- Fix bug where :meth:`mne.io.Raw.plot` raised an error when plotting raw dipole data (:gh:`9771` by `Eric Larson`_) - -- Fix bug in :meth:`mne.preprocessing.ICA.find_bads_ecg` where passing ``start`` and ``stop`` led to erroneous data windows depending on the combination of Raw, Epochs, Evoked, and the type (int, float, None) of ``start`` and ``stop`` (:gh:`9556` by `Stefan Appelhoff`_) - -- Fix bug in :func:`mne.viz.set_3d_backend` and :func:`mne.viz.get_3d_backend` where the PyVistaQt-based backend was ambiguously named ``'pyvista'`` instead of ``'pyvistaqt'``; use ``set_3d_backend('pyvistaqt')`` and expect ``'pyvistaqt'`` as the output of :func:`mne.viz.get_3d_backend` instead of ``'pyvista'``, and consider using ``get_3d_backend().startswith('pyvista')`` for example for backward-compatible conditionals (:gh:`9607` by `Guillaume Favelier`_) - -- Fix bug in :func:`mne.viz.plot_compare_evokeds` where confidence bands were not drawn if only one condition was plotted (:gh:`9663` by `Daniel McCloy`_) - -- Fix bug where the montage of fNIRS data got set to the "unknown" coordinate frame when it should have been in "head" (:gh:`9630` by `Alex Rockhill`_) - -- Fix bug where "seeg", "ecog", "dbs" and "fnirs" data had an unknown coordinate frame upon loading from a file when it should have been in "head" (:gh:`9580` by `Alex Rockhill`_) - -- Raise error when no ``trans`` is provided to :func:`mne.viz.plot_alignment` when required instead of assuming an identity head->mri transform (:gh:`9585` by `Alex Rockhill`_) - -- Fix bug where :meth:`mne.Epochs.equalize_event_counts` failed when only one good epoch existed for one of the event types (:gh:`9758` by `Daniel McCloy`_) - -- Fix bug where channels with a dollar sign ($) were not being labeled "misc" in :func:`mne.io.read_raw_nihon` (:gh:`9695` by `Adam Li`_) - -- Fix bug where :func:`mne.io.read_raw_persyst` was lower-casing events it found in the ``.lay`` file (:gh:`9746` by `Adam Li`_) - -- Fix bug with Qhull when plotting OPM sensors in :func:`mne.viz.plot_alignment` (:gh:`9799` and :gh:`9804` by `Eric Larson`_) - -- Fix bug where :func:`mne.io.read_raw_snirf` was including the landmark index as a spatial coordinate (:gh:`9777` by `Robert Luke`_) - -- Fix bug where `mne.Annotations` were not appending channel names when being added together (:gh:`9780` by `Adam Li`_) - -- `mne.viz.plot_evoked_joint` now correctly handles a ``times='peaks'`` parameter value (:gh:`9754` by `Richard Höchenberger`_) - -- In `mne.viz.plot_compare_evokeds`, slightly adjust the x-axis limit calculation to avoid drawing an axis that is too short (:gh:`9754` by `Richard Höchenberger`_) - -- Fix a bug with :func:`mne.viz.snapshot_brain_montage` with the Mayavi backend where electrodes were shown in the wrong positions, and with the PyVista backend where the snapshot always contained the electrodes but could be missing a hemisphere (:gh:`9933` by `Eric Larson`_) - -- In :meth:`mne.io.Raw.plot_psd` and :meth:`mne.Epochs.plot_psd`, avoid overlap between subplot titles and x-axis tick labels (:gh:`9864` by `Richard Höchenberger`_) - -- In :meth:`mne.Epochs.plot_psd_topomap`, the data is now scaled to match the output of :meth:`mne.Epochs.plot_psd` (:gh:`9873` by
`Richard Höchenberger`_) - -- Fix channel locations with ``NaN`` values causing all channel locations not to be plotted in :func:`mne.viz.Brain.add_sensors` (:gh:`9911` by `Alex Rockhill`_) - -- Fix infinite loop bug in :func:`mne.get_montage_volume_labels` (:gh:`9940` by `Alex Rockhill`_) - -API changes -~~~~~~~~~~~ -- The :class:`mne.Info` class is now more strictly checked when attributes are set. Setting most attributes directly will lead to an error in the next release. See the docstring of :class:`mne.Info` for more information (:gh:`9867` **by new contributor** |Mathieu Scheltienne|_) - -- The Mayavi 3D backend has been deprecated (:gh:`9904` by `Eric Larson`_): - - - In 3D plotting functions, use PyVista-based backends (``'pyvistaqt'`` and ``'notebook'``) as PySurfer/Mayavi will be removed in the next version - - - To use the :ref:`mne kit2fiff` command-line GUI for fiducial markers, install the separate ``mne-kit-gui`` module, which can still be launched via ``$ mne kit2fiff`` - - - The ``mne.gui.fiducials`` GUI has been deprecated, use :ref:`mne coreg` or ``mne_kit_gui.fiducials()`` to set MRI fiducials instead - -- In `mne.compute_source_morph`, the ``niter_affine`` and ``niter_sdr`` parameters have been replaced by ``niter`` and ``pipeline`` parameters for more consistent and finer-grained control of registration/warping steps and iteration (:gh:`9505` by `Alex Rockhill`_ and `Eric Larson`_) - -- Split :func:`mne.viz.Brain.show_view` argument ``view`` into ``azimuth``, ``elevation`` and ``focalpoint`` for clearer view setting and make the default for ``row`` and ``col`` apply to all rows and columns (:gh:`9596` by `Alex Rockhill`_) - -- Deprecate ``solver='prox'`` in :func:`mne.inverse_sparse.mixed_norm` in favor of ``solver='cd'`` and ``solver='bcd'`` as coordinate descent solvers consistently outperform proximal gradient descent (:gh:`9608` by `Pierre-Antoine Bannier`_) - -- All ``mne.connectivity`` functions have moved to the ``mne-connectivity`` package; they are deprecated in MNE-Python and will be removed in version 0.25 (:gh:`9493` by `Adam Li`_). - -- :func:`mne.inverse_sparse.mixed_norm` now simply warns when source estimates contain no dipole, e.g. if data are too noisy and alpha is based on SURE (:gh:`9685` by `Alex Gramfort`_) - -- Deprecate functions :samp:`mne.datasets.{DATASET_NAME}.has_{DATASET_NAME}_data()` for these datasets: ``epilepsy_ecog``, ``fnirs_motor``, ``multimodal``, ``opm``, ``phantom_4dbti``, ``refmeg_noise``, ``sample``, ``somato``, and ``ssvep``. 
Use the generic :func:`mne.datasets.has_dataset` instead (:gh:`9781` by `Daniel McCloy`_ and `Adam Li`_) - -- :class:`mne.Report` modernization has led to multiple deprecations (:gh:`9754` by `Richard Höchenberger`_): - - - ``mne.Report.add_figs_to_section`` and ``mne.Report.add_slider_to_section`` have been deprecated in favor of :meth:`mne.Report.add_figure` - - - ``mne.Report.add_images_to_section`` has been deprecated in favor of :meth:`mne.Report.add_image` - - - ``mne.Report.add_bem_to_section`` has been deprecated in favor of :meth:`mne.Report.add_bem` - - - ``mne.Report.add_htmls_to_section`` has been deprecated in favor of :meth:`mne.Report.add_html` - - - In :meth:`mne.Report.parse_folder`, the ``sort_sections`` parameter has been deprecated in favor of ``sort_content`` - - - In :meth:`mne.Report.remove`, the ``caption`` and ``section`` parameters have been deprecated in favor of ``title`` and ``tags``, respectively - - - The ``mne.Report.sections`` attribute has been deprecated in favor of ``mne.Report.tags`` - - - The ``mne.Report.fnames`` attribute has been deprecated without replacement - -- :meth:`mne.Epochs.plot` and :func:`mne.viz.plot_epochs` gained parameter ``show_scalebars`` analogous to :meth:`mne.viz.plot_raw` (:gh:`9815` by `Daniel McCloy`_) - -- The output folder name for HF_SEF datasets is now ``hf_sef`` instead of ``HF_SEF`` (:gh:`9763` by `Adam Li`_) - -- Deprecate ``mne.viz.utils.center_cmap`` (:gh:`9851` by `Clemens Brunner`_) - -- The default partial pathlength factor of :func:`mne.preprocessing.nirs.beer_lambert_law` will change from 0.1 in 0.24 to 6.0 in the next release (:gh:`9843` by `Robert Luke`_) - -- ``mne.preprocessing.ICA.detect_artifacts`` has been deprecated. Please use `~mne.preprocessing.ICA.find_bads_eog` and `~mne.preprocessing.ICA.find_bads_ecg` instead (:gh:`9909` by `Richard Höchenberger`_) - -Authors -~~~~~~~ -People who contributed to this release in alphabetical order -(people with a + are first time contributors): - -* Adam Li -* Alex Rockhill -* Alexandre Gramfort -* Britta Westner -* Clemens Brunner -* Daniel McCloy -* Darin Erat Sleiter+ -* David Julien+ -* Denis A. Engemann -* Dominique Makowski+ -* Eric Larson -* Erica Peterson -* Evan Hathaway -* Evgeny Goldstein+ -* Fede Raimondo -* Gansheng Tan+ -* Guillaume Favelier -* Hubert Banville -* Jan Sosulski+ -* Jean-Remi King -* Jeff Stout -* Johann Benerradi -* John Samuelsson+ -* Josh Koen+ -* Joshua Teves+ -* Lukas Hecker+ -* Luke Bloy -* Marian Dovgialo+ -* Marijn van Vliet -* Martin Schulz -* Mathieu Scheltienne+ -* Mikołaj Magnuski -* Pierre-Antoine Bannier+ -* Reza Shoorangiz+ -* Richard Höchenberger -* Riessarius Stargardsky+ -* Robert Luke -* Romain Derollepot+ -* Ross Maddox -* Ryan Law+ -* Stefan Appelhoff -* Steve Matindi+ -* Timothy Gates+ -* Valerii Chirkov -* Xiaokai Xia+ diff --git a/doc/changes/0.8.inc b/doc/changes/0.8.inc deleted file mode 100644 index 62d3b17d404..00000000000 --- a/doc/changes/0.8.inc +++ /dev/null @@ -1,194 +0,0 @@ -.. 
_changes_0_8: - -Version 0.8 (2014-06-25) ------------------------- - -Changelog -~~~~~~~~~ - -- Add Python 3 support by `Nick Ward`_, `Alex Gramfort`_, `Denis Engemann`_, and `Eric Larson`_ - -- Add ``get_peak`` method for evoked and stc objects by `Denis Engemann`_ - -- Add ``iter_topography`` function for radically simplified custom sensor topography plotting by `Denis Engemann`_ - -- Add field line interpolation by `Eric Larson`_ - -- Add full provenance tracking for epochs and improve ``drop_log`` by `Tal Linzen`_, `Alex Gramfort`_ and `Denis Engemann`_ - -- Add systematic contains method to ``Raw``, ``Epochs`` and ``Evoked`` for channel type membership testing by `Denis Engemann`_ - -- Add fiff unicode writing and reading support by `Denis Engemann`_ - -- Add 3D MEG/EEG field plotting function and evoked method by `Denis Engemann`_ and `Alex Gramfort`_ - -- Add consistent channel-dropping methods to ``Raw``, ``Epochs`` and ``Evoked`` by `Denis Engemann`_ and `Alex Gramfort`_ - -- Add ``equalize_channels`` function to set common channels for a list of ``Raw``, ``Epochs``, or ``Evoked`` objects by `Denis Engemann`_ - -- Add ``plot_events`` function to visually display the paradigm by `Alex Gramfort`_ - -- Improved connectivity circle plot by `Martin Luessi`_ - -- Add ability to anonymize measurement info by `Eric Larson`_ - -- Add callback to connectivity circle plot to isolate connections to clicked nodes by `Roan LaPlante`_ - -- Add ability to add patch information to source spaces by `Eric Larson`_ - -- Add ``split_label`` function to divide labels into multiple parts by `Christian Brodbeck`_ - -- Add ``color`` attribute to ``Label`` objects by `Christian Brodbeck`_ - -- Add ``max`` mode for ``extract_label_time_course`` by `Mads Jensen`_ - -- Add ``rename_channels`` function to change channel names and types in info object by `Dan Wakeman`_ and `Denis Engemann`_ - -- Add ``compute_ems`` function to extract the time course of experimental effects by `Denis Engemann`_, `Sébastien Marti`_ and `Alex Gramfort`_ - -- Add option to expand Labels defined in a source space to the original surface (``Label.fill()``) by `Christian Brodbeck`_ - -- GUIs can be invoked from the command line using ``$ mne coreg`` and ``$ mne kit2fiff`` by `Christian Brodbeck`_ - -- Add ``add_channels_epochs`` function to combine different recordings at the Epochs level by `Christian Brodbeck`_ and `Denis Engemann`_ - -- Add support for EGI Netstation simple binary files by `Denis Engemann`_ - -- Add support for treating arbitrary data (numpy ndarray) as a Raw instance by `Eric Larson`_ - -- Support for parsing the EDF+ annotation channel by `Martin Billinger`_ - -- Add EpochsArray constructor for creating epochs from numpy arrays by `Denis Engemann`_ and `Federico Raimondo`_ - -- Add connector to FieldTrip realtime client by `Mainak Jas`_ - -- Add ``color`` and ``event_id`` with legend options to ``plot_events`` in viz.py by `Cathy Nangini`_ - -- Add ``events_list`` parameter to ``mne.concatenate_raws`` to concatenate events corresponding to runs by `Denis Engemann`_ - -- Add ``read_ch_connectivity`` function to read FieldTrip neighbor template .mat files and obtain sensor adjacency matrices by `Denis Engemann`_ - -- Add display of head in helmet from -trans.fif file to check coregistration quality by `Mainak Jas`_ - -- Add ``raw.add_events`` to allow adding events to a raw file by `Eric Larson`_ - -- Add ``plot_image`` method to Evoked object to display data as images by `Jean-Remi King`_ and `Alex Gramfort`_ and `Denis Engemann`_
- -- Add BCI demo with CSP on motor imagery by `Martin Billinger`_ - -- New ICA API with unified methods for processing ``Raw``, ``Epochs`` and ``Evoked`` objects by `Denis Engemann`_ (see the sketch below) - -- Apply ICA at the evoked stage by `Denis Engemann`_ - -- New ICA methods for visualizing unmixing quality, artifact detection and rejection by `Denis Engemann`_ - -- Add ``pick_channels`` and ``drop_channels`` mixin class to pick and drop channels from ``Raw``, ``Epochs``, and ``Evoked`` objects by `Andrew Dykstra`_ and `Denis Engemann`_ - -- Add ``EvokedArray`` class to create an Evoked object from an array by `Andrew Dykstra`_ - -- Add ``plot_bem`` method to visualize BEM contours on MRI anatomical images by `Mainak Jas`_ and `Alex Gramfort`_ - -- Add automated ECG detection using cross-trial phase statistics by `Denis Engemann`_ and `Juergen Dammers`_ - -- Add Forward class to succinctly display gain matrix info by `Andrew Dykstra`_ - -- Add reading and writing of split raw files by `Martin Luessi`_ - -- Add OLS regression function by `Tal Linzen`_, `Teon Brooks`_ and `Denis Engemann`_ - -- Add computation of point spread and cross-talk functions for MNE type solutions by `Alex Gramfort`_ and `Olaf Hauk`_ - -- Add mask parameter to ``plot_evoked_topomap`` and ``evoked.plot_topomap`` by `Denis Engemann`_ and `Alex Gramfort`_ - -- Add infomax and extended infomax ICA by `Denis Engemann`_, `Juergen Dammers`_ and `Lukas Breuer`_ and `Federico Raimondo`_ - -- Aesthetically redesign interpolated topography plots by `Denis Engemann`_ and `Alex Gramfort`_ - -- Simplify sensor space time-frequency analysis API with ``tfr_morlet`` function by `Alex Gramfort`_ and `Denis Engemann`_ - -- Add new somatosensory MEG dataset with nice time-frequency content by `Alex Gramfort`_ - -- Add HDF5 write/read support for SourceEstimates by `Eric Larson`_ - -- Add InverseOperator class to display inverse operator info by `Mainak Jas`_ - -- Add ``$ mne report`` command to generate HTML reports of MEG/EEG data analysis pipelines by `Mainak Jas`_, `Alex Gramfort`_ and `Denis Engemann`_ - -- Improve ICA verbosity with regard to rank reduction by `Denis Engemann`_ - -BUG -~~~ - -- Fix incorrect ``times`` attribute when stc was computed using ``apply_inverse`` after decimation at epochs stage for certain, arbitrary sample frequencies by `Denis Engemann`_ - -- Fix corner case error for step-down-in-jumps permutation test (when step-down threshold was high enough to include all clusters) by `Eric Larson`_ - -- Fix selection of total number of components via float when picking ICA sources by `Denis Engemann`_ and `Qunxi Dong`_ - -- Fix writing and reading transforms after modification in measurement info by `Denis Engemann`_ and `Martin Luessi`_ and `Eric Larson`_ - -- Fix pre-whitening / rescaling when estimating ICA on multiple channels without covariance by `Denis Engemann`_ - -- Fix ICA pre-whitening, avoid recomputation when applying ICA to new data by `Denis Engemann`_ - -API -~~~ - -- The minimum numpy version has been increased to 1.6 from 1.4. - -- Epochs object now has a selection attribute to track provenance of selected Epochs. The length of the drop_log attribute is now the same as the length of the original events passed to Epochs. In earlier versions it had the length of the events filtered by event_id. Epochs now also has a plot_drop_log method.
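A minimal sketch of the unified ICA workflow described above, using the method names of the current ``mne.preprocessing.ICA`` API (``fit``, ``get_sources``, ``apply``); the ``max_iter='auto'`` argument is from present-day MNE-Python, ``raw`` is assumed to be any preloaded Raw instance (e.g. the one built in the previous sketch), and the excluded component index is purely illustrative::

    from mne.preprocessing import ICA

    ica = ICA(n_components=3, random_state=0, max_iter="auto")
    ica.fit(raw)                        # decompose the sensor signals

    sources = ica.get_sources(raw)      # component time courses as a Raw-like object
    ica.exclude = [0]                   # mark components to remove (illustrative index)
    raw_clean = ica.apply(raw.copy())   # reconstruct the data without them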
- -- Deprecate Epochs.drop_picks in favor of a new method called drop_channels - -- Deprecate ``labels_from_parc`` and ``parc_from_labels`` in favor of ``read_labels_from_annot`` and ``write_labels_to_annot`` - -- The default of the new add_dist option of ``setup_source_space`` to add patch information will change from False to True in MNE-Python 0.9 - -- Deprecate ``read_evoked`` and ``write_evoked`` in favor of ``read_evokeds`` and ``write_evokeds``. read_evokeds will return all `Evoked` instances in a file by default. - -- Deprecate ``setno`` in favor of ``condition`` in the initialization of an Evoked instance. This affects ``mne.fiff.Evoked`` and ``read_evokeds``, but not ``read_evoked``. - -- Deprecate ``mne.fiff`` module, use ``mne.io`` instead, e.g. ``mne.io.Raw`` instead of ``mne.fiff.Raw``. - -- Pick functions (e.g., ``pick_types``) are now in the mne namespace (e.g. use ``mne.pick_types``). - -- Deprecated ICA methods specific to one container type. Use ICA.fit, ICA.get_sources, ICA.apply and ``ICA.plot_*`` for processing Raw, Epochs and Evoked objects. - -- The default smoothing method for ``mne.stc_to_label`` will change in v0.9, and the old method is deprecated. - -- By default, for ICA the maximum number of PCA components equals the number of channels passed. The number of PCA components used to reconstruct the sensor space signals now defaults to the maximum number of PCA components estimated. - -Authors -~~~~~~~ - -The committer list for this release is the following (preceded by number of commits): - -* 418 Denis A. Engemann -* 284 Alexandre Gramfort -* 242 Eric Larson -* 155 Christian Brodbeck -* 144 Mainak Jas -* 49 Martin Billinger -* 49 Andrew Dykstra -* 44 Tal Linzen -* 37 Dan G. Wakeman -* 36 Martin Luessi -* 26 Teon Brooks -* 20 Cathy Nangini -* 15 Hari Bharadwaj -* 15 Roman Goj -* 10 Ross Maddox -* 9 Marmaduke Woodman -* 8 Praveen Sripad -* 8 Tanay -* 8 Roan LaPlante -* 5 Saket Choudhary -* 4 Nick Ward -* 4 Mads Jensen -* 3 Olaf Hauk -* 3 Brad Buran -* 2 Daniel Strohmeier -* 2 Federico Raimondo -* 2 Alan Leggitt -* 1 Jean-Remi King -* 1 Matti Hämäläinen diff --git a/doc/changes/0.9.inc b/doc/changes/0.9.inc deleted file mode 100644 index 0d8cc565472..00000000000 --- a/doc/changes/0.9.inc +++ /dev/null @@ -1,239 +0,0 @@ -..
_changes_0_9: - -Version 0.9 (2015-05-22) ------------------------- - -Changelog -~~~~~~~~~ - -- Add support for mayavi figures in ``add_section`` method in Report by `Mainak Jas`_ - -- Add extraction of volumes of interest from FreeSurfer segmentation and setup as a volume source space by `Alan Leggitt`_ - -- Add support to combine source spaces of different types by `Alan Leggitt`_ - -- Add support for source estimate for mixed source spaces by `Alan Leggitt`_ - -- Add ``SourceSpaces.save_as_volume`` method by `Alan Leggitt`_ - -- Automatically compute proper box sizes when generating layouts on the fly by `Marijn van Vliet`_ - -- Average evoked topographies across time points by `Denis Engemann`_ - -- Add option to Report class to save images as vector graphics (SVG) by `Denis Engemann`_ - -- Add events count to ``mne.viz.plot_events`` by `Denis Engemann`_ - -- Add support for stereotactic EEG (sEEG) channel type by `Marmaduke Woodman`_ - -- Add support for montage files by `Denis Engemann`_, `Marijn van Vliet`_, `Jona Sassenhagen`_, `Alex Gramfort`_ and `Teon Brooks`_ - -- Add support for spatiotemporal permutation clustering on sensors by `Denis Engemann`_ - -- Add support for multitaper time-frequency analysis by `Hari Bharadwaj`_ - -- Add Stockwell (S) transform for time-frequency representations by `Denis Engemann`_ and `Alex Gramfort`_ - -- Add reading and writing support for time frequency data (AverageTFR objects) by `Denis Engemann`_ - -- Add reading and writing support for digitizer data, and function for adding dig points to info by `Teon Brooks`_ - -- Add ``plot_projs_topomap`` method to ``Raw``, ``Epochs`` and ``Evoked`` objects by `Teon Brooks`_ - -- Add EEG (based on spherical splines) and MEG (based on field interpolation) bad channel interpolation method to ``Raw``, ``Epochs`` and ``Evoked`` objects by `Denis Engemann`_ and `Mainak Jas`_ - -- Add parameter to ``whiten_evoked``, ``compute_whitener`` and ``prepare_noise_cov`` to set the exact rank by `Martin Luessi`_ and `Denis Engemann`_ - -- Add fiff I/O for processing history and MaxFilter info by `Denis Engemann`_ and `Eric Larson`_ - -- Add automated regularization with support for multiple sensor types to ``compute_covariance`` by `Denis Engemann`_ and `Alex Gramfort`_ - -- Add ``Evoked.plot_white`` method to diagnose the quality of the estimated noise covariance and its impact on spatial whitening by `Denis Engemann`_ and `Alex Gramfort`_ - -- Add ``mne.evoked.grand_average`` function to compute grand average of Evoked data while interpolating bad EEG channels if necessary by `Mads Jensen`_ and `Alex Gramfort`_ - -- Improve EEG referencing support and add support for bipolar referencing by `Marijn van Vliet`_ and `Alex Gramfort`_ - -- Enable TFR calculation on Evoked objects by `Eric Larson`_ - -- Add support for combining Evoked datasets with arbitrary weights (e.g., for oddball paradigms) by `Eric Larson`_ and `Alex Gramfort`_ - -- Add support for concatenating a list of Epochs objects by `Denis Engemann`_ - -- Labels support subtraction (``label_1 - label_2``) by `Christian Brodbeck`_ - -- Add GeneralizationAcrossTime object with support for cross-condition generalization by `Jean-Remi King`_ and `Denis Engemann`_ - -- Add support for single dipole fitting by `Eric Larson`_ - -- Add support for spherical models in forward calculations by `Eric Larson`_ - -- Add support for SNR estimation by `Eric Larson`_ - -- Add support for Savitzky-Golay filtering of Evoked and Epochs by `Eric Larson`_ - -- Add support for adding an
empty reference channel to data by `Teon Brooks`_ - -- Add reader function ``mne.io.read_raw_fif`` for Raw FIF files by `Teon Brooks`_ - -- Add example of creating MNE objects from arbitrary data and NEO files by `Jaakko Leppakangas`_ - -- Add ``plot_psd`` and ``plot_psd_topomap`` methods to epochs by `Yousra Bekhti`_, `Eric Larson`_ and `Denis Engemann`_ - -- ``evoked.pick_types``, ``epochs.pick_types``, and ``tfr.pick_types`` added by `Eric Larson`_ - -- ``rename_channels`` and ``set_channel_types`` added as methods to ``Raw``, ``Epochs`` and ``Evoked`` objects by `Teon Brooks`_ - -- Add RAP-MUSIC inverse method by `Yousra Bekhti`_ and `Alex Gramfort`_ - -- Add ``evoked.as_type`` to allow remapping data in MEG channels to virtual magnetometer or gradiometer channels by `Mainak Jas`_ - -- Add ``mne.Report.add_bem_to_section``, ``mne.Report.add_htmls_to_section`` methods to :class:`mne.Report` by `Teon Brooks`_ - -- Add support for KIT epochs files with ``read_epochs_kit`` by `Teon Brooks`_ - -- Add whitening plots for evokeds to ``mne.Report`` by `Mainak Jas`_ - -- Add ``DigMontage`` class and reader to interface with digitization info by `Teon Brooks`_ and `Christian Brodbeck`_ - -- Add ``set_montage`` method to the ``Raw``, ``Epochs``, and ``Evoked`` objects by `Teon Brooks`_ and `Denis Engemann`_ - -- Add support for capturing sensor positions when clicking on an image by `Chris Holdgraf`_ - -- Add support for custom sensor positions when creating Layout objects by `Chris Holdgraf`_ - -BUG -~~~ - -- Fix energy conservation for STFT with tight frames by `Daniel Strohmeier`_ - -- Fix incorrect data matrix when tfr was plotted with parameters ``tmin``, ``tmax``, ``fmin`` and ``fmax`` by `Mainak Jas`_ - -- Fix channel names in topomaps by `Alex Gramfort`_ - -- Fix mapping of ``l_trans_bandwidth`` (to low frequency) and ``h_trans_bandwidth`` (to high frequency) in ``_BaseRaw.filter`` by `Denis Engemann`_ - -- Fix scaling source spaces when distances have to be recomputed by `Christian Brodbeck`_ - -- Fix repeated samples in client to FieldTrip buffer by `Mainak Jas`_ and `Federico Raimondo`_ - -- Fix highpass and lowpass units read from BrainVision vhdr files by `Alex Gramfort`_ - -- Add missing attributes for BrainVision and KIT systems needed for resample by `Teon Brooks`_ - -- Fix file extensions of SSP projection files written by mne commands (from _proj.fif to -proj.fif) by `Alex Gramfort`_ - -- Generating EEG layouts no longer requires digitization points by `Marijn van Vliet`_ - -- Add missing attributes to BTI, KIT, and BrainVision by `Eric Larson`_ - -- The API changes to the EDF, BrainVision, and EGI readers break backwards compatibility when importing EEG data by `Teon Brooks`_ - -- Fix bug in ``mne.viz.plot_topo`` if ylim was passed for single sensor layouts by `Denis Engemann`_ - -- Average reference projections will no longer be automatically added after applying a custom EEG reference by `Marijn van Vliet`_ - -- Fix picks argument to filter in n dimensions (affects FilterEstimator), and highpass filter in FilterEstimator by `Mainak Jas`_ - -- Fix beamformer code LCMV/DICS for CTF data with reference channels by `Denis Engemann`_ and `Alex Gramfort`_ - -- Fix scalings for bad EEG channels in ``mne.viz.plot_topo`` by `Marijn van Vliet`_ - -- Fix EGI reading when no events are present by `Federico Raimondo`_ - -- Add functionality to determine plot limits automatically or by data percentiles by `Mark Wronkiewicz`_ - -- Fix bug in mne.io.edf where the channel offsets were omitted in
the voltage calculations by `Teon Brooks`_ - -- Decouple section ordering in command-line from python interface for mne-report by `Mainak Jas`_ - -- Fix bug with ICA resetting by `Denis Engemann`_ - -API -~~~ - -- apply_inverse functions have a new boolean parameter ``prepared`` which saves computation time by calling ``prepare_inverse_operator`` only if it is False - -- find_events and read_events functions have a new parameter ``mask`` to set some bits to a don't care state by `Teon Brooks`_ - -- New channels module including layouts, electrode montages, and neighbor definitions of sensors which deprecates ``mne.layouts`` by `Denis Engemann`_ - -- ``read_raw_brainvision``, ``read_raw_edf``, ``read_raw_egi`` all use a standard montage import by `Teon Brooks`_ - -- Fix missing calibration factors for ``mne.io.egi.read_raw_egi`` by `Denis Engemann`_ and `Federico Raimondo`_ - -- Allow multiple filename patterns as a list (e.g., \*raw.fif and \*-eve.fif) to be parsed by mne report in ``Report.parse_folder()`` by `Mainak Jas`_ - -- ``read_hsp``, ``read_elp``, and ``write_hsp``, ``write_mrk`` were removed and made private by `Teon Brooks`_ - -- When computing the noise covariance or MNE inverse solutions, the rank is estimated empirically using more sensitive thresholds, which stabilizes results by `Denis Engemann`_ and `Eric Larson`_ and `Alex Gramfort`_ - -- Raw FIFF files can be preloaded after class instantiation using ``raw.preload_data()`` - -- Add ``label`` parameter to ``apply_inverse`` by `Teon Brooks`_ - -- Deprecated ``label_time_courses`` for ``in_label`` method in `SourceEstimate` by `Teon Brooks`_ - -- Deprecated ``as_data_frame`` for ``to_data_frame`` by `Chris Holdgraf`_ - -- Add ``transform``, ``unit`` parameters to ``read_montage`` by `Teon Brooks`_ - -- Deprecated ``fmin, fmid, fmax`` in stc.plot and added ``clim`` by `Mark Wronkiewicz`_ - -- Use ``scipy.signal.welch`` instead of matplotlib.psd inside ``compute_raw_psd`` and ``compute_epochs_psd`` by `Yousra Bekhti`_ `Eric Larson`_ and `Denis Engemann`_. As a consequence, ``Raw.plot_raw_psds`` has been deprecated. - -- ``Raw`` instances returned by ``mne.forward.apply_forward_raw`` now always have times starting from - zero to be consistent with all other ``Raw`` instances. To get the former ``start`` and ``stop`` times, - use ``raw.first_samp / raw.info['sfreq']`` and ``raw.last_samp / raw.info['sfreq']``. - -- ``pick_types_evoked`` has been deprecated in favor of ``evoked.pick_types``. - -- Deprecated changing the sensor type of channels in ``rename_channels`` by `Teon Brooks`_ - -- CUDA is no longer initialized at module import, but only when first used. - -- ``add_figs_to_section`` and ``add_images_to_section`` now have a ``textbox`` parameter to add comments to the image by `Teon Brooks`_ - -- Deprecated ``iir_filter_raw`` for ``fit_iir_model_raw``. - -- Add ``montage`` parameter to the ``create_info`` function to create the info using montages by `Teon Brooks`_ - -Authors -~~~~~~~ - -The committer list for this release is the following (preceded by number of commits): - -* 515 Eric Larson -* 343 Denis A. 
Engemann -* 304 Alexandre Gramfort -* 300 Teon Brooks -* 142 Mainak Jas -* 119 Jean-Remi King -* 77 Alan Leggitt -* 75 Marijn van Vliet -* 63 Chris Holdgraf -* 57 Yousra Bekhti -* 49 Mark Wronkiewicz -* 44 Christian Brodbeck -* 30 Jona Sassenhagen -* 29 Hari Bharadwaj -* 27 Clément Moutard -* 24 Ingoo Lee -* 18 Marmaduke Woodman -* 16 Martin Luessi -* 10 Jaakko Leppakangas -* 9 Andrew Dykstra -* 9 Daniel Strohmeier -* 7 kjs -* 6 Dan G. Wakeman -* 5 Federico Raimondo -* 3 Basile Pinsard -* 3 Christoph Dinh -* 3 Hafeza Anevar -* 2 Martin Billinger -* 2 Roan LaPlante -* 1 Manoj Kumar -* 1 Matt Tucker -* 1 Romain Trachel -* 1 mads jensen -* 1 sviter diff --git a/doc/changes/1.0.inc b/doc/changes/1.0.inc deleted file mode 100644 index 0bfca8f5d97..00000000000 --- a/doc/changes/1.0.inc +++ /dev/null @@ -1,325 +0,0 @@ -.. NOTE: we use cross-references to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. NOTE: changes from first-time contributors should be added to the TOP of - the relevant section (Enhancements / Bugs / API changes), and should look - like this (where xxxx is the pull request number): - - - description of enhancement/bugfix/API change (:gh:`xxxx` by - :newcontrib:`Firstname Lastname`) - - Also add a corresponding entry for yourself in doc/changes/names.inc - -.. _changes_1_0_0: - -Version 1.0.0 (2022-03-21) --------------------------- - -Enhancements -~~~~~~~~~~~~ - -- Add ``use_orig_time`` option to :meth:`mne.Annotations.crop`. 
(:gh:`10396` by :newcontrib:`Michiru Kaneda`) - -- Speed up :func:`mne.preprocessing.annotate_muscle_zscore`, :func:`mne.preprocessing.annotate_movement`, and :func:`mne.preprocessing.annotate_nan` through better annotation creation (:gh:`10089` by :newcontrib:`Senwen Deng`) - -- Fix some unused variables in time_frequency_erds.py example (:gh:`10076` by :newcontrib:`Jan Zerfowski`) - -- Document ``BAD_ACQ_SKIP`` annotation behavior (:gh:`10054` by :newcontrib:`Etienne de Montalivet`) - -- Add link to related ``MNE-ARI`` package (:gh:`10174` by :newcontrib:`John Veillette`) - -- :func:`mne.time_frequency.psd_array_multitaper` can now return complex results per-taper when specifying ``output='complex'`` (:gh:`10307` by `Mikołaj Magnuski`_) - -- :func:`mne.time_frequency.tfr_array_multitaper` can now return results for ``output='phase'`` instead of an error (:gh:`10281` by `Mikołaj Magnuski`_) - -- Add show local maxima toggling button to :func:`mne.gui.locate_ieeg` (:gh:`9952` by `Alex Rockhill`_) - -- Show boundaries in :func:`mne.gui.locate_ieeg` (:gh:`10379` by `Eric Larson`_) - -- Add argument ``cval`` to :func:`mne.transforms.apply_volume_registration` to set interpolation values outside the image domain (:gh:`10379` by `Eric Larson`_) - -- Improve docstring of :class:`mne.Info` and add attributes that were not covered (:gh:`9922` by `Mathieu Scheltienne`_) - -- Add an alternate, manual procedure for aligning a CT to an MR procedure to :ref:`tut-ieeg-localize` (:gh:`9978` by `Alex Rockhill`_) - -- Improve docstring of export functions :func:`mne.export.export_raw`, :func:`mne.export.export_epochs`, :func:`mne.export.export_evokeds`, :func:`mne.export.export_evokeds_mff` and issue a warning when there are unapplied projectors (:gh:`9994` by `Mathieu Scheltienne`_) - -- Add support for reading haemoglobin fNIRS data to :func:`mne.io.read_raw_snirf` (:gh:`9929` by `Robert Luke`_) - -- Add ``fill_hole_size`` keyword argument to :func:`mne.viz.Brain.add_volume_labels` to close holes in the mesh (:gh:`10024` by `Alex Rockhill`_) - -- Changed :class:`mne.Epochs` and :class:`mne.Evoked` to have a more concise ``__repr__`` to improve interactive MNE usage in Python Interactive Console, IDEs, and debuggers when many events are handled. 
(:gh:`10042` by `Jan Sosulski`_) - -- Improve docstring of ``events`` arguments and cross-referencing to :term:`events` (:gh:`10056` by `Mathieu Scheltienne`_) - -- Speed up repeated surface-smoothing operation (e.g., in repeated calls to :meth:`stc.plot() `) (:gh:`10077` by `Eric Larson`_) - -- Add ``verbose`` parameter to :func:`mne.io.Raw.load_bad_channels` and log information on how bad channels are updated (:gh:`10102` by `Stefan Appelhoff`_) - -- Add ``infer_type`` argument to :func:`mne.io.read_raw_edf` and :func:`mne.io.read_raw_bdf` to automatically infer channel types from channel labels (:gh:`10058` by `Clemens Brunner`_) - -- Reduce the time it takes to generate a :class:`mne.io.Raw`, :class:`~mne.Epochs`, or :class:`~mne.preprocessing.ICA` figure if a ``scalings`` parameter is provided (:gh:`10109` by `Richard Höchenberger`_ and `Eric Larson`_) - -- :meth:`mne.Report.add_raw` gained a new ``scalings`` parameter to provide custom data scalings for the butterfly plots (:gh:`10109` by `Richard Höchenberger`_) - -- The ``butterfly`` parameter of :meth:`mne.Report.add_raw` now also accepts numbers to specify how many segments to plot (:gh:`10115` by `Richard Höchenberger`_) - -- The ``psd`` parameter of :meth:`mne.Report.add_epochs` now also accepts numbers to specify the signal duration used for PSD calculation (:gh:`10119` by `Richard Höchenberger`_) - -- Drastically speed up butterfly plot generation in :meth:`mne.Report.add_raw`. We now don't plot annotations anymore; however, we feel that the speed improvements justify this change, also considering the annotations were of limited use in the displayed one-second time slices anyway (:gh:`10114`, :gh:`10116` by `Richard Höchenberger`_) - -- In :class:`mne.Report`, limit the width of automatically generated figures to a maximum of 850 pixels (450 pixels for :class:`mne.SourceEstimate` plots), and the resolution to 100 DPI to reduce file size, memory consumption, and – in some cases like :meth:`mne.Report.add_stc` – processing time (:gh:`10126`, :gh:`10129`, :gh:`10135`, :gh:`10142` by `Richard Höchenberger`_) - -- :class:`~mne.Epochs` metadata tables are now included in :class:`mne.Report` (:gh:`10166` by `Richard Höchenberger`_) - -- :func:`mne.get_head_surf` and :func:`mne.dig_mri_distances` gained a new parameter, ``on_defects``, controlling how to handle surfaces with topological defects (:gh:`10175` by `Richard Höchenberger`_) - -- :meth:`mne.Report.add_epochs` gained a new parameter, ``drop_log_ignore``, to control which drop reasons to omit when creating the drop log plot (:gh:`10182` by `Richard Höchenberger`_) - -- :meth:`mne.Epochs.plot_drop_log` now also includes the absolute number of epochs dropped in the title (:gh:`10186` by `Richard Höchenberger`_) - -- Add a button to show the maximum intensity projection in :func:`mne.gui.locate_ieeg` (:gh:`10185` by `Alex Rockhill`_) - -- Annotations from a :class:`~mne.io.Raw` object are now preserved by the :class:`~mne.Epochs` constructor and are supported when saving Epochs (:gh:`9969` and :gh:`10019` by `Adam Li`_) - -- Add a checkbox to show and hide the MEG helmet in the coregistration GUI (:gh:`10200` by `Guillaume Favelier`_) - -- The coregistration GUI (instantiated via :func:`mne.gui.coregistration`) gained a new attribute, ``coreg``, to access the :class:`mne.coreg.Coregistration` instance used by the GUI (:gh:`10255` by `Richard Höchenberger`_) - -- Add marching cubes display of head if :func:`mne.bem.make_scalp_surfaces` has not computed or the recon-all hasn't 
finished (:gh:`10202` by `Alex Rockhill`_) - -- The default interaction style of :func:`mne.gui.coregistration` and :func:`mne.viz.plot_alignment` has been changed to ``'terrain'``, which keeps one axis fixed and should make interactions with the 3D scene more predictable (:gh:`9972`, :gh:`10206` by `Richard Höchenberger`_) - -- :func:`mne.gui.coregistration` now uses the proper widget style for push buttons, making for a more native feel of the application (:gh:`10220` by `Richard Höchenberger`_ and `Guillaume Favelier`_) - -- :class:`mne.coreg.Coregistration`, :func:`mne.scale_bem`, and :func:`mne.scale_mri` gained a new parameter, ``on_defects``, controlling how to handle topological defects (:gh:`10230`, :gh:`10249` by `Richard Höchenberger`_) - -- :class:`mne.coreg.Coregistration` gained a new attribute, ``fiducials``, allowing for convenient retrieval of the MRI fiducial points (:gh:`10243`, by `Richard Höchenberger`_) - -- Added plotting points to represent contacts on the max intensity projection plot for :func:`mne.gui.locate_ieeg` (:gh:`10212` by `Alex Rockhill`_) - -- Add lines in 3D and on the maximum intensity projection when more than two electrode contacts are selected to aid in identifying that contact for :func:`mne.gui.locate_ieeg` (:gh:`10212` by `Alex Rockhill`_) - -- Add a ``block`` parameter to :class:`mne.viz.Brain` and the UI of :class:`mne.coreg.Coregistration` to prevent the windows from closing immediately when running in a non-interactive Python session (:gh:`10222` by `Guillaume Favelier`_) - -- All methods of :class:`mne.Report` with a ``tags`` parameter now also accept a single tag passed as a string (previously, you needed to pass a tuple of strings, even for a single tag) (:gh:`10183`, by `Richard Höchenberger`_) - -- :meth:`mne.Report.add_trans` has gained a new parameter, ``alpha``, to control the level of opacity of the rendered head (:gh:`10247`, by `Richard Höchenberger`_) - -- The new convenience function :func:`mne.event.match_event_names` allows for straightforward checking if a specific event name or a group of events is present in a collection of event names (:gh:`10233` by `Richard Höchenberger`_) - -- :meth:`Epochs.plot` is now supported by the pyqtgraph backend (:gh:`10297` by `Martin Schulz`_) - -- The ``map_surface`` parameter of :meth:`mne.viz.Brain.add_foci` now works and allows you to add foci to a rendering of a brain that are positioned at the vertex of the mesh closest to the given coordinates (:gh:`10299` by `Marijn van Vliet`_) - -- :meth:`mne.preprocessing.ICA.plot_sources()` is now also supported by the ``qt`` backend (:gh:`10330` by `Martin Schulz`_) - -- Added :meth:`mne.viz.Brain.add_dipole` and :meth:`mne.viz.Brain.add_forward` to plot dipoles on a brain as well as :meth:`mne.viz.Brain.remove_dipole` and :meth:`mne.viz.Brain.remove_forward` (:gh:`10373` by `Alex Rockhill`_) - -- Made anterior/posterior slice scrolling in :func:`mne.gui.locate_ieeg` possible for users without page up and page down buttons by allowing angle bracket buttons to be used (:gh:`10384` by `Alex Rockhill`_) - -- Add support for ``theme='auto'`` for automatic dark-mode support in :meth:`raw.plot() ` and related functions and methods when using the ``'qt'`` backend (:gh:`10417` by `Eric Larson`_) - -- Add support for ``MNE_3D_OPTION_THEME`` and ``MNE_BROWSER_THEME`` to use ``['light', 'dark', 'auto']`` or a custom theme path in :class:`mne.viz.Brain` and :meth:`raw.plot() ` (and related functions and methods when using the ``'qt'`` backend), respectively
(:gh:`10418` by `Eric Larson`_) - -- Add support for passing time-frequency data to :func:`mne.stats.spatio_temporal_cluster_test` and :func:`mne.stats.spatio_temporal_cluster_1samp_test` and added an example to :ref:`tut-cluster-spatiotemporal-sensor` (:gh:`10384` by `Alex Rockhill`_) - -- Add support for reading optical density fNIRS data to :func:`mne.io.read_raw_snirf` (:gh:`10408` by `Robert Luke`_) - -- Added :func:`mne.source_space.get_decimated_surfaces` to extract subsurfaces from a SourceSpaces instance or a .fif file (:gh:`10421` by `Joshua Teves`_) - -Bugs -~~~~ -- Fix bug with :func:`mne.io.read_raw_nihon` where latin-1 channels could not be read (:gh:`10429` by :newcontrib:`Matthias Eberlein`) - -- Fix bug with :func:`mne.io.read_raw_nihon` so that it handles duplicates in channel names (:gh:`10431` by :newcontrib:`Matthias Eberlein`) - -- Fix bug in :func:`mne.io.read_raw_egi` where the ``meas_date`` in a ``rawMFF`` object was incorrectly assigned. The function now converts the ``meas_date`` to UTC and provides a UTC offset key, i.e. ``raw.info['utc_offset']``. (:gh:`10304` by :newcontrib:`Scott Huberty`) - -- Fix datetime conversion for tmin/tmax=None cases in :meth:`mne.Annotations.crop`. Allow the use of float and None simultaneously for :meth:`mne.Annotations.crop`. (:gh:`10361` by :newcontrib:`Michiru Kaneda`) - -- Add Shift_JIS and mu support in :func:`mne.io.read_raw_edf` (:gh:`10356` by :newcontrib:`Michiru Kaneda`) - -- Teach :func:`mne.io.read_raw_bti` to use its ``eog_ch`` parameter (:gh:`10093` by :newcontrib:`Adina Wagner`) - -- Fix use of arguments in :func:`numpy.loadtxt` (:gh:`10189` by :newcontrib:`Federico Zamberlan`) - -- Fix documentation of options in :func:`mne.stc_near_sensors` (:gh:`` by :newcontrib:`Nikolai Chapochnikov`) - -- :func:`mne.time_frequency.tfr_array_multitaper` now returns results per taper when ``output='complex'`` (:gh:`10281` by `Mikołaj Magnuski`_) - -- Fix default of :func:`mne.io.Raw.plot` to be ``use_opengl=None``, which will act like False unless ``MNE_BROWSER_USE_OPENGL=true`` is set in the user configuration (:gh:`9957` by `Eric Larson`_) - -- Fix bug with :class:`mne.Report` where figures were saved with ``bbox_inches='tight'``, which led to inconsistent sizes in sliders (:gh:`9966` by `Eric Larson`_) - -- When opening a saved report and saving it to a different filename again, don't change ``Report.fname`` to avoid a regression when using :func:`~mne.open_report` as a context manager (:gh:`9998` by `Marijn van Vliet`_) - -- Fix bug in :func:`mne.make_forward_solution` where sensor-sphere geometry check was incorrect (:gh:`9968` by `Eric Larson`_) - -- Use single char alphanumeric suffix when renaming long channel names (over 15 characters) when writing to FIF format.
(:gh:`10002` by `Luke Bloy`_) - -- Add argument ``overwrite`` to :func:`mne.export.export_raw`, :func:`mne.export.export_epochs`, :func:`mne.export.export_evokeds` and :func:`mne.export.export_evokeds_mff` (:gh:`9975` by `Mathieu Scheltienne`_) - -- :func:`mne.gui.coregistration` and the ``mne coreg`` command didn't respect the ``interaction`` parameter (:gh:`9972` by `Richard Höchenberger`_) - -- Fix incorrect projection of source space onto white matter surface instead of pial in :ref:`tut-working-with-ecog` (:gh:`9980` by `Alex Rockhill`_) - -- Fix channel type support when reading from EEGLAB ``.set`` format with :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` (:gh:`9990` by `Mathieu Scheltienne`_) - -- Fix suboptimal alignment using :func:`mne.transforms.compute_volume_registration` (:gh:`9991` by `Alex Rockhill`_) - -- Only warn if header is missing in BrainVision files instead of raising an error (:gh:`10001` by `Clemens Brunner`_) - -- Add argument ``overwrite`` to `mne.preprocessing.ICA.save` to check for existing file (:gh:`10004` by `Mathieu Scheltienne`_) - -- :class:`mne.Report` now raises an exception if invalid tags were passed (:gh:`9970` by `Richard Höchenberger`_) - -- Fix bug in :func:`mne.get_montage_volume_labels` that set the maximum number of voxels to be included too low, causing unwanted capping of the included voxel labels (:gh:`10021` by `Alex Rockhill`_) - -- Fix annotation cropping and I/O roundtrip when there is no measurement date available (:gh:`10040` by `Mathieu Scheltienne`_ and `Alex Gramfort`_). - -- Fix bug where :meth:`raw.crop() ` from the beginning of the instance could break annotations and ``raw.first_samp`` (:gh:`10358` by `Eric Larson`_) - -- :func:`~mne.sys_info` output now contains the installed version of ``pooch``, too; this output had been accidentally removed previously (:gh:`10047` by `Richard Höchenberger`_) - -- Fix VTK version extraction in :func:`mne.sys_info` (:gh:`10399` by `Eric Larson`_) - -- Fix automatic channel type detection from channel labels in :func:`mne.io.read_raw_edf` and :func:`mne.io.read_raw_bdf` (and disable this functionality from :func:`mne.io.read_raw_gdf`) (:gh:`10058` by `Clemens Brunner`_) - -- Fix :func:`~mne.stats.permutation_cluster_1samp_test` to properly handle 2-dimensional data in combination with TFCE (:gh:`10073` by `Richard Höchenberger`_) - -- Fix channel grouping error when using "butterfly mode" with :meth:`mne.io.Raw.plot` (:gh:`10087` by `Daniel McCloy`_) - -- Fix inconsistent behavior of ``mne.preprocessing.annotate_*`` functions by making them all return :class:`mne.Annotations` objects with the ``orig_time`` attribute set to ``raw.info["meas_date"]`` (:gh:`10067` and :gh:`10118` by `Stefan Appelhoff`_, `Eric Larson`_, and `Alex Gramfort`_) - -- Fix bug that appears during automatic calculation of the colormap of `mne.viz.Brain` when data values of ``fmin`` and ``fmax`` are too close (:gh:`10074` by `Guillaume Favelier`_) - -- We now display a scrollbar in the tags dropdown of a `~mne.Report` if many tags have been added, granting access to all tags instead of "hiding" them below the bottom of the page (:gh:`10082` by `Richard Höchenberger`_) - -- Creating :class:`mne.Epochs` now provides clearer logging (less ambiguous, no duplicates) when the ``preload`` and/or ``metadata`` parameters are set (:gh:`10112` by `Stefan Appelhoff`_) - -- Fix bug with :class:`mne.Epochs` where save-load round-trip with FIF would cause :meth:`mne.Epochs.apply_baseline` to no longer work
(:gh:`10177` by `Eric Larson`_) - -- Fix functions by adding missing ``overwrite`` parameters: :func:`mne.write_events`, :func:`mne.write_cov`, :func:`mne.write_evokeds`, :meth:`mne.SourceEstimate.save`, :func:`mne.minimum_norm.write_inverse_operator`, :func:`mne.write_proj`, and related methods (:gh:`10127` by `Eric Larson`_) - -- Fix bug with :func:`mne.transforms.compute_volume_registration` and :func:`mne.compute_source_morph` (volumetric) where the smoothing factors were not scaled based on ``zooms`` (:gh:`10132` by `Eric Larson`_) - -- Remove repeated logging output when overwriting an existing `~mne.io.Raw` file (:gh:`10095` by `Richard Höchenberger`_ and `Stefan Appelhoff`_) - -- In the plots generated by :meth:`mne.Report.add_stc`, we now only add 5 labels to the color bar to reduce the chance of overlap, which could previously cause the labels to become unreadable (:gh:`10135` by `Richard Höchenberger`_) - -- :meth:`mne.Report.add_trans` now allows you to add sensor alignment plots for head surfaces that have topological defects (:gh:`10175` by `Richard Höchenberger`_) - -- :meth:`mne.Report.add_trans` now also works if no digitization points are present in the data (:gh:`10176` by `Jeff Stout`_) - -- Argument ``verbose`` is now respected by dataset fetching (:gh:`10210` by `Mathieu Scheltienne`_) - -- Fix bug with :func:`mne.io.read_raw_hitachi` where empty ``Comment`` descriptions were not handled properly (:gh:`10235` by `Eric Larson`_) - -- Fix bug with input validation of low-level filtering functions (:gh:`10267` by `Eric Larson`_) - -- :func:`mne.gui.coregistration` now works with surfaces containing topological defects (:gh:`10230`, by `Richard Höchenberger`_) - -- Fix bug with :func:`mne.io.read_raw_nirx` being unable to read measurement dates recorded on systems with German (de_DE), French (fr_FR), and Italian (it_IT) locales (:gh:`10277` by `Eric Larson`_) - -- Fix bug with projector normalization checks that were too sensitive, and improve warning (:gh:`10292` by `Eric Larson`_) - -- Fix bug with :func:`mne.viz.plot_alignment` where head-coordinate source spaces (e.g., from a forward solution) were not properly plotted (:gh:`10309` by `Eric Larson`_) - -- :func:`mne.read_trans` and :func:`mne.io.read_fiducials` now correctly expand ``~`` in the provided path (i.e., to the user's home directory) (:gh:`10265`, :gh:`10415`, by `Richard Höchenberger`_) - -- :func:`mne.find_events` now uses ``first_samp`` and not ``0`` for initial event when using ``initial_value`` (:gh:`10289`, by `Alex Gramfort`_) - -- Fix bug with :func:`mne.channels.make_standard_montage` for ``'standard*'``, ``'mgh*'``, and ``'artinis*'`` montages where the points were incorrectly scaled and fiducials incorrectly set away from the correct values for use with the ``fsaverage`` subject (:gh:`10324` by `Eric Larson`_) - -- Fix bug with :meth:`mne.Report.add_figure` where figures generated externally were closed and possibly resized during render (:gh:`10342` by `Eric Larson`_) - -- Fix bug with :func:`mne.viz.plot_sparse_source_estimates` where the return value was incorrect (:gh:`10347` by `Eric Larson`_) - -- Fix plotting bug in :ref:`ex-electrode-pos-2d` and make view look more natural in :ref:`ex-movement-detect` (:gh:`10313`, by `Alex Rockhill`_) - -- Fix bug with blank 3D rendering with MESA software rendering (:gh:`10400` by `Eric Larson`_) - -- Fix a bug in :func:`mne.gui.locate_ieeg` where 2D lines on slice plots failed to update and were shown when not in maximum projection mode (:gh:`10335`, 
by `Alex Rockhill`_) - -- Fix misleading color scale in :ref:`tut-cluster-tfr` for the plotting of cluster F-statistics (:gh:`10393` by `Alex Rockhill`_) - -- Fix baseline removal using ``remove_dc=True`` in :meth:`raw.plot() ` for data containing ``np.nan`` (:gh:`10392` by `Clemens Brunner`_) - -- Fix misleading color scale in :ref:`tut-timefreq-twoway-anova` for plotting F-stats (:gh:`10401` by `Alex Rockhill`_) - -- Fix misleading ``T_obs`` return name for :func:`mne.stats.spatio_temporal_cluster_test` when the default returns an F-statistic (:gh:`10401` by `Alex Rockhill`_) - -API changes -~~~~~~~~~~~ -- The default browser for :meth:`raw.plot() `, :meth:`epochs.plot() `, and :meth:`ica.plot_sources() ` has been changed to the ``'qt'`` backend on systems where `mne_qt_browser `__ is installed. To change back to matplotlib within a session, you can use :func:`mne.viz.set_browser_backend('matplotlib') `. To set it permanently on your system, you can use :func:`mne.set_config('MNE_BROWSER_BACKEND', 'matplotlib') ` (:gh:`9960` by `Martin Schulz`_ and `Eric Larson`_) - -- ``mne.Info.pick_channels`` has been deprecated. Use ``inst.pick_channels`` to pick channels from :class:`~mne.io.Raw`, :class:`~mne.Epochs`, and :class:`~mne.Evoked`. Use :func:`mne.pick_info` to pick channels from :class:`mne.Info` (:gh:`10039` by `Mathieu Scheltienne`_) - -- All :func:`data_path ` functions now return :class:`python:pathlib.Path` objects rather than strings. Support for string concatenation with plus (``+``) is thus deprecated and will be removed in 1.2, use the forward-slash ``/`` operator instead (:gh:`10348` by `Eric Larson`_) - -- Argument ``event_list`` has been deprecated in favor of ``events`` in :func:`mne.write_events` (:gh:`10056` by `Mathieu Scheltienne`_) - -- ``mne.preprocessing.annotate_flat`` has been deprecated in favor of :func:`mne.preprocessing.annotate_amplitude`, that covers both minimum and maximum peak-to-peak variation. (:gh:`10143` by `Mathieu Scheltienne`_) - -- The ``max_ori_out`` parameter of :func:`mne.beamformer.apply_lcmv` and related functions is being removed as only signed estimates are supported. ``abs(stc)`` can be used to obtain unsigned estimates (:gh:`10366` by `Eric Larson`_) - -- The ``verbose`` attribute of classes (e.g., :class:`mne.io.Raw`, `mne.Epochs`, etc.) has been deprecated. Explicitly pass ``verbose`` to methods as necessary instead. (:gh:`10267` by `Eric Larson`_) - -- In :func:`mne.viz.set_browser_backend`, the `mne-qt-browser `__-based backend is now called ``'qt'`` rather than ``'pyqtgraph'`` for simplicity (:gh:`10323` by `Eric Larson`_) - -Dependencies -~~~~~~~~~~~~ -Numerous external dependencies that used to be bundled with MNE-Python are now -not shipped with the package anymore and will instead be retrieved -automatically from their official sources when you install MNE-Python. This -simplifies MNE-Python maintenance and keeps the package smaller. The following -new dependencies have been added: - -- `Jinja2`_ (replaces ``Tempita``, which is not maintained anymore; :gh:`10211` by `Richard Höchenberger`_) - - -.. 
_Jinja2: https://jinja.palletsprojects.com/ - -Authors -~~~~~~~ - -* Adam Li -* Adina Wagner+ -* Alex Rockhill -* Alexandre Gramfort -* Britta Westner -* Clemens Brunner -* Daniel McCloy -* Eduard Ort -* Eric Larson -* Etienne de Montalivet+ -* Federico Zamberlan+ -* Guillaume Favelier -* Jan Sosulski -* Jan Zerfowski+ -* Jeff Stout -* John Veillette+ -* Joshua Teves -* Julia Guiomar Niso Galán -* Luke Bloy -* Marijn van Vliet -* Martin Schulz -* Mathieu Scheltienne -* Matthias Eberlein+ -* Michiru Kaneda+ -* Mikołaj Magnuski -* Nikolai Chapochnikov+ -* Richard Höchenberger -* Robert Luke -* Scott Huberty+ -* Senwen Deng+ -* Stefan Appelhoff -* Steve Matindi -* Thomas Hartmann diff --git a/doc/changes/1.1.inc b/doc/changes/1.1.inc deleted file mode 100644 index fcb2be19823..00000000000 --- a/doc/changes/1.1.inc +++ /dev/null @@ -1,289 +0,0 @@ -.. NOTE: we use cross-references to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. NOTE: changes from first-time contributors should be added to the TOP of - the relevant section (Enhancements / Bugs / API changes), and should look - like this (where xxxx is the pull request number): - - - description of enhancement/bugfix/API change (:gh:`xxxx` by - :newcontrib:`Firstname Lastname`) - - Also add a corresponding entry for yourself in doc/changes/names.inc - -.. _changes_1_1_0: - -Version 1.1.0 (2022-08-03) --------------------------- - -Enhancements -~~~~~~~~~~~~ -- Added new built-in montage R-Net 128 Ch (Brain Products) (:gh:`10974` by :newcontrib:`Ilias Machairas`) - -- :class:`~mne.channels.DigMontage` objects can now be checked for equality via ``==`` (:gh:`10942` by :newcontrib:`Tziona NessAiver`) - -- Add a note to clarify the inclusion of average referencing gain matrix during whitening (:gh:`10922` by :newcontrib:`Mingjian He`) - -- Add time axis scale bar for :meth:`mne.Epochs.plot` using matplotlib backend (:gh:`10736` by :newcontrib:`Jordan Drew`) - -- Add :func:`mne.bem.distance_to_bem` to find depth of source positions (:gh:`10632` by :newcontrib:`Matt Courtemanche`) - -- Add support for ahdr files in :func:`mne.io.read_raw_brainvision` (:gh:`10515` by :newcontrib:`Alessandro Tonin`) - -- Add support for reading data from Gowerlabs devices to :func:`mne.io.read_raw_snirf` (:gh:`10555` by :newcontrib:`Samuel Powell` and `Robert Luke`_) - -- Add built-in sensor layout files for Geodesic Head Web 130 and 280 devices (:gh:`10627` by `Evan Hathaway`_) - -- Add ``mne-icalabel`` to :func:`mne.sys_info` (:gh:`10615` by `Adam Li`_) - -- Add support for ``overview_mode`` in :meth:`raw.plot() ` and related functions/methods (:gh:`10501` by `Eric Larson`_) - -- Add :meth:`mne.io.Raw.crop_by_annotations` method to get chunks of Raw data based on :class:`mne.Annotations`. (:gh:`10460` by `Alex Gramfort`_) - -- The ``pick_channels`` method gained a ``verbose`` parameter, allowing e.g. 
to suppress messages about removed projectors (:gh:`10544` by `Richard Höchenberger`_) - -- Add :func:`mne.viz.plot_projs_joint` for joint plotting of projectors and Evoked (:gh:`10720` by `Eric Larson`_) - -- The :func:`mne.make_forward_dipole` function can now take a list of dipoles to make multi-dipole forward models (:gh:`10464` by `Marijn van Vliet`_) - -- Add ``mode='outlines'`` support to :meth:`mne.Dipole.plot_locations` and :func:`mne.viz.plot_dipole_locations` (:gh:`10699` by `Eric Larson`_) - -- Add :meth:`mne.preprocessing.ICA.find_bads_muscle` to find muscle-related ICA components with an example, :ref:`ex-muscle-ica` (:gh:`10534` by `Alex Rockhill`_) - -- Add example of Xfit-style ECD modeling using multiple dipoles (:gh:`10464` by `Marijn van Vliet`_) - -- Add ``head_source`` argument to :func:`mne.make_field_map` to allow selecting which head source to use (:gh:`10568` by `Eric Larson`_) - -- Add support for ``n_jobs=None`` to support :func:`joblib:joblib.parallel_backend` for more precise control over parallelization (:gh:`10567` by `Eric Larson`_) - -- It is now possible to compute inverse solutions with restricted source orientations using discrete forward models (:gh:`10464` by `Marijn van Vliet`_) - -- The new function :func:`mne.preprocessing.maxwell_filter_prepare_emptyroom` simplifies the preconditioning of an empty-room recording for our Maxwell filtering operations (:gh:`10533` by `Richard Höchenberger`_ and `Eric Larson`_) - -- Add keyboard shortcuts to toggle :meth:`mne.preprocessing.ICA.plot_properties` topomap channel types ('t') and power spectral density log-scale ('l') (:gh:`10557` by `Alex Rockhill`_) - -- Add ``--mri`` and ``--threshold`` options to :ref:`mne make_scalp_surfaces` to improve head surface mesh extraction (:gh:`10591` by `Eric Larson`_) - -- Add :func:`mne.preprocessing.compute_bridged_electrodes` to detect EEG electrodes with shared spatial sources due to a conductive medium connecting two or more electrodes, add :ref:`ex-eeg-bridging` for an example and :func:`mne.viz.plot_bridged_electrodes` to help visualize (:gh:`10571` by `Alex Rockhill`_) - -- Add ``'nearest'`` as an option for the ``image_interp`` argument in :func:`mne.viz.plot_topomap` to plot a topomap without interpolation using a Voronoi parcellation (:gh:`10571` by `Alex Rockhill`_) - -- Add :func:`mne.preprocessing.interpolate_bridged_electrodes` to use the spatially smeared signal to get a better interpolation rather than dropping those channels (:gh:`10587` by `Alex Rockhill`_) - -- Add support for hemoglobin type fNIRS data to temporal derivative distribution repair (TDDR) :func:`mne.preprocessing.nirs.temporal_derivative_distribution_repair` (:gh:`10125` by `Johann Benerradi`_) - -- :func:`mne.viz.plot_evoked_topomap` and :meth:`mne.Evoked.plot_topomap` now display the time range the map was averaged over if ``average`` was passed (:gh:`10606` by `Richard Höchenberger`_) - -- :func:`mne.viz.plot_evoked_topomap` and :meth:`mne.Evoked.plot_topomap` can now average the topographic maps across different time periods for each time point.
To do this, pass a list of periods via the ``average`` parameter (:gh:`10610` by `Richard Höchenberger`_) - -- :func:`mne.viz.plot_evoked` and :meth:`mne.Evoked.plot` gained a new parameter, ``highlight``, to visually highlight time periods of interest (:gh:`10614` by `Richard Höchenberger`_) - -- Added fNIRS support to :func:`mne.Info.get_montage` (:gh:`10611` by `Robert Luke`_) - -- Add :func:`mne.viz.plot_ch_adjacency` to visualize and manually edit the channel adjacency matrix (:gh:`10633` by `Mikołaj Magnuski`_) - -- Add :meth:`mne.viz.Brain.get_view` to get the current camera parameters such that they can be passed to :meth:`mne.viz.Brain.show_view` (:gh:`10661` by `Alex Rockhill`_) - -- Added support for Aurora version 2021.9.0.6 to :func:`mne.io.read_raw_nirx` (:gh:`10668` by `Robert Luke`_) - -- Added BrainVision format (``.vhdr``, ``.vmrk``, ``.eeg``) to :func:`mne.export.export_raw` (:gh:`10681` by `Stefan Appelhoff`_) - -- Added ``export`` method to :class:`mne.Evoked` (:gh:`10681` by `Stefan Appelhoff`_) - -- Add timeformat ``YYYY/MM/DD`` to ``mne.io.ctf.info._convert_time`` (:gh:`10674` by `Simon Kern`_) - -- :meth:`mne.Report.add_figure` and :meth:`mne.Report.add_image` gained a new parameter, ``section``, allowing to put multiple figures or images into the same "content block". Previously, the only way to group figures and images was by means of a slider, which sometimes made content hard to discover. The slider remains available as an alternative (:gh:`10694` by `Richard Höchenberger`_) - -- All functions and methods that plot topographic maps for EEG (2D projections of the EEG sensor locations) now accept the parameter value ``sphere='eeglab'`` to lay out the sensors with respect to the head circle in a similar way to how EEGLAB does, i.e., T7/T8 and Fpz/Oz are placed directly on the circle for template montages (:gh:`10572` by `Richard Höchenberger`_) - -- :func:`mne.channels.get_builtin_montages` gained a new parameter, ``descriptions``, which allows to retrieve the descriptions of the montages in addition to their names (:gh:`10373` by `Richard Höchenberger`_) - -- Add ``include`` option to :meth:`mne.io.read_raw_edf`, :meth:`mne.io.read_raw_bdf` and :meth:`mne.io.read_raw_gdf`. (:gh:`10734` by `Michiru Kaneda`_) - -- :func:`mne.gui.coregistration` gained a new parameter, ``fullscreen``, to start the GUI in fullscreen mode (:gh:`10284` by `Richard Höchenberger`_) - -- The built-in FieldTrip channel adjacency matrices, which can be read via :func:`~mne.channels.read_ch_adjacency`, have been synchronized with FieldTrip again. This means that numerous new adjacencies have been added, most notably those for standard 10-05 and 10-20 montages (:gh:`10749` by `Richard Höchenberger`_) - -- :meth:`mne.Evoked.plot_field` gained a new parameter, ``interaction``, to control the rotation axes when interacting with the head (:gh:`10788` by `Richard Höchenberger`_) - -- Add :func:`mne.read_evoked_besa` for reading evokeds from BESA ``.avr`` and ``.mul`` files. (:gh:`10892` by `Marijn van Vliet`_) - -- Add :meth:`mne.time_frequency.EpochsTFR.decimate` to reduce size of time-frequency epochs objects (:gh:`10940` by `Alex Rockhill`_) - -- Add an alias that maps EGI reference channel names (``VREF`` or ``Vertex Reference``) to the electrode name in the standard EGI montages (``Cz``). 
Now setting standard EGI montages with :meth:`~mne.io.Raw.set_montage` with ``match_alias=True`` will avoid raising an error (:gh:`10823` by `Scott Huberty`_) - -- Add provenance in :func:`mne.time_frequency.EpochsTFR.apply_baseline` (:gh:`10979` by `Alex Rockhill`_) - -Bugs -~~~~ -- Fix bug in ``mne.surface._project_onto_surface`` for the condition ``method != 'accurate' and return_nn`` (:gh:`10930` by `Christian O'Reilly`_) - -- Fix bug in :func:`mne.export.export_raw` to ignore None value in filenames attribute of :class:`mne.io.RawArray` (:gh:`10927` by :newcontrib:`Reza Nasri`) - -- Fix bug in :func:`mne.io.read_raw_edf` to allow all Hypnodyne ZMax EDFs to be read in without issues (:gh:`10754` by :newcontrib:`Frederik Weber`) - -- Fix bug in :func:`mne.Epochs.drop_bad` where data was loaded although no rejection had to be performed (:gh:`10718` by :newcontrib:`Lukas Gemein`) - -- Pick also bad channels when selecting T1T2 magnetometers in :func:`mne.preprocessing.maxwell_filter` (:gh:`10639` by :newcontrib:`Matti Toivonen`) - -- Make ``color`` parameter check in :func:`mne.viz.plot_evoked_topo` consistent (:gh:`10217` by :newcontrib:`T. Wang` and `Stefan Appelhoff`_) - -- Fix bug in :func:`mne.io.read_raw_brainvision` when BrainVision data are acquired with the Brain Products "V-Amp" amplifier and a disabled lowpass filter is marked with value ``0`` (:gh:`10517` by :newcontrib:`Alessandro Tonin`) - -- Fix bug in :func:`mne.pick_types` and related methods where ``csd=True`` was not handled properly (:gh:`10470` by :newcontrib:`Matthias Dold`) - -- Fix bug where plots produced using the ``'qt'`` / ``mne_qt_browser`` backend could not be added using :meth:`mne.Report.add_figure` (:gh:`10485` by `Eric Larson`_) - -- Fix bug where ``theme`` was not handled properly in :meth:`mne.io.Raw.plot` (:gh:`10487`, :gh:`10500` by `Mathieu Scheltienne`_ and `Eric Larson`_) - -- Fix bug in :meth:`raw.crop(start, stop) ` that would cause annotations to be erroneously shifted when ``start != 0`` and no measurement date was set.
(:gh:`10491` by `Eric Larson`_) - -- Fix bug in :func:`mne.io.read_raw_bti` where unknown electrode locations were not handled properly (:gh:`10662` by `Eric Larson`_) - -- Fix bug in :func:`mne.io.read_raw_ctf` on Windows where large files could not be read (:gh:`10866` by `Eric Larson`_) - -- Fix bug in :func:`mne.io.read_raw_ctf` where invalid measurement dates were not handled properly (:gh:`10957` by `Jean-Remi King`_ and `Eric Larson`_) - -- Rendering issues with recent MESA releases can be avoided by setting the new environment variable ``MNE_3D_OPTION_MULTI_SAMPLES=1`` or using :func:`mne.viz.set_3d_options` (:gh:`10513` by `Eric Larson`_) - -- Fix behavior for the ``pyvista`` 3D renderer's ``quiver3D`` function so that default arguments plot a glyph in ``arrow`` mode (:gh:`10493` by `Alex Rockhill`_) - -- Retain epochs metadata when using :func:`mne.channels.combine_channels` (:gh:`10504` by `Clemens Brunner`_) - -- Fix epochs indexing with metadata containing boolean type and missing values (:gh:`10705` by `Clemens Brunner`_ and `Alex Gramfort`_) - -- Fix reading of fiducial locations in :func:`mne.io.read_raw_eeglab` (:gh:`10521` by `Alex Gramfort`_) - -- Prevent creation of montage with invalid ``[x, y, z]`` coordinates with :func:`mne.channels.make_dig_montage` (:gh:`10547` by `Mathieu Scheltienne`_) - -- Fix bug in coregistration GUI that prevented it from starting up if only a high-resolution head model was available (:gh:`10543` by `Richard Höchenberger`_) - -- Fix bug with :class:`mne.Epochs.add_reference_channels` where attributes were not updated properly so subsequent `~mne.Epochs.pick_types` calls were broken (:gh:`10912` by `Eric Larson`_) - -- Fix bug in the :class:`mne.viz.Brain` tool bar that prevented the buttons from calling the corresponding feature (:gh:`10560` by `Guillaume Favelier`_) - -- Fix issue with saving epochs once :func:`~mne.preprocessing.compute_current_source_density` has been used if a rejection threshold was used first (:gh:`10619` by `Alex Rockhill`_ and `Richard Höchenberger`_) - -- Fix bug in :func:`mne.viz.plot_evoked_image` that would cause incorrect sub-titles when using ``group_by`` (:gh:`10618` by `Reza Shoorangiz`_) - -- Reduce memory usage when loading an EDF file with ``preload=False`` (:gh:`10638` by `Clemens Brunner`_) - -- Fix bug in :meth:`mne.io.Raw.get_channel_types` and related methods where ``unique=True`` would return a :class:`python:set` with arbitrary order rather than a :class:`python:list` that preserves the same order as ``inst.ch_names`` (:gh:`10720` by `Eric Larson`_) - -- In :func:`mne.preprocessing.find_bad_channels_maxwell`, do not re-filter the data if a low-pass filter with the requested frequency has already been applied (:gh:`10664` by `Richard Höchenberger`_) - -- Fix a problem in :meth:`mne.Evoked.get_peak`, where under certain circumstances the ``mode`` parameters ``'pos'`` and ``'neg'`` were not honored when ``tmin`` and/or ``tmax`` were passed as well (:gh:`10686` by `Richard Höchenberger`_) - -- :func:`mne.read_evokeds`, :func:`mne.channels.read_custom_montage`, :func:`mne.channels.read_dig_hpts`, :func:`mne.channels.read_dig_polhemus_isotrak`, and :func:`mne.channels.read_polhemus_fastscan` now correctly expand ``~`` in the provided path to the user's home directory (:gh:`10685`, :gh:`10688` by `Richard Höchenberger`_) - -- Fix bug in :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab`, where the presence of channels without position in the ``.set`` file caused positions to be incorrectly assigned to
all further channels (:gh:`8754` by `Mikołaj Magnuski`_) - -- Combining channels of :class:`mne.Epochs` or :class:`mne.Evoked` objects now properly retains baseline information (:gh:`10703` by `Clemens Brunner`_) - -- In :class:`mne.Report`, some figures would have an undesired border added to the edges; this has now been resolved (:gh:`10730` by `Richard Höchenberger`_) - -- Fix selection of EEG channels and selected sphere when plotting bridged electrodes with :func:`mne.viz.plot_bridged_electrodes` (:gh:`10753` by `Mathieu Scheltienne`_) - -- Fix broken links in :ref:`tut-fix-meshes` (:gh:`10765` by `Alex Rockhill`_) - -- The built-in FieldTrip channel adjacency matrix for ``easycapM1`` had a bug, where the channel ``PO8`` was incorrectly labeled as ``PO6``. This has been resolved by updating the adjacency matrix to the latest version provided by FieldTrip (:gh:`10749` by `Richard Höchenberger`_) - -- Add a YouTube video tutorial for editing FreeSurfer surfaces in Blender to :ref:`tut-fix-meshes` (:gh:`10778` by `Alex Rockhill`_) - -- The default interaction style of :meth:`mne.Evoked.plot_field` has been changed such that it behaves like :func:`~mne.viz.plot_alignment` when trying to rotate the head (:gh:`10788` by `Richard Höchenberger`_) - -- Add a video on how to operate the intracranial electrode contact location GUI in :ref:`tut-ieeg-localize` (:gh:`10800` by `Alex Rockhill`_) - -- Add a video on how to operate the coregistration GUI in :ref:`tut-source-alignment` (:gh:`10802` by `Alex Rockhill`_) - -- Add ``show`` and ``block`` arguments to :func:`mne.gui.coregistration` and :func:`mne.gui.locate_ieeg` to pop up the GUIs and halt execution of subsequent code respectively (:gh:`10802` by `Alex Rockhill`_) - -- Correctly report the number of available projections when printing measurement info in a Jupyter notebook (:gh:`10471` by `Clemens Brunner`_) - -- Fix value set in ``raw.orig_format`` for readers of BrainVision (ASCII format), EGI and Artemis123 files (:gh:`10851` by `Mathieu Scheltienne`_) - -- When reading EGI MFF files, channel names are no longer ignored and reference channel information is properly incorporated (:gh:`10898` by `Scott Huberty`_ and `Daniel McCloy`_) - -- Fix bug in :func:`mne.time_frequency.psd_array_welch` and related functions/methods where the bias was not accounted for when ``average='median'`` (:gh:`10990` by `Eric Larson`_) - -- Fix bug in :class:`mne.decoding.TemporalFilter` where filter parameters were not handled properly (:gh:`10968` by `Eric Larson`_) - -- Fix documentation bug in ``ica.plot_sources`` to specify that ``picks`` keyword argument is for picking ICA components to plot (:gh:`10936` by `Adam Li`_) - -- Annotations contained in EDF files are correctly read as UTF-8 according to the EDF specification (:gh:`10963` by `Clemens Brunner`_) - -- Fix bug where complex-valued topoplots caused an error; the data are now converted to amplitude instead (:gh:`10978` by `Alex Rockhill`_) - -API and behavior changes -~~~~~~~~~~~~~~~~~~~~~~~~ -- When creating BEM surfaces via :func:`mne.bem.make_watershed_bem` and :func:`mne.bem.make_flash_bem`, the ``copy`` parameter now defaults to ``True``. This means that instead of creating symbolic links inside the FreeSurfer subject's ``bem`` folder, we now create "actual" files.
This should avoid troubles when sharing files across different operating systems and file systems (:gh:`10531` by `Richard Höchenberger`_) - -- The ordering of channels returned by :func:`mne.io.read_raw_nirx` is now ordered by channel name, rather than the order provided by the manufacturer. This enables consistent ordering of channels across different file types (:gh:`10555` by `Robert Luke`_) - -- For :func:`mne.viz.plot_topomap`, :func:`mne.viz.plot_evoked_topomap`, :func:`mne.viz.plot_arrowmap`, :func:`mne.viz.plot_ica_components`, :meth:`mne.Covariance.plot_topomap`, :meth:`mne.Evoked.plot_topomap`, :meth:`mne.Evoked.animate_topomap`, :meth:`mne.decoding.CSP.plot_patterns`, :meth:`mne.Projection.plot_topomap` and :meth:`mne.preprocessing.ICA.plot_components` the topomap image interpolation was previously a cubic interpolation but now can be ``'linear'`` and ``'nearest'`` as well. Unless ``image_interp='nearest'`` is passed mne uses a subsequent matplotlib bilinear interpolation to make the interpolated image smoother. Previously, ``'image_interp'`` controlled this second interpolation step and, for the first interpolation, the only option was cubic. To simplify, ``image_interp`` is now responsible for the main interpolation and the subsequent matplotlib image interpolation is bilinear but can be changed afterward using ``im.set_interpolation`` (:gh:`10617` by `Alex Rockhill`_) - -- The FLASH BEM surface generation via :func:`mne.bem.make_flash_bem` or the :ref:`mne flash_bem` command has now been clarified in terms of expected inputs. In :func:`mne.bem.convert_flash_mris` the ``convert`` parameter is now deprecated and the ``flash5`` and ``flash30`` now accept list of file names or nibabel images. One breaking change is that now the flash files are looked for as mef05_*.mgz and mef30_*.mgz in mri/flash folder (:gh:`10715` by `Alex Gramfort`_) - -- :func:`~mne.viz.plot_compare_evokeds` gets a new parameter ``time_unit`` which can be ``"s"`` or ``"ms"`` (:gh:`10950` by `Daniel McCloy`_) - -- The ``to_data_frame`` methods for :meth:`Raw `, :meth:`Epochs `, :meth:`Evoked `, :meth:`SourceEstimate `, :meth:`EpochsTFR `, and :meth:`AverageTFR ` all now have default ``time_format=None`` (keep time in seconds) instead of ``time_format='ms'`` (convert time to milliseconds) (:gh:`10925` by `Daniel McCloy`_) - -- :func:`mne.Evoked.decimate` now aligns to the sample at time=0 (so that the exact sample when the event occurred is preserved) to be consistent with :func:`mne.Epochs.decimate` (:gh:`10945` by `Alex Rockhill`_) - -Authors -~~~~~~~ - -* Adam Li -* Adeline Fecker+ -* Alessandro Tonin+ -* Alex Ciok+ -* Alex Rockhill -* Alexandre Gramfort -* Archit Singhal+ -* Ashley Drew+ -* Ben Beasley+ -* Carina Forster+ -* Christian O'Reilly -* Clemens Brunner -* Cora Kim -* Daniel McCloy -* Dominik Welke -* Dominique Makowski -* Eric Larson -* Evan Hathaway -* Frederik D. Weber+ -* Guillaume Favelier -* Hamid Maymandi+ -* Ilias Machairas+ -* Jean-Remi King -* Johann Benerradi -* Jon Houck -* Jona Sassenhagen -* Jordan Drew+ -* Lukas Gemein+ -* Marijn van Vliet -* Martin Schulz -* Mathieu Scheltienne -* Matt Courtemanche+ -* Matthias Dold+ -* Matti Toivonen+ -* Michiru Kaneda+ -* Mikołaj Magnuski -* Mingjian He+ -* Naveen Srinivasan+ -* Phillip Alday -* Reza Nasri+ -* Reza Shoorangiz -* Richard Höchenberger -* Rob Luke -* Scott Huberty+ -* Simon Kern -* Sondre Foslien+ -* Stefan Appelhoff -* T. 
Wang+ -* Tziona NessAiver+ diff --git a/doc/changes/1.2.inc b/doc/changes/1.2.inc deleted file mode 100644 index fd5b23e5fd5..00000000000 --- a/doc/changes/1.2.inc +++ /dev/null @@ -1,134 +0,0 @@ -.. NOTE: we use cross-references to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. NOTE: changes from first-time contributors should be added to the TOP of - the relevant section (Enhancements / Bugs / API changes), and should look - like this (where xxxx is the pull request number): - - - description of enhancement/bugfix/API change (:gh:`xxxx` by - :newcontrib:`Firstname Lastname`) - - Also add a corresponding entry for yourself in doc/changes/names.inc - -.. _changes_1_2_0: - -Version 1.2.0 (2022-10-11) --------------------------- - -Enhancements -~~~~~~~~~~~~ -- Add ``vmin`` and ``vmax`` parameters to :meth:`mne.Evoked.animate_topomap` (:gh:`11073` by :newcontrib:`Mats van Es`) -- Enable common average reference projection for ECoG, sEEG, and DBS data (optionally jointly with ``joint=True``) in :meth:`~mne.io.Raw.set_eeg_reference` and related functions/methods (:gh:`10656` by :newcontrib:`Moritz Gerster` and `Eric Larson`_) -- The ``trans`` parameter in :func:`mne.make_field_map` now accepts a :class:`~pathlib.Path` object, and uses standardised loading logic (:gh:`10784` by :newcontrib:`Andrew Quinn`) -- Add support for computing forward solutions using OpenMEEG_ (:gh:`11011` by `Alex Gramfort`_ and `Eric Larson`_) -- EEGLAB files (saved as MAT versions less than v7.3) can now be imported with :func:`mne.io.read_raw_eeglab` without the optional dependency ``pymatreader`` (:gh:`11006` by `Clemens Brunner`_) -- Add eight source estimate comparison metrics to new submodule :mod:`mne.simulation.metrics` (:gh:`6233` by `Kostiantyn Maksymenko`_ and `Alex Gramfort`_) -- Add :func:`mne.time_frequency.csd_tfr` to compute cross-spectral density from :class:`mne.time_frequency.EpochsTFR` (:gh:`10986` by `Alex Rockhill`_) -- Improve ``repr()`` for :class:`mne.minimum_norm.InverseOperator` when loose orientation is used (:gh:`11048` by `Eric Larson`_) -- Add support for vector and volume source estimates in :func:`mne.minimum_norm.get_point_spread` and :func:`mne.minimum_norm.get_cross_talk` (:gh:`8639`, :gh:`11083` by `Olaf Hauk`_, `Marijn van Vliet`_, `Alex Gramfort`_, and `Eric Larson`_) -- :meth:`mne.Epochs.plot_psd_topomap` now suppresses redundant colorbars when ``vlim='joint'`` (:gh:`11051` by `Daniel McCloy`_) -- Add ``starting_affine`` keyword argument to :func:`mne.transforms.compute_volume_registration` to initialize an alignment with an affine (:gh:`11020` by `Alex Rockhill`_) -- Add HTML representation for `~mne.Evoked` in Jupyter Notebooks (:gh:`11075` by `Valerii Chirkov`_ and `Andrew Quinn`_) -- Add support for ``temperature`` and ``gsr`` (galvanic skin response, i.e., electrodermal activity) channel types (:gh:`11090`, :gh:`11108` by `Eric Larson`_ and `Richard Höchenberger`_) -- Allow :func:`mne.beamformer.make_dics` to take ``pick_ori='vector'`` to compute vector source estimates (:gh:`19080` by `Alex Rockhill`_) -- Add ``units`` parameter to :func:`mne.io.read_raw_edf` in case units are missing from the file (:gh:`11099` by `Alex Gramfort`_) -- Add ``on_missing`` 
functionality to all of our classes that have a ``drop_channels`` method, to control what happens when channel names are not in the object (:gh:`11077` by `Andrew Quinn`_) -- Improve interpolation of bridged electrodes with `~mne.preprocessing.interpolate_bridged_electrodes` (:gh:`11094` by `Mathieu Scheltienne`_) -- Add :func:`mne.minimum_norm.apply_inverse_tfr_epochs` to apply inverse methods to time-frequency resolved epochs (:gh:`11095` by `Alex Rockhill`_) -- Add support for multiple probes via multiple CSV files passed to :func:`mne.io.read_raw_hitachi` (:gh:`11186` by `Eric Larson`_) -- Add :func:`mne.chpi.get_active_chpi` to retrieve the number of active HPI coils for each time point (:gh:`11122` by `Eduard Ort`_) -- Add example of how to obtain time-frequency decomposition using narrow bandpass Hilbert transforms to :ref:`ex-tfr-comparison` (:gh:`11116` by `Alex Rockhill`_) -- Add ``==`` and ``!=`` comparison between `mne.Projection` objects (:gh:`11147` by `Mathieu Scheltienne`_) -- Automatically parse the temperature channel with :func:`mne.io.read_raw_edf` (:gh:`11150` by `Eric Larson`_ and `Alex Gramfort`_) -- Add ``unscale`` option to :func:`mne.head_to_mri` to facilitate working with surrogate MRI data (:gh:`11185` by `Eric Larson`_) -- Add ``encoding`` parameter to :func:`mne.io.read_raw_edf` and :func:`mne.io.read_raw_bdf` to support custom (non-UTF8) annotation channel encodings (:gh:`11154` by `Clemens Brunner`_) -- :class:`mne.preprocessing.ICA` gained a new method, :meth:`~mne.preprocessing.ICA.get_explained_variance_ratio`, that allows the retrieval of the proportion of variance explained by ICA components (:gh:`11141` by `Richard Höchenberger`_) -- Add ``on_baseline`` to :meth:`mne.preprocessing.ICA.apply`, :meth:`mne.preprocessing.ICA.plot_overlay`, and :func:`mne.viz.plot_ica_overlay` to allow reapplying baseline correction after applying ICA (:gh:`11232` by `Eric Larson`_) -- Add config option ``MNE_REPR_HTML`` to disable HTML repr in notebook environments (:gh:`11159` by `Clemens Brunner`_) - -Bugs -~~~~ -- Fix bug in :meth:`mne.io.Raw.crop` where argument ``include_tmax`` was not considered in checking ``tmax`` in bounds (:gh:`11196` by `Lukas Gemein`_) -- Fix bug in :func:`mne.io.read_raw_eeglab` where unlabeled fiducials caused reading errors (:gh:`11074` by :newcontrib:`Sebastiaan Mathot`) -- Fix bug in :func:`mne.time_frequency.read_csd` that returned ``projs`` as a list of dict instead of :class:`mne.Projection` (:gh:`11072` by :newcontrib:`Chetan Gohil`) -- Fix bug in :func:`mne.decoding.TimeFrequency` that prevented cloning if constructor arguments were modified (:gh:`11004` by :newcontrib:`Daniel Carlström Schad`) -- Fix bug in ``repr`` of decoding estimator classes (e.g., :class:`mne.decoding.SlidingEstimator`) where the parameters were not stored in the result string but instead just printed to ``sys.stdout`` (:gh:`11190` by :newcontrib:`Dominik Wetzel`) -- Document ``height`` and ``weight`` keys of ``subject_info`` entry in :class:`mne.Info` (:gh:`11019` by :newcontrib:`Sena Er`) -- Fix bug in :class:`mne.viz.Brain` constructor where the first argument was named ``subject_id`` instead of ``subject`` (:gh:`11049` by `Eric Larson`_) -- Fix bug in :ref:`mne coreg` where the MEG helmet position was not updated during ICP fitting (:gh:`11084` by `Eric Larson`_) -- Fix bug in :func:`~mne.io.read_raw_curry` and :func:`~mne.io.read_raw_cnt` where digitization points were not read properly (:gh:`11145` by `Eric Larson`_) -- Fix bug in
:func:`mne.minimum_norm.apply_inverse_epochs` where the average EEG projector was not checked properly (:gh:`11182` by `Eric Larson`_) -- Fix bug in :func:`mne.viz.plot_filter` when plotting filters created using ``output='ba'`` mode with ``compensation`` turned on. (:gh:`11040` by `Marian Dovgialo`_) -- Fix bugs in documentation of surface :class:`~mne.SourceSpaces` (:gh:`11171` by `Eric Larson`_) -- Fix bug in :func:`mne.viz.plot_compare_evokeds` where automatic legend labels could be excessively long; they are now abbreviated with ``...`` when necessary (:gh:`11181` by `Eric Larson`_) -- Fix bugs with ``verbose='error'`` not being used properly and allowing warnings through (:gh:`11193` by `Eric Larson`_) -- Fix bug in :func:`mne.io.read_raw_bti` where EEG, EMG, and H/VEOG channels were not detected properly, and many non-ECG channels were called ECG. The logic has been improved, and any channels of unknown type are now labeled as ``misc`` (:gh:`11102` by `Eric Larson`_) -- Fix bug in :func:`mne.viz.plot_topomap` when providing ``sphere="eeglab"`` (:gh:`11081` by `Mathieu Scheltienne`_) -- Fix bug in :meth:`mne.Dipole.to_mri` where MRI RAS rather than MRI surface RAS was returned (:gh:`11185` by `Eric Larson`_) -- Fix bug in :meth:`epochs.save ` where the ``verbose`` parameter defaulted to ``True`` instead of ``None`` (:gh:`11191` by `Eric Larson`_) -- The string and HTML representation of :class:`mne.preprocessing.ICA` reported incorrect values for the explained variance. This information has been removed from the representations, and should instead be retrieved via the new :meth:`~mne.preprocessing.ICA.get_explained_variance_ratio` method (:gh:`11141` by `Richard Höchenberger`_) -- Fix bug in :meth:`mne.Evoked.plot` and related methods where a ``np.nan`` location value in any channel causes spatial colours to fail (:gh:`6870` by `Simeon Wong`_) -- Fix bug in :meth:`mne.preprocessing.ICA.find_bads_muscle` where epochs caused an error when passed as the ``inst`` (:gh:`11197` by `Alex Rockhill`_) -- Fix bug in readers where EEG coordinates were assumed to be in head coordinates but no fiducial points were present. Estimated fiducial locations will now be added automatically to reflect the assumption of locations being in the head coordinate frame (:gh:`11212` by `Stefan Appelhoff`_ and `Eric Larson`_) -- The duration of raw data sometimes wasn't displayed correctly in Jupyter notebooks by omitting fractions of a second. 
We now always round up to the next full second so a duration of less than 1 second will not be displayed as a duration of zero anymore (:gh:`11203` by `Richard Höchenberger`_) -- Fix bug in :meth:`mne.Report.add_ica` where baselines were not reapplied to the data when ``inst`` is Epochs or Evoked (:gh:`11232` by `Eric Larson`_) - -API changes -~~~~~~~~~~~ -- In :meth:`mne.Evoked.plot`, the default value of the ``spatial_colors`` parameter has been changed to ``'auto'``, which will use spatial colors if channel locations are available (:gh:`11201` by :newcontrib:`Hüseyin Orkun Elmas` and `Daniel McCloy`_) -- Starting with this release we now follow the Python convention of using ``FutureWarning`` instead of ``DeprecationWarning`` to signal user-facing changes to our API (:gh:`11120` by `Daniel McCloy`_) -- The ``names`` parameter of :func:`mne.viz.plot_arrowmap` and :func:`mne.viz.plot_regression_weights` has been deprecated; sensor names will be automatically drawn from the ``info_from`` or ``model`` parameter (respectively), and can be hidden, shown, or altered via the ``show_names`` parameter (:gh:`11123` by `Daniel McCloy`_) -- The ``bands`` parameter of :meth:`mne.Epochs.plot_psd_topomap` now accepts :class:`dict` input; legacy :class:`tuple` input is supported, but discouraged for new code (:gh:`11050` by `Daniel McCloy`_) -- The default of the new ``kind`` parameter of :func:`mne.head_to_mri` will change from ``'ras'`` to ``'mri'`` (:gh:`11185` by `Eric Larson`_) -- The ``show_toolbar`` argument to :class:`mne.viz.Brain` has been deprecated and will be removed (:gh:`11049` by `Eric Larson`_) -- New classes :class:`~mne.time_frequency.Spectrum` and :class:`~mne.time_frequency.EpochsSpectrum`, created via new methods :meth:`Raw.compute_psd()`, :meth:`Epochs.compute_psd()`, and :meth:`Evoked.compute_psd()` (:gh:`10184` by `Daniel McCloy`_) -- Functions/methods for plotting topomaps now have more consistent signatures; most have gained parameters for finer control of the topomap image and/or colorbar.
See docstrings for details (:gh:`11123` by `Daniel McCloy`_) -- In :func:`mne.viz.plot_topomap`, passing ``axes=None`` will now create a new :class:`~matplotlib.figure.Figure` instead of plotting into the current axes (:gh:`11123` by `Daniel McCloy`_) -- In :func:`mne.viz.plot_topomap`, the parameter ``show_names`` has been deprecated, use the ``names`` parameter instead (:gh:`11123` by `Daniel McCloy`_) -- In :func:`mne.viz.plot_topomap`, :func:`~mne.viz.plot_tfr_topomap`, :func:`~mne.viz.plot_evoked_topomap`, and related class methods :meth:`mne.Covariance.plot_topomap`, :meth:`mne.Evoked.plot_topomap`, and :meth:`mne.time_frequency.AverageTFR.plot_topomap`: the parameters ``vmin`` and ``vmax`` are deprecated in favor of ``vlim``, for consistency with other ``plot_topomap`` functions and methods (:gh:`11123` by `Daniel McCloy`_) -- In :func:`mne.viz.plot_tfr_topomap`, :func:`~mne.viz.plot_evoked_topomap`, and related class methods :meth:`mne.Covariance.plot_topomap`, :meth:`mne.Evoked.plot_topomap`, and :meth:`mne.time_frequency.AverageTFR.plot_topomap`: the ``title`` parameter has been deprecated; use matplotlib's :meth:`fig.suptitle()` instead (:gh:`11123` by `Daniel McCloy`_) -- In functions/methods for plotting topomaps, the ``outlines='skirt'`` option is deprecated; use ``sphere='eeglab'`` for similar functionality (:gh:`11123` by `Daniel McCloy`_) -- The ``mne.epochs.add_channels_epochs`` function has been deprecated in favor of :meth:`epochs.add_channels ` (:gh:`11180` by `Eric Larson`_) -- The PSD functions that operate on Raw/Epochs/Evoked instances (``mne.time_frequency.psd_welch`` and ``mne.time_frequency.psd_multitaper``) are deprecated; for equivalent functionality create :class:`~mne.time_frequency.Spectrum` or :class:`~mne.time_frequency.EpochsSpectrum` objects instead and then run ``spectrum.get_data(return_freqs=True)`` (:gh:`10184` by `Daniel McCloy`_) -- Added new class :class:`mne.preprocessing.EOGRegression` to allow more flexibility when using regression to reduce EOG artifacts (:gh:`11046` by `Marijn van Vliet`_) -- New parameter ``exclude`` added to :func:`mne.preprocessing.regress_artifact` to prevent regression from being applied to certain channels (:gh:`11046` by `Marijn van Vliet`_) - -Authors -~~~~~~~ - -* Alex Rockhill -* Alexandre Gramfort -* Andrew Quinn+ -* Chetan Gohil+ -* Christian Brodbeck -* Clemens Brunner -* Daniel Carlström Schad+ -* Daniel McCloy -* Dominik Wetzel+ -* Eduard Ort -* Eric Larson -* Hüseyin Orkun Elmas+ -* Jack Zhang -* Joan Massich -* Julius Welzel+ -* Kostiantyn Maksymenko -* Lukas Gemein -* Marian Dovgialo -* Marijn van Vliet -* Mathieu Scheltienne -* Mats van Es+ -* Moritz Gerster+ -* Olaf Hauk -* Richard Höchenberger -* Sebastiaan Mathot+ -* Sena Er+ -* Simeon Wong -* Stefan Appelhoff -* Valerii Chirkov -* luzpaz+ diff --git a/doc/changes/1.3.inc b/doc/changes/1.3.inc deleted file mode 100644 index f08a90681f5..00000000000 --- a/doc/changes/1.3.inc +++ /dev/null @@ -1,116 +0,0 @@ -.. NOTE: we use cross-references to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. 
NOTE: changes from first-time contributors should be added to the TOP of - the relevant section (Enhancements / Bugs / API changes), and should look - like this (where xxxx is the pull request number): - - - description of enhancement/bugfix/API change (:gh:`xxxx` by - :newcontrib:`Firstname Lastname`) - - Also add a corresponding entry for yourself in doc/changes/names.inc - -.. _changes_1_3_1: - -Version 1.3.1 (2023-02-23) --------------------------- - -Bugs -~~~~ -- Fix visualization dialog compatibility with matplotlib 3.7 (:gh:`11409` by `Daniel McCloy`_ and `Eric Larson`_) - -.. _changes_1_3_0: - -Version 1.3.0 (2022-12-21) --------------------------- - -Enhancements -~~~~~~~~~~~~ -- Improve default line width used in :func:`mne.viz.plot_filter` (:gh:`11333` by :newcontrib:`Toomas Erik Anijärv`) -- Add the argument ``max_iter`` to :func:`~mne.time_frequency.psd_array_multitaper` and to :func:`~mne.time_frequency.csd_array_multitaper` to increase the maximum number of iterations to reach convergence when using ``adaptive=True`` (:gh:`11237` by `Mathieu Scheltienne`_) -- Add a warning to the docstring of :func:`mne.channels.find_ch_adjacency` to encourage users to validate their outputs (:gh:`11236` by `Felix Klotzsche`_ and `Eric Larson`_) -- Mixed, cortical + discrete source spaces with fixed orientations are now allowed. (:gh:`11241` by `Jevri Hanna`_) -- Add size information to the ``repr`` of :class:`mne.Report` (:gh:`11357` by `Eric Larson`_) -- Add support for ``image_format='webp'`` to :class:`mne.Report` when using Matplotlib 3.6+, which can reduce file sizes by up to 50% compared to ``'png'``. The new default ``image_format='auto'`` will automatically use this format if it's available on the system (:gh:`11359` by `Eric Larson`_) -- Add :func:`mne.beamformer.apply_dics_tfr_epochs` to apply a DICS beamformer to time-frequency resolved epochs (:gh:`11096` by `Alex Rockhill`_) -- Add :func:`mne.time_frequency.fwhm` to determine the full-width half maximum for :func:`mne.time_frequency.morlet` (:gh:`11353` by `Britta Westner`_, `Daniel McCloy`_, and `Eric Larson`_) -- Check whether head radius (estimated from channel positions) is correct when reading EEGLAB data with :func:`~mne.io.read_raw_eeglab` and :func:`~mne.read_epochs_eeglab`. If head radius is not within likely values, warn informing about possible units mismatch and the new ``montage_units`` argument (:gh:`11283` by `Mikołaj Magnuski`_). 
-- Add support for a callable passed in ``combine`` for `mne.time_frequency.AverageTFR.plot` and `mne.time_frequency.AverageTFR.plot_joint` (:gh:`11329` by `Mathieu Scheltienne`_) - -Bugs -~~~~ -- Fix bug in :func:`mne.export.export_raw` when exporting raw data to EDF with EDFLib if the subject ``sex`` is missing (:gh:`11375` by :newcontrib:`Mark Alexander Henney`) -- Fix bugs in documentation of :func:`mne.time_frequency.tfr_array_multitaper` and related functions/methods (:gh:`11310` by :newcontrib:`Santeri Ruuskanen`) -- Fix bug that prevented customizing legends when using :meth:`mne.viz.Brain.add_volume_labels` (:gh:`11289` by :newcontrib:`Mauricio Cespedes Tenorio`) -- Fix bug in documentation of :func:`mne.channels.make_dig_montage` (:gh:`11235` by :newcontrib:`Daniel Hasegan`) -- Add support for bad channel handling in :func:`mne.set_bipolar_reference` (:gh:`11245` by `Moritz Gerster`_, :newcontrib:`Dinara Issagaliyeva`, :newcontrib:`Jennifer Behnke`, :newcontrib:`Hakimeh Aslsardroud`, and :newcontrib:`Pavel Navratil`) -- Fix X coordinates of ear_left and ear_right in the head outlines dict (:gh:`11255` by :newcontrib:`Tom Ma`) -- Add type checking for ``tmin`` and ``tmax`` arguments for ``crop`` (:gh:`11263` by :newcontrib:`Carlos de la Torre-Ortiz`). -- Fix bug where trying to set up projection without an EEG average reference projector would fail (:gh:`11351` by :newcontrib:`Enzo Altamiranda`) -- Fix bug where ``ica.reject_`` was not saved to disk, and the ``ica.reject_`` property was not inherited from ``Epochs`` when doing ``ICA.fit(epochs)`` (:gh:`11244` by `Eric Larson`_) -- Fix bug in automatic MESA detection for disabling advanced 3D options (:gh:`11271` by `Eric Larson`_) -- Fix bug in :func:`mne.head_to_mri` when ``unscale=True`` (:gh:`11303` by `Eric Larson`_) -- Fix bug in :meth:`set_montage('mgh60') ` and related functions where the ``'mgh60'`` sensor positions were incorrectly ordered (:gh:`11290` by `Eric Larson`_) -- Fix bug in the ``.compute_psd()`` methods where the number of unaggregated Welch segments was wrongly computed for some inputs, leading to an assertion error when computing the PSD (:gh:`11248` by `Daniel McCloy`_) -- Fix bug in :func:`~mne.viz.plot_evoked_topo` and :meth:`~mne.Evoked.plot_topo` where legend colors were shown incorrectly on newer matplotlib versions (:gh:`11258` by `Erkka Heinila`_) -- Fix bug where EEGLAB channel positions were read as meters, while they are commonly in millimeters, leading to head outlines of the size of one channel when plotting topomaps. A new ``montage_units`` argument has been added to :func:`~mne.io.read_raw_eeglab` and :func:`~mne.read_epochs_eeglab` to control in what units EEGLAB channel positions are read.
The default is millimeters, ``'mm'`` (:gh:`11283` by `Mikołaj Magnuski`_) -- Fix bug where computing PSD with welch's method with more jobs than channels would fail (:gh:`11298` by `Mathieu Scheltienne`_) -- Fix bug where the default FFT length changed for spectrum plots (:gh:`11345` by `Daniel McCloy`_) -- Fix bug with :func:`mne.decoding.cross_val_multiscore` where progress bars were not displayed correctly (:gh:`11311` by `Eric Larson`_) -- Fix channel selection edge-cases in `~mne.preprocessing.ICA.find_bads_muscle` (:gh:`11300` by `Mathieu Scheltienne`_) -- Fix bug with :func:`mne.io.read_raw_curry` where a dot in the parent folders prevented files from being read (:gh:`11340` by `Eric Larson`_) -- Fix bug with :class:`mne.Report` with ``replace=True`` where the wrong content was replaced and ``section`` was not respected (:gh:`11318`, :gh:`11346` by `Eric Larson`_) -- Fix bug with unit conversion when setting reference MEG as the channel type in :meth:`mne.io.Raw.set_channel_types` and related methods (:gh:`11344` by `Eric Larson`_) -- Fix bug where reference MEG channels could not be plotted using :func:`mne.viz.plot_epochs_image` (:gh:`11344` by `Eric Larson`_) -- Fix bug where ``image_format='gif'`` was errantly documented as being supported by :class:`mne.Report`, it is now only supported in :meth:`mne.Report.add_image` (:gh:`11347` by `Eric Larson`_) -- Multitaper spectral estimation now uses periodic (rather than symmetric) taper windows. This also necessitated changing the default ``max_iter`` of our cross-spectral density functions from 150 to 250. (:gh:`11293` by `Daniel McCloy`_) -- Fix :meth:`mne.Epochs.plot_image` and :func:`mne.viz.plot_epochs_image` when using EMG signals (:gh:`11322` by `Alex Gramfort`_) -- Fix selection of ICA components in :func:`mne.viz.plot_ica_components` and :meth:`mne.preprocessing.ICA.plot_components` (:gh:`11369` by `Mathieu Scheltienne`_) -- Fix bug where a Freesurfer reconstruction run with both a T1 and a T2 caused :class:`mne.viz.Brain` not to initialize pial surfaces properly (:gh:`11361` by `Alex Rockhill`_) - -API changes -~~~~~~~~~~~ -- In :func:`mne.time_frequency.dpss_windows`, interpolating is deprecated (nowadays SciPy's computations are fast enough for large ``N`` without interpolation). This affects parameters ``interp_from`` and ``interp_kind``. Two new parameters of the underlying SciPy :func:`~scipy.signal.windows.dpss` function are also exposed: ``sym`` (for choosing symmetric vs. periodic windows) and ``norm`` (window normalization method). 
(:gh:`11293` by `Daniel McCloy`_) -- In :meth:`mne.decoding.CSP.plot_patterns`, :meth:`mne.decoding.CSP.plot_filters`, :meth:`mne.preprocessing.ICA.plot_components`, and :func:`mne.viz.plot_ica_components`, the parameters ``vmin`` and ``vmax`` are deprecated in favor of ``vlim``, for consistency with other topomap-plotting functions and methods (:gh:`11371` by `Daniel McCloy`_) -- In :meth:`mne.decoding.CSP.plot_patterns` and :meth:`mne.decoding.CSP.plot_filters` the ``title`` parameter is deprecated and will be removed in version 1.4 (:gh:`11371` by `Daniel McCloy`_) -- The APIs of :meth:`mne.preprocessing.ICA.plot_components` and :func:`mne.viz.plot_ica_components` gained new parameters ``show_names``, ``extrapolate``, ``border``, ``size``, ``cnorm``, ``cbar_fmt``, ``axes``, ``nrows``, ``ncols``, for consistency with other topomap-plotting functions and methods (:gh:`11371` by `Daniel McCloy`_) -- The APIs of :meth:`mne.decoding.CSP.plot_patterns` and :meth:`mne.decoding.CSP.plot_filters` gained new parameters ``extrapolate``, ``border``, ``cnorm``, ``axes``, ``nrows``, ``ncols``, for consistency with other topomap-plotting functions and methods (:gh:`11371` by `Daniel McCloy`_) - -Authors -~~~~~~~ - -* Alex Rockhill -* Alexandre Gramfort -* Britta Westner -* Carlos de la Torre+ -* Daniel Hasegan+ -* Daniel McCloy -* Dinara Issagaliyeva+ -* Enzo Altamiranda+ -* Eric Brayet+ -* Eric Larson -* Erkka Heinila -* Felix Klotzsche -* Hakimeh Aslsardroud+ -* Jennifer Behnke+ -* Jevri Hanna -* Lukas Hecker -* Mark Alexander Henney+ -* Mathieu Scheltienne -* Mauricio Cespedes+ -* Mikołaj Magnuski -* Moritz Gerster -* Omer Shubi+ -* Pavel Navratil+ -* Richard Höchenberger -* Santeri Ruuskanen+ -* Stefan Appelhoff -* Timon Merk -* Tom Ma+ -* Toomas Erik Anijärv+ diff --git a/doc/changes/dev.rst b/doc/changes/dev.rst new file mode 100644 index 00000000000..0e80d522b51 --- /dev/null +++ b/doc/changes/dev.rst @@ -0,0 +1,5 @@ +.. See doc/development/contributing.rst for description of how to add entries. + +.. _current: + +.. towncrier-draft-entries:: Version |release| (development) diff --git a/doc/changes/dev.rst.template b/doc/changes/dev.rst.template new file mode 100644 index 00000000000..0e80d522b51 --- /dev/null +++ b/doc/changes/dev.rst.template @@ -0,0 +1,5 @@ +.. See doc/development/contributing.rst for description of how to add entries. + +.. _current: + +.. towncrier-draft-entries:: Version |release| (development) diff --git a/doc/changes/dev/.gitignore b/doc/changes/dev/.gitignore new file mode 100644 index 00000000000..f935021a8f8 --- /dev/null +++ b/doc/changes/dev/.gitignore @@ -0,0 +1 @@ +!.gitignore diff --git a/doc/changes/dev/12847.bugfix.rst b/doc/changes/dev/12847.bugfix.rst new file mode 100644 index 00000000000..26301853842 --- /dev/null +++ b/doc/changes/dev/12847.bugfix.rst @@ -0,0 +1 @@ +Handle scenario where an Eyelink recording switched from binocular to monocular mode during a trial by `Scott Huberty`_ \ No newline at end of file diff --git a/doc/changes/dev/13068.bugfix.rst b/doc/changes/dev/13068.bugfix.rst new file mode 100644 index 00000000000..91795f1e730 --- /dev/null +++ b/doc/changes/dev/13068.bugfix.rst @@ -0,0 +1 @@ +Fixed ICA getting sources for concatenated raw instances, by :newcontrib:`Beige Jin`. 
\ No newline at end of file diff --git a/doc/changes/dev/13096.newfeature.rst b/doc/changes/dev/13096.newfeature.rst new file mode 100644 index 00000000000..7c5ad84a98a --- /dev/null +++ b/doc/changes/dev/13096.newfeature.rst @@ -0,0 +1 @@ +Support for preloading=False when reading .set files, by `Bruno Aristimunha`_. \ No newline at end of file diff --git a/doc/changes/dev/13109.bugfix.rst b/doc/changes/dev/13109.bugfix.rst new file mode 100644 index 00000000000..00dc5c4dfb9 --- /dev/null +++ b/doc/changes/dev/13109.bugfix.rst @@ -0,0 +1 @@ +Fix reading annotations with :func:`mne.read_annotations` from .csv files containing nanoseconds in times, and drop nanoseconds from times returned from :meth:`mne.Annotations.to_data_frame` and saved in .csv files by :meth:`mne.Annotations.save`, by `Thomas Binns`_. \ No newline at end of file diff --git a/doc/changes/dev/13112.bugfix.rst b/doc/changes/dev/13112.bugfix.rst new file mode 100644 index 00000000000..1bffca6cdef --- /dev/null +++ b/doc/changes/dev/13112.bugfix.rst @@ -0,0 +1 @@ +Fix bug with :func:`mne.io.read_raw_egi` where ``info["dev_head_t"]`` was an identity matrix instead of ``None``, by `Eric Larson`_. diff --git a/doc/changes/dev/13156.newfeature.rst b/doc/changes/dev/13156.newfeature.rst new file mode 100644 index 00000000000..067a3cd4481 --- /dev/null +++ b/doc/changes/dev/13156.newfeature.rst @@ -0,0 +1 @@ +Added support for file like objects in :func:`read_raw_bdf `, :func:`read_raw_edf ` and :func:`read_raw_gdf `, by :newcontrib:`Santi Martínez`. \ No newline at end of file diff --git a/doc/changes/dev/13176.dependency.rst b/doc/changes/dev/13176.dependency.rst new file mode 100644 index 00000000000..713ce3ba502 --- /dev/null +++ b/doc/changes/dev/13176.dependency.rst @@ -0,0 +1 @@ +New reader for Neuroscan Curry files, using the curry-python-reader module, by `Dominik Welke`_. \ No newline at end of file diff --git a/doc/changes/dev/13176.newfeature.rst b/doc/changes/dev/13176.newfeature.rst new file mode 100644 index 00000000000..dd483b214fc --- /dev/null +++ b/doc/changes/dev/13176.newfeature.rst @@ -0,0 +1 @@ +Read impedances and montage from Neuroscan Curry files, by `Dominik Welke`_. \ No newline at end of file diff --git a/doc/changes/dev/13251.bugfix.rst b/doc/changes/dev/13251.bugfix.rst new file mode 100644 index 00000000000..506d2c4b8b4 --- /dev/null +++ b/doc/changes/dev/13251.bugfix.rst @@ -0,0 +1 @@ +Read Nihon Kohden annotation file accurately (using sublog parsing), by `Tom Ma`_. diff --git a/doc/changes/dev/13259.newfeature.rst b/doc/changes/dev/13259.newfeature.rst new file mode 100644 index 00000000000..d510015e26f --- /dev/null +++ b/doc/changes/dev/13259.newfeature.rst @@ -0,0 +1,3 @@ +Implement GEDTransformer superclass that generalizes +:class:`mne.decoding.CSP`, :class:`mne.decoding.SPoC`, :class:`mne.decoding.XdawnTransformer`, +:class:`mne.decoding.SSD` and fix related bugs and inconsistencies, by `Gennadiy Belonosov`_. \ No newline at end of file diff --git a/doc/changes/dev/13307.newfeature.rst b/doc/changes/dev/13307.newfeature.rst new file mode 100644 index 00000000000..5917c49531f --- /dev/null +++ b/doc/changes/dev/13307.newfeature.rst @@ -0,0 +1 @@ +Added ``on_inside="raise"`` parameter to :func:`mne.make_forward_solution` and :func:`mne.make_forward_dipole` to control behavior when MEG sensors are inside the outer skin surface. This is useful for forward solutions that are computed with sensors just inside the outer skin surface (e.g., with some OPM coregistrations), by `Eric Larson`_. 
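The 13259 fragment above lists the GED-based transformers (CSP, SPoC, XdawnTransformer, SSD) that share a common fit/transform workflow. The sketch below only exercises the long-standing public :class:`mne.decoding.CSP` API on synthetic data to illustrate that workflow; it does not use the new superclass itself, and all data and parameter values here are illustrative assumptions, not part of the changelog.

.. code-block:: python

    # Illustrative sketch only: random data, existing public CSP API.
    import numpy as np
    from mne.decoding import CSP

    rng = np.random.default_rng(42)
    n_epochs, n_channels, n_times = 40, 8, 200
    X = rng.standard_normal((n_epochs, n_channels, n_times))
    y = np.tile([0, 1], n_epochs // 2)   # two balanced classes
    X[y == 1, 0] *= 3.0                  # give one class extra variance in channel 0

    csp = CSP(n_components=4, reg="ledoit_wolf", log=True)
    features = csp.fit_transform(X, y)   # shape: (n_epochs, n_components)
    print(features.shape)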
diff --git a/doc/changes/dev/13332.newfeature.rst new file mode 100644 index 00000000000..018dfbb9094 --- /dev/null +++ b/doc/changes/dev/13332.newfeature.rst @@ -0,0 +1,4 @@ +Implement :class:`mne.decoding.SpatialFilter` class returned by :func:`mne.decoding.get_spatial_filter_from_estimator` for +visualisation of filters and patterns for :class:`mne.decoding.LinearModel` +and additionally eigenvalues for GED-based transformers such as +:class:`mne.decoding.XdawnTransformer`, :class:`mne.decoding.CSP`, by `Gennadiy Belonosov`_. \ No newline at end of file diff --git a/doc/changes/dev/13341.bugfix.rst new file mode 100644 index 00000000000..feb489e9c85 --- /dev/null +++ b/doc/changes/dev/13341.bugfix.rst @@ -0,0 +1 @@ +Fix bug with :func:`mne.open_report` not working with old saved files, by `Eric Larson`_. \ No newline at end of file diff --git a/doc/changes/dev/13350.apichange.rst new file mode 100644 index 00000000000..b508971d10b --- /dev/null +++ b/doc/changes/dev/13350.apichange.rst @@ -0,0 +1 @@ +Add ``on_few_samples`` parameter to :func:`mne.compute_covariance` and :func:`mne.compute_raw_covariance` for controlling behavior when there are fewer samples than channels, which can lead to inaccurate covariance estimates, by :newcontrib:`Emmanuel Ferdman`. \ No newline at end of file diff --git a/doc/changes/dev/13354.bugfix.rst new file mode 100644 index 00000000000..c2bbbd337fb --- /dev/null +++ b/doc/changes/dev/13354.bugfix.rst @@ -0,0 +1 @@ +Restore ``clipping=3`` in :meth:`~mne.io.Raw.plot` (which was mistakenly changed to ``1.5``) to reduce excessive clipping of raw data, by `Clemens Brunner`_. \ No newline at end of file diff --git a/doc/changes/dev/13357.bugfix.rst new file mode 100644 index 00000000000..f2d784080b5 --- /dev/null +++ b/doc/changes/dev/13357.bugfix.rst @@ -0,0 +1 @@ +Make :func:`~mne.io.read_raw_eyelink` work with ASCII files collected with Eye Event Mode set to "HREF", by `Scott Huberty`_. \ No newline at end of file diff --git a/doc/changes/dev/13361.bugfix.rst new file mode 100644 index 00000000000..7038fc21346 --- /dev/null +++ b/doc/changes/dev/13361.bugfix.rst @@ -0,0 +1,8 @@ +The ``model`` parameter of :class:`mne.decoding.LinearModel` +will no longer be modified; use the ``model_`` attribute to access the fitted model. +To be compatible with all MNE-Python versions you can use +``getattr(clf, "model_", getattr(clf, "model"))``. +The provided ``model`` is expected to be a supervised predictor, +i.e. a classifier or regressor (or :class:`sklearn.multiclass.OneVsRestClassifier`), +otherwise an error will be raised. +by `Gennadiy Belonosov`_.
\ No newline at end of file diff --git a/doc/changes/dev/13371.bugfix.rst b/doc/changes/dev/13371.bugfix.rst new file mode 100644 index 00000000000..cb2e7dc5926 --- /dev/null +++ b/doc/changes/dev/13371.bugfix.rst @@ -0,0 +1 @@ +Fix the check in :func:`mne.make_forward_solution` that all MEG sensors are outside a spherical BEM model, by `Marijn van Vliet`_ diff --git a/doc/changes/dev/13373.other.rst b/doc/changes/dev/13373.other.rst new file mode 100644 index 00000000000..523b6df8fd1 --- /dev/null +++ b/doc/changes/dev/13373.other.rst @@ -0,0 +1 @@ +Changed "enhancement" to "new feature" in the contributing guide as enhancement is not a recognized change type, by `Shristi Baral`_ diff --git a/doc/changes/dev/13375.bugfix.rst b/doc/changes/dev/13375.bugfix.rst new file mode 100644 index 00000000000..93536b5a01c --- /dev/null +++ b/doc/changes/dev/13375.bugfix.rst @@ -0,0 +1 @@ +Fix bug with :func:`mne.grand_average` not working with :class:`mne.time_frequency.Spectrum` objects, by `Thomas Binns`_. \ No newline at end of file diff --git a/doc/changes/dev/13376.bugfix.rst b/doc/changes/dev/13376.bugfix.rst new file mode 100644 index 00000000000..d796d8cb382 --- /dev/null +++ b/doc/changes/dev/13376.bugfix.rst @@ -0,0 +1 @@ +Ensure auto-computed plot scalings are always non-zero, by `Daniel McCloy`_. \ No newline at end of file diff --git a/doc/changes/dev/13391.bugfix.rst b/doc/changes/dev/13391.bugfix.rst new file mode 100644 index 00000000000..50dc1a6e191 --- /dev/null +++ b/doc/changes/dev/13391.bugfix.rst @@ -0,0 +1 @@ +Fix :func:`~mne.viz.plot_evoked_joint` docstring to correctly document ``exclude`` parameter default as ``'bads'`` instead of ``None``, by :newcontrib:`Tharupahan Jayawardana`. \ No newline at end of file diff --git a/doc/changes/dev/13393.bugfix.rst b/doc/changes/dev/13393.bugfix.rst new file mode 100644 index 00000000000..ed6bcd10942 --- /dev/null +++ b/doc/changes/dev/13393.bugfix.rst @@ -0,0 +1 @@ +Make decoding classes sklearn-compliant, by `Gennadiy Belonosov`_. \ No newline at end of file diff --git a/doc/changes/dev/13395.bugfix.rst b/doc/changes/dev/13395.bugfix.rst new file mode 100644 index 00000000000..186e6305132 --- /dev/null +++ b/doc/changes/dev/13395.bugfix.rst @@ -0,0 +1 @@ +Fix loading problems caused by attempting to iterate floats in :func:`~mne.read_epochs_eeglab` for specific EEGLab data, by :newcontrib:`Emrecan Çelik`. \ No newline at end of file diff --git a/doc/changes/dev/13398.apichange.rst b/doc/changes/dev/13398.apichange.rst new file mode 100644 index 00000000000..8b8adc0362a --- /dev/null +++ b/doc/changes/dev/13398.apichange.rst @@ -0,0 +1 @@ +The default for :func:`mne.make_field_map` will change to ``"auto"`` in MNE-Python 1.12 (from ``(0., 0., 0.04)``), changes by :newcontrib:`Paul Anders`. diff --git a/doc/changes/dev/13400.newfeature.rst b/doc/changes/dev/13400.newfeature.rst new file mode 100644 index 00000000000..77ae89cf1df --- /dev/null +++ b/doc/changes/dev/13400.newfeature.rst @@ -0,0 +1 @@ +Add more options for the ``sphere`` parameter of :func:`mne.viz.plot_sensors`, by `Marijn van Vliet`_ diff --git a/doc/changes/dev/13407.bugfix.rst b/doc/changes/dev/13407.bugfix.rst new file mode 100644 index 00000000000..150d0bbfe3f --- /dev/null +++ b/doc/changes/dev/13407.bugfix.rst @@ -0,0 +1 @@ +Fix bug with :func:`mne.make_forward_solution` where sources were not checked to make sure they're inside the inner skull for spherical BEMs, by `Eric Larson`_. 
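For readers applying the ``model_`` change described in the 13361 fragment above, the sketch below shows the version-agnostic access pattern that the fragment itself recommends. The estimator and the synthetic data are illustrative assumptions only; the pattern works whether the installed MNE-Python stores the fitted estimator in ``model_`` (newer behavior) or fits ``model`` in place (older behavior).

.. code-block:: python

    # Minimal sketch of the recommended version-agnostic access pattern.
    import numpy as np
    from sklearn.linear_model import LogisticRegression
    from mne.decoding import LinearModel

    rng = np.random.default_rng(0)
    X = rng.standard_normal((100, 10))  # 100 samples, 10 features (synthetic)
    y = np.tile([0, 1], 50)             # two balanced classes

    clf = LinearModel(LogisticRegression())
    clf.fit(X, y)

    # Works on both older and newer MNE-Python versions:
    fitted = getattr(clf, "model_", getattr(clf, "model"))
    print(fitted.coef_.shape)  # (1, 10)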
diff --git a/doc/changes/dev/13428.bugfix.rst b/doc/changes/dev/13428.bugfix.rst new file mode 100644 index 00000000000..513bbcf697b --- /dev/null +++ b/doc/changes/dev/13428.bugfix.rst @@ -0,0 +1 @@ +Preserve event-to-epoch mapping when exporting EEGLAB .set files by `Scott Huberty`_ \ No newline at end of file diff --git a/doc/changes/dev/13435.newfeature.rst b/doc/changes/dev/13435.newfeature.rst new file mode 100644 index 00000000000..1aed60dc123 --- /dev/null +++ b/doc/changes/dev/13435.newfeature.rst @@ -0,0 +1 @@ +Add support for BDF export in :func:`mne.export.export_raw`, by `Clemens Brunner`_ \ No newline at end of file diff --git a/doc/changes/dev/13440.bugfix.rst b/doc/changes/dev/13440.bugfix.rst new file mode 100644 index 00000000000..61062ae9d8f --- /dev/null +++ b/doc/changes/dev/13440.bugfix.rst @@ -0,0 +1 @@ +Fix and update osf.io links to new format, by :newcontrib:`Johannes Herforth`. diff --git a/doc/changes/dev/13445.newfeature.rst b/doc/changes/dev/13445.newfeature.rst new file mode 100644 index 00000000000..3b915207222 --- /dev/null +++ b/doc/changes/dev/13445.newfeature.rst @@ -0,0 +1 @@ +Improve precision of fiducial clicking in ``mne coreg``, by `Eric Larson`_. diff --git a/doc/changes/dev/13448.newfeature.rst b/doc/changes/dev/13448.newfeature.rst new file mode 100644 index 00000000000..d329015520f --- /dev/null +++ b/doc/changes/dev/13448.newfeature.rst @@ -0,0 +1 @@ +Add support for Nihon Kohden EEG-1200A V01.00, by `Eric Larson`_. \ No newline at end of file diff --git a/doc/changes/dev/13451.other.rst b/doc/changes/dev/13451.other.rst new file mode 100644 index 00000000000..6a2e1247501 --- /dev/null +++ b/doc/changes/dev/13451.other.rst @@ -0,0 +1 @@ +Add a SPEC0-like policy in which minimum-supported versions for select dependencies are set to the latest minor release that was available 2 years prior, by `Thomas Binns`_ \ No newline at end of file diff --git a/doc/changes/dev/13452.other.rst b/doc/changes/dev/13452.other.rst new file mode 100644 index 00000000000..15dedc0e243 --- /dev/null +++ b/doc/changes/dev/13452.other.rst @@ -0,0 +1,3 @@ +Removed development dependencies from user-visible "extras"; they're now dependency +groups only visible to developers (and can be installed for example via +``pip install --group dev`` with pip version 25.1 or later), by `Richard Höchenberger`_ diff --git a/doc/changes/dev/13456.newfeature.rst b/doc/changes/dev/13456.newfeature.rst new file mode 100644 index 00000000000..a69172cf380 --- /dev/null +++ b/doc/changes/dev/13456.newfeature.rst @@ -0,0 +1 @@ +The ``rename_channels`` method now has an ``on_missing`` parameter to control behavior on channel mismatch, by `Stefan Appelhoff`_. diff --git a/doc/changes/dev/13458.apichange.rst b/doc/changes/dev/13458.apichange.rst new file mode 100644 index 00000000000..f2b9ec5a007 --- /dev/null +++ b/doc/changes/dev/13458.apichange.rst @@ -0,0 +1 @@ +Add ``encoding`` parameter to :func:`mne.io.read_raw_nihon` for better handling of annotation decoding, by `Tom Ma`_. diff --git a/doc/changes/dev/13460.newfeature.rst b/doc/changes/dev/13460.newfeature.rst new file mode 100644 index 00000000000..cdbf9edd0f9 --- /dev/null +++ b/doc/changes/dev/13460.newfeature.rst @@ -0,0 +1 @@ +Added the ``annotation_regex`` parameter to :func:`mne.viz.plot_raw` and :func:`mne.viz.plot_ica_sources`, allowing automatic hiding of annotations that do not match the specified regular expression. 
The same functionality is available via :meth:`mne.io.Raw.plot` and :meth:`mne.preprocessing.ICA.plot_sources`. Contributed by `Johannes Herforth`_. diff --git a/doc/changes/dev/13463.newfeature.rst b/doc/changes/dev/13463.newfeature.rst new file mode 100644 index 00000000000..4e9bb0f3a7f --- /dev/null +++ b/doc/changes/dev/13463.newfeature.rst @@ -0,0 +1 @@ +The ``combine_channels`` method now has an ``on_missing`` parameter to control behavior on missing event ids, by :newcontrib:`Michael Straube`. diff --git a/doc/changes/dev/13466.bugfix.rst b/doc/changes/dev/13466.bugfix.rst new file mode 100644 index 00000000000..6255c69b1a4 --- /dev/null +++ b/doc/changes/dev/13466.bugfix.rst @@ -0,0 +1 @@ +Fix bug with :func:`mne.add_reference_channels` not working correctly when passing more than one channel name, by `Michael Straube`_. diff --git a/doc/changes/dev/13468.bugfix.rst b/doc/changes/dev/13468.bugfix.rst new file mode 100644 index 00000000000..162ee90ff6a --- /dev/null +++ b/doc/changes/dev/13468.bugfix.rst @@ -0,0 +1 @@ +Correctly set the calibration factor in Nihon Kohden reader (which affects channel amplitudes), by `Tom Ma`_. diff --git a/doc/changes/dev/13469.bugfix.rst b/doc/changes/dev/13469.bugfix.rst new file mode 100644 index 00000000000..2ac336a2c27 --- /dev/null +++ b/doc/changes/dev/13469.bugfix.rst @@ -0,0 +1 @@ +Make :func:`mne.preprocessing.eyetracking.read_eyelink_calibration` robust to files with blank lines, by `Scott Huberty`_. diff --git a/doc/changes/dev/13470.bugfix.rst b/doc/changes/dev/13470.bugfix.rst new file mode 100644 index 00000000000..a17c377e45e --- /dev/null +++ b/doc/changes/dev/13470.bugfix.rst @@ -0,0 +1 @@ +Fix bug with :func:`mne.viz.plot_topomap` that caused unexpected channel name plotting when using a mask, by `Michael Straube`_. \ No newline at end of file diff --git a/doc/changes/dev/13478.bugfix.rst b/doc/changes/dev/13478.bugfix.rst new file mode 100644 index 00000000000..426f0ba5a91 --- /dev/null +++ b/doc/changes/dev/13478.bugfix.rst @@ -0,0 +1 @@ +Fix bug with :func:`mne.preprocessing.ICA.apply` not working when the ICA instance was fitted including marked bad channels, by `Michael Straube`_. \ No newline at end of file diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc deleted file mode 100644 index 2a4860065b4..00000000000 --- a/doc/changes/latest.inc +++ /dev/null @@ -1,71 +0,0 @@ -.. NOTE: we use cross-references to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. NOTE: changes from first-time contributors should be added to the TOP of - the relevant section (Enhancements / Bugs / API changes), and should look - like this (where xxxx is the pull request number): - - - description of enhancement/bugfix/API change (:gh:`xxxx` by - :newcontrib:`Firstname Lastname`) - - Also add a corresponding entry for yourself in doc/changes/names.inc - -.. 
_current: - -Current (1.4.dev0) ------------------- - -Enhancements -~~~~~~~~~~~~ -- Add ``:unit:`` Sphinx directive to enable use of uniform non-breaking spaces throughout the documentation (:gh:`11469` by :newcontrib:`Sawradip Saha`) -- Adjusted the algorithm used in :class:`mne.decoding.SSD` to support non-full rank data (:gh:`11458` by :newcontrib:`Thomas Binns`) -- Changed suggested type for ``ch_groups``` in `mne.viz.plot_sensors` from array to list of list(s) (arrays are still supported). (:gh:`11465` by `Hyonyoung Shin`_) -- Add support for UCL/FIL OPM data using :func:`mne.io.read_raw_fil` (:gh:`11366` by :newcontrib:`George O'Neill` and `Robert Seymour`_) -- Forward argument ``axes`` from `mne.viz.plot_sensors` to `mne.channels.DigMontage.plot` (:gh:`11470` by :newcontrib:`Jan Ebert` and `Mathieu Scheltienne`_) -- Added ability to read stimulus durations from SNIRF files when using :func:`mne.io.read_raw_snirf` (:gh:`11397` by `Robert Luke`_) -- Add :meth:`mne.Info.save` to save an :class:`mne.Info` object to a fif file (:gh:`11401` by `Alex Rockhill`_) -- Improved error message when downloads are corrupted for :func:`mne.datasets.sample.data_path` and related functions (:gh:`11407` by `Eric Larson`_) -- Add support for ``skip_by_annotation`` in :func:`mne.io.Raw.notch_filter` (:gh:`11388` by `Mainak Jas`_) -- Add support for ``output='complex'`` to :func:`mne.time_frequency.psd_array_welch` and when using ``method='welch'`` with :meth:`mne.Epochs.compute_psd` (:gh:`11556` by `Eric Larson`_) -- Slightly adjusted the window title for :func:`mne.Epochs.plot` (:gh:`11419` by `Richard Höchenberger`_ and `Daniel McCloy`_) -- Add :func:`mne.count_events` to count unique event types in a given event array (:gh:`11430` by `Clemens Brunner`_) -- Add a video to :ref:`tut-freesurfer-mne` of a brain inflating from the pial surface to aid in understanding the inflated brain (:gh:`11440` by `Alex Rockhill`_) -- Add automatic projection of sEEG contact onto the inflated surface for :meth:`mne.viz.Brain.add_sensors` (:gh:`11436` by `Alex Rockhill`_) -- Allow an image with intracranial electrode contacts (e.g. 
computed tomography) to be used without the freesurfer recon-all surfaces to locate contacts, so that it doesn't have to be downsampled to freesurfer dimensions (for microelectrodes), and show an example :ref:`ex-ieeg-micro` with :func:`mne.transforms.apply_volume_registration_points` added to aid this transform (:gh:`11567` by `Alex Rockhill`_) -- Use new :meth:`dipy.workflows.align.DiffeomorphicMap.transform_points` to transform a montage of intracranial contacts more efficiently (:gh:`11572` by `Alex Rockhill`_) - -Bugs -~~~~ -- Fix :func:`mne.time_frequency.psd_array_multitaper` docstring where the ``bandwidth`` argument was incorrectly reported as a half-bandwidth and the default value was explained incorrectly (:gh:`11479` by :newcontrib:`Tom Stone`) -- Fix bug where installation of a package depending on ``mne`` would error when done in an environment where ``setuptools`` is not present (:gh:`11454` by :newcontrib:`Arne Pelzer`) -- Fix bug where :func:`mne.preprocessing.regress_artifact` and :class:`mne.preprocessing.EOGRegression` incorrectly tracked ``picks`` (:gh:`11366` by `Eric Larson`_) -- Fix bug where channel names were not properly sanitized in :func:`mne.write_evokeds` and related functions (:gh:`11399` by `Eric Larson`_) -- Fix bug where splash screen would not always disappear (:gh:`11398` by `Eric Larson`_) -- Fix bug where having a different combination of volumes loaded into ``freeview`` caused different affines to be returned by :func:`mne.read_lta` for the same Linear Transform Array (LTA) (:gh:`11402` by `Alex Rockhill`_) -- Fix how :class:`mne.channels.DigMontage` is set when using :func:`mne.gui.locate_ieeg` so that :func:`mne.Info.get_montage` works and does not return ``None`` (:gh:`11421` by `Alex Rockhill`_) -- Fix :func:`mne.io.read_raw_edf` when reading EDF data with different sampling rates and a mix of data channels when using ``infer_types=True`` (:gh:`11427` by `Alex Gramfort`_) -- Fix how :class:`mne.channels.DigMontage` is set when using :func:`mne.preprocessing.ieeg.project_sensors_onto_brain` so that :func:`mne.Info.get_montage` works and does not return ``None`` (:gh:`11436` by `Alex Rockhill`_) -- Fix configuration folder discovery on Windows, which would fail in certain edge cases, and produce a helpful error message if discovery still fails (:gh:`11441` by `Richard Höchenberger`_) -- Make :class:`~mne.decoding.SlidingEstimator` and :class:`~mne.decoding.GeneralizingEstimator` respect the ``verbose`` argument. Now with ``verbose=False``, the progress bar is not shown during fitting, scoring, etc. (:gh:`11450` by `Mikołaj Magnuski`_) -- Fix bug with :func:`mne.gui.locate_ieeg` where Freesurfer ``?h.pial.T1`` was not recognized and suppress excess logging (:gh:`11489` by `Alex Rockhill`_) -- All functions accepting paths can now correctly handle :class:`~pathlib.Path` as input. Historically, we expected strings (instead of "proper" path objects), and only added :class:`~pathlib.Path` support in a few select places, leading to inconsistent behavior.
(:gh:`11473` and :gh:`11499` by `Mathieu Scheltienne`_) -- Fix visualization dialog compatibility with matplotlib 3.7 (:gh:`11409` by `Daniel McCloy`_ and `Eric Larson`_) -- Expand tilde (user directory) in config keys (:gh:`11537` by `Clemens Brunner`_) -- Fix bug in :func:`mne.preprocessing.compute_maxwell_basis` where using ``int_order=0`` would raise an error (:gh:`11562` by `Eric Larson`_) -- In the legacy function :func:`mne.viz.plot_epochs_psd_topomap`, the parameter ``names`` now works again (:gh:`11563` by `Daniel McCloy`_) -- Fix :func:`mne.io.read_raw` for file names containing multiple dots (:gh:`11521` by `Clemens Brunner`_) -- Fix bug in :func:`mne.export.export_raw` when exporting to EDF with a physical range set smaller than the data range (:gh:`11569` by `Mathieu Scheltienne`_) -- Fix bug in :func:`mne.concatenate_raws` where two raws could not be merged if the order of the bad channel lists did not match (:gh:`11502` by `Moritz Gerster`_) - - -API changes -~~~~~~~~~~~ -- Deprecate arguments ``kind`` and ``path`` from :func:`mne.channels.read_layout` in favor of a common argument ``fname`` (:gh:`11500` by `Mathieu Scheltienne`_) -- Change ``aligned_ct`` positional argument in :func:`mne.gui.locate_ieeg` to ``base_image`` to reflect that this can now be used with unaligned images (:gh:`11567` by `Alex Rockhill`_) -- ``mne.warp_montage_volume`` was deprecated in favor of :func:`mne.preprocessing.ieeg.warp_montage` (acts directly on points instead of using an intermediate volume) and :func:`mne.preprocessing.ieeg.make_montage_volume` (which makes a volume of ieeg contact locations which can still be useful) (:gh:`11572` by `Alex Rockhill`_) diff --git a/doc/changes/latest.inc.template b/doc/changes/latest.inc.template deleted file mode 100644 index 97287e9076c..00000000000 --- a/doc/changes/latest.inc.template +++ /dev/null @@ -1,34 +0,0 @@ -.. NOTE: we use cross-references to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. NOTE: changes from first-time contributors should be added to the TOP of - the relevant section (Enhancements / Bugs / API changes), and should look - like this (where xxxx is the pull request number): - - - description of enhancement/bugfix/API change (:gh:`xxxx` by - :newcontrib:`Firstname Lastname`) - - Also add a corresponding entry for yourself in doc/changes/names.inc - -.. _current: - -Current (1.1.dev0) ------------------- - -Enhancements -~~~~~~~~~~~~ -- None yet - -Bugs -~~~~ -- None yet - -API changes -~~~~~~~~~~~ -- None yet diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 72dbdfd4e36..709a7ebd623 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -1,543 +1,351 @@ -.. _Abram Hindle: http://softwareprocess.es - -.. _Adam Li: http://github.com/adam2392 - +.. _Aaron Earle-Richardson: https://github.com/Aaronearlerichardson +.. _Abram Hindle: https://softwareprocess.es +.. _Adam Li: https://github.com/adam2392 .. _Adeline Fecker: https://github.com/adelinefecker - .. _Adina Wagner: https://github.com/adswa - .. _Adonay Nunes: https://github.com/AdoNunes - .. _Alan Leggitt: https://github.com/leggitta - .. _Alejandro Weinstein: http://ocam.cl - .. 
_Alessandro Tonin: https://www.linkedin.com/in/alessandro-tonin-7892b046 - .. _Alex Ciok: https://github.com/alexCiok - -.. _Alex Gramfort: http://alexandre.gramfort.net - +.. _Alex Gramfort: https://alexandre.gramfort.net +.. _Alex Kiefer: https://home.alexk101.dev +.. _Alex Lepauvre: https://github.com/AlexLepauvre .. _Alex Rockhill: https://github.com/alexrockhill/ - .. _Alexander Rudiuk: https://github.com/ARudiuk - -.. _Alexandre Barachant: http://alexandre.barachant.org - -.. _Andrea Brovelli: https://andrea-brovelli.net - +.. _Alexandre Barachant: https://alexandre.barachant.org +.. _Andrea Brovelli: https://brovelli.github.io/ .. _Andreas Hojlund: https://github.com/ahoejlund - .. _Andres Rodriguez: https://github.com/infinitejest/ - .. _Andrew Dykstra: https://github.com/adykstra - +.. _Andrew Gilbert: https://github.com/adgilbert .. _Andrew Quinn: https://github.com/ajquinn - .. _Aniket Pradhan: https://github.com/Aniket-Pradhan - .. _Anna Padee: https://github.com/apadee/ - -.. _Annalisa Pascarella: https://www.dima.unige.it/~pascarel/html/cv.html - +.. _Annalisa Pascarella: https://github.com/annapasca .. _Anne-Sophie Dubarry: https://github.com/annesodub - +.. _Antoine Collas: https://www.antoinecollas.fr .. _Antoine Gauthier: https://github.com/Okamille - .. _Antti Rantala: https://github.com/Odingod - .. _Apoorva Karekal: https://github.com/apoorva6262 - .. _Archit Singhal: https://github.com/architsinghal-mriirs - .. _Arne Pelzer: https://github.com/aplzr - .. _Ashley Drew: https://github.com/ashdrew - .. _Asish Panda: https://github.com/kaichogami - .. _Austin Hurst: https://github.com/a-hurst - +.. _Beige Jin: https://github.com/BeiGeJin .. _Ben Beasley: https://github.com/musicinmybrain - +.. _Bradley Voytek: https://github.com/voytek .. _Britta Westner: https://britta-wstnr.github.io - +.. _Bruno Aristimunha: https://bruaristimunha.github.io .. _Bruno Nicenboim: https://bnicenboim.github.io - +.. _btkcodedev: https://github.com/btkcodedev .. _buildqa: https://github.com/buildqa - +.. _Carina Forster: https://github.com/CarinaFo .. _Carlos de la Torre-Ortiz: https://ctorre.me - -.. _Carina Forster: https://github.com/carinafo - .. _Cathy Nangini: https://github.com/KatiRG - .. _Chetan Gohil: https://github.com/cgohil8 - .. _Chris Bailey: https://github.com/cjayb - .. _Chris Holdgraf: https://chrisholdgraf.com - -.. _Chris Mullins: http://crmullins.com - +.. _Chris Mullins: https://crmullins.com .. _Christian Brodbeck: https://github.com/christianbrodbeck - .. _Christian O'Reilly: https://github.com/christian-oreilly - .. _Christopher Dinh: https://github.com/chdinh - .. _Chun-Hui Li: https://github.com/iamsc - .. _Clemens Brunner: https://github.com/cbrnr - .. _Cora Kim: https://github.com/kimcoco - .. _Cristóbal Moënne-Loccoz: https://github.com/cmmoenne - .. _Dan Wakeman: https://github.com/dgwakeman - -.. _Daniel Carlström Schad: https://github.com/Dod12 - +.. _Daniel Carlström Schad: https://github.com/DanielCSchad .. _Daniel Hasegan: https://daniel.hasegan.com - -.. _Daniel McCloy: http://dan.mccloy.info - +.. _Daniel McCloy: https://dan.mccloy.info .. _Daniel Strohmeier: https://github.com/joewalter - +.. _Daniel Tse: https://github.com/Xiezhibin .. _Darin Erat Sleiter: https://github.com/dsleiter - .. _David Haslacher: https://github.com/davidhaslacher - .. _David Julien: https://github.com/Swy7ch - .. _David Sabbagh: https://github.com/DavidSabbagh - .. _Demetres Kostas: https://github.com/kostasde - -.. _Denis Engemann: http://denis-engemann.de - +.. 
_Denis Engemann: https://denis-engemann.de .. _Dinara Issagaliyeva: https://github.com/dissagaliyeva - +.. _Diptyajit Das: https://github.com/dasdiptyajit .. _Dirk Gütlin: https://github.com/DiGyt - .. _Dmitrii Altukhov: https://github.com/dmalt - .. _Dominik Welke: https://github.com/dominikwelke/ - .. _Dominik Wetzel: https://github.com/schmetzler - .. _Dominique Makowski: https://dominiquemakowski.github.io/ - .. _Eberhard Eich: https://github.com/ebeich - .. _Eduard Ort: https://github.com/eort - -.. _Emily Stephen: http://github.com/emilyps14 - +.. _Emily Stephen: https://github.com/emilyps14 +.. _Emma Bailey: https://www.cbs.mpg.de/employees/bailey +.. _Emmanuel Ferdman: https://github.com/emmanuel-ferdman +.. _Emrecan Çelik: https://github.com/emrecncelik .. _Enrico Varano: https://github.com/enricovara/ - .. _Enzo Altamiranda: https://www.linkedin.com/in/enzoalt - -.. _Eric Larson: http://larsoner.com - +.. _Eric Larson: https://larsoner.com .. _Erica Peterson: https://github.com/nordme - .. _Erkka Heinila: https://github.com/Teekuningas - .. _Etienne de Montalivet: https://github.com/etiennedemontalivet - .. _Evan Hathaway: https://github.com/ephathaway - +.. _Evgenii Kalenkovich: https://github.com/kalenkovich .. _Evgeny Goldstein: https://github.com/evgenygoldstein - .. _Ezequiel Mikulan: https://github.com/ezemikulan - .. _Ezequiel Mikulan: https://github.com/ezemikulan - .. _Fahimeh Mamashli: https://github.com/fmamashli - +.. _Farzin Negahbani: https://github.com/Farzin-Negahbani .. _Federico Raimondo: https://github.com/fraimondo - .. _Federico Zamberlan: https://github.com/fzamberlan - .. _Felix Klotzsche: https://github.com/eioe - .. _Felix Raimundo: https://github.com/gamazeps - +.. _Florian Hofer: https://github.com/hofaflo +.. _Florin Pop: https://github.com/florin-pop .. _Frederik Weber: https://github.com/Frederik-D-Weber - .. _Fu-Te Wong: https://github.com/zuxfoucault - +.. _Gennadiy Belonosov: https://github.com/Genuster .. _Geoff Brookshire: https://github.com/gbrookshire - .. _George O'Neill: https://georgeoneill.github.io - -.. _Guillaume Dumas: http://www.extrospection.eu - +.. _Gonzalo Reina: https://orcid.org/0000-0003-4219-2306 +.. _Guillaume Dumas: https://mila.quebec/en/person/guillaume-dumas .. _Guillaume Favelier: https://github.com/GuillaumeFavelier - .. _Hakimeh Aslsardroud: https://www.researchgate.net/profile/Hakimeh-Pourakbari - -.. _Hamid Maymandi: https://github.com/HamidMandi - .. _Hakimeh Pourakbari: https://github.com/Hpakbari - -.. _Hari Bharadwaj: http://www.haribharadwaj.com - +.. _Hamid Maymandi: https://github.com/HamidMandi +.. _Hamza Abdelhedi: https://github.com/BabaSanfour +.. _Hari Bharadwaj: https://github.com/haribharadwaj +.. _Harrison Ritz: https://github.com/harrisonritz +.. _Hasrat Ali Arzoo: https://github.com/hasrat17 .. _Henrich Kolkhorst: https://github.com/hekolk - -.. _Hongjiang Ye: https://github.com/rubyyhj - +.. _Hongjiang Ye: https://github.com/ye-hongjiang .. _Hubert Banville: https://github.com/hubertjb - -.. _Hüseyin Orkun Elmas: https://github.com/HuseyinOrkun - .. _Hyonyoung Shin: https://github.com/mcvain - +.. _Hüseyin Orkun Elmas: https://github.com/HuseyinOrkun +.. _Ilian Azz: https://github.com/ilianAZZ .. _Ilias Machairas: https://github.com/JungleHippo - +.. _Ivan Skelin: https://github.com/ivan-skelin +.. _Ivan Zubarev: https://github.com/zubara .. _Ivana Kojcic: https://github.com/ikojcic - +.. _Ivo de Jong: https://github.com/ivopascal .. _Jaakko Leppakangas: https://github.com/jaeilepp - .. 
_Jack Zhang: https://github.com/jackz314 - +.. _Jacob Phelan: https://github.com/JacPhe +.. _Jacob Woessner: https://github.com/withmywoessner .. _Jair Montoya Martinez: https://github.com/jmontoyam - .. _Jan Ebert: https://www.jan-ebert.com/ - .. _Jan Sedivy: https://github.com/honzaseda - .. _Jan Sosulski: https://jan-sosulski.de - .. _Jan Zerfowski: https://github.com/jzerfowski - +.. _Jan-Mathijs Schoeffelen: https://github.com/schoffelen .. _Jasper van den Bosch: https://github.com/ilogue - .. _Jean-Baptiste Schiratti: https://github.com/jbschiratti - -.. _Jean-Remi King: https://github.com/kingjr - +.. _Jean-Rémi King: https://github.com/kingjr .. _Jeff Stout: https://megcore.nih.gov/index.php/Staff - .. _Jennifer Behnke: https://github.com/JKBehnke - .. _Jeroen Van Der Donckt: https://github.com/jvdd - .. _Jesper Duemose Nielsen: https://github.com/jdue - .. _Jevri Hanna: https://github.com/jshanna100 - .. _jeythekey: https://github.com/jeythekey - .. _Joan Massich: https://github.com/massich - .. _Johann Benerradi: https://github.com/HanBnrd - +.. _Johannes Herforth: https://herforth.net .. _Johannes Niediek: https://github.com/jniediek - .. _John Samuelsson: https://github.com/johnsam7 - .. _John Veillette: https://psychology.uchicago.edu/directory/john-veillette - .. _Jon Houck: https://www.mrn.org/people/jon-m.-houck/principal-investigators - .. _Jona Sassenhagen: https://github.com/jona-sassenhagen - .. _Jonathan Kuziek: https://github.com/kuziekj - .. _Jordan Drew: https://github.com/jadrew43 - .. _Jose Alanis: https://github.com/JoseAlanis - .. _Joshua Bear: https://github.com/joshbear - +.. _Joshua Calder-Travis: https://github.com/jCalderTravis .. _Joshua Teves: https://github.com/jbteves - .. _Judy D Zhu: https://github.com/JD-Zhu - .. _Juergen Dammers: https://github.com/jdammers - .. _Jukka Nenonen: https://www.linkedin.com/pub/jukka-nenonen/28/b5a/684 - .. _Jussi Nurminen: https://github.com/jjnurminen - .. _Kaisu Lankinen: http://bishoplab.berkeley.edu/Kaisu.html - -.. _kalenkovich: https://github.com/kalenkovich - .. _Katarina Slama: https://github.com/katarinaslama - +.. _Katia Al-Amir: https://github.com/katia-sentry .. _Keith Doelling: https://github.com/kdoelling1919 - .. _Kostiantyn Maksymenko: https://github.com/makkostya - +.. _Kristijan Armeni: https://github.com/kristijanarmeni .. _Kyle Mathewson: https://github.com/kylemath - .. _Larry Eisenman: https://github.com/lneisenman - .. _Lau Møller Andersen: https://github.com/ualsbombe - -.. _Laura Gwilliams: http://lauragwilliams.github.io - +.. _Laura Gwilliams: https://lauragwilliams.github.io +.. _Laurent Le Mentec: https://github.com/LaurentLM .. _Leonardo Barbosa: https://github.com/noreun - +.. _Leonardo Rochael Almeida: https://github.com/leorochael .. _Liberty Hamilton: https://github.com/libertyh - .. _Lorenzo Desantis: https://github.com/lorenzo-desantis/ - .. _Lukas Breuer: https://www.researchgate.net/profile/Lukas-Breuer-2 - .. _Lukas Gemein: https://github.com/gemeinl - -.. _Lukáš Hejtmánek: https://github.com/hejtmy - .. _Luke Bloy: https://www.research.chop.edu/imaging/team - +.. _Lukáš Hejtmánek: https://github.com/hejtmy .. _Lx37: https://github.com/Lx37 - .. _Mads Jensen: https://github.com/MadsJensen - .. _Maggie Clarke: https://github.com/mdclarke - .. _Mainak Jas: https://jasmainak.github.io - +.. _Maksym Balatsko: https://github.com/mbalatsko .. _Marcin Koculak: https://github.com/mkoculak - .. _Marian Dovgialo: https://github.com/mdovgialo - .. 
_Marijn van Vliet: https://github.com/wmvanvliet - .. _Mark Alexander Henney: https://github.com/henneysq - -.. _Mark Wronkiewicz: https://ml.jpl.nasa.gov/people/wronkiewicz/wronkiewicz.html - +.. _Mark Wronkiewicz: https://github.com/wronk .. _Marmaduke Woodman: https://github.com/maedoc - .. _Martin Billinger: https://github.com/mbillingr - .. _Martin Luessi: https://github.com/mluessi - +.. _Martin Oberg: https://github.com/obergmartin .. _Martin Schulz: https://github.com/marsipu - .. _Mathieu Scheltienne: https://github.com/mscheltienne - .. _Mathurin Massias: https://mathurinm.github.io/ - .. _Mats van Es: https://github.com/matsvanes - .. _Matt Boggess: https://github.com/mattboggess - .. _Matt Courtemanche: https://github.com/mjcourte - .. _Matt Sanderson: https://github.com/monkeyman192 - .. _Matteo Anelli: https://github.com/matteoanelli - .. _Matthias Dold: https://matthiasdold.de - .. _Matthias Eberlein: https://github.com/MatthiasEb - +.. _Matti Hämäläinen: https://research.aalto.fi/en/persons/matti-h%C3%A4m%C3%A4l%C3%A4inen/ .. _Matti Toivonen: https://github.com/mattitoi - .. _Mauricio Cespedes Tenorio: https://github.com/mcespedes99 - +.. _Michael Straube: https://github.com/mistraube +.. _Michal Žák: https://github.com/michalrzak .. _Michiru Kaneda: https://github.com/rcmdnk - .. _Mikołaj Magnuski: https://github.com/mmagnuski - -.. _Milan Rybář: http://milanrybar.cz - +.. _Milan Rybář: https://milanrybar.cz .. _Mingjian He: https://github.com/mh105 - .. _Mohammad Daneshzand: https://github.com/mdaneshzand - +.. _Mojackhak: https://github.com/Mojackhak .. _Moritz Gerster: https://github.com/moritz-gerster - +.. _Motofumi Fushimi: https://github.com/motofumi-fushimi/motofumi-fushimi.github.io +.. _Nabil Alibou: https://github.com/nabilalibou .. _Natalie Klein: https://github.com/natalieklein - .. _Nathalie Gayraud: https://github.com/ngayraud - .. _Naveen Srinivasan: https://github.com/naveensrinivasan - -.. _Nick Foti: http://nfoti.github.io - -.. _Nick Ward: http://www.ucl.ac.uk/ion/departments/sobell/Research/NWard - +.. _Nick Foti: https://nfoti.github.io +.. _Nick Ward: https://profiles.ucl.ac.uk/10827-nick-ward .. _Nicolas Barascud: https://github.com/nbara - +.. _Nicolas Fourcaud-Trocmé: https://www.crnl.fr/fr/user/316 +.. _Niels Focke: https://neurologie.umg.eu/forschung/arbeitsgruppen/epilepsie-und-bildgebungsforschung .. _Niklas Wilming: https://github.com/nwilming - .. _Nikolai Chapochnikov: https://github.com/chapochn - +.. _Nikolai Kapralov: https://github.com/ctrltz .. _Nikolas Chalas: https://github.com/Nichalas - +.. _Noah Markowitz: https://github.com/nmarkowitz .. _Okba Bekhelifi: https://github.com/okbalefthanded - -.. _Olaf Hauk: http://www.neuroscience.cam.ac.uk/directory/profile.php?olafhauk - +.. _Olaf Hauk: https://neuroscience.cam.ac.uk/member/olafhauk +.. _Ole Jensen: https://www.psy.ox.ac.uk/people/ole-jensen .. _Oleh Kozynets: https://github.com/OlehKSS - +.. _Pablo Mainar: https://github.com/pablomainar .. _Pablo-Arias: https://github.com/Pablo-Arias - .. _Padma Sundaram: https://www.nmr.mgh.harvard.edu/user/8071 - +.. _Paul Anders: https://github.com/Mettphysik .. _Paul Pasler: https://github.com/ppasler - .. _Paul Roujansky: https://github.com/paulroujansky - .. _Pavel Navratil: https://github.com/navrpa13 - .. _Peter Molfese: https://github.com/pmolfese - .. _Phillip Alday: https://palday.bitbucket.io - .. _Pierre Ablin: https://pierreablin.com - +.. _Pierre Guetschel: https://github.com/PierreGtch .. 
_Pierre-Antoine Bannier: https://github.com/PABannier - +.. _Ping-Keng Jao: https://github.com/nafraw +.. _Proloy Das: https://github.com/proloyd +.. _Qian Chu: https://github.com/qian-chu .. _Qianliang Li: https://www.dtu.dk/english/service/phonebook/person?id=126774 - +.. _Quentin Barthélemy: https://github.com/qbarthelemy .. _Quentin Bertrand: https://github.com/QB3 - .. _Qunxi Dong: https://github.com/dongqunxi - .. _Rahul Nadkarni: https://github.com/rahuln - .. _Ram Pari: https://github.com/ramkpari - .. _Ramiro Gatti: https://github.com/ragatti - .. _ramonapariciog: https://github.com/ramonapariciog - -.. _Rasmus Zetter: https://people.aalto.fi/rasmus.zetter - +.. _Rasmus Aagaard: https://github.com/rasgaard +.. _Rasmus Zetter: https://github.com/rzetter .. _Reza Nasri: https://github.com/rznas - .. _Reza Shoorangiz: https://github.com/rezashr - .. _Richard Höchenberger: https://github.com/hoechenberger - .. _Richard Koehler: https://github.com/richardkoehler - +.. _Richard Scholz: https://github.com/scholzri .. _Riessarius Stargardsky: https://github.com/Riessarius - .. _Roan LaPlante: https://github.com/aestrivex - .. _Robert Luke: https://github.com/rob-luke - .. _Robert Seymour: https://neurofractal.github.io - .. _Romain Derollepot: https://github.com/rderollepot - .. _Romain Trachel: https://fr.linkedin.com/in/trachelr - -.. _Roman Goj: http://romanmne.blogspot.co.uk - -.. _Ross Maddox: https://www.urmc.rochester.edu/labs/maddox-lab.aspx - +.. _Roman Goj: https://romanmne.blogspot.co.uk +.. _Rongfei Jin: https://github.com/greasycat +.. _Ross Maddox: https://medicine.umich.edu/dept/khri/ross-maddox-phd .. _Rotem Falach: https://github.com/Falach - +.. _Roy Eric Wieske: https://github.com/Randomidous +.. _Sammi Chekroud: https://github.com/schekroud .. _Samu Taulu: https://phys.washington.edu/people/samu-taulu - .. _Samuel Deslauriers-Gauthier: https://github.com/sdeslauriers - +.. _Samuel Louviot: https://github.com/Sam54000 .. _Samuel Powell: https://github.com/samuelpowell - .. _Santeri Ruuskanen: https://github.com/ruuskas - -.. _Sara Sommariva: http://www.dima.unige.it/~sommariva/ - -.. _Sawradip Saha: http://sawradip.github.io/ - +.. _Santi Martínez: https://github.com/szz-dvl +.. _Sara Sommariva: https://github.com/sarasommariva +.. _Sawradip Saha: https://sawradip.github.io/ .. _Scott Huberty: https://orcid.org/0000-0003-2637-031X - -.. _Sebastiaan Mathot: http://www.cogsci.nl/smathot - +.. _Sebastiaan Mathot: https://www.cogsci.nl/smathot .. _Sebastian Castano: https://github.com/jscastanoc - +.. _Sebastian Jentschke: https://github.com/sjentsch .. _Sebastian Major: https://github.com/major-s - -.. _Sébastien Marti: https://www.researchgate.net/profile/Sebastien-Marti - .. _Sena Er: https://github.com/sena-neuro - .. _Senwen Deng: https://snwn.de - +.. _Seyed Yahya Shirazi: https://neuromechanist.github.io .. _Sheraz Khan: https://github.com/SherazKhan - +.. _Shresth Keshari: https://github.com/shresth-keshari +.. _Shristi Baral: https://github.com/shristibaral .. _Silvia Cotroneo: https://github.com/sfc-neuro - .. _Simeon Wong: https://github.com/dtxe - -.. _Simon Kern: https://github.com/skjerns - -.. _Simon Kornblith: http://simonster.com - +.. _Simon Kern: https://skjerns.de +.. _Simon Kornblith: https://simonster.com +.. _Simon M. Hofmann: https://github.com/SHEscher .. _Sondre Foslien: https://github.com/sondrfos - .. _Sophie Herbst: https://github.com/SophieHerbst - .. _Stanislas Chambon: https://github.com/Slasnista - -.. 
_Stefan Appelhoff: http://stefanappelhoff.com - +.. _Stefan Appelhoff: https://stefanappelhoff.com .. _Stefan Repplinger: https://github.com/stfnrpplngr - +.. _Steinn Hauser Magnusson: https://github.com/steinnhauser .. _Steven Bethard: https://github.com/bethard - .. _Steven Bierer: https://github.com/neurolaunch - .. _Steven Gutstein: https://github.com/smgutstein - .. _Sumalyo Datta: https://github.com/Sumalyo - .. _Susanna Aro: https://www.linkedin.com/in/susanna-aro - .. _Svea Marie Meyer: https://github.com/SveaMeyer13 - +.. _Sébastien Marti: https://www.researchgate.net/profile/Sebastien-Marti .. _T. Wang: https://github.com/twang5 - -.. _Tal Linzen: http://tallinzen.net/ - -.. _Teon Brooks: https://teonbrooks.com - -.. _Theodore Papadopoulo: https://github.com/papadop - +.. _Tal Linzen: https://tallinzen.net/ +.. _Teon Brooks: https://github.com/teonbrooks +.. _Tharupahan Jayawardana: https://github.com/tharu-jwd .. _Thomas Binns: https://github.com/tsbinns - .. _Thomas Hartmann: https://github.com/thht - .. _Thomas Radman: https://github.com/tradman - +.. _Théodore Papadopoulo: https://github.com/papadop .. _Timothy Gates: https://au.linkedin.com/in/tim-gates-0528a4199 - +.. _Timur Sokhin: https://github.com/Qwinpin .. _Tod Flak: https://github.com/todflak - .. _Tom Ma: https://github.com/myd7349 - .. _Tom Stone: https://github.com/tomdstone - .. _Tommy Clausner: https://github.com/TommyClausner - -.. _Toomas Erik Anijärv: http://www.toomaserikanijarv.com/ - +.. _Toomas Erik Anijärv: https://www.toomaserikanijarv.com/ .. _Tristan Stenner: https://github.com/tstenner/ - .. _Tziona NessAiver: https://github.com/TzionaN - +.. _user27182: https://github.com/user27182 .. _Valerii Chirkov: https://github.com/vagechirkov - +.. _Velu Prabhakar Kumaravel: https://github.com/vpKumaravel .. _Victor Ferat: https://github.com/vferat - .. _Victoria Peterson: https://github.com/vpeterson - +.. _Wei Xu: https://github.com/psyxw +.. _Will Turner: https://bootstrapbill.github.io +.. _Xabier de Zuazo: https://github.com/zuazo .. _Xiaokai Xia: https://github.com/dddd1007 - .. _Yaroslav Halchenko: http://haxbylab.dartmouth.edu/ppl/yarik.html - +.. _Yiping Zuo: https://github.com/frostime +.. _Yixiao Shen: https://github.com/SYXiao2002 .. _Yousra Bekhti: https://www.linkedin.com/pub/yousra-bekhti/56/886/421 - .. _Yu-Han Luo: https://github.com/yh-luo - .. _Zhi Zhang: https://github.com/tczhangzhi/ +.. _Ziyi ZENG: https://github.com/ZiyiTsang +.. _Zvi Baratz: https://github.com/ZviBaratz diff --git a/doc/changes/0.1.inc b/doc/changes/v0.1.rst similarity index 100% rename from doc/changes/0.1.inc rename to doc/changes/v0.1.rst diff --git a/doc/changes/v0.10.rst b/doc/changes/v0.10.rst new file mode 100644 index 00000000000..d237da49930 --- /dev/null +++ b/doc/changes/v0.10.rst @@ -0,0 +1,148 @@ +.. 
_changes_0_10: + +Version 0.10 (2015-10-26) +------------------------- + +Changelog +~~~~~~~~~ + +- Add support for generalized M-way repeated measures ANOVA for fully balanced designs with :func:`mne.stats.f_mway_rm` by `Denis Engemann`_ + +- Add epochs browser to interactively view and manipulate epochs with :func:`mne.viz.plot_epochs` by `Jaakko Leppakangas`_ + +- Speed up TF-MxNE inverse solver with block coordinate descent by `Daniel Strohmeier`_ and `Yousra Bekhti`_ + +- Speed up zero-phase overlap-add (default) filtering by a factor of up to 2 using linearity by `Ross Maddox`_ and `Eric Larson`_ + +- Add support for scaling and adjusting the number of channels/time per view by `Jaakko Leppakangas`_ + +- Add support to toggle the show/hide state of all sections with a single keypress ('t') in :class:`mne.Report` by `Mainak Jas`_ + +- Add support for BEM model creation :func:`mne.make_bem_model` by `Eric Larson`_ + +- Add support for BEM solution computation :func:`mne.make_bem_solution` by `Eric Larson`_ + +- Add ICA plotters for raw and epoch components by `Jaakko Leppakangas`_ + +- Add new object ``mne.decoding.TimeDecoding`` for decoding sensors' evoked response across time by `Jean-Rémi King`_ + +- Add command ``mne freeview_bem_surfaces`` to quickly check BEM surfaces with Freeview by `Alex Gramfort`_. + +- Add support for splitting epochs into multiple files in :func:`mne.Epochs.save` by `Mainak Jas`_ and `Alex Gramfort`_ + +- Add support for jointly resampling a raw object and event matrix to avoid issues with resampling status channels by `Marijn van Vliet`_ + +- Add new method :class:`mne.preprocessing.Xdawn` for denoising and decoding of ERP/ERF by `Alexandre Barachant`_ + +- Add support for plotting patterns/filters in :class:`mne.decoding.CSP` and :class:`mne.decoding.LinearModel` by `Romain Trachel`_ + +- Add new object :class:`mne.decoding.LinearModel` for decoding M/EEG data and interpreting coefficients of linear models with patterns attribute by `Romain Trachel`_ and `Alex Gramfort`_ + +- Add support to append new channels to an object from a list of other objects by `Chris Holdgraf`_ + +- Add interactive plotting of topomap from time-frequency representation by `Jaakko Leppakangas`_ + +- Add ``plot_topo`` method to ``Evoked`` object by `Jaakko Leppakangas`_ + +- Add fetcher :mod:`mne.datasets.brainstorm ` for datasets used by Brainstorm in their tutorials by `Mainak Jas`_ + +- Add interactive plotting of single trials by right clicking on channel name in epochs browser by `Jaakko Leppakangas`_ + +- New logos and logo generation script by `Daniel McCloy`_ + +- Add ability to plot topomap with a "skirt" (channels outside of the head circle) by `Marijn van Vliet`_ + +- Add multiple options to ICA infomax and extended infomax algorithms (number of subgaussian components, computation of bias, iteration status printing), enabling equivalent computations to those performed by EEGLAB by `Jair Montoya Martinez`_ + +- Add :func:`mne.Epochs.apply_baseline` method to ``Epochs`` objects by `Teon Brooks`_ + +- Add ``preload`` argument to :func:`mne.read_epochs` to enable on-demand reads from disk by `Eric Larson`_ + +- Big rewrite of simulation module by `Yousra Bekhti`_, `Mark Wronkiewicz`_, `Eric Larson`_ and `Alex Gramfort`_. Allows to simulate raw with artifacts (ECG, EOG) and evoked data, exploiting the forward solution. 
See :func:`mne.simulation.simulate_raw`, :func:`mne.simulation.simulate_evoked` and :func:`mne.simulation.simulate_sparse_stc` + +- Add :func:`mne.Epochs.load_data` method to :class:`mne.Epochs` by `Teon Brooks`_ + +- Add support for drawing topomaps by selecting an area in :func:`mne.Evoked.plot` by `Jaakko Leppakangas`_ + +- Add support for finding peaks in evoked data in :func:`mne.Evoked.plot_topomap` by `Jona Sassenhagen`_ and `Jaakko Leppakangas`_ + +- Add source space morphing in :func:`morph_source_spaces` and :func:`SourceEstimate.to_original_src` by `Eric Larson`_ and `Denis Engemann`_ + +- Adapt ``corrmap`` function (Viola et al. 2009) to semi-automatically detect similar ICs across data sets by `Jona Sassenhagen`_ and `Denis Engemann`_ and `Eric Larson`_ + +- Clarify docstring for :class:`mne.preprocessing.ICA` by `jeythekey`_ + +- New ``mne flash_bem`` command to compute BEM surfaces from Flash MRI images by `Lorenzo Desantis`_, `Alex Gramfort`_ and `Eric Larson`_. See :func:`mne.bem.make_flash_bem`. + +- New gfp parameter in :func:`mne.Evoked.plot` method to display Global Field Power (GFP) by `Eric Larson`_. + +- Add ``mne.Report.add_slider_to_section`` methods to :class:`mne.Report` by `Teon Brooks`_ + +BUG +~~~ + +- Fix ``mne.io.add_reference_channels`` not setting ``info[nchan]`` correctly by `Federico Raimondo`_ + +- Fix ``mne.stats.bonferroni_correction`` reject mask output to use corrected p-values by `Denis Engemann`_ + +- Fix FFT filter artifacts when using short windows in overlap-add by `Eric Larson`_ + +- Fix picking channels from forward operator could return a channel ordering different from ``info['chs']`` by `Chris Bailey`_ + +- Fix dropping of events after downsampling stim channels by `Marijn van Vliet`_ + +- Fix scaling in ``mne.viz.utils._setup_vmin_vmax`` by `Jaakko Leppakangas`_ + +- Fix order of component selection in :class:`mne.decoding.CSP` by `Clemens Brunner`_ + +API +~~~ + +- Rename and deprecate ``mne.viz.plot_topo`` for ``mne.viz.plot_evoked_topo`` by `Jaakko Leppakangas`_ + +- Deprecated ``mne.decoding.transformer.ConcatenateChannels`` and replaced by ``mne.decoding.EpochsVectorizer`` by `Romain Trachel`_ + +- Deprecated ``lws`` and renamed ``ledoit_wolf`` for the ``reg`` argument in :class:`mne.decoding.CSP` by `Romain Trachel`_ + +- Redesigned and rewrote :meth:`mne.Epochs.plot` (no backwards compatibility) during the GSOC 2015 by `Jaakko Leppakangas`_, `Mainak Jas`_, `Federico Raimondo`_ and `Denis Engemann`_ + +- Deprecated and renamed ``mne.viz.plot_image_epochs`` for ``mne.plot.plot_epochs_image`` by `Teon Brooks`_ + +- ``picks`` argument has been added to :func:`mne.time_frequency.tfr_morlet`, :func:`mne.time_frequency.tfr_multitaper` by `Teon Brooks`_ + +- ``mne.io.Raw.preload_data`` has been deprecated for :func:`mne.io.Raw.load_data` by `Teon Brooks`_ + +- ``RawBrainVision`` objects now always have event channel ``'STI 014'``, and recordings with no events will have this channel set to zero by `Eric Larson`_ + +Authors +~~~~~~~ + +The committer list for this release is the following (preceded by number of commits): + +* 273 Eric Larson +* 270 Jaakko Leppakangas +* 194 Alexandre Gramfort +* 128 Denis A. Engemann +* 114 Jona Sassenhagen +* 107 Mark Wronkiewicz +* 97 Teon Brooks +* 81 Lorenzo De Santis +* 55 Yousra Bekhti +* 54 Jean-Rémi King +* 48 Romain Trachel +* 45 Mainak Jas +* 40 Alexandre Barachant +* 32 Marijn van Vliet +* 26 Jair Montoya +* 22 Chris Holdgraf +* 16 Christopher J. 
Bailey +* 7 Christian Brodbeck +* 5 Natalie Klein +* 5 Fede Raimondo +* 5 Alan Leggitt +* 5 Roan LaPlante +* 5 Ross Maddox +* 4 Dan G. Wakeman +* 3 Daniel McCloy +* 3 Daniel Strohmeier +* 1 Jussi Nurminen diff --git a/doc/changes/v0.11.rst b/doc/changes/v0.11.rst new file mode 100644 index 00000000000..218508e86fb --- /dev/null +++ b/doc/changes/v0.11.rst @@ -0,0 +1,92 @@ +.. _changes_0_11: + +Version 0.11 (2015-12-24) +------------------------- + +Changelog +~~~~~~~~~ + +- Maxwell filtering (SSS) implemented in :func:`mne.preprocessing.maxwell_filter` by `Mark Wronkiewicz`_ as part of Google Summer of Code, with help from `Samu Taulu`_, `Jukka Nenonen`_, and `Jussi Nurminen`_. Our implementation includes support for: + + - Fine calibration + + - Cross-talk correction + + - Temporal SSS (tSSS) + + - Head position translation + + - Internal component regularization + +- Compensation for movements using Maxwell filtering on epoched data in :func:`mne.epochs.average_movements` by `Eric Larson`_ and `Samu Taulu`_ + +- Add reader for Nicolet files in :func:`mne.io.read_raw_nicolet` by `Jaakko Leppakangas`_ + +- Add FIFF persistence for ICA labels by `Denis Engemann`_ + +- Display ICA labels in :func:`mne.viz.plot_ica_scores` and :func:`mne.viz.plot_ica_sources` (for evoked objects) by `Denis Engemann`_ + +- Plot spatially color coded lines in :func:`mne.Evoked.plot` by `Jona Sassenhagen`_ and `Jaakko Leppakangas`_ + +- Add reader for CTF data in :func:`mne.io.read_raw_ctf` by `Eric Larson`_ + +- Add support for Brainvision v2 in :func:`mne.io.read_raw_brainvision` by `Teon Brooks`_ + +- Improve speed of generalization across time ``mne.decoding.GeneralizationAcrossTime`` decoding up to a factor of seven by `Jean-Rémi King`_ and `Federico Raimondo`_ and `Denis Engemann`_. + +- Add the explained variance for each principal component, ``explained_var``, key to the :class:`mne.Projection` by `Teon Brooks`_ + +- Added methods ``mne.Epochs.add_eeg_average_proj``, ``mne.io.Raw.add_eeg_average_proj``, and ``mne.Evoked.add_eeg_average_proj`` to add an average EEG reference. + +- Add reader for EEGLAB data in :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` by `Mainak Jas`_ + +BUG +~~~ + +- Fix bug that prevented homogeneous bem surfaces to be displayed in HTML reports by `Denis Engemann`_ + +- Added safeguards against ``None`` and negative values in reject and flat parameters in :class:`mne.Epochs` by `Eric Larson`_ + +- Fix train and test time window-length in ``mne.decoding.GeneralizationAcrossTime`` by `Jean-Rémi King`_ + +- Added lower bound in :func:`mne.stats.linear_regression` on p-values ``p_val`` (and resulting ``mlog10_p_val``) using double floating point arithmetic limits by `Eric Larson`_ + +- Fix channel name pick in :func:`mne.Evoked.get_peak` method by `Alex Gramfort`_ + +- Fix drop percentages to take into account ``ignore`` option in :func:`mne.viz.plot_drop_log` and :func:`mne.Epochs.plot_drop_log` by `Eric Larson`_. + +- :class:`mne.EpochsArray` no longer has an average EEG reference silently added (but not applied to the data) by default. Use ``mne.EpochsArray.add_eeg_ref`` to properly add one. 
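As a rough illustration of the Maxwell filtering options listed in the 0.11 changelog above (fine calibration, cross-talk correction, tSSS, head position translation), here is a minimal sketch; the file names and destination are placeholders, and the keyword arguments follow the current :func:`mne.preprocessing.maxwell_filter` signature.

.. code-block:: python

    import mne

    raw = mne.io.read_raw_fif("raw.fif", preload=True)  # placeholder file name
    raw_sss = mne.preprocessing.maxwell_filter(
        raw,
        calibration="sss_cal.dat",     # optional fine-calibration file
        cross_talk="ct_sparse.fif",    # optional cross-talk correction file
        st_duration=10.0,              # enables temporal SSS (tSSS) with a 10 s window
        destination=(0.0, 0.0, 0.04),  # translate to a common head position (meters)
    )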
+ +- Fix :func:`mne.io.read_raw_ctf` to read ``n_samp_tot`` instead of ``n_samp`` by `Jaakko Leppakangas`_ + +API +~~~ + +- :func:`mne.io.read_raw_brainvision` now has ``event_id`` argument to assign non-standard trigger events to a trigger value by `Teon Brooks`_ + +- :func:`mne.read_epochs` now has ``add_eeg_ref=False`` by default, since average EEG reference can be added before writing or after reading using the method ``mne.Epochs.add_eeg_ref``. + +- :class:`mne.EpochsArray` no longer has an average EEG reference silently added (but not applied to the data) by default. Use ``mne.EpochsArray.add_eeg_average_proj`` to properly add one. + +Authors +~~~~~~~ + +The committer list for this release is the following (preceded by number of commits): + +* 171 Eric Larson +* 117 Jaakko Leppakangas +* 58 Jona Sassenhagen +* 52 Mainak Jas +* 46 Alexandre Gramfort +* 33 Denis A. Engemann +* 28 Teon Brooks +* 24 Clemens Brunner +* 23 Christian Brodbeck +* 15 Mark Wronkiewicz +* 10 Jean-Rémi King +* 5 Marijn van Vliet +* 3 Fede Raimondo +* 2 Alexander Rudiuk +* 2 emilyps14 +* 2 lennyvarghese +* 1 Marian Dovgialo diff --git a/doc/changes/v0.12.rst b/doc/changes/v0.12.rst new file mode 100644 index 00000000000..b48b80bf2d0 --- /dev/null +++ b/doc/changes/v0.12.rst @@ -0,0 +1,211 @@ +.. _changes_0_12: + +Version 0.12 (2016-05-10) +------------------------- + +Changelog +~~~~~~~~~ + +- Add ``overlay_times`` parameter to :func:`mne.viz.plot_epochs_image` to be able to display for example reaction times on top of the images, by `Alex Gramfort`_ + +- Animation for evoked topomap in :func:`mne.Evoked.animate_topomap` by `Jaakko Leppakangas`_ + +- Make :func:`mne.channels.find_layout` more robust for KIT systems in the presence of bad or missing channels by `Jaakko Leppakangas`_ + +- Add raw movement compensation to :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ + +- Add :class:`mne.Annotations` for for annotating segments of raw data by `Jaakko Leppakangas`_ + +- Add reading of .fif file montages by `Eric Larson`_ + +- Add system config utility :func:`mne.sys_info` by `Eric Larson`_ + +- Automatic cross-validation and scoring metrics in ``mne.decoding.GeneralizationAcrossTime``, by `Jean-Rémi King`_ + +- ``mne.decoding.GeneralizationAcrossTime`` accepts non-deterministic cross-validations, by `Jean-Rémi King`_ + +- Add plotting RMS of gradiometer pairs in :func:`mne.viz.plot_evoked_topo` by `Jaakko Leppakangas`_ + +- Add regularization methods to :func:`mne.compute_raw_covariance` by `Eric Larson`_. + +- Add command ``mne show_info`` to quickly show the measurement info from a .fif file from the terminal by `Alex Gramfort`_. + +- Add creating forward operator for dipole object :func:`mne.make_forward_dipole` by `Chris Bailey`_ + +- Add reading and estimation of fixed-position dipole time courses (similar to Elekta ``xfit``) using :func:`mne.read_dipole` and :func:`mne.fit_dipole` by `Eric Larson`_. + +- Accept ``mne.decoding.GeneralizationAcrossTime``'s ``scorer`` parameter to be a string that refers to a scikit-learn_ metric scorer by `Asish Panda`_. + +- Add method :func:`mne.Epochs.plot_image` calling :func:`mne.viz.plot_epochs_image` for better usability by `Asish Panda`_. 
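The :class:`mne.Annotations` entry above can be made concrete with a short sketch. The onsets, durations and labels are invented for illustration, ``raw`` is assumed to be an already-loaded :class:`mne.io.Raw` instance, and the present-day ``set_annotations`` method is used rather than the 0.12-era attribute assignment.

.. code-block:: python

    import mne

    annotations = mne.Annotations(
        onset=[10.0, 55.0],   # seconds
        duration=[2.0, 1.5],
        description=["bad_blink", "bad_movement"],
    )
    # descriptions starting with "bad" let downstream epoching reject those spans
    raw.set_annotations(annotations)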
+ +- Add :func:`mne.io.read_raw_cnt` for reading Neuroscan CNT files by `Jaakko Leppakangas`_ + +- Add ``decim`` parameter to ``mne.time_frequency.cwt_morlet``, by `Jean-Rémi King`_ + +- Add method :func:`mne.Epochs.plot_topo_image` by `Jaakko Leppakangas`_ + +- Add the ability to read events when importing raw EEGLAB files, by `Jona Sassenhagen`_. + +- Add function :func:`mne.viz.plot_sensors` and methods :func:`mne.Epochs.plot_sensors`, :func:`mne.io.Raw.plot_sensors` and :func:`mne.Evoked.plot_sensors` for plotting sensor positions and :func:`mne.viz.plot_layout` and :func:`mne.channels.Layout.plot` for plotting layouts by `Jaakko Leppakangas`_ + +- Add epoch rejection based on annotated segments by `Jaakko Leppakangas`_ + +- Add option to use new-style MEG channel names in ``mne.read_selection`` by `Eric Larson`_ + +- Add option for ``proj`` in :class:`mne.EpochsArray` by `Eric Larson`_ + +- Enable the usage of :func:`mne.viz.plot_topomap` with an :class:`mne.Info` instance for location information, by `Jona Sassenhagen`_. + +- Add support for electrocorticography (ECoG) channel type by `Eric Larson`_ + +- Add option for ``first_samp`` in :func:`mne.make_fixed_length_events` by `Jon Houck`_ + +- Add ability to auto-scale channel types for :func:`mne.viz.plot_raw` and :func:`mne.viz.plot_epochs` and corresponding object plotting methods by `Chris Holdgraf`_ + +BUG +~~~ + +- ``mne.time_frequency.compute_raw_psd``, ``mne.time_frequency.compute_epochs_psd``, ``mne.time_frequency.psd_multitaper``, and ``mne.time_frequency.psd_welch`` no longer remove rows/columns of the SSP matrix before applying SSP projectors when picks are provided by `Chris Holdgraf`_. + +- :func:`mne.Epochs.plot_psd` no longer calls a Welch PSD, and instead uses a Multitaper method which is more appropriate for epochs. Flags for this function are passed to ``mne.time_frequency.psd_multitaper`` by `Chris Holdgraf`_ + +- Time-cropping functions (e.g., :func:`mne.Epochs.crop`, :func:`mne.Evoked.crop`, :func:`mne.io.Raw.crop`, :func:`mne.SourceEstimate.crop`) made consistent with behavior of ``tmin`` and ``tmax`` of :class:`mne.Epochs`, where nearest sample is kept. For example, for MGH data acquired with ``sfreq=600.614990234``, constructing ``Epochs(..., tmin=-1, tmax=1)`` has bounds ``+/-1.00064103``, and now ``epochs.crop(-1, 1)`` will also have these bounds (previously they would have been ``+/-0.99897607``). Time cropping functions also no longer use relative tolerances when determining the boundaries. These changes have minor effects on functions that use cropping under the hood, such as :func:`mne.compute_covariance` and ``mne.connectivity.spectral_connectivity``. Changes by `Jaakko Leppakangas`_ and `Eric Larson`_ + +- Fix EEG spherical spline interpolation code to account for average reference by `Mainak Jas`_ + +- MEG projectors are removed after Maxwell filtering by `Eric Larson`_ + +- Fix ``mne.decoding.TimeDecoding`` to allow specifying ``clf`` by `Jean-Rémi King`_ + +- Fix bug with units (uV) in 'Brain Vision Data Exchange Header File Version 1.0' by `Federico Raimondo`_ + +- Fix bug where :func:`mne.preprocessing.maxwell_filter` ``destination`` parameter did not properly set device-to-head transform by `Eric Larson`_ + +- Fix bug in rank calculation of ``mne.utils.estimate_rank``, ``mne.io.Raw.estimate_rank``, and covariance functions where the tolerance was set to slightly too small a value, new 'auto' mode uses values from ``scipy.linalg.orth`` by `Eric Larson`_. 
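The time-cropping entry above quotes bounds of ``+/-1.00064103`` for ``sfreq=600.614990234``; that value is simply the time of the sample nearest to 1 s, which a two-line check reproduces.

.. code-block:: python

    sfreq = 600.614990234
    nearest_sample = round(1.0 * sfreq)   # 601
    print(nearest_sample / sfreq)         # ~1.00064103, the bound quoted above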
+ +- Fix bug when specifying irregular ``train_times['slices']`` in ``mne.decoding.GeneralizationAcrossTime``, by `Jean-Rémi King`_ + +- Fix colorbar range on norm data by `Jaakko Leppakangas`_ + +- Fix bug in ``mne.preprocessing.run_ica``, which used the ``ecg_criterion`` parameter for the EOG criterion instead of ``eog_criterion`` by `Christian Brodbeck`_ + +- Fix normals in CTF data reader by `Eric Larson`_ + +- Fix bug in :func:`mne.io.read_raw_ctf`, when omitting samples at the end by `Jaakko Leppakangas`_ + +- Fix ``info['lowpass']`` value for downsampled raw data by `Eric Larson`_ + +- Remove measurement date from :class:`mne.Info` in :func:`mne.io.Raw.anonymize` by `Eric Larson`_ + +- Fix bug that caused synthetic ecg channel creation even if channel was specified for ECG peak detection in :func:`mne.preprocessing.create_ecg_epochs` by `Jaakko Leppakangas`_ + +- Fix bug with vmin and vmax when None is passed in :func:`mne.viz.plot_topo_image_epochs` by `Jaakko Leppakangas`_ + +- Fix bug with :func:`mne.label_sign_flip` (and :func:`mne.extract_label_time_course`) by `Natalie Klein`_ and `Eric Larson`_ + +- Add copy parameter in :func:`mne.Epochs.apply_baseline` and :func:`mne.io.Raw.filter` methods by `Jona Sassenhagen`_ and `Alex Gramfort`_ + +- Fix bug in :func:`mne.merge_events` when using ``replace_events=False`` by `Alex Gramfort`_ + +- Fix bug in :class:`mne.Evoked` type setting in :func:`mne.stats.linear_regression_raw` by `Eric Larson`_ + +- Fix bug in ``mne.io.edf.RawEDF`` highpass filter setting to take max highpass to match warning message by `Teon Brooks`_ + +- Fix bugs with coordinane frame adjustments in ``mne.viz.plot_trans`` by `Eric Larson`_ + +- Fix bug in colormap selection in :func:`mne.Evoked.plot_projs_topomap` by `Jaakko Leppakangas`_ + +- Fix bug in source normal adjustment that occurred when 1) patch information is available (e.g., when distances have been calculated) and 2) points are excluded from the source space (by inner skull distance) by `Eric Larson`_ + +- Fix bug when merging info that has a field with list of dicts by `Jaakko Leppakangas`_ + +- The BTi/4D reader now considers user defined channel labels instead of the hard-ware names, however only for channels other than MEG. By `Denis Engemann`_ and `Alex Gramfort`_. + +- The BTi reader :func:`mne.io.read_raw_bti` can now read 2500 system data, by `Eric Larson`_ + +- Fix bug in :func:`mne.compute_raw_covariance` where rejection by non-data channels (e.g. EOG) was not done properly by `Eric Larson`_. + +- Change default scoring method of ``mne.decoding.GeneralizationAcrossTime`` and ``mne.decoding.TimeDecoding`` to estimate the scores within the cross-validation as in scikit-learn_ as opposed to across all cross-validated ``y_pred``. The method can be changed with the ``score_mode`` parameter by `Jean-Rémi King`_ + +- Fix bug in :func:`mne.io.Raw.save` where, in rare cases, automatically split files could end up writing an extra empty file that wouldn't be read properly by `Eric Larson`_ + +- Fix ``mne.realtime.StimServer`` by removing superfluous argument ``ip`` used while initializing the object by `Mainak Jas`_. 
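For the :func:`mne.merge_events` fix mentioned above, a small sketch with a toy event array (the sample numbers and ids are made up) shows what ``replace_events=False`` does.

.. code-block:: python

    import numpy as np
    import mne

    events = np.array([[100, 0, 1], [200, 0, 2], [300, 0, 3]])
    merged = mne.merge_events(events, ids=[1, 2], new_id=12, replace_events=False)
    # replace_events=False keeps the original rows and adds time-sorted duplicates
    # with the merged id 12; the default replace_events=True rewrites ids 1 and 2.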
+ +- Fix removal of projectors in :func:`mne.preprocessing.maxwell_filter` in ``st_only=True`` mode by `Eric Larson`_ + +API +~~~ + +- The default ``picks=None`` in :func:`mne.viz.plot_epochs_image` now only plots the first 5 channels, not all channels, by `Jona Sassenhagen`_ + +- The ``mesh_color`` parameter in :func:`mne.viz.plot_dipole_locations` has been removed (use ``brain_color`` instead), by `Marijn van Vliet`_ + +- Deprecated functions ``mne.time_frequency.compute_raw_psd`` and ``mne.time_frequency.compute_epochs_psd``, replaced by ``mne.time_frequency.psd_welch`` by `Chris Holdgraf`_ + +- Deprecated function ``mne.time_frequency.multitaper_psd`` and replaced by ``mne.time_frequency.psd_multitaper`` by `Chris Holdgraf`_ + +- The ``y_pred`` attribute in ``mne.decoding.GeneralizationAcrossTime`` and ``mne.decoding.TimeDecoding`` is now a numpy array, by `Jean-Rémi King`_ + +- The :func:`mne.bem.fit_sphere_to_headshape` function now default to ``dig_kinds='auto'`` which will use extra digitization points, falling back to extra plus eeg digitization points if there not enough extra points are available. + +- The :func:`mne.bem.fit_sphere_to_headshape` now has a ``units`` argument that should be set explicitly. This will default to ``units='mm'`` in 0.12 for backward compatibility but change to ``units='m'`` in 0.13. + +- Added default parameters in Epochs class namely ``event_id=None``, ``tmin=-0.2`` and ``tmax=0.5``. + +- To unify and extend the behavior of :func:`mne.compute_raw_covariance` relative to :func:`mne.compute_covariance`, the default parameter ``tstep=0.2`` now discards any epochs at the end of the :class:`mne.io.Raw` instance that are not the full ``tstep`` duration. This will slightly change the computation of :func:`mne.compute_raw_covariance`, but should only potentially have a big impact if the :class:`mne.io.Raw` instance is short relative to ``tstep`` and the last, too short (now discarded) epoch contained data inconsistent with the epochs that preceded it. + +- The default ``picks=None`` in :func:`mne.io.Raw.filter` now picks eeg, meg, seeg, and ecog channels, by `Jean-Rémi King`_ and `Eric Larson`_ + +- EOG, ECG and EMG channels are now plotted by default (if present in data) when using :func:`mne.viz.plot_evoked` by `Marijn van Vliet`_ + +- Replace pseudoinverse-based solver with much faster Cholesky solver in :func:`mne.stats.linear_regression_raw`, by `Jona Sassenhagen`_. + +- CTF data reader now reads EEG locations from .pos file as HPI points by `Jaakko Leppakangas`_ + +- Subselecting channels can now emit a warning if many channels have been subselected from projection vectors. We recommend only computing projection vertors for and applying projectors to channels that will be used in the final analysis. However, after picking a subset of channels, projection vectors can be renormalized with :func:`mne.Info.normalize_proj` if necessary to avoid warnings about subselection. Changes by `Eric Larson`_ and `Alex Gramfort`_. + +- Rename and deprecate ``mne.Epochs.drop_bad_epochs`` to :func:`mne.Epochs.drop_bad`, and ``mne.Epochs.drop_epochs`` to :func:`mne.Epochs.drop` by `Alex Gramfort`_. 
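Given the :func:`mne.bem.fit_sphere_to_headshape` changes described above (``dig_kinds='auto'`` and an explicit ``units`` argument), a call under the new defaults might look like this; ``raw`` is assumed to be a loaded recording with digitization points.

.. code-block:: python

    import mne

    radius, origin_head, origin_device = mne.bem.fit_sphere_to_headshape(
        raw.info, dig_kinds="auto", units="m"  # state the units explicitly
    )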
+ +- The C wrapper ``mne.do_forward_solution`` has been deprecated in favor of the native Python version :func:`mne.make_forward_solution` by `Eric Larson`_ + +- The ``events`` parameter of :func:`mne.EpochsArray` is set by default to chronological time-samples and event values to 1, by `Jean-Rémi King`_ + +Authors +~~~~~~~ + +The committer list for this release is the following (preceded by number of commits): + +* 348 Eric Larson +* 347 Jaakko Leppakangas +* 157 Alexandre Gramfort +* 139 Jona Sassenhagen +* 67 Jean-Rémi King +* 32 Chris Holdgraf +* 31 Denis A. Engemann +* 30 Mainak Jas +* 16 Christopher J. Bailey +* 13 Marijn van Vliet +* 10 Mark Wronkiewicz +* 9 Teon Brooks +* 9 kaichogami +* 8 Clément Moutard +* 5 Camilo Lamus +* 5 mmagnuski +* 4 Christian Brodbeck +* 4 Daniel McCloy +* 4 Yousra Bekhti +* 3 Fede Raimondo +* 1 Jussi Nurminen +* 1 MartinBaBer +* 1 Mikolaj Magnuski +* 1 Natalie Klein +* 1 Niklas Wilming +* 1 Richard Höchenberger +* 1 Sagun Pai +* 1 Sourav Singh +* 1 Tom Dupré la Tour +* 1 jona-sassenhagen@ +* 1 kambysese +* 1 pbnsilva +* 1 sviter +* 1 zuxfoucault diff --git a/doc/changes/v0.13.rst b/doc/changes/v0.13.rst new file mode 100644 index 00000000000..50fcde79702 --- /dev/null +++ b/doc/changes/v0.13.rst @@ -0,0 +1,248 @@ +.. _changes_0_13: + +Version 0.13 (2016-09-27) +------------------------- + +Changelog +~~~~~~~~~ + +- Add new class :class:`AcqParserFIF` to parse Elekta/Neuromag MEG acquisition info, allowing e.g. collecting epochs according to acquisition-defined averaging categories by `Jussi Nurminen`_ + +- Adds automatic determination of FIR filter parameters ``filter_length``, ``l_trans_bandwidth``, and ``h_trans_bandwidth`` and adds ``phase`` argument in e.g. in :meth:`mne.io.Raw.filter` by `Eric Larson`_ + +- Adds faster ``n_fft='auto'`` option to :meth:`mne.io.Raw.apply_hilbert` by `Eric Larson`_ + +- Adds new function ``mne.time_frequency.csd_array`` to compute the cross-spectral density of multivariate signals stored in an array, by `Nick Foti`_ + +- Add order params 'selection' and 'position' for :func:`mne.viz.plot_raw` to allow plotting of specific brain regions by `Jaakko Leppakangas`_ + +- Added the ability to decimate :class:`mne.Evoked` objects with :func:`mne.Evoked.decimate` by `Eric Larson`_ + +- Add generic array-filtering function :func:`mne.filter.filter_data` by `Eric Larson`_ + +- ``mne.viz.plot_trans`` now also shows head position indicators by `Christian Brodbeck`_ + +- Add label center of mass function :func:`mne.Label.center_of_mass` by `Eric Larson`_ + +- Added :func:`mne.viz.plot_ica_properties` that allows plotting of independent component properties similar to ``pop_prop`` in EEGLAB. Also :class:`mne.preprocessing.ICA` has :func:`mne.preprocessing.ICA.plot_properties` method now. 
Added by `Mikołaj Magnuski`_ + +- Add second-order sections (instead of ``(b, a)`` form) IIR filtering for reduced numerical error by `Eric Larson`_ + +- Add interactive colormap option to image plotting functions by `Jaakko Leppakangas`_ + +- Add support for the University of Maryland KIT system by `Christian Brodbeck`_ + +- Add support for \*.elp and \*.hsp files to the KIT2FIFF converter and ``mne.channels.read_dig_montage`` by `Teon Brooks`_ and `Christian Brodbeck`_ + +- Add option to preview events in the KIT2FIFF GUI by `Christian Brodbeck`_ + +- Add approximation of size of :class:`io.Raw`, :class:`Epochs`, and :class:`Evoked` in :func:`repr` by `Eric Larson`_ + +- Add possibility to select a subset of sensors by lasso selector to :func:`mne.viz.plot_sensors` and :func:`mne.viz.plot_raw` when using order='selection' or order='position' by `Jaakko Leppakangas`_ + +- Add the option to plot brain surfaces and source spaces to :func:`viz.plot_bem` by `Christian Brodbeck`_ + +- Add the ``--filterchpi`` option to :ref:`mne browse_raw`, by `Felix Raimundo`_ + +- Add the ``--no-decimate`` option to :ref:`mne make_scalp_surfaces` to skip the high-resolution surface decimation step, by `Eric Larson`_ + +- Add new class :class:`mne.decoding.EMS` to transform epochs with the event-matched spatial filters and add 'cv' parameter to :func:`mne.decoding.compute_ems`, by `Jean-Rémi King`_ + +- Added :class:`mne.time_frequency.EpochsTFR` and average parameter in :func:`mne.time_frequency.tfr_morlet` and :func:`mne.time_frequency.tfr_multitaper` to compute time-frequency transforms on single trial epochs without averaging, by `Jean-Rémi King`_ and `Alex Gramfort`_ + +- Added :class:`mne.decoding.TimeFrequency` to transform signals in scikit-learn pipelines, by `Jean-Rémi King`_ + +- Added :class:`mne.decoding.UnsupervisedSpatialFilter` providing interface for scikit-learn decomposition algorithms to be used with MNE data, by `Jean-Rémi King`_ and `Asish Panda`_ + +- Added support for multiclass decoding in :class:`mne.decoding.CSP`, by `Jean-Rémi King`_ and `Alexandre Barachant`_ + +- Components obtained from :class:`mne.preprocessing.ICA` are now sorted by explained variance, by `Mikołaj Magnuski`_ + +- Adding an EEG reference channel using :func:`mne.add_reference_channels` will now use its digitized location from the FIFF file, if present, by `Chris Bailey`_ + +- Added interactivity to :func:`mne.preprocessing.ICA.plot_components` - passing an instance of :class:`io.Raw` or :class:`Epochs` in ``inst`` argument allows to open component properties by clicking on component topomaps, by `Mikołaj Magnuski`_ + +- Adds new function :func:`mne.viz.plot_compare_evokeds` to show multiple evoked time courses at a single location, or the mean over a ROI, or the GFP, automatically averaging and calculating a CI if multiple subjects are given, by `Jona Sassenhagen`_ + +- Added ``transform_into`` parameter into :class:`mne.decoding.CSP` to retrieve the average power of each source or the time course of each source, by `Jean-Rémi King`_ + +- Added support for reading MaxShield (IAS) evoked data (e.g., from the acquisition machine) in :func:`mne.read_evokeds` by `Eric Larson`_ + +- Added support for functional near-infrared spectroscopy (fNIRS) channels by `Jaakko Leppakangas`_ + +- Added :attr:`mne.io.Raw.acqparser` convenience attribute for :class:`mne.AcqParserFIF` by `Eric Larson`_ + +- Added example of Representational Similarity Analysis, by `Jean-Rémi King`_ + +BUG +~~~ + +- Fixed a bug where selecting 
epochs using hierarchical event IDs (HIDs) was *and*-like instead of *or*-like. When doing e.g. ``epochs[('Auditory', 'Left')]``, previously all trials that contain ``'Auditory'`` *and* ``'Left'`` (like ``'Auditory/Left'``) would be selected, but now any conditions matching ``'Auditory'`` *or* ``'Left'`` will be selected (like ``'Auditory/Left'``, ``'Auditory/Right'``, and ``'Visual/Left'``). This is now consistent with how epoch selection was done without HID tags, e.g. ``epochs[['a', 'b']]`` would select all epochs of type ``'a'`` and type ``'b'``. By `Eric Larson`_ + +- Fixed Infomax/Extended Infomax when the user provides an initial weights matrix by `Jair Montoya Martinez`_ + +- Fixed the default raw FIF writing buffer size to be 1 second instead of 10 seconds by `Eric Larson`_ + +- Fixed channel selection order when MEG channels do not come first in :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ + +- Fixed color ranges to correspond to the colorbar when plotting several time instances with :func:`mne.viz.plot_evoked_topomap` by `Jaakko Leppakangas`_ + +- Added units to :func:`mne.io.read_raw_brainvision` for reading non-data channels and enable default behavior of inferring channel type by unit by `Jaakko Leppakangas`_ and `Pablo-Arias`_ + +- Fixed minor bugs with :func:`mne.Epochs.resample` and :func:`mne.Epochs.decimate` by `Eric Larson`_ + +- Fixed a bug where duplicate vertices were not strictly checked by :func:`mne.simulation.simulate_stc` by `Eric Larson`_ + +- Fixed a bug where some FIF files could not be read with :func:`mne.io.show_fiff` by `Christian Brodbeck`_ and `Eric Larson`_ + +- Fixed a bug where ``merge_grads=True`` causes :func:`mne.viz.plot_evoked_topo` to fail when plotting a list of evokeds by `Jaakko Leppakangas`_ + +- Fixed a bug when setting multiple bipolar references with :func:`set_bipolar_reference` by `Marijn van Vliet`_. + +- Fixed image scaling in :func:`mne.viz.plot_epochs_image` when plotting more than one channel by `Jaakko Leppakangas`_ + +- Fixed :class:`mne.preprocessing.Xdawn` to fit shuffled epochs by `Jean-Rémi King`_ + +- Fixed a bug with channel order determination that could lead to an ``AssertionError`` when using :class:`mne.Covariance` matrices by `Eric Larson`_ + +- Fixed the check for CTF gradient compensation in :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ + +- Fixed the import of EDF files with encoding characters in :func:`mne.io.read_raw_edf` by `Guillaume Dumas`_ + +- Fixed :class:`mne.Epochs` to ensure that detrend parameter is not a boolean by `Jean-Rémi King`_ + +- Fixed bug with ``mne.realtime.FieldTripClient.get_data_as_epoch`` when ``picks=None`` which crashed the function by `Mainak Jas`_ + +- Fixed reading of units in ``.elc`` montage files (from ``UnitsPosition`` field) so that ``mne.channels.Montage`` objects are now returned with the ``pos`` attribute correctly in meters, by `Chris Mullins`_ + +- Fixed reading of BrainVision files by `Phillip Alday`_: + +- Greater support for BVA files, especially older ones: alternate text coding schemes with fallback to Latin-1 as well as units in column headers + +- Use online software filter information when present + +- Fix comparisons of filter settings for determining "strictest"/"weakest" filter + +- Weakest filter is now used for heterogeneous channel filter settings, leading to more consistent behavior with filtering methods applied to a subset of channels (e.g. ``Raw.filter`` with ``picks != None``). 
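The hierarchical event ID (HID) fix at the top of the 0.13 bug list above switches tag selection from *and*-like to *or*-like; a sketch of the resulting behaviour (``raw`` and ``events`` are assumed to already exist):

.. code-block:: python

    import mne

    event_id = {"Auditory/Left": 1, "Auditory/Right": 2,
                "Visual/Left": 3, "Visual/Right": 4}
    epochs = mne.Epochs(raw, events, event_id=event_id)

    epochs[["Auditory", "Left"]]  # union: Auditory/Left, Auditory/Right, Visual/Left
    epochs["Auditory/Left"]       # a single condition still selects exactly that one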
+ +- Fixed plotting and timing of :class:`Annotations` and restricted addition of annotations outside data range to prevent problems with cropping and concatenating data by `Jaakko Leppakangas`_ + +- Fixed ICA plotting functions to refer to IC index instead of component number by `Andreas Hojlund`_ and `Jaakko Leppakangas`_ + +- Fixed bug with ``picks`` when interpolating MEG channels by `Mainak Jas`_. + +- Fixed bug in padding of Stockwell transform for signal of length a power of 2 by `Johannes Niediek`_ + +API +~~~ + +- The ``add_eeg_ref`` argument in core functions like :func:`mne.io.read_raw_fif` and :class:`mne.Epochs` has been deprecated in favor of using :func:`mne.set_eeg_reference` and equivalent instance methods like :meth:`raw.set_eeg_reference() `. In functions like :func:`mne.io.read_raw_fif` where the default in 0.13 and older versions is ``add_eeg_ref=True``, the default will change to ``add_eeg_ref=False`` in 0.14, and the argument will be removed in 0.15. + +- Multiple aspects of FIR filtering in MNE-Python has been refactored: + + 1. New recommended defaults for ``l_trans_bandwidth='auto'``, ``h_trans_bandwidth='auto'``, and ``filter_length='auto'``. This should generally reduce filter artifacts at the expense of slight decrease in effective filter stop-band attenuation. For details see :ref:`tut-filtering-in-python`. The default values of ``l_trans_bandwidth=h_trans_bandwidth=0.5`` and ``filter_length='10s'`` will change to ``'auto'`` in 0.14. + + 2. The ``filter_length=None`` option (i.e. use ``len(x)``) has been deprecated. + + 3. An improved ``phase='zero'`` zero-phase FIR filtering has been added. Instead of running the designed filter forward and backward, the filter is applied once and we compensate for the linear phase of the filter. The previous ``phase='zero-double'`` default will change to ``phase='zero'`` in 0.14. + + 4. A warning is provided when the filter is longer than the signal of interest, as this is unlikely to produce desired results. + + 5. Previously, if the filter was as long or longer than the signal of interest, direct FFT-based computations were used. Now a single code path (overlap-add filtering) is used for all FIR filters. This could cause minor changes in how short signals are filtered. + +- Support for Python 2.6 has been dropped, and the minimum supported dependencies are NumPy_ 1.8, SciPy_ 0.12, and Matplotlib_ 1.3 by `Eric Larson`_ + +- When CTF gradient compensation is applied to raw data, it is no longer reverted on save of :meth:`mne.io.Raw.save` by `Eric Larson`_ + +- Adds ``mne.time_frequency.csd_epochs`` to replace ``mne.time_frequency.csd_compute_epochs`` for naming consistency. 
``mne.time_frequency.csd_compute_epochs`` is now deprecated and will be removed in mne 0.14, by `Nick Foti`_ + +- Weighted addition and subtraction of :class:`Evoked` as ``ev1 + ev2`` and ``ev1 - ev2`` have been deprecated, use explicit :func:`mne.combine_evoked(..., weights='nave') ` instead by `Eric Larson`_ + +- Deprecated support for passing a list of filenames to :class:`mne.io.Raw` constructor, use :func:`mne.io.read_raw_fif` and :func:`mne.concatenate_raws` instead by `Eric Larson`_ + +- Added options for setting data and date formats manually in :func:`mne.io.read_raw_cnt` by `Jaakko Leppakangas`_ + +- Now channels with units of 'C', 'µS', 'uS', 'ARU' and 'S' will be turned to misc by default in :func:`mne.io.read_raw_brainvision` by `Jaakko Leppakangas`_ + +- Add :func:`mne.io.anonymize_info` function to anonymize measurements and add methods to :class:`mne.io.Raw`, :class:`mne.Epochs` and :class:`mne.Evoked`, by `Jean-Rémi King`_ + +- Now it is possible to plot only a subselection of channels in :func:`mne.viz.plot_raw` by using an array for order parameter by `Jaakko Leppakangas`_ + +- EOG channels can now be included when calling :func:`mne.preprocessing.ICA.fit` and a proper error is raised when trying to include unsupported channels by `Alexander Rudiuk`_ + +- :func:`mne.concatenate_epochs` and :func:`mne.compute_covariance` now check to see if all :class:`Epochs` instances have the same MEG-to-Head transformation, and errors by default if they do not by `Eric Larson`_ + +- Added option to pass a list of axes to :func:`mne.viz.plot_epochs_image` by `Mikołaj Magnuski`_ + +- Constructing IIR filters in :func:`mne.filter.construct_iir_filter` defaults to ``output='ba'`` in 0.13 but this will be changed to ``output='sos'`` by `Eric Larson`_ + +- Add ``zorder`` parameter to :func:`mne.Evoked.plot` and derived functions to sort allow sorting channels by e.g. standard deviation, by `Jona Sassenhagen`_ + +- The ``baseline`` parameter of :func:`mne.Epochs.apply_baseline` is set by default (None, 0), by `Felix Raimundo`_ + +- Adds :func:`mne.Evoked.apply_baseline` to be consistent with :func:`mne.Epochs.apply_baseline`, by `Felix Raimundo`_ + +- Deprecated the ``baseline`` parameter in :class:`mne.Evoked`, by `Felix Raimundo`_ + +- The API of :meth:`mne.SourceEstimate.plot` and :func:`mne.viz.plot_source_estimates` has been updated to reflect current PySurfer 0.6 API. The ``config_opts`` parameter is now deprecated and will be removed in mne 0.14, and the default representation for time will change from ``ms`` to ``s`` in mne 0.14. 
By `Christian Brodbeck`_ + +- The default dataset location has been changed from ``examples/`` in the MNE-Python root directory to ``~/mne_data`` in the user's home directory, by `Eric Larson`_ + +- A new option ``set_env`` has been added to :func:`mne.set_config` that defaults to ``False`` in 0.13 but will change to ``True`` in 0.14, by `Eric Larson`_ + +- The ``compensation`` parameter in :func:`mne.io.read_raw_fif` has been deprecated in favor of the method :meth:`mne.io.Raw.apply_gradient_compensation` by `Eric Larson`_ + +- ``mne.decoding.EpochsVectorizer`` has been deprecated in favor of :class:`mne.decoding.Vectorizer` by `Asish Panda`_ + +- The ``epochs_data`` parameter has been deprecated in :class:`mne.decoding.CSP`, in favour of the ``X`` parameter to comply to scikit-learn API, by `Jean-Rémi King`_ + +- Deprecated ``mne.time_frequency.cwt_morlet`` and ``mne.time_frequency.single_trial_power`` in favour of :func:`mne.time_frequency.tfr_morlet` with parameter average=False, by `Jean-Rémi King`_ and `Alex Gramfort`_ + +- Add argument ``mask_type`` to :func:`mne.read_events` and :func:`mne.find_events` to support MNE-C style of trigger masking by `Teon Brooks`_ and `Eric Larson`_ + +- Extended Infomax is now the new default in :func:`mne.preprocessing.infomax` (``extended=True``), by `Clemens Brunner`_ + +- :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` now take additional argument ``uint16_codec`` that allows to define the encoding of character arrays in set file. This helps in rare cases when reading a set file fails with ``TypeError: buffer is too small for requested array``. By `Mikołaj Magnuski`_ + +- Added :class:`mne.decoding.TemporalFilter` to filter data in scikit-learn pipelines, by `Asish Panda`_ + +- :func:`mne.preprocessing.create_ecg_epochs` now includes all the channels when ``picks=None`` by `Jaakko Leppakangas`_ + +- :func:`mne.set_eeg_reference` now allows moving from a custom to an average EEG reference by `Marijn van Vliet`_ + +Authors +~~~~~~~ + +The committer list for this release is the following (sorted by alphabetical order): + +* Alexander Rudiuk +* Alexandre Barachant +* Alexandre Gramfort +* Asish Panda +* Camilo Lamus +* Chris Holdgraf +* Christian Brodbeck +* Christopher J. Bailey +* Christopher Mullins +* Clemens Brunner +* Denis A. Engemann +* Eric Larson +* Federico Raimondo +* Félix Raimundo +* Guillaume Dumas +* Jaakko Leppakangas +* Jair Montoya +* Jean-Rémi King +* Johannes Niediek +* Jona Sassenhagen +* Jussi Nurminen +* Keith Doelling +* Mainak Jas +* Marijn van Vliet +* Michael Krause +* Mikolaj Magnuski +* Nick Foti +* Phillip Alday +* Simon-Shlomo Poil +* Teon Brooks +* Yaroslav Halchenko diff --git a/doc/changes/v0.14.rst b/doc/changes/v0.14.rst new file mode 100644 index 00000000000..bfb9498a0c1 --- /dev/null +++ b/doc/changes/v0.14.rst @@ -0,0 +1,250 @@ +.. 
_changes_0_14: + +Version 0.14 (2017-03-24) +------------------------- + +Changelog +~~~~~~~~~ + +- Add example of time-frequency decoding with CSP by `Laura Gwilliams`_ + +- Automatically create a legend in :func:`mne.viz.plot_evoked_topo` by `Jussi Nurminen`_ + +- Add I/O support for Artemis123 infant/toddler MEG data by `Luke Bloy`_ + +- Add filter plotting functions :func:`mne.viz.plot_filter` and :func:`mne.viz.plot_ideal_filter` as well as filter creation function :func:`mne.filter.create_filter` by `Eric Larson`_ + +- Add HCP-MMP1.0 parcellation dataset downloader by `Eric Larson`_ + +- Add option to project EEG electrodes onto the scalp in ``mne.viz.plot_trans`` by `Eric Larson`_ + +- Add option to plot individual sensors in :meth:`mne.io.Raw.plot_psd` by `Alex Gramfort`_ and `Eric Larson`_ + +- Add option to plot ECoG electrodes in ``mne.viz.plot_trans`` by `Eric Larson`_ + +- Add convenient default values to :meth:`mne.io.Raw.apply_hilbert` and :meth:`mne.io.Raw.apply_function` by `Denis Engemann`_ + +- Remove MNE-C requirement for :ref:`mne make_scalp_surfaces` by `Eric Larson`_ + +- Add support for FastTrack Polhemus ``.mat`` file outputs in ``hsp`` argument of ``mne.channels.read_dig_montage`` by `Eric Larson`_ + +- Add option to convert 3d electrode plots to a snapshot with 2d electrode positions with :func:`mne.viz.snapshot_brain_montage` by `Chris Holdgraf`_ + +- Add skull surface plotting option to ``mne.viz.plot_trans`` by `Jaakko Leppakangas`_ + +- Add minimum-phase filtering option in :meth:`mne.io.Raw.filter` by `Eric Larson`_ + +- Add support for reading ASCII BrainVision files in :func:`mne.io.read_raw_brainvision` by `Eric Larson`_ + +- Add method of ICA objects for retrieving the component maps :meth:`mne.preprocessing.ICA.get_components` by `Jona Sassenhagen`_ + +- Add option to plot events in :func:`mne.viz.plot_epochs` by `Jaakko Leppakangas`_ + +- Add dipole definitions for older phantom at Otaniemi in :func:`mne.dipole.get_phantom_dipoles` by `Eric Larson`_ + +- Add spatial colors option for :func:`mne.viz.plot_raw_psd` by `Jaakko Leppakangas`_ + +- Add functions like :func:`get_volume_labels_from_src` to handle mixed source spaces by `Annalisa Pascarella`_ + +- Add convenience function for opening MNE documentation :func:`open_docs` by `Eric Larson`_ + +- Add option in :meth:`mne.io.Raw.plot` to display the time axis relative to ``raw.first_samp`` by `Mainak Jas`_ + +- Add new :mod:`mne.datasets.visual_92_categories ` dataset by `Jaakko Leppakangas`_ + +- Add option in :func:`mne.io.read_raw_edf` to allow channel exclusion by `Jaakko Leppakangas`_ + +- Allow integer event codes in :func:`mne.read_epochs_eeglab` by `Jaakko Leppakangas`_ + +- Add ability to match channel names in a case insensitive manner when applying a ``mne.channels.Montage`` by `Marijn van Vliet`_ + +- Add ``yscale`` keyword argument to :meth:`mne.time_frequency.AverageTFR.plot` that allows specifying whether to present the frequency axis in linear (``'linear'``) or log (``'log'``) scale. The default value is ``'auto'`` which detects whether frequencies are log-spaced and sets yscale to log. 
Added by `Mikołaj Magnuski`_ + +- Add :ref:`Representational Similarity Analysis (RSA) ` example on :mod:`mne.datasets.visual_92_categories.data_path` dataset by `Jaakko Leppakangas`_, `Jean-Rémi King`_ and `Alex Gramfort`_ + +- Add support for NeuroScan files with event type 3 in :func:`mne.io.read_raw_cnt` by `Marijn van Vliet`_ + +- Add interactive annotation mode to :meth:`mne.io.Raw.plot` (accessed by pressing 'a') by `Jaakko Leppakangas`_ + +- Add support for deleting all projectors or a list of indices in :meth:`mne.io.Raw.del_proj` by `Eric Larson`_ + +- Add source space plotting with :meth:`mne.SourceSpaces.plot` using ``mne.viz.plot_trans`` by `Eric Larson`_ + +- Add :func:`mne.decoding.get_coef` to retrieve and inverse the coefficients of a linear model - typically a spatial filter or pattern, by `Jean-Rémi King`_ + +- Add support for reading in EGI MFF digitization coordinate files in ``mne.channels.read_dig_montage`` by `Matt Boggess`_ + +- Add ``n_per_seg`` keyword argument to ``mne.time_frequency.psd_welch`` and :func:`mne.time_frequency.psd_array_welch` that allows to control segment length independently of ``n_fft`` and use zero-padding when ``n_fft > n_per_seg`` by `Mikołaj Magnuski`_ + +- Add annotation aware data getter :meth:`mne.io.Raw.get_data` by `Jaakko Leppakangas`_ + +- Add support of dipole location visualization with MRI slice overlay with matplotlib to :func:`mne.viz.plot_dipole_locations` via mode='orthoview' parameter by `Jaakko Leppakangas`_ and `Alex Gramfort`_ + +- Add plotting of head positions as a function of time in :func:`mne.viz.plot_head_positions` by `Eric Larson`_ + +- Add ``real_filter`` option to ``mne.beamformer.dics``, ``mne.beamformer.dics_source_power``, ``mne.beamformer.tf_dics`` and ``mne.beamformer.dics_epochs`` by `Eric Larson`_, `Alex Gramfort`_ and `Andrea Brovelli`_. + +- Add a demo script showing how to use a custom inverse solver with MNE by `Alex Gramfort`_ + +- Functions :func:`mne.preprocessing.create_ecg_epochs`, :func:`mne.preprocessing.create_eog_epochs`, :func:`mne.compute_raw_covariance` and ICA methods :meth:`mne.preprocessing.ICA.score_sources`, :meth:`mne.preprocessing.ICA.find_bads_ecg`, :meth:`mne.preprocessing.ICA.find_bads_eog` are now annotation aware by `Jaakko Leppakangas`_ + +- Allow using ``spatial_colors`` for non-standard layouts by creating custom layouts from channel locations and add ``to_sphere`` keyword to :func:`mne.viz.plot_sensors` to allow plotting sensors that are not on the head surface by `Jaakko Leppakangas`_ + +- Concatenating raws with :func:`mne.concatenate_raws` now creates boundary annotations automatically by `Jaakko Leppakangas`_ + +- :func:`mne.viz.plot_projs_topomap` now supports plotting EEG topomaps by passing in :class:`mne.Info` by `Eric Larson`_ + +BUG +~~~ + +- Fix bug with DICS and LCMV (e.g., ``mne.beamformer.lcmv``, ``mne.beamformer.dics``) where regularization was done improperly. 
The default ``reg=0.01`` has been changed to ``reg=0.05``, by `Andrea Brovelli`_, `Alex Gramfort`_, and `Eric Larson`_ + +- Fix callback function call in ``mne.viz.topo._plot_topo_onpick`` by `Erkka Heinila`_ + +- Fix reading multi-file CTF recordings in :func:`mne.io.read_raw_ctf` by `Niklas Wilming`_ + +- Fix computation of AR coefficients across channels in :func:`mne.time_frequency.fit_iir_model_raw` by `Eric Larson`_ + +- Fix maxfilter channel names extra space bug in :func:`mne.preprocessing.maxwell_filter` by `Sheraz Khan`_ + +- :func:`mne.channels.find_layout` now leaves out the excluded channels by `Jaakko Leppakangas`_ + +- Array data constructors :class:`mne.io.RawArray` and :class:`EvokedArray` now make a copy of the info structure by `Jaakko Leppakangas`_ + +- Fix bug with finding layouts in :func:`mne.viz.plot_projs_topomap` by `Eric Larson`_ + +- Fix bug in :func:`mne.io.anonymize_info` when Info does not contain 'file_id' or 'meas_id' fields by `Jean-Rémi King`_ + +- Fix colormap selection in :func:`mne.viz.plot_evoked_topomap` when using positive vmin with negative data by `Jaakko Leppakangas`_ + +- Fix channel name comparison in ``mne.channels.read_montage`` so that if ``ch_names`` is provided, the returned montage will have channel names in the same letter case by `Jaakko Leppakangas`_ + +- Fix :meth:`inst.set_montage(montage)` to only set ``inst.info['dev_head_t']`` if ``dev_head_t=True`` in ``mne.channels.read_dig_montage`` by `Eric Larson`_ + +- Fix handling of events in ``mne.realtime.RtEpochs`` when the triggers were split between two buffers resulting in missing and/or duplicate epochs by `Mainak Jas`_ and `Antti Rantala`_ + +- Fix bug with automatic decimation in :func:`mne.io.read_raw_kit` by `Keith Doelling`_ + +- Fix bug with :func:`setup_volume_source_space` where arguments ``subject`` and ``subjects_dir`` were ignored by `Jaakko Leppakangas`_ + +- Fix sanity check for incompatible ``threshold`` and ``tail`` values in clustering functions like :func:`mne.stats.spatio_temporal_cluster_1samp_test` by `Eric Larson`_ + +- Fix ``_bad_dropped`` not being set when loading eeglab epoched files via :func:`mne.read_epochs_eeglab` which resulted in :func:`len` not working by `Mikołaj Magnuski`_ + +- Fix a bug in :meth:`mne.time_frequency.AverageTFR.plot` when plotting without a colorbar by `Jaakko Leppakangas`_ + +- Fix ``_filenames`` attribute in creation of :class:`mne.io.RawArray` with :meth:`mne.preprocessing.ICA.get_sources` by `Paul Pasler`_ + +- Fix contour levels in :func:`mne.viz.plot_evoked_topomap` to be uniform across topomaps by `Jaakko Leppakangas`_ + +- Fix bug in :func:`mne.preprocessing.maxwell_filter` where fine calibration indices were mismatched leading to an ``AssertionError`` by `Eric Larson`_ + +- Fix bug in :func:`mne.preprocessing.fix_stim_artifact` where non-data channels were interpolated by `Eric Larson`_ + +- :class:`mne.decoding.Scaler` now scales each channel independently using data from all time points (epochs and times) instead of scaling all channels for each time point. 
It also now accepts parameter ``scalings`` to determine the data scaling method (default is ``None`` to use static channel-type-based scaling), by `Asish Panda`_, `Jean-Rémi King`_, and `Eric Larson`_ + +- Raise error if the cv parameter of ``mne.decoding.GeneralizationAcrossTime`` and ``mne.decoding.TimeDecoding`` is not a partition and the predict_mode is "cross-validation" by `Jean-Rémi King`_ + +- Fix bug in :func:`mne.io.read_raw_edf` when ``preload=False`` and channels have different sampling rates by `Jaakko Leppakangas`_ + +- Fix :func:`mne.read_labels_from_annot` to set ``label.values[:]=1`` rather than 0 for consistency with the :class:`Label` class by `Jon Houck`_ + +- Fix plotting non-uniform freqs (for example log-spaced) in :meth:`mne.time_frequency.AverageTFR.plot` by `Mikołaj Magnuski`_ + +- Fix :func:`mne.minimum_norm.compute_source_psd` when used with ``pick_ori=None`` by `Annalisa Pascarella`_ and `Alex Gramfort`_ + +- Fix bug in :class:`mne.Annotations` where concatenating two raws where ``orig_time`` of the second run is ``None`` by `Jaakko Leppakangas`_ + +- Fix reading channel location from eeglab ``.set`` files when some of the channels do not provide this information. Previously all channel locations were ignored in such case, now they are read - unless a montage is provided by the user in which case only channel names are read from set file. By `Mikołaj Magnuski`_ + +- Fix reading eeglab ``.set`` files when ``.chanlocs`` structure does not contain ``X``, ``Y`` or ``Z`` fields by `Mikołaj Magnuski`_ + +- Fix bug with :func:`mne.simulation.simulate_raw` when ``interp != 'zero'`` by `Eric Larson`_ + +- Fix :func:`mne.fit_dipole` to handle sphere model rank deficiency properly by `Alex Gramfort`_ + +- Raise error in :func:`mne.concatenate_epochs` when concatenated epochs have conflicting event_id by `Mikołaj Magnuski`_ + +- Fix handling of ``n_components=None`` in :class:`mne.preprocessing.ICA` by `Richard Höchenberger`_ + +- Fix reading of fiducials correctly from CTF data in :func:`mne.io.read_raw_ctf` by `Jaakko Leppakangas`_ + +- Fix :func:`mne.beamformer.rap_music` to return dipoles with amplitudes in Am instead of nAm by `Jaakko Leppakangas`_ + +- Fix computation of duality gap in ``mne.inverse_sparse.mxne_optim.dgap_l21`` by `Mathurin Massias`_ + +API +~~~ + +- The filtering functions ``band_pass_filter``, ``band_stop_filter``, ``low_pass_filter``, and ``high_pass_filter`` have been deprecated in favor of :func:`mne.filter.filter_data` by `Eric Larson`_ + +- :class:`EvokedArray` now has default value ``tmin=0.`` by `Jaakko Leppakangas`_ + +- The ``ch_type`` argument for ``mne.viz.plot_trans`` has been deprecated, use ``eeg_sensors`` and ``meg_sensors`` instead, by `Eric Larson`_ + +- The default ``tmax=60.`` in :meth:`mne.io.Raw.plot_psd` will change to ``tmax=np.inf`` in 0.15, by `Eric Larson`_ + +- Base classes :class:`mne.io.BaseRaw` and :class:`mne.BaseEpochs` are now public to allow easier typechecking, by `Daniel McCloy`_ + +- :func:`mne.io.read_raw_edf` now combines triggers from multiple tal channels to 'STI 014' by `Jaakko Leppakangas`_ + +- The measurement info :class:`Info` no longer contains a potentially misleading ``info['filename']`` entry. 
Use class properties like :attr:`mne.io.Raw.filenames` or :attr:`mne.Epochs.filename` instead by `Eric Larson`_ + +- Default fiducial name change from 'nz' to 'nasion' in ``mne.channels.read_montage``, so that it is the same for both ``mne.channels.Montage`` and :class: `mne.channels.DigMontage` by `Leonardo Barbosa`_ + +- MNE's additional files for the ``fsaverage`` head/brain model are now included in MNE-Python, and the now superfluous ``mne_root`` parameter to :func:`create_default_subject` has been deprecated by `Christian Brodbeck`_ + +- An ``overwrite=False`` default parameter has been added to :func:`write_source_spaces` to protect against accidental overwrites, by `Eric Larson`_ + +- The :class:`mne.decoding.LinearModel` class will no longer support ``plot_filters`` and ``plot_patterns``, use :class:`mne.EvokedArray` with :func:`mne.decoding.get_coef` instead, by `Jean-Rémi King`_ + +- Made functions :func:`mne.time_frequency.tfr_array_multitaper`, :func:`mne.time_frequency.tfr_array_morlet`, :func:`mne.time_frequency.tfr_array_stockwell`, :func:`mne.time_frequency.psd_array_multitaper` and :func:`mne.time_frequency.psd_array_welch` public to allow computing TFRs and PSDs on numpy arrays by `Jaakko Leppakangas`_ + +- :meth:`mne.preprocessing.ICA.fit` now rejects data annotated bad by default. Turn off with ``reject_by_annotation=False``, by `Jaakko Leppakangas`_ + +- :func:`mne.io.read_raw_egi` now names channels with pattern 'E'. This behavior can be changed with parameter ``channel_naming`` by `Jaakko Leppakangas`_ + +- the ``name`` parameter in :class:`mne.Epochs` is deprecated, by `Jaakko Leppakangas`_ + +Authors +~~~~~~~ + +People who contributed to this release (in alphabetical order): + +* Alexander Rudiuk +* Alexandre Gramfort +* Annalisa Pascarella +* Antti Rantala +* Asish Panda +* Burkhard Maess +* Chris Holdgraf +* Christian Brodbeck +* Cristóbal Moënne-Loccoz +* Daniel McCloy +* Denis A. Engemann +* Eric Larson +* Erkka Heinila +* Hermann Sonntag +* Jaakko Leppakangas +* Jakub Kaczmarzyk +* Jean-Rémi King +* Jon Houck +* Jona Sassenhagen +* Jussi Nurminen +* Keith Doelling +* Leonardo S. Barbosa +* Lorenz Esch +* Lorenzo Alfine +* Luke Bloy +* Mainak Jas +* Marijn van Vliet +* Matt Boggess +* Matteo Visconti +* Mikolaj Magnuski +* Niklas Wilming +* Paul Pasler +* Richard Höchenberger +* Sheraz Khan +* Stefan Repplinger +* Teon Brooks +* Yaroslav Halchenko diff --git a/doc/changes/v0.15.rst b/doc/changes/v0.15.rst new file mode 100644 index 00000000000..94470ba5316 --- /dev/null +++ b/doc/changes/v0.15.rst @@ -0,0 +1,329 @@ +.. 
_changes_0_15: + +Version 0.15 (2017-10-20) +------------------------- + +Changelog +~~~~~~~~~ + +- :meth:`mne.channels.Layout.plot` and :func:`mne.viz.plot_layout` now allow plotting a subset of channels with the ``picks`` argument by `Jaakko Leppakangas`_ + +- Add .bvef extension (BrainVision Electrodes File) to ``mne.channels.read_montage`` by `Jean-Baptiste Schiratti`_ + +- Add :func:`mne.decoding.cross_val_multiscore` to allow scoring of multiple tasks, typically used with :class:`mne.decoding.SlidingEstimator`, by `Jean-Rémi King`_ + +- Add :class:`mne.decoding.ReceptiveField` module for modeling electrode response to input features by `Chris Holdgraf`_ + +- Add :class:`mne.decoding.TimeDelayingRidge` class, used by default by :class:`mne.decoding.ReceptiveField`, to speed up auto- and cross-correlation computations and enable Laplacian regularization by `Ross Maddox`_ and `Eric Larson`_ + +- Add new :mod:`mne.datasets.mtrf` dataset by `Chris Holdgraf`_ + +- Add example of time-frequency decoding with CSP by `Laura Gwilliams`_ + +- Add :class:`mne.decoding.SPoC` to fit and apply spatial filters based on continuous target variables, by `Jean-Rémi King`_ and `Alexandre Barachant`_ + +- Add FieldTrip's electromyogram dataset, by `Alexandre Barachant`_ + +- Add ``reject_by_annotation`` option to :func:`mne.preprocessing.find_eog_events` (which is also utilised by :func:`mne.preprocessing.create_eog_epochs`) to omit data that is annotated as bad by `Jaakko Leppakangas`_ + +- Add example for fast screening of event-related dynamics in frequency bands by `Denis Engemann`_ + +- Add :meth:`mne.time_frequency.EpochsTFR.save` by `Jaakko Leppakangas`_ + +- Add butterfly mode (toggled with 'b' key) to :meth:`mne.io.Raw.plot` by `Jaakko Leppakangas`_ + +- Add ``axes`` parameter to plot_topo functions by `Jaakko Leppakangas`_ + +- Add options to change time windowing in :func:`mne.chpi.filter_chpi` by `Eric Larson`_ + +- ``mne.channels.Montage.plot``, :meth:`mne.channels.DigMontage.plot`, and :func:`mne.viz.plot_montage` now allow plotting channel locations as a topomap by `Clemens Brunner`_ + +- Add ``background_color`` parameter to :meth:`mne.Evoked.plot_topo` and :func:`mne.viz.plot_evoked_topo` and improve axes rendering as done in :func:`mne.viz.plot_compare_evokeds` by `Alex Gramfort`_ + +- Add support for GDF files in :func:`mne.io.read_raw_edf` by `Nicolas Barascud`_ + +- Add ``mne.io.find_edf_events`` for getting the events as they are in the EDF/GDF header by `Jaakko Leppakangas`_ + +- Speed up :meth:`mne.io.Raw.plot` and :meth:`mne.Epochs.plot` using (automatic) decimation based on low-passing with ``decim='auto'`` parameter by `Eric Larson`_ and `Jaakko Leppakangas`_ + +- Add ``mne.inverse_sparse.mxne_optim.dgap_l21l1`` for computing the duality gap for TF-MxNE as the new stopping criterion by `Daniel Strohmeier`_ + +- Add option to return a list of :class:`Dipole` objects in sparse source imaging methods by `Daniel Strohmeier`_ + +- Add :func:`mne.inverse_sparse.make_stc_from_dipoles` to generate stc objects from lists of dipoles by `Daniel Strohmeier`_ + +- Add ``mne.channels.find_ch_connectivity`` that tries to infer the correct connectivity template using channel info. 
If no template is found, it computes the connectivity matrix using :class:`Delaunay ` triangulation of the 2d projected channel positions by `Jaakko Leppakangas`_ + +- Add IO support for EGI MFF format by `Jaakko Leppakangas`_ and `ramonapariciog`_ + +- Add option to use matplotlib backend when plotting with :func:`mne.viz.plot_source_estimates` by `Jaakko Leppakangas`_ + +- Add ``mne.channels.Montage.get_pos2d`` to get the 2D positions of channels in a montage by `Clemens Brunner`_ + +- Add MGH 60- and 70-channel standard montages to ``mne.channels.read_montage`` by `Eric Larson`_ + +- Add option for embedding SVG instead of PNG in HTML for :class:`mne.Report` by `Eric Larson`_ + +- Add confidence intervals, number of free parameters, and χ² to :func:`mne.fit_dipole` and :func:`mne.read_dipole` by `Eric Larson`_ + +- :attr:`mne.SourceEstimate.data` is now writable, writing to it will also update :attr:`mne.SourceEstimate.times` by `Marijn van Vliet`_ + +- :meth:`mne.io.Raw.plot` and :meth:`mne.Epochs.plot` now use anti-aliasing to draw signals by `Clemens Brunner`_ + +- Allow using saved ``DigMontage`` to import digitization to :func:`mne.gui.coregistration` by `Jaakko Leppakangas`_ + +- Add function :func:`mne.channels.get_builtin_montages` to list all built-in montages by `Clemens Brunner`_ + +- :class:`mne.decoding.SlidingEstimator` and :class:`mne.decoding.GeneralizingEstimator` now accept ``**fit_params`` at fitting by `Jean-Rémi King`_ + +- Add :class:`mne.VectorSourceEstimate` class which enables working with both source power and dipole orientations by `Marijn van Vliet`_ + +- Add option ``pick_ori='vector'`` to :func:`mne.minimum_norm.apply_inverse` to produce :class:`mne.VectorSourceEstimate` by `Marijn van Vliet`_ + +- Add support for :class:`numpy.random.RandomState` argument to ``seed`` in :mod:`statistical clustering functions ` and better documentation of exact 1-sample tests by `Eric Larson`_ + +- Extend :func:`mne.viz.plot_epochs_image`/:meth:`mne.Epochs.plot_image` with regards to grouping by or aggregating over channels. 
See the new example at ``examples/visualization/plot_roi_erpimage_by_rt.py`` by `Jona Sassenhagen`_ + +- Add bootstrapped confidence intervals to :func:`mne.viz.plot_compare_evokeds` by `Jona Sassenhagen`_ and `Denis Engemann`_ + +- Add example on how to plot ERDS maps (also known as ERSP) by `Clemens Brunner`_ + +- Add support for volume source spaces to ``spatial_src_connectivity`` and ``spatio_temporal_src_connectivity`` by `Alex Gramfort`_ + +- Plotting raw data (:func:`mne.viz.plot_raw` or :meth:`mne.io.Raw.plot`) with events now includes event numbers (if there are not more than 50 events on a page) by `Clemens Brunner`_ + +- Add filtering functions :meth:`mne.Epochs.filter` and :meth:`mne.Evoked.filter`, as well as ``pad`` argument to :meth:`mne.io.Raw.filter` by `Eric Larson`_ + +- Add high frequency somatosensory MEG dataset by `Jussi Nurminen`_ + +- Add reduced set of labels for HCPMMP-1.0 parcellation in :func:`mne.datasets.fetch_hcp_mmp_parcellation` by `Eric Larson`_ + +- Enable morphing between hemispheres with ``mne.compute_morph_matrix`` by `Christian Brodbeck`_ + +- Add ``return_residual`` to :func:`mne.minimum_norm.apply_inverse` by `Eric Larson`_ + +- Add ``return_drop_log`` to :func:`mne.preprocessing.compute_proj_eog` and :func:`mne.preprocessing.compute_proj_ecg` by `Eric Larson`_ + +- Add time cursor and category/amplitude status message into the single-channel evoked plot by `Jussi Nurminen`_ + +BUG +~~~ +- Fixed a bug when creating spherical volumetric grid source spaces in :func:`setup_volume_source_space` by improving the minimum-distance computations, which in general will decrease the number of used source space points by `Eric Larson`_ + +- Fix bug in :meth:`mne.io.read_raw_brainvision` read .vhdr files with ANSI codepage by `Okba Bekhelifi`_ and `Alex Gramfort`_ + +- Fix unit scaling when reading in EGI digitization files using ``mne.channels.read_dig_montage`` by `Matt Boggess`_ + +- Fix ``picks`` default in :meth:`mne.io.Raw.filter` to include ``ref_meg`` channels by default by `Eric Larson`_ + +- Fix :class:`mne.decoding.CSP` order of spatial filter in ``patterns_`` by `Alexandre Barachant`_ + +- :meth:`mne.concatenate_epochs` now maintains the relative position of events during concatenation by `Alexandre Barachant`_ + +- Fix bug in script ``mne make_scalp_surfaces`` by `Denis Engemann`_ (this bug prevented creation of high-resolution meshes when they were absent in the first place.) 
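A minimal illustration of the :func:`mne.concatenate_epochs` behavior noted just above, using synthetic data (the channel names, event codes, and array shapes here are arbitrary choices, not values prescribed by the changelog)::

    import numpy as np
    import mne

    info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=100., ch_types='eeg')
    data = np.random.randn(3, 2, 50)  # 3 epochs, 2 channels, 50 samples
    events_a = np.c_[[0, 100, 200], [0, 0, 0], [1, 1, 1]]
    events_b = np.c_[[50, 150, 250], [0, 0, 0], [2, 2, 2]]
    epochs_a = mne.EpochsArray(data, info, events_a, event_id={'a': 1})
    epochs_b = mne.EpochsArray(data.copy(), info.copy(), events_b, event_id={'b': 2})

    # Events keep their relative position in the concatenated object
    combined = mne.concatenate_epochs([epochs_a, epochs_b])
    print(combined.events)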
+ +- Fix writing of raw files with empty set of annotations by `Jaakko Leppakangas`_ + +- Fix bug in :meth:`mne.preprocessing.ICA.plot_properties` where merging gradiometers would fail by `Jaakko Leppakangas`_ + +- Fix :func:`mne.viz.plot_sensors` to maintain proper aspect ratio by `Eric Larson`_ + +- Fix :func:`mne.viz.plot_topomap` to allow 0 contours by `Jaakko Leppakangas`_ + +- Fix :class:`mne.preprocessing.ICA` source-picking to increase threshold for rank estimation to 1e-14 by `Jesper Duemose Nielsen`_ + +- Fix :func:`mne.set_bipolar_reference` to support duplicates in anodes by `Jean-Baptiste Schiratti`_ and `Alex Gramfort`_ + +- Fix visuals of :func:`mne.viz.plot_evoked` and a bug where ylim changes when using interactive topomap plotting by `Jaakko Leppakangas`_ + +- Fix :meth:`mne.Evoked.plot_topomap` when using the ``mask`` argument with paired gradiometers by `Eric Larson`_ + +- Fix bug in :meth:`mne.Label.fill` where an empty label raised an error, by `Eric Larson`_ + +- Fix :func:`mne.io.read_raw_ctf` to also include the samples in the last block by `Jaakko Leppakangas`_ + +- Fix :meth:`mne.preprocessing.ICA.save` to close file before attempting to delete it when write fails by `Jesper Duemose Nielsen`_ + +- Fix :func:`mne.simulation.simulate_evoked` to use nave parameter instead of snr, by `Yousra Bekhti`_ + +- Fix :func:`mne.read_bem_surfaces` for BEM files missing normals by `Christian Brodbeck`_ + +- Fix :func:`mne.transform_surface_to` to actually copy when ``copy=True`` by `Eric Larson`_ + +- Fix :func:`mne.io.read_raw_brainvision` to read vectorized data correctly by `Jaakko Leppakangas`_ and `Phillip Alday`_ + +- Fix ``mne.connectivity.spectral_connectivity`` so that if ``n_jobs > 1`` it does not ignore last ``n_epochs % n_jobs`` epochs by `Mikołaj Magnuski`_ + +- Fix :func:`mne.io.read_raw_edf` to infer sampling rate correctly when reading EDF+ files where tal-channel has a higher sampling frequency by `Jaakko Leppakangas`_ + +- Fix default value of ``kind='topomap'`` in ``mne.channels.Montage.plot`` to be consistent with :func:`mne.viz.plot_montage` by `Clemens Brunner`_ + +- Fix bug in :meth:`to_data_frame ` where non-consecutive picks would make the function crash by `Jaakko Leppakangas`_ + +- Fix channel picking and drop in :class:`mne.time_frequency.EpochsTFR` by `Lukáš Hejtmánek`_ + +- Fix :func:`mne.SourceEstimate.transform` to properly update :attr:`mne.SourceEstimate.times` by `Marijn van Vliet`_ + +- Fix :func:`mne.viz.plot_evoked_joint` to allow custom titles without appending information about the channels by `Jaakko Leppakangas`_ + +- Fix writing a forward solution after being processed by :func:`mne.forward.restrict_forward_to_label` or :func:`mne.forward.restrict_forward_to_stc` by `Marijn van Vliet`_ + +- Fix bug in :func:`mne.viz.plot_compare_evokeds` where ``truncate_yaxis`` was ignored (default is now ``False``), by `Jona Sassenhagen`_ + +- Fix bug in :func:`mne.viz.plot_evoked` where all xlabels were removed when using ``spatial_colors=True``, by `Jesper Duemose Nielsen`_ + +- Fix field mapping :func:`mne.make_field_map` and MEG bad channel interpolation functions (e.g., :meth:`mne.Evoked.interpolate_bads`) to choose a better number of components during pseudoinversion when few channels are available, by `Eric Larson`_ + +- Fix bug in :func:`mne.io.read_raw_brainvision`, changed default to read coordinate information if available and added test, by `Jesper Duemose Nielsen`_ + +- Fix bug in :meth:`mne.SourceEstimate.to_original_src` where 
morphing failed if two vertices map to the same target vertex, by `Marijn van Vliet`_ + +- Fix :class:`mne.preprocessing.Xdawn` to give verbose error messages about rank deficiency and handle transforming :class:`mne.Evoked`, by `Eric Larson`_ + +- Fix bug in DC and Nyquist frequency multitaper PSD computations, e.g. in ``mne.time_frequency.psd_multitaper``, by `Eric Larson`_ + +- Fix default padding type for :meth:`mne.Epochs.resample` and :meth:`mne.Evoked.resample` to be ``'edge'`` by default, by `Eric Larson`_ + +- Fix :func:`mne.inverse_sparse.mixed_norm`, :func:`mne.inverse_sparse.tf_mixed_norm` and :func:`mne.inverse_sparse.gamma_map` to work with volume source space and sphere head models in MEG by `Alex Gramfort`_ and `Yousra Bekhti`_ + +- Fix :meth:`mne.Evoked.as_type` channel renaming to append ``'_v'`` instead of ``'_virtual'`` to channel names to comply with shorter naming (15 char) requirements, by `Eric Larson`_ + +- Fix treatment of CTF HPI coils as fiducial points in :func:`mne.gui.coregistration` by `Eric Larson`_ + +- Fix resampling of events along with raw in :func:`mne.io.Raw` to now take into consideration the value of ``first_samp`` by `Chris Bailey`_ + +- Fix labels of PSD plots in :func:`mne.viz.plot_raw_psd` by `Alejandro Weinstein`_ + +- Fix depth weighting of sparse solvers (:func:`mne.inverse_sparse.mixed_norm`, :func:`mne.inverse_sparse.tf_mixed_norm` and :func:`mne.inverse_sparse.gamma_map`) with free orientation source spaces to improve orientation estimation by `Alex Gramfort`_ and `Yousra Bekhti`_ + +- Fix the threshold in :func:`mne.beamformer.rap_music` to properly estimate the rank by `Yousra Bekhti`_ + +- Fix treatment of vector inverse in :func:`mne.minimum_norm.apply_inverse_epochs` by `Emily Stephen`_ + +- Fix :func:`mne.find_events` when passing a list as the ``stim_channel`` parameter by `Alex Gramfort`_ + +- Fix parallel processing when computing covariance with shrinkage estimators by `Denis Engemann`_ + +API +~~~ +- Removed the ``mne.datasets.megsim`` dataset because it was taken down by its host (MRN). Use the :mod:`mne.simulation` functions instead, by `Eric Larson`_ + +- Add ``skip_by_annotation`` to :meth:`mne.io.Raw.filter` to process data concatenated with e.g. :func:`mne.concatenate_raws` separately. This parameter will default to the old behavior (treating all data as a single block) in 0.15 but will change to ``skip_by_annotation='edge'``, which will filter the concatenated chunks separately, in 0.16. This should help prevent potential problems with filter-induced ringing in concatenated files, by `Eric Larson`_ (see the sketch below) + +- ICA channel names have now been reformatted to start from zero, e.g. ``"ICA000"``, to match indexing schemes in :class:`mne.preprocessing.ICA` and related functions, by `Stefan Repplinger`_ and `Eric Larson`_ + +- Add :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.apply_lcmv`, :func:`mne.beamformer.apply_lcmv_epochs`, and :func:`mne.beamformer.apply_lcmv_raw` to enable the separate computation and application of LCMV beamformer weights by `Britta Westner`_, `Alex Gramfort`_, and `Denis Engemann`_. + +- Add ``weight_norm`` parameter to enable both unit-noise-gain beamformer and neural activity index (weight normalization) and make whitening optional by allowing ``noise_cov=None`` in ``mne.beamformer.lcmv``, ``mne.beamformer.lcmv_epochs``, and ``mne.beamformer.lcmv_raw``, by `Britta Westner`_, `Alex Gramfort`_, and `Denis Engemann`_. 
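As referenced above, a minimal sketch of the ``skip_by_annotation`` behavior on a concatenated recording (synthetic data; the channel names and filter band are arbitrary, and only the call pattern is taken from the changelog)::

    import numpy as np
    import mne

    info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=200., ch_types='eeg')
    raw_a = mne.io.RawArray(np.random.randn(2, 2000), info)
    raw_b = mne.io.RawArray(np.random.randn(2, 2000), info.copy())
    raw = mne.concatenate_raws([raw_a, raw_b])  # boundary annotations are added here

    # Filter each concatenated chunk separately instead of treating the
    # recording as one contiguous block
    raw.filter(l_freq=1., h_freq=40., skip_by_annotation='edge')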
+ +- Add new filtering mode ``fir_design='firwin'`` (default in the next 0.16 release) that gets improved attenuation using fewer samples compared to ``fir_design='firwin2'`` (default in the current 0.15 release) by `Eric Larson`_ + +- Make the goodness of fit (GOF) of the dipoles returned by :func:`mne.beamformer.rap_music` consistent with the GOF of dipoles returned by :func:`mne.fit_dipole` by `Alex Gramfort`_. + +- :class:`mne.decoding.SlidingEstimator` will now replace ``mne.decoding.TimeDecoding`` to make it generic and fully compatible with scikit-learn, by `Jean-Rémi King`_ and `Alex Gramfort`_ + +- :class:`mne.decoding.GeneralizingEstimator` will now replace ``mne.decoding.GeneralizationAcrossTime`` to make it generic and fully compatible with scikit-learn, by `Jean-Rémi King`_ and `Alex Gramfort`_ + +- ``mne.viz.decoding.plot_gat_times``, ``mne.viz.decoding.plot_gat_matrix`` are now deprecated. Use matplotlib instead as shown in the examples, by `Jean-Rémi King`_ and `Alex Gramfort`_ + +- Add ``norm_trace`` parameter to control single-epoch covariance normalization in :class:`mne.decoding.CSP`, by `Jean-Rémi King`_ + +- Allow passing a list of channel names as ``show_names`` in function :func:`mne.viz.plot_sensors` and methods :meth:`mne.Evoked.plot_sensors`, :meth:`mne.Epochs.plot_sensors` and :meth:`mne.io.Raw.plot_sensors` to show only a subset of channel names by `Jaakko Leppakangas`_ + +- Make function ``mne.io.eeglab.read_events_eeglab`` public to allow loading overlapping events from EEGLAB files, by `Jona Sassenhagen`_. + +- :func:`mne.find_events` ``mask_type`` parameter will change from ``'not_and'`` to ``'and'`` in 0.16. + +- Instead of raising an error, duplicate channel names in the data file are now appended with a running number by `Jaakko Leppakangas`_ + +- :func:`mne.io.read_raw_edf` has now ``'auto'`` option for ``stim_channel`` (default in version 0.16) that automatically detects if EDF annotations or GDF events exist in the header and constructs the stim channel based on these events by `Jaakko Leppakangas`_ + +- :meth:`mne.io.Raw.plot_psd` now rejects data annotated bad by default. Turn off with ``reject_by_annotation=False``, by `Eric Larson`_ + +- :func:`mne.set_eeg_reference` and the related methods (e.g., :meth:`mne.io.Raw.set_eeg_reference`) have a new argument ``projection``, which if set to False directly applies an average reference instead of adding an SSP projector, by `Clemens Brunner`_ + +- Deprecate ``plot_trans`` in favor of :func:`mne.viz.plot_alignment` and add ``bem`` parameter for plotting conductor model by `Jaakko Leppakangas`_ + +- ``mne.beamformer.tf_lcmv`` now has a ``raw`` parameter to accommodate epochs objects that already have data loaded with ``preload=True``, with :meth:`mne.Epochs.load_data`, or that are read from disk, by `Eric Larson`_ + +- ``mne.time_frequency.psd_welch`` and :func:`mne.time_frequency.psd_array_welch` now use a Hamming window (instead of a Hann window) by `Clemens Brunner`_ + +- ``picks`` parameter in ``mne.beamformer.lcmv``, ``mne.beamformer.lcmv_epochs``, ``mne.beamformer.lcmv_raw``, ``mne.beamformer.tf_lcmv`` and :func:`mne.beamformer.rap_music` is now deprecated and will be removed in 0.16, by `Britta Westner`_, `Alex Gramfort`_, and `Denis Engemann`_. + +- The keyword argument ``frequencies`` has been deprecated in favor of ``freqs`` in various time-frequency functions, e.g. 
:func:`mne.time_frequency.tfr_array_morlet`, by `Eric Larson`_ + +- Add ``patterns=False`` parameter in :class:`mne.decoding.ReceptiveField`. Turn on to compute inverse model coefficients, by `Nicolas Barascud`_ + +- The ``scale``, ``scale_time``, and ``unit`` parameters have been deprecated in favor of ``scalings``, ``scalings_time``, and ``units`` in :func:`mne.viz.plot_evoked_topomap` and related functions, by `Eric Larson`_ + +- ``loose`` parameter in inverse solvers has now a default value ``'auto'`` depending if the source space is a surface, volume, or discrete type by `Alex Gramfort`_ and `Yousra Bekhti`_ + +- The behavior of ``'mean_flip'`` label-flipping in :meth:`mne.extract_label_time_course` and related functions has been changed such that the flip, instead of having arbitrary sign, maximally aligns in the positive direction of the normals of the label, by `Eric Larson`_ + +- Deprecate force_fixed and surf_ori in :func:`mne.read_forward_solution` by `Daniel Strohmeier`_ + +- :func:`mne.convert_forward_solution` has a new argument ``use_cps``, which controls whether information on cortical patch statistics is applied while generating surface-oriented forward solutions with free and fixed orientation by `Daniel Strohmeier`_ + +- :func:`mne.write_forward_solution` writes a forward solution as a forward solution with free orientation in X/Y/Z RAS coordinates if it is derived from a forward solution with free orientation and as a forward solution with fixed orientation in surface-based local coordinates otherwise by `Daniel Strohmeier`_ + +- ``loose=None`` in inverse solvers is deprecated, use explicitly ``loose=0`` for fixed constraint and ``loose=1.0`` for free orientations by `Eric Larson`_ + +- Zero-channel-value in PSD calculation in :func:`mne.viz.plot_raw_psd` has been relaxed from error to warning by `Alejandro Weinstein`_ + +- Expose "rank" parameter in :func:`mne.viz.plot_evoked_white` to correct rank estimates on the spot during visualization by `Denis Engemann`_, `Eric Larson`_, `Alex Gramfort`_. + +- Show channel name under mouse cursor on topography plots by `Jussi Nurminen`_ + +- Return maximum response amplitude from :meth:`mne.Evoked.get_peak` + +Authors +~~~~~~~ + +People who contributed to this release (in alphabetical order): + +* akshay0724 +* Alejandro Weinstein +* Alexander Rudiuk +* Alexandre Barachant +* Alexandre Gramfort +* Andrew Dykstra +* Britta Westner +* Chris Bailey +* Chris Holdgraf +* Christian Brodbeck +* Christopher Holdgraf +* Clemens Brunner +* Cristóbal Moënne-Loccoz +* Daniel McCloy +* Daniel Strohmeier +* Denis A. Engemann +* Emily P. Stephen +* Eric Larson +* Fede Raimondo +* Jaakko Leppakangas +* Jean-Baptiste Schiratti +* Jean-Rémi King +* Jesper Duemose Nielsen +* Joan Massich +* Jon Houck +* Jona Sassenhagen +* Jussi Nurminen +* Laetitia Grabot +* Laura Gwilliams +* Luke Bloy +* Lukáš Hejtmánek +* Mainak Jas +* Marijn van Vliet +* Mathurin Massias +* Matt Boggess +* Mikolaj Magnuski +* Nicolas Barascud +* Nicole Proulx +* Phillip Alday +* Ramonapariciog Apariciogarcia +* Robin Tibor Schirrmeister +* Rodrigo Hübner +* S. M. Gutstein +* Simon Kern +* Teon Brooks +* Yousra Bekhti diff --git a/doc/changes/v0.16.rst b/doc/changes/v0.16.rst new file mode 100644 index 00000000000..cdf6dbf0495 --- /dev/null +++ b/doc/changes/v0.16.rst @@ -0,0 +1,258 @@ +.. 
_changes_0_16: + +Version 0.16 (2018-05-01) +------------------------- + +Changelog +~~~~~~~~~ + +- Add possibility to pass dict of floats as argument to :func:`mne.make_ad_hoc_cov` by `Nathalie Gayraud`_ + +- Add support for metadata in :class:`mne.Epochs` by `Chris Holdgraf`_, `Alex Gramfort`_, `Jona Sassenhagen`_, and `Eric Larson`_ + +- Add support for plotting a dense head in :func:`mne.viz.plot_alignment` by `Eric Larson`_ + +- Allow plotting in user-created mayavi Scene in :func:`mne.viz.plot_alignment` by `Daniel McCloy`_ + +- Reduce memory consumption and do not require data to be loaded in :meth:`mne.Epochs.apply_baseline` by `Eric Larson`_ + +- Add option ``render_bem`` to :meth:`mne.Report.parse_folder` by `Eric Larson`_ + +- Add to :func:`mne.viz.plot_alignment` plotting of coordinate frame axes via ``show_axes`` and terrain-style interaction via ``interaction``, by `Eric Larson`_ + +- Add option ``initial_event`` to :func:`mne.find_events` by `Clemens Brunner`_ + +- Left and right arrow keys now scroll by 25% of the visible data, whereas Shift+left/right scroll by a whole page in :meth:`mne.io.Raw.plot` by `Clemens Brunner`_ + +- Add support for gantry tilt angle determination from Elekta FIF file header by `Chris Bailey`_ + +- Add possibility to concatenate :class:`mne.Annotations` objects with ``+`` or ``+=`` operators by `Clemens Brunner`_ + +- Add support for MaxShield raw files in :class:`mne.Report` by `Eric Larson`_ + +- Add ability to plot whitened data in :meth:`mne.io.Raw.plot`, :meth:`mne.Epochs.plot`, :meth:`mne.Evoked.plot`, and :meth:`mne.Evoked.plot_topo` by `Eric Larson`_ + +- Workaround for reading EGI MFF files with physiological signals that also present a bug from the EGI system in :func:`mne.io.read_raw_egi` by `Federico Raimondo`_ + +- Add support for reading subject height and weight in ``info['subject_info']`` by `Eric Larson`_ + +- Improve online filtering of raw data when plotting with :meth:`mne.io.Raw.plot` to filter in segments in accordance with the default ``skip_by_annotation=('edge', 'bad_acq_skip')`` of :meth:`mne.io.Raw.filter` to avoid edge ringing by `Eric Larson`_ + +- Add support for multiple head position files, plotting of sensors, and control of plotting color and axes in :func:`mne.viz.plot_head_positions` by `Eric Larson`_ + +- Add ability to read and write :class:`Annotations` separate from :class:`mne.io.Raw` instances via :meth:`Annotations.save` and :func:`read_annotations` by `Eric Larson`_ + +- Add option to unset a montage by passing `None` to :meth:`mne.io.Raw.set_montage` by `Clemens Brunner`_ + +- Add sensor denoising via :func:`mne.preprocessing.oversampled_temporal_projection` by `Eric Larson`_ + +- Add ``mne.io.pick.get_channel_types`` which returns all available channel types in MNE by `Clemens Brunner`_ + +- Use standard PCA instead of randomized PCA whitening prior to ICA to increase reproducibility by `Clemens Brunner`_ + +- Plot sEEG electrodes in :func:`mne.viz.plot_alignment` by `Alex Gramfort`_ + +- Add support for any data type like sEEG or ECoG in covariance related functions (estimation, whitening and plotting) by `Alex Gramfort`_ and `Eric Larson`_ + +- Add function ``mne.io.read_annotations_eeglab`` to allow loading annotations from EEGLAB files, by `Alex Gramfort`_ + +- :meth:`mne.io.Raw.set_montage` now accepts a string as its ``montage`` argument; this will set a builtin montage, by `Clemens Brunner`_ + +- Add 4D BTi phantom dataset :func:`mne.datasets.phantom_4dbti.data_path`, by `Alex Gramfort`_ + +- 
Changed the background color to grey in :func:`mne.viz.plot_alignment` to make the helmet more visible, by `Alex Gramfort`_ + +- Add :meth:`mne.io.Raw.reorder_channels`, :meth:`mne.Evoked.reorder_channels`, etc. to reorder channels, by `Eric Larson`_ + +- Add to ``mne coreg`` and :func:`mne.gui.coregistration` by `Eric Larson`_: + + - Improved visibility of points inside the head + - Projection of EEG electrodes + - Orientation of extra points toward the surface + - Scaling points by distance to the head surface + - Display of HPI points + - ICP fitting with convergence criteria + - Faster display updates + - Scaling of ``mri/*.mgz`` files + - Scaling of ``mri/transforms/talairach.xfm`` files for conversion to MNI space + +- Add ability to exclude components interactively by clicking on their labels in :meth:`mne.preprocessing.ICA.plot_components` by `Mikołaj Magnuski`_ + +- Add reader for manual annotations of raw data produced by Brainstorm by `Anne-Sophie Dubarry`_ + +- Add eLORETA noise normalization for minimum-norm solvers by `Eric Larson`_ + +- Tighter duality gap computation in ``mne.inverse_sparse.tf_mxne_optim`` and new parametrization with ``alpha`` and ``l1_ratio`` instead of ``alpha_space`` and ``alpha_time`` by `Mathurin Massias`_ and `Daniel Strohmeier`_ + +- Add ``dgap_freq`` parameter in ``mne.inverse_sparse.mxne_optim`` solvers to control the frequency of duality gap computation by `Mathurin Massias`_ and `Daniel Strohmeier`_ + +- Add support for reading Eximia files by `Eric Larson`_ and `Federico Raimondo`_ + +- Add the Picard algorithm to perform ICA for :class:`mne.preprocessing.ICA`, by `Pierre Ablin`_ and `Alex Gramfort`_ + +- Add ability to supply a mask to the plot in :func:`mne.viz.plot_evoked_image` by `Jona Sassenhagen`_ + +- Add ``connectivity=False`` to cluster-based statistical functions to perform non-clustering stats by `Eric Larson`_ + +- Add :func:`mne.time_frequency.csd_morlet` and :func:`mne.time_frequency.csd_array_morlet` to estimate cross-spectral density using Morlet wavelets, by `Marijn van Vliet`_ + +- Add multidictionary time-frequency support to :func:`mne.inverse_sparse.tf_mixed_norm` by `Mathurin Massias`_ and `Daniel Strohmeier`_ + +- Add new DICS implementation as :func:`mne.beamformer.make_dics`, :func:`mne.beamformer.apply_dics`, :func:`mne.beamformer.apply_dics_csd` and :func:`mne.beamformer.apply_dics_epochs`, by `Marijn van Vliet`_ and `Susanna Aro`_ + +Bug +~~~ + +- Fix bug in EEG interpolation code to do nothing if there is no channel to interpolate by `Mainak Jas`_ + +- Fix bug in ``mne.preprocessing.peak_finder`` to output datatype consistently and added input check for empty vectors by `Tommy Clausner`_ + +- Fix bug in :func:`mne.io.read_raw_brainvision` to use the correct conversion for filters from time constant to frequency by `Stefan Appelhoff`_ + +- Fix bug with events when saving split files using :meth:`mne.Epochs.save` by `Eric Larson`_ + +- Fix bug in :class:`mne.decoding.SlidingEstimator` and :class:`mne.decoding.GeneralizingEstimator` to allow :func:`mne.decoding.cross_val_multiscore` to automatically detect whether the ``base_estimator`` is a classifier and use a ``StratifiedKFold`` instead of a ``KFold`` when ``cv`` is not specified, by `Jean-Rémi King`_ + +- Fix bug in :func:`mne.set_eeg_reference` to remove an average reference projector when setting the reference to ``[]`` (i.e. 
do not change the existing reference) by `Clemens Brunner`_ + +- Fix bug in threshold-free cluster enhancement parameter validation (:func:`mne.stats.permutation_cluster_1samp_test` and :func:`mne.stats.permutation_cluster_test`) by `Clemens Brunner`_ + +- Fix bug in :meth:`mne.io.Raw.plot` to correctly display event types when annotations are present by `Clemens Brunner`_ + +- Fix bug in :func:`mne.stats.spatio_temporal_cluster_test`: the default value for ``threshold`` is now calculated based on the array sizes in ``X``, by `Eric Larson`_ + +- Fix bug in :func:`mne.simulation.simulate_raw` with ``use_cps=True`` where CPS was not actually used by `Eric Larson`_ + +- Fix bug in :func:`mne.simulation.simulate_raw` where 1- and 3-layer BEMs were not properly transformed using ``trans`` by `Eric Larson`_ + +- Fix bug in :func:`mne.viz.plot_alignment` where the head surface file ``-head.fif`` was not used even though present by `Chris Bailey`_ + +- Fix bug when writing compressed sparse column matrices (e.g., Maxwell filtering cross-talk matrices) by `Marijn van Vliet`_ and `Eric Larson`_ + +- Fix bug in :meth:`mne.io.Raw.plot_psd` to correctly deal with ``reject_by_annotation=False`` by `Clemens Brunner`_ + +- Fix bug in :func:`mne.make_fixed_length_events` when hitting corner case problems rounding to sample numbers by `Eric Larson`_ + +- Fix bug in :class:`mne.Epochs` when passing events as list with ``event_id=None`` by `Alex Gramfort`_ + +- Fix bug in ``mne.Report.add_figs_to_section`` when passing :class:`numpy.ndarray` by `Eric Larson`_ + +- Fix bug in CSS class setting in :class:`mne.Report` BEM section by `Eric Larson`_ + +- Fix bug in :class:`Annotations` where annotations that extend to the end of a recording were not extended properly by `Eric Larson`_ + +- Fix bug in :meth:`mne.io.Raw.filter` to properly filter raw data with acquisition skips in separate segments by `Eric Larson`_ + +- Fix bug in :func:`mne.preprocessing.maxwell_filter` where homogeneous fields were not removed for CTF systems by `Eric Larson`_ + +- Fix computation of average quaternions in :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ + +- Fix bug in writing ``raw.annotations`` where empty annotations could not be written to disk, by `Eric Larson`_ + +- Fix support for writing FIF files with acquisition skips by using empty buffers rather than writing zeros by `Eric Larson`_ + +- Fix bug in the ``mne make_scalp_surfaces`` command where ``--force`` (to bypass topology check failures) was ignored by `Eric Larson`_ + +- Fix bug in :func:`mne.preprocessing.maxwell_filter` when providing ``origin`` in ``'meg'`` coordinate frame for recordings with a MEG to head transform (i.e., non empty-room recordings) by `Eric Larson`_ + +- Fix bug in :func:`mne.viz.plot_cov` that ignored ``colorbar`` argument by `Nathalie Gayraud`_ + +- Fix bug when picking CTF channels that could cause data saved to disk to be unreadable by `Eric Larson`_ + +- Fix bug where reading event latencies (in samples) from eeglab files didn't translate indices to 0-based Python indexing by `Mikołaj Magnuski`_ + +- Fix consistency between :class:`mne.Epochs` and :func:`mne.stats.linear_regression_raw` in converting between samples and times (:func:`mne.stats.linear_regression_raw` now rounds, instead of truncating) by `Phillip Alday`_ + +- Fix bug in ``mne coreg`` where sphere surfaces were scaled by `Eric Larson`_ + +- Fix bug in :meth:`mne.Evoked.plot_topomap` when using ``proj='interactive'`` mode by `Eric Larson`_ + +- Fix bug where passing ``show_sensors=1`` 
to :func:`mne.viz.plot_compare_evokeds` resulted in the sensors legend being placed in the lower right of the figure (position 4 in matplotlib) instead of the upper right by `Mikołaj Magnuski`_ + +- Fix handling of annotations when cropping and concatenating raw data by `Alex Gramfort`_ and `Eric Larson`_ + +- Fix bug in :func:`mne.preprocessing.create_ecg_epochs` where ``keep_ecg=False`` was ignored by `Eric Larson`_ + +- Fix bug in :meth:`mne.io.Raw.plot_psd` when ``picks is not None`` and ``picks`` spans more than one channel type by `Eric Larson`_ + +- Fix bug in :func:`mne.make_forward_solution` when passing data with compensation channels (e.g. CTF) that contain bad channels by `Alex Gramfort`_ + +- Fix bug in :meth:`mne.SourceEstimate.get_peak` and :meth:`mne.VolSourceEstimate.get_peak` when there is only a single time point by `Marijn van Vliet`_ + +- Fix bug in :func:`mne.io.read_raw_edf` so that when reading BDF files, stimulus channels are no longer scaled by `Clemens Brunner`_ + +API +~~~ + +- Channels with unknown locations are now assigned position ``[np.nan, np.nan, np.nan]`` instead of ``[0., 0., 0.]``, by `Eric Larson`_ + +- Removed unused ``image_mask`` argument from :func:`mne.viz.plot_topomap` by `Eric Larson`_ + +- Unknown measurement dates are now stored as ``info['meas_date'] = None`` rather than using the current date. ``None`` is also now used when anonymizing data and when determining the machine ID for writing files, by `Mainak Jas`_ and `Eric Larson`_ + +- :meth:`mne.Evoked.plot` will now append the number of epochs averaged for the evoked data in the first plot title, by `Eric Larson`_ + +- Changed the line width in :func:`mne.viz.plot_bem` from 2.0 to 1.0 for better visibility of underlying structures, by `Eric Larson`_ + +- Changed the behavior of :meth:`mne.io.Raw.pick_channels` and similar methods to be consistent with :func:`mne.pick_channels` to treat channel list as a set (ignoring order) -- if reordering is necessary use ``inst.reorder_channels``, by `Eric Larson`_ + +- Changed the labeling of some plotting functions to use more standard capitalization and units, e.g. 
"Time (s)" instead of "time [sec]" by `Eric Larson`_ + +- ``mne.time_frequency.csd_epochs`` has been refactored into :func:`mne.time_frequency.csd_fourier` and :func:`mne.time_frequency.csd_multitaper`, by `Marijn van Vliet`_ + +- ``mne.time_frequency.csd_array`` has been refactored into :func:`mne.time_frequency.csd_array_fourier` and :func:`mne.time_frequency.csd_array_multitaper`, by `Marijn van Vliet`_ + +- Added ``clean_names=False`` parameter to :func:`mne.io.read_raw_ctf` for control over cleaning of main channel names and compensation channel names from CTF suffixes by `Oleh Kozynets`_ + +- The functions ``lcmv``, ``lcmv_epochs``, and ``lcmv_raw`` are now deprecated in favor of :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.apply_lcmv`, :func:`mne.beamformer.apply_lcmv_epochs`, and :func:`mne.beamformer.apply_lcmv_raw`, by `Britta Westner`_ + +- The functions ``mne.beamformer.dics``, ``mne.beamformer.dics_epochs`` and ``mne.beamformer.dics_source_power`` are now deprecated in favor of :func:`mne.beamformer.make_dics`, :func:`mne.beamformer.apply_dics`, and :func:`mne.beamformer.apply_dics_csd`, by `Marijn van Vliet`_ + + +Authors +~~~~~~~ + +People who contributed to this release (in alphabetical order): + +* Alejandro Weinstein +* Alexandre Gramfort +* Annalisa Pascarella +* Anne-Sophie Dubarry +* Britta Westner +* Chris Bailey +* Chris Holdgraf +* Christian Brodbeck +* Claire Braboszcz +* Clemens Brunner +* Daniel McCloy +* Denis A. Engemann +* Desislava Petkova +* Dominik Krzemiński +* Eric Larson +* Erik Hornberger +* Fede Raimondo +* Henrich Kolkhorst +* Jean-Rémi King +* Jen Evans +* Joan Massich +* Jon Houck +* Jona Sassenhagen +* Juergen Dammers +* Jussi Nurminen +* Kambiz Tavabi +* Katrin Leinweber +* Kostiantyn Maksymenko +* Larry Eisenman +* Luke Bloy +* Mainak Jas +* Marijn van Vliet +* Mathurin Massias +* Mikolaj Magnuski +* Nathalie Gayraud +* Oleh Kozynets +* Phillip Alday +* Pierre Ablin +* Stefan Appelhoff +* Stefan Repplinger +* Tommy Clausner +* Yaroslav Halchenko diff --git a/doc/changes/v0.17.rst b/doc/changes/v0.17.rst new file mode 100644 index 00000000000..49e722c584d --- /dev/null +++ b/doc/changes/v0.17.rst @@ -0,0 +1,317 @@ +.. _changes_0_17: + +Version 0.17 (2018-11-19) +------------------------- + +Changelog +~~~~~~~~~ + +- Add new tutorial for :class:`mne.Annotations` and ``events`` by `Joan Massich`_ and `Alex Gramfort`_ + +- Add support for saving :class:`mne.Annotations` as CSV and TXT files by `Joan Massich`_ and `Alex Gramfort`_ + +- Add :meth:`mne.Epochs.shift_time` that shifts the time axis of :class:`mne.Epochs` by `Thomas Hartmann`_ + +- Add :func:`mne.viz.plot_arrowmap` computes arrowmaps using Hosaka-Cohen transformation from magnetometer or gradiometer data, these arrows represents an estimation of the current flow underneath the MEG sensors by `Sheraz Khan`_ + +- Add :func:`mne.io.read_raw_fieldtrip`, :func:`mne.read_epochs_fieldtrip` and :func:`mne.read_evoked_fieldtrip` to import FieldTrip data. By `Thomas Hartmann`_ and `Dirk Gütlin`_. + +- Add ``rank`` parameter to :func:`mne.compute_covariance`, :func:`mne.cov.regularize` and related functions to preserve data rank and speed up computation using low-rank computations during regularization by `Eric Larson`_ and `Denis Engemann`_ + +- Add new function :func:`mne.read_annotations` that can read annotations in EEGLAB, BrainVision, EDF and Brainstorm formats by `Joan Massich`_ and `Alex Gramfort`_. + +- Add capability to read and save Epochs containing complex data (e.g. 
after Hilbert-transform) using :meth:`mne.Epochs.save` and :func:`mne.read_epochs`, by `Stefan Repplinger`_, `Eric Larson`_ and `Alex Gramfort`_ + +- Add optically pumped magnetometer dataset and example by `Rasmus Zetter`_ and `Eric Larson`_ + +- Add ``origin`` parameter to :meth:`mne.io.Raw.time_as_index` to allow ``times`` to be relative to this ``origin`` by `Joan Massich`_ + +- Add ``title`` argument to :meth:`mne.SourceEstimate.plot` by `Eric Larson`_ + +- :meth:`mne.io.Raw.set_annotations` now changes ``orig_time`` to match ``meas_date`` and shifts ``self.annotations.onset`` accordingly. The previous behavior is deprecated and will be removed in 0.18. Work by `Joan Massich`_ + +- Add :func:`mne.compute_source_morph` which creates a :class:`mne.SourceMorph` object to unify morphing any type of source estimates (surface or volume) from one subject to another for group studies. It is now possible to do group studies when working on the volume with MNE. Work by `Tommy Clausner`_ during GSOC 2018 with the help of `Alex Gramfort`_ and `Eric Larson`_. + +- Add ability to pass threshold for EOG to :func:`mne.preprocessing.find_eog_events` and :func:`mne.preprocessing.create_eog_epochs` by `Peter Molfese`_ + +- Add possibility to save :class:`mne.VolSourceEstimate` and :class:`mne.MixedSourceEstimate` to HDF5 format (file extension .h5) with :meth:`mne.VolSourceEstimate.save` and :meth:`mne.MixedSourceEstimate.save` by `Alex Gramfort`_ + +- Add ``replace`` parameter to :meth:`mne.io.Raw.add_events` to allow adding events while removing the old ones on the stim channel by `Alex Gramfort`_ + +- Add ability to pass ``axes`` to ``ts_args`` and ``topomap_args`` of :func:`mne.viz.plot_evoked_joint` by `Jona Sassenhagen`_ + +- Add ability to pass a precomputed forward solution to :func:`mne.simulation.simulate_raw` by `Eric Larson`_ + +- Add ability to read and write beamformers with :func:`mne.beamformer.read_beamformer` and :meth:`mne.beamformer.Beamformer.save` by `Eric Larson`_ + +- Add resting-state source power spectral estimation example ``sphx_glr_auto_examples_datasets_plot_opm_rest_data.py`` by `Eric Larson`_, `Denis Engemann`_, and `Luke Bloy`_ + +- Add :func:`mne.channels.make_1020_channel_selections` to group 10/20-named EEG channels by location, by `Jona Sassenhagen`_ + +- Add helmet for Artemis123 for :func:`mne.viz.plot_alignment` by `Eric Larson`_ + +- Add support for reading MATLAB ``v7.3+`` files in :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` via `pymatreader`_ by `Steven Gutstein`_, `Eric Larson`_, and `Thomas Hartmann`_ + +- Add support for raw PSD plots in :meth:`mne.Report.parse_folder` via ``raw_psd`` argument of :class:`mne.Report` by `Eric Larson`_ + +- Add ``trig_shift_by_type`` parameter in :func:`mne.io.read_raw_brainvision` to allow specifying offsets for arbitrary marker types by `Henrich Kolkhorst`_ + +- Add progress bar support to :class:`mne.decoding.SlidingEstimator` and :class:`mne.decoding.GeneralizingEstimator` by `Eric Larson`_ + +- Add interactive visualization of volume source estimates using :func:`mne.viz.plot_volume_source_estimates` by `Mainak Jas`_ + +- Add :func:`mne.head_to_mri` to convert positions from head coordinates to MRI RAS coordinates, by `Joan Massich`_ and `Alex Gramfort`_ + +- Add improved CTF helmet for :func:`mne.viz.plot_alignment` by `Eric Larson`_ + +- Add handling in :func:`mne.combine_evoked` and :func:`mne.grand_average` for input with the same channels in different orders, if required, by `Jona Sassenhagen`_ + 
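A minimal sketch of the :func:`mne.combine_evoked` / :func:`mne.grand_average` handling noted just above, on synthetic :class:`mne.EvokedArray` objects (the data, channel names, and ``nave`` values are arbitrary illustrations)::

    import numpy as np
    import mne

    info = mne.create_info(['EEG 001', 'EEG 002', 'EEG 003'], sfreq=100.,
                           ch_types='eeg')
    ev1 = mne.EvokedArray(np.random.randn(3, 50), info, tmin=0., nave=20)
    ev2 = mne.EvokedArray(np.random.randn(3, 50), info.copy(), tmin=0., nave=30)
    ev2.reorder_channels(['EEG 003', 'EEG 001', 'EEG 002'])  # different order

    # Weighted combination (the explicit replacement for ``ev1 + ev2``) and
    # grand averaging; per the entry above, reordered channels are handled
    both = mne.combine_evoked([ev1, ev2], weights='nave')
    ga = mne.grand_average([ev1, ev2])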
+- Add ``split_naming`` parameter to the `mne.io.Raw.save` method to allow for BIDS-compatible raw file name construction by `Teon Brooks`_ + +- Add ``origin`` parameter to :meth:`mne.Evoked.interpolate_bads` and related methods by `Eric Larson`_ + +- Add automated MEG helmet shape approximation to :func:`mne.viz.plot_alignment` by `Eric Larson`_ + +- Add capability to save a :class:`mne.Report` to an HDF5 file to :meth:`mne.Report.save` by `Marijn van Vliet`_ + +- Add :func:`mne.open_report` to read back a :class:`mne.Report` object that was saved to an HDF5 file by `Marijn van Vliet`_ + +- Add multi-taper estimation to :func:`mne.minimum_norm.compute_source_psd` by `Eric Larson`_ + +- Add support for custom, e.g. robust, averaging methods in :meth:`mne.Epochs.average` by `Jona Sassenhagen`_ + +- Add support for Neuromag 122 system by `Alex Gramfort`_ + +- Add function ``mne.io.read_annotations_brainvision`` for reading directly Brainvision marker files by `Alex Gramfort`_ + +- Add :meth:`mne.Report.remove` method to remove existing figures from a report, by `Marijn van Vliet`_ + +- Add sign to output of max-power orientation for :func:`mne.beamformer.make_dics` by `Eric Larson`_ + +- Add support for ``pick_ori='max-power'`` when ``weight_norm=None`` in :func:`mne.beamformer.make_lcmv` by `Marijn van Vliet`_ + +- Add support for ``weight_norm='nai'`` for all ``pick_ori`` options in :func:`mne.beamformer.make_lcmv` by `Marijn van Vliet`_ + +- Add support for ``weight_norm='nai'`` to :func:`mne.beamformer.make_dics` by `Marijn van Vliet`_ + +- Add parameter ``rank=None`` to :func:`mne.beamformer.make_dics` by `Marijn van Vliet`_ + +- Add parameter ``rank='full'`` to :func:`mne.beamformer.make_lcmv`, which can be set to ``None`` to auto-compute the rank of the covariance matrix before regularization by `Marijn van Vliet`_ + +- Handle different time vectors in topography plots using :func:`mne.viz.plot_evoked_topo` by `Jussi Nurminen`_ + +- Speed up :func:`mne.inverse_sparse.mixed_norm` if the ``solver`` parameter is set to ``bcd`` using :func:`scipy.linalg.get_blas_funcs` by `Quentin Bertrand`_ + +Bug +~~~ + +- Fix bug with scaling of data in ``mne.cov._compute_covariance_auto`` that was affecting the :class:`mne.decoding.SPoC` estimator by `David Sabbagh`_ + +- Fix :func:`mne.io.Raw.plot_projs_topomap` by `Joan Massich`_ + +- Fix bug in :func:`mne.minimum_norm.compute_source_psd` where the ``stc.times`` output was scaled by 1000, by `Eric Larson`_ + +- Fix default values for ``'diagonal_fixed'`` estimation method of :func:`mne.compute_covariance` to be ``0.1`` for all channel types, as in :func:`mne.cov.regularize` by `Eric Larson`_ + +- Fix reading edf file annotations by `Joan Massich`_ + +- Fix bug with reading events from BrainVision files by `Stefan Appelhoff`_ + +- Fix bug where :func:`mne.io.read_raw_eeglab` would warn when the stim channel is populated with an array of zeros by `Joan Massich`_ + +- Fix 2nd column of events in BrainVision to no longer store duration but rather be contained by ``raw.annotations`` by `Alex Gramfort`_ + +- Fix checking of the correctness of the ``prepared=True`` argument in :func:`mne.minimum_norm.apply_inverse` and related functions by `Eric Larson`_ + +- Fix bug of not showing ERD's in baseline rescaled tfr topomaps if grads are combined by `Erkka Heinila`_ + +- Fix bug with FIF I/O where strings were written in UTF-8 format instead of Latin-1 by `Eric Larson`_ + +- Fix bug with reading measurement dates from BrainVision files by `Stefan Appelhoff`_ + 
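To illustrate the custom averaging support added to :meth:`mne.Epochs.average` (listed earlier in this changelog), a minimal sketch on synthetic epochs; the callable shown is an arbitrary example and must reduce over the epochs axis::

    import numpy as np
    import mne

    info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=100., ch_types='eeg')
    epochs = mne.EpochsArray(np.random.randn(10, 2, 50), info)

    evoked_mean = epochs.average()                   # default 'mean'
    evoked_median = epochs.average(method='median')  # built-in robust option
    # Any callable mapping (n_epochs, n_channels, n_times) -> (n_channels, n_times)
    evoked_custom = epochs.average(method=lambda d: np.median(d, axis=0))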
+- Fix bug with `mne.fit_dipole` where the residual was returned as ``ndarray`` instead of :class:`mne.Evoked` instance, by `Eric Larson`_ + +- Fix bug with ``mne flash_bem`` when ``flash30`` is not used by `Eric Larson`_ + +- Fix bug with :func:`mne.stats.permutation_cluster_test` and :func:`mne.stats.spatio_temporal_cluster_test` where ``threshold=None`` was not calculated properly for a f-oneway test by `Daniel McCloy`_ and `Eric Larson`_ + +- Fix bug with channel names in ``mgh70`` montage in ``mne.channels.read_montage`` by `Eric Larson`_ + +- Fix duplication of ``info['hpi_meas']`` and ``info['hpi_results']`` by `Sara Sommariva`_ + +- Fix bug in :func:`mne.io.read_raw_edf` when reading large files on Windows by `Marcin Koculak`_ + +- Fix check in :func:`mne.viz.plot_sensors` for invalid channel locations by `Eric Larson`_ + +- Fix bug in :func:`mne.io.read_raw_edf` where GDF files had ``info['highpass']`` and ``info['lowpass']`` set to NaN and ``info['meas_date']`` set incorrectly, by `Eric Larson`_ + +- Fix bug in :func:`mne.preprocessing.ICA.apply` to handle arrays as ``exclude`` property by `Joan Massich`_ + +- Fix bug in ``method='eLORETA'`` for :func:`mne.minimum_norm.apply_inverse` when using a sphere model and saved ``inv`` by `Eric Larson`_ + +- Fix bug in :class:`mne.io.Raw` where warnings were emitted when objects were deleted by `Eric Larson`_ + +- Fix vector data support for :class:`mne.VolSourceEstimate` by `Christian Brodbeck`_ + +- Fix bug with IIR filtering axis in :func:`mne.filter.filter_data` by `Eric Larson`_ + +- Fix bug with non-boxcar windows in :meth:`mne.io.Raw.resample` and :func:`mne.filter.resample` by `Eric Larson`_ + +- Fix bug in :func:`mne.minimum_norm.apply_inverse` where applying an MEG-only inverse would raise an error about needing an average EEG reference by `Eric Larson`_ + +- Fix bug in ``inst.apply_proj()`` where an average EEG reference was always added by `Eric Larson`_ + +- Fix bug in :func:`mne.time_frequency.tfr_morlet`, :func:`mne.time_frequency.tfr_multitaper`, and :func:`mne.time_frequency.tfr_stockwell` where not all data channels were picked by `Eric Larson`_ + +- Fix bug in :meth:`mne.preprocessing.ICA.plot_overlay` and :func:`mne.make_field_map` for CTF data with compensation by `Eric Larson`_ + +- Fix bug in :func:`mne.create_info` passing ``int`` as ``ch_names`` on Windows by `Eric Larson`_ + +- Fix bug in ``mne.realtime.RtEpochs`` where events during the buildup of the buffer were not correctly processed when incoming data buffers are smaller than the epochs by `Henrich Kolkhorst`_ + +- Fix bug in :func:`mne.io.read_raw_brainvision` where 1-indexed BrainVision events were not being converted into 0-indexed mne events by `Steven Bethard`_ + +- Fix bug in :func:`mne.viz.plot_snr_estimate` and :func:`mne.minimum_norm.estimate_snr` where the inverse rank was not properly utilized (especially affecting SSS'ed MEG data) by `Eric Larson`_ + +- Fix error when saving stc as nifti image when using volume source space formed by more than one label by `Alex Gramfort`_ + +- Fix error when interpolating MEG channels with compensation using reference channels (like for CTF data) by `Alex Gramfort`_ + +- Fix bug in :func:`mne.make_sphere_model` where EEG sphere model coefficients were not optimized properly by `Eric Larson`_ + +- Fix bug in :func:`mne.io.read_raw_ctf` to read bad channels and segments from CTF ds files by `Luke Bloy`_ + +- Fix problem with :meth:`mne.io.Raw.add_channels` where ``raw.info['bads']`` was replicated by `Eric 
Larson`_ + +- Fix bug with :class:`mne.Epochs` where an error was thrown when resizing data (e.g., during :meth:`mne.Epochs.drop_bad`) by `Eric Larson`_ + +- Fix naming of ``raw.info['buffer_size_sec']`` to be ``raw.buffer_size_sec`` as it is a writing parameter rather than a measurement parameter by `Eric Larson`_ + +- Fix EGI-MFF parser not to require ``dateutil`` package by `Eric Larson`_ + +- Fix error when running LCMV on MEG channels with compensation using reference channels (like for CTF data) by `Alex Gramfort`_ + +- Fix the use of :func:`sklearn.model_selection.cross_val_predict` with :class:`mne.decoding.SlidingEstimator` by `Alex Gramfort`_ + +- Fix event sample number increase when combining many Epochs objects with :func:`mne.concatenate_epochs` with by `Jasper van den Bosch`_ + +- Fix title of custom slider images to :class:`mne.Report` by `Marijn van Vliet`_ + +- Fix missing initialization of ``self._current`` in :class:`mne.Epochs` by `Henrich Kolkhorst`_ + +- Fix processing of data with bad segments and acquisition skips with new ``skip_by_annotation`` parameter in :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ + +- Fix symlinking to use relative paths in ``mne flash_bem`` and ``mne watershed_bem`` by `Eric Larson`_ + +- Fix error in mne coreg when saving with scaled MRI if fiducials haven't been saved by `Ezequiel Mikulan`_ + +- Fix normalization error in :func:`mne.beamformer.make_lcmv` when ``pick_ori='normal', weight_norm='unit_noise_gain'`` by `Marijn van Vliet`_ + +- Fix MNE-C installation instructions by `buildqa`_ + +- Fix computation of max-power orientation in :func:`mne.beamformer.make_dics` when ``pick_ori='max-power', weight_norm='unit_noise_gain'`` by `Marijn van Vliet`_ + +API +~~~ + +- Deprecated separate reading of annotations and synthesis of STI014 channels in readers by `Joan Massich`_: + + - Deprecated ``mne.io.read_annotations_eeglab`` + - Deprecated ``annot`` and ``annotmap`` parameters in :meth:`~mne.io.read_raw_edf` + - Deprecated ``stim_channel`` parameters in :func:`~mne.io.read_raw_edf`, :func:`~mne.io.read_raw_brainvision`, and :func:`~mne.io.read_raw_eeglab` + + Annotations are now added to ``raw`` instances directly upon reading as :attr:`raw.annotations `. + They can also be read separately with :func:`mne.read_annotations` for EEGLAB, BrainVision, EDF, and Brainstorm formats. + Use :func:`mne.events_from_annotations(raw.annotations) ` + to convert these to events instead of the old way (using STI014 channel synthesis followed by :func:`mne.find_events(raw) `). + + In 0.17 (this release) + Use ``read_raw_...(stim_channel=False)`` to disable warnings (and stim channel synthesis), but other arguments for ``stim_channel`` will still be supported. + + In 0.18 + The only supported option will be ``read_raw_...(stim_channel=False)``, and all stim-channel-synthesis arguments will be removed. At this point, ``stim_channel`` should be removed from scripts for future compatibility, but ``stim_channel=False`` will still be acceptable for backward compatibility. + + In 0.19 + The ``stim_channel`` keyword arguments will be removed from ``read_raw_...`` functions. + +- Calling ``mne.io.pick.pick_info`` removing channels that are needed by compensation matrices (``info['comps']``) no longer raises ``RuntimeException`` but instead logs an info level message. 
By `Luke Bloy`_ + +- :meth:`mne.Epochs.save` now has the parameter ``fmt`` to specify the desired format (precision) saving epoched data, by `Stefan Repplinger`_, `Eric Larson`_ and `Alex Gramfort`_ + +- Deprecated ``mne.SourceEstimate.morph_precomputed``, ``mne.SourceEstimate.morph``, ``mne.compute_morph_matrix``, ``mne.morph_data_precomputed`` and ``mne.morph_data`` in favor of :func:`mne.compute_source_morph`, by `Tommy Clausner`_ + +- Prepare transition to Python 3. This release will be the last release compatible with Python 2. The next version will be Python 3 only. + +- CUDA support now relies on CuPy_ instead of ``PyCUDA`` and ``scikits-cuda``. It can be installed using ``conda install cupy``. By `Eric Larson`_ + +- Functions requiring a color cycle will now default to Matplotlib rcParams colors, by `Stefan Appelhoff`_ + +- :meth:`mne.Evoked.plot_image` has gained the ability to ``show_names``, and if a selection is provided to ``group_by``, ``axes`` can now receive a `dict`, by `Jona Sassenhagen`_ + +- Calling :meth:`mne.Epochs.decimate` with ``decim=1`` no longer copies the data by `Henrich Kolkhorst`_ + +- Removed blocking (waiting for new epochs) in ``mne.realtime.RtEpochs.get_data()`` by `Henrich Kolkhorst`_ + +- Warning messages are now only emitted as :func:`warnings.warn_explicit` rather than also being emitted as ``logging`` messages (unless a logging file is being used) to avoid duplicate warning messages, by `Eric Larson`_ + +- Deprecated save_stc_as_volume function in favor of :meth:`mne.VolSourceEstimate.as_volume` and :meth:`mne.VolSourceEstimate.save_as_volume` by `Alex Gramfort`_ + +- ``src.kind`` now equals to ``'mixed'`` (and not ``'combined'``) for a mixed source space (made of surfaces and volume grids) by `Alex Gramfort`_ + +- Deprecation of :attr:`mne.io.Raw.annotations` property in favor of :meth:`mne.io.Raw.set_annotations` by `Joan Massich`_ + +- The default value of ``stop_receive_thread`` in ``mne.realtime.RtEpochs.stop`` has been changed to ``True`` by `Henrich Kolkhorst`_ + +- Using the :meth:`mne.io.Raw.add_channels` on an instance with memmapped data will now resize the memmap file to append the new channels on Windows and Linux, by `Eric Larson`_ + +- :attr:`mne.io.Raw.annotations` when missing is set to an empty :class:`mne.Annotations` rather than ``None`` by `Joan Massich`_ and `Alex Gramfort`_ + +- Mismatches in CTF compensation grade are now checked in inverse computation by `Eric Larson`_ + + +Authors +~~~~~~~ + +People who contributed to this release (in alphabetical order): + +* Alexandre Gramfort +* Antoine Gauthier +* Britta Westner +* Christian Brodbeck +* Clemens Brunner +* Daniel McCloy +* David Sabbagh +* Denis A. Engemann +* Eric Larson +* Ezequiel Mikulan +* Henrich Kolkhorst +* Hubert Banville +* Jasper J.F. van den Bosch +* Jen Evans +* Joan Massich +* Johan van der Meer +* Jona Sassenhagen +* Kambiz Tavabi +* Lorenz Esch +* Luke Bloy +* Mainak Jas +* Manu Sutela +* Marcin Koculak +* Marijn van Vliet +* Mikolaj Magnuski +* Peter J. Molfese +* Sam Perry +* Sara Sommariva +* Sergey Antopolskiy +* Sheraz Khan +* Stefan Appelhoff +* Stefan Repplinger +* Steven Bethard +* Teekuningas +* Teon Brooks +* Thomas Hartmann +* Thomas Jochmann +* Tom Dupré la Tour +* Tristan Stenner +* buildqa +* jeythekey diff --git a/doc/changes/v0.18.rst b/doc/changes/v0.18.rst new file mode 100644 index 00000000000..aa9c4b37826 --- /dev/null +++ b/doc/changes/v0.18.rst @@ -0,0 +1,268 @@ +.. 
_changes_0_18: + +Version 0.18 (2019-05-19) +------------------------- + +Changelog +~~~~~~~~~ + +- Add ``event_id='auto'`` in :func:`mne.events_from_annotations` to accommodate Brainvision markers by `Jona Sassenhagen`_, `Joan Massich`_ and `Eric Larson`_ + +- Add example on how to simulate raw data using subject anatomy, by `Ivana Kojcic`_, `Eric Larson`_, `Samuel Deslauriers-Gauthier`_ and `Kostiantyn Maksymenko`_ + +- :func:`mne.beamformer.apply_lcmv_cov` returns static source power after supplying a data covariance matrix to the beamformer filter by `Britta Westner`_ and `Marijn van Vliet`_ + +- Add ``butterfly`` and ``order`` arguments to :func:`mne.viz.plot_epochs` and offer separated traces for non-meg data (seeg, eeg, ecog) in butterfly view by `Stefan Repplinger`_ and `Eric Larson`_ + +- :meth:`mne.Epochs.get_data` now takes a ``picks`` parameter by `Jona Sassenhagen`_ + +- :func:`~mne.viz.plot_compare_evokeds` will generate topo plots if ``axes='topo'`` by `Jona Sassenhagen`_ + +- ``mne.viz.iter_topography`` can yield an additional axis, e.g., for plotting legends by `Jona Sassenhagen`_ and `Daniel McCloy`_ + +- Default plot title reflects channel type when ``picks`` is a channel type in :func:`~mne.viz.plot_compare_evokeds` by `Daniel McCloy`_ + +- Color scale limits in :func:`~mne.viz.plot_topo_image_epochs` are now computed separately per channel type in combined mag/grad plots, by `Daniel McCloy`_ + +- :func:`mne.simulation.simulate_stc` now allows for label overlaps by `Nathalie Gayraud`_, and `Ivana Kojcic`_ + +- Add ``long_format`` option to the pandas dataframe exporters, e.g :meth:`mne.Epochs.to_data_frame` by `Denis Engemann`_ + +- Add example on how to load standard montage :ref:`plot_montage` by `Joan Massich`_ + +- Add new tutorial on :ref:`tut-eeg-fsaverage-source-modeling` by `Alex Gramfort`_, and `Joan Massich`_ + +- Add :meth:`mne.Epochs.apply_hilbert` and :meth:`mne.Evoked.apply_hilbert` by `Eric Larson`_ + +- Add convenience ``fsaverage`` subject dataset fetcher / updater :func:`mne.datasets.fetch_fsaverage` by `Eric Larson`_ + +- Add ``fmin`` and ``fmax`` argument to :meth:`mne.time_frequency.AverageTFR.crop` and to :meth:`mne.time_frequency.EpochsTFR.crop` to crop TFR objects along frequency axis by `Dirk Gütlin`_ + +- Add support to :func:`mne.read_annotations` to read CNT formats by `Joan Massich`_ + +- Add ``reject`` parameter to :meth:`mne.preprocessing.ICA.plot_properties` to visualize rejected epochs by `Antoine Gauthier`_ + +- Add support for picking channels using channel name and type strings to functions with ``picks`` arguments, along with a convenience :meth:`mne.io.Raw.pick`, :meth:`mne.Epochs.pick`, and :meth:`mne.Evoked.pick` method, by `Eric Larson`_ + +- Add new tutorial on :ref:`tut-sleep-stage-classif` by `Alex Gramfort`_, `Stanislas Chambon`_ and `Joan Massich`_ + +- Add data fetchers for polysomnography (PSG) recordings from Physionet (:func:`mne.datasets.sleep_physionet.age.fetch_data` and :func:`mne.datasets.sleep_physionet.temazepam.fetch_data`) by `Alex Gramfort`_ and `Joan Massich`_ + +- Add envelope correlation code in ``mne.connectivity.envelope_correlation`` by `Denis Engemann`_, `Sheraz Khan`_, and `Eric Larson`_ + +- Add option to toggle all projectors in :meth:`mne.io.Raw.plot` and related functions by `Eric Larson`_ + +- Add support for indexing, slicing, and iterating :class:`mne.Annotations` by `Joan Massich`_ + +- :meth:`mne.io.Raw.plot` now uses the lesser of ``n_channels`` and ``raw.ch_names``, by `Joan Massich`_ + +- 
Add support for FIR filtering in :meth:`mne.io.Raw.plot` and :ref:`mne browse_raw` by passing ``filtorder=0`` or ``--filtorder 0``, respectively, by `Eric Larson`_ + +- Add ``chunk_duration`` parameter to :func:`mne.events_from_annotations` to allow multiple events from a single annotation by `Joan Massich`_ + +- Add :class:`mne.simulation.SourceSimulator` class to simplify simulating SourceEstimates, by `Samuel Deslauriers-Gauthier`_, `Kostiantyn Maksymenko`_, `Nathalie Gayraud`_, `Ivana Kojcic`_, `Alex Gramfort`_, and `Eric Larson`_ + +- :func:`mne.io.read_raw_edf` now detects analog stim channels labeled ``'STATUS'`` and sets them as stim channel. :func:`mne.io.read_raw_edf` no longer synthesizes TAL annotations into stim channel but stores them in ``raw.annotations`` when reading by `Joan Massich`_ + +- Add `mne.simulation.add_noise` for ad-hoc noise addition to `io.Raw`, `Epochs`, and `Evoked` instances, by `Eric Larson`_ + +- Add ``drop_refs=True`` parameter to :func:`set_bipolar_reference` to drop/keep anode and cathode channels after applying the reference by `Cristóbal Moënne-Loccoz`_. + +- Add processing of reference MEG channels to :class:`mne.preprocessing.ICA` by `Jevri Hanna`_ + +- Add use of :func:`scipy.signal.windows.dpss` for faster multitaper window computations in PSD functions by `Eric Larson`_ + +- Add :func:`mne.morph_labels` to facilitate morphing label sets obtained from parcellations, by `Eric Larson`_ + +- Add :func:`mne.labels_to_stc` to facilitate working with label data, by `Eric Larson`_ + +- Add :func:`mne.label.select_sources` to simplify the selection of sources within a label, by `Samuel Deslauriers-Gauthier`_ + +- Add support for using :class:`mne.Info` in :func:`mne.simulation.simulate_raw` instead of :class:`mne.io.Raw` by `Eric Larson`_ + +- Add support for passing an iterable and stim channel values using ``stc`` parameter of :func:`mne.simulation.simulate_raw` by `Eric Larson`_ + +- Add ``overlap`` argument to :func:`mne.make_fixed_length_events` by `Eric Larson`_ + +- Add approximate distance-based ``spacing`` source space decimation algorithm to :func:`mne.setup_source_space` by `Eric Larson`_ + +- Add 448-labels subdivided aparc cortical parcellation by `Denis Engemann`_ and `Sheraz Khan`_ + +- Add option to improve rendering in :ref:`mne coreg` for modern graphics cards by `Eric Larson`_ + +- Add ``mne.preprocessing.mark_flat`` to automate marking of flat channels and segments of raw data by `Eric Larson`_ + +- Add support for CUDA-based correlation computations and progress bars in :class:`mne.decoding.ReceptiveField` by `Eric Larson`_ + +- Add support for file-like objects in :func:`mne.io.read_raw_fif` as long as preloading is used by `Eric Larson`_ + +- Add keyboard shortcuts to navigate volume source estimates in time using (shift+)left/right arrow keys by `Mainak Jas`_ + +- Add option to SSP preprocessing functions (e.g., :func:`mne.preprocessing.compute_proj_eog` and :func:`mne.compute_proj_epochs`) to process MEG channels jointly with ``meg='combined'`` by `Eric Larson`_ + +- Add Epoch selection and metadata functionality to :class:`mne.time_frequency.EpochsTFR` using new mixin class by `Keith Doelling`_ + +- Add ``reject_by_annotation`` argument to :func:`mne.preprocessing.find_ecg_events` by `Eric Larson`_ + +- Add ``pca`` argument to return the rank-reduced whitener in :func:`mne.cov.compute_whitener` by `Eric Larson`_ + +- Add ``extrapolate`` argument to :func:`mne.viz.plot_topomap` for better control of extrapolation points placement 
by `Mikołaj Magnuski`_ + +- Add ``channel_wise`` argument to :func:`mne.io.Raw.apply_function` to allow applying a function on multiple channels at once by `Hubert Banville`_ + +- Add option ``copy='auto'`` to control data copying in :class:`mne.io.RawArray` by `Eric Larson`_ + +- The ``mri`` parameter in :func:`mne.setup_volume_source_space` is now automatically set to ``T1.mgz`` if ``subject`` is provided. This allows to get a :class:`mne.SourceSpaces` of kind ``volume`` more automatically. By `Alex Gramfort`_ + +- Add better ``__repr__`` for constants, and :class:`info['dig'] ` entries via ``DigPoint`` by `Eric Larson`_ + +- Allow string argument in :meth:`mne.io.Raw.drop_channels` to remove a single channel by `Clemens Brunner`_ + +- Add additional depth weighting options for inverse solvers (e.g., :func:`mne.inverse_sparse.gamma_map` and :func:`mne.inverse_sparse.mixed_norm`) by `Eric Larson`_ + +- Add depth weighting to LCMV beamformers via ``depth`` argument in :func:`mne.beamformer.make_lcmv` by `Eric Larson`_ + +- Allow toggling of DC removal in :meth:`mne.io.Raw.plot` by pressing the 'd' key by `Clemens Brunner`_ + +- Improved clicking in :meth:`mne.io.Raw.plot` (left click on trace toggles bad, left click on background sets green line, right click anywhere removes green line) by `Clemens Brunner`_ + +- Add ``mne.realtime.LSLClient`` for realtime data acquisition with LSL streams of data by `Teon Brooks`_ and `Mainak Jas`_ + +- Add partial support for PyVista as a 3D backend using :func:`mne.viz.use_3d_backend` by `Guillaume Favelier`_ + +- Add option ``ids = None`` in :func:`mne.event.shift_time_events` for considering all events by `Nikolas Chalas`_ and `Joan Massich`_ + +- Add ``mne.realtime.MockLSLStream`` to simulate an LSL stream for testing and examples by `Teon Brooks`_ + +- Add support for file-like objects in :func:`mne.read_epochs` as long as preloading is used by `Paul Roujansky`_ + +Bug +~~~ + +- Fix annotations in split fif files :func:`mne.io.read_raw_fif` by `Joan Massich`_ + +- Fix :meth:`mne.Epochs.plot` with ``scalings='auto'`` to properly compute channel-wise scalings by `Stefan Repplinger`_ + +- Fix :func:`mne.gui.coregistration` and :ref:`mne coreg` crashing with segmentation fault when switching subjects by `Eric Larson`_ + +- Fix :func:`mne.io.read_raw_brainvision` to accommodate vmrk files which do not have any annotations by Alexander Kovrig + +- Fix :meth:`mne.io.Raw.plot` and :meth:`mne.Epochs.plot` to auto-scale ``misc`` channel types by default by `Eric Larson`_ + +- Fix filtering functions (e.g., :meth:`mne.io.Raw.filter`) to properly take into account the two elements in ``n_pad`` parameter by `Bruno Nicenboim`_ + +- Fix ``feature_names`` parameter change after fitting in :class:`mne.decoding.ReceptiveField` by `Jean-Rémi King`_ + +- Fix index error in :func:`mne.io.read_raw_cnt` when creating stim_channel manually by `Joan Massich`_ + +- Fix bug with ``weight_norm='unit-gain'`` in :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` by `Britta Westner`_ + +- Fix 32bits annotations in :func:`mne.io.read_raw_cnt` by `Joan Massich`_ + +- Fix :func:`mne.events_from_annotations` to ignore ``'BAD_'`` and ``'EDGE_'`` annotations by default using a new default ``regexp`` by `Eric Larson`_ + +- Fix bug in ``mne.preprocessing.mark_flat`` where ``raw.first_samp`` was not taken into account by `Evgenii Kalenkovich`_ + +- Fix date parsing in :func:`mne.io.read_raw_cnt` by `Joan Massich`_ + +- Fix topological checks and error messages for BEM 
surfaces in :func:`mne.make_bem_model` by `Eric Larson`_ + +- Fix default HTML language of :class:`mne.Report` to be ``"en-us"`` instead of ``"fr"`` and allow setting via ``report.lang`` property by `Eric Larson`_ + +- Fix bug where loading epochs with ``preload=True`` and subsequently using :meth:`mne.Epochs.drop_bad` with new ``reject`` or ``flat`` entries leads to improper data (and ``epochs.selection``) since v0.16.0 by `Eric Larson`_. + If your code uses ``Epochs(..., preload=True).drop_bad(reject=..., flat=...)``, we recommend regenerating these data. + +- Fix :ref:`mne flash_bem` to properly utilize ``flash30`` images when conversion from DICOM images is used, and to properly deal with non-standard acquisition affines by `Eric Larson`_ + +- Fix :meth:`mne.io.Raw.set_annotations` with ``annotations=None`` to create an empty annotations object with ``orig_time`` that matches the :class:`mne.io.Raw` instance by `Eric Larson`_ + +- Fix :func:`mne.io.read_raw_edf` returning all the annotations with the same name in GDF files by `Joan Massich`_ + +- Fix boundaries during plotting of raw data with :func:`mne.io.Raw.plot` and :ref:`mne browse_raw` on scaled displays (e.g., macOS with HiDPI/Retina screens) by `Clemens Brunner`_ + +- Fix bug where filtering was not performed with ``lowpass`` or ``highpass`` in :meth:`mne.io.Raw.plot` and :ref:`mne browse_raw` by `Eric Larson`_ + +- Fix :func:`mne.simulation.simulate_evoked` that was failing to simulate the noise with heterogeneous sensor types due to poor conditioning of the noise covariance and make sure the projections from the noise covariance are taken into account by `Alex Gramfort`_ + +- Fix checking of ``data`` dimensionality in :class:`mne.SourceEstimate` and related constructors by `Eric Larson`_ + +- Fix :meth:`mne.io.Raw.append` annotations misalignment by `Joan Massich`_ + +- Fix hash bug in the ``mne.io.edf`` module when installing on Windows by `Eric Larson`_ + +- Fix :func:`mne.io.read_raw_edf` reading duplicate channel names by `Larry Eisenman`_ + +- Fix :func:`set_bipolar_reference` in the case of generating all bipolar combinations and also in the case of repeated channels in both lists (anode and cathode) by `Cristóbal Moënne-Loccoz`_ + +- Fix missing code for computing the median when ``method='median'`` in :meth:`mne.Epochs.average` by `Cristóbal Moënne-Loccoz`_ + +- Fix CTF helmet plotting in :func:`mne.viz.plot_evoked_field` by `Eric Larson`_ + +- Fix saving of rejection parameters in :meth:`mne.Epochs.save` by `Eric Larson`_ + +- Fix orientations returned by :func:`mne.dipole.get_phantom_dipoles` (half were flipped 180 degrees) by `Eric Larson`_ + +- Fix bug in :func:`mne.viz.plot_compare_evokeds` when ``evoked.times[0] >= 0`` would cause a problem with ``vlines='auto'`` mode by `Eric Larson`_ + +- Fix path bugs in :func:`mne.bem.make_flash_bem` and :ref:`mne flash_bem` by `Eric Larson`_ + +- Fix :meth:`mne.time_frequency.AverageTFR.plot_joint` mishandling bad channels, by `David Haslacher`_ and `Jona Sassenhagen`_ + +- Fix :func:`mne.beamformer.make_lcmv` failing when full rank data is used (i.e., when no projection is done) with ``reg=0.``, by `Eric Larson`_ + +- Fix issue with bad channels ignored in :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` by `Alex Gramfort`_ + +- Fix :func:`mne.compute_proj_raw` when ``duration != None`` not to apply existing proj and to avoid using duplicate raw data samples by `Eric Larson`_ + +- Fix ``reject_by_annotation`` not being passed internally by 
:func:`mne.preprocessing.create_ecg_epochs` and :ref:`mne clean_eog_ecg` to :func:`mne.preprocessing.find_ecg_events` by `Eric Larson`_ + +- Fix :func:`mne.io.read_raw_edf` failing when EDF header fields (such as patient name) contained special characters, by `Clemens Brunner`_ + +- Fix :func:`mne.io.read_raw_eeglab` incorrectly parsing event durations by `Clemens Brunner`_ + +- Fix :func:`mne.io.read_raw_egi` when cropping non-preloaded EGI MFF data by `Alex Gramfort`_ + +- Fix :meth:`mne.io.Raw.interpolate_bads` for interpolating CTF MEG channels when reference sensors are present by `jeythekey`_ + +- Fix a bug in :meth:`mne.io.Raw.resample`, where resampling events could result in indices > n_times-1, by `jeythekey`_ + +- Fix :meth:`mne.preprocessing.ICA.score_sources` to use the ``sfreq`` of the raw data to filter rather than the ``sfreq`` when the ICA was fit, by `jeythekey`_ + +- Fix a bug in :class:`mne.preprocessing.ICA`, where manually setting the attribute ``ICA.exclude`` to an np.array resulted in the removal of random components when later also providing the ``exclude`` argument to any ``apply...``-method, by `jeythekey`_ + +- Ascending changed to descending sorting of scores for integer ``..._criterion`` arguments in ``mne.preprocessing.ICA.detect_artifacts`` and ``mne.preprocessing.run_ica``, as it used to be documented; the docstring in these functions was corrected for float ``..._criterion`` arguments, by `jeythekey`_ + +API +~~~ + +- Deprecate ``cov, iir_params, blink, ecg, chpi, random_state`` and support for :class:`mne.io.Raw` instance inputs in :func:`mne.simulation.simulate_raw`; use :func:`mne.simulation.add_noise`, :func:`mne.simulation.add_ecg`, :func:`mne.simulation.add_eog`, and :func:`mne.simulation.add_chpi` by `Eric Larson`_ + +- Add ``overwrite`` parameter in :func:`mne.Epochs.save` by `Katarina Slama`_ + +- Add ``stim_channel`` parameter in :func:`mne.io.read_raw_cnt` to toggle stim channel synthesis by `Joan Massich`_ + +- Python 2 is no longer supported; MNE-Python now requires Python 3.5+, by `Eric Larson`_ + +- A new class :class:`mne.VolVectorSourceEstimate` is returned by :func:`mne.minimum_norm.apply_inverse` (and related functions) when a volume source space and ``pick_ori='vector'`` is used, by `Eric Larson`_ + +- Converting a forward solution with a volume source space to fixed orientation using :func:`mne.convert_forward_solution` now raises an error, by `Eric Larson`_ + +- ``raw.estimate_rank`` has been deprecated and will be removed in 0.19 in favor of :func:`mne.compute_rank` by `Eric Larson`_ + +- :class:`Annotations` are now kept sorted (by onset time) during instantiation and :meth:`~Annotations.append` operations, by `Eric Larson`_ + +- Deprecate ``mne.io.find_edf_events`` by `Joan Massich`_ + +- Deprecate ``limit_depth_chs`` in :func:`mne.minimum_norm.make_inverse_operator` in favor of ``depth=dict(limit_depth_chs=...)`` by `Eric Larson`_ + +- Reading BDF and GDF files with :func:`mne.io.read_raw_edf` is deprecated and replaced by :func:`mne.io.read_raw_bdf` and :func:`mne.io.read_raw_gdf`, by `Clemens Brunner`_ + +- :func:`mne.forward.compute_depth_prior` has been reworked to operate directly on :class:`Forward` instance as ``forward`` rather than a representation scattered across the parameters ``G, is_fixed_ori, patch_info``, by `Eric Larson`_ + +- Deprecate ``method='extended-infomax'`` in :class:`mne.preprocessing.ICA`; Extended Infomax can now be computed with ``method='infomax'`` and ``fit_params=dict(extended=True)`` by `Clemens 
Brunner`_ + +- Fix support for supplying ``extrapolate`` via :meth:`ica.plot_properties(..., topomap_args=dict(extrapolate=...)) ` by `Sebastian Castano`_ + +- The peak finder that was formerly accessible via ``from mne.preprocessing.peak_finder import peak_finder`` should now be imported directly from the enclosing namespace as ``from mne.preprocessing import peak_finder`` by `Eric Larson`_ + +- Deprecate ``mne.realtime`` module to make a standalone module ``mne-realtime`` that will live outside of this package by `Teon Brooks`_ diff --git a/doc/changes/v0.19.rst b/doc/changes/v0.19.rst new file mode 100644 index 00000000000..5e34210868a --- /dev/null +++ b/doc/changes/v0.19.rst @@ -0,0 +1,232 @@ +.. _changes_0_19: + +Version 0.19 (2019-09-24) +------------------------- + +Changelog +~~~~~~~~~ + +- Add :func:`mne.cuda.set_cuda_device` and config variable ``MNE_CUDA_DEVICE`` to select among multiple GPUs (by numeric device ID) by `Daniel McCloy`_. + +- Add :func:`mne.channels.make_standard_montage` to create :class:`mne.channels.DigMontage` from templates by `Joan Massich`_ and `Alex Gramfort`_. + +- Add :func:`mne.channels.compute_dev_head_t` to compute Device-to-Head transformation from a montage by `Joan Massich`_ and `Alex Gramfort`_. + +- Add :func:`mne.channels.read_dig_fif` to read digitization coordinates from ``.fif`` files by `Joan Massich`_ and `Alex Gramfort`_. + +- Add :func:`mne.channels.read_dig_egi` to read digitization coordinates from EGI ``.xml`` files by `Joan Massich`_ and `Alex Gramfort`_. + +- Add :func:`mne.channels.read_dig_polhemus_isotrak` and :func:`mne.channels.read_polhemus_fastscan` to read Polhemus data by `Joan Massich`_ + +- Add ``mne.channels.read_dig_captrack`` to read BrainVision CapTrak (BVCT) digitization coordinate files by `Stefan Appelhoff`_ and `Joan Massich`_ + +- Add :func:`mne.channels.make_dig_montage` to create :class:`mne.channels.DigMontage` objects out of np.arrays by `Joan Massich`_ + +- Add :func:`mne.channels.read_custom_montage` to read various EEG electrode locations files by `Joan Massich`_ and `Alex Gramfort`_. + +- Add support for making epochs with duplicated events, by allowing three policies: "error" (default), "drop", or "merge" in :class:`mne.Epochs` by `Stefan Appelhoff`_ + +- Allow :meth:`mne.Annotations.crop` to support negative ``tmin`` and ``tmax`` by `Joan Massich`_ + +- Unknown events code in GDF are now visible in the ``event_id`` by `Théodore Papadopoulo`_ + +- Now :func:`mne.io.read_raw_ctf` populates ``raw.annotations`` with the markers in ``MarkerFile.mrk`` if any by `Joan Massich`_ + +- Add options for controlling the use of the ``-T1`` flag and the location of the brainmask output in :ref:`mne watershed_bem` by `Eric Larson`_ + +- Add support to :func:`mne.read_annotations` to read CTF marker files by `Joan Massich`_ + +- Do not convert effective number of averages (``nave`` attribute of :class:`mne.Evoked`) to integer except when saving to FIFF file by `Daniel McCloy`_. 
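A minimal sketch of the new montage constructors listed above (:func:`mne.channels.make_standard_montage` and :func:`mne.channels.make_dig_montage`); the channel names and positions below are made-up placeholders, assumed to be in meters in the head coordinate frame:

.. code-block:: python

    import numpy as np
    import mne

    # Template montage by name (replacement for the deprecated read_montage)
    montage = mne.channels.make_standard_montage("standard_1020")

    # Or build a DigMontage from explicit positions
    ch_pos = {"Fz": np.array([0.0, 0.072, 0.060]),
              "Cz": np.array([0.0, 0.0, 0.095])}
    dig = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame="head")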
+ +- Add automatic fiducial position estimation in :ref:`mne coreg` using MNI Talairach fiducial locations in :func:`mne.coreg.get_mni_fiducials` by `Jon Houck`_ and `Eric Larson`_ + +- Add support for :ref:`mne coreg` scaling surrogate subjects without surface reconstructions, such as those created for volumetric analyses only (e.g., with ``recon-all -autorecon1``) by `Eric Larson`_ + +- Add reader for Curry data in :func:`mne.io.read_raw_curry` by `Dirk Gütlin`_ + +- Butterfly channel plots now possible for :meth:`mne.Epochs.plot_psd` with ``average=False``. Infrastructure for this function now shared with analogous Raw function, found in ``mne.viz.utils`` by `Jevri Hanna`_ + +- Add option not to orthogonalize power envelopes with ``orthogonalize=False`` in ``mne.connectivity.envelope_correlation`` by `Denis Engemann`_ + +- Accept filenames of raw .fif files that end in ``_meg.fif`` to enable compliance with the Brain Imaging Data Structure by `Stefan Appelhoff`_ + +- Add function to check the type of a FIF file using :func:`mne.what` and :ref:`mne what` by `Eric Larson`_ + +- Add support for specifying the initial time and/or position and providing a :class:`mne.SourceMorph` instead of :class:`mne.SourceSpaces` in :func:`mne.viz.plot_volume_source_estimates` by `Eric Larson`_ + +- Speed up morph map generation in :func:`mne.read_morph_map` by ~5-10x by using :func:`numba.jit` by `Eric Larson`_ + +- Speed up :func:`mne.setup_volume_source_space`, especially when ``volume_label is not None`` by `Eric Larson`_ + +- Speed up :ref:`mne coreg` interactive and automated (ICP) alignment by using nearest-neighbor calculations in the MRI coordinate frame, by `Eric Larson`_ + +- Add :func:`mne.dig_mri_distances` to compute the distances between digitized head points and the MRI head surface by `Alex Gramfort`_ and `Eric Larson`_ + +- Add scale bars for data channels in :func:`mne.io.Raw.plot` by `Eric Larson`_ + +- Add :func:`mne.viz.plot_brain_colorbar` to plot a colorbar appropriately matched to a :func:`mne.viz.plot_source_estimates` plot by `Eric Larson`_ + +- Add support for showing head surface (to visualize digitization fit) while showing a single-layer BEM to :func:`mne.viz.plot_alignment` by `Eric Larson`_ + +- Add option ``include_tmax=True`` to cropping methods :meth:`mne.io.Raw.crop`, :meth:`mne.Epochs.crop`, :meth:`mne.Evoked.crop`, :meth:`mne.SourceEstimate.crop`, :meth:`mne.Dipole.crop`, and :meth:`mne.time_frequency.AverageTFR.crop` by `Eric Larson`_ + +- Change the behavior of :meth:`mne.io.Raw.plot` for ``scalings='auto'`` and ``remove_dc=True`` to compute the scalings on the data with DC removed by `Clemens Brunner`_ + +- Allow creating annotations within existing annotations in :func:`mne.io.Raw.plot` by default (the old snapping behavior can be toggled by pressing 'p') by `Clemens Brunner`_ + +- Add plotting of rank estimates in :func:`mne.viz.plot_cov` by `Eric Larson`_ + +- Add a new ``mne.viz.plot_sensors_connectivity`` function to visualize the sensor connectivity in 3D by `Guillaume Favelier`_ and `Alex Gramfort`_ + +- Add support for ``info['utc_offset']``, ``info['device_info']``, and ``info['helium_info']`` components of :class:`mne.Info` by `Eric Larson`_ + +- Add control over dipole colors in :func:`mne.viz.plot_dipole_locations` when using orthoview mode by `Eric Larson`_ + +- Use second-order-sections filtering in :meth:`mne.io.Raw.plot` and :ref:`mne browse_raw` by `Eric Larson`_ + +- Add re-referencing functionality for ecog and seeg channel types in 
:func:`mne.set_eeg_reference` by `Keith Doelling`_ + +- Add support for median averaging and for returning unaggregated segments in ``mne.time_frequency.psd_welch`` by `Richard Höchenberger`_ + +- :func:`io.read_raw_kit`: add support for NYU New York 2019 system update, by `Christian Brodbeck`_ + + +Bug +~~~ + +- Fix setting montage eeg ref position for captrak by `Joan Massich`_ + +- Fix saving raw read from BDF file using ``tmin`` and ``tmax`` using ``preload=False`` by `Alex Gramfort`_ + +- Fix :func:`mne.grand_average` to use equal sum-to-one weights (like it used to, before changes to underlying :func:`mne.combine_evoked`) by `Daniel McCloy`_ + +- Fix :meth:`mne.io.Raw.filter` to deal with instances with no data channels properly by `Eric Larson`_ + +- Fix one-sample baseline issue in :class:`mne.BaseEpochs` when using ``tmin=0`` by `Milan Rybář`_ + +- Fix bug in :func:`mne.viz.plot_volume_source_estimates` where ``'glass_brain'`` MRIs were not transformed to MNI space, by `Eric Larson`_ + +- Fix bug in :func:`mne.viz.plot_volume_source_estimates` where MRIs with voxels not in RAS orientation could not be browsed properly, by `Eric Larson`_ + +- Fix bug in :meth:`mne.SourceMorph.apply` where output STCs had ``stc.vertices`` defined improperly, by `Eric Larson`_ + +- Fix bug in :meth:`mne.SourceMorph.apply` where the default was errantly ``mri_space=False`` instead of ``mri_space=None`` (as documented), by `Eric Larson`_ + +- Fix ``mne.VolVectorSourceEstimate.normal`` and :func:`mne.minimum_norm.apply_inverse` to only allow normal extraction/orientation for surface and discrete source spaces by `Eric Larson`_ + +- Fix :meth:`mne.io.Raw.set_annotations` for ``meas_date`` previous to 1970 by `Joan Massich`_ + +- Fix horizontal spacing issues in :meth:`mne.io.Raw.plot_psd` by `Jevri Hanna`_ + +- Fix reading of dates in BrainVision files if no "New Segment" marker is specified, no date is given, or data is missing, by `Stefan Appelhoff`_ + +- Fix bug with reading one-channel GDF files by `Abram Hindle`_ + +- Fix bug with y-axis labeling in :meth:`mne.io.Raw.plot_psd` by `Eric Larson`_ + +- Fix side-effect where :func:`mne.viz.plot_ica_sources` and :meth:`mne.preprocessing.ICA.plot_sources` changed the ``ICA.exclude`` attribute even when users didn't interact with the plot by `Daniel McCloy`_. + +- Fix scaling of sources in :meth:`ica.plot_sources(epochs) ` by `Eric Larson`_ + +- Fix wrong assumptions about units in BrainVision montages and add test asserting units in "mm" or "auto", by `Stefan Appelhoff`_ + +- Fix scaling issue with signals in mV in EDF files read with :func:`mne.io.read_raw_edf` by `Alex Gramfort`_ + +- Fix :func:`mne.extract_label_time_course` behavior when using STCs generated with ``apply_inverse(..., label=label)`` or ``stc.in_label`` by `Eric Larson`_ + +- Fix bug in :func:`mne.io.read_raw_brainvision` so that recording date timestamps are also recognized if channel reference data is negative, by `Stefan Appelhoff`_ + +- Fix order of ``info['dig']`` that was alphabetical based on channel names and not following the channel order when using :meth:`mne.io.Raw.set_montage` and a ``mne.channels.Montage`` object by `Joan Massich`_ and `Alex Gramfort`_. 
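A short sketch of the ``psd_welch`` averaging options mentioned above; ``raw`` stands for any :class:`mne.io.Raw` instance and the frequency limits are arbitrary:

.. code-block:: python

    from mne.time_frequency import psd_welch

    # Median averaging across Welch segments
    psds, freqs = psd_welch(raw, fmin=1.0, fmax=40.0, average="median")
    # average=None keeps the unaggregated segments
    psds_segments, freqs = psd_welch(raw, fmin=1.0, fmax=40.0, average=None)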
+ +- Fix reading CNT files larger than 2Gb by `Joan Massich`_ + +- Fix reading of 4D/BTi data with different channel names and onsets in :func:`mne.io.read_raw_bti` by `Joshua Bear`_ and `Eberhard Eich`_ + +- Fix formula for effective number of averages in :func:`mne.combine_evoked` when ``weights='equal'`` by `Daniel McCloy`_. + +- Fix bug in :func:`mne.simulation.simulate_stc` to avoid empty stc if label vertices and source space do not intersect, by `Kostiantyn Maksymenko`_ + +- Fix ``event_id='auto'`` in :func:`mne.events_from_annotations` to recover Brainvision markers after saving it in ``.fif`` by `Joan Massich`_ + +- Fix :func:`mne.read_epochs_eeglab` when epochs are stored as float. By `Thomas Radman`_ + +- Fix :func:`mne.Evoked.resample` and :func:`mne.Epochs.resample` not setting ``inst.info['lowpass']`` properly by `Eric Larson`_ + +- Fix checks when constructing volumetric and surface source spaces with :func:`mne.setup_volume_source_space` and :func:`mne.setup_source_space`, respectively, by `Eric Larson`_ + +- Fix bug in handling of :class:`mne.Evoked` types that were not produced by MNE-Python (e.g., alternating average) by `Eric Larson`_ + +- Fix bug in :func:`mne.read_source_estimate` where vector volumetric source estimates could not be read by `Eric Larson`_ + +- Fix bug in :func:`mne.inverse_sparse.mixed_norm` and :func:`mne.inverse_sparse.tf_mixed_norm` where ``weights`` was supplied but ``weights_min`` was not, by `Eric Larson`_ + +- Fix bug in :func:`mne.set_eeg_reference` where non-EEG channels could be re-referenced by default if there were no EEG channels present, by `Eric Larson`_ + +- Fix bug in :func:`mne.io.Raw.plot` when using HiDPI displays and the MacOSX backend of matplotlib by `Eric Larson`_ + +- Fix bug in :func:`mne.viz.plot_compare_evokeds` when using Neuromag 122 system by `Eric Larson`_ + +- Fix bug in :func:`mne.Epochs.plot_psd` when some channels had zero/infinite ``psd`` values causing erroneous error messages by `Luke Bloy`_ + +- Fix :func:`mne.Evoked.decimate` not setting ``inst.first`` and ``inst.last`` properly by `Marijn van Vliet`_ + +- Fix :func:`mne.io.read_raw_brainvision` not handling ``Event`` markers created by PyCorder correctly by `Richard Höchenberger`_ + +- Fix support for string-like objects (such as :class:`python:pathlib.Path`) by `Eric Larson`_ + +- Fix :class:`mne.Report` silently suppressing exceptions when used as a context manager by `Marijn van Vliet`_ + +API +~~~ + +- Deprecate ``mne.channels.Montage`` class, ``mne.channels.read_montage`` and ``mne.channels.read_dig_montage`` function by `Joan Massich`_. + +- Deprecate passing ``Montage``, ``str`` as montage parameter in :meth:`mne.io.Raw.set_montage` by `Joan Massich`_. + +- Deprecate ``set_dig`` parameter in :meth:`mne.io.Raw.set_montage` and ``update_ch_names`` in ``mne.io.RawEEGLAB.set_montage`` when using :class:`mne.channels.DigMontage` as by `Joan Massich`_. + +- Now :meth:`mne.io.Raw.set_montage` raises an error when :class:`mne.channels.DigMontage` contains only a subset of the channels in ``raw.info``. It also adds ``raise_if_subset`` parameter to ensure backward compatibility (defaults to False in 0.19, to True in 0.20, and will be removed in 0.21) by `Joan Massich`_. 
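The path-handling fix noted above means :class:`python:pathlib.Path` objects can be passed wherever a filename string is expected; a minimal sketch with a hypothetical file name:

.. code-block:: python

    from pathlib import Path

    import mne

    fname = Path("data") / "sub-01_task-rest_raw.fif"  # hypothetical path
    raw = mne.io.read_raw_fif(fname, preload=False)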
+ +- Minimum dependency versions for the following libraries have been bumped up (by `Eric Larson`_): + + - NumPy: 1.12.1 + - SciPy: 0.18.1 + - matplotlib: 2.0.2 + - scikit-learn: 0.18.2 + - pandas 0.19.2 + +- New boolean parameter ``show_scrollbars`` for :meth:`mne.io.Raw.plot`, :meth:`mne.Epochs.plot`, and :meth:`mne.preprocessing.ICA.plot_sources` (and associated functions) that allows hiding the scrollbars and buttons for a "zen mode" data browsing experience. When the plot window has focus, zen mode can be toggled by pressing :kbd:`z`, by `Daniel McCloy`_. + +- Deprecate ``mne.evoked.grand_average`` in favor of :func:`mne.grand_average` (which works on both :class:`~mne.Evoked` and :class:`~mne.time_frequency.AverageTFR`) by `Daniel McCloy`_ + +- Deprecate ``exclude`` parameter in :func:`mne.viz.plot_ica_sources` and :meth:`mne.preprocessing.ICA.plot_sources`, instead always use the ``exclude`` attribute of the ICA object by `Daniel McCloy`_. + +- Deprecate ``montage`` parameter in favor of the ``set_montage`` method in all EEG data readers :func:`mne.io.read_raw_cnt`, :func:`mne.io.read_raw_egi`, :func:`mne.io.read_raw_edf`, :func:`mne.io.read_raw_gdf`, :func:`mne.io.read_raw_nicolet`, :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` by `Alex Gramfort`_ + +- New parameter ``clear`` in :func:`mne.viz.plot_epochs_image` for clearing pre-existing axes before plotting into them by `Daniel McCloy`_ + +- :func:`mne.viz.plot_epochs_image` no longer supports ``group_by='type'`` — combining by channel type is now the default when ``picks`` is a channel type string; to get individual plots for each channel, pass ``picks`` as a list of channel names or indices by `Daniel McCloy`_ + +- New parameter ``combine`` in :func:`mne.viz.plot_compare_evokeds` for specifying method to combine information across channels by `Daniel McCloy`_ + +- FIFF constants related to SmartShield (``*_SMSH_*`` and ``*_SMARTSHIELD``) have been renamed to ``IAS`` for consistency with MEGIN, by `Eric Larson`_ + +- The ``gfp`` parameter of :func:`mne.viz.plot_compare_evokeds` is deprecated; use ``combine='gfp'`` instead by `Daniel McCloy`_ + +- The ``truncate_yaxis='max_ticks'`` parameter of :func:`mne.viz.plot_compare_evokeds` is deprecated; use ``truncate_yaxis='auto'`` instead by `Daniel McCloy`_ + +- The ``truncate_xaxis`` and ``truncate_yaxis`` parameters of :func:`mne.viz.plot_compare_evokeds` now perform one-sided truncation unless both are ``True`` by `Daniel McCloy`_ + +- The ``show_legend`` parameter of :func:`mne.viz.plot_compare_evokeds` is renamed to ``legend`` by `Daniel McCloy`_ + +- :func:`mne.viz.plot_compare_evokeds` always returns a list of figures even when a single figure is generated by `Daniel McCloy`_ + +- Deprecate ``average=True`` and ``spatial_colors=False`` for :func:`mne.Epochs.plot_psd` by `Jevri Hanna`_ + +- :func:`mne.io.read_raw_brainvision` no longer raises an error when there are inconsistencies between ``info['chs']`` and ``montage`` but warns instead by `Joan Massich`_ + +- Add ``update_ch_names`` parameter to ``mne.io.RawEEGLAB.set_montage`` to allow updating the channel names based on the montage by `Joan Massich`_ + +- Reading annotations contained in GDF files with :func:`mne.io.read_raw_gdf` now returns numeric event codes as descriptions (instead of textual descriptions) due to restrictive licensing of the GDF event code table from BioSig by `Clemens Brunner`_ + +- ``channels.find_ch_connectivity`` now returns pre-built neighbor maps for KIT systems when available, 
by `Christian Brodbeck`_ diff --git a/doc/changes/0.2.inc b/doc/changes/v0.2.rst similarity index 100% rename from doc/changes/0.2.inc rename to doc/changes/v0.2.rst diff --git a/doc/changes/v0.20.rst b/doc/changes/v0.20.rst new file mode 100644 index 00000000000..eb191834ea2 --- /dev/null +++ b/doc/changes/v0.20.rst @@ -0,0 +1,407 @@ +.. _changes_0_20: + +Version 0.20 (2020-03-27) +------------------------- + +Changelog +~~~~~~~~~ + +- Improved :func:`mne.viz.plot_epochs` to label epoch counts starting from 0, by `Sophie Herbst`_ + +- Add :func:`minimum_norm.apply_inverse_cov` to compute static power by applying inverse solutions to a data covariance matrix by `Denis Engemann`_, `Luke Bloy`_, and `Eric Larson`_ + +- Add :func:`mne.minimum_norm.resolution_metrics` to compute various resolution metrics for inverse solutions, by `Olaf Hauk`_ + +- Add current source density :func:`mne.preprocessing.compute_current_source_density` to compute the surface Laplacian in order to reduce volume conduction in data by `Alex Rockhill`_ + +- Add :func:`mne.chpi.extract_chpi_locs_ctf` to extract cHPI coil locations from CTF data by `Luke Bloy`_ + +- Add :func:`mne.chpi.compute_chpi_amplitudes`, :func:`mne.chpi.compute_chpi_locs`, and :func:`mne.chpi.compute_head_pos` to compute head positions from cHPI coil locations by `Eric Larson`_ and `Luke Bloy`_ + +- Add ``allow_line_only`` option to :func:`mne.chpi.filter_chpi` to allow filtering line frequencies only in files that do not have cHPI information by `Eric Larson`_ + +- Add :func:`mne.io.Raw.set_meas_date` by `Eric Larson`_ + +- Add :meth:`mne.Epochs.as_type` to allow remapping data in MEG channels to virtual magnetometer or gradiometer channels by `Sophie Herbst`_ and `Alex Gramfort`_ + +- Add ``copy`` parameter to :meth:`mne.Epochs.iter_evoked` by `Alex Gramfort`_ + +- Add command :ref:`mne setup_source_space` to quickly set up bilateral hemisphere surface-based source space with subsampling by `Victor Ferat`_. 
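The three cHPI functions added above chain into a head-position pipeline; a minimal sketch, assuming ``raw`` is a :class:`mne.io.Raw` recording that actually contains cHPI information:

.. code-block:: python

    import mne

    chpi_amplitudes = mne.chpi.compute_chpi_amplitudes(raw)
    chpi_locs = mne.chpi.compute_chpi_locs(raw.info, chpi_amplitudes)
    head_pos = mne.chpi.compute_head_pos(raw.info, chpi_locs)
    mne.viz.plot_head_positions(head_pos, mode="traces")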
+ +- Add command :ref:`mne sys_info` to print system information by `Eric Larson`_ + +- Add function :func:`mne.make_fixed_length_epochs` to segment raw into fixed length epochs by `Mohammad Daneshzand`_ + +- Add support for computing patch information only in surface source space creation with ``add_dist='patch'`` in :func:`mne.setup_source_space` and ``dist_limit=0`` in :func:`mne.add_source_space_distances` by `Eric Larson`_ + +- Add :class:`mne.Label.restrict` to restrict a label to vertices within a source space by `Eric Larson`_ + +- Add support for passing a destination source space ``src_to`` in :func:`mne.compute_source_morph` to ensure morphing for multiple subjects results in consistent STCs, by `Eric Larson`_ + +- Add support for ``smooth='nearest'`` for surfaces in :func:`mne.compute_source_morph` by `Eric Larson`_ + +- Add support for plotting fNIRS channels in :func:`mne.viz.plot_alignment`, :func:`mne.viz.plot_evoked_topo` and :func:`mne.Evoked.animate_topomap` by `Eric Larson`_ and `Robert Luke`_ + +- Add command line tool :ref:`mne anonymize` for anonymizing raw fiff files by `Luke Bloy`_ + +- Add :func:`mne.preprocessing.find_bad_channels_maxwell` for automatic bad channel detection via Maxwell filtering by `Eric Larson`_ + +- Add support to :func:`mne.io.anonymize_info` to allow time offset to be applied to dates by `Luke Bloy`_ + +- Add support for computing resolution matrix to get point spread functions (PSF) and cross-talk functions (CTF) in :func:`mne.minimum_norm.make_inverse_resolution_matrix`, :func:`mne.beamformer.make_lcmv_resolution_matrix`, :func:`mne.minimum_norm.get_cross_talk`, :func:`mne.minimum_norm.get_point_spread` by `Olaf Hauk`_ + +- Add :func:`mne.preprocessing.read_ica_eeglab` to read EEGLAB ICA decompositions by `Christian O'Reilly`_ + +- Add keyboard functionality to interactive colorbar plotting TFRs by `Stefan Repplinger`_ + +- Add the ability to ``return_event_id`` in :func:`mne.read_events` for use with MNE-C produced ``-annot.fif`` files, by `Eric Larson`_ + +- Add option to ``mne.connectivity.spectral_connectivity`` to compute corrected imaginary PLV by `Adonay Nunes`_ + +- Add :func:`mne.SourceEstimate.estimate_snr` to estimate source-space SNR, by `Kaisu Lankinen`_ and `Padma Sundaram`_ + +- Add option to specify the coordinate frame in :func:`mne.channels.read_custom_montage` by `Eric Larson`_ + +- Add option to use ``match_case=False`` to :meth:`mne.io.Raw.set_montage` and related functions by `Eric Larson`_ + +- Add reader for NIRx data in :func:`mne.io.read_raw_nirx` by `Robert Luke`_ + +- Add function to convert NIRS data to optical density :func:`mne.preprocessing.nirs.optical_density` by `Robert Luke`_ + +- Add function to convert NIRS data to haemoglobin concentration :func:`mne.preprocessing.nirs.beer_lambert_law` by `Robert Luke`_ + +- Add ``fig`` argument to :func:`mne.viz.plot_evoked_field` by `Eric Larson`_ + +- Add functions to calculate spatial information of NIRS channels :func:`mne.preprocessing.nirs.source_detector_distances` and :func:`mne.preprocessing.nirs.short_channels` by `Robert Luke`_ + +- Add reader for ``*.dat`` electrode position files :func:`mne.channels.read_dig_dat` by `Christian Brodbeck`_ + +- Improved :func:`mne.viz.plot_events` to always show event counts by `Eric Larson`_ + +- Improved :ref:`limo-dataset` usage and :ref:`example ` for usage of :func:`mne.stats.linear_regression` by `Jose Alanis`_ + +- Add support for ``reduce_rank=True`` for vector beamformers by `Eric Larson`_ + +- Add 
``method='sphere'`` support for using Freesurfer spherical inflation in :func:`mne.decimate_surface` to facilitate mesh downsampling for use with :func:`mne.make_bem_model` by `Eric Larson`_ + +- Speed up :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` calculations by vectorizing linear algebra calls by `Dmitrii Altukhov`_ and `Eric Larson`_ + +- Speed up :func:`mne.make_forward_solution` using Numba, by `Eric Larson`_ + +- Speed up :func:`mne.io.read_raw_fif` data reading when the recording is long and there are many data tags repeatedly accessed, by `Eric Larson`_ + +- For KIT systems without built-in layout, :func:`mne.channels.find_layout` now falls back on an automatically generated layout, by `Christian Brodbeck`_ + +- :meth:`mne.Epochs.plot` now takes a ``epochs_colors`` parameter to color specific epoch segments by `Mainak Jas`_ + +- Add command :ref:`mne setup_forward_model` to quickly create a BEM model for a subject by `Victor Ferat`_. + +- Add command :ref:`mne prepare_bem_model` to quickly create a BEM solution using the linear collocation approach by `Victor Ferat`_. + +- Add support for reading ``.bdip`` Xfit binary dipole fit files in :func:`mne.read_dipole` by `Eric Larson`_ + +- Allow returning vector source estimates from sparse inverse solvers through ``pick_ori='vector'`` by `Christian Brodbeck`_ + +- Add NIRS support to :func:`mne.viz.plot_topomap` and :func:`mne.viz.plot_ica_components` by `Robert Luke`_ + +- Add the ability to :func:`mne.channels.equalize_channels` to also re-order the channels and also operate on instances of :class:`mne.Info`, :class:`mne.Forward`, :class:`mne.Covariance` and :class:`mne.time_frequency.CrossSpectralDensity` by `Marijn van Vliet`_ + +- Allow `mne.channels.read_custom_montage` to handle fiducial points for BESA spherical (``.elp``) files by `Richard Höchenberger`_ + +- Add ``rank`` argument to :func:`mne.fit_dipole` by `Eric Larson`_ + +- Add function to convert events to annotations :func:`mne.annotations_from_events` by `Nicolas Barascud`_ + +- Add function to calculate scalp coupling index for fNIRS data :func:`mne.preprocessing.nirs.scalp_coupling_index` by `Robert Luke`_ + +- Add ``item`` argument to :meth:`mne.Epochs.get_data` for faster access to NumPy data arrays compared to :meth:`mne.Epochs.__getitem__` for frequent access on large :class:`mne.Epochs` objects, by `Eric Larson`_ + +- More accurate coordinate system for Easycap montages in :func:`mne.channels.make_standard_montage` by `Christian Brodbeck`_ + +- Add ``border`` argument to :func:`mne.viz.plot_topomap`. ``border`` controls the value of the edge points to which topomap values are extrapolated. ``border='mean'`` sets these points value to the average of their neighbours. By `Mikołaj Magnuski`_ + +- Add function :func:`mne.viz.link_brains` to link time properties of multiple brain objects interactively by `Guillaume Favelier`_ + +- Add function :func:`mne.preprocessing.annotate_movement` to annotate periods with head motion and :func:`mne.preprocessing.compute_average_dev_head_t` to re-estimate the device to head transform with average head position during segments with acceptable head movement. 
by `Adonay Nunes`_ + +- Make selected channels more distinguishable in :meth:`mne.Epochs.plot_sensors` when using ``kind='select'`` by `Mikołaj Magnuski`_ + +- Allow retrieval of the number of Infomax ICA iterations via the new ``return_n_iter`` keyword argument of :func:`mne.preprocessing.infomax` by `Richard Höchenberger`_ + +- Expose the number of ICA iterations during the fitting procedure via the ``n_iter_`` attribute of :class:`mne.preprocessing.ICA` by `Richard Höchenberger`_ + +- :func:`mne.grand_average` now produces a warning when only a single dataset was passed, instead of raising an error by `Richard Höchenberger`_ + +- Add ``nrows`` and ``ncols`` parameters to :func:`mne.viz.plot_evoked_topomap`, which allows to create multiline topomap plots for Evoked. By `Federico Raimondo`_ + +- Improve repr of :class:`mne.Info` (empty items are not shown anymore) by `Clemens Brunner`_ + +Bug +~~~ + +- Fix the ``reduce_rank`` parameter in :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` to reduce the rank of the leadfield first before then reducing the rank of the denominator of the beamformer formula for the inversion by `Britta Westner`_ and `Eric Larson`_. + +- Fix bug with :func:`mne.beamformer.make_lcmv` with data where rank was incorrectly checked by `Eric Larson`_. + +- Allow :func:`mne.channels.read_dig_hpts` to accept point categories (eg. EEG) to be specified in upper case by `Alex Gramfort`_. + +- Fix a bug in ``mne.MixedSourceEstimate.plot_surface`` that prevented plotting in latest PySurfer by `Christian O'Reilly`_. + +- Fix ``method='eLORETA'`` in :func:`mne.minimum_norm.apply_inverse`, by `Eric Larson`_: + + 1. regularization, which was off by roughly a factor of a square root + 2. units, which are now nAm instead of being arbitrary + 3. explained variance and residuals, which were previously unavailable + 4. ``force_equal`` in the case of loose orientations, which previously + defaulted to ``True`` now defaults to ``False``. + Reapplying orientation priors at each iteration of the algorithm + sufficiently preserves a smooth transition between loose=0. to loose=1. + without compromising localization accuracy in testing. + +- Fix bug in ``method='eLORETA'`` for :func:`mne.minimum_norm.apply_inverse` (and variants) to allow restricting source estimation to a label by `Luke Bloy`_ + +- Fix bug in :func:`mne.compute_covariance` and :func:`mne.compute_raw_covariance` where biased normalization (based on degrees of freedom) was used and ``cov.nfree`` was not set properly by `Eric Larson`_ + +- Fix ``mne.VectorSourceEstimate.normal`` to account for cortical patch statistics using ``use_cps=True`` by `Eric Larson`_ + +- Fix ``pick_ori='normal'`` for :func:`mne.minimum_norm.apply_inverse` when the inverse was computed with ``loose=1.`` and the forward solution was not in surface orientation, by `Eric Larson`_ + +- Fix missing xlabel for :func:`mne.io.Raw.plot_psd` and similar functions when passing a subset of axes from a figure by `Alex Gramfort`_ + +- Fix wrong DataFrame index when ``index=None`` in methods :meth:`mne.io.Raw.to_data_frame`, :meth:`mne.Epochs.to_data_frame`, :meth:`mne.Evoked.to_data_frame`, and :meth:`mne.SourceEstimate.to_data_frame`, by `Daniel McCloy`_. + +- Fix incorrect scaling of cluster temporal extent in :func:`mne.stats.summarize_clusters_stc` by `Daniel McCloy`_. 
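Given the eLORETA changes enumerated above (amplitudes now in nAm, residuals available), a minimal usage sketch, assuming ``evoked`` and an inverse operator ``inv`` already exist:

.. code-block:: python

    from mne.minimum_norm import apply_inverse

    stc, residual = apply_inverse(evoked, inv, lambda2=1.0 / 9.0,
                                  method="eLORETA", return_residual=True)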
+ +- Fix :func:`mne.time_frequency.read_tfrs` to properly read :class:`~mne.Info` transforms by `Eric Larson`_ + +- Fix :func:`mne.viz.plot_sensors` to always plot in head coordinate frame by `Daniel McCloy`_. + +- Fix :func:`mne.io.read_raw_cnt` for version 1 of format in the presence of annotations by `Alex Gramfort`_. + +- Fix :class:`mne.decoding.GeneralizingEstimator` and related classes to support multi-class sklearn scorers such as ``'roc_auc_ovo'`` and ``'roc_auc_ovo_weighted'`` by `Eric Larson`_ + +- Fix :meth:`mne.io.read_raw_ctf` to set measurement date from CTF ds files by `Luke Bloy`_. + +- Fix :meth:`mne.read_epochs_eeglab` that ignored channel locations by `Alex Gramfort`_. + +- Fix :func:`mne.io.read_raw_brainvision` when channel names have spaces by `Sebastian Major`_. + +- Fix :func:`mne.io.read_raw_brainvision` when ``"Core"`` is in the data header by `Eric Larson`_ + +- Fix :meth:`mne.io.Raw.anonymize` to correctly reset ``raw.annotations.orig_time`` by `Luke Bloy`_. + +- Fix :meth:`mne.io.Raw.anonymize` to correctly avoid shifting ``raw.annotations.onset`` relative to ``raw.first_samp`` by `Eric Larson`_ + +- Fix :meth:`mne.io.Raw.set_channel_types` and :meth:`mne.io.Raw.rename_channels` and related methods to return the instance instead of ``None`` by `Eric Larson`_ + +- :meth:`mne.Epochs.iter_evoked` now does not return a copy of info when ``copy=False`` (default parameter) by `Alex Gramfort`_ + +- The attribute :class:`mne.Annotations.orig_time ` is now read-only, and is a :class:`~python:datetime.datetime` object (or None) rather than float, by `Eric Larson`_ + +- Fix bug where MRI distances were not properly initialized in :ref:`mne coreg` by `Eric Larson`_ + +- Fix ``xscale='log'`` in :meth:`mne.io.Raw.plot_psd` and related functions by `Alex Gramfort`_ + +- Unify behavior of ``raw.annotations.append(...)`` when ``raw.info['meas_date']`` is None to make onsets absolute relative to ``first_samp`` as they are when ``raw.info['meas_date']`` is not None; i.e., you might need to do ``raw.annotations.append(old_time + raw.first_time)``, by `Eric Larson`_ + +- The :class:`info['meas_date'] ` entry is now a :class:`~python:datetime.datetime` object (or None) rather than float, by `Eric Larson`_ + +- Fix :meth:`mne.time_frequency.AverageTFR.save` and related functions to deal with :class:`~python:datetime.datetime` objects properly by `Eric Larson`_ + +- Fix date reading before Unix time zero (1970 Jan 1) on Windows by `Alex Rockhill`_. + +- Fix bug running subprocesses (e.g., in :func:`mne.bem.make_watershed_bem`) in Jupyter notebooks, by `Eric Larson`_ + +- Fix :meth:`mne.io.read_raw_artemis123` to add HPI locations to digitization points, by `Luke Bloy`_ + +- Fix :meth:`mne.Epochs.shift_time` and :meth:`mne.Evoked.shift_time` to return the modified :class:`~mne.Epochs` or :class:`~mne.Evoked` instance (instead of ``None``) by `Daniel McCloy`_. + +- Fix :func:`mne.read_annotations` for text files with zero or one annotations, by `Adonay Nunes`_ + +- Fix bug in :class:`~mne.preprocessing.ICA` where requesting extended infomax via ``fit_params={'extended': True}`` was overridden, by `Daniel McCloy`_. + +- Fix bug in :meth:`mne.Epochs.interpolate_bads` where the ``origin`` was not used during MEG or EEG data interpolation by `Eric Larson`_. Old behavior can be achieved using ``origin=(0., 0., 0.)`` for EEG and ``origin=(0., 0., 0.04)`` for MEG, and the new default is ``origin='auto'``, which uses a head-digitization-based fit. 
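To make the ``origin`` change in :meth:`mne.Epochs.interpolate_bads` concrete, a sketch of the new default and, per the entry above, the values that recover the old behaviour (which value applies depends on whether EEG or MEG channels are being interpolated):

.. code-block:: python

    # New default: sphere origin fitted from the head digitization
    epochs.interpolate_bads(origin="auto")

    # Pre-0.20 behaviour, per the entry above:
    # epochs.interpolate_bads(origin=(0.0, 0.0, 0.04))  # MEG
    # epochs.interpolate_bads(origin=(0.0, 0.0, 0.0))   # EEG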
+ +- Fix bug in :func:`mne.write_evokeds` where ``evoked.nave`` was not saved properly when multiple :class:`~mne.Evoked` instances were written to a single file, by `Eric Larson`_ + +- Fix bug in ``mne.preprocessing.mark_flat`` where acquisition skips were not handled properly, by `Eric Larson`_ + +- Fix bug in :func:`mne.viz.plot_bem` where some sources were not plotted by `Jean-Rémi King`_ and `Eric Larson`_ + +- Fix TAL channel parsing (annotations) for EDF-D files by `Clemens Brunner`_ + +- Fix bug with :func:`mne.viz.plot_dipole_locations` when plotting in head coordinates by `Eric Larson`_ + +- Fix handling in cases where no components are found in :func:`mne.preprocessing.corrmap` by `Eric Larson`_ + +- Fix handling of repeated events in :class:`mne.Epochs` by `Fahimeh Mamashli`_ and `Alex Gramfort`_ + +- Fix many bugs with plotting sensors overlaid on a head outline. All plotting is now done in head coordinates and scaled by ``head_radius``, which defaults to 0.095, by `Eric Larson`_ + +- Fix :func:`mne.io.anonymize_info` to allow shifting dates of service and to match anticipated changes in mne-cpp by `Luke Bloy`_ + +- Fix reading of cardinals in .htps files (identifiers are ints, not strings) by `Alex Gramfort`_ + +- Fix IO of TFRs when ``event_id`` contains a ``/`` in one of the keys by `Alex Gramfort`_ + +- Fix plotting of TFRs with linear y-scale (misaligned frequency bins), by `Stefan Repplinger`_ and `Eric Larson`_ + +- Fix ``info['sfreq']`` when decimating in :func:`mne.time_frequency.tfr_multitaper` and :func:`mne.time_frequency.tfr_morlet` and make sure an error is raised when the Nyquist frequency would be exceeded, by `Adonay Nunes`_ + +- Fix bug in EDF(+) loading where filter values were ignored due to an insufficient regex, by `Demetres Kostas`_ + +- Fix missing scaling of the tolerance parameter in :func:`mne.inverse_sparse.tf_mixed_norm` and :func:`mne.inverse_sparse.mixed_norm`, by `Mathurin Massias`_ + +- Fix the automatic scaling of the glyphs in :func:`mne.viz.plot_vector_source_estimates` by using 10% of the brain width, by `Guillaume Favelier`_ + +- Fix handling of missing values (NaNs) in ``mne.time_frequency.psd_welch`` by `Clemens Brunner`_ + +- Fix :class:`mne.decoding.LinearModel` to support the refitted estimator of ``GridSearchCV`` in ``sklearn`` by `Chun-Hui Li`_ + +- Fix :func:`mne.viz.plot_topomap` when there are bad channels in the provided :class:`mne.Info` instance by `Eric Larson`_ + +- Fix check for subject definition existing in source space in :func:`mne.compute_source_morph` and related functions by `Eric Larson`_ + +- Fix creation of new figures in :meth:`mne.Epochs.plot_image` and :func:`mne.viz.plot_epochs_image` by `Eric Larson`_ + +- Fix bug in :class:`~mne.preprocessing.Xdawn` where filters were selected along the incorrect axis, by `Henrich Kolkhorst`_ + +- Fix bug in :func:`mne.bem.make_watershed_bem` where some surfaces were saved incorrectly in the working directory by `Yu-Han Luo`_ + +- Fix support for multiple TAL (annotations) channels in the BDF reader by `Clemens Brunner`_ + +- Fix missing ``brainmask`` file type in :func:`mne.bem.make_watershed_bem` by changing its default value to ``ws.mgz`` by `Fu-Te Wong`_ + +- Fix bug in :func:`mne.channels.make_standard_montage` which would return ``easycap-M1`` even when requesting ``easycap-M10`` by `Christian Brodbeck`_ + +- Fix the way planar gradiometers are combined in :func:`mne.viz.plot_tfr_topomap` and :meth:`mne.Epochs.plot_psd_topomap` by `Geoff Brookshire`_ + +- Improve error message when trying to load FieldTrip data from a
cell array by `Thomas Hartmann`_ + +- Fix bug in :func:`mne.read_evoked_fieldtrip` causing it to crash when channels were present in the provided Info object but were not present in the data by `Thomas Hartmann`_ + +- Fix placement of extrapolation points in :meth:`mne.Evoked.plot_topomap` and related functions when exactly three channels were used by `Mikołaj Magnuski`_. + +- Fix bug in reading annotations in :func:`read_annotations`, which would not accept ";" character by `Adam Li`_ + +- Fix bug in :func:`mne.Report.parse_folder`, which would not recognize ``*meg.fif`` files by `Dmitrii Altukhov`_. + +- Include ``fit_params`` when saving an :class:`~mne.preprocessing.ICA` instance to disk by `Richard Höchenberger`_ + +- Update old url link in :func:`mne.datasets.eegbci.load_data` to ``EEGMI_URL = 'https://physionet.org/files/eegmmidb/1.0.0/'`` by `Ramiro Gatti`_ + +- Ensure corrected p-values calculated by :func:`mne.stats.bonferroni_correction` never exceed the value of 1.0 by `Richard Höchenberger`_ + +API +~~~ + +- ``mne.io.pick.get_channel_types`` is now called ``mne.io.pick.get_channel_type_constants`` to better reflect its return values, by `Daniel McCloy`_. + +- :meth:`mne.Evoked.as_type` now returns an instance of :class:`mne.EvokedArray` by `Sophie Herbst`_ and `Alex Gramfort`_ + +- Bumped minimum requirements to fall 2017 versions by `Eric Larson`_: + + - NumPy 1.13.3 + - SciPy 1.0.0 + - matplotlib 2.1 + - scikit-learn 0.19.1 (optional requirement) + - pandas 0.21 (optional requirement) + +- :meth:`mne.Epochs.plot` now accepts an ``event_id`` parameter (useful in tandem with ``event_colors`` for specifying event colors by name) by `Daniel McCloy`_. + +- Passing ``ch_types=None`` has been deprecated in favor of explicitly passing ``ch_types='misc'`` in :func:`mne.create_info` by `Eric Larson`_ + +- New time conversion options for methods :meth:`mne.io.Raw.to_data_frame`, :meth:`mne.Epochs.to_data_frame`, :meth:`mne.Evoked.to_data_frame`, and :meth:`mne.SourceEstimate.to_data_frame`, by `Daniel McCloy`_. + +- :meth:`mne.Epochs.shift_time` and :meth:`mne.Evoked.shift_time` now allow shifting times by arbitrary amounts (previously only by integer multiples of the sampling period), by `Daniel McCloy`_ and `Eric Larson`_. + +- The ``head_pos`` argument of :func:`mne.Evoked.plot_topomap` and related functions has been deprecated in favor of ``head_radius``, by `Eric Larson`_. + +- The ``layout`` argument to topomap-related functions such as :meth:`mne.Evoked.plot_topomap` and :func:`mne.viz.plot_tfr_topomap` has been deprecated in favor of channel-position based flattening based on the ``info`` and ``sphere`` argument, by `Eric Larson`_. + +- The APIs of :meth:`mne.io.Raw.plot_projs_topomap`, :meth:`mne.Epochs.plot_projs_topomap` and :meth:`mne.Evoked.plot_projs_topomap` are now more similar to :func:`mne.viz.plot_projs_topomap` by `Daniel McCloy`_. + +- The function :func:`mne.setup_volume_source_space` has a ``sphere_units`` argument that defaults to ``'mm'`` in 0.20 but will change to ``'m'`` in 0.21, set it to avoid a warning by `Eric Larson`_. + +- :func:`mne.viz.plot_projs_topomap` and the related methods :meth:`mne.io.Raw.plot_projs_topomap`, :meth:`mne.Epochs.plot_projs_topomap` and :meth:`mne.Evoked.plot_projs_topomap` now accept parameter ``vlim`` to control the colormap, with keyword ``'joint'`` computing the colormap jointly across all projectors of a given channel type, by `Daniel McCloy`_. 
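For the :func:`mne.stats.bonferroni_correction` clamping noted in the list above, a small self-contained sketch (not from the original changelog) shows the call whose corrected p-values are now capped at 1.0:

.. code-block:: python

    import numpy as np
    from mne.stats import bonferroni_correction

    pvals = np.array([0.001, 0.04, 0.2, 0.9])  # a family of four tests
    reject, pvals_corrected = bonferroni_correction(pvals, alpha=0.05)
    # corrected values are min(p * n_tests, 1.0), so 0.9 * 4 is reported as 1.0
    print(reject, pvals_corrected)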
+ +- New methods :meth:`mne.io.Raw.get_channel_types`, :meth:`mne.Epochs.get_channel_types`, :meth:`mne.Evoked.get_channel_types` by `Daniel McCloy`_. + +- Deprecate ``mne.minimum_norm.point_spread_function`` and ``mne.minimum_norm.cross_talk_function`` by `Alex Gramfort`_ + +- Deprecate ``montage`` parameter of :func:`mne.create_info` in favor of :meth:`mne.io.Raw.set_montage`, :meth:`mne.Info.set_montage`, and related functions by `Eric Larson`_ + +- :func:`mne.channels.equalize_channels` no longer operates in-place by default, but instead makes copies of the objects when necessary (see ``copy`` parameter) by `Marijn van Vliet`_ + +- :func:`mne.channels.equalize_channels` now uses the first object in the list as a template for channel ordering by `Marijn van Vliet`_ + +- :func:`mne.channels.equalize_channels` now also re-orders the channels to match, in addition to dropping channels that are not shared by all objects by `Marijn van Vliet`_ + +- :meth:`mne.io.Raw.pick_channels`, :meth:`mne.Epochs.pick_channels` and :meth:`mne.Evoked.pick_channels` now have an ``ordered`` parameter to enforce the ordering of the picked channels by `Marijn van Vliet`_ + +- New methods :meth:`mne.Forward.pick_channels`, :meth:`mne.Covariance.pick_channels`, ``mne.Info.pick_channels``, :meth:`mne.time_frequency.CrossSpectralDensity.pick_channels` by `Marijn van Vliet`_ + +- New attributes ``mne.Forward.ch_names`` and ``mne.Info.ch_names`` by `Marijn van Vliet`_ + +- In :class:`~mne.preprocessing.Xdawn`, the components are stored in the rows of attributes ``filters_`` and ``patterns_`` to be consistent with :class:`~mne.decoding.CSP` and :class:`~mne.preprocessing.ICA` by `Henrich Kolkhorst`_ + +- Drop ``unit`` keyword argument from :func:`mne.channels.read_custom_montage`, as it was unused by `Richard Höchenberger`_ + +- Deprecate ``mne.preprocessing.run_ica``, use ``mne.preprocessing.ICA.detect_artifacts`` instead by `Richard Höchenberger`_ + +Authors +~~~~~~~ + +People who contributed to this release (in alphabetical order): + +* Adam Li +* Adonay Nunes +* Alex Rockhill +* Alexandre Gramfort +* Ariel Rokem +* Britta Westner +* Carlos de la Torre +* Christian Brodbeck +* Christian O'Reilly +* Chun-Hui Li +* Clemens Brunner +* Daniel McCloy +* Demetres Kostas +* Denis A. Engemann +* Dmitrii Altukhov +* Dominik Welke +* Eric Larson +* Ezequiel Mikulan +* Fahimeh Mamashli +* Fede Raimondo +* Fu-Te Wong +* Geoff Brookshire +* Guillaume Favelier +* Henrich Kolkhorst +* Joan Massich +* José C. García Alanis +* Jussi Nurminen +* Kaisu Lankinen +* Luke Bloy +* Mainak Jas +* Marijn van Vliet +* Mathurin Massias +* Mikolaj Magnuski +* Mohamed Sherif +* Mohammad Daneshzand +* Nataliia Kozhemiako +* Nicolas Barascud +* Nicolas Legrand +* Olaf Hauk +* Padma Sundaram +* Ramiro Gatti +* Richard Höchenberger +* Robert Luke +* Robert Oostenveld +* Sebastian Major +* Sophie Herbst +* Stefan Appelhoff +* Stefan Repplinger +* Thomas Hartmann +* Victor Férat +* Yu-Han Luo diff --git a/doc/changes/v0.21.rst b/doc/changes/v0.21.rst new file mode 100644 index 00000000000..2f2fa201a3c --- /dev/null +++ b/doc/changes/v0.21.rst @@ -0,0 +1,486 @@ +.. _changes_0_21: + +Version 0.21 (2020-09-21) +------------------------- + +.. |Rahul Nadkarni| replace:: **Rahul Nadkarni** +.. |Lau Møller Andersen| replace:: **Lau Møller Andersen** +.. |Kyle Mathewson| replace:: **Kyle Mathewson** +.. |Jan Sedivy| replace:: **Jan Sedivy** +.. |Johann Benerradi| replace:: **Johann Benerradi** +.. |Martin Schulz| replace:: **Martin Schulz** +.. 
|Jeroen Van Der Donckt| replace:: **Jeroen Van Der Donckt** +.. |Simeon Wong| replace:: **Simeon Wong** +.. |Svea Marie Meyer| replace:: **Svea Marie Meyer** +.. |Lx37| replace:: **Lx37** +.. |Liberty Hamilton| replace:: **Liberty Hamilton** +.. |Steven Bierer| replace:: **Steven Bierer** + +Enhancements +~~~~~~~~~~~~ + +- Modified :meth:`mne.Epochs.pick_types` to remove dropped channel types from ``reject`` and ``flat`` dictionaries **by new contributor** |Rahul Nadkarni|_ + +- Add function :func:`mne.preprocessing.regress_artifact` to remove artifacts using linear regression **by new contributor** |Kyle Mathewson|_ and `Eric Larson`_ + +- Add ``sources`` and ``detectors`` options for fNIRS use of :meth:`mne.viz.plot_alignment` allowing plotting of optode locations in addition to channel midpoint ``channels`` and ``path`` between fNIRS optodes **by new contributor** |Kyle Mathewson|_ + +- BrainVision data format files are now parsed for EEG impedance values in :func:`mne.io.read_raw_brainvision` and provided as a ``.impedances`` attribute of ``raw`` **by new contributor** |Jan Sedivy|_ and `Stefan Appelhoff`_ + +- Add function :func:`mne.channels.combine_channels` to combine channels from Raw, Epochs, or Evoked according to ROIs (combinations including mean, median, or standard deviation; can also use a callable) **by new contributor** |Johann Benerradi|_ + +- Improved documentation building instructions and execution on Windows **by new contributor** |Martin Schulz|_, `Evgenii Kalenkovich`_, and `Eric Larson`_ + +- Speed up reading of annotations in EDF+ files **by new contributor** |Jeroen Van Der Donckt|_ + +- Add head to mri and mri to voxel space transform details to :ref:`tut-source-alignment` tutorial, by `Alex Rockhill`_ + +- Improve memory efficiency of :func:`mne.concatenate_epochs` by `Eric Larson`_ + +- Allow picking channels in raw instances (e.g., :meth:`mne.io.Raw.pick_types`) without preloading data, by `Eric Larson`_ + +- :meth:`mne.preprocessing.ICA.plot_sources` now plots annotation markers similar to :meth:`mne.io.Raw.plot` by `Luke Bloy`_ + +- Add support for scalar multiplication and division of :class:`mne.time_frequency.AverageTFR` instances by `Luke Bloy`_ + +- Add support for signals in mV for :func:`mne.io.read_raw_brainvision` by `Clemens Brunner`_ + +- :meth:`mne.Epochs.plot_psd_topomap` and :func:`mne.viz.plot_epochs_psd_topomap` now allow joint colorbar limits across subplots, by `Daniel McCloy`_. 
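To illustrate the ROI aggregation provided by the new :func:`mne.channels.combine_channels` mentioned above, here is a minimal sketch (assuming the MNE sample dataset; the group definitions are illustrative only and not part of the original changelog):

.. code-block:: python

    import os
    import mne

    data_path = mne.datasets.sample.data_path()
    raw = mne.io.read_raw_fif(
        os.path.join(data_path, "MEG", "sample", "sample_audvis_raw.fif"), preload=True
    )
    # groups maps new pseudo-channel names to lists of channel *indices*
    groups = dict(
        frontal=mne.pick_channels(raw.ch_names, ["EEG 002", "EEG 003"]),
        occipital=mne.pick_channels(raw.ch_names, ["EEG 057", "EEG 058"]),
    )
    roi_raw = mne.channels.combine_channels(raw, groups, method="mean")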
+ +- Add support for using loose-orientation surface priors with free-orientation volume priors in mixed source space inverses by `Eric Larson`_ + +- Add :class:`mne.MixedVectorSourceEstimate` for vector source estimates for mixed source spaces, by `Eric Larson`_ + +- Add mixed and volumetric source estimate plotting using volumetric ray-casting to :meth:`mne.MixedSourceEstimate.plot` and :meth:`mne.VolSourceEstimate.plot_3d` by `Eric Larson`_ + +- Add :meth:`mne.MixedSourceEstimate.surface` and :meth:`mne.MixedSourceEstimate.volume` methods to allow surface and volume extraction by `Eric Larson`_ + +- Add :meth:`mne.VectorSourceEstimate.project` to project vector source estimates onto the direction of maximum source power by `Eric Larson`_ + +- Add support to :func:`mne.extract_label_time_course` for vector-valued and volumetric source estimates by `Eric Larson`_ + +- Add method :meth:`mne.VolSourceEstimate.in_label` by `Eric Larson`_ + +- Add support for mixed source spaces to :func:`mne.compute_source_morph` by `Eric Larson`_ + +- Add support for volume and mixed source spaces to :func:`mne.stats.summarize_clusters_stc` by `Eric Larson`_ + +- Add support for omitting the SDR step in volumetric morphing by passing ``n_iter_sdr=()`` to `mne.compute_source_morph` by `Eric Larson`_ + +- Add ``single_volume`` argument to :func:`mne.setup_volume_source_space` to facilitate creating source spaces with many volumes (e.g., all subvolumes of ``aseg.mgz``) by `Eric Larson`_ + +- Add support for passing a string argument to ``bg_img`` in `mne.viz.plot_volume_source_estimates` by `Eric Larson`_ + +- Add support for providing the destination surface source space in the ``src_to`` argument of :func:`mne.compute_source_morph` by `Eric Larson`_ + +- Add explained variance logging to :func:`mne.inverse_sparse.mixed_norm`, :func:`mne.inverse_sparse.gamma_map`, and :func:`mne.beamformer.rap_music` by `Eric Larson`_ + +- Add ``tol_kind`` option to :func:`mne.compute_rank` by `Eric Larson`_ + +- Allow resampling raw data with :func:`mne.io.Raw.resample` without preloading data, by `Eric Larson`_ + +- Allow using ``pick_ori='vector'`` with a fixed-orientation inverse to facilitate visualization with :func:`mne.viz.plot_vector_source_estimates` by `Eric Larson`_ + +- :func:`mne.viz.plot_dipole_locations` and :meth:`mne.Dipole.plot_locations` gained a ``title`` argument to specify a custom figure title in ``orthoview`` mode by `Richard Höchenberger`_ + +- Add orientations projected onto the MRI slices in :func:`mne.viz.plot_dipole_locations` in ``'orthoview'`` mode by `Eric Larson`_ + +- Add temporal derivative distribution repair (TDDR) :func:`mne.preprocessing.nirs.temporal_derivative_distribution_repair` with shortened alias ``mne.preprocessing.nirs.tddr`` by `Robert Luke`_ + +- Add :func:`mne.read_freesurfer_lut` to make it easier to work with volume atlases by `Eric Larson`_ + +- Add support for overlap-add processing when ``method='spectrum_fit'`` in :func:`mne.io.Raw.notch_filter` by `Eric Larson`_ + +- Add functionality to interpolate bad NIRS channels by `Robert Luke`_ + +- Add ability to interpolate EEG channels using minimum-norm projection in :meth:`mne.io.Raw.interpolate_bads` and related functions with ``method=dict(eeg='MNE')`` by `Eric Larson`_ + +- Added ability of :func:`mne.io.read_raw_nirx` to open data by passing path to header file `Robert Luke`_ + +- Add :meth:`mne.channels.DigMontage.rename_channels` to allow renaming montage channels by `Eric Larson`_ + +- Document 
:meth:`mne.channels.compute_native_head_t` to allow computing the native-to-head transformation that will be applied when doing :meth:`raw.set_montage` and related functions by `Eric Larson`_ + +- Add support in :meth:`mne.io.Raw.plot` for passing ``clipping`` as a float to clip to a proportion of the dedicated channel range by `Eric Larson`_ + +- Add function :func:`mne.preprocessing.annotate_muscle_zscore` to annotate periods with muscle artifacts, by `Adonay Nunes`_ + +- Add :func:`mne.stats.ttest_ind_no_p` to mirror :func:`mne.stats.ttest_1samp_no_p` with hat correction by `Eric Larson`_ + +- Add `mne.stats.combine_adjacency` to combine adjacency matrices for independent data dimensions to enable things like spatio-temporal-time-frequency clustering in `mne.stats.permutation_cluster_test` by `Eric Larson`_ + +- Speed up raw data reading without preload in :func:`mne.io.read_raw_nirx` by `Eric Larson`_ + +- Speed up :meth:`mne.Epochs.copy` and :meth:`mne.Epochs.__getitem__` by avoiding copying immutable attributes by `Eric Larson`_ + +- Speed up and reduce memory usage of :meth:`mne.SourceEstimate.plot` and related functions/methods when ``show_traces=True`` by `Eric Larson`_ + +- Reduce memory usage of `~mne.io.Raw.plot_psd`, ``mne.time_frequency.psd_welch``, and `~mne.time_frequency.psd_array_welch` for long segments of data by `Eric Larson`_ + +- Support for saving movies of source time courses (STCs) with the ``brain.save_movie`` method and from the graphical user interface by `Guillaume Favelier`_ + +- Add ``mri`` and ``show_orientation`` arguments to :func:`mne.viz.plot_bem` by `Eric Larson`_ + +- Add :func:`mne.viz.centers_to_edges` to help when using :meth:`matplotlib.axes.Axes.pcolormesh` with flat shading by `Eric Larson`_ + +- Add ``on_missing='raise'`` to :meth:`mne.io.Raw.set_montage` and related functions to allow ignoring missing electrode coordinates by `Adam Li`_ + +- Add ``on_missing='raise'`` to :func:`mne.viz.plot_events` to allow ignoring missing events when passing ``event_id`` by `Eric Larson`_ + +- Add REST EEG infinity reference scheme to :meth:`mne.io.Raw.set_eeg_reference` and related functions by `Eric Larson`_ + +- Add better sanity checking of ``max_pca_components`` and ``n_components`` to provide more informative error messages for :class:`mne.preprocessing.ICA` by `Eric Larson`_ + +- Add ``plot`` option to :func:`mne.viz.plot_filter` allowing selection of which filter properties are plotted, and add an option for the user to supply ``axes`` by `Robert Luke`_ + +- Add estimation method legend to :func:`mne.viz.plot_snr_estimate` by `Eric Larson`_ + +- Add support to `mne.SourceSpaces.export_volume` for ``mri_resolution='sparse'`` to color only the nearest-neighbor voxels instead of entire regions by `Eric Larson`_ + +- Add ``axes`` argument to :func:`mne.viz.plot_evoked_white`, :meth:`mne.Evoked.plot_white`, and :func:`mne.viz.plot_snr_estimate` by `Eric Larson`_ + +- Change default to ``surfaces='auto'`` from ``surfaces='head'`` to allow :func:`mne.viz.plot_alignment` to work when just passing a :class:`mne.Info` as ``plot_alignment(info)`` by `Eric Larson`_ + +- Add ECoG misc EDF dataset to the :ref:`tut-working-with-ecog` tutorial to show snapshots of time-frequency activity by `Adam Li`_ + +- Add :func:`mne.viz.set_3d_options` and ``MNE_3D_OPTION_ANTIALIAS`` environment variable to control full-scene antialiasing (FXAA) in 3D functions like :ref:`mne coreg`, :func:`mne.viz.plot_alignment`, and :func:`mne.viz.plot_source_estimates`; this can be useful on
systems where FXAA does not work well, such as some with MESA software rendering, by `Eric Larson`_ + +- Add config variable ``MNE_TQDM`` to support explicitly setting the TQDM backend to use, for example ``tqdm`` or ``tqdm.auto`` (default) by `Eric Larson`_ + +- Add better support for reading corrupted FIF files in :func:`mne.io.read_raw_fif` by `Eric Larson`_ + +- BIDS conformity: When saving FIF files to disk and the files are split into parts, the ``split_naming='bids'`` parameter now uses a "_split-%d" naming instead of the previous "_part-%d", by `Stefan Appelhoff`_ + +- Add support for whitening and multiple channel types in :func:`mne.beamformer.make_dics` by `Marijn van Vliet`_ + +- Add better error message when trying to save incompatible `~mne.Evoked` objects to the same file by `Eric Larson`_ + +- Add support for loading complex numbers from mat files by `Thomas Hartmann`_ + +- Add generic reader function :func:`mne.io.read_raw` that loads files based on their extensions (it wraps the underlying specific ``read_raw_xxx`` functions) by `Clemens Brunner`_ + +- Add automatic T3 magnetometer detection and application of :meth:`mne.io.Raw.fix_mag_coil_types` to :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ + +- Add extended SSS (eSSS) support to :func:`mne.preprocessing.maxwell_filter` by `Eric Larson`_ + +- Add ``'auto'`` option to :meth:`mne.preprocessing.ICA.find_bads_ecg` to automatically determine the threshold for CTPS method by `Yu-Han Luo`_ + +- Add a ``notebook`` 3d backend for visualization in jupyter notebook with :func:`mne.viz.set_3d_backend` by `Guillaume Favelier`_ + +- Add support for reading and writing surfaces in Wavefront .obj format to the :func:`mne.read_surface` and :func:`mne.write_surface` by `Marijn van Vliet`_ + +- Add tutorial on how to manually fix BEM meshes in Blender by `Marijn van Vliet`_ and `Ezequiel Mikulan`_ + +- :func:`mne.write_evokeds` will now accept :class:`mne.Evoked` objects with differing channel orders in ``info['bads']``, which would previously raise an exception by `Richard Höchenberger`_ + +- Add support for reading channel names (e.g., for Ricoh data) to :func:`mne.io.read_raw_kit` by `Alex Gramfort`_ and `Eric Larson`_ + +- The ``reject_tmin`` and ``reject_tmax`` parameters of :class:`mne.Epochs` are now taken into account when using the ``reject_by_annotation`` parameter by `Stefan Appelhoff`_ + +- :func:`mne.preprocessing.find_bad_channels_maxwell` now automatically applies a low-pass filter before running bad channel detection. This can be disabled, restoring previous behavior by `Richard Höchenberger`_ + +- Add ``proj='reconstruct'`` to :meth:`mne.Evoked.plot` and related functions to apply projectors and then undo the signal bias using field mapping by `Eric Larson`_ + +- Add writing BEM surfaces and solutions in H5 format in :func:`mne.write_bem_surfaces` and :func:`mne.write_bem_solution` by `Eric Larson`_ + +- When picking a subset of channels, or when dropping channels from `~mne.io.Raw`, `~mne.Epochs`, or `~mne.Evoked`, projectors that can only be applied to the removed channels will now be dropped automatically by `Richard Höchenberger`_ + +- :class:`mne.Report` now can add topomaps of SSP projectors to the generated report. This behavior can be toggled via the new ``projs`` argument by `Richard Höchenberger`_ + +- When passing a list of `~mne.Evoked` objects to `~mne.viz.plot_compare_evokeds`, each evoked's ``.comment`` attribute will be used to label the trace. 
If ``.comment`` is empty, a 1-based index is assigned as the label by `Richard Höchenberger`_ + +- Speed up :func:`mne.stats.summarize_clusters_stc` using Numba by `Yu-Han Luo`_ + +- Add ``reject_by_annotation=True`` to :func:`mne.make_fixed_length_epochs` and :meth:`mne.preprocessing.ICA.plot_properties` to reject bad data segments based on annotations by `Yu-Han Luo`_ + +- `~mne.Report.parse_folder` now accepts a path-like folder name (it used to work with strings only) by `Alex Gramfort`_ + +- Add ``component_order`` parameter to :class:`mne.decoding.CSP` which allows switching between ``mutual_info`` (default) and ``alternate`` (a simpler and frequently used option) by `Martin Billinger`_ and `Clemens Brunner`_ + +- Add memory size information to the ``repr`` of :class:`mne.SourceSpaces` and :class:`mne.SourceEstimate` and related classes by `Eric Larson`_ + +- Add reader for Persyst (.lay + .dat format) data in :func:`mne.io.read_raw_persyst` by `Adam Li`_ + +- Use PyVista as the default backend for 3D visualization instead of Mayavi by `Guillaume Favelier`_ + +- Add reader for SNIRF NIRS data in :func:`mne.io.read_raw_snirf` by `Robert Luke`_ + +- `~mne.Evoked` has gained ``tmin`` and ``tmax`` attributes for more consistency with `~mne.Epochs` by `Richard Höchenberger`_ + +Bugs +~~~~ + +- Fix bug with writing and reading complex evoked data by modifying :func:`mne.write_evokeds` and :func:`mne.read_evokeds` **by new contributor** |Lau Møller Andersen|_ + +- Fix bug by adding an error message when trying to save complex STC data in a non-HDF5 format with :meth:`mne.VolSourceEstimate.save` **by new contributor** |Lau Møller Andersen|_ + +- Fix bug with logging in :meth:`mne.io.Raw.set_eeg_reference` and related functions **by new contributor** |Simeon Wong|_ + +- Fix bug with :func:`mne.io.read_raw_gdf` where birthdays were not parsed properly, leading to an error **by new contributor** |Svea Marie Meyer|_ + +- Fix bug with :func:`mne.io.read_raw_edf` where the recording ID was not read properly for non-ASCII characters **by new contributor** |Lx37|_ + +- Fix bug in :func:`mne.set_eeg_reference` and related functions to set ``info['custom_ref_applied']`` to ``True`` for 'ecog' and 'seeg' channels in addition to 'eeg' **by new contributor** |Liberty Hamilton|_ + +- Fix bug with :func:`mne.chpi.compute_chpi_amplitudes` and :func:`mne.chpi.filter_chpi` to optimize time window length **by new contributor** |Steven Bierer|_ + +- Fix bug with :func:`mne.preprocessing.ICA.find_bads_eog` when more than one EOG component is present by `Christian O'Reilly`_ + +- Fix bug with non-preloaded data when using ``raw.apply_proj().load_data().get_data()`` where projectors were not applied by `Eric Larson`_ + +- Fix bug to permit :meth:`stc.project('nn', src)` to be applied after ``stc`` was restricted to an :class:`mne.Label` by `Luke Bloy`_ + +- Fix bug with :func:`mne.io.Raw.set_meas_date` to support setting ``meas_date`` to ``None``, by `Luke Bloy`_ + +- Fix bug with :func:`mne.setup_volume_source_space` when ``volume_label`` was supplied where voxels slightly (in the worst case, about 37% of ``pos`` in distance) outside the voxel-grid-based bounds of regions were errantly included, by `Eric Larson`_ + +- Fix bug with :ref:`mne coreg` where reverse scalings were applied to ``src[0]['src_mri_t']`` for volume source spaces, so morphing and plotting did not work correctly by `Eric Larson`_ + +- Fix bug with :func:`mne.io.read_raw_ctf` when reference magnetometers have the compensation grade marked by `Eric Larson`_ +
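As a sketch of the ``reject_by_annotation`` behavior added to :func:`mne.make_fixed_length_epochs` earlier in this list (again using the sample dataset; the annotation values are illustrative only and not part of the original changelog):

.. code-block:: python

    import os
    import mne

    data_path = mne.datasets.sample.data_path()
    raw = mne.io.read_raw_fif(
        os.path.join(data_path, "MEG", "sample", "sample_audvis_raw.fif")
    )
    # Mark 5 s of data as bad; fixed-length epochs overlapping it are dropped
    raw.set_annotations(
        mne.Annotations(onset=[10.0], duration=[5.0], description=["bad_segment"])
    )
    epochs = mne.make_fixed_length_epochs(
        raw, duration=2.0, reject_by_annotation=True, preload=True
    )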
+- Fix bug with `mne.SourceSpaces.export_volume` with ``use_lut=False`` where no values were written by `Eric Larson`_ + +- Fix bug with :func:`mne.preprocessing.annotate_movement` where bad data segments, specified in ``raw.annotations``, would be handled incorrectly by `Luke Bloy`_ + +- Fix bug with :func:`mne.compute_source_morph` when more than one volume source space was present (e.g., when using labels) where only the first label would be interpolated when ``mri_resolution=True`` by `Eric Larson`_ + +- Fix bug with :func:`mne.compute_source_morph` when morphing to a volume source space when ``src_to`` is used and the destination subject is not ``fsaverage`` by `Eric Larson`_ + +- Fix bug with :func:`mne.compute_source_morph` where outermost voxels in the destination source space could be errantly omitted by `Eric Larson`_ + +- Fix bug with :func:`mne.compute_source_morph` where complex data was cast to real when doing a volumetric morph by `Eric Larson`_ + +- Fix bug with :func:`mne.minimum_norm.compute_source_psd_epochs` and :func:`mne.minimum_norm.source_band_induced_power` raised errors when ``method='eLORETA'`` by `Eric Larson`_ + +- Fix bug with :func:`mne.minimum_norm.apply_inverse` where the explained variance did not work for complex data by `Eric Larson`_ + +- Fix bug with :func:`mne.preprocessing.compute_current_source_density` where values were not properly computed; maps should now be more focal, by `Alex Rockhill`_ and `Eric Larson`_ + +- Fix bug with :func:`mne.combine_evoked` where equal-weighted averages were wrongly computed as equal-weighted sums, by `Daniel McCloy`_ + +- Fix bug with :meth:`mne.channels.DigMontage.plot` when reference channels with no defined position are present, by `Eric Larson`_ + +- Fix bug with :func:`mne.channels.read_custom_montage` where ``"headshape"`` points in SFP format were not read properly, by `Eric Larson`_ + +- Fix bug with setting HTML classes when using ``mne.Report.add_bem_to_section`` by `Eric Larson`_ + +- Fix bug with convex-hull based MEG helmet creation where the face area could be covered up, by `Eric Larson`_ + +- Fix bug with :func:`mne.inverse_sparse.mixed_norm` and :func:`mne.inverse_sparse.gamma_map` with ``return_as_dipoles=True`` and :func:`mne.beamformer.rap_music` where ``dip.gof`` was the same for all dipoles. GOF values / explained variance across all dipoles now gets split among the dipoles using SVD of the lead fields. 
To get the total GOF, ``sum(dip.gof for dip in dipoles)`` can be used, by `Eric Larson`_ + +- Fix bug with :func:`mne.inverse_sparse.gamma_map` where output data was not assigned properly when using a free-orientation forward and ``xyz_same_gamma=False`` by `Eric Larson`_ + +- Fix to enable interactive plotting with no colorbar with :func:`mne.viz.plot_evoked_topomap` by `Daniel McCloy`_ + +- Fix plotting with :func:`mne.viz.plot_evoked_topomap` to pre-existing axes by `Daniel McCloy`_ + +- Fix bug with :func:`mne.viz.plot_vector_source_estimates` using the PyVista backend with ``time_viewer=True`` when updating the arrow colormaps by `Eric Larson`_ + +- Fix bug with :func:`mne.viz.plot_vector_source_estimates` where ``clim='auto'`` and ``clim=dict(..., kind='percent')`` did not take into account the magnitude of the activation, by `Eric Larson`_ + +- The default plotting mode for :func:`mne.io.Raw.plot` and :ref:`mne browse_raw` has been changed to ``clipping=3.`` to facilitate data analysis with large deflections, by `Eric Larson`_ + +- PSD plots will now show non-data channels (e.g., ``misc``) if those channels are explicitly passed to ``picks``, by `Daniel McCloy`_. + +- Fix bug with :func:`mne.time_frequency.read_tfrs` where ``info['meas_date']`` was not parsed correctly, by `Eric Larson`_ + +- Fix bug with :func:`mne.time_frequency.tfr_array_stockwell` where inputs were not properly validated by `Eric Larson`_ + +- Fix handling of NaN when using TFCE in clustering functions such as :func:`mne.stats.spatio_temporal_cluster_1samp_test` by `Eric Larson`_ + +- Fix handling of signs when using TFCE by `Eric Larson`_ + +- The :class:`mne.MixedSourceEstimate` class has been clarified to contain two cortical surface source spaces, plus at least one other source space. 
Creating source estimates in other orderings is not supported, by `Eric Larson`_ + +- Fix bug where :class:`VolSourceEstimate.vertices ` was an instance of :class:`~numpy.ndarray` instead of :class:`python:list` of one :class:`~numpy.ndarray`, by `Eric Larson`_ + +- Fix default to be ``foreground=None`` in :func:`mne.viz.plot_source_estimates` to use white or black text based on the background color by `Eric Larson`_ + +- Fix bug with writing EGI and CTF `mne.Info` to H5 format, e.g., with `mne.time_frequency.AverageTFR.save` by `Eric Larson`_ + +- Fix bug with topomap outlines in :meth:`mne.time_frequency.AverageTFR.plot_joint` and :meth:`mne.time_frequency.AverageTFR.plot_topo` by `Eric Larson`_ + +- Fix bug with :func:`mne.io.Raw.plot` where toggling all projectors did not actually take effect by `Eric Larson`_ + +- Fix bug with :func:`mne.write_proj` where the number of columns was incorrectly written by `Eric Larson`_ + +- Fix bug with :func:`mne.read_epochs` when loading data in complex format with ``preload=False`` by `Eric Larson`_ + +- Fix bug with :meth:`mne.Epochs.save` where the file splitting calculations did not account for the sizes of non-data writes by `Eric Larson`_ + +- Fix bug with :class:`mne.Epochs` when metadata was not subselected properly when ``event_repeated='drop'`` by `Eric Larson`_ + +- Fix bug with :class:`mne.Epochs` where ``epochs.drop_log`` was a list of list of str rather than an immutable tuple of tuple of str (not meant to be changed by the user) by `Eric Larson`_ + +- Fix bug with :class:`mne.Report` where the BEM section could not be toggled by `Eric Larson`_ + +- Fix bug when using :meth:`mne.Epochs.crop` to exclude the baseline period would break :func:`mne.Epochs.save` / :func:`mne.read_epochs` round-trip by `Eric Larson`_ + +- Fix bug with `mne.Epochs.subtract_evoked` where using decimated epochs would lead to an error by `Eric Larson`_ + +- Fix bug with :func:`mne.viz.plot_bem` and :class:`mne.Report` when plotting BEM contours when MRIs are not in standard FreeSurfer orientation by `Eric Larson`_ + +- Fix bug with :func:`mne.minimum_norm.make_inverse_operator` where it would warn even when an explicit ``rank`` was used by `Eric Larson`_ + +- Fix bugs with :func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` where: + + - Noise normalization factors ``weight_norm='unit-noise-gain'`` and ``weight_norm='nai'`` were computed incorrectly + - ``pick_ori='max-power'`` computed the max-power orientation incorrectly + - ``pick_ori='normal'`` did not compute power or noise normalization factors correctly + - :func:`mne.beamformer.apply_lcmv_cov` did not apply whitening and projections properly + +- Fix :ref:`mne setup_forward_model` to have it actually compute the BEM solution in addition to creating the BEM model by `Eric Larson`_ + +- Fix bug with :func:`mne.io.read_raw_edf` where null bytes were not properly handled, causing an error when opening a file by `Eric Larson`_ + +- Fix bug with :func:`mne.Report` where unicode characters were not rendered properly (encoding for HTML was not set) by `Eric Larson`_ + +- Fix bug with :func:`mne.preprocessing.nirs.scalp_coupling_index` where filter transition was incorrectly assigned by `Robert Luke`_ + +- Fix bug with :func:`mne.make_forward_dipole` where :func:`mne.write_forward_solution` could not be used by `Eric Larson`_ + +- Fix bug with :meth:`mne.io.Raw.plot` when ``scalings='auto'`` where bad data would prevent channel plotting by `Eric Larson`_ + +- Default ``border`` and 
``extrapolate`` arguments for :func:`mne.Evoked.plot_topomap` and related functions were changed from ``0.`` to ``'mean'`` and ``'box'`` to ``'auto'``, respectively, to help more accurately reflect sensor geometries and boundary conditions. ``extrapolate='auto'`` uses ``extrapolate='local'`` for MEG data and ``extrapolate='head'`` otherwise, by `Eric Larson`_ + +- Fix bug that prevents ``n_jobs`` from being a NumPy integer type, by `Daniel McCloy`_. + +- Fix bug with :func:`mne.epochs.average_movements` where epoch weights were computed using all basis vectors instead of the internal basis only by `Eric Larson`_ + +- Fix bug with :func:`mne.get_volume_labels_from_aseg` where the returned labels were alphabetical instead of reflecting their volumetric ID-based order by `Eric Larson`_ + +- Fix bug with :func:`mne.preprocessing.find_bad_channels_maxwell` where good data of exactly ``step`` duration would lead to an error by `Eric Larson`_ + +- Fix bug with :func:`mne.preprocessing.find_bad_channels_maxwell` where indices were not handled properly when MEG channels were not first in the raw instance, and logging messages incorrectly reported the interval used by `Eric Larson`_ + +- Make :func:`mne.set_config` accept path-like input values by `Richard Höchenberger`_ + +- Fix bug with :func:`mne.write_labels_to_annot` and :func:`mne.datasets.fetch_hcp_mmp_parcellation` where label name strings were not properly terminated, leading to problems loading in FreeSurfer by `Eric Larson`_ + +- Fix bug with :func:`mne.beamformer.make_dics` where complex conjugates were not applied properly by `Britta Westner`_ and `Eric Larson`_ + +- Fix bug with :func:`mne.bem.make_watershed_bem` where the RAS coordinates of watershed bem surfaces were not updated correctly from the volume file by `Yu-Han Luo`_ + +- Fix bug with :meth:`mne.io.Raw.get_channel_types` and related methods where the ordering of ``picks`` was not preserved, by `Eric Larson`_ + +- Fix bug with :meth:`mne.io.Raw.plot_psd` with ``average=False`` and multiple channel types where channel locations were not shown properly by `Eric Larson`_ + +- Fix bug with :func:`mne.viz.plot_sparse_source_estimates` when using ``'sphere'`` mode by `Eric Larson`_ and `Guillaume Favelier`_ + +- Fix bug in FieldTrip reader functions when channels are missing in the ``info`` object by `Thomas Hartmann`_ + +- Throw proper error when trying to import FieldTrip Epochs data with non-uniform time for trials by `Thomas Hartmann`_ + +- Throw proper error when trying to import FieldTrip data saved by an old, incompatible version by `Thomas Hartmann`_ + +- Fix bug in :func:`mne.read_epochs_fieldtrip` when importing data without a ``trialinfo`` field by `Thomas Hartmann`_ + +- Fix bug in :meth:`mne.preprocessing.ICA.plot_properties` where time series plot doesn't start at the proper tmin by `Teon Brooks`_ + +- Fix bug with :meth:`mne.preprocessing.ICA.plot_properties` where a :class:`mne.io.Raw` object with annotations would lead to an error by `Yu-Han Luo`_ + +- Fix bug with :func:`mne.events_from_annotations(raw.annotations) ` when ``orig_time`` of annotations is None and ``raw.first_time > 0``, by `Alex Gramfort`_ + +API changes +~~~~~~~~~~~ + +- Python 3.5 is no longer supported, Python 3.6+ is required, by `Eric Larson`_ + +- ``adjacency`` has replaced ``connectivity`` in the names of: + + 1. Arguments to clustering functions, such as `mne.stats.permutation_cluster_test`, and + 2. 
Function names for defining adjacency, such as `mne.spatio_temporal_src_adjacency` replacing ``mne.spatio_temporal_src_connectivity``. + + The complete list of changed function names is: + + - ``mne.channels.find_ch_connectivity`` → `~mne.channels.find_ch_adjacency` + - ``mne.channels.read_ch_connectivity`` → `~mne.channels.read_ch_adjacency` + - ``mne.spatial_dist_connectivity`` → `~mne.spatial_dist_adjacency` + - ``mne.spatial_inter_hemi_connectivity`` → `~mne.spatial_inter_hemi_adjacency` + - ``mne.spatial_src_connectivity`` → `~mne.spatial_src_adjacency` + - ``mne.spatial_tris_connectivity`` → `~mne.spatial_tris_adjacency` + - ``mne.spatio_temporal_dist_connectivity`` → `~mne.spatio_temporal_dist_adjacency` + - ``mne.spatio_temporal_src_connectivity`` → `~mne.spatio_temporal_src_adjacency` + - ``mne.spatio_temporal_tris_connectivity`` → `~mne.spatio_temporal_tris_adjacency` + + "connectivity" is now reserved for discussions of functional and effective connectivity of the brain, and "adjacency" for source or sensor neighbor definitions for cluster-based analyses, by `Eric Larson`_. + +- The default for the ``standardize_names`` argument of :func:`mne.io.read_raw_kit` will change from ``True`` to ``False`` in 0.22, by `Eric Larson`_ + +- The ``normalize_fwd`` argument of :func:`mne.beamformer.make_dics` has been deprecated in favor of ``depth``, by `Eric Larson`_ + +- The ``overwrite`` parameter of :func:`mne.write_bem_surfaces` and :func:`mne.write_bem_solution` defaults to ``True`` in 0.21 but will change to ``False`` in 0.22, by `Eric Larson`_ + +- Add ``n_cols`` parameter to :meth:`mne.preprocessing.ICA.plot_scores` to allow plotting scores in multiple columns, by `Luke Bloy`_ + +- In :func:`mne.stats.permutation_cluster_test` and :func:`mne.stats.permutation_cluster_1samp_test` the default parameter value ``out_type='mask'`` has changed to ``None``, which in 0.21 means ``'mask'`` but will change to mean ``'indices'`` in the next version, by `Daniel McCloy`_ + +- The default window size set by ``filter_length`` when ``method='spectrum_fit'`` in :meth:`mne.io.Raw.notch_filter` will change from ``None`` (use whole file) to ``'10s'`` in 0.22, by `Eric Larson`_ + +- ``vmin`` and ``vmax`` parameters are deprecated in :meth:`mne.Epochs.plot_psd_topomap` and :func:`mne.viz.plot_epochs_psd_topomap`; use new ``vlim`` parameter instead, by `Daniel McCloy`_. + +- The method ``stc_mixed.plot_surface`` for a :class:`mne.MixedSourceEstimate` has been deprecated in favor of :meth:`stc.surface().plot(...) 
` by `Eric Larson`_ + +- The method ``stc.normal`` for :class:`mne.VectorSourceEstimate` has been deprecated in favor of :meth:`stc.project('nn', src) ` by `Eric Larson`_ + +- Add ``use_dev_head_trans`` parameter to :func:`mne.preprocessing.annotate_movement` to allow choosing the device to head transform is used to define the fixed cHPI coordinates by `Luke Bloy`_ + +- The function ``mne.channels.read_dig_captrack`` will be deprecated in version 0.22 in favor of :func:`mne.channels.read_dig_captrak` to correct the spelling error: "captraCK" -> "captraK", by `Stefan Appelhoff`_ + +- The ``threshold`` argument in :meth:`mne.preprocessing.ICA.find_bads_ecg` defaults to ``None`` in version 0.21 but will change to ``'auto'`` in 0.22 by `Yu-Han Luo`_ + +- The function ``mne.preprocessing.mark_flat`` has been deprecated in favor of ``mne.preprocessing.annotate_flat`` by `Eric Larson`_ + +- The default argument ``meg=True`` in :func:`mne.pick_types` will change to ``meg=False`` in version 0.22 by `Clemens Brunner`_ + +- Added :meth:`mne.io.Raw.get_montage`, which obtains the montage that an instance has, by `Adam Li`_ + + +Authors +~~~~~~~ + +People who contributed to this release in alphabetical order +(people with a + are first time contributors): + +* Adam Li +* Adonay Nunes +* Alejandro Weinstein +* Alex Rockhill +* Alexandre Gramfort +* Anton Nikolas Waniek +* Britta Westner +* Christian O'Reilly +* Clemens Brunner +* Daniel McCloy +* Eric Larson +* Evgenii Kalenkovich +* Fede Raimondo +* Guillaume Favelier +* Hubert Banville +* Jeroen Van Der Donckt + +* Johann Benerradi + +* Kyle Mathewson + +* Lau Møller Andersen + +* Liberty Hamilton + +* Luke Bloy +* Lx37 + +* Mainak Jas +* Marijn van Vliet +* Martin Billinger +* Martin Schulz + +* Martin van Harmelen +* Mikolaj Magnuski +* Olaf Hauk +* Rahul Nadkarni + +* Richard Höchenberger +* Robert Luke +* Sara Sommariva +* Simeon Wong + +* Stefan Appelhoff +* Steven Bierer + +* Svea Marie Meyer + +* Teon Brooks +* Thomas Hartmann +* Yu-Han Luo +* chapochn +* mshader diff --git a/doc/changes/v0.22.rst b/doc/changes/v0.22.rst new file mode 100644 index 00000000000..52d7cec71e5 --- /dev/null +++ b/doc/changes/v0.22.rst @@ -0,0 +1,263 @@ +.. _changes_0_22: + +Version 0.22.0 (2020-12-17) +--------------------------- + +.. |Austin Hurst| replace:: **Austin Hurst** + +.. |Aniket Pradhan| replace:: **Aniket Pradhan** + +.. |Eduard Ort| replace:: **Eduard Ort** + +.. |Evan Hathaway| replace:: **Evan Hathaway** + +.. |Hongjiang Ye| replace:: **Hongjiang Ye** + +.. |Jeff Stout| replace:: **Jeff Stout** + +.. |Jonathan Kuziek| replace:: **Jonathan Kuziek** + +.. |Qianliang Li| replace:: **Qianliang Li** + +.. |Tod Flak| replace:: **Tod Flak** + +.. 
|Victoria Peterson| replace:: **Victoria Peterson** + + +Enhancements +~~~~~~~~~~~~ +- Add :func:`mne.read_evokeds_mff` to read averaged MFFs (requires mffpy >= 0.5.7) **by new contributor** |Evan Hathaway|_ (:gh:`8354`) + +- Add :class:`mne.decoding.SSD` for spatial filtering with spatio-spectral-decomposition (:gh:`7070` **by new contributor** |Victoria Peterson|_ and `Denis Engemann`_) + +- Add reader for optical imaging data recorded using ISS Imgagent I/II hardware and BOXY recording software in :func:`mne.io.read_raw_boxy` (:gh:`7717` **by new contributor** |Jonathan Kuziek|_ and `Kyle Mathewson`_) + +- Add options to use labels in :func:`mne.minimum_norm.get_point_spread` and :func:`mne.minimum_norm.get_cross_talk` (:gh:`8275` by `Olaf Hauk`_) + +- Update ``surfaces`` argument in :func:`mne.viz.plot_alignment` to allow dict for transparency values, and set default for sEEG data to have transparency (:gh:`8445` by `Keith Doelling`_) + +- Add support for ``mri_fiducials='estimated'`` in :func:`mne.viz.plot_alignment` to allow estimating MRI fiducial locations using :func:`mne.coreg.get_mni_fiducials` (:gh:`8553` by `Eric Larson`_) + +- Update default values in :ref:`mne coreg` and :func:`mne.viz.plot_alignment` for clearer representation of MRI and digitized fiducial points (:gh:`8553` by `Alex Gramfort`_ and `Eric Larson`_) + +- Add ``n_pca_components`` argument to :func:`mne.viz.plot_ica_overlay` (:gh:`8351` by `Eric Larson`_) + +- Add :func:`mne.stc_near_sensors` to facilitate plotting ECoG data (:gh:`8190` by `Eric Larson`_) + +- Add ``proj`` argument to :func:`mne.make_fixed_length_epochs` (:gh:`8351` by `Eric Larson`_) + +- Add :func:`mne.preprocessing.realign_raw` to realign simultaneous raw recordings in the presence of clock drift (:gh:`8539` by `Eric Larson`_) + +- Reduce memory usage of volume source spaces (:gh:`8379` by `Eric Larson`_) + +- Speed up heavy use of :meth:`mne.SourceMorph.apply` for volumetric source spaces by use of the method :meth:`mne.SourceMorph.compute_vol_morph_mat` (:gh:`8366` by `Eric Larson`_) + +- Add support for non-uniform ``zooms`` (e.g., when using a surrogate MRI via :func:`mne.scale_mri`) in volumetric morphing (:gh:`8642` by `Eric Larson`_) + +- In :func:`mne.compute_source_morph` ``zooms`` are no longer required to match the spacing of ``src_to``, which is useful to ensure the morphing is accurate when the ``src_to`` spacing is large (e.g., 1 cm) (:gh:`8642` by `Eric Larson`_) + +- Add volumetric source space support to :func:`mne.labels_to_stc` (:gh:`8447` by `Eric Larson`_) + +- Speed up :class:`mne.decoding.TimeDelayingRidge` with edge correction using Numba (:gh:`8323` by `Eric Larson`_) + +- Add :meth:`mne.Epochs.reset_drop_log_selection` to facilitate writing epochs with many ignored entries in their drop log (:gh:`8449` by `Eric Larson`_) + +- Add sEEG source visualization using :func:`mne.stc_near_sensors` and sEEG working tutorial (:gh:`8402` by `Eric Larson`_ and `Adam Li`_) + +- Add :meth:`mne.channels.DigMontage.get_positions`, which will return a dictionary of channel positions, coordinate frame and fiducial locations (:gh:`8460` by `Adam Li`_) + +- Add support for writing digitization points in a coordinate frame other than head in :meth:`mne.channels.DigMontage.save` (:gh:`8532` by `Eric Larson`_) + +- Add ``picks`` parameter to :func:`mne.preprocessing.fix_stim_artifact` to specify which channel needs to be fixed (:gh:`8482` by `Alex Gramfort`_) + +- Add progress bar support to :func:`mne.time_frequency.csd_morlet` 
(:gh:`8608` by `Eric Larson`_) + +- Further improved documentation building instructions and execution on Windows (:gh:`8502` by `Evgenii Kalenkovich`_ and `Eric Larson`_) + +- Add option to disable TQDM entirely with ``MNE_TQDM='off'`` (:gh:`8515` by `Eric Larson`_) + +- Add option ``on_header_missing`` to :func:`mne.channels.read_polhemus_fastscan` (:gh:`8622` by `Eric Larson`_) + +- Add option ``window`` to ``mne.time_frequency.psd_welch`` and related functions (:gh:`8862` by `Eric Larson`_) + +- `mne.preprocessing.ICA.plot_sources` now displays an `mne.preprocessing.ICA.plot_properties` window when right-clicking on component names on the y-axis (:gh:`8381` by `Daniel McCloy`_) + +- :func:`mne.io.read_raw_edf`, :func:`mne.io.read_raw_bdf`, and :func:`mne.io.read_raw_gdf` now detect and handle invalid highpass/lowpass filter settings (:gh:`8584` by `Clemens Brunner`_) + +- If a ``baseline`` tuple containing one or two ``None`` values – e.g. ``(None, 0)``, ``(0, None)``, or ``(None, None)`` – is passed to `~mne.Epochs` or `~mne.Epochs.apply_baseline`, the ``None`` value(s) will be replaced with the actual time (i.e., :attr:`~mne.Epochs.tmin` and :attr:`~mne.Epochs.tmax`, respectively) when populating ``Epochs.baseline`` (:gh:`8442` by `Richard Höchenberger`_) + +- `~mne.Epochs` will now retain the information about an applied baseline correction, even if the baseline period is partially or completely removed through cropping later on (:gh:`8442` by `Richard Höchenberger`_) + +- Add :func:`mne.source_space.compute_distance_to_sensors` to compute distances between vertices and sensors (:gh:`8534` by `Olaf Hauk`_ and `Marijn van Vliet`_) + +- Annotations can now be shown/hidden interactively in raw plots (:gh:`8624` by `Daniel McCloy`_) + +- Added argument ``colors`` to `mne.grow_labels` (:gh:`8519` by `Olaf Hauk`_) + +- Added `mne.SourceEstimate.apply_baseline` method for baseline-correction of source estimates (:gh:`8452` by `Olaf Hauk`_) + +- New `mne.viz.Brain.set_time` method to set the displayed time in seconds (:gh:`8415` by `Daniel McCloy`_) + +- Update the ``backend`` parameter of :func:`mne.viz.plot_source_estimates` to integrate ``pyvista`` (:gh:`8395` by `Guillaume Favelier`_) + +- Add ``group_by`` parameter to `mne.viz.plot_epochs` and `mne.Epochs.plot` to allow displaying channel data by sensor position (:gh:`8381` by `Daniel McCloy`_) + +Bugs +~~~~ +- Fix orthogonalization of power envelopes in ``mne.connectivity.envelope_correlation`` (:gh:`8658` **by new contributor** |Qianliang Li|_ and `Eric Larson`_) + +- Fix data overwrite of cascading simulation operations in :func:`mne.simulation.simulate_raw` (:gh:`8633` **by new contributor** |Jeff Stout|_) + +- Fix a transpose issue in :func:`mne.decoding.CSP.plot_filters` (:gh:`8580` **by new contributor** |Hongjiang Ye|_) + +- Fix :func:`mne.io.read_raw_curry` to deal with Curry datasets that have channels listed in the labels file but absent from the saved data file (e.g., a 'Ref' channel). Also now populates ``info['meas_date']`` if possible (:gh:`8400` **by new contributor** |Tod Flak|_) + +- Fix bug with ``mne.io.egi.tests.test_egi`` where testing data was downloaded even when it was not necessary (:gh:`8474` **by new contributor** |Aniket Pradhan|_) + +- Fix bug with reading split files that have dashes in the filename (:gh:`8339` **by new contributor** |Eduard Ort|_) + +- Fix bug with parsing EDF dates and date integers (:gh:`8558` **by new contributor** |Austin Hurst|_ and `Eric Larson`_) + +- Fix bug with reading EDF and KIT files on big endian architectures such as s390x (:gh:`8618` by `Eric Larson`_) + +- Fix bug with :func:`mne.beamformer.make_dics` where the ``rank`` parameter was not properly handled (:gh:`8594` by `Marijn van Vliet`_ and `Eric Larson`_) + +- Fix bug with :func:`mne.beamformer.apply_dics` where the whitener was not properly applied (:gh:`8610` by `Eric Larson`_) + +- Fix bug with `~mne.viz.plot_epochs_image` when ``order`` is supplied and multiple conditions are plotted (:gh:`8377` by `Daniel McCloy`_) + +- Fix bug with :func:`mne.viz.plot_source_estimates` when using the PyVista backend where singleton time points were not handled properly (:gh:`8285` by `Eric Larson`_) + +- Fix bug when passing ``axes`` to plotting functions so that :func:`matplotlib.pyplot.tight_layout` is not called when the figure was created using a constrained layout (:gh:`8344` by `Eric Larson`_) + +- Fix bug with compensated CTF data when picking channels without preload (:gh:`8318` by `Eric Larson`_) + +- Fix bug with plotting MEG topographies where the wrong extrapolation mode was used in ICA (:gh:`8637` by `Eric Larson`_) + +- Fix bug when merging fNIRS channels in :func:`mne.viz.plot_evoked_topomap` and related functions (:gh:`8306` by `Robert Luke`_) + +- Fix bug where events could overflow when writing to FIF (:gh:`8448` by `Eric Larson`_) + +- :func:`mne.io.read_raw_edf` now supports EDF files with invalid recording dates (:gh:`8283` by `Clemens Brunner`_) + +- Fix bug with :func:`mne.io.Raw.save` when using ``split_naming='bids'`` where non-split files would still be named ``name_split-01_meg.fif`` instead of the requested ``name_meg.fif`` (:gh:`8464` by `Alex Gramfort`_ and `Eric Larson`_) + +- Fix bug with :class:`mne.preprocessing.ICA` where ``n_pca_components`` as a :class:`python:float` would give the number of components that explained less than or equal to the given variance. It now selects enough components to explain more than the given variance, for better usability and consistency with :class:`sklearn.decomposition.PCA`. Generally this will mean that one more component will be included (:gh:`8326` by `Eric Larson`_) + +- Fix bug with :class:`mne.preprocessing.ICA` where projections were not tracked properly (:gh:`8343` by `Eric Larson`_) + +- Fix bug where extrapolation points created artifacts in :func:`mne.viz.plot_evoked_topomap` and related functions (:gh:`8425` by `Mikołaj Magnuski`_) + +- Fix bug with :func:`mne.preprocessing.read_ica_eeglab` where full-rank data were not handled properly (:gh:`8326` by `Eric Larson`_) + +- Fix bug with :ref:`somato-dataset` where the BEM was not included (:gh:`8317` by `Eric Larson`_) + +- Fix bug with coordinate frames when performing volumetric morphs via :func:`mne.compute_source_morph` and :meth:`mne.SourceMorph.apply` that could lead to ~5 mm bias (:gh:`8642` by `Eric Larson`_) + +- Fix bug with volumetric rendering alpha in :meth:`mne.VolSourceEstimate.plot_3d` and related functions (:gh:`8663` by `Eric Larson`_) + +- Fix missing documentation of :func:`mne.io.read_raw_nihon` in :ref:`tut-imorting-eeg-data` (:gh:`8320` by `Adam Li`_) + +- Fix bug with :func:`mne.add_reference_channels` when :func:`mne.io.Raw.reorder_channels` or related methods are used afterward (:gh:`8303`, :gh:`8484` by `Eric Larson`_) + +- Fix bug where the ``verbose`` arguments to :meth:`mne.Evoked.apply_baseline` and :meth:`mne.Epochs.apply_baseline` were not keyword-only (:gh:`8349` by `Eric Larson`_) + +- ``ICA.max_pca_components`` will not be altered by calling `~mne.preprocessing.ICA.fit` anymore. Instead, the new attribute ``ICA.max_pca_components_`` will be set (:gh:`8321` by `Richard Höchenberger`_) + +- Fix bug where `~mne.viz.plot_ica_overlay` would sometimes not create red traces (:gh:`8341` by `Richard Höchenberger`_) + +- Fix bug with :class:`~mne.preprocessing.ICA` where ``n_components=None, n_pca_components=None`` could lead to unstable unmixing matrix inversion by making ``n_components=None`` also use the lesser of ``n_components=0.999999`` and ``n_components=n_pca_components`` (:gh:`8351` by `Eric Larson`_) + +- The ``ica.n_pca_components`` property is no longer updated during :meth:`mne.preprocessing.ICA.fit`; instead, ``ica.n_components_`` will be added to the instance (:gh:`8351` by `Eric Larson`_) + +- Pass ``rank`` everywhere in forward preparation for source imaging.
This bug affected sparse solvers when using maxfilter data (:gh:`8368` by `Alex Gramfort`_) + +- Fix bug in :func:`mne.viz.plot_alignment` where ECoG and sEEG channels were not plotted and fNIRS channels were always plotted in the head coordinate frame (:gh:`8393` by `Eric Larson`_) + +- Fix bug in :func:`mne.set_bipolar_reference` where ``ch_info`` could contain invalid channel information keys (:gh:`8416` by `Eric Larson`_) + +- When reading BrainVision raw data, the channel units and types were sometimes not inferred correctly (:gh:`8434` by `Richard Höchenberger`_) + +- Attempting to remove baseline correction from preloaded `~mne.Epochs` will now raise an exception (:gh:`8435` by `Richard Höchenberger`_) + +- :meth:`mne.Report.parse_folder` will now correctly handle split FIFF files (:gh:`8486`, :gh:`8491` by `Richard Höchenberger`_) + +- Fix bug where BrainVision channel names, event types, and event descriptions containing commas were incorrectly parsed (:gh:`8492` by `Stefan Appelhoff`_) + +- Fix bug in :func:`mne.preprocessing.compute_fine_calibration` where the magnetometer calibration coefficients were computed incorrectly (:gh:`8522` by `Eric Larson`_) + +- Fix bug in :func:`mne.io.read_raw_eeglab` where empty event durations led to an error (:gh:`8384` by `Mikołaj Magnuski`_) + +- Fix inset sensor plots to always use equal aspect (:gh:`8545` by `Daniel McCloy`_) + +- Fix bug in `mne.viz.plot_compare_evokeds` where evokeds with identical ``comment`` attributes would not plot properly if passed as a list (:gh:`8590` by `Daniel McCloy`_) + +- Fix bug in ``mne.time_frequency.psd_welch`` and related functions where the window default errantly changed from ``'hamming'`` to ``('tukey', 0.25)`` (:gh:`8862` by `Eric Larson`_) + +- Fix bug in :func:`mne.io.read_raw_kit` where scale factors for EEG channels could be set to zero (:gh:`8542` by `Eric Larson`_) + +- Fix reading GDF files with excluded channels in :func:`mne.io.read_raw_gdf` (:gh:`8520` by `Clemens Brunner`_) + +- Fix automatic selection of extrapolation mask type from channel type when plotting field maps (:gh:`8589` by `Daniel McCloy`_) + +- Fix bug in :func:`mne.viz.set_3d_title` where 3D plot could have multiple titles that overlap (:gh:`8564` by `Guillaume Favelier`_) + +- Fix bug in :func:`mne.viz.set_3d_view` where plotter is not updated properly causing camera issues in the doc (:gh:`8564` by `Guillaume Favelier`_) + +- :func:`mne.preprocessing.find_ecg_events` didn't take the ``tstart`` parameter value into account when calculating the average heart rate (:gh:`8605` by `Richard Höchenberger`_) + +API changes +~~~~~~~~~~~ + +- Minimum required versions were increased for core dependencies NumPy (1.15.4), SciPy (1.1.0), and Matplotlib (3.0) and for the optional dependencies scikit-learn (0.20.2) and pandas (0.23.4) (:gh:`8374` by `Eric Larson`_) + +- The parameter ``on_split_missing`` has been added to :func:`mne.io.read_raw_fif` and its default will change from ``'warn'`` to ``'raise'`` in 0.23, by (:gh:`8357` `Eric Larson`_) + +- The ``max_pca_components`` argument of :class:`~mne.preprocessing.ICA` has been deprecated, use ``n_components`` during initialization and ``n_pca_components`` in :meth:`~mne.preprocessing.ICA.apply` instead (:gh:`8351` by `Eric Larson`_) + +- The ``n_pca_components`` argument of :class:`~mne.preprocessing.ICA` has been deprecated, use ``n_pca_components`` in :meth:`~mne.preprocessing.ICA.apply` (:gh:`8356` by `Eric Larson`_) + +- The ``trans`` argument of 
:func:`mne.extract_label_time_course` is deprecated and will be removed in 0.23 as it is no longer necessary (:gh:`8389` by `Eric Larson`_) + +- Parameter ``event_colors`` in `mne.viz.plot_epochs` and `mne.Epochs.plot` is deprecated, replaced by ``event_color`` which is consistent with `mne.viz.plot_raw` and provides greater flexibility (:gh:`8381` by `Daniel McCloy`_) + +Authors +~~~~~~~ + +People who contributed to this release in alphabetical order +(people with a + are first time contributors): + +* Adam Li +* Alexandre Gramfort +* Aniket Pradhan + +* Austin Hurst + +* Christian Brodbeck +* Clemens Brunner +* Daniel McCloy +* Denis A. Engemann +* Eduard Ort + +* Eric Larson +* Evan Hathaway + +* Evgenii Kalenkovich +* Fede Raimondo +* Guillaume Favelier +* Hongjiang Ye + +* Jean-Rémi King +* Jeff Stout + +* Jonathan Kuziek + +* Jussi Nurminen +* Justus Schwabedal +* Keith Doelling +* Kyle Mathewson +* Mads Jensen +* Mainak Jas +* Marijn van Vliet +* Mikolaj Magnuski +* Olaf Hauk +* Qianliang Li + +* Richard Höchenberger +* Robert Luke +* Stefan Appelhoff +* Thomas Hartmann +* Tod Flak + +* Victoria Peterson + diff --git a/doc/changes/v0.23.rst b/doc/changes/v0.23.rst new file mode 100644 index 00000000000..a005feab4ec --- /dev/null +++ b/doc/changes/v0.23.rst @@ -0,0 +1,388 @@ +.. _changes_0_23_0: + +Version 0.23.0 (2021-04-27) +--------------------------- + +.. |Jack Zhang| replace:: **Jack Zhang** + +.. |Sumalyo Datta| replace:: **Sumalyo Datta** + +.. |Anna Padee| replace:: **Anna Padee** + +.. |Richard Koehler| replace:: **Richard Koehler** + +.. |Zhi Zhang| replace:: **Zhi Zhang** + +.. |Rotem Falach| replace:: **Rotem Falach** + +.. |Andres Rodriguez| replace:: **Andres Rodriguez** + +.. |Matt Sanderson| replace:: **Matt Sanderson** + +.. |Enrico Varano| replace:: **Enrico Varano** + +.. |Dominik Welke| replace:: **Dominik Welke** + +.. |Judy D Zhu| replace:: **Judy D Zhu** + +.. |Valerii Chirkov| replace:: **Valerii Chirkov** + +.. |Matteo Anelli| replace:: **Matteo Anelli** + +.. |Apoorva Karekal| replace:: **Apoorva Karekal** + +.. |Cora Kim| replace:: **Cora Kim** + +.. |Silvia Cotroneo| replace:: **Silvia Cotroneo** + +.. |Ram Pari| replace:: **Ram Pari** + +.. |Erica Peterson| replace:: **Erica Peterson** + +.. |Maggie Clarke| replace:: **Maggie Clarke** + +.. |Felix Klotzsche| replace:: **Felix Klotzsche** + +Enhancements +~~~~~~~~~~~~ +- Add support for exporting to EEGLAB's set format with :mod:`eeglabio` with new methods :meth:`mne.io.Raw.export` and :meth:`mne.Epochs.export`. 
(:gh:`9192` **by new contributor** |Jack Zhang|_) + +- :func:`mne.viz.plot_evoked_topo` now uses the minimum/maximum of the peaks for each channel type to calculate ``ylim`` when the input parameter is ``None`` (:gh:`9285` **by new contributor** |Ram Pari|_) + +- Add ``exclude`` parameter to :func:`mne.viz.plot_evoked_topo` (:gh:`9278` **by new contributor** |Ram Pari|_) + +- Add :meth:`mne.time_frequency.EpochsTFR.to_data_frame` and :meth:`mne.time_frequency.AverageTFR.to_data_frame` methods (:gh:`9124` **by new contributor** |Felix Klotzsche|_) + +- Add ``selection`` and ``drop_log`` to :class:`mne.time_frequency.EpochsTFR` (:gh:`9124` **by new contributor** |Felix Klotzsche|_) + +- Add CSV, TSV, and XYZ support to :func:`mne.channels.read_custom_montage` (:gh:`9203` **by new contributor** |Jack Zhang|_) + +- Add HTML representation for `~mne.Epochs` in Jupyter Notebooks (:gh:`9174` **by new contributor** |Valerii Chirkov|_) + +- Speed up :func:`mne.viz.plot_ica_properties` by refactoring (:gh:`9174` **by new contributor** |Valerii Chirkov|_) + +- Add ``apply_function`` method to epochs and evoked objects (:gh:`9088` **by new contributor** |Erica Peterson|_ and `Victoria Peterson`_) + +- New tutorial for function :func:`mne.make_fixed_length_epochs` (:gh:`9156` **by new contributor** |Erica Peterson|_) + +- Add different colors for each volume source space in :func:`mne.viz.plot_alignment` (:gh:`9043` **by new contributor** |Valerii Chirkov|_) + +- Add ``overlap`` parameter to :func:`mne.make_fixed_length_epochs` to allow creating overlapping fixed length epochs (:gh:`9096` **by new contributor** |Silvia Cotroneo|_) + +- Add :meth:`mne.Dipole.to_mni` for more convenient ``dipole.pos`` to MNI conversion (:gh:`9043` **by new contributor** |Valerii Chirkov|_) + +- Update citations in ``maxwell.py`` (:gh:`9043` **by new contributor** |Valerii Chirkov|_) + +- New tutorial for analyzing frequency-tagging data (:gh:`8867` **by new contributor** |Dominik Welke|_ and `Evgenii Kalenkovich`_) + +- Add ``dbs`` as a new channel type for deep brain stimulation (DBS) recordings (:gh:`8739` **by new contributor** |Richard Koehler|_) + +- Add some preprocessing functions to the EEGLAB migration guide (:gh:`9169` **by new contributor** |Apoorva Karekal|_) + +- Add :func:`mne.chpi.extract_chpi_locs_kit` to read cHPI coil locations from KIT/Yokogawa data (:gh:`8813` **by new contributor** |Matt Sanderson|_, `Robert Seymour`_, and `Eric Larson`_) + +- Add ``match_alias`` parameter to :meth:`mne.io.Raw.set_montage` and related functions to match unrecognized channel location names to known aliases (:gh:`8799` **by new contributor** |Zhi Zhang|_) + +- Update the ``notebook`` 3d backend to use ``ipyvtk_simple`` for better integration within ``Jupyter`` (:gh:`8503` by `Guillaume Favelier`_) + +- Remove the 15-character limitation for channel names when writing to FIF format.
If you need the old 15-character names, you can use something like ``raw.rename_channels({n: n[:13] for n in raw.ch_names}, allow_duplicates=True)``, by `Eric Larson`_ (:gh:`8346`) + +- Add channel-specific annotation support to :class:`mne.Annotations` via ``ch_names`` parameter (:gh:`8896` by `Eric Larson`_) + +- Add toggle-all button to :class:`mne.Report` HTML and ``width`` argument to ``mne.Report.add_bem_to_section`` (:gh:`8723` by `Eric Larson`_) + +- Add infant template MRI dataset downloader :func:`mne.datasets.fetch_infant_template` (:gh:`8738` by `Eric Larson`_ and `Christian O'Reilly`_) + +- Add digitizer information to :func:`mne.io.read_raw_egi` (:gh:`8789` by `Christian Brodbeck`_) + +- Add support for reading some incomplete raw FIF files in :func:`mne.io.read_raw_fif` (:gh:`9268` by `Eric Larson`_) + +- Allow reading digitization from files other than ``*.fif`` in the coregistration GUI (:gh:`8790` by `Christian Brodbeck`_) + +- Speed up :func:`mne.inverse_sparse.tf_mixed_norm` using STFT/ISTFT linearity (:gh:`8697` by `Eric Larson`_) + +- Reduce memory consumption of `mne.io.Raw` and speed up epoching when thousands of events are present for `mne.Epochs` (:gh:`8801` by `Eric Larson`_) + +- Speed up ``import mne`` by reducing function creation overhead (:gh:`8829` by `Eric Larson`_) + +- `mne.Report.parse_folder` now processes supported non-FIFF files by default, too (:gh:`8744` by `Richard Höchenberger`_) + +- `mne.Report` has gained the new methods `~mne.Report.add_custom_js` and `~mne.Report.add_custom_css` for adding user-defined JavaScript and styles (:gh:`8762`, :gh:`9037` by `Richard Höchenberger`_) + +- Add option to control the appearance of the opaque inside surface of the head to :ref:`mne coreg` (:gh:`8793` by `Eric Larson`_) + +- Add option to disable projection using ``--projoff`` in :ref:`mne browse_raw` (:gh:`9262` by `Eric Larson`_) + +- Add keypress to toggle projection using ``shift+j`` in :meth:`mne.io.Raw.plot` and :ref:`mne browse_raw` (:gh:`9262` by `Eric Larson`_) + +- Add support for non-FIF files in :ref:`mne browse_raw` using :func:`mne.io.read_raw` (:gh:`8806` by `Eric Larson`_) + +- Add :func:`mne.io.read_raw_nedf` for reading StarStim / Enobio NEDF files (:gh:`8734` by `Tristan Stenner`_) + +- Add :meth:`raw.describe() <mne.io.Raw.describe>` to display (or return) descriptive statistics for each channel (:gh:`8760` by `Clemens Brunner`_) + +- Add :meth:`annotations.to_data_frame() <mne.Annotations.to_data_frame>` to return annotations as a pandas dataframe (:gh:`8783` by `Robert Luke`_) + +- Add :func:`mne.preprocessing.compute_maxwell_basis` to compute the SSS basis function (:gh:`8822` by `Eric Larson`_) + +- Add the ``silhouette`` parameter to :class:`mne.viz.Brain` to display sharp edges and improve perception (:gh:`8771` by `Guillaume Favelier`_) + +- Add warning to :func:`mne.cov.compute_whitener` when an explicit ``rank`` parameter leads to a large increase in condition number (:gh:`8805` by `Eric Larson`_) + +- Add parameter ``align=True`` to `mne.viz.Brain.show_view` to make views relative to the closest canonical (MNI) axes rather than the native MRI surface RAS coordinates (:gh:`8794` by `Eric Larson`_) + +- Add ``auto_close`` to ``mne.Report.add_figs_to_section`` and ``mne.Report.add_slider_to_section`` to manage closing figures (:gh:`8730` by `Guillaume Favelier`_) + +- Add :func:`mne.write_head_bem` to support writing head surface files (:gh:`8841` by `Yu-Han Luo`_) + +- The signal of ``resp`` (respiratory) channels is now assumed to be in volts (:gh:`8858` by `Richard
Höchenberger`_) + +- Static type checkers like Pylance (which comes with VS Code) now display the parameters of many more functions correctly, largely improving overall usability for VS Code users (:gh:`8862` by `Richard Höchenberger`_) + +- Support new EEGLAB file format (:gh:`8874` by `Clemens Brunner`_) + +- Reading and writing FIFF files whose filenames end with ``_meg.fif.gz``, ``_eeg.fif(.gz)``, and ``_ieeg.fif(.gz)`` no longer emits a warning; this improves interoperability with BIDS-formatted datasets (:gh:`8868` by `Richard Höchenberger`_) + +- On macOS, we now set the environment variable ``QT_MAC_WANTS_LAYER`` to ``"1"`` if it hasn't been set explicitly by the user, in order to ensure that `~mne.SourceEstimate` plots work on macOS 11 with older versions of Qt and PyQt (:gh:`8959` by `Richard Höchenberger`_) + +- :func:`mne.time_frequency.EpochsTFR.average` now allows different ways of averaging, such as "median" or callable functions (:gh:`8879` by `Adam Li`_) + +- `~mne.Epochs` metadata can now be generated automatically from events using `mne.epochs.make_metadata` (:gh:`8834` by `Richard Höchenberger`_) + +- Interactions with sliders in `mne.Report` will now continuously update the linked content (it was updated only on mouse button release before) (:gh:`9023` by `Richard Höchenberger`_) + +- `mne.viz.plot_drop_log` and :meth:`mne.Epochs.plot_drop_log` now omit displaying the subject name in the title if ``subject=None`` is passed (:gh:`9015` by `Richard Höchenberger`_) + +- Plot ECoG tutorial now uses a real epilepsy seizure dataset and visualizes the seizure onset (:gh:`9087` by `Eric Larson`_, `Adam Li`_, `Alex Rockhill`_ and `Liberty Hamilton`_) + +- Improve documentation of the Report class (:gh:`9113` by `Martin Schulz`_) + +- Add :func:`mne.channels.DigMontage.add_estimated_fiducials` which will add LPA, RPA and Nasion fiducial points to the ``DigMontage`` object in the ``mri`` coordinate frame (:gh:`9118` by `Adam Li`_) + +- :func:`mne.io.anonymize_info` now also anonymizes the sex and hand fields when ``keep_his`` is ``False`` (:gh:`9103`, :gh:`9175` **by new contributor** |Rotem Falach|_ and `Richard Höchenberger`_) + +- Add parameter ``theme`` to :class:`mne.viz.Brain` for an optional dark mode (:gh:`9149` by `Martin Schulz`_, `Guillaume Favelier`_) + +- Add ``first_samp`` support for raw simulations with `mne.simulation.simulate_raw` and `mne.simulation.SourceSimulator` (:gh:`9166` by `Steven Bierer`_) + +- `~mne.Evoked` gained a ``baseline`` attribute that is automatically assembled based on the baseline of the averaged `~mne.Epochs` (:gh:`9210` by `Richard Höchenberger`_) + +- Add ``units`` parameter to :meth:`mne.io.Raw.get_data` to return data in the desired unit (:gh:`9136` by `Johann Benerradi`_ and `Stefan Appelhoff`_) + +- Add :func:`mne.preprocessing.equalize_bads` to interpolate bad channels in a list of `~mne.Evoked`, `~mne.Epochs` or `~mne.io.Raw` having different sets of bad channels (:gh:`9241` by `Alex Gramfort`_) + +- :meth:`mne.Epochs.equalize_event_counts` can now be called without providing a list of event names, and will equalize the counts of **all** event types present in the `~mne.Epochs` (:gh:`9261` by `Richard Höchenberger`_) + +- :func:`mne.preprocessing.find_eog_events` and :func:`mne.preprocessing.create_eog_epochs` now accept a list of channel names, allowing you to specify multiple EOG channels at once (:gh:`9269` by `Richard Höchenberger`_) + +- Improve performance of :func:`mne.set_bipolar_reference` (:gh:`9270` by `Martin Schulz`_) + +- Add support for
setting montages on fNIRS data, with built-in standard montages for Artinis OctaMon and Artinis Brite23 devices (:gh:`9141` by `Johann Benerradi`_, `Robert Luke`_ and `Eric Larson`_) + +- Enable support for reading SNIRF files with 2D optode positions (:gh:`9347` by `Robert Luke`_) + + +Bugs +~~~~ +- Fix bug with :func:`mne.time_frequency.tfr` where the plot misbehaved. The plot now always returns a list of figures instead of a single figure handle (:gh:`8013` **by new contributor** `Eduard Ort`_) + +- Fix bug with :func:`mne.viz.plot_evoked_topo` where the ``ylim`` parameters were swapped across channel types (:gh:`9207` **by new contributor** |Ram Pari|_) + +- Fix bug with :func:`mne.io.read_raw_edf` where µV was not correctly recognized (:gh:`9187` **by new contributor** |Sumalyo Datta|_) + +- Fix bug where :func:`mne.viz.plot_compare_evokeds` did not check the type of ``combine`` (:gh:`9151` **by new contributor** |Matteo Anelli|_) + +- Fix bug with :func:`mne.viz.plot_evoked_topo` where ``ylim`` was only being applied to the first channel in the dataset (:gh:`9162` **by new contributor** |Ram Pari|_) + +- Fix bug with :func:`mne.Epochs.plot_image` allowing interactive zoom to work properly (:gh:`9152` **by new contributor** |Maggie Clarke|_ and `Daniel McCloy`_) + +- Fix bug with :func:`mne.Epochs.plot_image` where the ``x_label`` was different depending on the evoked parameter (:gh:`9115` **by new contributor** |Matteo Anelli|_) + +- Fix bug with restricting :func:`mne.io.Raw.save` saving options to ``.fif`` and ``.fif.gz`` extensions (:gh:`9062` **by new contributor** |Valerii Chirkov|_) + +- Fix bug with :func:`mne.io.read_raw_kit` where missing marker coils were not handled (:gh:`8989` **by new contributor** |Judy D Zhu|_) + +- Fix bug with ``mne.connectivity.spectral_connectivity`` where the time axis in the Epochs data object was dropped.
(:gh:`8839` **by new contributor** |Anna Padee|_) + +- Fix bug with `mne.io.Raw.resample` to allow passing ``stim_picks='misc'`` (:gh:`8844` **by new contributor** |Enrico Varano|_ and `Eric Larson`_) + +- Fix bugs with `mne.io.read_raw_persyst` so that multiple ``Comments`` with the same name are allowed, and ``Comments`` with a "," character are now allowed (:gh:`8311` and :gh:`8806` **by new contributor** |Andres Rodriguez|_ and `Adam Li`_) + +- Fix zen mode and scalebar toggling for :meth:`raw.plot() <mne.io.Raw.plot>` when using the ``macosx`` matplotlib backend (:gh:`8688` by `Daniel McCloy`_) + +- Fix bug with :func:`mne.viz.snapshot_brain_montage` where the positions were incorrect (:gh:`8983` by `Eric Larson`_) + +- Fix bug with :func:`mne.preprocessing.maxwell_filter` where the eSSS basis had to exactly match the good channels instead of being a superset (:gh:`8675` by `Eric Larson`_) + +- Fix bug with ``mne.Report.add_bem_to_section`` where ``n_jobs != 1`` would cause ``n_jobs`` subsets of MRI images in some orientations to be flipped (:gh:`8713` by `Eric Larson`_) + +- Fix bug with :meth:`raw.plot() <mne.io.Raw.plot>` where annotations didn't immediately appear when changing window duration (:gh:`8689` by `Daniel McCloy`_) + +- Fix bug with :meth:`raw.plot() <mne.io.Raw.plot>` where ``scalings='auto'`` did not compute scalings using the full range of data (:gh:`8806` by `Eric Larson`_) + +- Fix bug with :meth:`raw.plot() <mne.io.Raw.plot>` where setting a ``lowpass`` could lead to non-data-channels not plotting (:gh:`8954` by `Eric Larson`_) + +- Fix bug with :meth:`mne.io.Raw.load_data` and :meth:`mne.Epochs.drop_bad` where ``verbose`` logging was not handled properly (:gh:`8884` by `Eric Larson`_) + +- Fix bug with :func:`mne.io.read_raw_nicolet` where header type values such as ``num_sample`` and ``duration_in_sec`` were not parsed properly (:gh:`8712` by `Alex Gramfort`_) + +- Fix bug with :func:`mne.preprocessing.read_ica_eeglab` when reading decompositions using PCA dimensionality reduction (:gh:`8780` by `Alex Gramfort`_ and `Eric Larson`_) + +- Fix bug with :func:`mne.minimum_norm.make_inverse_operator` where ``depth`` was errantly restricted to be less than or equal to 1.
(:gh:`8804` by `Eric Larson`_) + +- Fix bug with :func:`mne.stats.permutation_cluster_1samp_test` and related clustering functions when ``adjacency=None`` and ``out_type='indices'`` (:gh:`8842` by `Eric Larson`_) + +- Fix bug with :func:`mne.viz.plot_alignment` where plotting a sphere model could ignore the ``brain`` argument (:gh:`8857` by `Eric Larson`_) + +- Fix bug with :func:`mne.SourceEstimate.plot` where flatmaps were not positioned properly when using ``hemi='both'`` (:gh:`9315` by `Eric Larson`_) + +- Fix bug with :meth:`mne.Annotations.save` where files could be overwritten accidentally; this can now be controlled via the ``overwrite`` argument (:gh:`8896` by `Eric Larson`_) + +- Fix bug with ``replace`` argument of ``mne.Report.add_bem_to_section`` and ``mne.Report.add_slider_to_section`` (:gh:`8723` by `Eric Larson`_) + +- Fix bug with :func:`mne.chpi.compute_chpi_locs` where all cHPI coils being off would lead to an empty array of the wrong dimensionality (:gh:`8956` by `Eric Larson`_) + +- Fix bug with :func:`mne.extract_label_time_course` where labels, STCs, and the source space were not checked for compatible ``subject`` attributes (:gh:`9284` by `Eric Larson`_) + +- Fix bug with :func:`mne.grow_labels` where ``overlap=False`` could run forever or raise an error (:gh:`9317` by `Eric Larson`_) + +- Fix compatibility bugs with ``mne_realtime`` (:gh:`8845` by `Eric Larson`_) + +- Fix bug with `mne.viz.Brain` where non-inflated surfaces had an X-offset imposed by default (:gh:`8794` by `Eric Larson`_) + +- Fix bug with :ref:`mne coreg` where nasion values were not updated when clicking (:gh:`8793` by `Eric Larson`_) + +- Fix bug with matplotlib-based 3D plotting where ``Axes3D`` were not properly initialized in :func:`mne.viz.plot_source_estimates` (:gh:`8811` by `Chris Bailey`_) + +- Allow sEEG channel types in :meth:`mne.Evoked.plot_joint` (:gh:`8736` by `Daniel McCloy`_) + +- Fix bug where hidden annotations could be deleted interactively in :meth:`mne.io.Raw.plot` windows (:gh:`8831` by `Daniel McCloy`_) + +- Function :func:`mne.set_bipolar_reference` was not working when passing ``Epochs`` constructed with some ``picks`` (:gh:`8728` by `Alex Gramfort`_) + +- Fix anonymization issue with FIF files after an I/O round trip (:gh:`8731` by `Alex Gramfort`_) + +- Fix bug in `mne.preprocessing.ICA.plot_sources` where right-clicking component names could yield `~mne.preprocessing.ICA.plot_properties` windows for the wrong component if ``picks`` had been specified (:gh:`8996` by `Daniel McCloy`_) + +- Fix title not being shown in :func:`mne.viz.plot_montage` (:gh:`8752` by `Clemens Brunner`_) + +- `mne.io.read_raw_egi` now correctly handles `pathlib.Path` filenames (:gh:`8759` by `Richard Höchenberger`_) + +- `mne.viz.plot_evoked` and `mne.Evoked.plot` now correctly plot global field power (GFP) for EEG data when ``gfp=True`` or ``gfp='only'`` is passed (used to plot RMS).
For MEG data, we continue to plot the RMS, but now label it correctly as such (:gh:`8775` by `Richard Höchenberger`_) + +- Fix bug with :ref:`mne make_scalp_surfaces` where ``--overwrite`` was not functional (:gh:`8800` by `Yu-Han Luo`_) + +- Fix bug with :func:`mne.viz.plot_topomap` when plotting gradiometers with a missing channel in a pair (:gh:`8817` by `Alex Gramfort`_) + +- :meth:`epochs.crop() <mne.Epochs.crop>` now also adjusts the ``reject_tmin`` and ``reject_tmax`` attributes if necessary (:gh:`8821` by `Richard Höchenberger`_) + +- When creating `~mne.Epochs`, we now ensure that ``reject_tmin`` and ``reject_tmax`` cannot fall outside of the epochs' time interval anymore (:gh:`8821` by `Richard Höchenberger`_) + +- `~mne.io.read_raw_bti` erroneously treated response channels as respiratory channels (:gh:`8856` by `Richard Höchenberger`_) + +- The RMS trace shown in the time viewer of `~mne.SourceEstimate` plots is now correctly labeled as ``RMS`` (was ``GFP`` before) (:gh:`8965` by `Richard Höchenberger`_) + +- Fix bug with :meth:`mne.SourceEstimate.plot` and related functions where the scalars were not interactively updated properly (:gh:`8985` by `Eric Larson`_) + +- Fix bug with :func:`mne.channels.find_ch_adjacency` returning the wrong adjacency for Neuromag 122 data (:gh:`8891` by `Martin Schulz`_) + +- Fix :func:`mne.read_dipole` yielding :class:`mne.Dipole` objects that could not be indexed (:gh:`8963` by `Marijn van Vliet`_) + +- Fix bug when setting ``n_jobs > 1`` in :meth:`mne.Report.parse_folder` (:gh:`9109` by `Martin Schulz`_) + +- Fix bug with :meth:`mne.Evoked.plot_image` where an incorrect ``clim`` parameter did not raise any error (:gh:`9115` **by new contributor** |Matteo Anelli|_) + +- Fix bug with ``mne.io.Raw.pick`` where incorrect fNIRS types were returned (:gh:`9178` by `Robert Luke`_) + +- Fix bug when passing both ``axes`` and ``picks`` to `mne.viz.plot_compare_evokeds` (:gh:`9252` by `Daniel McCloy`_) + +- Improved string representation of `~mne.Epochs` containing multiple event types; improved (and more mathematically correct) ``evoked.comment`` in the `mne.combine_evoked` output; and better (and often more concise) legend labels in the figures created via `~mne.viz.plot_compare_evokeds` (:gh:`9027` by `Richard Höchenberger`_) + +- :func:`mne.preprocessing.find_ecg_events` now correctly handles the situation where no ECG activity could be detected, and correctly returns an empty array of ECG events (:gh:`9236` by `Richard Höchenberger`_) + +- Fix bug with ``picks`` attribute for `~mne.Epochs` after calling :meth:`mne.Epochs.add_channels` (:gh:`9246` by `Alex Gramfort`_) + +- Fix bug where ``backend='notebook'`` could not be used in :meth:`mne.SourceEstimate.plot` (:gh:`9305` by `Jean-Rémi King`_) + +- `mne.preprocessing.compute_proj_eog` and `mne.preprocessing.compute_proj_ecg` now return empty lists if no EOG or ECG events, respectively, could be found. Previously, we'd return ``None`` in these situations, which does not match the documented behavior of returning a list of projectors (:gh:`9277` by `Richard Höchenberger`_) + +API changes +~~~~~~~~~~~ +- Introduced new ``'auto'`` settings for ``ICA.max_iter``. The old default ``max_iter=200`` will be removed in MNE-Python 0.24 (:gh:`9099` **by new contributor** |Cora Kim|_) + +- ``mne.viz.plot_sensors_connectivity`` now allows setting the colorbar label via the ``cbar_label`` parameter (:gh:`9248` by `Daniel McCloy`_) + +- ``mne.read_selection`` has been deprecated in favor of `mne.read_vectorview_selection`.
``mne.read_selection`` will be removed in MNE-Python 0.24 (:gh:`8870` by `Richard Höchenberger`_) + +- ``mne.beamformer.tf_dics`` has been deprecated and will be removed in MNE-Python 0.24 (:gh:`9122` by `Britta Westner`_) + +- Fitting `~mne.preprocessing.ICA` on baseline-corrected `~mne.Epochs`, and / or applying it on baseline-corrected `~mne.Epochs` or `~mne.Evoked` data will now display a warning. Users are advised to only baseline correct their data after cleaning is completed (:gh:`9033` by `Richard Höchenberger`_) + +- Supplying multiple channel names to `mne.preprocessing.find_eog_events` or `mne.preprocessing.compute_proj_eog` as a string of comma-separated channel names has been deprecated; please pass a list of channel names instead. Support for comma-separated strings will be removed in MNE-Python 0.24 (:gh:`9269` by `Richard Höchenberger`_) + +- The default in :func:`mne.beamformer.make_dics` of ``real_filter=False`` will change to ``real_filter=True`` in 0.24 (:gh:`9340` by `Britta Westner`_) + +Authors +~~~~~~~ + +People who contributed to this release in alphabetical order +(people with a + are first time contributors): + +* Adam Li +* Alex Rockhill +* Alexandre Gramfort +* Ana Radanovic +* Andres Rodriguez+ +* Anna Padee+ +* Apoorva Karekal+ +* Britta Westner +* Catalina Magalvan +* Christian Brodbeck +* Christian Mista +* Christian O'Reilly +* Christina Zhao +* Christopher J. Bailey +* Clemens Brunner +* Cora Kim+ +* Daniel McCloy +* Denis A. Engemann +* Dominik Welke+ +* Eduard Ort +* Enrico Varano+ +* Eric Larson +* Erica Peterson+ +* Evgenii Kalenkovich +* Felix Klotzsche+ +* Giorgio Marinato +* Guillaume Favelier +* Jack Zhang+ +* Jean-Rémi King +* Johann Benerradi +* Joris Van den Bossche +* Judy D Zhu+ +* Liberty Hamilton +* Luke Bloy +* Maggie Clarke+ +* Mainak Jas +* Manorama Kadwani +* Marijn van Vliet +* Martin Schulz +* Matt Sanderson+ +* Matteo Anelli+ +* Nicolas Gensollen +* Ram Pari+ +* Richard Höchenberger +* Richard Koehler+ +* Robert Luke +* Rotem Falach+ +* Sebastien Treguer +* Silvia Cotroneo+ +* Stefan Appelhoff +* Steven Bierer +* Sumalyo Datta+ +* Timon Merk +* Tristan Stenner +* Valerii Chirkov+ +* Victoria Peterson +* Yu-Han Luo +* Zhi Zhang+ diff --git a/doc/changes/v0.24.rst b/doc/changes/v0.24.rst new file mode 100644 index 00000000000..f36191d8af1 --- /dev/null +++ b/doc/changes/v0.24.rst @@ -0,0 +1,411 @@ +.. _changes_0_24_0: + +Version 0.24.0 (2021-11-03) +--------------------------- + +.. |David Julien| replace:: **David Julien** + +.. |Dominique Makowski| replace:: **Dominique Makowski** + +.. |Romain Derollepot| replace:: **Romain Derollepot** + +.. |Jan Sosulski| replace:: **Jan Sosulski** + +.. |Xiaokai Xia| replace:: **Xiaokai Xia** + +.. |Marian Dovgialo| replace:: **Marian Dovgialo** + +.. |Pierre-Antoine Bannier| replace:: **Pierre-Antoine Bannier** + +.. |Darin Erat Sleiter| replace:: **Darin Erat Sleiter** + +.. |Mathieu Scheltienne| replace:: **Mathieu Scheltienne** + +.. |Timothy Gates| replace:: **Timothy Gates** + +.. |Reza Shoorangiz| replace:: **Reza Shoorangiz** + +.. |Evgeny Goldstein| replace:: **Evgeny Goldstein** + +.. |Joshua Teves| replace:: **Joshua Teves** + +.. |Riessarius Stargardsky| replace:: **Riessarius Stargardsky** + +.. |John Samuelsson| replace:: **John Samuelsson** + +Enhancements +~~~~~~~~~~~~ +.. 
- Add something cool (:gh:`9192` **by new contributor** |New Contributor|_) + +- Add ``pooch`` to system information reports (:gh:`9801` **by new contributor** |Joshua Teves|_) + +- Get annotation descriptions from the name field of SNIRF stimulus groups when reading SNIRF files via `mne.io.read_raw_snirf` (:gh:`9575` **by new contributor** |Darin Erat Sleiter|_) + +- Add support for NIRSport and NIRSport2 devices to `mne.io.read_raw_nirx` (:gh:`9348` and :gh:`9401` **by new contributor** |David Julien|_, **new contributor** |Romain Derollepot|_, `Robert Luke`_, and `Eric Larson`_) + +- New function :func:`mne.Dipole.to_volume_labels` to get atlas labels for MRI coordinates. (:gh:`9376` by **new contributor** |Marian Dovgialo|_) + +- Add support for SURE parameter selection in :func:`mne.inverse_sparse.mixed_norm` and make the ``alpha`` parameter default to ``'sure'`` (:gh:`9430` by **new contributor** |Pierre-Antoine Bannier|_ and `Alex Gramfort`_) + +- Speed up BCD solver in :func:`mne.inverse_sparse.mixed_norm` by adding Anderson acceleration (:gh:`9481` by **new contributor** |Pierre-Antoine Bannier|_ and `Alex Gramfort`_) + +- Add support for list of channel types for EEG/sEEG/ECoG/DBS referencing (:gh:`9637` **by new contributor** |Mathieu Scheltienne|_) + +- Add the ability to set point size and line width in :func:`mne.viz.plot_sensors` and related functions (:gh:`9872` **by new contributor** |Dominique Makowski|_) + +- Add support for changing the default event ID used by :func:`mne.make_fixed_length_epochs` (:gh:`9782` **by new contributor** |Mathieu Scheltienne|_) + +- Improve documentation for arguments ``threshold``, ``method`` and ``measure`` of ICA methods `~mne.preprocessing.ICA.find_bads_ecg`, `~mne.preprocessing.ICA.find_bads_ref` and `~mne.preprocessing.ICA.find_bads_eog` (:gh:`9858` **by new contributor** |Mathieu Scheltienne|_) + +- Add support for more than 3 source estimates in :func:`mne.viz.plot_sparse_source_estimates` (:gh:`9640` **by new contributor** |Pierre-Antoine Bannier|_ and `Alex Gramfort`_) + +- Add cortical signal suppression (CSS) with :func:`mne.preprocessing.cortical_signal_suppression` (:gh:`6888`, :gh:`9892` **by new contributor** |John Samuelsson|_ and `Eric Larson`_) + +- Show all good channel types and counts when printing a :class:`mne.Info` in the notebook (:gh:`9725` by `Valerii Chirkov`_ and `Eric Larson`_) + +- Speed up point decimation in :func:`mne.io.read_raw_kit` by vectorization and use of :class:`scipy.spatial.cKDTree` (:gh:`9568` by `Jean-Rémi King`_ and `Eric Larson`_) + +- Add ability to export EDF+ files using :func:`mne.export.export_raw` (:gh:`9643` by `Adam Li`_) + +- New function :func:`mne.chpi.get_chpi_info` to retrieve basic information about the cHPI system used when recording MEG data (:gh:`9369` by `Richard Höchenberger`_) + +- New namespace `mne.export` created to contain functions (such as `mne.export.export_raw` and `mne.export.export_epochs`) for exporting data to non-FIF formats (:gh:`9427` by `Eric Larson`_) + +- Add support for Hitachi fNIRS devices in `mne.io.read_raw_hitachi` (:gh:`9391` by `Eric Larson`_) + +- Add support for ``picks`` in :func:`mne.stc_near_sensors` (:gh:`9396` by `Eric Larson`_) + +- Add label methods `mne.Label.compute_area` and `mne.Label.distances_to_outside` (:gh:`9424` by `Eric Larson`_) + +- Add projections when printing a :class:`mne.Info` in the notebook (:gh:`9403` by `Alex Gramfort`_) + +- Add new function :func:`mne.bem.make_scalp_surfaces` to compute scalp surfaces for coregistration
(:gh:`9463` by `Alex Gramfort`_) + +- Add support for interpolating oxy- and deoxyhaemoglobin data types (:gh:`9431` by `Robert Luke`_) + +- Add support for SNIRF files in :class:`mne.Report` (:gh:`9443` by `Robert Luke`_) + +- Add support for exporting MFF evoked files using `mne.export.export_evokeds` and `mne.export.export_evokeds_mff` (:gh:`9406` by `Evan Hathaway`_) + +- :func:`mne.concatenate_raws`, :func:`mne.concatenate_epochs`, and :func:`mne.write_evokeds` gained a new parameter ``on_mismatch``, which controls behavior in case not all of the supplied instances share the same device-to-head transformation (:gh:`9438` by `Richard Höchenberger`_) + +- Add support for multiple datablocks (acquisitions with pauses) in :func:`mne.io.read_raw_nihon` (:gh:`9437` by `Federico Raimondo`_) + +- Add new function :func:`mne.preprocessing.annotate_break` to automatically detect and mark "break" periods without any marked experimental events in the continuous data (:gh:`9445` by `Richard Höchenberger`_) + +- Add ``time_format`` to :meth:`mne.io.Raw.plot` and :meth:`mne.preprocessing.ICA.plot_sources` to display acquisition time on the x-axis (:gh:`9419` by `Martin Schulz`_) + +- Speed up :meth:`mne.decoding.TimeDelayingRidge.predict` by switching to FFT-based convolution (:gh:`9458` by `Ross Maddox`_) + +- Show multiple colors and linestyles for excluded components with :class:`mne.Evoked` in :meth:`mne.preprocessing.ICA.plot_sources` (:gh:`9444` by `Martin Schulz`_) + +- Add functions for aligning MRI and CT data `mne.transforms.compute_volume_registration` and `mne.transforms.apply_volume_registration` (:gh:`9503` by `Alex Rockhill`_ and `Eric Larson`_) + +- Add tutorial for how to process image (CT and MR) files in order to localize electrode contacts for intracranial recordings :ref:`tut-ieeg-localize` (:gh:`9484` by `Alex Rockhill`_) + +- Add vertex number in source space for :meth:`mne.VolSourceEstimate.plot` and :func:`mne.viz.plot_volume_source_estimates` (:gh:`9697` by `Eric Larson`_) + +- Add support for colormap normalization in :func:`mne.viz.plot_topomap` (:gh:`9468` by `Clemens Brunner`_) + +- Update :func:`mne.preprocessing.realign_raw` with the NumPy-recommended polynomial fitting method (:gh:`9514` by `Erica Peterson`_) + +- Add a warning to `mne.preprocessing.nirs.beer_lambert_law` when source-detector distances are zero (:gh:`9541` by `Eric Larson`_) + +- Add ``exclude`` parameter to :meth:`mne.io.Raw.plot_psd` and :meth:`mne.Epochs.plot_psd` (:gh:`9519` by `Clemens Brunner`_) + +- Add `mne.Annotations.rename` to rename annotation descriptions (:gh:`9525` by `Robert Luke`_) + +- Add `mne.Annotations.set_durations` to set annotation durations (:gh:`9529` by `Robert Luke`_) + +- The ``exclude`` parameter in `mne.io.read_raw_edf`, `mne.io.read_raw_bdf`, and `mne.io.read_raw_gdf` now also accepts a regular expression (:gh:`9558` by `Clemens Brunner`_) + +- Add :meth:`mne.Evoked.get_data` method to :class:`mne.Evoked` (:gh:`9555` by `Stefan Appelhoff`_) + +- The :meth:`mne.Epochs.get_data` method now has a ``units`` parameter (:gh:`9553` by `Stefan Appelhoff`_) + +- Add ``mne.warp_montage_volume`` to use an anatomical volume with channel locations at high or low intensities to apply a symmetric diffeomorphic registration to a template (e.g.
fsaverage) space (:gh:`9544` by `Alex Rockhill`_) + +- :meth:`mne.io.Raw.get_data`, :meth:`mne.Epochs.get_data`, and :meth:`mne.Evoked.get_data` methods now have ``tmin`` and ``tmax`` parameters to specify data window in seconds to get (:gh:`9556` by `Stefan Appelhoff`_) + +- The :meth:`mne.Evoked.get_data` method now has a ``units`` parameter (:gh:`9578` by `Stefan Appelhoff`_) + +- Add `mne.chpi.compute_chpi_snr` and `mne.viz.plot_chpi_snr` for computing and plotting the time-varying SNR of continuously-active HPI coils (:gh:`9570` by `Daniel McCloy`_ and `Jussi Nurminen`_) + +- Add :func:`mne.get_montage_volume_labels` to find the regions of interest in a Freesurfer atlas anatomical segmentation for an intracranial electrode montage and :func:`mne.viz.plot_channel_labels_circle` to plot them (:gh:`9545` by `Alex Rockhill`_) + +- Add :func:`mne.viz.Brain.add_volume_labels` to plot subcortical surfaces and other regions of interest (:gh:`9540` by `Alex Rockhill`_ and `Eric Larson`_) + +- Add custom cortex curvature colors in :class:`mne.viz.Brain` via the ``cortex`` argument (:gh:`9750` by `Eric Larson`_) + +- Add :meth:`mne.channels.DigMontage.apply_trans` to apply a transform directly to a montage (:gh:`9601` by `Alex Rockhill`_) + +- :meth:`mne.preprocessing.ICA.fit` now emits a warning if any of the ``start``, ``stop``, ``reject``, and ``flat`` parameters are passed when performing ICA on `~mne.Epochs`. These parameters only have an effect on `~mne.io.Raw` data and were previously silently ignored in the case of `~mne.Epochs` (:gh:`9605` by `Richard Höchenberger`_) + +- :func:`mne.viz.plot_alignment` now emits a warning (instead of aborting with an exception) if the surfaces contain topological defects (:gh:`9614` by `Richard Höchenberger`_) + +- The coregistration GUI can now display the head surface even if there are topological defects. Previously, a low-resolution standard head unsuitable for individualized coregistration was displayed (:gh:`9614` by `Richard Höchenberger`_) + +- New :class:`mne.coreg.Coregistration` object to allow coregistration scripting without a dependency on mayavi (:gh:`9516` by `Guillaume Favelier`_). 
+ +- Add :meth:`mne.viz.Brain.add_head` to plot the head surface (:gh:`9618` by `Alex Rockhill`_) + +- Add :func:`mne.channels.read_dig_localite` to read Localite electrode location files (:gh:`9658` by `Clemens Brunner`_) + +- Add :meth:`mne.viz.Brain.add_sensors` to plot sensor locations (:gh:`9585` by `Alex Rockhill`_) + +- Add :func:`mne.coreg.estimate_head_mri_t` to estimate the head->mri transform from fsaverage fiducials (:gh:`9585` by `Alex Rockhill`_) + +- Add :meth:`mne.channels.DigMontage.add_mni_fiducials` to add fiducials to a montage in MNI coordinates (:gh:`9682` by `Alex Rockhill`_) + +- Add :meth:`mne.channels.DigMontage.remove_fiducials` to remove fiducials under rare circumstances when the coordinate frame should not be set to "head" (:gh:`9684` by `Alex Rockhill`_) + +- Add remove methods for mesh objects in :class:`mne.viz.Brain` (:gh:`9688` by `Alex Rockhill`_) + +- Add ``--show-paths`` and ``--developer`` options to :ref:`mne sys_info` (:gh:`9789` by `Eric Larson`_) + +- Add ability to export EDF+ files using :func:`mne.export.export_raw` (:gh:`9643` by `Adam Li`_) + +- Reading EDF files via :func:`mne.io.read_raw_edf` now can infer channel type from the signal label in the EDF header (:gh:`9694` by `Adam Li`_) + +- Add ``mne.gui.locate_ieeg`` to locate intracranial electrode contacts from a CT, an MRI (with Freesurfer ``recon-all``) and the channel names from an :class:`mne.Info` object (:gh:`9586` by `Alex Rockhill`_) + +- All data fetching code now relies on ``pooch``, which is an added optional requirement for dataset fetchers (:gh:`9742` by `Adam Li`_ and `Daniel McCloy`_) + +- Add phantom FreeSurfer subject fetcher :func:`mne.datasets.fetch_phantom` to allow fetching the Otaniemi phantom (:gh:`9796` by `Eric Larson`_) + +- Add :func:`mne.datasets.fetch_dataset` to allow packages to fetch datasets in a similar fashion to MNE (:gh:`9763` by `Adam Li`_) + +- Add full ECoG dataset to MNE-misc-data and demonstrate its use in :ref:`ex-electrode-pos-2d` and :ref:`tut-ieeg-localize` (:gh:`9784` by `Alex Rockhill`_) + +- Add options ``tol`` and ``accuracy`` to :func:`mne.fit_dipole` to control optimization (:gh:`9810` by `Eric Larson`_) + +- Completely revamp the `~mne.Report` experience: new HTML layout, many new methods, more flexibility; the functionality is demonstrated in :ref:`tut-report` (:gh:`9754`, :gh:`9828`, :gh:`9847`, :gh:`9860`, :gh:`9861`, :gh:`9862`, :gh:`9863`, :gh:`9871`, :gh:`9881`, :gh:`9891`, :gh:`9901` by `Richard Höchenberger`_, `Eric Larson`_, and `Alex Gramfort`_) + +- Add basic HTML representations of `~mne.Forward` and `~mne.minimum_norm.InverseOperator` instances for a nicer Jupyter experience (:gh:`9754` by `Richard Höchenberger`_) + +- `mne.io.read_raw_brainvision` now handles ASCII data with comma-separated values, as may be exported from BrainVision Analyzer (:gh:`9795` by `Richard Höchenberger`_) + +- Add :func:`mne.preprocessing.ieeg.project_sensors_onto_brain` to project ECoG sensors onto the pial surface to compensate for brain shift (:gh:`9800` by `Alex Rockhill`_) + +- Add support for data acquired with NIRx devices using Aurora software version 2021.9 (:gh:`9800` by `Robert Luke`_, and `Eric Larson`_) + +- All functions for reading and writing files should now automatically handle ``~`` (the tilde character) and expand it to the user's home directory. Should you come across any function that doesn't do it, please do let us know! 
(:gh:`9613`, :gh:`9845` by `Richard Höchenberger`_) + +- All functions accepting a FreeSurfer subjects directory via a ``subjects_dir`` parameter can now consume :class:`pathlib.Path` objects too (used to be only strings) (:gh:`9613` by `Richard Höchenberger`_) + +- Add support for colormap normalization in :meth:`mne.time_frequency.AverageTFR.plot` (:gh:`9851` by `Clemens Brunner`_) + +- Add support for BIDS-compatible filenames when splitting big epochs files via the new ``split_naming`` parameter in :meth:`mne.Epochs.save` (:gh:`9869` by `Denis Engemann`_) + +- Add ``by_event_type`` parameter to :meth:`mne.Epochs.average` to create a list containing an :class:`mne.Evoked` object for each event type (:gh:`9859` by `Marijn van Vliet`_) + +- Add pyqtgraph as a new backend for :meth:`mne.io.Raw.plot` (:gh:`9687` by `Martin Schulz`_) + +- Add :func:`mne.viz.set_browser_backend`, :func:`mne.viz.use_browser_backend` and :func:`mne.viz.get_browser_backend` to set matplotlib or pyqtgraph as backend for :meth:`mne.io.Raw.plot` (:gh:`9687` by `Martin Schulz`_) + +Bugs +~~~~ +- Fix bug in :meth:`mne.io.Raw.pick` and related functions when the parameter list contains channels which are not in the info instance (:gh:`9708` **by new contributor** |Evgeny Goldstein|_) + +- Fix a few typos (:gh:`9706` **by new contributor** |Timothy Gates|_) + +- Fix bug with :meth:`mne.Epochs.crop` and :meth:`mne.Evoked.crop` when ``include_tmax=False``, where the last sample was always cut off, even when ``tmax > epo.times[-1]`` (:gh:`9378` **by new contributor** |Jan Sosulski|_) + +- Fix bug with `mne.io.read_raw_curry` to allow reading Curry 7 and 8 event files with '.ceo' and '.cdt.ceo' extensions (:gh:`9381`, :gh:`9712` by **new contributor** |Xiaokai Xia|_, `Daniel McCloy`_, and **by new contributor** |Reza Shoorangiz|_) + +- Fix bug with `mne.compute_rank`, `mne.cov.compute_whitener` and `mne.preprocessing.ICA.fit` when explicit picks including bad channels are provided (:gh:`9719` **by new contributor** |Mathieu Scheltienne|_) + +- Fix bug with :func:`mne.concatenate_epochs` where the concatenated events could end up in non-chronological order. (:gh:`9765` **by new contributor** |Jan Sosulski|_) + +- Fix bug where :func:`mne.time_frequency.psd_array_welch` and :func:`mne.time_frequency.psd_array_multitaper` failed to handle a negative integer for the ``n_jobs`` argument (:gh:`9849` **by new contributor** |Riessarius Stargardsky|_) + +- Fix bug where resampling returned empty instances (:gh:`9897` **by new contributor** |Mathieu Scheltienne|_ and `Eric Larson`_) + +- Fix bug with `mne.io.Raw.get_montage` so that it now returns both good and bad channels in the montage.
(:gh:`9920` **by new contributor** |Mathieu Scheltienne|_) + +- Fix bug with :func:`mne.io.read_raw_nihon` where latin-1 annotations could not be read (:gh:`9384` by `Alex Gramfort`_) + +- Fix bug when printing a :class:`mne.io.RawArray` in the notebook (:gh:`9404` by `Alex Gramfort`_) + +- Fix bug when computing rank from info for SSS data with only gradiometers or magnetometers (:gh:`9435` by `Alex Gramfort`_) + +- Fix bug with `mne.preprocessing.find_bad_channels_maxwell` where all-flat segments could lead to an error (:gh:`9531` by `Eric Larson`_) + +- Fix bug with `mne.io.Raw.set_montage` and related functions where the channel coordinate frame was not properly set to head (:gh:`9447` by `Eric Larson`_) + +- Fix bug with `mne.io.read_raw_fieldtrip` and `mne.read_epochs_fieldtrip` where channel positions were not set properly (:gh:`9447` by `Eric Larson`_) + +- Fix bug with :func:`mne.io.read_raw_kit` where omitting HPI coils could lead to an :exc:`python:AssertionError` on reading (:gh:`9612` by `Eric Larson`_) + +- Fix bug with :func:`mne.io.read_raw_edf` where reading only stimulus channels would lead to an error (:gh:`9831` by `Eric Larson`_) + +- Fix bug with `mne.preprocessing.nirs.optical_density` where protection against zero values was not guaranteed (:gh:`9522` by `Eric Larson`_) + +- :func:`mne.concatenate_raws` now raises an exception if ``raw.info['dev_head_t']`` differs between files. This behavior can be controlled using the new ``on_mismatch`` parameter (:gh:`9438` by `Richard Höchenberger`_) + +- Fixed bug in :meth:`mne.Epochs.drop_bad` where subsequent rejections failed if they only specified thresholds for a subset of the channel types used in a previous rejection (:gh:`9485` by `Richard Höchenberger`_). + +- Fix bug with `mne.simulation.simulate_evoked`, `mne.apply_forward`, and `mne.apply_forward_raw` where systems with EEG channels that come before MEG channels would have them mixed up in the output evoked or raw object (:gh:`9513` by `Eric Larson`_) + +- In :func:`mne.viz.plot_ica_scores` and :meth:`mne.preprocessing.ICA.plot_scores`, the figure and axis titles no longer overlap when plotting only a single EOG or ECG channel (:gh:`9489` by `Richard Höchenberger`_). + +- Ensure `mne.io.Raw.get_montage` works with SNIRF data (:gh:`9524` by `Robert Luke`_) + +- Fix bug in :func:`mne.setup_volume_source_space` where non-finite positions could be used in a discrete source space (:gh:`9603` by `Eric Larson`_) + +- Fix bug in :func:`mne.viz.plot_topomap` (and related methods like :meth:`mne.Evoked.plot_topomap`) where large distances between electrodes (higher than head radius) would lead to an error (:gh:`9528` by `Mikołaj Magnuski`_). + +- Fix bug in `mne.viz.plot_topomap` (and related methods) where passing ``axes`` that are part of a matplotlib figure that uses a constrained layout would emit warnings (:gh:`9558` by `Eric Larson`_) + +- Fix bug in :func:`mne.make_forward_solution` where impossible sensor-BEM geometries were not checked (:gh:`9893` by `Eric Larson`_) + +- Fix bug in :func:`mne.concatenate_epochs` when concatenating :class:`mne.Epochs` objects with 0 events (:gh:`9535` by `Marijn van Vliet`_) + +- Fix bug in :func:`mne.viz.Brain.screenshot` where the RGBA mode was not supported (:gh:`9564` by `Guillaume Favelier`_).
+ +- Fix bug in :func:`mne.io.read_raw_egi` where reading data from a data segment that is part of an acquisition skip would lead to an error (:gh:`9565` by `Eric Larson`_) + +- Prevent :meth:`mne.io.Raw.plot` windows from spawning at sizes too small to properly render (:gh:`9629` by `Daniel McCloy`_) + +- Fix bug where :meth:`mne.io.Raw.plot` raised an error when plotting raw dipole data (:gh:`9771` by `Eric Larson`_) + +- Fix bug in :meth:`mne.preprocessing.ICA.find_bads_ecg` where passing ``start`` and ``stop`` led to erroneous data windows depending on the combination of Raw, Epochs, Evoked, and the type (int, float, None) of ``start`` and ``stop`` (:gh:`9556` by `Stefan Appelhoff`_) + +- Fix bug in :func:`mne.viz.set_3d_backend` and :func:`mne.viz.get_3d_backend` where the PyVistaQt-based backend was ambiguously named ``'pyvista'`` instead of ``'pyvistaqt'``; use ``set_3d_backend('pyvistaqt')`` and expect ``'pyvistaqt'`` as the output of :func:`mne.viz.get_3d_backend` instead of ``'pyvista'``, and consider using ``get_3d_backend().startswith('pyvista')`` for example for backward-compatible conditionals (:gh:`9607` by `Guillaume Favelier`_) + +- Fix bug in :func:`mne.viz.plot_compare_evokeds` where confidence bands were not drawn if only one condition was plotted (:gh:`9663` by `Daniel McCloy`_) + +- Fix bug where setting a montage with fNIRS data resulted in an "unknown" coordinate frame when it should have been "head" (:gh:`9630` by `Alex Rockhill`_) + +- Fix bug where "seeg", "ecog", "dbs" and "fnirs" data had an unknown coordinate frame upon loading from a file when it should have been "head" (:gh:`9580` by `Alex Rockhill`_) + +- Raise error when no ``trans`` is provided to :func:`mne.viz.plot_alignment` when required instead of assuming an identity head->mri transform (:gh:`9585` by `Alex Rockhill`_) + +- Fix bug where :meth:`mne.Epochs.equalize_event_counts` failed when only one good epoch existed for one of the event types (:gh:`9758` by `Daniel McCloy`_) + +- Fix bug where channels with a dollar sign ($) were not being labeled "misc" in :func:`mne.io.read_raw_nihon` (:gh:`9695` by `Adam Li`_) + +- Fix bug where :func:`mne.io.read_raw_persyst` was lower-casing events it found in the ``.lay`` file (:gh:`9746` by `Adam Li`_) + +- Fix bug with Qhull when plotting OPM sensors in :func:`mne.viz.plot_alignment` (:gh:`9799` and :gh:`9804` by `Eric Larson`_) + +- Fix bug where :func:`mne.io.read_raw_snirf` was including the landmark index as a spatial coordinate (:gh:`9777` by `Robert Luke`_) + +- Fix bug where `mne.Annotations` were not appending channel names when being added together (:gh:`9780` by `Adam Li`_) + +- `mne.viz.plot_evoked_joint` now correctly handles a ``times='peaks'`` parameter value (:gh:`9754` by `Richard Höchenberger`_) + +- In `mne.viz.plot_compare_evokeds`, slightly adjust the x-axis limit calculation to avoid drawing an axis that is too short (:gh:`9754` by `Richard Höchenberger`_) + +- Fix a bug with :func:`mne.viz.snapshot_brain_montage` with the Mayavi backend where electrodes were shown in the wrong positions, and with the PyVista backend where the snapshot always contained the electrodes but could be missing a hemisphere (:gh:`9933` by `Eric Larson`_) + +- In :meth:`mne.io.Raw.plot_psd` and :meth:`mne.Epochs.plot_psd`, avoid overlap between subplot titles and x-axis tick labels (:gh:`9864` by `Richard Höchenberger`_) + +- In :meth:`mne.Epochs.plot_psd_topomap`, the data is now scaled to match the output of :meth:`mne.Epochs.plot_psd` (:gh:`9873` by
`Richard Höchenberger`_) + +- Fix channel locations with ``NaN`` values causing all channel locations not to be plotted in :func:`mne.viz.Brain.add_sensors` (:gh:`9911` by `Alex Rockhill`_) + +- Fix infinite loop bug in :func:`mne.get_montage_volume_labels` (:gh:`9940` by `Alex Rockhill`_) + +API changes +~~~~~~~~~~~ +- The :class:`mne.Info` class is now more strictly checked when attributes are set. Setting most attributes directly will lead to an error in the next release. See the docstring of :class:`mne.Info` for more information (:gh:`9867` **by new contributor** |Mathieu Scheltienne|_) + +- The Mayavi 3D backend has been deprecated (:gh:`9904` by `Eric Larson`_): + + - In 3D plotting functions, use PyVista-based backends (``'pyvistaqt'`` and ``'notebook'``) as PySurfer/Mayavi will be removed in the next version + + - To use the :ref:`mne kit2fiff` command-line GUI for fiducial markers, install the separate ``mne-kit-gui`` module, which can still be launched via ``$ mne kit2fiff`` + + - The ``mne.gui.fiducials`` GUI has been deprecated, use :ref:`mne coreg` or ``mne_kit_gui.fiducials()`` to set MRI fiducials instead + +- In `mne.compute_source_morph`, the ``niter_affine`` and ``niter_sdr`` parameters have been replaced by ``niter`` and ``pipeline`` parameters for more consistent and finer-grained control of registration/warping steps and iteration (:gh:`9505` by `Alex Rockhill`_ and `Eric Larson`_) + +- Split :func:`mne.viz.Brain.show_view` argument ``view`` into ``azimuth``, ``elevation`` and ``focalpoint`` for clearer view setting and make the default for ``row`` and ``col`` apply to all rows and columns (:gh:`9596` by `Alex Rockhill`_) + +- Deprecate ``solver='prox'`` in :func:`mne.inverse_sparse.mixed_norm` in favor of ``solver='cd'`` and ``solver='bcd'`` as coordinate descent solvers consistently outperform proximal gradient descent (:gh:`9608` by `Pierre-Antoine Bannier`_) + +- All ``mne.connectivity`` functions have moved to the ``mne-connectivity`` package; they are deprecated in MNE-Python and will be removed in version 0.25 (:gh:`9493` by `Adam Li`_). + +- :func:`mne.inverse_sparse.mixed_norm` now simply warns when source estimates contain no dipole, e.g. if data are too noisy and alpha is based on SURE (:gh:`9685` by `Alex Gramfort`_) + +- Deprecate functions :samp:`mne.datasets.{DATASET_NAME}.has_{DATASET_NAME}_data()` for these datasets: ``epilepsy_ecog``, ``fnirs_motor``, ``multimodal``, ``opm``, ``phantom_4dbti``, ``refmeg_noise``, ``sample``, ``somato``, and ``ssvep``. 
Use the generic :func:`mne.datasets.has_dataset` instead (:gh:`9781` by `Daniel McCloy`_ and `Adam Li`_) + +- :class:`mne.Report` modernization has led to multiple deprecations (:gh:`9754` by `Richard Höchenberger`_): + + - ``mne.Report.add_figs_to_section`` and ``mne.Report.add_slider_to_section`` have been deprecated in favor of :meth:`mne.Report.add_figure` + + - ``mne.Report.add_images_to_section`` has been deprecated in favor of :meth:`mne.Report.add_image` + + - ``mne.Report.add_bem_to_section`` has been deprecated in favor of :meth:`mne.Report.add_bem` + + - ``mne.Report.add_htmls_to_section`` has been deprecated in favor of :meth:`mne.Report.add_html` + + - In :meth:`mne.Report.parse_folder`, the ``sort_sections`` parameter has been deprecated in favor of ``sort_content`` + + - In :meth:`mne.Report.remove`, the ``caption`` and ``section`` parameters have been deprecated in favor of ``title`` and ``tags``, respectively + + - The ``mne.Report.sections`` attribute has been deprecated in favor of ``mne.Report.tags`` + + - The ``mne.Report.fnames`` attribute has been deprecated without replacement + +- :meth:`mne.Epochs.plot` and :func:`mne.viz.plot_epochs` gained parameter ``show_scalebars`` analogous to :meth:`mne.viz.plot_raw` (:gh:`9815` by `Daniel McCloy`_) + +- The output folder name for HF_SEF datasets is now ``hf_sef`` instead of ``HF_SEF`` (:gh:`9763` by `Adam Li`_) + +- Deprecate ``mne.viz.utils.center_cmap`` (:gh:`9851` by `Clemens Brunner`_) + +- The default partial pathlength factor of :func:`mne.preprocessing.nirs.beer_lambert_law` will change from 0.1 in 0.24 to 6.0 in the next release (:gh:`9843` by `Robert Luke`_) + +- ``mne.preprocessing.ICA.detect_artifacts`` has been deprecated. Please use `~mne.preprocessing.ICA.find_bads_eog` and `~mne.preprocessing.ICA.find_bads_ecg` instead (:gh:`9909` by `Richard Höchenberger`_) + +Authors +~~~~~~~ +People who contributed to this release in alphabetical order +(people with a + are first time contributors): + +* Adam Li +* Alex Rockhill +* Alexandre Gramfort +* Britta Westner +* Clemens Brunner +* Daniel McCloy +* Darin Erat Sleiter+ +* David Julien+ +* Denis A. 
Engemann +* Dominique Makowski+ +* Eric Larson +* Erica Peterson +* Evan Hathaway +* Evgeny Goldstein+ +* Fede Raimondo +* Gansheng Tan+ +* Guillaume Favelier +* Hubert Banville +* Jan Sosulski+ +* Jean-Rémi King +* Jeff Stout +* Johann Benerradi +* John Samuelsson+ +* Josh Koen+ +* Joshua Teves+ +* Lukas Hecker+ +* Luke Bloy +* Marian Dovgialo+ +* Marijn van Vliet +* Martin Schulz +* Mathieu Scheltienne+ +* Mikołaj Magnuski +* Pierre-Antoine Bannier+ +* Reza Shoorangiz+ +* Richard Höchenberger +* Riessarius Stargardsky+ +* Robert Luke +* Romain Derollepot+ +* Ross Maddox +* Ryan Law+ +* Stefan Appelhoff +* Steve Matindi+ +* Timothy Gates+ +* Valerii Chirkov +* Xiaokai Xia+ diff --git a/doc/changes/0.3.inc b/doc/changes/v0.3.rst similarity index 100% rename from doc/changes/0.3.inc rename to doc/changes/v0.3.rst diff --git a/doc/changes/0.4.inc b/doc/changes/v0.4.rst similarity index 100% rename from doc/changes/0.4.inc rename to doc/changes/v0.4.rst diff --git a/doc/changes/0.5.inc b/doc/changes/v0.5.rst similarity index 100% rename from doc/changes/0.5.inc rename to doc/changes/v0.5.rst diff --git a/doc/changes/0.6.inc b/doc/changes/v0.6.rst similarity index 100% rename from doc/changes/0.6.inc rename to doc/changes/v0.6.rst diff --git a/doc/changes/0.7.inc b/doc/changes/v0.7.rst similarity index 100% rename from doc/changes/0.7.inc rename to doc/changes/v0.7.rst diff --git a/doc/changes/v0.8.rst b/doc/changes/v0.8.rst new file mode 100644 index 00000000000..324bb016123 --- /dev/null +++ b/doc/changes/v0.8.rst @@ -0,0 +1,194 @@ +.. _changes_0_8: + +Version 0.8 (2014-06-25) +------------------------ + +Changelog +~~~~~~~~~ + +- Add Python3 support by `Nick Ward`_, `Alex Gramfort`_, `Denis Engemann`_, and `Eric Larson`_ + +- Add ``get_peak`` method for evoked and stc objects by `Denis Engemann`_ + +- Add ``iter_topography`` function for radically simplified custom sensor topography plotting by `Denis Engemann`_ + +- Add field line interpolation by `Eric Larson`_ + +- Add full provenance tracking for epochs and improve ``drop_log`` by `Tal Linzen`_, `Alex Gramfort`_ and `Denis Engemann`_ + +- Add systematic contains method to ``Raw``, ``Epochs`` and ``Evoked`` for channel type membership testing by `Denis Engemann`_ + +- Add fiff unicode writing and reading support by `Denis Engemann`_ + +- Add 3D MEG/EEG field plotting function and evoked method by `Denis Engemann`_ and `Alex Gramfort`_ + +- Add consistent channel-dropping methods to ``Raw``, ``Epochs`` and ``Evoked`` by `Denis Engemann`_ and `Alex Gramfort`_ + +- Add ``equalize_channels`` function to set common channels for a list of ``Raw``, ``Epochs``, or ``Evoked`` objects by `Denis Engemann`_ + +- Add ``plot_events`` function to visually display paradigm by `Alex Gramfort`_ + +- Improved connectivity circle plot by `Martin Luessi`_ + +- Add ability to anonymize measurement info by `Eric Larson`_ + +- Add callback to connectivity circle plot to isolate connections to clicked nodes by `Roan LaPlante`_ + +- Add ability to add patch information to source spaces by `Eric Larson`_ + +- Add ``split_label`` function to divide labels into multiple parts by `Christian Brodbeck`_ + +- Add ``color`` attribute to ``Label`` objects by `Christian Brodbeck`_ + +- Add ``max`` mode for ``extract_label_time_course`` by `Mads Jensen`_ + +- Add ``rename_channels`` function to change channel names and types in info object by `Dan Wakeman`_ and `Denis Engemann`_ + +- Add ``compute_ems`` function to extract the time course of experimental effects by `Denis
Engemann`_, `Sébastien Marti`_ and `Alex Gramfort`_ + +- Add option to expand Labels defined in a source space to the original surface (``Label.fill()``) by `Christian Brodbeck`_ + +- GUIs can be invoked from the command line using ``$ mne coreg`` and ``$ mne kit2fiff`` by `Christian Brodbeck`_ + +- Add ``add_channels_epochs`` function to combine different recordings at the Epochs level by `Christian Brodbeck`_ and `Denis Engemann`_ + +- Add support for EGI Netstation simple binary files by `Denis Engemann`_ + +- Add support for treating arbitrary data (numpy ndarray) as a Raw instance by `Eric Larson`_ + +- Support for parsing the EDF+ annotation channel by `Martin Billinger`_ + +- Add EpochsArray constructor for creating epochs from numpy arrays by `Denis Engemann`_ and `Federico Raimondo`_ + +- Add connector to FieldTrip realtime client by `Mainak Jas`_ + +- Add ``color`` and ``event_id`` with legend options in ``plot_events`` in viz.py by `Cathy Nangini`_ + +- Add ``events_list`` parameter to ``mne.concatenate_raws`` to concatenate events corresponding to runs by `Denis Engemann`_ + +- Add ``read_ch_connectivity`` function to read FieldTrip neighbor template .mat files and obtain sensor adjacency matrices by `Denis Engemann`_ + +- Add display of head in helmet from ``-trans.fif`` file to check coregistration quality by `Mainak Jas`_ + +- Add ``raw.add_events`` to allow adding events to a raw file by `Eric Larson`_ + +- Add ``plot_image`` method to Evoked object to display data as images by `Jean-Rémi King`_ and `Alex Gramfort`_ and `Denis Engemann`_ + +- Add BCI demo with CSP on motor imagery by `Martin Billinger`_ + +- New ICA API with unified methods for processing ``Raw``, ``Epochs`` and ``Evoked`` objects by `Denis Engemann`_ + +- Apply ICA at the evoked stage by `Denis Engemann`_ + +- New ICA methods for visualizing unmixing quality, artifact detection and rejection by `Denis Engemann`_ + +- Add ``pick_channels`` and ``drop_channels`` mixin class to pick and drop channels from ``Raw``, ``Epochs``, and ``Evoked`` objects by `Andrew Dykstra`_ and `Denis Engemann`_ + +- Add ``EvokedArray`` class to create an Evoked object from an array by `Andrew Dykstra`_ + +- Add ``plot_bem`` method to visualize BEM contours on MRI anatomical images by `Mainak Jas`_ and `Alex Gramfort`_ + +- Add automated ECG detection using cross-trial phase statistics by `Denis Engemann`_ and `Juergen Dammers`_ + +- Add Forward class to succinctly display gain matrix info by `Andrew Dykstra`_ + +- Add reading and writing of split raw files by `Martin Luessi`_ + +- Add OLS regression function by `Tal Linzen`_, `Teon Brooks`_ and `Denis Engemann`_ + +- Add computation of point spread and cross-talk functions for MNE type solutions by `Alex Gramfort`_ and `Olaf Hauk`_ + +- Add ``mask`` parameter to ``plot_evoked_topomap`` and ``evoked.plot_topomap`` by `Denis Engemann`_ and `Alex Gramfort`_ + +- Add infomax and extended infomax ICA by `Denis Engemann`_, `Juergen Dammers`_ and `Lukas Breuer`_ and `Federico Raimondo`_ + +- Aesthetically redesign interpolated topography plots by `Denis Engemann`_ and `Alex Gramfort`_ + +- Simplify sensor space time-frequency analysis API with ``tfr_morlet`` function by `Alex Gramfort`_ and `Denis Engemann`_ + +- Add new somatosensory MEG dataset with nice time-frequency content by `Alex Gramfort`_ + +- Add HDF5 write/read support for SourceEstimates by `Eric Larson`_ + +- Add InverseOperator class to display inverse operator info by `Mainak Jas`_ + +- Add ``$ mne report`` command to generate html reports
of MEG/EEG data analysis pipelines by `Mainak Jas`_, `Alex Gramfort`_ and `Denis Engemann`_ + +- Improve ICA verbosity with regard to rank reduction by `Denis Engemann`_ + +BUG +~~~ + +- Fix incorrect ``times`` attribute when stc was computed using ``apply_inverse`` after decimation at epochs stage for certain, arbitrary sample frequencies by `Denis Engemann`_ + +- Fix corner case error for step-down-in-jumps permutation test (when step-down threshold was high enough to include all clusters) by `Eric Larson`_ + +- Fix selection of total number of components via float when picking ICA sources by `Denis Engemann`_ and `Qunxi Dong`_ + +- Fix writing and reading transforms after modification in measurement info by `Denis Engemann`_ and `Martin Luessi`_ and `Eric Larson`_ + +- Fix pre-whitening / rescaling when estimating ICA on multiple channels without covariance by `Denis Engemann`_ + +- Fix ICA pre-whitening, avoid recomputation when applying ICA to new data by `Denis Engemann`_ + +API +~~~ + +- The minimum numpy version has been increased to 1.6 from 1.4. + +- Epochs object now has a selection attribute to track provenance of selected Epochs. The length of the drop_log attribute is now the same as the length of the original events passed to Epochs. In earlier versions it had the length of the events filtered by event_id. Epochs has also now a plot_drop_log method. + +- Deprecate Epochs.drop_picks in favor of a new method called drop_channels + +- Deprecate ``labels_from_parc`` and ``parc_from_labels`` in favor of ``read_labels_from_annot`` and ``write_labels_to_annot`` + +- The default of the new add_dist option of ``setup_source_space`` to add patch information will change from False to True in MNE-Python 0.9 + +- Deprecate ``read_evoked`` and ``write_evoked`` in favor of ``read_evokeds`` and ``write_evokeds``. read_evokeds will return all `Evoked` instances in a file by default. + +- Deprecate ``setno`` in favor of ``condition`` in the initialization of an Evoked instance. This affects ``mne.fiff.Evoked`` and ``read_evokeds``, but not ``read_evoked``. + +- Deprecate ``mne.fiff`` module, use ``mne.io`` instead e.g. ``mne.io.Raw`` instead of ``mne.fiff.Raw``. + +- Pick functions (e.g., ``pick_types``) are now in the mne namespace (e.g. use ``mne.pick_types``). + +- Deprecated ICA methods specific to one container type. Use ICA.fit, ICA.get_sources ICA.apply and ``ICA.plot_*`` for processing Raw, Epochs and Evoked objects. + +- The default smoothing method for ``mne.stc_to_label`` will change in v0.9, and the old method is deprecated. + +- As default, for ICA the maximum number of PCA components equals the number of channels passed. The number of PCA components used to reconstruct the sensor space signals now defaults to the maximum number of PCA components estimated. + +Authors +~~~~~~~ + +The committer list for this release is the following (preceded by number of commits): + +* 418 Denis A. Engemann +* 284 Alexandre Gramfort +* 242 Eric Larson +* 155 Christian Brodbeck +* 144 Mainak Jas +* 49 Martin Billinger +* 49 Andrew Dykstra +* 44 Tal Linzen +* 37 Dan G. 
Wakeman +* 36 Martin Luessi +* 26 Teon Brooks +* 20 Cathy Nangini +* 15 Hari Bharadwaj +* 15 Roman Goj +* 10 Ross Maddox +* 9 Marmaduke Woodman +* 8 Praveen Sripad +* 8 Tanay +* 8 Roan LaPlante +* 5 Saket Choudhary +* 4 Nick Ward +* 4 Mads Jensen +* 3 Olaf Hauk +* 3 Brad Buran +* 2 Daniel Strohmeier +* 2 Federico Raimondo +* 2 Alan Leggitt +* 1 Jean-Rémi King +* 1 Matti Hämäläinen diff --git a/doc/changes/v0.9.rst b/doc/changes/v0.9.rst new file mode 100644 index 00000000000..5ab19f3a07f --- /dev/null +++ b/doc/changes/v0.9.rst @@ -0,0 +1,239 @@ +.. _changes_0_9: + +Version 0.9 (2015-05-22) +------------------------ + +Changelog +~~~~~~~~~ + +- Add support for mayavi figures in ``add_section`` method in Report by `Mainak Jas`_ + +- Add extract volumes of interest from freesurfer segmentation and setup as volume source space by `Alan Leggitt`_ + +- Add support to combine source spaces of different types by `Alan Leggitt`_ + +- Add support for source estimate for mixed source spaces by `Alan Leggitt`_ + +- Add ``SourceSpaces.save_as_volume`` method by `Alan Leggitt`_ + +- Automatically compute proper box sizes when generating layouts on the fly by `Marijn van Vliet`_ + +- Average evoked topographies across time points by `Denis Engemann`_ + +- Add option to Report class to save images as vector graphics (SVG) by `Denis Engemann`_ + +- Add events count to ``mne.viz.plot_events`` by `Denis Engemann`_ + +- Add support for stereotactic EEG (sEEG) channel type by `Marmaduke Woodman`_ + +- Add support for montage files by `Denis Engemann`_, `Marijn van Vliet`_, `Jona Sassenhagen`_, `Alex Gramfort`_ and `Teon Brooks`_ + +- Add support for spatiotemporal permutation clustering on sensors by `Denis Engemann`_ + +- Add support for multitaper time-frequency analysis by `Hari Bharadwaj`_ + +- Add Stockwell (S) transform for time-frequency representations by `Denis Engemann`_ and `Alex Gramfort`_ + +- Add reading and writing support for time frequency data (AverageTFR objects) by `Denis Engemann`_ + +- Add reading and writing support for digitizer data, and function for adding dig points to info by `Teon Brooks`_ + +- Add ``plot_projs_topomap`` method to ``Raw``, ``Epochs`` and ``Evoked`` objects by `Teon Brooks`_ + +- Add EEG (based on spherical splines) and MEG (based on field interpolation) bad channel interpolation method to ``Raw``, ``Epochs`` and ``Evoked`` objects by `Denis Engemann`_ and `Mainak Jas`_ + +- Add parameter to ``whiten_evoked``, ``compute_whitener`` and ``prepare_noise_cov`` to set the exact rank by `Martin Luessi`_ and `Denis Engemann`_ + +- Add fiff I/O for processing history and MaxFilter info by `Denis Engemann`_ and `Eric Larson`_ + +- Add automated regularization with support for multiple sensor types to ``compute_covariance`` by `Denis Engemann`_ and `Alex Gramfort`_ + +- Add ``Evoked.plot_white`` method to diagnose the quality of the estimated noise covariance and its impact on spatial whitening by `Denis Engemann`_ and `Alex Gramfort`_ + +- Add ``mne.evoked.grand_average`` function to compute grand average of Evoked data while interpolating bad EEG channels if necessary by `Mads Jensen`_ and `Alex Gramfort`_ + +- Improve EEG referencing support and add support for bipolar referencing by `Marijn van Vliet`_ and `Alex Gramfort`_ + +- Enable TFR calculation on Evoked objects by `Eric Larson`_ + +- Add support for combining Evoked datasets with arbitrary weights (e.g., for oddball paradigms) by `Eric Larson`_ and `Alex Gramfort`_ + +- Add support for concatenating a list of 
Epochs objects by `Denis Engemann`_ + +- Labels support subtraction (``label_1 - label_2``) by `Christian Brodbeck`_ + +- Add GeneralizationAcrossTime object with support for cross-condition generalization by `Jean-Rémi King`_ and `Denis Engemann`_ + +- Add support for single dipole fitting by `Eric Larson`_ + +- Add support for spherical models in forward calculations by `Eric Larson`_ + +- Add support for SNR estimation by `Eric Larson`_ + +- Add support for Savitzky-Golay filtering of Evoked and Epochs by `Eric Larson`_ + +- Add support for adding an empty reference channel to data by `Teon Brooks`_ + +- Add reader function ``mne.io.read_raw_fif`` for Raw FIF files by `Teon Brooks`_ + +- Add example of creating MNE objects from arbitrary data and NEO files by `Jaakko Leppakangas`_ + +- Add ``plot_psd`` and ``plot_psd_topomap`` methods to epochs by `Yousra Bekhti`_, `Eric Larson`_ and `Denis Engemann`_ + +- ``evoked.pick_types``, ``epochs.pick_types``, and ``tfr.pick_types`` added by `Eric Larson`_ + +- ``rename_channels`` and ``set_channel_types`` added as methods to ``Raw``, ``Epochs`` and ``Evoked`` objects by `Teon Brooks`_ + +- Add RAP-MUSIC inverse method by `Yousra Bekhti`_ and `Alex Gramfort`_ + +- Add ``evoked.as_type`` to allow remapping data in MEG channels to virtual magnetometer or gradiometer channels by `Mainak Jas`_ + +- Add ``mne.Report.add_bem_to_section``, ``mne.Report.add_htmls_to_section`` methods to :class:`mne.Report` by `Teon Brooks`_ + +- Add support for KIT epochs files with ``read_epochs_kit`` by `Teon Brooks`_ + +- Add whitening plots for evokeds to ``mne.Report`` by `Mainak Jas`_ + +- Add ``DigMontage`` class and reader to interface with digitization info by `Teon Brooks`_ and `Christian Brodbeck`_ + +- Add ``set_montage`` method to the ``Raw``, ``Epochs``, and ``Evoked`` objects by `Teon Brooks`_ and `Denis Engemann`_ + +- Add support for capturing sensor positions when clicking on an image by `Chris Holdgraf`_ + +- Add support for custom sensor positions when creating Layout objects by `Chris Holdgraf`_ + +BUG +~~~ + +- Fix energy conservation for STFT with tight frames by `Daniel Strohmeier`_ + +- Fix incorrect data matrix when tfr was plotted with parameters ``tmin``, ``tmax``, ``fmin`` and ``fmax`` by `Mainak Jas`_ + +- Fix channel names in topomaps by `Alex Gramfort`_ + +- Fix mapping of ``l_trans_bandwidth`` (to low frequency) and ``h_trans_bandwidth`` (to high frequency) in ``_BaseRaw.filter`` by `Denis Engemann`_ + +- Fix scaling source spaces when distances have to be recomputed by `Christian Brodbeck`_ + +- Fix repeated samples in client to FieldTrip buffer by `Mainak Jas`_ and `Federico Raimondo`_ + +- Fix highpass and lowpass units read from Brainvision vhdr files by `Alex Gramfort`_ + +- Add missing attributes for BrainVision and KIT systems needed for resample by `Teon Brooks`_ + +- Fix file extensions of SSP projection files written by mne commands (from _proj.fif to -proj.fif) by `Alex Gramfort`_ + +- Generating EEG layouts no longer requires digitization points by `Marijn van Vliet`_ + +- Add missing attributes to BTI, KIT, and BrainVision by `Eric Larson`_ + +- The API change to the edf, brainvision, and egi readers breaks backwards compatibility when importing eeg data by `Teon Brooks`_ + +- Fix bug in ``mne.viz.plot_topo`` if ylim was passed for single sensor layouts by `Denis Engemann`_ + +- Average reference projections will no longer be automatically added after applying a custom EEG reference by `Marijn van Vliet`_ + +- Fix picks argument
to filter in n dimensions (affects FilterEstimator), and highpass filter in FilterEstimator by `Mainak Jas`_ + +- Fix beamformer code LCMV/DICS for CTF data with reference channels by `Denis Engemann`_ and `Alex Gramfort`_ + +- Fix scalings for bad EEG channels in ``mne.viz.plot_topo`` by `Marijn van Vliet`_ + +- Fix EGI reading when no events are present by `Federico Raimondo`_ + +- Add functionality to determine plot limits automatically or by data percentiles by `Mark Wronkiewicz`_ + +- Fix bug in mne.io.edf where the channel offsets were omitted in the voltage calculations by `Teon Brooks`_ + +- Decouple section ordering in command-line from python interface for mne-report by `Mainak Jas`_ + +- Fix bug with ICA resetting by `Denis Engemann`_ + +API +~~~ + +- apply_inverse functions have a new boolean parameter ``prepared`` which saves computation time by calling ``prepare_inverse_operator`` only if it is False + +- find_events and read_events functions have a new parameter ``mask`` to set some bits to a don't care state by `Teon Brooks`_ + +- New channels module including layouts, electrode montages, and neighbor definitions of sensors which deprecates ``mne.layouts`` by `Denis Engemann`_ + +- ``read_raw_brainvision``, ``read_raw_edf``, ``read_raw_egi`` all use a standard montage import by `Teon Brooks`_ + +- Fix missing calibration factors for ``mne.io.egi.read_raw_egi`` by `Denis Engemann`_ and `Federico Raimondo`_ + +- Allow multiple filename patterns as a list (e.g., \*raw.fif and \*-eve.fif) to be parsed by mne report in ``Report.parse_folder()`` by `Mainak Jas`_ + +- ``read_hsp``, ``read_elp``, and ``write_hsp``, ``write_mrk`` were removed and made private by `Teon Brooks`_ + +- When computing the noise covariance or MNE inverse solutions, the rank is estimated empirically using more sensitive thresholds, which stabilizes results by `Denis Engemann`_ and `Eric Larson`_ and `Alex Gramfort`_ + +- Raw FIFF files can be preloaded after class instantiation using ``raw.preload_data()`` + +- Add ``label`` parameter to ``apply_inverse`` by `Teon Brooks`_ + +- Deprecated ``label_time_courses`` for ``in_label`` method in `SourceEstimate` by `Teon Brooks`_ + +- Deprecated ``as_data_frame`` for ``to_data_frame`` by `Chris Holdgraf`_ + +- Add ``transform``, ``unit`` parameters to ``read_montage`` by `Teon Brooks`_ + +- Deprecated ``fmin, fmid, fmax`` in stc.plot and added ``clim`` by `Mark Wronkiewicz`_ + +- Use ``scipy.signal.welch`` instead of matplotlib.psd inside ``compute_raw_psd`` and ``compute_epochs_psd`` by `Yousra Bekhti`_ `Eric Larson`_ and `Denis Engemann`_. As a consequence, ``Raw.plot_raw_psds`` has been deprecated. + +- ``Raw`` instances returned by ``mne.forward.apply_forward_raw`` now always have times starting from + zero to be consistent with all other ``Raw`` instances. To get the former ``start`` and ``stop`` times, + use ``raw.first_samp / raw.info['sfreq']`` and ``raw.last_samp / raw.info['sfreq']``. + +- ``pick_types_evoked`` has been deprecated in favor of ``evoked.pick_types``. + +- Deprecated changing the sensor type of channels in ``rename_channels`` by `Teon Brooks`_ + +- CUDA is no longer initialized at module import, but only when first used. + +- ``add_figs_to_section`` and ``add_images_to_section`` now have a ``textbox`` parameter to add comments to the image by `Teon Brooks`_ + +- Deprecated ``iir_filter_raw`` for ``fit_iir_model_raw``. 
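The ``mne.forward.apply_forward_raw`` timing note above is easy to misapply, so here is a minimal sketch of recovering the former ``start`` and ``stop`` times; it assumes ``raw`` is an already-created ``Raw`` instance (the variable name is illustrative only, not part of the API)::

    # Times of this Raw now start at zero; the original recording offsets
    # can still be derived from the first/last sample indices.
    sfreq = raw.info['sfreq']
    start = raw.first_samp / sfreq  # former ``start`` time, in seconds
    stop = raw.last_samp / sfreq    # former ``stop`` time, in seconds
    print(f"original recording span: {start:.3f} s to {stop:.3f} s")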
+ +- Add ``montage`` parameter to the ``create_info`` function to create the info using montages by `Teon Brooks`_ + +Authors +~~~~~~~ + +The committer list for this release is the following (preceded by number of commits): + +* 515 Eric Larson +* 343 Denis A. Engemann +* 304 Alexandre Gramfort +* 300 Teon Brooks +* 142 Mainak Jas +* 119 Jean-Rémi King +* 77 Alan Leggitt +* 75 Marijn van Vliet +* 63 Chris Holdgraf +* 57 Yousra Bekhti +* 49 Mark Wronkiewicz +* 44 Christian Brodbeck +* 30 Jona Sassenhagen +* 29 Hari Bharadwaj +* 27 Clément Moutard +* 24 Ingoo Lee +* 18 Marmaduke Woodman +* 16 Martin Luessi +* 10 Jaakko Leppakangas +* 9 Andrew Dykstra +* 9 Daniel Strohmeier +* 7 kjs +* 6 Dan G. Wakeman +* 5 Federico Raimondo +* 3 Basile Pinsard +* 3 Christoph Dinh +* 3 Hafeza Anevar +* 2 Martin Billinger +* 2 Roan LaPlante +* 1 Manoj Kumar +* 1 Matt Tucker +* 1 Romain Trachel +* 1 mads jensen +* 1 sviter diff --git a/doc/changes/v1.0.rst b/doc/changes/v1.0.rst new file mode 100644 index 00000000000..dd5e7b501ed --- /dev/null +++ b/doc/changes/v1.0.rst @@ -0,0 +1,307 @@ +.. _changes_1_0_0: + +Version 1.0.0 (2022-03-21) +-------------------------- + +Enhancements +~~~~~~~~~~~~ + +- Add ``use_orig_time`` option to :meth:`mne.Annotations.crop`. (:gh:`10396` by :newcontrib:`Michiru Kaneda`) + +- Speed up :func:`mne.preprocessing.annotate_muscle_zscore`, :func:`mne.preprocessing.annotate_movement`, and :func:`mne.preprocessing.annotate_nan` through better annotation creation (:gh:`10089` by :newcontrib:`Senwen Deng`) + +- Fix some unused variables in time_frequency_erds.py example (:gh:`10076` by :newcontrib:`Jan Zerfowski`) + +- Document ``BAD_ACQ_SKIP`` annotation behavior (:gh:`10054` by :newcontrib:`Etienne de Montalivet`) + +- Add link to related ``MNE-ARI`` package (:gh:`10174` by :newcontrib:`John Veillette`) + +- :func:`mne.time_frequency.psd_array_multitaper` can now return complex results per-taper when specifying ``output='complex'`` (:gh:`10307` by `Mikołaj Magnuski`_) + +- :func:`mne.time_frequency.tfr_array_multitaper` can now return results for ``output='phase'`` instead of an error (:gh:`10281` by `Mikołaj Magnuski`_) + +- Add show local maxima toggling button to ``mne.gui.locate_ieeg`` (:gh:`9952` by `Alex Rockhill`_) + +- Show boundaries in ``mne.gui.locate_ieeg`` (:gh:`10379` by `Eric Larson`_) + +- Add argument ``cval`` to :func:`mne.transforms.apply_volume_registration` to set interpolation values outside the image domain (:gh:`10379` by `Eric Larson`_) + +- Improve docstring of :class:`mne.Info` and add attributes that were not covered (:gh:`9922` by `Mathieu Scheltienne`_) + +- Add an alternate, manual procedure for aligning a CT to an MR procedure to :ref:`tut-ieeg-localize` (:gh:`9978` by `Alex Rockhill`_) + +- Improve docstring of export functions :func:`mne.export.export_raw`, :func:`mne.export.export_epochs`, :func:`mne.export.export_evokeds`, :func:`mne.export.export_evokeds_mff` and issue a warning when there are unapplied projectors (:gh:`9994` by `Mathieu Scheltienne`_) + +- Add support for reading haemoglobin fNIRS data to :func:`mne.io.read_raw_snirf` (:gh:`9929` by `Robert Luke`_) + +- Add ``fill_hole_size`` keyword argument to :func:`mne.viz.Brain.add_volume_labels` to close holes in the mesh (:gh:`10024` by `Alex Rockhill`_) + +- Changed :class:`mne.Epochs` and :class:`mne.Evoked` to have a more concise ``__repr__`` to improve interactive MNE usage in Python Interactive Console, IDEs, and debuggers when many events are handled. 
(:gh:`10042` by `Jan Sosulski`_) + +- Improve docstring of ``events`` arguments and cross-referencing to :term:`events` (:gh:`10056` by `Mathieu Scheltienne`_) + +- Speed up repeated surface-smoothing operation (e.g., in repeated calls to :meth:`stc.plot() `) (:gh:`10077` by `Eric Larson`_) + +- Add ``verbose`` parameter to :func:`mne.io.Raw.load_bad_channels` and log information on how bad channels are updated (:gh:`10102` by `Stefan Appelhoff`_) + +- Add ``infer_type`` argument to :func:`mne.io.read_raw_edf` and :func:`mne.io.read_raw_bdf` to automatically infer channel types from channel labels (:gh:`10058` by `Clemens Brunner`_) + +- Reduce the time it takes to generate a :class:`mne.io.Raw`, :class:`~mne.Epochs`, or :class:`~mne.preprocessing.ICA` figure if a ``scalings`` parameter is provided (:gh:`10109` by `Richard Höchenberger`_ and `Eric Larson`_) + +- :meth:`mne.Report.add_raw` gained a new ``scalings`` parameter to provide custom data scalings for the butterfly plots (:gh:`10109` by `Richard Höchenberger`_) + +- The ``butterfly`` parameter of :meth:`mne.Report.add_raw` now also accepts numbers to specify how many segments to plot (:gh:`10115` by `Richard Höchenberger`_) + +- The ``psd`` parameter of :meth:`mne.Report.add_epochs` now also accepts numbers to specify the signal duration used for PSD calculation (:gh:`10119` by `Richard Höchenberger`_) + +- Drastically speed up butterfly plot generation in :meth:`mne.Report.add_raw`. We now don't plot annotations anymore; however, we feel that the speed improvements justify this change, also considering the annotations were of limited use in the displayed one-second time slices anyway (:gh:`10114`, :gh:`10116` by `Richard Höchenberger`_) + +- In :class:`mne.Report`, limit the width of automatically generated figures to a maximum of 850 pixels (450 pixels for :class:`mne.SourceEstimate` plots), and the resolution to 100 DPI to reduce file size, memory consumption, and – in some cases like :meth:`mne.Report.add_stc` – processing time (:gh:`10126`, :gh:`10129`, :gh:`10135`, :gh:`10142` by `Richard Höchenberger`_) + +- :class:`~mne.Epochs` metadata tables are now included in :class:`mne.Report` (:gh:`10166` by `Richard Höchenberger`_) + +- :func:`mne.get_head_surf` and :func:`mne.dig_mri_distances` gained a new parameter, ``on_defects``, controlling how to handle surfaces with topological defects (:gh:`10175` by `Richard Höchenberger`_) + +- :meth:`mne.Report.add_epochs` gained a new parameter, ``drop_log_ignore``, to control which drop reasons to omit when creating the drop log plot (:gh:`10182` by `Richard Höchenberger`_) + +- :meth:`mne.Epochs.plot_drop_log` now also includes the absolute number of epochs dropped in the title (:gh:`10186` by `Richard Höchenberger`_) + +- Add a button to show the maximum intensity projection in ``mne.gui.locate_ieeg`` (:gh:`10185` by `Alex Rockhill`_) + +- Annotations from a :class:`~mne.io.Raw` object are now preserved by the :class:`~mne.Epochs` constructor and are supported when saving Epochs (:gh:`9969` and :gh:`10019` by `Adam Li`_) + +- Add a checkbox to show and hide the MEG helmet in the coregistration GUI (:gh:`10200` by `Guillaume Favelier`_) + +- The coregistration GUI (instantiated via :func:`mne.gui.coregistration`) gained a new attribute, ``coreg``, to access the :class:`mne.coreg.Coregistration` instance used by the GUI (:gh:`10255` by `Richard Höchenberger`_) + +- Add marching cubes display of head if :func:`mne.bem.make_scalp_surfaces` has not computed or the recon-all hasn't 
finished (:gh:`10202` by `Alex Rockhill`_) + +- The default interaction style of :func:`mne.gui.coregistration` and :func:`mne.viz.plot_alignment` has been changed to ``'terrain'``, which keeps one axis fixed and should make interactions with the 3D scene more predictable (:gh:`9972`, :gh:`10206` by `Richard Höchenberger`_) + +- :func:`mne.gui.coregistration` now uses the proper widget style for push buttons, making for a more native feel of the application (:gh:`10220` by `Richard Höchenberger`_ and `Guillaume Favelier`_) + +- :class:`mne.coreg.Coregistration`, :func:`mne.scale_bem`, and :func:`mne.scale_mri` gained a new parameter, ``on_defects``, controlling how to handle topological defects (:gh:`10230`, :gh:`10249` by `Richard Höchenberger`_) + +- :class:`mne.coreg.Coregistration` gained a new attribute, ``fiducials``, allowing for convenient retrieval of the MRI fiducial points (:gh:`10243`, by `Richard Höchenberger`_) + +- Added plotting points to represent contacts on the max intensity projection plot for ``mne.gui.locate_ieeg`` (:gh:`10212` by `Alex Rockhill`_) + +- Add lines in 3D and on the maximum intensity projection when more than two electrode contacts are selected to aid in identifying that contact for ``mne.gui.locate_ieeg`` (:gh:`10212` by `Alex Rockhill`_) + +- Add a ``block`` parameter to :class:`mne.viz.Brain` and the UI of :class:`mne.coreg.Coregistration` to prevent the windows from closing immediately when running in a non-interactive Python session (:gh:`10222` by `Guillaume Favelier`_) + +- All methods of :class:`mne.Report` with a ``tags`` parameter now also accept a single tag passed as a string (previously, you needed to pass a tuple of strings, even for a single tag) (:gh:`10183`, by `Richard Höchenberger`_) + +- :meth:`mne.Report.add_trans` has gained a new parameter, ``alpha``, to control the level of opacity of the rendered head (:gh:`10247`, by `Richard Höchenberger`_) + +- The new convenience function :func:`mne.event.match_event_names` allows for straightforward checking if a specific event name or a group of events is present in a collection of event names (:gh:`10233` by `Richard Höchenberger`_) + +- :meth:`Epochs.plot` is now supported by the pyqtgraph-backend (:gh:`10297` by `Martin Schulz`_) + +- The ``map_surface`` parameter of :meth:`mne.viz.Brain.add_foci` now works and allows you to add foci to a rendering of a brain that are positioned at the vertex of the mesh closest to the given coordinates (:gh:`10299` by `Marijn van Vliet`_) + +- :meth:`mne.preprocessing.ICA.plot_sources()` is now also supported by the ``qt`` backend (:gh:`10330` by `Martin Schulz`_) + +- Added :meth:`mne.viz.Brain.add_dipole` and :meth:`mne.viz.Brain.add_forward` to plot dipoles on a brain as well as :meth:`mne.viz.Brain.remove_dipole` and :meth:`mne.viz.Brain.remove_forward` (:gh:`10373` by `Alex Rockhill`_) + +- Made anterior/posterior slice scrolling in ``mne.gui.locate_ieeg`` possible for users without page up and page down buttons by allowing angle bracket buttons to be used (:gh:`10384` by `Alex Rockhill`_) + +- Add support for ``theme='auto'`` for automatic dark-mode support in :meth:`raw.plot() ` and related functions and methods when using the ``'qt'`` backend (:gh:`10417` by `Eric Larson`_) + +- Add support for ``MNE_3D_OPTION_THEME`` and ``MNE_BROWSER_THEME`` to use ``['light', 'dark', 'auto']`` or a custom theme path in :class:`mne.viz.Brain` and :meth:`raw.plot() ` (and related functions and methods when using the ``'qt'`` backend(), respectively (:gh:`10418` 
by `Eric Larson`_) + +- Add support for passing time-frequency data to :func:`mne.stats.spatio_temporal_cluster_test` and :func:`mne.stats.spatio_temporal_cluster_1samp_test` and added an example to :ref:`tut-cluster-spatiotemporal-sensor` (:gh:`10384` by `Alex Rockhill`_) + +- Add support for reading optical density fNIRS data to :func:`mne.io.read_raw_snirf` (:gh:`10408` by `Robert Luke`_) + +- Added :func:`mne.source_space.get_decimated_surfaces` to extract subsurfaces from a SourceSpaces instance or a .fif file (:gh:`10421` by `Joshua Teves`_) + +Bugs +~~~~ +- Fix bug with :func:`mne.io.read_raw_nihon` where latin-1 channels could not be read (:gh:`10429` by :newcontrib:`Matthias Eberlein`) + +- Fix bug with :func:`mne.io.read_raw_nihon` so that it handles duplicates in channel names (:gh:`10431` by :newcontrib:`Matthias Eberlein`) + +- Fix bug in :func:`mne.io.read_raw_egi` where the ``meas_date`` in a ``rawMFF`` object was incorrectly assigned. The function now converts the ``meas_date`` to UTC and provides a UTC offset key, i.e. ``raw.info['utc_offset']``. (:gh:`10304` by :newcontrib:`Scott Huberty`) + +- Fix datetime conversion for tmin/tmax=None cases in :meth:`mne.Annotations.crop`. Allow the use of float and None simultaneously for :meth:`mne.Annotations.crop`. (:gh:`10361` by :newcontrib:`Michiru Kaneda`) + +- Add Shift_JIST mu in :func:`mne.io.read_raw_edf` (:gh:`10356` by :newcontrib:`Michiru Kaneda`) + +- Teach :func:`mne.io.read_raw_bti` to use its ``eog_ch`` parameter (:gh:`10093` by :newcontrib:`Adina Wagner`) + +- Fix use of arguments in :func:`numpy.loadtxt` (:gh:`10189` by :newcontrib:`Federico Zamberlan`) + +- Fix documentation of options in :func:`mne.stc_near_sensors` (:gh:`10007` by :newcontrib:`Nikolai Chapochnikov`) + +- :func:`mne.time_frequency.tfr_array_multitaper` now returns results per taper when ``output='complex'`` (:gh:`10281` by `Mikołaj Magnuski`_) + +- Fix default of :func:`mne.io.Raw.plot` to be ``use_opengl=None``, which will act like False unless ``MNE_BROWSER_USE_OPENGL=true`` is set in the user configuration (:gh:`9957` by `Eric Larson`_) + +- Fix bug with :class:`mne.Report` where figures were saved with ``bbox_inches='tight'``, which led to inconsistent sizes in sliders (:gh:`9966` by `Eric Larson`_) + +- When opening a saved report and saving it to a different filename again, don't change ``Report.fname`` to avoid a regression when using :func:`~mne.open_report` as a context manager (:gh:`9998` by `Marijn van Vliet`_) + +- Fix bug in :func:`mne.make_forward_solution` where sensor-sphere geometry check was incorrect (:gh:`9968` by `Eric Larson`_) + +- Use single char alphanumeric suffix when renaming long channel names (over 15-characters) when writing to FIF format. 
(:gh:`10002` by `Luke Bloy`_) + +- Add argument ``overwrite`` to :func:`mne.export.export_raw`, :func:`mne.export.export_epochs`, :func:`mne.export.export_evokeds` and :func:`mne.export.export_evokeds_mff` (:gh:`9975` by `Mathieu Scheltienne`_) + +- :func:`mne.gui.coregistration` and the ``mne coreg`` command didn't respect the ``interaction`` parameter (:gh:`9972` by `Richard Höchenberger`_) + +- Fix incorrect projection of source space onto white matter surface instead of pial in :ref:`tut-working-with-ecog` (:gh:`9980` by `Alex Rockhill`_) + +- Fix channel type support when reading from EEGLAB ``.set`` format with :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` (:gh:`9990` by `Mathieu Scheltienne`_) + +- Fix suboptimal alignment using :func:`mne.transforms.compute_volume_registration` (:gh:`9991` by `Alex Rockhill`_) + +- Only warn if header is missing in BrainVision files instead of raising an error (:gh:`10001` by `Clemens Brunner`_) + +- Add argument ``overwrite`` to `mne.preprocessing.ICA.save` to check for existing file (:gh:`10004` by `Mathieu Scheltienne`_) + +- :class:`mne.Report` now raises an exception if invalid tags were passed (:gh:`9970` by `Richard Höchenberger`_) + +- Fix bug in :func:`mne.get_montage_volume_labels` that set the maximum number of voxels to be included too low causing unwanted capping of the included voxel labels (:gh:`10021` by `Alex Rockhill`_) + +- Fix annotation cropping and I/O roundtrip when there is no measurement date available (:gh:`10040` by `Mathieu Scheltienne`_ and `Alex Gramfort`_). + +- Fix bug where :meth:`raw.crop() ` from the beginning of the instance could break annotations and ``raw.first_samp`` (:gh:`10358` by `Eric Larson`_) + +- :func:`~mne.sys_info` output now contains the installed version of ``pooch``, too; this output had been accidentally removed previously (:gh:`10047` by `Richard Höchenberger`_) + +- Fix VTK version extraction in :func:`mne.sys_info` (:gh:`10399` by `Eric Larson`_) + +- Fix automatic channel type detection from channel labels in :func:`mne.io.read_raw_edf` and :func:`mne.io.read_raw_bdf` (and disable this functionality from :func:`mne.io.read_raw_gdf`) (:gh:`10058` by `Clemens Brunner`_) + +- Fix :func:`~mne.stats.permutation_cluster_1samp_test` to properly handle 2-dimensional data in combination with TFCE (:gh:`10073` by `Richard Höchenberger`_) + +- Fix channel grouping error when using "butterfly mode" with :meth:`mne.io.Raw.plot` (:gh:`10087` by `Daniel McCloy`_) + +- Fix inconsistent behavior of ``mne.preprocessing.annotate_*`` functions by making them all return :class:`mne.Annotations` objects with the ``orig_time`` attribute set to ``raw.info["meas_time"]`` (:gh:`10067` and :gh:`10118` by `Stefan Appelhoff`_, `Eric Larson`_, and `Alex Gramfort`_) + +- Fix bug that appears during automatic calculation of the colormap of `mne.viz.Brain` when data values of ``fmin`` and ``fmax`` are too close (:gh:`10074` by `Guillaume Favelier`_) + +- We now display a scrollbar in the tags dropdown of a `~mne.Report` if many tags have been added, granting access to all tags instead of "hiding" them below the bottom of the page (:gh:`10082` by `Richard Höchenberger`_) + +- Creating :class:`mne.Epochs` now provides clearer logging (less ambiguous, no duplicates) when the ``preload`` and/or ``metadata`` parameters are set (:gh:`10112` by `Stefan Appelhoff`_) + +- Fix bug with :class:`mne.Epochs` where save-load round-trip with FIF would cause :meth:`mne.Epochs.apply_baseline` to no longer work 
(:gh:`10177` by `Eric Larson`_) + +- Fix functions by adding missing ``overwrite`` parameters: :func:`mne.write_events`, :func:`mne.write_cov`, :func:`mne.write_evokeds`, :meth:`mne.SourceEstimate.save`, :func:`mne.minimum_norm.write_inverse_operator`, :func:`mne.write_proj`, and related methods (:gh:`10127` by `Eric Larson`_) + +- Fix bug with :func:`mne.transforms.compute_volume_registration` and :func:`mne.compute_source_morph` (volumetric) where the smoothing factors were not scaled based on ``zooms`` (:gh:`10132` by `Eric Larson`_) + +- Remove repeated logging output when overwriting an existing `~mne.io.Raw` file (:gh:`10095` by `Richard Höchenberger`_ and `Stefan Appelhoff`_) + +- In the plots generated by :meth:`mne.Report.add_stc`, we now only add 5 labels to the color bar to reduce the chance of overlap, which could previously cause the labels to become unreadable (:gh:`10135` by `Richard Höchenberger`_) + +- :meth:`mne.Report.add_trans` now allows you to add sensor alignment plots for head surfaces that have topological defects (:gh:`10175` by `Richard Höchenberger`_) + +- :meth:`mne.Report.add_trans` now also works if no digitization points are present in the data (:gh:`10176` by `Jeff Stout`_) + +- Argument ``verbose`` is now respected by dataset fetching (:gh:`10210` by `Mathieu Scheltienne`_) + +- Fix bug with :func:`mne.io.read_raw_hitachi` where empty ``Comment`` descriptions were not handled properly (:gh:`10235` by `Eric Larson`_) + +- Fix bug with input validation of low-level filtering functions (:gh:`10267` by `Eric Larson`_) + +- :func:`mne.gui.coregistration` now works with surfaces containing topological defects (:gh:`10230`, by `Richard Höchenberger`_) + +- Fix bug with :func:`mne.io.read_raw_nirx` being unable to read measurement dates recorded on systems with German (de_DE), French (fr_FR), and Italian (it_IT) locales (:gh:`10277` by `Eric Larson`_) + +- Fix bug with projector normalization checks that were too sensitive, and improve warning (:gh:`10292` by `Eric Larson`_) + +- Fix bug with :func:`mne.viz.plot_alignment` where head-coordinate source spaces (e.g., from a forward solution) were not properly plotted (:gh:`10309` by `Eric Larson`_) + +- :func:`mne.read_trans` and :func:`mne.io.read_fiducials` now correctly expand ``~`` in the provided path (i.e., to the user's home directory) (:gh:`10265`, :gh:`10415`, by `Richard Höchenberger`_) + +- :func:`mne.find_events` now uses ``first_samp`` and not ``0`` for initial event when using ``initial_value`` (:gh:`10289`, by `Alex Gramfort`_) + +- Fix bug with :func:`mne.channels.make_standard_montage` for ``'standard*'``, ``'mgh*'``, and ``'artinis*'`` montages where the points were incorrectly scaled and fiducials incorrectly set away from the correct values for use with the ``fsaverage`` subject (:gh:`10324` by `Eric Larson`_) + +- Fix bug with :meth:`mne.Report.add_figure` where figures generated externally were closed and possibly resized during render (:gh:`10342` by `Eric Larson`_) + +- Fix bug with :func:`mne.viz.plot_sparse_source_estimates` where the return value was incorrect (:gh:`10347` by `Eric Larson`_) + +- Fix plotting bug in :ref:`ex-electrode-pos-2d` and make view look more natural in :ref:`ex-movement-detect` (:gh:`10313`, by `Alex Rockhill`_) + +- Fix bug with blank 3D rendering with MESA software rendering (:gh:`10400` by `Eric Larson`_) + +- Fix a bug in ``mne.gui.locate_ieeg`` where 2D lines on slice plots failed to update and were shown when not in maximum projection mode (:gh:`10335`, by 
`Alex Rockhill`_) + +- Fix misleading color scale in :ref:`tut-cluster-tfr` for the plotting of cluster F-statistics (:gh:`10393` by `Alex Rockhill`_) + +- Fix baseline removal using ``remove_dc=True`` in :meth:`raw.plot() ` for data containing ``np.nan`` (:gh:`10392` by `Clemens Brunner`_) + +- Fix misleading color scale in :ref:`tut-timefreq-twoway-anova` for plotting F-stats (:gh:`10401` by `Alex Rockhill`_) + +- Fix misleading ``T_obs`` return name for :func:`mne.stats.spatio_temporal_cluster_test` when the default returns an F-statistic (:gh:`10401` by `Alex Rockhill`_) + +API changes +~~~~~~~~~~~ +- The default browser for :meth:`raw.plot() `, :meth:`epochs.plot() `, and :meth:`ica.plot_sources() ` has been changed to the ``'qt'`` backend on systems where `mne_qt_browser `__ is installed. To change back to matplotlib within a session, you can use :func:`mne.viz.set_browser_backend('matplotlib') `. To set it permanently on your system, you can use :func:`mne.set_config('MNE_BROWSER_BACKEND', 'matplotlib') ` (:gh:`9960` by `Martin Schulz`_ and `Eric Larson`_) + +- ``mne.Info.pick_channels`` has been deprecated. Use ``inst.pick_channels`` to pick channels from :class:`~mne.io.Raw`, :class:`~mne.Epochs`, and :class:`~mne.Evoked`. Use :func:`mne.pick_info` to pick channels from :class:`mne.Info` (:gh:`10039` by `Mathieu Scheltienne`_) + +- All :func:`data_path ` functions now return :class:`python:pathlib.Path` objects rather than strings. Support for string concatenation with plus (``+``) is thus deprecated and will be removed in 1.2, use the forward-slash ``/`` operator instead (:gh:`10348` by `Eric Larson`_) + +- Argument ``event_list`` has been deprecated in favor of ``events`` in :func:`mne.write_events` (:gh:`10056` by `Mathieu Scheltienne`_) + +- ``mne.preprocessing.annotate_flat`` has been deprecated in favor of :func:`mne.preprocessing.annotate_amplitude`, that covers both minimum and maximum peak-to-peak variation. (:gh:`10143` by `Mathieu Scheltienne`_) + +- The ``max_ori_out`` parameter of :func:`mne.beamformer.apply_lcmv` and related functions is being removed as only signed estimates are supported. ``abs(stc)`` can be used to obtain unsigned estimates (:gh:`10366` by `Eric Larson`_) + +- The ``verbose`` attribute of classes (e.g., :class:`mne.io.Raw`, `mne.Epochs`, etc.) has been deprecated. Explicitly pass ``verbose`` to methods as necessary instead. (:gh:`10267` by `Eric Larson`_) + +- In :func:`mne.viz.set_browser_backend`, the `mne-qt-browser `__-based backend is now called ``'qt'`` rather than ``'pyqtgraph'`` for simplicity (:gh:`10323` by `Eric Larson`_) + +Dependencies +~~~~~~~~~~~~ +Numerous external dependencies that used to be bundled with MNE-Python are now +not shipped with the package anymore and will instead be retrieved +automatically from their official sources when you install MNE-Python. This +simplifies MNE-Python maintenance and keeps the package smaller. The following +new dependencies have been added: + +- `Jinja2`_ (replaces ``Tempita``, which is not maintained anymore; :gh:`10211` by `Richard Höchenberger`_) + + +.. 
_Jinja2: https://jinja.palletsprojects.com/ + +Authors +~~~~~~~ + +* Adam Li +* Adina Wagner+ +* Alex Rockhill +* Alexandre Gramfort +* Britta Westner +* Clemens Brunner +* Daniel McCloy +* Eduard Ort +* Eric Larson +* Etienne de Montalivet+ +* Federico Zamberlan+ +* Guillaume Favelier +* Jan Sosulski +* Jan Zerfowski+ +* Jeff Stout +* John Veillette+ +* Joshua Teves +* Julia Guiomar Niso Galán +* Luke Bloy +* Marijn van Vliet +* Martin Schulz +* Mathieu Scheltienne +* Matthias Eberlein+ +* Michiru Kaneda+ +* Mikołaj Magnuski +* Nikolai Chapochnikov+ +* Richard Höchenberger +* Robert Luke +* Scott Huberty+ +* Senwen Deng+ +* Stefan Appelhoff +* Steve Matindi +* Thomas Hartmann diff --git a/doc/changes/v1.1.rst b/doc/changes/v1.1.rst new file mode 100644 index 00000000000..03b03dc3f18 --- /dev/null +++ b/doc/changes/v1.1.rst @@ -0,0 +1,271 @@ +.. _changes_1_1_0: + +Version 1.1.0 (2022-08-03) +-------------------------- + +Enhancements +~~~~~~~~~~~~ +- Added new built-in montage R-Net 128 Ch (Brain Products) (:gh:`10974` by :newcontrib:`Ilias Machairas`) + +- :class:`~mne.channels.DigMontage` objects can now be checked for equality via ``==`` (:gh:`10942` by :newcontrib:`Tziona NessAiver`) + +- Add a note to clarify the inclusion of average referencing gain matrix during whitening (:gh:`10922` by :newcontrib:`Mingjian He`) + +- Add time axis scale bar for :meth:`mne.Epochs.plot` using matplotlib backend (:gh:`10736` by :newcontrib:`Jordan Drew`) + +- Add :func:`mne.bem.distance_to_bem` to find depth of source positions (:gh:`10632` by :newcontrib:`Matt Courtemanche`) + +- Add support for ahdr files in :func:`mne.io.read_raw_brainvision` (:gh:`10515` by :newcontrib:`Alessandro Tonin`) + +- Add support for reading data from Gowerlabs devices to :func:`mne.io.read_raw_snirf` (:gh:`10555` by :newcontrib:`Samuel Powell` and `Robert Luke`_) + +- Add built-in sensor layout files for Geodesic Head Web 130 and 280 devices (:gh:`10627` by `Evan Hathaway`_) + +- Add ``mne-icalabel`` to :func:`mne.sys_info` (:gh:`10615` by `Adam Li`_) + +- Add support for ``overview_mode`` in :meth:`raw.plot() ` and related functions/methods (:gh:`10501` by `Eric Larson`_) + +- Add :meth:`mne.io.Raw.crop_by_annotations` method to get chunks of Raw data based on :class:`mne.Annotations`. (:gh:`10460` by `Alex Gramfort`_) + +- The ``pick_channels`` method gained a ``verbose`` parameter, allowing e.g. 
to suppress messages about removed projectors (:gh:`10544` by `Richard Höchenberger`_) + +- Add :func:`mne.viz.plot_projs_joint` for joint plotting of projectors and Evoked (:gh:`10720` by `Eric Larson`_) + +- The :func:`mne.make_forward_dipole` function can now take a list of dipoles to make a multi-dipole forward models (:gh:`10464` by `Marijn van Vliet`_) + +- Add ``mode='outlines'`` support to :meth:`mne.Dipole.plot_locations` and :func:`mne.viz.plot_dipole_locations` (:gh:`10699` by `Eric Larson`_) + +- Add :meth:`mne.preprocessing.ICA.find_bads_muscle` to find muscle-related ICA components with an example, :ref:`ex-muscle-ica` (:gh:`10534` by `Alex Rockhill`_) + +- Add example of Xfit-style ECD modeling using multiple dipoles (:gh:`10464` by `Marijn van Vliet`_) + +- Add ``head_source`` argument to :func:`mne.make_field_map` to allow selecting which head source to use (:gh:`10568` by `Eric Larson`_) + +- Add support for ``n_jobs=None`` to support ``joblib:joblib.parallel_backend`` for more precise control over parallelization (:gh:`10567` by `Eric Larson`_) + +- It is now possible to compute inverse solutions with restricted source orientations using discrete forward models (:gh:`10464` by `Marijn van Vliet`_) + +- The new function :func:`mne.preprocessing.maxwell_filter_prepare_emptyroom` simplifies the preconditioning of an empty-room recording for our Maxwell filtering operations (:gh:`10533` by `Richard Höchenberger`_ and `Eric Larson`_) + +- Add keyboard shortcuts to toggle :meth:`mne.preprocessing.ICA.plot_properties` topomap channel types ('t') and power spectral density log-scale ('l') (:gh:`10557` by `Alex Rockhill`_) + +- Add ``--mri``, and ``--threshold`` options to :ref:`mne make_scalp_surfaces` to improve head surface mesh extraction (:gh:`10591` by `Eric Larson`_) + +- Add :func:`mne.preprocessing.compute_bridged_electrodes` to detect EEG electrodes with shared spatial sources due to a conductive medium connecting two or more electrodes, add :ref:`ex-eeg-bridging` for an example and :func:`mne.viz.plot_bridged_electrodes` to help visualize (:gh:`10571` by `Alex Rockhill`_) + +- Add ``'nearest'`` as an option for the ``image_interp`` argument in :func:`mne.viz.plot_topomap` to plot a topomap without interpolation using a Voronoi parcelation (:gh:`10571` by `Alex Rockhill`_) + +- Add :func:`mne.preprocessing.interpolate_bridged_electrodes` to use the spatially smeared signal to get a better interpolation rather than dropping those channels (:gh:`10587` by `Alex Rockhill`_) + +- Add support for hemoglobin type fNIRS data to temporal derivative distribution repair (TDDR) :func:`mne.preprocessing.nirs.temporal_derivative_distribution_repair` (:gh:`10125` by `Johann Benerradi`_) + +- :func:`mne.viz.plot_evoked_topomap` and :meth:`mne.Evoked.plot_topomap` now display the time range the map was averaged over if ``average`` was passed (:gh:`10606` by `Richard Höchenberger`_) + +- :func:`mne.viz.plot_evoked_topomap` and :meth:`mne.Evoked.plot_topomap` can now average the topographic maps across different time periods for each time point. 
To do this, pass a list of periods via the ``average`` parameter (:gh:`10610` by `Richard Höchenberger`_) + +- :func:`mne.viz.plot_evoked` and :meth:`mne.Evoked.plot` gained a new parameter, ``highlight``, to visually highlight time periods of interest (:gh:`10614` by `Richard Höchenberger`_) + +- Added fNIRS support to :func:`mne.Info.get_montage` (:gh:`10611` by `Robert Luke`_) + +- Add :func:`mne.viz.plot_ch_adjacency` to visualize and manually edit the channel adjacency matrix (:gh:`10633` by `Mikołaj Magnuski`_) + +- Add :meth:`mne.viz.Brain.get_view` to get the current camera parameters such that they can be passed to :meth:`mne.viz.Brain.show_view` (:gh:`10661` by `Alex Rockhill`_) + +- Added support for Aurora version 2021.9.0.6 to :func:`mne.io.read_raw_nirx` (:gh:`10668` by `Robert Luke`_) + +- Added BrainVision format (``.vhdr``, ``.vmrk``, ``.eeg``) to :func:`mne.export.export_raw` (:gh:`10681` by `Stefan Appelhoff`_) + +- Added ``export`` method to :class:`mne.Evoked` (:gh:`10681` by `Stefan Appelhoff`_) + +- Add timeformat ``YYYY/MM/DD`` to ``mne.io.ctf.info._convert_time`` (:gh:`10674` by `Simon Kern`_) + +- :meth:`mne.Report.add_figure` and :meth:`mne.Report.add_image` gained a new parameter, ``section``, allowing to put multiple figures or images into the same "content block". Previously, the only way to group figures and images was by means of a slider, which sometimes made content hard to discover. The slider remains available as an alternative (:gh:`10694` by `Richard Höchenberger`_) + +- All functions and methods that plot topographic maps for EEG (2D projections of the EEG sensor locations) now accept the parameter value ``sphere='eeglab'`` to lay out the sensors with respect to the head circle in a similar way to how EEGLAB does, i.e., T7/T8 and Fpz/Oz are placed directly on the circle for template montages (:gh:`10572` by `Richard Höchenberger`_) + +- :func:`mne.channels.get_builtin_montages` gained a new parameter, ``descriptions``, which allows to retrieve the descriptions of the montages in addition to their names (:gh:`10373` by `Richard Höchenberger`_) + +- Add ``include`` option to :meth:`mne.io.read_raw_edf`, :meth:`mne.io.read_raw_bdf` and :meth:`mne.io.read_raw_gdf`. (:gh:`10734` by `Michiru Kaneda`_) + +- :func:`mne.gui.coregistration` gained a new parameter, ``fullscreen``, to start the GUI in fullscreen mode (:gh:`10284` by `Richard Höchenberger`_) + +- The built-in FieldTrip channel adjacency matrices, which can be read via :func:`~mne.channels.read_ch_adjacency`, have been synchronized with FieldTrip again. This means that numerous new adjacencies have been added, most notably those for standard 10-05 and 10-20 montages (:gh:`10749` by `Richard Höchenberger`_) + +- :meth:`mne.Evoked.plot_field` gained a new parameter, ``interaction``, to control the rotation axes when interacting with the head (:gh:`10788` by `Richard Höchenberger`_) + +- Add :func:`mne.read_evoked_besa` for reading evokeds from BESA ``.avr`` and ``.mul`` files. (:gh:`10892` by `Marijn van Vliet`_) + +- Add :meth:`mne.time_frequency.EpochsTFR.decimate` to reduce size of time-frequency epochs objects (:gh:`10940` by `Alex Rockhill`_) + +- Add an alias that maps EGI reference channel names (``VREF`` or ``Vertex Reference``) to the electrode name in the standard EGI montages (``Cz``). 
Now setting standard EGI montages with :meth:`~mne.io.Raw.set_montage` with ``match_alias=True`` will avoid raising an error (:gh:`10823` by `Scott Huberty`_) + +- Add provenance in :func:`mne.time_frequency.EpochsTFR.apply_baseline` (:gh:`10979` by `Alex Rockhill`_) + +Bugs +~~~~ +- Fix bug in ``mne.surface._project_onto_surface`` for the condition ``method != 'accurate' and return_nn`` (:gh:`10930` by `Christian O'Reilly`_) + +- Fix bug in :func:`mne.export.export_raw` to ignore None value in filenames attribute of :class:`mne.io.RawArray` (:gh:`10927` by :newcontrib:`Reza Nasri`) + +- Fix bug in :func:`mne.io.read_raw_edf` to allow reading in all Hypnodyne ZMax EDFs to be read in without issues (:gh:`10754` by :newcontrib:`Frederik Weber`) + +- Fix bug in :func:`mne.Epochs.drop_bad` where data was loaded although no rejection had to be performed (:gh:`10718` by :newcontrib:`Lukas Gemein`) + +- Pick also bad channels when selecting T1T2 magnetometers in :func:`mne.preprocessing.maxwell_filter` (:gh:`10639` by :newcontrib:`Matti Toivonen`) + +- Make ``color`` parameter check in in :func:`mne.viz.plot_evoked_topo` consistent (:gh:`10217` by :newcontrib:`T. Wang` and `Stefan Appelhoff`_) + +- Fix bug in :func:`mne.io.read_raw_brainvision` when BrainVision data are acquired with the Brain Products "V-Amp" amplifier and disabled lowpass filter is marked with value ``0`` (:gh:`10517` by :newcontrib:`Alessandro Tonin`) + +- Fix bug in :func:`mne.pick_types` and related methods where ``csd=True`` was not passed handled properly (:gh:`10470` by :newcontrib:`Matthias Dold`) + +- Fix bug where plots produced using the ``'qt'`` / ``mne_qt_browser`` backend could not be added using :meth:`mne.Report.add_figure` (:gh:`10485` by `Eric Larson`_) + +- Fix bug where ``theme`` was not handled properly in :meth:`mne.io.Raw.plot` (:gh:`10487`, :gh:`10500` by `Mathieu Scheltienne`_ and `Eric Larson`_) + +- Fix bug in :meth:`raw.crop(start, stop) ` that would cause annotations to be erroneously shifted when ``start != 0`` and no measurement date was set. 
(:gh:`10491` by `Eric Larson`_) + +- Fix bug in :func:`mne.io.read_raw_bti` where unknown electrode locations were not handled properly (:gh:`10662` by `Eric Larson`_) + +- Fix bug in :func:`mne.io.read_raw_ctf` on Windows where large files could not be read (:gh:`10866` by `Eric Larson`_) + +- Fix bug in :func:`mne.io.read_raw_ctf` where invalid measurement dates were not handled properly (:gh:`10957` by `Jean-Rémi King`_ and `Eric Larson`_) + +- Rendering issues with recent MESA releases can be avoided by setting the new environment variable ``MNE_3D_OPTION_MULTI_SAMPLES=1`` or using :func:`mne.viz.set_3d_options` (:gh:`10513` by `Eric Larson`_) + +- Fix behavior for the ``pyvista`` 3D renderer's ``quiver3D`` function so that default arguments plot a glyph in ``arrow`` mode (:gh:`10493` by `Alex Rockhill`_) + +- Retain epochs metadata when using :func:`mne.channels.combine_channels` (:gh:`10504` by `Clemens Brunner`_) + +- Fix epochs indexing with metadata containing boolean type and missing values (:gh:`10705` by `Clemens Brunner`_ and `Alex Gramfort`_) + +- Fix reading of fiducial locations in :func:`mne.io.read_raw_eeglab` (:gh:`10521` by `Alex Gramfort`_) + +- Prevent creation of montage with invalid ``[x, y, z]`` coordinates with :func:`mne.channels.make_dig_montage` (:gh:`10547` by `Mathieu Scheltienne`_) + +- Fix bug in coregistration GUI that prevented it from starting up if only a high-resolution head model was available (:gh:`10543` by `Richard Höchenberger`_) + +- Fix bug with :class:`mne.Epochs.add_reference_channels` where attributes were not updated properly so subsequent `~mne.Epochs.pick_types` calls were broken (:gh:`10912` by `Eric Larson`_) +- +- Fix bug in the :class:`mne.viz.Brain` tool bar that prevented the buttons to call the corresponding feature (:gh:`10560` by `Guillaume Favelier`_) + +- Fix issue with saving epochs once :func:`~mne.preprocessing.compute_current_source_density` has been used if a rejection threshold was used first (:gh:`10619` by `Alex Rockhill`_ and `Richard Höchenberger`_) + +- Fix bug in :func:`mne.viz.plot_evoked_image` that would cause incorrect sub-titles when using ``group_by`` (:gh:`10618` by `Reza Shoorangiz`_) + +- Reduce memory usage when loading an EDF file with ``preload=False`` (:gh:`10638` by `Clemens Brunner`_) + +- Fix bug in :meth:`mne.io.Raw.get_channel_types` and related methods where ``unique=True`` would return a :class:`python:set` with arbitrary order rather than a :class:`python:list` that preserves the same order as ``inst.ch_names`` (:gh:`10720` by `Eric Larson`_) + +- In :func:`mne.preprocessing.find_bad_channels_maxwell`, do not re-filter the data if a low-pass filter with the requested frequency has already been applied (:gh:`10664` by `Richard Höchenberger`_) + +- Fix a problem in :meth:`mne.Evoked.get_peak`, where under certain circumstances the ``mode`` parameters ``'pos'`` and ``'neg'`` were not honored when ``tmin`` and/or ``tmax`` were passed as well (:gh:`10686` by `Richard Höchenberger`_) + +- :func:`mne.read_evokeds`, :func:`mne.channels.read_custom_montage`, :func:`mne.channels.read_dig_hpts`, :func:`mne.channels.read_dig_polhemus_isotrak`, and :func:`mne.channels.read_polhemus_fastscan` now correctly expand ``~`` in the provided path to the user's home directory (:gh:`10685`, :gh:`10688` by `Richard Höchenberger`_) + +- Fix bug in :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab`, where the presence of channels without position in the ``.set`` file caused positions incorrectly assigned to 
all further channels (:gh:`8754` by `Mikołaj Magnuski`_) + +- Combining channels of :class:`mne.Epochs` or :class:`mne.Evoked` objects now properly retains baseline information (:gh:`10703` by `Clemens Brunner`_) + +- In :class:`mne.Report`, some figures would have an undesired border added to the edges; this has now been resolved (:gh:`10730` by `Richard Höchenberger`_) + +- Fix selection of EEG channels and selected sphere when plotting bridged electrodes with :func:`mne.viz.plot_bridged_electrodes` (:gh:`10753` by `Mathieu Scheltienne`_) + +- Fix broken links in :ref:`tut-fix-meshes` (:gh:`10765` by `Alex Rockhill`_) + +- The built-in FieldTrip channel adjacency matrix for ``easycapM1`` had a bug, where the channel ``PO8`` was incorrectly labeled as ``PO6`` This has been resolved by updating the adjacency matrix to the latest version provided by FieldTrip (:gh:`10749` by `Richard Höchenberger`_) + +- Add a YouTube video tutorial for editing Freesurfer surfaces in Blender to :ref:`tut-fix-meshes` (:gh:`10778` by `Alex Rockhill`_) + +- The default interaction style of :meth:`mne.Evoked.plot_field` has been changed such that if behaves like :func:`~mne.viz.plot_alignment` when trying to rotate the head (:gh:`10788` by `Richard Höchenberger`_) + +- Add a video on how to operate the intracranial electrode contact location GUI in :ref:`tut-ieeg-localize` (:gh:`10800` by `Alex Rockhill`_) + +- Add a video on how to operate the coregistration GUI in :ref:`tut-source-alignment` (:gh:`10802` by `Alex Rockhill`_) + +- Add ``show`` and ``block`` arguments to :func:`mne.gui.coregistration` and ``mne.gui.locate_ieeg`` to pop up the GUIs and halt execution of subsequent code respectively (:gh:`10802` by `Alex Rockhill`_) + +- Correctly report the number of available projections when printing measurement info in a Jupyter notebook (:gh:`10471` by `Clemens Brunner`_) + +- Fix value set in ``raw.orig_format`` for readers of BrainVision (ASCII format), EGI and Artemis123 files (:gh:`10851` by `Mathieu Scheltienne`_) + +- When reading EGI MFF files, channel names are no longer ignored and reference channel information is properly incorporated (:gh:`10898` by `Scott Huberty`_ and `Daniel McCloy`_) + +- Fix bug in :func:`mne.time_frequency.psd_array_welch` and related functions/methods where the bias was not accounted for when ``average='median'`` (:gh:`10990` by `Eric Larson`_) + +- Fix bug in :class:`mne.decoding.TemporalFilter` where filter parameters were not handled properly (:gh:`10968` by `Eric Larson`_) + +- Fix documentation bug in ``ica.plot_sources`` to specify that ``picks`` keyword argument is for picking ICA components to plot (:gh:`10936` by `Adam Li`_) + +- Annotations contained in EDF files are correctly read as UTF-8 according to the EDF specification (:gh:`10963` by `Clemens Brunner`_) + +- Fix bug where complex-valued topoplots caused an error, convert to amplitude instead (:gh:`10978` by `Alex Rockhill`_) + +API and behavior changes +~~~~~~~~~~~~~~~~~~~~~~~~ +- When creating BEM surfaces via :func:`mne.bem.make_watershed_bem` and :func:`mne.bem.make_flash_bem`, the ``copy`` parameter now defaults to ``True``. This means that instead of creating symbolic links inside the FreeSurfer subject's ``bem`` folder, we now create "actual" files. 
This should avoid troubles when sharing files across different operating systems and file systems (:gh:`10531` by `Richard Höchenberger`_) + +- The ordering of channels returned by :func:`mne.io.read_raw_nirx` is now ordered by channel name, rather than the order provided by the manufacturer. This enables consistent ordering of channels across different file types (:gh:`10555` by `Robert Luke`_) + +- For :func:`mne.viz.plot_topomap`, :func:`mne.viz.plot_evoked_topomap`, :func:`mne.viz.plot_arrowmap`, :func:`mne.viz.plot_ica_components`, :meth:`mne.Covariance.plot_topomap`, :meth:`mne.Evoked.plot_topomap`, :meth:`mne.Evoked.animate_topomap`, :meth:`mne.decoding.CSP.plot_patterns`, :meth:`mne.Projection.plot_topomap` and :meth:`mne.preprocessing.ICA.plot_components` the topomap image interpolation was previously a cubic interpolation but now can be ``'linear'`` and ``'nearest'`` as well. Unless ``image_interp='nearest'`` is passed mne uses a subsequent matplotlib bilinear interpolation to make the interpolated image smoother. Previously, ``'image_interp'`` controlled this second interpolation step and, for the first interpolation, the only option was cubic. To simplify, ``image_interp`` is now responsible for the main interpolation and the subsequent matplotlib image interpolation is bilinear but can be changed afterward using ``im.set_interpolation`` (:gh:`10617` by `Alex Rockhill`_) + +- The FLASH BEM surface generation via :func:`mne.bem.make_flash_bem` or the :ref:`mne flash_bem` command has now been clarified in terms of expected inputs. In :func:`mne.bem.convert_flash_mris` the ``convert`` parameter is now deprecated and the ``flash5`` and ``flash30`` now accept list of file names or nibabel images. One breaking change is that now the flash files are looked for as mef05_*.mgz and mef30_*.mgz in mri/flash folder (:gh:`10715` by `Alex Gramfort`_) + +- :func:`~mne.viz.plot_compare_evokeds` gets a new parameter ``time_unit`` which can be ``"s"`` or ``"ms"`` (:gh:`10950` by `Daniel McCloy`_) + +- The ``to_data_frame`` methods for :meth:`Raw `, :meth:`Epochs `, :meth:`Evoked `, :meth:`SourceEstimate `, :meth:`EpochsTFR `, and :meth:`AverageTFR ` all now have default ``time_format=None`` (keep time in seconds) instead of ``time_format='ms'`` (convert time to milliseconds) (:gh:`10925` by `Daniel McCloy`_) + +- :func:`mne.Evoked.decimate` now aligns to the sample at time=0 (so that the exact sample when the event occurred is preserved) to be consistent with :func:`mne.Epochs.decimate` (:gh:`10945` by `Alex Rockhill`_) + +Authors +~~~~~~~ + +* Adam Li +* Adeline Fecker+ +* Alessandro Tonin+ +* Alex Ciok+ +* Alex Rockhill +* Alexandre Gramfort +* Archit Singhal+ +* Ashley Drew+ +* Ben Beasley+ +* Carina Forster+ +* Christian O'Reilly +* Clemens Brunner +* Cora Kim +* Daniel McCloy +* Dominik Welke +* Dominique Makowski +* Eric Larson +* Evan Hathaway +* Frederik D. Weber+ +* Guillaume Favelier +* Hamid Maymandi+ +* Ilias Machairas+ +* Jean-Rémi King +* Johann Benerradi +* Jon Houck +* Jona Sassenhagen +* Jordan Drew+ +* Lukas Gemein+ +* Marijn van Vliet +* Martin Schulz +* Mathieu Scheltienne +* Matt Courtemanche+ +* Matthias Dold+ +* Matti Toivonen+ +* Michiru Kaneda+ +* Mikołaj Magnuski +* Mingjian He+ +* Naveen Srinivasan+ +* Phillip Alday +* Reza Nasri+ +* Reza Shoorangiz +* Richard Höchenberger +* Rob Luke +* Scott Huberty+ +* Simon Kern +* Sondre Foslien+ +* Stefan Appelhoff +* T. 
Wang+ +* Tziona NessAiver+ diff --git a/doc/changes/v1.10.rst b/doc/changes/v1.10.rst new file mode 100644 index 00000000000..7c6baae19a5 --- /dev/null +++ b/doc/changes/v1.10.rst @@ -0,0 +1,130 @@ +.. _changes_1_10_0: + +Version 1.10.0 (2025-07-14) +=========================== + +Bugfixes +-------- + +- Fixed behavior of :func:`mne.viz.plot_source_estimates` where the ``title`` was not displayed properly, by :newcontrib:`Shristi Baral`. (`#12828 `__) +- Return events when requested even when current matches the desired sfreq in :meth:`mne.io.Raw.resample` by :newcontrib:`Roy Eric Wieske`. (`#13070 `__) +- Fix bug in :func:`mne.io.read_raw_gdf`, by :newcontrib:`Rongfei Jin`. (`#13113 `__) +- Fixed CTF cHPI coil location detection in :func:`mne.chpi.extract_chpi_locs_ctf` by :newcontrib:`Shresth Keshari`. (`#13123 `__) +- Fix bug with sampling frequency estimation in snirf files, by `Daniel McCloy`_ and :newcontrib:`Yixiao Shen`. (`#13184 `__) +- Fix bug where :func:`mne.gui.coregistration` would emit an error for some CTF datasets, by :newcontrib:`Wei Xu`. (`#13218 `__) +- Improved the configuration json to handle with parallel access with file lock configuration by :newcontrib:`Bruno Aristimunha`. (`#13241 `__) +- Update :func:`mne.viz.plot_alignment` and related functions to support PyVista deprecation of positional arguments by :newcontrib:`user27182`. (`#13277 `__) +- Fixed bug where :func:`mne.preprocessing.maxwell_filter_prepare_emptyroom` would not reliably identify meg channel types for matching bads across emptyroom and task, by :newcontrib:`Harrison Ritz`. (`#13280 `__) +- Fixed bug where saving FIFF files failed when ``info["subject_info"]["birthday"]`` was a :class:`pandas.Timestamp` instead of :class:`datetime.date`, by :newcontrib:`Laurent Le Mentec`. (`#13284 `__) +- Fixed almost-white (barely visible) channel traces when using ``spatial_colors=True`` in :meth:`mne.io.Raw.plot_psd`, :meth:`mne.Epochs.plot_psd`, and related PSD-plot helpers, by :newcontrib:`Mojackhak`. (`#13286 `__) +- Fix bug where :func:`mne.export.export_raw` does not correct for recording start time (:attr:`raw.first_time `) when exporting Raw instances to EDF or EEGLAB formats, by `Qian Chu`_. (`#12656 `__) +- Fix epoch indexing in :class:`mne.time_frequency.EpochsTFRArray` when initialising the class with the default ``drop_log`` parameter, by `Thomas Binns`_. (`#13028 `__) +- Fix loading and saving of :class:`~mne.time_frequency.EpochsSpectrum` objects that contain slash-separators in their condition names, by `Daniel McCloy`_. (`#13042 `__) +- Fix input boxes for the max value not showing when plotting fieldlines with :func:`~mne.viz.plot_evoked_field` when ``show_density=False``, by `Marijn van Vliet`_. (`#13048 `__) +- Fix bug with saving of anonymized data when helium info is present in measurement info, by `Eric Larson`_. (`#13056 `__) +- Fix computation of time intervals in :func:`mne.preprocessing.compute_fine_calibration` by `Eric Larson`_. (`#13062 `__) +- Fix bug in the colorbars created by :func:`mne.viz.plot_evoked_topomap` by `Santeri Ruuskanen`_. 
(`#13063 `__) +- Improved sklearn class compatibility and compliance, which resulted in some parameters of classes having an underscore appended to their name during ``fit``, such as: + + - :class:`mne.decoding.FilterEstimator` parameter ``picks`` passed to the initializer is set as ``est.picks_`` + - :class:`mne.decoding.UnsupervisedSpatialFilter` parameter ``estimator`` passed to the initializer is set as ``est.estimator_`` + + Unused ``verbose`` class parameters (that had no effect) were removed from :class:`~mne.decoding.PSDEstimator`, :class:`~mne.decoding.TemporalFilter`, and :class:`~mne.decoding.FilterEstimator` as well. + Changes by `Eric Larson`_. (`#13065 `__) +- Fix bug where taper weights were not correctly applied when computing multitaper power with :meth:`mne.Epochs.compute_tfr` and :func:`mne.time_frequency.tfr_array_multitaper`, by `Thomas Binns`_. (`#13067 `__) +- Fix bug cause by unnecessary assertion when loading mixed frequency EDFs without preloading :func:`mne.io.read_raw_edf` by `Simon Kern`_. (`#13069 `__) +- Fix bug with automated Mesa 3D detection for proper 3D option setting on systems with software rendering, by `Eric Larson`_. (`#13082 `__) +- Fix bug with reading digitization points from digitization strings with newer MEGIN systems, by `Eric Larson`_. (`#13083 `__) +- Fix bug when loading certain EEGLAB files that do not contain a ``nodatchans`` field, by `Clemens Brunner`_. (`#13097 `__) +- Do not convert the first "New Segment" marker in a BrainVision file to an annotation, as it only contains the recording date (which is already available in ``info["meas_date"]``), by `Clemens Brunner`_. (`#13100 `__) +- Take units (m or mm) into account when drawing :func:`~mne.viz.plot_evoked_field` on top of :class:`~mne.viz.Brain`, by `Marijn van Vliet`_. (`#13101 `__) +- Improved the handling of problematic MFF files in `mne.io.read_raw_egi` by `Scott Huberty`_. (`#13145 `__) +- Fix bug in :func:`mne.bem.make_watershed_bem` where function would fail if ``talairach_with_skull.lta`` did not exist due to changes in Freesurfer by `Peter Molfese`_. (`#13172 `__) +- Fix bug where :func:`mne.export.export_raw` might allocate huge intermediate arrays unnecessarily, when padding data blocks during export to EDF format, by `Daniel McCloy`_. (`#13174 `__) +- Fix bug with least-squares fitting of head origin using digitization points in :func:`mne.preprocessing.maxwell_filter`, by `Eric Larson`_. (`#13178 `__) +- Fix bug with :func:`mne.viz.plot_alignment` where ``eeg="projected"`` was not plotted, by `Eric Larson`_. (`#13179 `__) +- Fixed bug with filter padding type in :func:`mne.io.Raw.plot` and related functions to reduce edge ringing during data display, by `Eric Larson`_. (`#13183 `__) +- Handle channels with potential electrode location in :func:`~mne.preprocessing.maxwell_filter_prepare_emptyroom`, by `Mathieu Scheltienne`_. (`#13208 `__) +- Fixed bug where the file_id is not set when the file contains a directory at the end of the file, by `Théodore Papadopoulo`_. (`#13268 `__) +- Fix bug with :func:`mne.simulation.simulate_raw` and :class:`mne.simulation.SourceSimulator` where using different head positions with ``head_pos`` and a BEM would raise an error, by `Eric Larson`_. (`#13276 `__) +- Fix y-axis label in PSD plot when ``dB=True`` to show a more conventional label (i.e., dB/Hz or dB/√Hz), by `Clemens Brunner`_. 
(`#13298 `__)
+- Fix bug in :func:`mne.sys_info` where calling it in Windows could lead to an error while trying to get the amount of available memory, by `Eric Larson`_. (`#13310 `__)
+- Change default pupil unit scalings from micrometers to millimeters by `Scott Huberty`_. (`#13314 `__)
+- Fix :meth:`mne.channels.DigMontage.rename_channels` so that the changed montage is returned instead of ``None``, by `Stefan Appelhoff`_. (`#13320 `__)
+
+
+API changes by deprecation
+--------------------------
+
+- The backward-compatible defaults in :func:`mne.preprocessing.maxwell_filter` of ``st_overlap=False`` and ``mc_interp=None`` will change to their smooth variants ``True`` and ``"hann"``, respectively, in 1.11, by `Eric Larson`_. (`#13080 `__)
+
+
+New features
+------------
+
+- Add PCA-OBS preprocessing for the removal of heart-artefacts from EEG or ESG datasets via :func:`mne.preprocessing.apply_pca_obs`, by :newcontrib:`Emma Bailey` and :newcontrib:`Steinn Hauser Magnusson`. (`#13037 `__)
+- Add :meth:`mne.Evoked.interpolate_to` to allow interpolating EEG data to other montages, by :newcontrib:`Antoine Collas`. (`#13044 `__)
+- Allow for ``topomap`` plotting of optically pumped MEG (OPM) sensors with overlapping channel locations. When channel locations overlap, plot the most radially oriented channel. By :newcontrib:`Harrison Ritz`. (`#13144 `__)
+- Improved error message in :meth:`mne.io.Raw.save` when attempting to overwrite a file, by :newcontrib:`Sebastian Jentschke`. (`#13233 `__)
+- Add new ``select`` parameter to :func:`mne.viz.plot_evoked_topo` and :meth:`mne.Evoked.plot_topo` to toggle lasso selection of sensors, by `Marijn van Vliet`_. (`#12071 `__)
+- Add source space(s) visualization(s) in :func:`mne.Report.add_forward`, by `Victor Ferat`_. (`#12848 `__)
+- Added the option to return taper weights from :func:`mne.time_frequency.tfr_array_multitaper`, and taper weights are now stored in the :class:`mne.time_frequency.BaseTFR` objects, by `Thomas Binns`_. (`#12910 `__)
+- Add ``fig.mne`` container for :class:`Colorbar ` in :func:`plot_connectivity_circle ` to allow users to access it directly, by `Santeri Ruuskanen`_. (`#13019 `__)
+- Added :func:`mne.time_frequency.combine_tfr` to allow combining TFRs across tapers, by `Thomas Binns`_. (`#13054 `__)
+- Add the function :func:`mne.time_frequency.combine_spectrum` for combining data across :class:`mne.time_frequency.Spectrum` objects, and allow :func:`mne.grand_average` to operate on :class:`mne.time_frequency.Spectrum` objects, by `Thomas Binns`_. (`#13058 `__)
+- Add smooth processing of tSSS windows (using overlap-add) and movement compensation (using smooth interpolation of head positions) in :func:`mne.preprocessing.maxwell_filter` via ``st_overlap`` and ``mc_interp`` options, respectively, by `Eric Larson`_. (`#13080 `__)
+- The :meth:`mne.Info.save` method now has an ``overwrite`` and a ``verbose`` parameter, by `Stefan Appelhoff`_. (`#13107 `__)
+- Add ``upsampling`` option to :func:`mne.make_field_map` to allow upsampling MEG helmet surfaces for plotting, by `Eric Larson`_. (`#13179 `__)
+- New argument ``sfreq`` to :func:`mne.io.read_raw_snirf`, to allow overriding the sampling frequency estimated from (possibly jittered) sampling periods in the file, by `Daniel McCloy`_. (`#13184 `__)
+- Add an ``extras`` attribute to :class:`mne.Annotations` for storing arbitrary metadata, by `Pierre Guetschel`_.
(`#13228 `__) +- :meth:`mne.Report.add_trans` now accepts ``"auto"`` and ``"fsaverage"`` as arguments for the ``trans`` parameter, by `Stefan Appelhoff`_. (`#13232 `__) +- Add a new parameter ``plot_kwargs`` to :meth:`mne.Report.add_trans`, to control low level properties of the visualization, by `Stefan Appelhoff`_. (`#13234 `__) +- :func:`~mne.channel_indices_by_type` now has an ``exclude`` parameter, by `Stefan Appelhoff`_. (`#13293 `__) + + +Other changes +------------- + +- Add test analytics functionality to track and analyze test performance, by :newcontrib:`Katia Al-Amir`. (`#13225 `__) + + +Authors +------- + +* Alexandre Gramfort +* Antoine Collas+ +* Britta Westner +* Bruno Aristimunha+ +* Clemens Brunner +* Daniel McCloy +* Dominik Welke +* Emma Bailey+ +* Eric Larson +* Harrison Ritz+ +* Jona Sassenhagen +* Katia Al-Amir+ +* Laurent Lementec+ +* Marijn van Vliet +* Mathieu Scheltienne +* Mojackhak+ +* Peter J. Molfese +* Pierre Guetschel +* Qian Chu +* Richard Höchenberger +* Rongfei Jin+ +* Roy Eric Wieske +* Santeri Ruuskanen +* Scott Huberty +* Sebastian Jentschke+ +* Shresth Keshari+ +* Shristi Baral+ +* Simon Kern +* Stefan Appelhoff +* Steinn Hauser Magnússon+ +* Teon L Brooks +* Theodore Papadopoulo +* Thomas S. Binns +* Victor Férat +* Wei Xu+ +* Yixiao Shen+ +* user27182+ diff --git a/doc/changes/v1.2.rst b/doc/changes/v1.2.rst new file mode 100644 index 00000000000..e292b472b03 --- /dev/null +++ b/doc/changes/v1.2.rst @@ -0,0 +1,116 @@ +.. _changes_1_2_0: + +Version 1.2.0 (2022-10-11) +-------------------------- + +Enhancements +~~~~~~~~~~~~ +- Add ``vmin`` and ``vmax`` parameters to :meth:`mne.Evoked.animate_topomap` (:gh:`11073` by :newcontrib:`Mats van Es`) +- Enable common average reference projection for ECoG, sEEG, and DBS data (optionally jointly with ``joint=True``) in :meth:`~mne.io.Raw.set_eeg_reference` and related functions/methods (:gh:`10656` by :newcontrib:`Moritz Gerster` and `Eric Larson`_) +- The ``trans`` parameter in :func:`mne.make_field_map` now accepts a :class:`~pathlib.Path` object, and uses standardised loading logic (:gh:`10784` by :newcontrib:`Andrew Quinn`) +- Add support for computing forward solutions using OpenMEEG_ (:gh:`11011` by `Alex Gramfort`_ and `Eric Larson`_) +- EEGLAB files (saved as MAT versions less than v7.3) can now be imported with :func:`mne.io.read_raw_eeglab` without the optional dependency ``pymatreader`` (:gh:`11006` by `Clemens Brunner`_) +- Add eight source estimate comparison metrics to new submodule :mod:`mne.simulation.metrics` (:gh:`6233` by `Kostiantyn Maksymenko`_ and `Alex Gramfort`_) +- Add :func:`mne.time_frequency.csd_tfr` to compute cross-spectral density from :class:`mne.time_frequency.EpochsTFR` (:gh:`10986` by `Alex Rockhill`_) +- Improve ``repr()`` for :class:`mne.minimum_norm.InverseOperator` when loose orientation is used (:gh:`11048` by `Eric Larson`_) +- Add support for vector and volume source estimates in :func:`mne.minimum_norm.get_point_spread` and :func:`mne.minimum_norm.get_cross_talk` (:gh:`8639`, :gh:`11083` by `Olaf Hauk`_, `Marijn van Vliet`_, `Alex Gramfort`_, and `Eric Larson`_) +- :meth:`mne.Epochs.plot_psd_topomap` now suppresses redundant colorbars when ``vlim='joint'`` (:gh:`11051` by `Daniel McCloy`_) +- Add ``starting_affine`` keyword argument to :func:`mne.transforms.compute_volume_registration` to initialize an alignment with an affine (:gh:`11020` by `Alex Rockhill`_) +- Add HTML representation for `~mne.Evoked` in Jupyter Notebooks (:gh:`11075` by `Valerii Chirkov`_ and 
`Andrew Quinn`_)
+- Add support for ``temperature`` and ``gsr`` (galvanic skin response, i.e., electrodermal activity) channel types (:gh:`11090`, :gh:`11108` by `Eric Larson`_ and `Richard Höchenberger`_)
+- Allow :func:`mne.beamformer.make_dics` to take ``pick_ori='vector'`` to compute vector source estimates (:gh:`10980` by `Alex Rockhill`_)
+- Add ``units`` parameter to :func:`mne.io.read_raw_edf` in case units are missing from the file (:gh:`11099` by `Alex Gramfort`_)
+- Add ``on_missing`` functionality to all of our classes that have a ``drop_channels`` method, to control what happens when channel names are not in the object (:gh:`11077` by `Andrew Quinn`_)
+- Improve interpolation of bridged electrodes with `~mne.preprocessing.interpolate_bridged_electrodes` (:gh:`11094` by `Mathieu Scheltienne`_)
+- Add :func:`mne.minimum_norm.apply_inverse_tfr_epochs` to apply inverse methods to time-frequency resolved epochs (:gh:`11095` by `Alex Rockhill`_)
+- Add support for multiple probes via multiple CSV files passed to :func:`mne.io.read_raw_hitachi` (:gh:`11186` by `Eric Larson`_)
+- Add :func:`mne.chpi.get_active_chpi` to retrieve the number of active HPI coils for each time point (:gh:`11122` by `Eduard Ort`_)
+- Add example of how to obtain time-frequency decomposition using narrow bandpass Hilbert transforms to :ref:`ex-tfr-comparison` (:gh:`11116` by `Alex Rockhill`_)
+- Add ``==`` and ``!=`` comparison between `mne.Projection` objects (:gh:`11147` by `Mathieu Scheltienne`_)
+- Automatically parse temperature channels with :func:`mne.io.read_raw_edf` (:gh:`11150` by `Eric Larson`_ and `Alex Gramfort`_)
+- Add ``unscale`` option to :func:`mne.head_to_mri` to facilitate working with surrogate MRI data (:gh:`11185` by `Eric Larson`_)
+- Add ``encoding`` parameter to :func:`mne.io.read_raw_edf` and :func:`mne.io.read_raw_bdf` to support custom (non-UTF8) annotation channel encodings (:gh:`11154` by `Clemens Brunner`_)
+- :class:`mne.preprocessing.ICA` gained a new method, :meth:`~mne.preprocessing.ICA.get_explained_variance_ratio`, that allows the retrieval of the proportion of variance explained by ICA components (:gh:`11141` by `Richard Höchenberger`_)
+- Add ``on_baseline`` to :meth:`mne.preprocessing.ICA.apply`, :meth:`mne.preprocessing.ICA.plot_overlay`, and :func:`mne.viz.plot_ica_overlay` to allow reapplying baseline correction after applying ICA (:gh:`11232` by `Eric Larson`_)
+- Add config option ``MNE_REPR_HTML`` to disable HTML repr in notebook environments (:gh:`11159` by `Clemens Brunner`_)
+
+Bugs
+~~~~
+- Fix bug in :meth:`mne.io.Raw.crop` where argument ``include_tmax`` was not considered in checking ``tmax`` in bounds (:gh:`11196` by `Lukas Gemein`_)
+- Fix bug in :func:`mne.io.read_raw_eeglab` where unlabeled fiducials caused reading errors (:gh:`11074` by :newcontrib:`Sebastiaan Mathot`)
+- Fix bug in :func:`mne.time_frequency.read_csd` that returned ``projs`` as a list of dict instead of :class:`mne.Projection` (:gh:`11072` by :newcontrib:`Chetan Gohil`)
+- Fix bug in :func:`mne.decoding.TimeFrequency` that prevented cloning if constructor arguments were modified (:gh:`11004` by :newcontrib:`Daniel Carlström Schad`)
+- Fix bug in ``repr`` of decoding estimator classes (e.g., :class:`mne.decoding.SlidingEstimator`) where the parameters were not stored in the result string but instead just printed to ``sys.stdout`` (:gh:`11190` by :newcontrib:`Dominik Wetzel`)
+- Document ``height`` and ``weight`` keys of ``subject_info`` entry in :class:`mne.Info` (:gh:`11019` by
:newcontrib:`Sena Er`) +- Fix bug in :class:`mne.viz.Brain` constructor where the first argument was named ``subject_id`` instead of ``subject`` (:gh:`11049` by `Eric Larson`_) +- Fix bug in :ref:`mne coreg` where the MEG helmet position was not updated during ICP fitting (:gh:`11084` by `Eric Larson`_) +- Fix bug in :func:`~mne.io.read_raw_curry`, and :func:`~mne.io.read_raw_cnt` where digitization points were not read properly (:gh:`11145` by `Eric Larson`_) +- Fix bug in :func:`mne.minimum_norm.apply_inverse_epochs` where the average EEG projector was not checked properly (:gh:`11182` by `Eric Larson`_) +- Fix bug in :func:`mne.viz.plot_filter` when plotting filters created using ``output='ba'`` mode with ``compensation`` turned on. (:gh:`11040` by `Marian Dovgialo`_) +- Fix bugs in documentation of surface :class:`~mne.SourceSpaces` (:gh:`11171` by `Eric Larson`_) +- Fix bug in :func:`mne.viz.plot_compare_evokeds` where automatic legend labels could be excessively long; they are now abbreviated with ``...`` when necessary (:gh:`11181` by `Eric Larson`_) +- Fix bugs with ``verbose='error'`` not being used properly and allowing warnings through (:gh:`11193` by `Eric Larson`_) +- Fix bug in :func:`mne.io.read_raw_bti` where EEG, EMG, and H/VEOG channels were not detected properly, and many non-ECG channels were called ECG. The logic has been improved, and any channels of unknown type are now labeled as ``misc`` (:gh:`11102` by `Eric Larson`_) +- Fix bug in :func:`mne.viz.plot_topomap` when providing ``sphere="eeglab"`` (:gh:`11081` by `Mathieu Scheltienne`_) +- Fix bug in :meth:`mne.Dipole.to_mri` where MRI RAS rather than MRI surface RAS was returned (:gh:`11185` by `Eric Larson`_) +- Fix bug in :meth:`epochs.save ` where the ``verbose`` parameter defaulted to ``True`` instead of ``None`` (:gh:`11191` by `Eric Larson`_) +- The string and HTML representation of :class:`mne.preprocessing.ICA` reported incorrect values for the explained variance. This information has been removed from the representations, and should instead be retrieved via the new :meth:`~mne.preprocessing.ICA.get_explained_variance_ratio` method (:gh:`11141` by `Richard Höchenberger`_) +- Fix bug in :meth:`mne.Evoked.plot` and related methods where a ``np.nan`` location value in any channel causes spatial colours to fail (:gh:`6870` by `Simeon Wong`_) +- Fix bug in :meth:`mne.preprocessing.ICA.find_bads_muscle` where epochs caused an error when passed as the ``inst`` (:gh:`11197` by `Alex Rockhill`_) +- Fix bug in readers where EEG coordinates were assumed to be in head coordinates but no fiducial points were present. Estimated fiducial locations will now be added automatically to reflect the assumption of locations being in the head coordinate frame (:gh:`11212` by `Stefan Appelhoff`_ and `Eric Larson`_) +- The duration of raw data sometimes wasn't displayed correctly in Jupyter notebooks by omitting fractions of a second. 
We now always round up to the next full second so a duration of less than 1 second will not be displayed as a duration of zero anymore (:gh:`11203` by `Richard Höchenberger`_) +- Fix bug in :meth:`mne.Report.add_ica` and where baselines were not reapplied to the data when ``inst`` is Epochs or Evoked (:gh:`11232` by `Eric Larson`_) + +API changes +~~~~~~~~~~~ +- In :meth:`mne.Evoked.plot`, the default value of the ``spatial_colors`` parameter has been changed to ``'auto'``, which will use spatial colors if channel locations are available (:gh:`11201` by :newcontrib:`Hüseyin Orkun Elmas` and `Daniel McCloy`_) +- Starting with this release we now follow the Python convention of using ``FutureWarning`` instead of ``DeprecationWarning`` to signal user-facing changes to our API (:gh:`11120` by `Daniel McCloy`_) +- The ``names`` parameter of :func:`mne.viz.plot_arrowmap` and :func:`mne.viz.plot_regression_weights` has been deprecated; sensor names will be automatically drawn from the ``info_from`` or ``model`` parameter (respectively), and can be hidden, shown, or altered via the ``show_names`` parameter (:gh:`11123` by `Daniel McCloy`_) +- The ``bands`` parameter of :meth:`mne.Epochs.plot_psd_topomap` now accepts :class:`dict` input; legacy :class:`tuple` input is supported, but discouraged for new code (:gh:`11050` by `Daniel McCloy`_) +- The :func:`mne.head_to_mri` new function parameter ``kind`` default will change from ``'ras'`` to ``'mri'`` (:gh:`11185` by `Eric Larson`_) +- The ``show_toolbar`` argument to :class:`mne.viz.Brain` is being removed by deprecation (:gh:`11049` by `Eric Larson`_) +- New classes :class:`~mne.time_frequency.Spectrum` and :class:`~mne.time_frequency.EpochsSpectrum`, created via new methods :meth:`Raw.compute_psd()`, :meth:`Epochs.compute_psd()`, and :meth:`Evoked.compute_psd()` (:gh:`10184` by `Daniel McCloy`_) +- Functions/methods for plotting topomaps now have more consistent signatures; most have gained parameters for finer control of the topomap image and/or colorbar. 
See docstrings for details (:gh:`11123` by `Daniel McCloy`_) +- In :func:`mne.viz.plot_topomap`, passing ``axes=None`` will now create a new :class:`~matplotlib.figure.Figure` instead of plotting into the current axes (:gh:`11123` by `Daniel McCloy`_) +- In :func:`mne.viz.plot_topomap`, the parameter ``show_names`` has been deprecated, use the ``names`` parameter instead (:gh:`11123` by `Daniel McCloy`_) +- In :func:`mne.viz.plot_topomap`, :func:`~mne.viz.plot_tfr_topomap`, :func:`~mne.viz.plot_evoked_topomap`, and related class methods :meth:`mne.Covariance.plot_topomap`, :meth:`mne.Evoked.plot_topomap`, and :meth:`mne.time_frequency.AverageTFR.plot_topomap`: the parameters ``vmin`` and ``vmax`` are deprecated in favor of ``vlim``, for consistency with other ``plot_topomap`` functions and methods (:gh:`11123` by `Daniel McCloy`_) +- In :func:`mne.viz.plot_tfr_topomap`, :func:`~mne.viz.plot_evoked_topomap`, and related class methods :meth:`mne.Covariance.plot_topomap`, :meth:`mne.Evoked.plot_topomap`, and :meth:`mne.time_frequency.AverageTFR.plot_topomap`: the ``title`` parameter has been deprecated; use matplotlib's :meth:`fig.suptitle()` instead (:gh:`11123` by `Daniel McCloy`_) +- In functions/methods for plotting topomaps, the ``outlines='skirt'`` option is deprecated; use ``sphere='eeglab'`` for similar functionality (:gh:`11123` by `Daniel McCloy`_) +- The ``mne.epochs.add_channels_epochs`` function has been deprecated in favor of :meth:`epochs.add_channels ` (:gh:`11180` by `Eric Larson`_) +- The PSD functions that operate on Raw/Epochs/Evoked instances (``mne.time_frequency.psd_welch`` and ``mne.time_frequency.psd_multitaper``) are deprecated; for equivalent functionality create :class:`~mne.time_frequency.Spectrum` or :class:`~mne.time_frequency.EpochsSpectrum` objects instead and then run ``spectrum.get_data(return_freqs=True)`` (:gh:`10184` by `Daniel McCloy`_) +- Added new class :class:`mne.preprocessing.EOGRegression` to allow more flexibility when using regression to reduce EOG artifacts (:gh:`11046` by `Marijn van Vliet`_) +- New parameter ``exclude`` added to :func:`mne.preprocessing.regress_artifact` to prevent regression from being applied to certain channels (:gh:`11046` by `Marijn van Vliet`_) + +Authors +~~~~~~~ + +* Alex Rockhill +* Alexandre Gramfort +* Andrew Quinn+ +* Chetan Gohil+ +* Christian Brodbeck +* Clemens Brunner +* Daniel Carlström Schad+ +* Daniel McCloy +* Dominik Wetzel+ +* Eduard Ort +* Eric Larson +* Hüseyin Orkun Elmas+ +* Jack Zhang +* Joan Massich +* Julius Welzel+ +* Kostiantyn Maksymenko +* Lukas Gemein +* Marian Dovgialo +* Marijn van Vliet +* Mathieu Scheltienne +* Mats van Es+ +* Moritz Gerster+ +* Olaf Hauk +* Richard Höchenberger +* Sebastiaan Mathot+ +* Sena Er+ +* Simeon Wong +* Stefan Appelhoff +* Valerii Chirkov +* luzpaz+ diff --git a/doc/changes/v1.3.rst b/doc/changes/v1.3.rst new file mode 100644 index 00000000000..d5923e12aa9 --- /dev/null +++ b/doc/changes/v1.3.rst @@ -0,0 +1,89 @@ +.. 
_changes_1_3_0: + +Version 1.3.0 (2022-12-21) +-------------------------- + +Enhancements +~~~~~~~~~~~~ +- Improve default line width used in :func:`mne.viz.plot_filter` (:gh:`11333` by :newcontrib:`Toomas Erik Anijärv`) +- Add the argument ``max_iter`` to :func:`~mne.time_frequency.psd_array_multitaper` and to :func:`~mne.time_frequency.csd_array_multitaper` to increase the maximum number of iterations to reach convergence when using ``adaptive=True`` (:gh:`11237` by `Mathieu Scheltienne`_) +- Add a warning to the docstring of :func:`mne.channels.find_ch_adjacency` to encourage users to validate their outputs (:gh:`11236` by `Felix Klotzsche`_ and `Eric Larson`_) +- Mixed, cortical + discrete source spaces with fixed orientations are now allowed. (:gh:`11241` by `Jevri Hanna`_) +- Add size information to the ``repr`` of :class:`mne.Report` (:gh:`11357` by `Eric Larson`_) +- Add support for ``image_format='webp'`` to :class:`mne.Report` when using Matplotlib 3.6+, which can reduce file sizes by up to 50% compared to ``'png'``. The new default ``image_format='auto'`` will automatically use this format if it's available on the system (:gh:`11359` by `Eric Larson`_) +- Add :func:`mne.beamformer.apply_dics_tfr_epochs` to apply a DICS beamformer to time-frequency resolved epochs (:gh:`11096` by `Alex Rockhill`_) +- Add :func:`mne.time_frequency.fwhm` to determine the full-width half maximum for :func:`mne.time_frequency.morlet` (:gh:`11353` by `Britta Westner`_, `Daniel McCloy`_, and `Eric Larson`_) +- Check whether head radius (estimated from channel positions) is correct when reading EEGLAB data with :func:`~mne.io.read_raw_eeglab` and :func:`~mne.read_epochs_eeglab`. If head radius is not within likely values, warn informing about possible units mismatch and the new ``montage_units`` argument (:gh:`11283` by `Mikołaj Magnuski`_). +- Add support for a callable passed in ``combine`` for `mne.time_frequency.AverageTFR.plot` and `mne.time_frequency.AverageTFR.plot_joint` (:gh:`11329` by `Mathieu Scheltienne`_) + +Bugs +~~~~ +- Fix bug in :func:`mne.export.export_raw` when exporting raw to EDF with EDFLib when subject ``sex`` is missing (:gh:`11375` by :newcontrib:`Mark Alexander Henney`) +- Fix bugs in documentation of :func:`mne.time_frequency.tfr_array_multitaper` and related functions/methods (:gh:`11310` by :newcontrib:`Santeri Ruuskanen`) +- Fix bug to be able to customize legends when using :meth:`mne.viz.Brain.add_volume_labels` (:gh:`11289` by :newcontrib:`Mauricio Cespedes Tenorio`) +- Fix bug in documentation of :func:`mne.channels.make_dig_montage` (:gh:`11235` by :newcontrib:`Daniel Hasegan`) +- Add support for bad channel handling in :func:`mne.set_bipolar_reference` (:gh:`11245` by `Moritz Gerster`_, :newcontrib:`Dinara Issagaliyeva`, :newcontrib:`Jennifer Behnke`, :newcontrib:`Hakimeh Aslsardroud`, and :newcontrib:`Pavel Navratil`) +- Fix X coordinates of ear_left and ear_right in head outlines dict (:gh:`11255` by :newcontrib:`Tom Ma`) +- Add type checking for ``tmin`` and ``tmax`` arguments for ``crop`` (:gh:`11263` by :newcontrib:`Carlos de la Torre-Ortiz`). 
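+
+A minimal sketch of the new ``montage_units`` argument mentioned in the enhancements above (and in the EEGLAB units fix below); the file names here are placeholders::
+
+    import mne
+
+    # EEGLAB channel positions are commonly stored in millimeters ("mm", the default)
+    raw = mne.io.read_raw_eeglab("sample.set", montage_units="mm")
+    epochs = mne.read_epochs_eeglab("sample-epo.set", montage_units="mm")
+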
+- Fix bug where trying to set up a projection without an EEG average reference projector would fail (:gh:`11351` by :newcontrib:`Enzo Altamiranda`)
+- Fix bug where ``ica.reject_`` was not saved to disk, and the ``ica.reject_`` property was not inherited from ``Epochs`` when doing ``ICA.fit(epochs)`` (:gh:`11244` by `Eric Larson`_)
+- Fix bug in automatic MESA detection for disabling advanced 3D options (:gh:`11271` by `Eric Larson`_)
+- Fix bug in :func:`mne.head_to_mri` when ``unscale=True`` (:gh:`11303` by `Eric Larson`_)
+- Fix bug in :meth:`set_montage('mgh60') ` and related functions where the ``'mgh60'`` sensor positions were incorrectly ordered (:gh:`11290` by `Eric Larson`_)
+- Fix bug in the ``.compute_psd()`` methods where the number of unaggregated Welch segments was wrongly computed for some inputs, leading to an assertion error when computing the PSD (:gh:`11248` by `Daniel McCloy`_)
+- Fix bug in the :func:`~mne.viz.plot_evoked_topo` and :meth:`~mne.Evoked.plot_topo`, where legend colors were shown incorrectly on newer matplotlib versions (:gh:`11258` by `Erkka Heinila`_)
+- Fix bug where EEGLAB channel positions were read as meters, while they are commonly in millimeters, leading to head outlines of the size of one channel when plotting topomaps. Now ``montage_units`` argument has been added to :func:`~mne.io.read_raw_eeglab` and :func:`~mne.read_epochs_eeglab` to control in what units EEGLAB channel positions are read. The default is millimeters, ``'mm'`` (:gh:`11283` by `Mikołaj Magnuski`_)
+- Fix bug where computing PSD with Welch's method with more jobs than channels would fail (:gh:`11298` by `Mathieu Scheltienne`_)
+- Fix bug where the default FFT length changed for spectrum plots (:gh:`11345` by `Daniel McCloy`_)
+- Fix bug with :func:`mne.decoding.cross_val_multiscore` where progress bars were not displayed correctly (:gh:`11311` by `Eric Larson`_)
+- Fix channel selection edge-cases in `~mne.preprocessing.ICA.find_bads_muscle` (:gh:`11300` by `Mathieu Scheltienne`_)
+- Fix bug with :func:`mne.io.read_raw_curry` where a dot in the parent folders prevented files from being read (:gh:`11340` by `Eric Larson`_)
+- Fix bug with :class:`mne.Report` with ``replace=True`` where the wrong content was replaced and ``section`` was not respected (:gh:`11318`, :gh:`11346` by `Eric Larson`_)
+- Fix bug with unit conversion when setting reference MEG as the channel type in :meth:`mne.io.Raw.set_channel_types` and related methods (:gh:`11344` by `Eric Larson`_)
+- Fix bug where reference MEG channels could not be plotted using :func:`mne.viz.plot_epochs_image` (:gh:`11344` by `Eric Larson`_)
+- Fix bug where ``image_format='gif'`` was errantly documented as being supported by :class:`mne.Report`; it is now only supported in :meth:`mne.Report.add_image` (:gh:`11347` by `Eric Larson`_)
+- Multitaper spectral estimation now uses periodic (rather than symmetric) taper windows. This also necessitated changing the default ``max_iter`` of our cross-spectral density functions from 150 to 250.
(:gh:`11293` by `Daniel McCloy`_) +- Fix :meth:`mne.Epochs.plot_image` and :func:`mne.viz.plot_epochs_image` when using EMG signals (:gh:`11322` by `Alex Gramfort`_) +- Fix selection of ICA components in :func:`mne.viz.plot_ica_components` and :meth:`mne.preprocessing.ICA.plot_components` (:gh:`11369` by `Mathieu Scheltienne`_) +- Fix bug where a Freesurfer reconstruction run with both a T1 and a T2 caused :class:`mne.viz.Brain` not to initialize pial surfaces properly (:gh:`11361` by `Alex Rockhill`_) + +API changes +~~~~~~~~~~~ +- In :func:`mne.time_frequency.dpss_windows`, interpolating is deprecated (nowadays SciPy's computations are fast enough for large ``N`` without interpolation). This affects parameters ``interp_from`` and ``interp_kind``. Two new parameters of the underlying SciPy :func:`~scipy.signal.windows.dpss` function are also exposed: ``sym`` (for choosing symmetric vs. periodic windows) and ``norm`` (window normalization method). (:gh:`11293` by `Daniel McCloy`_) +- In :meth:`mne.decoding.CSP.plot_patterns`, :meth:`mne.decoding.CSP.plot_filters`, :meth:`mne.preprocessing.ICA.plot_components`, and :func:`mne.viz.plot_ica_components`, the parameters ``vmin`` and ``vmax`` are deprecated in favor of ``vlim``, for consistency with other topomap-plotting functions and methods (:gh:`11371` by `Daniel McCloy`_) +- In :meth:`mne.decoding.CSP.plot_patterns` and :meth:`mne.decoding.CSP.plot_filters` the ``title`` parameter is deprecated and will be removed in version 1.4 (:gh:`11371` by `Daniel McCloy`_) +- The APIs of :meth:`mne.preprocessing.ICA.plot_components` and :func:`mne.viz.plot_ica_components` gained new parameters ``show_names``, ``extrapolate``, ``border``, ``size``, ``cnorm``, ``cbar_fmt``, ``axes``, ``nrows``, ``ncols``, for consistency with other topomap-plotting functions and methods (:gh:`11371` by `Daniel McCloy`_) +- The APIs of :meth:`mne.decoding.CSP.plot_patterns` and :meth:`mne.decoding.CSP.plot_filters` gained new parameters ``extrapolate``, ``border``, ``cnorm``, ``axes``, ``nrows``, ``ncols``, for consistency with other topomap-plotting functions and methods (:gh:`11371` by `Daniel McCloy`_) + +Authors +~~~~~~~ + +* Alex Rockhill +* Alexandre Gramfort +* Britta Westner +* Carlos de la Torre+ +* Daniel Hasegan+ +* Daniel McCloy +* Dinara Issagaliyeva+ +* Enzo Altamiranda+ +* Eric Brayet+ +* Eric Larson +* Erkka Heinila +* Felix Klotzsche +* Hakimeh Aslsardroud+ +* Jennifer Behnke+ +* Jevri Hanna +* Lukas Hecker +* Mark Alexander Henney+ +* Mathieu Scheltienne +* Mauricio Cespedes+ +* Mikołaj Magnuski +* Moritz Gerster +* Omer Shubi+ +* Pavel Navratil+ +* Richard Höchenberger +* Santeri Ruuskanen+ +* Stefan Appelhoff +* Timon Merk +* Tom Ma+ +* Toomas Erik Anijärv+ diff --git a/doc/changes/v1.4.rst b/doc/changes/v1.4.rst new file mode 100644 index 00000000000..2fa9ec2a0d1 --- /dev/null +++ b/doc/changes/v1.4.rst @@ -0,0 +1,133 @@ +.. 
_changes_1_4_0: + +Version 1.4.0 (2023-05-10) +-------------------------- + +Enhancements +~~~~~~~~~~~~ +- Add functionality for reading CNT spans/annotations marked bad to :func:`mne.io.read_raw_cnt` (:gh:`11631` by :newcontrib:`Jacob Woessner`) +- Add ``:unit:`` Sphinx directive to enable use of uniform non-breaking spaces throughout the documentation (:gh:`11469` by :newcontrib:`Sawradip Saha`) +- Adjusted the algorithm used in :class:`mne.decoding.SSD` to support non-full rank data (:gh:`11458` by :newcontrib:`Thomas Binns`) +- Changed suggested type for ``ch_groups`` in `mne.viz.plot_sensors` from array to list of list(s) (arrays are still supported). (:gh:`11465` by `Hyonyoung Shin`_) +- Add support for UCL/FIL OPM data using :func:`mne.io.read_raw_fil` (:gh:`11366` by :newcontrib:`George O'Neill` and `Robert Seymour`_) +- Add harmonic field correction (HFC) for OPM sensors in :func:`mne.preprocessing.compute_proj_hfc` (:gh:`11536` by :newcontrib:`George O'Neill` and `Eric Larson`_) +- Forward argument ``axes`` from `mne.viz.plot_sensors` to `mne.channels.DigMontage.plot` (:gh:`11470` by :newcontrib:`Jan Ebert` and `Mathieu Scheltienne`_) +- Add forward IIR filtering, using parameters ``method='iir', phase='forward'`` (:gh:`11078` by :newcontrib:`Quentin Barthélemy`) +- Added ability to read stimulus durations from SNIRF files when using :func:`mne.io.read_raw_snirf` (:gh:`11397` by `Robert Luke`_) +- Add :meth:`mne.Info.save` to save an :class:`mne.Info` object to a fif file (:gh:`11401` by `Alex Rockhill`_) +- Improved error message when downloads are corrupted for :func:`mne.datasets.sample.data_path` and related functions (:gh:`11407` by `Eric Larson`_) +- Improved :func:`mne.get_config("") ` behavior to return a dict of key-value pairs (config key and its description) rather than just a tuple of config keys (:gh:`11166` by `Clemens Brunner`_, `Daniel McCloy`_, and `Eric Larson`_) +- Add support for ``skip_by_annotation`` in :func:`mne.io.Raw.notch_filter` (:gh:`11388` by `Mainak Jas`_) +- Add support for ``output='complex'`` to :func:`mne.time_frequency.psd_array_welch` and when using ``method='welch'`` with :meth:`mne.Epochs.compute_psd` (:gh:`11556` by `Eric Larson`_) +- Slightly adjusted the window title for :func:`mne.Epochs.plot` (:gh:`11419` by `Richard Höchenberger`_ and `Daniel McCloy`_) +- Add :func:`mne.count_events` to count unique event types in a given event array (:gh:`11430` by `Clemens Brunner`_) +- Add a video to :ref:`tut-freesurfer-mne` of a brain inflating from the pial surface to aid in understanding the inflated brain (:gh:`11440` by `Alex Rockhill`_) +- Add automatic projection of sEEG contact onto the inflated surface for :meth:`mne.viz.Brain.add_sensors` (:gh:`11436` by `Alex Rockhill`_) +- Allow an image with intracranial electrode contacts (e.g. 
computed tomography) to be used without the freesurfer recon-all surfaces to locate contacts so that it doesn't have to be downsampled to freesurfer dimensions (for microelectrodes) and show an example :ref:`ex-ieeg-micro` with :func:`mne.transforms.apply_volume_registration_points` added to aid this transform (:gh:`11567` by `Alex Rockhill`_) +- Use new :meth:`dipy.align.imwarp.DiffeomorphicMap.transform_points` to transform a montage of intracranial contacts more efficiently (:gh:`11572` by `Alex Rockhill`_) +- Improve performance of raw data browsing with many annotations (:gh:`11614` by `Eric Larson`_) +- Add support for :func:`mne.preprocessing.maxwell_filter` with gradient-compensated CTF data, e.g., for tSSS-only mode (:gh:`10554` by `Eric Larson`_) +- Add support for eyetracking data using :func:`mne.io.read_raw_eyelink` (:gh:`11152` by `Dominik Welke`_ and `Scott Huberty`_) +- :func:`mne.channels.make_1020_channel_selections` gained a new parameter, ``return_ch_names``, to allow for easy retrieval of EEG channel names corresponding to the left, right, and midline portions of the montage (:gh:`11632` by `Richard Höchenberger`_) +- Methods for setting the sensor types of channels (e.g., for raw data, :meth:`mne.io.Raw.set_channel_types`) gained a new parameter, ``on_unit_change``, to control behavior (raise an exception, emit a warning, or do nothing) in case the measurement unit is adjusted automatically (:gh:`11668` by `Richard Höchenberger`_) +- :func:`mne.beamformer.trap_music` implements the TRAP-MUSIC localisation algorithm with the same signature as :func:`mne.beamformer.rap_music` (:gh:`11679` by `Théodore Papadopoulo`_) + +Bugs +~~~~ +- Improving compatibility with Qt6 by removing the use of deprecated ``AA_UseHighDpiPixmaps`` attribute for this Qt version (:gh:`11662` by :newcontrib:`Florin Pop`) +- Fix :func:`mne.time_frequency.psd_array_multitaper` docstring where argument ``bandwidth`` incorrectly reported argument as half-bandwidth and gave wrong explanation of default value (:gh:`11479` by :newcontrib:`Tom Stone`) +- Fix bug where installation of a package depending on ``mne`` will error when done in an environment where ``setuptools`` is not present (:gh:`11454` by :newcontrib:`Arne Pelzer`) +- Fix bug in :meth:`mne.Annotations.rename` where replacements were not done correctly (:gh:`11666` by :newcontrib:`Timur Sokhin` and `Eric Larson`_) +- Fix :meth:`mne.time_frequency.Spectrum.to_data_frame`'s docstring to reflect the correct name for the appended frequencies column (:gh:`11457` by :newcontrib:`Zvi Baratz`) +- Fix bug with :func:`mne.io.read_raw_egi` where events outside the recording window could break data reading (:gh:`11505` by :newcontrib:`Niels Focke`) +- Fix bug where :func:`mne.preprocessing.regress_artifact` and :class:`mne.preprocessing.EOGRegression` incorrectly tracked ``picks`` (:gh:`11366` by `Eric Larson`_) +- Fix bug where channel names were not properly sanitized in :func:`mne.write_evokeds` and related functions (:gh:`11399` by `Eric Larson`_) +- Fix bug where splash screen would not always disappear (:gh:`11398` by `Eric Larson`_) +- Fix bug where having a different combination of volumes loaded into ``freeview`` caused different affines to be returned by :func:`mne.read_lta` for the same Linear Transform Array (LTA) (:gh:`11402` by `Alex Rockhill`_) +- Fix how :class:`mne.channels.DigMontage` is set when using ``mne.gui.locate_ieeg`` so that :func:`mne.Info.get_montage` works and does not return ``None`` (:gh:`11421` by `Alex Rockhill`_) +- 
Fix :func:`mne.io.read_raw_edf` when reading EDF data with different sampling rates and a mix of data channels when using ``infer_types=True`` (:gh:`11427` by `Alex Gramfort`_) +- Fix how :class:`mne.channels.DigMontage` is set when using :func:`mne.preprocessing.ieeg.project_sensors_onto_brain` so that :func:`mne.Info.get_montage` works and does not return ``None`` (:gh:`11436` by `Alex Rockhill`_) +- Fix configuration folder discovery on Windows, which would fail in certain edge cases; and produce a helpful error message if discovery still fails (:gh:`11441` by `Richard Höchenberger`_) +- Make :class:`~mne.decoding.SlidingEstimator` and :class:`~mne.decoding.GeneralizingEstimator` respect the ``verbose`` argument. Now with ``verbose=False``, the progress bar is not shown during fitting, scoring, etc. (:gh:`11450` by `Mikołaj Magnuski`_) +- Fix bug with ``mne.gui.locate_ieeg`` where Freesurfer ``?h.pial.T1`` was not recognized and suppress excess logging (:gh:`11489` by `Alex Rockhill`_) +- All functions accepting paths can now correctly handle :class:`~pathlib.Path` as input. Historically, we expected strings (instead of "proper" path objects), and only added :class:`~pathlib.Path` support in a few select places, leading to inconsistent behavior. (:gh:`11473` and :gh:`11499` by `Mathieu Scheltienne`_) +- Fix visualization dialog compatibility with matplotlib 3.7 (:gh:`11409` by `Daniel McCloy`_ and `Eric Larson`_) +- Expand tilde (user directory) in config keys (:gh:`11537` by `Clemens Brunner`_) +- Fix bug in :func:`mne.preprocessing.compute_maxwell_basis` where using ``int_order=0`` would raise an error (:gh:`11562` by `Eric Larson`_) +- In the legacy function :func:`mne.viz.plot_epochs_psd_topomap`, the parameter ``names`` now works again (:gh:`11563` by `Daniel McCloy`_) +- Fix :func:`mne.io.read_raw` for file names containing multiple dots (:gh:`11521` by `Clemens Brunner`_) +- Fix bug in :func:`mne.export.export_raw` when exporting to EDF with a physical range set smaller than the data range (:gh:`11569` by `Mathieu Scheltienne`_) +- Fix bug in :func:`mne.concatenate_raws` where two raws could not be merged if the order of the bad channel lists did not match (:gh:`11502` by `Moritz Gerster`_) +- Fix bug where :meth:`mne.Evoked.plot_topomap` opened an extra figure (:gh:`11607` by `Alex Rockhill`_) +- Fix bug where :func:`mne.transforms.apply_volume_registration_points` modified info in place (:gh:`11612` by `Alex Rockhill`_) +- Fix bug where Maxwell-filtered data rank was not handled properly in :func:`mne.beamformer.make_lcmv` (:gh:`11664` by `Eric Larson`_) +- In :class:`~mne.Report`, custom figures now show up correctly when ``image_format='svg'`` is requested (:gh:`11623` by `Richard Höchenberger`_) +- Fix bug where providing ``axes`` in `mne.preprocessing.ICA.plot_components` would fail (:gh:`11654` by `Mathieu Scheltienne`_) +- Fix deprecation of ``title``, ``vmin`` and ``vmax`` in `~mne.viz.plot_tfr_topomap` called from interactive `mne.time_frequency.AverageTFR.plot_topo` (:gh:`11683` by `Mathieu Scheltienne`_) + +API changes +~~~~~~~~~~~ +- Deprecate ``mne.gui.locate_ieeg`` in favor of :func:`mne-gui-addons:mne_gui_addons.locate_ieeg` (:gh:`11616` by `Alex Rockhill`_) +- Deprecate arguments ``kind`` and ``path`` from :func:`mne.channels.read_layout` in favor of a common argument ``fname`` (:gh:`11500` by `Mathieu Scheltienne`_) +- Change ``aligned_ct`` positional argument in ``mne.gui.locate_ieeg`` to ``base_image`` to reflect that this can now be used with unaligned images 
(:gh:`11567` by `Alex Rockhill`_) +- ``mne.warp_montage_volume`` was deprecated in favor of :func:`mne.preprocessing.ieeg.warp_montage` (acts directly on points instead of using an intermediate volume) and :func:`mne.preprocessing.ieeg.make_montage_volume` (which makes a volume of ieeg contact locations which can still be useful) (:gh:`11572` by `Alex Rockhill`_) +- Deprecate ``mne.pick_channels_evoked`` in favor of ``evoked.copy().pick(...)`` (:gh:`11665` by `Eric Larson`_) +- Set instance methods ``inst.pick_types`` and ``inst.pick_channels`` as legacy in favor of ``inst.pick(...)`` (:gh:`11665` by `Eric Larson`_) +- The default of ``inst.pick_channels(..., ordered=False)`` will change to ``ordered=True`` in 1.5 to avoid silent bugs (:gh:`11665` by `Eric Larson`_) + +Authors +~~~~~~~ + +* Alex Rockhill +* Alexandre Gramfort +* Arne Pelzer+ +* Britta Westner +* Carlos de la Torre +* Clemens Brunner +* Daniel Hasegan +* Daniel McCloy +* Denis A. Engemann +* Dimitri Papadopoulos Orfanos +* Dinara Issagaliyeva +* Dominik Welke +* Enzo Altamiranda +* Eric Brayet +* Eric Larson +* Erkka Heinila +* Felix Klotzsche +* Florin Pop+ +* George O'Neill+ +* Giulio +* Hakimeh Aslsardroud +* Hyonyoung Shin+ +* Jacob Woessner+ +* Jan Ebert+ +* Jennifer Behnke +* Jevri Hanna +* Lukas Hecker +* Mainak Jas +* Mark Alexander Henney +* Mathieu Scheltienne +* Mauricio Cespedes +* Michiru Kaneda +* Mikołaj Magnuski +* Moritz Gerster +* Niels Focke+ +* Omer Shubi +* Pavel Navratil +* Quentin Barthélemy+ +* Richard Höchenberger +* Rob Luke +* Robert Seymour+ +* Santeri Ruuskanen +* Sawradip Saha+ +* Scott Huberty +* Stefan Appelhoff +* Theodore Papadopoulo +* Thomas Samuel Binns+ +* Timon Merk +* Timur Sokhin+ +* Tom Ma +* Tom Stone+ +* Toomas Erik Anijärv +* Zvi Baratz+ diff --git a/doc/changes/v1.5.rst b/doc/changes/v1.5.rst new file mode 100644 index 00000000000..c607aefe26d --- /dev/null +++ b/doc/changes/v1.5.rst @@ -0,0 +1,107 @@ +.. NOTE: we use cross-references to highlight new functions and classes. + Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the + whats_new page will have a link to the function/class documentation. + +.. NOTE: there are 3 separate sections for changes, based on type: + - "Enhancements" for new features + - "Bugs" for bug fixes + - "API changes" for backward-incompatible changes + +.. NOTE: changes from first-time contributors should be added to the TOP of + the relevant section (Enhancements / Bugs / API changes), and should look + like this (where xxxx is the pull request number): + + - description of enhancement/bugfix/API change (:gh:`xxxx` by + :newcontrib:`Firstname Lastname`) + + Also add a corresponding entry for yourself in doc/changes/names.inc + +.. 
_changes_1_5_0: + +Version 1.5.0 (2023-08-14) +-------------------------- + +Enhancements +~~~~~~~~~~~~ +- Add reader for NSx files (Blackrock Microsystems Data Acquisition Systems) (:gh:`11432` by :newcontrib:`Proloy Das`) +- Add ``cmap`` argument for the :func:`mne.viz.plot_sensors` (:gh:`11720` by :newcontrib:`Gennadiy Belonosov`) +- Return unmodified instance if new sampling frequency is identical to the original in :meth:`mne.io.Raw.resample`, :meth:`mne.Epochs.resample`, :meth:`mne.Evoked.resample` and :meth:`mne.SourceEstimate.resample` (:gh:`11736` by :newcontrib:`Gennadiy Belonosov`) +- When failing to locate a file, we now print the full path in quotation marks to help spot accidentally added trailing spaces (:gh:`11718` by `Richard Höchenberger`_) +- Add standard montage lookup table for ``easycap-M43`` (:gh:`11744` by :newcontrib:`Diptyajit Das`) +- Added :class:`mne.preprocessing.eyetracking.Calibration` to store eye-tracking calibration info, and :func:`mne.preprocessing.eyetracking.read_eyelink_calibration` to read calibration data from EyeLink systems (:gh:`11719` by `Scott Huberty`_) +- Ocular :class:`mne.Annotations` read in by :func:`mne.io.read_raw_eyelink` are now channel aware. This means if the left eye blinked, the associated annotation will store this in the ``'ch_names'`` key. (:gh:`11746` by `Scott Huberty`_) +- Added :func:`mne.preprocessing.eyetracking.interpolate_blinks` to linear interpolate eyetrack signals during blink periods. (:gh:`11740` by `Scott Huberty`_) +- Added a section for combining eye-tracking and EEG data to the preprocessing tutorial "working with eye tracker data in MNE-Python" (:gh:`11770` by `Scott Huberty`_) +- Added a ``show_bytes`` option to :ref:`mne show_fiff` to show byte offsets (:gh:`11800` by `Eric Larson`_) +- Add :meth:`mne.Annotations.count` and :func:`mne.count_annotations` to count unique annotations (:gh:`11796` by `Clemens Brunner`_) +- Split the display of :meth:`mne.preprocessing.ICA.plot_overlay` between sensor types and display the RMS for MEG sensors and the :term:`GFP` for EEG sensors in the second panel instead of the channel average (:gh:`11830` by `Mathieu Scheltienne`_) +- Add :meth:`mne.Info.set_channel_types`, :meth:`mne.Info.rename_channels`, :meth:`mne.Info.plot_sensors`, :meth:`mne.Info.anonymize`, :meth:`mne.Info.set_meas_date` (:gh:`11839` by `Alex Rockhill`_) +- Curry and NEDF raw files can now also be read with the generic :func:`mne.io.read_raw` function (:gh:`11841` by `Richard Höchenberger`_) + +Bugs +~~~~ +- Fix bug in :func:`mne.bem.convert_flash_mris` to handle missing "flash5_reg.mgz" when processing "flash30" data. 
(:gh:`11842` by :newcontrib:`Hamza Abdelhedi`) +- Fix bug where epochs ``event_id`` was not kept by :func:`mne.channels.combine_channels` (:gh:`11786` by :newcontrib:`Samuel Louviot`) +- Fix bug where user-provided codec was not used to read annotations when loading EEGLAB ``.set`` files (:gh:`11773` by :newcontrib:`Yiping Zuo`) +- Fix bug that required curv.*h files to create Brain object (:gh:`11704` by :newcontrib:`Aaron Earle-Richardson`) +- Extended test to highlight bug in :func:`mne.stats.permutation_t_test` (:gh:`11575` by :newcontrib:`Joshua Calder-Travis`) +- Fix bug that used wrong indices for line/label styles (sometimes causing an ``IndexError``) in :meth:`mne.preprocessing.ICA.plot_sources` under certain conditions (:gh:`11808` by :newcontrib:`Joshua Calder-Travis`) +- Fix bug with :func:`~mne.io.read_raw_snirf` to handle files with measurement time containing milliseconds (:gh:`11804` by :newcontrib:`Daniel Tse`) +- Fix loading of EDF with mixed sampling frequencies with :func:`mne.io.read_raw_edf`: Upsampling is now applied to the entire signal and no longer block based, significantly reducing edge artifacts (:gh:`11549` by `Simon Kern`_) +- Fix bug where :func:`mne.io.read_raw_cnt` imports unnecessary durations (:gh:`11828` by `Jacob Woessner`_) +- Fix bug where :meth:`mne.viz.Brain.add_volume_labels` used an incorrect orientation (:gh:`11730` by `Alex Rockhill`_) +- Fix bug with :func:`mne.forward.restrict_forward_to_label` where cortical patch information was not adjusted (:gh:`11694` by `Eric Larson`_) +- Fix bug with PySide6 compatibility (:gh:`11721` by `Eric Larson`_) +- Fix bug with :class:`mne.io.Raw`, :class:`mne.SourceEstimate`, and related classes where the ``decimate`` and ``shift_time`` methods were errantly added (:gh:`11853` by `Eric Larson`_) +- Fix hanging interpreter with matplotlib figures using ``mne/viz/_mpl_figure.py`` in spyder console and jupyter notebooks (:gh:`11696` by `Mathieu Scheltienne`_) +- Fix bug with overlapping text for :meth:`mne.Evoked.plot` (:gh:`11698` by `Alex Rockhill`_) +- Fix bug with :func:`mne.preprocessing.corrmap` where the template iteration had non-standard map averaging (:gh:`11857` by `Eric Larson`_) +- For :func:`mne.io.read_raw_eyelink`, the default value of the ``gap_description`` parameter is now ``'BAD_ACQ_SKIP'``, following MNE convention (:gh:`11719` by `Scott Huberty`_) +- Fix bug with :func:`mne.io.read_raw_fil` where datasets without sensor positions would not import (:gh:`11733` by `George O'Neill`_) +- Fix bug with :func:`mne.chpi.compute_chpi_snr` where cHPI being off for part of the recording or bad channels being defined led to an error or incorrect behavior (:gh:`11754`, :gh:`11755` by `Eric Larson`_) +- Allow int-like for the argument ``id`` of `~mne.make_fixed_length_events` (:gh:`11748` by `Mathieu Scheltienne`_) +- Fix bugs with :mod:`mne.decoding` classes (:gh:`11846` by `Eric Larson`_): + - :class:`mne.decoding.ReceptiveField` now sets ``rf.fit_intercept_`` during ``fit()`` rather than overwriting ``rf.fit_intercept`` + - :class:`mne.decoding.SlidingEstimator` now returns the same ``dtype`` as ``X`` (usually ``float64``) in ``sl.transform(X)`` rather than always returning ``int`` +- Fix bug with :func:`mne.beamformer.make_lcmv` where the source space was not checked properly when the ``label`` argument was provided (:gh:`11852` by `Eric Larson`_) +- Fix bug where :func:`mne.io.read_raw_egi` did not properly set the EEG reference location for the reference channel itself (:gh:`11822` by `Eric 
Larson`_) +- Fix bug with legacy :meth:`~mne.io.Raw.plot_psd` method where passed axes were not used (:gh:`11778` by `Daniel McCloy`_) +- blink :class:`mne.Annotations` read in by :func:`mne.io.read_raw_eyelink` now begin with ``'BAD_'``, i.e. ``'BAD_blink'``, because ocular data are missing during blinks. (:gh:`11746` by `Scott Huberty`_) +- Fix bug where :ref:`mne show_fiff` could fail with an ambiguous error if the file is corrupt (:gh:`11800` by `Eric Larson`_) +- Fix bug where annotation FIF files lacked an end block tag (:gh:`11800` by `Eric Larson`_) +- Fix display of :class:`~mne.Annotations` in `mne.preprocessing.ICA.plot_sources` when the ``raw`` has ``raw.first_samp != 0`` and doesn't have a measurement date (:gh:`11766` by `Mathieu Scheltienne`_) +- Fix bug in read_raw_eyelink, where STATUS information of samples was always assumed to be in the file. Performance and memory improvements were also made. (:gh:`11823` by `Scott Huberty`_) +- Fix closing of a topographic map created from an interactive drag on an Evoked or PSD plot (:gh:`11862` by `Mathieu Scheltienne`_) + +API changes +~~~~~~~~~~~ +- The ``baseline`` argument can now be array-like (e.g. ``list``, ``tuple``, ``np.ndarray``, ...) instead of only a ``tuple`` (:gh:`11713` by `Clemens Brunner`_) +- The ``events`` and ``event_id`` parameters of :meth:`Epochs.plot() ` now accept boolean values; see docstring for details (:gh:`11445` by `Daniel McCloy`_ and `Clemens Brunner`_) +- Deprecated ``gap_description`` keyword argument of :func:`mne.io.read_raw_eyelink`, which will be removed in mne version 1.6, in favor of using :meth:`mne.Annotations.rename` (:gh:`11719` by `Scott Huberty`_) + +Authors +~~~~~~~ +* Aaron Earle-Richardson+ +* Alex Rockhill +* Alexandre Gramfort +* Britta Westner +* Clemens Brunner +* Daniel McCloy +* Daniel Tse+ +* Diptyajit Das+ +* Eric Larson +* Gennadiy Belonosov+ +* George O'Neill +* Hamza Abdelhedi+ +* Jacob Woessner +* Joshua Calder-Travis+ +* Mathieu Scheltienne +* Proloy Das +* Richard Höchenberger +* Rob Luke +* Samuel Louviot+ +* Scott Huberty +* Simon Kern +* Stefan Appelhoff +* Thomas Moreau +* Yiping Zuo+ diff --git a/doc/changes/v1.6.rst b/doc/changes/v1.6.rst new file mode 100644 index 00000000000..f770b5046d2 --- /dev/null +++ b/doc/changes/v1.6.rst @@ -0,0 +1,132 @@ +.. _changes_1_6_0: + +Version 1.6.0 (2023-11-20) +-------------------------- + +Enhancements +~~~~~~~~~~~~ +- Add support for Neuralynx data files with :func:`mne.io.read_raw_neuralynx` (:gh:`11969` by :newcontrib:`Kristijan Armeni` and :newcontrib:`Ivan Skelin`) +- Improve tests for saving splits with :class:`mne.Epochs` (:gh:`11884` by `Dmitrii Altukhov`_) +- Added functionality for linking interactive figures together, such that changing one figure will affect another, see :ref:`tut-ui-events` and :mod:`mne.viz.ui_events`. 
Current figures implementing UI events are :func:`mne.viz.plot_topomap` and :func:`mne.viz.plot_source_estimates` (:gh:`11685` :gh:`11891` by `Marijn van Vliet`_) +- HTML anchors for :class:`mne.Report` now reflect the ``section-title`` of the report items rather than using a global incrementor ``global-N`` (:gh:`11890` by `Eric Larson`_) +- Added public :func:`mne.io.write_info` to complement :func:`mne.io.read_info` (:gh:`11918` by `Eric Larson`_) +- Added option ``remove_dc`` to to :meth:`Raw.compute_psd() `, :meth:`Epochs.compute_psd() `, and :meth:`Evoked.compute_psd() `, to allow skipping DC removal when computing Welch or multitaper spectra (:gh:`11769` by `Nikolai Chapochnikov`_) +- Add the possibility to provide a float between 0 and 1 as ``n_grad``, ``n_mag`` and ``n_eeg`` in `~mne.compute_proj_raw`, `~mne.compute_proj_epochs` and `~mne.compute_proj_evoked` to select the number of vectors based on the cumulative explained variance (:gh:`11919` by `Mathieu Scheltienne`_) +- Add extracting all time courses in a label using :func:`mne.extract_label_time_course` without applying an aggregation function (like ``mean``) (:gh:`12001` by `Hamza Abdelhedi`_) +- Added support for Artinis fNIRS data files to :func:`mne.io.read_raw_snirf` (:gh:`11926` by `Robert Luke`_) +- Add helpful error messages when using methods on empty :class:`mne.Epochs`-objects (:gh:`11306` by `Martin Schulz`_) +- Add support for passing a :class:`python:dict` as ``sensor_color`` to specify per-channel-type colors in :func:`mne.viz.plot_alignment` (:gh:`12067` by `Eric Larson`_) +- Add inferring EEGLAB files' montage unit automatically based on estimated head radius using :func:`read_raw_eeglab(..., montage_units="auto") ` (:gh:`11925` by `Jack Zhang`_, :gh:`11951` by `Eric Larson`_) +- Add :class:`~mne.time_frequency.EpochsSpectrumArray` and :class:`~mne.time_frequency.SpectrumArray` to support creating power spectra from :class:`NumPy array ` data (:gh:`11803` by `Alex Rockhill`_) +- Add support for writing forward solutions to HDF5 and convenience function :meth:`mne.Forward.save` (:gh:`12036` by `Eric Larson`_) +- Refactored internals of :func:`mne.read_annotations` (:gh:`11964` by `Paul Roujansky`_) +- Add support for drawing MEG sensors in :ref:`mne coreg` (:gh:`12098` by `Eric Larson`_) +- Improve string representation of :class:`mne.Covariance` (:gh:`12181` by `Eric Larson`_) +- Add ``check_version=True`` to :ref:`mne sys_info` to check for a new release on GitHub (:gh:`12146` by `Eric Larson`_) +- Bad channels are now colored gray in addition to being dashed when spatial colors are used in :func:`mne.viz.plot_evoked` and related functions (:gh:`12142` by `Eric Larson`_) +- By default MNE-Python creates matplotlib figures with ``layout='constrained'`` rather than the default ``layout='tight'`` (:gh:`12050`, :gh:`12103` by `Mathieu Scheltienne`_ and `Eric Larson`_) +- Enhance :func:`~mne.viz.plot_evoked_field` with a GUI that has controls for time, colormap, and contour lines (:gh:`11942` by `Marijn van Vliet`_) +- Add :class:`mne.viz.ui_events.UIEvent` linking for interactive colorbars, allowing users to link figures and change the colormap and limits interactively. 
This supports :func:`~mne.viz.plot_evoked_topomap`, :func:`~mne.viz.plot_ica_components`, :func:`~mne.viz.plot_tfr_topomap`, :func:`~mne.viz.plot_projs_topomap`, :meth:`~mne.Evoked.plot_image`, and :meth:`~mne.Epochs.plot_image` (:gh:`12057` by `Santeri Ruuskanen`_) +- Add example KIT phantom dataset in :func:`mne.datasets.phantom_kit.data_path` and :ref:`tut-phantom-kit` (:gh:`12105` by `Judy D Zhu`_ and `Eric Larson`_) +- :func:`~mne.epochs.make_metadata` now accepts ``tmin=None`` and ``tmax=None``, which will bound the time window used for metadata generation by event names (instead of a fixed time). That way, you can now for example generate metadata spanning from one cue or fixation cross to the next, even if trial durations vary throughout the recording (:gh:`12118` by `Richard Höchenberger`_) +- Add support for passing multiple labels to :func:`mne.minimum_norm.source_induced_power` (:gh:`12026` by `Erica Peterson`_, `Eric Larson`_, and `Daniel McCloy`_ ) +- Added documentation to :meth:`mne.io.Raw.set_montage` and :func:`mne.add_reference_channels` to specify that montages should be set after adding reference channels (:gh:`12160` by `Jacob Woessner`_) +- Add argument ``splash`` to the function using the ``qt`` browser backend to allow enabling/disabling the splash screen (:gh:`12185` by `Mathieu Scheltienne`_) +- :class:`~mne.preprocessing.ICA`'s HTML representation (displayed in Jupyter notebooks and :class:`mne.Report`) now includes all optional fit parameters (e.g., max. number of iterations) (:gh:`12194`, by `Richard Höchenberger`_) + +Bugs +~~~~ +- Fix bug where :func:`mne.io.read_raw_gdf` would fail due to improper usage of ``np.clip`` (:gh:`12168` by :newcontrib:`Rasmus Aagaard`) +- Fix bugs with :func:`mne.preprocessing.realign_raw` where the start of ``other`` was incorrectly cropped; and onsets and durations in ``other.annotations`` were left unsynced with the resampled data (:gh:`11950` by :newcontrib:`Qian Chu`) +- Fix bug where ``encoding`` argument was ignored when reading annotations from an EDF file (:gh:`11958` by :newcontrib:`Andrew Gilbert`) +- Mark tests ``test_adjacency_matches_ft`` and ``test_fetch_uncompressed_file`` as network tests (:gh:`12041` by :newcontrib:`Maksym Balatsko`) +- Fix bug with :func:`mne.channels.read_ch_adjacency` (:gh:`11608` by :newcontrib:`Ivan Zubarev`) +- Fix bug where ``epochs.get_data(..., scalings=...)`` would errantly modify the preloaded data (:gh:`12121` by :newcontrib:`Pablo Mainar` and `Eric Larson`_) +- Fix bugs with saving splits for :class:`~mne.Epochs` (:gh:`11876` by `Dmitrii Altukhov`_) +- Fix bug with multi-plot 3D rendering where only one plot was updated (:gh:`11896` by `Eric Larson`_) +- Fix bug where ``verbose`` level was not respected inside parallel jobs (:gh:`12154` by `Eric Larson`_) +- Fix bug where subject birthdays were not correctly read by :func:`mne.io.read_raw_snirf` (:gh:`11912` by `Eric Larson`_) +- Fix bug where warnings were emitted when computing spectra for channels marked as bad (:gh:`12186` by `Eric Larson`_) +- Fix bug with :func:`mne.chpi.compute_head_pos` for CTF data where digitization points were modified in-place, producing an incorrect result during a save-load round-trip (:gh:`11934` by `Eric Larson`_) +- Fix bug where non-compliant stimulus data streams were not ignored by :func:`mne.io.read_raw_snirf` (:gh:`11915` by `Johann Benerradi`_) +- Fix bug with ``pca=False`` in :func:`mne.minimum_norm.compute_source_psd` (:gh:`11927` by `Alex Gramfort`_) +- Fix bug with notebooks when using 
PyVista 0.42 by implementing ``trame`` backend support (:gh:`11956` by `Eric Larson`_) +- Removed preload parameter from :func:`mne.io.read_raw_eyelink`, because data are always preloaded no matter what preload is set to (:gh:`11910` by `Scott Huberty`_) +- Fix bug with :meth:`mne.viz.Brain.get_view` where calling :meth:`~mne.viz.Brain.show_view` with returned parameters would change the view (:gh:`12000` by `Eric Larson`_) +- Fix bug with :meth:`mne.viz.Brain.show_view` where ``distance=None`` would change the view distance (:gh:`12000` by `Eric Larson`_) +- Fix bug with :meth:`~mne.viz.Brain.add_annotation` when reading an annotation from a file with both hemispheres shown (:gh:`11946` by `Marijn van Vliet`_) +- Fix bug with reported component number and errant reporting of PCA explained variance as ICA explained variance in :meth:`mne.Report.add_ica` (:gh:`12155`, :gh:`12167` by `Eric Larson`_ and `Richard Höchenberger`_) +- Fix bug with axis clip box boundaries in :func:`mne.viz.plot_evoked_topo` and related functions (:gh:`11999` by `Eric Larson`_) +- Fix bug with ``subject_info`` when loading data from and exporting to EDF file (:gh:`11952` by `Paul Roujansky`_) +- Fix bug where :class:`mne.Info` HTML representations listed all channel counts instead of good channel counts under the heading "Good channels" (:gh:`12145` by `Eric Larson`_) +- Fix rendering glitches when plotting Neuromag/TRIUX sensors in :func:`mne.viz.plot_alignment` and related functions (:gh:`12098` by `Eric Larson`_) +- Fix bug with delayed checking of :class:`info["bads"] ` (:gh:`12038` by `Eric Larson`_) +- Fix bug with :ref:`mne coreg` where points inside the head surface were not shown (:gh:`12147`, :gh:`12164` by `Eric Larson`_) +- Fix bug with :func:`mne.viz.plot_alignment` where ``sensor_colors`` were not handled properly on a per-channel-type basis (:gh:`12067` by `Eric Larson`_) +- Fix handling of channel information in annotations when loading data from and exporting to EDF file (:gh:`11960` :gh:`12017` :gh:`12044` by `Paul Roujansky`_) +- Add missing ``overwrite`` and ``verbose`` parameters to :meth:`Transform.save() ` (:gh:`12004` by `Marijn van Vliet`_) +- Fix parsing of eye-link :class:`~mne.Annotations` when ``apply_offsets=False`` is provided to :func:`~mne.io.read_raw_eyelink` (:gh:`12003` by `Mathieu Scheltienne`_) +- Correctly prune channel-specific :class:`~mne.Annotations` when creating :class:`~mne.Epochs` without the channel(s) included in the channel specific annotations (:gh:`12010` by `Mathieu Scheltienne`_) +- Fix :func:`~mne.viz.plot_volume_source_estimates` with :class:`~mne.VolSourceEstimate` which include a list of vertices (:gh:`12025` by `Mathieu Scheltienne`_) +- Add support for non-ASCII characters in Annotations, Evoked comments, etc when saving to FIFF format (:gh:`12080` by `Daniel McCloy`_) +- Correctly handle passing ``"eyegaze"`` or ``"pupil"`` to :meth:`mne.io.Raw.pick` (:gh:`12019` by `Scott Huberty`_) +- Fix bug with :func:`mne.time_frequency.Spectrum.plot` and related functions where bad channels were not marked (:gh:`12142` by `Eric Larson`_) +- Fix bug with :func:`~mne.viz.plot_raw` where changing ``MNE_BROWSER_BACKEND`` via :func:`~mne.set_config` would have no effect within a Python session (:gh:`12078` by `Santeri Ruuskanen`_) +- Improve handling of ``method`` argument in the channel interpolation function to support :class:`str` and raise helpful error messages (:gh:`12113` by `Mathieu Scheltienne`_) +- Fix combination of ``DIN`` event channels into a single 
synthetic trigger channel ``STI 014`` by the MFF reader of :func:`mne.io.read_raw_egi` (:gh:`12122` by `Mathieu Scheltienne`_) +- Fix bug with :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab` where automatic fiducial detection would fail for certain files (:gh:`12165` by `Clemens Brunner`_) +- Fix concatenation of ``raws`` with ``np.nan`` in the device to head transformation (:gh:`12198` by `Mathieu Scheltienne`_) +- Fix bug with :func:`mne.viz.plot_compare_evokeds` where the title was not displayed when ``axes='topo'`` (:gh:`12192` by `Jacob Woessner`_) +- Fix bug with :func:`mne.io.read_raw_cnt` where the bad channels were not properly read (:gh:`12189` by `Jacob Woessner`_) +- Fix bug where iterating over :class:`~mne.io.Raw` would result in an error (:gh:`12205` by `Clemens Brunner`_) + + +API changes +~~~~~~~~~~~ +- The default for :meth:`mne.Epochs.get_data` of ``copy=False`` will change to ``copy=True`` in 1.7. Set it explicitly to avoid a warning (:gh:`12121` by :newcontrib:`Pablo Mainar` and `Eric Larson`_) +- ``mne.preprocessing.apply_maxfilter`` and ``mne maxfilter`` have been deprecated and will be removed in 1.7. Use :func:`mne.preprocessing.maxwell_filter` (see :ref:`this tutorial `) in Python or the command-line utility from MEGIN ``maxfilter`` and :func:`mne.bem.fit_sphere_to_headshape` instead (:gh:`11938` by `Eric Larson`_) +- :func:`mne.io.kit.read_mrk` reading pickled files is deprecated using something like ``np.savetxt(fid, pts, delimiter="\t", newline="\n")`` to save your points instead (:gh:`11937` by `Eric Larson`_) +- Replace legacy ``inst.pick_channels`` and ``inst.pick_types`` with ``inst.pick`` (where ``inst`` is an instance of :class:`~mne.io.Raw`, :class:`~mne.Epochs`, or :class:`~mne.Evoked`) wherever possible (:gh:`11907` by `Clemens Brunner`_) +- The ``reset_camera`` parameter has been removed in favor of ``distance="auto"`` in :func:`mne.viz.set_3d_view`, :meth:`mne.viz.Brain.show_view`, and related functions (:gh:`12000` by `Eric Larson`_) +- Several unused parameters from :func:`mne.gui.coregistration` are now deprecated: tabbed, split, scrollable, head_inside, guess_mri_subject, scale, and ``advanced_rendering``. All arguments are also now keyword-only. (:gh:`12147` by `Eric Larson`_) + +Authors +~~~~~~~ +* Adam Li +* Alex Rockhill +* Alexandre Gramfort +* Ana Radanovic +* Andy Gilbert+ +* Clemens Brunner +* Daniel McCloy +* Denis A. Engemann +* Dimitri Papadopoulos Orfanos +* Dmitrii Altukhov +* Dominik Welke +* Eric Larson +* Erica Peterson +* Gonzalo Reina +* Hamza Abdelhedi +* Ivan Skelin+ +* Ivan Zubarev+ +* Jack Zhang +* Jacob Woessner +* Johann Benerradi +* John Veillette +* Judy D Zhu +* Kristijan Armeni+ +* Mainak Jas +* Maksym Balatsko+ +* Marijn van Vliet +* Martin Schulz +* Mathieu Scheltienne +* Nikolai Chapochnikov +* Pablo Mainar+ +* Paul Roujansky +* Qian Chu+ +* Rasmus Aagaard+ +* Richard Höchenberger +* Rob Luke +* Santeri Ruuskanen +* Scott Huberty +* Stefan Appelhoff diff --git a/doc/changes/v1.7.rst b/doc/changes/v1.7.rst new file mode 100644 index 00000000000..6b118612541 --- /dev/null +++ b/doc/changes/v1.7.rst @@ -0,0 +1,197 @@ +.. 
_changes_1_7_1: + +Version 1.7.1 (2024-06-14) +========================== + +Bugfixes +-------- + +- Fix bug where :func:`mne.time_frequency.csd_multitaper`, :func:`mne.time_frequency.csd_fourier`, :func:`mne.time_frequency.csd_array_multitaper`, and :func:`mne.time_frequency.csd_array_fourier` would return cross-spectral densities with the ``fmin`` and ``fmax`` frequencies missing, by `Thomas Binns`_ (`#12633 `__) +- Fix incorrect RuntimeWarning (different channel filter settings) in EDF/BDF import, by `Clemens Brunner`_. (`#12661 `__) + +Authors +------- + +* Clemens Brunner +* Thomas Binns + +.. _changes_1_7_0: + +Version 1.7.0 (2024-04-19) +========================== + +Notable changes +--------------- + +- In this version, we started adding type hints (also known as "type annotations") to select parts of the codebase. + This meta information will be used by development environments (IDEs) like VS Code and PyCharm automatically to provide + better assistance such as tab completion or error detection even before running your code. + + So far, we've only added return type hints to :func:`mne.io.read_raw`, :func:`mne.read_epochs`, :func:`mne.read_evokeds` and + all format-specific ``read_raw_*()`` and ``read_epochs_*()`` functions. Now your editors will know: + these functions return raw, epochs, and evoked data, respectively. We are planning to add type hints to more functions after careful + evaluation in the future. + + You don't need to do anything to benefit from these changes – your editor will pick them up automatically and provide the + enhanced experience if it supports it! (`#12250 `__) + + +Dependencies +------------ + +- ``defusedxml`` is now an optional (rather than required) dependency and needed when reading EGI-MFF data, NEDF data, and BrainVision montages, by `Eric Larson`_. (`#12264 `__) +- For developers, ``pytest>=8.0`` is now required for running unit tests, by `Eric Larson`_. (`#12376 `__) +- ``pytest-harvest`` is no longer used as a test dependency, by `Eric Larson`_. (`#12451 `__) +- The minimum supported version of Qt bindings is 5.15, by `Eric Larson`_. (`#12491 `__) + + +Bugfixes +-------- + +- Fix bug where the ``section`` parameter in :meth:`mne.Report.add_html` was not being utilized, resulting in improper formatting, by :newcontrib:`Martin Oberg`. (`#12319 `__) +- Fix bug in :func:`mne.preprocessing.maxwell_filter` where calibration was incorrectly applied during virtual sensor reconstruction, by `Eric Larson`_ and :newcontrib:`Motofumi Fushimi`. (`#12348 `__) +- Reformats channel and detector lookup in :func:`mne.io.read_raw_snirf` from array based to dictionary based. Removes incorrect assertions that every detector and source must have data associated with every registered optode position, by :newcontrib:`Alex Kiefer`. (`#12430 `__) +- Remove FDT file format check for strings in EEGLAB's EEG.data in :func:`mne.io.read_raw_eeglab` and related functions by :newcontrib:`Seyed Yahya Shirazi` (`#12523 `__) +- Fixes to interactivity in time-frequency objects: the rectangle selector now works on TFR image plots of gradiometer data; and in ``TFR.plot_joint()`` plots, the colormap limits of interactively-generated topomaps match the colormap limits of the main plot. By `Daniel McCloy`_. (`#11282 `__) +- Allow :func:`mne.viz.plot_compare_evokeds` to plot eyetracking channels, and improve error handling, by `Scott Huberty`_. (`#12190 `__) +- Fix bug in :meth:`mne.Epochs.apply_function` where data was handed down incorrectly in parallel processing, by `Dominik Welke`_.
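+  A minimal usage sketch of the affected API (purely illustrative: the epochs, channel names, and the ``demean`` helper below are hypothetical and not part of MNE-Python; ``n_jobs=2`` exercises the parallel path this fix addresses)::
+
+      import numpy as np
+      import mne
+
+      # Hypothetical epochs: 2 epochs x 3 EEG channels x 1 s of random data.
+      info = mne.create_info(["EEG 001", "EEG 002", "EEG 003"], sfreq=100.0, ch_types="eeg")
+      epochs = mne.EpochsArray(np.random.default_rng(0).normal(size=(2, 3, 100)) * 1e-6, info)
+
+      def demean(x):
+          # Receives the data of a single channel when channel_wise=True (the default).
+          return x - x.mean()
+
+      epochs.apply_function(demean, n_jobs=2)  # applied channel-wise, possibly in parallel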
(`#12206 `__) +- Remove incorrect type hints in :func:`mne.io.read_raw_neuralynx`, by `Richard Höchenberger`_. (`#12236 `__) +- Fix bug with accessing the last data sample using ``raw[:, -1]`` where an empty array was returned, by `Eric Larson`_. (`#12248 `__) +- Correctly handle temporal gaps in Neuralynx .ncs files via :func:`mne.io.read_raw_neuralynx`, by `Kristijan Armeni`_ and `Eric Larson`_. (`#12279 `__) +- Fix bug where parent directory existence was not checked properly in :meth:`mne.io.Raw.save`, by `Eric Larson`_. (`#12282 `__) +- Add ``tol`` parameter to :meth:`mne.events_from_annotations` so that the user can specify the tolerance to ignore rounding errors of event onsets when using ``chunk_duration`` is not None (default is 1e-8), by `Michiru Kaneda`_ (`#12324 `__) +- Allow :meth:`mne.io.Raw.interpolate_bads` and :meth:`mne.Epochs.interpolate_bads` to work on ``ecog`` and ``seeg`` data; for ``seeg`` data a spline is fit to neighboring electrode contacts on the same shaft, by `Alex Rockhill`_ (`#12336 `__) +- Fix clicking on an axis of :func:`mne.viz.plot_evoked_topo` when multiple vertical lines ``vlines`` are used, by `Mathieu Scheltienne`_. (`#12345 `__) +- Fix bug in :meth:`mne.viz.EvokedField.set_vmax` that prevented setting the color limits of the MEG magnetic field density, by `Marijn van Vliet`_ (`#12354 `__) +- Fix faulty indexing in :func:`mne.io.read_raw_neuralynx` when picking a single channel, by `Kristijan Armeni`_. (`#12357 `__) +- Fix bug where :func:`mne.preprocessing.compute_proj_ecg` and :func:`mne.preprocessing.compute_proj_eog` could modify the default ``reject`` and ``flat`` arguments on multiple calls based on channel types present, by `Eric Larson`_. (`#12380 `__) +- Fix bad channels not handled properly in :func:`mne.stc_near_sensors` by `Alex Rockhill`_. (`#12382 `__) +- Fix bug where :func:`mne.preprocessing.regress_artifact` projection check was not specific to the channels being processed, by `Eric Larson`_. (`#12389 `__) +- Change how samples are read when using ``data_format='auto'`` in :func:`mne.io.read_raw_cnt`, by `Jacob Woessner`_. (`#12393 `__) +- Fix bugs with :class:`mne.Report` CSS where TOC items could disappear at the bottom of the page, by `Eric Larson`_. (`#12399 `__) +- In :func:`~mne.viz.plot_compare_evokeds`, actually plot GFP (not RMS amplitude) for EEG channels when global field power is requested by `Daniel McCloy`_. (`#12410 `__) +- Fix :ref:`tut-working-with-seeg` use of :func:`mne.stc_near_sensors` to use the :class:`mne.VolSourceEstimate` positions and not the pial surface, by `Alex Rockhill`_ (`#12436 `__) +- Fix prefiltering information management for EDF/BDF, by `Michiru Kaneda`_ (`#12441 `__) +- Fix validation of ``ch_type`` in :func:`mne.preprocessing.annotate_muscle_zscore`, by `Mathieu Scheltienne`_. (`#12444 `__) +- Fix errant redundant use of ``BIDSPath.split`` when writing split raw and epochs data, by `Eric Larson`_. (`#12451 `__) +- Disable config parser interpolation when reading BrainVision files, which allows using the percent sign as a regular character in channel units, by `Clemens Brunner`_. (`#12456 `__) +- Fix the default color of :meth:`mne.viz.Brain.add_text` to properly contrast with the figure background color, by `Marijn van Vliet`_. (`#12470 `__) +- Changed default ECoG and sEEG electrode sizes in brain plots to better reflect real world sizes, by `Liberty Hamilton`_ (`#12474 `__) +- Fixed bugs with handling of rank in :class:`mne.decoding.CSP`, by `Eric Larson`_. 
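+  A minimal sketch of the affected API (the two-class data below are random and purely hypothetical; ``mne.decoding`` requires scikit-learn to be installed)::
+
+      import numpy as np
+      from mne.decoding import CSP
+
+      # Hypothetical two-class data: 20 epochs x 8 channels x 100 time points.
+      rng = np.random.default_rng(0)
+      X = rng.standard_normal((20, 8, 100))
+      y = np.repeat([0, 1], 10)
+
+      csp = CSP(n_components=4, rank=None)  # rank is estimated from the data
+      features = csp.fit_transform(X, y)    # shape (20, 4): CSP band-power features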
(`#12476 `__) +- Fix reading segmented recordings with :func:`mne.io.read_raw_eyelink` by `Dominik Welke`_. (`#12481 `__) +- Improve compatibility with other Qt-based GUIs by handling theme icons better, by `Eric Larson`_. (`#12483 `__) +- Fix problem caused by onsets with NaN values using :func:`mne.io.read_raw_eeglab` by `Jacob Woessner`_ (`#12484 `__) +- Fix cleaning of channel names for non-Vectorview or CTF datasets containing whitespace or dashes in their channel names, by `Mathieu Scheltienne`_. (`#12489 `__) +- Fix bug with :meth:`mne.preprocessing.ICA.plot_sources` for ``evoked`` data where the + legend contained too many entries, by `Eric Larson`_. (`#12498 `__) +- Fix bug where using ``phase="minimum"`` in filtering functions like + :meth:`mne.io.Raw.filter` constructed a filter half the desired length with + compromised attenuation. Now ``phase="minimum"`` has the same length and comparable + suppression as ``phase="zero"``, and the old (incorrect) behavior can be achieved + with ``phase="minimum-half"``, by `Eric Larson`_. (`#12507 `__) +- Correct reading of ``info["subject_info"]["his_id"]`` in :func:`mne.io.read_raw_snirf`, by `Eric Larson`_. (`#12526 `__) +- Calling :meth:`~mne.io.Raw.compute_psd` with ``method="multitaper"`` is now expressly disallowed when ``reject_by_annotation=True`` and ``bad_*`` annotations are present (previously this was nominally allowed but resulted in ``nan`` values in the PSD). By `Daniel McCloy`_. (`#12535 `__) +- :meth:`~mne.io.Raw.compute_psd` and :func:`~mne.time_frequency.psd_array_welch` will now use FFT windows aligned to the onsets of good data spans when ``bad_*`` annotations are present. By `Daniel McCloy`_. (`#12536 `__) +- Fix bug in loading of complex/phase TFRs. By `Daniel McCloy`_. (`#12537 `__) +- Fix bug with :func:`mne.SourceSpaces.export_volume` where the ``img.affine`` was not set properly, by `Eric Larson`_. (`#12544 `__) + + +API changes by deprecation +-------------------------- + +- The default value of the ``zero_mean`` parameter of :func:`mne.time_frequency.tfr_array_morlet` will change from ``False`` to ``True`` in version 1.8, for consistency with related functions. By `Daniel McCloy`_. (`#11282 `__) +- The parameter for providing data to :func:`mne.time_frequency.tfr_array_morlet` and :func:`mne.time_frequency.tfr_array_multitaper` has been switched from ``epoch_data`` to ``data``. Only use the ``data`` parameter to avoid a warning. Changes by `Thomas Binns`_. (`#12308 `__) +- Change :func:`mne.stc_near_sensors` ``surface`` default from the ``'pial'`` surface to the surface in ``src`` if ``src`` is not ``None`` in version 1.8, by `Alex Rockhill`_. (`#12382 `__) + + +New features +------------ + +- Detecting bad EEG/MEG channels using the local outlier factor (LOF) algorithm in :func:`mne.preprocessing.find_bad_channels_lof`, by :newcontrib:`Velu Prabhakar Kumaravel`. (`#11234 `__) +- Inform the user about channel discrepancy between provided info, forward operator, and/or covariance matrices in :func:`mne.beamformer.make_lcmv`, by :newcontrib:`Nikolai Kapralov`. (`#12238 `__) +- Support partial pathlength factors for each wavelength in :func:`mne.preprocessing.nirs.beer_lambert_law`, by :newcontrib:`Richard Scholz`. (`#12446 `__) +- Add ``picks`` parameter to :meth:`mne.io.Raw.plot`, allowing users to select which channels to plot. This makes the raw data plotting API consistent with :meth:`mne.Epochs.plot` and :meth:`mne.Evoked.plot`, by :newcontrib:`Ivo de Jong`.
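+  A minimal sketch of the new parameter (the raw object and channel names below are hypothetical, built from zeros only for illustration)::
+
+      import numpy as np
+      import mne
+
+      info = mne.create_info(["Fz", "Cz", "Pz"], sfreq=250.0, ch_types="eeg")
+      raw = mne.io.RawArray(np.zeros((3, 2500)), info)
+
+      # Only the requested channels are drawn.
+      fig = raw.plot(picks=["Cz", "Pz"], duration=5.0, show=False)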
(`#12467 `__) +- New class :class:`mne.time_frequency.RawTFR` and new methods :meth:`mne.io.Raw.compute_tfr`, :meth:`mne.Epochs.compute_tfr`, and :meth:`mne.Evoked.compute_tfr`. These new methods supersede the functions :func:`mne.time_frequency.tfr_morlet`, :func:`mne.time_frequency.tfr_multitaper`, and :func:`mne.time_frequency.tfr_stockwell`, which are now considered "legacy" functions. By `Daniel McCloy`_. (`#11282 `__) +- Add the ability to reject :class:`mne.Epochs` using callables, by `Jacob Woessner`_. (`#12195 `__) +- Custom functions applied via :meth:`mne.io.Raw.apply_function`, :meth:`mne.Epochs.apply_function` or :meth:`mne.Evoked.apply_function` can now use ``ch_idx`` or ``ch_name`` to get access to the currently processed channel during channel-wise processing. +- :meth:`mne.Evoked.apply_function` can now also work on the full data array instead of just channel-wise, analogous to :meth:`mne.io.Raw.apply_function` and :meth:`mne.Epochs.apply_function`, by `Dominik Welke`_. (`#12206 `__) +- Allow :class:`mne.time_frequency.EpochsTFR` as input to :func:`mne.epochs.equalize_epoch_counts`, by `Carina Forster`_. (`#12207 `__) +- Speed up export to .edf in :func:`mne.export.export_raw` by using ``edfio`` instead of ``EDFlib-Python``. (`#12218 `__) +- Added a helper function :func:`mne.preprocessing.eyetracking.convert_units` to convert eyegaze data from pixel-on-screen values to radians of visual angle. Also added a helper function :func:`mne.preprocessing.eyetracking.get_screen_visual_angle` to get the visual angle that the participant screen subtends, by `Scott Huberty`_. (`#12237 `__) +- We added type hints for the return values of :func:`mne.read_evokeds` and :func:`mne.io.read_raw`. Development environments like VS Code or PyCharm will now provide more help when using these functions in your code. By `Richard Höchenberger`_ and `Eric Larson`_. (:gh:`12297`) (`#12250 `__) +- Add ``method="polyphase"`` to :meth:`mne.io.Raw.resample` and related functions to allow resampling using :func:`scipy.signal.upfirdn`, by `Eric Larson`_. (`#12268 `__) +- The package build backend was switched from ``setuptools`` to ``hatchling``. This will only affect users who build and install MNE-Python from source. By `Richard Höchenberger`_. (:gh:`12281`) (`#12269 `__) +- :meth:`mne.Annotations.to_data_frame` can now output different formats for the ``onset`` column: seconds, milliseconds, datetime objects, and timedelta objects. By `Daniel McCloy`_. (`#12289 `__) +- Add method :meth:`mne.SourceEstimate.save_as_surface` to allow saving GIFTI files from surface source estimates, by `Peter Molfese`_. (`#12309 `__) +- :class:`mne.Epochs` can now be constructed using :class:`mne.Annotations` stored in the ``raw`` object, by specifying ``events=None``. By `Alex Rockhill`_. (`#12311 `__) +- Add :meth:`~mne.SourceEstimate.savgol_filter`, :meth:`~mne.SourceEstimate.filter`, :meth:`~mne.SourceEstimate.apply_hilbert`, and :meth:`~mne.SourceEstimate.apply_function` methods to :class:`mne.SourceEstimate` and related classes, by `Hamza Abdelhedi`_. (`#12323 `__) +- Add ability to export STIM channels to EDF in :meth:`mne.io.Raw.export`, by `Clemens Brunner`_. (`#12332 `__) +- Speed up raw FIF reading when using small buffer sizes by `Eric Larson`_. (`#12343 `__) +- Speed up :func:`mne.io.read_raw_neuralynx` on large datasets with many gaps, by `Kristijan Armeni`_. (`#12371 `__) +- Add ability to detect minima peaks in :class:`mne.Evoked` if data is all positive and maxima if data is all negative.
(`#12383 `__) +- Add ability to remove bad marker coils in :func:`mne.io.read_raw_kit`, by `Judy D Zhu`_. (`#12394 `__) +- Add option to pass ``image_kwargs`` to :class:`mne.Report.add_epochs` to allow adjusting e.g. ``vmin`` and ``vmax`` of the epochs image in the report, by `Sophie Herbst`_. (`#12443 `__) +- Add support for multiple raw instances in :func:`mne.preprocessing.compute_average_dev_head_t` by `Eric Larson`_. (`#12445 `__) +- Completing PR 12453, add an option to pass ``image_kwargs`` per channel type to :class:`mne.Report.add_epochs`. (`#12454 `__) +- :func:`mne.epochs.make_metadata` now accepts strings as ``tmin`` and ``tmax`` parameter values, simplifying metadata creation based on time-varying events such as responses to a stimulus, by `Richard Höchenberger`_. (`#12462 `__) +- Include date of acquisition and filter parameters in ``raw.info`` for :func:`mne.io.read_raw_neuralynx` by `Kristijan Armeni`_. (`#12463 `__) +- Add ``physical_range="channelwise"`` to :meth:`mne.io.Raw.export` for exporting to EDF, which can improve amplitude resolution if individual channels vary greatly in their offsets, by `Clemens Brunner`_. (`#12510 `__) +- Added the ability to reorder report contents via :meth:`mne.Report.reorder` (with + helper to get contents with :meth:`mne.Report.get_contents`), by `Eric Larson`_. (`#12513 `__) +- Add ``exclude_after_unique`` option to :meth:`mne.io.read_raw_edf` and :meth:`mne.io.read_raw_bdf` to search for the channels to exclude after making channel names unique, by `Michiru Kaneda`_ (`#12518 `__) + + +Other changes +------------- + +- Updated the text in the preprocessing tutorial to use :meth:`mne.io.Raw.pick` instead of the legacy :meth:`mne.io.Raw.pick_types`, by :newcontrib:`btkcodedev`. (`#12326 `__) +- Clarify in the :ref:`EEG referencing tutorial ` that an average reference projector is required for inverse modeling, by :newcontrib:`Nabil Alibou` (`#12420 `__) +- Fix dead links in ``README.rst`` documentation by :newcontrib:`Will Turner`. (`#12461 `__) +- Replace percent format with f-string format specifiers, by :newcontrib:`Hasrat Ali Arzoo`. (`#12464 `__) +- Adopted towncrier_ for changelog entries, by `Eric Larson`_. (`#12299 `__) +- Automate adding of PR number to towncrier stubs, by `Eric Larson`_. (`#12318 `__) +- Refresh code base to use Python 3.9 syntax using Ruff UP rules (pyupgrade), by `Clemens Brunner`_. (`#12358 `__) +- Move private data preparation functions for BrainVision export from ``pybv`` to ``mne``, by `Clemens Brunner`_. (`#12450 `__) +- Update the list of sensor types in docstrings, tutorials and the glossary by `Nabil Alibou`_. (`#12509 `__) + + +Authors +------- +* Alex Rockhill +* Alexander Kiefer+ +* Alexandre Gramfort +* Britta Westner +* Carina Forster +* Clemens Brunner +* Daniel McCloy +* Dominik Welke +* Eric Larson +* Erkka Heinila +* Florian Hofer +* Hamza Abdelhedi +* Hasrat Ali Arzoo+ +* Ivo de Jong+ +* Jacob Woessner +* Judy D Zhu +* Kristijan Armeni +* Liberty Hamilton +* Marijn van Vliet +* Martin Oberg+ +* Mathieu Scheltienne +* Michiru Kaneda +* Motofumi Fushimi+ +* Nabil Alibou+ +* Nikolai Kapralov+ +* Peter J.
Molfese +* Richard Höchenberger +* Richard Scholz+ +* Scott Huberty +* Seyed (Yahya) Shirazi+ +* Sophie Herbst +* Stefan Appelhoff +* Thomas Donoghue +* Thomas Samuel Binns +* Tristan Stenner +* Velu Prabhakar Kumaravel+ +* Will Turner+ +* btkcodedev+ diff --git a/doc/changes/v1.8.rst b/doc/changes/v1.8.rst new file mode 100644 index 00000000000..f6abcc65e52 --- /dev/null +++ b/doc/changes/v1.8.rst @@ -0,0 +1,173 @@ +.. _changes_1_8_0: + +Version 1.8.0 (2024-08-18) +========================== + +Dependencies +------------ + +- Minimum versions for dependencies were bumped to those ~2 years old at the time of release (by `Eric Larson`_), including: + + - NumPy ≥ 1.23 + - SciPy ≥ 1.9 + - Matplotlib ≥ 3.6 + - scikit-learn ≥ 1.1 (`#12554 `__) +- Official support for PySide2 has been dropped in this release (though it might continue + to work), by `Eric Larson`_. (`#12742 `__) + + +Bugfixes +-------- + +- Fix bug where an event that occurred only once was excluded in :func:`mne.io.read_raw_egi`, by :newcontrib:`Ping-Keng Jao`. (`#12300 `__) +- Fix bug where :func:`mne.stats.permutation_cluster_test` (and related functions) used an excessive amount of memory for large 2D data when the TFCE method is selected, by :newcontrib:`Nicolas Fourcaud-Trocmé`. (`#12609 `__) +- Assure that blink times are handled correctly in :func:`mne.preprocessing.eyetracking.interpolate_blinks`, even when the raw object is cropped, by `Scott Huberty`_ and :newcontrib:`Sammi Chekroud`. (`#12759 `__) +- Fix check for dropping all channels in :meth:`mne.io.Raw.drop_channels` and related methods, by :newcontrib:`Farzin Negahbani`. (`#12763 `__) +- Fix scrolling behavior in :class:`~mne.Report` when clicking on a TOC entry multiple times, by `Richard Höchenberger`_. (`#12561 `__) +- Fix bug where :func:`mne.decoding.get_coef` did not work properly with :class:`mne.decoding.CSP`, by `Eric Larson`_. (`#12562 `__) +- The color scaling of Evoked topomaps added to reports via :meth:`mne.Report.add_evokeds` + was sometimes sub-optimal if bad channels were present in the data. This has now been fixed + and should be more consistent with the topomaps shown in the joint plots, by `Richard Höchenberger`_. (`#12578 `__) +- Fix error causing :meth:`mne.Epochs.interpolate_bads` not to work for ``seeg`` channels and fix a single contact on neighboring shafts sometimes being included in interpolation, by `Alex Rockhill`_ (`#12593 `__) +- Fix bug where :func:`mne.io.read_raw_fil` could not assign bad channels on import, by `George O'Neill`_. (`#12597 `__) +- Fixed a bug where :meth:`mne.Evoked.animate_topomap` did not work with :func:`mne.preprocessing.compute_current_source_density`-modified data, by `Michal Žák`_. (`#12605 `__) +- Fix overflow when plotting source estimates where data is all zero (or close to zero), and fix the range of allowed values for the colorbar sliders, by `Marijn van Vliet`_. (`#12612 `__) +- Fix adding channels to :class:`~mne.time_frequency.EpochsTFR` objects, by `Clemens Brunner`_.
(`#12616 `__) +- Fix for new sklearn metadata routing protocol in decoding search_light, by `Alex Gramfort`_ (`#12620 `__) +- Fix bug where :func:`mne.time_frequency.csd_multitaper`, :func:`mne.time_frequency.csd_fourier`, :func:`mne.time_frequency.csd_array_multitaper`, and :func:`mne.time_frequency.csd_array_fourier` would return cross-spectral densities with the ``fmin`` and ``fmax`` frequencies missing, by `Thomas Binns`_ (`#12633 `__) +- Output types of sparse arrays were changed from ``matrix`` to ``array`` in + :func:`~mne.channels.read_ch_adjacency`, :func:`~mne.channels.find_ch_adjacency`, + :func:`~mne.stats.combine_adjacency`, :func:`~mne.spatio_temporal_src_adjacency`, + and related functions to comply with the pending deprecation of ``np.matrix``. + The returned objects now behave like standard :class:`~numpy.ndarray` objects, and + in particular ``*`` now operates element-wise instead of performing matrix + multiplication. You can use ``@`` as a backward compatible matrix multiplication + for both ``np.matrix`` and ``np.ndarray`` objects, and if a matrix is desired + the outputs can be cast directly, for example as ``scipy.sparse.csr_matrix(out)``. + Changed by `Eric Larson`_. (`#12646 `__) +- Fix incorrect RuntimeWarning (different channel filter settings) in EDF/BDF import, by `Clemens Brunner`_. (`#12661 `__) +- In :func:`mne.export.export_raw` (``fmt='edf'``), when padding data to create equal-length data blocks, + edge-padding is favored over zero-padding in order to avoid accidentally enlarging physical range, by `Qian Chu`_. (`#12676 `__) +- In :func:`mne.io.read_raw_eyelink`, gracefully handle missing datetime in file by `Scott Huberty`_. (`#12687 `__) +- Fix regression with :class:`mne.decoding.CSP` where using ``rank="full"`` errantly + raised an error, by `Eric Larson`_. (`#12694 `__) +- Fix in-memory anonymization of data read with :func:`mne.io.read_raw_edf` by `Eric Larson`_. (`#12720 `__) +- Fix bug with overplotting of butterfly labels in :func:`mne.viz.plot_raw` and related + functions and methods, by `Eric Larson`_. (`#12721 `__) +- Fix a bug where the ``ylim`` parameter would sometimes apply to the wrong channel types in :func:`mne.viz.plot_evoked_topo`, by `Marijn van Vliet`_. (`#12724 `__) +- Fix bug where ad-hoc regularization of a covariance with + :func:`mne.cov.regularize` did not properly account for bad channels + in rank calculations, by `Eric Larson`_. (`#12727 `__) +- Fix bug when reading NIRX files saved in a non-western encoding, by `Daniel McCloy`_. (`#12730 `__) +- :func:`~mne.set_log_file` and :func:`~mne.set_log_level` can now be correctly detected by + static analysis tools like Pylance, by `Richard Höchenberger`_. (`#12735 `__) +- Safeguard loading of ``meas_date`` in :func:`mne.io.read_raw_edf`, by `Mathieu Scheltienne`_. (`#12754 `__) +- Fix bug in :func:`~mne.preprocessing.maxwell_filter_prepare_emptyroom` where a difference in sampling frequencies between data and emptyroom files was ignored, by `Daniel McCloy`_. (`#12760 `__) + + +API changes by deprecation +-------------------------- + +- A new argument ``events_as_annotations`` has been added to :func:`mne.io.read_raw_egi` + with a default value of ``False`` that will change to ``True`` in version 1.9, by + `Scott Huberty`_ and `Eric Larson`_. (`#12300 `__) +- ``mne.Info.ch_names`` will now return an empty list instead of raising a ``KeyError`` if no channels + are present, by `Richard Höchenberger`_. 
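+  A minimal sketch of the new behavior (directly instantiating an empty :class:`mne.Info` is normally discouraged and is done here only for illustration)::
+
+      import mne
+
+      info = mne.Info()     # hypothetical Info object with no channels
+      print(info.ch_names)  # [] in 1.8+; previously this raised a KeyError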
(`#12583 `__) +- Documented that :func:`~mne.match_channel_orders` can also work on Epochs, and Evoked objects. Reflecting this, deprecated the ``raws`` parameter in favor of an ``insts`` parameter, by `Stefan Appelhoff`_. (`#12699 `__) +- The ``block`` argument to :class:`mne.viz.Brain` is deprecated and will be removed in + 1.9, use :func:`matplotlib.pyplot.show` with ``block=True`` instead, by `Eric Larson`_. (`#12719 `__) +- :func:`mne.datasets.fetch_fsaverage` now returns a :class:`python:pathlib.Path` object + rather than a string. Support for string concatenation with plus (``+``) is thus + deprecated and will be removed in 1.9, use the forward-slash ``/`` operator instead, + by `Eric Larson`_. (`#12771 `__) + + +New features +------------ + +- Added internals to allow modifying single-channel annotations in the Qt + raw browser, by :newcontrib:`Noah Markowitz`. (`#12669 `__) +- Adding :meth:`mne.channels.Layout.copy` and :meth:`mne.channels.Layout.pick` to copy and select channels from a :class:`mne.channels.Layout` object. Plotting 2D topographies of evoked responses with :func:`mne.viz.plot_evoked_topo` with both arguments ``layout`` and ``exclude`` now ignores excluded channels from the :class:`mne.channels.Layout`. By `Mathieu Scheltienne`_. (`#12338 `__) +- In :class:`~mne.Report` you can now easily navigate through images and figures connected to a slider with the left and right arrow keys. Clicking on the slider or respective image will focus the slider, enabling keyboard navigation, by `Richard Höchenberger`_ (`#12556 `__) +- When plotting EOG and ECG artifact scores for ICA in :meth:`mne.Report.add_ica`, + the channel names used for artifact detection are now displayed in the titles of + each respective subplot, by `Richard Höchenberger`_. (`#12573 `__) +- Use ``aseg='auto'`` for :meth:`mne.viz.Brain.add_volume_labels` and :func:`mne.get_montage_volume_labels` to use ``aparc+aseg`` by default or if not present use ``wmparc`` because freesurfer uses ``wmparc`` in the latest version, by `Alex Rockhill`_. (`#12576 `__) +- The HTML representations of :class:`~mne.io.Raw`, :class:`~mne.Epochs`, + and :class:`~mne.Evoked` (which you will see e.g. when working with Jupyter Notebooks or + :class:`~mne.Report`) have been updated to be more consistent and contain + slightly more information, by `Richard Höchenberger`_. (:gh:`12624`) (`#12583 `__) +- When adding :class:`~mne.Evoked` data to a :class:`~mne.Report` via + :meth:`~mne.Report.add_evokeds`, we now also include an "Info" section + with some basic summary info, as has already been the case for raw and + epochs data, by `Richard Höchenberger`_. (`#12584 `__) +- We added new installation variants for a full installation with the PySide6 Qt binding + (``"mne[full-pyside6]"``), with the PyQt6 binding (``"mne[full-pyqt6]"``, equivalent to + ``"mne[full]"``), and without any Qt binding (``"mne[full-no-qt]"``), which may be useful + in certain situations by `Richard Höchenberger`_. (`#12628 `__) +- Adding argument ``'random'`` to :func:`~mne.epochs.equalize_epoch_counts` and to :meth:`~mne.Epochs.equalize_event_counts` to randomly select epochs or events. By `Mathieu Scheltienne`_. (`#12649 `__) +- Add new parameter ``ignore_marker_types`` to :func:`~mne.io.read_raw_brainvision` to ignore marker types (and only use marker descriptions) when reading BrainVision files, by `Clemens Brunner`_. (`#12652 `__) +- Added support for passing ``axes`` to :func:`mne.viz.plot_head_positions` when + ``mode='field'``, by `Eric Larson`_. 
(`#12655 `__) +- The HTML representation of :class:`~mne.Epochs` (visible e.g. in Jupyter or in :class:`~mne.Report`) now + indicates whether metadata is attached to the epochs, by `Richard Höchenberger`_. (`#12686 `__) +- Montage plots created with :meth:`~mne.channels.DigMontage.plot` now scale both the channel dots *and* channel names with the new ``scale`` parameter. The default is ``scale=1`` (factors less than 1 will scale down, whereas factors greater than 1 will scale up). The previous ``scale_factor`` parameter only affected marker size, and this parameter is now deprecated. By `Clemens Brunner`_. (`#12703 `__) +- Add :func:`~mne.stats.erp.compute_sme` to compute the analytical standardized measurement error (SME) as a data quality measure for ERP studies, by `Clemens Brunner`_. (`#12707 `__) +- Use :class:`python:datetime.date` for ``info["subject_info"]["birthday"]`` rather than + a tuple of ``(year, month, day)`` by `Eric Larson`_. (`#12720 `__) +- Add default ``spatial_colors="auto"`` to :func:`mne.viz.plot_evoked_white` and + :meth:`mne.Evoked.plot_white` to enable spatial colors by default, by `Eric Larson`_. (`#12725 `__) +- :func:`mne.minimum_norm.make_inverse_operator` and related functions now more robustly + warn if the whitener computed from the noise covariance has an incorrect rank, + by `Eric Larson`_. (`#12727 `__) +- When indexing :class:`~mne.Epochs` (e.g. by doing ``epochs[0]``), static code analysis tools like Pylance + should now be able to infer that the returned object is an epoch, too, and provide editor support + like automated code completions, by `Richard Höchenberger`_. (`#12733 `__) +- When using the ``data_path()`` in any dataset included in :py:mod:`mne.datasets`, + static analysis tools like Pylance will now correctly infer that a `pathlib.Path` will + be returned and provide editor support like automated code completions, by `Richard Höchenberger`_. (`#12734 `__) +- Add support for storing Fourier coefficients in :class:`mne.time_frequency.Spectrum`, + :class:`mne.time_frequency.EpochsSpectrum`, :class:`mne.time_frequency.SpectrumArray`, + and :class:`mne.time_frequency.EpochsSpectrumArray` objects, by `Thomas Binns`_. (`#12747 `__) + + +Other changes +------------- + +- Fixed issue template links by :newcontrib:`Michal Žák` (`#12600 `__) +- Enhance documentation on decimation filtering to prevent aliasing, by :newcontrib:`Xabier de Zuazo`. (`#12650 `__) +- Added `vulture `__ as a pre-commit hook and removed related dead code, by `Eric Larson`_. (`#12569 `__) +- Add link to BEst-Python in the list of related software, by `Ilian Azz`_. (`#12659 `__) +- Improved clarity of parameter documentation for `mne.decoding.SSD.fit`, by `Thomas Binns`_. (`#12664 `__) +- Disable the "Back to top" button in the documentation, by `Richard Höchenberger`_. (`#12688 `__) +- Improve documentation of the ``ylim`` argument throughout the :class:`~mne.Evoked` plotting functions and validate its type as :class:`dict` or ``None`` to prevent misuse, by `Mathieu Scheltienne`_. (`#12697 `__) +- Code contributions are now measured using PRs and reported on the :ref:`contributors` + page, by `Eric Larson`_. (`#12774 `__) +- Authorship headers in MNE-Python code have been standardized, by `Eric Larson`_.
(`#12781 `__) + +Authors +------- + +* Alex Rockhill +* Alexandre Gramfort +* Clemens Brunner +* Daniel McCloy +* Dimitri Papadopoulos Orfanos +* Eric Larson +* Farzin Negahbani+ +* George O'Neill +* Ilian AZZ +* Marijn van Vliet +* Mathieu Scheltienne +* Michal Žák+ +* Nabil Alibou +* Nicolas Fourcaud-Trocmé+ +* Noah Markowitz+ +* Ping-Keng Jao+ +* Qian Chu +* Richard Höchenberger +* Sammi Chekroud+ +* Scott Huberty +* Stefan Appelhoff +* Thomas S. Binns +* Xabier de Zuazo+ diff --git a/doc/changes/v1.9.rst b/doc/changes/v1.9.rst new file mode 100644 index 00000000000..0c6f7c1fddc --- /dev/null +++ b/doc/changes/v1.9.rst @@ -0,0 +1,128 @@ +.. _changes_1_9_0: + +Version 1.9.0 (2024-12-18) +========================== + +Dependencies +------------ + +- Minimum supported dependencies were updated in accordance with SPEC0_, most notably Python 3.10+ is now required. (`#12798 `__) +- Importing from ``mne.decoding`` now explicitly requires ``scikit-learn`` to be installed, + by `Eric Larson`_. (`#12834 `__) +- Compatibility improved for Python 3.13, by `Eric Larson`_. (`#13021 `__) + + +Bugfixes +-------- + +- Fix typos in the Spatio-Spectral Decomposition example, by :newcontrib:`Simon M. Hofmann`. (`#12924 `__) +- Fix duration calculation for the textual (``__repr__``) and html (``_repr_html_``, used by e.g. Jupyter) display of :class:`mne.io.Raw` instances. For example, a duration of 1 h is now displayed as ``01:00:00`` rather than ``00:59:60``. By :newcontrib:`Leonardo Rochael Almeida`. (`#12955 `__) +- Fix IndexError when loading a CNT file that does not have annotations, by :newcontrib:`Ziyi ZENG`. (`#12986 `__) +- Fix handling of MRI file-path in :class:`mne.SourceSpaces` and safeguard saving of :class:`pathlib.Path` with ``h5io`` by casting to :class:`str`, by `Mathieu Scheltienne`_. (`#12803 `__) +- Cast ``fwd["info"]`` to :class:`~mne.Info` and ``fwd["src"]`` to :class:`~mne.SourceSpaces` when loading a forward solution from an HDF5 file, by `Mathieu Scheltienne`_. (`#12804 `__) +- Fix bug where :meth:`mne.Epochs.compute_tfr` could not be used with the multitaper method and complex or phase outputs, by `Thomas Binns`_. (`#12842 `__) +- Fixed a bug where split FIF files that were read and then appended to other + :class:`mne.io.Raw` instances had their ``BAD boundary`` annotations incorrectly offset + in samples by the number of split files, by `Eric Larson`_. (`#12843 `__) +- Enforce SI units for eye-tracking data (eyegaze data should be in radians of visual angle, not pixels; pupil size data should be in meters). + Updated tutorials to demonstrate how to convert data to SI units before analyses, by `Scott Huberty`_. (`#12846 `__) +- Prevent the ``colorbar`` parameter being ignored in topomap plots such as :meth:`mne.time_frequency.Spectrum.plot_topomap`, by `Thomas Binns`_. (`#12853 `__) +- Fix bug where invalid data types (e.g., ``np.ndarray``s) could be used in some + :class:`mne.io.Info` fields like ``info["subject_info"]["weight"]``, by `Eric Larson`_. (`#12875 `__) +- When creating a :class:`~mne.time_frequency.SpectrumArray`, the array shape check now + compares against the total of both 'good' and 'bad' channels in the provided + :class:`~mne.Info` (previously only good channels were checked), by + `Mathieu Scheltienne`_. (`#12877 `__) +- Fix bug where :ref:`mne coreg` would always show MEG channels even if the "MEG Sensors" checkbox was disabled, by `Eric Larson`_.
(`#12884 `__) +- :class:`mne.Report` HDF5 files are now written in ``mode='a'`` (append) to allow users to store other data in the HDF5 files, by `Eric Larson`_. (`#12901 `__) +- Fix bug in :func:`mne.io.read_raw_gdf` when NumPy >= 2 is used, by `Clemens Brunner`_. (`#12909 `__) +- Allow exporting edf where a channel contains only constant values, by `Florian Hofer`_. (`#12911 `__) +- Fix a bug in :func:`mne.epochs.make_metadata`, where missing values in the columns + generated for ``keep_first`` and ``keep_last`` events were represented by empty strings, + while it should have been ``NA`` values, by `Richard Höchenberger`_. (`#12931 `__) +- Fix decimal places of :class:`float` ``mne.Evoked.nave`` in :meth:`mne.Evoked.plot` and :meth:`mne.Evoked.plot_image`, by `Gennadiy Belonosov`_. (`#12936 `__) +- Fix displayed units in representations of classes such as :class:`mne.io.Raw` to correctly use KiB, MiB, GiB, and so on, by `Clemens Brunner`_. (`#12962 `__) +- Gracefully handle invalid patient info when reading EDF files by `Scott Huberty`_. (`#12968 `__) +- Correct :func:`mne.io.read_raw_cnt` to read responses and fix exceptions by `Jacob Woessner`_. (`#13007 `__) +- Fix errant detection of software-rendered vs hardware-rendered MESA GL contexts in 3D rendering on Linux, by `Eric Larson`_. (`#13012 `__) +- Fix plot scaling for :meth:`Spectrum.plot(dB=True, amplitude=True) `, by `Daniel McCloy`_. (`#13036 `__) + + +API changes by deprecation +-------------------------- + +- Deprecate ``average`` parameter in ``plot_filters`` and ``plot_patterns`` methods of the :class:`mne.decoding.CSP` and :class:`mne.decoding.SPoC` classes, by `Thomas Binns`_. (`#12829 `__) +- Deprecate ``subject`` parameter in favor of ``subjects`` in :func:`mne.datasets.eegbci.load_data`, by `Stefan Appelhoff`_. (`#12918 `__) + + +New features +------------ + +- Added support for ``sensor_scales`` to :meth:`mne.viz.Brain.add_sensors` and :func:`mne.viz.plot_alignment`, by :newcontrib:`Alex Lepauvre`. (`#12805 `__) +- Add convenience :attr:`mne.io.Raw.duration` property to centralize duration calculation for the textual (``__repr__``) and html (``_repr_html_``, used by e.g. Jupyter) display of :class:`mne.io.Raw` instances, by :newcontrib:`Leonardo Rochael Almeida`. (`#12955 `__) +- Add option to :func:`mne.preprocessing.fix_stim_artifact` to use baseline average to flatten TMS pulse artifact by `Fahimeh Mamashli`_ and `Padma Sundaram`_ and `Mohammad Daneshzand`_. (`#6915 `__) +- Add support for `dict` type argument ``ref_channels`` to :func:`mne.set_eeg_reference`, to allow flexible re-referencing (e.g. ``raw.set_eeg_reference(ref_channels={'A1': ['A2', 'A3']})`` will set the new A1 data to be ``A1 - mean(A2, A3)``), by `Alex Lepauvre`_ and `Qian Chu`_ and `Daniel McCloy`_. (`#12366 `__) +- Add reader for ANT Neuro files in the ``*.cnt`` format with :func:`~mne.io.read_raw_ant`, by `Mathieu Scheltienne`_, `Eric Larson`_ and `Proloy Das`_. (`#12792 `__) +- Add support for a :class:`mne.transforms.Transform` in the argument ``trans`` of the coregistration GUI called with :func:`mne.gui.coregistration`, by `Mathieu Scheltienne`_. (`#12801 `__) +- :meth:`~mne.io.Raw` and :meth:`~mne.Epochs.save` now return the path to the saved file(s), by `Victor Ferat`_. (`#12811 `__) +- :func:`mne.channels.read_custom_montage` may now read a newer version of the ``.elc`` ASA Electrode file format, by `Stefan Appelhoff`_. 
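+  A minimal sketch (the ``electrodes.elc`` path is hypothetical; any ASA ``.elc`` file, including ones written by newer ASA versions, should work)::
+
+      import mne
+
+      # Hypothetical file exported from a recent ASA version.
+      montage = mne.channels.read_custom_montage("electrodes.elc")
+      print(montage.ch_names)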
(`#12830 `__) +- Added the ``title`` argument to :func:`mne.viz.create_3d_figure`, and + ``color`` and ``position`` arguments to :func:`mne.viz.set_3d_title`, by `Eric Larson`_. (`#12871 `__) +- Improved reporting and plotting options: + + - :meth:`mne.Report.add_projs` can now plot with :func:`mne.viz.plot_projs_joint` rather than :func:`mne.viz.plot_projs_topomap` + - :class:`mne.Report` now has attributes ``img_max_width`` and ``img_max_res`` that can be used to control image scaling. + - :class:`mne.Report` now has an attribute ``collapse`` that allows collapsing sections and/or subsections by default. + - :func:`mne.viz.plot_head_positions` now has a ``totals=True`` option to show the total distance and angle of the head. + + Changes by `Eric Larson`_. (`#12901 `__) +- Added the ``psd_args`` argument to :func:`mne.viz.plot_ica_sources` and :meth:`mne.preprocessing.ICA.plot_sources`, by `Richard Scholz`_. (`#12912 `__) +- Add ability to use :func:`mne.preprocessing.compute_fine_calibration` with non-Neuromag-style systems, as well as options to control the bad-angle and error tolerances, by `Eric Larson`_. (`#12966 `__) +- Add support for ``uint16_codec`` argument in :func:`mne.io.read_raw_eeglab` when ``pymatreader`` (which already supported this argument previously) is not installed, by `Clemens Brunner`_. (`#12971 `__) +- Added support for saving and loading channel names from FIF in :meth:`mne.channels.DigMontage.save` and :meth:`mne.channels.read_dig_fif` and added the convention that they should be saved as ``-dig.fif``, by `Eric Larson`_. (`#13003 `__) +- Add new :meth:`Raw.rescale ` method to rescale the data in place, by `Clemens Brunner`_. (`#13018 `__) + + +Other changes +------------- + +- Fix a mistake in :ref:`tut-artifact-regression` where the wrong regression coefficients were applied, by :newcontrib:`Jacob Phelan`. (`#12978 `__) +- Use custom code in :func:`mne.sys_info` to get the amount of physical memory and a more informative CPU name instead of using the ``psutil`` package, by `Clemens Brunner`_. (`#12787 `__) +- Improve documentation clarity of ``fit_transform`` methods for :class:`mne.decoding.SSD`, :class:`mne.decoding.CSP`, and :class:`mne.decoding.SPoC` classes, by `Thomas Binns`_. (`#12827 `__) +- Improve handling of filenames in ``raw.filenames`` by using :class:`~pathlib.Path` instead of :class:`str`, by `Mathieu Scheltienne`_. (`#12843 `__) +- Improve automatic figure scaling of :func:`mne.viz.plot_events`, and event_id and count overview legend when a high amount of unique events is supplied, by `Stefan Appelhoff`_. (`#12844 `__) +- :meth:`mne.preprocessing.ICA.find_bads_muscle` can now be run when passing an ``inst`` without sensor positions. However, it will just use the first of three criteria (slope) to find muscle-related ICA components, by `Stefan Appelhoff`_. (`#12862 `__) +- Update governance model, by `Daniel McCloy`_. (`#12896 `__) +- Improve the :ref:`tut-autogenerate-metadata`, by `Clemens Brunner`_ and `Richard Höchenberger`_. (`#12931 `__) +- Mention some gotchas that arise from the fact that by default, we pool across dipole orientations when performing source estimation, by `Marijn van Vliet`_ (`#12960 `__) +- Repository CI security is now audited using `zizmor `__, by `Eric Larson`_. 
(`#13011 `__) + +Authors +------- + +* Alex Lepauvre+ +* Britta Westner +* Clemens Brunner +* Daniel McCloy +* Eric Larson +* Fahimeh Mamashli +* Florian Hofer +* Gennadiy Belonosov +* Jacob Phelan +* Jacob Woessner +* Leonardo Rochael Almeida+ +* Mainak Jas +* Marijn van Vliet +* Mathieu Scheltienne +* Proloy Das +* Qian Chu +* Richard Höchenberger +* Richard Scholz +* Santeri Ruuskanen +* Scott Huberty +* Simon M. Hofmann+ +* Stefan Appelhoff +* Thomas Grainger +* Thomas S. Binns +* Victor Férat +* Ziyi ZENG+ diff --git a/doc/cited.rst b/doc/cited.rst deleted file mode 100644 index 93a3371bb7f..00000000000 --- a/doc/cited.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _cited: - -Papers citing MNE-Python -======================== - -Estimates provided by Google Scholar as of 20 Dec 2022: - -- `MNE (1370) `_ -- `MNE-Python (1640) `_ diff --git a/doc/conf.py b/doc/conf.py index cd11edf8789..350f0d230e8 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,62 +1,85 @@ -# -*- coding: utf-8 -*- -# -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html +"""Configuration file for the Sphinx documentation builder. + +This file only contains a selection of the most common options. For a full +list see the documentation: +https://www.sphinx-doc.org/en/master/usage/configuration.html +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from datetime import datetime, timezone import faulthandler -import gc import os import subprocess import sys -import time -import warnings +from datetime import datetime, timezone +from importlib.metadata import metadata +from pathlib import Path -import numpy as np import matplotlib import sphinx -from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder +from intersphinx_registry import get_intersphinx_mapping from numpydoc import docscrape +from sphinx.config import is_serializable +from sphinx.domains.changeset import versionlabels +from sphinx_gallery.sorting import ExplicitOrder import mne -from mne.fixes import _compare_version +import mne.html_templates._templates from mne.tests.test_docstring_parameters import error_ignores -from mne.utils import (linkcode_resolve, # noqa, analysis:ignore - _assert_no_instances, sizeof_fmt, run_subprocess) -from mne.viz import Brain # noqa +from mne.utils import ( + linkcode_resolve, + run_subprocess, +) -matplotlib.use('agg') +assert linkcode_resolve is not None # avoid flake warnings, used by numpydoc +matplotlib.use("agg") faulthandler.enable() -os.environ['_MNE_BROWSER_NO_BLOCK'] = 'true' -os.environ['MNE_BROWSER_OVERVIEW_MODE'] = 'hidden' -os.environ['MNE_BROWSER_THEME'] = 'light' -os.environ['MNE_3D_OPTION_THEME'] = 'light' +os.environ["_MNE_BROWSER_NO_BLOCK"] = "true" +os.environ["MNE_BROWSER_OVERVIEW_MODE"] = "hidden" +os.environ["MNE_BROWSER_THEME"] = "light" +os.environ["MNE_3D_OPTION_THEME"] = "light" +# https://numba.readthedocs.io/en/latest/reference/deprecation.html#deprecation-of-old-style-numba-captured-errors # noqa: E501 +os.environ["NUMBA_CAPTURED_ERRORS"] = "new_style" +mne.html_templates._templates._COLLAPSED = True # collapse info _repr_html_ # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -curdir = os.path.dirname(__file__) -sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne'))) -sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext'))) +curpath = Path(__file__).parent.resolve(strict=True) +sys.path.append(str(curpath / "sphinxext")) +from credit_tools import generate_credit_rst # noqa: E402 +from mne_doc_utils import report_scraper, reset_warnings, sphinx_logger # noqa: E402 # -- Project information ----------------------------------------------------- -project = 'MNE' +project = "MNE" td = datetime.now(tz=timezone.utc) # We need to triage which date type we use so that incremental builds work # (Sphinx looks at variable changes and rewrites all files if some change) -copyright = ( - f'2012–{td.year}, MNE Developers. Last updated \n' # noqa: E501 - '') # noqa: E501 -if os.getenv('MNE_FULL_DATE', 'false').lower() != 'true': - copyright = f'2012–{td.year}, MNE Developers. Last updated locally.' +project_copyright = ( + f'2012–{td.year}, MNE Developers. Last updated .\n' # noqa: E501 + """""" +) +if os.getenv("MNE_FULL_DATE", "false").lower() != "true": + project_copyright = f"2012–{td.year}, MNE Developers. Last updated locally." # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -64,107 +87,110 @@ # # The full version, including alpha/beta/rc tags. release = mne.__version__ +sphinx_logger.info(f"Building documentation for MNE {release} ({mne.__file__})") # The short X.Y version. -version = '.'.join(release.split('.')[:2]) +version = ".".join(release.split(".")[:2]) # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = '2.0' +needs_sphinx = "6.0" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.coverage', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.linkcode', - 'sphinx.ext.mathjax', - 'sphinx.ext.todo', - 'sphinx.ext.graphviz', - 'numpydoc', - 'sphinx_gallery.gen_gallery', - 'gen_commands', - 'gh_substitutions', - 'mne_substitutions', - 'newcontrib_substitutions', - 'gen_names', - 'matplotlib.sphinxext.plot_directive', - 'sphinxcontrib.bibtex', - 'sphinx_copybutton', - 'sphinx_design', - 'sphinxcontrib.youtube', - 'unit_role', + # builtin + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.coverage", + "sphinx.ext.doctest", + "sphinx.ext.graphviz", + "sphinx.ext.intersphinx", + "sphinx.ext.linkcode", + "sphinx.ext.mathjax", + "sphinx.ext.todo", + # contrib + "matplotlib.sphinxext.plot_directive", + "numpydoc", + "sphinx_copybutton", + "sphinx_design", + "sphinx_gallery.gen_gallery", + "sphinxcontrib.bibtex", + "sphinxcontrib.youtube", + "sphinxcontrib.towncrier.ext", + # homegrown + "contrib_avatars", + "gen_commands", + "gen_names", + "gh_substitutions", + "mne_substitutions", + "newcontrib_substitutions", + "unit_role", + "related_software", + "directive_formatting", ] # Add any paths that contain templates here, relative to this directory. 
-templates_path = ['_templates'] +templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_includes'] + +# NB: changes here should also be made to the linkcheck target in the Makefile +exclude_patterns = ["_includes", "changes/dev"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The main toctree document. -master_doc = 'index' +master_doc = "index" # List of documents that shouldn't be included in the build. unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. -exclude_trees = ['_build'] +exclude_trees = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. default_role = "py:obj" # A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['mne.'] +modindex_common_prefix = ["mne."] # -- Sphinx-Copybutton configuration ----------------------------------------- copybutton_prompt_text = r">>> |\.\.\. |\$ " copybutton_prompt_is_regexp = True +# -- sphinxcontrib-towncrier configuration ----------------------------------- + +towncrier_draft_working_directory = str(curpath.parent) + # -- Intersphinx configuration ----------------------------------------------- intersphinx_mapping = { - 'python': ('https://docs.python.org/3', None), - 'numpy': ('https://numpy.org/doc/stable', None), - 'scipy': ('https://docs.scipy.org/doc/scipy', None), - 'matplotlib': ('https://matplotlib.org/stable', None), - 'sklearn': ('https://scikit-learn.org/stable', None), - 'numba': ('https://numba.readthedocs.io/en/latest', None), - 'joblib': ('https://joblib.readthedocs.io/en/latest', None), - 'nibabel': ('https://nipy.org/nibabel', None), - 'nilearn': ('http://nilearn.github.io/stable', None), - 'nitime': ('https://nipy.org/nitime/', None), - 'surfer': ('https://pysurfer.github.io/', None), - 'mne_bids': ('https://mne.tools/mne-bids/stable', None), - 'mne-connectivity': ('https://mne.tools/mne-connectivity/stable', None), - 'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None), - 'seaborn': ('https://seaborn.pydata.org/', None), - 'statsmodels': ('https://www.statsmodels.org/dev', None), - 'patsy': ('https://patsy.readthedocs.io/en/latest', None), - 'pyvista': ('https://docs.pyvista.org/', None), - 'imageio': ('https://imageio.readthedocs.io/en/latest', None), - 'mne_realtime': ('https://mne.tools/mne-realtime', None), - 'picard': ('https://pierreablin.github.io/picard/', None), - 'qdarkstyle': ('https://qdarkstylesheet.readthedocs.io/en/latest', None), - 'eeglabio': ('https://eeglabio.readthedocs.io/en/latest', None), - 'dipy': ('https://dipy.org/documentation/latest/', - 'https://dipy.org/documentation/latest/objects.inv/'), - 'pooch': ('https://www.fatiando.org/pooch/latest/', None), - 'pybv': ('https://pybv.readthedocs.io/en/latest/', None), - 'pyqtgraph': ('https://pyqtgraph.readthedocs.io/en/latest/', None), - 'openmeeg': ('https://openmeeg.github.io/', None), + # More niche so didn't upstream to intersphinx_registry + "nitime": ("https://nipy.org/nitime/", None), + "mne_bids": 
("https://mne.tools/mne-bids/stable", None), + "mne-connectivity": ("https://mne.tools/mne-connectivity/stable", None), + "mne-gui-addons": ("https://mne.tools/mne-gui-addons", None), + "picard": ("https://mind-inria.github.io/picard/", None), + "eeglabio": ("https://eeglabio.readthedocs.io/en/latest", None), + "pybv": ("https://pybv.readthedocs.io/en/latest", None), } +intersphinx_mapping.update( + get_intersphinx_mapping( + packages=set( + """ +imageio matplotlib numpy pandas python scipy statsmodels sklearn numba joblib nibabel +seaborn patsy pyvista dipy nilearn pyqtgraph +""".strip().split() + ), + ) +) # NumPyDoc configuration ----------------------------------------------------- @@ -173,337 +199,397 @@ docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members numpydoc_class_members_toctree = False numpydoc_show_inherited_class_members = { - 'mne.SourceSpaces': False, - 'mne.Forward': False, + "mne.Forward": False, + "mne.Projection": False, + "mne.SourceSpaces": False, } numpydoc_attributes_as_param_list = True numpydoc_xref_param_type = True numpydoc_xref_aliases = { # Python - 'file-like': ':term:`file-like `', - 'iterator': ':term:`iterator `', - 'path-like': ':term:`path-like`', - 'array-like': ':term:`array_like `', - 'Path': ':class:`python:pathlib.Path`', - 'bool': ':class:`python:bool`', + "file-like": ":term:`file-like `", + "iterator": ":term:`iterator `", + "path-like": ":term:`path-like`", + "array-like": ":term:`array_like `", + "Path": ":class:`python:pathlib.Path`", + "bool": ":ref:`bool `", # Matplotlib - 'colormap': ':doc:`colormap `', - 'color': ':doc:`color `', - 'Axes': 'matplotlib.axes.Axes', - 'Figure': 'matplotlib.figure.Figure', - 'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D', - 'ColorbarBase': 'matplotlib.colorbar.ColorbarBase', + "colormap": ":ref:`colormap `", + "color": ":doc:`color `", + "Axes": "matplotlib.axes.Axes", + "Figure": "matplotlib.figure.Figure", + "Axes3D": "mpl_toolkits.mplot3d.axes3d.Axes3D", + "ColorbarBase": "matplotlib.colorbar.ColorbarBase", # sklearn - 'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut', + "LeaveOneOut": "sklearn.model_selection.LeaveOneOut", + "MetadataRequest": "sklearn.utils.metadata_routing.MetadataRequest", + "estimator": "sklearn.base.BaseEstimator", # joblib - 'joblib.Parallel': 'joblib.Parallel', + "joblib.Parallel": "joblib.Parallel", # nibabel - 'Nifti1Image': 'nibabel.nifti1.Nifti1Image', - 'Nifti2Image': 'nibabel.nifti2.Nifti2Image', - 'SpatialImage': 'nibabel.spatialimages.SpatialImage', + "Nifti1Image": "nibabel.nifti1.Nifti1Image", + "Nifti2Image": "nibabel.nifti2.Nifti2Image", + "SpatialImage": "nibabel.spatialimages.SpatialImage", # MNE - 'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked', - 'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces', - 'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout', - 'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel', - 'AverageTFR': 'mne.time_frequency.AverageTFR', - 'EpochsTFR': 'mne.time_frequency.EpochsTFR', - 'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA', - 'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations', - 'DigMontage': 'mne.channels.DigMontage', - 'VectorSourceEstimate': 'mne.VectorSourceEstimate', - 'VolSourceEstimate': 'mne.VolSourceEstimate', - 'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate', - 'MixedSourceEstimate': 'mne.MixedSourceEstimate', - 'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate', - 
'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection', - 'ConductorModel': 'mne.bem.ConductorModel', - 'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed', - 'InverseOperator': 'mne.minimum_norm.InverseOperator', - 'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity', - 'SourceMorph': 'mne.SourceMorph', - 'Xdawn': 'mne.preprocessing.Xdawn', - 'Report': 'mne.Report', - 'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge', - 'Vectorizer': 'mne.decoding.Vectorizer', - 'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter', - 'TemporalFilter': 'mne.decoding.TemporalFilter', - 'SSD': 'mne.decoding.SSD', - 'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC', - 'PSDEstimator': 'mne.decoding.PSDEstimator', - 'LinearModel': 'mne.decoding.LinearModel', - 'FilterEstimator': 'mne.decoding.FilterEstimator', - 'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP', - 'Beamformer': 'mne.beamformer.Beamformer', - 'Transform': 'mne.transforms.Transform', - 'Coregistration': 'mne.coreg.Coregistration', - 'Figure3D': 'mne.viz.Figure3D', - 'EOGRegression': 'mne.preprocessing.EOGRegression', - 'Spectrum': 'mne.time_frequency.Spectrum', - 'EpochsSpectrum': 'mne.time_frequency.EpochsSpectrum', + "Label": "mne.Label", + "Forward": "mne.Forward", + "Evoked": "mne.Evoked", + "Info": "mne.Info", + "SourceSpaces": "mne.SourceSpaces", + "Epochs": "mne.Epochs", + "Layout": "mne.channels.Layout", + "EvokedArray": "mne.EvokedArray", + "BiHemiLabel": "mne.BiHemiLabel", + "AverageTFR": "mne.time_frequency.AverageTFR", + "AverageTFRArray": "mne.time_frequency.AverageTFRArray", + "EpochsTFR": "mne.time_frequency.EpochsTFR", + "EpochsTFRArray": "mne.time_frequency.EpochsTFRArray", + "RawTFR": "mne.time_frequency.RawTFR", + "RawTFRArray": "mne.time_frequency.RawTFRArray", + "Raw": "mne.io.Raw", + "ICA": "mne.preprocessing.ICA", + "Covariance": "mne.Covariance", + "Annotations": "mne.Annotations", + "DigMontage": "mne.channels.DigMontage", + "VectorSourceEstimate": "mne.VectorSourceEstimate", + "VolSourceEstimate": "mne.VolSourceEstimate", + "VolVectorSourceEstimate": "mne.VolVectorSourceEstimate", + "MixedSourceEstimate": "mne.MixedSourceEstimate", + "MixedVectorSourceEstimate": "mne.MixedVectorSourceEstimate", + "SourceEstimate": "mne.SourceEstimate", + "Projection": "mne.Projection", + "ConductorModel": "mne.bem.ConductorModel", + "Dipole": "mne.Dipole", + "DipoleFixed": "mne.DipoleFixed", + "InverseOperator": "mne.minimum_norm.InverseOperator", + "CrossSpectralDensity": "mne.time_frequency.CrossSpectralDensity", + "SourceMorph": "mne.SourceMorph", + "Xdawn": "mne.preprocessing.Xdawn", + "Report": "mne.Report", + "TimeDelayingRidge": "mne.decoding.TimeDelayingRidge", + "Vectorizer": "mne.decoding.Vectorizer", + "UnsupervisedSpatialFilter": "mne.decoding.UnsupervisedSpatialFilter", + "TemporalFilter": "mne.decoding.TemporalFilter", + "SSD": "mne.decoding.SSD", + "Scaler": "mne.decoding.Scaler", + "SPoC": "mne.decoding.SPoC", + "PSDEstimator": "mne.decoding.PSDEstimator", + "LinearModel": "mne.decoding.LinearModel", + "FilterEstimator": "mne.decoding.FilterEstimator", + "EMS": "mne.decoding.EMS", + "CSP": "mne.decoding.CSP", + "Beamformer": "mne.beamformer.Beamformer", + "Transform": "mne.transforms.Transform", + "Coregistration": "mne.coreg.Coregistration", + "Figure3D": "mne.viz.Figure3D", + "EOGRegression": "mne.preprocessing.EOGRegression", + "Spectrum": "mne.time_frequency.Spectrum", + "EpochsSpectrum": "mne.time_frequency.EpochsSpectrum", + "EpochsFIF": 
"mne.Epochs", + "EpochsEEGLAB": "mne.Epochs", + "EpochsKIT": "mne.Epochs", + "RawANT": "mne.io.Raw", + "RawBOXY": "mne.io.Raw", + "RawBrainVision": "mne.io.Raw", + "RawBTi": "mne.io.Raw", + "RawCTF": "mne.io.Raw", + "RawCurry": "mne.io.Raw", + "RawEDF": "mne.io.Raw", + "RawEEGLAB": "mne.io.Raw", + "RawEGI": "mne.io.Raw", + "RawEximia": "mne.io.Raw", + "RawEyelink": "mne.io.Raw", + "RawFIL": "mne.io.Raw", + "RawGDF": "mne.io.Raw", + "RawHitachi": "mne.io.Raw", + "RawKIT": "mne.io.Raw", + "RawNedf": "mne.io.Raw", + "RawNeuralynx": "mne.io.Raw", + "RawNihon": "mne.io.Raw", + "RawNIRX": "mne.io.Raw", + "RawPersyst": "mne.io.Raw", + "RawSNIRF": "mne.io.Raw", + "Calibration": "mne.preprocessing.eyetracking.Calibration", # dipy - 'dipy.align.AffineMap': 'dipy.align.imaffine.AffineMap', - 'dipy.align.DiffeomorphicMap': 'dipy.align.imwarp.DiffeomorphicMap', + "dipy.align.AffineMap": "dipy.align.imaffine.AffineMap", + "dipy.align.DiffeomorphicMap": "dipy.align.imwarp.DiffeomorphicMap", } numpydoc_xref_ignore = { # words - 'instance', 'instances', 'of', 'default', 'shape', 'or', - 'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in', - 'dtype', 'object', + "and", + "between", + "instance", + "instances", + "of", + "default", + "shape", + "or", + "with", + "length", + "pair", + "matplotlib", + "optional", + "kwargs", + "in", + "dtype", + "object", # shapes - 'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors', - 'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups', - 'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers', - 'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q', 'r', - 'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests', - 'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features', - 'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in', - 'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks', - 'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids', - 'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out', - 'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv', - 'n_dipoles_fwd', 'n_picks_ref', 'n_coords', 'n_meg', 'n_good_meg', - 'n_moments', 'n_patterns', 'n_new_events', - # Undocumented (on purpose) - 'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi', - 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY', - 'RawPersyst', 'RawNihon', 'RawNedf', 'RawHitachi', 'RawFIL', + "n_vertices", + "n_faces", + "n_channels", + "m", + "n", + "n_events", + "n_colors", + "n_times", + "obj", + "n_chan", + "n_epochs", + "n_picks", + "n_ch_groups", + "n_dipoles", + "n_ica_components", + "n_pos", + "n_node_names", + "n_tapers", + "n_signals", + "n_step", + "n_freqs", + "wsize", + "Tx", + "M", + "N", + "p", + "q", + "r", + "n_observations", + "n_regressors", + "n_cols", + "n_frequencies", + "n_tests", + "n_samples", + "n_peaks", + "n_permutations", + "nchan", + "n_points", + "n_features", + "n_parts", + "n_features_new", + "n_components", + "n_labels", + "n_events_in", + "n_splits", + "n_scores", + "n_outputs", + "n_trials", + "n_estimators", + "n_tasks", + "nd_features", + "n_classes", + "n_targets", + "n_slices", + "n_hpi", + "n_fids", + "n_elp", + "n_pts", + "n_tris", + "n_nodes", + "n_nonzero", + "n_events_out", + "n_segments", + "n_orient_inv", + "n_orient_fwd", + "n_orient", + "n_dipoles_lcmv", + "n_dipoles_fwd", + "n_picks_ref", + "n_coords", + "n_meg", + 
"n_good_meg", + "n_moments", + "n_patterns", + "n_new_events", # sklearn subclasses - 'mapping', 'to', 'any', + "mapping", + "to", + "any", + "pandas", + "polars", + "default", # unlinkable - 'CoregistrationUI', - 'IntracranialElectrodeLocator', - 'mne_qt_browser.figure.MNEQtBrowser', + "CoregistrationUI", + "mne_qt_browser.figure.MNEQtBrowser", + # pooch, since its website is unreliable and users will rarely need the links + "pooch.Unzip", + "pooch.Untar", + "pooch.HTTPDownloader", } numpydoc_validate = True -numpydoc_validation_checks = {'all'} | set(error_ignores) +numpydoc_validation_checks = {"all"} | set(error_ignores) numpydoc_validation_exclude = { # set of regex # dict subclasses - r'\.clear', r'\.get$', r'\.copy$', r'\.fromkeys', r'\.items', r'\.keys', - r'\.pop', r'\.popitem', r'\.setdefault', r'\.update', r'\.values', + r"\.clear", + r"\.get$", + r"\.copy$", + r"\.fromkeys", + r"\.items", + r"\.keys", + r"\.move_to_end", + r"\.pop", + r"\.popitem", + r"\.setdefault", + r"\.update", + r"\.values", # list subclasses - r'\.append', r'\.count', r'\.extend', r'\.index', r'\.insert', r'\.remove', - r'\.sort', + r"\.append", + r"\.count", + r"\.extend", + r"\.index", + r"\.insert", + r"\.remove", + r"\.sort", # we currently don't document these properly (probably okay) - r'\.__getitem__', r'\.__contains__', r'\.__hash__', r'\.__mul__', - r'\.__sub__', r'\.__add__', r'\.__iter__', r'\.__div__', r'\.__neg__', + r"\.__getitem__", + r"\.__contains__", + r"\.__hash__", + r"\.__mul__", + r"\.__sub__", + r"\.__add__", + r"\.__iter__", + r"\.__div__", + r"\.__neg__", # copied from sklearn - r'mne\.utils\.deprecated', + r"mne\.utils\.deprecated", } # -- Sphinx-gallery configuration -------------------------------------------- -class Resetter(object): - """Simple class to make the str(obj) static for Sphinx build env hash.""" - - def __init__(self): - self.t0 = time.time() - - def __repr__(self): - return f'<{self.__class__.__name__}>' - - def __call__(self, gallery_conf, fname, when): - import matplotlib.pyplot as plt - try: - from pyvista import Plotter # noqa - except ImportError: - Plotter = None # noqa - try: - from pyvistaqt import BackgroundPlotter # noqa - except ImportError: - BackgroundPlotter = None # noqa - try: - from vtkmodules.vtkCommonDataModel import vtkPolyData # noqa - except ImportError: - vtkPolyData = None # noqa - try: - from mne_qt_browser._pg_figure import MNEQtBrowser - except ImportError: - MNEQtBrowser = None - from mne.viz.backends.renderer import backend - _Renderer = backend._Renderer if backend is not None else None - reset_warnings(gallery_conf, fname) - # in case users have interactive mode turned on in matplotlibrc, - # turn it off here (otherwise the build can be very slow) - plt.ioff() - plt.rcParams['animation.embed_limit'] = 30. 
- plt.rcParams['figure.raise_window'] = False - # neo holds on to an exception, which in turn holds a stack frame, - # which will keep alive the global vars during SG execution - try: - import neo - neo.io.stimfitio.STFIO_ERR = None - except Exception: - pass - gc.collect() - when = f'mne/conf.py:Resetter.__call__:{when}:{fname}' - # Support stuff like - # MNE_SKIP_INSTANCE_ASSERTIONS="Brain,Plotter,BackgroundPlotter,vtkPolyData,_Renderer" make html_dev-memory # noqa: E501 - # to just test MNEQtBrowser - skips = os.getenv('MNE_SKIP_INSTANCE_ASSERTIONS', '').lower() - prefix = '' - if skips not in ('true', '1', 'all'): - prefix = 'Clean ' - skips = skips.split(',') - if 'brain' not in skips: - _assert_no_instances(Brain, when) # calls gc.collect() - if Plotter is not None and 'plotter' not in skips: - _assert_no_instances(Plotter, when) - if BackgroundPlotter is not None and \ - 'backgroundplotter' not in skips: - _assert_no_instances(BackgroundPlotter, when) - if vtkPolyData is not None and 'vtkpolydata' not in skips: - _assert_no_instances(vtkPolyData, when) - if '_renderer' not in skips: - _assert_no_instances(_Renderer, when) - if MNEQtBrowser is not None and \ - 'mneqtbrowser' not in skips: - # Ensure any manual fig.close() events get properly handled - from mne_qt_browser._pg_figure import QApplication - inst = QApplication.instance() - if inst is not None: - for _ in range(2): - inst.processEvents() - _assert_no_instances(MNEQtBrowser, when) - # This will overwrite some Sphinx printing but it's useful - # for memory timestamps - if os.getenv('SG_STAMP_STARTS', '').lower() == 'true': - import psutil - process = psutil.Process(os.getpid()) - mem = sizeof_fmt(process.memory_info().rss) - print(f'{prefix}{time.time() - self.t0:6.1f} s : {mem}'.ljust(22)) - - -examples_dirs = ['../tutorials', '../examples'] -gallery_dirs = ['auto_tutorials', 'auto_examples'] -os.environ['_MNE_BUILDING_DOC'] = 'true' -scrapers = ('matplotlib',) -try: - mne.viz.set_3d_backend(mne.viz.get_3d_backend()) -except Exception: - report_scraper = None -else: - backend = mne.viz.get_3d_backend() - if backend in ('notebook', 'pyvistaqt'): - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - import pyvista - pyvista.OFF_SCREEN = False - pyvista.BUILDING_GALLERY = True - scrapers += ( - mne.gui._GUIScraper(), - mne.viz._brain._BrainScraper(), - 'pyvista', - ) - report_scraper = mne.report._ReportScraper() - scrapers += (report_scraper,) - del backend -try: - import mne_qt_browser - _min_ver = _compare_version(mne_qt_browser.__version__, '>=', '0.2') - if mne.viz.get_browser_backend() == 'qt' and _min_ver: - scrapers += (mne.viz._scraper._MNEQtBrowserScraper(),) -except ImportError: - pass - -compress_images = ('images', 'thumbnails') +examples_dirs = ["../tutorials", "../examples"] +gallery_dirs = ["auto_tutorials", "auto_examples"] +os.environ["_MNE_BUILDING_DOC"] = "true" + +scrapers = ( + "matplotlib", + "mne_doc_utils.gui_scraper", + "mne_doc_utils.brain_scraper", + "pyvista", + "mne_doc_utils.report_scraper", + "mne_doc_utils.mne_qt_browser_scraper", +) + +compress_images = ("images", "thumbnails") # let's make things easier on Windows users # (on Linux and macOS it's easy enough to require this) -if sys.platform.startswith('win'): +if sys.platform.startswith("win"): try: - subprocess.check_call(['optipng', '--version']) + subprocess.check_call(["optipng", "--version"]) except Exception: compress_images = () +sphinx_gallery_parallel = 
int(os.getenv("MNE_DOC_BUILD_N_JOBS", "1")) sphinx_gallery_conf = { - 'doc_module': ('mne',), - 'reference_url': dict(mne=None), - 'examples_dirs': examples_dirs, - 'subsection_order': ExplicitOrder(['../examples/io/', - '../examples/simulation/', - '../examples/preprocessing/', - '../examples/visualization/', - '../examples/time_frequency/', - '../examples/stats/', - '../examples/decoding/', - '../examples/connectivity/', - '../examples/forward/', - '../examples/inverse/', - '../examples/realtime/', - '../examples/datasets/', - '../tutorials/intro/', - '../tutorials/io/', - '../tutorials/raw/', - '../tutorials/preprocessing/', - '../tutorials/epochs/', - '../tutorials/evoked/', - '../tutorials/time-freq/', - '../tutorials/forward/', - '../tutorials/inverse/', - '../tutorials/stats-sensor-space/', - '../tutorials/stats-source-space/', - '../tutorials/machine-learning/', - '../tutorials/clinical/', - '../tutorials/simulation/', - '../tutorials/sample-datasets/', - '../tutorials/misc/']), - 'gallery_dirs': gallery_dirs, - 'default_thumb_file': os.path.join('_static', 'mne_helmet.png'), - 'backreferences_dir': 'generated', - 'plot_gallery': 'True', # Avoid annoying Unicode/bool default warning - 'thumbnail_size': (160, 112), - 'remove_config_comments': True, - 'min_reported_time': 1., - 'abort_on_example_error': False, - 'reset_modules': ('matplotlib', Resetter()), # called w/each script - 'reset_modules_order': 'both', - 'image_scrapers': scrapers, - 'show_memory': not sys.platform.startswith(('win', 'darwin')), - 'line_numbers': False, # messes with style - 'within_subsection_order': FileNameSortKey, - 'capture_repr': ('_repr_html_',), - 'junit': os.path.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'), - 'matplotlib_animations': True, - 'compress_images': compress_images, - 'filename_pattern': '^((?!sgskip).)*$', - 'exclude_implicit_doc': { - r'mne\.io\.read_raw_fif', r'mne\.io\.Raw', r'mne\.Epochs', - r'mne.datasets.*', + "doc_module": ("mne",), + "reference_url": dict(mne=None), + "examples_dirs": examples_dirs, + "subsection_order": ExplicitOrder( + [ + "../examples/io/", + "../examples/simulation/", + "../examples/preprocessing/", + "../examples/visualization/", + "../examples/time_frequency/", + "../examples/stats/", + "../examples/decoding/", + "../examples/connectivity/", + "../examples/forward/", + "../examples/inverse/", + "../examples/realtime/", + "../examples/datasets/", + "../tutorials/intro/", + "../tutorials/io/", + "../tutorials/raw/", + "../tutorials/preprocessing/", + "../tutorials/epochs/", + "../tutorials/evoked/", + "../tutorials/time-freq/", + "../tutorials/forward/", + "../tutorials/inverse/", + "../tutorials/stats-sensor-space/", + "../tutorials/stats-source-space/", + "../tutorials/machine-learning/", + "../tutorials/clinical/", + "../tutorials/simulation/", + "../tutorials/sample-datasets/", + "../tutorials/visualization/", + "../tutorials/misc/", + ] + ), + "gallery_dirs": gallery_dirs, + "default_thumb_file": os.path.join("_static", "mne_helmet.png"), + "backreferences_dir": "generated", + "plot_gallery": "True", # Avoid annoying Unicode/bool default warning + "thumbnail_size": (160, 112), + "remove_config_comments": True, + "min_reported_time": 1.0, + "abort_on_example_error": False, + "reset_modules": ( + "matplotlib", + "mne_doc_utils.reset_modules", + ), # called w/each script + "reset_modules_order": "both", + "image_scrapers": scrapers, + "show_memory": sys.platform == "linux" and sphinx_gallery_parallel == 1, + "line_numbers": False, # messes 
with style + "within_subsection_order": "FileNameSortKey", + "capture_repr": ("_repr_html_",), + "junit": os.path.join("..", "test-results", "sphinx-gallery", "junit.xml"), + "matplotlib_animations": True, + "compress_images": compress_images, + "filename_pattern": "^((?!sgskip).)*$", + "exclude_implicit_doc": { + r"mne\.io\.read_raw_fif", + r"mne\.io\.Raw", + r"mne\.Epochs", + r"mne.datasets.*", }, - 'show_api_usage': False, # disable for now until graph warning fixed - 'api_usage_ignore': ( - '(' - '.*__.*__|' # built-ins - '.*Base.*|.*Array.*|mne.Vector.*|mne.Mixed.*|mne.Vol.*|' # inherited - 'mne.coreg.Coregistration.*|' # GUI + "show_api_usage": "unused", + "api_usage_ignore": ( + "(" + ".*__.*__|" # built-ins + ".*Base.*|.*Array.*|mne.Vector.*|mne.Mixed.*|mne.Vol.*|" # inherited + "mne.coreg.Coregistration.*|" # GUI # common - '.*utils.*|.*verbose()|.*copy()|.*update()|.*save()|' - '.*get_data()|' + ".*utils.*|.*verbose()|.*copy()|.*update()|.*save()|" + ".*get_data()|" # mixins - '.*add_channels()|.*add_reference_channels()|' - '.*anonymize()|.*apply_baseline()|.*apply_function()|' - '.*apply_hilbert()|.*as_type()|.*decimate()|' - '.*drop()|.*drop_channels()|.*drop_log_stats()|' - '.*export()|.*get_channel_types()|' - '.*get_montage()|.*interpolate_bads()|.*next()|' - '.*pick()|.*pick_channels()|.*pick_types()|' - '.*plot_sensors()|.*rename_channels()|' - '.*reorder_channels()|.*savgol_filter()|' - '.*set_eeg_reference()|.*set_channel_types()|' - '.*set_meas_date()|.*set_montage()|.*shift_time()|' - '.*time_as_index()|.*to_data_frame()|' + ".*add_channels()|.*add_reference_channels()|" + ".*anonymize()|.*apply_baseline()|.*apply_function()|" + ".*apply_hilbert()|.*as_type()|.*decimate()|" + ".*drop()|.*drop_channels()|.*drop_log_stats()|" + ".*export()|.*get_channel_types()|" + ".*get_montage()|.*interpolate_bads()|.*next()|" + ".*pick()|.*pick_channels()|.*pick_types()|" + ".*plot_sensors()|.*rename_channels()|" + ".*reorder_channels()|.*savgol_filter()|" + ".*set_eeg_reference()|.*set_channel_types()|" + ".*set_meas_date()|.*set_montage()|.*shift_time()|" + ".*time_as_index()|.*to_data_frame()|" # dictionary inherited - '.*clear()|.*fromkeys()|.*get()|.*items()|' - '.*keys()|.*pop()|.*popitem()|.*setdefault()|' - '.*values()|' + ".*clear()|.*fromkeys()|.*get()|.*items()|" + ".*keys()|.*pop()|.*popitem()|.*setdefault()|" + ".*values()|" # sklearn inherited - '.*apply()|.*decision_function()|.*fit()|' - '.*fit_transform()|.*get_params()|.*predict()|' - '.*predict_proba()|.*set_params()|.*transform()|' + ".*apply()|.*decision_function()|.*fit()|" + ".*fit_transform()|.*get_params()|.*predict()|" + ".*predict_proba()|.*set_params()|.*transform()|" # I/O, also related to mixins - '.*.remove.*|.*.write.*)') + ".*.remove.*|.*.write.*)" + ), + "copyfile_regex": r".*index\.rst", # allow custom index.rst files + "parallel": sphinx_gallery_parallel, } +assert is_serializable(sphinx_gallery_conf) # Files were renamed from plot_* with: # find . -type f -name 'plot_*.py' -exec sh -c 'x="{}"; xn=`basename "${x}"`; git mv "$x" `dirname "${x}"`/${xn:5}' \; # noqa @@ -514,9 +600,14 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): # does not respect the autodoc templates that would otherwise insert # the .. include:: lines, so we need to do it. # Eventually this could perhaps live in SG. 
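
The reset_modules entries above are now given as dotted strings ("mne_doc_utils.reset_modules") that Sphinx-Gallery resolves to callables and invokes around every example script (reset_modules_order="both" runs them before and after). As a sketch only, a stand-in with the same call signature as the removed Resetter class might look like the following; the real helper in doc/sphinxext additionally checks for leaked Brain/Plotter/Qt instances and can report memory usage:

import gc

import matplotlib.pyplot as plt


def reset_modules(gallery_conf, fname, when):
    # "when" is "before" or "after"; reset_modules_order="both" runs this
    # around every example script.
    plt.ioff()  # interactive mode from a user's matplotlibrc slows the build badly
    plt.rcParams["animation.embed_limit"] = 30.0
    plt.rcParams["figure.raise_window"] = False
    gc.collect()  # drop references leaked by the example that just ran

Referring to the resetters and scrapers by string rather than by object keeps sphinx_gallery_conf serializable, which the assert is_serializable(sphinx_gallery_conf) check above now enforces.
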
- if what in ('attribute', 'method'): - size = os.path.getsize(os.path.join( - os.path.dirname(__file__), 'generated', '%s.examples' % (name,))) + if what in ("attribute", "method"): + size = os.path.getsize( + os.path.join( + os.path.dirname(__file__), + "generated", + f"{name}.examples", + ) + ) if size > 0: lines += """ .. _sphx_glr_backreferences_{1}: @@ -525,121 +616,242 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): .. minigallery:: {1} -""".format(name.split('.')[-1], name).split('\n') +""".format(name.split(".")[-1], name).split("\n") + + +def fix_sklearn_inherited_docstrings(app, what, name, obj, options, lines): + """Fix sklearn docstrings because they use autolink and we do not.""" + if ( + name.startswith("mne.decoding.") or name.startswith("mne.preprocessing.Xdawn") + ) and name.endswith( + ( + ".get_metadata_routing", + ".fit", + ".fit_transform", + ".set_output", + ".transform", + ) + ): + if ":Parameters:" in lines: + loc = lines.index(":Parameters:") + else: + loc = lines.index(":Returns:") + lines.insert(loc, "") + lines.insert(loc, ".. default-role:: autolink") + lines.insert(loc, "") # -- Other extension configuration ------------------------------------------- -linkcheck_request_headers = dict(user_agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36') # noqa: E501 +# Consider using http://magjac.com/graphviz-visual-editor for this +graphviz_dot_args = [ + "-Gsep=-0.5", + "-Gpad=0.5", + "-Nshape=box", + "-Nfontsize=20", + "-Nfontname=Open Sans,Arial", +] +graphviz_output_format = "svg" # for API usage diagrams +user_agent = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Mobile Safari/537.36" # noqa: E501 +# Can eventually add linkcheck_request_headers if needed linkcheck_ignore = [ # will be compiled to regex - r'https://datashare.is.ed.ac.uk/handle/10283/2189/?show=full', # noqa Max retries exceeded with url: /handle/10283/2189?show=full (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1123)'))) - 'https://doi.org/10.1002/mds.870120629', # Read timed out. - 'https://doi.org/10.1088/0031-9155/32/1/004', # noqa Read timed out. (read timeout=15) - 'https://doi.org/10.1088/0031-9155/40/3/001', # noqa Read timed out. (read timeout=15) - 'https://doi.org/10.1088/0031-9155/51/7/008', # noqa Read timed out. (read timeout=15) - 'https://doi.org/10.1088/0031-9155/57/7/1937', # noqa Read timed out. (read timeout=15) - 'https://doi.org/10.1088/0967-3334/22/4/305', # noqa Read timed out. (read timeout=15) - 'https://doi.org/10.1088/1741-2552/aacfe4', # noqa Read timed out. 
(read timeout=15) - 'https://doi.org/10.1093/sleep/18.7.557', # noqa 403 Client Error: Forbidden for url: https://academic.oup.com/sleep/article-lookup/doi/10.1093/sleep/18.7.557 - 'https://doi.org/10.1162/089976699300016719', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/11/2/417-441/6242 - 'https://doi.org/10.1162/jocn.1993.5.2.162', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/5/2/162-176/3095 - 'https://doi.org/10.1162/neco.1995.7.6.1129', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/7/6/1129-1159/5909 - 'https://doi.org/10.1162/jocn_a_00405', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/25/9/1477-1492/27980 - 'https://doi.org/10.1167/15.6.4', # noqa 403 Client Error: Forbidden for url: https://jov.arvojournals.org/article.aspx?doi=10.1167/15.6.4 - 'https://doi.org/10.7488/ds/1556', # noqa Max retries exceeded with url: /handle/10283/2189 (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)'))) - 'https://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach', # noqa Max retries exceeded with url: /imaging/MniTalairach (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)'))) - 'https://www.nyu.edu/', # noqa Max retries exceeded with url: / (Caused by SSLError(SSLError(1, '[SSL: DH_KEY_TOO_SMALL] dh key too small (_ssl.c:1122)'))) - 'https://docs.python.org/3/library/.*', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer')) - 'https://hal.archives-ouvertes.fr/hal-01848442.*', # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/ - 'http://www.cs.ucl.ac.uk/staff/d.barber/brml.*', # noqa Sometimes: Read timed out - 'https://compumedicsneuroscan.com/scan-acquire-configuration-files.*', # noqa SSL certificate error as of 2021/09/28 - 'https://chrisholdgraf.com/', # noqa Max retries exceeded sometimes - 'https://www.dtu.dk/english/service/phonebook/person.*', # noqa Too slow - 'https://speakerdeck.com/dengemann/eeg-sensor-covariance-using-cross-validation', # noqa Too slow - 'https://doi.org/10.1002/hbm.10024', # noqa Too slow sometimes - 'https://www.researchgate.net/', # noqa As of 2022/05/31 we get "403 Forbidden" errors, might have to do with https://stackoverflow.com/questions/72347165 but not worth the effort to fix + # 403 Client Error: Forbidden + "https://doi.org/10.1002/", # onlinelibrary.wiley.com/doi/10.1002/hbm + "https://doi.org/10.1017/", # cambridge.org + "https://doi.org/10.1016/", # neuroimage + "https://doi.org/10.1021/", # pubs.acs.org/doi/abs + "https://doi.org/10.1063/", # pubs.aip.org/aip/jap + "https://doi.org/10.1073/", # pnas.org + "https://doi.org/10.1080/", # www.tandfonline.com + "https://doi.org/10.1088/", # www.tandfonline.com + "https://doi.org/10.1090/", # ams.org + "https://doi.org/10.1093/", # academic.oup.com/sleep/ + "https://doi.org/10.1098/", # royalsocietypublishing.org + "https://doi.org/10.1101/", # www.biorxiv.org + "https://doi.org/10.1103/", 
# journals.aps.org/rmp + "https://doi.org/10.1111/", # onlinelibrary.wiley.com/doi/10.1111/psyp + "https://doi.org/10.1126/", # www.science.org + "https://doi.org/10.1137/", # epubs.siam.org + "https://doi.org/10.1145/", # dl.acm.org + "https://doi.org/10.1155/", # www.hindawi.com/journals/cin + "https://doi.org/10.1161/", # www.ahajournals.org + "https://doi.org/10.1162/", # direct.mit.edu/neco/article/ + "https://doi.org/10.1167/", # jov.arvojournals.org + "https://doi.org/10.1177/", # journals.sagepub.com + "https://doi.org/10.1523/", # jneurosci.org + "https://doi.org/10.3109/", # www.tandfonline.com + "https://doi.org/10.3390/", # mdpi.com + "https://hms.harvard.edu/", # doc/funding.rst + "https://stackoverflow.com/questions/21752259/python-why-pickle", # doc/help/faq + "https://blender.org/", + "https://home.alexk101.dev/", + "https://www.mq.edu.au/", + "https://www.biorxiv.org/content/10.1101/", # biorxiv.org + "https://www.researchgate.net/profile/", + "https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl.html", + r"https://scholar.google.com/scholar/?cites=12188330066413208874&as_ylo=2014", + r"https://scholar.google.com/scholar/?cites=1521584321377182930&as_ylo=2013", + "https://www.research.chop.edu/imaging", + "http://prdownloads.sourceforge.net/optipng", + "https://sourceforge.net/projects/aespa/files/", + "https://sourceforge.net/projects/ezwinports/files/", + r"https://.*/.sourceforge/.net/", + "https://www.cogsci.nl/smathot", + "https://www.mathworks.com/products/compiler/matlab-runtime.html", + "https://medicine.umich.edu/dept/khri/ross-maddox-phd", + # TODO https://github.com/mne-tools/curry-python-reader/issues/5 + "https://github.com/mne-tools/curry-python-reader/README.md", + # 500 server error + "https://openwetware.org/wiki/Beauchamp:FreeSurfer", + # 503 Server error + "https://hal.archives-ouvertes.fr/hal-01848442", + # Read timed out + "http://www.cs.ucl.ac.uk/staff/d.barber/brml", + "https://www.cea.fr/", + "http://www.humanconnectome.org/data", + "https://www.mail-archive.com/freesurfer@nmr.mgh.harvard.edu", + "https://launchpad.net/", + # Max retries exceeded + "https://doi.org/10.7488/ds/1556", + "https://datashare.is.ed.ac.uk/handle/10283", + "https://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach", + "https://www.nyu.edu/", + # Too slow + "https://speakerdeck.com/dengemann/", + "https://www.dtu.dk/english/service/phonebook/person", + "https://www.gnu.org/software/make/", + "https://www.macports.org/", + "https://hastie.su.domains/CASI", + # SSL problems sometimes + "http://ilabs.washington.edu/", + "https://psychophysiology.cpmc.columbia.edu/", + "https://erc.easme-web.eu/", + # Not rendered by linkcheck builder + r"ides\.html", ] linkcheck_anchors = False # saves a bit of time linkcheck_timeout = 15 # some can be quite slow +linkcheck_retries = 3 +linkcheck_report_timeouts_as_broken = False # autodoc / autosummary autosummary_generate = True -autodoc_default_options = {'inherited-members': None} +autodoc_default_options = {"inherited-members": None} # sphinxcontrib-bibtex -bibtex_bibfiles = 
['./references.bib'] -bibtex_style = 'unsrt' -bibtex_footbibliography_header = '' +bibtex_bibfiles = ["./references.bib"] +bibtex_style = "unsrt" +bibtex_footbibliography_header = "" # -- Nitpicky ---------------------------------------------------------------- nitpicky = True +show_warning_types = True nitpick_ignore = [ ("py:class", "None. Remove all items from D."), - ("py:class", "a set-like object providing a view on D's items"), - ("py:class", "a set-like object providing a view on D's keys"), - ("py:class", "v, remove specified key and return the corresponding value."), # noqa: E501 - ("py:class", "None. Update D from dict/iterable E and F."), + ( + "py:class", + "v, remove specified key and return the corresponding value.", + ), # noqa: E501 ("py:class", "an object providing a view on D's values"), ("py:class", "a shallow copy of D"), ("py:class", "(k, v), remove and return some (key, value) pair as a"), ("py:class", "_FuncT"), # type hint used in @verbose decorator ("py:class", "mne.utils._logging._FuncT"), + ("py:class", "None. Remove all items from od."), ] nitpick_ignore_regex = [ - ('py:.*', r"mne\.io\.BaseRaw.*"), - ('py:.*', r"mne\.BaseEpochs.*"), - ('py:obj', "(filename|metadata|proj|times|tmax|tmin|annotations|ch_names|compensation_grade|filenames|first_samp|first_time|last_samp|n_times|proj|times|tmax|tmin)"), # noqa: E501 + ("py:class", "a set-like object providing a view on D's (items|keys)"), + ("py:class", r"None\. Update D from (dict|mapping)/iterable E and F\."), + # Classes whose methods we purposefully do not document + ("py:.*", r"mne\.io\.BaseRaw.*"), # use mne.io.Raw + ("py:.*", r"mne\.BaseEpochs.*"), # use mne.Epochs + # Type hints for undocumented types + ("py:.*", r"mne\.io\..*\.Raw.*"), # RawEDF etc. + ("py:.*", r"mne\.epochs\.EpochsFIF.*"), + ("py:.*", r"mne\.io\..*\.Epochs.*"), # EpochsKIT etc. + ( # BaseRaw attributes are documented in Raw + "py:obj", + "(filename|metadata|proj|times|tmax|tmin|annotations|ch_names" + "|compensation_grade|duration|filenames|first_samp|first_time" + "|last_samp|n_times|proj|times|tmax|tmin)", + ), ] -suppress_warnings = ['image.nonlocal_uri'] # we intentionally link outside +suppress_warnings = [ + "image.nonlocal_uri", # we intentionally link outside +] + +# -- Sphinx hacks / overrides ------------------------------------------------ + +versionlabels["versionadded"] = sphinx.locale._("New in v%s") # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'pydata_sphinx_theme' +html_theme = "pydata_sphinx_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
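
Both ignore lists above (linkcheck_ignore and nitpick_ignore_regex) are treated by Sphinx as regular expressions, which is why bare URL prefixes and trailing .* are enough. A quick local sanity check for a new pattern, as an illustrative approximation of how Sphinx applies them:

import re

# linkcheck_ignore patterns are matched against the start of each URI,
# so a bare prefix like the DOI entries above is enough.
assert re.match("https://doi.org/10.1093/", "https://doi.org/10.1093/sleep/18.7.557")

# nitpick_ignore_regex pairs have to cover the whole (reftype, target) strings,
# hence the trailing .* in entries such as ("py:.*", r"mne\.io\.BaseRaw.*").
pattern = ("py:.*", r"mne\.io\.BaseRaw.*")
reference = ("py:meth", "mne.io.BaseRaw.filter")
assert all(re.fullmatch(pat, value) for pat, value in zip(pattern, reference))
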
-switcher_version_match = 'dev' if release.endswith('dev0') else version +switcher_version_match = "dev" if ".dev" in release else version html_theme_options = { - 'icon_links': [ - dict(name='GitHub', - url='https://github.com/mne-tools/mne-python', - icon='fa-brands fa-square-github'), - dict(name='Mastodon', - url='https://fosstodon.org/@mne', - icon='fa-brands fa-mastodon', - attributes=dict(rel='me')), - dict(name='Twitter', - url='https://twitter.com/mne_python', - icon='fa-brands fa-square-twitter'), - dict(name='Discourse', - url='https://mne.discourse.group/', - icon='fa-brands fa-discourse'), - dict(name='Discord', - url='https://discord.gg/rKfvxTuATa', - icon='fa-brands fa-discord') + "icon_links": [ + dict( + name="Discord (office hours)", + url="https://discord.gg/rKfvxTuATa", + icon="fa-brands fa-discord fa-fw", + ), + dict( + name="Mastodon", + url="https://fosstodon.org/@mne", + icon="fa-brands fa-mastodon fa-fw", + attributes=dict(rel="me"), + ), + dict( + name="Q&A Forum", + url="https://mne.discourse.group/", + icon="fa-brands fa-discourse fa-fw", + ), + dict( + name="Code Repository", + url="https://github.com/mne-tools/mne-python", + icon="fa-brands fa-github fa-fw", + ), + dict( + name="Sponsor us on GitHub", + url="https://github.com/sponsors/mne-tools", + icon="fa-regular fa-heart fa-fw", + ), + dict( + name="Donate via OpenCollective", + url="https://opencollective.com/mne-python", + icon="fa-custom fa-opencollective fa-fw", + ), + ], + "icon_links_label": "External Links", # for screen reader + "use_edit_page_button": False, + "navigation_with_keys": False, + "show_toc_level": 1, + "article_header_start": [], # disable breadcrumbs + "navbar_end": [ + "theme-switcher", + "version-switcher", + "navbar-icon-links", ], - 'icon_links_label': 'External Links', # for screen reader - 'use_edit_page_button': False, - 'navigation_with_keys': False, - 'show_toc_level': 1, - 'navbar_end': ['theme-switcher', 'version-switcher', 'navbar-icon-links'], - 'footer_start': ['copyright'], - 'footer_end': [], - 'secondary_sidebar_items': ['page-toc'], - 'analytics': dict(google_analytics_id='G-5TBCPCRB6X'), - 'switcher': { - 'json_url': 'https://mne.tools/dev/_static/versions.json', - 'version_match': switcher_version_match, + "navbar_align": "left", + "navbar_persistent": ["search-button"], + "footer_start": ["copyright"], + "secondary_sidebar_items": ["page-toc", "edit-this-page"], + "analytics": dict(google_analytics_id="G-5TBCPCRB6X"), + "switcher": { + "json_url": "https://mne.tools/dev/_static/versions.json", + "version_match": switcher_version_match, }, - 'pygment_light_style': 'default', - 'pygment_dark_style': 'github-dark', + "back_to_top_button": False, } # The name of an image file (relative to this directory) to place at the top @@ -655,24 +867,27 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] html_css_files = [ - 'style.css', + "style.css", +] +html_js_files = [ + ("js/custom-icons.js", {"defer": "defer"}), ] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. 
These files are copied # directly to the root of the documentation. html_extra_path = [ - 'contributing.html', - 'documentation.html', - 'getting_started.html', - 'install_mne_python.html', + "contributing.html", + "documentation.html", + "getting_started.html", + "install_mne_python.html", ] # Custom sidebar templates, maps document names to template names. html_sidebars = { - 'index': ['sidebar-quicklinks.html'], + "index": ["sidebar-quicklinks.html"], } # If true, links to the reST sources are added to the pages. @@ -683,263 +898,380 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): html_show_sphinx = False # accommodate different logo shapes (width values in rem) -xs = '2' -sm = '2.5' -md = '3' -lg = '4.5' -xl = '5' -xxl = '6' +xs = "2" +sm = "2.5" +md = "3" +lg = "4.5" +xl = "5" +xxl = "6" # variables to pass to HTML templating engine html_context = { - 'build_dev_html': bool(int(os.environ.get('BUILD_DEV_HTML', False))), - 'default_mode': 'auto', - 'pygment_light_style': 'tango', - 'pygment_dark_style': 'native', - 'funders': [ - dict(img='nih.svg', size='3', title='National Institutes of Health'), - dict(img='nsf.png', size='3.5', - title='US National Science Foundation'), - dict(img='erc.svg', size='3.5', title='European Research Council', - klass='only-light'), - dict(img='erc-dark.svg', size='3.5', title='European Research Council', - klass='only-dark'), - dict(img='doe.svg', size='3', title='US Department of Energy'), - dict(img='anr.svg', size='3.5', - title='Agence Nationale de la Recherche'), - dict(img='cds.png', size='2.25', - title='Paris-Saclay Center for Data Science'), - dict(img='google.svg', size='2.25', title='Google'), - dict(img='amazon.svg', size='2.5', title='Amazon'), - dict(img='czi.svg', size='2.5', title='Chan Zuckerberg Initiative'), + "default_mode": "auto", + # next 3 are for the "edit this page" button + "github_user": "mne-tools", + "github_repo": "mne-python", + "github_version": "main", + "doc_path": "doc", + "funders": [ + dict(img="nih.svg", size="3", title="National Institutes of Health"), + dict(img="nsf.png", size="3.5", title="US National Science Foundation"), + dict( + img="erc.svg", + size="3.5", + title="European Research Council", + klass="only-light", + ), + dict( + img="erc-dark.svg", + size="3.5", + title="European Research Council", + klass="only-dark", + ), + dict(img="doe.svg", size="3", title="US Department of Energy"), + dict(img="anr.svg", size="3.5", title="Agence Nationale de la Recherche"), + dict( + img="cds.svg", + size="1.75", + title="Paris-Saclay Center for Data Science", + klass="only-light", + ), + dict( + img="cds-dark.svg", + size="1.75", + title="Paris-Saclay Center for Data Science", + klass="only-dark", + ), + dict(img="google.svg", size="2.25", title="Google"), + dict(img="amazon.svg", size="2.5", title="Amazon"), + dict(img="czi.svg", size="2.5", title="Chan Zuckerberg Initiative"), ], - 'institutions': [ - dict(name='Massachusetts General Hospital', - img='MGH.svg', - url='https://www.massgeneral.org/', - size=sm), - dict(name='Athinoula A. 
Martinos Center for Biomedical Imaging', - img='Martinos.png', - url='https://martinos.org/', - size=md), - dict(name='Harvard Medical School', - img='Harvard.png', - url='https://hms.harvard.edu/', - size=sm), - dict(name='Massachusetts Institute of Technology', - img='MIT.svg', - url='https://web.mit.edu/', - size=md), - dict(name='New York University', - img='NYU.svg', - url='https://www.nyu.edu/', - size=xs, - klass='only-light'), - dict(name='New York University', - img='NYU-dark.svg', - url='https://www.nyu.edu/', - size=xs, - klass='only-dark'), - dict(name='Commissariat à l´énergie atomique et aux énergies alternatives', # noqa E501 - img='CEA.png', - url='http://www.cea.fr/', - size=md), - dict(name='Aalto-yliopiston perustieteiden korkeakoulu', - img='Aalto.svg', - url='https://sci.aalto.fi/', - size=md, - klass='only-light'), - dict(name='Aalto-yliopiston perustieteiden korkeakoulu', - img='Aalto-dark.svg', - url='https://sci.aalto.fi/', - size=md, - klass='only-dark'), - dict(name='Télécom ParisTech', - img='Telecom_Paris_Tech.svg', - url='https://www.telecom-paris.fr/', - size=md), - dict(name='University of Washington', - img='Washington.svg', - url='https://www.washington.edu/', - size=md, - klass='only-light'), - dict(name='University of Washington', - img='Washington-dark.svg', - url='https://www.washington.edu/', - size=md, - klass='only-dark'), - dict(name='Institut du Cerveau et de la Moelle épinière', - img='ICM.jpg', - url='https://icm-institute.org/', - size=md), - dict(name='Boston University', - img='BU.svg', - url='https://www.bu.edu/', - size=lg), - dict(name='Institut national de la santé et de la recherche médicale', - img='Inserm.svg', - url='https://www.inserm.fr/', - size=xl, - klass='only-light'), - dict(name='Institut national de la santé et de la recherche médicale', - img='Inserm-dark.svg', - url='https://www.inserm.fr/', - size=xl, - klass='only-dark'), - dict(name='Forschungszentrum Jülich', - img='Julich.svg', - url='https://www.fz-juelich.de/', - size=xl, - klass='only-light'), - dict(name='Forschungszentrum Jülich', - img='Julich-dark.svg', - url='https://www.fz-juelich.de/', - size=xl, - klass='only-dark'), - dict(name='Technische Universität Ilmenau', - img='Ilmenau.svg', - url='https://www.tu-ilmenau.de/', - size=xxl, - klass='only-light'), - dict(name='Technische Universität Ilmenau', - img='Ilmenau-dark.svg', - url='https://www.tu-ilmenau.de/', - size=xxl, - klass='only-dark'), - dict(name='Berkeley Institute for Data Science', - img='BIDS.svg', - url='https://bids.berkeley.edu/', - size=lg, - klass='only-light'), - dict(name='Berkeley Institute for Data Science', - img='BIDS-dark.svg', - url='https://bids.berkeley.edu/', - size=lg, - klass='only-dark'), - dict(name='Institut national de recherche en informatique et en automatique', # noqa E501 - img='inria.png', - url='https://www.inria.fr/', - size=xl), - dict(name='Aarhus Universitet', - img='Aarhus.svg', - url='https://www.au.dk/', - size=xl, - klass='only-light'), - dict(name='Aarhus Universitet', - img='Aarhus-dark.svg', - url='https://www.au.dk/', - size=xl, - klass='only-dark'), - dict(name='Karl-Franzens-Universität Graz', - img='Graz.svg', - url='https://www.uni-graz.at/', - size=md), - dict(name='SWPS Uniwersytet Humanistycznospołeczny', - img='SWPS.svg', - 
url='https://www.swps.pl/', - size=xl, - klass='only-light'), - dict(name='SWPS Uniwersytet Humanistycznospołeczny', - img='SWPS-dark.svg', - url='https://www.swps.pl/', - size=xl, - klass='only-dark'), - dict(name='Max-Planck-Institut für Bildungsforschung', - img='MPIB.svg', - url='https://www.mpib-berlin.mpg.de/', - size=xxl, - klass='only-light'), - dict(name='Max-Planck-Institut für Bildungsforschung', - img='MPIB-dark.svg', - url='https://www.mpib-berlin.mpg.de/', - size=xxl, - klass='only-dark'), - dict(name='Macquarie University', - img='Macquarie.svg', - url='https://www.mq.edu.au/', - size=lg, - klass='only-light'), - dict(name='Macquarie University', - img='Macquarie-dark.svg', - url='https://www.mq.edu.au/', - size=lg, - klass='only-dark'), - dict(name='Children’s Hospital of Philadelphia Research Institute', - img='CHOP.svg', - url='https://www.research.chop.edu/imaging', - size=xxl, - klass='only-light'), - dict(name='Children’s Hospital of Philadelphia Research Institute', - img='CHOP-dark.svg', - url='https://www.research.chop.edu/imaging', - size=xxl, - klass='only-dark'), - dict(name='Donders Institute for Brain, Cognition and Behaviour at Radboud University', # noqa E501 - img='Donders.png', - url='https://www.ru.nl/donders/', - size=xl), + "institutions": [ + dict( + name="Massachusetts General Hospital", + img="MGH.svg", + url="https://www.massgeneral.org/", + size=sm, + ), + dict( + name="Athinoula A. Martinos Center for Biomedical Imaging", + img="Martinos.png", + url="https://martinos.org/", + size=md, + ), + dict( + name="Harvard Medical School", + img="Harvard.png", + url="https://hms.harvard.edu/", + size=sm, + ), + dict( + name="Massachusetts Institute of Technology", + img="MIT.svg", + url="https://web.mit.edu/", + size=md, + ), + dict( + name="New York University", + img="NYU.svg", + url="https://www.nyu.edu/", + size=xs, + klass="only-light", + ), + dict( + name="New York University", + img="NYU-dark.svg", + url="https://www.nyu.edu/", + size=xs, + klass="only-dark", + ), + dict( + name="Commissariat à l´énergie atomique et aux énergies alternatives", # noqa E501 + img="CEA.png", + url="http://www.cea.fr/", + size=md, + ), + dict( + name="Aalto-yliopiston perustieteiden korkeakoulu", + img="Aalto.svg", + url="https://sci.aalto.fi/", + size=md, + klass="only-light", + ), + dict( + name="Aalto-yliopiston perustieteiden korkeakoulu", + img="Aalto-dark.svg", + url="https://sci.aalto.fi/", + size=md, + klass="only-dark", + ), + dict( + name="Télécom ParisTech", + img="Telecom_Paris_Tech.svg", + url="https://www.telecom-paris.fr/", + size=md, + ), + dict( + name="University of Washington", + img="Washington.svg", + url="https://www.washington.edu/", + size=md, + klass="only-light", + ), + dict( + name="University of Washington", + img="Washington-dark.svg", + url="https://www.washington.edu/", + size=md, + klass="only-dark", + ), + dict( + name="Institut du Cerveau et de la Moelle épinière", + img="ICM.jpg", + url="https://icm-institute.org/", + size=md, + ), + dict( + name="Boston University", img="BU.svg", url="https://www.bu.edu/", size=lg + ), + dict( + name="Institut national de la santé et de la recherche médicale", + img="Inserm.svg", + url="https://www.inserm.fr/", + size=xl, + klass="only-light", + ), + dict( + name="Institut national de la 
santé et de la recherche médicale", + img="Inserm-dark.svg", + url="https://www.inserm.fr/", + size=xl, + klass="only-dark", + ), + dict( + name="Forschungszentrum Jülich", + img="Julich.svg", + url="https://www.fz-juelich.de/", + size=xl, + klass="only-light", + ), + dict( + name="Forschungszentrum Jülich", + img="Julich-dark.svg", + url="https://www.fz-juelich.de/", + size=xl, + klass="only-dark", + ), + dict( + name="Technische Universität Ilmenau", + img="Ilmenau.svg", + url="https://www.tu-ilmenau.de/", + size=xxl, + klass="only-light", + ), + dict( + name="Technische Universität Ilmenau", + img="Ilmenau-dark.svg", + url="https://www.tu-ilmenau.de/", + size=xxl, + klass="only-dark", + ), + dict( + name="Berkeley Institute for Data Science", + img="BIDS.svg", + url="https://bids.berkeley.edu/", + size=lg, + klass="only-light", + ), + dict( + name="Berkeley Institute for Data Science", + img="BIDS-dark.svg", + url="https://bids.berkeley.edu/", + size=lg, + klass="only-dark", + ), + dict( + name="Institut national de recherche en informatique et en automatique", # noqa E501 + img="inria.png", + url="https://www.inria.fr/", + size=xl, + ), + dict( + name="Aarhus Universitet", + img="Aarhus.svg", + url="https://www.au.dk/", + size=xl, + klass="only-light", + ), + dict( + name="Aarhus Universitet", + img="Aarhus-dark.svg", + url="https://www.au.dk/", + size=xl, + klass="only-dark", + ), + dict( + name="Karl-Franzens-Universität Graz", + img="Graz.svg", + url="https://www.uni-graz.at/", + size=md, + ), + dict( + name="SWPS Uniwersytet Humanistycznospołeczny", + img="SWPS.svg", + url="https://www.swps.pl/", + size=xl, + klass="only-light", + ), + dict( + name="SWPS Uniwersytet Humanistycznospołeczny", + img="SWPS-dark.svg", + url="https://www.swps.pl/", + size=xl, + klass="only-dark", + ), + dict( + name="Max-Planck-Institut für Bildungsforschung", + img="MPIB.svg", + url="https://www.mpib-berlin.mpg.de/", + size=xxl, + klass="only-light", + ), + dict( + name="Max-Planck-Institut für Bildungsforschung", + img="MPIB-dark.svg", + url="https://www.mpib-berlin.mpg.de/", + size=xxl, + klass="only-dark", + ), + dict( + name="Macquarie University", + img="Macquarie.svg", + url="https://www.mq.edu.au/", + size=lg, + klass="only-light", + ), + dict( + name="Macquarie University", + img="Macquarie-dark.svg", + url="https://www.mq.edu.au/", + size=lg, + klass="only-dark", + ), + dict( + name="AE Studio", + img="AE-Studio-light.svg", + url="https://ae.studio/", + size=xxl, + klass="only-light", + ), + dict( + name="AE Studio", + img="AE-Studio-dark.svg", + url="https://ae.studio/", + size=xxl, + klass="only-dark", + ), + dict( + name="Children’s Hospital of Philadelphia Research Institute", + img="CHOP.svg", + url="https://www.research.chop.edu/imaging", + size=xxl, + klass="only-light", + ), + dict( + name="Children’s Hospital of Philadelphia Research Institute", + img="CHOP-dark.svg", + url="https://www.research.chop.edu/imaging", + size=xxl, + klass="only-dark", + ), + dict( + name="Donders Institute for Brain, Cognition and Behaviour at Radboud University", # noqa E501 + img="Donders.png", + url="https://www.ru.nl/donders/", + size=xl, + ), + dict( + name="Fondation Campus Biotech Geneva", + img="FCBG.svg", + url="https://fcbg.ch/", + size=sm, + ), ], # \u00AD is an optional hyphen (not rendered 
unless needed) # If these are changed, the Makefile should be updated, too - 'carousel': [ - dict(title='Source Estimation', - text='Distributed, sparse, mixed-norm, beam\u00ADformers, dipole fitting, and more.', # noqa E501 - url='auto_tutorials/inverse/index.html', - img='sphx_glr_30_mne_dspm_loreta_008.gif', - alt='dSPM'), - dict(title='Machine Learning', - text='Advanced decoding models including time general\u00ADiza\u00ADtion.', # noqa E501 - url='auto_tutorials/machine-learning/50_decoding.html', - img='sphx_glr_50_decoding_006.png', - alt='Decoding'), - dict(title='Encoding Models', - text='Receptive field estima\u00ADtion with optional smooth\u00ADness priors.', # noqa E501 - url='auto_tutorials/machine-learning/30_strf.html', - img='sphx_glr_30_strf_001.png', - alt='STRF'), - dict(title='Statistics', - text='Parametric and non-parametric, permutation tests and clustering.', # noqa E501 - url='auto_tutorials/stats-source-space/index.html', - img='sphx_glr_20_cluster_1samp_spatiotemporal_001.png', - alt='Clusters'), - dict(title='Connectivity', - text='All-to-all spectral and effective connec\u00ADtivity measures.', # noqa E501 - url='https://mne.tools/mne-connectivity/stable/auto_examples/mne_inverse_label_connectivity.html', # noqa E501 - img='https://mne.tools/mne-connectivity/stable/_images/sphx_glr_mne_inverse_label_connectivity_001.png', # noqa E501 - alt='Connectivity'), - dict(title='Data Visualization', - text='Explore your data from multiple perspectives.', - url='auto_tutorials/evoked/20_visualize_evoked.html', - img='sphx_glr_20_visualize_evoked_010.png', - alt='Visualization'), - ] + "carousel": [ + dict( + title="Source Estimation", + text="Distributed, sparse, mixed-norm, beam\u00adformers, dipole fitting, and more.", # noqa E501 + url="auto_tutorials/inverse/index.html", + img="sphx_glr_30_mne_dspm_loreta_008.gif", + alt="dSPM", + ), + dict( + title="Machine Learning", + text="Advanced decoding models including time general\u00adiza\u00adtion.", # noqa E501 + url="auto_tutorials/machine-learning/50_decoding.html", + img="sphx_glr_50_decoding_006.png", + alt="Decoding", + ), + dict( + title="Encoding Models", + text="Receptive field estima\u00adtion with optional smooth\u00adness priors.", # noqa E501 + url="auto_tutorials/machine-learning/30_strf.html", + img="sphx_glr_30_strf_001.png", + alt="STRF", + ), + dict( + title="Statistics", + text="Parametric and non-parametric, permutation tests and clustering.", # noqa E501 + url="auto_tutorials/stats-source-space/index.html", + img="sphx_glr_20_cluster_1samp_spatiotemporal_001.png", + alt="Clusters", + ), + dict( + title="Connectivity", + text="All-to-all spectral and effective connec\u00adtivity measures.", # noqa E501 + url="https://mne.tools/mne-connectivity/stable/auto_examples/mne_inverse_label_connectivity.html", # noqa E501 + img="https://mne.tools/mne-connectivity/stable/_images/sphx_glr_mne_inverse_label_connectivity_001.png", # noqa E501 + alt="Connectivity", + ), + dict( + title="Data Visualization", + text="Explore your data from multiple perspectives.", + url="auto_tutorials/evoked/20_visualize_evoked.html", + img="sphx_glr_20_visualize_evoked_010.png", + alt="Visualization", + ), + ], } # Output file base name for HTML help builder. 
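The ``institutions`` and ``carousel`` entries above follow a small implicit schema: each logo needs ``name``/``img``/``url`` (plus a size and, for theme-aware logos, an ``only-light``/``only-dark`` ``klass``), and each carousel card needs ``title``/``text``/``url``/``img``/``alt``. A quick self-check along the following lines can catch unpaired light/dark logos or missing keys before a full docs build; the helper is purely illustrative and not part of ``conf.py``, and it assumes the enclosing dict is the usual ``html_theme_options``:

.. code-block:: python

    def check_theme_entries(institutions, carousel):
        """Illustrative sanity check for the theme option entries above."""
        for card in carousel:
            # every carousel card must be renderable as a linked image
            missing = {"title", "text", "url", "img", "alt"} - set(card)
            assert not missing, f"carousel card {card.get('title')!r} missing {missing}"
        # logos restricted to one color scheme should come in light/dark pairs
        light = {e["name"] for e in institutions if e.get("klass") == "only-light"}
        dark = {e["name"] for e in institutions if e.get("klass") == "only-dark"}
        assert light == dark, f"unpaired light/dark logos: {sorted(light ^ dark)}"

    # e.g. check_theme_entries(html_theme_options["institutions"],
    #                          html_theme_options["carousel"])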
-htmlhelp_basename = 'mne-doc' +htmlhelp_basename = "mne-doc" # -- Options for plot_directive ---------------------------------------------- # Adapted from SciPy plot_include_source = True -plot_formats = [('png', 96)] +plot_formats = [("png", 96)] plot_html_show_formats = False plot_html_show_source_link = False font_size = 13 * 72 / 96.0 # 13 px plot_rcparams = { - 'font.size': font_size, - 'axes.titlesize': font_size, - 'axes.labelsize': font_size, - 'xtick.labelsize': font_size, - 'ytick.labelsize': font_size, - 'legend.fontsize': font_size, - 'figure.figsize': (6, 5), - 'figure.subplot.bottom': 0.2, - 'figure.subplot.left': 0.2, - 'figure.subplot.right': 0.9, - 'figure.subplot.top': 0.85, - 'figure.subplot.wspace': 0.4, - 'text.usetex': False, + "font.size": font_size, + "axes.titlesize": font_size, + "axes.labelsize": font_size, + "xtick.labelsize": font_size, + "ytick.labelsize": font_size, + "legend.fontsize": font_size, + "figure.figsize": (6, 5), + "figure.subplot.bottom": 0.2, + "figure.subplot.left": 0.2, + "figure.subplot.right": 0.9, + "figure.subplot.top": 0.85, + "figure.subplot.wspace": 0.4, + "text.usetex": False, } @@ -956,149 +1288,70 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -latex_toplevel_sectioning = 'part' - -_np_print_defaults = np.get_printoptions() - +latex_toplevel_sectioning = "part" # -- Warnings management ----------------------------------------------------- - -def reset_warnings(gallery_conf, fname): - """Ensure we are future compatible and ignore silly warnings.""" - # In principle, our examples should produce no warnings. - # Here we cause warnings to become errors, with a few exceptions. - # This list should be considered alongside - # setup.cfg -> [tool:pytest] -> filterwarnings - - # remove tweaks from other module imports or example runs - warnings.resetwarnings() - # restrict - warnings.filterwarnings('error') - # allow these, but show them - warnings.filterwarnings('always', '.*non-standard config type: "foo".*') - warnings.filterwarnings('always', '.*config type: "MNEE_USE_CUUDAA".*') - warnings.filterwarnings('always', '.*cannot make axes width small.*') - warnings.filterwarnings('always', '.*Axes that are not compatible.*') - warnings.filterwarnings('always', '.*FastICA did not converge.*') - # ECoG BIDS spec violations: - warnings.filterwarnings('always', '.*Fiducial point nasion not found.*') - warnings.filterwarnings('always', '.*DigMontage is only a subset of.*') - warnings.filterwarnings( # xhemi morph (should probably update sample) - 'always', '.*does not exist, creating it and saving it.*') - # internal warnings - warnings.filterwarnings('default', module='sphinx') - # allow these warnings, but don't show them - for key in ( - 'The module matplotlib.tight_layout is deprecated', # nilearn - 'invalid version and will not be supported', # pyxdf - 'distutils Version classes are deprecated', # seaborn and neo - '`np.object` is a deprecated alias for the builtin `object`', # pyxdf - # nilearn, should be fixed in > 0.9.1 - 'In future, it will be an error for \'np.bool_\' scalars to', - # sklearn hasn't updated to SciPy's sym_pos dep - 'The \'sym_pos\' keyword is deprecated', - # numba - '`np.MachAr` is deprecated', - # joblib hasn't updated to avoid distutils - 'distutils package is deprecated', - # jupyter - 'Jupyter is migrating its paths to use standard', - r'Widget\..* is deprecated\.', - # PyQt6 - 'Enum value .* is 
marked as deprecated', - # matplotlib PDF output - 'The py23 module has been deprecated', - # pkg_resources - 'Implementing implicit namespace packages', - 'Deprecated call to `pkg_resources', - # nilearn - 'pkg_resources is deprecated as an API', - r'The .* was deprecated in Matplotlib 3\.7', - ): - warnings.filterwarnings( # deal with other modules having bad imports - 'ignore', message=".*%s.*" % key, category=DeprecationWarning) - warnings.filterwarnings( - 'ignore', message=( - 'Matplotlib is currently using agg, which is a non-GUI backend.*' - ) - ) - # matplotlib 3.6 in nilearn and pyvista - warnings.filterwarnings( - 'ignore', message='.*cmap function will be deprecated.*') - # xarray/netcdf4 - warnings.filterwarnings( - 'ignore', message=r'numpy\.ndarray size changed, may indicate.*', - category=RuntimeWarning) - # qdarkstyle - warnings.filterwarnings( - 'ignore', message=r'.*Setting theme=.*6 in qdarkstyle.*', - category=RuntimeWarning) - # pandas, via seaborn (examples/time_frequency/time_frequency_erds.py) - warnings.filterwarnings( - 'ignore', message=r'iteritems is deprecated.*Use \.items instead\.', - category=FutureWarning) - # pandas in 50_epochs_to_data_frame.py - warnings.filterwarnings( - 'ignore', message=r'invalid value encountered in cast', - category=RuntimeWarning) - # xarray _SixMetaPathImporter (?) - warnings.filterwarnings( - 'ignore', message=r'falling back to find_module', - category=ImportWarning) - - # In case we use np.set_printoptions in any tutorials, we only - # want it to affect those: - np.set_printoptions(**_np_print_defaults) - - reset_warnings(None, None) - # -- Fontawesome support ----------------------------------------------------- - -brand_icons = ('apple', 'linux', 'windows', 'discourse', 'python') +brand_icons = ("apple", "linux", "windows", "discourse", "python") fixed_width_icons = ( # homepage: - 'book', 'code-branch', 'newspaper', 'circle-question', 'quote-left', + "book", + "code-branch", + "newspaper", + "circle-question", + "quote-left", # contrib guide: - 'bug-slash', 'comment', 'computer-mouse', 'hand-sparkles', 'pencil', - 'text-slash', 'universal-access', 'wand-magic-sparkles', - 'discourse', 'python', + "bug-slash", + "comment", + "computer-mouse", + "hand-sparkles", + "pencil", + "text-slash", + "universal-access", + "wand-magic-sparkles", + "discourse", + "python", ) other_icons = ( - 'hand-paper', 'question', 'rocket', 'server', 'code', 'desktop', - 'terminal', 'cloud-arrow-down', 'wrench', 'hourglass-half' + "hand-paper", + "question", + "rocket", + "server", + "code", + "desktop", + "terminal", + "cloud-arrow-down", + "wrench", + "hourglass-half", ) icon_class = dict() for icon in brand_icons + fixed_width_icons + other_icons: - icon_class[icon] = ('fa-brands',) if icon in brand_icons else ('fa-solid',) - icon_class[icon] += ('fa-fw',) if icon in fixed_width_icons else () + icon_class[icon] = ("fa-brands",) if icon in brand_icons else ("fa-solid",) + icon_class[icon] += ("fa-fw",) if icon in fixed_width_icons else () -prolog = '' +rst_prolog = "" for icon, classes in icon_class.items(): - prolog += f''' + rst_prolog += f""" .. |{icon}| raw:: html - -''' + +""" -prolog += ''' +rst_prolog += """ .. |ensp| unicode:: U+2002 .. EN SPACE -''' + +.. include:: /links.inc +.. include:: /changes/names.inc + +.. 
currentmodule:: mne +""" # -- Dependency info ---------------------------------------------------------- -try: - from importlib.metadata import metadata # new in Python 3.8 - min_py = metadata('mne')['Requires-Python'] -except ModuleNotFoundError: - from pkg_resources import get_distribution - info = get_distribution('mne').get_metadata_lines('PKG-INFO') - for line in info: - if line.strip().startswith('Requires-Python'): - min_py = line.split(':')[1] -min_py = min_py.lstrip(' =<>') -prolog += f'\n.. |min_python_version| replace:: {min_py}\n' +min_py = metadata("mne")["Requires-Python"].lstrip(" =<>") +rst_prolog += f"\n.. |min_python_version| replace:: {min_py}\n" # -- website redirects -------------------------------------------------------- @@ -1106,153 +1359,254 @@ def reset_warnings(gallery_conf, fname): # since we don't need to add redirects for examples added after this date. needed_plot_redirects = { # tutorials - '10_epochs_overview.py', '10_evoked_overview.py', '10_overview.py', - '10_preprocessing_overview.py', '10_raw_overview.py', - '10_reading_meg_data.py', '15_handling_bad_channels.py', - '20_event_arrays.py', '20_events_from_raw.py', '20_reading_eeg_data.py', - '20_rejecting_bad_data.py', '20_visualize_epochs.py', - '20_visualize_evoked.py', '30_annotate_raw.py', '30_epochs_metadata.py', - '30_filtering_resampling.py', '30_info.py', '30_reading_fnirs_data.py', - '35_artifact_correction_regression.py', '40_artifact_correction_ica.py', - '40_autogenerate_metadata.py', '40_sensor_locations.py', - '40_visualize_raw.py', '45_projectors_background.py', - '50_artifact_correction_ssp.py', '50_configure_mne.py', - '50_epochs_to_data_frame.py', '55_setting_eeg_reference.py', - '59_head_positions.py', '60_make_fixed_length_epochs.py', - '60_maxwell_filtering_sss.py', '70_fnirs_processing.py', + "10_epochs_overview.py", + "10_evoked_overview.py", + "10_overview.py", + "10_preprocessing_overview.py", + "10_raw_overview.py", + "10_reading_meg_data.py", + "15_handling_bad_channels.py", + "20_event_arrays.py", + "20_events_from_raw.py", + "20_reading_eeg_data.py", + "20_rejecting_bad_data.py", + "20_visualize_epochs.py", + "20_visualize_evoked.py", + "30_annotate_raw.py", + "30_epochs_metadata.py", + "30_filtering_resampling.py", + "30_info.py", + "30_reading_fnirs_data.py", + "35_artifact_correction_regression.py", + "40_artifact_correction_ica.py", + "40_autogenerate_metadata.py", + "40_sensor_locations.py", + "40_visualize_raw.py", + "45_projectors_background.py", + "50_artifact_correction_ssp.py", + "50_configure_mne.py", + "50_epochs_to_data_frame.py", + "55_setting_eeg_reference.py", + "59_head_positions.py", + "60_make_fixed_length_epochs.py", + "60_maxwell_filtering_sss.py", + "70_fnirs_processing.py", # examples - '3d_to_2d.py', 'brainstorm_data.py', 'channel_epochs_image.py', - 'cluster_stats_evoked.py', 'compute_csd.py', - 'compute_mne_inverse_epochs_in_label.py', - 'compute_mne_inverse_raw_in_label.py', 'compute_mne_inverse_volume.py', - 'compute_source_psd_epochs.py', 'covariance_whitening_dspm.py', - 'custom_inverse_solver.py', - 'decoding_csp_eeg.py', 'decoding_csp_timefreq.py', - 'decoding_spatio_temporal_source.py', 'decoding_spoc_CMC.py', - 'decoding_time_generalization_conditions.py', - 'decoding_unsupervised_spatial_filter.py', 'decoding_xdawn_eeg.py', - 'define_target_events.py', 'dics_source_power.py', 'eeg_csd.py', - 'eeg_on_scalp.py', 'eeglab_head_sphere.py', 'elekta_epochs.py', - 'ems_filtering.py', 'eog_artifact_histogram.py', 'evoked_arrowmap.py', - 
'evoked_ers_source_power.py', 'evoked_topomap.py', 'evoked_whitening.py', - 'fdr_stats_evoked.py', 'find_ref_artifacts.py', - 'fnirs_artifact_removal.py', 'forward_sensitivity_maps.py', - 'gamma_map_inverse.py', 'hf_sef_data.py', 'ica_comparison.py', - 'interpolate_bad_channels.py', 'label_activation_from_stc.py', - 'label_from_stc.py', 'label_source_activations.py', - 'left_cerebellum_volume_source.py', 'limo_data.py', - 'linear_model_patterns.py', 'linear_regression_raw.py', - 'meg_sensors.py', 'mixed_norm_inverse.py', - 'mixed_source_space_inverse.py', - 'mne_cov_power.py', 'mne_helmet.py', 'mne_inverse_coherence_epochs.py', - 'mne_inverse_envelope_correlation.py', - 'mne_inverse_envelope_correlation_volume.py', - 'mne_inverse_psi_visual.py', - 'morph_surface_stc.py', 'morph_volume_stc.py', 'movement_compensation.py', - 'movement_detection.py', 'multidict_reweighted_tfmxne.py', - 'muscle_detection.py', 'opm_data.py', 'otp.py', 'parcellation.py', - 'psf_ctf_label_leakage.py', 'psf_ctf_vertices.py', - 'psf_ctf_vertices_lcmv.py', 'publication_figure.py', 'rap_music.py', - 'read_inverse.py', 'read_neo_format.py', 'read_noise_covariance_matrix.py', - 'read_stc.py', 'receptive_field_mtrf.py', 'resolution_metrics.py', - 'resolution_metrics_eegmeg.py', 'roi_erpimage_by_rt.py', - 'sensor_noise_level.py', - 'sensor_permutation_test.py', 'sensor_regression.py', - 'shift_evoked.py', 'simulate_evoked_data.py', 'simulate_raw_data.py', - 'simulated_raw_data_using_subject_anatomy.py', 'snr_estimate.py', - 'source_label_time_frequency.py', 'source_power_spectrum.py', - 'source_power_spectrum_opm.py', 'source_simulator.py', - 'source_space_morphing.py', 'source_space_snr.py', - 'source_space_time_frequency.py', 'ssd_spatial_filters.py', - 'ssp_projs_sensitivity_map.py', 'temporal_whitening.py', - 'time_frequency_erds.py', 'time_frequency_global_field_power.py', - 'time_frequency_mixed_norm_inverse.py', 'time_frequency_simulated.py', - 'topo_compare_conditions.py', 'topo_customized.py', - 'vector_mne_solution.py', 'virtual_evoked.py', 'xdawn_denoising.py', - 'xhemi.py', + "3d_to_2d.py", + "brainstorm_data.py", + "channel_epochs_image.py", + "cluster_stats_evoked.py", + "compute_csd.py", + "compute_mne_inverse_epochs_in_label.py", + "compute_mne_inverse_raw_in_label.py", + "compute_mne_inverse_volume.py", + "compute_source_psd_epochs.py", + "covariance_whitening_dspm.py", + "custom_inverse_solver.py", + "decoding_csp_eeg.py", + "decoding_csp_timefreq.py", + "decoding_spatio_temporal_source.py", + "decoding_spoc_CMC.py", + "decoding_time_generalization_conditions.py", + "decoding_unsupervised_spatial_filter.py", + "decoding_xdawn_eeg.py", + "define_target_events.py", + "dics_source_power.py", + "eeg_csd.py", + "eeg_on_scalp.py", + "eeglab_head_sphere.py", + "elekta_epochs.py", + "ems_filtering.py", + "eog_artifact_histogram.py", + "evoked_arrowmap.py", + "evoked_ers_source_power.py", + "evoked_topomap.py", + "evoked_whitening.py", + "fdr_stats_evoked.py", + "find_ref_artifacts.py", + "fnirs_artifact_removal.py", + "forward_sensitivity_maps.py", + "gamma_map_inverse.py", + "hf_sef_data.py", + "ica_comparison.py", + "interpolate_bad_channels.py", + "label_activation_from_stc.py", + "label_from_stc.py", + "label_source_activations.py", + "left_cerebellum_volume_source.py", + "limo_data.py", + "linear_model_patterns.py", + "linear_regression_raw.py", + "meg_sensors.py", + "mixed_norm_inverse.py", + "mixed_source_space_inverse.py", + "mne_cov_power.py", + "mne_helmet.py", + "mne_inverse_coherence_epochs.py", + 
"mne_inverse_envelope_correlation.py", + "mne_inverse_envelope_correlation_volume.py", + "mne_inverse_psi_visual.py", + "morph_surface_stc.py", + "morph_volume_stc.py", + "movement_compensation.py", + "movement_detection.py", + "multidict_reweighted_tfmxne.py", + "muscle_detection.py", + "opm_data.py", + "otp.py", + "parcellation.py", + "psf_ctf_label_leakage.py", + "psf_ctf_vertices.py", + "psf_ctf_vertices_lcmv.py", + "publication_figure.py", + "rap_music.py", + "trap_music.py", + "read_inverse.py", + "read_neo_format.py", + "read_noise_covariance_matrix.py", + "read_stc.py", + "receptive_field_mtrf.py", + "resolution_metrics.py", + "resolution_metrics_eegmeg.py", + "roi_erpimage_by_rt.py", + "sensor_noise_level.py", + "sensor_permutation_test.py", + "sensor_regression.py", + "shift_evoked.py", + "simulate_evoked_data.py", + "simulate_raw_data.py", + "simulated_raw_data_using_subject_anatomy.py", + "snr_estimate.py", + "source_label_time_frequency.py", + "source_power_spectrum.py", + "source_power_spectrum_opm.py", + "source_simulator.py", + "source_space_morphing.py", + "source_space_snr.py", + "source_space_time_frequency.py", + "ssd_spatial_filters.py", + "ssp_projs_sensitivity_map.py", + "temporal_whitening.py", + "time_frequency_erds.py", + "time_frequency_global_field_power.py", + "time_frequency_mixed_norm_inverse.py", + "time_frequency_simulated.py", + "topo_compare_conditions.py", + "topo_customized.py", + "vector_mne_solution.py", + "virtual_evoked.py", + "xdawn_denoising.py", + "xhemi.py", } -ex = 'auto_examples' -co = 'connectivity' -mne_conn = 'https://mne.tools/mne-connectivity/stable' -tu = 'auto_tutorials' -di = 'discussions' -sm = 'source-modeling' -fw = 'forward' -nv = 'inverse' -sn = 'stats-sensor-space' -sr = 'stats-source-space' -sd = 'sample-datasets' -ml = 'machine-learning' -tf = 'time-freq' -si = 'simulation' +api_redirects = { + "connectivity", + "covariance", + "creating_from_arrays", + "datasets", + "decoding", + "events", + "export", + "file_io", + "forward", + "inverse", + "logging", + "most_used_classes", + "mri", + "preprocessing", + "python_reference", + "reading_raw_data", + "realtime", + "report", + "sensor_space", + "simulation", + "source_space", + "statistics", + "time_frequency", + "visualization", +} +ex = "auto_examples" +co = "connectivity" +mne_conn = "https://mne.tools/mne-connectivity/stable" +tu = "auto_tutorials" +pr = "preprocessing" +di = "discussions" +sm = "source-modeling" +fw = "forward" +nv = "inverse" +sn = "stats-sensor-space" +sr = "stats-source-space" +sd = "sample-datasets" +ml = "machine-learning" +tf = "time-freq" +si = "simulation" +vi = "visualization" custom_redirects = { # Custom redirects (one HTML path to another, relative to outdir) # can be added here as fr->to key->value mappings - f'{tu}/evoked/plot_eeg_erp.html': f'{tu}/evoked/30_eeg_erp.html', - f'{tu}/evoked/plot_whitened.html': f'{tu}/evoked/40_whitened.html', - f'{tu}/misc/plot_modifying_data_inplace.html': f'{tu}/intro/15_inplace.html', # noqa E501 - f'{tu}/misc/plot_report.html': f'{tu}/intro/70_report.html', - f'{tu}/misc/plot_seeg.html': f'{tu}/clinical/20_seeg.html', - f'{tu}/misc/plot_ecog.html': f'{tu}/clinical/30_ecog.html', - f'{tu}/{ml}/plot_receptive_field.html': f'{tu}/{ml}/30_strf.html', - f'{tu}/{ml}/plot_sensors_decoding.html': f'{tu}/{ml}/50_decoding.html', - f'{tu}/{sm}/plot_background_freesurfer.html': f'{tu}/{fw}/10_background_freesurfer.html', # noqa E501 - f'{tu}/{sm}/plot_source_alignment.html': 
f'{tu}/{fw}/20_source_alignment.html', # noqa E501 - f'{tu}/{sm}/plot_forward.html': f'{tu}/{fw}/30_forward.html', - f'{tu}/{sm}/plot_eeg_no_mri.html': f'{tu}/{fw}/35_eeg_no_mri.html', - f'{tu}/{sm}/plot_background_freesurfer_mne.html': f'{tu}/{fw}/50_background_freesurfer_mne.html', # noqa E501 - f'{tu}/{sm}/plot_fix_bem_in_blender.html': f'{tu}/{fw}/80_fix_bem_in_blender.html', # noqa E501 - f'{tu}/{sm}/plot_compute_covariance.html': f'{tu}/{fw}/90_compute_covariance.html', # noqa E501 - f'{tu}/{sm}/plot_object_source_estimate.html': f'{tu}/{nv}/10_stc_class.html', # noqa E501 - f'{tu}/{sm}/plot_dipole_fit.html': f'{tu}/{nv}/20_dipole_fit.html', - f'{tu}/{sm}/plot_mne_dspm_source_localization.html': f'{tu}/{nv}/30_mne_dspm_loreta.html', # noqa E501 - f'{tu}/{sm}/plot_dipole_orientations.html': f'{tu}/{nv}/35_dipole_orientations.html', # noqa E501 - f'{tu}/{sm}/plot_mne_solutions.html': f'{tu}/{nv}/40_mne_fixed_free.html', - f'{tu}/{sm}/plot_beamformer_lcmv.html': f'{tu}/{nv}/50_beamformer_lcmv.html', # noqa E501 - f'{tu}/{sm}/plot_visualize_stc.html': f'{tu}/{nv}/60_visualize_stc.html', - f'{tu}/{sm}/plot_eeg_mri_coords.html': f'{tu}/{nv}/70_eeg_mri_coords.html', - f'{tu}/{sd}/plot_brainstorm_phantom_elekta.html': f'{tu}/{nv}/80_brainstorm_phantom_elekta.html', # noqa E501 - f'{tu}/{sd}/plot_brainstorm_phantom_ctf.html': f'{tu}/{nv}/85_brainstorm_phantom_ctf.html', # noqa E501 - f'{tu}/{sd}/plot_phantom_4DBTi.html': f'{tu}/{nv}/90_phantom_4DBTi.html', - f'{tu}/{sd}/plot_brainstorm_auditory.html': f'{tu}/io/60_ctf_bst_auditory.html', # noqa E501 - f'{tu}/{sd}/plot_sleep.html': f'{tu}/clinical/60_sleep.html', - f'{tu}/{di}/plot_background_filtering.html': f'{tu}/preprocessing/25_background_filtering.html', # noqa E501 - f'{tu}/{di}/plot_background_statistics.html': f'{tu}/{sn}/10_background_stats.html', # noqa E501 - f'{tu}/{sn}/plot_stats_cluster_erp.html': f'{tu}/{sn}/20_erp_stats.html', - f'{tu}/{sn}/plot_stats_cluster_1samp_test_time_frequency.html': f'{tu}/{sn}/40_cluster_1samp_time_freq.html', # noqa E501 - f'{tu}/{sn}/plot_stats_cluster_time_frequency.html': f'{tu}/{sn}/50_cluster_between_time_freq.html', # noqa E501 - f'{tu}/{sn}/plot_stats_spatio_temporal_cluster_sensors.html': f'{tu}/{sn}/75_cluster_ftest_spatiotemporal.html', # noqa E501 - f'{tu}/{sr}/plot_stats_cluster_spatio_temporal.html': f'{tu}/{sr}/20_cluster_1samp_spatiotemporal.html', # noqa E501 - f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_2samp.html': f'{tu}/{sr}/30_cluster_ftest_spatiotemporal.html', # noqa E501 - f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_repeated_measures_anova.html': f'{tu}/{sr}/60_cluster_rmANOVA_spatiotemporal.html', # noqa E501 - f'{tu}/{sr}/plot_stats_cluster_time_frequency_repeated_measures_anova.html': f'{tu}/{sn}/70_cluster_rmANOVA_time_freq.html', # noqa E501 - f'{tu}/{tf}/plot_sensors_time_frequency.html': f'{tu}/{tf}/20_sensors_time_frequency.html', # noqa E501 - f'{tu}/{tf}/plot_ssvep.html': f'{tu}/{tf}/50_ssvep.html', - f'{tu}/{si}/plot_creating_data_structures.html': f'{tu}/{si}/10_array_objs.html', # noqa E501 - f'{tu}/{si}/plot_point_spread.html': f'{tu}/{si}/70_point_spread.html', - f'{tu}/{si}/plot_dics.html': f'{tu}/{si}/80_dics.html', - f'{ex}/{co}/mne_inverse_label_connectivity.html': f'{mne_conn}/{ex}/mne_inverse_label_connectivity.html', # noqa E501 - f'{ex}/{co}/cwt_sensor_connectivity.html': f'{mne_conn}/{ex}/cwt_sensor_connectivity.html', # noqa E501 - f'{ex}/{co}/mixed_source_space_connectivity.html': f'{mne_conn}/{ex}/mixed_source_space_connectivity.html', # noqa 
E501 - f'{ex}/{co}/mne_inverse_coherence_epochs.html': f'{mne_conn}/{ex}/mne_inverse_coherence_epochs.html', # noqa E501 - f'{ex}/{co}/mne_inverse_connectivity_spectrum.html': f'{mne_conn}/{ex}/mne_inverse_connectivity_spectrum.html', # noqa E501 - f'{ex}/{co}/mne_inverse_envelope_correlation_volume.html': f'{mne_conn}/{ex}/mne_inverse_envelope_correlation_volume.html', # noqa E501 - f'{ex}/{co}/mne_inverse_envelope_correlation.html': f'{mne_conn}/{ex}/mne_inverse_envelope_correlation.html', # noqa E501 - f'{ex}/{co}/mne_inverse_psi_visual.html': f'{mne_conn}/{ex}/mne_inverse_psi_visual.html', # noqa E501 - f'{ex}/{co}/sensor_connectivity.html': f'{mne_conn}/{ex}/sensor_connectivity.html', # noqa E501 + "install/contributing": "development/contributing", + "overview/cite": "documentation/cite", + "overview/get_help": "help/index", + "overview/roadmap": "development/roadmap", + "whats_new": "development/whats_new", + f"{tu}/evoked/plot_eeg_erp": f"{tu}/evoked/30_eeg_erp", + f"{tu}/evoked/plot_whitened": f"{tu}/evoked/40_whitened", + f"{tu}/misc/plot_modifying_data_inplace": f"{tu}/intro/15_inplace", + f"{tu}/misc/plot_report": f"{tu}/intro/70_report", + f"{tu}/misc/plot_seeg": f"{tu}/clinical/20_seeg", + f"{tu}/misc/plot_ecog": f"{tu}/clinical/30_ecog", + f"{tu}/{ml}/plot_receptive_field": f"{tu}/{ml}/30_strf", + f"{tu}/{ml}/plot_sensors_decoding": f"{tu}/{ml}/50_decoding", + f"{tu}/{sm}/plot_background_freesurfer": f"{tu}/{fw}/10_background_freesurfer", + f"{tu}/{sm}/plot_source_alignment": f"{tu}/{fw}/20_source_alignment", + f"{tu}/{sm}/plot_forward": f"{tu}/{fw}/30_forward", + f"{tu}/{sm}/plot_eeg_no_mri": f"{tu}/{fw}/35_eeg_no_mri", + f"{tu}/{sm}/plot_background_freesurfer_mne": f"{tu}/{fw}/50_background_freesurfer_mne", # noqa E501 + f"{tu}/{sm}/plot_fix_bem_in_blender": f"{tu}/{fw}/80_fix_bem_in_blender", + f"{tu}/{sm}/plot_compute_covariance": f"{tu}/{fw}/90_compute_covariance", + f"{tu}/{sm}/plot_object_source_estimate": f"{tu}/{nv}/10_stc_class", + f"{tu}/{sm}/plot_dipole_fit": f"{tu}/{nv}/20_dipole_fit", + f"{tu}/{sm}/plot_mne_dspm_source_localization": f"{tu}/{nv}/30_mne_dspm_loreta", + f"{tu}/{sm}/plot_dipole_orientations": f"{tu}/{nv}/35_dipole_orientations", + f"{tu}/{sm}/plot_mne_solutions": f"{tu}/{nv}/40_mne_fixed_free", + f"{tu}/{sm}/plot_beamformer_lcmv": f"{tu}/{nv}/50_beamformer_lcmv", + f"{tu}/{sm}/plot_visualize_stc": f"{tu}/{nv}/60_visualize_stc", + f"{tu}/{sm}/plot_eeg_mri_coords": f"{tu}/{nv}/70_eeg_mri_coords", + f"{tu}/{sd}/plot_brainstorm_phantom_elekta": f"{tu}/{nv}/80_brainstorm_phantom_elekta", # noqa E501 + f"{tu}/{sd}/plot_brainstorm_phantom_ctf": f"{tu}/{nv}/85_brainstorm_phantom_ctf", + f"{tu}/{sd}/plot_phantom_4DBTi": f"{tu}/{nv}/90_phantom_4DBTi", + f"{tu}/{sd}/plot_brainstorm_auditory": f"{tu}/io/60_ctf_bst_auditory", + f"{tu}/{sd}/plot_sleep": f"{tu}/clinical/60_sleep", + f"{tu}/{di}/plot_background_filtering": f"{tu}/{pr}/25_background_filtering", + f"{tu}/{di}/plot_background_statistics": f"{tu}/{sn}/10_background_stats", + f"{tu}/{sn}/plot_stats_cluster_erp": f"{tu}/{sn}/20_erp_stats", + f"{tu}/{sn}/plot_stats_cluster_1samp_test_time_frequency": f"{tu}/{sn}/40_cluster_1samp_time_freq", # noqa E501 + f"{tu}/{sn}/plot_stats_cluster_time_frequency": f"{tu}/{sn}/50_cluster_between_time_freq", # noqa E501 + f"{tu}/{sn}/plot_stats_spatio_temporal_cluster_sensors": f"{tu}/{sn}/75_cluster_ftest_spatiotemporal", # noqa E501 + f"{tu}/{sr}/plot_stats_cluster_spatio_temporal": f"{tu}/{sr}/20_cluster_1samp_spatiotemporal", # noqa E501 + 
f"{tu}/{sr}/plot_stats_cluster_spatio_temporal_2samp": f"{tu}/{sr}/30_cluster_ftest_spatiotemporal", # noqa E501 + f"{tu}/{sr}/plot_stats_cluster_spatio_temporal_repeated_measures_anova": f"{tu}/{sr}/60_cluster_rmANOVA_spatiotemporal", # noqa E501 + f"{tu}/{sr}/plot_stats_cluster_time_frequency_repeated_measures_anova": f"{tu}/{sn}/70_cluster_rmANOVA_time_freq", # noqa E501 + f"{tu}/{tf}/plot_sensors_time_frequency": f"{tu}/{tf}/20_sensors_time_frequency", + f"{tu}/{tf}/plot_ssvep": f"{tu}/{tf}/50_ssvep", + f"{tu}/{si}/plot_creating_data_structures": f"{tu}/{si}/10_array_objs", + f"{tu}/{si}/plot_point_spread": f"{tu}/{si}/70_point_spread", + f"{tu}/{si}/plot_dics": f"{tu}/{si}/80_dics", + f"{tu}/{tf}/plot_eyetracking": f"{tu}/{pr}/90_eyetracking_data", + f"{ex}/{co}/mne_inverse_label_connectivity": f"{mne_conn}/{ex}/mne_inverse_label_connectivity", # noqa E501 + f"{ex}/{co}/cwt_sensor_connectivity": f"{mne_conn}/{ex}/cwt_sensor_connectivity", + f"{ex}/{co}/mixed_source_space_connectivity": f"{mne_conn}/{ex}/mixed_source_space_connectivity", # noqa E501 + f"{ex}/{co}/mne_inverse_coherence_epochs": f"{mne_conn}/{ex}/mne_inverse_coherence_epochs", # noqa E501 + f"{ex}/{co}/mne_inverse_connectivity_spectrum": f"{mne_conn}/{ex}/mne_inverse_connectivity_spectrum", # noqa E501 + f"{ex}/{co}/mne_inverse_envelope_correlation_volume": f"{mne_conn}/{ex}/mne_inverse_envelope_correlation_volume", # noqa E501 + f"{ex}/{co}/mne_inverse_envelope_correlation": f"{mne_conn}/{ex}/mne_inverse_envelope_correlation", # noqa E501 + f"{ex}/{co}/mne_inverse_psi_visual": f"{mne_conn}/{ex}/mne_inverse_psi_visual", + f"{ex}/{co}/sensor_connectivity": f"{mne_conn}/{ex}/sensor_connectivity", + f"{ex}/{vi}/publication_figure": f"{tu}/{vi}/10_publication_figure", + f"{ex}/{vi}/sensor_noise_level": f"{tu}/{pr}/50_artifact_correction_ssp", } - -def make_redirects(app, exception): - """Make HTML redirects.""" - # https://www.sphinx-doc.org/en/master/extdev/appapi.html - # Adapted from sphinxcontrib/redirects (BSD-2-Clause) - if not (isinstance(app.builder, - sphinx.builders.html.StandaloneHTMLBuilder) and - exception is None): - return - logger = sphinx.util.logging.getLogger('mne') - TEMPLATE = """\ +# Adapted from sphinxcontrib/redirects (BSD-2-Clause) +REDIRECT_TEMPLATE = """\ @@ -1266,86 +1620,135 @@ def make_redirects(app, exception): If you are not redirected automatically, follow this link. -""" # noqa: E501 - sphinx_gallery_conf = app.config['sphinx_gallery_conf'] - for src_dir, out_dir in zip(sphinx_gallery_conf['examples_dirs'], - sphinx_gallery_conf['gallery_dirs']): - root = os.path.abspath(os.path.join(app.srcdir, src_dir)) - fnames = [os.path.join(os.path.relpath(dirpath, root), fname) - for dirpath, _, fnames in os.walk(root) - for fname in fnames - if fname in needed_plot_redirects] +""" + + +def check_existing_redirect(path): + """Make sure existing HTML files are redirects, before overwriting.""" + if path.is_file(): + with open(path) as fid: + for _ in range(8): + next(fid) + line = fid.readline() + if "Page Redirection" not in line: + raise RuntimeError( + "Attempted overwrite of HTML file with a redirect, where the " + "original file was not already a redirect." 
+ ) + + +def _check_valid_builder(app, exception): + valid_builder = isinstance(app.builder, sphinx.builders.html.StandaloneHTMLBuilder) + return valid_builder and exception is None + + +def make_gallery_redirects(app, exception): + """Make HTML redirects for our sphinx gallery pages.""" + if not _check_valid_builder(app, exception): + return + sg_conf = app.config["sphinx_gallery_conf"] + for src_dir, out_dir in zip(sg_conf["examples_dirs"], sg_conf["gallery_dirs"]): + root = (Path(app.srcdir) / src_dir).resolve() + fnames = [ + pyfile.relative_to(root) + for pyfile in root.rglob(r"**/*.py") + if pyfile.name in needed_plot_redirects + ] # plot_ redirects for fname in fnames: - dirname = os.path.join(app.outdir, out_dir, os.path.dirname(fname)) - to_fname = os.path.splitext(os.path.basename(fname))[0] + '.html' - fr_fname = f'plot_{to_fname}' - to_path = os.path.join(dirname, to_fname) - fr_path = os.path.join(dirname, fr_fname) - assert os.path.isfile(to_path), (fname, to_path) - with open(fr_path, 'w') as fid: - fid.write(TEMPLATE.format(to=to_fname)) - logger.info( - f'Added {len(fnames):3d} HTML plot_* redirects for {out_dir}') - # custom redirects - for fr, to in custom_redirects.items(): - if not to.startswith('http'): - assert os.path.isfile(os.path.join(app.outdir, to)), to - # handle links to sibling folders - path_parts = to.split('/') - assert tu in path_parts, path_parts # need to refactor otherwise - path_parts = ['..'] + path_parts[(path_parts.index(tu) + 1):] - to = os.path.join(*path_parts) - assert to.endswith('html'), to - fr_path = os.path.join(app.outdir, fr) - assert fr_path.endswith('html'), fr_path + dirname = Path(app.outdir) / out_dir / fname.parent + to_fname = fname.with_suffix(".html").name + fr_fname = f"plot_{to_fname}" + to_path = dirname / to_fname + fr_path = dirname / fr_fname + assert to_path.is_file(), (fname, to_path) + with open(fr_path, "w") as fid: + fid.write(REDIRECT_TEMPLATE.format(to=to_fname)) + sphinx_logger.info( + f"Added {len(fnames):3d} HTML plot_* redirects for {out_dir}" + ) + + +def make_api_redirects(app, exception): + """Make HTML redirects for our API pages.""" + if not _check_valid_builder(app, exception): + return + + for page in api_redirects: + fname = f"{page}.html" + fr_path = Path(app.outdir) / fname + to_path = Path(app.outdir) / "api" / fname # allow overwrite if existing file is just a redirect - if os.path.isfile(fr_path): - with open(fr_path, 'r') as fid: - for _ in range(8): - next(fid) - line = fid.readline() - assert 'Page Redirection' in line, line - # handle folders that no longer exist - if fr_path.split('/')[-2] in ( - 'misc', 'discussions', 'source-modeling', 'sample-datasets', - 'connectivity'): - os.makedirs(os.path.dirname(fr_path), exist_ok=True) - with open(fr_path, 'w') as fid: - fid.write(TEMPLATE.format(to=to)) - logger.info( - f'Added {len(custom_redirects):3d} HTML custom redirects') + check_existing_redirect(fr_path) + with open(fr_path, "w") as fid: + fid.write(REDIRECT_TEMPLATE.format(to=to_path)) + sphinx_logger.info(f"Added {len(api_redirects):3d} HTML API redirects") + + +def make_custom_redirects(app, exception): + """Make HTML redirects for miscellaneous pages.""" + if not _check_valid_builder(app, exception): + return + + for _fr, _to in custom_redirects.items(): + fr = f"{_fr}.html" + to = f"{_to}.html" + fr_path = Path(app.outdir) / fr + check_existing_redirect(fr_path) + if to.startswith("http"): + to_path = to + else: + to_path = Path(app.outdir) / to + assert to_path.is_file(), to_path + # 
recreate folders that no longer exist + defunct_gallery_folders = ( + "misc", + "discussions", + "source-modeling", + "sample-datasets", + "connectivity", + ) + parts = fr_path.relative_to(Path(app.outdir)).parts + if ( + len(parts) > 1 # whats_new violates this + and parts[1] in defunct_gallery_folders + and not fr_path.parent.exists() + ): + os.makedirs(fr_path.parent, exist_ok=True) + # write the redirect + with open(fr_path, "w") as fid: + fid.write(REDIRECT_TEMPLATE.format(to=to_path)) + sphinx_logger.info(f"Added {len(custom_redirects):3d} HTML custom redirects") def make_version(app, exception): """Make a text file with the git version.""" - if not (isinstance(app.builder, - sphinx.builders.html.StandaloneHTMLBuilder) and - exception is None): + if not ( + isinstance(app.builder, sphinx.builders.html.StandaloneHTMLBuilder) + and exception is None + ): return - logger = sphinx.util.logging.getLogger('mne') try: - stdout, _ = run_subprocess(['git', 'rev-parse', 'HEAD'], verbose=False) + stdout, _ = run_subprocess(["git", "rev-parse", "HEAD"], verbose=False) except Exception as exc: - logger.warning(f'Failed to write _version.txt: {exc}') + sphinx_logger.warning(f"Failed to write _version.txt: {exc}") return - with open(os.path.join(app.outdir, '_version.txt'), 'w') as fid: + with open(os.path.join(app.outdir, "_version.txt"), "w") as fid: fid.write(stdout) - logger.info(f'Added "{stdout.rstrip()}" > _version.txt') + sphinx_logger.info(f'Added "{stdout.rstrip()}" > _version.txt') # -- Connect our handlers to the main Sphinx app --------------------------- + def setup(app): """Set up the Sphinx app.""" - app.connect('autodoc-process-docstring', append_attr_meth_examples) - if report_scraper is not None: - report_scraper.app = app - app.config.rst_prolog = prolog - app.connect('builder-inited', report_scraper.copyfiles) - sphinx_logger = sphinx.util.logging.getLogger('mne') - sphinx_logger.info( - f'Building documentation for MNE {release} ({mne.__file__})') - sphinx_logger.info(f'Building with scrapers={scrapers}') - app.connect('build-finished', make_redirects) - app.connect('build-finished', make_version) + app.connect("autodoc-process-docstring", append_attr_meth_examples) + app.connect("autodoc-process-docstring", fix_sklearn_inherited_docstrings) + # High prio, will happen before SG + app.connect("builder-inited", generate_credit_rst, priority=10) + app.connect("builder-inited", report_scraper.set_dirs, priority=20) + app.connect("build-finished", make_gallery_redirects) + app.connect("build-finished", make_api_redirects) + app.connect("build-finished", make_custom_redirects) + app.connect("build-finished", make_version) diff --git a/doc/conftest.py b/doc/conftest.py index 102c338598e..1e791fbb923 100644 --- a/doc/conftest.py +++ b/doc/conftest.py @@ -1 +1,5 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + from mne.conftest import * # noqa diff --git a/doc/credit.rst b/doc/credit.rst new file mode 100644 index 00000000000..4d9a9957604 --- /dev/null +++ b/doc/credit.rst @@ -0,0 +1,12 @@ +:orphan: + +.. _contributors: + +============ +Contributors +============ + +There are many different ways to contribute to MNE-Python! So far we only list +code contributions below, but plan to add other metrics in the future. + +.. 
include:: ./code_credit.inc diff --git a/doc/datasets.rst b/doc/datasets.rst deleted file mode 100644 index 8f8e98d4d82..00000000000 --- a/doc/datasets.rst +++ /dev/null @@ -1,47 +0,0 @@ - -Datasets -======== - -.. currentmodule:: mne.datasets - -:py:mod:`mne.datasets`: - -.. automodule:: mne.datasets - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - fetch_dataset - has_dataset - brainstorm.bst_auditory.data_path - brainstorm.bst_resting.data_path - brainstorm.bst_raw.data_path - eegbci.load_data - eegbci.standardize - fetch_aparc_sub_parcellation - fetch_fsaverage - fetch_hcp_mmp_parcellation - fetch_infant_template - fetch_phantom - fnirs_motor.data_path - hf_sef.data_path - kiloword.data_path - limo.load_data - misc.data_path - mtrf.data_path - multimodal.data_path - opm.data_path - sleep_physionet.age.fetch_data - sleep_physionet.temazepam.fetch_data - sample.data_path - somato.data_path - spm_face.data_path - ucl_opm_auditory.data_path - visual_92_categories.data_path - phantom_4dbti.data_path - refmeg_noise.data_path - ssvep.data_path - erp_core.data_path - epilepsy_ecog.data_path \ No newline at end of file diff --git a/doc/install/contributing.rst b/doc/development/contributing.rst similarity index 90% rename from doc/install/contributing.rst rename to doc/development/contributing.rst index d1419ef80f3..beed58f6558 100644 --- a/doc/install/contributing.rst +++ b/doc/development/contributing.rst @@ -3,7 +3,6 @@ Contributing guide ================== -.. include:: ../links.inc .. highlight:: console Thanks for taking the time to contribute! MNE-Python is an open-source project @@ -94,8 +93,8 @@ Setting up your local development environment Configuring git ~~~~~~~~~~~~~~~ -.. note:: Git GUI alternative - :class: sidebar +.. admonition:: Git GUI alternative + :class: sidebar note `GitHub desktop`_ is a GUI alternative to command line git that some users appreciate; it is available for |windows| Windows and |apple| MacOS. @@ -216,8 +215,6 @@ Once you have git installed and configured, and before creating your local copy of the codebase, go to the `MNE-Python GitHub`_ page and create a `fork`_ into your GitHub user account. -.. image:: https://docs.github.com/assets/cb-28613/images/help/repository/fork_button.png - This will create a copy of the MNE-Python codebase inside your GitHub user account (this is called "your fork"). Changes you make to MNE-Python will eventually get "pushed" to your fork, and will be incorporated into the @@ -233,8 +230,8 @@ of how that structure is set up is given here: Creating the virtual environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. note:: Supported Python environments - :class: sidebar +.. admonition:: Supported Python environments + :class: sidebar note We strongly recommend the `Anaconda`_ or `Miniconda`_ environment managers for Python. Other setups are possible but are not officially supported by @@ -245,8 +242,8 @@ Creating the virtual environment These instructions will set up a Python environment that is separated from your system-level Python and any other managed Python environments on your computer. 
-This lets you switch between different versions of Python (MNE-Python requires -version 3.8 or higher) and also switch between the stable and development +This lets you switch between different versions of Python and also switch between +the stable and development versions of MNE-Python (so you can, for example, use the same computer to analyze your data with the stable release, and also work with the latest development version to fix bugs or add new features). Even if you've already @@ -282,18 +279,19 @@ Then make a local clone of your remote fork (``origin``):: $ git clone https://github.com/$GITHUB_USERNAME/mne-python.git Finally, set up a link between your local clone and the official repository -(``upstream``):: +(``upstream``) and set up ``git diff`` to work properly:: $ cd mne-python $ git remote add upstream https://github.com/mne-tools/mne-python.git $ git fetch --all + $ git config --local blame.ignoreRevsFile .git-blame-ignore-revs Now we'll remove the *stable* version of MNE-Python and replace it with the *development* version (the clone we just created with git). Make sure you're in the correct environment first (``conda activate mnedev``), and then do:: $ cd $INSTALL_LOCATION/mne-python # make sure we're in the right folder - $ conda remove --force mne # the --force avoids dependency checking + $ conda remove --force mne-base # the --force avoids dependency checking $ pip install -e . The command ``pip install -e .`` installs a python module into the current @@ -306,11 +304,11 @@ be reflected the next time you open a Python interpreter and ``import mne`` Finally, we'll add a few dependencies that are not needed for running MNE-Python, but are needed for locally running our test suite:: - $ pip install -r requirements_testing.txt + $ pip install --group=test And for building our documentation:: - $ pip install -r requirements_doc.txt + $ pip install --group=doc $ conda install graphviz .. note:: @@ -331,9 +329,15 @@ To build documentation, you will also require `optipng`_: - On Windows, unzip :file:`optipng.exe` from the `optipng for Windows`_ archive into the :file:`doc/` folder. This step is optional for Windows users. -You can also choose to install some optional linters for reStructuredText:: +There are additional optional dependencies needed to run various tests, such as +scikit-learn for decoding tests, or nibabel for MRI tests. If you want to run all the +tests, consider using our MNE installers (which provide these dependencies) or pay +attention to the skips that ``pytest`` reports and install the relevant libraries. +For example, this traceback:: + + SKIPPED [2] mne/io/eyelink/tests/test_eyelink.py:14: could not import 'pandas': No module named 'pandas' - $ conda install -c conda-forge sphinx-autobuild doc8 +indicates that ``pandas`` needs to be installed in order to run the Eyelink tests. .. _basic-git: @@ -371,7 +375,7 @@ feature, you should first synchronize your local ``main`` branch with the $ git merge upstream/main # synchronize local main branch with remote upstream main branch $ git checkout -b new-feature-x # create local branch "new-feature-x" and check it out -.. note:: Alternative +.. 
tip::
+   :class: sidebar
 
     You can save some typing by using ``git pull upstream/main`` to replace
@@ -587,42 +591,54 @@ Describe your changes in the changelog
 --------------------------------------
 
 Include in your changeset a brief description of the change in the
-:ref:`changelog <whats_new>` (:file:`doc/changes/latest.inc`; this can be
-skipped for very minor changes like correcting typos in the documentation).
-
-There are different sections of the changelog for each release, and separate
-**subsections for bugfixes, new features, and changes to the public API.**
-Please be sure to add your entry to the appropriate subsection.
-
-The styling and positioning of the entry depends on whether you are a
-first-time contributor or have been mentioned in the changelog before.
-
-First-time contributors
-"""""""""""""""""""""""
-
-Welcome to MNE-Python! We're very happy to have you here. 🤗 And to ensure you
-get proper credit for your work, please add a changelog entry with the
-following pattern **at the top** of the respective subsection (bugs,
-enhancements, etc.):
-
-.. code-block:: rst
-
-    - Bugs
-    ----
-
-    - Short description of the changes (:gh:`0000` by :newcontrib:`Firstname Lastname`)
-
-    - ...
-
-where ``0000`` must be replaced with the respective GitHub pull request (PR)
-number, and ``Firstname Lastname`` must be replaced with your full name.
-
-It is usually best to wait to add a line to the changelog until your PR is
-finalized, to avoid merge conflicts (since the changelog is updated with
-almost every PR).
-
-Lastly, make sure that your name is included in the list of authors in
+:ref:`changelog <whats_new>` using towncrier_ format, which aggregates small,
+properly-named ``.rst`` files to create a changelog. This can be
+skipped for very minor changes like correcting typos in the documentation.
+
+There are six separate sections for changes, based on change type.
+To add a changelog entry to a given section, name it as
+:file:`doc/changes/dev/<PR number>.<type>.rst`. The types are:
+
+notable
+    For overarching changes, e.g., adding type hints package-wide. These are rare.
+dependency
+    For changes to dependencies, e.g., adding a new dependency or changing
+    the minimum version of an existing dependency.
+bugfix
+    For bug fixes. Can change code behavior with no deprecation period.
+apichange
+    Code behavior changes that require a deprecation period.
+newfeature
+    For new features.
+other
+    For changes that don't fit into any of the above categories, e.g.,
+    internal refactorings.
+
+For example, for a new feature PR with number 12345, the changelog entry should be
+added as a new file :file:`doc/changes/dev/12345.newfeature.rst`. The file should
+contain:
+
+1. A brief description of the change, typically in a single line of one or two
+   sentences.
+2. reST links to **public** API endpoints like functions (``:func:``),
+   classes (``:class:``), and methods (``:meth:``). If changes are only internal
+   to private functions/attributes, mention internal refactoring rather than name
+   the private attributes changed.
+3. Author credit. If you are a new contributor (we're very happy to have you here! 🤗),
+   you should use the ``:newcontrib:`` reST role, whereas previous contributors should
+   use a standard reST link to their name. For example, a new contributor could write:
+
+   .. code-block:: rst
+
+       Short description of the changes, by :newcontrib:`Firstname Lastname`.
+
+   And a previous contributor could write:
+
+   .. code-block:: rst
+
+       Short description of the changes, by `Firstname Lastname`_.
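Because the six change types above map one-to-one onto file-name suffixes, fragment names are easy to validate mechanically. The sketch below only illustrates the naming rule described above (it is not a tool shipped with MNE-Python), assuming the :file:`doc/changes/dev/` layout:

.. code-block:: python

    import re
    from pathlib import Path

    # Illustrative check of the <PR number>.<type>.rst naming scheme.
    TYPES = ("notable", "dependency", "bugfix", "apichange", "newfeature", "other")
    FRAGMENT_RE = re.compile(rf"^\d+\.({'|'.join(TYPES)})\.rst$")

    def check_fragments(dev_dir="doc/changes/dev"):
        bad = sorted(
            p.name for p in Path(dev_dir).glob("*.rst") if not FRAGMENT_RE.match(p.name)
        )
        if bad:
            raise SystemExit(f"Misnamed changelog fragments: {bad}")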
+ +Make sure that your name is included in the list of authors in :file:`doc/changes/names.inc`, otherwise the documentation build will fail. To add an author name, append a line with the following pattern (note how the syntax is different from that used in the changelog): @@ -634,27 +650,13 @@ how the syntax is different from that used in the changelog): Many contributors opt to link to their GitHub profile that way. Have a look at the existing entries in the file to get some inspiration. -Recurring contributors -"""""""""""""""""""""" - -The changelog entry should follow the following patterns: - -.. code-block:: rst - - - Short description of the changes from one contributor (:gh:`0000` by `Contributor Name`_) - - Short description of the changes from several contributors (:gh:`0000` by `Contributor Name`_, `Second Contributor`_, and `Third Contributor`_) - -where ``0000`` must be replaced with the respective GitHub pull request (PR) -number. Mind the Oxford comma in the case of multiple contributors. - Sometimes, changes that shall appear as a single changelog entry are spread out -across multiple PRs. In this case, name all relevant PRs, separated by -commas: +across multiple PRs. In this case, edit the existing towncrier file for the relevant +change, and append additional PR numbers in parentheticals with the ``:gh:`` role like: .. code-block:: rst - - Short description of the changes from one contributor in multiple PRs (:gh:`0000`, :gh:`1111` by `Contributor Name`_) - - Short description of the changes from several contributors in multiple PRs (:gh:`0000`, :gh:`1111` by `Contributor Name`_, `Second Contributor`_, and `Third Contributor`_) + Short description of the changes, by `Firstname Lastname`_. (:gh:`12346`) Test locally before opening pull requests (PRs) ----------------------------------------------- @@ -694,11 +696,16 @@ Adhere to standard Python style guidelines All contributions to MNE-Python are checked against style guidelines described in `PEP 8`_. We also check for common coding errors (such as variables that are defined but never used). We allow very few exceptions to these guidelines, and -use tools such as pep8_, pyflakes_, and flake8_ to check code style +use tools such as ruff_ to check code style automatically. From the :file:`mne-python` root directory, you can check for -style violations by running:: +style violations by first installing our pre-commit hook:: + + $ pip install pre-commit + $ pre-commit install --install-hooks - $ make flake +Then running:: + + $ make ruff # alias for `pre-commit run -a` in the shell. Several text editors or IDEs also have Python style checking, which can highlight style errors while you code (and train you to make those @@ -748,7 +755,7 @@ but complete docstrings are appropriate when private functions/methods are relatively complex. To run some basic tests on documentation, you can use:: $ pytest mne/tests/test_docstring_parameters.py - $ make docstyle + $ make ruff Cross-reference everywhere @@ -853,13 +860,13 @@ data with a meaningful middle (zero-point) and ``Reds`` otherwise. This applies to both visualization functions and tutorials/examples. -.. _run_tests: +.. _run-tests: Running the test suite ~~~~~~~~~~~~~~~~~~~~~~ -.. note:: pytest flags - :class: sidebar +.. admonition:: pytest flags + :class: sidebar tip The ``-x`` flag exits the pytest run when any test fails; this can speed up debugging when running all tests in a file or module. 
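The pytest flags mentioned in this sidebar can also be passed programmatically via ``pytest.main``, which is sometimes handy when iterating on a single module from an interactive session. This is only an illustration of the flags, not a workflow prescribed by this guide; the test module shown is the docstring test referenced earlier:

.. code-block:: python

    import pytest

    # Equivalent of ``pytest -x --pdb mne/tests/test_docstring_parameters.py``:
    # stop at the first failure and drop into the debugger there.
    exit_code = pytest.main(["-x", "--pdb", "mne/tests/test_docstring_parameters.py"])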
@@ -867,7 +874,7 @@ Running the test suite The ``--pdb`` flag will automatically start the python debugger upon test failure. -The full test suite can be run by calling ``make test`` from the +The full test suite can be run by calling ``pytest -m "not ultraslowtest" mne`` from the ``mne-python`` root folder. Testing the entire module can be quite slow, however, so to run individual tests while working on a new feature, you can run the following line:: @@ -904,28 +911,16 @@ You can build the documentation locally using `GNU Make`_ with :file:`doc/Makefile`. From within the :file:`doc` directory, you can test formatting and linking by running:: - $ make html_dev-noplot + $ make html-noplot This will build the documentation *except* it will format (but not execute) the tutorial and example files. If you have created or modified an example or tutorial, you should instead run -:samp:`PATTERN={} make html_dev-pattern` to render +:samp:`make html-pattern PATTERN={}` to render all the documentation and additionally execute just your example or tutorial (so you can make sure it runs successfully and generates the output / figures you expect). -.. note:: - If you are using a *Windows command shell*, to use the pattern approach, - use the following two lines: - - .. code-block:: doscon - - > set PATTERN= - > make html_dev-pattern - - If you are on Windows but using the `git BASH`_ shell, use the same two - commands but replace ``set`` with ``export``. - After either of these commands completes, ``make show`` will open the locally-rendered documentation site in your browser. If you see many warnings that seem unrelated to your contributions, it might be that your output folder @@ -1034,6 +1029,11 @@ down the road. Here are the guidelines: nicely formatted) and the resulting output and figures will be rendered as part of the tutorial/example. +- Examples and tutorials should execute as quickly and with as low memory usage as + possible while still conveying necessary information. To see current execution + times and memory usage, visit the `sg_execution_times page`_. To see unused API + entries, see the `sg_api_usage page`_. + `This sample pull request`_ exemplifies many of the conventions listed above: it addresses only one problem; it started with an issue to discuss the problem and some possible solutions; it is a PR from the user's non-main branch into @@ -1049,6 +1049,8 @@ it can serve as a useful example of what to expect from the PR review process. .. _open a new issue: https://github.com/mne-tools/mne-python/issues/new/choose .. _This sample pull request: https://github.com/mne-tools/mne-python/pull/6230 .. _our user forum: https://mne.discourse.group +.. _sg_execution_times page: https://mne.tools/dev/sg_execution_times.html +.. _sg_api_usage page: https://mne.tools/dev/sg_api_usage.html .. git installation @@ -1097,8 +1099,7 @@ it can serve as a useful example of what to expect from the PR review process. .. linting .. _PEP 8: https://www.python.org/dev/peps/pep-0008/ -.. _pyflakes: https://pypi.org/project/pyflakes -.. _Flake8: http://flake8.pycqa.org/ +.. _ruff: https://beta.ruff.rs/docs .. misc @@ -1112,5 +1113,7 @@ it can serve as a useful example of what to expect from the PR review process. .. optipng -.. _optipng: http://optipng.sourceforge.net/ -.. _optipng for Windows: http://prdownloads.sourceforge.net/optipng/optipng-0.7.7-win32.zip?download +.. _optipng: https://optipng.sourceforge.net/ +.. 
_optipng for Windows: http://prdownloads.sourceforge.net/optipng/optipng-0.7.8-win64.zip?download + +.. include:: ../links.inc diff --git a/doc/development/governance.rst b/doc/development/governance.rst new file mode 100644 index 00000000000..77d01b66c85 --- /dev/null +++ b/doc/development/governance.rst @@ -0,0 +1,380 @@ +.. _governance: + +================== +Project Governance +================== + +The purpose of this document is to formalize the governance process +used by the MNE-Python project in both ordinary and extraordinary +situations, and to clarify how decisions are made and how the various +elements of our community interact, including the relationship between +open source collaborative development and work that may be funded by +for-profit or non-profit entities. + +The Project +=========== + +The MNE-Python Project (The Project) is an open source software project. The +goal of The Project is to develop open source software for analysis of +neuroscience data in Python. The Project is released under the BSD (or similar) +open source license, developed openly and is hosted publicly under the +``mne-tools`` GitHub organization. + +The Project is developed by a team of distributed developers, called +Contributors. Contributors are individuals who have contributed code, +documentation, designs, or other work to the Project. Anyone can be a +Contributor. Contributors can be affiliated with any legal entity or +none. Contributors participate in the project by submitting, reviewing, +and discussing GitHub Pull Requests and Issues and participating in open +and public Project discussions on GitHub, Discourse, and other +channels. The foundation of Project participation is openness and +transparency. + +The Project Community consists of all Contributors and Users of the +Project. Contributors work on behalf of and are responsible to the +larger Project Community and we strive to keep the barrier between +Contributors and Users as low as possible. + +The Project is not a legal entity, nor does it currently have any formal +relationships with legal entities. + +Governance Model +================ + +.. _leadership-roles: + +Leadership Roles +^^^^^^^^^^^^^^^^ + +The MNE-Python leadership structure shall consist of the following groups. +A list of the current members of the respective groups is maintained at the +page :ref:`governance-people`. + +Maintainer Team +--------------- + +The Maintainer Team is responsible for implementing changes to the software and +supporting the user community. Duties: + +- Infrastructure/large-scale software decisions, in partnership with the Steering + Council +- Reviewing and merging pull requests +- Responding to issues on GitHub +- Monitoring CI failures and addressing them +- Community maintenance: answering forum posts, holding office hours +- Community information: social media announcements (releases, new features, etc) +- Training new members of the Maintainer Team + +*Note:* different permissions may be given to each maintainer based on the work they do +(e.g., GitHub repository triage/merge/admin permissions, social media account access, +Discord admin roles, forum admin rights). The role of maintainer does not confer these +automatically. + +Steering Council +---------------- + +The Steering Council is responsible for guiding and shepherding the project on a +day-to-day basis. 
Duties: + +- Obtaining funding (either by writing grants specifically for MNE development, or + convincing others to include funds for MNE development in their research grants) +- Translating high-level roadmap guidance from the Advisory Board (e.g. “better support + for OPMs”) into actionable roadmap items (e.g., “Add support for OPM manufacturers + besides QuSpin, and add standard preprocessing routines for coreg and OPM-specific + artifacts”) +- Coordination with the larger Scientific Python ecosystem +- Large-scale changes to the software (e.g., type hints, docdict, things that affect + multiple submodules), in partnership with the Maintainer Team +- Infrastructure decisions (e.g., dependency version policy, release cadence, CI + management, etc), in partnership with the Maintainer Team +- Any other governance task not mentioned elsewhere, and that falls outside of the + responsibilities of other teams +- Attendance at Steering Council meetings (approx. every 2 weeks; time to be decided + among SC members) +- Attendance at Advisory Board meetings (approx. every 1-2 years) +- Write funding proposals +- Communicate/coordinate with Maintainer Team + +Members of the Steering Council shall additionally be considered as members of the +Maintainer Team, *ex officio*, and thus shall have the necessary rights and privileges +afforded to maintainers (passwords, merge rights, etc). + +Chair of the Steering Council +----------------------------- + +The Chair of the Steering Council is responsible for liaising between the Steering +Council and the community. Duties: + +- Convening the Steering Council meetings +- Calling for votes when consensus fails +- Communicating important decisions (and the context for why those decisions were + taken) to the community + +External Advisory Board +----------------------- + +The External Advisory Board is responsible for high-level roadmap and funding +guidance. Duties: + +- Attendance at Advisory Board meetings (approx. every 1-2 years) +- Periodically communicating with Steering Council to impart guidance + +Meetings +^^^^^^^^ + +Maintainer Meetings +------------------- + +The Maintainer Team can decide if there should be maintainer meetings or not. These +could be either discussion meetings or social meetings to keep in touch with each other +(or something completely different!). Frequency and time could vary. + +Steering Council Meetings +------------------------- + +The Steering Council will have mandatory meetings every two weeks to discuss project +management and funding. The Steering Council may decide to change meeting time or +frequency at their discretion. + +All-hands Meetings +------------------ + +At least once a year, all maintainers and Steering Council members should come together +in a (possibly virtual) meeting. Meeting time will be determined via poll. During this +meeting, any governance changes proposed since the prior meeting shall be discussed and +may be adopted by vote. + +Population of Leadership Roles +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Maintainer Team +--------------- + +Appointment +~~~~~~~~~~~ + +- Once per year, before the annual all-hands meeting, the Maintainer Team shall assess + their anticipated needs for the coming year. During the meeting, they shall decide how + many new maintainers they want to add to their team. New maintainers are selected from + applications by a vote of the currently-serving maintainers and the Steering Council. 
+- Ad-hoc additions are possible by initiative of the Steering Council for exceptional + circumstances, e.g., hiring someone with grant funds to specifically do MNE + maintenance. These ad-hoc additions do not necessarily require a vote from the + Maintainer Team, as the job application procedures of the hiring institution are + assumed to be sufficiently rigorous to avoid bias, nepotism, etc. + +Duration of service +~~~~~~~~~~~~~~~~~~~ + +Appointment to the Maintainer Team is for an indefinite term. + +Termination +~~~~~~~~~~~ + +Loss of maintainer status (and revocation of associated rights and +privileges, e.g., passwords, merge rights, etc) can occur under the following +circumstances: + +- *Voluntary resignation*, at any time, for any reason. +- *Inactivity*. Once per year, before the annual all-hands meeting, the Steering Council + shall assess maintainer activity for the preceding year. Any maintainers seeming to be + inactive shall be contacted and given opportunity to dispute their inactivity (e.g., + by highlighting ways they have been working in the MNE community that may not be + visible from simple metrics like GitHub or forum activity reports). Maintainers who do + not dispute their inactivity (or fail to respond within 14 days, or longer at the + discretion of the Steering Council) shall be removed from the Maintainer Team. In + cases where the Steering Council and the maintainer still disagree about the alleged + inactivity, removal may still occur by a two-thirds majority vote of the rest of the + Maintainer Team. +- *Conduct*. At any time, a maintainer may be removed by unanimous vote of the + code-of-conduct committee, for violations of our community guidelines (in accordance + with the enforcement guidelines outlined therein). + +*Reinstatement*. Maintainers who voluntarily resigned may be re-appointed on an ad-hoc +basis by a vote of the current Maintainer Team. Maintainers removed for inactivity may +re-apply to an annual call for new maintainers. Maintainers removed for reasons of +conduct may be reinstated only if their eligibility is allowed/restored by the +code-of-conduct committee in accordance with the enforcement section of our Community +Guidelines. In such cases the re-eligible former maintainer may re-apply through the +annual appointment process. + +Steering Council +---------------- + +Appointment +~~~~~~~~~~~ +A term on the Steering Council shall last approximately 2 years. Terms shall be +staggered such that no more than half of the seats shall be open for election in any +given year. Upon first constitution, the Steering Council decides which 50% of the +members shall be granted an initial three year term to initiate the staggering. + +The Maintainer Team and current Steering Council will vote to fill the open seats on the +Steering Council. Candidates can be (self-)nominated from the current Maintainer Team +and Steering Council. + +At any time, the Steering Council may increase the number of seats on the Council to +adapt to the Council’s workload and needs. New seats shall be filled in the same manner +as normal (re-)elections, i.e., by vote open to all members of the Maintainer Team and +Steering Council. Term length shall be set so as to maintain the 50/50 balance of +staggered re-election cycles as nearly as possible, and in cases where perfect balance +already exists, the term shall err towards being *longer*. 
+ +In the case of vacancies due to termination (see below), the Steering Council may call a +special election (following the same procedures as in a normal (re-)election), or may +choose to wait to fill the seat until the next scheduled election. For filled vacancies, +the term shall be the balance of the unserved term of the person vacating the seat, +unless the remaining time after the vacancy-filling election is 6 months or shorter, in +which case the term shall be for 2 years plus the remaining time on the vacant seat. + +Termination +~~~~~~~~~~~ + +Loss of Steering Council status (and revocation of associated rights and privileges, +e.g., passwords, merge rights, etc) can occur under the following circumstances: + +- *Voluntary resignation*, at any time, for any reason. +- *Conduct*. At any time, a member of the Steering Council may be removed by unanimous + vote of the code-of-conduct committee, for violations of our community guidelines (in + accordance with the enforcement guidelines outlined therein). + +External Advisory Board +----------------------- + +The External Advisory Board shall be populated by invitation from the Steering Council. +Anyone may propose individuals for potential invitation. Appointment and removal from +the External Advisory Board is determined by the Steering Council. + +Decision Making Process +^^^^^^^^^^^^^^^^^^^^^^^ + +Announcement of Elections +------------------------- + +All votes shall be open for at least ten days and shall be announced 14 days in advance +to all eligible voters by email. The voting deadline shall also be added to the core +team’s shared Google calendar. At least one reminder shall be sent out half-way through +the voting period. + +Voting Mechanism +---------------- + +All elections shall be held as anonymous online votes using ElectionBuddy or a similar +service. Unless otherwise specified the mechanism shall be +`ranked choice voting `__ +with a threshold of 50% + 1 vote. That means, everyone ranks those candidates (in order +of preference) that they could see filling the role in question. Note that it is +possible for a voter to reject all candidates by submitting a blank ballot, so that if a +single person is running for a seat it is still possible for them to fail to be elected +if enough voters cast blank ballots. + +Voting for the Steering Council +------------------------------- + +Votes for Steering Council membership shall be scheduled as-needed to address Steering +Council workload, and advertised to eligible candidates (i.e., the Maintainer Team) for +a minimum of 14 days, after which a vote of current maintainers and Steering Council +members shall be scheduled. + +Voting for the Maintainer Team +------------------------------ + +Votes for additions to the Maintainer Team shall be scheduled promptly following the +annual all-hands meeting. The Maintainer Team shall advertise the open seats via online +MNE-Python channels. Applications (consisting of a short candidate statement) must be +open for a minimum of 14 days, after which a vote of the current maintainers and +Steering Council shall be scheduled. The Maintainer Team shall set up a confidential +submission system for applications (consisting of short candidate statements), such as a +dedicated email address, Google form, or similar confidential submission mechanism. + +Institutional Partners and Funding +================================== + +The leadership roles for the project are :ref:`defined above `. 
No +outside institution, individual, or legal entity has the ability to own, +control, usurp, or influence the project other than by participating in +the Project in one of those roles. However, because +institutions can be an important funding mechanism for the project, it +is important to formally acknowledge institutional participation in the +project. These are Institutional Partners. + +An Institutional Contributor is any individual Project Contributor who +contributes to the project as part of their official duties at an +Institutional Partner. Likewise, an Institutional Project Leader is anyone +in a Project leadership role who contributes to the project as part +of their official duties at an Institutional Partner. + +With these definitions, an Institutional Partner is any recognized legal +entity in any country that employs at least 1 Institutional Contributor or +Institutional Project Leader. Institutional Partners can be for-profit or +non-profit entities. + +Institutions become eligible to become an Institutional Partner by +employing individuals who actively contribute to The Project as part of +their official duties. To state this another way, the only way for a +Partner to influence the project is by actively contributing to the open +development of the project, in equal terms to any other member of the +community of Contributors and Leaders. Merely using Project +Software in institutional context does not allow an entity to become an +Institutional Partner. Financial gifts do not enable an entity to become +an Institutional Partner. Once an institution becomes eligible for +Institutional Partnership, the Steering Council must nominate and +approve the Partnership. + +If, at some point, an existing Institutional Partner stops having any +contributing employees, then a one year grace period commences. If, at +the end of this one-year period, they continue not to have any +contributing employees, then their Institutional Partnership will +lapse, and resuming it will require going through the normal process +for new Partnerships. + +An Institutional Partner is free to pursue funding for their work on The +Project through any legal means. This could involve a non-profit +organization raising money from private foundations and donors or a +for-profit company building proprietary products and services that +leverage Project Software and Services. Funding acquired by +Institutional Partners to work on The Project is called Institutional +Funding. However, no funding obtained by an Institutional Partner can +override Project Leadership. If a Partner has funding to do MNE-Python work +and the Project Leadership decides to not pursue that work as a project, the +Partner is free to pursue it on their own. However, in this situation, +that part of the Partner’s work will not be under the MNE-Python umbrella and +cannot use the Project trademarks in any way that suggests a formal +relationship. + +Institutional Partner benefits are: + +- optional acknowledgement on the MNE-Python website and in talks +- ability to acknowledge their own funding sources on the MNE-Python + website and in talks +- ability to influence the project through the participation of their + Institutional Contributors and Institutional Project Leaders. +- invitation of the Council Members to MNE-Python Developer Meetings + +A list of current Institutional Partners is maintained at the page +:ref:`supporting-institutions`. 
+ +Document History +================ + +https://github.com/mne-tools/mne-python/commits/main/doc/overview/governance.rst + + +Acknowledgements +================ + +Substantial portions of this document were adapted from the +`SciPy project's governance document +`_, +which in turn was adapted from +`Jupyter/IPython project's governance document +`_ and +`NumPy's governance document +`_. + +License +======= + +To the extent possible under law, the authors have waived all +copyright and related or neighboring rights to the MNE-Python project +governance document, as per the `CC-0 public domain dedication / license +`_. diff --git a/doc/development/index.rst b/doc/development/index.rst new file mode 100644 index 00000000000..98fc28f8e7f --- /dev/null +++ b/doc/development/index.rst @@ -0,0 +1,35 @@ +MNE-Python Development +====================== + +.. NOTE: this first section (up until "overview of contribution process") is + basically a copy/paste of CONTRIBUTING.md from the repository root, with one + sentence deleted to avoid self-referential linking. Changes made here should + be mirrored there, and vice-versa. + +MNE-Python is maintained by a community of scientists and research labs. The +project accepts contributions in the form of bug reports, fixes, feature +additions, and documentation improvements (including typo corrections). The +best way to start contributing is by `opening an issue`_ on our GitHub page to +discuss ideas for changes or enhancements, or to tell us about behavior that +you think might be a bug. For *general troubleshooting* or *usage questions*, +please consider posting your questions on our `MNE Forum`_. + +Users and contributors to MNE-Python are expected to follow our +`code of conduct`_. + +The `contributing guide`_ has details on the preferred contribution workflow +and the recommended system configuration for a smooth contribution/development +experience. + +.. _`opening an issue`: https://github.com/mne-tools/mne-python/issues/new/choose +.. _`MNE Forum`: https://mne.discourse.group +.. _`code of conduct`: https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md +.. _`contributing guide`: https://mne.tools/dev/development/contributing.html + +.. toctree:: + :hidden: + + contributing + whats_new + roadmap + governance diff --git a/doc/development/roadmap.rst b/doc/development/roadmap.rst new file mode 100644 index 00000000000..278ed3ce213 --- /dev/null +++ b/doc/development/roadmap.rst @@ -0,0 +1,302 @@ +Roadmap +======= + +This page describes some of the major medium- to long-term goals for +MNE-Python. These are goals that require substantial effort and/or +API design considerations. Some of these may be suitable for Google Summer of +Code projects, while others require more extensive work. + + +Open +---- + +Type Annotations +^^^^^^^^^^^^^^^^ + +We would like to have type annotations for as much of our codebase as is practicable. +The main motivation for this is to improve the end-user experience when writing analysis +code that uses MNE-Python (i.e., code-completion suggestions, which rely on static +analysis / type hints). The main discussion of how to go about this is in :gh:`12243`. +Some piecemeal progress has been made (e.g., :gh:`12250`) but there isn't currently +anyone actively chipping away at this, hence its status as "open" rather than "in +progress". 
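As a rough illustration of the intended benefit, annotated signatures let static analysis tools infer types and offer completions without executing any code. A minimal sketch, assuming a hypothetical user-written helper (not an existing MNE-Python function)::

    from __future__ import annotations

    import mne


    def crop_and_filter(
        raw: mne.io.Raw,
        l_freq: float | None = 1.0,
        h_freq: float | None = 40.0,
        tmax: float = 60.0,
    ) -> mne.io.Raw:
        """Crop a Raw instance in place and band-pass filter it."""
        # With the annotation, an IDE knows ``raw`` is a Raw object and can
        # suggest methods such as ``.crop`` and ``.filter`` while typing.
        raw.crop(tmax=tmax)
        raw.filter(l_freq=l_freq, h_freq=h_freq)
        return raw

The same completions would become available for objects returned by MNE-Python itself (e.g., the result of :func:`mne.io.read_raw_fif`) once annotations land in the codebase.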
+ +Docstring De-duplication +^^^^^^^^^^^^^^^^^^^^^^^^ + +For many years, MNE-Python has used a technique borrowed from SciPy (called +`doccer `__) +for improving the consistency of parameter names and descriptions that recur across our +API. For example, parameters for number of parallel jobs to use, for specifying random +seeds, or for controlling the appearance of a colorbar on a plot --- all of these appear +in multiple functions/methods in MNE-Python. The approach works by re-defining a +function's ``__doc__`` attribute at import time, filling in placeholders in the +docstring's parameter list with fully spelled-out equivalents (which are stored in a big +dictionary called the ``docdict``). There are two major downsides: + +1. Many docstrings can't be read (at least not in full) while browsing the source code. +2. Static code analyzers don't have access to the completed docstrings, so things like + hover-tooltips in IDEs are less useful than they would be if the docstrings were + complete in-place. + +A possible route forward: + +- Convert all docstrings to be fully spelled out in the source code. +- Instead of maintaining the ``docdict``, maintain a registry of sets of + function+parameter combinations that ought to be identical. +- Add a test that the entries in the registry are indeed identical, so that + inconsistencies cannot be introduced in existing code. +- Add a test that parses docstrings in any *newly added* functions and looks for + parameter names that maybe should be added to the registry of identical docstrings. +- To allow for parameter descriptions that should be *nearly* identical (e.g., the same + except one refers to :class:`~mne.io.Raw` objects and the other refers to + :class:`~mne.Epochs` objects), consider using regular expressions to check the + "identity" of the parameter descriptions. + +The main discussion is in :gh:`8218`; a wider discussion among maintainers of other +packages in the Scientific Python Ecosystem is +`here `__. + +Containerization +^^^^^^^^^^^^^^^^ + +Users sometimes encounter difficulty getting a working MNE-Python environment on shared +resources (such as compute clusters), due to various problems (old versions of package +managers or graphics libraries, lack of sufficient permissions, etc). Providing a +robust and up-to-date containerized distribution of MNE-Python would alleviate some of +these issues. Initial efforts can be seen in the +`MNE-Docker repository `__; these efforts +should be revived, brought up-to-date as necessary, and integrated into our normal +release process so that the images do not become stale. + +Education +^^^^^^^^^ + +Live workshops/tutorials/trainings on MNE-Python have historically been organized +*ad-hoc* rather than centrally. Instructors for these workshops are often approached +directly by the organization or group desiring to host the training, and there is often +no way for users outside that group to attend (or even learn about the opportunity). At +a minimum, we would like to have a process for keeping track of educational events that +feature MNE-Python or other tools in the MNE suite. Ideally, we would go further and +initiate a recurring series of tutorials that could be advertised widely. Such events +might even provide a small revenue stream for MNE-Python, to support things like +continuous integration costs. 
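Returning to the docstring de-duplication route sketched above, the registry-plus-test idea can be prototyped in a few lines. The following is a minimal sketch only; the registry contents, the naive numpydoc parsing, and the choice of functions are illustrative assumptions, not a committed design::

    import re
    from itertools import combinations

    import mne

    # (parameter name, functions whose description of that parameter must match)
    REGISTRY = [
        ("n_jobs", [mne.filter.filter_data, mne.filter.resample]),
    ]


    def _param_description(func, param):
        """Naively pull the numpydoc description of ``param`` out of ``func``."""
        doc = func.__doc__ or ""
        # a 4-space-indented "name : type" line followed by 8-space-indented text
        pattern = rf"^ {{4}}{param} :.*\n((?: {{8}}.*\n?)+)"
        match = re.search(pattern, doc, flags=re.MULTILINE)
        return " ".join(match.group(1).split()) if match else ""


    def test_registry_consistency():
        for param, funcs in REGISTRY:
            descs = {func.__name__: _param_description(func, param) for func in funcs}
            for (name_a, desc_a), (name_b, desc_b) in combinations(descs.items(), 2):
                assert desc_a == desc_b, f"{param!r} differs: {name_a} vs {name_b}"

A second test could then parse docstrings of newly added functions and flag parameter names that appear in the registry but were spelled out differently.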
+ + +In progress +----------- + +Diversity, Equity, and Inclusion (DEI) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +MNE-Python is committed to recruiting and retaining a diverse pool of +contributors, see :gh:`8221`. + +First-class OPM support +^^^^^^^^^^^^^^^^^^^^^^^ +MNE-Python has support for reading some OPM data formats such as FIF and FIL/QuSpin. +Support should be added for other manufacturers, and standard preprocessing routines +should be added to deal with coregistration adjustment and OPM-specific artifacts. +See for example :gh:`11275`, :gh:`11276`, :gh:`11579`, :gh:`12179`. + +Deep source modeling +^^^^^^^^^^^^^^^^^^^^ +Existing source modeling and inverse routines are not explicitly designed to +deal with deep sources. Advanced algorithms exist from MGH for enhancing +deep source localization, and these should be implemented and vetted in +MNE-Python. See :gh:`6784`. + +Time-frequency classes +^^^^^^^^^^^^^^^^^^^^^^ +Historically our codebase had classes related to :term:`TFRs ` that +were incomplete. New classes are being built from the ground up: + +- new classes :class:`~mne.time_frequency.Spectrum` and + :class:`~mne.time_frequency.EpochsSpectrum` (implemented in :gh:`10184`, with + follow-up tweaks and bugfixes in :gh:`11178`, :gh:`11259`, :gh:`11280`, :gh:`11345`, + :gh:`11418`, :gh:`11563`, :gh:`11680`, :gh:`11682`, :gh:`11778`, :gh:`11921`, + :gh:`11978`, :gh:`12747`), and corresponding array-based constructors + :class:`~mne.time_frequency.SpectrumArray` and + :class:`~mne.time_frequency.EpochsSpectrumArray` (:gh:`11803`). + +- new class :class:`~mne.time_frequency.RawTFR` and updated classes + :class:`~mne.time_frequency.EpochsTFR` and :class:`~mne.time_frequency.AverageTFR`, + and corresponding array-based constructors :class:`~mne.time_frequency.RawTFRArray`, + :class:`~mne.time_frequency.EpochsTFRArray` and + :class:`~mne.time_frequency.AverageTFRArray` (implemented in :gh:`11282`, with + follow-ups in :gh:`12514`, :gh:`12842`). + +- new/updated classes for source-space frequency and time-frequency data are not yet + implemented. + +Other related issues: :gh:`6290`, :gh:`7671`, :gh:`8026`, :gh:`8724`, :gh:`9045`, +and PRs: :gh:`6609`, :gh:`6629`, :gh:`6672`, :gh:`6673`, :gh:`8397`, :gh:`8892`. + +Modernization of realtime processing +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +LSL has become the de facto standard for streaming data from EEG/MEG systems. +We should deprecate `MNE-Realtime`_ in favor of the newly minted `MNE-LSL`_. +We should then fully support MNE-LSL using modern coding best practices such as CI +integration. + +Core components of commonly used real-time processing pipelines should be implemented in +MNE-LSL, including but not limited to realtime IIR filtering, artifact rejection, +montage and reference setting, and online averaging. Integration with standard +MNE-Python plotting routines (evoked joint plots, topomaps, etc.) should be +supported with continuous updating. + +Clustering statistics API +^^^^^^^^^^^^^^^^^^^^^^^^^ +The current clustering statistics code has limited functionality. It should be +re-worked to create a new ``cluster_based_statistic`` or similar function. + +The new API will likely be along the lines of:: + + cluster_stat(obs, design, *, alpha=0.05, cluster_alpha=0.05, ...) + +with: + +``obs`` : :class:`pandas.DataFrame` + Has columns like "subject", "condition", and "data". + The "data" column holds things like :class:`mne.Evoked`, + :class:`mne.SourceEstimate`, :class:`mne.time_frequency.Spectrum`, etc. 
+``design`` : `str` + Likely Wilkinson notation to mirror :func:`patsy.dmatrices` (e.g., this is + is used by :class:`statsmodels.regression.linear_model.OLS`). Getting from the + string to the design matrix could be done via Patsy or more likely + `Formulaic `__. + +This generic API will support mixed within- and between-subjects designs, +different statistical functions/tests, etc. This should be achievable without +introducing any significant speed penalty (e.g., < 10% slower) compared to the existing +more specialized/limited functions, since most computation cost is in clustering rather +than statistical testing. + +The clustering function will return a user-friendly ``ClusterStat`` object or similar +that retains information about dimensionality, significance, etc. and facilitates +plotting and interpretation of results. + +Clear tutorials will be needed to: + +1. Show how different contrasts can be done (toy data). +2. Show some common analyses on real data (time-freq, sensor space, source space, etc.) + +Regression tests will be written to ensure equivalent outputs when compared to FieldTrip +for cases that FieldTrip also supports. + +More details are in :gh:`4859`; progress in :gh:`12663`. + + +.. _documentation-updates: + +Documentation updates +^^^^^^^^^^^^^^^^^^^^^ +Our documentation has many minor issues, which can be found under the tag +:gh:`labels/DOC`. + + +Completed +--------- + +3D visualization +^^^^^^^^^^^^^^^^ +Historically we used Mayavi for 3D visualization, but faced limitations and challenges +with it. We switched to PyVista to get major improvements, such as: + +1. *Proper notebook support (through ``ipyvtklink``)* (complete; updated to use ``trame``) +2. *Better interactivity with surface plots* (complete) +3. Time-frequency plotting (complementary to volume-based + :ref:`time-frequency-viz`) +4. Integration of multiple functions as done in ``mne_analyze``, e.g., + simultaneous source estimate viewing, field map + viewing, head surface display, etc. These were all available in + separate functions, but can now be combined in a single plot. + +The meta-issue tracking to-do lists for surface plotting was :gh:`7162`. + +Improved sEEG/ECoG/DBS support +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +iEEG-specific pipeline steps such as electrode localization and visualizations +are now available in `MNE-gui-addons`_. + +Access to open EEG/MEG databases +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Open EEG/MEG databases are now more easily accessible via standardized tools such as +`openneuro-py`_. + +Eye-tracking support +^^^^^^^^^^^^^^^^^^^^ +We had a GSoC student funded to improve support for eye-tracking data, see +`the GSoC proposal `__ +for details. An EyeLink data reader and analysis/plotting functions are now available. + +Pediatric and clinical MEG pipelines +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +MNE-Python provides automated analysis of BIDS-compliant datasets via +`MNE-BIDS-Pipeline`_. Functionality from the +`mnefun `__ pipeline, +which has been used extensively for pediatric data analysis at `I-LABS`_, +now provides better support for pediatric and clinical data processing. +Multiple processing steps (e.g., eSSS), sanity checks (e.g., cHPI quality), +and reporting (e.g., SSP joint plots, SNR plots) have been added. + +Integrate OpenMEEG via improved Python bindings +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +`OpenMEEG`_ is a state-of-the art solver for +forward modeling in the field of brain imaging with MEG/EEG. It solves +numerically partial differential equations (PDE). 
It is written in C++ with +Python bindings written in SWIG. +The ambition of the project is to integrate OpenMEEG into MNE offering to MNE +the ability to solve more forward problems (cortical mapping, intracranial +recordings, etc.). Tasks that have been completed: + +- Cleanup Python bindings (remove useless functions, check memory managements, + etc.) +- Understand how MNE encodes info about sensors (location, orientation, + integration points etc.) and allow OpenMEEG to be used. +- Modernize CI systems (e.g., using ``cibuildwheel``). +- Automated deployment on PyPI and conda-forge. + +.. _time-frequency-viz: + +Time-frequency visualization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +We implemented a viewer for interactive visualization of volumetric +source-time-frequency (5-D) maps on MRI slices (orthogonal 2D viewer). +`NutmegTrip `__ +(written by Sarang Dalal) provides similar functionality in MATLAB in +conjunction with FieldTrip. Example of NutmegTrip's source-time-frequency mode +in action (click for link to YouTube): + +.. image:: https://i.ytimg.com/vi/xKdjZZphdNc/maxresdefault.jpg + :target: https://www.youtube.com/watch?v=xKdjZZphdNc + :width: 50% + +See :func:`mne-gui-addons:mne_gui_addons.view_vol_stc`. + +Distributed computing support +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +`MNE-BIDS-Pipeline`_ has been enhanced with support for cloud computing +via `Dask`_ and :doc:`joblib `. +After configuring Dask to use local or remote distributed computing resources, +MNE-BIDS-Pipeline can readily make use of remote workers to parallelize +processing across subjects. + +2D visualization +^^^^^^^^^^^^^^^^ +`This goal `__ +was completed under CZI `EOSS2`_. Some additional enhancements that could also +be implemented are listed in :gh:`7751`. + +Tutorial / example overhaul +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +`This goal `__ +was completed under CZI `EOSS2`_. Ongoing documentation needs are listed in +:ref:`documentation-updates`. + +Cluster computing images +^^^^^^^^^^^^^^^^^^^^^^^^ +As part of `this goal `__, +we created docker images suitable for cloud computing via `MNE-Docker`_. + +.. _I-LABS: http://ilabs.washington.edu/ diff --git a/doc/development/whats_new.rst b/doc/development/whats_new.rst new file mode 100644 index 00000000000..24a88810746 --- /dev/null +++ b/doc/development/whats_new.rst @@ -0,0 +1,47 @@ +.. _whats_new: + +What's new +========== + +Changes for each version of MNE-Python are listed below. + +.. 
toctree:: + :maxdepth: 1 + + ../changes/dev.rst + ../changes/v1.10.rst + ../changes/v1.9.rst + ../changes/v1.8.rst + ../changes/v1.7.rst + ../changes/v1.6.rst + ../changes/v1.5.rst + ../changes/v1.4.rst + ../changes/v1.3.rst + ../changes/v1.2.rst + ../changes/v1.1.rst + ../changes/v1.0.rst + ../changes/v0.24.rst + ../changes/v0.23.rst + ../changes/v0.22.rst + ../changes/v0.21.rst + ../changes/v0.20.rst + ../changes/v0.19.rst + ../changes/v0.18.rst + ../changes/v0.17.rst + ../changes/v0.16.rst + ../changes/v0.15.rst + ../changes/v0.14.rst + ../changes/v0.13.rst + ../changes/v0.12.rst + ../changes/v0.11.rst + ../changes/v0.10.rst + ../changes/v0.9.rst + ../changes/v0.8.rst + ../changes/v0.7.rst + ../changes/v0.6.rst + ../changes/v0.5.rst + ../changes/v0.4.rst + ../changes/v0.3.rst + ../changes/v0.2.rst + ../changes/v0.1.rst + ../old_versions/index.rst diff --git a/doc/overview/cite.rst b/doc/documentation/cite.rst similarity index 76% rename from doc/overview/cite.rst rename to doc/documentation/cite.rst index e8be40e1301..5d67fec3ea3 100644 --- a/doc/overview/cite.rst +++ b/doc/documentation/cite.rst @@ -7,9 +7,12 @@ Citing the software ------------------- To cite specific version numbers of the software, you can use the DOIs provided -by `Zenodo `_. Additionally, we ask that -when citing the MNE-Python package, you cite the canonical journal article -reference :footcite:`GramfortEtAl2013a`: +by `Zenodo `_; look for the "versions" +list in the sidebar to find the correct DOI for the version you used in your +work (or to cite the project in a general way, use +https://doi.org/10.5281/zenodo.592483). Additionally, we ask that when citing +MNE-Python you also cite the canonical journal article reference +:footcite:`GramfortEtAl2013a`: .. footbibliography:: diff --git a/doc/documentation/cited.rst b/doc/documentation/cited.rst new file mode 100644 index 00000000000..f602d5d71ca --- /dev/null +++ b/doc/documentation/cited.rst @@ -0,0 +1,9 @@ +.. _cited: + +Papers citing MNE-Python +======================== + +Estimates provided by Google Scholar as of 16 December 2024: + +- `MNE (2,070) `_ +- `MNE-Python (3,850) `_ diff --git a/doc/overview/cookbook.rst b/doc/documentation/cookbook.rst similarity index 98% rename from doc/overview/cookbook.rst rename to doc/documentation/cookbook.rst index 5aab1045e2e..d5eb149002b 100644 --- a/doc/overview/cookbook.rst +++ b/doc/documentation/cookbook.rst @@ -13,7 +13,7 @@ References below refer to Python functions and objects. .. _flow_diagram: -.. figure:: images/flow_diagram.svg +.. figure:: ../_static/flow_diagram.svg :alt: MNE Workflow Flowchart :align: center @@ -185,10 +185,7 @@ has been completed as described in :ref:`CHDBBCEJ`. .. _BABGCDHA: -.. table:: Recommended subdivisions of an icosahedron and an octahedron for - the creation of source spaces. The approximate source spacing and - corresponding surface area have been calculated assuming a - 1000-cm2 surface area per hemisphere. +.. table:: Recommended subdivisions of an icosahedron and an octahedron for the creation of source spaces. The approximate source spacing and corresponding surface area have been calculated assuming a 1000-cm2 surface area per hemisphere. 
=========== ====================== =================== ============================= ``spacing`` Sources per hemisphere Source spacing / mm Surface area per source / mm2 diff --git a/doc/documentation/datasets.rst b/doc/documentation/datasets.rst new file mode 100644 index 00000000000..2ec98664e74 --- /dev/null +++ b/doc/documentation/datasets.rst @@ -0,0 +1,556 @@ +.. _datasets: + +Datasets Overview +################# + +.. note:: Contributing datasets to MNE-Python + :class: sidebar + + Do not hesitate to contact MNE-Python developers on the + `MNE Forum `_ to discuss the possibility of + adding more publicly available datasets. + +All the dataset fetchers are available in :mod:`mne.datasets`. To download any of the datasets, +use the ``data_path`` (fetches full dataset) or the ``load_data`` (fetches dataset partially) functions. + +All fetchers will check the default download location first to see if the dataset +is already on your computer, and only download it if necessary. The default +download location is also configurable; see the documentation of any of the +``data_path`` functions for more information. + +.. _sample-dataset: + +Sample +====== +:func:`mne.datasets.sample.data_path` + +These data were acquired with the Neuromag +Vectorview system at MGH/HMS/MIT Athinoula A. Martinos Center Biomedical +Imaging. EEG data from a 60-channel electrode cap was acquired simultaneously with +the MEG. The original MRI data set was acquired with a Siemens 1.5 T +Sonata scanner using an MPRAGE sequence. + +.. note:: These data are provided solely for the purpose of getting familiar + with the MNE software. The data should not be used to evaluate the + performance of the MEG or MRI system employed. + +In this experiment, checkerboard patterns were presented to the subject +into the left and right visual field, interspersed by tones to the +left or right ear. The interval between the stimuli was 750 ms. Occasionally +a smiley face was presented at the center of the visual field. +The subject was asked to press a key with the right index finger +as soon as possible after the appearance of the face. + +.. table:: Trigger codes for the sample data set. + + ========= ===== ========================================== + Name Contents + ========= ===== ========================================== + LA 1 Response to left-ear auditory stimulus + RA 2 Response to right-ear auditory stimulus + LV 3 Response to left visual field stimulus + RV 4 Response to right visual field stimulus + smiley 5 Response to the smiley face + button 32 Response triggered by the button press + ========= ===== ========================================== + +Contents of the data set +^^^^^^^^^^^^^^^^^^^^^^^^ + +The sample data set contains two main directories: ``MEG/sample`` (the MEG/EEG +data) and ``subjects/sample`` (the MRI reconstructions). +In addition to subject ``sample``, the MRI surface reconstructions from another +subject, morph, are provided to demonstrate morphing capabilities. + +.. table:: Contents of the MEG/sample directory. + + ======================== ===================================================================== + File Contents + ======================== ===================================================================== + sample/audvis_raw.fif The raw MEG/EEG data + audvis.ave A template script for off-line averaging + auvis.cov A template script for the computation of a noise-covariance matrix + ======================== ===================================================================== + +.. 
table:: Overview of the contents of the subjects/sample directory. + + ======================= ====================================================================== + File / directory Contents + ======================= ====================================================================== + bem Directory for the forward modelling data + bem/watershed BEM surface segmentation data computed with the watershed algorithm + bem/inner_skull.surf Inner skull surface for BEM + bem/outer_skull.surf Outer skull surface for BEM + bem/outer_skin.surf Skin surface for BEM + sample-head.fif Skin surface in fif format for mne_analyze visualizations + surf Surface reconstructions + mri/T1 The T1-weighted MRI data employed in visualizations + ======================= ====================================================================== + +The following preprocessing steps have been already accomplished +in the sample data set: + +- The MRI surface reconstructions have + been computed using the FreeSurfer software. + +- The BEM surfaces have been created with the watershed algorithm, + see :ref:`bem_watershed_algorithm`. + +The **sample** dataset is distributed with :ref:`fsaverage` for convenience. + +.. _ucl-opm-auditory-dataset: + +UCL OPM Auditory +================ +:func:`mne.datasets.ucl_opm_auditory.data_path`. + +A basic auditory evoked field experiment using an OPM setup from FIL at UCL. +See :footcite:`SeymourEtAl2022` for details. + +.. topic:: Examples + + * :ref:`tut-opm-processing` + +Brainstorm +========== +Dataset fetchers for three Brainstorm tutorials are available. Users must agree to the +license terms of these datasets before downloading them. These files are recorded in a CTF 275 system +and are provided in native CTF format (.ds files). + +Auditory +^^^^^^^^ +:func:`mne.datasets.brainstorm.bst_raw.data_path`. + +Details about the data can be found at the Brainstorm `auditory dataset tutorial`_. + +.. topic:: Examples + + * :ref:`tut-brainstorm-auditory`: Partially replicates the original Brainstorm tutorial. + +Resting state +^^^^^^^^^^^^^ +:func:`mne.datasets.brainstorm.bst_resting.data_path` + +Details can be found at the Brainstorm `resting state dataset tutorial`_. + +.. topic:: Examples + + * :ref:`mne-connectivity:ex-envelope-correlation` + +Median nerve +^^^^^^^^^^^^ +:func:`mne.datasets.brainstorm.bst_raw.data_path` + +Details can be found at the Brainstorm `median nerve dataset tutorial`_. + +.. topic:: Examples + + * :ref:`ex-brainstorm-raw` + +SPM faces +========= +:func:`mne.datasets.spm_face.data_path` + +The `SPM faces dataset`_ contains EEG, MEG and fMRI recordings on face perception. + +.. topic:: Examples + + * :ref:`ex-spm-faces` Full pipeline including artifact removal, epochs averaging, forward model computation and source reconstruction using dSPM on the contrast: "faces - scrambled". + +EEGBCI motor imagery +==================== +:func:`mne.datasets.eegbci.load_data` + +The EEGBCI dataset is documented in :footcite:`SchalkEtAl2004` and on the +`PhysioNet documentation page `_. +The data set is available at PhysioNet :footcite:`GoldbergerEtAl2000`. +It contains 64-channel EEG recordings from 109 subjects and 14 runs on each +subject in EDF+ format. The recordings were made using the BCI2000 system. 
+To load a subject, do:: + + from mne.io import concatenate_raws, read_raw_edf + from mne.datasets import eegbci + subjects = [1] # may vary + runs = [4, 8, 12] # may vary + raw_fnames = eegbci.load_data(subjects, runs) + raws = [read_raw_edf(f, preload=True) for f in raw_fnames] + # concatenate runs from subject + raw = concatenate_raws(raws) + # make channel names follow standard conventions + eegbci.standardize(raw) + +.. topic:: Examples + + * :ref:`ex-decoding-csp-eeg` + +.. _somato-dataset: + +Somatosensory +============= +:func:`mne.datasets.somato.data_path` + +This dataset contains somatosensory data with event-related synchronizations +(ERS) and desynchronizations (ERD). + +.. topic:: Examples + + * :ref:`tut-sensors-time-freq` + * :ref:`ex-inverse-source-power` + * :ref:`ex-time-freq-global-field-power` + +Multimodal +========== +:func:`mne.datasets.multimodal.data_path` + +This dataset contains a single subject recorded at Otaniemi (Aalto University) +with auditory, visual, and somatosensory stimuli. + +.. topic:: Examples + + * :ref:`ex-io-ave-fiff` + +.. _fnirs-motor-dataset: + +fNIRS motor +=========== +:func:`mne.datasets.fnirs_motor.data_path` + +This dataset contains a single subject recorded at Macquarie University. +It has optodes placed over the motor cortex. There are three conditions: + +- tapping the left thumb to fingers +- tapping the right thumb to fingers +- a control where nothing happens + +The tapping lasts 5 seconds, and there are 30 trials of each condition. + +.. topic:: Examples + + * :ref:`tut-fnirs-processing` + +High frequency SEF +================== +:func:`mne.datasets.hf_sef.data_path()` + +This dataset contains somatosensory evoked fields (median nerve stimulation) +with thousands of epochs. It was recorded with an Elekta TRIUX MEG device at +a sampling frequency of 3 kHz. The dataset is suitable for investigating +high-frequency somatosensory responses. Data from two subjects are included +with MRI images in DICOM format and FreeSurfer reconstructions. + +.. topic:: Examples + + * :ref:`high-frequency SEF responses `. + +Visual 92 object categories +=========================== +:func:`mne.datasets.visual_92_categories.data_path`. + +This dataset is recorded using a 306-channel Neuromag vectorview system. + +Experiment consisted in the visual presentation of 92 images of human, animal +and inanimate objects either natural or artificial :footcite:`CichyEtAl2014`. +Given the high number of conditions this dataset is well adapted to an approach +based on Representational Similarity Analysis (RSA). + +.. topic:: Examples + + * :ref:`Representational Similarity Analysis (RSA) `: Partially replicates the results from :footcite:`CichyEtAl2014`. + + +mTRF Dataset +============ +:func:`mne.datasets.mtrf.data_path`. + +This dataset contains 128 channel EEG as well as natural speech stimulus features, +which is also available `here `_. + +The experiment consisted of subjects listening to natural speech. +The dataset contains several feature representations of the speech stimulus, +suitable for using to fit continuous regression models of neural activity. +More details and a description of the package can be found in +:footcite:`CrosseEtAl2016`. + +.. topic:: Examples + + * :ref:`Receptive Field Estimation and Prediction `: Partially replicates the results from :footcite:`CrosseEtAl2016`. + + +.. _kiloword-dataset: + +Kiloword dataset +================ +:func:`mne.datasets.kiloword.data_path`. 
+ +This dataset consists of averaged EEG data from 75 subjects performing a +lexical decision task on 960 English words :footcite:`DufauEtAl2015`. The words +are richly annotated, and can be used for e.g. multiple regression estimation +of EEG correlates of printed word processing. + + +KIT phantom dataset +============================= +:func:`mne.datasets.phantom_kit.data_path`. + +This dataset was obtained with a phantom on a KIT system at +Macquarie University in Sydney, Australia. + +.. topic:: Examples + + * :ref:`tut-phantom-KIT` + + +4D Neuroimaging / BTi dataset +============================= +:func:`mne.datasets.phantom_4dbti.data_path`. + +This dataset was obtained with a phantom on a 4D Neuroimaging / BTi system at +the MEG center in La Timone hospital in Marseille. + +.. topic:: Examples + + * :ref:`tut-phantom-4Dbti` + +Kernel OPM phantom dataset +========================== +:func:`mne.datasets.phantom_kernel.data_path`. + +This dataset was obtained with a Neuromag phantom in a Kernel Flux (720-sensor) +system at ILABS at the University of Washington. Only 7 out of 42 possible modules +were active for testing purposes, yielding 121 channels of data with limited coverage +(mostly occipital and parietal). + +.. topic:: Examples + + * :ref:`ex-kernel-opm-phantom` + +OPM +=== +:func:`mne.datasets.opm.data_path` + +OPM data acquired using an Elekta DACQ, simply piping the data into Elekta +magnetometer channels. The FIF files thus appear to come from a TRIUX system +that is only acquiring a small number of magnetometer channels instead of the +whole array. + +The OPM ``coil_type`` is custom, requiring a custom ``coil_def.dat``. +The new ``coil_type`` is 9999. + +OPM co-registration differs a bit from the typical SQUID-MEG workflow. +No ``-trans.fif`` file is needed for the OPMs, the FIF files include proper +sensor locations in MRI coordinates and no digitization of RPA/LPA/Nasion. +Thus the MEG<->Head coordinate transform is taken to be an identity matrix +(i.e., everything is in MRI coordinates), even though this mis-identifies +the head coordinate frame (which is defined by the relationship of the +LPA, RPA, and Nasion). + +Triggers include: + +* Median nerve stimulation: trigger value 257. +* Magnetic trigger (in OPM measurement only): trigger value 260. + 1 second before the median nerve stimulation, a magnetic trigger is piped into the MSR. + This was to be able to check the synchronization between OPMs retrospectively, as each + sensor runs on an independent clock. Synchronization turned out to be satisfactory. + +.. topic:: Examples + + * :ref:`ex-opm-somatosensory` + * :ref:`ex-opm-resting-state` + +The Sleep PolySomnoGraphic Database +=================================== +:func:`mne.datasets.sleep_physionet.age.fetch_data` +:func:`mne.datasets.sleep_physionet.temazepam.fetch_data` + +The sleep PhysioNet database contains 197 whole-night PolySomnoGraphic sleep +recordings, containing EEG, EOG, chin EMG, and event markers. Some records also +contain respiration and body temperature. Corresponding hypnograms (sleep +patterns) were manually scored by well-trained technicians according to the +Rechtschaffen and Kales manual, and are also available. If you use these +data please cite :footcite:`KempEtAl2000` and :footcite:`GoldbergerEtAl2000`. + +.. topic:: Examples + + * :ref:`tut-sleep-stage-classif` + +Reference channel noise MEG data set +==================================== +:func:`mne.datasets.refmeg_noise.data_path`. 
+ +This dataset was obtained with a 4D Neuroimaging / BTi system at +the University Clinic - Erlangen, Germany. There are powerful bursts of +external magnetic noise throughout the recording, which make it a good +example for automatic noise removal techniques. + +.. topic:: Examples + + * :ref:`ex-megnoise_processing` + +Miscellaneous Datasets +====================== +These datasets are used for specific purposes in the documentation and in +general are not useful for separate analyses. + +.. _fsaverage: + +fsaverage +^^^^^^^^^ +:func:`mne.datasets.fetch_fsaverage` + +For convenience, we provide a function to separately download and extract the +(or update an existing) fsaverage subject. See also the +:ref:`background information on fsaverage `. + +.. topic:: Examples + + :ref:`tut-eeg-fsaverage-source-modeling` + +Infant template MRIs +^^^^^^^^^^^^^^^^^^^^ +:func:`mne.datasets.fetch_infant_template` + +This function will download an infant template MRI from +:footcite:`OReillyEtAl2021` along with MNE-specific files. + +ECoG Dataset +^^^^^^^^^^^^ +:func:`mne.datasets.misc.data_path`. Data exists at ``/ecog/``. + +This dataset contains a sample electrocorticography (ECoG) dataset. It includes +two grids of electrodes and ten shaft electrodes with simulated motor data (actual data +pending availability). + +.. topic:: Examples + + * :ref:`ex-electrode-pos-2d`: Demonstrates how to project a 3D electrode location onto a 2D image, a common procedure in ECoG analyses. + * :ref:`tut-ieeg-localize`: Demonstrates how to use a graphical user interface to locate electrode contacts as well as warp them to a common atlas. + +sEEG Dataset +^^^^^^^^^^^^ +:func:`mne.datasets.misc.data_path`. Data exists at ``/seeg/``. + +This dataset contains a sample stereoelectroencephalography (sEEG) dataset. +It includes 21 shaft electrodes during a two-choice movement task on a keyboard. + +.. topic:: Examples + + * :ref:`tut-ieeg-localize`: Demonstrates how to use a graphical user interface to locate electrode contacts as well as warp them to a common atlas. + * :ref:`tut-working-with-seeg`: Demonstrates ways to plot sEEG anatomy and results. + +.. _limo-dataset: + +LIMO Dataset +^^^^^^^^^^^^ +:func:`mne.datasets.limo.load_data`. + +In the original LIMO experiment (see :footcite:`RousseletEtAl2010`), participants +performed a +two-alternative forced choice task, discriminating between two face stimuli. +Subjects discriminated the same two faces during the whole experiment. +The critical manipulation consisted of the level of noise added to the +face-stimuli during the task, making the faces more or less discernible to the +observer. + +The presented faces varied across a noise-signal (or phase-coherence) continuum +spanning from 0 to 100% in increasing steps of 10%. In other words, faces with +high phase-coherence (e.g., 90%) were easy to identify, while faces with low +phase-coherence (e.g., 10%) were hard to identify and by extension hard to +discriminate. + +.. topic:: Examples + + * :ref:`Single trial linear regression analysis with the LIMO dataset + `: Explores data from a single subject of the LIMO dataset + and demonstrates how to fit a single trial linear regression using the + information contained in the metadata of the individual datasets. + +.. 
_erp-core-dataset: + +ERP CORE Dataset +^^^^^^^^^^^^^^^^ +:func:`mne.datasets.erp_core.data_path` + +The original `ERP CORE dataset`_ :footcite:`Kappenman2021` contains data from +40 participants who completed 6 EEG experiments, carefully crafted to evoke +7 well-known event-related potential (ERP) components. + +Currently, the MNE-Python ERP CORE dataset only provides data from one +participant (subject ``001``) of the Flankers paradigm, which elicits the +lateralized readiness potential (LRP) and error-related negativity (ERN). The +data provided is **not** the original data from the ERP CORE dataset, but +rather a slightly modified version, designed to demonstrate the Epochs metadata +functionality. For example, we already set the references and montage +correctly, and stored events as Annotations. Data is provided in ``FIFF`` +format. + +.. topic:: Examples + + * :ref:`tut-autogenerate-metadata`: Learn how to auto-generate + `~mne.Epochs` metadata, and visualize the error-related negativity (ERN) + ERP component. + +.. _ssvep-dataset: + +SSVEP +===== +:func:`mne.datasets.ssvep.data_path` + +This is a simple example dataset with frequency-tagged visual stimulation: +N=2 participants observed checkerboard patterns inverting with a constant +frequency of either 12.0 Hz or 15.0 Hz. There were 10 trials of 20.0 s length each, +and 32-channel wet EEG was recorded. + +Data format: BrainVision .eeg/.vhdr/.vmrk files organized according to the BIDS +standard. + +.. topic:: Examples + + * :ref:`tut-ssvep` + +.. _eyelink-dataset: + +EYELINK +======= +:func:`mne.datasets.eyelink.data_path` + +Two small example datasets of eye-tracking data from SR Research EyeLink. + +EEG-Eyetracking +^^^^^^^^^^^^^^^ +:func:`mne.datasets.eyelink.data_path`. Data exists at ``/eeg-et/``. + +Contains both EEG (EGI) and eye-tracking (ASCII format) data recorded from a +pupillary light reflex experiment, stored in separate files. One participant fixated +on the screen while short light flashes appeared. Event onsets were recorded by a +photodiode attached to the screen and were sent to both the EEG and eye-tracking +systems. + +.. topic:: Examples + + * :ref:`tut-eyetrack` + +Freeviewing +^^^^^^^^^^^ +:func:`mne.datasets.eyelink.data_path`. Data exists at ``/freeviewing/``. + +Contains eye-tracking data (ASCII format) from one participant who was free-viewing a +video of a natural scene. In some videos, the natural scene was pixelated such that +the people in the scene were unrecognizable. + +.. topic:: Examples + + * :ref:`tut-eyetrack-heatmap` + +References +========== + +.. footbibliography:: + + +.. LINKS + +.. _auditory dataset tutorial: https://neuroimage.usc.edu/brainstorm/DatasetAuditory +.. _resting state dataset tutorial: https://neuroimage.usc.edu/brainstorm/DatasetResting +.. _median nerve dataset tutorial: https://neuroimage.usc.edu/brainstorm/DatasetMedianNerveCtf +.. _SPM faces dataset: https://www.fil.ion.ucl.ac.uk/spm/data/mmfaces/ +.. _ERP CORE dataset: https://erpinfo.org/erp-core diff --git a/doc/overview/design_philosophy.rst b/doc/documentation/design_philosophy.rst similarity index 100% rename from doc/overview/design_philosophy.rst rename to doc/documentation/design_philosophy.rst index 2330150a063..5bdec09b4fa 100644 --- a/doc/overview/design_philosophy.rst +++ b/doc/documentation/design_philosophy.rst @@ -1,5 +1,3 @@ -.. include:: ../links.inc - .. _design_philosophy: Design philosophy @@ -96,3 +94,5 @@ of data. .. LINKS .. _`method chaining`: https://en.wikipedia.org/wiki/Method_chaining + +.. 
include:: ../links.inc diff --git a/doc/glossary.rst b/doc/documentation/glossary.rst similarity index 96% rename from doc/glossary.rst rename to doc/documentation/glossary.rst index bdd1e54cfef..89a5c477a75 100644 --- a/doc/glossary.rst +++ b/doc/documentation/glossary.rst @@ -1,8 +1,6 @@ Glossary ======== -.. currentmodule:: mne - The Glossary provides short definitions of vocabulary specific to MNE-Python and general neuroimaging concepts. If you think a term is missing, please consider `creating a new issue`_ or `opening a pull request`_ to add it. @@ -43,15 +41,15 @@ general neuroimaging concepts. If you think a term is missing, please consider Channels refer to MEG sensors, EEG electrodes or other sensors such as EOG, ECG, sEEG, ECoG, etc. Channels usually have a type (such as gradiometer), and a unit (such as T/m) used e.g. for - plotting. See also :term:`data channels`. + plotting. See also :term:`data channels` and :term:`non-data channels`. data channels Many functions in MNE-Python operate on "data channels" by default. These are channels that contain electrophysiological data from the brain, as opposed to other channel types such as EOG, ECG, stimulus/trigger, - or acquisition system status data. The set of channels considered - "data channels" in MNE contains the following types (together with scale - factors for plotting): + or acquisition system status data (see :term:`non-data channels`). + The set of channels considered "data channels" in MNE contains the + following types (together with scale factors for plotting): .. mne:: data channels list @@ -289,6 +287,13 @@ general neuroimaging concepts. If you think a term is missing, please consider data into a common space for statistical analysis. See :ref:`ch_morph` for more details. + non-data channels + All types of channels other than :term:`data channels`. + The set of channels considered "non-data channels" in MNE contains the + following types (together with scale factors for plotting): + + .. mne:: non-data channels list + OPM optically pumped magnetometer An optically pumped magnetometer (OPM) is a type of magnetometer @@ -352,6 +357,10 @@ general neuroimaging concepts. If you think a term is missing, please consider A selection is a set of picked channels (for example, all sensors falling within a :term:`region of interest`). + sensor types + All the sensors handled by MNE-Python can be divided into two categories: + :term:`data channels` and :term:`non-data channels`. + STC source estimate source time course diff --git a/doc/overview/implementation.rst b/doc/documentation/implementation.rst similarity index 93% rename from doc/overview/implementation.rst rename to doc/documentation/implementation.rst index ebae0201f7a..49fe31bac9c 100644 --- a/doc/overview/implementation.rst +++ b/doc/documentation/implementation.rst @@ -124,6 +124,14 @@ Morphing and averaging source estimates :start-after: morph-begin-content +.. _ged: + +Generalized eigendecomposition in decoding +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. include:: ../_includes/ged.rst + :start-after: ged-begin-content + References ^^^^^^^^^^ .. footbibliography:: diff --git a/doc/documentation/index.rst b/doc/documentation/index.rst new file mode 100644 index 00000000000..764fcd08188 --- /dev/null +++ b/doc/documentation/index.rst @@ -0,0 +1,63 @@ +.. _documentation_overview: + +Documentation overview +====================== + +.. note:: + + If you haven't already installed MNE-Python, please take a look + at our :ref:`installation guides`. 
Please also kindly find some + resources for :doc:`../help/learn_python` if you need to. + + +The documentation for MNE-Python is divided into four main sections: + +1. The :doc:`../auto_tutorials/index` provide narrative explanations, sample + code, and expected output for the most common MNE-Python analysis tasks. The + emphasis is on thorough explanations that get new users up to speed quickly, + at the expense of covering only a limited number of topics. + +2. The :doc:`How-to Examples <../auto_examples/index>` provides working code + samples demonstrating various analysis and visualization techniques. These + examples often lack the narrative explanations seen in the tutorials, but + can be a useful way to discover new analysis or plotting ideas, or to see + how a particular technique you've read about can be applied using + MNE-Python. + +3. The :doc:`glossary` provides short definitions of MNE-Python-specific + vocabulary and general neuroimaging concepts. The glossary is often a good + place to look if you don't understand a term or acronym used somewhere else + in the documentation. + +4. The :doc:`API reference <../api/python_reference>` provides documentation for + the classes, functions and methods in the MNE-Python codebase. This is the + same information that is rendered when running + :samp:`help(mne.{})` in an interactive Python session, or + when typing :samp:`mne.{}?` in an IPython session or Jupyter + notebook. + +The rest of the MNE-Python documentation pages (parts outside of the four +categories above) are shown in the navigation menu, including the +:ref:`list of example datasets`, +:ref:`implementation details`, and more. +Documentation for the related C and MATLAB tools are available here: + +- `MNE-MATLAB`_ (repository) +- `MNE-C `_ (PDF) + +.. toctree:: + :hidden: + + Tutorials<../auto_tutorials/index> + Examples<../auto_examples/index> + glossary + Implementation details + design_philosophy + Example datasets + Command-line tools<../generated/commands> + ../help/migrating + cookbook + cite + cited + +.. include:: ../links.inc diff --git a/doc/funding.rst b/doc/funding.rst index 4e0827df4d1..bbf25a7165c 100644 --- a/doc/funding.rst +++ b/doc/funding.rst @@ -29,7 +29,7 @@ Development of MNE-Python has been supported by: `14-NEUC-0002-01 `_, **IDEX** Paris-Saclay `11-IDEX-0003-02 `_ -- |cds| **Paris-Saclay Center for Data Science:** +- |cds| |cdsdk| **Paris-Saclay Center for Data Science:** `PARIS-SACLAY `_ - |goo| **Google:** Summer of code (×7 years) @@ -61,9 +61,10 @@ institutions include: :class: only-dark .. |doe| image:: _static/funding/doe.svg .. |anr| image:: _static/funding/anr.svg -.. |cds| image:: _static/funding/cds.png +.. |cds| image:: _static/funding/cds.svg + :class: only-light +.. |cdsdk| image:: _static/funding/cds-dark.svg + :class: only-dark .. |goo| image:: _static/funding/google.svg .. |ama| image:: _static/funding/amazon.svg .. |czi| image:: _static/funding/czi.svg - -.. include:: links.inc \ No newline at end of file diff --git a/doc/overview/faq.rst b/doc/help/faq.rst similarity index 97% rename from doc/overview/faq.rst rename to doc/help/faq.rst index 14c574bb3cb..7720885d643 100644 --- a/doc/overview/faq.rst +++ b/doc/help/faq.rst @@ -1,5 +1,3 @@ -.. include:: ../links.inc - .. _faq: ================================ @@ -174,7 +172,7 @@ have an ``n_jobs`` flag but may still use multiple cores. 
This is because support multithreading: - `OpenBLAS `_ -- `Intel Math Kernel Library (MKL) `_, +- `Intel Math Kernel Library (MKL) `_, which uses `OpenMP `_ To control how many cores are used for linear-algebra-heavy functions like @@ -236,9 +234,11 @@ of data. We'll discuss some major ones here, with some of their implications: - :func:`mne.Epochs.decimate`, which does the same thing as the ``decim`` parameter in the :class:`mne.Epochs` constructor, sub-selects every - :math:`N^{th}` sample before and after each event. This should only be - used when the raw data have been sufficiently low-passed e.g. by - :func:`mne.io.Raw.filter` to avoid aliasing artifacts. + :math:`N^{th}` sample before and after each event. To avoid aliasing + artifacts, the raw data should be sufficiently low-passed before decimation. + It is recommended to use :func:`mne.io.Raw.filter` with ``h_freq`` set to + half the new sampling rate (fs/2N) or lower, as per the Nyquist criterion, to + ensure effective attenuation of frequency content above this threshold. - :func:`mne.Epochs.resample`, :func:`mne.Evoked.resample`, and :func:`mne.SourceEstimate.resample` all resample data. @@ -277,6 +277,7 @@ magnitude. Forward and Inverse Solution ============================ +.. _faq_how_should_i_regularize: How should I regularize the covariance matrix? ---------------------------------------------- diff --git a/doc/help/index.rst b/doc/help/index.rst new file mode 100644 index 00000000000..ceaf7a10c25 --- /dev/null +++ b/doc/help/index.rst @@ -0,0 +1,27 @@ +.. _help: + +Getting help +^^^^^^^^^^^^ + +There are several places to obtain help with MNE software tools. + +- The `MNE Forum`_ is a good placed to go for both troubleshooting and general + questions. +- The :ref:`faq` page has some troubleshooting tips, and is a good source of + general information. There are also some troubleshooting tips built into + the :ref:`Python ` and + :ref:`MNE-Python ` installation pages (look for the + |hand-paper| symbols), and some tips related to 3D plotting problems on + :ref:`the advanced setup page `. +- If you want to request new features or if you're confident that you have + found a bug, please create a new issue on the `GitHub issues page`_. + When reporting bugs, please try to replicate the bug with the MNE-Python + :ref:`sample data `, and make every effort to simplify your + example script to only the elements necessary to replicate the bug. + + +.. toctree:: + :hidden: + + learn_python + faq diff --git a/doc/overview/learn_python.rst b/doc/help/learn_python.rst similarity index 97% rename from doc/overview/learn_python.rst rename to doc/help/learn_python.rst index 87328c82891..25d42b3847f 100644 --- a/doc/overview/learn_python.rst +++ b/doc/help/learn_python.rst @@ -1,5 +1,3 @@ -.. include:: ../links.inc - .. _learn-python: Getting started with Python diff --git a/doc/overview/migrating.rst b/doc/help/migrating.rst similarity index 97% rename from doc/overview/migrating.rst rename to doc/help/migrating.rst index 4fe4ba3ce4f..9aeb945c418 100644 --- a/doc/overview/migrating.rst +++ b/doc/help/migrating.rst @@ -86,6 +86,6 @@ Potential pitfalls - The concept of channel types is critical in MNE because it supports analysis of multimodal data (e.g., EEG, MEG, EOG, Stim channel, etc) whereas most EEGLAB functions assume all channels are of the same type (EEG). To restrict - channels to a single type, see :func:`mne.pick_types`, :meth:`raw.pick_types - `, :meth:`epochs.pick_types `, - :meth:`evoked.pick_types `, etc. 
+ channels to a single type, see :func:`mne.pick_types`, + :meth:`raw.pick() `, :meth:`epochs.pick() ` + and :meth:`evoked.pick() `. diff --git a/doc/index.rst b/doc/index.rst index 12295e60aa5..7629f3d037b 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,3 +1,5 @@ +:html_theme.sidebar_secondary.remove: + .. title:: MNE .. The page title must be in rST for it to show in next/prev page buttons. @@ -33,7 +35,7 @@ MNE-Python Homepage :hidden: Install - Documentation - API Reference - Get help - Development + Documentation + API Reference + Get Help + Development diff --git a/doc/install/advanced.rst b/doc/install/advanced.rst index cfb76ebbf38..f1e217403aa 100644 --- a/doc/install/advanced.rst +++ b/doc/install/advanced.rst @@ -1,5 +1,3 @@ -.. include:: ../links.inc - .. _advanced_setup: Advanced setup @@ -103,49 +101,51 @@ development version of MNE-Python. If you plan to contribute to MNE-Python, or just prefer to use git rather than pip to make frequent updates, there are instructions for installing from a ``git clone`` in the :ref:`contributing`. +Choosing the Qt framework +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``conda-forge`` version of MNE-Python ships with PyQt5. If you would like to +use a different binding, you can instead install MNE-Python via ``pip``: + +.. code-block:: console + + $ pip install "mne[full]" # uses PyQt6 + $ pip install "mne[full-pyqt6]" # same as above + $ pip install "mne[full-pyside6]" # use PySide6 + $ pip install "mne[full-no-qt]" # don't install any Qt binding -.. _other-py-distros: +.. _CUDA: -Other Python distributions +Fixing dock icons on Linux ^^^^^^^^^^^^^^^^^^^^^^^^^^ -While the `Anaconda`_ Python distribution provides many conveniences, other -distributions of Python should also work with MNE-Python. In particular, -`Miniconda`_ is a lightweight alternative to Anaconda that is fully compatible; -like Anaconda, Miniconda includes the ``conda`` command line tool for -installing new packages and managing environments; unlike Anaconda, Miniconda -starts off with a minimal set of around 30 packages instead of Anaconda's -hundreds. See the `installation instructions for Miniconda`_ for more info. -A similar alternative is `MiniForge`_, which uses the ``conda-forge`` channel -as the default source for package installation (saving you the trouble of -typing ``--channel=conda-forge`` with each ``conda install`` command). - -.. warning:: - - If you have the ``PYTHONPATH`` or ``PYTHONHOME`` environment variables set, - you may run into difficulty using Anaconda. See the - `Anaconda troubleshooting guide`_ for more information. Note that it is - easy to switch between ``conda``-managed Python installations and the - system Python installation using the ``conda activate`` and ``conda - deactivate`` commands, so you may find that after adopting Anaconda it is - possible (indeed, preferable) to leave ``PYTHONPATH`` and ``PYTHONHOME`` - permanently unset. - - -It is also possible to use a system-level installation of Python (version -|min_python_version| or higher) and use ``pip`` to install MNE-Python and its -dependencies, using the provided `requirements file`_: +On newer versions of Ubuntu (e.g., 24.04), applications must supply a ``.desktop`` +file associated with them, otherwise a generic icon will be used like: -.. code-block:: console +.. 
image:: ../_static/default_linux_dock_icon.png + :alt: Default Linux dock icon - $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/main/requirements.txt - $ pip install --user -r requirements.txt +To fix this, you can create a ``.desktop`` file for MNE-Python. Here is an example +file that you can save as ``~/.local/share/applications/mne-python.desktop`` after +fixing the path to the MNE-Python icon, which you can download +`here `__ +if needed: -Other configurations will probably also work, but we may be unable to offer -support if you encounter difficulties related to your particular Python -installation choices. +.. code-block:: ini -.. _CUDA: + [Desktop Entry] + Type=Application + Version=1.5 + Name=MNE-Python + StartupWMClass=MNE-Python + Icon=/path/to/mne-python/mne/icons/mne_default_icon.png + SingleMainWindow=true + NoDisplay=true + +It should make the icon appear correctly in the dock: + +.. image:: ../_static/mne_python_dock_icon.png + :alt: MNE-Python dock icon GPU acceleration with CUDA ^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -211,12 +211,20 @@ or by doing :func:`mne.viz.set_3d_options(antialias=False) ` within a given Python session. +Some hardware-accelerated graphics on linux (e.g., some Intel graphics cards) +provide an insufficient implementation of OpenGL, and in those cases it can help to +force software rendering instead with something like: + +.. code-block:: console + + $ export LIBGL_ALWAYS_SOFTWARE=true + Another issue that may come up is that the MESA software itself may be out of date in certain operating systems, for example CentOS. This may lead to incomplete rendering of some 3D plots. A solution is described in this `Github comment `_. It boils down to building a newer version (e.g., 18.3.6) locally following a variant of `these instructions `_. -If you have CentOS 7 or newer, you can also try some `prebuilt binaries `_ we made. +If you have CentOS 7 or newer, you can also try some `prebuilt binaries `_ we made. After downloading the files, untar them and add them to the appropriate library paths using the following commands: @@ -273,10 +281,23 @@ of VTK and/or QT are incompatible. This series of commands should fix it: If you installed VTK using ``pip`` rather than ``conda``, substitute the first line for ``pip uninstall -y vtk``. +3D plotting trouble on Linux +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you are having trouble with 3D plotting on Linux, one possibility is that you +are using Wayland for graphics. To check, you can do: + +.. code-block:: console + + $ echo $XDG_SESSION_TYPE + wayland + +If so, you will need to tell Qt to use X11 instead of Wayland. You can do this +by setting ``export QT_QPA_PLATFORM=xcb`` in your terminal session. To make it +permanent for your logins, you can set it for example in ``~/.profile``. .. LINKS -.. _environment file: https://raw.githubusercontent.com/mne-tools/mne-python/main/environment.yml .. _`pyvista`: https://docs.pyvista.org/ .. _`X server`: https://en.wikipedia.org/wiki/X_Window_System .. _`xvfb`: https://en.wikipedia.org/wiki/Xvfb diff --git a/doc/install/check_installation.rst b/doc/install/check_installation.rst index 70f97c9c9c1..4a7592a66e8 100644 --- a/doc/install/check_installation.rst +++ b/doc/install/check_installation.rst @@ -1,5 +1,3 @@ -.. include:: ../links.inc - .. _testing-installation: @@ -18,26 +16,41 @@ a terminal:: This should display some system information along with the versions of MNE-Python and its dependencies. 
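If you prefer to stay inside Python rather than a terminal, the same report can be printed with :func:`mne.sys_info` (a minimal sketch):

.. code-block:: python

    import mne

    mne.sys_info()  # prints platform, Python, and dependency versions
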
Typical output looks like this:: - Platform: Linux-5.0.0-1031-gcp-x86_64-with-glibc2.2.5 - Python: 3.8.1 (default, Dec 20 2019, 10:06:11) [GCC 7.4.0] - Executable: /home/travis/virtualenv/python3.8.1/bin/python - CPU: x86_64: 2 cores - Memory: 7.8 GB - - mne: 0.21.dev0 - numpy: 1.19.0.dev0+8dfaa4a {blas=openblas, lapack=openblas} - scipy: 1.5.0.dev0+f614064 - matplotlib: 3.2.1 {backend=QtAgg} - - sklearn: 0.22.2.post1 - numba: 0.49.0 - nibabel: 3.1.0 - cupy: Not found - pandas: 1.0.3 - dipy: 1.1.1 - pyvista: 0.25.2 {pyvistaqt=0.1.0} - vtk: 9.0.0 - qtpy: 2.0.1 {PySide6=6.2.4} + Platform Windows-10-10.0.20348-SP0 + Python 3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:34:57) [MSC v.1936 64 bit (AMD64)] + Executable C:\Miniconda3\envs\mne\python.exe + CPU Intel64 Family 6 Model 85 Stepping 7, GenuineIntel (2 cores) + Memory 7.0 GB + + Core + ├☑ mne 1.6.0.dev67+gb12384562 + ├☑ numpy 1.25.2 (OpenBLAS 0.3.23.dev with 1 thread) + ├☑ scipy 1.11.2 + ├☑ matplotlib 3.7.2 (backend=QtAgg) + ├☑ pooch 1.7.0 + └☑ jinja2 3.1.2 + + Numerical (optional) + ├☑ sklearn 1.3.0 + ├☑ nibabel 5.1.0 + ├☑ nilearn 0.10.1 + ├☑ dipy 1.7.0 + ├☑ openmeeg 2.5.6 + ├☑ pandas 2.1.0 + └☐ unavailable numba, cupy + + Visualization (optional) + ├☑ pyvista 0.41.1 (OpenGL 3.3 (Core Profile) Mesa 10.2.4 (git-d92815a) via Gallium 0.4 on llvmpipe (LLVM 3.4, 256 bits)) + ├☑ pyvistaqt 0.0.0 + ├☑ ipyvtklink 0.2.2 + ├☑ vtk 9.2.6 + ├☑ qtpy 2.4.0 (PyQt5=5.15.8) + ├☑ ipympl 0.9.3 + ├☑ pyqtgraph 0.13.3 + └☑ mne-qt-browser 0.5.2 + + Ecosystem (optional) + └☐ unavailable mne-bids, mne-nirs, mne-features, mne-connectivity, mne-icalabel, mne-bids-pipeline .. dropdown:: If you get an error... @@ -71,7 +84,6 @@ troubleshooting installation problems. .. LINKS -.. _environment file: https://raw.githubusercontent.com/mne-tools/mne-python/main/environment.yml .. _`pyvista`: https://docs.pyvista.org/ .. _`X server`: https://en.wikipedia.org/wiki/X_Window_System .. _`xvfb`: https://en.wikipedia.org/wiki/Xvfb diff --git a/doc/install/ides.rst b/doc/install/ides.rst new file mode 100644 index 00000000000..ff5d28ff381 --- /dev/null +++ b/doc/install/ides.rst @@ -0,0 +1,55 @@ +.. _ide_setup: + +IDE integration (VSCode, Spyder, etc.) +====================================== + +Most users find it convenient to write and run their code in an `Integrated +Development Environment`_ (IDE). Some popular choices for scientific +Python development are: + +- `Visual Studio Code`_ (often shortened to "VS Code" or "vscode") is a + development-focused text editor that supports many programming languages in + addition to Python, includes an integrated terminal console, and has a rich + extension ecosystem. Installing + `Microsoft's Python Extension + `__ is + enough to get most Python users up and running. VS Code is free and + open-source. + +- `Spyder`_ is a free and open-source IDE developed by and for scientists who + use Python. It can be installed via a + `standalone Spyder installer `__. + To avoid dependency conflicts with Spyder, you should install ``mne`` in a + separate environment, as explained in previous sections or using our dedicated + installer. Then, instruct + Spyder to use the MNE-Python interpreter by opening + Spyder and `navigating to `__ + :samp:`Tools > Preferences > Python Interpreter > Use the following interpreter`. + +- `PyCharm`_ is an IDE specifically for Python development that provides an + all-in-one solution (no extension packages needed). 
PyCharm comes in a + free and open-source Community edition as well as a paid Professional edition. + +For these IDEs, you'll need to provide the path to the Python interpreter you want it +to use. If you're using the MNE-Python installers, on Linux and macOS opening the +**Prompt** will display several lines of information, including a line that will read +something like: + +.. code-block:: output + + Using Python: /some/directory/mne-python_1.10.0_0/bin/python + +Altertatively (or on Windows), you can find that path by opening the Python interpreter +you want to use (e.g., the one from the MNE-Python installer, or a ``conda`` environment +that you have activated) and running:: + + >>> import sys + >>> print(sys.executable) # doctest:+SKIP + +This should print something like +``C:\Program Files\MNE-Python\1.10.0_0\bin\python.exe`` (Windows) or +``/Users/user/Applications/MNE-Python/1.10.0_0/.mne-python/bin/python`` (macOS). + +For Spyder, if the console cannot start because ``spyder-kernels`` is missing, +install the required version in the conda environment. For example, with the +environment you want to use activated, run ``conda install spyder-kernels``. diff --git a/doc/install/index.rst b/doc/install/index.rst index e4f12a112c6..1adbee4df92 100644 --- a/doc/install/index.rst +++ b/doc/install/index.rst @@ -1,3 +1,5 @@ +:html_theme.sidebar_secondary.remove: + .. _install: .. _quick-start: @@ -72,6 +74,7 @@ Installing MNE-Python :hidden: installers + ides manual_install advanced check_installation diff --git a/doc/install/installers.rst b/doc/install/installers.rst index c16e5ef7ec3..e82084cf393 100644 --- a/doc/install/installers.rst +++ b/doc/install/installers.rst @@ -1,5 +1,3 @@ -.. include:: ../links.inc - .. _installers: MNE-Python installers @@ -7,21 +5,23 @@ MNE-Python installers MNE-Python installers are the easiest way to install MNE-Python and all dependencies. They also provide many additional -Python packages and tools, including the `Spyder`_ development environment. -Got any questions? Let us know on the `MNE Forum`_! +Python packages and tools. Got any questions? Let us know on the `MNE Forum`_! + +Platform-specific installers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. tab-set:: - :class: platform-selector-tabset + :class: install-selector-tabset .. tab-item:: Linux :class-content: text-center - :name: linux-installers + :name: install-linux - .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.3.1/MNE-Python-1.3.1_0-Linux.sh + .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.10.2/MNE-Python-1.10.2_0-Linux.sh :ref-type: ref :color: primary :shadow: - :class: font-weight-bold mt-3 + :class: font-weight-bold mt-3 install-download-button |cloud-arrow-down| |ensp| Download for Linux @@ -31,18 +31,18 @@ Got any questions? Let us know on the `MNE Forum`_! .. code-block:: console - $ sh ./MNE-Python-1.1.1_0-Linux.sh + $ sh ./MNE-Python-1.10.2_0-Linux.sh .. tab-item:: macOS (Intel) :class-content: text-center - :name: macos-intel-installers + :name: install-macos-intel - .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.3.1/MNE-Python-1.3.1_0-macOS_Intel.pkg + .. 
button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.10.2/MNE-Python-1.10.2_0-macOS_Intel.pkg :ref-type: ref :color: primary :shadow: - :class: font-weight-bold mt-3 + :class: font-weight-bold mt-3 install-download-button |cloud-arrow-down| |ensp| Download for macOS (Intel) @@ -52,13 +52,13 @@ Got any questions? Let us know on the `MNE Forum`_! .. tab-item:: macOS (Apple Silicon) :class-content: text-center - :name: macos-apple-installers + :name: install-macos-apple - .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.3.1/MNE-Python-1.3.1_0-macOS_M1.pkg + .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.10.2/MNE-Python-1.10.2_0-macOS_M1.pkg :ref-type: ref :color: primary :shadow: - :class: font-weight-bold mt-3 + :class: font-weight-bold mt-3 install-download-button |cloud-arrow-down| |ensp| Download for macOS (Apple Silicon) @@ -68,18 +68,39 @@ Got any questions? Let us know on the `MNE Forum`_! .. tab-item:: Windows :class-content: text-center - :name: windows-installers + :name: install-windows - .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.3.1/MNE-Python-1.3.1_0-Windows.exe + .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.10.2/MNE-Python-1.10.2_0-Windows.exe :ref-type: ref :color: primary :shadow: - :class: font-weight-bold mt-3 + :class: font-weight-bold mt-3 install-download-button |cloud-arrow-down| |ensp| Download for Windows **Supported platforms:** Windows 10 and newer +.. card:: + :class-body: text-center + :class-card: install-download-alert hidden + + .. We have to use a button-link here because button-ref doesn't properly nested parse the inline code + + .. button-link:: ides.html + :ref-type: ref + :color: success + :shadow: + :class: font-weight-bold mt-3 + :click-parent: + + |rocket| Go to IDE Setup + + Once installation completes, **set up your IDE**! + +.. raw:: html + + + First steps ^^^^^^^^^^^ @@ -92,7 +113,7 @@ bundles to the ``Applications`` folder on macOS. .. rst-class:: mt-4 .. rst-class:: mb-5 -- |code| |ensp| Use **Spyder** to start writing your own analysis scripts right away, or to run one of our examples from this website. +- |code| |ensp| Set up **Visual Studio Code** or another IDE (:ref:`instructions here `) to start writing your own analysis scripts right away, or to run one of our examples from this website. .. rst-class:: mt-3 - |desktop| |ensp| With **System Info**, list the versions of all installed MNE-Python-related packages. @@ -107,50 +128,53 @@ bundles to the ``Applications`` folder on macOS. particularly long on Apple Silicon-based computers. Subsequent runs should usually be much faster. +Uninstallation +^^^^^^^^^^^^^^ -VS Code Setup -^^^^^^^^^^^^^ +To remove the MNE-Python distribution provided by our installers above: -If you want to use MNE-Python with `Visual Studio Code`_, you need to tell the -VS Code Python extension where to find the respective Python executable. To do -so, simply start the **Prompt**. It will display several lines of -information, including a line that will read something like: +1. Remove relevant lines from your shell initialization scripts if you + added them at installation time. To do this, you can run from the MNE Prompt: -.. code-block:: + .. code-block:: bash - Using Python: /some/directory/mne-python_1.0.0_1/bin/python + $ conda init --reverse -This path is what you need to enter in VS Code when selecting the Python -interpreter. 
+ Or you can manually edit shell initialization scripts, e.g., ``~/.bashrc`` or + ``~/.bash_profile``. -.. note:: - This information is currently not displayed on the Windows platform. +2. Follow the instructions below to remove the MNE-Python conda installation for your platform: + .. tab-set:: + :class: uninstall-selector-tabset -.. raw:: html + .. tab-item:: Linux + :name: uninstall-linux + + In a BASH terminal you can do: + + .. code-block:: bash + + $ which python + /home/username/mne-python/1.10.2_0/bin/python + $ rm -Rf /home/$USER/mne-python + $ rm /home/$USER/.local/share/applications/mne-python-*.desktop + + .. tab-item:: macOS + :name: uninstall-macos + + You can simply `drag the MNE-Python folder to the trash in the Finder `__. + + Alternatively, you can do something like: + + .. code-block:: bash + + $ which python + /Users/username/Applications/MNE-Python/1.10.2_0/.mne-python/bin/python + $ rm -Rf /Users/$USER/Applications/MNE-Python # if user-specific + $ rm -Rf /Applications/MNE-Python # if system-wide + + .. tab-item:: Windows + :name: uninstall-windows - + To uninstall MNE-Python, you can remove the application using the `Windows Control Panel `__. diff --git a/doc/install/manual_install.rst b/doc/install/manual_install.rst index c4013f84f1f..5a67a3aea68 100644 --- a/doc/install/manual_install.rst +++ b/doc/install/manual_install.rst @@ -1,5 +1,3 @@ -.. include:: ../links.inc - .. _manual-install: .. _standard-instructions: @@ -11,23 +9,26 @@ Install via :code:`pip` or :code:`conda` instead. MNE-Python requires Python version |min_python_version| or higher. If you -need to install Python, please see :ref:`install-python`. +need help installing Python, please refer to our :ref:`install-python` guide. Installing MNE-Python with all dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -We suggest to install MNE-Python into its own ``conda`` environment. +If you use Anaconda, we suggest installing MNE-Python into its own ``conda`` environment. + +First, please ensure you're using a recent version of ``conda``. Run in your terminal: + +.. code-block:: console + + $ conda update --name=base conda # update conda + $ conda --version -The dependency stack is large and may take a long time (several tens of -minutes) to resolve on some systems via the default ``conda`` solver. We -therefore highly recommend using `mamba `__ -instead, a ``conda`` replacement that is **much** faster. +The installed ``conda`` version should be ``23.10.0`` or newer. -Run in your terminal: +Now, you can install MNE-Python: .. code-block:: console - $ conda install --channel=conda-forge --name=base mamba - $ mamba create --override-channels --channel=conda-forge --name=mne mne + $ conda create --channel=conda-forge --strict-channel-priority --name=mne mne This will create a new ``conda`` environment called ``mne`` (you can adjust this by passing a different name via ``--name``) and install all @@ -35,12 +36,12 @@ dependencies into it. If you need to convert structural MRI scans into models of the scalp, inner/outer skull, and cortical surfaces, you will also need -:doc:`FreeSurfer `. +to install :doc:`FreeSurfer `. 
-Installing a minimal MNE-Python with core functionality only -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you only need MNE-Python's core functionality including 2D plotting (but -**without 3D visualization**), install via :code:`pip`: +Installing MNE-Python with core dependencies +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +If you only need MNE-Python's core functionality, which includes 2D plotting +(but does not support 3D visualization), install via :code:`pip`: .. code-block:: console @@ -50,30 +51,41 @@ or via :code:`conda`: .. code-block:: console - $ conda create --strict-channel-priority --channel=conda-forge --name=mne mne-base + $ conda create --channel=conda-forge --strict-channel-priority --name=mne mne-base This will create a new ``conda`` environment called ``mne`` (you can adjust this by passing a different name via ``--name``). -Installing a minimal MNE-Python with EEGLAB I/O support -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you plan to use MNE-Python's functions that require **HDF5 I/O** (this +This minimal installation requires only a few dependencies. If you need additional +functionality later on, you can install individual packages as needed. + +Installing MNE-Python with HDF5 support +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +If you plan to use MNE-Python's functions that require +`HDF5 `__ I/O (this includes :func:`mne.io.read_raw_eeglab`, :meth:`mne.SourceMorph.save`, and others), you should run via :code:`pip`: .. code-block:: console - $ pip install mne[hdf5] + $ pip install "mne[hdf5]" -or via :code:`conda` +or via :code:`conda`: .. code-block:: console - $ conda create --strict-channel-priority --channel=conda-forge --name=mne mne-base h5io h5py pymatreader + $ conda create --override-channels --channel=conda-forge --name=mne mne-base h5io h5py pymatreader This will create a new ``conda`` environment called ``mne`` (you can adjust this by passing a different name via ``--name``). +If you have already installed MNE-Python with core dependencies (e.g. via ``pip install mne``), +you can install these two packages to unlock HDF5 support: + +.. code-block:: console + + $ pip install h5io pymatreader + Installing MNE-Python for other scenarios ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The :ref:`advanced_setup` page has additional @@ -81,60 +93,3 @@ tips and tricks for special situations (servers, notebooks, CUDA, installing the development version, etc). The :ref:`contributing` has additional installation instructions for (future) contributors to MNE-Python (e.g, extra dependencies for running our tests and building our documentation). - -Python IDEs -=========== - -Most users find it convenient to write and run their code in an `Integrated -Development Environment`_ (IDE). Some popular choices for scientific -Python development are: - -- `Visual Studio Code`_ (often shortened to "VS Code" or "vscode") is a - development-focused text editor that supports many programming languages in - addition to Python, includes an integrated terminal console, and has a rich - ecosystem of packages to extend its capabilities. Installing - `Microsoft's Python Extension - `__ is - enough to get most Python users up and running. VS Code is free and - open-source. -- `Spyder`_ is a free and open-source IDE developed by and for scientists who - use Python. 
It is included by default in the ``base`` environment when you - install Anaconda, and can be started from a terminal with the command - ``spyder`` (or on Windows or macOS, launched from the Anaconda Navigator GUI). - It can also be installed with `dedicated installers `_. - To avoid dependency conflicts with Spyder, you should install ``mne`` in a - separate environment, like explained in the earlier sections. Then, set - Spyder to use the ``mne`` environment as its default interpreter by opening - Spyder and navigating to - :samp:`Tools > Preferences > Python Interpreter > Use the following interpreter`. - There, paste the output of the following terminal commands - - .. code-block:: console - - $ conda activate mne - $ python -c "import sys; print(sys.executable)" - - It should be something like ``C:\Users\user\anaconda3\envs\mne\python.exe`` - (Windows) or ``/Users/user/opt/anaconda3/envs/mne/bin/python`` (macOS). - - If the Spyder console can not start because ``spyder-kernels`` is missing, - install the required version in the ``mne`` environment with the following - commands in the terminal, where you replace ``...`` with the exact version of - ``spyder-kernels`` that Spyder tells you it requires. - - .. code-block:: console - - $ conda activate mne - $ conda install --strict-channel-priority --channel=conda-forge spyder-kernels=... - - Refer to the `Spyder documentation `_ - for more information about ``spyder-kernels`` and the version matching. - - If the Spyder graphic backend is not set to ``inline`` but to e.g. ``Qt5``, - ``PyQt5`` (``pip``) or ``pyqt`` (``conda``) must be installed in the ``mne`` - environment. - -- `PyCharm`_ is an IDE specifically for Python development that provides an - all-in-one installation (no extension packages needed). PyCharm comes in a - free "community" edition and a paid "professional" edition, and is - closed-source. diff --git a/doc/install/manual_install_python.rst b/doc/install/manual_install_python.rst index fec3eecacda..623b27d2a12 100644 --- a/doc/install/manual_install_python.rst +++ b/doc/install/manual_install_python.rst @@ -1,155 +1,34 @@ :orphan: -.. include:: ../links.inc - .. _install-python: Installing Python -^^^^^^^^^^^^^^^^^ +================= MNE-Python requires Python and several Python packages. MNE-Python -version |version| requires Python version |min_python_version| or higher. We -recommend the `Anaconda`_ distribution of Python, which comes with more than -250 scientific packages pre-bundled and includes the ``conda`` command line -tool for installing new packages and managing different package sets -("environments") for different projects. - -To get started, follow the `installation instructions for Anaconda`_. -When you are done, if you type the following commands in a command shell, -you should see outputs similar to the following (assuming you installed -conda to ``/home/user/anaconda3``): - - -.. tab-set:: - :class: platform-selector-tabset - - .. tab-item:: Linux - :name: linux-manual-install - - .. code-block:: console - - $ conda --version && python --version - conda 4.9.2 - Python 3.8.13 :: Anaconda, Inc. - $ which python - /home/user/anaconda3/bin/python - $ which pip - /home/user/anaconda3/bin/pip - - - .. tab-item:: macOS - :name: macos-manual-install - - .. code-block:: console - - $ conda --version && python --version - conda 4.9.2 - Python 3.8.13 - $ which python - /Users/user/opt/anaconda3/bin/python - $ which pip - /Users/user/opt/anaconda3/bin/pip - - - .. 
tab-item:: Windows - :name: windows-manual-install - - Most of our instructions start with ``$``, which indicates - that the commands are designed to be run from a ``bash`` command shell. - - Windows command prompts do not expose the same command-line tools as - ``bash`` shells, so commands like ``which`` will not work. You can test - your installation in Windows ``cmd.exe`` shells with ``where`` instead: - - .. code-block:: doscon - - > where python - C:\Users\user\anaconda3\python.exe - > where pip - C:\Users\user\anaconda3\Scripts\pip.exe - - -.. raw:: html - -
- - -.. dropdown:: If you get an error... - :color: danger - :icon: alert-fill - - .. rubric:: If you see something like: - - :: - - conda: command not found - - It means that your ``PATH`` variable (what the system uses to find - programs) is not set properly. In a correct installation, doing:: - - $ echo $PATH - ...:/home/user/anaconda3/bin:... - - Will show the Anaconda binary path (above) somewhere in the output - (probably at or near the beginning), but the ``command not found`` error - suggests that it is missing. - - On Linux or macOS, the installer should have put something - like the following in your ``~/.bashrc`` or ``~/.bash_profile`` (or your - ``.zprofile`` if you're using macOS Catalina or later, where the default - shell is ``zsh``): - - .. code-block:: console - - # >>> conda initialize >>> - # !! Contents within this block are managed by 'conda init' !! - __conda_setup= ... - ... - # <<< conda initialize <<< - - If this is missing, it is possible that you are not on the same shell that - was used during the installation. You can verify which shell you are on by - using the command:: - - $ echo $SHELL - - If you do not find this line in the configuration file for the shell you - are using (bash, zsh, tcsh, etc.), try running:: - - conda init - - in your command shell. If your shell is not ``cmd.exe`` (Windows) or - ``bash`` (Linux, macOS) you will need to pass the name of the shell to the - ``conda init`` command. See ``conda init --help`` for more info and - supported shells. - - You can also consult the Anaconda documentation and search for - Anaconda install tips (`Stack Overflow`_ results are often helpful) - to fix these or other problems when ``conda`` does not work. - - -.. raw:: html - - +version |version| requires Python version |min_python_version| or higher. + +We recommend using a ``conda``-based Python installation, such as +`Anaconda`_, `Miniconda`_, or `Miniforge`_. For new users we recommend +our pre-built :ref:`installers`, which use ``conda`` environments under the hood. + +.. warning:: + Anaconda Inc., the company that develops the Anaconda and Miniconda Python + distributions, + `changed their terms of service `__ + in March of 2024. If you're unsure about whether your usage situation requires a paid + license, we recommend using Miniforge or our pre-built installer instead. These + options, by default, install packages only from the community-maintained `conda-forge`_ + distribution channel, and avoid the distribution channels covered by Anaconda's terms + of service. + +.. _other-py-distros: + +Other Python distributions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +While conda-based CPython distributions provide many conveniences, other types of +installation (``pip`` / ``poetry``, ``venv`` / system-level) and/or other Python +distributions (PyPy) *should* also work with MNE-Python. Generally speaking, if you can +install SciPy, getting MNE-Python to work should be unproblematic. Note however that we +do not offer installation support for anything other than conda-based installations. diff --git a/doc/install/mne_c.rst b/doc/install/mne_c.rst index a5512930af4..10985a0c392 100644 --- a/doc/install/mne_c.rst +++ b/doc/install/mne_c.rst @@ -1,7 +1,5 @@ :orphan: -.. include:: ../links.inc - .. _install_mne_c: Installing MNE-C @@ -161,12 +159,13 @@ effect or you need a faster graphics adapter. 
Troubleshooting MNE-C installation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If MNE-C can't find ``libxp.so.6``, download libxp6 from debian_ or similar and -install it: +If MNE-C can't find ``libxp.so.6``, you will need to get the package from the original +author (https://launchpad.net/%7Ezeehio/+archive/ubuntu/libxp) to install it: .. code-block:: console - $ sudo dpkg -i libxp6_1.0.2-1ubuntu1_amd64.deb + $ sudo add-apt-repository ppa:zeehio/libxp + $ sudo apt install libxp6 If MNE-C can't find ``libgfortran.so.1``, you can probably safely link that filename to the current version of libfortran that came with your system. On @@ -184,7 +183,7 @@ If you encounter other errors installing MNE-C, please post a message to the .. _MNE-C download page: http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/MNE_register/index.php .. _MATLAB runtime: https://www.mathworks.com/products/compiler/matlab-runtime.html -.. _netpbm: http://netpbm.sourceforge.net/ +.. _netpbm: https://netpbm.sourceforge.net/ .. _MacPorts: https://www.macports.org/ .. _Homebrew: https://brew.sh/ .. _XCode developer tools: https://developer.apple.com/xcode/ diff --git a/doc/install/mne_tools_suite.rst b/doc/install/mne_tools_suite.rst index c9126e5c012..0a37f666634 100644 --- a/doc/install/mne_tools_suite.rst +++ b/doc/install/mne_tools_suite.rst @@ -1,5 +1,3 @@ -.. include:: ../links.inc - Overview of the MNE tools suite =============================== @@ -23,50 +21,25 @@ Related software types like functional near-infrared spectroscopy (fNIRS). MNE-Python is collaboratively developed and has more than 200 contributors. -- :ref:`MNE MATLAB ` provides a MATLAB interface to the .fif file - format and other MNE data structures, and provides example MATLAB +- `MNE-MATLAB`_ provides a MATLAB interface to the .fif + file format and other MNE data structures, and provides example MATLAB implementations of some of the core analysis functionality of MNE-C. It is - distributed alongside MNE-C, and can also be downloaded from the `MNE-MATLAB - git repository`_. + distributed alongside MNE-C, and can also be downloaded from the `MNE-MATLAB`_ GitHub repository. - :ref:`MNE-CPP ` provides core MNE functionality implemented in C++ and is primarily intended for embedded and real-time applications. There is also a growing ecosystem of other Python packages that work alongside -MNE-Python, including packages for: +MNE-Python, including: .. note:: Something missing? :class: sidebar If you know of a package that is related but not listed here, feel free to - :ref:`make a pull request ` to add it to this list. 
- -- a graphical user interface for MNE-Python (`MNELAB`_) -- easily importing MEG data from the Human Connectome Project for - use with MNE-Python (`MNE-HCP`_) -- managing MNE projects so that they comply with the `Brain - Imaging Data Structure`_ specification (`MNE-BIDS`_) -- automatic bad channel detection and interpolation (`autoreject`_) -- convolutional sparse dictionary learning and waveform shape estimation - (`alphaCSC`_) -- independent component analysis (ICA) with good performance on real data - (`PICARD`_) -- phase-amplitude coupling (`pactools`_) -- representational similarity analysis (`rsa`_) -- microstate analysis (`microstate`_) -- connectivity analysis using dynamic imaging of coherent sources (DICS) - (`conpy`_) -- general-purpose statistical analysis of M/EEG data (`eelbrain`_) -- post-hoc modification of linear models (`posthoc`_) -- a python implementation of the Preprocessing Pipeline (PREP) for EEG data - (`pyprep`_) -- automatic multi-dipole localization and uncertainty quantification with - the Bayesian algorithm SESAME (`sesameeg`_) -- GLM and group level analysis of near-infrared spectroscopy data (`mne-nirs`_) -- High-level EEG Python library for all kinds of EEG inverse solutions (`invertmeeg`_) -- All-Resolutions Inference (ARI) for statistically valid circular inference - and effect localization (`MNE-ARI`_) + to add it to this list by :ref:`making a pull request ` to update + `doc/sphinxext/related_software.py `__. +.. related-software:: What should I install? ^^^^^^^^^^^^^^^^^^^^^^ @@ -94,21 +67,4 @@ Getting help Help with installation is available through the `MNE Forum`_. See the :ref:`help` page for more information. - -.. LINKS: - -.. _MNELAB: https://github.com/cbrnr/mnelab -.. _autoreject: https://autoreject.github.io/ -.. _alphaCSC: https://alphacsc.github.io/ -.. _picard: https://pierreablin.github.io/picard/ -.. _pactools: https://pactools.github.io/ -.. _rsa: https://github.com/wmvanvliet/mne-rsa -.. _microstate: https://github.com/wmvanvliet/mne_microstates -.. _conpy: https://aaltoimaginglanguage.github.io/conpy/ -.. _eelbrain: https://eelbrain.readthedocs.io/en/stable/index.html -.. _posthoc: https://users.aalto.fi/~vanvlm1/posthoc/python/ -.. _pyprep: https://github.com/sappelhoff/pyprep -.. _sesameeg: https://pybees.github.io/sesameeg -.. _mne-nirs: https://github.com/mne-tools/mne-nirs -.. _invertmeeg: https://github.com/LukeTheHecker/invert -.. _MNE-ARI: https://github.com/john-veillette/mne_ari +.. include:: ../links.inc diff --git a/doc/install/updating.rst b/doc/install/updating.rst index 88e843a5e95..c946d5e496e 100644 --- a/doc/install/updating.rst +++ b/doc/install/updating.rst @@ -12,19 +12,14 @@ options, depending on how you originally installed it. If you're not using the MNE-Python installers, keep reading. -.. warning:: - - Before performing package upgrade operations, check to make sure that the - environment you wish to modify has been activated (and if not, call - ``conda activate name_of_environment`` first). - Upgrading MNE-Python only ^^^^^^^^^^^^^^^^^^^^^^^^^ If you wish to update MNE-Python only and leave other packages in their current state, you can usually safely do this with ``pip``, even if you originally -installed via conda. With the ``mne`` environment active, do: +installed via conda. With the ``mne`` environment active +(``conda activate name_of_environment``), do: .. code-block:: console @@ -42,17 +37,18 @@ sure all packages remain compatible with one another during upgrades. 
Here we'll demonstrate renaming the old environment first, as a safety measure. We'll assume that the existing environment is called ``mne`` and you want to rename the old one so that the new, upgraded environment can be called ``mne`` -instead. Unfortunately ``conda`` doesn't have a "rename" command so we'll first -clone the old one with a new name (``old_mne``), then delete the original, then -create the new, updated environment re-using the original name. In the first -step we'll also use conda in ``--offline`` mode so that it uses cached -copies of all the packages instead of re-downloading them. +instead. + +.. warning:: + + Before running the below command, ensure that your existing MNE conda + environment is **not** activated. Run ``conda deactivate`` if in doubt. .. code-block:: console - $ conda create --name old_mne --clone mne --offline # copy with new name, - $ conda env remove --name mne --all # remove original, - $ conda create --name mne --channel conda-forge mne # replace with updated + $ conda rename --name=mne old_mne # rename existing "mne" env to "old_mne" + +Then, just follow our regular installation instructions, :ref:`manual-install`. .. note:: @@ -81,8 +77,9 @@ Upgrading to the development version Sometimes, new features or bugfixes become available that are important to your research and you just can't wait for the next official release of MNE-Python to start taking advantage of them. In such cases, you can use ``pip`` to install -the *development version* of MNE-Python: +the *development version* of MNE-Python. Ensure to activate the MNE conda +environment first by running ``conda activate mne``. .. code-block:: console - $ pip install -U --no-deps https://github.com/mne-tools/mne-python/archive/main.zip + $ pip install -U --no-deps https://github.com/mne-tools/mne-python/archive/refs/heads/main.zip diff --git a/doc/links.inc b/doc/links.inc index 431a4edabc2..7a6f7afc39f 100644 --- a/doc/links.inc +++ b/doc/links.inc @@ -11,9 +11,7 @@ .. mne -.. _`MNE-Python`: http://mne.tools/mne-python-intro .. _`MNE-Python GitHub`: https://github.com/mne-tools/mne-python -.. _`mne-scripts`: https://github.com/mne-tools/mne-scripts/ .. _`MNE-C manual`: https://mne.tools/mne-c-manual/MNE-manual-2.7.3.pdf .. _`GitHub issues page`: https://github.com/mne-tools/mne-python/issues/ .. _`MNE Forum`: https://mne.discourse.group @@ -21,9 +19,16 @@ .. _`MNE-BIDS-Pipeline`: https://mne.tools/mne-bids-pipeline .. _`MNE-HCP`: http://mne.tools/mne-hcp .. _`MNE-Realtime`: https://mne.tools/mne-realtime -.. _`MNE-MATLAB git repository`: https://github.com/mne-tools/mne-matlab +.. _`MNE-LSL`: https://mne.tools/mne-lsl +.. _`MNE-gui-addons`: https://mne.tools/mne-gui-addons +.. _`MNE-MATLAB`: https://github.com/mne-tools/mne-matlab .. _`MNE-Docker`: https://github.com/mne-tools/mne-docker -.. _OpenMEEG: http://openmeeg.github.io +.. _`MNE-ICAlabel`: https://github.com/mne-tools/mne-icalabel +.. _`MNE-Connectivity`: https://github.com/mne-tools/mne-connectivity +.. _`MNE-NIRS`: https://github.com/mne-tools/mne-nirs +.. _PICARD: https://mind-inria.github.io/picard/ +.. _OpenMEEG: https://openmeeg.github.io +.. _openneuro-py: https://pypi.org/project/openneuro-py .. _EOSS2: https://chanzuckerberg.com/eoss/proposals/improving-usability-of-core-neuroscience-analysis-tools-with-mne-python .. _EOSS4: https://chanzuckerberg.com/eoss/proposals/building-pediatric-and-clinical-data-pipelines-for-mne-python/ .. 
_`code of conduct`: https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md @@ -39,95 +44,6 @@ .. _ft_exch: https://mailman.science.ru.nl/pipermail/fieldtrip/2008-October/001794.html - -.. TEMPLATE FOR LINKS TO OTHER PROJECTS -.. _PROJECTNAME: http://neuroimaging.scipy.org -.. _`PROJECTNAME github`: http://github.com/nipy -.. _`PROJECTNAME mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel - -.. numpy - -.. _numpy: http://www.numpy.org -.. _`numpy github`: http://github.com/numpy/numpy -.. _`numpy mailing list`: http://mail.scipy.org/mailman/listinfo/numpy-discussion - - -.. scipy - -.. _scipy: http://www.scipy.org -.. _`scipy github`: http://github.com/scipy/scipy -.. _`scipy mailing list`: http://mail.scipy.org/mailman/listinfo/scipy-dev - -.. freesurfer - -.. _freesurfer: https://surfer.nmr.mgh.harvard.edu/ -.. _`freesufer github`: https://github.com/freesurfer/freesurfer -.. _`freesurfer mailing list`: https://surfer.nmr.mgh.harvard.edu/fswiki/FreeSurferSupport - - -.. nipy - -.. _nipy: http://nipy.org/nipy -.. _`nipy github`: http://github.com/nipy/nipy -.. _`nipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel - - -.. ipython - -.. _ipython: https://ipython.org -.. _`ipython github`: https://github.com/ipython/ipython -.. _`ipython mailing list`: https://mail.python.org/mailman/listinfo/ipython-dev - - -.. dipy - -.. _dipy: http://nipy.org/dipy -.. _`dipy github`: http://github.com/Garyfallidis/dipy -.. _`dipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel - - -.. nibabel - -.. _nibabel: http://nipy.org/nibabel -.. _`nibabel github`: http://github.com/nipy/nibabel -.. _`nibabel mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel - - -.. h5py - -.. _h5py: http://www.h5py.org -.. _`h5py github`: http://github.com/h5py/h5py -.. _`h5py mailing list`: https://groups.google.com/forum/#!forum/h5py - - -.. Pytest - -.. _pytest: https://docs.pytest.org/ - - -.. Flake8 - -.. _Flake8: http://flake8.pycqa.org/ - - -.. pymatreader - -.. _pymatreader: https://gitlab.com/obob/pymatreader - - -.. h5io - -.. _h5io: https://github.com/h5io/h5io - - -.. CuPy - -.. _CuPy: https://cupy.chainer.org/ - -.. Dask - -.. _Dask: https://dask.org/ - .. git stuff .. _git: https://git-scm.com/ @@ -155,9 +71,19 @@ .. _python: http://www.python.org .. _Brain Imaging Data Structure: https://bids.neuroimaging.io/ +.. _SPEC0: https://scientific-python.org/specs/spec-0000 .. python packages +.. _numpy: http://www.numpy.org +.. _scipy: http://www.scipy.org +.. _freesurfer: https://surfer.nmr.mgh.harvard.edu/ +.. _nipy: http://nipy.org/nipy +.. _h5py: http://www.h5py.org +.. _pymatreader: https://gitlab.com/obob/pymatreader +.. _h5io: https://github.com/h5io/h5io +.. _CuPy: https://cupy.chainer.org/ +.. _Dask: https://dask.org/ .. _pep8: https://pypi.org/project/pep8/ .. _pyflakes: https://pypi.org/project/pyflakes .. _coverage: https://pypi.python.org/pypi/coverage @@ -165,13 +91,13 @@ .. _nitime: http://nipy.org/nitime/ .. _joblib: https://pypi.python.org/pypi/joblib .. _scikit-learn: https://scikit-learn.org/stable/ -.. _pyDICOM: https://pypi.python.org/pypi/pydicom/ .. _matplotlib: https://matplotlib.org/ .. _sphinx: https://www.sphinx-doc.org/ .. _pandas: https://pandas.pydata.org/ .. _PIL: https://pypi.python.org/pypi/PIL .. _tqdm: https://tqdm.github.io/ .. _pooch: https://www.fatiando.org/pooch/latest/ +.. _towncrier: https://towncrier.readthedocs.io/ .. python editors @@ -187,10 +113,10 @@ .. 
_installation instructions for Anaconda: http://docs.continuum.io/anaconda/install .. _installation instructions for Miniconda: https://conda.io/projects/conda/en/latest/user-guide/install/index.html .. _Anaconda troubleshooting guide: http://conda.pydata.org/docs/troubleshooting.html +.. _conda-forge: https://conda-forge.org .. installation links -.. _requirements file: https://raw.githubusercontent.com/mne-tools/mne-python/main/requirements.txt .. _NVIDIA CUDA GPU processing: https://developer.nvidia.com/cuda-zone .. _NVIDIA proprietary drivers: https://www.geforce.com/drivers @@ -198,5 +124,3 @@ .. _sphinx-gallery: https://sphinx-gallery.github.io .. _NumPy docstring style guidelines: https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard .. _Stack Overflow: https://stackoverflow.com/ - -.. vim: ft=rst diff --git a/doc/martinos.rst b/doc/martinos.rst deleted file mode 100644 index a698ec5eed7..00000000000 --- a/doc/martinos.rst +++ /dev/null @@ -1,37 +0,0 @@ -:orphan: - -.. _inside_martinos: - -Martinos Center setup ---------------------- - -For people within the MGH/MIT/HMS Martinos Center, MNE is available on the network. - -In a terminal do: - -.. code-block:: console - - $ setenv PATH /usr/pubsw/packages/python/anaconda/bin:${PATH} - -If you use Bash replace the previous instruction with: - -.. code-block:: console - - $ export PATH=/usr/pubsw/packages/python/anaconda/bin:${PATH} - -Then start the python interpreter with: - -.. code-block:: console - - $ ipython - -Then type:: - - >>> import mne - -If you get a new prompt with no error messages, you should be good to go. - -We encourage all Martinos center Python users to subscribe to the -`Martinos Python mailing list`_. - -.. _Martinos Python mailing list: https://mail.nmr.mgh.harvard.edu/mailman/listinfo/martinos-python diff --git a/doc/old_versions/index.rst b/doc/old_versions/index.rst new file mode 100644 index 00000000000..abac30dad82 --- /dev/null +++ b/doc/old_versions/index.rst @@ -0,0 +1,18 @@ +Archived documentation for old versions +======================================= + +.. compressed with: zip -r -s 100m 0.20.zip 0.20/ + +Zip archives of old documentation versions are available below. Some have +multiple parts because GitHub has a 100 MB file size limit. + +- `v0.20 part 1 `__ and `part 2 `__ +- `v0.19 part 1 `__ and `part 2 `__ +- `v0.18 `__ +- `v0.17 `__ +- `v0.16 `__ +- `v0.15 `__ +- `v0.14 `__ +- `v0.13 `__ +- `v0.12 `__ +- `v0.11 `__ diff --git a/doc/overview/datasets_index.rst b/doc/overview/datasets_index.rst deleted file mode 100644 index 23827d978d7..00000000000 --- a/doc/overview/datasets_index.rst +++ /dev/null @@ -1,490 +0,0 @@ -.. _datasets: - -Datasets Overview -################# - -.. note:: Contributing datasets to MNE-Python - :class: sidebar - - Do not hesitate to contact MNE-Python developers on the - `MNE Forum `_ to discuss the possibility of - adding more publicly available datasets. - -All the dataset fetchers are available in :mod:`mne.datasets`. To download any of the datasets, -use the ``data_path`` (fetches full dataset) or the ``load_data`` (fetches dataset partially) functions. - -All fetchers will check the default download location first to see if the dataset -is already on your computer, and only download it if necessary. The default -download location is also configurable; see the documentation of any of the -``data_path`` functions for more information. - -.. 
_sample-dataset: - -Sample -====== -:func:`mne.datasets.sample.data_path` - -These data were acquired with the Neuromag -Vectorview system at MGH/HMS/MIT Athinoula A. Martinos Center Biomedical -Imaging. EEG data from a 60-channel electrode cap was acquired simultaneously with -the MEG. The original MRI data set was acquired with a Siemens 1.5 T -Sonata scanner using an MPRAGE sequence. - -.. note:: These data are provided solely for the purpose of getting familiar - with the MNE software. The data should not be used to evaluate the - performance of the MEG or MRI system employed. - -In this experiment, checkerboard patterns were presented to the subject -into the left and right visual field, interspersed by tones to the -left or right ear. The interval between the stimuli was 750 ms. Occasionally -a smiley face was presented at the center of the visual field. -The subject was asked to press a key with the right index finger -as soon as possible after the appearance of the face. - -.. table:: Trigger codes for the sample data set. - - ========= ===== ========================================== - Name Contents - ========= ===== ========================================== - LA 1 Response to left-ear auditory stimulus - RA 2 Response to right-ear auditory stimulus - LV 3 Response to left visual field stimulus - RV 4 Response to right visual field stimulus - smiley 5 Response to the smiley face - button 32 Response triggered by the button press - ========= ===== ========================================== - -Contents of the data set -^^^^^^^^^^^^^^^^^^^^^^^^ - -The sample data set contains two main directories: ``MEG/sample`` (the MEG/EEG -data) and ``subjects/sample`` (the MRI reconstructions). -In addition to subject ``sample``, the MRI surface reconstructions from another -subject, morph, are provided to demonstrate morphing capabilities. - -.. table:: Contents of the MEG/sample directory. - - ======================== ===================================================================== - File Contents - ======================== ===================================================================== - sample/audvis_raw.fif The raw MEG/EEG data - audvis.ave A template script for off-line averaging - auvis.cov A template script for the computation of a noise-covariance matrix - ======================== ===================================================================== - -.. table:: Overview of the contents of the subjects/sample directory. - - ======================= ====================================================================== - File / directory Contents - ======================= ====================================================================== - bem Directory for the forward modelling data - bem/watershed BEM surface segmentation data computed with the watershed algorithm - bem/inner_skull.surf Inner skull surface for BEM - bem/outer_skull.surf Outer skull surface for BEM - bem/outer_skin.surf Skin surface for BEM - sample-head.fif Skin surface in fif format for mne_analyze visualizations - surf Surface reconstructions - mri/T1 The T1-weighted MRI data employed in visualizations - ======================= ====================================================================== - -The following preprocessing steps have been already accomplished -in the sample data set: - -- The MRI surface reconstructions have - been computed using the FreeSurfer software. - -- The BEM surfaces have been created with the watershed algorithm, - see :ref:`bem_watershed_algorithm`. 
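A minimal sketch of fetching the sample dataset and epoching its raw file using the trigger codes from the table above (the exact file name under ``MEG/sample`` and the return type of ``data_path()`` may vary across MNE-Python versions):

.. code-block:: python

    import mne

    data_path = mne.datasets.sample.data_path()  # downloads on first use
    raw = mne.io.read_raw_fif(data_path / "MEG" / "sample" / "sample_audvis_raw.fif")
    events = mne.find_events(raw, stim_channel="STI 014")
    event_id = {"LA": 1, "RA": 2, "LV": 3, "RV": 4, "smiley": 5, "button": 32}
    epochs = mne.Epochs(raw, events, event_id=event_id, tmin=-0.2, tmax=0.5)
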
-
-The **sample** dataset is distributed with :ref:`fsaverage` for convenience.
-
-.. _ucl-opm-auditory-dataset:
-
-UCL OPM Auditory
-================
-:func:`mne.datasets.ucl_opm_auditory.data_path`.
-
-A basic auditory evoked field experiment using an OPM setup from the FIL at UCL.
-See :footcite:`SeymourEtAl2022` for details.
-
-.. topic:: Examples
-
-  * :ref:`tut-opm-processing`
-
-Brainstorm
-==========
-Dataset fetchers for three Brainstorm tutorials are available. Users must agree to the
-license terms of these datasets before downloading them. These files were recorded with a CTF 275 system
-and are provided in native CTF format (.ds files).
-
-Auditory
-^^^^^^^^
-:func:`mne.datasets.brainstorm.bst_auditory.data_path`.
-
-Details about the data can be found at the Brainstorm `auditory dataset tutorial`_.
-
-.. topic:: Examples
-
-  * :ref:`tut-brainstorm-auditory`: Partially replicates the original Brainstorm tutorial.
-
-Resting state
-^^^^^^^^^^^^^
-:func:`mne.datasets.brainstorm.bst_resting.data_path`
-
-Details can be found at the Brainstorm `resting state dataset tutorial`_.
-
-.. topic:: Examples
-
-  * :ref:`mne-connectivity:ex-envelope-correlation`
-
-Median nerve
-^^^^^^^^^^^^
-:func:`mne.datasets.brainstorm.bst_raw.data_path`
-
-Details can be found at the Brainstorm `median nerve dataset tutorial`_.
-
-.. topic:: Examples
-
-  * :ref:`ex-brainstorm-raw`
-
-SPM faces
-=========
-:func:`mne.datasets.spm_face.data_path`
-
-The `SPM faces dataset`_ contains EEG, MEG and fMRI recordings on face perception.
-
-.. topic:: Examples
-
-  * :ref:`ex-spm-faces`: Full pipeline including artifact removal, epoch averaging, forward model computation and source reconstruction using dSPM on the contrast: "faces - scrambled".
-
-EEGBCI motor imagery
-====================
-:func:`mne.datasets.eegbci.load_data`
-
-The EEGBCI dataset is documented in :footcite:`SchalkEtAl2004`. The data set is
-available at PhysioNet :footcite:`GoldbergerEtAl2000`. The dataset contains
-64-channel EEG recordings from 109 subjects, with 14 runs per subject, in EDF+
-format. The recordings were made using the BCI2000 system. To load a subject,
-do::
-
-    from mne.io import concatenate_raws, read_raw_edf
-    from mne.datasets import eegbci
-    subject = 1          # subjects are numbered 1..109
-    runs = [6, 10, 14]   # e.g. the motor imagery runs (hands vs. feet)
-    raw_fnames = eegbci.load_data(subject, runs)
-    raws = [read_raw_edf(f, preload=True) for f in raw_fnames]
-    raw = concatenate_raws(raws)
-
-.. topic:: Examples
-
-  * :ref:`ex-decoding-csp-eeg`
-
-.. _somato-dataset:
-
-Somatosensory
-=============
-:func:`mne.datasets.somato.data_path`
-
-This dataset contains somatosensory data with event-related synchronizations
-(ERS) and desynchronizations (ERD).
-
-.. topic:: Examples
-
-  * :ref:`tut-sensors-time-freq`
-  * :ref:`ex-inverse-source-power`
-  * :ref:`ex-time-freq-global-field-power`
-
-Multimodal
-==========
-:func:`mne.datasets.multimodal.data_path`
-
-This dataset contains a single subject recorded at Otaniemi (Aalto University)
-with auditory, visual, and somatosensory stimuli.
-
-.. topic:: Examples
-
-  * :ref:`ex-io-ave-fiff`
-
-.. _fnirs-motor-dataset:
-
-fNIRS motor
-===========
-:func:`mne.datasets.fnirs_motor.data_path`
-
-This dataset contains a single subject recorded at Macquarie University.
-It has optodes placed over the motor cortex. There are three conditions:
-
-- tapping the left thumb to fingers
-- tapping the right thumb to fingers
-- a control where nothing happens
-
-The tapping lasts 5 seconds, and there are 30 trials of each condition.
-
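A minimal sketch of loading these recordings is shown below; it assumes the dataset
ships NIRx recordings in a ``Participant-1`` folder (as in recent releases) and uses
:func:`mne.io.read_raw_nirx`::

    import os

    import mne
    from mne.datasets import fnirs_motor

    # downloads the dataset on first use, otherwise reuses the local copy
    data_path = fnirs_motor.data_path()
    raw = mne.io.read_raw_nirx(os.path.join(data_path, "Participant-1"))
    print(raw.info)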
-.. topic:: Examples
-
-  * :ref:`tut-fnirs-processing`
-
-High frequency SEF
-==================
-:func:`mne.datasets.hf_sef.data_path`
-
-This dataset contains somatosensory evoked fields (median nerve stimulation)
-with thousands of epochs. It was recorded with an Elekta TRIUX MEG device at
-a sampling frequency of 3 kHz. The dataset is suitable for investigating
-high-frequency somatosensory responses. Data from two subjects are included
-with MRI images in DICOM format and FreeSurfer reconstructions.
-
-.. topic:: Examples
-
-  * :ref:`high-frequency SEF responses `.
-
-Visual 92 object categories
-===========================
-:func:`mne.datasets.visual_92_categories.data_path`.
-
-This dataset was recorded using a 306-channel Neuromag Vectorview system.
-
-The experiment consisted of the visual presentation of 92 images of human, animal
-and inanimate objects, either natural or artificial :footcite:`CichyEtAl2014`.
-Given the high number of conditions, this dataset is well suited to an approach
-based on Representational Similarity Analysis (RSA).
-
-.. topic:: Examples
-
-  * :ref:`Representational Similarity Analysis (RSA) `: Partially replicates the results from :footcite:`CichyEtAl2014`.
-
-
-mTRF Dataset
-============
-:func:`mne.datasets.mtrf.data_path`.
-
-This dataset contains 128-channel EEG as well as natural speech stimulus features,
-which are also available `here `_.
-
-The experiment consisted of subjects listening to natural speech.
-The dataset contains several feature representations of the speech stimulus,
-suitable for fitting continuous regression models of neural activity.
-More details and a description of the package can be found in
-:footcite:`CrosseEtAl2016`.
-
-.. topic:: Examples
-
-  * :ref:`Receptive Field Estimation and Prediction `: Partially replicates the results from :footcite:`CrosseEtAl2016`.
-
-
-.. _kiloword-dataset:
-
-Kiloword dataset
-================
-:func:`mne.datasets.kiloword.data_path`.
-
-This dataset consists of averaged EEG data from 75 subjects performing a
-lexical decision task on 960 English words :footcite:`DufauEtAl2015`. The words
-are richly annotated, and can be used for e.g. multiple regression estimation
-of EEG correlates of printed word processing.
-
-
-4D Neuroimaging / BTi dataset
-=============================
-:func:`mne.datasets.phantom_4dbti.data_path`.
-
-This dataset was obtained with a phantom on a 4D Neuroimaging / BTi system at
-the MEG center in La Timone hospital in Marseille.
-
-.. topic:: Examples
-
-  * :ref:`tut-phantom-4Dbti`
-
-OPM
-===
-:func:`mne.datasets.opm.data_path`
-
-OPM data were acquired using an Elekta DACQ, simply piping the data into Elekta
-magnetometer channels. The FIF files thus appear to come from a TRIUX system
-that is only acquiring a small number of magnetometer channels instead of the
-whole array.
-
-The OPM ``coil_type`` is custom, requiring a custom ``coil_def.dat``.
-The new ``coil_type`` is 9999.
-
-OPM co-registration differs a bit from the typical SQUID-MEG workflow.
-No ``-trans.fif`` file is needed for the OPMs; the FIF files include proper
-sensor locations in MRI coordinates and no digitization of RPA/LPA/Nasion.
-Thus the MEG<->Head coordinate transform is taken to be an identity matrix
-(i.e., everything is in MRI coordinates), even though this mis-identifies
-the head coordinate frame (which is defined by the relationship of the
-LPA, RPA, and Nasion).
-
-Triggers include:
-
-* Median nerve stimulation: trigger value 257.
-* Magnetic trigger (in OPM measurement only): trigger value 260. - 1 second before the median nerve stimulation, a magnetic trigger is piped into the MSR. - This was to be able to check the synchronization between OPMs retrospectively, as each - sensor runs on an independent clock. Synchronization turned out to be satisfactory. - -.. topic:: Examples - - * :ref:`ex-opm-somatosensory` - * :ref:`ex-opm-resting-state` - -The Sleep PolySomnoGraphic Database -=================================== -:func:`mne.datasets.sleep_physionet.age.fetch_data` -:func:`mne.datasets.sleep_physionet.temazepam.fetch_data` - -The sleep PhysioNet database contains 197 whole-night PolySomnoGraphic sleep -recordings, containing EEG, EOG, chin EMG, and event markers. Some records also -contain respiration and body temperature. Corresponding hypnograms (sleep -patterns) were manually scored by well-trained technicians according to the -Rechtschaffen and Kales manual, and are also available. If you use these -data please cite :footcite:`KempEtAl2000` and :footcite:`GoldbergerEtAl2000`. - -.. topic:: Examples - - * :ref:`tut-sleep-stage-classif` - -Reference channel noise MEG data set -==================================== -:func:`mne.datasets.refmeg_noise.data_path`. - -This dataset was obtained with a 4D Neuroimaging / BTi system at -the University Clinic - Erlangen, Germany. There are powerful bursts of -external magnetic noise throughout the recording, which make it a good -example for automatic noise removal techniques. - -.. topic:: Examples - - * :ref:`ex-megnoise_processing` - -Miscellaneous Datasets -====================== -These datasets are used for specific purposes in the documentation and in -general are not useful for separate analyses. - -.. _fsaverage: - -fsaverage -^^^^^^^^^ -:func:`mne.datasets.fetch_fsaverage` - -For convenience, we provide a function to separately download and extract the -(or update an existing) fsaverage subject. - -.. topic:: Examples - - :ref:`tut-eeg-fsaverage-source-modeling` - -Infant template MRIs -^^^^^^^^^^^^^^^^^^^^ -:func:`mne.datasets.fetch_infant_template` - -This function will download an infant template MRI from -:footcite:`OReillyEtAl2021` along with MNE-specific files. - -ECoG Dataset -^^^^^^^^^^^^ -:func:`mne.datasets.misc.data_path`. Data exists at ``/ecog/``. - -This dataset contains a sample electrocorticography (ECoG) dataset. It includes -two grids of electrodes and ten shaft electrodes with simulated motor data (actual data -pending availability). - -.. topic:: Examples - - * :ref:`ex-electrode-pos-2d`: Demonstrates how to project a 3D electrode location onto a 2D image, a common procedure in ECoG analyses. - * :ref:`tut-ieeg-localize`: Demonstrates how to use a graphical user interface to locate electrode contacts as well as warp them to a common atlas. - -sEEG Dataset -^^^^^^^^^^^^ -:func:`mne.datasets.misc.data_path`. Data exists at ``/seeg/``. - -This dataset contains a sample stereoelectroencephalography (sEEG) dataset. -It includes 21 shaft electrodes during a two-choice movement task on a keyboard. - -.. topic:: Examples - - * :ref:`tut-ieeg-localize`: Demonstrates how to use a graphical user interface to locate electrode contacts as well as warp them to a common atlas. - * :ref:`tut-working-with-seeg`: Demonstrates ways to plot sEEG anatomy and results. - -.. _limo-dataset: - -LIMO Dataset -^^^^^^^^^^^^ -:func:`mne.datasets.limo.load_data`. 
-
-In the original LIMO experiment (see :footcite:`RousseletEtAl2010`), participants
-performed a
-two-alternative forced choice task, discriminating between two face stimuli.
-Subjects discriminated the same two faces during the whole experiment.
-The critical manipulation consisted of the level of noise added to the
-face stimuli during the task, making the faces more or less discernible to the
-observer.
-
-The presented faces varied across a noise-signal (or phase-coherence) continuum
-spanning from 0 to 100% in increasing steps of 10%. In other words, faces with
-high phase-coherence (e.g., 90%) were easy to identify, while faces with low
-phase-coherence (e.g., 10%) were hard to identify and by extension hard to
-discriminate.
-
-.. topic:: Examples
-
-  * :ref:`Single trial linear regression analysis with the LIMO dataset
-    `: Explores data from a single subject of the LIMO dataset
-    and demonstrates how to fit a single trial linear regression using the
-    information contained in the metadata of the individual datasets.
-
-.. _erp-core-dataset:
-
-ERP CORE Dataset
-^^^^^^^^^^^^^^^^
-:func:`mne.datasets.erp_core.data_path`
-
-The original `ERP CORE dataset`_ :footcite:`Kappenman2021` contains data from
-40 participants who completed 6 EEG experiments, carefully crafted to evoke
-7 well-known event-related potential (ERP) components.
-
-Currently, the MNE-Python ERP CORE dataset only provides data from one
-participant (subject ``001``) of the Flankers paradigm, which elicits the
-lateralized readiness potential (LRP) and error-related negativity (ERN). The
-data provided are **not** the original data from the ERP CORE dataset, but
-rather a slightly modified version, designed to demonstrate the Epochs metadata
-functionality. For example, we already set the references and montage
-correctly, and stored events as Annotations. Data are provided in ``FIFF``
-format.
-
-.. topic:: Examples
-
-  * :ref:`tut-autogenerate-metadata`: Learn how to auto-generate
-    `~mne.Epochs` metadata, and visualize the error-related negativity (ERN)
-    ERP component.
-
-.. _ssvep-dataset:
-
-SSVEP
-=====
-:func:`mne.datasets.ssvep.data_path`
-
-This is a simple example dataset with frequency-tagged visual stimulation:
-N=2 participants observed checkerboard patterns inverting at a constant
-frequency of either 12.0 Hz or 15.0 Hz. There were 10 trials of 20.0 s length
-each, and 32-channel wet EEG was recorded.
-
-Data format: BrainVision .eeg/.vhdr/.vmrk files organized according to the BIDS
-standard.
-
-.. topic:: Examples
-
-  * :ref:`tut-ssvep`
-
-References
-==========
-
-.. footbibliography::
-
-
-.. LINKS
-
-.. _auditory dataset tutorial: https://neuroimage.usc.edu/brainstorm/DatasetAuditory
-.. _resting state dataset tutorial: https://neuroimage.usc.edu/brainstorm/DatasetResting
-.. _median nerve dataset tutorial: https://neuroimage.usc.edu/brainstorm/DatasetMedianNerveCtf
-.. _SPM faces dataset: https://www.fil.ion.ucl.ac.uk/spm/data/mmfaces/
-.. _ERP CORE dataset: https://erpinfo.org/erp-core
diff --git a/doc/overview/development.rst b/doc/overview/development.rst
deleted file mode 100644
index 7840b245a7a..00000000000
--- a/doc/overview/development.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-MNE-Python Development
-======================
-
-.. NOTE: this first section (up until "overview of contribution process") is
-   basically a copy/paste of CONTRIBUTING.md from the repository root, with one
-   sentence deleted to avoid self-referential linking. Changes made here should
-   be mirrored there, and vice-versa.
-
-MNE-Python is maintained by a community of scientists and research labs. The
-project accepts contributions in the form of bug reports, fixes, feature
-additions, and documentation improvements (including typo corrections). The
-best way to start contributing is by `opening an issue`_ on our GitHub page to
-discuss ideas for changes or enhancements, or to tell us about behavior that
-you think might be a bug. For *general troubleshooting* or *usage questions*,
-please consider posting your questions on our `MNE Forum`_.
-
-Users and contributors to MNE-Python are expected to follow our
-`code of conduct`_.
-
-The `contributing guide`_ has details on the preferred contribution workflow
-and the recommended system configuration for a smooth contribution/development
-experience.
-
-.. _`opening an issue`: https://github.com/mne-tools/mne-python/issues/new/choose
-.. _`MNE Forum`: https://mne.discourse.group
-.. _`code of conduct`: https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md
-.. _`contributing guide`: https://mne.tools/dev/install/contributing.html
-
-.. toctree::
-   :hidden:
-
-   ../install/contributing
-   ../whats_new
-   roadmap
-   governance
diff --git a/doc/overview/get_help.rst b/doc/overview/get_help.rst
deleted file mode 100644
index 1beab4c1046..00000000000
--- a/doc/overview/get_help.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-.. include:: ../links.inc
-
-.. _help:
-
-Getting help
-^^^^^^^^^^^^
-
-There are several places to obtain help with MNE software tools.
-
-- The `MNE Forum`_ is a good place to go for both troubleshooting and general
-  questions.
-- The :ref:`faq` page has some troubleshooting tips, and is a good source of
-  general information. There are also some troubleshooting tips built into
-  the :ref:`Python ` and
-  :ref:`MNE-Python ` installation pages (look for the
-  |hand-paper| symbols), and some tips related to 3D plotting problems on
-  :ref:`the advanced setup page `.
-
-- If you want to request new features or if you're confident that you have
-  found a bug, please create a new issue on the `GitHub issues page`_.
-  When reporting bugs, please try to replicate the bug with the MNE-Python
-  :ref:`sample data `, and make every effort to simplify your
-  example script to only the elements necessary to replicate the bug.
-
-
-.. toctree::
-   :hidden:
-
-   learn_python
-   faq
diff --git a/doc/overview/governance.rst b/doc/overview/governance.rst
deleted file mode 100644
index f7d81ee8a85..00000000000
--- a/doc/overview/governance.rst
+++ /dev/null
@@ -1,314 +0,0 @@
-.. _governance:
-
-==================
-Project Governance
-==================
-
-The purpose of this document is to formalize the governance process
-used by the MNE-Python project in both ordinary and extraordinary
-situations, and to clarify how decisions are made and how the various
-elements of our community interact, including the relationship between
-open source collaborative development and work that may be funded by
-for-profit or non-profit entities.
-
-
-The Project
-===========
-
-The MNE-Python Project (The Project) is an open source software project. The
-goal of The Project is to develop open source software for analysis of
-neuroscience data in Python. The Project is released under the BSD (or similar)
-open source license, is developed openly, and is hosted publicly under the
-``mne-tools`` GitHub organization.
-
-The Project is developed by a team of distributed developers, called
-Contributors.
Contributors are individuals who have contributed code, -documentation, designs, or other work to the Project. Anyone can be a -Contributor. Contributors can be affiliated with any legal entity or -none. Contributors participate in the project by submitting, reviewing, -and discussing GitHub Pull Requests and Issues and participating in open -and public Project discussions on GitHub, Discourse, and other -channels. The foundation of Project participation is openness and -transparency. - -The Project Community consists of all Contributors and Users of the -Project. Contributors work on behalf of and are responsible to the -larger Project Community and we strive to keep the barrier between -Contributors and Users as low as possible. - -The Project is not a legal entity, nor does it currently have any formal -relationships with legal entities. - - -Governance model -================ - -This section describes the governance and leadership model of The -Project. - -The foundations of Project governance are: - -- openness and transparency -- active contribution -- institutional neutrality - - -Traditionally, Project leadership was provided by a subset of Contributors, -informally called Core Developers, whose active and consistent contributions -were rewarded by granting them “commit rights” to the Project GitHub -repositories. In general, all Project decisions are made through consensus among -the Core Developers with input from the Community. - -While this approach has served us well, as the Project grows we see a need for -a more formal governance model. The MNE-Python Core Developers expressed a -preference for a leadership model which includes a BDFL (Benevolent Dictator -for Life). Therefore, moving forward The Project leadership will consist of a -BDFL and Steering Council. - -BDFL ----- - -The Project will have a BDFL (Benevolent Dictator for Life), who is currently -Alexandre Gramfort. As Dictator, the BDFL has the authority to make all final -decisions for The Project. As Benevolent, the BDFL, in practice, chooses to -defer that authority to the consensus of the community discussion channels and -the Steering Council (see below). It is expected, and in the past has been the -case, that the BDFL will only rarely assert their final authority. Because -rarely used, we refer to BDFL’s final authority as a “special” or “overriding” -vote. When it does occur, the BDFL override typically happens in situations -where there is a deadlock in the Steering Council or if the Steering Council -asks the BDFL to make a decision on a specific matter. To ensure the -benevolence of the BDFL, The Project encourages others to fork the project if -they disagree with the overall direction the BDFL is taking. The BDFL may -delegate their authority on a particular decision or set of decisions to -any other Council member at their discretion. - -The BDFL can appoint their successor, but it is expected that the Steering -Council would be consulted on this decision. If the BDFL is unable to appoint a -successor, the Steering Council will make this decision — preferably by -consensus, but if needed, by a majority vote. - -Note that the BDFL can step down at any time, and acting in good faith, will -also listen to serious calls to do so. Also note that the BDFL is more a role -for fallback decision making rather than that of a director/CEO. 
- -Steering Council ----------------- - -The Project will have a Steering Council that consists of Project Contributors -who have produced contributions that are substantial in quality and quantity, -and sustained over at least one year. The overall role of the Council is to -ensure, through working with the BDFL and taking input from the Community, the -long-term well-being of the project, both technically and as a community. - -During the everyday project activities, Council Members participate in -discussions, code review, and other project activities as peers with all other -Contributors and the Community. In these everyday activities, Council Members -do not have any special power or privilege through their membership on the -Council. However, it is expected that because of the quality and quantity of -their contributions and their expert knowledge of the Project Software and -Services, Council Members will provide useful guidance, both technical and -in terms of project direction, to potentially less experienced contributors. - -The Steering Council and its Members play a special role in certain situations. -In particular, the Council may: - -- Make decisions about the overall scope, vision, and direction of the project. -- Make decisions about strategic collaborations with other organizations or - individuals. -- Make decisions about specific technical issues, features, bugs, and pull - requests. They are the primary mechanism of guiding the code review process - and merging pull requests. -- Make decisions about the Services that are run by The Project and manage - those Services for the benefit of the Project and Community. -- Make decisions when regular community discussion does not produce consensus - on an issue in a reasonable time frame. -- Update policy documents, such as this one. - -Council membership -~~~~~~~~~~~~~~~~~~ - -To become eligible for being a Steering Council Member, an individual must be a -Project Contributor who has produced contributions that are substantial in -quality and quantity, and sustained over at least one year. Potential Council -Members are nominated by existing Council members and voted upon by the -existing Council after asking if the potential Member is interested and willing -to serve in that capacity. The Council will be initially formed from the set of -existing Core Developers who, as of May 2021, have been significantly -active over the last two years. - -When considering potential Members, the Council will look at candidates with a -comprehensive view of their contributions. This will include, but is not limited -to, code, code review, infrastructure work, Discourse participation, -community help/building, education and outreach, design work, etc. We are -deliberately not setting arbitrary quantitative metrics (like “100 commits in -this repo”) to avoid encouraging behavior that plays to the metrics rather than -The Project’s overall well-being. We want to encourage a diverse array of -backgrounds, viewpoints, and talents in our team, which is why we explicitly do -not define code as the sole metric on which council membership will be -evaluated. - -If a Council Member becomes inactive in the project for a period of one year, -they will be considered for removal from the Council. Before removal, inactive -Member will be approached to see if they plan on returning to active -participation. If not, they will be removed immediately upon a Council -vote. 
If they plan on returning to active participation soon, they will be -given a grace period of one year. If they don’t return to active participation -within that time period they will be removed by vote of the Council without -further grace period. All former Council Members can be considered for -membership again at any time in the future, like any other Project Contributor. -Retired Council Members will be listed on the project website, acknowledging -the period during which they were active in the Council. - -The Council reserves the right to eject current Members, other than the BDFL, -if they are deemed to be actively harmful to the project’s well-being, and -attempts at communication and conflict resolution have failed. - -A list of current Steering Council Members is maintained at the -page :ref:`governance-people`. - -Conflict of interest -~~~~~~~~~~~~~~~~~~~~ - -It is expected that the BDFL and Council Members will be employed at a wide -range of companies, universities, and non-profit organizations. Because of this, -it is possible that Members will have a conflict of interest. Such conflicts of -interest include, but are not limited to: - -- Financial interest, such as investments, employment or contracting work, - outside of The Project that may influence their work on The Project. -- Access to proprietary information of their employer that could potentially - leak into their work with the Project. - -All members of the Council, BDFL included, shall disclose to the rest of the -Council any conflict of interest they may have. Members with a conflict of -interest in a particular issue may participate in Council discussions on that -issue, but must recuse themselves from voting on the issue. If the BDFL has -recused themself for a particular decision, the Council will appoint a -substitute BDFL for that decision. - -Private communications of the Council -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Unless specifically required, all Council discussions and activities will be -public and done in collaboration and discussion with the Project Contributors -and Community. The Council will have a private communication channel that will be used -sparingly and only when a specific matter requires privacy. When private -communications and decisions are needed, the Council will do its best to -summarize those to the Community after removing personal/private/sensitive -information that should not be posted to the public internet. - -Council decision making -~~~~~~~~~~~~~~~~~~~~~~~ - -If it becomes necessary for the Steering Council to produce a formal -decision, then they will use a form of the `Apache Foundation voting -process `_. This is a -formalized version of consensus, in which +1 votes indicate agreement, --1 votes are vetoes (and must be accompanied with a rationale), -and one can also vote fractionally (e.g. -0.5, +0.5) if one -wishes to express an opinion without registering a full veto. These -numeric votes are also often used informally as a way of getting a -general sense of people's feelings on some issue, and should not -normally be taken as formal votes. A formal vote only occurs if -explicitly declared, and if this does occur, then the vote should be held -open for long enough to give all interested Council Members a chance to -respond — at least one week. - -In practice, we anticipate that for most Steering Council decisions -(e.g., voting in new members) a more informal process will suffice. 
- - -Institutional Partners and funding -================================== - -The Steering Council is the primary leadership for the project. No -outside institution, individual, or legal entity has the ability to own, -control, usurp, or influence the project other than by participating in -the Project as Contributors and Council Members. However, because -institutions can be an important funding mechanism for the project, it -is important to formally acknowledge institutional participation in the -project. These are Institutional Partners. - -An Institutional Contributor is any individual Project Contributor who -contributes to the project as part of their official duties at an -Institutional Partner. Likewise, an Institutional Council Member is any -Project Steering Council Member who contributes to the project as part -of their official duties at an Institutional Partner. - -With these definitions, an Institutional Partner is any recognized legal -entity in any country that employs at least 1 Institutional Contributor or -Institutional Council Member. Institutional Partners can be for-profit or -non-profit entities. - -Institutions become eligible to become an Institutional Partner by -employing individuals who actively contribute to The Project as part of -their official duties. To state this another way, the only way for a -Partner to influence the project is by actively contributing to the open -development of the project, in equal terms to any other member of the -community of Contributors and Council Members. Merely using Project -Software in institutional context does not allow an entity to become an -Institutional Partner. Financial gifts do not enable an entity to become -an Institutional Partner. Once an institution becomes eligible for -Institutional Partnership, the Steering Council must nominate and -approve the Partnership. - -If, at some point, an existing Institutional Partner stops having any -contributing employees, then a one year grace period commences. If, at -the end of this one-year period, they continue not to have any -contributing employees, then their Institutional Partnership will -lapse, and resuming it will require going through the normal process -for new Partnerships. - -An Institutional Partner is free to pursue funding for their work on The -Project through any legal means. This could involve a non-profit -organization raising money from private foundations and donors or a -for-profit company building proprietary products and services that -leverage Project Software and Services. Funding acquired by -Institutional Partners to work on The Project is called Institutional -Funding. However, no funding obtained by an Institutional Partner can -override the Steering Council. If a Partner has funding to do MNE-Python work -and the Council decides to not pursue that work as a project, the -Partner is free to pursue it on their own. However, in this situation, -that part of the Partner’s work will not be under the MNE-Python umbrella and -cannot use the Project trademarks in any way that suggests a formal -relationship. 
- -Institutional Partner benefits are: - -- optional acknowledgement on the MNE-Python website and in talks -- ability to acknowledge their own funding sources on the MNE-Python - website and in talks -- ability to influence the project through the participation of their - Council Member -- invitation of the Council Members to MNE-Python Developer Meetings - -A list of current Institutional Partners is maintained at the page -:ref:`supporting-institutions`. - - -Document history -================ - -https://github.com/mne-tools/mne-python/commits/main/doc/overview/governance.rst - - -Acknowledgements -================ - -Substantial portions of this document were adapted from the -`SciPy project's governance document -`_, -which in turn was adapted from -`Jupyter/IPython project's governance document -`_ and -`NumPy's governance document -`_. - -License -======= - -To the extent possible under law, the authors have waived all -copyright and related or neighboring rights to the MNE-Python project -governance document, as per the `CC-0 public domain dedication / license -`_. diff --git a/doc/overview/images/Digitizer-example.png b/doc/overview/images/Digitizer-example.png deleted file mode 100644 index 35ca1aa50c7..00000000000 Binary files a/doc/overview/images/Digitizer-example.png and /dev/null differ diff --git a/doc/overview/index.rst b/doc/overview/index.rst deleted file mode 100644 index b7af9b92fb8..00000000000 --- a/doc/overview/index.rst +++ /dev/null @@ -1,63 +0,0 @@ -.. include:: ../links.inc - -.. _documentation_overview: - -Documentation overview -====================== - -.. note:: - - If you haven't already installed MNE-Python, please take a look - at our :ref:`installation guides`. Please also kindly find some - resources for :doc:`learn_python` if you need to. - - -The documentation for MNE-Python is divided into four main sections: - -1. The :doc:`../auto_tutorials/index` provide narrative explanations, sample - code, and expected output for the most common MNE-Python analysis tasks. The - emphasis is on thorough explanations that get new users up to speed quickly, - at the expense of covering only a limited number of topics. - -2. The :doc:`How-to Examples <../auto_examples/index>` provides working code - samples demonstrating various analysis and visualization techniques. These - examples often lack the narrative explanations seen in the tutorials, but - can be a useful way to discover new analysis or plotting ideas, or to see - how a particular technique you've read about can be applied using - MNE-Python. - -3. The :doc:`../glossary` provides short definitions of MNE-Python-specific - vocabulary and general neuroimaging concepts. The glossary is often a good - place to look if you don't understand a term or acronym used somewhere else - in the documentation. - -4. The :doc:`API reference <../python_reference>` provides documentation for - the classes, functions and methods in the MNE-Python codebase. This is the - same information that is rendered when running - :samp:`help(mne.{})` in an interactive Python session, or - when typing :samp:`mne.{}?` in an IPython session or Jupyter - notebook. - -The rest of the MNE-Python documentation pages (parts outside of the four -categories above) are shown in the navigation menu, including the -:ref:`list of example datasets`, -:ref:`implementation details`, and more. -Documentation for the related C and MATLAB tools are available here: - -- :ref:`MNE-MATLAB ` (HTML) -- `MNE-C `_ (PDF) - -.. 
toctree:: - :hidden: - - Tutorials<../auto_tutorials/index> - Examples<../auto_examples/index> - ../glossary - Implementation details - design_philosophy - Example datasets - Command-line tools<../generated/commands> - migrating - cookbook - cite - ../cited diff --git a/doc/overview/matlab.rst b/doc/overview/matlab.rst deleted file mode 100644 index 7934f07a77d..00000000000 --- a/doc/overview/matlab.rst +++ /dev/null @@ -1,1220 +0,0 @@ -:orphan: - -.. include:: ../links.inc - -.. _mne_matlab: - -======================== -MNE-MATLAB documentation -======================== - -.. note:: The MNE MATLAB Toolbox is compatible with Matlab versions 7.0 or later. - -Overview -######## - -The MNE software contains a collection Matlab ``.m``-files to -facilitate interfacing with binary file formats of the MNE software. -The toolbox is located at ``$MNE_ROOT/share/matlab`` . The -names of the MNE Matlab toolbox functions begin either with ``mne_`` or -with ``fiff_`` . When you source the ``mne_setup`` script -as described in :ref:`user_environment`, one of the following actions -takes place: - -- If you do not have the Matlab startup.m - file, it will be created and lines allowing access to the MNE Matlab - toolbox are added. - -- If you have startup.m and it does not have the standard MNE - Matlab toolbox setup lines, you will be instructed to add them manually. - -- If you have startup.m and the standard MNE Matlab toolbox - setup lines are there, nothing happens. - -A summary of the available routines is provided in the `MNE-C manual`_. The -toolbox also contains a set of examples which may be useful starting points -for your own development. The names of these functions start with ``mne_ex``. - -.. note:: - - The MATLAB function ``fiff_setup_read_raw`` has a significant change. The - sample numbers now take into account possible initial skip in the file, - *i.e.*, the time between the start of the data acquisition and the start of - saving the data to disk. The ``first_samp`` member of the returned structure - indicates the initial skip in samples. If you want your own routines, which - assume that initial skip has been removed, perform identically with the - previous version, subtract ``first_samp`` from the sample numbers you - specify to ``fiff_read_raw_segment``. Furthermore, ``fiff_setup_read_raw`` - has an optional argument to allow reading of unprocessed MaxShield data - acquired with the Elekta MEG systems. - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. _BGBCGHAG: -.. table:: High-level reading routines. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_find_evoked | Find all evoked data sets from a file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_bad_channels | Read the bad channel list. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_ctf_comp | Read CTF software gradient compensation data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_evoked | Read evoked-response data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_evoked_all | Read all evoked-response data from a file. 
| - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_meas_info | Read measurement information. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_mri | Read an MRI description file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_proj | Read signal-space projection data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_raw_segment | Read a segment of raw data with time limits are specified | - | | in samples. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_raw_segment_times | Read a segment of raw data with time limits specified | - | | in seconds. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_setup_read_raw | Set up data structures before using fiff_read_raw_segment | - | | or fiff_read_raw_segment_times. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Channel selection utilities. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_pick_channels | Create a selector to pick desired channels from data | - | | according to include and exclude lists. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_pick_channels_evoked | Pick desired channels from evoked-response data according | - | | to include and exclude lists. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_pick_info | Modify measurement info to include only selected channels. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_pick_types | Create a selector to pick desired channels from data | - | | according to channel types (MEG, EEG, STIM) in combination | - | | with include and exclude lists. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_pick_types_evoked | Pick desired channels from evoked-response data according | - | | to channel types (MEG, EEG, STIM) in combination with | - | | include and exclude lists. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Coordinate transformation utilities. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_invert_transform | Invert a coordinate transformation structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_reset_ch_pos | Reset channel position transformation to the default values | - | | present in the file. 
| - +--------------------------------+--------------------------------------------------------------+ - | fiff_transform_eeg_chs | Transform electrode positions to another coordinate frame. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_transform_meg_chs | Apply a coordinate transformation to the sensor location | - | | data to bring the integration points to another coordinate | - | | frame. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Basic reading routines. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_define_constants | Define a structure which contains the constant relevant | - | | to fif files. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_dir_tree_find | Find nodes of a given type in a directory tree structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_list_dir_tree | List a directory tree structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_make_dir_tree | Create a directory tree structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_open | Open a fif file and create the directory tree structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_named_matrix | Read a named matrix from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_tag | Read one tag from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_tag_info | Read the info of one tag from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_split_name_list | Split a colon-separated list of names into a cell array | - | | of strings. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Writing routines. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_end_block | Write a FIFF_END_BLOCK tag. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_end_file | Write the standard closing. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_start_block | Write a FIFF_START_BLOCK tag. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_start_file | Write the appropriate beginning of a file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_ch_info | Write a channel information structure. 
| - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_coord_trans | Write a coordinate transformation structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_ctf_comp | Write CTF compensation data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_dig_point | Write one digitizer data point. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_complex | Write single-precision complex numbers. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_complex_matrix | Write a single-precision complex matrix. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_double | Write double-precision floats. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_double_complex | Write double-precision complex numbers. | - +--------------------------------+--------------------------------------------------------------+ - |fiff_write_double_complex_matrix| Write a double-precision complex matrix. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_double_matrix | Write a double-precision matrix. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_evoked | Write an evoked-reponse data file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_float | Write single-precision floats. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_float_matrix | Write a single-precision matrix. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_id | Write an id tag. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_int | Write 32-bit integers. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_int_matrix | Write a matrix of 32-bit integers. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_name_list | Write a name list. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_named_matrix | Write a named matrix. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_proj | Write SSP data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_short | Write 16-bit integers. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_string | Write a string. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: High-level data writing routines. 
- - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_write_evoked | Write an evoked-response data file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_finish_writing_raw | Write the closing tags to a raw data file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_start_writing_raw | Start writing raw data file, *i.e.*, write the measurement | - | | information. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_dig_file | Write a fif file containing digitization data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_raw_buffer | Write one raw data buffer. This is used after a call to | - | | fiff_start_writing_raw. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Coil definition utilities. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_add_coil_defs | Add coil definitions to an array of channel information | - | | structures. | - +--------------------------------+--------------------------------------------------------------+ - | mne_load_coil_def | Load a coil definition file. | - +--------------------------------+--------------------------------------------------------------+ - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Routines for software gradient compensation and signal-space projection. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_compensate_to | Apply or remove CTF software gradient compensation from | - | | evoked-response data. | - +--------------------------------+--------------------------------------------------------------+ - | mne_get_current_comp | Get the state of software gradient compensation from | - | | measurement info. | - +--------------------------------+--------------------------------------------------------------+ - | mne_make_compensator | Make a compensation matrix which switches the status of | - | | CTF software gradient compensation from one state to another.| - +--------------------------------+--------------------------------------------------------------+ - | mne_make_projector_info | Create a signal-space projection operator with the | - | | projection item definitions and cell arrays of channel names | - | | and bad channel names as input. | - +--------------------------------+--------------------------------------------------------------+ - | mne_make_projector_info | Like mne_make_projector but uses the measurement info | - | | structure as input. | - +--------------------------------+--------------------------------------------------------------+ - | mne_set_current_comp | Change the information about the compensation status in | - | | measurement info. 
| - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: High-level routines for reading MNE data files. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_pick_channels_cov | Pick desired channels from a sensor covariance matrix. | - +--------------------------------+--------------------------------------------------------------+ - | mne_pick_channels_forward | Pick desired channels (rows) from a forward solution. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_bem_surfaces | Read triangular tessellations of surfaces for | - | | boundary-element models. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_cov | Read a covariance matrix. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_epoch | Read an epoch of data from the output file of mne_epochs2mat.| - +--------------------------------+--------------------------------------------------------------+ - | mne_read_events | Read an event list from a fif file produced by | - | | mne_browse_raw or mne_process_raw. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_forward_solution | Read a forward solution from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_inverse_operator | Read an inverse operator from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_morph_map | Read an morphing map produced with mne_make_morph_maps. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_noise_cov | Read a noise-covariance matrix from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_source_spaces | Read source space information from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: High-level routines for writing MNE data files. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_write_cov | Write a covariance matrix to an open file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_cov_file | Write a complete file containing just a covariance matrix. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_events | Write a fif format event file compatible with mne_browse_raw | - | | and mne_process_raw. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_inverse_sol_stc | Write stc files containing an inverse solution or other | - | | dynamic data on the cortical surface. 
| - +--------------------------------+--------------------------------------------------------------+ - | mne_write_inverse_sol_w | Write w files containing an inverse solution or other static | - | | data on the cortical surface. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. _BABBDDAI: -.. table:: Routines related to stc, w, and label files. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_read_stc_file | Read data from one stc file. The vertex numbering in the | - | | returned structure will start from 0. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_stc_file1 | Read data from one stc file. The vertex numbering in the | - | | returned structure will start from 1. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_w_file | Read data from one w file. The vertex numbering in the | - | | returned structure will start from 0. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_w_file1 | Read data from one w file. The vertex numbering in the | - | | returned structure will start from 1. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_stc_file | Write a new stc file. It is assumed the the vertex numbering | - | | in the input data structure containing the stc information | - | | starts from 0. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_stc_file1 | Write a new stc file. It is assumed the the vertex numbering | - | | in the input data structure containing the stc information | - | | starts from 1. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_w_file | Write a new w file. It is assumed the the vertex numbering | - | | in the input data structure containing the w file | - | | information starts from 0. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_w_file1 | Write a new w file. It is assumed the the vertex numbering | - | | in the input data structure containing the w file | - | | information starts from 1. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_label_file | Read a label file (ROI). | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_label_file | Write a label file (ROI). | - +--------------------------------+--------------------------------------------------------------+ - | mne_label_time_courses | Extract time courses corresponding to a label from an | - | | stc file. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Routines for reading FreeSurfer surfaces. 
- - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_read_curvature | Read a curvature file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_surface | Read one surface, return the vertex locations and | - | | triangulation info. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_surfaces | Read surfaces corresponding to one or both hemispheres. | - | | Optionally read curvature information and add derived | - | | surface data. | - +--------------------------------+--------------------------------------------------------------+ - | mne_reduce_surface | Reduce the number of triangles on a surface using the | - | | reducepatch Matlab function. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_surface | Write a FreeSurfer surface file. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. _BGBEGFBD: -.. table:: Utility functions. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_block_diag | Create a sparse block-diagonal matrix out of a vector. | - +--------------------------------+--------------------------------------------------------------+ - | mne_combine_xyz | Calculate the square sum of the three Cartesian components | - | | of several vectors listed in one row or column vector. | - +--------------------------------+--------------------------------------------------------------+ - | mne_file_name | Compose a file name relative to $MNE_ROOT. | - +--------------------------------+--------------------------------------------------------------+ - | mne_find_channel | Find a channel by name from measurement info. | - +--------------------------------+--------------------------------------------------------------+ - | mne_find_source_space_hemi | Determine whether a given source space belongs to the left | - | | or right hemisphere. | - +--------------------------------+--------------------------------------------------------------+ - | mne_fread3 | Read a three-byte integer. | - +--------------------------------+--------------------------------------------------------------+ - | mne_fwrite3 | Write a three-byte integer. | - +--------------------------------+--------------------------------------------------------------+ - | mne_make_combined_event_file | Combine data from several trigger channels into one event | - | | file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_omit_first_line | Omit first line from a multi-line message. This routine is | - | | useful for formatting error messages. | - +--------------------------------+--------------------------------------------------------------+ - | mne_prepare_inverse_operator | Prepare inverse operator data for calculating L2 | - | | minimum-norm solutions and dSPM. 
| - +--------------------------------+--------------------------------------------------------------+ - | mne_setup_toolbox | Set up the MNE Matlab toolbox. | - +--------------------------------+--------------------------------------------------------------+ - | mne_transform_coordinates | Transform locations between different coordinate systems. | - | | This function uses the output file from | - | | ``mne_collect_transforms``. | - +--------------------------------+--------------------------------------------------------------+ - | mne_transpose_named_matrix | Create a transpose of a named matrix. | - +--------------------------------+--------------------------------------------------------------+ - | mne_transform_source_space_to | Transform source space data to another coordinate frame. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. _BGBEFADJ: -.. table:: Examples demonstrating the use of the toolbox. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_ex_average_epochs | Example of averaging epoch data produced by mne_epochs2mat. | - +--------------------------------+--------------------------------------------------------------+ - | mne_ex_cancel_noise | Example of noise cancellation procedures. | - +--------------------------------+--------------------------------------------------------------+ - | mne_ex_compute_inverse | Example of computing a L2 minimum-norm estimate or a dSPM | - | | solution. | - +--------------------------------+--------------------------------------------------------------+ - | mne_ex_data_sets | Example of listing evoked-response data sets. | - +--------------------------------+--------------------------------------------------------------+ - | mne_ex_evoked_grad_amp | Compute tangential gradient amplitudes from planar | - | | gradiometer data. | - +--------------------------------+--------------------------------------------------------------+ - | mne_ex_read_epochs | Read epoch data from a raw data file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_ex_read_evoked | Example of reading evoked-response data. | - +--------------------------------+--------------------------------------------------------------+ - | mne_ex_read_raw | Example of reading raw data. | - +--------------------------------+--------------------------------------------------------------+ - | mne_ex_read_write_raw | Example of processing raw data (read and write). | - +--------------------------------+--------------------------------------------------------------+ - -.. note:: In order for the inverse operator calculation to work correctly with data processed with the Elekta-Neuromag Maxfilter (TM) software, the so-called *processing history* block must be included in data files. Previous versions of the MNE Matlab functions did not copy processing history to files saved. As of March 30, 2009, the Matlab toolbox routines fiff_start_writing_raw and fiff_write_evoked have been enhanced to include these data to the output file as appropriate. 
If you have older raw data files created in Matlab from input which has been processed with Maxfilter, it is necessary to copy the *processing history* block from the original to the modified raw data file using the ``mne_copy_processing_history`` utility. The raw data processing programs mne_browse_raw and mne_process_raw have handled copying of the processing history since revision 2.5 of the MNE software. - -Some data structures -#################### - -The MNE Matlab toolbox relies heavily on structures to organize -the data. This section gives detailed information about fields in -the essential data structures employed in the MNE Matlab toolbox. -In the structure definitions, data types referring to other MNE -Matlab toolbox structures are shown in italics. In addition, :ref:`matlab_fif_constants` -lists the values of various FIFF constants defined by fiff_define_constants.m. -The documented structures are: - -**tag** - - Contains one tag from the fif file, see :ref:`BGBGIIGD`. - -**taginfo** - - Contains the information about one tag, see :ref:`BGBBJBJJ`. - -**directory** - - Contains the tag directory as a tree structure, see :ref:`BGBEDHBG`. - -**id** - - A fif ID, see :ref:`BGBDAHHJ`. - -**named matrix** - - Contains a matrix with names for rows and/or columns, see :ref:`BGBBEDID`. - A named matrix is used to store, *e.g.*, SSP vectors and forward solutions. - -**trans** - - A 4 x 4 coordinate-transformation matrix operating on augmented column - vectors. Indication of the coordinate frames to which this transformation - relates is included, see :ref:`BGBDHBIF`. - -**dig** - - A Polhemus digitizer data point, see :ref:`BGBHDEDG`. - -**coildef** - - The coil definition structure useful for forward calculations and array - visualization, see :ref:`BGBGBEBH`. For more detailed information on - coil definitions, see :ref:`coil_geometry_information`. - -**ch** - - Channel information structure, see :ref:`BGBIABGD`. - -**proj** - - Signal-space projection data, see :ref:`BGBCJHJB`. - -**comp** - - Software gradiometer compensation data, see :ref:`BGBJDIFD`. - -**measurement info** - - Translation of the FIFFB_MEAS_INFO entity, see :ref:`BGBFHDIJ` and - :class:`mne.Info`. This data structure is returned by fiff_read_meas_info, but - will not be as complete as :class:`mne.Info`. - -**surf** - - Used to represent triangulated surfaces and cortical source spaces, see :ref:`BGBEFJCB`. - -**cov** - - Used for storing covariance matrices, see :ref:`BGBJJIED`. - -**fwd** - - Forward solution data returned by mne_read_forward_solution, - see :ref:`BGBFJIBJ`. - -**inv** - - Inverse operator decomposition data returned by mne_read_inverse_operator. - For more information on inverse operator - decomposition, see :ref:`minimum_norm_estimates`. For an example of how to - compute an inverse solution using this data, see the sample routine mne_ex_compute_inverse. - -.. note:: The MNE Matlab toolbox tries its best to employ vertex numbering starting from 1 as opposed to 0 as recorded in the data files. There are, however, two exceptions where explicit attention to the vertex numbering convention is needed. First, the standard stc and w file reading and writing routines return and assume zero-based vertex numbering. There are now versions with names ending with '1', which return and assume one-based vertex numbering, see :ref:`BABBDDAI`. Second, the logno field of the channel information in the data files produced by mne_compute_raw_inverse is the zero-based number of the vertex whose source space signal is contained on this channel.
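-
-For a quick illustration of the first exception in the note above, the two stc
-readers differ only in the vertex numbering they return. The sketch below is a
-minimal example: the single-argument calling sequence, the placeholder file
-name, and the stc field names (``vertices`` and ``data``) are assumptions
-based on the conventions described in this manual and should be checked
-against the on-line ``help`` of each routine::
-
-   stcfile = 'mydata-lh.stc';            % placeholder stc file name
-   stc0 = mne_read_stc_file(stcfile);    % vertex numbers start from 0
-   stc1 = mne_read_stc_file1(stcfile);   % vertex numbers start from 1
-   % the data are identical; only the vertex numbering differs by one
-   isequal(stc0.data, stc1.data)
-   isequal(stc0.vertices + 1, stc1.vertices)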
- - -.. tabularcolumns:: |p{0.38\linewidth}|p{0.06\linewidth}|p{0.46\linewidth}| -.. _matlab_fif_constants: -.. table:: FIFF constants. - - +-------------------------------+-------+----------------------------------------------------------+ - | Name | Value | Purpose | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MEG_CH | 1 | This is a MEG channel. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_REF_MEG_CH | 301 | This is a reference MEG channel, located far away from the | - | | | head. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_EEG_CH | 2 | This is an EEG channel. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MCG_CH | 201 | This is an MCG channel. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_STIM_CH | 3 | This is a digital trigger channel. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_EOG_CH | 202 | This is an EOG channel. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_EMG_CH | 302 | This is an EMG channel. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ECG_CH | 402 | This is an ECG channel. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MISC_CH | 502 | This is a miscellaneous analog channel. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_RESP_CH | 602 | This channel contains respiration monitor output. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_UNKNOWN | 0 | Unknown coordinate frame. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_DEVICE | 1 | The MEG device coordinate frame. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_ISOTRAK | 2 | The Polhemus digitizer coordinate frame (does not appear | - | | | in data files). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_HPI | 3 | HPI coil coordinate frame (does not appear in data | - | | | files). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_HEAD | 4 | The MEG head coordinate frame (Neuromag convention). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_MRI | 5 | The MRI coordinate frame. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_MRI_SLICE | 6 | The coordinate frame of a single MRI slice.
| - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_MRI_DISPLAY | 7 | The preferred coordinate frame for displaying the MRIs | - | | | (used by MRIlab). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_DICOM_DEVICE | 8 | The DICOM coordinate frame (does not appear in files). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_IMAGING_DEVICE | 9 | A generic imaging device coordinate frame (does not | - | | | appear in files). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_COORD_TUFTS_EEG | 300 | The Tufts EEG data coordinate frame. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_COORD_CTF_DEVICE | 1001 | The CTF device coordinate frame (does not appear in | - | | | files). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_COORD_CTF_HEAD | 1004 | The CTF/4D head coordinate frame. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_AVERAGE | 100 | Data aspect: average. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_STD_ERR | 101 | Data aspect: standard error of mean. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_SINGLE | 102 | Single epoch. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_SUBAVERAGE | 103 | One subaverage. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_ALTAVERAGE | 104 | One alternating (plus-minus) subaverage. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_SAMPLE | 105 | A sample cut from raw data. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_POWER_DENSITY | 106 | Power density spectrum. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_DIPOLE_WAVE | 200 | The time course of an equivalent current dipole. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_BEM_SURF_ID_UNKNOWN | -1 | Unknown BEM surface. 
| - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_BEM_SURF_ID_BRAIN | 1 | The inner skull surface | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_BEM_SURF_ID_SKULL | 3 | The outer skull surface | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_BEM_SURF_ID_HEAD | 4 | The scalp surface | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_SURF_LEFT_HEMI | 101 | Left hemisphere cortical surface | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_SURF_RIGHT_HEMI | 102 | Right hemisphere cortical surface | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_CARDINAL | 1 | Digitization point which is a cardinal landmark a.k.a. | - | | | fiducial point | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_HPI | 2 | Digitized HPI coil location | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_EEG | 3 | Digitized EEG electrode location | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_ECG | 3 | Digitized ECG electrode location | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_EXTRA | 4 | Additional head surface point | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_LPA | 1 | Identifier for left auricular landmark | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_NASION | 2 | Identifier for nasion | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_RPA | 3 | Identifier for right auricular landmark | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_FIXED_ORI | 1 | Fixed orientation constraint used in the computation of | - | | | a forward solution. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_FREE_ORI | 2 | No orientation constraint used in the computation of | - | | | a forward solution | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_MEG | 1 | Indicates an inverse operator based on MEG only | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_EEG | 2 | Indicates an inverse operator based on EEG only. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_MEG_EEG | 3 | Indicates an inverse operator based on both MEG and EEG. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_UNKNOWN_COV | 0 | An unknown covariance matrix | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_NOISE_COV | 1 | Indicates a noise covariance matrix. 
| - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_SENSOR_COV | 1 | Synonym for FIFFV_MNE_NOISE_COV | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_SOURCE_COV | 2 | Indicates a source covariance matrix | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_FMRI_PRIOR_COV | 3 | Indicates a covariance matrix associated with fMRI priors| - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_SIGNAL_COV | 4 | Indicates the data (signal + noise) covariance matrix | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_DEPTH_PRIOR_COV | 5 | Indicates the depth prior (depth weighting) covariance | - | | | matrix | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_ORIENT_PRIOR_COV | 6 | Indicates the orientation (loose orientation constraint) | - | | | prior covariance matrix | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_PROJ_ITEM_NONE | 0 | The nature of this projection item is unknown | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_PROJ_ITEM_FIELD | 1 | This projection item is a generic field pattern or | - | | | field patterns. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_PROJ_ITEM_DIP_FIX | 2 | This projection item is the field of one dipole | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_PROJ_ITEM_DIP_ROT | 3 | This projection item corresponds to the fields of three | - | | | or two orthogonal dipoles at some location. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_PROJ_ITEM_HOMOG_GRAD | 4 | This projection item contains the homogeneous gradient | - | | | fields as seen by the sensor array. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_PROJ_ITEM_HOMOG_FIELD | 5 | This projection item contains the three homogeneous field| - | | | components as seen by the sensor array. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_PROJ_ITEM_EEG_AVREF | 10 | This projection item corresponds to the average EEG | - | | | reference. | - +-------------------------------+-------+----------------------------------------------------------+ - -.. _BGBGIIGD: - -.. table:: The tag structure. - - ======= =========== ============================================ - Field Data type Description - ======= =========== ============================================ - kind int32 The kind of the data item. - type uint32 The data type used to represent the data. - size int32 Size of the data in bytes. - next int32 Byte offset of the next tag in the file. - data various The data itself. - ======= =========== ============================================
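-
-As an illustration of how the low-level structures above are used together, a
-tag can be read directly from an open fif file. The routines ``fiff_open`` and
-``fiff_read_tag`` used in this sketch belong to the toolbox's low-level
-fiff_* layer; the calling sequences and output names shown here are
-assumptions and should be verified with the on-line ``help``::
-
-   % 'sample_audvis_raw.fif' is a placeholder file name
-   [fid, tree, dir] = fiff_open('sample_audvis_raw.fif');
-   % dir is assumed to be an array of taginfo structures; read the tag
-   % located at the byte offset stored in the first entry
-   tag = fiff_read_tag(fid, dir(1).pos);
-   disp(tag.kind);    % the kind field of the tag structure above
-   fclose(fid);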
- -.. _BGBBJBJJ: - -.. table:: The taginfo structure. - - ======= =========== ============================================ - Field Data type Description - ======= =========== ============================================ - kind double The kind of the data item. - type double The data type used to represent the data. - size double Size of the data in bytes. - pos double Byte offset to this tag in the file. - ======= =========== ============================================ - -.. _BGBEDHBG: - -.. table:: The directory structure. - - ============ ============ ================================================================ - Field Data type Description - ============ ============ ================================================================ - block double The block id of this directory node. - id id The unique identifier of this node. - parent_id id The unique identifier of the node this node was derived from. - nent double Number of entries in this node. - nchild double Number of children of this node. - dir taginfo Information about tags in this node. - children directory The children of this node. - ============ ============ ================================================================ - -.. _BGBDAHHJ: - -.. table:: The id structure. - - ========== =========== ============================================================ - Field Data type Description - ========== =========== ============================================================ - version int32 The fif file version (major << 16 | minor). - machid int32(2) Unique identifier of the computer this id was created on. - secs int32 Time since January 1, 1970 (seconds). - usecs int32 Time since January 1, 1970 (microseconds past secs). - ========== =========== ============================================================ - -.. _BGBBEDID: - -.. table:: The named matrix structure. - - ============ =========== ====================================================================== - Field Data type Description - ============ =========== ====================================================================== - nrow int32 Number of rows. - ncol int32 Number of columns. - row_names cell(*) The names associated with the rows. This member may be empty. - col_names cell(*) The names associated with the columns. This member may be empty. - data various The matrix data, usually of type single or double. - ============ =========== ====================================================================== - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBDHBIF: -.. table:: The trans structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | from | int32 | The source coordinate frame, see | - | | | :ref:`matlab_fif_constants`. Look | - | | | for entries starting with FIFFV_COORD or FIFFV_MNE_COORD.| - +---------------------------+-----------+----------------------------------------------------------+ - | to | int32 | The destination coordinate frame. | - +---------------------------+-----------+----------------------------------------------------------+ - | trans |double(4,4)| The 4-by-4 coordinate transformation matrix. This | - | | | operates from augmented position column vectors given in | - | | | *from* coordinates to give results in *to* coordinates. | - +---------------------------+-----------+----------------------------------------------------------+
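-
-In practice, applying a trans structure to a point amounts to multiplying an
-augmented column vector by its ``trans`` field. A minimal sketch, assuming
-``t`` is a trans structure as defined above and ``r_from`` is a location in
-metres expressed in the *from* coordinate frame::
-
-   r_from = [0.01; 0.02; 0.03];       % example point [m] in the *from* frame
-   r_to_aug = t.trans * [r_from; 1];  % augmented 4 x 1 column vector
-   r_to = r_to_aug(1:3);              % the same point in the *to* frame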
- - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBHDEDG: -.. table:: The dig structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | kind | int32 | The type of digitizing point. Possible values are listed | - | | | in :ref:`matlab_fif_constants`. Look for entries | - | | | starting with FIFFV_POINT. | - +---------------------------+-----------+----------------------------------------------------------+ - | ident | int32 | Identifier for this point. | - +---------------------------+-----------+----------------------------------------------------------+ - | r | single(3) | The location of this point. | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBGBEBH: -.. table:: The coildef structure. For more detailed information, see :ref:`coil_geometry_information`. - - +-------------------+-------------------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===================+===================+==========================================================+ - | class | double | The coil (or electrode) class. | - +-------------------+-------------------+----------------------------------------------------------+ - | id | double | The coil (or electrode) id. | - +-------------------+-------------------+----------------------------------------------------------+ - | accuracy | double | Representation accuracy. | - +-------------------+-------------------+----------------------------------------------------------+ - | num_points | double | Number of integration points. | - +-------------------+-------------------+----------------------------------------------------------+ - | size | double | Coil size. | - +-------------------+-------------------+----------------------------------------------------------+ - | baseline | double | Coil baseline. | - +-------------------+-------------------+----------------------------------------------------------+ - | description | char(*) | Coil description. | - +-------------------+-------------------+----------------------------------------------------------+ - | coildefs | double | Each row contains the integration point weight, followed | - | | (num_points,7) | by location [m] and normal. | - +-------------------+-------------------+----------------------------------------------------------+ - | FV | struct | Contains the faces and vertices which can be used to | - | | | draw the coil for visualization. | - +-------------------+-------------------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBIABGD: -.. table:: The ch structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | scanno | int32 | Scanning order number, starting from 1. | - +---------------------------+-----------+----------------------------------------------------------+ - | logno | int32 | Logical channel number, conventions in the usage of this | - | | | number vary.
| - +---------------------------+-----------+----------------------------------------------------------+ - | kind | int32 | The channel type (FIFFV_MEG_CH, FIFFV_EEG_CH, etc., see | - | | | :ref:`matlab_fif_constants`). | - +---------------------------+-----------+----------------------------------------------------------+ - | range | double | The hardware-oriented part of the calibration factor. | - | | | This should be only applied to the continuous raw data. | - +---------------------------+-----------+----------------------------------------------------------+ - | cal | double | The calibration factor to bring the channels to physical | - | | | units. | - +---------------------------+-----------+----------------------------------------------------------+ - | loc | double(12)| The channel location. The first three numbers indicate | - | | | the location [m], followed by the three unit vectors of | - | | | the channel-specific coordinate frame. These data contain| - | | | the values saved in the fif file and should not be | - | | | changed. The values are specified in device coordinates | - | | | for MEG and in head coordinates for EEG channels, | - | | | respectively. | - +---------------------------+-----------+----------------------------------------------------------+ - | coil_trans |double(4,4)| Initially, transformation from the channel coordinates | - | | | to device coordinates. This transformation is updated by | - | | | calls to fiff_transform_meg_chs and | - | | | fiff_transform_eeg_chs. | - +---------------------------+-----------+----------------------------------------------------------+ - | eeg_loc | double(6) | The location of the EEG electrode in coord_frame | - | | | coordinates. The first three values contain the location | - | | | of the electrode [m]. If six values are present, the | - | | | remaining ones indicate the location of the reference | - | | | electrode for this channel. | - +---------------------------+-----------+----------------------------------------------------------+ - | coord_frame | int32 | Initially, the coordinate frame is FIFFV_COORD_DEVICE | - | | | for MEG channels and FIFFV_COORD_HEAD for EEG channels. | - +---------------------------+-----------+----------------------------------------------------------+ - | unit | int32 | Unit of measurement. Relevant values are: 201 = T/m, | - | | | 112 = T, 107 = V, and 202 = Am. | - +---------------------------+-----------+----------------------------------------------------------+ - | unit_mul | int32 | The data are given in units multiplied by 10^unit_mul. | - | | | Presently, unit_mul is always zero. | - +---------------------------+-----------+----------------------------------------------------------+ - | ch_name | char(*) | Name of the channel. | - +---------------------------+-----------+----------------------------------------------------------+ - | coil_def | coildef | The coil definition structure. This is present only if | - | | | mne_add_coil_defs has been successfully called. | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBCJHJB: -.. table:: The proj structure.
- - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | kind | int32 | The type of the projection item. Possible values are | - | | | listed in :ref:`matlab_fif_constants`. Look for entries | - | | | starting with FIFFV_PROJ_ITEM or FIFFV_MNE_PROJ_ITEM. | - +---------------------------+-----------+----------------------------------------------------------+ - | active | int32 | Is this item active, i.e., applied or about to be | - | | | applied to the data. | - +---------------------------+-----------+----------------------------------------------------------+ - | data | named | The projection vectors. The column names indicate the | - | | matrix | names of the channels associated to the elements of the | - | | | vectors. | - +---------------------------+-----------+----------------------------------------------------------+ - - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBJDIFD: -.. table:: The comp structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | ctfkind | int32 | The kind of the compensation as stored in file. | - +---------------------------+-----------+----------------------------------------------------------+ - | kind | int32 | ctfkind mapped into small integer numbers. | - +---------------------------+-----------+----------------------------------------------------------+ - | save_calibrated | logical | Were the compensation data saved in calibrated form. If | - | | | this field is false, the matrix will be decalibrated | - | | | using the fields row_cals and col_cals when the | - | | | compensation data are saved by the toolbox. | - +---------------------------+-----------+----------------------------------------------------------+ - | row_cals | double(*) | Calibration factors applied to the rows of the | - | | | compensation data matrix when the data were read. | - +---------------------------+-----------+----------------------------------------------------------+ - | col_cals | double(*) | Calibration factors applied to the columns of the | - | | | compensation data matrix when the data were read. | - +---------------------------+-----------+----------------------------------------------------------+ - | data | named | The compensation data matrix. The row_names list the | - | | matrix | names of the channels to which this compensation applies | - | | | and the col_names the compensation channels. | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBFHDIJ: -.. table:: The meas info structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | file_id | id | The fif ID of the measurement file. 
| - +---------------------------+-----------+----------------------------------------------------------+ - | meas_id | id | The ID assigned to this measurement by the acquisition | - | | | system or during file conversion. | - +---------------------------+-----------+----------------------------------------------------------+ - | nchan | int32 | Number of channels. | - +---------------------------+-----------+----------------------------------------------------------+ - | sfreq | double | Sampling frequency. | - +---------------------------+-----------+----------------------------------------------------------+ - | highpass | double | Highpass corner frequency [Hz]. Zero indicates a DC | - | | | recording. | - +---------------------------+-----------+----------------------------------------------------------+ - | lowpass | double | Lowpass corner frequency [Hz]. | - +---------------------------+-----------+----------------------------------------------------------+ - | chs | ch(nchan) | An array of channel information structures. | - +---------------------------+-----------+----------------------------------------------------------+ - | ch_names |cell(nchan)| Cell array of channel names. | - +---------------------------+-----------+----------------------------------------------------------+ - | dev_head_t | trans | The device to head transformation. | - +---------------------------+-----------+----------------------------------------------------------+ - | ctf_head_t | trans | The transformation from 4D/CTF head coordinates to | - | | | Neuromag head coordinates. This is only present in | - | | | 4D/CTF data. | - +---------------------------+-----------+----------------------------------------------------------+ - | dev_ctf_t | trans | The transformation from device coordinates to 4D/CTF | - | | | head coordinates. This is only present in 4D/CTF data. | - +---------------------------+-----------+----------------------------------------------------------+ - | dig | dig(*) | The Polhemus digitization data in head coordinates. | - +---------------------------+-----------+----------------------------------------------------------+ - | bads | cell(*) | Bad channel list. | - +---------------------------+-----------+----------------------------------------------------------+ - | projs | proj(*) | SSP operator data. | - +---------------------------+-----------+----------------------------------------------------------+ - | comps | comp(*) | Software gradient compensation data. | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBEFJCB: - -.. table:: The surf structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | id | int32 | The surface ID. | - +---------------------------+-----------+----------------------------------------------------------+ - | sigma | double | The electrical conductivity of the compartment bounded by| - | | | this surface. This field is present in BEM surfaces only.| - +---------------------------+-----------+----------------------------------------------------------+ - | np | int32 | Number of vertices on the surface. 
| - +---------------------------+-----------+----------------------------------------------------------+ - | ntri | int32 | Number of triangles on the surface. | - +---------------------------+-----------+----------------------------------------------------------+ - | coord_frame | int32 | Coordinate frame in which the locations and orientations | - | | | are expressed. | - +---------------------------+-----------+----------------------------------------------------------+ - | rr | double | The vertex locations. | - | | (np,3) | | - +---------------------------+-----------+----------------------------------------------------------+ - | nn | double | The vertex normals. If derived surface data was not | - | | (np,3) | requested, this is empty. | - +---------------------------+-----------+----------------------------------------------------------+ - | tris | int32 | Vertex numbers of the triangles in counterclockwise | - | | (ntri,3) | order as seen from the outside. | - +---------------------------+-----------+----------------------------------------------------------+ - | nuse | int32 | Number of active vertices, *i.e.*, vertices included in | - | | | a decimated source space. | - +---------------------------+-----------+----------------------------------------------------------+ - | inuse | int32(np) | Which vertices are in use. | - +---------------------------+-----------+----------------------------------------------------------+ - | vertno |int32(nuse)| Indices of the vertices in use. | - +---------------------------+-----------+----------------------------------------------------------+ - | curv | double(np)| Curvature values at the vertices. If curvature | - | | | information was not requested, this field is empty or | - | | | absent. | - +---------------------------+-----------+----------------------------------------------------------+ - | tri_area | double | The triangle areas in m2.If derived surface data was not | - | | (ntri) | requested, this field will be missing. | - +---------------------------+-----------+----------------------------------------------------------+ - | tri_cent | double | The triangle centroids. If derived surface data was not | - | | (ntri,3) | requested, this field will be missing. | - +---------------------------+-----------+----------------------------------------------------------+ - | tri_nn | double | The triangle normals. If derived surface data was not | - | | (ntri,3) | requested, this field will be missing. | - +---------------------------+-----------+----------------------------------------------------------+ - | nuse_tri | int32 | Number of triangles in use. This is present only if the | - | | | surface corresponds to a source space created with the | - | | | ``--ico`` option. | - +---------------------------+-----------+----------------------------------------------------------+ - | use_tris | int32 | The vertices of the triangles in use in the complete | - | | (nuse_tri)| triangulation. This is present only if the surface | - | | | corresponds to a source space created with the | - | | | ``--ico`` option. | - +---------------------------+-----------+----------------------------------------------------------+ - | nearest | int32(np) | This field is present only if patch information has been | - | | | computed for a source space. For each vertex in the | - | | | triangulation, these values indicate the nearest active | - | | | source space vertex. 
| - +---------------------------+-----------+----------------------------------------------------------+ - | nearest_dist | double(np)| This field is present only if patch information has been | - | | | computed for a source space. For each vertex in the | - | | | triangulation, these values indicate the distance to the | - | | | nearest active source space vertex. | - +---------------------------+-----------+----------------------------------------------------------+ - | dist | double | Distances between vertices on this surface given as a | - | | (np,np) | sparse matrix. A zero off-diagonal entry in this matrix | - | | | indicates that the corresponding distance has not been | - | | | calculated. | - +---------------------------+-----------+----------------------------------------------------------+ - | dist_limit | double | The value given to mne_add_patch_info with the ``--dist``| - | | | option. This value is presently | - | | | always negative, indicating that only distances between | - | | | active source space vertices, as indicated by the vertno | - | | | field of this structure, have been calculated. | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBJJIED: - -.. table:: The cov structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | kind | double | What kind of a covariance matrix (1 = noise covariance, | - | | | 2 = source covariance). | - +---------------------------+-----------+----------------------------------------------------------+ - | diag | double | Is this a diagonal matrix. | - +---------------------------+-----------+----------------------------------------------------------+ - | dim | int32 | Dimension of the covariance matrix. | - +---------------------------+-----------+----------------------------------------------------------+ - | names | cell(*) | Names of the channels associated with the entries | - | | | (may be empty). | - +---------------------------+-----------+----------------------------------------------------------+ - | data | double | The covariance matrix. This a double(dim) vector for a | - | | (dim,dim) | diagonal covariance matrix. | - +---------------------------+-----------+----------------------------------------------------------+ - | projs | proj(*) | The SSP vectors applied to these data. | - +---------------------------+-----------+----------------------------------------------------------+ - | bads | cell(*) | Bad channel names. | - +---------------------------+-----------+----------------------------------------------------------+ - | nfree | int32 | Number of data points used to compute this matrix. | - +---------------------------+-----------+----------------------------------------------------------+ - | eig |double(dim)| The eigenvalues of the covariance matrix. This field may | - | | | be empty for a diagonal covariance matrix. | - +---------------------------+-----------+----------------------------------------------------------+ - | eigvec | double | The eigenvectors of the covariance matrix. | - | | (dim,dim) | | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. 
tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBFJIBJ: - -.. table:: The fwd structure. - - +-------------------------+-------------+----------------------------------------------------------+ - | Field | Data Type | Description | - +=========================+=============+==========================================================+ - | source_ori | int32 | Has the solution been computed for the current component | - | | | normal to the cortex only (1) or all three source | - | | | orientations (2). | - +-------------------------+-------------+----------------------------------------------------------+ - | coord_frame | int32 | Coordinate frame in which the locations and orientations | - | | | are expressed. | - +-------------------------+-------------+----------------------------------------------------------+ - | nsource | int32 | Total number of source space points. | - +-------------------------+-------------+----------------------------------------------------------+ - | nchan | int32 | Number of channels. | - +-------------------------+-------------+----------------------------------------------------------+ - | sol | named | The forward solution matrix. | - | | matrix | | - +-------------------------+-------------+----------------------------------------------------------+ - | sol_grad | named | The derivatives of the forward solution with respect to | - | | matrix | the dipole location coordinates. | - | | | This field is present only if the forward solution was | - | | | computed with the ``--grad`` option in MNE-C. | - +-------------------------+-------------+----------------------------------------------------------+ - | mri_head_t | trans | Transformation from the MRI coordinate frame to the | - | | | (Neuromag) head coordinate frame. | - +-------------------------+-------------+----------------------------------------------------------+ - | src | surf(:) | The description of the source spaces. | - +-------------------------+-------------+----------------------------------------------------------+ - | source_rr | double | The source locations. | - | | (nsource,3) | | - +-------------------------+-------------+----------------------------------------------------------+ - | source_nn | double(:,3) | The source orientations. Number of rows is either | - | | | nsource (fixed source orientations) or 3*nsource | - | | | (all source orientations). | - +-------------------------+-------------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBIEIJE: - -.. table:: The inv structure. Note: The fields proj, whitener, reginv, and noisenorm are filled in by the routine mne_prepare_inverse_operator. - - +---------------------+-------------+----------------------------------------------------------+ - | Field | Data Type | Description | - +=====================+=============+==========================================================+ - | methods | int32 | Has the solution been computed using MEG data (1), EEG | - | | | data (2), or both (3). | - +---------------------+-------------+----------------------------------------------------------+ - | source_ori | int32 | Has the solution been computed for the current component | - | | | normal to the cortex only (1) or all three source | - | | | orientations (2). | - +---------------------+-------------+----------------------------------------------------------+ - | nsource | int32 | Total number of source space points. 
| - +---------------------+-------------+----------------------------------------------------------+ - | nchan | int32 | Number of channels. | - +---------------------+-------------+----------------------------------------------------------+ - | coord_frame | int32 | Coordinate frame in which the locations and orientations | - | | | are expressed. | - +---------------------+-------------+----------------------------------------------------------+ - | source_nn | double(:,3) | The source orientations. Number of rows is either | - | | | nsource (fixed source orientations) or 3*nsource (all | - | | | source orientations). | - +---------------------+-------------+----------------------------------------------------------+ - | sing | double | The singular values, *i.e.*, the diagonal values of | - | | (nchan) | :math:`\Lambda`, see :ref:`mne_solution`. | - +---------------------+-------------+----------------------------------------------------------+ - | eigen_leads | double | The matrix :math:`V`, see :ref:`mne_solution`. | - | | (:,nchan) | | - +---------------------+-------------+----------------------------------------------------------+ - | eigen_fields | double | The matrix :math:`U^\top`, see | - | | (nchan, | :ref:`mne_solution`. | - | | nchan) | | - +---------------------+-------------+----------------------------------------------------------+ - | noise_cov | cov | The noise covariance matrix :math:`C`. | - +---------------------+-------------+----------------------------------------------------------+ - | source_cov | cov | The source covariance matrix :math:`R`. | - +---------------------+-------------+----------------------------------------------------------+ - | src | surf(:) | The description of the source spaces. | - +---------------------+-------------+----------------------------------------------------------+ - | mri_head_t | trans | Transformation from the MRI coordinate frame to the | - | | | (Neuromag) head coordinate frame. | - +---------------------+-------------+----------------------------------------------------------+ - | nave | double | The number of averages. | - +---------------------+-------------+----------------------------------------------------------+ - | projs | proj(:) | The SSP vectors which were active when the decomposition | - | | | was computed. | - +---------------------+-------------+----------------------------------------------------------+ - | proj | double | The projection operator computed using projs. | - | | (nchan) | | - +---------------------+-------------+----------------------------------------------------------+ - | whitener | | A sparse matrix containing the noise normalization | - | | | factors. Dimension is either nsource (fixed source | - | | | orientations) or 3*nsource (all source orientations). | - +---------------------+-------------+----------------------------------------------------------+ - | reginv | double | The diagonal matrix :math:`\Gamma`, see | - | | (nchan) | :ref:`mne_solution`. | - +---------------------+-------------+----------------------------------------------------------+ - | noisenorm | double(:) | A sparse matrix containing the noise normalization | - | | | factors. Dimension is either nsource (fixed source | - | | | orientations) or 3*nsource (all source orientations). 
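-
-For example, the preparation step that fills in the proj, whitener, reginv,
-and noisenorm fields is normally run immediately after the operator has been
-read. The sketch below is a minimal example: the placeholder file name, the
-regularization value (a typical choice for SNR = 3), and the argument list of
-mne_prepare_inverse_operator are assumptions that should be checked against
-the on-line ``help``::
-
-   % placeholder file name for an inverse operator fif file
-   inv = mne_read_inverse_operator('sample-meg-oct-6-meg-inv.fif');
-   nave    = 40;         % number of averages in the data to be analyzed
-   lambda2 = 1 / 3^2;    % regularization corresponding to SNR = 3
-   dSPM    = true;       % also prepare the dSPM noise normalization
-   inv = mne_prepare_inverse_operator(inv, nave, lambda2, dSPM);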
| - +---------------------+-------------+----------------------------------------------------------+ - - -On-line documentation for individual routines -############################################# - -Each of the routines listed in Tables :ref:`BGBCGHAG` - :ref:`BGBEFADJ` has on-line documentation accessible by saying ``help`` <*routine name*> in Matlab. diff --git a/doc/overview/people.rst b/doc/overview/people.rst index 18404e0e56e..f33a0b04d67 100644 --- a/doc/overview/people.rst +++ b/doc/overview/people.rst @@ -1,20 +1,12 @@ :orphan: -.. include:: ../changes/names.inc - .. _governance-people: -Current steering council and institutional partners -=================================================== - -Benevolent Dictator for Life ----------------------------- - -Alexandre Gramfort is the Benevolent Dictator for Life (BDFL) - +Current Project Leadership and Institutional Partners +===================================================== -Steering Council ----------------- +Maintainer Team +--------------- * `Adam Li`_ * `Alex Gramfort`_ @@ -24,22 +16,39 @@ Steering Council * `Daniel McCloy`_ * `Denis Engemann`_ * `Eric Larson`_ -* `Guillaume Favelier`_ -* `Luke Bloy`_ * `Mainak Jas`_ * `Marijn van Vliet`_ +* `Mathieu Scheltienne`_ * `Mikołaj Magnuski`_ * `Richard Höchenberger`_ * `Robert Luke`_ * `Stefan Appelhoff`_ +Steering Council +---------------- + +* `Britta Westner`_ +* `Daniel McCloy`_ (chair) +* `Eric Larson`_ + +Advisory Board +-------------- + +* `Alex Gramfort`_ +* `Bradley Voytek`_ +* `Jan-Mathijs Schoeffelen`_ +* `Liberty Hamilton`_ +* `Matti Hämäläinen`_ +* `Ole Jensen`_ + .. _governance-cpgrl: Community Participation Guidelines Response Leads ------------------------------------------------- -See our `Community Participation Guidelines `__ for explanation. Current Response Leads are: +See our `Community Participation Guidelines `__ +for explanation. Current Response Leads are: * `Daniel McCloy`_ * `Eric Larson`_ diff --git a/doc/overview/roadmap.rst b/doc/overview/roadmap.rst deleted file mode 100644 index 26a301383ce..00000000000 --- a/doc/overview/roadmap.rst +++ /dev/null @@ -1,215 +0,0 @@ -.. include:: ../links.inc - -Roadmap -======= - -This page describes some of the major medium- to long-term goals for -MNE-Python. These are goals that require substantial effort and/or -API design considerations. Some of these may be suitable for Google Summer of -Code projects, while others require more extensive work. - -.. contents:: Page contents - :local: - -Open ----- - -.. _time-frequency-viz: - -Time-frequency visualization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -We should implement a viewer for interactive visualization of volumetric -source-time-frequency (5-D) maps on MRI slices (orthogonal 2D viewer). -`NutmegTrip `__ -(written by Sarang Dalal) provides similar functionality in Matlab in -conjunction with FieldTrip. Example of NutmegTrip's source-time-frequency mode -in action (click for link to YouTube): - -.. image:: https://i.ytimg.com/vi/xKdjZZphdNc/maxresdefault.jpg - :target: https://www.youtube.com/watch?v=xKdjZZphdNc - :width: 50% - -Clustering statistics API -^^^^^^^^^^^^^^^^^^^^^^^^^ -The current clustering statistics code has limited functionality. It should be -re-worked to create a new ``cluster_based_statistic`` or similar function. -In particular, the new API should: - -1. Support mixed within- and between-subjects designs, different statistical - functions, etc. 
This should be done via a ``design`` argument that mirrors - :func:`patsy.dmatrices` or similar community standard (e.g., this is what - is used by :class:`statsmodels.regression.linear_model.OLS`). -2. Have clear tutorials showing how different contrasts can be done (toy data). -3. Have clear tutorials showing some common analyses on real data (time-freq, - sensor space, source space, etc.) -4. Not introduce any significant speed penalty (e.g., < 10% slower) compared - to the existing, more specialized/limited functions. - -More details are in :gh:`4859`. - -Access to open EEG/MEG databases -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -We should improve the access to open EEG/MEG databases via the -:mod:`mne.datasets` module, in other words improve our dataset fetchers. -We have physionet, but much more. Having a consistent API to access multiple -data sources would be great. See :gh:`2852` and :gh:`3585` for some ideas, -as well as: - -- `OpenNEURO `__ - "A free and open platform for sharing MRI, MEG, EEG, iEEG, and ECoG data." - See for example :gh:`6687`. -- `Human Connectome Project Datasets `__ - Over a 3-year span (2012-2015), the Human Connectome Project (HCP) scanned - 1,200 healthy adult subjects. The available data includes MR structural - scans, behavioral data and (on a subset of the data) resting state and/or - task MEG data. -- `MMN dataset `__ - Used for tutorial/publications applying DCM for ERP analysis using SPM. -- Kymata datasets - Current and archived EMEG measurement data, used to test hypotheses in the - Kymata atlas. The participants are healthy human adults listening to the - radio and/or watching films, and the data is comprised of (averaged) EEG - and MEG sensor data and source current reconstructions. -- `BNCI Horizon `__ - BCI datasets. - -Integrate OpenMEEG via improved Python bindings -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -`OpenMEEG `__ is a state-of-the art solver for -forward modeling in the field of brain imaging with MEG/EEG. It solves -numerically partial differential equations (PDE). It is written in C++ with -Python bindings written in `SWIG `__. -The ambition of the project is to integrate OpenMEEG into MNE offering to MNE -the ability to solve more forward problems (cortical mapping, intracranial -recordings, etc.). Some software tasks that shall be completed: - -- Cleanup Python bindings (remove useless functions, check memory managements, - etc.) -- Write example scripts for OpenMEEG that automatically generate web pages as - for `MNE `__ -- Understand how MNE encodes info about sensors (location, orientation, - integration points etc.) and allow OpenMEEG to be used. -- Help package OpenMEEG for Debian/Ubuntu -- Help manage `the continuous integration system - `__ - - -In progress ------------ - -Diversity, Equity, and Inclusion (DEI) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -MNE-Python is committed to recruiting and retaining a diverse pool of -contributors, see :gh:`8221`. - -First-class OPM support -^^^^^^^^^^^^^^^^^^^^^^^ -MNE-Python has support for reading some OPM data formats such as FIF, but -support is still rudimentary. Support should be added for other manufacturers, -and standard (and/or novel) preprocessing routines should be added to deal with -coregistration adjustment, forward modeling, and OPM-specific artifacts. - -Deep source modeling -^^^^^^^^^^^^^^^^^^^^ -Existing source modeling and inverse routines are not explicitly designed to -deal with deep sources. 
Advanced algorithms exist from MGH for enhancing -deep source localization, and these should be implemented and vetted in -MNE-Python. - -Better sEEG/ECoG/DBS support -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Some support already exists for iEEG electrodes in MNE-Python thanks in part -to standard abstractions. However, iEEG-specific pipeline steps (e.g., -electrode localization) and visualizations (e.g., per-shaft topo plots, -:ref:`time-frequency-viz`) are missing. MNE-Python should work with members of -the ECoG/sEEG community to work with or build in existing tools, and extend -native functionality for depth electrodes. - -Time-frequency classes -^^^^^^^^^^^^^^^^^^^^^^ -Our current codebase implements classes related to :term:`TFRs ` that -remain incomplete. We should implement new classes from the ground up -that can hold frequency data (``Spectrum``), cross-spectral data -(``CrossSpectrum``), multitaper estimates (``MultitaperSpectrum``), and -time-varying estimates (``Spectrogram``). These should work for -continuous, epoched, and averaged sensor data, as well as source-space brain -data. - -See related issues :gh:`6290`, :gh:`7671`, :gh:`8026`, :gh:`8724`, :gh:`9045`, -and PRs :gh:`6609`, :gh:`6629`, :gh:`6672`, :gh:`6673`, :gh:`8397`, and -:gh:`8892`. - -Pediatric and clinical MEG pipelines -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -MNE-Python is in the process of providing automated analysis of BIDS-compliant -datasets, see `MNE-BIDS-Pipeline`_. By incorporating functionality from the -`mnefun `__ pipeline, -which has been used extensively for pediatric data analysis at `I-LABS`_, -better support for pediatric and clinical data processing can be achieved. -Multiple processing steps (e.g., eSSS), sanity checks (e.g., cHPI quality), -and reporting (e.g., SSP joint plots, SNR plots) will be implemented. - -Statistics efficiency -^^^^^^^^^^^^^^^^^^^^^ -A key technique in functional neuroimaging analysis is clustering brain -activity in adjacent regions prior to statistical analysis. An important -clustering algorithm — threshold-free cluster enhancement (TFCE) — currently -relies on computationally expensive permutations for hypothesis testing. -A faster, probabilistic version of TFCE (pTFCE) is available, and we are in the -process of implementing this new algorithm. - -3D visualization -^^^^^^^^^^^^^^^^ -Historically we have used Mayavi for 3D visualization, but have faced -limitations and challenges with it. We should work to use some other backend -(e.g., PyVista) to get major improvements, such as: - -1. *Proper notebook support (through ipyvtklink)* (complete) -2. *Better interactivity with surface plots* (complete) -3. Time-frequency plotting (complementary to volume-based - :ref:`time-frequency-viz`) -4. Integration of multiple functions as done in ``mne_analyze``, e.g., - simultaneous source estimate viewing, field map - viewing, head surface display, etc. These are all currently available in - separate functions, but we should be able to combine them in a single plot - as well. - -The meta-issue for tracking to-do lists for surface plotting is :gh:`7162`. - -.. _documentation-updates: - -Documentation updates -^^^^^^^^^^^^^^^^^^^^^ -Our documentation has many minor issues, which can be found under the tag -:gh:`labels/DOC`. - - -Completed ---------- - -Distributed computing support -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -`MNE-BIDS-Pipeline`_ has been enhanced with support for cloud computing -via `Dask`_ and :doc:`joblib `. 
-After configuring Dask to use local or remote distributed computing resources, -MNE-BIDS-Pipeline can readily make use of remote workers to parallelize -processing across subjects. - -2D visualization -^^^^^^^^^^^^^^^^ -`This goal `__ -was completed under CZI `EOSS2`_. Some additional enhancements that could also -be implemented are listed in :gh:`7751`. - -Tutorial / example overhaul -^^^^^^^^^^^^^^^^^^^^^^^^^^^ -`This goal `__ -was completed under CZI `EOSS2`_. Ongoing documentation needs are listed in -:ref:`documentation-updates`. - -Cluster computing images -^^^^^^^^^^^^^^^^^^^^^^^^ -As part of `this goal `__, -we created docker images suitable for cloud computing via `MNE-Docker`_. - -.. _I-LABS: http://ilabs.washington.edu/ diff --git a/doc/realtime.rst b/doc/realtime.rst deleted file mode 100644 index 91c027a9e3f..00000000000 --- a/doc/realtime.rst +++ /dev/null @@ -1,5 +0,0 @@ - -Realtime -======== - -Realtime functionality has moved to the standalone module :mod:`mne_realtime`. diff --git a/doc/references.bib b/doc/references.bib index 87a033a97f5..3b7ddf6dc04 100644 --- a/doc/references.bib +++ b/doc/references.bib @@ -282,6 +282,18 @@ @article{Cohen2019 year = {2019} } +@article{Cohen2022, +author = {Cohen, Michael X}, +doi = {10.1016/j.neuroimage.2021.118809}, +journal = {NeuroImage}, +pages = {118809}, +title = {A tutorial on generalized eigendecomposition for denoising, contrast enhancement, and dimension reduction in multichannel electrophysiology}, +volume = {247}, +year = {2022}, +issn = {1053-8119}, + +} + @article{CohenHosaka1976, author = {Cohen, David and Hosaka, Hidehiro}, doi = {10.1016/S0022-0736(76)80041-6}, @@ -444,7 +456,7 @@ @book{EfronHastie2016 series = {Institute of {{Mathematical Statistics}} Monographs}, shorttitle = {Computer Age Statistical Inference}, title = {Computer Age Statistical Inference: Algorithms, Evidence, and Data Science}, - url = {https://web.stanford.edu/~hastie/CASI/}, + url = {https://hastie.su.domains/CASI/}, year = {2016} } @@ -1158,6 +1170,17 @@ @misc{Lowry2014 year = {2014} } +@article{LuckEtAl2021, + author = {Luck, Steven J. and Stewart, Andrew X. and Simmons, Aaron M. and Rhemtulla, Mijke}, + journal = {Psychophysiology}, + title = {Standardized Measurement Error: A Universal Metric of Data Quality for Averaged Event-Related Potentials}, + volume = {58}, + number = {6}, + pages = {e13793}, + year = {2021}, + doi = {10.1111/psyp.13793} +} + @article{MaessEtAl2016, author = {Maess, Burkhard and Schröger, Erich and Widmann, Andreas}, doi = {10.1016/j.jneumeth.2015.12.003}, @@ -1190,6 +1213,17 @@ @article{Makeig1993 year = {1993} } +@article{Makela2018, + author = {Mäkelä, Niko and Stenroos, Matti and Sarvas, Jukka and Ilmoniemi, Risto J.}, + doi = {10.1016/j.neuroimage.2017.11.013}, + journal = {Neuroimage}, + number = {}, + pages = {73-83}, + title = {Truncated RAP-MUSIC (TRAP-MUSIC) for MEG and EEG source localization}, + volume = {167}, + year = {2018} +} + @article{MarisOostenveld2007, author = {Maris, Eric and Oostenveld, Robert}, doi = {10.1016/j.jneumeth.2007.03.024}, @@ -1259,6 +1293,17 @@ @article{MosherLeahy1999 year = {1999} } +@inproceedings{MosherLeahy1996, + title = {{EEG} and {MEG} source localization using recursively applied ({RAP}) {MUSIC}}, + doi = {10.1109/ACSSC.1996.599135}, + booktitle = {Conference {Record} of {The} {Thirtieth} {Asilomar} {Conference} on {Signals}, {Systems} and {Computers}}, + author = {Mosher, J.C. 
and Leahy, R.M.}, + month = nov, + year = {1996}, + note = {ISSN: 1058-6393}, + pages = {1201--1207 vol.2} +} + @inproceedings{MoukademEtAl2014, address = {{Lisbon}}, author = {Moukadem, Ali and Bouguila, Zied and Abdeslam, Djaffar Ould and Dieterlen, Alain}, @@ -1302,6 +1347,16 @@ @inproceedings{NdiayeEtAl2016 year = {2016} } +@article{NiazyEtAl2005, + author = {Niazy, R. K. and Beckmann, C.F. and Iannetti, G.D. and Brady, J. M. and Smith, S. M.}, + title = {Removal of FMRI environment artifacts from EEG data using optimal basis sets}, + journal = {NeuroImage}, + year = {2005}, + volume = {28}, + pages = {720-737}, + doi = {10.1016/j.neuroimage.2005.06.067} +} + @article{NicholsHolmes2002, author = {Nichols, Thomas E. and Holmes, Andrew P.}, doi = {10.1002/hbm.1058}, @@ -2406,3 +2461,76 @@ @article{SeymourEtAl2022 year = {2022}, pages = {118834} } + +@article{TierneyEtAl2021, + title = {Modelling optically pumped magnetometer interference in MEG as a spatially homogeneous magnetic field}, + volume = {244}, + issn = {1053-8119}, + doi = {10.1016/j.neuroimage.2021.118484}, + language = {en}, + journal = {NeuroImage}, + author = {Tierney, Tim M. and Alexander, Nicholas and Mellor, Stephanie and Holmes, Niall and Seymour, Robert and O'Neill, George C. and Maguire, Eleanor A. and Barnes, Gareth R.}, + year = {2021}, +} + +@article{TierneyEtAl2022, + title = {Spherical harmonic based noise rejection and neuronal sampling with multi-axis OPMs}, + journal = {NeuroImage}, + volume = {258}, + year = {2022}, + issn = {1053-8119}, + doi = {10.1016/j.neuroimage.2022.119338}, + author = {Tierney, Tim M. and Mellor, Stephanie and O'Neill, George C. and Timms, Ryan C. and Barnes, Gareth R.}, +} + +@article{KumaravelEtAl2022, + doi = {10.3390/s22197314}, + url = {https://doi.org/10.3390/s22197314}, + year = {2022}, + month = sep, + publisher = {{MDPI} {AG}}, + volume = {22}, + number = {19}, + pages = {7314}, + author = {Velu Prabhakar Kumaravel and Marco Buiatti and Eugenio Parise and Elisabetta Farella}, + title = {Adaptable and Robust {EEG} Bad Channel Detection Using Local Outlier Factor ({LOF})}, + journal = {Sensors} +} + +@article{BreunigEtAl2000, + author = {Breunig, Markus M. and Kriegel, Hans-Peter and Ng, Raymond T.
and Sander, J\"{o}rg}, + title = {LOF: Identifying Density-Based Local Outliers}, + year = {2000}, + issue_date = {June 2000}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + volume = {29}, + number = {2}, + url = {https://doi.org/10.1145/335191.335388}, + doi = {10.1145/335191.335388}, + journal = {SIGMOD Rec.}, + month = {may}, + pages = {93–104}, + numpages = {12}, + keywords = {outlier detection, database mining} +} + +@article{OyamaEtAl2015, + title = {Dry phantom for magnetoencephalography —{Configuration}, calibration, and contribution}, + volume = {251}, + issn = {0165-0270}, + doi = {10.1016/j.jneumeth.2015.05.004}, + journal = {Journal of Neuroscience Methods}, + author = {Oyama, Daisuke and Adachi, Yoshiaki and Yumoto, Masato and Hashimoto, Isao and Uehara, Gen}, + month = aug, + year = {2015}, + pages = {24--36}, +} + +@inproceedings{MellotEtAl2024, + title = {Physics-informed and Unsupervised Riemannian Domain Adaptation for Machine Learning on Heterogeneous EEG Datasets}, + author = {Mellot, Apolline and Collas, Antoine and Chevallier, Sylvain and Engemann, Denis and Gramfort, Alexandre}, + booktitle = {Proceedings of the 32nd European Signal Processing Conference (EUSIPCO)}, + year = {2024}, + address = {Lyon, France} +} diff --git a/doc/sphinxext/_avatar_template.html b/doc/sphinxext/_avatar_template.html new file mode 100644 index 00000000000..a0e086ee85e --- /dev/null +++ b/doc/sphinxext/_avatar_template.html @@ -0,0 +1,7 @@ + + + + + + + diff --git a/doc/sphinxext/contrib_avatars.py b/doc/sphinxext/contrib_avatars.py new file mode 100644 index 00000000000..9741a9eb095 --- /dev/null +++ b/doc/sphinxext/contrib_avatars.py @@ -0,0 +1,50 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +from pathlib import Path + +from mne.utils import _replace_md5 + + +def generate_contrib_avatars(app, config): + """Render a template webpage with avatars generated by JS and a GitHub API call.""" + root = Path(app.srcdir) + infile = root / "sphinxext" / "_avatar_template.html" + outfile = root / "_templates" / "avatars.html.new" + if os.getenv("MNE_ADD_CONTRIBUTOR_IMAGE", "false").lower() != "true": + body = """\ +

Contributor avatars will appear here in full doc builds. Set \ +MNE_ADD_CONTRIBUTOR_IMAGE=true in your environment to generate it.

""" + else: + from selenium import webdriver + from selenium.common.exceptions import WebDriverException + from selenium.webdriver.common.by import By + from selenium.webdriver.support.ui import WebDriverWait + + try: + options = webdriver.ChromeOptions() + options.add_argument("--headless=new") + driver = webdriver.Chrome(options=options) + except WebDriverException: + options = webdriver.FirefoxOptions() + options.add_argument("-headless") + driver = webdriver.Firefox(options=options) + driver.get(f"file://{infile}") + wait = WebDriverWait(driver, 20) + wait.until(lambda d: d.find_element(by=By.ID, value="contributor-avatars")) + body = driver.find_element(by=By.TAG_NAME, value="body").get_property( + "innerHTML" + ) + assert isinstance(body, str), type(body) + driver.quit() + with open(outfile, "w") as fid: + fid.write(body) + _replace_md5(str(outfile)) + + +def setup(app): + """Set up the Sphinx app.""" + app.connect("config-inited", generate_contrib_avatars) + return diff --git a/doc/sphinxext/credit_tools.py b/doc/sphinxext/credit_tools.py new file mode 100644 index 00000000000..10dfab58680 --- /dev/null +++ b/doc/sphinxext/credit_tools.py @@ -0,0 +1,516 @@ +"""Create code credit RST file. + +Run ./tools/dev/update_credit_json.py first to get the latest PR JSON files. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import glob +import json +import pathlib +import re +from collections import defaultdict +from pathlib import Path + +import numpy as np +import sphinx.util.logging + +import mne +from mne.utils import logger, verbose + +sphinx_logger = sphinx.util.logging.getLogger("mne") + +repo_root = Path(__file__).parents[2] +doc_root = repo_root / "doc" +data_dir = doc_root / "sphinxext" + +# TODO: For contributor names there are three sources of potential truth: +# +# 1. names.inc +# 2. GitHub profile names (that we pull dynamically here) +# 3. commit history / .mailmap. +# +# All three names can mismatch. Currently we try to defer to names.inc since this +# is assumed to have been chosen the most consciously/intentionally by contributors. +# Though it is possible that people can change their preferred names as well, so +# preferring GitHub profile info (when complete!) is probably preferable. + +# Allowed singletons +single_names = "btkcodedev buildqa sviter Akshay user27182 Mojackhak".split() +# Surnames where we have more than one distinct contributor: +name_counts = dict( + Bailey=2, + Das=2, + Drew=2, + Li=2, + Peterson=2, + Wong=2, + Zhang=2, +) +# Exceptions, e.g., abbrevitaions in first/last name or all-caps +exceptions = [ + "T. 
Wang", + "Ziyi ZENG", +] +# Manual renames +manual_renames = { + "alexandra": "Alexandra Corneyllie", # 7600 + "alexandra.corneyllie": "Alexandra Corneyllie", # 7600 + "akshay0724": "Akshay", # 4046, TODO: Check singleton + "AnneSo": "Anne-Sophie Dubarry", # 4910 + "Basile": "Basile Pinsard", # 1791 + "ChristinaZhao": "Christina Zhao", # 9075 + "Drew, J.": "Jordan Drew", # 10861 + "enzo": "Enzo Altamiranda", # 11351 + "Frostime": "Yiping Zuo", # 11773 + "Gennadiy": "Gennadiy Belonosov", # 11720 + "Genuster": "Gennadiy Belonosov", # 12936 + "GreasyCat": "Rongfei Jin", # 13113 + "Hamid": "Hamid Maymandi", # 10849 + "jwelzel": "Julius Welzel", # 11118 + "Katia": "Katia Al-Amir", # 13225 + "Martin": "Martin Billinger", # 8099, TODO: Check + "Mats": "Mats van Es", # 11068 + "Michael": "Michael Krause", # 3304 + "Naveen": "Naveen Srinivasan", # 10787 + "NoahMarkowitz": "Noah Markowitz", # 12669 + "PAB": "Pierre-Antoine Bannier", # 9430 + "Rob Luke": "Robert Luke", + "Sena": "Sena Er", # 11029 + "TzionaN": "Tziona NessAiver", # 10953 + "Valerii": "Valerii Chirkov", # 9043 + "Wei": "Wei Xu", # 13218 + "Zhenya": "Evgenii Kalenkovich", # 6310, TODO: Check +} + + +def _good_name(name): + if name is None: + return False + assert isinstance(name, str), type(name) + if not name.strip(): + return False + if " " not in name and name not in single_names: # at least two parts + return False + if name not in exceptions and "." in name.split()[0] or "." in name.split()[-1]: + return False + if " " in name and name not in exceptions: + first = name.split()[0] + last = name.split()[-1] + if first == first.upper() or last == last.upper(): # e.g., KING instead of King + return False + return True + + +@verbose +def generate_credit_rst(app=None, *, verbose=False): + """Get the credit RST.""" + sphinx_logger.info("Creating code credit RST inclusion file") + ignores = [ + int(ignore.split("#", maxsplit=1)[1].strip().split()[0][:-1]) + for ignore in (repo_root / ".git-blame-ignore-revs") + .read_text("utf-8") + .splitlines() + if not ignore.strip().startswith("#") and ignore.strip() + ] + ignores = {str(ig): [] for ig in ignores} + + # Use mailmap to help translate emails to names + mailmap = dict() + # mapping from email to name + name_map: dict[str, str] = dict() + for line in (repo_root / ".mailmap").read_text("utf-8").splitlines(): + name = re.match("^([^<]+) <([^<>]+)>", line.strip()).group(1) + assert _good_name(name), repr(name) + emails = list(re.findall("<([^<>]+)>", line.strip())) + assert len(emails) > 0 + new = emails[0] + if new in name_map: + assert name_map[new] == name + else: + name_map[new] = name + if len(emails) == 1: + continue + for old in emails[1:]: + if old in mailmap: + assert new == mailmap[old] # can be different names + else: + mailmap[old] = new + if old in name_map: + assert name_map[old] == name + else: + name_map[old] = name + + unknown_emails: set[str] = set() + + # dict with (name, commit) keys, values are int change counts + # ("commits" is really "PRs" for Python mode) + commits: dict[tuple[str], int] = defaultdict(lambda: 0) + + # dict with filename keys, values are dicts with name keys and +/- ndarrays + stats: dict[str, dict[str, np.ndarray]] = defaultdict( + lambda: defaultdict( + lambda: np.zeros(2, int), + ), + ) + + bad_commits = set() + expected_bad_names = dict() + + for fname in sorted(glob.glob(str(data_dir / "prs" / "*.json"))): + commit = Path(fname).stem # PR number is in the filename + data = json.loads(Path(fname).read_text("utf-8")) + del fname + assert data != {} 
+ authors = data["authors"] + for author in authors: + if ( + author["e"] is not None + and author["e"] not in name_map + and _good_name(author["n"]) + ): + name_map[author["e"]] = author["n"] + for file, counts in data["changes"].items(): + if commit in ignores: + ignores[commit].append([file, commit]) + continue + p, m = counts["a"], counts["d"] + used_authors = set() + for author in authors: + if author["e"] is not None: + if author["e"] not in name_map: + unknown_emails.add( + f"{author['e'].ljust(29)} " + "https://github.com/mne-tools/mne-python/pull/" + f"{commit}/files" + ) + continue + name = name_map[author["e"]] + else: + name = author["n"] + if name in manual_renames: + assert _good_name(manual_renames[name]), ( + f"Bad manual rename: {name}" + ) + name = manual_renames[name] + if " " in name: + first, last = name.rsplit(" ", maxsplit=1) + if last == last.upper() and len(last) > 1: + last = last.capitalize() + if first == first.upper() and len(first) > 1: + first = first.capitalize() + name = f"{first} {last}" + assert not first.upper() == first, f"Bad {name=} from {commit}" + assert _good_name(name), f"Bad {name=} from {commit}" + if "King" in name: + assert name == "Jean-Rémi King", name + + if name is None: + bad_commits.add(commit) + continue + if name in used_authors: + continue + if not _good_name(name) and name not in expected_bad_names: + expected_bad_names[name] = f"{name} from #{commit}" + if author["e"]: + expected_bad_names[name] += f" email {author['e']}" + assert name.strip(), repr(name) + used_authors.add(name) + # treat moves and permission changes like a single-line change + if p == m == 0: + p = 1 + commits[(name, commit)] += p + m + stats[file][name] += [p, m] + if bad_commits: + raise RuntimeError( + "Run:\nrm " + + " ".join(f"{bad}.json" for bad in sorted(bad_commits, key=int)) + ) + + # Check for duplicate names based on last name, and also singleton names. + last_map = defaultdict(lambda: set()) + bad_names = set() + for these_stats in stats.values(): + for name in these_stats: + assert name == name.strip(), f"Un-stripped name: {repr(name)}" + last = name.split()[-1] + first = name.split()[0] + last_map[last].add(name) + name_where = expected_bad_names.get(name, name) + if last == name and name not in single_names: + bad_names.add(f"Singleton: {name_where}") + if "." in last or "." 
in first and name not in exceptions: + bad_names.add(f"Abbreviation: {name_where}") + bad_names = sorted(bad_names) + for last, names in last_map.items(): + if len(names) > name_counts.get(last, 1): + bad_names.append(f"Duplicates: {sorted(names)}") + if bad_names: + what = ( + "Unexpected possible duplicates or bad names found, " + f"consider modifying {'/'.join(Path(__file__).parts[-3:])}:\n" + ) + raise RuntimeError(what + "\n".join(bad_names)) + + unknown_emails = set( + email + for email in unknown_emails + if "autofix-ci[bot]" not in email + and "pre-commit-ci[bot]" not in email + and "dependabot[bot]" not in email + and "github-actions[bot]" not in email + ) + what = "Unknown emails, consider adding to .mailmap:\n" + assert len(unknown_emails) == 0, what + "\n".join(sorted(unknown_emails)) + + logger.info("Biggest included commits/PRs:") + commits = dict( + (k, commits[k]) + for k in sorted(commits, key=lambda k_: commits[k_], reverse=True) + ) + for ni, name in enumerate(commits, 1): + if ni > 10: + break + logger.info(f"{str(name[1]).ljust(5)} @ {commits[name]:5d} by {name[0]}") + + logger.info("\nIgnored commits:") + # Report the ignores + for commit in ignores: # should have found one of each + logger.info(f"ignored {len(ignores[commit]):3d} files for {commit}") + assert len(ignores[commit]) >= 1, (ignores[commit], commit) + globs = dict() + + # This is the mapping from changed filename globs to module names on the website. + # We need to include aliases for old stuff. Anything we want to exclude we put in + # "null" with a higher priority (i.e., in dict first): + link_overrides = dict() # overrides for links + for key in """ + *.qrc *.png *.svg *.ico *.elc *.sfp *.lout *.lay *.csd *.txt + mne/_version.py mne/externals/* */__init__.py* */resources.py paper.bib + mne/html/*.css mne/html/*.js mne/io/bti/tests/data/* */SHA1SUMS *__init__py + AUTHORS.rst CITATION.cff CONTRIBUTING.rst codemeta.json mne/tests/*.* jr-tools + */whats_new.rst */latest.inc */dev.rst */changelog.rst */manual/* doc/*.json + logo/LICENSE doc/credit.rst + """.strip().split(): + globs[key] = "null" + # Now onto the actual module organization + root_path = pathlib.Path(mne.__file__).parent + mod_file_map = dict() + for file in root_path.iterdir(): + rel = file.relative_to(root_path).with_suffix("") + mod = f"mne.{rel}" + if file.is_dir(): + globs[f"mne/{rel}/*.*"] = mod + globs[f"mne/{rel}.*"] = mod + elif file.is_file() and file.suffix == ".py": + key = f"mne/{rel}.py" + if file.stem == "conftest": + globs[key] = "maintenance" + globs["conftest.py"] = "maintenance" + else: + globs[key] = mod + mod_file_map[mod] = key + globs["mne/artifacts/*.py"] = "mne.preprocessing" + for key in """ + pick.py constants.py info.py fiff/*.* _fiff/*.* raw.py testing.py _hdf5.py + compensator.py + """.strip().split(): + globs[f"mne/{key}"] = "mne.io" + for key in ("mne/transforms/*.py", "mne/_freesurfer.py"): + globs[key] = "mne.transforms" + globs["mne/mixed_norm/*.py"] = "mne.inverse_sparse" + globs["mne/__main__.py"] = "mne.commands" + globs["bin/*"] = "mne.commands" + globs["mne/morph_map.py"] = "mne.surface" + globs["mne/baseline.py"] = "mne.epochs" + for key in """ + parallel.py rank.py misc.py data/*.* defaults.py fixes.py icons/*.* icons.* + """.strip().split(): + globs[f"mne/{key}"] = "mne.utils" + for key in ("mne/_ola.py", "mne/cuda.py"): + globs[key] = "mne.filter" + for key in """ + *digitization/*.py layouts/*.py montages/*.py selection.py + """.strip().split(): + globs[f"mne/{key}"] = "mne.channels" + 
globs["mne/sparse_learning/*.py"] = "mne.inverse_sparse" + globs["mne/csp.py"] = "mne.preprocessing" + globs["mne/bem_surfaces.py"] = "mne.bem" + globs["mne/coreg/*.py"] = "mne.coreg" + globs["mne/inverse.py"] = "mne.minimum_norm" + globs["mne/stc.py"] = "mne.source_estimate" + globs["mne/surfer.py"] = "mne.viz" + globs["mne/tfr.py"] = "mne.time_frequency" + globs["mne/connectivity/*.py"] = "mne-connectivity (moved)" + link_overrides["mne-connectivity (moved)"] = "mne-tools/mne-connectivity" + globs["mne/realtime/*.py"] = "mne-realtime (moved)" + link_overrides["mne-realtime (moved)"] = "mne-tools/mne-realtime" + globs["mne/html_templates/*.*"] = "mne.report" + globs[".circleci/*"] = "maintenance" + link_overrides["maintenance"] = "mne-tools/mne-python" + globs["tools/*"] = "maintenance" + globs["doc/*"] = "doc" + for key in ("*.py", "*.rst"): + for mod in ("examples", "tutorials", "doc"): + globs[f"{mod}/{key}"] = mod + for key in """ + *.yml *.md setup.* MANIFEST.in Makefile README.rst flow_diagram.py *.toml + debian/* logo/*.py *.git* .pre-commit-config.yaml .mailmap .coveragerc make/* + """.strip().split(): + globs[key] = "maintenance" + + mod_stats = defaultdict(lambda: defaultdict(lambda: np.zeros(2, int))) + other_files = set() + total_lines = np.zeros(2, int) + for fname, counts in stats.items(): + for pattern, mod in globs.items(): + if glob.fnmatch.fnmatch(fname, pattern): + break + else: + other_files.add(fname) + mod = "other" + for e, pm in counts.items(): + if mod == "mne._fiff": + raise RuntimeError + # sanity check a bit + if mod != "null" and (".png" in fname or "/manual/" in fname): + raise RuntimeError(f"Unexpected {mod} {fname}") + mod_stats[mod][e] += pm + mod_stats["mne"][e] += pm + total_lines += pm + mod_stats.pop("null") # stuff we shouldn't give credit for + mod_stats = dict( + (k, mod_stats[k]) + for k in sorted( + mod_stats, + key=lambda x: ( + not x.startswith("mne"), + x == "maintenance", + x.replace("-", "."), + ), + ) + ) # sort modules alphabetically + other_files = sorted(other_files) + if len(other_files): + raise RuntimeError( + f"{len(other_files)} misc file(s) found:\n" + "\n".join(other_files) + ) + logger.info(f"\nTotal line change count: {list(map(int, total_lines))}") + + # sphinx-design badges that we use for contributors + BADGE_KINDS = ["bdg-info-line", "bdg"] + content = f"""\ +.. THIS FILE IS AUTO-GENERATED BY {Path(__file__).stem} AND WILL BE OVERWRITTEN + +.. raw:: html + + + +.. _code_credit: + +Code credit +=========== + +Below are lists of code contributors to MNE-Python. The numbers in parentheses are the +number of lines changed in our code history. + +- :{BADGE_KINDS[0]}:`This badge` is used for the top 10% of contributors. +- :{BADGE_KINDS[1]}:`This badge` is used for the remaining 90% of contributors. + +Entire codebase +--------------- + +""" + for mi, (mod, counts) in enumerate(mod_stats.items()): + if mi == 0: + assert mod == "mne", mod + indent = " " * 3 + elif mi == 1: + indent = " " * 6 + content += """ + +By submodule +------------ + +Contributors often have domain-specific expertise, so we've broken down the +contributions by submodule as well below. + +.. 
grid:: 1 2 3 3 + :gutter: 1 + +""" + # if there are 10 this is 100, if there are 100 this is 100 + these_stats = dict((k, v.sum()) for k, v in counts.items()) + these_stats = dict( + (k, these_stats[k]) + for k in sorted(these_stats, key=lambda x: these_stats[x], reverse=True) + ) + if mod in link_overrides: + link = f"https://github.com/%7Blink_overrides[mod]%7D" + else: + kind = "blame" if mod in mod_file_map else "tree" + link_mod = mod_file_map.get(mod, mod.replace(".", "/")) + link = f"https://github.com/mne-tools/mne-python/%7Bkind%7D/main/%7Blink_mod%7D" + assert "moved" not in link, (mod, link) + # Use badges because they flow nicely, inside a grid to make it more compact + stat_lines = [] + for ki, (k, v) in enumerate(these_stats.items()): + # Round to two digits, e.g. 12340 -> 12000, 12560 -> 13000 + v_round = int(float(f"{v:.2g}")) + assert v_round > 0, f"Got zero lines changed for {k} in {mod}: {v_round}" + # And then write as a max-3-char human-readable abbreviation like + # 123, 1.2k, 123k, 12m, etc. + for prefix in ("", "k", "m", "g"): + if v_round >= 1000: + v_round = v_round / 1000 + else: + if v_round >= 10 or prefix == "": # keep single digit as 1 not 1.0 + v_round = f"{int(round(v_round))}" + else: + v_round = f"{v_round:.1f}" + v_round += prefix + break + else: + raise RuntimeError(f"Too many digits in {v}") + idx = 0 if ki < (len(these_stats) - 1) // 10 + 1 else 1 + if any(b in k for b in ("[bot]", "Lumberbot", "Deleted user")): + continue + assert _good_name(k) + stat_lines.append(f":{BADGE_KINDS[idx]}:`{k} ({v_round})`") + stat_lines = f"\n{indent}".join(stat_lines) + if mi == 0: + content += f""" + +.. card:: {mod} + :class-card: overflow-auto + :link: https://github.com/mne-tools/mne-python/graphs/contributors + +{indent}{stat_lines} + +""" + else: + content += f""" + + .. grid-item-card:: {mod} + :class-card: overflow-auto + :link: {link} + +{indent}{stat_lines} + +""" + (doc_root / "code_credit.inc").write_text(content, encoding="utf-8") + + +if __name__ == "__main__": + generate_credit_rst(verbose=True) diff --git a/doc/sphinxext/directive_formatting.py b/doc/sphinxext/directive_formatting.py new file mode 100644 index 00000000000..a3090ab4c90 --- /dev/null +++ b/doc/sphinxext/directive_formatting.py @@ -0,0 +1,88 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import re + +from mne_doc_utils import sphinx_logger + + +def setup(app): + app.connect("source-read", check_directive_formatting) + app.connect("autodoc-process-docstring", check_directive_formatting) + + +def setup_module(): + # HACK: Stop nosetests running setup() above + pass + + +def check_directive_formatting(*args): + """Check that directives are not missing a space. + + For args, see Sphinx events 'source-read' and 'autodoc-process-docstring'. 
+ """ + # Extract relevant info from args + if len(args) == 3: # from source-read + source_type = "File" + name = args[1] + source = args[2][0] + source_concat = source # content already a single string + elif len(args) == 6: # from autodoc-process-docstring + source_type = "Docstring" + name = args[2] + source = args[5] + source_concat = "\n".join(source) # combine lines into single string + else: + raise RuntimeError("Unexpected number of arguments from Sphinx event") + + # Check if any directives are present + if re.search(r"\.\.\s*[a-zA-Z]+::", source_concat) is None: + return + + # Separate content into lines (docstrings already are) + if source_type == "File": + source = source.split("\n") + + # Check for bad formatting + for idx, line in enumerate(source): + # Check for missing space after '..' + missing = re.search(r"\.\.[a-zA-Z]+::", line) + if missing is not None: + sphinx_logger.warning( + f"{source_type} '{name}' is missing a space after '..' in the " + f"directive '{missing.group()}'" + ) + # Extra spaces after '..' don't affect formatting + + # Check for missing preceding blank line + # (exceptions are for directives at the start of files, after a header, or after + # another directive/another directive's content) + if idx == 0: + continue + dir_pattern = r"\.\. [a-zA-Z]+::" + head_pattern = r"^[-|=|\^]+$" + directive = re.search(dir_pattern, line) + if directive is not None: + line_prev = source[idx - 1].strip() + if ( # If previous line is... + line_prev != "" # not empty + and not re.search(head_pattern, line_prev) # not a header + and not re.search(dir_pattern, line_prev) # not a directive + ): + # Check if previous line is part of another directive + bad = True + for line_prev in reversed(source[: idx - 1]): + line_prev = line_prev.strip() + if line_prev == "" or re.search(head_pattern, line_prev): + # is a blank line or header, so not part of another directive + break # must be bad formatting + if re.search(dir_pattern, line_prev): + bad = False # is part of another directive, is good formatting + break + # or keep going until we reach the first line (so must be bad) + if bad: + sphinx_logger.warning( + f"{source_type} '{name}' is missing a blank line before the " + f"directive '{directive.group()}'" + ) diff --git a/doc/sphinxext/flow_diagram.py b/doc/sphinxext/flow_diagram.py index 55bbf4aa8d0..3b6007c6595 100644 --- a/doc/sphinxext/flow_diagram.py +++ b/doc/sphinxext/flow_diagram.py @@ -1,80 +1,90 @@ -# -*- coding: utf-8 -*- +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import os from os import path as op -title = 'mne-python flow diagram' +title = "mne-python flow diagram" -font_face = 'Arial' +font_face = "Arial" node_size = 12 node_small_size = 9 edge_size = 9 -sensor_color = '#7bbeca' -source_color = '#ff6347' +sensor_color = "#7bbeca" +source_color = "#ff6347" -legend = """ -< +legend = f""" +< - - -
+
Sensor (M/EEG) space
+
Source (brain) space
>""" % (edge_size, sensor_color, source_color) -legend = ''.join(legend.split('\n')) +
>""" +legend = "".join(legend.split("\n")) nodes = dict( - T1='T1', - flashes='Flash5/30', - trans='Head-MRI trans', - recon='Freesurfer surfaces', - bem='BEM', - src='http://github.com/Source%20space/nmne.SourceSpaces', - cov='Noise covariance\nmne.Covariance', - fwd='Forward solution\nmne.forward.Forward', - inv='Inverse operator\nmne.minimum_norm.InverseOperator', - stc='Source estimate\nmne.SourceEstimate', - raw='Raw data\nmne.io.Raw', - epo='Epoched data\nmne.Epochs', - evo='Averaged data\nmne.Evoked', - pre='Preprocessed data\nmne.io.Raw', + T1="T1", + flashes="Flash5/30", + trans="Head-MRI trans", + recon="Freesurfer surfaces", + bem="BEM", + src="http://github.com/Source%20space/nmne.SourceSpaces", + cov="Noise covariance\nmne.Covariance", + fwd="Forward solution\nmne.forward.Forward", + inv="Inverse operator\nmne.minimum_norm.InverseOperator", + stc="Source estimate\nmne.SourceEstimate", + raw="Raw data\nmne.io.Raw", + epo="Epoched data\nmne.Epochs", + evo="Averaged data\nmne.Evoked", + pre="Preprocessed data\nmne.io.Raw", legend=legend, ) -sensor_space = ('raw', 'pre', 'epo', 'evo', 'cov') -source_space = ('src', 'stc', 'bem', 'flashes', 'recon', 'T1') +sensor_space = ("raw", "pre", "epo", "evo", "cov") +source_space = ("src", "stc", "bem", "flashes", "recon", "T1") edges = ( - ('T1', 'recon'), - ('flashes', 'bem'), - ('recon', 'bem'), - ('recon', 'src', 'mne.setup_source_space'), - ('src', 'fwd'), - ('bem', 'fwd'), - ('trans', 'fwd', 'mne.make_forward_solution'), - ('fwd', 'inv'), - ('cov', 'inv', 'mne.make_inverse_operator'), - ('inv', 'stc'), - ('evo', 'stc', 'mne.minimum_norm.apply_inverse'), - ('raw', 'pre', 'raw.filter\n' - 'mne.preprocessing.ICA\n' - 'mne.preprocessing.compute_proj_eog\n' - 'mne.preprocessing.compute_proj_ecg\n' - '...'), - ('pre', 'epo', 'mne.Epochs'), - ('epo', 'evo', 'epochs.average'), - ('epo', 'cov', 'mne.compute_covariance'), + ("T1", "recon"), + ("flashes", "bem"), + ("recon", "bem"), + ("recon", "src", "mne.setup_source_space"), + ("src", "fwd"), + ("bem", "fwd"), + ("trans", "fwd", "mne.make_forward_solution"), + ("fwd", "inv"), + ("cov", "inv", "mne.make_inverse_operator"), + ("inv", "stc"), + ("evo", "stc", "mne.minimum_norm.apply_inverse"), + ( + "raw", + "pre", + "raw.filter\n" + "mne.preprocessing.ICA\n" + "mne.preprocessing.compute_proj_eog\n" + "mne.preprocessing.compute_proj_ecg\n" + "...", + ), + ("pre", "epo", "mne.Epochs"), + ("epo", "evo", "epochs.average"), + ("epo", "cov", "mne.compute_covariance"), ) subgraphs = ( - [('T1', 'flashes', 'recon', 'bem', 'src'), - ('' - 'Freesurfer / MNE-C>' % node_small_size)], + [ + ("T1", "flashes", "recon", "bem", "src"), + ( + f'' + "Freesurfer / MNE-C>" + ), + ], ) def setup(app): - app.connect('builder-inited', generate_flow_diagram) - app.add_config_value('make_flow_diagram', True, 'html') + app.connect("builder-inited", generate_flow_diagram) + app.add_config_value("make_flow_diagram", True, "html") def setup_module(): @@ -83,84 +93,88 @@ def setup_module(): def generate_flow_diagram(app): - out_dir = op.join(app.builder.outdir, '_static') + out_dir = op.join(app.builder.outdir, "_static") if not op.isdir(out_dir): os.makedirs(out_dir) - out_fname = op.join(out_dir, 'mne-python_flow.svg') - make_flow_diagram = app is None or \ - bool(app.builder.config.make_flow_diagram) + out_fname = op.join(out_dir, "mne-python_flow.svg") + make_flow_diagram = app is None or bool(app.builder.config.make_flow_diagram) if not make_flow_diagram: - print('Skipping flow diagram, webpage will 
have a missing image') + print("Skipping flow diagram, webpage will have a missing image") return import pygraphviz as pgv + g = pgv.AGraph(name=title, directed=True) for key, label in nodes.items(): - label = label.split('\n') + label = label.split("\n") if len(label) > 1: - label[0] = ('<' % node_size - + label[0] + '') + label[0] = f'<' + label[0] + "" for li in range(1, len(label)): - label[li] = ('' % node_small_size - + label[li] + '') - label[-1] = label[-1] + '>' - label = '
'.join(label) + label[li] = ( + f'' + + label[li] + + "" + ) + label[-1] = label[-1] + ">" + label = "
".join(label) else: label = label[0] - g.add_node(key, shape='plaintext', label=label) + g.add_node(key, shape="plaintext", label=label) # Create and customize nodes and edges for edge in edges: g.add_edge(*edge[:2]) e = g.get_edge(*edge[:2]) if len(edge) > 2: - e.attr['label'] = ('<' + - '
'.join(edge[2].split('\n')) + - '
>') - e.attr['fontsize'] = edge_size + e.attr["label"] = ( + "<" + + '
'.join(edge[2].split("\n")) + + '
>' + ) + e.attr["fontsize"] = edge_size # Change colors - for these_nodes, color in zip((sensor_space, source_space), - (sensor_color, source_color)): + for these_nodes, color in zip( + (sensor_space, source_space), (sensor_color, source_color) + ): for node in these_nodes: - g.get_node(node).attr['fillcolor'] = color - g.get_node(node).attr['style'] = 'filled' + g.get_node(node).attr["fillcolor"] = color + g.get_node(node).attr["style"] = "filled" # Create subgraphs for si, subgraph in enumerate(subgraphs): - g.add_subgraph(subgraph[0], 'cluster%s' % si, - label=subgraph[1], color='black') + g.add_subgraph(subgraph[0], f"cluster{si}", label=subgraph[1], color="black") # Format (sub)graphs for gr in g.subgraphs() + [g]: for x in [gr.node_attr, gr.edge_attr]: - x['fontname'] = font_face - g.node_attr['shape'] = 'box' + x["fontname"] = font_face + g.node_attr["shape"] = "box" # A couple of special ones - for ni, node in enumerate(('fwd', 'inv', 'trans')): + for ni, node in enumerate(("fwd", "inv", "trans")): node = g.get_node(node) - node.attr['gradientangle'] = 270 + node.attr["gradientangle"] = 270 colors = (source_color, sensor_color) colors = colors if ni == 0 else colors[::-1] - node.attr['fillcolor'] = ':'.join(colors) - node.attr['style'] = 'filled' + node.attr["fillcolor"] = ":".join(colors) + node.attr["style"] = "filled" del node - g.get_node('legend').attr.update(shape='plaintext', margin=0, rank='sink') + g.get_node("legend").attr.update(shape="plaintext", margin=0, rank="sink") # put legend in same rank/level as inverse - leg = g.add_subgraph(['legend', 'inv'], name='legendy') - leg.graph_attr['rank'] = 'same' + leg = g.add_subgraph(["legend", "inv"], name="legendy") + leg.graph_attr["rank"] = "same" - g.layout('dot') - g.draw(out_fname, format='svg') + g.layout("dot") + g.draw(out_fname, format="svg") return g # This is useful for testing/iterating to see what the result looks like -if __name__ == '__main__': +if __name__ == "__main__": from mne.io.constants import Bunch - out_dir = op.abspath(op.join(op.dirname(__file__), '..', '_build', 'html')) - app = Bunch(builder=Bunch(outdir=out_dir, - config=Bunch(make_flow_diagram=True))) + + out_dir = op.abspath(op.join(op.dirname(__file__), "..", "_build", "html")) + app = Bunch(builder=Bunch(outdir=out_dir, config=Bunch(make_flow_diagram=True))) g = generate_flow_diagram(app) diff --git a/doc/sphinxext/gen_commands.py b/doc/sphinxext/gen_commands.py index e0169a44b77..9e9d04eca08 100644 --- a/doc/sphinxext/gen_commands.py +++ b/doc/sphinxext/gen_commands.py @@ -1,15 +1,16 @@ -# -*- coding: utf-8 -*- +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
import glob from importlib import import_module -import os -from os import path as op +from pathlib import Path -from mne.utils import _replace_md5, ArgvSetter +from mne.utils import ArgvSetter, _replace_md5 def setup(app): - app.connect('builder-inited', generate_commands_rst) + app.connect("builder-inited", generate_commands_rst) def setup_module(): @@ -49,25 +50,29 @@ def setup_module(): def generate_commands_rst(app=None): - from sphinx.util import status_iterator - out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated')) - if not op.isdir(out_dir): - os.mkdir(out_dir) - out_fname = op.join(out_dir, 'commands.rst.new') - - command_path = op.abspath( - op.join(os.path.dirname(__file__), '..', '..', 'mne', 'commands')) - fnames = sorted([ - op.basename(fname) - for fname in glob.glob(op.join(command_path, 'mne_*.py'))]) + try: + from sphinx.util.display import status_iterator + except Exception: + from sphinx.util import status_iterator + root = Path(__file__).parents[2] + out_dir = root / "doc" / "generated" + out_dir.mkdir(exist_ok=True) + out_fname = out_dir / "commands.rst.new" + + command_path = root / "mne" / "commands" + fnames = sorted( + Path(fname).name for fname in glob.glob(str(command_path / "mne_*.py")) + ) + assert len(fnames) iterator = status_iterator( - fnames, 'generating MNE command help ... ', length=len(fnames)) - with open(out_fname, 'w', encoding='utf8') as f: + fnames, "generating MNE command help ... ", length=len(fnames) + ) + with open(out_fname, "w", encoding="utf8") as f: f.write(header) for fname in iterator: cmd_name = fname[:-3] - module = import_module('.' + cmd_name, 'mne.commands') - with ArgvSetter(('mne', cmd_name, '--help')) as out: + module = import_module("." + cmd_name, "mne.commands") + with ArgvSetter(("mne", cmd_name, "--help")) as out: try: module.run() except SystemExit: # this is how these terminate @@ -79,29 +84,30 @@ def generate_commands_rst(app=None): # Add header marking for idx in (1, 0): - output.insert(idx, '-' * len(output[0])) + output.insert(idx, "-" * len(output[0])) # Add code styling for the "Usage: " line for li, line in enumerate(output): - if line.startswith('Usage: mne '): - output[li] = 'Usage: ``%s``' % line[7:] + if line.startswith("Usage: mne "): + output[li] = f"Usage: ``{line[7:]}``" break # Turn "Options:" into field list - if 'Options:' in output: - ii = output.index('Options:') - output[ii] = 'Options' - output.insert(ii + 1, '-------') - output.insert(ii + 2, '') - output.insert(ii + 3, '.. rst-class:: field-list cmd-list') - output.insert(ii + 4, '') - output = '\n'.join(output) - cmd_name_space = cmd_name.replace('mne_', 'mne ') - f.write(command_rst.format( - cmd_name_space, '=' * len(cmd_name_space), output)) - _replace_md5(out_fname) + if "Options:" in output: + ii = output.index("Options:") + output[ii] = "Options" + output.insert(ii + 1, "-------") + output.insert(ii + 2, "") + output.insert(ii + 3, ".. 
rst-class:: field-list cmd-list") + output.insert(ii + 4, "") + output = "\n".join(output) + cmd_name_space = cmd_name.replace("mne_", "mne ") + f.write( + command_rst.format(cmd_name_space, "=" * len(cmd_name_space), output) + ) + _replace_md5(str(out_fname)) # This is useful for testing/iterating to see what the result looks like -if __name__ == '__main__': +if __name__ == "__main__": generate_commands_rst() diff --git a/doc/sphinxext/gen_names.py b/doc/sphinxext/gen_names.py index 01785598430..24212dd7a9b 100644 --- a/doc/sphinxext/gen_names.py +++ b/doc/sphinxext/gen_names.py @@ -1,11 +1,13 @@ -# -*- coding: utf-8 -*- +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import os from os import path as op def setup(app): - app.connect('builder-inited', generate_name_links_rst) + app.connect("builder-inited", generate_name_links_rst) def setup_module(): @@ -14,17 +16,18 @@ def setup_module(): def generate_name_links_rst(app=None): - if 'linkcheck' not in str(app.builder).lower(): + if "linkcheck" not in str(app.builder).lower(): return - out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated')) + out_dir = op.abspath(op.join(op.dirname(__file__), "..", "generated")) if not op.isdir(out_dir): os.mkdir(out_dir) - out_fname = op.join(out_dir, '_names.rst') + out_fname = op.join(out_dir, "_names.rst") names_path = op.abspath( - op.join(os.path.dirname(__file__), '..', 'changes', 'names.inc')) - with open(out_fname, 'w', encoding='utf8') as fout: - fout.write(':orphan:\n\n') - with open(names_path, 'r') as fin: + op.join(os.path.dirname(__file__), "..", "changes", "names.inc") + ) + with open(out_fname, "w", encoding="utf8") as fout: + fout.write(":orphan:\n\n") + with open(names_path) as fin: for line in fin: - if line.startswith('.. _'): - fout.write(f'- {line[4:]}') + if line.startswith(".. _"): + fout.write(f"- {line[4:]}") diff --git a/doc/sphinxext/gh_substitutions.py b/doc/sphinxext/gh_substitutions.py index 2c1cbf1f76c..467e93c5470 100644 --- a/doc/sphinxext/gh_substitutions.py +++ b/doc/sphinxext/gh_substitutions.py @@ -1,10 +1,12 @@ -# -*- coding: utf-8 -*- +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. from docutils.nodes import reference from docutils.parsers.rst.roles import set_classes -def gh_role(name, rawtext, text, lineno, inliner, options={}, content=[]): +def gh_role(name, rawtext, text, lineno, inliner, options={}, content=[]): # noqa: B006 """Link to a GitHub issue. adapted from @@ -17,14 +19,14 @@ def gh_role(name, rawtext, text, lineno, inliner, options={}, content=[]): # direct link mode slug = text else: - slug = 'issues/' + text - text = '#' + text - ref = 'https://github.com/mne-tools/mne-python/' + slug + slug = "issues/" + text + text = "#" + text + ref = "https://github.com/mne-tools/mne-python/" + slug set_classes(options) node = reference(rawtext, text, refuri=ref, **options) return [node], [] def setup(app): - app.add_role('gh', gh_role) + app.add_role("gh", gh_role) return diff --git a/doc/sphinxext/mne_doc_utils.py b/doc/sphinxext/mne_doc_utils.py new file mode 100644 index 00000000000..7d6c545837e --- /dev/null +++ b/doc/sphinxext/mne_doc_utils.py @@ -0,0 +1,229 @@ +"""Doc building utils.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +import gc +import os +import time +import warnings +from pathlib import Path + +import numpy as np +import pyvista +import sphinx.util.logging +from sphinx.errors import ExtensionError + +import mne +from mne.utils import ( + _assert_no_instances as _assert_no_instances_mne, +) +from mne.utils import ( + _get_extra_data_path, + sizeof_fmt, +) +from mne.viz import Brain + +sphinx_logger = sphinx.util.logging.getLogger("mne") +_np_print_defaults = np.get_printoptions() + + +def reset_warnings(gallery_conf, fname): + """Ensure we are future compatible and ignore silly warnings.""" + # In principle, our examples should produce no warnings. + # Here we cause warnings to become errors, with a few exceptions. + # This list should be considered alongside + # setup.cfg -> [tool:pytest] -> filterwarnings + + # remove tweaks from other module imports or example runs + warnings.resetwarnings() + # restrict + warnings.filterwarnings("error") + # allow these, but show them + warnings.filterwarnings("always", '.*non-standard config type: "foo".*') + warnings.filterwarnings("always", '.*config type: "MNEE_USE_CUUDAA".*') + warnings.filterwarnings("always", ".*cannot make axes width small.*") + warnings.filterwarnings("always", ".*Axes that are not compatible.*") + warnings.filterwarnings("always", ".*FastICA did not converge.*") + # ECoG BIDS spec violations: + warnings.filterwarnings("always", ".*Fiducial point nasion not found.*") + warnings.filterwarnings("always", ".*DigMontage is only a subset of.*") + warnings.filterwarnings( # xhemi morph (should probably update sample) + "always", ".*does not exist, creating it and saving it.*" + ) + # internal warnings + warnings.filterwarnings("default", module="sphinx") + # don't error on joblib warning during parallel doc build otherwise we get a + # cryptic deadlock instead of a nice traceback + warnings.filterwarnings( + "always", + "A worker stopped while some jobs were given to the executor.*", + category=UserWarning, + ) + # ignore (DeprecationWarning) + for key in ( + # nibabel + "__array__ implementation doesn't accept.*", + # pybtex (from sphinxcontrib-bibtex) + "pkg_resources is deprecated as an API.*", + "\nImplementing implicit namespace packages", + # latexcodec + r"open_text is deprecated\. Use files", + # dipy etc. 
+ "The `disp` and `iprint` options of the L-BFGS-B solver", + ): + warnings.filterwarnings( # deal with other modules having bad imports + "ignore", message=f".*{key}.*", category=DeprecationWarning + ) + # ignore (UserWarning) + for message in ( + # Matplotlib + ".*is non-interactive, and thus cannot.*", + # pybtex + ".*pkg_resources is deprecated as an API.*", + ): + warnings.filterwarnings( + "ignore", + message=message, + category=UserWarning, + ) + # ignore (RuntimeWarning) + for message in ( + # mne-python config file "corruption" due to doc build parallelization + ".*The MNE-Python config file.*valid JSON.*", + ): + warnings.filterwarnings( + "ignore", + message=message, + category=RuntimeWarning, + ) + + # In case we use np.set_printoptions in any tutorials, we only + # want it to affect those: + np.set_printoptions(**_np_print_defaults) + + +t0 = time.time() + + +def _assert_no_instances(cls, when): + """Wrap our internal one but make the traceback nicer when it fails.""" + try: + _assert_no_instances_mne(cls, when) + except Exception as exc: + raise ExtensionError(str(exc)) from None + + +def reset_modules(gallery_conf, fname, when): + """Do the reset.""" + import matplotlib.pyplot as plt + + mne.viz.set_3d_backend("pyvistaqt") + pyvista.OFF_SCREEN = False + pyvista.BUILDING_GALLERY = True + + from pyvista import Plotter # noqa + + try: + from pyvistaqt import BackgroundPlotter # noqa + except ImportError: + BackgroundPlotter = None # noqa + try: + from vtkmodules.vtkCommonDataModel import vtkPolyData # noqa + except ImportError: + vtkPolyData = None # noqa + try: + from mne_qt_browser._pg_figure import MNEQtBrowser + except ImportError: + MNEQtBrowser = None + from mne.viz.backends.renderer import backend + + _Renderer = backend._Renderer if backend is not None else None + reset_warnings(gallery_conf, fname) + # in case users have interactive mode turned on in matplotlibrc, + # turn it off here (otherwise the build can be very slow) + plt.ioff() + plt.rcParams["animation.embed_limit"] = 40.0 + plt.rcParams["figure.raise_window"] = False + # https://github.com/sphinx-gallery/sphinx-gallery/pull/1243#issue-2043332860 + plt.rcParams["animation.html"] = "html5" + # neo holds on to an exception, which in turn holds a stack frame, + # which will keep alive the global vars during SG execution + try: + import neo + + neo.io.stimfitio.STFIO_ERR = None + except Exception: + pass + gc.collect() + + # Agg does not call close_event so let's clean up on our own :( + # https://github.com/matplotlib/matplotlib/issues/18609 + mne.viz.ui_events._cleanup_agg() + assert len(mne.viz.ui_events._event_channels) == 0, list( + mne.viz.ui_events._event_channels + ) + + orig_when = when + when = f"mne/conf.py:Resetter.__call__:{when}:{fname}" + # Support stuff like + # MNE_SKIP_INSTANCE_ASSERTIONS="Brain,Plotter,BackgroundPlotter,vtkPolyData,_Renderer" make html-memory # noqa: E501 + # to just test MNEQtBrowser + skips = os.getenv("MNE_SKIP_INSTANCE_ASSERTIONS", "").lower() + prefix = "" + if skips not in ("true", "1", "all"): + prefix = "Clean " + skips = skips.split(",") + if "brain" not in skips: + _assert_no_instances(Brain, when) # calls gc.collect() + if Plotter is not None and "plotter" not in skips: + _assert_no_instances(Plotter, when) + if BackgroundPlotter is not None and "backgroundplotter" not in skips: + _assert_no_instances(BackgroundPlotter, when) + if vtkPolyData is not None and "vtkpolydata" not in skips: + _assert_no_instances(vtkPolyData, when) + if "_renderer" not in skips: + 
             _assert_no_instances(_Renderer, when)
+        if MNEQtBrowser is not None and "mneqtbrowser" not in skips:
+            # Ensure any manual fig.close() events get properly handled
+            from mne_qt_browser._pg_figure import QApplication
+
+            inst = QApplication.instance()
+            if inst is not None:
+                for _ in range(2):
+                    inst.processEvents()
+            _assert_no_instances(MNEQtBrowser, when)
+    # This will overwrite some Sphinx printing but it's useful
+    # for memory timestamps
+    if os.getenv("SG_STAMP_STARTS", "").lower() == "true":
+        import psutil
+
+        process = psutil.Process(os.getpid())
+        mem = sizeof_fmt(process.memory_info().rss)
+        print(f"{prefix}{time.time() - t0:6.1f} s : {mem}".ljust(22))
+
+    if fname == "50_configure_mne.py":
+        # This messes with the config, so let's do so in a temp dir
+        if orig_when == "before":
+            fake_home = Path(_get_extra_data_path()) / "temp"
+            fake_home.mkdir(exist_ok=True, parents=True)
+            os.environ["_MNE_FAKE_HOME_DIR"] = str(fake_home)
+        else:
+            assert orig_when == "after"
+            to_del = Path(os.environ["_MNE_FAKE_HOME_DIR"])
+            try:
+                (to_del / "mne-python.json").unlink()
+            except Exception:
+                pass
+            try:
+                to_del.rmdir()
+            except Exception:
+                pass
+            del os.environ["_MNE_FAKE_HOME_DIR"]
+
+
+report_scraper = mne.report._ReportScraper()
+mne_qt_browser_scraper = mne.viz._scraper._MNEQtBrowserScraper()
+brain_scraper = mne.viz._brain._BrainScraper()
+gui_scraper = mne.gui._GUIScraper()
diff --git a/doc/sphinxext/mne_substitutions.py b/doc/sphinxext/mne_substitutions.py
index a9309baaf42..df23dbc9103 100644
--- a/doc/sphinxext/mne_substitutions.py
+++ b/doc/sphinxext/mne_substitutions.py
@@ -1,48 +1,81 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 from docutils import nodes
 from docutils.parsers.rst import Directive
 from docutils.statemachine import StringList
 
+from mne._fiff.pick import (
+    _DATA_CH_TYPES_ORDER_DEFAULT,
+    _DATA_CH_TYPES_SPLIT,
+    _EYETRACK_CH_TYPES_SPLIT,
+    _PICK_TYPES_DATA_DICT,
+)
 from mne.defaults import DEFAULTS
-from mne.io.pick import (_PICK_TYPES_DATA_DICT, _DATA_CH_TYPES_SPLIT,
-                         _DATA_CH_TYPES_ORDER_DEFAULT)
 
 
 class MNESubstitution(Directive):  # noqa: D101
-    has_content = False
     required_arguments = 1
     final_argument_whitespace = True
 
     def run(self, **kwargs):  # noqa: D102
         env = self.state.document.settings.env
-        if self.arguments[0] == 'data channels list':
+        if self.arguments[0] == "data channels list":
             keys = list()
             for key in _DATA_CH_TYPES_ORDER_DEFAULT:
                 if key in _DATA_CH_TYPES_SPLIT:
                     keys.append(key)
-                elif key not in ('meg', 'fnirs') and \
-                        _PICK_TYPES_DATA_DICT.get(key, False):
+                elif key not in ("meg", "fnirs") and _PICK_TYPES_DATA_DICT.get(
+                    key, False
+                ):
+                    keys.append(key)
+            rst = "- " + "\n- ".join(
+                f"``{repr(key)}``: **{DEFAULTS['titles'][key]}** "
+                f"(scaled by {DEFAULTS['scalings'][key]:g} to "
+                f"plot in *{DEFAULTS['units'][key]}*)"
+                for key in keys
+            )
+        elif self.arguments[0] == "non-data channels list":
+            keys = list()
+            rst = ""
+            for key in _DATA_CH_TYPES_ORDER_DEFAULT:
+                if (
+                    not _PICK_TYPES_DATA_DICT.get(key, True)
+                    or key in _EYETRACK_CH_TYPES_SPLIT
+                    or key in ("ref_meg", "whitened")
+                ):
                     keys.append(key)
-            rst = '- ' + '\n- '.join(
-                '``%r``: **%s** (scaled by %g to plot in *%s*)'
-                % (key, DEFAULTS['titles'][key], DEFAULTS['scalings'][key],
-                   DEFAULTS['units'][key])
-                for key in keys)
+            for key in keys:
+                if DEFAULTS["scalings"].get(key, False) and DEFAULTS["units"].get(
+                    key, False
+                ):
+                    rst += (
+                        f"- ``{repr(key)}``: **{DEFAULTS['titles'][key]}** "
+                        f"(scaled by {DEFAULTS['scalings'][key]:g} to "
+                        f"plot in *{DEFAULTS['units'][key]}*)\n"
+                    )
+                else:
+                    rst += f"- ``{repr(key)}``: **{DEFAULTS['titles'][key]}**\n"
         else:
             raise self.error(
-                'MNE directive unknown in %s: %r'
-                % (env.doc2path(env.docname, base=None),
-                   self.arguments[0],))
+                "MNE directive unknown in %s: %r"  # noqa: UP031
+                % (
+                    env.doc2path(env.docname, base=None),
+                    self.arguments[0],
+                )
+            )
         node = nodes.compound(rst)  # General(Body), Element
         content = StringList(
-            rst.split('\n'), parent=self.content.parent,
-            parent_offset=self.content.parent_offset)
+            rst.split("\n"),
+            parent=self.content.parent,
+            parent_offset=self.content.parent_offset,
+        )
         self.state.nested_parse(content, self.content_offset, node)
         return [node]
 
 
 def setup(app):  # noqa: D103
-    app.add_directive('mne', MNESubstitution)
-    return {'version': '0.1',
-            'parallel_read_safe': True,
-            'parallel_write_safe': True}
+    app.add_directive("mne", MNESubstitution)
+    return {"version": "0.1", "parallel_read_safe": True, "parallel_write_safe": True}
diff --git a/doc/sphinxext/newcontrib_substitutions.py b/doc/sphinxext/newcontrib_substitutions.py
index 559a14bafa4..85feae1ad89 100644
--- a/doc/sphinxext/newcontrib_substitutions.py
+++ b/doc/sphinxext/newcontrib_substitutions.py
@@ -1,20 +1,21 @@
-# -*- coding: utf-8 -*-
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
 from docutils.nodes import reference, strong, target
 
 
-def newcontrib_role(name, rawtext, text, lineno, inliner, options={},
-                    content=[]):
+def newcontrib_role(name, rawtext, text, lineno, inliner, options={}, content=[]):  # noqa: B006
     """Create a role to highlight new contributors in changelog entries."""
-    newcontrib = f'new contributor {text}'
-    alias_text = f' <{text}_>'
-    rawtext = f'`{newcontrib}{alias_text}`_'
+    newcontrib = f"new contributor {text}"
+    alias_text = f" <{text}_>"
+    rawtext = f"`{newcontrib}{alias_text}`_"
     refname = text.lower()
     strong_node = strong(rawtext, newcontrib)
     target_node = target(alias_text, refname=refname, names=[newcontrib])
     target_node.indirect_reference_name = text
     options.update(refname=refname, name=newcontrib)
-    ref_node = reference('', '', strong_node, **options)
+    ref_node = reference("", "", strong_node, **options)
     ref_node[0].rawsource = rawtext
     inliner.document.note_indirect_target(target_node)
     inliner.document.note_refname(ref_node)
@@ -22,5 +23,5 @@
 
 
 def setup(app):
-    app.add_role('newcontrib', newcontrib_role)
+    app.add_role("newcontrib", newcontrib_role)
     return
diff --git a/doc/sphinxext/prs/1.json b/doc/sphinxext/prs/1.json
new file mode 100644
index 00000000000..faf866354a4
--- /dev/null
+++ b/doc/sphinxext/prs/1.json
@@ -0,0 +1,15 @@
+{
+    "merge_commit_sha": "cc832c4711fd29382be9ca0fe892b0d9f1301c97",
+    "authors": [
+        {
+            "n": "ellenlau",
+            "e": "ellenlau@umd.edu"
+        }
+    ],
+    "changes": {
+        "examples/inverse/plot_label_source_activations.py": {
+            "a": 2,
+            "d": 0
+        }
+    }
+}
\ No newline at end of file
diff --git a/doc/sphinxext/prs/10.json b/doc/sphinxext/prs/10.json
new file mode 100644
index 00000000000..26ebc5cba10
--- /dev/null
+++ b/doc/sphinxext/prs/10.json
@@ -0,0 +1,19 @@
+{
+    "merge_commit_sha": "3b705d9973cd6e824f74226d1223387f4572453d",
+    "authors": [
+        {
+            "n": "Martin Luessi",
+            "e": null
+        }
+    ],
+    "changes": {
+        "mne/filter.py": {
+            "a": 199,
+            "d": 82
+        },
+        "mne/tests/test_filter.py": {
+            "a": 21,
+            "d": 2
+        }
+    }
+}
\ No newline at end of file
diff --git
a/doc/sphinxext/prs/1000.json b/doc/sphinxext/prs/1000.json new file mode 100644 index 00000000000..d735617144e --- /dev/null +++ b/doc/sphinxext/prs/1000.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "399d323ecc7360ec210775a3f2ba9769747bf140", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/edf/tests/test_edf.py": { + "a": 17, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10001.json b/doc/sphinxext/prs/10001.json new file mode 100644 index 00000000000..d0ad9087382 --- /dev/null +++ b/doc/sphinxext/prs/10001.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "923e6dd6f7a83af91e9bfdfc42b21805a411a978", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 10, + "d": 8 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 21, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10002.json b/doc/sphinxext/prs/10002.json new file mode 100644 index 00000000000..8ade4063cf0 --- /dev/null +++ b/doc/sphinxext/prs/10002.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9a305c1a8820d155b38bcacea0b773808ccebb30", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 8, + "d": 3 + }, + "mne/io/tests/test_meas_info.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10003.json b/doc/sphinxext/prs/10003.json new file mode 100644 index 00000000000..1728f8b0151 --- /dev/null +++ b/doc/sphinxext/prs/10003.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "444075aefe9d4f907f1ab652e2841953d98334db", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "doc/glossary.rst": { + "a": 11, + "d": 0 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 4, + "d": 4 + }, + "mne/surface.py": { + "a": 3, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10004.json b/doc/sphinxext/prs/10004.json new file mode 100644 index 00000000000..07e8f8288db --- /dev/null +++ b/doc/sphinxext/prs/10004.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "33aa10ec7604b1aabe1c6ef51b11f3638aa0a0f5", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10005.json b/doc/sphinxext/prs/10005.json new file mode 100644 index 00000000000..addf33c2390 --- /dev/null +++ b/doc/sphinxext/prs/10005.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1df8a5f8863e12abbe05da2aa6b93edbcbcc1144", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/externals/pymatreader/pymatreader.py": { + "a": 2, + "d": 2 + }, + 
"mne/externals/pymatreader/utils.py": { + "a": 3, + "d": 5 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10007.json b/doc/sphinxext/prs/10007.json new file mode 100644 index 00000000000..291abdad328 --- /dev/null +++ b/doc/sphinxext/prs/10007.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f71b98a3a082a79a89e0e5948813fbd7a27fdb66", + "authors": [ + { + "n": "Nikolai M Chapochnikov", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10008.json b/doc/sphinxext/prs/10008.json new file mode 100644 index 00000000000..cd6408c333a --- /dev/null +++ b/doc/sphinxext/prs/10008.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8e2ca2aa8c3b667bdbb92fa2b74df7c7b39e524a", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/report/js_and_css/report.js": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10009.json b/doc/sphinxext/prs/10009.json new file mode 100644 index 00000000000..ffaa379869d --- /dev/null +++ b/doc/sphinxext/prs/10009.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "47e15a8d78ac4315187322e7b2f8c99f5d5b4239", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 4, + "d": 6 + }, + "mne/viz/evoked.py": { + "a": 5, + "d": 3 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10013.json b/doc/sphinxext/prs/10013.json new file mode 100644 index 00000000000..9700be86043 --- /dev/null +++ b/doc/sphinxext/prs/10013.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5672f78b1e2bb3dd51fea1d30aa8f7a879d2ee38", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "mne/externals/pymatreader/pymatreader.py": { + "a": 6, + "d": 2 + }, + "mne/externals/pymatreader/utils.py": { + "a": 6, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10015.json b/doc/sphinxext/prs/10015.json new file mode 100644 index 00000000000..578d26db577 --- /dev/null +++ b/doc/sphinxext/prs/10015.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "0ad069bbff3a1b6f380ce77b191c766eddc0a991", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_coreg.py": { + "a": 1, + "d": 16 + }, + "mne/conftest.py": { + "a": 2, + "d": 11 + }, + "mne/defaults.py": { + "a": 1, + "d": 1 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 9, + "d": 10 + }, + "mne/gui/_coreg.py": { + "a": 181, + "d": 84 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 8, + "d": 6 + }, + "mne/utils/check.py": { + "a": 18, + "d": 8 + }, + "mne/viz/_3d.py": { + "a": 39, + "d": 20 + }, + "mne/viz/backends/_abstract.py": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 5, + "d": 8 + }, + "mne/viz/backends/_pyvista.py": { + "a": 15, + "d": 17 + }, + "mne/viz/backends/_qt.py": { + "a": 3, + "d": 0 + }, 
+ "mne/viz/backends/_utils.py": { + "a": 16, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "server_environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10017.json b/doc/sphinxext/prs/10017.json new file mode 100644 index 00000000000..b45e5e035a4 --- /dev/null +++ b/doc/sphinxext/prs/10017.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e6db45db994313be3bc9d686d7bc830b33a186d1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10018.json b/doc/sphinxext/prs/10018.json new file mode 100644 index 00000000000..d7d31472315 --- /dev/null +++ b/doc/sphinxext/prs/10018.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "54638887057a2cc771e108e44dd4a61b75de4933", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10019.json b/doc/sphinxext/prs/10019.json new file mode 100644 index 00000000000..80ce048a3ce --- /dev/null +++ b/doc/sphinxext/prs/10019.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "2791ba472a7a3cec6135b70441288384842bf6ff", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 3 + }, + "mne/annotations.py": { + "a": 205, + "d": 0 + }, + "mne/epochs.py": { + "a": 79, + "d": 208 + }, + "mne/io/constants.py": { + "a": 1, + "d": 0 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 199, + "d": 68 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10020.json b/doc/sphinxext/prs/10020.json new file mode 100644 index 00000000000..be72c500c00 --- /dev/null +++ b/doc/sphinxext/prs/10020.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "955f9149a56de39b4f51b94780e910a871e2efd3", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 9, + "d": 1 + }, + "doc/conf.py": { + "a": 27, + "d": 20 + }, + "doc/install/contributing.rst": { + "a": 56, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10021.json b/doc/sphinxext/prs/10021.json new file mode 100644 index 00000000000..571e9808d18 --- /dev/null +++ b/doc/sphinxext/prs/10021.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4cab7e96262c44101b9d87c3d91a56010054cc83", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": 
{ + "a": 2, + "d": 0 + }, + "mne/surface.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10024.json b/doc/sphinxext/prs/10024.json new file mode 100644 index 00000000000..da749aeb864 --- /dev/null +++ b/doc/sphinxext/prs/10024.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "57b9de61c1ba0386654cec0d95145ea35f40ca31", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/surface.py": { + "a": 22, + "d": 7 + }, + "mne/tests/test_surface.py": { + "a": 6, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10025.json b/doc/sphinxext/prs/10025.json new file mode 100644 index 00000000000..d2070ac0cd0 --- /dev/null +++ b/doc/sphinxext/prs/10025.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ce29d3e402cc390e51c16a97f9d2ff27a205147b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 7, + "d": 3 + }, + "tools/circleci_dependencies.sh": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10026.json b/doc/sphinxext/prs/10026.json new file mode 100644 index 00000000000..df420a4bcca --- /dev/null +++ b/doc/sphinxext/prs/10026.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3722dd8772941fd1502ea57ec8390fb875156e31", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/circleci_dependencies.sh": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10027.json b/doc/sphinxext/prs/10027.json new file mode 100644 index 00000000000..7f742c3dc85 --- /dev/null +++ b/doc/sphinxext/prs/10027.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3e87a691b07ef14dd3705054ecc789a1fa6f18b6", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10030.json b/doc/sphinxext/prs/10030.json new file mode 100644 index 00000000000..ff4c3b38564 --- /dev/null +++ b/doc/sphinxext/prs/10030.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ead37bb15e89eff45d0813183867a1807f44aa7f", + "authors": [ + { + "n": "Julia Guiomar Niso Galán", + "e": null + } + ], + "changes": { + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10031.json b/doc/sphinxext/prs/10031.json new file mode 100644 index 00000000000..a3ed7256fe7 --- /dev/null +++ b/doc/sphinxext/prs/10031.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "98b01534297e38ec6e876d3753f04bc7e82473e0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 4 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 4 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 4 + }, + 
"mne/viz/topomap.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10036.json b/doc/sphinxext/prs/10036.json new file mode 100644 index 00000000000..da105b1b376 --- /dev/null +++ b/doc/sphinxext/prs/10036.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b5f48116920871e50aa9f6e21d5046eafc63100e", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10038.json b/doc/sphinxext/prs/10038.json new file mode 100644 index 00000000000..7fde83cfbbc --- /dev/null +++ b/doc/sphinxext/prs/10038.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0789b7dade297a7e5cae10a343a870ed260ab9a0", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/datasets/__init__.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10039.json b/doc/sphinxext/prs/10039.json new file mode 100644 index 00000000000..b61cbd71b65 --- /dev/null +++ b/doc/sphinxext/prs/10039.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "602958dc4bb7094a770671d78777c2d90cc266bb", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 6, + "d": 6 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1004.json b/doc/sphinxext/prs/1004.json new file mode 100644 index 00000000000..25060b67cc7 --- /dev/null +++ b/doc/sphinxext/prs/1004.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1af793825ae60e68c2845c853c5c9a1bffc3628b", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/epochs.py": { + "a": 13, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10040.json b/doc/sphinxext/prs/10040.json new file mode 100644 index 00000000000..458de624038 --- /dev/null +++ b/doc/sphinxext/prs/10040.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a3d20ad4a003bbb873bec1237d4941c9498fe1ab", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 5, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10041.json b/doc/sphinxext/prs/10041.json new file mode 100644 index 00000000000..93f088a4b45 --- /dev/null +++ b/doc/sphinxext/prs/10041.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "34ca144db4adab33d576bb60db1d7e8661b1082f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 0, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10044.json b/doc/sphinxext/prs/10044.json new file mode 100644 index 00000000000..d0c2a02d8b1 --- /dev/null +++ b/doc/sphinxext/prs/10044.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"f58eb9d55c21f20ba56016a2e06a05d2997b5faa", + "authors": [ + { + "n": "Jan Sosulski", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 5, + "d": 1 + }, + "mne/evoked.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10045.json b/doc/sphinxext/prs/10045.json new file mode 100644 index 00000000000..5d235361685 --- /dev/null +++ b/doc/sphinxext/prs/10045.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b22031065418396b131676e7ef472acfcb123002", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 6, + "d": 3 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10047.json b/doc/sphinxext/prs/10047.json new file mode 100644 index 00000000000..f83191c4697 --- /dev/null +++ b/doc/sphinxext/prs/10047.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6da538f2ac9f4d922a499ff407885dae08f1b6f2", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/config.py": { + "a": 4, + "d": 4 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10048.json b/doc/sphinxext/prs/10048.json new file mode 100644 index 00000000000..3ff42a6491c --- /dev/null +++ b/doc/sphinxext/prs/10048.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "327777cd77b7ff41d26ebcc334161bbb07bc5db3", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 10, + "d": 9 + }, + "mne/viz/backends/_pyvista.py": { + "a": 3, + "d": 11 + }, + "mne/viz/backends/_utils.py": { + "a": 57, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 4 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "tools/github_actions_dependencies.sh": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10050.json b/doc/sphinxext/prs/10050.json new file mode 100644 index 00000000000..9bd20f416be --- /dev/null +++ b/doc/sphinxext/prs/10050.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "07ef4d698ff60c1e1f04b3d0566bec6250fa0e90", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10051.json b/doc/sphinxext/prs/10051.json new file mode 100644 index 00000000000..8e7d4b5d8df --- /dev/null +++ b/doc/sphinxext/prs/10051.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0a63648b6596ece5afe1448bacd32658b27a7fdc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 28 + }, + "mne/utils/docs.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10052.json b/doc/sphinxext/prs/10052.json new file mode 100644 index 00000000000..cd5673a4b09 --- /dev/null +++ b/doc/sphinxext/prs/10052.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d9862fdfa76dcace92d8749df5277b6388981fb0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/10054.json b/doc/sphinxext/prs/10054.json new file mode 100644 index 00000000000..70491326c91 --- /dev/null +++ b/doc/sphinxext/prs/10054.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c45920796deabdd95f0a8ec40e5184605de85cc7", + "authors": [ + { + "n": "Etienne de Montalivet", + "e": "etienne.demontalivet@protonmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 6, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10056.json b/doc/sphinxext/prs/10056.json new file mode 100644 index 00000000000..2d617764e3c --- /dev/null +++ b/doc/sphinxext/prs/10056.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "10b537c26573d999597500a854c602d7fa81688c", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/glossary.rst": { + "a": 22, + "d": 10 + }, + "mne/annotations.py": { + "a": 3, + "d": 4 + }, + "mne/epochs.py": { + "a": 4, + "d": 2 + }, + "mne/event.py": { + "a": 32, + "d": 30 + }, + "mne/io/base.py": { + "a": 6, + "d": 5 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 12 + }, + "mne/simulation/source.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 23, + "d": 20 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10058.json b/doc/sphinxext/prs/10058.json new file mode 100644 index 00000000000..6e3fd7e7c8d --- /dev/null +++ b/doc/sphinxext/prs/10058.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "eff12c448bea2d9acb1c9483f05ec32ba24ffae1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/export/tests/test_export.py": { + "a": 12, + "d": 17 + }, + "mne/io/edf/edf.py": { + "a": 81, + "d": 51 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 37, + "d": 14 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10061.json b/doc/sphinxext/prs/10061.json new file mode 100644 index 00000000000..b30dfb02dd8 --- /dev/null +++ b/doc/sphinxext/prs/10061.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1514d0c72e4f0c45b00af9138925512f52640ea9", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10064.json b/doc/sphinxext/prs/10064.json new file mode 100644 index 00000000000..1d0b8cb1a2f --- /dev/null +++ b/doc/sphinxext/prs/10064.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ffdac14db914c7c810e93cf256c020e6e55b487f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 6, + "d": 3 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10065.json b/doc/sphinxext/prs/10065.json new file mode 100644 index 00000000000..ca3c59f48a7 --- /dev/null +++ b/doc/sphinxext/prs/10065.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "113dafb58f77879229ba7debe0bcb2295178b25c", 
+ "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10066.json b/doc/sphinxext/prs/10066.json new file mode 100644 index 00000000000..fd3eb626a2b --- /dev/null +++ b/doc/sphinxext/prs/10066.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7b5c61cd951e5071170995fa885151711e8f48c0", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/0.24.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10067.json b/doc/sphinxext/prs/10067.json new file mode 100644 index 00000000000..c554c537b0a --- /dev/null +++ b/doc/sphinxext/prs/10067.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "151b8a6cf92524d7e97ea63592b6d3707c38e2b4", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/annotate_nan.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 16, + "d": 9 + }, + "mne/preprocessing/flat.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 33, + "d": 13 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10068.json b/doc/sphinxext/prs/10068.json new file mode 100644 index 00000000000..d04ea0d8f49 --- /dev/null +++ b/doc/sphinxext/prs/10068.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "00f27916b1eb865d33c6dcb757ef5412c7a6bcbb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10069.json b/doc/sphinxext/prs/10069.json new file mode 100644 index 00000000000..4553555339b --- /dev/null +++ b/doc/sphinxext/prs/10069.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "05f0358927404671a600e2fc8e35b30b0cd93f19", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 57, + "d": 64 + }, + "doc/conf.py": { + "a": 19, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 12 + }, + "tutorials/clinical/20_seeg.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1007.json b/doc/sphinxext/prs/1007.json new file mode 100644 index 00000000000..fdcc15a4fa3 --- /dev/null +++ b/doc/sphinxext/prs/1007.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e0cac66a4840087cc624c081312f05ae4f37a244", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/layouts/layout.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10071.json b/doc/sphinxext/prs/10071.json new file mode 100644 index 
00000000000..bde1bf15b5c --- /dev/null +++ b/doc/sphinxext/prs/10071.json @@ -0,0 +1,139 @@ +{ + "merge_commit_sha": "3aad5b1aa7f74ea0f8d927664d8b844afbc40cea", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 0, + "d": 1 + }, + ".github/workflows/circle_artifacts.yml": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 4, + "d": 4 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 7, + "d": 7 + }, + "examples/inverse/covariance_whitening_dspm.py": { + "a": 0, + "d": 188 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 5, + "d": 1 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/mne_cov_power.py": { + "a": 10, + "d": 12 + }, + "examples/inverse/vector_mne_solution.py": { + "a": 7, + "d": 4 + }, + "examples/preprocessing/ica_comparison.py": { + "a": 2, + "d": 3 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 8, + "d": 37 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/ieeg/_projection.py": { + "a": 6, + "d": 8 + }, + "mne/transforms.py": { + "a": 2, + "d": 0 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 6, + "d": 6 + }, + "tutorials/clinical/20_seeg.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/30_ecog.py": { + "a": 6, + "d": 9 + }, + "tutorials/forward/30_forward.py": { + "a": 16, + "d": 12 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 5, + "d": 5 + }, + "tutorials/intro/70_report.py": { + "a": 30, + "d": 25 + }, + "tutorials/inverse/40_mne_fixed_free.py": { + "a": 4, + "d": 3 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 7, + "d": 5 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 7, + "d": 6 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 13, + "d": 9 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 6, + "d": 6 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 10, + "d": 8 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 13, + "d": 13 + }, + "tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 4, + "d": 1 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 9, + "d": 6 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 9, + "d": 9 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10072.json b/doc/sphinxext/prs/10072.json new file mode 100644 index 00000000000..0f3097494cf --- /dev/null +++ b/doc/sphinxext/prs/10072.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "963eea23740f0e697ac03dc1fd671ee91e15c162", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10073.json b/doc/sphinxext/prs/10073.json new file mode 100644 index 00000000000..f4da66e3bfc --- /dev/null +++ b/doc/sphinxext/prs/10073.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8eba56032b93e2f51030c4ddcd28c9ae3addafb1", + "authors": [ + { + "n": "Richard 
Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 2, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10074.json b/doc/sphinxext/prs/10074.json new file mode 100644 index 00000000000..1cb7df6d30b --- /dev/null +++ b/doc/sphinxext/prs/10074.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "59ca238896e275e0f97f2c81fc9bdd18f31066b1", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/colormap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10075.json b/doc/sphinxext/prs/10075.json new file mode 100644 index 00000000000..b3b964d185d --- /dev/null +++ b/doc/sphinxext/prs/10075.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4128974b2428826b89c4b25c2f49bfac8132b941", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/annotations.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10076.json b/doc/sphinxext/prs/10076.json new file mode 100644 index 00000000000..e94bc1f2d71 --- /dev/null +++ b/doc/sphinxext/prs/10076.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "01da5a32ac1a80cf0cc42684f154597595608ea0", + "authors": [ + { + "n": "Jan Zerfowski", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 1 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10077.json b/doc/sphinxext/prs/10077.json new file mode 100644 index 00000000000..ee42b5f064a --- /dev/null +++ b/doc/sphinxext/prs/10077.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3fd23c67578a8424e59df17b9026bddb0409b538", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/morph.py": { + "a": 17, + "d": 8 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 34, + "d": 0 + }, + "mne/utils/tests/test_numerics.py": { + "a": 50, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10078.json b/doc/sphinxext/prs/10078.json new file mode 100644 index 00000000000..e47acf688a2 --- /dev/null +++ b/doc/sphinxext/prs/10078.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f2ae58f6bfdbabf0aaa06a6f650124f9b2788581", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 24, + "d": 13 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 2, + "d": 2 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 2, + "d": 2 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1008.json b/doc/sphinxext/prs/1008.json new file mode 100644 index 00000000000..d98851a0f63 --- /dev/null +++ 
b/doc/sphinxext/prs/1008.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a1ac7575dc528d27243b5ed6dab354569df98a02", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/layouts/KIT-157.lout": { + "a": 158, + "d": 0 + }, + "mne/layouts/layout.py": { + "a": 4, + "d": 0 + }, + "mne/layouts/tests/test_layout.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10080.json b/doc/sphinxext/prs/10080.json new file mode 100644 index 00000000000..7622cce359d --- /dev/null +++ b/doc/sphinxext/prs/10080.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8954c3711b57dc1b90a8079c278384ba684ed17f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/report/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10082.json b/doc/sphinxext/prs/10082.json new file mode 100644 index 00000000000..d825a31cf33 --- /dev/null +++ b/doc/sphinxext/prs/10082.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f3c90675295e411d0c4242f2eeeb2a75b64dead6", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/templates/header.html": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10084.json b/doc/sphinxext/prs/10084.json new file mode 100644 index 00000000000..efdf182d21d --- /dev/null +++ b/doc/sphinxext/prs/10084.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c1cbc784f7c44abfa33d728528c319f4a468dc0c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 5, + "d": 1 + }, + "mne/decoding/mixin.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10085.json b/doc/sphinxext/prs/10085.json new file mode 100644 index 00000000000..34d8a3411bd --- /dev/null +++ b/doc/sphinxext/prs/10085.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "269527169e08f953caf4675c7960eae93f5bc307", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/coreg.py": { + "a": 61, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 67, + "d": 12 + }, + "mne/viz/backends/_abstract.py": { + "a": 0, + "d": 4 + }, + "mne/viz/backends/_notebook.py": { + "a": 0, + "d": 3 + }, + "mne/viz/backends/_qt.py": { + "a": 0, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10086.json b/doc/sphinxext/prs/10086.json new file mode 100644 index 00000000000..3a249a0e704 --- /dev/null +++ b/doc/sphinxext/prs/10086.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d7202b4866c88c6989a548e268b87113ac69e212", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10087.json b/doc/sphinxext/prs/10087.json new file mode 100644 index 00000000000..940d0c2154d --- /dev/null +++ b/doc/sphinxext/prs/10087.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9dff61616bfc9ca411df578a4f3b963de8ac023c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_mpl_figure.py": { + "a": 3, + "d": 1 
+ } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10089.json b/doc/sphinxext/prs/10089.json new file mode 100644 index 00000000000..e19cdfd95a0 --- /dev/null +++ b/doc/sphinxext/prs/10089.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d7c8fe391ac6c0b01b66d7e328689f3dbe9a51dd", + "authors": [ + { + "n": "Sen-wen DENG", + "e": null + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 29, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10091.json b/doc/sphinxext/prs/10091.json new file mode 100644 index 00000000000..09b12d89452 --- /dev/null +++ b/doc/sphinxext/prs/10091.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "8db9aac060fe736438933efd0a421a866853727b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 4, + "d": 5 + }, + "mne/cov.py": { + "a": 2, + "d": 3 + }, + "mne/datasets/_fetch.py": { + "a": 3, + "d": 3 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 4, + "d": 5 + }, + "mne/externals/tqdm/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 2, + "d": 3 + }, + "mne/utils/_testing.py": { + "a": 5, + "d": 6 + }, + "mne/utils/tests/test_linalg.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_scraper.py": { + "a": 5, + "d": 4 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 7 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10093.json b/doc/sphinxext/prs/10093.json new file mode 100644 index 00000000000..e6997b069b3 --- /dev/null +++ b/doc/sphinxext/prs/10093.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "59af71a7e37e8b6af80de28fd3d1ec344fc38a7b", + "authors": [ + { + "n": "Adina Wagner", + "e": "adina.wagner@t-online.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 5, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10095.json b/doc/sphinxext/prs/10095.json new file mode 100644 index 00000000000..ea0bbad427d --- /dev/null +++ b/doc/sphinxext/prs/10095.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "fa0e8d951e7fff82b7a4402dd858d18016f69a64", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 
+ }, + "mne/io/base.py": { + "a": 3, + "d": 2 + }, + "mne/utils/check.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10096.json b/doc/sphinxext/prs/10096.json new file mode 100644 index 00000000000..4a1ae1bd60b --- /dev/null +++ b/doc/sphinxext/prs/10096.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "ae0ca34a4a53d0a307381833bc75c4b536d5ea54", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 0, + "d": 6 + }, + "mne/cov.py": { + "a": 2, + "d": 10 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 3, + "d": 11 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 0, + "d": 7 + }, + "mne/surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 0, + "d": 4 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 1, + "d": 3 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 8 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 6 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10097.json b/doc/sphinxext/prs/10097.json new file mode 100644 index 00000000000..d873fa958f8 --- /dev/null +++ b/doc/sphinxext/prs/10097.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "12833cebf689840f4e339f78e27466d256e36c37", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10098.json b/doc/sphinxext/prs/10098.json new file mode 100644 index 00000000000..e20d80ef0cc --- /dev/null +++ b/doc/sphinxext/prs/10098.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a3066f6eb3730d13d2dce632ff6cb5d4870fbe7f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_mpl_figure.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 8, + "d": 0 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1010.json b/doc/sphinxext/prs/1010.json new file mode 100644 index 00000000000..6f15b761ad2 --- /dev/null +++ b/doc/sphinxext/prs/1010.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "948d9135925f6769f151151ea870592a41af6ada", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/layouts/layout.py": { + "a": 3, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10100.json b/doc/sphinxext/prs/10100.json new file mode 100644 index 00000000000..98297a44d31 --- /dev/null +++ b/doc/sphinxext/prs/10100.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e8f71755fc88b4d6a368b0b3e12c1d4ac1c1f3bc", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/backends/renderer.py": { + "a": 6, + "d": 1 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 3 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10102.json b/doc/sphinxext/prs/10102.json new file mode 100644 index 00000000000..4ac2327131d --- /dev/null +++ b/doc/sphinxext/prs/10102.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e79924cdf226782763acc46c75f7aede06456b32", + "authors": [ + { + "n": 
"Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 21, + "d": 15 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10108.json b/doc/sphinxext/prs/10108.json new file mode 100644 index 00000000000..b8ae56b6f40 --- /dev/null +++ b/doc/sphinxext/prs/10108.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f0abc4c332b265355f39ec230539e037d23cd752", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "server_environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10109.json b/doc/sphinxext/prs/10109.json new file mode 100644 index 00000000000..b4800fad6b4 --- /dev/null +++ b/doc/sphinxext/prs/10109.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6a265839876a2c4205a557ec387edda10cc237b4", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 5, + "d": 0 + }, + "mne/report/report.py": { + "a": 19, + "d": 8 + }, + "mne/utils/docs.py": { + "a": 23, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 11 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 17 + }, + "mne/viz/utils.py": { + "a": 20, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10110.json b/doc/sphinxext/prs/10110.json new file mode 100644 index 00000000000..477104ca02a --- /dev/null +++ b/doc/sphinxext/prs/10110.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8333d703dc2c9feb8b81e2585294c77deeca4ae4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/morph.py": { + "a": 8, + "d": 7 + }, + "mne/tests/test_morph.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10111.json b/doc/sphinxext/prs/10111.json new file mode 100644 index 00000000000..3fdd74cec70 --- /dev/null +++ b/doc/sphinxext/prs/10111.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b7daa3603d75062f713c84707da3a940c7de7266", + "authors": [ + { + "n": "Adam Li", + "e": null + } + ], + "changes": { + "doc/overview/people.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10112.json b/doc/sphinxext/prs/10112.json new file mode 100644 index 00000000000..219e0d7f8ea --- /dev/null +++ b/doc/sphinxext/prs/10112.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f19fe3cd0089101f209b968f1fe66a2a8eae6c1f", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 17, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 32, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10113.json b/doc/sphinxext/prs/10113.json new file mode 100644 index 00000000000..93383638194 --- /dev/null +++ b/doc/sphinxext/prs/10113.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "82d32b5d90e156b915084251a0be29bd34419c01", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/overview/people.rst": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10114.json b/doc/sphinxext/prs/10114.json new file mode 100644 index 00000000000..898e8ba49c9 --- 
/dev/null +++ b/doc/sphinxext/prs/10114.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4faf87f4ac2deb133a40554df02f0596046611a7", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "mne/report/report.py": { + "a": 26, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10115.json b/doc/sphinxext/prs/10115.json new file mode 100644 index 00000000000..3f8451a6a1e --- /dev/null +++ b/doc/sphinxext/prs/10115.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9917316f4e376d3c6f3d7cff4dafd237dba64bbf", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 21, + "d": 12 + }, + "mne/report/tests/test_report.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10116.json b/doc/sphinxext/prs/10116.json new file mode 100644 index 00000000000..d302ace4f28 --- /dev/null +++ b/doc/sphinxext/prs/10116.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9b3676d991220edfad86edc9cbd97cea032c5191", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 45, + "d": 32 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10117.json b/doc/sphinxext/prs/10117.json new file mode 100644 index 00000000000..ed53506500d --- /dev/null +++ b/doc/sphinxext/prs/10117.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a885741c16abc21b5fccba85a3650a03f87e5e8f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 294, + "d": 100 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 20, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10118.json b/doc/sphinxext/prs/10118.json new file mode 100644 index 00000000000..d3c8db91209 --- /dev/null +++ b/doc/sphinxext/prs/10118.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "a9b63658006855ad67512eb6dfde8bbaf095acd8", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 8, + "d": 0 + }, + "mne/io/base.py": { + "a": 4, + "d": 1 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 6, + "d": 1 + }, + "mne/preprocessing/annotate_nan.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 16, + "d": 17 + }, + "mne/preprocessing/flat.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/tests/test_annotate_nan.py": { + "a": 53, + "d": 0 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 54, + "d": 4 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 6, + "d": 3 + }, + 
"mne/preprocessing/tests/test_maxwell.py": { + "a": 15, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10119.json b/doc/sphinxext/prs/10119.json new file mode 100644 index 00000000000..ba396378ea9 --- /dev/null +++ b/doc/sphinxext/prs/10119.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "16498afc8b6a92a6f0e5754c71c6d64ed62c1ad2", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 88, + "d": 28 + }, + "mne/report/tests/test_report.py": { + "a": 12, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 4, + "d": 2 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1012.json b/doc/sphinxext/prs/1012.json new file mode 100644 index 00000000000..e0a449c1ae4 --- /dev/null +++ b/doc/sphinxext/prs/1012.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "673ee9af04d3e347b7d3c63fcbfd14680e0c49c9", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/fiff/evoked.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10120.json b/doc/sphinxext/prs/10120.json new file mode 100644 index 00000000000..b8c6a08bdeb --- /dev/null +++ b/doc/sphinxext/prs/10120.json @@ -0,0 +1,195 @@ +{ + "merge_commit_sha": "a8dbd5f4158f3bfaeb0e1b46576de18514f34219", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 6 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 3, + "d": 2 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 2 + }, + "mne/commands/tests/test_commands.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 11, + "d": 9 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 13, + "d": 13 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 4, + "d": 2 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 4, + "d": 4 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 3, + "d": 3 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 22, + "d": 16 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/io/tests/test_reference.py": { + "a": 3, + "d": 3 + }, + "mne/io/tests/test_what.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 18, + "d": 19 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 6, + "d": 4 + }, + "mne/tests/test_annotations.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 6, + "d": 5 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 5, + "d": 6 + }, + "mne/tests/test_label.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_line_endings.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_morph.py": { + "a": 3, + "d": 2 + }, + 
"mne/tests/test_morph_map.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 7, + "d": 6 + }, + "mne/tests/test_source_space.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_surface.py": { + "a": 4, + "d": 3 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 2, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 9, + "d": 22 + }, + "mne/utils/tests/test_linalg.py": { + "a": 3, + "d": 2 + }, + "mne/utils/tests/test_testing.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 5, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 5 + }, + "mne/viz/tests/test_topo.py": { + "a": 3, + "d": 2 + }, + "tools/azure_dependencies.sh": { + "a": 3, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 13, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10122.json b/doc/sphinxext/prs/10122.json new file mode 100644 index 00000000000..557b4f0f903 --- /dev/null +++ b/doc/sphinxext/prs/10122.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e0aa5feb7b2dcb6704242fd9ad4b5fd1e7b97b0d", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 9, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10123.json b/doc/sphinxext/prs/10123.json new file mode 100644 index 00000000000..930bc1903bc --- /dev/null +++ b/doc/sphinxext/prs/10123.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c49ddf168c74db8a6211489ac8de9be6dbe6708e", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "tutorials/io/30_reading_fnirs_data.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10125.json b/doc/sphinxext/prs/10125.json new file mode 100644 index 00000000000..489c7e27226 --- /dev/null +++ b/doc/sphinxext/prs/10125.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "575193c1b189733071a0623bf436c08daeab671c", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 13, + "d": 10 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 28, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10126.json b/doc/sphinxext/prs/10126.json new file mode 100644 index 00000000000..bc3d4e9229a --- /dev/null +++ b/doc/sphinxext/prs/10126.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7133478275cb3737d56e6381995246a5abdfa3fc", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 52, + "d": 10 + }, + "tutorials/intro/70_report.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10127.json b/doc/sphinxext/prs/10127.json new file mode 100644 index 00000000000..66c26c2aa34 --- /dev/null +++ 
b/doc/sphinxext/prs/10127.json @@ -0,0 +1,143 @@ +{ + "merge_commit_sha": "fd4f692f142ed8107cc816c3f60cfb3b4509e477", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 12, + "d": 8 + }, + "mne/bem.py": { + "a": 4, + "d": 7 + }, + "mne/channels/montage.py": { + "a": 5, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 16, + "d": 16 + }, + "mne/epochs.py": { + "a": 2, + "d": 3 + }, + "mne/event.py": { + "a": 14, + "d": 16 + }, + "mne/evoked.py": { + "a": 16, + "d": 12 + }, + "mne/forward/forward.py": { + "a": 6, + "d": 3 + }, + "mne/io/_digitization.py": { + "a": 12, + "d": 5 + }, + "mne/io/base.py": { + "a": 16, + "d": 17 + }, + "mne/io/meas_info.py": { + "a": 9, + "d": 25 + }, + "mne/io/tests/test_meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/write.py": { + "a": 9, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 13, + "d": 9 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 3, + "d": 3 + }, + "mne/morph_map.py": { + "a": 6, + "d": 5 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 11 + }, + "mne/proj.py": { + "a": 12, + "d": 7 + }, + "mne/source_estimate.py": { + "a": 55, + "d": 50 + }, + "mne/source_space.py": { + "a": 11, + "d": 7 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_event.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_evoked.py": { + "a": 18, + "d": 16 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 12, + "d": 6 + }, + "mne/time_frequency/csd.py": { + "a": 12, + "d": 4 + }, + "mne/transforms.py": { + "a": 8, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10129.json b/doc/sphinxext/prs/10129.json new file mode 100644 index 00000000000..4375241c5ff --- /dev/null +++ b/doc/sphinxext/prs/10129.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "74c3965088eb3f74e650d6191b834be0601cae1f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 67, + "d": 24 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10130.json b/doc/sphinxext/prs/10130.json new file mode 100644 index 00000000000..2b31597befa --- /dev/null +++ b/doc/sphinxext/prs/10130.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c3245352f4ccf8cf278baa1b911b4853c06b3dcf", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10132.json b/doc/sphinxext/prs/10132.json new file mode 100644 index 00000000000..e455c3a67b2 --- /dev/null +++ b/doc/sphinxext/prs/10132.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0126591383729475326714e3edc0360cdbbe92e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/surface.py": { + "a": 8, + "d": 4 + }, + 
"mne/tests/test_source_estimate.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_surface.py": { + "a": 6, + "d": 7 + }, + "mne/transforms.py": { + "a": 14, + "d": 6 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10133.json b/doc/sphinxext/prs/10133.json new file mode 100644 index 00000000000..d79a26cabc3 --- /dev/null +++ b/doc/sphinxext/prs/10133.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3ccb227367560803f91d3f19251a1531e3dde6e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/linux_pip.yml": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "tools/github_actions_dependencies.sh": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10134.json b/doc/sphinxext/prs/10134.json new file mode 100644 index 00000000000..c954bb82b37 --- /dev/null +++ b/doc/sphinxext/prs/10134.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "61c5c51cacd998cde4bb0ba350738619a4bcee72", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 9, + "d": 13 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/sphinxext/gh_substitutions.py": { + "a": 5, + "d": 5 + }, + "doc/sphinxext/newcontrib_substitutions.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10135.json b/doc/sphinxext/prs/10135.json new file mode 100644 index 00000000000..352f5ed15a8 --- /dev/null +++ b/doc/sphinxext/prs/10135.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ad5ab0ab44981694409abe6d06da44b599f079ee", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/defaults.py": { + "a": 7, + "d": 1 + }, + "mne/report/report.py": { + "a": 16, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10136.json b/doc/sphinxext/prs/10136.json new file mode 100644 index 00000000000..c2de34ea529 --- /dev/null +++ b/doc/sphinxext/prs/10136.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "410ebf1856fee8f433da865f0ca7e4ef62e30260", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1014.json b/doc/sphinxext/prs/1014.json new file mode 100644 index 00000000000..f044a189341 --- /dev/null +++ b/doc/sphinxext/prs/1014.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "99a492bca5cfb65177e7167e09822d373e6429bf", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 13, + "d": 1 + }, + "examples/plot_field_lines.py": { + "a": 0, + "d": 88 + }, + "examples/plot_meg_eeg_fields_3d.py": { + "a": 36, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/channels.py": { + "a": 9, + "d": 4 + }, + "mne/fiff/evoked.py": { + "a": 25, + "d": 0 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 178, + "d": 69 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 39, + "d": 18 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_viz.py": { + "a": 35, + "d": 11 + }, + "mne/viz.py": { + "a": 113, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10140.json b/doc/sphinxext/prs/10140.json new file mode 100644 index 00000000000..e5eb0eefb9e --- /dev/null +++ b/doc/sphinxext/prs/10140.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3d4a4f66232448de542ac076df7b57115644978a", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/io/base.py": { + "a": 4, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 2 + }, + "mne/utils/dataframe.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10141.json b/doc/sphinxext/prs/10141.json new file mode 100644 index 00000000000..4ad5e86b73a --- /dev/null +++ b/doc/sphinxext/prs/10141.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ea5e19e62382b48d0779907e626e4671d920149e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/coreg.py": { + "a": 1, + "d": 2 + }, + "mne/gui/_coreg.py": { + "a": 33, + "d": 4 + }, + "mne/viz/backends/_abstract.py": { + "a": 8, + "d": 6 + }, + "mne/viz/backends/_notebook.py": { + "a": 13, + "d": 7 + }, + "mne/viz/backends/_qt.py": { + "a": 18, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10142.json b/doc/sphinxext/prs/10142.json new file mode 100644 index 00000000000..d9b90d1408f --- /dev/null +++ b/doc/sphinxext/prs/10142.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b65a428c6cd28505735601b2c607a52ea1d98f21", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 3 + }, + "tutorials/intro/70_report.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10143.json b/doc/sphinxext/prs/10143.json new file mode 100644 index 00000000000..75da123cf7f --- /dev/null +++ b/doc/sphinxext/prs/10143.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "a2e3080c2b64f7a5242c1ecb9675aaef1874db4b", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/0.21.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/annotate_amplitude.py": { + "a": 
254, + "d": 0 + }, + "mne/preprocessing/flat.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/tests/test_annotate_amplitude.py": { + "a": 394, + "d": 0 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 12, + "d": 5 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 2 + }, + "tools/setup_xvfb.sh": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10144.json b/doc/sphinxext/prs/10144.json new file mode 100644 index 00000000000..8c80bdb212a --- /dev/null +++ b/doc/sphinxext/prs/10144.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "077d311204efb0f947a8fbe57419ced0fc1769ff", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 24, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1015.json b/doc/sphinxext/prs/1015.json new file mode 100644 index 00000000000..d19b51cfc8b --- /dev/null +++ b/doc/sphinxext/prs/1015.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e5f41bfca6881ab6f0bec8ba2c148ee2793c9c0e", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 9, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10151.json b/doc/sphinxext/prs/10151.json new file mode 100644 index 00000000000..7dabca9ed09 --- /dev/null +++ b/doc/sphinxext/prs/10151.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fecf5411d28f47dcb4ff379976a1b4813edc45a9", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 21, + "d": 0 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10152.json b/doc/sphinxext/prs/10152.json new file mode 100644 index 00000000000..66fd69bfdff --- /dev/null +++ b/doc/sphinxext/prs/10152.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e3fec3ff414017a93b4d7ae7d6698c06b22ed3b4", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/epochs.py": { + "a": 8, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10153.json b/doc/sphinxext/prs/10153.json new file mode 100644 index 00000000000..a1fb91ac0d0 --- /dev/null +++ b/doc/sphinxext/prs/10153.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "433a4e19bb15b061c17a3fee3e9f8e8f6d8ea229", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 3, + "d": 1 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10154.json b/doc/sphinxext/prs/10154.json new file mode 100644 index 00000000000..f8f2d99aa49 --- /dev/null +++ b/doc/sphinxext/prs/10154.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f37ef8e4ae07cd1c10118cb6e64e904333920924", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "figures/Figure_6.png": { + "a": 0, + "d": 0 + }, + "figures/circle.png": { + "a": 0, + "d": 0 + }, + "figures/ecog.png": { + "a": 0, + "d": 0 + }, + "figures/electrode.png": { + "a": 0, + "d": 0 + }, + "paper.bib": { + "a": 58, + "d": 0 + }, + "paper.md": { + "a": 10, + "d": 10 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/10155.json b/doc/sphinxext/prs/10155.json new file mode 100644 index 00000000000..00079f3c56c --- /dev/null +++ b/doc/sphinxext/prs/10155.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "dfe7f7e950109f9ee7346fe8bd77d7de18c8c726", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/install/index.rst": { + "a": 2, + "d": 0 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 5, + "d": 4 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 41, + "d": 25 + }, + "tutorials/clinical/20_seeg.py": { + "a": 3, + "d": 0 + }, + "tutorials/clinical/30_ecog.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10157.json b/doc/sphinxext/prs/10157.json new file mode 100644 index 00000000000..a127959b46b --- /dev/null +++ b/doc/sphinxext/prs/10157.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d7932956b5ae2516f6880cfdb2642f619108e3f2", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10159.json b/doc/sphinxext/prs/10159.json new file mode 100644 index 00000000000..4a5b6e3effc --- /dev/null +++ b/doc/sphinxext/prs/10159.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "523576f6bd805dfbf3e882933d779c5a2fd6d740", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10166.json b/doc/sphinxext/prs/10166.json new file mode 100644 index 00000000000..e2e103cb0f0 --- /dev/null +++ b/doc/sphinxext/prs/10166.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "79a431910d88d8174f754e4868b29a1aa42a7351", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 1, + "d": 1 + }, + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/report/js_and_css/bootstrap-icons/bootstrap-icons.mne.min.css": { + "a": 1, + "d": 0 + }, + "mne/report/js_and_css/bootstrap-icons/gen_css_for_mne.py": { + "a": 60, + "d": 0 + }, + "mne/report/js_and_css/bootstrap-table/bootstrap-table-copy-rows.min.js": { + "a": 10, + "d": 0 + }, + "mne/report/js_and_css/bootstrap-table/bootstrap-table-export.min.js": { + "a": 10, + "d": 0 + }, + "mne/report/js_and_css/bootstrap-table/bootstrap-table.min.css": { + "a": 10, + "d": 0 + }, + "mne/report/js_and_css/bootstrap-table/bootstrap-table.min.js": { + "a": 10, + "d": 0 + }, + "mne/report/js_and_css/bootstrap-table/tableExport.min.js": { + "a": 13, + "d": 0 + }, + "mne/report/js_and_css/report.js": { + "a": 31, + "d": 13 + }, + "mne/report/report.py": { + "a": 101, + "d": 8 + }, + "mne/report/templates/epochs.html": { + "a": 1, + "d": 0 + }, + "mne/report/tests/test_report.py": { + "a": 40, + "d": 10 + }, + "setup.cfg": { + "a": 2, + "d": 2 + }, + "tutorials/intro/70_report.py": { + "a": 18, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10168.json b/doc/sphinxext/prs/10168.json new file mode 100644 index 00000000000..7d40881d2e7 --- /dev/null +++ b/doc/sphinxext/prs/10168.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fd0387089d88575773e20eb84de08929598ba45d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 3, + 
"d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10170.json b/doc/sphinxext/prs/10170.json new file mode 100644 index 00000000000..9b73d165ab9 --- /dev/null +++ b/doc/sphinxext/prs/10170.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3ab96f07e8396ec0fe11b532f9e6d42f4637b3a3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10171.json b/doc/sphinxext/prs/10171.json new file mode 100644 index 00000000000..8ee4af53709 --- /dev/null +++ b/doc/sphinxext/prs/10171.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "faf8b2d96e601db405bc4cc1eb1d0bd6e71541dc", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10172.json b/doc/sphinxext/prs/10172.json new file mode 100644 index 00000000000..3a0bc197013 --- /dev/null +++ b/doc/sphinxext/prs/10172.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "215fe39f6a8a565f932c74d57d9f677ec587d672", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/inverse.rst": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10174.json b/doc/sphinxext/prs/10174.json new file mode 100644 index 00000000000..d1ea315a71a --- /dev/null +++ b/doc/sphinxext/prs/10174.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cda07d5d8f3e551799c0677b5dcce89d9f0dbdc7", + "authors": [ + { + "n": "John Veillette", + "e": "johnv@uchicago.edu" + } + ], + "changes": { + "doc/install/pre_install.rst": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10175.json b/doc/sphinxext/prs/10175.json new file mode 100644 index 00000000000..9087411b343 --- /dev/null +++ b/doc/sphinxext/prs/10175.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e6282d27dbe2f54e6c9135a5c883170f7beeb3dd", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 5, + "d": 1 + }, + "mne/report/report.py": { + "a": 2, + "d": 1 + }, + "mne/surface.py": { + "a": 14, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10176.json b/doc/sphinxext/prs/10176.json new file mode 100644 index 00000000000..1cbc720bd79 --- /dev/null +++ b/doc/sphinxext/prs/10176.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a8889009536218960e5546d941453c0f1ae86c61", + "authors": [ + { + "n": "Jeff Stout", + "e": null + } + ], + "changes": { + "mne/report/report.py": { + "a": 15, + "d": 9 + }, + "mne/report/tests/test_report.py": { + "a": 15, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10177.json b/doc/sphinxext/prs/10177.json new file mode 100644 index 00000000000..87d36580845 --- /dev/null +++ b/doc/sphinxext/prs/10177.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "337bee137414d3effb246258e3fd3ff7297392a5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/10178.json b/doc/sphinxext/prs/10178.json new file mode 100644 index 00000000000..f615398cd3b --- /dev/null +++ b/doc/sphinxext/prs/10178.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "161497b28df1b8db77ab4e6061ea9b32a74d17ca", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10179.json b/doc/sphinxext/prs/10179.json new file mode 100644 index 00000000000..ae458c8926e --- /dev/null +++ b/doc/sphinxext/prs/10179.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "38d63e33bc1b0e51394ce800123b42853e3a0c20", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/install/advanced.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1018.json b/doc/sphinxext/prs/1018.json new file mode 100644 index 00000000000..a1327f5a6a0 --- /dev/null +++ b/doc/sphinxext/prs/1018.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "010b0800f53dbd0506c93276d47a2b80143989f4", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/channels.py": { + "a": 40, + "d": 0 + }, + "mne/fiff/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10181.json b/doc/sphinxext/prs/10181.json new file mode 100644 index 00000000000..bf4c1f8a902 --- /dev/null +++ b/doc/sphinxext/prs/10181.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fe6a30c67b6d08a10331e6eff4372e161d7e4109", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/pre_install.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10182.json b/doc/sphinxext/prs/10182.json new file mode 100644 index 00000000000..35555cdd4d2 --- /dev/null +++ b/doc/sphinxext/prs/10182.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bafc6ef4857806bf28fa7952865ba9a71f514274", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 17, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10183.json b/doc/sphinxext/prs/10183.json new file mode 100644 index 00000000000..ec964a9423d --- /dev/null +++ b/doc/sphinxext/prs/10183.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ebaaed43a0ff8010de1deb0861b3642d1308fff8", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 10, + "d": 5 + }, + "mne/report/tests/test_report.py": { + "a": 15, + "d": 16 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10184.json b/doc/sphinxext/prs/10184.json new file mode 100644 index 00000000000..baab5931012 --- /dev/null +++ b/doc/sphinxext/prs/10184.json @@ -0,0 +1,195 @@ +{ + "merge_commit_sha": "93485e025d576470c21cc936becb3992c01a9c5e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { 
+ "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 3, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 11, + "d": 0 + }, + "doc/time_frequency.rst": { + "a": 3, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 3 + }, + "mne/cov.py": { + "a": 2, + "d": 0 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 96, + "d": 40 + }, + "mne/evoked.py": { + "a": 93, + "d": 1 + }, + "mne/filter.py": { + "a": 2, + "d": 1 + }, + "mne/html_templates/repr/spectrum.html.jinja": { + "a": 50, + "d": 0 + }, + "mne/io/base.py": { + "a": 41, + "d": 38 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 3 + }, + "mne/simulation/tests/test_raw.py": { + "a": 4, + "d": 3 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 3, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 34, + "d": 69 + }, + "mne/time_frequency/spectrum.py": { + "a": 1146, + "d": 0 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 30, + "d": 27 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 116, + "d": 243 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 184, + "d": 0 + }, + "mne/utils/check.py": { + "a": 2, + "d": 1 + }, + "mne/utils/dataframe.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 151, + "d": 41 + }, + "mne/utils/mixin.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 8, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 18, + "d": 25 + }, + "mne/viz/ica.py": { + "a": 7, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 36, + "d": 87 + }, + "mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 57, + "d": 57 + }, + "mne/viz/utils.py": { + "a": 12, + "d": 13 + }, + "setup.cfg": { + "a": 4, + "d": 1 + }, + "tutorials/clinical/60_sleep.py": { + "a": 6, + "d": 7 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 36, + "d": 19 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 42, + "d": 14 + }, + "tutorials/time-freq/10_spectrum_class.py": { + "a": 172, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10185.json b/doc/sphinxext/prs/10185.json new file mode 100644 index 00000000000..62e8ceccb9d --- /dev/null +++ b/doc/sphinxext/prs/10185.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "dfe32b078b4c6deb16101db36ca452074d2d11e9", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 25, + "d": 1 + }, + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10186.json b/doc/sphinxext/prs/10186.json new file mode 100644 index 00000000000..195f6c36498 --- /dev/null +++ b/doc/sphinxext/prs/10186.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2d5d1686e9b06a1e7bc29794bd2084b0336f0ded", + "authors": [ + { + "n": "Richard Höchenberger", + "e": 
"richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 12, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10188.json b/doc/sphinxext/prs/10188.json new file mode 100644 index 00000000000..750d1e3a989 --- /dev/null +++ b/doc/sphinxext/prs/10188.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1dc380aabb8d4d8869aea0192c3beabeb9fc141c", + "authors": [ + { + "n": "Eduard Ort", + "e": "eduardxort@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/annotations.py": { + "a": 9, + "d": 1 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10189.json b/doc/sphinxext/prs/10189.json new file mode 100644 index 00000000000..8db0b8ea453 --- /dev/null +++ b/doc/sphinxext/prs/10189.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "15f478791aa37e260a4d77b95a1a67166da95ccb", + "authors": [ + { + "n": "fzamberlan", + "e": "44038765+fzamberlan@users.noreply.github.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/channels/_standard_montage_utils.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10191.json b/doc/sphinxext/prs/10191.json new file mode 100644 index 00000000000..5e1bcba0c29 --- /dev/null +++ b/doc/sphinxext/prs/10191.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3b84f95f792b71bbd7a1f223910d676590d5bf7f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/io/read_neo_format.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10192.json b/doc/sphinxext/prs/10192.json new file mode 100644 index 00000000000..247d54bf951 --- /dev/null +++ b/doc/sphinxext/prs/10192.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b7e8c7791b000258b2ba158ddd89eecbbb35af30", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 0, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10196.json b/doc/sphinxext/prs/10196.json new file mode 100644 index 00000000000..c5d61167284 --- /dev/null +++ b/doc/sphinxext/prs/10196.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "67f3fb8f838893f7210b805aa51b7ffeeac292b5", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 7, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10199.json b/doc/sphinxext/prs/10199.json new file mode 100644 index 00000000000..ef700631c7d --- /dev/null +++ b/doc/sphinxext/prs/10199.json @@ -0,0 +1,423 @@ +{ + "merge_commit_sha": "6e312e1254105b9f2546c3b91108b1f96b7f05c3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".coveragerc": { + "a": 0, + "d": 1 + }, + ".lgtm.yml": { + "a": 0, + "d": 5 + }, + "MANIFEST.in": { + "a": 2, + "d": 1 + }, + "README.rst": { + "a": 8, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 0, + "d": 3 + }, + "doc/install/index.rst": { + "a": 6, + "d": 13 + }, + "doc/links.inc": { + "a": 5, + "d": 0 + }, + "doc/overview/faq.rst": { + "a": 1, + "d": 2 + }, + "environment.yml": { + "a": 6, + "d": 2 + }, + "mne/__init__.py": { + "a": 0, + "d": 1 + }, + 
"mne/beamformer/_compute_beamformer.py": { + "a": 5, + "d": 3 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_external.py": { + "a": 4, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/bem.py": { + "a": 5, + "d": 2 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 1 + }, + "mne/conftest.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/testing/_testing.py": { + "a": 3, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 3, + "d": 4 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 2, + "d": 0 + }, + "mne/externals/FieldTrip.py": { + "a": 0, + "d": 508 + }, + "mne/externals/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/externals/decorator.py": { + "a": 0, + "d": 454 + }, + "mne/externals/doccer.py": { + "a": 0, + "d": 149 + }, + "mne/externals/h5io/__init__.py": { + "a": 0, + "d": 6 + }, + "mne/externals/h5io/_h5io.py": { + "a": 0, + "d": 746 + }, + "mne/externals/pymatreader/__init__.py": { + "a": 0, + "d": 32 + }, + "mne/externals/pymatreader/pymatreader.py": { + "a": 0, + "d": 103 + }, + "mne/externals/pymatreader/utils.py": { + "a": 0, + "d": 263 + }, + "mne/externals/tqdm/__init__.py": { + "a": 0, + "d": 12 + }, + "mne/externals/tqdm/_tqdm/__init__.py": { + "a": 0, + "d": 40 + }, + "mne/externals/tqdm/_tqdm/__main__.py": { + "a": 0, + "d": 2 + }, + "mne/externals/tqdm/_tqdm/_main.py": { + "a": 0, + "d": 7 + }, + "mne/externals/tqdm/_tqdm/_monitor.py": { + "a": 0, + "d": 99 + }, + "mne/externals/tqdm/_tqdm/_tqdm.py": { + "a": 0, + "d": 7 + }, + "mne/externals/tqdm/_tqdm/_tqdm_gui.py": { + "a": 0, + "d": 7 + }, + "mne/externals/tqdm/_tqdm/_tqdm_notebook.py": { + "a": 0, + "d": 7 + }, + "mne/externals/tqdm/_tqdm/_tqdm_pandas.py": { + "a": 0, + "d": 46 + }, + "mne/externals/tqdm/_tqdm/_utils.py": { + "a": 0, + "d": 6 + }, + "mne/externals/tqdm/_tqdm/_version.py": { + "a": 0, + "d": 59 + }, + "mne/externals/tqdm/_tqdm/auto.py": { + "a": 0, + "d": 6 + }, + "mne/externals/tqdm/_tqdm/autonotebook.py": { + "a": 0, + "d": 18 + }, + "mne/externals/tqdm/_tqdm/cli.py": { + "a": 0, + "d": 228 + }, + "mne/externals/tqdm/_tqdm/contrib/__init__.py": { + "a": 0, + "d": 10 + }, + "mne/externals/tqdm/_tqdm/gui.py": { + "a": 0, + "d": 321 + }, + "mne/externals/tqdm/_tqdm/notebook.py": { + "a": 0, + "d": 261 + }, + "mne/externals/tqdm/_tqdm/std.py": { + "a": 0, + "d": 1464 + }, + "mne/externals/tqdm/_tqdm/tqdm.1": { + "a": 0, + "d": 255 + }, + "mne/externals/tqdm/_tqdm/utils.py": { + "a": 0, + "d": 345 + }, + "mne/fixes.py": { + "a": 3, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 6, + "d": 7 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 4, + "d": 4 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 1, + "d": 9 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 10, + "d": 32 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 7, + "d": 6 + }, + "mne/io/snirf/_snirf.py": { + "a": 2, + "d": 4 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 2, + "d": 13 + }, + "mne/io/tests/test_raw.py": { + "a": 4, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 6, + "d": 3 + }, + "mne/parallel.py": { + "a": 2, + "d": 5 + }, + 
"mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 7, + "d": 2 + }, + "mne/report/report.py": { + "a": 17, + "d": 3 + }, + "mne/report/tests/test_report.py": { + "a": 8, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 9, + "d": 4 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 4 + }, + "mne/tests/test_import_nesting.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 4, + "d": 5 + }, + "mne/tests/test_source_estimate.py": { + "a": 7, + "d": 7 + }, + "mne/time_frequency/csd.py": { + "a": 4, + "d": 2 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 3, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 5, + "d": 2 + }, + "mne/utils/check.py": { + "a": 20, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 74, + "d": 4 + }, + "mne/utils/progressbar.py": { + "a": 7, + "d": 1 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 3, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 5, + "d": 4 + }, + "mne/viz/conftest.py": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 6, + "d": 2 + }, + "requirements_base.txt": { + "a": 9, + "d": 0 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "requirements_hdf5.txt": { + "a": 3, + "d": 0 + }, + "setup.cfg": { + "a": 4, + "d": 4 + }, + "setup.py": { + "a": 8, + "d": 12 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 7, + "d": 1 + }, + "tutorials/clinical/30_ecog.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1020.json b/doc/sphinxext/prs/1020.json new file mode 100644 index 00000000000..c6ae1eff66b --- /dev/null +++ b/doc/sphinxext/prs/1020.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5b20fc2208e41f712f017f67ba6e1df43c563462", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/time_frequency/tests/test_ar.py": { + "a": 2, + "d": 2 + }, + "mne/utils.py": { + "a": 48, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10200.json b/doc/sphinxext/prs/10200.json new file mode 100644 index 00000000000..fd3e51fe160 --- /dev/null +++ b/doc/sphinxext/prs/10200.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f95c84760aae461179e8fab55034fa3dc0b5463d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/gui/_coreg.py": { + "a": 30, + "d": 2 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 3, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 16, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10202.json b/doc/sphinxext/prs/10202.json new file mode 100644 index 00000000000..e1c1096209e --- /dev/null +++ b/doc/sphinxext/prs/10202.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e923880fdfa021b9d1b487206db0826e4b88b800", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + 
"a": 3, + "d": 1 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 23, + "d": 8 + }, + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10203.json b/doc/sphinxext/prs/10203.json new file mode 100644 index 00000000000..b5db59ed8f5 --- /dev/null +++ b/doc/sphinxext/prs/10203.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f90b303ce66a8415e64edd4605b09ac0179c1ebf", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10205.json b/doc/sphinxext/prs/10205.json new file mode 100644 index 00000000000..7ec7b9c82f4 --- /dev/null +++ b/doc/sphinxext/prs/10205.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e48ec6cb0689f747f2ec53a162fe13496c03a200", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 14, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10206.json b/doc/sphinxext/prs/10206.json new file mode 100644 index 00000000000..36add585fb2 --- /dev/null +++ b/doc/sphinxext/prs/10206.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9b44cb9af43c0fde8b61c37abdd7dfa5d0d531f2", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 14, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1021.json b/doc/sphinxext/prs/1021.json new file mode 100644 index 00000000000..531d3d6ac36 --- /dev/null +++ b/doc/sphinxext/prs/1021.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4d81e9b6e300f671d3de19a06ee69300ae6cf1b1", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "examples/plot_evoked_topomap.py": { + "a": 39, + "d": 34 + }, + "mne/fiff/evoked.py": { + "a": 11, + "d": 5 + }, + "mne/tests/test_viz.py": { + "a": 10, + "d": 0 + }, + "mne/viz.py": { + "a": 32, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10210.json b/doc/sphinxext/prs/10210.json new file mode 100644 index 00000000000..cdedb1b2b3f --- /dev/null +++ b/doc/sphinxext/prs/10210.json @@ -0,0 +1,171 @@ +{ + "merge_commit_sha": "2b2b0cff2fa82d07919a6947563a0b0ddba95499", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/datasets/_fake/_fake.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/_fetch.py": { + "a": 4, + "d": 4 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/_infant/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 3, + "d": 4 + }, + "mne/datasets/epilepsy_ecog/_data.py": { + "a": 2, + "d": 3 + }, + 
"mne/datasets/erp_core/erp_core.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/fnirs_motor/fnirs_motor.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/kiloword/kiloword.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/limo/limo.py": { + "a": 2, + "d": 3 + }, + "mne/datasets/misc/_misc.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/mtrf/mtrf.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/multimodal/multimodal.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/opm/opm.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/phantom_4dbti/phantom_4dbti.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/refmeg_noise/refmeg_noise.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/sample/sample.py": { + "a": 2, + "d": 3 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 4, + "d": 9 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 1, + "d": 5 + }, + "mne/datasets/somato/somato.py": { + "a": 2, + "d": 3 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 2, + "d": 3 + }, + "mne/datasets/ssvep/ssvep.py": { + "a": 2, + "d": 4 + }, + "mne/datasets/testing/_testing.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 5 + }, + "mne/datasets/utils.py": { + "a": 4, + "d": 8 + }, + "mne/datasets/visual_92_categories/visual_92_categories.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10211.json b/doc/sphinxext/prs/10211.json new file mode 100644 index 00000000000..74e776cd3a7 --- /dev/null +++ b/doc/sphinxext/prs/10211.json @@ -0,0 +1,199 @@ +{ + "merge_commit_sha": "e6336f0ee5dcb13023bee516787150dfb25a4bcf", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 2, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 13, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/data/html_templates.py": { + "a": 0, + "d": 180 + }, + "mne/epochs.py": { + "a": 11, + "d": 9 + }, + "mne/externals/tempita/__init__.py": { + "a": 0, + "d": 1309 + }, + "mne/externals/tempita/_looper.py": { + "a": 0, + "d": 163 + }, + "mne/externals/tempita/compat3.py": { + "a": 0, + "d": 45 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 2 + }, + "mne/html_templates/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/html_templates/_templates.py": { + "a": 25, + "d": 0 + }, + "mne/html_templates/report/code.html.jinja": { + "a": 24, + "d": 0 + }, + "mne/html_templates/report/cov.html.jinja": { + "a": 23, + "d": 0 + }, + "mne/html_templates/report/epochs.html.jinja": { + "a": 27, + "d": 0 + }, + "mne/html_templates/report/evoked.html.jinja": { + "a": 26, + "d": 0 + }, + "mne/html_templates/report/footer.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/report/forward.html.jinja": { + "a": 23, + "d": 0 + }, + "mne/html_templates/report/header.html.jinja": { + "a": 10, + "d": 10 + }, + "mne/html_templates/report/html.html.jinja": { + "a": 23, + "d": 0 + }, + "mne/html_templates/report/ica.html.jinja": { + "a": 29, + "d": 0 + }, + "mne/html_templates/report/image.html.jinja": { + "a": 36, + "d": 0 + }, + "mne/html_templates/report/inverse.html.jinja": { + "a": 23, + "d": 0 + }, + 
"mne/html_templates/report/raw.html.jinja": { + "a": 25, + "d": 0 + }, + "mne/html_templates/report/slider.html.jinja": { + "a": 22, + "d": 22 + }, + "mne/html_templates/report/toc.html.jinja": { + "a": 4, + "d": 4 + }, + "mne/html_templates/repr/epochs.html.jinja": { + "a": 22, + "d": 0 + }, + "mne/html_templates/repr/forward.html.jinja": { + "a": 18, + "d": 0 + }, + "mne/html_templates/repr/ica.html.jinja": { + "a": 32, + "d": 0 + }, + "mne/html_templates/repr/info.html.jinja": { + "a": 74, + "d": 0 + }, + "mne/html_templates/repr/inverse_operator.html.jinja": { + "a": 14, + "d": 0 + }, + "mne/html_templates/repr/raw.html.jinja": { + "a": 12, + "d": 0 + }, + "mne/io/base.py": { + "a": 5, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 57, + "d": 13 + }, + "mne/minimum_norm/inverse.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 2 + }, + "mne/report/report.py": { + "a": 74, + "d": 106 + }, + "mne/report/templates/code.html": { + "a": 0, + "d": 24 + }, + "mne/report/templates/cov.html": { + "a": 0, + "d": 23 + }, + "mne/report/templates/epochs.html": { + "a": 0, + "d": 27 + }, + "mne/report/templates/evoked.html": { + "a": 0, + "d": 26 + }, + "mne/report/templates/forward.html": { + "a": 0, + "d": 23 + }, + "mne/report/templates/html.html": { + "a": 0, + "d": 23 + }, + "mne/report/templates/ica.html": { + "a": 0, + "d": 29 + }, + "mne/report/templates/image.html": { + "a": 0, + "d": 36 + }, + "mne/report/templates/inverse.html": { + "a": 0, + "d": 23 + }, + "mne/report/templates/raw.html": { + "a": 0, + "d": 25 + }, + "requirements.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10212.json b/doc/sphinxext/prs/10212.json new file mode 100644 index 00000000000..31cee773400 --- /dev/null +++ b/doc/sphinxext/prs/10212.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "43e7ab99ccc7d7e006af81804147b57abf882b7e", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 106, + "d": 38 + }, + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 17, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10214.json b/doc/sphinxext/prs/10214.json new file mode 100644 index 00000000000..f3f7d36e890 --- /dev/null +++ b/doc/sphinxext/prs/10214.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7ecd46fe444a9c7e9d2b0c7f63ef05af09d41082", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10215.json b/doc/sphinxext/prs/10215.json new file mode 100644 index 00000000000..5c0ba138098 --- /dev/null +++ b/doc/sphinxext/prs/10215.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c9fff0c24c80f17282a72251470b207f878a36f7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "doc/overview/governance.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10217.json b/doc/sphinxext/prs/10217.json new file mode 100644 index 00000000000..dfb306a1052 --- /dev/null +++ b/doc/sphinxext/prs/10217.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "ceb381a4cdb05e0c0b237ec4180106e970dc08a4", + "authors": [ + { + "n": "twang5", + "e": "twang5@swarthmore.edu" + }, + { + "n": "Richard 
Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + ".gitignore": { + "a": 2, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 14, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 7, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1022.json b/doc/sphinxext/prs/1022.json new file mode 100644 index 00000000000..f37de36724c --- /dev/null +++ b/doc/sphinxext/prs/1022.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "10211c039a8c37f98f4172ae1af196b0041a3c6d", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/fiff/evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10220.json b/doc/sphinxext/prs/10220.json new file mode 100644 index 00000000000..56889bad1a0 --- /dev/null +++ b/doc/sphinxext/prs/10220.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f0611b5e93af11b59f48b540f4ec3a5d56c0bff5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 14, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 3, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 23, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10222.json b/doc/sphinxext/prs/10222.json new file mode 100644 index 00000000000..ae5baf88754 --- /dev/null +++ b/doc/sphinxext/prs/10222.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "e05151abb5d752cf6bd91fec0dc395ca7aa286fc", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/gui/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_coreg.py": { + "a": 14, + "d": 3 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 5, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10223.json b/doc/sphinxext/prs/10223.json new file mode 100644 index 00000000000..2cbec7bd720 --- /dev/null +++ b/doc/sphinxext/prs/10223.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "89e163f60be122be2150ee93a9d3fcea5641b8b7", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/fixes.py": { + "a": 1, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10226.json b/doc/sphinxext/prs/10226.json new file mode 100644 index 00000000000..90d562dc1b3 --- /dev/null +++ b/doc/sphinxext/prs/10226.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "037ebe20a4aa300eb3cb7b9d7828bdae9c1fa954", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/gui/_ieeg_locate_gui.py": { + "a": 7, + "d": 0 + }, + 
"mne/viz/_brain/_brain.py": { + "a": 1, + "d": 12 + }, + "mne/viz/utils.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10229.json b/doc/sphinxext/prs/10229.json new file mode 100644 index 00000000000..e963bf1118d --- /dev/null +++ b/doc/sphinxext/prs/10229.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "47051833f21bb372d60afc3adbf4305648ac7f69", + "authors": [ + { + "n": "Steve Matindi", + "e": null + } + ], + "changes": { + "LICENSE.txt": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10230.json b/doc/sphinxext/prs/10230.json new file mode 100644 index 00000000000..814d0e69cd7 --- /dev/null +++ b/doc/sphinxext/prs/10230.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "8cfadafadb3ddd7c91a1b03934f0eb1d2fc54a3c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/coreg.py": { + "a": 29, + "d": 10 + }, + "mne/gui/_coreg.py": { + "a": 11, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10233.json b/doc/sphinxext/prs/10233.json new file mode 100644 index 00000000000..23cc8de48ac --- /dev/null +++ b/doc/sphinxext/prs/10233.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "e305764e54300bb174d37c2c69cbb03e089a4e4b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/events.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 6, + "d": 4 + }, + "mne/event.py": { + "a": 87, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 48, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 4 + }, + "mne/utils/mixin.py": { + "a": 3, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10234.json b/doc/sphinxext/prs/10234.json new file mode 100644 index 00000000000..6f11ead60f7 --- /dev/null +++ b/doc/sphinxext/prs/10234.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "edcdaf38efdb436c13860ebdf4b190d4719f8051", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10235.json b/doc/sphinxext/prs/10235.json new file mode 100644 index 00000000000..e49ed3ffc7d --- /dev/null +++ b/doc/sphinxext/prs/10235.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a1cd8003bf7a359ebd76e4044afb7e8c0a1bfba9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/hitachi/hitachi.py": { + "a": 2, + "d": 0 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10239.json b/doc/sphinxext/prs/10239.json new file mode 100644 index 00000000000..05acdbf48e8 --- /dev/null +++ b/doc/sphinxext/prs/10239.json @@ -0,0 +1,43 @@ 
+{ + "merge_commit_sha": "c8751aad782b6f39cbe9516799f2d3dab9df0204", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/mri.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 8, + "d": 7 + }, + "mne/coreg.py": { + "a": 1, + "d": 11 + }, + "mne/gui/_coreg.py": { + "a": 1, + "d": 11 + }, + "mne/io/_digitization.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 12, + "d": 8 + }, + "mne/utils/docs.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10240.json b/doc/sphinxext/prs/10240.json new file mode 100644 index 00000000000..39005aaa2eb --- /dev/null +++ b/doc/sphinxext/prs/10240.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "24f63b1d8ccc665308c0e2637b9782c504250f1e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 7, + "d": 5 + }, + "mne/viz/backends/_notebook.py": { + "a": 7, + "d": 5 + }, + "mne/viz/backends/_qt.py": { + "a": 28, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10241.json b/doc/sphinxext/prs/10241.json new file mode 100644 index 00000000000..0316b3ac5cc --- /dev/null +++ b/doc/sphinxext/prs/10241.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "92908de66e7a6da60300929b3d0859c40e25eaa6", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 18, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10242.json b/doc/sphinxext/prs/10242.json new file mode 100644 index 00000000000..7aa898f7bc8 --- /dev/null +++ b/doc/sphinxext/prs/10242.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "89650ca14913a6f32badec6b07c12d160d125934", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/coreg.py": { + "a": 3, + "d": 0 + }, + "mne/gui/_coreg.py": { + "a": 124, + "d": 38 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10243.json b/doc/sphinxext/prs/10243.json new file mode 100644 index 00000000000..70ae6c5cdeb --- /dev/null +++ b/doc/sphinxext/prs/10243.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "49bdb5e0e0eb85dbfa37d3dfda03e7591dc7ed4d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/coreg.py": { + "a": 51, + "d": 12 + }, + "mne/gui/_coreg.py": { + "a": 14, + "d": 13 + }, + "mne/tests/test_coreg.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10244.json b/doc/sphinxext/prs/10244.json new file mode 100644 index 00000000000..cfc2bb27c8e --- /dev/null +++ b/doc/sphinxext/prs/10244.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "53878ce44237bb7abc5cc00721e19fa22e6f25fd", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_gui_api.py": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 3, + "d": 1 + }, + 
"mne/viz/backends/_notebook.py": { + "a": 3, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10245.json b/doc/sphinxext/prs/10245.json new file mode 100644 index 00000000000..293f2619074 --- /dev/null +++ b/doc/sphinxext/prs/10245.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9396ca77ba61f78714bb14449d4f78ecbec1caf0", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 18, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10246.json b/doc/sphinxext/prs/10246.json new file mode 100644 index 00000000000..19c374781ef --- /dev/null +++ b/doc/sphinxext/prs/10246.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "585e444a709efb1aa5236182c3ba8052d1658cbb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/html_templates/report/epochs.html.jinja": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10247.json b/doc/sphinxext/prs/10247.json new file mode 100644 index 00000000000..5d1dcd671da --- /dev/null +++ b/doc/sphinxext/prs/10247.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "89f3d40d41e56130d91a555180a6a357ba00ed29", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 14, + "d": 7 + }, + "tutorials/intro/70_report.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10249.json b/doc/sphinxext/prs/10249.json new file mode 100644 index 00000000000..cb09b59ba66 --- /dev/null +++ b/doc/sphinxext/prs/10249.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4bedd849b868f3fe35a98ebf1c9dbd1acfa758f5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10250.json b/doc/sphinxext/prs/10250.json new file mode 100644 index 00000000000..2b33e7d4619 --- /dev/null +++ b/doc/sphinxext/prs/10250.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b3bd234a412405e4fe5222382aa817e830928e10", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_gui_api.py": { + "a": 5, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 6, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 5, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10251.json b/doc/sphinxext/prs/10251.json new file mode 100644 index 00000000000..f7a630d58d8 --- /dev/null +++ b/doc/sphinxext/prs/10251.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e1aec0ec39bee4ff437ab4f0fb80d4cc673a7751", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 45, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10252.json b/doc/sphinxext/prs/10252.json new file mode 100644 index 00000000000..ae5f065e03d --- /dev/null +++ b/doc/sphinxext/prs/10252.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0accf8ceb7dbdcfe64f74f43260cb66583159cab", + "authors": [ + { + 
"n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/__init__.py": { + "a": 6, + "d": 7 + }, + "mne/gui/_coreg.py": { + "a": 39, + "d": 18 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10253.json b/doc/sphinxext/prs/10253.json new file mode 100644 index 00000000000..5a98840622a --- /dev/null +++ b/doc/sphinxext/prs/10253.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b5688906f971ad0b4026d9812e021dbfd015bf70", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_gui_api.py": { + "a": 142, + "d": 16 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10255.json b/doc/sphinxext/prs/10255.json new file mode 100644 index 00000000000..6a563fd2b51 --- /dev/null +++ b/doc/sphinxext/prs/10255.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "abf530f78ea39e5dfeaf3d33b0f994c706eb9e9e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 62, + "d": 57 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10257.json b/doc/sphinxext/prs/10257.json new file mode 100644 index 00000000000..64226a40208 --- /dev/null +++ b/doc/sphinxext/prs/10257.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f2d152916e8cd32c411c9b429317b455a7e699f5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10259.json b/doc/sphinxext/prs/10259.json new file mode 100644 index 00000000000..b924af6716d --- /dev/null +++ b/doc/sphinxext/prs/10259.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8992bd050194b31ec03b8b6ef70021b307d2a541", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_gui_api.py": { + "a": 36, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 24, + "d": 15 + }, + "mne/viz/backends/_notebook.py": { + "a": 55, + "d": 22 + }, + "mne/viz/backends/_qt.py": { + "a": 32, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10260.json b/doc/sphinxext/prs/10260.json new file mode 100644 index 00000000000..65f48f5c3c1 --- /dev/null +++ b/doc/sphinxext/prs/10260.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2142d19a47a26cf5b94926c321daec50a94eff86", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 56, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10261.json b/doc/sphinxext/prs/10261.json new file mode 100644 index 00000000000..7b0e2f64d21 --- /dev/null +++ b/doc/sphinxext/prs/10261.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "addab681a95d8a1054cfd0e0de1842f3ee4b467f", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/utils/config.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/10262.json b/doc/sphinxext/prs/10262.json new file mode 100644 index 00000000000..f64c9f70ab3 --- /dev/null +++ b/doc/sphinxext/prs/10262.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cfa92b5119c9acb2293c654585f3b730dce1f19c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 39, + "d": 18 + }, + "mne/viz/backends/_qt.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10265.json b/doc/sphinxext/prs/10265.json new file mode 100644 index 00000000000..0fba315fdf4 --- /dev/null +++ b/doc/sphinxext/prs/10265.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "79cbc029fb45bfb1bd441f7544c53d76521be7d9", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/transforms.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10266.json b/doc/sphinxext/prs/10266.json new file mode 100644 index 00000000000..49aff51f8dd --- /dev/null +++ b/doc/sphinxext/prs/10266.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4595f8c7c849181ee225b994b2eacdfd143feb82", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/filter.py": { + "a": 23, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10267.json b/doc/sphinxext/prs/10267.json new file mode 100644 index 00000000000..2be0d6e9f29 --- /dev/null +++ b/doc/sphinxext/prs/10267.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "69e2ce6016f49c915ac37f4adf57db347d4d14f2", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "examples/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/README.txt": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10268.json b/doc/sphinxext/prs/10268.json new file mode 100644 index 00000000000..7921abc185d --- /dev/null +++ b/doc/sphinxext/prs/10268.json @@ -0,0 +1,159 @@ +{ + "merge_commit_sha": "93a3be5d21028331a88c42ef9dfa10566bcd6d1f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 2 + }, + "doc/logging.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/_ola.py": { + "a": 4, + "d": 5 + }, + "mne/annotations.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 0, + "d": 13 + }, + "mne/channels/channels.py": { + "a": 9, + "d": 11 + }, + "mne/cov.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/receptive_field.py": { + "a": 2, + "d": 4 + }, + "mne/decoding/search_light.py": { + "a": 4, + "d": 8 + }, + "mne/decoding/time_frequency.py": { + "a": 5, + "d": 7 + }, + "mne/decoding/transformer.py": { + "a": 10, + "d": 10 + }, + "mne/dipole.py": { + "a": 9, + "d": 10 + }, + "mne/epochs.py": { + "a": 17, + "d": 26 + }, + "mne/evoked.py": { + "a": 10, + "d": 13 + }, + "mne/filter.py": { + "a": 4, + "d": 4 + }, + "mne/io/base.py": { + "a": 13, + "d": 13 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 3, + "d": 3 + }, + "mne/io/fiff/raw.py": { + "a": 0, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 4, + "d": 4 + }, + "mne/label.py": { + "a": 12, + "d": 17 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 8, + "d": 8 + }, + "mne/preprocessing/ica.py": { + "a": 9, + "d": 10 + }, + "mne/report/report.py": { + "a": 10, + "d": 9 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 16, + "d": 18 + }, + "mne/source_space.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 61, + "d": 37 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 1 + }, + "mne/utils/tests/test_logging.py": { + "a": 9, + "d": 18 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1027.json b/doc/sphinxext/prs/1027.json new file mode 100644 index 00000000000..326eb38fda6 --- /dev/null +++ b/doc/sphinxext/prs/1027.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "969ac4ba6167ddf255aac6fd8042f1d5e142232a", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + ".travis.yml": { + "a": 21, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10270.json b/doc/sphinxext/prs/10270.json new file mode 100644 index 00000000000..76333387553 --- /dev/null +++ b/doc/sphinxext/prs/10270.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5b788d6cfb6d6c84c7d630ba41d9696f2c37e31b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_gui_api.py": { + "a": 12, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 8, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 8, + "d": 0 + }, + 
"mne/viz/backends/_qt.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10271.json b/doc/sphinxext/prs/10271.json new file mode 100644 index 00000000000..33c28211bbd --- /dev/null +++ b/doc/sphinxext/prs/10271.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "139a6347e481477650022b9fc069bea84be344eb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10272.json b/doc/sphinxext/prs/10272.json new file mode 100644 index 00000000000..ad940935e4e --- /dev/null +++ b/doc/sphinxext/prs/10272.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0fcac00dd9095771f36642864bfb302e8c3e138b", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 68, + "d": 81 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10273.json b/doc/sphinxext/prs/10273.json new file mode 100644 index 00000000000..0268a11558e --- /dev/null +++ b/doc/sphinxext/prs/10273.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4e8ae75d51d8fb4b9a20744c65f4f0eaa4eb7a82", + "authors": [ + { + "n": "Eduard Ort", + "e": "eduardxort@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10275.json b/doc/sphinxext/prs/10275.json new file mode 100644 index 00000000000..e5a817df298 --- /dev/null +++ b/doc/sphinxext/prs/10275.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "30f182d94ebaa226c8f7777c939c8ab80738ca07", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10276.json b/doc/sphinxext/prs/10276.json new file mode 100644 index 00000000000..b4b868cc226 --- /dev/null +++ b/doc/sphinxext/prs/10276.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "623645a3d3f5e71ddaf448ccd6ca1be74748219a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/commands/mne_coreg.py": { + "a": 1, + "d": 5 + }, + "mne/gui/__init__.py": { + "a": 14, + "d": 13 + }, + "mne/gui/_coreg.py": { + "a": 15, + "d": 32 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 27, + "d": 11 + }, + "mne/utils/config.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10277.json b/doc/sphinxext/prs/10277.json new file mode 100644 index 00000000000..c464b46c0c8 --- /dev/null +++ b/doc/sphinxext/prs/10277.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d53c5bb9d9c69c055b6bca680ee750ac235bbe58", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/_localized_abbr.py": { + "a": 60, + "d": 0 + }, + "mne/io/nirx/nirx.py": { + "a": 33, + "d": 10 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 44, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10278.json 
b/doc/sphinxext/prs/10278.json new file mode 100644 index 00000000000..a0bb8406114 --- /dev/null +++ b/doc/sphinxext/prs/10278.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e11f1fcd09f1ec14e48bc70bddc27b9ce3beff45", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 14, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10279.json b/doc/sphinxext/prs/10279.json new file mode 100644 index 00000000000..c28356572eb --- /dev/null +++ b/doc/sphinxext/prs/10279.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e59df7c3128f1b5299f949a9bfe26adc98ff1440", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 19, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10280.json b/doc/sphinxext/prs/10280.json new file mode 100644 index 00000000000..2e022da6e85 --- /dev/null +++ b/doc/sphinxext/prs/10280.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a315288bfc483aab7e79f42e1d78c4b2bf3e5d15", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 0, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10281.json b/doc/sphinxext/prs/10281.json new file mode 100644 index 00000000000..67626b339cb --- /dev/null +++ b/doc/sphinxext/prs/10281.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6422d6c033c3a9d6221c2848dbb4b0cd6d8848f7", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 9, + "d": 7 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 29, + "d": 14 + }, + "mne/time_frequency/tfr.py": { + "a": 29, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10282.json b/doc/sphinxext/prs/10282.json new file mode 100644 index 00000000000..b7a8f562086 --- /dev/null +++ b/doc/sphinxext/prs/10282.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2ade8dab149f9b45d2321208a16131f8c89f0d10", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/links.inc": { + "a": 3, + "d": 0 + }, + "doc/overview/roadmap.rst": { + "a": 54, + "d": 63 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10283.json b/doc/sphinxext/prs/10283.json new file mode 100644 index 00000000000..154f8d2742f --- /dev/null +++ b/doc/sphinxext/prs/10283.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d694c74c3f90e3128a321bbef2fbc5de77fae1e8", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 21, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10284.json b/doc/sphinxext/prs/10284.json new file mode 100644 index 00000000000..ccddf99e14f --- /dev/null +++ b/doc/sphinxext/prs/10284.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "5920d6930da19ed393949acf3c9bb7d7859c3b9d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/gui/__init__.py": { + "a": 11, + "d": 2 + }, + "mne/gui/_coreg.py": { + 
"a": 13, + "d": 4 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 5, + "d": 0 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 3, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 5, + "d": 2 + }, + "mne/viz/backends/_qt.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10289.json b/doc/sphinxext/prs/10289.json new file mode 100644 index 00000000000..b379624b185 --- /dev/null +++ b/doc/sphinxext/prs/10289.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "258b462f06a3a5ef4c6d8ded6ca03f519b498831", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/event.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10290.json b/doc/sphinxext/prs/10290.json new file mode 100644 index 00000000000..9af3b02a6e9 --- /dev/null +++ b/doc/sphinxext/prs/10290.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "193802765043f9b03b29cd9da08e2c690c22ef72", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_notebook.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10291.json b/doc/sphinxext/prs/10291.json new file mode 100644 index 00000000000..da7e8cde992 --- /dev/null +++ b/doc/sphinxext/prs/10291.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7a8964a6c8277ae1e84e0a606604f1c7eadd2569", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10292.json b/doc/sphinxext/prs/10292.json new file mode 100644 index 00000000000..2fd2ba3d259 --- /dev/null +++ b/doc/sphinxext/prs/10292.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "8babd8ce675be5fa17a9288b0ed49b2cd88b8f75", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 7, + "d": 1 + }, + "mne/io/proj.py": { + "a": 46, + "d": 14 + }, + "mne/io/tests/test_raw.py": { + "a": 5, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/proj.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10293.json b/doc/sphinxext/prs/10293.json new file mode 100644 index 00000000000..061e7fce4e9 --- /dev/null +++ b/doc/sphinxext/prs/10293.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "00ea4fa75906149cfa20232d6a72a8abc73f1430", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 137, + "d": 141 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10294.json b/doc/sphinxext/prs/10294.json new file mode 100644 index 00000000000..0410ffd510f --- /dev/null +++ b/doc/sphinxext/prs/10294.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b28c9af5920db2c94967f4cc3d8789fc18b5a5af", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 3, + "d": 3 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/10296.json b/doc/sphinxext/prs/10296.json new file mode 100644 index 00000000000..d7bd5ec5385 --- /dev/null +++ b/doc/sphinxext/prs/10296.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fa643e8dcde1024d21d695e4c788f1a1a8a0e41f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_notebook.py": { + "a": 19, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10297.json b/doc/sphinxext/prs/10297.json new file mode 100644 index 00000000000..595f824f87f --- /dev/null +++ b/doc/sphinxext/prs/10297.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "3677baf21a6d8a82d64eb9683b980c9d16907ad0", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 14, + "d": 4 + }, + "mne/epochs.py": { + "a": 4, + "d": 2 + }, + "mne/report/tests/test_report.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 22, + "d": 24 + }, + "mne/viz/_mpl_figure.py": { + "a": 12, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 12, + "d": 7 + }, + "mne/viz/tests/test_epochs.py": { + "a": 35, + "d": 38 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10298.json b/doc/sphinxext/prs/10298.json new file mode 100644 index 00000000000..97999da861f --- /dev/null +++ b/doc/sphinxext/prs/10298.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c3e812ba3840a336f4da6c26236e1b4487f6c02e", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/mri.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 22, + "d": 0 + }, + "mne/tests/test_freesurfer.py": { + "a": 39, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 39, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10299.json b/doc/sphinxext/prs/10299.json new file mode 100644 index 00000000000..e241700a52a --- /dev/null +++ b/doc/sphinxext/prs/10299.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "00e0889d0a5b7cf953cc3fdc6065b0564f220a75", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 22, + "d": 5 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 13, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1030.json b/doc/sphinxext/prs/1030.json new file mode 100644 index 00000000000..37f09f5ac70 --- /dev/null +++ b/doc/sphinxext/prs/1030.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "37ef1ca556317ccef68311aaedd0612f8a95ff64", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/fiff/tag.py": { + "a": 8, + "d": 5 + }, + "mne/fiff/tests/test_raw.py": { + "a": 13, + "d": 0 + }, + "mne/fiff/write.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10300.json b/doc/sphinxext/prs/10300.json new file mode 100644 index 00000000000..48e14d77fd6 --- /dev/null +++ b/doc/sphinxext/prs/10300.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "47c53f9cf57bd81de4427f903cde839df5aba664", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/install/mne_python.rst": { + "a": 20, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10301.json b/doc/sphinxext/prs/10301.json new file mode 100644 index 00000000000..ce060d67e82 --- /dev/null +++ b/doc/sphinxext/prs/10301.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "40eaa0e1114d0bb64e8363d70527833f48189c25", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10302.json b/doc/sphinxext/prs/10302.json new file mode 100644 index 00000000000..6cbfa4d785d --- /dev/null +++ b/doc/sphinxext/prs/10302.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f1c118cefb45a4a9aafce1725120f67997860ed2", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 32, + "d": 12 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 22, + "d": 7 + }, + "mne/viz/backends/_qt.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10304.json b/doc/sphinxext/prs/10304.json new file mode 100644 index 00000000000..c5265847d4b --- /dev/null +++ b/doc/sphinxext/prs/10304.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "bdd8a3f963963f7ccf54bd3a286748bf3dcb88ff", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/egi/egimff.py": { + "a": 24, + "d": 22 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10305.json b/doc/sphinxext/prs/10305.json new file mode 100644 index 00000000000..0349c5bdbd7 --- /dev/null +++ b/doc/sphinxext/prs/10305.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "639d7362b2c1a4599b583f247a431a3492c24190", + "authors": [ + { + "n": "Guillaume Favelier", + "e": 
"guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 85, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 11, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 23, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10306.json b/doc/sphinxext/prs/10306.json new file mode 100644 index 00000000000..afe8acb4a57 --- /dev/null +++ b/doc/sphinxext/prs/10306.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6dcfda8edf51ba4cbc61ccc6b06aca1b804731d4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/glossary.rst": { + "a": 11, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 7 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 119, + "d": 86 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10307.json b/doc/sphinxext/prs/10307.json new file mode 100644 index 00000000000..9c8e95fae9a --- /dev/null +++ b/doc/sphinxext/prs/10307.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e6b53672ba66af749a075559aa2e94fe06802f3c", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 38, + "d": 16 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10309.json b/doc/sphinxext/prs/10309.json new file mode 100644 index 00000000000..ee65649cd6a --- /dev/null +++ b/doc/sphinxext/prs/10309.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "6107c3a6377eb07c2ba1c5a93cea54122fe74d64", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/datasets/opm_data.py": { + "a": 2, + "d": 2 + }, + "examples/forward/left_cerebellum_volume_source.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/read_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/movement_detection.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 2, + "d": 3 + }, + "examples/visualization/3d_to_2d.py": { + "a": 5, + "d": 6 + }, + "examples/visualization/eeg_on_scalp.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 27, + "d": 21 + }, + "mne/viz/tests/test_3d.py": { + "a": 8, + "d": 5 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 2, + "d": 2 + }, + "tutorials/clinical/30_ecog.py": { + "a": 3, + "d": 3 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 2, + "d": 3 + }, + "tutorials/forward/25_automated_coreg.py": { + "a": 10, + "d": 4 + }, + "tutorials/forward/30_forward.py": { + "a": 2, + "d": 2 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 6, + "d": 9 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 1, + "d": 0 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 2, + "d": 2 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 0, + "d": 1 + }, + 
"tutorials/inverse/90_phantom_4DBTi.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10313.json b/doc/sphinxext/prs/10313.json new file mode 100644 index 00000000000..1efcb85ef88 --- /dev/null +++ b/doc/sphinxext/prs/10313.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "58e17f29b4daf65d013fcf90f6746b565160a016", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/movement_compensation.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/movement_detection.py": { + "a": 5, + "d": 2 + }, + "examples/visualization/3d_to_2d.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10316.json b/doc/sphinxext/prs/10316.json new file mode 100644 index 00000000000..3ea52973a49 --- /dev/null +++ b/doc/sphinxext/prs/10316.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ff7ca979b70e92d265678ab00b832ac175adc40d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10317.json b/doc/sphinxext/prs/10317.json new file mode 100644 index 00000000000..c4b026d5be3 --- /dev/null +++ b/doc/sphinxext/prs/10317.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "356dde91de47d8d1d113b65400ece5c777d833c2", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/viz/_mpl_figure.py": { + "a": 3, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10318.json b/doc/sphinxext/prs/10318.json new file mode 100644 index 00000000000..a57d71caf1b --- /dev/null +++ b/doc/sphinxext/prs/10318.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "92393e880316d5ba07f67cd62571fbe80d37cc36", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/icons/README.rst": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10319.json b/doc/sphinxext/prs/10319.json new file mode 100644 index 00000000000..fbb970cc3c5 --- /dev/null +++ b/doc/sphinxext/prs/10319.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "a5f8d0bf0cb68f91696d442bdb81f3b2d152e8e2", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 0, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 29, + "d": 18 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 5 + }, + "mne/viz/backends/_abstract.py": { + "a": 0, + "d": 5 + }, + "mne/viz/backends/_pyvista.py": { + "a": 12, + "d": 5 + }, + "tutorials/clinical/20_seeg.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 0, + "d": 1 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10322.json b/doc/sphinxext/prs/10322.json new file mode 100644 index 00000000000..42fb585e357 --- /dev/null +++ b/doc/sphinxext/prs/10322.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aa0db284840730b91baf2a56e73bdf672c4b5e15", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "paper.md": { + 
"a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10323.json b/doc/sphinxext/prs/10323.json new file mode 100644 index 00000000000..f95748dab9b --- /dev/null +++ b/doc/sphinxext/prs/10323.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "3ccb6c0d6810f95b0ee22568e0bbefb24b3cd9d7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 3, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_figure.py": { + "a": 33, + "d": 29 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 7, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10324.json b/doc/sphinxext/prs/10324.json new file mode 100644 index 00000000000..f253424fe63 --- /dev/null +++ b/doc/sphinxext/prs/10324.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "0c3c52fa96365cfc8a6fc8b4b9e97d038ffaf385", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 25, + "d": 10 + }, + "mne/channels/montage.py": { + "a": 14, + "d": 3 + }, + "mne/channels/tests/test_montage.py": { + "a": 20, + "d": 4 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 14, + "d": 14 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10325.json b/doc/sphinxext/prs/10325.json new file mode 100644 index 00000000000..965fc4bddb6 --- /dev/null +++ b/doc/sphinxext/prs/10325.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2075f84592a77a40213706ec54f4965aca60afd2", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 21, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10326.json b/doc/sphinxext/prs/10326.json new file mode 100644 index 00000000000..694b2810406 --- /dev/null +++ b/doc/sphinxext/prs/10326.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d20ead8b9c1bbb2fab9b0af93f1cca913e3dddfd", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "figures/Figure_2.png": { + "a": 0, + "d": 0 + }, + "figures/Figure_3.png": { + "a": 0, + "d": 0 + }, + "paper.md": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10328.json b/doc/sphinxext/prs/10328.json new file mode 100644 index 00000000000..95df6fe256d --- /dev/null +++ b/doc/sphinxext/prs/10328.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bdb7921aeeffbd8858113ca2101a4d46031393b4", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "paper.md": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1033.json b/doc/sphinxext/prs/1033.json new file mode 100644 index 00000000000..72ea6555d6a --- /dev/null +++ b/doc/sphinxext/prs/1033.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "91415917083fa64cb9c386f7f26d8553d88b266e", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fiff/brainvision/brainvision.py": { + "a": 63, + "d": 27 + }, + "mne/fiff/brainvision/tests/test_brainvision.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10330.json b/doc/sphinxext/prs/10330.json new file mode 100644 index 00000000000..6fa06f10c12 --- /dev/null +++ b/doc/sphinxext/prs/10330.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "9315ed6e77157b9536452bafc738ac97065f9ca1", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 17, + "d": 8 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 5, + "d": 0 + }, + "mne/viz/_mpl_figure.py": { + "a": 18, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 0, + "d": 12 + }, + "mne/viz/ica.py": { + "a": 13, + "d": 19 + }, + "mne/viz/raw.py": { + "a": 0, + "d": 14 + }, + "mne/viz/tests/test_ica.py": { + "a": 18, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10331.json b/doc/sphinxext/prs/10331.json new file mode 100644 index 00000000000..dbf45d52f3c --- /dev/null +++ b/doc/sphinxext/prs/10331.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "20078a124a135e5fce7b55f00f1659be2df137fe", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10332.json b/doc/sphinxext/prs/10332.json new file mode 100644 index 00000000000..35af0dc55e4 --- /dev/null +++ b/doc/sphinxext/prs/10332.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1713cc294f8c545bd7940e6d0dc52ec719f3495b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "paper.md": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10334.json b/doc/sphinxext/prs/10334.json new file mode 100644 index 00000000000..fa4a327a594 --- /dev/null +++ b/doc/sphinxext/prs/10334.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "919489a2da08ddb3d70e4056de1ab7c7305c5e6f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/tests/test_commands.py": { + "a": 15, + "d": 14 + }, + "mne/conftest.py": { + "a": 21, + "d": 5 + }, + "mne/tests/test_coreg.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_source_space.py": { + "a": 9, + "d": 8 + }, + "mne/utils/_logging.py": { + "a": 9, + "d": 0 + }, + "mne/utils/_testing.py": { + "a": 3, + "d": 0 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 16, + "d": 10 + }, + "mne/utils/tests/test_testing.py": { + "a": 25, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10335.json b/doc/sphinxext/prs/10335.json new file mode 100644 index 00000000000..b4c63078234 --- /dev/null +++ b/doc/sphinxext/prs/10335.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "32084513d547657029ce4a77b8db79d2a2a24b70", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 17, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10336.json b/doc/sphinxext/prs/10336.json new file mode 100644 index 00000000000..779a0e73033 --- /dev/null +++ b/doc/sphinxext/prs/10336.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "397eda40c41b0e9a9587b443b53faa07a05cdcca", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/io/base.py": { + "a": 7, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10338.json 
b/doc/sphinxext/prs/10338.json new file mode 100644 index 00000000000..0e08bf8a72a --- /dev/null +++ b/doc/sphinxext/prs/10338.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ad54e0ffae82d86fc96f8cfef312637ae96a3da5", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "paper.md": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10339.json b/doc/sphinxext/prs/10339.json new file mode 100644 index 00000000000..dadf60aaed2 --- /dev/null +++ b/doc/sphinxext/prs/10339.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "62a9de90c88a6a9dfcc1094e5ca4643434991dda", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10340.json b/doc/sphinxext/prs/10340.json new file mode 100644 index 00000000000..353dc3d7874 --- /dev/null +++ b/doc/sphinxext/prs/10340.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7d6943aa455d572e09ddec60879a84863f4b8ef2", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "paper.md": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10341.json b/doc/sphinxext/prs/10341.json new file mode 100644 index 00000000000..cc9d22b89d4 --- /dev/null +++ b/doc/sphinxext/prs/10341.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "926385a7fc9af342801642b1731662b785c8fcd8", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "paper.md": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10342.json b/doc/sphinxext/prs/10342.json new file mode 100644 index 00000000000..c38faefff1c --- /dev/null +++ b/doc/sphinxext/prs/10342.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "65fb5a63f959203654d88a3614bd880da86ad735", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 23, + "d": 15 + }, + "mne/report/tests/test_report.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10345.json b/doc/sphinxext/prs/10345.json new file mode 100644 index 00000000000..90f74a64f28 --- /dev/null +++ b/doc/sphinxext/prs/10345.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c92316d96ec373fe4e3d923b5b0a6ca7119299ad", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10346.json b/doc/sphinxext/prs/10346.json new file mode 100644 index 00000000000..14f54d39f1b --- /dev/null +++ b/doc/sphinxext/prs/10346.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0c500107be1a85e42c9b63411829c5dbaedf62d4", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_gui_api.py": { + "a": 34, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 7, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 8, + "d": 2 + }, + "mne/viz/backends/_qt.py": { + "a": 53, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10347.json b/doc/sphinxext/prs/10347.json new file 
mode 100644 index 00000000000..403866383c9 --- /dev/null +++ b/doc/sphinxext/prs/10347.json @@ -0,0 +1,131 @@ +{ + "merge_commit_sha": "9e546ac41629ff18548328dfe79fe47d232b1229", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/ssp.rst": { + "a": 1, + "d": 1 + }, + "doc/_templates/autosummary/class.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 3, + "d": 20 + }, + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + }, + "doc/overview/cookbook.rst": { + "a": 1, + "d": 1 + }, + "doc/overview/design_philosophy.rst": { + "a": 2, + "d": 1 + }, + "doc/overview/matlab.rst": { + "a": 1, + "d": 1 + }, + "doc/visualization.rst": { + "a": 1, + "d": 0 + }, + "ignore_words.txt": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 4, + "d": 0 + }, + "mne/dipole.py": { + "a": 3, + "d": 3 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 5, + "d": 3 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 17, + "d": 18 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 30, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 28, + "d": 24 + }, + "mne/viz/backends/_qt.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 6, + "d": 6 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 15, + "d": 9 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10348.json b/doc/sphinxext/prs/10348.json new file mode 100644 index 00000000000..9037f19d5cb --- /dev/null +++ b/doc/sphinxext/prs/10348.json @@ -0,0 +1,463 @@ +{ + "merge_commit_sha": "5fd9ca6c6323fc5307f46c86b7c6bfae522525e7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/memory.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "examples/datasets/brainstorm_data.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/spm_faces_dataset_sgskip.py": { + "a": 6, + "d": 6 + }, + "examples/decoding/decoding_spatio_temporal_source.py": { + "a": 8, + "d": 7 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 3, + "d": 2 + }, + "examples/decoding/decoding_unsupervised_spatial_filter.py": { + "a": 3, + "d": 2 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 3, + "d": 2 + }, + "examples/decoding/ems_filtering.py": { + "a": 3, + "d": 2 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 3, + "d": 3 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 1, + "d": 1 + }, + "examples/forward/forward_sensitivity_maps.py": { + "a": 3, + "d": 4 + }, + "examples/inverse/compute_mne_inverse_epochs_in_label.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/compute_mne_inverse_raw_in_label.py": { + "a": 4, + "d": 3 + }, + "examples/inverse/compute_mne_inverse_volume.py": 
{ + "a": 5, + "d": 4 + }, + "examples/inverse/custom_inverse_solver.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/gamma_map_inverse.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/label_activation_from_stc.py": { + "a": 9, + "d": 11 + }, + "examples/inverse/label_from_stc.py": { + "a": 4, + "d": 3 + }, + "examples/inverse/label_source_activations.py": { + "a": 4, + "d": 3 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 7, + "d": 5 + }, + "examples/inverse/mne_cov_power.py": { + "a": 4, + "d": 3 + }, + "examples/inverse/psf_ctf_label_leakage.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/psf_ctf_vertices.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/psf_ctf_vertices_lcmv.py": { + "a": 6, + "d": 5 + }, + "examples/inverse/rap_music.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/read_inverse.py": { + "a": 4, + "d": 6 + }, + "examples/inverse/read_stc.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/resolution_metrics.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/resolution_metrics_eegmeg.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/source_space_snr.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/time_frequency_mixed_norm_inverse.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/vector_mne_solution.py": { + "a": 5, + "d": 4 + }, + "examples/preprocessing/css.py": { + "a": 9, + "d": 8 + }, + "examples/preprocessing/define_target_events.py": { + "a": 3, + "d": 2 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/find_ref_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/ica_comparison.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/interpolate_bad_channels.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/shift_evoked.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/virtual_evoked.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/xdawn_denoising.py": { + "a": 3, + "d": 2 + }, + "examples/simulation/simulate_evoked_data.py": { + "a": 7, + "d": 7 + }, + "examples/simulation/simulate_raw_data.py": { + "a": 3, + "d": 2 + }, + "examples/stats/cluster_stats_evoked.py": { + "a": 3, + "d": 2 + }, + "examples/stats/fdr_stats_evoked.py": { + "a": 3, + "d": 2 + }, + "examples/stats/linear_regression_raw.py": { + "a": 2, + "d": 1 + }, + "examples/stats/sensor_permutation_test.py": { + "a": 3, + "d": 2 + }, + "examples/stats/sensor_regression.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/compute_csd.py": { + "a": 3, + "d": 2 + }, + "examples/time_frequency/compute_source_psd_epochs.py": { + "a": 6, + "d": 5 + }, + "examples/time_frequency/source_label_time_frequency.py": { + "a": 4, + "d": 3 + }, + "examples/time_frequency/source_power_spectrum.py": { + "a": 4, + "d": 3 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 5, + "d": 5 + }, + "examples/time_frequency/source_space_time_frequency.py": { + "a": 3, + "d": 2 + }, + "examples/time_frequency/temporal_whitening.py": { + "a": 3, + "d": 3 + }, + "examples/visualization/channel_epochs_image.py": { + "a": 3, + "d": 2 + }, + "examples/visualization/eeg_on_scalp.py": { + "a": 4, + "d": 3 + }, + "examples/visualization/evoked_arrowmap.py": { + "a": 3, + "d": 3 + }, + "examples/visualization/evoked_topomap.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/evoked_whitening.py": { + "a": 3, + "d": 2 + }, + "examples/visualization/meg_sensors.py": { + "a": 7, + "d": 6 + }, + 
"examples/visualization/parcellation.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/roi_erpimage_by_rt.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/ssp_projs_sensitivity_map.py": { + "a": 4, + "d": 3 + }, + "examples/visualization/topo_compare_conditions.py": { + "a": 3, + "d": 2 + }, + "examples/visualization/topo_customized.py": { + "a": 2, + "d": 1 + }, + "examples/visualization/xhemi.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 9, + "d": 0 + }, + "mne/datasets/_fetch.py": { + "a": 12, + "d": 6 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 3, + "d": 2 + }, + "mne/datasets/kiloword/kiloword.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 23, + "d": 5 + }, + "mne/datasets/utils.py": { + "a": 40, + "d": 9 + }, + "mne/datasets/visual_92_categories/visual_92_categories.py": { + "a": 2, + "d": 3 + }, + "mne/dipole.py": { + "a": 2, + "d": 4 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 4 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 3, + "d": 2 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 6, + "d": 6 + }, + "mne/preprocessing/tests/test_css.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 5 + }, + "mne/stats/tests/test_regression.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 4, + "d": 2 + }, + "mne/utils/tests/test_testing.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 3, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 6, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 6 + }, + "tools/circleci_download.sh": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/40_whitened.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/25_automated_coreg.py": { + "a": 2, + "d": 3 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 3, + "d": 6 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 7, + "d": 6 + }, + "tutorials/inverse/40_mne_fixed_free.py": { + "a": 5, + "d": 4 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 5, + "d": 4 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 8, + "d": 8 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 4, + "d": 3 + }, + "tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 2, + "d": 1 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 3, + "d": 2 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 3, + "d": 2 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 7, + "d": 8 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 5, + "d": 6 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 6, + "d": 5 + }, + "tutorials/stats-source-space/70_cluster_rmANOVA_time_freq.py": { + "a": 3, + "d": 2 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10349.json b/doc/sphinxext/prs/10349.json new file mode 100644 index 00000000000..20e6afe7539 --- /dev/null +++ b/doc/sphinxext/prs/10349.json @@ -0,0 +1,27 
@@ +{ + "merge_commit_sha": "7c0e36d3f8764137219d1681193081ace91774cd", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 20, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 6, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10350.json b/doc/sphinxext/prs/10350.json new file mode 100644 index 00000000000..6cd948cce79 --- /dev/null +++ b/doc/sphinxext/prs/10350.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d2d6ec1eb5fb51289978e8ec34fb19d5c5c02b57", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "examples/time_frequency/time_frequency_erds.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10356.json b/doc/sphinxext/prs/10356.json new file mode 100644 index 00000000000..6519f878289 --- /dev/null +++ b/doc/sphinxext/prs/10356.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ffb4ef3d623b9e7202e5078cd3301c8cfc40d5a5", + "authors": [ + { + "n": "rcmdnk", + "e": "rcmdnk@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 2 + }, + "mne/io/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10357.json b/doc/sphinxext/prs/10357.json new file mode 100644 index 00000000000..fd399d73543 --- /dev/null +++ b/doc/sphinxext/prs/10357.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2520a6d66123c5acb7f6301d49943283adc4a7a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10358.json b/doc/sphinxext/prs/10358.json new file mode 100644 index 00000000000..a15f3cb7889 --- /dev/null +++ b/doc/sphinxext/prs/10358.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "ffbd0957764c838f71815cbce980b6d3ea2c73e8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 14, + "d": 6 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 5 + }, + "mne/io/base.py": { + "a": 21, + "d": 8 + }, + "mne/io/tests/test_raw.py": { + "a": 53, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 17, + "d": 7 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 147, + "d": 11 + }, + "mne/tests/test_event.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10359.json b/doc/sphinxext/prs/10359.json new file mode 100644 index 00000000000..f7615d30ff4 --- /dev/null +++ b/doc/sphinxext/prs/10359.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "01b4bf2f3a4a7f259c4d3377c83aa6e07b62eb09", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 38, + "d": 10 + }, + "mne/viz/_scraper.py": { + "a": 1, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 2 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 2, + 
"d": 2 + }, + "tutorials/intro/10_overview.py": { + "a": 0, + "d": 1 + }, + "tutorials/intro/15_inplace.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1036.json b/doc/sphinxext/prs/1036.json new file mode 100644 index 00000000000..e75385df1f8 --- /dev/null +++ b/doc/sphinxext/prs/1036.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c079e0a117a27f87916f451f5a3776eaa6cd8408", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 28, + "d": 42 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10361.json b/doc/sphinxext/prs/10361.json new file mode 100644 index 00000000000..56a5e7e4a3b --- /dev/null +++ b/doc/sphinxext/prs/10361.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "776b203fb323d7d935a4b76d9e20b13541491e59", + "authors": [ + { + "n": "rcmdnk", + "e": "rcmdnk@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 8, + "d": 6 + }, + "mne/tests/test_annotations.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10362.json b/doc/sphinxext/prs/10362.json new file mode 100644 index 00000000000..1f7b63f556b --- /dev/null +++ b/doc/sphinxext/prs/10362.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c87494462b7935e592efc0dcdc9553de1d0ccc5e", + "authors": [ + { + "n": "rcmdnk", + "e": "rcmdnk@gmail.com" + } + ], + "changes": { + "tutorials/clinical/60_sleep.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10363.json b/doc/sphinxext/prs/10363.json new file mode 100644 index 00000000000..3de2bd6f5dd --- /dev/null +++ b/doc/sphinxext/prs/10363.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "732bb1f994e64e41a8e95dcc10dc98c22cac95c0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10364.json b/doc/sphinxext/prs/10364.json new file mode 100644 index 00000000000..cabd69e076e --- /dev/null +++ b/doc/sphinxext/prs/10364.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4396a3f7f073ba735d7e578f449496676fdc2bd7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 0, + "d": 5 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 2, + "d": 2 + }, + "tutorials/intro/70_report.py": { + "a": 8, + "d": 7 + }, + "tutorials/inverse/10_stc_class.py": { + "a": 4, + "d": 4 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10365.json b/doc/sphinxext/prs/10365.json new file mode 100644 index 00000000000..dbb41976e9d --- /dev/null +++ b/doc/sphinxext/prs/10365.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "79ea57a4318d8d045f5966c26360b079f40a4865", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10366.json b/doc/sphinxext/prs/10366.json new file mode 100644 index 00000000000..f569acced04 --- /dev/null +++ b/doc/sphinxext/prs/10366.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e2150ede1cfbdb44ad64268623a3d55ed87a1702", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 21, + "d": 22 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 2, + "d": 4 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 15, + "d": 15 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10368.json b/doc/sphinxext/prs/10368.json new file mode 100644 index 00000000000..10e851a0693 --- /dev/null +++ b/doc/sphinxext/prs/10368.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "54884b563d312270d6442fbd709350a4d72afb8c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 5, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/_qt.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10369.json b/doc/sphinxext/prs/10369.json new file mode 100644 index 00000000000..33ca7a7d50f --- /dev/null +++ b/doc/sphinxext/prs/10369.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4cf71bf9db388c4d2e6b1eefdd688e1675a13524", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/_static/branch_dropdown.png": { + "a": 0, + "d": 0 + }, + "doc/_static/forking_button.png": { + "a": 0, + "d": 0 + }, + "doc/_static/pull_button.png": { + "a": 0, + "d": 0 + }, + "logo/LICENSE": { + "a": 397, + "d": 0 + }, + "logo/README.md": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10370.json b/doc/sphinxext/prs/10370.json new file mode 100644 index 00000000000..b4da9788a24 --- /dev/null +++ b/doc/sphinxext/prs/10370.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4bbc11e09844dca7211994a217b30dcbf889146f", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/dipole.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10371.json b/doc/sphinxext/prs/10371.json new file mode 100644 index 00000000000..9c3aea9f718 --- /dev/null +++ b/doc/sphinxext/prs/10371.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "082c8ad100343fda87bde4246c03d559832e2d26", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 10, + "d": 17 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 17, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10373.json b/doc/sphinxext/prs/10373.json new file mode 100644 index 00000000000..bed3d4f6fb4 --- /dev/null +++ b/doc/sphinxext/prs/10373.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "4a24000b81373badd378b78b9e930c32ac2c2acf", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/visualization/brain.py": { + "a": 23, + "d": 6 + }, + "mne/dipole.py": { + "a": 1, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 21, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 16, + "d": 21 + }, + "mne/viz/_brain/_brain.py": { + "a": 125, + "d": 23 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 25, + "d": 1 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10375.json b/doc/sphinxext/prs/10375.json new file mode 100644 index 00000000000..21f757d2740 --- /dev/null +++ b/doc/sphinxext/prs/10375.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4cca31d0c84391531fb60b87d33d98a3af0022e7", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 25, + "d": 57 + }, + "mne/utils/docs.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10376.json b/doc/sphinxext/prs/10376.json new file mode 100644 index 00000000000..f1ffc744dbe --- /dev/null +++ b/doc/sphinxext/prs/10376.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "761b03c0e1f385fc9d6cebce9ad01f9818e47f42", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 3, + "d": 3 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10377.json b/doc/sphinxext/prs/10377.json new file mode 100644 index 00000000000..c573556ee7f --- /dev/null +++ b/doc/sphinxext/prs/10377.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "e7f8ccc24f1a1f85ac929f967f290b49f1b908e9", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/coreg.py": { + "a": 4, + "d": 4 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 7, + "d": 3 + }, + "mne/gui/_coreg.py": { + "a": 61, + "d": 28 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 45, + "d": 9 + }, + "mne/surface.py": { + "a": 99, + "d": 18 + }, + "mne/viz/_3d.py": { + "a": 25, + "d": 14 + }, + "mne/viz/backends/_pyvista.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10378.json b/doc/sphinxext/prs/10378.json new file mode 100644 index 00000000000..6489c42ddaf --- /dev/null +++ b/doc/sphinxext/prs/10378.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "f6432882738d000c79778906bcbaace7a3ff940e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 58, + "d": 41 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 
1, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/_qt.py": { + "a": 33, + "d": 9 + }, + "server_environment.yml": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10379.json b/doc/sphinxext/prs/10379.json new file mode 100644 index 00000000000..1003ac2bad4 --- /dev/null +++ b/doc/sphinxext/prs/10379.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "6d0aa2572221c9a710c5965fab96042f3cf9ad3c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alex", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 168, + "d": 114 + }, + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 19, + "d": 15 + }, + "mne/tests/test_transforms.py": { + "a": 5, + "d": 2 + }, + "mne/transforms.py": { + "a": 19, + "d": 2 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1038.json b/doc/sphinxext/prs/1038.json new file mode 100644 index 00000000000..54209336431 --- /dev/null +++ b/doc/sphinxext/prs/1038.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8850d07c13de05cd270abea6843d5b54f3737d7d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/cite.rst": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10381.json b/doc/sphinxext/prs/10381.json new file mode 100644 index 00000000000..236e7caeb71 --- /dev/null +++ b/doc/sphinxext/prs/10381.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3c3c87670976714a05265d806b0e1834ea875585", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10384.json b/doc/sphinxext/prs/10384.json new file mode 100644 index 00000000000..9a1b4f58ab0 --- /dev/null +++ b/doc/sphinxext/prs/10384.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f1c7fc696ea3b8fa48bdee8f89d949766975907e", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10385.json b/doc/sphinxext/prs/10385.json new file mode 100644 index 00000000000..209ffd1f688 --- /dev/null +++ b/doc/sphinxext/prs/10385.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3a79a2319fafe53dd1155de235d07959b48d8b23", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 23, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10386.json b/doc/sphinxext/prs/10386.json new file mode 100644 index 00000000000..8a46a0c6a01 --- /dev/null +++ b/doc/sphinxext/prs/10386.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0799d4c68e68ed0f226cbc33c03936ccc0a5a313", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "tutorials/io/30_reading_fnirs_data.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10388.json b/doc/sphinxext/prs/10388.json new file mode 100644 index 
00000000000..1f379f8e62d --- /dev/null +++ b/doc/sphinxext/prs/10388.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "61f10d539fcb3e95943eb1462771a3538d53a772", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 3, + "d": 2 + }, + "mne/gui/_coreg.py": { + "a": 4, + "d": 9 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 2, + "d": 1 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 8, + "d": 0 + }, + "mne/utils/misc.py": { + "a": 11, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 5, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 4, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 20, + "d": 4 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1039.json b/doc/sphinxext/prs/1039.json new file mode 100644 index 00000000000..d732f2f8548 --- /dev/null +++ b/doc/sphinxext/prs/1039.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b0892d20b8e4408714946ea16e68932ff5a0d1ab", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 11, + "d": 10 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10392.json b/doc/sphinxext/prs/10392.json new file mode 100644 index 00000000000..3ad079dd552 --- /dev/null +++ b/doc/sphinxext/prs/10392.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5e101bc0ff80b8ee022f24de00e1c4c9ebaae00b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10393.json b/doc/sphinxext/prs/10393.json new file mode 100644 index 00000000000..f396886c453 --- /dev/null +++ b/doc/sphinxext/prs/10393.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "373f63591de506c6f173e8598fb1c20dbbe85539", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 31, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10395.json b/doc/sphinxext/prs/10395.json new file mode 100644 index 00000000000..1e0a75e380c --- /dev/null +++ b/doc/sphinxext/prs/10395.json @@ -0,0 +1,687 @@ +{ + "merge_commit_sha": "6608b720b214529732542cb2aac11b401770eddf", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "examples/datasets/brainstorm_data.py": { + "a": 1, + "d": 0 + }, + "examples/datasets/hf_sef_data.py": { + "a": 1, + "d": 0 + }, + "examples/datasets/limo_data.py": { + "a": 1, + "d": 0 + }, + "examples/datasets/opm_data.py": { + "a": 1, + "d": 0 + }, + "examples/datasets/spm_faces_dataset_sgskip.py": { + "a": 1, + "d": 0 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 1, + "d": 0 + }, + 
"examples/decoding/decoding_csp_timefreq.py": { + "a": 1, + "d": 0 + }, + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_spatio_temporal_source.py": { + "a": 2, + "d": 1 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 1, + "d": 0 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 3, + "d": 0 + }, + "examples/decoding/decoding_unsupervised_spatial_filter.py": { + "a": 3, + "d": 0 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 1, + "d": 0 + }, + "examples/decoding/ems_filtering.py": { + "a": 1, + "d": 0 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 1, + "d": 0 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 3, + "d": 0 + }, + "examples/forward/forward_sensitivity_maps.py": { + "a": 1, + "d": 0 + }, + "examples/forward/left_cerebellum_volume_source.py": { + "a": 3, + "d": 0 + }, + "examples/forward/source_space_morphing.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/compute_mne_inverse_epochs_in_label.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/compute_mne_inverse_volume.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/custom_inverse_solver.py": { + "a": 2, + "d": 0 + }, + "examples/inverse/dics_source_power.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/gamma_map_inverse.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/label_activation_from_stc.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/label_from_stc.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/label_source_activations.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/mne_cov_power.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/morph_surface_stc.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/morph_volume_stc.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/multidict_reweighted_tfmxne.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/psf_ctf_label_leakage.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/psf_ctf_vertices.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/psf_ctf_vertices_lcmv.py": { + "a": 2, + "d": 0 + }, + "examples/inverse/rap_music.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/read_inverse.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/read_stc.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/resolution_metrics.py": { + "a": 5, + "d": 2 + }, + "examples/inverse/resolution_metrics_eegmeg.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/snr_estimate.py": { + "a": 2, + "d": 0 + }, + "examples/inverse/source_space_snr.py": { + "a": 4, + "d": 2 + }, + "examples/inverse/time_frequency_mixed_norm_inverse.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/vector_mne_solution.py": { + "a": 1, + "d": 1 + }, + "examples/io/elekta_epochs.py": { + "a": 1, + "d": 0 + }, + "examples/io/read_neo_format.py": { + "a": 3, + "d": 1 + }, + "examples/io/read_noise_covariance_matrix.py": { + "a": 3, + "d": 0 + }, + "examples/io/read_xdf.py": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/css.py": { + "a": 3, + "d": 0 + }, + "examples/preprocessing/define_target_events.py": { + "a": 3, + "d": 0 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 3, + "d": 0 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 3, + "d": 0 + }, + 
"examples/preprocessing/find_ref_artifacts.py": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/fnirs_artifact_removal.py": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/ica_comparison.py": { + "a": 3, + "d": 0 + }, + "examples/preprocessing/interpolate_bad_channels.py": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/movement_compensation.py": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/movement_detection.py": { + "a": 3, + "d": 2 + }, + "examples/preprocessing/muscle_detection.py": { + "a": 5, + "d": 2 + }, + "examples/preprocessing/otp.py": { + "a": 3, + "d": 0 + }, + "examples/preprocessing/shift_evoked.py": { + "a": 3, + "d": 0 + }, + "examples/preprocessing/virtual_evoked.py": { + "a": 3, + "d": 0 + }, + "examples/preprocessing/xdawn_denoising.py": { + "a": 1, + "d": 0 + }, + "examples/simulation/simulate_evoked_data.py": { + "a": 3, + "d": 0 + }, + "examples/simulation/simulate_raw_data.py": { + "a": 3, + "d": 0 + }, + "examples/simulation/simulated_raw_data_using_subject_anatomy.py": { + "a": 3, + "d": 0 + }, + "examples/simulation/source_simulator.py": { + "a": 3, + "d": 0 + }, + "examples/stats/cluster_stats_evoked.py": { + "a": 16, + "d": 14 + }, + "examples/stats/fdr_stats_evoked.py": { + "a": 3, + "d": 0 + }, + "examples/stats/linear_regression_raw.py": { + "a": 3, + "d": 0 + }, + "examples/stats/sensor_permutation_test.py": { + "a": 3, + "d": 0 + }, + "examples/stats/sensor_regression.py": { + "a": 5, + "d": 2 + }, + "examples/time_frequency/compute_csd.py": { + "a": 5, + "d": 2 + }, + "examples/time_frequency/compute_source_psd_epochs.py": { + "a": 3, + "d": 0 + }, + "examples/time_frequency/source_label_time_frequency.py": { + "a": 3, + "d": 0 + }, + "examples/time_frequency/source_power_spectrum.py": { + "a": 3, + "d": 0 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 1, + "d": 0 + }, + "examples/time_frequency/source_space_time_frequency.py": { + "a": 3, + "d": 0 + }, + "examples/time_frequency/temporal_whitening.py": { + "a": 3, + "d": 0 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 3, + "d": 0 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 1, + "d": 0 + }, + "examples/time_frequency/time_frequency_simulated.py": { + "a": 3, + "d": 0 + }, + "examples/visualization/3d_to_2d.py": { + "a": 1, + "d": 0 + }, + "examples/visualization/brain.py": { + "a": 1, + "d": 0 + }, + "examples/visualization/channel_epochs_image.py": { + "a": 3, + "d": 0 + }, + "examples/visualization/eeg_on_scalp.py": { + "a": 1, + "d": 0 + }, + "examples/visualization/eeglab_head_sphere.py": { + "a": 1, + "d": 0 + }, + "examples/visualization/evoked_arrowmap.py": { + "a": 3, + "d": 0 + }, + "examples/visualization/evoked_whitening.py": { + "a": 1, + "d": 0 + }, + "examples/visualization/meg_sensors.py": { + "a": 1, + "d": 0 + }, + "examples/visualization/mne_helmet.py": { + "a": 4, + "d": 0 + }, + "examples/visualization/parcellation.py": { + "a": 2, + "d": 0 + }, + "examples/visualization/publication_figure.py": { + "a": 1, + "d": 0 + }, + "examples/visualization/roi_erpimage_by_rt.py": { + "a": 3, + "d": 0 + }, + "examples/visualization/sensor_noise_level.py": { + "a": 2, + "d": 0 + }, + "examples/visualization/ssp_projs_sensitivity_map.py": { + "a": 3, + "d": 0 + }, + "examples/visualization/topo_compare_conditions.py": { + "a": 3, + "d": 0 + }, + "examples/visualization/topo_customized.py": { + "a": 3, + "d": 0 + }, + "examples/visualization/xhemi.py": { + "a": 2, + "d": 0 + }, + 
"tutorials/clinical/20_seeg.py": { + "a": 1, + "d": 0 + }, + "tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 0 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 1, + "d": 0 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/50_epochs_to_data_frame.py": { + "a": 2, + "d": 0 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 2, + "d": 0 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 2, + "d": 0 + }, + "tutorials/evoked/30_eeg_erp.py": { + "a": 2, + "d": 0 + }, + "tutorials/evoked/40_whitened.py": { + "a": 4, + "d": 0 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 1, + "d": 0 + }, + "tutorials/forward/25_automated_coreg.py": { + "a": 3, + "d": 0 + }, + "tutorials/forward/30_forward.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 1, + "d": 0 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 0 + }, + "tutorials/intro/15_inplace.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 1, + "d": 0 + }, + "tutorials/intro/30_info.py": { + "a": 1, + "d": 0 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 1, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 0 + }, + "tutorials/inverse/10_stc_class.py": { + "a": 2, + "d": 0 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 2, + "d": 0 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 2, + "d": 0 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 1, + "d": 0 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 4, + "d": 0 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 2, + "d": 0 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 1, + "d": 0 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 0 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 1, + "d": 0 + }, + "tutorials/inverse/90_phantom_4DBTi.py": { + "a": 1, + "d": 0 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 2, + "d": 0 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 4, + "d": 1 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 2, + "d": 0 + }, + 
"tutorials/raw/10_raw_overview.py": { + "a": 1, + "d": 0 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 1, + "d": 0 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 1, + "d": 0 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 1, + "d": 0 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 2, + "d": 0 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 4, + "d": 0 + }, + "tutorials/simulation/80_dics.py": { + "a": 3, + "d": 0 + }, + "tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 5, + "d": 2 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 3, + "d": 0 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 1, + "d": 2 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 3, + "d": 0 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 3, + "d": 0 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 3, + "d": 0 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 3, + "d": 0 + }, + "tutorials/stats-source-space/70_cluster_rmANOVA_time_freq.py": { + "a": 1, + "d": 0 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 1, + "d": 0 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10396.json b/doc/sphinxext/prs/10396.json new file mode 100644 index 00000000000..b0e7d3a878e --- /dev/null +++ b/doc/sphinxext/prs/10396.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bbcd975b1288a1a5a34088fed57caa67988d5922", + "authors": [ + { + "n": "rcmdnk", + "e": "rcmdnk@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/annotations.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_annotations.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10397.json b/doc/sphinxext/prs/10397.json new file mode 100644 index 00000000000..173a3fe2cb1 --- /dev/null +++ b/doc/sphinxext/prs/10397.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "195df2ac9f88c03913f6292c99a7addfb3a99ecb", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_gui_api.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 12, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10399.json b/doc/sphinxext/prs/10399.json new file mode 100644 index 00000000000..aada5448cd3 --- /dev/null +++ b/doc/sphinxext/prs/10399.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7214707dad2df89df646e523426a5df33bf4bf0a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/config.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1040.json b/doc/sphinxext/prs/1040.json new file mode 100644 index 00000000000..611d86cb8b8 --- /dev/null +++ b/doc/sphinxext/prs/1040.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cf7b40bd97def4396662406a3d3941e846683a93", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 74, + "d": 54 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10400.json b/doc/sphinxext/prs/10400.json new file mode 100644 index 00000000000..bcda1f124eb --- /dev/null +++ b/doc/sphinxext/prs/10400.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d25c87d65238e4a0b1f583be1dde2defcff0a5c6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 0, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 12, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10401.json b/doc/sphinxext/prs/10401.json new file mode 100644 index 00000000000..c672202aa39 --- /dev/null +++ b/doc/sphinxext/prs/10401.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "7a1e900ad1db8a271142c32a8d7e5dd8d0e38ca7", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 7, + "d": 1 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 1, + "d": 2 + }, + "mne/stats/cluster_level.py": { + "a": 19, + "d": 16 + }, + "mne/stats/parametric.py": { + "a": 6, + "d": 3 + }, + "mne/stats/tests/test_parametric.py": { + "a": 6, + "d": 0 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 9, + "d": 9 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 149, + "d": 8 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/70_cluster_rmANOVA_time_freq.py": { + "a": 54, + "d": 50 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10406.json b/doc/sphinxext/prs/10406.json new file mode 100644 index 00000000000..9d7a5f0f887 --- /dev/null +++ b/doc/sphinxext/prs/10406.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6fa13b73caacccdf1487035fbc9190395a00a981", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/check.py": { + "a": 40, + "d": 5 + }, + "mne/utils/tests/test_check.py": { + "a": 53, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10407.json b/doc/sphinxext/prs/10407.json new file mode 100644 index 00000000000..0897a9e4b93 --- /dev/null +++ b/doc/sphinxext/prs/10407.json @@ -0,0 +1,211 @@ +{ + "merge_commit_sha": "31a83063557fbd54d898f00f9527ffc547888395", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 2, + "d": 2 + }, + "mne/baseline.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 9, + "d": 8 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 13, + "d": 13 + }, + "mne/cov.py": { + "a": 28, + "d": 28 + }, + "mne/decoding/csp.py": { + "a": 7, + "d": 7 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + 
"mne/epochs.py": { + "a": 34, + "d": 34 + }, + "mne/evoked.py": { + "a": 12, + "d": 12 + }, + "mne/export/_egimff.py": { + "a": 1, + "d": 1 + }, + "mne/export/_export.py": { + "a": 7, + "d": 7 + }, + "mne/filter.py": { + "a": 14, + "d": 14 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 23, + "d": 23 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/proj.py": { + "a": 5, + "d": 5 + }, + "mne/io/reference.py": { + "a": 4, + "d": 4 + }, + "mne/label.py": { + "a": 11, + "d": 11 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 21, + "d": 21 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/ecg.py": { + "a": 10, + "d": 10 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 38, + "d": 38 + }, + "mne/rank.py": { + "a": 5, + "d": 5 + }, + "mne/report/report.py": { + "a": 40, + "d": 40 + }, + "mne/source_estimate.py": { + "a": 21, + "d": 21 + }, + "mne/stats/cluster_level.py": { + "a": 44, + "d": 44 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 26, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 7, + "d": 7 + }, + "mne/utils/docs.py": { + "a": 2845, + "d": 2463 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 4, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 12, + "d": 12 + }, + "mne/viz/evoked.py": { + "a": 6, + "d": 6 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/montage.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 13, + "d": 13 + }, + "mne/viz/topomap.py": { + "a": 66, + "d": 66 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10408.json b/doc/sphinxext/prs/10408.json new file mode 100644 index 00000000000..7d4bb617583 --- /dev/null +++ b/doc/sphinxext/prs/10408.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "67eccfbcb3e7e3f9634f12fdf6c3f4f16f91feca", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/snirf/_snirf.py": { + "a": 20, + "d": 5 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 21, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10409.json b/doc/sphinxext/prs/10409.json new file mode 100644 index 00000000000..e017149f624 --- /dev/null +++ b/doc/sphinxext/prs/10409.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b2e3005f1a2ec31d57027eef0ef434c70c2633d1", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1041.json b/doc/sphinxext/prs/1041.json new file mode 100644 index 00000000000..e312e248c32 --- /dev/null +++ b/doc/sphinxext/prs/1041.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cd1bc42cbc0b9e7e46029302f674b357b6ba4027", + 
"authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/proj.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10411.json b/doc/sphinxext/prs/10411.json new file mode 100644 index 00000000000..1465d55fb69 --- /dev/null +++ b/doc/sphinxext/prs/10411.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "84ec00016f7ae354ef4bbbe7aeb4c5e91cfd9b64", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".github/workflows/linux_conda.yml": { + "a": 44, + "d": 3 + }, + "azure-pipelines.yml": { + "a": 0, + "d": 46 + }, + "server_environment.yml": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10412.json b/doc/sphinxext/prs/10412.json new file mode 100644 index 00000000000..207e43cd7a1 --- /dev/null +++ b/doc/sphinxext/prs/10412.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "a93c198d3d6fd9bad27c3f6d4fb879db980330ef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "logo/generate_mne_logos.py": { + "a": 22, + "d": 7 + }, + "mne/gui/_coreg.py": { + "a": 3, + "d": 1 + }, + "mne/icons/mne-splash.png": { + "a": 0, + "d": 0 + }, + "mne/icons/mne.qrc": { + "a": 1, + "d": 0 + }, + "mne/icons/resources.py": { + "a": 2950, + "d": 1127 + }, + "mne/viz/_figure.py": { + "a": 3, + "d": 0 + }, + "mne/viz/_mpl_figure.py": { + "a": 4, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 15, + "d": 5 + }, + "mne/viz/backends/_qt.py": { + "a": 11, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 36, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10414.json b/doc/sphinxext/prs/10414.json new file mode 100644 index 00000000000..7909ee40dce --- /dev/null +++ b/doc/sphinxext/prs/10414.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6e2c52c592be02de3c798c98b21cfdc1d6e264ad", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_qt.py": { + "a": 4, + "d": 28 + }, + "mne/viz/backends/_utils.py": { + "a": 29, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10415.json b/doc/sphinxext/prs/10415.json new file mode 100644 index 00000000000..fdcf535fc80 --- /dev/null +++ b/doc/sphinxext/prs/10415.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fec2972c6257b358d0bfbd0d35a59e7e83630158", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10416.json b/doc/sphinxext/prs/10416.json new file mode 100644 index 00000000000..a51bea3a7d4 --- /dev/null +++ b/doc/sphinxext/prs/10416.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c7df0d91cae4b87958f21f0bbfc944ab62ba8763", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 31, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10417.json b/doc/sphinxext/prs/10417.json new file mode 100644 index 00000000000..539cffa2d1f --- /dev/null +++ b/doc/sphinxext/prs/10417.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "8f46d3ea80dabdf7f6499acfc7d114ef8a32302b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + 
"changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 13, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 5 + }, + "mne/viz/_figure.py": { + "a": 1, + "d": 3 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 5, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 6, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10418.json b/doc/sphinxext/prs/10418.json new file mode 100644 index 00000000000..9037dfd9616 --- /dev/null +++ b/doc/sphinxext/prs/10418.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "20366bb72785fe7f5cf08cdf4783503e3a312edf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 14, + "d": 2 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 3, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 12, + "d": 5 + }, + "mne/viz/_brain/_brain.py": { + "a": 9, + "d": 6 + }, + "mne/viz/_figure.py": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 3, + "d": 6 + }, + "mne/viz/backends/_utils.py": { + "a": 39, + "d": 22 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10421.json b/doc/sphinxext/prs/10421.json new file mode 100644 index 00000000000..f3abd99a7aa --- /dev/null +++ b/doc/sphinxext/prs/10421.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5704330f7d9cb549427a9861b2489d29eebe2f45", + "authors": [ + { + "n": "Joshua Teves", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/source_space.rst": { + "a": 1, + "d": 0 + }, + "mne/source_space.py": { + "a": 37, + "d": 0 + }, + "mne/tests/test_source_space.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10423.json b/doc/sphinxext/prs/10423.json new file mode 100644 index 00000000000..9cdea544034 --- /dev/null +++ b/doc/sphinxext/prs/10423.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0ac9eba4c359ea8aaaa8b05d2c83114286d4da2d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/_ieeg_locate_gui.py": { + "a": 4, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10424.json b/doc/sphinxext/prs/10424.json new file mode 100644 index 00000000000..7568f7f106a --- /dev/null +++ b/doc/sphinxext/prs/10424.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "40e7e2ab9afc007e58cb03269c7738e9ee010532", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/linux_conda.yml": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10425.json b/doc/sphinxext/prs/10425.json new file mode 100644 index 00000000000..2455f4b6fd3 --- /dev/null +++ b/doc/sphinxext/prs/10425.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1b14d458cbcd391a6907032b809ec37410b60592", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/utils/_testing.py": { + "a": 5, + "d": 2 + }, + "tools/azure_dependencies.sh": { + "a": 3, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10429.json b/doc/sphinxext/prs/10429.json new file mode 100644 index 00000000000..34be3081136 --- /dev/null +++ b/doc/sphinxext/prs/10429.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8507b1c3f7ff7457f9cd429be914dcca0f6dc9c5", + "authors": [ + { + "n": "Matthias Eberlein", + "e": "matthias.eberlein@tu-dresden.de" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nihon/nihon.py": { + "a": 29, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1043.json b/doc/sphinxext/prs/1043.json new file mode 100644 index 00000000000..4ef3a7013e9 --- /dev/null +++ b/doc/sphinxext/prs/1043.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f2b834b0f1da448ad448f95dc920b9c3f7205ee3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".coveragerc": { + "a": 8, + "d": 0 + }, + ".travis.yml": { + "a": 26, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10430.json b/doc/sphinxext/prs/10430.json new file mode 100644 index 00000000000..2d4468fb39d --- /dev/null +++ b/doc/sphinxext/prs/10430.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "af685332c76332e6ab50aeb401276a89c9e3cce1", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 40, + "d": 44 + }, + "doc/install/check_installation.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 9, + "d": 9 + }, + "mne/gui/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 17, + "d": 17 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 13, + "d": 11 + }, + "mne/utils/config.py": { + "a": 13, + "d": 5 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_qt.py": { + "a": 24, + "d": 19 + }, + "mne/viz/backends/_utils.py": { + "a": 6, + "d": 7 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 9, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10431.json b/doc/sphinxext/prs/10431.json new file 
mode 100644 index 00000000000..905c854a085 --- /dev/null +++ b/doc/sphinxext/prs/10431.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "eb1ed981fcab62c0892c02cae9fa087bc08bc522", + "authors": [ + { + "n": "Matthias Eberlein", + "e": "matthias.eberlein@tu-dresden.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nihon/nihon.py": { + "a": 4, + "d": 5 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10432.json b/doc/sphinxext/prs/10432.json new file mode 100644 index 00000000000..d4b17c01c9d --- /dev/null +++ b/doc/sphinxext/prs/10432.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "400de94d556e9ec6c6f6308024a6e3417e54ba9a", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 1, + "d": 100 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 15 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 3, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 106, + "d": 6 + }, + "mne/io/tests/test_meas_info.py": { + "a": 22, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10433.json b/doc/sphinxext/prs/10433.json new file mode 100644 index 00000000000..e9579508c5e --- /dev/null +++ b/doc/sphinxext/prs/10433.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2427d3b2f1a75a92664e919bb5a7071df8ea92fd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10434.json b/doc/sphinxext/prs/10434.json new file mode 100644 index 00000000000..3b2ccacc8da --- /dev/null +++ b/doc/sphinxext/prs/10434.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2e0753da5c0fee1768bda1a77a72f9b67e9ae863", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10436.json b/doc/sphinxext/prs/10436.json new file mode 100644 index 00000000000..81c28924d75 --- /dev/null +++ b/doc/sphinxext/prs/10436.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "38caebe4dfd9b96a9fa0619675b8df9b7bb46808", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/_static/mne_installer_console.png": { + "a": 0, + "d": 0 + }, + "doc/_static/mne_installer_macOS.png": { + "a": 0, + "d": 0 + }, + "doc/conf.py": { + "a": 4, + "d": 1 + }, + "doc/index.rst": { + "a": 5, + "d": 5 + }, + "doc/install/advanced.rst": { + "a": 48, + "d": 19 + }, + "doc/install/check_installation.rst": { + "a": 77, + "d": 0 + }, + "doc/install/index.rst": { + "a": 10, + "d": 53 + }, + "doc/install/install_type_selector.html": { + "a": 40, + "d": 0 + }, + "doc/install/installers.rst": { + "a": 62, + "d": 0 + }, + "doc/install/installers_platform_selector.html": { + "a": 79, + "d": 0 + }, + "doc/install/manual_install.rst": { + "a": 115, + "d": 0 + }, + "doc/install/manual_install_python.rst": { + "a": 3, + "d": 1 + }, + "doc/install/mne_python.rst": { + "a": 0, + "d": 257 + }, + "doc/install/mne_tools_suite.rst": { + 
"a": 2, + "d": 1 + }, + "doc/install/updating.rst": { + "a": 8, + "d": 0 + }, + "doc/links.inc": { + "a": 5, + "d": 2 + }, + "doc/overview/faq.rst": { + "a": 1, + "d": 2 + }, + "doc/overview/get_help.rst": { + "a": 1, + "d": 1 + }, + "doc/overview/index.rst": { + "a": 3, + "d": 4 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10438.json b/doc/sphinxext/prs/10438.json new file mode 100644 index 00000000000..ee4e5f5155d --- /dev/null +++ b/doc/sphinxext/prs/10438.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "306adb67fcaadf88fc88011d74b18359a0ecd1ff", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 96, + "d": 50 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 0, + "d": 4 + }, + "mne/viz/backends/_abstract.py": { + "a": 1, + "d": 3 + }, + "mne/viz/backends/_notebook.py": { + "a": 154, + "d": 17 + }, + "mne/viz/backends/_qt.py": { + "a": 3, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10439.json b/doc/sphinxext/prs/10439.json new file mode 100644 index 00000000000..b5aed117f7b --- /dev/null +++ b/doc/sphinxext/prs/10439.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a6d407da0b263ee098357c15daf0040c463bab98", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/viz/circle.py": { + "a": 37, + "d": 33 + }, + "tutorials/clinical/20_seeg.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1044.json b/doc/sphinxext/prs/1044.json new file mode 100644 index 00000000000..cdad5102117 --- /dev/null +++ b/doc/sphinxext/prs/1044.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a4979c891b51adfa740efa46e05f253688cceeb9", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/epochs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10441.json b/doc/sphinxext/prs/10441.json new file mode 100644 index 00000000000..1f8a6502a65 --- /dev/null +++ b/doc/sphinxext/prs/10441.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f3fc6e4093cefb70488e705e9b3cb41d0cdf8e59", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/viz/circle.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10442.json b/doc/sphinxext/prs/10442.json new file mode 100644 index 00000000000..d0f95c5d874 --- /dev/null +++ b/doc/sphinxext/prs/10442.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "634544ba9a003fb6635ae1bdd89b7a15aac4c22e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/coreg.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_coreg.py": { + "a": 11, + "d": 8 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10443.json b/doc/sphinxext/prs/10443.json new file mode 100644 index 00000000000..3d09dde3234 --- /dev/null +++ b/doc/sphinxext/prs/10443.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6faab5f33e66d9fc68b1990ccc4c45cc676dd84a", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + 
"mne/viz/circle.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10444.json b/doc/sphinxext/prs/10444.json new file mode 100644 index 00000000000..9c23017e620 --- /dev/null +++ b/doc/sphinxext/prs/10444.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "45f6c2f60312aab09d5c7c88b17114b0eabab116", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/psf_ctf_label_leakage.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10448.json b/doc/sphinxext/prs/10448.json new file mode 100644 index 00000000000..a997febc525 --- /dev/null +++ b/doc/sphinxext/prs/10448.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c20d4c7e78e1cd9ad2a70c6160133129b3d20596", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/check_installation.rst": { + "a": 1, + "d": 1 + }, + "doc/install/installers.rst": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10450.json b/doc/sphinxext/prs/10450.json new file mode 100644 index 00000000000..37fefdaa147 --- /dev/null +++ b/doc/sphinxext/prs/10450.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "38ffd9b1edff7ecf1ed507dfc9c9a4cf3730451c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/compat_minimal.yml": { + "a": 1, + "d": 2 + }, + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 2 + }, + ".github/workflows/linux_conda.yml": { + "a": 1, + "d": 2 + }, + ".github/workflows/linux_pip.yml": { + "a": 1, + "d": 2 + }, + ".github/workflows/macos_conda.yml": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10452.json b/doc/sphinxext/prs/10452.json new file mode 100644 index 00000000000..b7645f3b8b0 --- /dev/null +++ b/doc/sphinxext/prs/10452.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cd5a5608d2183efdab2625627584913cdeddbba6", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10455.json b/doc/sphinxext/prs/10455.json new file mode 100644 index 00000000000..8481a436cc6 --- /dev/null +++ b/doc/sphinxext/prs/10455.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8fde21e3da598dd53c01cd23e1b5bb24d6ecf6df", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/_notebook.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10456.json b/doc/sphinxext/prs/10456.json new file mode 100644 index 00000000000..f0fe6cf5fc4 --- /dev/null +++ b/doc/sphinxext/prs/10456.json @@ -0,0 +1,199 @@ +{ + "merge_commit_sha": "4301092691d279314d3a46ee58de66a6f3a6152a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_gui_api.py": { + "a": 0, + "d": 3 + }, + "mne/icons/README.rst": { + "a": 0, + "d": 12 + }, + "mne/icons/clear-black-18dp.svg": { + "a": 0, + "d": 1 + }, + "mne/icons/dark/actions/clear.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/dark/actions/help.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/dark/actions/movie.svg": { + 
"a": 1, + "d": 0 + }, + "mne/icons/dark/actions/pause.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/dark/actions/play.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/dark/actions/reset.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/dark/actions/restore.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/dark/actions/scale.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/dark/actions/screenshot.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/dark/actions/visibility_off.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/dark/actions/visibility_on.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/dark/index.theme": { + "a": 11, + "d": 0 + }, + "mne/icons/help-black-18dp.svg": { + "a": 0, + "d": 1 + }, + "mne/icons/light/actions/clear.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/help.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/movie.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/pause.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/play.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/reset.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/restore.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/scale.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/screenshot.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/visibility_off.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/visibility_on.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/index.theme": { + "a": 11, + "d": 0 + }, + "mne/icons/mne.qrc": { + "a": 0, + "d": 18 + }, + "mne/icons/mne_bigsur_icon.png": { + "a": 0, + "d": 0 + }, + "mne/icons/mne_default_icon.png": { + "a": 0, + "d": 0 + }, + "mne/icons/mne_splash.png": { + "a": 0, + "d": 0 + }, + "mne/icons/movie-black-18dp.svg": { + "a": 0, + "d": 72 + }, + "mne/icons/pause-black-18dp.svg": { + "a": 0, + "d": 1 + }, + "mne/icons/play-black-18dp.svg": { + "a": 0, + "d": 1 + }, + "mne/icons/reset-black-18dp.svg": { + "a": 0, + "d": 1 + }, + "mne/icons/resources.py": { + "a": 0, + "d": 6075 + }, + "mne/icons/restore-black-18dp.svg": { + "a": 0, + "d": 1 + }, + "mne/icons/scale-black-18dp.svg": { + "a": 0, + "d": 1 + }, + "mne/icons/screenshot-black-18dp.svg": { + "a": 0, + "d": 1 + }, + "mne/icons/visibility_off-black-18dp.svg": { + "a": 0, + "d": 1 + }, + "mne/icons/visibility_on-black-18dp.svg": { + "a": 0, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 7 + }, + "mne/viz/backends/_abstract.py": { + "a": 0, + "d": 4 + }, + "mne/viz/backends/_notebook.py": { + "a": 0, + "d": 3 + }, + "mne/viz/backends/_qt.py": { + "a": 26, + "d": 25 + }, + "mne/viz/backends/_utils.py": { + "a": 30, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10457.json b/doc/sphinxext/prs/10457.json new file mode 100644 index 00000000000..39491ec6755 --- /dev/null +++ b/doc/sphinxext/prs/10457.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cf8543c8d609d717cd39d69d49431d3da73a7fdb", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 9, + "d": 3 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 113, + "d": 103 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10458.json b/doc/sphinxext/prs/10458.json new file mode 100644 index 00000000000..fbeae927ed2 --- /dev/null +++ b/doc/sphinxext/prs/10458.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "4359456cfc9debb581d290acf4b677f4c31bed92", + "authors": [ + { + "n": "Guillaume Favelier", + 
"e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 2, + "d": 0 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 0, + "d": 1 + }, + "mne/icons/dark/actions/folder.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/light/actions/folder.svg": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 0, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 8, + "d": 6 + }, + "mne/viz/backends/_notebook.py": { + "a": 37, + "d": 21 + }, + "mne/viz/backends/_qt.py": { + "a": 27, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10459.json b/doc/sphinxext/prs/10459.json new file mode 100644 index 00000000000..078d02faf59 --- /dev/null +++ b/doc/sphinxext/prs/10459.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "933e35c19b917989fe6fe8917118d9bddc2e7ae7", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 9, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 29, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10460.json b/doc/sphinxext/prs/10460.json new file mode 100644 index 00000000000..1638f76a6ae --- /dev/null +++ b/doc/sphinxext/prs/10460.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6bb14380fcb2108b336a0436a6f17d929735a0db", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 31, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 35, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10461.json b/doc/sphinxext/prs/10461.json new file mode 100644 index 00000000000..c164a8f336a --- /dev/null +++ b/doc/sphinxext/prs/10461.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cdbd0459b71be3e094d8c85badc69a33a5b68d2c", + "authors": [ + { + "n": "Simon Kern", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz/montage.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10462.json b/doc/sphinxext/prs/10462.json new file mode 100644 index 00000000000..3db4eca717e --- /dev/null +++ b/doc/sphinxext/prs/10462.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "091da8f01aeeecd7d583ba596cf5a85cd649f192", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "tutorials/preprocessing/25_background_filtering.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10463.json b/doc/sphinxext/prs/10463.json new file mode 100644 index 00000000000..81a9df7e8c3 --- /dev/null +++ b/doc/sphinxext/prs/10463.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "60eebfdff97028061647d96b2cfebbbf3a7eef16", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/install/advanced.rst": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10464.json b/doc/sphinxext/prs/10464.json new file mode 100644 index 00000000000..3475f9bcd7c --- /dev/null +++ b/doc/sphinxext/prs/10464.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "8233041319f97195db35db2461bf155cefe50f36", + 
"authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/multi_dipole_model.py": { + "a": 166, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 4, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 6, + "d": 0 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 4, + "d": 6 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 6, + "d": 6 + }, + "mne/minimum_norm/inverse.py": { + "a": 26, + "d": 6 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 19, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10473.json b/doc/sphinxext/prs/10473.json new file mode 100644 index 00000000000..a3362e3efb3 --- /dev/null +++ b/doc/sphinxext/prs/10473.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1a58d7c5def8ee46d939f461811ff8fbf1283538", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/evoked/30_eeg_erp.py": { + "a": 191, + "d": 180 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10478.json b/doc/sphinxext/prs/10478.json new file mode 100644 index 00000000000..65fc50ec326 --- /dev/null +++ b/doc/sphinxext/prs/10478.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2c3e97f4f8d5b6f8bc55679e8d26135064481105", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/install/installers.rst": { + "a": 1, + "d": 1 + }, + "doc/install/installers_platform_selector.html": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10479.json b/doc/sphinxext/prs/10479.json new file mode 100644 index 00000000000..42e6788ad28 --- /dev/null +++ b/doc/sphinxext/prs/10479.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d203c226bf9a6b74708ee9ebcea2bcc17eea7fb5", + "authors": [ + { + "n": "Sondre Foslien", + "e": null + } + ], + "changes": { + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1048.json b/doc/sphinxext/prs/1048.json new file mode 100644 index 00000000000..7e7ea4072cd --- /dev/null +++ b/doc/sphinxext/prs/1048.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2bd975aa3f048249bda8bba606a81f3dd1633b88", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/plot_evoked_topomap.py": { + "a": 4, + "d": 4 + }, + "mne/fiff/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_viz.py": { + "a": 12, + "d": 0 + }, + "mne/viz.py": { + "a": 15, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10484.json b/doc/sphinxext/prs/10484.json new file mode 100644 index 00000000000..c3177c9fa6e --- /dev/null +++ b/doc/sphinxext/prs/10484.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b226f7048243f620a66f34108aa865bbea3a1a8e", + "authors": [ + { + "n": "matthiasdold", + "e": "matthias.dold@gmx.net" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_pick.py": { + "a": 19, + "d": 0 + }, + "mne/preprocessing/_csd.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10485.json b/doc/sphinxext/prs/10485.json new file mode 100644 index 00000000000..3d2a88bb09c --- /dev/null +++ b/doc/sphinxext/prs/10485.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "c992168eb556526b06cbba494b6a938dbb3729f6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 7, + "d": 2 + }, + "mne/report/report.py": { + "a": 24, + "d": 11 + }, + "mne/report/tests/test_report.py": { + "a": 20, + "d": 2 + }, + "mne/utils/_bunch.py": { + "a": 4, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 1 + }, + "mne/viz/_scraper.py": { + "a": 41, + "d": 10 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 3 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 2 + }, + "tutorials/intro/70_report.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10487.json b/doc/sphinxext/prs/10487.json new file mode 100644 index 00000000000..e46dc45bce3 --- /dev/null +++ b/doc/sphinxext/prs/10487.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a191018c2a7732e8637f827dbf04d6e8fee3b405", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10488.json b/doc/sphinxext/prs/10488.json new file mode 100644 index 00000000000..1bbca3a762e --- /dev/null +++ b/doc/sphinxext/prs/10488.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2fb83987ef0999788b7aede3091753583491b57d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "mne/datasets/config.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/30_ecog.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10489.json b/doc/sphinxext/prs/10489.json new file mode 100644 index 00000000000..c26c840592c --- /dev/null +++ b/doc/sphinxext/prs/10489.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2052b15cf1194bfa19c1b168b8b530c82d67f839", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + 
"mne/datasets/_fsaverage/base.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/_phantom/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/config.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1049.json b/doc/sphinxext/prs/1049.json new file mode 100644 index 00000000000..d3a446daba1 --- /dev/null +++ b/doc/sphinxext/prs/1049.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bc16701b5078bab2753ee896801256ac57d30ca5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10491.json b/doc/sphinxext/prs/10491.json new file mode 100644 index 00000000000..6d7935b61b5 --- /dev/null +++ b/doc/sphinxext/prs/10491.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "2f53da713804847cc4e122b3ce1f784470258280", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 10, + "d": 2 + }, + "mne/io/base.py": { + "a": 8, + "d": 2 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 61, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10492.json b/doc/sphinxext/prs/10492.json new file mode 100644 index 00000000000..d3d4539d926 --- /dev/null +++ b/doc/sphinxext/prs/10492.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7796a9316c825ae007be3476f64eae4e0e1dde49", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10493.json b/doc/sphinxext/prs/10493.json new file mode 100644 index 00000000000..e53147312d0 --- /dev/null +++ b/doc/sphinxext/prs/10493.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "93dc7dc9022ed861ddb2afda73143efbbaf6bf4a", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/visualization/brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 14 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10495.json b/doc/sphinxext/prs/10495.json new file mode 100644 index 00000000000..6d80aaea2bc --- /dev/null +++ b/doc/sphinxext/prs/10495.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7a8d41e7175db1b6744d20f5670f90190329a1c9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 97, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_scraper.py": { + "a": 4, + "d": 1 + }, + "requirements_testing.txt": { + "a": 0, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10497.json b/doc/sphinxext/prs/10497.json new 
file mode 100644 index 00000000000..860c02e8d0a --- /dev/null +++ b/doc/sphinxext/prs/10497.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "eea8ea65892163cfbbb10f51aa74d4dd09dfd222", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "tutorials/evoked/10_evoked_overview.py": { + "a": 64, + "d": 64 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10498.json b/doc/sphinxext/prs/10498.json new file mode 100644 index 00000000000..306270740ab --- /dev/null +++ b/doc/sphinxext/prs/10498.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5a6b5fc08bfa95642fdeb22d6e2a477845528dfb", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "tutorials/evoked/20_visualize_evoked.py": { + "a": 62, + "d": 65 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10500.json b/doc/sphinxext/prs/10500.json new file mode 100644 index 00000000000..66c43982755 --- /dev/null +++ b/doc/sphinxext/prs/10500.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "c276f61c8ed23473834eea88af6e13966e0b834e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/icons/toolbar_move_horizontal@2x.png": { + "a": 0, + "d": 0 + }, + "mne/icons/toolbar_move_vertical@2x.png": { + "a": 0, + "d": 0 + }, + "mne/icons/toolbar_separator_horizontal.png": { + "a": 0, + "d": 0 + }, + "mne/icons/toolbar_separator_horizontal@2x.png": { + "a": 0, + "d": 0 + }, + "mne/icons/toolbar_separator_vertical@2x.png": { + "a": 0, + "d": 0 + }, + "mne/viz/_scraper.py": { + "a": 2, + "d": 9 + }, + "mne/viz/backends/_utils.py": { + "a": 67, + "d": 6 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 40, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10501.json b/doc/sphinxext/prs/10501.json new file mode 100644 index 00000000000..129965429f3 --- /dev/null +++ b/doc/sphinxext/prs/10501.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "d0fc8a2acca1ee7c601e9c040185eddabe6f231c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 4, + "d": 2 + }, + "mne/io/base.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 3, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 7, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 11, + "d": 4 + }, + "mne/viz/raw.py": { + "a": 8, + "d": 2 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10502.json b/doc/sphinxext/prs/10502.json new file mode 100644 index 00000000000..387e62b65cf --- /dev/null +++ b/doc/sphinxext/prs/10502.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "321cafcb8bfae073c66cbe4dfb29a1002f0e0f53", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 3, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 8, + "d": 9 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 4, + "d": 4 + }, + "mne/commands/tests/test_commands.py": { + "a": 6, + "d": 6 + }, + "mne/conftest.py": { + "a": 12, + "d": 3 + }, + 
"mne/datasets/misc/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/misc/_misc.py": { + "a": 7, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 4, + "d": 2 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 3, + "d": 3 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 0, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 7, + "d": 7 + }, + "mne/io/eximia/tests/test_eximia.py": { + "a": 4, + "d": 2 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 5, + "d": 8 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 4, + "d": 7 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 9, + "d": 8 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 5, + "d": 4 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 0, + "d": 3 + }, + "mne/tests/test_coreg.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 4, + "d": 2 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 0, + "d": 2 + }, + "mne/viz/conftest.py": { + "a": 5, + "d": 3 + }, + "mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 4 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 20, + "d": 2 + }, + "tools/github_actions_download.sh": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10503.json b/doc/sphinxext/prs/10503.json new file mode 100644 index 00000000000..98c28bd1290 --- /dev/null +++ b/doc/sphinxext/prs/10503.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0252176cb87d1163f8b19e808ccc6d2c299aea66", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_utils.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10504.json b/doc/sphinxext/prs/10504.json new file mode 100644 index 00000000000..1cb8e5d8758 --- /dev/null +++ b/doc/sphinxext/prs/10504.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "493418e6e30c9ef85c57aabc6702c1724dd8d82b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 18, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10506.json b/doc/sphinxext/prs/10506.json new file mode 100644 index 00000000000..5415ee354a6 --- /dev/null +++ b/doc/sphinxext/prs/10506.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "41d38b7a55afeb2a6a840d07a155dae837326ef0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/utils/misc.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10509.json b/doc/sphinxext/prs/10509.json new file mode 100644 index 00000000000..0067bf4b6ea --- /dev/null +++ 
b/doc/sphinxext/prs/10509.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "e63da5125bc789c79ef9c05dc6e54592821b710d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 18, + "d": 12 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 15, + "d": 3 + }, + "mne/utils/check.py": { + "a": 12, + "d": 11 + }, + "mne/utils/config.py": { + "a": 7, + "d": 12 + }, + "mne/viz/backends/_utils.py": { + "a": 32, + "d": 21 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 0, + "d": 11 + }, + "mne/viz/tests/test_ica.py": { + "a": 10, + "d": 1 + }, + "requirements.txt": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10511.json b/doc/sphinxext/prs/10511.json new file mode 100644 index 00000000000..88b78f5fa57 --- /dev/null +++ b/doc/sphinxext/prs/10511.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9b25b5d8b6d4c6753ff226443f44b381c8c98d4a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/installers_platform_selector.html": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10513.json b/doc/sphinxext/prs/10513.json new file mode 100644 index 00000000000..a887449c47f --- /dev/null +++ b/doc/sphinxext/prs/10513.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "25e9451ccacd0523343807a54255f8d06d202755", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 7, + "d": 21 + }, + "azure-pipelines.yml": { + "a": 19, + "d": 19 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 11, + "d": 7 + }, + "doc/install/advanced.rst": { + "a": 6, + "d": 0 + }, + "doc/install/check_installation.rst": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 1, + "d": 2 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/utils/misc.py": { + "a": 6, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 24, + "d": 11 + }, + "mne/viz/_brain/_brain.py": { + "a": 31, + "d": 14 + }, + "mne/viz/_brain/callback.py": { + "a": 11, + "d": 5 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 29, + "d": 0 + }, + "mne/viz/_scraper.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 20, + "d": 11 + }, + "mne/viz/backends/_qt.py": { + "a": 8, + "d": 2 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 15, + "d": 1 + }, + "mne/viz/tests/test_scraper.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_utils.py": { + "a": 18, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 4 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 13 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 2 + }, + "tools/setup_xvfb.sh": { + "a": 2, + "d": 1 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10515.json b/doc/sphinxext/prs/10515.json new file mode 100644 index 00000000000..a7cc57bb114 --- /dev/null +++ b/doc/sphinxext/prs/10515.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "c4c100cdd030d5de41b5b558e50f497a3f6900e4", + "authors": [ + { + "n": "Alessandro Tonin", + "e": null + }, + { + "n": "Alessandro Tonin", + "e": "alessandro.tonin@wysscenter.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + 
} + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 11, + "d": 10 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/_read_raw.py": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 74, + "d": 47 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10517.json b/doc/sphinxext/prs/10517.json new file mode 100644 index 00000000000..f5cf705dee9 --- /dev/null +++ b/doc/sphinxext/prs/10517.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "394ee2c810ea06ee472e65005893ce939d879fa2", + "authors": [ + { + "n": "Alessandro Tonin", + "e": null + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Alessandro Tonin", + "e": "alessandro.tonin@wysscenter.ch" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1052.json b/doc/sphinxext/prs/1052.json new file mode 100644 index 00000000000..827f92fe2b5 --- /dev/null +++ b/doc/sphinxext/prs/1052.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "758a48b467391d114397dc9c616c0627da2222f0", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/fiff/evoked.py": { + "a": 16, + "d": 12 + }, + "mne/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz.py": { + "a": 42, + "d": 35 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10521.json b/doc/sphinxext/prs/10521.json new file mode 100644 index 00000000000..45cb4a95ebb --- /dev/null +++ b/doc/sphinxext/prs/10521.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5e60e475ef8019af73cc81e184a10bde524c681f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 16, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10523.json b/doc/sphinxext/prs/10523.json new file mode 100644 index 00000000000..7dbda44ce17 --- /dev/null +++ b/doc/sphinxext/prs/10523.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "f222ef5b8caa622e56279f5e33cefbe3771fe186", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/mne_logo.svg": { + "a": 864, + "d": 174 + }, + "doc/_static/mne_logo_dark.svg": { + "a": 866, + "d": 0 + }, + "doc/_static/mne_logo_small.svg": { + "a": 82, + "d": 80 + }, + "doc/_static/style.css": { + "a": 26, + "d": 2 + }, + "doc/_templates/homepage.html": { + "a": 3, + "d": 3 + }, + "doc/conf.py": { + "a": 4, + "d": 1 + }, + "doc/index.rst": { + "a": 12, + "d": 2 + }, + "doc/install/install_type_selector.html": { + "a": 2, + "d": 2 + }, + "logo/generate_mne_logos.py": { + "a": 26, + "d": 15 + }, + "mne/icons/mne_bigsur_icon.png": { + "a": 0, + "d": 0 + }, + "mne/icons/mne_default_icon.png": { + "a": 0, + "d": 0 + }, + "mne/icons/mne_icon.png": { + "a": 0, + "d": 0 + }, + "mne/icons/mne_splash.png": { + "a": 0, + "d": 0 + }, + 
"requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "tools/circleci_download.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10525.json b/doc/sphinxext/prs/10525.json new file mode 100644 index 00000000000..46ea82d2b81 --- /dev/null +++ b/doc/sphinxext/prs/10525.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "706dd3a36096e88a12b3bfb207e348c26fe8625c", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "examples/preprocessing/muscle_detection.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10527.json b/doc/sphinxext/prs/10527.json new file mode 100644 index 00000000000..689a5451b0a --- /dev/null +++ b/doc/sphinxext/prs/10527.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "30ca67b25c7e558affd3fe17db18bab035b1add4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 0 + }, + "mne/io/_digitization.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10528.json b/doc/sphinxext/prs/10528.json new file mode 100644 index 00000000000..88be22ae0c7 --- /dev/null +++ b/doc/sphinxext/prs/10528.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a264cdb6dfae0e86db93f8de1d43350deb5849f4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/installers_platform_selector.html": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10531.json b/doc/sphinxext/prs/10531.json new file mode 100644 index 00000000000..cd6636f563b --- /dev/null +++ b/doc/sphinxext/prs/10531.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "df2a581041949d8bd3a106f127101d7a034f6595", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 3 + }, + "mne/bem.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10534.json b/doc/sphinxext/prs/10534.json new file mode 100644 index 00000000000..9ccc37b449e --- /dev/null +++ b/doc/sphinxext/prs/10534.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5adb55223e564575ac384dbc853d0daaa4e75d8e", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 28, + "d": 0 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 0, + "d": 3 + }, + "examples/preprocessing/muscle_ica.py": { + "a": 117, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 104, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 7, + "d": 1 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10535.json b/doc/sphinxext/prs/10535.json new file mode 100644 index 00000000000..9b7b93e608b --- /dev/null +++ b/doc/sphinxext/prs/10535.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ab0888e8e092f4dec102aab0a377177a73387ca8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/installers_platform_selector.html": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10536.json b/doc/sphinxext/prs/10536.json new file mode 100644 index 00000000000..29e7d5b39b7 --- /dev/null 
+++ b/doc/sphinxext/prs/10536.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a0d1125166029844923d896a76519b6e13a5a8b9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/installers_platform_selector.html": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10537.json b/doc/sphinxext/prs/10537.json new file mode 100644 index 00000000000..01513214ffd --- /dev/null +++ b/doc/sphinxext/prs/10537.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b6ce4f850d7c2dd562001fe2d76777276c6a9543", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 138, + "d": 0 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 112, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1054.json b/doc/sphinxext/prs/1054.json new file mode 100644 index 00000000000..24420bbdd72 --- /dev/null +++ b/doc/sphinxext/prs/1054.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "080b67ae223704841b618cdd1451080fb5660268", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainak@neuro.hut.fi" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/realtime/ftclient_rt_average.py": { + "a": 78, + "d": 0 + }, + "mne/externals/FieldTrip.py": { + "a": 508, + "d": 0 + }, + "mne/realtime/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 257, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10540.json b/doc/sphinxext/prs/10540.json new file mode 100644 index 00000000000..894598047cb --- /dev/null +++ b/doc/sphinxext/prs/10540.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dc88b7a18297818e7ab796d8e43755e499f324cc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "logo/generate_mne_logos.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10541.json b/doc/sphinxext/prs/10541.json new file mode 100644 index 00000000000..ed248cfe98c --- /dev/null +++ b/doc/sphinxext/prs/10541.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "47d7a8eb302ca59de109972b0c87b32b70b52713", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10543.json b/doc/sphinxext/prs/10543.json new file mode 100644 index 00000000000..1b93102f7c5 --- /dev/null +++ b/doc/sphinxext/prs/10543.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c6bfb20172f4c37bbcb2960efa01c40c17aa4101", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 19, + "d": 0 + }, + "mne/coreg.py": { + "a": 3, + "d": 3 + }, + "mne/gui/_coreg.py": { + "a": 3, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 30, + "d": 7 + }, + "mne/tests/test_coreg.py": { + "a": 4, + 
"d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10544.json b/doc/sphinxext/prs/10544.json new file mode 100644 index 00000000000..b3bfcacea14 --- /dev/null +++ b/doc/sphinxext/prs/10544.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "419ecdbf4ee0ac7cdfd2d71d3d001f77ec5d9840", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10545.json b/doc/sphinxext/prs/10545.json new file mode 100644 index 00000000000..92e58b0892c --- /dev/null +++ b/doc/sphinxext/prs/10545.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4216e50db1af4f8c78c9b90cbd4de865808cb464", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10546.json b/doc/sphinxext/prs/10546.json new file mode 100644 index 00000000000..42c19a80de0 --- /dev/null +++ b/doc/sphinxext/prs/10546.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a48c9b99ff2af47dcc86a8d76427457f0a74626a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 6, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 8, + "d": 7 + }, + "mne/viz/backends/_qt.py": { + "a": 13, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10547.json b/doc/sphinxext/prs/10547.json new file mode 100644 index 00000000000..06782486929 --- /dev/null +++ b/doc/sphinxext/prs/10547.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4e601bcab824e49d9f96eba9c402b75868fe4604", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 9, + "d": 0 + }, + "mne/io/_digitization.py": { + "a": 15, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10548.json b/doc/sphinxext/prs/10548.json new file mode 100644 index 00000000000..aa0fbeb08fa --- /dev/null +++ b/doc/sphinxext/prs/10548.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2424a49b8af33b557367cf63b584155bd6000394", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10549.json b/doc/sphinxext/prs/10549.json new file mode 100644 index 00000000000..7488578f6d2 --- /dev/null +++ b/doc/sphinxext/prs/10549.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ac45cbe05687c9b7a09151b73a577c3f51b37f08", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_coreg.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10552.json b/doc/sphinxext/prs/10552.json new file mode 100644 index 
00000000000..db50639a288 --- /dev/null +++ b/doc/sphinxext/prs/10552.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0908c79aca54211d4bef084cd54d772e7bf0ff20", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 4, + "d": 1 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 5, + "d": 3 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 17, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10554.json b/doc/sphinxext/prs/10554.json new file mode 100644 index 00000000000..7a4f828e305 --- /dev/null +++ b/doc/sphinxext/prs/10554.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fc981bd93b7a485480d0d074fe29fbfb23443609", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 8, + "d": 3 + }, + "mne/io/base.py": { + "a": 15, + "d": 22 + }, + "mne/preprocessing/maxwell.py": { + "a": 38, + "d": 13 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 35, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10555.json b/doc/sphinxext/prs/10555.json new file mode 100644 index 00000000000..8f0a15bd4c5 --- /dev/null +++ b/doc/sphinxext/prs/10555.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "7fcace5d8ebe6f56a05fcaf269e26512d034de1f", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/nirx.py": { + "a": 3, + "d": 0 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 68, + "d": 62 + }, + "mne/io/snirf/_snirf.py": { + "a": 73, + "d": 33 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 41, + "d": 24 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 13, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10556.json b/doc/sphinxext/prs/10556.json new file mode 100644 index 00000000000..7ce8fe33678 --- /dev/null +++ b/doc/sphinxext/prs/10556.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f523f24af64921cf3e49fee01f665ef36867e05a", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "examples/inverse/multi_dipole_model.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10557.json b/doc/sphinxext/prs/10557.json new file mode 100644 index 00000000000..07155bf9295 --- /dev/null +++ b/doc/sphinxext/prs/10557.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "419b9e2d0825d4660b39200391ed1ad9b8e1b892", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/muscle_ica.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 3 + }, + "mne/viz/ica.py": { + "a": 58, + "d": 16 + }, + "mne/viz/tests/test_ica.py": { + "a": 26, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/10558.json b/doc/sphinxext/prs/10558.json new file mode 100644 index 00000000000..8d06fab96c5 --- /dev/null +++ b/doc/sphinxext/prs/10558.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aa4118655e70b57d99262aad70d4fe38ea6ecd63", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 6, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10559.json b/doc/sphinxext/prs/10559.json new file mode 100644 index 00000000000..84a4d09a55a --- /dev/null +++ b/doc/sphinxext/prs/10559.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "04e5f687c3241503b63010ca3943bb5e173efb4d", + "authors": [ + { + "n": "Simeon Wong", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10560.json b/doc/sphinxext/prs/10560.json new file mode 100644 index 00000000000..17f1d232014 --- /dev/null +++ b/doc/sphinxext/prs/10560.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "9bd26a39996ddafe31830ce31add97a8fd84a970", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 6, + "d": 4 + }, + "mne/viz/backends/_abstract.py": { + "a": 8, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 14, + "d": 5 + }, + "mne/viz/backends/_qt.py": { + "a": 11, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10563.json b/doc/sphinxext/prs/10563.json new file mode 100644 index 00000000000..5119208468c --- /dev/null +++ b/doc/sphinxext/prs/10563.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bd210961c5d899af2ecd7d7e2bb53b80ec4807e3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10567.json b/doc/sphinxext/prs/10567.json new file mode 100644 index 00000000000..87406800e8d --- /dev/null +++ b/doc/sphinxext/prs/10567.json @@ -0,0 +1,371 @@ +{ + "merge_commit_sha": "62143cbeb12a822e08833941c4e0f1d450d35240", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/datasets/opm_data.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/spm_faces_dataset_sgskip.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 6, + "d": 8 + }, + "examples/decoding/decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/muscle_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "examples/stats/cluster_stats_evoked.py": { + "a": 1, + "d": 1 + }, + 
"examples/stats/sensor_permutation_test.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/source_label_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/source_space_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/evoked_whitening.py": { + "a": 3, + "d": 3 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 7, + "d": 6 + }, + "mne/cuda.py": { + "a": 9, + "d": 7 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ems.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 11, + "d": 17 + }, + "mne/decoding/tests/test_base.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 5, + "d": 6 + }, + "mne/decoding/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 5, + "d": 5 + }, + "mne/dipole.py": { + "a": 4, + "d": 4 + }, + "mne/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/evoked.py": { + "a": 4, + "d": 3 + }, + "mne/filter.py": { + "a": 22, + "d": 26 + }, + "mne/forward/_compute_forward.py": { + "a": 7, + "d": 5 + }, + "mne/forward/_field_interpolation.py": { + "a": 3, + "d": 5 + }, + "mne/forward/_lead_dots.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 4 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 11, + "d": 9 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 5, + "d": 6 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 8, + "d": 9 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/parallel.py": { + "a": 55, + "d": 76 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 4, + "d": 4 + }, + "mne/report/report.py": { + "a": 20, + "d": 25 + }, + "mne/simulation/raw.py": { + "a": 4, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 8, + "d": 8 + }, + "mne/stats/cluster_level.py": { + "a": 6, + "d": 7 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 6, + "d": 6 + }, + "mne/surface.py": { + "a": 3, + "d": 4 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 11, + "d": 8 + }, + "mne/tests/test_parallel.py": { + "a": 40, + "d": 0 + }, + "mne/tests/test_proj.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 5, + "d": 0 + }, + "mne/time_frequency/_stockwell.py": { + "a": 4, + "d": 5 + }, + "mne/time_frequency/csd.py": { + "a": 11, + "d": 9 + }, + "mne/time_frequency/multitaper.py": { + "a": 6, + "d": 7 + }, + "mne/time_frequency/psd.py": { + "a": 7, + "d": 8 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 5, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 7, + 
"d": 3 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/30_forward.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 9, + "d": 7 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 4, + "d": 4 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/70_cluster_rmANOVA_time_freq.py": { + "a": 3, + "d": 2 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10568.json b/doc/sphinxext/prs/10568.json new file mode 100644 index 00000000000..69f4e29ab7b --- /dev/null +++ b/doc/sphinxext/prs/10568.json @@ -0,0 +1,151 @@ +{ + "merge_commit_sha": "858d1b25aafe98c54786e4344d2e5a11ac65da4b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/0.20.inc": { + "a": 1, + "d": 1 + }, + "mne/_ola.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 5, + "d": 18 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 0, + "d": 3 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/receptive_field.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/search_light.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/transformer.py": { + "a": 3, + "d": 4 + }, + "mne/dipole.py": { + "a": 3, + "d": 4 + }, + "mne/epochs.py": { + "a": 2, + "d": 3 + }, + "mne/event.py": { + "a": 1, + "d": 10 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 12 + }, + "mne/gui/_coreg.py": { + "a": 2, + "d": 28 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 0, + "d": 26 + }, + "mne/io/base.py": { + "a": 2, + "d": 3 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 29 + }, + "mne/io/tests/test_meas_info.py": { + "a": 0, + "d": 7 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/morph.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/flat.py": { + "a": 0, + "d": 108 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 0, + "d": 96 + }, + "mne/rank.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 3 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 0, + "d": 17 + }, + "mne/utils/docs.py": { + "a": 0, + 
"d": 6 + }, + "mne/utils/tests/test_logging.py": { + "a": 2, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10569.json b/doc/sphinxext/prs/10569.json new file mode 100644 index 00000000000..d09c6cd0635 --- /dev/null +++ b/doc/sphinxext/prs/10569.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fc1112d8708e7ca8a8f7af6062c2927e538b55b2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/forward/_field_interpolation.py": { + "a": 9, + "d": 2 + }, + "mne/io/open.py": { + "a": 5, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 3 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10571.json b/doc/sphinxext/prs/10571.json new file mode 100644 index 00000000000..a958d578158 --- /dev/null +++ b/doc/sphinxext/prs/10571.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "a6a67de8ca8a57c5aefaa13fd233e0958c81c26f", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 49, + "d": 0 + }, + "doc/visualization.rst": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 262, + "d": 0 + }, + "examples/preprocessing/muscle_ica.py": { + "a": 5, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/_csd.py": { + "a": 123, + "d": 2 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 29, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 26, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 115, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10572.json b/doc/sphinxext/prs/10572.json new file mode 100644 index 00000000000..d441193e18b --- /dev/null +++ b/doc/sphinxext/prs/10572.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "65a53882e7ba2d7909a9e14de48218dfeb00f664", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "doc/glossary.rst": { + "a": 6, + "d": 0 + }, + "examples/visualization/eeglab_head_sphere.py": { + "a": 0, + "d": 130 + }, + "mne/utils/check.py": { + "a": 79, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 36, + "d": 21 + }, + "mne/viz/montage.py": { + "a": 1, + "d": 3 + }, + "mne/viz/tests/test_montage.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 33, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 14, + "d": 4 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 160, + "d": 132 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10573.json b/doc/sphinxext/prs/10573.json new file mode 100644 index 00000000000..fddbc84c712 --- /dev/null +++ b/doc/sphinxext/prs/10573.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "42102b58a24d1b864a4a668ba687fe8097e52a00", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 3, + "d": 3 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 3, + "d": 2 + }, + "mne/viz/backends/_abstract.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 2 
+ }, + "mne/viz/backends/_qt.py": { + "a": 18, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10574.json b/doc/sphinxext/prs/10574.json new file mode 100644 index 00000000000..2fa4d7fa0eb --- /dev/null +++ b/doc/sphinxext/prs/10574.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d5544e700600cde978e8a5d6ea73aedad6843a28", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/bem.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10575.json b/doc/sphinxext/prs/10575.json new file mode 100644 index 00000000000..f144182f827 --- /dev/null +++ b/doc/sphinxext/prs/10575.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4cab7a555b2430e1de11799a22b949607987603e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_qt.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10578.json b/doc/sphinxext/prs/10578.json new file mode 100644 index 00000000000..333b4e3c63f --- /dev/null +++ b/doc/sphinxext/prs/10578.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f059f8f1edaee3f9b6954e7987dd1e6d482bba27", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1058.json b/doc/sphinxext/prs/1058.json new file mode 100644 index 00000000000..f5102cb6d54 --- /dev/null +++ b/doc/sphinxext/prs/1058.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "856353d24e852bbb078245b60a40f9e72d72a964", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 28, + "d": 8 + }, + "mne/fiff/pick.py": { + "a": 9, + "d": 7 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_viz.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10580.json b/doc/sphinxext/prs/10580.json new file mode 100644 index 00000000000..442ce4eb278 --- /dev/null +++ b/doc/sphinxext/prs/10580.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "820e3ccc63a13fbe0142f3333c7739ecaa42612d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/install/advanced.rst": { + "a": 15, + "d": 13 + }, + "doc/install/manual_install.rst": { + "a": 69, + "d": 43 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10581.json b/doc/sphinxext/prs/10581.json new file mode 100644 index 00000000000..d236da5f825 --- /dev/null +++ b/doc/sphinxext/prs/10581.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "770aa7c8e3183f54140632f86796c808641ecd26", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/install/advanced.rst": { + "a": 14, + "d": 12 + }, + "doc/install/manual_install.rst": { + "a": 69, + "d": 43 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10582.json 
b/doc/sphinxext/prs/10582.json new file mode 100644 index 00000000000..ae1e619e08b --- /dev/null +++ b/doc/sphinxext/prs/10582.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "00e96d940ac666f66c5a268f17564c470e141236", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/decoding/search_light.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10583.json b/doc/sphinxext/prs/10583.json new file mode 100644 index 00000000000..bbba6ca6006 --- /dev/null +++ b/doc/sphinxext/prs/10583.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "cf820dfe28bfa2a01f4b431de33ff571287d1320", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 6, + "d": 6 + }, + "doc/_static/style.css": { + "a": 8, + "d": 0 + }, + "doc/install/advanced.rst": { + "a": 34, + "d": 34 + }, + "doc/install/installers_platform_selector.html": { + "a": 1, + "d": 1 + }, + "doc/install/manual_install.rst": { + "a": 17, + "d": 19 + }, + "doc/install/manual_install_platform_selector.html": { + "a": 56, + "d": 0 + }, + "doc/install/manual_install_python.rst": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10586.json b/doc/sphinxext/prs/10586.json new file mode 100644 index 00000000000..cf075fa0eed --- /dev/null +++ b/doc/sphinxext/prs/10586.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a5d91041d787fcd2ba58330a4ccde360cf543b7f", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "examples/preprocessing/eeg_bridging.py": { + "a": 16, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10587.json b/doc/sphinxext/prs/10587.json new file mode 100644 index 00000000000..cd9df77c775 --- /dev/null +++ b/doc/sphinxext/prs/10587.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "4349f6d9805c43e1034b09742668a952fdff40ec", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 92, + "d": 7 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/interpolate.py": { + "a": 85, + "d": 1 + }, + "mne/preprocessing/tests/test_interpolate.py": { + "a": 23, + "d": 1 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10588.json b/doc/sphinxext/prs/10588.json new file mode 100644 index 00000000000..c253085ceca --- /dev/null +++ b/doc/sphinxext/prs/10588.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cc44af23232542e5a14d86922e10f6d0ea22bc1e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/preprocessing/eeg_bridging.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10589.json b/doc/sphinxext/prs/10589.json new file mode 100644 index 00000000000..c8358ae09ff --- /dev/null +++ b/doc/sphinxext/prs/10589.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "438eefc036405cae3b25a4a3d91bedb690a2d36d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 6, + 
"d": 6 + }, + "doc/_static/style.css": { + "a": 6, + "d": 0 + }, + "doc/install/advanced.rst": { + "a": 32, + "d": 32 + }, + "doc/install/installers_platform_selector.html": { + "a": 1, + "d": 1 + }, + "doc/install/manual_install.rst": { + "a": 17, + "d": 19 + }, + "doc/install/manual_install_platform_selector.html": { + "a": 56, + "d": 0 + }, + "doc/install/manual_install_python.rst": { + "a": 1, + "d": 1 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10592.json b/doc/sphinxext/prs/10592.json new file mode 100644 index 00000000000..6dac477267a --- /dev/null +++ b/doc/sphinxext/prs/10592.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "754ea8e1d4f4204b172653eda34d30a74438b9fe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 4, + "d": 2 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 6, + "d": 3 + }, + "mne/bem.py": { + "a": 54, + "d": 23 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 6, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 7, + "d": 4 + }, + "mne/coreg.py": { + "a": 2, + "d": 0 + }, + "mne/surface.py": { + "a": 73, + "d": 50 + }, + "mne/tests/test_bem.py": { + "a": 53, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 3, + "d": 3 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 2 + }, + "mne/utils/misc.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 69, + "d": 48 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 2 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 129, + "d": 75 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10593.json b/doc/sphinxext/prs/10593.json new file mode 100644 index 00000000000..bc7310209f7 --- /dev/null +++ b/doc/sphinxext/prs/10593.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "50c1adf5d7b4cbf000010a6a9aedbc90f495c921", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/colormap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10597.json b/doc/sphinxext/prs/10597.json new file mode 100644 index 00000000000..204b600f448 --- /dev/null +++ b/doc/sphinxext/prs/10597.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fae9ada64b49afd9f6bb8161111a2aee9070c38c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/install/manual_install.rst": { + "a": 2, + "d": 3 + }, + "doc/install/manual_install_platform_selector.html": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10599.json b/doc/sphinxext/prs/10599.json new file mode 100644 index 00000000000..64a660ee53a --- /dev/null +++ b/doc/sphinxext/prs/10599.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cc86f163126ad1aa950f1909062abe5e9b1888c5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": 
"richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 0, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10600.json b/doc/sphinxext/prs/10600.json new file mode 100644 index 00000000000..77d06387ac4 --- /dev/null +++ b/doc/sphinxext/prs/10600.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8f37e20a3551ba36a10c6ea541b64dd87da53a21", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/install/manual_install.rst": { + "a": 2, + "d": 3 + }, + "doc/install/manual_install_platform_selector.html": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10603.json b/doc/sphinxext/prs/10603.json new file mode 100644 index 00000000000..c0026d25c81 --- /dev/null +++ b/doc/sphinxext/prs/10603.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "90172f41531b15f35b0cf2c87c6b39044f54469e", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "tutorials/intro/70_report.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10605.json b/doc/sphinxext/prs/10605.json new file mode 100644 index 00000000000..001c5599fdb --- /dev/null +++ b/doc/sphinxext/prs/10605.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b00dcb1cb0376f0965054145b4222c6a7bfaf7ee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/versions.json": { + "a": 32, + "d": 16 + }, + "doc/conf.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10606.json b/doc/sphinxext/prs/10606.json new file mode 100644 index 00000000000..31c1c73b533 --- /dev/null +++ b/doc/sphinxext/prs/10606.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "add9e1620958b61f5509bea172ce8343975aee10", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 4 + }, + "mne/viz/tests/test_topomap.py": { + "a": 14, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10607.json b/doc/sphinxext/prs/10607.json new file mode 100644 index 00000000000..2212c242ba4 --- /dev/null +++ b/doc/sphinxext/prs/10607.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9b7fdd11f60debe810e7445c0699d9e6f487e0b1", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10610.json b/doc/sphinxext/prs/10610.json new file mode 100644 index 00000000000..9c2d971eb02 --- /dev/null +++ b/doc/sphinxext/prs/10610.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "20a721328b430b850e5e4654e93a592cdab87271", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 9 + }, + "mne/viz/tests/test_topomap.py": { + "a": 23, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 66, + "d": 21 + }, + 
"tutorials/evoked/20_visualize_evoked.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10611.json b/doc/sphinxext/prs/10611.json new file mode 100644 index 00000000000..f2659d60643 --- /dev/null +++ b/doc/sphinxext/prs/10611.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "d24d0766c8c16733d1fdcf4e908c722505e9d507", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 30, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 27, + "d": 0 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/nirs/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 37, + "d": 0 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 30, + "d": 2 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10613.json b/doc/sphinxext/prs/10613.json new file mode 100644 index 00000000000..2249ff0e505 --- /dev/null +++ b/doc/sphinxext/prs/10613.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "343afe9d34d8cce35344819bdfa0e733aae1159f", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/snirf/_snirf.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10614.json b/doc/sphinxext/prs/10614.json new file mode 100644 index 00000000000..151a6bc1db8 --- /dev/null +++ b/doc/sphinxext/prs/10614.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9f833658527a2c06ba833bffee33c9ba3a4a528a", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 43, + "d": 6 + }, + "mne/viz/tests/test_evoked.py": { + "a": 14, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10615.json b/doc/sphinxext/prs/10615.json new file mode 100644 index 00000000000..7988011e888 --- /dev/null +++ b/doc/sphinxext/prs/10615.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "439e598ccb30019e81ebe8fc7885c8b5e57dff02", + "authors": [ + { + "n": "Adam Li", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10616.json b/doc/sphinxext/prs/10616.json new file mode 100644 index 00000000000..b0fca5b257d --- /dev/null +++ b/doc/sphinxext/prs/10616.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "65547f9712f3cf029e9aa7eeb9372596edbf7fe9", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/references.bib": { + "a": 13, + "d": 0 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10617.json b/doc/sphinxext/prs/10617.json new file mode 100644 index 00000000000..d61b0e1344c --- /dev/null 
+++ b/doc/sphinxext/prs/10617.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "be56457160097ba8826e2a0c8e66c5b4073bea05", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/evoked_topomap.py": { + "a": 29, + "d": 0 + }, + "mne/cov.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/csp.py": { + "a": 5, + "d": 8 + }, + "mne/defaults.py": { + "a": 1, + "d": 0 + }, + "mne/evoked.py": { + "a": 12, + "d": 8 + }, + "mne/io/proj.py": { + "a": 11, + "d": 7 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 5 + }, + "mne/viz/tests/test_ica.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_topomap.py": { + "a": 7, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 64, + "d": 41 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10618.json b/doc/sphinxext/prs/10618.json new file mode 100644 index 00000000000..43ec1c17383 --- /dev/null +++ b/doc/sphinxext/prs/10618.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "97bce190e1a9831395ed0c1ebf5365b248e023d3", + "authors": [ + { + "n": "Reza Shoorangiz", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10619.json b/doc/sphinxext/prs/10619.json new file mode 100644 index 00000000000..54b36f28d55 --- /dev/null +++ b/doc/sphinxext/prs/10619.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "820bd7a90f7babe7104efdc722c840d0137919a7", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alex", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/_csd.py": { + "a": 8, + "d": 0 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1062.json b/doc/sphinxext/prs/1062.json new file mode 100644 index 00000000000..f3e436b780b --- /dev/null +++ b/doc/sphinxext/prs/1062.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "6f822578746945abd595a4772cfaa85ec6478d54", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 22 + }, + "mne/fiff/channels.py": { + "a": 76, + "d": 1 + }, + "mne/fiff/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 25, + "d": 1 + }, + "mne/fiff/tests/test_raw.py": { + "a": 27, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 29, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10620.json b/doc/sphinxext/prs/10620.json new file mode 100644 index 00000000000..9e9cc35b3c8 --- /dev/null +++ b/doc/sphinxext/prs/10620.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7b34565088f0120e9795fba1acb16a74936105f2", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/_csd.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10621.json b/doc/sphinxext/prs/10621.json new file mode 100644 index 00000000000..94b9974e0da --- /dev/null +++ b/doc/sphinxext/prs/10621.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "e711615915c3c5c99b605d4b064f668684fec08b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alex", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Alex", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/1.0.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/_csd.py": { + "a": 8, + "d": 0 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 15, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 18 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10622.json b/doc/sphinxext/prs/10622.json new file mode 100644 index 00000000000..6b5b879dfdb --- /dev/null +++ b/doc/sphinxext/prs/10622.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "04d799d3fae689db4f3af7317e8e93c1ba7b8328", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/installers_platform_selector.html": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10624.json b/doc/sphinxext/prs/10624.json new file mode 100644 index 00000000000..bae6cabc836 --- /dev/null +++ b/doc/sphinxext/prs/10624.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ae0b1bdda4567cc3ac30d1168270c396c8c93409", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10626.json b/doc/sphinxext/prs/10626.json new file mode 100644 index 00000000000..2e69f6a9294 --- /dev/null +++ b/doc/sphinxext/prs/10626.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3f7388a888eb473d79b3729ae75b5d2e403c7778", + "authors": [ + { + "n": "Alex Rockhill", + "e": 
"aprockhill@mailbox.org" + } + ], + "changes": { + "mne/gui/_ieeg_locate_gui.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10627.json b/doc/sphinxext/prs/10627.json new file mode 100644 index 00000000000..3afb3db78b8 --- /dev/null +++ b/doc/sphinxext/prs/10627.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "7655632d8e85164a36b8f322ed52e9262a7e2ebb", + "authors": [ + { + "n": "Evan Hathaway", + "e": null + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/data/layouts/GeodesicHeadWeb-130.lout": { + "a": 132, + "d": 0 + }, + "mne/channels/data/layouts/GeodesicHeadWeb-280.lout": { + "a": 282, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 54, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10628.json b/doc/sphinxext/prs/10628.json new file mode 100644 index 00000000000..3467b1587e1 --- /dev/null +++ b/doc/sphinxext/prs/10628.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "549de120926465a514ac5d7b0b8950f49ec2ade5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/linux_conda.yml": { + "a": 7, + "d": 3 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "tools/setup_xvfb.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1063.json b/doc/sphinxext/prs/1063.json new file mode 100644 index 00000000000..1a3efcdb3be --- /dev/null +++ b/doc/sphinxext/prs/1063.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f5fed2a8c88217c7154ce47df4b8eaf35ed9d1ee", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/edf/edf.py": { + "a": 10, + "d": 4 + }, + "mne/fiff/edf/tests/test_edf.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10630.json b/doc/sphinxext/prs/10630.json new file mode 100644 index 00000000000..4dec95bd7d2 --- /dev/null +++ b/doc/sphinxext/prs/10630.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "fd3112ef5b8bf4f23f4dda320ef91b1b22e7f7cb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/overview/faq.rst": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 20, + "d": 3 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 64, + "d": 42 + }, + "mne/io/snirf/_snirf.py": { + "a": 2, + "d": 3 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 27, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 4, + "d": 13 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10632.json b/doc/sphinxext/prs/10632.json new file mode 100644 index 00000000000..74a7ffeef17 --- /dev/null +++ b/doc/sphinxext/prs/10632.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "18ccda607e9139563270011ecb3391c4c35011ce", + "authors": [ + { + "n": "Matt Courtemanche", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/forward.rst": { + "a": 1, + "d": 0 + }, + "mne/bem.py": { + "a": 62, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 47, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10633.json b/doc/sphinxext/prs/10633.json new file mode 100644 index 00000000000..0c3103a6cf7 --- /dev/null +++ b/doc/sphinxext/prs/10633.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "c3d67179a80b9d9e8c23684c8cad4c01e1c44093", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/visualization.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 9, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 97, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 210, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 38, + "d": 1 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 1, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10636.json b/doc/sphinxext/prs/10636.json new file mode 100644 index 00000000000..5d6c992443c --- /dev/null +++ b/doc/sphinxext/prs/10636.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7f5b0f1c0d06e8b06eb51363b876c497fab9934f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 3, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10637.json b/doc/sphinxext/prs/10637.json new file mode 100644 index 00000000000..ea74d14fb69 --- /dev/null +++ b/doc/sphinxext/prs/10637.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0d3232195d3d531b53c095eda7a9907cbfbc4c30", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "mne/viz/backends/_utils.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10638.json b/doc/sphinxext/prs/10638.json new file mode 100644 index 00000000000..a234b3ca1b0 --- /dev/null +++ b/doc/sphinxext/prs/10638.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9d972e45a10c3cecace5eace7c5eac35d53db7e7", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10639.json b/doc/sphinxext/prs/10639.json new file mode 100644 index 00000000000..054bce79e69 --- /dev/null +++ b/doc/sphinxext/prs/10639.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "db2c8fd8671a20857c8c56b71b116a6560b6638f", + "authors": [ + { + "n": "Matti Toivonen", + "e": "matti.toivonen@megin.fi" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10642.json b/doc/sphinxext/prs/10642.json new file mode 100644 index 00000000000..0648f7fb2b4 --- /dev/null +++ b/doc/sphinxext/prs/10642.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "f43584635b386a845c13953cc113305cc4e70721", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larsoner@monolith.local" + } + ], + "changes": { + "mne/channels/interpolation.py": { + "a": 3, + "d": 3 + }, + "mne/conftest.py": { + "a": 24, + "d": 1 + }, + "mne/fixes.py": { + "a": 8, + "d": 5 + }, + "mne/io/nirx/nirx.py": { + "a": 0, + "d": 20 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 6, + "d": 21 + }, + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 5 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 22, + "d": 24 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 1 + }, + "mne/preprocessing/nirs/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 10, + "d": 10 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 5, + "d": 9 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 4, + "d": 8 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 141, + "d": 90 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 8, + "d": 7 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 91, + "d": 25 + }, + "mne/preprocessing/nirs/tests/test_optical_density.py": { + "a": 2, + "d": 0 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 0, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10643.json b/doc/sphinxext/prs/10643.json new file mode 100644 index 00000000000..510fb890bfa --- /dev/null +++ b/doc/sphinxext/prs/10643.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6a8dd1434c6bbea527385838aa1fa0cc07c5201f", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": 
"alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/1.0.inc": { + "a": 6, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10645.json b/doc/sphinxext/prs/10645.json new file mode 100644 index 00000000000..60a3f579c27 --- /dev/null +++ b/doc/sphinxext/prs/10645.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "965521831b5ca8675becc0cc9716db0cfc8ad946", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_gui_api.py": { + "a": 9, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 14, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 15, + "d": 2 + }, + "mne/viz/backends/_qt.py": { + "a": 52, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10646.json b/doc/sphinxext/prs/10646.json new file mode 100644 index 00000000000..f99302987b9 --- /dev/null +++ b/doc/sphinxext/prs/10646.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1e806f358c7eee7e29192615a6394464d6c5abec", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_gui_api.py": { + "a": 11, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 11, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 18, + "d": 3 + }, + "mne/viz/backends/_qt.py": { + "a": 31, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10649.json b/doc/sphinxext/prs/10649.json new file mode 100644 index 00000000000..cc3c710dc2f --- /dev/null +++ b/doc/sphinxext/prs/10649.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1349ad88bdfe72d9214fe7496f74b1629985673d", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1065.json b/doc/sphinxext/prs/1065.json new file mode 100644 index 00000000000..796ec1477e5 --- /dev/null +++ b/doc/sphinxext/prs/1065.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "7795da0e6678797db127ebef967a1ca8d785d5b3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 0 + }, + "mne/fiff/brainvision/brainvision.py": { + "a": 17, + "d": 7 + }, + "mne/fiff/tag.py": { + "a": 6, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_file_traits.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 9, + "d": 10 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 8, + "d": 8 + }, + "mne/utils.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10651.json b/doc/sphinxext/prs/10651.json new file mode 100644 index 00000000000..48f2ff988ae --- /dev/null +++ b/doc/sphinxext/prs/10651.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3a9867adc80459317504468ab7829ed7379169e7", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 18, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10654.json b/doc/sphinxext/prs/10654.json new file mode 100644 index 00000000000..34d7bce1cc3 --- /dev/null +++ b/doc/sphinxext/prs/10654.json @@ -0,0 +1,15 @@ +{ + 
"merge_commit_sha": "4061f38ceec3727e08dccc5c9e8b2d8a408bf314", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 18, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10656.json b/doc/sphinxext/prs/10656.json new file mode 100644 index 00000000000..83c3e59ff56 --- /dev/null +++ b/doc/sphinxext/prs/10656.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "fefa7dba887089a46cf5e0f8836ff6fb07be2552", + "authors": [ + { + "n": "Moritz Gerster", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 2 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 3, + "d": 3 + }, + "mne/io/proj.py": { + "a": 94, + "d": 34 + }, + "mne/io/reference.py": { + "a": 19, + "d": 6 + }, + "mne/io/tests/test_reference.py": { + "a": 40, + "d": 33 + }, + "mne/proj.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_proj.py": { + "a": 98, + "d": 11 + }, + "mne/tests/test_rank.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 14, + "d": 0 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10658.json b/doc/sphinxext/prs/10658.json new file mode 100644 index 00000000000..82c0a8c1fd0 --- /dev/null +++ b/doc/sphinxext/prs/10658.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "a2f6fe7cb6b1ef38fea697ee82cb97eb65c79217", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "Makefile": { + "a": 0, + "d": 2 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/install/contributing.rst": { + "a": 6, + "d": 8 + }, + "doc/references.bib": { + "a": 12, + "d": 0 + }, + "examples/decoding/ems_filtering.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 92, + "d": 18 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/stats/_adjacency.py": { + "a": 15, + "d": 4 + }, + "mne/stats/cluster_level.py": { + "a": 44, + "d": 31 + }, + "mne/stats/parametric.py": { + "a": 2, + "d": 10 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 56, + "d": 14 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 30, + "d": 13 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 103, + "d": 18 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 90, + "d": 43 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 74, + "d": 30 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + 
"tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-source-space/70_cluster_rmANOVA_time_freq.py": { + "a": 3, + "d": 3 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1066.json b/doc/sphinxext/prs/1066.json new file mode 100644 index 00000000000..67b37827baa --- /dev/null +++ b/doc/sphinxext/prs/1066.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "9ed41d984dc894baacfb49c6d92cdf3d90b1d318", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/brainvision/brainvision.py": { + "a": 20, + "d": 9 + }, + "mne/fiff/tag.py": { + "a": 6, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_file_traits.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 40, + "d": 41 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 8, + "d": 8 + }, + "mne/utils.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10661.json b/doc/sphinxext/prs/10661.json new file mode 100644 index 00000000000..be7f8fd6ca9 --- /dev/null +++ b/doc/sphinxext/prs/10661.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2dbebc005f7498a06d5b1d7b64de5b3cb07b1430", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 29, + "d": 6 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 14, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10662.json b/doc/sphinxext/prs/10662.json new file mode 100644 index 00000000000..4fece100d2d --- /dev/null +++ b/doc/sphinxext/prs/10662.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f5df9272193a5e84c6bceddb5b9b6582578a155c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 21, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10664.json b/doc/sphinxext/prs/10664.json new file mode 100644 index 00000000000..5de31ea0be4 --- /dev/null +++ b/doc/sphinxext/prs/10664.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5bddef897c97bdcfb8d879901137c599e776df56", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10666.json b/doc/sphinxext/prs/10666.json new file mode 100644 index 00000000000..73c1c5f3dde --- /dev/null +++ b/doc/sphinxext/prs/10666.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "cba0f9b9d40ee19345f576a72017316174121851", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "Makefile": { + "a": 0, + "d": 2 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + 
"doc/install/contributing.rst": { + "a": 6, + "d": 8 + }, + "doc/references.bib": { + "a": 12, + "d": 0 + }, + "examples/decoding/ems_filtering.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 90, + "d": 16 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/stats/_adjacency.py": { + "a": 15, + "d": 4 + }, + "mne/stats/cluster_level.py": { + "a": 44, + "d": 31 + }, + "mne/stats/parametric.py": { + "a": 2, + "d": 10 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 56, + "d": 14 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 30, + "d": 13 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 102, + "d": 17 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 90, + "d": 43 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 74, + "d": 30 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-source-space/70_cluster_rmANOVA_time_freq.py": { + "a": 3, + "d": 3 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10667.json b/doc/sphinxext/prs/10667.json new file mode 100644 index 00000000000..2dc94b7d418 --- /dev/null +++ b/doc/sphinxext/prs/10667.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a62f9a4d6fffa7c7c84817d4fde988f7930eace1", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/gui/_ieeg_locate_gui.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10668.json b/doc/sphinxext/prs/10668.json new file mode 100644 index 00000000000..b0aa8f36ff6 --- /dev/null +++ b/doc/sphinxext/prs/10668.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3e2b87fa02b00b3fc0cab4fd7483973ad53d0bbe", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/nirx.py": { + "a": 22, + "d": 3 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10669.json b/doc/sphinxext/prs/10669.json new file mode 100644 index 00000000000..73c462a55f4 --- /dev/null +++ b/doc/sphinxext/prs/10669.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6b2cc8554f1503831b8fb807de57d6c5b26ade37", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 25, + "d": 6 + }, + "mne/viz/_3d_overlay.py": { + "a": 191, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 180 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1067.json b/doc/sphinxext/prs/1067.json new file mode 100644 index 00000000000..3a2809f4395 --- /dev/null +++ b/doc/sphinxext/prs/1067.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4fa583a1796c8d4838d7e31b656e8d14068abfdd", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fiff/brainvision/brainvision.py": { + "a": 41, + "d": 
20 + }, + "mne/fiff/brainvision/tests/test_brainvision.py": { + "a": 17, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10671.json b/doc/sphinxext/prs/10671.json new file mode 100644 index 00000000000..b64465ebbbc --- /dev/null +++ b/doc/sphinxext/prs/10671.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "fc53916dcc98184fde17d2d41973c156e24c02f9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 16, + "d": 1 + }, + "doc/conf.py": { + "a": 7, + "d": 3 + }, + "doc/index.rst": { + "a": 6, + "d": 12 + }, + "doc/install/check_installation.rst": { + "a": 2, + "d": 2 + }, + "doc/install/contributing.rst": { + "a": 7, + "d": 6 + }, + "doc/install/index.rst": { + "a": 63, + "d": 2 + }, + "doc/install/install_type_selector.html": { + "a": 0, + "d": 40 + }, + "doc/install/installers.rst": { + "a": 74, + "d": 3 + }, + "doc/install/installers_platform_selector.html": { + "a": 0, + "d": 79 + }, + "doc/install/manual_install.rst": { + "a": 2, + "d": 2 + }, + "doc/install/manual_install_python.rst": { + "a": 59, + "d": 44 + }, + "doc/overview/cite.rst": { + "a": 4, + "d": 4 + }, + "doc/sphinxext/sphinx_bootstrap_divs/__init__.py": { + "a": 0, + "d": 196 + }, + "doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.css": { + "a": 0, + "d": 9 + }, + "doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.js": { + "a": 0, + "d": 6 + }, + "requirements_doc.txt": { + "a": 2, + "d": 1 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10672.json b/doc/sphinxext/prs/10672.json new file mode 100644 index 00000000000..3f805c8761b --- /dev/null +++ b/doc/sphinxext/prs/10672.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "56436a4f49239c96f74b0cbc6a8369195232cdb5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10674.json b/doc/sphinxext/prs/10674.json new file mode 100644 index 00000000000..1e5b5d97c98 --- /dev/null +++ b/doc/sphinxext/prs/10674.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "85bca4ccc4b957b6597799a7eb92b972a78a7fe1", + "authors": [ + { + "n": "Simon Kern", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10676.json b/doc/sphinxext/prs/10676.json new file mode 100644 index 00000000000..0872ae6739e --- /dev/null +++ b/doc/sphinxext/prs/10676.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "368c7fcee60a55d76d26c6d77bfec08b4cad658b", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "mne/utils/check.py": { + "a": 23, + "d": 2 + }, + "mne/viz/backends/renderer.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10677.json b/doc/sphinxext/prs/10677.json new file mode 100644 index 00000000000..1888aa4dea8 --- /dev/null +++ b/doc/sphinxext/prs/10677.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ee1f4ce8caccc2e8e77a7a520d34a216f2a8fccb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], 
+ "changes": { + "mne/viz/ica.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10681.json b/doc/sphinxext/prs/10681.json new file mode 100644 index 00000000000..515e2672941 --- /dev/null +++ b/doc/sphinxext/prs/10681.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "aafad953b07b0891cf179a00608293171a3226e4", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 4, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 24, + "d": 0 + }, + "mne/export/_brainvision.py": { + "a": 19, + "d": 0 + }, + "mne/export/_export.py": { + "a": 10, + "d": 30 + }, + "mne/export/tests/test_export.py": { + "a": 59, + "d": 14 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/check.py": { + "a": 5, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 36, + "d": 7 + }, + "requirements_testing_extra.txt": { + "a": 1, + "d": 0 + }, + "tutorials/io/20_reading_eeg_data.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10683.json b/doc/sphinxext/prs/10683.json new file mode 100644 index 00000000000..e57dc54c832 --- /dev/null +++ b/doc/sphinxext/prs/10683.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b74d00449e796f0af163c5cfb86386a7b8a98d6e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/install/manual_install.rst": { + "a": 5, + "d": 3 + }, + "doc/install/manual_install_platform_selector.html": { + "a": 0, + "d": 55 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10685.json b/doc/sphinxext/prs/10685.json new file mode 100644 index 00000000000..b3e5b1ee1a3 --- /dev/null +++ b/doc/sphinxext/prs/10685.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bd452f34c2a25f2f65e66ec4ee1ee9f0b0a05062", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10686.json b/doc/sphinxext/prs/10686.json new file mode 100644 index 00000000000..2248fbe4a35 --- /dev/null +++ b/doc/sphinxext/prs/10686.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1cd46a6b846541d9627ec2d9e55a07165fa5e228", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/evoked.py": { + "a": 27, + "d": 20 + }, + "mne/export/tests/test_export.py": { + "a": 3, + "d": 0 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 56, + "d": 11 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10688.json b/doc/sphinxext/prs/10688.json new file mode 100644 index 00000000000..acb4d5360e9 --- /dev/null +++ b/doc/sphinxext/prs/10688.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a877fada44f4241e740dad3dee8ffeb1a50422e3", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 10, + "d": 6 + }, + "mne/channels/tests/test_montage.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10689.json b/doc/sphinxext/prs/10689.json new file mode 100644 index 00000000000..022e55add21 --- /dev/null +++ b/doc/sphinxext/prs/10689.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6df80b5f2e9823dd299f9887e6fbf5b6f8380bbc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 2, + "d": 2 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 2, + "d": 7 + }, + "doc/install/mne_tools_suite.rst": { + "a": 1, + "d": 1 + }, + "doc/links.inc": { + "a": 4, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10692.json b/doc/sphinxext/prs/10692.json new file mode 100644 index 00000000000..ff50fcbd7aa --- /dev/null +++ b/doc/sphinxext/prs/10692.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6c15f996b53996534ebbcc0ef8d5fc8ec27d825e", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 13, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 7, + "d": 44 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10694.json b/doc/sphinxext/prs/10694.json new file mode 100644 index 00000000000..250c47cca34 --- /dev/null +++ b/doc/sphinxext/prs/10694.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "a9b139d00d6ce966dfebc078597ad335ebb425f9", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/html_templates/report/bem.html.jinja": { + "a": 8, + "d": 0 + }, + "mne/html_templates/report/code.html.jinja": { + "a": 6, + "d": 24 + }, + "mne/html_templates/report/epochs.html.jinja": { + "a": 0, + "d": 27 + }, + "mne/html_templates/report/evoked.html.jinja": { + "a": 0, + "d": 26 + }, + "mne/html_templates/report/forward.html.jinja": { + "a": 5, + "d": 23 + }, + "mne/html_templates/report/ica.html.jinja": { + "a": 0, + "d": 29 + }, + "mne/html_templates/report/image.html.jinja": { + "a": 15, + "d": 34 + }, + "mne/html_templates/report/inverse.html.jinja": { + "a": 5, + "d": 23 + }, + "mne/html_templates/report/raw.html.jinja": { + "a": 0, + "d": 25 + }, + "mne/html_templates/report/section.html.jinja": { + "a": 9, + "d": 5 + }, + "mne/html_templates/report/toc.html.jinja": { + "a": 3, + "d": 3 + }, + "mne/report/report.py": { + "a": 793, + "d": 724 + }, + "mne/report/tests/test_report.py": { + "a": 18, + "d": 14 + }, + "mne/utils/docs.py": { + "a": 12, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 29, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10699.json b/doc/sphinxext/prs/10699.json new file mode 100644 index 00000000000..430d2134eb4 --- /dev/null +++ b/doc/sphinxext/prs/10699.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "6b905281bf86ff336f2208297dc0f6794d25c746", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/datasets/opm_data.py": { + "a": 0, + "d": 2 + }, + "mne/_freesurfer.py": { + "a": 14, + "d": 1 + }, + "mne/coreg.py": { + "a": 1, + "d": 15 + }, + "mne/dipole.py": { + "a": 10, + "d": 80 + }, + 
"mne/forward/_field_interpolation.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_freesurfer.py": { + "a": 17, + "d": 2 + }, + "mne/transforms.py": { + "a": 2, + "d": 1 + }, + "mne/utils/check.py": { + "a": 13, + "d": 7 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 7 + }, + "mne/viz/_3d.py": { + "a": 77, + "d": 48 + }, + "mne/viz/_brain/_brain.py": { + "a": 9, + "d": 14 + }, + "mne/viz/_dipole.py": { + "a": 166, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 25, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_utils.py": { + "a": 4, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 18, + "d": 21 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 10, + "d": 0 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10703.json b/doc/sphinxext/prs/10703.json new file mode 100644 index 00000000000..bce15de9c84 --- /dev/null +++ b/doc/sphinxext/prs/10703.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "618eb5480d648a9bd1f4741af4fd08856c1a69fa", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 2 + }, + "mne/channels/tests/test_channels.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10705.json b/doc/sphinxext/prs/10705.json new file mode 100644 index 00000000000..ce96e79bcfd --- /dev/null +++ b/doc/sphinxext/prs/10705.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b2652a68fd503a087ecba38a208603567db214da", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 18, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10710.json b/doc/sphinxext/prs/10710.json new file mode 100644 index 00000000000..0124ae15c3c --- /dev/null +++ b/doc/sphinxext/prs/10710.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ae52496f77ddee08dcaea8daaf4ccbfa11e791b1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".github/ISSUE_TEMPLATE/config.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10711.json b/doc/sphinxext/prs/10711.json new file mode 100644 index 00000000000..452026c1da8 --- /dev/null +++ b/doc/sphinxext/prs/10711.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "36ec7f4299f0085120c61bee628e747c83ada742", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".mailmap": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10714.json b/doc/sphinxext/prs/10714.json new file mode 100644 index 00000000000..eaf70acc041 --- /dev/null +++ b/doc/sphinxext/prs/10714.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "83c716268344f79d2621f2fc7498b0c9e8975b2e", + "authors": [ + { + "n": "Dominique Makowski", + "e": "D.Makowski@sussex.ac.uk" + } + ], + "changes": { + "mne/io/reference.py": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/_csd.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10715.json 
b/doc/sphinxext/prs/10715.json new file mode 100644 index 00000000000..dc4a883b460 --- /dev/null +++ b/doc/sphinxext/prs/10715.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "24e5fe2ec67c5c23bce53a2158cc8cc78907eae6", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 184, + "d": 99 + }, + "mne/commands/mne_flash_bem.py": { + "a": 55, + "d": 35 + }, + "mne/commands/tests/test_commands.py": { + "a": 39, + "d": 12 + }, + "tools/get_minimal_commands.sh": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10716.json b/doc/sphinxext/prs/10716.json new file mode 100644 index 00000000000..3db5aa41e34 --- /dev/null +++ b/doc/sphinxext/prs/10716.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f176d91f0c2f6adeb7f7d6d196ff5271d636735d", + "authors": [ + { + "n": "Jon Houck", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/references.bib": { + "a": 10, + "d": 0 + }, + "tutorials/forward/25_automated_coreg.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10718.json b/doc/sphinxext/prs/10718.json new file mode 100644 index 00000000000..99ae74d917f --- /dev/null +++ b/doc/sphinxext/prs/10718.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "eec5fe96ead56b5d162f667bdca1c1c3ea39832c", + "authors": [ + { + "n": "gemeinl", + "e": "lukas.gemein@gmx.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 14, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10719.json b/doc/sphinxext/prs/10719.json new file mode 100644 index 00000000000..fb9b7a04e35 --- /dev/null +++ b/doc/sphinxext/prs/10719.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f645075a801596753c1ae3625095ee93bc8af8d6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1072.json b/doc/sphinxext/prs/1072.json new file mode 100644 index 00000000000..8956b2a569a --- /dev/null +++ b/doc/sphinxext/prs/1072.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a1efe9cdbeba29f1a34ea96219e3fab14786f74f", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 6, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10720.json b/doc/sphinxext/prs/10720.json new file mode 100644 index 00000000000..a90cc57952c --- /dev/null +++ b/doc/sphinxext/prs/10720.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "237fdfeb5efe4632b4bfeff1adacf87757a6e2ba", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tools/generate_codemeta.py": { + "a": 108, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10722.json b/doc/sphinxext/prs/10722.json new file mode 100644 index 00000000000..1a76d23804f --- /dev/null +++ b/doc/sphinxext/prs/10722.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "948f0dc2745ba44d5add4faabc4090075bf1a770", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/visualization.rst": { + "a": 1, + "d": 0 + }, + "mne/io/pick.py": { + "a": 4, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 23, + "d": 12 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_proj.py": { + "a": 224, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 14, + "d": 9 + }, + "mne/viz/tests/test_proj.py": { + "a": 56, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 21, + "d": 11 + }, + "mne/viz/utils.py": { + "a": 11, + "d": 2 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 51, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10723.json b/doc/sphinxext/prs/10723.json new file mode 100644 index 00000000000..2b2962442a4 --- /dev/null +++ b/doc/sphinxext/prs/10723.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1df52a04d7d11ef9d64d6e8c78cf0b187d4378e5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10727.json b/doc/sphinxext/prs/10727.json new file mode 100644 index 00000000000..30bd1c890e4 --- /dev/null +++ b/doc/sphinxext/prs/10727.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "255cc03e4e0532cbc9bad12b1f65d2b201ca668f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10729.json b/doc/sphinxext/prs/10729.json new file mode 100644 index 00000000000..72066807a16 --- /dev/null +++ b/doc/sphinxext/prs/10729.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4e329679a8d07e88ddcc231837ddc6d6785eca24", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 13, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10730.json b/doc/sphinxext/prs/10730.json new file mode 100644 index 00000000000..f7d5b808e63 --- /dev/null +++ b/doc/sphinxext/prs/10730.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2ca8b24d7b54a018dc5942235e6bc02dfbbee597", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + }, + "tutorials/intro/70_report.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at 
end of file diff --git a/doc/sphinxext/prs/10733.json b/doc/sphinxext/prs/10733.json new file mode 100644 index 00000000000..8a476db768c --- /dev/null +++ b/doc/sphinxext/prs/10733.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5387316b1e8fc0d31bc52993885f2ae77f774463", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 21, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10734.json b/doc/sphinxext/prs/10734.json new file mode 100644 index 00000000000..588fbd65028 --- /dev/null +++ b/doc/sphinxext/prs/10734.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "152b5113d135ebbba196b878e4c6a08a1a937d8c", + "authors": [ + { + "n": "rcmdnk", + "e": "rcmdnk@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 82, + "d": 23 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 14, + "d": 0 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10735.json b/doc/sphinxext/prs/10735.json new file mode 100644 index 00000000000..2f5a771fe38 --- /dev/null +++ b/doc/sphinxext/prs/10735.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a9bdf81d21626c22dc879154367648515bcea3cd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/coreg.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10736.json b/doc/sphinxext/prs/10736.json new file mode 100644 index 00000000000..54da38c3d00 --- /dev/null +++ b/doc/sphinxext/prs/10736.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1310269f27bdf4347dfc465a834c1e04d25563d8", + "authors": [ + { + "n": "Drew, J.", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/viz/_mpl_figure.py": { + "a": 20, + "d": 8 + }, + "mne/viz/tests/test_epochs.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10737.json b/doc/sphinxext/prs/10737.json new file mode 100644 index 00000000000..c5605451b0a --- /dev/null +++ b/doc/sphinxext/prs/10737.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5ff33fa5ae8c1b0004ef894d5fe009140203ed48", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 163, + "d": 73 + }, + "mne/channels/tests/test_montage.py": { + "a": 21, + "d": 25 + }, + "mne/utils/docs.py": { + "a": 10, + "d": 5 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 5, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10739.json b/doc/sphinxext/prs/10739.json new file mode 100644 index 00000000000..6d7c26ab2d9 --- /dev/null +++ b/doc/sphinxext/prs/10739.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5af527d399571e1ac0c84af29fdfd2b4fd676cbd", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/visualization/montage_sgskip.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1074.json b/doc/sphinxext/prs/1074.json new file mode 100644 index 00000000000..f8286bb56b3 --- /dev/null +++ b/doc/sphinxext/prs/1074.json @@ 
-0,0 +1,15 @@ +{ + "merge_commit_sha": "e1f8f96c25b8dc2f278ce7a0d82ef9345ca6173c", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/source_space.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10740.json b/doc/sphinxext/prs/10740.json new file mode 100644 index 00000000000..c700827f6ba --- /dev/null +++ b/doc/sphinxext/prs/10740.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0fefef9cfc063280b13a9d7bc00e8e489f9e3134", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 7, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10748.json b/doc/sphinxext/prs/10748.json new file mode 100644 index 00000000000..c1af87c3193 --- /dev/null +++ b/doc/sphinxext/prs/10748.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5944b870f3d2d1803c0563e73792855f004d8932", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/utils/check.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10749.json b/doc/sphinxext/prs/10749.json new file mode 100644 index 00000000000..abe31ddb046 --- /dev/null +++ b/doc/sphinxext/prs/10749.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "01dce0d268b3b3243d363b1d7f14ac2894030cbe", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 335, + "d": 86 + }, + "mne/channels/data/neighbors/easycapM1_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/ecog256_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/ecog256bipolar_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/eeg1010_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/elec1005_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/elec1010_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/elec1020_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/itab153_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/itab28_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/language29ch-avg_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/mpi_59_channels_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/neuromag122cmb_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/neuromag306cmb_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/yokogawa160_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/yokogawa440_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 88, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10753.json b/doc/sphinxext/prs/10753.json new file mode 100644 index 00000000000..6aec3c31af9 --- /dev/null +++ b/doc/sphinxext/prs/10753.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": 
"0d8f46934d524253b2bcb154460ac9ef17556ac5", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 9, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 17, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10754.json b/doc/sphinxext/prs/10754.json new file mode 100644 index 00000000000..e46e208a468 --- /dev/null +++ b/doc/sphinxext/prs/10754.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "aafa189db6096040088f4d2ffbdae0e4732aed10", + "authors": [ + { + "n": "Frederik-D-Weber", + "e": "Frederik-D-Weber@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10755.json b/doc/sphinxext/prs/10755.json new file mode 100644 index 00000000000..e605d1f9fd8 --- /dev/null +++ b/doc/sphinxext/prs/10755.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "74edf166440b278fb16ffc9fe64cebd272e35ccb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1076.json b/doc/sphinxext/prs/1076.json new file mode 100644 index 00000000000..ff687aa5bd9 --- /dev/null +++ b/doc/sphinxext/prs/1076.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5a3fa6dcddce37802c71be3477abcc8f975eed57", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/sphinxext/gen_rst.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10760.json b/doc/sphinxext/prs/10760.json new file mode 100644 index 00000000000..560e670e9b6 --- /dev/null +++ b/doc/sphinxext/prs/10760.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "04b5ed9afb7722e72e43dd2082bdfc5e4c1c9d52", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 8, + "d": 4 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10762.json b/doc/sphinxext/prs/10762.json new file mode 100644 index 00000000000..12f79781783 --- /dev/null +++ b/doc/sphinxext/prs/10762.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "12dd9c950eeb59fb908afd0293a3a8fdc6e020a9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 26, + "d": 10 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10763.json b/doc/sphinxext/prs/10763.json new file mode 100644 index 00000000000..21f49ad40de --- /dev/null +++ b/doc/sphinxext/prs/10763.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "dabcabf551b5de684fc77d387b865824860f670c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 9, + "d": 9 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 3, + "d": 1 + }, + 
"mne/inverse_sparse/mxne_optim.py": { + "a": 25, + "d": 26 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 9, + "d": 3 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 4, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 4 + }, + "mne/viz/backends/_qt.py": { + "a": 13, + "d": 2 + }, + "mne/viz/backends/renderer.py": { + "a": 0, + "d": 6 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 10, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10764.json b/doc/sphinxext/prs/10764.json new file mode 100644 index 00000000000..a56635e80c4 --- /dev/null +++ b/doc/sphinxext/prs/10764.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ba2754fe3d9f49e486773403e673b04df8bb66e7", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_includes/forward.rst": { + "a": 14, + "d": 12 + }, + "doc/_includes/morph.rst": { + "a": 5, + "d": 4 + }, + "doc/_static/style.css": { + "a": 25, + "d": 16 + }, + "doc/install/contributing.rst": { + "a": 10, + "d": 5 + }, + "doc/install/mne_tools_suite.rst": { + "a": 1, + "d": 0 + }, + "doc/overview/datasets_index.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10765.json b/doc/sphinxext/prs/10765.json new file mode 100644 index 00000000000..11012529a8f --- /dev/null +++ b/doc/sphinxext/prs/10765.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5fb84d6b9ffd7d9d29714795da89cb8be23828b7", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10770.json b/doc/sphinxext/prs/10770.json new file mode 100644 index 00000000000..5cba0db5574 --- /dev/null +++ b/doc/sphinxext/prs/10770.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "faa543feffe057638a7c823479a5b5ad6ab3b3a2", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/preprocessing/interpolate.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10776.json b/doc/sphinxext/prs/10776.json new file mode 100644 index 00000000000..6033cd88be4 --- /dev/null +++ b/doc/sphinxext/prs/10776.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "37a9e5587443109515a85cdc4396af4e014ecf82", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/fixes.py": { + "a": 7, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 4, + "d": 2 + }, + "mne/viz/_mpl_figure.py": { + "a": 56, + "d": 47 + }, + "mne/viz/tests/test_raw.py": { + "a": 56, + "d": 35 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10777.json b/doc/sphinxext/prs/10777.json new file mode 100644 index 00000000000..a15fd9d1f8a --- /dev/null +++ b/doc/sphinxext/prs/10777.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "da02a256423404a81929d6de278bc63d3192a280", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/gui/_core.py": { + "a": 483, + "d": 0 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 79, + "d": 456 + }, + "mne/gui/tests/test_core_gui.py": { + "a": 82, + "d": 0 + }, + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 3, + "d": 17 + } + } +} \ 
No newline at end of file diff --git a/doc/sphinxext/prs/10778.json b/doc/sphinxext/prs/10778.json new file mode 100644 index 00000000000..c064b6c3378 --- /dev/null +++ b/doc/sphinxext/prs/10778.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "428fc90675442acee259a485d818d5b1d671359d", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10781.json b/doc/sphinxext/prs/10781.json new file mode 100644 index 00000000000..f79c6842bc7 --- /dev/null +++ b/doc/sphinxext/prs/10781.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9151f6f33e99f980812344e1f015c6007f837e42", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/ctf/ctf.py": { + "a": 12, + "d": 2 + }, + "mne/io/ctf/eeg.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/hc.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/res4.py": { + "a": 4, + "d": 4 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10787.json b/doc/sphinxext/prs/10787.json new file mode 100644 index 00000000000..83f2c5bd45c --- /dev/null +++ b/doc/sphinxext/prs/10787.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "632e89a2eaafd742058a30a1494b8f06f6ab5f6c", + "authors": [ + { + "n": "Naveen", + "e": null + } + ], + "changes": { + ".github/workflows/linux_pip.yml": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10788.json b/doc/sphinxext/prs/10788.json new file mode 100644 index 00000000000..703a7e4fe10 --- /dev/null +++ b/doc/sphinxext/prs/10788.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0dbdcb68aca0fc282b390f13fd464d3598dee5e2", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1079.json b/doc/sphinxext/prs/1079.json new file mode 100644 index 00000000000..370934afb97 --- /dev/null +++ b/doc/sphinxext/prs/1079.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "93afc83bd996b43b8ed3b4a02a6774b3fbab875b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 5, + "d": 5 + }, + "mne/utils.py": { + "a": 39, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10792.json b/doc/sphinxext/prs/10792.json new file mode 100644 index 00000000000..83fc2ab28d4 --- /dev/null +++ b/doc/sphinxext/prs/10792.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6021d4a49dd5f6064b2af58e6309b08c72e02606", + "authors": [ + { + "n": "Naveen", + "e": null + } + ], + "changes": { + ".github/dependabot.yml": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10793.json b/doc/sphinxext/prs/10793.json new file mode 100644 index 00000000000..ca42156877d --- /dev/null +++ b/doc/sphinxext/prs/10793.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9423ce676734d74e0551132ab4d6698d9bab3807", + "authors": [ + { + "n": "Alex Ciok", + "e": null + } + ], + "changes": { + 
"tutorials/README.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10794.json b/doc/sphinxext/prs/10794.json new file mode 100644 index 00000000000..c63595897a3 --- /dev/null +++ b/doc/sphinxext/prs/10794.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "bd1b4cc7029a90e2124795596b7396f6f9026909", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/compat_minimal.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/linux_conda.yml": { + "a": 2, + "d": 2 + }, + ".github/workflows/linux_pip.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/macos_conda.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10795.json b/doc/sphinxext/prs/10795.json new file mode 100644 index 00000000000..2dab2f0a0e1 --- /dev/null +++ b/doc/sphinxext/prs/10795.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7218ac1f609211875e1a02a25a0c8d112ab799c2", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/linux_pip.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10796.json b/doc/sphinxext/prs/10796.json new file mode 100644 index 00000000000..412bcd50b0d --- /dev/null +++ b/doc/sphinxext/prs/10796.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "562f4730dd3d462abb4ec134270c7cf0a9d53b0e", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/linux_conda.yml": { + "a": 2, + "d": 2 + }, + ".github/workflows/linux_pip.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/macos_conda.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10797.json b/doc/sphinxext/prs/10797.json new file mode 100644 index 00000000000..643e019f970 --- /dev/null +++ b/doc/sphinxext/prs/10797.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "af66aa0dc8a056797f591cf6fa7216d56e386bf9", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/linux_conda.yml": { + "a": 2, + "d": 2 + }, + ".github/workflows/linux_pip.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/macos_conda.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10800.json b/doc/sphinxext/prs/10800.json new file mode 100644 index 00000000000..145d35dcd5c --- /dev/null +++ b/doc/sphinxext/prs/10800.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5e7e35ad0c50fb0de42872e2743c4fae2e511d90", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10802.json b/doc/sphinxext/prs/10802.json new file 
mode 100644 index 00000000000..6a6e8dd1988 --- /dev/null +++ b/doc/sphinxext/prs/10802.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "db6c8221efeb4d7de881c937012dca8ff9fcf2f8", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 5, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_coreg.py": { + "a": 1, + "d": 0 + }, + "mne/gui/__init__.py": { + "a": 53, + "d": 35 + }, + "mne/gui/_coreg.py": { + "a": 6, + "d": 1 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 4, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 20, + "d": 0 + }, + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 10, + "d": 9 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 18, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10804.json b/doc/sphinxext/prs/10804.json new file mode 100644 index 00000000000..1e93688b2ea --- /dev/null +++ b/doc/sphinxext/prs/10804.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0dc113d4abda83aa3e3dcf865ce43bdad3a568b2", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 19, + "d": 38 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10820.json b/doc/sphinxext/prs/10820.json new file mode 100644 index 00000000000..68396882723 --- /dev/null +++ b/doc/sphinxext/prs/10820.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c75dfffcf03c2c061a37db6ba83171e68a91faba", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/macos_conda.yml": { + "a": 9, + "d": 4 + }, + "tools/get_minimal_commands.sh": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10824.json b/doc/sphinxext/prs/10824.json new file mode 100644 index 00000000000..4b409c05f48 --- /dev/null +++ b/doc/sphinxext/prs/10824.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "89b6357edb0dfed9a56f83e43815a42700e6cd3c", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10825.json b/doc/sphinxext/prs/10825.json new file mode 100644 index 00000000000..589e9edae1d --- /dev/null +++ b/doc/sphinxext/prs/10825.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3ff8e9aced24d0e62e4a17348aa940b749500e9d", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10829.json b/doc/sphinxext/prs/10829.json new file mode 100644 index 00000000000..f2e5b8b3c25 --- /dev/null +++ b/doc/sphinxext/prs/10829.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "23a8da684a4debe87b8de99dda6fba399ac22063", + "authors": [ + { + "n": "Carina", + "e": "carinaforster0611@gmail.com" + } + ], + "changes": { + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1083.json b/doc/sphinxext/prs/1083.json new file mode 100644 index 00000000000..bff5ef0386d --- /dev/null +++ b/doc/sphinxext/prs/1083.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "99da520ca4eb85624d49e91e79c7a39a279bd27c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 7, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10830.json b/doc/sphinxext/prs/10830.json new file mode 100644 index 00000000000..19ba7f6e2bd --- /dev/null +++ b/doc/sphinxext/prs/10830.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c5e14281bfef445c245bd54633c48456427ef0b7", + "authors": [ + { + "n": "Dominik Welke", + "e": "dominik.welke@web.de" + } + ], + "changes": { + "examples/forward/source_space_morphing.py": { + "a": 8, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10832.json b/doc/sphinxext/prs/10832.json new file mode 100644 index 00000000000..d32b3a401e7 --- /dev/null +++ b/doc/sphinxext/prs/10832.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ff40ce33418a786b23b534a6cfcb44cca5f6b105", + "authors": [ + { + "n": "kimcoco", + "e": "kimjico@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/inverse/evoked_ers_source_power.py": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10838.json b/doc/sphinxext/prs/10838.json new file mode 100644 index 00000000000..52d3acabb78 --- /dev/null +++ b/doc/sphinxext/prs/10838.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6eaca414e4b3dc89f64a2e5287ff9d6d8eb1ceb0", + "authors": [ + { + "n": "Carina", + "e": "carinaforster0611@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/io/read_noise_covariance_matrix.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10839.json b/doc/sphinxext/prs/10839.json new file mode 100644 index 00000000000..0e638a183ec --- /dev/null +++ b/doc/sphinxext/prs/10839.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "486519911d25ad2cc34c9bf6ad7c0a38e7f8aabd", + "authors": [ + { + "n": "Carina", + "e": "carinaforster0611@gmail.com" + } + ], + "changes": { + "mne/preprocessing/infomax_.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1084.json b/doc/sphinxext/prs/1084.json new file mode 100644 index 00000000000..e35f5bbe04a --- /dev/null +++ b/doc/sphinxext/prs/1084.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6cca18958f4e297ed3ec239e1ecacf9a0deb1367", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 4, + "d": 0 + }, + "examples/plot_extract_events_from_raw.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_viz.py": { + "a": 7, + "d": 1 + }, + "mne/viz.py": { + "a": 36, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10841.json b/doc/sphinxext/prs/10841.json new file mode 100644 index 00000000000..facf2c123a8 --- /dev/null +++ b/doc/sphinxext/prs/10841.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "8f6305a0db273e91de8d965f1fe84a550febb876", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 0 + }, + "examples/decoding/decoding_unsupervised_spatial_filter.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 3, + "d": 3 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 16, + "d": 63 + }, + 
"mne/io/eeglab/tests/test_eeglab.py": { + "a": 3, + "d": 9 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_csd.py": { + "a": 1, + "d": 4 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10843.json b/doc/sphinxext/prs/10843.json new file mode 100644 index 00000000000..9120292f5be --- /dev/null +++ b/doc/sphinxext/prs/10843.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e8b76b94dee4176ceb48e72bd284b5fb3db5c837", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "examples/inverse/dics_source_power.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10844.json b/doc/sphinxext/prs/10844.json new file mode 100644 index 00000000000..1e643f3263d --- /dev/null +++ b/doc/sphinxext/prs/10844.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c72cdaf002b5ecae75cc5f525e224bcac153acba", + "authors": [ + { + "n": "Adeline Fecker", + "e": null + } + ], + "changes": { + "examples/forward/left_cerebellum_volume_source.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10845.json b/doc/sphinxext/prs/10845.json new file mode 100644 index 00000000000..7a23ab0beea --- /dev/null +++ b/doc/sphinxext/prs/10845.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "380efbf63e5604c65c733567e744d36a224959cd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 14, + "d": 67 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10846.json b/doc/sphinxext/prs/10846.json new file mode 100644 index 00000000000..6e8daf60241 --- /dev/null +++ b/doc/sphinxext/prs/10846.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "6c43b2d398780e2e69ea2f5b2e3db6b59135825c", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "jadrew43", + "e": "39603454+jadrew43@users.noreply.github.com" + }, + { + "n": "Dominik Welke", + "e": "dominik.welke@web.de" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + } + ], + "changes": { + "examples/inverse/mixed_source_space_inverse.py": { + "a": 13, + "d": 14 + }, + "mne/forward/_make_forward.py": { + "a": 4, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 6, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10849.json b/doc/sphinxext/prs/10849.json new file mode 100644 index 00000000000..0d00d33bbef --- /dev/null +++ b/doc/sphinxext/prs/10849.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5b582a19bf79164bd86f297e467e3b1117a2a4eb", + "authors": [ + { + "n": "Hamid", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/inverse/mne_cov_power.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1085.json b/doc/sphinxext/prs/1085.json new file mode 100644 index 
00000000000..38892a4043e --- /dev/null +++ b/doc/sphinxext/prs/1085.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2d96346a43f37fe8fa1a9bccd348cfd618043c1c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 183, + "d": 5 + }, + "mne/tests/test_label.py": { + "a": 46, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10851.json b/doc/sphinxext/prs/10851.json new file mode 100644 index 00000000000..42061e91889 --- /dev/null +++ b/doc/sphinxext/prs/10851.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "7e71888056a2f922fdc4e9e42454294197b0cffc", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 4, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/egi.py": { + "a": 3, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10856.json b/doc/sphinxext/prs/10856.json new file mode 100644 index 00000000000..4e50615f534 --- /dev/null +++ b/doc/sphinxext/prs/10856.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "82b65ab7451bbdd9ae49c363c7018bfde554be12", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 4, + "d": 1 + }, + "mne/source_space.py": { + "a": 13, + "d": 4 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10858.json b/doc/sphinxext/prs/10858.json new file mode 100644 index 00000000000..b71d45c6e6c --- /dev/null +++ b/doc/sphinxext/prs/10858.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7f11a236a54e2175c6e871a88d69803e3281b99a", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "examples/inverse/snr_estimate.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10859.json b/doc/sphinxext/prs/10859.json new file mode 100644 index 00000000000..8afdac97c57 --- /dev/null +++ b/doc/sphinxext/prs/10859.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7cb4523eacd6bdc054721570de70ecfcfa38a42d", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/inverse/multidict_reweighted_tfmxne.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10861.json b/doc/sphinxext/prs/10861.json new file mode 100644 index 00000000000..2781b876c29 --- /dev/null +++ b/doc/sphinxext/prs/10861.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e49525c470b01b04c7270dc054bbfa7600021262", + "authors": [ + { + "n": "Drew, J.", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10862.json 
b/doc/sphinxext/prs/10862.json new file mode 100644 index 00000000000..03eb940192a --- /dev/null +++ b/doc/sphinxext/prs/10862.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2ac8b164dc5dd7ddfdd5b82a7c013eba4672cb61", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "examples/io/read_xdf.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10863.json b/doc/sphinxext/prs/10863.json new file mode 100644 index 00000000000..d41273d8512 --- /dev/null +++ b/doc/sphinxext/prs/10863.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1ae1c28c42496c4465494b46aa69eec2313eb3f5", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "examples/preprocessing/movement_compensation.py": { + "a": 4, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10864.json b/doc/sphinxext/prs/10864.json new file mode 100644 index 00000000000..8af3101adba --- /dev/null +++ b/doc/sphinxext/prs/10864.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c5e683eac98803c6ba7bdbb8af439a2766f86a94", + "authors": [ + { + "n": "Dominik Welke", + "e": "dominik.welke@web.de" + } + ], + "changes": { + "examples/preprocessing/movement_detection.py": { + "a": 5, + "d": 8 + }, + "examples/preprocessing/muscle_detection.py": { + "a": 1, + "d": 2 + }, + "examples/preprocessing/muscle_ica.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10865.json b/doc/sphinxext/prs/10865.json new file mode 100644 index 00000000000..e2976d5dae8 --- /dev/null +++ b/doc/sphinxext/prs/10865.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b58d6a52353d753353e0c7822ce9d1276d045e84", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 9, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10866.json b/doc/sphinxext/prs/10866.json new file mode 100644 index 00000000000..593f2db970f --- /dev/null +++ b/doc/sphinxext/prs/10866.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0452d69396ade5186bfad166aa7c7f608b288867", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/ctf/ctf.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10868.json b/doc/sphinxext/prs/10868.json new file mode 100644 index 00000000000..3d1ef5fe85a --- /dev/null +++ b/doc/sphinxext/prs/10868.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4b612d2e6411e64e27d6d9522c651e0d90bf1b8d", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "examples/inverse/morph_surface_stc.py": { + "a": 7, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10870.json b/doc/sphinxext/prs/10870.json new file mode 100644 index 00000000000..ea440a75a7c --- /dev/null +++ b/doc/sphinxext/prs/10870.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3426356b3ec34787c2ac478dff1302cabfe7df99", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "examples/datasets/opm_data.py": { + "a": 6, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10873.json b/doc/sphinxext/prs/10873.json new file mode 100644 index 00000000000..07b6d8e1c8b --- /dev/null +++ b/doc/sphinxext/prs/10873.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f0948f862c82159781ce822b2bd7614298f924be", + 
"authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/install/installers.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10874.json b/doc/sphinxext/prs/10874.json new file mode 100644 index 00000000000..e3e2f730100 --- /dev/null +++ b/doc/sphinxext/prs/10874.json @@ -0,0 +1,191 @@ +{ + "merge_commit_sha": "fdffb6eb95edb3858dd5884b5930b20c8d411448", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_includes/forward.rst": { + "a": 4, + "d": 4 + }, + "doc/_includes/morph.rst": { + "a": 2, + "d": 2 + }, + "doc/_static/funding/anr.svg": { + "a": 73, + "d": 6 + }, + "doc/_static/funding/erc-dark.svg": { + "a": 662, + "d": 0 + }, + "doc/_static/funding/erc.svg": { + "a": 2, + "d": 2 + }, + "doc/_static/funding/nih.png": { + "a": 0, + "d": 0 + }, + "doc/_static/funding/nih.svg": { + "a": 83, + "d": 0 + }, + "doc/_static/institution_logos/Aalto-dark.svg": { + "a": 2466, + "d": 0 + }, + "doc/_static/institution_logos/Aarhus-dark.svg": { + "a": 65, + "d": 0 + }, + "doc/_static/institution_logos/Aarhus.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Aarhus.svg": { + "a": 65, + "d": 0 + }, + "doc/_static/institution_logos/BIDS-dark.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/CHOP-dark.svg": { + "a": 1, + "d": 0 + }, + "doc/_static/institution_logos/Graz.jpg": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Graz.svg": { + "a": 1, + "d": 0 + }, + "doc/_static/institution_logos/Ilmenau-dark.svg": { + "a": 91, + "d": 0 + }, + "doc/_static/institution_logos/Ilmenau.gif": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Ilmenau.svg": { + "a": 91, + "d": 0 + }, + "doc/_static/institution_logos/Inserm-dark.svg": { + "a": 46, + "d": 0 + }, + "doc/_static/institution_logos/Inserm.svg": { + "a": 6, + "d": 6 + }, + "doc/_static/institution_logos/Julich-dark.svg": { + "a": 55, + "d": 0 + }, + "doc/_static/institution_logos/Julich.svg": { + "a": 33, + "d": 19 + }, + "doc/_static/institution_logos/MPIB-dark.svg": { + "a": 3, + "d": 0 + }, + "doc/_static/institution_logos/Macquarie-dark.svg": { + "a": 165, + "d": 0 + }, + "doc/_static/institution_logos/Macquarie.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Macquarie.svg": { + "a": 222, + "d": 0 + }, + "doc/_static/institution_logos/NYU-dark.svg": { + "a": 66, + "d": 0 + }, + "doc/_static/institution_logos/NYU.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/NYU.svg": { + "a": 66, + "d": 0 + }, + "doc/_static/institution_logos/SWPS-dark.svg": { + "a": 151, + "d": 0 + }, + "doc/_static/institution_logos/Washington-dark.svg": { + "a": 34, + "d": 0 + }, + "doc/_static/institution_logos/Washington.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Washington.svg": { + "a": 34, + "d": 0 + }, + "doc/_static/style.css": { + "a": 115, + "d": 69 + }, + "doc/_templates/homepage.html": { + "a": 4, + "d": 4 + }, + "doc/conf.py": { + "a": 103, + "d": 30 + }, + "doc/funding.rst": { + "a": 5, + "d": 2 + }, + "doc/install/check_installation.rst": { + "a": 1, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 54, + "d": 25 + }, + "doc/install/manual_install_python.rst": { + "a": 1, + "d": 0 + }, + "doc/install/mne_tools_suite.rst": { + "a": 2, + "d": 2 + }, + "doc/install/updating.rst": { + "a": 4, + "d": 1 + }, + "doc/overview/cite.rst": { + "a": 2, + "d": 0 + }, + "doc/overview/datasets_index.rst": { + "a": 
2, + "d": 2 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10875.json b/doc/sphinxext/prs/10875.json new file mode 100644 index 00000000000..5463b3c2809 --- /dev/null +++ b/doc/sphinxext/prs/10875.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b0a415d83d90e365e8a60483a0885b3b7ad3609b", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "tutorials/clinical/10_ieeg_localize.py": { + "a": 18, + "d": 21 + }, + "tutorials/clinical/20_seeg.py": { + "a": 6, + "d": 8 + }, + "tutorials/clinical/30_ecog.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10876.json b/doc/sphinxext/prs/10876.json new file mode 100644 index 00000000000..64fa3dc2e73 --- /dev/null +++ b/doc/sphinxext/prs/10876.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5c5c7bad9eee72758567ec65306b5793d0640be8", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "tutorials/epochs/10_epochs_overview.py": { + "a": 2, + "d": 3 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 4, + "d": 5 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 1, + "d": 3 + }, + "tutorials/epochs/50_epochs_to_data_frame.py": { + "a": 4, + "d": 5 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10879.json b/doc/sphinxext/prs/10879.json new file mode 100644 index 00000000000..76a0809fecd --- /dev/null +++ b/doc/sphinxext/prs/10879.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3180e0b2061b008886873de518686debce2fe47d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10880.json b/doc/sphinxext/prs/10880.json new file mode 100644 index 00000000000..fb45282d9e9 --- /dev/null +++ b/doc/sphinxext/prs/10880.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fe9f5a503377bc30683f22a16a4490e54111eb46", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 4, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10884.json b/doc/sphinxext/prs/10884.json new file mode 100644 index 00000000000..f8268db75b9 --- /dev/null +++ b/doc/sphinxext/prs/10884.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "386d4cf56551a248637c5291d5b1568dbcd26d1e", + "authors": [ + { + "n": "Dominik Welke", + "e": "dominik.welke@web.de" + } + ], + "changes": { + "doc/_includes/forward.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10886.json b/doc/sphinxext/prs/10886.json new file mode 100644 index 00000000000..09079728b48 --- /dev/null +++ b/doc/sphinxext/prs/10886.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e579fdb81d7d0ca30e7821cd61ea52ec6189e388", + "authors": [ + { + "n": "Carina", + "e": "carinaforster0611@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Phillip Alday", + "e": "palday@users.noreply.github.com" + }, + { + "n": "Phillip Alday", + "e": "palday@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Phillip Alday", + "e": "palday@users.noreply.github.com" + } + ], + 
"changes": { + "doc/references.bib": { + "a": 10, + "d": 0 + }, + "tutorials/epochs/15_baseline_regression.py": { + "a": 285, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10887.json b/doc/sphinxext/prs/10887.json new file mode 100644 index 00000000000..2229b688314 --- /dev/null +++ b/doc/sphinxext/prs/10887.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3513c9472a56495415a2c8827d5083c650095050", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/inverse/mixed_source_space_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 7, + "d": 6 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 5, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10889.json b/doc/sphinxext/prs/10889.json new file mode 100644 index 00000000000..de4eda4715f --- /dev/null +++ b/doc/sphinxext/prs/10889.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "37bbe2757f062875049ff66e6bb2cdc8d1dcc412", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/decoding/tests/test_search_light.py": { + "a": 5, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10891.json b/doc/sphinxext/prs/10891.json new file mode 100644 index 00000000000..c4723026466 --- /dev/null +++ b/doc/sphinxext/prs/10891.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4d388dea2a305dc7ca97a1b556c81a4437e8732d", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "tutorials/forward/10_background_freesurfer.py": { + "a": 1, + "d": 2 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 9, + "d": 10 + }, + "tutorials/forward/30_forward.py": { + "a": 5, + "d": 6 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 13, + "d": 16 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10892.json b/doc/sphinxext/prs/10892.json new file mode 100644 index 00000000000..83085b59945 --- /dev/null +++ b/doc/sphinxext/prs/10892.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "6921ed20975ae0b37be447bb5d2a185262f08ae7", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/file_io.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/besa/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/besa/besa.py": { + "a": 243, + "d": 0 + }, + "mne/io/besa/tests/data/simulation.avr": { + "a": 35, + "d": 0 + }, + "mne/io/besa/tests/data/simulation.elp": { + "a": 33, + "d": 0 + }, + "mne/io/besa/tests/data/simulation.mul": { + "a": 202, + "d": 0 + }, + "mne/io/besa/tests/data/simulation_oldstyle.avr": { + "a": 34, + "d": 0 + }, + "mne/io/besa/tests/data/simulation_oldstyle.elp": { + "a": 33, + "d": 0 + }, + "mne/io/besa/tests/test_besa.py": { + "a": 74, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10893.json b/doc/sphinxext/prs/10893.json new file mode 100644 index 00000000000..89c668677d8 --- /dev/null +++ 
b/doc/sphinxext/prs/10893.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "fdcfac7cf2b2620a8b16af595a31ada1bc0b0592", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "tutorials/intro/10_overview.py": { + "a": 5, + "d": 6 + }, + "tutorials/intro/15_inplace.py": { + "a": 2, + "d": 3 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 3, + "d": 4 + }, + "tutorials/intro/30_info.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10894.json b/doc/sphinxext/prs/10894.json new file mode 100644 index 00000000000..5857c35947c --- /dev/null +++ b/doc/sphinxext/prs/10894.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8a7658b615a67845a54302c357cd66d86f56cc99", + "authors": [ + { + "n": "Archit Singhal", + "e": null + }, + { + "n": "archit singhal", + "e": "zyphergiest@pop-os.localdomain" + } + ], + "changes": { + "examples/preprocessing/otp.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10895.json b/doc/sphinxext/prs/10895.json new file mode 100644 index 00000000000..7e4afa84efe --- /dev/null +++ b/doc/sphinxext/prs/10895.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b3983fce7d06314275dc964c1d737fb904d8c9f2", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 8, + "d": 7 + }, + "doc/links.inc": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10896.json b/doc/sphinxext/prs/10896.json new file mode 100644 index 00000000000..6b94c3cb8d7 --- /dev/null +++ b/doc/sphinxext/prs/10896.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3c23f13c0262118d075de0719248409bdc838982", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "tutorials/inverse/10_stc_class.py": { + "a": 3, + "d": 5 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 6, + "d": 7 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 7, + "d": 9 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 2 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10897.json b/doc/sphinxext/prs/10897.json new file mode 100644 index 00000000000..d797ba6755d --- /dev/null +++ b/doc/sphinxext/prs/10897.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3b2e6c914dbb820fec74cd1f4aca6e29e7e5c558", + "authors": [ + { + "n": "ashdrew", + "e": "ashdrew@uw.edu" + } + ], + "changes": { + "tutorials/io/30_reading_fnirs_data.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10898.json b/doc/sphinxext/prs/10898.json new file mode 100644 index 00000000000..03a84eaa81e --- /dev/null +++ b/doc/sphinxext/prs/10898.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "21c61576c2dce056a40946af0820d56e2259ec5f", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 39, + "d": 19 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 31, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/109.json 
b/doc/sphinxext/prs/109.json new file mode 100644 index 00000000000..f9103da0eb2 --- /dev/null +++ b/doc/sphinxext/prs/109.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "971aabad689f18951f810605125bd865c88315a0", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 8, + "d": 30 + }, + "mne/fiff/proj.py": { + "a": 54, + "d": 0 + }, + "mne/fiff/raw.py": { + "a": 121, + "d": 83 + }, + "mne/fiff/tests/test_raw.py": { + "a": 30, + "d": 0 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 20, + "d": 3 + }, + "mne/utils.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1090.json b/doc/sphinxext/prs/1090.json new file mode 100644 index 00000000000..72ffb41fddd --- /dev/null +++ b/doc/sphinxext/prs/1090.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "44c41d463ca82924096c47605bf9afcdf940a745", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 21, + "d": 7 + }, + "mne/tests/test_viz.py": { + "a": 11, + "d": 2 + }, + "mne/viz.py": { + "a": 81, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10900.json b/doc/sphinxext/prs/10900.json new file mode 100644 index 00000000000..1e2237b5d89 --- /dev/null +++ b/doc/sphinxext/prs/10900.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a424fba542b7df53a5dcec12c518c7f6af64a740", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/institutional-partners.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10906.json b/doc/sphinxext/prs/10906.json new file mode 100644 index 00000000000..4adce52a70b --- /dev/null +++ b/doc/sphinxext/prs/10906.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4351e611364be97c9bd156535ba73735bf6ea78d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/glossary.rst": { + "a": 6, + "d": 3 + }, + "mne/datasets/_fetch.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 67, + "d": 2 + }, + "mne/source_space.py": { + "a": 130, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10907.json b/doc/sphinxext/prs/10907.json new file mode 100644 index 00000000000..b29a141e06e --- /dev/null +++ b/doc/sphinxext/prs/10907.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8a837ec2c6544589747bfd7aec39115cda3ecff5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1091.json b/doc/sphinxext/prs/1091.json new file mode 100644 index 00000000000..818055842d2 --- /dev/null +++ b/doc/sphinxext/prs/1091.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "05c8f5202322ba4c41827e5b9d459eedb3216138", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 93, + "d": 0 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/ems.py": { + "a": 117, + "d": 0 + }, + "mne/decoding/tests/test_ems.py": { + "a": 61, + "d": 0 + }, + "mne/fiff/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 28, + "d": 19 + }, + "mne/utils.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10911.json b/doc/sphinxext/prs/10911.json new file mode 100644 index 00000000000..e5626f47a73 --- /dev/null +++ b/doc/sphinxext/prs/10911.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "86d3c2a9559d1e7cd59ae806fcdc25051becc9de", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/autosummary/class_no_inherited_members.rst": { + "a": 13, + "d": 0 + }, + "doc/conf.py": { + "a": 4, + "d": 0 + }, + "doc/forward.rst": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10912.json b/doc/sphinxext/prs/10912.json new file mode 100644 index 00000000000..9e4c5199b00 --- /dev/null +++ b/doc/sphinxext/prs/10912.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "73247a9a5afb5cebc72f1c63b08efc015a1a8955", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/reference.py": { + "a": 5, + "d": 1 + }, + "mne/io/tests/test_reference.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10913.json b/doc/sphinxext/prs/10913.json new file mode 100644 index 00000000000..e830fb6f44c --- /dev/null +++ b/doc/sphinxext/prs/10913.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "4c219c20a2ce07cde7dc26875200eeb6d7abeffb", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 5, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 423, + "d": 31 + }, + "mne/viz/backends/_notebook.py": { + "a": 729, + "d": 11 + }, + "mne/viz/backends/_qt.py": { + "a": 734, + "d": 26 + }, + "mne/viz/backends/renderer.py": { + "a": 5, + "d": 0 + }, + "mne/viz/backends/tests/test_abstract.py": { + "a": 109, + "d": 0 + }, + "mne/viz/backends/tests/test_abstract_notebook.py": { + "a": 30, + "d": 0 + }, + "requirements.txt": { + "a": 2, + "d": 0 + }, + "server_environment.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10917.json b/doc/sphinxext/prs/10917.json new file mode 100644 index 00000000000..fb573afb286 --- /dev/null +++ b/doc/sphinxext/prs/10917.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c6d26627a3b0a8ceae33a2f1e59f81207e4d4456", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1092.json b/doc/sphinxext/prs/1092.json new file mode 100644 index 00000000000..dec79c60099 --- /dev/null +++ b/doc/sphinxext/prs/1092.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"690bc1dd7e000c0ae55dd99a274cbb374e956462", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/fiff/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/meas_info.py": { + "a": 24, + "d": 1 + }, + "mne/fiff/tests/test_meas_info.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10920.json b/doc/sphinxext/prs/10920.json new file mode 100644 index 00000000000..73217277729 --- /dev/null +++ b/doc/sphinxext/prs/10920.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "2d1babfeef86f6294d5f7a981026f13489dd3e0b", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/mri.rst": { + "a": 1, + "d": 0 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 95, + "d": 32 + }, + "mne/gui/__init__.py": { + "a": 96, + "d": 1 + }, + "mne/gui/_core.py": { + "a": 33, + "d": 33 + }, + "mne/gui/_vol_stc.py": { + "a": 896, + "d": 0 + }, + "mne/gui/tests/test_core.py": { + "a": 2, + "d": 2 + }, + "mne/gui/tests/test_vol_stc.py": { + "a": 202, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 31, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10922.json b/doc/sphinxext/prs/10922.json new file mode 100644 index 00000000000..78c82245613 --- /dev/null +++ b/doc/sphinxext/prs/10922.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "73e1d858e1aad07f12c649ef1b84707d98dd59a7", + "authors": [ + { + "n": "Mingjian He", + "e": null + } + ], + "changes": { + "doc/_includes/inverse.rst": { + "a": 3, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10923.json b/doc/sphinxext/prs/10923.json new file mode 100644 index 00000000000..734e165ba3e --- /dev/null +++ b/doc/sphinxext/prs/10923.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "94433a355fe164dca8154e6a27e0442cb6844801", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/linux_pip.yml": { + "a": 3, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 4, + "d": 0 + }, + "tools/setup_xvfb.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10925.json b/doc/sphinxext/prs/10925.json new file mode 100644 index 00000000000..859f14132eb --- /dev/null +++ b/doc/sphinxext/prs/10925.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "febd79800e325aad32eda57b63ad8bd95ae6ea9d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 15, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 3, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10926.json b/doc/sphinxext/prs/10926.json new 
file mode 100644 index 00000000000..ca2a42f702a --- /dev/null +++ b/doc/sphinxext/prs/10926.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e09482b016d1a1a4284edd837cf3c130730386d3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10927.json b/doc/sphinxext/prs/10927.json new file mode 100644 index 00000000000..5ee1d621b68 --- /dev/null +++ b/doc/sphinxext/prs/10927.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b733f72c4f2fdffcaf1248491368c7240e30c7f8", + "authors": [ + { + "n": "Reza", + "e": "rezanasri@outlook.com" + }, + { + "n": "RezaNasri", + "e": "rezanasri@outlook.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/export/_eeglab.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1093.json b/doc/sphinxext/prs/1093.json new file mode 100644 index 00000000000..65aa7eef216 --- /dev/null +++ b/doc/sphinxext/prs/1093.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cdc5e465a353f8b018786989b6b28e0f7c7e4939", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/surface.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10930.json b/doc/sphinxext/prs/10930.json new file mode 100644 index 00000000000..dacd42670d7 --- /dev/null +++ b/doc/sphinxext/prs/10930.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "171371169a519062296fc370d3050ba6a7146de5", + "authors": [ + { + "n": "Christian O'Reilly", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/surface.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 46, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10933.json b/doc/sphinxext/prs/10933.json new file mode 100644 index 00000000000..ac17ddf17ea --- /dev/null +++ b/doc/sphinxext/prs/10933.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6955f977ac12612f38ab30277dcfff94315a0463", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fixes.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10936.json b/doc/sphinxext/prs/10936.json new file mode 100644 index 00000000000..f5ae1f5117b --- /dev/null +++ b/doc/sphinxext/prs/10936.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e8ded9719de7425c8d6752d2942a6ecba20fd0fc", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10938.json b/doc/sphinxext/prs/10938.json new file mode 100644 index 00000000000..4c5565c98e8 --- /dev/null +++ b/doc/sphinxext/prs/10938.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c6a86f15b8ace3758443f392ba8bfe5a9a0c2878", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 5, + "d": 1 + } + 
} +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1094.json b/doc/sphinxext/prs/1094.json new file mode 100644 index 00000000000..ea9ec16c267 --- /dev/null +++ b/doc/sphinxext/prs/1094.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "034cc9d43d7bbfde25ea0bf02bf2428806a214bf", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10940.json b/doc/sphinxext/prs/10940.json new file mode 100644 index 00000000000..449806b307e --- /dev/null +++ b/doc/sphinxext/prs/10940.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "80c3596228287c5577be6b921f25c426edef2aec", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 4, + "d": 79 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 15, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 12, + "d": 10 + }, + "mne/utils/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/utils/mixin.py": { + "a": 81, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10942.json b/doc/sphinxext/prs/10942.json new file mode 100644 index 00000000000..4e9068d5066 --- /dev/null +++ b/doc/sphinxext/prs/10942.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "a8e9495685531fc0fd2f52fc4bec32b769ec936f", + "authors": [ + { + "n": "TzionaN", + "e": null + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 9, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 14, + "d": 0 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10944.json b/doc/sphinxext/prs/10944.json new file mode 100644 index 00000000000..7d10e54fda4 --- /dev/null +++ b/doc/sphinxext/prs/10944.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6c2b485d128643a5e96ee45fbe496ecea9438f28", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/utils/check.py": { + "a": 19, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10945.json b/doc/sphinxext/prs/10945.json new file mode 100644 index 00000000000..42cb3144e51 --- /dev/null +++ b/doc/sphinxext/prs/10945.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "23f37cdaf1b5e2b2b8d0e7c38de2b0483b38673d", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 11, + "d": 10 + }, + "mne/epochs.py": { + "a": 8, + "d": 28 + }, + "mne/evoked.py": { + "a": 14, + "d": 132 + }, + "mne/filter.py": { + "a": 3, + "d": 6 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 66 + }, + "mne/report/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 11, + "d": 9 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 10, + "d": 17 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 2 + }, + 
"mne/utils/mixin.py": { + "a": 182, + "d": 54 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10949.json b/doc/sphinxext/prs/10949.json new file mode 100644 index 00000000000..2c6d4301e62 --- /dev/null +++ b/doc/sphinxext/prs/10949.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "82be959161b4911faba40aca7604236c4ee0f3ec", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "server_environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1095.json b/doc/sphinxext/prs/1095.json new file mode 100644 index 00000000000..39c5a0a8c21 --- /dev/null +++ b/doc/sphinxext/prs/1095.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "034ffce93c6e2bab7aedfce5ae0ed0ecbc887e4b", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/sphinxext/gen_rst.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10950.json b/doc/sphinxext/prs/10950.json new file mode 100644 index 00000000000..d8005bb05f9 --- /dev/null +++ b/doc/sphinxext/prs/10950.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b9287882beb8e04ad362b333ad5f14047a8c07ff", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 14, + "d": 9 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10951.json b/doc/sphinxext/prs/10951.json new file mode 100644 index 00000000000..bf1176d40e8 --- /dev/null +++ b/doc/sphinxext/prs/10951.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6a0b02dc06290d0f7f89c6c35310b9e7216ee58c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10953.json b/doc/sphinxext/prs/10953.json new file mode 100644 index 00000000000..b27707c9240 --- /dev/null +++ b/doc/sphinxext/prs/10953.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4e5ca1bf86429d976d86223480bd091d4b3888e2", + "authors": [ + { + "n": "TzionaN", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10956.json b/doc/sphinxext/prs/10956.json new file mode 100644 index 00000000000..d8bbcd361ad --- /dev/null +++ b/doc/sphinxext/prs/10956.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bf7fd4755a95ad43aa7c2dd07b1ed12656f85292", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/backends/_utils.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10957.json b/doc/sphinxext/prs/10957.json new file mode 100644 index 00000000000..0971b20f377 --- /dev/null +++ b/doc/sphinxext/prs/10957.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c12a02000776adb9df18da55ea60c50cbf83827d", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": 
"Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/ctf/info.py": { + "a": 6, + "d": 0 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10958.json b/doc/sphinxext/prs/10958.json new file mode 100644 index 00000000000..fb422cc3086 --- /dev/null +++ b/doc/sphinxext/prs/10958.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "622dc0cf358bd20523219d01f93d2f87b28c1c86", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 0 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10962.json b/doc/sphinxext/prs/10962.json new file mode 100644 index 00000000000..c4c70b36977 --- /dev/null +++ b/doc/sphinxext/prs/10962.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b4c44312da300c79578f9bfb953ee5a0b597d6c4", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/constants.py": { + "a": 5, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10963.json b/doc/sphinxext/prs/10963.json new file mode 100644 index 00000000000..b19c2a60c08 --- /dev/null +++ b/doc/sphinxext/prs/10963.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1d1868d2c94e3cf7ff27f3256d20aae0840664ea", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 4 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10967.json b/doc/sphinxext/prs/10967.json new file mode 100644 index 00000000000..5f8c1992dfb --- /dev/null +++ b/doc/sphinxext/prs/10967.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f3f26c61aa9cd8480493f86df4b7bcef13e598e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/surface.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 25, + "d": 34 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10968.json b/doc/sphinxext/prs/10968.json new file mode 100644 index 00000000000..3fcf363ada9 --- /dev/null +++ b/doc/sphinxext/prs/10968.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "48eb3f3e218d2297af0ef3310c1e86b70012e7a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 11, + "d": 2 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10969.json b/doc/sphinxext/prs/10969.json new file mode 100644 index 00000000000..ca35a701e2b --- /dev/null +++ b/doc/sphinxext/prs/10969.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "04747bbfbc6f1714ffc943dfe9b86a689446580a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + 
"examples/simulation/simulated_raw_data_using_subject_anatomy.py": { + "a": 13, + "d": 8 + }, + "examples/simulation/source_simulator.py": { + "a": 12, + "d": 6 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 6, + "d": 11 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 4, + "d": 6 + }, + "examples/visualization/3d_to_2d.py": { + "a": 9, + "d": 7 + }, + "examples/visualization/brain.py": { + "a": 10, + "d": 10 + }, + "examples/visualization/meg_sensors.py": { + "a": 11, + "d": 9 + }, + "examples/visualization/mne_helmet.py": { + "a": 5, + "d": 7 + }, + "mne/forward/_field_interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10972.json b/doc/sphinxext/prs/10972.json new file mode 100644 index 00000000000..5e01cd416b2 --- /dev/null +++ b/doc/sphinxext/prs/10972.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f2ea6807c86d265c3828c94fb9c052324dbca49c", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/gui/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_ieeg_locate.py": { + "a": 0, + "d": 0 + }, + "mne/gui/tests/test_core.py": { + "a": 0, + "d": 0 + }, + "mne/gui/tests/test_coreg.py": { + "a": 0, + "d": 0 + }, + "mne/gui/tests/test_ieeg_locate.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10973.json b/doc/sphinxext/prs/10973.json new file mode 100644 index 00000000000..55ce98f111e --- /dev/null +++ b/doc/sphinxext/prs/10973.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "2a25dd1c090bbcf2e3a3a148eac41e88d6d369dc", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/institution_logos/BIDS-dark.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/BIDS-dark.svg": { + "a": 318, + "d": 0 + }, + "doc/_static/institution_logos/BIDS.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/BIDS.svg": { + "a": 318, + "d": 0 + }, + "doc/_static/style.css": { + "a": 120, + "d": 98 + }, + "doc/_templates/layout.html": { + "a": 69, + "d": 9 + }, + "doc/conf.py": { + "a": 3, + "d": 4 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 0, + "d": 1 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 2, + "d": 1 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 2, + "d": 1 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 2, + "d": 1 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 2, + "d": 1 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 10, + "d": 8 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 2, + "d": 1 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 7, + "d": 5 + }, + "tutorials/io/10_reading_meg_data.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 12, + "d": 8 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 2, + "d": 1 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 10, + "d": 5 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 2, + "d": 1 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 9, + "d": 6 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 2, + "d": 1 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/10974.json b/doc/sphinxext/prs/10974.json new file mode 100644 index 00000000000..b9c0c2f4aec --- /dev/null +++ b/doc/sphinxext/prs/10974.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "f4e6f971c43327ec5cc4100b7e026b33303de860", + "authors": [ + { + "n": "Ilias Machairas", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 2, + "d": 0 + }, + "mne/channels/data/montages/brainproducts-RNP-BA-128.txt": { + "a": 131, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 4, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10978.json b/doc/sphinxext/prs/10978.json new file mode 100644 index 00000000000..d36cce4242d --- /dev/null +++ b/doc/sphinxext/prs/10978.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "d3fb79553783d4dd4637f946e721a006547c07d4", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 3, + "d": 0 + }, + "mne/viz/tests/test_misc.py": { + "a": 4, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 6, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 7, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10979.json b/doc/sphinxext/prs/10979.json new file mode 100644 index 00000000000..6bc67b3a5fd --- /dev/null +++ b/doc/sphinxext/prs/10979.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "64dff6189215cbead61db81d6e6de811a05633e3", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1098.json b/doc/sphinxext/prs/1098.json new file mode 100644 index 00000000000..0a028564bc3 --- /dev/null +++ b/doc/sphinxext/prs/1098.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e0ef5d084014dbe2628d15b33132ba834560698e", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10980.json b/doc/sphinxext/prs/10980.json new file mode 100644 index 00000000000..567af6948b3 --- /dev/null +++ b/doc/sphinxext/prs/10980.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "11696d018595e934bf98080f08d1ddeb704e1c4d", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/beamformer/_dics.py": { + "a": 19, + "d": 8 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10982.json b/doc/sphinxext/prs/10982.json new file mode 100644 index 00000000000..f5cc76bcc4f --- /dev/null +++ b/doc/sphinxext/prs/10982.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "93c144e562c81489766eeb4cb98766534a8576af", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 0, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 2 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 3, + "d": 3 + }, + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/hc.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/markers.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 3, + "d": 3 + }, + "mne/transforms.py": { + "a": 3, + "d": 3 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10986.json b/doc/sphinxext/prs/10986.json new file mode 100644 index 00000000000..f63f8c36baf --- /dev/null +++ b/doc/sphinxext/prs/10986.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "530988c7475f9a60cfa137972b8523180aaa3c0f", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/time_frequency.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 49, + "d": 3 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 21, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10989.json b/doc/sphinxext/prs/10989.json new file mode 100644 index 
00000000000..225fab0efbe --- /dev/null +++ b/doc/sphinxext/prs/10989.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f97faf7ae1e88dd0976c6b2dbbf23155c1c83065", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 3 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10990.json b/doc/sphinxext/prs/10990.json new file mode 100644 index 00000000000..4e9d05b3d68 --- /dev/null +++ b/doc/sphinxext/prs/10990.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "245844206d4b6d806fac2ec2ba60b77f83b31841", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 8, + "d": 1 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10993.json b/doc/sphinxext/prs/10993.json new file mode 100644 index 00000000000..6c2f2150218 --- /dev/null +++ b/doc/sphinxext/prs/10993.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "078149bf980943e84c8b7fd9950e7dc89f3b7745", + "authors": [ + { + "n": "Ben Beasley", + "e": "code@musicinmybrain.net" + } + ], + "changes": { + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10994.json b/doc/sphinxext/prs/10994.json new file mode 100644 index 00000000000..c0545b1672e --- /dev/null +++ b/doc/sphinxext/prs/10994.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "d57a3d649050aa4cdcdea86e7f5b8039df8d2307", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + ".mailmap": { + "a": 249, + "d": 231 + }, + "CITATION.cff": { + "a": 655, + "d": 0 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "SECURITY.md": { + "a": 3, + "d": 3 + }, + "codemeta.json": { + "a": 262, + "d": 154 + }, + "doc/_static/versions.json": { + "a": 6, + "d": 2 + }, + "doc/changes/1.1.inc": { + "a": 56, + "d": 3 + }, + "doc/changes/names.inc": { + "a": 248, + "d": 230 + }, + "doc/cited.rst": { + "a": 3, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/_version.py": { + "a": 1, + "d": 1 + }, + "tools/circleci_download.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10997.json b/doc/sphinxext/prs/10997.json new file mode 100644 index 00000000000..5d5ac98d752 --- /dev/null +++ b/doc/sphinxext/prs/10997.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "60087ebe36943f760cafcc10a0e99bc5b3b685bf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/psd.py": { + "a": 33, + "d": 5 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 32, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/10999.json b/doc/sphinxext/prs/10999.json new file mode 100644 index 00000000000..04378490962 --- /dev/null +++ b/doc/sphinxext/prs/10999.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2d183aade4f482ca39516edb6569b9ca450b8665", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 31, + "d": 0 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1100.json 
b/doc/sphinxext/prs/1100.json new file mode 100644 index 00000000000..f4a5ef6c149 --- /dev/null +++ b/doc/sphinxext/prs/1100.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4ef4ff748a3f83a2761ecf25093ae162821e84bb", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/datasets/sample/sample.py": { + "a": 11, + "d": 4 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 8, + "d": 4 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11000.json b/doc/sphinxext/prs/11000.json new file mode 100644 index 00000000000..eaf269fbc69 --- /dev/null +++ b/doc/sphinxext/prs/11000.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f27df270539bda9994ec791232d87a82b262d721", + "authors": [ + { + "n": "Jack Zhang", + "e": "me@jackzhang.me" + } + ], + "changes": { + "mne/export/_eeglab.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11001.json b/doc/sphinxext/prs/11001.json new file mode 100644 index 00000000000..c5b75af4618 --- /dev/null +++ b/doc/sphinxext/prs/11001.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cee9e96e99abc98ce4fd0ae5e0a9ffcb0c3919f3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/time_frequency/time_frequency_erds.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11003.json b/doc/sphinxext/prs/11003.json new file mode 100644 index 00000000000..837fa2d4fa8 --- /dev/null +++ b/doc/sphinxext/prs/11003.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ef8b971340ddee0f7fe4a2a51c51ccc37d205b3e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "tools/check_mne_location.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11004.json b/doc/sphinxext/prs/11004.json new file mode 100644 index 00000000000..0bb7694555a --- /dev/null +++ b/doc/sphinxext/prs/11004.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ec8a360911172e8c245b676c0e2af38c023b266f", + "authors": [ + { + "n": "Daniel C Schad", + "e": null + }, + { + "n": "Daniel Carlström Schad", + "e": "daniel.c.schad@protonmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 5, + "d": 0 + }, + "mne/decoding/time_frequency.py": { + "a": 11, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11006.json b/doc/sphinxext/prs/11006.json new file mode 100644 index 00000000000..ea6979bdf8f --- /dev/null +++ b/doc/sphinxext/prs/11006.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a511bfbc820293f08a9a42aa19e9f21cbf63c4a0", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/_eeglab.py": { + "a": 82, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 4 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 23, + "d": 8 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11007.json b/doc/sphinxext/prs/11007.json new file mode 100644 index 00000000000..e34bd2653e4 --- /dev/null +++ b/doc/sphinxext/prs/11007.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": 
"d44fc4e19ee44ec7f70870842507c282f33b54e5", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 6, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 19, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 5 + }, + "requirements_doc.txt": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11008.json b/doc/sphinxext/prs/11008.json new file mode 100644 index 00000000000..c4e46cc8eec --- /dev/null +++ b/doc/sphinxext/prs/11008.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "2ba483ab91828be18a480e689d95f61c422ed6eb", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/bem.py": { + "a": 11, + "d": 79 + }, + "mne/commands/mne_flash_bem.py": { + "a": 3, + "d": 6 + }, + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 3 + }, + "mne/datasets/_fetch.py": { + "a": 4, + "d": 3 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 3, + "d": 2 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 13 + }, + "mne/datasets/utils.py": { + "a": 4, + "d": 34 + }, + "mne/gui/tests/test_coreg.py": { + "a": 3, + "d": 1 + }, + "mne/parallel.py": { + "a": 1, + "d": 31 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/utils/_testing.py": { + "a": 0, + "d": 31 + }, + "mne/utils/tests/test_testing.py": { + "a": 1, + "d": 20 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11009.json b/doc/sphinxext/prs/11009.json new file mode 100644 index 00000000000..9d735d43e53 --- /dev/null +++ b/doc/sphinxext/prs/11009.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "440ce1a9bed96526526cc256dfa282335571ed21", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/tests/test_docstring_parameters.py": { + "a": 8, + "d": 12 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11011.json b/doc/sphinxext/prs/11011.json new file mode 100644 index 00000000000..be22df63b5d --- /dev/null +++ b/doc/sphinxext/prs/11011.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "44f499f13c5100cc86058c65ba81dbd50412ec7e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 0, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "doc/links.inc": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/bem.py": { + "a": 203, + "d": 37 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 69, + "d": 10 + }, + "mne/forward/_make_forward.py": { + "a": 14, + "d": 9 + }, + "mne/forward/forward.py": { + "a": 47, + "d": 33 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 144, + "d": 28 + }, + "mne/io/constants.py": { + "a": 0, + "d": 4 + }, + "mne/simulation/raw.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 37, + "d": 12 + }, + 
"mne/utils/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/utils/_testing.py": { + "a": 13, + "d": 0 + }, + "mne/utils/config.py": { + "a": 2, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 4, + "d": 3 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11012.json b/doc/sphinxext/prs/11012.json new file mode 100644 index 00000000000..2acffd7eecc --- /dev/null +++ b/doc/sphinxext/prs/11012.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b5fb30aac5ead825727d7d83db51378101941868", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/gen_commands.py": { + "a": 2, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 15, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11016.json b/doc/sphinxext/prs/11016.json new file mode 100644 index 00000000000..50200907240 --- /dev/null +++ b/doc/sphinxext/prs/11016.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8baaa7675ea34d3b98dd2875ad8b5efaf6d44d16", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 10 + }, + "mne/viz/backends/_qt.py": { + "a": 1, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11017.json b/doc/sphinxext/prs/11017.json new file mode 100644 index 00000000000..962217fee44 --- /dev/null +++ b/doc/sphinxext/prs/11017.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "879829e3d4da228f0c022d8f1ca4abf6e4599c81", + "authors": [ + { + "n": "Hüseyin", + "e": "huseyinorkunelmas@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_proj.py": { + "a": 2, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 20, + "d": 5 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 25, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11018.json b/doc/sphinxext/prs/11018.json new file mode 100644 index 00000000000..0cf65c7b00e --- /dev/null +++ b/doc/sphinxext/prs/11018.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9adf3f1cad3264b93e32c0ca51e3fc003a297a11", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/report/report.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11019.json b/doc/sphinxext/prs/11019.json new file mode 100644 index 00000000000..33c7a6ca8fd --- /dev/null +++ b/doc/sphinxext/prs/11019.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7044b8202f9549c18f486d263f167153138d9925", + "authors": [ + { + "n": "Sena", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 0 + } 
+ } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1102.json b/doc/sphinxext/prs/1102.json new file mode 100644 index 00000000000..88b376c2758 --- /dev/null +++ b/doc/sphinxext/prs/1102.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "004d3aaaaaa693ea7b5a490a49ef268c117ae2a7", + "authors": [ + { + "n": "Daniel G. Wakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "mne/event.py": { + "a": 15, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11020.json b/doc/sphinxext/prs/11020.json new file mode 100644 index 00000000000..935de373f6d --- /dev/null +++ b/doc/sphinxext/prs/11020.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "711da1d40783c8f37cea85e0b338c5e08e5dad93", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/transforms.py": { + "a": 11, + "d": 4 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 7, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11022.json b/doc/sphinxext/prs/11022.json new file mode 100644 index 00000000000..ac0634d6738 --- /dev/null +++ b/doc/sphinxext/prs/11022.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c8d8c5f72e6714ae147994effcee8efe04c648f4", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 1, + "d": 58 + }, + "mne/io/pick.py": { + "a": 1, + "d": 57 + }, + "mne/utils/docs.py": { + "a": 61, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11024.json b/doc/sphinxext/prs/11024.json new file mode 100644 index 00000000000..17c635a6838 --- /dev/null +++ b/doc/sphinxext/prs/11024.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6d27c4260adcbad9c171688427453eaf99545cd1", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/references.bib": { + "a": 0, + "d": 10 + }, + "mne/preprocessing/_csd.py": { + "a": 10, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11029.json b/doc/sphinxext/prs/11029.json new file mode 100644 index 00000000000..434804a80aa --- /dev/null +++ b/doc/sphinxext/prs/11029.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c1f65be9f79cdb29f629f5335d79d2fedd32f73b", + "authors": [ + { + "n": "Sena", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/time_frequency/compute_csd.py": { + "a": 10, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11033.json b/doc/sphinxext/prs/11033.json new file mode 100644 index 00000000000..5388fc2e49a --- /dev/null +++ b/doc/sphinxext/prs/11033.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f55c5eab72c6848ee808413eee473fbf21036a56", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 9, + "d": 69 + }, + "doc/conf.py": { + "a": 5, + "d": 2 + }, + "doc/install/installers.rst": { + "a": 12, + "d": 4 + }, + "doc/install/manual_install_python.rst": { + "a": 8, + "d": 3 + }, + "requirements_doc.txt": { + "a": 
3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11034.json b/doc/sphinxext/prs/11034.json new file mode 100644 index 00000000000..aa60a2054ef --- /dev/null +++ b/doc/sphinxext/prs/11034.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ae3e0b885685c07915fe06031d70b3c637b2aa90", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11035.json b/doc/sphinxext/prs/11035.json new file mode 100644 index 00000000000..d039067a3c2 --- /dev/null +++ b/doc/sphinxext/prs/11035.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7511bf717665005dcc0445efab30e16cbe84ba99", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/_includes/channel_types.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/0.23.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11037.json b/doc/sphinxext/prs/11037.json new file mode 100644 index 00000000000..76a6dd19ec0 --- /dev/null +++ b/doc/sphinxext/prs/11037.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "93a1856445f1f838b0faf4b4cdf61397661c7c0a", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11038.json b/doc/sphinxext/prs/11038.json new file mode 100644 index 00000000000..422d64b73a5 --- /dev/null +++ b/doc/sphinxext/prs/11038.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7d80664de0ebefcee5532864565bb49cfae27cc2", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/time_frequency/csd.py": { + "a": 26, + "d": 6 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1104.json b/doc/sphinxext/prs/1104.json new file mode 100644 index 00000000000..8cab051489c --- /dev/null +++ b/doc/sphinxext/prs/1104.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "a3dbd6751c4455c436ced5bd6d96019285b0cfb3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/externals/__init__.py": { + "a": 3, + "d": 1 + }, + "mne/externals/jdcal.py": { + "a": 116, + "d": 0 + }, + "mne/fiff/brainvision/brainvision.py": { + "a": 2, + "d": 0 + }, + "mne/fiff/constants.py": { + "a": 1, + "d": 0 + }, + "mne/fiff/edf/edf.py": { + "a": 2, + "d": 0 + }, + "mne/fiff/meas_info.py": { + "a": 67, + "d": 3 + }, + "mne/fiff/open.py": { + "a": 2, + "d": 0 + }, + "mne/fiff/raw.py": { + "a": 6, + "d": 0 + }, + "mne/fiff/tag.py": { + "a": 4, + "d": 1 + }, + "mne/fiff/tests/test_raw.py": { + "a": 28, + "d": 2 + }, + "mne/fiff/write.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11040.json b/doc/sphinxext/prs/11040.json new file mode 100644 index 00000000000..72ac38084af --- /dev/null +++ b/doc/sphinxext/prs/11040.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "776c91f7a92402f02174c1f79124e236e1e38e89", + "authors": [ + { + "n": "mdovgialo", + "e": "mdovgialo@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 
1 + }, + "mne/viz/tests/test_misc.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11041.json b/doc/sphinxext/prs/11041.json new file mode 100644 index 00000000000..fb85df74ebe --- /dev/null +++ b/doc/sphinxext/prs/11041.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3df770ce87915b5ef221d9b65ace481868a871b6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/export/tests/test_export.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11046.json b/doc/sphinxext/prs/11046.json new file mode 100644 index 00000000000..17e54485542 --- /dev/null +++ b/doc/sphinxext/prs/11046.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "12cf8749adfee03caaaa1424b0440a2eaa9886cb", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 11, + "d": 0 + }, + "doc/visualization.rst": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/eog_regression.py": { + "a": 78, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_regress.py": { + "a": 269, + "d": 32 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 105, + "d": 5 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 83, + "d": 0 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 165, + "d": 105 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11047.json b/doc/sphinxext/prs/11047.json new file mode 100644 index 00000000000..16765704dac --- /dev/null +++ b/doc/sphinxext/prs/11047.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "fa11149b9d246c115ea0794ce9c819c36ec34f72", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 1, + "d": 0 + }, + "Makefile": { + "a": 3, + "d": 3 + }, + "ignore_words.txt": { + "a": 2, + "d": 0 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ssd.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 2 + }, + "mne/io/curry/curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/nihon/nihon.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 1, + "d": 1 + }, + "mne/utils/dataframe.py": { + "a": 15, + "d": 4 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11048.json b/doc/sphinxext/prs/11048.json new file mode 100644 index 00000000000..74ab45cdf72 --- /dev/null +++ b/doc/sphinxext/prs/11048.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f855987927f1ad94670288067a8598e14b7fd509", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 6, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 4, + "d": 0 + } + } +} 
\ No newline at end of file diff --git a/doc/sphinxext/prs/11049.json b/doc/sphinxext/prs/11049.json new file mode 100644 index 00000000000..4b2cdba0280 --- /dev/null +++ b/doc/sphinxext/prs/11049.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d4564c25bd156f85a63904414f76bccae55db67e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 39, + "d": 20 + }, + "mne/viz/_brain/surface.py": { + "a": 5, + "d": 5 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 14, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11050.json b/doc/sphinxext/prs/11050.json new file mode 100644 index 00000000000..2c00946c646 --- /dev/null +++ b/doc/sphinxext/prs/11050.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9acfa0642c0681a5d208a8ce551b2985338ace0a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 20, + "d": 12 + }, + "mne/viz/topomap.py": { + "a": 19, + "d": 11 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11051.json b/doc/sphinxext/prs/11051.json new file mode 100644 index 00000000000..d80ac04735a --- /dev/null +++ b/doc/sphinxext/prs/11051.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7720b4d6b4b637bb0819a9a44e750ee82675b25f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 4 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11052.json b/doc/sphinxext/prs/11052.json new file mode 100644 index 00000000000..f8599470ba6 --- /dev/null +++ b/doc/sphinxext/prs/11052.json @@ -0,0 +1,143 @@ +{ + "merge_commit_sha": "0cd79a3608ab3817ee180c0f0c4d9307bf653d3a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/funding/amazon.svg": { + "a": 16, + "d": 17 + }, + "doc/_static/funding/anr.svg": { + "a": 14, + "d": 68 + }, + "doc/_static/funding/czi.svg": { + "a": 12, + "d": 19 + }, + "doc/_static/funding/doe.svg": { + "a": 72, + "d": 1658 + }, + "doc/_static/funding/erc-dark.svg": { + "a": 13, + "d": 658 + }, + "doc/_static/funding/erc.svg": { + "a": 13, + "d": 658 + }, + "doc/_static/funding/google.svg": { + "a": 9, + "d": 1 + }, + "doc/_static/funding/nih.svg": { + "a": 6, + "d": 81 + }, + "doc/_static/institution_logos/Aalto-dark.svg": { + "a": 11, + "d": 2466 + }, + "doc/_static/institution_logos/Aalto.svg": { + "a": 11, + "d": 2466 + }, + "doc/_static/institution_logos/Aarhus-dark.svg": { + "a": 2, + "d": 64 + }, + "doc/_static/institution_logos/Aarhus.svg": { + "a": 2, + "d": 64 + }, + "doc/_static/institution_logos/BIDS-dark.svg": { + "a": 52, + "d": 318 + }, + "doc/_static/institution_logos/BIDS.svg": { + "a": 52, + "d": 318 + }, + "doc/_static/institution_logos/BU.svg": { + "a": 4, + "d": 1 + }, + "doc/_static/institution_logos/CHOP-dark.svg": { + "a": 10, + "d": 1 + }, + "doc/_static/institution_logos/CHOP.svg": { + "a": 12, + "d": 1 + }, + "doc/_static/institution_logos/Inserm-dark.svg": { + "a": 12, + "d": 46 + }, + "doc/_static/institution_logos/Inserm.svg": { + 
"a": 12, + "d": 46 + }, + "doc/_static/institution_logos/Julich-dark.svg": { + "a": 2, + "d": 54 + }, + "doc/_static/institution_logos/Julich.svg": { + "a": 2, + "d": 54 + }, + "doc/_static/institution_logos/MGH.svg": { + "a": 7, + "d": 18 + }, + "doc/_static/institution_logos/MIT.svg": { + "a": 6, + "d": 8 + }, + "doc/_static/institution_logos/MPIB-dark.svg": { + "a": 1, + "d": 3 + }, + "doc/_static/institution_logos/MPIB.svg": { + "a": 21, + "d": 3 + }, + "doc/_static/institution_logos/Macquarie.svg": { + "a": 1, + "d": 222 + }, + "doc/_static/institution_logos/NYU-dark.svg": { + "a": 11, + "d": 66 + }, + "doc/_static/institution_logos/NYU.svg": { + "a": 11, + "d": 66 + }, + "doc/_static/institution_logos/SWPS-dark.svg": { + "a": 25, + "d": 151 + }, + "doc/_static/institution_logos/SWPS.svg": { + "a": 25, + "d": 151 + }, + "doc/_static/institution_logos/Telecom_Paris_Tech.svg": { + "a": 11, + "d": 1 + }, + "doc/_static/institution_logos/Washington-dark.svg": { + "a": 2, + "d": 33 + }, + "doc/_static/institution_logos/Washington.svg": { + "a": 2, + "d": 33 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11053.json b/doc/sphinxext/prs/11053.json new file mode 100644 index 00000000000..2fe3bd45a40 --- /dev/null +++ b/doc/sphinxext/prs/11053.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "dd3d0f4d616f619993b3b4ef6b1afec66737704d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 1, + "d": 1 + }, + "ignore_words.txt": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11056.json b/doc/sphinxext/prs/11056.json new file mode 100644 index 00000000000..27c674ddd25 --- /dev/null +++ b/doc/sphinxext/prs/11056.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e0df20d1dd9bdab5d44b76aeadc93c6e9731e4c1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11057.json b/doc/sphinxext/prs/11057.json new file mode 100644 index 00000000000..c4df7b87fe9 --- /dev/null +++ b/doc/sphinxext/prs/11057.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "56543eda8f8739cc9e36de0b6eb95f93d756a39b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11059.json b/doc/sphinxext/prs/11059.json new file mode 100644 index 00000000000..ebea3393b58 --- /dev/null +++ b/doc/sphinxext/prs/11059.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "46262a369ea40ea658df7a5435ee9b7776517064", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 18, + "d": 22 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11060.json b/doc/sphinxext/prs/11060.json new file mode 100644 index 00000000000..dfb6d07c66e --- /dev/null +++ b/doc/sphinxext/prs/11060.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "eeefc2ed0aa39c10eab3bd1ef883c9cacb6f7b86", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/codeql-analysis.yml": { + "a": 72, + "d": 0 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + 
"mne/io/base.py": { + "a": 2, + "d": 3 + }, + "mne/utils/mixin.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11065.json b/doc/sphinxext/prs/11065.json new file mode 100644 index 00000000000..6ca49585e2b --- /dev/null +++ b/doc/sphinxext/prs/11065.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e59fcf5002ce7839dcd608ffd2f0bee6857e11b1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 3 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11066.json b/doc/sphinxext/prs/11066.json new file mode 100644 index 00000000000..c04d5bf6c40 --- /dev/null +++ b/doc/sphinxext/prs/11066.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c58e87556af8173aa72a4bb59dcc64b7a2b51e89", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/conf.py": { + "a": 8, + "d": 0 + }, + "mne/conftest.py": { + "a": 5, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11067.json b/doc/sphinxext/prs/11067.json new file mode 100644 index 00000000000..531b51a9372 --- /dev/null +++ b/doc/sphinxext/prs/11067.json @@ -0,0 +1,123 @@ +{ + "merge_commit_sha": "82a341076416bfbf3e62ae97e330ecfe9bedc1eb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 6 + }, + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/brain.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 4 + }, + "mne/fixes.py": { + "a": 11, + "d": 1 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 14, + "d": 14 + }, + "mne/utils/check.py": { + "a": 3, + "d": 11 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 14 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 7, + "d": 4 + }, + "mne/viz/_mpl_figure.py": { + "a": 5, + "d": 5 + }, + "mne/viz/backends/_utils.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/tests/test_abstract_notebook.py": { + "a": 4, + "d": 2 + }, + "mne/viz/circle.py": { + "a": 2, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 8, + "d": 16 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 9, + "d": 9 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 5, + "d": 5 + }, + "mne/viz/tests/test_raw.py": { + "a": 13, + "d": 7 + }, + "mne/viz/tests/test_topomap.py": { + "a": 7, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 74, + "d": 11 + }, + "tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 2 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11068.json b/doc/sphinxext/prs/11068.json new file mode 100644 index 00000000000..8bf73b1fb86 --- /dev/null +++ b/doc/sphinxext/prs/11068.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "61b90ee2d555d10a6cb52dd5a6454d7d910a9166", + "authors": [ + 
{ + "n": "Mats", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11069.json b/doc/sphinxext/prs/11069.json new file mode 100644 index 00000000000..799c676045e --- /dev/null +++ b/doc/sphinxext/prs/11069.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "af925f4fd7f0274fe6bc642561af2abeb954289d", + "authors": [ + { + "n": "Chetan Gohil", + "e": "chetan.gohil@psych.ox.ac.uk" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11070.json b/doc/sphinxext/prs/11070.json new file mode 100644 index 00000000000..a5f93e659e8 --- /dev/null +++ b/doc/sphinxext/prs/11070.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7ab69ab8ad6e90817d41b945e2b1e5daac26b8ff", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 43, + "d": 27 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 9, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11071.json b/doc/sphinxext/prs/11071.json new file mode 100644 index 00000000000..4b17567d7ee --- /dev/null +++ b/doc/sphinxext/prs/11071.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "bb93462a5853dc1a6bd6f4ebfb3a9d8b9431e731", + "authors": [ + { + "n": "Andrew Quinn", + "e": "aquinn501@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 3, + "d": 0 + }, + "mne/dipole.py": { + "a": 3, + "d": 0 + }, + "mne/forward/_field_interpolation.py": { + "a": 10, + "d": 15 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 0 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 3, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11072.json b/doc/sphinxext/prs/11072.json new file mode 100644 index 00000000000..80f9bccd0d8 --- /dev/null +++ b/doc/sphinxext/prs/11072.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ec85fe81b0f679f6ffad0611935e5fbfb98ad378", + "authors": [ + { + "n": "Chetan Gohil", + "e": "chetan.gohil@psych.ox.ac.uk" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/csd.py": { + "a": 6, + "d": 0 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 16, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11073.json b/doc/sphinxext/prs/11073.json new file mode 100644 index 00000000000..c77b5237941 --- /dev/null +++ b/doc/sphinxext/prs/11073.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6ff1ea387822a66759dc9a2b801661bb5347d3a0", + "authors": [ + { + "n": "Mats", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 6, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11074.json 
b/doc/sphinxext/prs/11074.json new file mode 100644 index 00000000000..ae1572e7050 --- /dev/null +++ b/doc/sphinxext/prs/11074.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "86f38376a75fbe3d3e894eb4348b653e4340a6b3", + "authors": [ + { + "n": "Sebastiaan Mathot", + "e": "s.mathot@cogsci.nl" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 5, + "d": 7 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 18, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11075.json b/doc/sphinxext/prs/11075.json new file mode 100644 index 00000000000..1ec467f9677 --- /dev/null +++ b/doc/sphinxext/prs/11075.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "1f19f5ba77c8b794f39701aeffcfdd88c1d1b070", + "authors": [ + { + "n": "Valerii", + "e": "vagechirkov@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/evoked.py": { + "a": 12, + "d": 0 + }, + "mne/html_templates/repr/evoked.html.jinja": { + "a": 29, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 0 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11076.json b/doc/sphinxext/prs/11076.json new file mode 100644 index 00000000000..ef42e36b185 --- /dev/null +++ b/doc/sphinxext/prs/11076.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4302ad01495a151e215bd5a0d6e221e2d79c0911", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/simulation/plot_stc_metrics.py": { + "a": 3, + "d": 6 + }, + "mne/simulation/metrics/metrics.py": { + "a": 38, + "d": 42 + }, + "mne/simulation/metrics/tests/test_metrics.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11077.json b/doc/sphinxext/prs/11077.json new file mode 100644 index 00000000000..d4675ed4550 --- /dev/null +++ b/doc/sphinxext/prs/11077.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5ee892f2ef79f451b7bdba251f4dec18e266001e", + "authors": [ + { + "n": "Andrew Quinn", + "e": "aquinn501@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 6, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11078.json b/doc/sphinxext/prs/11078.json new file mode 100644 index 00000000000..96ea78c97de --- /dev/null +++ b/doc/sphinxext/prs/11078.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "f319107095d0ca09cc453ddc57815d9f7fd0ccfe", + "authors": [ + { + "n": "Quentin Barthélemy", + "e": "q.barthelemy@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 2 + }, + "mne/filter.py": { + "a": 55, + "d": 25 + }, + "mne/tests/test_filter.py": { + "a": 42, + "d": 11 + }, + "mne/utils/docs.py": { + "a": 10, + "d": 5 + }, + 
"mne/viz/_figure.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11080.json b/doc/sphinxext/prs/11080.json new file mode 100644 index 00000000000..42ca1596da6 --- /dev/null +++ b/doc/sphinxext/prs/11080.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bdc435d8a7643a934365a221ab9d52ec8b33bc33", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 8, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 11, + "d": 0 + }, + "mne/io/_digitization.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11081.json b/doc/sphinxext/prs/11081.json new file mode 100644 index 00000000000..ab68ec918ef --- /dev/null +++ b/doc/sphinxext/prs/11081.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b53ed6ecc76b695a79850c9786e547b06fcc6c36", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 6, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11083.json b/doc/sphinxext/prs/11083.json new file mode 100644 index 00000000000..10e4cfaf067 --- /dev/null +++ b/doc/sphinxext/prs/11083.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "08c8cad249e6e180096b0c8c3e9075ca6ae31bd6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "examples/inverse/psf_ctf_vertices.py": { + "a": 48, + "d": 5 + }, + "mne/conftest.py": { + "a": 11, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 16, + "d": 7 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 45, + "d": 56 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 3, + "d": 1 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 88, + "d": 60 + }, + "mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11084.json b/doc/sphinxext/prs/11084.json new file mode 100644 index 00000000000..d39e9bc406d --- /dev/null +++ b/doc/sphinxext/prs/11084.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a0ca70901cd7105da03607e7155ae606e18ed195", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/gui/_coreg.py": { + "a": 3, + "d": 2 + }, + "mne/gui/tests/test_coreg.py": { + "a": 11, + "d": 0 + }, + "mne/viz/backends/_qt.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11085.json b/doc/sphinxext/prs/11085.json new file mode 100644 index 00000000000..599bec00baf --- /dev/null +++ b/doc/sphinxext/prs/11085.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "89fbad30af8065fbdbf8a0289f741fa5454330b6", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + } + ], + "changes": { + "examples/inverse/psf_ctf_vertices.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11086.json b/doc/sphinxext/prs/11086.json new file mode 100644 index 00000000000..291a97442d6 --- /dev/null +++ b/doc/sphinxext/prs/11086.json @@ -0,0 +1,71 @@ +{ + 
"merge_commit_sha": "bea8767a164bc43d02134bb7c9f063b1209a253c", + "authors": [ + { + "n": "luz paz", + "e": "luzpaz@pm.me" + } + ], + "changes": { + ".github/ISSUE_TEMPLATE/documentation.md": { + "a": 1, + "d": 1 + }, + "Makefile": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 2, + "d": 2 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/data/FreeSurferColorLUT.txt": { + "a": 3, + "d": 3 + }, + "mne/filter.py": { + "a": 3, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 2, + "d": 2 + }, + "mne/report/js_and_css/report.js": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11090.json b/doc/sphinxext/prs/11090.json new file mode 100644 index 00000000000..c43789fc687 --- /dev/null +++ b/doc/sphinxext/prs/11090.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "7aa3a27631b16fc9c16852c720e7243d876fe104", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/_includes/channel_types.rst": { + "a": 4, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 7, + "d": 5 + }, + "mne/defaults.py": { + "a": 10, + "d": 6 + }, + "mne/io/constants.py": { + "a": 6, + "d": 2 + }, + "mne/io/pick.py": { + "a": 20, + "d": 9 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11094.json b/doc/sphinxext/prs/11094.json new file mode 100644 index 00000000000..ec7f2dd0984 --- /dev/null +++ b/doc/sphinxext/prs/11094.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4787bac9147902afefd608c235e8d983199b5bf3", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/interpolate.py": { + "a": 104, + "d": 22 + }, + "mne/preprocessing/tests/test_interpolate.py": { + "a": 104, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11095.json b/doc/sphinxext/prs/11095.json new file mode 100644 index 00000000000..cc33803033b --- /dev/null +++ b/doc/sphinxext/prs/11095.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0faabfb093ebf4e03d8b6cbdb0b9418eeb090ff1", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/inverse.rst": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/__init__.py": { + "a": 4, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 89, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 48, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11096.json b/doc/sphinxext/prs/11096.json new file mode 100644 index 
00000000000..758bb77f2a1 --- /dev/null +++ b/doc/sphinxext/prs/11096.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "45d87fb0e5c34156c9568e04cd6ceef81140bed4", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/inverse.rst": { + "a": 1, + "d": 0 + }, + "examples/inverse/dics_epochs.py": { + "a": 119, + "d": 0 + }, + "mne/beamformer/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 73, + "d": 6 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 72, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11097.json b/doc/sphinxext/prs/11097.json new file mode 100644 index 00000000000..dbedc29c857 --- /dev/null +++ b/doc/sphinxext/prs/11097.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "02fce0bb67c9b00494ec3397bd18b5a81699ef7f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 87, + "d": 50 + }, + "mne/utils/tests/test_docs.py": { + "a": 48, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11099.json b/doc/sphinxext/prs/11099.json new file mode 100644 index 00000000000..0912e47d3da --- /dev/null +++ b/doc/sphinxext/prs/11099.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6cebb1332a5e33c9a4e4d6999625a4cc585803e1", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 25, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11101.json b/doc/sphinxext/prs/11101.json new file mode 100644 index 00000000000..47d32165762 --- /dev/null +++ b/doc/sphinxext/prs/11101.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "a5acbec2c9c5a93a6ac2a06127068927858e6302", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".github/ISSUE_TEMPLATE/bug_report.md": { + "a": 0, + "d": 52 + }, + ".github/ISSUE_TEMPLATE/bug_report.yml": { + "a": 65, + "d": 0 + }, + ".github/ISSUE_TEMPLATE/config.yml": { + "a": 3, + "d": 3 + }, + ".github/ISSUE_TEMPLATE/documentation.md": { + "a": 0, + "d": 15 + }, + ".github/ISSUE_TEMPLATE/documentation.yml": { + "a": 26, + "d": 0 + }, + ".github/ISSUE_TEMPLATE/feature_request.md": { + "a": 0, + "d": 27 + }, + ".github/ISSUE_TEMPLATE/feature_request.yml": { + "a": 51, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11102.json b/doc/sphinxext/prs/11102.json new file mode 100644 index 00000000000..4401b2968fa --- /dev/null +++ b/doc/sphinxext/prs/11102.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a02fa0a386a1feb770caaeaa91678b753ab8e48e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 29, + "d": 8 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 63, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11103.json b/doc/sphinxext/prs/11103.json new file mode 100644 index 00000000000..7cf388f181d --- /dev/null +++ b/doc/sphinxext/prs/11103.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": 
"9555588cb1e0456ab91b016a69740e4db3bff5a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/dipole.py": { + "a": 58, + "d": 43 + }, + "mne/forward/_compute_forward.py": { + "a": 52, + "d": 159 + }, + "mne/forward/_make_forward.py": { + "a": 81, + "d": 120 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 5 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 62, + "d": 51 + }, + "mne/preprocessing/maxwell.py": { + "a": 3, + "d": 14 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 24, + "d": 18 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11104.json b/doc/sphinxext/prs/11104.json new file mode 100644 index 00000000000..5ffcebab85e --- /dev/null +++ b/doc/sphinxext/prs/11104.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bc30e0e8c687cd91f75432df97e4f348ee8d73eb", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 0, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 0, + "d": 8 + }, + "mne/channels/tests/test_montage.py": { + "a": 0, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11105.json b/doc/sphinxext/prs/11105.json new file mode 100644 index 00000000000..5e0d3b70232 --- /dev/null +++ b/doc/sphinxext/prs/11105.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "532cfc5cee360dff6bad9cac7710c805aee9a347", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11108.json b/doc/sphinxext/prs/11108.json new file mode 100644 index 00000000000..4a1c10f2567 --- /dev/null +++ b/doc/sphinxext/prs/11108.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9385b96ad12c26e8f7d3b913b330eade4ee1365a", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 1 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1111.json b/doc/sphinxext/prs/1111.json new file mode 100644 index 00000000000..b731e3878e4 --- /dev/null +++ b/doc/sphinxext/prs/1111.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "78794f60f896cf2780902007b43ac191a4314a0c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 224, + "d": 68 + }, + "mne/tests/test_label.py": { + "a": 43, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11115.json b/doc/sphinxext/prs/11115.json new file mode 100644 index 00000000000..50faf33416f --- /dev/null +++ b/doc/sphinxext/prs/11115.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d587b89154cc398cacabc6e0d9b630c0aaf6e37a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/topo_customized.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/psd.py": { + "a": 8, + "d": 7 + }, + "tools/azure_dependencies.sh": { + "a": 3, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 3, + "d": 1 + }, + 
"tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 13, + "d": 12 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11116.json b/doc/sphinxext/prs/11116.json new file mode 100644 index 00000000000..290a76bb562 --- /dev/null +++ b/doc/sphinxext/prs/11116.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4310c92b85fa7dcc77ffe322501037e5ad82372d", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "examples/time_frequency/time_frequency_simulated.py": { + "a": 77, + "d": 52 + }, + "mne/time_frequency/_stockwell.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11118.json b/doc/sphinxext/prs/11118.json new file mode 100644 index 00000000000..914d423a820 --- /dev/null +++ b/doc/sphinxext/prs/11118.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2aceb47bb16698beebcff2118bdbf89abcaed3cc", + "authors": [ + { + "n": "jwelzel", + "e": null + } + ], + "changes": { + "mne/utils/_logging.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11120.json b/doc/sphinxext/prs/11120.json new file mode 100644 index 00000000000..09f91050573 --- /dev/null +++ b/doc/sphinxext/prs/11120.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "db22d67b65b07dec07f3f65cbc52d8d146c8265b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 5, + "d": 5 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 4, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 13 + }, + "mne/utils/tests/test_docs.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11121.json b/doc/sphinxext/prs/11121.json new file mode 100644 index 00000000000..17b5f17b724 --- /dev/null +++ b/doc/sphinxext/prs/11121.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "963453660fc3805a67b924646f4b88f57badcdaa", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/spectrum.py": { + "a": 4, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11122.json b/doc/sphinxext/prs/11122.json new file mode 100644 index 00000000000..d828ffd5494 --- /dev/null +++ b/doc/sphinxext/prs/11122.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2c356d6a4cb4fa33e6d1350fc9ec76446cc9162e", + "authors": [ + { + "n": "Eduard Ort", + "e": "eduardxort@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 37, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 31, + "d": 1 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 16, + "d": 2 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/11123.json b/doc/sphinxext/prs/11123.json new file mode 100644 index 00000000000..fe84594524d --- /dev/null +++ b/doc/sphinxext/prs/11123.json @@ -0,0 +1,163 @@ +{ + "merge_commit_sha": "45720ccbae51927b4c7ceace3b89c7c294d21bab", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 7, + "d": 0 + }, + "examples/decoding/ems_filtering.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 4, + "d": 2 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/mne_cov_power.py": { + "a": 7, + "d": 4 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 4, + "d": 3 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/movement_compensation.py": { + "a": 9, + "d": 8 + }, + "examples/preprocessing/virtual_evoked.py": { + "a": 8, + "d": 7 + }, + "examples/stats/sensor_permutation_test.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 10, + "d": 1 + }, + "examples/visualization/evoked_topomap.py": { + "a": 17, + "d": 21 + }, + "mne/cov.py": { + "a": 46, + "d": 27 + }, + "mne/evoked.py": { + "a": 13, + "d": 14 + }, + "mne/io/proj.py": { + "a": 103, + "d": 35 + }, + "mne/preprocessing/_regress.py": { + "a": 15, + "d": 8 + }, + "mne/preprocessing/ica.py": { + "a": 40, + "d": 35 + }, + "mne/report/report.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 76, + "d": 95 + }, + "mne/time_frequency/tfr.py": { + "a": 79, + "d": 104 + }, + "mne/utils/docs.py": { + "a": 133, + "d": 123 + }, + "mne/viz/_proj.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 5, + "d": 5 + }, + "mne/viz/tests/test_topomap.py": { + "a": 126, + "d": 130 + }, + "mne/viz/topomap.py": { + "a": 554, + "d": 471 + }, + "mne/viz/utils.py": { + "a": 37, + "d": 0 + }, + "tutorials/epochs/15_baseline_regression.py": { + "a": 5, + "d": 3 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 2, + "d": 3 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 3, + "d": 3 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 1, + "d": 2 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 4, + "d": 2 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 3, + "d": 3 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 8, + "d": 13 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 3, + "d": 3 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 4, + "d": 2 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11124.json b/doc/sphinxext/prs/11124.json new file mode 100644 index 00000000000..5f95cfbaa2d --- /dev/null +++ b/doc/sphinxext/prs/11124.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c58069137f800306ef8469b677530dbe14984429", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "tutorials/io/20_reading_eeg_data.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11125.json b/doc/sphinxext/prs/11125.json new file mode 100644 index 00000000000..f9b3e1be9df --- /dev/null +++ b/doc/sphinxext/prs/11125.json @@ -0,0 +1,15 @@ +{ + 
"merge_commit_sha": "6b378e92f11a709e0cee12ac07d76d4bf8c55c5b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11126.json b/doc/sphinxext/prs/11126.json new file mode 100644 index 00000000000..931e21e9074 --- /dev/null +++ b/doc/sphinxext/prs/11126.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4fda0d849ec8bc10e3c6db18d9e4ba8dfdd1bb7e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/psf_ctf_vertices.py": { + "a": 5, + "d": 48 + }, + "examples/inverse/psf_volume.py": { + "a": 93, + "d": 0 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11127.json b/doc/sphinxext/prs/11127.json new file mode 100644 index 00000000000..a159c0d30f5 --- /dev/null +++ b/doc/sphinxext/prs/11127.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "75ce3a0033f649deb8bc06e89a89ad031ee18092", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/backends/tests/test_abstract_notebook.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11129.json b/doc/sphinxext/prs/11129.json new file mode 100644 index 00000000000..fd61723ea04 --- /dev/null +++ b/doc/sphinxext/prs/11129.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2a44a666cd5b3907fc816219697e17de83ef476e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11132.json b/doc/sphinxext/prs/11132.json new file mode 100644 index 00000000000..9b2e5e25a73 --- /dev/null +++ b/doc/sphinxext/prs/11132.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7db211bf5e906dfac7333315a49b1a2eb3b75067", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 4 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11134.json b/doc/sphinxext/prs/11134.json new file mode 100644 index 00000000000..76cc2f65f4d --- /dev/null +++ b/doc/sphinxext/prs/11134.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aef49669fe1bdf19221e03e85cf961671508e0bb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11136.json b/doc/sphinxext/prs/11136.json new file mode 100644 index 00000000000..477ed0dc1d4 --- /dev/null +++ b/doc/sphinxext/prs/11136.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9e67f7ce9100bbac452b990541e8f36a072f098a", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/preprocessing/annotate_amplitude.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11139.json b/doc/sphinxext/prs/11139.json new file mode 100644 index 00000000000..514decda381 --- /dev/null +++ b/doc/sphinxext/prs/11139.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b8ba02fb74f63fef08f5386ea27a0f660342c99b", + "authors": [ + { + "n": "Richard 
Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11140.json b/doc/sphinxext/prs/11140.json new file mode 100644 index 00000000000..50a184db92d --- /dev/null +++ b/doc/sphinxext/prs/11140.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "cf235d539eb932013ea1fa5b78bc2dbf259e1c96", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 3, + "d": 3 + }, + "mne/io/kit/coreg.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11141.json b/doc/sphinxext/prs/11141.json new file mode 100644 index 00000000000..db590df223e --- /dev/null +++ b/doc/sphinxext/prs/11141.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "333fd053af346e94d61732e10dca7356be5f3c51", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/html_templates/repr/ica.html.jinja": { + "a": 0, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 139, + "d": 10 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 89, + "d": 2 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 37, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11143.json b/doc/sphinxext/prs/11143.json new file mode 100644 index 00000000000..ab0a48db17d --- /dev/null +++ b/doc/sphinxext/prs/11143.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "89e979b16add6944becba3ac750390a56518778b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 13, + "d": 9 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11144.json b/doc/sphinxext/prs/11144.json new file mode 100644 index 00000000000..373bcd3f5e1 --- /dev/null +++ b/doc/sphinxext/prs/11144.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "202a0d3b914de5816473970b381d2458a05866f2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/psf_volume.py": { + "a": 2, + "d": 1 + }, + "requirements_testing.txt": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11145.json b/doc/sphinxext/prs/11145.json new file mode 100644 index 00000000000..3c7bf2f09df --- /dev/null +++ b/doc/sphinxext/prs/11145.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e7326d63d224cd954966b4ee2c6322c0ff239305", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/pick.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 2, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 22, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11146.json b/doc/sphinxext/prs/11146.json new file mode 100644 index 
00000000000..ffde1b6d831 --- /dev/null +++ b/doc/sphinxext/prs/11146.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "8c3e6fb5ab0fdae3ce692d9ae9051f1c71bce37e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 3, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 3, + "d": 1 + }, + "mne/io/curry/curry.py": { + "a": 11, + "d": 12 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 13, + "d": 0 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 1 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 3, + "d": 15 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 11, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 13, + "d": 1 + }, + "mne/io/utils.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11147.json b/doc/sphinxext/prs/11147.json new file mode 100644 index 00000000000..2c3fad2af61 --- /dev/null +++ b/doc/sphinxext/prs/11147.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6df57c3d5995d281b21d84a31ab44c595a0dd74f", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 10, + "d": 1 + }, + "mne/io/tests/test_proj.py": { + "a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11148.json b/doc/sphinxext/prs/11148.json new file mode 100644 index 00000000000..5cd0fedb0e8 --- /dev/null +++ b/doc/sphinxext/prs/11148.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c87ed30fa8126a9ca8f47066cdd98cacbfb38e32", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 6, + "d": 11 + }, + "tools/github_actions_dependencies.sh": { + "a": 6, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1115.json b/doc/sphinxext/prs/1115.json new file mode 100644 index 00000000000..467cb22abfa --- /dev/null +++ b/doc/sphinxext/prs/1115.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cbfa2c9ed2ebd972f5bd3eb0726105410546354e", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11150.json b/doc/sphinxext/prs/11150.json new file mode 100644 index 00000000000..6d1ece44e98 --- /dev/null +++ b/doc/sphinxext/prs/11150.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bc74d7d071f6aadc6ff42092ac64614649b2bd1f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/defaults.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11151.json b/doc/sphinxext/prs/11151.json new file mode 100644 index 00000000000..97828812e00 --- /dev/null +++ b/doc/sphinxext/prs/11151.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "880e883c06184160c30d50da06803e67977ac366", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 0, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 0, + "d": 2 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_proj.py": { + 
"a": 1, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 5, + "d": 20 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 19, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11152.json b/doc/sphinxext/prs/11152.json new file mode 100644 index 00000000000..f0aa3211584 --- /dev/null +++ b/doc/sphinxext/prs/11152.json @@ -0,0 +1,151 @@ +{ + "merge_commit_sha": "62af4ac642b21bc0b996c8c86fdeab417806f6d7", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "dominikwelke", + "e": "dominik.welke@web.de" + } + ], + "changes": { + "doc/_includes/data_formats.rst": { + "a": 2, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "doc/datasets.rst": { + "a": 2, + "d": 1 + }, + "doc/overview/datasets_index.rst": { + "a": 13, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 13, + "d": 0 + }, + "doc/reading_raw_data.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 12, + "d": 6 + }, + "mne/datasets/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/config.py": { + "a": 12, + "d": 3 + }, + "mne/datasets/eyelink/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/eyelink/eyelink.py": { + "a": 26, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 1 + }, + "mne/defaults.py": { + "a": 13, + "d": 6 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/constants.py": { + "a": 14, + "d": 1 + }, + "mne/io/eyelink/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/eyelink/eyelink.py": { + "a": 882, + "d": 0 + }, + "mne/io/eyelink/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 147, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/pick.py": { + "a": 50, + "d": 14 + }, + "mne/io/tests/test_constants.py": { + "a": 5, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/eyetracking/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/preprocessing/eyetracking/eyetracking.py": { + "a": 146, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 6, + "d": 4 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + }, + "mne/viz/_mpl_figure.py": { + "a": 4, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "tools/circleci_download.sh": { + "a": 3, + "d": 0 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 172, + "d": 0 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 107, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11154.json b/doc/sphinxext/prs/11154.json new file mode 100644 index 00000000000..b804f87ba5f --- /dev/null +++ b/doc/sphinxext/prs/11154.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b5b97f9a4ee3fe497efe0822fd190855e9a3359c", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 25, + "d": 16 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 47, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11155.json b/doc/sphinxext/prs/11155.json new file mode 100644 index 00000000000..de54fc6030a --- /dev/null +++ b/doc/sphinxext/prs/11155.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5059cc13cde5a5dd55640762602c54cd25ee4400", + "authors": [ + { + "n": "Eric Larson", + 
"e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11156.json b/doc/sphinxext/prs/11156.json new file mode 100644 index 00000000000..9789cfd032d --- /dev/null +++ b/doc/sphinxext/prs/11156.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5cfb78063d52a5806eae88ffb14708af19552208", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/circle_artifacts.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11157.json b/doc/sphinxext/prs/11157.json new file mode 100644 index 00000000000..223b79ca677 --- /dev/null +++ b/doc/sphinxext/prs/11157.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "be6135a7f441581ae71cbe0403ee0b36774b8f69", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11159.json b/doc/sphinxext/prs/11159.json new file mode 100644 index 00000000000..162f02c98ec --- /dev/null +++ b/doc/sphinxext/prs/11159.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "63b430e7f18ab4b49ae35b5ecc8564993647be62", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 2, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/utils/misc.py": { + "a": 29, + "d": 0 + }, + "mne/utils/tests/test_misc.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1116.json b/doc/sphinxext/prs/1116.json new file mode 100644 index 00000000000..11c2847e225 --- /dev/null +++ b/doc/sphinxext/prs/1116.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "23eaf5587881f57339b68dcd53fee38906ad7f3a", + "authors": [ + { + "n": "aestrivex", + "e": "aestrivex@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/viz.py": { + "a": 40, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11160.json b/doc/sphinxext/prs/11160.json new file mode 100644 index 00000000000..2e01ceb5b1a --- /dev/null +++ b/doc/sphinxext/prs/11160.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d74c0fa3922710a0ae58eb1d82b2b212a28889d5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 8, + "d": 5 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 14, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11163.json b/doc/sphinxext/prs/11163.json new file mode 100644 index 00000000000..6667b3a20a3 --- /dev/null +++ b/doc/sphinxext/prs/11163.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "58a4b337697168020f8d72bdde3265897d4af61b", + "authors": [ + { + "n": "Eduard Ort", + "e": 
"eduardxort@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11166.json b/doc/sphinxext/prs/11166.json new file mode 100644 index 00000000000..36967d56d65 --- /dev/null +++ b/doc/sphinxext/prs/11166.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "da3823f906f871bda7b133778a99c800496e7963", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/logging.rst": { + "a": 1, + "d": 0 + }, + "mne/parallel.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 140, + "d": 84 + }, + "mne/utils/tests/test_config.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11169.json b/doc/sphinxext/prs/11169.json new file mode 100644 index 00000000000..c6172ff2cff --- /dev/null +++ b/doc/sphinxext/prs/11169.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "72ba75d7d65db6c23b73ffdd4b38eee113d668c9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "requirements_doc.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11171.json b/doc/sphinxext/prs/11171.json new file mode 100644 index 00000000000..511c6e6521e --- /dev/null +++ b/doc/sphinxext/prs/11171.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7e229ed7f709d5345481548b186f832572536caa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/source_space.py": { + "a": 50, + "d": 7 + }, + "mne/tests/test_source_space.py": { + "a": 33, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11175.json b/doc/sphinxext/prs/11175.json new file mode 100644 index 00000000000..129f71bfd88 --- /dev/null +++ b/doc/sphinxext/prs/11175.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1f19c0fcf89d2cfa6b2702c9d9f5a485797d4ec6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/inverse/label_activation_from_stc.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 9, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11176.json b/doc/sphinxext/prs/11176.json new file mode 100644 index 00000000000..b420e9fe780 --- /dev/null +++ b/doc/sphinxext/prs/11176.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8ea5400fe42cb6c91ab3755ccd909f146f679294", + "authors": [ + { + "n": "Eduard Ort", + "e": "eduardxort@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_chpi.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11178.json b/doc/sphinxext/prs/11178.json new file mode 100644 index 00000000000..7f587797aba --- /dev/null +++ b/doc/sphinxext/prs/11178.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ea08b8a73a4f03f6bf0e99c0c2ee33bb279aff81", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/spectrum.py": { + "a": 16, + "d": 11 + }, + 
"mne/time_frequency/tests/test_spectrum.py": { + "a": 24, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11179.json b/doc/sphinxext/prs/11179.json new file mode 100644 index 00000000000..bdfc21228cc --- /dev/null +++ b/doc/sphinxext/prs/11179.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a4cd082024dab16d277ff116959b16b7a5e73dc8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/simulation/70_point_spread.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1118.json b/doc/sphinxext/prs/1118.json new file mode 100644 index 00000000000..855a96e9ff7 --- /dev/null +++ b/doc/sphinxext/prs/1118.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8ac2a2389a3cee9d2632eb7ab7a095c0d6e720b7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/fiff/tests/data/small-src.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/source_space.py": { + "a": 44, + "d": 5 + }, + "mne/surface.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_source_space.py": { + "a": 85, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11180.json b/doc/sphinxext/prs/11180.json new file mode 100644 index 00000000000..30b15cf987b --- /dev/null +++ b/doc/sphinxext/prs/11180.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "d5645aa0907faf06f7ef1d64699e9d91a74ad97d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/events.rst": { + "a": 0, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 3 + }, + "mne/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 14, + "d": 59 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_docs.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11181.json b/doc/sphinxext/prs/11181.json new file mode 100644 index 00000000000..ff1c3e371c7 --- /dev/null +++ b/doc/sphinxext/prs/11181.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "706aea1eb723801c43ae5fc5f68bbb7b53184c29", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 11, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11182.json b/doc/sphinxext/prs/11182.json new file mode 100644 index 00000000000..4f0ef7795f8 --- /dev/null +++ b/doc/sphinxext/prs/11182.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "38dd35fded0e09bc1488534cbd35f81f5fa6688d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 22, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11183.json b/doc/sphinxext/prs/11183.json new file mode 100644 index 00000000000..4514cc6c268 --- /dev/null +++ b/doc/sphinxext/prs/11183.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "53b8667bdb142e1055a18375bd4d79e23d9f2733", + "authors": [ + { 
+ "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 8, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11185.json b/doc/sphinxext/prs/11185.json new file mode 100644 index 00000000000..11564863f99 --- /dev/null +++ b/doc/sphinxext/prs/11185.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0fcbae2d4bc556e44bce02b3169e5bd73371806c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/_freesurfer.py": { + "a": 36, + "d": 9 + }, + "mne/coreg.py": { + "a": 2, + "d": 6 + }, + "mne/dipole.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11186.json b/doc/sphinxext/prs/11186.json new file mode 100644 index 00000000000..0392fcb968a --- /dev/null +++ b/doc/sphinxext/prs/11186.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "f76d8c14181f77363fb7e04c5e0291b68cab79a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/hitachi/hitachi.py": { + "a": 65, + "d": 22 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 40, + "d": 12 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11187.json b/doc/sphinxext/prs/11187.json new file mode 100644 index 00000000000..6f9c324ebfe --- /dev/null +++ b/doc/sphinxext/prs/11187.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5aa12b6555993b3cdc958bcf594d74dae6bfdf04", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1119.json b/doc/sphinxext/prs/1119.json new file mode 100644 index 00000000000..ba0dc96362e --- /dev/null +++ b/doc/sphinxext/prs/1119.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "830e82e7f3fe05f98c0cbef6be48ee98b758c6cf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11190.json b/doc/sphinxext/prs/11190.json new file mode 100644 index 00000000000..e22a060a2cc --- /dev/null +++ b/doc/sphinxext/prs/11190.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "240df830bda18dc630f9bffea386ded3bd247ace", + "authors": [ + { + "n": "Dominik Wetzel", + "e": null + }, + { + "n": "Dominik Wetzel", + "e": "dominik.wetzel@fh-zwickau.de" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 2 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/fixes.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11191.json b/doc/sphinxext/prs/11191.json new file mode 100644 index 00000000000..48a109e187e --- /dev/null +++ b/doc/sphinxext/prs/11191.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "3032f48bec92a2ec12f9b36682070e99a72428bb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + 
".github/workflows/linux_pip.yml": { + "a": 4, + "d": 3 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 29, + "d": 10 + }, + "mne/time_frequency/spectrum.py": { + "a": 4, + "d": 3 + }, + "mne/utils/_logging.py": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 4, + "d": 1 + }, + "tools/check_qt_import.sh": { + "a": 8, + "d": 0 + }, + "tools/github_actions_dependencies.sh": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11192.json b/doc/sphinxext/prs/11192.json new file mode 100644 index 00000000000..816de2bf9c2 --- /dev/null +++ b/doc/sphinxext/prs/11192.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7730df72dc51f663fe557e997b6f5486111f3e43", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 4, + "d": 0 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 2, + "d": 2 + }, + "tutorials/evoked/30_eeg_erp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11193.json b/doc/sphinxext/prs/11193.json new file mode 100644 index 00000000000..6f5b9465c77 --- /dev/null +++ b/doc/sphinxext/prs/11193.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "3a28322b3f8301b1486e080e024f115feeadfeca", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/bem.py": { + "a": 7, + "d": 5 + }, + "mne/chpi.py": { + "a": 10, + "d": 8 + }, + "mne/cov.py": { + "a": 7, + "d": 5 + }, + "mne/dipole.py": { + "a": 5, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 4, + "d": 5 + }, + "mne/report/report.py": { + "a": 19, + "d": 17 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 13, + "d": 6 + }, + "mne/utils/check.py": { + "a": 4, + "d": 2 + }, + "mne/utils/tests/test_check.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11197.json b/doc/sphinxext/prs/11197.json new file mode 100644 index 00000000000..5df658c3ab0 --- /dev/null +++ b/doc/sphinxext/prs/11197.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a40ce20e12c7fb84315172bd6a9e44915a9f811b", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11198.json b/doc/sphinxext/prs/11198.json new file mode 100644 index 00000000000..3da0d55f1d7 --- /dev/null +++ b/doc/sphinxext/prs/11198.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "dcb6230cc4066ea993479caaf56944d59b90b770", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/spectrum.py": { + "a": 43, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 9, + "d": 0 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 23, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11199.json b/doc/sphinxext/prs/11199.json new file mode 100644 index 00000000000..f2d0b319fff --- /dev/null +++ b/doc/sphinxext/prs/11199.json @@ -0,0 +1,15 @@ +{ + 
"merge_commit_sha": "1b3e7d5013dcce1abd6b884a2b39f756ac7b7694", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/datasets/brainstorm_data.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11200.json b/doc/sphinxext/prs/11200.json new file mode 100644 index 00000000000..fbedba32272 --- /dev/null +++ b/doc/sphinxext/prs/11200.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b6e4f5ca04fd432fce6241f091947d37e890346a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/macos_conda.yml": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 8, + "d": 6 + }, + "mne/viz/backends/renderer.py": { + "a": 8, + "d": 3 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 29, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11201.json b/doc/sphinxext/prs/11201.json new file mode 100644 index 00000000000..aac13c7dd06 --- /dev/null +++ b/doc/sphinxext/prs/11201.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "4a445507e6108bcd0477190d7c277d0a904dcb34", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Hüseyin", + "e": "huseyinorkunelmas@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_proj.py": { + "a": 2, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 30, + "d": 11 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 10, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11203.json b/doc/sphinxext/prs/11203.json new file mode 100644 index 00000000000..aa57053e0d8 --- /dev/null +++ b/doc/sphinxext/prs/11203.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4427adfb3bfe9d876ccbbed7fcffb625e6b3bce3", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11204.json b/doc/sphinxext/prs/11204.json new file mode 100644 index 00000000000..8f50472431a --- /dev/null +++ b/doc/sphinxext/prs/11204.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "de546e2188761de69eede103b7c5a6cc09542a4d", + "authors": [ + { + "n": "gemeinl", + "e": "lukas.gemein@gmx.de" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11205.json b/doc/sphinxext/prs/11205.json new file mode 100644 index 00000000000..1f0a1ab52e7 --- /dev/null +++ b/doc/sphinxext/prs/11205.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "35e466f3fbb71cc7b976ae1a88b97409adabf694", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 4, + "d": 0 + }, + "doc/references.bib": { + "a": 1, + "d": 2 + }, + "mne/conftest.py": { + "a": 5, + "d": 2 + }, + 
"mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_lead_dots.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/res4.py": { + "a": 1, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 13, + "d": 1 + }, + "mne/viz/tests/test_scraper.py": { + "a": 2, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 0 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 2, + "d": 2 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 2, + "d": 2 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 0, + "d": 5 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11206.json b/doc/sphinxext/prs/11206.json new file mode 100644 index 00000000000..dc79b275fe4 --- /dev/null +++ b/doc/sphinxext/prs/11206.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a8980714127579192e3b4b2ebcc2c51be824d053", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 7, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11207.json b/doc/sphinxext/prs/11207.json new file mode 100644 index 00000000000..a0fb6624237 --- /dev/null +++ b/doc/sphinxext/prs/11207.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e10b1dae11867cb15e63c783318995d0099979cd", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/glossary.rst": { + "a": 0, + "d": 7 + }, + "mne/time_frequency/multitaper.py": { + "a": 8, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 6, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 10, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11209.json b/doc/sphinxext/prs/11209.json new file mode 100644 index 00000000000..d925307bb34 --- /dev/null +++ b/doc/sphinxext/prs/11209.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "2b3f339aed81f7990caeb1313e0c6341a15546ec", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".cirrus.yml": { + "a": 41, + "d": 0 + }, + "README.rst": { + "a": 4, + "d": 1 + }, + "mne/conftest.py": { + "a": 3, + "d": 0 + }, + "mne/gui/tests/test_core.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tools/cirrus_dependencies.sh": { + "a": 5, + "d": 0 + }, + "tools/cirrus_install_python.sh": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1121.json b/doc/sphinxext/prs/1121.json new file mode 100644 index 00000000000..73e35d58e1d --- /dev/null +++ b/doc/sphinxext/prs/1121.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d318be39e562e85c83ed071b443f9c1f3ef982df", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/inverse/plot_dics_source_power.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11211.json b/doc/sphinxext/prs/11211.json new file mode 100644 index 00000000000..960663aa8b4 --- /dev/null +++ b/doc/sphinxext/prs/11211.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b486ef18f74d22b7bdcd0cf6f0e1a8fb988d5e35", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".cirrus.yml": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11212.json b/doc/sphinxext/prs/11212.json new file mode 100644 index 00000000000..4de233c5cf5 --- /dev/null +++ b/doc/sphinxext/prs/11212.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "ac7cab31cd028df88bc925fe6ef68fea4403756b", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 4, + "d": 0 + }, + "mne/bem.py": { + "a": 10, + "d": 4 + }, + "mne/channels/montage.py": { + "a": 43, + "d": 12 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 2 + }, + "mne/io/_digitization.py": { + "a": 44, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 3, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 4, + "d": 1 + }, + "mne/io/curry/curry.py": { + "a": 2, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 4, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 6, + "d": 5 + }, + "mne/io/fieldtrip/utils.py": { + "a": 3, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 8, + "d": 1 + }, + "mne/io/hitachi/hitachi.py": { + "a": 1, + "d": 0 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 10, + "d": 0 + }, + "mne/io/proj.py": { + "a": 3, + "d": 5 + }, + "mne/io/reference.py": { + "a": 4, + "d": 3 + }, + "mne/io/snirf/_snirf.py": { + "a": 6, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 29, + "d": 8 + }, + "mne/io/tests/test_reference.py": { + "a": 4, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 6, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + }, + "mne/viz/backends/_utils.py": { + "a": 10, + "d": 7 + }, + "setup.cfg": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11213.json b/doc/sphinxext/prs/11213.json new file mode 100644 index 00000000000..ed9c1dbb188 --- /dev/null +++ b/doc/sphinxext/prs/11213.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bfc2525b60b6dee59628a8d80c56d394586dc02d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/datasets/tests/test_datasets.py": { + "a": 5, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 49, + "d": 0 + }, + "mne/utils/config.py": { + "a": 4, + "d": 1 + }, + "mne/viz/backends/tests/test_abstract.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11215.json b/doc/sphinxext/prs/11215.json new 
file mode 100644 index 00000000000..3a2c1ba447d --- /dev/null +++ b/doc/sphinxext/prs/11215.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "880ec67a02ed59273d4b442ea46a118387a8dc03", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_lcmv.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 3, + "d": 2 + }, + "mne/fixes.py": { + "a": 0, + "d": 20 + }, + "mne/io/what.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 11, + "d": 9 + }, + "mne/tests/test_cov.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_morph.py": { + "a": 2, + "d": 2 + }, + "mne/utils/dataframe.py": { + "a": 3, + "d": 2 + }, + "mne/utils/misc.py": { + "a": 1, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11216.json b/doc/sphinxext/prs/11216.json new file mode 100644 index 00000000000..c5af628d705 --- /dev/null +++ b/doc/sphinxext/prs/11216.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "71271588e7d7fd3e165ad158571f40085eaa05e0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/overview/people.rst": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11219.json b/doc/sphinxext/prs/11219.json new file mode 100644 index 00000000000..58451e48394 --- /dev/null +++ b/doc/sphinxext/prs/11219.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "db0d02b72fc5246f3b5e8bd6a8d713b2801aa31b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/utils/misc.py": { + "a": 30, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1122.json b/doc/sphinxext/prs/1122.json new file mode 100644 index 00000000000..73e963dcb0a --- /dev/null +++ b/doc/sphinxext/prs/1122.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "64823209bdf74668bb7a318d1aa549791c6f630f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/_field_interpolation.py": { + "a": 4, + "d": 5 + }, + "mne/viz.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11222.json b/doc/sphinxext/prs/11222.json new file mode 100644 index 00000000000..d14b71b4e73 --- /dev/null +++ b/doc/sphinxext/prs/11222.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6a5c031a01a4aadabd4d178a376952390bf4a3df", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11223.json b/doc/sphinxext/prs/11223.json new file mode 100644 index 00000000000..13ff5510b8b --- /dev/null +++ b/doc/sphinxext/prs/11223.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4aed3f7257d04e51b4cd84ddf56e5ef755acdd84", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 34, + "d": 9 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11228.json b/doc/sphinxext/prs/11228.json new file mode 100644 index 00000000000..5350dd78ba0 --- /dev/null +++ b/doc/sphinxext/prs/11228.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "8a73a59beef4f896ef7f3c6ce4baede80f3ba7a6", + 
"authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/README.txt": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 4, + "d": 3 + }, + "mne/dipole.py": { + "a": 8, + "d": 4 + }, + "mne/epochs.py": { + "a": 10, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 15, + "d": 13 + }, + "mne/morph.py": { + "a": 9, + "d": 2 + }, + "mne/viz/backends/_abstract.py": { + "a": 7, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11229.json b/doc/sphinxext/prs/11229.json new file mode 100644 index 00000000000..575252225ce --- /dev/null +++ b/doc/sphinxext/prs/11229.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "20ebcaa85d01b1887aff54d2fe69ab8803151a7d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11230.json b/doc/sphinxext/prs/11230.json new file mode 100644 index 00000000000..4e19fd0ead3 --- /dev/null +++ b/doc/sphinxext/prs/11230.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8bd94067676bb2f5bb158e6f2f2c3d24f7e66040", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/misc.py": { + "a": 14, + "d": 12 + }, + "mne/utils/tests/test_misc.py": { + "a": 34, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11231.json b/doc/sphinxext/prs/11231.json new file mode 100644 index 00000000000..4e15b4d8fca --- /dev/null +++ b/doc/sphinxext/prs/11231.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a373946084376800a95ce333fa984a8311a6bca", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11232.json b/doc/sphinxext/prs/11232.json new file mode 100644 index 00000000000..20c3e3aa007 --- /dev/null +++ b/doc/sphinxext/prs/11232.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0a0e00f6d9c3ebadd72451f4b7df68eeafb7fcd6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 24, + "d": 9 + }, + "mne/report/report.py": { + "a": 1, + "d": 1 + }, + "mne/report/tests/test_report.py": { + "a": 8, + "d": 1 + }, + "mne/utils/check.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11234.json b/doc/sphinxext/prs/11234.json new file mode 100644 index 00000000000..fdd34b1bf9e --- /dev/null +++ b/doc/sphinxext/prs/11234.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "ff1cfdd8e3ac3c05bfd5987a3e821f13cf7928f9", + "authors": [ + { + "n": "Velu Prabhakar Kumaravel", + "e": null + }, + { + "n": "Velu Prabhakar Kumaravel", + "e": "veluprabhakarkumaravel@Velus-MBP.lan" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/api/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/11234.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + 
"doc/references.bib": { + "a": 31, + "d": 0 + }, + "mne/preprocessing/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/_lof.py": { + "a": 100, + "d": 0 + }, + "mne/preprocessing/tests/test_lof.py": { + "a": 39, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11235.json b/doc/sphinxext/prs/11235.json new file mode 100644 index 00000000000..f99cf206013 --- /dev/null +++ b/doc/sphinxext/prs/11235.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1dd1fa1296480317be3d75fb84c915e5eaacef9e", + "authors": [ + { + "n": "Daniel Hasegan", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11236.json b/doc/sphinxext/prs/11236.json new file mode 100644 index 00000000000..c4539874545 --- /dev/null +++ b/doc/sphinxext/prs/11236.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "b9628c09599eca74feb13b9ef75aba2eee1801bb", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + ".mailmap": { + "a": 8, + "d": 0 + }, + "CITATION.cff": { + "a": 42, + "d": 19 + }, + "SECURITY.md": { + "a": 3, + "d": 3 + }, + "codemeta.json": { + "a": 120, + "d": 48 + }, + "doc/_static/versions.json": { + "a": 6, + "d": 2 + }, + "doc/changes/1.2.inc": { + "a": 37, + "d": 3 + }, + "doc/cited.rst": { + "a": 3, + "d": 3 + }, + "doc/conf.py": { + "a": 9, + "d": 12 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/_version.py": { + "a": 1, + "d": 1 + }, + "requirements_doc.txt": { + "a": 2, + "d": 2 + }, + "tools/circleci_download.sh": { + "a": 1, + "d": 1 + }, + "tools/generate_codemeta.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11237.json b/doc/sphinxext/prs/11237.json new file mode 100644 index 00000000000..3785f329fe5 --- /dev/null +++ b/doc/sphinxext/prs/11237.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2482b52f26a2d36d98b42c8accc91e3608276bb6", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/csd.py": { + "a": 6, + "d": 29 + }, + "mne/time_frequency/multitaper.py": { + "a": 11, + "d": 7 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 12, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11238.json b/doc/sphinxext/prs/11238.json new file mode 100644 index 00000000000..9216d0622f9 --- /dev/null +++ b/doc/sphinxext/prs/11238.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1a7d120ae41d22bb73b32c0b65a0598e1ebd03b3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1124.json b/doc/sphinxext/prs/1124.json new file mode 100644 index 00000000000..16b3e21e8cb --- /dev/null +++ b/doc/sphinxext/prs/1124.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "eb6cb0d2ecc05091b139db27718998f51741f190", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 
+ }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/event.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/channels.py": { + "a": 63, + "d": 0 + }, + "mne/fiff/tests/test_channels.py": { + "a": 66, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11241.json b/doc/sphinxext/prs/11241.json new file mode 100644 index 00000000000..a7f8f5e0916 --- /dev/null +++ b/doc/sphinxext/prs/11241.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "62e88db6d04df32a248073e5d204a88bab6cb2cf", + "authors": [ + { + "n": "Jevri Hanna", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 5, + "d": 10 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11244.json b/doc/sphinxext/prs/11244.json new file mode 100644 index 00000000000..0b1d4503c97 --- /dev/null +++ b/doc/sphinxext/prs/11244.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3c73d042288c564ac1ed3499a5724a349e91e207", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 14, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 16, + "d": 5 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11245.json b/doc/sphinxext/prs/11245.json new file mode 100644 index 00000000000..ada4fdc051a --- /dev/null +++ b/doc/sphinxext/prs/11245.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "27df81a1c95c402403fc11ca7e391c8eb388ef16", + "authors": [ + { + "n": "Moritz Gerster", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 10, + "d": 0 + }, + "mne/io/reference.py": { + "a": 21, + "d": 2 + }, + "mne/io/tests/test_reference.py": { + "a": 28, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11246.json b/doc/sphinxext/prs/11246.json new file mode 100644 index 00000000000..42599de28f4 --- /dev/null +++ b/doc/sphinxext/prs/11246.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9eedef0abc44ab4dfe7e6d23580170ca705e9240", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11248.json b/doc/sphinxext/prs/11248.json new file mode 100644 index 00000000000..f6f9749cbf1 --- /dev/null +++ b/doc/sphinxext/prs/11248.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "98a0dfcc74983f94ff5c59dcfdee40443ee0386b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/spectrum.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11249.json b/doc/sphinxext/prs/11249.json new file mode 100644 index 00000000000..0322ba9154c --- /dev/null +++ b/doc/sphinxext/prs/11249.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "689f88ba3a395a424b62973ed063c6ea95aceafa", + "authors": [ + { + 
"n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/references.bib": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1125.json b/doc/sphinxext/prs/1125.json new file mode 100644 index 00000000000..ca04753c9aa --- /dev/null +++ b/doc/sphinxext/prs/1125.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "406a8cf31f73e266494b924b845aaeb102892089", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11250.json b/doc/sphinxext/prs/11250.json new file mode 100644 index 00000000000..9356f0d9fa0 --- /dev/null +++ b/doc/sphinxext/prs/11250.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3d13746dc3d08b1eff073dab5c119812de4d42ee", + "authors": [ + { + "n": "Omer Shubi", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11251.json b/doc/sphinxext/prs/11251.json new file mode 100644 index 00000000000..acce54fce33 --- /dev/null +++ b/doc/sphinxext/prs/11251.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fcfb565bdf253ca2a20e1c43d622752160def656", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 2, + "d": 1 + }, + "Makefile": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11253.json b/doc/sphinxext/prs/11253.json new file mode 100644 index 00000000000..0c52dc5f0c9 --- /dev/null +++ b/doc/sphinxext/prs/11253.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4957469534ed5f8a4434df8a31d7033ce0560a36", + "authors": [ + { + "n": "Dinara Issagaliyeva", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11255.json b/doc/sphinxext/prs/11255.json new file mode 100644 index 00000000000..1c7563e2af7 --- /dev/null +++ b/doc/sphinxext/prs/11255.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "83db99f7ae2bde0e96412e7e33cffdb6b23b6152", + "authors": [ + { + "n": null, + "e": "myd7349@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11256.json b/doc/sphinxext/prs/11256.json new file mode 100644 index 00000000000..faa78985373 --- /dev/null +++ b/doc/sphinxext/prs/11256.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "792b398ef25564192db22cfa8dd4ca9f9b139e0d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + "tools/circleci_download.sh": { + "a": 3, + "d": 1 + }, + "tools/get_minimal_commands.sh": { + "a": 3, + "d": 1 + }, + "tools/get_testing_version.sh": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11259.json b/doc/sphinxext/prs/11259.json new file mode 100644 index 00000000000..d5cbb2dbae7 --- /dev/null +++ b/doc/sphinxext/prs/11259.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": 
"3b29980d22becbf8bfa1c535f6e5680ad4bc749a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 0 + }, + "mne/evoked.py": { + "a": 4, + "d": 0 + }, + "mne/io/base.py": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1126.json b/doc/sphinxext/prs/1126.json new file mode 100644 index 00000000000..a6fb8fba551 --- /dev/null +++ b/doc/sphinxext/prs/1126.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "390a9483e98678e60f0ff8fc63b0f620e2b3c856", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 4, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 4, + "d": 5 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 7, + "d": 2 + }, + "mne/label.py": { + "a": 209, + "d": 72 + }, + "mne/tests/test_label.py": { + "a": 71, + "d": 56 + }, + "mne/tests/test_source_estimate.py": { + "a": 8, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11260.json b/doc/sphinxext/prs/11260.json new file mode 100644 index 00000000000..c0459b1a80d --- /dev/null +++ b/doc/sphinxext/prs/11260.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "85dfacbab79e9b802a7d91ce981bbb57b673ae49", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".mailmap": { + "a": 2, + "d": 0 + }, + "doc/changes/0.18.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.19.inc": { + "a": 3, + "d": 3 + }, + "doc/changes/names.inc": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11261.json b/doc/sphinxext/prs/11261.json new file mode 100644 index 00000000000..d6d20f8b680 --- /dev/null +++ b/doc/sphinxext/prs/11261.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "907e8dbb18a67d6961cffbe63ff94dbe8dbc6ab2", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11263.json b/doc/sphinxext/prs/11263.json new file mode 100644 index 00000000000..3ea067e2703 --- /dev/null +++ b/doc/sphinxext/prs/11263.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a49995ed5b1e84ab74a6dd1882f115d8dfd10406", + "authors": [ + { + "n": "Carlos de la Torre", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/mixin.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11265.json b/doc/sphinxext/prs/11265.json new file mode 100644 index 00000000000..ee2778c14f8 --- /dev/null +++ b/doc/sphinxext/prs/11265.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bcf13b13e5388dea3b6885ac2c6e1f3f7437a463", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/time_frequency/multitaper.py": { + "a": 40, + "d": 47 + }, + "mne/time_frequency/psd.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 46, + "d": 73 + }, + "mne/utils/docs.py": { + "a": 104, + "d": 2 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/11266.json b/doc/sphinxext/prs/11266.json new file mode 100644 index 00000000000..e4688e36428 --- /dev/null +++ b/doc/sphinxext/prs/11266.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "80f93b302611033537bdd266c27fd7f99e63df83", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/utils/misc.py": { + "a": 23, + "d": 16 + }, + "mne/utils/tests/test_misc.py": { + "a": 83, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11270.json b/doc/sphinxext/prs/11270.json new file mode 100644 index 00000000000..8e3293292ab --- /dev/null +++ b/doc/sphinxext/prs/11270.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f82c74ab5ee4bf6c32a39c282ab70a474340ca74", + "authors": [ + { + "n": "Carlos de la Torre", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11271.json b/doc/sphinxext/prs/11271.json new file mode 100644 index 00000000000..6f31e576a6e --- /dev/null +++ b/doc/sphinxext/prs/11271.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a46755782fa351bf9002be35b4c669ee01a019ff", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 15, + "d": 2 + }, + "mne/viz/backends/_qt.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 17, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11272.json b/doc/sphinxext/prs/11272.json new file mode 100644 index 00000000000..0ac5088da47 --- /dev/null +++ b/doc/sphinxext/prs/11272.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7efe0b14d82be746799a99de4bb7b64c7ba1b644", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/installers.rst": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11273.json b/doc/sphinxext/prs/11273.json new file mode 100644 index 00000000000..bfd23473f8d --- /dev/null +++ b/doc/sphinxext/prs/11273.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b1d62431845a897d113dd7c077207d5ec41d1e0c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/autosummary/class.rst": { + "a": 1, + "d": 1 + }, + "doc/_templates/autosummary/class_no_inherited_members.rst": { + "a": 1, + "d": 1 + }, + "doc/_templates/autosummary/class_no_members.rst": { + "a": 8, + "d": 0 + }, + "doc/_templates/autosummary/function.rst": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 5, + "d": 0 + }, + "doc/file_io.rst": { + "a": 1, + "d": 0 + }, + "doc/reading_raw_data.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11277.json b/doc/sphinxext/prs/11277.json new file mode 100644 index 00000000000..67a3d4cca86 --- /dev/null +++ b/doc/sphinxext/prs/11277.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fcf2770e98532c1bae7d3193087b82999a64a8e4", + "authors": [ + { + "n": null, + "e": "erkkahe@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + 
"e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 8, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11279.json b/doc/sphinxext/prs/11279.json new file mode 100644 index 00000000000..960214e5f69 --- /dev/null +++ b/doc/sphinxext/prs/11279.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5b71e261d1a85c06265a78dd5084de4b3ef86962", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 14, + "d": 7 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1128.json b/doc/sphinxext/prs/1128.json new file mode 100644 index 00000000000..214ab1d2db5 --- /dev/null +++ b/doc/sphinxext/prs/1128.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2a0814f7ce4f6f50b2410e170a49a30d1214b0b8", + "authors": [ + { + "n": "Mads Jensen", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 25, + "d": 12 + }, + "mne/tests/test_source_estimate.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11280.json b/doc/sphinxext/prs/11280.json new file mode 100644 index 00000000000..90997ca8270 --- /dev/null +++ b/doc/sphinxext/prs/11280.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cee4e7d11116bd4e0e219fe611c0121cafd2394d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/tests/test_spectrum.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11281.json b/doc/sphinxext/prs/11281.json new file mode 100644 index 00000000000..714c1a6652e --- /dev/null +++ b/doc/sphinxext/prs/11281.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ad4c99cdca4b71026568e474118f6b18054addc6", + "authors": [ + { + "n": null, + "e": "timon.merk@charite.de" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 55, + "d": 15 + }, + "tutorials/clinical/30_ecog.py": { + "a": 27, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11282.json b/doc/sphinxext/prs/11282.json new file mode 100644 index 00000000000..ccf47195d92 --- /dev/null +++ b/doc/sphinxext/prs/11282.json @@ -0,0 +1,195 @@ +{ + "merge_commit_sha": "925f522829f74ded48935f39b51ef270adc70923", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/api/time_frequency.rst": { + "a": 5, + "d": 0 + }, + "doc/changes/devel/11282.apichange.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/11282.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/11282.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 4, + "d": 0 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 10, + "d": 8 + }, + "examples/inverse/dics_epochs.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 2, + "d": 3 + }, + "examples/time_frequency/time_frequency_simulated.py": { + "a": 38, + "d": 42 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 7 + }, + 
"mne/conftest.py": { + "a": 46, + "d": 9 + }, + "mne/decoding/time_frequency.py": { + "a": 11, + "d": 11 + }, + "mne/epochs.py": { + "a": 142, + "d": 7 + }, + "mne/evoked.py": { + "a": 61, + "d": 0 + }, + "mne/html_templates/repr/tfr.html.jinja": { + "a": 60, + "d": 0 + }, + "mne/io/base.py": { + "a": 64, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/__init__.pyi": { + "a": 10, + "d": 2 + }, + "mne/time_frequency/_stockwell.py": { + "a": 47, + "d": 32 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 39, + "d": 48 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 8, + "d": 4 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 683, + "d": 511 + }, + "mne/time_frequency/tfr.py": { + "a": 2578, + "d": 1484 + }, + "mne/utils/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/utils/_testing.py": { + "a": 10, + "d": 0 + }, + "mne/utils/check.py": { + "a": 20, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 588, + "d": 138 + }, + "mne/utils/mixin.py": { + "a": 10, + "d": 9 + }, + "mne/utils/numerics.py": { + "a": 14, + "d": 3 + }, + "mne/utils/spectrum.py": { + "a": 22, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 23, + "d": 12 + }, + "mne/viz/tests/test_topomap.py": { + "a": 11, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 18, + "d": 5 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tutorials/intro/10_overview.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 2, + "d": 3 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 5, + "d": 13 + }, + "tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py": { + "a": 2, + "d": 3 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 3, + "d": 4 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 5, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11283.json b/doc/sphinxext/prs/11283.json new file mode 100644 index 00000000000..48abfc2d53f --- /dev/null +++ b/doc/sphinxext/prs/11283.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "062b5e0a34d9dfc8c701fa0d86f2a017262207f2", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 3, + "d": 7 + }, + "mne/defaults.py": { + "a": 2, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 4, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 59, + "d": 44 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 34, + "d": 19 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 41, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11284.json b/doc/sphinxext/prs/11284.json new file mode 100644 index 00000000000..1c25b077eb7 --- /dev/null +++ b/doc/sphinxext/prs/11284.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6e5ca18793c5a0d6db9c97b932bb89756c9dd17e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + 
}, + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 1 + }, + "mne/utils/tests/test_misc.py": { + "a": 7, + "d": 14 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11289.json b/doc/sphinxext/prs/11289.json new file mode 100644 index 00000000000..db3803348df --- /dev/null +++ b/doc/sphinxext/prs/11289.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6c3a159824c41ffac40e289da601fb57de3649ba", + "authors": [ + { + "n": "Mauricio Cespedes", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11290.json b/doc/sphinxext/prs/11290.json new file mode 100644 index 00000000000..ec55397ded8 --- /dev/null +++ b/doc/sphinxext/prs/11290.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9e5989097dcf60634c1daf111457ca8e202d29e6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/channels/data/montages/mgh60.elc": { + "a": 42, + "d": 42 + }, + "mne/channels/tests/test_montage.py": { + "a": 75, + "d": 13 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11291.json b/doc/sphinxext/prs/11291.json new file mode 100644 index 00000000000..4d6863e797a --- /dev/null +++ b/doc/sphinxext/prs/11291.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "52f34d33013aadd92150d43f52234969ea9a621a", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "examples/visualization/publication_figure.py": { + "a": 4, + "d": 6 + }, + "examples/visualization/sensor_noise_level.py": { + "a": 3, + "d": 3 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 1, + "d": 2 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 4, + "d": 5 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 12, + "d": 13 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 2, + "d": 3 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 10, + "d": 11 + }, + "tutorials/simulation/80_dics.py": { + "a": 5, + "d": 6 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11293.json b/doc/sphinxext/prs/11293.json new file mode 100644 index 00000000000..38c3ce24266 --- /dev/null +++ b/doc/sphinxext/prs/11293.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "649357dfce54d5ea76b80c9cc03d642e02ed1535", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "mne/filter.py": { + "a": 1, + "d": 9 + }, + "mne/time_frequency/csd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/multitaper.py": { + "a": 42, + "d": 63 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 7, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11298.json b/doc/sphinxext/prs/11298.json new file mode 100644 index 00000000000..3010f0a2de1 --- /dev/null +++ b/doc/sphinxext/prs/11298.json @@ 
-0,0 +1,27 @@ +{ + "merge_commit_sha": "15d20b88fef9ccdc16ecd2435bafe516a6b0ce67", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1130.json b/doc/sphinxext/prs/1130.json new file mode 100644 index 00000000000..3188b64f13f --- /dev/null +++ b/doc/sphinxext/prs/1130.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8270ec8847afbe784fe970a033d60896eb3d2bf0", + "authors": [ + { + "n": "Saket Choudhary", + "e": "saketkc@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11300.json b/doc/sphinxext/prs/11300.json new file mode 100644 index 00000000000..7e12f000b99 --- /dev/null +++ b/doc/sphinxext/prs/11300.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bb328cbeb253165f09b7dc9122d44c432c483138", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11301.json b/doc/sphinxext/prs/11301.json new file mode 100644 index 00000000000..ebb948fb1e6 --- /dev/null +++ b/doc/sphinxext/prs/11301.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "57bd4a5e75008db9141f3458204b03551da57b8b", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/_includes/institutional-partners.rst": { + "a": 2, + "d": 1 + }, + "doc/_static/institution_logos/Donders.png": { + "a": 0, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11302.json b/doc/sphinxext/prs/11302.json new file mode 100644 index 00000000000..52da95780b1 --- /dev/null +++ b/doc/sphinxext/prs/11302.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4ef3a88c7c1ea29b4925cece2072ad5c5d826b82", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 9, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11303.json b/doc/sphinxext/prs/11303.json new file mode 100644 index 00000000000..bd9731c65b5 --- /dev/null +++ b/doc/sphinxext/prs/11303.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "867505ddb9f02c2629a6b364e190589d47ffbbf5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/_freesurfer.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11304.json b/doc/sphinxext/prs/11304.json new file mode 100644 index 00000000000..f635ee09f64 --- /dev/null +++ b/doc/sphinxext/prs/11304.json @@ -0,0 +1,175 @@ +{ + "merge_commit_sha": 
"0ec28e9ad8e234ea51266644ae1ac35a2bc11f46", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/0.12.inc": { + "a": 4, + "d": 4 + }, + "doc/changes/0.14.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.15.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.19.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.20.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.21.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.22.inc": { + "a": 2, + "d": 2 + }, + "doc/time_frequency.rst": { + "a": 0, + "d": 2 + }, + "mne/_freesurfer.py": { + "a": 5, + "d": 9 + }, + "mne/cov.py": { + "a": 4, + "d": 19 + }, + "mne/decoding/csp.py": { + "a": 13, + "d": 7 + }, + "mne/decoding/transformer.py": { + "a": 4, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 55 + }, + "mne/evoked.py": { + "a": 11, + "d": 14 + }, + "mne/fixes.py": { + "a": 0, + "d": 20 + }, + "mne/preprocessing/_regress.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/simulation/_metrics.py": { + "a": 0, + "d": 49 + }, + "mne/simulation/metrics/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 12, + "d": 15 + }, + "mne/tests/test_coreg.py": { + "a": 3, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 5 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 152 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 10, + "d": 30 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 68 + }, + "mne/viz/_brain/_brain.py": { + "a": 8, + "d": 26 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 4 + }, + "mne/viz/_mpl_figure.py": { + "a": 3, + "d": 73 + }, + "mne/viz/backends/_pyvista.py": { + "a": 11, + "d": 11 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_figure.py": { + "a": 0, + "d": 7 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 13 + }, + "mne/viz/topomap.py": { + "a": 24, + "d": 147 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 14 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 1, + "d": 2 + }, + "tutorials/time-freq/10_spectrum_class.py": { + "a": 2, + "d": 2 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11306.json b/doc/sphinxext/prs/11306.json new file mode 100644 index 00000000000..a2f6c7e93de --- /dev/null +++ b/doc/sphinxext/prs/11306.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "fe9358f161af6743adb61160824d93f5e7b77aa7", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 10, + "d": 0 + }, + "mne/epochs.py": { + "a": 55, + "d": 12 + }, + "mne/event.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 61, + "d": 13 + }, + "mne/time_frequency/spectrum.py": { + "a": 3, + "d": 1 + }, + "mne/utils/check.py": { + "a": 2, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11307.json b/doc/sphinxext/prs/11307.json new file mode 100644 index 00000000000..f30ae5ac420 --- /dev/null +++ b/doc/sphinxext/prs/11307.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8585dd5a2867a1666d19178026b87bd6e98378b3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/spectrum.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11308.json b/doc/sphinxext/prs/11308.json new file mode 100644 index 00000000000..7e10cdf5e74 --- /dev/null +++ b/doc/sphinxext/prs/11308.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6fb51a10abb6d37a9c120ea7ebb7188fcddf6443", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_chpi.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11309.json b/doc/sphinxext/prs/11309.json new file mode 100644 index 00000000000..e4357007d57 --- /dev/null +++ b/doc/sphinxext/prs/11309.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f26528d78764c83f754873c40f17e40d5eb08d2d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 40, + "d": 38 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1131.json b/doc/sphinxext/prs/1131.json new file mode 100644 index 00000000000..775ca544544 --- /dev/null +++ b/doc/sphinxext/prs/1131.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6f699dd7ce87e5ed7bbc1739afa3e53eea3a87c0", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fixes.py": { + "a": 11, + "d": 0 + }, + "mne/tests/test_label.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11310.json b/doc/sphinxext/prs/11310.json new file mode 100644 index 00000000000..2b6d234a5a6 --- /dev/null +++ b/doc/sphinxext/prs/11310.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "80a558ef008dc0add4f151eb0fc6051071a0d404", + "authors": [ + { + "n": "Santeri Ruuskanen", + "e": null + }, + { + "n": "Santeri Ruuskanen", + "e": "santeri.ruuskanen@aalto.fi" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 11, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11311.json b/doc/sphinxext/prs/11311.json new file mode 100644 index 00000000000..20404afdad0 --- /dev/null +++ b/doc/sphinxext/prs/11311.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "815bee6b38c160e6f83d86bf929e79040536c83d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 9, + "d": 3 + }, + "mne/decoding/search_light.py": { + "a": 10, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11313.json b/doc/sphinxext/prs/11313.json new file mode 100644 index 00000000000..ca0fa44b7fc --- /dev/null +++ b/doc/sphinxext/prs/11313.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aeeb111f8555b49460583a34c573437358c1cb9f", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11315.json b/doc/sphinxext/prs/11315.json new file mode 100644 index 00000000000..d5c76baee53 --- /dev/null +++ b/doc/sphinxext/prs/11315.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4d12df23916601003aef45d9e72c1e240191cd53", + "authors": [ + { + "n": "Eric Brayet", + "e": null + } + ], + "changes": { + "doc/changes/1.2.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11317.json b/doc/sphinxext/prs/11317.json new file mode 100644 index 00000000000..055f14d3410 --- /dev/null +++ b/doc/sphinxext/prs/11317.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1c1cc3f203e2d7f4edb97df312fdabaca51b9da9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11318.json b/doc/sphinxext/prs/11318.json new file mode 100644 index 00000000000..20a3822a2e8 --- /dev/null +++ b/doc/sphinxext/prs/11318.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4f3ad29ec65a9e888cb237b24c834a9e161ba3f5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 5, + "d": 8 + }, + "mne/report/tests/test_report.py": { + "a": 15, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1132.json 
b/doc/sphinxext/prs/1132.json new file mode 100644 index 00000000000..3365aca7bf4 --- /dev/null +++ b/doc/sphinxext/prs/1132.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b5d50fe1b81486d03f022e0451c71f495f828403", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/fiff/edf/edf.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11322.json b/doc/sphinxext/prs/11322.json new file mode 100644 index 00000000000..a101f58f98e --- /dev/null +++ b/doc/sphinxext/prs/11322.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e65e5e5b49a7cdc21fb42d4a92bca5bd7a4e4329", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11325.json b/doc/sphinxext/prs/11325.json new file mode 100644 index 00000000000..97e64822f16 --- /dev/null +++ b/doc/sphinxext/prs/11325.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "be6bee79833ad48aaf34b7afabf08df8fc154475", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11328.json b/doc/sphinxext/prs/11328.json new file mode 100644 index 00000000000..385ee6b9f1d --- /dev/null +++ b/doc/sphinxext/prs/11328.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0fbd8fcae6e6c3a69c47069da9af8127bd113e8a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_templates/sidebar-quicklinks.html": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 10, + "d": 18 + }, + "doc/install/contributing.rst": { + "a": 7, + "d": 7 + }, + "doc/install/index.rst": { + "a": 1, + "d": 1 + }, + "doc/install/installers.rst": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11329.json b/doc/sphinxext/prs/11329.json new file mode 100644 index 00000000000..5a7a41f9f66 --- /dev/null +++ b/doc/sphinxext/prs/11329.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "063a3e87c521fc0bff0307a456cb8d363ecffa9e", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 24, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 45, + "d": 12 + }, + "mne/viz/utils.py": { + "a": 9, + "d": 9 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11333.json b/doc/sphinxext/prs/11333.json new file mode 100644 index 00000000000..ab75c6cdf1b --- /dev/null +++ b/doc/sphinxext/prs/11333.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bbdf98b7d09b0c70db41ebfc203b3531e91c8e67", + "authors": [ + { + "n": "Toomas Erik Anijärv", + "e": "toomaserikanijarv@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + 
"doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 11, + "d": 5 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11334.json b/doc/sphinxext/prs/11334.json new file mode 100644 index 00000000000..dd28c007625 --- /dev/null +++ b/doc/sphinxext/prs/11334.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e56581776c5b5442ae2f4fa7de5ab43a0600322d", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/time_frequency/psd.py": { + "a": 5, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11335.json b/doc/sphinxext/prs/11335.json new file mode 100644 index 00000000000..4aa601d6e94 --- /dev/null +++ b/doc/sphinxext/prs/11335.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8f01c9002a8ec6d89824e0cc1c6415be73ab1987", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/evoked.py": { + "a": 1, + "d": 0 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11338.json b/doc/sphinxext/prs/11338.json new file mode 100644 index 00000000000..df9152cbefc --- /dev/null +++ b/doc/sphinxext/prs/11338.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "049295e9fd84c821db4537c9ea50efc20625b43d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 3, + "d": 0 + }, + "mne/io/ctf/markers.py": { + "a": 5, + "d": 4 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 4 + }, + "mne/viz/backends/_utils.py": { + "a": 2, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1134.json b/doc/sphinxext/prs/1134.json new file mode 100644 index 00000000000..83e977e046a --- /dev/null +++ b/doc/sphinxext/prs/1134.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f0d0211ef87171566439caf042b57401fa58a114", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11340.json b/doc/sphinxext/prs/11340.json new file mode 100644 index 00000000000..a8c43636b8e --- /dev/null +++ b/doc/sphinxext/prs/11340.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c4e624962a1908c71a2fe912a531b6783adc7987", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/curry/curry.py": { + "a": 14, + "d": 6 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11344.json b/doc/sphinxext/prs/11344.json new file mode 100644 index 00000000000..7582d818aa9 --- /dev/null +++ b/doc/sphinxext/prs/11344.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "275a9879a058cc5b9eb48a39c46c6ec1d18b6248", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Felix Klotzsche", + "e": "klotzsche@cbs.mpg.de" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 6, + "d": 3 + }, + "mne/io/pick.py": { + "a": 6, + "d": 6 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 6, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11345.json b/doc/sphinxext/prs/11345.json new file mode 100644 index 00000000000..4468401248f --- /dev/null +++ b/doc/sphinxext/prs/11345.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f5d9a9eddefaffeedac46a79dfd149e6923fe048", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 11, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11346.json b/doc/sphinxext/prs/11346.json new file mode 100644 index 00000000000..1180ddfce43 --- /dev/null +++ b/doc/sphinxext/prs/11346.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3812114eeea9a9ffc8449fb23b21353e878e6f25", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 36, + "d": 33 + }, + "mne/report/tests/test_report.py": { + "a": 37, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11347.json b/doc/sphinxext/prs/11347.json new file mode 100644 index 00000000000..87f4fa0d6ce --- /dev/null +++ b/doc/sphinxext/prs/11347.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "452d51d7ae5d058b04ead9f6d5995e4303e3e241", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 7, + "d": 2 + }, + "doc/conf.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11348.json b/doc/sphinxext/prs/11348.json new file mode 100644 index 00000000000..67703c61ea9 --- /dev/null +++ b/doc/sphinxext/prs/11348.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ad8b91d752c9eb577dfbb96b4158ca0cb0ab2267", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/clinical/30_ecog.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/1135.json b/doc/sphinxext/prs/1135.json new file mode 100644 index 00000000000..d19f50d8c3c --- /dev/null +++ b/doc/sphinxext/prs/1135.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "770af5643eb5130c727337af195089897b14cb19", + "authors": [ + { + "n": "Saket Choudhary", + "e": "saketkc@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 5, + "d": 0 + }, + "mne/utils.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11350.json b/doc/sphinxext/prs/11350.json new file mode 100644 index 00000000000..aec70a55389 --- /dev/null +++ b/doc/sphinxext/prs/11350.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "d7bcdd526fd64680107dc98e5871fe128ec33079", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 0, + "d": 1 + }, + "doc/conf.py": { + "a": 0, + "d": 1 + }, + "doc/mri.rst": { + "a": 0, + "d": 1 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 32, + "d": 95 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 96 + }, + "mne/gui/_core.py": { + "a": 33, + "d": 33 + }, + "mne/gui/_vol_stc.py": { + "a": 0, + "d": 896 + }, + "mne/gui/tests/test_core.py": { + "a": 2, + "d": 2 + }, + "mne/gui/tests/test_vol_stc.py": { + "a": 0, + "d": 202 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 0, + "d": 31 + }, + "mne/utils/tests/test_check.py": { + "a": 1, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11351.json b/doc/sphinxext/prs/11351.json new file mode 100644 index 00000000000..028daa3ed4a --- /dev/null +++ b/doc/sphinxext/prs/11351.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ec28281c54a9b4c7ce74c0a58a09ced1a5e580f8", + "authors": [ + { + "n": "enzo", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_proj.py": { + "a": 0, + "d": 24 + }, + "mne/tests/test_proj.py": { + "a": 27, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11353.json b/doc/sphinxext/prs/11353.json new file mode 100644 index 00000000000..b15396fc21f --- /dev/null +++ b/doc/sphinxext/prs/11353.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "73047d3df0565195a977f690773153ec8710a2f4", + "authors": [ + { + "n": "Britta Westner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 28, + "d": 0 + }, + "doc/references.bib": { + "a": 19, + "d": 0 + }, + "doc/time_frequency.rst": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 31, + "d": 7 + }, + "mne/time_frequency/tfr.py": { + "a": 129, + "d": 15 + }, + "mne/utils/docs.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11354.json b/doc/sphinxext/prs/11354.json new file mode 100644 index 00000000000..47742e2738c --- /dev/null +++ b/doc/sphinxext/prs/11354.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ee64a44c569024d653f0cd4fa8ed8c6a4e0a6b76", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { 
+ "mne/gui/__init__.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11356.json b/doc/sphinxext/prs/11356.json new file mode 100644 index 00000000000..4d5e57f62a6 --- /dev/null +++ b/doc/sphinxext/prs/11356.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "306b311a0a3b4ea9c353589d7647523ea0882253", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/linux_conda.yml": { + "a": 6, + "d": 2 + }, + "doc/install/advanced.rst": { + "a": 7, + "d": 8 + }, + "doc/install/check_installation.rst": { + "a": 0, + "d": 1 + }, + "environment.yml": { + "a": 5, + "d": 1 + }, + "mne/conftest.py": { + "a": 5, + "d": 2 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 1, + "d": 0 + }, + "mne/utils/check.py": { + "a": 8, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 7, + "d": 0 + }, + "mne/viz/backends/_utils.py": { + "a": 54, + "d": 0 + }, + "mne/viz/backends/tests/test_abstract.py": { + "a": 33, + "d": 3 + }, + "mne/viz/backends/tests/test_abstract_notebook.py": { + "a": 0, + "d": 31 + }, + "server_environment.yml": { + "a": 0, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11357.json b/doc/sphinxext/prs/11357.json new file mode 100644 index 00000000000..fb55804b651 --- /dev/null +++ b/doc/sphinxext/prs/11357.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1be103defc5e8a10773a3fe1a0ec56515920f08f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 26, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11359.json b/doc/sphinxext/prs/11359.json new file mode 100644 index 00000000000..dfe6b608e9d --- /dev/null +++ b/doc/sphinxext/prs/11359.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8f6af8068fa2281d9b6bc207bd46ae705a31eb2d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 61, + "d": 19 + }, + "mne/report/tests/test_report.py": { + "a": 42, + "d": 3 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1136.json b/doc/sphinxext/prs/1136.json new file mode 100644 index 00000000000..695b76c1266 --- /dev/null +++ b/doc/sphinxext/prs/1136.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f1f8c2c773c028ebda4d42431e0a1bbf7bc873c1", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/event.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11361.json b/doc/sphinxext/prs/11361.json new file mode 100644 index 00000000000..227463ab6e3 --- /dev/null +++ b/doc/sphinxext/prs/11361.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5346534f838ef62a5bbb1fc5388d19854e284f91", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/surface.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11363.json b/doc/sphinxext/prs/11363.json new file mode 100644 index 00000000000..83ae940025b --- /dev/null +++ b/doc/sphinxext/prs/11363.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "52590f330c3515b7871ee29063e991a5f2d79054", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11364.json b/doc/sphinxext/prs/11364.json new file mode 100644 index 00000000000..c4cee148296 --- /dev/null +++ b/doc/sphinxext/prs/11364.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4e759bd5811a7f8d46b81a26c10869e496a9f878", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11365.json b/doc/sphinxext/prs/11365.json new file mode 100644 index 00000000000..6491ffb6d69 --- /dev/null +++ b/doc/sphinxext/prs/11365.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6b2d5f04b4e52e43ec0e34b29f48034b7b931d36", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11366.json b/doc/sphinxext/prs/11366.json new file mode 100644 index 00000000000..ceacefbe783 --- /dev/null +++ b/doc/sphinxext/prs/11366.json @@ -0,0 +1,131 @@ +{ + "merge_commit_sha": "d7570264afaa24da658bbd996ed50c2a37b0ffcf", + "authors": [ + { + "n": "George O'Neill", + "e": null + }, + { + "n": "Robert Seymour", + "e": "robbyseymour@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 7, + "d": 0 + }, + "doc/_includes/data_formats.rst": { + "a": 2, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/datasets.rst": { + "a": 1, + "d": 0 + }, + "doc/glossary.rst": { + "a": 20, + "d": 0 + }, + "doc/overview/datasets_index.rst": { + "a": 13, + "d": 0 + }, + "doc/reading_raw_data.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 15, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 10, + "d": 2 + }, + "mne/datasets/ucl_opm_auditory/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py": { + "a": 32, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/io/_read_raw.py": { + "a": 48, + "d": 27 + }, + 
"mne/io/fil/__init__.py": { + "a": 5, + "d": 0 + }, + "mne/io/fil/fil.py": { + "a": 298, + "d": 0 + }, + "mne/io/fil/sensors.py": { + "a": 145, + "d": 0 + }, + "mne/io/fil/tests/test_fil.py": { + "a": 146, + "d": 0 + }, + "mne/io/tests/test_read_raw.py": { + "a": 25, + "d": 3 + }, + "mne/preprocessing/_regress.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 32, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "tools/circleci_download.sh": { + "a": 4, + "d": 1 + }, + "tutorials/io/10_reading_meg_data.py": { + "a": 8, + "d": 1 + }, + "tutorials/preprocessing/80_opm_processing.py": { + "a": 181, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11369.json b/doc/sphinxext/prs/11369.json new file mode 100644 index 00000000000..27576d63d32 --- /dev/null +++ b/doc/sphinxext/prs/11369.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6c403d34d9542174203a6764197664e21bea8279", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/pick.py": { + "a": 19, + "d": 8 + }, + "mne/io/tests/test_pick.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 12, + "d": 8 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11371.json b/doc/sphinxext/prs/11371.json new file mode 100644 index 00000000000..e82c07aec7a --- /dev/null +++ b/doc/sphinxext/prs/11371.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "715e9790836c1e25ac20a82feed0b68d0e227d1b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 146, + "d": 174 + }, + "mne/preprocessing/ica.py": { + "a": 20, + "d": 19 + }, + "mne/utils/docs.py": { + "a": 35, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 24, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 139, + "d": 92 + }, + "mne/viz/utils.py": { + "a": 14, + "d": 0 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11374.json b/doc/sphinxext/prs/11374.json new file mode 100644 index 00000000000..bb37a23575b --- /dev/null +++ b/doc/sphinxext/prs/11374.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "66fdc9f4af9f6eb71db8a3952ef9bb5e8cf7578d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11375.json b/doc/sphinxext/prs/11375.json new file mode 100644 index 00000000000..6025e428e90 --- /dev/null +++ b/doc/sphinxext/prs/11375.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d133a304d68b8e1aac5ebef49441e6ffa78a399b", + "authors": [ + { + "n": "Mark Henney", + "e": null + }, + { + "n": "Mark", + "e": "mah@optoceutics.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/export/_edf.py": { + "a": 5, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11377.json b/doc/sphinxext/prs/11377.json new file mode 100644 index 
00000000000..80936e9ec3b --- /dev/null +++ b/doc/sphinxext/prs/11377.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "83f56953569853304c4f4d974090b7f54df799a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 11, + "d": 13 + }, + "mne/fixes.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11378.json b/doc/sphinxext/prs/11378.json new file mode 100644 index 00000000000..d2bb9c0aa75 --- /dev/null +++ b/doc/sphinxext/prs/11378.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "71467604d0e47a98a12364fe59e215c59a331e7a", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/references.bib": { + "a": 10, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 12, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11379.json b/doc/sphinxext/prs/11379.json new file mode 100644 index 00000000000..ec3ed21b2cd --- /dev/null +++ b/doc/sphinxext/prs/11379.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a1fec00207a0c9b1281cb2ad1e761bd6b866a66", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11381.json b/doc/sphinxext/prs/11381.json new file mode 100644 index 00000000000..5fad315347a --- /dev/null +++ b/doc/sphinxext/prs/11381.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "654248a3ea9d5b106eaba51b9a74b93a531b3746", + "authors": [ + { + "n": "Lukas Hecker", + "e": null + } + ], + "changes": { + "doc/install/mne_tools_suite.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11384.json b/doc/sphinxext/prs/11384.json new file mode 100644 index 00000000000..3815a6913db --- /dev/null +++ b/doc/sphinxext/prs/11384.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "1c4c2504ee553e0cdb8def2bb0499b02bc93bbe3", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Jennifer Behnke", + "e": "jennifer.behnke@localhost" + }, + { + "n": "Hakimeh Aslsardroud", + "e": "hakimeh.aslsardroud@localhost" + }, + { + "n": "Pavel Navratil", + "e": "pavel.navratil@localhost" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + ".mailmap": { + "a": 6, + "d": 0 + }, + "CITATION.cff": { + "a": 43, + "d": 17 + }, + "SECURITY.md": { + "a": 3, + "d": 3 + }, + "codemeta.json": { + "a": 117, + "d": 39 + }, + "doc/_static/versions.json": { + "a": 7, + "d": 2 + }, + "doc/changes/1.3.inc": { + "a": 36, + "d": 3 + }, + "doc/cited.rst": { + "a": 3, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/_version.py": { + "a": 1, + "d": 1 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "tools/circleci_download.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11387.json b/doc/sphinxext/prs/11387.json new file mode 100644 index 00000000000..ee401dca2fe --- /dev/null +++ b/doc/sphinxext/prs/11387.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "546cbafc4524a17653a57e960cbfa03088d0bdbf", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/installers.rst": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11388.json b/doc/sphinxext/prs/11388.json new file mode 100644 index 
00000000000..5e5adac4332 --- /dev/null +++ b/doc/sphinxext/prs/11388.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b298e6d2710a9d55e531a0958d48e88b9b92f539", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/filter.py": { + "a": 1, + "d": 9 + }, + "mne/io/base.py": { + "a": 19, + "d": 9 + }, + "mne/tests/test_annotations.py": { + "a": 10, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11389.json b/doc/sphinxext/prs/11389.json new file mode 100644 index 00000000000..95fe56dcb9c --- /dev/null +++ b/doc/sphinxext/prs/11389.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a23c62b8e405f05a628c0fa05929b5c1fb01abc5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11390.json b/doc/sphinxext/prs/11390.json new file mode 100644 index 00000000000..bf8249ad0b6 --- /dev/null +++ b/doc/sphinxext/prs/11390.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "96a4bc2e928043a16ab23682fc818cf0a3e78aef", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11391.json b/doc/sphinxext/prs/11391.json new file mode 100644 index 00000000000..783e5fc459c --- /dev/null +++ b/doc/sphinxext/prs/11391.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e5086fcb20e891ef1c4ddc2ce2ae16047b496c5f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 7, + "d": 48 + }, + "mne/preprocessing/ica.py": { + "a": 7, + "d": 8 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 30 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 0, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 0, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 19, + "d": 41 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11397.json b/doc/sphinxext/prs/11397.json new file mode 100644 index 00000000000..ab8cec0cd37 --- /dev/null +++ b/doc/sphinxext/prs/11397.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "9b1e7dd50846882efd62c6f02946e249585a1d8d", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + }, + "doc/links.inc": { + "a": 1, + "d": 1 + }, + "doc/overview/roadmap.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 5, + "d": 1 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 27, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11398.json b/doc/sphinxext/prs/11398.json new file mode 100644 index 00000000000..14304261963 --- /dev/null +++ b/doc/sphinxext/prs/11398.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "147d099c4ac17e3558a758cf672bb448affeae04", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 44, + "d": 14 + }, + "mne/gui/_core.py": { + "a": 2, + "d": 1 + }, + 
"mne/gui/_coreg.py": { + "a": 3, + "d": 1 + }, + "mne/gui/tests/test_core.py": { + "a": 12, + "d": 30 + }, + "mne/gui/tests/test_coreg.py": { + "a": 35, + "d": 5 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 17, + "d": 12 + }, + "mne/gui/tests/test_ieeg_locate.py": { + "a": 18, + "d": 28 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 9 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 5, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 11, + "d": 22 + }, + "mne/viz/backends/_qt.py": { + "a": 18, + "d": 15 + }, + "mne/viz/backends/_utils.py": { + "a": 41, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 0, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tools/cirrus_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11399.json b/doc/sphinxext/prs/11399.json new file mode 100644 index 00000000000..4f90907d469 --- /dev/null +++ b/doc/sphinxext/prs/11399.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "b9e0d966e564f18220b57af3835adb42ddfc3d16", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 15, + "d": 27 + }, + "mne/cov.py": { + "a": 8, + "d": 9 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 7 + }, + "mne/io/meas_info.py": { + "a": 16, + "d": 9 + }, + "mne/io/proc_history.py": { + "a": 6, + "d": 4 + }, + "mne/io/proj.py": { + "a": 6, + "d": 4 + }, + "mne/io/write.py": { + "a": 21, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1140.json b/doc/sphinxext/prs/1140.json new file mode 100644 index 00000000000..65e322961d5 --- /dev/null +++ b/doc/sphinxext/prs/1140.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "22fb909b06f5f69867d241b426a9e8978f31cd9e", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "doc/source/getting_started.rst": { + "a": 113, + "d": 82 + }, + "doc/source/python_tutorial.rst": { + "a": 7, + "d": 37 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11401.json b/doc/sphinxext/prs/11401.json new file mode 100644 index 00000000000..5f52d299b38 --- /dev/null +++ b/doc/sphinxext/prs/11401.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ea88c55b183b5ed5aec793737caf30d750ac999e", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11402.json b/doc/sphinxext/prs/11402.json new file mode 100644 index 00000000000..b5548ac741d --- /dev/null +++ b/doc/sphinxext/prs/11402.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4c5f722585b6637b9180b50345a7ee0bbb6942be", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/_freesurfer.py": { + "a": 33, + "d": 1 + }, + "mne/tests/test_freesurfer.py": { + "a": 37, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11405.json b/doc/sphinxext/prs/11405.json new file mode 100644 index 00000000000..c8e4d72eb36 --- /dev/null +++ b/doc/sphinxext/prs/11405.json @@ -0,0 +1,131 @@ +{ + "merge_commit_sha": "cc377dabd5a4e449d2bc706470a2ed16ea2339fe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + 
} + ], + "changes": { + ".circleci/config.yml": { + "a": 13, + "d": 6 + }, + "doc/api/datasets.rst": { + "a": 1, + "d": 0 + }, + "doc/documentation/datasets.rst": { + "a": 13, + "d": 0 + }, + "examples/datasets/kernel_phantom.py": { + "a": 103, + "d": 0 + }, + "mne/_fiff/_digitization.py": { + "a": 3, + "d": 0 + }, + "mne/bem.py": { + "a": 4, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 0 + }, + "mne/coreg.py": { + "a": 6, + "d": 4 + }, + "mne/data/helmets/Kernel_Flux.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/data/helmets/Kernel_Flux_ch_pos.txt": { + "a": 202, + "d": 0 + }, + "mne/datasets/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 8, + "d": 0 + }, + "mne/datasets/phantom_kernel/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/phantom_kernel/phantom_kernel.py": { + "a": 32, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 28, + "d": 31 + }, + "mne/surface.py": { + "a": 77, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 17, + "d": 0 + }, + "mne/transforms.py": { + "a": 52, + "d": 0 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 19, + "d": 7 + }, + "mne/viz/_3d.py": { + "a": 72, + "d": 21 + }, + "mne/viz/_brain/_brain.py": { + "a": 14, + "d": 13 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + }, + "tools/circleci_download.sh": { + "a": 3, + "d": 0 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 2, + "d": 2 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 2, + "d": 2 + }, + "tutorials/inverse/90_phantom_4DBTi.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11407.json b/doc/sphinxext/prs/11407.json new file mode 100644 index 00000000000..a85cef113d2 --- /dev/null +++ b/doc/sphinxext/prs/11407.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6feb7091cf7ce7fb88325e5e9c1535370fa9c004", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/datasets/_fetch.py": { + "a": 12, + "d": 3 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11409.json b/doc/sphinxext/prs/11409.json new file mode 100644 index 00000000000..549a54588f5 --- /dev/null +++ b/doc/sphinxext/prs/11409.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "a5fa4ab637b0b59d2943d706dedce60822d01ad0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "mne/conftest.py": { + "a": 0, + "d": 5 + }, + "mne/viz/_mpl_figure.py": { + "a": 175, + "d": 87 + }, + "mne/viz/tests/test_epochs.py": { + "a": 0, + "d": 6 + }, + "mne/viz/tests/test_raw.py": { + "a": 31, + "d": 30 + }, + "mne/viz/tests/test_topomap.py": { + "a": 7, + "d": 14 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 21, + "d": 13 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/30_ecog.py": { + "a": 2, + "d": 2 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 1, + "d": 0 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 1, + "d": 1 + }, + 
"tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11412.json b/doc/sphinxext/prs/11412.json new file mode 100644 index 00000000000..9776f5f246e --- /dev/null +++ b/doc/sphinxext/prs/11412.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f6da05d18810f1a64572d341392299228220a460", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 5, + "d": 4 + }, + "mne/channels/interpolation.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11416.json b/doc/sphinxext/prs/11416.json new file mode 100644 index 00000000000..36986a885ce --- /dev/null +++ b/doc/sphinxext/prs/11416.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8b868a09a224781932cbfa6a19fd52280b417de8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11417.json b/doc/sphinxext/prs/11417.json new file mode 100644 index 00000000000..d94ca911c2b --- /dev/null +++ b/doc/sphinxext/prs/11417.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "0e607f3f0eb7c9cbc1e3c6274a214bbd454fb352", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 3, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_mpl_figure.py": { + "a": 4, + "d": 2 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 9, + "d": 11 + }, + "mne/viz/tests/test_epochs.py": { + "a": 6, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 17, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 5, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11418.json b/doc/sphinxext/prs/11418.json new file mode 100644 index 00000000000..7e844d4c41d --- /dev/null +++ b/doc/sphinxext/prs/11418.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b573951de1a84194aff18e9a99eae9f01a3b7aa0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 5, + "d": 1 + }, + "mne/evoked.py": { + "a": 4, + "d": 1 + }, + "mne/io/base.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/tests/test_raw.py": { + "a": 11, + "d": 13 + }, + "mne/time_frequency/spectrum.py": { + "a": 0, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11419.json b/doc/sphinxext/prs/11419.json new file mode 100644 index 00000000000..4b5ed0b9d06 --- /dev/null +++ b/doc/sphinxext/prs/11419.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "36af824f9d16a6d411318bb79e736d7641f1ec14", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 37, + "d": 5 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/1142.json b/doc/sphinxext/prs/1142.json new file mode 100644 index 00000000000..214c98c27ae --- /dev/null +++ b/doc/sphinxext/prs/1142.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2ee58a685d3176c852ed4b7e6167c376a1065a66", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 16, + "d": 21 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 33, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11420.json b/doc/sphinxext/prs/11420.json new file mode 100644 index 00000000000..c2e92bb8212 --- /dev/null +++ b/doc/sphinxext/prs/11420.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e50d9bc7979830d4d0f5416c2f78720741dbe582", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 12, + "d": 15 + }, + "doc/conf.py": { + "a": 2, + "d": 2 + }, + "requirements_doc.txt": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11421.json b/doc/sphinxext/prs/11421.json new file mode 100644 index 00000000000..9ecc166a3e1 --- /dev/null +++ b/doc/sphinxext/prs/11421.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6306265af0002e3b4a9e718c5fc3c4026fe80967", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/gui/_ieeg_locate.py": { + "a": 16, + "d": 6 + }, + "mne/gui/tests/test_ieeg_locate.py": { + "a": 20, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11422.json b/doc/sphinxext/prs/11422.json new file mode 100644 index 00000000000..80a047b69ce --- /dev/null +++ b/doc/sphinxext/prs/11422.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4afeceb9322a53b463d30177fe0ca3b58ffbb3b0", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11423.json b/doc/sphinxext/prs/11423.json new file mode 100644 index 00000000000..fcfa70af68e --- /dev/null +++ b/doc/sphinxext/prs/11423.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "61f83a95f6ec256c4053b0e0e50ead7c0d9f5340", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11424.json b/doc/sphinxext/prs/11424.json new file mode 100644 index 00000000000..a79c17c6850 --- /dev/null +++ b/doc/sphinxext/prs/11424.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "59125c9182804153729dfb355608457f09cb5bbd", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/tests/test_annotations.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11425.json b/doc/sphinxext/prs/11425.json new file mode 100644 index 00000000000..488fe274b02 --- /dev/null +++ b/doc/sphinxext/prs/11425.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "28ef1348cebb4ab55f2a22f64bc9c6da6e40569a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 3, + "d": 2 + }, + 
"tools/github_actions_dependencies.sh": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11427.json b/doc/sphinxext/prs/11427.json new file mode 100644 index 00000000000..b3c423058cd --- /dev/null +++ b/doc/sphinxext/prs/11427.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4f487feddddd9f3844aaee47e3993a2c137a7bf8", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1143.json b/doc/sphinxext/prs/1143.json new file mode 100644 index 00000000000..1e9c7e2874c --- /dev/null +++ b/doc/sphinxext/prs/1143.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "65b7401f2bc78eaa1a73131b6e8070c12e3c3020", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/_compute_forward.py": { + "a": 12, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11430.json b/doc/sphinxext/prs/11430.json new file mode 100644 index 00000000000..9f400964f6e --- /dev/null +++ b/doc/sphinxext/prs/11430.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8dfb23be27536d654838377622a898b08f86cd31", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/events.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/event.py": { + "a": 34, + "d": 0 + }, + "mne/tests/test_event.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11432.json b/doc/sphinxext/prs/11432.json new file mode 100644 index 00000000000..0226e9a887c --- /dev/null +++ b/doc/sphinxext/prs/11432.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "1704513f46f2596f7dccbb520e30804324999409", + "authors": [ + { + "n": "Proloy Das", + "e": "proloy@umd.edu" + }, + { + "n": "pdas6", + "e": "pdas6@mgh.harvard.edu" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/reading_raw_data.rst": { + "a": 1, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/nsx/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/nsx/nsx.py": { + "a": 535, + "d": 0 + }, + "mne/io/nsx/tests/test_nsx.py": { + "a": 282, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11435.json b/doc/sphinxext/prs/11435.json new file mode 100644 index 00000000000..863c0ecc920 --- /dev/null +++ b/doc/sphinxext/prs/11435.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "32b686c0416ca5d5751cf2e07befdfcecb6c0bcb", + "authors": [ + { + "n": "Giulio", + "e": "giulio.gabrieli@iit.it" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "agramfort@fb.com" + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/export/_eeglab.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11436.json b/doc/sphinxext/prs/11436.json new file mode 100644 index 00000000000..70f93f22fd3 --- /dev/null +++ b/doc/sphinxext/prs/11436.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": 
"7262ad94b1c1e80d2cd63662a2f99aed49e74beb", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ieeg/_projection.py": { + "a": 7, + "d": 1 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11437.json b/doc/sphinxext/prs/11437.json new file mode 100644 index 00000000000..cd8bfe22929 --- /dev/null +++ b/doc/sphinxext/prs/11437.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "11bc09911a2dbb6b73a86b31d8140ccd8a93b1c3", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ieeg/_projection.py": { + "a": 91, + "d": 1 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 102, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 14, + "d": 2 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 31, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 25, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11438.json b/doc/sphinxext/prs/11438.json new file mode 100644 index 00000000000..e654c7ad693 --- /dev/null +++ b/doc/sphinxext/prs/11438.json @@ -0,0 +1,239 @@ +{ + "merge_commit_sha": "32feb72477e496663ae9dd0baccb85aab704c4f8", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "examples/inverse/dics_source_power.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/source_space_snr.py": { + "a": 1, + "d": 1 + }, + "examples/io/elekta_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/simulate_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 2, + "d": 2 + }, + "mne/_ola.py": { + "a": 3, + "d": 3 + }, + "mne/baseline.py": { + "a": 2, + "d": 2 + }, + "mne/chpi.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_browse_raw.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_maxfilter.py": { + "a": 2, + "d": 2 + }, + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 2 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 6, + "d": 6 + }, + "mne/evoked.py": { + "a": 4, + "d": 4 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/epochs.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/annotate_amplitude.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/maxfilter.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/realign.py": { + "a": 4, + "d": 4 + }, + 
"mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/proj.py": { + "a": 3, + "d": 3 + }, + "mne/report/report.py": { + "a": 4, + "d": 4 + }, + "mne/report/tests/test_report.py": { + "a": 4, + "d": 4 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/source.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_rank.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_proj.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 4, + "d": 4 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11440.json b/doc/sphinxext/prs/11440.json new file mode 100644 index 00000000000..f602ef5fe3c --- /dev/null +++ b/doc/sphinxext/prs/11440.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "51b2b75424a239116384b3268029b3ea60e18953", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11441.json b/doc/sphinxext/prs/11441.json new file mode 100644 index 00000000000..dfa7d745b1d --- /dev/null +++ b/doc/sphinxext/prs/11441.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6f26d34593c937796c4edf9a68afb4f4b5e5361c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/utils/config.py": { + "a": 15, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11445.json b/doc/sphinxext/prs/11445.json new file mode 100644 index 00000000000..a604f39fc5d --- /dev/null +++ b/doc/sphinxext/prs/11445.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1388d8440cd8347f8c86bdf2f3e91fae195d0359", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 35, + "d": 14 + }, + "mne/viz/tests/test_epochs.py": { + "a": 66, + "d": 32 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1145.json b/doc/sphinxext/prs/1145.json new file mode 100644 index 00000000000..9fceb6e8bfd --- /dev/null +++ b/doc/sphinxext/prs/1145.json @@ -0,0 +1,23 @@ 
+{ + "merge_commit_sha": "89fa95be4de530f38a19b2c9addbaec50336d8dc", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_kit2fiff_gui.py": { + "a": 1, + "d": 2 + }, + "mne/gui/_marker_gui.py": { + "a": 8, + "d": 1 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 26, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11450.json b/doc/sphinxext/prs/11450.json new file mode 100644 index 00000000000..08e7b9e3807 --- /dev/null +++ b/doc/sphinxext/prs/11450.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d9964f568a02c8f9d900c84ebb8e857dfcad9844", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/decoding/search_light.py": { + "a": 68, + "d": 25 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 27, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 19, + "d": 11 + }, + "mne/utils/progressbar.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11452.json b/doc/sphinxext/prs/11452.json new file mode 100644 index 00000000000..1debfe8d711 --- /dev/null +++ b/doc/sphinxext/prs/11452.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9ca938ea6a6d1765e5e617b0efb064f1d0ea21f9", + "authors": [ + { + "n": "rcmdnk", + "e": "rcmdnk@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "agramfort@fb.com" + } + ], + "changes": { + "tutorials/clinical/60_sleep.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11453.json b/doc/sphinxext/prs/11453.json new file mode 100644 index 00000000000..4d4e8ddab1a --- /dev/null +++ b/doc/sphinxext/prs/11453.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c1c45249e9af81296313dcbd682e3c00cf25df92", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11454.json b/doc/sphinxext/prs/11454.json new file mode 100644 index 00000000000..6d67a255e4e --- /dev/null +++ b/doc/sphinxext/prs/11454.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e12ba0a139fa1356933b45c07faea8cf4eaaf5af", + "authors": [ + { + "n": "pzr", + "e": "arne.pelzer@idmt.fraunhofer.de" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 9, + "d": 5 + }, + "mne/utils/misc.py": { + "a": 7, + "d": 6 + }, + "requirements_base.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11455.json b/doc/sphinxext/prs/11455.json new file mode 100644 index 00000000000..90c0716ea32 --- /dev/null +++ b/doc/sphinxext/prs/11455.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d021b91fbaa3abf3c3aafca5d826e30166421f9e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/20_visualize_evoked.py": { + 
"a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11457.json b/doc/sphinxext/prs/11457.json new file mode 100644 index 00000000000..54e00827026 --- /dev/null +++ b/doc/sphinxext/prs/11457.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5024b32782f7bf8e4dd944e57a3269c1b92d07d5", + "authors": [ + { + "n": "Zvi Baratz", + "e": "z.baratz@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11458.json b/doc/sphinxext/prs/11458.json new file mode 100644 index 00000000000..de1f1a71365 --- /dev/null +++ b/doc/sphinxext/prs/11458.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ae7ecf89f6d531e6b52ae69ea445be4a449de5e5", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "Alexandre Gramfort", + "e": "agramfort@fb.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/decoding/ssd.py": { + "a": 54, + "d": 9 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11459.json b/doc/sphinxext/prs/11459.json new file mode 100644 index 00000000000..a6377ff13b5 --- /dev/null +++ b/doc/sphinxext/prs/11459.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b8df1bd8c03ccf5c6f86a1173ba82f81874ae1b6", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/preprocessing/ieeg/_projection.py": { + "a": 6, + "d": 14 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11465.json b/doc/sphinxext/prs/11465.json new file mode 100644 index 00000000000..9cf019d3787 --- /dev/null +++ b/doc/sphinxext/prs/11465.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3e69d028d242fa4f5504c9b6c2750dc8e37120bb", + "authors": [ + { + "n": "HY Shin", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 10, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11467.json b/doc/sphinxext/prs/11467.json new file mode 100644 index 00000000000..c1506d7cc13 --- /dev/null +++ b/doc/sphinxext/prs/11467.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "99bed94f837963bec5bafbca2a1b6a937a2126d3", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11469.json b/doc/sphinxext/prs/11469.json new file mode 100644 index 00000000000..f01c7a062cb --- /dev/null +++ b/doc/sphinxext/prs/11469.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "70119b6430afe22d480ba0f0ec39a6ee560a0c67", + "authors": [ + { + "n": "Sawradip Saha", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "agramfort@fb.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + 
"a": 2, + "d": 1 + }, + "doc/sphinxext/unit_role.py": { + "a": 29, + "d": 0 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 7, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11470.json b/doc/sphinxext/prs/11470.json new file mode 100644 index 00000000000..433af89f1dc --- /dev/null +++ b/doc/sphinxext/prs/11470.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d740e47c8bbce81e6c0beda304748e606f20fc6c", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "janEbert", + "e": "janpublicebert@posteo.net" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 14, + "d": 12 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 11, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 0 + }, + "mne/viz/montage.py": { + "a": 5, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 34, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11471.json b/doc/sphinxext/prs/11471.json new file mode 100644 index 00000000000..3afe2c712e7 --- /dev/null +++ b/doc/sphinxext/prs/11471.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c93c848f36219040de5fb9a8b8014472724e4143", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 3, + "d": 1 + }, + ".github/workflows/compat_old.yml": { + "a": 3, + "d": 1 + }, + ".github/workflows/linux_conda.yml": { + "a": 4, + "d": 2 + }, + ".github/workflows/macos_conda.yml": { + "a": 3, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11472.json b/doc/sphinxext/prs/11472.json new file mode 100644 index 00000000000..31649b4e86f --- /dev/null +++ b/doc/sphinxext/prs/11472.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "449ad99c4cdee96772afc843f7abda7ca27bfeee", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 10, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11473.json b/doc/sphinxext/prs/11473.json new file mode 100644 index 00000000000..abb2cd73974 --- /dev/null +++ b/doc/sphinxext/prs/11473.json @@ -0,0 +1,811 @@ +{ + "merge_commit_sha": "5a97c2f0aa8afb53e63df1441fcda3508612364e", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/_freesurfer.py": { + "a": 50, + "d": 38 + }, + "mne/annotations.py": { + "a": 22, + "d": 16 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 10, + "d": 11 + }, + "mne/beamformer/tests/test_external.py": { + "a": 18, + "d": 18 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 12, + "d": 12 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 5, + "d": 6 + }, + "mne/beamformer/tests/test_resolution_matrix.py": { + "a": 13, + "d": 10 + }, + "mne/bem.py": { + "a": 36, + "d": 41 + }, + "mne/channels/channels.py": { + "a": 15, + "d": 11 + }, + "mne/channels/layout.py": { + "a": 34, + "d": 32 + }, + "mne/channels/montage.py": { + "a": 21, + "d": 20 + }, + "mne/channels/tests/test_channels.py": { + "a": 14, + "d": 18 + }, + "mne/channels/tests/test_interpolation.py": { + 
"a": 9, + "d": 10 + }, + "mne/channels/tests/test_layout.py": { + "a": 22, + "d": 24 + }, + "mne/channels/tests/test_montage.py": { + "a": 49, + "d": 61 + }, + "mne/chpi.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 7, + "d": 5 + }, + "mne/commands/mne_setup_forward_model.py": { + "a": 2, + "d": 2 + }, + "mne/coreg.py": { + "a": 40, + "d": 39 + }, + "mne/cov.py": { + "a": 4, + "d": 4 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 3, + "d": 1 + }, + "mne/datasets/_infant/base.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/_phantom/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 3, + "d": 2 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 11, + "d": 12 + }, + "mne/decoding/tests/test_csp.py": { + "a": 4, + "d": 5 + }, + "mne/decoding/tests/test_ems.py": { + "a": 5, + "d": 7 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 5, + "d": 5 + }, + "mne/dipole.py": { + "a": 9, + "d": 9 + }, + "mne/epochs.py": { + "a": 5, + "d": 4 + }, + "mne/event.py": { + "a": 15, + "d": 17 + }, + "mne/evoked.py": { + "a": 7, + "d": 5 + }, + "mne/export/_egimff.py": { + "a": 1, + "d": 1 + }, + "mne/export/_export.py": { + "a": 3, + "d": 3 + }, + "mne/export/tests/test_export.py": { + "a": 8, + "d": 9 + }, + "mne/forward/_field_interpolation.py": { + "a": 17, + "d": 7 + }, + "mne/forward/_make_forward.py": { + "a": 4, + "d": 4 + }, + "mne/forward/forward.py": { + "a": 53, + "d": 43 + }, + "mne/forward/tests/test_forward.py": { + "a": 23, + "d": 14 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 32, + "d": 29 + }, + "mne/gui/_core.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 20, + "d": 8 + }, + "mne/gui/tests/test_core.py": { + "a": 2, + "d": 3 + }, + "mne/gui/tests/test_coreg.py": { + "a": 46, + "d": 30 + }, + "mne/gui/tests/test_ieeg_locate.py": { + "a": 14, + "d": 15 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 6, + "d": 7 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 20, + "d": 13 + }, + "mne/io/array/tests/test_array.py": { + "a": 3, + "d": 3 + }, + "mne/io/artemis123/artemis123.py": { + "a": 4, + "d": 2 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 11, + "d": 14 + }, + "mne/io/base.py": { + "a": 6, + "d": 5 + }, + "mne/io/boxy/boxy.py": { + "a": 2, + "d": 2 + }, + "mne/io/boxy/tests/test_boxy.py": { + "a": 32, + "d": 17 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 43, + "d": 42 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 14, + "d": 15 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 1, + "d": 3 + }, + "mne/io/ctf/ctf.py": { + "a": 6, + "d": 5 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 58, + "d": 57 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 35, + "d": 37 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 37, + "d": 17 + }, + "mne/io/eeglab/eeglab.py": { + "a": 20, + "d": 15 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 42, + "d": 43 + }, + "mne/io/egi/egi.py": { + "a": 8, + "d": 6 + }, + "mne/io/egi/egimff.py": { + "a": 15, + "d": 5 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 18, + "d": 19 + }, + "mne/io/eximia/eximia.py": { + "a": 2, + "d": 2 + }, + "mne/io/eximia/tests/test_eximia.py": { + "a": 2, + "d": 3 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 3, + "d": 3 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 1, + "d": 1 + }, + 
"mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 19, + "d": 24 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 43, + "d": 40 + }, + "mne/io/fil/tests/test_fil.py": { + "a": 4, + "d": 7 + }, + "mne/io/hitachi/hitachi.py": { + "a": 3, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 31, + "d": 23 + }, + "mne/io/kit/kit.py": { + "a": 63, + "d": 136 + }, + "mne/io/kit/tests/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 4, + "d": 10 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 36, + "d": 36 + }, + "mne/io/nedf/nedf.py": { + "a": 1, + "d": 1 + }, + "mne/io/nedf/tests/test_nedf.py": { + "a": 1, + "d": 3 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 4, + "d": 6 + }, + "mne/io/nihon/nihon.py": { + "a": 3, + "d": 4 + }, + "mne/io/nirx/nirx.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 32, + "d": 27 + }, + "mne/io/persyst/persyst.py": { + "a": 2, + "d": 2 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 21, + "d": 23 + }, + "mne/io/snirf/_snirf.py": { + "a": 2, + "d": 2 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 55, + "d": 25 + }, + "mne/io/tests/test_compensator.py": { + "a": 8, + "d": 7 + }, + "mne/io/tests/test_constants.py": { + "a": 8, + "d": 11 + }, + "mne/io/tests/test_meas_info.py": { + "a": 23, + "d": 21 + }, + "mne/io/tests/test_pick.py": { + "a": 26, + "d": 27 + }, + "mne/io/tests/test_proc_history.py": { + "a": 3, + "d": 3 + }, + "mne/io/tests/test_reference.py": { + "a": 8, + "d": 9 + }, + "mne/io/tests/test_show_fiff.py": { + "a": 5, + "d": 5 + }, + "mne/io/tests/test_what.py": { + "a": 7, + "d": 7 + }, + "mne/io/write.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 31, + "d": 28 + }, + "mne/minimum_norm/inverse.py": { + "a": 6, + "d": 4 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 47, + "d": 53 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 7, + "d": 8 + }, + "mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 12, + "d": 9 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 8, + "d": 7 + }, + "mne/morph.py": { + "a": 14, + "d": 10 + }, + "mne/morph_map.py": { + "a": 11, + "d": 12 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/_regress.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ieeg/_projection.py": { + "a": 5, + "d": 7 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 51, + "d": 35 + }, + "mne/preprocessing/maxwell.py": { + "a": 5, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 12, + "d": 12 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 12, + "d": 9 + }, + "mne/preprocessing/nirs/tests/test_optical_density.py": { + "a": 6, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_scalp_coupling_index.py": { + "a": 12, + "d": 9 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/tests/test_annotate_nan.py": { + "a": 8, + "d": 3 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 6, + "d": 5 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 5, + "d": 5 + }, + 
"mne/preprocessing/tests/test_fine_cal.py": { + "a": 6, + "d": 9 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 28, + "d": 29 + }, + "mne/preprocessing/tests/test_interpolate.py": { + "a": 5, + "d": 6 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 112, + "d": 98 + }, + "mne/preprocessing/tests/test_otp.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 4, + "d": 6 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 5, + "d": 4 + }, + "mne/proj.py": { + "a": 2, + "d": 2 + }, + "mne/report/report.py": { + "a": 15, + "d": 8 + }, + "mne/report/tests/test_report.py": { + "a": 172, + "d": 146 + }, + "mne/simulation/raw.py": { + "a": 3, + "d": 1 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 25, + "d": 9 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 1, + "d": 5 + }, + "mne/simulation/tests/test_raw.py": { + "a": 30, + "d": 21 + }, + "mne/simulation/tests/test_source.py": { + "a": 9, + "d": 9 + }, + "mne/source_estimate.py": { + "a": 47, + "d": 44 + }, + "mne/source_space.py": { + "a": 31, + "d": 15 + }, + "mne/stats/tests/test_regression.py": { + "a": 3, + "d": 6 + }, + "mne/surface.py": { + "a": 10, + "d": 11 + }, + "mne/tests/test_annotations.py": { + "a": 14, + "d": 17 + }, + "mne/tests/test_bem.py": { + "a": 21, + "d": 21 + }, + "mne/tests/test_chpi.py": { + "a": 35, + "d": 30 + }, + "mne/tests/test_coreg.py": { + "a": 130, + "d": 74 + }, + "mne/tests/test_cov.py": { + "a": 10, + "d": 13 + }, + "mne/tests/test_dipole.py": { + "a": 31, + "d": 30 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 5, + "d": 7 + }, + "mne/tests/test_epochs.py": { + "a": 65, + "d": 70 + }, + "mne/tests/test_event.py": { + "a": 15, + "d": 15 + }, + "mne/tests/test_evoked.py": { + "a": 12, + "d": 16 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 5 + }, + "mne/tests/test_freesurfer.py": { + "a": 14, + "d": 13 + }, + "mne/tests/test_label.py": { + "a": 75, + "d": 80 + }, + "mne/tests/test_misc.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_morph.py": { + "a": 35, + "d": 33 + }, + "mne/tests/test_morph_map.py": { + "a": 9, + "d": 10 + }, + "mne/tests/test_proj.py": { + "a": 32, + "d": 31 + }, + "mne/tests/test_rank.py": { + "a": 13, + "d": 13 + }, + "mne/tests/test_read_vectorview_selection.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 92, + "d": 74 + }, + "mne/tests/test_source_space.py": { + "a": 40, + "d": 31 + }, + "mne/tests/test_surface.py": { + "a": 33, + "d": 41 + }, + "mne/tests/test_transforms.py": { + "a": 30, + "d": 27 + }, + "mne/time_frequency/csd.py": { + "a": 6, + "d": 6 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 8, + "d": 3 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 5, + "d": 5 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 10, + "d": 10 + }, + "mne/transforms.py": { + "a": 26, + "d": 16 + }, + "mne/utils/check.py": { + "a": 29, + "d": 22 + }, + "mne/utils/config.py": { + "a": 16, + "d": 15 + }, + "mne/utils/docs.py": { + "a": 57, + "d": 7 + }, + "mne/utils/tests/test_check.py": { + "a": 9, + "d": 9 + }, + "mne/utils/tests/test_config.py": { + "a": 3, + "d": 3 + }, + "mne/utils/tests/test_logging.py": { + "a": 8, + "d": 9 + }, + "mne/utils/tests/test_numerics.py": { + "a": 8, + "d": 9 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 5, + "d": 5 + }, + "mne/utils/tests/test_testing.py": { + "a": 3, + 
"d": 2 + }, + "mne/viz/_3d.py": { + "a": 46, + "d": 35 + }, + "mne/viz/_brain/_brain.py": { + "a": 14, + "d": 3 + }, + "mne/viz/_brain/surface.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 46, + "d": 39 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 13, + "d": 11 + }, + "mne/viz/_dipole.py": { + "a": 2, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 24, + "d": 18 + }, + "mne/viz/tests/test_3d.py": { + "a": 40, + "d": 36 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_evoked.py": { + "a": 8, + "d": 9 + }, + "mne/viz/tests/test_ica.py": { + "a": 7, + "d": 7 + }, + "mne/viz/tests/test_misc.py": { + "a": 19, + "d": 14 + }, + "mne/viz/tests/test_montage.py": { + "a": 10, + "d": 8 + }, + "mne/viz/tests/test_topo.py": { + "a": 7, + "d": 7 + }, + "mne/viz/tests/test_topomap.py": { + "a": 12, + "d": 12 + }, + "mne/viz/tests/test_utils.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11475.json b/doc/sphinxext/prs/11475.json new file mode 100644 index 00000000000..ff1e809f3fc --- /dev/null +++ b/doc/sphinxext/prs/11475.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0dea13c2b3e6e9e90dbb5677be442eaf1203a180", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/ssd.py": { + "a": 40, + "d": 33 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11478.json b/doc/sphinxext/prs/11478.json new file mode 100644 index 00000000000..6ba66b32fc2 --- /dev/null +++ b/doc/sphinxext/prs/11478.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "62c5e42a4f5980576fe6a6ceca49c3e0aadd35a0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 0 + }, + "mne/conftest.py": { + "a": 3, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11479.json b/doc/sphinxext/prs/11479.json new file mode 100644 index 00000000000..b67fdde1083 --- /dev/null +++ b/doc/sphinxext/prs/11479.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "dfdc5a61333495a575deedf8e773113e1e42a6cf", + "authors": [ + { + "n": "tomdstone", + "e": "77251489+tomdstone@users.noreply.github.com" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + }, + { + "n": "Stone", + "e": "TASTONE@mgh.harvard.edu" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11485.json b/doc/sphinxext/prs/11485.json new file mode 100644 index 00000000000..7d65ba43586 --- /dev/null +++ b/doc/sphinxext/prs/11485.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4ee23004264b63ba2861ca70a4c777aee141de97", + "authors": [ + { + "n": "Zvi Baratz", + "e": "z.baratz@gmail.com" + } + ], + "changes": { + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11488.json b/doc/sphinxext/prs/11488.json new file mode 100644 index 00000000000..73055986cab --- /dev/null +++ b/doc/sphinxext/prs/11488.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ff48beee8f9da3493fc99c41c81e9baeb053f516", + 
"authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11489.json b/doc/sphinxext/prs/11489.json new file mode 100644 index 00000000000..68612ab1402 --- /dev/null +++ b/doc/sphinxext/prs/11489.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "80dc73463d13a68cb0f7b6d13b993b03e07a57ac", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/gui/_core.py": { + "a": 14, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11491.json b/doc/sphinxext/prs/11491.json new file mode 100644 index 00000000000..8f00839ea3c --- /dev/null +++ b/doc/sphinxext/prs/11491.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "18474b1d9227778ae875692a56f7e0927e856a54", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11493.json b/doc/sphinxext/prs/11493.json new file mode 100644 index 00000000000..9a8930ddb08 --- /dev/null +++ b/doc/sphinxext/prs/11493.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0cafb670d2285ce6a96627429c040b2ad9e4a60e", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "tutorials/simulation/10_array_objs.py": { + "a": 47, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11496.json b/doc/sphinxext/prs/11496.json new file mode 100644 index 00000000000..5d2a462ac80 --- /dev/null +++ b/doc/sphinxext/prs/11496.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "824259f5633b49c136bffd42dea1e1a2cc119882", + "authors": [ + { + "n": "Zvi Baratz", + "e": "z.baratz@gmail.com" + } + ], + "changes": { + "examples/simulation/simulate_evoked_data.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11497.json b/doc/sphinxext/prs/11497.json new file mode 100644 index 00000000000..f188f3b5e14 --- /dev/null +++ b/doc/sphinxext/prs/11497.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2cf07038124f210d8b3f969d7657f52643fcc1f9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11498.json b/doc/sphinxext/prs/11498.json new file mode 100644 index 00000000000..7bcaa5bc775 --- /dev/null +++ b/doc/sphinxext/prs/11498.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "03961e4f5f82fd9529d84ef71c9ee3c74132f977", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11499.json b/doc/sphinxext/prs/11499.json new file mode 100644 index 00000000000..eba572f5491 --- /dev/null +++ b/doc/sphinxext/prs/11499.json @@ -0,0 +1,299 @@ +{ + "merge_commit_sha": "603a96da58ddba3c2c05d3a01639abd669cc9543", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 2, + "d": 2 + }, + "mne/bem.py": { + "a": 4, + "d": 4 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 2 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_anonymize.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 4, + "d": 3 + }, + "mne/cuda.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 6, + "d": 5 + }, + "mne/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/filter.py": { + "a": 5, + "d": 5 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/_digitization.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/besa/besa.py": { + "a": 5, + "d": 5 + }, + "mne/io/boxy/boxy.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 6, + "d": 6 + }, + "mne/io/bti/bti.py": { + "a": 6, + "d": 6 + }, + "mne/io/cnt/cnt.py": { + "a": 23, + "d": 23 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/curry/curry.py": { + "a": 8, + "d": 8 + }, + "mne/io/edf/edf.py": { + "a": 27, + "d": 26 + }, + "mne/io/eeglab/eeglab.py": { + "a": 6, + "d": 6 + }, + "mne/io/egi/egimff.py": { + "a": 3, + "d": 3 + }, + "mne/io/egi/events.py": { + "a": 2, + "d": 2 + }, + "mne/io/eximia/eximia.py": { + "a": 2, + "d": 2 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 4, + "d": 4 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/fil/fil.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 7, + "d": 7 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 5 + }, + "mne/io/nedf/nedf.py": { + "a": 2, + "d": 2 + }, + "mne/io/nicolet/nicolet.py": { + "a": 16, + "d": 16 + }, + "mne/io/nirx/nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/open.py": { + "a": 2, + "d": 2 + }, + "mne/io/persyst/persyst.py": { + "a": 2, + "d": 2 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 1 + }, + "mne/io/what.py": { + "a": 2, + "d": 2 + }, + "mne/io/write.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/misc.py": { + "a": 2, + "d": 2 + }, + "mne/morph.py": { + "a": 9, + "d": 8 + }, + "mne/parallel.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/maxfilter.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 2 + }, + "mne/proj.py": { + "a": 10, + "d": 10 + }, + "mne/rank.py": { + "a": 4, + "d": 4 + }, + "mne/simulation/metrics/metrics.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/source.py": { + "a": 3, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 13, + "d": 15 + }, + "mne/source_space.py": { + "a": 15, + "d": 14 + }, + "mne/surface.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_label.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/csd.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 6 + }, + "mne/transforms.py": { + "a": 3, + "d": 3 + }, + 
"mne/utils/_logging.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1150.json b/doc/sphinxext/prs/1150.json new file mode 100644 index 00000000000..a1ed3bef24f --- /dev/null +++ b/doc/sphinxext/prs/1150.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "05af70db83b9e9f78dfc7b0ce9af0e471e138a2b", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11500.json b/doc/sphinxext/prs/11500.json new file mode 100644 index 00000000000..58865e51b07 --- /dev/null +++ b/doc/sphinxext/prs/11500.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0efe50b0e9ad44f863bd1e1aac145924f4c0bbfb", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 66, + "d": 21 + }, + "mne/channels/tests/test_layout.py": { + "a": 32, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11502.json b/doc/sphinxext/prs/11502.json new file mode 100644 index 00000000000..5cce1fc7f25 --- /dev/null +++ b/doc/sphinxext/prs/11502.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0e305ee75fed512e8af6eba33bf5570652fa5069", + "authors": [ + { + "n": "Moritz Gerster", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 10, + "d": 5 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 49, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11505.json b/doc/sphinxext/prs/11505.json new file mode 100644 index 00000000000..11d2817e6db --- /dev/null +++ b/doc/sphinxext/prs/11505.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6d3e41f888a72f99b436336e3dbf9dd39d1996a4", + "authors": [ + { + "n": "nmri-nfocke", + "e": "114056301+nmri-nfocke@users.noreply.github.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/egi/events.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11506.json b/doc/sphinxext/prs/11506.json new file mode 100644 index 00000000000..a187a43d836 --- /dev/null +++ b/doc/sphinxext/prs/11506.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ae0b34c08d68b7641a82428285e094267a6ac01c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/1.3.inc": { + "a": 9, + "d": 0 + }, + "doc/install/installers.rst": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1151.json b/doc/sphinxext/prs/1151.json new file mode 100644 index 00000000000..ec4854c0966 --- /dev/null +++ b/doc/sphinxext/prs/1151.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "470ce0a4bf5e35c8699725917288306bdd9ad06d", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 19, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11512.json b/doc/sphinxext/prs/11512.json new file mode 100644 index 00000000000..edc9d2c0362 --- /dev/null +++ b/doc/sphinxext/prs/11512.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e7d5226ae2599685112631e8ca5f203cf031d2c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/3d_to_2d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11517.json b/doc/sphinxext/prs/11517.json new file mode 100644 index 00000000000..07b0549675f --- /dev/null +++ b/doc/sphinxext/prs/11517.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3bf04edf744da89b1bb4f84cde5537758ed00865", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "Makefile": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 8, + "d": 1 + }, + "mne/_version.py": { + "a": 0, + "d": 6 + }, + "requirements_base.txt": { + "a": 2, + "d": 1 + }, + "setup.py": { + "a": 5, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11518.json b/doc/sphinxext/prs/11518.json new file mode 100644 index 00000000000..cf8f8d3e1d7 --- /dev/null +++ b/doc/sphinxext/prs/11518.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "866761249fa2b7fab7e6ce198d5226ebbeaacc9d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/linux_conda.yml": { + "a": 3, + "d": 1 + }, + "doc/install/advanced.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1152.json b/doc/sphinxext/prs/1152.json new file mode 100644 index 00000000000..a4561d3ab08 --- /dev/null +++ b/doc/sphinxext/prs/1152.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e6b948dbd4afb4fd4f129e822f16f5d60a13bc92", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/index.rst": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11521.json b/doc/sphinxext/prs/11521.json new file mode 100644 index 00000000000..6417c34296e --- /dev/null +++ b/doc/sphinxext/prs/11521.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "eb54bde2cc53f94b9cd253987937e3510a81f89f", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/_read_raw.py": { + "a": 15, + "d": 3 + }, + "mne/io/tests/test_read_raw.py": { + "a": 25, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1153.json b/doc/sphinxext/prs/1153.json new file mode 100644 index 00000000000..181fbef653a --- /dev/null +++ b/doc/sphinxext/prs/1153.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f8f7137f88eb8ba6e62ed0f03fda46b87b6afb0c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/index.rst": { + "a": 10, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11536.json b/doc/sphinxext/prs/11536.json new file mode 100644 index 00000000000..9bfd58d69cc --- /dev/null +++ b/doc/sphinxext/prs/11536.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "295b7c7ee90ea341dab2cc497f7ec6d5eceb2770", + "authors": [ + { + "n": "George O'Neill", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 23, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 1 + }, + "mne/io/pick.py": { + "a": 5, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 16, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/hfc.py": { + "a": 100, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 4 + }, + "mne/preprocessing/tests/test_hfc.py": { + "a": 149, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + }, + "tutorials/preprocessing/80_opm_processing.py": { + "a": 64, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11537.json b/doc/sphinxext/prs/11537.json new file mode 100644 index 00000000000..ce65a8949b0 --- /dev/null +++ b/doc/sphinxext/prs/11537.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fa618df07286e6be8177001bd23a91d76995e085", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11538.json b/doc/sphinxext/prs/11538.json new file mode 100644 index 00000000000..5dcb3e64905 --- /dev/null +++ b/doc/sphinxext/prs/11538.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "43979006d6236d3c9da27fdbe233f9b6fa450f04", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1154.json b/doc/sphinxext/prs/1154.json new file mode 100644 index 00000000000..5e74a0a8500 --- /dev/null +++ b/doc/sphinxext/prs/1154.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c23486285c2daa9f831c25a64444b5383449435e", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/index.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11541.json b/doc/sphinxext/prs/11541.json new file mode 100644 index 00000000000..9e8bf952f29 --- /dev/null +++ b/doc/sphinxext/prs/11541.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "8325f8082afd1d0d836585e77e01fcf600fd348c", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 0, + "d": 45 + }, + ".github/workflows/precommit.yml": { + "a": 14, + "d": 0 + }, + ".gitignore": { + "a": 1, + "d": 1 + }, + ".pre-commit-config.yaml": { + "a": 36, + "d": 0 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "Makefile": { + "a": 6, + "d": 89 + }, + "azure-pipelines.yml": { + "a": 5, + "d": 14 + }, + "doc/install/contributing.rst": { + "a": 10, + "d": 6 + }, + "examples/simulation/simulate_evoked_data.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/simulated_raw_data_using_subject_anatomy.py": { + "a": 1, + "d": 2 + }, + "examples/visualization/topo_customized.py": { + "a": 2, + "d": 1 + }, + "mne/beamformer/tests/test_resolution_matrix.py": { + "a": 2, + "d": 2 + }, + "mne/chpi.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 4, + "d": 1 + }, + "mne/utils/check.py": { + "a": 1, + "d": 0 + }, + "pyproject.toml": { + "a": 50, + "d": 0 + }, + "requirements_testing.txt": { + "a": 2, + "d": 3 + }, + "setup.cfg": { + "a": 0, + "d": 42 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11547.json b/doc/sphinxext/prs/11547.json new file mode 100644 index 00000000000..95b7ea41d56 --- /dev/null +++ b/doc/sphinxext/prs/11547.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bf2502166eb15626c1205accc2d2d467535b8d93", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/0.24.inc": { + "a": 1, + "d": 1 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11549.json b/doc/sphinxext/prs/11549.json new file mode 100644 index 00000000000..ea33954d5a0 --- /dev/null +++ b/doc/sphinxext/prs/11549.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "21efb09059c03c6bd964d5e01a5833c05a8865c7", + "authors": [ + { + "n": "Simon Kern", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 64, + "d": 17 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 46, + "d": 1 + }, + "mne/io/utils.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11550.json b/doc/sphinxext/prs/11550.json new file mode 100644 index 00000000000..a9554fc32ed --- /dev/null +++ b/doc/sphinxext/prs/11550.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "132d1da7a3bebbb265fc5b23c440abacbfd33de2", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 2, + "d": 2 + }, + ".github/workflows/compat_old.yml": { + "a": 3, + "d": 3 + }, + "README.rst": { + "a": 3, + "d": 2 + }, + 
"doc/install/contributing.rst": { + "a": 1, + "d": 1 + }, + "doc/install/manual_install_python.rst": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 8, + "d": 7 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "requirements_base.txt": { + "a": 1, + "d": 2 + }, + "setup.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11555.json b/doc/sphinxext/prs/11555.json new file mode 100644 index 00000000000..d9667aad75a --- /dev/null +++ b/doc/sphinxext/prs/11555.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ecae0576c838f18d3ceae00e2268412356580135", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 28, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11556.json b/doc/sphinxext/prs/11556.json new file mode 100644 index 00000000000..c92769b3f3c --- /dev/null +++ b/doc/sphinxext/prs/11556.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c461bad33dc953048f895ecc3aaea69208181024", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 15, + "d": 4 + }, + "mne/time_frequency/spectrum.py": { + "a": 11, + "d": 4 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 69, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11557.json b/doc/sphinxext/prs/11557.json new file mode 100644 index 00000000000..99ff93eb7a3 --- /dev/null +++ b/doc/sphinxext/prs/11557.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "29e99c85da5e192a402528ffc5500b17d0281334", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 19, + "d": 11 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/tests/test_base.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 13, + "d": 153 + }, + "mne/surface.py": { + "a": 13, + "d": 33 + }, + "mne/tests/test_annotations.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 0 + }, + "mne/utils/check.py": { + "a": 0, + "d": 6 + }, + "mne/utils/numerics.py": { + "a": 2, + "d": 3 + }, + "mne/utils/tests/test_linalg.py": { + "a": 2, + "d": 8 + }, + "mne/utils/tests/test_numerics.py": { + "a": 1, + "d": 3 + }, + "mne/viz/_brain/_scraper.py": { + "a": 1, + "d": 6 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 4 + }, + "mne/viz/tests/test_topomap.py": { + "a": 3, + "d": 2 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11558.json b/doc/sphinxext/prs/11558.json new file mode 100644 index 00000000000..d78fde710b7 --- /dev/null +++ b/doc/sphinxext/prs/11558.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f628e191912066ca375f920e5a92a9674d85d804", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 10, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1156.json b/doc/sphinxext/prs/1156.json new file mode 100644 index 00000000000..004d811dd62 --- /dev/null +++ b/doc/sphinxext/prs/1156.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3adb1b81108156fc78d299a1707af7215c95498e", + "authors": [ + { + "n": "Tal 
Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/epochs.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11562.json b/doc/sphinxext/prs/11562.json new file mode 100644 index 00000000000..a88e83c5fb6 --- /dev/null +++ b/doc/sphinxext/prs/11562.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7fb3ef2338b9e4684b03d7f171ef62ad2ffc9c53", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 18, + "d": 12 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 7, + "d": 5 + }, + "mne/tests/test_import_nesting.py": { + "a": 10, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11563.json b/doc/sphinxext/prs/11563.json new file mode 100644 index 00000000000..5ccb497fd31 --- /dev/null +++ b/doc/sphinxext/prs/11563.json @@ -0,0 +1,131 @@ +{ + "merge_commit_sha": "2bd0c3ee25606fc1e755fe589b0cc6bf409d3f84", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "examples/datasets/brainstorm_data.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/find_ref_artifacts.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/sensor_noise_level.py": { + "a": 2, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 5, + "d": 4 + }, + "mne/report/report.py": { + "a": 13, + "d": 4 + }, + "mne/report/tests/test_report.py": { + "a": 3, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 9, + "d": 46 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 15, + "d": 0 + }, + "mne/utils/spectrum.py": { + "a": 63, + "d": 0 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 4, + "d": 11 + }, + "mne/viz/raw.py": { + "a": 6, + "d": 14 + }, + "mne/viz/tests/test_epochs.py": { + "a": 18, + "d": 16 + }, + "mne/viz/tests/test_raw.py": { + "a": 25, + "d": 31 + }, + "mne/viz/topomap.py": { + "a": 12, + "d": 10 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 6, + "d": 4 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/15_inplace.py": { + "a": 3, + "d": 3 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 3, + "d": 5 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 9, + "d": 6 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 6, + "d": 8 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11566.json b/doc/sphinxext/prs/11566.json new file mode 100644 index 00000000000..13634b7ead3 --- /dev/null +++ b/doc/sphinxext/prs/11566.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6384a8901182272c48f3e72a10142ea75184f47f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } 
+ ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 2, + "d": 1 + }, + "doc/conf.py": { + "a": 5, + "d": 0 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11567.json b/doc/sphinxext/prs/11567.json new file mode 100644 index 00000000000..db9861782b7 --- /dev/null +++ b/doc/sphinxext/prs/11567.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "5ca0141f6bd9db9be98e94e7362e2103471487d2", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/mri.rst": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/locate_ieeg_micro.py": { + "a": 94, + "d": 0 + }, + "mne/conftest.py": { + "a": 9, + "d": 8 + }, + "mne/gui/__init__.py": { + "a": 7, + "d": 5 + }, + "mne/gui/_core.py": { + "a": 152, + "d": 101 + }, + "mne/gui/_ieeg_locate.py": { + "a": 37, + "d": 17 + }, + "mne/gui/tests/test_core.py": { + "a": 6, + "d": 3 + }, + "mne/gui/tests/test_ieeg_locate.py": { + "a": 15, + "d": 10 + }, + "mne/surface.py": { + "a": 10, + "d": 5 + }, + "mne/tests/test_transforms.py": { + "a": 12, + "d": 0 + }, + "mne/transforms.py": { + "a": 83, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 4, + "d": 3 + }, + "mne/viz/backends/_utils.py": { + "a": 4, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11568.json b/doc/sphinxext/prs/11568.json new file mode 100644 index 00000000000..3cbdb7e1fdf --- /dev/null +++ b/doc/sphinxext/prs/11568.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "417a9c4e17e28f613e45e6ce45c24cafb7152b25", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 5, + "d": 5 + }, + "mne/commands/mne_sys_info.py": { + "a": 8, + "d": 1 + }, + "mne/utils/config.py": { + "a": 76, + "d": 67 + }, + "requirements_base.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11569.json b/doc/sphinxext/prs/11569.json new file mode 100644 index 00000000000..2699d163c41 --- /dev/null +++ b/doc/sphinxext/prs/11569.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5238cf7ccba5b5c14caaa76904dfacc22919cdcb", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/export/_edf.py": { + "a": 8, + "d": 4 + }, + "mne/export/tests/test_export.py": { + "a": 18, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11572.json b/doc/sphinxext/prs/11572.json new file mode 100644 index 00000000000..354272a1468 --- /dev/null +++ b/doc/sphinxext/prs/11572.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "df2cf84d8f4ed22aa897b78df9182984c499d8cb", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/0.24.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/mri.rst": { + "a": 0, + "d": 1 + }, + "doc/preprocessing.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ieeg/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ieeg/_volume.py": { + "a": 219, + "d": 0 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/ieeg/tests/test_volume.py": { + 
"a": 105, + "d": 0 + }, + "mne/surface.py": { + "a": 7, + "d": 14 + }, + "mne/tests/test_surface.py": { + "a": 20, + "d": 21 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 13, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11574.json b/doc/sphinxext/prs/11574.json new file mode 100644 index 00000000000..e2102642160 --- /dev/null +++ b/doc/sphinxext/prs/11574.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e6b025309b0dfc5c6881dceaf398b4be37125a57", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tests/test_spectrum.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11575.json b/doc/sphinxext/prs/11575.json new file mode 100644 index 00000000000..07eacba2060 --- /dev/null +++ b/doc/sphinxext/prs/11575.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "52a42e2d5993232ce268155a8c002428ff046cae", + "authors": [ + { + "n": "Joshua Calder-Travis", + "e": null + } + ], + "changes": { + "mne/stats/tests/test_permutations.py": { + "a": 19, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11577.json b/doc/sphinxext/prs/11577.json new file mode 100644 index 00000000000..e09cee6153a --- /dev/null +++ b/doc/sphinxext/prs/11577.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "367ed0ad708681c787b2088a4050352e46837c8d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 5, + "d": 5 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 0 + }, + "mne/utils/config.py": { + "a": 13, + "d": 12 + }, + "mne/utils/tests/test_config.py": { + "a": 14, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11578.json b/doc/sphinxext/prs/11578.json new file mode 100644 index 00000000000..d4bbd457704 --- /dev/null +++ b/doc/sphinxext/prs/11578.json @@ -0,0 +1,187 @@ +{ + "merge_commit_sha": "6ccd1123ed10cdda2a34d0d4104fbad8f9bae23f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 1, + "d": 2 + }, + "mne/_freesurfer.py": { + "a": 7, + "d": 22 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 0 + }, + "mne/bem.py": { + "a": 6, + "d": 6 + }, + "mne/commands/tests/test_commands.py": { + "a": 4, + "d": 3 + }, + "mne/coreg.py": { + "a": 2, + "d": 6 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 0 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 5, + "d": 4 + }, + "mne/gui/_core.py": { + "a": 2, + "d": 2 + }, + "mne/gui/tests/test_core.py": { + "a": 3, + "d": 3 + }, + "mne/gui/tests/test_coreg.py": { + "a": 3, + "d": 0 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 2, + "d": 1 + }, + "mne/gui/tests/test_ieeg_locate.py": { + "a": 3, + "d": 5 + }, + "mne/morph.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 2, + "d": 3 + }, + "mne/report/tests/test_report.py": { + "a": 7, + "d": 6 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/simulation/tests/test_source.py": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 5 + }, + "mne/source_space.py": { + "a": 2, + "d": 6 + }, + "mne/surface.py": { + "a": 2, + "d": 5 + }, + "mne/tests/test_bem.py": { + "a": 4, + "d": 0 + }, 
+ "mne/tests/test_coreg.py": { + "a": 15, + "d": 14 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_freesurfer.py": { + "a": 5, + "d": 6 + }, + "mne/tests/test_label.py": { + "a": 14, + "d": 0 + }, + "mne/tests/test_morph.py": { + "a": 15, + "d": 15 + }, + "mne/tests/test_morph_map.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 9, + "d": 9 + }, + "mne/tests/test_source_space.py": { + "a": 11, + "d": 8 + }, + "mne/tests/test_surface.py": { + "a": 4, + "d": 6 + }, + "mne/tests/test_transforms.py": { + "a": 2, + "d": 4 + }, + "mne/transforms.py": { + "a": 4, + "d": 4 + }, + "mne/utils/__init__.py": { + "a": 4, + "d": 4 + }, + "mne/utils/_testing.py": { + "a": 0, + "d": 45 + }, + "mne/utils/check.py": { + "a": 9, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 2, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 3 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 11, + "d": 4 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 7, + "d": 8 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 5, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11582.json b/doc/sphinxext/prs/11582.json new file mode 100644 index 00000000000..92b52d00f04 --- /dev/null +++ b/doc/sphinxext/prs/11582.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "b921c8904ac752061a4698132d4da0d3164278c3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 4 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 5, + "d": 0 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 2, + "d": 9 + }, + "mne/label.py": { + "a": 2, + "d": 10 + }, + "mne/morph.py": { + "a": 0, + "d": 4 + }, + "mne/report/report.py": { + "a": 1, + "d": 6 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 2, + "d": 5 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 7 + }, + "mne/simulation/tests/test_source.py": { + "a": 0, + "d": 7 + }, + "mne/source_space.py": { + "a": 2, + "d": 8 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 2, + "d": 6 + }, + "mne/stats/tests/test_permutations.py": { + "a": 0, + "d": 5 + }, + "mne/tests/test_coreg.py": { + "a": 13, + "d": 14 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 10 + }, + "mne/tests/test_label.py": { + "a": 3, + "d": 14 + }, + "mne/tests/test_source_space.py": { + "a": 7, + "d": 18 + }, + "mne/utils/check.py": { + "a": 6, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11583.json b/doc/sphinxext/prs/11583.json new file mode 100644 index 00000000000..bb0b8ae74f1 --- /dev/null +++ b/doc/sphinxext/prs/11583.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1033764b80ef6154d8905b7b01113079991d78c8", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "ignore_words.txt": { + "a": 0, + "d": 7 + }, + "mne/simulation/metrics/metrics.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11584.json b/doc/sphinxext/prs/11584.json new file mode 100644 index 00000000000..3bde787a515 --- /dev/null +++ b/doc/sphinxext/prs/11584.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"9475b278dedcaef42ba129058cd9bc7a6f18c4ac", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11585.json b/doc/sphinxext/prs/11585.json new file mode 100644 index 00000000000..8f7cf94bf28 --- /dev/null +++ b/doc/sphinxext/prs/11585.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "51e0b2553a2757a822d0f10f42c236ab4e6faf23", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11586.json b/doc/sphinxext/prs/11586.json new file mode 100644 index 00000000000..dc06a04a823 --- /dev/null +++ b/doc/sphinxext/prs/11586.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a9a5866ba6aa35c877d7188af625a880980fc0b", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "doc/_static/institution_logos/CEA.png": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11587.json b/doc/sphinxext/prs/11587.json new file mode 100644 index 00000000000..afaa6da6b9b --- /dev/null +++ b/doc/sphinxext/prs/11587.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7d389814213b1f2c9e67c5af4049ee1b07654744", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1159.json b/doc/sphinxext/prs/1159.json new file mode 100644 index 00000000000..2d6f1ef6f06 --- /dev/null +++ b/doc/sphinxext/prs/1159.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9dbda572f0184f83eeb06ae276fd256337c420de", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 5, + "d": 6 + }, + "mne/gui/_file_traits.py": { + "a": 147, + "d": 59 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 0, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11591.json b/doc/sphinxext/prs/11591.json new file mode 100644 index 00000000000..1a80a359fa4 --- /dev/null +++ b/doc/sphinxext/prs/11591.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "17f0dab46d0bd213965cdd222150eba973d32de0", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/boxy/boxy.py": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 2 + }, + "mne/io/curry/curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 4, + "d": 4 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/eximia/eximia.py": { + "a": 2, + "d": 2 + }, + "mne/io/fil/fil.py": { + "a": 2, + "d": 2 + }, + "mne/io/hitachi/hitachi.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 3 + }, + "mne/io/nedf/nedf.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 2, + "d": 2 + }, + "mne/io/nihon/nihon.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/nirx.py": { + "a": 2, + "d": 2 + }, + 
"mne/io/persyst/persyst.py": { + "a": 2, + "d": 2 + }, + "mne/io/snirf/_snirf.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11592.json b/doc/sphinxext/prs/11592.json new file mode 100644 index 00000000000..29bcd847eca --- /dev/null +++ b/doc/sphinxext/prs/11592.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "1a339ee065afab491cc950fea45ff39552985c58", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/boxy/boxy.py": { + "a": 2, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 2, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 1 + }, + "mne/io/curry/curry.py": { + "a": 5, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 7, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 2, + "d": 1 + }, + "mne/io/eximia/eximia.py": { + "a": 2, + "d": 1 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 5, + "d": 0 + }, + "mne/io/fil/fil.py": { + "a": 4, + "d": 2 + }, + "mne/io/hitachi/hitachi.py": { + "a": 2, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 1 + }, + "mne/io/nedf/nedf.py": { + "a": 2, + "d": 1 + }, + "mne/io/nihon/nihon.py": { + "a": 2, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 2, + "d": 1 + }, + "mne/io/persyst/persyst.py": { + "a": 2, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11594.json b/doc/sphinxext/prs/11594.json new file mode 100644 index 00000000000..47de7e418ad --- /dev/null +++ b/doc/sphinxext/prs/11594.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3d9af0fe6ecc0444932338ab083f06b194a5d9f3", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + ".github/config.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11595.json b/doc/sphinxext/prs/11595.json new file mode 100644 index 00000000000..ba991d3b5de --- /dev/null +++ b/doc/sphinxext/prs/11595.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c4c9347e3ec6ee5b52f3470120fee68867277222", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11596.json b/doc/sphinxext/prs/11596.json new file mode 100644 index 00000000000..54c717987df --- /dev/null +++ b/doc/sphinxext/prs/11596.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4b419e03e36bae756af00f90c2c1a3cd6d7c20e1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/preprocessing/contralateral_referencing.py": { + "a": 71, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11599.json b/doc/sphinxext/prs/11599.json new file mode 100644 index 00000000000..97db8f50c25 --- /dev/null +++ b/doc/sphinxext/prs/11599.json @@ -0,0 +1,1315 @@ +{ + "merge_commit_sha": "623895d0fd8894196ca638ad10ddccc98d488b4c", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 0, + "d": 2 + }, + "doc/sphinxext/flow_diagram.py": { + "a": 0, + "d": 2 + }, + "doc/sphinxext/gen_commands.py": { + "a": 0, + "d": 2 + 
}, + "doc/sphinxext/gen_names.py": { + "a": 0, + "d": 2 + }, + "doc/sphinxext/gh_substitutions.py": { + "a": 0, + "d": 2 + }, + "doc/sphinxext/newcontrib_substitutions.py": { + "a": 0, + "d": 2 + }, + "examples/datasets/brainstorm_data.py": { + "a": 0, + "d": 1 + }, + "examples/datasets/hf_sef_data.py": { + "a": 0, + "d": 1 + }, + "examples/datasets/limo_data.py": { + "a": 0, + "d": 1 + }, + "examples/datasets/opm_data.py": { + "a": 0, + "d": 1 + }, + "examples/datasets/spm_faces_dataset_sgskip.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_spatio_temporal_source.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_unsupervised_spatial_filter.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/ems_filtering.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 0, + "d": 1 + }, + "examples/forward/forward_sensitivity_maps.py": { + "a": 0, + "d": 1 + }, + "examples/forward/left_cerebellum_volume_source.py": { + "a": 0, + "d": 1 + }, + "examples/forward/source_space_morphing.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/compute_mne_inverse_epochs_in_label.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/compute_mne_inverse_raw_in_label.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/compute_mne_inverse_volume.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/custom_inverse_solver.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/dics_epochs.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/dics_source_power.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/gamma_map_inverse.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/label_activation_from_stc.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/label_from_stc.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/label_source_activations.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/mne_cov_power.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/morph_surface_stc.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/morph_volume_stc.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/multi_dipole_model.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/multidict_reweighted_tfmxne.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/psf_ctf_label_leakage.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/psf_ctf_vertices.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/psf_ctf_vertices_lcmv.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/psf_volume.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/rap_music.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/read_inverse.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/read_stc.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/resolution_metrics.py": { + "a": 0, + "d": 1 + }, + 
"examples/inverse/resolution_metrics_eegmeg.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/snr_estimate.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/source_space_snr.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/time_frequency_mixed_norm_inverse.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/vector_mne_solution.py": { + "a": 0, + "d": 1 + }, + "examples/io/elekta_epochs.py": { + "a": 0, + "d": 1 + }, + "examples/io/read_neo_format.py": { + "a": 0, + "d": 1 + }, + "examples/io/read_noise_covariance_matrix.py": { + "a": 0, + "d": 1 + }, + "examples/io/read_xdf.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/css.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/define_target_events.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/eog_regression.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/find_ref_artifacts.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/fnirs_artifact_removal.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/ica_comparison.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/interpolate_bad_channels.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/locate_ieeg_micro.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/movement_compensation.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/movement_detection.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/muscle_detection.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/muscle_ica.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/otp.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/shift_evoked.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/virtual_evoked.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/xdawn_denoising.py": { + "a": 0, + "d": 1 + }, + "examples/simulation/simulate_evoked_data.py": { + "a": 0, + "d": 1 + }, + "examples/simulation/simulate_raw_data.py": { + "a": 0, + "d": 1 + }, + "examples/simulation/simulated_raw_data_using_subject_anatomy.py": { + "a": 0, + "d": 1 + }, + "examples/simulation/source_simulator.py": { + "a": 0, + "d": 1 + }, + "examples/stats/cluster_stats_evoked.py": { + "a": 0, + "d": 1 + }, + "examples/stats/fdr_stats_evoked.py": { + "a": 0, + "d": 1 + }, + "examples/stats/linear_regression_raw.py": { + "a": 0, + "d": 1 + }, + "examples/stats/sensor_permutation_test.py": { + "a": 0, + "d": 1 + }, + "examples/stats/sensor_regression.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/compute_csd.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/compute_source_psd_epochs.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/source_label_time_frequency.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/source_power_spectrum.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/source_space_time_frequency.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/temporal_whitening.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/time_frequency_simulated.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/3d_to_2d.py": { + "a": 0, + "d": 1 + }, + 
"examples/visualization/brain.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/channel_epochs_image.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/eeg_on_scalp.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/evoked_arrowmap.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/evoked_topomap.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/evoked_whitening.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/meg_sensors.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/mne_helmet.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/montage_sgskip.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/parcellation.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/publication_figure.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/roi_erpimage_by_rt.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/sensor_noise_level.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/ssp_projs_sensitivity_map.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/topo_compare_conditions.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/topo_customized.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/xhemi.py": { + "a": 0, + "d": 1 + }, + "logo/generate_mne_logos.py": { + "a": 0, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 0, + "d": 1 + }, + "mne/_ola.py": { + "a": 0, + "d": 1 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 0, + "d": 1 + }, + "mne/beamformer/tests/test_resolution_matrix.py": { + "a": 0, + "d": 1 + }, + "mne/chpi.py": { + "a": 0, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 0, + "d": 1 + }, + "mne/conftest.py": { + "a": 0, + "d": 1 + }, + "mne/coreg.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/_infant/base.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/_phantom/base.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/mixin.py": { + "a": 0, + "d": 2 + }, + "mne/decoding/receptive_field.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 0, + "d": 1 + }, + "mne/dipole.py": { + "a": 0, + "d": 1 + }, + "mne/epochs.py": { + "a": 0, + "d": 2 + }, + "mne/evoked.py": { + "a": 0, + "d": 1 + }, + "mne/export/_brainvision.py": { + "a": 0, + "d": 1 + }, + "mne/export/_edf.py": { + "a": 0, + "d": 1 + }, + "mne/export/_eeglab.py": { + "a": 0, + "d": 1 + }, + "mne/export/_egimff.py": { + "a": 0, + "d": 1 + }, + "mne/export/_export.py": { + "a": 0, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 0, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 0, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 0, + "d": 1 + }, + "mne/gui/_core.py": { + "a": 0, + "d": 1 + }, + "mne/gui/_ieeg_locate.py": { + "a": 0, + "d": 1 + }, + "mne/gui/tests/test_core.py": { + "a": 0, + "d": 1 + }, + "mne/gui/tests/test_ieeg_locate.py": { + "a": 0, + "d": 1 + }, + "mne/io/_digitization.py": { + "a": 0, + "d": 1 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 0, + "d": 1 + }, + "mne/io/base.py": { + "a": 0, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 0, + "d": 1 + }, + 
"mne/io/brainvision/tests/test_brainvision.py": { + "a": 0, + "d": 1 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 0, + "d": 1 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 0, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 0, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 0, + "d": 1 + }, + "mne/io/egi/events.py": { + "a": 0, + "d": 1 + }, + "mne/io/egi/general.py": { + "a": 0, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 0, + "d": 1 + }, + "mne/io/eximia/tests/test_eximia.py": { + "a": 0, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 0, + "d": 1 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 0, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 0, + "d": 1 + }, + "mne/io/nedf/nedf.py": { + "a": 0, + "d": 1 + }, + "mne/io/nedf/tests/test_nedf.py": { + "a": 0, + "d": 1 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 0, + "d": 1 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 0, + "d": 1 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 0, + "d": 1 + }, + "mne/io/open.py": { + "a": 0, + "d": 1 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 0, + "d": 1 + }, + "mne/io/pick.py": { + "a": 0, + "d": 1 + }, + "mne/io/proc_history.py": { + "a": 0, + "d": 1 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 0, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 0, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 0, + "d": 1 + }, + "mne/io/tests/test_show_fiff.py": { + "a": 0, + "d": 1 + }, + "mne/io/tests/test_utils.py": { + "a": 0, + "d": 1 + }, + "mne/io/tests/test_write.py": { + "a": 0, + "d": 1 + }, + "mne/io/utils.py": { + "a": 0, + "d": 1 + }, + "mne/io/what.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/ieeg/tests/test_volume.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/otp.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/realign.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 0, + "d": 1 + }, + "mne/rank.py": { + "a": 0, + "d": 1 + }, + "mne/report/tests/test_report.py": { + "a": 0, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 0, + "d": 1 + }, + "mne/stats/_adjacency.py": { + "a": 0, + "d": 2 + }, + "mne/stats/cluster_level.py": { + "a": 0, + "d": 1 + }, + "mne/stats/tests/test_adjacency.py": { + "a": 0, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_parallel.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 0, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 0, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 0, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + 
"a": 0, + "d": 1 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 0, + "d": 1 + }, + "mne/transforms.py": { + "a": 0, + "d": 1 + }, + "mne/utils/_bunch.py": { + "a": 0, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 0, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 0, + "d": 1 + }, + "mne/utils/check.py": { + "a": 0, + "d": 1 + }, + "mne/utils/config.py": { + "a": 0, + "d": 1 + }, + "mne/utils/dataframe.py": { + "a": 0, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 1 + }, + "mne/utils/fetching.py": { + "a": 0, + "d": 1 + }, + "mne/utils/linalg.py": { + "a": 0, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 0, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 0, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 0, + "d": 1 + }, + "mne/utils/progressbar.py": { + "a": 0, + "d": 1 + }, + "mne/utils/tests/test_bunch.py": { + "a": 0, + "d": 1 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 0, + "d": 1 + }, + "mne/utils/tests/test_testing.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_3d_overlay.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_dipole.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_mpl_figure.py": { + "a": 0, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 0, + "d": 1 + }, + "mne/viz/backends/tests/test_abstract.py": { + "a": 0, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 0, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 0, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 0, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 0, + "d": 1 + }, + "tutorials/clinical/30_ecog.py": { + "a": 0, + "d": 1 + }, + "tutorials/clinical/60_sleep.py": { + "a": 0, + "d": 1 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 0, + "d": 1 + }, + "tutorials/epochs/15_baseline_regression.py": { + "a": 0, + "d": 1 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 0, + "d": 1 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 0, + "d": 1 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 0, + "d": 1 + }, + "tutorials/epochs/50_epochs_to_data_frame.py": { + "a": 0, + "d": 1 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 0, + "d": 1 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 0, + "d": 1 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 0, + "d": 1 + }, + "tutorials/evoked/30_eeg_erp.py": { + "a": 0, + "d": 1 + }, + "tutorials/evoked/40_whitened.py": { + "a": 0, + "d": 1 + }, + "tutorials/forward/10_background_freesurfer.py": { + "a": 0, + "d": 1 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 0, + "d": 1 + }, + "tutorials/forward/25_automated_coreg.py": { + "a": 0, + "d": 1 + }, + "tutorials/forward/30_forward.py": { + "a": 0, + "d": 1 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 0, + "d": 1 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 0, + "d": 1 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 0, + "d": 1 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 0, + "d": 1 + }, + "tutorials/intro/10_overview.py": { + "a": 0, + "d": 1 + }, + "tutorials/intro/15_inplace.py": { + "a": 0, + "d": 1 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 0, + "d": 1 + }, + "tutorials/intro/30_info.py": { + "a": 0, + "d": 1 + }, + 
"tutorials/intro/40_sensor_locations.py": { + "a": 0, + "d": 1 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 0, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/10_stc_class.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/40_mne_fixed_free.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 0, + "d": 1 + }, + "tutorials/inverse/90_phantom_4DBTi.py": { + "a": 0, + "d": 1 + }, + "tutorials/io/10_reading_meg_data.py": { + "a": 0, + "d": 1 + }, + "tutorials/io/20_reading_eeg_data.py": { + "a": 0, + "d": 1 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 0, + "d": 1 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 0, + "d": 1 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 0, + "d": 1 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/80_opm_processing.py": { + "a": 0, + "d": 1 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 0, + "d": 1 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 0, + "d": 1 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 0, + "d": 1 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 0, + "d": 1 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 0, + "d": 1 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 0, + "d": 1 + }, + "tutorials/simulation/80_dics.py": { + "a": 0, + "d": 1 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 0, + "d": 1 + }, + "tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 0, + "d": 1 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 0, + "d": 1 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 0, + "d": 1 + }, + "tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py": { + "a": 0, + "d": 1 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 0, + "d": 1 + }, + 
"tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 0, + "d": 1 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 0, + "d": 1 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 0, + "d": 1 + }, + "tutorials/time-freq/10_spectrum_class.py": { + "a": 0, + "d": 1 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 0, + "d": 1 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/116.json b/doc/sphinxext/prs/116.json new file mode 100644 index 00000000000..074f054da45 --- /dev/null +++ b/doc/sphinxext/prs/116.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e0e1f9cff35c025f7cdc261a71bf0cc4f1b196da", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/event.py": { + "a": 6, + "d": 4 + }, + "mne/fiff/tests/data/test-eve-1.eve": { + "a": 1, + "d": 0 + }, + "mne/fiff/tests/data/test-eve-1.fif": { + "a": 0, + "d": 0 + }, + "mne/fiff/tests/data/test-eve-old-style.eve": { + "a": 0, + "d": 1 + }, + "mne/fiff/tests/data/test-eve.eve": { + "a": 0, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 21, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11600.json b/doc/sphinxext/prs/11600.json new file mode 100644 index 00000000000..678e5090779 --- /dev/null +++ b/doc/sphinxext/prs/11600.json @@ -0,0 +1,195 @@ +{ + "merge_commit_sha": "84259a673caafc3dfb37e07e9d73a3593c5c6acf", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "mne/_freesurfer.py": { + "a": 4, + "d": 4 + }, + "mne/annotations.py": { + "a": 2, + "d": 2 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 10, + "d": 10 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/res4.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 3, + "d": 3 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 5, + "d": 5 + }, + "mne/source_space.py": { + "a": 3, + "d": 3 + }, + "mne/surface.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_annotations.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_morph.py": 
{ + "a": 2, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_transforms.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + }, + "mne/utils/check.py": { + "a": 3, + "d": 3 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_check.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11601.json b/doc/sphinxext/prs/11601.json new file mode 100644 index 00000000000..1d9f8f834f8 --- /dev/null +++ b/doc/sphinxext/prs/11601.json @@ -0,0 +1,187 @@ +{ + "merge_commit_sha": "bde717c01d9d4eaa21233685b9ea4017b54a5a0c", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "mne/_ola.py": { + "a": 2, + "d": 2 + }, + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/mixin.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 1, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/io/open.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/source.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + }, + "mne/utils/_bunch.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 2, + "d": 2 + }, + "mne/utils/_testing.py": { + "a": 2, + "d": 2 + }, + "mne/utils/check.py": { + "a": 2, + "d": 2 + }, + "mne/utils/mixin.py": { + "a": 4, + "d": 4 + }, + "mne/utils/numerics.py": { + "a": 2, + "d": 2 + }, + "mne/utils/progressbar.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d_overlay.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_scraper.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/callback.py": { + "a": 5, + "d": 5 + }, + "mne/viz/_brain/surface.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11604.json b/doc/sphinxext/prs/11604.json new file mode 100644 index 00000000000..74faabea5f1 --- /dev/null +++ b/doc/sphinxext/prs/11604.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": 
"267967920ac384cd8698f65670bfb9c2e8720a49", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/io/open.py": { + "a": 1, + "d": 1 + }, + "mne/io/proc_history.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 6, + "d": 6 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/colormap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 3, + "d": 3 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11606.json b/doc/sphinxext/prs/11606.json new file mode 100644 index 00000000000..c4125d82bba --- /dev/null +++ b/doc/sphinxext/prs/11606.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6de2197a8c59f5b132c4769ff66706f1720c9d56", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11607.json b/doc/sphinxext/prs/11607.json new file mode 100644 index 00000000000..b3fec263172 --- /dev/null +++ b/doc/sphinxext/prs/11607.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3b4930abd32c00dc71da6c4a3e56a850a34ba185", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 31, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 11, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11612.json b/doc/sphinxext/prs/11612.json new file mode 100644 index 00000000000..5f258d20643 --- /dev/null +++ b/doc/sphinxext/prs/11612.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "686857c55288ed91cc13cdc43bf100ab1f6c475d", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/transforms.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11614.json b/doc/sphinxext/prs/11614.json new file mode 100644 index 00000000000..0c413fcb0e5 --- /dev/null +++ b/doc/sphinxext/prs/11614.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "bfc8a3c471d271e3d31b7696fc0f9465a262223d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 7, + "d": 3 + }, + "mne/time_frequency/spectrum.py": { + "a": 3, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 3 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 12, + "d": 2 + }, + "mne/viz/backends/_utils.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/11616.json b/doc/sphinxext/prs/11616.json new file mode 100644 index 00000000000..44a617000fc --- /dev/null +++ b/doc/sphinxext/prs/11616.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "d0cf4db4ba1f79cf31009b0c060ed64cc6b6de57", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 3, + "d": 3 + }, + "doc/changes/1.0.inc": { + "a": 7, + "d": 7 + }, + "doc/changes/1.1.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 4, + "d": 3 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 6, + "d": 1 + }, + "mne/conftest.py": { + "a": 17, + "d": 14 + }, + "mne/gui/__init__.py": { + "a": 34, + "d": 16 + }, + "mne/gui/tests/test_ieeg_locate.py": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 5, + "d": 4 + }, + "mne/utils/check.py": { + "a": 1, + "d": 0 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11620.json b/doc/sphinxext/prs/11620.json new file mode 100644 index 00000000000..89b6ad91f5b --- /dev/null +++ b/doc/sphinxext/prs/11620.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "a458622f4b1b7daa7685ad1d79fe132b22082ed9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 11, + "d": 5 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 3, + "d": 3 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 4, + "d": 4 + }, + "tools/setup_xvfb.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11621.json b/doc/sphinxext/prs/11621.json new file mode 100644 index 00000000000..2ef68599a8f --- /dev/null +++ b/doc/sphinxext/prs/11621.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b25f2505727eb7a79cd4955f044f7b027c57ab43", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11622.json b/doc/sphinxext/prs/11622.json new file mode 100644 index 00000000000..448a96ed353 --- /dev/null +++ b/doc/sphinxext/prs/11622.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5d1aa110fa0d889a98171131609e59747fc52d65", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/circle_artifacts.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11623.json b/doc/sphinxext/prs/11623.json new file mode 100644 index 00000000000..61aa0983841 --- /dev/null +++ b/doc/sphinxext/prs/11623.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4a467838cdd3b013547a813c1772f3a481546714", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/html_templates/report/image.html.jinja": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11625.json b/doc/sphinxext/prs/11625.json new file 
mode 100644 index 00000000000..3422fd91626 --- /dev/null +++ b/doc/sphinxext/prs/11625.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a605992c9001af6e2c19d9d82558eef8150833b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/tests/test_spectrum.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11629.json b/doc/sphinxext/prs/11629.json new file mode 100644 index 00000000000..56e03d686c6 --- /dev/null +++ b/doc/sphinxext/prs/11629.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "1c301417dd3f80ea77289f2fd60a4fe585b9db8b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/preprocessing/locate_ieeg_micro.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 12, + "d": 6 + }, + "requirements.txt": { + "a": 0, + "d": 1 + }, + "requirements_doc.txt": { + "a": 5, + "d": 4 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1163.json b/doc/sphinxext/prs/1163.json new file mode 100644 index 00000000000..756a0277c49 --- /dev/null +++ b/doc/sphinxext/prs/1163.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0f80d89c9a46d3195fd75accc265e01cc2b1247c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fiff/evoked.py": { + "a": 15, + "d": 16 + }, + "mne/filter.py": { + "a": 63, + "d": 46 + }, + "mne/tests/test_filter.py": { + "a": 19, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11631.json b/doc/sphinxext/prs/11631.json new file mode 100644 index 00000000000..97777334821 --- /dev/null +++ b/doc/sphinxext/prs/11631.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "263114e32d4fe4951f8f622f7ca74c30bfba37ae", + "authors": [ + { + "n": "Jacob Woessner", + "e": "Woessner.jacob@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/cnt/cnt.py": { + "a": 60, + "d": 2 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11632.json b/doc/sphinxext/prs/11632.json new file mode 100644 index 00000000000..ae3df7e3950 --- /dev/null +++ b/doc/sphinxext/prs/11632.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f88f22d52b3b5e2cba3f790965ac14d2fe4a13e0", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/contralateral_referencing.py": { + "a": 3, + "d": 6 + }, + "mne/channels/channels.py": { + "a": 29, + "d": 16 + }, + "mne/channels/tests/test_channels.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11634.json b/doc/sphinxext/prs/11634.json new file mode 100644 index 00000000000..351a49c6817 --- /dev/null +++ b/doc/sphinxext/prs/11634.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "57f5ce300f8e67ffcc0d9a19ab724693105b74b6", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 46, + "d": 0 + }, + 
"mne/io/_digitization.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11635.json b/doc/sphinxext/prs/11635.json new file mode 100644 index 00000000000..5a53ed5eb35 --- /dev/null +++ b/doc/sphinxext/prs/11635.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "8fc3d07c8ab32101cba41723f3f8d825a08166df", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 4, + "d": 0 + }, + "mne/datasets/_fetch.py": { + "a": 12, + "d": 9 + }, + "mne/datasets/config.py": { + "a": 4, + "d": 2 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 20, + "d": 7 + }, + "mne/datasets/limo/limo.py": { + "a": 21, + "d": 8 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 22, + "d": 15 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 20, + "d": 7 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 20, + "d": 7 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 21, + "d": 27 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 71, + "d": 35 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "tools/circleci_download.sh": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11639.json b/doc/sphinxext/prs/11639.json new file mode 100644 index 00000000000..41f5037fdd5 --- /dev/null +++ b/doc/sphinxext/prs/11639.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4eb9ef04c50710dba840ceaac6dc4f69ffefdd31", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 41, + "d": 0 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 3, + "d": 2 + }, + "mne/datasets/eegbci/tests/test_eegbci.py": { + "a": 14, + "d": 0 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 4, + "d": 29 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11640.json b/doc/sphinxext/prs/11640.json new file mode 100644 index 00000000000..f09c46bc60e --- /dev/null +++ b/doc/sphinxext/prs/11640.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "52506f4bed1af37e42644ed261735ef0a27b7556", + "authors": [ + { + "n": "Moritz Gerster", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/sensor_space.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 22, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 41, + "d": 7 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11643.json b/doc/sphinxext/prs/11643.json new file mode 100644 index 00000000000..714c2eb9291 --- /dev/null +++ b/doc/sphinxext/prs/11643.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4adddf7c871fb345afc8426902dab987d589c7b3", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/install/installers.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11644.json b/doc/sphinxext/prs/11644.json new file mode 100644 index 00000000000..84d0a088bc3 --- /dev/null +++ b/doc/sphinxext/prs/11644.json @@ -0,0 +1,235 @@ +{ + "merge_commit_sha": "d5556313874d1d68e50dcab685d80fb6f73a7913", + "authors": [ + { + "n": "Eric Larson", + 
"e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 0, + "d": 77 + }, + ".github/workflows/compat_old.yml": { + "a": 0, + "d": 68 + }, + ".github/workflows/linux_conda.yml": { + "a": 0, + "d": 125 + }, + ".github/workflows/linux_pip.yml": { + "a": 0, + "d": 76 + }, + ".github/workflows/macos_conda.yml": { + "a": 0, + "d": 76 + }, + ".github/workflows/precommit.yml": { + "a": 0, + "d": 14 + }, + ".github/workflows/tests.yml": { + "a": 117, + "d": 0 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 3, + "d": 2 + }, + "mne/bem.py": { + "a": 10, + "d": 97 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 3, + "d": 3 + }, + "mne/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/evoked.py": { + "a": 11, + "d": 11 + }, + "mne/filter.py": { + "a": 43, + "d": 15 + }, + "mne/fixes.py": { + "a": 12, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 5, + "d": 5 + }, + "mne/io/_digitization.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 54, + "d": 65 + }, + "mne/io/cnt/cnt.py": { + "a": 15, + "d": 14 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 2 + }, + "mne/io/ctf/res4.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf_comp.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/general.py": { + "a": 4, + "d": 4 + }, + "mne/io/fiff/raw.py": { + "a": 3, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 36, + "d": 36 + }, + "mne/io/open.py": { + "a": 3, + "d": 3 + }, + "mne/io/proc_history.py": { + "a": 5, + "d": 5 + }, + "mne/io/proj.py": { + "a": 6, + "d": 6 + }, + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 28, + "d": 20 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 2 + }, + "mne/io/tree.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 4 + }, + "mne/preprocessing/_csd.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 10 + }, + "mne/source_space.py": { + "a": 10, + "d": 10 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/regression.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 2, + "d": 2 + }, + "mne/utils/_testing.py": { + "a": 2, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 9, + "d": 7 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 14, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tools/get_minimal_commands.sh": { + "a": 21, + "d": 11 + }, + "tools/github_actions_dependencies.sh": { + "a": 11, + "d": 15 + }, + "tools/github_actions_env_vars.sh": { + "a": 30, + "d": 0 + }, + "tools/github_actions_install.sh": { + "a": 4, + "d": 3 + }, + "tools/github_actions_locale.sh": { + "a": 0, + "d": 5 + }, + "tools/github_actions_test.sh": { + "a": 11, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11646.json b/doc/sphinxext/prs/11646.json new file mode 100644 index 00000000000..ae02ed3ac32 --- /dev/null +++ b/doc/sphinxext/prs/11646.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "8c7f0c61f37a150712251cc32200152b6b4bd37a", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { 
+ "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 7, + "d": 1 + }, + "doc/_static/style.css": { + "a": 12, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/README.txt": { + "a": 23, + "d": 0 + }, + "examples/preprocessing/locate_ieeg_micro.py": { + "a": 0, + "d": 94 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 0, + "d": 544 + }, + "tutorials/clinical/README.txt": { + "a": 34, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11648.json b/doc/sphinxext/prs/11648.json new file mode 100644 index 00000000000..347d4e4bb57 --- /dev/null +++ b/doc/sphinxext/prs/11648.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cc4006845d3c5367f16705557d652d9bbb82e87e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 3 + }, + "requirements_testing.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11649.json b/doc/sphinxext/prs/11649.json new file mode 100644 index 00000000000..1a0ac87f630 --- /dev/null +++ b/doc/sphinxext/prs/11649.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e4dd2286e397d3c40a92fefddaddc6273e683e1d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1165.json b/doc/sphinxext/prs/1165.json new file mode 100644 index 00000000000..2285299537f --- /dev/null +++ b/doc/sphinxext/prs/1165.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7ef8390275c9ebf79bab67e3fe8bdd8016229ed9", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/label.py": { + "a": 137, + "d": 30 + }, + "mne/tests/test_label.py": { + "a": 34, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11654.json b/doc/sphinxext/prs/11654.json new file mode 100644 index 00000000000..29bf279ea4c --- /dev/null +++ b/doc/sphinxext/prs/11654.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "909e45821bd004b6072a6426e4e8ea88e51b72ad", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_ica.py": { + "a": 18, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 20, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11657.json b/doc/sphinxext/prs/11657.json new file mode 100644 index 00000000000..a6fbc0354d5 --- /dev/null +++ b/doc/sphinxext/prs/11657.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "cecbf0fb1dc919254bb6095db491b2d7c5f68003", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 5, + "d": 9 + }, + ".github/workflows/circle_artifacts.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 0, + "d": 34 + }, + "doc/Makefile": { + "a": 27, + "d": 93 + }, + "doc/conf.py": { + "a": 30, + "d": 45 + }, + "doc/install/contributing.rst": { + "a": 2, + "d": 14 + }, + "doc/sphinxext/gen_commands.py": { + "a": 16, + "d": 13 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "requirements_testing_extra.txt": { + "a": 2, + "d": 1 + }, + "tools/circleci_download.sh": { + "a": 5, + "d": 9 
+ }, + "tools/github_actions_dependencies.sh": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11658.json b/doc/sphinxext/prs/11658.json new file mode 100644 index 00000000000..94125208c96 --- /dev/null +++ b/doc/sphinxext/prs/11658.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "42b29db564564155a77352bae4303068f7d78935", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 28, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11662.json b/doc/sphinxext/prs/11662.json new file mode 100644 index 00000000000..f06d87c8cbe --- /dev/null +++ b/doc/sphinxext/prs/11662.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "32000d4983ba904a5660ec7c31818d881e1cfa61", + "authors": [ + { + "n": "Florin Pop", + "e": "florin@ae.studio" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/check.py": { + "a": 6, + "d": 5 + }, + "mne/viz/backends/_utils.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11663.json b/doc/sphinxext/prs/11663.json new file mode 100644 index 00000000000..b35f1f7d3d6 --- /dev/null +++ b/doc/sphinxext/prs/11663.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "18b80903da84483325584d8dffe7c9f847f12b81", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 3 + }, + "doc/_static/style.css": { + "a": 15, + "d": 4 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11664.json b/doc/sphinxext/prs/11664.json new file mode 100644 index 00000000000..237f12e31cc --- /dev/null +++ b/doc/sphinxext/prs/11664.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "fe1d6edf38d203a4b5cbdedef207847f70b2ebab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 22, + "d": 5 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 8 + }, + "mne/utils/config.py": { + "a": 4, + "d": 3 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 8 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11665.json b/doc/sphinxext/prs/11665.json new file mode 100644 index 00000000000..e449b8b108b --- /dev/null +++ b/doc/sphinxext/prs/11665.json @@ -0,0 +1,175 @@ +{ + "merge_commit_sha": "daec3da7c7ecfc8a2db53f438d0c64f4703d78d0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 6, + "d": 31 + }, + "doc/changes/latest.inc": { + "a": 7, + "d": 4 + }, + "doc/conf.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/multi_dipole_model.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/time_frequency_mixed_norm_inverse.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_rap_music.py": { + "a": 2, + "d": 6 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 5, + "d": 4 + }, + "mne/channels/channels.py": { + "a": 5, + "d": 7 + }, + 
"mne/channels/tests/test_channels.py": { + "a": 3, + "d": 3 + }, + "mne/chpi.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 10, + "d": 7 + }, + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/event.py": { + "a": 2, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 6, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 3 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 58, + "d": 50 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 35, + "d": 24 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 5, + "d": 7 + }, + "mne/rank.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 5, + "d": 6 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 10, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 5, + "d": 6 + }, + "mne/viz/tests/test_topo.py": { + "a": 2, + "d": 4 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 5, + "d": 3 + }, + "tools/azure_dependencies.sh": { + "a": 4, + "d": 3 + }, + "tools/circleci_bash_env.sh": { + "a": 27, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 16 + }, + "tutorials/simulation/80_dics.py": { + "a": 5, + "d": 2 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11666.json b/doc/sphinxext/prs/11666.json new file mode 100644 index 00000000000..dc2730b4384 --- /dev/null +++ b/doc/sphinxext/prs/11666.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e9fd6e78283f4c0655dfe80e1e86053181543f8f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Timur Sokhin", + "e": "qwinpin@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 2, + "d": 5 + }, + "mne/tests/test_annotations.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11667.json b/doc/sphinxext/prs/11667.json new file mode 100644 index 00000000000..f7c7ce817e9 --- /dev/null +++ b/doc/sphinxext/prs/11667.json @@ -0,0 +1,2879 @@ +{ + "merge_commit_sha": "16fe5b5dbb1622ddfb7269fab889bab44ef43212", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".git-blame-ignore-revs": { + "a": 1, + "d": 0 + }, + ".github/workflows/tests.yml": { + "a": 1, + "d": 0 + }, + ".pre-commit-config.yaml": { + "a": 5, + "d": 5 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 1158, + "d": 800 + }, + "doc/install/contributing.rst": { + "a": 2, + "d": 1 + }, + "doc/sphinxext/flow_diagram.py": { + "a": 100, + "d": 84 + }, + "doc/sphinxext/gen_commands.py": { + "a": 27, + "d": 25 + }, + "doc/sphinxext/gen_names.py": { + "a": 11, + "d": 10 + }, + "doc/sphinxext/gh_substitutions.py": { + "a": 4, + "d": 4 + }, + "doc/sphinxext/mne_substitutions.py": { + "a": 31, + "d": 20 + }, + 
"doc/sphinxext/newcontrib_substitutions.py": { + "a": 6, + "d": 7 + }, + "doc/sphinxext/unit_role.py": { + "a": 6, + "d": 4 + }, + "examples/datasets/brainstorm_data.py": { + "a": 21, + "d": 13 + }, + "examples/datasets/hf_sef_data.py": { + "a": 3, + "d": 4 + }, + "examples/datasets/limo_data.py": { + "a": 47, + "d": 48 + }, + "examples/datasets/opm_data.py": { + "a": 59, + "d": 36 + }, + "examples/datasets/spm_faces_dataset_sgskip.py": { + "a": 41, + "d": 26 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 33, + "d": 24 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 64, + "d": 39 + }, + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 46, + "d": 30 + }, + "examples/decoding/decoding_spatio_temporal_source.py": { + "a": 60, + "d": 35 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 13, + "d": 14 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 47, + "d": 25 + }, + "examples/decoding/decoding_unsupervised_spatial_filter.py": { + "a": 33, + "d": 20 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 43, + "d": 25 + }, + "examples/decoding/ems_filtering.py": { + "a": 35, + "d": 22 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 17, + "d": 16 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 82, + "d": 48 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 52, + "d": 38 + }, + "examples/forward/forward_sensitivity_maps.py": { + "a": 37, + "d": 33 + }, + "examples/forward/left_cerebellum_volume_source.py": { + "a": 21, + "d": 11 + }, + "examples/forward/source_space_morphing.py": { + "a": 22, + "d": 20 + }, + "examples/inverse/compute_mne_inverse_epochs_in_label.py": { + "a": 49, + "d": 34 + }, + "examples/inverse/compute_mne_inverse_raw_in_label.py": { + "a": 12, + "d": 12 + }, + "examples/inverse/compute_mne_inverse_volume.py": { + "a": 13, + "d": 10 + }, + "examples/inverse/custom_inverse_solver.py": { + "a": 32, + "d": 19 + }, + "examples/inverse/dics_epochs.py": { + "a": 43, + "d": 24 + }, + "examples/inverse/dics_source_power.py": { + "a": 24, + "d": 12 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 78, + "d": 38 + }, + "examples/inverse/gamma_map_inverse.py": { + "a": 49, + "d": 26 + }, + "examples/inverse/label_activation_from_stc.py": { + "a": 22, + "d": 20 + }, + "examples/inverse/label_from_stc.py": { + "a": 35, + "d": 30 + }, + "examples/inverse/label_source_activations.py": { + "a": 46, + "d": 30 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 77, + "d": 41 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 83, + "d": 60 + }, + "examples/inverse/mne_cov_power.py": { + "a": 62, + "d": 33 + }, + "examples/inverse/morph_surface_stc.py": { + "a": 30, + "d": 25 + }, + "examples/inverse/morph_volume_stc.py": { + "a": 19, + "d": 16 + }, + "examples/inverse/multi_dipole_model.py": { + "a": 42, + "d": 39 + }, + "examples/inverse/multidict_reweighted_tfmxne.py": { + "a": 39, + "d": 23 + }, + "examples/inverse/psf_ctf_label_leakage.py": { + "a": 50, + "d": 35 + }, + "examples/inverse/psf_ctf_vertices.py": { + "a": 38, + "d": 31 + }, + "examples/inverse/psf_ctf_vertices_lcmv.py": { + "a": 93, + "d": 54 + }, + "examples/inverse/psf_volume.py": { + "a": 35, + "d": 27 + }, + "examples/inverse/rap_music.py": { + "a": 14, + "d": 16 + }, + "examples/inverse/read_inverse.py": { + "a": 21, + "d": 16 + }, + "examples/inverse/read_stc.py": { + "a": 7, + "d": 6 + }, + "examples/inverse/resolution_metrics.py": { + "a": 81, + "d": 46 + }, + 
"examples/inverse/resolution_metrics_eegmeg.py": { + "a": 86, + "d": 51 + }, + "examples/inverse/snr_estimate.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/source_space_snr.py": { + "a": 21, + "d": 17 + }, + "examples/inverse/time_frequency_mixed_norm_inverse.py": { + "a": 81, + "d": 39 + }, + "examples/inverse/vector_mne_solution.py": { + "a": 37, + "d": 23 + }, + "examples/io/elekta_epochs.py": { + "a": 17, + "d": 17 + }, + "examples/io/read_neo_format.py": { + "a": 5, + "d": 5 + }, + "examples/io/read_noise_covariance_matrix.py": { + "a": 3, + "d": 3 + }, + "examples/io/read_xdf.py": { + "a": 2, + "d": 4 + }, + "examples/preprocessing/contralateral_referencing.py": { + "a": 20, + "d": 26 + }, + "examples/preprocessing/css.py": { + "a": 36, + "d": 31 + }, + "examples/preprocessing/define_target_events.py": { + "a": 32, + "d": 17 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 127, + "d": 64 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 23, + "d": 16 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 7, + "d": 8 + }, + "examples/preprocessing/eog_regression.py": { + "a": 16, + "d": 15 + }, + "examples/preprocessing/find_ref_artifacts.py": { + "a": 14, + "d": 10 + }, + "examples/preprocessing/fnirs_artifact_removal.py": { + "a": 12, + "d": 9 + }, + "examples/preprocessing/ica_comparison.py": { + "a": 16, + "d": 11 + }, + "examples/preprocessing/interpolate_bad_channels.py": { + "a": 8, + "d": 8 + }, + "examples/preprocessing/movement_compensation.py": { + "a": 14, + "d": 12 + }, + "examples/preprocessing/movement_detection.py": { + "a": 29, + "d": 19 + }, + "examples/preprocessing/muscle_detection.py": { + "a": 9, + "d": 5 + }, + "examples/preprocessing/muscle_ica.py": { + "a": 19, + "d": 12 + }, + "examples/preprocessing/otp.py": { + "a": 27, + "d": 16 + }, + "examples/preprocessing/shift_evoked.py": { + "a": 26, + "d": 12 + }, + "examples/preprocessing/virtual_evoked.py": { + "a": 13, + "d": 13 + }, + "examples/preprocessing/xdawn_denoising.py": { + "a": 21, + "d": 13 + }, + "examples/simulation/plot_stc_metrics.py": { + "a": 95, + "d": 70 + }, + "examples/simulation/simulate_evoked_data.py": { + "a": 34, + "d": 24 + }, + "examples/simulation/simulate_raw_data.py": { + "a": 26, + "d": 18 + }, + "examples/simulation/simulated_raw_data_using_subject_anatomy.py": { + "a": 54, + "d": 50 + }, + "examples/simulation/source_simulator.py": { + "a": 13, + "d": 12 + }, + "examples/stats/cluster_stats_evoked.py": { + "a": 29, + "d": 23 + }, + "examples/stats/fdr_stats_evoked.py": { + "a": 38, + "d": 17 + }, + "examples/stats/linear_regression_raw.py": { + "a": 24, + "d": 14 + }, + "examples/stats/sensor_permutation_test.py": { + "a": 31, + "d": 14 + }, + "examples/stats/sensor_regression.py": { + "a": 8, + "d": 6 + }, + "examples/time_frequency/compute_csd.py": { + "a": 21, + "d": 11 + }, + "examples/time_frequency/compute_source_psd_epochs.py": { + "a": 48, + "d": 28 + }, + "examples/time_frequency/source_label_time_frequency.py": { + "a": 59, + "d": 32 + }, + "examples/time_frequency/source_power_spectrum.py": { + "a": 27, + "d": 16 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 99, + "d": 63 + }, + "examples/time_frequency/source_space_time_frequency.py": { + "a": 29, + "d": 18 + }, + "examples/time_frequency/temporal_whitening.py": { + "a": 13, + "d": 13 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 68, + "d": 39 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 41, + "d": 32 + }, + 
"examples/time_frequency/time_frequency_simulated.py": { + "a": 115, + "d": 55 + }, + "examples/visualization/3d_to_2d.py": { + "a": 16, + "d": 11 + }, + "examples/visualization/brain.py": { + "a": 26, + "d": 21 + }, + "examples/visualization/channel_epochs_image.py": { + "a": 36, + "d": 17 + }, + "examples/visualization/eeg_on_scalp.py": { + "a": 14, + "d": 7 + }, + "examples/visualization/evoked_arrowmap.py": { + "a": 18, + "d": 11 + }, + "examples/visualization/evoked_topomap.py": { + "a": 37, + "d": 30 + }, + "examples/visualization/evoked_whitening.py": { + "a": 32, + "d": 15 + }, + "examples/visualization/meg_sensors.py": { + "a": 33, + "d": 24 + }, + "examples/visualization/mne_helmet.py": { + "a": 35, + "d": 14 + }, + "examples/visualization/montage_sgskip.py": { + "a": 18, + "d": 11 + }, + "examples/visualization/parcellation.py": { + "a": 38, + "d": 17 + }, + "examples/visualization/publication_figure.py": { + "a": 81, + "d": 49 + }, + "examples/visualization/roi_erpimage_by_rt.py": { + "a": 55, + "d": 21 + }, + "examples/visualization/sensor_noise_level.py": { + "a": 4, + "d": 3 + }, + "examples/visualization/ssp_projs_sensitivity_map.py": { + "a": 12, + "d": 8 + }, + "examples/visualization/topo_compare_conditions.py": { + "a": 11, + "d": 11 + }, + "examples/visualization/topo_customized.py": { + "a": 16, + "d": 15 + }, + "examples/visualization/xhemi.py": { + "a": 17, + "d": 12 + }, + "ignore_words.txt": { + "a": 2, + "d": 0 + }, + "logo/generate_mne_logos.py": { + "a": 103, + "d": 70 + }, + "mne/__init__.py": { + "a": 190, + "d": 71 + }, + "mne/__main__.py": { + "a": 1, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 215, + "d": 160 + }, + "mne/_ola.py": { + "a": 166, + "d": 120 + }, + "mne/annotations.py": { + "a": 367, + "d": 256 + }, + "mne/baseline.py": { + "a": 57, + "d": 32 + }, + "mne/beamformer/__init__.py": { + "a": 14, + "d": 4 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 220, + "d": 132 + }, + "mne/beamformer/_dics.py": { + "a": 173, + "d": 95 + }, + "mne/beamformer/_lcmv.py": { + "a": 139, + "d": 69 + }, + "mne/beamformer/_rap_music.py": { + "a": 31, + "d": 28 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 7, + "d": 8 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 473, + "d": 292 + }, + "mne/beamformer/tests/test_external.py": { + "a": 41, + "d": 31 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 641, + "d": 336 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 83, + "d": 70 + }, + "mne/beamformer/tests/test_resolution_matrix.py": { + "a": 21, + "d": 22 + }, + "mne/bem.py": { + "a": 900, + "d": 686 + }, + "mne/channels/__init__.py": { + "a": 70, + "d": 30 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 37, + "d": 29 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 115, + "d": 112 + }, + "mne/channels/channels.py": { + "a": 822, + "d": 551 + }, + "mne/channels/interpolation.py": { + "a": 62, + "d": 43 + }, + "mne/channels/layout.py": { + "a": 262, + "d": 176 + }, + "mne/channels/montage.py": { + "a": 432, + "d": 332 + }, + "mne/channels/tests/test_channels.py": { + "a": 240, + "d": 201 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 116, + "d": 95 + }, + "mne/channels/tests/test_layout.py": { + "a": 136, + "d": 97 + }, + "mne/channels/tests/test_montage.py": { + "a": 1024, + "d": 769 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 165, + "d": 101 + }, + "mne/chpi.py": { + "a": 586, + "d": 396 + }, + "mne/commands/mne_anonymize.py": { + "a": 47, + "d": 20 + }, + 
"mne/commands/mne_browse_raw.py": { + "a": 128, + "d": 61 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 65, + "d": 32 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 137, + "d": 46 + }, + "mne/commands/mne_compare_fiff.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 224, + "d": 107 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_coreg.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_flash_bem.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_maxfilter.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_prepare_bem_model.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_report.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_setup_forward_model.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_setup_source_space.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_show_fiff.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_show_info.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_surf2bem.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_sys_info.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_what.py": { + "a": 0, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 0, + "d": 0 + }, + "mne/commands/utils.py": { + "a": 0, + "d": 0 + }, + "mne/conftest.py": { + "a": 0, + "d": 0 + }, + "mne/coreg.py": { + "a": 0, + "d": 0 + }, + "mne/cov.py": { + "a": 0, + "d": 0 + }, + "mne/cuda.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/_fake/_fake.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/_fetch.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/_infant/base.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/_phantom/base.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/brainstorm/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 180, + "d": 175 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 35, + "d": 27 + }, + "mne/datasets/eegbci/tests/test_eegbci.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/epilepsy_ecog/_data.py": { + "a": 15, + "d": 10 + }, + "mne/datasets/erp_core/erp_core.py": { + "a": 16, + "d": 11 + }, + "mne/datasets/eyelink/eyelink.py": { + "a": 16, + "d": 11 + }, + "mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py": { + "a": 15, + "d": 10 + }, + "mne/datasets/fnirs_motor/fnirs_motor.py": { + "a": 16, + "d": 11 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 27, + "d": 21 + }, + "mne/datasets/kiloword/kiloword.py": { + "a": 13, + "d": 8 + }, + "mne/datasets/limo/limo.py": { + "a": 180, + "d": 107 + }, + "mne/datasets/misc/_misc.py": { + "a": 14, + "d": 8 + }, + "mne/datasets/mtrf/mtrf.py": { + "a": 13, + "d": 10 + }, + "mne/datasets/multimodal/multimodal.py": { + "a": 16, + "d": 11 + }, + "mne/datasets/opm/opm.py": { + "a": 14, + "d": 11 + }, + "mne/datasets/phantom_4dbti/phantom_4dbti.py": { + "a": 15, + "d": 10 + }, + 
"mne/datasets/refmeg_noise/refmeg_noise.py": { + "a": 15, + "d": 10 + }, + "mne/datasets/sample/sample.py": { + "a": 16, + "d": 11 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 99, + "d": 69 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 44, + "d": 27 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 31, + "d": 20 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 118, + "d": 78 + }, + "mne/datasets/somato/somato.py": { + "a": 16, + "d": 11 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 24, + "d": 16 + }, + "mne/datasets/ssvep/ssvep.py": { + "a": 14, + "d": 11 + }, + "mne/datasets/testing/__init__.py": { + "a": 7, + "d": 2 + }, + "mne/datasets/testing/_testing.py": { + "a": 31, + "d": 21 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 143, + "d": 112 + }, + "mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py": { + "a": 14, + "d": 10 + }, + "mne/datasets/utils.py": { + "a": 417, + "d": 206 + }, + "mne/datasets/visual_92_categories/visual_92_categories.py": { + "a": 15, + "d": 10 + }, + "mne/decoding/__init__.py": { + "a": 8, + "d": 3 + }, + "mne/decoding/base.py": { + "a": 105, + "d": 64 + }, + "mne/decoding/csp.py": { + "a": 234, + "d": 115 + }, + "mne/decoding/ems.py": { + "a": 28, + "d": 21 + }, + "mne/decoding/mixin.py": { + "a": 12, + "d": 9 + }, + "mne/decoding/receptive_field.py": { + "a": 104, + "d": 64 + }, + "mne/decoding/search_light.py": { + "a": 71, + "d": 60 + }, + "mne/decoding/ssd.py": { + "a": 122, + "d": 66 + }, + "mne/decoding/tests/test_base.py": { + "a": 100, + "d": 75 + }, + "mne/decoding/tests/test_csp.py": { + "a": 123, + "d": 86 + }, + "mne/decoding/tests/test_ems.py": { + "a": 25, + "d": 14 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 226, + "d": 173 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 54, + "d": 53 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 232, + "d": 103 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 120, + "d": 64 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 80, + "d": 56 + }, + "mne/decoding/time_frequency.py": { + "a": 28, + "d": 9 + }, + "mne/decoding/transformer.py": { + "a": 167, + "d": 94 + }, + "mne/defaults.py": { + "a": 279, + "d": 115 + }, + "mne/dipole.py": { + "a": 666, + "d": 356 + }, + "mne/epochs.py": { + "a": 1405, + "d": 753 + }, + "mne/event.py": { + "a": 393, + "d": 239 + }, + "mne/evoked.py": { + "a": 759, + "d": 344 + }, + "mne/export/_brainvision.py": { + "a": 1, + "d": 0 + }, + "mne/export/_edf.py": { + "a": 109, + "d": 83 + }, + "mne/export/_eeglab.py": { + "a": 36, + "d": 19 + }, + "mne/export/_egimff.py": { + "a": 53, + "d": 46 + }, + "mne/export/_export.py": { + "a": 56, + "d": 33 + }, + "mne/export/tests/test_export.py": { + "a": 204, + "d": 172 + }, + "mne/filter.py": { + "a": 1154, + "d": 561 + }, + "mne/fixes.py": { + "a": 167, + "d": 115 + }, + "mne/forward/__init__.py": { + "a": 45, + "d": 19 + }, + "mne/forward/_compute_forward.py": { + "a": 160, + "d": 133 + }, + "mne/forward/_field_interpolation.py": { + "a": 0, + "d": 0 + }, + "mne/forward/_lead_dots.py": { + "a": 0, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 0, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 0, + "d": 0 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 0, + "d": 0 + }, + "mne/forward/tests/test_forward.py": { + "a": 0, + "d": 0 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 0, + "d": 0 + }, + 
"mne/gui/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/gui/_core.py": { + "a": 0, + "d": 0 + }, + "mne/gui/_coreg.py": { + "a": 0, + "d": 0 + }, + "mne/gui/_ieeg_locate.py": { + "a": 0, + "d": 0 + }, + "mne/gui/tests/test_core.py": { + "a": 0, + "d": 0 + }, + "mne/gui/tests/test_coreg.py": { + "a": 0, + "d": 0 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 0, + "d": 0 + }, + "mne/gui/tests/test_ieeg_locate.py": { + "a": 0, + "d": 0 + }, + "mne/html_templates/_templates.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 0, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/_digitization.py": { + "a": 0, + "d": 0 + }, + "mne/io/_read_raw.py": { + "a": 0, + "d": 0 + }, + "mne/io/array/array.py": { + "a": 0, + "d": 0 + }, + "mne/io/array/tests/test_array.py": { + "a": 0, + "d": 0 + }, + "mne/io/artemis123/artemis123.py": { + "a": 0, + "d": 0 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 0, + "d": 0 + }, + "mne/io/artemis123/utils.py": { + "a": 0, + "d": 0 + }, + "mne/io/base.py": { + "a": 0, + "d": 0 + }, + "mne/io/besa/besa.py": { + "a": 0, + "d": 0 + }, + "mne/io/besa/tests/test_besa.py": { + "a": 0, + "d": 0 + }, + "mne/io/boxy/boxy.py": { + "a": 101, + "d": 81 + }, + "mne/io/boxy/tests/test_boxy.py": { + "a": 76, + "d": 62 + }, + "mne/io/brainvision/brainvision.py": { + "a": 348, + "d": 271 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 435, + "d": 309 + }, + "mne/io/bti/bti.py": { + "a": 821, + "d": 645 + }, + "mne/io/bti/constants.py": { + "a": 67, + "d": 67 + }, + "mne/io/bti/read.py": { + "a": 17, + "d": 22 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 237, + "d": 174 + }, + "mne/io/cnt/_utils.py": { + "a": 39, + "d": 31 + }, + "mne/io/cnt/cnt.py": { + "a": 203, + "d": 154 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 14, + "d": 15 + }, + "mne/io/compensator.py": { + "a": 36, + "d": 33 + }, + "mne/io/constants.py": { + "a": 882, + "d": 746 + }, + "mne/io/ctf/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 123, + "d": 85 + }, + "mne/io/ctf/eeg.py": { + "a": 43, + "d": 30 + }, + "mne/io/ctf/hc.py": { + "a": 32, + "d": 26 + }, + "mne/io/ctf/info.py": { + "a": 298, + "d": 220 + }, + "mne/io/ctf/markers.py": { + "a": 36, + "d": 31 + }, + "mne/io/ctf/res4.py": { + "a": 124, + "d": 107 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 473, + "d": 194 + }, + "mne/io/ctf/trans.py": { + "a": 63, + "d": 45 + }, + "mne/io/ctf_comp.py": { + "a": 49, + "d": 43 + }, + "mne/io/curry/curry.py": { + "a": 233, + "d": 154 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 263, + "d": 158 + }, + "mne/io/diff.py": { + "a": 3, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 678, + "d": 461 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 428, + "d": 227 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 53, + "d": 38 + }, + "mne/io/eeglab/_eeglab.py": { + "a": 4, + "d": 4 + }, + "mne/io/eeglab/eeglab.py": { + "a": 248, + "d": 150 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 366, + "d": 234 + }, + "mne/io/egi/egi.py": 
{ + "a": 167, + "d": 112 + }, + "mne/io/egi/egimff.py": { + "a": 388, + "d": 309 + }, + "mne/io/egi/events.py": { + "a": 30, + "d": 27 + }, + "mne/io/egi/general.py": { + "a": 65, + "d": 61 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 236, + "d": 193 + }, + "mne/io/eximia/eximia.py": { + "a": 34, + "d": 23 + }, + "mne/io/eximia/tests/test_eximia.py": { + "a": 22, + "d": 16 + }, + "mne/io/eyelink/eyelink.py": { + "a": 0, + "d": 0 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 0, + "d": 0 + }, + "mne/io/fieldtrip/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 0, + "d": 0 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 0, + "d": 0 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 0, + "d": 0 + }, + "mne/io/fieldtrip/utils.py": { + "a": 0, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 0, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 0, + "d": 0 + }, + "mne/io/fil/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/fil/fil.py": { + "a": 0, + "d": 0 + }, + "mne/io/fil/sensors.py": { + "a": 0, + "d": 0 + }, + "mne/io/fil/tests/test_fil.py": { + "a": 0, + "d": 0 + }, + "mne/io/hitachi/hitachi.py": { + "a": 0, + "d": 0 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 0, + "d": 0 + }, + "mne/io/kit/constants.py": { + "a": 0, + "d": 0 + }, + "mne/io/kit/coreg.py": { + "a": 0, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 0, + "d": 0 + }, + "mne/io/matrix.py": { + "a": 0, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 0, + "d": 0 + }, + "mne/io/nedf/nedf.py": { + "a": 0, + "d": 0 + }, + "mne/io/nedf/tests/test_nedf.py": { + "a": 0, + "d": 0 + }, + "mne/io/nicolet/nicolet.py": { + "a": 0, + "d": 0 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 0, + "d": 0 + }, + "mne/io/nihon/nihon.py": { + "a": 0, + "d": 0 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 0, + "d": 0 + }, + "mne/io/nirx/_localized_abbr.py": { + "a": 0, + "d": 0 + }, + "mne/io/nirx/nirx.py": { + "a": 0, + "d": 0 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 0, + "d": 0 + }, + "mne/io/open.py": { + "a": 0, + "d": 0 + }, + "mne/io/persyst/persyst.py": { + "a": 0, + "d": 0 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 0, + "d": 0 + }, + "mne/io/pick.py": { + "a": 0, + "d": 0 + }, + "mne/io/proc_history.py": { + "a": 0, + "d": 0 + }, + "mne/io/proj.py": { + "a": 0, + "d": 0 + }, + "mne/io/reference.py": { + "a": 0, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 0, + "d": 0 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 0, + "d": 0 + }, + "mne/io/tag.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_apply_function.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_compensator.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_constants.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_meas_info.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_pick.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_proc_history.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_read_raw.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_show_fiff.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_utils.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_what.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_write.py": { + "a": 0, + 
"d": 0 + }, + "mne/io/tree.py": { + "a": 0, + "d": 0 + }, + "mne/io/utils.py": { + "a": 0, + "d": 0 + }, + "mne/io/what.py": { + "a": 0, + "d": 0 + }, + "mne/io/write.py": { + "a": 0, + "d": 0 + }, + "mne/label.py": { + "a": 0, + "d": 0 + }, + "mne/minimum_norm/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 59, + "d": 47 + }, + "mne/minimum_norm/inverse.py": { + "a": 896, + "d": 560 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 140, + "d": 73 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 63, + "d": 56 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 838, + "d": 567 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 128, + "d": 79 + }, + "mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 71, + "d": 64 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 17, + "d": 7 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 191, + "d": 86 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 403, + "d": 155 + }, + "mne/misc.py": { + "a": 15, + "d": 18 + }, + "mne/morph.py": { + "a": 509, + "d": 336 + }, + "mne/morph_map.py": { + "a": 57, + "d": 39 + }, + "mne/parallel.py": { + "a": 37, + "d": 27 + }, + "mne/preprocessing/__init__.py": { + "a": 26, + "d": 8 + }, + "mne/preprocessing/_csd.py": { + "a": 86, + "d": 65 + }, + "mne/preprocessing/_css.py": { + "a": 8, + "d": 10 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 179, + "d": 117 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 10, + "d": 11 + }, + "mne/preprocessing/_regress.py": { + "a": 126, + "d": 66 + }, + "mne/preprocessing/annotate_amplitude.py": { + "a": 65, + "d": 39 + }, + "mne/preprocessing/annotate_nan.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 161, + "d": 113 + }, + "mne/preprocessing/bads.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ctps_.py": { + "a": 10, + "d": 8 + }, + "mne/preprocessing/ecg.py": { + "a": 206, + "d": 89 + }, + "mne/preprocessing/eog.py": { + "a": 151, + "d": 63 + }, + "mne/preprocessing/eyetracking/eyetracking.py": { + "a": 67, + "d": 48 + }, + "mne/preprocessing/hfc.py": { + "a": 18, + "d": 12 + }, + "mne/preprocessing/ica.py": { + "a": 1140, + "d": 667 + }, + "mne/preprocessing/ieeg/_projection.py": { + "a": 74, + "d": 53 + }, + "mne/preprocessing/ieeg/_volume.py": { + "a": 91, + "d": 70 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 83, + "d": 64 + }, + "mne/preprocessing/ieeg/tests/test_volume.py": { + "a": 64, + "d": 40 + }, + "mne/preprocessing/infomax_.py": { + "a": 55, + "d": 37 + }, + "mne/preprocessing/interpolate.py": { + "a": 33, + "d": 37 + }, + "mne/preprocessing/maxfilter.py": { + "a": 75, + "d": 49 + }, + "mne/preprocessing/maxwell.py": { + "a": 1092, + "d": 689 + }, + "mne/preprocessing/nirs/__init__.py": { + "a": 12, + "d": 5 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 36, + "d": 28 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 17, + "d": 9 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 7, + "d": 6 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 104, + "d": 78 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 38, + "d": 37 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 213, + "d": 128 + }, + "mne/preprocessing/nirs/tests/test_optical_density.py": { + "a": 11, + "d": 14 + }, + "mne/preprocessing/nirs/tests/test_scalp_coupling_index.py": { + "a": 17, + "d": 
16 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 9, + "d": 9 + }, + "mne/preprocessing/otp.py": { + "a": 19, + "d": 15 + }, + "mne/preprocessing/realign.py": { + "a": 27, + "d": 22 + }, + "mne/preprocessing/ssp.py": { + "a": 335, + "d": 96 + }, + "mne/preprocessing/stim.py": { + "a": 33, + "d": 23 + }, + "mne/preprocessing/tests/test_annotate_amplitude.py": { + "a": 146, + "d": 142 + }, + "mne/preprocessing/tests/test_annotate_nan.py": { + "a": 5, + "d": 9 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 74, + "d": 64 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 69, + "d": 62 + }, + "mne/preprocessing/tests/test_css.py": { + "a": 18, + "d": 13 + }, + "mne/preprocessing/tests/test_ctps.py": { + "a": 29, + "d": 25 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 46, + "d": 32 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 40, + "d": 27 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 37, + "d": 34 + }, + "mne/preprocessing/tests/test_hfc.py": { + "a": 40, + "d": 38 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 587, + "d": 497 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 9, + "d": 8 + }, + "mne/preprocessing/tests/test_interpolate.py": { + "a": 60, + "d": 43 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_otp.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_peak_finder.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_realign.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/xdawn.py": { + "a": 0, + "d": 0 + }, + "mne/proj.py": { + "a": 0, + "d": 0 + }, + "mne/rank.py": { + "a": 0, + "d": 0 + }, + "mne/report/js_and_css/bootstrap-icons/gen_css_for_mne.py": { + "a": 0, + "d": 0 + }, + "mne/report/report.py": { + "a": 0, + "d": 0 + }, + "mne/report/tests/test_report.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/_metrics.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/evoked.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/metrics/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/metrics/metrics.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/metrics/tests/test_metrics.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/source.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/tests/test_raw.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/tests/test_source.py": { + "a": 0, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 0, + "d": 0 + }, + "mne/source_space.py": { + "a": 0, + "d": 0 + }, + "mne/stats/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/stats/_adjacency.py": { + "a": 0, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 0, + "d": 0 + }, + "mne/stats/multi_comp.py": { + "a": 0, + "d": 0 + }, + "mne/stats/parametric.py": { + "a": 0, + "d": 0 + }, + "mne/stats/permutations.py": { + "a": 0, + "d": 0 + }, + "mne/stats/regression.py": { + "a": 113, + "d": 70 + }, + "mne/stats/tests/test_adjacency.py": { + "a": 16, + "d": 12 + }, + 
"mne/stats/tests/test_cluster_level.py": { + "a": 395, + "d": 250 + }, + "mne/stats/tests/test_multi_comp.py": { + "a": 4, + "d": 5 + }, + "mne/stats/tests/test_parametric.py": { + "a": 76, + "d": 68 + }, + "mne/stats/tests/test_permutations.py": { + "a": 20, + "d": 20 + }, + "mne/stats/tests/test_regression.py": { + "a": 42, + "d": 40 + }, + "mne/surface.py": { + "a": 656, + "d": 455 + }, + "mne/tests/test_annotations.py": { + "a": 620, + "d": 511 + }, + "mne/tests/test_bem.py": { + "a": 316, + "d": 250 + }, + "mne/tests/test_chpi.py": { + "a": 319, + "d": 254 + }, + "mne/tests/test_coreg.py": { + "a": 216, + "d": 162 + }, + "mne/tests/test_cov.py": { + "a": 429, + "d": 320 + }, + "mne/tests/test_defaults.py": { + "a": 16, + "d": 17 + }, + "mne/tests/test_dipole.py": { + "a": 213, + "d": 148 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 155, + "d": 111 + }, + "mne/tests/test_epochs.py": { + "a": 1885, + "d": 1216 + }, + "mne/tests/test_event.py": { + "a": 271, + "d": 266 + }, + "mne/tests/test_evoked.py": { + "a": 216, + "d": 205 + }, + "mne/tests/test_filter.py": { + "a": 553, + "d": 341 + }, + "mne/tests/test_freesurfer.py": { + "a": 115, + "d": 71 + }, + "mne/tests/test_import_nesting.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 539, + "d": 345 + }, + "mne/tests/test_line_endings.py": { + "a": 50, + "d": 27 + }, + "mne/tests/test_morph.py": { + "a": 512, + "d": 343 + }, + "mne/tests/test_morph_map.py": { + "a": 14, + "d": 13 + }, + "mne/tests/test_ola.py": { + "a": 35, + "d": 28 + }, + "mne/tests/test_parallel.py": { + "a": 14, + "d": 6 + }, + "mne/tests/test_proj.py": { + "a": 179, + "d": 152 + }, + "mne/tests/test_rank.py": { + "a": 121, + "d": 116 + }, + "mne/tests/test_read_vectorview_selection.py": { + "a": 30, + "d": 13 + }, + "mne/tests/test_source_estimate.py": { + "a": 0, + "d": 0 + }, + "mne/tests/test_source_space.py": { + "a": 0, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 0, + "d": 0 + }, + "mne/tests/test_transforms.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/_stft.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/_stockwell.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/ar.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/csd.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 0, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 0, + "d": 0 + }, + "mne/transforms.py": { + "a": 0, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/utils/_bunch.py": { + "a": 0, + "d": 0 + }, + "mne/utils/_logging.py": { + "a": 0, + "d": 0 + }, + "mne/utils/_testing.py": { + "a": 0, + "d": 0 + }, + "mne/utils/check.py": { + "a": 0, + "d": 0 + }, + "mne/utils/config.py": { + "a": 0, + "d": 0 + }, + "mne/utils/dataframe.py": { + "a": 0, + "d": 0 
+ }, + "mne/utils/docs.py": { + "a": 0, + "d": 0 + }, + "mne/utils/fetching.py": { + "a": 0, + "d": 0 + }, + "mne/utils/linalg.py": { + "a": 0, + "d": 0 + }, + "mne/utils/misc.py": { + "a": 0, + "d": 0 + }, + "mne/utils/mixin.py": { + "a": 0, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 0, + "d": 0 + }, + "mne/utils/progressbar.py": { + "a": 0, + "d": 0 + }, + "mne/utils/spectrum.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_bunch.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_config.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_docs.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_linalg.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_logging.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_misc.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_numerics.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_testing.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_3d_overlay.py": { + "a": 0, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/_scraper.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/callback.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/colormap.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/surface.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/view.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_dipole.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_mpl_figure.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_proj.py": { + "a": 0, + "d": 0 + }, + "mne/viz/_scraper.py": { + "a": 0, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 0, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 0, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 0, + "d": 0 + }, + "mne/viz/backends/_qt.py": { + "a": 0, + "d": 0 + }, + "mne/viz/backends/_utils.py": { + "a": 94, + "d": 72 + }, + "mne/viz/backends/renderer.py": { + "a": 56, + "d": 32 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 3, + "d": 1 + }, + "mne/viz/backends/tests/test_abstract.py": { + "a": 35, + "d": 33 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 79, + "d": 66 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 31, + "d": 28 + }, + "mne/viz/circle.py": { + "a": 121, + "d": 74 + }, + "mne/viz/conftest.py": { + "a": 11, + "d": 9 + }, + "mne/viz/epochs.py": { + "a": 422, + "d": 262 + }, + "mne/viz/evoked.py": { + "a": 1315, + "d": 660 + }, + "mne/viz/ica.py": { + "a": 548, + "d": 337 + }, + "mne/viz/misc.py": { + "a": 522, + "d": 344 + }, + "mne/viz/montage.py": { + "a": 25, + "d": 10 + }, + "mne/viz/raw.py": { + "a": 236, + "d": 128 + }, + "mne/viz/tests/test_3d.py": { + "a": 666, + "d": 392 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 78, + "d": 53 + }, + "mne/viz/tests/test_circle.py": { + "a": 15, + "d": 10 + }, + "mne/viz/tests/test_epochs.py": { + "a": 183, + "d": 125 + }, + "mne/viz/tests/test_evoked.py": { + "a": 291, + "d": 216 + }, + "mne/viz/tests/test_figure.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 163, + "d": 132 + }, + 
"mne/viz/tests/test_misc.py": { + "a": 176, + "d": 119 + }, + "mne/viz/tests/test_montage.py": { + "a": 35, + "d": 28 + }, + "mne/viz/tests/test_proj.py": { + "a": 26, + "d": 18 + }, + "mne/viz/tests/test_raw.py": { + "a": 337, + "d": 293 + }, + "mne/viz/tests/test_scraper.py": { + "a": 8, + "d": 9 + }, + "mne/viz/tests/test_topo.py": { + "a": 186, + "d": 125 + }, + "mne/viz/tests/test_topomap.py": { + "a": 337, + "d": 264 + }, + "mne/viz/tests/test_utils.py": { + "a": 49, + "d": 44 + }, + "mne/viz/topo.py": { + "a": 597, + "d": 246 + }, + "mne/viz/topomap.py": { + "a": 1727, + "d": 836 + }, + "mne/viz/utils.py": { + "a": 888, + "d": 560 + }, + "pyproject.toml": { + "a": 3, + "d": 1 + }, + "requirements_testing.txt": { + "a": 1, + "d": 0 + }, + "setup.py": { + "a": 105, + "d": 95 + }, + "tools/check_mne_location.py": { + "a": 3, + "d": 2 + }, + "tools/generate_codemeta.py": { + "a": 72, + "d": 64 + }, + "tools/get_testing_version.sh": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 79, + "d": 47 + }, + "tutorials/clinical/30_ecog.py": { + "a": 77, + "d": 37 + }, + "tutorials/clinical/60_sleep.py": { + "a": 81, + "d": 51 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 48, + "d": 38 + }, + "tutorials/epochs/15_baseline_regression.py": { + "a": 67, + "d": 45 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 46, + "d": 34 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 17, + "d": 18 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 143, + "d": 89 + }, + "tutorials/epochs/50_epochs_to_data_frame.py": { + "a": 56, + "d": 35 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 7, + "d": 8 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 30, + "d": 25 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 54, + "d": 43 + }, + "tutorials/evoked/30_eeg_erp.py": { + "a": 80, + "d": 70 + }, + "tutorials/evoked/40_whitened.py": { + "a": 19, + "d": 12 + }, + "tutorials/forward/10_background_freesurfer.py": { + "a": 5, + "d": 4 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 109, + "d": 64 + }, + "tutorials/forward/25_automated_coreg.py": { + "a": 17, + "d": 12 + }, + "tutorials/forward/30_forward.py": { + "a": 67, + "d": 34 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 39, + "d": 26 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 161, + "d": 107 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 18, + "d": 20 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 27, + "d": 23 + }, + "tutorials/intro/10_overview.py": { + "a": 80, + "d": 40 + }, + "tutorials/intro/15_inplace.py": { + "a": 8, + "d": 8 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 23, + "d": 17 + }, + "tutorials/intro/30_info.py": { + "a": 11, + "d": 11 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 25, + "d": 27 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 28, + "d": 22 + }, + "tutorials/intro/70_report.py": { + "a": 134, + "d": 155 + }, + "tutorials/inverse/10_stc_class.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/40_mne_fixed_free.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/70_eeg_mri_coords.py": 
{ + "a": 0, + "d": 0 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/90_phantom_4DBTi.py": { + "a": 0, + "d": 0 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 0, + "d": 0 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 0, + "d": 0 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 0, + "d": 0 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 0, + "d": 0 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/80_opm_processing.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 0, + "d": 0 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 0, + "d": 0 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 0, + "d": 0 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 28, + "d": 23 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 6, + "d": 5 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 54, + "d": 35 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 42, + "d": 42 + }, + "tutorials/simulation/80_dics.py": { + "a": 99, + "d": 47 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 107, + "d": 49 + }, + "tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 33, + "d": 22 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 83, + "d": 41 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 88, + "d": 41 + }, + "tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py": { + "a": 100, + "d": 46 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 111, + "d": 56 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 65, + "d": 36 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 40, + "d": 26 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 85, + "d": 52 + }, + "tutorials/time-freq/10_spectrum_class.py": { + "a": 18, + "d": 15 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 81, + "d": 41 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 226, + "d": 157 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11668.json b/doc/sphinxext/prs/11668.json new file mode 100644 index 00000000000..3ce44cc3688 --- /dev/null +++ 
b/doc/sphinxext/prs/11668.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "ccdbbdcc501042045c30f44454ed1d704c817550", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 15, + "d": 4 + }, + "mne/channels/tests/test_channels.py": { + "a": 11, + "d": 1 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1167.json b/doc/sphinxext/prs/1167.json new file mode 100644 index 00000000000..bdd14d60c3a --- /dev/null +++ b/doc/sphinxext/prs/1167.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "90c94ef284f232ec619c6fcad7b1671f16f81ffe", + "authors": [ + { + "n": "Brad Buran", + "e": null + } + ], + "changes": { + "mne/forward/_lead_dots.py": { + "a": 2, + "d": 1 + }, + "mne/utils.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11670.json b/doc/sphinxext/prs/11670.json new file mode 100644 index 00000000000..0ff50c991b2 --- /dev/null +++ b/doc/sphinxext/prs/11670.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "295a1bbcb8eec67773506464b56230e4ef7c8e41", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/institutional-partners.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 23, + "d": 23 + }, + "doc/conf.py": { + "a": 33, + "d": 28 + }, + "doc/references.bib": { + "a": 2, + "d": 4 + }, + "mne/channels/montage.py": { + "a": 3, + "d": 4 + }, + "mne/io/cnt/cnt.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11673.json b/doc/sphinxext/prs/11673.json new file mode 100644 index 00000000000..98f94f9460d --- /dev/null +++ b/doc/sphinxext/prs/11673.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "24dff77255706e5498d8cc80ebe35b757725c4e9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "README.rst": { + "a": 10, + "d": 11 + }, + "setup.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11675.json b/doc/sphinxext/prs/11675.json new file mode 100644 index 00000000000..535ea2f42b2 --- /dev/null +++ b/doc/sphinxext/prs/11675.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "dba7df1914f020f8905c7be4ae5a0ce823e4e2eb", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 0, + "d": 4 + }, + "doc/_templates/homepage.html": { + "a": 2, + "d": 2 + }, + "doc/_templates/layout.html": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 18, + "d": 14 + }, + "doc/index.rst": { + "a": 2, + "d": 0 + }, + "doc/install/index.rst": { + "a": 2, + "d": 0 + }, + "doc/overview/cite.rst": { + "a": 6, + "d": 3 + }, + "doc/overview/migrating.rst": { + "a": 3, + "d": 3 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11679.json b/doc/sphinxext/prs/11679.json new file mode 100644 index 00000000000..ac39687cfa2 --- /dev/null +++ 
b/doc/sphinxext/prs/11679.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "425e3b34439bfcff68f4f926b886aaba9a02478c", + "authors": [ + { + "n": "Theodore Papadopoulo", + "e": "Theodore.Papadopoulo@inria.fr" + } + ], + "changes": { + "doc/changes/0.19.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/inverse.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 23, + "d": 1 + }, + "examples/inverse/trap_music.py": { + "a": 61, + "d": 0 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/_rap_music.py": { + "a": 100, + "d": 69 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11680.json b/doc/sphinxext/prs/11680.json new file mode 100644 index 00000000000..cde518da0f9 --- /dev/null +++ b/doc/sphinxext/prs/11680.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "fdd56d5c6e5c8ac3bfa649abfa84b225d1629e9f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 6, + "d": 0 + }, + "mne/io/array/tests/test_array.py": { + "a": 10, + "d": 2 + }, + "mne/report/tests/test_report.py": { + "a": 19, + "d": 13 + }, + "mne/time_frequency/spectrum.py": { + "a": 21, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 2 + }, + "mne/utils/spectrum.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 6, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 15, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11682.json b/doc/sphinxext/prs/11682.json new file mode 100644 index 00000000000..0e02aac276c --- /dev/null +++ b/doc/sphinxext/prs/11682.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "60b29bef68144144445ddd1d3ca3126e369a2a7d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/datasets/brainstorm_data.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/find_ref_artifacts.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 5, + "d": 1 + }, + "examples/visualization/sensor_noise_level.py": { + "a": 6, + "d": 1 + }, + "mne/report/report.py": { + "a": 7, + "d": 1 + }, + "mne/report/tests/test_report.py": { + "a": 13, + "d": 19 + }, + "tutorials/clinical/60_sleep.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 3, + "d": 1 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 4, + "d": 4 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 1, + "d": 1 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 2, + "d": 2 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 1, + "d": 1 + }, + "tutorials/time-freq/10_spectrum_class.py": { + "a": 
1, + "d": 1 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11683.json b/doc/sphinxext/prs/11683.json new file mode 100644 index 00000000000..bb66fc59578 --- /dev/null +++ b/doc/sphinxext/prs/11683.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d2600ee084bc884c3cc461cd94bfe25ae9932d4c", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11685.json b/doc/sphinxext/prs/11685.json new file mode 100644 index 00000000000..0164ba1afb5 --- /dev/null +++ b/doc/sphinxext/prs/11685.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "0e68aba7e52f7d5241c58509ffd00f6ffc23d7a6", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 3, + "d": 0 + }, + "doc/visualization.rst": { + "a": 24, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_ui_events.py": { + "a": 261, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 52, + "d": 18 + }, + "mne/viz/ui_events.py": { + "a": 305, + "d": 0 + }, + "tutorials/visualization/10_publication_figure.py": { + "a": 1, + "d": 1 + }, + "tutorials/visualization/20_ui_events.py": { + "a": 100, + "d": 0 + }, + "tutorials/visualization/README.txt": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11686.json b/doc/sphinxext/prs/11686.json new file mode 100644 index 00000000000..1c76c0cee65 --- /dev/null +++ b/doc/sphinxext/prs/11686.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "7661516c7cc73055e8cdf85970d8b481ea9e45aa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + ".mailmap": { + "a": 8, + "d": 0 + }, + "CITATION.cff": { + "a": 44, + "d": 19 + }, + "SECURITY.md": { + "a": 3, + "d": 3 + }, + "codemeta.json": { + "a": 131, + "d": 53 + }, + "doc/_static/versions.json": { + "a": 7, + "d": 2 + }, + "doc/changes/0.19.inc": { + "a": 0, + "d": 9 + }, + "doc/changes/0.20.inc": { + "a": 0, + "d": 9 + }, + "doc/changes/0.21.inc": { + "a": 0, + "d": 9 + }, + "doc/changes/0.22.inc": { + "a": 0, + "d": 9 + }, + "doc/changes/0.23.inc": { + "a": 0, + "d": 14 + }, + "doc/changes/0.24.inc": { + "a": 0, + "d": 14 + }, + "doc/changes/1.0.inc": { + "a": 0, + "d": 18 + }, + "doc/changes/1.1.inc": { + "a": 0, + "d": 18 + }, + "doc/changes/1.2.inc": { + "a": 0, + "d": 18 + }, + "doc/changes/1.3.inc": { + "a": 0, + "d": 18 + }, + "doc/changes/1.4.inc": { + "a": 61, + "d": 21 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/cited.rst": { + "a": 3, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 2, + "d": 2 + }, + "tools/generate_codemeta.py": { + "a": 13, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11687.json b/doc/sphinxext/prs/11687.json new file mode 100644 index 00000000000..329db407fb3 --- /dev/null +++ b/doc/sphinxext/prs/11687.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "75dea8183599eb8ea8091216a32b41b34458f797", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + 
], + "changes": { + "doc/changes/0.24.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 34, + "d": 0 + }, + "doc/install/installers.rst": { + "a": 6, + "d": 6 + }, + "doc/mri.rst": { + "a": 4, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 0, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 14, + "d": 57 + }, + "mne/channels/tests/test_layout.py": { + "a": 0, + "d": 16 + }, + "mne/gui/__init__.py": { + "a": 2, + "d": 93 + }, + "mne/gui/_core.py": { + "a": 0, + "d": 639 + }, + "mne/gui/_ieeg_locate.py": { + "a": 0, + "d": 883 + }, + "mne/gui/tests/test_core.py": { + "a": 0, + "d": 71 + }, + "mne/gui/tests/test_ieeg_locate.py": { + "a": 0, + "d": 242 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 10 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 3, + "d": 7 + }, + "mne/io/pick.py": { + "a": 0, + "d": 46 + }, + "mne/report/report.py": { + "a": 1, + "d": 7 + }, + "mne/surface.py": { + "a": 0, + "d": 198 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 153 + }, + "mne/time_frequency/spectrum.py": { + "a": 4, + "d": 12 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/spectrum.py": { + "a": 0, + "d": 5 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11692.json b/doc/sphinxext/prs/11692.json new file mode 100644 index 00000000000..81004d104e7 --- /dev/null +++ b/doc/sphinxext/prs/11692.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "68e4f89763135637110859ee0e05e594955815f1", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/updating.rst": { + "a": 14, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11694.json b/doc/sphinxext/prs/11694.json new file mode 100644 index 00000000000..49d78ffef91 --- /dev/null +++ b/doc/sphinxext/prs/11694.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1e6aa7d99530f4cd11505665307b5667fe126b8a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 7, + "d": 76 + }, + "mne/forward/tests/test_forward.py": { + "a": 43, + "d": 2 + }, + "mne/proj.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11695.json b/doc/sphinxext/prs/11695.json new file mode 100644 index 00000000000..9594517a73c --- /dev/null +++ b/doc/sphinxext/prs/11695.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "25094d7a0abe4ae17bff5f0e1f59c429770cfd19", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/_includes/institutional-partners.rst": { + "a": 2, + "d": 1 + }, + "doc/_static/institution_logos/AE-Studio-dark.svg": { + "a": 5, + "d": 0 + }, + "doc/_static/institution_logos/AE-Studio-light.svg": { + "a": 5, + "d": 0 + }, + "doc/conf.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11696.json b/doc/sphinxext/prs/11696.json new file mode 100644 index 00000000000..3153216390b --- /dev/null +++ b/doc/sphinxext/prs/11696.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "97db26cb8d867480fb4ac37f61a76090250555e6", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/_mpl_figure.py": 
{ + "a": 4, + "d": 6 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 135, + "d": 169 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "requirements_base.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11697.json b/doc/sphinxext/prs/11697.json new file mode 100644 index 00000000000..4fbfeb5cab7 --- /dev/null +++ b/doc/sphinxext/prs/11697.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a7fec8c0b1c31425f807ccb4cffa8b76f5d2f94", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11698.json b/doc/sphinxext/prs/11698.json new file mode 100644 index 00000000000..c72ef0e784b --- /dev/null +++ b/doc/sphinxext/prs/11698.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4ce984f4458652551e2997da3749c2cbdb92a64e", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "examples/time_frequency/time_frequency_simulated.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1170.json b/doc/sphinxext/prs/1170.json new file mode 100644 index 00000000000..8d1b5110060 --- /dev/null +++ b/doc/sphinxext/prs/1170.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2daa7f621f6e1ec50fd8042a1bc19905368c6f2d", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 4, + "d": 7 + }, + "mne/tests/test_transforms.py": { + "a": 12, + "d": 11 + }, + "mne/transforms.py": { + "a": 20, + "d": 48 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11701.json b/doc/sphinxext/prs/11701.json new file mode 100644 index 00000000000..eb4f7fc6cfc --- /dev/null +++ b/doc/sphinxext/prs/11701.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cdcad6e14adf8e931dfec3d7da8329493e7a9c4b", + "authors": [ + { + "n": "Thomas Moreau", + "e": "thomas.moreau.2010@gmail.com" + } + ], + "changes": { + "mne/event.py": { + "a": 3, + "d": 4 + }, + "mne/tests/test_event.py": { + "a": 6, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/utils/check.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11702.json b/doc/sphinxext/prs/11702.json new file mode 100644 index 00000000000..37377e2884a --- /dev/null +++ b/doc/sphinxext/prs/11702.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c69f6327ecebc5bb8e70eeb39856677a640ea731", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11704.json b/doc/sphinxext/prs/11704.json new file mode 100644 index 00000000000..47b780388e2 --- /dev/null +++ b/doc/sphinxext/prs/11704.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "bb1d54798e56b2854cba76d0f3b12a383b8bb96f", + "authors": [ + { + "n": "Aaron Earle-Richardson", + "e": "ae166@duke.edu" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 1 + }, + "mne/viz/_brain/surface.py": { + "a": 8, + "d": 3 + }, + 
"mne/viz/_brain/tests/test_brain.py": { + "a": 12, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11707.json b/doc/sphinxext/prs/11707.json new file mode 100644 index 00000000000..2086736f7ff --- /dev/null +++ b/doc/sphinxext/prs/11707.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "52fde5b8e4d9af5e10411911dd16e5cc11d80435", + "authors": [ + { + "n": "Joshua Calder-Travis", + "e": null + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11709.json b/doc/sphinxext/prs/11709.json new file mode 100644 index 00000000000..c55d34c7f26 --- /dev/null +++ b/doc/sphinxext/prs/11709.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a9126b19950e8803c87c022aee2e04d6d2515f9", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/install/installers.rst": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11713.json b/doc/sphinxext/prs/11713.json new file mode 100644 index 00000000000..47450aadcd3 --- /dev/null +++ b/doc/sphinxext/prs/11713.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "917b000f6f707e23499225c0e621b13b48e0f52a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/baseline.py": { + "a": 15, + "d": 15 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11714.json b/doc/sphinxext/prs/11714.json new file mode 100644 index 00000000000..ebbb1a5dfdf --- /dev/null +++ b/doc/sphinxext/prs/11714.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d466e180715c95a2a86713b3df85c2203682bc23", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11715.json b/doc/sphinxext/prs/11715.json new file mode 100644 index 00000000000..cf4f0abc6cf --- /dev/null +++ b/doc/sphinxext/prs/11715.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4914d2355c9b90f1b80862a78e548a649b36ad7c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11718.json b/doc/sphinxext/prs/11718.json new file mode 100644 index 00000000000..9aae9058aa4 --- /dev/null +++ b/doc/sphinxext/prs/11718.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "dd9a4073eff6cf9320af1500b8a614a60a38fb95", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/stim.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_regression.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11719.json b/doc/sphinxext/prs/11719.json new file mode 100644 index 00000000000..8e4fd3e52fe --- /dev/null +++ b/doc/sphinxext/prs/11719.json @@ -0,0 +1,75 @@ +{ + 
"merge_commit_sha": "35d37979ceb6f7f2ea021face10dc679b929d03e", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/file_io.rst": { + "a": 1, + "d": 1 + }, + "doc/preprocessing.rst": { + "a": 2, + "d": 0 + }, + "mne/io/eyelink/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/io/eyelink/_utils.py": { + "a": 113, + "d": 0 + }, + "mne/io/eyelink/eyelink.py": { + "a": 53, + "d": 40 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/eyetracking/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/eyetracking/calibration.py": { + "a": 229, + "d": 0 + }, + "mne/preprocessing/eyetracking/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/eyetracking/tests/test_calibration.py": { + "a": 247, + "d": 0 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 49, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1172.json b/doc/sphinxext/prs/1172.json new file mode 100644 index 00000000000..328707f2dfc --- /dev/null +++ b/doc/sphinxext/prs/1172.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "030e087384a94c031ee45f348cbfbe3e95fe0a50", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "mne/fiff/open.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11720.json b/doc/sphinxext/prs/11720.json new file mode 100644 index 00000000000..25e3f039746 --- /dev/null +++ b/doc/sphinxext/prs/11720.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a8ec5d067ab8cc2ec8004d8dc291eebe893287d8", + "authors": [ + { + "n": "Gennadiy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11721.json b/doc/sphinxext/prs/11721.json new file mode 100644 index 00000000000..4270829eba1 --- /dev/null +++ b/doc/sphinxext/prs/11721.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "13dd05a48aeeb952deb183bede8ed588ae55f7a6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 5, + "d": 3 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 30, + "d": 9 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11723.json b/doc/sphinxext/prs/11723.json new file mode 100644 index 00000000000..a8ee8df3f35 --- /dev/null +++ b/doc/sphinxext/prs/11723.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c19bce34d636285e9ddd14f5c183c81795caaf99", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_qt.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11724.json b/doc/sphinxext/prs/11724.json new file mode 100644 index 00000000000..073e49c2657 --- /dev/null +++ b/doc/sphinxext/prs/11724.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"d158117a716512fbe96d8ff6c4d735f1e9601d09", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/overview/roadmap.rst": { + "a": 41, + "d": 36 + }, + "mne/report/tests/test_report.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11727.json b/doc/sphinxext/prs/11727.json new file mode 100644 index 00000000000..46012c0a3b3 --- /dev/null +++ b/doc/sphinxext/prs/11727.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c98749b529a41473cfccbc18485a9d1924546545", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11729.json b/doc/sphinxext/prs/11729.json new file mode 100644 index 00000000000..62c7c4fa44e --- /dev/null +++ b/doc/sphinxext/prs/11729.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "959d2b64dd1f53b36eefb0e025110d01b92006ee", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/install/installers.rst": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1173.json b/doc/sphinxext/prs/1173.json new file mode 100644 index 00000000000..35d6c6448d0 --- /dev/null +++ b/doc/sphinxext/prs/1173.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dcd3979155fb10bb3e4f5582d1345cdc98c916d2", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "mne/fiff/meas_info.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11730.json b/doc/sphinxext/prs/11730.json new file mode 100644 index 00000000000..b5b6f5e7218 --- /dev/null +++ b/doc/sphinxext/prs/11730.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6fd91f6c61cf86b201b57b5df43ce6898518cc50", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/surface.py": { + "a": 11, + "d": 8 + }, + "mne/tests/test_surface.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11733.json b/doc/sphinxext/prs/11733.json new file mode 100644 index 00000000000..cde1578db49 --- /dev/null +++ b/doc/sphinxext/prs/11733.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ffa3fb50f3fcf4029d874d490a53c7d142c034ab", + "authors": [ + { + "n": "George O'Neill", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/fil/fil.py": { + "a": 17, + "d": 10 + }, + "mne/io/fil/tests/test_fil.py": { + "a": 23, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11736.json b/doc/sphinxext/prs/11736.json new file mode 100644 index 00000000000..d83d0768fcb --- /dev/null +++ b/doc/sphinxext/prs/11736.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "ee767ddffbcde5869e4740548120c9055c8e41ed", + "authors": [ + { + "n": "Gennadiy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/filter.py": { + "a": 16, + "d": 2 + }, + "mne/io/base.py": { + "a": 7, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 8, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 6, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 8, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/11740.json b/doc/sphinxext/prs/11740.json new file mode 100644 index 00000000000..98450af058a --- /dev/null +++ b/doc/sphinxext/prs/11740.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "f48398a141ac4ed0ec4ec1f2cef766afd8605696", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/eyetracking/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/eyetracking/_pupillometry.py": { + "a": 108, + "d": 0 + }, + "mne/preprocessing/eyetracking/eyetracking.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/eyetracking/tests/test_pupillometry.py": { + "a": 69, + "d": 0 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 69, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11741.json b/doc/sphinxext/prs/11741.json new file mode 100644 index 00000000000..f2de28519cb --- /dev/null +++ b/doc/sphinxext/prs/11741.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "95297672f1a6c33cc7f64f887282bcfdaa795900", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/_includes/institutional-partners.rst": { + "a": 1, + "d": 0 + }, + "doc/_static/institution_logos/FCBG.svg": { + "a": 82, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 6, + "d": 0 + }, + "doc/overview/people.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11744.json b/doc/sphinxext/prs/11744.json new file mode 100644 index 00000000000..28bbda6e457 --- /dev/null +++ b/doc/sphinxext/prs/11744.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c4464741467547a4e93f2f807af57313cdfa2331", + "authors": [ + { + "n": "Diptyajit Das", + "e": "bmedasdiptyajit@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 1, + "d": 0 + }, + "mne/channels/data/montages/easycap-M43.txt": { + "a": 65, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 4, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11745.json b/doc/sphinxext/prs/11745.json new file mode 100644 index 00000000000..7e6dff7ac43 --- /dev/null +++ b/doc/sphinxext/prs/11745.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b0f8ce69c7258319624aebdd6de63b23e1c78ad5", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/overview/people.rst": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 7 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11746.json b/doc/sphinxext/prs/11746.json new file mode 100644 index 00000000000..c5c68990b98 --- /dev/null +++ b/doc/sphinxext/prs/11746.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "bad786df12c4c14512cb1a27276a688912131a16", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + 
"a": 2, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 237, + "d": 0 + }, + "mne/io/eyelink/eyelink.py": { + "a": 34, + "d": 272 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 47, + "d": 18 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 3, + "d": 3 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 22, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11748.json b/doc/sphinxext/prs/11748.json new file mode 100644 index 00000000000..07df830ee4a --- /dev/null +++ b/doc/sphinxext/prs/11748.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1d776cb0f83fe55784f06e004bad672878e34a8a", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "mne/event.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1175.json b/doc/sphinxext/prs/1175.json new file mode 100644 index 00000000000..0c781919c99 --- /dev/null +++ b/doc/sphinxext/prs/1175.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "487500658b65c2cef24f45b3e92354c325633b21", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "mne/tests/test_transforms.py": { + "a": 9, + "d": 0 + }, + "mne/transforms.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11750.json b/doc/sphinxext/prs/11750.json new file mode 100644 index 00000000000..2c6eb48589b --- /dev/null +++ b/doc/sphinxext/prs/11750.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c8f478ba33b2d0eed9e326b543476403e54dcd03", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 3, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11752.json b/doc/sphinxext/prs/11752.json new file mode 100644 index 00000000000..bd0e16b5c87 --- /dev/null +++ b/doc/sphinxext/prs/11752.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "52d1e3e016e5caadb040eb3fa30790a8895d8732", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 14, + "d": 5 + }, + "mne/viz/tests/test_epochs.py": { + "a": 6, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 2, + "d": 3 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/40_whitened.py": { + "a": 2, + "d": 2 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 1, + "d": 1 + }, + "tutorials/simulation/80_dics.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11753.json b/doc/sphinxext/prs/11753.json new file mode 100644 index 00000000000..ab3d0e95a87 --- /dev/null +++ b/doc/sphinxext/prs/11753.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "be5f51aa8f5fdaec2b915be0346e163ab7363103", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11754.json b/doc/sphinxext/prs/11754.json new file mode 100644 index 00000000000..8c636a96e49 --- /dev/null +++ b/doc/sphinxext/prs/11754.json @@ -0,0 +1,23 @@ +{ + 
"merge_commit_sha": "38ed57443a45131f9dee5e7a21ca39c2c1fcd78b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 14, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11755.json b/doc/sphinxext/prs/11755.json new file mode 100644 index 00000000000..281d8b7e933 --- /dev/null +++ b/doc/sphinxext/prs/11755.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4b0923f8cf491e13da9f602af91c42cd3722261d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 8, + "d": 6 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11756.json b/doc/sphinxext/prs/11756.json new file mode 100644 index 00000000000..88061526680 --- /dev/null +++ b/doc/sphinxext/prs/11756.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "dca0a2267c02ba10778913bdafebd352d630962e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/1.1.inc": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11757.json b/doc/sphinxext/prs/11757.json new file mode 100644 index 00000000000..b500b1e4565 --- /dev/null +++ b/doc/sphinxext/prs/11757.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7def8369d7ca691d807fe916663168c2ee4c10e0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/preprocessing/eog.py": { + "a": 6, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11760.json b/doc/sphinxext/prs/11760.json new file mode 100644 index 00000000000..0e740e338b8 --- /dev/null +++ b/doc/sphinxext/prs/11760.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1763fa6af209b2eef34e2b41fe95f7e4f6d5dac1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 8, + "d": 7 + }, + "mne/viz/_mpl_figure.py": { + "a": 32, + "d": 8 + }, + "mne/viz/tests/test_raw.py": { + "a": 61, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11764.json b/doc/sphinxext/prs/11764.json new file mode 100644 index 00000000000..7ea1214f24f --- /dev/null +++ b/doc/sphinxext/prs/11764.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3c4a6f6bbc258dcb68f5d80ba5dd9b0d9f258057", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/install/manual_install.rst": { + "a": 28, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11765.json b/doc/sphinxext/prs/11765.json new file mode 100644 index 00000000000..7108578d96d --- /dev/null +++ b/doc/sphinxext/prs/11765.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b92c88d70468a0f8174ab527b3e24d7f07af966b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/parallel.py": { + "a": 5, + "d": 1 + }, + "mne/tests/test_parallel.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11766.json b/doc/sphinxext/prs/11766.json 
new file mode 100644 index 00000000000..17c6b236e10 --- /dev/null +++ b/doc/sphinxext/prs/11766.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2eb608a0f111faee842e017993273f950b083012", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 18, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11769.json b/doc/sphinxext/prs/11769.json new file mode 100644 index 00000000000..1699b450560 --- /dev/null +++ b/doc/sphinxext/prs/11769.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c874a7d764061e34098f84da49b6d598fed4bfba", + "authors": [ + { + "n": "Nikolai M Chapochnikov", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 0 + }, + "mne/evoked.py": { + "a": 3, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 7, + "d": 3 + }, + "mne/time_frequency/psd.py": { + "a": 5, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 10, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11770.json b/doc/sphinxext/prs/11770.json new file mode 100644 index 00000000000..c6bb3862997 --- /dev/null +++ b/doc/sphinxext/prs/11770.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "79647ec0f24de1c2c4d7d637726ef61cdc72a4da", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/overview/datasets_index.rst": { + "a": 7, + "d": 4 + }, + "mne/datasets/config.py": { + "a": 3, + "d": 3 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 131, + "d": 144 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11771.json b/doc/sphinxext/prs/11771.json new file mode 100644 index 00000000000..736e933b128 --- /dev/null +++ b/doc/sphinxext/prs/11771.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d78d0f5b0a6dedb4faf49950b07d72bdcc53557f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11773.json b/doc/sphinxext/prs/11773.json new file mode 100644 index 00000000000..9e9c3b35947 --- /dev/null +++ b/doc/sphinxext/prs/11773.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "06d396d40a6a925473dcf1451758272d68f64185", + "authors": [ + { + "n": "Frostime", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11774.json b/doc/sphinxext/prs/11774.json new file mode 100644 index 00000000000..1a8b477c262 --- /dev/null +++ b/doc/sphinxext/prs/11774.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b4eecda8d66ac083554db8ee1c925b98fdc2a402", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/io/reference.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11777.json b/doc/sphinxext/prs/11777.json new file mode 100644 index 00000000000..f202ab761c0 --- /dev/null +++ b/doc/sphinxext/prs/11777.json @@ -0,0 +1,19 @@ +{ 
+ "merge_commit_sha": "5d452c6a091f05d83055c9eef202008b34db2158", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11778.json b/doc/sphinxext/prs/11778.json new file mode 100644 index 00000000000..de69ce83036 --- /dev/null +++ b/doc/sphinxext/prs/11778.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a3e98cea745b75665b7692161629937f6dc65921", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 13, + "d": 0 + }, + "mne/utils/spectrum.py": { + "a": 16, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11779.json b/doc/sphinxext/prs/11779.json new file mode 100644 index 00000000000..aadd709ba13 --- /dev/null +++ b/doc/sphinxext/prs/11779.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "30800ad71edcd1ae569c39083cd8d7dcc01c92ee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/artifact_detection.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11781.json b/doc/sphinxext/prs/11781.json new file mode 100644 index 00000000000..9aff0d033d1 --- /dev/null +++ b/doc/sphinxext/prs/11781.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "2a0b111349380429dcd5cab8abf1a211fd5eb408", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 7, + "d": 1 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 0, + "d": 1 + }, + "mne/conftest.py": { + "a": 4, + "d": 0 + }, + "mne/io/tag.py": { + "a": 22, + "d": 22 + }, + "mne/viz/tests/test_topomap.py": { + "a": 10, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 36, + "d": 21 + }, + "tools/azure_dependencies.sh": { + "a": 8, + "d": 10 + }, + "tools/github_actions_dependencies.sh": { + "a": 5, + "d": 9 + }, + "tools/github_actions_test.sh": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11783.json b/doc/sphinxext/prs/11783.json new file mode 100644 index 00000000000..ad2eb3416f0 --- /dev/null +++ b/doc/sphinxext/prs/11783.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4cf0d1d5ca692cf8426933ef5e683b648b04203d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/datasets/eegbci/eegbci.py": { + "a": 38, + "d": 41 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11786.json b/doc/sphinxext/prs/11786.json new file mode 100644 index 00000000000..5219a9ce677 --- /dev/null +++ b/doc/sphinxext/prs/11786.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "413e4a38601ce36e47018e9e4144d62a9d1c7a8e", + "authors": [ + { + "n": "Samuel Louviot", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11787.json b/doc/sphinxext/prs/11787.json new file mode 100644 index 00000000000..410b726fd7a --- /dev/null +++ b/doc/sphinxext/prs/11787.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5608da261cc18f4813c1165baa8a6d5f64a1ea8c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + 
}, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11789.json b/doc/sphinxext/prs/11789.json new file mode 100644 index 00000000000..779d7285ec2 --- /dev/null +++ b/doc/sphinxext/prs/11789.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e28943603444353511bea8f22f5646153b7ec903", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 0, + "d": 2 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11792.json b/doc/sphinxext/prs/11792.json new file mode 100644 index 00000000000..913852fc2f0 --- /dev/null +++ b/doc/sphinxext/prs/11792.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8051f6ddf68a797ec0c9e1801aea3c73808bba03", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 15, + "d": 11 + }, + "mne/viz/topomap.py": { + "a": 31, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11796.json b/doc/sphinxext/prs/11796.json new file mode 100644 index 00000000000..c2e37392877 --- /dev/null +++ b/doc/sphinxext/prs/11796.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "926bec285c34c0fbdcad7b09ac4f550af8288e8a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/events.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 35, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11798.json b/doc/sphinxext/prs/11798.json new file mode 100644 index 00000000000..dcd30084b72 --- /dev/null +++ b/doc/sphinxext/prs/11798.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "c9d20060d74b8f384aedfeb02c122ca7c31986fa", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + } + ], + "changes": { + "doc/api/visualization.rst": { + "a": 15, + "d": 0 + }, + "doc/documentation/datasets.rst": { + "a": 23, + "d": 5 + }, + "examples/visualization/eyetracking_plot_heatmap.py": { + "a": 89, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 4, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + }, + "mne/viz/eyetracking/__init__.py": { + "a": 5, + "d": 0 + }, + "mne/viz/eyetracking/heatmap.py": { + "a": 155, + "d": 0 + }, + "mne/viz/eyetracking/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/viz/eyetracking/tests/test_heatmap.py": { + "a": 35, + "d": 0 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 23, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11801.json b/doc/sphinxext/prs/11801.json new file mode 100644 index 00000000000..45a1ebaecfe --- /dev/null +++ b/doc/sphinxext/prs/11801.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "fc819143b28298c50c8d9de9555a02474fbe3bc9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + 
"mne/annotations.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_show_fiff.py": { + "a": 10, + "d": 1 + }, + "mne/io/constants.py": { + "a": 4, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 2 + }, + "mne/io/open.py": { + "a": 45, + "d": 21 + }, + "mne/io/tag.py": { + "a": 41, + "d": 40 + }, + "mne/io/tests/test_constants.py": { + "a": 7, + "d": 0 + }, + "mne/io/tree.py": { + "a": 6, + "d": 12 + }, + "mne/io/write.py": { + "a": 26, + "d": 89 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11803.json b/doc/sphinxext/prs/11803.json new file mode 100644 index 00000000000..014e1eb74fa --- /dev/null +++ b/doc/sphinxext/prs/11803.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "db07d55c6ed0b09c6e3f31568b096070f5886a98", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/time_frequency.rst": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 12, + "d": 0 + }, + "mne/epochs.py": { + "a": 7, + "d": 30 + }, + "mne/time_frequency/__init__.py": { + "a": 7, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 143, + "d": 4 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 79, + "d": 24 + }, + "mne/utils/docs.py": { + "a": 17, + "d": 0 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 9, + "d": 39 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11804.json b/doc/sphinxext/prs/11804.json new file mode 100644 index 00000000000..2d4b1338487 --- /dev/null +++ b/doc/sphinxext/prs/11804.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9b3a4d38a252e8e26980135229f68ca90d0fe359", + "authors": [ + { + "n": "Daniel Tse", + "e": "xiezhibin.0.0.superman@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11806.json b/doc/sphinxext/prs/11806.json new file mode 100644 index 00000000000..42391a43021 --- /dev/null +++ b/doc/sphinxext/prs/11806.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9a2758d708f23e4e67a1d0205fe645a6d9f77aea", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11808.json b/doc/sphinxext/prs/11808.json new file mode 100644 index 00000000000..3f03c9bbd3e --- /dev/null +++ b/doc/sphinxext/prs/11808.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1b4f6ff4e169c93c6abd2fe858e4985d95e48626", + "authors": [ + { + "n": "Joshua Calder-Travis", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11814.json b/doc/sphinxext/prs/11814.json new file mode 100644 index 00000000000..122ef51d178 --- /dev/null +++ b/doc/sphinxext/prs/11814.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6e5d6e87ab83dd62c8e1cd95b9f8691b43fb9774", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/45_projectors_background.py": { + "a": 1, + 
"d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11815.json b/doc/sphinxext/prs/11815.json new file mode 100644 index 00000000000..3b8555497fd --- /dev/null +++ b/doc/sphinxext/prs/11815.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "57146eedf8779c50745677bed832f8481a9c5a86", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 1 + }, + ".github/workflows/tests.yml": { + "a": 4, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 3, + "d": 37 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 11, + "d": 4 + }, + "tools/github_actions_env_vars.sh": { + "a": 0, + "d": 4 + }, + "tools/github_actions_test.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11819.json b/doc/sphinxext/prs/11819.json new file mode 100644 index 00000000000..778d49a2b83 --- /dev/null +++ b/doc/sphinxext/prs/11819.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7b2304840405f1d7956494c735c91d8bff094e7e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/_static/js/contrib-avatars.js": { + "a": 51, + "d": 0 + }, + "doc/_static/style.css": { + "a": 11, + "d": 2 + }, + "doc/_templates/layout.html": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1182.json b/doc/sphinxext/prs/1182.json new file mode 100644 index 00000000000..f94f71fa069 --- /dev/null +++ b/doc/sphinxext/prs/1182.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f408543ac09f99039417c65d8f6dece33d4675ed", + "authors": [ + { + "n": "Ross Maddox", + "e": "rkmaddox@uw.edu" + } + ], + "changes": { + "mne/epochs.py": { + "a": 19, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11821.json b/doc/sphinxext/prs/11821.json new file mode 100644 index 00000000000..6a9dff169cd --- /dev/null +++ b/doc/sphinxext/prs/11821.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "66c40caea1fd343fbe49a34815542065ceecdf3d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 16, + "d": 0 + }, + "doc/conf.py": { + "a": 5, + "d": 1 + }, + "mne/cov.py": { + "a": 8, + "d": 0 + }, + "mne/io/eyelink/eyelink.py": { + "a": 7, + "d": 3 + }, + "mne/io/proj.py": { + "a": 10, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 12, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 8 + }, + "mne/viz/epochs.py": { + "a": 5, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 62, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11822.json b/doc/sphinxext/prs/11822.json new file mode 100644 index 00000000000..76f948cd493 --- /dev/null +++ b/doc/sphinxext/prs/11822.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "68e7272563fa88c2f0bd6d3e551eac9c51c2d07d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 9, + "d": 12 + }, + "mne/io/egi/egimff.py": { + "a": 2, + "d": 13 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 21, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11823.json b/doc/sphinxext/prs/11823.json new file mode 100644 index 00000000000..c0b18074426 --- /dev/null +++ 
b/doc/sphinxext/prs/11823.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cc1d1825c41144c8c4c3dd1cbb44f8af7bf78e9b", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 9, + "d": 65 + }, + "mne/io/eyelink/eyelink.py": { + "a": 180, + "d": 182 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 100, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11826.json b/doc/sphinxext/prs/11826.json new file mode 100644 index 00000000000..edd7538a278 --- /dev/null +++ b/doc/sphinxext/prs/11826.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "fc88fe51565210144b4df4784352922f99799bab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 4, + "d": 2 + }, + "mne/io/ctf/ctf.py": { + "a": 8, + "d": 10 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 0 + }, + "mne/io/egi/egimff.py": { + "a": 2, + "d": 0 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 9, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 6, + "d": 0 + }, + "mne/io/hitachi/hitachi.py": { + "a": 181, + "d": 186 + }, + "mne/io/kit/kit.py": { + "a": 71, + "d": 70 + }, + "mne/io/nihon/nihon.py": { + "a": 1, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 31, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11828.json b/doc/sphinxext/prs/11828.json new file mode 100644 index 00000000000..4afafa54e89 --- /dev/null +++ b/doc/sphinxext/prs/11828.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "74f3f18c82d3548e12833712ac6fd6074f1a3005", + "authors": [ + { + "n": "Jacob Woessner", + "e": "Woessner.jacob@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11829.json b/doc/sphinxext/prs/11829.json new file mode 100644 index 00000000000..0f8d723c9f6 --- /dev/null +++ b/doc/sphinxext/prs/11829.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1a49303d74abba32ac216f88d040e66789a0d02b", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/install/mne_tools_suite.rst": { + "a": 5, + "d": 4 + }, + "doc/links.inc": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11830.json b/doc/sphinxext/prs/11830.json new file mode 100644 index 00000000000..dacbd7eed3f --- /dev/null +++ b/doc/sphinxext/prs/11830.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bc94f08ab08c572cf0597db96003ee9f152f10a6", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 85, + "d": 55 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11836.json b/doc/sphinxext/prs/11836.json new file mode 100644 index 
00000000000..0551234ff3f --- /dev/null +++ b/doc/sphinxext/prs/11836.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0fd4ded6b773a34cdeee7b17a8c39a6ac93940ba", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11837.json b/doc/sphinxext/prs/11837.json new file mode 100644 index 00000000000..163e3510897 --- /dev/null +++ b/doc/sphinxext/prs/11837.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "a560999215de1d58af57660cce08f813fac738a9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 0, + "d": 4 + }, + "mne/event.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 8, + "d": 9 + }, + "mne/rank.py": { + "a": 1, + "d": 1 + }, + "tools/github_actions_test.sh": { + "a": 2, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11838.json b/doc/sphinxext/prs/11838.json new file mode 100644 index 00000000000..abda6431666 --- /dev/null +++ b/doc/sphinxext/prs/11838.json @@ -0,0 +1,579 @@ +{ + "merge_commit_sha": "dea8f8728eef1832ba6dc750e447fc0799f46f88", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 269, + "d": 222 + }, + "mne/_fiff/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/meas_info.py": { + "a": 2, + "d": 3 + }, + "mne/_fiff/open.py": { + "a": 2, + "d": 3 + }, + "mne/_fiff/pick.py": { + "a": 2, + "d": 0 + }, + "mne/_fiff/tag.py": { + "a": 3, + "d": 5 + }, + "mne/_fiff/write.py": { + "a": 4, + "d": 5 + }, + "mne/_freesurfer.py": { + "a": 2, + "d": 1 + }, + "mne/_ola.py": { + "a": 1, + "d": 3 + }, + "mne/annotations.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/__init__.py": { + "a": 32, + "d": 17 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 2 + }, + "mne/bem.py": { + "a": 1, + "d": 5 + }, + "mne/channels/__init__.py": { + "a": 47, + "d": 78 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 11 + }, + "mne/channels/interpolation.py": { + "a": 3, + "d": 4 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 3 + }, + "mne/channels/montage.py": { + "a": 7, + "d": 3 + }, + "mne/channels/tests/test_layout.py": { + "a": 3, + "d": 2 + }, + "mne/chpi.py": { + "a": 5, + "d": 8 + }, + "mne/commands/__init__.py": { + "a": 7, + "d": 1 + }, + "mne/coreg.py": { + "a": 2, + "d": 4 + }, + "mne/cov.py": { + "a": 4, + "d": 4 + }, + "mne/cuda.py": { + "a": 1, + "d": 5 + }, + "mne/datasets/__init__.py": { + "a": 42, + "d": 69 + }, + "mne/datasets/limo/limo.py": { + "a": 1, + "d": 3 + }, + "mne/decoding/__init__.py": { + "a": 23, + "d": 16 + }, + "mne/decoding/csp.py": { + "a": 4, + "d": 7 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/receptive_field.py": { + "a": 3, + "d": 5 + }, + "mne/decoding/ssd.py": { + "a": 5, + "d": 7 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 3, + "d": 7 + }, + "mne/decoding/transformer.py": { + "a": 7, + "d": 2 + }, + "mne/dipole.py": { + "a": 3, + "d": 5 + }, + "mne/epochs.py": { + "a": 15, + "d": 8 + }, + "mne/evoked.py": { + "a": 7, + "d": 
4 + }, + "mne/export/__init__.py": { + "a": 12, + "d": 2 + }, + "mne/filter.py": { + "a": 29, + "d": 49 + }, + "mne/fixes.py": { + "a": 26, + "d": 74 + }, + "mne/forward/__init__.py": { + "a": 52, + "d": 45 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 7 + }, + "mne/gui/__init__.py": { + "a": 6, + "d": 283 + }, + "mne/gui/_coreg.py": { + "a": 6, + "d": 5 + }, + "mne/gui/_gui.py": { + "a": 284, + "d": 0 + }, + "mne/html_templates/__init__.py": { + "a": 7, + "d": 1 + }, + "mne/html_templates/_templates.py": { + "a": 20, + "d": 15 + }, + "mne/inverse_sparse/__init__.py": { + "a": 10, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 64, + "d": 84 + }, + "mne/io/_fiff_wrap.py": { + "a": 14, + "d": 0 + }, + "mne/io/_read_raw.py": { + "a": 69, + "d": 63 + }, + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 7, + "d": 9 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 3 + }, + "mne/io/eeglab/_eeglab.py": { + "a": 7, + "d": 12 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 1 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 4, + "d": 2 + }, + "mne/io/fiff/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/test_read_raw.py": { + "a": 2, + "d": 2 + }, + "mne/label.py": { + "a": 2, + "d": 7 + }, + "mne/minimum_norm/__init__.py": { + "a": 34, + "d": 26 + }, + "mne/minimum_norm/inverse.py": { + "a": 4, + "d": 7 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 3, + "d": 2 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 3, + "d": 1 + }, + "mne/morph.py": { + "a": 2, + "d": 12 + }, + "mne/morph_map.py": { + "a": 2, + "d": 3 + }, + "mne/parallel.py": { + "a": 9, + "d": 2 + }, + "mne/preprocessing/__init__.py": { + "a": 50, + "d": 45 + }, + "mne/preprocessing/_annotate_amplitude.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/_csd.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/_regress.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 4, + "d": 7 + }, + "mne/preprocessing/bads.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/ctps_.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 7, + "d": 15 + }, + "mne/preprocessing/ieeg/_projection.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/infomax_.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/interpolate.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 8 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/realign.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/stim.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/xdawn.py": { + "a": 2, + "d": 5 + }, + "mne/rank.py": { + "a": 1, + "d": 2 + }, + "mne/report/__init__.py": { + "a": 11, + "d": 1 + }, + "mne/report/report.py": { + "a": 12, + "d": 20 + }, + "mne/simulation/__init__.py": { + "a": 16, + "d": 5 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 2 + }, + "mne/simulation/metrics/metrics.py": { + "a": 3, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 15 + 
}, + "mne/source_space.py": { + "a": 10, + "d": 17 + }, + "mne/stats/__init__.py": { + "a": 31, + "d": 19 + }, + "mne/stats/_adjacency.py": { + "a": 1, + "d": 2 + }, + "mne/stats/cluster_level.py": { + "a": 6, + "d": 18 + }, + "mne/stats/parametric.py": { + "a": 7, + "d": 11 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 6 + }, + "mne/surface.py": { + "a": 6, + "d": 14 + }, + "mne/tests/test_import_nesting.py": { + "a": 12, + "d": 10 + }, + "mne/time_frequency/__init__.py": { + "a": 48, + "d": 29 + }, + "mne/time_frequency/_stft.py": { + "a": 2, + "d": 6 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 6 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 4 + }, + "mne/time_frequency/multitaper.py": { + "a": 4, + "d": 8 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/spectrum.py": { + "a": 4, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 6 + }, + "mne/transforms.py": { + "a": 4, + "d": 11 + }, + "mne/utils/__init__.py": { + "a": 204, + "d": 211 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 17, + "d": 0 + }, + "mne/utils/linalg.py": { + "a": 3, + "d": 13 + }, + "mne/utils/misc.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 6 + }, + "mne/viz/_3d.py": { + "a": 5, + "d": 7 + }, + "mne/viz/__init__.py": { + "a": 102, + "d": 87 + }, + "mne/viz/_brain/_brain.py": { + "a": 4, + "d": 7 + }, + "mne/viz/_dipole.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 6, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 4, + "d": 2 + }, + "mne/viz/montage.py": { + "a": 5, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 5, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 38, + "d": 21 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 4 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "requirements_base.txt": { + "a": 1, + "d": 0 + }, + "tools/github_actions_env_vars.sh": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11839.json b/doc/sphinxext/prs/11839.json new file mode 100644 index 00000000000..e385589d406 --- /dev/null +++ b/doc/sphinxext/prs/11839.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0d4550b77f2e546f37e69ba4c709cb71132b1848", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + ".git-blame-ignore-revs": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 413 + }, + "mne/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/evoked.py": { + "a": 3, + "d": 1 + }, + "mne/io/base.py": { + "a": 9, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 424, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1184.json b/doc/sphinxext/prs/1184.json new file mode 100644 index 00000000000..c1bf6d1e9e8 --- /dev/null +++ b/doc/sphinxext/prs/1184.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "50fe6bb67d9a85d1d0c4579060b67c50a31c3ef7", + "authors": [ + { + "n": "Tanay Gahlot", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11841.json b/doc/sphinxext/prs/11841.json new file mode 100644 index 00000000000..88142ad8cff --- /dev/null +++ 
b/doc/sphinxext/prs/11841.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "269d06d57d2adb24449539e6d99e569464ea5d9b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/_read_raw.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11842.json b/doc/sphinxext/prs/11842.json new file mode 100644 index 00000000000..cc7403489a6 --- /dev/null +++ b/doc/sphinxext/prs/11842.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4f8295490c731d75b6763d710242c789e09c3ce2", + "authors": [ + { + "n": "Hamza Abdelhedi", + "e": "hamzaabdelhedi18@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11844.json b/doc/sphinxext/prs/11844.json new file mode 100644 index 00000000000..4c90a0fdd23 --- /dev/null +++ b/doc/sphinxext/prs/11844.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1bd2cbcce26c5dbbdc5eb5ab0c8c0f018ba4a893", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 5, + "d": 4 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 4, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 0 + }, + "requirements_testing.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11846.json b/doc/sphinxext/prs/11846.json new file mode 100644 index 00000000000..ffc1e507ec3 --- /dev/null +++ b/doc/sphinxext/prs/11846.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "64f384608b63ab6962d1e80c8c9ff1c2453d41b4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/overview/faq.rst": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 28, + "d": 104 + }, + "mne/decoding/receptive_field.py": { + "a": 16, + "d": 6 + }, + "mne/decoding/search_light.py": { + "a": 36, + "d": 10 + }, + "mne/decoding/tests/test_base.py": { + "a": 24, + "d": 2 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 39, + "d": 0 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 25, + "d": 0 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 15, + "d": 3 + }, + "mne/fixes.py": { + "a": 1, + "d": 32 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11847.json b/doc/sphinxext/prs/11847.json new file mode 100644 index 00000000000..5a7e024f4e3 --- /dev/null +++ b/doc/sphinxext/prs/11847.json @@ -0,0 +1,219 @@ +{ + "merge_commit_sha": "b5a9a25d3fac7e4058b91efcadf6561b85aa45a3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_external.py": { + "a": 2, + "d": 4 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 5 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 4, + "d": 8 + }, + "mne/decoding/tests/test_base.py": { + "a": 4, + "d": 8 + }, + 
"mne/decoding/tests/test_csp.py": { + "a": 2, + "d": 3 + }, + "mne/decoding/tests/test_ems.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 6, + "d": 7 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 3, + "d": 5 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 2, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 6, + "d": 19 + }, + "mne/gui/tests/test_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 2 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 3, + "d": 3 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 2, + "d": 11 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 8, + "d": 13 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/test_what.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/eyetracking/tests/test_pupillometry.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 29 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 2, + "d": 4 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 3, + "d": 4 + }, + "mne/report/tests/test_report.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/metrics/tests/test_metrics.py": { + "a": 5, + "d": 6 + }, + "mne/stats/tests/test_adjacency.py": { + "a": 2, + "d": 2 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 4, + "d": 4 + }, + "mne/stats/tests/test_regression.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_annotations.py": { + "a": 2, + "d": 5 + }, + "mne/tests/test_cov.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 3, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 13, + "d": 18 + }, + "mne/tests/test_evoked.py": { + "a": 4, + "d": 6 + }, + "mne/tests/test_label.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_morph.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 14, + "d": 16 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 3, + "d": 4 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 3, + "d": 5 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 10, + "d": 14 + }, + "mne/utils/_testing.py": { + "a": 47, + "d": 32 + }, + "mne/utils/tests/test_config.py": { + "a": 1, + "d": 2 + }, + "mne/utils/tests/test_numerics.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 3, + "d": 7 + }, + "mne/viz/tests/test_scraper.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11849.json b/doc/sphinxext/prs/11849.json new file mode 100644 index 00000000000..e6cd30975d8 --- /dev/null +++ b/doc/sphinxext/prs/11849.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "6e9ab0782890c5a3a6aac39159ccdb55a8e61223", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 6, + "d": 6 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + }, + 
"mne/forward/forward.py": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 3, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 1, + "d": 0 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11851.json b/doc/sphinxext/prs/11851.json new file mode 100644 index 00000000000..c01546cc730 --- /dev/null +++ b/doc/sphinxext/prs/11851.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2a39a3aa20eb52fa50fe40e99797f7dac3d99bf7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 16, + "d": 7 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 243, + "d": 249 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11852.json b/doc/sphinxext/prs/11852.json new file mode 100644 index 00000000000..50266e31f5a --- /dev/null +++ b/doc/sphinxext/prs/11852.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "23542f5f1c6ebcc52c50471a0dbede85cd04a7b2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 9, + "d": 0 + }, + "mne/source_space.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11853.json b/doc/sphinxext/prs/11853.json new file mode 100644 index 00000000000..6b9a953e43f --- /dev/null +++ b/doc/sphinxext/prs/11853.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "a6c07cb5cdf916ee345409ccc33c56db0043dfaf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/dipole.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 18, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/utils/mixin.py": { + "a": 85, + "d": 73 + }, + "mne/utils/tests/test_mixin.py": { + "a": 42, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11855.json b/doc/sphinxext/prs/11855.json new file mode 100644 index 00000000000..ce7ec8d3678 --- /dev/null +++ b/doc/sphinxext/prs/11855.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6f9b03c3811a81f81853f6bef88d13f985fd40c6", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 9, + "d": 10 + }, + "mne/viz/_mpl_figure.py": { + "a": 14, + "d": 24 + }, + "mne/viz/tests/test_raw.py": { + "a": 83, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11857.json b/doc/sphinxext/prs/11857.json new file mode 100644 index 00000000000..12e7668bf50 --- /dev/null +++ b/doc/sphinxext/prs/11857.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "69fbf81a4594acc5b42c024e9c9ead954c9ab443", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 4, + "d": 4 + }, + "mne/viz/_scraper.py": { + "a": 4, + "d": 2 + }, + "tutorials/clinical/60_sleep.py": { + "a": 4, + "d": 2 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/11858.json b/doc/sphinxext/prs/11858.json new file mode 100644 index 00000000000..55bcc4f2792 --- /dev/null +++ b/doc/sphinxext/prs/11858.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bb6d542fdaa8efc85aa87255c886168bface24a9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 38, + "d": 19 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 1 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11859.json b/doc/sphinxext/prs/11859.json new file mode 100644 index 00000000000..fb572f1234f --- /dev/null +++ b/doc/sphinxext/prs/11859.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "193e41093e8f9c888c1d56abbc86267200c15e53", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 65, + "d": 9 + }, + ".cirrus.yml": { + "a": 0, + "d": 44 + }, + "azure-pipelines.yml": { + "a": 11, + "d": 10 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tools/cirrus_dependencies.sh": { + "a": 0, + "d": 5 + }, + "tools/cirrus_install_python.sh": { + "a": 0, + "d": 6 + }, + "tools/get_testing_version.sh": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11862.json b/doc/sphinxext/prs/11862.json new file mode 100644 index 00000000000..d74d53ec730 --- /dev/null +++ b/doc/sphinxext/prs/11862.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "34413d21ed2eae9bc6f411f44eb76ffd98e1b16f", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11863.json b/doc/sphinxext/prs/11863.json new file mode 100644 index 00000000000..e4d8d3f4402 --- /dev/null +++ b/doc/sphinxext/prs/11863.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7ce2b2c9a7bf7d3fb4b98633fc89f3b5653d66d3", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "examples/visualization/sensor_noise_level.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11864.json b/doc/sphinxext/prs/11864.json new file mode 100644 index 00000000000..b0fdfadb994 --- /dev/null +++ b/doc/sphinxext/prs/11864.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "176b78c517e20c0b6b9ae98382395a91e78aa322", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/js/contrib-avatars.js": { + "a": 5, + "d": 3 + }, + "tools/generate_codemeta.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11865.json b/doc/sphinxext/prs/11865.json new file mode 100644 index 00000000000..797c7c46de2 --- /dev/null +++ b/doc/sphinxext/prs/11865.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "86dd11d988521abdae531a82a482c243eee28cca", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 2 + }, + ".github/workflows/circle_artifacts.yml": { + "a": 1, + "d": 1 
+ }, + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + }, + ".pre-commit-config.yaml": { + "a": 7, + "d": 0 + }, + ".yamllint.yml": { + "a": 16, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "codecov.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11867.json b/doc/sphinxext/prs/11867.json new file mode 100644 index 00000000000..2b9eba4859d --- /dev/null +++ b/doc/sphinxext/prs/11867.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7c4e27c65c05e2252005e827b9c13e5ad91077c3", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 3, + "d": 2 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11868.json b/doc/sphinxext/prs/11868.json new file mode 100644 index 00000000000..8394f9103e3 --- /dev/null +++ b/doc/sphinxext/prs/11868.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "12395f9d9cf6ea3c72b225b62e052dd0d17d9889", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 490, + "d": 490 + }, + ".github/workflows/tests.yml": { + "a": 27, + "d": 27 + }, + ".pre-commit-config.yaml": { + "a": 37, + "d": 37 + }, + ".yamllint.yml": { + "a": 0, + "d": 8 + }, + "azure-pipelines.yml": { + "a": 286, + "d": 286 + }, + "environment.yml": { + "a": 56, + "d": 56 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11869.json b/doc/sphinxext/prs/11869.json new file mode 100644 index 00000000000..e960d7a5bfa --- /dev/null +++ b/doc/sphinxext/prs/11869.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6e0f206595954c7815b2823a6dafb20c940586c9", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11872.json b/doc/sphinxext/prs/11872.json new file mode 100644 index 00000000000..23c8b59c334 --- /dev/null +++ b/doc/sphinxext/prs/11872.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "baf1ad576c54918c95182a047e0a23183e64374d", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "mne/io/eyelink/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eyetracking/calibration.py": { + "a": 2, + "d": 7 + }, + "mne/preprocessing/eyetracking/tests/test_calibration.py": { + "a": 11, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11875.json b/doc/sphinxext/prs/11875.json new file mode 100644 index 00000000000..d72d5ada602 --- /dev/null +++ b/doc/sphinxext/prs/11875.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "2188c25a1534535643f3083e0c01beca2bd13630", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/_rap_music.py": { + "a": 4, + "d": 7 + }, + "mne/bem.py": { + "a": 2, + "d": 4 + }, + "mne/cov.py": { + "a": 2, + "d": 3 + }, + "mne/dipole.py": { + "a": 3, + "d": 7 + }, + "mne/forward/_field_interpolation.py": { + "a": 2, + "d": 3 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 2, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 2, + "d": 3 + }, + "mne/io/proj.py": { + "a": 2, + "d": 3 + }, + "mne/label.py": { + "a": 2, + "d": 3 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 4 + }, + 
"mne/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 3, + "d": 2 + }, + "mne/proj.py": { + "a": 6, + "d": 9 + }, + "mne/tests/test_cov.py": { + "a": 5, + "d": 7 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11876.json b/doc/sphinxext/prs/11876.json new file mode 100644 index 00000000000..9e416aed8a6 --- /dev/null +++ b/doc/sphinxext/prs/11876.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "9e85b2ec8887d3c918c82654daf56cac67076c6b", + "authors": [ + { + "n": "Dmitrii Altukhov", + "e": "dm.altukhov@ya.ru" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 11, + "d": 24 + }, + "mne/io/utils.py": { + "a": 15, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 40, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11877.json b/doc/sphinxext/prs/11877.json new file mode 100644 index 00000000000..d7f848b4425 --- /dev/null +++ b/doc/sphinxext/prs/11877.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2e357a6f9131208a0e5f69f6ca161f76dbf20f54", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "mne/io/eyelink/eyelink.py": { + "a": 6, + "d": 6 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11878.json b/doc/sphinxext/prs/11878.json new file mode 100644 index 00000000000..212c53961b1 --- /dev/null +++ b/doc/sphinxext/prs/11878.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "522cf44e5327979ac734b99fb87792000aba3a04", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 4, + "d": 0 + }, + "mne/conftest.py": { + "a": 6, + "d": 2 + }, + "mne/viz/_mpl_figure.py": { + "a": 3, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11880.json b/doc/sphinxext/prs/11880.json new file mode 100644 index 00000000000..9dfa5cc2755 --- /dev/null +++ b/doc/sphinxext/prs/11880.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "039122abe7a8e744ac1e35a8a311a347351da47d", + "authors": [ + { + "n": "Dmitrii Altukhov", + "e": "dm.altukhov@ya.ru" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_epochs.py": { + "a": 111, + "d": 64 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11883.json b/doc/sphinxext/prs/11883.json new file mode 100644 index 00000000000..8d64efdda3e --- /dev/null +++ b/doc/sphinxext/prs/11883.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7c5ab0a9fb4c2e5f2fd3cfa05cef9d3ec05b94f8", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 1, + "d": 1 + }, + 
"mne/tests/test_source_estimate.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11884.json b/doc/sphinxext/prs/11884.json new file mode 100644 index 00000000000..ca84ee2b273 --- /dev/null +++ b/doc/sphinxext/prs/11884.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "75c83c3fa8be3a6f3a950dcd35ba222d460eb5b4", + "authors": [ + { + "n": "Dmitrii Altukhov", + "e": "dm.altukhov@ya.ru" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 100, + "d": 40 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11886.json b/doc/sphinxext/prs/11886.json new file mode 100644 index 00000000000..9b85394e708 --- /dev/null +++ b/doc/sphinxext/prs/11886.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "4dd4db9647338dab31c1ca8847331502efcea3b7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".mailmap": { + "a": 4, + "d": 0 + }, + "CITATION.cff": { + "a": 39, + "d": 20 + }, + "SECURITY.md": { + "a": 3, + "d": 3 + }, + "codemeta.json": { + "a": 102, + "d": 48 + }, + "doc/Makefile": { + "a": 1, + "d": 1 + }, + "doc/_static/versions.json": { + "a": 7, + "d": 2 + }, + "doc/changes/0.17.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.8.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/1.5.inc": { + "a": 30, + "d": 3 + }, + "doc/cited.rst": { + "a": 3, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11887.json b/doc/sphinxext/prs/11887.json new file mode 100644 index 00000000000..f497890aa74 --- /dev/null +++ b/doc/sphinxext/prs/11887.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "84da5551c77a05596dc35362f004f98375f1a8de", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 6, + "d": 6 + }, + "doc/changes/latest.inc": { + "a": 34, + "d": 0 + }, + "doc/changes/latest.inc.template": { + "a": 1, + "d": 1 + }, + "doc/install/installers.rst": { + "a": 6, + "d": 6 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/io/eyelink/eyelink.py": { + "a": 12, + "d": 42 + }, + "mne/utils/__init__.py": { + "a": 0, + "d": 10 + }, + "mne/utils/_testing.py": { + "a": 0, + "d": 45 + }, + "mne/utils/check.py": { + "a": 0, + "d": 7 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 9 + }, + "mne/viz/tests/test_epochs.py": { + "a": 22, + "d": 25 + }, + "requirements_base.txt": { + "a": 3, + "d": 3 + }, + "tools/github_actions_env_vars.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11888.json b/doc/sphinxext/prs/11888.json new file mode 100644 index 00000000000..37994e78494 --- /dev/null +++ b/doc/sphinxext/prs/11888.json @@ -0,0 +1,243 @@ +{ + "merge_commit_sha": "921e02701b3c6fede08ba77cef321ee3a064de96", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/_static/versions.json": { + "a": 2, + "d": 37 + }, + "doc/changes/devel.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/devel.rst.template": { + "a": 2, + "d": 2 + }, + "doc/changes/v0.1.rst": { + 
"a": 0, + "d": 0 + }, + "doc/changes/v0.10.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.11.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.12.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.13.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.14.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.15.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.16.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.17.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.18.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.19.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.2.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.20.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.21.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.22.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.23.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v0.24.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.3.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.4.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.5.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.6.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.7.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.8.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v0.9.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v1.0.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v1.1.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v1.2.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v1.3.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v1.4.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/v1.5.rst": { + "a": 0, + "d": 0 + }, + "doc/conf.py": { + "a": 5, + "d": 0 + }, + "doc/funding.rst": { + "a": 0, + "d": 2 + }, + "doc/glossary.rst": { + "a": 0, + "d": 2 + }, + "doc/install/advanced.rst": { + "a": 0, + "d": 2 + }, + "doc/install/check_installation.rst": { + "a": 0, + "d": 2 + }, + "doc/install/contributing.rst": { + "a": 0, + "d": 1 + }, + "doc/install/installers.rst": { + "a": 0, + "d": 2 + }, + "doc/install/manual_install.rst": { + "a": 0, + "d": 2 + }, + "doc/install/manual_install_python.rst": { + "a": 0, + "d": 2 + }, + "doc/install/mne_c.rst": { + "a": 0, + "d": 2 + }, + "doc/install/mne_tools_suite.rst": { + "a": 0, + "d": 2 + }, + "doc/links.inc": { + "a": 9, + "d": 94 + }, + "doc/old_versions/index.rst": { + "a": 18, + "d": 0 + }, + "doc/overview/design_philosophy.rst": { + "a": 0, + "d": 2 + }, + "doc/overview/faq.rst": { + "a": 0, + "d": 2 + }, + "doc/overview/get_help.rst": { + "a": 0, + "d": 2 + }, + "doc/overview/index.rst": { + "a": 0, + "d": 2 + }, + "doc/overview/learn_python.rst": { + "a": 0, + "d": 2 + }, + "doc/overview/matlab.rst": { + "a": 0, + "d": 2 + }, + "doc/overview/people.rst": { + "a": 0, + "d": 2 + }, + "doc/overview/roadmap.rst": { + "a": 0, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 36, + "d": 36 + }, + "tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 2 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11889.json b/doc/sphinxext/prs/11889.json new file mode 100644 index 00000000000..0864757f9c2 --- /dev/null +++ b/doc/sphinxext/prs/11889.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f48b4899b35940d0dc2ece525ca264e0a92658b4", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11890.json b/doc/sphinxext/prs/11890.json new file mode 100644 index 00000000000..744406fcfd3 --- /dev/null +++ 
b/doc/sphinxext/prs/11890.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "6fe2e1322afb6ae5f6816f05d8f85dd78678e1aa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/devel.rst.template": { + "a": 1, + "d": 1 + }, + "doc/changes/v1.3.rst": { + "a": 0, + "d": 9 + }, + "mne/html_templates/report/html.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/report/section.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/report/slider.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/report/js_and_css/report.js": { + "a": 13, + "d": 2 + }, + "mne/report/report.py": { + "a": 156, + "d": 150 + }, + "mne/report/tests/test_report.py": { + "a": 70, + "d": 6 + }, + "tutorials/intro/70_report.py": { + "a": 7, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11891.json b/doc/sphinxext/prs/11891.json new file mode 100644 index 00000000000..1f2a5ff4829 --- /dev/null +++ b/doc/sphinxext/prs/11891.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "f3fac153135d6f2aa2ab53e08a7f443511550711", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 8, + "d": 0 + }, + "doc/visualization.rst": { + "a": 3, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 12, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 238, + "d": 137 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 13, + "d": 69 + }, + "mne/viz/_brain/callback.py": { + "a": 0, + "d": 118 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 60, + "d": 28 + }, + "mne/viz/backends/_abstract.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_ui_events.py": { + "a": 25, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ui_events.py": { + "a": 166, + "d": 42 + }, + "tutorials/visualization/20_ui_events.py": { + "a": 55, + "d": 43 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11896.json b/doc/sphinxext/prs/11896.json new file mode 100644 index 00000000000..de588eab4ba --- /dev/null +++ b/doc/sphinxext/prs/11896.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9e0d1512beb9d51011e2c5a0dd1e0dba2ec292b6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 5 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11903.json b/doc/sphinxext/prs/11903.json new file mode 100644 index 00000000000..fe116224f8b --- /dev/null +++ b/doc/sphinxext/prs/11903.json @@ -0,0 +1,923 @@ +{ + "merge_commit_sha": "1e3206b28b4cc3cd9db05a022b7ea82cde4024e4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 9, + "d": 0 + }, + "doc/file_io.rst": { + "a": 1, + "d": 0 + }, + "doc/sphinxext/mne_substitutions.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 5, + "d": 5 + }, + "mne/_fiff/__init__.py": { + "a": 25, + "d": 0 + }, + "mne/_fiff/_digitization.py": { + "a": 18, + "d": 47 + }, + "mne/_fiff/compensator.py": { + "a": 0, + "d": 0 + }, + "mne/_fiff/constants.py": { + "a": 0, + "d": 0 + }, + "mne/_fiff/ctf_comp.py": { + "a": 0, + 
"d": 0 + }, + "mne/_fiff/matrix.py": { + "a": 0, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 3, + "d": 4 + }, + "mne/_fiff/open.py": { + "a": 0, + "d": 0 + }, + "mne/_fiff/pick.py": { + "a": 0, + "d": 0 + }, + "mne/_fiff/proc_history.py": { + "a": 0, + "d": 0 + }, + "mne/_fiff/proj.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/reference.py": { + "a": 10, + "d": 5 + }, + "mne/_fiff/tag.py": { + "a": 0, + "d": 0 + }, + "mne/_fiff/tests/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/_fiff/tests/test_compensator.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tests/test_constants.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 14, + "d": 36 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 15, + "d": 9 + }, + "mne/_fiff/tests/test_proc_history.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 5, + "d": 4 + }, + "mne/_fiff/tests/test_show_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_utils.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_what.py": { + "a": 0, + "d": 0 + }, + "mne/_fiff/tests/test_write.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tree.py": { + "a": 0, + "d": 0 + }, + "mne/_fiff/utils.py": { + "a": 0, + "d": 0 + }, + "mne/_fiff/what.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/write.py": { + "a": 0, + "d": 0 + }, + "mne/_freesurfer.py": { + "a": 8, + "d": 3 + }, + "mne/annotations.py": { + "a": 7, + "d": 7 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/bem.py": { + "a": 6, + "d": 6 + }, + "mne/channels/channels.py": { + "a": 11, + "d": 14 + }, + "mne/channels/interpolation.py": { + "a": 3, + "d": 3 + }, + "mne/channels/layout.py": { + "a": 5, + "d": 5 + }, + "mne/channels/montage.py": { + "a": 5, + "d": 5 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_layout.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 2, + "d": 2 + }, + "mne/chpi.py": { + "a": 9, + "d": 8 + }, + "mne/conftest.py": { + "a": 3, + "d": 1 + }, + "mne/coreg.py": { + "a": 4, + "d": 4 + }, + "mne/cov.py": { + "a": 28, + "d": 24 + }, + "mne/datasets/limo/limo.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ssd.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 4, + "d": 4 + }, + "mne/epochs.py": { + "a": 12, + "d": 12 + }, + "mne/event.py": { + "a": 7, + "d": 7 + }, + "mne/evoked.py": { + "a": 8, + "d": 8 + }, + "mne/export/_egimff.py": { + "a": 1, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 8, + "d": 2 + }, + "mne/filter.py": { + "a": 5, + "d": 6 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 4, + "d": 4 + }, + "mne/forward/_make_forward.py": { + "a": 5, + "d": 4 + }, + "mne/forward/forward.py": { + "a": 14, + "d": 10 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg.py": { + 
"a": 6, + "d": 6 + }, + "mne/gui/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 74, + "d": 24 + }, + "mne/io/_constants.py": { + "a": 16, + "d": 0 + }, + "mne/io/_meas_info.py": { + "a": 10, + "d": 0 + }, + "mne/io/_pick.py": { + "a": 27, + "d": 0 + }, + "mne/io/_proj.py": { + "a": 10, + "d": 0 + }, + "mne/io/_reference.py": { + "a": 10, + "d": 0 + }, + "mne/io/_tag.py": { + "a": 10, + "d": 0 + }, + "mne/io/_utils.py": { + "a": 10, + "d": 0 + }, + "mne/io/_write.py": { + "a": 10, + "d": 0 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 2 + }, + "mne/io/artemis123/artemis123.py": { + "a": 5, + "d": 5 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 23, + "d": 9 + }, + "mne/io/besa/besa.py": { + "a": 1, + "d": 1 + }, + "mne/io/boxy/boxy.py": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 4 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 6, + "d": 4 + }, + "mne/io/bti/read.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 3, + "d": 4 + }, + "mne/io/cnt/cnt.py": { + "a": 5, + "d": 5 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/eeg.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/hc.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 4, + "d": 5 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/trans.py": { + "a": 2, + "d": 2 + }, + "mne/io/curry/curry.py": { + "a": 5, + "d": 5 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 2, + "d": 2 + }, + "mne/io/diff.py": { + "a": 0, + "d": 40 + }, + "mne/io/edf/edf.py": { + "a": 4, + "d": 4 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 5, + "d": 5 + }, + "mne/io/egi/egi.py": { + "a": 3, + "d": 3 + }, + "mne/io/egi/egimff.py": { + "a": 4, + "d": 4 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/eximia/eximia.py": { + "a": 2, + "d": 2 + }, + "mne/io/eyelink/eyelink.py": { + "a": 2, + "d": 2 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 2, + "d": 2 + }, + "mne/io/fieldtrip/utils.py": { + "a": 4, + "d": 4 + }, + "mne/io/fiff/raw.py": { + "a": 13, + "d": 14 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 3 + }, + "mne/io/fil/fil.py": { + "a": 5, + "d": 5 + }, + "mne/io/fil/tests/test_fil.py": { + "a": 1, + "d": 1 + }, + "mne/io/hitachi/hitachi.py": { + "a": 3, + "d": 3 + }, + "mne/io/kit/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 4, + "d": 4 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 5, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/nedf/nedf.py": { + "a": 2, + "d": 2 + }, + "mne/io/nedf/tests/test_nedf.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 4, + "d": 4 + }, + "mne/io/nihon/nihon.py": { + "a": 3, + "d": 3 + }, + "mne/io/nirx/nirx.py": { + "a": 3, + "d": 3 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/nsx/nsx.py": { + "a": 3, + "d": 3 + }, + "mne/io/nsx/tests/test_nsx.py": { + "a": 2, + "d": 2 + }, + "mne/io/persyst/persyst.py": { + "a": 3, + "d": 3 + }, + "mne/io/snirf/_snirf.py": { + "a": 5, + "d": 5 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 
1, + "d": 1 + }, + "mne/io/tests/test_deprecation.py": { + "a": 39, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 7, + "d": 8 + }, + "mne/minimum_norm/inverse.py": { + "a": 13, + "d": 9 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/morph_map.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/_csd.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_css.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/_regress.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/annotate_amplitude.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/eyetracking/_pupillometry.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eyetracking/eyetracking.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/hfc.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 13, + "d": 13 + }, + "mne/preprocessing/ieeg/_projection.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/interpolate.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 9, + "d": 7 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/stim.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_hfc.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_otp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 5, + "d": 5 + }, + "mne/rank.py": { + "a": 5, + "d": 7 + }, + "mne/report/report.py": { + "a": 3, + "d": 2 + }, + "mne/report/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 4, + "d": 3 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 4 + }, + "mne/source_space.py": { + "a": 31, + "d": 20 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 20, + "d": 16 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_rank.py": { + "a": 2, + "d": 2 + }, + 
"mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + }, + "mne/transforms.py": { + "a": 4, + "d": 4 + }, + "mne/utils/_testing.py": { + "a": 4, + "d": 4 + }, + "mne/utils/check.py": { + "a": 6, + "d": 6 + }, + "mne/utils/config.py": { + "a": 2, + "d": 2 + }, + "mne/utils/mixin.py": { + "a": 2, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 3, + "d": 3 + }, + "mne/utils/tests/test_check.py": { + "a": 2, + "d": 2 + }, + "mne/utils/tests/test_numerics.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 4 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_proj.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 7, + "d": 8 + }, + "mne/viz/misc.py": { + "a": 9, + "d": 4 + }, + "mne/viz/montage.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 5, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11904.json b/doc/sphinxext/prs/11904.json new file mode 100644 index 00000000000..3b11c6efa4a --- /dev/null +++ b/doc/sphinxext/prs/11904.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0306101af9f1c9317d1a4cdca574aa6e8f3374d7", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "examples/time_frequency/time_frequency_erds.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11905.json b/doc/sphinxext/prs/11905.json new file mode 100644 index 00000000000..1c7770b6a7a --- /dev/null +++ b/doc/sphinxext/prs/11905.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1ee327014b9389962b70331f41b7de979e01140b", + "authors": [ + { + "n": "Dmitrii Altukhov", + "e": "dm.altukhov@ya.ru" + } + ], + "changes": { + "mne/io/base.py": { + "a": 5, + "d": 4 + }, + "mne/io/utils.py": { + "a": 2, + "d": 2 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11907.json b/doc/sphinxext/prs/11907.json new file mode 100644 index 00000000000..ee4188ee8e9 --- /dev/null +++ b/doc/sphinxext/prs/11907.json @@ -0,0 +1,471 @@ +{ + "merge_commit_sha": "1bed9ddbb8dd20af8c0072735c1f22062330ee8e", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 1 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/ems_filtering.py": { + "a": 2, + "d": 6 + }, + 
"examples/decoding/linear_model_patterns.py": { + "a": 3, + "d": 3 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/custom_inverse_solver.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/gamma_map_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/multi_dipole_model.py": { + "a": 7, + "d": 6 + }, + "examples/inverse/multidict_reweighted_tfmxne.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/rap_music.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/source_space_snr.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/time_frequency_mixed_norm_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/trap_music.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 3, + "d": 3 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 1, + "d": 3 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 1, + "d": 2 + }, + "examples/preprocessing/muscle_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/shift_evoked.py": { + "a": 1, + "d": 2 + }, + "examples/simulation/plot_stc_metrics.py": { + "a": 2, + "d": 2 + }, + "examples/stats/linear_regression_raw.py": { + "a": 6, + "d": 6 + }, + "examples/stats/sensor_permutation_test.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/3d_to_2d.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/evoked_arrowmap.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/_fiff/reference.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 5, + "d": 5 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 5, + "d": 5 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 10, + "d": 15 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 8, + "d": 8 + }, + "mne/beamformer/tests/test_external.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 10, + "d": 10 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 3, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 19, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 1, + "d": 2 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 13, + "d": 13 + }, + "mne/forward/tests/test_forward.py": { + "a": 4, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 3, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 13, + "d": 13 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 8, + "d": 8 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 2, + "d": 2 + }, + 
"mne/preprocessing/tests/test_ica.py": { + "a": 23, + "d": 15 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 18, + "d": 18 + }, + "mne/preprocessing/tests/test_otp.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 2, + "d": 2 + }, + "mne/report/tests/test_report.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/tests/test_raw.py": { + "a": 9, + "d": 13 + }, + "mne/stats/tests/test_regression.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 8, + "d": 8 + }, + "mne/tests/test_dipole.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 13, + "d": 13 + }, + "mne/tests/test_event.py": { + "a": 7, + "d": 8 + }, + "mne/tests/test_evoked.py": { + "a": 9, + "d": 9 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_rank.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 6, + "d": 6 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_check.py": { + "a": 2, + "d": 2 + }, + "mne/utils/tests/test_numerics.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_proj.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 3 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_topo.py": { + "a": 8, + "d": 8 + }, + "mne/viz/tests/test_topomap.py": { + "a": 8, + "d": 8 + }, + "mne/viz/tests/test_utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/15_baseline_regression.py": { + "a": 2, + "d": 2 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 2, + "d": 2 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 1, + "d": 0 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 7, + "d": 8 + }, + "tutorials/simulation/80_dics.py": { + "a": 3, + "d": 6 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 
6, + "d": 11 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 3, + "d": 2 + }, + "tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py": { + "a": 1, + "d": 1 + }, + "tutorials/visualization/10_publication_figure.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11909.json b/doc/sphinxext/prs/11909.json new file mode 100644 index 00000000000..438dbae142f --- /dev/null +++ b/doc/sphinxext/prs/11909.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d64981c5f26cbc85affe0edf3ad66b13521d51e5", + "authors": [ + { + "n": "John Veillette", + "e": "johnv@uchicago.edu" + } + ], + "changes": { + "doc/install/mne_tools_suite.rst": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1191.json b/doc/sphinxext/prs/1191.json new file mode 100644 index 00000000000..ea811dd6990 --- /dev/null +++ b/doc/sphinxext/prs/1191.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2ee88da94eab2add694775971ae11b742c5fe6fc", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/fiff/brainvision/brainvision.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11910.json b/doc/sphinxext/prs/11910.json new file mode 100644 index 00000000000..37df619ca03 --- /dev/null +++ b/doc/sphinxext/prs/11910.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "4b9a778e8eb922d0b2437b818a6d039f2fe0a465", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 534, + "d": 5 + }, + "mne/io/eyelink/eyelink.py": { + "a": 18, + "d": 543 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 2, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/eyetracking/tests/test_pupillometry.py": { + "a": 1, + "d": 3 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 2, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11911.json b/doc/sphinxext/prs/11911.json new file mode 100644 index 00000000000..859e5aadb9f --- /dev/null +++ b/doc/sphinxext/prs/11911.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3e32781376427a053ea04a4b92052e53241b96ab", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11912.json b/doc/sphinxext/prs/11912.json new file mode 100644 index 00000000000..3ef5c54b7d3 --- /dev/null +++ b/doc/sphinxext/prs/11912.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "18aa019be069445a2bdb413a57089ea670b8446f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 4, + "d": 4 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 36, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 1, + "d": 1 + }, + "requirements_testing_extra.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11914.json b/doc/sphinxext/prs/11914.json new file mode 100644 index 00000000000..87ddb428577 --- /dev/null +++ 
b/doc/sphinxext/prs/11914.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "74ab43a0bc61b9dbdc7fbf821d0b5ea176077313", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/__init__.py": { + "a": 5, + "d": 13 + }, + "mne/io/constants.py": { + "a": 0, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 0, + "d": 0 + }, + "mne/io/pick.py": { + "a": 0, + "d": 0 + }, + "mne/io/proj.py": { + "a": 0, + "d": 0 + }, + "mne/io/reference.py": { + "a": 0, + "d": 0 + }, + "mne/io/tag.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_deprecation.py": { + "a": 1, + "d": 1 + }, + "mne/io/utils.py": { + "a": 0, + "d": 0 + }, + "mne/io/write.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11915.json b/doc/sphinxext/prs/11915.json new file mode 100644 index 00000000000..9223f7573ac --- /dev/null +++ b/doc/sphinxext/prs/11915.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e8e672b63b3d3e5e84d8881a4f5aac5f4bd214e1", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11916.json b/doc/sphinxext/prs/11916.json new file mode 100644 index 00000000000..cd8e4c40ec2 --- /dev/null +++ b/doc/sphinxext/prs/11916.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3960fe8e28771f9d84cb5fcf2898be10ede089cd", + "authors": [ + { + "n": "Nikolai M Chapochnikov", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11917.json b/doc/sphinxext/prs/11917.json new file mode 100644 index 00000000000..c91376c3c13 --- /dev/null +++ b/doc/sphinxext/prs/11917.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ca83201eed98c1b6716542a9aa9e29684538f613", + "authors": [ + { + "n": "Nikolai M Chapochnikov", + "e": null + } + ], + "changes": { + "mne/filter.py": { + "a": 6, + "d": 4 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11918.json b/doc/sphinxext/prs/11918.json new file mode 100644 index 00000000000..a6e3f1a982e --- /dev/null +++ b/doc/sphinxext/prs/11918.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "28d461420084dc714aec67ab370865ac5aa28d3e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/file_io.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 12, + "d": 4 + }, + "mne/_fiff/pick.py": { + "a": 0, + "d": 30 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/ssd.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/_fiff_wrap.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_annotate_amplitude.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_rank.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 8, + "d": 6 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 5, + "d": 7 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11919.json b/doc/sphinxext/prs/11919.json new file mode 100644 index 00000000000..aa5f3046b0a --- /dev/null +++ 
b/doc/sphinxext/prs/11919.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "22ffe6f7a5560cc6a196c69e196007b8bcb13bfe", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 3, + "d": 2 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "mne/proj.py": { + "a": 65, + "d": 80 + }, + "mne/tests/test_proj.py": { + "a": 17, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1192.json b/doc/sphinxext/prs/1192.json new file mode 100644 index 00000000000..d1f2daffab4 --- /dev/null +++ b/doc/sphinxext/prs/1192.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "440b544a7e7178838bfb2c62521b9a28d9c57aaf", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 7, + "d": 1 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 110, + "d": 20 + }, + "mne/source_space.py": { + "a": 18, + "d": 3 + }, + "mne/tests/test_label.py": { + "a": 53, + "d": 12 + }, + "mne/tests/test_source_space.py": { + "a": 23, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11920.json b/doc/sphinxext/prs/11920.json new file mode 100644 index 00000000000..aa836243c28 --- /dev/null +++ b/doc/sphinxext/prs/11920.json @@ -0,0 +1,327 @@ +{ + "merge_commit_sha": "b33f3a83898823149e306901d94fd8de643ee4e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/__init__.py": { + "a": 4, + "d": 1 + }, + "mne/_fiff/__init__.py": { + "a": 5, + "d": 3 + }, + "mne/_fiff/meas_info.py": { + "a": 10, + "d": 9 + }, + "mne/_freesurfer.py": { + "a": 1, + "d": 4 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 1, + "d": 4 + }, + "mne/channels/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 43 + }, + "mne/channels/interpolation.py": { + "a": 2, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 5 + }, + "mne/channels/tests/test_montage.py": { + "a": 7, + "d": 2 + }, + "mne/chpi.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 4 + }, + "mne/dipole.py": { + "a": 2, + "d": 4 + }, + "mne/epochs.py": { + "a": 1, + "d": 6 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 14, + "d": 15 + }, + "mne/io/base.py": { + "a": 2, + "d": 5 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 2 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 2 + }, + "mne/io/eyelink/eyelink.py": { + "a": 1, + "d": 2 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 3, + "d": 5 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 4 + }, + "mne/io/kit/coreg.py": { + "a": 6, + "d": 7 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 2 + }, + "mne/label.py": { + "a": 5, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 3 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 
1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 2 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eyetracking/calibration.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 0, + "d": 2 + }, + "mne/report/report.py": { + "a": 8, + "d": 8 + }, + "mne/simulation/evoked.py": { + "a": 4, + "d": 6 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/source.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/source_space/__init__.py": { + "a": 54, + "d": 0 + }, + "mne/source_space/_source_space.py": { + "a": 28, + "d": 50 + }, + "mne/source_space/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 10, + "d": 4 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 12, + "d": 3 + }, + "mne/tests/test_import_nesting.py": { + "a": 218, + "d": 2 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 18, + "d": 10 + }, + "mne/time_frequency/tfr.py": { + "a": 12, + "d": 85 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 2, + "d": 2 + }, + "mne/utils/check.py": { + "a": 3, + "d": 6 + }, + "mne/utils/mixin.py": { + "a": 4, + "d": 23 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 10 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 5, + "d": 8 + }, + "mne/viz/_mpl_figure.py": { + "a": 9, + "d": 5 + }, + "mne/viz/backends/_utils.py": { + "a": 4, + "d": 8 + }, + "mne/viz/epochs.py": { + "a": 4, + "d": 3 + }, + "mne/viz/ica.py": { + "a": 12, + "d": 4 + }, + "mne/viz/misc.py": { + "a": 4, + "d": 3 + }, + "mne/viz/montage.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 17, + "d": 20 + }, + "mne/viz/utils.py": { + "a": 20, + "d": 3 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11921.json b/doc/sphinxext/prs/11921.json new file mode 100644 index 00000000000..c1bc04d5ee5 --- /dev/null +++ b/doc/sphinxext/prs/11921.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "beb52bdeac0a9c637099739f5c1f126a384c4564", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/simulation/10_array_objs.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11923.json b/doc/sphinxext/prs/11923.json new file mode 100644 index 00000000000..2939a9aeb06 --- /dev/null +++ b/doc/sphinxext/prs/11923.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "6a701d39b24c337c59295b38d88fd7e5ebaa25ef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 0, + "d": 8 + }, + "mne/viz/backends/_pyvista.py": { + "a": 24, + "d": 34 + }, + "mne/viz/backends/tests/test_renderer.py": { 
+ "a": 2, + "d": 0 + }, + "tools/circleci_bash_env.sh": { + "a": 0, + "d": 1 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 1, + "d": 0 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 1, + "d": 1 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11925.json b/doc/sphinxext/prs/11925.json new file mode 100644 index 00000000000..08a0b51fe8e --- /dev/null +++ b/doc/sphinxext/prs/11925.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2d01672f77f7e5b124ddfbeecb3f745f99921609", + "authors": [ + { + "n": "Jack Zhang", + "e": "me@jackzhang.me" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 38, + "d": 14 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 69, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11926.json b/doc/sphinxext/prs/11926.json new file mode 100644 index 00000000000..a5f9a8e359b --- /dev/null +++ b/doc/sphinxext/prs/11926.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "e8baea60027e10423e86f0cf0ec9eaa50238fa47", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 26, + "d": 7 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 43, + "d": 1 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "requirements_testing_extra.txt": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11927.json b/doc/sphinxext/prs/11927.json new file mode 100644 index 00000000000..a9ddaa0df75 --- /dev/null +++ b/doc/sphinxext/prs/11927.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4af14f493e28b0f28611ba62d971c1319577fa72", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 3, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11928.json b/doc/sphinxext/prs/11928.json new file mode 100644 index 00000000000..652f175b7e4 --- /dev/null +++ b/doc/sphinxext/prs/11928.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f44636f00666b8eb869417960926d01690ff4f42", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "doc/preprocessing.rst": { + "a": 5, + "d": 0 + }, + "mne/_fiff/proj.py": { + "a": 4, + "d": 3 + }, + "mne/proj.py": { + "a": 5, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 14, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11929.json b/doc/sphinxext/prs/11929.json new file mode 100644 index 00000000000..f124ea131c4 --- /dev/null +++ b/doc/sphinxext/prs/11929.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"d31ff7a7c3f4374bae3a1f31fea35f05be0673e2", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1193.json b/doc/sphinxext/prs/1193.json new file mode 100644 index 00000000000..8aff3930ab2 --- /dev/null +++ b/doc/sphinxext/prs/1193.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "330a39178cb21c24931701c84a0fd9fffa95cd4e", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 25, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11930.json b/doc/sphinxext/prs/11930.json new file mode 100644 index 00000000000..be445aea069 --- /dev/null +++ b/doc/sphinxext/prs/11930.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9627f43bb2c03233827046bc06acdab0968d6610", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/cov.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11932.json b/doc/sphinxext/prs/11932.json new file mode 100644 index 00000000000..704d0d82275 --- /dev/null +++ b/doc/sphinxext/prs/11932.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e94f255675c9287e8f69ca08db20a1eb6ada8a69", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "mne/utils/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/utils/_logging.py": { + "a": 1, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 46, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 9, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11934.json b/doc/sphinxext/prs/11934.json new file mode 100644 index 00000000000..62b512cec78 --- /dev/null +++ b/doc/sphinxext/prs/11934.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a969c28702e2c4041335632b784493fc5cf8d8a3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 6, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 22, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11935.json b/doc/sphinxext/prs/11935.json new file mode 100644 index 00000000000..ad3724d9899 --- /dev/null +++ b/doc/sphinxext/prs/11935.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "82b2e82e42c5167f30df371f3ca7e18b86a2313a", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11937.json b/doc/sphinxext/prs/11937.json new file mode 100644 index 00000000000..5e5215fe873 --- /dev/null +++ b/doc/sphinxext/prs/11937.json @@ -0,0 +1,127 @@ +{ + "merge_commit_sha": "4afa3f44ef389ef4d50fba4f17f3359e2733c5cc", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 11, + "d": 0 + }, + "README.rst": { + "a": 7, + "d": 1 + }, + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 2, + "d": 1 + }, + 
"mne/_fiff/tests/test_meas_info.py": { + "a": 4, + "d": 5 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 1, + "d": 1 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/_fetch.py": { + "a": 5, + "d": 6 + }, + "mne/html_templates/_templates.py": { + "a": 1, + "d": 2 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/events.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/general.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 12, + "d": 7 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 12, + "d": 4 + }, + "mne/io/nedf/nedf.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + "a": 2, + "d": 1 + }, + "mne/report/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 1, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/utils/_testing.py": { + "a": 26, + "d": 42 + }, + "mne/utils/misc.py": { + "a": 10, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 3, + "d": 6 + }, + "mne/utils/tests/test_bunch.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_scraper.py": { + "a": 1, + "d": 1 + }, + "pyproject.toml": { + "a": 3, + "d": 0 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "requirements_base.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11939.json b/doc/sphinxext/prs/11939.json new file mode 100644 index 00000000000..7c259b73cfd --- /dev/null +++ b/doc/sphinxext/prs/11939.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "94aa16910d26264367d69472b52fb26cb08b29eb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + "a": 6, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11940.json b/doc/sphinxext/prs/11940.json new file mode 100644 index 00000000000..c114c45c510 --- /dev/null +++ b/doc/sphinxext/prs/11940.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "99acd07823dc7018a09ccc8f1fa58a9e7ab884c2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 10, + "d": 5 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 4, + "d": 1 + }, + "tutorials/epochs/50_epochs_to_data_frame.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11942.json b/doc/sphinxext/prs/11942.json new file mode 100644 index 00000000000..73b52ac5f4e --- /dev/null +++ b/doc/sphinxext/prs/11942.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "d2883d577a151f57723d7380f9149abb4247165e", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/visualization.rst": { + "a": 4, + "d": 2 + }, + "examples/datasets/spm_faces_dataset_sgskip.py": { + "a": 3, + "d": 3 + }, + "examples/visualization/mne_helmet.py": { + "a": 3, + "d": 1 + }, + "mne/evoked.py": { + "a": 8, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 58, + "d": 126 + }, + "mne/viz/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 16, + "d": 120 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 0, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 10, + "d": 3 + }, + "mne/viz/backends/_qt.py": { + "a": 2, + 
"d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 185, + "d": 0 + }, + "mne/viz/evoked_field.py": { + "a": 575, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 59, + "d": 0 + }, + "mne/viz/tests/test_ui_events.py": { + "a": 0, + "d": 2 + }, + "mne/viz/ui_events.py": { + "a": 34, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 2 + }, + "tutorials/visualization/20_ui_events.py": { + "a": 30, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11944.json b/doc/sphinxext/prs/11944.json new file mode 100644 index 00000000000..f5e9b4264d9 --- /dev/null +++ b/doc/sphinxext/prs/11944.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7aec681b4e98e5245a25dd68ec95339239d22b15", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/manual_install.rst": { + "a": 10, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11946.json b/doc/sphinxext/prs/11946.json new file mode 100644 index 00000000000..6fad3e1f6ea --- /dev/null +++ b/doc/sphinxext/prs/11946.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d5e125fb6367377262c94c0bbab593b67a2bf5cd", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 9, + "d": 10 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11948.json b/doc/sphinxext/prs/11948.json new file mode 100644 index 00000000000..f1a5cd4ff33 --- /dev/null +++ b/doc/sphinxext/prs/11948.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c83f0eccf42ebd513b05e1b31a50023680759b3d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/v1.2.rst": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11950.json b/doc/sphinxext/prs/11950.json new file mode 100644 index 00000000000..f6fdc8f8837 --- /dev/null +++ b/doc/sphinxext/prs/11950.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0363feed5ac8f1d68c9c90878e5ca370761acd5f", + "authors": [ + { + "n": "Qian Chu", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/realign.py": { + "a": 27, + "d": 9 + }, + "mne/preprocessing/tests/test_realign.py": { + "a": 102, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11951.json b/doc/sphinxext/prs/11951.json new file mode 100644 index 00000000000..8f8d4dbfdc3 --- /dev/null +++ b/doc/sphinxext/prs/11951.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "7503c4e7dc5cbff23175be37d135308bf73448cc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "examples/visualization/roi_erpimage_by_rt.py": { + "a": 1, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 34, + "d": 44 + }, + 
"mne/io/eeglab/tests/test_eeglab.py": { + "a": 12, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 2 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11952.json b/doc/sphinxext/prs/11952.json new file mode 100644 index 00000000000..47d8e70bb2a --- /dev/null +++ b/doc/sphinxext/prs/11952.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9fd1869f64aefa6386cc1384821f7c524ef771c0", + "authors": [ + { + "n": "Paul ROUJANSKY", + "e": "paul.roujansky@bioserenity.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/export/_edf.py": { + "a": 18, + "d": 11 + }, + "mne/export/tests/test_export.py": { + "a": 13, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 54, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 39, + "d": 4 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11953.json b/doc/sphinxext/prs/11953.json new file mode 100644 index 00000000000..fea8af97bb5 --- /dev/null +++ b/doc/sphinxext/prs/11953.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3dd8899aaa8345d434f31cf0f0248d9c4eabfc5d", + "authors": [ + { + "n": "Dmitrii Altukhov", + "e": "dm.altukhov@ya.ru" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 140, + "d": 197 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 31, + "d": 6 + }, + "mne/io/nsx/tests/test_nsx.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11954.json b/doc/sphinxext/prs/11954.json new file mode 100644 index 00000000000..8bc9bed959f --- /dev/null +++ b/doc/sphinxext/prs/11954.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "68ff5de17e4ca3cfe5a9ef9c8cf2c2473ec1d880", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11955.json b/doc/sphinxext/prs/11955.json new file mode 100644 index 00000000000..3d92bbf92de --- /dev/null +++ b/doc/sphinxext/prs/11955.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "53636038c463cfb13c2fc9c7b072f14fc30930e6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 12 + }, + "doc/install/check_installation.rst": { + "a": 35, + "d": 20 + }, + "mne/commands/mne_setup_source_space.py": { + "a": 1, + "d": 2 + }, + "mne/coreg.py": { + "a": 2, + "d": 2 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/morph_map.py": { + "a": 1, + "d": 1 + }, + "mne/source_space/_source_space.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 7, + "d": 14 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 5 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 28 + }, + "requirements.txt": { + "a": 2, + "d": 2 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 0, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11956.json b/doc/sphinxext/prs/11956.json new file mode 100644 index 00000000000..bf06ee6ae47 --- /dev/null +++ b/doc/sphinxext/prs/11956.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "db090a3157efc2b26778cdc314ab7ee41442cfe0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + 
"a": 1, + "d": 0 + }, + "doc/overview/roadmap.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 3, + "d": 1 + }, + "mne/conftest.py": { + "a": 15, + "d": 3 + }, + "mne/utils/config.py": { + "a": 6, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 23, + "d": 8 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 7, + "d": 8 + }, + "mne/viz/backends/_notebook.py": { + "a": 49, + "d": 20 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 0 + }, + "requirements.txt": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11957.json b/doc/sphinxext/prs/11957.json new file mode 100644 index 00000000000..07c6d1e2a44 --- /dev/null +++ b/doc/sphinxext/prs/11957.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "98a1f20ce45bc3475212b47746e1642e02bde73d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/mne_c.rst": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11958.json b/doc/sphinxext/prs/11958.json new file mode 100644 index 00000000000..470aec4459d --- /dev/null +++ b/doc/sphinxext/prs/11958.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c6779a83864d67835329aad0cba6e67d4d9d520f", + "authors": [ + { + "n": "Andy Gilbert", + "e": "adgilbert21@icloud.com" + }, + { + "n": "Andrew Gilbert", + "e": "andrew.gilbert@irhythmtech.com" + }, + { + "n": "paulroujansky", + "e": "paul@roujansky.eu" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11960.json b/doc/sphinxext/prs/11960.json new file mode 100644 index 00000000000..5e28432865b --- /dev/null +++ b/doc/sphinxext/prs/11960.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "80524afe333ece39f27f4639a32d186b1eb485b0", + "authors": [ + { + "n": "Paul ROUJANSKY", + "e": "paul.roujansky@bioserenity.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 6, + "d": 2 + }, + "mne/export/_edf.py": { + "a": 21, + "d": 10 + }, + "mne/export/tests/test_export.py": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 27, + "d": 6 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 14, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11962.json b/doc/sphinxext/prs/11962.json new file mode 100644 index 00000000000..54dc4e168a3 --- /dev/null +++ b/doc/sphinxext/prs/11962.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "33d5db7296c3df407d013a44b1684bf448b432cd", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_notebook.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11964.json b/doc/sphinxext/prs/11964.json new file mode 100644 index 00000000000..e96338f4e39 --- /dev/null +++ b/doc/sphinxext/prs/11964.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5a135edb0323628ac38468f000a96cc4d75e24db", + "authors": [ + { + "n": "Paul ROUJANSKY", + "e": "paul.roujansky@bioserenity.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 1 + }, + "mne/annotations.py": { + "a": 18, + "d": 21 + }, + "mne/io/edf/edf.py": { + "a": 18, + "d": 13 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 20, + "d": 36 + } + } +} \ 
No newline at end of file diff --git a/doc/sphinxext/prs/11965.json b/doc/sphinxext/prs/11965.json new file mode 100644 index 00000000000..f6d4526a14e --- /dev/null +++ b/doc/sphinxext/prs/11965.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1d7b2187516cb5f17f7c194e66e8546708d546c2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 0, + "d": 4 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 0, + "d": 7 + }, + "mne/viz/backends/tests/test_abstract.py": { + "a": 1, + "d": 15 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11969.json b/doc/sphinxext/prs/11969.json new file mode 100644 index 00000000000..ae10e280c34 --- /dev/null +++ b/doc/sphinxext/prs/11969.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "e2b3c03bf1b55863d5653ff091f3784e9ab34121", + "authors": [ + { + "n": "Kristijan Armeni", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 4, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/io/neuralynx/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 199, + "d": 0 + }, + "mne/io/neuralynx/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/neuralynx/tests/test_neuralynx.py": { + "a": 138, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11970.json b/doc/sphinxext/prs/11970.json new file mode 100644 index 00000000000..607a766d562 --- /dev/null +++ b/doc/sphinxext/prs/11970.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0d063a7c80c9e87dbf7d5ae537bc3c93b5c2229d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 34, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11971.json b/doc/sphinxext/prs/11971.json new file mode 100644 index 00000000000..d5d54d34af2 --- /dev/null +++ b/doc/sphinxext/prs/11971.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d1c26aa338b8f988263ba84e1552d5ae9cc9774c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".mailmap": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11972.json b/doc/sphinxext/prs/11972.json new file mode 100644 index 00000000000..8c94784793f --- /dev/null +++ b/doc/sphinxext/prs/11972.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "14ac07df6f5cb8f070a019a758e9283798fa0554", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11974.json b/doc/sphinxext/prs/11974.json new file mode 100644 index 00000000000..515cf9ae35d --- /dev/null +++ b/doc/sphinxext/prs/11974.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "82c8b85f8c13e8a5db4a075f7b027b2f455c3994", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/11975.json b/doc/sphinxext/prs/11975.json new file mode 100644 index 00000000000..3abb9e0f75a --- /dev/null +++ b/doc/sphinxext/prs/11975.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6bb318d43ae3d556f2bc637168bf2f97bdb93f75", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/check_steering_committee.py": { + "a": 58, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11976.json b/doc/sphinxext/prs/11976.json new file mode 100644 index 00000000000..2192b4f5595 --- /dev/null +++ b/doc/sphinxext/prs/11976.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "48c6d7617694364cfa0882e706afe66cfefe7609", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 37, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11978.json b/doc/sphinxext/prs/11978.json new file mode 100644 index 00000000000..d572ec449ed --- /dev/null +++ b/doc/sphinxext/prs/11978.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "89d10ffa8519fd60fcfae460258113b5a14b425e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/spectrum.py": { + "a": 10, + "d": 2 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11979.json b/doc/sphinxext/prs/11979.json new file mode 100644 index 00000000000..07d2613cdbd --- /dev/null +++ b/doc/sphinxext/prs/11979.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5935f407f24556ae2c30fd407b4296d6b146b926", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 17, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11980.json b/doc/sphinxext/prs/11980.json new file mode 100644 index 00000000000..604962ee4ef --- /dev/null +++ b/doc/sphinxext/prs/11980.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cc74f7329d576bddaa5ec5f767efab5985c819a8", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/codeql-analysis.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/tests.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11981.json b/doc/sphinxext/prs/11981.json new file mode 100644 index 00000000000..6aec5b6e841 --- /dev/null +++ b/doc/sphinxext/prs/11981.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dbc3aaa8e4d672518953eaf26cae16f374b83eef", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11982.json b/doc/sphinxext/prs/11982.json new file mode 100644 index 00000000000..5a520972519 --- /dev/null +++ b/doc/sphinxext/prs/11982.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "201f1efa945243727a826f7b15f30d57a80038e6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11983.json b/doc/sphinxext/prs/11983.json new file mode 100644 index 00000000000..98dc144adbc --- 
/dev/null +++ b/doc/sphinxext/prs/11983.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2e3f8058cd3e609d36949ba005b15b8c597cd5b1", + "authors": [ + { + "n": "Adam Li", + "e": null + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11987.json b/doc/sphinxext/prs/11987.json new file mode 100644 index 00000000000..a1d20e10c9c --- /dev/null +++ b/doc/sphinxext/prs/11987.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "db3a7924beea9ab075b4aacbd2db4ce36588f446", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 25, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11988.json b/doc/sphinxext/prs/11988.json new file mode 100644 index 00000000000..d53cf983137 --- /dev/null +++ b/doc/sphinxext/prs/11988.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1dfcac7ff7d528a8e4b78ef82b8693a454fce679", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11989.json b/doc/sphinxext/prs/11989.json new file mode 100644 index 00000000000..98949308aed --- /dev/null +++ b/doc/sphinxext/prs/11989.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cef0d3cb77dcb38aee2076fea97bdadb48267150", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 10, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1199.json b/doc/sphinxext/prs/1199.json new file mode 100644 index 00000000000..c5a6daf7df4 --- /dev/null +++ b/doc/sphinxext/prs/1199.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "efd5b45a84e4d3147debc77cc8762a5ed3234369", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/fiff/meas_info.py": { + "a": 6, + "d": 5 + }, + "mne/fiff/tests/test_meas_info.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11993.json b/doc/sphinxext/prs/11993.json new file mode 100644 index 00000000000..70364910930 --- /dev/null +++ b/doc/sphinxext/prs/11993.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aae0122fcea3acaba1cd7b11ce3b838a366cda28", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 16, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11994.json b/doc/sphinxext/prs/11994.json new file mode 100644 index 00000000000..4963c59cdc3 --- /dev/null +++ b/doc/sphinxext/prs/11994.json @@ -0,0 +1,227 @@ +{ + "merge_commit_sha": "ba127f705a0046cd30d9fb4bfcc610cf1d66bbfd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/evoked_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 5, + "d": 5 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 3 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/hitachi/hitachi.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 9, + "d": 9 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/source.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 4 + }, + "mne/source_space/_source_space.py": { + "a": 3, + "d": 3 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_adjacency.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 9, + "d": 9 + }, + "mne/tests/test_morph.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_source_estimate.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 5, + "d": 5 + }, + "mne/utils/check.py": { + "a": 14, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_figure.py": { + "a": 6, + "d": 6 + }, + "mne/viz/_mpl_figure.py": { + "a": 7, + 
"d": 7 + }, + "mne/viz/_proj.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 10, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11996.json b/doc/sphinxext/prs/11996.json new file mode 100644 index 00000000000..b0dbdab70d4 --- /dev/null +++ b/doc/sphinxext/prs/11996.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5bc2190dfb07baaca2c688737191d2ef447b557f", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/check.py": { + "a": 13, + "d": 12 + }, + "tools/azure_dependencies.sh": { + "a": 15, + "d": 11 + }, + "tools/github_actions_dependencies.sh": { + "a": 19, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11997.json b/doc/sphinxext/prs/11997.json new file mode 100644 index 00000000000..5706f5215e3 --- /dev/null +++ b/doc/sphinxext/prs/11997.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e6afe5fb64c46df50256635b74cf583d5cc92727", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tools/dev_reports/Makefile": { + "a": 16, + "d": 0 + }, + "tools/dev_reports/check_steering_committee.py": { + "a": 0, + "d": 0 + }, + "tools/dev_reports/unacknowledged-bug-reports.jq": { + "a": 32, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/11999.json b/doc/sphinxext/prs/11999.json new file mode 100644 index 00000000000..45ce3de4d28 --- /dev/null +++ b/doc/sphinxext/prs/11999.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "966bdcca9a6109fc771b69e08515ee4d39ff38cc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "examples/io/elekta_epochs.py": { + "a": 2, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 13, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 0, + "d": 1 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12.json b/doc/sphinxext/prs/12.json new file mode 100644 index 00000000000..996e71426b8 --- /dev/null +++ b/doc/sphinxext/prs/12.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4073148134e4941d13fae7a8ac992bd09f27091d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/artifacts/ecg.py": { + "a": 30, + "d": 13 + }, + "mne/artifacts/tests/test_ecg.py": { + "a": 3, + "d": 3 + }, + "mne/epochs.py": { + "a": 22, + "d": 11 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12000.json b/doc/sphinxext/prs/12000.json new file mode 100644 index 00000000000..b8c0dc74994 --- /dev/null +++ b/doc/sphinxext/prs/12000.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": 
"77ba263489b86ee7ad8c8bf02c8e6cb10e0c74d8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 3, + "d": 0 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 65, + "d": 50 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 87, + "d": 22 + }, + "mne/viz/_brain/view.py": { + "a": 33, + "d": 24 + }, + "mne/viz/backends/_abstract.py": { + "a": 9, + "d": 9 + }, + "mne/viz/backends/_pyvista.py": { + "a": 62, + "d": 65 + }, + "mne/viz/backends/renderer.py": { + "a": 3, + "d": 2 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12001.json b/doc/sphinxext/prs/12001.json new file mode 100644 index 00000000000..e5683ef29e3 --- /dev/null +++ b/doc/sphinxext/prs/12001.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "37ae7e37354fddc5eced35aa96973e3f944c9780", + "authors": [ + { + "n": "Hamza Abdelhedi", + "e": "hamzaabdelhedi18@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 15, + "d": 11 + }, + "mne/tests/test_source_estimate.py": { + "a": 38, + "d": 8 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12002.json b/doc/sphinxext/prs/12002.json new file mode 100644 index 00000000000..80f6d645233 --- /dev/null +++ b/doc/sphinxext/prs/12002.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "59b31604e7e106e74455e7b6fbc4282b74fbe933", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12003.json b/doc/sphinxext/prs/12003.json new file mode 100644 index 00000000000..58d40c79aca --- /dev/null +++ b/doc/sphinxext/prs/12003.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "9f19bd694ce131b95535dd706c4d856333caaf66", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 20, + "d": 13 + }, + "mne/io/eyelink/eyelink.py": { + "a": 12, + "d": 49 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 29, + "d": 1 + }, + "mne/preprocessing/realign.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 50, + "d": 0 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12004.json b/doc/sphinxext/prs/12004.json new file mode 100644 index 00000000000..4d4e0502bdb --- /dev/null +++ b/doc/sphinxext/prs/12004.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9de0ef64d8964ab97d40addc33d6965249df262a", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/transforms.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12005.json b/doc/sphinxext/prs/12005.json new file 
mode 100644 index 00000000000..196fe89cd32 --- /dev/null +++ b/doc/sphinxext/prs/12005.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "034e9dce29a5754c7134aa4753ca81149ff3f55c", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/evoked_field.py": { + "a": 2, + "d": 1 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12006.json b/doc/sphinxext/prs/12006.json new file mode 100644 index 00000000000..92efcac59be --- /dev/null +++ b/doc/sphinxext/prs/12006.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "d79606beef83e0b77512668ceb4945364113efbc", + "authors": [ + { + "n": "Gonzalo Reina", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 3, + "d": 0 + }, + "mne/evoked.py": { + "a": 3, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 12, + "d": 1 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 11, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12008.json b/doc/sphinxext/prs/12008.json new file mode 100644 index 00000000000..c87587da84a --- /dev/null +++ b/doc/sphinxext/prs/12008.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8ee60d0db3001b855dd827738baff09aab10c627", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + ".github/workflows/circle_artifacts.yml": { + "a": 1, + "d": 1 + }, + ".gitignore": { + "a": 1, + "d": 0 + }, + "doc/Makefile": { + "a": 1, + "d": 1 + }, + "tools/circleci_download.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12009.json b/doc/sphinxext/prs/12009.json new file mode 100644 index 00000000000..d43b99b81ac --- /dev/null +++ b/doc/sphinxext/prs/12009.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "7a91c470be872698dd21b5c76ab26c1bc3f4f10c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 4, + "d": 0 + }, + "mne/fixes.py": { + "a": 0, + "d": 20 + }, + "mne/transforms.py": { + "a": 7, + "d": 11 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 0 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 0, + "d": 3 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 2, + "d": 3 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12010.json b/doc/sphinxext/prs/12010.json new file mode 100644 index 00000000000..48f39040616 --- /dev/null +++ b/doc/sphinxext/prs/12010.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b9e907667531ecee36b1e7fa79d2c546a14eaf28", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 
1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12013.json b/doc/sphinxext/prs/12013.json new file mode 100644 index 00000000000..cf24043297f --- /dev/null +++ b/doc/sphinxext/prs/12013.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "079c868240a898204bf82b2f1bf0e04cdee75da1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 10, + "d": 1 + }, + "doc/install/contributing.rst": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12014.json b/doc/sphinxext/prs/12014.json new file mode 100644 index 00000000000..4fc7272ec32 --- /dev/null +++ b/doc/sphinxext/prs/12014.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "9ed326f4124058bd6a61790964b4f84456a49e52", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "nordme", + "e": "nordme@uw.edu" + }, + { + "n": "nordme", + "e": "38704848+nordme@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 79, + "d": 0 + }, + "mne/channels/tests/test_unify_bads.py": { + "a": 53, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12016.json b/doc/sphinxext/prs/12016.json new file mode 100644 index 00000000000..7e099f263e3 --- /dev/null +++ b/doc/sphinxext/prs/12016.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5c4166f43c9358fd7aa3d960686d78c1bbcd7513", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12017.json b/doc/sphinxext/prs/12017.json new file mode 100644 index 00000000000..b3d13221fff --- /dev/null +++ b/doc/sphinxext/prs/12017.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "de86932602782c75166e0d3ff4270a824ce4d655", + "authors": [ + { + "n": "Paul ROUJANSKY", + "e": "paul.roujansky@bioserenity.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12019.json b/doc/sphinxext/prs/12019.json new file mode 100644 index 00000000000..3b75d9e5625 --- /dev/null +++ b/doc/sphinxext/prs/12019.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e5a9db2933b2a309b466097c6988ab8117cb6e19", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/pick.py": { + "a": 10, + "d": 7 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12020.json b/doc/sphinxext/prs/12020.json new file mode 100644 index 00000000000..90e308fe4a1 --- /dev/null +++ b/doc/sphinxext/prs/12020.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "e99ea74de08bd19559bdcad754175b8d4b1f831a", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric 
Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/bem_model.rst": { + "a": 3, + "d": 2 + }, + "doc/_includes/forward.rst": { + "a": 1, + "d": 1 + }, + "doc/_includes/ssp.rst": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 30, + "d": 19 + }, + "mne/cov.py": { + "a": 5, + "d": 7 + }, + "mne/forward/_make_forward.py": { + "a": 6, + "d": 4 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 5 + }, + "mne/source_space/_source_space.py": { + "a": 25, + "d": 17 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 8 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 7, + "d": 10 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 11, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12021.json b/doc/sphinxext/prs/12021.json new file mode 100644 index 00000000000..90851cad829 --- /dev/null +++ b/doc/sphinxext/prs/12021.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2138c173cee05651cab808526233037eaa4d8b94", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12023.json b/doc/sphinxext/prs/12023.json new file mode 100644 index 00000000000..84bf5fb35c7 --- /dev/null +++ b/doc/sphinxext/prs/12023.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "927f88daad73559d090f2016c3827f62ab7170a8", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 17, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12025.json b/doc/sphinxext/prs/12025.json new file mode 100644 index 00000000000..7d1b2498618 --- /dev/null +++ b/doc/sphinxext/prs/12025.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "578f2a9c56f41329be375669d0c79600cc57b6ad", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 10, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 14, + "d": 15 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 65, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12026.json b/doc/sphinxext/prs/12026.json new file mode 100644 index 00000000000..11fbfdcd202 --- /dev/null +++ b/doc/sphinxext/prs/12026.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "e4a6eba5d0a4c5f92b904207c33252f1979043db", + "authors": [ + { + "n": "nordme", + "e": "nordme@uw.edu" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/devel.rst": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/source_label_time_frequency.py": { + "a": 45, + "d": 3 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 140, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 203, + "d": 18 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 2, 
+ "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12027.json b/doc/sphinxext/prs/12027.json new file mode 100644 index 00000000000..53f0a34dc8d --- /dev/null +++ b/doc/sphinxext/prs/12027.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9f31cf35945a3bb8924208a816072d541443a9eb", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 44, + "d": 9 + }, + "mne/channels/interpolation.py": { + "a": 17, + "d": 3 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 25, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12029.json b/doc/sphinxext/prs/12029.json new file mode 100644 index 00000000000..a10644cb545 --- /dev/null +++ b/doc/sphinxext/prs/12029.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "04e05d4d323b26af483dde38664cb817f7df8e8a", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12032.json b/doc/sphinxext/prs/12032.json new file mode 100644 index 00000000000..3aaf6b8a2d4 --- /dev/null +++ b/doc/sphinxext/prs/12032.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3eaf3c1c7f98b75c7f8ea52cc707faf18ce2c01c", + "authors": [ + { + "n": "Kristijan Armeni", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12033.json b/doc/sphinxext/prs/12033.json new file mode 100644 index 00000000000..64d3fc7309a --- /dev/null +++ b/doc/sphinxext/prs/12033.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1def4bf8a84b09d465ffd613984ee2badd364a3c", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + } + ], + "changes": { + "tutorials/forward/20_source_alignment.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/30_forward.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12035.json b/doc/sphinxext/prs/12035.json new file mode 100644 index 00000000000..2ea2612a994 --- /dev/null +++ b/doc/sphinxext/prs/12035.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d52a0d3cfc26c43cb5e1c960b5441bd887ef4a54", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/time_frequency/time_frequency_erds.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12036.json b/doc/sphinxext/prs/12036.json new file mode 100644 index 00000000000..67822ccb14f --- /dev/null +++ b/doc/sphinxext/prs/12036.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "bd4d1d6df252e499f52598733da15d0757bce173", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 3, + "d": 0 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 8, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 39, + "d": 8 + }, + "mne/forward/tests/test_forward.py": { + "a": 9, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12037.json b/doc/sphinxext/prs/12037.json new file mode 100644 index 00000000000..824ffcf5512 --- /dev/null +++ 
b/doc/sphinxext/prs/12037.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9f22adbcb2dd478a8d618b8479e5765eaad49c9e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/release.yml": { + "a": 51, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12038.json b/doc/sphinxext/prs/12038.json new file mode 100644 index 00000000000..2a550f672f8 --- /dev/null +++ b/doc/sphinxext/prs/12038.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3c3ec57feebbdddb826535fe16768db787519bae", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 10, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12039.json b/doc/sphinxext/prs/12039.json new file mode 100644 index 00000000000..88be4474f00 --- /dev/null +++ b/doc/sphinxext/prs/12039.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "ff6bad289da40e08b82e4db010815c607a09d2a7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 51, + "d": 23 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 29, + "d": 7 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 4 + }, + "mne/io/artemis123/artemis123.py": { + "a": 7, + "d": 7 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_annotate_nan.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1204.json b/doc/sphinxext/prs/1204.json new file mode 100644 index 00000000000..a0ccf08588a --- /dev/null +++ b/doc/sphinxext/prs/1204.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d8343a4f40db1adcfcee4fb53970797b5dc68b74", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/commands/mne_coreg.py": { + "a": 24, + "d": 0 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12040.json b/doc/sphinxext/prs/12040.json new file mode 100644 index 00000000000..48d651ccd82 --- /dev/null +++ b/doc/sphinxext/prs/12040.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "110947f35abc93fd88f4e6b72f9e573dc25b355f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/dics_epochs.py": { + "a": 2, + "d": 7 + }, + "tutorials/intro/70_report.py": { + "a": 14, + "d": 33 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12041.json b/doc/sphinxext/prs/12041.json new file mode 100644 index 00000000000..c1e8020ba62 --- /dev/null +++ b/doc/sphinxext/prs/12041.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "56587f33e2cd3fd253a29784cbe1781235aa0447", + "authors": [ + { + "n": "Maksym Balatsko", + "e": "mbalatsko@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + 
"mne/channels/tests/test_channels.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12042.json b/doc/sphinxext/prs/12042.json new file mode 100644 index 00000000000..95336a8a9e6 --- /dev/null +++ b/doc/sphinxext/prs/12042.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e4afb0aa211ca2990ead7f4bd93adbf99ad007ef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 0, + "d": 2 + }, + "doc/install/advanced.rst": { + "a": 8, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12043.json b/doc/sphinxext/prs/12043.json new file mode 100644 index 00000000000..993e6015d2c --- /dev/null +++ b/doc/sphinxext/prs/12043.json @@ -0,0 +1,219 @@ +{ + "merge_commit_sha": "c6863e5470a4ea312f0d7762d1ba4e9139d63785", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/flow_diagram.svg": { + "a": 0, + "d": 0 + }, + "doc/api/connectivity.rst": { + "a": 0, + "d": 0 + }, + "doc/api/covariance.rst": { + "a": 0, + "d": 0 + }, + "doc/api/creating_from_arrays.rst": { + "a": 0, + "d": 0 + }, + "doc/api/datasets.rst": { + "a": 0, + "d": 0 + }, + "doc/api/decoding.rst": { + "a": 0, + "d": 0 + }, + "doc/api/events.rst": { + "a": 0, + "d": 0 + }, + "doc/api/export.rst": { + "a": 0, + "d": 0 + }, + "doc/api/file_io.rst": { + "a": 0, + "d": 0 + }, + "doc/api/forward.rst": { + "a": 0, + "d": 0 + }, + "doc/api/inverse.rst": { + "a": 0, + "d": 0 + }, + "doc/api/logging.rst": { + "a": 0, + "d": 0 + }, + "doc/api/most_used_classes.rst": { + "a": 0, + "d": 0 + }, + "doc/api/mri.rst": { + "a": 0, + "d": 0 + }, + "doc/api/preprocessing.rst": { + "a": 0, + "d": 0 + }, + "doc/api/python_reference.rst": { + "a": 0, + "d": 0 + }, + "doc/api/reading_raw_data.rst": { + "a": 0, + "d": 0 + }, + "doc/api/realtime.rst": { + "a": 0, + "d": 0 + }, + "doc/api/report.rst": { + "a": 0, + "d": 0 + }, + "doc/api/sensor_space.rst": { + "a": 0, + "d": 0 + }, + "doc/api/simulation.rst": { + "a": 0, + "d": 0 + }, + "doc/api/source_space.rst": { + "a": 0, + "d": 0 + }, + "doc/api/statistics.rst": { + "a": 0, + "d": 0 + }, + "doc/api/time_frequency.rst": { + "a": 0, + "d": 0 + }, + "doc/api/visualization.rst": { + "a": 0, + "d": 0 + }, + "doc/conf.py": { + "a": 64, + "d": 22 + }, + "doc/development/contributing.rst": { + "a": 0, + "d": 0 + }, + "doc/development/governance.rst": { + "a": 0, + "d": 0 + }, + "doc/development/index.rst": { + "a": 2, + "d": 2 + }, + "doc/development/roadmap.rst": { + "a": 0, + "d": 0 + }, + "doc/development/whats_new.rst": { + "a": 42, + "d": 0 + }, + "doc/documentation/cite.rst": { + "a": 0, + "d": 0 + }, + "doc/documentation/cited.rst": { + "a": 0, + "d": 0 + }, + "doc/documentation/cookbook.rst": { + "a": 1, + "d": 1 + }, + "doc/documentation/datasets.rst": { + "a": 0, + "d": 0 + }, + "doc/documentation/design_philosophy.rst": { + "a": 0, + "d": 0 + }, + "doc/documentation/glossary.rst": { + "a": 0, + "d": 0 + }, + "doc/documentation/implementation.rst": { + "a": 0, + "d": 0 + }, + "doc/documentation/index.rst": { + "a": 8, + "d": 8 + }, + "doc/help/faq.rst": { + "a": 0, + "d": 0 + }, + "doc/help/index.rst": { + "a": 0, + "d": 0 + }, + "doc/help/learn_python.rst": { + "a": 0, + "d": 0 + }, + "doc/help/migrating.rst": { + "a": 0, + "d": 0 + }, + "doc/index.rst": { + "a": 4, + "d": 4 + }, + "doc/install/advanced.rst": { 
+ "a": 0, + "d": 41 + }, + "doc/install/manual_install_python.rst": { + "a": 13, + "d": 141 + }, + "doc/install/mne_tools_suite.rst": { + "a": 3, + "d": 4 + }, + "doc/links.inc": { + "a": 2, + "d": 1 + }, + "doc/overview/images/Digitizer-example.png": { + "a": 0, + "d": 0 + }, + "doc/overview/matlab.rst": { + "a": 0, + "d": 1218 + }, + "doc/whats_new.rst": { + "a": 0, + "d": 42 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12044.json b/doc/sphinxext/prs/12044.json new file mode 100644 index 00000000000..c52829e79fe --- /dev/null +++ b/doc/sphinxext/prs/12044.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fd08b5273ed2e2c02cde409252e10b3b580f5f37", + "authors": [ + { + "n": "Paul ROUJANSKY", + "e": "paul.roujansky@bioserenity.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 16, + "d": 17 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 76, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12046.json b/doc/sphinxext/prs/12046.json new file mode 100644 index 00000000000..9a34b658b2b --- /dev/null +++ b/doc/sphinxext/prs/12046.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d585ff4ad2672fce92b629da681b2b5057a85888", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/release.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12050.json b/doc/sphinxext/prs/12050.json new file mode 100644 index 00000000000..28a61b2c0c3 --- /dev/null +++ b/doc/sphinxext/prs/12050.json @@ -0,0 +1,303 @@ +{ + "merge_commit_sha": "fdaeb86206d334242bcbcfd09460c970b33e4dcd", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 2, + "d": 4 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 1, + "d": 2 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 5, + "d": 4 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 5, + "d": 11 + }, + "examples/inverse/label_source_activations.py": { + "a": 2, + "d": 4 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/source_space_snr.py": { + "a": 1, + "d": 2 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 2, + "d": 5 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 1, + "d": 2 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 1, + "d": 2 + }, + "examples/preprocessing/eog_regression.py": { + "a": 3, + "d": 4 + }, + "examples/preprocessing/shift_evoked.py": { + "a": 0, + "d": 3 + }, + "examples/simulation/plot_stc_metrics.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/source_label_time_frequency.py": { + "a": 8, + "d": 17 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 0, + "d": 1 + }, + "examples/time_frequency/time_frequency_simulated.py": { + "a": 5, + "d": 12 + }, + "examples/visualization/3d_to_2d.py": { + "a": 1, 
+ "d": 2 + }, + "examples/visualization/evoked_topomap.py": { + "a": 1, + "d": 3 + }, + "mne/conftest.py": { + "a": 25, + "d": 13 + }, + "mne/preprocessing/eyetracking/calibration.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 0, + "d": 1 + }, + "mne/report/report.py": { + "a": 6, + "d": 24 + }, + "mne/time_frequency/spectrum.py": { + "a": 0, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 20, + "d": 18 + }, + "mne/viz/_3d.py": { + "a": 14, + "d": 12 + }, + "mne/viz/__init__.pyi": { + "a": 0, + "d": 2 + }, + "mne/viz/_dipole.py": { + "a": 1, + "d": 3 + }, + "mne/viz/_figure.py": { + "a": 1, + "d": 11 + }, + "mne/viz/_mpl_figure.py": { + "a": 23, + "d": 6 + }, + "mne/viz/_proj.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 2, + "d": 13 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 3, + "d": 0 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 11 + }, + "mne/viz/evoked.py": { + "a": 36, + "d": 39 + }, + "mne/viz/ica.py": { + "a": 8, + "d": 16 + }, + "mne/viz/misc.py": { + "a": 24, + "d": 27 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 8 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 3, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 4, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 38, + "d": 68 + }, + "mne/viz/utils.py": { + "a": 16, + "d": 143 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "requirements_base.txt": { + "a": 1, + "d": 1 + }, + "tools/github_actions_env_vars.sh": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 2, + "d": 5 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 1, + "d": 2 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 1, + "d": 2 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 20, + "d": 20 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 3, + "d": 7 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 0, + "d": 3 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 3, + "d": 2 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 1, + "d": 4 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 8, + "d": 7 + }, + "tutorials/preprocessing/80_opm_processing.py": { + "a": 6, + "d": 6 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 0, + "d": 1 + }, + "tutorials/simulation/80_dics.py": { + "a": 1, + "d": 2 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 6, + "d": 11 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 6, + "d": 11 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 1, + "d": 2 + }, + "tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py": { + "a": 3, + "d": 7 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 4, + "d": 9 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12052.json b/doc/sphinxext/prs/12052.json new file mode 100644 index 00000000000..0d4a7bfaf32 --- /dev/null +++ b/doc/sphinxext/prs/12052.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"40da6d9123debed6eaf8879c96c604b80c2f9f6e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/evoked_field.py": { + "a": 1, + "d": 1 + }, + "tutorials/visualization/20_ui_events.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12053.json b/doc/sphinxext/prs/12053.json new file mode 100644 index 00000000000..351fbab88ed --- /dev/null +++ b/doc/sphinxext/prs/12053.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "de3b5a00e3883c010863642c57818d8cb862ca45", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 3 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12056.json b/doc/sphinxext/prs/12056.json new file mode 100644 index 00000000000..014bd1bd6c0 --- /dev/null +++ b/doc/sphinxext/prs/12056.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7c1429403e4beaa0dadb369512e2d5b1b1ed79fc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/js/set_installer_tab.js": { + "a": 32, + "d": 0 + }, + "doc/_static/js/update_installer_version.js": { + "a": 61, + "d": 0 + }, + "doc/install/installers.rst": { + "a": 4, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12057.json b/doc/sphinxext/prs/12057.json new file mode 100644 index 00000000000..1e657584766 --- /dev/null +++ b/doc/sphinxext/prs/12057.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "cedefa0751adbf6d2be4531239019ef50302a8af", + "authors": [ + { + "n": "Santeri Ruuskanen", + "e": null + }, + { + "n": "Santeri Ruuskanen", + "e": "santeri.ruuskanen@aalto.fi" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_utils.py": { + "a": 73, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 62, + "d": 8 + }, + "mne/viz/ui_events.py": { + "a": 15, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 32, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12060.json b/doc/sphinxext/prs/12060.json new file mode 100644 index 00000000000..16248e58036 --- /dev/null +++ b/doc/sphinxext/prs/12060.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "b069d4abd7bd7b39973c9fcbf0d9ada02b5e4b19", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_ola.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 3, + "d": 3 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 2, + "d": 2 + 
}, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/transformer.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 2 + }, + "mne/viz/montage.py": { + "a": 1, + "d": 1 + }, + "pyproject.toml": { + "a": 90, + "d": 3 + }, + "setup.py": { + "a": 0, + "d": 88 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 3, + "d": 3 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 4, + "d": 4 + }, + "tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12061.json b/doc/sphinxext/prs/12061.json new file mode 100644 index 00000000000..084ca233666 --- /dev/null +++ b/doc/sphinxext/prs/12061.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a8b4638cf415838425fba34d2773c1007ac99ee1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12063.json b/doc/sphinxext/prs/12063.json new file mode 100644 index 00000000000..3b93f29d5a5 --- /dev/null +++ b/doc/sphinxext/prs/12063.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c65345ad5cdadb466a8351e5e173aeb9a577f2a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 8, + "d": 1 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12064.json b/doc/sphinxext/prs/12064.json new file mode 100644 index 00000000000..d0c7aa2d582 --- /dev/null +++ b/doc/sphinxext/prs/12064.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "63ce95d4f0928e2ad035cb67c70ea06db44bd19f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 8, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/info.html.jinja": { + "a": 80, + "d": 10 + }, + "mne/html_templates/repr/raw.html.jinja": { + "a": 1, + "d": 12 + }, + "mne/io/base.py": { + "a": 5, + "d": 3 + }, + "mne/utils/tests/test_misc.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/30_info.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12065.json b/doc/sphinxext/prs/12065.json new file mode 100644 index 00000000000..9119c5cdaae --- /dev/null +++ b/doc/sphinxext/prs/12065.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9d20815494aca4ccf134bcc8758aadad3a9954ae", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 15, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12066.json b/doc/sphinxext/prs/12066.json new file mode 100644 index 00000000000..cb78cc7a02c --- /dev/null +++ b/doc/sphinxext/prs/12066.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "27d1c59777061a4a94c6a5fc03e6e08fc6c0cf9a", + "authors": [ + { + "n": "Ivan Zubarev", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 9 + }, + "mne/channels/tests/test_channels.py": { + "a": 20, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 15, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12067.json b/doc/sphinxext/prs/12067.json new file mode 100644 index 00000000000..f3b1bc08cd4 --- /dev/null +++ b/doc/sphinxext/prs/12067.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "905c12c6eac0b6bd67dc88707c32f1f804ae56fd", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/io/__init__.py": { + "a": 1, + "d": 44 + }, + "mne/io/tests/test_deprecation.py": { + "a": 0, + "d": 10 + }, + "mne/source_space/__init__.py": { + "a": 1, + "d": 33 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 0, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12068.json b/doc/sphinxext/prs/12068.json new file mode 100644 index 00000000000..9e610fff87c --- /dev/null +++ b/doc/sphinxext/prs/12068.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "05f1fa69fdddc2725bd3099280cd189e96a940f3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "mne/defaults.py": { + "a": 3, + "d": 0 + }, + "mne/gui/_coreg.py": { + "a": 3, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 16, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 100, + "d": 106 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 11, + "d": 12 + }, + "mne/viz/tests/test_3d.py": { + "a": 6, + "d": 0 + }, + "tutorials/clinical/30_ecog.py": { + "a": 5, + "d": 4 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 0, + "d": 1 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12071.json b/doc/sphinxext/prs/12071.json new file mode 100644 index 00000000000..09e7ab310dd --- /dev/null +++ b/doc/sphinxext/prs/12071.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "aca49655b10fc17679142e07c5d46659be1099da", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12071.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 12, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 14, + "d": 25 + }, + 
"mne/viz/tests/test_topo.py": { + "a": 35, + "d": 1 + }, + "mne/viz/tests/test_utils.py": { + "a": 69, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 81, + "d": 15 + }, + "mne/viz/ui_events.py": { + "a": 20, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 108, + "d": 63 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12072.json b/doc/sphinxext/prs/12072.json new file mode 100644 index 00000000000..325971112fb --- /dev/null +++ b/doc/sphinxext/prs/12072.json @@ -0,0 +1,203 @@ +{ + "merge_commit_sha": "81b7ddfe3fc33f83ba7d472a6a67e714850d7c73", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 274 + }, + "mne/__init__.pyi": { + "a": 426, + "d": 0 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 32 + }, + "mne/beamformer/__init__.pyi": { + "a": 34, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 1, + "d": 47 + }, + "mne/channels/__init__.pyi": { + "a": 76, + "d": 0 + }, + "mne/commands/__init__.py": { + "a": 1, + "d": 6 + }, + "mne/commands/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 42 + }, + "mne/datasets/__init__.pyi": { + "a": 72, + "d": 0 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 23 + }, + "mne/decoding/__init__.pyi": { + "a": 40, + "d": 0 + }, + "mne/export/__init__.py": { + "a": 1, + "d": 9 + }, + "mne/export/__init__.pyi": { + "a": 3, + "d": 0 + }, + "mne/forward/__init__.py": { + "a": 2, + "d": 53 + }, + "mne/forward/__init__.pyi": { + "a": 86, + "d": 0 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 6 + }, + "mne/gui/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/html_templates/__init__.py": { + "a": 1, + "d": 6 + }, + "mne/html_templates/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/inverse_sparse/__init__.py": { + "a": 1, + "d": 10 + }, + "mne/inverse_sparse/__init__.pyi": { + "a": 3, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 52 + }, + "mne/io/__init__.pyi": { + "a": 87, + "d": 0 + }, + "mne/minimum_norm/__init__.py": { + "a": 1, + "d": 34 + }, + "mne/minimum_norm/__init__.pyi": { + "a": 50, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 51 + }, + "mne/preprocessing/__init__.pyi": { + "a": 91, + "d": 0 + }, + "mne/report/__init__.py": { + "a": 1, + "d": 8 + }, + "mne/report/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/simulation/__init__.py": { + "a": 1, + "d": 15 + }, + "mne/simulation/__init__.pyi": { + "a": 22, + "d": 0 + }, + "mne/source_space/__init__.py": { + "a": 1, + "d": 19 + }, + "mne/source_space/__init__.pyi": { + "a": 22, + "d": 0 + }, + "mne/stats/__init__.py": { + "a": 1, + "d": 31 + }, + "mne/stats/__init__.pyi": { + "a": 42, + "d": 0 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 54 + }, + "mne/time_frequency/__init__.pyi": { + "a": 73, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 206 + }, + "mne/utils/__init__.pyi": { + "a": 384, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 106 + }, + "mne/viz/__init__.pyi": { + "a": 178, + "d": 0 + }, + "tools/dev/Makefile": { + "a": 0, + "d": 0 + }, + "tools/dev/check_steering_committee.py": { + "a": 0, + "d": 0 + }, + "tools/dev/generate_pyi_files.py": { + "a": 76, + "d": 0 + }, + "tools/dev/unacknowledged-bug-reports.jq": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12073.json b/doc/sphinxext/prs/12073.json new file mode 100644 index 
00000000000..b60f593532e --- /dev/null +++ b/doc/sphinxext/prs/12073.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5f7c9b90fdf89bf7b33ee75b74f79fdb1128d42a", + "authors": [ + { + "n": "Santeri Ruuskanen", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 10, + "d": 8 + }, + "mne/utils/docs.py": { + "a": 37, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12074.json b/doc/sphinxext/prs/12074.json new file mode 100644 index 00000000000..052ba972809 --- /dev/null +++ b/doc/sphinxext/prs/12074.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5a83eaea91c48c8ac87a443b102b4509f88e8f50", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 4, + "d": 2 + }, + "examples/visualization/sensor_noise_level.py": { + "a": 0, + "d": 39 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 1 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 0, + "d": 1 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12076.json b/doc/sphinxext/prs/12076.json new file mode 100644 index 00000000000..278a1c5bb17 --- /dev/null +++ b/doc/sphinxext/prs/12076.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "643580122691d58e855f5f25124e78615fbcd933", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12077.json b/doc/sphinxext/prs/12077.json new file mode 100644 index 00000000000..8e751ed4a0f --- /dev/null +++ b/doc/sphinxext/prs/12077.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "812b02ef0808bff97b32be54473de9bb986366db", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/js/contrib-avatars.js": { + "a": 1, + "d": 1 + }, + "doc/_templates/homepage.html": { + "a": 2, + "d": 0 + }, + "doc/_templates/layout.html": { + "a": 0, + "d": 3 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/sphinxext/_avatar_template.html": { + "a": 7, + "d": 0 + }, + "doc/sphinxext/contrib_avatars.py": { + "a": 34, + "d": 0 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + }, + "tools/circleci_bash_env.sh": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12078.json b/doc/sphinxext/prs/12078.json new file mode 100644 index 00000000000..d68239ba266 --- /dev/null +++ b/doc/sphinxext/prs/12078.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "92f5dd8520a74a273872536365a144972a656e3c", + "authors": [ + { + "n": "Santeri Ruuskanen", + "e": null + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/utils/config.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1208.json b/doc/sphinxext/prs/1208.json new file mode 100644 index 00000000000..281e05847a8 --- /dev/null +++ b/doc/sphinxext/prs/1208.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cc92acc223956f9ca73d884bc4d73b51bfa2a389", + "authors": [ + { + "n": "aestrivex", + "e": "aestrivex@gmail.com" + } + ], + "changes": { + "mne/viz.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/12080.json b/doc/sphinxext/prs/12080.json new file mode 100644 index 00000000000..f66096107a8 --- /dev/null +++ b/doc/sphinxext/prs/12080.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "23fa43c3df27428734aa23f7bca3911c52b29048", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/tag.py": { + "a": 6, + "d": 1 + }, + "mne/_fiff/write.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12081.json b/doc/sphinxext/prs/12081.json new file mode 100644 index 00000000000..84ea50bc90e --- /dev/null +++ b/doc/sphinxext/prs/12081.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6aca4ece3080e49b936f4a94096b4b074f0713d2", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12086.json b/doc/sphinxext/prs/12086.json new file mode 100644 index 00000000000..6af5e4c4dde --- /dev/null +++ b/doc/sphinxext/prs/12086.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "566fa0701f5c44324b330c8b21d8c8a434898e3e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 4, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 11, + "d": 4 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 3, + "d": 4 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12089.json b/doc/sphinxext/prs/12089.json new file mode 100644 index 00000000000..383f8b4101e --- /dev/null +++ b/doc/sphinxext/prs/12089.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "647fdd309a6f4e386a246f522817f0ba3f616b6f", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1209.json b/doc/sphinxext/prs/1209.json new file mode 100644 index 00000000000..7f7ac143746 --- /dev/null +++ b/doc/sphinxext/prs/1209.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0683f1290b801945acfe6d0b7c99cfc245e17a3b", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 13, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12095.json b/doc/sphinxext/prs/12095.json new file mode 100644 index 00000000000..01f138255a3 --- /dev/null +++ b/doc/sphinxext/prs/12095.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3b6a33954c1aeb2ee02db3e9840b1df00d8c3be3", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/_fiff/write.py": { + "a": 1, + "d": 2 + }, + "mne/chpi.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12097.json b/doc/sphinxext/prs/12097.json new file mode 100644 index 00000000000..89a7cd6b905 --- /dev/null +++ b/doc/sphinxext/prs/12097.json @@ -0,0 +1,2431 @@ +{ + "merge_commit_sha": "d6d2f8c6a2ed4a0b27357da9ddf8e0cd14931b59", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "examples/datasets/hf_sef_data.py": { + "a": 2, + "d": 1 + }, + "examples/datasets/limo_data.py": { + "a": 3, + "d": 4 + }, + "examples/datasets/opm_data.py": { + "a": 1, + "d": 0 + }, + "examples/datasets/spm_faces_dataset_sgskip.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 4, + "d": 5 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 6, + "d": 7 + }, + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 5, + "d": 7 + }, + "examples/decoding/decoding_spatio_temporal_source.py": { + "a": 4, + "d": 5 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 4, + "d": 5 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 1, + "d": 2 + }, + "examples/decoding/decoding_unsupervised_spatial_filter.py": { + "a": 2, + "d": 3 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 5, + "d": 7 + }, + "examples/decoding/ems_filtering.py": { + "a": 3, + "d": 3 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 6, + "d": 7 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 5, + "d": 5 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 1, + "d": 0 + }, + "examples/forward/forward_sensitivity_maps.py": { + "a": 3, + "d": 2 + }, + "examples/inverse/compute_mne_inverse_epochs_in_label.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/compute_mne_inverse_volume.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/custom_inverse_solver.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/dics_epochs.py": { + "a": 3, + "d": 2 + }, + "examples/inverse/dics_source_power.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 3, + "d": 2 + }, + "examples/inverse/gamma_map_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/label_activation_from_stc.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/label_from_stc.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/label_source_activations.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/mne_cov_power.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/morph_volume_stc.py": { + "a": 3, + "d": 2 + }, + "examples/inverse/multi_dipole_model.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/multidict_reweighted_tfmxne.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/psf_ctf_label_leakage.py": { + "a": 4, + "d": 6 + }, + 
"examples/inverse/psf_ctf_vertices.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/psf_ctf_vertices_lcmv.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/psf_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/rap_music.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/resolution_metrics.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/snr_estimate.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/source_space_snr.py": { + "a": 4, + "d": 3 + }, + "examples/inverse/time_frequency_mixed_norm_inverse.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/trap_music.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/vector_mne_solution.py": { + "a": 2, + "d": 1 + }, + "examples/io/elekta_epochs.py": { + "a": 2, + "d": 1 + }, + "examples/io/read_neo_format.py": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/css.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/define_target_events.py": { + "a": 3, + "d": 2 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/eog_regression.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/find_ref_artifacts.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/fnirs_artifact_removal.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/ica_comparison.py": { + "a": 1, + "d": 2 + }, + "examples/preprocessing/muscle_detection.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/shift_evoked.py": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/xdawn_denoising.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/plot_stc_metrics.py": { + "a": 7, + "d": 6 + }, + "examples/simulation/simulate_evoked_data.py": { + "a": 2, + "d": 2 + }, + "examples/simulation/simulate_raw_data.py": { + "a": 6, + "d": 6 + }, + "examples/stats/cluster_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/fdr_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/sensor_permutation_test.py": { + "a": 1, + "d": 1 + }, + "examples/stats/sensor_regression.py": { + "a": 3, + "d": 2 + }, + "examples/time_frequency/compute_csd.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/compute_source_psd_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/source_label_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/source_power_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/temporal_whitening.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 4, + "d": 3 + }, + "examples/time_frequency/time_frequency_simulated.py": { + "a": 4, + "d": 4 + }, + "examples/visualization/3d_to_2d.py": { + "a": 6, + "d": 2 + }, + "examples/visualization/channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/evoked_arrowmap.py": { + "a": 2, + "d": 1 + }, + "examples/visualization/evoked_topomap.py": { + "a": 2, + "d": 2 + }, + "examples/visualization/evoked_whitening.py": { + "a": 1, + "d": 2 + }, + "examples/visualization/montage_sgskip.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/roi_erpimage_by_rt.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/ssp_projs_sensitivity_map.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/topo_compare_conditions.py": { + "a": 2, + "d": 2 + }, + "examples/visualization/topo_customized.py": { + "a": 2, + "d": 2 + }, + 
"mne/__init__.pyi": { + "a": 25, + "d": 25 + }, + "mne/_fiff/_digitization.py": { + "a": 3, + "d": 4 + }, + "mne/_fiff/compensator.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/ctf_comp.py": { + "a": 3, + "d": 4 + }, + "mne/_fiff/matrix.py": { + "a": 3, + "d": 3 + }, + "mne/_fiff/meas_info.py": { + "a": 68, + "d": 68 + }, + "mne/_fiff/open.py": { + "a": 4, + "d": 4 + }, + "mne/_fiff/pick.py": { + "a": 6, + "d": 6 + }, + "mne/_fiff/proc_history.py": { + "a": 10, + "d": 10 + }, + "mne/_fiff/proj.py": { + "a": 20, + "d": 20 + }, + "mne/_fiff/reference.py": { + "a": 17, + "d": 18 + }, + "mne/_fiff/tag.py": { + "a": 8, + "d": 8 + }, + "mne/_fiff/tests/test_compensator.py": { + "a": 3, + "d": 3 + }, + "mne/_fiff/tests/test_constants.py": { + "a": 4, + "d": 5 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 41, + "d": 41 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 23, + "d": 23 + }, + "mne/_fiff/tests/test_proc_history.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 13, + "d": 13 + }, + "mne/_fiff/tests/test_what.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tree.py": { + "a": 3, + "d": 4 + }, + "mne/_fiff/utils.py": { + "a": 2, + "d": 1 + }, + "mne/_fiff/what.py": { + "a": 6, + "d": 6 + }, + "mne/_fiff/write.py": { + "a": 4, + "d": 4 + }, + "mne/_freesurfer.py": { + "a": 11, + "d": 10 + }, + "mne/_ola.py": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 43, + "d": 42 + }, + "mne/baseline.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/__init__.pyi": { + "a": 10, + "d": 10 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 9, + "d": 9 + }, + "mne/beamformer/_dics.py": { + "a": 14, + "d": 14 + }, + "mne/beamformer/_lcmv.py": { + "a": 10, + "d": 10 + }, + "mne/beamformer/_rap_music.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 10, + "d": 10 + }, + "mne/beamformer/tests/test_external.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 18, + "d": 19 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 2, + "d": 3 + }, + "mne/beamformer/tests/test_resolution_matrix.py": { + "a": 2, + "d": 1 + }, + "mne/bem.py": { + "a": 32, + "d": 32 + }, + "mne/channels/__init__.pyi": { + "a": 17, + "d": 17 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 2, + "d": 4 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 5, + "d": 6 + }, + "mne/channels/channels.py": { + "a": 35, + "d": 35 + }, + "mne/channels/interpolation.py": { + "a": 3, + "d": 4 + }, + "mne/channels/layout.py": { + "a": 6, + "d": 6 + }, + "mne/channels/montage.py": { + "a": 36, + "d": 38 + }, + "mne/channels/tests/test_channels.py": { + "a": 25, + "d": 25 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 6, + "d": 7 + }, + "mne/channels/tests/test_layout.py": { + "a": 9, + "d": 9 + }, + "mne/channels/tests/test_montage.py": { + "a": 38, + "d": 41 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 6, + "d": 8 + }, + "mne/channels/tests/test_unify_bads.py": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 26, + "d": 26 + }, + "mne/commands/mne_anonymize.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_browse_raw.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_compare_fiff.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_coreg.py": { + "a": 1, + "d": 1 + }, + 
"mne/commands/mne_freeview_bem_surfaces.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_maxfilter.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_prepare_bem_model.py": { + "a": 3, + "d": 2 + }, + "mne/commands/mne_report.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_setup_forward_model.py": { + "a": 3, + "d": 2 + }, + "mne/commands/mne_setup_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_show_fiff.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_show_info.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_sys_info.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 1, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 17, + "d": 17 + }, + "mne/commands/utils.py": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 17, + "d": 17 + }, + "mne/coreg.py": { + "a": 26, + "d": 27 + }, + "mne/cov.py": { + "a": 52, + "d": 50 + }, + "mne/cuda.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/__init__.pyi": { + "a": 15, + "d": 15 + }, + "mne/datasets/_fetch.py": { + "a": 7, + "d": 8 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/_infant/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/_phantom/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 4, + "d": 5 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 3, + "d": 3 + }, + "mne/datasets/epilepsy_ecog/_data.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/erp_core/erp_core.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/eyelink/eyelink.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/fnirs_motor/fnirs_motor.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 4, + "d": 3 + }, + "mne/datasets/kiloword/kiloword.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/limo/limo.py": { + "a": 4, + "d": 4 + }, + "mne/datasets/misc/_misc.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/mtrf/mtrf.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/multimodal/multimodal.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/opm/opm.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/phantom_4dbti/phantom_4dbti.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/refmeg_noise/refmeg_noise.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sample/sample.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 7, + "d": 2 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 8, + "d": 6 + }, + "mne/datasets/somato/somato.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 3, + "d": 4 + }, + "mne/datasets/ssvep/ssvep.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/testing/_testing.py": { + "a": 3, + "d": 3 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 6, + "d": 8 + }, + "mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 15, + "d": 15 + }, + "mne/datasets/visual_92_categories/visual_92_categories.py": { + "a": 1, + "d": 1 + }, + 
"mne/decoding/base.py": { + "a": 7, + "d": 5 + }, + "mne/decoding/csp.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/ems.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/receptive_field.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/search_light.py": { + "a": 4, + "d": 3 + }, + "mne/decoding/ssd.py": { + "a": 6, + "d": 6 + }, + "mne/decoding/tests/test_base.py": { + "a": 16, + "d": 17 + }, + "mne/decoding/tests/test_csp.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/tests/test_ems.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 6, + "d": 7 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 9, + "d": 9 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 6, + "d": 7 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/time_frequency.py": { + "a": 4, + "d": 3 + }, + "mne/decoding/transformer.py": { + "a": 7, + "d": 8 + }, + "mne/dipole.py": { + "a": 28, + "d": 31 + }, + "mne/epochs.py": { + "a": 72, + "d": 73 + }, + "mne/event.py": { + "a": 14, + "d": 14 + }, + "mne/evoked.py": { + "a": 49, + "d": 50 + }, + "mne/export/__init__.pyi": { + "a": 1, + "d": 1 + }, + "mne/export/_edf.py": { + "a": 1, + "d": 0 + }, + "mne/export/_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/export/_egimff.py": { + "a": 4, + "d": 4 + }, + "mne/export/_export.py": { + "a": 1, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 11, + "d": 11 + }, + "mne/filter.py": { + "a": 11, + "d": 12 + }, + "mne/fixes.py": { + "a": 6, + "d": 5 + }, + "mne/forward/__init__.pyi": { + "a": 30, + "d": 30 + }, + "mne/forward/_compute_forward.py": { + "a": 6, + "d": 6 + }, + "mne/forward/_field_interpolation.py": { + "a": 10, + "d": 10 + }, + "mne/forward/_lead_dots.py": { + "a": 1, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 22, + "d": 24 + }, + "mne/forward/forward.py": { + "a": 37, + "d": 38 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 7, + "d": 9 + }, + "mne/forward/tests/test_forward.py": { + "a": 17, + "d": 17 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 29, + "d": 31 + }, + "mne/gui/__init__.pyi": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 34, + "d": 35 + }, + "mne/gui/_gui.py": { + "a": 3, + "d": 3 + }, + "mne/gui/tests/test_coreg.py": { + "a": 6, + "d": 6 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 3, + "d": 2 + }, + "mne/inverse_sparse/__init__.pyi": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 15, + "d": 16 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 6, + "d": 6 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 8, + "d": 8 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 14, + "d": 12 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 10, + "d": 10 + }, + "mne/io/__init__.pyi": { + "a": 25, + "d": 25 + }, + "mne/io/_fiff_wrap.py": { + "a": 6, + "d": 4 + }, + "mne/io/_read_raw.py": { + "a": 14, + "d": 14 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 6, + "d": 6 + }, + "mne/io/artemis123/artemis123.py": { + "a": 11, + "d": 11 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 6, + "d": 6 + }, + "mne/io/artemis123/utils.py": { + "a": 4, + "d": 2 + }, + 
"mne/io/base.py": { + "a": 55, + "d": 55 + }, + "mne/io/besa/besa.py": { + "a": 2, + "d": 1 + }, + "mne/io/besa/tests/test_besa.py": { + "a": 3, + "d": 3 + }, + "mne/io/boxy/boxy.py": { + "a": 2, + "d": 2 + }, + "mne/io/boxy/tests/test_boxy.py": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 3 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 7, + "d": 7 + }, + "mne/io/bti/bti.py": { + "a": 14, + "d": 14 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 15, + "d": 15 + }, + "mne/io/cnt/_utils.py": { + "a": 3, + "d": 3 + }, + "mne/io/cnt/cnt.py": { + "a": 8, + "d": 10 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 3, + "d": 3 + }, + "mne/io/ctf/ctf.py": { + "a": 11, + "d": 13 + }, + "mne/io/ctf/eeg.py": { + "a": 5, + "d": 5 + }, + "mne/io/ctf/hc.py": { + "a": 2, + "d": 3 + }, + "mne/io/ctf/info.py": { + "a": 10, + "d": 12 + }, + "mne/io/ctf/markers.py": { + "a": 3, + "d": 2 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 10, + "d": 10 + }, + "mne/io/ctf/trans.py": { + "a": 5, + "d": 5 + }, + "mne/io/curry/curry.py": { + "a": 11, + "d": 11 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 9, + "d": 10 + }, + "mne/io/edf/edf.py": { + "a": 6, + "d": 7 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 8, + "d": 9 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 4, + "d": 4 + }, + "mne/io/eeglab/eeglab.py": { + "a": 12, + "d": 11 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 7, + "d": 7 + }, + "mne/io/egi/egi.py": { + "a": 5, + "d": 5 + }, + "mne/io/egi/egimff.py": { + "a": 14, + "d": 14 + }, + "mne/io/egi/events.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/general.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 5, + "d": 5 + }, + "mne/io/eximia/eximia.py": { + "a": 3, + "d": 3 + }, + "mne/io/eximia/tests/test_eximia.py": { + "a": 1, + "d": 1 + }, + "mne/io/eyelink/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/eyelink/eyelink.py": { + "a": 2, + "d": 2 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 4, + "d": 5 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 6, + "d": 6 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 1, + "d": 2 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 6, + "d": 6 + }, + "mne/io/fieldtrip/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 15, + "d": 17 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 21, + "d": 21 + }, + "mne/io/fil/fil.py": { + "a": 8, + "d": 9 + }, + "mne/io/fil/sensors.py": { + "a": 1, + "d": 0 + }, + "mne/io/fil/tests/test_fil.py": { + "a": 4, + "d": 7 + }, + "mne/io/hitachi/hitachi.py": { + "a": 4, + "d": 4 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 4, + "d": 5 + }, + "mne/io/kit/coreg.py": { + "a": 8, + "d": 9 + }, + "mne/io/kit/kit.py": { + "a": 15, + "d": 15 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 10, + "d": 10 + }, + "mne/io/nedf/nedf.py": { + "a": 3, + "d": 3 + }, + "mne/io/nedf/tests/test_nedf.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 8, + "d": 7 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 2, + "d": 2 + }, + "mne/io/nihon/nihon.py": { + "a": 3, + "d": 3 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 4, + "d": 4 + }, + "mne/io/nirx/nirx.py": { + "a": 15, + "d": 15 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 7, + "d": 7 + }, + "mne/io/nsx/nsx.py": { + "a": 3, + "d": 5 + }, + "mne/io/nsx/tests/test_nsx.py": { + "a": 5, + "d": 6 + }, + "mne/io/persyst/persyst.py": { + "a": 2, + "d": 2 
+ }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 9, + "d": 8 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 8, + "d": 8 + }, + "mne/io/tests/test_apply_function.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 16, + "d": 16 + }, + "mne/io/tests/test_read_raw.py": { + "a": 1, + "d": 2 + }, + "mne/label.py": { + "a": 12, + "d": 12 + }, + "mne/minimum_norm/__init__.pyi": { + "a": 15, + "d": 15 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 31, + "d": 33 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 6, + "d": 6 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 31, + "d": 32 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 4, + "d": 3 + }, + "mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 7, + "d": 9 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 12, + "d": 12 + }, + "mne/morph.py": { + "a": 22, + "d": 17 + }, + "mne/morph_map.py": { + "a": 7, + "d": 7 + }, + "mne/parallel.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/__init__.pyi": { + "a": 29, + "d": 29 + }, + "mne/preprocessing/_annotate_amplitude.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/_csd.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/_css.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 11, + "d": 12 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_regress.py": { + "a": 8, + "d": 8 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 9, + "d": 9 + }, + "mne/preprocessing/ecg.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/eog.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/eyetracking/_pupillometry.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/eyetracking/tests/test_calibration.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/eyetracking/tests/test_pupillometry.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/hfc.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 61, + "d": 64 + }, + "mne/preprocessing/ieeg/_projection.py": { + "a": 6, + "d": 5 + }, + "mne/preprocessing/ieeg/_volume.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/ieeg/tests/test_volume.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/interpolate.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 27, + "d": 28 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 13, + "d": 14 + }, + "mne/preprocessing/nirs/tests/test_optical_density.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/nirs/tests/test_scalp_coupling_index.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 2, + "d": 2 + 
}, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/realign.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/stim.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/tests/test_annotate_amplitude.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_annotate_nan.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 6, + "d": 8 + }, + "mne/preprocessing/tests/test_css.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ctps.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 6, + "d": 6 + }, + "mne/preprocessing/tests/test_hfc.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 21, + "d": 19 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 2, + "d": 5 + }, + "mne/preprocessing/tests/test_interpolate.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 19, + "d": 20 + }, + "mne/preprocessing/tests/test_otp.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/tests/test_peak_finder.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_realign.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 6, + "d": 7 + }, + "mne/preprocessing/xdawn.py": { + "a": 4, + "d": 4 + }, + "mne/proj.py": { + "a": 15, + "d": 15 + }, + "mne/rank.py": { + "a": 14, + "d": 14 + }, + "mne/report/__init__.pyi": { + "a": 1, + "d": 1 + }, + "mne/report/js_and_css/bootstrap-icons/gen_css_for_mne.py": { + "a": 2, + "d": 2 + }, + "mne/report/report.py": { + "a": 48, + "d": 49 + }, + "mne/report/tests/test_report.py": { + "a": 10, + "d": 11 + }, + "mne/simulation/__init__.pyi": { + "a": 4, + "d": 4 + }, + "mne/simulation/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/metrics/metrics.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/metrics/tests/test_metrics.py": { + "a": 5, + "d": 6 + }, + "mne/simulation/raw.py": { + "a": 19, + "d": 19 + }, + "mne/simulation/source.py": { + "a": 7, + "d": 7 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 10, + "d": 10 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 25, + "d": 25 + }, + "mne/simulation/tests/test_source.py": { + "a": 6, + "d": 7 + }, + "mne/source_estimate.py": { + "a": 30, + "d": 30 + }, + "mne/source_space/__init__.pyi": { + "a": 3, + "d": 3 + }, + "mne/source_space/_source_space.py": { + "a": 53, + "d": 54 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 20, + "d": 19 + }, + "mne/stats/__init__.pyi": { + "a": 13, + "d": 13 + }, + "mne/stats/_adjacency.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 12, + "d": 12 + }, + "mne/stats/parametric.py": { + "a": 1, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 2, + "d": 1 + }, + "mne/stats/regression.py": { + "a": 4, + "d": 4 + }, + "mne/stats/tests/test_adjacency.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 12, + "d": 13 + }, + 
"mne/stats/tests/test_multi_comp.py": { + "a": 3, + "d": 3 + }, + "mne/stats/tests/test_parametric.py": { + "a": 3, + "d": 3 + }, + "mne/stats/tests/test_permutations.py": { + "a": 3, + "d": 3 + }, + "mne/stats/tests/test_regression.py": { + "a": 2, + "d": 3 + }, + "mne/surface.py": { + "a": 23, + "d": 24 + }, + "mne/tests/test_annotations.py": { + "a": 22, + "d": 22 + }, + "mne/tests/test_bem.py": { + "a": 17, + "d": 17 + }, + "mne/tests/test_chpi.py": { + "a": 22, + "d": 22 + }, + "mne/tests/test_coreg.py": { + "a": 22, + "d": 22 + }, + "mne/tests/test_cov.py": { + "a": 24, + "d": 25 + }, + "mne/tests/test_defaults.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 26, + "d": 28 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 30, + "d": 30 + }, + "mne/tests/test_event.py": { + "a": 15, + "d": 15 + }, + "mne/tests/test_evoked.py": { + "a": 11, + "d": 11 + }, + "mne/tests/test_filter.py": { + "a": 17, + "d": 17 + }, + "mne/tests/test_freesurfer.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_import_nesting.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_label.py": { + "a": 19, + "d": 21 + }, + "mne/tests/test_line_endings.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_morph.py": { + "a": 17, + "d": 16 + }, + "mne/tests/test_morph_map.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_ola.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_parallel.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 15, + "d": 15 + }, + "mne/tests/test_rank.py": { + "a": 7, + "d": 9 + }, + "mne/tests/test_source_estimate.py": { + "a": 37, + "d": 38 + }, + "mne/tests/test_surface.py": { + "a": 14, + "d": 14 + }, + "mne/tests/test_transforms.py": { + "a": 31, + "d": 29 + }, + "mne/time_frequency/__init__.pyi": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/csd.py": { + "a": 13, + "d": 13 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 5, + "d": 5 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 2, + "d": 3 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 14, + "d": 13 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 4, + "d": 5 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 8, + "d": 10 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 16, + "d": 16 + }, + "mne/time_frequency/tfr.py": { + "a": 37, + "d": 38 + }, + "mne/transforms.py": { + "a": 18, + "d": 19 + }, + "mne/utils/__init__.pyi": { + "a": 148, + "d": 148 + }, + "mne/utils/_bunch.py": { + "a": 0, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 6, + "d": 6 + }, + "mne/utils/_testing.py": { + "a": 7, + "d": 7 + }, + "mne/utils/check.py": { + "a": 14, + "d": 13 + }, + "mne/utils/config.py": { + "a": 2, + "d": 3 + }, + "mne/utils/dataframe.py": { + "a": 2, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 7, + "d": 7 + }, + "mne/utils/mixin.py": { + "a": 8, + "d": 9 + }, + "mne/utils/numerics.py": { + "a": 9, + "d": 9 + }, + "mne/utils/progressbar.py": { + "a": 4, + "d": 4 + }, + "mne/utils/spectrum.py": { + "a": 1, + "d": 0 + }, + "mne/utils/tests/test_bunch.py": { + "a": 2, + "d": 1 + }, + 
"mne/utils/tests/test_check.py": { + "a": 14, + "d": 14 + }, + "mne/utils/tests/test_config.py": { + "a": 6, + "d": 5 + }, + "mne/utils/tests/test_docs.py": { + "a": 6, + "d": 5 + }, + "mne/utils/tests/test_linalg.py": { + "a": 2, + "d": 2 + }, + "mne/utils/tests/test_logging.py": { + "a": 9, + "d": 9 + }, + "mne/utils/tests/test_misc.py": { + "a": 2, + "d": 2 + }, + "mne/utils/tests/test_mixin.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_numerics.py": { + "a": 22, + "d": 23 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 58, + "d": 55 + }, + "mne/viz/_3d_overlay.py": { + "a": 2, + "d": 1 + }, + "mne/viz/__init__.pyi": { + "a": 51, + "d": 51 + }, + "mne/viz/_brain/_brain.py": { + "a": 55, + "d": 56 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_scraper.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/surface.py": { + "a": 3, + "d": 2 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 17, + "d": 18 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 10, + "d": 5 + }, + "mne/viz/_dipole.py": { + "a": 5, + "d": 5 + }, + "mne/viz/_figure.py": { + "a": 16, + "d": 8 + }, + "mne/viz/_mpl_figure.py": { + "a": 6, + "d": 5 + }, + "mne/viz/_proj.py": { + "a": 5, + "d": 4 + }, + "mne/viz/backends/_abstract.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 47, + "d": 49 + }, + "mne/viz/backends/_pyvista.py": { + "a": 22, + "d": 23 + }, + "mne/viz/backends/_qt.py": { + "a": 71, + "d": 71 + }, + "mne/viz/backends/_utils.py": { + "a": 6, + "d": 6 + }, + "mne/viz/backends/renderer.py": { + "a": 15, + "d": 15 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/tests/test_abstract.py": { + "a": 3, + "d": 1 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 5, + "d": 5 + }, + "mne/viz/circle.py": { + "a": 4, + "d": 4 + }, + "mne/viz/conftest.py": { + "a": 6, + "d": 6 + }, + "mne/viz/epochs.py": { + "a": 16, + "d": 16 + }, + "mne/viz/evoked.py": { + "a": 50, + "d": 46 + }, + "mne/viz/evoked_field.py": { + "a": 15, + "d": 19 + }, + "mne/viz/ica.py": { + "a": 24, + "d": 24 + }, + "mne/viz/misc.py": { + "a": 22, + "d": 20 + }, + "mne/viz/montage.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 9, + "d": 9 + }, + "mne/viz/tests/test_3d.py": { + "a": 30, + "d": 30 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 8, + "d": 8 + }, + "mne/viz/tests/test_figure.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_ica.py": { + "a": 6, + "d": 6 + }, + "mne/viz/tests/test_misc.py": { + "a": 12, + "d": 12 + }, + "mne/viz/tests/test_montage.py": { + "a": 2, + "d": 3 + }, + "mne/viz/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_scraper.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 5, + "d": 7 + }, + "mne/viz/tests/test_topomap.py": { + "a": 21, + "d": 24 + }, + "mne/viz/tests/test_utils.py": { + "a": 12, + "d": 12 + }, + "mne/viz/topo.py": { + "a": 11, + "d": 8 + }, + "mne/viz/topomap.py": { + "a": 53, + "d": 39 + }, + "mne/viz/ui_events.py": { + "a": 6, + "d": 5 + }, + "mne/viz/utils.py": { + "a": 37, + "d": 33 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + 
"tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/60_sleep.py": { + "a": 5, + "d": 8 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 1, + "d": 0 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 3, + "d": 2 + }, + "tutorials/evoked/30_eeg_erp.py": { + "a": 2, + "d": 1 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 2, + "d": 2 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 4, + "d": 4 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 1, + "d": 0 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 0 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 1, + "d": 0 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 1, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 4, + "d": 2 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 4, + "d": 5 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 2, + "d": 2 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 3, + "d": 2 + }, + "tutorials/inverse/40_mne_fixed_free.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 3, + "d": 2 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 3, + "d": 3 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/90_phantom_4DBTi.py": { + "a": 3, + "d": 1 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 1, + "d": 0 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 2, + "d": 2 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 4, + "d": 5 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 6, + "d": 7 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 2, + "d": 3 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 3, + "d": 1 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 3, + "d": 1 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 5, + "d": 3 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 4, + "d": 2 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 3, + "d": 3 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 3, + "d": 1 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 2, + "d": 0 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 1, + "d": 0 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 2, + "d": 3 + }, + "tutorials/simulation/80_dics.py": { + "a": 4, + "d": 4 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 4, + "d": 4 + }, + 
"tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 3, + "d": 3 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 3, + "d": 3 + }, + "tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py": { + "a": 3, + "d": 3 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 5, + "d": 5 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 5, + "d": 6 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 2, + "d": 1 + }, + "tutorials/visualization/10_publication_figure.py": { + "a": 2, + "d": 2 + }, + "tutorials/visualization/20_ui_events.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12098.json b/doc/sphinxext/prs/12098.json new file mode 100644 index 00000000000..f638ba02758 --- /dev/null +++ b/doc/sphinxext/prs/12098.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c7c8a2919734963726cd2d04fa9e01002b7f843c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 0 + }, + "mne/gui/_coreg.py": { + "a": 98, + "d": 40 + }, + "mne/gui/tests/test_coreg.py": { + "a": 6, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12099.json b/doc/sphinxext/prs/12099.json new file mode 100644 index 00000000000..05076da29d9 --- /dev/null +++ b/doc/sphinxext/prs/12099.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e1545e6214f09ffe0a84fed0bef9a17bc1a6386f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_templates/sidebar-quicklinks.html": { + "a": 4, + "d": 4 + }, + "doc/conf.py": { + "a": 143, + "d": 119 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12102.json b/doc/sphinxext/prs/12102.json new file mode 100644 index 00000000000..e9518ba5dd8 --- /dev/null +++ b/doc/sphinxext/prs/12102.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a5ce1cbf7b29fdfd964bd9eb36b10319d2b3025b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".git-blame-ignore-revs": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12103.json b/doc/sphinxext/prs/12103.json new file mode 100644 index 00000000000..f5a39e071ca --- /dev/null +++ b/doc/sphinxext/prs/12103.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "a5eb54846d0d5efb6f2519490d23569516c17d6d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "mne/viz/_mpl_figure.py": { + "a": 10, + "d": 33 + }, + "mne/viz/epochs.py": { + "a": 13, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 6 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 0, + "d": 1 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 1, + "d": 0 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12105.json b/doc/sphinxext/prs/12105.json new file mode 100644 index 00000000000..f07da264b09 --- /dev/null +++ b/doc/sphinxext/prs/12105.json @@ -0,0 
+1,87 @@ +{ + "merge_commit_sha": "16f4411162b9d4a90fcecb2559c45d1191b09fb5", + "authors": [ + { + "n": "Judy D Zhu", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 7, + "d": 0 + }, + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "doc/api/datasets.rst": { + "a": 2, + "d": 1 + }, + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/documentation/datasets.rst": { + "a": 12, + "d": 0 + }, + "doc/references.bib": { + "a": 13, + "d": 0 + }, + "mne/datasets/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 8, + "d": 0 + }, + "mne/datasets/phantom_kit/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/phantom_kit/phantom_kit.py": { + "a": 28, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 67, + "d": 2 + }, + "mne/event.py": { + "a": 13, + "d": 9 + }, + "mne/tests/test_dipole.py": { + "a": 18, + "d": 7 + }, + "mne/transforms.py": { + "a": 2, + "d": 25 + }, + "mne/utils/config.py": { + "a": 4, + "d": 4 + }, + "tools/circleci_download.sh": { + "a": 3, + "d": 0 + }, + "tutorials/inverse/95_phantom_KIT.py": { + "a": 186, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12110.json b/doc/sphinxext/prs/12110.json new file mode 100644 index 00000000000..a3280398cf9 --- /dev/null +++ b/doc/sphinxext/prs/12110.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "875df8285e25ca4964566f2b900b808be65845d7", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + }, + { + "n": "Scott Huberty", + "e": "52462026+scott-huberty@users.noreply.github.com" + } + ], + "changes": { + "doc/api/covariance.rst": { + "a": 1, + "d": 1 + }, + "doc/api/creating_from_arrays.rst": { + "a": 1, + "d": 1 + }, + "doc/api/datasets.rst": { + "a": 1, + "d": 1 + }, + "doc/api/decoding.rst": { + "a": 2, + "d": 2 + }, + "doc/api/events.rst": { + "a": 3, + "d": 3 + }, + "doc/api/export.rst": { + "a": 1, + "d": 1 + }, + "doc/api/file_io.rst": { + "a": 2, + "d": 2 + }, + "doc/api/forward.rst": { + "a": 3, + "d": 3 + }, + "doc/api/inverse.rst": { + "a": 5, + "d": 5 + }, + "doc/api/logging.rst": { + "a": 3, + "d": 3 + }, + "doc/api/most_used_classes.rst": { + "a": 1, + "d": 1 + }, + "doc/api/mri.rst": { + "a": 1, + "d": 1 + }, + "doc/api/preprocessing.rst": { + "a": 11, + "d": 11 + }, + "doc/api/reading_raw_data.rst": { + "a": 3, + "d": 3 + }, + "doc/api/report.rst": { + "a": 1, + "d": 1 + }, + "doc/api/sensor_space.rst": { + "a": 2, + "d": 2 + }, + "doc/api/simulation.rst": { + "a": 2, + "d": 2 + }, + "doc/api/source_space.rst": { + "a": 1, + "d": 1 + }, + "doc/api/statistics.rst": { + "a": 4, + "d": 4 + }, + "doc/api/time_frequency.rst": { + "a": 4, + "d": 4 + }, + "doc/api/visualization.rst": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12113.json b/doc/sphinxext/prs/12113.json new file mode 100644 index 00000000000..1da3a47bb0e --- /dev/null +++ b/doc/sphinxext/prs/12113.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b45837b5e1e4606e25701b85e525caf2f87dd523", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 54, + "d": 50 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 24, + "d": 8 + } + } 
+} \ No newline at end of file diff --git a/doc/sphinxext/prs/12115.json b/doc/sphinxext/prs/12115.json new file mode 100644 index 00000000000..0ed212cd688 --- /dev/null +++ b/doc/sphinxext/prs/12115.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4beb8dde7588c3153ee0a240b5e363dc987c95f1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12118.json b/doc/sphinxext/prs/12118.json new file mode 100644 index 00000000000..c940c1f82d6 --- /dev/null +++ b/doc/sphinxext/prs/12118.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "60db738c20fbfc84cb5468252742610522974aac", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 72, + "d": 20 + }, + "mne/tests/test_epochs.py": { + "a": 86, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12119.json b/doc/sphinxext/prs/12119.json new file mode 100644 index 00000000000..4fa23ce1421 --- /dev/null +++ b/doc/sphinxext/prs/12119.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f555d5f7c7389a6a3960e83253828b60fc816ce1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/report/report.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1212.json b/doc/sphinxext/prs/1212.json new file mode 100644 index 00000000000..a3f35d50e6f --- /dev/null +++ b/doc/sphinxext/prs/1212.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bf9cf919730414f911ddfc3e71cd361d145617b6", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12121.json b/doc/sphinxext/prs/12121.json new file mode 100644 index 00000000000..d59b2944760 --- /dev/null +++ b/doc/sphinxext/prs/12121.json @@ -0,0 +1,255 @@ +{ + "merge_commit_sha": "7b3e3c931914ee655486e7b8d5a5a30668ce136f", + "authors": [ + { + "n": "Pablo Mainar", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "examples/datasets/kernel_phantom.py": { + "a": 1, + "d": 0 + }, + "examples/datasets/limo_data.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/decoding_unsupervised_spatial_filter.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/ems_filtering.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/otp.py": { + "a": 1, + "d": 2 + }, + "examples/stats/sensor_regression.py": { + "a": 8, + "d": 7 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 5, + "d": 9 + }, + "mne/beamformer/_dics.py": { + "a": 
1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/tests/test_base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_csp.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/tests/test_ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 5, + "d": 5 + }, + "mne/epochs.py": { + "a": 107, + "d": 24 + }, + "mne/forward/_field_interpolation.py": { + "a": 5, + "d": 1 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 9, + "d": 9 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/xdawn.py": { + "a": 3, + "d": 3 + }, + "mne/rank.py": { + "a": 1, + "d": 2 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_regression.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 28, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_rank.py": { + "a": 21, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_mixin.py": { + "a": 5, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 4, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 3, + "d": 1 + }, + "tutorials/epochs/15_baseline_regression.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12122.json b/doc/sphinxext/prs/12122.json new file mode 100644 index 00000000000..2c5c707ab7a --- /dev/null +++ b/doc/sphinxext/prs/12122.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c54d229913c5fe1bdb7a76b70930f66a89601c95", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/egimff.py": { + "a": 11, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12124.json b/doc/sphinxext/prs/12124.json new file mode 100644 index 00000000000..91de2550750 --- /dev/null +++ 
b/doc/sphinxext/prs/12124.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8c003e843717e316ba85008eff2298917489bbd1", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12128.json b/doc/sphinxext/prs/12128.json new file mode 100644 index 00000000000..df95be01bde --- /dev/null +++ b/doc/sphinxext/prs/12128.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "89b5a533867012a122fc43dfe57a18ad530c5906", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/utils/check.py": { + "a": 1, + "d": 2 + }, + "mne/utils/tests/test_check.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12129.json b/doc/sphinxext/prs/12129.json new file mode 100644 index 00000000000..9f2283e1f51 --- /dev/null +++ b/doc/sphinxext/prs/12129.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a320b631a7c5b06201505f7f86b1d163074d8420", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1213.json b/doc/sphinxext/prs/1213.json new file mode 100644 index 00000000000..97b205e5b13 --- /dev/null +++ b/doc/sphinxext/prs/1213.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "78a29b5b50797da0c80754ab4cd36380c743b769", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 101, + "d": 1 + }, + "mne/fiff/meas_info.py": { + "a": 140, + "d": 2 + }, + "mne/fiff/proj.py": { + "a": 22, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 107, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12133.json b/doc/sphinxext/prs/12133.json new file mode 100644 index 00000000000..be89431f29d --- /dev/null +++ b/doc/sphinxext/prs/12133.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4518a1a918051542ab7360ff63c529121a499417", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/development/governance.rst": { + "a": 1, + "d": 1 + }, + "doc/overview/people.rst": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12135.json b/doc/sphinxext/prs/12135.json new file mode 100644 index 00000000000..bbbac60bf70 --- /dev/null +++ b/doc/sphinxext/prs/12135.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "debc275b795ea4cf4da72a74ee586c82b2e154fc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/io/bti/read.py": { + "a": 9, + "d": 3 + }, + "mne/io/ctf/res4.py": { + "a": 16, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 3, + "d": 3 + }, + "mne/io/egi/egi.py": { + "a": 3, + "d": 3 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/nihon/nihon.py": { + "a": 4, + "d": 4 + }, + "mne/io/nsx/nsx.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/realign.py": { + "a": 7, + "d": 6 + }, + "mne/report/tests/test_report.py": { + "a": 2, + "d": 0 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 9, + "d": 2 + } + } +} \ 
No newline at end of file diff --git a/doc/sphinxext/prs/12139.json b/doc/sphinxext/prs/12139.json new file mode 100644 index 00000000000..5a01db2f469 --- /dev/null +++ b/doc/sphinxext/prs/12139.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c733e7b4d471eb72231b6492aa3031efc28d426d", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + ".lgtm.yml": { + "a": 0, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12140.json b/doc/sphinxext/prs/12140.json new file mode 100644 index 00000000000..83e14c8be02 --- /dev/null +++ b/doc/sphinxext/prs/12140.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b9983df4673dbcb36155509c3b3c4b4352798402", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "examples/preprocessing/eeg_bridging.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12142.json b/doc/sphinxext/prs/12142.json new file mode 100644 index 00000000000..7d39a4dd780 --- /dev/null +++ b/doc/sphinxext/prs/12142.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "cdad29dac30041ee79d4e719cb53ad63ffdf21a9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 11, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 5, + "d": 3 + }, + "mne/viz/tests/test_evoked.py": { + "a": 7, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12143.json b/doc/sphinxext/prs/12143.json new file mode 100644 index 00000000000..feb0f47f030 --- /dev/null +++ b/doc/sphinxext/prs/12143.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "58e8c7e779e741fe4168b4b0dd723aba7552a665", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12145.json b/doc/sphinxext/prs/12145.json new file mode 100644 index 00000000000..9e764964db7 --- /dev/null +++ b/doc/sphinxext/prs/12145.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "b9cab3ceb4e68c979e8496dd9799ab242b951bdc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 15, + "d": 30 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/html_templates/_templates.py": { + "a": 17, + "d": 1 + }, + "mne/html_templates/repr/evoked.html.jinja": { + "a": 1, + "d": 0 + }, + "mne/html_templates/repr/info.html.jinja": { + "a": 98, + "d": 141 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_misc.py": { + "a": 2, + "d": 2 + }, + "tutorials/intro/30_info.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12146.json b/doc/sphinxext/prs/12146.json new file mode 100644 index 00000000000..aaf37d5aa32 --- /dev/null +++ b/doc/sphinxext/prs/12146.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "72225b57014ea05cd26327e57c6852b969cbc570", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 0 + }, + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_sys_info.py": { + "a": 8, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 74, + "d": 3 + }, + "mne/utils/tests/test_config.py": { + "a": 72, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12148.json b/doc/sphinxext/prs/12148.json new file mode 100644 index 00000000000..2d0b2232179 --- /dev/null +++ b/doc/sphinxext/prs/12148.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7ff8c586e4521d0d45833d715fd3055056aec505", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_coreg.py": { + "a": 4, + "d": 3 + }, + "mne/gui/_gui.py": { + "a": 24, + "d": 29 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12150.json b/doc/sphinxext/prs/12150.json new file mode 100644 index 00000000000..1f670bd9440 --- /dev/null +++ b/doc/sphinxext/prs/12150.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3add8f87fd729af9d740ebe5ea3e6f46e95bb782", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12154.json b/doc/sphinxext/prs/12154.json new file mode 100644 index 00000000000..a8d11c4270c --- /dev/null +++ b/doc/sphinxext/prs/12154.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8d86df6780dd39dfcd55544ee358424a69d4b9be", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/parallel.py": { + "a": 7, + "d": 1 + }, + "mne/utils/tests/test_logging.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12155.json b/doc/sphinxext/prs/12155.json new file mode 100644 index 00000000000..f0dc4f3fbfd --- /dev/null +++ b/doc/sphinxext/prs/12155.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3e0e543089b4ad3a4b6b064f45749c86ebbaa081", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12156.json b/doc/sphinxext/prs/12156.json new file mode 100644 index 00000000000..ba31ba13318 --- /dev/null +++ b/doc/sphinxext/prs/12156.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "70a915b08a8a7b1a02fcbfde3e5d4eee4c057717", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 2, + "d": 17 + }, + "mne/report/tests/test_report.py": { + "a": 4, + "d": 3 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12160.json b/doc/sphinxext/prs/12160.json new file mode 100644 index 00000000000..503eee0cb09 --- /dev/null +++ b/doc/sphinxext/prs/12160.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"fcb59266b7616dd99908c6dac0e2295722b0c567", + "authors": [ + { + "n": "Jacob Woessner", + "e": "Woessner.jacob@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "mne/_fiff/meas_info.py": { + "a": 6, + "d": 0 + }, + "mne/_fiff/reference.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12161.json b/doc/sphinxext/prs/12161.json new file mode 100644 index 00000000000..1eeddd2bbad --- /dev/null +++ b/doc/sphinxext/prs/12161.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "fbc666d98f76d02a1917317c3733b1fe94ea95dd", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/v0.17.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v0.18.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/v1.0.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v1.1.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v1.4.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v1.5.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12162.json b/doc/sphinxext/prs/12162.json new file mode 100644 index 00000000000..4daee56a9bf --- /dev/null +++ b/doc/sphinxext/prs/12162.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a4d4c10c6546bd53fa01f7b81ca87c6d2c0f155a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 6, + "d": 0 + }, + "doc/development/contributing.rst": { + "a": 0, + "d": 2 + }, + "doc/help/faq.rst": { + "a": 1, + "d": 0 + }, + "mne/cov.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12163.json b/doc/sphinxext/prs/12163.json new file mode 100644 index 00000000000..feeeb870a87 --- /dev/null +++ b/doc/sphinxext/prs/12163.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "89ec1d156595ca8289b3fc81b9a0ef1119d3655b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 10, + "d": 0 + }, + "doc/_includes/dig_formats.rst": { + "a": 1, + "d": 0 + }, + "doc/development/contributing.rst": { + "a": 2, + "d": 0 + }, + "doc/documentation/design_philosophy.rst": { + "a": 2, + "d": 0 + }, + "doc/documentation/index.rst": { + "a": 2, + "d": 0 + }, + "doc/install/mne_tools_suite.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 5, + "d": 1 + }, + "pyproject.toml": { + "a": 14, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12164.json b/doc/sphinxext/prs/12164.json new file mode 100644 index 00000000000..a3d908f05e8 --- /dev/null +++ b/doc/sphinxext/prs/12164.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ec87fd8976a82ef7541c3114cd7955083eb278d6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 12, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12165.json b/doc/sphinxext/prs/12165.json new file mode 100644 index 00000000000..9c503305ae5 --- /dev/null +++ b/doc/sphinxext/prs/12165.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3bc18ffec21c2d0293d6af6452948d6f56f73b5d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + 
"changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 16, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12166.json b/doc/sphinxext/prs/12166.json new file mode 100644 index 00000000000..b9b23686093 --- /dev/null +++ b/doc/sphinxext/prs/12166.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7b6b79583b5e738de08828670222e690c65085f1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 108, + "d": 88 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12167.json b/doc/sphinxext/prs/12167.json new file mode 100644 index 00000000000..fad605d3fc0 --- /dev/null +++ b/doc/sphinxext/prs/12167.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "87d6be9047bbd5b234e22eafd79ab7695814fb41", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 5, + "d": 3 + }, + "mne/report/tests/test_report.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12168.json b/doc/sphinxext/prs/12168.json new file mode 100644 index 00000000000..b5dd4669fb2 --- /dev/null +++ b/doc/sphinxext/prs/12168.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f02e5576df7cfcee3fa435cafe66602e65ff76ec", + "authors": [ + { + "n": "Rasmus Aagaard", + "e": null + }, + { + "n": "roraa", + "e": "roraa@dtu.dk" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1217.json b/doc/sphinxext/prs/1217.json new file mode 100644 index 00000000000..6663adda40c --- /dev/null +++ b/doc/sphinxext/prs/1217.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "43ad3b252ea3561ba182b7a47955bac252f6763f", + "authors": [ + { + "n": "Andrew R. 
Dykstra", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/channels.py": { + "a": 73, + "d": 23 + }, + "mne/fiff/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 25, + "d": 1 + }, + "mne/fiff/tests/test_raw.py": { + "a": 24, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 25, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12171.json b/doc/sphinxext/prs/12171.json new file mode 100644 index 00000000000..f66eb860a0a --- /dev/null +++ b/doc/sphinxext/prs/12171.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "58afb0fb73e55296f21f6c68a643f2ea98f77483", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/clinical/60_sleep.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12172.json b/doc/sphinxext/prs/12172.json new file mode 100644 index 00000000000..309656dfccc --- /dev/null +++ b/doc/sphinxext/prs/12172.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b988e67d9916ed651c381b867e8bf10c3a470905", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "mne/io/eyelink/_utils.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12174.json b/doc/sphinxext/prs/12174.json new file mode 100644 index 00000000000..1427f66bde3 --- /dev/null +++ b/doc/sphinxext/prs/12174.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8ea98e2dd542735e2ae950e5daf2bdcc363f90bd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12175.json b/doc/sphinxext/prs/12175.json new file mode 100644 index 00000000000..9bea7897126 --- /dev/null +++ b/doc/sphinxext/prs/12175.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "55b0184b94ec1c3d6b2559eeed383e760220f516", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tools/dev/Makefile": { + "a": 13, + "d": 2 + }, + "tools/dev/unacknowledged-issues.jq": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12176.json b/doc/sphinxext/prs/12176.json new file mode 100644 index 00000000000..56e3a618bc2 --- /dev/null +++ b/doc/sphinxext/prs/12176.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "ffbce015ed8e6c069e00d68114079251781f7fb8", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/mne_logo.svg": { + "a": 744, + "d": 827 + }, + "doc/_static/mne_logo_dark.svg": { + "a": 744, + "d": 827 + }, + "doc/_static/mne_logo_gray.svg": { + "a": 783, + "d": 0 + }, + "doc/_static/mne_logo_small.svg": { + "a": 6, + "d": 6 + }, + "doc/sphinxext/gen_commands.py": { + "a": 2, + "d": 2 + }, + "logo/generate_mne_logos.py": { + "a": 72, + "d": 40 + }, + "mne/icons/mne_default_icon.png": { + "a": 0, + "d": 0 + }, + "mne/icons/mne_splash.png": { + "a": 0, + "d": 0 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 1 + }, + "tools/check_mne_location.py": { + "a": 1, + "d": 1 + }, + "tools/generate_codemeta.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12177.json 
b/doc/sphinxext/prs/12177.json new file mode 100644 index 00000000000..777d4bf8cc8 --- /dev/null +++ b/doc/sphinxext/prs/12177.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8af12aae0ec3f1e73a19ce9be5739922712be299", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12178.json b/doc/sphinxext/prs/12178.json new file mode 100644 index 00000000000..0f744d623bb --- /dev/null +++ b/doc/sphinxext/prs/12178.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "22a6dc6b22a6397b75b6caa2271079ed14931986", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + ".github/workflows/tests.yml": { + "a": 1, + "d": 11 + }, + "MANIFEST.in": { + "a": 0, + "d": 6 + }, + "Makefile": { + "a": 3, + "d": 3 + }, + "azure-pipelines.yml": { + "a": 6, + "d": 7 + }, + "doc/changes/v0.24.rst": { + "a": 1, + "d": 1 + }, + "doc/development/contributing.rst": { + "a": 10, + "d": 4 + }, + "doc/links.inc": { + "a": 0, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 13, + "d": 7 + }, + "pyproject.toml": { + "a": 165, + "d": 9 + }, + "requirements.txt": { + "a": 0, + "d": 47 + }, + "requirements_base.txt": { + "a": 0, + "d": 12 + }, + "requirements_doc.txt": { + "a": 0, + "d": 22 + }, + "requirements_hdf5.txt": { + "a": 0, + "d": 3 + }, + "requirements_testing.txt": { + "a": 0, + "d": 15 + }, + "requirements_testing_extra.txt": { + "a": 0, + "d": 10 + }, + "setup.py": { + "a": 0, + "d": 57 + }, + "tools/azure_dependencies.sh": { + "a": 4, + "d": 5 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 2 + }, + "tools/generate_codemeta.py": { + "a": 11, + "d": 16 + }, + "tools/github_actions_dependencies.sh": { + "a": 8, + "d": 17 + }, + "tools/github_actions_env_vars.sh": { + "a": 2, + "d": 4 + }, + "tutorials/io/20_reading_eeg_data.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1218.json b/doc/sphinxext/prs/1218.json new file mode 100644 index 00000000000..775fed91a89 --- /dev/null +++ b/doc/sphinxext/prs/1218.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "5a1feece8cccc5bb36c2ab91675d19377d0cfdf9", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 0 + }, + "doc/source/python_reference.rst": { + "a": 10, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 1 + }, + "mne/fiff/egi/__init__.py": { + "a": 5, + "d": 0 + }, + "mne/fiff/egi/egi.py": { + "a": 273, + "d": 0 + }, + "mne/fiff/egi/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/fiff/egi/tests/data/test_egi.raw": { + "a": 0, + "d": 0 + }, + "mne/fiff/egi/tests/test_egi.py": { + "a": 60, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12181.json b/doc/sphinxext/prs/12181.json new file mode 100644 index 00000000000..705d8ead20f --- /dev/null +++ b/doc/sphinxext/prs/12181.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b55df6397459245d280ffed9fce27ed3e808187c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/cov.py": { + "a": 5, + "d": 7 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 0 + }, + "mne/utils/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12184.json b/doc/sphinxext/prs/12184.json new file mode 100644 index 00000000000..90a9089141f --- /dev/null +++ b/doc/sphinxext/prs/12184.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bb93c0a20cc936f7cdfbadc8aa5ee40a974a6e80", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12185.json b/doc/sphinxext/prs/12185.json new file mode 100644 index 00000000000..0333660f3f2 --- /dev/null +++ b/doc/sphinxext/prs/12185.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "14f4f865e6fa28231962b8c7582cac5f41798f9f", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 4, + "d": 3 + }, + "mne/epochs.py": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 5, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 7, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12186.json b/doc/sphinxext/prs/12186.json new file mode 100644 index 00000000000..4ea5b9b00db --- /dev/null +++ b/doc/sphinxext/prs/12186.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6874483f6033743510a1ab0766c56ea007db1c20", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 7, + "d": 5 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 8, + "d": 5 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12187.json b/doc/sphinxext/prs/12187.json new file mode 100644 index 00000000000..405ae70fbd3 --- /dev/null +++ b/doc/sphinxext/prs/12187.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2bc47d078999203a23a6c78ac55952c9e699c861", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12188.json b/doc/sphinxext/prs/12188.json new file mode 100644 index 00000000000..52fcc0780b7 --- /dev/null +++ b/doc/sphinxext/prs/12188.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fd53fc44915ee3bea2f18c468eece4ed84476e1d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12189.json b/doc/sphinxext/prs/12189.json new file mode 100644 index 00000000000..6335ea879fa --- /dev/null +++ b/doc/sphinxext/prs/12189.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9b57c51686ca5536edc2a5e74444428a9a138ef6", + "authors": [ + { + "n": "Jacob Woessner", + "e": "Woessner.jacob@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 33, + "d": 6 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 34, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12190.json b/doc/sphinxext/prs/12190.json new file mode 100644 index 00000000000..142c275098b --- /dev/null +++ b/doc/sphinxext/prs/12190.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0a9c61bbbd0b70193e9bb88ddaeea3a69c6805f4", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 9, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12192.json b/doc/sphinxext/prs/12192.json new file mode 100644 index 00000000000..3d8871a21d6 --- /dev/null +++ b/doc/sphinxext/prs/12192.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a91f582874a29b32b558d8a55d60c2b5e22b0812", + "authors": [ + { + "n": "Jacob Woessner", + "e": "Woessner.jacob@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12194.json b/doc/sphinxext/prs/12194.json new file mode 100644 index 00000000000..76ab301f7ab --- /dev/null +++ b/doc/sphinxext/prs/12194.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9cbdc7b3eb0ff1e96e79c5cabd3fc7d8a8c27a92", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/html_templates/repr/ica.html.jinja": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12195.json b/doc/sphinxext/prs/12195.json new file mode 100644 index 00000000000..2ea9c69f428 --- /dev/null +++ b/doc/sphinxext/prs/12195.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "3a42bb913fcbfdfed7ae9e23b5649c51b372eb9c", + 
"authors": [ + { + "n": "Jacob Woessner", + "e": "Woessner.jacob@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12195.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 78, + "d": 24 + }, + "mne/tests/test_epochs.py": { + "a": 161, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 43, + "d": 5 + }, + "mne/utils/mixin.py": { + "a": 10, + "d": 3 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 106, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12198.json b/doc/sphinxext/prs/12198.json new file mode 100644 index 00000000000..6d1fef2819e --- /dev/null +++ b/doc/sphinxext/prs/12198.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a9a94d92b2040f1455d36403bd10822cf6770c8f", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 5, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 12, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12202.json b/doc/sphinxext/prs/12202.json new file mode 100644 index 00000000000..9541a5f8762 --- /dev/null +++ b/doc/sphinxext/prs/12202.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "fa0e8cfc42e8a80f6018df02372bb9f709bd993f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 0, + "d": 3 + }, + "doc/_includes/channel_interpolation.rst": { + "a": 5, + "d": 6 + }, + "doc/_includes/inverse.rst": { + "a": 12, + "d": 12 + }, + "doc/_includes/ssp.rst": { + "a": 6, + "d": 6 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/development/roadmap.rst": { + "a": 81, + "d": 79 + }, + "doc/links.inc": { + "a": 4, + "d": 1 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 5, + "d": 5 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12203.json b/doc/sphinxext/prs/12203.json new file mode 100644 index 00000000000..03b6bdf0094 --- /dev/null +++ b/doc/sphinxext/prs/12203.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "26a0cdcfc0448ce7617f6ada8e0c324b2ceada0e", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12205.json b/doc/sphinxext/prs/12205.json new file mode 100644 index 00000000000..8f8f8bb4171 --- /dev/null +++ b/doc/sphinxext/prs/12205.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f58776c4d1a0101c1442ae0432e2395ce4a38809", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/pick.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 10, + "d": 1 + 
} + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12206.json b/doc/sphinxext/prs/12206.json new file mode 100644 index 00000000000..375853a90e3 --- /dev/null +++ b/doc/sphinxext/prs/12206.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "87df00d63991a51835992e054d690aa67ccca7da", + "authors": [ + { + "n": "Dominik Welke", + "e": "dominik.welke@web.de" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12206.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12206.newfeature.rst": { + "a": 3, + "d": 0 + }, + "mne/epochs.py": { + "a": 38, + "d": 7 + }, + "mne/evoked.py": { + "a": 58, + "d": 12 + }, + "mne/io/base.py": { + "a": 37, + "d": 5 + }, + "mne/io/tests/test_apply_function.py": { + "a": 29, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 33, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 30, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12207.json b/doc/sphinxext/prs/12207.json new file mode 100644 index 00000000000..e4a4ec3cde8 --- /dev/null +++ b/doc/sphinxext/prs/12207.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "73ca06b4056bcb63d55bbcbc68c816c5c8b8f6f2", + "authors": [ + { + "n": "Carina", + "e": "carinaforster0611@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12207.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 19, + "d": 19 + }, + "mne/tests/test_epochs.py": { + "a": 38, + "d": 5 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 31, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 37, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12209.json b/doc/sphinxext/prs/12209.json new file mode 100644 index 00000000000..57dd4a57f81 --- /dev/null +++ b/doc/sphinxext/prs/12209.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3fd2c912f25d7c6344edb44e77bd19f036db1c5c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "pyproject.toml": { + "a": 4, + "d": 4 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1221.json b/doc/sphinxext/prs/1221.json new file mode 100644 index 00000000000..da0390ed53e --- /dev/null +++ b/doc/sphinxext/prs/1221.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "28cc5269865f3ae68822f3f63805e7db395e0c1a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/realtime/tests/test_stim_client_server.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12211.json b/doc/sphinxext/prs/12211.json new file mode 100644 index 00000000000..30e81e8827d --- /dev/null +++ b/doc/sphinxext/prs/12211.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "137664c00475d6714ae655eee4fd09ff87bd74c4", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12212.json b/doc/sphinxext/prs/12212.json new file mode 100644 index 00000000000..5e9f74a16ea --- /dev/null +++ b/doc/sphinxext/prs/12212.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3e73a0ea8ac886622d8f167bcb1b44d496be8a28", + "authors": [ + { + "n": "Dominik Welke", + "e": 
"dominik.welke@web.de" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12215.json b/doc/sphinxext/prs/12215.json new file mode 100644 index 00000000000..7cf25b8bc1b --- /dev/null +++ b/doc/sphinxext/prs/12215.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "239e574ee2f16f1c87dc3062df1876b6f5a5b1b5", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12218.json b/doc/sphinxext/prs/12218.json new file mode 100644 index 00000000000..c5f4d3e430b --- /dev/null +++ b/doc/sphinxext/prs/12218.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "6c6e6ec6dfea7feebc488fa7d39d3f295f4e105b", + "authors": [ + { + "n": "Florian Hofer", + "e": null + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 2, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/export/_edf.py": { + "a": 121, + "d": 218 + }, + "mne/export/tests/test_export.py": { + "a": 122, + "d": 151 + }, + "mne/utils/__init__.pyi": { + "a": 2, + "d": 2 + }, + "mne/utils/check.py": { + "a": 2, + "d": 17 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1222.json b/doc/sphinxext/prs/1222.json new file mode 100644 index 00000000000..e4d70ef03b8 --- /dev/null +++ b/doc/sphinxext/prs/1222.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f663a08133ac5b0b36a980d5459235c813907d40", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 18, + "d": 16 + }, + "mne/tests/test_label.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12220.json b/doc/sphinxext/prs/12220.json new file mode 100644 index 00000000000..d5500d768ae --- /dev/null +++ b/doc/sphinxext/prs/12220.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "61777eb2679fbbc08f9752ae93ebcae547977fed", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/circleci_dependencies.sh": { + "a": 0, + "d": 1 + }, + "tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12222.json b/doc/sphinxext/prs/12222.json new file mode 100644 index 00000000000..e767c277c6a --- /dev/null +++ b/doc/sphinxext/prs/12222.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2efb77d7cbe085be548fdda9dfb3e1ad047ab812", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/links.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12223.json b/doc/sphinxext/prs/12223.json new file mode 100644 index 00000000000..c5b906fca4d --- /dev/null +++ b/doc/sphinxext/prs/12223.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d8448e3940790eb3f03d3fbed0286ae75956f0c3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 7, + "d": 1 + }, + "mne/_fiff/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_browse_raw.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + 
"a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12224.json b/doc/sphinxext/prs/12224.json new file mode 100644 index 00000000000..17e02f3e9ae --- /dev/null +++ b/doc/sphinxext/prs/12224.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "e4838892bf8dcd220e623be66edfcef683f0e638", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".mailmap": { + "a": 2, + "d": 0 + }, + "CITATION.cff": { + "a": 57, + "d": 35 + }, + "SECURITY.md": { + "a": 4, + "d": 4 + }, + "codemeta.json": { + "a": 169, + "d": 94 + }, + "doc/_static/style.css": { + "a": 1, + "d": 0 + }, + "doc/_static/versions.json": { + "a": 7, + "d": 2 + }, + "doc/changes/v1.6.rst": { + "a": 44, + "d": 21 + }, + "doc/development/whats_new.rst": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 5, + "d": 3 + }, + "tools/github_actions_dependencies.sh": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12225.json b/doc/sphinxext/prs/12225.json new file mode 100644 index 00000000000..bc1aa6fb8ea --- /dev/null +++ b/doc/sphinxext/prs/12225.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d030961250650d0636de71d81b00fa69f90d30d8", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12226.json b/doc/sphinxext/prs/12226.json new file mode 100644 index 00000000000..afb8c898b06 --- /dev/null +++ b/doc/sphinxext/prs/12226.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d7612a115d7a3ff86b4bb55a19dfd4a62dab5b56", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 34, + "d": 0 + }, + "doc/development/whats_new.rst": { + "a": 1, + "d": 0 + }, + "doc/install/installers.rst": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12227.json b/doc/sphinxext/prs/12227.json new file mode 100644 index 00000000000..574caa15e1f --- /dev/null +++ b/doc/sphinxext/prs/12227.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1c86d86206610567091ba3fd2dcb63f919c768dc", + "authors": [ + { + "n": "Tristan Stenner", + "e": null + } + ], + "changes": { + "doc/changes/v1.6.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12228.json b/doc/sphinxext/prs/12228.json new file mode 100644 index 00000000000..46b55ecf8fe --- /dev/null +++ b/doc/sphinxext/prs/12228.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bf2d368b65ff8adf92bde35f9ae2d6fdd3a03104", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 7, + "d": 40 + }, + "doc/development/contributing.rst": { + "a": 7, + "d": 7 + }, + "doc/development/roadmap.rst": { + "a": 0, + "d": 2 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12229.json b/doc/sphinxext/prs/12229.json new file mode 100644 index 00000000000..aef6e4ebd3d --- /dev/null +++ b/doc/sphinxext/prs/12229.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "44c787fd4cffa3453ffbc7b6735a5d09f47eed44", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + 
"n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 3, + "d": 3 + }, + ".pre-commit-config.yaml": { + "a": 3, + "d": 0 + }, + "README.rst": { + "a": 2, + "d": 2 + }, + "doc/development/contributing.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 0, + "d": 11 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1223.json b/doc/sphinxext/prs/1223.json new file mode 100644 index 00000000000..450f8193ee5 --- /dev/null +++ b/doc/sphinxext/prs/1223.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "61ab9e37a0c937229edc08489bbc28d028cacf08", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + ".travis.yml": { + "a": 1, + "d": 6 + }, + "doc/source/python_reference.rst": { + "a": 4, + "d": 0 + }, + "examples/plot_megsim_data.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 4, + "d": 5 + }, + "mne/fiff/__init__.py": { + "a": 10, + "d": 5 + }, + "mne/fiff/base.py": { + "a": 20, + "d": 472 + }, + "mne/fiff/brainvision/brainvision.py": { + "a": 2, + "d": 3 + }, + "mne/fiff/bti/raw.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/channels.py": { + "a": 6, + "d": 4 + }, + "mne/fiff/edf/edf.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/egi/egi.py": { + "a": 4, + "d": 4 + }, + "mne/fiff/egi/tests/test_egi.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/fiff/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/fiff/fiff/raw.py": { + "a": 535, + "d": 0 + }, + "mne/fiff/fiff/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/kit/kit.py": { + "a": 3, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 6, + "d": 6 + }, + "setup.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12231.json b/doc/sphinxext/prs/12231.json new file mode 100644 index 00000000000..26e12d6700f --- /dev/null +++ b/doc/sphinxext/prs/12231.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a7d479568b3f427f65d87452e5400235e6a73dc9", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12235.json b/doc/sphinxext/prs/12235.json new file mode 100644 index 00000000000..bfa00f9d6ec --- /dev/null +++ b/doc/sphinxext/prs/12235.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1334bfcdbdd2c3fa70d4e0cff8d24cdb64ce9ff7", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "pyproject.toml": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12236.json b/doc/sphinxext/prs/12236.json new file mode 100644 index 00000000000..de037becce5 --- /dev/null +++ b/doc/sphinxext/prs/12236.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "133d589bace71074e9cfddc57b641796235aaaeb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/api/reading_raw_data.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/v1.6.rst": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 12, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12237.json 
b/doc/sphinxext/prs/12237.json new file mode 100644 index 00000000000..8127dcac4a7 --- /dev/null +++ b/doc/sphinxext/prs/12237.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "acab264e3d844cce5dfa9fadb49cc55352a261bc", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 7 + }, + "doc/api/preprocessing.rst": { + "a": 2, + "d": 0 + }, + "doc/changes/devel/12237.newfeature.rst": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 9, + "d": 5 + }, + "doc/sphinxext/contrib_avatars.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/gen_commands.py": { + "a": 1, + "d": 2 + }, + "doc/sphinxext/gen_names.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/gh_substitutions.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/mne_substitutions.py": { + "a": 3, + "d": 3 + }, + "doc/sphinxext/newcontrib_substitutions.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/unit_role.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/eyetracking_plot_heatmap.py": { + "a": 25, + "d": 4 + }, + "mne/conftest.py": { + "a": 51, + "d": 0 + }, + "mne/preprocessing/eyetracking/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/eyetracking/_pupillometry.py": { + "a": 8, + "d": 0 + }, + "mne/preprocessing/eyetracking/eyetracking.py": { + "a": 165, + "d": 0 + }, + "mne/preprocessing/eyetracking/tests/test_eyetracking.py": { + "a": 78, + "d": 0 + }, + "mne/preprocessing/eyetracking/utils.py": { + "a": 41, + "d": 0 + }, + "mne/viz/__init__.pyi": { + "a": 2, + "d": 1 + }, + "mne/viz/eyetracking/heatmap.py": { + "a": 69, + "d": 15 + }, + "mne/viz/eyetracking/tests/test_heatmap.py": { + "a": 43, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12238.json b/doc/sphinxext/prs/12238.json new file mode 100644 index 00000000000..b0b6a994515 --- /dev/null +++ b/doc/sphinxext/prs/12238.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "06c90a7982eee3b4747bbf6e6afca71014c3e5bf", + "authors": [ + { + "n": "Nikolai Kapralov", + "e": null + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/check.py": { + "a": 22, + "d": 2 + }, + "mne/utils/tests/test_check.py": { + "a": 13, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12239.json b/doc/sphinxext/prs/12239.json new file mode 100644 index 00000000000..0af04768aa8 --- /dev/null +++ b/doc/sphinxext/prs/12239.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a8422368e91d678c297429f19e496666a2a5e19f", + "authors": [ + { + "n": "Nikolai Kapralov", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1224.json b/doc/sphinxext/prs/1224.json new file mode 100644 index 00000000000..db67077a2eb --- /dev/null +++ b/doc/sphinxext/prs/1224.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7d3ececcbb37133769d5262d05df7e3f9ebc7ec1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/stats/parametric.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12240.json b/doc/sphinxext/prs/12240.json new file mode 100644 index 00000000000..426edc944ee --- /dev/null +++ b/doc/sphinxext/prs/12240.json @@ -0,0 +1,15 @@ 
+{ + "merge_commit_sha": "ac0d42c997f3698922736721ba272ab5e9a4b791", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "pyproject.toml": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12242.json b/doc/sphinxext/prs/12242.json new file mode 100644 index 00000000000..a34467a07b6 --- /dev/null +++ b/doc/sphinxext/prs/12242.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "476e50dd0b67474d98b96b06bf3752c0ec0e46cb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_abstract.py": { + "a": 17, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12244.json b/doc/sphinxext/prs/12244.json new file mode 100644 index 00000000000..7b5a723d32f --- /dev/null +++ b/doc/sphinxext/prs/12244.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5ae31b6ca7275a03332eef0c23b5d2cee68c3c42", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg.py": { + "a": 3, + "d": 0 + }, + "mne/report/tests/test_report.py": { + "a": 2, + "d": 0 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 11, + "d": 10 + }, + "tools/github_actions_dependencies.sh": { + "a": 16, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12245.json b/doc/sphinxext/prs/12245.json new file mode 100644 index 00000000000..57621858c33 --- /dev/null +++ b/doc/sphinxext/prs/12245.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "88fb4a612ed7f109c3bac1910c38e32ccea49ebb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + ".git_archival.txt": { + "a": 4, + "d": 0 + }, + ".gitattributes": { + "a": 1, + "d": 0 + }, + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/development/contributing.rst": { + "a": 2, + "d": 2 + }, + "doc/install/manual_install.rst": { + "a": 1, + "d": 1 + }, + "doc/install/updating.rst": { + "a": 2, + "d": 2 + }, + "mne/__init__.py": { + "a": 3, + "d": 4 + }, + "pyproject.toml": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12246.json b/doc/sphinxext/prs/12246.json new file mode 100644 index 00000000000..304ac4196eb --- /dev/null +++ b/doc/sphinxext/prs/12246.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2a1f7e4929b03c243375551755f5e99618acb5ca", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/datasets/eegbci/eegbci.py": { + "a": 1, + "d": 7 + }, + "mne/utils/misc.py": { + "a": 1, + "d": 6 + }, + "pyproject.toml": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12248.json b/doc/sphinxext/prs/12248.json new file mode 100644 index 00000000000..67d616ef42f --- /dev/null +++ b/doc/sphinxext/prs/12248.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b107d92ecc0526bd8cd90b0a06c13caa9a9ec87c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12250.json b/doc/sphinxext/prs/12250.json new file mode 100644 index 00000000000..d7e546650de --- /dev/null +++ 
b/doc/sphinxext/prs/12250.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "7bf1b4ab70404a9f0b3cf00783a06e1ce9e0c272", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".git-blame-ignore-revs": { + "a": 1, + "d": 0 + }, + ".pre-commit-config.yaml": { + "a": 9, + "d": 0 + }, + "doc/changes/devel.rst": { + "a": 13, + "d": 2 + }, + "mne/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/io/_read_raw.py": { + "a": 2, + "d": 1 + }, + "pyproject.toml": { + "a": 31, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12251.json b/doc/sphinxext/prs/12251.json new file mode 100644 index 00000000000..a809d7a2168 --- /dev/null +++ b/doc/sphinxext/prs/12251.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "20174f448e5f70623c3c8eda048ae43cb0b39a05", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 7, + "d": 8 + }, + "pyproject.toml": { + "a": 1, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12252.json b/doc/sphinxext/prs/12252.json new file mode 100644 index 00000000000..b320f88b555 --- /dev/null +++ b/doc/sphinxext/prs/12252.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "45eba5a688b555d3d31ea89d9d2e0aa2fae33ff8", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/surface.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12254.json b/doc/sphinxext/prs/12254.json new file mode 100644 index 00000000000..50005c4a887 --- /dev/null +++ b/doc/sphinxext/prs/12254.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "080a2a12554415f0a4d6b6d89b63cb80534fa2aa", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 3, + "d": 3 + }, + "doc/links.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12257.json b/doc/sphinxext/prs/12257.json new file mode 100644 index 00000000000..0e3ffaf20a2 --- /dev/null +++ b/doc/sphinxext/prs/12257.json @@ -0,0 +1,259 @@ +{ + "merge_commit_sha": "ac55693e1fa1d0da65791fb547ad4dd6fcd90dc5", + "authors": [ + { + "n": "Nikolai Kapralov", + "e": null + } + ], + "changes": { + "mne/_fiff/tests/test_compensator.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_proc_history.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_show_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_forward.py": { + "a": 1, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, 
+ "d": 4 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_read_raw.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_annotate_nan.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_hfc.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_interpolate.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/report/tests/test_report.py": { + "a": 3, + "d": 7 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 3, + "d": 9 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 3 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_misc.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_rank.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_read_vectorview_selection.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_transforms.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 1, + "d": 3 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_logging.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_numerics.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_montage.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12260.json b/doc/sphinxext/prs/12260.json new file mode 100644 index 00000000000..11794304aa0 --- /dev/null +++ b/doc/sphinxext/prs/12260.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "82d195dd10c3b0756b5615b6e28dbc481040825f", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + }, + ".gitignore": { + "a": 2, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + 
"doc/api/realtime.rst": { + "a": 2, + "d": 1 + }, + "doc/changes/v0.23.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v0.24.rst": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 4, + "d": 4 + }, + "doc/install/mne_tools_suite.rst": { + "a": 0, + "d": 1 + }, + "doc/links.inc": { + "a": 1, + "d": 0 + }, + "mne/bem.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/_fetch.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/transformer.py": { + "a": 3, + "d": 4 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12261.json b/doc/sphinxext/prs/12261.json new file mode 100644 index 00000000000..fb32bc19e00 --- /dev/null +++ b/doc/sphinxext/prs/12261.json @@ -0,0 +1,303 @@ +{ + "merge_commit_sha": "e7dd1588013179013a50d3f6b8e8f9ae0a185783", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 0, + "d": 1 + }, + ".pre-commit-config.yaml": { + "a": 8, + "d": 8 + }, + "mne/_fiff/tag.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_constants.py": { + "a": 6, + "d": 6 + }, + "mne/annotations.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 1, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 19, + "d": 54 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 4 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 1, + "d": 2 + }, + "mne/coreg.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 1, + "d": 3 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 5, + "d": 7 + }, + "mne/dipole.py": { + "a": 6, + "d": 6 + }, + "mne/epochs.py": { + "a": 5, + "d": 7 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 2 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 8, + "d": 2 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 5, + "d": 5 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 3 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 5, + "d": 7 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 2 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 3 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 3 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/fil/fil.py": { + "a": 2, + "d": 2 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 2, + "d": 6 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 3 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 1 + }, + "mne/io/nihon/nihon.py": { + "a": 1, 
+ "d": 2 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/eyetracking/eyetracking.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/interpolate.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_annotate_amplitude.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 5, + "d": 7 + }, + "mne/source_space/_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 5, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 3 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 2 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_bunch.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 384, + "d": 1148 + }, + "mne/utils/progressbar.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 2 + }, + "mne/viz/backends/_utils.py": { + "a": 2, + "d": 5 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 2 + }, + "pyproject.toml": { + "a": 0, + "d": 4 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 3, + "d": 1 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 1, + "d": 2 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12263.json b/doc/sphinxext/prs/12263.json new file mode 100644 index 00000000000..9a99d7eac3b --- /dev/null +++ b/doc/sphinxext/prs/12263.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "092cf9002c0510de7d66255a8b11e3a591036f92", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/sphinxext/contrib_avatars.py": { + "a": 26, + "d": 18 + }, + "tools/circleci_bash_env.sh": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12264.json b/doc/sphinxext/prs/12264.json new file mode 100644 index 00000000000..31c9fdc5fda --- /dev/null +++ b/doc/sphinxext/prs/12264.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "3d3633cadf17631ffe2a984b13af5e21c5bbbde0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 5, + "d": 6 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 3, + "d": 3 + }, + "mne/channels/tests/test_montage.py": { + "a": 5, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 2, + "d": 0 + }, + "mne/io/egi/egimff.py": { + "a": 7, + "d": 2 + }, + "mne/io/egi/events.py": { + "a": 3, + "d": 3 + }, + "mne/io/egi/general.py": { + "a": 13, + "d": 2 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 11, + "d": 0 + }, + "mne/io/nedf/nedf.py": { + "a": 3, + "d": 3 + }, + "mne/io/nedf/tests/test_nedf.py": { + "a": 2, + "d": 0 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline 
at end of file diff --git a/doc/sphinxext/prs/12265.json b/doc/sphinxext/prs/12265.json new file mode 100644 index 00000000000..d434952852a --- /dev/null +++ b/doc/sphinxext/prs/12265.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "6187c836cdb4bbab4f222d7918df6f006a3beaa5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/_fiff/__init__.py": { + "a": 0, + "d": 19 + }, + "mne/commands/tests/test_commands.py": { + "a": 0, + "d": 27 + }, + "mne/gui/_gui.py": { + "a": 6, + "d": 61 + }, + "mne/io/constants.py": { + "a": 0, + "d": 9 + }, + "mne/io/meas_info.py": { + "a": 0, + "d": 11 + }, + "mne/io/pick.py": { + "a": 0, + "d": 9 + }, + "mne/io/proj.py": { + "a": 0, + "d": 11 + }, + "mne/io/reference.py": { + "a": 0, + "d": 11 + }, + "mne/io/tag.py": { + "a": 0, + "d": 11 + }, + "mne/io/tests/test_deprecation.py": { + "a": 0, + "d": 30 + }, + "mne/io/utils.py": { + "a": 0, + "d": 11 + }, + "mne/io/write.py": { + "a": 0, + "d": 11 + }, + "mne/preprocessing/__init__.pyi": { + "a": 0, + "d": 2 + }, + "mne/preprocessing/maxfilter.py": { + "a": 0, + "d": 230 + }, + "mne/source_space/_source_space.py": { + "a": 0, + "d": 3 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 9, + "d": 39 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 3, + "d": 134 + }, + "mne/viz/_brain/_brain.py": { + "a": 0, + "d": 10 + }, + "mne/viz/backends/_abstract.py": { + "a": 0, + "d": 9 + }, + "mne/viz/backends/_pyvista.py": { + "a": 0, + "d": 21 + }, + "mne/viz/backends/renderer.py": { + "a": 0, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12268.json b/doc/sphinxext/prs/12268.json new file mode 100644 index 00000000000..732600f9b6f --- /dev/null +++ b/doc/sphinxext/prs/12268.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "432249ee6304b07a8ecece2b310a65c594c59ae9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/Makefile": { + "a": 1, + "d": 1 + }, + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "examples/datasets/spm_faces_dataset.py": { + "a": 28, + "d": 79 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 5, + "d": 6 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 4, + "d": 4 + }, + "mne/cuda.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 109, + "d": 57 + }, + "mne/io/base.py": { + "a": 16, + "d": 6 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 36, + "d": 30 + }, + "mne/source_estimate.py": { + "a": 23, + "d": 4 + }, + "mne/tests/test_filter.py": { + "a": 28, + "d": 11 + }, + "mne/tests/test_source_estimate.py": { + "a": 64, + "d": 52 + }, + "mne/utils/docs.py": { + "a": 45, + "d": 13 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 48, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12269.json b/doc/sphinxext/prs/12269.json new file mode 100644 index 00000000000..7ba16b572ae --- /dev/null +++ b/doc/sphinxext/prs/12269.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d00cbb12b9b6070a713ac67fcba19e7443c71ef7", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 0, + "d": 86 + }, + "Makefile": { + "a": 1, + 
"d": 4 + }, + "azure-pipelines.yml": { + "a": 0, + "d": 4 + }, + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "pyproject.toml": { + "a": 26, + "d": 45 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12270.json b/doc/sphinxext/prs/12270.json new file mode 100644 index 00000000000..34089cb449f --- /dev/null +++ b/doc/sphinxext/prs/12270.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "bf03a03c91bc64b8a9cf72d75a399cfcb89c0662", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/datasets/brainstorm_data.py": { + "a": 3, + "d": 1 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/find_ref_artifacts.py": { + "a": 3, + "d": 3 + }, + "mne/io/array/tests/test_array.py": { + "a": 4, + "d": 2 + }, + "mne/report/report.py": { + "a": 4, + "d": 2 + }, + "mne/time_frequency/spectrum.py": { + "a": 36, + "d": 27 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 17, + "d": 14 + }, + "mne/viz/utils.py": { + "a": 9, + "d": 24 + }, + "tutorials/clinical/60_sleep.py": { + "a": 1, + "d": 0 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/10_overview.py": { + "a": 7, + "d": 7 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 3, + "d": 1 + }, + "tutorials/inverse/95_phantom_KIT.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 6, + "d": 2 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 12, + "d": 6 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 8, + "d": 1 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 3, + "d": 1 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 7, + "d": 7 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 1, + "d": 1 + }, + "tutorials/time-freq/10_spectrum_class.py": { + "a": 4, + "d": 4 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12275.json b/doc/sphinxext/prs/12275.json new file mode 100644 index 00000000000..e492727809e --- /dev/null +++ b/doc/sphinxext/prs/12275.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "854c0eb018beafa2841663bcbbdec3af1b35e73a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/filter.py": { + "a": 3, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 4, + "d": 4 + }, + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12278.json b/doc/sphinxext/prs/12278.json new file mode 100644 index 00000000000..598ecdd724b --- /dev/null +++ b/doc/sphinxext/prs/12278.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "59e50247c8fc4d6d5c968b9d9b6207b7ff5d6b24", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "LICENSE.txt": { + "a": 9, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12279.json b/doc/sphinxext/prs/12279.json new file 
mode 100644 index 00000000000..3f8322fbec7 --- /dev/null +++ b/doc/sphinxext/prs/12279.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "97512a15a74c6a610132c83e8e420dd4a7caf4f5", + "authors": [ + { + "n": "Kristijan Armeni", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 187, + "d": 27 + }, + "mne/io/neuralynx/tests/test_neuralynx.py": { + "a": 95, + "d": 13 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "pyproject.toml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12281.json b/doc/sphinxext/prs/12281.json new file mode 100644 index 00000000000..56b900104f3 --- /dev/null +++ b/doc/sphinxext/prs/12281.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "8af33df490f94c3dd628cfc23beafed1a6cc6361", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 0, + "d": 1 + }, + ".gitignore": { + "a": 0, + "d": 1 + }, + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "examples/visualization/3d_to_2d.py": { + "a": 3, + "d": 6 + }, + "mne/conftest.py": { + "a": 5, + "d": 0 + }, + "mne/data/image/custom_layout.lout": { + "a": 0, + "d": 257 + }, + "mne/data/image/mni_brain.gif": { + "a": 0, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "pyproject.toml": { + "a": 8, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 5, + "d": 1 + }, + "tools/github_actions_install.sh": { + "a": 0, + "d": 5 + }, + "tools/github_actions_test.sh": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12282.json b/doc/sphinxext/prs/12282.json new file mode 100644 index 00000000000..8e6ce095bbf --- /dev/null +++ b/doc/sphinxext/prs/12282.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fd5008a48a1819034f8cf94dfc0e31f7f1a74ba5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 7, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12284.json b/doc/sphinxext/prs/12284.json new file mode 100644 index 00000000000..8a2fcb07d8a --- /dev/null +++ b/doc/sphinxext/prs/12284.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8e500a3f4c8e37136c72e13e060f819b711198f1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 34 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12287.json b/doc/sphinxext/prs/12287.json new file mode 100644 index 00000000000..2ed1db492f7 --- /dev/null +++ b/doc/sphinxext/prs/12287.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "742065d5dd6c69223cc15bb7aca212e5d1a99988", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/release.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/tests.yml": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 1, + "d": 1 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/12288.json b/doc/sphinxext/prs/12288.json new file mode 100644 index 00000000000..fa107fe0e42 --- /dev/null +++ b/doc/sphinxext/prs/12288.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5df4cd6506ca2fb244070865a92bdbba8dabc1c4", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12289.json b/doc/sphinxext/prs/12289.json new file mode 100644 index 00000000000..32108ecac0b --- /dev/null +++ b/doc/sphinxext/prs/12289.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "4e2a60073c5c42030f825b7495032a91e5d7e722", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 14, + "d": 3 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 3, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 8, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils/dataframe.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12291.json b/doc/sphinxext/prs/12291.json new file mode 100644 index 00000000000..0a50532e48f --- /dev/null +++ b/doc/sphinxext/prs/12291.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7ce9aa1789f9ebe928aaa315d993182ad416464e", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 7, + "d": 2 + }, + "pyproject.toml": { + "a": 0, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12294.json b/doc/sphinxext/prs/12294.json new file mode 100644 index 00000000000..6d9ce7969d5 --- /dev/null +++ b/doc/sphinxext/prs/12294.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1034bffde6fe4a360da3fe155b3c16bdc6380b8d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/mne_coreg.py": { + "a": 0, + "d": 38 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12296.json b/doc/sphinxext/prs/12296.json new file mode 100644 index 00000000000..59ec81712ac --- /dev/null +++ b/doc/sphinxext/prs/12296.json @@ -0,0 +1,127 @@ +{ + "merge_commit_sha": "35f0ef65d02af33acf55ba01fa5aa62d8697e117", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 27, + "d": 24 + }, + "mne/annotations.py": { + "a": 4, + "d": 2 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/boxy/boxy.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 6 + }, + "mne/io/curry/curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 3, + "d": 3 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/eximia/eximia.py": { + "a": 1, + "d": 1 + }, + "mne/io/eyelink/eyelink.py": { + "a": 1, + "d": 1 + }, + 
"mne/io/fieldtrip/fieldtrip.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/fil/fil.py": { + "a": 3, + "d": 1 + }, + "mne/io/hitachi/hitachi.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/nedf/nedf.py": { + "a": 1, + "d": 1 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 1 + }, + "mne/io/nihon/nihon.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 3, + "d": 1 + }, + "mne/io/nsx/nsx.py": { + "a": 1, + "d": 1 + }, + "mne/io/persyst/persyst.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 3, + "d": 1 + }, + "pyproject.toml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12297.json b/doc/sphinxext/prs/12297.json new file mode 100644 index 00000000000..a4884069193 --- /dev/null +++ b/doc/sphinxext/prs/12297.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "40256aef4dd5b417be91ce544fa8031cc2abd9bf", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 3, + "d": 2 + }, + "doc/conf.py": { + "a": 12, + "d": 13 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 3, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/nedf/nedf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12298.json b/doc/sphinxext/prs/12298.json new file mode 100644 index 00000000000..6861e787093 --- /dev/null +++ b/doc/sphinxext/prs/12298.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b1329c3ae59d0da3646b0c667441e12ee0f7bd8d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 6, + "d": 0 + }, + "doc/conf.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12299.json b/doc/sphinxext/prs/12299.json new file mode 100644 index 00000000000..c01cab3e45a --- /dev/null +++ b/doc/sphinxext/prs/12299.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "00882bc2d24b07594c080af1a768f970476bdd4c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/check_changelog.yml": { + "a": 15, + "d": 0 + }, + "doc/changes/devel.rst": { + "a": 2, + "d": 53 + }, + "doc/changes/devel.rst.template": { + "a": 0, + "d": 34 + }, + "doc/changes/devel/.gitignore": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12190.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12218.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12236.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12238.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12248.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12250.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12250.notable.rst": { + "a": 11, + "d": 0 + }, + "doc/changes/devel/12264.dependency.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12268.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12269.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12279.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12282.bugfix.rst": { + "a": 1, + "d": 0 + }, + 
"doc/changes/devel/12289.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12299.other.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12308.apichange.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 9, + "d": 5 + }, + "doc/development/contributing.rst": { + "a": 51, + "d": 53 + }, + "doc/links.inc": { + "a": 1, + "d": 0 + }, + "pyproject.toml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1230.json b/doc/sphinxext/prs/1230.json new file mode 100644 index 00000000000..4d06f91870d --- /dev/null +++ b/doc/sphinxext/prs/1230.json @@ -0,0 +1,155 @@ +{ + "merge_commit_sha": "ce903fe24a451a12a0b6634de63a7db79b70fa77", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/connectivity/effective.py": { + "a": 2, + "d": 2 + }, + "mne/coreg.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/base.py": { + "a": 12, + "d": 29 + }, + "mne/fiff/brainvision/brainvision.py": { + "a": 9, + "d": 7 + }, + "mne/fiff/bti/raw.py": { + "a": 389, + "d": 392 + }, + "mne/fiff/bti/tests/test_bti.py": { + "a": 0, + "d": 1 + }, + "mne/fiff/edf/edf.py": { + "a": 7, + "d": 6 + }, + "mne/fiff/egi/egi.py": { + "a": 2, + "d": 3 + }, + "mne/fiff/fiff/raw.py": { + "a": 114, + "d": 115 + }, + "mne/fiff/fiff/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/fiff/fiff/tests/test_raw.py": { + "a": 3, + "d": 3 + }, + "mne/fiff/kit/kit.py": { + "a": 1, + "d": 2 + }, + "mne/fiff/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/open.py": { + "a": 17, + "d": 12 + }, + "mne/fiff/write.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 21, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 3, + "d": 3 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 2, + "d": 2 + }, + "mne/label.py": { + "a": 10, + "d": 11 + }, + "mne/minimum_norm/inverse.py": { + "a": 6, + "d": 6 + }, + "mne/preprocessing/ecg.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 2 + }, + "mne/proj.py": { + "a": 3, + "d": 2 + }, + "mne/realtime/stim_server_client.py": { + "a": 3, + "d": 0 + }, + "mne/realtime/tests/test_stim_client_server.py": { + "a": 1, + "d": 0 + }, + "mne/source_space.py": { + "a": 16, + "d": 17 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 5, + "d": 3 + }, + "mne/time_frequency/stft.py": { + "a": 5, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 4, + "d": 3 + }, + "mne/viz.py": { + "a": 8, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12300.json b/doc/sphinxext/prs/12300.json new file mode 100644 index 00000000000..e4961193c94 --- /dev/null +++ b/doc/sphinxext/prs/12300.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "fce9c5ed032cc7e7af0d7ea33f35e50d46510d64", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "nafraw", + "e": "ping-keng.jao@alumni.epfl.ch" + } + ], + "changes": { + "doc/changes/devel/12300.apichange.rst": { + "a": 3, + "d": 0 + }, + "doc/changes/devel/12300.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 5, + 
"d": 1 + }, + "mne/io/egi/egi.py": { + "a": 77, + "d": 74 + }, + "mne/io/egi/egimff.py": { + "a": 41, + "d": 114 + }, + "mne/io/egi/events.py": { + "a": 38, + "d": 2 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 45, + "d": 15 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12301.json b/doc/sphinxext/prs/12301.json new file mode 100644 index 00000000000..af62fb32225 --- /dev/null +++ b/doc/sphinxext/prs/12301.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0a0cad8802e832669bb954a3bdd8e08bfaecf784", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12302.json b/doc/sphinxext/prs/12302.json new file mode 100644 index 00000000000..7449ea3a49c --- /dev/null +++ b/doc/sphinxext/prs/12302.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7242c291fdd572c58143281a64688968463b928a", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/release.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12303.json b/doc/sphinxext/prs/12303.json new file mode 100644 index 00000000000..450205bf46f --- /dev/null +++ b/doc/sphinxext/prs/12303.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "60e46f0b6c184e1bfb9c399124fa7b619a96622b", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/codeql-analysis.yml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12304.json b/doc/sphinxext/prs/12304.json new file mode 100644 index 00000000000..ebd5eb0a985 --- /dev/null +++ b/doc/sphinxext/prs/12304.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cf2ca7ea723bc92ec1fdb77abc9eafe165160420", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/release.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12307.json b/doc/sphinxext/prs/12307.json new file mode 100644 index 00000000000..1f23b8faf54 --- /dev/null +++ b/doc/sphinxext/prs/12307.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f1a8120d29a162ec42c85e9d64136e3c2405da2c", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12308.json b/doc/sphinxext/prs/12308.json new file mode 100644 index 00000000000..eee77bbd783 --- /dev/null +++ b/doc/sphinxext/prs/12308.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4742914ff898d22a3c3012aeefaf2a8301f2c2f8", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/devel.rst": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 16, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 16, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12309.json b/doc/sphinxext/prs/12309.json new file mode 100644 index 00000000000..9a2e299c8ad --- /dev/null +++ b/doc/sphinxext/prs/12309.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a03a40d50f871b3d51da923ac156d50443162ea8", + "authors": [ + { + "n": "Peter J. Molfese", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12309.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 72, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 28, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1231.json b/doc/sphinxext/prs/1231.json new file mode 100644 index 00000000000..a4f6c17df80 --- /dev/null +++ b/doc/sphinxext/prs/1231.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6803bc7024b04f49ec6207b3852a7fb660dc3049", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/egi/egi.py": { + "a": 55, + "d": 17 + }, + "mne/fiff/egi/tests/test_egi.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12311.json b/doc/sphinxext/prs/12311.json new file mode 100644 index 00000000000..aa439ff4349 --- /dev/null +++ b/doc/sphinxext/prs/12311.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "c73b8afcf3cb6304bb67c390d667cd1ac526473d", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12311.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 5, + "d": 8 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 8, + "d": 11 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 5, + "d": 6 + }, + "examples/visualization/eyetracking_plot_heatmap.py": { + "a": 1, + "d": 5 + }, + "mne/epochs.py": { + "a": 54, + "d": 6 + }, + "mne/tests/test_epochs.py": { + "a": 20, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 3 + }, + "tools/setup_xvfb.sh": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 1, + "d": 2 + }, + "tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 5 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 6, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12316.json b/doc/sphinxext/prs/12316.json new file mode 100644 index 00000000000..c1fdcecfd12 --- /dev/null +++ b/doc/sphinxext/prs/12316.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c52208bedfb36e9157678d091f4b4e03ec96c96d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "CONTRIBUTING.md": { + "a": 1, + "d": 1 + }, + "doc/development/index.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12317.json b/doc/sphinxext/prs/12317.json new file mode 100644 index 00000000000..1a147b6829a --- /dev/null +++ b/doc/sphinxext/prs/12317.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"0f59894a2491797c996272c23c39412a62369f5b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12318.json b/doc/sphinxext/prs/12318.json new file mode 100644 index 00000000000..7ee964ecdda --- /dev/null +++ b/doc/sphinxext/prs/12318.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6733cae2a0765da9ec1b67a98937839d4cd9aadf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/actions/rename_towncrier/rename_towncrier.py": { + "a": 56, + "d": 0 + }, + ".github/workflows/autofix.yml": { + "a": 21, + "d": 0 + }, + ".pre-commit-config.yaml": { + "a": 0, + "d": 3 + }, + "doc/changes/devel/12318.other.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12319.json b/doc/sphinxext/prs/12319.json new file mode 100644 index 00000000000..58f1817d048 --- /dev/null +++ b/doc/sphinxext/prs/12319.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ca7fe266c0a6d4426a62798f37d6a3428d08de6b", + "authors": [ + { + "n": "Martin Oberg", + "e": null + } + ], + "changes": { + "doc/changes/devel/12319.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12320.json b/doc/sphinxext/prs/12320.json new file mode 100644 index 00000000000..7948782ad55 --- /dev/null +++ b/doc/sphinxext/prs/12320.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7ccd100310892617dc3c4290465c1eefe1d47282", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12323.json b/doc/sphinxext/prs/12323.json new file mode 100644 index 00000000000..41c87204836 --- /dev/null +++ b/doc/sphinxext/prs/12323.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "70455902ba834bec75ae1fd0f7f5b2fc48455cf8", + "authors": [ + { + "n": "Hamza Abdelhedi", + "e": "hamzaabdelhedi18@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12323.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/filter.py": { + "a": 40, + "d": 16 + }, + "mne/source_estimate.py": { + "a": 56, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 11, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 29, + "d": 1 + }, + "mne/utils/check.py": { + "a": 2, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12324.json b/doc/sphinxext/prs/12324.json new file mode 100644 index 00000000000..16c85e9e064 --- /dev/null +++ b/doc/sphinxext/prs/12324.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6857f10bd6470495635ffea190e673e91dc81ac6", + "authors": [ + { + "n": "rcmdnk", + "e": "rcmdnk@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12324.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 7, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 43, + "d": 0 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/12325.json b/doc/sphinxext/prs/12325.json new file mode 100644 index 00000000000..01f7000d420 --- /dev/null +++ b/doc/sphinxext/prs/12325.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6790426221b83ee16375ec19e974808d7b9aad4c", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12326.json b/doc/sphinxext/prs/12326.json new file mode 100644 index 00000000000..2fb3ff1d666 --- /dev/null +++ b/doc/sphinxext/prs/12326.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f060e6b470704823dad6120cd685ad7390c1eae6", + "authors": [ + { + "n": "btkcodedev", + "e": "btk.codedev@gmail.com" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12326.other.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1233.json b/doc/sphinxext/prs/1233.json new file mode 100644 index 00000000000..261bf3b75b3 --- /dev/null +++ b/doc/sphinxext/prs/1233.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d67973b73db6665770b272cfebf95136aa7d468b", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 2 + }, + "examples/time_frequency/plot_stockwell.py": { + "a": 48, + "d": 0 + }, + "examples/time_frequency/plot_time_frequency_simulated.py": { + "a": 20, + "d": 2 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/_stockwell.py": { + "a": 249, + "d": 0 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 93, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12330.json b/doc/sphinxext/prs/12330.json new file mode 100644 index 00000000000..504cce4ca8d --- /dev/null +++ b/doc/sphinxext/prs/12330.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ff03c6ca17f7d12f72ee1f488264acec6a8db06e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 6, + "d": 6 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 3 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12332.json b/doc/sphinxext/prs/12332.json new file mode 100644 index 00000000000..698dbee5f35 --- /dev/null +++ b/doc/sphinxext/prs/12332.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6eb4c3f4f94177d9c6e7c40cf941f5a4be9d4c98", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12332.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/export/_edf.py": { + "a": 33, + "d": 66 + }, + "mne/export/tests/test_export.py": { + "a": 5, + "d": 8 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 6 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/12335.json b/doc/sphinxext/prs/12335.json new file mode 100644 index 00000000000..fa21ad13caf --- /dev/null +++ b/doc/sphinxext/prs/12335.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "596122d1f39a962e8299c63020885e207f127c87", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/development/contributing.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12336.json b/doc/sphinxext/prs/12336.json new file mode 100644 index 00000000000..a24159ef202 --- /dev/null +++ b/doc/sphinxext/prs/12336.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f70378a922a46b5432a761281ae857ec747b984b", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12336.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 27, + "d": 11 + }, + "mne/channels/interpolation.py": { + "a": 116, + "d": 26 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 50, + "d": 0 + }, + "mne/defaults.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12338.json b/doc/sphinxext/prs/12338.json new file mode 100644 index 00000000000..457af66a8b1 --- /dev/null +++ b/doc/sphinxext/prs/12338.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d6a58cb6ef9a8d69143c4629c3991ccbf250af42", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12338.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/development/contributing.rst": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 136, + "d": 4 + }, + "mne/channels/tests/test_layout.py": { + "a": 116, + "d": 12 + }, + "mne/utils/docs.py": { + "a": 14, + "d": 10 + }, + "mne/viz/topo.py": { + "a": 11, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1234.json b/doc/sphinxext/prs/1234.json new file mode 100644 index 00000000000..5361d7a06a0 --- /dev/null +++ b/doc/sphinxext/prs/1234.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f127281d34540597e81dab029edbb67ed9a29be2", + "authors": [ + { + "n": null, + "e": "flKazemakase@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 5, + "d": 0 + }, + "mne/fiff/edf/edf.py": { + "a": 122, + "d": 7 + }, + "mne/fiff/edf/tests/data/test.edf": { + "a": 7, + "d": 7 + }, + "mne/fiff/edf/tests/test_edf.py": { + "a": 87, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12340.json b/doc/sphinxext/prs/12340.json new file mode 100644 index 00000000000..7d22423d136 --- /dev/null +++ b/doc/sphinxext/prs/12340.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4750f0dd5dc81c15704230c785541ab09fa5373b", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12343.json b/doc/sphinxext/prs/12343.json new file mode 100644 index 00000000000..75ce446d5b8 --- /dev/null +++ b/doc/sphinxext/prs/12343.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "16c17b4dba8381615d57267c01f1f2f310fc522c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": 
"114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12343.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/open.py": { + "a": 17, + "d": 12 + }, + "mne/_fiff/tag.py": { + "a": 38, + "d": 59 + }, + "mne/_fiff/tree.py": { + "a": 1, + "d": 46 + }, + "mne/commands/tests/test_commands.py": { + "a": 11, + "d": 2 + }, + "mne/epochs.py": { + "a": 2, + "d": 3 + }, + "mne/io/fiff/raw.py": { + "a": 65, + "d": 71 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 5, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12345.json b/doc/sphinxext/prs/12345.json new file mode 100644 index 00000000000..0d780407167 --- /dev/null +++ b/doc/sphinxext/prs/12345.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4f1557d7c122d3bad83a5f43c648c0cd3cfc802a", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12345.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12347.json b/doc/sphinxext/prs/12347.json new file mode 100644 index 00000000000..eadd62a7baf --- /dev/null +++ b/doc/sphinxext/prs/12347.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d2c806c198ccedb5b66c8eb0da31519ad0e970e9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12348.json b/doc/sphinxext/prs/12348.json new file mode 100644 index 00000000000..59937e7eaf5 --- /dev/null +++ b/doc/sphinxext/prs/12348.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "8eb10e36ebfdac990ed86da919f2729177090dd2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "motofumi-fushimi", + "e": "30593537+motofumi-fushimi@users.noreply.github.com" + } + ], + "changes": { + ".mailmap": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12348.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 6, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 18, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12353.json b/doc/sphinxext/prs/12353.json new file mode 100644 index 00000000000..0cf114c11eb --- /dev/null +++ b/doc/sphinxext/prs/12353.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "eefd179b767fea456fd1eab3700c5299310b5b8e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 4, + "d": 5 + }, + "mne/_fiff/open.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 8, + "d": 8 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_annotate_amplitude.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/interpolate.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_realign.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 2, + "d": 2 + }, + "mne/source_space/_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 2, + "d": 2 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 2, + "d": 2 + }, + 
"mne/tests/test_annotations.py": { + "a": 3, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked_field.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12354.json b/doc/sphinxext/prs/12354.json new file mode 100644 index 00000000000..dc85305fd86 --- /dev/null +++ b/doc/sphinxext/prs/12354.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6af181afff86c679e1ac6c94b65a7843e07ae923", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12354.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked_field.py": { + "a": 16, + "d": 18 + }, + "mne/viz/tests/test_3d.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12357.json b/doc/sphinxext/prs/12357.json new file mode 100644 index 00000000000..e5a46e50f04 --- /dev/null +++ b/doc/sphinxext/prs/12357.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "85838854a333575ecb9d74b8f767f8bcbd515d56", + "authors": [ + { + "n": "Kristijan Armeni", + "e": null + } + ], + "changes": { + "doc/changes/devel/12357.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 12, + "d": 6 + }, + "mne/io/neuralynx/tests/test_neuralynx.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12358.json b/doc/sphinxext/prs/12358.json new file mode 100644 index 00000000000..59933880341 --- /dev/null +++ b/doc/sphinxext/prs/12358.json @@ -0,0 +1,775 @@ +{ + "merge_commit_sha": "2040898ac14e79353b7a23a07e177d1633298c0f", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12358.other.rst": { + "a": 1, + "d": 0 + }, + "examples/datasets/limo_data.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/opm_data.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 1, + "d": 3 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 7, + "d": 7 + }, + "examples/inverse/compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/compute_mne_inverse_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 2, + "d": 10 + }, + "examples/inverse/label_source_activations.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/read_stc.py": { + "a": 1, + "d": 3 + }, + "examples/preprocessing/define_target_events.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 1, + "d": 10 + }, + "examples/preprocessing/ica_comparison.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/otp.py": { + "a": 2, + "d": 8 + }, + "examples/simulation/simulate_raw_data.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/time_frequency_simulated.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/evoked_topomap.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/evoked_whitening.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/_digitization.py": { + "a": 13, + "d": 17 + }, + "mne/_fiff/meas_info.py": { + "a": 29, + "d": 45 + }, + "mne/_fiff/open.py": { + "a": 4, + "d": 4 + }, + "mne/_fiff/pick.py": { + "a": 14, + "d": 18 + }, + "mne/_fiff/proj.py": { + "a": 1, + 
"d": 1 + }, + "mne/_fiff/reference.py": { + "a": 5, + "d": 5 + }, + "mne/_fiff/tests/test_constants.py": { + "a": 4, + "d": 4 + }, + "mne/_fiff/utils.py": { + "a": 3, + "d": 4 + }, + "mne/_fiff/what.py": { + "a": 1, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 2, + "d": 2 + }, + "mne/_ola.py": { + "a": 22, + "d": 30 + }, + "mne/annotations.py": { + "a": 15, + "d": 19 + }, + "mne/baseline.py": { + "a": 10, + "d": 12 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 11, + "d": 12 + }, + "mne/beamformer/_dics.py": { + "a": 2, + "d": 3 + }, + "mne/beamformer/_lcmv.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/_rap_music.py": { + "a": 2, + "d": 2 + }, + "mne/bem.py": { + "a": 41, + "d": 49 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 1, + "d": 1 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 7, + "d": 7 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 6 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 4, + "d": 4 + }, + "mne/channels/montage.py": { + "a": 10, + "d": 12 + }, + "mne/channels/tests/test_montage.py": { + "a": 18, + "d": 25 + }, + "mne/chpi.py": { + "a": 42, + "d": 52 + }, + "mne/commands/mne_anonymize.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_maxfilter.py": { + "a": 1, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 5, + "d": 7 + }, + "mne/commands/utils.py": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 16, + "d": 20 + }, + "mne/cov.py": { + "a": 31, + "d": 35 + }, + "mne/cuda.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 2, + "d": 6 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 8, + "d": 9 + }, + "mne/decoding/base.py": { + "a": 9, + "d": 14 + }, + "mne/decoding/csp.py": { + "a": 4, + "d": 6 + }, + "mne/decoding/mixin.py": { + "a": 5, + "d": 7 + }, + "mne/decoding/receptive_field.py": { + "a": 25, + "d": 31 + }, + "mne/decoding/search_light.py": { + "a": 4, + "d": 5 + }, + "mne/decoding/ssd.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 3, + "d": 5 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 34, + "d": 29 + }, + "mne/epochs.py": { + "a": 32, + "d": 40 + }, + "mne/event.py": { + "a": 2, + "d": 4 + }, + "mne/evoked.py": { + "a": 22, + "d": 29 + }, + "mne/filter.py": { + "a": 50, + "d": 77 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 11, + "d": 10 + }, + "mne/io/artemis123/artemis123.py": { + "a": 3, + "d": 3 + }, + "mne/io/artemis123/utils.py": { + "a": 5, + "d": 5 + }, + "mne/io/base.py": { + "a": 3, + "d": 4 + }, + "mne/io/boxy/boxy.py": { + "a": 3, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 8, + "d": 10 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 4, + "d": 4 + }, + "mne/io/bti/bti.py": { + "a": 3, + "d": 3 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 4, + "d": 4 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/eeg.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 2, + "d": 2 + }, 
+ "mne/io/curry/curry.py": { + "a": 3, + "d": 3 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 8, + "d": 8 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 3 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/general.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 2, + "d": 2 + }, + "mne/io/eximia/eximia.py": { + "a": 3, + "d": 3 + }, + "mne/io/eyelink/eyelink.py": { + "a": 2, + "d": 2 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 3 + }, + "mne/io/fil/fil.py": { + "a": 3, + "d": 3 + }, + "mne/io/hitachi/hitachi.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 3, + "d": 6 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 2 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 2, + "d": 2 + }, + "mne/io/neuralynx/tests/test_neuralynx.py": { + "a": 1, + "d": 2 + }, + "mne/io/nicolet/nicolet.py": { + "a": 2, + "d": 2 + }, + "mne/io/nihon/nihon.py": { + "a": 7, + "d": 7 + }, + "mne/io/nirx/nirx.py": { + "a": 2, + "d": 2 + }, + "mne/io/nsx/nsx.py": { + "a": 5, + "d": 5 + }, + "mne/io/persyst/persyst.py": { + "a": 2, + "d": 2 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 6, + "d": 6 + }, + "mne/io/snirf/_snirf.py": { + "a": 4, + "d": 4 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 5, + "d": 5 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/misc.py": { + "a": 3, + "d": 3 + }, + "mne/morph.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eyetracking/_pupillometry.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/eyetracking/calibration.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 11, + "d": 11 + }, + "mne/preprocessing/nirs/tests/test_optical_density.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 3 + }, + "mne/report/report.py": { + "a": 4, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/source_space/_source_space.py": { + "a": 3, + "d": 3 + }, + "mne/stats/cluster_level.py": { + "a": 2, + "d": 2 + }, + "mne/stats/parametric.py": { + "a": 4, + "d": 5 + }, + "mne/stats/regression.py": { + "a": 5, + "d": 5 + }, + "mne/surface.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_annotations.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_rank.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/csd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 5, + "d": 5 + }, + "mne/transforms.py": { + "a": 4, + "d": 8 + }, + "mne/utils/check.py": { + "a": 6, + "d": 6 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 9 + }, + "mne/utils/misc.py": { + 
"a": 2, + "d": 27 + }, + "mne/utils/progressbar.py": { + "a": 2, + "d": 3 + }, + "mne/utils/tests/test_logging.py": { + "a": 6, + "d": 6 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 3 + }, + "mne/viz/_brain/_brain.py": { + "a": 7, + "d": 7 + }, + "mne/viz/_brain/surface.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 3 + }, + "mne/viz/backends/_utils.py": { + "a": 3, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 10, + "d": 11 + }, + "mne/viz/evoked.py": { + "a": 19, + "d": 30 + }, + "mne/viz/montage.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 5 + }, + "mne/viz/ui_events.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 4 + }, + "pyproject.toml": { + "a": 2, + "d": 1 + }, + "tutorials/clinical/60_sleep.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 2, + "d": 4 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 1, + "d": 5 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 2, + "d": 2 + }, + "tutorials/inverse/10_stc_class.py": { + "a": 3, + "d": 2 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 2, + "d": 2 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 3, + "d": 3 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 2, + "d": 4 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 2, + "d": 2 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 3, + "d": 5 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 3, + "d": 3 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 3, + "d": 3 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 2, + "d": 2 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 2, + "d": 2 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 16, + "d": 16 + }, + "tutorials/visualization/10_publication_figure.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12359.json b/doc/sphinxext/prs/12359.json new file mode 100644 index 00000000000..25aede2b256 --- /dev/null +++ b/doc/sphinxext/prs/12359.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "87cfea5323c61ded31da970c07238c447f4fe37f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12361.json b/doc/sphinxext/prs/12361.json new file mode 100644 index 00000000000..b5d9ac58ab3 --- /dev/null +++ b/doc/sphinxext/prs/12361.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "52a905928d43703f96517b7050f524e5a653a079", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { 
+ "a": 10, + "d": 4 + }, + "doc/conf.py": { + "a": 9, + "d": 0 + }, + "mne/conftest.py": { + "a": 9, + "d": 0 + }, + "mne/export/_egimff.py": { + "a": 1, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12362.json b/doc/sphinxext/prs/12362.json new file mode 100644 index 00000000000..46f56faf5ce --- /dev/null +++ b/doc/sphinxext/prs/12362.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a6f0331685bf6fc63001ed7ba6b379ee464fd91d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/manual_install.rst": { + "a": 11, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12363.json b/doc/sphinxext/prs/12363.json new file mode 100644 index 00000000000..61bad8e8335 --- /dev/null +++ b/doc/sphinxext/prs/12363.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b8708b46499882062872cf7dea114dc08ad2b8cb", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12364.json b/doc/sphinxext/prs/12364.json new file mode 100644 index 00000000000..000005543ef --- /dev/null +++ b/doc/sphinxext/prs/12364.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "acf7b887976dc30f3edf93883911dfa9e49faae8", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12366.json b/doc/sphinxext/prs/12366.json new file mode 100644 index 00000000000..b336718a89c --- /dev/null +++ b/doc/sphinxext/prs/12366.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "d917be12de5739654f3c690fe176382948e261d2", + "authors": [ + { + "n": "Qian Chu", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Alex Lepauvre", + "e": "alexander.lepauvre@ad.aesthetics.mpg.de" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12366.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "mne/_fiff/reference.py": { + "a": 103, + "d": 14 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 154, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 18, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12368.json b/doc/sphinxext/prs/12368.json new file mode 100644 index 00000000000..bfb733f67d5 --- /dev/null +++ b/doc/sphinxext/prs/12368.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "566c6ea5a6c5c66657e9edcffb1e596d21ba6952", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/installers.rst": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12371.json b/doc/sphinxext/prs/12371.json new file mode 100644 index 00000000000..c40c044bce8 --- /dev/null +++ b/doc/sphinxext/prs/12371.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1b0d20850fa2ff60fb61b8a7e7a68a38323dcf07", + "authors": [ + { + "n": "Kristijan Armeni", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": 
{ + "doc/changes/devel/12371.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12374.json b/doc/sphinxext/prs/12374.json new file mode 100644 index 00000000000..08d8d2efb80 --- /dev/null +++ b/doc/sphinxext/prs/12374.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2f0d2f3ee2422dfb721d1bcc7138ff1a6fccd662", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12376.json b/doc/sphinxext/prs/12376.json new file mode 100644 index 00000000000..568624224b8 --- /dev/null +++ b/doc/sphinxext/prs/12376.json @@ -0,0 +1,175 @@ +{ + "merge_commit_sha": "990ce18d4eaf0af6d0f0aadbc41fd86aa826ea59", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12376.dependency.rst": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 4, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 14, + "d": 13 + }, + "mne/conftest.py": { + "a": 4, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 6, + "d": 2 + }, + "mne/forward/tests/test_forward.py": { + "a": 7, + "d": 4 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 1 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 3, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 8, + "d": 6 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 9, + "d": 5 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 7, + "d": 4 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 1 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 7, + "d": 5 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 4, + "d": 1 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 8, + "d": 10 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 8, + "d": 12 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 13, + "d": 7 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 7, + "d": 4 + }, + "mne/report/tests/test_report.py": { + "a": 8, + "d": 4 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 3, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 5, + "d": 3 + }, + "mne/tests/test_bem.py": { + "a": 21, + "d": 13 + }, + "mne/tests/test_chpi.py": { + "a": 8, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 2, + "d": 1 + }, + "mne/utils/tests/test_check.py": { + "a": 2, + "d": 1 + }, + "mne/utils/tests/test_logging.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 8, + "d": 5 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_misc.py": { + "a": 9, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 3, + "d": 1 
+ }, + "mne/viz/tests/test_topo.py": { + "a": 3, + "d": 3 + }, + "pyproject.toml": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12377.json b/doc/sphinxext/prs/12377.json new file mode 100644 index 00000000000..dd3cb612ff6 --- /dev/null +++ b/doc/sphinxext/prs/12377.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e0af51dec97219655d59aae0eab09675363f3792", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 1, + "d": 3 + }, + "mne/conftest.py": { + "a": 4, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 6, + "d": 0 + }, + "pyproject.toml": { + "a": 2, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 6 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12378.json b/doc/sphinxext/prs/12378.json new file mode 100644 index 00000000000..1e4b472d344 --- /dev/null +++ b/doc/sphinxext/prs/12378.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5339e08d7188f94e480d3b98bb81716a2e84e7f4", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1238.json b/doc/sphinxext/prs/1238.json new file mode 100644 index 00000000000..842da6c1526 --- /dev/null +++ b/doc/sphinxext/prs/1238.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bf0447e92afa53216381e45998e6a2274428ac07", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12380.json b/doc/sphinxext/prs/12380.json new file mode 100644 index 00000000000..538dd6e9fac --- /dev/null +++ b/doc/sphinxext/prs/12380.json @@ -0,0 +1,175 @@ +{ + "merge_commit_sha": "71faac906f00baa14b546da50d0f97aceedc004a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12380.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 5, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 2, + "d": 0 + }, + "mne/_fiff/open.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/pick.py": { + "a": 5, + "d": 5 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_setup_source_space.py": { + "a": 7, + "d": 5 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/event.py": { + "a": 7, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/export/_export.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/curry/curry.py": { + "a": 3, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 3, + "d": 1 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 6, + "d": 6 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/ssp.py": { + "a": 9, + "d": 5 + 
}, + "mne/preprocessing/tests/test_ssp.py": { + "a": 3, + "d": 0 + }, + "mne/report/report.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 8, + "d": 2 + }, + "mne/source_space/_source_space.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 5, + "d": 3 + }, + "mne/utils/tests/test_docs.py": { + "a": 6, + "d": 6 + }, + "mne/utils/tests/test_logging.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_qt.py": { + "a": 4, + "d": 4 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 7, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 10, + "d": 10 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12382.json b/doc/sphinxext/prs/12382.json new file mode 100644 index 00000000000..dd1d0a8705a --- /dev/null +++ b/doc/sphinxext/prs/12382.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6305bd16975e2fe31b1eaf63408c9f93bfe1b535", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12382.apichange.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12382.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 19, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12383.json b/doc/sphinxext/prs/12383.json new file mode 100644 index 00000000000..245d627dcd0 --- /dev/null +++ b/doc/sphinxext/prs/12383.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "03d78f43bf46e98cc4e2905c6e98e6190cf01924", + "authors": [ + { + "n": "Jacob Woessner", + "e": "Woessner.jacob@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12383.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/evoked.py": { + "a": 23, + "d": 4 + }, + "mne/tests/test_evoked.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12385.json b/doc/sphinxext/prs/12385.json new file mode 100644 index 00000000000..a9af4166077 --- /dev/null +++ b/doc/sphinxext/prs/12385.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8419ca04f7f85e5861128593fa92462d0ccbfb27", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12326.other.rst": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12389.json b/doc/sphinxext/prs/12389.json new file mode 100644 index 00000000000..8374b8ba62f --- /dev/null +++ b/doc/sphinxext/prs/12389.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "4ccd30fed3d83953506b4ff0532429973a0f797a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12389.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/pick.py": { + "a": 11, + "d": 1 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 6, + "d": 0 
+ }, + "mne/datasets/config.py": { + "a": 9, + "d": 5 + }, + "mne/datasets/phantom_kit/phantom_kit.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_regress.py": { + "a": 13, + "d": 11 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 13, + "d": 0 + }, + "tutorials/inverse/95_phantom_KIT.py": { + "a": 31, + "d": 94 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12393.json b/doc/sphinxext/prs/12393.json new file mode 100644 index 00000000000..f4ec8237dbe --- /dev/null +++ b/doc/sphinxext/prs/12393.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4cea4a9237867f3cfcdfdf9f7a67d7ee6b790f27", + "authors": [ + { + "n": "Jacob Woessner", + "e": "Woessner.jacob@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12393.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/cnt/cnt.py": { + "a": 23, + "d": 2 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12394.json b/doc/sphinxext/prs/12394.json new file mode 100644 index 00000000000..5a3b64dfd20 --- /dev/null +++ b/doc/sphinxext/prs/12394.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "0aa4bec2228f5d14c38d11fc37cea7a1e8c5140c", + "authors": [ + { + "n": "Judy D Zhu", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12394.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/io/kit/coreg.py": { + "a": 13, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 19, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12399.json b/doc/sphinxext/prs/12399.json new file mode 100644 index 00000000000..b15ed52fd19 --- /dev/null +++ b/doc/sphinxext/prs/12399.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "324899ae4f60fb5d4911bfd679e53e8815a081d1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12399.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/html/d3.v3.min.js": { + "a": 0, + "d": 5 + }, + "mne/html/mpld3.v0.2.min.js": { + "a": 0, + "d": 2 + }, + "mne/report/js_and_css/report.css": { + "a": 19, + "d": 0 + }, + "mne/report/js_and_css/report.sass": { + "a": 0, + "d": 19 + }, + "mne/report/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12400.json b/doc/sphinxext/prs/12400.json new file mode 100644 index 00000000000..f8ad700fa7a --- /dev/null +++ b/doc/sphinxext/prs/12400.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "195a2cc8009160fd125e355b0280e903a941c874", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 0, + "d": 66 + }, + ".github/workflows/tests.yml": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12410.json b/doc/sphinxext/prs/12410.json new file mode 100644 index 00000000000..488bf5dd52e --- /dev/null +++ b/doc/sphinxext/prs/12410.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "d8ea2f5e60174d61301dbefbed9c76c9adc01ec9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + 
"n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12410.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 1, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 40, + "d": 6 + }, + "mne/utils/misc.py": { + "a": 4, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 13 + }, + "mne/viz/evoked.py": { + "a": 37, + "d": 24 + }, + "mne/viz/tests/test_evoked.py": { + "a": 23, + "d": 10 + }, + "mne/viz/utils.py": { + "a": 46, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12411.json b/doc/sphinxext/prs/12411.json new file mode 100644 index 00000000000..a4826c96830 --- /dev/null +++ b/doc/sphinxext/prs/12411.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "57611ae7142c3de407dcad60cffe110cc1c84c86", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 8, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 13, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12412.json b/doc/sphinxext/prs/12412.json new file mode 100644 index 00000000000..2962494f518 --- /dev/null +++ b/doc/sphinxext/prs/12412.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "78e840d4de0e1748cf32eab31de75887ec3e8082", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12417.json b/doc/sphinxext/prs/12417.json new file mode 100644 index 00000000000..d143a652aee --- /dev/null +++ b/doc/sphinxext/prs/12417.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0f5c91bbdfa0258c44c71ef8274cad22a90c13eb", + "authors": [ + { + "n": "Kristijan Armeni", + "e": null + } + ], + "changes": { + "mne/io/neuralynx/neuralynx.py": { + "a": 8, + "d": 51 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12419.json b/doc/sphinxext/prs/12419.json new file mode 100644 index 00000000000..220bc73e44c --- /dev/null +++ b/doc/sphinxext/prs/12419.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4c84b3e1f94a2772deb363d4298a078e64f55139", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12420.json b/doc/sphinxext/prs/12420.json new file mode 100644 index 00000000000..bc22f326c39 --- /dev/null +++ b/doc/sphinxext/prs/12420.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9f0dfefe612776226d22714958dec2f3e2c05e4b", + "authors": [ + { + "n": "Nabil Alibou", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12420.other.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12421.json b/doc/sphinxext/prs/12421.json new file mode 100644 index 00000000000..650e8f63823 --- /dev/null +++ b/doc/sphinxext/prs/12421.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"e6b49ea773d522670a534ec4a03470ab9c2baf01", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12426.json b/doc/sphinxext/prs/12426.json new file mode 100644 index 00000000000..a1bdab68016 --- /dev/null +++ b/doc/sphinxext/prs/12426.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e05e77c92480871f0be38a184894d2ef48694844", + "authors": [ + { + "n": "Nabil Alibou", + "e": null + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12428.json b/doc/sphinxext/prs/12428.json new file mode 100644 index 00000000000..370c87deeb5 --- /dev/null +++ b/doc/sphinxext/prs/12428.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "78fbfea6363555feb873ef01762f20a4f6c53f72", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "pyproject.toml": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1243.json b/doc/sphinxext/prs/1243.json new file mode 100644 index 00000000000..232a5e6ec13 --- /dev/null +++ b/doc/sphinxext/prs/1243.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "1ecda6ffdbe048bc9f19e800e10f63e64f2713f0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 20, + "d": 0 + }, + "mne/fiff/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/array/__init__.py": { + "a": 5, + "d": 0 + }, + "mne/fiff/array/array.py": { + "a": 126, + "d": 0 + }, + "mne/fiff/array/tests/test_array.py": { + "a": 99, + "d": 0 + }, + "mne/fiff/bti/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/bti/bti.py": { + "a": 0, + "d": 0 + }, + "mne/fiff/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/pick.py": { + "a": 2, + "d": 0 + }, + "mne/viz.py": { + "a": 4, + "d": 4 + }, + "setup.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12430.json b/doc/sphinxext/prs/12430.json new file mode 100644 index 00000000000..06320b7298c --- /dev/null +++ b/doc/sphinxext/prs/12430.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8e55fef3a354e2c3dbe086256dfa3a73f698e0c5", + "authors": [ + { + "n": "Alexander Kiefer", + "e": null + } + ], + "changes": { + "doc/changes/devel/12430.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 5, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12431.json b/doc/sphinxext/prs/12431.json new file mode 100644 index 00000000000..e6a401de03c --- /dev/null +++ b/doc/sphinxext/prs/12431.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2851fb5f8d2a780ff2ef5dd181678fe8f10f04c6", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12433.json b/doc/sphinxext/prs/12433.json new file mode 100644 index 00000000000..214b756906a --- /dev/null +++ b/doc/sphinxext/prs/12433.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cef847997255716045e43bf11967af6d5206c8ba", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + 
"mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12436.json b/doc/sphinxext/prs/12436.json new file mode 100644 index 00000000000..7088e6ddbeb --- /dev/null +++ b/doc/sphinxext/prs/12436.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ccf679457e0ccddc6a2aabeda6968da647f10c6e", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/12436.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 6, + "d": 3 + }, + "tutorials/clinical/20_seeg.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12437.json b/doc/sphinxext/prs/12437.json new file mode 100644 index 00000000000..235efbcbff3 --- /dev/null +++ b/doc/sphinxext/prs/12437.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7436c9efc30c0d2679161a83e9255770f41ec21b", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12438.json b/doc/sphinxext/prs/12438.json new file mode 100644 index 00000000000..eceef08fdab --- /dev/null +++ b/doc/sphinxext/prs/12438.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1db901c2ab7dd428f43e3ad8a79f04edff790edc", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1244.json b/doc/sphinxext/prs/1244.json new file mode 100644 index 00000000000..9bc02a8f43e --- /dev/null +++ b/doc/sphinxext/prs/1244.json @@ -0,0 +1,163 @@ +{ + "merge_commit_sha": "ec3c0387437e76843614ada85b90a4b007470e43", + "authors": [ + { + "n": "Andrew R. 
Dykstra", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 2, + "d": 2 + }, + "doc/source/whats_new.rst": { + "a": 12, + "d": 6 + }, + "examples/export/plot_evoked_to_nitime.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 3, + "d": 2 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 4, + "d": 4 + }, + "examples/plot_evoked_topomap.py": { + "a": 5, + "d": 3 + }, + "examples/plot_evoked_whitening.py": { + "a": 8, + "d": 8 + }, + "examples/plot_from_raw_to_multiple_epochs_to_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/plot_meg_eeg_fields_3d.py": { + "a": 8, + "d": 6 + }, + "examples/plot_megsim_data.py": { + "a": 8, + "d": 8 + }, + "examples/plot_megsim_data_single_trial.py": { + "a": 2, + "d": 2 + }, + "examples/plot_read_evoked.py": { + "a": 3, + "d": 3 + }, + "examples/plot_shift_evoked.py": { + "a": 9, + "d": 9 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 10, + "d": 8 + }, + "examples/plot_ssp_projs_sensitivity_map.py": { + "a": 4, + "d": 5 + }, + "examples/plot_ssp_projs_topomaps.py": { + "a": 8, + "d": 6 + }, + "examples/plot_topography.py": { + "a": 3, + "d": 2 + }, + "mne/fiff/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/evoked.py": { + "a": 163, + "d": 47 + }, + "mne/fiff/tests/test_compensator.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 74, + "d": 61 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 3, + "d": 5 + }, + "mne/forward/tests/test_forward.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 5, + "d": 4 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 4, + "d": 4 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 8, + "d": 8 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_cov.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 11, + "d": 11 + }, + "mne/tests/test_utils.py": { + "a": 9, + "d": 9 + }, + "mne/tests/test_viz.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12440.json b/doc/sphinxext/prs/12440.json new file mode 100644 index 00000000000..b81400ba8cc --- /dev/null +++ b/doc/sphinxext/prs/12440.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7f06cb0096b71d4a6a4cd60c8be666ed046cadd0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/conftest.py": { + "a": 0, + "d": 2 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12441.json b/doc/sphinxext/prs/12441.json new file mode 100644 index 00000000000..e77fb839d79 --- /dev/null +++ b/doc/sphinxext/prs/12441.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "73661d10942187a263a61ced85d31ff5eb8e1ce8", + "authors": [ + { + "n": "rcmdnk", + "e": "rcmdnk@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"doc/changes/devel/12441.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 48, + "d": 61 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 133, + "d": 52 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 1, + "d": 3 + }, + "mne/io/tests/test_read_raw.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12442.json b/doc/sphinxext/prs/12442.json new file mode 100644 index 00000000000..6af341b76f7 --- /dev/null +++ b/doc/sphinxext/prs/12442.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fc771130d7e7bd0a06984848c84eee209af7f4b8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12443.json b/doc/sphinxext/prs/12443.json new file mode 100644 index 00000000000..984daa718a8 --- /dev/null +++ b/doc/sphinxext/prs/12443.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5e23fe00bf0f7ec9cf75708269bbf2bd95a2dad6", + "authors": [ + { + "n": "Sophie Herbst", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12443.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 17, + "d": 2 + }, + "mne/report/tests/test_report.py": { + "a": 14, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12444.json b/doc/sphinxext/prs/12444.json new file mode 100644 index 00000000000..9a93f0eac34 --- /dev/null +++ b/doc/sphinxext/prs/12444.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "85ca0ed7df3ae689ecf7a0109cc95982c5be2895", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12444.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 13, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12445.json b/doc/sphinxext/prs/12445.json new file mode 100644 index 00000000000..56d62254c84 --- /dev/null +++ b/doc/sphinxext/prs/12445.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "49e895fd9c5ab44b92c73aaa210b223d4ad633f7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12445.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 49, + "d": 19 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 74, + "d": 2 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 12, + "d": 11 + }, + "mne/transforms.py": { + "a": 23, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12446.json b/doc/sphinxext/prs/12446.json new file mode 100644 index 00000000000..6b15f5c2166 --- /dev/null +++ b/doc/sphinxext/prs/12446.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "abfa0a6256b8f7f938ea91b1a7146c1a522f01dc", + "authors": [ + { + "n": "Richard Scholz", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12446.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + 
"d": 0 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 14, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12447.json b/doc/sphinxext/prs/12447.json new file mode 100644 index 00000000000..c5f6c731c17 --- /dev/null +++ b/doc/sphinxext/prs/12447.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c410b6ddeb1f04562f83eed7d5418ff21254f7fd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/report/report.py": { + "a": 58, + "d": 56 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12448.json b/doc/sphinxext/prs/12448.json new file mode 100644 index 00000000000..af5855e6dde --- /dev/null +++ b/doc/sphinxext/prs/12448.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "614424718fa911cc0cfacc0df85652b3efaee358", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12450.json b/doc/sphinxext/prs/12450.json new file mode 100644 index 00000000000..4cfdcd88283 --- /dev/null +++ b/doc/sphinxext/prs/12450.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9ae99424325ef5f100c0774f469a948884fe8b07", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/12450.other.rst": { + "a": 1, + "d": 0 + }, + "mne/export/_brainvision.py": { + "a": 141, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12451.json b/doc/sphinxext/prs/12451.json new file mode 100644 index 00000000000..bd5d57418e5 --- /dev/null +++ b/doc/sphinxext/prs/12451.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "a867e5cd372ab8ee980efffd14c7d157c5b73bf3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/12451.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12451.dependency.rst": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 11, + "d": 20 + }, + "mne/datasets/__init__.pyi": { + "a": 1, + "d": 1 + }, + "mne/datasets/_infant/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 8, + "d": 1 + }, + "mne/fixes.py": { + "a": 2, + "d": 3 + }, + "mne/io/base.py": { + "a": 7, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 28, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 43, + "d": 7 + }, + "mne/utils/check.py": { + "a": 19, + "d": 1 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12452.json b/doc/sphinxext/prs/12452.json new file mode 100644 index 00000000000..2bb9331bf07 --- /dev/null +++ b/doc/sphinxext/prs/12452.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b210678357b1dae7f880d11392863be5fda4983e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "pyproject.toml": { + "a": 2, + "d": 2 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12453.json b/doc/sphinxext/prs/12453.json new file mode 100644 index 00000000000..f268ae08471 
--- /dev/null +++ b/doc/sphinxext/prs/12453.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7d5329ee5aa1f14b5450abebaf9caeecd3ccdb8f", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12454.json b/doc/sphinxext/prs/12454.json new file mode 100644 index 00000000000..5bbc0dccf4e --- /dev/null +++ b/doc/sphinxext/prs/12454.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ae6e55e8eb41f080d6c209c129bb86ecdb2fc30d", + "authors": [ + { + "n": "Sophie Herbst", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12454.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 17, + "d": 3 + }, + "mne/report/tests/test_report.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12456.json b/doc/sphinxext/prs/12456.json new file mode 100644 index 00000000000..1ef598f37ff --- /dev/null +++ b/doc/sphinxext/prs/12456.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b8ff49750fd8381ad4ef6615aa941270d1e3c877", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/12456.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12461.json b/doc/sphinxext/prs/12461.json new file mode 100644 index 00000000000..00adfaa637d --- /dev/null +++ b/doc/sphinxext/prs/12461.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "69d29637db82563cd56d8effc396124f243cf4fb", + "authors": [ + { + "n": "Will Turner", + "e": "williamfrancisturner@gmail.com" + }, + { + "n": "Will Turner", + "e": "wturner@student.unimelb.edu.au" + } + ], + "changes": { + ".github/CONTRIBUTING.md": { + "a": 1, + "d": 1 + }, + ".github/PULL_REQUEST_TEMPLATE.md": { + "a": 1, + "d": 1 + }, + "doc/changes/devel/12461.other.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12462.json b/doc/sphinxext/prs/12462.json new file mode 100644 index 00000000000..0f9de3c36a6 --- /dev/null +++ b/doc/sphinxext/prs/12462.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d17d885a36935a40b4d1b62796510cf5015b1d2b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Sophie Herbst", + "e": "ksherbst@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12462.newfeature.rst": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/epochs_metadata.py": { + "a": 171, + "d": 0 + }, + "mne/epochs.py": { + "a": 99, + "d": 21 + }, + "mne/tests/test_epochs.py": { + "a": 24, + "d": 6 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12463.json b/doc/sphinxext/prs/12463.json new file mode 100644 index 00000000000..4c827664616 --- /dev/null +++ b/doc/sphinxext/prs/12463.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f2fa901ca6f87eaadca6acd45f4c460f1c92f8fe", + "authors": [ + { + "n": "Kristijan Armeni", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + 
} + ], + "changes": { + "doc/changes/devel/12463.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 7, + "d": 0 + }, + "mne/decoding/search_light.py": { + "a": 7, + "d": 0 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 82, + "d": 0 + }, + "mne/io/neuralynx/tests/test_neuralynx.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12464.json b/doc/sphinxext/prs/12464.json new file mode 100644 index 00000000000..12752028358 --- /dev/null +++ b/doc/sphinxext/prs/12464.json @@ -0,0 +1,391 @@ +{ + "merge_commit_sha": "b8d9c1713bfef91e7b7bb663ec18a528a9b1691f", + "authors": [ + { + "n": "hasrat17", + "e": "hasrat407@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12464.other.rst": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 3, + "d": 1 + }, + "doc/sphinxext/flow_diagram.py": { + "a": 5, + "d": 9 + }, + "doc/sphinxext/mne_substitutions.py": { + "a": 4, + "d": 8 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 8, + "d": 8 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_lead_dots.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 7, + "d": 8 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 5 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 8, + "d": 8 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 4 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 3, + "d": 4 + }, + "mne/io/base.py": { + "a": 20, + "d": 30 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 2 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 4, + "d": 4 + }, + "mne/io/curry/curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 4, + "d": 4 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 5, + "d": 8 + }, + "mne/io/fieldtrip/utils.py": { + "a": 4, + "d": 5 + }, + "mne/io/kit/coreg.py": { + "a": 4, + "d": 6 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 3 + }, + "mne/io/nirx/nirx.py": { + "a": 4, + "d": 4 + }, + "mne/label.py": { + "a": 32, + "d": 35 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 3, + "d": 4 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 3, + "d": 5 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 7 + }, + "mne/morph.py": { + "a": 17, + "d": 19 + }, + "mne/morph_map.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 8, + "d": 10 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 6 + }, + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/interpolate.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/maxwell.py": { + "a": 33, + "d": 42 + }, + "mne/preprocessing/otp.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 3, + 
"d": 3 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 4, + "d": 8 + }, + "mne/preprocessing/xdawn.py": { + "a": 2, + "d": 2 + }, + "mne/proj.py": { + "a": 4, + "d": 4 + }, + "mne/rank.py": { + "a": 2, + "d": 2 + }, + "mne/report/report.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 19, + "d": 21 + }, + "mne/simulation/source.py": { + "a": 4, + "d": 5 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 21, + "d": 24 + }, + "mne/source_space/_source_space.py": { + "a": 41, + "d": 55 + }, + "mne/stats/cluster_level.py": { + "a": 8, + "d": 10 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 3 + }, + "mne/stats/tests/test_parametric.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 8, + "d": 11 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 7, + "d": 9 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 4, + "d": 5 + }, + "mne/tests/test_filter.py": { + "a": 4, + "d": 8 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_line_endings.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/multitaper.py": { + "a": 4, + "d": 7 + }, + "mne/time_frequency/tfr.py": { + "a": 14, + "d": 16 + }, + "mne/transforms.py": { + "a": 6, + "d": 10 + }, + "mne/utils/_bunch.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 6, + "d": 9 + }, + "mne/utils/check.py": { + "a": 16, + "d": 20 + }, + "mne/utils/config.py": { + "a": 3, + "d": 3 + }, + "mne/utils/dataframe.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 11 + }, + "mne/utils/mixin.py": { + "a": 2, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 22, + "d": 25 + }, + "mne/utils/tests/test_numerics.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 15, + "d": 20 + }, + "mne/viz/_brain/_brain.py": { + "a": 11, + "d": 11 + }, + "mne/viz/_brain/colormap.py": { + "a": 1, + "d": 3 + }, + "mne/viz/_brain/surface.py": { + "a": 1, + "d": 3 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 4 + }, + "mne/viz/misc.py": { + "a": 6, + "d": 6 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 6 + }, + "pyproject.toml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12467.json b/doc/sphinxext/prs/12467.json new file mode 100644 index 00000000000..f1c110c1fbc --- /dev/null +++ b/doc/sphinxext/prs/12467.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "5af2dd7f9f25511727716072bc27fcaed251a159", + "authors": [ + { + "n": "Ivo Pascal de Jong", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12467.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 2, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 6, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 19, + "d": 1 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/12469.json b/doc/sphinxext/prs/12469.json new file mode 100644 index 00000000000..4a07ceb1711 --- /dev/null +++ b/doc/sphinxext/prs/12469.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "985c1959d4b0e2229d9288e78d0a022926042b76", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1247.json b/doc/sphinxext/prs/1247.json new file mode 100644 index 00000000000..41a9d4e4a7c --- /dev/null +++ b/doc/sphinxext/prs/1247.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e09aa4cb6371e8e8b647ddf19db62a56e5c1aec8", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 40, + "d": 12 + }, + "mne/gui/_fiducials_gui.py": { + "a": 1, + "d": 0 + }, + "mne/gui/_file_traits.py": { + "a": 3, + "d": 0 + }, + "mne/gui/_viewer.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12470.json b/doc/sphinxext/prs/12470.json new file mode 100644 index 00000000000..0f71e266244 --- /dev/null +++ b/doc/sphinxext/prs/12470.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "62a0c40fa01ec65910313109cd414e22b3b0d135", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12470.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12471.json b/doc/sphinxext/prs/12471.json new file mode 100644 index 00000000000..23385f1d06b --- /dev/null +++ b/doc/sphinxext/prs/12471.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "668b508a7342828cf30e090a02f2e1a6e7402f35", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 3, + "d": 3 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12474.json b/doc/sphinxext/prs/12474.json new file mode 100644 index 00000000000..456c0881933 --- /dev/null +++ b/doc/sphinxext/prs/12474.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "795d6d9510a691e589c43c8cc2aef4edd97661d7", + "authors": [ + { + "n": "Liberty Hamilton", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12474.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/defaults.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12476.json b/doc/sphinxext/prs/12476.json new file mode 100644 index 00000000000..c4f137bb883 --- /dev/null +++ b/doc/sphinxext/prs/12476.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e23e9e1bdd3ecf63af9386cd2a19129d26900864", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12476.bugfix.rst": { + "a": 1, + "d": 0 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 1, + "d": 0 + }, + "mne/cov.py": { + "a": 71, + "d": 21 + }, + "mne/decoding/csp.py": { + "a": 68, + "d": 11 + }, + "mne/decoding/tests/test_csp.py": { + "a": 87, + "d": 30 + }, + "mne/rank.py": { + "a": 69, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12477.json b/doc/sphinxext/prs/12477.json new file mode 100644 index 00000000000..41d6a4687d0 --- /dev/null +++ 
b/doc/sphinxext/prs/12477.json @@ -0,0 +1,223 @@ +{ + "merge_commit_sha": "64901c4e24db697b23930f7557480ca0b97b4f8c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 4, + "d": 4 + }, + "doc/sphinxext/unit_role.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/hf_sef_data.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 0, + "d": 1 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 0, + "d": 1 + }, + "examples/io/elekta_epochs.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/css.py": { + "a": 3, + "d": 3 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/xdawn_denoising.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/eyetracking_plot_heatmap.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/topo_compare_conditions.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/topo_customized.py": { + "a": 0, + "d": 1 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 3, + "d": 2 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 5, + "d": 3 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 0, + "d": 1 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 0, + "d": 1 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 3, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 6, + "d": 4 + }, + "mne/forward/tests/test_forward.py": { + "a": 3, + "d": 2 + }, + "mne/io/_read_raw.py": { + "a": 0, + "d": 1 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 3, + "d": 2 + }, + "mne/io/besa/tests/test_besa.py": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 3, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 3, + "d": 2 + }, + "mne/io/eyelink/_utils.py": { + "a": 0, + "d": 1 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 3, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 2 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_lof.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 6, + "d": 4 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 3, + "d": 2 + }, + "mne/report/js_and_css/bootstrap-icons/gen_css_for_mne.py": { + "a": 0, + "d": 1 + }, + "mne/report/tests/test_report.py": { + "a": 12, + "d": 6 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 3, + "d": 2 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 9, + "d": 6 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 0, + "d": 1 + }, + "mne/viz/evoked_field.py": { + "a": 22, + "d": 21 + }, + "mne/viz/montage.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 6, + "d": 6 + }, + "mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_misc.py": { + "a": 6, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 3, + "d": 2 + }, + "mne/viz/ui_events.py": { + "a": 1, + "d": 0 + }, + 
"tutorials/visualization/20_ui_events.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12478.json b/doc/sphinxext/prs/12478.json new file mode 100644 index 00000000000..32801ccc78b --- /dev/null +++ b/doc/sphinxext/prs/12478.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "97b745a2ff2db3661ed64a3c54fc85abeea558cf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 3, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12481.json b/doc/sphinxext/prs/12481.json new file mode 100644 index 00000000000..d2ae74bd588 --- /dev/null +++ b/doc/sphinxext/prs/12481.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ae69b03a00eb2c72ae9b13c71ae541747daf55da", + "authors": [ + { + "n": "Dominik Welke", + "e": "dominik.welke@web.de" + } + ], + "changes": { + "doc/changes/devel/12481.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12482.json b/doc/sphinxext/prs/12482.json new file mode 100644 index 00000000000..2c413b57b3e --- /dev/null +++ b/doc/sphinxext/prs/12482.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "946c616b6fe65663b66cf7433064eef2ff0e2cc0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12483.json b/doc/sphinxext/prs/12483.json new file mode 100644 index 00000000000..9eeab471b5d --- /dev/null +++ b/doc/sphinxext/prs/12483.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "72799349bd6c4987dcef8e71adaf1a4a8cc6e061", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12483.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_qt.py": { + "a": 26, + "d": 18 + }, + "mne/viz/backends/_utils.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12484.json b/doc/sphinxext/prs/12484.json new file mode 100644 index 00000000000..feca0bc60c4 --- /dev/null +++ b/doc/sphinxext/prs/12484.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b906b784430af749977828da0eeda922e28efd98", + "authors": [ + { + "n": "Jacob Woessner", + "e": "Woessner.jacob@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12484.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 16, + "d": 0 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 33, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12487.json b/doc/sphinxext/prs/12487.json new file mode 100644 index 00000000000..7a9372c9a21 --- /dev/null +++ b/doc/sphinxext/prs/12487.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "a622a467074ea9ba0f1d1168c4ec3b8c6af74323", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_includes/channel_interpolation.rst": { + "a": 2, + "d": 2 + }, + "doc/_includes/forward.rst": { + "a": 35, + "d": 35 + }, + "doc/_includes/ssp.rst": { + "a": 2, + "d": 2 + }, + "doc/api/events.rst": { + "a": 1, + "d": 1 + }, + "doc/api/file_io.rst": { + "a": 1, + "d": 1 + }, + 
"doc/changes/v0.10.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v0.12.rst": { + "a": 35, + "d": 35 + }, + "doc/changes/v0.13.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v0.15.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v0.17.rst": { + "a": 42, + "d": 42 + }, + "doc/changes/v1.2.rst": { + "a": 1, + "d": 1 + }, + "doc/documentation/datasets.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12488.json b/doc/sphinxext/prs/12488.json new file mode 100644 index 00000000000..bc538332fa0 --- /dev/null +++ b/doc/sphinxext/prs/12488.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d7fdcb004ae5dc95d86f5be598bd87677ad1c251", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12489.json b/doc/sphinxext/prs/12489.json new file mode 100644 index 00000000000..d953504a978 --- /dev/null +++ b/doc/sphinxext/prs/12489.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "90067893e330c1941c055be22bc5f442ad320ec3", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12489.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/utils/misc.py": { + "a": 6, + "d": 5 + }, + "mne/utils/tests/test_misc.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12491.json b/doc/sphinxext/prs/12491.json new file mode 100644 index 00000000000..aacc16579ef --- /dev/null +++ b/doc/sphinxext/prs/12491.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b752d2a3cd247df18371eae6d8c29ebb8a938b87", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "README.rst": { + "a": 3, + "d": 3 + }, + "doc/changes/devel/12491.dependency.rst": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 3, + "d": 7 + }, + "mne/viz/backends/_utils.py": { + "a": 32, + "d": 75 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 1, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12492.json b/doc/sphinxext/prs/12492.json new file mode 100644 index 00000000000..d7a4ed77d1d --- /dev/null +++ b/doc/sphinxext/prs/12492.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "25e6aecf1c0fd0b0f1e2b3fc8d46377dffa752e4", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 3, + "d": 6 + }, + "mne/_freesurfer.py": { + "a": 1, + "d": 1 + }, + "mne/_ola.py": { + "a": 3, + "d": 7 + }, + "mne/annotations.py": { + "a": 7, + "d": 10 + }, + "mne/bem.py": { + "a": 4, + "d": 5 + }, + "mne/channels/montage.py": { + "a": 3, + "d": 5 + }, + "mne/coreg.py": { + "a": 4, + "d": 4 + }, + "mne/cov.py": { + "a": 6, + "d": 8 + }, + "mne/dipole.py": { + "a": 5, + "d": 7 + }, + "mne/epochs.py": { + "a": 5, + "d": 5 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 4, + "d": 18 + }, + 
"mne/forward/_make_forward.py": { + "a": 3, + "d": 2 + }, + "mne/io/array/array.py": { + "a": 2, + "d": 3 + }, + "mne/io/base.py": { + "a": 3, + "d": 2 + }, + "mne/label.py": { + "a": 3, + "d": 3 + }, + "mne/morph.py": { + "a": 3, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/csd.py": { + "a": 3, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 6 + }, + "pyproject.toml": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12494.json b/doc/sphinxext/prs/12494.json new file mode 100644 index 00000000000..e080b83ff55 --- /dev/null +++ b/doc/sphinxext/prs/12494.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b0ac8a3650a6094ed6efe5ef548aa0ad31513711", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/backends/_utils.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12496.json b/doc/sphinxext/prs/12496.json new file mode 100644 index 00000000000..b410f603110 --- /dev/null +++ b/doc/sphinxext/prs/12496.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2a973338657e6d0609b1d60a8c109cad533c033c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_lof.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12498.json b/doc/sphinxext/prs/12498.json new file mode 100644 index 00000000000..8ae290c38c7 --- /dev/null +++ b/doc/sphinxext/prs/12498.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "60505e96a7d90a56b48a6e1ea1e9f9f16a3dd87a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12498.bugfix.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/muscle_ica.py": { + "a": 0, + "d": 1 + }, + "mne/report/report.py": { + "a": 2, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 12, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 3, + "d": 0 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12499.json b/doc/sphinxext/prs/12499.json new file mode 100644 index 00000000000..11cafe255cd --- /dev/null +++ b/doc/sphinxext/prs/12499.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "14ff9483cf92a40479b23f4b17886d3d384f65a5", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/backends/_utils.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1250.json b/doc/sphinxext/prs/1250.json new file mode 100644 index 00000000000..93311827979 --- /dev/null +++ b/doc/sphinxext/prs/1250.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bd3fe74a73de5fed74f9c8e5f45bedfe5e9aa963", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 5, + "d": 0 + }, + "mne/fiff/edf/edf.py": { + "a": 122, + "d": 7 + }, + "mne/fiff/edf/tests/data/test.edf": { + "a": 7, + "d": 7 + }, + "mne/fiff/edf/tests/test_edf.py": { + "a": 88, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12501.json b/doc/sphinxext/prs/12501.json new file mode 100644 index 00000000000..c870c0d63f4 --- /dev/null +++ b/doc/sphinxext/prs/12501.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5843ad17e2a883b5f029731252587ef9d01a8ab8", + "authors": [ + 
{ + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_lof.py": { + "a": 0, + "d": 4 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12504.json b/doc/sphinxext/prs/12504.json new file mode 100644 index 00000000000..7f4934a8fe9 --- /dev/null +++ b/doc/sphinxext/prs/12504.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f65bc00ad6ee586acf101fe21b6ed5848dcd9523", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 2, + "d": 4 + }, + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12506.json b/doc/sphinxext/prs/12506.json new file mode 100644 index 00000000000..3c77eba3dab --- /dev/null +++ b/doc/sphinxext/prs/12506.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cf6e13c1e7a0cf0fabbcb33865610007bd891ef3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 3, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12507.json b/doc/sphinxext/prs/12507.json new file mode 100644 index 00000000000..f4ae938f679 --- /dev/null +++ b/doc/sphinxext/prs/12507.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "415e7f68ed71135baff0ea857ca4fab5a3690bf8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12507.bugfix.rst": { + "a": 5, + "d": 0 + }, + "mne/filter.py": { + "a": 8, + "d": 65 + }, + "mne/fixes.py": { + "a": 55, + "d": 0 + }, + "mne/tests/test_filter.py": { + "a": 55, + "d": 7 + }, + "mne/utils/docs.py": { + "a": 27, + "d": 12 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 12, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12509.json b/doc/sphinxext/prs/12509.json new file mode 100644 index 00000000000..5d13f791628 --- /dev/null +++ b/doc/sphinxext/prs/12509.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "c4d20c37d0c3f2a337428811f0e627ccdb078a09", + "authors": [ + { + "n": "Nabil Alibou", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/channel_types.rst": { + "a": 61, + "d": 29 + }, + "doc/changes/devel/12509.other.rst": { + "a": 2, + "d": 0 + }, + "doc/documentation/glossary.rst": { + "a": 15, + "d": 4 + }, + "doc/sphinxext/mne_substitutions.py": { + "a": 23, + "d": 1 + }, + "mne/_fiff/meas_info.py": { + "a": 22, + "d": 13 + }, + "mne/_fiff/pick.py": { + "a": 3, + "d": 4 + }, + "mne/defaults.py": { + "a": 6, + "d": 0 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1251.json b/doc/sphinxext/prs/1251.json new file mode 100644 index 00000000000..c9bd64ffd42 --- /dev/null +++ b/doc/sphinxext/prs/1251.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"77959bb618cd0a71d7f2728cea5ff003781d75e8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/edf/tests/test_edf.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12510.json b/doc/sphinxext/prs/12510.json new file mode 100644 index 00000000000..782beed53f2 --- /dev/null +++ b/doc/sphinxext/prs/12510.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6c4418c2dc00d8d84ee7fc29acc6d04dfc4e7fac", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/12510.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/export/_edf.py": { + "a": 6, + "d": 3 + }, + "mne/export/tests/test_export.py": { + "a": 27, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 16, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12511.json b/doc/sphinxext/prs/12511.json new file mode 100644 index 00000000000..6b1db9fcfed --- /dev/null +++ b/doc/sphinxext/prs/12511.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "169372da67dc243b817f024820b349495a5aa109", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 2, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 5, + "d": 3 + }, + "tools/github_actions_dependencies.sh": { + "a": 10, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12513.json b/doc/sphinxext/prs/12513.json new file mode 100644 index 00000000000..89d376957a6 --- /dev/null +++ b/doc/sphinxext/prs/12513.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "eee8e6fe580034f4a3a4fb13bdca3bfc99240708", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12513.newfeature.rst": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 72, + "d": 26 + }, + "mne/report/tests/test_report.py": { + "a": 41, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12514.json b/doc/sphinxext/prs/12514.json new file mode 100644 index 00000000000..037093c95c0 --- /dev/null +++ b/doc/sphinxext/prs/12514.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8ee98c95f4bc9866679fd49111f9b041b5b0e2fa", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 12, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12515.json b/doc/sphinxext/prs/12515.json new file mode 100644 index 00000000000..d838dc9ef45 --- /dev/null +++ b/doc/sphinxext/prs/12515.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b56420f601a0675cdc211c24aa5d52e722ece099", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12518.json b/doc/sphinxext/prs/12518.json new file mode 100644 index 00000000000..3fcc4697c7a --- /dev/null +++ b/doc/sphinxext/prs/12518.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5a1d009d02080f594414e45fc94543c035740e37", + "authors": [ + { + "n": "rcmdnk", + "e": "rcmdnk@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12518.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 46, + "d": 9 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 68, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + 
} + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1252.json b/doc/sphinxext/prs/1252.json new file mode 100644 index 00000000000..8cad5b04e9b --- /dev/null +++ b/doc/sphinxext/prs/1252.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f6cda36272b28f7b3a55b29ac751575b0edc608b", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/egi/egi.py": { + "a": 27, + "d": 15 + }, + "mne/fiff/egi/tests/test_egi.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12520.json b/doc/sphinxext/prs/12520.json new file mode 100644 index 00000000000..6a7ffba3787 --- /dev/null +++ b/doc/sphinxext/prs/12520.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "58a02c25998f46b9de587cc76a51d6351d96e415", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 4, + "d": 5 + }, + "azure-pipelines.yml": { + "a": 3, + "d": 3 + }, + "mne/export/_edf.py": { + "a": 17, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 11, + "d": 8 + }, + "mne/utils/dataframe.py": { + "a": 11, + "d": 2 + }, + "tools/azure_dependencies.sh": { + "a": 7, + "d": 31 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 42 + }, + "tools/install_pre_requirements.sh": { + "a": 82, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12522.json b/doc/sphinxext/prs/12522.json new file mode 100644 index 00000000000..f3ce47f5e58 --- /dev/null +++ b/doc/sphinxext/prs/12522.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3c101cdd88b1e83017190ae490654ed290785b94", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 2 + }, + "doc/conf.py": { + "a": 2, + "d": 2 + }, + "doc/install/mne_tools_suite.rst": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12523.json b/doc/sphinxext/prs/12523.json new file mode 100644 index 00000000000..27ca5bfea52 --- /dev/null +++ b/doc/sphinxext/prs/12523.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a02838fcf9673e4ed955fa79abbe79b07c968d63", + "authors": [ + { + "n": "Seyed (Yahya) Shirazi", + "e": "shirazi@ieee.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12523.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12524.json b/doc/sphinxext/prs/12524.json new file mode 100644 index 00000000000..44a5f200717 --- /dev/null +++ b/doc/sphinxext/prs/12524.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0aef0795afdb1543dfee8fe76485d3a1ff40ed31", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12525.json b/doc/sphinxext/prs/12525.json new file mode 100644 index 00000000000..18a5d7674be --- /dev/null +++ b/doc/sphinxext/prs/12525.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8590d35cabeae22b0bda41d3b774587426ae961c", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/install_pre_requirements.sh": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12526.json b/doc/sphinxext/prs/12526.json new file mode 100644 index 00000000000..5b9e6c2031b --- /dev/null +++ b/doc/sphinxext/prs/12526.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "675f38a036804b0c1259d05f2ff6b0131d69fc81", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12526.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 8, + "d": 1 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12529.json b/doc/sphinxext/prs/12529.json new file mode 100644 index 00000000000..01d8f315066 --- /dev/null +++ b/doc/sphinxext/prs/12529.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "026e2622e9f32741ac20bc4c051bdc89bbbd3785", + "authors": [ + { + "n": "tom", + "e": "tdonoghue.research@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 14, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1253.json b/doc/sphinxext/prs/1253.json new file mode 100644 index 00000000000..02e567d81a3 --- /dev/null +++ b/doc/sphinxext/prs/1253.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3d068c5e67da500862b0ebd2c4b8bffbcb5f18b4", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/channels.py": { + "a": 6, + "d": 4 + }, + "mne/fiff/fiff/tests/test_raw.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12531.json b/doc/sphinxext/prs/12531.json new file mode 100644 index 00000000000..11e8f091570 --- /dev/null +++ b/doc/sphinxext/prs/12531.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "24c2e8f809eaae87c8e4c8d574fcf867b742f755", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "doc/conf.py": { + "a": 0, + "d": 2 + }, + "mne/conftest.py": { + "a": 0, + "d": 3 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 12, + "d": 2 + }, + "mne/gui/tests/test_coreg.py": { + "a": 0, + "d": 3 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 10, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 7, + "d": 18 + }, + "mne/utils/config.py": { + "a": 9, + "d": 0 + }, + "mne/utils/tests/test_config.py": { + "a": 21, + "d": 3 + }, + "pyproject.toml": { + "a": 4, + "d": 5 + }, + "tools/install_pre_requirements.sh": { + "a": 4, + "d": 3 + }, + "tools/pyqt6_requirements.txt": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12532.json b/doc/sphinxext/prs/12532.json new file mode 100644 index 00000000000..37aa089fbd4 --- /dev/null +++ b/doc/sphinxext/prs/12532.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "067715af11030c3dbe54362a957de132170c3f43", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 6, + "d": 3 + }, + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12533.json b/doc/sphinxext/prs/12533.json new file mode 100644 index 00000000000..a910810bc2f --- /dev/null 
+++ b/doc/sphinxext/prs/12533.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "940ac9553ce42c15b4c16ecd013824ca3ea7244a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d_overlay.py": { + "a": 1, + "d": 5 + }, + "mne/viz/backends/_pyvista.py": { + "a": 261, + "d": 298 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12534.json b/doc/sphinxext/prs/12534.json new file mode 100644 index 00000000000..32c53321d98 --- /dev/null +++ b/doc/sphinxext/prs/12534.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "0d514882be5e255c6c31639f06bc845eea770cfe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/_fiff/pick.py": { + "a": 9, + "d": 32 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 1, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12535.json b/doc/sphinxext/prs/12535.json new file mode 100644 index 00000000000..193ac9b96a5 --- /dev/null +++ b/doc/sphinxext/prs/12535.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "105c8b819c08d24262f65d870b55bf0b305340d7", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/devel/12535.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 6, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12536.json b/doc/sphinxext/prs/12536.json new file mode 100644 index 00000000000..9202703faa0 --- /dev/null +++ b/doc/sphinxext/prs/12536.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a92d798744035a304e307f0c5170db6268d75079", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/devel/12536.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 21, + "d": 2 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 20, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12537.json b/doc/sphinxext/prs/12537.json new file mode 100644 index 00000000000..99adb4c0beb --- /dev/null +++ b/doc/sphinxext/prs/12537.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b33e7a1906bb12617f0483abc770ff0ca91ba201", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12537.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 7, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12538.json b/doc/sphinxext/prs/12538.json new file mode 100644 index 00000000000..1e7998f5470 --- /dev/null +++ b/doc/sphinxext/prs/12538.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bf74c045d5220682e6e229b95a6e406014c0c73a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/psd.py": { + "a": 27, 
+ "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12539.json b/doc/sphinxext/prs/12539.json new file mode 100644 index 00000000000..fd00c15e1e3 --- /dev/null +++ b/doc/sphinxext/prs/12539.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e6dedb326910ddcd30acd207aa8f35491e14ca94", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_numerics.py": { + "a": 5, + "d": 1 + }, + "tutorials/preprocessing/80_opm_processing.py": { + "a": 60, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12540.json b/doc/sphinxext/prs/12540.json new file mode 100644 index 00000000000..77c44bce316 --- /dev/null +++ b/doc/sphinxext/prs/12540.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8c7daf49667d317b1759264eaaff01cc769ee656", + "authors": [ + { + "n": null, + "e": "erkkahe@gmail.com" + } + ], + "changes": { + "doc/install/mne_tools_suite.rst": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12541.json b/doc/sphinxext/prs/12541.json new file mode 100644 index 00000000000..1a784c19d5d --- /dev/null +++ b/doc/sphinxext/prs/12541.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "637b4343f059d3c03e9e68f7fd65ef1ffc3b33c7", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12544.json b/doc/sphinxext/prs/12544.json new file mode 100644 index 00000000000..d984720415b --- /dev/null +++ b/doc/sphinxext/prs/12544.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b95fc4a9151f633d40074e7e43cb6f6487275a33", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12544.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/source_space/_source_space.py": { + "a": 3, + "d": 2 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12545.json b/doc/sphinxext/prs/12545.json new file mode 100644 index 00000000000..ae35699e2e2 --- /dev/null +++ b/doc/sphinxext/prs/12545.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "321825b5e13f771e5ce31d0383609468d0a1d5e8", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "examples/forward/left_cerebellum_volume_source.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12548.json b/doc/sphinxext/prs/12548.json new file mode 100644 index 00000000000..56776600be5 --- /dev/null +++ b/doc/sphinxext/prs/12548.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6b5a59d2cb07177a0735b3b9a2701345cfc1d98b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 5, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1255.json b/doc/sphinxext/prs/1255.json new file mode 100644 index 00000000000..9009f406b94 --- /dev/null +++ 
b/doc/sphinxext/prs/1255.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0f0f9509132533dffdfe39a439caca52c6d6c12b", + "authors": [ + { + "n": "Andrew R. Dykstra", + "e": null + } + ], + "changes": { + "mne/fiff/channels.py": { + "a": 26, + "d": 42 + }, + "mne/fiff/fiff/tests/test_raw.py": { + "a": 16, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12550.json b/doc/sphinxext/prs/12550.json new file mode 100644 index 00000000000..7386a018c62 --- /dev/null +++ b/doc/sphinxext/prs/12550.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dddbe78f0c1a4140e9fd54904593a456b117f56a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12552.json b/doc/sphinxext/prs/12552.json new file mode 100644 index 00000000000..67ef8e4883e --- /dev/null +++ b/doc/sphinxext/prs/12552.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "27c07a8e2c8e2add8c8921c144986a9530455eb1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12554.json b/doc/sphinxext/prs/12554.json new file mode 100644 index 00000000000..5e9d9c5aee5 --- /dev/null +++ b/doc/sphinxext/prs/12554.json @@ -0,0 +1,183 @@ +{ + "merge_commit_sha": "0d781c8329a524c7bd66b27d69348eabb468681d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 5, + "d": 5 + }, + "doc/changes/devel/12554.dependency.rst": { + "a": 6, + "d": 0 + }, + "doc/conf.py": { + "a": 0, + "d": 2 + }, + "mne/_fiff/reference.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 0, + "d": 26 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 2 + }, + "mne/dipole.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 9 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 1, + "d": 58 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 5, + "d": 5 + }, + "mne/io/kit/coreg.py": { + "a": 3, + "d": 17 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 0, + "d": 14 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/xdawn.py": { + "a": 2, + "d": 2 + }, + "mne/report/report.py": { + "a": 6, + "d": 22 + }, + "mne/report/tests/test_report.py": { + "a": 0, + "d": 6 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 9 + }, + "mne/time_frequency/multitaper.py": { + "a": 0, + "d": 12 + }, + "mne/time_frequency/spectrum.py": { + "a": 8, + "d": 17 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 0, + "d": 13 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 45 + }, + "mne/utils/__init__.pyi": { + "a": 4, + "d": 0 + }, + "mne/utils/check.py": { + "a": 2, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 4 + }, + "mne/utils/linalg.py": { + "a": 55, + "d": 0 + }, + "mne/utils/spectrum.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_mpl_figure.py": { + "a": 7, + "d": 27 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 3 + }, 
+ "mne/viz/evoked.py": { + "a": 3, + "d": 6 + }, + "mne/viz/ica.py": { + "a": 13, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 4, + "d": 13 + }, + "mne/viz/utils.py": { + "a": 24, + "d": 66 + }, + "pyproject.toml": { + "a": 3, + "d": 3 + }, + "tools/dev/gen_css_for_mne.py": { + "a": 2, + "d": 1 + }, + "tools/github_actions_env_vars.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12556.json b/doc/sphinxext/prs/12556.json new file mode 100644 index 00000000000..c1a5165f133 --- /dev/null +++ b/doc/sphinxext/prs/12556.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b72f02a5eab8ab931839e4f9bad6d8e8faa68861", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12556.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/html_templates/report/slider.html.jinja": { + "a": 6, + "d": 11 + }, + "mne/report/js_and_css/report.js": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12557.json b/doc/sphinxext/prs/12557.json new file mode 100644 index 00000000000..f424f2fc350 --- /dev/null +++ b/doc/sphinxext/prs/12557.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "96ac7afc522cd4775113b2327b0be7dc3a483e8f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/report/js_and_css/report.js": { + "a": 13, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12560.json b/doc/sphinxext/prs/12560.json new file mode 100644 index 00000000000..e553e6a52bf --- /dev/null +++ b/doc/sphinxext/prs/12560.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f093b176565ef934bb5c0bcc6c2a5fc717f5e782", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12561.json b/doc/sphinxext/prs/12561.json new file mode 100644 index 00000000000..e999b2f13ea --- /dev/null +++ b/doc/sphinxext/prs/12561.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6e2ecad41bad6b19474173e38b6e3df1efd3e863", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12561.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/report/js_and_css/report.js": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12562.json b/doc/sphinxext/prs/12562.json new file mode 100644 index 00000000000..6472bfb4c95 --- /dev/null +++ b/doc/sphinxext/prs/12562.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "815db609f9142b88de54c297f9016abfc38da6c3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12562.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 30, + "d": 8 + }, + "mne/decoding/csp.py": { + "a": 28, + "d": 4 + }, + "mne/decoding/tests/test_base.py": { + "a": 15, + "d": 6 + }, + "mne/decoding/tests/test_csp.py": { + "a": 27, + "d": 4 + }, + "mne/decoding/transformer.py": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12563.json b/doc/sphinxext/prs/12563.json new file mode 100644 index 00000000000..6b8800da957 --- /dev/null +++ b/doc/sphinxext/prs/12563.json @@ -0,0 
+1,15 @@ +{ + "merge_commit_sha": "6fd4674673dafb944fbade0297a8c609d090a59c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/report/report.py": { + "a": 0, + "d": 36 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12564.json b/doc/sphinxext/prs/12564.json new file mode 100644 index 00000000000..92a305f1353 --- /dev/null +++ b/doc/sphinxext/prs/12564.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "52d9a9b5a03503d00439c7ba999799d8dd4ac030", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12565.json b/doc/sphinxext/prs/12565.json new file mode 100644 index 00000000000..7c66dbdd84c --- /dev/null +++ b/doc/sphinxext/prs/12565.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9262ae4858b123b7dcd70d5fad99b3b4aa00d271", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 14, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12567.json b/doc/sphinxext/prs/12567.json new file mode 100644 index 00000000000..46f043ca3f6 --- /dev/null +++ b/doc/sphinxext/prs/12567.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "55cba826db9f5b7a9a28c6a12248108fddfadfa2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 5, + "d": 9 + }, + "mne/utils/config.py": { + "a": 25, + "d": 7 + }, + "mne/utils/tests/test_config.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12569.json b/doc/sphinxext/prs/12569.json new file mode 100644 index 00000000000..b3254d4fbe1 --- /dev/null +++ b/doc/sphinxext/prs/12569.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "7d36247e14f1f3ba8a10d4d4e73511e643355cda", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 3, + "d": 0 + }, + ".pre-commit-config.yaml": { + "a": 11, + "d": 8 + }, + "azure-pipelines.yml": { + "a": 7, + "d": 0 + }, + "doc/changes/devel/12569.other.rst": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_flash_bem.py": { + "a": 1, + "d": 0 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 2, + "d": 56 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_csd.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 37, + "d": 37 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_brain/_brain.py": { + "a": 8, + "d": 5 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 6 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_utils.py": { + "a": 0, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 1 + }, + "pyproject.toml": { + "a": 14, + "d": 0 + }, + "tools/vulture_allowlist.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12570.json b/doc/sphinxext/prs/12570.json new file 
mode 100644 index 00000000000..0c689b0c28a --- /dev/null +++ b/doc/sphinxext/prs/12570.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e43b5d897228df7de41799583df2e38adbcffc6e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 213, + "d": 199 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 2, + "d": 2 + }, + "tutorials/clinical/20_seeg.py": { + "a": 3, + "d": 4 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12571.json b/doc/sphinxext/prs/12571.json new file mode 100644 index 00000000000..c0178746b90 --- /dev/null +++ b/doc/sphinxext/prs/12571.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e3a40fc1b173cb4b49356864aa3551237c5d0d2d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 8, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12573.json b/doc/sphinxext/prs/12573.json new file mode 100644 index 00000000000..125182830be --- /dev/null +++ b/doc/sphinxext/prs/12573.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cfb232a41eb316e2173fb4dba3ab3b23ab326995", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12573.newfeature.rst": { + "a": 3, + "d": 0 + }, + "mne/report/report.py": { + "a": 4, + "d": 1 + }, + "mne/report/tests/test_report.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12574.json b/doc/sphinxext/prs/12574.json new file mode 100644 index 00000000000..c77bf1ef4d4 --- /dev/null +++ b/doc/sphinxext/prs/12574.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7c0c07a1dfb8229a9fafd81b993b6cc75ce10a27", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/conf.py": { + "a": 15, + "d": 257 + }, + "doc/sphinxext/mne_doc_utils.py": { + "a": 245, + "d": 0 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12575.json b/doc/sphinxext/prs/12575.json new file mode 100644 index 00000000000..0b13092d911 --- /dev/null +++ b/doc/sphinxext/prs/12575.json @@ -0,0 +1,263 @@ +{ + "merge_commit_sha": "2db23cd669e2304504df86da53584b7b82036538", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/gen_commands.py": { + "a": 14, + "d": 1 + }, + "mne/_fiff/reference.py": { + "a": 0, + "d": 27 + }, + "mne/_fiff/utils.py": { + "a": 0, + "d": 25 + }, + "mne/_fiff/write.py": { + "a": 2, + "d": 11 + }, + "mne/_freesurfer.py": { + "a": 0, + "d": 17 + }, + "mne/channels/layout.py": { + "a": 0, + "d": 55 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 33 + }, + "mne/chpi.py": { + "a": 0, + "d": 6 + }, + "mne/coreg.py": { + "a": 0, + "d": 28 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 0, + "d": 12 + }, + "mne/filter.py": { + "a": 0, + "d": 24 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg.py": { + "a": 0, + "d": 2 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 0, + "d": 22 + }, + 
"mne/io/brainvision/tests/test_brainvision.py": { + "a": 0, + "d": 3 + }, + "mne/io/bti/bti.py": { + "a": 12, + "d": 93 + }, + "mne/io/bti/read.py": { + "a": 0, + "d": 20 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 0, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 0, + "d": 3 + }, + "mne/io/egi/egimff.py": { + "a": 0, + "d": 19 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 0, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 0 + }, + "mne/io/neuralynx/tests/test_neuralynx.py": { + "a": 1, + "d": 1 + }, + "mne/io/persyst/persyst.py": { + "a": 1, + "d": 0 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 0, + "d": 1 + }, + "mne/misc.py": { + "a": 0, + "d": 16 + }, + "mne/morph.py": { + "a": 11, + "d": 11 + }, + "mne/preprocessing/ecg.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 0, + "d": 6 + }, + "mne/preprocessing/tests/test_ctps.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 4, + "d": 0 + }, + "mne/report/report.py": { + "a": 1, + "d": 11 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 5, + "d": 7 + }, + "mne/surface.py": { + "a": 0, + "d": 6 + }, + "mne/tests/test_chpi.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 0, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 0, + "d": 2 + }, + "mne/tests/test_line_endings.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_transforms.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 1, + "d": 0 + }, + "mne/transforms.py": { + "a": 0, + "d": 1 + }, + "mne/utils/__init__.pyi": { + "a": 0, + "d": 6 + }, + "mne/utils/_testing.py": { + "a": 0, + "d": 18 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 37 + }, + "mne/utils/numerics.py": { + "a": 0, + "d": 26 + }, + "mne/viz/_brain/_brain.py": { + "a": 0, + "d": 28 + }, + "mne/viz/_brain/surface.py": { + "a": 0, + "d": 9 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 0, + "d": 7 + }, + "mne/viz/backends/renderer.py": { + "a": 0, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 0, + "d": 6 + }, + "mne/viz/tests/test_montage.py": { + "a": 0, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 6 + }, + "pyproject.toml": { + "a": 10, + "d": 3 + }, + "tools/vulture_allowlist.py": { + "a": 119, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12576.json b/doc/sphinxext/prs/12576.json new file mode 100644 index 00000000000..3b5212bda49 --- /dev/null +++ b/doc/sphinxext/prs/12576.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "2448974c87828a4f4c2aac0e7c51376556e257b7", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12576.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/_freesurfer.py": { + "a": 11, + "d": 7 + }, + "mne/surface.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_surface.py": { + "a": 0, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 3 + }, + "mne/viz/_brain/_brain.py": { + "a": 
5, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12578.json b/doc/sphinxext/prs/12578.json new file mode 100644 index 00000000000..b6a4b88b253 --- /dev/null +++ b/doc/sphinxext/prs/12578.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "59606aa0e3d43d7313a644d7c6be3e4ec863a1ea", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12578.bugfix.rst": { + "a": 3, + "d": 0 + }, + "mne/report/report.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12579.json b/doc/sphinxext/prs/12579.json new file mode 100644 index 00000000000..c93064ff7b0 --- /dev/null +++ b/doc/sphinxext/prs/12579.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d99d498257eb06c479500a88cf6abf28e9d943bf", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/html_templates/report/slider.html.jinja": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1258.json b/doc/sphinxext/prs/1258.json new file mode 100644 index 00000000000..f7bfbc41d57 --- /dev/null +++ b/doc/sphinxext/prs/1258.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "88e84cdbe1d63b91799ec07cb4a1ccd5e83cae9c", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/time_frequency/plot_single_trial_spectra.py": { + "a": 12, + "d": 0 + }, + "mne/fiff/evoked.py": { + "a": 25, + "d": 12 + }, + "mne/tests/test_viz.py": { + "a": 3, + "d": 2 + }, + "mne/viz.py": { + "a": 61, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12580.json b/doc/sphinxext/prs/12580.json new file mode 100644 index 00000000000..d20288748e4 --- /dev/null +++ b/doc/sphinxext/prs/12580.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5a5b4f10467664b1444873ab879f4ae5724eedbb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "pyproject.toml": { + "a": 2, + "d": 2 + }, + "tools/pyqt6_requirements.txt": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12583.json b/doc/sphinxext/prs/12583.json new file mode 100644 index 00000000000..24b472f36cf --- /dev/null +++ b/doc/sphinxext/prs/12583.json @@ -0,0 +1,127 @@ +{ + "merge_commit_sha": "c55c44af394b611abcb4eb8553935d0b582f6e49", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12583.apichange.rst": { + "a": 2, + "d": 0 + }, + "doc/changes/devel/12583.newfeature.rst": { + "a": 4, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 8, + "d": 74 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 7, + "d": 8 + }, + "mne/epochs.py": { + "a": 10, + "d": 7 + }, + "mne/evoked.py": { + "a": 9, + "d": 7 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 8 + }, + "mne/html_templates/_templates.py": { + "a": 118, + "d": 0 + }, + "mne/html_templates/repr/_acquisition.html.jinja": { + "a": 100, + "d": 0 + }, + "mne/html_templates/repr/_channels.html.jinja": { + "a": 51, + "d": 0 + }, + 
"mne/html_templates/repr/_filters.html.jinja": { + "a": 48, + "d": 0 + }, + "mne/html_templates/repr/_general.html.jinja": { + "a": 68, + "d": 0 + }, + "mne/html_templates/repr/_js_and_css.html.jinja": { + "a": 7, + "d": 0 + }, + "mne/html_templates/repr/epochs.html.jinja": { + "a": 10, + "d": 22 + }, + "mne/html_templates/repr/evoked.html.jinja": { + "a": 10, + "d": 30 + }, + "mne/html_templates/repr/forward.html.jinja": { + "a": 25, + "d": 8 + }, + "mne/html_templates/repr/info.html.jinja": { + "a": 10, + "d": 101 + }, + "mne/html_templates/repr/raw.html.jinja": { + "a": 10, + "d": 1 + }, + "mne/html_templates/repr/static/repr.css": { + "a": 105, + "d": 0 + }, + "mne/html_templates/repr/static/repr.js": { + "a": 35, + "d": 0 + }, + "mne/io/base.py": { + "a": 8, + "d": 8 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 2, + "d": 3 + }, + "mne/report/tests/test_report.py": { + "a": 2, + "d": 2 + }, + "mne/utils/tests/test_misc.py": { + "a": 3, + "d": 3 + }, + "tutorials/intro/70_report.py": { + "a": 28, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12584.json b/doc/sphinxext/prs/12584.json new file mode 100644 index 00000000000..a2e4d0c75ca --- /dev/null +++ b/doc/sphinxext/prs/12584.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "47d26ec99ac4b65deeac5c561860bb9ed7473878", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12584.newfeature.rst": { + "a": 4, + "d": 0 + }, + "mne/report/report.py": { + "a": 16, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12585.json b/doc/sphinxext/prs/12585.json new file mode 100644 index 00000000000..febd23e9db3 --- /dev/null +++ b/doc/sphinxext/prs/12585.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0b190849bac458e3dcbedb3b49f8e9891ded77ba", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/persyst/persyst.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12586.json b/doc/sphinxext/prs/12586.json new file mode 100644 index 00000000000..9c63c4f02ed --- /dev/null +++ b/doc/sphinxext/prs/12586.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "43414ba878ee7746464a8fd5aa7f4a555ea0bd5e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".git_archival.txt": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12587.json b/doc/sphinxext/prs/12587.json new file mode 100644 index 00000000000..c7e1fa3e467 --- /dev/null +++ b/doc/sphinxext/prs/12587.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8903b4a1a2a3f2a6c73e2e5793f8e793151de633", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/spectrum.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12588.json b/doc/sphinxext/prs/12588.json new file mode 100644 index 00000000000..57cff37e14d --- /dev/null +++ b/doc/sphinxext/prs/12588.json @@ -0,0 +1,939 @@ +{ + "merge_commit_sha": "e39995d9be6fc831c7a4a59f09b7a7c0a41ae315", + "authors": [ + { + "n": null, + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": 
"66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/flow_diagram.py": { + "a": 5, + "d": 5 + }, + "doc/sphinxext/gen_commands.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/mne_doc_utils.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 1, + "d": 1 + }, + "examples/forward/forward_sensitivity_maps.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/label_activation_from_stc.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/label_from_stc.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/label_source_activations.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/psf_ctf_vertices_lcmv.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/read_inverse.py": { + "a": 8, + "d": 8 + }, + "examples/inverse/time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 1, + "d": 1 + }, + "examples/stats/sensor_permutation_test.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 3, + "d": 8 + }, + "examples/time_frequency/source_space_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/_digitization.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/compensator.py": { + "a": 6, + "d": 10 + }, + "mne/_fiff/ctf_comp.py": { + "a": 3, + "d": 3 + }, + "mne/_fiff/matrix.py": { + "a": 4, + "d": 4 + }, + "mne/_fiff/meas_info.py": { + "a": 25, + "d": 40 + }, + "mne/_fiff/open.py": { + "a": 4, + "d": 3 + }, + "mne/_fiff/pick.py": { + "a": 9, + "d": 9 + }, + "mne/_fiff/proc_history.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/proj.py": { + "a": 18, + "d": 23 + }, + "mne/_fiff/reference.py": { + "a": 11, + "d": 13 + }, + "mne/_fiff/tests/test_compensator.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_constants.py": { + "a": 5, + "d": 5 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tree.py": { + "a": 3, + "d": 4 + }, + "mne/_fiff/write.py": { + "a": 1, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 1, + "d": 1 + }, + "mne/_ola.py": { + "a": 11, + "d": 15 + }, + "mne/annotations.py": { + "a": 5, + "d": 8 + }, + "mne/baseline.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 4 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 3, + "d": 5 + }, + "mne/bem.py": { + "a": 57, + "d": 73 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 3, + "d": 3 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 6, + "d": 6 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 7, + "d": 7 + }, + "mne/channels/tests/test_montage.py": { + "a": 10, + "d": 10 + }, + "mne/chpi.py": { + "a": 15, + "d": 25 + }, 
+ "mne/commands/mne_anonymize.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_browse_raw.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 3, + "d": 3 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 14, + "d": 14 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 13, + "d": 13 + }, + "mne/commands/mne_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_flash_bem.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 6, + "d": 6 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_maxfilter.py": { + "a": 0, + "d": 257 + }, + "mne/commands/mne_report.py": { + "a": 6, + "d": 6 + }, + "mne/commands/mne_setup_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_show_info.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_surf2bem.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 1, + "d": 1 + }, + "mne/commands/utils.py": { + "a": 3, + "d": 3 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 26, + "d": 26 + }, + "mne/cov.py": { + "a": 2, + "d": 4 + }, + "mne/cuda.py": { + "a": 5, + "d": 6 + }, + "mne/datasets/_fetch.py": { + "a": 2, + "d": 4 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/config.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 5, + "d": 9 + }, + "mne/datasets/utils.py": { + "a": 6, + "d": 6 + }, + "mne/decoding/csp.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 12, + "d": 12 + }, + "mne/dipole.py": { + "a": 16, + "d": 18 + }, + "mne/epochs.py": { + "a": 10, + "d": 15 + }, + "mne/event.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 5, + "d": 5 + }, + "mne/export/_export.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 9, + "d": 9 + }, + "mne/fixes.py": { + "a": 3, + "d": 3 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_lead_dots.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 13, + "d": 15 + }, + "mne/forward/forward.py": { + "a": 20, + "d": 22 + }, + "mne/gui/_coreg.py": { + "a": 3, + "d": 3 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 14, + "d": 19 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 3, + "d": 3 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 21, + "d": 23 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 3, + "d": 3 + }, + "mne/io/artemis123/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 11, + "d": 11 + }, + "mne/io/besa/besa.py": { + "a": 2, + "d": 2 + }, + "mne/io/boxy/boxy.py": { + "a": 2, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 7, + "d": 7 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 2, + "d": 3 + }, + "mne/io/bti/bti.py": { + "a": 13, + "d": 15 + }, + "mne/io/bti/read.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/eeg.py": { + "a": 2, + "d": 2 
+ }, + "mne/io/ctf/hc.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 10, + "d": 10 + }, + "mne/io/ctf/res4.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/trans.py": { + "a": 4, + "d": 4 + }, + "mne/io/curry/curry.py": { + "a": 3, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 3 + }, + "mne/io/eeglab/eeglab.py": { + "a": 7, + "d": 7 + }, + "mne/io/egi/egi.py": { + "a": 8, + "d": 10 + }, + "mne/io/egi/egimff.py": { + "a": 7, + "d": 7 + }, + "mne/io/egi/general.py": { + "a": 2, + "d": 2 + }, + "mne/io/eximia/eximia.py": { + "a": 1, + "d": 1 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 2, + "d": 2 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 2, + "d": 2 + }, + "mne/io/fieldtrip/utils.py": { + "a": 3, + "d": 3 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/hitachi/hitachi.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 9, + "d": 9 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 1 + }, + "mne/io/nihon/nihon.py": { + "a": 4, + "d": 4 + }, + "mne/io/nirx/nirx.py": { + "a": 3, + "d": 3 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 2, + "d": 2 + }, + "mne/io/persyst/persyst.py": { + "a": 6, + "d": 6 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 16, + "d": 16 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 13, + "d": 13 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 7, + "d": 9 + }, + "mne/morph_map.py": { + "a": 3, + "d": 3 + }, + "mne/parallel.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_csd.py": { + "a": 5, + "d": 7 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/bads.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eyetracking/eyetracking.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 15, + "d": 17 + }, + "mne/preprocessing/infomax_.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 23, + "d": 29 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/stim.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_annotate_amplitude.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 2, + "d": 4 + }, + "mne/proj.py": { + "a": 2, + "d": 2 + }, + "mne/rank.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 6, + "d": 10 + }, + "mne/simulation/metrics/metrics.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 5, + "d": 5 + }, + "mne/simulation/source.py": { + "a": 5, + "d": 5 + }, + "mne/simulation/tests/test_source.py": { + "a": 2, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 16, + "d": 18 + }, + 
"mne/source_space/_source_space.py": { + "a": 34, + "d": 34 + }, + "mne/stats/cluster_level.py": { + "a": 2, + "d": 2 + }, + "mne/stats/regression.py": { + "a": 3, + "d": 3 + }, + "mne/surface.py": { + "a": 15, + "d": 22 + }, + "mne/tests/test_annotations.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_bem.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_coreg.py": { + "a": 3, + "d": 5 + }, + "mne/tests/test_cov.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_event.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_line_endings.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stft.py": { + "a": 1, + "d": 3 + }, + "mne/time_frequency/csd.py": { + "a": 7, + "d": 7 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 12, + "d": 18 + }, + "mne/transforms.py": { + "a": 5, + "d": 5 + }, + "mne/utils/_bunch.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 2, + "d": 2 + }, + "mne/utils/check.py": { + "a": 7, + "d": 11 + }, + "mne/utils/config.py": { + "a": 9, + "d": 11 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 4 + }, + "mne/utils/misc.py": { + "a": 3, + "d": 3 + }, + "mne/utils/mixin.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 10, + "d": 12 + }, + "mne/viz/_3d.py": { + "a": 7, + "d": 10 + }, + "mne/viz/_brain/_brain.py": { + "a": 7, + "d": 7 + }, + "mne/viz/_brain/surface.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 12, + "d": 19 + }, + "mne/viz/evoked_field.py": { + "a": 3, + "d": 3 + }, + "mne/viz/ica.py": { + "a": 5, + "d": 5 + }, + "mne/viz/misc.py": { + "a": 21, + "d": 23 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 7, + "d": 9 + }, + "mne/viz/topomap.py": { + "a": 10, + "d": 15 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 4, + "d": 4 + }, + "tutorials/inverse/90_phantom_4DBTi.py": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 3, + "d": 3 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12592.json b/doc/sphinxext/prs/12592.json new file mode 100644 index 00000000000..8bcc4f75bbb --- /dev/null +++ b/doc/sphinxext/prs/12592.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7fd22d63e8e478db23e9f114abc9b1b5228ed822", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/autofix.yml": { + "a": 1, + 
"d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/_csd.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12593.json b/doc/sphinxext/prs/12593.json new file mode 100644 index 00000000000..cc17927eacf --- /dev/null +++ b/doc/sphinxext/prs/12593.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "356e8546890b8f798a14777991ba7de6cd9ab9bb", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/12593.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/interpolation.py": { + "a": 77, + "d": 23 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 23, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12595.json b/doc/sphinxext/prs/12595.json new file mode 100644 index 00000000000..d6e779f9299 --- /dev/null +++ b/doc/sphinxext/prs/12595.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "e28cba92d80d64ae6bceae0eac4447ea01b74144", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/funding/cds-dark.svg": { + "a": 26, + "d": 0 + }, + "doc/_static/funding/cds.png": { + "a": 0, + "d": 0 + }, + "doc/_static/funding/cds.svg": { + "a": 27, + "d": 0 + }, + "doc/_static/style.css": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 13, + "d": 2 + }, + "doc/funding.rst": { + "a": 5, + "d": 2 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12597.json b/doc/sphinxext/prs/12597.json new file mode 100644 index 00000000000..760197f8ed5 --- /dev/null +++ b/doc/sphinxext/prs/12597.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "79d54dca56e4cb8c1ed6c0eec76ee4a5f66739ce", + "authors": [ + { + "n": "George O'Neill", + "e": null + } + ], + "changes": { + "doc/changes/devel/12597.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/fil/fil.py": { + "a": 1, + "d": 3 + }, + "mne/io/fil/tests/test_fil.py": { + "a": 32, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12598.json b/doc/sphinxext/prs/12598.json new file mode 100644 index 00000000000..5057877a0e3 --- /dev/null +++ b/doc/sphinxext/prs/12598.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "537faea66da9a0f2bb4b09efe39b90627c941d68", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/filter.py": { + "a": 9, + "d": 1 + }, + "mne/time_frequency/spectrum.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12600.json b/doc/sphinxext/prs/12600.json new file mode 100644 index 00000000000..ba3b97ecd16 --- /dev/null +++ b/doc/sphinxext/prs/12600.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7a4706b078b2cd0b9ccf4b3c6be83039ddc88617", + "authors": [ + { + "n": "Michal Žák", + "e": null + } + ], + "changes": { + ".github/ISSUE_TEMPLATE/bug_report.yml": { + "a": 2, + "d": 2 + }, + "doc/changes/devel/12600.other.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12601.json b/doc/sphinxext/prs/12601.json new file mode 100644 index 00000000000..dbfdfdffa02 --- /dev/null +++ b/doc/sphinxext/prs/12601.json @@ -0,0 +1,23 @@ +{ + 
"merge_commit_sha": "01a60d97a8cee2864dd81ac9c25d6ba126a4af2d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 12, + "d": 18 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 4 + }, + "pyproject.toml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12602.json b/doc/sphinxext/prs/12602.json new file mode 100644 index 00000000000..11660f49ced --- /dev/null +++ b/doc/sphinxext/prs/12602.json @@ -0,0 +1,299 @@ +{ + "merge_commit_sha": "5b1c49e18a3a937c884cac529eebcc35a3e9ef04", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + ".github/actions/rename_towncrier/rename_towncrier.py": { + "a": 9, + "d": 11 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/meas_info.py": { + "a": 2, + "d": 4 + }, + "mne/_fiff/proj.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tests/test_constants.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/write.py": { + "a": 2, + "d": 2 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 6 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 3, + "d": 3 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 2, + "d": 4 + }, + "mne/decoding/transformer.py": { + "a": 4, + "d": 4 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/export/_egimff.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 3, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 3 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/io/besa/besa.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 3 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/hitachi/hitachi.py": { + "a": 1, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/persyst/persyst.py": { + "a": 2, + "d": 2 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 1, + "d": 1 + }, + "mne/morph_map.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_csd.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ieeg/_volume.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/realign.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/stim.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 8, + "d": 14 + }, + "mne/report/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/metrics/metrics.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/source_space/_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 3, + "d": 5 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 2, 
+ "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_filter.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + }, + "mne/transforms.py": { + "a": 3, + "d": 3 + }, + "mne/utils/check.py": { + "a": 4, + "d": 4 + }, + "mne/utils/config.py": { + "a": 2, + "d": 2 + }, + "mne/utils/mixin.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_logging.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_figure.py": { + "a": 2, + "d": 4 + }, + "mne/viz/circle.py": { + "a": 2, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 3 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12603.json b/doc/sphinxext/prs/12603.json new file mode 100644 index 00000000000..579738678d5 --- /dev/null +++ b/doc/sphinxext/prs/12603.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1c5b39ff1d99bbcb2fc0e0071a989b3f3845ff30", + "authors": [ + { + "n": "Dimitri Papadopoulos Orfanos", + "e": null + } + ], + "changes": { + "tools/dev/ensure_headers.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12604.json b/doc/sphinxext/prs/12604.json new file mode 100644 index 00000000000..69cb0508b24 --- /dev/null +++ b/doc/sphinxext/prs/12604.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "44c69f5a5990ecb57f0f6590b37986a81f2bd325", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12605.json b/doc/sphinxext/prs/12605.json new file mode 100644 index 00000000000..e6d4ee4bf4c --- /dev/null +++ b/doc/sphinxext/prs/12605.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "4cffc343a3cb7c75101a11294c517d845ab423eb", + "authors": [ + { + "n": "Michal Žák", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12605.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/pick.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 21, + "d": 18 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 15 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12609.json b/doc/sphinxext/prs/12609.json new file mode 100644 index 00000000000..a4b6200e3d9 --- /dev/null +++ b/doc/sphinxext/prs/12609.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "cf0e12d9e329440408b19caef974ab95a1439686", + "authors": [ + { + "n": "Fourcaud-Trocmé", + "e": "celicolimmo@free.fr" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12609.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 20, + "d": 20 + }, + 
"mne/stats/tests/test_cluster_level.py": { + "a": 13, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12610.json b/doc/sphinxext/prs/12610.json new file mode 100644 index 00000000000..ed11cca70da --- /dev/null +++ b/doc/sphinxext/prs/12610.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "6cad308dd83be5054a63ad0748ead08b641d6ac0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".git-blame-ignore-revs": { + "a": 3, + "d": 0 + }, + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 3, + "d": 1 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/tests/test_base.py": { + "a": 2, + "d": 0 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 18, + "d": 6 + }, + "mne/stats/_adjacency.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 10 + }, + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12612.json b/doc/sphinxext/prs/12612.json new file mode 100644 index 00000000000..e1d319ab340 --- /dev/null +++ b/doc/sphinxext/prs/12612.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5a20b82e7d8e1ae2976bd41e7a92ba68daca171d", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12612.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12615.json b/doc/sphinxext/prs/12615.json new file mode 100644 index 00000000000..31b9ed6151c --- /dev/null +++ b/doc/sphinxext/prs/12615.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "823e25deb03ee23b2df1fde9c03944c14d25ef94", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 1 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12616.json b/doc/sphinxext/prs/12616.json new file mode 100644 index 00000000000..e360da0535d --- /dev/null +++ b/doc/sphinxext/prs/12616.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4c9a176de185efd2ca3da466c81bca6094873beb", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/12616.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12617.json b/doc/sphinxext/prs/12617.json new file mode 100644 index 00000000000..10e0b66fd8b --- /dev/null +++ b/doc/sphinxext/prs/12617.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "15fee8910b6b18938464e22caedec238b8f5334d", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12619.json b/doc/sphinxext/prs/12619.json new file mode 100644 index 00000000000..d9080f92d8f --- /dev/null +++ b/doc/sphinxext/prs/12619.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5c2a25567e5e1613c204ad40be58bf483db6a99e", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"examples/inverse/source_space_snr.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12620.json b/doc/sphinxext/prs/12620.json new file mode 100644 index 00000000000..28dec19f622 --- /dev/null +++ b/doc/sphinxext/prs/12620.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b12396dfe051139682bdf566687b384789c034e7", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12620.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 16, + "d": 8 + }, + "tools/vulture_allowlist.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12622.json b/doc/sphinxext/prs/12622.json new file mode 100644 index 00000000000..aa9f7798495 --- /dev/null +++ b/doc/sphinxext/prs/12622.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "07aecf85877621a37fd83b69a78d837f81df81e2", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/preprocessing/eyetracking/_pupillometry.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12624.json b/doc/sphinxext/prs/12624.json new file mode 100644 index 00000000000..51e97bee2f7 --- /dev/null +++ b/doc/sphinxext/prs/12624.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "8f66cbcb28c7f41bf972b22c067d03782b14d107", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/devel/12583.newfeature.rst": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/html_templates/_templates.py": { + "a": 0, + "d": 2 + }, + "mne/html_templates/repr/_channels.html.jinja": { + "a": 2, + "d": 2 + }, + "tools/vulture_allowlist.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12625.json b/doc/sphinxext/prs/12625.json new file mode 100644 index 00000000000..cd25c47a972 --- /dev/null +++ b/doc/sphinxext/prs/12625.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c600f909571d5e27387dd6f63840019219449aae", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12628.json b/doc/sphinxext/prs/12628.json new file mode 100644 index 00000000000..1cd6326bbcd --- /dev/null +++ b/doc/sphinxext/prs/12628.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7f2c9315dc6f9709f480ac711ebe30368b052c36", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12628.newfeature.rst": { + "a": 4, + "d": 0 + }, + "doc/install/advanced.rst": { + "a": 12, + "d": 0 + }, + "pyproject.toml": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12629.json b/doc/sphinxext/prs/12629.json new file mode 100644 index 00000000000..6f8b1da52c0 --- /dev/null +++ 
b/doc/sphinxext/prs/12629.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "dff97690517ac087d22ef6e42c1a13dbd3f3d9d8", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12631.json b/doc/sphinxext/prs/12631.json new file mode 100644 index 00000000000..bb97f0bf42b --- /dev/null +++ b/doc/sphinxext/prs/12631.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5fe73cb91def82968b32fd7d41fd62281c56fbf0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12633.json b/doc/sphinxext/prs/12633.json new file mode 100644 index 00000000000..b53f0aa3c68 --- /dev/null +++ b/doc/sphinxext/prs/12633.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9a222ba21cb0b3c13e6ca73c11b85c1a16838c71", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/devel/12633.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/csd.py": { + "a": 6, + "d": 3 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12634.json b/doc/sphinxext/prs/12634.json new file mode 100644 index 00000000000..b9fa2bdd35e --- /dev/null +++ b/doc/sphinxext/prs/12634.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "704f3e25b8fedbeda0443ff623d2c3bfde210bb1", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/devel/12633.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/csd.py": { + "a": 6, + "d": 3 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12635.json b/doc/sphinxext/prs/12635.json new file mode 100644 index 00000000000..d9c99052860 --- /dev/null +++ b/doc/sphinxext/prs/12635.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "07b69b33ffa6cbf7e4117590817e81123b03f17e", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12616.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12637.json b/doc/sphinxext/prs/12637.json new file mode 100644 index 00000000000..da70e562289 --- /dev/null +++ b/doc/sphinxext/prs/12637.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1d56eb98790be00cb80704c347a22b17631048aa", + "authors": [ + { + "n": "Nabil Alibou", + "e": null + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1264.json b/doc/sphinxext/prs/1264.json new file mode 100644 index 00000000000..ee6352f5eb8 --- /dev/null +++ b/doc/sphinxext/prs/1264.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "675120da19f33cc596a3c7aba75bd4be6992559a", + "authors": [ + { + "n": "marmaduke woodman", + "e": null + } + ], + "changes": { + "mne/filter.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12640.json b/doc/sphinxext/prs/12640.json new file mode 100644 index 00000000000..7ebc2748d3d --- /dev/null +++ b/doc/sphinxext/prs/12640.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b21960d10118fa567d6f409ce01547ccbc226073", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12645.json b/doc/sphinxext/prs/12645.json new file mode 100644 index 00000000000..7ced64ab582 --- /dev/null +++ b/doc/sphinxext/prs/12645.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4ce0eb697ffecf8451bf43f3c67c015750ea9b27", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/check_changelog.yml": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tag.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 2 + }, + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12646.json b/doc/sphinxext/prs/12646.json new file mode 100644 index 00000000000..7dde635c09b --- /dev/null +++ b/doc/sphinxext/prs/12646.json @@ -0,0 +1,155 @@ +{ + "merge_commit_sha": "be5a1158a509bc8e9bb7801ab1690afdfaf4cb81", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12646.bugfix.rst": { + "a": 10, + "d": 0 + }, + "mne/_fiff/proc_history.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tag.py": { + "a": 15, + "d": 22 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 4, + "d": 4 + }, + 
"mne/_fiff/write.py": { + "a": 3, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 7, + "d": 7 + }, + "mne/channels/tests/test_channels.py": { + "a": 3, + "d": 3 + }, + "mne/fixes.py": { + "a": 10, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 8, + "d": 8 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 1, + "d": 0 + }, + "mne/label.py": { + "a": 6, + "d": 6 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 29, + "d": 20 + }, + "mne/morph_map.py": { + "a": 5, + "d": 4 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 14, + "d": 14 + }, + "mne/source_space/_source_space.py": { + "a": 11, + "d": 9 + }, + "mne/stats/_adjacency.py": { + "a": 10, + "d": 9 + }, + "mne/stats/cluster_level.py": { + "a": 9, + "d": 5 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_permutations.py": { + "a": 3, + "d": 2 + }, + "mne/surface.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_label.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_morph.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_morph_map.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 9, + "d": 3 + }, + "mne/utils/tests/test_numerics.py": { + "a": 9, + "d": 9 + }, + "mne/viz/_brain/_brain.py": { + "a": 4, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 6, + "d": 6 + }, + "tools/environment_minimal.yml": { + "a": 15, + "d": 0 + }, + "tools/environment_old.yml": { + "a": 17, + "d": 0 + }, + "tools/github_actions_dependencies.sh": { + "a": 10, + "d": 9 + }, + "tools/github_actions_env_vars.sh": { + "a": 14, + "d": 16 + }, + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12647.json b/doc/sphinxext/prs/12647.json new file mode 100644 index 00000000000..04b360c6e45 --- /dev/null +++ b/doc/sphinxext/prs/12647.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a307bbd5ea99d5ef35a5526ed9816b530b7e04a5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/circle_artifacts.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12649.json b/doc/sphinxext/prs/12649.json new file mode 100644 index 00000000000..d7dca0c8298 --- /dev/null +++ b/doc/sphinxext/prs/12649.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2e065838b82c0c28a39c5186f6ba6f3872a9fff1", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12649.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 28, + "d": 22 + }, + "mne/tests/test_epochs.py": { + "a": 15, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1265.json b/doc/sphinxext/prs/1265.json new file mode 100644 index 00000000000..3d7c87ff8bd --- /dev/null +++ b/doc/sphinxext/prs/1265.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "15beccae746dd450cc7eefff523a73a66fcff3f7", + "authors": [ + { + "n": "marmaduke woodman", + "e": null + } + ], + "changes": { + "mne/filter.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12650.json b/doc/sphinxext/prs/12650.json new file mode 100644 index 00000000000..b44dddabd0b --- 
/dev/null +++ b/doc/sphinxext/prs/12650.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6fb9aaef6380033ad2fdeeaf38f0724fdb1aff70", + "authors": [ + { + "n": "Xabier de Zuazo", + "e": "xabier@zuazo.org" + } + ], + "changes": { + "doc/changes/devel/12650.other.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/help/faq.rst": { + "a": 5, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12651.json b/doc/sphinxext/prs/12651.json new file mode 100644 index 00000000000..ce476bae57f --- /dev/null +++ b/doc/sphinxext/prs/12651.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "07f429eeebf51a1a686be2bc25a9f760e9c66520", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/stats/_adjacency.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 2, + "d": 1 + }, + "mne/utils/check.py": { + "a": 9, + "d": 0 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 1, + "d": 0 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12652.json b/doc/sphinxext/prs/12652.json new file mode 100644 index 00000000000..59ff3f45254 --- /dev/null +++ b/doc/sphinxext/prs/12652.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "69f7d88f2e44f43fe7be92885e0e533e84320a32", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/12652.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 7, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 52, + "d": 14 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 43, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12655.json b/doc/sphinxext/prs/12655.json new file mode 100644 index 00000000000..e5c9c9d0683 --- /dev/null +++ b/doc/sphinxext/prs/12655.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b1675730ac70d910abe43d042afa26bc653548c9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12655.newfeature.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 15, + "d": 4 + }, + "mne/viz/tests/test_3d.py": { + "a": 14, + "d": 7 + }, + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12656.json b/doc/sphinxext/prs/12656.json new file mode 100644 index 00000000000..6755a485d1c --- /dev/null +++ b/doc/sphinxext/prs/12656.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3c6a054093d305a98757a97398e5e34988a3aced", + "authors": [ + { + "n": "Qian Chu", + "e": null + } + ], + "changes": { + "doc/changes/devel/12656.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/export/_brainvision.py": { + "a": 7, + "d": 0 + }, + "mne/export/_edf.py": { + "a": 4, + "d": 1 + }, + "mne/export/_eeglab.py": { + "a": 11, + "d": 5 + }, + "mne/export/_export.py": { + "a": 7, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 82, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12657.json b/doc/sphinxext/prs/12657.json new file mode 100644 index 00000000000..4624c2e7712 
--- /dev/null +++ b/doc/sphinxext/prs/12657.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "382c7bae5297f966c8698d22b66dcca27dd2d563", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12659.json b/doc/sphinxext/prs/12659.json new file mode 100644 index 00000000000..0a9ebe097a1 --- /dev/null +++ b/doc/sphinxext/prs/12659.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d0eff795ad5d52c72428f61cb39cfb966bd60571", + "authors": [ + { + "n": "Ilian AZZ", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12659.other.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/install/mne_tools_suite.rst": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12661.json b/doc/sphinxext/prs/12661.json new file mode 100644 index 00000000000..c30a8858c6e --- /dev/null +++ b/doc/sphinxext/prs/12661.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f90dd6eab35819df1da2b3e9aefb7f5d6ed3061c", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/12661.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 17, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12664.json b/doc/sphinxext/prs/12664.json new file mode 100644 index 00000000000..b68f8df8eef --- /dev/null +++ b/doc/sphinxext/prs/12664.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4da96f45ec13cdbbf8f88cd942b76d4a83609bfa", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/devel/12664.other.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/ssd.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12666.json b/doc/sphinxext/prs/12666.json new file mode 100644 index 00000000000..800173e3427 --- /dev/null +++ b/doc/sphinxext/prs/12666.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "663d7417fcf498f87c960e656672558f7efed698", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/release.yml": { + "a": 2, + "d": 4 + }, + "CITATION.cff": { + "a": 3, + "d": 3 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "codemeta.json": { + "a": 3, + "d": 3 + }, + "doc/changes/devel/12633.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/v1.7.rst": { + "a": 17, + "d": 0 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 17, + "d": 19 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 3 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tools/generate_codemeta.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12667.json b/doc/sphinxext/prs/12667.json new file mode 100644 index 00000000000..c5f3b8dc563 --- /dev/null +++ b/doc/sphinxext/prs/12667.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1206b996eeff4f4ae359ca1188078f8a16474431", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/check_changelog.yml": { + "a": 1, + "d": 0 + }, + ".github/workflows/release.yml": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12668.json b/doc/sphinxext/prs/12668.json new file mode 100644 index 00000000000..2caa63dc9c3 --- /dev/null +++ b/doc/sphinxext/prs/12668.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c24702cb9feff0ab4a38b9903d4b8e23378c35be", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "CITATION.cff": { + "a": 3, + "d": 3 + }, + "codemeta.json": { + "a": 3, + "d": 3 + }, + "doc/changes/v1.7.rst": { + "a": 17, + "d": 0 + }, + "doc/install/installers.rst": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12669.json b/doc/sphinxext/prs/12669.json new file mode 100644 index 00000000000..a8acbd600de --- /dev/null +++ b/doc/sphinxext/prs/12669.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "88388994ca489c871ad5fa58adfd8af5e19bbb6c", + "authors": [ + { + "n": "NoahMarkowitz", + "e": null + } + ], + "changes": { + "doc/changes/devel/12669.newfeature.rst": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 8, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12670.json b/doc/sphinxext/prs/12670.json new file mode 100644 index 00000000000..4c9bd766a20 --- /dev/null +++ b/doc/sphinxext/prs/12670.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1792baef0253e256fec386449783ebd606bc1653", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 4, + "d": 4 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12671.json b/doc/sphinxext/prs/12671.json new file mode 100644 index 00000000000..31274fc641c --- /dev/null +++ b/doc/sphinxext/prs/12671.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "564cc1a2fd0b3263a49660b6cfce04b2186d39dd", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/installers.rst": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12673.json b/doc/sphinxext/prs/12673.json new file mode 100644 index 00000000000..83e83663a75 --- /dev/null +++ b/doc/sphinxext/prs/12673.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d082461bcb701fd66b186e92535fb5288f047c38", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/sphinxext/mne_doc_utils.py": { + "a": 2, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12674.json b/doc/sphinxext/prs/12674.json new file mode 100644 index 00000000000..1174e0d22a8 --- /dev/null +++ b/doc/sphinxext/prs/12674.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3c15798383a132903506c6c101135580265d278f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12675.json b/doc/sphinxext/prs/12675.json new file mode 100644 index 00000000000..80faec8bc39 --- /dev/null +++ b/doc/sphinxext/prs/12675.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2c696d5ca0bc48b44f7916322b0759c760c54f94", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + ".github/release.yaml": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12676.json b/doc/sphinxext/prs/12676.json new file mode 100644 index 00000000000..07ed3c37373 --- /dev/null +++ b/doc/sphinxext/prs/12676.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e8854e1f485de868273c7510dfb646039835da25", + "authors": [ + { + "n": "Qian Chu", + "e": null + } + ], + "changes": { + "doc/changes/devel/12676.bugfix.rst": { + "a": 2, + "d": 0 + }, + "mne/export/_edf.py": { + "a": 8, + "d": 3 + }, + "mne/export/tests/test_export.py": { + "a": 41, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12677.json b/doc/sphinxext/prs/12677.json new file mode 100644 index 00000000000..3102dcf692c --- /dev/null +++ b/doc/sphinxext/prs/12677.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "df1e76115b70eed9754cd2ac8a29774b8d8f134b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + ".github/dependabot.yml": { + "a": 4, + "d": 0 + }, + ".github/workflows/tests.yml": { + "a": 0, + "d": 1 + }, + "environment.yml": { + "a": 3, + "d": 2 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 1 + }, + "tools/github_actions_env_vars.sh": { + "a": 3, + "d": 1 + }, + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 1 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/12678.json b/doc/sphinxext/prs/12678.json new file mode 100644 index 00000000000..49f68c13485 --- /dev/null +++ b/doc/sphinxext/prs/12678.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "caba81b9fb263a409a2c339ba63a148a92415d0d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12679.json b/doc/sphinxext/prs/12679.json new file mode 100644 index 00000000000..1973e5c2c00 --- /dev/null +++ b/doc/sphinxext/prs/12679.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "078ad48bae80534bd7d88926a9a13a4df5d765e5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + ".github/dependabot.yml": { + "a": 4, + "d": 0 + }, + ".github/workflows/tests.yml": { + "a": 0, + "d": 1 + }, + "environment.yml": { + "a": 3, + "d": 2 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 1 + }, + "tools/github_actions_env_vars.sh": { + "a": 1, + "d": 1 + }, + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 1 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12680.json b/doc/sphinxext/prs/12680.json new file mode 100644 index 00000000000..87a7cdc0afb --- /dev/null +++ b/doc/sphinxext/prs/12680.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9cc9fb1c1a2223e0ab349aae34c678b8b09ec9c2", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12681.json b/doc/sphinxext/prs/12681.json new file mode 100644 index 00000000000..425933c2931 --- /dev/null +++ b/doc/sphinxext/prs/12681.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "141162a48ad34fecad6ee339022ac0566c8e5d8c", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/dependabot.yml": { + "a": 2, + "d": 0 + }, + ".github/workflows/autofix.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12682.json b/doc/sphinxext/prs/12682.json new file mode 100644 index 00000000000..152b9992976 --- /dev/null +++ b/doc/sphinxext/prs/12682.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "96679b70ce34b970f5225131e2b609ad59a599f2", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12686.json b/doc/sphinxext/prs/12686.json new file mode 100644 index 00000000000..4f509532cd9 --- /dev/null +++ b/doc/sphinxext/prs/12686.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "bbc2a8243c1060112f2870570c5eb4719459cce9", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12686.newfeature.rst": { + "a": 2, + "d": 0 + }, + "mne/html_templates/_templates.py": { + "a": 11, + "d": 0 + }, + "mne/html_templates/repr/_acquisition.html.jinja": { + "a": 9, + "d": 2 + 
}, + "mne/report/tests/test_report.py": { + "a": 3, + "d": 1 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12687.json b/doc/sphinxext/prs/12687.json new file mode 100644 index 00000000000..120311f59fb --- /dev/null +++ b/doc/sphinxext/prs/12687.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f16ebc51335d0bc9030f38763517e30799bc756a", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12687.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 12, + "d": 2 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12688.json b/doc/sphinxext/prs/12688.json new file mode 100644 index 00000000000..f76de999cb1 --- /dev/null +++ b/doc/sphinxext/prs/12688.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c9f9bd5e913a1d869a613940067291752e0398d8", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12688.other.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12689.json b/doc/sphinxext/prs/12689.json new file mode 100644 index 00000000000..d42fe2fccda --- /dev/null +++ b/doc/sphinxext/prs/12689.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b65fce62aa49b7302dab15c20be18698453bb4f5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12691.json b/doc/sphinxext/prs/12691.json new file mode 100644 index 00000000000..6f9356dbeed --- /dev/null +++ b/doc/sphinxext/prs/12691.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b87838f8988755b70d9506afc3b5c04fcbf4af3d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 16, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12692.json b/doc/sphinxext/prs/12692.json new file mode 100644 index 00000000000..51a4c160fb2 --- /dev/null +++ b/doc/sphinxext/prs/12692.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cb3ab67d7cca0b9b1a6c813167cc3305fa559630", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/PULL_REQUEST_TEMPLATE.md": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12694.json b/doc/sphinxext/prs/12694.json new file mode 100644 index 00000000000..af14aaf2642 --- /dev/null +++ b/doc/sphinxext/prs/12694.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "31ef32efb478b6ee32669c5b50196114c13d2443", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12694.bugfix.rst": { + "a": 2, + "d": 0 + }, + 
"doc/development/governance.rst": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/tests/test_csp.py": { + "a": 24, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12695.json b/doc/sphinxext/prs/12695.json new file mode 100644 index 00000000000..9e0bcfd8956 --- /dev/null +++ b/doc/sphinxext/prs/12695.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ac863e63e6233dd3ddb60e1058731f1839e55b02", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12694.bugfix.rst": { + "a": 2, + "d": 0 + }, + "doc/development/governance.rst": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/tests/test_csp.py": { + "a": 24, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12696.json b/doc/sphinxext/prs/12696.json new file mode 100644 index 00000000000..f299d2b4601 --- /dev/null +++ b/doc/sphinxext/prs/12696.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e01a654ae05f8fcaec5d814a1081256947055205", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12697.json b/doc/sphinxext/prs/12697.json new file mode 100644 index 00000000000..9772c0846fa --- /dev/null +++ b/doc/sphinxext/prs/12697.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4954672afd85a8f01acf2c75f015ecadf0a53c59", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12697.other.rst": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 27, + "d": 37 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12699.json b/doc/sphinxext/prs/12699.json new file mode 100644 index 00000000000..be87dab6f35 --- /dev/null +++ b/doc/sphinxext/prs/12699.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "aa45f40d969cf2808c3628ed0315fb0f3f31ae35", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12699.apichange.rst": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 30, + "d": 11 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12702.json b/doc/sphinxext/prs/12702.json new file mode 100644 index 00000000000..b931637f9c5 --- /dev/null +++ b/doc/sphinxext/prs/12702.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7af05c63eed37accef26682a9015beadcfabdf4e", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12703.json b/doc/sphinxext/prs/12703.json new file mode 100644 index 00000000000..ce92df4fd4b --- /dev/null +++ b/doc/sphinxext/prs/12703.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "daca2b7910ae16dece5327e1b75eb60fc5df8406", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": 
"66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12703.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 4, + "d": 2 + }, + "mne/viz/montage.py": { + "a": 40, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12706.json b/doc/sphinxext/prs/12706.json new file mode 100644 index 00000000000..40a26782e67 --- /dev/null +++ b/doc/sphinxext/prs/12706.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e5584427e6cc1bbcaf0c6dc114aeec6f716f97ae", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/circleci_dependencies.sh": { + "a": 5, + "d": 1 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12707.json b/doc/sphinxext/prs/12707.json new file mode 100644 index 00000000000..c2ee5a58263 --- /dev/null +++ b/doc/sphinxext/prs/12707.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "26e5057adc81313bdb3084e3119bd334d7dc06bc", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/api/statistics.rst": { + "a": 7, + "d": 0 + }, + "doc/changes/devel/12707.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 11, + "d": 0 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + }, + "mne/stats/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/stats/erp.py": { + "a": 80, + "d": 0 + }, + "mne/stats/tests/test_erp.py": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12708.json b/doc/sphinxext/prs/12708.json new file mode 100644 index 00000000000..389531a2f78 --- /dev/null +++ b/doc/sphinxext/prs/12708.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "2efa15cee622c4644e4d09ef5dc8d92c88c17de9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 5, + "d": 5 + }, + "doc/conf.py": { + "a": 5, + "d": 8 + }, + "doc/sphinxext/mne_doc_utils.py": { + "a": 15, + "d": 4 + }, + "mne/report/report.py": { + "a": 11, + "d": 15 + }, + "mne/report/tests/test_report.py": { + "a": 13, + "d": 10 + }, + "tools/circleci_bash_env.sh": { + "a": 2, + "d": 3 + }, + "tools/circleci_download.sh": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12712.json b/doc/sphinxext/prs/12712.json new file mode 100644 index 00000000000..8d85ee60df7 --- /dev/null +++ b/doc/sphinxext/prs/12712.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1a341b1ba937228370cb4ce915f17affcec53251", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/stats/erp.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12713.json b/doc/sphinxext/prs/12713.json new file mode 100644 index 00000000000..8ace37b98ba --- /dev/null +++ b/doc/sphinxext/prs/12713.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "95783f8fd0f3c2dead77b3d3efcc6707d9445a66", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "LICENSE.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12716.json b/doc/sphinxext/prs/12716.json new file mode 100644 index 00000000000..b6914b0c656 --- /dev/null +++ b/doc/sphinxext/prs/12716.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "071a98fae28bd7c32c6ce357076263ef01031994", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + 
"changes": { + "doc/install/installers.rst": { + "a": 2, + "d": 22 + }, + "doc/install/manual_install.rst": { + "a": 31, + "d": 31 + }, + "mne/preprocessing/ecg.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 5, + "d": 0 + }, + "mne/rank.py": { + "a": 1, + "d": 1 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12718.json b/doc/sphinxext/prs/12718.json new file mode 100644 index 00000000000..e6644e19e73 --- /dev/null +++ b/doc/sphinxext/prs/12718.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c9adde80da913eecbc455a35f48332894241ac51", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + }, + "mne/utils/progressbar.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12719.json b/doc/sphinxext/prs/12719.json new file mode 100644 index 00000000000..1cd3cce003d --- /dev/null +++ b/doc/sphinxext/prs/12719.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3fff7d99287809a5ed086ffd58029b2b4e2228c2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12719.apichange.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12720.json b/doc/sphinxext/prs/12720.json new file mode 100644 index 00000000000..96653fc464b --- /dev/null +++ b/doc/sphinxext/prs/12720.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "e299f1efb180968aaf2bb10c1807b8a988124ffa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12720.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12720.newfeature.rst": { + "a": 2, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 34, + "d": 14 + }, + "mne/_fiff/tag.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 11, + "d": 4 + }, + "mne/_fiff/write.py": { + "a": 4, + "d": 3 + }, + "mne/export/_edf.py": { + "a": 0, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 3, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 5, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 15, + "d": 14 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 4, + "d": 5 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 1 + }, + "mne/io/hitachi/hitachi.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 4, + "d": 4 + }, + "mne/utils/__init__.pyi": { + "a": 4, + "d": 8 + }, + "mne/utils/numerics.py": { + "a": 12, + "d": 53 + }, + "mne/utils/tests/test_numerics.py": { + "a": 6, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12721.json b/doc/sphinxext/prs/12721.json new file mode 100644 index 00000000000..64bf81a8d6a --- /dev/null +++ b/doc/sphinxext/prs/12721.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6ad28a461fe29d96e5d822e2de6c4c9d1b7c2537", + "authors": [ + { + "n": "Eric 
Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12721.bugfix.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 9, + "d": 0 + }, + "mne/viz/_mpl_figure.py": { + "a": 9, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 37, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12722.json b/doc/sphinxext/prs/12722.json new file mode 100644 index 00000000000..3b29634e22c --- /dev/null +++ b/doc/sphinxext/prs/12722.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "88c0374a1f2751128ba57a439730979b8d01b472", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "examples/visualization/evoked_topomap.py": { + "a": 1, + "d": 0 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12724.json b/doc/sphinxext/prs/12724.json new file mode 100644 index 00000000000..cad123ff6af --- /dev/null +++ b/doc/sphinxext/prs/12724.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f20348ef207e41b24a0a0a3dcfc55fa470ec971c", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12724.bugfix.rst": { + "a": 1, + "d": 0 + }, + "examples/io/elekta_epochs.py": { + "a": 0, + "d": 3 + }, + "examples/visualization/topo_compare_conditions.py": { + "a": 5, + "d": 5 + }, + "mne/viz/topo.py": { + "a": 11, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12725.json b/doc/sphinxext/prs/12725.json new file mode 100644 index 00000000000..ba66d5a877d --- /dev/null +++ b/doc/sphinxext/prs/12725.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "0c369e0eaa1327e612137ba0e4a54ad776ea6e48", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12725.newfeature.rst": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 3, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 34, + "d": 30 + }, + "mne/viz/tests/test_evoked.py": { + "a": 6, + "d": 5 + }, + "tutorials/evoked/40_whitened.py": { + "a": 1, + "d": 2 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12726.json b/doc/sphinxext/prs/12726.json new file mode 100644 index 00000000000..25905cf5f87 --- /dev/null +++ b/doc/sphinxext/prs/12726.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "56fa09a20182c8a8f788ded77a4c7b4ad03ba82c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/_css.py": { + "a": 14, + "d": 10 + }, + "mne/preprocessing/tests/test_css.py": { + "a": 10, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12727.json b/doc/sphinxext/prs/12727.json new file mode 100644 index 00000000000..859b6ee7eb6 --- /dev/null +++ b/doc/sphinxext/prs/12727.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "018e9dcf07c3e53b964b8e91322b6795ce96fa52", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": 
"autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12727.bugfix.rst": { + "a": 3, + "d": 0 + }, + "doc/changes/devel/12727.newfeature.rst": { + "a": 3, + "d": 0 + }, + "examples/inverse/gamma_map_inverse.py": { + "a": 0, + "d": 2 + }, + "mne/cov.py": { + "a": 10, + "d": 1 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 5, + "d": 0 + }, + "mne/rank.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 37, + "d": 3 + }, + "mne/tests/test_rank.py": { + "a": 7, + "d": 3 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12729.json b/doc/sphinxext/prs/12729.json new file mode 100644 index 00000000000..7e40a52bfe7 --- /dev/null +++ b/doc/sphinxext/prs/12729.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f0e50d70aa89b1179b71b2b3efe323aaeed757c1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1273.json b/doc/sphinxext/prs/1273.json new file mode 100644 index 00000000000..e941911f83f --- /dev/null +++ b/doc/sphinxext/prs/1273.json @@ -0,0 +1,1219 @@ +{ + "merge_commit_sha": "e4528f0d3f6062fd01f54a78790fb0f65cecc1e7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 8, + "d": 6 + }, + "doc/source/python_reference.rst": { + "a": 25, + "d": 54 + }, + "doc/source/python_tutorial.rst": { + "a": 7, + "d": 7 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 1 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 4, + "d": 4 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 3, + "d": 3 + }, + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 3, + "d": 3 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 3, + "d": 3 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 3, + "d": 3 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 3, + "d": 3 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 3, + "d": 3 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 4, + "d": 4 + }, + "examples/decoding/plot_decoding_sensors.py": { + "a": 5, + "d": 5 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 4, + "d": 4 + }, + "examples/decoding/plot_decoding_time_generalization.py": { + "a": 5, + "d": 7 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 4, + "d": 4 + }, + "examples/export/plot_epochs_as_data_frame.py": { + "a": 3, + "d": 3 + }, + "examples/export/plot_epochs_to_nitime.py": { + "a": 4, + "d": 4 + }, + "examples/export/plot_evoked_to_nitime.py": { + "a": 3, + "d": 2 + }, + "examples/export/plot_raw_to_nitime.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 3, + "d": 3 + }, + 
"examples/inverse/plot_gamma_map_inverse.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 5, + "d": 5 + }, + "examples/plot_channel_epochs_image.py": { + "a": 4, + "d": 4 + }, + "examples/plot_define_target_events.py": { + "a": 4, + "d": 4 + }, + "examples/plot_estimate_covariance_matrix_baseline.py": { + "a": 5, + "d": 5 + }, + "examples/plot_estimate_covariance_matrix_raw.py": { + "a": 4, + "d": 4 + }, + "examples/plot_evoked_delayed_ssp.py": { + "a": 4, + "d": 4 + }, + "examples/plot_evoked_topomap.py": { + "a": 1, + "d": 1 + }, + "examples/plot_evoked_topomap_delayed_ssp.py": { + "a": 4, + "d": 4 + }, + "examples/plot_evoked_whitening.py": { + "a": 2, + "d": 2 + }, + "examples/plot_extract_events_from_raw.py": { + "a": 1, + "d": 1 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 4, + "d": 4 + }, + "examples/plot_from_raw_to_multiple_epochs_to_evoked.py": { + "a": 5, + "d": 5 + }, + "examples/plot_meg_eeg_fields_3d.py": { + "a": 1, + "d": 1 + }, + "examples/plot_megsim_data.py": { + "a": 3, + "d": 3 + }, + "examples/plot_megsim_data_single_trial.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_and_write_raw_data.py": { + "a": 4, + "d": 4 + }, + "examples/plot_read_epochs.py": { + "a": 4, + "d": 4 + }, + "examples/plot_read_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/plot_shift_evoked.py": { + "a": 5, + "d": 4 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 3, + "d": 3 + }, + "examples/plot_ssp_projs_topomaps.py": { + "a": 1, + "d": 1 + }, + "examples/plot_topo_channel_epochs_image.py": { + "a": 4, + "d": 4 + }, + "examples/plot_topo_compare_conditions.py": { + "a": 3, + "d": 3 + }, + "examples/plot_topo_customized.py": { + "a": 3, + "d": 4 + }, + "examples/plot_topography.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 4, + "d": 4 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 3, + "d": 3 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 5, + "d": 5 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 3, + "d": 3 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 3, + "d": 3 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 3, + "d": 3 + }, + "examples/realtime/rt_feedback_server.py": { + "a": 3, + "d": 3 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 4, + "d": 4 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 4, + "d": 4 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 3, + "d": 3 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 3, + "d": 3 + }, + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 4, + "d": 4 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 4, + "d": 4 + }, + "examples/stats/plot_fdr_stats_evoked.py": { + "a": 4, + 
"d": 4 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 4, + "d": 4 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 4, + "d": 3 + }, + "examples/time_frequency/plot_compute_source_psd_epochs.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_single_trial_spectra.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_tfr_topography.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/plot_time_frequency.py": { + "a": 3, + "d": 3 + }, + "mne/__init__.py": { + "a": 7, + "d": 3 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 4, + "d": 4 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 5, + "d": 5 + }, + "mne/commands/mne_browse_raw.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/constants.py": { + "a": 0, + "d": 0 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 9, + "d": 9 + }, + "mne/decoding/classifier.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_classifier.py": { + "a": 10, + "d": 10 + }, + "mne/decoding/tests/test_csp.py": { + "a": 6, + "d": 6 + }, + "mne/decoding/tests/test_ems.py": { + "a": 4, + "d": 7 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/time_gen.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 14, + "d": 13 + }, + "mne/event.py": { + "a": 6, + "d": 6 + }, + "mne/fiff/__init__.py": { + "a": 65, + "d": 18 + }, + "mne/forward/_compute_forward.py": { + "a": 3, + "d": 3 + }, + "mne/forward/_field_interpolation.py": { + "a": 4, + "d": 4 + }, + "mne/forward/_make_forward.py": { + "a": 5, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 12, + "d": 12 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 3, + "d": 2 + }, + "mne/forward/tests/test_forward.py": { + "a": 4, + "d": 4 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 8, + "d": 8 + }, + "mne/gui/_coreg_gui.py": { + "a": 1, + "d": 2 + }, + "mne/gui/_fiducials_gui.py": { + "a": 2, + "d": 1 + }, + "mne/gui/_file_traits.py": { + "a": 2, + "d": 1 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_marker_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 2, + "d": 2 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 2, + "d": 2 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 33, + "d": 0 + 
}, + "mne/io/array/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 3, + "d": 3 + }, + "mne/io/base.py": { + "a": 4, + "d": 4 + }, + "mne/io/brainvision/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 3 + }, + "mne/io/brainvision/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test.eeg": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test.vhdr": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test.vmrk": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_bin_raw.fif": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_elp.txt": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 4, + "d": 2 + }, + "mne/io/bti/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/read.py": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/exported4D_linux.fif": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/exported4D_solaris.fif": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/test_config_linux": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/test_config_solaris": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/test_hs_linux": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/test_hs_solaris": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/test_pdf_linux": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/test_pdf_solaris": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 4, + "d": 3 + }, + "mne/io/bti/transforms.py": { + "a": 0, + "d": 0 + }, + "mne/io/channels.py": { + "a": 4, + "d": 4 + }, + "mne/io/compensator.py": { + "a": 1, + "d": 1 + }, + "mne/io/cov.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf.py": { + "a": 2, + "d": 2 + }, + "mne/io/diff.py": { + "a": 0, + "d": 0 + }, + "mne/io/edf/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/biosemi.hpts": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/test.bdf": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/test.edf": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/test_bdf_eeglab.mat": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/test_edf_eeglab.mat": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/test_eeglab.mat": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 4, + "d": 3 + }, + "mne/io/egi/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/egi/tests/data/test_egi.raw": { + "a": 0, + "d": 0 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 4, + "d": 2 + }, + "mne/io/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 4, + "d": 2 + }, + "mne/io/kit/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/kit/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 0, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 3 + }, + "mne/io/kit/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/sns.txt": { + "a": 
0, + "d": 0 + }, + "mne/io/kit/tests/data/test.sqd": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_Ykgw.mat": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_bin.fif": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_elp.txt": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_hsp.txt": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_mrk.sqd": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_mrk_post.sqd": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_mrk_pre.sqd": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/trans-sample.fif": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 4, + "d": 3 + }, + "mne/io/matrix.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 9, + "d": 10 + }, + "mne/io/open.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 2, + "d": 2 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/fsaverage-fiducials.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/process_raw.sh": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/sample-audvis-raw-trans.txt": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/small-src.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-ave-2.log": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-ave.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-ave.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-ave.log": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-cov.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-cov.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-eve-1.eve": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-eve-1.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-eve-old-style.eve": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-eve.eve": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-eve.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-eve.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-km-cov.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-lh.label": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-mpr-eve.eve": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-nf-ave.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-no-reject.ave": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-rh.label": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test.ave": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test.cov": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_bads.txt": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_chpi_raw_hp.txt": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_chpi_raw_sss.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_ctf_comp_raw.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_ctf_raw.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_empty_room.cov": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_erm-cov.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_ica.lout": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_keepmean.cov": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_proj.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_proj.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_raw-eve.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_raw.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_raw.fif.gz": { + "a": 0, + "d": 0 + }, + 
"mne/io/tests/data/test_raw.lout": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_withbads_raw.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_wrong_bads.txt": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_channels.py": { + "a": 4, + "d": 4 + }, + "mne/io/tests/test_compensator.py": { + "a": 3, + "d": 2 + }, + "mne/io/tests/test_evoked.py": { + "a": 3, + "d": 4 + }, + "mne/io/tests/test_meas_info.py": { + "a": 8, + "d": 7 + }, + "mne/io/tests/test_pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/tree.py": { + "a": 6, + "d": 7 + }, + "mne/io/write.py": { + "a": 1, + "d": 1 + }, + "mne/layouts/layout.py": { + "a": 4, + "d": 3 + }, + "mne/layouts/tests/test_layout.py": { + "a": 8, + "d": 7 + }, + "mne/minimum_norm/inverse.py": { + "a": 9, + "d": 9 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 6, + "d": 6 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/pick.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 12, + "d": 12 + }, + "mne/preprocessing/maxfilter.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ssp.py": { + "a": 8, + "d": 7 + }, + "mne/preprocessing/stim.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 18, + "d": 18 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 2, + "d": 2 + }, + "mne/proj.py": { + "a": 9, + "d": 9 + }, + "mne/realtime/client.py": { + "a": 4, + "d": 4 + }, + "mne/realtime/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 5, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 62, + "d": 7 + }, + "mne/source_space.py": { + "a": 5, + "d": 5 + }, + "mne/surface.py": { + "a": 8, + "d": 8 + }, + "mne/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 21, + "d": 21 + }, + "mne/tests/test_event.py": { + "a": 6, + "d": 6 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_misc.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 30, + "d": 25 + }, + "mne/tests/test_viz.py": { + "a": 15, + "d": 15 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 8, + "d": 8 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 5, + "d": 5 + }, + "mne/transforms.py": { + "a": 5, + "d": 8 + }, + "mne/utils.py": { + "a": 11, + "d": 3 + }, + "mne/viz.py": { + "a": 7, + "d": 6 + }, + "setup.py": { + "a": 9, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12730.json b/doc/sphinxext/prs/12730.json new 
file mode 100644 index 00000000000..5310953bf7a --- /dev/null +++ b/doc/sphinxext/prs/12730.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "43fb9d8c28193ad2c0e11c9282e65338e02f98b7", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/devel/12730.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/nirx/_localized_abbr.py": { + "a": 1, + "d": 0 + }, + "mne/io/nirx/nirx.py": { + "a": 10, + "d": 8 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12731.json b/doc/sphinxext/prs/12731.json new file mode 100644 index 00000000000..adbe5fafa9e --- /dev/null +++ b/doc/sphinxext/prs/12731.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "5843aa5c3f416dcc05495e9ddbbbd549596b6680", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".gitignore": { + "a": 5, + "d": 4 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/install/mne_tools_suite.rst": { + "a": 5, + "d": 34 + }, + "doc/sphinxext/related_software.py": { + "a": 235, + "d": 0 + }, + "pyproject.toml": { + "a": 1, + "d": 0 + }, + "tools/circleci_bash_env.sh": { + "a": 1, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12732.json b/doc/sphinxext/prs/12732.json new file mode 100644 index 00000000000..94a7f290885 --- /dev/null +++ b/doc/sphinxext/prs/12732.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3987aa73a67251dc7b49f10f7db56e77040aaef8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12733.json b/doc/sphinxext/prs/12733.json new file mode 100644 index 00000000000..3cf2598ea3b --- /dev/null +++ b/doc/sphinxext/prs/12733.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1694d43b4ecfc3e680d4d5921b42ff043ecf78c1", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12733.newfeature.rst": { + "a": 3, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12734.json b/doc/sphinxext/prs/12734.json new file mode 100644 index 00000000000..6872b920bb8 --- /dev/null +++ b/doc/sphinxext/prs/12734.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5ee0f2fcd42021d29e3cd92279e83b51cd4fc4a7", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12734.newfeature.rst": { + "a": 3, + "d": 0 + }, + "mne/datasets/_fetch.py": { + "a": 3, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 13, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12735.json b/doc/sphinxext/prs/12735.json new file mode 100644 index 00000000000..b46ab253b34 --- /dev/null +++ b/doc/sphinxext/prs/12735.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "64f8eaf453b28470af514660ae7a74549b720412", + 
"authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12735.bugfix.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.pyi": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12736.json b/doc/sphinxext/prs/12736.json new file mode 100644 index 00000000000..48385de5556 --- /dev/null +++ b/doc/sphinxext/prs/12736.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f5dcc6c1d70e0099ef20b5fc09ebbfc9e5a65268", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "tutorials/epochs/30_epochs_metadata.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12737.json b/doc/sphinxext/prs/12737.json new file mode 100644 index 00000000000..4a017ede19c --- /dev/null +++ b/doc/sphinxext/prs/12737.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b4e43315dcdb12e86c1f11e8111f8e6c0217f147", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/utils/_typing.py": { + "a": 10, + "d": 0 + }, + "mne/utils/mixin.py": { + "a": 1, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12738.json b/doc/sphinxext/prs/12738.json new file mode 100644 index 00000000000..63dabdaa5db --- /dev/null +++ b/doc/sphinxext/prs/12738.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "7ecd1416df18645957fd14198635294f824d149f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 3, + "d": 2 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/html_templates/_templates.py": { + "a": 5, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 9, + "d": 7 + }, + "mne/report/report.py": { + "a": 3, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 7, + "d": 6 + }, + "mne/viz/ui_events.py": { + "a": 8, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12741.json b/doc/sphinxext/prs/12741.json new file mode 100644 index 00000000000..a466ef6912e --- /dev/null +++ b/doc/sphinxext/prs/12741.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bc63345e04c29caeed03c88e8765e6a5e906f90c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12742.json b/doc/sphinxext/prs/12742.json new file mode 100644 index 00000000000..c4ef13fac6c --- /dev/null +++ b/doc/sphinxext/prs/12742.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "4e2eaf119e9148c2bf6589d995f1807e861a55d9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 2 + }, + "azure-pipelines.yml": { + "a": 12, + "d": 19 + }, + "doc/changes/devel/12742.dependency.rst": { + "a": 2, + "d": 0 + }, + "doc/install/manual_install.rst": { + "a": 1, + "d": 1 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 0, + "d": 4 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 5 + }, + "tools/install_pre_requirements.sh": { + "a": 8, + "d": 7 + }, + "tools/pyqt6_requirements.txt": { + "a": 0, + "d": 2 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 1, + "d": 1 
+ }, + "tutorials/inverse/40_mne_fixed_free.py": { + "a": 24, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12743.json b/doc/sphinxext/prs/12743.json new file mode 100644 index 00000000000..954e464685d --- /dev/null +++ b/doc/sphinxext/prs/12743.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a21b6da33b9459f24a2b66ee60fef64df770f270", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12744.json b/doc/sphinxext/prs/12744.json new file mode 100644 index 00000000000..524fc64b24e --- /dev/null +++ b/doc/sphinxext/prs/12744.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "790be2d0f1fc6f32dde6f482ee8ffa91a677dea8", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12747.json b/doc/sphinxext/prs/12747.json new file mode 100644 index 00000000000..648e05d3afc --- /dev/null +++ b/doc/sphinxext/prs/12747.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "b08f759efd08fa060e68f0bdee45f45e3fc5347a", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/12747.newfeature.rst": { + "a": 3, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 202, + "d": 70 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 294, + "d": 32 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 4 + }, + "mne/utils/spectrum.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12749.json b/doc/sphinxext/prs/12749.json new file mode 100644 index 00000000000..f9d8e227c69 --- /dev/null +++ b/doc/sphinxext/prs/12749.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3cc01923a591b933a0d6ecef5a12b8e185495eef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/get_minimal_commands.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12751.json b/doc/sphinxext/prs/12751.json new file mode 100644 index 00000000000..5ae4c4a3560 --- /dev/null +++ b/doc/sphinxext/prs/12751.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4b538b6d84e5e688f2000f37a83f8861102a97c3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/rank.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_rank.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12752.json b/doc/sphinxext/prs/12752.json new file mode 100644 index 00000000000..ddc1cac607f --- /dev/null +++ b/doc/sphinxext/prs/12752.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6e4cadf61b748415fd3b8e27e3326520de2cc981", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/report/report.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12754.json b/doc/sphinxext/prs/12754.json new file mode 100644 index 
00000000000..7fdea9c9741 --- /dev/null +++ b/doc/sphinxext/prs/12754.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b6a24509354128baedebb9ad1d1214254bb6cdad", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12754.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 27, + "d": 35 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12757.json b/doc/sphinxext/prs/12757.json new file mode 100644 index 00000000000..82da5d95d60 --- /dev/null +++ b/doc/sphinxext/prs/12757.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "aa0b303ce87bbd32d3496812f74c4264c5268ba9", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 1 + }, + "doc/install/advanced.rst": { + "a": 0, + "d": 1 + }, + "doc/install/check_installation.rst": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/eyetracking/_pupillometry.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12758.json b/doc/sphinxext/prs/12758.json new file mode 100644 index 00000000000..05d94c585eb --- /dev/null +++ b/doc/sphinxext/prs/12758.json @@ -0,0 +1,363 @@ +{ + "merge_commit_sha": "e06921d11af1251c17807a1e4dca84176ca8679d", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "examples/inverse/compute_mne_inverse_volume.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/define_target_events.py": { + "a": 3, + "d": 3 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/_digitization.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/utils.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/write.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 3, + "d": 3 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 5, + "d": 5 + }, + "mne/coreg.py": { + "a": 2, + "d": 6 + }, + "mne/datasets/limo/limo.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ems.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/search_light.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 5, + "d": 5 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 4, + "d": 4 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 3 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 3, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 3 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 5, + "d": 3 + }, + 
"mne/minimum_norm/spatial_resolution.py": { + "a": 4, + "d": 6 + }, + "mne/morph.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ctps_.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/xdawn.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/metrics/metrics.py": { + "a": 2, + "d": 4 + }, + "mne/simulation/raw.py": { + "a": 3, + "d": 4 + }, + "mne/source_space/_source_space.py": { + "a": 2, + "d": 2 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 3, + "d": 5 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 1 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_freesurfer.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stft.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/csd.py": { + "a": 3, + "d": 4 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 6, + "d": 13 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 3, + "d": 3 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/linalg.py": { + "a": 4, + "d": 6 + }, + "mne/utils/mixin.py": { + "a": 2, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 4, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 11, + "d": 9 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 6, + "d": 8 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 3 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 4, + "d": 4 + }, + "tutorials/forward/30_forward.py": { + "a": 2, + "d": 2 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/90_phantom_4DBTi.py": { + "a": 6, + "d": 2 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 2, + "d": 2 + }, + "tutorials/simulation/80_dics.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12759.json b/doc/sphinxext/prs/12759.json new file mode 100644 index 00000000000..e2c2fa6d839 --- /dev/null +++ b/doc/sphinxext/prs/12759.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "dd6e155fb47812139da33bb9226cc0e6e5792427", + "authors": [ + { + "n": "Scott 
Huberty", + "e": null + }, + { + "n": "Sammi Chekroud", + "e": "sammi.chekroud@psy.ox.ac.uk" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12759.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/eyetracking/_pupillometry.py": { + "a": 7, + "d": 3 + }, + "mne/preprocessing/eyetracking/tests/test_pupillometry.py": { + "a": 15, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12760.json b/doc/sphinxext/prs/12760.json new file mode 100644 index 00000000000..03edd63705b --- /dev/null +++ b/doc/sphinxext/prs/12760.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a362e5ac3b2965d9cf129b9461e2907747618c30", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12760.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 11, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12761.json b/doc/sphinxext/prs/12761.json new file mode 100644 index 00000000000..9c1f040e644 --- /dev/null +++ b/doc/sphinxext/prs/12761.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "868746b427f8123dd0e6b4594a2bd18c7fa49331", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12763.json b/doc/sphinxext/prs/12763.json new file mode 100644 index 00000000000..c228053e640 --- /dev/null +++ b/doc/sphinxext/prs/12763.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c339bccf730eb1220dba70055deb312cd6913d05", + "authors": [ + { + "n": "Farzin Negahbani", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12763.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 6, + "d": 4 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12765.json b/doc/sphinxext/prs/12765.json new file mode 100644 index 00000000000..339afc7d166 --- /dev/null +++ b/doc/sphinxext/prs/12765.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "0f6e7d61c3e016385b71e67e98ae8a1d5c5e1c36", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 4, + "d": 4 + }, + ".pre-commit-config.yaml": { + "a": 8, + "d": 0 + }, + "doc/Makefile": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 8, + "d": 324 + }, + "doc/changes/v1.7.rst": { + "a": 4, + "d": 4 + }, + "doc/conf.py": { + "a": 5, + "d": 0 + }, + "doc/sphinxext/related_software.py": { + "a": 10, + "d": 1 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 13, + "d": 2 + }, + "mne/io/neuralynx/tests/test_neuralynx.py": { + "a": 2, + "d": 1 + }, + "pyproject.toml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1277.json b/doc/sphinxext/prs/1277.json new file mode 100644 index 00000000000..690e031f54c --- /dev/null +++ b/doc/sphinxext/prs/1277.json 
@@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "8310aaf28f60946c7232c2001e0eccb71fb22ee6", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 6, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 66, + "d": 0 + }, + "mne/io/array/array.py": { + "a": 8, + "d": 0 + }, + "mne/io/array/tests/test_array.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 43, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12770.json b/doc/sphinxext/prs/12770.json new file mode 100644 index 00000000000..52474ac54ab --- /dev/null +++ b/doc/sphinxext/prs/12770.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0b95d2869bad2fa20568b4f8e8483f14a5d74735", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/morph_volume_stc.py": { + "a": 3, + "d": 3 + }, + "tools/circleci_dependencies.sh": { + "a": 7, + "d": 2 + }, + "tools/install_pre_requirements.sh": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12771.json b/doc/sphinxext/prs/12771.json new file mode 100644 index 00000000000..afbdc7d6a66 --- /dev/null +++ b/doc/sphinxext/prs/12771.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "e42e1d6a5ff1c3c807e3e6ac3f79d63be589a7f2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12771.apichange.rst": { + "a": 4, + "d": 0 + }, + "examples/inverse/morph_volume_stc.py": { + "a": 6, + "d": 8 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 49, + "d": 48 + }, + "mne/datasets/_infant/base.py": { + "a": 6, + "d": 7 + }, + "mne/datasets/_phantom/base.py": { + "a": 5, + "d": 6 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 45, + "d": 39 + }, + "mne/datasets/utils.py": { + "a": 10, + "d": 10 + }, + "mne/utils/config.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 7, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12774.json b/doc/sphinxext/prs/12774.json new file mode 100644 index 00000000000..8bc18f8e221 --- /dev/null +++ b/doc/sphinxext/prs/12774.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "879361513bdd1c14ca4e223657517fe85b6085b0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 5 + }, + ".git-blame-ignore-revs": { + "a": 14, + "d": 7 + }, + ".github/workflows/credit.yml": { + "a": 43, + "d": 0 + }, + ".gitignore": { + "a": 1, + "d": 1 + }, + ".mailmap": { + "a": 73, + "d": 34 + }, + ".pre-commit-config.yaml": { + "a": 8, + "d": 5 + }, + "doc/.gitignore": { + "a": 1, + "d": 0 + }, + "doc/Makefile": { + "a": 1, + "d": 4 + }, + "doc/_templates/sidebar-quicklinks.html": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12774.other.rst": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 3, + "d": 0 + }, + "doc/credit.rst": { + "a": 12, + "d": 0 + }, + "doc/sphinxext/prs/12779.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/update_credit_rst.py": { + "a": 425, + "d": 0 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + }, + "pyproject.toml": { + "a": 3, + "d": 0 + }, + "tools/check_mne_location.py": { + "a": 1, + "d": 0 + }, + "tools/dev/check_steering_committee.py": { + "a": 3, + "d": 2 + }, + "tools/dev/ensure_headers.py": { + "a": 1, + "d": 0 + }, + "tools/dev/gen_css_for_mne.py": { + "a": 2, + "d": 1 + }, + 
"tools/dev/generate_pyi_files.py": { + "a": 1, + "d": 0 + }, + "tools/dev/update_credit_json.py": { + "a": 88, + "d": 0 + }, + "tools/generate_codemeta.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12776.json b/doc/sphinxext/prs/12776.json new file mode 100644 index 00000000000..5bbb82f7be7 --- /dev/null +++ b/doc/sphinxext/prs/12776.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4dcd4d16f961d4a614c105f2d28a3fd912a7c1b8", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 15, + "d": 9 + }, + "doc/_templates/copyright.html": { + "a": 1, + "d": 1 + }, + "doc/_templates/sidebar-quicklinks.html": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12777.json b/doc/sphinxext/prs/12777.json new file mode 100644 index 00000000000..1eaa8d89c67 --- /dev/null +++ b/doc/sphinxext/prs/12777.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "855ca5ea380498df0a3e1740a7b5134625e926e1", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "tools/check_qt_import.sh": { + "a": 4, + "d": 1 + }, + "tools/github_actions_env_vars.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12778.json b/doc/sphinxext/prs/12778.json new file mode 100644 index 00000000000..995d907f6ae --- /dev/null +++ b/doc/sphinxext/prs/12778.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6da505591cd76bc7f6579154475c4abb2a5cbb95", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/contrib_avatars.py": { + "a": 4, + "d": 1 + }, + "doc/sphinxext/gen_commands.py": { + "a": 1, + "d": 14 + }, + "mne/utils/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 13, + "d": 0 + }, + "mne/utils/tests/test_numerics.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12779.json b/doc/sphinxext/prs/12779.json new file mode 100644 index 00000000000..584d8132188 --- /dev/null +++ b/doc/sphinxext/prs/12779.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b8b168088cb474f27833f5f9db9d60abe00dca83", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/prs/1.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/3732.json": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1278.json b/doc/sphinxext/prs/1278.json new file mode 100644 index 00000000000..4b5948f045d --- /dev/null +++ b/doc/sphinxext/prs/1278.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e072dd2b536ba6ec4f49c9c03ad1e59370492673", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/io/evoked.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12781.json b/doc/sphinxext/prs/12781.json new file mode 100644 index 00000000000..a2b21519710 --- /dev/null +++ b/doc/sphinxext/prs/12781.json @@ -0,0 +1,2911 @@ +{ + "merge_commit_sha": "ee64eba6f345e895e3d5e7d2804fa6aa2dac2e6d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/actions/rename_towncrier/rename_towncrier.py": { + "a": 4, + "d": 0 + }, + 
".github/workflows/autofix.yml": { + "a": 3, + "d": 2 + }, + ".github/workflows/credit.yml": { + "a": 0, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/devel/12781.other.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 8, + "d": 5 + }, + "doc/conftest.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/contrib_avatars.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/flow_diagram.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/gen_commands.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/gen_names.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/gh_substitutions.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/mne_doc_utils.py": { + "a": 4, + "d": 0 + }, + "doc/sphinxext/mne_substitutions.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/newcontrib_substitutions.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/related_software.py": { + "a": 4, + "d": 0 + }, + "doc/sphinxext/unit_role.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/update_credit_rst.py": { + "a": 4, + "d": 0 + }, + "examples/datasets/kernel_phantom.py": { + "a": 2, + "d": 0 + }, + "examples/datasets/opm_data.py": { + "a": 2, + "d": 0 + }, + "examples/inverse/custom_inverse_solver.py": { + "a": 2, + "d": 0 + }, + "examples/io/read_neo_format.py": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/contralateral_referencing.py": { + "a": 2, + "d": 0 + }, + "examples/visualization/eyetracking_plot_heatmap.py": { + "a": 2, + "d": 0 + }, + "examples/visualization/mne_helmet.py": { + "a": 2, + "d": 0 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + }, + "logo/generate_mne_logos.py": { + "a": 1, + "d": 2 + }, + "mne/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/__main__.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/_fiff/_digitization.py": { + "a": 1, + "d": 6 + }, + "mne/_fiff/compensator.py": { + "a": 2, + "d": 0 + }, + "mne/_fiff/constants.py": { + "a": 1, + "d": 3 + }, + "mne/_fiff/ctf_comp.py": { + "a": 1, + "d": 4 + }, + "mne/_fiff/matrix.py": { + "a": 1, + "d": 3 + }, + "mne/_fiff/meas_info.py": { + "a": 1, + "d": 5 + }, + "mne/_fiff/open.py": { + "a": 1, + "d": 3 + }, + "mne/_fiff/pick.py": { + "a": 1, + "d": 4 + }, + "mne/_fiff/proc_history.py": { + "a": 1, + "d": 2 + }, + "mne/_fiff/proj.py": { + "a": 1, + "d": 5 + }, + "mne/_fiff/reference.py": { + "a": 1, + "d": 4 + }, + "mne/_fiff/tag.py": { + "a": 1, + "d": 3 + }, + "mne/_fiff/tests/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/_fiff/tests/test_compensator.py": { + "a": 1, + "d": 2 + }, + "mne/_fiff/tests/test_constants.py": { + "a": 1, + "d": 2 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 1, + "d": 3 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 2, + "d": 0 + }, + "mne/_fiff/tests/test_proc_history.py": { + "a": 1, + "d": 2 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 1, + "d": 4 + }, + "mne/_fiff/tests/test_show_fiff.py": { + "a": 1, + "d": 2 + }, + "mne/_fiff/tests/test_utils.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tests/test_what.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_write.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tree.py": { + "a": 1, + "d": 3 + }, + "mne/_fiff/utils.py": { + "a": 1, + "d": 9 + }, + "mne/_fiff/what.py": { + "a": 1, + "d": 2 + }, + "mne/_fiff/write.py": { + "a": 1, + "d": 3 + }, + "mne/_freesurfer.py": { + "a": 2, + "d": 3 + }, + "mne/_ola.py": { + "a": 1, + "d": 2 + }, + "mne/annotations.py": { + "a": 1, + "d": 3 + }, + "mne/baseline.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/__init__.py": { + "a": 2, + "d": 0 + }, + 
"mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 4 + }, + "mne/beamformer/_dics.py": { + "a": 2, + "d": 5 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 4 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/tests/test_external.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 0 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/tests/test_resolution_matrix.py": { + "a": 2, + "d": 2 + }, + "mne/bem.py": { + "a": 1, + "d": 5 + }, + "mne/channels/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 1, + "d": 11 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 2, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 10 + }, + "mne/channels/data/neighbors/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 3 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 9 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 11 + }, + "mne/channels/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 3 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 5 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 3 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 1, + "d": 3 + }, + "mne/channels/tests/test_unify_bads.py": { + "a": 2, + "d": 0 + }, + "mne/chpi.py": { + "a": 18, + "d": 18 + }, + "mne/commands/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_anonymize.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_browse_raw.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 3, + "d": 10 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 2, + "d": 4 + }, + "mne/commands/mne_compare_fiff.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 2, + "d": 3 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 2, + "d": 3 + }, + "mne/commands/mne_coreg.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_flash_bem.py": { + "a": 2, + "d": 3 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 7 + }, + "mne/commands/mne_prepare_bem_model.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_report.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_setup_forward_model.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_setup_source_space.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_show_fiff.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_show_info.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_surf2bem.py": { + "a": 2, + "d": 3 + }, + "mne/commands/mne_sys_info.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_what.py": { + "a": 1, + "d": 2 + }, + "mne/commands/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 0 + }, + "mne/commands/utils.py": { + "a": 1, + "d": 3 + }, + "mne/conftest.py": { + "a": 1, + "d": 2 + }, + "mne/coreg.py": { + "a": 1, + "d": 2 + }, + "mne/cov.py": { + "a": 1, + "d": 4 + }, + 
"mne/cuda.py": { + "a": 1, + "d": 2 + }, + "mne/data/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/_fake/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/_fake/_fake.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/_fetch.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/_fsaverage/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/_infant/__init__.py": { + "a": 4, + "d": 0 + }, + "mne/datasets/_infant/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/_phantom/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/_phantom/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/config.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/eegbci/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/eegbci/tests/test_eegbci.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/epilepsy_ecog/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/epilepsy_ecog/_data.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/erp_core/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/erp_core/erp_core.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/eyelink/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/eyelink/eyelink.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/fieldtrip_cmc/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py": { + "a": 2, + "d": 3 + }, + "mne/datasets/fnirs_motor/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/fnirs_motor/fnirs_motor.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/hf_sef/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/kiloword/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/kiloword/kiloword.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/limo/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/limo/limo.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/misc/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/misc/_misc.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/mtrf/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/mtrf/mtrf.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/multimodal/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/multimodal/multimodal.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/opm/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/opm/opm.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/phantom_4dbti/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/phantom_4dbti/phantom_4dbti.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/phantom_kernel/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/phantom_kernel/phantom_kernel.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/phantom_kit/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/phantom_kit/phantom_kit.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/refmeg_noise/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/refmeg_noise/refmeg_noise.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sample/__init__.py": { + "a": 2, + "d": 0 + }, 
+ "mne/datasets/sample/sample.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/sleep_physionet/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/somato/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/somato/somato.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/spm_face/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/ssvep/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/ssvep/ssvep.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/testing/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/testing/_testing.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/ucl_opm_auditory/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 8 + }, + "mne/datasets/visual_92_categories/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/visual_92_categories/visual_92_categories.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 5 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 6 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 4 + }, + "mne/decoding/mixin.py": { + "a": 3, + "d": 0 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 3 + }, + "mne/decoding/search_light.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/ssd.py": { + "a": 1, + "d": 3 + }, + "mne/decoding/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_base.py": { + "a": 1, + "d": 3 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 5 + }, + "mne/decoding/tests/test_ems.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 1, + "d": 3 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 3 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 2, + "d": 3 + }, + "mne/decoding/time_frequency.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 4 + }, + "mne/defaults.py": { + "a": 1, + "d": 4 + }, + "mne/dipole.py": { + "a": 1, + "d": 3 + }, + "mne/epochs.py": { + "a": 1, + "d": 7 + }, + "mne/event.py": { + "a": 1, + "d": 5 + }, + "mne/evoked.py": { + "a": 1, + "d": 7 + }, + "mne/export/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/export/_brainvision.py": { + "a": 1, + "d": 2 + }, + "mne/export/_edf.py": { + "a": 1, + "d": 2 + }, + "mne/export/_eeglab.py": { + "a": 1, + "d": 2 + }, + "mne/export/_egimff.py": { + "a": 1, + "d": 2 + }, + "mne/export/_export.py": { + "a": 1, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 2, + "d": 0 + }, + "mne/fixes.py": { + "a": 2, + "d": 4 + }, + "mne/forward/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 6 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 3 + }, + 
"mne/forward/_lead_dots.py": { + "a": 1, + "d": 4 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 5 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 4 + }, + "mne/forward/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 2, + "d": 0 + }, + "mne/forward/tests/test_forward.py": { + "a": 2, + "d": 0 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 7, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/gui/_coreg.py": { + "a": 2, + "d": 0 + }, + "mne/gui/_gui.py": { + "a": 1, + "d": 2 + }, + "mne/gui/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/gui/tests/test_coreg.py": { + "a": 1, + "d": 2 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 1, + "d": 2 + }, + "mne/html_templates/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/html_templates/_templates.py": { + "a": 1, + "d": 0 + }, + "mne/inverse_sparse/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 1, + "d": 2 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 1, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 3 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 3 + }, + "mne/inverse_sparse/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 1, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_debiasing.py": { + "a": 1, + "d": 3 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 3 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 1, + "d": 3 + }, + "mne/io/__init__.py": { + "a": 3, + "d": 3 + }, + "mne/io/_fiff_wrap.py": { + "a": 1, + "d": 0 + }, + "mne/io/_read_raw.py": { + "a": 1, + "d": 2 + }, + "mne/io/array/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 2 + }, + "mne/io/array/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 2 + }, + "mne/io/artemis123/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 2 + }, + "mne/io/artemis123/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 1, + "d": 2 + }, + "mne/io/artemis123/utils.py": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 1, + "d": 9 + }, + "mne/io/besa/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/besa/besa.py": { + "a": 2, + "d": 0 + }, + "mne/io/besa/tests/test_besa.py": { + "a": 2, + "d": 0 + }, + "mne/io/boxy/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/boxy/boxy.py": { + "a": 1, + "d": 2 + }, + "mne/io/boxy/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/boxy/tests/test_boxy.py": { + "a": 1, + "d": 2 + }, + "mne/io/brainvision/__init__.py": { + "a": 1, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 8 + }, + "mne/io/brainvision/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 2, + "d": 3 + }, + "mne/io/bti/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 9 + }, + "mne/io/bti/constants.py": { + "a": 1, + "d": 2 + }, + "mne/io/bti/read.py": { + "a": 1, + "d": 2 + }, + "mne/io/bti/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 2 + }, + "mne/io/cnt/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/cnt/_utils.py": { + "a": 1, + "d": 2 + }, + "mne/io/cnt/cnt.py": { + "a": 2, + "d": 3 + }, + "mne/io/cnt/tests/__init__.py": { + "a": 1, + "d": 0 + 
}, + "mne/io/cnt/tests/test_cnt.py": { + "a": 1, + "d": 3 + }, + "mne/io/constants.py": { + "a": 1, + "d": 2 + }, + "mne/io/ctf/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/ctf/constants.py": { + "a": 1, + "d": 3 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 3 + }, + "mne/io/ctf/eeg.py": { + "a": 1, + "d": 2 + }, + "mne/io/ctf/hc.py": { + "a": 1, + "d": 2 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 2 + }, + "mne/io/ctf/markers.py": { + "a": 1, + "d": 2 + }, + "mne/io/ctf/res4.py": { + "a": 1, + "d": 3 + }, + "mne/io/ctf/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 2 + }, + "mne/io/ctf/trans.py": { + "a": 1, + "d": 2 + }, + "mne/io/curry/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/curry/curry.py": { + "a": 1, + "d": 4 + }, + "mne/io/curry/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 1, + "d": 3 + }, + "mne/io/edf/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 8 + }, + "mne/io/edf/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 7 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 1, + "d": 3 + }, + "mne/io/eeglab/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/_eeglab.py": { + "a": 2, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 4 + }, + "mne/io/eeglab/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 4 + }, + "mne/io/egi/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 4 + }, + "mne/io/egi/egimff.py": { + "a": 2, + "d": 0 + }, + "mne/io/egi/events.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/general.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 2 + }, + "mne/io/eximia/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/eximia/eximia.py": { + "a": 1, + "d": 3 + }, + "mne/io/eximia/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/eximia/tests/test_eximia.py": { + "a": 1, + "d": 2 + }, + "mne/io/eyelink/__init__.py": { + "a": 1, + "d": 3 + }, + "mne/io/eyelink/_utils.py": { + "a": 2, + "d": 1 + }, + "mne/io/eyelink/eyelink.py": { + "a": 1, + "d": 4 + }, + "mne/io/eyelink/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 2, + "d": 0 + }, + "mne/io/fieldtrip/__init__.py": { + "a": 1, + "d": 4 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 1, + "d": 4 + }, + "mne/io/fieldtrip/tests/__init__.py": { + "a": 1, + "d": 4 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 2, + "d": 4 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 1, + "d": 4 + }, + "mne/io/fieldtrip/utils.py": { + "a": 2, + "d": 4 + }, + "mne/io/fiff/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 6 + }, + "mne/io/fiff/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 3 + }, + "mne/io/fil/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/fil/fil.py": { + "a": 1, + "d": 2 + }, + "mne/io/fil/sensors.py": { + "a": 1, + "d": 2 + }, + "mne/io/fil/tests/test_fil.py": { + "a": 1, + "d": 2 + }, + "mne/io/hitachi/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/hitachi/hitachi.py": { + "a": 1, + "d": 2 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 1, + "d": 2 + }, + "mne/io/kit/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/kit/constants.py": { + "a": 1, + "d": 3 + }, + 
"mne/io/kit/coreg.py": { + "a": 1, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 4 + }, + "mne/io/kit/tests/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 1, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 1, + "d": 2 + }, + "mne/io/nedf/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/nedf/nedf.py": { + "a": 2, + "d": 0 + }, + "mne/io/nedf/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/nedf/tests/test_nedf.py": { + "a": 2, + "d": 2 + }, + "mne/io/neuralynx/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 2, + "d": 0 + }, + "mne/io/neuralynx/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/neuralynx/tests/test_neuralynx.py": { + "a": 2, + "d": 0 + }, + "mne/io/nicolet/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 2 + }, + "mne/io/nicolet/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 1, + "d": 2 + }, + "mne/io/nihon/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/nihon/nihon.py": { + "a": 1, + "d": 2 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 1, + "d": 2 + }, + "mne/io/nirx/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/nirx/_localized_abbr.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/nirx.py": { + "a": 1, + "d": 2 + }, + "mne/io/nirx/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 1, + "d": 3 + }, + "mne/io/nsx/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/nsx/nsx.py": { + "a": 2, + "d": 2 + }, + "mne/io/nsx/tests/test_nsx.py": { + "a": 2, + "d": 2 + }, + "mne/io/persyst/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/persyst/persyst.py": { + "a": 2, + "d": 2 + }, + "mne/io/persyst/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 1, + "d": 2 + }, + "mne/io/pick.py": { + "a": 1, + "d": 2 + }, + "mne/io/snirf/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 2 + }, + "mne/io/snirf/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/tests/data/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/tests/test_apply_function.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 3 + }, + "mne/io/tests/test_read_raw.py": { + "a": 1, + "d": 2 + }, + "mne/label.py": { + "a": 1, + "d": 4 + }, + "mne/minimum_norm/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 1, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 4 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 1, + "d": 4 + }, + "mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 2, + "d": 3 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 1, + "d": 3 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 3 + }, + "mne/misc.py": { + "a": 1, + "d": 3 + }, + "mne/morph.py": { + "a": 1, + "d": 4 + }, + "mne/morph_map.py": { + "a": 1, + "d": 5 + }, + "mne/parallel.py": { + "a": 1, + "d": 2 + }, + 
"mne/preprocessing/__init__.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/_annotate_amplitude.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/_annotate_nan.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/_csd.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/_css.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/_lof.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/_regress.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/bads.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ctps_.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/eyetracking/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/eyetracking/_pupillometry.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/eyetracking/calibration.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/eyetracking/eyetracking.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/eyetracking/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/eyetracking/tests/test_calibration.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/eyetracking/tests/test_eyetracking.py": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/eyetracking/tests/test_pupillometry.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eyetracking/utils.py": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/hfc.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/ieeg/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ieeg/_projection.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ieeg/_volume.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ieeg/tests/test_volume.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/interpolate.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 5 + }, + "mne/preprocessing/nirs/__init__.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_optical_density.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_scalp_coupling_index.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/realign.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/stim.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/tests/test_annotate_amplitude.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_annotate_nan.py": { + "a": 1, + "d": 2 + }, + 
"mne/preprocessing/tests/test_artifact_detection.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_css.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ctps.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/tests/test_hfc.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_interpolate.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_lof.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_otp.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_peak_finder.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_realign.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 4 + }, + "mne/proj.py": { + "a": 1, + "d": 2 + }, + "mne/rank.py": { + "a": 2, + "d": 2 + }, + "mne/report/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 1, + "d": 4 + }, + "mne/report/tests/test_report.py": { + "a": 1, + "d": 3 + }, + "mne/simulation/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/simulation/_metrics.py": { + "a": 1, + "d": 3 + }, + "mne/simulation/evoked.py": { + "a": 2, + "d": 4 + }, + "mne/simulation/metrics/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/simulation/metrics/metrics.py": { + "a": 1, + "d": 5 + }, + "mne/simulation/metrics/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/simulation/metrics/tests/test_metrics.py": { + "a": 1, + "d": 3 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 4 + }, + "mne/simulation/source.py": { + "a": 1, + "d": 8 + }, + "mne/simulation/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 2 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 1, + "d": 3 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 4 + }, + "mne/simulation/tests/test_source.py": { + "a": 1, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 5 + }, + "mne/source_space/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/source_space/_source_space.py": { + "a": 1, + "d": 3 + }, + "mne/source_space/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 1, + "d": 3 + }, + "mne/stats/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/stats/_adjacency.py": { + "a": 1, + "d": 3 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 7 + }, + "mne/stats/erp.py": { + "a": 1, + "d": 0 + }, + "mne/stats/multi_comp.py": { + "a": 1, + "d": 5 + }, + "mne/stats/parametric.py": { + "a": 1, + "d": 4 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 2 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 6 + }, + "mne/stats/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/stats/tests/test_adjacency.py": { + "a": 1, + "d": 2 + }, 
+ "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 3 + }, + "mne/stats/tests/test_erp.py": { + "a": 4, + "d": 0 + }, + "mne/stats/tests/test_multi_comp.py": { + "a": 2, + "d": 0 + }, + "mne/stats/tests/test_parametric.py": { + "a": 2, + "d": 0 + }, + "mne/stats/tests/test_permutations.py": { + "a": 1, + "d": 2 + }, + "mne/stats/tests/test_regression.py": { + "a": 1, + "d": 4 + }, + "mne/surface.py": { + "a": 1, + "d": 5 + }, + "mne/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_defaults.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_event.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 5 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_freesurfer.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_import_nesting.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_line_endings.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_misc.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_morph.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_morph_map.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_ola.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_parallel.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_rank.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_read_vectorview_selection.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/_stft.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 4 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 3 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 4 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 3 + }, + "mne/time_frequency/spectrum.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 1, + "d": 4 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 1, + "d": 4 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 6 + }, + "mne/transforms.py": { + "a": 1, + "d": 3 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/utils/_bunch.py": { + "a": 2, + "d": 4 + }, + "mne/utils/_logging.py": { + "a": 2, + "d": 2 + }, + "mne/utils/_testing.py": { + "a": 2, + "d": 2 + }, + "mne/utils/_typing.py": { + "a": 4, + "d": 0 + }, + 
"mne/utils/check.py": { + "a": 2, + "d": 2 + }, + "mne/utils/config.py": { + "a": 2, + "d": 2 + }, + "mne/utils/dataframe.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 2 + }, + "mne/utils/fetching.py": { + "a": 2, + "d": 2 + }, + "mne/utils/linalg.py": { + "a": 2, + "d": 2 + }, + "mne/utils/misc.py": { + "a": 2, + "d": 2 + }, + "mne/utils/mixin.py": { + "a": 2, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 2, + "d": 3 + }, + "mne/utils/progressbar.py": { + "a": 2, + "d": 2 + }, + "mne/utils/spectrum.py": { + "a": 2, + "d": 0 + }, + "mne/utils/tests/test_bunch.py": { + "a": 1, + "d": 3 + }, + "mne/utils/tests/test_check.py": { + "a": 2, + "d": 3 + }, + "mne/utils/tests/test_config.py": { + "a": 2, + "d": 0 + }, + "mne/utils/tests/test_docs.py": { + "a": 2, + "d": 0 + }, + "mne/utils/tests/test_linalg.py": { + "a": 2, + "d": 2 + }, + "mne/utils/tests/test_logging.py": { + "a": 2, + "d": 0 + }, + "mne/utils/tests/test_misc.py": { + "a": 2, + "d": 0 + }, + "mne/utils/tests/test_mixin.py": { + "a": 1, + "d": 2 + }, + "mne/utils/tests/test_numerics.py": { + "a": 2, + "d": 0 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 1, + "d": 2 + }, + "mne/utils/tests/test_testing.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 7 + }, + "mne/viz/_3d_overlay.py": { + "a": 1, + "d": 3 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/__init__.py": { + "a": 1, + "d": 7 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 7 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 2, + "d": 4 + }, + "mne/viz/_brain/_scraper.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/colormap.py": { + "a": 1, + "d": 5 + }, + "mne/viz/_brain/surface.py": { + "a": 1, + "d": 6 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 6 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_brain/view.py": { + "a": 1, + "d": 6 + }, + "mne/viz/_dipole.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 2, + "d": 3 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_proj.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_scraper.py": { + "a": 1, + "d": 2 + }, + "mne/viz/backends/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 1, + "d": 4 + }, + "mne/viz/backends/_notebook.py": { + "a": 1, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 5 + }, + "mne/viz/backends/_qt.py": { + "a": 1, + "d": 4 + }, + "mne/viz/backends/_utils.py": { + "a": 2, + "d": 5 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 5 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 1, + "d": 5 + }, + "mne/viz/backends/tests/test_abstract.py": { + "a": 1, + "d": 2 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 1, + "d": 5 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 1, + "d": 5 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 4 + }, + "mne/viz/conftest.py": { + "a": 1, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 9 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 8 + }, + "mne/viz/evoked_field.py": { + "a": 2, + "d": 0 + }, + "mne/viz/eyetracking/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/eyetracking/heatmap.py": { + "a": 1, + "d": 2 + }, + "mne/viz/eyetracking/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/eyetracking/tests/test_heatmap.py": { + "a": 1, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 5 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 7 + }, + "mne/viz/montage.py": { + "a": 2, + "d": 0 + }, + 
"mne/viz/raw.py": { + "a": 1, + "d": 4 + }, + "mne/viz/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 7 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 1, + "d": 7 + }, + "mne/viz/tests/test_circle.py": { + "a": 1, + "d": 4 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 7 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 9 + }, + "mne/viz/tests/test_figure.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 3 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 7 + }, + "mne/viz/tests/test_montage.py": { + "a": 1, + "d": 4 + }, + "mne/viz/tests/test_proj.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_scraper.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 6 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 6 + }, + "mne/viz/tests/test_ui_events.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_utils.py": { + "a": 1, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 8 + }, + "mne/viz/ui_events.py": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 9 + }, + "tools/check_mne_location.py": { + "a": 2, + "d": 0 + }, + "tools/dev/check_steering_committee.py": { + "a": 9, + "d": 4 + }, + "tools/dev/ensure_headers.py": { + "a": 125, + "d": 51 + }, + "tools/dev/gen_css_for_mne.py": { + "a": 1, + "d": 2 + }, + "tools/dev/generate_pyi_files.py": { + "a": 2, + "d": 0 + }, + "tools/dev/update_credit_json.py": { + "a": 4, + "d": 0 + }, + "tools/generate_codemeta.py": { + "a": 2, + "d": 0 + }, + "tools/vulture_allowlist.py": { + "a": 11, + "d": 1 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/50_epochs_to_data_frame.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 2, + "d": 0 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 2, + "d": 0 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 2, + "d": 0 + }, + "tutorials/evoked/30_eeg_erp.py": { + "a": 2, + "d": 0 + }, + "tutorials/evoked/40_whitened.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/10_background_freesurfer.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/30_forward.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/10_overview.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/15_inplace.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/30_info.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 0 + }, + "tutorials/inverse/10_stc_class.py": { + "a": 2, + "d": 0 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 2, + "d": 0 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 2, + "d": 0 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 2, + "d": 0 + }, + 
"tutorials/inverse/60_visualize_stc.py": { + "a": 2, + "d": 0 + }, + "tutorials/io/10_reading_meg_data.py": { + "a": 2, + "d": 0 + }, + "tutorials/io/20_reading_eeg_data.py": { + "a": 2, + "d": 0 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 2, + "d": 0 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 2, + "d": 0 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/80_opm_processing.py": { + "a": 2, + "d": 0 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 2, + "d": 0 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 2, + "d": 0 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 2, + "d": 0 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 2, + "d": 0 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 2, + "d": 0 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 2, + "d": 0 + }, + "tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 2, + "d": 0 + }, + "tutorials/time-freq/10_spectrum_class.py": { + "a": 2, + "d": 0 + }, + "tutorials/visualization/20_ui_events.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12783.json b/doc/sphinxext/prs/12783.json new file mode 100644 index 00000000000..3d4f0664e07 --- /dev/null +++ b/doc/sphinxext/prs/12783.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "01a1c1e40e81ef22ec8ee4d1564c3bdf87adfc88", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12786.json b/doc/sphinxext/prs/12786.json new file mode 100644 index 00000000000..fbe4032ee0f --- /dev/null +++ b/doc/sphinxext/prs/12786.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c9f8ee2998e6a47d667076a9c76ee48da480e4e1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12787.json b/doc/sphinxext/prs/12787.json new file mode 100644 index 00000000000..045dd24450f --- /dev/null +++ b/doc/sphinxext/prs/12787.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "828953e0ba584c149bcd2d6898d3599af1b30f40", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/12787.other.rst": { + "a": 1, + "d": 0 + }, + "mne/utils/config.py": { + "a": 55, 
+ "d": 5 + }, + "mne/utils/tests/test_config.py": { + "a": 1, + "d": 1 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12788.json b/doc/sphinxext/prs/12788.json new file mode 100644 index 00000000000..787c8383b76 --- /dev/null +++ b/doc/sphinxext/prs/12788.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "dcb05a22cff16539e5753496668fa0a0b14b6c9a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 35, + "d": 10 + }, + "mne/html_templates/repr/_acquisition.html.jinja": { + "a": 13, + "d": 23 + }, + "mne/html_templates/repr/_channels.html.jinja": { + "a": 7, + "d": 17 + }, + "mne/html_templates/repr/_filters.html.jinja": { + "a": 6, + "d": 16 + }, + "mne/html_templates/repr/_general.html.jinja": { + "a": 8, + "d": 18 + }, + "mne/html_templates/repr/epochs.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/evoked.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/forward.html.jinja": { + "a": 3, + "d": 3 + }, + "mne/html_templates/repr/ica.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/info.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/inverse_operator.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/raw.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/spectrum.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/static/_section_header_row.html.jinja": { + "a": 12, + "d": 0 + }, + "mne/html_templates/repr/static/repr.css": { + "a": 95, + "d": 93 + }, + "mne/html_templates/repr/static/repr.js": { + "a": 17, + "d": 29 + }, + "mne/html_templates/repr/tfr.html.jinja": { + "a": 1, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 2 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 1, + "d": 1 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12792.json b/doc/sphinxext/prs/12792.json new file mode 100644 index 00000000000..df21223b9d3 --- /dev/null +++ b/doc/sphinxext/prs/12792.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "ab69d3e235de846b53ac2052b33faddce9ffd4f9", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/api/reading_raw_data.rst": { + "a": 14, + "d": 13 + }, + "doc/changes/devel/12792.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/io/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/io/_read_raw.py": { + "a": 34, + "d": 10 + }, + "mne/io/ant/__init__.py": { + "a": 5, + "d": 0 + }, + "mne/io/ant/ant.py": { + "a": 293, + "d": 0 + }, + "mne/io/ant/tests/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/io/ant/tests/test_ant.py": { + "a": 138, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 18, + "d": 15 + }, + "mne/io/nsx/nsx.py": { + "a": 8, + "d": 5 + }, + "mne/io/tests/test_read_raw.py": { + "a": 58, + "d": 1 + }, + "mne/tests/test_import_nesting.py": { + "a": 1, + "d": 1 + }, + "pyproject.toml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12793.json b/doc/sphinxext/prs/12793.json new file mode 100644 index 00000000000..90d06e08527 --- /dev/null +++ b/doc/sphinxext/prs/12793.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"450acb1c33d19b2fe054be9c6b01f8a83f8c7a9b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/credit.yml": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12794.json b/doc/sphinxext/prs/12794.json new file mode 100644 index 00000000000..a83bcc71056 --- /dev/null +++ b/doc/sphinxext/prs/12794.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "09696542308a8745b9a7d245bc5af00f8284d706", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/12747.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/12774.json": { + "a": 103, + "d": 0 + }, + "doc/sphinxext/prs/12778.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/12781.json": { + "a": 2911, + "d": 0 + }, + "doc/sphinxext/prs/12783.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12786.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12793.json": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12796.json b/doc/sphinxext/prs/12796.json new file mode 100644 index 00000000000..76455afaccd --- /dev/null +++ b/doc/sphinxext/prs/12796.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e108241dbc0392100cf1b5df1598bda2f1645459", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/12794.json": { + "a": 39, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12797.json b/doc/sphinxext/prs/12797.json new file mode 100644 index 00000000000..050912edf23 --- /dev/null +++ b/doc/sphinxext/prs/12797.json @@ -0,0 +1,311 @@ +{ + "merge_commit_sha": "bf5173b3cdc69d86fb7db1f2fc2edd3b57fbc2a2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/credit.yml": { + "a": 1, + "d": 1 + }, + ".mailmap": { + "a": 2, + "d": 1 + }, + "CITATION.cff": { + "a": 39, + "d": 23 + }, + "codemeta.json": { + "a": 122, + "d": 74 + }, + "doc/_static/versions.json": { + "a": 7, + "d": 2 + }, + "doc/changes/devel.rst": { + "a": 0, + "d": 5 + }, + "doc/changes/devel/12300.apichange.rst": { + "a": 0, + "d": 3 + }, + "doc/changes/devel/12300.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12338.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12554.dependency.rst": { + "a": 0, + "d": 6 + }, + "doc/changes/devel/12556.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12561.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12562.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12569.other.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12573.newfeature.rst": { + "a": 0, + "d": 3 + }, + "doc/changes/devel/12576.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12578.bugfix.rst": { + "a": 0, + "d": 3 + }, + "doc/changes/devel/12583.apichange.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12583.newfeature.rst": { + "a": 0, + "d": 4 + }, + "doc/changes/devel/12584.newfeature.rst": { + "a": 0, + "d": 4 + }, + "doc/changes/devel/12593.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12597.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12600.other.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12605.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12609.bugfix.rst": { + "a": 0, + "d": 1 + }, + 
"doc/changes/devel/12612.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12616.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12620.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12628.newfeature.rst": { + "a": 0, + "d": 4 + }, + "doc/changes/devel/12633.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12646.bugfix.rst": { + "a": 0, + "d": 10 + }, + "doc/changes/devel/12649.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12650.other.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12652.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12655.newfeature.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12659.other.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12661.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12664.other.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12669.newfeature.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12676.bugfix.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12686.newfeature.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12687.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12688.other.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12694.bugfix.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12697.other.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12699.apichange.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12703.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12707.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12719.apichange.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12720.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12720.newfeature.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12721.bugfix.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12724.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12725.newfeature.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12727.bugfix.rst": { + "a": 0, + "d": 3 + }, + "doc/changes/devel/12727.newfeature.rst": { + "a": 0, + "d": 3 + }, + "doc/changes/devel/12730.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12733.newfeature.rst": { + "a": 0, + "d": 3 + }, + "doc/changes/devel/12734.newfeature.rst": { + "a": 0, + "d": 3 + }, + "doc/changes/devel/12735.bugfix.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12742.dependency.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12747.newfeature.rst": { + "a": 0, + "d": 3 + }, + "doc/changes/devel/12754.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12759.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12760.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12763.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12771.apichange.rst": { + "a": 0, + "d": 4 + }, + "doc/changes/devel/12774.other.rst": { + "a": 0, + "d": 2 + }, + "doc/changes/devel/12781.other.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/v1.8.rst": { + "a": 173, + "d": 0 + }, + "doc/development/whats_new.rst": { + "a": 1, + "d": 1 + }, + "doc/documentation/cited.rst": { + "a": 3, + "d": 3 + }, + "doc/sphinxext/update_credit_rst.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12798.json b/doc/sphinxext/prs/12798.json new file mode 100644 index 00000000000..4bdcbf6adfe --- /dev/null +++ b/doc/sphinxext/prs/12798.json @@ -0,0 +1,99 
@@ +{ + "merge_commit_sha": "a82528d35c655b693b23143a88850fe7a851fa10", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 2, + "d": 2 + }, + "README.rst": { + "a": 8, + "d": 8 + }, + "doc/changes/devel.rst": { + "a": 5, + "d": 0 + }, + "doc/changes/devel/12798.dependency.rst": { + "a": 1, + "d": 0 + }, + "doc/development/contributing.rst": { + "a": 2, + "d": 2 + }, + "doc/development/whats_new.rst": { + "a": 1, + "d": 0 + }, + "doc/install/installers.rst": { + "a": 5, + "d": 5 + }, + "doc/links.inc": { + "a": 1, + "d": 0 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 3, + "d": 34 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 0, + "d": 4 + }, + "mne/export/_edf.py": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 3, + "d": 9 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 3, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 5, + "d": 131 + }, + "mne/utils/_logging.py": { + "a": 2, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 8 + }, + "mne/viz/_brain/_brain.py": { + "a": 0, + "d": 27 + }, + "pyproject.toml": { + "a": 2, + "d": 1 + }, + "tools/environment_old.yml": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12799.json b/doc/sphinxext/prs/12799.json new file mode 100644 index 00000000000..72fd0c1a174 --- /dev/null +++ b/doc/sphinxext/prs/12799.json @@ -0,0 +1,251 @@ +{ + "merge_commit_sha": "362f9330925fb79a6adc19a42243672676dec63e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/open.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/pick.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/proj.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/reference.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/write.py": { + "a": 1, + "d": 1 + }, + "mne/_ola.py": { + "a": 2, + "d": 2 + }, + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 4, + "d": 4 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ssd.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 6, + "d": 6 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 3, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 3 + }, + "mne/gui/_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 3, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 4, + "d": 4 + }, + "mne/io/fil/fil.py": { + "a": 1, + "d": 1 + }, + "mne/io/hitachi/hitachi.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 3, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 4 + }, + "mne/morph.py": { + "a": 2, + "d": 4 + }, + 
"mne/preprocessing/artifact_detection.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/rank.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 5, + "d": 5 + }, + "mne/simulation/raw.py": { + "a": 3, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 4 + }, + "mne/stats/regression.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_import_nesting.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 6 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 3, + "d": 3 + }, + "mne/utils/misc.py": { + "a": 2, + "d": 2 + }, + "mne/utils/mixin.py": { + "a": 5, + "d": 5 + }, + "mne/utils/numerics.py": { + "a": 11, + "d": 11 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 4 + }, + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 5 + }, + "mne/viz/backends/_qt.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 11, + "d": 11 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 3 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 5 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 3 + }, + "pyproject.toml": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/128.json b/doc/sphinxext/prs/128.json new file mode 100644 index 00000000000..b55db8362f1 --- /dev/null +++ b/doc/sphinxext/prs/128.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7508cb24c38ddb1b7bd6180a2e581d7106fe0e39", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ssp.py": { + "a": 8, + "d": 3 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 38, + "d": 0 + }, + "mne/proj.py": { + "a": 22, + "d": 4 + }, + "mne/tests/test_proj.py": { + "a": 10, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12800.json b/doc/sphinxext/prs/12800.json new file mode 100644 index 00000000000..ea5fadc2ff1 --- /dev/null +++ b/doc/sphinxext/prs/12800.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "de72aae04118d77afbf5e4f3c0a8b0037e5bc38b", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12801.json b/doc/sphinxext/prs/12801.json new file mode 100644 index 00000000000..d5cf5ceb9e4 --- /dev/null +++ b/doc/sphinxext/prs/12801.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "dcd34d4e588e24c2f5de2995417407aa115c59d9", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12801.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/gui/_coreg.py": { + "a": 8, + "d": 7 + }, + "mne/gui/_gui.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12802.json b/doc/sphinxext/prs/12802.json new file mode 100644 index 00000000000..3d68a76c3a0 --- /dev/null +++ b/doc/sphinxext/prs/12802.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "966121be5931918ec63497aa4e9ba1404858007e", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } 
+ } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12803.json b/doc/sphinxext/prs/12803.json new file mode 100644 index 00000000000..67c3cc3f4e7 --- /dev/null +++ b/doc/sphinxext/prs/12803.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "223d4aa9ebd1b7942329912a057b349912792d2c", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12803.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_freesurfer.py": { + "a": 19, + "d": 16 + }, + "mne/source_space/_source_space.py": { + "a": 3, + "d": 4 + }, + "mne/utils/check.py": { + "a": 29, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12804.json b/doc/sphinxext/prs/12804.json new file mode 100644 index 00000000000..1a714b42ba2 --- /dev/null +++ b/doc/sphinxext/prs/12804.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "99dd0e1c24856aaaca7289e7e14d7a1390496134", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/institutional-partners.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/devel/12804.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12805.json b/doc/sphinxext/prs/12805.json new file mode 100644 index 00000000000..a3dfd555637 --- /dev/null +++ b/doc/sphinxext/prs/12805.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "0aae72dda030242d992866ce6d551206749474b2", + "authors": [ + { + "n": "Alex lepauvre", + "e": "alex.lepauvre@ae.mpg.de" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12805.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_base.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 13, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 71, + "d": 5 + }, + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 111, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 156, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12806.json b/doc/sphinxext/prs/12806.json new file mode 100644 index 00000000000..c15a965b71f --- /dev/null +++ b/doc/sphinxext/prs/12806.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "ac2c4c0d72f9843690aad70e63b48269f5269a0f", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/institutional-partners.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/devel/12803.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12804.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 2 + }, + "mne/_freesurfer.py": { + "a": 19, + "d": 16 + }, + "mne/forward/forward.py": { + "a": 5, + "d": 1 + }, + "mne/source_space/_source_space.py": { + "a": 3, + "d": 4 + }, + "mne/utils/check.py": { + "a": 29, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12810.json b/doc/sphinxext/prs/12810.json new file mode 100644 index 00000000000..d091af61277 --- /dev/null +++ b/doc/sphinxext/prs/12810.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": 
"af19be0a123735fd6b85e82e01adba86d6574188", + "authors": [ + { + "n": "Proloy Das", + "e": "proloy@umd.edu" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/ant/ant.py": { + "a": 57, + "d": 6 + }, + "mne/io/ant/tests/test_ant.py": { + "a": 188, + "d": 21 + }, + "tools/vulture_allowlist.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12811.json b/doc/sphinxext/prs/12811.json new file mode 100644 index 00000000000..3cf0b022e14 --- /dev/null +++ b/doc/sphinxext/prs/12811.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "bd8f9cbd2cb3d520d9671ad445eb81c8fb85b942", + "authors": [ + { + "n": "Victor Férat", + "e": "victor.ferat@live.Fr" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12811.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/utils.py": { + "a": 6, + "d": 2 + }, + "mne/epochs.py": { + "a": 7, + "d": 0 + }, + "mne/io/base.py": { + "a": 13, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 13, + "d": 6 + }, + "mne/io/tests/test_raw.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12812.json b/doc/sphinxext/prs/12812.json new file mode 100644 index 00000000000..8111f32ed6b --- /dev/null +++ b/doc/sphinxext/prs/12812.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "08a383126bcf9bfc7b98e5462d5803dc1fa239a1", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/annotations.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12813.json b/doc/sphinxext/prs/12813.json new file mode 100644 index 00000000000..30a9de7df4d --- /dev/null +++ b/doc/sphinxext/prs/12813.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b1203ada6f1a3ecaac4e8b67df45b9124a34dcd7", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/io/ant/tests/test_ant.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12814.json b/doc/sphinxext/prs/12814.json new file mode 100644 index 00000000000..8ddb8f3403b --- /dev/null +++ b/doc/sphinxext/prs/12814.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "20871ff2286935a564a9db5320afa7437df18458", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12815.json b/doc/sphinxext/prs/12815.json new file mode 100644 index 00000000000..6b0f7fd1f1b --- /dev/null +++ b/doc/sphinxext/prs/12815.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f27bac8b5a128667d7f946f0089132fa5d0da9ba", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "examples/io/read_neo_format.py": { + "a": 1, + "d": 3 + }, + "mne/_fiff/meas_info.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12820.json b/doc/sphinxext/prs/12820.json new 
file mode 100644 index 00000000000..73fa2132a68 --- /dev/null +++ b/doc/sphinxext/prs/12820.json @@ -0,0 +1,159 @@ +{ + "merge_commit_sha": "13852f58bced5a49434d06e50d98efcf6b4292de", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".github/workflows/credit.yml": { + "a": 1, + "d": 1 + }, + ".mailmap": { + "a": 10, + "d": 8 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/v0.10.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/v0.11.rst": { + "a": 3, + "d": 3 + }, + "doc/changes/v0.12.rst": { + "a": 10, + "d": 10 + }, + "doc/changes/v0.13.rst": { + "a": 13, + "d": 13 + }, + "doc/changes/v0.14.rst": { + "a": 7, + "d": 7 + }, + "doc/changes/v0.15.rst": { + "a": 8, + "d": 8 + }, + "doc/changes/v0.16.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/v0.18.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/v0.20.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v0.21.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/v0.22.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/v0.23.rst": { + "a": 3, + "d": 3 + }, + "doc/changes/v0.24.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/v0.8.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/v0.9.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/v1.1.rst": { + "a": 2, + "d": 2 + }, + "doc/changes/v1.7.rst": { + "a": 90, + "d": 90 + }, + "doc/changes/v1.8.rst": { + "a": 63, + "d": 63 + }, + "doc/conf.py": { + "a": 2, + "d": 3 + }, + "doc/sphinxext/credit_tools.py": { + "a": 88, + "d": 9 + }, + "doc/sphinxext/mne_doc_utils.py": { + "a": 7, + "d": 0 + }, + "doc/sphinxext/related_software.py": { + "a": 10, + "d": 2 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_unsupervised_spatial_filter.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/ems_filtering.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 1, + "d": 1 + }, + "examples/io/read_neo_format.py": { + "a": 4, + "d": 4 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12822.json b/doc/sphinxext/prs/12822.json new file mode 100644 index 00000000000..c141dd3a44a --- /dev/null +++ b/doc/sphinxext/prs/12822.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "346af6fec258e3bb0d30791dfacbe86cdb354f2b", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/12792.json": { + "a": 75, + "d": 0 + }, + "doc/sphinxext/prs/12796.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12797.json": { + "a": 311, + "d": 0 + }, + "doc/sphinxext/prs/12798.json": { + "a": 99, + "d": 0 + }, + "doc/sphinxext/prs/12799.json": { + "a": 251, + "d": 0 + }, + "doc/sphinxext/prs/12800.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12801.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12802.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12803.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12804.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/12805.json": { + "a": 51, + 
"d": 0 + }, + "doc/sphinxext/prs/12806.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/12810.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12811.json": { + "a": 51, + "d": 0 + }, + "doc/sphinxext/prs/12812.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12813.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12814.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12815.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12820.json": { + "a": 159, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12823.json b/doc/sphinxext/prs/12823.json new file mode 100644 index 00000000000..fb637d0e6cd --- /dev/null +++ b/doc/sphinxext/prs/12823.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e221c6cb71ec42f7eebe1c5ea81089d78c91d4a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".git-blame-ignore-revs": { + "a": 1, + "d": 0 + }, + "examples/inverse/morph_volume_stc.py": { + "a": 1, + "d": 0 + }, + "mne/morph.py": { + "a": 7, + "d": 1 + }, + "mne/preprocessing/ieeg/_volume.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 5, + "d": 1 + }, + "mne/transforms.py": { + "a": 22, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12825.json b/doc/sphinxext/prs/12825.json new file mode 100644 index 00000000000..83fe12fffab --- /dev/null +++ b/doc/sphinxext/prs/12825.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3f1c7803f73862dec3e4e85e36e916f2a6d642c9", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12826.json b/doc/sphinxext/prs/12826.json new file mode 100644 index 00000000000..1ed61498660 --- /dev/null +++ b/doc/sphinxext/prs/12826.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "013d6c9f630342e5af7633641162c7b85417782a", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/preprocessing/hfc.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12827.json b/doc/sphinxext/prs/12827.json new file mode 100644 index 00000000000..80311bb192c --- /dev/null +++ b/doc/sphinxext/prs/12827.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f3a3ca4430e1d4b9c539e7949c946f4f83bdb43f", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12827.other.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 52, + "d": 3 + }, + "mne/decoding/ssd.py": { + "a": 25, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12828.json b/doc/sphinxext/prs/12828.json new file mode 100644 index 00000000000..6030dffc960 --- /dev/null +++ b/doc/sphinxext/prs/12828.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "176f64ff061136cf5628d76535a8d7e2e164d399", + "authors": [ + { + "n": "Shristi Baral", + "e": null + }, + { + "n": "shristi", + "e": "shristi.baral@aalto.fi" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12828.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 7, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 13, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12829.json b/doc/sphinxext/prs/12829.json new file mode 100644 index 00000000000..78d7d0433f4 --- /dev/null +++ b/doc/sphinxext/prs/12829.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c993ae5d65b3c161876da407e293e6784c6e8ad9", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/devel/12829.apichange.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12830.json b/doc/sphinxext/prs/12830.json new file mode 100644 index 00000000000..7e46a12cc4e --- /dev/null +++ b/doc/sphinxext/prs/12830.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3ecdd6b2102b00f97d7ad98ca2483a0f79e90b30", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12830.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 19, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 24, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12832.json b/doc/sphinxext/prs/12832.json new file mode 100644 index 00000000000..7b877da0580 --- /dev/null +++ b/doc/sphinxext/prs/12832.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c46733a4812f5d19370699c7b18cb1c1c4ded9fa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12833.json b/doc/sphinxext/prs/12833.json new file mode 100644 index 00000000000..e1f4124bf3c --- /dev/null +++ b/doc/sphinxext/prs/12833.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2f299276fecedcb8b3626ea40fc4f1aecff51bce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/PULL_REQUEST_TEMPLATE.md": { + "a": 11, + "d": 4 + }, + "tools/github_actions_test.sh": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12834.json b/doc/sphinxext/prs/12834.json new file mode 100644 index 
00000000000..cf5154627ed --- /dev/null +++ b/doc/sphinxext/prs/12834.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "5425ef42e41b6a427f3365e13ea57ecf9c0c12b0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12834.dependency.rst": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 28, + "d": 0 + }, + "examples/visualization/evoked_whitening.py": { + "a": 1, + "d": 0 + }, + "mne/cov.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/__init__.pyi": { + "a": 7, + "d": 2 + }, + "mne/decoding/base.py": { + "a": 24, + "d": 34 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/ems.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/mixin.py": { + "a": 0, + "d": 89 + }, + "mne/decoding/receptive_field.py": { + "a": 5, + "d": 13 + }, + "mne/decoding/search_light.py": { + "a": 7, + "d": 21 + }, + "mne/decoding/ssd.py": { + "a": 9, + "d": 9 + }, + "mne/decoding/tests/test_base.py": { + "a": 32, + "d": 42 + }, + "mne/decoding/tests/test_csp.py": { + "a": 7, + "d": 9 + }, + "mne/decoding/tests/test_ems.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 30, + "d": 37 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 24, + "d": 33 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 6, + "d": 4 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 4, + "d": 3 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 6, + "d": 10 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 0, + "d": 3 + }, + "mne/decoding/time_frequency.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 2 + }, + "mne/fixes.py": { + "a": 48, + "d": 228 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 12, + "d": 3 + }, + "pyproject.toml": { + "a": 4, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 3 + }, + "tools/vulture_allowlist.py": { + "a": 4, + "d": 0 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12837.json b/doc/sphinxext/prs/12837.json new file mode 100644 index 00000000000..196fdf71511 --- /dev/null +++ b/doc/sphinxext/prs/12837.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9a2f887146f080357a83104c4af2c95e2176fe88", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12838.json b/doc/sphinxext/prs/12838.json new file mode 100644 index 00000000000..3e4860bd08f --- /dev/null +++ b/doc/sphinxext/prs/12838.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5cd52993c0027a958cfbc4f6810dff5d180301e5", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/utils/config.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12839.json b/doc/sphinxext/prs/12839.json new file mode 100644 index 00000000000..f1f8d57e8aa --- /dev/null +++ b/doc/sphinxext/prs/12839.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ebab915ea723155ee0fed788bcc92c2ff16a4f7f", + "authors": [ + { + "n": 
"Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/_standard_montage_utils.py": { + "a": 9, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 5, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 55, + "d": 1 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1284.json b/doc/sphinxext/prs/1284.json new file mode 100644 index 00000000000..61094b1f138 --- /dev/null +++ b/doc/sphinxext/prs/1284.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bbfab25bb2148fd74a3978dba6a430ea8c2a01f7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12841.json b/doc/sphinxext/prs/12841.json new file mode 100644 index 00000000000..0165fbe7092 --- /dev/null +++ b/doc/sphinxext/prs/12841.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "528e04658c5ea7a5e55bcb5889ebb4c6d6847284", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12792.newfeature.rst": { + "a": 1, + "d": 1 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/ant/ant.py": { + "a": 2, + "d": 2 + }, + "mne/io/ant/tests/test_ant.py": { + "a": 218, + "d": 33 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tools/vulture_allowlist.py": { + "a": 1, + "d": 0 + }, + "tutorials/io/20_reading_eeg_data.py": { + "a": 28, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12842.json b/doc/sphinxext/prs/12842.json new file mode 100644 index 00000000000..7647f81bc5c --- /dev/null +++ b/doc/sphinxext/prs/12842.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e999e853b263c8b48a6fcfeb60c82b445e20d88b", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/devel/12842.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/receptive_field.py": { + "a": 5, + "d": 4 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 13, + "d": 9 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 7, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12843.json b/doc/sphinxext/prs/12843.json new file mode 100644 index 00000000000..896123e0088 --- /dev/null +++ b/doc/sphinxext/prs/12843.json @@ -0,0 +1,195 @@ +{ + "merge_commit_sha": "10ff91a339642f75e18ba1497b60ead88c24cd2b", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12843.bugfix.rst": { + "a": 3, + "d": 0 + }, + "doc/changes/devel/12843.other.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/open.py": { + "a": 22, + "d": 21 + }, + "mne/_fiff/tests/test_what.py": { + "a": 9, + "d": 4 + }, + "mne/_fiff/tree.py": { + "a": 9, + "d": 6 + }, + "mne/_fiff/utils.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/what.py": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 32, + "d": 38 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 15, + "d": 6 + }, + "mne/evoked.py": { + "a": 19, + "d": 6 + }, + "mne/export/_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 29, + "d": 30 + }, + "mne/gui/_gui.py": { + "a": 13, + "d": 15 + }, + "mne/io/base.py": { + "a": 67, + "d": 43 + }, + "mne/io/boxy/boxy.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/curry/curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 4, + "d": 3 + }, + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 2 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 4 + }, + "mne/io/fiff/raw.py": { + "a": 43, + "d": 23 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 52, + "d": 29 + }, + "mne/io/hitachi/hitachi.py": { + "a": 2, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/nedf/nedf.py": { + "a": 1, + "d": 1 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 1, + "d": 1 + }, + "mne/io/nihon/nihon.py": { + "a": 1, + "d": 1 + }, + "mne/io/persyst/persyst.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 2, + "d": 1 + }, + "mne/report/report.py": { + "a": 41, + "d": 56 + }, + "mne/source_space/_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 22, + "d": 0 + }, + "mne/utils/check.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/_notebook.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12844.json b/doc/sphinxext/prs/12844.json new file mode 100644 
index 00000000000..17e1ce65ad8 --- /dev/null +++ b/doc/sphinxext/prs/12844.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "a218f96927ef06fd8a4e38363b9d98f74a93f1c3", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/12844.other.rst": { + "a": 1, + "d": 0 + }, + "examples/datasets/limo_data.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/epochs_metadata.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 32, + "d": 5 + }, + "tutorials/clinical/60_sleep.py": { + "a": 4, + "d": 4 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 4, + "d": 4 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12845.json b/doc/sphinxext/prs/12845.json new file mode 100644 index 00000000000..2cf94a62a25 --- /dev/null +++ b/doc/sphinxext/prs/12845.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "efe8b6a9ce87c9a45dcdc20a8c44e99d2efb6c13", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 3, + "d": 0 + }, + "doc/_static/js/set_installer_tab.js": { + "a": 24, + "d": 11 + }, + "doc/_static/js/update_installer_version.js": { + "a": 1, + "d": 1 + }, + "doc/install/installers.rst": { + "a": 59, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12846.json b/doc/sphinxext/prs/12846.json new file mode 100644 index 00000000000..efb06196e5f --- /dev/null +++ b/doc/sphinxext/prs/12846.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "670330a1ead288f11cc3ffd5ca9d65136ca55ccd", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/changes/devel/12846.bugfix.rst": { + "a": 1, + "d": 0 + }, + "examples/visualization/eyetracking_plot_heatmap.py": { + "a": 4, + "d": 0 + }, + "mne/defaults.py": { + "a": 9, + "d": 9 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 33, + "d": 17 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 32, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12847.json b/doc/sphinxext/prs/12847.json new file mode 100644 index 00000000000..9545c8aa667 --- /dev/null +++ b/doc/sphinxext/prs/12847.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9652572611a0ef6941e59b40575bdfc05a945387", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/12847.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 259, + "d": 110 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 120, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12848.json b/doc/sphinxext/prs/12848.json new file mode 100644 index 00000000000..dc2401b2a93 --- /dev/null +++ b/doc/sphinxext/prs/12848.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "715784fb3f4276ee977f6bf12c67b57dd9e47843", + "authors": [ + { + "n": "Victor Férat", + "e": "victor.ferat@live.Fr" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12848.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 134, + "d": 5 + }, + "mne/report/tests/test_report.py": { + "a": 25, + "d": 1 + }, + 
"mne/source_space/_source_space.py": { + "a": 8, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 6, + "d": 5 + }, + "mne/utils/check.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/view.py": { + "a": 35, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 18, + "d": 8 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12850.json b/doc/sphinxext/prs/12850.json new file mode 100644 index 00000000000..8cb357550c8 --- /dev/null +++ b/doc/sphinxext/prs/12850.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4f58e814b4e5d01311f359f3d8f8e2cbca391fbb", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/defaults.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12853.json b/doc/sphinxext/prs/12853.json new file mode 100644 index 00000000000..4113fdfeac6 --- /dev/null +++ b/doc/sphinxext/prs/12853.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2cab0257a430127c4bae348380ab2be99bffb412", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/devel/12853.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 18, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12856.json b/doc/sphinxext/prs/12856.json new file mode 100644 index 00000000000..8eefad7827a --- /dev/null +++ b/doc/sphinxext/prs/12856.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "efbd97292592e92c2bdfe09e2fea9cf9b1d2e6f6", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1286.json b/doc/sphinxext/prs/1286.json new file mode 100644 index 00000000000..c0dbb61fd6b --- /dev/null +++ b/doc/sphinxext/prs/1286.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b86e3c1f8caf86248e8e9321a4a75a045842a708", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 8, + "d": 3 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12860.json b/doc/sphinxext/prs/12860.json new file mode 100644 index 00000000000..2220691bead --- /dev/null +++ b/doc/sphinxext/prs/12860.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6f9646ff979294683bb385e63f3587aa3c7a6dd5", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_base.py": { + "a": 5, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 8, + "d": 7 + }, + "mne/utils/progressbar.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12861.json b/doc/sphinxext/prs/12861.json new file mode 100644 index 00000000000..d7b6f005206 --- /dev/null +++ b/doc/sphinxext/prs/12861.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fa841cbc30ce830e40c60a8a44518b84e2f7f0cf", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12862.json b/doc/sphinxext/prs/12862.json new file mode 100644 index 00000000000..c89ada322dc --- /dev/null +++ b/doc/sphinxext/prs/12862.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b9cdca8638b20768e85822f5e5e57a5fe043d393", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12862.other.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 42, + "d": 9 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12865.json b/doc/sphinxext/prs/12865.json new file mode 100644 index 00000000000..c52aeea4606 --- /dev/null +++ b/doc/sphinxext/prs/12865.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e6f9c5df856418d331684dd3477e8c1d1c0f37e1", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12866.json b/doc/sphinxext/prs/12866.json new file mode 100644 index 00000000000..deda3246f0f --- /dev/null +++ b/doc/sphinxext/prs/12866.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ab2516826c4b3492f1246db978791207ad7f66d7", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/preprocessing/ecg.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12867.json b/doc/sphinxext/prs/12867.json new file mode 100644 index 00000000000..dde94ea5570 --- /dev/null +++ b/doc/sphinxext/prs/12867.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fc05aeb19e7e998356d400d5e93d977545a0511a", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12868.json b/doc/sphinxext/prs/12868.json new file mode 100644 index 00000000000..a89104cb11e --- /dev/null +++ b/doc/sphinxext/prs/12868.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d248a5763e1ced3c9c65a12101e46d85e806df54", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/io/read_impedances.py": { + "a": 77, + "d": 0 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 0 + }, + "mne/io/ant/ant.py": { + "a": 1, + "d": 13 + }, + "mne/io/ant/tests/test_ant.py": { + "a": 1, + "d": 6 + }, + "pyproject.toml": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12869.json b/doc/sphinxext/prs/12869.json new file mode 100644 index 00000000000..8abf2a3f1cd --- /dev/null +++ b/doc/sphinxext/prs/12869.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1dd07a4ee95a3a16f2e0b6cf97b3e0416595dc7a", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1287.json b/doc/sphinxext/prs/1287.json new file mode 100644 index 00000000000..810765f6111 --- /dev/null +++ b/doc/sphinxext/prs/1287.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d91654931e4845b080bffc831d79d7570658f800", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 5, + "d": 0 + }, + "examples/plot_extract_events_from_raw.py": { + "a": 7, + "d": 1 + }, + "mne/tests/test_viz.py": { + "a": 14, + "d": 0 + }, + "mne/viz.py": { + "a": 67, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12870.json b/doc/sphinxext/prs/12870.json new file mode 100644 index 00000000000..d62c050fd91 --- /dev/null +++ b/doc/sphinxext/prs/12870.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "50ce70a9d04645988a3fddead56e6892faa940fe", + "authors": [ + { + "n": 
"Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 6, + "d": 0 + }, + "pyproject.toml": { + "a": 311, + "d": 314 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12871.json b/doc/sphinxext/prs/12871.json new file mode 100644 index 00000000000..91385573801 --- /dev/null +++ b/doc/sphinxext/prs/12871.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "381688fc250e6f1b1a6e1bf26357379b0fec94a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12871.newfeature.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 4, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 12, + "d": 4 + }, + "mne/viz/backends/renderer.py": { + "a": 34, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12872.json b/doc/sphinxext/prs/12872.json new file mode 100644 index 00000000000..ec74da5c20d --- /dev/null +++ b/doc/sphinxext/prs/12872.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "78ea8b7527666bc92ed47b9a3056e8125ac786a4", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/development/roadmap.rst": { + "a": 148, + "d": 64 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12875.json b/doc/sphinxext/prs/12875.json new file mode 100644 index 00000000000..124aa3135a3 --- /dev/null +++ b/doc/sphinxext/prs/12875.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5492174fc0e64762056b5b99a330129ca6eefca4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12875.bugfix.rst": { + "a": 2, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 178, + "d": 114 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 15, + "d": 5 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 0 + }, + "mne/io/persyst/persyst.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12877.json b/doc/sphinxext/prs/12877.json new file mode 100644 index 00000000000..f3906dedfa6 --- /dev/null +++ b/doc/sphinxext/prs/12877.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7af60e6329b79810061ccf99df4fa08b5232d1ac", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12877.bugfix.rst": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 19, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12878.json b/doc/sphinxext/prs/12878.json new file mode 100644 index 00000000000..400bc5dd56e --- /dev/null +++ b/doc/sphinxext/prs/12878.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8379668a42b68ba115310dfd9e5f821e3efb4db8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/fiff/raw.py": { + "a": 8, + "d": 5 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12879.json b/doc/sphinxext/prs/12879.json new 
file mode 100644 index 00000000000..0f8fc811afa --- /dev/null +++ b/doc/sphinxext/prs/12879.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "099cf6043732c511b403d5ec6df343a3f0060ed2", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1288.json b/doc/sphinxext/prs/1288.json new file mode 100644 index 00000000000..6e68ad9b64b --- /dev/null +++ b/doc/sphinxext/prs/1288.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5c18e533d3b345d83f9f2e40d83157a2316c9b26", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12880.json b/doc/sphinxext/prs/12880.json new file mode 100644 index 00000000000..0ae1b4cfe37 --- /dev/null +++ b/doc/sphinxext/prs/12880.json @@ -0,0 +1,159 @@ +{ + "merge_commit_sha": "717bd1a13716a6b1c4a008e2fe6a907f317fe54a", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/12366.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/12787.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12788.json": { + "a": 91, + "d": 0 + }, + "doc/sphinxext/prs/12822.json": { + "a": 87, + "d": 0 + }, + "doc/sphinxext/prs/12823.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12825.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12826.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12827.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/12829.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12830.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12832.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12833.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12834.json": { + "a": 135, + "d": 0 + }, + "doc/sphinxext/prs/12837.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12838.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12839.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/12841.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/12842.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/12843.json": { + "a": 195, + "d": 0 + }, + "doc/sphinxext/prs/12844.json": { + "a": 43, + "d": 0 + }, + "doc/sphinxext/prs/12845.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12846.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/12850.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12853.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12856.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12860.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12861.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12862.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12865.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12866.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12867.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12868.json": { + "a": 43, + "d": 0 + }, + "doc/sphinxext/prs/12869.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12870.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12871.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/12875.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/12878.json": { + "a": 19, + "d": 0 
+ } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12882.json b/doc/sphinxext/prs/12882.json new file mode 100644 index 00000000000..dae0aedb81a --- /dev/null +++ b/doc/sphinxext/prs/12882.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "7e197c79a1314cf6293f6429aa932bbb7ef7378c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12846.bugfix.rst": { + "a": 2, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 3 + }, + "doc/conf.py": { + "a": 4, + "d": 0 + }, + "doc/references.bib": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 3, + "d": 2 + }, + "tutorials/clinical/30_ecog.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12883.json b/doc/sphinxext/prs/12883.json new file mode 100644 index 00000000000..c444f6e997a --- /dev/null +++ b/doc/sphinxext/prs/12883.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ed933b8d9d523a9f4f7006d6e6d1c81901e5478a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/html_templates/repr/_channels.html.jinja": { + "a": 1, + "d": 17 + }, + "mne/html_templates/repr/forward.html.jinja": { + "a": 18, + "d": 24 + }, + "mne/html_templates/repr/static/_channels.html.jinja": { + "a": 17, + "d": 0 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12884.json b/doc/sphinxext/prs/12884.json new file mode 100644 index 00000000000..3420faeccb7 --- /dev/null +++ b/doc/sphinxext/prs/12884.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "bddaad9510941c1cf4e81266569172e993b59d92", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12884.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 10, + "d": 3 + }, + "mne/gui/tests/test_coreg.py": { + "a": 9, + "d": 1 + }, + "mne/preprocessing/realign.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_event.py": { + "a": 5, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 11, + "d": 4 + }, + "mne/viz/backends/_qt.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12885.json b/doc/sphinxext/prs/12885.json new file mode 100644 index 00000000000..84274b6b372 --- /dev/null +++ b/doc/sphinxext/prs/12885.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "53f258debb781aeee2c2e5cbf449f37822e3a898", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 28, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12886.json b/doc/sphinxext/prs/12886.json new file mode 100644 index 00000000000..37116f2ea92 --- /dev/null +++ b/doc/sphinxext/prs/12886.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d4c0d1c4062110db8c1af0bee47df969d8968d8b", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "pyproject.toml": { + "a": 18, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12887.json b/doc/sphinxext/prs/12887.json new file mode 100644 index 00000000000..d6286407d34 --- /dev/null +++ b/doc/sphinxext/prs/12887.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "94fc435de1786bd7f5e4545970111d9bdc8aa7f9", + "authors": [ + { + "n": "dependabot[bot]", + 
"e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12888.json b/doc/sphinxext/prs/12888.json new file mode 100644 index 00000000000..84d6ef07f51 --- /dev/null +++ b/doc/sphinxext/prs/12888.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c0a98ed956c35ad42572253d2ce6f7ce0ed91a54", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1289.json b/doc/sphinxext/prs/1289.json new file mode 100644 index 00000000000..c69b4320af2 --- /dev/null +++ b/doc/sphinxext/prs/1289.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "af31e0beb5b9108306c0f366c928607603e2c071", + "authors": [ + { + "n": "marmaduke woodman", + "e": null + } + ], + "changes": { + "mne/io/array/array.py": { + "a": 2, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12890.json b/doc/sphinxext/prs/12890.json new file mode 100644 index 00000000000..fa84e8ae199 --- /dev/null +++ b/doc/sphinxext/prs/12890.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "922a7801a0ca6af225c7b861fe6bd97b1518af3a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 15, + "d": 0 + }, + "README.rst": { + "a": 7, + "d": 32 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tools/hooks/sync_dependencies.py": { + "a": 95, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12891.json b/doc/sphinxext/prs/12891.json new file mode 100644 index 00000000000..6e7a7b1a784 --- /dev/null +++ b/doc/sphinxext/prs/12891.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "06050bc72c637c297ff2e4fe0c7cba13243792e4", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12892.json b/doc/sphinxext/prs/12892.json new file mode 100644 index 00000000000..ab2b8aebc6b --- /dev/null +++ b/doc/sphinxext/prs/12892.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "72faa3caa2239c6a4d973fc4cfd4a66a57ebcf00", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12896.json b/doc/sphinxext/prs/12896.json new file mode 100644 index 00000000000..203d5d05c49 --- /dev/null +++ b/doc/sphinxext/prs/12896.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a1a05ae11234929f0608d5a6b4fc30206af89031", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12896.other.rst": { + "a": 1, + "d": 0 + }, + "doc/development/governance.rst": { + "a": 251, + "d": 187 + }, + "doc/overview/people.rst": { + "a": 18, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12898.json b/doc/sphinxext/prs/12898.json new file mode 100644 index 00000000000..7b39212813b --- /dev/null +++ b/doc/sphinxext/prs/12898.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"b831c28f96ef177462e3cab632f8910d44d22c22", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/install/manual_install_python.rst": { + "a": 13, + "d": 4 + }, + "doc/links.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12900.json b/doc/sphinxext/prs/12900.json new file mode 100644 index 00000000000..64c7617bca8 --- /dev/null +++ b/doc/sphinxext/prs/12900.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7250311b2ce52ed8f50bacf193636350bf796ffb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/default_linux_dock_icon.png": { + "a": 0, + "d": 0 + }, + "doc/_static/mne_python_dock_icon.png": { + "a": 0, + "d": 0 + }, + "doc/install/advanced.rst": { + "a": 31, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12901.json b/doc/sphinxext/prs/12901.json new file mode 100644 index 00000000000..20d79a85b23 --- /dev/null +++ b/doc/sphinxext/prs/12901.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "56e522ba303fe01a25a2a0d5d7507639a83fe02c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/12901.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12901.newfeature.rst": { + "a": 8, + "d": 0 + }, + "mne/html_templates/report/html.html.jinja": { + "a": 2, + "d": 6 + }, + "mne/html_templates/report/image.html.jinja": { + "a": 13, + "d": 13 + }, + "mne/html_templates/report/section.html.jinja": { + "a": 2, + "d": 6 + }, + "mne/html_templates/report/slider.html.jinja": { + "a": 3, + "d": 9 + }, + "mne/report/report.py": { + "a": 322, + "d": 122 + }, + "mne/report/tests/test_report.py": { + "a": 78, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 14, + "d": 8 + }, + "mne/viz/_3d.py": { + "a": 43, + "d": 17 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 2 + }, + "tutorials/intro/70_report.py": { + "a": 19, + "d": 1 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12902.json b/doc/sphinxext/prs/12902.json new file mode 100644 index 00000000000..27cb64fbb70 --- /dev/null +++ b/doc/sphinxext/prs/12902.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f35aa5ae90616e97b0ce056a34dd98cdb7a9b184", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "environment.yml": { + "a": 12, + "d": 12 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12908.json b/doc/sphinxext/prs/12908.json new file mode 100644 index 00000000000..5c7956c5050 --- /dev/null +++ b/doc/sphinxext/prs/12908.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "18c0d44668024e933514e63e459306bd5b1a89a9", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12909.json b/doc/sphinxext/prs/12909.json new file mode 100644 index 00000000000..93097c15c54 --- /dev/null +++ b/doc/sphinxext/prs/12909.json @@ -0,0 +1,19 
@@ +{ + "merge_commit_sha": "97f5b9c34e3f00433886f44f10b9cb09ae4b69a3", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/12909.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12910.json b/doc/sphinxext/prs/12910.json new file mode 100644 index 00000000000..6240f4368c0 --- /dev/null +++ b/doc/sphinxext/prs/12910.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "1d2635f84a55785c3531cfe4027eda3820a7fb31", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12910.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 10, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 195, + "d": 26 + }, + "mne/time_frequency/tfr.py": { + "a": 249, + "d": 113 + }, + "mne/utils/docs.py": { + "a": 12, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 3, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 24, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12911.json b/doc/sphinxext/prs/12911.json new file mode 100644 index 00000000000..b55dbab3924 --- /dev/null +++ b/doc/sphinxext/prs/12911.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f4597ba4d75416dbe25ab6a08df1eadf17d9ec27", + "authors": [ + { + "n": "Florian Hofer", + "e": null + } + ], + "changes": { + "doc/changes/devel/12911.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/export/_edf.py": { + "a": 2, + "d": 0 + }, + "mne/export/tests/test_export.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12912.json b/doc/sphinxext/prs/12912.json new file mode 100644 index 00000000000..7b6b00fbeb8 --- /dev/null +++ b/doc/sphinxext/prs/12912.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c63da99a870c550a4eef1b57a51160b7052a3386", + "authors": [ + { + "n": "Richard Scholz", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12912.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 1, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 10, + "d": 0 + }, + "mne/viz/tests/test_ica.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12914.json b/doc/sphinxext/prs/12914.json new file mode 100644 index 00000000000..bc0dd79541f --- /dev/null +++ b/doc/sphinxext/prs/12914.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "0755ef01e4b0be6424e2758109bf6e7a6f41fc8e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 0 + }, + ".pre-commit-config.yaml": { + "a": 8, + "d": 1 + }, + "environment.yml": { + "a": 45, + "d": 51 + }, + "mne/utils/config.py": { + "a": 3, + "d": 1 + }, + "pyproject.toml": { + "a": 40, + "d": 35 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/hooks/update_environment_file.py": { + "a": 89, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/12915.json b/doc/sphinxext/prs/12915.json new file mode 100644 index 00000000000..7cd1579d31a --- /dev/null +++ b/doc/sphinxext/prs/12915.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "92cf6f2bb72d3ae9668bd94daa9d28093671ec58", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12909.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12918.json b/doc/sphinxext/prs/12918.json new file mode 100644 index 00000000000..a5fb64d446a --- /dev/null +++ b/doc/sphinxext/prs/12918.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "e15292fc0bc8d5e32dd6d6099a839bf810963f3a", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/12918.apichange.rst": { + "a": 1, + "d": 0 + }, + "doc/documentation/datasets.rst": { + "a": 12, + "d": 6 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 6, + "d": 5 + }, + "examples/preprocessing/eeg_bridging.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/muscle_ica.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 55, + "d": 25 + }, + "mne/datasets/eegbci/tests/test_eegbci.py": { + "a": 16, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 3 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12920.json b/doc/sphinxext/prs/12920.json new file mode 100644 index 00000000000..d41c1a8c642 --- /dev/null +++ b/doc/sphinxext/prs/12920.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f9ab4376906ea51da1cd38cae8bcd14cf559c154", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12924.json b/doc/sphinxext/prs/12924.json new file mode 100644 index 00000000000..6d2d045aecd --- /dev/null +++ b/doc/sphinxext/prs/12924.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f00deb44e2996a3b707c3f0399cfa0d8b842c566", + "authors": [ + { + "n": "Simon M. 
Hofmann", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12924.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 9, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12925.json b/doc/sphinxext/prs/12925.json new file mode 100644 index 00000000000..bec3acf2188 --- /dev/null +++ b/doc/sphinxext/prs/12925.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "98d55b4a9e29065766c29c3453336640844096ab", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/backends/_qt.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12928.json b/doc/sphinxext/prs/12928.json new file mode 100644 index 00000000000..b4f01d69fcb --- /dev/null +++ b/doc/sphinxext/prs/12928.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "60346f64bce19a5657920fe63f8e1ab4a6498db0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 3, + "d": 3 + }, + "doc/sphinxext/mne_doc_utils.py": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 0 + }, + "mne/stats/regression.py": { + "a": 3, + "d": 2 + }, + "mne/stats/tests/test_regression.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12931.json b/doc/sphinxext/prs/12931.json new file mode 100644 index 00000000000..734ee4af516 --- /dev/null +++ b/doc/sphinxext/prs/12931.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "46089954167343ff2e0d089b9a313674ef7475b6", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12931.bugfix.rst": { + "a": 3, + "d": 0 + }, + "doc/changes/devel/12931.other.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 68, + "d": 36 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12932.json b/doc/sphinxext/prs/12932.json new file mode 100644 index 00000000000..91bf8c5fc55 --- /dev/null +++ b/doc/sphinxext/prs/12932.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "424f0b4daaaefa96ff3fbecf0d20e2209b281755", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12933.json b/doc/sphinxext/prs/12933.json new file mode 100644 index 00000000000..e6909e9e73f --- /dev/null +++ b/doc/sphinxext/prs/12933.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "de7f767c8cdc1496e56994265713bbd0e2de84af", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/io/ant/ant.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12935.json b/doc/sphinxext/prs/12935.json new file mode 100644 index 00000000000..73389f6ffd2 --- /dev/null +++ b/doc/sphinxext/prs/12935.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": 
"4a61d42727997e57089722e5275bd73098205d9d", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/12872.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12877.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12879.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12880.json": { + "a": 159, + "d": 0 + }, + "doc/sphinxext/prs/12882.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12883.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12884.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/12885.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12886.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12887.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12888.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12890.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/12891.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12892.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12898.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12900.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12901.json": { + "a": 71, + "d": 0 + }, + "doc/sphinxext/prs/12902.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12908.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12909.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12911.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12912.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12914.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/12915.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12918.json": { + "a": 55, + "d": 0 + }, + "doc/sphinxext/prs/12920.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12924.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12925.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12928.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/12932.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12933.json": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12936.json b/doc/sphinxext/prs/12936.json new file mode 100644 index 00000000000..4fbd5951c2c --- /dev/null +++ b/doc/sphinxext/prs/12936.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "333d97189e9315ebbc274bbf5d5d439938b7f840", + "authors": [ + { + "n": "Genuster", + "e": null + } + ], + "changes": { + "doc/changes/devel/12936.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12937.json b/doc/sphinxext/prs/12937.json new file mode 100644 index 00000000000..11618b8c1c9 --- /dev/null +++ b/doc/sphinxext/prs/12937.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ddca04beb56baa6af008b43e658ffbedac2c2228", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "tools/hooks/update_environment_file.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12938.json b/doc/sphinxext/prs/12938.json new file mode 100644 index 00000000000..3da4cbe9b09 --- /dev/null +++ b/doc/sphinxext/prs/12938.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "899b7bf510966619a44acadf82023748e39d6529", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/viz/_scraper.py": { + "a": 4, + 
"d": 6 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12941.json b/doc/sphinxext/prs/12941.json new file mode 100644 index 00000000000..0d98ac2cefb --- /dev/null +++ b/doc/sphinxext/prs/12941.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b3ecaab2440668cf7237841cd8cc83c276f24075", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12942.json b/doc/sphinxext/prs/12942.json new file mode 100644 index 00000000000..cae8206ee51 --- /dev/null +++ b/doc/sphinxext/prs/12942.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c1ad98b577de30e93ac96ea99b279069fe435843", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12947.json b/doc/sphinxext/prs/12947.json new file mode 100644 index 00000000000..f895056403c --- /dev/null +++ b/doc/sphinxext/prs/12947.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2983d2a19148e8760b67de399e85c91a3e040de7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/_static/js/set_installer_tab.js": { + "a": 10, + "d": 0 + }, + "doc/install/ides.rst": { + "a": 55, + "d": 0 + }, + "doc/install/index.rst": { + "a": 1, + "d": 0 + }, + "doc/install/installers.rst": { + "a": 21, + "d": 4 + }, + "doc/install/manual_install.rst": { + "a": 0, + "d": 56 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12948.json b/doc/sphinxext/prs/12948.json new file mode 100644 index 00000000000..9c8f88baf30 --- /dev/null +++ b/doc/sphinxext/prs/12948.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "649857aacb24a0afc3b069f1e75bb3cf843a8766", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1295.json b/doc/sphinxext/prs/1295.json new file mode 100644 index 00000000000..f837bec35f8 --- /dev/null +++ b/doc/sphinxext/prs/1295.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a9e2f5fe33a03ffd181c25758ac938e925a023c7", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/surface.py": { + "a": 12, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12951.json b/doc/sphinxext/prs/12951.json new file mode 100644 index 00000000000..5dae7e4cc90 --- /dev/null +++ b/doc/sphinxext/prs/12951.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "cd6dd5f4db008cdef407298e2e24014e75916693", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 11, + "d": 1 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 8, + "d": 3 + }, + "mne/decoding/search_light.py": { + "a": 16, + "d": 2 + }, + "mne/decoding/ssd.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_base.py": { + "a": 10, + "d": 1 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 2, + 
"d": 2 + }, + "mne/fixes.py": { + "a": 12, + "d": 0 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 1 + }, + "tools/install_pre_requirements.sh": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12955.json b/doc/sphinxext/prs/12955.json new file mode 100644 index 00000000000..dc2ee03614c --- /dev/null +++ b/doc/sphinxext/prs/12955.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "060b60061f5d8226734fdb80340a2f1f53d37fd1", + "authors": [ + { + "n": "Leonardo Rochael Almeida", + "e": "leorochael@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12955.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/12955.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 5, + "d": 3 + }, + "mne/io/base.py": { + "a": 23, + "d": 9 + }, + "mne/io/fiff/raw.py": { + "a": 4, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 36, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12957.json b/doc/sphinxext/prs/12957.json new file mode 100644 index 00000000000..376c66f4f10 --- /dev/null +++ b/doc/sphinxext/prs/12957.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ecc620d5919b6209bf7d76301fd9764c80fc2826", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/cov.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12958.json b/doc/sphinxext/prs/12958.json new file mode 100644 index 00000000000..89e21215bb6 --- /dev/null +++ b/doc/sphinxext/prs/12958.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9832d4b7a9935898aa6ff6ddd0808e8b5aa3d7aa", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1296.json b/doc/sphinxext/prs/1296.json new file mode 100644 index 00000000000..3bb58a91088 --- /dev/null +++ b/doc/sphinxext/prs/1296.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a321f99dc2c2b32e7af04244d9fdd4f62fc660a4", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "examples/realtime/ftclient_rt_average.py": { + "a": 17, + "d": 5 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 36, + "d": 4 + }, + "mne/viz.py": { + "a": 28, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12960.json b/doc/sphinxext/prs/12960.json new file mode 100644 index 00000000000..91845644221 --- /dev/null +++ b/doc/sphinxext/prs/12960.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c3c1f2934f23bbb8409094c06576f7a470357abe", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12960.other.rst": { + "a": 1, + "d": 0 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 20, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12962.json b/doc/sphinxext/prs/12962.json new file mode 100644 index 00000000000..eeb4d460256 --- /dev/null +++ b/doc/sphinxext/prs/12962.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1e7cef13d0e45355114a14beee68db365a91e315", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + 
"changes": { + "doc/changes/devel/12962.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12966.json b/doc/sphinxext/prs/12966.json new file mode 100644 index 00000000000..f3a1facee6e --- /dev/null +++ b/doc/sphinxext/prs/12966.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f45b51b43bb6704e4567abc6a96ad995a58de0de", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12966.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 51, + "d": 12 + }, + "mne/preprocessing/maxwell.py": { + "a": 19, + "d": 14 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 179, + "d": 29 + }, + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12967.json b/doc/sphinxext/prs/12967.json new file mode 100644 index 00000000000..50dd7318c05 --- /dev/null +++ b/doc/sphinxext/prs/12967.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d810cc5977f95f76c3eb82db6b7003af68bd6176", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 4, + "d": 4 + }, + "mne/_fiff/proc_history.py": { + "a": 34, + "d": 20 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 143, + "d": 56 + }, + "mne/_fiff/write.py": { + "a": 3, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 16, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/utils/_testing.py": { + "a": 2, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12968.json b/doc/sphinxext/prs/12968.json new file mode 100644 index 00000000000..bb6875d2c42 --- /dev/null +++ b/doc/sphinxext/prs/12968.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ede7f013a1b741da3fb4a565acad7a6c2e60e48e", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/changes/devel/12968.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1297.json b/doc/sphinxext/prs/1297.json new file mode 100644 index 00000000000..6a37d667383 --- /dev/null +++ b/doc/sphinxext/prs/1297.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e5a05af86da0ded5e29fd7fe44ff8dd2a9711387", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 17, + "d": 3 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12970.json b/doc/sphinxext/prs/12970.json new file mode 100644 index 00000000000..90499062257 --- /dev/null +++ b/doc/sphinxext/prs/12970.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a267b8622587e32f992bb4c876a0f3a32d397b14", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12971.json b/doc/sphinxext/prs/12971.json new file mode 100644 index 00000000000..fb6fe3789ff --- /dev/null +++ b/doc/sphinxext/prs/12971.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "aa9dcb646d71f5b6fbee02a6bba12dcbe0ebeb60", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/12971.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/_eeglab.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12972.json b/doc/sphinxext/prs/12972.json new file mode 100644 index 00000000000..67025bb3dbd --- /dev/null +++ b/doc/sphinxext/prs/12972.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cf8047f80fe1c674eeb6ce6a0e494e568eb585fb", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12973.json b/doc/sphinxext/prs/12973.json new file mode 100644 index 00000000000..9585ba745a8 --- /dev/null +++ b/doc/sphinxext/prs/12973.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5a1f3a0966b3b941b7e70e4f6dc5df9bbf844f6e", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12975.json b/doc/sphinxext/prs/12975.json new file mode 100644 index 00000000000..6a1532a6dd6 --- /dev/null +++ b/doc/sphinxext/prs/12975.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "eac719920b66ec854e818560854b6d9ea9fa5e80", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12976.json b/doc/sphinxext/prs/12976.json new file mode 100644 index 00000000000..5cb40dac7cf --- /dev/null +++ b/doc/sphinxext/prs/12976.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a99aa9f798d878ba144b3f6f3127311841d5d2b5", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/related_software.py": { + "a": 4, + "d": 4 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12978.json b/doc/sphinxext/prs/12978.json new file mode 100644 index 00000000000..26f750a1348 --- /dev/null +++ b/doc/sphinxext/prs/12978.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "149453c569645b5cf29103f1dc0e54e5286ce23d", + "authors": [ + { + "n": "Jacob", + "e": "jacob.phelan.jp@gmail.com" + } + ], + 
"changes": { + "doc/changes/devel/12978.other.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12983.json b/doc/sphinxext/prs/12983.json new file mode 100644 index 00000000000..abb29eec03f --- /dev/null +++ b/doc/sphinxext/prs/12983.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f5d043a34e3dda3c83401a68d5722c1c664dc925", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/decoding/transformer.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12984.json b/doc/sphinxext/prs/12984.json new file mode 100644 index 00000000000..3d0d3692a9b --- /dev/null +++ b/doc/sphinxext/prs/12984.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "57076c91ff9e8d49e8119648c9ac38fa5f8b38d7", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/_fiff/reference.py": { + "a": 8, + "d": 11 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12986.json b/doc/sphinxext/prs/12986.json new file mode 100644 index 00000000000..1b6ef2f2116 --- /dev/null +++ b/doc/sphinxext/prs/12986.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "98f9949ce867a9247a14c508f51fadeb991d4618", + "authors": [ + { + "n": "ZIYI ZENG", + "e": "ziyizeng@link.cuhk.edu.cn" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12986.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12988.json b/doc/sphinxext/prs/12988.json new file mode 100644 index 00000000000..d9dd345caf4 --- /dev/null +++ b/doc/sphinxext/prs/12988.json @@ -0,0 +1,231 @@ +{ + "merge_commit_sha": "535b401eee7c8c1ce5ce79d99a754af8a374c0fc", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "doc/sphinxext/contrib_avatars.py": { + "a": 2, + "d": 1 + }, + "mne/_fiff/matrix.py": { + "a": 3, + "d": 4 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 2, + "d": 2 + }, + "mne/_ola.py": { + "a": 3, + "d": 9 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 5 + }, + "mne/channels/layout.py": { + "a": 6, + "d": 11 + }, + "mne/channels/montage.py": { + "a": 4, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 10, + "d": 10 + }, + "mne/cov.py": { + "a": 28, + "d": 25 + }, + "mne/datasets/utils.py": { + "a": 5, + "d": 2 + }, + "mne/decoding/ssd.py": { + "a": 6, + "d": 8 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 6, + "d": 4 + }, + "mne/event.py": { + "a": 11, + "d": 13 + }, + "mne/filter.py": { + "a": 15, + "d": 9 + }, + "mne/forward/_field_interpolation.py": { + "a": 7, + "d": 4 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 25, + "d": 23 + }, + 
"mne/inverse_sparse/mxne_optim.py": { + "a": 32, + "d": 23 + }, + "mne/io/array/array.py": { + "a": 5, + "d": 7 + }, + "mne/io/base.py": { + "a": 8, + "d": 8 + }, + "mne/io/bti/bti.py": { + "a": 8, + "d": 9 + }, + "mne/io/ctf/ctf.py": { + "a": 13, + "d": 7 + }, + "mne/io/ctf/info.py": { + "a": 5, + "d": 5 + }, + "mne/io/egi/egimff.py": { + "a": 4, + "d": 4 + }, + "mne/io/egi/general.py": { + "a": 3, + "d": 2 + }, + "mne/io/fiff/raw.py": { + "a": 5, + "d": 7 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 14, + "d": 15 + }, + "mne/label.py": { + "a": 32, + "d": 31 + }, + "mne/minimum_norm/inverse.py": { + "a": 18, + "d": 25 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 5, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 18, + "d": 11 + }, + "mne/preprocessing/ica.py": { + "a": 14, + "d": 16 + }, + "mne/preprocessing/infomax_.py": { + "a": 5, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 21, + "d": 21 + }, + "mne/preprocessing/xdawn.py": { + "a": 4, + "d": 4 + }, + "mne/rank.py": { + "a": 10, + "d": 6 + }, + "mne/simulation/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/simulation/source.py": { + "a": 1, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 18, + "d": 21 + }, + "mne/source_space/_source_space.py": { + "a": 35, + "d": 24 + }, + "mne/stats/cluster_level.py": { + "a": 13, + "d": 7 + }, + "mne/surface.py": { + "a": 14, + "d": 11 + }, + "mne/tests/test_line_endings.py": { + "a": 2, + "d": 4 + }, + "mne/transforms.py": { + "a": 5, + "d": 3 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + }, + "pyproject.toml": { + "a": 3, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1299.json b/doc/sphinxext/prs/1299.json new file mode 100644 index 00000000000..6ffd2bdd256 --- /dev/null +++ b/doc/sphinxext/prs/1299.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "86a5d10f164856d7afe3a97e17bb8974d2668ece", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12991.json b/doc/sphinxext/prs/12991.json new file mode 100644 index 00000000000..e309a6df7c4 --- /dev/null +++ b/doc/sphinxext/prs/12991.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e06873d4d6e0a84bbd7d6b4a01ae4af340c7bedd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 2 + }, + "tools/install_pre_requirements.sh": { + "a": 6, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12995.json b/doc/sphinxext/prs/12995.json new file mode 100644 index 00000000000..48d967dc7eb --- /dev/null +++ b/doc/sphinxext/prs/12995.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d987c2793148eb855e04e3ae9d4871e486d01e46", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12996.json b/doc/sphinxext/prs/12996.json new file mode 100644 index 00000000000..b931ebcc541 --- /dev/null +++ 
b/doc/sphinxext/prs/12996.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "ec77e7c36ce4a2d7897122513395b0e2418ea151", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/ant/ant.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12997.json b/doc/sphinxext/prs/12997.json new file mode 100644 index 00000000000..7df9ddfa26b --- /dev/null +++ b/doc/sphinxext/prs/12997.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0e09163e80013a426d419bda0c560ff3d48209bb", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "tutorials/forward/35_eeg_no_mri.py": { + "a": 9, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12998.json b/doc/sphinxext/prs/12998.json new file mode 100644 index 00000000000..289620794a2 --- /dev/null +++ b/doc/sphinxext/prs/12998.json @@ -0,0 +1,131 @@ +{ + "merge_commit_sha": "096243fe43c936587190a8e9e8e86b155446800e", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/12931.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12935.json": { + "a": 135, + "d": 0 + }, + "doc/sphinxext/prs/12936.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12937.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12938.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12941.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12942.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12947.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12948.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12951.json": { + "a": 59, + "d": 0 + }, + "doc/sphinxext/prs/12955.json": { + "a": 43, + "d": 0 + }, + "doc/sphinxext/prs/12957.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12958.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12960.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12962.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12966.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12967.json": { + "a": 43, + "d": 0 + }, + "doc/sphinxext/prs/12968.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12970.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12971.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12972.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12973.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12975.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12976.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12978.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12983.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12984.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12986.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12988.json": { + "a": 231, + "d": 0 + }, + "doc/sphinxext/prs/12991.json": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12999.json b/doc/sphinxext/prs/12999.json new file mode 100644 index 00000000000..4c71e401c33 --- /dev/null +++ b/doc/sphinxext/prs/12999.json @@ -0,0 
+1,27 @@ +{ + "merge_commit_sha": "ed2fd8da1e16e449cdfe779491542fecad6ecbcb", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/autofix.yml": { + "a": 1, + "d": 1 + }, + ".mailmap": { + "a": 3, + "d": 0 + }, + "doc/sphinxext/credit_tools.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13.json b/doc/sphinxext/prs/13.json new file mode 100644 index 00000000000..dd33542d934 --- /dev/null +++ b/doc/sphinxext/prs/13.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cedb560fe02feeaae4e35113b9305d43b0bb2a79", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/stats/plot_fdr_stats_evoked.py": { + "a": 80, + "d": 0 + }, + "mne/stats/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/stats/multi_comp.py": { + "a": 98, + "d": 0 + }, + "mne/stats/tests/test_multi_comp.py": { + "a": 36, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13000.json b/doc/sphinxext/prs/13000.json new file mode 100644 index 00000000000..d813ea98560 --- /dev/null +++ b/doc/sphinxext/prs/13000.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "53792b12a2d60229ada3c946987a184f3915c535", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/mne_doc_utils.py": { + "a": 0, + "d": 2 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13001.json b/doc/sphinxext/prs/13001.json new file mode 100644 index 00000000000..032c9a5714f --- /dev/null +++ b/doc/sphinxext/prs/13001.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b19ac58598f55975ca7f6458fa7bfa7d21e00ae0", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13003.json b/doc/sphinxext/prs/13003.json new file mode 100644 index 00000000000..cda225124fa --- /dev/null +++ b/doc/sphinxext/prs/13003.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "7071a0e24e121b28c851565f2e64a0128941e83a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/13003.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/_digitization.py": { + "a": 24, + "d": 5 + }, + "mne/_fiff/write.py": { + "a": 5, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 36, + "d": 12 + }, + "mne/channels/tests/test_montage.py": { + "a": 33, + "d": 14 + }, + "mne/viz/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13007.json b/doc/sphinxext/prs/13007.json new file mode 100644 index 00000000000..7553cfa048b --- /dev/null +++ b/doc/sphinxext/prs/13007.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d47c22fc2ba21d31b46ae7816eba054d5e13add9", + "authors": [ + { + "n": "Jacob Woessner", + "e": 
"Woessner.jacob@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13007.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 19, + "d": 3 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13009.json b/doc/sphinxext/prs/13009.json new file mode 100644 index 00000000000..ecf5894f489 --- /dev/null +++ b/doc/sphinxext/prs/13009.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "391fd88dca91bca78aeacc996fa284712c3ea33b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/mne_doc_utils.py": { + "a": 22, + "d": 0 + }, + "examples/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1301.json b/doc/sphinxext/prs/1301.json new file mode 100644 index 00000000000..c67efbf240f --- /dev/null +++ b/doc/sphinxext/prs/1301.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6ae6fe0af7cc3e7621a559077039049f6dd7918a", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 0 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 11, + "d": 23 + }, + "mne/evoked.py": { + "a": 16, + "d": 5 + }, + "mne/tests/test_viz.py": { + "a": 5, + "d": 2 + }, + "mne/viz.py": { + "a": 71, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13010.json b/doc/sphinxext/prs/13010.json new file mode 100644 index 00000000000..4f4b88a5406 --- /dev/null +++ b/doc/sphinxext/prs/13010.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4967ecd3f44968408046948b738977aa41521ea2", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/api/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/__init__.pyi": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13011.json b/doc/sphinxext/prs/13011.json new file mode 100644 index 00000000000..ab057c0e83c --- /dev/null +++ b/doc/sphinxext/prs/13011.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "b329515933915fd077495ea41de876119ac04c97", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Thomas Grainger", + "e": "tagrain@gmail.com" + } + ], + "changes": { + ".github/workflows/autofix.yml": { + "a": 2, + "d": 0 + }, + ".github/workflows/codeql-analysis.yml": { + "a": 5, + "d": 3 + }, + ".github/workflows/credit.yml": { + "a": 6, + "d": 5 + }, + ".github/workflows/release.yml": { + "a": 2, + "d": 0 + }, + ".github/workflows/tests.yml": { + "a": 3, + "d": 0 + }, + ".pre-commit-config.yaml": { + "a": 5, + "d": 0 + }, + "doc/changes/devel/13011.other.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13012.json b/doc/sphinxext/prs/13012.json new file mode 100644 index 00000000000..066940ab6f5 --- /dev/null +++ b/doc/sphinxext/prs/13012.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9b7b5596ff7c089939bca179b98f1ce0094cb668", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": 
"114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13012.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/install/advanced.rst": { + "a": 14, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 8, + "d": 5 + }, + "mne/viz/backends/_qt.py": { + "a": 0, + "d": 1 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 12, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13015.json b/doc/sphinxext/prs/13015.json new file mode 100644 index 00000000000..5b137162759 --- /dev/null +++ b/doc/sphinxext/prs/13015.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "31436fecd881a0e6fb29b83b4f36764ae81dabc7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/automerge.yml": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13017.json b/doc/sphinxext/prs/13017.json new file mode 100644 index 00000000000..49b2ec0202b --- /dev/null +++ b/doc/sphinxext/prs/13017.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b3eb56cf1e8993940daa1df68d5220f3110ecdbb", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13018.json b/doc/sphinxext/prs/13018.json new file mode 100644 index 00000000000..ef9951d9902 --- /dev/null +++ b/doc/sphinxext/prs/13018.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "521d667f9802655a71166823fd890fbd00bae5a8", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13018.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/sphinxext/related_software.py": { + "a": 4, + "d": 0 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 65, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13019.json b/doc/sphinxext/prs/13019.json new file mode 100644 index 00000000000..0d6677670c2 --- /dev/null +++ b/doc/sphinxext/prs/13019.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4f1f4bbbc1a9d0f828e28de9be4e69c05f86d9f5", + "authors": [ + { + "n": "Santeri Ruuskanen", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13019.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/circle.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_circle.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13020.json b/doc/sphinxext/prs/13020.json new file mode 100644 index 00000000000..5513e4c6153 --- /dev/null +++ b/doc/sphinxext/prs/13020.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "730358c1e2e17baf0491c04e0c8269382a29c613", + "authors": [ + { + "n": "Santeri Ruuskanen", + "e": null + } + ], + "changes": { + "doc/development/contributing.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13021.json b/doc/sphinxext/prs/13021.json new file mode 100644 index 00000000000..1d9a78d45cc --- /dev/null +++ b/doc/sphinxext/prs/13021.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "bd4a160215be67e2de1df7e0a86e27425b074807", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 3, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 1 + }, + "doc/changes/devel/13021.dependency.rst": { + "a": 1, + "d": 0 + }, + "mne/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 33, + "d": 22 + }, + "mne/utils/tests/test_config.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_docs.py": { + "a": 16, + "d": 15 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 5, + "d": 0 + }, + "tools/github_actions_env_vars.sh": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13028.json b/doc/sphinxext/prs/13028.json new file mode 100644 index 00000000000..7a162141000 --- /dev/null +++ b/doc/sphinxext/prs/13028.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d8149546e7010ee7d2b5a95335aeed0e9b202aaa", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/devel/13028.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 16, + "d": 12 + }, + "mne/time_frequency/tfr.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13029.json b/doc/sphinxext/prs/13029.json new file mode 100644 index 00000000000..33a84c3f2c6 --- /dev/null +++ b/doc/sphinxext/prs/13029.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "f3a7fde522d69bc5bbc15844718812c7ab6480f4", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/12896.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12995.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12996.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/12997.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12998.json": { + "a": 131, + "d": 0 + }, + "doc/sphinxext/prs/12999.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13000.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13001.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13003.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/13007.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13009.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13010.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13011.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/13012.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13015.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13017.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13018.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13019.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13020.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13021.json": { + "a": 59, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13031.json b/doc/sphinxext/prs/13031.json new file mode 100644 index 00000000000..428346e1b90 --- /dev/null +++ b/doc/sphinxext/prs/13031.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5b06ca4bb8f9138bf4af85ea3171d95df07462c5", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/credit.yml": { + "a": 2, + 
"d": 2 + }, + "doc/documentation/cited.rst": { + "a": 3, + "d": 3 + }, + "doc/sphinxext/prs/13029.json": { + "a": 91, + "d": 0 + }, + "doc/sphinxext/prs/6915.json": { + "a": 43, + "d": 0 + }, + "tools/dev/update_credit_json.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13032.json b/doc/sphinxext/prs/13032.json new file mode 100644 index 00000000000..041aa7dfe1b --- /dev/null +++ b/doc/sphinxext/prs/13032.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b38385ef90d0fd8214d54b15c5fd91333c3bc032", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13035.json b/doc/sphinxext/prs/13035.json new file mode 100644 index 00000000000..e9cf38b09c9 --- /dev/null +++ b/doc/sphinxext/prs/13035.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "dcd26258c5fd83fd3974d73ba2b1e2773c33bc3d", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/io/ant/ant.py": { + "a": 14, + "d": 12 + }, + "mne/io/ant/tests/test_ant.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 23, + "d": 24 + }, + "mne/utils/tests/test_check.py": { + "a": 7, + "d": 0 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 6 + }, + "tutorials/intro/70_report.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13036.json b/doc/sphinxext/prs/13036.json new file mode 100644 index 00000000000..6ce98904756 --- /dev/null +++ b/doc/sphinxext/prs/13036.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "41dbdd55eaff77314440ebc8700e0e58b1183113", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13036.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 26, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13037.json b/doc/sphinxext/prs/13037.json new file mode 100644 index 00000000000..7de2d953e82 --- /dev/null +++ b/doc/sphinxext/prs/13037.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "4f53a3732917dd1dbc91d4725ae79fc1c7ad4661", + "authors": [ + { + "n": "Steinn Hauser Magnússon", + "e": null + }, + { + "n": "Emma Bailey", + "e": "bailey@cbs.mpg.de" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Steinn Magnusson", + "e": "s.magnusson@senec.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "emma-bailey", + "e": "93327939+emma-bailey@users.noreply.github.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 8, + "d": 1 + }, + "doc/api/datasets.rst": { + "a": 1, + "d": 0 + }, + "doc/api/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/13037.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 
2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 10, + "d": 0 + }, + "examples/preprocessing/esg_rm_heart_artefact_pcaobs.py": { + "a": 196, + "d": 0 + }, + "mne/datasets/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 29, + "d": 1 + }, + "mne/preprocessing/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/_pca_obs.py": { + "a": 333, + "d": 0 + }, + "mne/preprocessing/tests/test_pca_obs.py": { + "a": 107, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 3, + "d": 0 + }, + "pyproject.toml": { + "a": 1, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13038.json b/doc/sphinxext/prs/13038.json new file mode 100644 index 00000000000..c988969d1dd --- /dev/null +++ b/doc/sphinxext/prs/13038.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "14938b9657b255a38aa96482a4aaf410e8865859", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 2 + }, + "doc/sphinxext/prs/13031.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13032.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13035.json": { + "a": 51, + "d": 0 + }, + "doc/sphinxext/prs/13036.json": { + "a": 27, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 3, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 0, + "d": 1 + }, + "tools/hooks/update_environment_file.py": { + "a": 0, + "d": 3 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13040.json b/doc/sphinxext/prs/13040.json new file mode 100644 index 00000000000..97a70ab4a32 --- /dev/null +++ b/doc/sphinxext/prs/13040.json @@ -0,0 +1,171 @@ +{ + "merge_commit_sha": "637c231f40d8e6e022ab3ae04fa30911cbe0f78f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 4, + "d": 4 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_flash_bem.py": { + "a": 0, + "d": 20 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 2 + }, + "mne/conftest.py": { + "a": 5, + "d": 14 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 3, + "d": 23 + }, + "mne/datasets/eegbci/tests/test_eegbci.py": { + "a": 0, + "d": 12 + }, + "mne/decoding/csp.py": { + "a": 0, + "d": 13 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_base.py": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 2, + "d": 6 + }, + "mne/io/base.py": { + "a": 1, + "d": 18 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 0, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 0, + "d": 8 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 0, + "d": 16 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 0, 
+ "d": 50 + }, + "mne/utils/linalg.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 5 + }, + "mne/viz/backends/_abstract.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 3 + }, + "mne/viz/backends/_qt.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 0, + "d": 44 + }, + "mne/viz/backends/tests/test_abstract.py": { + "a": 0, + "d": 2 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 3, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/montage.py": { + "a": 4, + "d": 20 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 5 + }, + "pyproject.toml": { + "a": 4, + "d": 4 + }, + "tools/dev/Makefile": { + "a": 3, + "d": 0 + }, + "tools/environment_old.yml": { + "a": 10, + "d": 9 + }, + "tools/hooks/sync_dependencies.py": { + "a": 11, + "d": 1 + }, + "tools/hooks/update_environment_file.py": { + "a": 37, + "d": 7 + }, + "tools/vulture_allowlist.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13041.json b/doc/sphinxext/prs/13041.json new file mode 100644 index 00000000000..26790d0e352 --- /dev/null +++ b/doc/sphinxext/prs/13041.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "90d9c91fe5d46e0390cb22c943fd6e0dbb578838", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/_acquisition.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/_frequencies.html.jinja": { + "a": 62, + "d": 0 + }, + "mne/html_templates/repr/spectrum.html.jinja": { + "a": 9, + "d": 48 + }, + "mne/time_frequency/spectrum.py": { + "a": 3, + "d": 1 + }, + "tools/hooks/sync_dependencies.py": { + "a": 1, + "d": 2 + }, + "tutorials/time-freq/10_spectrum_class.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13042.json b/doc/sphinxext/prs/13042.json new file mode 100644 index 00000000000..5f2d05f7013 --- /dev/null +++ b/doc/sphinxext/prs/13042.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ee2d0caa98b23fe40bcddea2a666b6803f64c3dd", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13042.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13043.json b/doc/sphinxext/prs/13043.json new file mode 100644 index 00000000000..a67a958fa80 --- /dev/null +++ b/doc/sphinxext/prs/13043.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a51e32a9341bd1b44183356f6116af0821546fda", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13044.json b/doc/sphinxext/prs/13044.json new file mode 100644 index 00000000000..0619fccfb98 --- /dev/null +++ b/doc/sphinxext/prs/13044.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "7b316cb500a5039a9e79ada02f117f650801d17b", + "authors": [ + { + "n": "Antoine Collas", + "e": null + }, + { + "n": 
"pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13044.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 8, + "d": 0 + }, + "examples/preprocessing/interpolate_to.py": { + "a": 81, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 157, + "d": 1 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 83, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13045.json b/doc/sphinxext/prs/13045.json new file mode 100644 index 00000000000..cb3db89fefd --- /dev/null +++ b/doc/sphinxext/prs/13045.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "fd8c1eed5209393e6a2527342b61354efcb813ee", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/13028.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13038.json": { + "a": 63, + "d": 0 + }, + "doc/sphinxext/prs/13040.json": { + "a": 171, + "d": 0 + }, + "doc/sphinxext/prs/13041.json": { + "a": 43, + "d": 0 + }, + "doc/sphinxext/prs/13042.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13043.json": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13046.json b/doc/sphinxext/prs/13046.json new file mode 100644 index 00000000000..f946cb5a90a --- /dev/null +++ b/doc/sphinxext/prs/13046.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d81b7a66b692cfc3065ba260f4702f842d45e414", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/v1.7.rst": { + "a": 4, + "d": 4 + }, + "doc/changes/v1.9.rst": { + "a": 4, + "d": 4 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13047.json b/doc/sphinxext/prs/13047.json new file mode 100644 index 00000000000..9267f7fada0 --- /dev/null +++ b/doc/sphinxext/prs/13047.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "47ea36043076e4e38896eda6241c6cb0d4c25938", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13048.json b/doc/sphinxext/prs/13048.json new file mode 100644 index 00000000000..a111abed437 --- /dev/null +++ b/doc/sphinxext/prs/13048.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6bc7dfb3535ef954b7a6fcbad93887abe2aa7c92", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13048.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked_field.py": { + "a": 30, + "d": 39 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13049.json b/doc/sphinxext/prs/13049.json new file mode 100644 index 00000000000..f66fcc1b618 --- /dev/null +++ b/doc/sphinxext/prs/13049.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "df350211b9177a26a2c5fe1e76ebc8e9d6cb4d99", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + 
"n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/tests.yml": { + "a": 9, + "d": 11 + }, + "doc/conf.py": { + "a": 5, + "d": 0 + }, + "doc/development/contributing.rst": { + "a": 1, + "d": 1 + }, + "doc/install/installers.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 4, + "d": 0 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 6, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 5, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 4, + "d": 4 + }, + "mne/export/tests/test_export.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 13, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/transforms.py": { + "a": 2, + "d": 3 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/hooks/update_environment_file.py": { + "a": 8, + "d": 38 + }, + "tools/install_pre_requirements.sh": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13051.json b/doc/sphinxext/prs/13051.json new file mode 100644 index 00000000000..8a9acb2b286 --- /dev/null +++ b/doc/sphinxext/prs/13051.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5fec4e024a963c3f628693ab172d5b77cbafe6db", + "authors": [ + { + "n": "Simon Kern", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 15, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13053.json b/doc/sphinxext/prs/13053.json new file mode 100644 index 00000000000..fc85ba79c3d --- /dev/null +++ b/doc/sphinxext/prs/13053.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "dedb3921b0ebadcc5a630234530604706c6faddd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 7, + "d": 6 + }, + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "azure-pipelines.yml": { + "a": 10, + "d": 13 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 54, + "d": 3 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_parallel.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 0, + "d": 8 + }, + "mne/viz/tests/test_evoked.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 4 + }, + "pyproject.toml": { + "a": 0, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 0 + }, + "tools/get_minimal_commands.sh": { + "a": 5, + "d": 5 + }, + "tools/get_testing_version.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_env_vars.sh": { + "a": 16, + "d": 12 + }, + "tools/github_actions_test.sh": { + "a": 15, + "d": 9 + }, + "tools/hooks/update_environment_file.py": { + "a": 3, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 6, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13054.json b/doc/sphinxext/prs/13054.json new file mode 100644 index 00000000000..3d8a7b9f99f --- /dev/null +++ b/doc/sphinxext/prs/13054.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2ae61edccb2af5b5f9f3a89a3131499b5c229c27", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/api/time_frequency.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/13054.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13056.json b/doc/sphinxext/prs/13056.json new file mode 100644 index 00000000000..94aa0bd39b6 --- /dev/null +++ b/doc/sphinxext/prs/13056.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2abb7b220ed2580e141158499919300cfa1f6a3b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13056.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 11, + "d": 4 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 30, + "d": 18 + }, + "mne/_fiff/write.py": { + "a": 5, + "d": 4 + }, + "mne/utils/_testing.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13058.json b/doc/sphinxext/prs/13058.json new file mode 100644 index 00000000000..ef7ec02786e --- /dev/null +++ b/doc/sphinxext/prs/13058.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "f82d3993617d2a34744eb955385448c67672d6ec", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/api/time_frequency.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/13058.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 68, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 54, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 6 + }, + "mne/utils/numerics.py": { + "a": 33, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1306.json b/doc/sphinxext/prs/1306.json new file mode 100644 index 00000000000..305057a5186 --- /dev/null +++ b/doc/sphinxext/prs/1306.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f848f2cb86b0d20c4256a388622654a8addb526c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/channels.py": { + "a": 81, + "d": 0 + }, + "mne/io/tests/test_channels.py": { + "a": 45, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13060.json b/doc/sphinxext/prs/13060.json new file mode 100644 index 00000000000..67c9ee55115 --- /dev/null +++ b/doc/sphinxext/prs/13060.json @@ -0,0 +1,423 @@ +{ + "merge_commit_sha": "d472c268cb39fb6e4bf0dad24c802b17efdd4a33", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/credit_tools.py": { + "a": 4, + "d": 4 + }, + "doc/sphinxext/related_software.py": { + "a": 3, + "d": 3 + }, + "doc/sphinxext/unit_role.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/vector_mne_solution.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/evoked_whitening.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/_digitization.py": { + "a": 1, + "d": 2 + }, + "mne/_fiff/meas_info.py": { + "a": 3, + "d": 4 + }, + "mne/_fiff/proj.py": { + "a": 7, + "d": 9 + }, + "mne/_fiff/reference.py": { + "a": 3, + "d": 3 + }, + "mne/_fiff/tag.py": { + "a": 1, + "d": 2 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_pick.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 5, + "d": 5 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 6, + "d": 8 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 6 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_setup_source_space.py": { + "a": 1, + "d": 2 + }, + "mne/coreg.py": { + "a": 3, + "d": 6 + }, + "mne/cov.py": { + "a": 5, + "d": 5 + }, + "mne/datasets/_fetch.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/config.py": { + "a": 4, + "d": 5 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 1, + "d": 4 + }, + "mne/epochs.py": { + "a": 6, + "d": 10 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 5, + "d": 7 + }, + "mne/export/_egimff.py": { + "a": 1, + "d": 1 + }, + "mne/export/_export.py": { + "a": 1, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 2, + "d": 3 + }, + "mne/forward/_field_interpolation.py": { + "a": 2, + "d": 3 + }, + "mne/forward/_make_forward.py": { + "a": 5, + "d": 6 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 7 + }, + "mne/gui/_coreg.py": { + "a": 2, + "d": 4 + }, + "mne/html_templates/_templates.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/_array.py": { + "a": 0, + "d": 0 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 3, + "d": 3 + }, + "mne/io/base.py": { + "a": 2, + "d": 3 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 2 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 3, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 3, + "d": 3 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + 
"mne/io/fieldtrip/fieldtrip.py": { + "a": 1, + "d": 1 + }, + "mne/io/fil/tests/test_fil.py": { + "a": 9, + "d": 9 + }, + "mne/io/neuralynx/tests/test_neuralynx.py": { + "a": 6, + "d": 6 + }, + "mne/io/nirx/nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 5, + "d": 9 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 2 + }, + "mne/morph.py": { + "a": 3, + "d": 6 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/hfc.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 8, + "d": 10 + }, + "mne/preprocessing/ieeg/_volume.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 2 + }, + "mne/report/report.py": { + "a": 7, + "d": 10 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 5 + }, + "mne/source_space/_source_space.py": { + "a": 4, + "d": 6 + }, + "mne/surface.py": { + "a": 8, + "d": 9 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 6 + }, + "mne/tests/test_filter.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/_stft.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/csd.py": { + "a": 2, + "d": 3 + }, + "mne/time_frequency/spectrum.py": { + "a": 9, + "d": 9 + }, + "mne/time_frequency/tfr.py": { + "a": 11, + "d": 12 + }, + "mne/utils/_logging.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 5, + "d": 8 + }, + "mne/utils/config.py": { + "a": 6, + "d": 8 + }, + "mne/utils/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 6 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 6, + "d": 6 + }, + "mne/viz/_proj.py": { + "a": 1, + "d": 2 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 3 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 9 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tools/dev/ensure_headers.py": { + "a": 6, + "d": 6 + }, + "tools/hooks/update_environment_file.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 2, + "d": 2 + }, + "tutorials/forward/30_forward.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/15_inplace.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 1, + "d": 2 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13061.json b/doc/sphinxext/prs/13061.json new file mode 100644 index 00000000000..62ddd1e87e9 --- /dev/null +++ b/doc/sphinxext/prs/13061.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9ef5c23927e377d0a66169185f394297ea29d7b4", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"doc/changes/devel/13056.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 11, + "d": 4 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 30, + "d": 18 + }, + "mne/_fiff/write.py": { + "a": 5, + "d": 4 + }, + "mne/utils/_testing.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13062.json b/doc/sphinxext/prs/13062.json new file mode 100644 index 00000000000..4160927b960 --- /dev/null +++ b/doc/sphinxext/prs/13062.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c0da91db9098b92ec3c20d8c0e237d0e02683865", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/devel/13062.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 9, + "d": 6 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13063.json b/doc/sphinxext/prs/13063.json new file mode 100644 index 00000000000..77684e67814 --- /dev/null +++ b/doc/sphinxext/prs/13063.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "087779c3bd5ba84dbcef7f3689a7d70f0b045da7", + "authors": [ + { + "n": "Santeri Ruuskanen", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13063.bugfix.rst": { + "a": 1, + "d": 0 + }, + "examples/visualization/evoked_topomap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 11, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 24, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13064.json b/doc/sphinxext/prs/13064.json new file mode 100644 index 00000000000..c18116b1cd1 --- /dev/null +++ b/doc/sphinxext/prs/13064.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "672bdf4357f815c63dfab91d9c8e257266bceb21", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/devel/13062.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 9, + "d": 6 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13065.json b/doc/sphinxext/prs/13065.json new file mode 100644 index 00000000000..071c98cd26f --- /dev/null +++ b/doc/sphinxext/prs/13065.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "5f2b7f1a33d42c5a110f67e098f9efcf92be7fff", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/13065.bugfix.rst": { + "a": 7, + "d": 0 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/base.py": { + "a": 8, + "d": 3 + }, + "mne/decoding/csp.py": { + "a": 43, + "d": 56 + }, + "mne/decoding/ems.py": { + "a": 19, + "d": 6 + }, + "mne/decoding/search_light.py": { + "a": 56, + "d": 25 + }, + 
"mne/decoding/ssd.py": { + "a": 75, + "d": 63 + }, + "mne/decoding/tests/test_base.py": { + "a": 10, + "d": 4 + }, + "mne/decoding/tests/test_csp.py": { + "a": 31, + "d": 15 + }, + "mne/decoding/tests/test_ems.py": { + "a": 7, + "d": 0 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 15, + "d": 15 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 44, + "d": 10 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 17, + "d": 6 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 71, + "d": 17 + }, + "mne/decoding/time_frequency.py": { + "a": 23, + "d": 9 + }, + "mne/decoding/transformer.py": { + "a": 127, + "d": 113 + }, + "mne/time_frequency/multitaper.py": { + "a": 8, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 3, + "d": 2 + }, + "tools/vulture_allowlist.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13067.json b/doc/sphinxext/prs/13067.json new file mode 100644 index 00000000000..e9a5c0338f2 --- /dev/null +++ b/doc/sphinxext/prs/13067.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8b9fc973e0bdaca9a5ba0c9333637722ed323633", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13067.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 12, + "d": 7 + }, + "mne/time_frequency/tfr.py": { + "a": 26, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13069.json b/doc/sphinxext/prs/13069.json new file mode 100644 index 00000000000..05319605699 --- /dev/null +++ b/doc/sphinxext/prs/13069.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "bd8c318537ffcabf4c5fadd4347ec5068bb91b67", + "authors": [ + { + "n": "Simon Kern", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13069.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 6, + "d": 3 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1307.json b/doc/sphinxext/prs/1307.json new file mode 100644 index 00000000000..f89e4d228df --- /dev/null +++ b/doc/sphinxext/prs/1307.json @@ -0,0 +1,195 @@ +{ + "merge_commit_sha": "22b49010f1e479b3761f09483ae01e3830ee0fd0", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 9, + "d": 4 + }, + "mne/epochs.py": { + "a": 8, + "d": 3 + }, + "mne/event.py": { + "a": 7, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 11, + "d": 7 + }, + "mne/forward/tests/test_forward.py": { + "a": 11, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 8, + "d": 6 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 8, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 5, + "d": 5 + }, + "mne/io/bti/tests/data/exported4D_linux_raw.fif": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/exported4D_solaris_raw.fif": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/cov.py": { + "a": 3, + "d": 3 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 
3, + "d": 3 + }, + "mne/io/evoked.py": { + "a": 7, + "d": 3 + }, + "mne/io/fiff/raw.py": { + "a": 7, + "d": 2 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 11, + "d": 3 + }, + "mne/io/kit/tests/data/test_bin_raw.fif": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 4, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/io/proj.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/data/test-1-eve.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-proj.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-proj.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_compensator.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_evoked.py": { + "a": 24, + "d": 16 + }, + "mne/layouts/tests/test_layout.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 13, + "d": 9 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 8, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 9, + "d": 4 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 18, + "d": 9 + }, + "mne/proj.py": { + "a": 11, + "d": 5 + }, + "mne/source_space.py": { + "a": 14, + "d": 7 + }, + "mne/tests/test_cov.py": { + "a": 18, + "d": 9 + }, + "mne/tests/test_epochs.py": { + "a": 22, + "d": 10 + }, + "mne/tests/test_event.py": { + "a": 22, + "d": 11 + }, + "mne/tests/test_label.py": { + "a": 5, + "d": 3 + }, + "mne/tests/test_proj.py": { + "a": 26, + "d": 8 + }, + "mne/tests/test_source_estimate.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 17, + "d": 4 + }, + "mne/tests/test_surface.py": { + "a": 3, + "d": 2 + }, + "mne/transforms.py": { + "a": 3, + "d": 5 + }, + "mne/utils.py": { + "a": 19, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13070.json b/doc/sphinxext/prs/13070.json new file mode 100644 index 00000000000..635bf15d76c --- /dev/null +++ b/doc/sphinxext/prs/13070.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "7daeceef4a2b80f4d849ec55a72a6450020c8c0c", + "authors": [ + { + "n": "Roy Eric", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/13070.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 4, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13071.json b/doc/sphinxext/prs/13071.json new file mode 100644 index 00000000000..7efed2633d3 --- /dev/null +++ b/doc/sphinxext/prs/13071.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "27386d7bc8240500efcfc618e2fa57f0bcea1ace", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/autofix.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13072.json b/doc/sphinxext/prs/13072.json new file mode 100644 index 00000000000..2eda6ba7134 --- /dev/null +++ b/doc/sphinxext/prs/13072.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "96d22f87acb631c7cf04f5fcf0462f0956ba6f88", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13067.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/export/_export.py": { + "a": 9, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 0, + "d": 16 + }, + "mne/time_frequency/tfr.py": { + "a": 33, + "d": 19 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13073.json b/doc/sphinxext/prs/13073.json new file mode 100644 index 00000000000..adf94fea4c7 --- /dev/null +++ b/doc/sphinxext/prs/13073.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "99e985845759005c2d809c705241918589aa2a0e", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/check_changelog.yml": { + "a": 3, + "d": 0 + }, + ".github/workflows/circle_artifacts.yml": { + "a": 3, + "d": 0 + }, + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13075.json b/doc/sphinxext/prs/13075.json new file mode 100644 index 00000000000..94c1ec32100 --- /dev/null +++ b/doc/sphinxext/prs/13075.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "f97a916bc79942df1cc5578ed98cddbcf1aef907", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12656.bugfix.rst": { + "a": 1, + "d": 1 + }, + "mne/export/_export.py": { + "a": 5, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 5 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_env_vars.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13076.json b/doc/sphinxext/prs/13076.json new file mode 100644 index 00000000000..bf05a88b721 --- /dev/null +++ b/doc/sphinxext/prs/13076.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6028982a3e34bf843d4694f60565a0fbb821ed2e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/mne_doc_utils.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/related_software.py": { + "a": 16, + "d": 8 + }, + "mne/viz/backends/_pyvista.py": { + "a": 0, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 5, + "d": 5 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13077.json b/doc/sphinxext/prs/13077.json new file mode 100644 index 00000000000..5ba2d52db90 --- /dev/null +++ b/doc/sphinxext/prs/13077.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4dc9fe44df5c0367ef9d250b18214832c90196fb", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/mne_doc_utils.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/related_software.py": { + "a": 16, + "d": 8 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 5, + "d": 5 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13080.json 
b/doc/sphinxext/prs/13080.json new file mode 100644 index 00000000000..f85b2afaea5 --- /dev/null +++ b/doc/sphinxext/prs/13080.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "2119b22dbe5dba4ecbec06e1902be08c1b765371", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/13080.apichange.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/13080.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/_ola.py": { + "a": 39, + "d": 24 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 2, + "d": 10 + }, + "mne/preprocessing/maxwell.py": { + "a": 351, + "d": 236 + }, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 185, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_ola.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13081.json b/doc/sphinxext/prs/13081.json new file mode 100644 index 00000000000..2d430f5a953 --- /dev/null +++ b/doc/sphinxext/prs/13081.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d596b6ddedac0680da889c5305ab4ab5d7626743", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Roy Eric", + "e": "139973278+Randomidous@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13070.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 4, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13082.json b/doc/sphinxext/prs/13082.json new file mode 100644 index 00000000000..1eb2d448059 --- /dev/null +++ b/doc/sphinxext/prs/13082.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3db12ff5357d4d6666f3d2257e91cee877e83234", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13082.bugfix.rst": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/movement_detection.py": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 0, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13083.json b/doc/sphinxext/prs/13083.json new file mode 100644 index 00000000000..2eaa9d71258 --- /dev/null +++ b/doc/sphinxext/prs/13083.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "77195a2a5ff42fa0e193f889e712b279b6c65fd6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13083.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/_digitization.py": { + "a": 3, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 4, + "d": 22 + }, + "mne/_fiff/open.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tag.py": { + "a": 17, + "d": 7 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13084.json b/doc/sphinxext/prs/13084.json new file mode 100644 index 00000000000..c08d21af74b --- /dev/null +++ 
b/doc/sphinxext/prs/13084.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d3d0bf520624a4eea2e8d34a927284c4bb19b87a", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13082.bugfix.rst": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/movement_detection.py": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 0, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13086.json b/doc/sphinxext/prs/13086.json new file mode 100644 index 00000000000..261d6a9a0e3 --- /dev/null +++ b/doc/sphinxext/prs/13086.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "631ddb3e9da67456947e23c6a070aa869d853a26", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/_mpl_figure.py": { + "a": 2, + "d": 2 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13087.json b/doc/sphinxext/prs/13087.json new file mode 100644 index 00000000000..1606b92a8c4 --- /dev/null +++ b/doc/sphinxext/prs/13087.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4037ead8fe9ec27d7342263c574b99a7bc537104", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_mpl_figure.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13088.json b/doc/sphinxext/prs/13088.json new file mode 100644 index 00000000000..f23c44206b3 --- /dev/null +++ b/doc/sphinxext/prs/13088.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "45fb777fbc53c88888032d40a14940c985079a93", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "mne/_fiff/proj.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13089.json b/doc/sphinxext/prs/13089.json new file mode 100644 index 00000000000..b0ad8b2fce2 --- /dev/null +++ b/doc/sphinxext/prs/13089.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "715540a823ae5dec335bee0b2499f1f7183c19c4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 4, + "d": 5 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13093.json b/doc/sphinxext/prs/13093.json new file mode 100644 index 00000000000..548ee491766 --- /dev/null +++ b/doc/sphinxext/prs/13093.json @@ -0,0 +1,187 @@ +{ + "merge_commit_sha": "24ef9aa2c618217c6532613abe318f4d5f65f5e3", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".mailmap": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 5, + "d": 1 + }, + "doc/sphinxext/credit_tools.py": { + "a": 16, + 
"d": 16 + }, + "doc/sphinxext/prs/12071.json": { + "a": 67, + "d": 0 + }, + "doc/sphinxext/prs/12656.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12828.json": { + "a": 43, + "d": 0 + }, + "doc/sphinxext/prs/12910.json": { + "a": 51, + "d": 0 + }, + "doc/sphinxext/prs/13037.json": { + "a": 107, + "d": 0 + }, + "doc/sphinxext/prs/13045.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13046.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13047.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13048.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13049.json": { + "a": 87, + "d": 0 + }, + "doc/sphinxext/prs/13051.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13053.json": { + "a": 87, + "d": 0 + }, + "doc/sphinxext/prs/13054.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13056.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13058.json": { + "a": 43, + "d": 0 + }, + "doc/sphinxext/prs/13060.json": { + "a": 423, + "d": 0 + }, + "doc/sphinxext/prs/13061.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13062.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13063.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13064.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13065.json": { + "a": 103, + "d": 0 + }, + "doc/sphinxext/prs/13067.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13069.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13070.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13071.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13072.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13073.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13075.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13076.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13077.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13081.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13082.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13084.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13086.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13087.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13088.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13089.json": { + "a": 23, + "d": 0 + }, + "examples/io/read_xdf.py": { + "a": 1, + "d": 2 + }, + "tools/install_pre_requirements.sh": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13096.json b/doc/sphinxext/prs/13096.json new file mode 100644 index 00000000000..6b04f0c57cd --- /dev/null +++ b/doc/sphinxext/prs/13096.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "c4de9b61cd5f40a8771a853d483621c735390ec2", + "authors": [ + { + "n": "Young Truong", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Bru", + "e": "b.aristimunha@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13096.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/_eeglab.py": { + "a": 195, + "d": 7 + }, + "mne/io/eeglab/eeglab.py": { + "a": 63, + "d": 23 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 143, + "d": 2 + }, + "tools/azure_dependencies.sh": { + "a": 3, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 4, + "d": 0 + }, + "tools/github_actions_env_vars.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13097.json b/doc/sphinxext/prs/13097.json new file mode 100644 
index 00000000000..e9b51d22673 --- /dev/null +++ b/doc/sphinxext/prs/13097.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9e7fe95f99016709dcad50c9494ebce4323e4cfd", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13097.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13099.json b/doc/sphinxext/prs/13099.json new file mode 100644 index 00000000000..18c7dc27787 --- /dev/null +++ b/doc/sphinxext/prs/13099.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "21a185fe8c680da6aced6e6ba06309a140082842", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13100.json b/doc/sphinxext/prs/13100.json new file mode 100644 index 00000000000..8093e0a90e9 --- /dev/null +++ b/doc/sphinxext/prs/13100.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e4cc4e27106774455347b5d95d8b7b58953af10b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/13100.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 154, + "d": 154 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 0, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13101.json b/doc/sphinxext/prs/13101.json new file mode 100644 index 00000000000..00058e14413 --- /dev/null +++ b/doc/sphinxext/prs/13101.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "afd69afe9ed9b07d48e860537d37fc349def6635", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/13101.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 7, + "d": 3 + }, + "mne/viz/evoked_field.py": { + "a": 5, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 15, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13104.json b/doc/sphinxext/prs/13104.json new file mode 100644 index 00000000000..8433d97ace3 --- /dev/null +++ b/doc/sphinxext/prs/13104.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8eaa521270c17096586e45211f4df8a9f6bb57be", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13106.json b/doc/sphinxext/prs/13106.json new file mode 100644 index 00000000000..1684477fe4e --- /dev/null +++ b/doc/sphinxext/prs/13106.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "47b036953128b1fdc8e7d9631a89db9b296dce73", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/related_software.py": { + "a": 4, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13107.json b/doc/sphinxext/prs/13107.json new file mode 100644 index 00000000000..32286c0624a --- /dev/null +++ b/doc/sphinxext/prs/13107.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "64ed25561841e6d91413564222e498c44244ad94", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13107.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 11, + "d": 2 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13110.json b/doc/sphinxext/prs/13110.json new file mode 100644 index 00000000000..58cb8661ea4 --- /dev/null +++ b/doc/sphinxext/prs/13110.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cf5ef5fea4412dcc4b8f7c79c14b0faf9342efb1", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13112.json b/doc/sphinxext/prs/13112.json new file mode 100644 index 00000000000..faf066b8280 --- /dev/null +++ b/doc/sphinxext/prs/13112.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "fa11865cd994405906ca270ba3e78144676beb3f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13112.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 1, + "d": 3 + }, + "mne/chpi.py": { + "a": 3, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 0, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 0 + }, + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 0 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13113.json b/doc/sphinxext/prs/13113.json new file mode 100644 index 00000000000..c599bda3a09 --- /dev/null +++ b/doc/sphinxext/prs/13113.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b4a7c64e5b36747cfd3621e7a043ddd839db4e96", + "authors": [ + { + "n": "GreasyCat", + "e": null + }, + { + "n": "Scott Huberty", + "e": "52462026+scott-huberty@users.noreply.github.com" + }, + { + "n": 
"pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13113.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13114.json b/doc/sphinxext/prs/13114.json new file mode 100644 index 00000000000..4439ce7eaf0 --- /dev/null +++ b/doc/sphinxext/prs/13114.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "64e01e54a3e149460c86a082ecb913993cd93dc6", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13083.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/_digitization.py": { + "a": 3, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 4, + "d": 22 + }, + "mne/_fiff/open.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tag.py": { + "a": 17, + "d": 7 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13116.json b/doc/sphinxext/prs/13116.json new file mode 100644 index 00000000000..e96e4ad3f5b --- /dev/null +++ b/doc/sphinxext/prs/13116.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4c98f8ae5524fea24968fb1aa2bf2028886d5934", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/development/governance.rst": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13120.json b/doc/sphinxext/prs/13120.json new file mode 100644 index 00000000000..7f237da3a38 --- /dev/null +++ b/doc/sphinxext/prs/13120.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bd0da9bcb4905ac3ad1276df346e91254db7df90", + "authors": [ + { + "n": "Yixiao Shen", + "e": null + } + ], + "changes": { + "doc/changes/devel/13184.bugfix.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13123.json b/doc/sphinxext/prs/13123.json new file mode 100644 index 00000000000..41e7a014fdb --- /dev/null +++ b/doc/sphinxext/prs/13123.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c0f166596edfc66e9ad15f7891048d27de08b803", + "authors": [ + { + "n": "Shresth Keshari", + "e": "kesharishresth5@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13123.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13125.json b/doc/sphinxext/prs/13125.json new file mode 100644 index 00000000000..30e00acbbd6 --- /dev/null +++ b/doc/sphinxext/prs/13125.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a79809d018d4e261b39e5eed27402528befc9ac8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "tools/install_pre_requirements.sh": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13126.json b/doc/sphinxext/prs/13126.json new file mode 100644 index 00000000000..9034f95069f --- /dev/null +++ b/doc/sphinxext/prs/13126.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c570cfccee22f9869ae1f40c0ce5e8046433f083", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": 
"66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13129.json b/doc/sphinxext/prs/13129.json new file mode 100644 index 00000000000..cb83ac45521 --- /dev/null +++ b/doc/sphinxext/prs/13129.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "11df2fabe0a5f53e450a73aefd307e8418c55d9b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_fiff/_digitization.py": { + "a": 2, + "d": 1 + }, + "mne/_fiff/constants.py": { + "a": 16, + "d": 4 + }, + "mne/_fiff/tests/test_constants.py": { + "a": 35, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 26, + "d": 13 + }, + "mne/transforms.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13132.json b/doc/sphinxext/prs/13132.json new file mode 100644 index 00000000000..e0ae49e4093 --- /dev/null +++ b/doc/sphinxext/prs/13132.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "71669461372359ec238f9dfa8aff5499938b690e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/preprocessing/movement_compensation.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/otp.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 6, + "d": 1 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13134.json b/doc/sphinxext/prs/13134.json new file mode 100644 index 00000000000..81cde97ba38 --- /dev/null +++ b/doc/sphinxext/prs/13134.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2587cda68e3f1795a8aa1c05084bf503ad91078c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "pyproject.toml": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13135.json b/doc/sphinxext/prs/13135.json new file mode 100644 index 00000000000..73dd4c36e8a --- /dev/null +++ b/doc/sphinxext/prs/13135.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "4cce83f50a1086dd3ff5642f45f9e971ec858fa2", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/sphinxext/credit_tools.py": { + "a": 2, + "d": 1 + }, + "doc/sphinxext/prs/13044.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/13080.json": { + "a": 63, + "d": 0 + }, + "doc/sphinxext/prs/13083.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13093.json": { + "a": 187, + "d": 0 + }, + "doc/sphinxext/prs/13097.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13099.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13100.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13101.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13104.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13106.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13107.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13110.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13113.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13114.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13116.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13123.json": { + 
"a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13125.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13126.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13129.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13132.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13134.json": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13140.json b/doc/sphinxext/prs/13140.json new file mode 100644 index 00000000000..9ec2dba13cd --- /dev/null +++ b/doc/sphinxext/prs/13140.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1fe211a7b70d30cb1bb0c8eaa195469c1df2616f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/mne_doc_utils.py": { + "a": 27, + "d": 106 + }, + "mne/event.py": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 9, + "d": 2 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 4, + "d": 0 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13141.json b/doc/sphinxext/prs/13141.json new file mode 100644 index 00000000000..daa2b9f34c9 --- /dev/null +++ b/doc/sphinxext/prs/13141.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "266ccc9540f159609976fc6e1b2259c93923b2e3", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13143.json b/doc/sphinxext/prs/13143.json new file mode 100644 index 00000000000..5eb11ca288f --- /dev/null +++ b/doc/sphinxext/prs/13143.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "73c4a7cb844a13377ce68f31d7d5dec822067986", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_includes/forward.rst": { + "a": 6, + "d": 6 + }, + "doc/documentation/cookbook.rst": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13144.json b/doc/sphinxext/prs/13144.json new file mode 100644 index 00000000000..13efae843c8 --- /dev/null +++ b/doc/sphinxext/prs/13144.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "61ebcf9e40302b16384c999aa89143407f7c80b1", + "authors": [ + { + "n": "Harrison Ritz", + "e": "hritz@princeton.edu" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13144.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 41, + "d": 4 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 0, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 18, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 15, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 102, + "d": 26 + }, + "tutorials/preprocessing/80_opm_processing.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13145.json b/doc/sphinxext/prs/13145.json new file mode 100644 index 00000000000..6fe7f24cf50 --- /dev/null +++ b/doc/sphinxext/prs/13145.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a5073bf977b73e9981a2222cc546c93081914a7d", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/changes/devel/13145.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/egi/events.py": { + "a": 9, + "d": 3 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 13, + "d": 0 
+ } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13146.json b/doc/sphinxext/prs/13146.json new file mode 100644 index 00000000000..47a2c5b2b99 --- /dev/null +++ b/doc/sphinxext/prs/13146.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e7a593add4bbda513cff94f8ba7a1166919dc376", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/tests/test_abstract.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13147.json b/doc/sphinxext/prs/13147.json new file mode 100644 index 00000000000..cea5666ae4c --- /dev/null +++ b/doc/sphinxext/prs/13147.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "be27cf8dd32922ba6e15599f7746da635e9856c0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/parallel.py": { + "a": 15, + "d": 0 + }, + "mne/tests/test_filter.py": { + "a": 10, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13148.json b/doc/sphinxext/prs/13148.json new file mode 100644 index 00000000000..d61b8ec3daf --- /dev/null +++ b/doc/sphinxext/prs/13148.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7c531e49aa46083efd507921fe53a6ac0c89de49", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "mne/io/egi/egimff.py": { + "a": 9, + "d": 9 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13149.json b/doc/sphinxext/prs/13149.json new file mode 100644 index 00000000000..07c5f9f2e01 --- /dev/null +++ b/doc/sphinxext/prs/13149.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2cdc4af85a7d352f4e277a898b6fb118fc286233", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_fiff/meas_info.py": { + "a": 1, + "d": 0 + }, + "mne/dipole.py": { + "a": 13, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13152.json b/doc/sphinxext/prs/13152.json new file mode 100644 index 00000000000..1c5fc665bf7 --- /dev/null +++ b/doc/sphinxext/prs/13152.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ccaefd4439a77c3c3fa3618aa1ec41bae947e0c4", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13153.json b/doc/sphinxext/prs/13153.json new file mode 100644 index 00000000000..72f735012fb --- /dev/null +++ b/doc/sphinxext/prs/13153.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "df4a3f1601418bf1dfd1a3621c34bfa3b1188f8b", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/beamformer/_rap_music.py": { + "a": 2, + "d": 2 + }, + "mne/dipole.py": { + "a": 74, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13155.json b/doc/sphinxext/prs/13155.json new file mode 100644 index 00000000000..6e901901cf3 --- /dev/null +++ b/doc/sphinxext/prs/13155.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3077476caa1e91df86ab3f52df7ce6a983b7fd86", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 0, + 
"d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13162.json b/doc/sphinxext/prs/13162.json new file mode 100644 index 00000000000..97f64b14cc4 --- /dev/null +++ b/doc/sphinxext/prs/13162.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1fb9dd6d5a3aa6734b9b07474c484d743338e8c2", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 7, + "d": 2 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13163.json b/doc/sphinxext/prs/13163.json new file mode 100644 index 00000000000..7ddb03bb82e --- /dev/null +++ b/doc/sphinxext/prs/13163.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b43f4d48c4cd9aa70fb8f83c61f96986b8ab9180", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/development/contributing.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13164.json b/doc/sphinxext/prs/13164.json new file mode 100644 index 00000000000..310b43818bb --- /dev/null +++ b/doc/sphinxext/prs/13164.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b6cd5f4e5e42622950e369eeb44118c6500ecc70", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13171.json b/doc/sphinxext/prs/13171.json new file mode 100644 index 00000000000..bb662310cfe --- /dev/null +++ b/doc/sphinxext/prs/13171.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "511b7a5c28151542e1547dc7273e5c6ee67f3c7e", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/report/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13172.json b/doc/sphinxext/prs/13172.json new file mode 100644 index 00000000000..6c62e5273aa --- /dev/null +++ b/doc/sphinxext/prs/13172.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "618932d7fbb6ef7e4d9f110c90a1cc77f51ab082", + "authors": [ + { + "n": "Peter J. 
Molfese", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13172.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/bem.py": { + "a": 19, + "d": 0 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13173.json b/doc/sphinxext/prs/13173.json new file mode 100644 index 00000000000..d6514438782 --- /dev/null +++ b/doc/sphinxext/prs/13173.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a745fdc7f5b5925da4514d9f68ba6f9ffbf9403d", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13174.json b/doc/sphinxext/prs/13174.json new file mode 100644 index 00000000000..5737f7bba52 --- /dev/null +++ b/doc/sphinxext/prs/13174.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e04c4d92bd6ce6cb6dca4e9d427a8897c5a19c20", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/devel/13174.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/export/_edf.py": { + "a": 12, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 5, + "d": 2 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "tools/install_pre_requirements.sh": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13178.json b/doc/sphinxext/prs/13178.json new file mode 100644 index 00000000000..b0c066d5a20 --- /dev/null +++ b/doc/sphinxext/prs/13178.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "704b3930e03c1428dc73481d8232c6838b4b1047", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13178.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 5, + "d": 1 + }, + "mne/bem.py": { + "a": 11, + "d": 31 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 16, + "d": 19 + }, + "mne/simulation/tests/test_raw.py": { + "a": 7, + "d": 3 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_bem.py": { + "a": 7, + "d": 8 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13179.json b/doc/sphinxext/prs/13179.json new file mode 100644 index 00000000000..6028707180e --- /dev/null +++ b/doc/sphinxext/prs/13179.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "9b93f0329127f31bd2e6c117767f10d45edd0527", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "pre-commit-ci[bot]", + "e": 
"66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13179.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/13179.newfeature.rst": { + "a": 1, + "d": 0 + }, + "examples/visualization/eeg_on_scalp.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/mne_helmet.py": { + "a": 9, + "d": 3 + }, + "mne/forward/_field_interpolation.py": { + "a": 5, + "d": 1 + }, + "mne/surface.py": { + "a": 23, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 40, + "d": 28 + }, + "mne/viz/backends/_abstract.py": { + "a": 8, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 8, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 18, + "d": 6 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 1, + "d": 2 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1318.json b/doc/sphinxext/prs/1318.json new file mode 100644 index 00000000000..5b61922546d --- /dev/null +++ b/doc/sphinxext/prs/1318.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ecca44f7a2ee94cd54ff1bdee88bae46ef006978", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 63, + "d": 48 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13180.json b/doc/sphinxext/prs/13180.json new file mode 100644 index 00000000000..76e76c922a6 --- /dev/null +++ b/doc/sphinxext/prs/13180.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "e65a55965958829a0cfc66d45a57175214241d16", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 6, + "d": 3 + }, + "doc/sphinxext/prs/13135.json": { + "a": 107, + "d": 0 + }, + "doc/sphinxext/prs/13140.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13141.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13143.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13145.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13146.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13147.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13148.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13149.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13152.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13153.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13155.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13162.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13163.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13164.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13171.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13174.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13178.json": { + "a": 63, + "d": 0 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13183.json b/doc/sphinxext/prs/13183.json new file mode 100644 index 00000000000..ad99e840d29 --- /dev/null +++ b/doc/sphinxext/prs/13183.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "68f666e655b9fddea8a95ffb38c5c9d8a9e7a4ff", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + 
"doc/changes/devel/13183.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/cuda.py": { + "a": 6, + "d": 3 + }, + "mne/filter.py": { + "a": 20, + "d": 3 + }, + "mne/tests/test_filter.py": { + "a": 100, + "d": 73 + }, + "mne/viz/_figure.py": { + "a": 47, + "d": 10 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13184.json b/doc/sphinxext/prs/13184.json new file mode 100644 index 00000000000..5e6c181ef8b --- /dev/null +++ b/doc/sphinxext/prs/13184.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "729bdf38c88d61ea806a53fa3eb47fe2af0725cb", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/devel/13184.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/13184.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 57, + "d": 38 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 23, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1319.json b/doc/sphinxext/prs/1319.json new file mode 100644 index 00000000000..47bedb1cae0 --- /dev/null +++ b/doc/sphinxext/prs/1319.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "46a6babf98bf697ae6174c3b6fcf8361fc299316", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 7, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 7, + "d": 0 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 21, + "d": 23 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 30, + "d": 98 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 35, + "d": 169 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/bads.py": { + "a": 36, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 126, + "d": 25 + }, + "mne/preprocessing/eog.py": { + "a": 103, + "d": 32 + }, + "mne/preprocessing/ica.py": { + "a": 1020, + "d": 560 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 5, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 111, + "d": 78 + }, + "mne/tests/test_viz.py": { + "a": 64, + "d": 4 + }, + "mne/utils.py": { + "a": 36, + "d": 3 + }, + "mne/viz.py": { + "a": 491, + "d": 94 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13197.json b/doc/sphinxext/prs/13197.json new file mode 100644 index 00000000000..ef39bd7dc8e --- /dev/null +++ b/doc/sphinxext/prs/13197.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "daf7abfeef38fbab44c071d88c530b638bd9a372", + "authors": [ + { + "n": "Dominik Welke", + "e": "dominik.welke@web.de" + } + ], + "changes": { + "examples/io/read_impedances.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13198.json b/doc/sphinxext/prs/13198.json new file mode 100644 index 00000000000..84313ed33b2 --- /dev/null +++ b/doc/sphinxext/prs/13198.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "80ca92561f35e481805629b0302daa3f8adfa24e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/circle_artifacts.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1320.json b/doc/sphinxext/prs/1320.json new file mode 100644 index 00000000000..8ea436e3aeb --- /dev/null +++ b/doc/sphinxext/prs/1320.json @@ 
-0,0 +1,35 @@ +{ + "merge_commit_sha": "26cda4e781ceee46acb4c7202b7273fd95e7e15e", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_coregistration_transform.py": { + "a": 31, + "d": 0 + }, + "mne/forward/_field_interpolation.py": { + "a": 4, + "d": 24 + }, + "mne/tests/test_viz.py": { + "a": 19, + "d": 2 + }, + "mne/transforms.py": { + "a": 20, + "d": 0 + }, + "mne/viz.py": { + "a": 87, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13202.json b/doc/sphinxext/prs/13202.json new file mode 100644 index 00000000000..8d03d015f40 --- /dev/null +++ b/doc/sphinxext/prs/13202.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6ac3c2dc2619270b63c85403579baa98b9085c62", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/mne_tools_suite.rst": { + "a": 0, + "d": 20 + }, + "doc/sphinxext/related_software.py": { + "a": 1, + "d": 8 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13205.json b/doc/sphinxext/prs/13205.json new file mode 100644 index 00000000000..3fd2879847c --- /dev/null +++ b/doc/sphinxext/prs/13205.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "21b2f344be1c5c7a9b81aad8c955dfcabd3107d6", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + } + ], + "changes": { + "doc/_includes/institutional-partners.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13207.json b/doc/sphinxext/prs/13207.json new file mode 100644 index 00000000000..7a81181edde --- /dev/null +++ b/doc/sphinxext/prs/13207.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8ea69e113429aee4af7f461b73c38a9518963248", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/install_pre_requirements.sh": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13208.json b/doc/sphinxext/prs/13208.json new file mode 100644 index 00000000000..0955ea904a6 --- /dev/null +++ b/doc/sphinxext/prs/13208.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "46e7071d29d2babf21d4a9d199a3af30120c143e", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13208.bugfix.rst.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 6, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 22, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13210.json b/doc/sphinxext/prs/13210.json new file mode 100644 index 00000000000..0d97753be81 --- /dev/null +++ b/doc/sphinxext/prs/13210.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "855cb53c3bcc501aef3e06699e9b84c33a53ef7f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/datasets/opm_data.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 25, + "d": 2 + }, + "mne/viz/tests/test_utils.py": { + "a": 10, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 13, + "d": 13 + } + } +} \ No newline at 
end of file diff --git a/doc/sphinxext/prs/13212.json b/doc/sphinxext/prs/13212.json new file mode 100644 index 00000000000..6b4575c84d0 --- /dev/null +++ b/doc/sphinxext/prs/13212.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a109ad5c711cddd27d45ec1ce50e1a221e55210a", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13214.json b/doc/sphinxext/prs/13214.json new file mode 100644 index 00000000000..08598cd4fdd --- /dev/null +++ b/doc/sphinxext/prs/13214.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a0dda10cd84d6b122b2f8e83efc091df8f4dc025", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13217.json b/doc/sphinxext/prs/13217.json new file mode 100644 index 00000000000..eec25d977ef --- /dev/null +++ b/doc/sphinxext/prs/13217.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e9d96334afc13d3eb026e42c8e96b2d6101c9b9", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/time_frequency/spectrum.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13218.json b/doc/sphinxext/prs/13218.json new file mode 100644 index 00000000000..3451bdc0111 --- /dev/null +++ b/doc/sphinxext/prs/13218.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d089b459c3a7a276d015e553920b2201ba1bdde9", + "authors": [ + { + "n": "Wei", + "e": null + } + ], + "changes": { + "doc/changes/devel/13218.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/gui/_coreg.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13222.json b/doc/sphinxext/prs/13222.json new file mode 100644 index 00000000000..725d277dfaa --- /dev/null +++ b/doc/sphinxext/prs/13222.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f6c6bd78ac21b3433d5066e6ac8e539a9756e24c", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/zizmor.yml": { + "a": 5, + "d": 0 + }, + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13225.json b/doc/sphinxext/prs/13225.json new file mode 100644 index 00000000000..389fdf5c92d --- /dev/null +++ b/doc/sphinxext/prs/13225.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1d34cd9414403f5b8d9e5c6ff5d5f3093f927def", + "authors": [ + { + "n": "Katia", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 10, + "d": 0 + }, + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/changes/devel/13225.other.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13226.json b/doc/sphinxext/prs/13226.json new file 
mode 100644 index 00000000000..e6291441608 --- /dev/null +++ b/doc/sphinxext/prs/13226.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c18908cb6c9f010b52222a2f4d617da47af72426", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 17, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13228.json b/doc/sphinxext/prs/13228.json new file mode 100644 index 00000000000..3c9442df59d --- /dev/null +++ b/doc/sphinxext/prs/13228.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "80a20717e912ee2a7982225e7e2c5ed6b70032f5", + "authors": [ + { + "n": "Pierre Guetschel", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13228.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 319, + "d": 30 + }, + "mne/epochs.py": { + "a": 19, + "d": 5 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 170, + "d": 11 + }, + "mne/tests/test_epochs.py": { + "a": 14, + "d": 3 + }, + "mne/utils/check.py": { + "a": 14, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1323.json b/doc/sphinxext/prs/1323.json new file mode 100644 index 00000000000..29797328c26 --- /dev/null +++ b/doc/sphinxext/prs/1323.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "732a49bf9307bc96698462416cf10178c9b11a67", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 0, + "d": 1 + }, + "mne/io/channels.py": { + "a": 4, + "d": 0 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 6, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13232.json b/doc/sphinxext/prs/13232.json new file mode 100644 index 00000000000..7a9afeab554 --- /dev/null +++ b/doc/sphinxext/prs/13232.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0efaf9666de854bc5f35334423b794b63a29a5d9", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/13232.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/forward/_field_interpolation.py": { + "a": 7, + "d": 5 + }, + "mne/report/report.py": { + "a": 8, + "d": 5 + }, + "mne/report/tests/test_report.py": { + "a": 5, + "d": 0 + }, + "mne/tests/test_transforms.py": { + "a": 7, + "d": 2 + }, + "mne/transforms.py": { + "a": 24, + "d": 13 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13233.json b/doc/sphinxext/prs/13233.json new file mode 100644 index 00000000000..f063bdf347f --- /dev/null +++ b/doc/sphinxext/prs/13233.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2bb3ac284683ba60bee782f70e54bd1dcb5d4269", + "authors": [ + { + "n": "Sebastian Jentschke", + "e": null + } + ], + "changes": { + "doc/changes/devel/13233.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13234.json b/doc/sphinxext/prs/13234.json new file mode 100644 index 00000000000..bac959c6a31 --- /dev/null +++ b/doc/sphinxext/prs/13234.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "dda454d7e4bd2766a2bdff37c06583fde99ba747", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13234.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/defaults.py": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 48, + "d": 16 + }, + "mne/report/tests/test_report.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13235.json b/doc/sphinxext/prs/13235.json new file mode 100644 index 00000000000..80682d0d218 --- /dev/null +++ b/doc/sphinxext/prs/13235.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0febd730da2b068f3218ee1a961a40debcc0770e", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13236.json b/doc/sphinxext/prs/13236.json new file mode 100644 index 00000000000..aa4292b2378 --- /dev/null +++ b/doc/sphinxext/prs/13236.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7f583df4ff4c4c098883e1a00ec5300f7e57db3f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13237.json b/doc/sphinxext/prs/13237.json new file mode 100644 index 00000000000..6d61f4c6a62 --- /dev/null +++ b/doc/sphinxext/prs/13237.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "5df1721b488070e3b3928dface9dd0b8c39a3bef", + "authors": [ + { + 
"n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/credit_tools.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/prs/13120.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13144.json": { + "a": 51, + "d": 0 + }, + "doc/sphinxext/prs/13172.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13173.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13179.json": { + "a": 75, + "d": 0 + }, + "doc/sphinxext/prs/13180.json": { + "a": 95, + "d": 0 + }, + "doc/sphinxext/prs/13183.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/13184.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13197.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13198.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13202.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13205.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13207.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13210.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13212.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13214.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13217.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13218.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13222.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13225.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13226.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13233.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13235.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13236.json": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13239.json b/doc/sphinxext/prs/13239.json new file mode 100644 index 00000000000..703aadd1acf --- /dev/null +++ b/doc/sphinxext/prs/13239.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8f0fef638081525f2b13f239d66938ad0b2bdb1a", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13240.json b/doc/sphinxext/prs/13240.json new file mode 100644 index 00000000000..d01d073580e --- /dev/null +++ b/doc/sphinxext/prs/13240.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7a50fd785e3353803e0a14df5425cc1d327251b4", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13241.json b/doc/sphinxext/prs/13241.json new file mode 100644 index 00000000000..2457b4be4c8 --- /dev/null +++ b/doc/sphinxext/prs/13241.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "1ea5dbbdb056592980c3daa9816d91683abf13ba", + "authors": [ + { + "n": "Bru", + "e": "b.aristimunha@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13241.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/forward/_lead_dots.py": { + "a": 3, + "d": 3 + }, + "mne/utils/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/utils/config.py": { + "a": 76, + "d": 4 + }, + "mne/utils/tests/test_config.py": { + "a": 103, + "d": 0 + }, + "pyproject.toml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/13245.json b/doc/sphinxext/prs/13245.json new file mode 100644 index 00000000000..2996bd5c464 --- /dev/null +++ b/doc/sphinxext/prs/13245.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6696cec2edf456a626694056560fc8121d66174f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/install_pre_requirements.sh": { + "a": 9, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13246.json b/doc/sphinxext/prs/13246.json new file mode 100644 index 00000000000..115aa7f76ef --- /dev/null +++ b/doc/sphinxext/prs/13246.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9849b3e4030740848d6899268a163445ec2a8b0c", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13247.json b/doc/sphinxext/prs/13247.json new file mode 100644 index 00000000000..21f2e4d6da5 --- /dev/null +++ b/doc/sphinxext/prs/13247.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "70f0255909ddaf428ef55517ad3c0116819937c2", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/report/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13249.json b/doc/sphinxext/prs/13249.json new file mode 100644 index 00000000000..7d2b8b780d5 --- /dev/null +++ b/doc/sphinxext/prs/13249.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e0cc99b5babebe90f13e92b270fd860ac40b43a3", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/documentation/datasets.rst": { + "a": 2, + "d": 1 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/10_background_freesurfer.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13251.json b/doc/sphinxext/prs/13251.json new file mode 100644 index 00000000000..de9b32ad434 --- /dev/null +++ b/doc/sphinxext/prs/13251.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "181fea1e7e0bd73c2ec5cf67391cf598ad1af7bf", + "authors": [ + { + "n": null, + "e": "myd7349@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 0 + }, + "doc/changes/dev/13251.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/nihon/nihon.py": { + "a": 55, + "d": 11 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 8, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13255.json b/doc/sphinxext/prs/13255.json new file mode 100644 index 00000000000..25420df1af4 --- /dev/null +++ b/doc/sphinxext/prs/13255.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "68b5523a1d2e301fb0e4f2460c67d42c465e46b7", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 23, + "d": 53 + }, + "mne/utils/config.py": { + "a": 2, + "d": 4 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/13256.json b/doc/sphinxext/prs/13256.json new file mode 100644 index 00000000000..c1810a40760 --- /dev/null +++ b/doc/sphinxext/prs/13256.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "cc5e2e64a9adefdfaa5d438e839d4447c8f05613", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/tests/test_commands.py": { + "a": 4, + "d": 4 + }, + "mne/cov.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 2, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 8, + "d": 0 + }, + "pyproject.toml": { + "a": 1, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13259.json b/doc/sphinxext/prs/13259.json new file mode 100644 index 00000000000..26f64c40dac --- /dev/null +++ b/doc/sphinxext/prs/13259.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "b4c2a3b6f725185ba6afe3b50aeab3449c343d27", + "authors": [ + { + "n": "Genuster", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/ged.rst": { + "a": 107, + "d": 0 + }, + "doc/api/decoding.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/devel/13259.newfeature.rst": { + "a": 3, + "d": 0 + }, + "doc/documentation/implementation.rst": { + "a": 8, + "d": 0 + }, + "doc/references.bib": { + "a": 12, + "d": 0 + }, + "mne/decoding/__init__.pyi": { + "a": 2, + "d": 0 + }, + "mne/decoding/_covs_ged.py": { + "a": 284, + "d": 0 + }, + "mne/decoding/_ged.py": { + "a": 131, + "d": 0 + }, + "mne/decoding/_mod_ged.py": { + "a": 132, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 252, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 103, + "d": 227 + }, + "mne/decoding/ssd.py": { + "a": 52, + "d": 161 + }, + "mne/decoding/tests/test_ged.py": { + "a": 388, + "d": 0 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 94, + "d": 9 + }, + "mne/decoding/xdawn.py": { + "a": 207, + "d": 0 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 20, + "d": 16 + }, + "mne/preprocessing/xdawn.py": { + "a": 4, + "d": 152 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 0 + }, + "tools/vulture_allowlist.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1326.json b/doc/sphinxext/prs/1326.json new file mode 100644 index 00000000000..725c9e980e8 --- /dev/null +++ b/doc/sphinxext/prs/1326.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5b9e92ed4589c4a37749bfd24fa03bd3194fd552", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fixes.py": { + "a": 24, + "d": 0 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_fixes.py": { + "a": 16, + "d": 3 + }, + "mne/tests/test_label.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13261.json b/doc/sphinxext/prs/13261.json new file mode 100644 index 00000000000..41a14e4f59d --- /dev/null +++ b/doc/sphinxext/prs/13261.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "99275fa2f1be6b260bfa38f0eef4dd605bd8686c", + "authors": [ + { + "n": "LaurentLM", + "e": "laurent.lementec@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13261.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + 
"a": 24, + "d": 9 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 15, + "d": 2 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 2, + "d": 2 + }, + "mne/utils/tests/test_config.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13262.json b/doc/sphinxext/prs/13262.json new file mode 100644 index 00000000000..737f055b9f9 --- /dev/null +++ b/doc/sphinxext/prs/13262.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9e5a929a309c66b0ce618795e6cc8123367f9896", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "doc/sphinxext/mne_doc_utils.py": { + "a": 2, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13266.json b/doc/sphinxext/prs/13266.json new file mode 100644 index 00000000000..06d9e6545b3 --- /dev/null +++ b/doc/sphinxext/prs/13266.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fc9078a93170ea1d2ccc88bffb85b48509757c4d", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "tutorials/visualization/10_publication_figure.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13267.json b/doc/sphinxext/prs/13267.json new file mode 100644 index 00000000000..e7d8b118a83 --- /dev/null +++ b/doc/sphinxext/prs/13267.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "b0e9a8596136c937eb90c86dc78c1870bf24853f", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".mailmap": { + "a": 1, + "d": 0 + }, + "doc/sphinxext/prs/13208.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13232.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13234.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13237.json": { + "a": 111, + "d": 0 + }, + "doc/sphinxext/prs/13239.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13240.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13241.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/13245.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13246.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13247.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13249.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13255.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13256.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13262.json": { + "a": 27, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13268.json b/doc/sphinxext/prs/13268.json new file mode 100644 index 00000000000..d48d7fc463f --- /dev/null +++ b/doc/sphinxext/prs/13268.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ecc33755375a49df8c5c27fb6263230b4d0b0193", + "authors": [ + { + "n": "Theodore Papadopoulo", + "e": "Theodore.Papadopoulo@inria.fr" + } + ], + "changes": { + "doc/changes/devel/13268.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/tag.py": { + "a": 1, 
+ "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13273.json b/doc/sphinxext/prs/13273.json new file mode 100644 index 00000000000..549448bee0f --- /dev/null +++ b/doc/sphinxext/prs/13273.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4033b3a339c1226e248953eeee30e7488d50659f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 2, + "d": 2 + }, + "doc/conf.py": { + "a": 4, + "d": 0 + }, + "doc/martinos.rst": { + "a": 0, + "d": 37 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13274.json b/doc/sphinxext/prs/13274.json new file mode 100644 index 00000000000..64f228f6361 --- /dev/null +++ b/doc/sphinxext/prs/13274.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "40b9232ecb01de8bbe0d039564e995214c90b630", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 3 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "mne/commands/tests/test_commands.py": { + "a": 6, + "d": 3 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/hooks/update_environment_file.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13276.json b/doc/sphinxext/prs/13276.json new file mode 100644 index 00000000000..482b19ece74 --- /dev/null +++ b/doc/sphinxext/prs/13276.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d9dcf118f0773f693722f2d9a651a09e5ada1df1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13276.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 14, + "d": 8 + }, + "mne/forward/_compute_forward.py": { + "a": 5, + "d": 0 + }, + "mne/simulation/tests/test_raw.py": { + "a": 26, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13277.json b/doc/sphinxext/prs/13277.json new file mode 100644 index 00000000000..265557863ca --- /dev/null +++ b/doc/sphinxext/prs/13277.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "58618f756d0c83dc5d57979ac14e87165394397d", + "authors": [ + { + "n": "user27182", + "e": "89109579+user27182@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13277.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1328.json b/doc/sphinxext/prs/1328.json new file mode 100644 index 00000000000..15a1fbb95ff --- /dev/null +++ b/doc/sphinxext/prs/1328.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "f3c721014a4115c451ce2e3c376194003f0f7818", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_eog_artifact_histogram.py": { + "a": 49, + "d": 0 + }, + "mne/epochs.py": { + "a": 30, + "d": 25 + }, + "mne/event.py": { + "a": 2, + "d": 26 + }, + "mne/io/base.py": { + "a": 40, + "d": 2 + }, + "mne/realtime/epochs.py": { + "a": 2, + "d": 13 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 6, + "d": 2 + 
}, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 6 + }, + "mne/tests/test_event.py": { + "a": 23, + "d": 0 + }, + "mne/utils.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13280.json b/doc/sphinxext/prs/13280.json new file mode 100644 index 00000000000..3c39b388472 --- /dev/null +++ b/doc/sphinxext/prs/13280.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "19264460d568eaa6243da44e68de9fbd662e03ff", + "authors": [ + { + "n": "Harrison Ritz", + "e": "hritz@princeton.edu" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13280.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13282.json b/doc/sphinxext/prs/13282.json new file mode 100644 index 00000000000..1343ff41ca1 --- /dev/null +++ b/doc/sphinxext/prs/13282.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "69620eb71a3689dabf5d0f1d926a01c68a8b6664", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/autofix.yml": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13283.json b/doc/sphinxext/prs/13283.json new file mode 100644 index 00000000000..b9c95b19890 --- /dev/null +++ b/doc/sphinxext/prs/13283.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ed69487d690a7e835b342d9367bd189e41b38e6f", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13284.json b/doc/sphinxext/prs/13284.json new file mode 100644 index 00000000000..4e5386216ff --- /dev/null +++ b/doc/sphinxext/prs/13284.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c4f5f0a0ea8d3f333c401449b96f77a9930e92d3", + "authors": [ + { + "n": "LaurentLM", + "e": "laurent.lementec@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13284.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 14, + "d": 3 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13286.json b/doc/sphinxext/prs/13286.json new file mode 100644 index 00000000000..74bc01cb3b4 --- /dev/null +++ b/doc/sphinxext/prs/13286.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8df70aea97c42a58d02e1c121679e9c1b2c62869", + "authors": [ + { + "n": "Ankang Hu", + "e": "23111220065@m.fudan.edu.cn" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13286.bugfix.rst": { + "a": 3, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 0 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13290.json b/doc/sphinxext/prs/13290.json new file mode 100644 index 
00000000000..c9e72dea20b --- /dev/null +++ b/doc/sphinxext/prs/13290.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a0e5a83962c800fcee21896cb891c63b6373c1cc", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13291.json b/doc/sphinxext/prs/13291.json new file mode 100644 index 00000000000..fb90d348c30 --- /dev/null +++ b/doc/sphinxext/prs/13291.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "729ce25d05280291469d0b5ddfd86007ae1b443d", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13293.json b/doc/sphinxext/prs/13293.json new file mode 100644 index 00000000000..f519f306f73 --- /dev/null +++ b/doc/sphinxext/prs/13293.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bb34f369ada8b27299a74118b858118cbcbf346a", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/13293.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/pick.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13298.json b/doc/sphinxext/prs/13298.json new file mode 100644 index 00000000000..778da14a3c4 --- /dev/null +++ b/doc/sphinxext/prs/13298.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "5009ee876e6b905eb25a37a0e45c75aa7b4c6e9f", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/devel/13298.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/spectrum.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 20, + "d": 16 + }, + "mne/viz/tests/test_raw.py": { + "a": 14, + "d": 9 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 9 + }, + "tutorials/time-freq/10_spectrum_class.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13299.json b/doc/sphinxext/prs/13299.json new file mode 100644 index 00000000000..2d44f641fdf --- /dev/null +++ b/doc/sphinxext/prs/13299.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b78d661203f4e805d5a9412583c1a10af3a73106", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/mne_doc_utils.py": { + "a": 2, + "d": 0 + }, + "examples/inverse/morph_volume_stc.py": { + "a": 4, + "d": 5 + }, + "mne/utils/tests/test_config.py": { + "a": 3, + "d": 1 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13300.json b/doc/sphinxext/prs/13300.json new file mode 100644 index 00000000000..7e71d69f98e --- /dev/null +++ b/doc/sphinxext/prs/13300.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1652a7cf97a02a91ac3503e0f89f195cb505e191", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13301.json b/doc/sphinxext/prs/13301.json new file mode 100644 index 
00000000000..174e45f1235 --- /dev/null +++ b/doc/sphinxext/prs/13301.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2e677b122980a605c1a3a86765f549b70ee0a534", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/time_frequency/spectrum.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13304.json b/doc/sphinxext/prs/13304.json new file mode 100644 index 00000000000..edd33692253 --- /dev/null +++ b/doc/sphinxext/prs/13304.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d193166134dcc30c3c2bf154ec188f63913d6ea6", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/links.inc": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13305.json b/doc/sphinxext/prs/13305.json new file mode 100644 index 00000000000..8422ae14ff8 --- /dev/null +++ b/doc/sphinxext/prs/13305.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a4a614f72da25c43a509b0ec987dbe7620d08981", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13306.json b/doc/sphinxext/prs/13306.json new file mode 100644 index 00000000000..ef6dc8e99c3 --- /dev/null +++ b/doc/sphinxext/prs/13306.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "6ad882335644fb80f8744e508387c52f7231553a", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".mailmap": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/credit_tools.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/prs/13228.json": { + "a": 59, + "d": 0 + }, + "doc/sphinxext/prs/13261.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/13266.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13267.json": { + "a": 79, + "d": 0 + }, + "doc/sphinxext/prs/13268.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13273.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13274.json": { + "a": 39, + "d": 0 + }, + "doc/sphinxext/prs/13276.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13277.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13280.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13282.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13283.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13284.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13286.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13290.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13291.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13293.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13299.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13300.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13301.json": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13308.json b/doc/sphinxext/prs/13308.json new file mode 100644 index 00000000000..e71f749d25a --- /dev/null +++ b/doc/sphinxext/prs/13308.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "d970efb081463306fa29fde33d8c995ef649ee56", + "authors": 
[ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "environment.yml": { + "a": 3, + "d": 3 + }, + "mne/decoding/_fixes.py": { + "a": 132, + "d": 0 + }, + "mne/decoding/tests/test_base.py": { + "a": 3, + "d": 1 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_ems.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/transformer.py": { + "a": 6, + "d": 3 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 13, + "d": 3 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tools/environment_old.yml": { + "a": 1, + "d": 1 + }, + "tools/hooks/update_environment_file.py": { + "a": 1, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13310.json b/doc/sphinxext/prs/13310.json new file mode 100644 index 00000000000..907ebbad157 --- /dev/null +++ b/doc/sphinxext/prs/13310.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3612f7bc3dbe57a4de2d976d749703bd75001d29", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/13310.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 13, + "d": 0 + }, + "mne/utils/config.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13311.json b/doc/sphinxext/prs/13311.json new file mode 100644 index 00000000000..c5f7fb6f04c --- /dev/null +++ b/doc/sphinxext/prs/13311.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2cc3715ce5591dc60fa70d34503a9e0eedb74bef", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13313.json b/doc/sphinxext/prs/13313.json new file mode 100644 index 00000000000..3d059ab464c --- /dev/null +++ b/doc/sphinxext/prs/13313.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "5fed2b6c85386021cb47a6b1ee44783ae223876d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 3, + "d": 3 + }, + "mne/_fiff/tests/test_constants.py": { + "a": 3, + "d": 0 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 15, + "d": 6 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 1 + }, + "mne/html_templates/report/forward.html.jinja": { + "a": 1, + "d": 0 + }, + "mne/html_templates/report/image.html.jinja": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 2, + "d": 2 + }, + "mne/report/report.py": { + "a": 150, + "d": 158 + }, + "mne/report/tests/test_report.py": { + "a": 7, + "d": 4 + }, + "tutorials/intro/70_report.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13314.json b/doc/sphinxext/prs/13314.json new file mode 100644 index 00000000000..7184757b9b3 
--- /dev/null +++ b/doc/sphinxext/prs/13314.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "35398fdb1a9760e3926c0c5eff4ff931289c3d4d", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/changes/devel/13314.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/defaults.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13315.json b/doc/sphinxext/prs/13315.json new file mode 100644 index 00000000000..ec59f508ea9 --- /dev/null +++ b/doc/sphinxext/prs/13315.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0c0e72efe1f062cce5fb2aaa9509cf77036ae6f9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 11, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13317.json b/doc/sphinxext/prs/13317.json new file mode 100644 index 00000000000..0280760ea6f --- /dev/null +++ b/doc/sphinxext/prs/13317.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4b3cf34c30a4b1ee135b44300437b6fafabb8b36", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13318.json b/doc/sphinxext/prs/13318.json new file mode 100644 index 00000000000..f1c91424cf3 --- /dev/null +++ b/doc/sphinxext/prs/13318.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "56f863d9f99afca363468d14b478eef8761698f9", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/credit_tools.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/prs/12848.json": { + "a": 67, + "d": 0 + }, + "doc/sphinxext/prs/13298.json": { + "a": 43, + "d": 0 + }, + "doc/sphinxext/prs/13304.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13305.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13306.json": { + "a": 103, + "d": 0 + }, + "doc/sphinxext/prs/13308.json": { + "a": 67, + "d": 0 + }, + "doc/sphinxext/prs/13310.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13311.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13313.json": { + "a": 55, + "d": 0 + }, + "doc/sphinxext/prs/13314.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13315.json": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13319.json b/doc/sphinxext/prs/13319.json new file mode 100644 index 00000000000..72baa446b07 --- /dev/null +++ b/doc/sphinxext/prs/13319.json @@ -0,0 +1,283 @@ +{ + "merge_commit_sha": "0c106bc9c00a0f3136fdd8a55cc9852b2d8f8959", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".mailmap": { + "a": 7, + "d": 1 + }, + "CITATION.cff": { + "a": 57, + "d": 25 + }, + "SECURITY.md": { + "a": 3, + "d": 3 + }, + "codemeta.json": { + "a": 171, + "d": 69 + }, + "doc/_static/versions.json": { + "a": 7, + "d": 2 + }, + "doc/changes/devel.rst": { + "a": 0, + "d": 5 + }, + "doc/changes/devel/12071.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12656.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12828.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12848.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/12910.newfeature.rst": { + "a": 0, + "d": 1 + }, + 
"doc/changes/devel/13019.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13028.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13037.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13042.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13044.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13048.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13054.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13056.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13058.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13062.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13063.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13065.bugfix.rst": { + "a": 0, + "d": 7 + }, + "doc/changes/devel/13067.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13069.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13070.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13080.apichange.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13080.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13082.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13083.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13097.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13100.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13101.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13107.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13113.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13123.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13144.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13145.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13172.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13174.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13178.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13179.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13179.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13183.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13184.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13184.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13208.bugfix.rst.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13218.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13225.other.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13228.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13232.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13233.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13234.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13241.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13261.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13268.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13276.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13277.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13280.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13284.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13286.bugfix.rst": { + "a": 0, + "d": 3 + }, + "doc/changes/devel/13293.newfeature.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13298.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/devel/13310.bugfix.rst": { + "a": 0, + 
"d": 1 + }, + "doc/changes/devel/13314.bugfix.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/v1.10.rst": { + "a": 129, + "d": 0 + }, + "doc/development/whats_new.rst": { + "a": 1, + "d": 1 + }, + "doc/documentation/cited.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13320.json b/doc/sphinxext/prs/13320.json new file mode 100644 index 00000000000..34dd5b42c16 --- /dev/null +++ b/doc/sphinxext/prs/13320.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ed26706d11833e2c4260069cfbf0e020c55d47fe", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/v1.10.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13321.json b/doc/sphinxext/prs/13321.json new file mode 100644 index 00000000000..381540eba1a --- /dev/null +++ b/doc/sphinxext/prs/13321.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "48f158893f0a64f8696f2c13e30121f7a90585e6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 23, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 15, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13322.json b/doc/sphinxext/prs/13322.json new file mode 100644 index 00000000000..3a3a875e594 --- /dev/null +++ b/doc/sphinxext/prs/13322.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "85e675985f2ac6f2facfa8d4e79164b056d2ea0c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 5, + "d": 5 + }, + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 3, + "d": 3 + }, + "environment.yml": { + "a": 4, + "d": 4 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 2, + "d": 6 + }, + "mne/preprocessing/maxwell.py": { + "a": 9, + "d": 27 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 42, + "d": 6 + }, + "mne/report/report.py": { + "a": 0, + "d": 10 + }, + "mne/utils/config.py": { + "a": 2, + "d": 2 + }, + "mne/utils/dataframe.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_config.py": { + "a": 1, + "d": 1 + }, + "pyproject.toml": { + "a": 7, + "d": 7 + }, + "tools/github_actions_dependencies.sh": { + "a": 6, + "d": 3 + }, + "tools/install_pre_requirements.sh": { + "a": 37, + "d": 57 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13327.json b/doc/sphinxext/prs/13327.json new file mode 100644 index 00000000000..8a2b2610453 --- /dev/null +++ b/doc/sphinxext/prs/13327.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3ac330a4c5887b7c19a3f4dd961ed071ecebb54a", + "authors": [ + { + "n": "Genuster", + "e": null + } + ], + "changes": { + "examples/decoding/ssd_spatial_filters.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/ssd.py": { + "a": 32, + "d": 1 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 38, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13330.json b/doc/sphinxext/prs/13330.json new file mode 100644 index 
00000000000..9471407e814 --- /dev/null +++ b/doc/sphinxext/prs/13330.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4130e46d0499d8d58a4303f1d338b66e830e0588", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 7 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13331.json b/doc/sphinxext/prs/13331.json new file mode 100644 index 00000000000..dec20360212 --- /dev/null +++ b/doc/sphinxext/prs/13331.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c31ae1d9bd5f69660d1e94931ff6dff412a23dfd", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/js/custom-icons.js": { + "a": 19, + "d": 0 + }, + "doc/_static/style.css": { + "a": 11, + "d": 1 + }, + "doc/conf.py": { + "a": 17, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13332.json b/doc/sphinxext/prs/13332.json new file mode 100644 index 00000000000..2766cb13009 --- /dev/null +++ b/doc/sphinxext/prs/13332.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "0474b499fd5ede62db4f8673989fe24e1945f2d1", + "authors": [ + { + "n": "Genuster", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/ged.rst": { + "a": 1, + "d": 1 + }, + "doc/api/decoding.rst": { + "a": 2, + "d": 0 + }, + "doc/changes/dev/13332.newfeature.rst": { + "a": 4, + "d": 0 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 6, + "d": 4 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 11, + "d": 2 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 42, + "d": 35 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 53, + "d": 18 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/__init__.pyi": { + "a": 3, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 61, + "d": 5 + }, + "mne/decoding/csp.py": { + "a": 12, + "d": 39 + }, + "mne/decoding/spatial_filter.py": { + "a": 639, + "d": 0 + }, + "mne/decoding/tests/test_base.py": { + "a": 123, + "d": 16 + }, + "mne/decoding/tests/test_spatial_filter.py": { + "a": 190, + "d": 0 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13333.json b/doc/sphinxext/prs/13333.json new file mode 100644 index 00000000000..c4aaca9b030 --- /dev/null +++ b/doc/sphinxext/prs/13333.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cb385eeb682cbb4a4b7ce6e3523507e375b58a5b", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/_static/js/custom-icons.js": { + "a": 19, + "d": 0 + }, + "doc/_static/style.css": { + "a": 11, + "d": 1 + }, + "doc/conf.py": { + "a": 17, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13334.json b/doc/sphinxext/prs/13334.json new file mode 100644 index 00000000000..5d1d206bbdb --- /dev/null +++ b/doc/sphinxext/prs/13334.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "9d114d6c745e2651d2e6c946c466cdd7f85e4f12", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 0, + "d": 5 + }, + "mne/gui/_coreg.py": { + "a": 5, + "d": 4 + }, + "mne/viz/_brain/_brain.py": { + "a": 
46, + "d": 77 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 18, + "d": 13 + }, + "mne/viz/backends/_pyvista.py": { + "a": 3, + "d": 3 + }, + "tools/circleci_bash_env.sh": { + "a": 0, + "d": 1 + }, + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13335.json b/doc/sphinxext/prs/13335.json new file mode 100644 index 00000000000..5255dc8bfff --- /dev/null +++ b/doc/sphinxext/prs/13335.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "aacf0cfc36d5122e3f4118344a5b2a672041b626", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 7 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13336.json b/doc/sphinxext/prs/13336.json new file mode 100644 index 00000000000..852b3bd2b21 --- /dev/null +++ b/doc/sphinxext/prs/13336.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "00a84a3817ba1b4c679c6b6aeaf38b6983a5ea0c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/mne_doc_utils.py": { + "a": 12, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13337.json b/doc/sphinxext/prs/13337.json new file mode 100644 index 00000000000..e6b4616b6ea --- /dev/null +++ b/doc/sphinxext/prs/13337.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1d318a43bf78bf360ded4eefb790c85bc515644e", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/mne_doc_utils.py": { + "a": 12, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13338.json b/doc/sphinxext/prs/13338.json new file mode 100644 index 00000000000..d2087a2e681 --- /dev/null +++ b/doc/sphinxext/prs/13338.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8f6ecb0090f5ba7fb0b99c19511d702cc5bb3811", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13340.json b/doc/sphinxext/prs/13340.json new file mode 100644 index 00000000000..20a44a9eeee --- /dev/null +++ b/doc/sphinxext/prs/13340.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bf57d9ce3412b147591b74a6bb126e5072bb7642", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/index.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13341.json b/doc/sphinxext/prs/13341.json new file mode 100644 index 00000000000..2a72516078a --- /dev/null +++ b/doc/sphinxext/prs/13341.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "be0edb1e24c5f4998580d199d1d9551a0c3c1d21", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/13341.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 3, + "d": 4 + }, + "mne/report/tests/test_report.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end 
of file diff --git a/doc/sphinxext/prs/13343.json b/doc/sphinxext/prs/13343.json new file mode 100644 index 00000000000..a8ca72834f9 --- /dev/null +++ b/doc/sphinxext/prs/13343.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "17c79290c0caecac7e555dfe86c3441c4006843b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_templates/sidebar-quicklinks.html": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13344.json b/doc/sphinxext/prs/13344.json new file mode 100644 index 00000000000..0db4606021e --- /dev/null +++ b/doc/sphinxext/prs/13344.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "489423bbe7ae4957c98ae34cf17ff8c480bc07a9", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/13341.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 3, + "d": 4 + }, + "mne/report/tests/test_report.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13345.json b/doc/sphinxext/prs/13345.json new file mode 100644 index 00000000000..7fed85558ce --- /dev/null +++ b/doc/sphinxext/prs/13345.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "60f76e87b475e666bd38fd9713c483517eb55fd7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13346.json b/doc/sphinxext/prs/13346.json new file mode 100644 index 00000000000..092262edddf --- /dev/null +++ b/doc/sphinxext/prs/13346.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "043c8beb67e6a9364473ded5a57b1abbe983bf3d", + "authors": [ + { + "n": "Genuster", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/_ged.py": { + "a": 36, + "d": 19 + }, + "mne/decoding/base.py": { + "a": 70, + "d": 19 + }, + "mne/decoding/tests/test_ged.py": { + "a": 15, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13347.json b/doc/sphinxext/prs/13347.json new file mode 100644 index 00000000000..e6dcebc212f --- /dev/null +++ b/doc/sphinxext/prs/13347.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "94d9871acc920a47a04e9481d1fb2dfb25be7bfe", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/conf.py": { + "a": 16, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13349.json b/doc/sphinxext/prs/13349.json new file mode 100644 index 00000000000..1d1da85f1ca --- /dev/null +++ b/doc/sphinxext/prs/13349.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "167872bf6b25a0e09d5662688acde8d068e68f9a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + ".github/workflows/check_changelog.yml": { + "a": 1, + "d": 1 + }, + "doc/Makefile": { + "a": 1, + "d": 1 + }, + "doc/_static/versions.json": { + "a": 1, + "d": 1 + }, + "doc/changes/dev.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/dev.rst.template": { + "a": 0, + "d": 0 + }, + "doc/changes/dev/.gitignore": { + "a": 0, + "d": 0 + }, + "doc/changes/dev/13112.bugfix.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/dev/13259.newfeature.rst": { + "a": 0, + "d": 0 + }, + "doc/changes/dev/13341.bugfix.rst": { + "a": 0, + "d": 0 + }, + "doc/conf.py": { + 
"a": 1, + "d": 1 + }, + "doc/development/contributing.rst": { + "a": 2, + "d": 2 + }, + "doc/development/whats_new.rst": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/credit_tools.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_config.py": { + "a": 2, + "d": 2 + }, + "pyproject.toml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1335.json b/doc/sphinxext/prs/1335.json new file mode 100644 index 00000000000..f14d31794f8 --- /dev/null +++ b/doc/sphinxext/prs/1335.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f4ff521d09e3cf3b38d1b037161b55ef25502f0c", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/layouts/EGI256.lout": { + "a": 257, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13350.json b/doc/sphinxext/prs/13350.json new file mode 100644 index 00000000000..6cae54c55ef --- /dev/null +++ b/doc/sphinxext/prs/13350.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "3fbf9865c8e27f6cffe15fa5335e922e82933af7", + "authors": [ + { + "n": "Emmanuel Ferdman", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13350.apichange.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/cov.py": { + "a": 33, + "d": 7 + }, + "mne/decoding/_covs_ged.py": { + "a": 3, + "d": 0 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 1 + }, + "mne/rank.py": { + "a": 54, + "d": 8 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_rank.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13351.json b/doc/sphinxext/prs/13351.json new file mode 100644 index 00000000000..ae8330ee29e --- /dev/null +++ b/doc/sphinxext/prs/13351.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1c9ac7c145f55416661a912a1de66302d2433ba6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/misc.py": { + "a": 30, + "d": 12 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 0, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13354.json b/doc/sphinxext/prs/13354.json new file mode 100644 index 00000000000..05684c206a3 --- /dev/null +++ b/doc/sphinxext/prs/13354.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "75d3900ae0aabeda7fcc873d642b0ac3cf9c0172", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/dev/13354.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/15_inplace.py": { + "a": 9, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13355.json b/doc/sphinxext/prs/13355.json new file mode 100644 index 00000000000..43a785c384a --- /dev/null +++ b/doc/sphinxext/prs/13355.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "73f77b49606469863d510e307fa4a05311bc8251", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13356.json 
b/doc/sphinxext/prs/13356.json new file mode 100644 index 00000000000..0d637beb779 --- /dev/null +++ b/doc/sphinxext/prs/13356.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d77db389a37ff4603aec52485253d5b45ed61c65", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "pyproject.toml": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13357.json b/doc/sphinxext/prs/13357.json new file mode 100644 index 00000000000..2f93fdcbcdc --- /dev/null +++ b/doc/sphinxext/prs/13357.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f04fcaa851e64b379a1a107f18cf3fd5b6b18f42", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13357.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 30, + "d": 7 + }, + "mne/io/eyelink/tests/test_eyelink.py": { + "a": 25, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13358.json b/doc/sphinxext/prs/13358.json new file mode 100644 index 00000000000..2323a5790fc --- /dev/null +++ b/doc/sphinxext/prs/13358.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "bbd67f15deceeda6c3a8aedd2ac8d0da14bb370c", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/development/contributing.rst": { + "a": 1, + "d": 1 + }, + "doc/install/mne_c.rst": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/prs/13112.json": { + "a": 67, + "d": 0 + }, + "doc/sphinxext/prs/13259.json": { + "a": 91, + "d": 0 + }, + "doc/sphinxext/prs/13317.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13318.json": { + "a": 63, + "d": 0 + }, + "doc/sphinxext/prs/13319.json": { + "a": 283, + "d": 0 + }, + "doc/sphinxext/prs/13320.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13321.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13322.json": { + "a": 79, + "d": 0 + }, + "doc/sphinxext/prs/13327.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13330.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13331.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13333.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13334.json": { + "a": 43, + "d": 0 + }, + "doc/sphinxext/prs/13335.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13336.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13337.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13338.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13340.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13341.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13343.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13344.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13345.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13346.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13349.json": { + "a": 75, + "d": 0 + }, + "doc/sphinxext/prs/13351.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13354.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13355.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13356.json": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13360.json b/doc/sphinxext/prs/13360.json new file mode 100644 index 00000000000..003abba488f --- /dev/null +++ b/doc/sphinxext/prs/13360.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"45c7faf355b76265a5e88b1bbd72543e362547f9", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13361.json b/doc/sphinxext/prs/13361.json new file mode 100644 index 00000000000..b8ac9623df7 --- /dev/null +++ b/doc/sphinxext/prs/13361.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f7cab3af285eb3e1e7afd3879d76b5539942a01e", + "authors": [ + { + "n": "Genuster", + "e": null + } + ], + "changes": { + "doc/changes/dev/13361.bugfix.rst": { + "a": 8, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 68, + "d": 39 + }, + "mne/decoding/tests/test_base.py": { + "a": 26, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13365.json b/doc/sphinxext/prs/13365.json new file mode 100644 index 00000000000..e5ae8354ab4 --- /dev/null +++ b/doc/sphinxext/prs/13365.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a07f970945aa36fc484bcd6ce88ce35f20a19a44", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13367.json b/doc/sphinxext/prs/13367.json new file mode 100644 index 00000000000..6ae2ee1f1d9 --- /dev/null +++ b/doc/sphinxext/prs/13367.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "70fbbd891afabf562a7315749dcefe9139df0020", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/release.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "doc/install/installers.rst": { + "a": 7, + "d": 7 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 2, + "d": 0 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 0 + }, + "mne/utils/tests/test_config.py": { + "a": 1, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13368.json b/doc/sphinxext/prs/13368.json new file mode 100644 index 00000000000..2413fc33ae3 --- /dev/null +++ b/doc/sphinxext/prs/13368.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b64c8231febe93926ec798c01ff01b5cb78055c3", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13369.json b/doc/sphinxext/prs/13369.json new file mode 100644 index 00000000000..e335e54c595 --- /dev/null +++ b/doc/sphinxext/prs/13369.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f8d8295beef7527938163af42ee9fc896c08a8a4", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".github/FUNDING.yml": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13371.json b/doc/sphinxext/prs/13371.json new file mode 100644 index 00000000000..d5f72d1b05a --- /dev/null +++ b/doc/sphinxext/prs/13371.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3014786c595f4c5c8bfc0b1563be2da0e87aaf7f", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", 
+ "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/dev/13371.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 9, + "d": 0 + }, + "tools/github_actions_test.sh": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13373.json b/doc/sphinxext/prs/13373.json new file mode 100644 index 00000000000..bf1be24a6e0 --- /dev/null +++ b/doc/sphinxext/prs/13373.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "be5fcefc67a66d35c87c2e33f6ac2a7951f0ee35", + "authors": [ + { + "n": "Shristi Baral", + "e": null + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13373.other.rst": { + "a": 1, + "d": 0 + }, + "doc/development/contributing.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13375.json b/doc/sphinxext/prs/13375.json new file mode 100644 index 00000000000..eec373472ab --- /dev/null +++ b/doc/sphinxext/prs/13375.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "97fd038b2add2b5a08923d735af72e3688d6ebf0", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/dev/13375.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 2 + }, + "mne/time_frequency/spectrum.py": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/tests/test_spectrum.py": { + "a": 29, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13376.json b/doc/sphinxext/prs/13376.json new file mode 100644 index 00000000000..d693e496442 --- /dev/null +++ b/doc/sphinxext/prs/13376.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "befc4180c123e91d463ed7f737d703dc0090fce1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/dev/13376.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_utils.py": { + "a": 6, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13378.json b/doc/sphinxext/prs/13378.json new file mode 100644 index 00000000000..a24b7259c80 --- /dev/null +++ b/doc/sphinxext/prs/13378.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "637ab4e030f11184239a1a70b6dbb57b0cc84f5d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 7, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1338.json b/doc/sphinxext/prs/1338.json new file mode 100644 index 00000000000..922cb7b94ed --- /dev/null +++ b/doc/sphinxext/prs/1338.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "a2349e348353e3a8c735be20c47ec3abe1f2fc76", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 8, + "d": 2 + }, + "mne/evoked.py": { + "a": 5, + "d": 1 + }, + "mne/io/base.py": { + "a": 8, + "d": 1 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 14, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 1 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 11, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 11, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 39, + "d": 3 + }, + "mne/utils.py": 
{ + "a": 117, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13380.json b/doc/sphinxext/prs/13380.json new file mode 100644 index 00000000000..41515aea9bc --- /dev/null +++ b/doc/sphinxext/prs/13380.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ace9381898b3ad9da9590a3d0b99abedaa74e033", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13381.json b/doc/sphinxext/prs/13381.json new file mode 100644 index 00000000000..039daa3e47b --- /dev/null +++ b/doc/sphinxext/prs/13381.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0b0d5eaa48752ddbbbd1b41d04a835e03472983c", + "authors": [ + { + "n": "Lumberbot (aka Jack)", + "e": null + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13385.json b/doc/sphinxext/prs/13385.json new file mode 100644 index 00000000000..8f20e410670 --- /dev/null +++ b/doc/sphinxext/prs/13385.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "87b98dcdaae3e84d5d208fba091aca0086c259bc", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/autofix.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/codeql-analysis.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/credit.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/release.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/tests.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13387.json b/doc/sphinxext/prs/13387.json new file mode 100644 index 00000000000..6347ffb1b28 --- /dev/null +++ b/doc/sphinxext/prs/13387.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5268ba8af4a7e169104c0115eac707924ce88dd8", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13389.json b/doc/sphinxext/prs/13389.json new file mode 100644 index 00000000000..27b3c8e3a5a --- /dev/null +++ b/doc/sphinxext/prs/13389.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "adeb86583c99f5bdd7baf7cefe2d62bb50388ab7", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/transforms.py": { + "a": 2, + "d": 1 + }, + "pyproject.toml": { + "a": 18, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1339.json b/doc/sphinxext/prs/1339.json new file mode 100644 index 00000000000..15181a8fd0c --- /dev/null +++ b/doc/sphinxext/prs/1339.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "08ed7aa3dd6697cdb4c85adadcb81774717b455d", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13391.json b/doc/sphinxext/prs/13391.json new file mode 100644 index 00000000000..945ca9549a1 --- /dev/null +++ b/doc/sphinxext/prs/13391.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0b374c1bda31875a61bcc456bd09854e4b487394", + "authors": [ + { + "n": "Tharupahan Jayawardana", + "e": "tharupahanjayawardana@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13391.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13393.json b/doc/sphinxext/prs/13393.json new file mode 100644 index 00000000000..8f78fe88302 --- /dev/null +++ b/doc/sphinxext/prs/13393.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "d25726187b699818ac671dfdac18d2648f504c78", + "authors": [ + { + "n": "Genuster", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13393.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/_fixes.py": { + "a": 41, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 17, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 59, + "d": 11 + }, + "mne/decoding/search_light.py": { + "a": 10, + "d": 4 + }, + "mne/decoding/tests/test_ged.py": { + "a": 2, + "d": 7 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 10, + "d": 25 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 1, + "d": 9 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 8, + "d": 2 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 6 + }, + "mne/decoding/tests/test_xdawn.py": { + "a": 17, + "d": 0 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 66, + "d": 24 + }, + "mne/decoding/transformer.py": { + "a": 14, + "d": 0 + }, + "mne/decoding/xdawn.py": { + "a": 7, + "d": 1 + }, + "tools/vulture_allowlist.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13394.json b/doc/sphinxext/prs/13394.json new file mode 100644 index 00000000000..b324c18462b --- /dev/null +++ b/doc/sphinxext/prs/13394.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b00cf992aab87f0b60cf93a707d1c5eca5298ebb", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13395.json b/doc/sphinxext/prs/13395.json new file mode 100644 index 00000000000..91f7da01bf1 --- /dev/null +++ b/doc/sphinxext/prs/13395.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "17c4e03b50b1f504ea607db68a616e677891cf75", + "authors": [ + { + "n": "Emrecan Çelik", + "e": "emrecncelik@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13395.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13396.json b/doc/sphinxext/prs/13396.json new file mode 100644 index 00000000000..51acabd9b81 --- /dev/null +++ b/doc/sphinxext/prs/13396.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"0836e429b462124cecac18c59d00281de88fb89e", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13398.json b/doc/sphinxext/prs/13398.json new file mode 100644 index 00000000000..55a940c0c94 --- /dev/null +++ b/doc/sphinxext/prs/13398.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "174c540c04f93bbba052da053682d1ebb0ab2e7e", + "authors": [ + { + "n": "Paul Anders", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 4, + "d": 1 + }, + "doc/changes/dev/13398.apichange.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "examples/visualization/mne_helmet.py": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/interpolation.py": { + "a": 6, + "d": 6 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/forward/_field_interpolation.py": { + "a": 22, + "d": 7 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 31, + "d": 11 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 4, + "d": 2 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 6, + "d": 1 + }, + "tutorials/visualization/20_ui_events.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/134.json b/doc/sphinxext/prs/134.json new file mode 100644 index 00000000000..de62d3dbe8f --- /dev/null +++ b/doc/sphinxext/prs/134.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "71b988a21b4f2fcff9b799dacc4d85e4e68fe6e7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/constants.py": { + "a": 5, + "d": 2 + }, + "mne/fiff/tests/data/fsaverage_audvis-meg-cropped-lh.stc": { + "a": 0, + "d": 0 + }, + "mne/fiff/tests/data/fsaverage_audvis-meg-cropped-rh.stc": { + "a": 0, + "d": 0 + }, + "mne/forward.py": { + "a": 5, + "d": 2 + }, + "mne/label.py": { + "a": 45, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 325, + "d": 77 + }, + "mne/source_space.py": { + "a": 141, + "d": 18 + }, + "mne/stats/__init__.py": { + "a": 3, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 686, + "d": 101 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 118, + "d": 33 + }, + "mne/surface.py": { + "a": 14, + "d": 7 + }, + "mne/tests/test_source_estimate.py": { + "a": 58, + "d": 11 + }, + "mne/tests/test_source_space.py": { + "a": 21, + "d": 2 + }, + "mne/utils.py": { + "a": 18, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13400.json b/doc/sphinxext/prs/13400.json new file mode 100644 index 00000000000..dc64cd60cd1 --- /dev/null +++ b/doc/sphinxext/prs/13400.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "288165207c9eed17ba24a9e4d827f78cbd727a91", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13400.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/bem.py": { + "a": 8, + "d": 1 + }, + "mne/source_space/tests/test_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + 
"a": 77, + "d": 77 + }, + "mne/utils/docs.py": { + "a": 31, + "d": 12 + }, + "mne/utils/tests/test_check.py": { + "a": 43, + "d": 7 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 121, + "d": 138 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13403.json b/doc/sphinxext/prs/13403.json new file mode 100644 index 00000000000..38c4ee33bab --- /dev/null +++ b/doc/sphinxext/prs/13403.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "4b6004a90a1210f852210898df5504b029dbedf0", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/12847.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13332.json": { + "a": 79, + "d": 0 + }, + "doc/sphinxext/prs/13357.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13358.json": { + "a": 135, + "d": 0 + }, + "doc/sphinxext/prs/13360.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13361.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13365.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13367.json": { + "a": 51, + "d": 0 + }, + "doc/sphinxext/prs/13368.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13369.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13371.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/13373.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13376.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13378.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13380.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13381.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13385.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13387.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13389.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13391.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/13393.json": { + "a": 79, + "d": 0 + }, + "doc/sphinxext/prs/13394.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13396.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13400.json": { + "a": 43, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13404.json b/doc/sphinxext/prs/13404.json new file mode 100644 index 00000000000..e122be61b75 --- /dev/null +++ b/doc/sphinxext/prs/13404.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ce79aab062854c11349906b1ff2b11668b0b01a5", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13405.json b/doc/sphinxext/prs/13405.json new file mode 100644 index 00000000000..54219b7ece0 --- /dev/null +++ b/doc/sphinxext/prs/13405.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "07a63fdb39db2c37e02d717c725129ea7aea1e4e", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 1, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13407.json b/doc/sphinxext/prs/13407.json new file mode 100644 index 00000000000..b55114b15ab --- /dev/null +++ b/doc/sphinxext/prs/13407.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "bb8d7609ceca60154a77ad43d29f121e08e2585e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } 
+ ], + "changes": { + "doc/changes/dev/13407.bugfix.rst": { + "a": 1, + "d": 0 + }, + "examples/simulation/simulate_raw_data.py": { + "a": 0, + "d": 2 + }, + "examples/simulation/simulated_raw_data_using_subject_anatomy.py": { + "a": 0, + "d": 2 + }, + "mne/_ola.py": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 3, + "d": 3 + }, + "mne/conftest.py": { + "a": 10, + "d": 5 + }, + "mne/dipole.py": { + "a": 12, + "d": 10 + }, + "mne/forward/_compute_forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 111, + "d": 32 + }, + "mne/forward/forward.py": { + "a": 6, + "d": 21 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 79, + "d": 5 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 11 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 7, + "d": 6 + }, + "mne/simulation/raw.py": { + "a": 3, + "d": 2 + }, + "mne/simulation/tests/test_raw.py": { + "a": 2, + "d": 7 + }, + "mne/source_space/_source_space.py": { + "a": 42, + "d": 24 + }, + "mne/surface.py": { + "a": 53, + "d": 5 + }, + "mne/tests/test_dipole.py": { + "a": 12, + "d": 5 + }, + "mne/tests/test_morph.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 3 + }, + "tutorials/forward/30_forward.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13409.json b/doc/sphinxext/prs/13409.json new file mode 100644 index 00000000000..18bbf9a4baa --- /dev/null +++ b/doc/sphinxext/prs/13409.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "0e6a17804f536ae1a38076a1a8a5b05974c95d7c", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/autofix.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/credit.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/release.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/tests.yml": { + "a": 2, + "d": 2 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 6, + "d": 1 + }, + "doc/sphinxext/related_software.py": { + "a": 4, + "d": 0 + }, + "mne/_ola.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1341.json b/doc/sphinxext/prs/1341.json new file mode 100644 index 00000000000..f27164724ff --- /dev/null +++ b/doc/sphinxext/prs/1341.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "4406e6ab6a731b444ed4fe04cf5fd4f2eca4ff81", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 5, + "d": 3 + }, + "examples/plot_evoked_delayed_ssp.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_epochs.py": { + "a": 1, + "d": 2 + }, + "examples/plot_read_evoked.py": { + "a": 6, + "d": 3 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 47, + "d": 2 + }, + "mne/tests/test_viz.py": { + "a": 5, + "d": 0 + }, + "mne/viz.py": { + "a": 152, + "d": 60 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13410.json b/doc/sphinxext/prs/13410.json new file mode 100644 index 00000000000..1f11d724763 --- /dev/null +++ b/doc/sphinxext/prs/13410.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e3339cb92d2656d6ac2a5304811cb8d58e843405", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13411.json b/doc/sphinxext/prs/13411.json new file mode 100644 index 00000000000..0507f5f41c7 --- /dev/null +++ b/doc/sphinxext/prs/13411.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1d3501ece2a74057bdc788a804d0ebcc7033c954", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13412.json b/doc/sphinxext/prs/13412.json new file mode 100644 index 00000000000..89a8e19a26c --- /dev/null +++ b/doc/sphinxext/prs/13412.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3a46357c0fa1c2723e43e7b107f129f155c6ae0e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/preprocessing/_regress.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13413.json b/doc/sphinxext/prs/13413.json new file mode 100644 index 00000000000..745b48efd61 --- /dev/null +++ b/doc/sphinxext/prs/13413.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9ee6e52e49e669e32d12991d20b090ffc40f2b4b", + "authors": [ + { + "n": "Christodoulos Kechris", + "e": "christodouloskech@gmail.com" + } + ], + "changes": { + "doc/sphinxext/related_software.py": { + "a": 1, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13416.json b/doc/sphinxext/prs/13416.json new file mode 100644 index 00000000000..46c7cefadb5 --- /dev/null +++ b/doc/sphinxext/prs/13416.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a8e2fe8f0245abeb1276d434b333a4d1fd4d420e", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 19, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1342.json b/doc/sphinxext/prs/1342.json new file mode 100644 index 00000000000..b8d6d4003fd --- /dev/null +++ b/doc/sphinxext/prs/1342.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d93f39b3cf8fb81aaa641943e56f51b76dacce81", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils.py": { + "a": 3, + "d": 3 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/13420.json b/doc/sphinxext/prs/13420.json new file mode 100644 index 00000000000..4e00efe7d62 --- /dev/null +++ b/doc/sphinxext/prs/13420.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "662fb510097e38667a342e41fa525330154c5f74", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13421.json b/doc/sphinxext/prs/13421.json new file mode 100644 index 00000000000..cc10cbda0be --- /dev/null +++ b/doc/sphinxext/prs/13421.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "52234feffb2d46ebcc7d204697ec9bec15fd6d75", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 1, + "d": 3 + }, + "tools/github_actions_dependencies.sh": { + "a": 0, + "d": 4 + }, + "tools/github_actions_env_vars.sh": { + "a": 1, + "d": 1 + }, + "tools/install_pre_requirements.sh": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13423.json b/doc/sphinxext/prs/13423.json new file mode 100644 index 00000000000..6172e1cda7c --- /dev/null +++ b/doc/sphinxext/prs/13423.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ae1d4ae9686c91a195b7f83b57fdc60124b219aa", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/v1.10.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13424.json b/doc/sphinxext/prs/13424.json new file mode 100644 index 00000000000..1603a6a6819 --- /dev/null +++ b/doc/sphinxext/prs/13424.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "523f221bbb451c03efbfcba8faa7a3261aafeaf9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/install_pre_requirements.sh": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13426.json b/doc/sphinxext/prs/13426.json new file mode 100644 index 00000000000..3edd7d16b03 --- /dev/null +++ b/doc/sphinxext/prs/13426.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "08ef1c7a8d5ef8086a7e3ffda94d60e5a1263055", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_mpl_figure.py": { + "a": 9, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 8, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 1 + }, + "tools/install_pre_requirements.sh": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13427.json b/doc/sphinxext/prs/13427.json new file mode 100644 index 00000000000..88c1afda391 --- /dev/null +++ b/doc/sphinxext/prs/13427.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "936bfae75fc4604443e5dc3e16c095cd9bd612a2", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13428.json b/doc/sphinxext/prs/13428.json new file mode 100644 index 00000000000..a8177bc04be --- /dev/null +++ b/doc/sphinxext/prs/13428.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": 
"3e29187ce3a2a63b4497dc83d30869ad33ecd1be", + "authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13428.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/export/_eeglab.py": { + "a": 8, + "d": 0 + }, + "mne/export/tests/test_export.py": { + "a": 2, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13431.json b/doc/sphinxext/prs/13431.json new file mode 100644 index 00000000000..f8fb6f633f9 --- /dev/null +++ b/doc/sphinxext/prs/13431.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "34e2743d7f1a1684f6359520392fd437bf2cd92f", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 13, + "d": 7 + }, + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + }, + "doc/sphinxext/related_software.py": { + "a": 0, + "d": 1 + }, + "environment.yml": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 5, + "d": 0 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 3, + "d": 0 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 1, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 0 + }, + "tools/hooks/update_environment_file.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13432.json b/doc/sphinxext/prs/13432.json new file mode 100644 index 00000000000..a1f9475b5c1 --- /dev/null +++ b/doc/sphinxext/prs/13432.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "51b6a5c5fe711d10e959f1669960b4506cb4ad6e", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/13350.json": { + "a": 51, + "d": 0 + }, + "doc/sphinxext/prs/13375.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13395.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13398.json": { + "a": 75, + "d": 0 + }, + "doc/sphinxext/prs/13403.json": { + "a": 107, + "d": 0 + }, + "doc/sphinxext/prs/13404.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13405.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13407.json": { + "a": 107, + "d": 0 + }, + "doc/sphinxext/prs/13409.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/13410.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13411.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13412.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13413.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13416.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/13420.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/13423.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13424.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13426.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/13427.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/13428.json": { + "a": 35, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13434.json b/doc/sphinxext/prs/13434.json new file mode 100644 index 00000000000..3995a187bc2 --- /dev/null +++ b/doc/sphinxext/prs/13434.json 
@@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4fdde3a1cc87c66263d9b0c663d6a81abfcd89b5", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 7, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13435.json b/doc/sphinxext/prs/13435.json new file mode 100644 index 00000000000..fba54c8e7a7 --- /dev/null +++ b/doc/sphinxext/prs/13435.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "61bc8b8755da982491d4cebf940bbde160b98e1a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13435.newfeature.rst": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/export/_edf_bdf.py": { + "a": 75, + "d": 34 + }, + "mne/export/_export.py": { + "a": 22, + "d": 16 + }, + "mne/export/tests/test_export.py": { + "a": 48, + "d": 0 + }, + "pyproject.toml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13436.json b/doc/sphinxext/prs/13436.json new file mode 100644 index 00000000000..e42a8e1f9e8 --- /dev/null +++ b/doc/sphinxext/prs/13436.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f7df4e5cc88bfb6795932ebbd7a3e72fdaff39d1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/check_changelog.yml": { + "a": 4, + "d": 1 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 3, + "d": 3 + }, + "mne/surface.py": { + "a": 3, + "d": 8 + }, + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13437.json b/doc/sphinxext/prs/13437.json new file mode 100644 index 00000000000..d522701eb73 --- /dev/null +++ b/doc/sphinxext/prs/13437.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "49b09fb6e0f6353f49239ee7a35a08b955c496ac", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/export/_edf_bdf.py": { + "a": 75, + "d": 34 + }, + "mne/export/_export.py": { + "a": 22, + "d": 16 + }, + "mne/export/tests/test_export.py": { + "a": 48, + "d": 0 + }, + "pyproject.toml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13439.json b/doc/sphinxext/prs/13439.json new file mode 100644 index 00000000000..c1b249d43df --- /dev/null +++ b/doc/sphinxext/prs/13439.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "adc01cdc40cdaaf00d39108923dce352bd3091b2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13440.json b/doc/sphinxext/prs/13440.json new file mode 100644 index 00000000000..480fdfa9bbb --- /dev/null 
+++ b/doc/sphinxext/prs/13440.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "231f2247f873094ef225ab0b364fcc073386e397", + "authors": [ + { + "n": "Johannes Herforth", + "e": "johannes@herforth.net" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "doc/changes/dev/13440.bugfix.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "doc/install/advanced.rst": { + "a": 1, + "d": 1 + }, + "mne/datasets/_fetch.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/_phantom/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/config.py": { + "a": 26, + "d": 26 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + }, + "tools/get_minimal_commands.sh": { + "a": 2, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13442.json b/doc/sphinxext/prs/13442.json new file mode 100644 index 00000000000..ebc5cb550e3 --- /dev/null +++ b/doc/sphinxext/prs/13442.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f3c00cc7996b48f3c8166269c8d0c1d5b772edea", + "authors": [ + { + "n": "dependabot[bot]", + "e": "49699333+dependabot[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/codeql-analysis.yml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13443.json b/doc/sphinxext/prs/13443.json new file mode 100644 index 00000000000..977425f1466 --- /dev/null +++ b/doc/sphinxext/prs/13443.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "06d09def3a2b9cbbae2b0a4af77b4ba0d81c228a", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13445.json b/doc/sphinxext/prs/13445.json new file mode 100644 index 00000000000..02c75b4862f --- /dev/null +++ b/doc/sphinxext/prs/13445.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "7fb19a1a7dbf4b8d710dd71a88a1b8ee73e1e100", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/dev/13445.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/install/installers.rst": { + "a": 7, + "d": 7 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 3, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 1, + "d": 10 + }, + "mne/gui/tests/test_coreg.py": { + "a": 1, + "d": 7 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 46, + "d": 12 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13446.json b/doc/sphinxext/prs/13446.json new file mode 100644 index 00000000000..5a9318f44a6 --- /dev/null +++ b/doc/sphinxext/prs/13446.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3cfac64bbc778b0c52dcd76f4b4799cc59226fae", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + 
"changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13448.json b/doc/sphinxext/prs/13448.json new file mode 100644 index 00000000000..04779aef613 --- /dev/null +++ b/doc/sphinxext/prs/13448.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "aa2d977c794a6e25d3b0855e523b69141d6416ad", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/dev/13448.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 10, + "d": 15 + }, + "mne/io/nihon/nihon.py": { + "a": 5, + "d": 3 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1345.json b/doc/sphinxext/prs/1345.json new file mode 100644 index 00000000000..d4eaffe777a --- /dev/null +++ b/doc/sphinxext/prs/1345.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "41cfed03b8d17611cc3cb4671a9ed134d0cc1a9b", + "authors": [ + { + "n": "Andrew R. Dykstra", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 40, + "d": 27 + }, + "doc/source/whats_new.rst": { + "a": 6, + "d": 1 + }, + "mne/__init__.py": { + "a": 4, + "d": 3 + }, + "mne/channels.py": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 12, + "d": 0 + }, + "mne/evoked.py": { + "a": 60, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/array/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 6, + "d": 74 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 72, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 49, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13451.json b/doc/sphinxext/prs/13451.json new file mode 100644 index 00000000000..1394676a859 --- /dev/null +++ b/doc/sphinxext/prs/13451.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "54644a5d36027f9ee5b672a15e998768b39b745c", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/spec_zero.yml": { + "a": 62, + "d": 0 + }, + ".yamllint.yml": { + "a": 2, + "d": 0 + }, + "doc/changes/dev/13451.other.rst": { + "a": 1, + "d": 0 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tools/dev/spec_zero_update_versions.py": { + "a": 258, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13452.json b/doc/sphinxext/prs/13452.json new file mode 100644 index 00000000000..01a919c507b --- /dev/null +++ b/doc/sphinxext/prs/13452.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "4fb5442b12d38427c4f1e818e5d5944e59323ec3", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 3, + "d": 3 + }, + "doc/changes/dev/13452.other.rst": { + "a": 3, + "d": 0 + }, + "doc/development/contributing.rst": { + "a": 2, + "d": 2 + }, + "doc/sphinxext/related_software.txt": { + "a": 37, + "d": 0 + }, + "mne/utils/tests/test_config.py": { + "a": 5, + "d": 1 + }, + "pyproject.toml": { + "a": 68, + "d": 67 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 2 + }, + "tools/circleci_dependencies.sh": { + "a": 7, + "d": 12 + }, + "tools/github_actions_dependencies.sh": { + "a": 12, + "d": 5 + }, + "tools/hooks/update_environment_file.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13453.json b/doc/sphinxext/prs/13453.json new file mode 100644 index 00000000000..7d51a84a459 --- /dev/null +++ b/doc/sphinxext/prs/13453.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "447a3e279e72fa9eac01c03b0f8dd20eadc87dff", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13456.json b/doc/sphinxext/prs/13456.json new file mode 100644 index 00000000000..1779fe8fef4 --- /dev/null +++ b/doc/sphinxext/prs/13456.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e6db33a24114d54d7b0baf4c6f2e5a8368acafd5", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/dev/13456.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 7, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 24, + "d": 3 + }, + "mne/channels/montage.py": { + "a": 9, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13458.json b/doc/sphinxext/prs/13458.json new file mode 100644 index 00000000000..7f4af2859b6 --- /dev/null +++ b/doc/sphinxext/prs/13458.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "147ff5995f91a0a2b1dcce15b16980c2a03143a6", + "authors": [ + { + "n": null, + "e": "myd7349@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/dev/13458.apichange.rst": { + "a": 1, + "d": 0 + }, + "mne/io/nihon/nihon.py": { + "a": 17, + "d": 14 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13459.json b/doc/sphinxext/prs/13459.json new file mode 100644 index 00000000000..d21ccc9a4e6 --- /dev/null +++ b/doc/sphinxext/prs/13459.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "2bc159e6210334af85edcfc179ebaf825bab5d50", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/credit.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/spec_zero.yml": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 1 + }, + "pyproject.toml": { + "a": 9, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13460.json b/doc/sphinxext/prs/13460.json new file mode 100644 index 00000000000..475522df4ed --- /dev/null +++ b/doc/sphinxext/prs/13460.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "65a717ced7b93d40cde68b13d037d0bba8690ba0", + "authors": [ + { + "n": "Johannes Herforth", + "e": "johannes@herforth.net" + }, + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/dev/13460.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 19, + "d": 17 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 5, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 9, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 8, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 16, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13462.json b/doc/sphinxext/prs/13462.json new file mode 100644 index 00000000000..2617a9e7b75 --- /dev/null +++ b/doc/sphinxext/prs/13462.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8b1a914d28b164b3ebe21395a9b6dea950147099", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 4, + "d": 0 + }, + "doc/overview/people.rst": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13463.json b/doc/sphinxext/prs/13463.json new file mode 100644 index 00000000000..0bbee61cefa --- /dev/null +++ b/doc/sphinxext/prs/13463.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e50d56542864e727705faf71501a2789bd59cc6f", + "authors": [ + { + "n": "Michael Straube", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13463.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 11, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13464.json b/doc/sphinxext/prs/13464.json new file mode 100644 index 00000000000..a7d8d457db3 --- /dev/null +++ b/doc/sphinxext/prs/13464.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6ec537f525050db438256be0254ee3bff707463f", + "authors": [ + { + "n": "dependabot[bot]", + "e": 
"49699333+dependabot[bot]@users.noreply.github.com" + } + ], + "changes": { + ".github/workflows/release.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13465.json b/doc/sphinxext/prs/13465.json new file mode 100644 index 00000000000..af7769ae5ed --- /dev/null +++ b/doc/sphinxext/prs/13465.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "610ebe5193457909485db37dcc0f750c0e6ecaaf", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13466.json b/doc/sphinxext/prs/13466.json new file mode 100644 index 00000000000..b55afb98fbb --- /dev/null +++ b/doc/sphinxext/prs/13466.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3d7722fecf2bf1d99e1a526654087912836d839a", + "authors": [ + { + "n": "Michael Straube", + "e": null + } + ], + "changes": { + "doc/changes/dev/13466.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/reference.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13469.json b/doc/sphinxext/prs/13469.json new file mode 100644 index 00000000000..d15a511194a --- /dev/null +++ b/doc/sphinxext/prs/13469.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f4a3f3f46477a8fda6b1adff0a8094faa670bec0", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/changes/dev/13469.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/io/eyelink/_utils.py": { + "a": 10, + "d": 3 + }, + "mne/preprocessing/eyetracking/tests/test_calibration.py": { + "a": 34, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13470.json b/doc/sphinxext/prs/13470.json new file mode 100644 index 00000000000..b2634e21f57 --- /dev/null +++ b/doc/sphinxext/prs/13470.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f09b3da1a23757b76fd0adfdb9c6ed29db14af29", + "authors": [ + { + "n": "Michael Straube", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/dev/13470.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/13475.json b/doc/sphinxext/prs/13475.json new file mode 100644 index 00000000000..fc952399543 --- /dev/null +++ b/doc/sphinxext/prs/13475.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1815534a3252faf159594ae5d9480b529ab9fb3d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".mailmap": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1350.json b/doc/sphinxext/prs/1350.json new file mode 100644 index 00000000000..bfa7de4db83 --- /dev/null +++ b/doc/sphinxext/prs/1350.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7c2cc67a1ea22e169d2528a920fd8054123dbab3", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/decoding/classifier.py": { + "a": 13, + "d": 4 + }, + "mne/time_frequency/multitaper.py": { + "a": 11, + "d": 1 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 12, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1357.json b/doc/sphinxext/prs/1357.json new file mode 100644 index 00000000000..eaa008c1e9b --- /dev/null +++ b/doc/sphinxext/prs/1357.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "200c1f6a1b42c68ce8eb39eb86c64ac8c151610a", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 45, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1359.json b/doc/sphinxext/prs/1359.json new file mode 100644 index 00000000000..c6cc86a49b3 --- /dev/null +++ b/doc/sphinxext/prs/1359.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "56f57d039c1c230c40978644e3406def2b547ae4", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 13, + "d": 11 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ctps_.py": { + "a": 169, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 53, + "d": 16 + }, + "mne/preprocessing/tests/test_ctps.py": { + "a": 84, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1361.json b/doc/sphinxext/prs/1361.json new file mode 100644 index 00000000000..c8f127523ce --- /dev/null +++ b/doc/sphinxext/prs/1361.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f13274cb3f05904b4428c2e0b4124cf6439ea495", + "authors": [ + { + "n": "marmaduke woodman", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1363.json b/doc/sphinxext/prs/1363.json new file mode 100644 index 00000000000..34dc7b0166d --- /dev/null +++ b/doc/sphinxext/prs/1363.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "986317fc85970a7b2f9b6d711ad2e3ab0aa030a3", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_lcmv.py": { + "a": 9, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1366.json b/doc/sphinxext/prs/1366.json new file mode 100644 index 00000000000..72e71b8b852 --- /dev/null +++ b/doc/sphinxext/prs/1366.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3aec0ec92ee3e92b1f72afafaf08d63190354ff6", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 6, + "d": 4 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 39, + "d": 25 + }, + "mne/tests/test_label.py": { + "a": 45, + "d": 42 + }, + "mne/tests/test_source_estimate.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1367.json b/doc/sphinxext/prs/1367.json new file mode 100644 index 00000000000..79cb590d081 --- /dev/null +++ b/doc/sphinxext/prs/1367.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "b98f90b6ca949547d0c7b83819e1aabffe75e9bc", + 
"authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 5, + "d": 8 + }, + "mne/evoked.py": { + "a": 1, + "d": 2 + }, + "mne/io/base.py": { + "a": 1, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 0, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 0, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 6, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 14, + "d": 16 + }, + "mne/io/tree.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 9, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_utils.py": { + "a": 2, + "d": 4 + }, + "mne/utils.py": { + "a": 11, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1368.json b/doc/sphinxext/prs/1368.json new file mode 100644 index 00000000000..074bcac1f9e --- /dev/null +++ b/doc/sphinxext/prs/1368.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "a0f3548ebac093a403d616dabef711bee30ed288", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 5, + "d": 0 + }, + "examples/inverse/plot_mne_crosstalk_function.py": { + "a": 83, + "d": 0 + }, + "examples/inverse/plot_mne_point_spread_function.py": { + "a": 102, + "d": 0 + }, + "mne/minimum_norm/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 431, + "d": 0 + }, + "mne/minimum_norm/tests/test_psf_ctf.py": { + "a": 79, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1371.json b/doc/sphinxext/prs/1371.json new file mode 100644 index 00000000000..ff8b399858a --- /dev/null +++ b/doc/sphinxext/prs/1371.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2dea04cbed2c733689c09b9202c50abc0a475907", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1375.json b/doc/sphinxext/prs/1375.json new file mode 100644 index 00000000000..92e980d1ac9 --- /dev/null +++ b/doc/sphinxext/prs/1375.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "dfadeef3dbc3ae4df7975aee75e7c47f5fed018c", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1376.json b/doc/sphinxext/prs/1376.json new file mode 100644 index 00000000000..3e4c11ddf04 --- /dev/null +++ b/doc/sphinxext/prs/1376.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2df9ead7c1875251b63ee668a303e317c82bc143", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1378.json b/doc/sphinxext/prs/1378.json new file mode 100644 index 00000000000..0e220de6e62 --- /dev/null +++ b/doc/sphinxext/prs/1378.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"8211e58a844bf6ac496f5ae63b8f3b85a591ab87", + "authors": [ + { + "n": "Praveen Sripad", + "e": "pravsripad@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1380.json b/doc/sphinxext/prs/1380.json new file mode 100644 index 00000000000..e5e41ab3087 --- /dev/null +++ b/doc/sphinxext/prs/1380.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "b1ecaab866d65754e6bb94f42aebacf6b149ecc9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 4, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 8, + "d": 8 + }, + "mne/forward/forward.py": { + "a": 45, + "d": 8 + }, + "mne/forward/tests/test_forward.py": { + "a": 14, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 6, + "d": 1 + }, + "mne/io/pick.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1381.json b/doc/sphinxext/prs/1381.json new file mode 100644 index 00000000000..e1488d9041f --- /dev/null +++ b/doc/sphinxext/prs/1381.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3b9b27169eb16a15c3d95831684b568be2e1f712", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 125, + "d": 45 + }, + "mne/io/constants.py": { + "a": 11, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 72, + "d": 15 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 26, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1387.json b/doc/sphinxext/prs/1387.json new file mode 100644 index 00000000000..eb8dadec5a3 --- /dev/null +++ b/doc/sphinxext/prs/1387.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7c53bc0f86dacb1ad6267115ad34e137d19f1eb6", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/label.py": { + "a": 20, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/139.json b/doc/sphinxext/prs/139.json new file mode 100644 index 00000000000..57a1d591ee2 --- /dev/null +++ b/doc/sphinxext/prs/139.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "87fb7b122fefc0752a296f926e43e95f286bdcd8", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/export/plot_epochs_as_data_frame.py": { + "a": 40, + "d": 40 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1390.json b/doc/sphinxext/prs/1390.json new file mode 100644 index 00000000000..282df088966 --- /dev/null +++ b/doc/sphinxext/prs/1390.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "f40a08e34cceb2ab31a8099293e0969f618d1e0d", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "MANIFEST.in": { + "a": 4, + "d": 0 + }, + "doc/source/python_reference.rst": { + "a": 22, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 6, + "d": 6 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/montages/10-5-System_Mastoids_EGI129.csd": { + "a": 467, + "d": 0 + }, + "mne/montages/EGI_256.csd": { + "a": 258, + "d": 0 + }, + "mne/montages/GSN-HydroCel-128.sfp": { + "a": 131, + "d": 0 + }, + "mne/montages/GSN-HydroCel-129.sfp": { + "a": 132, + "d": 0 + }, + "mne/montages/GSN-HydroCel-256.sfp": { + "a": 259, + "d": 0 + }, + "mne/montages/GSN-HydroCel-257.sfp": { + "a": 260, + "d": 0 + }, + "mne/montages/GSN-HydroCel-32.sfp": { + "a": 36, + "d": 0 + }, + "mne/montages/GSN-HydroCel-64_1.0.sfp": { + "a": 67, + "d": 0 + }, + "mne/montages/GSN-HydroCel-65_1.0.sfp": { + "a": 68, + "d": 0 + }, + "mne/montages/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/montages/easycap-M1.txt": { + "a": 75, + "d": 0 + }, + "mne/montages/easycap-M10.txt": { + "a": 62, + "d": 0 + }, + "mne/montages/montage.py": { + "a": 202, + "d": 0 + }, + "mne/montages/standard_1005.elc": { + "a": 698, + "d": 0 + }, + "mne/montages/standard_1020.elc": { + "a": 200, + "d": 0 + }, + "mne/montages/standard_alphabetic.elc": { + "a": 142, + "d": 0 + }, + "mne/montages/standard_postfixed.elc": { + "a": 212, + "d": 0 + }, + "mne/montages/standard_prefixed.elc": { + "a": 160, + "d": 0 + }, + "mne/montages/standard_primed.elc": { + "a": 212, + "d": 0 + }, + "mne/montages/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/montages/tests/test_montage.py": { + "a": 69, + "d": 0 + }, + "mne/utils.py": { + "a": 11, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/montage.py": { + "a": 38, + "d": 0 + }, + "mne/viz/tests/test_montage.py": { + "a": 16, + "d": 0 + }, + "setup.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1392.json b/doc/sphinxext/prs/1392.json new file mode 100644 index 00000000000..9ca34d20ce4 --- /dev/null +++ b/doc/sphinxext/prs/1392.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "390143b8c2188442837088649de14c6c022c3073", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1396.json b/doc/sphinxext/prs/1396.json new file mode 100644 index 00000000000..1401c0aaf81 --- /dev/null +++ b/doc/sphinxext/prs/1396.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d34e4a227656a733425b0961cf8340c892eae367", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_kit2fiff_gui.py": { + "a": 26, + "d": 15 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 24, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1398.json b/doc/sphinxext/prs/1398.json new file mode 100644 index 00000000000..f0fdf481267 --- /dev/null +++ b/doc/sphinxext/prs/1398.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a08cf6f4a643489067a691641f4210983c8653c9", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 27, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 0 + }, + "mne/viz.py": { + "a": 48, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1399.json 
b/doc/sphinxext/prs/1399.json new file mode 100644 index 00000000000..b4eaf64bccd --- /dev/null +++ b/doc/sphinxext/prs/1399.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1dbda634b22647f1ec86fc9946a31385024d7aab", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 7, + "d": 5 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 39, + "d": 21 + }, + "mne/preprocessing/ica.py": { + "a": 54, + "d": 39 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/14.json b/doc/sphinxext/prs/14.json new file mode 100644 index 00000000000..ec595ad56e8 --- /dev/null +++ b/doc/sphinxext/prs/14.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "791cca17b426088dc5732ec5be71933912cc0942", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 163, + "d": 8 + }, + "mne/tests/test_source_estimate.py": { + "a": 18, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/140.json b/doc/sphinxext/prs/140.json new file mode 100644 index 00000000000..fddb1a0fc4a --- /dev/null +++ b/doc/sphinxext/prs/140.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d895111057a43047b707e22d0078e89df27c2923", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1402.json b/doc/sphinxext/prs/1402.json new file mode 100644 index 00000000000..a5bc6869c8d --- /dev/null +++ b/doc/sphinxext/prs/1402.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "c525b255ada6ce0f263c89988e04adb0f65625d3", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_evoked_topomap.py": { + "a": 4, + "d": 5 + }, + "examples/plot_ssp_projs_topomaps.py": { + "a": 0, + "d": 3 + }, + "mne/evoked.py": { + "a": 18, + "d": 11 + }, + "mne/layouts/layout.py": { + "a": 23, + "d": 29 + }, + "mne/preprocessing/ica.py": { + "a": 12, + "d": 5 + }, + "mne/tests/test_viz.py": { + "a": 2, + "d": 2 + }, + "mne/viz.py": { + "a": 267, + "d": 61 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1405.json b/doc/sphinxext/prs/1405.json new file mode 100644 index 00000000000..09d7988bc57 --- /dev/null +++ b/doc/sphinxext/prs/1405.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1a6e5159fe0610acb5320f7858270aaa75b3371d", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/evoked.py": { + "a": 8, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 10, + "d": 3 + }, + "mne/tests/test_viz.py": { + "a": 3, + "d": 21 + }, + "mne/viz.py": { + "a": 21, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1406.json b/doc/sphinxext/prs/1406.json new file mode 100644 index 00000000000..22c2069f3e0 --- /dev/null +++ b/doc/sphinxext/prs/1406.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "88ad4ce209b4ab3ea249315fc18bfc0f1879ec7d", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/tests/test_viz.py": { + "a": 9, + "d": 4 + }, + "mne/viz.py": { + "a": 13, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1408.json b/doc/sphinxext/prs/1408.json new file mode 100644 index 00000000000..31f47e8a443 --- /dev/null +++ b/doc/sphinxext/prs/1408.json @@ -0,0 +1,651 @@ +{ + "merge_commit_sha": "8e50a3d326d54b8ead303f9a4b621dc1b96d44ba", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_sensors.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 2, + "d": 2 + }, + "examples/export/plot_epochs_as_data_frame.py": { + "a": 1, + "d": 1 + }, + "examples/export/plot_epochs_to_nitime.py": { + "a": 2, + "d": 2 + }, + "examples/export/plot_evoked_to_nitime.py": { + "a": 1, + "d": 1 + }, + "examples/export/plot_raw_to_nitime.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_dipole_fit_result.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 1, + "d": 1 + }, + 
"examples/inverse/plot_morph_data.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_stc.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/plot_decimate_head_surface.py": { + "a": 1, + "d": 1 + }, + "examples/plot_define_target_events.py": { + "a": 1, + "d": 1 + }, + "examples/plot_estimate_covariance_matrix_baseline.py": { + "a": 1, + "d": 1 + }, + "examples/plot_estimate_covariance_matrix_raw.py": { + "a": 1, + "d": 1 + }, + "examples/plot_evoked_delayed_ssp.py": { + "a": 2, + "d": 2 + }, + "examples/plot_evoked_topomap_delayed_ssp.py": { + "a": 2, + "d": 2 + }, + "examples/plot_evoked_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/plot_extract_events_from_raw.py": { + "a": 1, + "d": 1 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 2, + "d": 2 + }, + "examples/plot_from_raw_to_multiple_epochs_to_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/plot_meg_eeg_fields_3d.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_and_write_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_bem_surfaces.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_forward.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_noise_covariance_matrix.py": { + "a": 1, + "d": 1 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 1, + "d": 1 + }, + "examples/plot_ssp_projs_sensitivity_map.py": { + "a": 1, + "d": 1 + }, + "examples/plot_ssp_projs_topomaps.py": { + "a": 1, + "d": 1 + }, + "examples/plot_topo_channel_epochs_image.py": { + "a": 2, + "d": 2 + }, + "examples/plot_topo_compare_conditions.py": { + "a": 2, + "d": 2 + }, + "examples/plot_topo_customized.py": { + "a": 1, + "d": 1 + }, + "examples/plot_topography.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "examples/read_events.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_fdr_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_single_trial_spectra.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 1, + "d": 1 + }, + 
"examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_tfr_topography.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/baseline.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/channels.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_surf2bem.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sample/sample.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/classifier.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/read.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf.py": { + "a": 2, + "d": 2 + }, + "mne/io/diff.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/matrix.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/open.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 2, + "d": 2 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_compensator.py": { + "a": 1, + "d": 1 + }, + "mne/io/tree.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/layouts/layout.py": { + "a": 2, + "d": 2 + }, + "mne/layouts/tests/test_layout.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/misc.py": { + "a": 1, + "d": 1 + }, + "mne/parallel.py": { + "a": 1, 
+ "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/proj.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/mockclient.py": { + "a": 2, + "d": 2 + }, + "mne/selection.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/source.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 2, + "d": 2 + }, + "mne/stats/multi_comp.py": { + "a": 1, + "d": 1 + }, + "mne/stats/parametric.py": { + "a": 2, + "d": 2 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_fixes.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz.py": { + "a": 1, + "d": 1 + }, + "setup.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1409.json b/doc/sphinxext/prs/1409.json new file mode 100644 index 00000000000..66cad7e871f --- /dev/null +++ b/doc/sphinxext/prs/1409.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ac947d0723c093c4945bee92cfe7793940016643", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/time_frequency/psd.py": { + "a": 23, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1411.json b/doc/sphinxext/prs/1411.json new file mode 100644 index 00000000000..a6f107eb126 --- /dev/null +++ b/doc/sphinxext/prs/1411.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "585e5cbe08f3f3b0eb4588a292f734f2a911c1c7", + "authors": [ + { + "n": "marmaduke woodman", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1412.json b/doc/sphinxext/prs/1412.json new file mode 100644 index 00000000000..72fdd63491c --- /dev/null +++ b/doc/sphinxext/prs/1412.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "14e9a0f015dd627b48416a6e0ce277696c1aed70", + "authors": [ + { + "n": "marmaduke woodman", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 4, + "d": 0 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1413.json b/doc/sphinxext/prs/1413.json new file mode 100644 index 00000000000..d783b9a5a2a --- /dev/null +++ b/doc/sphinxext/prs/1413.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "92bd8c7165d127cedf5608ec13a17ccdb26a43c6", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1415.json b/doc/sphinxext/prs/1415.json new file mode 100644 index 00000000000..0eb8ad5bb73 --- /dev/null +++ b/doc/sphinxext/prs/1415.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "70b9035e162a4a9dd89585d8d1d752b7f18f65f4", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 4, + "d": 0 + }, + "mne/tests/test_utils.py": { + "a": 3, + "d": 1 + }, + "mne/utils.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1418.json b/doc/sphinxext/prs/1418.json new file mode 100644 index 00000000000..343679133d7 --- /dev/null +++ b/doc/sphinxext/prs/1418.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3ceb71122e7fbae757686cb148ac16995af1f950", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/viz.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1419.json b/doc/sphinxext/prs/1419.json new file mode 100644 index 00000000000..e412b1a7621 --- /dev/null +++ b/doc/sphinxext/prs/1419.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cf93bc65a6861a078539fde0a7e9847e7ea21548", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/evoked.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/142.json b/doc/sphinxext/prs/142.json new file mode 100644 index 00000000000..3266ccb49aa --- /dev/null +++ b/doc/sphinxext/prs/142.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "6845038f3e7c0e080986ed83dda618c030a7b08c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 2, + "d": 2 + }, + "doc/source/whats_new.rst": { + "a": 12, + "d": 0 + }, + "examples/export/plot_epochs_to_nitime.py": { + "a": 64, + "d": 0 + }, + "examples/export/plot_evoked_to_nitime.py": { + "a": 34, + "d": 0 + }, + "examples/export/plot_raw_to_nitime.py": { + "a": 83, + "d": 0 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_and_write_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 56, + "d": 4 + }, + "mne/event.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/evoked.py": { + "a": 22, + "d": 0 + }, + "mne/fiff/raw.py": { + "a": 93, + "d": 1 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 17, + "d": 0 + }, + "mne/fiff/tests/test_raw.py": { + "a": 51, + "d": 3 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/proj.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 26, + "d": 0 + }, + "mne/time_frequency/ar.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1420.json b/doc/sphinxext/prs/1420.json new file mode 100644 index 00000000000..756b4ed91a8 --- /dev/null +++ b/doc/sphinxext/prs/1420.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "1f096f76619b6f63316bbd609756337e9d3a7ae8", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/tests/test_viz.py": { + "a": 0, + "d": 676 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + }, + "mne/viz.py": { + "a": 0, + "d": 5223 + }, + "mne/viz/_3d.py": { + "a": 635, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 19, + "d": 0 + }, + "mne/viz/circle.py": { + "a": 408, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 449, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 292, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 484, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 506, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 610, + "d": 0 + }, + "mne/viz/tests/__init__py": { + "a": 0, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 117, + "d": 0 + }, + "mne/viz/tests/test_circle.py": { + "a": 94, + "d": 0 + }, + "mne/viz/tests/test_epochs.py": { + "a": 118, + "d": 0 + }, + "mne/viz/tests/test_evoked.py": { + "a": 107, + "d": 0 + }, + "mne/viz/tests/test_ica.py": { + "a": 149, + "d": 0 + }, + "mne/viz/tests/test_misc.py": { + "a": 115, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 111, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 138, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 150, + "d": 0 + }, + "mne/viz/tests/test_utils.py": { + "a": 28, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 723, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 1029, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 365, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1422.json b/doc/sphinxext/prs/1422.json new file mode 100644 index 00000000000..0c13b1d621d --- /dev/null +++ b/doc/sphinxext/prs/1422.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d13681dfc8961b96654a17ee7e4e2584c1346260", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "examples/realtime/ftclient_rt_average.py": { + "a": 2, + "d": 2 + }, + 
"mne/realtime/fieldtrip_client.py": { + "a": 14, + "d": 7 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 68, + "d": 0 + }, + "mne/utils.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1424.json b/doc/sphinxext/prs/1424.json new file mode 100644 index 00000000000..197a2d5e910 --- /dev/null +++ b/doc/sphinxext/prs/1424.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "05a38415479692438a526f87ea9b965b1c7a356d", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/surface.py": { + "a": 3, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 5 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1425.json b/doc/sphinxext/prs/1425.json new file mode 100644 index 00000000000..99f571f9f25 --- /dev/null +++ b/doc/sphinxext/prs/1425.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "3610ea9df2fe7cce4c29a8a257f2b2161f6281a3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 6, + "d": 3 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 5, + "d": 2 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 4, + "d": 4 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 3, + "d": 3 + }, + "mne/realtime/tests/test_stim_client_server.py": { + "a": 5, + "d": 2 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 6, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1426.json b/doc/sphinxext/prs/1426.json new file mode 100644 index 00000000000..d2d00aeb567 --- /dev/null +++ b/doc/sphinxext/prs/1426.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "36e7244c90548c503f34fc36f2d9b57d0985025c", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1427.json b/doc/sphinxext/prs/1427.json new file mode 100644 index 00000000000..32f1ac8920b --- /dev/null +++ b/doc/sphinxext/prs/1427.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5d71237763b800df6408f118a63a43e9c6975c7f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 63, + "d": 16 + }, + "mne/tests/test_source_space.py": { + "a": 13, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/143.json b/doc/sphinxext/prs/143.json new file mode 100644 index 00000000000..2fd8b664e18 --- /dev/null +++ b/doc/sphinxext/prs/143.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1e2ddcff56ddfe887800d22000b6cef56b161e8c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 47, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1432.json b/doc/sphinxext/prs/1432.json new file mode 100644 index 00000000000..4a1aa22d21f --- /dev/null +++ b/doc/sphinxext/prs/1432.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5c0a828f030992729fff7b784b133b849bc0413c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 11, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1433.json b/doc/sphinxext/prs/1433.json new file mode 100644 index 00000000000..3a350897d6e --- /dev/null +++ b/doc/sphinxext/prs/1433.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "183b79d9671feb4fb821543a6c701c1778eec3a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "mne/_hdf5.py": { + "a": 156, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 27, + "d": 6 + }, + "mne/tests/test_hdf5.py": { + "a": 26, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 35, + "d": 15 + }, + "mne/utils.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1435.json b/doc/sphinxext/prs/1435.json new file mode 100644 index 00000000000..b6f21255b1b --- /dev/null +++ b/doc/sphinxext/prs/1435.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f64d1a6ba5caba88cd0eb800f897aff6d7e83aaa", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1437.json b/doc/sphinxext/prs/1437.json new file mode 100644 index 00000000000..1890aa2a84c --- /dev/null +++ b/doc/sphinxext/prs/1437.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "268cb70c0e2340727d62ce5e9f6199dec6f53ecc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "setup.cfg": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1438.json b/doc/sphinxext/prs/1438.json new file mode 100644 index 00000000000..f2f425c2957 --- /dev/null +++ b/doc/sphinxext/prs/1438.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e503ff689a826dfd0dcb68156a4afeeade478bc9", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/report.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_report.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1439.json b/doc/sphinxext/prs/1439.json new file mode 100644 index 00000000000..5fe9cb6f4bd --- /dev/null +++ b/doc/sphinxext/prs/1439.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4befe9eb1cf5ef2114eab8b173682737b153668a", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1440.json b/doc/sphinxext/prs/1440.json new file mode 100644 index 00000000000..4a488370f9b --- /dev/null +++ b/doc/sphinxext/prs/1440.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "66adf49f071059057cbea7bf7044f03b06467c57", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/commands/mne_report.py": { + "a": 4, + "d": 1 + }, + "mne/report.py": { + "a": 249, + "d": 194 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1441.json b/doc/sphinxext/prs/1441.json new file mode 100644 index 00000000000..efbc63e0144 --- /dev/null +++ b/doc/sphinxext/prs/1441.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "acc5d031a11679f162ec4d156fd65560d2c689a5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_hdf5.py": { + "a": 20, + "d": 9 + }, + "mne/io/array/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/tests/test_hdf5.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/__init__.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1445.json b/doc/sphinxext/prs/1445.json new file mode 100644 index 00000000000..20a3976f4f9 --- /dev/null +++ b/doc/sphinxext/prs/1445.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3e4a21ce7425aae03608fe936729bdff72bf03c2", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1447.json b/doc/sphinxext/prs/1447.json new file mode 100644 index 00000000000..8fc804027d5 --- /dev/null +++ b/doc/sphinxext/prs/1447.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "10249aa3e2ad1f428fb68fe15733e2017fdb0c1c", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1448.json b/doc/sphinxext/prs/1448.json new file mode 100644 index 00000000000..8fb8dd05e98 --- /dev/null +++ b/doc/sphinxext/prs/1448.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4bd5f69eb2943e44cd708ed5917d00858cb6e895", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 12, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1450.json b/doc/sphinxext/prs/1450.json new file mode 100644 index 00000000000..18b99dc9f3a --- /dev/null +++ b/doc/sphinxext/prs/1450.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a26113eaf8fbc16e96aa07bb0cb11db58127fb33", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_read_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1455.json b/doc/sphinxext/prs/1455.json new file mode 100644 index 00000000000..2c10c43e328 --- /dev/null +++ b/doc/sphinxext/prs/1455.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "dcd6eb2e2e9ceee34a34e78accd6261e6bf417f2", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/plot_topo_customized.py": { + "a": 15, + "d": 10 + }, + "mne/time_frequency/psd.py": { + "a": 10, + "d": 8 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 3, + "d": 3 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 9, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1456.json b/doc/sphinxext/prs/1456.json new file mode 100644 index 00000000000..39860a3e279 --- /dev/null +++ b/doc/sphinxext/prs/1456.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "65acb03c71d72e207082750541fdecce2feb2812", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/export/plot_evoked_to_nitime.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_mne_crosstalk_function.py": { + "a": 8, + "d": 8 + }, + "examples/plot_make_forward.py": { + "a": 17, + "d": 6 + }, + "examples/plot_read_and_write_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_forward.py": { + "a": 1, + "d": 1 + }, + "examples/plot_ssp_projs_sensitivity_map.py": { + "a": 1, + "d": 1 + }, + "examples/plot_ssp_projs_topomaps.py": { + "a": 1, + "d": 1 + }, + "examples/plot_topo_customized.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1459.json b/doc/sphinxext/prs/1459.json new file mode 100644 index 00000000000..418d7048185 --- /dev/null +++ b/doc/sphinxext/prs/1459.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "5ea0d5bba127c462964fc94a8fedcd210b2ec39e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/decoding/plot_decoding_time_generalization.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 2, + "d": 1 + }, + "examples/plot_evoked_delayed_ssp.py": { + "a": 3, + "d": 2 + }, + "examples/plot_topo_customized.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 15, + "d": 9 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 4, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 4, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1460.json b/doc/sphinxext/prs/1460.json new file mode 100644 index 00000000000..1170c214573 --- /dev/null +++ b/doc/sphinxext/prs/1460.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aab2a2de13b99302f7204999f545109b1c507ddb", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1461.json b/doc/sphinxext/prs/1461.json new file mode 100644 index 00000000000..66e394b0761 --- /dev/null +++ b/doc/sphinxext/prs/1461.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f9f3c34595e86574cdd5f36f9f2cf32ed0a05d0b", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/report.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1462.json b/doc/sphinxext/prs/1462.json new file mode 100644 index 00000000000..2d1c3cd639b --- /dev/null +++ b/doc/sphinxext/prs/1462.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "981912ce2984072e93f244213f4aa5b13fef3aca", + "authors": [ + { + "n": "Alan Leggitt", + "e": "leggitta3@gmail.com" + } + ], + "changes": { + "examples/plot_left_cerebellum_volume_source.py": { + "a": 10, + "d": 5 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 4, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 12, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 56, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 106, + "d": 9 + }, + "mne/source_space.py": { + "a": 352, + "d": 45 + }, + "mne/tests/test_source_estimate.py": { + "a": 26, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 52, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1465.json b/doc/sphinxext/prs/1465.json new file mode 100644 index 00000000000..ba03fc190f3 --- /dev/null +++ b/doc/sphinxext/prs/1465.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5a599d33ad271710c8eb6f5afce82e855d1ba176", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_ica.py": { + "a": 10, + "d": 7 + }, + "mne/source_estimate.py": { + "a": 6, + "d": 5 + }, + "mne/stats/tests/test_regression.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 9, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 36, + "d": 24 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1466.json b/doc/sphinxext/prs/1466.json new file mode 100644 index 00000000000..420197f8f94 --- /dev/null +++ b/doc/sphinxext/prs/1466.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4f6aa9c41e746a46d5f84ab203862a4f2497b8d2", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/mne-python.rst": { + "a": 1, + "d": 0 + }, + "doc/source/mne_report_tutorial.rst": { + "a": 87, + "d": 0 + }, + "mne/report.py": { + "a": 35, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1467.json b/doc/sphinxext/prs/1467.json new file mode 100644 index 00000000000..f027f881012 --- /dev/null +++ b/doc/sphinxext/prs/1467.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3591c2e8f05878432cd95169e653fc2b5d0e30a0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/event.py": { + "a": 4, + "d": 2 + }, + "mne/externals/tempita/__init__.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1468.json b/doc/sphinxext/prs/1468.json new file mode 100644 index 00000000000..2ac35fe96b5 --- /dev/null +++ b/doc/sphinxext/prs/1468.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "65eed5d13285cea4bac4b378d7e5844241896e9d", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/mne_report_tutorial.rst": { + "a": 36, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1469.json b/doc/sphinxext/prs/1469.json new file mode 100644 index 00000000000..3385ba55c21 --- /dev/null +++ b/doc/sphinxext/prs/1469.json @@ -0,0 +1,15 
@@ +{ + "merge_commit_sha": "e8d2a98e3623caa9d9bb8c3f2bfc94b548d7e263", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/mne_report_tutorial.rst": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/147.json b/doc/sphinxext/prs/147.json new file mode 100644 index 00000000000..a27f745eb97 --- /dev/null +++ b/doc/sphinxext/prs/147.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "273428798cd978f89e8a1761c420e15b77f99c38", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 7, + "d": 6 + }, + "mne/fiff/proj.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1470.json b/doc/sphinxext/prs/1470.json new file mode 100644 index 00000000000..536a11a6ac6 --- /dev/null +++ b/doc/sphinxext/prs/1470.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6d30fc9168e77a57fdd4371c975f6018be5393a6", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1478.json b/doc/sphinxext/prs/1478.json new file mode 100644 index 00000000000..b9c6898e467 --- /dev/null +++ b/doc/sphinxext/prs/1478.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "05c82acafc1aceae80d8b2c2b8804e5fe7a2cf63", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/coreg.py": { + "a": 2, + "d": 1 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 21, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1479.json b/doc/sphinxext/prs/1479.json new file mode 100644 index 00000000000..7aab8b3ce89 --- /dev/null +++ b/doc/sphinxext/prs/1479.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7aca02a1279c98f72ca4f00981a90f07604c8334", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1481.json b/doc/sphinxext/prs/1481.json new file mode 100644 index 00000000000..1d8333ed517 --- /dev/null +++ b/doc/sphinxext/prs/1481.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "127fd59bb852b39cf7134cf249b47027506bc46c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 6, + "d": 3 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_dipole_fit_result.py": { + "a": 6, + "d": 4 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 7, + "d": 4 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 8, + "d": 5 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 7, + "d": 9 + }, + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_single_trial_spectra.py": { + "a": 5, + "d": 2 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 2, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 8, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1484.json b/doc/sphinxext/prs/1484.json new file mode 100644 index 00000000000..a0079ad1b16 --- /dev/null +++ b/doc/sphinxext/prs/1484.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a0bc13679b0af9796f16499ea7bebf63a2e864a7", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1485.json b/doc/sphinxext/prs/1485.json new file mode 100644 index 00000000000..dd40d4a5866 --- /dev/null +++ b/doc/sphinxext/prs/1485.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0b15b60cdadb8512761e20f98511b191dd8ae3fa", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1487.json b/doc/sphinxext/prs/1487.json new file mode 100644 index 00000000000..1ed18842034 --- /dev/null +++ b/doc/sphinxext/prs/1487.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a0e10457953dbcc98d5ba9a6b40f5c131d71b80e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 21, + "d": 0 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1488.json b/doc/sphinxext/prs/1488.json new file mode 100644 index 00000000000..73af1dc68fb --- /dev/null +++ b/doc/sphinxext/prs/1488.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "db5c64711c6eea94920ceba50a57db4f11e91861", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 12, + "d": 12 + }, + "MANIFEST.in": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1489.json b/doc/sphinxext/prs/1489.json new file mode 100644 index 00000000000..18a81e406d3 --- /dev/null +++ b/doc/sphinxext/prs/1489.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "92ac62aad15ed618e55b5f2ef34852a8f3377ac7", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/149.json b/doc/sphinxext/prs/149.json new file mode 100644 index 
00000000000..3f0df708bdc --- /dev/null +++ b/doc/sphinxext/prs/149.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d0af4cd9ec5011057f645040496d3680919cc36d", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 1, + "d": 3 + }, + "mne/fiff/proj.py": { + "a": 3, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 91, + "d": 126 + }, + "mne/fiff/tests/test_raw.py": { + "a": 45, + "d": 23 + }, + "mne/forward.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1491.json b/doc/sphinxext/prs/1491.json new file mode 100644 index 00000000000..5569666a739 --- /dev/null +++ b/doc/sphinxext/prs/1491.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "15eb500c3dea94c381c93a66e6314632a9375f55", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/surface.py": { + "a": 20, + "d": 11 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1492.json b/doc/sphinxext/prs/1492.json new file mode 100644 index 00000000000..257dffdefa1 --- /dev/null +++ b/doc/sphinxext/prs/1492.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "36a599bf8e4e8633c33627409ee84c8d198a746a", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/plot_make_forward.py": { + "a": 2, + "d": 1 + }, + "examples/plot_read_forward.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 4, + "d": 6 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 6, + "d": 9 + }, + "mne/stats/cluster_level.py": { + "a": 21, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1493.json b/doc/sphinxext/prs/1493.json new file mode 100644 index 00000000000..d1e48b1e9f5 --- /dev/null +++ b/doc/sphinxext/prs/1493.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "16814e8295442acf6b07b6538f128f4ab773515e", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 92, + "d": 13 + }, + "mne/tests/test_report.py": { + "a": 0, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1496.json b/doc/sphinxext/prs/1496.json new file mode 100644 index 00000000000..bdf667c480c --- /dev/null +++ b/doc/sphinxext/prs/1496.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "06d41b9b378f5e4bc30f0607398235d6c2d6ceef", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 34, + "d": 14 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 42, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1498.json b/doc/sphinxext/prs/1498.json new file mode 100644 index 00000000000..d8cd64c6968 --- /dev/null +++ b/doc/sphinxext/prs/1498.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "99e0ffcb69cfdac0a23adce1440ac1f1d0239825", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 23, + "d": 10 + }, + "mne/tests/test_report.py": { + "a": 2, + "d": 1 + }, + "mne/utils.py": { + "a": 20, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 16 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/1499.json b/doc/sphinxext/prs/1499.json new file mode 100644 index 00000000000..8a86241fcfc --- /dev/null +++ b/doc/sphinxext/prs/1499.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "dd0bc5e7305168d9ee367626f2fdfc344078854e", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/plot_read_noise_covariance_matrix.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 87, + "d": 62 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/15.json b/doc/sphinxext/prs/15.json new file mode 100644 index 00000000000..4f01172af94 --- /dev/null +++ b/doc/sphinxext/prs/15.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9682cf0954ae2826207b4913ca681148e7735a39", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 45, + "d": 22 + }, + "mne/fiff/tests/test_raw.py": { + "a": 30, + "d": 0 + }, + "mne/tests/test_raw.py": { + "a": 0, + "d": 46 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/150.json b/doc/sphinxext/prs/150.json new file mode 100644 index 00000000000..20ef4a1104a --- /dev/null +++ b/doc/sphinxext/prs/150.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dcb930b5d83bd3e850e8b10a23904c1b6c0b97c5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_cov.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1500.json b/doc/sphinxext/prs/1500.json new file mode 100644 index 00000000000..3c8cc7e5e41 --- /dev/null +++ b/doc/sphinxext/prs/1500.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "48453697212c60dedc6f869767a27da4b07c4f2c", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/contributing.rst": { + "a": 40, + "d": 48 + }, + "doc/source/git_links.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1502.json b/doc/sphinxext/prs/1502.json new file mode 100644 index 00000000000..d80761bb4fe --- /dev/null +++ b/doc/sphinxext/prs/1502.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3bc342a55ea3c47e4c0e748d0c28e268c2e8f72c", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "examples/realtime/ftclient_rt_compute_psd.py": { + "a": 76, + "d": 0 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 38, + "d": 0 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 34, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1503.json b/doc/sphinxext/prs/1503.json new file mode 100644 index 00000000000..7155bf0c7ff --- /dev/null +++ b/doc/sphinxext/prs/1503.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8a286b29bbb815cc58c216a6d76c3fa7192e7328", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/label.py": { + "a": 57, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 21, + "d": 6 + }, + "mne/tests/test_label.py": { + "a": 9, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1504.json b/doc/sphinxext/prs/1504.json new file mode 100644 index 00000000000..7e2e573d7f4 --- /dev/null +++ b/doc/sphinxext/prs/1504.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "77881eceb89b343be62167f848aefc13b63663a8", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/label.py": { + "a": 66, + "d": 54 + }, + "mne/tests/test_label.py": { + "a": 48, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1509.json b/doc/sphinxext/prs/1509.json new file mode 100644 index 00000000000..744fe14dad5 --- /dev/null +++ b/doc/sphinxext/prs/1509.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "39d98e4a49114cf1005f7841a1449e3e42ae6807", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/151.json b/doc/sphinxext/prs/151.json new file mode 100644 index 00000000000..ec7d03d97b9 --- /dev/null +++ b/doc/sphinxext/prs/151.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "063e01596cfb7b8775b701116aa958619fbab289", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/tests/test_cov.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1513.json b/doc/sphinxext/prs/1513.json new file mode 100644 index 00000000000..62b328af1bb --- /dev/null +++ b/doc/sphinxext/prs/1513.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "09d7b82cf34053f031feadf31bd729e6ed3c21a6", + "authors": [ + { + "n": "Alan Leggitt", + "e": "leggitta3@gmail.com" + } + ], + "changes": { + "examples/plot_left_cerebellum_volume_source.py": { + "a": 97, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/data/FreeSurferColorLUT.txt": { + "a": 1397, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 4, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 27, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 56, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 158, + "d": 14 + }, + "mne/source_space.py": { + "a": 490, + "d": 25 + }, + "mne/tests/test_source_estimate.py": { + "a": 26, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 122, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1515.json b/doc/sphinxext/prs/1515.json new file mode 100644 index 00000000000..016c444f3a7 --- /dev/null +++ b/doc/sphinxext/prs/1515.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "55b53d5c7d5b7eeabde307f5fce8d7e8554fd6f3", + "authors": [ + { + "n": "Alan Leggitt", + "e": "leggitta3@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 25, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 10, + "d": 0 + }, + "examples/plot_left_cerebellum_volume_source.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1517.json b/doc/sphinxext/prs/1517.json new file mode 100644 index 00000000000..55ed2531d10 --- /dev/null +++ b/doc/sphinxext/prs/1517.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "075f9933038235b1df83e5cef48ca6bb54bb5948", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + 
"changes": { + "doc/source/contributing.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 14, + "d": 8 + }, + "mne/viz/ica.py": { + "a": 41, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1519.json b/doc/sphinxext/prs/1519.json new file mode 100644 index 00000000000..c47e976076f --- /dev/null +++ b/doc/sphinxext/prs/1519.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1713681dc3aead7c8893a08b4bd45f04d9abaa03", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/epochs.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/152.json b/doc/sphinxext/prs/152.json new file mode 100644 index 00000000000..0e3bd6753a8 --- /dev/null +++ b/doc/sphinxext/prs/152.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "10094ca7be875909ec1564c2999a1488dc45e0a7", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/__init__.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1520.json b/doc/sphinxext/prs/1520.json new file mode 100644 index 00000000000..d0a0ab0ecaf --- /dev/null +++ b/doc/sphinxext/prs/1520.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "923a714f5a3a36cd7458852a02e6715859dd4c45", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 0, + "d": 1 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 5, + "d": 4 + }, + "mne/__init__.py": { + "a": 1, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 14 + }, + "mne/label.py": { + "a": 13, + "d": 80 + }, + "mne/layouts/layout.py": { + "a": 2, + "d": 20 + }, + "mne/layouts/tests/test_layout.py": { + "a": 0, + "d": 9 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 6 + }, + "mne/mixed_norm/__init__.py": { + "a": 0, + "d": 7 + }, + "mne/preprocessing/ica.py": { + "a": 7, + "d": 312 + }, + "mne/tests/test_label.py": { + "a": 94, + "d": 5 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 2, + "d": 7 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 4, + "d": 8 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 45 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 6, + "d": 10 + }, + "mne/viz/ica.py": { + "a": 0, + "d": 40 + }, + "mne/viz/topo.py": { + "a": 0, + "d": 232 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 14 + }, + "setup.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1523.json b/doc/sphinxext/prs/1523.json new file mode 100644 index 00000000000..2184d3fc8d9 --- /dev/null +++ b/doc/sphinxext/prs/1523.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "77f3d7097096a8bf89bddb87bbedd96a63274c79", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 40, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1529.json b/doc/sphinxext/prs/1529.json new file mode 100644 index 00000000000..ea8d4a71397 --- /dev/null +++ b/doc/sphinxext/prs/1529.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f125792c167b085cfe5be643344f9de93df1dd76", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": 
{ + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/layouts/layout.py": { + "a": 19, + "d": 4 + }, + "mne/layouts/tests/test_layout.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/153.json b/doc/sphinxext/prs/153.json new file mode 100644 index 00000000000..3b45f430bdd --- /dev/null +++ b/doc/sphinxext/prs/153.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ffdba6fc035f799d184b9d48670a362f3f4b3000", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 3, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 1 + }, + "examples/inverse/plot_morph_data.py": { + "a": 20, + "d": 5 + }, + "mne/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 229, + "d": 54 + }, + "mne/tests/test_source_estimate.py": { + "a": 14, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1533.json b/doc/sphinxext/prs/1533.json new file mode 100644 index 00000000000..8a0e945f705 --- /dev/null +++ b/doc/sphinxext/prs/1533.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "32a36b4dfbbb44c9aa9a6bc35422df7b0f87228c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/channels.py": { + "a": 7, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1535.json b/doc/sphinxext/prs/1535.json new file mode 100644 index 00000000000..599a9e8f4da --- /dev/null +++ b/doc/sphinxext/prs/1535.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1685beabc4f1395598cd09f621a2a28e35e85f96", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/stft.py": { + "a": 7, + "d": 6 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 21, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1536.json b/doc/sphinxext/prs/1536.json new file mode 100644 index 00000000000..a0397f5853c --- /dev/null +++ b/doc/sphinxext/prs/1536.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d621f50184d10280ef3386e011ad9182c2e50116", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 12, + "d": 5 + }, + "mne/viz/topo.py": { + "a": 32, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1538.json b/doc/sphinxext/prs/1538.json new file mode 100644 index 00000000000..e4285f32091 --- /dev/null +++ b/doc/sphinxext/prs/1538.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "aaf68e924cf0bc960af476ecdd16370386843ef9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/contributing.rst": { + "a": 2, + "d": 2 + }, + "doc/source/mne_report_tutorial.rst": { + "a": 2, + "d": 2 + }, + "mne/__init__.py": { + "a": 1, + "d": 3 + }, + "mne/epochs.py": { + "a": 2, + "d": 19 + }, + "mne/evoked.py": { + "a": 9, + "d": 125 + }, + "mne/fiff/__init__.py": { + "a": 0, + "d": 79 + }, + "mne/source_space.py": { + "a": 2, + "d": 10 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 13 + }, + "mne/utils.py": { + "a": 2, + "d": 2 + }, + "setup.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/1540.json b/doc/sphinxext/prs/1540.json new file mode 100644 index 00000000000..790cd503312 --- /dev/null +++ b/doc/sphinxext/prs/1540.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "352483afc627a179505be649b4712a699a4b161a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1543.json b/doc/sphinxext/prs/1543.json new file mode 100644 index 00000000000..4f4579ba320 --- /dev/null +++ b/doc/sphinxext/prs/1543.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8086b4224f0c1ddb676018a8751fce7c3a560537", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1545.json b/doc/sphinxext/prs/1545.json new file mode 100644 index 00000000000..aa188487b06 --- /dev/null +++ b/doc/sphinxext/prs/1545.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d6fbb058ab0ddb284b2e8dc6fc88df770e284c94", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/tests/data/test_ica.lout": { + "a": 3, + "d": 3 + }, + "mne/layouts/layout.py": { + "a": 160, + "d": 42 + }, + "mne/layouts/tests/test_layout.py": { + "a": 137, + "d": 49 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1546.json b/doc/sphinxext/prs/1546.json new file mode 100644 index 00000000000..054390c4fb1 --- /dev/null +++ b/doc/sphinxext/prs/1546.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1dcc1cba093a7eb8fc1dccb7cdb1aebd692a22f7", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/155.json b/doc/sphinxext/prs/155.json new file mode 100644 index 00000000000..03730776be1 --- /dev/null +++ b/doc/sphinxext/prs/155.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "76c1d764f7beffb5de24821f26d7c3941eb2693f", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/artifacts/ica.py": { + "a": 18, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1550.json b/doc/sphinxext/prs/1550.json new file mode 100644 index 00000000000..62e9479c624 --- /dev/null +++ b/doc/sphinxext/prs/1550.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "81201d762867ac3acaa36b001de36cc9a4978bc5", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/layouts/layout.py": { + "a": 2, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1551.json b/doc/sphinxext/prs/1551.json new file mode 100644 index 00000000000..20a64aef693 --- /dev/null +++ b/doc/sphinxext/prs/1551.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d30ff45e90da322142a6b6c9a017e2630fc88cbc", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/viz/circle.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1552.json b/doc/sphinxext/prs/1552.json new file mode 100644 index 00000000000..d7cd9e44245 --- 
/dev/null +++ b/doc/sphinxext/prs/1552.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b538c2a3808e36fb793af13df7dd5b017d3dd611", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/mne_report_tutorial.rst": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 110, + "d": 26 + }, + "mne/tests/test_report.py": { + "a": 19, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1554.json b/doc/sphinxext/prs/1554.json new file mode 100644 index 00000000000..7cfb348a910 --- /dev/null +++ b/doc/sphinxext/prs/1554.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3c32f6059a0e8279d57679790c34bf9ff519078d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_misc.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1555.json b/doc/sphinxext/prs/1555.json new file mode 100644 index 00000000000..4eb153e2c05 --- /dev/null +++ b/doc/sphinxext/prs/1555.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e4ce25460326423c1a5bbb83fce6acf92d1201c9", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1558.json b/doc/sphinxext/prs/1558.json new file mode 100644 index 00000000000..de8ccc9c12d --- /dev/null +++ b/doc/sphinxext/prs/1558.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4be3c611c7dcda04ce61b3d699f7611e25f14be9", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1559.json b/doc/sphinxext/prs/1559.json new file mode 100644 index 00000000000..3ce809a7648 --- /dev/null +++ b/doc/sphinxext/prs/1559.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "17f8e54836839cff4659f30c833f739821a4fb16", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 12, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/156.json b/doc/sphinxext/prs/156.json new file mode 100644 index 00000000000..49ee80a4964 --- /dev/null +++ b/doc/sphinxext/prs/156.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "706d211ed3bb516752990503e605e7ef4aa274bb", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1561.json b/doc/sphinxext/prs/1561.json new file mode 100644 index 00000000000..58ccc02e150 --- /dev/null +++ b/doc/sphinxext/prs/1561.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "628e6776293f0d86df15034d8e45b6477ecf0d5b", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_file_traits.py": { + "a": 11, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1564.json b/doc/sphinxext/prs/1564.json new file mode 100644 index 00000000000..2837af24a47 --- /dev/null +++ b/doc/sphinxext/prs/1564.json @@ -0,0 +1,415 @@ +{ + "merge_commit_sha": "0826eca83da5839aea063cbfc34381eae17f5ff3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 3, + "d": 2 + }, + ".travis.yml": { + "a": 55, + "d": 23 + }, + "Makefile": { + "a": 15, + "d": 8 + }, + "bin/mne": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 33, + "d": 22 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 38, + "d": 33 + }, + "mne/commands/mne_browse_raw.py": { + "a": 11, + "d": 5 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 23, + "d": 16 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 17, + "d": 11 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 8, + "d": 3 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 41, + "d": 39 + }, + "mne/commands/mne_coreg.py": { + "a": 7, + "d": 2 + }, + "mne/commands/mne_flash_bem_model.py": { + "a": 15, + "d": 11 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 8, + "d": 2 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 24, + "d": 18 + }, + "mne/commands/mne_maxfilter.py": { + "a": 60, + "d": 54 + }, + "mne/commands/mne_report.py": { + "a": 12, + "d": 2 + }, + "mne/commands/mne_surf2bem.py": { + "a": 12, + "d": 6 + }, + "mne/commands/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 103, + "d": 0 + }, + "mne/commands/utils.py": { + "a": 11, + "d": 4 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 30, + "d": 23 + }, + "mne/coreg.py": { + "a": 22, + "d": 6 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 2, + "d": 4 + }, + "mne/datasets/megsim/megsim.py": { + "a": 3, + "d": 2 + }, + "mne/datasets/sample/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sample/sample.py": { + "a": 1, + "d": 13 + }, + "mne/datasets/testing/__init__.py": { + "a": 4, + "d": 0 + }, + "mne/datasets/testing/_testing.py": { + "a": 41, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 21, + "d": 13 + }, + "mne/decoding/tests/test_csp.py": { + "a": 3, + "d": 5 + }, + "mne/decoding/tests/test_ems.py": { + "a": 2, + "d": 4 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 3, + "d": 5 + }, + "mne/fixes.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_lead_dots.py": { + "a": 2, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 17, + "d": 25 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 17, + "d": 14 + }, + "mne/forward/tests/test_forward.py": { + "a": 46, + "d": 56 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 71, + "d": 73 + }, + "mne/gui/_coreg_gui.py": { + "a": 6, + "d": 31 + }, + "mne/gui/_fiducials_gui.py": { + "a": 5, + "d": 21 + }, + 
"mne/gui/_file_traits.py": { + "a": 37, + "d": 65 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 15, + "d": 10 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 12, + "d": 13 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 18, + "d": 17 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 2, + "d": 4 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 2, + "d": 3 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 17, + "d": 10 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 9, + "d": 4 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 4 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 4, + "d": 4 + }, + "mne/io/bti/bti.py": { + "a": 6, + "d": 6 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 2, + "d": 3 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 59, + "d": 12 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 2, + "d": 1 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 2, + "d": 3 + }, + "mne/io/tests/test_compensator.py": { + "a": 3, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 3, + "d": 4 + }, + "mne/layouts/tests/test_layout.py": { + "a": 4, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 101, + "d": 90 + }, + "mne/minimum_norm/tests/test_psf_ctf.py": { + "a": 8, + "d": 6 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 12, + "d": 8 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 8, + "d": 9 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 4, + "d": 4 + }, + "mne/simulation/tests/test_source.py": { + "a": 18, + "d": 12 + }, + "mne/source_space.py": { + "a": 225, + "d": 95 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 88, + "d": 85 + }, + "mne/stats/tests/test_regression.py": { + "a": 7, + "d": 8 + }, + "mne/tests/__init__.py": { + "a": 0, + "d": 3 + }, + "mne/tests/test_channels.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_coreg.py": { + "a": 10, + "d": 12 + }, + "mne/tests/test_cov.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 4, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_event.py": { + "a": 8, + "d": 6 + }, + "mne/tests/test_evoked.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_hdf5.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 39, + "d": 47 + }, + "mne/tests/test_proj.py": { + "a": 12, + "d": 9 + }, + "mne/tests/test_report.py": { + "a": 22, + "d": 25 + }, + "mne/tests/test_source_estimate.py": { + "a": 37, + "d": 37 + }, + "mne/tests/test_source_space.py": { + "a": 109, + "d": 182 + }, + "mne/tests/test_surface.py": { + "a": 17, + "d": 11 + }, + "mne/tests/test_transforms.py": { + "a": 9, + "d": 9 + }, + "mne/tests/test_utils.py": { + "a": 38, + "d": 15 + }, + "mne/utils.py": { + "a": 177, + "d": 65 + }, + "mne/viz/tests/test_3d.py": { + "a": 14, + "d": 13 + }, + "mne/viz/tests/test_epochs.py": { + "a": 0, + "d": 6 + }, + "mne/viz/tests/test_evoked.py": { + "a": 0, + "d": 5 + }, + "mne/viz/tests/test_ica.py": { + "a": 0, + "d": 5 + }, + "mne/viz/tests/test_misc.py": { + "a": 10, + "d": 9 + }, + "mne/viz/tests/test_raw.py": { + "a": 0, + "d": 3 + }, + "mne/viz/tests/test_topo.py": { + "a": 0, + "d": 5 + }, + "mne/viz/tests/test_topomap.py": { + "a": 3, + "d": 3 + }, + "setup.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at 
end of file diff --git a/doc/sphinxext/prs/1565.json b/doc/sphinxext/prs/1565.json new file mode 100644 index 00000000000..665e175542e --- /dev/null +++ b/doc/sphinxext/prs/1565.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "69df199095bf66c3c070046eb8d20877804c038d", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_file_traits.py": { + "a": 10, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1566.json b/doc/sphinxext/prs/1566.json new file mode 100644 index 00000000000..30feaf668df --- /dev/null +++ b/doc/sphinxext/prs/1566.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5f76748c8d77f326925564fbae837062654dc32c", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/decoding/time_gen.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/157.json b/doc/sphinxext/prs/157.json new file mode 100644 index 00000000000..1b3a8970b9e --- /dev/null +++ b/doc/sphinxext/prs/157.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c675c086aa99d13c571c4ea787fdf1bc590bdec6", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1572.json b/doc/sphinxext/prs/1572.json new file mode 100644 index 00000000000..295a827eabb --- /dev/null +++ b/doc/sphinxext/prs/1572.json @@ -0,0 +1,227 @@ +{ + "merge_commit_sha": "a71e2c046dcdfdb3e59fd26d1654c73e08a1f03f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 4, + "d": 8 + }, + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 28, + "d": 44 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 3, + "d": 4 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 3, + "d": 4 + }, + "mne/commands/mne_flash_bem_model.py": { + "a": 6, + "d": 1 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_maxfilter.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_report.py": { + "a": 15, + "d": 4 + }, + "mne/commands/mne_surf2bem.py": { + "a": 1, + "d": 3 + }, + "mne/commands/tests/test_commands.py": { + "a": 82, + "d": 40 + }, + "mne/coreg.py": { + "a": 54, + "d": 63 + }, + "mne/cuda.py": { + "a": 85, + "d": 135 + }, + "mne/evoked.py": { + "a": 144, + "d": 151 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 98, + "d": 122 + }, + "mne/forward/tests/test_forward.py": { + "a": 33, + "d": 12 + }, + "mne/gui/_coreg_gui.py": { + "a": 11, + "d": 11 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 61, + "d": 86 + }, + "mne/gui/_marker_gui.py": { + "a": 29, + "d": 49 + }, + "mne/gui/_viewer.py": { + "a": 21, + "d": 31 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 15, + "d": 1 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 16, + "d": 2 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 16, + "d": 2 + }, + "mne/label.py": { + "a": 15, + "d": 20 + }, + "mne/minimum_norm/inverse.py": { + "a": 177, + "d": 196 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxfilter.py": { + "a": 7, + "d": 4 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { 
+ "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 105, + "d": 156 + }, + "mne/source_space.py": { + "a": 9, + "d": 13 + }, + "mne/surface.py": { + "a": 64, + "d": 107 + }, + "mne/tests/test_epochs.py": { + "a": 57, + "d": 6 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_filter.py": { + "a": 10, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 21, + "d": 2 + }, + "mne/tests/test_report.py": { + "a": 68, + "d": 27 + }, + "mne/tests/test_source_space.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 49, + "d": 12 + }, + "mne/tests/test_transforms.py": { + "a": 62, + "d": 6 + }, + "mne/tests/test_utils.py": { + "a": 17, + "d": 9 + }, + "mne/transforms.py": { + "a": 16, + "d": 75 + }, + "mne/utils.py": { + "a": 48, + "d": 61 + }, + "mne/viz/_3d.py": { + "a": 41, + "d": 20 + }, + "mne/viz/__init__.py": { + "a": 8, + "d": 9 + }, + "mne/viz/epochs.py": { + "a": 5, + "d": 3 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 12, + "d": 0 + }, + "mne/viz/tests/test_epochs.py": { + "a": 15, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 8, + "d": 4 + }, + "mne/viz/tests/test_topo.py": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1574.json b/doc/sphinxext/prs/1574.json new file mode 100644 index 00000000000..87d8ea4aa99 --- /dev/null +++ b/doc/sphinxext/prs/1574.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8e83d9fdc336709e857ed969b05850480878afd5", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 18, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1575.json b/doc/sphinxext/prs/1575.json new file mode 100644 index 00000000000..3e758f69f24 --- /dev/null +++ b/doc/sphinxext/prs/1575.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ccf606f7693729747a2d7c11fe417cb6ba64b3b9", + "authors": [ + { + "n": "Kevin Schiesser", + "e": null + } + ], + "changes": { + "mne/filter.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1578.json b/doc/sphinxext/prs/1578.json new file mode 100644 index 00000000000..69cfc49d229 --- /dev/null +++ b/doc/sphinxext/prs/1578.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f620c624e6c3e4b0e351455179221cb635429040", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 0 + }, + "mne/externals/FieldTrip.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1579.json b/doc/sphinxext/prs/1579.json new file mode 100644 index 00000000000..d6ae9eacb6a --- /dev/null +++ b/doc/sphinxext/prs/1579.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0163a3f5d24c71368aa1449931618965cc1ff479", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fixes.py": { + "a": 36, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 33 + }, + "mne/tests/test_fixes.py": { + "a": 11, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1581.json b/doc/sphinxext/prs/1581.json new file mode 100644 index 00000000000..a701add8ac9 --- /dev/null +++ b/doc/sphinxext/prs/1581.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"d94a10d27aad9a146070b33121c5e2e4b205808c", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/layouts/biosemi.lay": { + "a": 64, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1582.json b/doc/sphinxext/prs/1582.json new file mode 100644 index 00000000000..71b31d29b7a --- /dev/null +++ b/doc/sphinxext/prs/1582.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9efc216892d8e49c26515483e5ba021ba346f28c", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 0, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1583.json b/doc/sphinxext/prs/1583.json new file mode 100644 index 00000000000..ac1a4665a3f --- /dev/null +++ b/doc/sphinxext/prs/1583.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f39a12dedaa84090dcf73c0dc561acb32a228d9f", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/event.py": { + "a": 37, + "d": 5 + }, + "mne/tests/test_event.py": { + "a": 20, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1584.json b/doc/sphinxext/prs/1584.json new file mode 100644 index 00000000000..73b12ca108c --- /dev/null +++ b/doc/sphinxext/prs/1584.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4a9ac46cca6dbb012de09c7752c611998b663ed8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 5, + "d": 1 + }, + "mne/realtime/tests/test_stim_client_server.py": { + "a": 31, + "d": 14 + }, + "mne/tests/test_fixes.py": { + "a": 21, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1588.json b/doc/sphinxext/prs/1588.json new file mode 100644 index 00000000000..7ad263c2b90 --- /dev/null +++ b/doc/sphinxext/prs/1588.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "295750ca7cc7eb7a34cac1506cc9984f058d6f0b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 10, + "d": 2 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 57, + "d": 48 + }, + "mne/commands/tests/test_commands.py": { + "a": 33, + "d": 1 + }, + "mne/commands/utils.py": { + "a": 0, + "d": 10 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 34, + "d": 15 + }, + "mne/tests/test_source_space.py": { + "a": 5, + "d": 8 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1589.json b/doc/sphinxext/prs/1589.json new file mode 100644 index 00000000000..2e9edd6b23a --- /dev/null +++ b/doc/sphinxext/prs/1589.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "70075e5f780787fbab8e1b6b01a30dc9a57c76ea", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/159.json b/doc/sphinxext/prs/159.json new file mode 100644 index 00000000000..6d3e8eebcee --- /dev/null +++ b/doc/sphinxext/prs/159.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a6b8e55c7039e7337848571358021fca5d5d4eb3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1590.json b/doc/sphinxext/prs/1590.json new file mode 100644 index 00000000000..964d04e8e16 
--- /dev/null +++ b/doc/sphinxext/prs/1590.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "bf46062b42ba9e0702de81d833af9d2b5ec82f27", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 8, + "d": 3 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/constants.py": { + "a": 2, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 507, + "d": 283 + }, + "mne/io/kit/tests/data/test-epoch.raw": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test-eve.txt": { + "a": 2, + "d": 0 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 27, + "d": 4 + }, + "mne/io/tests/test_meas_info.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1592.json b/doc/sphinxext/prs/1592.json new file mode 100644 index 00000000000..a76c1a76ffe --- /dev/null +++ b/doc/sphinxext/prs/1592.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "834ba92eca3f66f623f72d582a2690511bb32a87", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 4, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 0 + }, + "mne/evoked.py": { + "a": 1, + "d": 0 + }, + "mne/realtime/epochs.py": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1593.json b/doc/sphinxext/prs/1593.json new file mode 100644 index 00000000000..f3f7d2143dc --- /dev/null +++ b/doc/sphinxext/prs/1593.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e0f5f1ae1d2e26b5208eb06ecea821250869cbe3", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/filter.py": { + "a": 15, + "d": 3 + }, + "mne/io/base.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1594.json b/doc/sphinxext/prs/1594.json new file mode 100644 index 00000000000..789b4d0548f --- /dev/null +++ b/doc/sphinxext/prs/1594.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "6097260d7fa6a239e3292f5b315a1ddc446d32fb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "mne/_hdf5.py": { + "a": 4, + "d": 5 + }, + "mne/commands/tests/test_commands.py": { + "a": 5, + "d": 5 + }, + "mne/datasets/megsim/megsim.py": { + "a": 5, + "d": 4 + }, + "mne/datasets/megsim/urls.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/sample/sample.py": { + "a": 3, + "d": 4 + }, + "mne/datasets/somato/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/somato/somato.py": { + "a": 4, + "d": 17 + }, + "mne/datasets/spm_face/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 4, + "d": 17 + }, + "mne/datasets/testing/_testing.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 12, + "d": 8 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 4, + "d": 3 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 15 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_hdf5.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 109, + "d": 57 + }, + "mne/utils.py": { + "a": 205, + "d": 329 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1595.json b/doc/sphinxext/prs/1595.json new file mode 100644 index 00000000000..e9446bec73e --- /dev/null +++ b/doc/sphinxext/prs/1595.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a3e4b196c43113fc032b05e94779af9803d2686", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1596.json b/doc/sphinxext/prs/1596.json new file mode 100644 index 00000000000..3e6a23933de --- /dev/null +++ b/doc/sphinxext/prs/1596.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6be17bc3bd689225e9df2cb40e5ad8ee41a307db", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/filter.py": { + "a": 6, + "d": 6 + }, + "mne/tests/test_filter.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1599.json b/doc/sphinxext/prs/1599.json new file mode 100644 index 00000000000..914346734ad --- /dev/null +++ b/doc/sphinxext/prs/1599.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4beaf453bd07e348a90f4752488aa4a27a75b311", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 7, + "d": 6 + }, + "mne/report.py": { + "a": 24, + "d": 6 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 32, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/16.json b/doc/sphinxext/prs/16.json new file mode 100644 index 00000000000..c21afc1e07f --- /dev/null +++ b/doc/sphinxext/prs/16.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "592f9afca21a68755042741af760790d93df69b8", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/read_events.py": { + "a": 4, + "d": 1 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 12, + "d": 10 + }, + "mne/event.py": { + "a": 53, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/160.json b/doc/sphinxext/prs/160.json new file mode 100644 index 00000000000..df4c0035924 --- /dev/null +++ b/doc/sphinxext/prs/160.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "dcbe3420f0d762ec51e7771528bfa32f2cb005f0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/layouts/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "setup.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1601.json b/doc/sphinxext/prs/1601.json new file mode 100644 index 00000000000..f8fa3daf75a --- /dev/null +++ b/doc/sphinxext/prs/1601.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "84f4a1359e9b5b3ae261df233a794cfa8e6e3020", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/mne_browse_raw.py": { + "a": 27, + "d": 2 + }, + "mne/io/base.py": { + "a": 22, + "d": 6 + }, + "mne/viz/raw.py": { + "a": 57, + "d": 3 + }, + "mne/viz/tests/test_raw.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1602.json b/doc/sphinxext/prs/1602.json new file mode 100644 index 00000000000..c8f9d66b1ae --- /dev/null +++ b/doc/sphinxext/prs/1602.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "099742619867b85e918b4dc4d6bfab3d8f2e7e2c", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_evoked_topomap.py": { + "a": 4, + "d": 1 + }, + "mne/evoked.py": { + "a": 8, + "d": 4 + }, + "mne/viz/tests/test_topomap.py": { + "a": 5, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 29, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1603.json b/doc/sphinxext/prs/1603.json new file mode 100644 index 00000000000..5250de229e4 --- /dev/null +++ b/doc/sphinxext/prs/1603.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b611de9e429d3bacd56f4ae10398b92272e0d59e", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/mne_report_tutorial.rst": { + "a": 2, + "d": 2 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 71, + "d": 39 + }, + "mne/tests/test_report.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1606.json b/doc/sphinxext/prs/1606.json new file mode 100644 index 00000000000..0fe85e09fa9 --- /dev/null +++ b/doc/sphinxext/prs/1606.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c2b81d5578bb59a5aec366ebe44c9fb734549993", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1607.json b/doc/sphinxext/prs/1607.json new file mode 100644 index 00000000000..aafc79b57b5 --- /dev/null +++ b/doc/sphinxext/prs/1607.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "385a2555f35a50ad5881b6c8faac8cf78b0e4ae9", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/tests/test_coreg.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1608.json b/doc/sphinxext/prs/1608.json new file mode 100644 index 00000000000..3a3c849d16e --- /dev/null +++ b/doc/sphinxext/prs/1608.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a26c0f6eefdf34e92a21c3a217715011f2cb27a4", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/channels.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1609.json b/doc/sphinxext/prs/1609.json new file mode 100644 index 00000000000..df5396fb579 --- /dev/null +++ b/doc/sphinxext/prs/1609.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9afd14672072a02261a0d428d31d6425cd850f9a", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1612.json b/doc/sphinxext/prs/1612.json new file mode 100644 index 00000000000..4ed63906b7a --- /dev/null +++ b/doc/sphinxext/prs/1612.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fef061f1ed0711f18271e45920e9bc2b5550e031", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 18, + "d": 15 + }, + "mne/preprocessing/ica.py": { + "a": 9, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1614.json b/doc/sphinxext/prs/1614.json new file mode 100644 index 00000000000..8ea20974253 --- /dev/null +++ b/doc/sphinxext/prs/1614.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d1914e1322075deb98c1bc6ae6fbc582668eafa8", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 2 + }, + "mne/_hdf5.py": { + "a": 10, + "d": 0 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 48, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 101, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1615.json b/doc/sphinxext/prs/1615.json new file mode 100644 index 00000000000..820678c258f --- /dev/null +++ b/doc/sphinxext/prs/1615.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "34c5a18f595420e63e1bcea0681fceb5b745973b", + "authors": [ + { + "n": "Kevin Schiesser", + "e": null + } + ], + "changes": { + "mne/filter.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1616.json b/doc/sphinxext/prs/1616.json new file mode 100644 index 00000000000..14e2fe2716c --- /dev/null +++ b/doc/sphinxext/prs/1616.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4adf459f7cf7f0a9d428d310ef4b958d9c9e63cc", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1618.json b/doc/sphinxext/prs/1618.json new file mode 100644 index 00000000000..6e064c37a3f --- /dev/null +++ b/doc/sphinxext/prs/1618.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "997597de0b962fcd4492faf6b7c531efc4109e62", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1619.json b/doc/sphinxext/prs/1619.json new file mode 100644 index 00000000000..383b5fd5fab --- /dev/null +++ b/doc/sphinxext/prs/1619.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "357d55728f13feef2c47d6bc43bcf17b57cac110", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 129, + "d": 91 + }, + "mne/channels/tests/test_layout.py": { + "a": 65, + "d": 17 + }, + "mne/layouts/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + "a": 17, + "d": 8 + }, + "mne/preprocessing/tests/test_maxfilter.py": { + "a": 95, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 13, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 22, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/162.json b/doc/sphinxext/prs/162.json new file mode 100644 index 00000000000..aed76c37de8 --- /dev/null +++ b/doc/sphinxext/prs/162.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5874483ed5d79d1fbfdf26b195dd7629cd58064f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/artifacts/tests/test_ica.py": { + "a": 9, + "d": 8 + }, + "mne/fiff/tests/test_raw.py": { + "a": 17, + "d": 13 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 4 + }, + "mne/tests/test_filter.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_viz.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 3, + "d": 1 + } + } +} \ No newline 
at end of file diff --git a/doc/sphinxext/prs/1620.json b/doc/sphinxext/prs/1620.json new file mode 100644 index 00000000000..7bf08af1cda --- /dev/null +++ b/doc/sphinxext/prs/1620.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b086d95246f7e1e4c1d45875cdfa5eea96f6bd9f", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1621.json b/doc/sphinxext/prs/1621.json new file mode 100644 index 00000000000..805a6de9c87 --- /dev/null +++ b/doc/sphinxext/prs/1621.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ef1325503c020ca49c8e568f067597421776c8ba", + "authors": [ + { + "n": "marmaduke woodman", + "e": null + } + ], + "changes": { + "mne/fixes.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1623.json b/doc/sphinxext/prs/1623.json new file mode 100644 index 00000000000..88f447ac2a1 --- /dev/null +++ b/doc/sphinxext/prs/1623.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "b6daccc4239f15a2f43f50160a6298cbf67b5c06", + "authors": [ + { + "n": "marmaduke woodman", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/channels.py": { + "a": 2, + "d": 1 + }, + "mne/io/constants.py": { + "a": 1, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 0 + }, + "mne/io/pick.py": { + "a": 18, + "d": 8 + }, + "mne/io/tests/test_pick.py": { + "a": 70, + "d": 1 + }, + "mne/tests/test_channels.py": { + "a": 7, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 10, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1627.json b/doc/sphinxext/prs/1627.json new file mode 100644 index 00000000000..43796eb0837 --- /dev/null +++ b/doc/sphinxext/prs/1627.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ca7239d7252ba38698266c6cc45962d8cf4f3f74", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 35, + "d": 18 + }, + "mne/io/brainvision/tests/data/test_highpass.vhdr": { + "a": 142, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 14, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 4, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 15, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1629.json b/doc/sphinxext/prs/1629.json new file mode 100644 index 00000000000..eefd58a3888 --- /dev/null +++ b/doc/sphinxext/prs/1629.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "f17a53b755c7d2c1aff8742dcdeb6afce80a0d34", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_time_generalization.py": { + "a": 23, + "d": 57 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 75, + "d": 0 + }, + "mne/decoding/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 204, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 883, + "d": 7 + }, + "mne/tests/test_utils.py": { + "a": 39, + "d": 1 + }, + "mne/utils.py": { + "a": 31, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/decoding.py": { + "a": 126, + "d": 0 + }, + "mne/viz/tests/test_decoding.py": { + "a": 68, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/163.json b/doc/sphinxext/prs/163.json new file mode 100644 index 00000000000..ea32df8456d --- /dev/null +++ b/doc/sphinxext/prs/163.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2a9aadcdef282bf1dad6e04d39a855a5fc7c456e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_channel_epochs_image.py": { + "a": 72, + "d": 0 + }, + "mne/viz.py": { + "a": 94, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1630.json b/doc/sphinxext/prs/1630.json new file mode 100644 index 00000000000..36521356abe --- /dev/null +++ b/doc/sphinxext/prs/1630.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c6e90285f69a49866b30f62fa90c77902e8e1082", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1632.json b/doc/sphinxext/prs/1632.json new file mode 100644 index 00000000000..8e7a1fc3fce --- /dev/null +++ b/doc/sphinxext/prs/1632.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "936e3791cf4ed0945f678e882013e693e67cbd92", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 6, + "d": 4 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1636.json b/doc/sphinxext/prs/1636.json new file mode 100644 index 00000000000..fd9f363684e --- /dev/null +++ b/doc/sphinxext/prs/1636.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4e45ad1f8429edff31b24a291180a9a165c13b54", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 4, + "d": 4 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1638.json b/doc/sphinxext/prs/1638.json new file mode 100644 index 00000000000..784aee982bf --- /dev/null +++ b/doc/sphinxext/prs/1638.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "62a2b4531a25a88b993ae16ab92f30dd8f5f5632", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/plot_read_forward.py": { + "a": 32, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/164.json b/doc/sphinxext/prs/164.json new file mode 100644 index 00000000000..b82d194ddf7 --- /dev/null +++ b/doc/sphinxext/prs/164.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "7612f2b571a08dd5635d43c9ef66a3f55a1e8506", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 26, + "d": 5 + }, + "mne/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/event.py": { + "a": 64, + "d": 11 + }, + "mne/fiff/raw.py": { + "a": 43, + "d": 33 + }, + "mne/fiff/tests/data/test-mpr-eve.eve": { + "a": 32, + "d": 0 + }, + "mne/fiff/tests/test_raw.py": { + "a": 22, + "d": 0 + }, + "mne/tests/test_event.py": { + "a": 11, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1642.json b/doc/sphinxext/prs/1642.json new file mode 100644 index 00000000000..cea3a2c1108 --- /dev/null +++ b/doc/sphinxext/prs/1642.json @@ -0,0 +1,327 @@ +{ + "merge_commit_sha": "b7cc0f9b45a95a91c357fd8d8f32c0897f13b4d7", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "MANIFEST.in": { + "a": 7, + "d": 6 + }, + "doc/source/python_reference.rst": { + "a": 5, + "d": 9 + }, + "doc/source/whats_new.rst": { + "a": 6, + "d": 1 + }, + "examples/stats/plot_spatio_temporal_cluster_stats_sensor.py": { + "a": 157, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 4 + }, + "mne/channels/__init__.py": { + "a": 10, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 46, + "d": 18 + }, + "mne/channels/data/layouts/CTF-275.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/CTF151.lay": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/CTF275.lay": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/EEG1005.lay": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/EGI256.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/KIT-157.lout": { + "a": 1, + "d": 1 + }, + "mne/channels/data/layouts/Vectorview-all.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/Vectorview-grad.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/Vectorview-mag.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/biosemi.lay": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/magnesWH3600.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/10-5-System_Mastoids_EGI129.csd": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/EGI_256.csd": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/GSN-HydroCel-128.sfp": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/GSN-HydroCel-129.sfp": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/GSN-HydroCel-256.sfp": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/GSN-HydroCel-257.sfp": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/GSN-HydroCel-32.sfp": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/easycap-M1.txt": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/easycap-M10.txt": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/standard_1005.elc": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/standard_1020.elc": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/standard_alphabetic.elc": { + "a": 0, + 
"d": 0 + }, + "mne/channels/data/montages/standard_postfixed.elc": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/standard_prefixed.elc": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/standard_primed.elc": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/__init__.py": { + "a": 6, + "d": 0 + }, + "mne/channels/data/neighbors/biosemi16_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/biosemi32_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/biosemi64_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/bti148_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/bti248_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/bti248grad_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/ctf151_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/ctf275_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/ctf64_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycap128ch-avg_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycap32ch-avg_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycap64ch-avg_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycapM11_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycapM14_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycapM15_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycapM1_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/neuromag122_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/neuromag306mag_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/neuromag306planar_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 197, + "d": 4 + }, + "mne/channels/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 14, + "d": 8 + }, + "mne/channels/tests/test_layout.py": { + "a": 4, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/layouts/__init__.py": { + "a": 21, + "d": 2 + }, + "mne/montages/__init__.py": { + "a": 0, + "d": 3 + }, + "mne/montages/montage.py": { + "a": 0, + "d": 202 + }, + "mne/montages/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 10, + "d": 10 + }, + "setup.py": { + "a": 9, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1644.json b/doc/sphinxext/prs/1644.json new file mode 100644 index 00000000000..26aec18661d --- /dev/null +++ b/doc/sphinxext/prs/1644.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "da6bca13816721fd2c2753ca48782ee2b24c2af0", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": 
"jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/montages/montage.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1646.json b/doc/sphinxext/prs/1646.json new file mode 100644 index 00000000000..8f18cbdbfdf --- /dev/null +++ b/doc/sphinxext/prs/1646.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "aa3d81879c109f4ae060f18c95c0bc9fd512d1a7", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 3, + "d": 3 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1651.json b/doc/sphinxext/prs/1651.json new file mode 100644 index 00000000000..018d2363321 --- /dev/null +++ b/doc/sphinxext/prs/1651.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4800989b63212a700b175820d0583c32ea3cfc2e", + "authors": [ + { + "n": "Matthew A. Tucker", + "e": "matt.tucker@nyu.edu" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1656.json b/doc/sphinxext/prs/1656.json new file mode 100644 index 00000000000..b50ab90207f --- /dev/null +++ b/doc/sphinxext/prs/1656.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "c2600d3ef4a263cdccb3f7524b3bef21c3077b58", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 8, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 17, + "d": 32 + }, + "mne/channels/tests/test_montage.py": { + "a": 10, + "d": 3 + }, + "mne/coreg.py": { + "a": 2, + "d": 54 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 3, + "d": 3 + }, + "mne/io/array/array.py": { + "a": 5, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 4, + "d": 1 + }, + "mne/io/base.py": { + "a": 31, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 67, + "d": 131 + }, + "mne/io/brainvision/tests/data/test.hpts": { + "a": 46, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 19, + "d": 23 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 7, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 76, + "d": 149 + }, + "mne/io/edf/tests/data/biosemi.hpts": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 17, + "d": 8 + }, + "mne/io/egi/egi.py": { + "a": 30, + "d": 6 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 6, + "d": 4 + }, + "mne/io/kit/kit.py": { + "a": 6, + "d": 3 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 0, + "d": 26 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 7, + "d": 3 + }, + "mne/tests/test_transforms.py": { + "a": 69, + "d": 2 + }, + "mne/transforms.py": { + "a": 77, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1658.json b/doc/sphinxext/prs/1658.json new file mode 100644 index 00000000000..ac16bd18e2f --- /dev/null +++ b/doc/sphinxext/prs/1658.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "376b07e00406fe40e70fac2d985045c56293e868", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 2 + }, + "mne/io/egi/egi.py": { + "a": 6, + "d": 6 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1661.json b/doc/sphinxext/prs/1661.json new file mode 100644 index 00000000000..91e944fbc91 --- /dev/null +++ b/doc/sphinxext/prs/1661.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0f36a626bb43d90d82b48109a3f6809aca0a1c1e", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1662.json b/doc/sphinxext/prs/1662.json new file mode 100644 index 00000000000..e1bdbc4df76 --- /dev/null +++ b/doc/sphinxext/prs/1662.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e298f07fe0347b725a6f71193f636c89f8c86db", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/realtime/fieldtrip_client.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1663.json b/doc/sphinxext/prs/1663.json new file mode 100644 index 00000000000..48ee4618915 --- /dev/null +++ b/doc/sphinxext/prs/1663.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9163a465c9663a3a0587c3da0b80a72ca3e724de", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/data/coil_def.dat": { + "a": 46, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1664.json b/doc/sphinxext/prs/1664.json new file mode 100644 index 00000000000..8d3564eaabe --- /dev/null +++ b/doc/sphinxext/prs/1664.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7dba8f60650ed70d7100840103dd34018ece0aa1", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/realtime/epochs.py": { + "a": 24, + "d": 5 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 92, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1667.json b/doc/sphinxext/prs/1667.json new file mode 100644 index 00000000000..946c2cae347 --- /dev/null +++ b/doc/sphinxext/prs/1667.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "15a7b675884a290f0696777806d5561a4154db8e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "doc/source/contributing.rst": { + "a": 1, + "d": 1 + }, + "doc/source/getting_started.rst": { + "a": 2, + "d": 0 + }, + "doc/source/known_projects.inc": { + "a": 5, + "d": 0 + }, + "mne/_hdf5.py": { + "a": 48, + "d": 56 + }, + "mne/tests/test_hdf5.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/utils.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1668.json b/doc/sphinxext/prs/1668.json new file mode 100644 index 00000000000..cab9c170bf7 --- /dev/null +++ b/doc/sphinxext/prs/1668.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5151807f693b26623f955ebdca4fd86785f1e3dd", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1669.json b/doc/sphinxext/prs/1669.json new file mode 100644 index 00000000000..f5bb2906cfe --- /dev/null +++ b/doc/sphinxext/prs/1669.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d956e0310e165740d0e129edb350598eb51becba", + "authors": [ + { + "n": "Hari Bharadwaj", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1671.json b/doc/sphinxext/prs/1671.json new file mode 100644 index 00000000000..6de98680a06 --- /dev/null +++ b/doc/sphinxext/prs/1671.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "27502c56a8e4cb4a92ad057b6aa42517c672f698", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 5, + "d": 0 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 157, + "d": 0 + }, + "examples/plot_evoked_whitening.py": { + "a": 57, + "d": 23 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 4, + "d": 3 + }, + "mne/cov.py": { + "a": 574, + "d": 83 + }, + "mne/evoked.py": { + "a": 39, + "d": 0 + }, + "mne/fixes.py": { + "a": 173, + "d": 0 + }, + "mne/io/pick.py": { + "a": 1, + "d": 0 + }, + "mne/io/tests/test_pick.py": { + "a": 44, + "d": 4 + }, + "mne/tests/test_cov.py": { + "a": 145, + "d": 7 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 230, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1672.json b/doc/sphinxext/prs/1672.json new file mode 100644 index 00000000000..a9f8135507f --- /dev/null +++ b/doc/sphinxext/prs/1672.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6ba8d2988ab2593c9a7446ac19501261dc88d23c", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 10, + "d": 5 + }, + "mne/channels/tests/test_layout.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1673.json b/doc/sphinxext/prs/1673.json new file mode 100644 index 00000000000..851c02b952e --- /dev/null +++ b/doc/sphinxext/prs/1673.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "355c67b45171bb92cb592b5603007e44fdb92c88", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/io/egi/egi.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1674.json b/doc/sphinxext/prs/1674.json new file mode 100644 index 00000000000..0ece3083c1c --- /dev/null +++ b/doc/sphinxext/prs/1674.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "50303fc0b1a979a29a57af686c57cc473f74052d", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1676.json b/doc/sphinxext/prs/1676.json new file mode 100644 index 00000000000..ddc42637e20 --- /dev/null +++ b/doc/sphinxext/prs/1676.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f99943965c8c692106d801fe1c28b15078d66017", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/realtime/epochs.py": { + "a": 13, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1677.json b/doc/sphinxext/prs/1677.json new file mode 100644 index 00000000000..566f65353ea --- /dev/null +++ b/doc/sphinxext/prs/1677.json @@ -0,0 +1,151 @@ +{ + "merge_commit_sha": "a3843cc90955db6e25230c3b57cbdc3c04b18f5c", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 5 + }, + "doc/source/whats_new.rst": { + "a": 16, + "d": 8 + }, + "examples/plot_ssp_projs_topomaps.py": { + "a": 3, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 54, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 22, + "d": 2 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 1, + "d": 25 + }, + "mne/cuda.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 5, + "d": 3 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 9, + "d": 9 + }, + "mne/gui/_marker_gui.py": { + "a": 16, + "d": 15 + }, + "mne/io/base.py": { + "a": 4, + "d": 2 + }, + "mne/io/brainvision/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/__init__.py": { + "a": 2, + "d": 6 + }, + "mne/io/kit/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 6, + "d": 87 + }, + "mne/io/kit/kit.py": { + "a": 16, + "d": 35 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 3, + "d": 21 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 146, + "d": 4 + }, + "mne/io/pick.py": { + "a": 4, + "d": 3 + }, + "mne/io/proj.py": { + "a": 46, + "d": 0 + }, + "mne/io/tests/test_meas_info.py": { + "a": 54, + "d": 2 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_regression.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/168.json b/doc/sphinxext/prs/168.json new file mode 100644 index 00000000000..5c7a67c5934 --- /dev/null +++ b/doc/sphinxext/prs/168.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "05667f9c5e5f8ce3b55c0e68724a6af6df6d7566", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_ecg_artifacts_from_ica.py": { + "a": 103, + "d": 0 + }, + "examples/preprocessing/plot_eog_artifacts_from_ica.py": { + "a": 100, + "d": 0 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 66, + "d": 9 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 78, + "d": 23 + }, + "mne/artifacts/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/artifacts/eog.py": { + "a": 15, + "d": 6 + }, + "mne/artifacts/ica.py": { + "a": 408, + "d": 112 + }, + "mne/artifacts/tests/test_ica.py": { + "a": 47, + "d": 46 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1681.json b/doc/sphinxext/prs/1681.json new file mode 100644 index 00000000000..71dbed3ddc0 --- /dev/null +++ b/doc/sphinxext/prs/1681.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "57bb75966bdbb8b627cdff1e9329af705c6a21c9", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 20, + "d": 16 + }, + "mne/io/edf/edf.py": { + "a": 48, + "d": 53 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 5, + "d": 4 + }, + "mne/io/egi/egi.py": { + "a": 9, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1685.json b/doc/sphinxext/prs/1685.json new file mode 100644 index 00000000000..4f63a041898 --- /dev/null +++ b/doc/sphinxext/prs/1685.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7ff78dd55ada4375b2d287d3fbe804dc8ad0b3c6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "appveyor.yml": { + "a": 36, + "d": 0 + }, + "make/install_python.ps1": { + "a": 93, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1688.json b/doc/sphinxext/prs/1688.json new file mode 100644 index 00000000000..8905dda4c1c --- /dev/null +++ b/doc/sphinxext/prs/1688.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fd5d67170396bbd836e8697d32003b7ba947163d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 56, + "d": 23 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1689.json b/doc/sphinxext/prs/1689.json new file mode 100644 index 00000000000..e1a8da2fff0 --- /dev/null +++ b/doc/sphinxext/prs/1689.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "af5be4fc2b7fac257190cc9ede03ac5779576cd6", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/mne_report_tutorial.rst": { + "a": 19, + "d": 8 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 11, + "d": 4 + }, + "mne/tests/test_report.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/169.json b/doc/sphinxext/prs/169.json new file mode 100644 index 00000000000..d6f9b1c5065 --- /dev/null +++ b/doc/sphinxext/prs/169.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "908ebe9d8b3e43d8b90d945ee45d6292a25e4f23", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/plot_topo_channel_epochs_image.py": { + "a": 78, + "d": 0 + }, + "mne/viz.py": { + "a": 104, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1696.json b/doc/sphinxext/prs/1696.json new file mode 100644 index 00000000000..a28ed88cb95 --- /dev/null +++ b/doc/sphinxext/prs/1696.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "7e344bcccbc6982245a69af9db15689627a5b5e9", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 5, + "d": 3 + }, + "examples/plot_interpolate_bad_channels.py": { + "a": 56, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 14, + "d": 2 + }, + "mne/channels/interpolation.py": { + "a": 167, + "d": 0 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 80, + "d": 0 + }, + "mne/channels/utils.py": { + "a": 34, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/maxfilter.py": { + "a": 3, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1698.json b/doc/sphinxext/prs/1698.json new file mode 100644 index 00000000000..9138a822818 --- /dev/null +++ b/doc/sphinxext/prs/1698.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fcc83f6ce48585b2199f79654aab3b058186577c", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/mne_report_tutorial.rst": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/17.json b/doc/sphinxext/prs/17.json new file mode 100644 index 00000000000..bab16437110 --- /dev/null +++ b/doc/sphinxext/prs/17.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3bf8057497870dcdc119fb9657154abe11cd778c", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 35, + "d": 6 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 41, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1701.json b/doc/sphinxext/prs/1701.json new file mode 100644 index 00000000000..f882fb1c30c --- /dev/null +++ b/doc/sphinxext/prs/1701.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "944c5c04edea3d3422953e47faf43fa2d46fd5e1", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1705.json b/doc/sphinxext/prs/1705.json new file mode 100644 index 00000000000..9452d19e537 --- /dev/null +++ b/doc/sphinxext/prs/1705.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "90ddfff2dfabffd6d899fa8ff9085ab50d55fac5", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_morph_data.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 6, + "d": 6 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 3, + "d": 3 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 4, + "d": 4 + }, + "mne/label.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_source.py": { + "a": 18, + "d": 18 + }, + "mne/source_estimate.py": { + "a": 57, + "d": 50 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/stats/cluster_level.py": { + "a": 10, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 16, + "d": 16 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1711.json b/doc/sphinxext/prs/1711.json new file mode 100644 index 00000000000..6d83de785cf --- /dev/null +++ b/doc/sphinxext/prs/1711.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c2f1a6c162c1472b2286678fd824afc478a17617", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 46, + "d": 27 + }, + "mne/minimum_norm/inverse.py": { + "a": 11, + "d": 4 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1712.json b/doc/sphinxext/prs/1712.json new file mode 100644 index 00000000000..a1cb9a32e72 --- /dev/null +++ b/doc/sphinxext/prs/1712.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "0204eae6323d5da698d633ad2ef074114bea0ecf", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 232, + "d": 19 + }, + "mne/io/base.py": { + "a": 22, + "d": 4 + }, + "mne/io/constants.py": { + "a": 22, + "d": 0 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 43, + "d": 15 + }, + "mne/io/meas_info.py": { + "a": 8, + "d": 2 + }, + "mne/io/pick.py": { + "a": 42, + "d": 0 + }, + "mne/io/proc_history.py": { + "a": 276, + "d": 0 + }, + "mne/io/tag.py": { + "a": 0, + "d": 1 + }, + "mne/io/tests/test_proc_history.py": { + "a": 47, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 89, + "d": 4 + }, + "mne/utils.py": { + "a": 14, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1716.json b/doc/sphinxext/prs/1716.json new file mode 100644 index 00000000000..1c4bd1ecaca --- /dev/null +++ b/doc/sphinxext/prs/1716.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "75606c4efcda651c04208e2da3a8235730d537f8", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 3 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 0, + "d": 3 + }, + "mne/utils.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1718.json b/doc/sphinxext/prs/1718.json new file mode 100644 index 00000000000..3bbbd9583ef --- /dev/null +++ b/doc/sphinxext/prs/1718.json @@ -0,0 +1,127 @@ +{ + "merge_commit_sha": "6f091cc401625dc0ab7098a8f5e000d8bb3db981", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "appveyor.yml": { + "a": 7, + "d": 7 + }, + "make/install_python.ps1": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 3, + "d": 1 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 2, + "d": 1 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 2, + "d": 1 + }, + "mne/forward/tests/test_forward.py": { + "a": 3, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 3, + "d": 1 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 1 + }, + "mne/io/constants.py": { + "a": 15, + "d": 18 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 9, + "d": 6 + }, + "mne/io/proc_history.py": { + "a": 24, + "d": 24 + }, + "mne/io/tests/test_proc_history.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 4, + "d": 1 + }, + "mne/minimum_norm/tests/test_psf_ctf.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 5, + "d": 3 + }, + "mne/tests/test_report.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 1 + }, + "mne/utils.py": { + "a": 7, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/172.json 
b/doc/sphinxext/prs/172.json new file mode 100644 index 00000000000..907d017adef --- /dev/null +++ b/doc/sphinxext/prs/172.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a8777cb319e7a66bf9b74aa341cfd60d1374d97f", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 28, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1721.json b/doc/sphinxext/prs/1721.json new file mode 100644 index 00000000000..da7e2500e9b --- /dev/null +++ b/doc/sphinxext/prs/1721.json @@ -0,0 +1,255 @@ +{ + "merge_commit_sha": "ec6eb7bfd6a7d10265dc0bfb9b0a73587d7f23de", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "appveyor.yml": { + "a": 1, + "d": 1 + }, + "doc/Makefile": { + "a": 1, + "d": 1 + }, + "doc/source/conf.py": { + "a": 3, + "d": 10 + }, + "doc/source/contributing.rst": { + "a": 3, + "d": 1 + }, + "doc/source/python_reference.rst": { + "a": 4, + "d": 23 + }, + "doc/sphinxext/docscrape.py": { + "a": 0, + "d": 497 + }, + "doc/sphinxext/docscrape_sphinx.py": { + "a": 0, + "d": 137 + }, + "doc/sphinxext/gen_rst.py": { + "a": 466, + "d": 217 + }, + "doc/sphinxext/numpy_ext/docscrape.py": { + "a": 58, + "d": 45 + }, + "doc/sphinxext/numpy_ext/docscrape_sphinx.py": { + "a": 30, + "d": 16 + }, + "doc/sphinxext/numpy_ext/numpydoc.py": { + "a": 54, + "d": 25 + }, + "doc/sphinxext/numpy_ext_old/__init__.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/numpy_ext_old/docscrape.py": { + "a": 0, + "d": 490 + }, + "doc/sphinxext/numpy_ext_old/docscrape_sphinx.py": { + "a": 0, + "d": 133 + }, + "doc/sphinxext/numpy_ext_old/numpydoc.py": { + "a": 0, + "d": 111 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 5, + "d": 3 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 2, + "d": 0 + }, + "examples/plot_make_forward.py": { + "a": 5, + "d": 0 + }, + "examples/plot_read_bem_surfaces.py": { + "a": 2, + "d": 0 + }, + "examples/plot_read_forward.py": { + "a": 2, + "d": 0 + }, + "examples/plot_read_noise_covariance_matrix.py": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 1 + }, + "mne/decoding/classifier.py": { + "a": 6, + "d": 5 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/event.py": { + "a": 5, + "d": 1 + }, + "mne/evoked.py": { + "a": 12, + "d": 14 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 3, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/io/constants.py": { + "a": 3, + "d": 2 + }, + "mne/io/open.py": { + "a": 1, + "d": 2 + }, + "mne/io/pick.py": { + "a": 7, + "d": 7 + }, + "mne/io/proc_history.py": { + "a": 24, + "d": 24 + }, + "mne/label.py": { + "a": 13, + "d": 6 + }, + "mne/parallel.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 16, + "d": 23 + }, + "mne/preprocessing/ssp.py": { + "a": 0, + "d": 2 + }, + "mne/proj.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/client.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/mockclient.py": { + "a": 2, + "d": 0 + }, + "mne/simulation/evoked.py": { + "a": 10, + "d": 9 + }, + "mne/source_estimate.py": { + "a": 31, + "d": 33 + }, + "mne/stats/multi_comp.py": { + "a": 
1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 136, + "d": 0 + }, + "mne/time_frequency/csd.py": { + "a": 0, + "d": 2 + }, + "mne/time_frequency/stft.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 8 + }, + "mne/transforms.py": { + "a": 4, + "d": 4 + }, + "mne/utils.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 14, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1723.json b/doc/sphinxext/prs/1723.json new file mode 100644 index 00000000000..b9ec695fbec --- /dev/null +++ b/doc/sphinxext/prs/1723.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2fe7d512fd08c9993afa5f427ff3cf9808fb6ec1", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 15, + "d": 13 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1725.json b/doc/sphinxext/prs/1725.json new file mode 100644 index 00000000000..032e1e7ad65 --- /dev/null +++ b/doc/sphinxext/prs/1725.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "679020521e680e2d0051111ed78ae18c9f7e4e09", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1726.json b/doc/sphinxext/prs/1726.json new file mode 100644 index 00000000000..fca2ee1772e --- /dev/null +++ b/doc/sphinxext/prs/1726.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f7a74af49f88c4a0107dcf202eaf97756128b4dc", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/fiff/raw.py": { + "a": 10, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1731.json b/doc/sphinxext/prs/1731.json new file mode 100644 index 00000000000..28a41726498 --- /dev/null +++ b/doc/sphinxext/prs/1731.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0b77988dfa592b05b804c39d94176d14e2889a60", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fixes.py": { + "a": 8, + "d": 3 + }, + "mne/tests/test_cov.py": { + "a": 17, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1732.json b/doc/sphinxext/prs/1732.json new file mode 100644 index 00000000000..dd06c379f46 --- /dev/null +++ b/doc/sphinxext/prs/1732.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "3e35259323de85c0b3e9ccc3a5a7dd4c9c20fe3b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/channels/interpolation.py": { + "a": 15, + "d": 6 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 10, + "d": 7 + }, + "mne/evoked.py": { + "a": 62, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1733.json b/doc/sphinxext/prs/1733.json new file mode 100644 index 00000000000..79798b1d54c --- /dev/null +++ b/doc/sphinxext/prs/1733.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "eabc924be4f0ee23bcf52f16c4e580a77e8cbdd7", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 11, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/plot_rereference_eeg.py": { + "a": 63, + "d": 0 + }, + "mne/beamformer/_dics.py": { + "a": 4, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 5, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 5, + "d": 0 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 3, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 5, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 13, + "d": 73 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 15, + "d": 14 + }, + "mne/io/constants.py": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 9, + "d": 4 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 1, + "d": 35 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 13, + "d": 4 + }, + "mne/io/proj.py": { + "a": 29, + "d": 13 + }, + "mne/io/reference.py": { + "a": 302, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 181, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 14, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 7, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_proj.py": { + "a": 52, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1735.json b/doc/sphinxext/prs/1735.json new file mode 100644 index 00000000000..a70461f343c --- /dev/null +++ b/doc/sphinxext/prs/1735.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "85e98ff974dcdb16c020f2f795969e709ecbe9aa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1736.json b/doc/sphinxext/prs/1736.json new file mode 100644 index 00000000000..eb6c3159d23 --- 
/dev/null +++ b/doc/sphinxext/prs/1736.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "dff77d37ccc4fd611bfabb7362c8a13c85d3f913", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 7, + "d": 18 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 10, + "d": 13 + }, + "mne/parallel.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_filter.py": { + "a": 46, + "d": 44 + }, + "mne/time_frequency/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/_stockwell.py": { + "a": 64, + "d": 68 + }, + "mne/time_frequency/multitaper.py": { + "a": 5, + "d": 10 + }, + "mne/time_frequency/stft.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 26, + "d": 19 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 22, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 70, + "d": 81 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1740.json b/doc/sphinxext/prs/1740.json new file mode 100644 index 00000000000..6d66d9629e2 --- /dev/null +++ b/doc/sphinxext/prs/1740.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "083524fc9e7979053b4a40b68ae242d1cf18e153", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/_hdf5.py": { + "a": 0, + "d": 8 + }, + "mne/channels/layout.py": { + "a": 13, + "d": 6 + }, + "mne/channels/tests/test_montage.py": { + "a": 4, + "d": 1 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 12, + "d": 17 + }, + "mne/io/brainvision/brainvision.py": { + "a": 8, + "d": 27 + }, + "mne/io/bti/bti.py": { + "a": 5, + "d": 17 + }, + "mne/io/constants.py": { + "a": 14, + "d": 24 + }, + "mne/io/edf/edf.py": { + "a": 11, + "d": 18 + }, + "mne/io/egi/egi.py": { + "a": 4, + "d": 10 + }, + "mne/io/fiff/raw.py": { + "a": 18, + "d": 0 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 4, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 7, + "d": 19 + }, + "mne/io/meas_info.py": { + "a": 249, + "d": 51 + }, + "mne/io/proc_history.py": { + "a": 16, + "d": 4 + }, + "mne/simulation/evoked.py": { + "a": 5, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1744.json b/doc/sphinxext/prs/1744.json new file mode 100644 index 00000000000..cf1a6695cf9 --- /dev/null +++ b/doc/sphinxext/prs/1744.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e4216638edf58fe9003a5bc9e294e6377afec661", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1746.json b/doc/sphinxext/prs/1746.json new file mode 100644 index 00000000000..05bed132a81 --- /dev/null +++ b/doc/sphinxext/prs/1746.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ac3bd177215911fa012e2832f20fe506e53ff24c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 27, + "d": 1 + }, + "mne/io/constants.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1747.json b/doc/sphinxext/prs/1747.json new file mode 100644 index 00000000000..a4b47a099d3 --- /dev/null +++ b/doc/sphinxext/prs/1747.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b168657ec4adc2e6d8e0079d7fe8a1c509ec1092", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 50, + "d": 17 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 41, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/175.json b/doc/sphinxext/prs/175.json new file mode 100644 index 00000000000..3e499aaf7ff --- /dev/null +++ b/doc/sphinxext/prs/175.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "020b1691e78279c98488d8a3e3673c65dc9b7091", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/python_tutorial.rst": { + "a": 11, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 189, + "d": 1 + }, + "mne/event.py": { + "a": 26, + "d": 25 + }, + "mne/fiff/constants.py": { + "a": 3, + "d": 1 + }, + "mne/fiff/tag.py": { + "a": 3, + "d": 3 + }, + "mne/fiff/write.py": { + "a": 4, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 22, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1751.json b/doc/sphinxext/prs/1751.json new file mode 100644 index 00000000000..482f519b46f --- /dev/null +++ b/doc/sphinxext/prs/1751.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6c3bf4b96ecc39852a19bdf4c0707ae7dc89ecc0", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/io/reference.py": { + "a": 2, + "d": 3 + }, + "mne/io/tests/test_reference.py": { + "a": 15, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1756.json b/doc/sphinxext/prs/1756.json new file mode 100644 index 00000000000..5a866e4f618 --- /dev/null +++ b/doc/sphinxext/prs/1756.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "6761a2aa8afbf4758cb929ed7530fdacd5c83656", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 3 + }, + "mne/label.py": { + "a": 6, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 12, + "d": 10 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 8, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 28, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1759.json b/doc/sphinxext/prs/1759.json new file mode 100644 index 00000000000..d0d040362b8 --- /dev/null +++ b/doc/sphinxext/prs/1759.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0ae914e321a9971adc77e15fa68dfa4c9a29ffea", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + 
"mne/io/edf/edf.py": { + "a": 64, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/176.json b/doc/sphinxext/prs/176.json new file mode 100644 index 00000000000..0a1a97af7dd --- /dev/null +++ b/doc/sphinxext/prs/176.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5714255bba5c6126a4011100f36e4ce710ae9fba", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/label.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1762.json b/doc/sphinxext/prs/1762.json new file mode 100644 index 00000000000..b8ee3b21b80 --- /dev/null +++ b/doc/sphinxext/prs/1762.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "f3c8d3ac0c8873ab608fd0dff7b06b8b7d065cc7", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 12, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/plot_clickable_image.py": { + "a": 66, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 84, + "d": 0 + }, + "mne/channels/tests/test_layout.py": { + "a": 34, + "d": 1 + }, + "mne/data/image/custom_layout.lout": { + "a": 24, + "d": 0 + }, + "mne/data/image/mni_brain.gif": { + "a": 0, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 3 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 14 + }, + "mne/viz/tests/test_utils.py": { + "a": 49, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 9, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 148, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1763.json b/doc/sphinxext/prs/1763.json new file mode 100644 index 00000000000..3e96130971e --- /dev/null +++ b/doc/sphinxext/prs/1763.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3f1e532ddd59154a150f85d1264dd66dabfa5992", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 21, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1768.json b/doc/sphinxext/prs/1768.json new file mode 100644 index 00000000000..305d0c4618d --- /dev/null +++ b/doc/sphinxext/prs/1768.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "087b568d88ae85c8478c7c4f8b9ccd974f98a43c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/bads.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1769.json b/doc/sphinxext/prs/1769.json new file mode 100644 index 00000000000..e5e6a5c338b --- /dev/null +++ b/doc/sphinxext/prs/1769.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b0d1952e0afc7d71d4120e6c0ec35a9eccc0fd7a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_hdf5.py": { + "a": 21, + "d": 9 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/proc_history.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_hdf5.py": { + "a": 8, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 13, + "d": 0 + }, + "mne/utils.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1771.json b/doc/sphinxext/prs/1771.json new file mode 100644 index 00000000000..e96a5cd11fe --- /dev/null +++ b/doc/sphinxext/prs/1771.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6024057eebf809f30a20d09c0988a0bac4df5bb1", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 56, + "d": 64 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1772.json b/doc/sphinxext/prs/1772.json new file mode 100644 index 00000000000..e9865be734c --- /dev/null +++ b/doc/sphinxext/prs/1772.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "626a2f004024074ada5b5fc8b67ee6aea4e6f873", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 15, + "d": 4 + }, + "doc/source/advanced_setup.rst": { + "a": 130, + "d": 0 + }, + "doc/source/conf.py": { + "a": 6, + "d": 8 + }, + "doc/source/getting_started.rst": { + "a": 68, + "d": 173 + }, + "doc/source/mne-python.rst": { + "a": 1, + "d": 0 + }, + "doc/source/python_reference.rst": { + "a": 5, + "d": 3 + }, + "doc/sphinxext/flow_diagram.py": { + "a": 166, + "d": 0 + }, + "doc/sphinxext/gen_rst.py": { + "a": 45, + "d": 34 + }, + "examples/inverse/plot_mne_crosstalk_function.py": { + "a": 8, + "d": 11 + }, + "examples/inverse/plot_mne_point_spread_function.py": { + "a": 8, + "d": 16 + }, + "examples/plot_topo_compare_conditions.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 8, + "d": 5 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 41, + "d": 38 + }, + "mne/minimum_norm/tests/test_psf_ctf.py": { + "a": 53, + "d": 55 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1776.json b/doc/sphinxext/prs/1776.json new file mode 100644 index 00000000000..d8077e48532 --- /dev/null +++ b/doc/sphinxext/prs/1776.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fb3252956b06bffc1019ba23a3611e1ffd5fbbba", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/channels/interpolation.py": { + "a": 7, + "d": 1 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1779.json b/doc/sphinxext/prs/1779.json new file mode 100644 index 00000000000..f0f0bca1c39 --- /dev/null +++ b/doc/sphinxext/prs/1779.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6f7de25e6081151fabf3a8e9433e7a7e51b089ea", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/report.py": { + "a": 14, + "d": 4 + }, + "mne/tests/test_report.py": { + "a": 13, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/178.json b/doc/sphinxext/prs/178.json new file mode 100644 index 00000000000..99b6d1f62da --- /dev/null +++ b/doc/sphinxext/prs/178.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "cffc01240a9cebbf8122904799d07d502ef1bded", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fiff/tests/test_evoked.py": { + "a": 10, + "d": 0 + }, + "mne/fiff/tests/test_raw.py": { + "a": 10, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 19, + "d": 9 + }, + "mne/source_space.py": { + "a": 7, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 10, + "d": 0 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1782.json b/doc/sphinxext/prs/1782.json new file mode 100644 index 00000000000..acb9c611468 --- /dev/null +++ b/doc/sphinxext/prs/1782.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "957e3f3d1c749f29384457478fef7bd3218c9c04", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + ".mailmap": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 4, + "d": 3 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 4, + "d": 4 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 3, + "d": 4 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 2, + "d": 8 + }, + "examples/inverse/plot_mne_crosstalk_function.py": { + "a": 3, + "d": 9 + }, + "examples/inverse/plot_mne_point_spread_function.py": { + "a": 3, + "d": 9 + }, + "examples/plot_read_forward.py": { + "a": 1, + "d": 1 + }, + "examples/plot_ssp_projs_sensitivity_map.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 3, + "d": 5 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 1, + "d": 2 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 23, + "d": 12 + }, + "mne/viz/_3d.py": { + "a": 106, + "d": 14 + }, + "mne/viz/tests/test_3d.py": { + "a": 55, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 44, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1785.json b/doc/sphinxext/prs/1785.json new file mode 100644 index 00000000000..1929c37cf02 --- /dev/null +++ b/doc/sphinxext/prs/1785.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "461efa998a2b67407761136807d4054852fb620e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 20, + "d": 7 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 9, + "d": 7 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 17, + "d": 20 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 9, + "d": 3 + }, + 
"mne/inverse_sparse/mxne_inverse.py": { + "a": 162, + "d": 88 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 255, + "d": 57 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 4, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 38, + "d": 23 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 105, + "d": 17 + }, + "mne/io/proj.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1786.json b/doc/sphinxext/prs/1786.json new file mode 100644 index 00000000000..816bce7588c --- /dev/null +++ b/doc/sphinxext/prs/1786.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "7af8e9401be385a91f36f3a1fa5c77ad43e2b84c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/epochs.py": { + "a": 38, + "d": 41 + }, + "mne/evoked.py": { + "a": 0, + "d": 2 + }, + "mne/io/array/array.py": { + "a": 0, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 0, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 0, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 0, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 0, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 0, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 0, + "d": 1 + }, + "mne/io/proj.py": { + "a": 31, + "d": 2 + }, + "mne/realtime/epochs.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1788.json b/doc/sphinxext/prs/1788.json new file mode 100644 index 00000000000..2c42fa08ba3 --- /dev/null +++ b/doc/sphinxext/prs/1788.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5e731476b4f1ea74063a402109f4d0605cd83eab", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1789.json b/doc/sphinxext/prs/1789.json new file mode 100644 index 00000000000..a086993c7bb --- /dev/null +++ b/doc/sphinxext/prs/1789.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "244d21c7175671e5ed4dbc6c17e9c7453d10c6e5", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/179.json b/doc/sphinxext/prs/179.json new file mode 100644 index 00000000000..282f74ed7a6 --- /dev/null +++ b/doc/sphinxext/prs/179.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fb8c7029ea423722278fc66448e602342841fad9", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1791.json b/doc/sphinxext/prs/1791.json new file mode 100644 index 00000000000..c4bc41c63e6 --- /dev/null +++ b/doc/sphinxext/prs/1791.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "48b5b4072a4999bf45c8bbf8f7446eae499c1058", + "authors": [ + { + "n": "Basile", + "e": null + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1794.json b/doc/sphinxext/prs/1794.json new file mode 100644 index 00000000000..bbf11fe161b --- /dev/null +++ b/doc/sphinxext/prs/1794.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "97ae7e36874dedc4399699ab43f5b41df06b4183", + "authors": [ + { + "n": "Teon L Brooks", + 
"e": null + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1798.json b/doc/sphinxext/prs/1798.json new file mode 100644 index 00000000000..e5336fa208f --- /dev/null +++ b/doc/sphinxext/prs/1798.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e5b719c2601d39717993a63e30b6500d9262c142", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/classifier.py": { + "a": 6, + "d": 2 + }, + "mne/decoding/tests/test_classifier.py": { + "a": 18, + "d": 2 + }, + "mne/filter.py": { + "a": 9, + "d": 0 + }, + "mne/tests/test_filter.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1799.json b/doc/sphinxext/prs/1799.json new file mode 100644 index 00000000000..6a211dacc09 --- /dev/null +++ b/doc/sphinxext/prs/1799.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "82540830ef4c35f1d2ccf4cd0224ee709fd57a00", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/epochs.py": { + "a": 57, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 37, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/18.json b/doc/sphinxext/prs/18.json new file mode 100644 index 00000000000..08900f1e6f7 --- /dev/null +++ b/doc/sphinxext/prs/18.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9f4167c7e43563755c7517ce14e1e8c9db69e82f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/fiff/evoked.py": { + "a": 44, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1802.json b/doc/sphinxext/prs/1802.json new file mode 100644 index 00000000000..e3fdf9adb7e --- /dev/null +++ b/doc/sphinxext/prs/1802.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "35fc7f8a57cd4d09eaa2743de90902f9700b1442", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 49, + "d": 0 + }, + "mne/tests/test_label.py": { + "a": 19, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1805.json b/doc/sphinxext/prs/1805.json new file mode 100644 index 00000000000..67664633abf --- /dev/null +++ b/doc/sphinxext/prs/1805.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b38f0cdd27d3f36f743811609d00cb00d8ea5b7c", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1809.json b/doc/sphinxext/prs/1809.json new file mode 100644 index 00000000000..f58d5c1e7fb --- /dev/null +++ b/doc/sphinxext/prs/1809.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "58d4be1bf21f3d3fcbebfb3092590328cd270089", + "authors": [ + { + "n": "aestrivex", + "e": "aestrivex@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/181.json b/doc/sphinxext/prs/181.json new file mode 100644 index 00000000000..91dd0126101 --- /dev/null +++ b/doc/sphinxext/prs/181.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"18dfdf960cf22335f7dbe5b2c7a3827aa7bb6de1", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1810.json b/doc/sphinxext/prs/1810.json new file mode 100644 index 00000000000..5df21bc8bb8 --- /dev/null +++ b/doc/sphinxext/prs/1810.json @@ -0,0 +1,995 @@ +{ + "merge_commit_sha": "538f8e74b93b44ae043c6b94988ce8f813b229cb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 7, + "d": 1 + }, + "Makefile": { + "a": 9, + "d": 0 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 5, + "d": 4 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 5, + "d": 6 + }, + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 6, + "d": 6 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 8, + "d": 8 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 5, + "d": 4 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 6, + "d": 7 + }, + "examples/datasets/plot_megsim_data.py": { + "a": 2, + "d": 1 + }, + "examples/datasets/plot_megsim_data_single_trial.py": { + "a": 2, + "d": 1 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 1, + "d": 2 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 8, + "d": 7 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 7, + "d": 6 + }, + "examples/decoding/plot_decoding_sensors.py": { + "a": 5, + "d": 4 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 6, + "d": 7 + }, + "examples/decoding/plot_decoding_time_generalization.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 4, + "d": 4 + }, + "examples/export/plot_epochs_as_data_frame.py": { + "a": 1, + "d": 2 + }, + "examples/export/plot_epochs_to_nitime.py": { + "a": 7, + "d": 6 + }, + "examples/export/plot_evoked_to_nitime.py": { + "a": 4, + "d": 3 + }, + "examples/export/plot_raw_to_nitime.py": { + "a": 5, + "d": 7 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 1, + "d": 3 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 3, + "d": 4 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 6, + "d": 6 + }, + "examples/inverse/plot_dipole_fit_result.py": { + "a": 4, + "d": 3 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_label_activation_from_stc.py": { + "a": 61, + "d": 61 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 3, + "d": 2 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 3, + "d": 4 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 4, + "d": 3 + }, + "examples/inverse/plot_mne_crosstalk_function.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_mne_point_spread_function.py": { + 
"a": 3, + "d": 3 + }, + "examples/inverse/plot_morph_data.py": { + "a": 6, + "d": 5 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_read_stc.py": { + "a": 3, + "d": 2 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 1, + "d": 3 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/plot_bem_contour_mri.py": { + "a": 2, + "d": 0 + }, + "examples/plot_channel_epochs_image.py": { + "a": 5, + "d": 4 + }, + "examples/plot_coregistration_transform.py": { + "a": 2, + "d": 0 + }, + "examples/plot_decimate_head_surface.py": { + "a": 2, + "d": 2 + }, + "examples/plot_define_target_events.py": { + "a": 4, + "d": 4 + }, + "examples/plot_estimate_covariance_matrix_baseline.py": { + "a": 2, + "d": 2 + }, + "examples/plot_estimate_covariance_matrix_raw.py": { + "a": 2, + "d": 2 + }, + "examples/plot_evoked_delayed_ssp.py": { + "a": 3, + "d": 2 + }, + "examples/plot_evoked_topomap.py": { + "a": 2, + "d": 2 + }, + "examples/plot_evoked_topomap_delayed_ssp.py": { + "a": 3, + "d": 2 + }, + "examples/plot_evoked_whitening.py": { + "a": 2, + "d": 2 + }, + "examples/plot_extract_events_from_raw.py": { + "a": 2, + "d": 2 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 4, + "d": 3 + }, + "examples/plot_from_raw_to_multiple_epochs_to_evoked.py": { + "a": 4, + "d": 3 + }, + "examples/plot_interpolate_bad_channels.py": { + "a": 3, + "d": 2 + }, + "examples/plot_left_cerebellum_volume_source.py": { + "a": 2, + "d": 2 + }, + "examples/plot_make_forward.py": { + "a": 4, + "d": 3 + }, + "examples/plot_meg_eeg_fields_3d.py": { + "a": 2, + "d": 2 + }, + "examples/plot_read_and_write_raw_data.py": { + "a": 3, + "d": 2 + }, + "examples/plot_read_bem_surfaces.py": { + "a": 2, + "d": 2 + }, + "examples/plot_read_epochs.py": { + "a": 3, + "d": 2 + }, + "examples/plot_read_evoked.py": { + "a": 2, + "d": 2 + }, + "examples/plot_read_forward.py": { + "a": 12, + "d": 12 + }, + "examples/plot_read_noise_covariance_matrix.py": { + "a": 3, + "d": 3 + }, + "examples/plot_rereference_eeg.py": { + "a": 2, + "d": 0 + }, + "examples/plot_shift_evoked.py": { + "a": 2, + "d": 2 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 2, + "d": 2 + }, + "examples/plot_ssp_projs_sensitivity_map.py": { + "a": 4, + "d": 2 + }, + "examples/plot_ssp_projs_topomaps.py": { + "a": 3, + "d": 3 + }, + "examples/plot_topo_channel_epochs_image.py": { + "a": 3, + "d": 2 + }, + "examples/plot_topo_compare_conditions.py": { + "a": 3, + "d": 1 + }, + "examples/plot_topo_customized.py": { + "a": 4, + "d": 4 + }, + "examples/plot_topography.py": { + "a": 3, + "d": 3 + }, + "examples/preprocessing/plot_eog_artifact_histogram.py": { + "a": 4, + "d": 1 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 4, + "d": 1 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 5, + "d": 2 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 3, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 3, + "d": 2 + }, + "examples/read_events.py": { + "a": 2, + "d": 2 + }, + "examples/realtime/ftclient_rt_average.py": { + "a": 7, + "d": 7 + }, + "examples/realtime/ftclient_rt_compute_psd.py": { + "a": 2, + "d": 4 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 8, + "d": 5 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 8, + 
"d": 8 + }, + "examples/realtime/rt_feedback_client.py": { + "a": 2, + "d": 3 + }, + "examples/realtime/rt_feedback_server.py": { + "a": 8, + "d": 9 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 5, + "d": 5 + }, + "examples/stats/plot_cluster_methods_tutorial.py": { + "a": 4, + "d": 5 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 5, + "d": 6 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 4, + "d": 5 + }, + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 5, + "d": 5 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 6, + "d": 6 + }, + "examples/stats/plot_fdr_stats_evoked.py": { + "a": 4, + "d": 4 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 2, + "d": 3 + }, + "examples/stats/plot_sensor_regression.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_spatio_temporal_cluster_stats_sensor.py": { + "a": 6, + "d": 9 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/plot_compute_source_psd_epochs.py": { + "a": 3, + "d": 4 + }, + "examples/time_frequency/plot_single_trial_spectra.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 7, + "d": 7 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 4, + "d": 3 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 4, + "d": 3 + }, + "examples/time_frequency/plot_stockwell.py": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 3, + "d": 2 + }, + "examples/time_frequency/plot_time_frequency_multitaper_sensors.py": { + "a": 3, + "d": 2 + }, + "examples/time_frequency/plot_time_frequency_sensors.py": { + "a": 4, + "d": 3 + }, + "examples/time_frequency/plot_time_frequency_simulated.py": { + "a": 4, + "d": 4 + }, + "mne/_hdf5.py": { + "a": 0, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 3, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 1 + }, + "mne/channels/interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 5, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_browse_raw.py": { + "a": 3, + "d": 1 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 3, + "d": 1 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 7, + "d": 2 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 3, + "d": 3 + }, + "mne/commands/mne_report.py": { + "a": 1, + "d": 0 + }, + "mne/connectivity/effective.py": { + "a": 3, + "d": 3 + }, + "mne/connectivity/spectral.py": { + "a": 46, + "d": 41 + }, + "mne/connectivity/tests/test_effective.py": { + "a": 6, + "d": 6 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 11, + "d": 11 + }, + "mne/coreg.py": { + "a": 1, + "d": 2 + }, + "mne/cov.py": { + "a": 16, + "d": 15 + }, + "mne/cuda.py": { + "a": 5, + "d": 5 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 6, + "d": 5 + }, + "mne/datasets/megsim/megsim.py": { + "a": 4, + "d": 4 + }, + "mne/datasets/megsim/urls.py": { + "a": 3, + "d": 3 + }, + "mne/datasets/testing/_testing.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/tests/test_classifier.py": { + 
"a": 4, + "d": 4 + }, + "mne/decoding/tests/test_csp.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/evoked.py": { + "a": 6, + "d": 6 + }, + "mne/filter.py": { + "a": 5, + "d": 5 + }, + "mne/fixes.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_lead_dots.py": { + "a": 11, + "d": 11 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 8, + "d": 8 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/forward/tests/test_forward.py": { + "a": 4, + "d": 4 + }, + "mne/gui/_coreg_gui.py": { + "a": 4, + "d": 4 + }, + "mne/gui/_fiducials_gui.py": { + "a": 6, + "d": 4 + }, + "mne/gui/_file_traits.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 3, + "d": 3 + }, + "mne/gui/_marker_gui.py": { + "a": 4, + "d": 4 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 4, + "d": 4 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 4, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 10, + "d": 8 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 5, + "d": 5 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 26, + "d": 43 + }, + "mne/io/array/array.py": { + "a": 0, + "d": 3 + }, + "mne/io/base.py": { + "a": 8, + "d": 8 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 3 + }, + "mne/io/bti/bti.py": { + "a": 6, + "d": 4 + }, + "mne/io/bti/read.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf.py": { + "a": 9, + "d": 11 + }, + "mne/io/diff.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 39, + "d": 34 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/egi.py": { + "a": 5, + "d": 2 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 3, + "d": 3 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 15, + "d": 15 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 1, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 1, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/open.py": { + "a": 8, + "d": 8 + }, + "mne/io/pick.py": { + "a": 5, + "d": 5 + }, + "mne/io/proc_history.py": { + "a": 2, + "d": 2 + }, + "mne/io/proj.py": { + "a": 3, + "d": 4 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 3 + }, + "mne/io/tree.py": { + "a": 2, + "d": 1 + }, + "mne/io/write.py": { + "a": 2, + "d": 41 + }, + "mne/label.py": { + "a": 2, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 15, + "d": 16 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 4, + "d": 4 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/misc.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 20, + "d": 15 + }, + "mne/preprocessing/peak_finder.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 10, + "d": 8 + }, + "mne/preprocessing/tests/test_maxfilter.py": { + "a": 1, + "d": 2 + }, + "mne/proj.py": { + "a": 4, + "d": 3 + }, + "mne/realtime/client.py": { + "a": 3, + "d": 3 + }, + "mne/realtime/epochs.py": { + "a": 14, + "d": 15 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/mockclient.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/stim_server_client.py": { + "a": 3, + "d": 2 + }, + "mne/report.py": { + "a": 16, + "d": 22 + }, + "mne/simulation/tests/test_source.py": { + "a": 8, + 
"d": 6 + }, + "mne/source_estimate.py": { + "a": 8, + "d": 10 + }, + "mne/stats/cluster_level.py": { + "a": 2, + "d": 2 + }, + "mne/stats/parametric.py": { + "a": 4, + "d": 5 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_parametric.py": { + "a": 8, + "d": 11 + }, + "mne/surface.py": { + "a": 5, + "d": 6 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_cov.py": { + "a": 15, + "d": 16 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_event.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/_stockwell.py": { + "a": 3, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 14, + "d": 10 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 5, + "d": 5 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 0, + "d": 1 + }, + "mne/transforms.py": { + "a": 3, + "d": 3 + }, + "mne/utils.py": { + "a": 14, + "d": 6 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/circle.py": { + "a": 8, + "d": 7 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 5 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 3 + }, + "mne/viz/montage.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 5, + "d": 4 + }, + "mne/viz/tests/test_evoked.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 4 + }, + "mne/viz/tests/test_misc.py": { + "a": 2, + "d": 4 + }, + "mne/viz/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 12, + "d": 13 + }, + "mne/viz/tests/test_topo.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_topomap.py": { + "a": 6, + "d": 6 + }, + "mne/viz/tests/test_utils.py": { + "a": 2, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 7, + "d": 6 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "setup.cfg": { + "a": 4, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1815.json b/doc/sphinxext/prs/1815.json new file mode 100644 index 00000000000..e8ef22a3f45 --- /dev/null +++ b/doc/sphinxext/prs/1815.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "193cf2f7799ba388654433b47e0ebeae0a5adfe4", + "authors": [ + { + "n": "Yousra Bekhti", + "e": "yousra.bekhti@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_rap_music.py": { + "a": 56, + "d": 0 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/_rap_music.py": { + "a": 267, + "d": 0 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 149, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 121, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 16, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1816.json 
b/doc/sphinxext/prs/1816.json new file mode 100644 index 00000000000..0f95ed895f9 --- /dev/null +++ b/doc/sphinxext/prs/1816.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3226cbf6fbe4f0b9f9f6c390bd4707865efbcea3", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 8, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1817.json b/doc/sphinxext/prs/1817.json new file mode 100644 index 00000000000..385025d698c --- /dev/null +++ b/doc/sphinxext/prs/1817.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "efd7571ca4f465ea68b9f03403f2173699b82bbc", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/plot_read_epochs.py": { + "a": 1, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1818.json b/doc/sphinxext/prs/1818.json new file mode 100644 index 00000000000..0ffde8f097e --- /dev/null +++ b/doc/sphinxext/prs/1818.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "71332fbf18580829bed8cd6eba672fd5a5c6ebba", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 3, + "d": 3 + }, + "examples/plot_evoked_whitening.py": { + "a": 3, + "d": 3 + }, + "mne/cov.py": { + "a": 25, + "d": 6 + }, + "mne/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/fixes.py": { + "a": 0, + "d": 177 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1820.json b/doc/sphinxext/prs/1820.json new file mode 100644 index 00000000000..c20e471f7f5 --- /dev/null +++ b/doc/sphinxext/prs/1820.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2de9daf649d762193b62aa1ed973c62e7c97798d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_dics.py": { + "a": 12, + "d": 10 + }, + "mne/beamformer/_lcmv.py": { + "a": 121, + "d": 37 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1822.json b/doc/sphinxext/prs/1822.json new file mode 100644 index 00000000000..d596b0a4147 --- /dev/null +++ b/doc/sphinxext/prs/1822.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9b7a8b7442efa1a8f7756d75c782d8fc325e8be9", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 8, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1824.json b/doc/sphinxext/prs/1824.json new file mode 100644 index 00000000000..92c5da457de --- /dev/null +++ b/doc/sphinxext/prs/1824.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cb0ecdeb69c5e28b2d9031fb57cc6a564f83b5f4", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1825.json b/doc/sphinxext/prs/1825.json new file mode 100644 index 00000000000..b7cb3197bf7 --- /dev/null +++ b/doc/sphinxext/prs/1825.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7c5d7c30f42da1871ef9f125fc38ce359e404009", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 2, + "d": 5 + }, + "mne/channels/tests/test_channels.py": { + "a": 8, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1827.json b/doc/sphinxext/prs/1827.json new file mode 100644 index 00000000000..8e7906f6081 --- /dev/null +++ b/doc/sphinxext/prs/1827.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "85a945ca65f0ad918dd37368183f18c2650338e0", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1828.json b/doc/sphinxext/prs/1828.json new file mode 100644 index 00000000000..f16b0ae4d8f --- /dev/null +++ b/doc/sphinxext/prs/1828.json @@ -0,0 +1,199 @@ +{ + "merge_commit_sha": "614488b79855cc3129a2ff645950dcd25d600ab5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "doc/source/python_reference.rst": { + "a": 14, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/inverse/plot_dipole_fit.py": { + "a": 81, + "d": 0 + }, + "examples/inverse/plot_dipole_fit_result.py": { + "a": 0, + "d": 85 + }, + "examples/plot_left_cerebellum_volume_source.py": { + "a": 2, + "d": 2 + }, + "make/install_python.ps1": { + "a": 2, + "d": 2 + }, + "mne/__init__.py": { + "a": 5, + "d": 4 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 321, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 0, + "d": 2 + }, + "mne/channels/interpolation.py": { + "a": 4, + "d": 4 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 2 + }, + "mne/channels/utils.py": { + "a": 0, + "d": 34 + }, + "mne/cov.py": { + "a": 72, + "d": 24 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 2, + "d": 1 + }, + "mne/dipole.py": { + "a": 361, + "d": 26 + }, + "mne/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/forward/_compute_forward.py": { + "a": 303, + "d": 121 + }, + "mne/forward/_field_interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 164, + "d": 162 + }, + "mne/forward/forward.py": { + "a": 20, + "d": 10 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 37, + "d": 7 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 1, + "d": 0 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 13, + "d": 4 + }, + "mne/io/pick.py": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 68 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 91, + "d": 88 + }, + "mne/surface.py": { + "a": 68, + "d": 28 + }, + "mne/tests/test_bem.py": { + "a": 0, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 13, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 99, + 
"d": 11 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/ar.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 32, + "d": 12 + }, + "mne/viz/_3d.py": { + "a": 14, + "d": 4 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1829.json b/doc/sphinxext/prs/1829.json new file mode 100644 index 00000000000..50d799fac43 --- /dev/null +++ b/doc/sphinxext/prs/1829.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "06fbaf15b22048ce5a4bf77fb9956876a48a1c99", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 112, + "d": 10 + }, + "mne/source_space.py": { + "a": 21, + "d": 6 + }, + "mne/tests/test_dipole.py": { + "a": 37, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1831.json b/doc/sphinxext/prs/1831.json new file mode 100644 index 00000000000..54e3cf9af7a --- /dev/null +++ b/doc/sphinxext/prs/1831.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f08e0d01439237025de4e04a0ad2588f40a373f7", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 49, + "d": 107 + }, + "mne/io/edf/tests/data/test_uneven_samp.edf": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/test_uneven_samp.mat": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1832.json b/doc/sphinxext/prs/1832.json new file mode 100644 index 00000000000..874f075e062 --- /dev/null +++ b/doc/sphinxext/prs/1832.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "47674c285a9d85a652a1db5cdeb823e4ad125eaf", + "authors": [ + { + "n": null, + "e": "flKazemakase@gmail.com" + } + ], + "changes": { + "mne/preprocessing/infomax_.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1834.json b/doc/sphinxext/prs/1834.json new file mode 100644 index 00000000000..6cd5b79555c --- /dev/null +++ b/doc/sphinxext/prs/1834.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e3e58e5047f7706b12ef1535fb70606936d28053", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 10, + "d": 6 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1835.json b/doc/sphinxext/prs/1835.json new file mode 100644 index 00000000000..dec613995fb --- /dev/null +++ b/doc/sphinxext/prs/1835.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c8c088dd5ff507879d1d0c44f9d157617aad8bde", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/io/tag.py": { + "a": 13, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1838.json b/doc/sphinxext/prs/1838.json new file mode 100644 index 00000000000..23f198046e4 --- /dev/null +++ b/doc/sphinxext/prs/1838.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"ab0bf315742959780bdc7ddd18d5147b7e7f32ce", + "authors": [ + { + "n": "Clement Moutard", + "e": "clement.moutard@gmail.com" + } + ], + "changes": { + "mne/event.py": { + "a": 11, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 22, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1842.json b/doc/sphinxext/prs/1842.json new file mode 100644 index 00000000000..80953c4f1b9 --- /dev/null +++ b/doc/sphinxext/prs/1842.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9256d6b2a7fd8132a986ccbb042db8f2c40edb5b", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/evoked.py": { + "a": 5, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1844.json b/doc/sphinxext/prs/1844.json new file mode 100644 index 00000000000..db01e500519 --- /dev/null +++ b/doc/sphinxext/prs/1844.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "8ed925e150e9af82e33bdb47f65afc4924a0fa5e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_snr_estimate.py": { + "a": 29, + "d": 0 + }, + "mne/minimum_norm/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 118, + "d": 0 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 42, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 40, + "d": 0 + }, + "mne/viz/tests/test_misc.py": { + "a": 19, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1845.json b/doc/sphinxext/prs/1845.json new file mode 100644 index 00000000000..8c9ffbe5ab5 --- /dev/null +++ b/doc/sphinxext/prs/1845.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "84dccca8d6398f8f71e9a1d9b60552ecf86d68bf", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/kit/constants.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1851.json b/doc/sphinxext/prs/1851.json new file mode 100644 index 00000000000..b2a62674c4e --- /dev/null +++ b/doc/sphinxext/prs/1851.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b3c8c4cd3cdce72fb4cf07d2ab0fd038c53c266c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/write.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1852.json b/doc/sphinxext/prs/1852.json new file mode 100644 index 00000000000..47abb6546bc --- /dev/null +++ b/doc/sphinxext/prs/1852.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "be463db0c007d35e9cc5102f688543c16d42aea2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 64, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 24, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1854.json b/doc/sphinxext/prs/1854.json new file mode 100644 index 00000000000..821d0aaf76b --- /dev/null +++ b/doc/sphinxext/prs/1854.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "d9c384f360005c2f88f2e71b706980aed417a7e4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": 
{ + "a": 1, + "d": 1 + }, + "doc/source/cite.rst": { + "a": 1, + "d": 1 + }, + "doc/source/contributing.rst": { + "a": 4, + "d": 4 + }, + "doc/source/getting_started.rst": { + "a": 1, + "d": 1 + }, + "doc/source/git_links.inc": { + "a": 23, + "d": 23 + }, + "doc/source/known_projects.inc": { + "a": 3, + "d": 3 + }, + "doc/source/manual/analyze.rst": { + "a": 1, + "d": 1 + }, + "doc/source/manual/convert.rst": { + "a": 3, + "d": 3 + }, + "doc/source/mne-cpp.rst": { + "a": 1, + "d": 1 + }, + "doc/source/this_project.inc": { + "a": 2, + "d": 3 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 4 + }, + "examples/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1855.json b/doc/sphinxext/prs/1855.json new file mode 100644 index 00000000000..5c65196599b --- /dev/null +++ b/doc/sphinxext/prs/1855.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "5bda2c003ec1cfd3b3deb4ef1fc32cebe3e1f91e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 6, + "d": 11 + }, + "mne/cov.py": { + "a": 4, + "d": 5 + }, + "mne/dipole.py": { + "a": 9, + "d": 7 + }, + "mne/epochs.py": { + "a": 12, + "d": 12 + }, + "mne/evoked.py": { + "a": 10, + "d": 8 + }, + "mne/io/base.py": { + "a": 7, + "d": 3 + }, + "mne/simulation/evoked.py": { + "a": 2, + "d": 7 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 17 + }, + "mne/tests/test_utils.py": { + "a": 11, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 7, + "d": 5 + }, + "mne/utils.py": { + "a": 12, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 6, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 9, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1858.json b/doc/sphinxext/prs/1858.json new file mode 100644 index 00000000000..6c3b37d7c29 --- /dev/null +++ b/doc/sphinxext/prs/1858.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ec35be4d87505e9c7b7a0912e6c5e572c4fd3323", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 16, + "d": 32 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 0, + "d": 10 + }, + "mne/io/reference.py": { + "a": 83, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 102, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1859.json b/doc/sphinxext/prs/1859.json new file mode 100644 index 00000000000..8173a866647 --- /dev/null +++ b/doc/sphinxext/prs/1859.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "72368c9e2db4f99a7ad2f296c8c42752fa1712df", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 1 + }, + "mne/report.py": { + "a": 51, + "d": 4 + }, + "mne/tests/test_report.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/186.json b/doc/sphinxext/prs/186.json new file mode 100644 index 00000000000..99af1c11bc4 --- /dev/null +++ b/doc/sphinxext/prs/186.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d45c46d9dc3bddab51fa2c0312fd18e3689760c1", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/fixes.py": { + "a": 214, + "d": 0 + }, + "mne/stats/cluster_level.py": { 
+ "a": 2, + "d": 1 + }, + "mne/tests/test_fixes.py": { + "a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1860.json b/doc/sphinxext/prs/1860.json new file mode 100644 index 00000000000..1be5b96523a --- /dev/null +++ b/doc/sphinxext/prs/1860.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c5dc39d285ca8395947e28fe512a0339838657b1", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 6, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1861.json b/doc/sphinxext/prs/1861.json new file mode 100644 index 00000000000..7a92371118c --- /dev/null +++ b/doc/sphinxext/prs/1861.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "28bc08f7ed2996f3c80061ba397d47a074adf54c", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 4, + "d": 3 + }, + "mne/label.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 10, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1864.json b/doc/sphinxext/prs/1864.json new file mode 100644 index 00000000000..4bd27d5f953 --- /dev/null +++ b/doc/sphinxext/prs/1864.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "83bc6e189308e39db542878a0073be33233b5f37", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/plot_read_noise_covariance_matrix.py": { + "a": 6, + "d": 8 + }, + "mne/cov.py": { + "a": 34, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_misc.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1865.json b/doc/sphinxext/prs/1865.json new file mode 100644 index 00000000000..3a97ffc6370 --- /dev/null +++ b/doc/sphinxext/prs/1865.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "7325131a3d17fa0a0e3b5221c5d06c691a617c51", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 2, + "d": 2 + }, + "examples/plot_read_bem_surfaces.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 1 + }, + "mne/source_space.py": { + "a": 14, + "d": 8 + }, + "mne/surface.py": { + "a": 15, + "d": 8 + }, + "mne/tests/test_source_space.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_surface.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1866.json b/doc/sphinxext/prs/1866.json new file mode 100644 index 00000000000..c8829d68147 --- /dev/null +++ b/doc/sphinxext/prs/1866.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cc35976c3d1ce7803cb67e3a757d7927f597299a", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/187.json b/doc/sphinxext/prs/187.json new file mode 100644 index 00000000000..c76b13bd832 --- /dev/null +++ b/doc/sphinxext/prs/187.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "12501872506415575f2b824f7a0a13a5d55ca88a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": 
"alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_morph_data.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 2, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 7 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1870.json b/doc/sphinxext/prs/1870.json new file mode 100644 index 00000000000..b52bb76819c --- /dev/null +++ b/doc/sphinxext/prs/1870.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b54a39bb2947d68348157c55735246c33e1b765e", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1871.json b/doc/sphinxext/prs/1871.json new file mode 100644 index 00000000000..059999b8fdf --- /dev/null +++ b/doc/sphinxext/prs/1871.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fe5aaecfe2ce2bf6efb88064e9b5dbea4b9559c2", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/time_gen.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1873.json b/doc/sphinxext/prs/1873.json new file mode 100644 index 00000000000..1216c879cdd --- /dev/null +++ b/doc/sphinxext/prs/1873.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f675c61e93c5a4536ef3ab60318baa120c506beb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 29, + "d": 4 + }, + "mne/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1874.json b/doc/sphinxext/prs/1874.json new file mode 100644 index 00000000000..76feda3235a --- /dev/null +++ b/doc/sphinxext/prs/1874.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a0ca9af2d862e60ecf0ebb4e4280473b3dde8d85", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1876.json b/doc/sphinxext/prs/1876.json new file mode 100644 index 00000000000..be24ce61a99 --- /dev/null +++ b/doc/sphinxext/prs/1876.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "73ed901a32895b3c17bef5494eec6a558b747b7f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 3 + }, + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_dipole_fit.py": { + "a": 2, + "d": 34 + }, + "examples/inverse/plot_rap_music.py": { + "a": 3, + "d": 2 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 3, + "d": 3 + }, + "mne/bem.py": { + "a": 11, + "d": 9 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 4, + "d": 4 + }, + "mne/cov.py": { + "a": 5, + "d": 1 + }, + "mne/dipole.py": { + "a": 94, + "d": 18 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 41, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 6, + "d": 4 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 23, + "d": 4 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 0, + "d": 87 + }, + 
"mne/io/chpi.py": { + "a": 130, + "d": 0 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 1, + "d": 20 + }, + "mne/io/tests/test_chpi.py": { + "a": 52, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 4, + "d": 7 + }, + "mne/viz/_3d.py": { + "a": 27, + "d": 59 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 34, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 10, + "d": 9 + }, + "mne/viz/tests/test_misc.py": { + "a": 12, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1877.json b/doc/sphinxext/prs/1877.json new file mode 100644 index 00000000000..7c00d5aea73 --- /dev/null +++ b/doc/sphinxext/prs/1877.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7bb23ab445a9aaee82d0b30250ee3d379dcc771c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 7, + "d": 7 + }, + "mne/io/proc_history.py": { + "a": 4, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1879.json b/doc/sphinxext/prs/1879.json new file mode 100644 index 00000000000..ed022c1ddf0 --- /dev/null +++ b/doc/sphinxext/prs/1879.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "34e116f4043ef7b8b19fe15d8b9261fc05dd5cf6", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 55, + "d": 49 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1881.json b/doc/sphinxext/prs/1881.json new file mode 100644 index 00000000000..5e14ac80ff4 --- /dev/null +++ b/doc/sphinxext/prs/1881.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "adf63c33fc5c4f7639af7799306a7e253b3ec3fa", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 5, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 11, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 5, + "d": 0 + }, + "mne/io/egi/egi.py": { + "a": 6, + "d": 2 + }, + "mne/io/fiff/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 51, + "d": 0 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 3, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1884.json b/doc/sphinxext/prs/1884.json new file mode 100644 index 00000000000..9c87f76e1e0 --- /dev/null +++ b/doc/sphinxext/prs/1884.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3017c02048056aca97b5ef0bb8310dc895ef8785", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 3, + "d": 3 + }, + "mne/io/fiff/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 5, + "d": 5 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/189.json b/doc/sphinxext/prs/189.json new file mode 100644 index 00000000000..2b2dfa33e1e --- /dev/null +++ b/doc/sphinxext/prs/189.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f18dcaca4a18c3bf56d106cd5c9169e46a14f7d2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 29, 
+ "d": 6 + }, + "mne/tests/test_source_estimate.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1891.json b/doc/sphinxext/prs/1891.json new file mode 100644 index 00000000000..b9bbd0991c4 --- /dev/null +++ b/doc/sphinxext/prs/1891.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8e97c24e94c200664993b5f516df37e1387a08a5", + "authors": [ + { + "n": "moutard", + "e": "clement.moutard@gmail.com" + } + ], + "changes": { + "mne/decoding/time_gen.py": { + "a": 28, + "d": 8 + }, + "mne/viz/decoding.py": { + "a": 42, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1895.json b/doc/sphinxext/prs/1895.json new file mode 100644 index 00000000000..8d72986cc00 --- /dev/null +++ b/doc/sphinxext/prs/1895.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3c875fce0419f6a1beabdde8f38fb5d903db2095", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1896.json b/doc/sphinxext/prs/1896.json new file mode 100644 index 00000000000..16dd10c0e63 --- /dev/null +++ b/doc/sphinxext/prs/1896.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "d1ad8e0606518a572a549a3a62bd81a18bb067d1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 3, + "d": 6 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 2, + "d": 2 + }, + "examples/plot_coregistration_transform.py": { + "a": 1, + "d": 1 + }, + "examples/plot_make_forward.py": { + "a": 2, + "d": 2 + }, + "examples/plot_meg_eeg_fields_3d.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 14, + "d": 9 + }, + "mne/forward/_make_forward.py": { + "a": 19, + "d": 13 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 6 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 4, + "d": 4 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 32, + "d": 34 + }, + "mne/report.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 12, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1897.json b/doc/sphinxext/prs/1897.json new file mode 100644 index 00000000000..ff2c56a6a2a --- /dev/null +++ b/doc/sphinxext/prs/1897.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "f5dd3c9207b670a91d603b9afe6ccbf00d176354", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 6, + "d": 5 + }, + "mne/epochs.py": { + "a": 14, + "d": 14 + }, + "mne/preprocessing/ecg.py": { + "a": 12, + "d": 6 + }, + "mne/preprocessing/eog.py": { + "a": 12, + "d": 6 + }, + "mne/preprocessing/ica.py": { + "a": 13, + "d": 6 + }, + "mne/preprocessing/ssp.py": { + "a": 6, + "d": 6 + }, + "mne/proj.py": { + "a": 12, + "d": 12 + }, + "mne/realtime/epochs.py": { + "a": 14, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/19.json b/doc/sphinxext/prs/19.json new file mode 100644 index 00000000000..512b94571e4 --- /dev/null +++ b/doc/sphinxext/prs/19.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b9a12876e7bbdae86f219d6dd071c81b73ccd17d", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + 
"mne/minimum_norm/inverse.py": { + "a": 41, + "d": 28 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1901.json b/doc/sphinxext/prs/1901.json new file mode 100644 index 00000000000..eddd8abb8cf --- /dev/null +++ b/doc/sphinxext/prs/1901.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5b65c7b003325ca2adb1395e7a95b04ac2514f4d", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/io/reference.py": { + "a": 1, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 17, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1908.json b/doc/sphinxext/prs/1908.json new file mode 100644 index 00000000000..bb445c7999f --- /dev/null +++ b/doc/sphinxext/prs/1908.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3fbdc6d689526743a6d1cf49a1d4aadbaa395a7f", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/191.json b/doc/sphinxext/prs/191.json new file mode 100644 index 00000000000..9419f1d9f6e --- /dev/null +++ b/doc/sphinxext/prs/191.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b016aaaddde06d1dd79c4d7098d514475506f374", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_ecg_artifacts_from_ica.py": { + "a": 30, + "d": 24 + }, + "examples/preprocessing/plot_eog_artifacts_from_ica.py": { + "a": 23, + "d": 18 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 22, + "d": 6 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 26, + "d": 15 + }, + "mne/artifacts/ica.py": { + "a": 232, + "d": 98 + }, + "mne/artifacts/tests/test_ica.py": { + "a": 149, + "d": 75 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1911.json b/doc/sphinxext/prs/1911.json new file mode 100644 index 00000000000..27466572fa2 --- /dev/null +++ b/doc/sphinxext/prs/1911.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "47ec75c3a1c2ba4f16d0398c479707af65c3b7d9", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/decoding/classifier.py": { + "a": 10, + "d": 6 + }, + "mne/epochs.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1914.json b/doc/sphinxext/prs/1914.json new file mode 100644 index 00000000000..ad610d2b3c1 --- /dev/null +++ b/doc/sphinxext/prs/1914.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "90d297e918e28f8eab1f7d364d47f064abb66d32", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 5, + "d": 0 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 7, + "d": 7 + }, + "examples/time_frequency/plot_epochs_spectra.py": { + "a": 45, + "d": 0 + }, + "examples/time_frequency/plot_single_trial_spectra.py": { + "a": 0, + "d": 99 + }, + "mne/epochs.py": { + "a": 126, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 26, + "d": 6 + }, + "mne/time_frequency/psd.py": { + "a": 68, + "d": 42 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 59, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 17, + "d": 8 + }, + "mne/viz/__init__.py": { + "a": 4, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 94, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 81, + "d": 49 + }, + "mne/viz/tests/test_epochs.py": { + "a": 12, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 5, + "d": 5 + }, + "mne/viz/tests/test_topo.py": { + "a": 4, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 214, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1915.json b/doc/sphinxext/prs/1915.json new file mode 100644 index 00000000000..a94d613c235 --- /dev/null +++ b/doc/sphinxext/prs/1915.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b7f0c3c06e58b766f84ae4d25d1defd814db931e", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 5, + "d": 2 + }, + "mne/decoding/time_gen.py": { + "a": 17, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1918.json b/doc/sphinxext/prs/1918.json new file mode 100644 index 00000000000..b0a505bb213 --- /dev/null +++ b/doc/sphinxext/prs/1918.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6498dd9871124776c71f4065fe22e70683c9de26", + "authors": [ + { + "n": "Hari Bharadwaj", + "e": null + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 10, + "d": 7 + }, + "mne/channels/tests/test_montage.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/192.json b/doc/sphinxext/prs/192.json new file mode 100644 index 00000000000..cbd78bf020a --- /dev/null +++ b/doc/sphinxext/prs/192.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "08ddd9a85e62a351773321c1bbc7bc4558cc9940", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 20, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1921.json b/doc/sphinxext/prs/1921.json new file mode 100644 index 00000000000..497481140dd --- /dev/null +++ b/doc/sphinxext/prs/1921.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6ab25486e53d2197a97229d694fdfe2ff079eeaa", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1923.json b/doc/sphinxext/prs/1923.json new file mode 100644 index 00000000000..f0569d716ad --- /dev/null +++ b/doc/sphinxext/prs/1923.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f83a81a73873adeef6822513a18faafe3ab2e369", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/reference.py": { + "a": 4, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + } + } +} \ 
No newline at end of file diff --git a/doc/sphinxext/prs/1925.json b/doc/sphinxext/prs/1925.json new file mode 100644 index 00000000000..8ff719682b9 --- /dev/null +++ b/doc/sphinxext/prs/1925.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5e0ecd54e9e970b0e4e9b7cd5ad58c8b0d6f3a61", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 55 + }, + "mne/tests/test_epochs.py": { + "a": 69, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1929.json b/doc/sphinxext/prs/1929.json new file mode 100644 index 00000000000..aee6c26c071 --- /dev/null +++ b/doc/sphinxext/prs/1929.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "abf811e147c24397c161344841971426744e9773", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/193.json b/doc/sphinxext/prs/193.json new file mode 100644 index 00000000000..45fcd531e00 --- /dev/null +++ b/doc/sphinxext/prs/193.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9063dc9da686322b356864253fec7108f5eaace4", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/artifacts/ica.py": { + "a": 12, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1931.json b/doc/sphinxext/prs/1931.json new file mode 100644 index 00000000000..486931b80e6 --- /dev/null +++ b/doc/sphinxext/prs/1931.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "54ea53109e14e7c707d92703797c220f5165e6c1", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 3, + "d": 2 + }, + "doc/source/whats_new.rst": { + "a": 23, + "d": 20 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 18, + "d": 2 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 268 + }, + "mne/channels/montage.py": { + "a": 487, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 3, + "d": 3 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 104, + "d": 6 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/layouts/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/montage.py": { + "a": 24, + "d": 9 + }, + "mne/viz/tests/test_montage.py": { + "a": 16, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1932.json b/doc/sphinxext/prs/1932.json new file mode 100644 index 00000000000..0f3780c8f38 --- /dev/null +++ b/doc/sphinxext/prs/1932.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "14fdd49af1d809bdf2337b53c607f3f379b3bfed", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 7, + "d": 2 + }, + "mne/channels/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 96, + "d": 7 + }, + "mne/channels/tests/test_channels.py": { + "a": 30, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1933.json b/doc/sphinxext/prs/1933.json new file mode 100644 index 00000000000..ac5482e7558 --- /dev/null +++ b/doc/sphinxext/prs/1933.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "66f4adde0bca70eca03c79bd86923ed36cc61934", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/classifier.py": { + "a": 6, + "d": 6 + }, + "mne/epochs.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1936.json b/doc/sphinxext/prs/1936.json new file mode 100644 index 00000000000..601a51e80c6 --- /dev/null +++ b/doc/sphinxext/prs/1936.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ac9c1670e2c67613e11d3c466dc88159d89c62a6", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 10, + "d": 1 + }, + "mne/io/edf/tests/data/test_edf_eeglab.mat": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1937.json b/doc/sphinxext/prs/1937.json new file mode 100644 index 00000000000..1a8fe4b4c77 --- /dev/null +++ b/doc/sphinxext/prs/1937.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4cdd5d227b8e7dbe5284e2fb4bd26d0d03ef1a34", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/viz/topo.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/194.json b/doc/sphinxext/prs/194.json new file mode 100644 index 00000000000..0921e8f8375 --- /dev/null +++ b/doc/sphinxext/prs/194.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4684fc1dabe0218102e931c58a2d6dfdf22fffda", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "mne/time_frequency/stft.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1940.json b/doc/sphinxext/prs/1940.json new file mode 100644 index 00000000000..32bcc0022f9 --- /dev/null +++ b/doc/sphinxext/prs/1940.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "981995260f78317334b924a41ab7bb3f11cab354", + "authors": [ + { + "n": "Hafeza Anevar", + "e": "hafiza.taj@gmail.com" + } + ], + "changes": { + "mne/fixes.py": { + "a": 104, + "d": 0 + }, + "mne/utils.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1942.json b/doc/sphinxext/prs/1942.json new file mode 100644 index 00000000000..35061f72885 --- /dev/null +++ b/doc/sphinxext/prs/1942.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ca5440e1d0c7fe52e426c098b3d65d3ec007ebf9", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 6, + "d": 4 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1943.json b/doc/sphinxext/prs/1943.json new file mode 100644 index 00000000000..cc78274604a --- /dev/null +++ b/doc/sphinxext/prs/1943.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "8773accb114d38ea93b2390c738d1c20196dc6da", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 0 + }, + "examples/plot_virtual_evoked.py": { + "a": 39, + "d": 0 + }, + "mne/evoked.py": { + "a": 25, + "d": 0 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 110, + "d": 13 + }, + "mne/forward/_lead_dots.py": { + "a": 214, + "d": 5 + }, + "mne/forward/_make_forward.py": { + "a": 19, + "d": 1 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 59, + "d": 4 + }, + "mne/utils.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/1945.json b/doc/sphinxext/prs/1945.json new file mode 100644 index 00000000000..58d111fc671 --- /dev/null +++ b/doc/sphinxext/prs/1945.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a0c37d9f7572dce6271574aeaa3ab75a41818aa2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 0, + "d": 4 + }, + "mne/cuda.py": { + "a": 52, + "d": 41 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1946.json b/doc/sphinxext/prs/1946.json new file mode 100644 index 00000000000..bb86cffe6de --- /dev/null +++ b/doc/sphinxext/prs/1946.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "100621bac4e89475c20b9055fba5a51b8a78177c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/stats/plot_sensor_permutation_test.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 5, + "d": 5 + }, + "mne/io/base.py": { + "a": 26, + "d": 21 + }, + "mne/io/bti/read.py": { + "a": 16, + "d": 16 + }, + "mne/time_frequency/tfr.py": { + "a": 7, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 23, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1947.json b/doc/sphinxext/prs/1947.json new file mode 100644 index 00000000000..0329450658c --- /dev/null +++ b/doc/sphinxext/prs/1947.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "966e489a408a6ea32ab4af3ed0a04a22dbc4879d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/gen_rst.py": { + "a": 1, + "d": 4 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 1, + "d": 4 + }, + "examples/inverse/plot_dipole_fit.py": { + "a": 1, + "d": 4 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 1, + "d": 4 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 2, + "d": 4 + }, + "examples/plot_decimate_head_surface.py": { + "a": 1, + "d": 4 + }, + "examples/plot_read_bem_surfaces.py": { + "a": 1, + "d": 4 + }, + "mne/commands/mne_flash_bem_model.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 2, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1948.json b/doc/sphinxext/prs/1948.json new file mode 100644 index 00000000000..1b06c26590d --- /dev/null +++ b/doc/sphinxext/prs/1948.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c6884950188cecec2c6a8c7e536dd0b49eb2f044", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 24, + "d": 11 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 1 + }, + "mne/io/tests/test_apply_function.py": { + "a": 57, + "d": 0 + }, + "mne/utils.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1950.json b/doc/sphinxext/prs/1950.json new file mode 100644 index 00000000000..4c74048cfd5 --- /dev/null +++ b/doc/sphinxext/prs/1950.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "af34b75bb1657cf4f16b0da23fc4fdc7c9fa0d9e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 0 + }, + "mne/parallel.py": { + "a": 2, + "d": 2 + }, + "mne/stats/cluster_level.py": { + "a": 9, + "d": 5 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 39, + "d": 1 + } + } +} 
\ No newline at end of file diff --git a/doc/sphinxext/prs/1951.json b/doc/sphinxext/prs/1951.json new file mode 100644 index 00000000000..10eab72613b --- /dev/null +++ b/doc/sphinxext/prs/1951.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6d83c531bce8a82923302201e5278a18713a798a", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 12, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1952.json b/doc/sphinxext/prs/1952.json new file mode 100644 index 00000000000..aa337177d8b --- /dev/null +++ b/doc/sphinxext/prs/1952.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "8fd16d645994c2dd220d7455e6b0e89c750870ba", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 1, + "d": 1 + }, + "examples/plot_decimate_head_surface.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_bem_surfaces.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 6 + }, + "mne/forward/tests/test_forward.py": { + "a": 4, + "d": 2 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 21 + }, + "mne/io/base.py": { + "a": 54, + "d": 26 + }, + "mne/io/brainvision/brainvision.py": { + "a": 11, + "d": 26 + }, + "mne/io/bti/bti.py": { + "a": 4, + "d": 26 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 10, + "d": 23 + }, + "mne/io/egi/egi.py": { + "a": 7, + "d": 23 + }, + "mne/io/fiff/raw.py": { + "a": 18, + "d": 66 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 50, + "d": 51 + }, + "mne/io/kit/kit.py": { + "a": 7, + "d": 20 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 7, + "d": 5 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1953.json b/doc/sphinxext/prs/1953.json new file mode 100644 index 00000000000..c0c06350041 --- /dev/null +++ b/doc/sphinxext/prs/1953.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a43d49b2380f15f3e94f422cba1e21c254912943", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 4, + "d": 2 + }, + "mne/decoding/time_gen.py": { + "a": 28, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1955.json b/doc/sphinxext/prs/1955.json new file mode 100644 index 00000000000..10dbb70ff3d --- /dev/null +++ b/doc/sphinxext/prs/1955.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "37a613a84820a5b72a2298c8912446caa8707eba", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/export/README.txt": { + "a": 0, + "d": 5 + }, + "examples/export/plot_epochs_to_nitime.py": { + "a": 0, + "d": 66 + }, + "examples/export/plot_evoked_to_nitime.py": { + "a": 0, + "d": 35 + }, + "examples/export/plot_raw_to_nitime.py": { + "a": 0, + "d": 81 + }, + "examples/plot_epochs_as_data_frame.py": { + "a": 0, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + 
"d": 0 + }, + "mne/evoked.py": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 0 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 8, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 12, + "d": 9 + }, + "mne/tests/test_evoked.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1956.json b/doc/sphinxext/prs/1956.json new file mode 100644 index 00000000000..cc37f720a53 --- /dev/null +++ b/doc/sphinxext/prs/1956.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "3a3155f7e83bd8353ccdfb7208f749e1a7438695", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 0, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 6, + "d": 2 + }, + "examples/inverse/plot_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 2, + "d": 4 + }, + "mne/channels/channels.py": { + "a": 65, + "d": 1 + }, + "mne/evoked.py": { + "a": 10, + "d": 6 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 4, + "d": 4 + }, + "mne/io/pick.py": { + "a": 5, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 2, + "d": 3 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 8, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 14, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1957.json b/doc/sphinxext/prs/1957.json new file mode 100644 index 00000000000..49d39037bb7 --- /dev/null +++ b/doc/sphinxext/prs/1957.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d565410fac89de75b6acd9adf172efc163dd9fdc", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/report.py": { + "a": 16, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1962.json b/doc/sphinxext/prs/1962.json new file mode 100644 index 00000000000..81c828c5056 --- /dev/null +++ b/doc/sphinxext/prs/1962.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "263110920da7985453be4e50a6b8dfea6afab0da", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/channels/data/montages/biosemi-128.txt": { + "a": 132, + "d": 0 + }, + "mne/channels/data/montages/biosemi-16.txt": { + "a": 20, + "d": 0 + }, + "mne/channels/data/montages/biosemi-160.txt": { + "a": 164, + "d": 0 + }, + "mne/channels/data/montages/biosemi-256.txt": { + "a": 260, + "d": 0 + }, + "mne/channels/data/montages/biosemi-32.txt": { + "a": 36, + "d": 0 + }, + "mne/channels/data/montages/biosemi-64.txt": { + "a": 68, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1965.json b/doc/sphinxext/prs/1965.json new file mode 100644 index 00000000000..c70e55cb58f --- /dev/null +++ b/doc/sphinxext/prs/1965.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c543fad284933f36c3ad1a654e516c97fbfb64de", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "examples/decoding/plot_decoding_time_generalization.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 25, + "d": 48 + }, + "mne/decoding/time_gen.py": { + "a": 28, + "d": 151 + }, + "mne/viz/decoding.py": { + "a": 1, + "d": 2 + } + } +} \ 
No newline at end of file diff --git a/doc/sphinxext/prs/1967.json b/doc/sphinxext/prs/1967.json new file mode 100644 index 00000000000..df64155f776 --- /dev/null +++ b/doc/sphinxext/prs/1967.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6a65c7247e1a2736d2ccb70f7521e37d2784e8c4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 29, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1969.json b/doc/sphinxext/prs/1969.json new file mode 100644 index 00000000000..6af90c2fb7e --- /dev/null +++ b/doc/sphinxext/prs/1969.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4ecfa061167e3556d54fd0cf828f0f2ed405b504", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 3, + "d": 4 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1972.json b/doc/sphinxext/prs/1972.json new file mode 100644 index 00000000000..3149bc0e483 --- /dev/null +++ b/doc/sphinxext/prs/1972.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6d785f170055e033e22f841ab0e1ae6284329dfa", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 11, + "d": 2 + }, + "mne/evoked.py": { + "a": 7, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 11, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 14, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 116, + "d": 58 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1974.json b/doc/sphinxext/prs/1974.json new file mode 100644 index 00000000000..f9a2923753c --- /dev/null +++ b/doc/sphinxext/prs/1974.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d6ed0e35b4d85926ef155c1c830baba31d3fa7c6", + "authors": [ + { + "n": "manoj kumar", + "e": "mechcoder@google.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1977.json b/doc/sphinxext/prs/1977.json new file mode 100644 index 00000000000..cf494b67404 --- /dev/null +++ b/doc/sphinxext/prs/1977.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "557e4d0b362e865b2f22c8b66692d62650d56524", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 10, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1978.json b/doc/sphinxext/prs/1978.json new file mode 100644 index 00000000000..d10a56640ab --- /dev/null +++ b/doc/sphinxext/prs/1978.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "000ebe8909933a20572685a7770c349478775bd4", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 4, + "d": 5 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 3 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 2 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 15, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1979.json b/doc/sphinxext/prs/1979.json new file mode 100644 index 00000000000..8ef00080f1b --- /dev/null +++ b/doc/sphinxext/prs/1979.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bf8b8758f440fc480d5cc601777cd0d8962870e7", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/faq.rst": { + "a": 73, + "d": 0 + }, + "doc/source/mne-python.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/198.json b/doc/sphinxext/prs/198.json new file mode 100644 index 00000000000..069f148938b --- /dev/null +++ b/doc/sphinxext/prs/198.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "22bec9c303824cc20da87d2da42f02e0b691d564", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/fiff/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/fiff/proj.py": { + "a": 0, + "d": 25 + }, + "mne/fiff/raw.py": { + "a": 6, + "d": 106 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1980.json b/doc/sphinxext/prs/1980.json new file mode 100644 index 00000000000..8de97ea2aae --- /dev/null +++ b/doc/sphinxext/prs/1980.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "29a44177e061c84ae3af122c9a843299ccfb1c3a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 9, + "d": 12 + }, + "mne/io/edf/edf.py": { + "a": 0, + "d": 11 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1981.json b/doc/sphinxext/prs/1981.json new file mode 100644 index 00000000000..70368a560e0 --- /dev/null +++ b/doc/sphinxext/prs/1981.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "367e542c058105a8141dd15011f9690237ebf974", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 76, + "d": 76 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1984.json b/doc/sphinxext/prs/1984.json new file mode 100644 index 00000000000..6228342e4a7 --- /dev/null +++ b/doc/sphinxext/prs/1984.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c1109e0001102b55c5dc056f88c12d9e9952a4c6", + "authors": [ + { + "n": "moutard", + "e": "clement.moutard@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 5, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1988.json b/doc/sphinxext/prs/1988.json new file mode 100644 index 00000000000..05c93714da1 --- /dev/null +++ b/doc/sphinxext/prs/1988.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "97708188c440becd3e048ec3e0f70fd15cb9e83b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 11, + "d": 8 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 4, + "d": 2 + }, + "mne/utils.py": { + "a": 8, + "d": 0 + }, + "mne/viz/tests/test_circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 4, + "d": 1 + }, + "mne/viz/tests/test_misc.py": { + "a": 0, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of 
file diff --git a/doc/sphinxext/prs/1990.json b/doc/sphinxext/prs/1990.json new file mode 100644 index 00000000000..98d16bbecd1 --- /dev/null +++ b/doc/sphinxext/prs/1990.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d353056eceb24cd448f98f86695de5589772911c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/plot_topo_compare_conditions.py": { + "a": 7, + "d": 4 + }, + "mne/epochs.py": { + "a": 30, + "d": 8 + }, + "mne/tests/test_epochs.py": { + "a": 19, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1992.json b/doc/sphinxext/prs/1992.json new file mode 100644 index 00000000000..2c4fbd822a3 --- /dev/null +++ b/doc/sphinxext/prs/1992.json @@ -0,0 +1,123 @@ +{ + "merge_commit_sha": "8a69ca9240e97245c5c19692ccc6f04a4271b85e", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 9, + "d": 10 + }, + "mne/connectivity/spectral.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 5, + "d": 5 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 7, + "d": 7 + }, + "mne/io/open.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 4, + "d": 4 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 6, + "d": 6 + }, + "mne/source_space.py": { + "a": 4, + "d": 4 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1993.json b/doc/sphinxext/prs/1993.json new file mode 100644 index 00000000000..1b8bb3d5f3d --- /dev/null +++ b/doc/sphinxext/prs/1993.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e3144574326c16ec5ba786a81be6c68db5daa912", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/mne_report_tutorial.rst": { + "a": 10, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_report.py": { + "a": 21, + "d": 2 + }, + "mne/report.py": { + "a": 86, + "d": 15 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1994.json b/doc/sphinxext/prs/1994.json new file mode 100644 index 00000000000..4777254b0d2 --- /dev/null +++ b/doc/sphinxext/prs/1994.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5bb39fa8bb6bda7e27ce899813337254bc184628", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 2 + }, + "mne/report.py": { + "a": 50, + "d": 16 + } + } +} \ No newline 
at end of file diff --git a/doc/sphinxext/prs/1997.json b/doc/sphinxext/prs/1997.json new file mode 100644 index 00000000000..05a8ecc8f91 --- /dev/null +++ b/doc/sphinxext/prs/1997.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "8b0300df14b4115c70c91a53291afae408b2594b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 4, + "d": 1 + }, + "mne/bem.py": { + "a": 13, + "d": 6 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 11, + "d": 5 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 7, + "d": 4 + }, + "mne/io/tests/test_pick.py": { + "a": 8, + "d": 1 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/stim.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/1998.json b/doc/sphinxext/prs/1998.json new file mode 100644 index 00000000000..bca374160a3 --- /dev/null +++ b/doc/sphinxext/prs/1998.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ef381aaa52d7ec985bdb544c3625b2ff31022d02", + "authors": [ + { + "n": "Yousra Bekhti", + "e": "yousra.bekhti@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2.json b/doc/sphinxext/prs/2.json new file mode 100644 index 00000000000..c6d9cef1ee1 --- /dev/null +++ b/doc/sphinxext/prs/2.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "1886878b372506c2168f2084c7c39bd03dfaaeb1", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "Makefile": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 53, + "d": 0 + }, + "mne/cov.py": { + "a": 73, + "d": 164 + }, + "mne/forward.py": { + "a": 58, + "d": 43 + }, + "mne/minimum_norm/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 93, + "d": 183 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 27, + "d": 28 + }, + "mne/stats/cluster_level.py": { + "a": 5, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/20.json b/doc/sphinxext/prs/20.json new file mode 100644 index 00000000000..75e99d71ecb --- /dev/null +++ b/doc/sphinxext/prs/20.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "a76fcb1b4198fa89e16be9a33d93af0c0d958177", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/conf.py": { + "a": 1, + "d": 1 + }, + "doc/source/getting_started.rst": { + "a": 9, + "d": 2 + }, + "doc/source/mne-python.rst": { + "a": 2, + "d": 1 + }, + "doc/source/python_tutorial.rst": { + "a": 329, + "d": 0 + }, + "mne/cov.py": { + "a": 4, + "d": 4 + }, + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/fiff/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/evoked.py": { + "a": 5, + "d": 5 + }, + "mne/fiff/open.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/pick.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/proj.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/tree.py": { + "a": 3, + "d": 3 + }, + "mne/forward.py": { + "a": 7, + "d": 7 + }, + "mne/minimum_norm/inverse.py": { + "a": 14, + "d": 14 + 
}, + "mne/source_estimate.py": { + "a": 3, + "d": 3 + }, + "mne/source_space.py": { + "a": 4, + "d": 4 + }, + "mne/surface.py": { + "a": 4, + "d": 4 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2001.json b/doc/sphinxext/prs/2001.json new file mode 100644 index 00000000000..223367bcfe0 --- /dev/null +++ b/doc/sphinxext/prs/2001.json @@ -0,0 +1,167 @@ +{ + "merge_commit_sha": "dd87dbd9b6ee92fc839b6b0defa04c7fa7d28459", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_hdf5.py": { + "a": 11, + "d": 2 + }, + "mne/bem.py": { + "a": 5, + "d": 5 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 4, + "d": 1 + }, + "mne/coreg.py": { + "a": 4, + "d": 2 + }, + "mne/cov.py": { + "a": 5, + "d": 5 + }, + "mne/dipole.py": { + "a": 8, + "d": 5 + }, + "mne/filter.py": { + "a": 43, + "d": 36 + }, + "mne/fixes.py": { + "a": 39, + "d": 40 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 0 + }, + "mne/io/array/array.py": { + "a": 0, + "d": 1 + }, + "mne/io/base.py": { + "a": 5, + "d": 5 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/bads.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ctps_.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 34, + "d": 25 + }, + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/stim.py": { + "a": 7, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 15, + "d": 9 + }, + "mne/stats/parametric.py": { + "a": 12, + "d": 11 + }, + "mne/stats/regression.py": { + "a": 2, + "d": 1 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_import_nesting.py": { + "a": 45, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 8, + "d": 7 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 4 + }, + "mne/utils.py": { + "a": 5, + "d": 6 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2003.json b/doc/sphinxext/prs/2003.json new file mode 100644 index 00000000000..1643c696af3 --- /dev/null +++ b/doc/sphinxext/prs/2003.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "2986294d438096b7a097c33f710e86ecf37e1b2c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/circle.py": { + "a": 5, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 13, + "d": 10 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 14, + "d": 13 + }, + "mne/viz/misc.py": { + "a": 6, + "d": 6 + }, + "mne/viz/montage.py": { + "a": 6, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 14, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 21, + "d": 12 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/2005.json b/doc/sphinxext/prs/2005.json new file mode 100644 index 00000000000..8807129542e --- /dev/null +++ b/doc/sphinxext/prs/2005.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e5ff306445eb68a247ce13994ab8042c1dec261f", + "authors": [ + { + "n": "Yousra Bekhti", + "e": "yousra.bekhti@gmail.com" + } + ], + "changes": { + "examples/plot_meg_eeg_fields_3d.py": { + "a": 11, + "d": 2 + }, + "mne/forward/_field_interpolation.py": { + "a": 10, + "d": 2 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2006.json b/doc/sphinxext/prs/2006.json new file mode 100644 index 00000000000..1179f333d2a --- /dev/null +++ b/doc/sphinxext/prs/2006.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "854782b66b43bc2ed08a6a3a346859edcf51d0f5", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/channels/data/layouts/KIT-AD.lout": { + "a": 209, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2007.json b/doc/sphinxext/prs/2007.json new file mode 100644 index 00000000000..8ace239f8c0 --- /dev/null +++ b/doc/sphinxext/prs/2007.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "614867cae51422ff93e9efce946973ed97a94510", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2008.json b/doc/sphinxext/prs/2008.json new file mode 100644 index 00000000000..5e6c0dc74b9 --- /dev/null +++ b/doc/sphinxext/prs/2008.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "42027519e51a82f538fb25e743ce60343a23dfdf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/201.json b/doc/sphinxext/prs/201.json new file mode 100644 index 00000000000..991f3eed7b4 --- /dev/null +++ b/doc/sphinxext/prs/201.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6b6688df63361b287fc58c331d8efb1c520aaaa1", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "doc/source/manual/analyze.rst": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2010.json b/doc/sphinxext/prs/2010.json new file mode 100644 index 00000000000..72d1bd43198 --- /dev/null +++ b/doc/sphinxext/prs/2010.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bd4ccdea13ece088aa044093b8c864b3e2c9b72f", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2011.json b/doc/sphinxext/prs/2011.json new file mode 100644 index 00000000000..2bfe9ab4808 --- /dev/null +++ b/doc/sphinxext/prs/2011.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "ee041955ef298fd5846cd0145ef95319bb5da86a", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 2 + }, + "examples/plot_interpolate_bad_channels.py": { + "a": 21, + "d": 35 + }, + "mne/channels/channels.py": { + "a": 27, + "d": 3 + }, + "mne/channels/interpolation.py": { + "a": 59, + "d": 35 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 87, + 
"d": 50 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 0 + }, + "mne/evoked.py": { + "a": 8, + "d": 15 + }, + "mne/forward/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 55, + "d": 29 + }, + "mne/io/tests/test_apply_function.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_misc.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2013.json b/doc/sphinxext/prs/2013.json new file mode 100644 index 00000000000..f5f8d5df113 --- /dev/null +++ b/doc/sphinxext/prs/2013.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "04e89a7a68cb1e4b8903c7a6c5f2137ebc441db4", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/report.py": { + "a": 18, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2018.json b/doc/sphinxext/prs/2018.json new file mode 100644 index 00000000000..e7a6b3d6522 --- /dev/null +++ b/doc/sphinxext/prs/2018.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "46bf9a7366d52d1341347fb7bd8e63eb99a0a14a", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/report.py": { + "a": 16, + "d": 5 + }, + "mne/tests/test_report.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/202.json b/doc/sphinxext/prs/202.json new file mode 100644 index 00000000000..1b1dfd281f2 --- /dev/null +++ b/doc/sphinxext/prs/202.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "adb4759575dc14ba10114022ff6147d1ea2b79d2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 13, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/connectivity/README.txt": { + "a": 6, + "d": 0 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 80, + "d": 0 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 133, + "d": 0 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 114, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/connectivity/__init__.py": { + "a": 5, + "d": 0 + }, + "mne/connectivity/spectral.py": { + "a": 964, + "d": 0 + }, + "mne/connectivity/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 183, + "d": 0 + }, + "mne/connectivity/tests/test_utils.py": { + "a": 23, + "d": 0 + }, + "mne/connectivity/utils.py": { + "a": 45, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 53, + "d": 36 + }, + "mne/time_frequency/multitaper.py": { + "a": 47, + "d": 3 + }, + "mne/viz.py": { + "a": 45, + "d": 0 + }, + "setup.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2021.json b/doc/sphinxext/prs/2021.json new file mode 100644 index 00000000000..cd740c3cc43 --- /dev/null +++ b/doc/sphinxext/prs/2021.json @@ -0,0 +1,211 @@ +{ + "merge_commit_sha": "bf5ee19168529488afc201afcea531077fd4882a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 25, + "d": 9 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 4, + "d": 7 + }, + "mne/beamformer/_dics.py": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 3, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 2, + 
"d": 2 + }, + "mne/cov.py": { + "a": 11, + "d": 5 + }, + "mne/decoding/classifier.py": { + "a": 8, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 2, + "d": 0 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 6, + "d": 4 + }, + "mne/dipole.py": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 19, + "d": 8 + }, + "mne/evoked.py": { + "a": 5, + "d": 3 + }, + "mne/filter.py": { + "a": 2, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 4, + "d": 4 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 4 + }, + "mne/io/chpi.py": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 0 + }, + "mne/io/pick.py": { + "a": 2, + "d": 2 + }, + "mne/io/proj.py": { + "a": 2, + "d": 2 + }, + "mne/label.py": { + "a": 4, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/misc.py": { + "a": 7, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 13, + "d": 11 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/client.py": { + "a": 5, + "d": 0 + }, + "mne/realtime/epochs.py": { + "a": 16, + "d": 2 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 7, + "d": 1 + }, + "mne/realtime/mockclient.py": { + "a": 22, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 7, + "d": 3 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/parametric.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 53, + "d": 27 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 47, + "d": 3 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 7, + "d": 13 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 0 + }, + "mne/utils.py": { + "a": 12, + "d": 7 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 5, + "d": 5 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 5, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2023.json b/doc/sphinxext/prs/2023.json new file mode 100644 index 00000000000..d2a52995040 --- /dev/null +++ b/doc/sphinxext/prs/2023.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5d65321e902382642edd4b381c9617e4ced269bf", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2028.json b/doc/sphinxext/prs/2028.json new file mode 100644 index 00000000000..87b9aa7ea01 --- /dev/null +++ b/doc/sphinxext/prs/2028.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "678ae7b0583ce5cb78a4316d24d6c8618f1263dc", + "authors": [ + { + "n": "moutard", + "e": "clement.moutard@gmail.com" + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 4, + "d": 2 + }, + "mne/epochs.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2029.json b/doc/sphinxext/prs/2029.json new file mode 100644 index 00000000000..ec648381b76 --- /dev/null +++ b/doc/sphinxext/prs/2029.json 
@@ -0,0 +1,199 @@ +{ + "merge_commit_sha": "45cdcdaa3842b6adcb38595ae31240899e38ee2b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/contributing.rst": { + "a": 7, + "d": 7 + }, + "doc/source/python_reference.rst": { + "a": 6, + "d": 1 + }, + "doc/source/python_tutorial.rst": { + "a": 1, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 18, + "d": 12 + }, + "doc/sphinxext/gen_rst.py": { + "a": 4, + "d": 2 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/_rap_music.py": { + "a": 12, + "d": 9 + }, + "mne/bem.py": { + "a": 4, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 35, + "d": 4 + }, + "mne/connectivity/effective.py": { + "a": 3, + "d": 4 + }, + "mne/connectivity/spectral.py": { + "a": 49, + "d": 47 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 8, + "d": 4 + }, + "mne/decoding/classifier.py": { + "a": 7, + "d": 6 + }, + "mne/decoding/csp.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/time_gen.py": { + "a": 33, + "d": 28 + }, + "mne/dipole.py": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 14, + "d": 11 + }, + "mne/event.py": { + "a": 8, + "d": 8 + }, + "mne/evoked.py": { + "a": 24, + "d": 8 + }, + "mne/filter.py": { + "a": 34, + "d": 23 + }, + "mne/io/base.py": { + "a": 24, + "d": 18 + }, + "mne/io/fiff/raw.py": { + "a": 4, + "d": 0 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 9, + "d": 0 + }, + "mne/io/reference.py": { + "a": 8, + "d": 4 + }, + "mne/io/tag.py": { + "a": 15, + "d": 2 + }, + "mne/io/tree.py": { + "a": 13, + "d": 5 + }, + "mne/label.py": { + "a": 15, + "d": 13 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 0 + }, + "mne/misc.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 32, + "d": 29 + }, + "mne/realtime/epochs.py": { + "a": 13, + "d": 12 + }, + "mne/report.py": { + "a": 9, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 34, + "d": 9 + }, + "mne/source_space.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 4, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 12, + "d": 0 + }, + "mne/transforms.py": { + "a": 30, + "d": 4 + }, + "mne/utils.py": { + "a": 22, + "d": 7 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 8, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 12, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 14, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/203.json b/doc/sphinxext/prs/203.json new file mode 100644 index 00000000000..9912b4f29ad --- /dev/null +++ b/doc/sphinxext/prs/203.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f5909c52185dbf311daa1238a9b6564a6083db42", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 27, + "d": 47 + }, + "mne/event.py": { + "a": 5, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 80, + "d": 2 + }, + "mne/fiff/tests/test_raw.py": { + "a": 44, + "d": 0 + }, + "mne/proj.py": { + "a": 4, + "d": 7 + }, + "mne/tests/test_proj.py": { + "a": 26, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2030.json b/doc/sphinxext/prs/2030.json new file mode 100644 index 00000000000..25a7ff01d12 --- /dev/null +++ b/doc/sphinxext/prs/2030.json @@ -0,0 +1,19 @@ +{ + 
"merge_commit_sha": "04262d6a9873bace9259bd23eddb7504daf78f23", + "authors": [ + { + "n": "Andrew R. Dykstra", + "e": null + } + ], + "changes": { + "mne/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2031.json b/doc/sphinxext/prs/2031.json new file mode 100644 index 00000000000..1d8c18bd706 --- /dev/null +++ b/doc/sphinxext/prs/2031.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "62b97041e2daf1b9a170c79075519ebe9f9b8098", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2036.json b/doc/sphinxext/prs/2036.json new file mode 100644 index 00000000000..cc100f8c485 --- /dev/null +++ b/doc/sphinxext/prs/2036.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fbf64dcfb0a70a83988eca13741b06fa4b2ac3e1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2037.json b/doc/sphinxext/prs/2037.json new file mode 100644 index 00000000000..c4194e127b2 --- /dev/null +++ b/doc/sphinxext/prs/2037.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fb2ec6ceb116576c22e35caf8a02da1608a52dc5", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/preprocessing/ecg.py": { + "a": 16, + "d": 8 + }, + "mne/preprocessing/eog.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2038.json b/doc/sphinxext/prs/2038.json new file mode 100644 index 00000000000..dba96405a3a --- /dev/null +++ b/doc/sphinxext/prs/2038.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "82400e1a41c098cf4b33bcdaa701e110120d8790", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2039.json b/doc/sphinxext/prs/2039.json new file mode 100644 index 00000000000..63827eca7c2 --- /dev/null +++ b/doc/sphinxext/prs/2039.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c7da1c5c691cfdcd7fdae25ed77b024c0013baae", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 18, + "d": 6 + }, + "mne/tests/test_report.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/204.json b/doc/sphinxext/prs/204.json new file mode 100644 index 00000000000..0a9459bb360 --- /dev/null +++ b/doc/sphinxext/prs/204.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "c647bbba5d1f4d943d7645d3c36a88b1eb528834", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/python_tutorial.rst": { + "a": 11, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 189, + "d": 1 + }, + "mne/event.py": { + "a": 26, + "d": 25 + }, + "mne/fiff/constants.py": { + "a": 3, + "d": 1 + }, + "mne/fiff/tag.py": { + "a": 3, + "d": 3 + }, + "mne/fiff/write.py": { + "a": 4, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 22, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2041.json b/doc/sphinxext/prs/2041.json new file mode 100644 index 00000000000..14cd8cc36e2 --- 
/dev/null +++ b/doc/sphinxext/prs/2041.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dca676e32ad455aa6033e2fcc6bed4ace39acf67", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/channels/data/montages/biosemi-64.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2045.json b/doc/sphinxext/prs/2045.json new file mode 100644 index 00000000000..6b1d67671d0 --- /dev/null +++ b/doc/sphinxext/prs/2045.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4592164ca9c68b519496e9bd9f2f2d2e86b3e7aa", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2046.json b/doc/sphinxext/prs/2046.json new file mode 100644 index 00000000000..8baca43ea63 --- /dev/null +++ b/doc/sphinxext/prs/2046.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "14aeefd220944c77888ab2afc506800c819e0d9b", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2053.json b/doc/sphinxext/prs/2053.json new file mode 100644 index 00000000000..118f3266fd2 --- /dev/null +++ b/doc/sphinxext/prs/2053.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1f47a58f175e42dce98026d554ac2bad2776cef6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2054.json b/doc/sphinxext/prs/2054.json new file mode 100644 index 00000000000..f651fe37aaf --- /dev/null +++ b/doc/sphinxext/prs/2054.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "638d45ffe124e294f9bfe1c37ec00ca65ec2c739", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 22, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 40, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2055.json b/doc/sphinxext/prs/2055.json new file mode 100644 index 00000000000..5468a3f8e0c --- /dev/null +++ b/doc/sphinxext/prs/2055.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6f87fc0fb637f69fe5a7570fd814991ead5da648", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/206.json b/doc/sphinxext/prs/206.json new file mode 100644 index 00000000000..b868d243e40 --- /dev/null +++ b/doc/sphinxext/prs/206.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4f57c7f5193016c996a104eb2abb1831f0a5d692", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/__init__.py": { + "a": 4, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 21, + "d": 3 + }, + "mne/surface.py": { + "a": 52, + "d": 7 + }, + "mne/tests/test_source_space.py": { + "a": 13, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 16, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2061.json b/doc/sphinxext/prs/2061.json new file mode 100644 index 00000000000..816995bdbd9 --- /dev/null +++ b/doc/sphinxext/prs/2061.json 
@@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "bd981b94673e632bacb6624b121a8e15554de798", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 8, + "d": 0 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 4, + "d": 5 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 4, + "d": 5 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 1, + "d": 5 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 1, + "d": 3 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 2, + "d": 6 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 3 + }, + "mne/cov.py": { + "a": 7, + "d": 5 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2063.json b/doc/sphinxext/prs/2063.json new file mode 100644 index 00000000000..16ff376d95a --- /dev/null +++ b/doc/sphinxext/prs/2063.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "589a25cf812a9a3c60dd1bf01e8043b29356a368", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2064.json b/doc/sphinxext/prs/2064.json new file mode 100644 index 00000000000..e05e71f2299 --- /dev/null +++ b/doc/sphinxext/prs/2064.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "9733bcfb6c9e4e90b36d8151f65797f7f267290b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/tests/test_layout.py": { + "a": 3, + "d": 2 + }, + "mne/cov.py": { + "a": 12, + "d": 17 + }, + "mne/defaults.py": { + "a": 49, + "d": 0 + }, + "mne/epochs.py": { + "a": 9, + "d": 8 + }, + "mne/evoked.py": { + "a": 4, + "d": 4 + }, + "mne/io/base.py": { + "a": 4, + "d": 3 + }, + "mne/io/chpi.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_defaults.py": { + "a": 22, + "d": 0 + }, + "mne/tests/test_import_nesting.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 7 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 5, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 6, + "d": 7 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 6, + "d": 7 + }, + "mne/viz/topomap.py": { + "a": 8, + "d": 18 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 44 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/207.json b/doc/sphinxext/prs/207.json new file mode 100644 index 00000000000..8ed6b3f6cb7 --- /dev/null +++ b/doc/sphinxext/prs/207.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "65e9b588020c9a35fddf332a0e04fc2680037329", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 1, + "d": 2 + }, + "examples/plot_topo_channel_epochs_image.py": { + "a": 2, + "d": 3 + }, + "examples/plot_topography.py": { + "a": 1, + "d": 2 + }, + "examples/time_frequency/plot_tfr_topography.py": { + "a": 4, + "d": 6 + }, + "mne/viz.py": { + "a": 366, + "d": 249 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2070.json b/doc/sphinxext/prs/2070.json new file mode 100644 index 00000000000..93992f934c6 --- /dev/null +++ b/doc/sphinxext/prs/2070.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d9b47afbd2eb862910bcebdaa6025a8e3510158e", + "authors": [ + { + "n": 
"Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/channels/data/montages/biosemi128.txt": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/biosemi16.txt": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/biosemi160.txt": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/biosemi256.txt": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/biosemi32.txt": { + "a": 0, + "d": 0 + }, + "mne/channels/data/montages/biosemi64.txt": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_meas_info.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2072.json b/doc/sphinxext/prs/2072.json new file mode 100644 index 00000000000..7ee030eba44 --- /dev/null +++ b/doc/sphinxext/prs/2072.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ec087192b2428cfc93e41c2163627cd04b85af5e", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 16, + "d": 4 + }, + "mne/decoding/time_gen.py": { + "a": 239, + "d": 134 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/decoding.py": { + "a": 114, + "d": 32 + }, + "mne/viz/tests/test_decoding.py": { + "a": 65, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2073.json b/doc/sphinxext/prs/2073.json new file mode 100644 index 00000000000..fcf363004f4 --- /dev/null +++ b/doc/sphinxext/prs/2073.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "ab9421d7b646294ae2e0ff61c0e45474265af60c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 21, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 3 + }, + "mne/fixes.py": { + "a": 108, + "d": 4 + }, + "mne/io/array/tests/test_array.py": { + "a": 5, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 1 + }, + "mne/report.py": { + "a": 16, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 6, + "d": 3 + }, + "mne/source_space.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_import_nesting.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 6, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 71, + "d": 11 + }, + "mne/viz/tests/test_circle.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 11, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2076.json b/doc/sphinxext/prs/2076.json new file mode 100644 index 00000000000..236ced1aa5d --- /dev/null +++ b/doc/sphinxext/prs/2076.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "9e477f1b6bc5d3b8e219d56b8b594bd13694e384", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/sample/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sample/sample.py": { + "a": 10, + "d": 3 + }, + "mne/datasets/somato/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/somato/somato.py": { + "a": 11, + "d": 4 + }, + "mne/datasets/spm_face/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 10, + "d": 3 + }, + "mne/datasets/testing/__init__.py": { + "a": 1, + "d": 1 + }, + 
"mne/datasets/testing/_testing.py": { + "a": 10, + "d": 3 + }, + "mne/datasets/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 15, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 19, + "d": 3 + }, + "mne/io/base.py": { + "a": 1, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 5, + "d": 1 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2077.json b/doc/sphinxext/prs/2077.json new file mode 100644 index 00000000000..0d7e569f4d3 --- /dev/null +++ b/doc/sphinxext/prs/2077.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b3756c391c5ae43155757095c70be66d930146bb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 5, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2078.json b/doc/sphinxext/prs/2078.json new file mode 100644 index 00000000000..4662c1e6d70 --- /dev/null +++ b/doc/sphinxext/prs/2078.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6421d043596d44894f18e07fb49f324399b55e1e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_label.py": { + "a": 1, + "d": 3 + }, + "mne/viz/tests/test_circle.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2079.json b/doc/sphinxext/prs/2079.json new file mode 100644 index 00000000000..c0c8c649af0 --- /dev/null +++ b/doc/sphinxext/prs/2079.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "58bd351c612fd8b7c9232378605c9aff45fa9618", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/proj.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 14, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/208.json b/doc/sphinxext/prs/208.json new file mode 100644 index 00000000000..5a86dc65465 --- /dev/null +++ b/doc/sphinxext/prs/208.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6ee3af2a5ef9cc61e874cc98bdde8ac1ec05d4a6", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + ".gitignore": { + "a": 3, + "d": 0 + }, + "examples/export/README.txt": { + "a": 2, + "d": 2 + }, + "mne/datasets/sample/__init__.py": { + "a": 27, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2080.json b/doc/sphinxext/prs/2080.json new file mode 100644 index 00000000000..d9d98ad8292 --- /dev/null +++ b/doc/sphinxext/prs/2080.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "e7c770592bda0eadd526bdd172dcd3b4d212b5ba", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 84, + "d": 16 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 0 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 171, + "d": 157 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 14, + "d": 10 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 2, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 4, + "d": 60 + }, + "mne/io/kit/kit.py": { + "a": 40, + "d": 61 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2081.json b/doc/sphinxext/prs/2081.json new file mode 100644 index 00000000000..541934ad68b --- /dev/null +++ b/doc/sphinxext/prs/2081.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"a63b7517029e9336818a6b49a99e4e2398ed9b0b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/proj.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2085.json b/doc/sphinxext/prs/2085.json new file mode 100644 index 00000000000..4d56c3adcdf --- /dev/null +++ b/doc/sphinxext/prs/2085.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "315764d7f93f7065c8313869760c4516bcd1fb17", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 2, + "d": 4 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 2, + "d": 5 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 2, + "d": 4 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 3, + "d": 5 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 2, + "d": 3 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 2, + "d": 3 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 4, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 13, + "d": 12 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 45, + "d": 46 + }, + "mne/viz/tests/test_3d.py": { + "a": 34, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2090.json b/doc/sphinxext/prs/2090.json new file mode 100644 index 00000000000..45e9c17753d --- /dev/null +++ b/doc/sphinxext/prs/2090.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f4a1267970723e2dd39b9384465a0f15c80963c6", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 40, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2091.json b/doc/sphinxext/prs/2091.json new file mode 100644 index 00000000000..9dd89731911 --- /dev/null +++ b/doc/sphinxext/prs/2091.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a5dd6823068edbb97a82ef69f750a830b1b9c58c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/fiff/tests/test_raw.py": { + "a": 13, + "d": 12 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 6, + "d": 3 + }, + "mne/tests/test_proj.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2092.json b/doc/sphinxext/prs/2092.json new file mode 100644 index 00000000000..8eaaa77cfb2 --- /dev/null +++ b/doc/sphinxext/prs/2092.json @@ -0,0 +1,139 @@ +{ + "merge_commit_sha": "e3b6f6e2ed8d814a02f9ccd9c6f040b4f274ce4d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 15, + "d": 27 + }, + "doc/sphinxext/gen_rst.py": { + "a": 14, + "d": 2 + }, + "examples/datasets/plot_megsim_data.py": { + "a": 2, + "d": 2 + }, + "examples/datasets/plot_megsim_data_single_trial.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 4, + "d": 2 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 4, + "d": 0 + }, + "examples/inverse/plot_label_activation_from_stc.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_morph_data.py": { + "a": 4, + "d": 2 + }, + "examples/plot_clickable_image.py": { + "a": 2, + "d": 3 + }, + "examples/plot_epochs_to_data_frame.py": { + "a": 1, + "d": 1 + }, + "examples/plot_make_forward.py": { + "a": 1, + "d": 1 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 4, + "d": 0 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_methods_tutorial.py": { + "a": 4, + "d": 0 + }, + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 3, + "d": 4 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_sensor_regression.py": { + "a": 4, + "d": 7 + }, + "examples/time_frequency/plot_stockwell.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_sensors.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 11, + "d": 4 + }, + "mne/datasets/megsim/megsim.py": { + "a": 11, + "d": 4 + }, + "mne/defaults.py": { + "a": 5, + "d": 1 + }, + "mne/evoked.py": { + "a": 4, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 6 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 2 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2094.json b/doc/sphinxext/prs/2094.json new file mode 100644 index 00000000000..290d26b7a11 --- 
/dev/null +++ b/doc/sphinxext/prs/2094.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9e6d4625c08e64ff5b20fef1eceb4be31b360e6c", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 9, + "d": 10 + }, + "mne/viz/tests/test_3d.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2096.json b/doc/sphinxext/prs/2096.json new file mode 100644 index 00000000000..2bf20d59804 --- /dev/null +++ b/doc/sphinxext/prs/2096.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f9d457444743c03f9de3d8c9c6b1bf1df5930762", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 10, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2097.json b/doc/sphinxext/prs/2097.json new file mode 100644 index 00000000000..1ac11f8b596 --- /dev/null +++ b/doc/sphinxext/prs/2097.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c92ff4af7bfb5948ee4f4572ed154a5cc7da4214", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2098.json b/doc/sphinxext/prs/2098.json new file mode 100644 index 00000000000..a846729de31 --- /dev/null +++ b/doc/sphinxext/prs/2098.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "463cd9be1a773cb1f253e9973bf8a860f6a7cbcc", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 10, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/21.json b/doc/sphinxext/prs/21.json new file mode 100644 index 00000000000..3d4626b6003 --- /dev/null +++ b/doc/sphinxext/prs/21.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2d416e02f76540ebc421a2691860732b36b97965", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/pick.py": { + "a": 34, + "d": 1 + }, + "mne/forward.py": { + "a": 195, + "d": 1 + }, + "mne/tests/test_forward.py": { + "a": 77, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/210.json b/doc/sphinxext/prs/210.json new file mode 100644 index 00000000000..75e40b019f2 --- /dev/null +++ b/doc/sphinxext/prs/210.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "afc38a4e4938d9b80e5fee741f1e6fe18fd99223", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "bin/mne_clean_eog_ecg.py": { + "a": 2, + "d": 2 + }, + "doc/source/python_reference.rst": { + "a": 5, + "d": 3 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_ecg_artifacts_from_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_eog_artifacts_from_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "mne/artifacts/__init__.py": { + "a": 8, + "d": 3 + }, + "mne/artifacts/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 6, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/eog.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/peak_finder.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ssp.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/stim.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_peak_finder.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 2, + "d": 0 + }, + "setup.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2100.json b/doc/sphinxext/prs/2100.json new file mode 100644 index 00000000000..d253b53d1b8 --- /dev/null +++ b/doc/sphinxext/prs/2100.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "19f6d545bf279ee3016afac90d08e6e995c48790", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/contributing.rst": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2101.json b/doc/sphinxext/prs/2101.json new file mode 100644 index 00000000000..eee481615e5 --- /dev/null +++ b/doc/sphinxext/prs/2101.json @@ -0,0 +1,195 @@ +{ + "merge_commit_sha": "6860170610654021fc6fbc1431a1215443b93f00", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/README.txt": { + "a": 3, + "d": 5 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/forward/README.txt": { + "a": 6, + "d": 0 + }, + "examples/forward/plot_bem_contour_mri.py": { + "a": 0, + "d": 0 + }, + "examples/forward/plot_coregistration_transform.py": { + "a": 0, + "d": 0 + }, + "examples/forward/plot_decimate_head_surface.py": { + "a": 0, + "d": 0 + }, + "examples/forward/plot_left_cerebellum_volume_source.py": { + "a": 0, + "d": 0 + }, + "examples/forward/plot_make_forward.py": { + "a": 0, + "d": 0 + }, + "examples/forward/plot_read_bem_surfaces.py": { + "a": 0, + "d": 0 + }, + "examples/forward/plot_read_forward.py": { + "a": 0, + "d": 0 + }, + "examples/io/README.txt": { + "a": 5, + "d": 0 + }, + "examples/io/plot_epochs_to_data_frame.py": { + "a": 0, + "d": 0 + }, + "examples/io/plot_objects_from_arrays.py": { + "a": 0, + "d": 0 + }, + "examples/io/plot_read_and_write_raw_data.py": { + "a": 0, + "d": 0 + }, + "examples/io/plot_read_epochs.py": { + "a": 0, + "d": 0 + }, + "examples/io/plot_read_evoked.py": { + "a": 0, + "d": 0 + }, + "examples/io/plot_read_noise_covariance_matrix.py": { + "a": 0, + "d": 0 + }, + "examples/io/read_events.py": { + "a": 0, + "d": 0 + }, + "examples/plot_compute_mne_inverse.py": { + "a": 0, + "d": 0 + }, + "examples/plot_extract_events_from_raw.py": { + "a": 1, + "d": 1 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 13, + "d": 16 + }, + "examples/plot_from_raw_to_multiple_epochs_to_evoked.py": { + "a": 0, + "d": 72 + }, + "examples/preprocessing/plot_define_target_events.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/plot_estimate_covariance_matrix_baseline.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/plot_estimate_covariance_matrix_raw.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/plot_interpolate_bad_channels.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/plot_rereference_eeg.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/plot_shift_evoked.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/plot_virtual_evoked.py": { + "a": 0, + "d": 0 + }, + "examples/simulation/README.txt": { + "a": 5, + "d": 0 + }, + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/README.txt": { + "a": 5, + "d": 0 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_clickable_image.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_evoked_delayed_ssp.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_evoked_erf_erp.py": { + "a": 48, + "d": 0 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_evoked_topomap_delayed_ssp.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_evoked_whitening.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_meg_eeg_fields_3d.py": { + "a": 0, + "d": 0 + }, + 
"examples/visualization/plot_ssp_projs_sensitivity_map.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_ssp_projs_topomaps.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_topo_channel_epochs_image.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_topo_compare_conditions.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_topo_customized.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/plot_topography.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2102.json b/doc/sphinxext/prs/2102.json new file mode 100644 index 00000000000..94e82d67b33 --- /dev/null +++ b/doc/sphinxext/prs/2102.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fd29153900a5c7b1c4f230208df07aa2d3647a2f", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 17, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2103.json b/doc/sphinxext/prs/2103.json new file mode 100644 index 00000000000..8d0e0c11f7a --- /dev/null +++ b/doc/sphinxext/prs/2103.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6397b15a2b978dbf5ad9e4946fb66987b03f74bc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/_static/navy.css": { + "a": 1, + "d": 1 + }, + "doc/source/_templates/layout.html": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2104.json b/doc/sphinxext/prs/2104.json new file mode 100644 index 00000000000..2b23f0cbe0b --- /dev/null +++ b/doc/sphinxext/prs/2104.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "a9d697358ccfff7d67e02df5889c203f7da0daa3", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/plot_corrmap_detection.py": { + "a": 76, + "d": 0 + }, + "mne/cov.py": { + "a": 13, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 321, + "d": 11 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 17, + "d": 4 + }, + "mne/tests/test_import_nesting.py": { + "a": 17, + "d": 12 + }, + "mne/tests/test_utils.py": { + "a": 23, + "d": 2 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2107.json b/doc/sphinxext/prs/2107.json new file mode 100644 index 00000000000..fc228c3a2ba --- /dev/null +++ b/doc/sphinxext/prs/2107.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7c17694fb3c71434a2db347e1c4aa51ae6894f8f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 2, + "d": 2 + }, + "doc/sphinxext/gen_rst.py": { + "a": 10, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2110.json b/doc/sphinxext/prs/2110.json new file mode 100644 index 00000000000..378e213e0e8 --- /dev/null +++ b/doc/sphinxext/prs/2110.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "99f878f8cb0cb53b1c6298966d70916ef3d9153a", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2114.json b/doc/sphinxext/prs/2114.json new file mode 100644 index 00000000000..492befc6b49 --- /dev/null +++ b/doc/sphinxext/prs/2114.json @@ -0,0 +1,63 @@ +{ 
+ "merge_commit_sha": "14f89ce12865319fc7ebe62d669d78797afb727a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 7, + "d": 2 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 24, + "d": 6 + }, + "mne/evoked.py": { + "a": 12, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 11, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 12, + "d": 4 + }, + "mne/viz/tests/test_3d.py": { + "a": 0, + "d": 1 + }, + "mne/viz/tests/test_circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_decoding.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 4, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 98, + "d": 80 + }, + "mne/viz/topomap.py": { + "a": 90, + "d": 48 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2115.json b/doc/sphinxext/prs/2115.json new file mode 100644 index 00000000000..9d6f4514a2c --- /dev/null +++ b/doc/sphinxext/prs/2115.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "7b53ef6364ed3f945d92e2dd0bd9bd0d1f7174c9", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/data/coil_def_Elekta.dat": { + "a": 70, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 568, + "d": 0 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 138, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2118.json b/doc/sphinxext/prs/2118.json new file mode 100644 index 00000000000..da1a99b85ba --- /dev/null +++ b/doc/sphinxext/prs/2118.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "748ae8366a8551b1454d41420e363798316334a6", + "authors": [ + { + "n": "sviter", + "e": "sviter33@gmail.com" + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/212.json b/doc/sphinxext/prs/212.json new file mode 100644 index 00000000000..43fd65bfa08 --- /dev/null +++ b/doc/sphinxext/prs/212.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "12d998b274c81194ce484312aefa1b2300ef4d47", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/datasets/sample/__init__.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2120.json b/doc/sphinxext/prs/2120.json new file mode 100644 index 00000000000..8dbe2f7dafc --- /dev/null +++ b/doc/sphinxext/prs/2120.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a218bf7037f65913fc93648b62c89bf22e73e595", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 20, + "d": 14 + }, + "mne/decoding/time_gen.py": { + "a": 129, + "d": 124 + }, + "mne/viz/decoding.py": { + "a": 9, + "d": 9 + }, + "mne/viz/tests/test_decoding.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2123.json b/doc/sphinxext/prs/2123.json new file mode 100644 index 00000000000..abc40336f4e --- /dev/null +++ b/doc/sphinxext/prs/2123.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "312eed5fbe4ed1eb1c2096f9f0afa4bd4d400a33", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + 
"mne/io/kit/kit.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2126.json b/doc/sphinxext/prs/2126.json new file mode 100644 index 00000000000..c5881481680 --- /dev/null +++ b/doc/sphinxext/prs/2126.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "11a5bcf8bf14efe6de58606ee8b5002d833c550c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 14, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 0 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_event.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2127.json b/doc/sphinxext/prs/2127.json new file mode 100644 index 00000000000..b10cb6f9b73 --- /dev/null +++ b/doc/sphinxext/prs/2127.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "a227123a49db0dc7225453d40151a7bd7f18948b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 145, + "d": 13 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 8 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 9, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 40, + "d": 69 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 22, + "d": 13 + }, + "mne/io/egi/egi.py": { + "a": 5, + "d": 12 + }, + "mne/io/fiff/raw.py": { + "a": 67, + "d": 155 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 18, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 65, + "d": 101 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 12, + "d": 2 + }, + "mne/io/tag.py": { + "a": 5, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 31, + "d": 0 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2128.json b/doc/sphinxext/prs/2128.json new file mode 100644 index 00000000000..15aace96110 --- /dev/null +++ b/doc/sphinxext/prs/2128.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7115364cbe9cdf5d558d42da86df3ff2ccb1440d", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2129.json b/doc/sphinxext/prs/2129.json new file mode 100644 index 00000000000..73c200d6539 --- /dev/null +++ b/doc/sphinxext/prs/2129.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2e98506a5175d94fde6ae296921930cd07151e37", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 3, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2131.json b/doc/sphinxext/prs/2131.json new file mode 100644 index 00000000000..1172a72c2ad --- /dev/null +++ b/doc/sphinxext/prs/2131.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "375d59feb8ec50c43cbc2aaaa94233801dc1345f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 6, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2132.json b/doc/sphinxext/prs/2132.json new file mode 100644 index 00000000000..b5dd8ee975e --- /dev/null +++ b/doc/sphinxext/prs/2132.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c0062c9979c3632d021c853465e382fd70948e5b", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/_images/plot_time_frequency.png": { + "a": 0, + "d": 0 + }, + "doc/source/python_tutorial.rst": { + "a": 12, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2133.json b/doc/sphinxext/prs/2133.json new file mode 100644 index 00000000000..1e8303dd6fb --- /dev/null +++ b/doc/sphinxext/prs/2133.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f9909e5c4df78f188071cefa9938d4bcc00eddde", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2134.json b/doc/sphinxext/prs/2134.json new file mode 100644 index 00000000000..de0071b34f5 --- /dev/null +++ b/doc/sphinxext/prs/2134.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b17b1a69a84fef58a80f63bcfe5a9f8b24ac354c", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2135.json b/doc/sphinxext/prs/2135.json new file mode 100644 index 00000000000..8c221a12e48 --- /dev/null +++ b/doc/sphinxext/prs/2135.json @@ -0,0 +1,207 @@ +{ + "merge_commit_sha": "22ff36bb645dc9e5bb1b57d2e2dac0b808906bb4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 3, + "d": 4 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 27 + }, + "mne/decoding/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 0, + "d": 24 + }, + "mne/decoding/time_gen.py": { + "a": 1, + "d": 102 + }, + "mne/defaults.py": { + "a": 1, + "d": 0 + }, + "mne/dipole.py": { + "a": 1, + "d": 31 + }, + "mne/epochs.py": { + "a": 5, + "d": 161 + }, + "mne/evoked.py": { + "a": 3, + "d": 127 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 7 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 8 + }, + "mne/forward/forward.py": { + "a": 5, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 136 + }, + "mne/io/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 8, + "d": 26 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 3, + "d": 10 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 2, + "d": 36 + }, + "mne/io/pick.py": { + "a": 1, + "d": 65 + }, + 
"mne/io/reference.py": { + "a": 3, + "d": 3 + }, + "mne/io/tests/test_pick.py": { + "a": 8, + "d": 6 + }, + "mne/label.py": { + "a": 2, + "d": 41 + }, + "mne/layouts/__init__.py": { + "a": 0, + "d": 21 + }, + "mne/minimum_norm/inverse.py": { + "a": 12, + "d": 28 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 7, + "d": 8 + }, + "mne/preprocessing/stim.py": { + "a": 0, + "d": 64 + }, + "mne/report.py": { + "a": 2, + "d": 22 + }, + "mne/source_estimate.py": { + "a": 9, + "d": 85 + }, + "mne/source_space.py": { + "a": 2, + "d": 7 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 7 + }, + "mne/surface.py": { + "a": 2, + "d": 8 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 14 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 37 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 32 + }, + "mne/tests/test_evoked.py": { + "a": 3, + "d": 23 + }, + "mne/tests/test_report.py": { + "a": 0, + "d": 7 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 36 + }, + "mne/time_frequency/stft.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 28 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 26 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 9 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 8 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 17 + }, + "setup.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/214.json b/doc/sphinxext/prs/214.json new file mode 100644 index 00000000000..17389ca2c0b --- /dev/null +++ b/doc/sphinxext/prs/214.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4c03a6ca692c4273d856cac772f167bbb1592344", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/evoked.py": { + "a": 14, + "d": 4 + }, + "mne/viz.py": { + "a": 22, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2144.json b/doc/sphinxext/prs/2144.json new file mode 100644 index 00000000000..782dcb61679 --- /dev/null +++ b/doc/sphinxext/prs/2144.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "afad3b1aa7c185523d1fe722d46b5d3dfee9e9e2", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2146.json b/doc/sphinxext/prs/2146.json new file mode 100644 index 00000000000..18e7f02d99e --- /dev/null +++ b/doc/sphinxext/prs/2146.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d58b86edf4161db159c289322ac1d0953a68a67b", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 11, + "d": 11 + }, + "mne/viz/ica.py": { + "a": 18, + "d": 10 + }, + "mne/viz/tests/test_ica.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2148.json b/doc/sphinxext/prs/2148.json new file mode 100644 index 00000000000..82cd909f556 --- /dev/null +++ b/doc/sphinxext/prs/2148.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "36c0b2adfec80ea285b34e9cc0056ed496f484e9", + "authors": [ + { + "n": "jmontoyam", + "e": "montoya.jair.m@gmail.com" + } + ], + "changes": { + "mne/io/pick.py": { + "a": 5, + "d": 0 + }, + "mne/io/tests/test_pick.py": { + "a": 41, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2150.json b/doc/sphinxext/prs/2150.json new file mode 100644 index 00000000000..504bb79cb68 --- /dev/null +++ b/doc/sphinxext/prs/2150.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b671789739123bd552480242e9c027596c5a0661", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2152.json b/doc/sphinxext/prs/2152.json new file mode 100644 index 00000000000..5f5aea3df8b --- /dev/null +++ b/doc/sphinxext/prs/2152.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d702effbc1715cda8de1143eac38dd67b0556740", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_kit2fiff_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2153.json b/doc/sphinxext/prs/2153.json new file mode 100644 index 00000000000..a9b50fadd24 --- /dev/null +++ b/doc/sphinxext/prs/2153.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "60dfdb3ee782a24db58756e5ea9c532af76b7b9a", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 5, + "d": 5 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 6, + "d": 6 + }, + "mne/stats/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/stats/parametric.py": { + "a": 107, + "d": 38 + }, + "mne/stats/tests/test_parametric.py": { + "a": 63, + "d": 40 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2154.json b/doc/sphinxext/prs/2154.json new file mode 100644 index 00000000000..441be69f061 --- /dev/null +++ b/doc/sphinxext/prs/2154.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "37670b52f22b1929efb8f3e247c3ab59b0380ea4", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 4, + "d": 3 + }, + "mne/epochs.py": { + "a": 43, + "d": 7 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 600, + "d": 16 + }, + "mne/viz/raw.py": { + "a": 7, + "d": 51 + }, + "mne/viz/tests/test_epochs.py": { + "a": 49, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 51, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2156.json b/doc/sphinxext/prs/2156.json new file mode 100644 index 00000000000..068b794debb --- /dev/null +++ b/doc/sphinxext/prs/2156.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "de7914af588f7442f8155fa5d90965ac2c327fb6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 3, + "d": 3 + }, + "mne/utils.py": { + "a": 13, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2159.json b/doc/sphinxext/prs/2159.json new file mode 100644 index 00000000000..c3bc32aa6cc --- /dev/null +++ b/doc/sphinxext/prs/2159.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f03d958900a7d84828a32b80d08720ec43326224", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 48, + "d": 13 + }, + "mne/viz/tests/test_ica.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/216.json b/doc/sphinxext/prs/216.json new file mode 100644 index 00000000000..2ec2a90c340 --- /dev/null +++ b/doc/sphinxext/prs/216.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1cdaa936b247c710622d72fe1da496e1795d40cf", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/utils.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2164.json b/doc/sphinxext/prs/2164.json new file mode 100644 index 00000000000..8a8dbd3ae07 --- /dev/null +++ b/doc/sphinxext/prs/2164.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "948fc07b92f88f01c169211066bca2c2300fb654", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/decoding/time_gen.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2166.json b/doc/sphinxext/prs/2166.json new file mode 100644 index 00000000000..c9a2020da6a --- /dev/null +++ b/doc/sphinxext/prs/2166.json @@ -0,0 +1,23 @@ +{ + 
"merge_commit_sha": "51c916fd676c5c5bae4ed86fa6f8fdb7357c8968", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 18, + "d": 16 + }, + "mne/viz/raw.py": { + "a": 7, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 16, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2167.json b/doc/sphinxext/prs/2167.json new file mode 100644 index 00000000000..e0fe02835b0 --- /dev/null +++ b/doc/sphinxext/prs/2167.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "114759c61ec12634a5b0259a69ac42b73ed63e0a", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "mne/forward/_compute_forward.py": { + "a": 406, + "d": 121 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2168.json b/doc/sphinxext/prs/2168.json new file mode 100644 index 00000000000..2eda6eab416 --- /dev/null +++ b/doc/sphinxext/prs/2168.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "ce95d7388d3fc0c739c510dcedcc736eb67af692", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/forward/plot_make_forward.py": { + "a": 2, + "d": 3 + }, + "examples/forward/plot_read_forward.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_mne_crosstalk_function.py": { + "a": 4, + "d": 7 + }, + "examples/inverse/plot_mne_point_spread_function.py": { + "a": 4, + "d": 7 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 3, + "d": 3 + }, + "examples/plot_compute_mne_inverse.py": { + "a": 1, + "d": 2 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 1, + "d": 2 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 26, + "d": 20 + }, + "mne/viz/tests/test_3d.py": { + "a": 45, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2169.json b/doc/sphinxext/prs/2169.json new file mode 100644 index 00000000000..35feae5a39d --- /dev/null +++ b/doc/sphinxext/prs/2169.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7adc444632a1a722c2379fb4001fb0c5002d2f64", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/label.py": { + "a": 4, + "d": 6 + }, + "mne/tests/test_label.py": { + "a": 32, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2171.json b/doc/sphinxext/prs/2171.json new file mode 100644 index 00000000000..daeaf61df0c --- /dev/null +++ b/doc/sphinxext/prs/2171.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cb95a101a066686e58716d1a4b3fd3ab65e5aa5e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/dipole.py": { + "a": 21, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2173.json b/doc/sphinxext/prs/2173.json new file mode 100644 index 00000000000..4d28b718774 --- /dev/null +++ b/doc/sphinxext/prs/2173.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": 
"581bf92ca33aa159cd5a5e509e8f4ccda986f979", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 4, + "d": 1 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/_fake/__init__.py": { + "a": 4, + "d": 0 + }, + "mne/datasets/_fake/_fake.py": { + "a": 25, + "d": 0 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 6, + "d": 52 + }, + "mne/datasets/megsim/megsim.py": { + "a": 5, + "d": 51 + }, + "mne/datasets/megsim/urls.py": { + "a": 20, + "d": 9 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 28, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 105, + "d": 80 + }, + "mne/io/tests/test_chpi.py": { + "a": 28, + "d": 2 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_cov.py": { + "a": 12, + "d": 4 + }, + "mne/tests/test_fixes.py": { + "a": 30, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 27, + "d": 3 + }, + "mne/tests/test_source_estimate.py": { + "a": 41, + "d": 7 + }, + "mne/tests/test_source_space.py": { + "a": 19, + "d": 9 + }, + "mne/viz/tests/test_topo.py": { + "a": 22, + "d": 7 + }, + "mne/viz/tests/test_utils.py": { + "a": 18, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 4 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2179.json b/doc/sphinxext/prs/2179.json new file mode 100644 index 00000000000..051c420055b --- /dev/null +++ b/doc/sphinxext/prs/2179.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a57c5de97f49e26fb923ea3219ec4ebde37bd104", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2181.json b/doc/sphinxext/prs/2181.json new file mode 100644 index 00000000000..275d623d8b3 --- /dev/null +++ b/doc/sphinxext/prs/2181.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "735bdac076a8959db942824964462889a9458a47", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2182.json b/doc/sphinxext/prs/2182.json new file mode 100644 index 00000000000..0277f7ab027 --- /dev/null +++ b/doc/sphinxext/prs/2182.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e3b5485591621f3d3c93f5a1a393ac90723f9e1c", + "authors": [ + { + "n": "alexandre barachant", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 7, + "d": 6 + }, + "mne/io/edf/tests/data/test_edf_stim_channel.edf": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/test_edf_stim_channel.txt": { + "a": 717, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 30, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2184.json b/doc/sphinxext/prs/2184.json new file mode 100644 index 00000000000..44cca04135b --- /dev/null +++ b/doc/sphinxext/prs/2184.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "1c83be5fca87c60b8d45690ad185c1dffa9af47d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 10, + "d": 3 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 2, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 0 + }, + 
"mne/io/edf/edf.py": { + "a": 4, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 0 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2193.json b/doc/sphinxext/prs/2193.json new file mode 100644 index 00000000000..d64ce6909ec --- /dev/null +++ b/doc/sphinxext/prs/2193.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "77a5801f45d9e6ed059a37aace5387faefb530ce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 4, + "d": 1 + }, + "doc/source/python_tutorial.rst": { + "a": 2, + "d": 2 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/__init__.py": { + "a": 5, + "d": 5 + }, + "mne/bem.py": { + "a": 872, + "d": 6 + }, + "mne/coreg.py": { + "a": 3, + "d": 3 + }, + "mne/dipole.py": { + "a": 3, + "d": 3 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 1 + }, + "mne/source_space.py": { + "a": 2, + "d": 1 + }, + "mne/surface.py": { + "a": 8, + "d": 301 + }, + "mne/tests/test_bem.py": { + "a": 155, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2194.json b/doc/sphinxext/prs/2194.json new file mode 100644 index 00000000000..46329bc2a30 --- /dev/null +++ b/doc/sphinxext/prs/2194.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "0dc2ad484fe8d75fe5acbfbbd36f37123c4f6fab", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "Makefile": { + "a": 4, + "d": 2 + }, + "appveyor.yml": { + "a": 1, + "d": 1 + }, + "doc/source/python_reference.rst": { + "a": 8, + "d": 0 + }, + "mne/bem.py": { + "a": 119, + "d": 1 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 62, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 27, + "d": 2 + }, + "mne/utils.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2196.json b/doc/sphinxext/prs/2196.json new file mode 100644 index 00000000000..b6b894765cf --- /dev/null +++ b/doc/sphinxext/prs/2196.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "411a2e36e92bda5c3690f5802f7ed4575793d751", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/dipole.py": { + "a": 57, + "d": 17 + }, + "mne/tests/test_dipole.py": { + "a": 68, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2199.json b/doc/sphinxext/prs/2199.json new file mode 100644 index 00000000000..9c50d92802d --- /dev/null +++ b/doc/sphinxext/prs/2199.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2b21479a3427ee2a2e05691969506e3271e766d5", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/tests/test_cov.py": { + "a": 15, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/22.json b/doc/sphinxext/prs/22.json new file mode 100644 index 00000000000..4056c8b77f1 --- /dev/null +++ b/doc/sphinxext/prs/22.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dfd726a2a47ba83e281a65181dcb546e124c5ece", + 
"authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 28, + "d": 26 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/220.json b/doc/sphinxext/prs/220.json new file mode 100644 index 00000000000..00e51eaaf8c --- /dev/null +++ b/doc/sphinxext/prs/220.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1948d586dd9d108124a4b5bf18e8576af35e4dfe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 15, + "d": 28 + }, + "mne/tests/test_source_space.py": { + "a": 18, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2206.json b/doc/sphinxext/prs/2206.json new file mode 100644 index 00000000000..02b0a20c60b --- /dev/null +++ b/doc/sphinxext/prs/2206.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "8e9ca7cc106d34282392c726816030a482848400", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 2 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/defaults.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 10, + "d": 8 + }, + "mne/viz/epochs.py": { + "a": 294, + "d": 61 + }, + "mne/viz/tests/test_epochs.py": { + "a": 4, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2208.json b/doc/sphinxext/prs/2208.json new file mode 100644 index 00000000000..0848588c0be --- /dev/null +++ b/doc/sphinxext/prs/2208.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e5ae39081645843336abf0ee308f75d17d1a53b8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/fiff/tests/test_raw.py": { + "a": 8, + "d": 2 + }, + "mne/source_space.py": { + "a": 9, + "d": 8 + }, + "mne/utils.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2209.json b/doc/sphinxext/prs/2209.json new file mode 100644 index 00000000000..df9b7fd0156 --- /dev/null +++ b/doc/sphinxext/prs/2209.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "71cfe38ca884273ba61f4b70467c9f1057b5e6be", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 3 + }, + "mne/io/base.py": { + "a": 6, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 10, + "d": 5 + }, + "mne/viz/raw.py": { + "a": 74, + "d": 21 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/221.json b/doc/sphinxext/prs/221.json new file mode 100644 index 00000000000..4693abd6634 --- /dev/null +++ b/doc/sphinxext/prs/221.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2a4bde822752d7e023339949cac44c274cfb2b14", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 19, + "d": 4 + }, + "mne/viz.py": { + "a": 26, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2211.json b/doc/sphinxext/prs/2211.json new file mode 100644 index 00000000000..7eb8ed39a69 --- /dev/null +++ b/doc/sphinxext/prs/2211.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "632bb07ab246beff57c3e56d5322225524287cd1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_tf_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 5, + "d": 5 + }, + "mne/channels/tests/test_layout.py": { + "a": 4, + "d": 4 + }, + "mne/epochs.py": { + "a": 709, + "d": 682 + }, + "mne/evoked.py": { + "a": 13, + "d": 13 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 6, + "d": 1 + }, + "mne/io/base.py": { + "a": 10, + "d": 6 + }, + "mne/io/kit/kit.py": { + "a": 7, + "d": 8 + }, + "mne/io/meas_info.py": { + "a": 10, + "d": 0 + }, + "mne/io/pick.py": { + "a": 4, + "d": 0 + }, + "mne/io/proj.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/epochs.py": { + "a": 3, + "d": 7 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 6, + "d": 9 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 9, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 205, + "d": 15 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2214.json b/doc/sphinxext/prs/2214.json new file mode 100644 index 00000000000..0848f4c2526 --- /dev/null +++ b/doc/sphinxext/prs/2214.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bce71d0e2b90cc7ec8ab2cbecbc29c69100807d8", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 237, + "d": 24 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 16, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2215.json b/doc/sphinxext/prs/2215.json new file mode 100644 index 00000000000..d177c39acba --- /dev/null +++ b/doc/sphinxext/prs/2215.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3a6d15f2fcef509d800b7e4accb6b40a40e20d4d", + "authors": [ + { + "n": "Christopher J. Bailey", + "e": null + } + ], + "changes": { + "mne/gui/_file_traits.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/222.json b/doc/sphinxext/prs/222.json new file mode 100644 index 00000000000..ec6dc9ca6e7 --- /dev/null +++ b/doc/sphinxext/prs/222.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3247e9481fafd6ae835a590ab108bfbbdeb432b5", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2221.json b/doc/sphinxext/prs/2221.json new file mode 100644 index 00000000000..fdb970014f2 --- /dev/null +++ b/doc/sphinxext/prs/2221.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8e080b2b0aa185ff54176688be68d3c515cbfd08", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/reference.py": { + "a": 1, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2222.json b/doc/sphinxext/prs/2222.json new file mode 100644 index 00000000000..903894d9644 --- /dev/null +++ b/doc/sphinxext/prs/2222.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cc54b626f74ab4574cb6b17eed788d132330df60", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 16, + "d": 18 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2228.json b/doc/sphinxext/prs/2228.json new file mode 100644 index 00000000000..7a5b7c8cd7f --- /dev/null +++ b/doc/sphinxext/prs/2228.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "eed10bdf499aee6abc88f92b806bc81b0cd83615", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/stats/multi_comp.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_multi_comp.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2229.json b/doc/sphinxext/prs/2229.json new file mode 100644 index 00000000000..2838e590dd0 --- /dev/null +++ b/doc/sphinxext/prs/2229.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "af858a3a4f9f94a8e462a9c7b626fdd91fe20c7a", + "authors": [ + { + "n": "Christopher J. Bailey", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/io/pick.py": { + "a": 22, + "d": 12 + }, + "mne/minimum_norm/inverse.py": { + "a": 8, + "d": 3 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 3, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 66, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2231.json b/doc/sphinxext/prs/2231.json new file mode 100644 index 00000000000..e0908ffce3a --- /dev/null +++ b/doc/sphinxext/prs/2231.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0529b2ec7fbc3a561d57e7ca29401d77f2b9c948", + "authors": [ + { + "n": "dgwakeman", + "e": "dgwakeman@users.noreply.github.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2232.json b/doc/sphinxext/prs/2232.json new file mode 100644 index 00000000000..1b585cfafb3 --- /dev/null +++ b/doc/sphinxext/prs/2232.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bc57aeaa36ba002499a5de623e187e9a74c8d035", + "authors": [ + { + "n": "Christopher J. 
Bailey", + "e": null + } + ], + "changes": { + "mne/io/fiff/raw.py": { + "a": 15, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2233.json b/doc/sphinxext/prs/2233.json new file mode 100644 index 00000000000..2f6699b9285 --- /dev/null +++ b/doc/sphinxext/prs/2233.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "09e9eb80cee45a920d47593d1ff9adcf0c8f7529", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2234.json b/doc/sphinxext/prs/2234.json new file mode 100644 index 00000000000..7dfd672a58c --- /dev/null +++ b/doc/sphinxext/prs/2234.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7e8845d2de02fbd07460a41e4016d7444f3c7c30", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "examples/visualization/make_report.py": { + "a": 37, + "d": 0 + }, + "mne/report.py": { + "a": 142, + "d": 28 + }, + "mne/tests/test_report.py": { + "a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2238.json b/doc/sphinxext/prs/2238.json new file mode 100644 index 00000000000..dac95a965ea --- /dev/null +++ b/doc/sphinxext/prs/2238.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "90a8c6e841d14fff45faff5c2e4da8ccb5dd5d5f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cuda.py": { + "a": 6, + "d": 2 + }, + "mne/filter.py": { + "a": 40, + "d": 45 + }, + "mne/tests/test_filter.py": { + "a": 19, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2239.json b/doc/sphinxext/prs/2239.json new file mode 100644 index 00000000000..f5936a3865b --- /dev/null +++ b/doc/sphinxext/prs/2239.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "89d70b27a56e13a7a36b167aab129f4695cfd0b2", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/gui/__init__.py": { + "a": 11, + "d": 5 + }, + "mne/gui/_coreg_gui.py": { + "a": 7, + "d": 7 + }, + "mne/gui/_file_traits.py": { + "a": 24, + "d": 23 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 12, + "d": 12 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2241.json b/doc/sphinxext/prs/2241.json new file mode 100644 index 00000000000..bf19dc07cf8 --- /dev/null +++ b/doc/sphinxext/prs/2241.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "fd9a8a72b577aaf4202ec579f2bbcb3b1255e261", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 9, + "d": 1 + }, + "examples/visualization/plot_clickable_image.py": { + "a": 2, + "d": 1 + }, + "examples/visualization/plot_evoked_topomap_delayed_ssp.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_topo_compare_conditions.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 69, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 1 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 71, + "d": 0 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 74, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2244.json b/doc/sphinxext/prs/2244.json new file mode 100644 index 00000000000..9eccc40f2c5 --- /dev/null +++ b/doc/sphinxext/prs/2244.json @@ -0,0 +1,19 
@@ +{ + "merge_commit_sha": "164d92dde41658435d30a7cadf026c91a8e94799", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2247.json b/doc/sphinxext/prs/2247.json new file mode 100644 index 00000000000..d5ea351c3d1 --- /dev/null +++ b/doc/sphinxext/prs/2247.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f51168f03681380089cdf14fda87d3b517c3444d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 2, + "d": 8 + }, + "doc/source/mne_report_tutorial.rst": { + "a": 1, + "d": 1 + }, + "doc/source/python_tutorial.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2249.json b/doc/sphinxext/prs/2249.json new file mode 100644 index 00000000000..615ea95d6c4 --- /dev/null +++ b/doc/sphinxext/prs/2249.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "48dbc81c8f404c139a99ffd6d518b5238f179502", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 21, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/225.json b/doc/sphinxext/prs/225.json new file mode 100644 index 00000000000..f382f807504 --- /dev/null +++ b/doc/sphinxext/prs/225.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "af52f1bb7dc6811c73f7b3f952c328d447f0bdbc", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/epochs.py": { + "a": 22, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2250.json b/doc/sphinxext/prs/2250.json new file mode 100644 index 00000000000..8df697ae1ca --- /dev/null +++ b/doc/sphinxext/prs/2250.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "46e254e6914ea6bbc4833fc6c9c9b7db4ed916cb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "appveyor.yml": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 0 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2251.json b/doc/sphinxext/prs/2251.json new file mode 100644 index 00000000000..6055310cd4c --- /dev/null +++ b/doc/sphinxext/prs/2251.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "a2472257051de3b69115e0f4526e038021cc4afe", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 19, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 250, + "d": 298 + }, + "mne/viz/ica.py": { + "a": 234, + "d": 24 + }, + "mne/viz/raw.py": { + "a": 204, + "d": 331 + }, + "mne/viz/tests/test_epochs.py": { + "a": 5, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 53, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 297, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2252.json b/doc/sphinxext/prs/2252.json new file mode 100644 index 00000000000..850197ac64e --- /dev/null +++ b/doc/sphinxext/prs/2252.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"138afe102c1410b512b6d6322237b9cec5101e84", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainak.jas@telecom-paristech.fr" + } + ], + "changes": { + "mne/report.py": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2261.json b/doc/sphinxext/prs/2261.json new file mode 100644 index 00000000000..3628c9f7889 --- /dev/null +++ b/doc/sphinxext/prs/2261.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c1bcd9a01ed38a3580546ea33550f12a9f49616a", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2262.json b/doc/sphinxext/prs/2262.json new file mode 100644 index 00000000000..0d1eb750980 --- /dev/null +++ b/doc/sphinxext/prs/2262.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d541a3e2e9c3657bdd98c528ef1a67bd96669cbe", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2263.json b/doc/sphinxext/prs/2263.json new file mode 100644 index 00000000000..9f5be1e4010 --- /dev/null +++ b/doc/sphinxext/prs/2263.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "24918d788f982d4798bcf3fefea45ae3f1757662", + "authors": [ + { + "n": "unknown", + "e": "rkmaddox@uw.edu" + } + ], + "changes": { + "mne/filter.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2265.json b/doc/sphinxext/prs/2265.json new file mode 100644 index 00000000000..2a577c22167 --- /dev/null +++ b/doc/sphinxext/prs/2265.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8ed69d9cf13c5d3141217337ea4832f28c948209", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/filter.py": { + "a": 63, + "d": 40 + }, + "mne/tests/test_filter.py": { + "a": 55, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2266.json b/doc/sphinxext/prs/2266.json new file mode 100644 index 00000000000..a68ce1c73f6 --- /dev/null +++ b/doc/sphinxext/prs/2266.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e43d4d4edb3c055b95ad620a28874d8ab22f9d45", + "authors": [ + { + "n": "aestrivex", + "e": "aestrivex@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 7, + "d": 0 + }, + "mne/tests/test_label.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2269.json b/doc/sphinxext/prs/2269.json new file mode 100644 index 00000000000..1c3eab72c72 --- /dev/null +++ b/doc/sphinxext/prs/2269.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bd0d47a28974694b0e7ac8775c226b0802208b69", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/channels/data/neighbors/KIT-157_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/KIT-208_neighb.mat": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2271.json b/doc/sphinxext/prs/2271.json new file mode 100644 index 00000000000..8b43b45dd5d --- /dev/null +++ b/doc/sphinxext/prs/2271.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b2bb5d33777ddab1d036e3568a31484e366cc5a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2272.json b/doc/sphinxext/prs/2272.json new file mode 100644 index 00000000000..7dd229b694d --- 
/dev/null +++ b/doc/sphinxext/prs/2272.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4a1635dc4a4649b668658d02270320d5a20f4ab4", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2274.json b/doc/sphinxext/prs/2274.json new file mode 100644 index 00000000000..3221d0ca891 --- /dev/null +++ b/doc/sphinxext/prs/2274.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e3d7a6db0665877ce573df3f24128b2457978fc1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/data/coil_def_Elekta.dat": { + "a": 70, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 568, + "d": 0 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 138, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2278.json b/doc/sphinxext/prs/2278.json new file mode 100644 index 00000000000..9cdc96ade25 --- /dev/null +++ b/doc/sphinxext/prs/2278.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0b96c85bb2366f33221840d610215f995881a455", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/report.py": { + "a": 19, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2281.json b/doc/sphinxext/prs/2281.json new file mode 100644 index 00000000000..65de30459f7 --- /dev/null +++ b/doc/sphinxext/prs/2281.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "67c4c7e29aeced45d8b32234acf428ed994b8c77", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/evoked.py": { + "a": 13, + "d": 5 + }, + "mne/io/proj.py": { + "a": 6, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 25, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 30, + "d": 12 + }, + "mne/viz/tests/test_topomap.py": { + "a": 8, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 10, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 66, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2285.json b/doc/sphinxext/prs/2285.json new file mode 100644 index 00000000000..dbe719dbce1 --- /dev/null +++ b/doc/sphinxext/prs/2285.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3736825ade6308d6f8831e57f64bab5acc6d2354", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 4, + "d": 9 + }, + "mne/tests/test_filter.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2288.json b/doc/sphinxext/prs/2288.json new file mode 100644 index 00000000000..52ed6cc6bc3 --- /dev/null +++ b/doc/sphinxext/prs/2288.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "16e34444ae185bb1c8dd8961b9341151dc9ac2ee", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2291.json b/doc/sphinxext/prs/2291.json new file mode 100644 index 00000000000..e236d5d275e --- /dev/null +++ b/doc/sphinxext/prs/2291.json @@ -0,0 +1,43 @@ +{ + 
"merge_commit_sha": "53ee179171629bbe41d7a4ebb713414b8fec0e5b", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 216, + "d": 121 + }, + "mne/io/base.py": { + "a": 2, + "d": 10 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 38 + }, + "mne/io/open.py": { + "a": 42, + "d": 1 + }, + "mne/io/write.py": { + "a": 14, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 11, + "d": 0 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2296.json b/doc/sphinxext/prs/2296.json new file mode 100644 index 00000000000..1bf1fe94246 --- /dev/null +++ b/doc/sphinxext/prs/2296.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a4347895e92ab188472b884c26a528fda2214b9e", + "authors": [ + { + "n": "alexandre barachant", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 5, + "d": 0 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 101, + "d": 0 + }, + "examples/preprocessing/plot_xdawn_denoising.py": { + "a": 75, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 145, + "d": 0 + }, + "mne/preprocessing/xdawn.py": { + "a": 557, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2298.json b/doc/sphinxext/prs/2298.json new file mode 100644 index 00000000000..aa2bbd01d0b --- /dev/null +++ b/doc/sphinxext/prs/2298.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "16a78f8dcc00a874300e02ce39f955315a509962", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/preprocessing/plot_resample.py": { + "a": 85, + "d": 0 + }, + "mne/epochs.py": { + "a": 19, + "d": 6 + }, + "mne/event.py": { + "a": 5, + "d": 4 + }, + "mne/filter.py": { + "a": 55, + "d": 0 + }, + "mne/io/base.py": { + "a": 91, + "d": 37 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 62, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 15, + "d": 6 + }, + "mne/tests/test_filter.py": { + "a": 24, + "d": 3 + }, + "mne/tests/test_utils.py": { + "a": 2, + "d": 2 + }, + "mne/utils.py": { + "a": 34, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/23.json b/doc/sphinxext/prs/23.json new file mode 100644 index 00000000000..86784466716 --- /dev/null +++ b/doc/sphinxext/prs/23.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0722fe5803341001511200d07621974d5ed84be2", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/230.json b/doc/sphinxext/prs/230.json new file mode 100644 index 00000000000..993402f2110 --- /dev/null +++ b/doc/sphinxext/prs/230.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "8a69346b2aaf878d8f7fc931e35ccf02c7d08208", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 23, + "d": 4 + }, + "mne/fiff/evoked.py": { + "a": 1, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 7, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 17, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 11, + "d": 5 + }, + "mne/tests/test_source_estimate.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2301.json 
b/doc/sphinxext/prs/2301.json new file mode 100644 index 00000000000..a5e48911200 --- /dev/null +++ b/doc/sphinxext/prs/2301.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "71d8694ffc2bf47f7153d6d9461ff187ed12b30e", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/io/pick.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2302.json b/doc/sphinxext/prs/2302.json new file mode 100644 index 00000000000..6e190cdaeb9 --- /dev/null +++ b/doc/sphinxext/prs/2302.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "d1aba302a4758085eaeb828ccdc3089af887df3b", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 66, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 31, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 37, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 30, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 37, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2303.json b/doc/sphinxext/prs/2303.json new file mode 100644 index 00000000000..b26533b3940 --- /dev/null +++ b/doc/sphinxext/prs/2303.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c46dabe0265c3681ccfac403cc5d85af907a7486", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 5, + "d": 3 + }, + "mne/viz/ica.py": { + "a": 46, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2304.json b/doc/sphinxext/prs/2304.json new file mode 100644 index 00000000000..fa2f65f3cb1 --- /dev/null +++ b/doc/sphinxext/prs/2304.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "174edbd8b46d30ce234c21c3d5773cf24447c34d", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/stats/plot_linear_regression_raw.py": { + "a": 64, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 10 + }, + "mne/stats/regression.py": { + "a": 205, + "d": 2 + }, + "mne/stats/tests/test_regression.py": { + "a": 46, + "d": 2 + }, + "mne/utils.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2305.json b/doc/sphinxext/prs/2305.json new file mode 100644 index 00000000000..c2cd01117e9 --- /dev/null +++ b/doc/sphinxext/prs/2305.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2a57065d8e9480f215d745209772eeefb629655a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 92, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2309.json b/doc/sphinxext/prs/2309.json new file mode 100644 index 00000000000..fdfafae5da7 --- /dev/null +++ b/doc/sphinxext/prs/2309.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "54bae3c498edebd04237de5490bc68ff1208004b", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 21, + "d": 1 + }, + 
"mne/time_frequency/tfr.py": { + "a": 47, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/231.json b/doc/sphinxext/prs/231.json new file mode 100644 index 00000000000..7b8e5d92ac5 --- /dev/null +++ b/doc/sphinxext/prs/231.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c495307786db777ecacf0d1ed2465e3de74448a6", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/sphinxext/gen_rst.py": { + "a": 127, + "d": 2 + }, + "examples/inverse/plot_dipole_fit_result.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2318.json b/doc/sphinxext/prs/2318.json new file mode 100644 index 00000000000..e7f9bc45cde --- /dev/null +++ b/doc/sphinxext/prs/2318.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ddb68713863e62e3fdc70305b4f69bfa57cc34fd", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 36, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 24, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2319.json b/doc/sphinxext/prs/2319.json new file mode 100644 index 00000000000..3e473a5a98a --- /dev/null +++ b/doc/sphinxext/prs/2319.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "569ca6a7d38e6e13af2dd73eb1052d8123bf4ec0", + "authors": [ + { + "n": "jmontoyam", + "e": "montoya.jair.m@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 15, + "d": 1 + }, + "mne/utils.py": { + "a": 38, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2320.json b/doc/sphinxext/prs/2320.json new file mode 100644 index 00000000000..9235f671b1b --- /dev/null +++ b/doc/sphinxext/prs/2320.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "44800aa75a0c7ef3b0b5352de51f722c382e64c3", + "authors": [ + { + "n": "alexandre barachant", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 145, + "d": 65 + }, + "mne/decoding/csp.py": { + "a": 21, + "d": 69 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 75 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2322.json b/doc/sphinxext/prs/2322.json new file mode 100644 index 00000000000..450212344d8 --- /dev/null +++ b/doc/sphinxext/prs/2322.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a0696cffd123b4bacc4bce498d230b71dee98fcd", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 68, + "d": 18 + }, + "mne/viz/tests/test_topomap.py": { + "a": 15, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 9, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 96, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2325.json b/doc/sphinxext/prs/2325.json new file mode 100644 index 00000000000..b5c4dde174d --- /dev/null +++ b/doc/sphinxext/prs/2325.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d1d1368712265209473b80adc6c498614119008e", + "authors": [ + { + "n": "Alan Leggitt", + "e": "leggitta3@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 7, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2327.json b/doc/sphinxext/prs/2327.json new file mode 100644 index 00000000000..6c2044d6e1a --- /dev/null +++ 
b/doc/sphinxext/prs/2327.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "7ce3774a19b0e2faef28bc4bc4774ca3f1462607", + "authors": [ + { + "n": "jmontoyam", + "e": "montoya.jair.m@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 0 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/infomax_.py": { + "a": 48, + "d": 16 + }, + "mne/preprocessing/tests/data/eeglab_infomax_results_eeg_data.mat": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/data/eeglab_infomax_results_meg_data.mat": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 178, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2329.json b/doc/sphinxext/prs/2329.json new file mode 100644 index 00000000000..a9512aa8a80 --- /dev/null +++ b/doc/sphinxext/prs/2329.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0812b220e96e610764f6ecc7ba6de21895bc548d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/233.json b/doc/sphinxext/prs/233.json new file mode 100644 index 00000000000..796e7af937a --- /dev/null +++ b/doc/sphinxext/prs/233.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8347d3e4ff875e68826b54969600678d6b87f9d3", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_transformation.py": { + "a": 33, + "d": 0 + }, + "mne/transforms.py": { + "a": 81, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2335.json b/doc/sphinxext/prs/2335.json new file mode 100644 index 00000000000..0d8c00f61bd --- /dev/null +++ b/doc/sphinxext/prs/2335.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "888272a5d4d272a8feef8c46958386433107c572", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2337.json b/doc/sphinxext/prs/2337.json new file mode 100644 index 00000000000..9542649d72a --- /dev/null +++ b/doc/sphinxext/prs/2337.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f4e04ad386c56677f3610129f6f629f3f6c4a7cb", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 4 + }, + "mne/epochs.py": { + "a": 39, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2338.json b/doc/sphinxext/prs/2338.json new file mode 100644 index 00000000000..1cf31de8479 --- /dev/null +++ b/doc/sphinxext/prs/2338.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8e40657cd7131d09dbcfbcafaad7613faf60a627", + "authors": [ + { + "n": "Ross Maddox", + "e": "rkmaddox@uw.edu" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 32, + "d": 10 + }, + "mne/io/brainvision/tests/data/test.vmrk": { + "a": 3, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 43, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2339.json b/doc/sphinxext/prs/2339.json new file mode 100644 index 00000000000..19809fbe6ef --- /dev/null +++ b/doc/sphinxext/prs/2339.json @@ -0,0 +1,815 @@ +{ + "merge_commit_sha": 
"77ba8d1838c047aabb65262b32efe1515901eb10", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 3, + "d": 3 + }, + "Makefile": { + "a": 3, + "d": 3 + }, + "doc/Makefile": { + "a": 28, + "d": 28 + }, + "doc/_images/mne_helmet.png": { + "a": 0, + "d": 0 + }, + "doc/_static/branch_dropdown.png": { + "a": 0, + "d": 0 + }, + "doc/_static/favicon.ico": { + "a": 0, + "d": 0 + }, + "doc/_static/flow_diagram.svg": { + "a": 204, + "d": 0 + }, + "doc/_static/forking_button.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institutions.png": { + "a": 0, + "d": 0 + }, + "doc/_static/mne_logo.png": { + "a": 0, + "d": 0 + }, + "doc/_static/mne_logo_small.png": { + "a": 0, + "d": 0 + }, + "doc/_static/pull_button.png": { + "a": 0, + "d": 0 + }, + "doc/_static/style.css": { + "a": 46, + "d": 0 + }, + "doc/_templates/class.rst": { + "a": 0, + "d": 0 + }, + "doc/_templates/function.rst": { + "a": 0, + "d": 0 + }, + "doc/_templates/layout.html": { + "a": 4, + "d": 21 + }, + "doc/advanced_setup.rst": { + "a": 0, + "d": 0 + }, + "doc/cite.rst": { + "a": 4, + "d": 6 + }, + "doc/conf.py": { + "a": 107, + "d": 52 + }, + "doc/contributing.rst": { + "a": 10, + "d": 5 + }, + "doc/customizing_git.rst": { + "a": 0, + "d": 0 + }, + "doc/faq.rst": { + "a": 3, + "d": 1 + }, + "doc/getting_started.rst": { + "a": 321, + "d": 0 + }, + "doc/git_links.inc": { + "a": 0, + "d": 0 + }, + "doc/index.rst": { + "a": 136, + "d": 0 + }, + "doc/known_projects.inc": { + "a": 0, + "d": 0 + }, + "doc/links.inc": { + "a": 0, + "d": 0 + }, + "doc/manual/appendix/bem_model.rst": { + "a": 24, + "d": 49 + }, + "doc/manual/appendix/c_EULA.rst": { + "a": 8, + "d": 2 + }, + "doc/manual/appendix/c_misc.rst": { + "a": 99, + "d": 0 + }, + "doc/manual/appendix/c_release_notes.rst": { + "a": 91, + "d": 114 + }, + "doc/manual/appendix/martinos.rst": { + "a": 117, + "d": 0 + }, + "doc/manual/c_reference.rst": { + "a": 6442, + "d": 0 + }, + "doc/manual/cookbook.rst": { + "a": 412, + "d": 0 + }, + "doc/manual/datasets.rst": { + "a": 14, + "d": 75 + }, + "doc/manual/gui/analyze.rst": { + "a": 12, + "d": 85 + }, + "doc/manual/gui/browse.rst": { + "a": 31, + "d": 426 + }, + "doc/manual/gui/mne_analyze/MNE_preferences.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/adjust_alignment.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/adjust_lights.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/adjust_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/cont_hpi_data.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/dipole_list.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/dipole_parameters.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/dipoles_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/epoch_selector.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/field_mapping_pref.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/file_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/hardcopy_controls.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/help_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/image_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/label_list.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/labels_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/main_window.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/movie_dialog.png": { + 
"a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/mri_viewer.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/open_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/overlay_management.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/patch_selection_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/save_label_timecourse.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/scales_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/surface_controls.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/surface_selection_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/timecourse_manager.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/view_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/viewer.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/viewer_options.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/visualize_hpi.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/windows_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/adjust_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/adust_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/average_pref.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/channel_selection.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/file_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/filter_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/help_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/main.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/manage_averages_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/new_selection.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/new_ssp.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/open_dialog copy.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/open_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/process_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/process_menu2.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/scales_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/scales_dialog2.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/toolbar.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-0.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-1.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-10.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-11.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-12.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-13.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-14.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-15.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-16.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-17.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-2.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-3.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-4.png": { + "a": 0, + "d": 0 
+ }, + "doc/manual/gui/mne_browse_raw/windows_menu-5.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-6.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-7.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-8.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-9.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/index.rst": { + "a": 142, + "d": 0 + }, + "doc/manual/io.rst": { + "a": 343, + "d": 0 + }, + "doc/manual/matlab.rst": { + "a": 13, + "d": 7 + }, + "doc/manual/pics/CoordinateSystems.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/Digitizer-example.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/Flowchart.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/HeadCS.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/ICA_primer.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/cover.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/flat.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/morphed.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/neuromag.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/orig.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/proj-off-on.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/title_page.png": { + "a": 0, + "d": 0 + }, + "doc/manual/preprocessing/bads.rst": { + "a": 3, + "d": 0 + }, + "doc/manual/preprocessing/filter.rst": { + "a": 3, + "d": 0 + }, + "doc/manual/preprocessing/ica.rst": { + "a": 118, + "d": 0 + }, + "doc/manual/preprocessing/overview.rst": { + "a": 3, + "d": 0 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 128, + "d": 0 + }, + "doc/manual/source_localization/covariance.rst": { + "a": 5, + "d": 0 + }, + "doc/manual/source_localization/forward.rst": { + "a": 28, + "d": 476 + }, + "doc/manual/source_localization/inverse.rst": { + "a": 520, + "d": 0 + }, + "doc/manual/source_localization/morph.rst": { + "a": 141, + "d": 0 + }, + "doc/manual/statistics.rst": { + "a": 100, + "d": 0 + }, + "doc/manual/time_frequency.rst": { + "a": 3, + "d": 0 + }, + "doc/manual/visualization.rst": { + "a": 3, + "d": 0 + }, + "doc/mne_cpp.rst": { + "a": 0, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 5, + "d": 11 + }, + "doc/references.rst": { + "a": 0, + "d": 0 + }, + "doc/source/_static/default.css": { + "a": 0, + "d": 515 + }, + "doc/source/_static/institutions.png": { + "a": 0, + "d": 0 + }, + "doc/source/_static/navy.css": { + "a": 0, + "d": 515 + }, + "doc/source/_templates/sidebar.html": { + "a": 0, + "d": 5 + }, + "doc/source/getting_started.rst": { + "a": 0, + "d": 102 + }, + "doc/source/index.rst": { + "a": 0, + "d": 57 + }, + "doc/source/manual.rst": { + "a": 0, + "d": 28 + }, + "doc/source/manual/AppB.rst": { + "a": 0, + "d": 294 + }, + "doc/source/manual/AppInstall.rst": { + "a": 0, + "d": 174 + }, + "doc/source/manual/convert.rst": { + "a": 0, + "d": 2312 + }, + "doc/source/manual/cookbook.rst": { + "a": 0, + "d": 1066 + }, + "doc/source/manual/intro.rst": { + "a": 0, + "d": 45 + }, + "doc/source/manual/list.rst": { + "a": 0, + "d": 439 + }, + "doc/source/manual/mne.rst": { + "a": 0, + "d": 1323 + }, + "doc/source/manual/morph.rst": { + "a": 0, + "d": 409 + }, + "doc/source/manual/pics/Averaging-flowchart.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/utilities.rst": { + "a": 0, + "d": 1402 + }, + "doc/source/mne-python.rst": { + "a": 0, + "d": 29 + }, + "doc/source/python_tutorial.rst": { + "a": 0, + "d": 398 + }, + 
"doc/sphinxext/commands.py": { + "a": 80, + "d": 0 + }, + "doc/sphinxext/gen_rst.py": { + "a": 0, + "d": 1222 + }, + "doc/this_project.inc": { + "a": 0, + "d": 0 + }, + "doc/tutorials.rst": { + "a": 84, + "d": 0 + }, + "doc/tutorials/_images/plot_read_and_write_raw_data.png": { + "a": 0, + "d": 0 + }, + "doc/tutorials/_images/plot_read_epochs.png": { + "a": 0, + "d": 0 + }, + "doc/tutorials/_images/plot_time_frequency.png": { + "a": 0, + "d": 0 + }, + "doc/tutorials/command_line.rst": { + "a": 0, + "d": 0 + }, + "doc/tutorials/report.rst": { + "a": 3, + "d": 3 + }, + "doc/upload_html.sh": { + "a": 1, + "d": 1 + }, + "doc/utils/extract_config_doc.py": { + "a": 0, + "d": 73 + }, + "doc/utils/lut2sphinxtbl.py": { + "a": 0, + "d": 65 + }, + "doc/utils/make_clean_config.py": { + "a": 0, + "d": 30 + }, + "doc/whats_new.rst": { + "a": 6, + "d": 7 + }, + "examples/README.txt": { + "a": 2, + "d": 2 + }, + "examples/forward/plot_decimate_head_surface.py": { + "a": 9, + "d": 9 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 0, + "d": 81 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 47, + "d": 0 + }, + "mne/connectivity/spectral.py": { + "a": 6, + "d": 7 + }, + "mne/decoding/time_gen.py": { + "a": 18, + "d": 20 + }, + "mne/io/base.py": { + "a": 4, + "d": 4 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 98, + "d": 1 + }, + "mne/label.py": { + "a": 8, + "d": 7 + }, + "mne/selection.py": { + "a": 12, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 5 + }, + "mne/stats/parametric.py": { + "a": 15, + "d": 15 + }, + "mne/viz/epochs.py": { + "a": 6, + "d": 4 + }, + "tutorials/README.txt": { + "a": 4, + "d": 0 + }, + "tutorials/plot_cluster_1samp_test_time_frequency.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_cluster_methods_tutorial.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_cluster_stats_spatio_temporal.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_cluster_stats_time_frequency.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_creating_data_structures.py": { + "a": 180, + "d": 0 + }, + "tutorials/plot_epochs_objects.py": { + "a": 103, + "d": 0 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 11, + "d": 11 + }, + "tutorials/plot_info.py": { + "a": 90, + "d": 0 + }, + "tutorials/plot_introduction.py": { + "a": 373, + "d": 0 + }, + "tutorials/plot_modifying_data_inplace.py": { + "a": 74, + "d": 0 + }, + "tutorials/plot_raw_objects.py": { + "a": 133, + "d": 0 + }, + "tutorials/plot_source_localization_basics.py": { + "a": 98, + "d": 0 + }, + "tutorials/plot_spatio_temporal_cluster_stats_sensor.py": { + "a": 40, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2340.json b/doc/sphinxext/prs/2340.json new file mode 100644 index 00000000000..34ca64c5958 --- /dev/null +++ b/doc/sphinxext/prs/2340.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "11af4dcfc9d7a5a00d9b033a9a21c1f93ab29beb", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/bem.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2343.json 
b/doc/sphinxext/prs/2343.json new file mode 100644 index 00000000000..25988835c45 --- /dev/null +++ b/doc/sphinxext/prs/2343.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "010df1aac1b488d4a25312a83e0454650cba554d", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2348.json b/doc/sphinxext/prs/2348.json new file mode 100644 index 00000000000..e67ce168146 --- /dev/null +++ b/doc/sphinxext/prs/2348.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0497f8fdce9ec6e8bc6b0e854fa10af545538276", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 3, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/235.json b/doc/sphinxext/prs/235.json new file mode 100644 index 00000000000..d59fbc142ef --- /dev/null +++ b/doc/sphinxext/prs/235.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2ff1807312d587d03da2976b30342bf14647e6ab", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 2, + "d": 1 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 2, + "d": 1 + }, + "mne/fixes.py": { + "a": 13, + "d": 0 + }, + "mne/tests/test_fixes.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2350.json b/doc/sphinxext/prs/2350.json new file mode 100644 index 00000000000..cfda9ccba92 --- /dev/null +++ b/doc/sphinxext/prs/2350.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "792306dfa81d540d12abb4dbfed1e3736adc85b5", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 25, + "d": 4 + }, + "mne/io/tests/test_raw.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2351.json b/doc/sphinxext/prs/2351.json new file mode 100644 index 00000000000..f8bf6d472f7 --- /dev/null +++ b/doc/sphinxext/prs/2351.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "630ef2fb56b5e934d0102381a0b12a20939dd05e", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 8, + "d": 7 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2352.json b/doc/sphinxext/prs/2352.json new file mode 100644 index 00000000000..225555e1674 --- /dev/null +++ b/doc/sphinxext/prs/2352.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0b60da7036b68f5467a45646df1366d16edcde0d", + "authors": [ + { + "n": "Romain Trachel", + "e": "trachelr@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/csp.py": { + "a": 7, + "d": 2 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2355.json b/doc/sphinxext/prs/2355.json new file mode 100644 index 00000000000..69a768dfe84 --- /dev/null +++ b/doc/sphinxext/prs/2355.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "16ac16d99850f4cc473ccceeab0fc56a10b7b196", + "authors": [ + { + "n": "Mark Wronkiewicz", + 
"e": "wronk.mark@gmail.com" + } + ], + "changes": { + "mne/dipole.py": { + "a": 16, + "d": 4 + }, + "mne/forward/_make_forward.py": { + "a": 149, + "d": 71 + }, + "mne/io/write.py": { + "a": 12, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 25, + "d": 86 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 27, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2358.json b/doc/sphinxext/prs/2358.json new file mode 100644 index 00000000000..50dd57b6f9d --- /dev/null +++ b/doc/sphinxext/prs/2358.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "b6e4658a9a98e0ee934d6e930347406ff6910cc4", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 16, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 74, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/brainstorm/__init__.py": { + "a": 4, + "d": 0 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 60, + "d": 0 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 59, + "d": 0 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 51, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 6, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 46, + "d": 3 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2359.json b/doc/sphinxext/prs/2359.json new file mode 100644 index 00000000000..546ea85ad5e --- /dev/null +++ b/doc/sphinxext/prs/2359.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bc385512f74b42ac67af3802e8c358d3f594ef58", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2361.json b/doc/sphinxext/prs/2361.json new file mode 100644 index 00000000000..042b859c9cc --- /dev/null +++ b/doc/sphinxext/prs/2361.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e0f8417275e3650500832ba40020509058f971f9", + "authors": [ + { + "n": "Romain Trachel", + "e": "trachelr@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 2, + "d": 2 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 3, + "d": 3 + }, + "examples/realtime/rt_feedback_server.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 9, + "d": 9 + }, + "mne/decoding/transformer.py": { + "a": 14, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2364.json b/doc/sphinxext/prs/2364.json new file mode 100644 index 00000000000..5a998b53ac7 --- /dev/null +++ b/doc/sphinxext/prs/2364.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ae47e17db8e9575c0fcaede1224e09a640050742", + "authors": [ + { + "n": "Natalie Klein", + "e": null + } + ], + "changes": { + "mne/event.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_event.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2366.json b/doc/sphinxext/prs/2366.json new file mode 100644 index 00000000000..d4d6c806e02 --- /dev/null +++ b/doc/sphinxext/prs/2366.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "8ffe2c39146c25dadd822a7e587d0cd018065828", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": 
"jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 2 + }, + "mne/defaults.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 64, + "d": 21 + }, + "mne/viz/ica.py": { + "a": 0, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2367.json b/doc/sphinxext/prs/2367.json new file mode 100644 index 00000000000..849f1d977ed --- /dev/null +++ b/doc/sphinxext/prs/2367.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9795ce87b9c16e0b1e9bf7073f5719ab8207d3d6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_rap_music.py": { + "a": 3, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 17, + "d": 7 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 5, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2369.json b/doc/sphinxext/prs/2369.json new file mode 100644 index 00000000000..12a3ba147c4 --- /dev/null +++ b/doc/sphinxext/prs/2369.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f902d090828b02c687532e34cff5ba5a26046b24", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "mne/forward/_make_forward.py": { + "a": 5, + "d": 5 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/237.json b/doc/sphinxext/prs/237.json new file mode 100644 index 00000000000..83d1a661ffd --- /dev/null +++ b/doc/sphinxext/prs/237.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "968c410c51800a6f26da6d798dc1cb2033c1f3d0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/proj.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2374.json b/doc/sphinxext/prs/2374.json new file mode 100644 index 00000000000..ca91623e79a --- /dev/null +++ b/doc/sphinxext/prs/2374.json @@ -0,0 +1,139 @@ +{ + "merge_commit_sha": "08d49b8275a28665b6ca9dc6a69eb373a1aa417f", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 4, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 9, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 15, + "d": 3 + }, + "mne/commands/mne_show_fiff.py": { + "a": 27, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 8, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 4 + }, + "mne/data/coil_def_Elekta.dat": { + "a": 7, + "d": 3 + }, + "mne/decoding/base.py": { + "a": 4, + "d": 3 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 29, + "d": 31 + }, + "mne/forward/_make_forward.py": { + "a": 32, + "d": 15 + }, + "mne/io/constants.py": { + "a": 5, + "d": 2 + }, + "mne/io/open.py": { + "a": 9, + "d": 1 + }, + "mne/io/pick.py": { + "a": 1, + "d": 3 + }, + "mne/io/proc_history.py": { + "a": 35, + "d": 13 + }, + "mne/io/tests/test_apply_function.py": { + "a": 6, + "d": 12 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 698, + "d": 287 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 7, + "d": 10 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 344, + 
"d": 144 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 12, + "d": 15 + }, + "mne/tests/test_chpi.py": { + "a": 4, + "d": 10 + }, + "mne/tests/test_dipole.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_filter.py": { + "a": 27, + "d": 39 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 3, + "d": 3 + }, + "mne/transforms.py": { + "a": 10, + "d": 9 + }, + "mne/utils.py": { + "a": 20, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2375.json b/doc/sphinxext/prs/2375.json new file mode 100644 index 00000000000..51db349cc69 --- /dev/null +++ b/doc/sphinxext/prs/2375.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b97e8732f748b18ddedf526c9673e78ab9620a5a", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 3, + "d": 1 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/io/pick.py": { + "a": 41, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 5, + "d": 1 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2376.json b/doc/sphinxext/prs/2376.json new file mode 100644 index 00000000000..b72402af2b9 --- /dev/null +++ b/doc/sphinxext/prs/2376.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bb6b1340ca3676e0b29e1d726fe300f643a3b65d", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 18, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2378.json b/doc/sphinxext/prs/2378.json new file mode 100644 index 00000000000..2b9617f23da --- /dev/null +++ b/doc/sphinxext/prs/2378.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "26e88fbe56e9c5675d3a66a9cbe3c47d8cc21c7d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/flow_diagram.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 2, + "d": 1 + }, + "examples/realtime/ftclient_rt_average.py": { + "a": 2, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 4, + "d": 3 + }, + "mne/epochs.py": { + "a": 5, + "d": 2 + }, + "mne/event.py": { + "a": 2, + "d": 1 + }, + "mne/fixes.py": { + "a": 0, + "d": 2 + }, + "mne/forward/_lead_dots.py": { + "a": 14, + "d": 10 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 3, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/infomax_.py": { + "a": 2, + "d": 4 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 98, + "d": 55 + }, + "mne/preprocessing/xdawn.py": { + "a": 5, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 11, + "d": 15 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + 
"mne/tests/test_epochs.py": { + "a": 2, + "d": 1 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 2, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 16, + "d": 10 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2380.json b/doc/sphinxext/prs/2380.json new file mode 100644 index 00000000000..b4be7617df8 --- /dev/null +++ b/doc/sphinxext/prs/2380.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b264aef94db83da0dd1b274f9e5d6055651e00a0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 33, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2382.json b/doc/sphinxext/prs/2382.json new file mode 100644 index 00000000000..7272dacd3dd --- /dev/null +++ b/doc/sphinxext/prs/2382.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "06911f97a88915c02490bd35045dcd133c6babc9", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 27, + "d": 38 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2384.json b/doc/sphinxext/prs/2384.json new file mode 100644 index 00000000000..c4dad9579a4 --- /dev/null +++ b/doc/sphinxext/prs/2384.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7deb3ba770acc2aca1006259d599fc957a970a0c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/Makefile": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2386.json b/doc/sphinxext/prs/2386.json new file mode 100644 index 00000000000..8b21d841803 --- /dev/null +++ b/doc/sphinxext/prs/2386.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8121b2d60a751b506585cc91d648f7b0052c124e", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_sensors.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 6, + "d": 7 + }, + "mne/time_frequency/tfr.py": { + "a": 14, + "d": 13 + }, + "mne/viz/topomap.py": { + "a": 55, + "d": 56 + }, + "mne/viz/utils.py": { + "a": 11, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2387.json b/doc/sphinxext/prs/2387.json new file mode 100644 index 00000000000..8a08e204647 --- /dev/null +++ b/doc/sphinxext/prs/2387.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "17138ca0586bdb502af102fcaf9940808a80a326", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/preprocessing/stim.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/239.json b/doc/sphinxext/prs/239.json new file mode 100644 index 00000000000..254e6337b0d --- /dev/null +++ b/doc/sphinxext/prs/239.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2048cd364bc86442163adf0ec9b6dc80325c8b88", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/proj.py": { + "a": 4, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2394.json b/doc/sphinxext/prs/2394.json new file mode 100644 index 00000000000..80c264d6af3 --- 
/dev/null +++ b/doc/sphinxext/prs/2394.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1fedc47e766b954e11d884aaa295b3b5c01dbb95", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 50, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 4, + "d": 2 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2395.json b/doc/sphinxext/prs/2395.json new file mode 100644 index 00000000000..7ac76c7ade7 --- /dev/null +++ b/doc/sphinxext/prs/2395.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b8bf44c0910e28fe437f348db10427e6354725d7", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/mne_logo.png": { + "a": 0, + "d": 0 + }, + "doc/_static/mne_logo_small.png": { + "a": 0, + "d": 0 + }, + "doc/generate_mne_logos.py": { + "a": 154, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2396.json b/doc/sphinxext/prs/2396.json new file mode 100644 index 00000000000..ac6ead2334f --- /dev/null +++ b/doc/sphinxext/prs/2396.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "319ba7a281e73af160667d84796f1644a473c5d4", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2398.json b/doc/sphinxext/prs/2398.json new file mode 100644 index 00000000000..f56c4d2e53e --- /dev/null +++ b/doc/sphinxext/prs/2398.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bd5b94c9050a6484956f9f3c670db053e2448cc9", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 9, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2399.json b/doc/sphinxext/prs/2399.json new file mode 100644 index 00000000000..9a151ef6003 --- /dev/null +++ b/doc/sphinxext/prs/2399.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "58f1acbce1f2e6395ace5119e00dee2ff5ccf10b", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 22, + "d": 20 + }, + "mne/decoding/csp.py": { + "a": 22, + "d": 20 + }, + "mne/epochs.py": { + "a": 11, + "d": 9 + }, + "mne/evoked.py": { + "a": 11, + "d": 9 + }, + "mne/preprocessing/ica.py": { + "a": 11, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 11, + "d": 9 + }, + "mne/viz/tests/test_topomap.py": { + "a": 33, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 116, + "d": 82 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/24.json b/doc/sphinxext/prs/24.json new file mode 100644 index 00000000000..4568a374a80 --- /dev/null +++ b/doc/sphinxext/prs/24.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6f9d6a2f0214cd21973843f9c662a62e1e00dda3", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/cov.py": { + "a": 80, + "d": 3 + }, + "mne/epochs.py": { + "a": 8, + "d": 0 + }, + "mne/fiff/evoked.py": { + "a": 22, + "d": 18 + }, + "mne/fiff/raw.py": { + "a": 13, + "d": 19 + }, + "mne/tests/test_cov.py": { + "a": 33, + 
"d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2400.json b/doc/sphinxext/prs/2400.json new file mode 100644 index 00000000000..20c2dab26c8 --- /dev/null +++ b/doc/sphinxext/prs/2400.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "659bc393465355df2a065fb0ec8f4bd3276a7038", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 156, + "d": 32 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 102, + "d": 42 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2401.json b/doc/sphinxext/prs/2401.json new file mode 100644 index 00000000000..2c86c4f6e03 --- /dev/null +++ b/doc/sphinxext/prs/2401.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cd5970dddd3f250dfc49f127adf890acbbcf1c39", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/time_gen.py": { + "a": 60, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2402.json b/doc/sphinxext/prs/2402.json new file mode 100644 index 00000000000..0cb1ffb98b0 --- /dev/null +++ b/doc/sphinxext/prs/2402.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2e7df491b5182069859d8fb7d6f4896a9f48392c", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 36, + "d": 23 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 43, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2410.json b/doc/sphinxext/prs/2410.json new file mode 100644 index 00000000000..567d00014c2 --- /dev/null +++ b/doc/sphinxext/prs/2410.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2d1a3128da0337c877e3e6d967bb4b0d2c555780", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2412.json b/doc/sphinxext/prs/2412.json new file mode 100644 index 00000000000..dd41d4079d3 --- /dev/null +++ b/doc/sphinxext/prs/2412.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "aea08472266f673f06c9c45c1ab0e73a466d2a5d", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 6, + "d": 4 + }, + "mne/decoding/time_gen.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2413.json b/doc/sphinxext/prs/2413.json new file mode 100644 index 00000000000..96a79dfa523 --- /dev/null +++ b/doc/sphinxext/prs/2413.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "14e10d78482ef917a8ce80e96f5db9605671bec3", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_eog_artifact_histogram.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_shift_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2414.json b/doc/sphinxext/prs/2414.json new file mode 100644 index 00000000000..a1cf50713cb --- /dev/null +++ b/doc/sphinxext/prs/2414.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0bca5b35fa4e6ef664d231f8af5574b92c2639b0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/index.rst": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2415.json b/doc/sphinxext/prs/2415.json new file mode 100644 
index 00000000000..9ab97e08d06 --- /dev/null +++ b/doc/sphinxext/prs/2415.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "9d7f3947435c22f1f2ec2ee25b17a13c8f623818", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_lcmv.py": { + "a": 3, + "d": 2 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 8, + "d": 2 + }, + "mne/filter.py": { + "a": 5, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/eog.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 6, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_decoding.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2417.json b/doc/sphinxext/prs/2417.json new file mode 100644 index 00000000000..0db16b9785e --- /dev/null +++ b/doc/sphinxext/prs/2417.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "873aac06d2dd727d6b584fabc36863c3ebc7bd0e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/_static/mne_helmet.png": { + "a": 0, + "d": 0 + }, + "doc/conf.py": { + "a": 3, + "d": 2 + }, + "doc/contributing.rst": { + "a": 10, + "d": 3 + }, + "doc/customizing_git.rst": { + "a": 4, + "d": 0 + }, + "examples/README.txt": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2418.json b/doc/sphinxext/prs/2418.json new file mode 100644 index 00000000000..96287927c25 --- /dev/null +++ b/doc/sphinxext/prs/2418.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "745605ee16a731274fbb5ea4ee0a9d4d02c294fc", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 2, + "d": 0 + }, + "mne/stats/__init__.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/242.json b/doc/sphinxext/prs/242.json new file mode 100644 index 00000000000..678958bfd99 --- /dev/null +++ b/doc/sphinxext/prs/242.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2a32d359695b6e8623c5c377a39ec1a9dd45dfc1", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_label_activation_from_stc.py": { + "a": 62, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2424.json b/doc/sphinxext/prs/2424.json new file mode 100644 index 00000000000..0fc3a0dfa82 --- /dev/null +++ b/doc/sphinxext/prs/2424.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "bd0ae234195402942260dc51b3ef7872b19cef36", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 4, + "d": 1 + }, + "mne/epochs.py": { + "a": 5, + "d": 5 + }, + "mne/event.py": { + "a": 3, + "d": 2 + }, + "mne/externals/__init__.py": { + "a": 2, + "d": 1 + }, + 
"mne/externals/h5io/__init__.py": { + "a": 6, + "d": 0 + }, + "mne/externals/h5io/_h5io.py": { + "a": 131, + "d": 14 + }, + "mne/source_estimate.py": { + "a": 7, + "d": 6 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_hdf5.py": { + "a": 0, + "d": 31 + }, + "mne/time_frequency/csd.py": { + "a": 7, + "d": 5 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 41, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 4, + "d": 4 + }, + "setup.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2427.json b/doc/sphinxext/prs/2427.json new file mode 100644 index 00000000000..5ae49a69660 --- /dev/null +++ b/doc/sphinxext/prs/2427.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "26d14bafcef40af134074f23a28c26d5dfb19f96", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2429.json b/doc/sphinxext/prs/2429.json new file mode 100644 index 00000000000..6f5f2fd9eb7 --- /dev/null +++ b/doc/sphinxext/prs/2429.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "351f5f620c80e8854630929903a56b1d85e05924", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 44, + "d": 8 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/243.json b/doc/sphinxext/prs/243.json new file mode 100644 index 00000000000..a2f45869cc1 --- /dev/null +++ b/doc/sphinxext/prs/243.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3a297af274af4f009d9c8dcf92842d1b286f3e22", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 7, + "d": 3 + }, + "mne/event.py": { + "a": 18, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2431.json b/doc/sphinxext/prs/2431.json new file mode 100644 index 00000000000..87352f7710c --- /dev/null +++ b/doc/sphinxext/prs/2431.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f6fb7db3a435265bb7de19ee10b78a3cb4b344ba", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_transformer.py": { + "a": 5, + "d": 0 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2432.json b/doc/sphinxext/prs/2432.json new file mode 100644 index 00000000000..fe33617299f --- /dev/null +++ b/doc/sphinxext/prs/2432.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "70586b176875ae74a988c7e07b1093ef8010bcf4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2438.json b/doc/sphinxext/prs/2438.json new file mode 100644 index 00000000000..e7c15963a50 --- /dev/null +++ b/doc/sphinxext/prs/2438.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "edc31f9c5b8ccdcdf3a1566f08f4962bf6262885", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2441.json b/doc/sphinxext/prs/2441.json new file mode 100644 index 00000000000..0d12dde0d1e --- /dev/null +++ b/doc/sphinxext/prs/2441.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a3a499ea759d7cf82838ddc190ad4759217febc4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_csp.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 40, + "d": 40 + }, + "mne/io/proj.py": { + "a": 14, + "d": 23 + }, + "mne/realtime/epochs.py": { + "a": 3, + "d": 8 + }, + "mne/tests/test_epochs.py": { + "a": 51, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2443.json b/doc/sphinxext/prs/2443.json new file mode 100644 index 00000000000..d42dcb2efbc --- /dev/null +++ b/doc/sphinxext/prs/2443.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d0c566f34ecc8dc4d2914b9c59a9eaf42dca785c", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_simulated.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2444.json b/doc/sphinxext/prs/2444.json new file mode 100644 index 00000000000..dca4909384f --- /dev/null +++ b/doc/sphinxext/prs/2444.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c2ff48783921fa9f98a16f8b805e74f971644798", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cuda.py": { + "a": 23, + "d": 10 + }, + "mne/filter.py": { + "a": 1, + "d": 0 + }, + "mne/proj.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_proj.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end 
of file diff --git a/doc/sphinxext/prs/2449.json b/doc/sphinxext/prs/2449.json new file mode 100644 index 00000000000..a6d932eaaec --- /dev/null +++ b/doc/sphinxext/prs/2449.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "2c930405985d3c233e4a4f601c4a3e9590519178", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 8, + "d": 24 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 181 + }, + "mne/viz/tests/test_epochs.py": { + "a": 6, + "d": 33 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/245.json b/doc/sphinxext/prs/245.json new file mode 100644 index 00000000000..8823755cc09 --- /dev/null +++ b/doc/sphinxext/prs/245.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "025ac9c3517e589588784d32312722be4bd1ed1f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/data/icos.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 27 + }, + "setup.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2452.json b/doc/sphinxext/prs/2452.json new file mode 100644 index 00000000000..72c7cab0208 --- /dev/null +++ b/doc/sphinxext/prs/2452.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "b91c2f914c6f7e7910c88c1bd3d52ef10bbdc293", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 5, + "d": 7 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 46, + "d": 34 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 3, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 17, + "d": 18 + }, + "mne/channels/tests/test_layout.py": { + "a": 3, + "d": 2 + }, + "mne/cov.py": { + "a": 5, + "d": 4 + }, + "mne/decoding/time_gen.py": { + "a": 7, + "d": 4 + }, + "mne/epochs.py": { + "a": 10, + "d": 10 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 1 + }, + "mne/io/base.py": { + "a": 6, + "d": 20 + }, + "mne/io/meas_info.py": { + "a": 23, + "d": 1 + }, + "mne/io/pick.py": { + "a": 19, + "d": 6 + }, + "mne/io/tests/test_pick.py": { + "a": 13, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 2 + }, + "mne/realtime/mockclient.py": { + "a": 3, + "d": 6 + }, + "mne/stats/regression.py": { + "a": 19, + "d": 14 + }, + "mne/tests/test_epochs.py": { + "a": 22, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2453.json b/doc/sphinxext/prs/2453.json new file mode 100644 index 00000000000..b98dc36ea1e --- /dev/null +++ b/doc/sphinxext/prs/2453.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c65288f90b1beef2b0ddf0d72815aa65f063a846", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/manual/time_frequency.rst": { + "a": 34, + "d": 3 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 52, + "d": 91 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2454.json b/doc/sphinxext/prs/2454.json new file mode 100644 index 00000000000..95af9d2ffdd --- /dev/null +++ b/doc/sphinxext/prs/2454.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bc9782eb8540abfc6e7b734d3a1ebd5b1a4d3f76", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2455.json b/doc/sphinxext/prs/2455.json new file mode 100644 index 00000000000..e66000452f3 --- /dev/null +++ b/doc/sphinxext/prs/2455.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "08583e8b49edee05298ff3656fe11ff9cc486e80", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2456.json b/doc/sphinxext/prs/2456.json new file mode 100644 index 00000000000..26c51c09975 --- /dev/null +++ b/doc/sphinxext/prs/2456.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7af1ee3d52354334c7db4c06a71c5759bf7a1815", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/minimum_norm/time_frequency.py": { + "a": 74, + "d": 50 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2458.json b/doc/sphinxext/prs/2458.json new file mode 100644 index 00000000000..a3e66e53092 --- /dev/null +++ b/doc/sphinxext/prs/2458.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1a329b5f0554f76477b08f8cd2ccba09c40fb0c6", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2459.json b/doc/sphinxext/prs/2459.json new file mode 100644 index 00000000000..52c91459d16 --- /dev/null +++ b/doc/sphinxext/prs/2459.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "eda1216f583debcbc067a9b7c5db2509ded735e1", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2460.json b/doc/sphinxext/prs/2460.json new file mode 100644 index 00000000000..9c795122180 --- /dev/null +++ b/doc/sphinxext/prs/2460.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "af1f4ede9d8bdc412c871f67c448d719a8c96928", + "authors": [ + { + "n": "jmontoyam", + "e": "montoya.jair.m@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/infomax_.py": { + "a": 24, + "d": 14 + }, + "mne/preprocessing/tests/data/eeglab_extended_infomax_results_eeg_data.mat": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/data/eeglab_extended_infomax_results_meg_data.mat": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 35, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2461.json b/doc/sphinxext/prs/2461.json new file mode 100644 index 00000000000..e0f65ef8567 --- /dev/null +++ b/doc/sphinxext/prs/2461.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d46e566c99d976a89acc975e350c1704e7cf697c", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_xdawn_denoising.py": { + "a": 3, + "d": 3 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 1 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 15, + "d": 3 + }, + "mne/viz/tests/test_epochs.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2463.json b/doc/sphinxext/prs/2463.json new file mode 100644 index 00000000000..9e99a5d1d80 --- /dev/null +++ b/doc/sphinxext/prs/2463.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "199341c8017cf8a90593157de627997cd6cba691", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/beamformer/_dics.py": { + "a": 13, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2466.json b/doc/sphinxext/prs/2466.json new file mode 100644 index 00000000000..7174c8dc518 --- /dev/null +++ b/doc/sphinxext/prs/2466.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c9e0b402b6519af499a030ca360242d11102f299", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 221, + "d": 98 + }, + "mne/io/tag.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 132, + "d": 94 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2468.json b/doc/sphinxext/prs/2468.json new file mode 100644 index 00000000000..f63388045ad --- /dev/null +++ b/doc/sphinxext/prs/2468.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "28009f687074bc8e794fe4b20ff1365f99a34f2d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/dipole.py": { + "a": 145, + "d": 40 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 8, + "d": 4 + }, + "mne/tests/test_dipole.py": { + "a": 54, + "d": 17 + }, + "mne/transforms.py": { + "a": 19, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/247.json 
b/doc/sphinxext/prs/247.json new file mode 100644 index 00000000000..2c5f2089483 --- /dev/null +++ b/doc/sphinxext/prs/247.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "4f9eae9c70f9385ccfc08fb8a5bc25df634560aa", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 5, + "d": 0 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 10, + "d": 7 + }, + "mne/fiff/tests/test_raw.py": { + "a": 21, + "d": 17 + }, + "mne/layouts/tests/test_layout.py": { + "a": 11, + "d": 6 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 7, + "d": 4 + }, + "mne/tests/test_cov.py": { + "a": 18, + "d": 11 + }, + "mne/tests/test_epochs.py": { + "a": 9, + "d": 6 + }, + "mne/tests/test_event.py": { + "a": 19, + "d": 16 + }, + "mne/tests/test_label.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 10, + "d": 5 + }, + "mne/tests/test_source_estimate.py": { + "a": 8, + "d": 5 + }, + "mne/tests/test_source_space.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2473.json b/doc/sphinxext/prs/2473.json new file mode 100644 index 00000000000..e98edbd8f16 --- /dev/null +++ b/doc/sphinxext/prs/2473.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "31057cbd1893c14b111867d4d0bcb7d89de6fd83", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 154, + "d": 114 + }, + "mne/io/bti/constants.py": { + "a": 2, + "d": 10 + }, + "mne/io/bti/read.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 19, + "d": 3 + }, + "mne/io/bti/transforms.py": { + "a": 0, + "d": 102 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 17 + }, + "mne/io/meas_info.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2474.json b/doc/sphinxext/prs/2474.json new file mode 100644 index 00000000000..3363d4df810 --- /dev/null +++ b/doc/sphinxext/prs/2474.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1feddd90039aba4a34c9eaed5fb1de0042573fdd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2475.json b/doc/sphinxext/prs/2475.json new file mode 100644 index 00000000000..74c7862451e --- /dev/null +++ b/doc/sphinxext/prs/2475.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "55f13faf86167ea6c823c3f6fbf69e682a71431a", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 22, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 23, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2477.json b/doc/sphinxext/prs/2477.json new file mode 100644 index 00000000000..ddc4853bc33 --- /dev/null +++ b/doc/sphinxext/prs/2477.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "98c69aea01a7e33987ff9ef49f88c33ef962a77a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "examples/forward/plot_coregistration_transform.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/bem.py": { + "a": 2, + "d": 5 + }, + "mne/dipole.py": { + 
"a": 2, + "d": 4 + }, + "mne/forward/_field_interpolation.py": { + "a": 3, + "d": 6 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 8 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 3, + "d": 4 + }, + "mne/gui/_coreg_gui.py": { + "a": 2, + "d": 6 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 3, + "d": 5 + }, + "mne/io/bti/bti.py": { + "a": 12, + "d": 24 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 5 + }, + "mne/io/tag.py": { + "a": 6, + "d": 8 + }, + "mne/minimum_norm/inverse.py": { + "a": 4, + "d": 11 + }, + "mne/source_space.py": { + "a": 9, + "d": 20 + }, + "mne/surface.py": { + "a": 17, + "d": 5 + }, + "mne/tests/test_bem.py": { + "a": 2, + "d": 6 + }, + "mne/tests/test_transforms.py": { + "a": 13, + "d": 9 + }, + "mne/transforms.py": { + "a": 122, + "d": 54 + }, + "mne/viz/_3d.py": { + "a": 103, + "d": 47 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2479.json b/doc/sphinxext/prs/2479.json new file mode 100644 index 00000000000..29ab8d2e95d --- /dev/null +++ b/doc/sphinxext/prs/2479.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "618a736bf8c9c362ae01381d1a459338592eb9ed", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 28, + "d": 23 + }, + "mne/tests/test_bem.py": { + "a": 40, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2480.json b/doc/sphinxext/prs/2480.json new file mode 100644 index 00000000000..c418a0602d8 --- /dev/null +++ b/doc/sphinxext/prs/2480.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9e0976aa0b6d0c1ec928410ce1de5e943b2ced98", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 84, + "d": 47 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 108, + "d": 49 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2481.json b/doc/sphinxext/prs/2481.json new file mode 100644 index 00000000000..9f8e7f95dd0 --- /dev/null +++ b/doc/sphinxext/prs/2481.json @@ -0,0 +1,151 @@ +{ + "merge_commit_sha": "b7ccda04c0897622c4cc0892af9f3e6e7e29a252", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 4, + "d": 2 + }, + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 20, + "d": 30 + }, + "examples/simulation/plot_simulate_raw_data.py": { + "a": 79, + "d": 0 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 3, + "d": 4 + }, + "mne/bem.py": { + "a": 44, + "d": 19 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 2 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/forward/__init__.py": { + "a": 5, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 115, + "d": 108 + }, + "mne/forward/forward.py": { + "a": 32, + "d": 15 + }, + "mne/forward/tests/test_forward.py": { + "a": 2, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/array/array.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ssp.py": { + "a": 3, + "d": 6 + }, + "mne/simulation/__init__.py": { + "a": 6, + "d": 3 + }, + "mne/simulation/evoked.py": { + "a": 90, + "d": 11 + }, + "mne/simulation/metrics.py": { + "a": 68, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 571, + "d": 0 + }, + "mne/simulation/source.py": { + "a": 
131, + "d": 2 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 17, + "d": 25 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 52, + "d": 0 + }, + "mne/simulation/tests/test_raw.py": { + "a": 234, + "d": 0 + }, + "mne/simulation/tests/test_source.py": { + "a": 23, + "d": 47 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_bem.py": { + "a": 9, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 28, + "d": 0 + }, + "mne/time_frequency/ar.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 5, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2483.json b/doc/sphinxext/prs/2483.json new file mode 100644 index 00000000000..4fb9260767b --- /dev/null +++ b/doc/sphinxext/prs/2483.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "717276880a1b66e12f52cd85270697a53d125b16", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/time_gen.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2484.json b/doc/sphinxext/prs/2484.json new file mode 100644 index 00000000000..e146229e68b --- /dev/null +++ b/doc/sphinxext/prs/2484.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3125f6ef834a1796afc2fbb6b3805166751ae831", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "mne/stats/parametric.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2485.json b/doc/sphinxext/prs/2485.json new file mode 100644 index 00000000000..b7d79663408 --- /dev/null +++ b/doc/sphinxext/prs/2485.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1aca4fd960624e40e3ce2b2d8505979cffa42360", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2487.json b/doc/sphinxext/prs/2487.json new file mode 100644 index 00000000000..24389d6c264 --- /dev/null +++ b/doc/sphinxext/prs/2487.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1df96993b84bcd553adc46ecaf2aabf662096ddf", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 262, + "d": 207 + }, + "mne/io/bti/read.py": { + "a": 44, + "d": 51 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 34, + "d": 2 + }, + "mne/io/tag.py": { + "a": 5, + "d": 4 + }, + "mne/io/tree.py": { + "a": 1, + "d": 2 + }, + "mne/realtime/client.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2488.json b/doc/sphinxext/prs/2488.json new file mode 100644 index 00000000000..b9361f387cf --- /dev/null +++ b/doc/sphinxext/prs/2488.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4d3b24e598b56dc72fa46974603f4d0e724da4bd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 64, + "d": 48 + }, + "mne/minimum_norm/inverse.py": { + "a": 6, + "d": 5 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 15, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 3, + "d": 13 + }, + "mne/utils.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2492.json b/doc/sphinxext/prs/2492.json new file mode 100644 index 00000000000..28798b2bd4e --- /dev/null +++ b/doc/sphinxext/prs/2492.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9571df483a7624fa700ed7542350eb9de518df93", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/ecg.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/eog.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2495.json b/doc/sphinxext/prs/2495.json new file mode 100644 index 00000000000..e272da9177e --- /dev/null +++ b/doc/sphinxext/prs/2495.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "e3938f170f3d682398e51c7fc97d1a6f3391c65b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/manual/cookbook.rst": { + "a": 3, + "d": 3 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_estimate_covariance_matrix_raw.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_xdawn_denoising.py": { + "a": 2, + "d": 2 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 14, + "d": 3 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2496.json b/doc/sphinxext/prs/2496.json new file mode 100644 index 00000000000..097329a2852 --- /dev/null +++ b/doc/sphinxext/prs/2496.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d9ec048f2eb6cd57d89e7b168be1acb4c1d81111", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/tag.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2498.json b/doc/sphinxext/prs/2498.json new file mode 100644 index 00000000000..2873812df3b --- /dev/null +++ b/doc/sphinxext/prs/2498.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "337587fc9bf729837aac65896fedebacf99825fd", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/io/base.py": { + "a": 3, + "d": 3 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 1 + }, + 
"mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 8, + "d": 8 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2499.json b/doc/sphinxext/prs/2499.json new file mode 100644 index 00000000000..12be2660bd7 --- /dev/null +++ b/doc/sphinxext/prs/2499.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "a8172e6432632bd442c43b75f1da91474228a36e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/chpi.py": { + "a": 428, + "d": 0 + }, + "mne/forward/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 6, + "d": 2 + }, + "mne/forward/_field_interpolation.py": { + "a": 8, + "d": 8 + }, + "mne/forward/_make_forward.py": { + "a": 40, + "d": 56 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 2, + "d": 4 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 45, + "d": 1 + }, + "mne/io/chpi.py": { + "a": 0, + "d": 133 + }, + "mne/io/constants.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 5 + }, + "mne/io/tests/test_chpi.py": { + "a": 0, + "d": 78 + }, + "mne/simulation/raw.py": { + "a": 18, + "d": 39 + }, + "mne/simulation/tests/test_raw.py": { + "a": 21, + "d": 11 + }, + "mne/tests/test_chpi.py": { + "a": 165, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 5, + "d": 13 + }, + "mne/transforms.py": { + "a": 9, + "d": 5 + }, + "mne/utils.py": { + "a": 10, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/25.json b/doc/sphinxext/prs/25.json new file mode 100644 index 00000000000..021eead97ec --- /dev/null +++ b/doc/sphinxext/prs/25.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ea471e0c297cef2804458d1120363845597e40ff", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 5, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/write.py": { + "a": 22, + "d": 0 + }, + "mne/surface.py": { + "a": 53, + "d": 8 + }, + "mne/tests/test_surface.py": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/250.json b/doc/sphinxext/prs/250.json new file mode 100644 index 00000000000..f2ff3a17bc6 --- /dev/null +++ b/doc/sphinxext/prs/250.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f317eb64a253949145769420bdc9588c8d18b37e", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/sphinxext/gen_rst.py": { + "a": 182, + "d": 52 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2501.json b/doc/sphinxext/prs/2501.json 
new file mode 100644 index 00000000000..0bc3b6ef218 --- /dev/null +++ b/doc/sphinxext/prs/2501.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "127c85d90614aafbd3e10453ee62ffbb481cbd0b", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 25, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2503.json b/doc/sphinxext/prs/2503.json new file mode 100644 index 00000000000..cd91f5a6be9 --- /dev/null +++ b/doc/sphinxext/prs/2503.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ac10457e321d67fe9933b49e85f5686298c68afa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 37, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 26, + "d": 0 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 27, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2504.json b/doc/sphinxext/prs/2504.json new file mode 100644 index 00000000000..93f8d4340ea --- /dev/null +++ b/doc/sphinxext/prs/2504.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e24e720801842286c058f89a97a7cee35414c3a", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2508.json b/doc/sphinxext/prs/2508.json new file mode 100644 index 00000000000..30908978ab6 --- /dev/null +++ b/doc/sphinxext/prs/2508.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "533e718d389c1c6a12b22c138dbc725be0746ff1", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/manual/cookbook.rst": { + "a": 8, + "d": 0 + }, + "mne/bem.py": { + "a": 5, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2510.json b/doc/sphinxext/prs/2510.json new file mode 100644 index 00000000000..b6fd7e6cf2c --- /dev/null +++ b/doc/sphinxext/prs/2510.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bdbddab75d233055722afb6abe90e07f17dd7eb6", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/kit/constants.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2512.json b/doc/sphinxext/prs/2512.json new file mode 100644 index 00000000000..42d15ea72a2 --- /dev/null +++ b/doc/sphinxext/prs/2512.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "511a6293a0c9da989f0a039c67edc03be2cf947d", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 55, + "d": 27 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 40, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2513.json b/doc/sphinxext/prs/2513.json new file mode 100644 index 00000000000..d1120414410 --- /dev/null +++ b/doc/sphinxext/prs/2513.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2e1f390e328fe71d93948a3d55cda66793da373a", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2514.json b/doc/sphinxext/prs/2514.json new file mode 100644 index 00000000000..74c67ef3afa --- /dev/null +++ b/doc/sphinxext/prs/2514.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "936dce4d46f9de1c91a3f5b6836d81c1039d9dbe", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_evoked_erf_erp.py": { + "a": 3, + "d": 0 + }, + "mne/evoked.py": { + "a": 4, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 82, + "d": 5 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2515.json b/doc/sphinxext/prs/2515.json new file mode 100644 index 00000000000..ef1f615d4f0 --- /dev/null +++ b/doc/sphinxext/prs/2515.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "589a1c9e8990178b26c7babb548c392979efaf51", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2516.json b/doc/sphinxext/prs/2516.json new file mode 100644 index 00000000000..703d242519d --- /dev/null +++ b/doc/sphinxext/prs/2516.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d6eecdfcf402d2787fd0520f4a09049a214f7dcf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 14, + "d": 4 + }, + "mne/io/base.py": { + "a": 10, + "d": 7 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 2, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 29, + "d": 9 + }, + "mne/simulation/tests/test_raw.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2517.json b/doc/sphinxext/prs/2517.json new file mode 100644 index 00000000000..287f01bb6a5 --- /dev/null +++ b/doc/sphinxext/prs/2517.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2dd9b435be5c44d141ed0ace4fc9783dc378bd57", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 120, + "d": 250 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 13, + "d": 12 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/252.json b/doc/sphinxext/prs/252.json new file mode 100644 index 00000000000..1f1cbff7af6 --- /dev/null +++ b/doc/sphinxext/prs/252.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "8bd48a05148f52ed235781d278400c5640ea7c6d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 13, + "d": 1 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + 
"a": 1, + "d": 1 + }, + "examples/plot_from_raw_to_multiple_epochs_to_evoked.py": { + "a": 73, + "d": 0 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 3, + "d": 2 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 170, + "d": 23 + }, + "mne/fiff/evoked.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 56, + "d": 5 + }, + "mne/viz.py": { + "a": 18, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2522.json b/doc/sphinxext/prs/2522.json new file mode 100644 index 00000000000..52a2baff82b --- /dev/null +++ b/doc/sphinxext/prs/2522.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "92a0c5f1bdb1cfea779229a054f3691bfd6c024b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 14 + }, + "mne/label.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 3 + }, + "mne/simulation/source.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 60 + }, + "mne/source_space.py": { + "a": 142, + "d": 17 + }, + "mne/surface.py": { + "a": 62, + "d": 7 + }, + "mne/tests/test_source_space.py": { + "a": 16, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 20, + "d": 1 + }, + "mne/utils.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2525.json b/doc/sphinxext/prs/2525.json new file mode 100644 index 00000000000..ff41a76d04c --- /dev/null +++ b/doc/sphinxext/prs/2525.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "95f3a68ec4f967c5d76b53bfd9be752f778d401a", + "authors": [ + { + "n": "jona-sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2526.json b/doc/sphinxext/prs/2526.json new file mode 100644 index 00000000000..195c0c0c1d4 --- /dev/null +++ b/doc/sphinxext/prs/2526.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8cd93206924b9971e4ef0cf24161d7f426cc53ac", + "authors": [ + { + "n": "jona-sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 5, + "d": 4 + }, + "mne/viz/tests/test_topomap.py": { + "a": 16, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 32, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2527.json b/doc/sphinxext/prs/2527.json new file mode 100644 index 00000000000..e1d0e073d68 --- /dev/null +++ b/doc/sphinxext/prs/2527.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9019cc469a1500125e80e393bb978ab319cd91c1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 10, + "d": 6 + }, + "mne/stats/regression.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2528.json b/doc/sphinxext/prs/2528.json new file mode 100644 index 00000000000..503f81f0bdd --- /dev/null +++ b/doc/sphinxext/prs/2528.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "d44b98154806443f89984b3dff81f066c12cf6a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 0, + "d": 1 + }, 
+ "mne/channels/tests/test_layout.py": { + "a": 2, + "d": 8 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 6 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 7, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 3 + }, + "mne/io/bti/bti.py": { + "a": 31, + "d": 39 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 8, + "d": 8 + }, + "mne/io/edf/edf.py": { + "a": 0, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 3, + "d": 3 + }, + "mne/io/egi/egi.py": { + "a": 0, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 0, + "d": 3 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 0, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/io/reference.py": { + "a": 0, + "d": 3 + }, + "mne/io/tag.py": { + "a": 27, + "d": 25 + }, + "mne/io/tests/test_meas_info.py": { + "a": 27, + "d": 14 + }, + "mne/io/tests/test_reference.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 3 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 0, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 5, + "d": 3 + }, + "mne/viz/tests/test_topomap.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2529.json b/doc/sphinxext/prs/2529.json new file mode 100644 index 00000000000..844e8417b57 --- /dev/null +++ b/doc/sphinxext/prs/2529.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1acce09eba5f275f67508acd2ad308db6e2fdf8f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_compare_fiff.py": { + "a": 27, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 7, + "d": 1 + }, + "mne/commands/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 8, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2531.json b/doc/sphinxext/prs/2531.json new file mode 100644 index 00000000000..7f1df71fe71 --- /dev/null +++ b/doc/sphinxext/prs/2531.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "60f16dfc4e254e0cbd89e4a6bd9fdacd47fe98f0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/connectivity/tests/test_spectral.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2536.json b/doc/sphinxext/prs/2536.json new file mode 100644 index 00000000000..76c39c058ea --- /dev/null +++ b/doc/sphinxext/prs/2536.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "738918db1a06444e642c98d19d7351bf1e5a5812", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/time_gen.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2538.json b/doc/sphinxext/prs/2538.json new file mode 100644 index 00000000000..46d7e7a9e32 --- /dev/null +++ b/doc/sphinxext/prs/2538.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8aa412c21d6d445d5585f349511c5bb293174d63", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 5, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 33, + "d": 7 + }, + "mne/viz/tests/test_evoked.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/2541.json b/doc/sphinxext/prs/2541.json new file mode 100644 index 00000000000..52690563d91 --- /dev/null +++ b/doc/sphinxext/prs/2541.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "aa274d59ed5b71ee5e408cc9f8f2f2305d5a7e58", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/report.py": { + "a": 19, + "d": 3 + }, + "mne/tests/test_report.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2542.json b/doc/sphinxext/prs/2542.json new file mode 100644 index 00000000000..2d1ccbd9af4 --- /dev/null +++ b/doc/sphinxext/prs/2542.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d961a1223e3209a3be9708ec29f41ad3d4cd9cfd", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/index.rst": { + "a": 22, + "d": 22 + }, + "logo/generate_mne_logos.py": { + "a": 4, + "d": 2 + }, + "tutorials/plot_epochs_objects.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2545.json b/doc/sphinxext/prs/2545.json new file mode 100644 index 00000000000..0f60cb896b8 --- /dev/null +++ b/doc/sphinxext/prs/2545.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b07c80fcfe44b57d53a2e4c3ff3ed35db94c90ff", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/forward/plot_source_space_morphing.py": { + "a": 68, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 23, + "d": 8 + }, + "mne/source_estimate.py": { + "a": 37, + "d": 1 + }, + "mne/source_space.py": { + "a": 134, + "d": 37 + }, + "mne/tests/test_source_space.py": { + "a": 64, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2546.json b/doc/sphinxext/prs/2546.json new file mode 100644 index 00000000000..7f6a06f0f2b --- /dev/null +++ b/doc/sphinxext/prs/2546.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "1882f87647c0a87d22f5ed1c5af9f79a75f8dbc7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 3, + "d": 1 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/source.py": { + "a": 3, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 3, + "d": 3 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_chpi.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_import_nesting.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 5, + "d": 6 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 4, + "d": 4 + }, + "mne/utils.py": { + "a": 16, + "d": 35 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2549.json b/doc/sphinxext/prs/2549.json new file mode 100644 index 00000000000..03edc16c3e9 --- /dev/null +++ b/doc/sphinxext/prs/2549.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f5dd1b5694bb42a974b8988a574f9dc741c7c77f", + "authors": [ + { + "n": "jaeilepp", + "e": 
"jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 22, + "d": 21 + }, + "mne/forward/tests/test_forward.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2551.json b/doc/sphinxext/prs/2551.json new file mode 100644 index 00000000000..830259b91cb --- /dev/null +++ b/doc/sphinxext/prs/2551.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "cc3b95b5bf7fced80f5e3d911c52073a37fd0bfd", + "authors": [ + { + "n": "kingjr", + "e": "jeanremi.king+github@gmail.com" + } + ], + "changes": { + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/tests/test_utils.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 9, + "d": 13 + }, + "mne/evoked.py": { + "a": 14, + "d": 8 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 5, + "d": 4 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 5, + "d": 3 + }, + "mne/io/pick.py": { + "a": 9, + "d": 0 + }, + "mne/stats/tests/test_parametric.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 41, + "d": 15 + }, + "mne/tests/test_filter.py": { + "a": 7, + "d": 8 + }, + "mne/tests/test_fixes.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 7, + "d": 6 + }, + "mne/tests/test_source_space.py": { + "a": 4, + "d": 5 + }, + "mne/tests/test_surface.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_transforms.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_utils.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2552.json b/doc/sphinxext/prs/2552.json new file mode 100644 index 00000000000..dc07a0efff2 --- /dev/null +++ b/doc/sphinxext/prs/2552.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e29410ec4522d6b245121523939c0a721d40f272", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/proj.py": { + "a": 12, + "d": 8 + }, + "mne/tests/test_proj.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2554.json b/doc/sphinxext/prs/2554.json new file mode 100644 index 00000000000..13586ba30a3 --- /dev/null +++ b/doc/sphinxext/prs/2554.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "7a1f14015cf0dc2ceac5c0d61a3f3be47eb9e6cf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/__init__.py": { + "a": 3, + "d": 4 + }, + "mne/bem.py": { + "a": 1, + "d": 21 + }, + "mne/commands/mne_flash_bem_model.py": { + "a": 0, + "d": 145 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 7 + }, + "mne/cov.py": { + "a": 0, + "d": 11 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 0, + "d": 6 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 7 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 28 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 6 + }, + "mne/io/__init__.py": { + "a": 0, + "d": 45 + }, + "mne/io/base.py": { + "a": 1, + "d": 23 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 25 + }, + "mne/simulation/__init__.py": { + 
"a": 2, + "d": 4 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 69 + }, + "mne/simulation/source.py": { + "a": 1, + "d": 110 + }, + "mne/stats/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/stats/parametric.py": { + "a": 0, + "d": 22 + }, + "mne/tests/test_dipole.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_transforms.py": { + "a": 1, + "d": 37 + }, + "mne/transforms.py": { + "a": 1, + "d": 126 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 13 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 4 + }, + "mne/viz/tests/test_topo.py": { + "a": 6, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 73 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2559.json b/doc/sphinxext/prs/2559.json new file mode 100644 index 00000000000..4c5df19a6d8 --- /dev/null +++ b/doc/sphinxext/prs/2559.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "07ca1676c051097a527cbfe838f0267fe038b342", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 7, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/256.json b/doc/sphinxext/prs/256.json new file mode 100644 index 00000000000..001b7b36d01 --- /dev/null +++ b/doc/sphinxext/prs/256.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6ea3a4bf583025b494734c18628a43c06a074660", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/filter.py": { + "a": 2, + "d": 1 + }, + "mne/fixes.py": { + "a": 146, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2560.json b/doc/sphinxext/prs/2560.json new file mode 100644 index 00000000000..bf70429efeb --- /dev/null +++ b/doc/sphinxext/prs/2560.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ba09bd44757771c447c4e3a7fcd0f763e83a8069", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 53, + "d": 44 + }, + "mne/viz/ica.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2562.json b/doc/sphinxext/prs/2562.json new file mode 100644 index 00000000000..550d80fa6ef --- /dev/null +++ b/doc/sphinxext/prs/2562.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bb26e23f7d4b73064edd6b7380a18eb3c191bcb4", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "examples/visualization/plot_clickable_image.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_topography.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2565.json b/doc/sphinxext/prs/2565.json new file mode 100644 index 00000000000..65c9a4dbe9d --- /dev/null +++ b/doc/sphinxext/prs/2565.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a6fa7fea3690137731b87cbce79a3c3cf0826c01", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/evoked.py": { + "a": 8, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2566.json b/doc/sphinxext/prs/2566.json new file mode 100644 index 00000000000..57ec1884e1c --- /dev/null +++ b/doc/sphinxext/prs/2566.json @@ -0,0 +1,23 @@ +{ + 
"merge_commit_sha": "c91495a07f04b66a4cffbfecf918ad0a9dff51e3", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "examples/visualization/plot_clickable_image.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_topography.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2569.json b/doc/sphinxext/prs/2569.json new file mode 100644 index 00000000000..3892ce73bde --- /dev/null +++ b/doc/sphinxext/prs/2569.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9253da5c7b3ddfd7f44a0250a1c8b43109ad480d", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/__init__.py": { + "a": 2, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 27, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 61, + "d": 0 + }, + "mne/utils.py": { + "a": 73, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/257.json b/doc/sphinxext/prs/257.json new file mode 100644 index 00000000000..fde40a9f027 --- /dev/null +++ b/doc/sphinxext/prs/257.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "dd0734b483bafa8143bfe7ac41fa705f574267ed", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/filter.py": { + "a": 1, + "d": 2 + }, + "mne/fixes.py": { + "a": 165, + "d": 2 + }, + "mne/tests/test_fixes.py": { + "a": 17, + "d": 0 + }, + "mne/tests/test_utils.py": { + "a": 2, + "d": 12 + }, + "mne/utils.py": { + "a": 0, + "d": 157 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2570.json b/doc/sphinxext/prs/2570.json new file mode 100644 index 00000000000..46b44e10fe7 --- /dev/null +++ b/doc/sphinxext/prs/2570.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a26590ffb4fc8691e11133f52b1b00f296a55535", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "examples/visualization/plot_clickable_image.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2571.json b/doc/sphinxext/prs/2571.json new file mode 100644 index 00000000000..69bd54fd2f7 --- /dev/null +++ b/doc/sphinxext/prs/2571.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f558693d683005b1d3b8b4b900b8af167bf0f1ea", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "examples/visualization/plot_clickable_image.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2573.json b/doc/sphinxext/prs/2573.json new file mode 100644 index 00000000000..ecc98afb889 --- /dev/null +++ b/doc/sphinxext/prs/2573.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "62df47fef6a385d4ce6b8f88e3908f126c795f11", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/io/base.py": { + "a": 61, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 81, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2576.json b/doc/sphinxext/prs/2576.json new file mode 100644 index 00000000000..1f0118b9b34 --- /dev/null +++ b/doc/sphinxext/prs/2576.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "03d01bcb6dc629d4d4dd80dad4add66bce322f57", + "authors": [ + { + "n": 
"Deleted user", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2578.json b/doc/sphinxext/prs/2578.json new file mode 100644 index 00000000000..611cfa6c875 --- /dev/null +++ b/doc/sphinxext/prs/2578.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "37ffe6236e71dafc6d44422ddf1b5b2d0fdbfaa8", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/topo.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2580.json b/doc/sphinxext/prs/2580.json new file mode 100644 index 00000000000..78ab01f3765 --- /dev/null +++ b/doc/sphinxext/prs/2580.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "e6ec06975cb5be82e0daa1c828ba41f406cf3287", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "appveyor.yml": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_fiducials_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_file_traits.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_help.py": { + "a": 16, + "d": 0 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 102, + "d": 49 + }, + "mne/gui/_marker_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_viewer.py": { + "a": 1, + "d": 1 + }, + "mne/gui/help/kit2fiff.json": { + "a": 7, + "d": 0 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 16, + "d": 6 + }, + "mne/io/kit/constants.py": { + "a": 27, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 52, + "d": 30 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 25, + "d": 1 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2586.json b/doc/sphinxext/prs/2586.json new file mode 100644 index 00000000000..7990bc3253a --- /dev/null +++ b/doc/sphinxext/prs/2586.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "20cfc04ef77c4d2184cc17b7ea8d66c910b51db0", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 263, + "d": 226 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2588.json b/doc/sphinxext/prs/2588.json new file mode 100644 index 00000000000..207440a8bb8 --- /dev/null +++ b/doc/sphinxext/prs/2588.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7fb6fc02a7542ad5d246dc9c458ab3041f85db22", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "appveyor.yml": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/259.json b/doc/sphinxext/prs/259.json new file mode 100644 index 00000000000..5bcb852645e --- /dev/null +++ b/doc/sphinxext/prs/259.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "faf8db5da525ffbe919a680637b00e021ff34c27", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fixes.py": { + "a": 13, + "d": 145 + }, + "mne/tests/test_fixes.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2590.json b/doc/sphinxext/prs/2590.json new file mode 100644 index 00000000000..1cfe97861b1 --- /dev/null +++ b/doc/sphinxext/prs/2590.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "6414329f73fbe96d5d2bd7878535b80c9d12e160", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 7, + "d": 1 + }, + "examples/preprocessing/plot_maxwell_filter.py": { + "a": 45, + "d": 0 + }, + "mne/evoked.py": { + "a": 5, + "d": 2 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 378, + "d": 111 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 186, + "d": 43 + }, + "mne/viz/evoked.py": { + "a": 8, + "d": 3 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/26.json b/doc/sphinxext/prs/26.json new file mode 100644 index 00000000000..b72ff84a714 --- /dev/null +++ b/doc/sphinxext/prs/26.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0e12d7806d0cc11766928c2bff648de67ae93065", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "bin/mne_surf2bem.py": { + "a": 34, + "d": 0 + }, + "mne/surface.py": { + "a": 3, + "d": 2 + }, + "setup.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/260.json b/doc/sphinxext/prs/260.json new file mode 100644 index 00000000000..f2eb8bfded6 --- /dev/null +++ b/doc/sphinxext/prs/260.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c92cc436f72f70fc572078c553aebe502d2755ed", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2601.json b/doc/sphinxext/prs/2601.json new file mode 100644 index 00000000000..2398e105abe --- /dev/null +++ b/doc/sphinxext/prs/2601.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cd0f9771588da97e9eba8454076c1642ce592aef", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 6, + "d": 10 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2605.json 
b/doc/sphinxext/prs/2605.json new file mode 100644 index 00000000000..d683e0be2db --- /dev/null +++ b/doc/sphinxext/prs/2605.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "5832f7ac715d29bb65a4fdf5ea19dee280f4ffe8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 6, + "d": 6 + }, + "doc/sphinxext/numpy_ext/docscrape.py": { + "a": 3, + "d": 2 + }, + "mne/connectivity/spectral.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 13, + "d": 7 + }, + "mne/fixes.py": { + "a": 25, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 8, + "d": 8 + }, + "mne/parallel.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 4 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/epochs.py": { + "a": 2, + "d": 5 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 1 + }, + "mne/utils.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2607.json b/doc/sphinxext/prs/2607.json new file mode 100644 index 00000000000..b890182036a --- /dev/null +++ b/doc/sphinxext/prs/2607.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "98fff46f59c371ae1332a302c853b0a7bba48409", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 48, + "d": 8 + }, + "mne/preprocessing/ica.py": { + "a": 43, + "d": 21 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 25, + "d": 6 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 73, + "d": 9 + }, + "mne/viz/tests/test_ica.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/261.json b/doc/sphinxext/prs/261.json new file mode 100644 index 00000000000..f0dfd1e5839 --- /dev/null +++ b/doc/sphinxext/prs/261.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1227d06512040eb43baf344d61d1f52e981896ad", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 14, + "d": 3 + }, + "mne/proj.py": { + "a": 19, + "d": 10 + }, + "mne/tests/test_cov.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2610.json b/doc/sphinxext/prs/2610.json new file mode 100644 index 00000000000..b49c3055feb --- /dev/null +++ b/doc/sphinxext/prs/2610.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6bf5cd02b8ea599dd59d33ec201ebe00d06e4157", + "authors": [ + { + "n": "Alexander Rudiuk", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2612.json b/doc/sphinxext/prs/2612.json new file mode 100644 index 00000000000..8160863c870 --- /dev/null +++ b/doc/sphinxext/prs/2612.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8c5281c874799a9ceee53fd0ed5592e4e025f35b", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 1 + }, + "mne/report.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2617.json b/doc/sphinxext/prs/2617.json new file mode 100644 index 00000000000..2bd2e06ca3f --- /dev/null +++ b/doc/sphinxext/prs/2617.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5a47387e604a430504278e94938903267849e53c", + "authors": [ + { + "n": "Alexander Rudiuk", + "e": null + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 10, + "d": 10 + }, + "mne/decoding/time_gen.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2618.json b/doc/sphinxext/prs/2618.json new file mode 100644 index 00000000000..4f6622148fc --- /dev/null +++ b/doc/sphinxext/prs/2618.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2b1dbbb1dfcc31983adf3873d6c8ef20bbf2df8c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_report.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/262.json b/doc/sphinxext/prs/262.json new file mode 100644 index 00000000000..7782019ce03 --- /dev/null +++ b/doc/sphinxext/prs/262.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "506de4178e251ec0a6febccbdf8a1fc3b5abce1b", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/export/plot_epochs_as_data_frame.py": { + "a": 140, + "d": 56 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 1, + "d": 4 + }, + "mne/epochs.py": { + "a": 79, + "d": 22 + }, + "mne/fiff/evoked.py": { + "a": 67, + "d": 0 + }, + "mne/fiff/raw.py": { + "a": 75, + "d": 7 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 21, + "d": 1 + }, + "mne/fiff/tests/test_raw.py": { + "a": 18, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 21, + "d": 1 + }, + "mne/viz.py": { + "a": 33, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2620.json b/doc/sphinxext/prs/2620.json new file mode 100644 index 00000000000..7a3d9cc90d0 --- /dev/null +++ b/doc/sphinxext/prs/2620.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5cbc2b03e0287fba64a25fc3b8b36f9d1866ee4f", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 6, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2625.json b/doc/sphinxext/prs/2625.json new file mode 100644 index 00000000000..915fa045ef6 --- /dev/null +++ b/doc/sphinxext/prs/2625.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "94202d929aef8517edbefb69eea534998adcb8fa", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2626.json b/doc/sphinxext/prs/2626.json new file mode 100644 index 00000000000..1d141a78e4a --- /dev/null +++ b/doc/sphinxext/prs/2626.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "5e476bb05419205b537f5eb351b4834414ac7fbc", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 4, + "d": 15 + }, + "mne/io/brainvision/brainvision.py": { + "a": 22, + "d": 36 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 9, + 
"d": 48 + }, + "mne/io/bti/bti.py": { + "a": 7, + "d": 2 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 5, + "d": 9 + }, + "mne/io/ctf/info.py": { + "a": 2, + "d": 7 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 3, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 71, + "d": 71 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 44, + "d": 96 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 2 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 8, + "d": 25 + }, + "mne/io/fiff/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 3 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 20, + "d": 64 + }, + "mne/io/meas_info.py": { + "a": 15, + "d": 7 + }, + "mne/io/nicolet/nicolet.py": { + "a": 7, + "d": 3 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 3, + "d": 27 + }, + "mne/io/tests/test_raw.py": { + "a": 92, + "d": 1 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2627.json b/doc/sphinxext/prs/2627.json new file mode 100644 index 00000000000..d149e3ff1c4 --- /dev/null +++ b/doc/sphinxext/prs/2627.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "073907fc72930ae0d6e0348832237b2555d278e2", + "authors": [ + { + "n": "Emily Stephen", + "e": "emilyps14@gmail.com" + } + ], + "changes": { + "doc/advanced_setup.rst": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2629.json b/doc/sphinxext/prs/2629.json new file mode 100644 index 00000000000..3bd7ac828d0 --- /dev/null +++ b/doc/sphinxext/prs/2629.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b1fa700044b4519fd4ee340ec58f4f2a466b8aec", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 4 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/263.json b/doc/sphinxext/prs/263.json new file mode 100644 index 00000000000..6e93c90b473 --- /dev/null +++ b/doc/sphinxext/prs/263.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e5f8ca61b4528f920d96814861e000d8498e22cc", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 157, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 95, + "d": 4 + }, + "mne/utils.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2630.json b/doc/sphinxext/prs/2630.json new file mode 100644 index 00000000000..d01fea0fe3d --- /dev/null +++ b/doc/sphinxext/prs/2630.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4371f76a94e54f3b2c8dcd672b8343d14e3d58be", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 25, + "d": 41 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2631.json b/doc/sphinxext/prs/2631.json new file mode 100644 index 00000000000..cbce1522b3d --- /dev/null +++ b/doc/sphinxext/prs/2631.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "80664105f7f9ad904d9a59db5ed2cfceb191541c", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/report.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/2633.json b/doc/sphinxext/prs/2633.json new file mode 100644 index 00000000000..b8fb527af47 --- /dev/null +++ b/doc/sphinxext/prs/2633.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b6e3de8dd409e946b044c6b4f299abe639764643", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/tests/test_report.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2634.json b/doc/sphinxext/prs/2634.json new file mode 100644 index 00000000000..afeee745e38 --- /dev/null +++ b/doc/sphinxext/prs/2634.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4a263217d2a9d44817e992cf2dc7b1af9d3b282a", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 92, + "d": 29 + }, + "mne/io/brainvision/tests/data/test.vmrk": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/tests/data/testv2.vhdr": { + "a": 107, + "d": 0 + }, + "mne/io/brainvision/tests/data/testv2.vmrk": { + "a": 33, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2637.json b/doc/sphinxext/prs/2637.json new file mode 100644 index 00000000000..8862d176dfb --- /dev/null +++ b/doc/sphinxext/prs/2637.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "e020ef1047ae0cedd8458f670eb68f47e157a83d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 5 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 8 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 6 + }, + "mne/viz/circle.py": { + "a": 2, + "d": 2 + }, + "mne/viz/decoding.py": { + "a": 4, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 13, + "d": 23 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 9 + }, + "mne/viz/ica.py": { + "a": 17, + "d": 37 + }, + "mne/viz/misc.py": { + "a": 6, + "d": 14 + }, + "mne/viz/montage.py": { + "a": 3, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 10, + "d": 16 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 9 + }, + "mne/viz/topomap.py": { + "a": 21, + "d": 27 + }, + "mne/viz/utils.py": { + "a": 13, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2640.json b/doc/sphinxext/prs/2640.json new file mode 100644 index 00000000000..4f0c1b94333 --- /dev/null +++ b/doc/sphinxext/prs/2640.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "148f918923b68051bb64677cec6715d135fd2b4f", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2642.json b/doc/sphinxext/prs/2642.json new file mode 100644 index 00000000000..729ff4aacac --- /dev/null +++ b/doc/sphinxext/prs/2642.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "777916e82149e8f888f972ecba8db9bc752c7206", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 3 + }, + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 2 + }, + "mne/coreg.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 5, + "d": 6 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + 
"a": 10, + "d": 9 + }, + "mne/io/tests/test_pick.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 11, + "d": 11 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 347, + "d": 347 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 6 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2643.json b/doc/sphinxext/prs/2643.json new file mode 100644 index 00000000000..8dc7b99d7f2 --- /dev/null +++ b/doc/sphinxext/prs/2643.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f9820d23269309da09ef44fdd3044a2c7991e9cb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 38, + "d": 9 + }, + "mne/source_space.py": { + "a": 8, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 42, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/265.json b/doc/sphinxext/prs/265.json new file mode 100644 index 00000000000..19b96a017e9 --- /dev/null +++ b/doc/sphinxext/prs/265.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "cb712f48ed9829bedc535c8c507e293871429a4e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/mixed_norm/optim.py": { + "a": 6, + "d": 3 + }, + "mne/tests/test_cov.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_label.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2650.json b/doc/sphinxext/prs/2650.json new file mode 100644 index 00000000000..f3adce86bc4 --- /dev/null +++ b/doc/sphinxext/prs/2650.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a430e2d551f5d47be846311836055ac09bd4f737", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 204, + "d": 31 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 24, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2651.json b/doc/sphinxext/prs/2651.json new file mode 100644 index 00000000000..5245f8dd687 --- /dev/null +++ b/doc/sphinxext/prs/2651.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6da9437c25c9e2595fbb21a52b7b061bca6963f0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 48, + "d": 43 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2654.json b/doc/sphinxext/prs/2654.json new file mode 100644 index 00000000000..37152dec625 --- /dev/null +++ b/doc/sphinxext/prs/2654.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2a6c549cdd40fc1cce761552649979e58d967a33", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/bem.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2656.json b/doc/sphinxext/prs/2656.json new file mode 100644 index 00000000000..7a08bdc3dd0 --- /dev/null +++ b/doc/sphinxext/prs/2656.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4cc4d6907609bd81701eacced142cc8903abc10b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": 
"alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/bem.py": { + "a": 20, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2657.json b/doc/sphinxext/prs/2657.json new file mode 100644 index 00000000000..aaae934270f --- /dev/null +++ b/doc/sphinxext/prs/2657.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d4ab103133759ca7225f19708973cfa54e9a7f09", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 17, + "d": 38 + }, + "mne/tests/test_epochs.py": { + "a": 17, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2658.json b/doc/sphinxext/prs/2658.json new file mode 100644 index 00000000000..809c72bb196 --- /dev/null +++ b/doc/sphinxext/prs/2658.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8360a8e3348eccdaea8da4663470c1eecd924369", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 9, + "d": 3 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2659.json b/doc/sphinxext/prs/2659.json new file mode 100644 index 00000000000..634a963370b --- /dev/null +++ b/doc/sphinxext/prs/2659.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "048923b7e4cb04197d7b7dcf566ece14674b7b22", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 3, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 24, + "d": 0 + }, + "mne/channels/interpolation.py": { + "a": 4, + "d": 4 + }, + "mne/chpi.py": { + "a": 47, + "d": 8 + }, + "mne/epochs.py": { + "a": 181, + "d": 5 + }, + "mne/forward/_field_interpolation.py": { + "a": 55, + "d": 44 + }, + "mne/forward/_lead_dots.py": { + "a": 67, + "d": 62 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/maxwell.py": { + "a": 135, + "d": 114 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 15, + "d": 38 + }, + "mne/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/tests/common.py": { + "a": 48, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 18, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 91, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/266.json b/doc/sphinxext/prs/266.json new file mode 100644 index 00000000000..c81208e84b5 --- /dev/null +++ b/doc/sphinxext/prs/266.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f5d90265f49e15cd3b44491d915211099bcab6b6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2666.json b/doc/sphinxext/prs/2666.json new file mode 100644 index 00000000000..f15f994a135 --- /dev/null +++ b/doc/sphinxext/prs/2666.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3da3e972f404da577f59e0e36f3f0a521187c5d3", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 32, + "d": 10 + }, + "mne/decoding/tests/test_csp.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2667.json b/doc/sphinxext/prs/2667.json new file mode 100644 index 00000000000..e7b1471254c --- /dev/null +++ b/doc/sphinxext/prs/2667.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4713411fd97d8c970fd38bc79481198f62115ba9", + "authors": [ + { + "n": 
"Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/evoked.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2668.json b/doc/sphinxext/prs/2668.json new file mode 100644 index 00000000000..558adeea46c --- /dev/null +++ b/doc/sphinxext/prs/2668.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "c1b029af86bf1304526164fa42edb3ecd7ef3be3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 3, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 10, + "d": 8 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/bti.py": { + "a": 67, + "d": 93 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 24, + "d": 29 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 3 + }, + "mne/io/egi/egi.py": { + "a": 101, + "d": 124 + }, + "mne/io/egi/tests/data/test_egi.txt": { + "a": 257, + "d": 0 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 27, + "d": 10 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 4 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 6 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 29, + "d": 41 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2669.json b/doc/sphinxext/prs/2669.json new file mode 100644 index 00000000000..c9fcc1e7887 --- /dev/null +++ b/doc/sphinxext/prs/2669.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fb00a6e323d318349981925b0d81e4721b96d148", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 5, + "d": 4 + }, + "mne/utils.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/267.json b/doc/sphinxext/prs/267.json new file mode 100644 index 00000000000..f61163840bd --- /dev/null +++ b/doc/sphinxext/prs/267.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "37c378dc6ce7ed7004697b33ebcdb4670950bb2f", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2670.json b/doc/sphinxext/prs/2670.json new file mode 100644 index 00000000000..e30bd925822 --- /dev/null +++ b/doc/sphinxext/prs/2670.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d1b9b5017e3c8c38d135bf370842bcfe4392a28b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 13, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 19, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2673.json b/doc/sphinxext/prs/2673.json new file mode 100644 index 00000000000..560c9d103ce --- /dev/null +++ b/doc/sphinxext/prs/2673.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1154c9ae04849bf011fa216b9256721dcca1a328", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_time_generalization.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 3, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 94, + "d": 34 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2675.json b/doc/sphinxext/prs/2675.json new file mode 100644 index 00000000000..a1f9e41f8d1 --- /dev/null +++ b/doc/sphinxext/prs/2675.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fc851c21f64741571b123541d212c2d5072e99f6", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2676.json b/doc/sphinxext/prs/2676.json new file mode 100644 index 00000000000..1aec631d807 --- /dev/null +++ b/doc/sphinxext/prs/2676.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "f4ffbb42a0bd50150babd1369b65ab1cd2f565cd", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 12, + "d": 4 + }, + "doc/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 34, + "d": 5 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 5, + "d": 120 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 1 + }, + "mne/io/eeglab/__init__.py": { + "a": 5, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 447, + "d": 0 + }, + "mne/io/eeglab/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 85, + "d": 0 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 58, + "d": 61 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 1 + }, + "mne/io/utils.py": { + "a": 164, + "d": 0 + }, + "mne/transforms.py": { + "a": 7, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2678.json b/doc/sphinxext/prs/2678.json new file mode 100644 index 00000000000..6718dca17b8 --- /dev/null +++ b/doc/sphinxext/prs/2678.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "479e4ebfdf297022db64edd27e11d1b6d0fa00ae", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/reference.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2681.json b/doc/sphinxext/prs/2681.json new file mode 100644 index 00000000000..72ad4cb5bdb --- /dev/null +++ b/doc/sphinxext/prs/2681.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "91c08a85b2a4cca5ab50c0ea8b50c11d6c10cd31", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/stim.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2684.json b/doc/sphinxext/prs/2684.json new file mode 100644 index 00000000000..bca6f2644cc --- /dev/null +++ 
b/doc/sphinxext/prs/2684.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0f5dd61b9f778b49b4dae1acbb7490e1086b344b", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2686.json b/doc/sphinxext/prs/2686.json new file mode 100644 index 00000000000..da5204c3ae9 --- /dev/null +++ b/doc/sphinxext/prs/2686.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "34d7e901710337a2bed38393f397a06fc9b4cefe", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/advanced_setup.rst": { + "a": 34, + "d": 5 + }, + "doc/getting_started.rst": { + "a": 3, + "d": 3 + }, + "doc/manual/datasets_index.rst": { + "a": 125, + "d": 0 + }, + "doc/manual/index.rst": { + "a": 5, + "d": 17 + }, + "doc/manual/io.rst": { + "a": 16, + "d": 2 + }, + "doc/manual/memory.rst": { + "a": 44, + "d": 0 + }, + "doc/manual/preprocessing/overview.rst": { + "a": 5, + "d": 3 + }, + "doc/manual/sample_dataset.rst": { + "a": 0, + "d": 0 + }, + "doc/tutorials/mne-report.png": { + "a": 0, + "d": 0 + }, + "doc/tutorials/report.rst": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2689.json b/doc/sphinxext/prs/2689.json new file mode 100644 index 00000000000..141553b5987 --- /dev/null +++ b/doc/sphinxext/prs/2689.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "26a3de0a18b84f01d065948839fb6f671c7f7c9f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/pick.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2695.json b/doc/sphinxext/prs/2695.json new file mode 100644 index 00000000000..de77ce0a427 --- /dev/null +++ b/doc/sphinxext/prs/2695.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "78771fd0ab7d1aef3fcf2e2054ce1c0bd8a4755e", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2696.json b/doc/sphinxext/prs/2696.json new file mode 100644 index 00000000000..3e0fa08a0b9 --- /dev/null +++ b/doc/sphinxext/prs/2696.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "afb8dfbc4b59f4cacdee12478c6f41085ab42d3f", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/time_gen.py": { + "a": 11, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2697.json b/doc/sphinxext/prs/2697.json new file mode 100644 index 00000000000..b22d4ff6f8c --- /dev/null +++ b/doc/sphinxext/prs/2697.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e36331e399756eda0067f4a4da2dd9475c547399", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/advanced_setup.rst": { + "a": 3, + "d": 3 + }, + "doc/getting_started.rst": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 5, + "d": 2 + }, + "tutorials/plot_introduction.py": { + "a": 14, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2699.json b/doc/sphinxext/prs/2699.json new file mode 100644 index 00000000000..109630f2b0d --- /dev/null +++ b/doc/sphinxext/prs/2699.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ddfe0714786e770af16e986c8e65f2580511a2a6", + "authors": [ + { + "n": 
"jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/27.json b/doc/sphinxext/prs/27.json new file mode 100644 index 00000000000..b4452c65ac3 --- /dev/null +++ b/doc/sphinxext/prs/27.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1694b8adbd630fcf6139a00a24dc4c0e5c3f29c1", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/fiff/raw.py": { + "a": 279, + "d": 7 + }, + "mne/fiff/tests/test_raw.py": { + "a": 72, + "d": 0 + }, + "mne/fiff/write.py": { + "a": 7, + "d": 0 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2701.json b/doc/sphinxext/prs/2701.json new file mode 100644 index 00000000000..e26c327e834 --- /dev/null +++ b/doc/sphinxext/prs/2701.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "75193005557fd323e11feb7bfbf0c49de04336b3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 46, + "d": 44 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2703.json b/doc/sphinxext/prs/2703.json new file mode 100644 index 00000000000..1cffad669f1 --- /dev/null +++ b/doc/sphinxext/prs/2703.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "baa884653e918a02506eee290dbf4db14ba3a809", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 14, + "d": 2 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 22, + "d": 0 + }, + "mne/proj.py": { + "a": 8, + "d": 4 + }, + "mne/tests/test_proj.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2704.json b/doc/sphinxext/prs/2704.json new file mode 100644 index 00000000000..5626b047b29 --- /dev/null +++ b/doc/sphinxext/prs/2704.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "a6c1633c5802f51f94f7f0cbb23cbfc113977ee7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/manual/index.rst": { + "a": 1, + "d": 0 + }, + "doc/manual/preprocessing/maxwell.rst": { + "a": 76, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 22, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_movement_compensation.py": { + "a": 52, + "d": 0 + }, + "mne/chpi.py": { + "a": 406, + "d": 225 + }, + "mne/datasets/__init__.py": { + "a": 4, + "d": 3 + }, + "mne/datasets/misc/__init__.py": { + "a": 4, + "d": 0 + }, + "mne/datasets/misc/_misc.py": { + "a": 18, + "d": 0 + }, + "mne/datasets/testing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 42, + "d": 32 + }, + "mne/epochs.py": { + "a": 52, + "d": 22 + }, + "mne/forward/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 3, + "d": 3 + }, + "mne/forward/_make_forward.py": { + "a": 39, + "d": 15 + }, + "mne/io/array/array.py": { + "a": 9, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + 
"d": 1 + }, + "mne/io/base.py": { + "a": 12, + "d": 4 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 478, + "d": 225 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 99, + "d": 18 + }, + "mne/simulation/raw.py": { + "a": 19, + "d": 15 + }, + "mne/simulation/tests/test_raw.py": { + "a": 7, + "d": 7 + }, + "mne/tests/common.py": { + "a": 35, + "d": 15 + }, + "mne/tests/test_chpi.py": { + "a": 68, + "d": 60 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 33, + "d": 19 + }, + "mne/tests/test_transforms.py": { + "a": 62, + "d": 3 + }, + "mne/transforms.py": { + "a": 127, + "d": 0 + }, + "mne/utils.py": { + "a": 23, + "d": 6 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2705.json b/doc/sphinxext/prs/2705.json new file mode 100644 index 00000000000..e3ead4a9289 --- /dev/null +++ b/doc/sphinxext/prs/2705.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c8bb2a36df34a9da29217f56e5d37a968c7ba4f5", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2707.json b/doc/sphinxext/prs/2707.json new file mode 100644 index 00000000000..b1ba5479406 --- /dev/null +++ b/doc/sphinxext/prs/2707.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fba74e044a732841e17b406b569ba34efa7b7dfc", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 11, + "d": 3 + }, + "mne/channels/montage.py": { + "a": 24, + "d": 6 + }, + "mne/channels/tests/test_montage.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2709.json b/doc/sphinxext/prs/2709.json new file mode 100644 index 00000000000..7e87624a8a1 --- /dev/null +++ b/doc/sphinxext/prs/2709.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "81546d7f2011c7495b0e00a382d74c8ffeda0fec", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 6, + "d": 3 + }, + "mne/epochs.py": { + "a": 9, + "d": 5 + }, + "mne/forward/_make_forward.py": { + "a": 62, + "d": 58 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 2, + "d": 3 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 6, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 12, + "d": 12 + }, + "mne/io/pick.py": { + "a": 33, + "d": 18 + }, + "mne/io/tests/test_pick.py": { + "a": 63, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 135, + "d": 52 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 102, + "d": 16 + }, + "mne/tests/common.py": { + "a": 27, + "d": 1 + }, + "mne/tests/test_bem.py": { + "a": 24, + "d": 6 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/271.json b/doc/sphinxext/prs/271.json new file mode 100644 index 00000000000..0ef1d35635a --- /dev/null +++ b/doc/sphinxext/prs/271.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "24c4a1e0f0d203c78991d7ec338d53b27e223008", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/constants.py": { + "a": 4, + "d": 6 + }, + "mne/preprocessing/ica.py": { + "a": 157, + "d": 189 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 23, + "d": 28 + }, + "mne/viz.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2710.json b/doc/sphinxext/prs/2710.json new file mode 100644 index 00000000000..953f25e4768 --- /dev/null +++ b/doc/sphinxext/prs/2710.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "2534cdbf5f7ad55ec24848935f9712b2ec94f67d", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 2, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 6, + "d": 1 + }, + "examples/realtime/ftclient_rt_compute_psd.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 18, + "d": 1 + }, + "examples/time_frequency/plot_epochs_spectra.py": { + "a": 21, + "d": 0 + }, + "examples/visualization/plot_topo_customized.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/transformer.py": { + "a": 10, + "d": 18 + }, + "mne/epochs.py": { + "a": 43, + "d": 23 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/tests/test_raw.py": { + "a": 16, + "d": 8 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 79, + "d": 26 + }, + "mne/time_frequency/psd.py": { + "a": 256, + "d": 20 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 12, + "d": 5 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 121, + "d": 92 + }, + "mne/viz/epochs.py": { + "a": 24, + "d": 14 + }, + "mne/viz/raw.py": { + "a": 8, + "d": 9 + }, + "mne/viz/topomap.py": { + "a": 22, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2712.json b/doc/sphinxext/prs/2712.json new file mode 100644 index 00000000000..30bd1a5a550 --- /dev/null +++ b/doc/sphinxext/prs/2712.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "814dac7c7b6dfa93c8f79546066bd3c912baa8b8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/ctf/ctf.py": { + "a": 33, + "d": 10 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2713.json b/doc/sphinxext/prs/2713.json new file mode 100644 index 00000000000..dfad21b0847 --- /dev/null +++ b/doc/sphinxext/prs/2713.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a8e93eca08e5c60a2ec2382fa5e3d40bd8625ce0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/stats/regression.py": { + "a": 18, + "d": 4 + }, + "mne/stats/tests/test_regression.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2714.json b/doc/sphinxext/prs/2714.json new file mode 100644 index 00000000000..530fbf8e5bc --- /dev/null +++ b/doc/sphinxext/prs/2714.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "8ed962512312b147d123cdf10b2dc818b11b81e5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 7, + "d": 3 + }, + "mne/channels/montage.py": { + "a": 117, + "d": 19 + }, + "mne/channels/tests/test_montage.py": { + "a": 53, + "d": 10 + }, + 
"mne/datasets/utils.py": { + "a": 3, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 12, + "d": 12 + }, + "mne/io/meas_info.py": { + "a": 33, + "d": 20 + }, + "mne/tests/common.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2716.json b/doc/sphinxext/prs/2716.json new file mode 100644 index 00000000000..5e6df916b42 --- /dev/null +++ b/doc/sphinxext/prs/2716.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b4bffef13ec405ff4c712bd2fef50912bb10f53a", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2717.json b/doc/sphinxext/prs/2717.json new file mode 100644 index 00000000000..55c0eb43016 --- /dev/null +++ b/doc/sphinxext/prs/2717.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4a9364557efe3a124d2688848e2f47493e221558", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/this_project.inc": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 12, + "d": 5 + }, + "mne/tests/test_utils.py": { + "a": 2, + "d": 1 + }, + "mne/utils.py": { + "a": 93, + "d": 107 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2718.json b/doc/sphinxext/prs/2718.json new file mode 100644 index 00000000000..29cf72e3d0c --- /dev/null +++ b/doc/sphinxext/prs/2718.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f7f9c9f34eaf241c2d69143d4e1c762a1baef0c4", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 26, + "d": 18 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/272.json b/doc/sphinxext/prs/272.json new file mode 100644 index 00000000000..c6b5bf651f2 --- /dev/null +++ b/doc/sphinxext/prs/272.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "664c86d29f5d0f7046e2bbc68edc8ca2fc26a4e8", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 3, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 126, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 19, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 235, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 13, + "d": 6 + }, + "mne/tests/test_source_estimate.py": { + "a": 86, + "d": 2 + }, + "mne/viz.py": { + "a": 284, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2723.json b/doc/sphinxext/prs/2723.json new file mode 100644 index 00000000000..2035606a4db --- /dev/null +++ b/doc/sphinxext/prs/2723.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "792af3dd160d21d66e8c59e0da12f3d6c841372e", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 5, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2725.json b/doc/sphinxext/prs/2725.json new file mode 100644 index 00000000000..546ab742536 --- /dev/null +++ b/doc/sphinxext/prs/2725.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "be31d6d5523b0dcea2056c16244fc7815f0336ee", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + 
"doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2728.json b/doc/sphinxext/prs/2728.json new file mode 100644 index 00000000000..ca80c0ee0fc --- /dev/null +++ b/doc/sphinxext/prs/2728.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "821dd3d3cf866762114c905423b7b8f12ab64ead", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/contributing.rst": { + "a": 19, + "d": 3 + }, + "doc/manual/decoding.rst": { + "a": 168, + "d": 0 + }, + "doc/manual/index.rst": { + "a": 17, + "d": 1 + }, + "doc/manual/memory.rst": { + "a": 1, + "d": 0 + }, + "doc/manual/pitfalls.rst": { + "a": 27, + "d": 0 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 106, + "d": 38 + }, + "doc/python_reference.rst": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2729.json b/doc/sphinxext/prs/2729.json new file mode 100644 index 00000000000..6bc97afccbc --- /dev/null +++ b/doc/sphinxext/prs/2729.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "51c3fc154bbc0325e8ddd447ce34040f91dc172a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/273.json b/doc/sphinxext/prs/273.json new file mode 100644 index 00000000000..0a3a749ba86 --- /dev/null +++ b/doc/sphinxext/prs/273.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cd113fe0c672be244c01c9e31953e23f45348162", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 57, + "d": 71 + }, + "mne/fiff/raw.py": { + "a": 47, + "d": 17 + }, + "mne/fiff/tests/test_raw.py": { + "a": 13, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 32, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2732.json b/doc/sphinxext/prs/2732.json new file mode 100644 index 00000000000..cb96e51f0a8 --- /dev/null +++ b/doc/sphinxext/prs/2732.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e4f408e0a8ad14e9ae14f24731f0012a4dbf2c0", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 13, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2733.json b/doc/sphinxext/prs/2733.json new file mode 100644 index 00000000000..2d81bb54f10 --- /dev/null +++ b/doc/sphinxext/prs/2733.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d034d63236538eb81387a9521cd2ba93f66df0f9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/epochs.py": { + "a": 24, + "d": 4 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 7, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2734.json b/doc/sphinxext/prs/2734.json new file mode 100644 index 00000000000..d0adb35c919 --- /dev/null +++ b/doc/sphinxext/prs/2734.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "b9db9c2dada4ea5179514fb7c779f47d5443e5b6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 8, + "d": 0 + }, + "mne/channels/channels.py": { + 
"a": 48, + "d": 35 + }, + "mne/channels/tests/test_channels.py": { + "a": 4, + "d": 2 + }, + "mne/cov.py": { + "a": 6, + "d": 6 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 11, + "d": 3 + }, + "mne/io/proj.py": { + "a": 10, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 30, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2735.json b/doc/sphinxext/prs/2735.json new file mode 100644 index 00000000000..83f76db7799 --- /dev/null +++ b/doc/sphinxext/prs/2735.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "871243a80239b0b77f7c3e244823a47c4c8d22ff", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2736.json b/doc/sphinxext/prs/2736.json new file mode 100644 index 00000000000..df708b58793 --- /dev/null +++ b/doc/sphinxext/prs/2736.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "76358323962c0ad00f7a04d4251840435e0b7d49", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_maxwell.py": { + "a": 40, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2737.json b/doc/sphinxext/prs/2737.json new file mode 100644 index 00000000000..c6641272e0f --- /dev/null +++ b/doc/sphinxext/prs/2737.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d60e40c1447147e745813ff7057ea1922d532bc0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 12, + "d": 30 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2738.json b/doc/sphinxext/prs/2738.json new file mode 100644 index 00000000000..1c1f3d36905 --- /dev/null +++ b/doc/sphinxext/prs/2738.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "823ec507c3ee78d8791c546340c154028afa47eb", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_joint_evoked_topomap.py": { + "a": 34, + "d": 0 + }, + "mne/evoked.py": { + "a": 52, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 180, + "d": 3 + }, + "mne/viz/tests/test_topo.py": { + "a": 5, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 7, + "d": 45 + }, + "mne/viz/utils.py": { + "a": 50, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2739.json b/doc/sphinxext/prs/2739.json new file mode 100644 index 00000000000..a4322c6761c --- /dev/null +++ b/doc/sphinxext/prs/2739.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "dda326cafc8b55e3c0749ee85bc2bea229b91762", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/cov.py": { + "a": 9, + "d": 2 + }, + "mne/decoding/ems.py": { + "a": 13, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2741.json b/doc/sphinxext/prs/2741.json new file mode 100644 index 00000000000..b88c58734d3 --- /dev/null +++ b/doc/sphinxext/prs/2741.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "68f9a9fc34c6762d72d330782e21b5e9ec6e3bac", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_reference.py": { + "a": 28, + "d": 2 + } + } +} \ No newline 
at end of file diff --git a/doc/sphinxext/prs/2743.json b/doc/sphinxext/prs/2743.json new file mode 100644 index 00000000000..be2594f9ed3 --- /dev/null +++ b/doc/sphinxext/prs/2743.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1fba8c52d2de3cdff225f0bd122f7d3682a7265d", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2755.json b/doc/sphinxext/prs/2755.json new file mode 100644 index 00000000000..a11f964e637 --- /dev/null +++ b/doc/sphinxext/prs/2755.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "03888d2c6a2347922a1cd9558f5a551acba4c66d", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 20, + "d": 6 + }, + "mne/decoding/time_gen.py": { + "a": 41, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2758.json b/doc/sphinxext/prs/2758.json new file mode 100644 index 00000000000..4feb3ddb102 --- /dev/null +++ b/doc/sphinxext/prs/2758.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "919564156ced45329039cf4bd277396c5653f653", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/manual/channel_interpolation.rst": { + "a": 74, + "d": 0 + }, + "doc/manual/index.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/interpolation.py": { + "a": 8, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/276.json b/doc/sphinxext/prs/276.json new file mode 100644 index 00000000000..70f982a60ab --- /dev/null +++ b/doc/sphinxext/prs/276.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4c19b12ad4e79f16c4d39c0e0679f60100e5ad2b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 7, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 50, + "d": 0 + }, + "mne/viz.py": { + "a": 83, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2761.json b/doc/sphinxext/prs/2761.json new file mode 100644 index 00000000000..b8204e837ec --- /dev/null +++ b/doc/sphinxext/prs/2761.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "dbb30a703dd9052994ed8907eaa165a457487ea6", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/Makefile": { + "a": 19, + "d": 13 + }, + "doc/conf.py": { + "a": 6, + "d": 8 + }, + "doc/contributing.rst": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2762.json b/doc/sphinxext/prs/2762.json new file mode 100644 index 00000000000..f11edbda4f9 --- /dev/null +++ b/doc/sphinxext/prs/2762.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6476eac6bab76ce448073d9569648cf86db357f0", + "authors": [ + { + "n": "Sagun Pai", + "e": null + } + ], + "changes": { + "LICENSE.txt": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2764.json b/doc/sphinxext/prs/2764.json new file mode 100644 index 00000000000..e3a059b92f2 --- /dev/null +++ b/doc/sphinxext/prs/2764.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aea8bc93b4319a9922cc80c45e30166b8ceab5b6", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/io/tests/test_meas_info.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/2765.json b/doc/sphinxext/prs/2765.json new file mode 100644 index 00000000000..d7e7bfb62f8 --- /dev/null +++ b/doc/sphinxext/prs/2765.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "ec0e0791cf7d7e81729e158cc6516a9aa5229014", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 0, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 0, + "d": 2 + }, + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 3 + }, + "mne/evoked.py": { + "a": 0, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 0, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 6, + "d": 7 + }, + "mne/forward/forward.py": { + "a": 0, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 6, + "d": 7 + }, + "mne/io/bti/bti.py": { + "a": 0, + "d": 2 + }, + "mne/io/ctf/info.py": { + "a": 0, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 0, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 0, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 4, + "d": 7 + }, + "mne/io/meas_info.py": { + "a": 192, + "d": 30 + }, + "mne/io/nicolet/nicolet.py": { + "a": 2, + "d": 3 + }, + "mne/io/pick.py": { + "a": 8, + "d": 6 + }, + "mne/io/reference.py": { + "a": 0, + "d": 3 + }, + "mne/io/tests/test_meas_info.py": { + "a": 142, + "d": 3 + }, + "mne/io/tests/test_pick.py": { + "a": 0, + "d": 8 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 5 + }, + "mne/preprocessing/maxwell.py": { + "a": 0, + "d": 2 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 2 + }, + "mne/utils.py": { + "a": 7, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2767.json b/doc/sphinxext/prs/2767.json new file mode 100644 index 00000000000..c0e18b69cfe --- /dev/null +++ b/doc/sphinxext/prs/2767.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "57c520e0585c04bc37894f9c8ea0a2ed3c466b29", + "authors": [ + { + "n": "Kambiz Tavabi", + "e": "ktavabi@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2769.json b/doc/sphinxext/prs/2769.json new file mode 100644 index 00000000000..90638a9d2cd --- /dev/null +++ b/doc/sphinxext/prs/2769.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "fd2ccb6908ceb806702ccbea77f206260fa129c9", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 5, + "d": 17 + }, + "mne/io/bti/bti.py": { + "a": 23, + "d": 12 + }, + "mne/io/egi/egi.py": { + "a": 6, + "d": 15 + }, + "mne/io/kit/kit.py": { + "a": 42, + "d": 36 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 1 + }, + "mne/io/utils.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2777.json b/doc/sphinxext/prs/2777.json new file mode 100644 index 00000000000..d1bdee206ab --- /dev/null +++ b/doc/sphinxext/prs/2777.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9a0ce3c3b36b0da788449adcf7fd8187b19cfb5e", + "authors": [ + { + "n": "Alexandre Gramfort", + 
"e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 37, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2779.json b/doc/sphinxext/prs/2779.json new file mode 100644 index 00000000000..80266a1461f --- /dev/null +++ b/doc/sphinxext/prs/2779.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1e8b0fe292fa8013ab4f3837097a89d2dfb5fd77", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 5, + "d": 1 + }, + "mne/stats/tests/test_regression.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2781.json b/doc/sphinxext/prs/2781.json new file mode 100644 index 00000000000..af6c6756958 --- /dev/null +++ b/doc/sphinxext/prs/2781.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4ca4d7594c1476a4c0327d49a5d72de70d7c65e5", + "authors": [ + { + "n": "FuTe Wong", + "e": "zuxfoucault@gmail.com" + } + ], + "changes": { + "mne/event.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2783.json b/doc/sphinxext/prs/2783.json new file mode 100644 index 00000000000..9a7f3c38e96 --- /dev/null +++ b/doc/sphinxext/prs/2783.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "7711dcdc30d35867f366e74afe44af3156386c1b", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/manual/memory.rst": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 90, + "d": 0 + }, + "mne/io/base.py": { + "a": 23, + "d": 4 + }, + "mne/io/constants.py": { + "a": 3, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 38, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 8, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 58, + "d": 0 + }, + "mne/utils.py": { + "a": 0, + "d": 9 + }, + "mne/viz/raw.py": { + "a": 59, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2785.json b/doc/sphinxext/prs/2785.json new file mode 100644 index 00000000000..96624b7addf --- /dev/null +++ b/doc/sphinxext/prs/2785.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1e6af29a8e3cccbb94addea534462261ca555cd0", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_reference.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/stim.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2786.json b/doc/sphinxext/prs/2786.json new file mode 100644 index 00000000000..7c7bb5ca0e2 --- /dev/null +++ b/doc/sphinxext/prs/2786.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cb5ad96b347cbd55ae06c0a5f7517c09ec24dc41", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 17, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2791.json b/doc/sphinxext/prs/2791.json new file mode 100644 index 00000000000..dadd4b4c959 --- /dev/null +++ b/doc/sphinxext/prs/2791.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cf8e5d625ba2ebe1593a882eed7a49b02433f8b1", + "authors": [ + { + "n": "Jona 
Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 10, + "d": 3 + }, + "mne/stats/tests/test_regression.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2797.json b/doc/sphinxext/prs/2797.json new file mode 100644 index 00000000000..4d9454853e4 --- /dev/null +++ b/doc/sphinxext/prs/2797.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ebe2bfaf53f5617fd3529d47d5395a13136f56f5", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2798.json b/doc/sphinxext/prs/2798.json new file mode 100644 index 00000000000..ed3960eb34f --- /dev/null +++ b/doc/sphinxext/prs/2798.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "51595b55e9e357d8a95ba7f0dc320f5da1761899", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/dipole.py": { + "a": 3, + "d": 5 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/280.json b/doc/sphinxext/prs/280.json new file mode 100644 index 00000000000..f0f7b9cdcad --- /dev/null +++ b/doc/sphinxext/prs/280.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0329e3f4da544f313912c8485e7a85d0a305281f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/tests/test_raw.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2802.json b/doc/sphinxext/prs/2802.json new file mode 100644 index 00000000000..2130fdcbda9 --- /dev/null +++ b/doc/sphinxext/prs/2802.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "e79aead7cb4302cccaad614c530d63476e7aec05", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 4, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 4, + "d": 3 + }, + "mne/io/fiff/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 7, + "d": 7 + }, + "mne/io/tests/test_raw.py": { + "a": 4, + "d": 3 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 9, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 6 + }, + "mne/utils.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/281.json b/doc/sphinxext/prs/281.json new file mode 100644 index 00000000000..55c2ff9b5bb --- /dev/null +++ b/doc/sphinxext/prs/281.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e6ca398a5d1c4667136a819f7b1630a1d00cab86", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + 
"mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/forward.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2819.json b/doc/sphinxext/prs/2819.json new file mode 100644 index 00000000000..4401871a123 --- /dev/null +++ b/doc/sphinxext/prs/2819.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b5f225538295ae6489d1bc5dd1d515d3e1760b8d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 1, + "d": 0 + }, + "mne/utils.py": { + "a": 16, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/282.json b/doc/sphinxext/prs/282.json new file mode 100644 index 00000000000..f33423e0972 --- /dev/null +++ b/doc/sphinxext/prs/282.json @@ -0,0 +1,159 @@ +{ + "merge_commit_sha": "adcdb20b96e6bae91e2f5bd6d2bcd1dfee9777a7", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "bin/mne_clean_eog_ecg.py": { + "a": 6, + "d": 6 + }, + "bin/mne_flash_bem_model.py": { + "a": 5, + "d": 4 + }, + "mne/baseline.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 18, + "d": 5 + }, + "mne/epochs.py": { + "a": 15, + "d": 15 + }, + "mne/event.py": { + "a": 6, + "d": 6 + }, + "mne/fiff/channels.py": { + "a": 3, + "d": 3 + }, + "mne/fiff/cov.py": { + "a": 6, + "d": 6 + }, + "mne/fiff/ctf.py": { + "a": 6, + "d": 6 + }, + "mne/fiff/matrix.py": { + "a": 6, + "d": 6 + }, + "mne/fiff/meas_info.py": { + "a": 7, + "d": 7 + }, + "mne/fiff/open.py": { + "a": 4, + "d": 4 + }, + "mne/fiff/proj.py": { + "a": 4, + "d": 4 + }, + "mne/fiff/raw.py": { + "a": 19, + "d": 19 + }, + "mne/fiff/tag.py": { + "a": 8, + "d": 8 + }, + "mne/filter.py": { + "a": 12, + "d": 12 + }, + "mne/fixes.py": { + "a": 0, + "d": 1 + }, + "mne/forward.py": { + "a": 5, + "d": 5 + }, + "mne/label.py": { + "a": 11, + "d": 11 + }, + "mne/layouts/layout.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 9, + "d": 10 + }, + "mne/preprocessing/ecg.py": { + "a": 6, + "d": 6 + }, + "mne/preprocessing/maxfilter.py": { + "a": 21, + "d": 21 + }, + "mne/preprocessing/peak_finder.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ssp.py": { + "a": 83, + "d": 83 + }, + "mne/preprocessing/stim.py": { + "a": 3, + "d": 3 + }, + "mne/proj.py": { + "a": 17, + "d": 18 + }, + "mne/source_estimate.py": { + "a": 12, + "d": 11 + }, + "mne/source_space.py": { + "a": 4, + "d": 4 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 31, + "d": 31 + }, + "mne/time_frequency/psd.py": { + "a": 10, + "d": 10 + }, + "mne/time_frequency/stft.py": { + "a": 8, + "d": 8 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + }, + "mne/viz.py": { + "a": 14, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2821.json b/doc/sphinxext/prs/2821.json new file mode 100644 index 00000000000..b02d8bd36d0 --- /dev/null +++ b/doc/sphinxext/prs/2821.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "045abeb46be8088f5fd8c3dd0415f5a4f79a182a", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/manual/cookbook.rst": { + "a": 13, + "d": 0 + }, + "doc/manual/preprocessing/annotations.rst": { + "a": 35, + "d": 0 + }, + "doc/tutorials.rst": { + "a": 3, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + 
"mne/annotations.py": { + "a": 28, + "d": 5 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 32, + "d": 14 + }, + "mne/io/base.py": { + "a": 41, + "d": 8 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/stats/regression.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 11, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 10, + "d": 24 + }, + "mne/viz/utils.py": { + "a": 13, + "d": 10 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 357, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2824.json b/doc/sphinxext/prs/2824.json new file mode 100644 index 00000000000..18a34c0c6f4 --- /dev/null +++ b/doc/sphinxext/prs/2824.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d9d9f9a0c1df3b1a36d1aa2abc3a76a16825375c", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2826.json b/doc/sphinxext/prs/2826.json new file mode 100644 index 00000000000..b8abaf77bae --- /dev/null +++ b/doc/sphinxext/prs/2826.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "c49d7f686031a016335a98f421909a1c2f35c4ca", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 0, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 8, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 0, + "d": 1 + }, + "mne/utils.py": { + "a": 16, + "d": 11 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/283.json b/doc/sphinxext/prs/283.json new file mode 100644 index 00000000000..d05ec64ebcb --- /dev/null +++ b/doc/sphinxext/prs/283.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5df5a6996192c7541e8e2dbb33a104be09a4f8e0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 2, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 15, + "d": 78 + }, + "mne/viz.py": { + "a": 55, + "d": 307 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2830.json b/doc/sphinxext/prs/2830.json new file mode 100644 index 00000000000..d110bcca674 --- /dev/null +++ b/doc/sphinxext/prs/2830.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a7cf79130bf306ca4c47d2d1e74f3f3f653fbb52", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 12, + "d": 0 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2833.json b/doc/sphinxext/prs/2833.json new file mode 100644 index 00000000000..1dbc928b2af --- /dev/null +++ 
b/doc/sphinxext/prs/2833.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e4c07c7dc5a22b158874f39fb4d8997c77c6a219", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 8, + "d": 0 + }, + "mne/decoding/time_gen.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2836.json b/doc/sphinxext/prs/2836.json new file mode 100644 index 00000000000..5bb56ff74f6 --- /dev/null +++ b/doc/sphinxext/prs/2836.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9cda691349854df2d08eb921e323555fc9b867ff", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 10, + "d": 1 + }, + "mne/utils.py": { + "a": 52, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2837.json b/doc/sphinxext/prs/2837.json new file mode 100644 index 00000000000..3fe38d68051 --- /dev/null +++ b/doc/sphinxext/prs/2837.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "eff4d66788d0fb2567df325561bfb9c9e3df3419", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_csp.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2839.json b/doc/sphinxext/prs/2839.json new file mode 100644 index 00000000000..beba42fb39a --- /dev/null +++ b/doc/sphinxext/prs/2839.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "17035dfbe2ed130f3c6e7dd82c1798a2ef96383c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 3, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 17, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/284.json b/doc/sphinxext/prs/284.json new file mode 100644 index 00000000000..8cb0f475c71 --- /dev/null +++ b/doc/sphinxext/prs/284.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a28a9942b94a1ad9752accfefce88f48b6e89aee", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 5, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2843.json b/doc/sphinxext/prs/2843.json new file mode 100644 index 00000000000..b45b1cba0e3 --- /dev/null +++ b/doc/sphinxext/prs/2843.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "242089e518b76d80e3fb5228647e2d82bfc51593", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 13, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 0, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2844.json b/doc/sphinxext/prs/2844.json new file mode 100644 index 00000000000..b1ffeb64537 --- /dev/null +++ b/doc/sphinxext/prs/2844.json @@ -0,0 +1,391 @@ +{ + "merge_commit_sha": "a61531d68ca55e9389855a2d87bc623bcbcb517c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/_dics.py": { + "a": 4, + "d": 6 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 4 + }, + "mne/bem.py": { + "a": 5, + "d": 5 + }, + "mne/channels/channels.py": { + "a": 
3, + "d": 6 + }, + "mne/channels/interpolation.py": { + "a": 3, + "d": 3 + }, + "mne/channels/montage.py": { + "a": 7, + "d": 8 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 4, + "d": 4 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 2 + }, + "mne/cov.py": { + "a": 10, + "d": 14 + }, + "mne/cuda.py": { + "a": 7, + "d": 9 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 3 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 31, + "d": 15 + }, + "mne/decoding/time_gen.py": { + "a": 4, + "d": 5 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 20, + "d": 26 + }, + "mne/event.py": { + "a": 3, + "d": 5 + }, + "mne/evoked.py": { + "a": 2, + "d": 3 + }, + "mne/filter.py": { + "a": 17, + "d": 18 + }, + "mne/forward/forward.py": { + "a": 12, + "d": 13 + }, + "mne/forward/tests/test_forward.py": { + "a": 4, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 8, + "d": 9 + }, + "mne/io/base.py": { + "a": 12, + "d": 17 + }, + "mne/io/brainvision/brainvision.py": { + "a": 6, + "d": 8 + }, + "mne/io/bti/bti.py": { + "a": 5, + "d": 6 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 22, + "d": 14 + }, + "mne/io/ctf/info.py": { + "a": 3, + "d": 4 + }, + "mne/io/edf/edf.py": { + "a": 12, + "d": 13 + }, + "mne/io/eeglab/eeglab.py": { + "a": 7, + "d": 8 + }, + "mne/io/egi/egi.py": { + "a": 3, + "d": 4 + }, + "mne/io/fiff/raw.py": { + "a": 4, + "d": 5 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 16, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 6, + "d": 8 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 2 + }, + "mne/io/proc_history.py": { + "a": 2, + "d": 2 + }, + "mne/io/proj.py": { + "a": 4, + "d": 5 + }, + "mne/io/reference.py": { + "a": 3, + "d": 3 + }, + "mne/io/tests/test_pick.py": { + "a": 4, + "d": 2 + }, + "mne/io/tests/test_reference.py": { + "a": 2, + "d": 1 + }, + "mne/label.py": { + "a": 11, + "d": 13 + }, + "mne/minimum_norm/inverse.py": { + "a": 4, + "d": 5 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 5 + }, + "mne/parallel.py": { + "a": 5, + "d": 6 + }, + "mne/preprocessing/ecg.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 8, + "d": 10 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ctps.py": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 35, + "d": 28 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 6, + "d": 8 + }, + "mne/report.py": { + "a": 15, + "d": 17 + }, + "mne/simulation/raw.py": { + "a": 4, + "d": 5 + }, + "mne/simulation/source.py": { + "a": 4, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 12, + "d": 14 + }, + "mne/source_space.py": { + "a": 6, + "d": 7 + }, + "mne/stats/cluster_level.py": { + "a": 8, + "d": 10 + }, + "mne/stats/regression.py": { + "a": 5, + "d": 6 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 10, + "d": 9 + }, + "mne/stats/tests/test_regression.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 6, + "d": 8 + }, + "mne/tests/common.py": { + "a": 22, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 10, + "d": 7 + }, + "mne/tests/test_chpi.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 1 + }, + 
"mne/tests/test_dipole.py": { + "a": 19, + "d": 7 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_event.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_label.py": { + "a": 23, + "d": 15 + }, + "mne/tests/test_proj.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 5, + "d": 3 + }, + "mne/tests/test_surface.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/_stockwell.py": { + "a": 3, + "d": 4 + }, + "mne/time_frequency/csd.py": { + "a": 5, + "d": 6 + }, + "mne/time_frequency/multitaper.py": { + "a": 5, + "d": 9 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 10, + "d": 6 + }, + "mne/time_frequency/tfr.py": { + "a": 5, + "d": 5 + }, + "mne/utils.py": { + "a": 52, + "d": 11 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 12 + }, + "mne/viz/decoding.py": { + "a": 3, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 4, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 8, + "d": 9 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 4, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 6, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2849.json b/doc/sphinxext/prs/2849.json new file mode 100644 index 00000000000..890e91125fe --- /dev/null +++ b/doc/sphinxext/prs/2849.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "f1a3e085aca64d88afaca15dfeef406af24ddd91", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 66, + "d": 0 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/forward/plot_make_forward.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/plot_simulate_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 8, + "d": 6 + }, + "mne/datasets/megsim/megsim.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/megsim/urls.py": { + "a": 7, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 29, + "d": 25 + }, + "mne/forward/_lead_dots.py": { + "a": 26, + "d": 13 + }, + "tutorials/plot_cluster_methods_tutorial.py": { + "a": 6, + "d": 5 + }, + "tutorials/plot_cluster_stats_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_spatio_temporal_cluster_stats_sensor.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/2851.json b/doc/sphinxext/prs/2851.json new file mode 100644 index 00000000000..818743680e0 --- /dev/null +++ b/doc/sphinxext/prs/2851.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6a053953470b856254390cf13c2e7c06d82bf0d1", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2857.json b/doc/sphinxext/prs/2857.json new file mode 100644 index 00000000000..5176cadfac5 --- /dev/null +++ b/doc/sphinxext/prs/2857.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ea6689e43870f87c39b2051b56bf455b589aa26c", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 44, + "d": 13 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 32, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2861.json b/doc/sphinxext/prs/2861.json new file mode 100644 index 00000000000..7fe7ada4b0f --- /dev/null +++ b/doc/sphinxext/prs/2861.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a87d7cbb6e3a3ffd27236b28d825f94f4284127d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 0 + }, + "mne/io/proj.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 6, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2863.json b/doc/sphinxext/prs/2863.json new file mode 100644 index 00000000000..bc474ec9ce6 --- /dev/null +++ b/doc/sphinxext/prs/2863.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "724ee0b2edd969f0679c78f0733e7be4b56a33c5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 6, + "d": 2 + }, + "mne/evoked.py": { + "a": 1, + "d": 0 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 3, + "d": 4 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 28, + "d": 23 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 12, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 1, + "d": 0 + }, + "mne/utils.py": { + "a": 11, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2865.json b/doc/sphinxext/prs/2865.json new file mode 100644 index 00000000000..7258e2cccd6 --- /dev/null +++ b/doc/sphinxext/prs/2865.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ba4521f97495edbe55ce281843ee1e9ad92972bb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 0, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 0, + "d": 2 + }, + "mne/io/proj.py": { + "a": 1, + "d": 3 + }, + "mne/io/tag.py": { + "a": 291, + "d": 261 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2868.json b/doc/sphinxext/prs/2868.json new file mode 100644 index 00000000000..7ec14ef817a --- /dev/null +++ b/doc/sphinxext/prs/2868.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9edc61acebec2b76b391f78c6c23665caf004925", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" 
+ } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/cov.py": { + "a": 153, + "d": 109 + }, + "mne/event.py": { + "a": 2, + "d": 1 + }, + "mne/tests/common.py": { + "a": 10, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 43, + "d": 24 + }, + "mne/tests/test_event.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/287.json b/doc/sphinxext/prs/287.json new file mode 100644 index 00000000000..c51b4b68d8a --- /dev/null +++ b/doc/sphinxext/prs/287.json @@ -0,0 +1,131 @@ +{ + "merge_commit_sha": "a9d769ed7c7b29ed706216e26d43691a38ce6e47", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/_static/branch_dropdown.png": { + "a": 0, + "d": 0 + }, + "doc/source/_static/forking_button.png": { + "a": 0, + "d": 0 + }, + "doc/source/_static/pull_button.png": { + "a": 0, + "d": 0 + }, + "doc/source/contributing.rst": { + "a": 649, + "d": 0 + }, + "doc/source/customizing_git.rst": { + "a": 7, + "d": 7 + }, + "doc/source/git_links.inc": { + "a": 27, + "d": 17 + }, + "doc/source/gitwash/branch_list.png": { + "a": 0, + "d": 0 + }, + "doc/source/gitwash/branch_list_compare.png": { + "a": 0, + "d": 0 + }, + "doc/source/gitwash/development_workflow.rst": { + "a": 0, + "d": 265 + }, + "doc/source/gitwash/following_latest.rst": { + "a": 0, + "d": 36 + }, + "doc/source/gitwash/forking_hell.rst": { + "a": 0, + "d": 33 + }, + "doc/source/gitwash/git_development.rst": { + "a": 0, + "d": 16 + }, + "doc/source/gitwash/git_install.rst": { + "a": 0, + "d": 26 + }, + "doc/source/gitwash/git_intro.rst": { + "a": 0, + "d": 18 + }, + "doc/source/gitwash/git_resources.rst": { + "a": 0, + "d": 59 + }, + "doc/source/gitwash/index.rst": { + "a": 0, + "d": 16 + }, + "doc/source/gitwash/patching.rst": { + "a": 0, + "d": 134 + }, + "doc/source/gitwash/set_up_fork.rst": { + "a": 0, + "d": 68 + }, + "doc/source/gitwash/setup.sh": { + "a": 0, + "d": 3 + }, + "doc/source/gitwash/this_project.inc": { + "a": 0, + "d": 5 + }, + "doc/source/known_projects.inc": { + "a": 6, + "d": 1 + }, + "doc/source/links.inc": { + "a": 0, + "d": 0 + }, + "doc/source/mne-python.rst": { + "a": 1, + "d": 1 + }, + "doc/source/this_project.inc": { + "a": 5, + "d": 0 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 5, + "d": 1 + }, + "examples/inverse/plot_dipole_fit_result.py": { + "a": 5, + "d": 1 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 5, + "d": 1 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 4, + "d": 1 + }, + "examples/plot_read_bem_surfaces.py": { + "a": 5, + "d": 1 + }, + "examples/plot_read_forward.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2880.json b/doc/sphinxext/prs/2880.json new file mode 100644 index 00000000000..a4162b3424a --- /dev/null +++ b/doc/sphinxext/prs/2880.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "672493edcbf45b444c288b4c4acbc08d7c062f94", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_dipole.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 14, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2882.json b/doc/sphinxext/prs/2882.json new file mode 100644 index 00000000000..5f1628c32cf --- /dev/null +++ b/doc/sphinxext/prs/2882.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "dee6196375ec4f30bc7681fc5a707b12ed4236ba", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + 
} + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/ctf.py": { + "a": 7, + "d": 1 + }, + "mne/io/ctf/eeg.py": { + "a": 47, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2883.json b/doc/sphinxext/prs/2883.json new file mode 100644 index 00000000000..d43f7ccb56f --- /dev/null +++ b/doc/sphinxext/prs/2883.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2b532f331404c78d0aebb5997d4b042a81ae69a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 15, + "d": 7 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 22, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2885.json b/doc/sphinxext/prs/2885.json new file mode 100644 index 00000000000..f48ec9fc7ea --- /dev/null +++ b/doc/sphinxext/prs/2885.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "32bb34fd97b7e652ace090a38fe37b0a45dfde5c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2888.json b/doc/sphinxext/prs/2888.json new file mode 100644 index 00000000000..44894d233b0 --- /dev/null +++ b/doc/sphinxext/prs/2888.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "17ba5e3ef0c0d90401fab63cd1862fb20cacc330", + "authors": [ + { + "n": "Christopher J. Bailey", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2889.json b/doc/sphinxext/prs/2889.json new file mode 100644 index 00000000000..594a92fdc1f --- /dev/null +++ b/doc/sphinxext/prs/2889.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b4d089e1a9088ec6dac237b4c323395e7005b897", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/time_frequency/psd.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/289.json b/doc/sphinxext/prs/289.json new file mode 100644 index 00000000000..83516584854 --- /dev/null +++ b/doc/sphinxext/prs/289.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "20fc139f6da759cf8a78b8fe0666060ada0b29d7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/tests/test_evoked.py": { + "a": 4, + "d": 21 + }, + "mne/fiff/tests/test_raw.py": { + "a": 4, + "d": 20 + }, + "mne/layouts/tests/test_layout.py": { + "a": 2, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 5, + "d": 4 + }, + "mne/mixed_norm/tests/test_optim.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 21 + }, + "mne/tests/test_event.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_label.py": { + "a": 4, + "d": 6 + }, + "mne/tests/test_proj.py": { + "a": 6, + "d": 6 + }, + "mne/tests/test_source_estimate.py": { + "a": 7, + "d": 8 + }, + "mne/tests/test_source_space.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 3, + "d": 4 + }, + "mne/tests/test_utils.py": { + "a": 4, + "d": 2 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 5, + "d": 10 + }, + 
"mne/utils.py": { + "a": 64, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2890.json b/doc/sphinxext/prs/2890.json new file mode 100644 index 00000000000..009b8b2f4f1 --- /dev/null +++ b/doc/sphinxext/prs/2890.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "15b81e358f39b1746911cc72009a164999a49215", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2891.json b/doc/sphinxext/prs/2891.json new file mode 100644 index 00000000000..978302a167f --- /dev/null +++ b/doc/sphinxext/prs/2891.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "85b1dcd1697b64d22ea3b50e50c65e098229dba4", + "authors": [ + { + "n": "Christopher J. Bailey", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2893.json b/doc/sphinxext/prs/2893.json new file mode 100644 index 00000000000..0a4c47f5b00 --- /dev/null +++ b/doc/sphinxext/prs/2893.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0fad905afb248b023aa7b427f72ba4531823c8f6", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 4, + "d": 0 + }, + "mne/utils.py": { + "a": 6, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2895.json b/doc/sphinxext/prs/2895.json new file mode 100644 index 00000000000..8d92c0a588b --- /dev/null +++ b/doc/sphinxext/prs/2895.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "722ac102aa37a77c02cabf10ebfaecb2f76540dc", + "authors": [ + { + "n": "Christopher J. Bailey", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2896.json b/doc/sphinxext/prs/2896.json new file mode 100644 index 00000000000..6498d2e30a1 --- /dev/null +++ b/doc/sphinxext/prs/2896.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "df5cdae8c78f519f26ba14e5ea9ba73519acf730", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/evoked.py": { + "a": 4, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/29.json b/doc/sphinxext/prs/29.json new file mode 100644 index 00000000000..48eef63d628 --- /dev/null +++ b/doc/sphinxext/prs/29.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "ef31128ae7504e4ad5c4b812190595e8a2a6085d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/ctf.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/matrix.py": { + "a": 42, + "d": 20 + }, + "mne/fiff/write.py": { + "a": 40, + "d": 66 + }, + "mne/minimum_norm/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 113, + "d": 18 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 40, + "d": 4 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 97, + "d": 2 + }, + "mne/surface.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/290.json b/doc/sphinxext/prs/290.json new file mode 100644 index 00000000000..42f8d1eabe8 --- /dev/null +++ b/doc/sphinxext/prs/290.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0b0cfaad1ee679654e0a314f322941870ac6bda3", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 6, + "d": 6 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 38, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 20, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2902.json b/doc/sphinxext/prs/2902.json new file mode 100644 index 00000000000..32a99313d40 --- /dev/null +++ b/doc/sphinxext/prs/2902.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8b3a6441b1076340acbd8392fc2962b2fac4020a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2903.json b/doc/sphinxext/prs/2903.json new file mode 100644 index 00000000000..257b04d9792 --- /dev/null +++ b/doc/sphinxext/prs/2903.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bcf9b597231d0cca33aa03a55ec409d7ff9747ed", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/evoked.py": { + "a": 7, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2905.json b/doc/sphinxext/prs/2905.json new file mode 100644 index 00000000000..341a5f0fc65 --- /dev/null +++ b/doc/sphinxext/prs/2905.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fe4e3248e1d373c533f132f4d70bdb0dacd85e7c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 89, + "d": 52 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 56, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2907.json b/doc/sphinxext/prs/2907.json new file mode 100644 index 00000000000..c7e3bca4dc0 --- /dev/null +++ 
b/doc/sphinxext/prs/2907.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6b8c4a89c6ff3cdb0b70bb5ca0cd721db34bede3", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/index.rst": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/291.json b/doc/sphinxext/prs/291.json new file mode 100644 index 00000000000..575ade5bf59 --- /dev/null +++ b/doc/sphinxext/prs/291.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "528e3ff3a9a0a2756d7940fcfb5769b0f25c3bf7", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/label.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2910.json b/doc/sphinxext/prs/2910.json new file mode 100644 index 00000000000..ce7b8fdc5fb --- /dev/null +++ b/doc/sphinxext/prs/2910.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "757e637886d5f383a043c8aabfe705a7e172557e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 1 + }, + "mne/bem.py": { + "a": 68, + "d": 23 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 3 + }, + "mne/channels/tests/test_layout.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 3 + }, + "mne/tests/common.py": { + "a": 6, + "d": 5 + }, + "mne/tests/test_bem.py": { + "a": 67, + "d": 35 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2911.json b/doc/sphinxext/prs/2911.json new file mode 100644 index 00000000000..51bc1905591 --- /dev/null +++ b/doc/sphinxext/prs/2911.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3d4c74297b411e9a414bbeb5b3be6e23bac956f0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/tag.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2913.json b/doc/sphinxext/prs/2913.json new file mode 100644 index 00000000000..e7a3d1af463 --- /dev/null +++ b/doc/sphinxext/prs/2913.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f82bf0b75655b88f438abaf06594c1c9aeac87c2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/fiff/raw.py": { + "a": 6, + "d": 2 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 16, + "d": 29 + }, + "mne/simulation/tests/test_raw.py": { + "a": 2, + "d": 4 + }, + "mne/tests/test_chpi.py": { + "a": 4, + "d": 10 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2914.json b/doc/sphinxext/prs/2914.json new file mode 100644 index 00000000000..afb32cf83a5 --- /dev/null +++ b/doc/sphinxext/prs/2914.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ccbaf544646cf41f4d72575dcfd715050ff73f56", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 4 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2915.json b/doc/sphinxext/prs/2915.json new file mode 100644 index 00000000000..b3de17139d2 --- /dev/null +++ b/doc/sphinxext/prs/2915.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5112448c3e04b26027d0f3603dfafe64876781c3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/pick.py": { + "a": 2, + "d": 2 
+ }, + "mne/io/tests/test_pick.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2916.json b/doc/sphinxext/prs/2916.json new file mode 100644 index 00000000000..e4fcc43c86d --- /dev/null +++ b/doc/sphinxext/prs/2916.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4928671d35052c78fe35864bd87df4b036c20492", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 2, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2917.json b/doc/sphinxext/prs/2917.json new file mode 100644 index 00000000000..9fb63d430f4 --- /dev/null +++ b/doc/sphinxext/prs/2917.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ffacbfa2ef6ebff0b621113809dd695d682e10d4", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 26, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2918.json b/doc/sphinxext/prs/2918.json new file mode 100644 index 00000000000..74bb9aaefd1 --- /dev/null +++ b/doc/sphinxext/prs/2918.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "1428b8772101dba6eecd9b7eaa563aa6aeba871c", + "authors": [ + { + "n": "Christopher J. Bailey", + "e": "bailey.cj@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/visualization/plot_topography.py": { + "a": 4, + "d": 2 + }, + "mne/channels/data/layouts/Vectorview-grad_norm.lout": { + "a": 103, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 4, + "d": 2 + }, + "mne/evoked.py": { + "a": 19, + "d": 16 + }, + "mne/viz/evoked.py": { + "a": 6, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 60, + "d": 35 + }, + "mne/viz/topomap.py": { + "a": 13, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2921.json b/doc/sphinxext/prs/2921.json new file mode 100644 index 00000000000..573fa56b26f --- /dev/null +++ b/doc/sphinxext/prs/2921.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6a3a6f60f6bc3384adcedbdb457be18ce535b87b", + "authors": [ + { + "n": "Asish Panda", + "e": "asishrocks95@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_epochs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2924.json b/doc/sphinxext/prs/2924.json new file mode 100644 index 00000000000..154054a543b --- /dev/null +++ b/doc/sphinxext/prs/2924.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8d6bf3c670512981dbf7d8358d5d243535e751f2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 7, + "d": 4 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2925.json b/doc/sphinxext/prs/2925.json new file mode 100644 index 00000000000..49778e1d400 --- /dev/null +++ b/doc/sphinxext/prs/2925.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6b0c16e77a0b8ba1a3b22d902b1b368037a5e05f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/_templates/function.rst": { + "a": 4, + "d": 0 + }, + "doc/conf.py": { + "a": 3, + "d": 2 + }, + 
"doc/python_reference.rst": { + "a": 3, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 81, + "d": 81 + }, + "mne/commands/mne_surf2bem.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2926.json b/doc/sphinxext/prs/2926.json new file mode 100644 index 00000000000..5e41aa27a1a --- /dev/null +++ b/doc/sphinxext/prs/2926.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "48033b68ce172439316a1dd5ede59a236e92da92", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/baseline.py": { + "a": 13, + "d": 7 + }, + "mne/epochs.py": { + "a": 6, + "d": 4 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 3, + "d": 7 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_utils.py": { + "a": 10, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 5 + }, + "mne/utils.py": { + "a": 6, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2929.json b/doc/sphinxext/prs/2929.json new file mode 100644 index 00000000000..460e02edb53 --- /dev/null +++ b/doc/sphinxext/prs/2929.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8a13540e95a4e39121e88810ffd1edd15af4f6df", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 5, + "d": 4 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 9, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/293.json b/doc/sphinxext/prs/293.json new file mode 100644 index 00000000000..9bae1d46012 --- /dev/null +++ b/doc/sphinxext/prs/293.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "749b12925f4d31abf67ffcfd1faa84b921937946", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2930.json b/doc/sphinxext/prs/2930.json new file mode 100644 index 00000000000..ea5ffb523ef --- /dev/null +++ b/doc/sphinxext/prs/2930.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e4bd5f0710f4b21abe72d67d871b0a5ea0f0c4e9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 14, + "d": 5 + }, + "mne/utils.py": { + "a": 4, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2932.json b/doc/sphinxext/prs/2932.json new file mode 100644 index 00000000000..8240de3634e --- /dev/null +++ b/doc/sphinxext/prs/2932.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "815c52a9f0f1f438c0bbec7e7ef6da7a6c448b6d", + "authors": [ + { + "n": "Sourav Singh", + "e": "ssouravsingh12@gmail.com" + } + ], + "changes": { + "doc/manual/source_localization/forward.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2934.json b/doc/sphinxext/prs/2934.json new file mode 100644 index 00000000000..d8e662ef6fd --- /dev/null +++ b/doc/sphinxext/prs/2934.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4b6f99a07c5e688622d31b467fa0140a5907a621", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/multitaper.py": { + "a": 9, + "d": 7 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/2936.json b/doc/sphinxext/prs/2936.json new file mode 100644 index 00000000000..166a3dd4824 --- /dev/null +++ b/doc/sphinxext/prs/2936.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b0618d337c09d6aaadd8d46efe2f4bb550530e8b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 8, + "d": 7 + }, + "mne/tests/test_utils.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2938.json b/doc/sphinxext/prs/2938.json new file mode 100644 index 00000000000..8f5c9d889b0 --- /dev/null +++ b/doc/sphinxext/prs/2938.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ab0cdf112a9bf56da8461363e7fd4b27c75dcbe2", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_show_info.py": { + "a": 34, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2939.json b/doc/sphinxext/prs/2939.json new file mode 100644 index 00000000000..243b230d657 --- /dev/null +++ b/doc/sphinxext/prs/2939.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b9485698c371b957a80e401d8ca94c4790192c32", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/compensator.py": { + "a": 2, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 28, + "d": 17 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 31, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2940.json b/doc/sphinxext/prs/2940.json new file mode 100644 index 00000000000..9f50980685b --- /dev/null +++ b/doc/sphinxext/prs/2940.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "31396d3103f60490329ed20faae38528037021f7", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 21, + "d": 19 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2941.json b/doc/sphinxext/prs/2941.json new file mode 100644 index 00000000000..3e84d756f5f --- /dev/null +++ b/doc/sphinxext/prs/2941.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8d01478e04a7c30ac46db443e82e2a37c46bbdad", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2942.json b/doc/sphinxext/prs/2942.json new file mode 100644 index 00000000000..b1d9b4af54e --- /dev/null +++ b/doc/sphinxext/prs/2942.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "6df109e7f21b553f253e624f7fd486cb40759465", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/defaults.py": { + "a": 7, + "d": 6 + }, + "mne/dipole.py": { + "a": 93, + "d": 5 + }, + "mne/evoked.py": { + "a": 212, + "d": 184 + }, + "mne/io/constants.py": { + "a": 18, + "d": 14 + }, + "mne/io/meas_info.py": { + "a": 10, + "d": 2 + }, + "mne/io/pick.py": { + 
"a": 16, + "d": 4 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 19, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_utils.py": { + "a": 15, + "d": 13 + }, + "mne/utils.py": { + "a": 8, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 31, + "d": 19 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2943.json b/doc/sphinxext/prs/2943.json new file mode 100644 index 00000000000..8a91a6b3def --- /dev/null +++ b/doc/sphinxext/prs/2943.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "963eda039acfa4b7dbcaacad02e2b21f386159d7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topo.py": { + "a": 6, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 158, + "d": 67 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2946.json b/doc/sphinxext/prs/2946.json new file mode 100644 index 00000000000..ee90087e128 --- /dev/null +++ b/doc/sphinxext/prs/2946.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "4716a5461c9395e0ce24184b07b663520a76260a", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 19, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 33, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/read.py": { + "a": 0, + "d": 12 + }, + "mne/io/cnt/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 369, + "d": 0 + }, + "mne/io/cnt/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 28, + "d": 0 + }, + "mne/io/constants.py": { + "a": 7, + "d": 0 + }, + "mne/io/egi/egi.py": { + "a": 13, + "d": 20 + }, + "mne/io/nicolet/nicolet.py": { + "a": 4, + "d": 25 + }, + "mne/io/utils.py": { + "a": 43, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/295.json b/doc/sphinxext/prs/295.json new file mode 100644 index 00000000000..86255bda5db --- /dev/null +++ b/doc/sphinxext/prs/295.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "20196c73cec697f0d6a9317725e4ac21f6c4d5d1", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/export/plot_epochs_as_data_frame.py": { + "a": 21, + "d": 24 + }, + "mne/utils.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2950.json b/doc/sphinxext/prs/2950.json new file mode 100644 index 00000000000..86dcd02367c --- /dev/null +++ b/doc/sphinxext/prs/2950.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "68218218a9066afa8d2dc65222e5d9a4b8c48079", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 17, + "d": 13 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 27, + "d": 0 + }, + "mne/io/pick.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2951.json b/doc/sphinxext/prs/2951.json new file mode 100644 index 00000000000..60ba21874cf --- /dev/null +++ b/doc/sphinxext/prs/2951.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "094a5bb7fbb71cb9941292f100893a843b9bacea", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 12, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2952.json b/doc/sphinxext/prs/2952.json new file mode 100644 index 00000000000..47223a8365f --- /dev/null +++ b/doc/sphinxext/prs/2952.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "da42e7f395cd4a3bcf43725870a16548d4fbfdf3", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_dipole_fit.py": { + "a": 40, + "d": 1 + }, + "mne/dipole.py": { + "a": 14, + "d": 9 + }, + "mne/forward/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 135, + "d": 4 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 111, + "d": 4 + }, + "mne/source_space.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2954.json b/doc/sphinxext/prs/2954.json new file mode 100644 index 00000000000..bcba6f52af5 --- /dev/null +++ b/doc/sphinxext/prs/2954.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "94eb7cba22aa4a0786d5582ebc896e0b2464911f", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/cited.rst": { + "a": 112, + "d": 0 + }, + "doc/cited_mne.py": { + "a": 251, + "d": 0 + }, + "doc/index.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2955.json b/doc/sphinxext/prs/2955.json new file mode 100644 index 00000000000..7690d9f1012 --- /dev/null +++ b/doc/sphinxext/prs/2955.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3554e677c64db90388ef15dce6651f72d78c6d0c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 4, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2956.json b/doc/sphinxext/prs/2956.json new file mode 100644 index 00000000000..d2816363cde --- /dev/null +++ b/doc/sphinxext/prs/2956.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "876f8936c1d291169333e3dc3fdb1b46a172854a", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 5, + "d": 0 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2957.json b/doc/sphinxext/prs/2957.json new file mode 100644 index 00000000000..0890ffd6d62 --- /dev/null +++ b/doc/sphinxext/prs/2957.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e3fc2b11cfa517304113443a427879db1f53746b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 16, + "d": 16 + }, + "mne/viz/evoked.py": { + "a": 10, + "d": 10 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2958.json b/doc/sphinxext/prs/2958.json new file mode 100644 index 00000000000..1dae7099304 --- /dev/null +++ b/doc/sphinxext/prs/2958.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "66a9285b100e2cca3d350d5b83b2732c6ee8a6cc", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/296.json b/doc/sphinxext/prs/296.json new file mode 100644 index 00000000000..68ee92153f8 --- /dev/null +++ b/doc/sphinxext/prs/296.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "354bf2d4f94773b2133cb83ef7db15744d567a62", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 284, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2961.json b/doc/sphinxext/prs/2961.json new file mode 100644 index 00000000000..5a40f1ae8b6 --- /dev/null +++ b/doc/sphinxext/prs/2961.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bc724819b62985445184a7f2e381a13ce175b944", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/manual/index.rst": { + "a": 8, + "d": 0 + }, + "doc/manual/migrating.rst": { + "a": 54, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2962.json b/doc/sphinxext/prs/2962.json new file mode 100644 index 00000000000..5c7f58318fb --- /dev/null +++ b/doc/sphinxext/prs/2962.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d3f61fd6b57cc56730aa6c34c70574ad67cf7c94", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2963.json b/doc/sphinxext/prs/2963.json new file mode 100644 index 00000000000..20a3b62c850 --- /dev/null +++ b/doc/sphinxext/prs/2963.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1295b83c460727df8e529622c3dfcaa39f5ac0d5", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2964.json b/doc/sphinxext/prs/2964.json new file mode 100644 index 00000000000..41802f6e39d --- /dev/null +++ b/doc/sphinxext/prs/2964.json @@ -0,0 +1,155 @@ +{ + "merge_commit_sha": "10c20e72c41fcc58cc3a2e7807c6765d9b3c8889", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/advanced_setup.rst": 
{ + "a": 0, + "d": 163 + }, + "doc/cite.rst": { + "a": 2, + "d": 2 + }, + "doc/cited.rst": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 12, + "d": 11 + }, + "doc/contributing.rst": { + "a": 4, + "d": 4 + }, + "doc/faq.rst": { + "a": 54, + "d": 1 + }, + "doc/getting_started.rst": { + "a": 325, + "d": 104 + }, + "doc/git_links.inc": { + "a": 2, + "d": 6 + }, + "doc/index.rst": { + "a": 38, + "d": 33 + }, + "doc/manual/datasets_index.rst": { + "a": 6, + "d": 5 + }, + "doc/manual/index.rst": { + "a": 2, + "d": 5 + }, + "doc/manual/migrating.rst": { + "a": 4, + "d": 4 + }, + "doc/manual/preprocessing/maxwell.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 3, + "d": 3 + }, + "doc/manual/time_frequency.rst": { + "a": 4, + "d": 4 + }, + "doc/martinos.rst": { + "a": 35, + "d": 0 + }, + "doc/mne_cpp.rst": { + "a": 9, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 10 + }, + "doc/this_project.inc": { + "a": 4, + "d": 1 + }, + "doc/tutorials/command_line.rst": { + "a": 2, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 6, + "d": 5 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/evoked.py": { + "a": 18, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 0, + "d": 1 + }, + "mne/label.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 1 + }, + "mne/utils.py": { + "a": 31, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_creating_data_structures.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_raw_objects.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2966.json b/doc/sphinxext/prs/2966.json new file mode 100644 index 00000000000..f451645b31f --- /dev/null +++ b/doc/sphinxext/prs/2966.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d8416766ea5302764a4716836ab9c3658511a578", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2969.json b/doc/sphinxext/prs/2969.json new file mode 100644 index 00000000000..ab2982d1dfc --- /dev/null +++ b/doc/sphinxext/prs/2969.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "055dbcf36cf7055ac647961dae37e47a31884881", + "authors": [ + { + "n": "Yousra Bekhti", + "e": "yousra.bekhti@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 58, + "d": 6 + }, + "doc/conf.py": { + "a": 5, + "d": 1 + }, + "doc/getting_started.rst": { + "a": 107, + "d": 528 + }, + "doc/index.rst": { + "a": 2, + "d": 1 + }, + "doc/install_mne_c.rst": { + "a": 258, + "d": 0 + }, + "doc/install_mne_python.rst": { + "a": 269, + "d": 0 + }, + "doc/manual/index.rst": { + "a": 115, + "d": 88 + }, + "doc/sphinxext/flow_diagram.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/gen_commands.py": { + "a": 8, + "d": 15 + }, + "doc/tutorials.rst": { + "a": 112, + "d": 52 + }, + "doc/tutorials/command_line.rst": { + "a": 19, + "d": 8 + }, + "doc/tutorials/mne-report.png": { + "a": 0, + "d": 0 + }, + "doc/tutorials/preprocessing/artifacts_suppression.rst": { + "a": 88, 
+ "d": 0 + }, + "doc/tutorials/preprocessing/basic_preprocessing.rst": { + "a": 59, + "d": 0 + }, + "doc/tutorials/preprocessing/data_selection.rst": { + "a": 90, + "d": 0 + }, + "doc/tutorials/report.rst": { + "a": 36, + "d": 24 + }, + "examples/README.txt": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_maxfilter.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/297.json b/doc/sphinxext/prs/297.json new file mode 100644 index 00000000000..1cd2819aebf --- /dev/null +++ b/doc/sphinxext/prs/297.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a5a6edf28d8a26ca67a5ac7616bb2748a5bd666", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/utils.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2970.json b/doc/sphinxext/prs/2970.json new file mode 100644 index 00000000000..b4f1c9de197 --- /dev/null +++ b/doc/sphinxext/prs/2970.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "31805d288d4ff7f4bcae8d708d568b59a6c0e26d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/multitaper.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2972.json b/doc/sphinxext/prs/2972.json new file mode 100644 index 00000000000..8381315292c --- /dev/null +++ b/doc/sphinxext/prs/2972.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0b1d5d5ac013514c0202e12aa310751a5906fb48", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/preprocessing/plot_resample.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_linear_regression_raw.py": { + "a": 6, + "d": 5 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 6, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 7, + "d": 5 + }, + "mne/proj.py": { + "a": 7, + "d": 6 + }, + "mne/time_frequency/multitaper.py": { + "a": 30, + "d": 24 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2973.json b/doc/sphinxext/prs/2973.json new file mode 100644 index 00000000000..07b5c2d3f39 --- /dev/null +++ b/doc/sphinxext/prs/2973.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a3509ede1c9de56677824c5dae2a718b89872630", + "authors": [ + { + "n": "Asish Panda", + "e": "asishrocks95@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 15, + "d": 0 + }, + "mne/decoding/time_gen.py": { + "a": 18, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2974.json b/doc/sphinxext/prs/2974.json new file mode 100644 index 00000000000..5b42ec63859 --- /dev/null +++ b/doc/sphinxext/prs/2974.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8fe026ef051216ca39d6622a661be1dc50f1e672", + "authors": [ + { + "n": "Camilo Lamus", + "e": null + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 24, + "d": 15 + }, + "mne/forward/tests/test_forward.py": { + "a": 14, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2977.json b/doc/sphinxext/prs/2977.json new file mode 100644 index 
00000000000..dfe35a4b177 --- /dev/null +++ b/doc/sphinxext/prs/2977.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f4d6e141cfba1dde8eb90b1a3a5c520a574f51fc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 14, + "d": 10 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 72, + "d": 42 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 11, + "d": 9 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_lead_dots.py": { + "a": 20, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2978.json b/doc/sphinxext/prs/2978.json new file mode 100644 index 00000000000..3b54126203e --- /dev/null +++ b/doc/sphinxext/prs/2978.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "85b6215e13c6c4caa0dd997c8fbb4bfa26b7cc1c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/preprocessing/plot_resample.py": { + "a": 5, + "d": 4 + }, + "mne/cuda.py": { + "a": 32, + "d": 20 + }, + "mne/epochs.py": { + "a": 9, + "d": 2 + }, + "mne/evoked.py": { + "a": 9, + "d": 2 + }, + "mne/filter.py": { + "a": 27, + "d": 7 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 9, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 15, + "d": 15 + }, + "mne/source_estimate.py": { + "a": 9, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 10, + "d": 7 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_cluster_stats_spatio_temporal.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/298.json b/doc/sphinxext/prs/298.json new file mode 100644 index 00000000000..9e1f5e795cf --- /dev/null +++ b/doc/sphinxext/prs/298.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "62c1193f58857bfd9ab54f7d2e4dfa474c67aa01", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 46, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2980.json b/doc/sphinxext/prs/2980.json new file mode 100644 index 00000000000..a4387f4c51a --- /dev/null +++ b/doc/sphinxext/prs/2980.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a72e96ff8523f9b1a2bedb3f482fe2495a43e80f", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2982.json b/doc/sphinxext/prs/2982.json new file mode 100644 index 00000000000..7d944646792 --- /dev/null +++ b/doc/sphinxext/prs/2982.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5c390fd94c1adc9edbb232f71c36324ceaae25f6", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 0, + "d": 2 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 1, + "d": 1 + }, + 
"mne/datasets/brainstorm/bst_resting.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2984.json b/doc/sphinxext/prs/2984.json new file mode 100644 index 00000000000..ef1f7db9da9 --- /dev/null +++ b/doc/sphinxext/prs/2984.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4fa4c5c383a80e48b5871815f1dfdf88810d1815", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2985.json b/doc/sphinxext/prs/2985.json new file mode 100644 index 00000000000..b190f31404b --- /dev/null +++ b/doc/sphinxext/prs/2985.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "af8a0369e8b5dcffd7ffbd188fb7ca5b499e799a", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2987.json b/doc/sphinxext/prs/2987.json new file mode 100644 index 00000000000..7547081a61e --- /dev/null +++ b/doc/sphinxext/prs/2987.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c728d5a9c523a2346785020c17b056747b689c6a", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/decoding.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2989.json b/doc/sphinxext/prs/2989.json new file mode 100644 index 00000000000..b9cbcc24da0 --- /dev/null +++ b/doc/sphinxext/prs/2989.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5efb1ff2d3de0ea74733dc69c2be601d34659ecc", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 3, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/299.json b/doc/sphinxext/prs/299.json new file mode 100644 index 00000000000..8b693679887 --- /dev/null +++ b/doc/sphinxext/prs/299.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "480f54b0934a2eaa18f3b3f72e90b3d283f2d44a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/plot_from_raw_to_multiple_epochs_to_evoked.py": { + "a": 4, + "d": 6 + }, + "examples/plot_read_evoked.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 31, + "d": 18 + }, + "mne/fiff/evoked.py": { + "a": 82, + "d": 44 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 32, + "d": 16 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 3, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 35, + "d": 11 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2990.json b/doc/sphinxext/prs/2990.json new file mode 100644 index 00000000000..f152ebe87d6 --- /dev/null +++ b/doc/sphinxext/prs/2990.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "e4e46a69a4cb233f04ffd02c9bc9e1c81140e733", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 3, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_decoding_time_generalization.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 3, + "d": 3 + }, + "examples/stats/plot_linear_regression_raw.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_stockwell.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_clickable_image.py": { + "a": 1, + "d": 1 + }, + "mne/baseline.py": { + "a": 40, + "d": 40 + }, + "mne/datasets/spm_face/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 13, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 3, + "d": 3 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 14, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 7 + }, + "setup.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2991.json b/doc/sphinxext/prs/2991.json new file mode 100644 index 00000000000..0e79201d658 --- /dev/null +++ b/doc/sphinxext/prs/2991.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8e4b3472dca575dae469f994c648aaf117bf11ec", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/defaults.py": { + "a": 4, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2992.json b/doc/sphinxext/prs/2992.json new file mode 100644 index 00000000000..453ebdcca32 --- /dev/null +++ b/doc/sphinxext/prs/2992.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"1579466cb630fb000087a73f983a1a6469e2df9b", + "authors": [ + { + "n": "sviter", + "e": "sviter33@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/2993.json b/doc/sphinxext/prs/2993.json new file mode 100644 index 00000000000..d759dbeebb0 --- /dev/null +++ b/doc/sphinxext/prs/2993.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bb7a220bcfc6ea7b6dd5abaad5ec218369999742", + "authors": [ + { + "n": "Asish Panda", + "e": "asishrocks95@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 64, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 8, + "d": 8 + }, + "mne/viz/tests/test_epochs.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3.json b/doc/sphinxext/prs/3.json new file mode 100644 index 00000000000..fa20109a540 --- /dev/null +++ b/doc/sphinxext/prs/3.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "8891d764a4b580caa934e1864d43e1608a6d01d2", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/getting_started.rst": { + "a": 2, + "d": 22 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 84, + "d": 0 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 3, + "d": 3 + }, + "mne/baseline.py": { + "a": 5, + "d": 2 + }, + "mne/minimum_norm/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 28, + "d": 12 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 192, + "d": 93 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/30.json b/doc/sphinxext/prs/30.json new file mode 100644 index 00000000000..3305ce4ec6a --- /dev/null +++ b/doc/sphinxext/prs/30.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e62145fa93c2609fb2fbba40702e8a23db28b894", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/plot_estimate_covariance_matrix.py": { + "a": 2, + "d": 21 + }, + "examples/plot_read_noise_covariance_matrix.py": { + "a": 3, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 0 + }, + "mne/viz.py": { + "a": 63, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/300.json b/doc/sphinxext/prs/300.json new file mode 100644 index 00000000000..b53d3e9594f --- /dev/null +++ b/doc/sphinxext/prs/300.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "465d0ad8f5c6caf635aa37d3c6f1ac6676351cf1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 2, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3000.json b/doc/sphinxext/prs/3000.json new file mode 100644 index 00000000000..8299f9aa96a --- /dev/null +++ b/doc/sphinxext/prs/3000.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "be82dc45ed1c1cce9d86a5eca5266f89882c60d9", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/csp.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + 
} + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3003.json b/doc/sphinxext/prs/3003.json new file mode 100644 index 00000000000..489a9dbd7be --- /dev/null +++ b/doc/sphinxext/prs/3003.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d786b6fcd76ee24adfe223d001d75fba470d6aef", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 7, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 12, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3004.json b/doc/sphinxext/prs/3004.json new file mode 100644 index 00000000000..a52d6ec8f0e --- /dev/null +++ b/doc/sphinxext/prs/3004.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "65ca3c13900453604292f0c1a1cf4cc282d1e132", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/time_frequency/psd.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3007.json b/doc/sphinxext/prs/3007.json new file mode 100644 index 00000000000..c3c4fb93400 --- /dev/null +++ b/doc/sphinxext/prs/3007.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d3c9b765a39379fea432f174ed1b77a21e06fe09", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3008.json b/doc/sphinxext/prs/3008.json new file mode 100644 index 00000000000..8dc8f179660 --- /dev/null +++ b/doc/sphinxext/prs/3008.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "35193e90f8cb14a3d5441bf2ba7d6b0b148e130e", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 50, + "d": 15 + }, + "mne/stats/tests/test_regression.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3011.json b/doc/sphinxext/prs/3011.json new file mode 100644 index 00000000000..910d90e8ac5 --- /dev/null +++ b/doc/sphinxext/prs/3011.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "710bb22848698f61bdd3f8b943f564ba3d890ce2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 0 + }, + "doc/faq.rst": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3012.json b/doc/sphinxext/prs/3012.json new file mode 100644 index 00000000000..dd55539af94 --- /dev/null +++ b/doc/sphinxext/prs/3012.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "06b0f6991d4b575b33cb5978dcbd222290427509", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/data/coil_def.dat": { + "a": 28, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3016.json b/doc/sphinxext/prs/3016.json new file mode 100644 index 00000000000..b0b3293230a --- /dev/null +++ b/doc/sphinxext/prs/3016.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c9c743f0bb3631cf83d7579d84f3dc748c90cfa1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/cited_mne.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3018.json b/doc/sphinxext/prs/3018.json new file mode 100644 index 00000000000..0ac4ee4c291 --- 
/dev/null +++ b/doc/sphinxext/prs/3018.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "49fef4e73b95ffd62607f7da0579afe8601f45c9", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 11, + "d": 7 + }, + "mne/defaults.py": { + "a": 9, + "d": 8 + }, + "mne/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 1, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 0 + }, + "mne/io/pick.py": { + "a": 12, + "d": 5 + }, + "mne/io/tests/test_pick.py": { + "a": 11, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3019.json b/doc/sphinxext/prs/3019.json new file mode 100644 index 00000000000..c508db9b3b2 --- /dev/null +++ b/doc/sphinxext/prs/3019.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f2d2bcc41ffee3f27937948d8db5c5dc91c2d205", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "examples/realtime/plot_compute_rt_average.py": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3020.json b/doc/sphinxext/prs/3020.json new file mode 100644 index 00000000000..797a36fe683 --- /dev/null +++ b/doc/sphinxext/prs/3020.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "a430554085baaf53dcdde4ed79e7c19215ca4932", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/chpi.py": { + "a": 28, + "d": 4 + }, + "mne/fixes.py": { + "a": 87, + "d": 8 + }, + "mne/io/base.py": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 25, + "d": 1 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3024.json b/doc/sphinxext/prs/3024.json new file mode 100644 index 00000000000..f96adfca288 --- /dev/null +++ b/doc/sphinxext/prs/3024.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2f425a6de169503fa36e2bb95790adf66fa5f37a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 65, + "d": 65 + }, + "doc/faq.rst": { + "a": 123, + "d": 5 + }, + "doc/getting_started.rst": { + "a": 2, + "d": 0 + }, + "doc/index.rst": { + "a": 1, + "d": 1 + }, + "doc/this_project.inc": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3026.json b/doc/sphinxext/prs/3026.json new file mode 100644 index 00000000000..af3c9c4af0a --- /dev/null +++ b/doc/sphinxext/prs/3026.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7861b8329e0129d5703955eff1202e8806fe57e4", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3027.json b/doc/sphinxext/prs/3027.json new file mode 100644 index 00000000000..22f3848905c --- /dev/null +++ b/doc/sphinxext/prs/3027.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "39a3a3a89c6815ca529370ba0b5281e94920d290", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 13, + "d": 3 
+ }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/303.json b/doc/sphinxext/prs/303.json new file mode 100644 index 00000000000..18d3e469436 --- /dev/null +++ b/doc/sphinxext/prs/303.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "99fb1a0b55608226546eebb4017b2914527823a3", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 8, + "d": 0 + }, + "mne/viz.py": { + "a": 20, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3030.json b/doc/sphinxext/prs/3030.json new file mode 100644 index 00000000000..94d0a29d23b --- /dev/null +++ b/doc/sphinxext/prs/3030.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0862dd1b9a322be04f64a3b2963f12bea39fde69", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/forward/plot_coregistration_transform.py": { + "a": 1, + "d": 3 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 41, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 128, + "d": 29 + }, + "mne/viz/tests/test_3d.py": { + "a": 32, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3031.json b/doc/sphinxext/prs/3031.json new file mode 100644 index 00000000000..b6c9f098df7 --- /dev/null +++ b/doc/sphinxext/prs/3031.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0b923a5f10fd720857e5100809462eded8013746", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 9, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 19, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3032.json b/doc/sphinxext/prs/3032.json new file mode 100644 index 00000000000..aad3e66009a --- /dev/null +++ b/doc/sphinxext/prs/3032.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3092b72daf804ead4a1c38da707962786c938020", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/index.rst": { + "a": 1, + "d": 1 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3033.json b/doc/sphinxext/prs/3033.json new file mode 100644 index 00000000000..78ee493127b --- /dev/null +++ b/doc/sphinxext/prs/3033.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "96ed572d38921e6305b686a8dbdc426da2833dd9", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 60, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 14, + "d": 10 + }, + "mne/viz/raw.py": { + "a": 8, + "d": 4 + }, + "mne/viz/tests/test_topo.py": { + "a": 9, + "d": 5 + }, + "mne/viz/topo.py": { + "a": 123, + "d": 70 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3034.json b/doc/sphinxext/prs/3034.json new file mode 100644 index 00000000000..141bc570039 --- /dev/null +++ b/doc/sphinxext/prs/3034.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b531a80d4906ed1dc7ecf5b52ac2923bd99c47ad", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 13, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 57, + "d": 70 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3035.json 
b/doc/sphinxext/prs/3035.json new file mode 100644 index 00000000000..04b42640789 --- /dev/null +++ b/doc/sphinxext/prs/3035.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "77eb1fccd9e1a78b94cc5291ed1ec05f01c96457", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 22, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3040.json b/doc/sphinxext/prs/3040.json new file mode 100644 index 00000000000..cfd2dc0ab87 --- /dev/null +++ b/doc/sphinxext/prs/3040.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1579eb5f3e71e4c73bbd4545b93c03310939fb26", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3041.json b/doc/sphinxext/prs/3041.json new file mode 100644 index 00000000000..88b1c063b97 --- /dev/null +++ b/doc/sphinxext/prs/3041.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1a9c1b0b4983bae6d3970560e896097e3069d449", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 13, + "d": 6 + }, + "mne/io/meas_info.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3042.json b/doc/sphinxext/prs/3042.json new file mode 100644 index 00000000000..d3975c9782b --- /dev/null +++ b/doc/sphinxext/prs/3042.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "da6adca4bef00de277368eeae9fab43831169f3a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/event.py": { + "a": 24, + "d": 10 + }, + "mne/tests/test_event.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3048.json b/doc/sphinxext/prs/3048.json new file mode 100644 index 00000000000..bd94448c4f1 --- /dev/null +++ b/doc/sphinxext/prs/3048.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ecd7b17f7d0ee0ba8908e69514e629e70b2ba21e", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 17, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3050.json b/doc/sphinxext/prs/3050.json new file mode 100644 index 00000000000..ce2c9297fce --- /dev/null +++ b/doc/sphinxext/prs/3050.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d7d0224dac62ab47d11f0fd3c4e33a501a14420e", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_sensors.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 7, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3051.json b/doc/sphinxext/prs/3051.json new file mode 100644 index 00000000000..2ca5449a970 --- /dev/null +++ b/doc/sphinxext/prs/3051.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7774365d4ee44eb105b4b491a5f74ef525c7071c", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3055.json b/doc/sphinxext/prs/3055.json new file mode 100644 index 00000000000..8c98bf642dd --- /dev/null +++ b/doc/sphinxext/prs/3055.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"4ea477bf6e876d353b270445b0fd8ffb48841d89", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3057.json b/doc/sphinxext/prs/3057.json new file mode 100644 index 00000000000..8e708f8d029 --- /dev/null +++ b/doc/sphinxext/prs/3057.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "caab61377a56cacbe75ea9804599e9a09152a831", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3058.json b/doc/sphinxext/prs/3058.json new file mode 100644 index 00000000000..b5cc269ff13 --- /dev/null +++ b/doc/sphinxext/prs/3058.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "af40bd630ead348979fdaa055d510b8e7fb72f91", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 50, + "d": 24 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3059.json b/doc/sphinxext/prs/3059.json new file mode 100644 index 00000000000..e5256574188 --- /dev/null +++ b/doc/sphinxext/prs/3059.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5f389c5c44f80e9df4efeae15506c10515a56276", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 10, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/306.json b/doc/sphinxext/prs/306.json new file mode 100644 index 00000000000..ad4c7742291 --- /dev/null +++ b/doc/sphinxext/prs/306.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e862d55d1df9cbd952b83ab80c7848c58adc1ebd", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/proj.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3064.json b/doc/sphinxext/prs/3064.json new file mode 100644 index 00000000000..cf8a1d074db --- /dev/null +++ b/doc/sphinxext/prs/3064.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4227b7603898eb29f43787c3388517f758777875", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3065.json b/doc/sphinxext/prs/3065.json new file mode 100644 index 00000000000..43e96812bae --- /dev/null +++ b/doc/sphinxext/prs/3065.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "547e80eb73e054c535b49325b037b1a1459f6ecc", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 3, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_topo_channel_epochs_image.py": { + "a": 3, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 43, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 24, + "d": 2 + }, + "mne/channels/tests/test_layout.py": { + "a": 7, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 7 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 0 + }, + 
"mne/viz/tests/test_raw.py": { + "a": 12, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 52, + "d": 12 + }, + "mne/viz/utils.py": { + "a": 129, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3066.json b/doc/sphinxext/prs/3066.json new file mode 100644 index 00000000000..46f36fa1e3f --- /dev/null +++ b/doc/sphinxext/prs/3066.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "62be106bae5b0f7b9cf455f1efd3186e88ef70b5", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 4, + "d": 6 + }, + "doc/Makefile": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3067.json b/doc/sphinxext/prs/3067.json new file mode 100644 index 00000000000..d57313d86ab --- /dev/null +++ b/doc/sphinxext/prs/3067.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b16542351011e56ec1c778ea264260e5e9278a3b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 19, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/307.json b/doc/sphinxext/prs/307.json new file mode 100644 index 00000000000..dfb44ff72db --- /dev/null +++ b/doc/sphinxext/prs/307.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1cb92741cfee06b70c08ce24b847a6d667de0db9", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/tests/test_ica.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3072.json b/doc/sphinxext/prs/3072.json new file mode 100644 index 00000000000..1b37e0dc841 --- /dev/null +++ b/doc/sphinxext/prs/3072.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4e83089dd911ff580f766c1ad2d4cfd0e2b700f1", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3074.json b/doc/sphinxext/prs/3074.json new file mode 100644 index 00000000000..38b34dd5ffa --- /dev/null +++ b/doc/sphinxext/prs/3074.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cfd4f2e12217918c55fcb1eb1f0cc5ff390a14fd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 28, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3076.json b/doc/sphinxext/prs/3076.json new file mode 100644 index 00000000000..7cb6c5c7203 --- /dev/null +++ b/doc/sphinxext/prs/3076.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e61f8be7a8fbe77cfe2dc1aeede62440ebe216a5", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/event.py": { + "a": 36, + "d": 8 + }, + "mne/tests/test_event.py": { + "a": 32, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3077.json b/doc/sphinxext/prs/3077.json new file mode 100644 index 00000000000..5c3e654cfe7 --- /dev/null +++ b/doc/sphinxext/prs/3077.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4a1b3d517d4a8d922cf27f09f3a9cf4493c7abd5", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 
1 + }, + "mne/io/base.py": { + "a": 22, + "d": 15 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 18, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3079.json b/doc/sphinxext/prs/3079.json new file mode 100644 index 00000000000..8c5d5e42cf4 --- /dev/null +++ b/doc/sphinxext/prs/3079.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "3b82ea99fe1a4637cef5f757c7f0feaf4cd9501a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 6, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 18, + "d": 10 + }, + "mne/io/base.py": { + "a": 109, + "d": 76 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_evoked.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/308.json b/doc/sphinxext/prs/308.json new file mode 100644 index 00000000000..9a257845a55 --- /dev/null +++ b/doc/sphinxext/prs/308.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bb7aaff1664d8e09e60a3dda98babadc5000bf8e", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3080.json b/doc/sphinxext/prs/3080.json new file mode 100644 index 00000000000..06ef528096d --- /dev/null +++ b/doc/sphinxext/prs/3080.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2bf44489c08a7d37380071f1667e484daf1d00f2", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/stats/regression.py": { + "a": 9, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3085.json b/doc/sphinxext/prs/3085.json new file mode 100644 index 00000000000..fef4840e510 --- /dev/null +++ b/doc/sphinxext/prs/3085.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f4f302f78486b9e6af99faac072e0ac6cda3b2d2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 7, + "d": 9 + }, + "mne/evoked.py": { + "a": 6, + "d": 4 + }, + "mne/stats/regression.py": { + "a": 8, + "d": 9 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3087.json b/doc/sphinxext/prs/3087.json new file mode 100644 index 00000000000..5a99b2c2f7c --- /dev/null +++ b/doc/sphinxext/prs/3087.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "4c0c323f79bfc77c80e7277da8734334782abc93", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/install_mne_c.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/cookbook.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 4, + "d": 4 + }, + "doc/manual/source_localization/forward.rst": { + "a": 6, + "d": 2 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 7, + "d": 7 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "tutorials/plot_creating_data_structures.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3093.json b/doc/sphinxext/prs/3093.json new file mode 100644 index 00000000000..14afe800327 --- /dev/null +++ 
b/doc/sphinxext/prs/3093.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3ca35b7e3d6403e77788158922e7924146696761", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 12, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3096.json b/doc/sphinxext/prs/3096.json new file mode 100644 index 00000000000..09b381af4a3 --- /dev/null +++ b/doc/sphinxext/prs/3096.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "98d13c860df1f62841825194fc471fe6ddcd31d2", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3098.json b/doc/sphinxext/prs/3098.json new file mode 100644 index 00000000000..da236cd90d8 --- /dev/null +++ b/doc/sphinxext/prs/3098.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b96689e6d67c3c1d1375e2f728dfb31bfbebedac", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/stats/parametric.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_utils.py": { + "a": 7, + "d": 1 + }, + "mne/utils.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/31.json b/doc/sphinxext/prs/31.json new file mode 100644 index 00000000000..0f3dc6bd6fb --- /dev/null +++ b/doc/sphinxext/prs/31.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "1a62954a82835c4e9781151cdf0a387a0bc20dfd", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 30, + "d": 193 + }, + "mne/fiff/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/fiff/constants.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/cov.py": { + "a": 174, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 6, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3102.json b/doc/sphinxext/prs/3102.json new file mode 100644 index 00000000000..2dcdf6085ac --- /dev/null +++ b/doc/sphinxext/prs/3102.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4ab8801ded0649edb350156aaca300319ca1cb95", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/surface.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 7, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3104.json b/doc/sphinxext/prs/3104.json new file mode 100644 index 00000000000..673f7a1dfd6 --- /dev/null +++ b/doc/sphinxext/prs/3104.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1e885652ec705edc9b39d3cd15fc0704e13ea74f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3106.json b/doc/sphinxext/prs/3106.json new file mode 100644 index 00000000000..46eb94d6004 --- /dev/null +++ b/doc/sphinxext/prs/3106.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6cb00360f539c413a79690ec67377e5b920279c2", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/decoding/transformer.py": { + "a": 2, + "d": 2 + }, + 
"mne/io/pick.py": { + "a": 7, + "d": 0 + }, + "mne/io/tests/test_pick.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3108.json b/doc/sphinxext/prs/3108.json new file mode 100644 index 00000000000..67162f668e3 --- /dev/null +++ b/doc/sphinxext/prs/3108.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e549bf408a1adcb10f41caf51350f7af1aebfc38", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 10, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3109.json b/doc/sphinxext/prs/3109.json new file mode 100644 index 00000000000..61dbdebfa5d --- /dev/null +++ b/doc/sphinxext/prs/3109.json @@ -0,0 +1,447 @@ +{ + "merge_commit_sha": "dd945658a8d45b2b867d6d3e379050086a9e64e0", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/index.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/memory.rst": { + "a": 2, + "d": 2 + }, + "doc/manual/migrating.rst": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 1, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 1, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 1, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 1, + "d": 2 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 1, + "d": 2 + }, + "examples/datasets/plot_megsim_data.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_sensors.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 2, + "d": 3 + }, + "examples/io/plot_read_and_write_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_read_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/plot_extract_events_from_raw.py": { + "a": 1, + "d": 2 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_corrmap_detection.py": { + "a": 1, + "d": 3 + }, + "examples/preprocessing/plot_define_target_events.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_eog_artifact_histogram.py": { + "a": 1, + "d": 1 + }, + 
"examples/preprocessing/plot_estimate_covariance_matrix_baseline.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/plot_estimate_covariance_matrix_raw.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_maxwell_filter.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_movement_compensation.py": { + "a": 3, + "d": 2 + }, + "examples/preprocessing/plot_rereference_eeg.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_resample.py": { + "a": 1, + "d": 2 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 1, + "d": 2 + }, + "examples/preprocessing/plot_xdawn_denoising.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/rt_feedback_server.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 9, + "d": 11 + }, + "examples/simulation/plot_simulate_raw_data.py": { + "a": 3, + "d": 2 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_fdr_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_sensor_regression.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_source_psd_epochs.py": { + "a": 1, + "d": 2 + }, + "examples/time_frequency/plot_epochs_spectra.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_stockwell.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_multitaper_sensors.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_sensors.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_evoked_delayed_ssp.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_evoked_topomap_delayed_ssp.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_evoked_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 3, + "d": 2 + }, + "examples/visualization/plot_topo_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_topo_compare_conditions.py": { + "a": 1, + "d": 2 + }, + "examples/visualization/plot_topo_customized.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_browse_raw.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/tests/test_csp.py": { + "a": 2, + "d": 2 + }, + 
"mne/decoding/tests/test_ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 4, + "d": 4 + }, + "mne/io/tests/test_proc_history.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 2, + "d": 1 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 3, + "d": 3 + }, + "mne/stats/tests/test_regression.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_event.py": { + "a": 6, + "d": 6 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_decoding.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_cluster_1samp_test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_cluster_stats_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_cluster_stats_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_source_localization_basics.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_spatio_temporal_cluster_stats_sensor.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/311.json b/doc/sphinxext/prs/311.json new file mode 100644 index 00000000000..dd4299babfd --- /dev/null +++ b/doc/sphinxext/prs/311.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "caeb8c2ae2d6b4a4e1e319d391f9c040a3d374a2", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3110.json b/doc/sphinxext/prs/3110.json new file mode 100644 index 00000000000..c5b23eddcbb --- /dev/null +++ b/doc/sphinxext/prs/3110.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "f202ce34fcd296c5a9727bde808f6aa2383c1b9f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".gitignore": { + "a": 2, + "d": 1 + }, + "doc/manual/memory.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 1, + "d": 1 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 
56, + "d": 25 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 22, + "d": 22 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 4, + "d": 3 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_epochs.py": { + "a": 24, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3111.json b/doc/sphinxext/prs/3111.json new file mode 100644 index 00000000000..a77a26f95f8 --- /dev/null +++ b/doc/sphinxext/prs/3111.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7f84bd70c7830eb6cfd9673d21bd4a6c39019e1d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/base.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3114.json b/doc/sphinxext/prs/3114.json new file mode 100644 index 00000000000..97a3b5ed358 --- /dev/null +++ b/doc/sphinxext/prs/3114.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d7530ddf5da295e0a23178df48fd648c7039a897", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "mne/epochs.py": { + "a": 6, + "d": 0 + }, + "mne/evoked.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3115.json b/doc/sphinxext/prs/3115.json new file mode 100644 index 00000000000..ee0d98a2342 --- /dev/null +++ b/doc/sphinxext/prs/3115.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4c2b4a765c3a973b8bedfea798ad18796e309b39", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 26, + "d": 4 + }, + "mne/tests/test_cov.py": { + "a": 37, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3119.json b/doc/sphinxext/prs/3119.json new file mode 100644 index 00000000000..95d1909fc28 --- /dev/null +++ b/doc/sphinxext/prs/3119.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "90c43d7ccaca46dc784b2001cf00e5d990f80e62", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_evoked_topomap_delayed_ssp.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_ssp_projs_sensitivity_map.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_ssp_projs_topomaps.py": { + "a": 4, + "d": 2 + }, + "mne/commands/mne_browse_raw.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 3, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 2 + }, + "mne/io/proj.py": { + "a": 8, + "d": 6 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 5, + "d": 4 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3122.json b/doc/sphinxext/prs/3122.json new file mode 100644 
index 00000000000..72f1ebb4d44 --- /dev/null +++ b/doc/sphinxext/prs/3122.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "38db591159f0f73b642594864a5770b78de36f0a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 6 + }, + "mne/dipole.py": { + "a": 286, + "d": 102 + }, + "mne/tests/test_dipole.py": { + "a": 54, + "d": 8 + }, + "tutorials/plot_dipole_fit.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3129.json b/doc/sphinxext/prs/3129.json new file mode 100644 index 00000000000..b6cc632ba52 --- /dev/null +++ b/doc/sphinxext/prs/3129.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "741259072d81d17c70acf4f247cf18640934d5cc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 31, + "d": 23 + }, + "mne/utils.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/313.json b/doc/sphinxext/prs/313.json new file mode 100644 index 00000000000..a009bf864b1 --- /dev/null +++ b/doc/sphinxext/prs/313.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c033cdf5100eb707997da37bd06a5c722b631570", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/sphinxext/gen_rst.py": { + "a": 33, + "d": 5 + }, + "mne/connectivity/spectral.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3131.json b/doc/sphinxext/prs/3131.json new file mode 100644 index 00000000000..817296de28b --- /dev/null +++ b/doc/sphinxext/prs/3131.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "62bd319e587c0af0207bcead609545cc1ffa72c4", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/io/cnt/cnt.py": { + "a": 16, + "d": 14 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3132.json b/doc/sphinxext/prs/3132.json new file mode 100644 index 00000000000..85739ad965b --- /dev/null +++ b/doc/sphinxext/prs/3132.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "eb3fdfd6e7adf381744cb6d283efb9c3a087867f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 52, + "d": 47 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 9, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 25 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 8, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3136.json b/doc/sphinxext/prs/3136.json new file mode 100644 index 00000000000..3fc7834eff6 --- /dev/null +++ b/doc/sphinxext/prs/3136.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c30f617fa59f4c4d1270c34be631d7889ebf87a0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_ssp_projs_sensitivity_map.py": { + "a": 2, + "d": 1 + }, + "mne/proj.py": { + "a": 6, + "d": 3 + }, + "mne/tests/test_proj.py": { + "a": 3, + 
"d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3137.json b/doc/sphinxext/prs/3137.json new file mode 100644 index 00000000000..37033eb79c8 --- /dev/null +++ b/doc/sphinxext/prs/3137.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "28f41b0add25d8e191f4bc208da21eb1fbd142c7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "appveyor.yml": { + "a": 1, + "d": 0 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3139.json b/doc/sphinxext/prs/3139.json new file mode 100644 index 00000000000..eaff5172aa0 --- /dev/null +++ b/doc/sphinxext/prs/3139.json @@ -0,0 +1,275 @@ +{ + "merge_commit_sha": "5346d694acc187c5407cd2356db31eba9721f1f7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_brainstorm_data.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_resample.py": { + "a": 6, + "d": 6 + }, + "examples/simulation/plot_simulate_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 2, + "d": 5 + }, + "mne/baseline.py": { + "a": 2, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 41, + "d": 29 + }, + "mne/channels/interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 5, + "d": 6 + }, + "mne/cov.py": { + "a": 13, + "d": 17 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_ems.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 29, + "d": 28 + }, + "mne/evoked.py": { + "a": 7, + "d": 4 + }, + "mne/forward/_field_interpolation.py": { + "a": 3, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 14, + "d": 14 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 4, + "d": 4 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 39, + "d": 57 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 43, + "d": 43 + }, + "mne/io/matrix.py": { + "a": 8, + "d": 7 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/reference.py": { + "a": 6, + "d": 9 + }, + "mne/io/tests/test_pick.py": { + "a": 1, + "d": 5 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_reference.py": { + "a": 9, + "d": 10 + }, + "mne/label.py": { + "a": 12, + "d": 10 + }, + "mne/minimum_norm/inverse.py": { + "a": 17, + "d": 12 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 15, + "d": 27 + }, + "mne/preprocessing/stim.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 13, + "d": 13 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 13, + "d": 12 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 3, + "d": 3 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 1, + "d": 1 + }, + 
"mne/simulation/tests/test_raw.py": { + "a": 4, + "d": 4 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 8, + "d": 6 + }, + "mne/tests/test_cov.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 32, + "d": 33 + }, + "mne/tests/test_evoked.py": { + "a": 9, + "d": 9 + }, + "mne/tests/test_label.py": { + "a": 2, + "d": 4 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 6, + "d": 6 + }, + "mne/time_frequency/tfr.py": { + "a": 14, + "d": 6 + }, + "mne/utils.py": { + "a": 25, + "d": 6 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 5 + }, + "mne/viz/ica.py": { + "a": 5, + "d": 5 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 3 + }, + "mne/viz/tests/test_topo.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 4, + "d": 6 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_raw.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/314.json b/doc/sphinxext/prs/314.json new file mode 100644 index 00000000000..89670ccd404 --- /dev/null +++ b/doc/sphinxext/prs/314.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "17dc87d5fb2bf5c00fbca5d9e9cbda757ce4611d", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/_templates/layout.html": { + "a": 17, + "d": 0 + }, + "doc/source/conf.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3141.json b/doc/sphinxext/prs/3141.json new file mode 100644 index 00000000000..709d75d38dd --- /dev/null +++ b/doc/sphinxext/prs/3141.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e0f8b1134aff9278721951629b97e1c5059635a0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 25, + "d": 15 + }, + "mne/source_space.py": { + "a": 34, + "d": 15 + }, + "mne/tests/test_source_space.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3143.json b/doc/sphinxext/prs/3143.json new file mode 100644 index 00000000000..f069ae4abbb --- /dev/null +++ b/doc/sphinxext/prs/3143.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "767d84b0f73cbfefa94b400287f8579f970b85f9", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 1 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 19, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 133, + "d": 59 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3144.json b/doc/sphinxext/prs/3144.json new file mode 100644 index 00000000000..c7bf80b3a58 --- /dev/null +++ b/doc/sphinxext/prs/3144.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "40b668f8ea6d0d4412f586493d98ebd071a5081c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 41, + "d": 5 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": 
{ + "a": 11, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3145.json b/doc/sphinxext/prs/3145.json new file mode 100644 index 00000000000..e759c1cee51 --- /dev/null +++ b/doc/sphinxext/prs/3145.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e48d5e92b8a03b8636f7930e78f32d9167b9812", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3146.json b/doc/sphinxext/prs/3146.json new file mode 100644 index 00000000000..492fc2d6615 --- /dev/null +++ b/doc/sphinxext/prs/3146.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "1c2791dc6447678957f3c00b1f44d4dd6f600cd0", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/faq.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/appendix/c_release_notes.rst": { + "a": 4, + "d": 6 + }, + "doc/manual/c_reference.rst": { + "a": 17, + "d": 17 + }, + "doc/manual/channel_interpolation.rst": { + "a": 2, + "d": 1 + }, + "doc/manual/decoding.rst": { + "a": 2, + "d": 1 + }, + "doc/manual/gui/analyze.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/gui/browse.rst": { + "a": 2, + "d": 2 + }, + "doc/manual/index.rst": { + "a": 0, + "d": 1 + }, + "doc/manual/io.rst": { + "a": 6, + "d": 8 + }, + "doc/manual/matlab.rst": { + "a": 5, + "d": 6 + }, + "doc/manual/memory.rst": { + "a": 2, + "d": 1 + }, + "doc/manual/pitfalls.rst": { + "a": 21, + "d": 5 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/sample_dataset.rst": { + "a": 4, + "d": 3 + }, + "doc/manual/source_localization/covariance.rst": { + "a": 0, + "d": 5 + }, + "doc/manual/source_localization/inverse.rst": { + "a": 49, + "d": 33 + }, + "doc/tutorials.rst": { + "a": 0, + "d": 1 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 0, + "d": 84 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 1 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_compute_covariance.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_eeg_erp.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_info.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_raw.py": { + "a": 8, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3147.json b/doc/sphinxext/prs/3147.json new file mode 100644 index 00000000000..9064197218f --- /dev/null +++ b/doc/sphinxext/prs/3147.json @@ -0,0 +1,147 @@ +{ + "merge_commit_sha": "c5a52b496a19592ec53c153a4e861913eae4ff77", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 0, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 0 + }, + "mne/channels/tests/test_layout.py": { + "a": 22, + "d": 42 + }, + "mne/dipole.py": { + "a": 2, + "d": 0 + }, + "mne/forward/_field_interpolation.py": { + "a": 2, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 0 + }, + "mne/io/array/array.py": { + "a": 0, + "d": 1 + }, + "mne/io/base.py": { + "a": 5, + "d": 5 + }, + 
"mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 20, + "d": 170 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 5, + "d": 3 + }, + "mne/io/reference.py": { + "a": 3, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 5, + "d": 50 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 0 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3148.json b/doc/sphinxext/prs/3148.json new file mode 100644 index 00000000000..40601006118 --- /dev/null +++ b/doc/sphinxext/prs/3148.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "604cf50d6e7a65d80f7795e78557392587a1c7d5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/selection.py": { + "a": 61, + "d": 56 + }, + "mne/tests/test_selection.py": { + "a": 27, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3150.json b/doc/sphinxext/prs/3150.json new file mode 100644 index 00000000000..dab23355f7b --- /dev/null +++ b/doc/sphinxext/prs/3150.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "5bfde4dc14e643e30a009316437ee424c77b6b9d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 2, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 2 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/bem.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 6, + "d": 6 + }, + "mne/cov.py": { + "a": 3, + "d": 3 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 3, + "d": 3 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_creating_data_structures.py": { + "a": 4, + "d": 4 + }, + "tutorials/plot_info.py": { + "a": 4, + "d": 4 + }, + "tutorials/plot_object_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_evoked.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_raw.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3151.json b/doc/sphinxext/prs/3151.json new file mode 100644 index 
00000000000..e2e50cb500a --- /dev/null +++ b/doc/sphinxext/prs/3151.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a9c5d3850fa5e8a35ef7817d4e7b6549484348d4", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/evoked.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3152.json b/doc/sphinxext/prs/3152.json new file mode 100644 index 00000000000..2ac7483856f --- /dev/null +++ b/doc/sphinxext/prs/3152.json @@ -0,0 +1,187 @@ +{ + "merge_commit_sha": "bbaa5a80b4ebdfc99a942b87e670859c23a384f1", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 4, + "d": 0 + }, + "doc/tutorials.rst": { + "a": 0, + "d": 2 + }, + "examples/README.txt": { + "a": 0, + "d": 3 + }, + "examples/decoding/plot_decoding_time_generalization.py": { + "a": 0, + "d": 55 + }, + "examples/forward/plot_bem_contour_mri.py": { + "a": 0, + "d": 27 + }, + "examples/forward/plot_coregistration_transform.py": { + "a": 0, + "d": 31 + }, + "examples/io/README.txt": { + "a": 2, + "d": 1 + }, + "examples/io/plot_objects_from_arrays.py": { + "a": 5, + "d": 4 + }, + "examples/io/plot_read_evoked.py": { + "a": 5, + "d": 0 + }, + "examples/plot_compute_mne_inverse.py": { + "a": 0, + "d": 60 + }, + "examples/plot_extract_events_from_raw.py": { + "a": 0, + "d": 40 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 0, + "d": 77 + }, + "examples/preprocessing/plot_corrmap_detection.py": { + "a": 0, + "d": 74 + }, + "examples/preprocessing/plot_estimate_covariance_matrix_baseline.py": { + "a": 0, + "d": 55 + }, + "examples/preprocessing/plot_estimate_covariance_matrix_raw.py": { + "a": 0, + "d": 38 + }, + "examples/time_frequency/plot_stockwell.py": { + "a": 0, + "d": 50 + }, + "examples/time_frequency/plot_time_frequency_multitaper_sensors.py": { + "a": 0, + "d": 55 + }, + "examples/visualization/plot_evoked_erf_erp.py": { + "a": 0, + "d": 51 + }, + "examples/visualization/plot_joint_evoked_topomap.py": { + "a": 0, + "d": 34 + }, + "examples/visualization/plot_meg_eeg_fields_3d.py": { + "a": 0, + "d": 46 + }, + "examples/visualization/plot_ssp_projs_topomaps.py": { + "a": 0, + "d": 31 + }, + "examples/visualization/plot_topo_channel_epochs_image.py": { + "a": 0, + "d": 58 + }, + "examples/visualization/plot_topography.py": { + "a": 0, + "d": 33 + }, + "mne/forward/_field_interpolation.py": { + "a": 3, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 5, + "d": 5 + }, + "tutorials/plot_artifacts_detection.py": { + "a": 56, + "d": 1 + }, + "tutorials/plot_artifacts_intro.py": { + "a": 0, + "d": 60 + }, + "tutorials/plot_compute_covariance.py": { + "a": 16, + "d": 7 + }, + "tutorials/plot_creating_data_structures.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 9, + "d": 1 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 29, + "d": 11 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 3, + "d": 0 + }, + "tutorials/plot_stats_cluster_methods.py": { + "a": 16, + "d": 8 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 30, + "d": 20 + }, + "tutorials/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 6, + "d": 3 + }, + "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 52, + 
"d": 38 + }, + "tutorials/plot_stats_cluster_time_frequency.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 49, + "d": 31 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 17, + "d": 14 + }, + "tutorials/plot_visualize_epochs.py": { + "a": 4, + "d": 2 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 35, + "d": 30 + }, + "tutorials/plot_visualize_raw.py": { + "a": 21, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3154.json b/doc/sphinxext/prs/3154.json new file mode 100644 index 00000000000..5ffca38642c --- /dev/null +++ b/doc/sphinxext/prs/3154.json @@ -0,0 +1,179 @@ +{ + "merge_commit_sha": "b9f34a994136c81b10b91bf1b3afc1c96e2d1ead", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 8, + "d": 0 + }, + "Makefile": { + "a": 8, + "d": 4 + }, + "dictionary.txt": { + "a": 254, + "d": 19 + }, + "doc/manual/c_reference.rst": { + "a": 3, + "d": 3 + }, + "doc/manual/io.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/preprocessing/maxwell.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/time_frequency.rst": { + "a": 3, + "d": 3 + }, + "doc/references.rst": { + "a": 7, + "d": 7 + }, + "doc/tutorials/seven_stories_about_mne.rst": { + "a": 2, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_resample.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/csp.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 4, + "d": 4 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 2 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/infomax_.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/xdawn.py": { + "a": 3, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_forward.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3155.json b/doc/sphinxext/prs/3155.json new file mode 100644 index 00000000000..c5f290450b1 --- /dev/null +++ b/doc/sphinxext/prs/3155.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f7c9eead4c0846ef997c22d79a2ae07092652313", + "authors": [ + { 
+ "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 56, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3157.json b/doc/sphinxext/prs/3157.json new file mode 100644 index 00000000000..7b53bb88f13 --- /dev/null +++ b/doc/sphinxext/prs/3157.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "f05bd2a2563c96eb92f47a0225e0abf1a5ddeedc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 17, + "d": 14 + }, + "mne/channels/tests/test_channels.py": { + "a": 10, + "d": 6 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 14, + "d": 7 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/time_gen.py": { + "a": 2, + "d": 4 + }, + "mne/defaults.py": { + "a": 7, + "d": 6 + }, + "mne/epochs.py": { + "a": 7, + "d": 9 + }, + "mne/evoked.py": { + "a": 11, + "d": 13 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 2 + }, + "mne/io/pick.py": { + "a": 27, + "d": 18 + }, + "mne/io/tests/test_pick.py": { + "a": 36, + "d": 22 + }, + "mne/preprocessing/ica.py": { + "a": 14, + "d": 11 + }, + "mne/tests/test_epochs.py": { + "a": 10, + "d": 7 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 8, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3158.json b/doc/sphinxext/prs/3158.json new file mode 100644 index 00000000000..56fdb68cabb --- /dev/null +++ b/doc/sphinxext/prs/3158.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0a181054a2dfa02ea2f9fa2e6a28f46989a3face", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 0, + "d": 1 + }, + "mne/forward/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 6 + }, + "mne/forward/forward.py": { + "a": 90, + "d": 17 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 14, + "d": 88 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3159.json b/doc/sphinxext/prs/3159.json new file mode 100644 index 00000000000..3bfd4475665 --- /dev/null +++ b/doc/sphinxext/prs/3159.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3f53de69ba7e2354bf304e25196bb2556fc5863a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 10, + "d": 2 + }, + "mne/dipole.py": { + "a": 16, + "d": 5 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 35, + "d": 33 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/316.json b/doc/sphinxext/prs/316.json new file mode 100644 index 00000000000..fa889de0f6e --- /dev/null +++ b/doc/sphinxext/prs/316.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "4a886f6e8a7fd46bf7f990bbad2b786755555a1e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 13, + "d": 3 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 123, + "d": 20 + }, + "mne/fiff/tests/test_raw.py": { + "a": 38, + "d": 14 + }, + 
"mne/filter.py": { + "a": 324, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 59, + "d": 7 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3161.json b/doc/sphinxext/prs/3161.json new file mode 100644 index 00000000000..d77c4e5b092 --- /dev/null +++ b/doc/sphinxext/prs/3161.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b5a7c26f8a843d615dbb63ed3b13aae7d65fde2e", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/stats/plot_linear_regression_raw.py": { + "a": 22, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3162.json b/doc/sphinxext/prs/3162.json new file mode 100644 index 00000000000..b672f779bfa --- /dev/null +++ b/doc/sphinxext/prs/3162.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "95355592de583a9758b4531ede4f12a88d543a2a", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 42, + "d": 25 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 81, + "d": 57 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3165.json b/doc/sphinxext/prs/3165.json new file mode 100644 index 00000000000..a16aa0bd095 --- /dev/null +++ b/doc/sphinxext/prs/3165.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4badef68fdb60412f0bdbda2d8f3d2a426e32c58", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 8, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 40, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3166.json b/doc/sphinxext/prs/3166.json new file mode 100644 index 00000000000..598375c9e79 --- /dev/null +++ b/doc/sphinxext/prs/3166.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d170cd108b11aa828696c2b50c3f5ccaef09bc7c", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "tutorials/plot_eeg_erp.py": { + "a": 64, + "d": 1 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 12, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 24, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3167.json b/doc/sphinxext/prs/3167.json new file mode 100644 index 00000000000..9071b240c24 --- /dev/null +++ b/doc/sphinxext/prs/3167.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b25fbf28a83ea45021933d891a74f02b4bdfe5a9", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/getting_started.rst": { + "a": 3, + "d": 3 + }, + "tutorials/plot_introduction.py": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3171.json b/doc/sphinxext/prs/3171.json new file mode 100644 index 00000000000..913ab03e483 --- /dev/null +++ b/doc/sphinxext/prs/3171.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0ac35dc88f4711fbd54677025dd15a0627b18b39", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/stats/parametric.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3172.json b/doc/sphinxext/prs/3172.json new file mode 100644 index 00000000000..43f7ae3a73c --- /dev/null +++ b/doc/sphinxext/prs/3172.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"87d397fdb7098be195ad93c39fffc15943bce0d6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 49, + "d": 12 + }, + "doc/manual/pitfalls.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3174.json b/doc/sphinxext/prs/3174.json new file mode 100644 index 00000000000..37ee80c4d65 --- /dev/null +++ b/doc/sphinxext/prs/3174.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "ae040b3a1045c2fed73458ad0b8da6c919c533e2", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/faq.rst": { + "a": 11, + "d": 1 + }, + "doc/install_mne_python.rst": { + "a": 25, + "d": 0 + }, + "doc/manual/index.rst": { + "a": 87, + "d": 111 + }, + "doc/python_reference.rst": { + "a": 9, + "d": 5 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 8, + "d": 4 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 0 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 17, + "d": 11 + }, + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 24, + "d": 24 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3176.json b/doc/sphinxext/prs/3176.json new file mode 100644 index 00000000000..975da6e5e05 --- /dev/null +++ b/doc/sphinxext/prs/3176.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "47304def9521ca29ae4835d5ef591e1d8fce7872", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "tutorials/plot_epoching_and_averaging.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3177.json b/doc/sphinxext/prs/3177.json new file mode 100644 index 00000000000..295dd81a76e --- /dev/null +++ b/doc/sphinxext/prs/3177.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "831c0ba4089ad67367777f4ef1b27853584679e4", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "tutorials/plot_artifacts_correction_maxwell_filtering.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 9, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3178.json b/doc/sphinxext/prs/3178.json new file mode 100644 index 00000000000..4ba15655ba0 --- /dev/null +++ b/doc/sphinxext/prs/3178.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "393c2c81de10128f3b6f4aff26727f681fcb29fb", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/manual/migrating.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/318.json b/doc/sphinxext/prs/318.json new file mode 100644 index 00000000000..ed8fae8836e --- /dev/null +++ b/doc/sphinxext/prs/318.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "39e67d2543425a65ffd79e0f6c04841d1c8f2c5c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 4, + "d": 5 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 10, + "d": 11 + }, + "mne/fiff/constants.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 43, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 20, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3181.json b/doc/sphinxext/prs/3181.json new file mode 100644 index 00000000000..bcf234efee1 --- /dev/null +++ b/doc/sphinxext/prs/3181.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ba0b3eff94e019c69fb063c04ccd50601413dd91", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3182.json b/doc/sphinxext/prs/3182.json new file mode 100644 index 00000000000..e8b956336b3 --- /dev/null +++ b/doc/sphinxext/prs/3182.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4bf30daf575bdaf3c6155ce552d171a5bf3b3e72", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3185.json b/doc/sphinxext/prs/3185.json new file mode 100644 index 00000000000..550f3880974 --- /dev/null +++ b/doc/sphinxext/prs/3185.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "80fadbc7d5e586d90a14cc60ae82b602eee04dd5", + "authors": [ + { + "n": "cmoutard", + "e": "clement.moutard@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 39, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 87, + "d": 40 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3188.json b/doc/sphinxext/prs/3188.json new file mode 100644 index 00000000000..bd6e4a2fb61 --- /dev/null +++ b/doc/sphinxext/prs/3188.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7cf5cd4d3c68eb8d2bee417a5826252ba67c5b2b", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 6, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3191.json b/doc/sphinxext/prs/3191.json new file mode 100644 index 00000000000..3bda0e1c8d5 --- /dev/null +++ b/doc/sphinxext/prs/3191.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1795fbd55533fd75c856a928cd02fd243437cc4e", + "authors": [ + { + "n": "Mark Wronkiewicz", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/label.py": { + "a": 131, + "d": 30 + }, + "mne/tests/test_label.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3194.json b/doc/sphinxext/prs/3194.json new file mode 100644 index 00000000000..8010fd74782 --- /dev/null +++ b/doc/sphinxext/prs/3194.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3a7cf4201535a061dbf3dad9f8ded23729886bb0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": 
{ + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 12, + "d": 5 + }, + "mne/tests/test_cov.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3195.json b/doc/sphinxext/prs/3195.json new file mode 100644 index 00000000000..cb877258dbf --- /dev/null +++ b/doc/sphinxext/prs/3195.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5064139dc838ebe02f366b73dcee357eb77f8421", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/event.py": { + "a": 18, + "d": 5 + }, + "mne/tests/test_event.py": { + "a": 13, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3197.json b/doc/sphinxext/prs/3197.json new file mode 100644 index 00000000000..e48ad4a3a29 --- /dev/null +++ b/doc/sphinxext/prs/3197.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5a71c715331b590c2c9d95233e3bd38b972a6dce", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/event.py": { + "a": 12, + "d": 5 + }, + "mne/tests/test_event.py": { + "a": 15, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3199.json b/doc/sphinxext/prs/3199.json new file mode 100644 index 00000000000..8f542130c97 --- /dev/null +++ b/doc/sphinxext/prs/3199.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d3705ff657fe1b0e0b0eb9b4c0c4cb2cc60fbdc8", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 19, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 41, + "d": 9 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/32.json b/doc/sphinxext/prs/32.json new file mode 100644 index 00000000000..fc2b0a73143 --- /dev/null +++ b/doc/sphinxext/prs/32.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3877ecb995794504094120a7c3f073d4620e24ee", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/320.json b/doc/sphinxext/prs/320.json new file mode 100644 index 00000000000..244a1f24e06 --- /dev/null +++ b/doc/sphinxext/prs/320.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "7e1026beff25b29128e6c3ef6fd1160d9b6caee6", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 4, + "d": 4 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 11, + "d": 15 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3202.json b/doc/sphinxext/prs/3202.json new file mode 100644 index 00000000000..98bc8575812 --- /dev/null +++ b/doc/sphinxext/prs/3202.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c93586e339393474d2fbc6f68ae43e88d1826551", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 8, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 11, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 37, 
+ "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3203.json b/doc/sphinxext/prs/3203.json new file mode 100644 index 00000000000..a5f9bfc336d --- /dev/null +++ b/doc/sphinxext/prs/3203.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f932004db951abaf7d0522f961ca9a3be6e72167", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 17, + "d": 9 + }, + "mne/viz/tests/test_raw.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3205.json b/doc/sphinxext/prs/3205.json new file mode 100644 index 00000000000..a16f2794dcb --- /dev/null +++ b/doc/sphinxext/prs/3205.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "577d5fa2f244b83dc4bf30f3a46b1c5022969ef4", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "examples/io/plot_elekta_epochs.py": { + "a": 68, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/multimodal/__init__.py": { + "a": 4, + "d": 0 + }, + "mne/datasets/multimodal/multimodal.py": { + "a": 30, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 8, + "d": 2 + }, + "mne/event.py": { + "a": 457, + "d": 2 + }, + "mne/tests/test_event.py": { + "a": 81, + "d": 3 + }, + "mne/utils.py": { + "a": 5, + "d": 6 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3206.json b/doc/sphinxext/prs/3206.json new file mode 100644 index 00000000000..9a4d957006c --- /dev/null +++ b/doc/sphinxext/prs/3206.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3b182bf67002ed51bed2a917bbc475162a3836eb", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 7, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3207.json b/doc/sphinxext/prs/3207.json new file mode 100644 index 00000000000..7236892024b --- /dev/null +++ b/doc/sphinxext/prs/3207.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "747e1cb3873322adc5a7443ae7eb639577a06a00", + "authors": [ + { + "n": "pbnsilva", + "e": "pedrobnsilva@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3208.json b/doc/sphinxext/prs/3208.json new file mode 100644 index 00000000000..962c47eabcb --- /dev/null +++ b/doc/sphinxext/prs/3208.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9108028e91255e1fa9ab255c25929ccada1a0cb6", + "authors": [ + { + "n": "MartinBaBer", + "e": "Martinb.nmb@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3209.json b/doc/sphinxext/prs/3209.json new file mode 100644 index 00000000000..16b3dfc3acd --- /dev/null +++ b/doc/sphinxext/prs/3209.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0271393994bf44d6a2489b078912733b62de1bee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/plot_object_epochs.py": { + "a": 11, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/321.json b/doc/sphinxext/prs/321.json new file mode 100644 
index 00000000000..4c1a513af50 --- /dev/null +++ b/doc/sphinxext/prs/321.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "87a851d459d44ee91a9fdc46c80e4d90035a821f", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 48, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3211.json b/doc/sphinxext/prs/3211.json new file mode 100644 index 00000000000..b6c3c5a5026 --- /dev/null +++ b/doc/sphinxext/prs/3211.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1edafe34f02c14cda0d4fd28942c56d2871f9135", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 8 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 34, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3213.json b/doc/sphinxext/prs/3213.json new file mode 100644 index 00000000000..c4ace65e567 --- /dev/null +++ b/doc/sphinxext/prs/3213.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "23aed0a07ba2cfa84452107dd005750cd4aba9b9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 56, + "d": 38 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 10, + "d": 6 + }, + "mne/io/fiff/raw.py": { + "a": 31, + "d": 23 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 69, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3215.json b/doc/sphinxext/prs/3215.json new file mode 100644 index 00000000000..6904c7d5382 --- /dev/null +++ b/doc/sphinxext/prs/3215.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "76245d0bc41170e1e02c0410659121eedaffe135", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 6, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 8, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3216.json b/doc/sphinxext/prs/3216.json new file mode 100644 index 00000000000..040e143080d --- /dev/null +++ b/doc/sphinxext/prs/3216.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ee8ac19d81650426735be88bda023dd82e1f92bf", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/viz/tests/test_utils.py": { + "a": 2, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3217.json b/doc/sphinxext/prs/3217.json new file mode 100644 index 00000000000..ac8b3163198 --- /dev/null +++ b/doc/sphinxext/prs/3217.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6248b46f9e41dc3fbdbad4f36e407f046aaa5a94", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3218.json b/doc/sphinxext/prs/3218.json new file mode 100644 index 00000000000..a6f98893665 --- /dev/null +++ b/doc/sphinxext/prs/3218.json @@ -0,0 +1,127 @@ +{ + 
"merge_commit_sha": "f533b636d2c2428d00e9c0e495a4353386828423", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 18, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/bem.py": { + "a": 2, + "d": 8 + }, + "mne/channels/channels.py": { + "a": 20, + "d": 40 + }, + "mne/chpi.py": { + "a": 1, + "d": 78 + }, + "mne/cov.py": { + "a": 9, + "d": 18 + }, + "mne/epochs.py": { + "a": 41, + "d": 85 + }, + "mne/evoked.py": { + "a": 9, + "d": 19 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 82 + }, + "mne/io/base.py": { + "a": 4, + "d": 63 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 28 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 0, + "d": 9 + }, + "mne/label.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 9 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 6 + }, + "mne/tests/test_bem.py": { + "a": 2, + "d": 5 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 27 + }, + "mne/tests/test_dipole.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 5 + }, + "mne/time_frequency/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 57 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 173 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 71, + "d": 82 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 12 + }, + "mne/utils.py": { + "a": 8, + "d": 11 + }, + "mne/viz/tests/test_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/322.json b/doc/sphinxext/prs/322.json new file mode 100644 index 00000000000..a3f886519f9 --- /dev/null +++ b/doc/sphinxext/prs/322.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ccf9522e79dd5333b83e20dd96c0f0126189584b", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3222.json b/doc/sphinxext/prs/3222.json new file mode 100644 index 00000000000..9b80522eb5a --- /dev/null +++ b/doc/sphinxext/prs/3222.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "aa33645ada61a3d3f808c61fe7da1f1eac8d849c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/baseline.py": { + "a": 19, + "d": 8 + }, + "mne/epochs.py": { + "a": 36, + "d": 16 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 7, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3224.json b/doc/sphinxext/prs/3224.json new file mode 100644 index 00000000000..099fc0bba9a --- /dev/null +++ b/doc/sphinxext/prs/3224.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "a7525730809a7f591850a97039afa561730b7146", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/class.rst": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/numpy_ext/docscrape.py": { + "a": 3, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 22, + "d": 1 + }, + "mne/dipole.py": { + "a": 39, + "d": 16 + }, + "mne/epochs.py": { + "a": 82, + "d": 37 + }, + "mne/io/base.py": { + "a": 50, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 2 + }, + "mne/utils.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3228.json b/doc/sphinxext/prs/3228.json new file mode 100644 index 00000000000..8fe73303755 --- /dev/null +++ b/doc/sphinxext/prs/3228.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5e759b43b5c8a5aa69a4678b415efae77627ebc1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 3 + }, + "codecov.yml": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/323.json b/doc/sphinxext/prs/323.json new file mode 100644 index 00000000000..fe4b6512b02 --- /dev/null +++ b/doc/sphinxext/prs/323.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cf18e25d167169f40b598aced1a9bb56a86cf3d7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/gen_rst.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3230.json b/doc/sphinxext/prs/3230.json new file mode 100644 index 00000000000..46adc225016 --- /dev/null +++ b/doc/sphinxext/prs/3230.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "74d85eca4e43aa542c2fce0d1d348a04e5658196", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 9, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3231.json b/doc/sphinxext/prs/3231.json new file mode 100644 index 00000000000..16c2762cf8c --- /dev/null +++ b/doc/sphinxext/prs/3231.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8cb4ffbf3b9194b39b316f0247146a31f47e2f44", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 5, + "d": 6 + }, + "codecov.yml": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of 
file diff --git a/doc/sphinxext/prs/3233.json b/doc/sphinxext/prs/3233.json new file mode 100644 index 00000000000..8ab1253f74a --- /dev/null +++ b/doc/sphinxext/prs/3233.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0a861bf8fcbd3eb833cba9fc9468a2fd877532a9", + "authors": [ + { + "n": "Yaroslav Halchenko", + "e": null + } + ], + "changes": { + "mne/surface.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3234.json b/doc/sphinxext/prs/3234.json new file mode 100644 index 00000000000..18902091c42 --- /dev/null +++ b/doc/sphinxext/prs/3234.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5fdb8b0c6795900e022e449596617d2f2560ebc6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_epochs.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3235.json b/doc/sphinxext/prs/3235.json new file mode 100644 index 00000000000..286d2358269 --- /dev/null +++ b/doc/sphinxext/prs/3235.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "60a7440269b12bb5c44bd246e90af80dd8551cf2", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 68, + "d": 20 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3236.json b/doc/sphinxext/prs/3236.json new file mode 100644 index 00000000000..a6c3ea8f39a --- /dev/null +++ b/doc/sphinxext/prs/3236.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ab461ad72a7530d5161d10e7d68d12ec9dba27ef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 34, + "d": 73 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3237.json b/doc/sphinxext/prs/3237.json new file mode 100644 index 00000000000..15a24882e2a --- /dev/null +++ b/doc/sphinxext/prs/3237.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "b86ba4a6f4d6378a6499adfd918f20385a73becc", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 17, + "d": 2 + }, + "mne/data/mne_analyze.sel": { + "a": 7, + "d": 1 + }, + "mne/io/base.py": { + "a": 8, + "d": 4 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 0 + }, + "mne/selection.py": { + "a": 68, + "d": 2 + }, + "mne/utils.py": { + "a": 5, + "d": 5 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 189, + "d": 46 + }, + "mne/viz/tests/test_raw.py": { + "a": 23, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 225, + "d": 35 + }, + "tutorials/plot_visualize_raw.py": { + "a": 33, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3238.json b/doc/sphinxext/prs/3238.json new file mode 100644 index 00000000000..067d791f536 --- /dev/null +++ b/doc/sphinxext/prs/3238.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "251ec96443f560c3fdac752f1ad36788eed960ba", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 20, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 5, + "d": 4 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 0, + "d": 12 + }, + "mne/io/meas_info.py": { + "a": 30, + "d": 8 + }, + 
"mne/io/tests/test_meas_info.py": { + "a": 43, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/324.json b/doc/sphinxext/prs/324.json new file mode 100644 index 00000000000..d0d17971f20 --- /dev/null +++ b/doc/sphinxext/prs/324.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "929b10daea23e4e6d23bd736f9460ed6a95230c8", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 311, + "d": 138 + }, + "mne/epochs.py": { + "a": 2, + "d": 0 + }, + "mne/filter.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3241.json b/doc/sphinxext/prs/3241.json new file mode 100644 index 00000000000..a4aad779de5 --- /dev/null +++ b/doc/sphinxext/prs/3241.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e51fa95b309b13de1f242fde49f25d53361900b3", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/manual/appendix/bem_model.rst": { + "a": 8, + "d": 2 + }, + "mne/bem.py": { + "a": 15, + "d": 2 + }, + "mne/commands/mne_flash_bem.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3245.json b/doc/sphinxext/prs/3245.json new file mode 100644 index 00000000000..a579ce4bd37 --- /dev/null +++ b/doc/sphinxext/prs/3245.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3efd6f8b411571c481f45d03028c6fb9b83d43b1", + "authors": [ + { + "n": "Asish Panda", + "e": "asishrocks95@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_xdawn_transformer.py": { + "a": 48, + "d": 0 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_xdawn.py": { + "a": 97, + "d": 0 + }, + "mne/decoding/xdawn.py": { + "a": 174, + "d": 0 + }, + "mne/preprocessing/xdawn.py": { + "a": 164, + "d": 128 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3246.json b/doc/sphinxext/prs/3246.json new file mode 100644 index 00000000000..e6ed3e6e011 --- /dev/null +++ b/doc/sphinxext/prs/3246.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e55b6f3f0a6beb2273592be3404ee4ccb3659e2f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/dipole.py": { + "a": 69, + "d": 18 + }, + "mne/tests/test_dipole.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3250.json b/doc/sphinxext/prs/3250.json new file mode 100644 index 00000000000..c96cfed1452 --- /dev/null +++ b/doc/sphinxext/prs/3250.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f04d1460d10ebe3b9466ccad9c077040d42a6d5f", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3251.json b/doc/sphinxext/prs/3251.json new file mode 100644 index 00000000000..10534f3820e --- /dev/null +++ b/doc/sphinxext/prs/3251.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2cacb24bedc0ac09077f71c7637cb7d707800268", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/evoked.py": { + "a": 15, + "d": 7 + }, + "mne/viz/evoked.py": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3253.json b/doc/sphinxext/prs/3253.json new file mode 100644 index 00000000000..174185446e6 --- /dev/null +++ 
b/doc/sphinxext/prs/3253.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bacd4adfa5645827784e2d34ab80c33d129d98a6", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 6 + }, + "mne/viz/tests/test_topomap.py": { + "a": 5, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3254.json b/doc/sphinxext/prs/3254.json new file mode 100644 index 00000000000..aeec60562d9 --- /dev/null +++ b/doc/sphinxext/prs/3254.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "63feb683cd1f8ddd598a78d12c8ef522f9ca2d78", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 7, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 9, + "d": 3 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/326.json b/doc/sphinxext/prs/326.json new file mode 100644 index 00000000000..3706bcd68a4 --- /dev/null +++ b/doc/sphinxext/prs/326.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "0be6f829b6157d1b94dbca119dde5c69a69ea9a8", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_ecg_artifacts_from_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_eog_artifacts_from_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 3, + "d": 3 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 9, + "d": 10 + }, + "mne/preprocessing/ica.py": { + "a": 83, + "d": 63 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 39, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3261.json b/doc/sphinxext/prs/3261.json new file mode 100644 index 00000000000..793d06083cd --- /dev/null +++ b/doc/sphinxext/prs/3261.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0fff718f956ad86fb8b9154f87e6414c64c758c4", + "authors": [ + { + "n": "Pablo-Arias", + "e": "pablo12co@hotmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 34, + "d": 16 + }, + "mne/io/brainvision/tests/data/test.vhdr": { + "a": 10, + "d": 10 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 10, + "d": 1 + }, + "mne/io/constants.py": { + "a": 25, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3266.json b/doc/sphinxext/prs/3266.json new file mode 100644 index 00000000000..03e2d3e8211 --- /dev/null +++ b/doc/sphinxext/prs/3266.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bfb3f81e4813f4d65af8c3d84933ee08a94b9d09", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/externals/tempita/_looper.py": { + "a": 2, + "d": 2 + }, + "setup.cfg": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3270.json b/doc/sphinxext/prs/3270.json new file mode 100644 index 00000000000..b140810fcc1 --- /dev/null +++ b/doc/sphinxext/prs/3270.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f5b226b3298bccbfe412f2ac5b7d83e762ef2e6f", + "authors": [ + { + "n": "Alexander Rudiuk", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 37, + "d": 7 + }, + 
"mne/preprocessing/tests/test_ica.py": { + "a": 62, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3272.json b/doc/sphinxext/prs/3272.json new file mode 100644 index 00000000000..2464f91fbeb --- /dev/null +++ b/doc/sphinxext/prs/3272.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a1cbb6bf96612dec707e19876bef492f9f5d12d0", + "authors": [ + { + "n": "Alexander Rudiuk", + "e": null + } + ], + "changes": { + "mne/channels/tests/test_interpolation.py": { + "a": 0, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3273.json b/doc/sphinxext/prs/3273.json new file mode 100644 index 00000000000..23a415e2da3 --- /dev/null +++ b/doc/sphinxext/prs/3273.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "6be10dbd4462874acabad42eab48a277754ff6f0", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 73, + "d": 25 + }, + "mne/commands/mne_flash_bem.py": { + "a": 1, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 58, + "d": 0 + }, + "mne/surface.py": { + "a": 139, + "d": 15 + }, + "mne/tests/test_bem.py": { + "a": 46, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 6, + "d": 3 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3275.json b/doc/sphinxext/prs/3275.json new file mode 100644 index 00000000000..a1ea8b18575 --- /dev/null +++ b/doc/sphinxext/prs/3275.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "6f69b79fea6c2af992a9a1efecaf5d9a8d8b2688", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 27, + "d": 1 + }, + "mne/event.py": { + "a": 9, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 53, + "d": 0 + }, + "mne/tests/test_event.py": { + "a": 5, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 24, + "d": 6 + }, + "mne/viz/ica.py": { + "a": 249, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 55, + "d": 1 + }, + "mne/viz/tests/test_utils.py": { + "a": 21, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 50, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 26, + "d": 0 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 35, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3277.json b/doc/sphinxext/prs/3277.json new file mode 100644 index 00000000000..f428dfdbc49 --- /dev/null +++ b/doc/sphinxext/prs/3277.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "97b38c9a75c06ffaecf5eef588a689899f86ded2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 2 + }, + "circle.yml": { + "a": 3, + "d": 3 + }, + "doc/conf.py": { + "a": 6, + "d": 1 + }, + "doc/faq.rst": { + "a": 1, + "d": 2 + }, + "doc/manual/cookbook.rst": { + "a": 0, + "d": 2 + }, + "doc/manual/io.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/source_localization/inverse.rst": { + "a": 1, + "d": 1 + }, 
+ "doc/python_reference.rst": { + "a": 0, + "d": 1 + }, + "doc/tutorials.rst": { + "a": 11, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 24, + "d": 23 + }, + "mne/filter.py": { + "a": 126, + "d": 5 + }, + "mne/io/base.py": { + "a": 31, + "d": 73 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 64, + "d": 57 + }, + "mne/io/meas_info.py": { + "a": 8, + "d": 2 + }, + "mne/stats/parametric.py": { + "a": 7, + "d": 5 + }, + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 6, + "d": 2 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_background_filtering.py": { + "a": 396, + "d": 0 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_eeg_erp.py": { + "a": 1, + "d": 0 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_forward.py": { + "a": 3, + "d": 1 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 15, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3279.json b/doc/sphinxext/prs/3279.json new file mode 100644 index 00000000000..f5b39de4887 --- /dev/null +++ b/doc/sphinxext/prs/3279.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "81bdd0f2f96e70d0628bd47d3c81540d75e052cb", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/328.json b/doc/sphinxext/prs/328.json new file mode 100644 index 00000000000..38f84ca5126 --- /dev/null +++ b/doc/sphinxext/prs/328.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f3ae96e77cecd6decf887e2186f0fa62732c668c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/mixed_norm/tests/test_inverse.py": { + "a": 9, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3289.json b/doc/sphinxext/prs/3289.json new file mode 100644 index 00000000000..c03d013201f --- /dev/null +++ b/doc/sphinxext/prs/3289.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5e7f9e22679ca1f88972701ac403bb48ec7817f9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 19, + "d": 27 + }, + "mne/evoked.py": { + "a": 67, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 39, + "d": 5 + }, + "mne/tests/test_evoked.py": { + "a": 47, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/329.json b/doc/sphinxext/prs/329.json new file mode 100644 index 00000000000..2aa10b39b85 --- /dev/null +++ b/doc/sphinxext/prs/329.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "10fb256c66a3b75e1ac828554c0b720bfd9b14ae", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3290.json b/doc/sphinxext/prs/3290.json new file mode 100644 index 00000000000..f6271f21409 --- /dev/null +++ b/doc/sphinxext/prs/3290.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "a3ed1052e21b39c104915418890a6dbb7a5d823d", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 11, + "d": 3 + }, + "mne/decoding/base.py": { + "a": 28, + "d": 6 + }, + "mne/decoding/csp.py": { + "a": 28, + "d": 6 + }, + "mne/epochs.py": { + "a": 19, + "d": 5 + }, + "mne/evoked.py": { + "a": 23, + "d": 5 + }, + "mne/preprocessing/ica.py": { + "a": 14, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 21, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 41, + "d": 15 + }, + "mne/viz/epochs.py": { + "a": 23, + "d": 10 + }, + "mne/viz/evoked.py": { + "a": 27, + "d": 8 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 27, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 11, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 146, + "d": 50 + }, + "mne/viz/utils.py": { + "a": 102, + "d": 3 + }, + "tutorials/plot_visualize_epochs.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3298.json b/doc/sphinxext/prs/3298.json new file mode 100644 index 00000000000..98d67e3612e --- /dev/null +++ b/doc/sphinxext/prs/3298.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9ac4a3c32964d77a41a4da9b70c8e92a4fc3e2ac", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 6 + }, + "Makefile": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/33.json b/doc/sphinxext/prs/33.json new file mode 100644 index 00000000000..3ec8d0e0497 --- /dev/null +++ b/doc/sphinxext/prs/33.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ce3e0e99f4974eec09701dc239afe30948348dbc", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 75, + "d": 26 + }, + "mne/fiff/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/proj.py": { + "a": 16, + "d": 1 + }, + "mne/fiff/tests/data/test_raw-eve.fif": { + "a": 0, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 14, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3303.json b/doc/sphinxext/prs/3303.json new file mode 100644 index 00000000000..71e73ac75d2 --- /dev/null +++ b/doc/sphinxext/prs/3303.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "80f8f9165daad65ae534c66ff0b86725aa2712c8", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 3 + }, + "mne/io/brainvision/tests/data/test.vhdr": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3304.json b/doc/sphinxext/prs/3304.json new file mode 100644 index 00000000000..586ee35b7fd --- /dev/null +++ b/doc/sphinxext/prs/3304.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7ba03032d4d7fb70cbbe0816b21eaa97a953ced7", + "authors": [ + { + "n": 
"Michael", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3306.json b/doc/sphinxext/prs/3306.json new file mode 100644 index 00000000000..fc59eadff5b --- /dev/null +++ b/doc/sphinxext/prs/3306.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c0e73eeb959a636fb25a6f4ca550ca3111f05852", + "authors": [ + { + "n": "Camilo Lamus", + "e": null + } + ], + "changes": { + "mne/simulation/tests/test_evoked.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3309.json b/doc/sphinxext/prs/3309.json new file mode 100644 index 00000000000..08aaf40c697 --- /dev/null +++ b/doc/sphinxext/prs/3309.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "811e8fc8783507c166bbe079b4647ebfe01adc38", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 22, + "d": 7 + }, + "mne/io/base.py": { + "a": 8, + "d": 5 + }, + "mne/selection.py": { + "a": 9, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 27, + "d": 23 + }, + "mne/viz/tests/test_raw.py": { + "a": 37, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 173, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/331.json b/doc/sphinxext/prs/331.json new file mode 100644 index 00000000000..a2316bbdf84 --- /dev/null +++ b/doc/sphinxext/prs/331.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c51c7050ddce81ba8ce59ffc13b9c745c59a39b4", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 5, + "d": 1 + }, + "mne/fiff/evoked.py": { + "a": 8, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 3, + "d": 1 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 2, + "d": 0 + }, + "mne/fiff/tests/test_raw.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3312.json b/doc/sphinxext/prs/3312.json new file mode 100644 index 00000000000..013cdec12cb --- /dev/null +++ b/doc/sphinxext/prs/3312.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5ecd19e4a696a07f294e8e7f89c0cda46ed1fb70", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 10, + "d": 1 + }, + "mne/utils.py": { + "a": 20, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3313.json b/doc/sphinxext/prs/3313.json new file mode 100644 index 00000000000..50ba941d408 --- /dev/null +++ b/doc/sphinxext/prs/3313.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7dc194df1b92ac54c8772c656ef55289afb80224", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 32, + "d": 8 + }, + "mne/epochs.py": { + "a": 13, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 40, + "d": 9 + }, + "mne/tests/test_epochs.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3314.json b/doc/sphinxext/prs/3314.json new file mode 100644 index 00000000000..1ff9171278d --- /dev/null +++ b/doc/sphinxext/prs/3314.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "ef1831679b0829518a5b889ccb74ac899ffba517", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": 
{ + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 56, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 77, + "d": 55 + }, + "mne/source_space.py": { + "a": 32, + "d": 15 + }, + "mne/tests/test_label.py": { + "a": 52, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3315.json b/doc/sphinxext/prs/3315.json new file mode 100644 index 00000000000..dc387787483 --- /dev/null +++ b/doc/sphinxext/prs/3315.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8a7921ed7d9c89887ac707608b1871e638adbf1a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/simulation/source.py": { + "a": 5, + "d": 0 + }, + "mne/simulation/tests/test_source.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3316.json b/doc/sphinxext/prs/3316.json new file mode 100644 index 00000000000..b8186058f9c --- /dev/null +++ b/doc/sphinxext/prs/3316.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "81e8f6309be5f864a691f85ba3f767c13cd7f86d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/simulation/evoked.py": { + "a": 7, + "d": 0 + }, + "mne/simulation/source.py": { + "a": 6, + "d": 3 + }, + "mne/simulation/tests/test_source.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3318.json b/doc/sphinxext/prs/3318.json new file mode 100644 index 00000000000..35fbd6731b5 --- /dev/null +++ b/doc/sphinxext/prs/3318.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "bfeb3fbed17d8e4868699119daf2db1eb65fd8fe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 7, + "d": 6 + }, + "mne/simulation/evoked.py": { + "a": 6, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 7, + "d": 1 + }, + "mne/simulation/source.py": { + "a": 101, + "d": 15 + }, + "mne/simulation/tests/test_source.py": { + "a": 35, + "d": 13 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3319.json b/doc/sphinxext/prs/3319.json new file mode 100644 index 00000000000..337c4c18c04 --- /dev/null +++ b/doc/sphinxext/prs/3319.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "83e95452fd1d0f2ded57481fd1a7312299011999", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/tests/test_transforms.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/332.json b/doc/sphinxext/prs/332.json new file mode 100644 index 00000000000..f7b1992ed74 --- /dev/null +++ b/doc/sphinxext/prs/332.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "683b93219a2027d8064b0b3bfa8f96b6518adc3c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/evoked.py": { + "a": 1, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 11, + "d": 4 + }, + "mne/fiff/tests/test_raw.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3320.json b/doc/sphinxext/prs/3320.json new file mode 100644 index 00000000000..ab04484f6e1 --- /dev/null +++ b/doc/sphinxext/prs/3320.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "727a8711bde1fc5fadabc02e0999379f7fb36593", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 87, + "d": 67 + }, + "mne/channels/tests/test_montage.py": { + "a": 18, + "d": 3 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 15, + "d": 16 + }, + "mne/gui/help/kit2fiff.json": { + "a": 4, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 3, + "d": 4 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 3 + }, + "mne/io/kit/tests/data/test.elp": { + "a": 37, + "d": 0 + }, + "mne/io/kit/tests/data/test.hsp": { + "a": 514, + "d": 0 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 15, + "d": 0 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 41, + "d": 13 + }, + "mne/io/meas_info.py": { + "a": 34, + "d": 4 + }, + "mne/io/tests/test_meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/transforms.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3321.json b/doc/sphinxext/prs/3321.json new file mode 100644 index 00000000000..26b2ce34b04 --- /dev/null +++ b/doc/sphinxext/prs/3321.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "545816811e9bdcdbbd4ee950230a51fb891260bd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_show_fiff.py": { + "a": 6, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/open.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3322.json b/doc/sphinxext/prs/3322.json new file mode 100644 index 00000000000..0a0e083870e --- /dev/null +++ b/doc/sphinxext/prs/3322.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "61885e6c5db39b1147dd9450ebc0922b2aa77c0b", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3323.json b/doc/sphinxext/prs/3323.json new file mode 100644 index 00000000000..d060f4f5122 --- /dev/null +++ b/doc/sphinxext/prs/3323.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "08cc8b8bfca1f869df563e51766f4cf65bbcdbc3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 9, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3324.json b/doc/sphinxext/prs/3324.json new file mode 100644 index 00000000000..00e20a79ba9 --- /dev/null +++ b/doc/sphinxext/prs/3324.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "42678aa7ec1d0c5eda464e31072892915095ae39", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".coveragerc": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 2, + "d": 0 + 
}, + "doc/whats_new.rst": { + "a": 4, + "d": 4 + }, + "mne/filter.py": { + "a": 169, + "d": 69 + }, + "mne/fixes.py": { + "a": 246, + "d": 11 + }, + "mne/io/base.py": { + "a": 5, + "d": 0 + }, + "mne/tests/test_filter.py": { + "a": 83, + "d": 17 + }, + "mne/tests/test_fixes.py": { + "a": 6, + "d": 3 + }, + "tutorials/plot_background_filtering.py": { + "a": 135, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3325.json b/doc/sphinxext/prs/3325.json new file mode 100644 index 00000000000..a37245233e0 --- /dev/null +++ b/doc/sphinxext/prs/3325.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "714ba90ec44cef321a6ffe4c9d9efd02ca9d89d6", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/tests/test_forward.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3327.json b/doc/sphinxext/prs/3327.json new file mode 100644 index 00000000000..b8790a2fb35 --- /dev/null +++ b/doc/sphinxext/prs/3327.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b25bf9af35547d62a4dea7e359de00381a44661e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3329.json b/doc/sphinxext/prs/3329.json new file mode 100644 index 00000000000..fa0b5765f1b --- /dev/null +++ b/doc/sphinxext/prs/3329.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "64a9c91c6cab7c34fada862cb825a5cb6add51a7", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 10, + "d": 103 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3331.json b/doc/sphinxext/prs/3331.json new file mode 100644 index 00000000000..f9b060ff7ac --- /dev/null +++ b/doc/sphinxext/prs/3331.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a361dced7663c616ac1fd184f7eed183d2b71580", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3332.json b/doc/sphinxext/prs/3332.json new file mode 100644 index 00000000000..fe7a03de318 --- /dev/null +++ b/doc/sphinxext/prs/3332.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "e5054176380023eb19849a7b0e729d0d33b1dcfb", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_coreg.py": { + "a": 15, + "d": 1 + }, + "mne/coreg.py": { + "a": 90, + "d": 42 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 1 + }, + "mne/gui/_coreg_gui.py": { + "a": 100, + "d": 88 + }, + "mne/gui/_fiducials_gui.py": { + "a": 8, + "d": 16 + }, + "mne/gui/_file_traits.py": { + "a": 35, + "d": 12 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 60, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3333.json b/doc/sphinxext/prs/3333.json new file mode 100644 index 00000000000..bb9b64ce014 --- /dev/null +++ b/doc/sphinxext/prs/3333.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "51a6e8ad22315b3dd16ab6e42f0bf939659d8b7c", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fixes.py": { + "a": 19, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3334.json b/doc/sphinxext/prs/3334.json new file mode 100644 index 00000000000..8b0069840d7 --- /dev/null +++ b/doc/sphinxext/prs/3334.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0a52ab33fa889e02242f5003fcd5d5f6cd7fc838", + "authors": [ + { + "n": "Nick Foti", + "e": null + } + ], + "changes": { + "mne/externals/h5io/_h5io.py": { + "a": 157, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3335.json b/doc/sphinxext/prs/3335.json new file mode 100644 index 00000000000..7cb153639b9 --- /dev/null +++ b/doc/sphinxext/prs/3335.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "01fa5736e64d990994513430a48c7256f4c8edfc", + "authors": [ + { + "n": "Félix Raimundo", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 36, + "d": 3 + }, + "mne/io/tests/data/test-ave-2.log": { + "a": 2, + "d": 0 + }, + "mne/io/tests/data/test-ave.log": { + "a": 1, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3337.json b/doc/sphinxext/prs/3337.json new file mode 100644 index 00000000000..de33ef2672e --- /dev/null +++ b/doc/sphinxext/prs/3337.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "cd0300a12613b20052af79655e0a1a38a1ac5d06", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 8, + "d": 2 + }, + "doc/contributing.rst": { + "a": 216, + "d": 151 + }, + "doc/index.rst": { + "a": 2, + "d": 3 + }, + "doc/sphinxext/gen_commands.py": { + "a": 8, + "d": 5 + }, + "doc/sphinxext/numpy_ext/__init__.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/numpy_ext/docscrape.py": { + "a": 0, + "d": 514 + }, + "doc/sphinxext/numpy_ext/docscrape_sphinx.py": { + "a": 0, + "d": 240 + }, + "doc/sphinxext/numpy_ext/numpydoc.py": { + "a": 0, + "d": 192 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/time_gen.py": { + "a": 27, + "d": 29 + }, + "mne/decoding/transformer.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 3, + "d": 3 + }, + "mne/utils.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3339.json b/doc/sphinxext/prs/3339.json new file mode 100644 index 00000000000..7adf2420b1a --- /dev/null +++ b/doc/sphinxext/prs/3339.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bf568d5676714263821d2ec97bdfe7f4e738bf11", + "authors": [ + { + "n": "Alexander Rudiuk", + "e": null + } + ], + "changes": { + "doc/contributing.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3341.json b/doc/sphinxext/prs/3341.json new file mode 100644 index 00000000000..c10279f3352 --- /dev/null +++ b/doc/sphinxext/prs/3341.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a4ea969b41d0f58c047ad481fff69c9abd73bc23", + "authors": [ + { + "n": "Alexander Rudiuk", + "e": null + } + ], + "changes": { + "doc/getting_started.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3342.json b/doc/sphinxext/prs/3342.json new file mode 100644 index 00000000000..52c1c71f74d --- /dev/null +++ 
b/doc/sphinxext/prs/3342.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bd3a8c16dca17435b70e572c2e5e29bf66a14ac2", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 6, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3343.json b/doc/sphinxext/prs/3343.json new file mode 100644 index 00000000000..33424213387 --- /dev/null +++ b/doc/sphinxext/prs/3343.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "93e369654c2b0f881d433ac4a80c00ff3121a964", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_reference.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3345.json b/doc/sphinxext/prs/3345.json new file mode 100644 index 00000000000..213e1d87fb8 --- /dev/null +++ b/doc/sphinxext/prs/3345.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "46ae90465382af29e2aa52be8ae42dc3da905fcd", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3346.json b/doc/sphinxext/prs/3346.json new file mode 100644 index 00000000000..08615dedd51 --- /dev/null +++ b/doc/sphinxext/prs/3346.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "18836be178713f97e941827525501a80002b0d5a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3348.json b/doc/sphinxext/prs/3348.json new file mode 100644 index 00000000000..ce409b3920b --- /dev/null +++ b/doc/sphinxext/prs/3348.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1ff3f45a142754b17d9a437806c165a123ec3384", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/channels/data/neighbors/KIT-UMD_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/__init__.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3349.json b/doc/sphinxext/prs/3349.json new file mode 100644 index 00000000000..085fc2f5bba --- /dev/null +++ b/doc/sphinxext/prs/3349.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5d33ad298c409bee6b28e8688fe90391a3df063c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/channels/data/layouts/KIT-UMD.lout": { + "a": 158, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 38, + "d": 4 + }, + "mne/channels/tests/test_layout.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/335.json b/doc/sphinxext/prs/335.json new file mode 100644 index 00000000000..5da6102edfb --- /dev/null +++ b/doc/sphinxext/prs/335.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a16b6c0a5a1713ed2f09b6c4168145471133b92c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 37, + "d": 8 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3350.json b/doc/sphinxext/prs/3350.json new file mode 100644 index 00000000000..8ea6d3bc63a --- /dev/null +++ b/doc/sphinxext/prs/3350.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fff293de2003075d6b84f2716a43089910503f32", + "authors": [ + { + "n": "Nick Foti", + "e": null + } + ], + "changes": { + "doc/tutorials.rst": { + "a": 1, + "d": 0 + }, + "tutorials/plot_point_spread.py": { + "a": 171, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3352.json b/doc/sphinxext/prs/3352.json new file mode 100644 index 00000000000..cd47f94b86c --- /dev/null +++ b/doc/sphinxext/prs/3352.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "994ac805a3d1ec4a5f2a98dcd0961789847daa41", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 5, + "d": 14 + }, + "mne/utils.py": { + "a": 8, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3353.json b/doc/sphinxext/prs/3353.json new file mode 100644 index 00000000000..64fc96e9b72 --- /dev/null +++ b/doc/sphinxext/prs/3353.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b1ec497be3f265d58f8a25c983d7a509aa8a16d2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "appveyor.yml": { + "a": 9, + "d": 21 + }, + "make/install_python.ps1": { + "a": 0, + "d": 93 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3354.json b/doc/sphinxext/prs/3354.json new file mode 100644 index 00000000000..b3f3b28c6e6 --- /dev/null +++ b/doc/sphinxext/prs/3354.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "11fc0ef2d923f35623a6d9baa6f1aa8af27fc278", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3355.json b/doc/sphinxext/prs/3355.json new file mode 100644 index 00000000000..2a620e32b43 --- /dev/null +++ b/doc/sphinxext/prs/3355.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "e5fb9e9e0d3e72dfdab19d987ed0cc2304dd479d", + "authors": [ + { + "n": "Nick Foti", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 2, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 5, + "d": 5 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 6, + "d": 6 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 5, + "d": 5 + }, + "mne/beamformer/_dics.py": { + "a": 8, + "d": 8 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 19, + "d": 19 + }, + "mne/time_frequency/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 239, + "d": 54 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 225, + "d": 70 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3360.json b/doc/sphinxext/prs/3360.json new file mode 100644 index 00000000000..bd1803b2444 --- /dev/null +++ b/doc/sphinxext/prs/3360.json @@ -0,0 +1,15 @@ +{ + 
"merge_commit_sha": "1a641d7bb800e8170d916b0289443abc9627b7eb", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 11, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3364.json b/doc/sphinxext/prs/3364.json new file mode 100644 index 00000000000..fd3415c07b9 --- /dev/null +++ b/doc/sphinxext/prs/3364.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5a299f1e43cc7465b282da62f461784ccd980592", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3365.json b/doc/sphinxext/prs/3365.json new file mode 100644 index 00000000000..76e19aea5de --- /dev/null +++ b/doc/sphinxext/prs/3365.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "52a3ca01a66fd78c750f5bc62b756bbb06a62538", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 36, + "d": 11 + }, + "mne/surface.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 2 + }, + "tutorials/plot_background_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3368.json b/doc/sphinxext/prs/3368.json new file mode 100644 index 00000000000..2f3969ff120 --- /dev/null +++ b/doc/sphinxext/prs/3368.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "474b31a72659420a900b2453670c40b1938b5e91", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 12, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3371.json b/doc/sphinxext/prs/3371.json new file mode 100644 index 00000000000..b1c41a75051 --- /dev/null +++ b/doc/sphinxext/prs/3371.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7fe99df50119d06803cdd929c512a8d799ecd962", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 7, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 8, + "d": 1 + }, + "mne/viz/tests/test_utils.py": { + "a": 3, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 10, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3372.json b/doc/sphinxext/prs/3372.json new file mode 100644 index 00000000000..147a782e9f3 --- /dev/null +++ b/doc/sphinxext/prs/3372.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "abbc09736cccefe4e4b5dcd7865836aad4b08980", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/simulation/evoked.py": { + "a": 8, + "d": 2 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3373.json b/doc/sphinxext/prs/3373.json new file mode 100644 index 00000000000..e012f1a5093 --- /dev/null +++ b/doc/sphinxext/prs/3373.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b6bab37a6154aa21c629ad19ac62f49f08f249bb", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 8, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3374.json b/doc/sphinxext/prs/3374.json new file mode 100644 index 
00000000000..95f66ae5025 --- /dev/null +++ b/doc/sphinxext/prs/3374.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "420028b89d7cb9c17bc970b7b795a0f1bff10f18", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/bem.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3375.json b/doc/sphinxext/prs/3375.json new file mode 100644 index 00000000000..e7eee1db693 --- /dev/null +++ b/doc/sphinxext/prs/3375.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "23137be41a8fff39a869673c48804db772d9daef", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/defaults.py": { + "a": 0, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 38, + "d": 11 + }, + "mne/viz/_3d.py": { + "a": 69, + "d": 31 + }, + "mne/viz/tests/test_3d.py": { + "a": 7, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3376.json b/doc/sphinxext/prs/3376.json new file mode 100644 index 00000000000..9142d9df0bf --- /dev/null +++ b/doc/sphinxext/prs/3376.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "19fb11a78fc10e66a51def22369d8975c4501d64", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3377.json b/doc/sphinxext/prs/3377.json new file mode 100644 index 00000000000..a4b83f2b05e --- /dev/null +++ b/doc/sphinxext/prs/3377.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "16b30b6665281e53ac2dd14c9a0074ff8ed59e02", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/data/layouts/KIT-UMD-3.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/KIT-UMD-1_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/KIT-UMD-2_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/KIT-UMD-3_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 16, + "d": 2 + }, + "mne/channels/tests/test_layout.py": { + "a": 30, + "d": 33 + }, + "mne/io/constants.py": { + "a": 4, + "d": 0 + }, + "mne/io/kit/constants.py": { + "a": 43, + "d": 18 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 1 + }, + "mne/io/kit/tests/data/test_umd-raw.sqd": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 15, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 29, + "d": 6 + }, + "mne/io/tests/test_meas_info.py": { + "a": 7, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3379.json b/doc/sphinxext/prs/3379.json new file mode 100644 index 00000000000..bac27ef80e9 --- /dev/null +++ b/doc/sphinxext/prs/3379.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0623de9142aa1aeabfde66f065618ef78e613ee5", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "tutorials/plot_artifacts_correction_ica.py": { + "a": 18, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3381.json b/doc/sphinxext/prs/3381.json new file mode 100644 index 00000000000..77923774477 --- /dev/null +++ b/doc/sphinxext/prs/3381.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0c57855cad9e77e02f2f4352abe1b44dd4c57199", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 38, + "d": 23 + }, + 
"mne/decoding/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 13, + "d": 0 + }, + "mne/decoding/search_light.py": { + "a": 574, + "d": 0 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 109, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3382.json b/doc/sphinxext/prs/3382.json new file mode 100644 index 00000000000..fde2d51bb01 --- /dev/null +++ b/doc/sphinxext/prs/3382.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4c7efc7b41cd616d233bf2a8e557376f1fee493d", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 14, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 13, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3383.json b/doc/sphinxext/prs/3383.json new file mode 100644 index 00000000000..cc464e424e0 --- /dev/null +++ b/doc/sphinxext/prs/3383.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "4aa3f3a283b3cefd225e2020fcd288564ff96738", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 32, + "d": 4 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg_gui.py": { + "a": 4, + "d": 17 + }, + "mne/gui/_fiducials_gui.py": { + "a": 22, + "d": 9 + }, + "mne/gui/_file_traits.py": { + "a": 33, + "d": 15 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 4, + "d": 1 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 3, + "d": 3 + }, + "mne/utils.py": { + "a": 11, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3384.json b/doc/sphinxext/prs/3384.json new file mode 100644 index 00000000000..f58ef92e8c6 --- /dev/null +++ b/doc/sphinxext/prs/3384.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "bcca63c1efdd53a6ec5cd4bc7cf2b249e13e8c45", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 9, + "d": 3 + }, + "mne/cov.py": { + "a": 48, + "d": 44 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 14, + "d": 1 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3387.json b/doc/sphinxext/prs/3387.json new file mode 100644 index 00000000000..347dd1670fe --- /dev/null +++ b/doc/sphinxext/prs/3387.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4ff58df47075b1c1edbc933d57db9eca17c38a61", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 13, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3389.json b/doc/sphinxext/prs/3389.json new file mode 100644 index 00000000000..2144ea87f0a --- /dev/null +++ b/doc/sphinxext/prs/3389.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f038626699ee95d27680dd5f8ae39c2df3b10ae6", + "authors": [ + { + "n": "Félix Raimundo", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 8, + "d": 0 + }, + "mne/io/tests/data/test-ave-2.log": { + "a": 4, + "d": 4 + }, + "mne/io/tests/data/test-ave.log": { + "a": 2, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 1, + "d": 9 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/339.json b/doc/sphinxext/prs/339.json new file mode 100644 index 00000000000..a3ffd78a1e2 --- /dev/null +++ b/doc/sphinxext/prs/339.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1db0559c691039a8d1596af0c563d109201e8c42", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 17, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3390.json b/doc/sphinxext/prs/3390.json new file mode 100644 index 00000000000..6fe81ee954c --- /dev/null +++ b/doc/sphinxext/prs/3390.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f7c223e53b21c668191c1b36166850b5e3041511", + "authors": [ + { + "n": "Félix Raimundo", + "e": null + } + ], + "changes": { + "mne/baseline.py": { + "a": 7, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 35, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3391.json b/doc/sphinxext/prs/3391.json new file mode 100644 index 00000000000..193cece9e76 --- /dev/null +++ b/doc/sphinxext/prs/3391.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "47bf725320f9b0cc06d973d6a8d94cf7da634f2e", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 5, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 16, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3392.json b/doc/sphinxext/prs/3392.json new file mode 100644 index 00000000000..174c40e6354 --- /dev/null +++ b/doc/sphinxext/prs/3392.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cc106f790840c87bcb75eda9d12e4d5a58f0b3de", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/cov.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3394.json b/doc/sphinxext/prs/3394.json new file mode 100644 index 00000000000..a35f5b8dd52 --- /dev/null +++ b/doc/sphinxext/prs/3394.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "920f0c3d31e1b939a93fd7ec37044a89076f0b71", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "examples/realtime/ftclient_rt_average.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3396.json b/doc/sphinxext/prs/3396.json new file mode 100644 index 00000000000..d5036f33de7 --- /dev/null +++ b/doc/sphinxext/prs/3396.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3924c38cacf99c711138ca7d012bd08bb5992b31", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/realtime/rt_feedback_server.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/stim_server_client.py": { + "a": 2, + "d": 6 + }, + "mne/realtime/tests/test_stim_client_server.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3397.json b/doc/sphinxext/prs/3397.json new file mode 100644 index 00000000000..f521a6b019e --- /dev/null +++ b/doc/sphinxext/prs/3397.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "48ef96d37a5f6d8c6648ca0f54002898fa759bab", + "authors": [ + { + "n": "Félix Raimundo", + "e": null + } + ], + "changes": { + "doc/contributing.rst": { + "a": 3, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3398.json 
b/doc/sphinxext/prs/3398.json new file mode 100644 index 00000000000..f72a53f0f8a --- /dev/null +++ b/doc/sphinxext/prs/3398.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0712b67327b009bd492ab51a34dbf997afedd967", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 15, + "d": 3 + }, + "mne/fixes.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 60, + "d": 17 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 36, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/34.json b/doc/sphinxext/prs/34.json new file mode 100644 index 00000000000..c5f5a4dca11 --- /dev/null +++ b/doc/sphinxext/prs/34.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "ad83d323d4afd6e50853bb165a9574d66e1dc3b9", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/_images/plot_read_and_write_raw_data.png": { + "a": 0, + "d": 0 + }, + "doc/source/_images/plot_read_epochs.png": { + "a": 0, + "d": 0 + }, + "doc/source/_images/plot_time_frequency.png": { + "a": 0, + "d": 0 + }, + "doc/source/_templates/class.rst": { + "a": 12, + "d": 0 + }, + "doc/source/_templates/function.rst": { + "a": 8, + "d": 0 + }, + "doc/source/conf.py": { + "a": 1, + "d": 1 + }, + "doc/source/mne-python.rst": { + "a": 1, + "d": 0 + }, + "doc/source/python_reference.rst": { + "a": 117, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 7, + "d": 4 + }, + "mne/artifacts/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/fiff/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/fiff/pick.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 9, + "d": 0 + }, + "mne/stats/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/viz.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3400.json b/doc/sphinxext/prs/3400.json new file mode 100644 index 00000000000..68d7ca92aa5 --- /dev/null +++ b/doc/sphinxext/prs/3400.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "c0d956cd46f7274d2b85e634873f88be34945c82", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 1, + "d": 1 + }, + "doc/_templates/class.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/pitfalls.rst": { + "a": 0, + "d": 13 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 52, + "d": 12 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_evoked.py": { + "a": 37, + "d": 36 + }, + "mne/utils.py": { + "a": 12, + "d": 4 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_eeg_erp.py": { + "a": 15, + "d": 9 + }, + "tutorials/plot_introduction.py": { + "a": 29, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3402.json b/doc/sphinxext/prs/3402.json new file mode 100644 index 00000000000..6fc82416adc --- /dev/null +++ b/doc/sphinxext/prs/3402.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "0324f0009d408a8c7d6b2c50cd035369112904d3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 11, + "d": 5 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 2, + "d": 2 + }, + 
"examples/inverse/plot_lcmv_beamformer.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 3, + "d": 4 + }, + "mne/datasets/utils.py": { + "a": 7, + "d": 10 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 5 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 15, + "d": 15 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 11, + "d": 10 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 3 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3403.json b/doc/sphinxext/prs/3403.json new file mode 100644 index 00000000000..364bb5a9189 --- /dev/null +++ b/doc/sphinxext/prs/3403.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2ac5a96af70155dbfaabc4f2da73932593ad2f25", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/dipole.py": { + "a": 8, + "d": 5 + }, + "mne/simulation/tests/test_raw.py": { + "a": 31, + "d": 8 + }, + "mne/tests/test_dipole.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3404.json b/doc/sphinxext/prs/3404.json new file mode 100644 index 00000000000..678686d62b4 --- /dev/null +++ b/doc/sphinxext/prs/3404.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "570c7decd734e6b6c6318ecc218cfb87185fd691", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/epochs.py": { + "a": 6, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3407.json b/doc/sphinxext/prs/3407.json new file mode 100644 index 00000000000..2be31c0223d --- /dev/null +++ b/doc/sphinxext/prs/3407.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "7e82836403d6f2a593e63620e489c99c00b10471", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 13, + "d": 1 + }, + "doc/Makefile": { + "a": 3, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 12, + "d": 0 + }, + "doc/tutorials.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 6, + "d": 2 + }, + "mne/datasets/brainstorm/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 50, + "d": 0 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 51, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 7, + "d": 4 + }, + "mne/dipole.py": { + "a": 63, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 5, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 20, + "d": 3 + }, + "mne/tests/test_import_nesting.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_line_endings.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_brainstorm_phantom_ctf.py": { + "a": 93, + "d": 0 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 101, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3408.json b/doc/sphinxext/prs/3408.json new file mode 100644 index 00000000000..1129daf3b5c --- /dev/null +++ b/doc/sphinxext/prs/3408.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0fbc4cd0b54e3e51b3451e2c9f69fea194f3a633", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + 
"changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 6, + "d": 9 + }, + "mne/evoked.py": { + "a": 4, + "d": 7 + }, + "mne/io/base.py": { + "a": 38, + "d": 10 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_utils.py": { + "a": 20, + "d": 1 + }, + "mne/utils.py": { + "a": 37, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3409.json b/doc/sphinxext/prs/3409.json new file mode 100644 index 00000000000..46828e07400 --- /dev/null +++ b/doc/sphinxext/prs/3409.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "8a3fbf59d0e15b3e4f6abd6b479a37f32d2324b6", + "authors": [ + { + "n": "Asish Panda", + "e": "asishrocks95@gmail.com" + } + ], + "changes": { + "doc/manual/decoding.rst": { + "a": 4, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_decoding_xdawn_transformer.py": { + "a": 2, + "d": 2 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 2, + "d": 2 + }, + "examples/realtime/rt_feedback_server.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 27, + "d": 2 + }, + "mne/decoding/transformer.py": { + "a": 105, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/341.json b/doc/sphinxext/prs/341.json new file mode 100644 index 00000000000..dae89a60e3d --- /dev/null +++ b/doc/sphinxext/prs/341.json @@ -0,0 +1,363 @@ +{ + "merge_commit_sha": "a96b6473515a92af946d32e86524625a49661696", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 51, + "d": 7 + }, + "doc/source/python_tutorial.rst": { + "a": 27, + "d": 16 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/export/plot_epochs_as_data_frame.py": { + "a": 1, + "d": 1 + }, + "examples/export/plot_epochs_to_nitime.py": { + "a": 1, + "d": 1 + }, + "examples/export/plot_evoked_to_nitime.py": { + "a": 1, + "d": 1 + }, + "examples/export/plot_raw_to_nitime.py": { + "a": 1, + "d": 1 + }, + "examples/extract_events_from_raw.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_dipole_fit_result.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_label_activation_from_stc.py": { + "a": 33, + "d": 33 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_morph_data.py": { + "a": 1, + "d": 1 + }, + 
"examples/inverse/plot_read_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_stc.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/plot_estimate_covariance_matrix_baseline.py": { + "a": 1, + "d": 1 + }, + "examples/plot_estimate_covariance_matrix_raw.py": { + "a": 1, + "d": 1 + }, + "examples/plot_evoked_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/plot_from_raw_to_multiple_epochs_to_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_and_write_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_bem_surfaces.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_forward.py": { + "a": 1, + "d": 1 + }, + "examples/plot_read_noise_covariance_matrix.py": { + "a": 1, + "d": 1 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 1, + "d": 1 + }, + "examples/plot_topo_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/plot_topo_compare_conditions.py": { + "a": 1, + "d": 1 + }, + "examples/plot_topography.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ecg_artifacts_from_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_eog_artifacts_from_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "examples/read_events.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_fdr_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_source_psd_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_tfr_topography.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/megsim/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/megsim/megsim.py": { + "a": 192, + "d": 0 + }, + "mne/datasets/megsim/urls.py": { + "a": 160, + "d": 0 + }, + "mne/datasets/sample/__init__.py": { + "a": 1, + "d": 97 + }, + "mne/datasets/sample/sample.py": { + "a": 141, + 
"d": 0 + }, + "mne/label.py": { + "a": 26, + "d": 28 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 2 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 1, + "d": 2 + }, + "mne/mixed_norm/tests/test_inverse.py": { + "a": 1, + "d": 2 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 2 + }, + "mne/simulation/tests/test_source.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_forward.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 21, + "d": 2 + }, + "mne/utils.py": { + "a": 341, + "d": 145 + }, + "mne/viz.py": { + "a": 10, + "d": 9 + }, + "setup.py": { + "a": 49, + "d": 49 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3413.json b/doc/sphinxext/prs/3413.json new file mode 100644 index 00000000000..c9c17dfee3e --- /dev/null +++ b/doc/sphinxext/prs/3413.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "08691929b5a56cd1e6d3a33b9642e59afd05f5ae", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 446, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 42, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3414.json b/doc/sphinxext/prs/3414.json new file mode 100644 index 00000000000..479954f0551 --- /dev/null +++ b/doc/sphinxext/prs/3414.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bc9ecbd9c1e15a0a15de6ad76b1e9d7e3ab44111", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 18, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3415.json b/doc/sphinxext/prs/3415.json new file mode 100644 index 00000000000..e6126cc91fa --- /dev/null +++ b/doc/sphinxext/prs/3415.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "22cbba65a894601e760eb64e2ce5ca7d097093cd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3418.json b/doc/sphinxext/prs/3418.json new file mode 100644 index 00000000000..c23f318f718 --- /dev/null +++ b/doc/sphinxext/prs/3418.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e680383bbd3f96a0313fee599ea96d3149887f96", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/data/coil_def.dat": { + "a": 16, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/342.json b/doc/sphinxext/prs/342.json new file mode 100644 index 00000000000..42de9c2b3aa --- /dev/null +++ b/doc/sphinxext/prs/342.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "7b8a41751982740bed876de25870f01a6c5b253c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/baseline.py": { + "a": 2, + "d": 2 + }, 
+ "mne/beamformer/tests/test_lcmv.py": { + "a": 5, + "d": 3 + }, + "mne/fiff/evoked.py": { + "a": 8, + "d": 0 + }, + "mne/fiff/open.py": { + "a": 5, + "d": 3 + }, + "mne/fiff/raw.py": { + "a": 5, + "d": 1 + }, + "mne/fiff/tag.py": { + "a": 90, + "d": 17 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 4, + "d": 7 + }, + "mne/fiff/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/forward.py": { + "a": 4, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 3, + "d": 3 + }, + "mne/mixed_norm/tests/test_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 3, + "d": 1 + }, + "mne/utils.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3422.json b/doc/sphinxext/prs/3422.json new file mode 100644 index 00000000000..98b598c2b0e --- /dev/null +++ b/doc/sphinxext/prs/3422.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2921f56c5a23ae39e5d6b4a9173695925d291bae", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/commands/mne_coreg.py": { + "a": 5, + "d": 5 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 3, + "d": 3 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 5, + "d": 4 + }, + "mne/utils.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3423.json b/doc/sphinxext/prs/3423.json new file mode 100644 index 00000000000..fe28fb478a6 --- /dev/null +++ b/doc/sphinxext/prs/3423.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d9a2d09acb93619ef4c63dd04f907e4305c472e0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 16, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3425.json b/doc/sphinxext/prs/3425.json new file mode 100644 index 00000000000..936eb8736e3 --- /dev/null +++ b/doc/sphinxext/prs/3425.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "8823240c015a993c7a4585c0b268f42306faddf3", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "examples/decoding/plot_decoding_xdawn_transformer.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/plot_xdawn_denoising.py": { + "a": 2, + "d": 3 + }, + "mne/decoding/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/tests/test_xdawn.py": { + "a": 0, + "d": 97 + }, + "mne/decoding/xdawn.py": { + "a": 0, + "d": 174 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 123, + "d": 56 + }, + "mne/preprocessing/xdawn.py": { + "a": 421, + "d": 288 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3429.json b/doc/sphinxext/prs/3429.json new file mode 100644 index 00000000000..506a55305eb --- /dev/null +++ b/doc/sphinxext/prs/3429.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "aeceebdfc8d4e21d594263fc6b1304d209446944", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 36, + "d": 18 + }, + "mne/viz/tests/test_misc.py": { + "a": 3, + "d": 0 + } + } +} \ No newline 
at end of file diff --git a/doc/sphinxext/prs/343.json b/doc/sphinxext/prs/343.json new file mode 100644 index 00000000000..98842912e23 --- /dev/null +++ b/doc/sphinxext/prs/343.json @@ -0,0 +1,10 @@ +{ + "merge_commit_sha": "dfbd985583a9bc372951bc54311be17038210124", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": {} +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3431.json b/doc/sphinxext/prs/3431.json new file mode 100644 index 00000000000..38849b02d20 --- /dev/null +++ b/doc/sphinxext/prs/3431.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bd7d7b7a90553a48eebf73fc05062d9c6ea5c3c5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3434.json b/doc/sphinxext/prs/3434.json new file mode 100644 index 00000000000..8a5a7303f81 --- /dev/null +++ b/doc/sphinxext/prs/3434.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "facfc051d1411b3d8c80023305a2850b746b16ef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_csp.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 8, + "d": 7 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 3, + "d": 3 + }, + "mne/utils.py": { + "a": 5, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3436.json b/doc/sphinxext/prs/3436.json new file mode 100644 index 00000000000..0b659bb4022 --- /dev/null +++ b/doc/sphinxext/prs/3436.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "df88642e06748bb321e623e290d935e0142545e1", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 10, + "d": 10 + }, + "mne/proj.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3437.json b/doc/sphinxext/prs/3437.json new file mode 100644 index 00000000000..9b6628437f4 --- /dev/null +++ b/doc/sphinxext/prs/3437.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "96765bbd30d3de2a50de2c699046fc33cedb176c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 0, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 10, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 25, + "d": 29 + }, + "mne/utils.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3438.json b/doc/sphinxext/prs/3438.json new file mode 100644 index 00000000000..52fc15d6b63 --- /dev/null +++ b/doc/sphinxext/prs/3438.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a03fb06100be7c9f8ef4efdf7153c24b866ad54f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 7, + "d": 4 + }, + "mne/commands/tests/test_commands.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3439.json b/doc/sphinxext/prs/3439.json new file mode 100644 index 00000000000..cad6fb0cbfb --- 
/dev/null +++ b/doc/sphinxext/prs/3439.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "ca0ded303cac3a5c6187186f0e517a23e4aa0df6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_file_traits.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 8, + "d": 3 + }, + "mne/utils.py": { + "a": 16, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3440.json b/doc/sphinxext/prs/3440.json new file mode 100644 index 00000000000..12e7c39922f --- /dev/null +++ b/doc/sphinxext/prs/3440.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "90e9e9c9840b537226b6f841f823dcfbdc1bfa24", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 4, + "d": 2 + }, + "mne/io/base.py": { + "a": 68, + "d": 29 + }, + "mne/io/compensator.py": { + "a": 22, + "d": 15 + }, + "mne/io/ctf/info.py": { + "a": 20, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 12, + "d": 27 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 107, + "d": 28 + }, + "mne/io/tests/test_compensator.py": { + "a": 32, + "d": 10 + }, + "mne/preprocessing/maxwell.py": { + "a": 5, + "d": 8 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 15, + "d": 12 + }, + "mne/tests/test_epochs.py": { + "a": 0, + "d": 3 + }, + "tutorials/plot_brainstorm_phantom_ctf.py": { + "a": 20, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3443.json b/doc/sphinxext/prs/3443.json new file mode 100644 index 00000000000..35b9986f20a --- /dev/null +++ b/doc/sphinxext/prs/3443.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "50f0a4062e0d4985fe953def7d608bd0b7452613", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "examples/decoding/plot_decoding_xdawn_transformer.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3446.json b/doc/sphinxext/prs/3446.json new file mode 100644 index 00000000000..e82b79da47b --- /dev/null +++ b/doc/sphinxext/prs/3446.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e17e31fc42874461648c40a8b5d11e3aced74ab7", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 70, + "d": 33 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 57, + "d": 0 + }, + "mne/decoding/ems.py": { + "a": 118, + "d": 28 + }, + "mne/decoding/tests/test_ems.py": { + "a": 35, + "d": 3 + }, + "mne/decoding/time_gen.py": { + "a": 2, + "d": 35 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3448.json b/doc/sphinxext/prs/3448.json new file mode 100644 index 00000000000..9538f393d79 --- /dev/null +++ b/doc/sphinxext/prs/3448.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b0a6ed3a75530e2d8a54d81bdba854a0db978deb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_maxwell.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 16, + "d": 1 + }, + 
"mne/utils.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3449.json b/doc/sphinxext/prs/3449.json new file mode 100644 index 00000000000..e87c12eba8b --- /dev/null +++ b/doc/sphinxext/prs/3449.json @@ -0,0 +1,155 @@ +{ + "merge_commit_sha": "c674fe7e305c0eebc4ccb8f4bcbd0ee170f33d17", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 3, + "d": 0 + }, + "doc/tutorials.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 17, + "d": 1 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 3, + "d": 1 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 2, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 3, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 9, + "d": 3 + }, + "mne/decoding/transformer.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 685, + "d": 446 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 10, + "d": 9 + }, + "mne/io/base.py": { + "a": 85, + "d": 39 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 4 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 2, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 34, + "d": 27 + }, + "mne/io/tests/test_pick.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 7, + "d": 2 + }, + "mne/preprocessing/ssp.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 127, + "d": 151 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 9, + "d": 8 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 107, + "d": 104 + }, + "mne/time_frequency/csd.py": { + "a": 2, + "d": 3 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 44, + "d": 16 + }, + "tutorials/plot_background_filtering.py": { + "a": 351, + "d": 25 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 4, + "d": 2 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_modifying_data_inplace.py": { + "a": 16, + "d": 10 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/345.json b/doc/sphinxext/prs/345.json new file mode 100644 index 00000000000..aece076d843 --- /dev/null +++ b/doc/sphinxext/prs/345.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "90834d3c52aae1f2fecd4ee7e77001e949704b9f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz.py": { + "a": 21, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3450.json b/doc/sphinxext/prs/3450.json new file mode 100644 index 00000000000..a0f3c598867 --- /dev/null +++ b/doc/sphinxext/prs/3450.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ad3dc3a6bbc342334f8b418cbd23b1786785620c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": 
null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 52, + "d": 17 + }, + "mne/viz/tests/test_misc.py": { + "a": 3, + "d": 0 + }, + "tutorials/plot_forward.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3452.json b/doc/sphinxext/prs/3452.json new file mode 100644 index 00000000000..21cb74cb13e --- /dev/null +++ b/doc/sphinxext/prs/3452.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5ea20abbb53dc13cd6b39eeefa9721904eb0417c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 15, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3453.json b/doc/sphinxext/prs/3453.json new file mode 100644 index 00000000000..e40801d67f8 --- /dev/null +++ b/doc/sphinxext/prs/3453.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6957b937e15fab9768c9f0cfd235b549bfceced6", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3456.json b/doc/sphinxext/prs/3456.json new file mode 100644 index 00000000000..b53af04491f --- /dev/null +++ b/doc/sphinxext/prs/3456.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "403ab40f91a8bd196159e779a34d06823dc7209f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/tests/test_montage.py": { + "a": 20, + "d": 36 + }, + "mne/datasets/megsim/urls.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 3, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 35, + "d": 35 + }, + "tutorials/plot_stats_cluster_methods.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3457.json b/doc/sphinxext/prs/3457.json new file mode 100644 index 00000000000..be2ae87a0d9 --- /dev/null +++ b/doc/sphinxext/prs/3457.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cb1f1197f6521740c03ab84337cc253ac45c2195", + "authors": [ + { + "n": "Guillaume Dumas", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 7, + "d": 4 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/346.json b/doc/sphinxext/prs/346.json new file mode 100644 index 00000000000..69885f885fd --- /dev/null +++ b/doc/sphinxext/prs/346.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "611a36e7bb008fe9a1d9f069ab06db916aab9ebb", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3462.json b/doc/sphinxext/prs/3462.json new file mode 100644 index 00000000000..8957567cdd1 --- /dev/null +++ b/doc/sphinxext/prs/3462.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f28a3d54d5f30335359fc2b49a5784314c9a028b", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3463.json b/doc/sphinxext/prs/3463.json new file mode 100644 index 00000000000..851eb884797 --- /dev/null +++ b/doc/sphinxext/prs/3463.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b02f5bd8a51274d6f683cd079d28af04e28bce63", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 180, + "d": 66 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 10, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 35, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3466.json b/doc/sphinxext/prs/3466.json new file mode 100644 index 00000000000..3058ddcb6ea --- /dev/null +++ b/doc/sphinxext/prs/3466.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "31c2c28a9134f3ca86f762ebb1a48de7659e9c22", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 39, + "d": 2 + }, + "mne/tests/test_coreg.py": { + "a": 35, + "d": 7 + }, + "mne/transforms.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3469.json b/doc/sphinxext/prs/3469.json new file mode 100644 index 00000000000..ac761420980 --- /dev/null +++ b/doc/sphinxext/prs/3469.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "50bee3363f1ab8a782fa6ed9d40a736061f4e13d", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_morph_data.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3472.json b/doc/sphinxext/prs/3472.json new file mode 100644 index 00000000000..b2e36c2a15c --- /dev/null +++ b/doc/sphinxext/prs/3472.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "87b570c4592dcf68b631dfcba1d7f9d919514e57", + "authors": [ + { + "n": "Asish Panda", + "e": "asishrocks95@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 30, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 157, + "d": 1 + }, + "mne/filter.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3476.json b/doc/sphinxext/prs/3476.json new file mode 100644 index 00000000000..af8835b5cb6 --- /dev/null +++ b/doc/sphinxext/prs/3476.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "7f716027a49e884978de738152aaca76b1f18b5b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 5, + "d": 5 + }, + "mne/connectivity/spectral.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 32 + 
}, + "mne/time_frequency/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 158, + "d": 16 + }, + "mne/time_frequency/tfr.py": { + "a": 798, + "d": 581 + }, + "mne/utils.py": { + "a": 31, + "d": 0 + }, + "tutorials/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 28, + "d": 45 + }, + "tutorials/plot_stats_cluster_time_frequency.py": { + "a": 37, + "d": 52 + }, + "tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 26, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3477.json b/doc/sphinxext/prs/3477.json new file mode 100644 index 00000000000..67170f21dd8 --- /dev/null +++ b/doc/sphinxext/prs/3477.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5d2a6493b3f3e5a4f3e7cbb8067617e0b7cbfcb0", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 11, + "d": 5 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3479.json b/doc/sphinxext/prs/3479.json new file mode 100644 index 00000000000..4ba051fc622 --- /dev/null +++ b/doc/sphinxext/prs/3479.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "de463affe20f58dc50093c479c311ce40bed4e49", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 15, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 69, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 11, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 28, + "d": 3 + }, + "mne/utils.py": { + "a": 20, + "d": 0 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3481.json b/doc/sphinxext/prs/3481.json new file mode 100644 index 00000000000..fb06f26b008 --- /dev/null +++ b/doc/sphinxext/prs/3481.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bc8cab4b8b4b45085f3f786a73685dfdcd5bc306", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/search_light.py": { + "a": 93, + "d": 64 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 34, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3485.json b/doc/sphinxext/prs/3485.json new file mode 100644 index 00000000000..11f27297233 --- /dev/null +++ b/doc/sphinxext/prs/3485.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "728b189f7221293affb1aa75ca5b05af8ab37b8a", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 209, + "d": 84 + }, + "mne/decoding/tests/test_csp.py": { + "a": 67, + "d": 27 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3486.json b/doc/sphinxext/prs/3486.json new file mode 100644 index 00000000000..ee79cbecf63 --- /dev/null +++ b/doc/sphinxext/prs/3486.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "859d2814d9b78c86396de176715457f3d3ef513a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/plot_object_raw.py": { + "a": 21, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3487.json b/doc/sphinxext/prs/3487.json new file mode 100644 index 00000000000..9d0fef8c671 
--- /dev/null +++ b/doc/sphinxext/prs/3487.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "b8ccffee83f0b6c15348924d1125f1b743ad9a5b", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 13, + "d": 21 + }, + "mne/channels/layout.py": { + "a": 0, + "d": 2 + }, + "mne/cov.py": { + "a": 7, + "d": 35 + }, + "mne/epochs.py": { + "a": 35, + "d": 370 + }, + "mne/evoked.py": { + "a": 25, + "d": 382 + }, + "mne/io/base.py": { + "a": 9, + "d": 209 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/pick.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 9, + "d": 244 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 96 + }, + "mne/tests/test_utils.py": { + "a": 120, + "d": 1 + }, + "mne/utils.py": { + "a": 171, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 6, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3488.json b/doc/sphinxext/prs/3488.json new file mode 100644 index 00000000000..ef7af1e1698 --- /dev/null +++ b/doc/sphinxext/prs/3488.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "a446cf13b0c34bd390af22aa192ba42f04eddc86", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "examples/time_frequency/README.txt": { + "a": 1, + "d": 2 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 41, + "d": 0 + }, + "mne/decoding/time_frequency.py": { + "a": 152, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 82, + "d": 39 + }, + "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3489.json b/doc/sphinxext/prs/3489.json new file mode 100644 index 00000000000..0a8b91d78a6 --- /dev/null +++ b/doc/sphinxext/prs/3489.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "59c6e749bcdfbe4c43f553070452b3fe57ee3974", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/annotations.py": { + "a": 19, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3490.json b/doc/sphinxext/prs/3490.json new file mode 100644 index 00000000000..4eb2ea9afbc --- /dev/null +++ b/doc/sphinxext/prs/3490.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "93972513cfbd2f96f14d281e27b2a8f852f3ffef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/filter.py": { + "a": 220, + "d": 59 + }, + "mne/io/base.py": { + "a": 30, + "d": 8 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 8, + "d": 10 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 6, + "d": 4 + }, + "mne/tests/test_filter.py": { + "a": 79, + "d": 33 + }, + "tutorials/plot_background_filtering.py": { + "a": 123, + "d": 24 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/3491.json b/doc/sphinxext/prs/3491.json new file mode 100644 index 00000000000..3fb8b9cfbab --- /dev/null +++ b/doc/sphinxext/prs/3491.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "baf9bf4f8f28725ffe70aa8f8b94281df8fef7f6", + "authors": [ + { + "n": "Simon-Shlomo Poil", + "e": null + } + ], + "changes": { + "tutorials/plot_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3492.json b/doc/sphinxext/prs/3492.json new file mode 100644 index 00000000000..c4a6946ae57 --- /dev/null +++ b/doc/sphinxext/prs/3492.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2cd00085753883b195987a0053c993c0cd679f4b", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/report.py": { + "a": 30, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3494.json b/doc/sphinxext/prs/3494.json new file mode 100644 index 00000000000..4933d6c4390 --- /dev/null +++ b/doc/sphinxext/prs/3494.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5f623d90c8c71e91d07a7a44627bfa81411756a0", + "authors": [ + { + "n": "alexandre barachant", + "e": null + } + ], + "changes": { + "examples/decoding/plot_decoding_xdawn_transformer.py": { + "a": 38, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3496.json b/doc/sphinxext/prs/3496.json new file mode 100644 index 00000000000..a3531971c30 --- /dev/null +++ b/doc/sphinxext/prs/3496.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "112ffac0eba6df83cc6269c5c50ca7e64e5cd22a", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 16, + "d": 15 + }, + "mne/viz/tests/test_ica.py": { + "a": 23, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 21, + "d": 2 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 48, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3497.json b/doc/sphinxext/prs/3497.json new file mode 100644 index 00000000000..ba4f59953ee --- /dev/null +++ b/doc/sphinxext/prs/3497.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1145f0b815337be0e91beb7fdaf960dc4160d27a", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3498.json b/doc/sphinxext/prs/3498.json new file mode 100644 index 00000000000..6adf1ad45a1 --- /dev/null +++ b/doc/sphinxext/prs/3498.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c2e14b8789c6d5f79023ef45f09e4b0745d3bfd4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/35.json b/doc/sphinxext/prs/35.json new file mode 100644 index 00000000000..a3073c62a8f --- /dev/null +++ b/doc/sphinxext/prs/35.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "16a81f211c2c2b4410bb75d55a8e7ceadaf07fea", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "bin/mne_compute_proj_ecg.py": { + "a": 133, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/proj.py": { + "a": 4, + "d": 
41 + }, + "mne/proj.py": { + "a": 115, + "d": 4 + }, + "mne/tests/test_proj.py": { + "a": 17, + "d": 13 + }, + "mne/utils.py": { + "a": 92, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/350.json b/doc/sphinxext/prs/350.json new file mode 100644 index 00000000000..ce161288d4a --- /dev/null +++ b/doc/sphinxext/prs/350.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "b4303ef661798f9dd128ce9db8d232cdb59535ca", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 22, + "d": 3 + }, + "mne/fiff/evoked.py": { + "a": 25, + "d": 5 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 13, + "d": 1 + }, + "mne/filter.py": { + "a": 49, + "d": 8 + }, + "mne/tests/test_epochs.py": { + "a": 31, + "d": 0 + }, + "mne/tests/test_filter.py": { + "a": 12, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3500.json b/doc/sphinxext/prs/3500.json new file mode 100644 index 00000000000..9e18aa90928 --- /dev/null +++ b/doc/sphinxext/prs/3500.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3505b0e11dc66974158029f48bbe453d3c2c79a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/event.py": { + "a": 69, + "d": 25 + }, + "mne/realtime/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_event.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3501.json b/doc/sphinxext/prs/3501.json new file mode 100644 index 00000000000..1d99c5e4c5f --- /dev/null +++ b/doc/sphinxext/prs/3501.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4b72527146af2e71675e3c19838fd6e13609ee36", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 58, + "d": 42 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_filter.py": { + "a": 17, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3502.json b/doc/sphinxext/prs/3502.json new file mode 100644 index 00000000000..a465f0931dc --- /dev/null +++ b/doc/sphinxext/prs/3502.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a294c335d5bc5911aa452dad801f2ae62ae80224", + "authors": [ + { + "n": "Asish Panda", + "e": "asishrocks95@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/search_light.py": { + "a": 31, + "d": 5 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3504.json b/doc/sphinxext/prs/3504.json new file mode 100644 index 00000000000..18b0e70c126 --- /dev/null +++ b/doc/sphinxext/prs/3504.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ee84544768ea747825801634966b0bedbab20b00", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3505.json b/doc/sphinxext/prs/3505.json new file mode 100644 index 00000000000..f3c9fc216dd --- /dev/null +++ b/doc/sphinxext/prs/3505.json @@ -0,0 +1,19 @@ +{ + 
"merge_commit_sha": "9ed6a7e43c060cba4d7a16f4014293fc0bf975b0", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/infomax_.py": { + "a": 60, + "d": 63 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3506.json b/doc/sphinxext/prs/3506.json new file mode 100644 index 00000000000..cd249a28230 --- /dev/null +++ b/doc/sphinxext/prs/3506.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e4a6257d5b3da5f454a9d7b967d3f9b016881d14", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 20, + "d": 6 + }, + "tutorials/plot_eeg_erp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/351.json b/doc/sphinxext/prs/351.json new file mode 100644 index 00000000000..3b74d2bd7d8 --- /dev/null +++ b/doc/sphinxext/prs/351.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e957546f1750cc0e4582dea8d81c8a5b43ffb516", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/sample/sample.py": { + "a": 5, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3510.json b/doc/sphinxext/prs/3510.json new file mode 100644 index 00000000000..ef845d4b63d --- /dev/null +++ b/doc/sphinxext/prs/3510.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d84d5cfb64507cd2e437cf0f35fbe25be55716d9", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 56, + "d": 15 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 24, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3511.json b/doc/sphinxext/prs/3511.json new file mode 100644 index 00000000000..a6c0e1c38ce --- /dev/null +++ b/doc/sphinxext/prs/3511.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "038e8687a974184184390523c996aa74c3370d25", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3514.json b/doc/sphinxext/prs/3514.json new file mode 100644 index 00000000000..c7d7608aeb7 --- /dev/null +++ b/doc/sphinxext/prs/3514.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "93c8902a0c559b6c4274a4f1f652f8e1a2075bd8", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 2, + "d": 0 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3517.json b/doc/sphinxext/prs/3517.json new file mode 100644 index 00000000000..c03e5af3b3a --- /dev/null +++ b/doc/sphinxext/prs/3517.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e09124ed975eb7877897226c150979fda8976521", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/annotations.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3520.json b/doc/sphinxext/prs/3520.json new file mode 100644 index 
00000000000..6d4339a796e --- /dev/null +++ b/doc/sphinxext/prs/3520.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5584a5c3d9d73985cf1501fa3e90465113158497", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/array/array.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 17, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3522.json b/doc/sphinxext/prs/3522.json new file mode 100644 index 00000000000..074917cedab --- /dev/null +++ b/doc/sphinxext/prs/3522.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ad19aef0f96376132b1fd4d8217a8dda17cdd1d6", + "authors": [ + { + "n": "Christopher Mullins", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 9, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 17, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3524.json b/doc/sphinxext/prs/3524.json new file mode 100644 index 00000000000..3038ec8a80f --- /dev/null +++ b/doc/sphinxext/prs/3524.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "32dd32a288d85117747a8fb205e137df6ff58568", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 5, + "d": 0 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3526.json b/doc/sphinxext/prs/3526.json new file mode 100644 index 00000000000..7bc56298f3e --- /dev/null +++ b/doc/sphinxext/prs/3526.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1c11647a74fb604b6783dd92d89872c63c738348", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/353.json b/doc/sphinxext/prs/353.json new file mode 100644 index 00000000000..363a47455ca --- /dev/null +++ b/doc/sphinxext/prs/353.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9689ff0499d2d4e0fabea86385720c3151f68b80", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3530.json b/doc/sphinxext/prs/3530.json new file mode 100644 index 00000000000..221b1680c4c --- /dev/null +++ b/doc/sphinxext/prs/3530.json @@ -0,0 +1,271 @@ +{ + "merge_commit_sha": "019756bbc6006874a867ddcf2ada64133627532f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 6, + "d": 11 + }, + "README.rst": { + "a": 7, + "d": 8 + }, + "doc/Makefile": { + "a": 2, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 4 + }, + "mne/bem.py": { + "a": 3, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 3, + "d": 3 + }, + "mne/chpi.py": { + "a": 3, + "d": 1 + }, + "mne/connectivity/spectral.py": { + "a": 4, + "d": 3 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 29, + "d": 22 + }, + "mne/cov.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 1, + "d": 1 + }, 
+ "mne/datasets/brainstorm/bst_raw.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/sample/sample.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/somato/somato.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/testing/_testing.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/ems.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/tests/test_ems.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 5, + "d": 3 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 4, + "d": 6 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/evoked.py": { + "a": 2, + "d": 4 + }, + "mne/filter.py": { + "a": 8, + "d": 10 + }, + "mne/fixes.py": { + "a": 5, + "d": 875 + }, + "mne/forward/_field_interpolation.py": { + "a": 3, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 4 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 5, + "d": 9 + }, + "mne/io/base.py": { + "a": 2, + "d": 3 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 4, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/open.py": { + "a": 6, + "d": 6 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 7, + "d": 6 + }, + "mne/label.py": { + "a": 7, + "d": 8 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/maxwell.py": { + "a": 6, + "d": 5 + }, + "mne/simulation/tests/test_raw.py": { + "a": 4, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 7, + "d": 8 + }, + "mne/source_space.py": { + "a": 12, + "d": 10 + }, + "mne/stats/cluster_level.py": { + "a": 6, + "d": 7 + }, + "mne/stats/parametric.py": { + "a": 1, + "d": 2 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 2 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 5, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_fixes.py": { + "a": 4, + "d": 179 + }, + "mne/tests/test_label.py": { + "a": 9, + "d": 12 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_report.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_surface.py": { + "a": 3, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 3, + "d": 20 + }, + "mne/viz/circle.py": { + "a": 3, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3532.json b/doc/sphinxext/prs/3532.json new file mode 100644 index 00000000000..8947513fcff --- /dev/null +++ b/doc/sphinxext/prs/3532.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "89f5f5486259c96988d553db692d8a6a52243c56", + "authors": [ + { + "n": "Phillip Alday", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 10, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 114, + "d": 31 + }, + "mne/io/brainvision/tests/data/test_old_layout_latin1_software_filter.eeg": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_old_layout_latin1_software_filter.vhdr": { + "a": 156, + "d": 0 + }, + 
"mne/io/brainvision/tests/data/test_old_layout_latin1_software_filter.vmrk": { + "a": 14, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 19, + "d": 4 + }, + "mne/tests/test_line_endings.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3534.json b/doc/sphinxext/prs/3534.json new file mode 100644 index 00000000000..bce18578289 --- /dev/null +++ b/doc/sphinxext/prs/3534.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "71b15a704b94f2054cff9bd04c553cc862040405", + "authors": [ + { + "n": "Phillip Alday", + "e": null + } + ], + "changes": { + "mne/io/reference.py": { + "a": 22, + "d": 13 + }, + "mne/io/tests/test_reference.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3535.json b/doc/sphinxext/prs/3535.json new file mode 100644 index 00000000000..ecc69870f26 --- /dev/null +++ b/doc/sphinxext/prs/3535.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9f056c80697b35e3a6b5d2c9b3f3715d240568af", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/index.rst": { + "a": 1, + "d": 1 + }, + "doc/tutorials.rst": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_introduction.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/354.json b/doc/sphinxext/prs/354.json new file mode 100644 index 00000000000..8f542c2d008 --- /dev/null +++ b/doc/sphinxext/prs/354.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "893e18b6f7f4a8214a1faecde81c72d25cbd2983", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 8, + "d": 21 + }, + "mne/utils.py": { + "a": 12, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3540.json b/doc/sphinxext/prs/3540.json new file mode 100644 index 00000000000..263469148cf --- /dev/null +++ b/doc/sphinxext/prs/3540.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "7ce837ad076a08f1de0c79ca7b4909e2a6a2d0d6", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 12, + "d": 12 + }, + "mne/io/base.py": { + "a": 67, + "d": 6 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 23, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 25, + "d": 16 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3544.json b/doc/sphinxext/prs/3544.json new file mode 100644 index 00000000000..10df5865d66 --- /dev/null +++ b/doc/sphinxext/prs/3544.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6d780f01d7b888992441e602e8b6913e58992414", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 49, + "d": 10 + }, + "mne/tests/test_chpi.py": { + "a": 51, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3545.json b/doc/sphinxext/prs/3545.json new file mode 100644 index 00000000000..2cc5784ba9a --- /dev/null +++ b/doc/sphinxext/prs/3545.json @@ -0,0 +1,447 @@ +{ + "merge_commit_sha": "765cff8a214f45045b45ff2500a4a91ecdaa2b32", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 1 + }, + "doc/manual/io.rst": { 
+ "a": 19, + "d": 3 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 3, + "d": 3 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 2, + "d": 3 + }, + "mne/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 3, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 9, + "d": 6 + }, + "mne/channels/channels.py": { + "a": 47, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 9, + "d": 12 + }, + "mne/channels/tests/test_layout.py": { + "a": 6, + "d": 6 + }, + "mne/commands/tests/test_commands.py": { + "a": 20, + "d": 19 + }, + "mne/cov.py": { + "a": 2, + "d": 3 + }, + "mne/decoding/tests/test_csp.py": { + "a": 7, + "d": 5 + }, + "mne/decoding/tests/test_ems.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 18, + "d": 17 + }, + "mne/epochs.py": { + "a": 46, + "d": 22 + }, + "mne/event.py": { + "a": 2, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 3, + "d": 2 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 4, + "d": 4 + }, + "mne/io/array/tests/test_array.py": { + "a": 3, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 6, + "d": 6 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 11, + "d": 12 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 14, + "d": 23 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/raw.py": { + "a": 25, + "d": 21 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 184, + "d": 143 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 11, + "d": 14 + }, + "mne/io/proj.py": { + "a": 4, + "d": 3 + }, + "mne/io/reference.py": { + "a": 11, + "d": 8 + }, + "mne/io/tests/test_compensator.py": { + "a": 15, + "d": 9 + }, + "mne/io/tests/test_meas_info.py": { + "a": 17, + "d": 20 + }, + "mne/io/tests/test_pick.py": { + "a": 8, + "d": 5 + }, + "mne/io/tests/test_proc_history.py": { + "a": 7, + "d": 8 + }, + "mne/io/tests/test_raw.py": { + "a": 5, + "d": 5 + }, + "mne/io/tests/test_reference.py": { + "a": 40, + "d": 30 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 17, + "d": 15 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 11, + "d": 12 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 7, + "d": 5 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 36, + "d": 35 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 84, + "d": 79 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 7, + "d": 6 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 14, + "d": 9 + }, + "mne/proj.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/epochs.py": { + "a": 4, + "d": 3 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 14, + "d": 10 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/tests/test_raw.py": { + "a": 14, + "d": 10 + }, + "mne/stats/tests/test_regression.py": { + "a": 7, + "d": 8 + }, + "mne/tests/test_annotations.py": { 
+ "a": 2, + "d": 2 + }, + "mne/tests/test_chpi.py": { + "a": 21, + "d": 15 + }, + "mne/tests/test_cov.py": { + "a": 37, + "d": 31 + }, + "mne/tests/test_dipole.py": { + "a": 16, + "d": 14 + }, + "mne/tests/test_epochs.py": { + "a": 260, + "d": 245 + }, + "mne/tests/test_event.py": { + "a": 25, + "d": 20 + }, + "mne/tests/test_evoked.py": { + "a": 23, + "d": 22 + }, + "mne/tests/test_filter.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_proj.py": { + "a": 19, + "d": 18 + }, + "mne/tests/test_report.py": { + "a": 11, + "d": 15 + }, + "mne/tests/test_selection.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 4 + }, + "mne/tests/test_surface.py": { + "a": 14, + "d": 18 + }, + "mne/tests/test_utils.py": { + "a": 4, + "d": 2 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 3, + "d": 5 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 9, + "d": 9 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 8, + "d": 10 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 10, + "d": 16 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 15, + "d": 15 + }, + "mne/utils.py": { + "a": 6, + "d": 6 + }, + "mne/viz/_3d.py": { + "a": 11, + "d": 9 + }, + "mne/viz/misc.py": { + "a": 4, + "d": 1 + }, + "mne/viz/tests/test_decoding.py": { + "a": 5, + "d": 3 + }, + "mne/viz/tests/test_epochs.py": { + "a": 13, + "d": 9 + }, + "mne/viz/tests/test_evoked.py": { + "a": 13, + "d": 8 + }, + "mne/viz/tests/test_ica.py": { + "a": 17, + "d": 18 + }, + "mne/viz/tests/test_misc.py": { + "a": 11, + "d": 14 + }, + "mne/viz/tests/test_raw.py": { + "a": 5, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 16, + "d": 13 + }, + "mne/viz/tests/test_topomap.py": { + "a": 6, + "d": 7 + }, + "mne/viz/tests/test_utils.py": { + "a": 4, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 12 + }, + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_maxwell_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_artifacts_correction_ssp.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_compute_covariance.py": { + "a": 4, + "d": 3 + }, + "tutorials/plot_eeg_erp.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 4, + "d": 2 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 4, + "d": 3 + }, + "tutorials/plot_modifying_data_inplace.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_epochs.py": { + "a": 4, + "d": 3 + }, + "tutorials/plot_object_raw.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_visualize_epochs.py": { + "a": 4, + "d": 2 + }, + "tutorials/plot_visualize_raw.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3546.json b/doc/sphinxext/prs/3546.json new file mode 100644 index 00000000000..6cd50721650 --- /dev/null +++ b/doc/sphinxext/prs/3546.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a28f47bf61df9cb7908a511db88de575c99da8c9", + 
"authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 19, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3549.json b/doc/sphinxext/prs/3549.json new file mode 100644 index 00000000000..7d3143b24d4 --- /dev/null +++ b/doc/sphinxext/prs/3549.json @@ -0,0 +1,911 @@ +{ + "merge_commit_sha": "24ca938c616b3ef5b4f57a9ffbcf9f35a9dfdf56", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 4, + "d": 1 + }, + "Makefile": { + "a": 6, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/annotations.py": { + "a": 4, + "d": 4 + }, + "mne/baseline.py": { + "a": 5, + "d": 6 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 4 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 4 + }, + "mne/beamformer/_rap_music.py": { + "a": 5, + "d": 12 + }, + "mne/bem.py": { + "a": 48, + "d": 48 + }, + "mne/channels/__init__.py": { + "a": 3, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 24, + "d": 26 + }, + "mne/channels/data/neighbors/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/channels/interpolation.py": { + "a": 3, + "d": 5 + }, + "mne/channels/layout.py": { + "a": 18, + "d": 15 + }, + "mne/channels/montage.py": { + "a": 13, + "d": 11 + }, + "mne/channels/tests/test_montage.py": { + "a": 5, + "d": 5 + }, + "mne/chpi.py": { + "a": 13, + "d": 13 + }, + "mne/commands/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 7, + "d": 2 + }, + "mne/commands/mne_compare_fiff.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_flash_bem.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 6, + "d": 2 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_maxfilter.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_report.py": { + "a": 3, + "d": 1 + }, + "mne/commands/mne_show_fiff.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_show_info.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_surf2bem.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 4, + "d": 3 + }, + "mne/commands/utils.py": { + "a": 2, + "d": 4 + }, + "mne/connectivity/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/connectivity/effective.py": { + "a": 1, + "d": 2 + }, + "mne/connectivity/spectral.py": { + "a": 46, + "d": 37 + }, + "mne/connectivity/utils.py": { + "a": 1, + "d": 2 + }, + "mne/coreg.py": { + "a": 29, + "d": 25 + }, + "mne/cov.py": { + "a": 13, + "d": 13 + }, + "mne/cuda.py": { + "a": 10, + "d": 11 + }, + "mne/data/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/_fake/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/_fake/_fake.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 3, + "d": 4 
+ }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 3, + "d": 4 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 3, + "d": 4 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 4, + "d": 5 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 3, + "d": 4 + }, + "mne/datasets/eegbci/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 4, + "d": 5 + }, + "mne/datasets/megsim/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/megsim/megsim.py": { + "a": 4, + "d": 5 + }, + "mne/datasets/megsim/urls.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/misc/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/misc/_misc.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/multimodal/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/multimodal/multimodal.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/sample/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/sample/sample.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/somato/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/somato/somato.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/spm_face/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/testing/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/testing/_testing.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 10, + "d": 11 + }, + "mne/decoding/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 25, + "d": 25 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 3 + }, + "mne/decoding/ems.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/mixin.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/search_light.py": { + "a": 36, + "d": 33 + }, + "mne/decoding/time_frequency.py": { + "a": 4, + "d": 5 + }, + "mne/decoding/time_gen.py": { + "a": 40, + "d": 39 + }, + "mne/decoding/transformer.py": { + "a": 27, + "d": 23 + }, + "mne/defaults.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 33, + "d": 29 + }, + "mne/epochs.py": { + "a": 78, + "d": 69 + }, + "mne/event.py": { + "a": 39, + "d": 40 + }, + "mne/evoked.py": { + "a": 34, + "d": 33 + }, + "mne/filter.py": { + "a": 21, + "d": 21 + }, + "mne/fixes.py": { + "a": 1, + "d": 1 + }, + "mne/forward/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/forward/_compute_forward.py": { + "a": 3, + "d": 6 + }, + "mne/forward/_field_interpolation.py": { + "a": 7, + "d": 5 + }, + "mne/forward/_lead_dots.py": { + "a": 9, + "d": 9 + }, + "mne/forward/_make_forward.py": { + "a": 13, + "d": 15 + }, + "mne/forward/forward.py": { + "a": 29, + "d": 38 + }, + "mne/gui/__init__.py": { + "a": 4, + "d": 4 + }, + "mne/gui/_backend.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg_gui.py": { + "a": 31, + "d": 23 + }, + "mne/gui/_fiducials_gui.py": { + "a": 12, + "d": 8 + }, + "mne/gui/_file_traits.py": { + "a": 21, + "d": 15 + }, + "mne/gui/_help.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 19, + "d": 18 + }, + "mne/gui/_marker_gui.py": { + "a": 18, + "d": 11 + }, + "mne/gui/_viewer.py": { + "a": 14, + "d": 11 + }, + "mne/inverse_sparse/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 9, + "d": 13 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 3 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 27, + "d": 25 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": 
{ + "a": 4, + "d": 3 + }, + "mne/io/base.py": { + "a": 58, + "d": 54 + }, + "mne/io/brainvision/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 13, + "d": 10 + }, + "mne/io/bti/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 34, + "d": 41 + }, + "mne/io/bti/read.py": { + "a": 18, + "d": 18 + }, + "mne/io/cnt/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 5, + "d": 5 + }, + "mne/io/compensator.py": { + "a": 4, + "d": 7 + }, + "mne/io/constants.py": { + "a": 5, + "d": 6 + }, + "mne/io/ctf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 7, + "d": 7 + }, + "mne/io/ctf/eeg.py": { + "a": 3, + "d": 5 + }, + "mne/io/ctf/hc.py": { + "a": 3, + "d": 4 + }, + "mne/io/ctf/info.py": { + "a": 13, + "d": 14 + }, + "mne/io/ctf/res4.py": { + "a": 12, + "d": 13 + }, + "mne/io/ctf/trans.py": { + "a": 5, + "d": 5 + }, + "mne/io/ctf_comp.py": { + "a": 4, + "d": 4 + }, + "mne/io/diff.py": { + "a": 3, + "d": 2 + }, + "mne/io/edf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 10, + "d": 14 + }, + "mne/io/eeglab/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 13, + "d": 12 + }, + "mne/io/egi/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 9, + "d": 9 + }, + "mne/io/fiff/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 9, + "d": 8 + }, + "mne/io/kit/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 3, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 18, + "d": 16 + }, + "mne/io/matrix.py": { + "a": 3, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 20, + "d": 21 + }, + "mne/io/nicolet/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 4, + "d": 3 + }, + "mne/io/open.py": { + "a": 5, + "d": 5 + }, + "mne/io/pick.py": { + "a": 20, + "d": 20 + }, + "mne/io/proc_history.py": { + "a": 6, + "d": 6 + }, + "mne/io/proj.py": { + "a": 22, + "d": 20 + }, + "mne/io/tag.py": { + "a": 26, + "d": 28 + }, + "mne/io/tree.py": { + "a": 3, + "d": 5 + }, + "mne/io/utils.py": { + "a": 7, + "d": 8 + }, + "mne/io/write.py": { + "a": 27, + "d": 26 + }, + "mne/label.py": { + "a": 39, + "d": 43 + }, + "mne/minimum_norm/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 25, + "d": 28 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 5, + "d": 5 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 12, + "d": 13 + }, + "mne/misc.py": { + "a": 3, + "d": 5 + }, + "mne/parallel.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/bads.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ctps_.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/ecg.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/eog.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/ica.py": { + "a": 49, + "d": 55 + }, + "mne/preprocessing/maxfilter.py": { + "a": 3, + "d": 28 + }, + "mne/preprocessing/maxwell.py": { + "a": 39, + "d": 40 + }, + "mne/preprocessing/peak_finder.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ssp.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/stim.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 2, + "d": 3 + }, + "mne/proj.py": { + 
"a": 5, + "d": 5 + }, + "mne/realtime/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/client.py": { + "a": 21, + "d": 21 + }, + "mne/realtime/epochs.py": { + "a": 10, + "d": 10 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 12, + "d": 16 + }, + "mne/realtime/mockclient.py": { + "a": 8, + "d": 8 + }, + "mne/realtime/stim_server_client.py": { + "a": 15, + "d": 20 + }, + "mne/report.py": { + "a": 43, + "d": 88 + }, + "mne/selection.py": { + "a": 2, + "d": 3 + }, + "mne/simulation/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/simulation/evoked.py": { + "a": 4, + "d": 4 + }, + "mne/simulation/metrics.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/raw.py": { + "a": 5, + "d": 5 + }, + "mne/simulation/source.py": { + "a": 3, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 93, + "d": 106 + }, + "mne/source_space.py": { + "a": 43, + "d": 47 + }, + "mne/stats/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 35, + "d": 32 + }, + "mne/stats/multi_comp.py": { + "a": 3, + "d": 4 + }, + "mne/stats/parametric.py": { + "a": 8, + "d": 9 + }, + "mne/stats/permutations.py": { + "a": 3, + "d": 5 + }, + "mne/stats/regression.py": { + "a": 10, + "d": 14 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 31, + "d": 32 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/_stockwell.py": { + "a": 6, + "d": 6 + }, + "mne/time_frequency/ar.py": { + "a": 2, + "d": 3 + }, + "mne/time_frequency/csd.py": { + "a": 11, + "d": 10 + }, + "mne/time_frequency/multitaper.py": { + "a": 14, + "d": 17 + }, + "mne/time_frequency/psd.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/stft.py": { + "a": 4, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 45, + "d": 38 + }, + "mne/transforms.py": { + "a": 36, + "d": 32 + }, + "mne/utils.py": { + "a": 114, + "d": 115 + }, + "mne/viz/_3d.py": { + "a": 8, + "d": 11 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/viz/circle.py": { + "a": 5, + "d": 4 + }, + "mne/viz/decoding.py": { + "a": 5, + "d": 7 + }, + "mne/viz/epochs.py": { + "a": 29, + "d": 30 + }, + "mne/viz/evoked.py": { + "a": 29, + "d": 32 + }, + "mne/viz/ica.py": { + "a": 17, + "d": 17 + }, + "mne/viz/misc.py": { + "a": 8, + "d": 9 + }, + "mne/viz/montage.py": { + "a": 3, + "d": 4 + }, + "mne/viz/raw.py": { + "a": 13, + "d": 14 + }, + "mne/viz/tests/test_montage.py": { + "a": 5, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 15, + "d": 17 + }, + "mne/viz/topomap.py": { + "a": 25, + "d": 28 + }, + "mne/viz/utils.py": { + "a": 32, + "d": 33 + }, + "setup.cfg": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3551.json b/doc/sphinxext/prs/3551.json new file mode 100644 index 00000000000..dbae1c1fadc --- /dev/null +++ b/doc/sphinxext/prs/3551.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8aa4ad7c4eac0e0a87bc09f71a9be0aa59e6adaa", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 24, + "d": 30 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 14, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3553.json b/doc/sphinxext/prs/3553.json new file mode 100644 index 00000000000..1bc0c838c2c --- /dev/null +++ b/doc/sphinxext/prs/3553.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8ea72065261f5cc03445e66980f782e9137b85a1", + "authors": [ + { + "n": "jaeilepp", + "e": 
"jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 0 + }, + "tutorials/plot_background_filtering.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3557.json b/doc/sphinxext/prs/3557.json new file mode 100644 index 00000000000..f2fe1b0eb35 --- /dev/null +++ b/doc/sphinxext/prs/3557.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c4174f7f8ac085d6b14b4d44c75075256dae7f57", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 13, + "d": 8 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3559.json b/doc/sphinxext/prs/3559.json new file mode 100644 index 00000000000..5c83846492b --- /dev/null +++ b/doc/sphinxext/prs/3559.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7845d39a3cd642bdab6a162174697069d1c9056e", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/356.json b/doc/sphinxext/prs/356.json new file mode 100644 index 00000000000..c6dbe01fe0c --- /dev/null +++ b/doc/sphinxext/prs/356.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0ae991fe24bd08e785d380f1b17c5b0513a28b57", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/fiff/open.py": { + "a": 5, + "d": 2 + }, + "mne/fiff/tag.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3560.json b/doc/sphinxext/prs/3560.json new file mode 100644 index 00000000000..41151214e82 --- /dev/null +++ b/doc/sphinxext/prs/3560.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "29645cd04cf795c077d7d5dd60737c87208155ee", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "mne/event.py": { + "a": 3, + "d": 3 + }, + "mne/io/constants.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3561.json b/doc/sphinxext/prs/3561.json new file mode 100644 index 00000000000..86fa3707ac8 --- /dev/null +++ b/doc/sphinxext/prs/3561.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3df939c6eae7e906f298db6715669a8084865d49", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3564.json b/doc/sphinxext/prs/3564.json new file mode 100644 index 00000000000..8ab4bb7844e --- /dev/null +++ b/doc/sphinxext/prs/3564.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "90d1b1f2fc6b772b825f76a047b34ec511b47f55", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cuda.py": { + "a": 5, + "d": 4 + }, + "mne/utils.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3568.json b/doc/sphinxext/prs/3568.json new file mode 100644 index 00000000000..25ea691bace --- /dev/null +++ b/doc/sphinxext/prs/3568.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7cd5d0e9cbf2b0472c8f0ba7b14ca97e6f3c874b", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/annotations.py": { + "a": 
3, + "d": 3 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/357.json b/doc/sphinxext/prs/357.json new file mode 100644 index 00000000000..c50df690191 --- /dev/null +++ b/doc/sphinxext/prs/357.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "876a3e9cb795f3eaa49df29fc1bdd94eb29ddcf9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/evoked.py": { + "a": 46, + "d": 33 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3574.json b/doc/sphinxext/prs/3574.json new file mode 100644 index 00000000000..d09c15847b7 --- /dev/null +++ b/doc/sphinxext/prs/3574.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "558a3a0e4e5bf6265e9c6c4904a8404ed635bb5f", + "authors": [ + { + "n": "Andreas Højlund", + "e": "linahn@clin.au.dk" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 6, + "d": 7 + }, + "mne/viz/tests/test_ica.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3575.json b/doc/sphinxext/prs/3575.json new file mode 100644 index 00000000000..c975e80e66e --- /dev/null +++ b/doc/sphinxext/prs/3575.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b906cf79f045729615deaeb921345536efd1dd55", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/epochs.py": { + "a": 27, + "d": 23 + }, + "mne/evoked.py": { + "a": 14, + "d": 10 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3576.json b/doc/sphinxext/prs/3576.json new file mode 100644 index 00000000000..78fb872d6b7 --- /dev/null +++ b/doc/sphinxext/prs/3576.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "fc56f61f315f8f5ec4467ddb89573e15fb064cd8", + "authors": [ + { + "n": "Phillip Alday", + "e": null + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 60, + "d": 5 + }, + "mne/io/brainvision/tests/data/test_highpass_hz.vhdr": { + "a": 103, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_lowpass_s.vhdr": { + "a": 103, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_mixed_highpass.vhdr": { + "a": 103, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_mixed_highpass_hz.vhdr": { + "a": 103, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_mixed_lowpass.vhdr": { + "a": 103, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_mixed_lowpass_s.vhdr": { + "a": 103, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_partially_disabled_hw_filter.vhdr": { + "a": 103, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 158, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/358.json b/doc/sphinxext/prs/358.json new file mode 100644 index 00000000000..a16a27e165f --- /dev/null +++ b/doc/sphinxext/prs/358.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "10c8ff7c5feb341c363b2acd8d1f261a15d70cd9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxfilter.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3580.json b/doc/sphinxext/prs/3580.json new file mode 100644 index 00000000000..a15c39fdd5e --- /dev/null +++ b/doc/sphinxext/prs/3580.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f350bd44754030f3e02afd63ad94197b4bd77299", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + 
"changes": { + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3581.json b/doc/sphinxext/prs/3581.json new file mode 100644 index 00000000000..b7e505dc164 --- /dev/null +++ b/doc/sphinxext/prs/3581.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fa0cdb03f72ea29ceaa8f5ecec058f3d5f2fdfa4", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3586.json b/doc/sphinxext/prs/3586.json new file mode 100644 index 00000000000..624d92f2101 --- /dev/null +++ b/doc/sphinxext/prs/3586.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9e5cd907229a86efdd35bb9757eef8ebe9ec639a", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 45, + "d": 13 + }, + "mne/decoding/tests/test_csp.py": { + "a": 29, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3587.json b/doc/sphinxext/prs/3587.json new file mode 100644 index 00000000000..37250ae3ac7 --- /dev/null +++ b/doc/sphinxext/prs/3587.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8ade1fffea0bd7d252fae5d4d2f60207e95c6f69", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_time_gen.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 17, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3589.json b/doc/sphinxext/prs/3589.json new file mode 100644 index 00000000000..6a2dfb76ce2 --- /dev/null +++ b/doc/sphinxext/prs/3589.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "69345bf04f2dbdf9d4198c11251db9652016f7b0", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/interpolation.py": { + "a": 4, + "d": 3 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3593.json b/doc/sphinxext/prs/3593.json new file mode 100644 index 00000000000..b7d6e48287c --- /dev/null +++ b/doc/sphinxext/prs/3593.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f0fa05578b96d2cea83ecd811e1c71b03ec5267d", + "authors": [ + { + "n": "Phillip Alday", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 37, + "d": 22 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3594.json b/doc/sphinxext/prs/3594.json new file mode 100644 index 00000000000..56d01f4ef93 --- /dev/null +++ b/doc/sphinxext/prs/3594.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d738cb996c3fa6edacf568e3a35c8f08d42131b3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 42, + "d": 15 + }, + "mne/io/base.py": { + "a": 18, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 4, + "d": 16 + }, + "mne/tests/test_evoked.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3595.json b/doc/sphinxext/prs/3595.json new file mode 100644 index 
00000000000..13053e66b1e --- /dev/null +++ b/doc/sphinxext/prs/3595.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "94a4dc230e78283f741d7c9925e1c2fccd99bfc6", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/index.rst": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3596.json b/doc/sphinxext/prs/3596.json new file mode 100644 index 00000000000..07a86fdf34c --- /dev/null +++ b/doc/sphinxext/prs/3596.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e8f11200f18c789b4eddcada7094a5bf18218021", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 6, + "d": 5 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 31, + "d": 41 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/36.json b/doc/sphinxext/prs/36.json new file mode 100644 index 00000000000..e96acadbeff --- /dev/null +++ b/doc/sphinxext/prs/36.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "fe27317f8b701fade51771ca5948888f3058bed2", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "bin/mne_compute_proj_ecg.py": { + "a": 61, + "d": 8 + }, + "mne/epochs.py": { + "a": 4, + "d": 12 + }, + "mne/fiff/evoked.py": { + "a": 2, + "d": 5 + }, + "mne/fiff/proj.py": { + "a": 52, + "d": 4 + }, + "mne/proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3600.json b/doc/sphinxext/prs/3600.json new file mode 100644 index 00000000000..3e6dfc3b331 --- /dev/null +++ b/doc/sphinxext/prs/3600.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "62b3bbd4a8cb717fdd544518f907931913167298", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 26, + "d": 8 + }, + "mne/channels/tests/test_channels.py": { + "a": 6, + "d": 1 + }, + "mne/defaults.py": { + "a": 15, + "d": 13 + }, + "mne/epochs.py": { + "a": 5, + "d": 4 + }, + "mne/evoked.py": { + "a": 10, + "d": 8 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 1 + }, + "mne/io/constants.py": { + "a": 29, + "d": 24 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 5, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 1 + }, + "mne/io/pick.py": { + "a": 33, + "d": 8 + }, + "mne/io/tests/test_pick.py": { + "a": 13, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 11, + "d": 7 + }, + "mne/viz/epochs.py": { + "a": 10, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 6, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3602.json b/doc/sphinxext/prs/3602.json new file mode 100644 index 00000000000..ffca8e08404 --- /dev/null +++ b/doc/sphinxext/prs/3602.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e0938066a381720f54656ad6eadb5b754954c49", + "authors": [ + { + "n": "Keith Doelling", + "e": "keith.doelling@gmail.com" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3603.json b/doc/sphinxext/prs/3603.json new file mode 100644 index 00000000000..2da833d8cc1 --- /dev/null +++ b/doc/sphinxext/prs/3603.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1d6bade5f7b881b9e0c1acd128f65ce9add5fee0", + "authors": [ + { + "n": "Eric 
Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 43, + "d": 32 + }, + "mne/tests/test_epochs.py": { + "a": 28, + "d": 2 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3606.json b/doc/sphinxext/prs/3606.json new file mode 100644 index 00000000000..8b8fb599c07 --- /dev/null +++ b/doc/sphinxext/prs/3606.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ee585e818380436f6d6765dbeaad040dfa35181a", + "authors": [ + { + "n": "Johannes Niediek", + "e": null + } + ], + "changes": { + "mne/time_frequency/_stockwell.py": { + "a": 7, + "d": 3 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 28, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3610.json b/doc/sphinxext/prs/3610.json new file mode 100644 index 00000000000..f01affff8a2 --- /dev/null +++ b/doc/sphinxext/prs/3610.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e9dd187de6d4ef7262f3c6a14d7c090ddbdb86ba", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "mne/evoked.py": { + "a": 10, + "d": 8 + }, + "mne/io/fiff/raw.py": { + "a": 11, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3612.json b/doc/sphinxext/prs/3612.json new file mode 100644 index 00000000000..f0f2cb81694 --- /dev/null +++ b/doc/sphinxext/prs/3612.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8394c866b516ea7f1cb5885681381805bfe5bf69", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 0, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3613.json b/doc/sphinxext/prs/3613.json new file mode 100644 index 00000000000..b936e95eb99 --- /dev/null +++ b/doc/sphinxext/prs/3613.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "c44781dc96fba5aeb82437ae808eeea55d0ce428", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 18, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_evoked_whitening.py": { + "a": 4, + "d": 4 + }, + "examples/visualization/plot_warp_surfaces.py": { + "a": 83, + "d": 0 + }, + "mne/bem.py": { + "a": 65, + "d": 31 + }, + "mne/channels/layout.py": { + "a": 3, + "d": 6 + }, + "mne/channels/montage.py": { + "a": 18, + "d": 39 + }, + "mne/channels/tests/test_montage.py": { + "a": 13, + "d": 1 + }, + "mne/data/fsaverage/fsaverage-fiducials.fif": { + "a": 0, + "d": 0 + }, + "mne/data/fsaverage/fsaverage-head.fif": { + "a": 0, + "d": 0 + }, + "mne/data/fsaverage/fsaverage-inner_skull-bem.fif": { + "a": 0, + "d": 0 + }, + "mne/data/fsaverage/fsaverage-trans.fif": { + "a": 0, + "d": 0 + }, + "mne/filter.py": { + "a": 8, + "d": 8 + }, + "mne/fixes.py": { + "a": 64, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 20, + "d": 203 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 5, + "d": 28 + }, + "mne/selection.py": { + "a": 6, + "d": 4 + }, + "mne/source_space.py": { + "a": 3, + "d": 3 + }, + "mne/surface.py": { + "a": 44, + "d": 23 + }, + "mne/tests/common.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_fixes.py": { + "a": 17, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 120, + "d": 39 + }, + "mne/transforms.py": { + "a": 428, + "d": 27 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 3 + 
}, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3614.json b/doc/sphinxext/prs/3614.json new file mode 100644 index 00000000000..02a8c286ff0 --- /dev/null +++ b/doc/sphinxext/prs/3614.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b5a6bfc57199f7ecedf9ac11fbeb7674b7b6c46a", + "authors": [ + { + "n": "jmontoyam", + "e": "montoya.jair.m@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/infomax_.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3616.json b/doc/sphinxext/prs/3616.json new file mode 100644 index 00000000000..8cd90e3ccc0 --- /dev/null +++ b/doc/sphinxext/prs/3616.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b7771efabf3f2e22abd9963347de5e4a1b540bd7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 2 + }, + "mne/io/base.py": { + "a": 4, + "d": 7 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3617.json b/doc/sphinxext/prs/3617.json new file mode 100644 index 00000000000..551a0d1d893 --- /dev/null +++ b/doc/sphinxext/prs/3617.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "a2b655f4dce5b848137932cfed504297c3b167cc", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 0, + "d": 3 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 17, + "d": 40 + }, + "examples/decoding/plot_decoding_xdawn_transformer.py": { + "a": 0, + "d": 76 + }, + "mne/decoding/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 10, + "d": 10 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 21, + "d": 21 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 11, + "d": 11 + }, + "mne/preprocessing/xdawn.py": { + "a": 4, + "d": 5 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3620.json b/doc/sphinxext/prs/3620.json new file mode 100644 index 00000000000..6784302b810 --- /dev/null +++ b/doc/sphinxext/prs/3620.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3b09ed062226bbe017243071f450e0254fc3a769", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "mne/externals/FieldTrip.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3622.json b/doc/sphinxext/prs/3622.json new file mode 100644 index 00000000000..6ee49fa66af --- /dev/null +++ b/doc/sphinxext/prs/3622.json @@ -0,0 +1,559 @@ +{ + "merge_commit_sha": "0ad50cbb649122c5638ab5cdd6cf7fe9b0a6b702", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 12, + "d": 6 + }, + "doc/manual/io.rst": { + "a": 0, + "d": 8 + }, + "doc/python_reference.rst": { + "a": 0, + "d": 3 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 2, + "d": 2 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 2, + "d": 2 + }, + 
"examples/decoding/plot_decoding_csp_eeg.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_decoding_unsupervised_spatial_filter.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_xdawn_denoising.py": { + "a": 7, + "d": 6 + }, + "examples/stats/plot_linear_regression_raw.py": { + "a": 4, + "d": 2 + }, + "examples/visualization/plot_evoked_whitening.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 6, + "d": 9 + }, + "mne/channels/tests/test_channels.py": { + "a": 4, + "d": 4 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 4 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 2, + "d": 13 + }, + "mne/decoding/tests/test_csp.py": { + "a": 6, + "d": 9 + }, + "mne/decoding/tests/test_ems.py": { + "a": 9, + "d": 7 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 5, + "d": 6 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 12, + "d": 53 + }, + "mne/decoding/transformer.py": { + "a": 3, + "d": 103 + }, + "mne/epochs.py": { + "a": 21, + "d": 42 + }, + "mne/event.py": { + "a": 8, + "d": 7 + }, + "mne/evoked.py": { + "a": 3, + "d": 57 + }, + "mne/filter.py": { + "a": 87, + "d": 119 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 26 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 2, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 3 + }, + "mne/io/base.py": { + "a": 65, + "d": 77 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 3, + "d": 3 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 3, + "d": 3 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/raw.py": { + "a": 8, + "d": 65 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 104, + "d": 160 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 3, + "d": 5 + }, + "mne/io/proj.py": { + "a": 1, + "d": 16 + }, + "mne/io/tests/test_compensator.py": { + "a": 5, + "d": 6 + }, + "mne/io/tests/test_meas_info.py": { + "a": 5, + "d": 5 + }, + "mne/io/tests/test_pick.py": { + "a": 4, + "d": 4 + }, + "mne/io/tests/test_raw.py": { + "a": 4, + "d": 4 + }, + "mne/io/tests/test_reference.py": { + "a": 18, + "d": 22 + }, + "mne/label.py": { + "a": 11, + "d": 21 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 4, + "d": 5 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 6, + "d": 6 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + 
"a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/stim.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 7, + "d": 10 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 15, + "d": 24 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 6, + "d": 11 + }, + "mne/proj.py": { + "a": 3, + "d": 4 + }, + "mne/realtime/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 10, + "d": 14 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 2 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 4, + "d": 8 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 3 + }, + "mne/stats/tests/test_regression.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 11, + "d": 17 + }, + "mne/tests/test_cov.py": { + "a": 17, + "d": 24 + }, + "mne/tests/test_dipole.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 184, + "d": 274 + }, + "mne/tests/test_event.py": { + "a": 6, + "d": 6 + }, + "mne/tests/test_evoked.py": { + "a": 3, + "d": 5 + }, + "mne/tests/test_filter.py": { + "a": 41, + "d": 56 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_proj.py": { + "a": 10, + "d": 11 + }, + "mne/tests/test_report.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_selection.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_utils.py": { + "a": 34, + "d": 46 + }, + "mne/time_frequency/__init__.py": { + "a": 3, + "d": 5 + }, + "mne/time_frequency/csd.py": { + "a": 0, + "d": 14 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 9, + "d": 9 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 7, + "d": 5 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 5, + "d": 3 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 24, + "d": 47 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 162 + }, + "mne/utils.py": { + "a": 1, + "d": 21 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 20 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_decoding.py": { + "a": 8, + "d": 9 + }, + "mne/viz/tests/test_epochs.py": { + "a": 10, + "d": 24 + }, + "mne/viz/tests/test_evoked.py": { + "a": 6, + "d": 19 + }, + "mne/viz/tests/test_ica.py": { + "a": 3, + "d": 5 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 4, + "d": 10 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 6 + }, + "mne/viz/tests/test_utils.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_maxwell_filtering.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_artifacts_correction_ssp.py": { + 
"a": 1, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_compute_covariance.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_eeg_erp.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_modifying_data_inplace.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_epochs.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_object_evoked.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_object_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 3, + "d": 4 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_epochs.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_visualize_raw.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3626.json b/doc/sphinxext/prs/3626.json new file mode 100644 index 00000000000..3e62ae0d188 --- /dev/null +++ b/doc/sphinxext/prs/3626.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2b0d9c990a3bf3af9fe96fc42565d4928e05a9c9", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 11, + "d": 1 + }, + "mne/decoding/time_gen.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3628.json b/doc/sphinxext/prs/3628.json new file mode 100644 index 00000000000..703b8bcd8e1 --- /dev/null +++ b/doc/sphinxext/prs/3628.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "a9723e8215cdbf2721497516173c82f2ed64cf42", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 2 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 3, + "d": 4 + }, + "mne/decoding/transformer.py": { + "a": 8, + "d": 51 + }, + "mne/filter.py": { + "a": 326, + "d": 186 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 6 + }, + "mne/preprocessing/ecg.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/eog.py": { + "a": 7, + "d": 9 + }, + "mne/preprocessing/ica.py": { + "a": 8, + "d": 10 + }, + "mne/tests/test_filter.py": { + "a": 62, + "d": 63 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 280, + "d": 0 + }, + "mne/viz/tests/test_misc.py": { + "a": 30, + "d": 2 + }, + "tutorials/plot_background_filtering.py": { + "a": 76, + "d": 134 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 4, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/363.json b/doc/sphinxext/prs/363.json new file mode 100644 index 00000000000..3eb983d8da7 --- /dev/null +++ b/doc/sphinxext/prs/363.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "32b04de7899d1d18b48b037c95967112b87860a5", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 98, + "d": 20 + }, + "mne/source_estimate.py": { + "a": 0, + "d": 5 + }, + "mne/tests/test_label.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3630.json b/doc/sphinxext/prs/3630.json new file mode 100644 index 00000000000..c09254c9df1 --- /dev/null +++ b/doc/sphinxext/prs/3630.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e4ed71baf2b9e4175758e6d9939d7fe252b9d949", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3634.json b/doc/sphinxext/prs/3634.json new file mode 100644 index 00000000000..ffd015b7204 --- /dev/null +++ b/doc/sphinxext/prs/3634.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bcd57d65450b5f0271a221f786d1d42d27ec5f4c", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3637.json b/doc/sphinxext/prs/3637.json new file mode 100644 index 00000000000..12234669aaa --- /dev/null +++ b/doc/sphinxext/prs/3637.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "4b6a1744e8be9e8ff6e1e238953f5b6667062673", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "appveyor.yml": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/ar.py": { + "a": 33, + "d": 115 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 29, + "d": 14 + }, + "mne/utils.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3638.json b/doc/sphinxext/prs/3638.json new file mode 100644 index 00000000000..45d63cfa0c6 --- /dev/null +++ b/doc/sphinxext/prs/3638.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "279f460a3bc27f7e1bcc75902e119b5c99358094", + "authors": [ + { + "n": "Annalisa Pascarella", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_mixed_source_space_connectity.py": { + "a": 185, + "d": 0 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 142, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 42, + "d": 6 + }, + "mne/source_space.py": { + "a": 126, + "d": 21 + }, + "mne/tests/test_source_space.py": { + "a": 37, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3644.json b/doc/sphinxext/prs/3644.json new file mode 100644 index 00000000000..c4ca50c4660 --- /dev/null +++ b/doc/sphinxext/prs/3644.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b45ac02756f4ce6644ab17a16ac0c77a6f4abb80", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 6, + "d": 4 + }, + "mne/gui/_fiducials_gui.py": { + "a": 11, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3646.json b/doc/sphinxext/prs/3646.json new file mode 100644 index 00000000000..ae0b16184fb --- 
/dev/null +++ b/doc/sphinxext/prs/3646.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f621a69c40950ac4f241331707468ddc87d313c1", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "mne/realtime/fieldtrip_client.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3648.json b/doc/sphinxext/prs/3648.json new file mode 100644 index 00000000000..904efc48575 --- /dev/null +++ b/doc/sphinxext/prs/3648.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "85c42d12f4a99227673846601c343e393203ba79", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 11, + "d": 23 + }, + "mne/gui/_fiducials_gui.py": { + "a": 10, + "d": 19 + }, + "mne/gui/_file_traits.py": { + "a": 6, + "d": 15 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 11, + "d": 21 + }, + "mne/gui/_marker_gui.py": { + "a": 9, + "d": 18 + }, + "mne/gui/_viewer.py": { + "a": 10, + "d": 19 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 4, + "d": 4 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 2, + "d": 2 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 6, + "d": 6 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 1, + "d": 3 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 20, + "d": 3 + }, + "mne/utils.py": { + "a": 0, + "d": 7 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3649.json b/doc/sphinxext/prs/3649.json new file mode 100644 index 00000000000..57fadbfb578 --- /dev/null +++ b/doc/sphinxext/prs/3649.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "570eab27f3871389cc10ef529eb10c667bb07b51", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3650.json b/doc/sphinxext/prs/3650.json new file mode 100644 index 00000000000..26cb5fb09df --- /dev/null +++ b/doc/sphinxext/prs/3650.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6c215a52cb436a30f0f519b0e9c1ed504c659c68", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "circle.yml": { + "a": 4, + "d": 0 + }, + "examples/datasets/plot_megsim_data_single_trial.py": { + "a": 2, + "d": 2 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 4, + "d": 5 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 0 + }, + "examples/realtime/ftclient_rt_average.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3653.json b/doc/sphinxext/prs/3653.json new file mode 100644 index 00000000000..96c8671923b --- /dev/null +++ b/doc/sphinxext/prs/3653.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "55c587d043aae896c233b4a21cb413ab88b07bc8", + "authors": [ + { + "n": "Sheraz Khan", + "e": "sheraz.khan@pfizer.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3655.json b/doc/sphinxext/prs/3655.json new file mode 100644 index 00000000000..2448fdf53a9 --- /dev/null +++ b/doc/sphinxext/prs/3655.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"0dd4f75194fbd65a84662e6043b437426fa4ed6c", + "authors": [ + { + "n": "Yaroslav Halchenko", + "e": null + } + ], + "changes": { + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 2, + "d": 0 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 2, + "d": 0 + }, + "mne/utils.py": { + "a": 3, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3656.json b/doc/sphinxext/prs/3656.json new file mode 100644 index 00000000000..e70fe14b8df --- /dev/null +++ b/doc/sphinxext/prs/3656.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c22b9ae75893723a875579862bedbdf80110da83", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3659.json b/doc/sphinxext/prs/3659.json new file mode 100644 index 00000000000..0a10b018c0c --- /dev/null +++ b/doc/sphinxext/prs/3659.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "65adad73899a68213a49f9b991ff2a48fca02a9f", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/decoding/tests/test_ems.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3660.json b/doc/sphinxext/prs/3660.json new file mode 100644 index 00000000000..d7b55a2fa18 --- /dev/null +++ b/doc/sphinxext/prs/3660.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fcad5f6c42b6d733cef5861b7d9281109e1cd5d4", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3661.json b/doc/sphinxext/prs/3661.json new file mode 100644 index 00000000000..f1867003e37 --- /dev/null +++ b/doc/sphinxext/prs/3661.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2fa35d453670d759df3ff296c1f5c012f3f8a785", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3662.json b/doc/sphinxext/prs/3662.json new file mode 100644 index 00000000000..73084f5710c --- /dev/null +++ b/doc/sphinxext/prs/3662.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ae1b85553a183b6a15e2e26188ad12bccdccacf1", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "tutorials/plot_creating_data_structures.py": { + "a": 15, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/367.json b/doc/sphinxext/prs/367.json new file mode 100644 index 00000000000..e7321291dd6 --- /dev/null +++ b/doc/sphinxext/prs/367.json @@ -0,0 +1,315 @@ +{ + "merge_commit_sha": "0f2c4dced889807eab11c7ccf3afa92d2cf58ebb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 7, + "d": 7 + }, + "doc/source/whats_new.rst": { + "a": 7, + "d": 0 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 3, + "d": 3 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 4, + "d": 4 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 3, + "d": 4 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 3, + "d": 3 + }, + "examples/export/plot_epochs_as_data_frame.py": { + "a": 3, + "d": 2 + }, + "examples/export/plot_epochs_to_nitime.py": { + "a": 2, + "d": 2 + }, + 
"examples/export/plot_evoked_to_nitime.py": { + "a": 2, + "d": 2 + }, + "examples/export/plot_raw_to_nitime.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 4, + "d": 2 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/plot_define_target_events.py": { + "a": 2, + "d": 2 + }, + "examples/plot_estimate_covariance_matrix_baseline.py": { + "a": 6, + "d": 6 + }, + "examples/plot_estimate_covariance_matrix_raw.py": { + "a": 2, + "d": 2 + }, + "examples/plot_evoked_whitening.py": { + "a": 2, + "d": 2 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 2, + "d": 2 + }, + "examples/plot_from_raw_to_multiple_epochs_to_evoked.py": { + "a": 2, + "d": 2 + }, + "examples/plot_read_and_write_raw_data.py": { + "a": 3, + "d": 4 + }, + "examples/plot_read_epochs.py": { + "a": 2, + "d": 2 + }, + "examples/plot_read_evoked.py": { + "a": 3, + "d": 2 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 1, + "d": 1 + }, + "examples/plot_topo_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/plot_topo_compare_conditions.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/plot_ecg_artifacts_from_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_eog_artifacts_from_ica.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 2, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 6, + "d": 6 + }, + "examples/stats/plot_fdr_stats_evoked.py": { + "a": 3, + "d": 2 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 5, + "d": 5 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_compute_source_psd_epochs.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_tfr_topography.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 14, + "d": 13 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 3, + "d": 4 + }, + "mne/cov.py": { + "a": 21, + "d": 24 + }, + "mne/epochs.py": { + "a": 12, + "d": 10 + }, + "mne/event.py": { + "a": 1, + "d": 2 + }, + "mne/fiff/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/pick.py": { + "a": 50, + "d": 40 + }, + "mne/fiff/proj.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 9, + "d": 14 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 1, 
+ "d": 1 + }, + "mne/fiff/tests/test_raw.py": { + "a": 9, + "d": 10 + }, + "mne/layouts/layout.py": { + "a": 4, + "d": 5 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/ecg.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 17, + "d": 18 + }, + "mne/preprocessing/ssp.py": { + "a": 26, + "d": 17 + }, + "mne/preprocessing/stim.py": { + "a": 11, + "d": 11 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 4 + }, + "mne/proj.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_cov.py": { + "a": 6, + "d": 8 + }, + "mne/tests/test_epochs.py": { + "a": 17, + "d": 7 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_viz.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 5, + "d": 5 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 4, + "d": 4 + }, + "mne/viz.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3670.json b/doc/sphinxext/prs/3670.json new file mode 100644 index 00000000000..79c0670910c --- /dev/null +++ b/doc/sphinxext/prs/3670.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "334a3cdd5bedef2ad6e789078b401f683adf3a0f", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "mne/chpi.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3675.json b/doc/sphinxext/prs/3675.json new file mode 100644 index 00000000000..f97063fbdf4 --- /dev/null +++ b/doc/sphinxext/prs/3675.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6849756618335bb52bfb950050d9d1805528806f", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 18, + "d": 1 + }, + "mne/utils.py": { + "a": 18, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3677.json b/doc/sphinxext/prs/3677.json new file mode 100644 index 00000000000..8df27d0b76a --- /dev/null +++ b/doc/sphinxext/prs/3677.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "dbf108ba9cf563fdd9def67b07447998840f8f46", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/mne_logo.png": { + "a": 0, + "d": 0 + }, + "doc/_static/mne_logo_small.png": { + "a": 0, + "d": 0 + }, + "doc/_static/style.css": { + "a": 43, + "d": 7 + }, + "doc/_templates/layout.html": { + "a": 2, + "d": 2 + }, + "doc/cite.rst": { + "a": 0, + "d": 17 + }, + "doc/conf.py": { + "a": 84, + "d": 19 + }, + "doc/faq.rst": { + "a": 48, + "d": 0 + }, + "doc/getting_started.rst": { + "a": 91, + "d": 73 + }, + "doc/index.rst": { + "a": 119, + "d": 148 + }, + "doc/manual/appendix/c_release_notes.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/index.rst": { + "a": 37, + "d": 50 + }, + "doc/mne_cpp.rst": { + "a": 1, + "d": 3 + }, + "doc/tutorials.rst": { + "a": 0, + "d": 19 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 4, + "d": 4 + }, + "logo/generate_mne_logos.py": { + "a": 12, + "d": 10 + }, + "tutorials/plot_background_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_stats_cluster_methods.py": { + "a": 17, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3685.json 
b/doc/sphinxext/prs/3685.json new file mode 100644 index 00000000000..f44de9b2f84 --- /dev/null +++ b/doc/sphinxext/prs/3685.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "65a4eb0407985c3320ed6efa058f4d97a324131c", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 3 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3687.json b/doc/sphinxext/prs/3687.json new file mode 100644 index 00000000000..19f6b267ece --- /dev/null +++ b/doc/sphinxext/prs/3687.json @@ -0,0 +1,387 @@ +{ + "merge_commit_sha": "11047bc0ade8216ac10a15dfe0766d62c356c1e1", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/tutorials.rst": { + "a": 1, + "d": 0 + }, + "mne/baseline.py": { + "a": 2, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 10, + "d": 5 + }, + "mne/beamformer/_lcmv.py": { + "a": 12, + "d": 6 + }, + "mne/beamformer/_rap_music.py": { + "a": 2, + "d": 1 + }, + "mne/bem.py": { + "a": 16, + "d": 8 + }, + "mne/channels/channels.py": { + "a": 5, + "d": 2 + }, + "mne/channels/interpolation.py": { + "a": 2, + "d": 1 + }, + "mne/chpi.py": { + "a": 4, + "d": 2 + }, + "mne/connectivity/effective.py": { + "a": 2, + "d": 1 + }, + "mne/connectivity/spectral.py": { + "a": 2, + "d": 1 + }, + "mne/cov.py": { + "a": 15, + "d": 8 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 3, + "d": 2 + }, + "mne/datasets/megsim/megsim.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ems.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/time_frequency.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 8, + "d": 5 + }, + "mne/dipole.py": { + "a": 12, + "d": 5 + }, + "mne/epochs.py": { + "a": 30, + "d": 20 + }, + "mne/event.py": { + "a": 2, + "d": 1 + }, + "mne/evoked.py": { + "a": 9, + "d": 5 + }, + "mne/filter.py": { + "a": 21, + "d": 12 + }, + "mne/forward/_compute_forward.py": { + "a": 4, + "d": 2 + }, + "mne/forward/_field_interpolation.py": { + "a": 4, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 10, + "d": 8 + }, + "mne/forward/forward.py": { + "a": 14, + "d": 7 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 4, + "d": 2 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 4, + "d": 2 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 8, + "d": 4 + }, + "mne/io/array/array.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 26, + "d": 14 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 2 + }, + "mne/io/bti/bti.py": { + "a": 4, + "d": 2 + }, + "mne/io/cnt/cnt.py": { + "a": 4, + "d": 2 + }, + "mne/io/ctf/ctf.py": { + "a": 4, + "d": 2 + }, + "mne/io/ctf_comp.py": { + "a": 2, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 4, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 8, + "d": 4 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 4, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 8, + "d": 4 + }, + "mne/io/matrix.py": { + "a": 2, + "d": 
1 + }, + "mne/io/meas_info.py": { + "a": 7, + "d": 4 + }, + "mne/io/nicolet/nicolet.py": { + "a": 4, + "d": 2 + }, + "mne/io/open.py": { + "a": 4, + "d": 2 + }, + "mne/io/pick.py": { + "a": 2, + "d": 1 + }, + "mne/io/proj.py": { + "a": 13, + "d": 6 + }, + "mne/label.py": { + "a": 14, + "d": 7 + }, + "mne/minimum_norm/inverse.py": { + "a": 14, + "d": 8 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 4, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 8, + "d": 4 + }, + "mne/parallel.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/eog.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 22, + "d": 13 + }, + "mne/preprocessing/infomax_.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/peak_finder.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 6, + "d": 3 + }, + "mne/proj.py": { + "a": 8, + "d": 4 + }, + "mne/realtime/client.py": { + "a": 2, + "d": 5 + }, + "mne/realtime/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 2, + "d": 1 + }, + "mne/realtime/mockclient.py": { + "a": 2, + "d": 1 + }, + "mne/realtime/stim_server_client.py": { + "a": 17, + "d": 6 + }, + "mne/report.py": { + "a": 5, + "d": 2 + }, + "mne/selection.py": { + "a": 2, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 52, + "d": 26 + }, + "mne/source_space.py": { + "a": 23, + "d": 11 + }, + "mne/stats/cluster_level.py": { + "a": 8, + "d": 4 + }, + "mne/stats/permutations.py": { + "a": 2, + "d": 1 + }, + "mne/surface.py": { + "a": 12, + "d": 6 + }, + "mne/time_frequency/_stockwell.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 4, + "d": 2 + }, + "mne/time_frequency/stft.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 12, + "d": 7 + }, + "mne/transforms.py": { + "a": 6, + "d": 2 + }, + "mne/utils.py": { + "a": 26, + "d": 6 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_configuration.py": { + "a": 87, + "d": 0 + }, + "tutorials/plot_introduction.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3689.json b/doc/sphinxext/prs/3689.json new file mode 100644 index 00000000000..3729619526d --- /dev/null +++ b/doc/sphinxext/prs/3689.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "faf973d77a2500af139d942ad3d21c934be912c9", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3690.json b/doc/sphinxext/prs/3690.json new file mode 100644 index 00000000000..04232c6b8a8 --- /dev/null +++ b/doc/sphinxext/prs/3690.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8ede39da8e036ca707b890be14d9ad2f31255978", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { 
+ "a": 1, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 16, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3694.json b/doc/sphinxext/prs/3694.json new file mode 100644 index 00000000000..834f5ac5192 --- /dev/null +++ b/doc/sphinxext/prs/3694.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a184b6821f40c9e8f660a7809d697906dfa345eb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 9, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3698.json b/doc/sphinxext/prs/3698.json new file mode 100644 index 00000000000..066635221ee --- /dev/null +++ b/doc/sphinxext/prs/3698.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "93d800cdd19ebb43178924c550583215563cfc10", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 4, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 8, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_parcellation.py": { + "a": 40, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 114, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/37.json b/doc/sphinxext/prs/37.json new file mode 100644 index 00000000000..a67fe6a5e06 --- /dev/null +++ b/doc/sphinxext/prs/37.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5f776f2696d394ed3e193502b2cf99d9c081865c", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/forward.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/370.json b/doc/sphinxext/prs/370.json new file mode 100644 index 00000000000..3b65182f199 --- /dev/null +++ b/doc/sphinxext/prs/370.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6536216b72f90ebbc1fd35acb36fbd89b4f70e50", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 217, + "d": 34 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 14, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3704.json b/doc/sphinxext/prs/3704.json new file mode 100644 index 00000000000..cbc1d193569 --- /dev/null +++ b/doc/sphinxext/prs/3704.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9aa6c94b0871bb6b8f8f0b354eed61199bff6ca8", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3705.json b/doc/sphinxext/prs/3705.json new file mode 100644 index 00000000000..94f656588d2 --- /dev/null +++ b/doc/sphinxext/prs/3705.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "930106e9e94fed915ca42b6c5f9fe9774eb30510", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 26, + "d": 16 + }, + "tutorials/plot_point_spread.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3706.json b/doc/sphinxext/prs/3706.json new file mode 100644 index 00000000000..1fc8d3919d6 --- /dev/null +++ b/doc/sphinxext/prs/3706.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6440eae0f542f33e539cd992375a809c1a02e3e4", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 5 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 18, + "d": 27 + }, + "mne/viz/utils.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3707.json b/doc/sphinxext/prs/3707.json new file mode 100644 index 00000000000..352197effaf --- /dev/null +++ b/doc/sphinxext/prs/3707.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "7c51e252cc90602c91be6df0763baab62083d159", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/visualization/plot_eeg_on_scalp.py": { + "a": 25, + "d": 0 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 15, + "d": 23 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 5, + "d": 8 + }, + "mne/io/ctf/trans.py": { + "a": 5, + "d": 5 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 37, + "d": 12 + }, + "mne/transforms.py": { + "a": 6, + "d": 5 + }, + "mne/viz/_3d.py": { + "a": 93, + "d": 57 + }, + "mne/viz/tests/test_3d.py": { + "a": 20, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/371.json b/doc/sphinxext/prs/371.json new file mode 100644 index 00000000000..ce614d2757d --- /dev/null +++ b/doc/sphinxext/prs/371.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "69602c9ed026b5be7b1c595d323849a2e8be1f4a", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + 
"mne/source_estimate.py": { + "a": 13, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3710.json b/doc/sphinxext/prs/3710.json new file mode 100644 index 00000000000..63706663ef7 --- /dev/null +++ b/doc/sphinxext/prs/3710.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "259b3ded70038abc4e806948efe1c8dea5faeeee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 19 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3712.json b/doc/sphinxext/prs/3712.json new file mode 100644 index 00000000000..e8b4c166aa3 --- /dev/null +++ b/doc/sphinxext/prs/3712.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7fea04ebb53d65e09c5877fbc0f227cf86b4cb92", + "authors": [ + { + "n": "Asish Panda", + "e": "asishrocks95@gmail.com" + } + ], + "changes": { + "doc/manual/decoding.rst": { + "a": 0, + "d": 6 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3713.json b/doc/sphinxext/prs/3713.json new file mode 100644 index 00000000000..72faa5ee829 --- /dev/null +++ b/doc/sphinxext/prs/3713.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "b0fa44ec2fd88e2bc17ad0827fc7c42a90b8e092", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 10, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 29, + "d": 14 + }, + "mne/viz/tests/test_raw.py": { + "a": 6, + "d": 4 + }, + "tutorials/plot_artifacts_detection.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_visualize_raw.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3714.json b/doc/sphinxext/prs/3714.json new file mode 100644 index 00000000000..9b8cec37159 --- /dev/null +++ b/doc/sphinxext/prs/3714.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2d104379b76ff7e37b94718aa837c83ddc358eaf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 1 + }, + "doc/index.rst": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3715.json b/doc/sphinxext/prs/3715.json new file mode 100644 index 00000000000..555aa230286 --- /dev/null +++ b/doc/sphinxext/prs/3715.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "089cf3500ae98b3c5697e43ec06de421308146a0", + "authors": [ + { + "n": "Leonardo S. 
Barbosa", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 63, + "d": 48 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3716.json b/doc/sphinxext/prs/3716.json new file mode 100644 index 00000000000..b0cab5028bb --- /dev/null +++ b/doc/sphinxext/prs/3716.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "31883dfcef70860aa1f21238031773ebfe740317", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 0 + }, + "mne/io/tests/test_meas_info.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3718.json b/doc/sphinxext/prs/3718.json new file mode 100644 index 00000000000..83fcd3d7a07 --- /dev/null +++ b/doc/sphinxext/prs/3718.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f7ffe8aa068c1c743f6968ec09705270bef60c08", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 3, + "d": 3 + }, + "doc/index.rst": { + "a": 7, + "d": 21 + }, + "doc/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/cited_mne.py": { + "a": 2, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/372.json b/doc/sphinxext/prs/372.json new file mode 100644 index 00000000000..e4cd27b826a --- /dev/null +++ b/doc/sphinxext/prs/372.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "335794d50b3c176ac922b7c14c7b04ee8021338d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3721.json b/doc/sphinxext/prs/3721.json new file mode 100644 index 00000000000..78adf2e1733 --- /dev/null +++ b/doc/sphinxext/prs/3721.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "99cf565723612b35af67a23a38fbbfd7ace82c7f", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/io/kit/kit.py": { + "a": 27, + "d": 32 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3724.json b/doc/sphinxext/prs/3724.json new file mode 100644 index 00000000000..06231bd386e --- /dev/null +++ b/doc/sphinxext/prs/3724.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "33d76289203fd5bd6a5083a36e5aa6fb84ceae9e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 22, + "d": 25 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 21, + "d": 40 + }, + "mne/commands/mne_surf2bem.py": { + "a": 2, + "d": 4 + }, + "mne/commands/tests/test_commands.py": { + "a": 16, + "d": 10 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 79, + "d": 1 + }, + "mne/gui/_file_traits.py": { + "a": 2, + "d": 1 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 5, + "d": 6 + }, + "mne/surface.py": { + "a": 19, + "d": 89 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3727.json b/doc/sphinxext/prs/3727.json new file mode 100644 index 00000000000..26327bcabcd --- /dev/null +++ b/doc/sphinxext/prs/3727.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"9df58a59140c8a723373120a45756554e1d2b726", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 12, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3728.json b/doc/sphinxext/prs/3728.json new file mode 100644 index 00000000000..e287b688b3c --- /dev/null +++ b/doc/sphinxext/prs/3728.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "be63cd40049a3d5526f9316c60b721cedf02bd74", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "circle.yml": { + "a": 4, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 18, + "d": 0 + }, + "doc/tutorials.rst": { + "a": 2, + "d": 1 + }, + "examples/decoding/plot_receptive_field.py": { + "a": 154, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/mtrf/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/mtrf/mtrf.py": { + "a": 30, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 22, + "d": 11 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/receptive_field.py": { + "a": 362, + "d": 0 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 150, + "d": 0 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + }, + "tutorials/plot_receptive_field.py": { + "a": 261, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3729.json b/doc/sphinxext/prs/3729.json new file mode 100644 index 00000000000..6208810e1c8 --- /dev/null +++ b/doc/sphinxext/prs/3729.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "6c43c09312a86c4010385c12c055dfd06ae2efce", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 1 + }, + "mne/annotations.py": { + "a": 1, + "d": 2 + }, + "mne/io/base.py": { + "a": 7, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 12, + "d": 5 + }, + "mne/viz/raw.py": { + "a": 24, + "d": 12 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3730.json b/doc/sphinxext/prs/3730.json new file mode 100644 index 00000000000..818a18fda87 --- /dev/null +++ b/doc/sphinxext/prs/3730.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "7562e23c3ee59681802e4768c0ed08676c45d426", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 3, + "d": 0 + }, + "doc/tutorials.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 2 + }, + "mne/transforms.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 66, + "d": 22 + }, + "mne/viz/tests/test_3d.py": { + "a": 6, + "d": 3 + }, + "tutorials/plot_ecog.py": { + "a": 47, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3732.json b/doc/sphinxext/prs/3732.json new file mode 100644 index 00000000000..b7e2c233b19 --- /dev/null +++ b/doc/sphinxext/prs/3732.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "96e704761e580d9d20d07b8aa816175519ed9423", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at 
end of file diff --git a/doc/sphinxext/prs/3734.json b/doc/sphinxext/prs/3734.json new file mode 100644 index 00000000000..32494a86812 --- /dev/null +++ b/doc/sphinxext/prs/3734.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2331044def4a8dfc2b3f65a2211defc5b726389d", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3737.json b/doc/sphinxext/prs/3737.json new file mode 100644 index 00000000000..4c1f6896593 --- /dev/null +++ b/doc/sphinxext/prs/3737.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bc323fb0b38abc8f31aa0f31b2c4868c4d2aad96", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/io/kit/kit.py": { + "a": 7, + "d": 5 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3738.json b/doc/sphinxext/prs/3738.json new file mode 100644 index 00000000000..67052c05be0 --- /dev/null +++ b/doc/sphinxext/prs/3738.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6c2e2222bcf7e45a911ecd6fdc3e4247dae31e18", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/realtime/epochs.py": { + "a": 28, + "d": 3 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 44, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/374.json b/doc/sphinxext/prs/374.json new file mode 100644 index 00000000000..9dd6bdd4f9c --- /dev/null +++ b/doc/sphinxext/prs/374.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5c6c2775ec6ba86dea4c918e87ce7184a41d382d", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3741.json b/doc/sphinxext/prs/3741.json new file mode 100644 index 00000000000..4bfc40ab2cc --- /dev/null +++ b/doc/sphinxext/prs/3741.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3addedf8bfc108fd50d975d39fff605ea3744bb9", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 128, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 22, + "d": 2 + }, + "tutorials/plot_ecog.py": { + "a": 29, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3743.json b/doc/sphinxext/prs/3743.json new file mode 100644 index 00000000000..5ccca5e3165 --- /dev/null +++ b/doc/sphinxext/prs/3743.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "09b6ada84a24c5574c749ec62ce96e383ae81a7b", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 25, + "d": 12 + }, + "mne/io/tests/test_apply_function.py": { + "a": 8, + "d": 11 + }, + "tutorials/plot_modifying_data_inplace.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3744.json b/doc/sphinxext/prs/3744.json new file mode 100644 index 00000000000..bc103910f80 --- /dev/null +++ b/doc/sphinxext/prs/3744.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "97fb61ed342fd05bacef03fb813e99812c406b1c", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 14, + "d": 4 + }, + "mne/io/base.py": { + "a": 4, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 251, + "d": 198 + }, + "mne/viz/raw.py": { + "a": 60, + "d": 11 + }, + "mne/viz/tests/test_evoked.py": { + "a": 12, + "d": 5 + }, + "mne/viz/tests/test_raw.py": { + "a": 10, + "d": 1 + }, + "tutorials/plot_visualize_raw.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3745.json b/doc/sphinxext/prs/3745.json new file mode 100644 index 00000000000..8e34029ad4d --- /dev/null +++ b/doc/sphinxext/prs/3745.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a9ab34c03a9cd06e3996379aed41413aae6619b1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/dipole.py": { + "a": 2, + "d": 10 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3747.json b/doc/sphinxext/prs/3747.json new file mode 100644 index 00000000000..98a714ece0d --- /dev/null +++ b/doc/sphinxext/prs/3747.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "527a67f5b00c7c9b4591e797c86bca05223b5398", + "authors": [ + { + "n": "Keith Doelling", + "e": "keith.doelling@gmail.com" + } + ], + "changes": { + "mne/io/kit/kit.py": { + "a": 7, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 34, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3748.json b/doc/sphinxext/prs/3748.json new file mode 100644 index 00000000000..fcb8acb9b20 --- /dev/null +++ b/doc/sphinxext/prs/3748.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4c7c061e1de3a6398e5f95f37281b9dfe48067a0", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3749.json b/doc/sphinxext/prs/3749.json new file mode 100644 index 00000000000..81f91189c98 --- /dev/null +++ b/doc/sphinxext/prs/3749.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "07102b961768c3c844a56002459cff3f3fdbdead", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 213, + "d": 167 + }, + "mne/channels/tests/test_montage.py": { + "a": 84, + "d": 44 + }, + "mne/io/meas_info.py": { + "a": 75, + "d": 71 + }, + "mne/io/tests/test_meas_info.py": { + "a": 8, + "d": 8 + }, + "mne/io/write.py": { + "a": 19, + "d": 13 + }, + "mne/transforms.py": { + "a": 4, + "d": 3 + }, + "mne/viz/montage.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3750.json 
b/doc/sphinxext/prs/3750.json new file mode 100644 index 00000000000..c1b3909e38c --- /dev/null +++ b/doc/sphinxext/prs/3750.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b7f691299ec60a6ba60bb6c30c917696dc5d3e55", + "authors": [ + { + "n": "Keith Doelling", + "e": "keith.doelling@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3751.json b/doc/sphinxext/prs/3751.json new file mode 100644 index 00000000000..7c2fccd4d5f --- /dev/null +++ b/doc/sphinxext/prs/3751.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "599ae129dc82cbe435077c462ad8e19fee4504bf", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "examples/io/plot_objects_from_arrays.py": { + "a": 37, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3752.json b/doc/sphinxext/prs/3752.json new file mode 100644 index 00000000000..4bcf0ef6e18 --- /dev/null +++ b/doc/sphinxext/prs/3752.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6973da4709c62f54e5e16c125239180f04cdb07b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 46, + "d": 25 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 129, + "d": 114 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3754.json b/doc/sphinxext/prs/3754.json new file mode 100644 index 00000000000..21c67bcde46 --- /dev/null +++ b/doc/sphinxext/prs/3754.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "e58701054feef461c6598055a14efcaee84bb8bc", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 4, + "d": 1 + }, + "mne/data/coil_def.dat": { + "a": 78, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/artemis123/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/artemis123/artemis123.py": { + "a": 268, + "d": 0 + }, + "mne/io/artemis123/resources/Artemis123_ChannelMap.csv": { + "a": 146, + "d": 0 + }, + "mne/io/artemis123/resources/Artemis123_mneLoc.csv": { + "a": 144, + "d": 0 + }, + "mne/io/artemis123/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 46, + "d": 0 + }, + "mne/io/artemis123/utils.py": { + "a": 84, + "d": 0 + }, + "mne/tests/test_transforms.py": { + "a": 21, + "d": 1 + }, + "mne/transforms.py": { + "a": 41, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 5, + "d": 2 + }, + "setup.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3756.json b/doc/sphinxext/prs/3756.json new file mode 100644 index 00000000000..45284a3cc8a --- /dev/null +++ b/doc/sphinxext/prs/3756.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9dcc47684a24a6160d9b205e05e25f5f84a29f75", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/proj.py": { + "a": 4, + "d": 3 + }, + "mne/io/reference.py": { + "a": 11, + "d": 1 + }, + "mne/io/tests/test_reference.py": { + "a": 14, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3758.json b/doc/sphinxext/prs/3758.json new file mode 100644 index 
00000000000..c180b1d0a3f --- /dev/null +++ b/doc/sphinxext/prs/3758.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e83c880e7c5b2176a6dfe4a3e7c8279780176dde", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 6, + "d": 1 + }, + "mne/io/reference.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3759.json b/doc/sphinxext/prs/3759.json new file mode 100644 index 00000000000..00f00f85083 --- /dev/null +++ b/doc/sphinxext/prs/3759.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4bdbd00cf0366608c6b70c62898c48d19f6baeca", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 49, + "d": 20 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 2, + "d": 0 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/376.json b/doc/sphinxext/prs/376.json new file mode 100644 index 00000000000..99c551874f8 --- /dev/null +++ b/doc/sphinxext/prs/376.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e90384186c53a74f384536f319f86835ef1cea03", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 17, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3760.json b/doc/sphinxext/prs/3760.json new file mode 100644 index 00000000000..a4b15ab8fe5 --- /dev/null +++ b/doc/sphinxext/prs/3760.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4292364b9d77a0e15159571485bf12e382b0e4c1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 6, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3762.json b/doc/sphinxext/prs/3762.json new file mode 100644 index 00000000000..b5c27addf6b --- /dev/null +++ b/doc/sphinxext/prs/3762.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "81d238ab772a0b91e603881daa71d835b9be6a13", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/commands/mne_coreg.py": { + "a": 7, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 5, + "d": 2 + }, + "mne/gui/_coreg_gui.py": { + "a": 46, + "d": 42 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3763.json b/doc/sphinxext/prs/3763.json new file mode 100644 index 00000000000..f92ac18b80b --- /dev/null +++ b/doc/sphinxext/prs/3763.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e5ce0cf1712ce080b9e8d3977dd1f0d2a019e70d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "setup.py": { + "a": 10, + "d": 46 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3765.json b/doc/sphinxext/prs/3765.json new file mode 100644 index 00000000000..5eefbf33474 --- /dev/null +++ b/doc/sphinxext/prs/3765.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "50672ae26009083fe99a3d84b74c5b9adae33afd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3766.json b/doc/sphinxext/prs/3766.json new file mode 100644 
index 00000000000..87b846420cf --- /dev/null +++ b/doc/sphinxext/prs/3766.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "cb8707f43e62184c4a0f8651495ff6d3b2b81b7a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/externals/tempita/__init__.py": { + "a": 3, + "d": 1302 + }, + "mne/externals/tempita/_looper.py": { + "a": 2, + "d": 2 + }, + "mne/externals/tempita/_tempita.py": { + "a": 1182, + "d": 0 + }, + "mne/externals/tempita/compat3.py": { + "a": 11, + "d": 9 + }, + "setup.cfg": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3771.json b/doc/sphinxext/prs/3771.json new file mode 100644 index 00000000000..9eeef8b0dfc --- /dev/null +++ b/doc/sphinxext/prs/3771.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0b104fa12a4710c38977879b7cb9779b1d36687d", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 6, + "d": 1 + }, + "mne/source_space.py": { + "a": 28, + "d": 12 + }, + "mne/tests/test_source_space.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3772.json b/doc/sphinxext/prs/3772.json new file mode 100644 index 00000000000..894e68cfe74 --- /dev/null +++ b/doc/sphinxext/prs/3772.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "fe262fbbff21e52b56df68d830524ebb1848041e", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 6, + "d": 0 + }, + "mne/datasets/visual_92_categories/__init__.py": { + "a": 5, + "d": 0 + }, + "mne/datasets/visual_92_categories/visual_92_categories.py": { + "a": 78, + "d": 0 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3773.json b/doc/sphinxext/prs/3773.json new file mode 100644 index 00000000000..90b71595e40 --- /dev/null +++ b/doc/sphinxext/prs/3773.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "952de45498c3b108ac5f854335f98fb3bb5eec69", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_simulated.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 4, + "d": 3 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 4 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + }, + "setup.cfg": { + "a": 1, + "d": 1 + }, + "tutorials/plot_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_python_intro.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3775.json b/doc/sphinxext/prs/3775.json new file mode 100644 index 00000000000..9a9ba977234 --- /dev/null +++ b/doc/sphinxext/prs/3775.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bc8cc665d132a2fe7147460261ac0df7a90547d0", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3776.json b/doc/sphinxext/prs/3776.json new file mode 100644 index 00000000000..320d7589ef7 --- /dev/null +++ b/doc/sphinxext/prs/3776.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bc3627ed4e60f7b19bfda0d6736e7ccb0515e905", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "examples/visualization/plot_channel_epochs_image.py": { + "a": 5, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3777.json b/doc/sphinxext/prs/3777.json new file mode 100644 index 00000000000..2a12267ffb7 --- /dev/null +++ b/doc/sphinxext/prs/3777.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8f87050533b0938758771fb0006c629a3a408074", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/io/kit/kit.py": { + "a": 34, + "d": 37 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3778.json b/doc/sphinxext/prs/3778.json new file mode 100644 index 00000000000..cbb693af5b5 --- /dev/null +++ b/doc/sphinxext/prs/3778.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fcf328a89e24c082ca41597a792643ae21d438cc", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/forward/plot_read_bem_surfaces.py": { + "a": 16, + "d": 23 + }, + "mne/surface.py": { + "a": 14, + "d": 5 + }, + "mne/viz/_3d.py": { + "a": 87, + "d": 16 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3779.json b/doc/sphinxext/prs/3779.json new file mode 100644 index 00000000000..df5ca5f5968 --- /dev/null +++ b/doc/sphinxext/prs/3779.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e28b2b319847dc797957faf6ec8c8968ecd2efa8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/inverse_sparse/_gamma_map.py": { + "a": 7, + "d": 6 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/378.json b/doc/sphinxext/prs/378.json new file mode 100644 index 00000000000..ead289b8c90 --- /dev/null +++ b/doc/sphinxext/prs/378.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "789cae1ae1901f8899bd313f04a801ecc6cc67af", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/constants.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3780.json b/doc/sphinxext/prs/3780.json new file mode 100644 index 00000000000..546f9d3270d --- /dev/null +++ b/doc/sphinxext/prs/3780.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "eeb763060d134edc9fecb71df66b91f6116c318c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/filter.py": { + "a": 22, + "d": 12 + }, + "mne/fixes.py": { + "a": 54, + "d": 0 + }, + "mne/io/base.py": { + "a": 6, + "d": 4 + }, + "mne/tests/test_filter.py": { + "a": 22, + "d": 7 + }, + "tutorials/plot_background_filtering.py": { + "a": 49, + "d": 7 + } + 
} +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3782.json b/doc/sphinxext/prs/3782.json new file mode 100644 index 00000000000..d7cb818a525 --- /dev/null +++ b/doc/sphinxext/prs/3782.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "496db659369c671fc4a4a244cfdec3c6845cc800", + "authors": [ + { + "n": "Keith Doelling", + "e": "keith.doelling@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 4, + "d": 4 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3783.json b/doc/sphinxext/prs/3783.json new file mode 100644 index 00000000000..79f9deaf772 --- /dev/null +++ b/doc/sphinxext/prs/3783.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a85cd392c112adef021916af2af8ba9866febdb5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 5, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 15, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3789.json b/doc/sphinxext/prs/3789.json new file mode 100644 index 00000000000..a92187992c5 --- /dev/null +++ b/doc/sphinxext/prs/3789.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "0f5aaa5ee6c24eefc7cfbb0035fa07a06f44705f", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 5, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/transformer.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 4, + "d": 3 + }, + "mne/time_frequency/__init__.py": { + "a": 5, + "d": 4 + }, + "mne/time_frequency/_stockwell.py": { + "a": 20, + "d": 11 + }, + "mne/time_frequency/multitaper.py": { + "a": 93, + "d": 6 + }, + "mne/time_frequency/psd.py": { + "a": 22, + "d": 11 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 2, + "d": 3 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 12, + "d": 13 + }, + "mne/time_frequency/tfr.py": { + "a": 88, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3792.json b/doc/sphinxext/prs/3792.json new file mode 100644 index 00000000000..e7fb2223d4c --- /dev/null +++ b/doc/sphinxext/prs/3792.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "85670121195b5021f100e5470dcb4d66ebcf2e38", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 6, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 89, + "d": 12 + }, + "mne/viz/misc.py": { + "a": 24, + "d": 19 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_epochs.py": { + "a": 37, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3793.json b/doc/sphinxext/prs/3793.json new file mode 100644 index 00000000000..8ee1e13a617 --- /dev/null +++ b/doc/sphinxext/prs/3793.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4aa330ed031f0c5b8ac0e72f46c155b50ba242e6", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 0 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3794.json 
b/doc/sphinxext/prs/3794.json new file mode 100644 index 00000000000..fba08b6f897 --- /dev/null +++ b/doc/sphinxext/prs/3794.json @@ -0,0 +1,183 @@ +{ + "merge_commit_sha": "23575ddc79324e5e846318ee6adf9daf55baf4dd", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 16, + "d": 16 + }, + "mne/channels/interpolation.py": { + "a": 4, + "d": 4 + }, + "mne/connectivity/spectral.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 79, + "d": 17 + }, + "mne/event.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 3 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 52, + "d": 11 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 2 + }, + "mne/io/cnt/cnt.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 4, + "d": 4 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 4, + "d": 4 + }, + "mne/io/nicolet/nicolet.py": { + "a": 2, + "d": 2 + }, + "mne/io/proj.py": { + "a": 5, + "d": 5 + }, + "mne/io/reference.py": { + "a": 6, + "d": 6 + }, + "mne/io/tests/test_reference.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 18, + "d": 18 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/stim.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/xdawn.py": { + "a": 7, + "d": 7 + }, + "mne/realtime/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 2 + }, + "mne/stats/regression.py": { + "a": 2, + "d": 2 + }, + "mne/tests/common.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 6 + }, + "mne/time_frequency/psd.py": { + "a": 5, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + }, + "mne/utils.py": { + "a": 6, + "d": 6 + }, + "mne/viz/ica.py": { + "a": 11, + "d": 11 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3796.json b/doc/sphinxext/prs/3796.json new file mode 100644 index 00000000000..d3a00e252c1 --- /dev/null +++ b/doc/sphinxext/prs/3796.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "095db9748280807d3e5c01c13d6b850cfb66f741", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 64, + "d": 43 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3797.json b/doc/sphinxext/prs/3797.json new file mode 100644 index 00000000000..4ff52fa5118 --- /dev/null +++ b/doc/sphinxext/prs/3797.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "eb4ccea83a2eb483a4a51dce04e03364f4bacb78", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + 
"mne/viz/evoked.py": { + "a": 23, + "d": 9 + }, + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 30, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3798.json b/doc/sphinxext/prs/3798.json new file mode 100644 index 00000000000..f2cabb72309 --- /dev/null +++ b/doc/sphinxext/prs/3798.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "61b2decaacc3184ff01dff956836ea24065e542c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/dipole.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/38.json b/doc/sphinxext/prs/38.json new file mode 100644 index 00000000000..0a9aa633008 --- /dev/null +++ b/doc/sphinxext/prs/38.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dac473e1be85086a7509ec4f5285ced4a9d12254", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz.py": { + "a": 28, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3800.json b/doc/sphinxext/prs/3800.json new file mode 100644 index 00000000000..a84afd1334f --- /dev/null +++ b/doc/sphinxext/prs/3800.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3842c11a33e919e3296aa4a9615bd52f1a545e2b", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3802.json b/doc/sphinxext/prs/3802.json new file mode 100644 index 00000000000..48e87c160f4 --- /dev/null +++ b/doc/sphinxext/prs/3802.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "630632c83b4e3b258a77e9bb4d602310b367660e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 4, + "d": 3 + }, + "mne/io/constants.py": { + "a": 16, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3803.json b/doc/sphinxext/prs/3803.json new file mode 100644 index 00000000000..be5643f1aba --- /dev/null +++ b/doc/sphinxext/prs/3803.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e3c5cc0b80fdbbb0b5aac7d2e56f3d17195fe0d4", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 8, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3805.json b/doc/sphinxext/prs/3805.json new file mode 100644 index 00000000000..2c05ef3f7b9 --- /dev/null +++ b/doc/sphinxext/prs/3805.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e2f79af9ad9d97a417aab8633161d10652fa158f", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "tutorials/plot_visualize_evoked.py": { + "a": 24, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3806.json b/doc/sphinxext/prs/3806.json new file mode 100644 index 00000000000..55ae1cf8cf2 --- /dev/null +++ b/doc/sphinxext/prs/3806.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4ca914d00351d3f56eaf8a02ddf582dc8f4758d5", + "authors": [ + { + "n": "ppasler", + "e": "paul@ppasler.de" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + 
"mne/preprocessing/tests/test_ica.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3808.json b/doc/sphinxext/prs/3808.json new file mode 100644 index 00000000000..5242dd400d6 --- /dev/null +++ b/doc/sphinxext/prs/3808.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8ecb2af8a969a76f5b3d115f4a362f06aebaf747", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/dipole.py": { + "a": 42, + "d": 24 + }, + "mne/tests/test_dipole.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3813.json b/doc/sphinxext/prs/3813.json new file mode 100644 index 00000000000..24e9df9b2ea --- /dev/null +++ b/doc/sphinxext/prs/3813.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b915f8481412633421c8546fe7c11f6af274c86d", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 27, + "d": 17 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3815.json b/doc/sphinxext/prs/3815.json new file mode 100644 index 00000000000..ccc65efd50c --- /dev/null +++ b/doc/sphinxext/prs/3815.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "16e7d2c89cc4f0430e701528588cbcec43e4f933", + "authors": [ + { + "n": "ppasler", + "e": "paul@ppasler.de" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3817.json b/doc/sphinxext/prs/3817.json new file mode 100644 index 00000000000..662171c81d3 --- /dev/null +++ b/doc/sphinxext/prs/3817.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7919f70b64866247ab1c172e2fa86c53ae13cc9c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/event.py": { + "a": 4, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 14, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3818.json b/doc/sphinxext/prs/3818.json new file mode 100644 index 00000000000..430bc57bb88 --- /dev/null +++ b/doc/sphinxext/prs/3818.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "cf2c0cc1f509243d3e32e73b6b877aa0c2f20535", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 14, + "d": 12 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 10, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 14, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3821.json b/doc/sphinxext/prs/3821.json new file mode 100644 index 00000000000..cd2dae7190c --- /dev/null +++ b/doc/sphinxext/prs/3821.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d6c6dc36aceed1b5900d8920f76555967a448192", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 30, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 7, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 16, + "d": 33 + }, + "mne/viz/tests/test_raw.py": { + "a": 61, + "d": 20 + }, + "mne/viz/utils.py": { + "a": 401, + "d": 11 + }, + 
"tutorials/plot_artifacts_correction_rejection.py": { + "a": 3, + "d": 0 + }, + "tutorials/plot_visualize_raw.py": { + "a": 13, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3823.json b/doc/sphinxext/prs/3823.json new file mode 100644 index 00000000000..5491880c06b --- /dev/null +++ b/doc/sphinxext/prs/3823.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d0e4c98368a553b72c8a805944cb8b5d55e60b48", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/fixes.py": { + "a": 432, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 7, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3825.json b/doc/sphinxext/prs/3825.json new file mode 100644 index 00000000000..9b380cda755 --- /dev/null +++ b/doc/sphinxext/prs/3825.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4eac2dd9e8c70ea3c3c4db98533376c3ed31fde4", + "authors": [ + { + "n": "Niklas Wilming", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/io/ctf/ctf.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3826.json b/doc/sphinxext/prs/3826.json new file mode 100644 index 00000000000..2eaf360f310 --- /dev/null +++ b/doc/sphinxext/prs/3826.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "70062c9d9fcb70ce8e17dcdc4418cba78c6a39e6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 0 + }, + "mne/bem.py": { + "a": 6, + "d": 6 + }, + "mne/cov.py": { + "a": 11, + "d": 9 + }, + "mne/dipole.py": { + "a": 4, + "d": 3 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 2, + "d": 3 + }, + "mne/io/artemis123/artemis123.py": { + "a": 6, + "d": 1 + }, + "mne/io/base.py": { + "a": 4, + "d": 0 + }, + "mne/io/pick.py": { + "a": 4, + "d": 0 + }, + "mne/io/tests/test_pick.py": { + "a": 6, + "d": 4 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 4 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 33, + "d": 33 + }, + "mne/preprocessing/stim.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 22, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 4, + "d": 5 + }, + "mne/stats/cluster_level.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_cov.py": { + "a": 38, + "d": 2 + }, + "mne/utils.py": { + "a": 7, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3828.json b/doc/sphinxext/prs/3828.json new file mode 100644 index 00000000000..e0a4631aa5e --- /dev/null +++ b/doc/sphinxext/prs/3828.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "22f62a56109a87b62878962fcea332939f0b6c75", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 14, + "d": 4 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 4, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3829.json b/doc/sphinxext/prs/3829.json new file mode 100644 index 00000000000..a31805f4fdb --- /dev/null +++ b/doc/sphinxext/prs/3829.json @@ -0,0 +1,15 @@ +{ + 
"merge_commit_sha": "bac50dd08361b10d0a65c614ea2de06308750411", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3831.json b/doc/sphinxext/prs/3831.json new file mode 100644 index 00000000000..9bdf051e5e3 --- /dev/null +++ b/doc/sphinxext/prs/3831.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "9254f5af667ee70673d5a0f69fa48686be02dced", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 45, + "d": 104 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 4, + "d": 3 + }, + "mne/report.py": { + "a": 16, + "d": 9 + }, + "mne/tests/test_report.py": { + "a": 7, + "d": 9 + }, + "mne/utils.py": { + "a": 5, + "d": 7 + }, + "mne/viz/misc.py": { + "a": 12, + "d": 12 + }, + "mne/viz/topomap.py": { + "a": 6, + "d": 5 + }, + "mne/viz/utils.py": { + "a": 16, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3833.json b/doc/sphinxext/prs/3833.json new file mode 100644 index 00000000000..56c56b91c0c --- /dev/null +++ b/doc/sphinxext/prs/3833.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "86abfc63c71d0beb95bf7ba17b48dc3b8c00dbc5", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 26, + "d": 0 + }, + "mne/decoding/search_light.py": { + "a": 23, + "d": 15 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 40, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3834.json b/doc/sphinxext/prs/3834.json new file mode 100644 index 00000000000..35f1d47b5ce --- /dev/null +++ b/doc/sphinxext/prs/3834.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f83392c5420ddd13f0dc19d843525e89b1f7043d", + "authors": [ + { + "n": "Matteo Visconti di Oleggio Castello", + "e": null + } + ], + "changes": { + "mne/viz/decoding.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3835.json b/doc/sphinxext/prs/3835.json new file mode 100644 index 00000000000..57f48a6c99e --- /dev/null +++ b/doc/sphinxext/prs/3835.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b62de8656c4606ec3858bb098abcd6731659d62e", + "authors": [ + { + "n": "Lorenzo Alfine", + "e": "lorenzo.alfine@gmail.com" + } + ], + "changes": { + "tutorials/plot_creating_data_structures.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3836.json b/doc/sphinxext/prs/3836.json new file mode 100644 index 00000000000..46bf71f495f --- /dev/null +++ b/doc/sphinxext/prs/3836.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c2d1f3e7a569bd356416d0c9dde76ff03b5bb958", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3842.json b/doc/sphinxext/prs/3842.json new file mode 100644 index 00000000000..6cb6f9ba7dc --- /dev/null +++ b/doc/sphinxext/prs/3842.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "6175ac800a991d84748ae4e9e5b325d9a8fb9fab", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 4, + "d": 4 + }, + "doc/documentation.rst": { + "a": 3, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_vector_mne_solution.py": { + "a": 41, + "d": 0 + }, 
+ "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 111, + "d": 58 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 84, + "d": 39 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 589, + "d": 310 + }, + "mne/tests/test_source_estimate.py": { + "a": 173, + "d": 83 + }, + "mne/viz/_3d.py": { + "a": 187, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 22, + "d": 2 + }, + "tutorials/plot_dipole_orientations.py": { + "a": 206, + "d": 0 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 24, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3843.json b/doc/sphinxext/prs/3843.json new file mode 100644 index 00000000000..b7090229d06 --- /dev/null +++ b/doc/sphinxext/prs/3843.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "79d2ab8e03c03b4564c5c3904496659950b1e7ca", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_transformer.py": { + "a": 5, + "d": 0 + }, + "mne/decoding/transformer.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3845.json b/doc/sphinxext/prs/3845.json new file mode 100644 index 00000000000..684427b2c1a --- /dev/null +++ b/doc/sphinxext/prs/3845.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f852fb2d1c7d901f08894ada60c34464d0c735f3", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 14, + "d": 0 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3848.json b/doc/sphinxext/prs/3848.json new file mode 100644 index 00000000000..815e0d8f390 --- /dev/null +++ b/doc/sphinxext/prs/3848.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "faec5756927289374197667e8de563448de3281c", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 3, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3851.json b/doc/sphinxext/prs/3851.json new file mode 100644 index 00000000000..fc93531ebf4 --- /dev/null +++ b/doc/sphinxext/prs/3851.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "990159a3b1598b931449828bf6345e8a97bad6d0", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/baseline.py": { + "a": 5, + "d": 4 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3854.json b/doc/sphinxext/prs/3854.json new file mode 100644 index 00000000000..7fe0a03d975 --- /dev/null +++ b/doc/sphinxext/prs/3854.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "06e814b4b19834191e8b6d2dc50e4ac46d1de672", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 8, + "d": 0 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 9, + "d": 8 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 8, + "d": 7 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 8, + "d": 6 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 8, + "d": 
6 + }, + "mne/beamformer/_dics.py": { + "a": 92, + "d": 103 + }, + "mne/beamformer/_lcmv.py": { + "a": 43, + "d": 125 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 41, + "d": 38 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 12, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3860.json b/doc/sphinxext/prs/3860.json new file mode 100644 index 00000000000..30d8e762c47 --- /dev/null +++ b/doc/sphinxext/prs/3860.json @@ -0,0 +1,151 @@ +{ + "merge_commit_sha": "5355095401dea28687d5158ddf8a2ec3bcef3451", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/cited.rst": { + "a": 2, + "d": 0 + }, + "doc/contributing.rst": { + "a": 3, + "d": 1 + }, + "doc/customizing_git.rst": { + "a": 2, + "d": 0 + }, + "doc/faq.rst": { + "a": 2, + "d": 0 + }, + "doc/getting_started.rst": { + "a": 2, + "d": 0 + }, + "doc/manual/index.rst": { + "a": 2, + "d": 0 + }, + "doc/manual/sample_dataset.rst": { + "a": 1, + "d": 1 + }, + "doc/martinos.rst": { + "a": 3, + "d": 1 + }, + "doc/mne_cpp.rst": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 14, + "d": 0 + }, + "doc/references.rst": { + "a": 1, + "d": 1 + }, + "doc/tutorials.rst": { + "a": 2, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 14, + "d": 7 + }, + "mne/bem.py": { + "a": 8, + "d": 8 + }, + "mne/epochs.py": { + "a": 15, + "d": 6 + }, + "mne/evoked.py": { + "a": 0, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 0, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 3 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 8, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 6, + "d": 6 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 0, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 0, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 2 + }, + "mne/io/egi/egi.py": { + "a": 0, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 0, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 10, + "d": 10 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 7, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 8 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 0, + "d": 2 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3861.json b/doc/sphinxext/prs/3861.json new file mode 100644 index 00000000000..d5887a173cd --- /dev/null +++ b/doc/sphinxext/prs/3861.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "04a9c9c96977c9d1abb86fddb7f4aaeb76fb374a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 45, + "d": 36 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3865.json b/doc/sphinxext/prs/3865.json new file mode 100644 index 00000000000..baffc3048bd --- /dev/null +++ b/doc/sphinxext/prs/3865.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "80b7446482997c2221e855541576d0f5f21323e4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3866.json b/doc/sphinxext/prs/3866.json new file mode 100644 
index 00000000000..7d8c9c70a10 --- /dev/null +++ b/doc/sphinxext/prs/3866.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0b457fff9cc0bcc095d65d11c707ccd3f45b6e60", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/tests/test_montage.py": { + "a": 15, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3867.json b/doc/sphinxext/prs/3867.json new file mode 100644 index 00000000000..6fa87091be0 --- /dev/null +++ b/doc/sphinxext/prs/3867.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ba3d5852fd24cf55c06bae7d2d87177ba38c038b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/proj.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3868.json b/doc/sphinxext/prs/3868.json new file mode 100644 index 00000000000..e2528561518 --- /dev/null +++ b/doc/sphinxext/prs/3868.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "86c98fe1cbcbca9b7e8e8e4433a7f5238f99ba54", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 2, + "d": 3 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 4, + "d": 4 + }, + "mne/viz/misc.py": { + "a": 8, + "d": 7 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/387.json b/doc/sphinxext/prs/387.json new file mode 100644 index 00000000000..c889a7b9f6c --- /dev/null +++ b/doc/sphinxext/prs/387.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a45d96be422003c474474879a4022acdb6b7c597", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 11, + "d": 0 + }, + "mne/fiff/tests/test_raw.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3872.json b/doc/sphinxext/prs/3872.json new file mode 100644 index 00000000000..6a7e525105c --- /dev/null +++ b/doc/sphinxext/prs/3872.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9c6a99e3c3ec693e3fb21014aea42c03f589bc5e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3873.json b/doc/sphinxext/prs/3873.json new file mode 100644 index 00000000000..d4588325677 --- /dev/null +++ b/doc/sphinxext/prs/3873.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "74e1af45de44715ce93455fed74fd27b69a52947", + "authors": [ + { + "n": null, + "e": "erkkahe@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3874.json b/doc/sphinxext/prs/3874.json new file mode 100644 index 00000000000..0a8514817c1 --- /dev/null +++ b/doc/sphinxext/prs/3874.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ab42e6a42548a98eb18910a21aeb0194bd506a40", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/search_light.py": { + "a": 20, + "d": 0 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 28, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3881.json b/doc/sphinxext/prs/3881.json new file mode 100644 index 00000000000..56729cfafc7 --- /dev/null +++ b/doc/sphinxext/prs/3881.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "33e2f85e24400581d4c604ef86c7697e1b703d91", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "doc/contributing.rst": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3885.json b/doc/sphinxext/prs/3885.json new file mode 100644 index 00000000000..842b3c6a058 --- /dev/null +++ b/doc/sphinxext/prs/3885.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0f3a38aa6f1bddd18e43168544298a187972770a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 17, + "d": 1 + }, + "mne/utils.py": { + "a": 32, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3887.json b/doc/sphinxext/prs/3887.json new file mode 100644 index 00000000000..7a965d2a422 --- /dev/null +++ b/doc/sphinxext/prs/3887.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2169dd336b0956d3ba132b374e6f0302f13253dd", + "authors": [ + { + "n": "ppasler", + "e": "paul@ppasler.de" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 7, + "d": 11 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3888.json b/doc/sphinxext/prs/3888.json new file mode 100644 index 00000000000..5ad94d8f16d --- /dev/null +++ b/doc/sphinxext/prs/3888.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d00d27eff1ac14e21b1b7041316e8c8e8847e668", + "authors": [ + { + "n": "jaeilepp", + "e": 
"jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 6, + "d": 10 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3892.json b/doc/sphinxext/prs/3892.json new file mode 100644 index 00000000000..572476e092f --- /dev/null +++ b/doc/sphinxext/prs/3892.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "e0faad3d65e3f4ad1c2eced112c4070b08cb1963", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 0, + "d": 3 + }, + "doc/advanced_setup.rst": { + "a": 120, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 2 + }, + "doc/configure_git.rst": { + "a": 609, + "d": 0 + }, + "doc/contributing.rst": { + "a": 77, + "d": 859 + }, + "doc/customizing_git.rst": { + "a": 2, + "d": 0 + }, + "doc/faq.rst": { + "a": 0, + "d": 4 + }, + "doc/getting_started.rst": { + "a": 10, + "d": 10 + }, + "doc/git_links.inc": { + "a": 5, + "d": 2 + }, + "doc/install_mne_c.rst": { + "a": 17, + "d": 37 + }, + "doc/install_mne_python.rst": { + "a": 43, + "d": 196 + }, + "doc/known_projects.inc": { + "a": 6, + "d": 0 + }, + "mne/source_space.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3895.json b/doc/sphinxext/prs/3895.json new file mode 100644 index 00000000000..5ff9e3cc4b3 --- /dev/null +++ b/doc/sphinxext/prs/3895.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "eb418d24e0cdab1254b4524e5893bf13bcc800ba", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3896.json b/doc/sphinxext/prs/3896.json new file mode 100644 index 00000000000..39ff4a19f04 --- /dev/null +++ b/doc/sphinxext/prs/3896.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "04de57b3da07d47489be4f82076d0012fd25d121", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 46, + "d": 18 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 5, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3898.json b/doc/sphinxext/prs/3898.json new file mode 100644 index 00000000000..692680f5e37 --- /dev/null +++ b/doc/sphinxext/prs/3898.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "34a68cc849be748d062701b6b9fdf16e801be176", + "authors": [ + { + "n": "ppasler", + "e": "paul@ppasler.de" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3899.json b/doc/sphinxext/prs/3899.json new file mode 100644 index 00000000000..0da73e4d8db --- /dev/null +++ b/doc/sphinxext/prs/3899.json @@ -0,0 +1,231 @@ +{ + "merge_commit_sha": "10f69603a5788e6a44692dfed204d32cdbe90502", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 4, + "d": 0 + }, + "doc/configure_git.rst": { + "a": 4, + "d": 4 + }, + "doc/contributing.rst": { + "a": 5, + "d": 5 + 
}, + "doc/faq.rst": { + "a": 1, + "d": 1 + }, + "doc/getting_started.rst": { + "a": 2, + "d": 2 + }, + "doc/index.rst": { + "a": 2, + "d": 2 + }, + "doc/manual/datasets_index.rst": { + "a": 37, + "d": 23 + }, + "doc/manual/decoding.rst": { + "a": 3, + "d": 2 + }, + "doc/manual/migrating.rst": { + "a": 29, + "d": 29 + }, + "doc/manual/preprocessing/bads.rst": { + "a": 2, + "d": 0 + }, + "doc/manual/preprocessing/filter.rst": { + "a": 2, + "d": 0 + }, + "doc/manual/preprocessing/overview.rst": { + "a": 4, + "d": 4 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 6, + "d": 8 + }, + "doc/manual/statistics.rst": { + "a": 5, + "d": 5 + }, + "doc/manual/visualization.rst": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 129, + "d": 55 + }, + "doc/tutorials/seven_stories_about_mne.rst": { + "a": 4, + "d": 4 + }, + "doc/whats_new.rst": { + "a": 37, + "d": 37 + }, + "examples/io/plot_objects_from_arrays.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/chpi.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 45, + "d": 39 + }, + "mne/datasets/megsim/megsim.py": { + "a": 22, + "d": 20 + }, + "mne/datasets/utils.py": { + "a": 6, + "d": 8 + }, + "mne/decoding/base.py": { + "a": 13, + "d": 16 + }, + "mne/decoding/ems.py": { + "a": 27, + "d": 24 + }, + "mne/decoding/time_frequency.py": { + "a": 4, + "d": 5 + }, + "mne/decoding/transformer.py": { + "a": 9, + "d": 7 + }, + "mne/dipole.py": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 13, + "d": 8 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 4, + "d": 2 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 12, + "d": 6 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/infomax_.py": { + "a": 12, + "d": 14 + }, + "mne/preprocessing/xdawn.py": { + "a": 16, + "d": 45 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 4, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 5 + }, + "mne/source_space.py": { + "a": 2, + "d": 1 + }, + "mne/stats/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_ecog.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_eeg_erp.py": { + "a": 5, + "d": 5 + }, + "tutorials/plot_forward.py": { + "a": 7, + "d": 2 + }, + "tutorials/plot_introduction.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/39.json b/doc/sphinxext/prs/39.json new file mode 100644 index 00000000000..c5323ebcb2d --- /dev/null +++ b/doc/sphinxext/prs/39.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "51367f258781e9d4e27cb43d5ea6054bab1573f3", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 1 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 5, + "d": 0 + }, + "mne/cov.py": { + "a": 122, + "d": 39 + }, + "mne/fiff/__init__.py": { + 
"a": 1, + "d": 1 + }, + "mne/fiff/pick.py": { + "a": 31, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 22, + "d": 1 + }, + "mne/viz.py": { + "a": 11, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3904.json b/doc/sphinxext/prs/3904.json new file mode 100644 index 00000000000..2ad46ce0081 --- /dev/null +++ b/doc/sphinxext/prs/3904.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a68fde2c71a475fd547bd14b31497beab7a5827e", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/coreg.py": { + "a": 20, + "d": 41 + }, + "mne/gui/_file_traits.py": { + "a": 5, + "d": 59 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3905.json b/doc/sphinxext/prs/3905.json new file mode 100644 index 00000000000..8a86150cbb9 --- /dev/null +++ b/doc/sphinxext/prs/3905.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7cbfd5d295fcb32977f700485a008ea9a0fdd9d8", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 16, + "d": 8 + }, + "mne/viz/tests/test_topo.py": { + "a": 44, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 63, + "d": 22 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3909.json b/doc/sphinxext/prs/3909.json new file mode 100644 index 00000000000..7364667c671 --- /dev/null +++ b/doc/sphinxext/prs/3909.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "953764b3ca2e9a9e4ee6a1d59fdf3d64ecdf2e2a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 23, + "d": 14 + }, + "mne/io/pick.py": { + "a": 8, + "d": 5 + }, + "mne/tests/test_cov.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/391.json b/doc/sphinxext/prs/391.json new file mode 100644 index 00000000000..38ab32156e5 --- /dev/null +++ b/doc/sphinxext/prs/391.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a5336ccdd4764dfa0c4baeea123e24626149cd68", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3910.json b/doc/sphinxext/prs/3910.json new file mode 100644 index 00000000000..812230263bb --- /dev/null +++ b/doc/sphinxext/prs/3910.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "685a0d8db149bcc464a93c6a34fb56042ead2adc", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 22, + "d": 3 + }, + "mne/channels/tests/test_montage.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3911.json b/doc/sphinxext/prs/3911.json new file mode 100644 index 00000000000..800d3295e4d --- /dev/null +++ b/doc/sphinxext/prs/3911.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a94f81a5e228fe6592ee9e31902a48f24724b530", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3912.json b/doc/sphinxext/prs/3912.json new file mode 100644 index 00000000000..a576b001cf1 --- /dev/null +++ b/doc/sphinxext/prs/3912.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "06697e3f4926f59f1170a0dfbdcb1d6189c5bd4c", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3913.json b/doc/sphinxext/prs/3913.json new file mode 100644 index 00000000000..88e17c3c1ed --- /dev/null +++ b/doc/sphinxext/prs/3913.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b23d87371da5970c79f2d8b031af6071efef8482", + "authors": [ + { + "n": "Jon Houck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/label.py": { + "a": 5, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3918.json b/doc/sphinxext/prs/3918.json new file mode 100644 index 00000000000..beb526b049e --- /dev/null +++ b/doc/sphinxext/prs/3918.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5fd78c159cf442853308393e774086367efdff27", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 0, + "d": 10 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 25, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3920.json b/doc/sphinxext/prs/3920.json new file mode 100644 index 00000000000..81ec0ed99ee --- /dev/null +++ b/doc/sphinxext/prs/3920.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b3b04811d1c7412e00b47e86d4126dad4843397c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 63, + "d": 32 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/raw.py": { + "a": 3, + "d": 4 + }, + "mne/simulation/tests/test_raw.py": { + "a": 4, + "d": 8 + }, + "mne/tests/test_chpi.py": { + "a": 167, + "d": 27 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3922.json b/doc/sphinxext/prs/3922.json new file mode 100644 index 00000000000..6c81bf2cea9 --- /dev/null +++ 
b/doc/sphinxext/prs/3922.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "86777861f193d25d8c717bc01b7de048671554d1", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/ctf/info.py": { + "a": 14, + "d": 2 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 17, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3923.json b/doc/sphinxext/prs/3923.json new file mode 100644 index 00000000000..83ccdab209f --- /dev/null +++ b/doc/sphinxext/prs/3923.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "ef6b1063b642af19c08221923dc7679e5a865650", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/manual/datasets_index.rst": { + "a": 20, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 14, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 5, + "d": 1 + }, + "examples/decoding/decoding_rsa.py": { + "a": 180, + "d": 0 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 4, + "d": 4 + }, + "mne/epochs.py": { + "a": 7, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3924.json b/doc/sphinxext/prs/3924.json new file mode 100644 index 00000000000..44af9a0862d --- /dev/null +++ b/doc/sphinxext/prs/3924.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f255803a30b153b217b6ad335a4c159fbf00f194", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/search_light.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3927.json b/doc/sphinxext/prs/3927.json new file mode 100644 index 00000000000..a2f456b3aa2 --- /dev/null +++ b/doc/sphinxext/prs/3927.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7f725beb32c5fb2bc05c416f74967d9bb2f3e98b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3929.json b/doc/sphinxext/prs/3929.json new file mode 100644 index 00000000000..2dbd8aacbc6 --- /dev/null +++ b/doc/sphinxext/prs/3929.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "883a276f417e8df6e31d9db6eb3ab6e838daf270", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 18, + "d": 11 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/393.json b/doc/sphinxext/prs/393.json new file mode 100644 index 00000000000..120372c9ae2 --- /dev/null +++ b/doc/sphinxext/prs/393.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "1a39aaa4d1510abc2c37e98b9c39d0611e628b5a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 7, + "d": 0 + }, + "doc/source/getting_started.rst": { + "a": 27, + "d": 0 + }, + "doc/source/python_reference.rst": { + "a": 13, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 6, + "d": 1 + }, + "mne/cuda.py": { + "a": 386, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 9 + }, + "mne/fiff/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 51, + "d": 62 + }, 
+ "mne/filter.py": { + "a": 373, + "d": 144 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 5 + }, + "mne/tests/test_filter.py": { + "a": 59, + "d": 6 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 4, + "d": 1 + }, + "mne/utils.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3931.json b/doc/sphinxext/prs/3931.json new file mode 100644 index 00000000000..46d3c3d5c87 --- /dev/null +++ b/doc/sphinxext/prs/3931.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "83b9cedc1d460a20dc20ca9e84f29da01e26c762", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 4, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3935.json b/doc/sphinxext/prs/3935.json new file mode 100644 index 00000000000..dad0e578508 --- /dev/null +++ b/doc/sphinxext/prs/3935.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "4f948fa379b832b48e9c33af2fb458b27a173f32", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/mne_coreg.py": { + "a": 13, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 17, + "d": 4 + }, + "mne/gui/_coreg_gui.py": { + "a": 80, + "d": 65 + }, + "mne/gui/_file_traits.py": { + "a": 3, + "d": 4 + }, + "mne/gui/_viewer.py": { + "a": 7, + "d": 7 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 43, + "d": 35 + }, + "mne/surface.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3936.json b/doc/sphinxext/prs/3936.json new file mode 100644 index 00000000000..afa0c417344 --- /dev/null +++ b/doc/sphinxext/prs/3936.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "1241392f40554b7523a28ccc36cd9d991a72c65c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 2 + }, + "examples/visualization/plot_warp_surfaces.py": { + "a": 0, + "d": 83 + }, + "mne/source_space.py": { + "a": 74, + "d": 62 + }, + "mne/surface.py": { + "a": 5, + "d": 8 + }, + "mne/tests/test_source_space.py": { + "a": 30, + "d": 40 + }, + "mne/tests/test_transforms.py": { + "a": 7, + "d": 3 + }, + "mne/transforms.py": { + "a": 37, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3937.json b/doc/sphinxext/prs/3937.json new file mode 100644 index 00000000000..e9af3d5c052 --- /dev/null +++ b/doc/sphinxext/prs/3937.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7d653f91d9cc3661dc7a27f411df457fd3b1ebeb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/__init__.py": { + "a": 18, + "d": 5 + }, + "mne/gui/_coreg_gui.py": { + "a": 89, + "d": 71 + }, + "mne/gui/_fiducials_gui.py": { + "a": 11, + "d": 8 + }, + "mne/gui/_file_traits.py": { + "a": 12, + "d": 8 + }, + "mne/utils.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3940.json b/doc/sphinxext/prs/3940.json new file mode 100644 index 00000000000..e55c505bae7 --- /dev/null +++ b/doc/sphinxext/prs/3940.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d568dcd1254410739a06b9a9852814fbc9ffe35f", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 0, + "d": 5 + }, + "mne/gui/_viewer.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/3941.json b/doc/sphinxext/prs/3941.json new file mode 100644 index 00000000000..e0dbde77015 --- /dev/null +++ b/doc/sphinxext/prs/3941.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7f780890efefa6bcfb151ac5f76f7d3ed907b3e4", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 3, + "d": 10 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 4, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3942.json b/doc/sphinxext/prs/3942.json new file mode 100644 index 00000000000..f130219c93e --- /dev/null +++ b/doc/sphinxext/prs/3942.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f7e4051b97ff36f855cca38ed4889301d837c0c1", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 2, + "d": 2 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 0, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3943.json b/doc/sphinxext/prs/3943.json new file mode 100644 index 00000000000..5c8c6175c6e --- /dev/null +++ b/doc/sphinxext/prs/3943.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7e0b06d3f0492c51a3b1c5591ddb5e3e0c5083b2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/forward/plot_left_cerebellum_volume_source.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3944.json b/doc/sphinxext/prs/3944.json new file mode 100644 index 00000000000..21296b9479a --- /dev/null +++ b/doc/sphinxext/prs/3944.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "86fbb24595382b695904e608948239de824cd6e4", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_raw.py": { + "a": 6, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3946.json b/doc/sphinxext/prs/3946.json new file mode 100644 index 00000000000..5ce6a22dc25 --- /dev/null +++ b/doc/sphinxext/prs/3946.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "117696c20f1fbf9a60015602c48373dccd7b3e9b", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/annotations.py": { + "a": 16, + "d": 6 + }, + "mne/io/base.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 21, + "d": 6 + }, + "mne/tests/test_epochs.py": { + "a": 15, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3947.json b/doc/sphinxext/prs/3947.json new file mode 100644 index 00000000000..9bd15d04d65 --- /dev/null +++ b/doc/sphinxext/prs/3947.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e08029c2211807e8b4c2531268918a1e6a7898aa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 4, + "d": 4 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3948.json b/doc/sphinxext/prs/3948.json new file mode 100644 index 00000000000..df68960704a --- /dev/null +++ b/doc/sphinxext/prs/3948.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "4f677bcc08befee58666ea8c6d5bf75a5e37ddda", + 
"authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/commands/mne_coreg.py": { + "a": 17, + "d": 4 + }, + "mne/gui/__init__.py": { + "a": 48, + "d": 22 + }, + "mne/gui/_backend.py": { + "a": 7, + "d": 0 + }, + "mne/gui/_coreg_gui.py": { + "a": 36, + "d": 20 + }, + "mne/gui/_file_traits.py": { + "a": 5, + "d": 3 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 61, + "d": 13 + }, + "mne/gui/_marker_gui.py": { + "a": 3, + "d": 6 + }, + "mne/gui/_viewer.py": { + "a": 6, + "d": 15 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 45, + "d": 25 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 4, + "d": 1 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 5, + "d": 3 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 26, + "d": 3 + }, + "mne/utils.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3949.json b/doc/sphinxext/prs/3949.json new file mode 100644 index 00000000000..019315d0fa4 --- /dev/null +++ b/doc/sphinxext/prs/3949.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "bbfdb076e00727cbdd5aae6ced190c4a853dfdaf", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/decoding/decoding_rsa.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 16, + "d": 17 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 6, + "d": 4 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 9, + "d": 7 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 9, + "d": 8 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 15, + "d": 14 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 50, + "d": 21 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 135, + "d": 38 + }, + "mne/decoding/tests/test_base.py": { + "a": 129, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3950.json b/doc/sphinxext/prs/3950.json new file mode 100644 index 00000000000..b0a279af373 --- /dev/null +++ b/doc/sphinxext/prs/3950.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8d95b2c3b9df9d27c6d41430b3f3d5c07c9363bd", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 19, + "d": 8 + }, + "mne/utils.py": { + "a": 13, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3954.json b/doc/sphinxext/prs/3954.json new file mode 100644 index 00000000000..25de3526af9 --- /dev/null +++ b/doc/sphinxext/prs/3954.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4c7a243bc8a91c1f4c968287694d89899f6636bf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/proj.py": { + "a": 10, + "d": 7 + }, + "mne/tests/test_proj.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3955.json b/doc/sphinxext/prs/3955.json new file mode 100644 index 00000000000..c75e7228968 --- /dev/null +++ b/doc/sphinxext/prs/3955.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "210852aacf73f4da8e9a38cedf0f900f18a8bfcf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 16, + "d": 16 + }, + "mne/io/proj.py": { + "a": 14, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 5, 
+ "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3958.json b/doc/sphinxext/prs/3958.json new file mode 100644 index 00000000000..b5e9c707d6b --- /dev/null +++ b/doc/sphinxext/prs/3958.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "07c47b90e4590d0ff8077b0e88c2c553e8c326ff", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 28, + "d": 14 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 68, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3959.json b/doc/sphinxext/prs/3959.json new file mode 100644 index 00000000000..231dce5d035 --- /dev/null +++ b/doc/sphinxext/prs/3959.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "50983c9dc5f4866c094d2e58ced0f8c88044ef14", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 4, + "d": 4 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 56, + "d": 37 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/396.json b/doc/sphinxext/prs/396.json new file mode 100644 index 00000000000..d8fc4bf1cce --- /dev/null +++ b/doc/sphinxext/prs/396.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1c869fe4b99a4a9b07b8702ab2d3da2e93608a2f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/channels.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3962.json b/doc/sphinxext/prs/3962.json new file mode 100644 index 00000000000..82b4c40355a --- /dev/null +++ b/doc/sphinxext/prs/3962.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "071d8a20fd7d39371b0b67c070a79f968b8ccd1e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 69, + "d": 54 + }, + "mne/io/bti/read.py": { + "a": 8, + "d": 0 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3964.json b/doc/sphinxext/prs/3964.json new file mode 100644 index 00000000000..b5ed3989614 --- /dev/null +++ b/doc/sphinxext/prs/3964.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "af2b41e0272c681ad7a7b34ee3c3377f72e68cd7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "doc/cited.rst": { + "a": 193, + "d": 110 + }, + "doc/sphinxext/cited_mne.py": { + "a": 12, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3965.json b/doc/sphinxext/prs/3965.json new file mode 100644 index 00000000000..052cd754d37 --- /dev/null +++ b/doc/sphinxext/prs/3965.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f1490275d16886b877c52cd1ca65524344ba2bba", + "authors": [ + { + "n": "Cristóbal Moënne-Loccoz", + "e": null + } + ], + "changes": { + "mne/io/pick.py": { + "a": 5, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3966.json b/doc/sphinxext/prs/3966.json new file mode 100644 index 00000000000..278f2d3cc0d --- /dev/null +++ 
b/doc/sphinxext/prs/3966.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "ec164a09e92b3faf998d5c32dd5ed0343b72edfb", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 2 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 85, + "d": 18 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 325, + "d": 15 + }, + "mne/viz/tests/test_3d.py": { + "a": 26, + "d": 1 + }, + "tutorials/plot_dipole_fit.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3967.json b/doc/sphinxext/prs/3967.json new file mode 100644 index 00000000000..efc000a9ac6 --- /dev/null +++ b/doc/sphinxext/prs/3967.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "baffd5af53612d2d06abbfe12675c05e869491d8", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 86, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3968.json b/doc/sphinxext/prs/3968.json new file mode 100644 index 00000000000..8ae25d6779b --- /dev/null +++ b/doc/sphinxext/prs/3968.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3fa4664534a249c9fb5dd72c131d7b768d193893", + "authors": [ + { + "n": "Alexander Rudiuk", + "e": null + } + ], + "changes": { + "tutorials/plot_introduction.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/397.json b/doc/sphinxext/prs/397.json new file mode 100644 index 00000000000..c1496b6d401 --- /dev/null +++ b/doc/sphinxext/prs/397.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c4f77855e6d132cd8b642eacf6e899433161d90e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_viz.py": { + "a": 149, + "d": 8 + }, + "mne/viz.py": { + "a": 10, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3972.json b/doc/sphinxext/prs/3972.json new file mode 100644 index 00000000000..64092574c48 --- /dev/null +++ b/doc/sphinxext/prs/3972.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "296154c3403532d131506bddd1d8f0a60cb4b562", + "authors": [ + { + "n": "Jakub Kaczmarzyk", + "e": "jakub.kaczmarzyk@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 12, + "d": 1 + }, + "mne/evoked.py": { + "a": 11, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 11, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 8, + "d": 0 + }, + "tutorials/plot_creating_data_structures.py": { + "a": 12, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3973.json b/doc/sphinxext/prs/3973.json new file mode 100644 index 00000000000..a945f27c246 --- /dev/null +++ b/doc/sphinxext/prs/3973.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ca463521216d4cdefc81a0be50cb99a7b02ed268", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 0, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3975.json b/doc/sphinxext/prs/3975.json new file mode 100644 index 00000000000..b56e5c1a819 --- /dev/null +++ b/doc/sphinxext/prs/3975.json @@ -0,0 +1,63 @@ 
+{ + "merge_commit_sha": "2c6d6faeaa38805b20249d966f0ec2dd8950843b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/cited.rst": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/cited_mne.py": { + "a": 3, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 15, + "d": 16 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 31, + "d": 23 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 4, + "d": 16 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 18, + "d": 18 + }, + "mne/report.py": { + "a": 3, + "d": 2 + }, + "mne/source_space.py": { + "a": 49, + "d": 0 + }, + "mne/utils.py": { + "a": 12, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 127, + "d": 76 + }, + "mne/viz/tests/test_3d.py": { + "a": 12, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3977.json b/doc/sphinxext/prs/3977.json new file mode 100644 index 00000000000..2e4e6b88061 --- /dev/null +++ b/doc/sphinxext/prs/3977.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "1e3f0b7f193fe370dd4c69d37c4779c5e0c5e877", + "authors": [ + { + "n": "Martin", + "e": "mperezguevara@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 11, + "d": 3 + }, + "mne/channels/layout.py": { + "a": 19, + "d": 6 + }, + "mne/io/array/tests/test_array.py": { + "a": 17, + "d": 1 + }, + "mne/transforms.py": { + "a": 9, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 35, + "d": 34 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 6, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 18, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 22, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3980.json b/doc/sphinxext/prs/3980.json new file mode 100644 index 00000000000..155aa898488 --- /dev/null +++ b/doc/sphinxext/prs/3980.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bba62a5e3b2d71ef0027e91fe1ba9530cc653e27", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 5, + "d": 5 + }, + "appveyor.yml": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3982.json b/doc/sphinxext/prs/3982.json new file mode 100644 index 00000000000..1ed28ec2882 --- /dev/null +++ b/doc/sphinxext/prs/3982.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "78191cd1f9ee4a9ae3c357f5863b2a2ed1824da6", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3983.json b/doc/sphinxext/prs/3983.json new file mode 100644 index 00000000000..22e6e7b7824 --- /dev/null +++ b/doc/sphinxext/prs/3983.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "dc0967552dc7f4f5c9a5e265da6b2ea754a139b7", + "authors": [ + { + "n": "Matt Boggess", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 53, + "d": 5 + }, + "mne/channels/tests/test_montage.py": { + "a": 41, + "d": 3 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3984.json 
b/doc/sphinxext/prs/3984.json new file mode 100644 index 00000000000..81b8b92fc57 --- /dev/null +++ b/doc/sphinxext/prs/3984.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a1f3b8f2b5978b7421e0da3787bee92e469d6bd5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "setup.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3986.json b/doc/sphinxext/prs/3986.json new file mode 100644 index 00000000000..3de2c086705 --- /dev/null +++ b/doc/sphinxext/prs/3986.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0c31b1e248bea43343bded9b7fa0e2d7aa664210", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/filter.py": { + "a": 106, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 94, + "d": 133 + }, + "mne/simulation/tests/test_raw.py": { + "a": 11, + "d": 15 + }, + "mne/tests/test_filter.py": { + "a": 20, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3989.json b/doc/sphinxext/prs/3989.json new file mode 100644 index 00000000000..ecbb60648e6 --- /dev/null +++ b/doc/sphinxext/prs/3989.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "25e6dfa4d933b637ec0e60908976787a88191114", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/_field_interpolation.py": { + "a": 3, + "d": 11 + }, + "mne/forward/_lead_dots.py": { + "a": 2, + "d": 38 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 13, + "d": 17 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 7, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/399.json b/doc/sphinxext/prs/399.json new file mode 100644 index 00000000000..fbcbf18131c --- /dev/null +++ b/doc/sphinxext/prs/399.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "013d3bec907ac52270304e05598aef423ca90177", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cuda.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3991.json b/doc/sphinxext/prs/3991.json new file mode 100644 index 00000000000..694d3f73eab --- /dev/null +++ b/doc/sphinxext/prs/3991.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "d136b9ac808dc65f1ffbbc3e6ef7b050f5ef0c84", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_meg_sensors.py": { + "a": 8, + "d": 1 + }, + "mne/commands/mne_coreg.py": { + "a": 3, + "d": 0 + }, + "mne/coreg.py": { + "a": 1, + "d": 0 + }, + "mne/defaults.py": { + "a": 18, + "d": 0 + }, + "mne/gui/__init__.py": { + "a": 4, + "d": 2 + }, + "mne/gui/_coreg_gui.py": { + "a": 115, + "d": 34 + }, + "mne/gui/_fiducials_gui.py": { + "a": 6, + "d": 3 + }, + "mne/gui/_file_traits.py": { + "a": 50, + "d": 25 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 6, + "d": 5 + }, + "mne/gui/_marker_gui.py": { + "a": 6, + "d": 3 + }, + "mne/gui/_viewer.py": { + "a": 73, + "d": 79 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 3, + "d": 3 + }, + "mne/surface.py": { + "a": 3, + "d": 1 + }, + "mne/utils.py": { + "a": 12, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 79, + "d": 41 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3993.json b/doc/sphinxext/prs/3993.json new file mode 100644 index 00000000000..ee12dff8893 --- /dev/null +++ 
b/doc/sphinxext/prs/3993.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "2beb31277d5c09c2c0b80bed624773f40bd529b1", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 98, + "d": 5 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 15, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_annotations.py": { + "a": 31, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3994.json b/doc/sphinxext/prs/3994.json new file mode 100644 index 00000000000..a719cbc4ac2 --- /dev/null +++ b/doc/sphinxext/prs/3994.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "11826686eeb84572fa84883abcc3901ca721b166", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 10, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3995.json b/doc/sphinxext/prs/3995.json new file mode 100644 index 00000000000..0e595524dc7 --- /dev/null +++ b/doc/sphinxext/prs/3995.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "670bc2862bdea5a92fd8a5f874f457cfcbaaf7df", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_rap_music.py": { + "a": 12, + "d": 3 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/3999.json b/doc/sphinxext/prs/3999.json new file mode 100644 index 00000000000..d6b5f199eeb --- /dev/null +++ b/doc/sphinxext/prs/3999.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b1786b2750b68b8d1d8501b3d2ae60ec40cb6d51", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/defaults.py": { + "a": 2, + "d": 0 + }, + "mne/gui/_fiducials_gui.py": { + "a": 9, + "d": 3 + }, + "mne/gui/_file_traits.py": { + "a": 4, + "d": 10 + }, + "mne/viz/_3d.py": { + "a": 84, + "d": 27 + }, + "mne/viz/tests/test_3d.py": { + "a": 14, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4.json b/doc/sphinxext/prs/4.json new file mode 100644 index 00000000000..53cb6249b71 --- /dev/null +++ b/doc/sphinxext/prs/4.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4b2ebd3fb53294bcd52c822cbaa0d0a9f3a99de3", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/40.json b/doc/sphinxext/prs/40.json new file mode 100644 index 00000000000..e74109bfd72 --- /dev/null +++ b/doc/sphinxext/prs/40.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "0bd6816104cc7e38ca6c97a7a22d34f005fadcf7", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "bin/mne_compute_proj_ecg.py": { + "a": 45, + "d": 90 + }, + "bin/mne_compute_proj_eog.py": { + "a": 157, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/fiff/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing.py": { + "a": 0, + "d": 59 + }, + 
"mne/preprocessing/__init__.py": { + "a": 10, + "d": 0 + }, + "mne/preprocessing/maxfilter.py": { + "a": 268, + "d": 0 + }, + "mne/preprocessing/ssp.py": { + "a": 313, + "d": 0 + }, + "mne/preprocessing/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 35, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/400.json b/doc/sphinxext/prs/400.json new file mode 100644 index 00000000000..b15648b3786 --- /dev/null +++ b/doc/sphinxext/prs/400.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6f1c35b7ddb7c10bbae9cede4b3456889e6202b6", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 0, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4003.json b/doc/sphinxext/prs/4003.json new file mode 100644 index 00000000000..f1b1ab04644 --- /dev/null +++ b/doc/sphinxext/prs/4003.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9e4ac8a4670b3906c9c9dea5acd574bdd318c85a", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 41, + "d": 24 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4004.json b/doc/sphinxext/prs/4004.json new file mode 100644 index 00000000000..71c9789c150 --- /dev/null +++ b/doc/sphinxext/prs/4004.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "18a02e81fd9ededfa7e34a1147940d36e56a3846", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "examples/preprocessing/plot_rereference_eeg.py": { + "a": 9, + "d": 13 + }, + "mne/io/reference.py": { + "a": 86, + "d": 29 + }, + "mne/io/tests/test_reference.py": { + "a": 58, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4005.json b/doc/sphinxext/prs/4005.json new file mode 100644 index 00000000000..cc5d0771675 --- /dev/null +++ b/doc/sphinxext/prs/4005.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e75afaded3f43e06ea6ccdbe623bc5faaf6cf0c9", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 19, + "d": 4 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/gui/_coreg_gui.py": { + "a": 50, + "d": 19 + }, + "mne/gui/_fiducials_gui.py": { + "a": 9, + "d": 2 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 14, + "d": 8 + }, + "mne/utils.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4006.json b/doc/sphinxext/prs/4006.json new file mode 100644 index 00000000000..b2991164af7 --- /dev/null +++ b/doc/sphinxext/prs/4006.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5fa5f29ae9f7349dd6b0abffb5de82feb31290d2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 8, + "d": 4 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/raw.py": { + "a": 18, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4007.json b/doc/sphinxext/prs/4007.json new file mode 100644 index 00000000000..8f0fe364d1f --- /dev/null +++ b/doc/sphinxext/prs/4007.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "862a5c93e132557a305b067f10da4b8c3bbfbfc8", + 
"authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 172 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 8, + "d": 9 + }, + "mne/decoding/transformer.py": { + "a": 13, + "d": 3 + }, + "mne/externals/__init__.py": { + "a": 4, + "d": 3 + }, + "mne/externals/funcsigs.py": { + "a": 815, + "d": 0 + }, + "mne/fixes.py": { + "a": 127, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4009.json b/doc/sphinxext/prs/4009.json new file mode 100644 index 00000000000..bc438c4a785 --- /dev/null +++ b/doc/sphinxext/prs/4009.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "c59903d7b1ca6087b3417887b60e99b35c1d7cf5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 21, + "d": 36 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 3 + }, + "mne/evoked.py": { + "a": 12, + "d": 0 + }, + "mne/filter.py": { + "a": 4, + "d": 11 + }, + "mne/io/proj.py": { + "a": 3, + "d": 4 + }, + "mne/io/reference.py": { + "a": 8, + "d": 20 + }, + "mne/io/tests/test_reference.py": { + "a": 3, + "d": 7 + }, + "mne/preprocessing/stim.py": { + "a": 2, + "d": 10 + }, + "mne/stats/regression.py": { + "a": 2, + "d": 5 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 11, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4010.json b/doc/sphinxext/prs/4010.json new file mode 100644 index 00000000000..e8163ae3949 --- /dev/null +++ b/doc/sphinxext/prs/4010.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d94aee0c4f2143af435effc829e28fd20e2e8e76", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 15, + "d": 16 + }, + "mne/coreg.py": { + "a": 3, + "d": 3 + }, + "mne/source_space.py": { + "a": 8, + "d": 49 + }, + "mne/surface.py": { + "a": 60, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4013.json b/doc/sphinxext/prs/4013.json new file mode 100644 index 00000000000..e738025eacd --- /dev/null +++ b/doc/sphinxext/prs/4013.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "25993fa6531a81c4246f4f0203686bd3d9d8b79c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_kit2fiff_gui.py": { + "a": 5, + "d": 0 + }, + "mne/gui/_marker_gui.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4014.json b/doc/sphinxext/prs/4014.json new file mode 100644 index 00000000000..339f28486b9 --- /dev/null +++ b/doc/sphinxext/prs/4014.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6bb8995be480f2a888242890c97f9bb83806794b", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4015.json b/doc/sphinxext/prs/4015.json new file mode 100644 index 00000000000..92669e42811 --- /dev/null +++ b/doc/sphinxext/prs/4015.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a547bb6105c9af646d5d3a326e5397d61d1e3c89", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 8, + "d": 3 + }, + 
"tutorials/plot_artifacts_correction_filtering.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4017.json b/doc/sphinxext/prs/4017.json new file mode 100644 index 00000000000..8a2f5a4e3a0 --- /dev/null +++ b/doc/sphinxext/prs/4017.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "7cbd69caa638bb308c72eafe590b9c00cedfd21d", + "authors": [ + { + "n": "ramonapariciog", + "e": "moncho_apa@hotmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 22, + "d": 33 + }, + "mne/io/egi/egimff.py": { + "a": 393, + "d": 0 + }, + "mne/io/egi/events.py": { + "a": 162, + "d": 0 + }, + "mne/io/egi/general.py": { + "a": 155, + "d": 0 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 38, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4018.json b/doc/sphinxext/prs/4018.json new file mode 100644 index 00000000000..9ac99afe851 --- /dev/null +++ b/doc/sphinxext/prs/4018.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "2c17b8f6e587326d32845f0318e42661cee06d50", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_sensor_noise_level.py": { + "a": 30, + "d": 0 + }, + "mne/io/base.py": { + "a": 8, + "d": 9 + }, + "mne/viz/epochs.py": { + "a": 8, + "d": 11 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 75, + "d": 40 + }, + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 4, + "d": 4 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 0, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4020.json b/doc/sphinxext/prs/4020.json new file mode 100644 index 00000000000..a915fbf6319 --- /dev/null +++ b/doc/sphinxext/prs/4020.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "214ba4135685fba881f25345ff29231a8a301ced", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 8, + "d": 6 + }, + "mne/io/base.py": { + "a": 4, + "d": 4 + }, + "mne/io/fiff/raw.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_annotations.py": { + "a": 8, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 59, + "d": 55 + }, + "mne/viz/utils.py": { + "a": 21, + "d": 23 + }, + "tutorials/plot_visualize_raw.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4022.json b/doc/sphinxext/prs/4022.json new file mode 100644 index 00000000000..fafa62d8ddf --- /dev/null +++ b/doc/sphinxext/prs/4022.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2a1022a08c550f3c56c4e7e2eb61e433ed57bc3c", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/fixes.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4023.json b/doc/sphinxext/prs/4023.json new file mode 100644 index 00000000000..6aa9e2de5cb --- /dev/null +++ b/doc/sphinxext/prs/4023.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "e0bca2f7d1d791201aa588b622778ce18f53e241", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/spectral.py": { + "a": 25, + "d": 26 + }, + "mne/coreg.py": { + "a": 1, 
+ "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 0 + }, + "mne/gui/_coreg_gui.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 17, + "d": 17 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/maxwell.py": { + "a": 6, + "d": 8 + }, + "mne/stats/permutations.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 9, + "d": 4 + }, + "mne/time_frequency/multitaper.py": { + "a": 5, + "d": 10 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 0, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 7 + }, + "setup.cfg": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4024.json b/doc/sphinxext/prs/4024.json new file mode 100644 index 00000000000..88dadf6931c --- /dev/null +++ b/doc/sphinxext/prs/4024.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "af290944ce3c0b164b92d96f9861823db149f2f2", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 4, + "d": 6 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4025.json b/doc/sphinxext/prs/4025.json new file mode 100644 index 00000000000..48e9e9c56a9 --- /dev/null +++ b/doc/sphinxext/prs/4025.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e9be0d3784e0e44e2e8450fafcda413331f5e80d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/manual/decoding.rst": { + "a": 5, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 3 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 48, + "d": 24 + }, + "mne/decoding/transformer.py": { + "a": 113, + "d": 83 + }, + "mne/utils.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4029.json b/doc/sphinxext/prs/4029.json new file mode 100644 index 00000000000..6c440b98a12 --- /dev/null +++ b/doc/sphinxext/prs/4029.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8c88601b261e9b7fa77e4d6cf724214f7a75862e", + "authors": [ + { + "n": "Lorenz Esch", + "e": null + } + ], + "changes": { + "doc/mne_cpp.rst": { + "a": 19, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/403.json b/doc/sphinxext/prs/403.json new file mode 100644 index 00000000000..96746a378fb --- /dev/null +++ b/doc/sphinxext/prs/403.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ceb4dc25c96f87034bf2555c3f935d80fcc39768", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 16, + "d": 8 + }, + "mne/epochs.py": { + "a": 12, + "d": 6 + }, + "mne/fiff/pick.py": { + "a": 1, + "d": 2 + }, + "mne/forward.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 9, + "d": 2 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4030.json b/doc/sphinxext/prs/4030.json new file mode 100644 index 00000000000..0eb45d28dfc --- /dev/null +++ b/doc/sphinxext/prs/4030.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "60a80b77b557d4963de2bd45040b959593bb5043", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 2 + }, + "examples/preprocessing/plot_head_positions.py": { + "a": 31, + "d": 0 + }, + 
"examples/preprocessing/plot_movement_compensation.py": { + "a": 6, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 91, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 17, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4031.json b/doc/sphinxext/prs/4031.json new file mode 100644 index 00000000000..fb394f6ee5c --- /dev/null +++ b/doc/sphinxext/prs/4031.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "8b1087a9574c46185a921e4006a6735122e9edbf", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 46, + "d": 14 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 5 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4040.json b/doc/sphinxext/prs/4040.json new file mode 100644 index 00000000000..1c10b1bf2b2 --- /dev/null +++ b/doc/sphinxext/prs/4040.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "85b7374f31b656111e8f7201495682cb021fb339", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "mne/viz/topo.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4042.json b/doc/sphinxext/prs/4042.json new file mode 100644 index 00000000000..b2bee255bd2 --- /dev/null +++ b/doc/sphinxext/prs/4042.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7bdeaf5a72865506195c773e64b7111ec95edbac", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_topo_compare_conditions.py": { + "a": 0, + "d": 5 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 10, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 21, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4043.json b/doc/sphinxext/prs/4043.json new file mode 100644 index 00000000000..464f10bd83d --- /dev/null +++ b/doc/sphinxext/prs/4043.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e657529827b878d7210c70d9f72cebcb18197971", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_custom_inverse_solver.py": { + "a": 173, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4046.json b/doc/sphinxext/prs/4046.json new file mode 100644 index 00000000000..82b44857f4d --- /dev/null +++ b/doc/sphinxext/prs/4046.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ce9364bfccbbf83522f0dfe2c9498bcfece152ca", + "authors": [ + { + "n": "akshay0724", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 1, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4047.json b/doc/sphinxext/prs/4047.json new file mode 100644 index 00000000000..877b07eb1df --- /dev/null +++ b/doc/sphinxext/prs/4047.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "384ab09e6c4ebb07596c4cfa27821b1a25a7c05f", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "mne/evoked.py": { + "a": 24, + "d": 7 + }, + "mne/tests/test_evoked.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/4048.json b/doc/sphinxext/prs/4048.json new file mode 100644 index 00000000000..8c8d2f4882b --- /dev/null +++ b/doc/sphinxext/prs/4048.json @@ -0,0 +1,223 @@ +{ + "merge_commit_sha": "b3021dd7f974734da1bd4741f9c50ae90cb81041", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 4, + "d": 4 + }, + "mne/bem.py": { + "a": 9, + "d": 9 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 4 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 3, + "d": 3 + }, + "mne/chpi.py": { + "a": 4, + "d": 4 + }, + "mne/connectivity/spectral.py": { + "a": 3, + "d": 3 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/megsim/urls.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/time_gen.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 3, + "d": 3 + }, + "mne/epochs.py": { + "a": 7, + "d": 7 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 4, + "d": 4 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_lead_dots.py": { + "a": 2, + "d": 2 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 3, + "d": 3 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 2, + "d": 2 + }, + "mne/label.py": { + "a": 4, + "d": 4 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/mockclient.py": { + "a": 5, + "d": 5 + }, + "mne/report.py": { + "a": 2, + "d": 2 + }, + "mne/selection.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 3, + "d": 3 + }, + "mne/stats/parametric.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 3, + "d": 3 + }, + "mne/utils.py": { + "a": 15, + "d": 15 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/decoding.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 5 + }, + "mne/viz/utils.py": { + "a": 51, + "d": 51 + }, + "setup.cfg": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/405.json b/doc/sphinxext/prs/405.json new file mode 100644 index 00000000000..463a04cec3a --- /dev/null +++ b/doc/sphinxext/prs/405.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "05f845c12adb0f415b3684663f5d17f1b55320cd", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/filter.py": { + "a": 16, + "d": 5 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/4052.json b/doc/sphinxext/prs/4052.json new file mode 100644 index 00000000000..7ac1c2a5e1a --- /dev/null +++ b/doc/sphinxext/prs/4052.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8eb11aa31e5b8e342f3909f4ba9836b822643ea0", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4057.json b/doc/sphinxext/prs/4057.json new file mode 100644 index 00000000000..7183b7eb059 --- /dev/null +++ b/doc/sphinxext/prs/4057.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e983e93cf915312ca027d08c5fa54b443d0fceec", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 3, + "d": 0 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 4 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4058.json b/doc/sphinxext/prs/4058.json new file mode 100644 index 00000000000..a510c1d1d44 --- /dev/null +++ b/doc/sphinxext/prs/4058.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a5f0dc9f2f2571c08f75df4e4e92734f29cabbbc", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 7, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4061.json b/doc/sphinxext/prs/4061.json new file mode 100644 index 00000000000..725d292061f --- /dev/null +++ b/doc/sphinxext/prs/4061.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4b0f362df04b21249faf6c9a2495c71c337a14a8", + "authors": [ + { + "n": "akshay0724", + "e": null + } + ], + "changes": { + "mne/decoding/transformer.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4062.json b/doc/sphinxext/prs/4062.json new file mode 100644 index 00000000000..0c11748b31a --- /dev/null +++ b/doc/sphinxext/prs/4062.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7b27c333e2b8409002302dbfa40b073c652fbf7a", + "authors": [ + { + "n": "Cristóbal Moënne-Loccoz", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4063.json b/doc/sphinxext/prs/4063.json new file mode 100644 index 00000000000..57610b67a8b --- /dev/null +++ b/doc/sphinxext/prs/4063.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5f2db6fbe84e77eda7c265d82d03f0740fb82069", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/topo.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4064.json b/doc/sphinxext/prs/4064.json new file mode 100644 index 00000000000..e27eb842fb8 --- /dev/null +++ b/doc/sphinxext/prs/4064.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "68c2d32fc915c5a730cc074a1cfff7b753fccde3", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4065.json 
b/doc/sphinxext/prs/4065.json new file mode 100644 index 00000000000..f7c3885ae19 --- /dev/null +++ b/doc/sphinxext/prs/4065.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "56c637f271e03e70072e5c25f2dbbf91ac29dd19", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/commands/mne_kit2fiff.py": { + "a": 6, + "d": 0 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 13, + "d": 4 + }, + "mne/gui/_marker_gui.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4069.json b/doc/sphinxext/prs/4069.json new file mode 100644 index 00000000000..9f114f623b5 --- /dev/null +++ b/doc/sphinxext/prs/4069.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "395aff36c4adb512f49dbf87af5092a25ee0cf2e", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 451, + "d": 171 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/artemis123/artemis123.py": { + "a": 180, + "d": 14 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 56, + "d": 5 + }, + "mne/io/artemis123/utils.py": { + "a": 55, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 4, + "d": 2 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 50, + "d": 6 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4071.json b/doc/sphinxext/prs/4071.json new file mode 100644 index 00000000000..f4d484716b9 --- /dev/null +++ b/doc/sphinxext/prs/4071.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bae9149e5fc14b5711191525ea96b19542ccfe62", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 28, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4074.json b/doc/sphinxext/prs/4074.json new file mode 100644 index 00000000000..34c8717c6b9 --- /dev/null +++ b/doc/sphinxext/prs/4074.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "724eefccdc3fd085347be949799b8d1e44eb8f46", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4077.json b/doc/sphinxext/prs/4077.json new file mode 100644 index 00000000000..0bb85b50d9c --- /dev/null +++ b/doc/sphinxext/prs/4077.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d4a0f1c9b32f9b11e545a90680fbab08214b20dd", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 11, + "d": 2 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 14, + "d": 4 + }, + "mne/preprocessing/eog.py": { + "a": 15, + "d": 8 + }, + "mne/preprocessing/ica.py": { + "a": 53, + "d": 27 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4079.json b/doc/sphinxext/prs/4079.json new file mode 100644 index 00000000000..655b5956303 --- /dev/null +++ b/doc/sphinxext/prs/4079.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a2c5bb1e2e7a1adbf31df02b5fdb9eae2f39e207", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 13, + "d": 5 + }, + 
"mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4080.json b/doc/sphinxext/prs/4080.json new file mode 100644 index 00000000000..c8565635cb3 --- /dev/null +++ b/doc/sphinxext/prs/4080.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "79b5da63d441a2008a1aae756bc9a7b776d0bc1e", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/bem.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4081.json b/doc/sphinxext/prs/4081.json new file mode 100644 index 00000000000..e851283a311 --- /dev/null +++ b/doc/sphinxext/prs/4081.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "db38c67918b19530219a18c30e93bd5b1b1eafad", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 5, + "d": 3 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 94, + "d": 41 + }, + "mne/viz/tests/test_ica.py": { + "a": 6, + "d": 26 + }, + "mne/viz/tests/test_raw.py": { + "a": 34, + "d": 22 + }, + "mne/viz/utils.py": { + "a": 121, + "d": 13 + }, + "tutorials/plot_visualize_raw.py": { + "a": 18, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4082.json b/doc/sphinxext/prs/4082.json new file mode 100644 index 00000000000..a03d799277c --- /dev/null +++ b/doc/sphinxext/prs/4082.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7d2ae7f33928dd6d8f612290579495aff3f3da3a", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 17, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4083.json b/doc/sphinxext/prs/4083.json new file mode 100644 index 00000000000..bbf02283f3e --- /dev/null +++ b/doc/sphinxext/prs/4083.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e5b0447baf2e0fe7d24fdcb1f11338257c89d33c", + "authors": [ + { + "n": "Steven Gutstein", + "e": null + } + ], + "changes": { + "mne/tests/test_report.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4084.json b/doc/sphinxext/prs/4084.json new file mode 100644 index 00000000000..5f50ebbfff4 --- /dev/null +++ b/doc/sphinxext/prs/4084.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "96d8b48e4f9e989616beec897e642e5772cd590b", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 39, + "d": 5 + }, + "mne/io/reference.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4086.json b/doc/sphinxext/prs/4086.json new file mode 100644 index 00000000000..6b79537e449 --- /dev/null +++ b/doc/sphinxext/prs/4086.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6b16b7f4cea6fa6810d2ba7b132f02ba4b6b152d", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 7, + "d": 5 + }, + "mne/viz/_3d.py": { + "a": 264, + "d": 46 + }, + "mne/viz/tests/test_3d.py": { + "a": 34, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4088.json b/doc/sphinxext/prs/4088.json new 
file mode 100644 index 00000000000..0e70b7fcb16 --- /dev/null +++ b/doc/sphinxext/prs/4088.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "52109930a4e483f3d325291c2e12cca9ff502c0b", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 115, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 18, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4089.json b/doc/sphinxext/prs/4089.json new file mode 100644 index 00000000000..129b6506210 --- /dev/null +++ b/doc/sphinxext/prs/4089.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b245cad2942873f6d0e1f78936cbf29a78d2e751", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 7, + "d": 0 + }, + "mne/io/reference.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/409.json b/doc/sphinxext/prs/409.json new file mode 100644 index 00000000000..8ec600563e4 --- /dev/null +++ b/doc/sphinxext/prs/409.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8be43ab6f3a476b60cc5059ade85839af2c026aa", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4090.json b/doc/sphinxext/prs/4090.json new file mode 100644 index 00000000000..dad2eb551e2 --- /dev/null +++ b/doc/sphinxext/prs/4090.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "674e2cdf3bd128637c52e38c1f2e55cb0af3b95a", + "authors": [ + { + "n": "Matt Boggess", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 11, + "d": 3 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4096.json b/doc/sphinxext/prs/4096.json new file mode 100644 index 00000000000..cb9f2d97467 --- /dev/null +++ b/doc/sphinxext/prs/4096.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "de4eb02405d4c80610f00cdb26d4cf0dbb4282de", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/commands/mne_show_fiff.py": { + "a": 12, + "d": 4 + }, + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 0 + }, + "mne/io/open.py": { + "a": 23, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4097.json b/doc/sphinxext/prs/4097.json new file mode 100644 index 00000000000..5263af33a45 --- /dev/null +++ b/doc/sphinxext/prs/4097.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "12b329c9421376160055957ae88784ec639cbe33", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "doc/_static/favicon.ico": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4098.json b/doc/sphinxext/prs/4098.json new file mode 100644 index 00000000000..fd294385b83 --- /dev/null +++ b/doc/sphinxext/prs/4098.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2aebcc4cde8601aa105a2b1d7fa6618763ef3bb5", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "examples/visualization/plot_3d_to_2d.py": { + "a": 120, + "d": 0 + }, + "examples/visualization/plot_clickable_image.py": { + "a": 0, + "d": 66 + }, + "mne/data/image/custom_layout.lout": { + "a": 23, + "d": 24 + }, + "mne/viz/utils.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4099.json b/doc/sphinxext/prs/4099.json 
new file mode 100644 index 00000000000..97204794a17 --- /dev/null +++ b/doc/sphinxext/prs/4099.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7e15c0f5552aca66a10a3ca54d92a61bb3d781b9", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "examples/io/plot_read_events.py": { + "a": 77, + "d": 0 + }, + "examples/io/read_events.py": { + "a": 0, + "d": 31 + }, + "mne/event.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/41.json b/doc/sphinxext/prs/41.json new file mode 100644 index 00000000000..eba675894ba --- /dev/null +++ b/doc/sphinxext/prs/41.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "25f550f1586b339cedd203fff13921f02e84fe10", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/cov.py": { + "a": 2, + "d": 2 + }, + "mne/forward.py": { + "a": 6, + "d": 5 + }, + "mne/minimum_norm/inverse.py": { + "a": 37, + "d": 8 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/410.json b/doc/sphinxext/prs/410.json new file mode 100644 index 00000000000..337857c5579 --- /dev/null +++ b/doc/sphinxext/prs/410.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "badca76e57ce230c71cde14efef1169472a31039", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 24, + "d": 7 + }, + "mne/fiff/raw.py": { + "a": 34, + "d": 24 + }, + "mne/fiff/tag.py": { + "a": 80, + "d": 27 + }, + "mne/fiff/tests/test_raw.py": { + "a": 19, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4100.json b/doc/sphinxext/prs/4100.json new file mode 100644 index 00000000000..16e030a2520 --- /dev/null +++ b/doc/sphinxext/prs/4100.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d14d7b492cd8c7253e2b3216f1040491cb649de3", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "examples/decoding/decoding_rsa.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4103.json b/doc/sphinxext/prs/4103.json new file mode 100644 index 00000000000..f86ed029228 --- /dev/null +++ b/doc/sphinxext/prs/4103.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "aa4e18940fbad7626c323502994141854f6879ba", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/manual/decoding.rst": { + "a": 38, + "d": 25 + }, + "doc/python_reference.rst": { + "a": 5, + "d": 5 + }, + "doc/whats_new.rst": { + "a": 10, + "d": 2 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 36, + "d": 28 + }, + "mne/decoding/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 230, + "d": 31 + }, + "mne/decoding/search_light.py": { + "a": 93, + "d": 114 + }, + "mne/decoding/tests/test_base.py": { + "a": 50, + "d": 5 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 47, + "d": 46 + }, + "mne/decoding/time_gen.py": { + "a": 5, + "d": 1 + }, + "mne/parallel.py": { + "a": 19, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/viz/decoding.py": { + "a": 3, + "d": 1 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 82, + "d": 40 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4104.json b/doc/sphinxext/prs/4104.json new file mode 100644 index 00000000000..fba9a72874a --- /dev/null +++ 
b/doc/sphinxext/prs/4104.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e945956678fecdfd381b9a2078b4128adfe1abf1", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "examples/realtime/ftclient_rt_average.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 13, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4105.json b/doc/sphinxext/prs/4105.json new file mode 100644 index 00000000000..41e1fb710a6 --- /dev/null +++ b/doc/sphinxext/prs/4105.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "00a23a3cc62f14a563dc45aac38dc839a7277bf1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 1, + "d": 11 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 325 + }, + "mne/dipole.py": { + "a": 2, + "d": 7 + }, + "mne/epochs.py": { + "a": 39, + "d": 98 + }, + "mne/filter.py": { + "a": 1, + "d": 459 + }, + "mne/forward/_make_forward.py": { + "a": 10, + "d": 31 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 4 + }, + "mne/io/base.py": { + "a": 1, + "d": 6 + }, + "mne/io/fiff/raw.py": { + "a": 3, + "d": 20 + }, + "mne/realtime/epochs.py": { + "a": 4, + "d": 11 + }, + "mne/simulation/tests/test_raw.py": { + "a": 2, + "d": 1 + }, + "mne/source_space.py": { + "a": 12, + "d": 51 + }, + "mne/tests/test_source_space.py": { + "a": 10, + "d": 16 + }, + "mne/viz/_3d.py": { + "a": 7, + "d": 151 + }, + "mne/viz/tests/test_3d.py": { + "a": 0, + "d": 14 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4106.json b/doc/sphinxext/prs/4106.json new file mode 100644 index 00000000000..193f2c5265f --- /dev/null +++ b/doc/sphinxext/prs/4106.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3506ebbe9146b337dc22c05297c4d39daed93ac9", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 5, + "d": 2 + }, + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4107.json b/doc/sphinxext/prs/4107.json new file mode 100644 index 00000000000..38f6436356c --- /dev/null +++ b/doc/sphinxext/prs/4107.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "826c24616b3dde5e6ec3030800d499380392f126", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 20, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4109.json b/doc/sphinxext/prs/4109.json new file mode 100644 index 00000000000..7c5368c9b7e --- /dev/null +++ b/doc/sphinxext/prs/4109.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "176795b73e34e53a19f14d46a11bff8cd63646fe", + "authors": [ + { + "n": "Jean-Baptiste SCHIRATTI", + "e": "jean.baptiste.schiratti@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 12, + "d": 3 + }, + "mne/channels/tests/test_montage.py": { + "a": 36, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4111.json b/doc/sphinxext/prs/4111.json new file mode 100644 index 00000000000..8bf09bde1b8 --- /dev/null +++ 
b/doc/sphinxext/prs/4111.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "148cd23d1a3fd2d0dd98fdd2381530c44ec2a428", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4112.json b/doc/sphinxext/prs/4112.json new file mode 100644 index 00000000000..1cb3029658f --- /dev/null +++ b/doc/sphinxext/prs/4112.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1574ad4342ec1b8b85ef109849a756b7a1b62c87", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 43, + "d": 14 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 41, + "d": 11 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 38, + "d": 6 + }, + "examples/time_frequency/plot_time_frequency_simulated.py": { + "a": 115, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4113.json b/doc/sphinxext/prs/4113.json new file mode 100644 index 00000000000..7dda8c10702 --- /dev/null +++ b/doc/sphinxext/prs/4113.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "add8370d54eae2ddef00ee9290b4dfcca411c44a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 1 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 31, + "d": 44 + }, + "mne/decoding/transformer.py": { + "a": 16, + "d": 4 + }, + "mne/filter.py": { + "a": 146, + "d": 59 + }, + "mne/io/array/tests/test_array.py": { + "a": 6, + "d": 11 + }, + "mne/io/base.py": { + "a": 31, + "d": 18 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 5, + "d": 4 + }, + "mne/io/pick.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 5 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 9, + "d": 12 + }, + "mne/tests/test_filter.py": { + "a": 51, + "d": 37 + }, + "tutorials/plot_background_filtering.py": { + "a": 52, + "d": 28 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4114.json b/doc/sphinxext/prs/4114.json new file mode 100644 index 00000000000..a3f9b649c92 --- /dev/null +++ b/doc/sphinxext/prs/4114.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "2b2510e53be77bddbe23a835dfdb06aa812f86ec", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 2, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 7, + "d": 5 + }, + "mne/io/base.py": { + "a": 6, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 5, + "d": 0 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_coreg.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4115.json b/doc/sphinxext/prs/4115.json new file mode 100644 
index 00000000000..b7a85514ead --- /dev/null +++ b/doc/sphinxext/prs/4115.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f823a00bae2d7e39ebc587128822e673369b9420", + "authors": [ + { + "n": "Laura Gwilliams", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 111, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4118.json b/doc/sphinxext/prs/4118.json new file mode 100644 index 00000000000..55a2b0a321e --- /dev/null +++ b/doc/sphinxext/prs/4118.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9a9c69e2caa5a5b268f44a53f7c55faaa7d211f2", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "doc/manual/datasets_index.rst": { + "a": 21, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4119.json b/doc/sphinxext/prs/4119.json new file mode 100644 index 00000000000..676ab9f70d5 --- /dev/null +++ b/doc/sphinxext/prs/4119.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "8fc2a545f494de0f828b931f2285dbff426e72ad", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "examples/decoding/plot_receptive_field.py": { + "a": 2, + "d": 3 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/receptive_field.py": { + "a": 66, + "d": 35 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 92, + "d": 5 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 243, + "d": 0 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 5, + "d": 4 + }, + "tutorials/plot_receptive_field.py": { + "a": 101, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/412.json b/doc/sphinxext/prs/412.json new file mode 100644 index 00000000000..dc745099722 --- /dev/null +++ b/doc/sphinxext/prs/412.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c0918bfd1dbbe92139b639a9c1bd891e7371f882", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/sphinxext/gen_rst.py": { + "a": 307, + "d": 139 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4120.json b/doc/sphinxext/prs/4120.json new file mode 100644 index 00000000000..54643e18c4b --- /dev/null +++ b/doc/sphinxext/prs/4120.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e3a4dce3ee3bcfe744086b7ebdd4374793560e96", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4121.json b/doc/sphinxext/prs/4121.json new file mode 100644 index 00000000000..516b87bd474 --- /dev/null +++ b/doc/sphinxext/prs/4121.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "992662a474192778f709dfbc588f4e1fcfcab916", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/decoding/receptive_field.py": { + "a": 25, + "d": 14 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_receptive_field.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4123.json b/doc/sphinxext/prs/4123.json new file mode 100644 index 00000000000..f2e454333e0 --- /dev/null +++ b/doc/sphinxext/prs/4123.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"0130bf93adbe6f7efadcac250fc0985fe6f927d4", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/plot_forward.py": { + "a": 20, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/413.json b/doc/sphinxext/prs/413.json new file mode 100644 index 00000000000..bb74515356c --- /dev/null +++ b/doc/sphinxext/prs/413.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "1692365eab0d92ec33e37efac27a26249b90c4ca", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 6, + "d": 7 + }, + "examples/plot_read_evoked.py": { + "a": 3, + "d": 5 + }, + "mne/fiff/evoked.py": { + "a": 8, + "d": 3 + }, + "mne/tests/test_viz.py": { + "a": 6, + "d": 1 + }, + "mne/viz.py": { + "a": 37, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4132.json b/doc/sphinxext/prs/4132.json new file mode 100644 index 00000000000..f16b80cad5f --- /dev/null +++ b/doc/sphinxext/prs/4132.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "847347d14b858b86e5aac347a9db1bcf44b554d6", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 12, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4135.json b/doc/sphinxext/prs/4135.json new file mode 100644 index 00000000000..2dd451a81e2 --- /dev/null +++ b/doc/sphinxext/prs/4135.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b53c5bdc1fd89df68dcc1fbebcf67858ac12ff23", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_rap_music.py": { + "a": 14, + "d": 17 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4138.json b/doc/sphinxext/prs/4138.json new file mode 100644 index 00000000000..834635cbed7 --- /dev/null +++ b/doc/sphinxext/prs/4138.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1df8195ac81a829e8db9ada4fa266a4c19676e46", + "authors": [ + { + "n": "Nicole Proulx", + "e": null + } + ], + "changes": { + "tutorials/plot_decoding_csp_timefreq.py": { + "a": 56, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4139.json b/doc/sphinxext/prs/4139.json new file mode 100644 index 00000000000..224b38b70bd --- /dev/null +++ b/doc/sphinxext/prs/4139.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b6bb7bf285b144c34a93b76963283b8d35e3f97c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 143, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/414.json b/doc/sphinxext/prs/414.json new file mode 100644 index 00000000000..f41bfa7a065 --- /dev/null +++ b/doc/sphinxext/prs/414.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2689932e0e52937b479c6063a29a0c31feca866d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4141.json b/doc/sphinxext/prs/4141.json new file mode 100644 index 00000000000..b78de802f78 --- /dev/null +++ b/doc/sphinxext/prs/4141.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3295ba7543d69abf4c740d4b1d3669649e894c2e", + "authors": [ + { + "n": "Alexander Rudiuk", + "e": null + } + ], + "changes": { + "mne/preprocessing/ecg.py": { + "a": 3, + "d": 0 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4143.json b/doc/sphinxext/prs/4143.json new file mode 100644 index 00000000000..ffd3fa739ae --- /dev/null +++ b/doc/sphinxext/prs/4143.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6fd0c4fb416e4705517b1aeee0c8dc2393887172", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "examples/decoding/README.txt": { + "a": 3, + "d": 3 + }, + "tutorials/plot_receptive_field.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4144.json b/doc/sphinxext/prs/4144.json new file mode 100644 index 00000000000..58a84bfdf4a --- /dev/null +++ b/doc/sphinxext/prs/4144.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "1239f23d5c44c1e8b79d035221559163c595978e", + "authors": [ + { + "n": "alexandre barachant", + "e": null + } + ], + "changes": { + "circle.yml": { + "a": 4, + "d": 0 + }, + "doc/manual/decoding.rst": { + "a": 16, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 7, + "d": 0 + }, + "examples/decoding/plot_decoding_spoc_CMC.py": { + "a": 84, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/fieldtrip_cmc/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py": { + "a": 32, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 14, + "d": 4 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 209, + "d": 29 + }, + "mne/decoding/tests/test_csp.py": { + "a": 103, + "d": 26 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4145.json b/doc/sphinxext/prs/4145.json new file mode 100644 index 00000000000..20d8eba6dfa --- /dev/null +++ b/doc/sphinxext/prs/4145.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8207646a4af0b551163f98a3d89a1111ca6775d5", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 8, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4152.json b/doc/sphinxext/prs/4152.json new file mode 100644 index 00000000000..bb7234c83b3 --- /dev/null +++ b/doc/sphinxext/prs/4152.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "85b471be65f046b81b681a3b061897868a697fbc", + 
"authors": [ + { + "n": "Rodrigo Hübner", + "e": "rodrigohubner@utfpr.edu.br" + } + ], + "changes": { + "mne/dipole.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4154.json b/doc/sphinxext/prs/4154.json new file mode 100644 index 00000000000..9711479d203 --- /dev/null +++ b/doc/sphinxext/prs/4154.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ee5ecfe78d9a7ec5698ab015644e25d1b7596755", + "authors": [ + { + "n": "Rodrigo Hübner", + "e": "rodrigohubner@utfpr.edu.br" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4156.json b/doc/sphinxext/prs/4156.json new file mode 100644 index 00000000000..694ca5c47ae --- /dev/null +++ b/doc/sphinxext/prs/4156.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7b812a2c654290c509beae6fb3ef035127ea3a68", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/io/plot_objects_from_arrays.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4158.json b/doc/sphinxext/prs/4158.json new file mode 100644 index 00000000000..e0670d08e79 --- /dev/null +++ b/doc/sphinxext/prs/4158.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d512801d56091376f51c0d60cf0a1e82a87a351b", + "authors": [ + { + "n": "Alexander Rudiuk", + "e": null + } + ], + "changes": { + "mne/time_frequency/psd.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/416.json b/doc/sphinxext/prs/416.json new file mode 100644 index 00000000000..968ee853c07 --- /dev/null +++ b/doc/sphinxext/prs/416.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "906dff5a263cb84fb3425c3fd1f02741a14c1d79", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4160.json b/doc/sphinxext/prs/4160.json new file mode 100644 index 00000000000..5f008918ca2 --- /dev/null +++ b/doc/sphinxext/prs/4160.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f1b0fd4216324fe9609eca77a73d2a4617fcd876", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 16, + "d": 9 + }, + "mne/io/fiff/raw.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4162.json b/doc/sphinxext/prs/4162.json new file mode 100644 index 00000000000..dbbc5d00612 --- /dev/null +++ b/doc/sphinxext/prs/4162.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c8d52520e07091a83375ef0fcd170ef16cc932e4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4163.json b/doc/sphinxext/prs/4163.json new file mode 100644 index 00000000000..e9ea5b979fa --- /dev/null +++ b/doc/sphinxext/prs/4163.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "63c31843d61508a28d34e4cf2472ca388d4a8ef8", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/4166.json b/doc/sphinxext/prs/4166.json new file mode 100644 index 00000000000..e881cd484e2 --- /dev/null +++ b/doc/sphinxext/prs/4166.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ab51322a930062375f9b92ab4fac79e44e5a4dc6", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/io/plot_objects_from_arrays.py": { + "a": 1, + "d": 0 + }, + "mne/io/base.py": { + "a": 1, + "d": 5 + }, + "mne/io/fiff/raw.py": { + "a": 5, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4167.json b/doc/sphinxext/prs/4167.json new file mode 100644 index 00000000000..86ab236abf1 --- /dev/null +++ b/doc/sphinxext/prs/4167.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3f961aa21a1d4e7a98327880da80dcd3dd782c34", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/eog.py": { + "a": 18, + "d": 7 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4168.json b/doc/sphinxext/prs/4168.json new file mode 100644 index 00000000000..d16e8cc1924 --- /dev/null +++ b/doc/sphinxext/prs/4168.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "a8f7ad3eb49ea540814183635b6bc8a246e7f3ce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_unsupervised_spatial_filter.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_objects_from_arrays.py": { + "a": 3, + "d": 1 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_xdawn_denoising.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_linear_regression_raw.py": { + "a": 3, + "d": 2 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 2, + "d": 1 + }, + "examples/visualization/plot_evoked_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_topo_customized.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 3, + "d": 6 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 2 + }, + "tutorials/plot_modifying_data_inplace.py": { + "a": 3, + "d": 4 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4169.json b/doc/sphinxext/prs/4169.json new file mode 100644 index 00000000000..201d436bd45 --- /dev/null +++ 
b/doc/sphinxext/prs/4169.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "efc5096d141a785d9f525208fee0e2105a690aea", + "authors": [ + { + "n": "alexandre barachant", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 11, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4170.json b/doc/sphinxext/prs/4170.json new file mode 100644 index 00000000000..36686fdf4a2 --- /dev/null +++ b/doc/sphinxext/prs/4170.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "c86a836e27caf4e34194aa74205e48c027bbedb2", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "circle.yml": { + "a": 3, + "d": 0 + }, + "doc/manual/datasets_index.rst": { + "a": 11, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_hf_sef_data.py": { + "a": 37, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/hf_sef/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 99, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4172.json b/doc/sphinxext/prs/4172.json new file mode 100644 index 00000000000..b5b557a21d8 --- /dev/null +++ b/doc/sphinxext/prs/4172.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "04584cd6d9cadfd81f521bfde92fafab2bad2c66", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/contributing.rst": { + "a": 27, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4173.json b/doc/sphinxext/prs/4173.json new file mode 100644 index 00000000000..020f16392ab --- /dev/null +++ b/doc/sphinxext/prs/4173.json @@ -0,0 +1,139 @@ +{ + "merge_commit_sha": "d902ce646ae5195dd2e8a589947441fbb1b17cd4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 5 + }, + "doc/configure_git.rst": { + "a": 1, + "d": 1 + }, + "doc/contributing.rst": { + "a": 2, + "d": 2 + }, + "doc/documentation.rst": { + "a": 680, + "d": 0 + }, + "doc/getting_started.rst": { + "a": 38, + "d": 104 + }, + "doc/index.rst": { + "a": 0, + "d": 1 + }, + "doc/install_mne_python.rst": { + "a": 3, + "d": 4 + }, + "doc/manual/channel_interpolation.rst": { + "a": 2, + "d": 2 + }, + "doc/manual/cookbook.rst": { + "a": 4, + "d": 4 + }, + "doc/manual/datasets_index.rst": { + "a": 2, + "d": 2 + }, + "doc/manual/decoding.rst": { + "a": 7, + "d": 7 + }, + "doc/manual/index.rst": { + "a": 0, + "d": 8 + }, + "doc/manual/io.rst": { + "a": 27, + "d": 0 + }, + "doc/manual/matlab.rst": { + "a": 3, + "d": 3 + }, + "doc/manual/pitfalls.rst": { + "a": 0, + "d": 30 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 4, + "d": 4 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 0 + }, + "doc/tutorials.rst": { + "a": 0, + "d": 178 + }, + "doc/tutorials/philosophy.rst": { + "a": 27, + "d": 45 + }, + "doc/whats_new.rst": { + "a": 14, + "d": 12 + }, + "examples/decoding/plot_receptive_field.py": { + "a": 1, + "d": 1 + }, + "examples/io/README.txt": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 7, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + 
"mne/decoding/base.py": { + "a": 16, + "d": 26 + }, + "mne/decoding/search_light.py": { + "a": 28, + "d": 31 + }, + "mne/decoding/transformer.py": { + "a": 2, + "d": 2 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_configuration.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4174.json b/doc/sphinxext/prs/4174.json new file mode 100644 index 00000000000..ac89544a7db --- /dev/null +++ b/doc/sphinxext/prs/4174.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d446fe79bc8bf6cadb7d8ab1209800b729b0c06a", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 3 + }, + "mne/io/ctf/info.py": { + "a": 10, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4179.json b/doc/sphinxext/prs/4179.json new file mode 100644 index 00000000000..9cbbce009d9 --- /dev/null +++ b/doc/sphinxext/prs/4179.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "654c6e8a7c97156d9f65eec3d9d9686d83fe3646", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/418.json b/doc/sphinxext/prs/418.json new file mode 100644 index 00000000000..2fd41de635b --- /dev/null +++ b/doc/sphinxext/prs/418.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "43f44da622cf2990b4b6f0441b3a39dce0fcb397", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 9, + "d": 8 + }, + "mne/source_estimate.py": { + "a": 7, + "d": 4 + }, + "mne/viz.py": { + "a": 47, + "d": 30 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4181.json b/doc/sphinxext/prs/4181.json new file mode 100644 index 00000000000..cdb6f4f3db7 --- /dev/null +++ b/doc/sphinxext/prs/4181.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c9fa7918750c6e708f4fe8c0154428366251872e", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4182.json b/doc/sphinxext/prs/4182.json new file mode 100644 index 00000000000..ef3ba42551e --- /dev/null +++ b/doc/sphinxext/prs/4182.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0c44a12ac95276c7c102660974fa8f5e1b074b26", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 10, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 24, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4183.json b/doc/sphinxext/prs/4183.json new file mode 100644 index 00000000000..ef3e276a247 --- /dev/null +++ b/doc/sphinxext/prs/4183.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e68c0662a829d68207f8ff8c111579965546641a", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4186.json b/doc/sphinxext/prs/4186.json new file mode 100644 index 00000000000..f9f4c044b19 --- /dev/null +++ b/doc/sphinxext/prs/4186.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1fd891b772942e1885047a40795a25c5302c9f32", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/viz/tests/test_ica.py": { + "a": 8, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4187.json b/doc/sphinxext/prs/4187.json new file mode 100644 index 00000000000..becbf49a306 --- /dev/null +++ b/doc/sphinxext/prs/4187.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "884846fa1a3254ca7d9e5e4548b8f5ff521a2cf9", + "authors": [ + { + "n": "mathurinm", + "e": "mathurin.massias@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 48, + "d": 32 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 8, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/419.json b/doc/sphinxext/prs/419.json new file mode 100644 index 00000000000..6bfbc75ed8c --- /dev/null +++ b/doc/sphinxext/prs/419.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "633145e4e0d6555109ff1c46f29eca8270b18b32", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 1, + "d": 1 + }, + "mne/fiff/meas_info.py": { + "a": 9, + "d": 3 + }, + "mne/fiff/raw.py": { + "a": 75, + "d": 19 + }, + "mne/fiff/tests/test_raw.py": { + "a": 52, + "d": 27 + }, + "mne/fiff/write.py": { + "a": 14, + "d": 0 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4191.json b/doc/sphinxext/prs/4191.json new file mode 100644 index 00000000000..224110c1da3 --- /dev/null +++ b/doc/sphinxext/prs/4191.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "52e7bb712e8ed480d6bf66dbefab50e8bd6fe647", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4193.json b/doc/sphinxext/prs/4193.json new file mode 100644 index 00000000000..7e52e90a2fe --- /dev/null +++ b/doc/sphinxext/prs/4193.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b7f37de81210f0349faf78e091ee3cb73ebae647", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 3, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 70, + "d": 23 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/42.json b/doc/sphinxext/prs/42.json new file mode 100644 index 00000000000..ddf90f0ab84 --- /dev/null +++ b/doc/sphinxext/prs/42.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "e2b8a83f7ca79c0cc8c513dbf539f51049dd05b0", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_make_inverse_operator.py": { + "a": 6, + "d": 2 + }, + 
"mne/fiff/channels.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/cov.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/meas_info.py": { + "a": 4, + "d": 2 + }, + "mne/fiff/pick.py": { + "a": 9, + "d": 6 + }, + "mne/forward.py": { + "a": 99, + "d": 20 + }, + "mne/minimum_norm/inverse.py": { + "a": 32, + "d": 15 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 5, + "d": 0 + }, + "mne/tests/test_forward.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/420.json b/doc/sphinxext/prs/420.json new file mode 100644 index 00000000000..f96ff33b7f8 --- /dev/null +++ b/doc/sphinxext/prs/420.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "22e0b13221f96931b59b0d5f595bdd680bd9edc4", + "authors": [ + { + "n": "Simon Kornblith", + "e": "simon@simonster.com" + } + ], + "changes": { + "mne/event.py": { + "a": 88, + "d": 6 + }, + "mne/tests/test_event.py": { + "a": 72, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4202.json b/doc/sphinxext/prs/4202.json new file mode 100644 index 00000000000..9532b9a81b4 --- /dev/null +++ b/doc/sphinxext/prs/4202.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "33b10960f6f88e898fcb694032eff876826c4b22", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4203.json b/doc/sphinxext/prs/4203.json new file mode 100644 index 00000000000..15b1a812481 --- /dev/null +++ b/doc/sphinxext/prs/4203.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "a2b75f7d06fd753b7732f71a1f92802abc28dd63", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 12, + "d": 6 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 14, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 10, + "d": 4 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 46, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4204.json b/doc/sphinxext/prs/4204.json new file mode 100644 index 00000000000..7994311ab5b --- /dev/null +++ b/doc/sphinxext/prs/4204.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "edbbcf475fa6a216193f9f4ed37f1efddbd1f646", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4205.json b/doc/sphinxext/prs/4205.json new file mode 100644 index 00000000000..f075fa2e4c0 --- /dev/null +++ b/doc/sphinxext/prs/4205.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d130b748b43ba0ae944b322cf37c4b1a599a821b", + "authors": [ + { + "n": "Nicolas Barascud", + "e": null + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 24, + "d": 12 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 634, + "d": 137 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 68, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4210.json b/doc/sphinxext/prs/4210.json new file mode 100644 index 00000000000..7415aea4a6e --- /dev/null +++ b/doc/sphinxext/prs/4210.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"53900e6d5430a5b8a7179032d402f8fe6f8d90d6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 14, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 12, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4212.json b/doc/sphinxext/prs/4212.json new file mode 100644 index 00000000000..324d73c057a --- /dev/null +++ b/doc/sphinxext/prs/4212.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8526a27fc9fa43040a1fd72bcdf254cb428b82a9", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/io/egi/egi.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4213.json b/doc/sphinxext/prs/4213.json new file mode 100644 index 00000000000..aec80c7acb4 --- /dev/null +++ b/doc/sphinxext/prs/4213.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9f908e74c53cb8a3608080560d0072cbcdebc97f", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 69, + "d": 4 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 24, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4218.json b/doc/sphinxext/prs/4218.json new file mode 100644 index 00000000000..01f272f148c --- /dev/null +++ b/doc/sphinxext/prs/4218.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ddd84d8e199761fca6926f91c0493446e3ff0260", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/reference.py": { + "a": 60, + "d": 17 + }, + "mne/io/tests/test_reference.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4219.json b/doc/sphinxext/prs/4219.json new file mode 100644 index 00000000000..5c8722070e0 --- /dev/null +++ b/doc/sphinxext/prs/4219.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ebabe331f06df5f222b2a00f9ce782b6267cf8f7", + "authors": [ + { + "n": "jdue", + "e": "jdue@dtu.dk" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4220.json b/doc/sphinxext/prs/4220.json new file mode 100644 index 00000000000..2d5f875ab47 --- /dev/null +++ b/doc/sphinxext/prs/4220.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "912c0e41e4467f622932f1ecb6f673e0717a1c0b", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 35, + "d": 34 + }, + "mne/viz/tests/test_topo.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4223.json b/doc/sphinxext/prs/4223.json new file mode 100644 index 00000000000..9ab9a87c438 --- /dev/null +++ b/doc/sphinxext/prs/4223.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cda9d7b8362cb53618c71967b0b63ff46d311d9c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 22, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4224.json 
b/doc/sphinxext/prs/4224.json new file mode 100644 index 00000000000..22db2f472cf --- /dev/null +++ b/doc/sphinxext/prs/4224.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "a42677ff2f7342f2de93e6ec84f74d29c1983b4b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_evoked_topomap.py": { + "a": 0, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 3 + }, + "mne/viz/circle.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 4, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 79, + "d": 71 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 0 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4225.json b/doc/sphinxext/prs/4225.json new file mode 100644 index 00000000000..accbf11aa93 --- /dev/null +++ b/doc/sphinxext/prs/4225.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bda07a6a73d3dd24a93bb5ddcc43b578b5f274b8", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_ica.py": { + "a": 8, + "d": 5 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 47, + "d": 44 + }, + "mne/viz/topomap.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4236.json b/doc/sphinxext/prs/4236.json new file mode 100644 index 00000000000..a50029c8611 --- /dev/null +++ b/doc/sphinxext/prs/4236.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c602de23c70cd0af18256865f3b4b44b1be55ded", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 10, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4237.json b/doc/sphinxext/prs/4237.json new file mode 100644 index 00000000000..a02fd95b775 --- /dev/null +++ b/doc/sphinxext/prs/4237.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "932af71c4a0828fa87ddc141cd223c5a4ad9bede", + "authors": [ + { + "n": "Jean-Baptiste SCHIRATTI", + "e": "jean.baptiste.schiratti@gmail.com" + } + ], + "changes": { + "mne/io/reference.py": { + "a": 11, + "d": 3 + }, + "mne/io/tests/test_reference.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4238.json b/doc/sphinxext/prs/4238.json new file mode 100644 index 00000000000..9a4adfec862 --- /dev/null +++ b/doc/sphinxext/prs/4238.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7fdb3819cd76102a257a0b6edc8242a7d2b8ea3b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 1, + "d": 5 + }, + "mne/viz/tests/test_topomap.py": { + "a": 12, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4239.json b/doc/sphinxext/prs/4239.json new file mode 100644 index 00000000000..7af9e19f61f --- /dev/null +++ b/doc/sphinxext/prs/4239.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3c6b4671b2f3fce21aec75fe0ba0253a1be0f2e7", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 12, + "d": 28 + } + } +} \ No newline at 
end of file diff --git a/doc/sphinxext/prs/424.json b/doc/sphinxext/prs/424.json new file mode 100644 index 00000000000..b1a751628f7 --- /dev/null +++ b/doc/sphinxext/prs/424.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1d8ad7be8ffd3d43993a87c0a5b45ec0e29cd359", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 7, + "d": 5 + }, + "mne/fiff/evoked.py": { + "a": 8, + "d": 5 + }, + "mne/fiff/raw.py": { + "a": 49, + "d": 23 + }, + "mne/fiff/tests/test_raw.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4243.json b/doc/sphinxext/prs/4243.json new file mode 100644 index 00000000000..f0c32bdd015 --- /dev/null +++ b/doc/sphinxext/prs/4243.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "18db2bb3f62bc17f3971c00b33f3f0a69891b6db", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_viewer.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4244.json b/doc/sphinxext/prs/4244.json new file mode 100644 index 00000000000..7a649a044af --- /dev/null +++ b/doc/sphinxext/prs/4244.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ed4bb163b97e5753029680a74044499693cbc0ca", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/_make_forward.py": { + "a": 2, + "d": 0 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 1 + }, + "mne/source_space.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4247.json b/doc/sphinxext/prs/4247.json new file mode 100644 index 00000000000..ad801be0be1 --- /dev/null +++ b/doc/sphinxext/prs/4247.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "64b070cc6014955ac125facfeb83d7755def48cd", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 3, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 5, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 24, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4248.json b/doc/sphinxext/prs/4248.json new file mode 100644 index 00000000000..d6f82e090c9 --- /dev/null +++ b/doc/sphinxext/prs/4248.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f9892442ea3cfe2bc916e391998e64d216a61744", + "authors": [ + { + "n": "LaetitiaG", + "e": "laetitia.grabot@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/time_frequency.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/425.json b/doc/sphinxext/prs/425.json new file mode 100644 index 00000000000..602914136d2 --- /dev/null +++ b/doc/sphinxext/prs/425.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "55697a4d6f585149ddacc459623d0b82c17d618e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 15 + }, + "doc/source/python_tutorial.rst": { + "a": 8, + "d": 2 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 1, + "d": 1 + }, + 
"examples/time_frequency/plot_source_power_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/cuda.py": { + "a": 1, + "d": 1 + }, + "mne/event.py": { + "a": 32, + "d": 5 + }, + "mne/fiff/tests/test_raw.py": { + "a": 7, + "d": 5 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 11, + "d": 1 + }, + "mne/utils.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4250.json b/doc/sphinxext/prs/4250.json new file mode 100644 index 00000000000..064cd9cb382 --- /dev/null +++ b/doc/sphinxext/prs/4250.json @@ -0,0 +1,131 @@ +{ + "merge_commit_sha": "edd2f0eec6ae3d5f3fb0ac39c91f8df753de7b41", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 17, + "d": 6 + }, + "doc/Makefile": { + "a": 3, + "d": 0 + }, + "doc/_static/flag-icon.css": { + "a": 23, + "d": 0 + }, + "doc/_static/font-awesome.css": { + "a": 2337, + "d": 0 + }, + "doc/_static/fontawesome-webfont.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fontawesome-webfont.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fontawesome-webfont.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fontawesome-webfont.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fr.svg": { + "a": 7, + "d": 0 + }, + "doc/_static/institutions.png": { + "a": 0, + "d": 0 + }, + "doc/_static/style.css": { + "a": 149, + "d": 26 + }, + "doc/_static/us.svg": { + "a": 18, + "d": 0 + }, + "doc/_templates/layout.html": { + "a": 23, + "d": 4 + }, + "doc/carousel.inc": { + "a": 87, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 2 + }, + "doc/documentation.rst": { + "a": 40, + "d": 15 + }, + "doc/index.rst": { + "a": 89, + "d": 83 + }, + "doc/python_reference.rst": { + "a": 2, + "d": 1 + }, + "doc/sphinxext/gen_commands.py": { + "a": 2, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 4 + }, + "examples/decoding/plot_receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 6, + "d": 6 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 8, + "d": 7 + }, + "tutorials/plot_receptive_field.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 3, + "d": 1 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 5, + "d": 4 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4254.json b/doc/sphinxext/prs/4254.json new file mode 100644 index 00000000000..cc1b7552e6e --- /dev/null +++ b/doc/sphinxext/prs/4254.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "28a05e1f1eeaaa82ee10101a135449e8dadf0993", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_label.py": { + "a": 22, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4258.json b/doc/sphinxext/prs/4258.json new file mode 100644 index 00000000000..f4ff5ec500e --- /dev/null +++ b/doc/sphinxext/prs/4258.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"85be8ca1db9d4749518597f30b6516376ae190df", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/426.json b/doc/sphinxext/prs/426.json new file mode 100644 index 00000000000..f97cb66f628 --- /dev/null +++ b/doc/sphinxext/prs/426.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6491f8b3a4b19c7a18b55f11c1e9f133a8ce4b51", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4263.json b/doc/sphinxext/prs/4263.json new file mode 100644 index 00000000000..21024ed74a2 --- /dev/null +++ b/doc/sphinxext/prs/4263.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d25b1a0fdabf0d0b2e9bf1d95c12042da1234a6d", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 3, + "d": 3 + }, + "mne/coreg.py": { + "a": 1, + "d": 0 + }, + "mne/surface.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4267.json b/doc/sphinxext/prs/4267.json new file mode 100644 index 00000000000..600ca416797 --- /dev/null +++ b/doc/sphinxext/prs/4267.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e78c637f054aaa9110c8ee7c6c4e1769e016ee48", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 12, + "d": 7 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/evoked.py": { + "a": 55, + "d": 18 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 21, + "d": 25 + }, + "mne/tests/test_dipole.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/427.json b/doc/sphinxext/prs/427.json new file mode 100644 index 00000000000..112fe53c3a5 --- /dev/null +++ b/doc/sphinxext/prs/427.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "abd356be9a845d3d504bb15f4b418ab7b7cedab1", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 218, + "d": 83 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4270.json b/doc/sphinxext/prs/4270.json new file mode 100644 index 00000000000..33d971e7fca --- /dev/null +++ b/doc/sphinxext/prs/4270.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6072ab04378c01557f29e67f1d1cbc5fa226943a", + "authors": [ + { + "n": "jdue", + "e": "jdue@dtu.dk" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4271.json b/doc/sphinxext/prs/4271.json new file mode 100644 index 00000000000..37adc7035c6 --- /dev/null +++ b/doc/sphinxext/prs/4271.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "67e27f74d616cea7ddae1cf78f67d078f37529e0", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 98, + "d": 37 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 0 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 25, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4273.json b/doc/sphinxext/prs/4273.json new file mode 100644 index 00000000000..4a820513f73 --- /dev/null +++ b/doc/sphinxext/prs/4273.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d6c7f899f87c37ec0a35e74bacb23302a3ccb4ab", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4274.json b/doc/sphinxext/prs/4274.json new file mode 100644 index 00000000000..9c513257858 --- /dev/null +++ b/doc/sphinxext/prs/4274.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4b4615d7f163263556edece4d5e4c1b28a165922", + "authors": [ + { + "n": "Laura Gwilliams", + "e": null + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 22, + "d": 23 + }, + "mne/decoding/tests/test_base.py": { + "a": 12, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4276.json b/doc/sphinxext/prs/4276.json new file mode 100644 index 00000000000..7bf946d7c4a --- /dev/null +++ b/doc/sphinxext/prs/4276.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "690bd2124f63b81766e3e9a4da326f6048c9d05d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 4, + "d": 4 + }, + "mne/viz/montage.py": { + "a": 29, + "d": 36 + }, + "mne/viz/tests/test_montage.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4277.json b/doc/sphinxext/prs/4277.json new file mode 100644 index 00000000000..5562fea205a --- /dev/null +++ b/doc/sphinxext/prs/4277.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a4ba5dd96fda159010f709e054eb0a9666d10d58", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/evoked.py": { + "a": 5, + "d": 3 + }, + "tutorials/plot_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_point_spread.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4278.json b/doc/sphinxext/prs/4278.json new file mode 100644 index 00000000000..bc088543356 --- /dev/null +++ 
b/doc/sphinxext/prs/4278.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "55f4fbf470af0e3b33de58530b95566aed3b8861", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4280.json b/doc/sphinxext/prs/4280.json new file mode 100644 index 00000000000..5abc318ad9e --- /dev/null +++ b/doc/sphinxext/prs/4280.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "685f6858315c6924c441d834976cfe7d55d9c2b1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/chpi.py": { + "a": 12, + "d": 3 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4281.json b/doc/sphinxext/prs/4281.json new file mode 100644 index 00000000000..a0c535a50ab --- /dev/null +++ b/doc/sphinxext/prs/4281.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cac45d4113276138fb50f9df252eeafd999ebf97", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4282.json b/doc/sphinxext/prs/4282.json new file mode 100644 index 00000000000..1b2f837bf58 --- /dev/null +++ b/doc/sphinxext/prs/4282.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "573ad7eedc333fc3640e3097522546395c5e1a95", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/topo.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4285.json b/doc/sphinxext/prs/4285.json new file mode 100644 index 00000000000..5688cce0734 --- /dev/null +++ b/doc/sphinxext/prs/4285.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b9f46fa05c8693f6cdc36f1009d2a5083c3a0bb7", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4289.json b/doc/sphinxext/prs/4289.json new file mode 100644 index 00000000000..53f1b7350a3 --- /dev/null +++ b/doc/sphinxext/prs/4289.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "525dab575f6003df9441a86358de5415c8e2eb1e", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 3, + "d": 3 + }, + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 71, + "d": 0 + }, + "mne/epochs.py": { + "a": 8, + "d": 5 + }, + "mne/viz/epochs.py": { + "a": 400, + "d": 120 + }, + "mne/viz/evoked.py": { + "a": 99, + "d": 66 + }, + "mne/viz/ica.py": { + "a": 8, + "d": 31 + }, + "mne/viz/tests/test_epochs.py": { + "a": 22, + "d": 6 + }, + "mne/viz/tests/test_evoked.py": { + "a": 13, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 32, + "d": 5 + }, + "tutorials/plot_visualize_epochs.py": { + "a": 16, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/429.json b/doc/sphinxext/prs/429.json new file mode 100644 index 00000000000..3346d48cbd5 --- /dev/null +++ b/doc/sphinxext/prs/429.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "10f590909a58b264440cb49cfd4855914e6be21e", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 51, + "d": 37 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4291.json b/doc/sphinxext/prs/4291.json new file mode 100644 index 00000000000..f48fd603f3f --- /dev/null +++ b/doc/sphinxext/prs/4291.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d40958f5cab696acb0fa407f9dd55431f9b7746f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 13, + "d": 16 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 7 + }, + "mne/transforms.py": { + "a": 4, + "d": 4 + }, + "mne/utils.py": { + "a": 12, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 5 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4292.json b/doc/sphinxext/prs/4292.json new file mode 100644 index 00000000000..1a0a71697bf --- /dev/null +++ b/doc/sphinxext/prs/4292.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3da80f58e68ab1ed8e2231976013f9da7f85eb77", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/event.py": { + "a": 13, + "d": 5 + }, + "mne/tests/test_event.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4293.json b/doc/sphinxext/prs/4293.json new file mode 100644 index 00000000000..81822928a4e --- /dev/null +++ b/doc/sphinxext/prs/4293.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8bf97d2a0763b5d317b98b26c0805bcf2bcd3df0", + "authors": [ + { + "n": "Phillip Alday", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 45, + "d": 9 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 41, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4294.json b/doc/sphinxext/prs/4294.json new file mode 100644 index 00000000000..7161f467f80 --- /dev/null +++ b/doc/sphinxext/prs/4294.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4e8f76472b28f089f6853bd6440e2d5add9be050", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 9 + }, + "mne/preprocessing/stim.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4295.json b/doc/sphinxext/prs/4295.json new file mode 100644 index 00000000000..fee798dee64 --- /dev/null +++ b/doc/sphinxext/prs/4295.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ca98e76c7c2b11dd0b220bc085c5a06eae524aee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 6, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4296.json b/doc/sphinxext/prs/4296.json new file mode 100644 index 00000000000..8332261b072 --- /dev/null +++ b/doc/sphinxext/prs/4296.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "60151f8621fb2e324428385258408c71bf58d8e2", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 43, + "d": 15 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 37, + "d": 8 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + 
"a": 48, + "d": 23 + }, + "mne/inverse_sparse/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 24, + "d": 9 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 133, + "d": 17 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 4, + "d": 4 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 21, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 25, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4297.json b/doc/sphinxext/prs/4297.json new file mode 100644 index 00000000000..9a1c0d45594 --- /dev/null +++ b/doc/sphinxext/prs/4297.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "26187fd29fa1f3cb769a216cf0a1df22c3be2626", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/manual/datasets_index.rst": { + "a": 11, + "d": 11 + }, + "doc/python_reference.rst": { + "a": 174, + "d": 326 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 4, + "d": 4 + }, + "mne/chpi.py": { + "a": 30, + "d": 30 + }, + "mne/connectivity/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/__init__.py": { + "a": 4, + "d": 1 + }, + "mne/datasets/brainstorm/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 8, + "d": 5 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 251, + "d": 58 + }, + "mne/io/proc_history.py": { + "a": 39, + "d": 42 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 5 + }, + "mne/realtime/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 4, + "d": 3 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 4, + "d": 2 + }, + "mne/simulation/tests/test_raw.py": { + "a": 16, + "d": 4 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/transforms.py": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_object_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4298.json b/doc/sphinxext/prs/4298.json new file mode 100644 index 00000000000..08dac61c5ab --- /dev/null +++ b/doc/sphinxext/prs/4298.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fba848755adab14ebc88c2b89cf8cca4b23b6a7e", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 13, + "d": 14 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 300, + "d": 223 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4299.json b/doc/sphinxext/prs/4299.json new file mode 100644 index 00000000000..ab39ac6c84f --- /dev/null +++ b/doc/sphinxext/prs/4299.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "75d17d52c6596234bd1e80d7206044aab07230dc", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "examples/io/plot_elekta_epochs.py": { + "a": 10, + "d": 10 + }, + 
"mne/event.py": { + "a": 1, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/43.json b/doc/sphinxext/prs/43.json new file mode 100644 index 00000000000..1ae4b7bae8c --- /dev/null +++ b/doc/sphinxext/prs/43.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "fe9f7b7836aeb8cd508b318fbd6d61c592f2fb32", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "bin/mne_compute_proj_ecg.py": { + "a": 9, + "d": 0 + }, + "bin/mne_compute_proj_eog.py": { + "a": 5, + "d": 4 + }, + "bin/mne_maxfilter.py": { + "a": 143, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxfilter.py": { + "a": 4, + "d": 2 + }, + "setup.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/430.json b/doc/sphinxext/prs/430.json new file mode 100644 index 00000000000..ea931906bca --- /dev/null +++ b/doc/sphinxext/prs/430.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9b91357c24632345d9c2562ff76787c24926512c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/event.py": { + "a": 39, + "d": 23 + }, + "mne/tests/test_event.py": { + "a": 13, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4300.json b/doc/sphinxext/prs/4300.json new file mode 100644 index 00000000000..a4619a4601a --- /dev/null +++ b/doc/sphinxext/prs/4300.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "14da6b7c037a9946d1bfd33e970e4c4957258385", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4303.json b/doc/sphinxext/prs/4303.json new file mode 100644 index 00000000000..1269c929a5e --- /dev/null +++ b/doc/sphinxext/prs/4303.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "ccd2974b621274a1cdc43b4f7f66708a39c720b9", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/visualization/plot_topo_compare_conditions.py": { + "a": 3, + "d": 3 + }, + "mne/evoked.py": { + "a": 6, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 72, + "d": 41 + }, + "mne/viz/tests/test_topo.py": { + "a": 5, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 33, + "d": 16 + }, + "mne/viz/utils.py": { + "a": 40, + "d": 0 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4306.json b/doc/sphinxext/prs/4306.json new file mode 100644 index 00000000000..430161dbab2 --- /dev/null +++ b/doc/sphinxext/prs/4306.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "000c9d29750c78b84af2e17b8d695055c14d5524", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4307.json b/doc/sphinxext/prs/4307.json new file mode 100644 index 00000000000..0530f8bca67 --- /dev/null +++ b/doc/sphinxext/prs/4307.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "30c44bc1eaa06274b42fb00351c26350e8c8b9c3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4310.json b/doc/sphinxext/prs/4310.json new file mode 100644 index 
00000000000..087523c5b4f --- /dev/null +++ b/doc/sphinxext/prs/4310.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "966442898b1db39fd4562a6999848653597a5d9c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/forward/_field_interpolation.py": { + "a": 6, + "d": 2 + }, + "mne/source_space.py": { + "a": 3, + "d": 3 + }, + "mne/transforms.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4315.json b/doc/sphinxext/prs/4315.json new file mode 100644 index 00000000000..3325163f408 --- /dev/null +++ b/doc/sphinxext/prs/4315.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a22466f326cdc0724b5dbd4ae5e85005193eef14", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_receptive_field.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/receptive_field.py": { + "a": 158, + "d": 81 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 70, + "d": 31 + }, + "tutorials/plot_receptive_field.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4316.json b/doc/sphinxext/prs/4316.json new file mode 100644 index 00000000000..ace42103d64 --- /dev/null +++ b/doc/sphinxext/prs/4316.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "acb6685bd2edee2216defd26d2eecc30275d51ed", + "authors": [ + { + "n": "Andrew R. Dykstra", + "e": null + } + ], + "changes": { + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/432.json b/doc/sphinxext/prs/432.json new file mode 100644 index 00000000000..4cf40eb3fc5 --- /dev/null +++ b/doc/sphinxext/prs/432.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b229e9dbe768998b32b83ff7b49474c197ff02e4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 16, + "d": 14 + }, + "mne/filter.py": { + "a": 105, + "d": 40 + }, + "mne/preprocessing/ecg.py": { + "a": 10, + "d": 4 + }, + "mne/preprocessing/eog.py": { + "a": 12, + "d": 5 + }, + "mne/preprocessing/ssp.py": { + "a": 9, + "d": 7 + }, + "mne/tests/test_filter.py": { + "a": 26, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4321.json b/doc/sphinxext/prs/4321.json new file mode 100644 index 00000000000..f92b5e4ae24 --- /dev/null +++ b/doc/sphinxext/prs/4321.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b69afd1ff3337ac84f219b26c53537a5c8ceb1b9", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 17, + "d": 11 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 14, + "d": 5 + }, + "mne/beamformer/_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 200, + "d": 85 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 59, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4322.json b/doc/sphinxext/prs/4322.json new file mode 100644 index 00000000000..7900ecf67af --- /dev/null +++ b/doc/sphinxext/prs/4322.json @@ -0,0 +1,127 @@ +{ + "merge_commit_sha": "b5363306d24ec645ea0d2b77a1ce652cfc4d1927", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "appveyor.yml": { + "a": 1, + "d": 1 + }, + 
"mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 35, + "d": 43 + }, + "mne/cov.py": { + "a": 24, + "d": 16 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 40, + "d": 23 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 4, + "d": 3 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 59, + "d": 47 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 0, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 14 + }, + "mne/io/meas_info.py": { + "a": 10, + "d": 0 + }, + "mne/io/pick.py": { + "a": 10, + "d": 13 + }, + "mne/io/proj.py": { + "a": 3, + "d": 1 + }, + "mne/io/tag.py": { + "a": 4, + "d": 4 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 6, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 20, + "d": 11 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 48, + "d": 56 + }, + "mne/simulation/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 19, + "d": 21 + }, + "mne/tests/test_chpi.py": { + "a": 11, + "d": 5 + }, + "mne/tests/test_cov.py": { + "a": 35, + "d": 39 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 0 + }, + "mne/transforms.py": { + "a": 2, + "d": 5 + }, + "mne/utils.py": { + "a": 19, + "d": 14 + }, + "mne/viz/tests/test_3d.py": { + "a": 17, + "d": 34 + }, + "mne/viz/tests/test_raw.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4324.json b/doc/sphinxext/prs/4324.json new file mode 100644 index 00000000000..fbf2fcf5e08 --- /dev/null +++ b/doc/sphinxext/prs/4324.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "671dcea98e7cef23a9e5678beee912491137f8a2", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "examples/forward/plot_forward_sensitivity_maps.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 12, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4325.json b/doc/sphinxext/prs/4325.json new file mode 100644 index 00000000000..96e2ee05ad6 --- /dev/null +++ b/doc/sphinxext/prs/4325.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "76ac750d307cc99bd67037cbc8bc46497362b0a5", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 10, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4326.json b/doc/sphinxext/prs/4326.json new file mode 100644 index 00000000000..8cd946c7297 --- /dev/null +++ b/doc/sphinxext/prs/4326.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1376596067158d348bf530a514e22ddb8337438c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/channels/data/montages/mgh60.elc": { + "a": 132, + "d": 0 + }, + "mne/channels/data/montages/mgh70.elc": { + "a": 152, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 10, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 18, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4332.json b/doc/sphinxext/prs/4332.json new file mode 100644 index 00000000000..8e16e1155ee --- /dev/null +++ 
b/doc/sphinxext/prs/4332.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "42323bfc7e7ecf306ba3bfe5aac953a2130d5bae", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/forward/plot_read_bem_surfaces.py": { + "a": 0, + "d": 32 + }, + "examples/visualization/plot_3d_to_2d.py": { + "a": 4, + "d": 3 + }, + "examples/visualization/plot_eeg_on_scalp.py": { + "a": 4, + "d": 4 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 6, + "d": 4 + }, + "mne/bem.py": { + "a": 6, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 4, + "d": 3 + }, + "mne/source_space.py": { + "a": 27, + "d": 5 + }, + "mne/surface.py": { + "a": 11, + "d": 0 + }, + "mne/tests/test_report.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 509, + "d": 3 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 4, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 69, + "d": 25 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_ecog.py": { + "a": 5, + "d": 3 + }, + "tutorials/plot_forward.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_source_alignment.py": { + "a": 98, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4333.json b/doc/sphinxext/prs/4333.json new file mode 100644 index 00000000000..4e1d77517bb --- /dev/null +++ b/doc/sphinxext/prs/4333.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "40a66bde3326263b2ef32dc002a260d09edcb2f5", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 23, + "d": 15 + }, + "mne/decoding/tests/test_base.py": { + "a": 78, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4337.json b/doc/sphinxext/prs/4337.json new file mode 100644 index 00000000000..41414b0c975 --- /dev/null +++ b/doc/sphinxext/prs/4337.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "07954161fc77636d7d28b00e5a4f1143a5c748e7", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 4, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 38, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4339.json b/doc/sphinxext/prs/4339.json new file mode 100644 index 00000000000..d29262e0e05 --- /dev/null +++ b/doc/sphinxext/prs/4339.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ad4ccbe285e6d92e3dac68fccda36da7658f49d1", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 6, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/434.json b/doc/sphinxext/prs/434.json new file mode 100644 index 00000000000..77194e3cd32 --- /dev/null +++ b/doc/sphinxext/prs/434.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "342555b5026488f25403263fe92b6f838944c6a6", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 2 + }, + "mne/event.py": { + "a": 169, + "d": 62 + }, + "mne/tests/test_event.py": { + "a": 65, + "d": 28 + } + } +} \ No newline at end 
of file diff --git a/doc/sphinxext/prs/4341.json b/doc/sphinxext/prs/4341.json new file mode 100644 index 00000000000..0ea479995da --- /dev/null +++ b/doc/sphinxext/prs/4341.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "ae380ee984031b9d70bcc4abf9161e0478d10427", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 76, + "d": 39 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 16, + "d": 10 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4344.json b/doc/sphinxext/prs/4344.json new file mode 100644 index 00000000000..c7803833130 --- /dev/null +++ b/doc/sphinxext/prs/4344.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "12de055f9f2755430f33ba81e785d75a80a75228", + "authors": [ + { + "n": "Annalisa Pascarella", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/source_space.py": { + "a": 45, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 31, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4345.json b/doc/sphinxext/prs/4345.json new file mode 100644 index 00000000000..402e6942bd5 --- /dev/null +++ b/doc/sphinxext/prs/4345.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d0ba4b355bc88165b9e29b379599f0468e1bd5e9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 14, + "d": 5 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 21, + "d": 9 + }, + "mne/viz/raw.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4346.json b/doc/sphinxext/prs/4346.json new file mode 100644 index 00000000000..ecffd0f7f8f --- /dev/null +++ b/doc/sphinxext/prs/4346.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "3dc19624816b02fe750de44c0100bb925bd67cb4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 3, + "d": 3 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 30, + "d": 5 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_visualize_raw.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4347.json b/doc/sphinxext/prs/4347.json new file mode 100644 index 00000000000..a5f44c8a3b0 --- /dev/null +++ b/doc/sphinxext/prs/4347.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "032c654121907a490d11dafb87b4e9ea27c1ed85", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 2 + }, + "examples/visualization/make_report.py": { + "a": 23, + "d": 10 + }, + "mne/commands/mne_report.py": { + "a": 6, + "d": 1 + }, + "mne/report.py": { + "a": 135, + "d": 131 + }, + "mne/tests/test_report.py": { + "a": 10, + "d": 0 + }, + "mne/transforms.py": { + "a": 14, + "d": 
9 + }, + "mne/viz/_3d.py": { + "a": 15, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4348.json b/doc/sphinxext/prs/4348.json new file mode 100644 index 00000000000..7359de7c91c --- /dev/null +++ b/doc/sphinxext/prs/4348.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ce59e1dd07acb9750178f512a18cc9e29c97a3da", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4349.json b/doc/sphinxext/prs/4349.json new file mode 100644 index 00000000000..c156bc871e7 --- /dev/null +++ b/doc/sphinxext/prs/4349.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "f6791a47f8e763bf5c35817fcf5a4c9448f9536f", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 4 + }, + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 22, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 15 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 20, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4351.json b/doc/sphinxext/prs/4351.json new file mode 100644 index 00000000000..a5a4a70ee95 --- /dev/null +++ b/doc/sphinxext/prs/4351.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "24ce31b48eb0921924573330149ba61f2b0e15ad", + "authors": [ + { + "n": "robintibor", + "e": "robintibor@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4354.json b/doc/sphinxext/prs/4354.json new file mode 100644 index 00000000000..2f31dc283dd --- /dev/null +++ b/doc/sphinxext/prs/4354.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "541bf857d7296fd1944fe59d63c6527d3b8771c1", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 21, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4355.json b/doc/sphinxext/prs/4355.json new file mode 100644 index 00000000000..a87f838a379 --- /dev/null +++ b/doc/sphinxext/prs/4355.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "6e61eb5147d25da0618b2d2b91b538141e9a8b49", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 16, + "d": 6 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/dipole.py": { + "a": 228, + "d": 74 + }, + "mne/source_space.py": { + "a": 22, + "d": 15 + }, + "mne/tests/test_dipole.py": { + "a": 51, + "d": 28 + }, + "tutorials/plot_dipole_fit.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4356.json b/doc/sphinxext/prs/4356.json new file mode 100644 index 00000000000..58f890ded2b --- /dev/null +++ b/doc/sphinxext/prs/4356.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5a9fe0332a768334b0e200a6b5ce50f0af3c4805", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 184, + "d": 
6 + }, + "mne/channels/layout.py": { + "a": 4, + "d": 32 + }, + "mne/channels/tests/test_channels.py": { + "a": 44, + "d": 3 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4357.json b/doc/sphinxext/prs/4357.json new file mode 100644 index 00000000000..f7c9d02df03 --- /dev/null +++ b/doc/sphinxext/prs/4357.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8ab4ac83359a789a8efbc94c217c3af9a44016a1", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/io/reference.py": { + "a": 14, + "d": 12 + }, + "mne/io/tests/test_reference.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4358.json b/doc/sphinxext/prs/4358.json new file mode 100644 index 00000000000..4001205aadf --- /dev/null +++ b/doc/sphinxext/prs/4358.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3c69124e598b101e50996d110f5e64e360216b8f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 60, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/436.json b/doc/sphinxext/prs/436.json new file mode 100644 index 00000000000..6f800f965b0 --- /dev/null +++ b/doc/sphinxext/prs/436.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b05cccb509800f1c93af912cf0a87bc733f87159", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "mne/forward.py": { + "a": 111, + "d": 0 + }, + "mne/tests/test_forward.py": { + "a": 93, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4362.json b/doc/sphinxext/prs/4362.json new file mode 100644 index 00000000000..694db274e7e --- /dev/null +++ b/doc/sphinxext/prs/4362.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "99e10b1c90fa665b603f7dc93c3d2c3a5ba4ee04", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4363.json b/doc/sphinxext/prs/4363.json new file mode 100644 index 00000000000..82fc304fbda --- /dev/null +++ b/doc/sphinxext/prs/4363.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "15a0856f15538200b8b2eb236b610342502551cf", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4365.json b/doc/sphinxext/prs/4365.json new file mode 100644 index 00000000000..113b1334ebd --- /dev/null +++ b/doc/sphinxext/prs/4365.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cbcbc43e98369622a90b8b025876be5b14f3f287", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/connectivity/spectral.py": { + "a": 2, + "d": 0 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/437.json b/doc/sphinxext/prs/437.json new file mode 100644 index 00000000000..bbacb4ed899 --- /dev/null +++ b/doc/sphinxext/prs/437.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "7f537474e7a4846d62ea077b9c56b3d22736815f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 1 + }, + 
"examples/plot_read_and_write_raw_data.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/tests/data/fsaverage_audvis-meg-cropped-lh.stc": { + "a": 0, + "d": 0 + }, + "mne/fiff/tests/data/fsaverage_audvis-meg-cropped-rh.stc": { + "a": 0, + "d": 0 + }, + "mne/forward.py": { + "a": 18, + "d": 22 + }, + "mne/minimum_norm/inverse.py": { + "a": 12, + "d": 5 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 17, + "d": 11 + }, + "mne/mixed_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 19, + "d": 16 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 22, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4371.json b/doc/sphinxext/prs/4371.json new file mode 100644 index 00000000000..c13f0fce75d --- /dev/null +++ b/doc/sphinxext/prs/4371.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "593da2a002b9cfe3df158fa7017be6fa67f8dbb6", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 38, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4373.json b/doc/sphinxext/prs/4373.json new file mode 100644 index 00000000000..fb2c20b2cb6 --- /dev/null +++ b/doc/sphinxext/prs/4373.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e68219bf8afe4c948534aa625cf1d010e85148ff", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 13, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4376.json b/doc/sphinxext/prs/4376.json new file mode 100644 index 00000000000..c0e93d5b4b2 --- /dev/null +++ b/doc/sphinxext/prs/4376.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f2edf685353b54e3d307bbb77115fd927b84d1d6", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 9, + "d": 13 + }, + "mne/preprocessing/xdawn.py": { + "a": 4, + "d": 8 + }, + "mne/utils.py": { + "a": 1, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4379.json b/doc/sphinxext/prs/4379.json new file mode 100644 index 00000000000..da56cd8a1f9 --- /dev/null +++ b/doc/sphinxext/prs/4379.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "67d9cec9f99016221415790c220caa203d386916", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "mne/dipole.py": { + "a": 5, + "d": 25 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/438.json b/doc/sphinxext/prs/438.json new file mode 100644 index 00000000000..701682ad428 --- /dev/null +++ b/doc/sphinxext/prs/438.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0037034f90b49541592cc9fa08aecf12dcfb397c", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "mne/simulation/source.py": { + "a": 19, + "d": 7 + }, + "mne/simulation/tests/test_source.py": { + "a": 51, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4380.json b/doc/sphinxext/prs/4380.json new file mode 100644 index 00000000000..151367f7c3e --- /dev/null +++ b/doc/sphinxext/prs/4380.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "62d2139c9c05b37835111cc15d67416558ee6fa2", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + 
"changes": { + "mne/commands/utils.py": { + "a": 38, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4382.json b/doc/sphinxext/prs/4382.json new file mode 100644 index 00000000000..47dda87e2da --- /dev/null +++ b/doc/sphinxext/prs/4382.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "dddbd843e46eaa0dd91d90880eaf4034f4ce05bc", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_rereference_eeg.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 41, + "d": 33 + }, + "mne/io/reference.py": { + "a": 83, + "d": 55 + }, + "mne/io/tests/test_reference.py": { + "a": 24, + "d": 7 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 12, + "d": 10 + }, + "mne/tests/test_proj.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ssp.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_compute_covariance.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_eeg_erp.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4383.json b/doc/sphinxext/prs/4383.json new file mode 100644 index 00000000000..ac8d632b70c --- /dev/null +++ b/doc/sphinxext/prs/4383.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c2fc6dba536f159068f2d0b3cb27839d867db7c0", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_xhemi.py": { + "a": 47, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 57, + "d": 25 + }, + "mne/surface.py": { + "a": 109, + "d": 78 + }, + "mne/tests/test_surface.py": { + "a": 20, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4384.json b/doc/sphinxext/prs/4384.json new file mode 100644 index 00000000000..ba6cc95ea8a --- /dev/null +++ b/doc/sphinxext/prs/4384.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6216b79bd9f8ed7c50bfa11c27c5d130b4b26dba", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 60, + "d": 23 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 14, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4387.json b/doc/sphinxext/prs/4387.json new file mode 100644 index 00000000000..e184fa9a85f --- /dev/null +++ b/doc/sphinxext/prs/4387.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5d1fc7af7301a26f3415a876efee3c08cda28bb1", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 1 + }, + "MANIFEST.in": { + "a": 41, + "d": 19 + }, + "mne/viz/tests/__init__py": { + "a": 0, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/4388.json b/doc/sphinxext/prs/4388.json new file mode 100644 index 00000000000..a1737ffaa9c --- /dev/null +++ b/doc/sphinxext/prs/4388.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6d563135f4d9cb0312b5492b78d204f2e1580ce9", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4389.json b/doc/sphinxext/prs/4389.json new file mode 100644 index 00000000000..10fa412dc85 --- /dev/null +++ b/doc/sphinxext/prs/4389.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0b58905d8b4da181c050866f77925db745e9d1b9", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "mne/beamformer/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 353, + "d": 102 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4394.json b/doc/sphinxext/prs/4394.json new file mode 100644 index 00000000000..e9b25f26f67 --- /dev/null +++ b/doc/sphinxext/prs/4394.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6840bb56ccbc642f77607072cc549ce7e5c86794", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4399.json b/doc/sphinxext/prs/4399.json new file mode 100644 index 00000000000..b0bbf54e3ed --- /dev/null +++ b/doc/sphinxext/prs/4399.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e2cb866381273824459909270e8188c62a645822", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/44.json b/doc/sphinxext/prs/44.json new file mode 100644 index 00000000000..6eab5ce5f54 --- /dev/null +++ b/doc/sphinxext/prs/44.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9d946c7ff78141433fa7ed5fa7bb443ca855659c", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "bin/mne_compute_proj_ecg.py": { + "a": 13, + "d": 13 + }, + "bin/mne_compute_proj_eog.py": { + "a": 13, + "d": 13 + }, + "bin/mne_maxfilter.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/440.json b/doc/sphinxext/prs/440.json new file mode 100644 index 00000000000..fa391e318ed --- /dev/null +++ b/doc/sphinxext/prs/440.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "60cd1bece96954c0f56f8fb415f2c5b39bace109", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 37, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4400.json b/doc/sphinxext/prs/4400.json new file mode 100644 index 00000000000..4b9b1d9d8cd --- /dev/null +++ b/doc/sphinxext/prs/4400.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c0013f35286e0134e37f8c7109f86d5c5c1b49bd", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/channels/data/montages/10-5_EGI129.csd": { + "a": 0, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 42, + "d": 40 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/4401.json b/doc/sphinxext/prs/4401.json new file mode 100644 index 00000000000..d4fe251f9b5 --- /dev/null +++ b/doc/sphinxext/prs/4401.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1a80a0e3b05b75bb96af3834757814d2c8ca83a1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 6, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4405.json b/doc/sphinxext/prs/4405.json new file mode 100644 index 00000000000..2f3da189af7 --- /dev/null +++ b/doc/sphinxext/prs/4405.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "828c241e66f5e4f1b26b96196e6beab880b230a2", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4406.json b/doc/sphinxext/prs/4406.json new file mode 100644 index 00000000000..8a862f950c4 --- /dev/null +++ b/doc/sphinxext/prs/4406.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "778d532a1e3ff20d9d9e1a0755f8001133206c7e", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/441.json b/doc/sphinxext/prs/441.json new file mode 100644 index 00000000000..7352bb14ab1 --- /dev/null +++ b/doc/sphinxext/prs/441.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0b3f22b7ff8adacecb05328b70b548547f773660", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/tests/data/fsaverage_audvis-meg-cropped-lh.stc": { + "a": 0, + "d": 0 + }, + "mne/fiff/tests/data/fsaverage_audvis-meg-cropped-rh.stc": { + "a": 0, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 4, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4413.json b/doc/sphinxext/prs/4413.json new file mode 100644 index 00000000000..4de72ca6972 --- /dev/null +++ b/doc/sphinxext/prs/4413.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "58e051aba3b3a35ae5b0db2c0d49339223da1ce2", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/commands/mne_coreg.py": { + "a": 3, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 4, + "d": 2 + }, + "mne/gui/_coreg_gui.py": { + "a": 92, + "d": 76 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4414.json b/doc/sphinxext/prs/4414.json new file mode 100644 index 00000000000..9e7e2d277c6 --- /dev/null +++ b/doc/sphinxext/prs/4414.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "7ad946f01bdc901581efcae235c6a207d9d656f1", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "circle.yml": { + "a": 7, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/manual/datasets_index.rst": { + "a": 11, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_metadata_query.py": { + "a": 108, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + 
"d": 0 + }, + "mne/datasets/kiloword/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/kiloword/kiloword.py": { + "a": 55, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 11, + "d": 4 + }, + "mne/datasets/visual_92_categories/__init__.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/visual_92_categories/visual_92_categories.py": { + "a": 4, + "d": 22 + }, + "mne/epochs.py": { + "a": 210, + "d": 47 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/io/constants.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 181, + "d": 28 + }, + "mne/utils.py": { + "a": 9, + "d": 5 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 7, + "d": 0 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 0, + "d": 2 + }, + "tutorials/plot_metadata_epochs.py": { + "a": 109, + "d": 0 + }, + "tutorials/plot_object_epochs.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4416.json b/doc/sphinxext/prs/4416.json new file mode 100644 index 00000000000..268b520bf50 --- /dev/null +++ b/doc/sphinxext/prs/4416.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "49007cf33e6674ffe3049e0dae375249dc9a3d2f", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4418.json b/doc/sphinxext/prs/4418.json new file mode 100644 index 00000000000..bd8cf6b9037 --- /dev/null +++ b/doc/sphinxext/prs/4418.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "62627553e06949ac0bc7e92fffcf07031976cbb8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 7, + "d": 1 + }, + "mne/channels/interpolation.py": { + "a": 4, + "d": 2 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 13, + "d": 0 + }, + "mne/forward/_field_interpolation.py": { + "a": 4, + "d": 2 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/442.json b/doc/sphinxext/prs/442.json new file mode 100644 index 00000000000..790744c0502 --- /dev/null +++ b/doc/sphinxext/prs/442.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f0402859017622176f136c72c6c113b7031faaf1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_viz.py": { + "a": 10, + "d": 1 + }, + "mne/viz.py": { + "a": 50, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4422.json b/doc/sphinxext/prs/4422.json new file mode 100644 index 00000000000..30388ea7550 --- /dev/null +++ b/doc/sphinxext/prs/4422.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "51d554ef9a2530877899a7eac33090851e8110ff", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 15, + "d": 6 + }, + "mne/time_frequency/tfr.py": { + "a": 17, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4424.json b/doc/sphinxext/prs/4424.json new file mode 100644 index 00000000000..bd18b8431fa --- 
/dev/null +++ b/doc/sphinxext/prs/4424.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b3a54072cd67068269d300d51def4014c1af1719", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 4, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 14, + "d": 7 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 11 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4426.json b/doc/sphinxext/prs/4426.json new file mode 100644 index 00000000000..9d4e06fae14 --- /dev/null +++ b/doc/sphinxext/prs/4426.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a5677e94ea2f32ed9b5d7175c4215f44051bd2f8", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 83, + "d": 31 + }, + "mne/tests/test_source_estimate.py": { + "a": 84, + "d": 36 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4427.json b/doc/sphinxext/prs/4427.json new file mode 100644 index 00000000000..ed80377a1e6 --- /dev/null +++ b/doc/sphinxext/prs/4427.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5d8d11a665f2d2736cdbac0cadb832b5f78c9e76", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 5, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4428.json b/doc/sphinxext/prs/4428.json new file mode 100644 index 00000000000..0bf6ab44a5f --- /dev/null +++ b/doc/sphinxext/prs/4428.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3791f9c945f5f411e87a19dbf05295bec891c79b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_docstring_parameters.py": { + "a": 125, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4429.json b/doc/sphinxext/prs/4429.json new file mode 100644 index 00000000000..0e6420658e3 --- /dev/null +++ b/doc/sphinxext/prs/4429.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bde61eddb0dc133f2e500a7429e2ed1744358416", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 41, + "d": 26 + }, + "Makefile": { + "a": 13, + "d": 16 + }, + "appveyor.yml": { + "a": 3, + "d": 4 + }, + "mne/io/tests/test_pick.py": { + "a": 3, + "d": 2 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/443.json b/doc/sphinxext/prs/443.json new file mode 100644 index 00000000000..9b866e8d0a0 --- /dev/null +++ b/doc/sphinxext/prs/443.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "32d2ec90d60b132d6e893bfe954937f975297ecd", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 6, + "d": 3 + }, + "mne/fiff/evoked.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4432.json b/doc/sphinxext/prs/4432.json new file mode 100644 index 00000000000..9da1b0d10f1 --- /dev/null +++ b/doc/sphinxext/prs/4432.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "74f9b3fd9f63795a7e4f81a284cc783107ce6550", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, 
+ "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4433.json b/doc/sphinxext/prs/4433.json new file mode 100644 index 00000000000..7a7efcbfaf4 --- /dev/null +++ b/doc/sphinxext/prs/4433.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "578613071dcb1bd4bc4ecb034ca072adb2c9b55a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 3, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4435.json b/doc/sphinxext/prs/4435.json new file mode 100644 index 00000000000..0d16346bb7a --- /dev/null +++ b/doc/sphinxext/prs/4435.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "01ba44236708589fbc6f7dbd566b0c83ff5e6835", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 15, + "d": 19 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/csp.py": { + "a": 19, + "d": 35 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4437.json b/doc/sphinxext/prs/4437.json new file mode 100644 index 00000000000..c01d1f84c05 --- /dev/null +++ b/doc/sphinxext/prs/4437.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8f4e1d8c97d88af5d98beb5a33587bf3acd52c2a", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4439.json b/doc/sphinxext/prs/4439.json new file mode 100644 index 00000000000..05d783c2cc5 --- /dev/null +++ b/doc/sphinxext/prs/4439.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e2c5128f6a6a4060870f95281587a1b5db023ae7", + "authors": [ + { + "n": "Lukáš Hejtmánek", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/444.json b/doc/sphinxext/prs/444.json new file mode 100644 index 00000000000..3f4764b8e21 --- /dev/null +++ b/doc/sphinxext/prs/444.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "169f52bb934ce68fcdea2ca804377d6eb02230f6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 72, + "d": 0 + }, + "mne/fiff/tests/test_pick.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/tests/test_raw.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4440.json b/doc/sphinxext/prs/4440.json new file mode 100644 index 00000000000..fa2dd1eb63e --- /dev/null +++ b/doc/sphinxext/prs/4440.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "32a7c88c20151e7086f6fda0dc517b5acff8b6da", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4441.json b/doc/sphinxext/prs/4441.json new file mode 100644 index 00000000000..e814d44497d --- /dev/null +++ b/doc/sphinxext/prs/4441.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "af0753b6627e8eec958c84fa89face45bb4eac84", + "authors": [ + { + "n": "Clemens Brunner", 
+ "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4442.json b/doc/sphinxext/prs/4442.json new file mode 100644 index 00000000000..81e8491a5ae --- /dev/null +++ b/doc/sphinxext/prs/4442.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cb6a7ff692c6625dd8a59470f9706a627e3e2571", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4443.json b/doc/sphinxext/prs/4443.json new file mode 100644 index 00000000000..ea670bb5194 --- /dev/null +++ b/doc/sphinxext/prs/4443.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "79a0eb102fa3a1cd58b0574ae97c28341803e8b4", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "tutorials/plot_forward.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4444.json b/doc/sphinxext/prs/4444.json new file mode 100644 index 00000000000..f9abc2d6f98 --- /dev/null +++ b/doc/sphinxext/prs/4444.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "25c1e644d64dc6ec83776508591c7206031a15a9", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/gui/_coreg_gui.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_file_traits.py": { + "a": 36, + "d": 4 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 15, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4447.json b/doc/sphinxext/prs/4447.json new file mode 100644 index 00000000000..b1edabde87d --- /dev/null +++ b/doc/sphinxext/prs/4447.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a412bac98d22ee759d67e7977bdf953bbdb2f120", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4448.json b/doc/sphinxext/prs/4448.json new file mode 100644 index 00000000000..96e96b067ad --- /dev/null +++ b/doc/sphinxext/prs/4448.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b956419dfcf9c615c0c08784689d3d79f7f662e8", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "tutorials/plot_object_epochs.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_object_raw.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4449.json b/doc/sphinxext/prs/4449.json new file mode 100644 index 00000000000..aa8d429f968 --- /dev/null +++ b/doc/sphinxext/prs/4449.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fec3aae7e78cba4da6ce790265b54817430e4aa5", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 10, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4451.json b/doc/sphinxext/prs/4451.json new file mode 100644 index 00000000000..544072d1b76 --- /dev/null +++ b/doc/sphinxext/prs/4451.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "71eb161e3e95dbc0dbb456fd97c247bc85c01fb0", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } 
+ ], + "changes": { + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4453.json b/doc/sphinxext/prs/4453.json new file mode 100644 index 00000000000..431ea34455e --- /dev/null +++ b/doc/sphinxext/prs/4453.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "372bdcecce8272ada0094583a06d66b1d6d2e565", + "authors": [ + { + "n": "jaeilepp", + "e": "jaeilepp@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4456.json b/doc/sphinxext/prs/4456.json new file mode 100644 index 00000000000..a9da4fa0d35 --- /dev/null +++ b/doc/sphinxext/prs/4456.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "916b168270d01d81f3cc6286190aa83bdceee6bd", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 16, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4458.json b/doc/sphinxext/prs/4458.json new file mode 100644 index 00000000000..150a7d4820e --- /dev/null +++ b/doc/sphinxext/prs/4458.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1486a513435e7fe030543b3691c00c31fdbfd401", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 5, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/source_space.py": { + "a": 6, + "d": 5 + }, + "mne/tests/test_dipole.py": { + "a": 6, + "d": 5 + }, + "mne/tests/test_source_space.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4460.json b/doc/sphinxext/prs/4460.json new file mode 100644 index 00000000000..9593ce53d87 --- /dev/null +++ b/doc/sphinxext/prs/4460.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f9f98e189f0ab6f74dcb31acae2329bb120399e6", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/tests/test_forward.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4461.json b/doc/sphinxext/prs/4461.json new file mode 100644 index 00000000000..c85460d6428 --- /dev/null +++ b/doc/sphinxext/prs/4461.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "134c50f731beed3913f2e2c24a0c3c60aae952f2", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_run_ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 14, + "d": 0 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4464.json b/doc/sphinxext/prs/4464.json new file mode 100644 index 00000000000..e11b1726605 --- /dev/null +++ b/doc/sphinxext/prs/4464.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3f40409c849080e6dd5a62c460427c3bad2992f3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 11, + "d": 3 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/4465.json b/doc/sphinxext/prs/4465.json new file mode 100644 index 00000000000..b59a9f51590 --- /dev/null +++ b/doc/sphinxext/prs/4465.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5c8e9665e674663e1ed5ae4c3093a91a2378412a", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 52, + "d": 89 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4466.json b/doc/sphinxext/prs/4466.json new file mode 100644 index 00000000000..62e11c1845d --- /dev/null +++ b/doc/sphinxext/prs/4466.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c439aef13a8ee2e600c34a0acf70899ea9d3e0b7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4468.json b/doc/sphinxext/prs/4468.json new file mode 100644 index 00000000000..c58dfd99e8f --- /dev/null +++ b/doc/sphinxext/prs/4468.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "601a98f580aafbee3ea7687d91ffda7e83b57af5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 18, + "d": 8 + }, + "doc/documentation.rst": { + "a": 0, + "d": 1 + }, + "doc/manual/cookbook.rst": { + "a": 2, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 15, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 22, + "d": 22 + }, + "examples/decoding/decoding_rsa.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 3, + "d": 1 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 3, + "d": 3 + }, + "examples/preprocessing/plot_resample.py": { + "a": 0, + "d": 2 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 6, + "d": 6 + }, + "mne/decoding/mixin.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 1 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_compute_covariance.py": { + "a": 7, + "d": 5 + }, + "tutorials/plot_creating_data_structures.py": { + "a": 5, + "d": 4 + }, + "tutorials/plot_info.py": { + "a": 15, + "d": 17 + }, + "tutorials/plot_modifying_data_inplace.py": { + "a": 11, + "d": 12 + }, + "tutorials/plot_object_epochs.py": { + "a": 6, + "d": 8 + }, + "tutorials/plot_object_evoked.py": { + "a": 6, + "d": 4 + }, + "tutorials/plot_object_raw.py": { + "a": 15, + "d": 18 + }, + "tutorials/plot_python_intro.py": { + "a": 24, + "d": 24 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/447.json b/doc/sphinxext/prs/447.json new file mode 100644 index 00000000000..5f0729af88c --- /dev/null +++ b/doc/sphinxext/prs/447.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f0ec89479364df5ab4dad5ddf099938cc92e2136", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/parallel.py": { + "a": 38, + "d": 6 + }, + "mne/stats/cluster_level.py": { + "a": 2, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4470.json b/doc/sphinxext/prs/4470.json new file mode 100644 index 00000000000..69c6e864e1c --- /dev/null +++ b/doc/sphinxext/prs/4470.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "61a531a78e46a6d042c5f14e238dc7fec14d8a62", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 36, + "d": 0 + }, + "mne/io/base.py": { + "a": 52, + "d": 33 + }, + "mne/tests/test_annotations.py": { + "a": 93, + "d": 6 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4476.json b/doc/sphinxext/prs/4476.json new file mode 100644 index 00000000000..6941127e947 --- /dev/null +++ b/doc/sphinxext/prs/4476.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "93b342eb589471c1d6ec46725cd61fcf8d312d53", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 13, + "d": 4 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 6, + "d": 5 + }, + "mne/epochs.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4478.json b/doc/sphinxext/prs/4478.json new file mode 100644 index 00000000000..202501a7403 --- /dev/null +++ b/doc/sphinxext/prs/4478.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7097a122d682a7b09dcee6c2be0de38a3239a141", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 158, + "d": 150 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 87, + "d": 61 + }, + "mne/stats/tests/test_permutations.py": { + "a": 10, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/448.json b/doc/sphinxext/prs/448.json new file mode 100644 index 00000000000..66821eeefc0 --- /dev/null +++ b/doc/sphinxext/prs/448.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a94e6db67890d95be0cda54437f1e647b3e3a9f6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cuda.py": { + "a": 53, + "d": 34 + }, + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 4, + "d": 16 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 21, + "d": 10 + }, + "mne/utils.py": { + "a": 49, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4482.json b/doc/sphinxext/prs/4482.json new file mode 100644 index 00000000000..94372d4eeff --- /dev/null +++ b/doc/sphinxext/prs/4482.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "27413cbfcbbec4eab0778e80439eb34e93dd3e90", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 7, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 29, + "d": 18 + }, + "mne/channels/tests/test_montage.py": { + "a": 14, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4483.json b/doc/sphinxext/prs/4483.json new file mode 
100644 index 00000000000..2e06f000d79 --- /dev/null +++ b/doc/sphinxext/prs/4483.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "be4c1739167643d4b14aba3780b35d3d6b83eb3f", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 13, + "d": 6 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4485.json b/doc/sphinxext/prs/4485.json new file mode 100644 index 00000000000..8a134abfcfa --- /dev/null +++ b/doc/sphinxext/prs/4485.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "6286b04e1214d0be8c2cda8c16ae0d1c8c8db2d8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_search_light.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 10, + "d": 6 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 8, + "d": 8 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 2, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 9, + "d": 9 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 4, + "d": 3 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 3, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 1 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 2, + "d": 1 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 17 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4487.json b/doc/sphinxext/prs/4487.json new file mode 100644 index 00000000000..e1edc23ea02 --- /dev/null +++ b/doc/sphinxext/prs/4487.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "171996ee7bd014993448d6222ff1f48c3bdcaaea", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 5, + "d": 3 + }, + "doc/conf.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4488.json b/doc/sphinxext/prs/4488.json new file mode 100644 index 00000000000..790d348007b --- /dev/null +++ b/doc/sphinxext/prs/4488.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "20cead898c64b5f78f513ec97ce84b4ef5ede366", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 1 + }, + "doc/_templates/layout.html": { + "a": 24, + "d": 3 + }, + "doc/conf.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4490.json b/doc/sphinxext/prs/4490.json new file mode 100644 index 00000000000..a856022e2b7 --- /dev/null +++ b/doc/sphinxext/prs/4490.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aa371d291bb0b373b55090cbaa3336dc2507dc1d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "appveyor.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4492.json b/doc/sphinxext/prs/4492.json new file mode 100644 index 00000000000..ab5c678c4b4 --- /dev/null +++ b/doc/sphinxext/prs/4492.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"96bc9909107b078701c86c9d69004a357f1d9a65", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 7, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4495.json b/doc/sphinxext/prs/4495.json new file mode 100644 index 00000000000..56d063cd9dd --- /dev/null +++ b/doc/sphinxext/prs/4495.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aa6ad318087e179c1984df2377399a89c86858af", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/tests/test_montage.py": { + "a": 27, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/45.json b/doc/sphinxext/prs/45.json new file mode 100644 index 00000000000..ad65eb961fb --- /dev/null +++ b/doc/sphinxext/prs/45.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d622caa3ffdcb74c561141cb4b6dbf1e8e2654a5", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "bin/mne_compute_proj_ecg.py": { + "a": 15, + "d": 3 + }, + "bin/mne_compute_proj_eog.py": { + "a": 15, + "d": 3 + }, + "mne/preprocessing/ssp.py": { + "a": 23, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/450.json b/doc/sphinxext/prs/450.json new file mode 100644 index 00000000000..ac968144623 --- /dev/null +++ b/doc/sphinxext/prs/450.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2fdd8867b0209f8b97ce99c1120ec4cc37d7e84f", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4500.json b/doc/sphinxext/prs/4500.json new file mode 100644 index 00000000000..ec1680d8930 --- /dev/null +++ b/doc/sphinxext/prs/4500.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2fd1241562e4086f6ae046994ff568af3e086817", + "authors": [ + { + "n": "Alejandro Weinstein", + "e": "alejandro.weinstein@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4504.json b/doc/sphinxext/prs/4504.json new file mode 100644 index 00000000000..c78503968c6 --- /dev/null +++ b/doc/sphinxext/prs/4504.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "ed67c91916022b2d6afd47aa2d01ea23d980336e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/carousel.inc": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_3d_to_2d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 5, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 12, + "d": 4 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 7, + "d": 7 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4505.json b/doc/sphinxext/prs/4505.json new file mode 100644 index 00000000000..2b815dfeb6a --- /dev/null +++ b/doc/sphinxext/prs/4505.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "881724422db9c50d45b13b0c374adddea7d14fc0", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4509.json b/doc/sphinxext/prs/4509.json new file mode 100644 index 
00000000000..7d7c20b90d8 --- /dev/null +++ b/doc/sphinxext/prs/4509.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "65ebb736fb2d236609b32ba1a6283072d16c4138", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/451.json b/doc/sphinxext/prs/451.json new file mode 100644 index 00000000000..579f1f26656 --- /dev/null +++ b/doc/sphinxext/prs/451.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ae3708758ac342e123cf333f800ef95b7c2c0707", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 41, + "d": 41 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4510.json b/doc/sphinxext/prs/4510.json new file mode 100644 index 00000000000..9d707b4317a --- /dev/null +++ b/doc/sphinxext/prs/4510.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "187f098409b9688c91f7843bdafec97db2df9aa5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/pick.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 15, + "d": 11 + }, + "mne/viz/ica.py": { + "a": 10, + "d": 9 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 6, + "d": 8 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 17, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4511.json b/doc/sphinxext/prs/4511.json new file mode 100644 index 00000000000..aae55b21d1c --- /dev/null +++ b/doc/sphinxext/prs/4511.json @@ -0,0 +1,231 @@ +{ + "merge_commit_sha": "e5498454977ec9208011c48146dfac6fc9d84ca9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 2, + "d": 4 + }, + "examples/visualization/plot_evoked_whitening.py": { + "a": 1, + "d": 2 + }, + "examples/visualization/plot_topo_compare_conditions.py": { + "a": 1, + "d": 9 + }, + "mne/beamformer/_rap_music.py": { + "a": 4, + "d": 4 + }, + "mne/channels/channels.py": { + "a": 33, + "d": 21 + }, + "mne/channels/layout.py": { + "a": 16, + "d": 18 + }, + "mne/cov.py": { + "a": 7, + "d": 15 + }, + "mne/decoding/ems.py": { + "a": 4, + "d": 6 + }, + "mne/decoding/transformer.py": { + "a": 5, + "d": 6 + }, + "mne/epochs.py": { + "a": 14, + "d": 26 + }, + "mne/evoked.py": { + "a": 5, + "d": 5 + }, + "mne/filter.py": { + "a": 17, + "d": 34 + }, + "mne/io/base.py": { + "a": 22, + "d": 72 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 6, + "d": 7 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 2 + }, + "mne/io/pick.py": { + "a": 161, + "d": 18 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 66, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 32, + "d": 51 + }, + "mne/preprocessing/otp.py": { + "a": 3, + "d": 6 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/epochs.py": { + "a": 1, + "d": 2 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 6, + "d": 12 + }, + "mne/realtime/mockclient.py": { + "a": 9, + "d": 6 + }, + "mne/stats/regression.py": 
{ + "a": 5, + "d": 7 + }, + "mne/time_frequency/ar.py": { + "a": 3, + "d": 5 + }, + "mne/time_frequency/csd.py": { + "a": 7, + "d": 16 + }, + "mne/time_frequency/psd.py": { + "a": 4, + "d": 9 + }, + "mne/time_frequency/tfr.py": { + "a": 33, + "d": 40 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/utils/check.py": { + "a": 0, + "d": 17 + }, + "mne/utils/docs.py": { + "a": 23, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 1, + "d": 17 + }, + "mne/viz/epochs.py": { + "a": 14, + "d": 18 + }, + "mne/viz/evoked.py": { + "a": 27, + "d": 28 + }, + "mne/viz/ica.py": { + "a": 20, + "d": 29 + }, + "mne/viz/raw.py": { + "a": 6, + "d": 5 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 17, + "d": 20 + }, + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 3, + "d": 6 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 2, + "d": 5 + }, + "tutorials/plot_artifacts_correction_maxwell_filtering.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 1, + "d": 3 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 5, + "d": 10 + }, + "tutorials/plot_compute_covariance.py": { + "a": 5, + "d": 7 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_introduction.py": { + "a": 6, + "d": 6 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 3, + "d": 5 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 1, + "d": 3 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4512.json b/doc/sphinxext/prs/4512.json new file mode 100644 index 00000000000..cef59874e54 --- /dev/null +++ b/doc/sphinxext/prs/4512.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "39217c4fcace87cdb19fc09f3cbaeb94cf4a9a83", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/constants.py": { + "a": 142, + "d": 116 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 215, + "d": 219 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 43, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4515.json b/doc/sphinxext/prs/4515.json new file mode 100644 index 00000000000..e248af14368 --- /dev/null +++ b/doc/sphinxext/prs/4515.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c3350041127bebed517bfc729f3b7e0a5ccc944a", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 29, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4519.json b/doc/sphinxext/prs/4519.json new file mode 100644 index 00000000000..cef567cc3e2 --- /dev/null +++ b/doc/sphinxext/prs/4519.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bb92dbbcd9be1b03e846f8ca2d07d0099400e1bf", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/452.json b/doc/sphinxext/prs/452.json new file mode 100644 index 00000000000..8baef62c04b --- /dev/null +++ b/doc/sphinxext/prs/452.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"c0b8dfcc84ea97682e79a13d06526caaff03695f", + "authors": [ + { + "n": "Christoph Dinh", + "e": null + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4520.json b/doc/sphinxext/prs/4520.json new file mode 100644 index 00000000000..f2971251d01 --- /dev/null +++ b/doc/sphinxext/prs/4520.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "83e46c4b4b3dc3901065ce44532f7f4a99299ddf", + "authors": [ + { + "n": "Jaakko Leppakangas", + "e": "jaeilepp@student.jyu.fi" + } + ], + "changes": { + "mne/io/cnt/cnt.py": { + "a": 16, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4524.json b/doc/sphinxext/prs/4524.json new file mode 100644 index 00000000000..585ae242314 --- /dev/null +++ b/doc/sphinxext/prs/4524.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e613a6444b2aa8631d1d4e75baf37397a165ee99", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 12, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 31, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4525.json b/doc/sphinxext/prs/4525.json new file mode 100644 index 00000000000..6767e1a437b --- /dev/null +++ b/doc/sphinxext/prs/4525.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c691fbf269a38290fae6fd07c75ab57aa5213d45", + "authors": [ + { + "n": "Alejandro Weinstein", + "e": "alejandro.weinstein@gmail.com" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4527.json b/doc/sphinxext/prs/4527.json new file mode 100644 index 00000000000..d00206a88d6 --- /dev/null +++ b/doc/sphinxext/prs/4527.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ccfc5029da960cfa9003a441b31418bdcfd4045b", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_base.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4528.json b/doc/sphinxext/prs/4528.json new file mode 100644 index 00000000000..fd4841acff0 --- /dev/null +++ b/doc/sphinxext/prs/4528.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cd775ca372fcc911d92297dc3131bac38530e131", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 25, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/453.json b/doc/sphinxext/prs/453.json new file mode 100644 index 00000000000..04c9b5c7c56 --- /dev/null +++ b/doc/sphinxext/prs/453.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c4a0a60c939416e8bb10925faf992ba0f7719f69", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/proj.py": { + "a": 7, + "d": 5 + }, + "mne/tests/test_proj.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4531.json b/doc/sphinxext/prs/4531.json new file mode 100644 index 00000000000..8ecd3220aa9 --- /dev/null +++ b/doc/sphinxext/prs/4531.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "73c80d65dda142165f3bbc27a4d3893244df150a", + "authors": [ + { + "n": "jdue", + "e": "jdue@dtu.dk" + } + ], + "changes": { + "doc/whats_new.rst": { + 
"a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 24, + "d": 11 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4535.json b/doc/sphinxext/prs/4535.json new file mode 100644 index 00000000000..2c863f31817 --- /dev/null +++ b/doc/sphinxext/prs/4535.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "af920972b9c7a7917d2d214c564713e959443573", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4536.json b/doc/sphinxext/prs/4536.json new file mode 100644 index 00000000000..7e3a80144a1 --- /dev/null +++ b/doc/sphinxext/prs/4536.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "4e41f9a198c9746c6b7993f3bf93546a43f5cfaf", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 11, + "d": 27 + }, + "mne/stats/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/stats/parametric.py": { + "a": 13, + "d": 0 + }, + "mne/stats/permutations.py": { + "a": 31, + "d": 0 + }, + "mne/stats/tests/test_permutations.py": { + "a": 12, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 5, + "d": 45 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4537.json b/doc/sphinxext/prs/4537.json new file mode 100644 index 00000000000..95d9a334b31 --- /dev/null +++ b/doc/sphinxext/prs/4537.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0c0b8de28d6d127aea8829ac0578231203ce856e", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4539.json b/doc/sphinxext/prs/4539.json new file mode 100644 index 00000000000..c372e4bf119 --- /dev/null +++ b/doc/sphinxext/prs/4539.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0ca1bc1b92d9ab86c438d141b78427184c77bbb2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/454.json b/doc/sphinxext/prs/454.json new file mode 100644 index 00000000000..0b8afcc0eaa --- /dev/null +++ b/doc/sphinxext/prs/454.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "37aeb75b61fd3f67257edfa2be0058a4bf9370ee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/constants.py": { + "a": 170, + "d": 7 + }, + "mne/fiff/matrix.py": { + "a": 17, + "d": 8 + }, + "mne/fiff/open.py": { + "a": 94, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 18, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4541.json b/doc/sphinxext/prs/4541.json new file mode 100644 index 00000000000..701ff3fb954 --- /dev/null +++ b/doc/sphinxext/prs/4541.json @@ -0,0 +1,287 @@ +{ + "merge_commit_sha": "876192598da7c557dbf46a6dda2ced4b7088d4bb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 27, + "d": 29 + }, + "Makefile": { + "a": 3, + "d": 3 + }, + "appveyor.yml": { + "a": 
7, + "d": 4 + }, + "doc/advanced_setup.rst": { + "a": 1, + "d": 1 + }, + "doc/configure_git.rst": { + "a": 1, + "d": 1 + }, + "doc/contributing.rst": { + "a": 2, + "d": 2 + }, + "doc/faq.rst": { + "a": 0, + "d": 1 + }, + "doc/git_links.inc": { + "a": 0, + "d": 2 + }, + "doc/known_projects.inc": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 3, + "d": 3 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 3, + "d": 2 + }, + "mne/commands/tests/test_commands.py": { + "a": 9, + "d": 6 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 3, + "d": 2 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 4 + }, + "mne/decoding/receptive_field.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/tests/test_base.py": { + "a": 8, + "d": 12 + }, + "mne/decoding/tests/test_csp.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/tests/test_ems.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 8, + "d": 8 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/time_gen.py": { + "a": 2, + "d": 8 + }, + "mne/fixes.py": { + "a": 33, + "d": 0 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 3, + "d": 2 + }, + "mne/forward/tests/test_forward.py": { + "a": 3, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 5, + "d": 4 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 4, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 3, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 3, + "d": 2 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 3, + "d": 2 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 3, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 6, + "d": 5 + }, + "mne/io/tests/test_apply_function.py": { + "a": 4, + "d": 2 + }, + "mne/io/tests/test_reference.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 6, + "d": 5 + }, + "mne/minimum_norm/tests/test_psf_ctf.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 10, + "d": 9 + }, + "mne/simulation/tests/test_raw.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_bem.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_chpi.py": { + "a": 6, + "d": 5 + }, + "mne/tests/test_cov.py": { + "a": 11, + "d": 11 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 4, + "d": 6 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_evoked.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_line_endings.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_report.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_source_estimate.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_source_space.py": { + "a": 9, + "d": 7 + }, + "mne/tests/test_surface.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 3, + "d": 2 + }, + 
"mne/time_frequency/tests/test_tfr.py": { + "a": 3, + "d": 3 + }, + "mne/utils.py": { + "a": 17, + "d": 43 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 0 + }, + "mne/viz/tests/test_decoding.py": { + "a": 6, + "d": 6 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_misc.py": { + "a": 4, + "d": 3 + }, + "mne/viz/tests/test_topomap.py": { + "a": 3, + "d": 3 + }, + "setup.cfg": { + "a": 2, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4542.json b/doc/sphinxext/prs/4542.json new file mode 100644 index 00000000000..1b35c1a164b --- /dev/null +++ b/doc/sphinxext/prs/4542.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "33146156f2660f122ecc04fa0d5b3fd3c34b549e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 17, + "d": 24 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 12, + "d": 3 + }, + "mne/io/reference.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/report.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4543.json b/doc/sphinxext/prs/4543.json new file mode 100644 index 00000000000..a92c8a7f42c --- /dev/null +++ b/doc/sphinxext/prs/4543.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "cf9ff8bcbcb4fec21e8ebbef46305e60e6ead20d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "doc/documentation.rst": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 25, + "d": 87 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 156, + "d": 29 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 180, + "d": 82 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_receptive_field.py": { + "a": 51, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4544.json b/doc/sphinxext/prs/4544.json new file mode 100644 index 00000000000..40f14c27ede --- /dev/null +++ b/doc/sphinxext/prs/4544.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "276d9c5dd5428b7eceabd240862e01847850d93d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 70, + "d": 37 + }, + "mne/tests/test_source_estimate.py": { + "a": 49, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4546.json b/doc/sphinxext/prs/4546.json new file mode 100644 index 00000000000..d6d82efde81 --- /dev/null +++ b/doc/sphinxext/prs/4546.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ce040f091f6e9346dbae05a8ab06649f6292319a", + "authors": [ + { + "n": "Nicolas Barascud", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "examples/decoding/plot_receptive_field.py": { + "a": 120, + "d": 9 + }, + "mne/decoding/receptive_field.py": { + "a": 47, + "d": 6 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 59, + "d": 7 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 2, + "d": 2 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/4547.json b/doc/sphinxext/prs/4547.json new file mode 100644 index 00000000000..bb071585e78 --- /dev/null +++ b/doc/sphinxext/prs/4547.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "eccaf028b2c594b862975ae9e1c5cc92f625927e", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/search_light.py": { + "a": 10, + "d": 1 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4548.json b/doc/sphinxext/prs/4548.json new file mode 100644 index 00000000000..a0607de9892 --- /dev/null +++ b/doc/sphinxext/prs/4548.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d2b214f410e61bb31b49dc40ac4e392c3bdd2383", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 8, + "d": 8 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4549.json b/doc/sphinxext/prs/4549.json new file mode 100644 index 00000000000..d2cbf29a416 --- /dev/null +++ b/doc/sphinxext/prs/4549.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cd24596e14383dc978d754a51f8fe9fabbd76b21", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/source_space.py": { + "a": 8, + "d": 4 + }, + "mne/tests/test_source_space.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4550.json b/doc/sphinxext/prs/4550.json new file mode 100644 index 00000000000..c1dfcd8227e --- /dev/null +++ b/doc/sphinxext/prs/4550.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "4a99b6fff771031763c774eab9f66d32f2702f21", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "appveyor.yml": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_receptive_field.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/receptive_field.py": { + "a": 34, + "d": 22 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 148, + "d": 79 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 7, + "d": 6 + }, + "setup.cfg": { + "a": 1, + "d": 1 + }, + "tutorials/plot_receptive_field.py": { + "a": 9, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4552.json b/doc/sphinxext/prs/4552.json new file mode 100644 index 00000000000..6c48d595b00 --- /dev/null +++ b/doc/sphinxext/prs/4552.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "305621c64ade2e19579add8661953deb6645f26c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 26, + "d": 16 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 2 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4559.json b/doc/sphinxext/prs/4559.json new file mode 100644 index 00000000000..a5581cd7704 --- /dev/null +++ b/doc/sphinxext/prs/4559.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9ac40200cc72ac81efc2d59996bbdbf91bf4666c", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/baseline.py": { + "a": 18, + "d": 14 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 30, + "d": 17 + }, + "mne/time_frequency/tfr.py": { + "a": 66, + "d": 41 + }, + 
"mne/viz/topomap.py": { + "a": 46, + "d": 42 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4560.json b/doc/sphinxext/prs/4560.json new file mode 100644 index 00000000000..513344a0b11 --- /dev/null +++ b/doc/sphinxext/prs/4560.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d11af66dae0261b6edc8f3e4d5a7ceb051b85061", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 14, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 31, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4561.json b/doc/sphinxext/prs/4561.json new file mode 100644 index 00000000000..b54628680a9 --- /dev/null +++ b/doc/sphinxext/prs/4561.json @@ -0,0 +1,123 @@ +{ + "merge_commit_sha": "f9443967f85880b95d36720bbd25d678bdccb6ad", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 5, + "d": 5 + }, + "examples/time_frequency/plot_time_frequency_simulated.py": { + "a": 2, + "d": 2 + }, + "examples/visualization/plot_topo_customized.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/_dics.py": { + "a": 4, + "d": 4 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 3, + "d": 3 + }, + "mne/connectivity/effective.py": { + "a": 6, + "d": 5 + }, + "mne/connectivity/spectral.py": { + "a": 23, + "d": 30 + }, + "mne/connectivity/tests/test_effective.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/time_frequency.py": { + "a": 19, + "d": 10 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 4, + "d": 7 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 15, + "d": 18 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 9, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 27, + "d": 21 + }, + "mne/time_frequency/multitaper.py": { + "a": 21, + "d": 18 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 23, + "d": 24 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 38, + "d": 32 + }, + "mne/utils.py": { + "a": 8, + "d": 0 + }, + "tutorials/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 4, + "d": 4 + }, + "tutorials/plot_stats_cluster_time_frequency.py": { + "a": 5, + "d": 5 + }, + "tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 10, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4562.json b/doc/sphinxext/prs/4562.json new file mode 100644 index 00000000000..cf3afb6849b --- /dev/null +++ b/doc/sphinxext/prs/4562.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "300f8734796c1d10da94f45eb8de0e9ee45fa90e", + "authors": [ + { + "n": "Yousra Bekhti", + "e": "yousra.bekhti@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "examples/inverse/plot_custom_inverse_solver.py": { + "a": 9, + "d": 8 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 7, + "d": 10 + }, + 
"mne/inverse_sparse/mxne_inverse.py": { + "a": 35, + "d": 24 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 37, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 51, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 50, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4563.json b/doc/sphinxext/prs/4563.json new file mode 100644 index 00000000000..8e7f4321111 --- /dev/null +++ b/doc/sphinxext/prs/4563.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4e0635e0e3e718d4aa723f0ffc28c22020caf51c", + "authors": [ + { + "n": "Cristóbal Moënne-Loccoz", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 21, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4565.json b/doc/sphinxext/prs/4565.json new file mode 100644 index 00000000000..dd591c24d25 --- /dev/null +++ b/doc/sphinxext/prs/4565.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "21dfeea128e1ac4fc4dab8789694ffe0cfbeaf74", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/cuda.py": { + "a": 22, + "d": 9 + }, + "mne/epochs.py": { + "a": 1, + "d": 53 + }, + "mne/evoked.py": { + "a": 1, + "d": 33 + }, + "mne/filter.py": { + "a": 278, + "d": 24 + }, + "mne/io/base.py": { + "a": 35, + "d": 33 + }, + "mne/tests/test_epochs.py": { + "a": 40, + "d": 14 + }, + "mne/tests/test_evoked.py": { + "a": 12, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 19, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4566.json b/doc/sphinxext/prs/4566.json new file mode 100644 index 00000000000..048b1436510 --- /dev/null +++ b/doc/sphinxext/prs/4566.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f5b8267f3750c450d411711c1c1a02ccc8b791ed", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 121, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4567.json b/doc/sphinxext/prs/4567.json new file mode 100644 index 00000000000..5c2c4cb1814 --- /dev/null +++ b/doc/sphinxext/prs/4567.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a36dec28cea9b123102b082af95c3f69c180f878", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 35, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4569.json b/doc/sphinxext/prs/4569.json new file mode 100644 index 00000000000..e8f7c2d6d72 --- /dev/null +++ b/doc/sphinxext/prs/4569.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "be9ae352cb40edbe16a5826a3a79d66fd5a099a6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 10, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 20, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/457.json b/doc/sphinxext/prs/457.json new file mode 100644 index 00000000000..09d095b68df --- /dev/null +++ b/doc/sphinxext/prs/457.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2f9d389171799dd0a84232166f17d69ab393f990", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 20, + "d": 3 + }, + "mne/tests/test_cov.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4570.json b/doc/sphinxext/prs/4570.json new file mode 100644 index 00000000000..fa9c1c4d240 --- /dev/null +++ b/doc/sphinxext/prs/4570.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "3d503127e6ca99c3b5bb189dfb0fbaa1c889df4c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 7, + "d": 8 + }, + "doc/manual/channel_interpolation.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/cookbook.rst": { + "a": 20, + "d": 20 + }, + "doc/manual/decoding.rst": { + "a": 3, + "d": 3 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 4, + "d": 4 + }, + "doc/manual/sample_dataset.rst": { + "a": 1, + "d": 1 + }, + "doc/tutorials/report.rst": { + "a": 9, + "d": 9 + }, + "environment.yml": { + "a": 33, + "d": 0 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4571.json b/doc/sphinxext/prs/4571.json new file mode 100644 index 00000000000..06682457483 --- /dev/null +++ b/doc/sphinxext/prs/4571.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d75f0821c39fff49e418ebb4df189f2a88a204c2", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/plot_info.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4576.json b/doc/sphinxext/prs/4576.json new file mode 100644 index 00000000000..769003edece --- /dev/null +++ b/doc/sphinxext/prs/4576.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2dcdc3ae1c61c6e4843ddf4cafdd3dcf51ed8aec", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/458.json b/doc/sphinxext/prs/458.json new file mode 100644 index 00000000000..da6be6c53f2 --- /dev/null +++ b/doc/sphinxext/prs/458.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1f42cd617522a04432172f93cca364aa308f1542", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4581.json b/doc/sphinxext/prs/4581.json new file mode 100644 index 00000000000..64a1054480c --- /dev/null +++ b/doc/sphinxext/prs/4581.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9ef2b0dde60701f1597619edb5ac955f84320224", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4582.json b/doc/sphinxext/prs/4582.json new file mode 100644 index 00000000000..d8da807af49 --- /dev/null +++ b/doc/sphinxext/prs/4582.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d2ac5a40c9f6660a2391fe4de55f9a74df5b6fc8", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 2, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 81, + "d": 38 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 6, + "d": 2 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4583.json b/doc/sphinxext/prs/4583.json new file mode 100644 index 00000000000..0d9a7404b9e --- /dev/null +++ b/doc/sphinxext/prs/4583.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c79b74e9c67dbe429e29007746aec2691189a794", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/egimff.py": { + "a": 245, + "d": 62 + }, + "mne/io/egi/events.py": { + "a": 4, + "d": 1 + }, + "mne/io/egi/general.py": { + "a": 16, + "d": 9 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 43, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4585.json b/doc/sphinxext/prs/4585.json new file mode 100644 index 00000000000..dd47e7b4fb3 --- /dev/null +++ b/doc/sphinxext/prs/4585.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "c6a9c3e4cc4cf3e671e05bd01fae890d6aa2b523", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 6, + "d": 2 + }, + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 6, + "d": 3 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 2, + "d": 2 + }, + "examples/datasets/plot_megsim_data_single_trial.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 3, + "d": 11 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 5, + "d": 3 + }, + "examples/decoding/plot_decoding_spoc_CMC.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 1, + "d": 2 + }, + "examples/decoding/plot_receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 5, + "d": 9 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 5, + "d": 9 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 6, + "d": 9 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 4, + "d": 9 + }, + 
"mne/datasets/brainstorm/bst_resting.py": { + "a": 5, + "d": 9 + }, + "mne/datasets/utils.py": { + "a": 182, + "d": 133 + }, + "mne/utils.py": { + "a": 26, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4587.json b/doc/sphinxext/prs/4587.json new file mode 100644 index 00000000000..0ba78de0f51 --- /dev/null +++ b/doc/sphinxext/prs/4587.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "69d1945404d8954d08ac526d92836ae57c9dab37", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 0, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4588.json b/doc/sphinxext/prs/4588.json new file mode 100644 index 00000000000..a9f396b0d19 --- /dev/null +++ b/doc/sphinxext/prs/4588.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "ab34726258cc124846134f350c0a907d8dd11318", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 7, + "d": 7 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 3, + "d": 3 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/459.json b/doc/sphinxext/prs/459.json new file mode 100644 index 00000000000..58a68a5c076 --- /dev/null +++ b/doc/sphinxext/prs/459.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "823e6d6d509d16be7a84efbeb6fe0a51227be5bf", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 13, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4590.json b/doc/sphinxext/prs/4590.json new file mode 100644 index 00000000000..43b5182df9f --- /dev/null +++ b/doc/sphinxext/prs/4590.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e9bbae9bbbb37b37cc9b661a1e76c80af28b7f00", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/channels/data/montages/10-5_EGI129.csd": { + "a": 0, + "d": 467 + }, + "mne/channels/montage.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4591.json b/doc/sphinxext/prs/4591.json new file mode 100644 index 00000000000..0d4de09f346 --- /dev/null +++ b/doc/sphinxext/prs/4591.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "791528101de7553dc3cdbefd82efd2d528842a3f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/plot_brainstorm_phantom_ctf.py": { + "a": 11, + "d": 2 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 45, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4592.json b/doc/sphinxext/prs/4592.json new file mode 100644 index 00000000000..9934c612dcd --- /dev/null +++ b/doc/sphinxext/prs/4592.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "42794b4472d0fbf8712bab2fb6a8705092291fdd", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/montage.py": { + "a": 25, + "d": 1 + }, + "mne/viz/tests/test_montage.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4593.json b/doc/sphinxext/prs/4593.json 
new file mode 100644 index 00000000000..5793b36c730 --- /dev/null +++ b/doc/sphinxext/prs/4593.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5fafe7202a61ef0cf0c4b8105ee4d845b0d15ae9", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_file_traits.py": { + "a": 59, + "d": 63 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4595.json b/doc/sphinxext/prs/4595.json new file mode 100644 index 00000000000..3d9e2c30b52 --- /dev/null +++ b/doc/sphinxext/prs/4595.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "d12520c75882016f7cced853048cc85e78055402", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_topo_compare_conditions.py": { + "a": 3, + "d": 5 + }, + "mne/decoding/csp.py": { + "a": 31, + "d": 34 + }, + "mne/evoked.py": { + "a": 15, + "d": 18 + }, + "mne/io/base.py": { + "a": 9, + "d": 5 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 8, + "d": 2 + }, + "mne/utils.py": { + "a": 8, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_decoding.py": { + "a": 41, + "d": 32 + }, + "mne/viz/tests/test_epochs.py": { + "a": 14, + "d": 26 + }, + "mne/viz/tests/test_montage.py": { + "a": 10, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 12, + "d": 9 + }, + "mne/viz/tests/test_topo.py": { + "a": 7, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_utils.py": { + "a": 26, + "d": 19 + }, + "mne/viz/topo.py": { + "a": 9, + "d": 8 + }, + "mne/viz/topomap.py": { + "a": 28, + "d": 23 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 5 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4596.json b/doc/sphinxext/prs/4596.json new file mode 100644 index 00000000000..4f1d58ba01e --- /dev/null +++ b/doc/sphinxext/prs/4596.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1742decca63dd6d1bb6517a7b3ce465aebf1943f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4597.json b/doc/sphinxext/prs/4597.json new file mode 100644 index 00000000000..28ee66fe09d --- /dev/null +++ b/doc/sphinxext/prs/4597.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4314b7b1a5cc8f5f17101b4b79da53a54640c173", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 3, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4598.json b/doc/sphinxext/prs/4598.json new file mode 100644 index 00000000000..b0c7cc3aca8 --- /dev/null +++ b/doc/sphinxext/prs/4598.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7f2066fe4d1ed0c59c5ae0105485479352163ec4", + "authors": [ + { + "n": "Jon Houck", + "e": null + } + ], + "changes": { + "mne/commands/mne_surf2bem.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/46.json b/doc/sphinxext/prs/46.json new file mode 100644 index 
00000000000..e54f24c5dd3 --- /dev/null +++ b/doc/sphinxext/prs/46.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8ee29163c81d0509713037e3292755ce523857b9", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "bin/mne_compute_proj_ecg.py": { + "a": 13, + "d": 1 + }, + "bin/mne_compute_proj_eog.py": { + "a": 13, + "d": 1 + }, + "mne/artifacts/ecg.py": { + "a": 28, + "d": 7 + }, + "mne/artifacts/eog.py": { + "a": 7, + "d": 3 + }, + "mne/preprocessing/ssp.py": { + "a": 40, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4600.json b/doc/sphinxext/prs/4600.json new file mode 100644 index 00000000000..c2eae41238f --- /dev/null +++ b/doc/sphinxext/prs/4600.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "12f35943facddea33d32e99308fd4f148ecede05", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 8, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 5, + "d": 0 + }, + "mne/io/array/tests/test_array.py": { + "a": 13, + "d": 0 + }, + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 1 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 9, + "d": 5 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 11, + "d": 10 + }, + "mne/io/meas_info.py": { + "a": 23, + "d": 1 + }, + "mne/io/tests/test_reference.py": { + "a": 3, + "d": 2 + }, + "mne/io/write.py": { + "a": 2, + "d": 7 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4601.json b/doc/sphinxext/prs/4601.json new file mode 100644 index 00000000000..5805700e8de --- /dev/null +++ b/doc/sphinxext/prs/4601.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ce2486cda91f359cdcb9a7fc3410e2a36db7d428", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 7, + "d": 5 + }, + "mne/cov.py": { + "a": 52, + "d": 42 + }, + "mne/dipole.py": { + "a": 3, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 3, + "d": 22 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4602.json b/doc/sphinxext/prs/4602.json new file mode 100644 index 00000000000..ce2666a87d2 --- /dev/null +++ b/doc/sphinxext/prs/4602.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a24fc0bc9a749c061d5c15684e44c559c9ae64ef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_receptive_field.py": { + "a": 3, + "d": 1 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_import_nesting.py": { + "a": 3, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4603.json b/doc/sphinxext/prs/4603.json new file mode 100644 index 00000000000..eb527a1f2d6 --- /dev/null +++ b/doc/sphinxext/prs/4603.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5971f0ef4a90c8d369197e28fc3326e35ba5e973", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 23, + "d": 1 + }, + "mne/gui/_coreg_gui.py": { + "a": 3, + "d": 3 + }, + "mne/gui/_file_traits.py": { + "a": 35, + "d": 26 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 6 + }, + "mne/viz/_3d.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4608.json b/doc/sphinxext/prs/4608.json new file mode 100644 index 00000000000..1d9e3d64f1b --- /dev/null +++ b/doc/sphinxext/prs/4608.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "76afe82e742a97719cba58b8c4cba76068f29996", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 4 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 3, + "d": 3 + }, + "examples/io/plot_elekta_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_virtual_evoked.py": { + "a": 2, + "d": 2 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_sensor_regression.py": { + "a": 8, + "d": 7 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/csp.py": { + "a": 15, + "d": 13 + }, + "mne/decoding/transformer.py": { + "a": 12, + "d": 2 + }, + "mne/evoked.py": { + "a": 6, + "d": 5 + }, + "mne/filter.py": { + "a": 5, + "d": 5 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 3, + "d": 3 + }, + "mne/utils.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 8, + "d": 13 + }, + "tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4609.json b/doc/sphinxext/prs/4609.json new file mode 100644 index 00000000000..7a874a3317c --- /dev/null +++ b/doc/sphinxext/prs/4609.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "53669a18d1f7ae19b01ae4cedb1b5b27289b9b08", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 42, + "d": 46 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_bem.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4610.json b/doc/sphinxext/prs/4610.json new file mode 100644 index 00000000000..36f2f1d59d8 --- /dev/null +++ b/doc/sphinxext/prs/4610.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "52c1e6f5472f823e49e5427e29548dfa594e9f44", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 1, + "d": 2 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 5, + "d": 5 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 2, + "d": 2 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4611.json b/doc/sphinxext/prs/4611.json new file mode 100644 index 00000000000..5f3a363137e --- /dev/null +++ b/doc/sphinxext/prs/4611.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"36f88e54cdcf27c785857025af602376463ad3ed", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 35, + "d": 40 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4616.json b/doc/sphinxext/prs/4616.json new file mode 100644 index 00000000000..7d5a09f48d8 --- /dev/null +++ b/doc/sphinxext/prs/4616.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f39093ef0d8099c88458abbdc25a8b8dfca3eea1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4617.json b/doc/sphinxext/prs/4617.json new file mode 100644 index 00000000000..13084091687 --- /dev/null +++ b/doc/sphinxext/prs/4617.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "06962e287e7ce4f2f40b2defbbb47c9c7e986b96", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 17, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 54, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4618.json b/doc/sphinxext/prs/4618.json new file mode 100644 index 00000000000..0dc10080c30 --- /dev/null +++ b/doc/sphinxext/prs/4618.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "72a9b26153ed6e71218dd6f13b57412bb5def205", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 0, + "d": 1 + }, + "examples/forward/plot_forward_sensitivity_maps.py": { + "a": 1, + "d": 1 + }, + "examples/forward/plot_source_space_morphing.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/plot_custom_inverse_solver.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 0, + "d": 1 + }, + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 2, + "d": 3 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 28, + "d": 18 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 4, + "d": 4 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 3, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/io/pick.py": { + "a": 12, + "d": 11 + }, + "mne/minimum_norm/inverse.py": { + "a": 89, + "d": 87 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 119, + "d": 78 + }, + "mne/simulation/evoked.py": { + "a": 11, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4619.json b/doc/sphinxext/prs/4619.json new file mode 100644 index 00000000000..bf0173a60aa --- /dev/null +++ b/doc/sphinxext/prs/4619.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d6b390d6764778800988b2a44a62c25299a96ab8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + 
], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 6, + "d": 2 + }, + "mne/io/proj.py": { + "a": 78, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 26, + "d": 15 + }, + "mne/viz/topomap.py": { + "a": 83, + "d": 53 + }, + "tutorials/plot_artifacts_correction_ssp.py": { + "a": 10, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/462.json b/doc/sphinxext/prs/462.json new file mode 100644 index 00000000000..a555c6e9bd3 --- /dev/null +++ b/doc/sphinxext/prs/462.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "baabed706357525bd3b29f4cd1183fa3fed624b7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 79, + "d": 32 + }, + "mne/surface.py": { + "a": 22, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4620.json b/doc/sphinxext/prs/4620.json new file mode 100644 index 00000000000..50445403958 --- /dev/null +++ b/doc/sphinxext/prs/4620.json @@ -0,0 +1,175 @@ +{ + "merge_commit_sha": "bdbc4f9207822901f1af229394ba4bc05b347aa4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "examples/forward/plot_forward_sensitivity_maps.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_custom_inverse_solver.py": { + "a": 7, + "d": 1 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_mne_point_spread_function.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_rap_music.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 4, + "d": 1 + }, + "examples/visualization/plot_ssp_projs_sensitivity_map.py": { + "a": 3, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 8, + "d": 5 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 7, + "d": 5 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 236, + "d": 83 + }, + "mne/forward/tests/test_forward.py": { + "a": 82, + "d": 15 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 3, + "d": 3 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 7, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 15, + "d": 4 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 5, + "d": 3 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 5, + "d": 4 + }, + "mne/minimum_norm/inverse.py": { + "a": 10, + "d": 6 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 13, + "d": 5 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 13, + "d": 8 + }, + "mne/minimum_norm/tests/test_psf_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/raw.py": { + 
"a": 6, + "d": 3 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 4, + "d": 4 + }, + "mne/simulation/tests/test_raw.py": { + "a": 40, + "d": 25 + }, + "mne/simulation/tests/test_source.py": { + "a": 8, + "d": 6 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_forward.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_point_spread.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4622.json b/doc/sphinxext/prs/4622.json new file mode 100644 index 00000000000..189cd212927 --- /dev/null +++ b/doc/sphinxext/prs/4622.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bbf2afaa3798c979002693be7de6e3e43ca0bdaa", + "authors": [ + { + "n": "Christopher J. Bailey", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 9, + "d": 5 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 15, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4624.json b/doc/sphinxext/prs/4624.json new file mode 100644 index 00000000000..e9522913f1e --- /dev/null +++ b/doc/sphinxext/prs/4624.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "25d565466a31e9d834b0bb6b8962022adcc2c983", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_parcellation.py": { + "a": 7, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 3, + "d": 6 + }, + "mne/datasets/utils.py": { + "a": 119, + "d": 17 + }, + "mne/label.py": { + "a": 6, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4625.json b/doc/sphinxext/prs/4625.json new file mode 100644 index 00000000000..dd9ca0a4c33 --- /dev/null +++ b/doc/sphinxext/prs/4625.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "a6fe01b56fa073d120c72454830bc858f8ea5ad0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 3, + "d": 3 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 3, + "d": 8 + }, + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 5, + "d": 5 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 0, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 4, + "d": 5 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 2, + "d": 4 + }, + "mne/label.py": { + "a": 7, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4626.json b/doc/sphinxext/prs/4626.json new file mode 100644 index 00000000000..0066b846550 --- /dev/null +++ b/doc/sphinxext/prs/4626.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "57fe40c98a97db7d1be9143b859ff86d89e1a7d8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "appveyor.yml": { + "a": 3, + "d": 2 + }, 
+ "environment.yml": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4628.json b/doc/sphinxext/prs/4628.json new file mode 100644 index 00000000000..d33c899324a --- /dev/null +++ b/doc/sphinxext/prs/4628.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7ae3c15886f17f70f988e5c5a49454a7162e0e5b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 87, + "d": 10 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 62, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4629.json b/doc/sphinxext/prs/4629.json new file mode 100644 index 00000000000..55901a446f9 --- /dev/null +++ b/doc/sphinxext/prs/4629.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7e1d900658c5952758278edd219a5eb12b08a4a5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 5, + "d": 5 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_receptive_field.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4630.json b/doc/sphinxext/prs/4630.json new file mode 100644 index 00000000000..7151269fe89 --- /dev/null +++ b/doc/sphinxext/prs/4630.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "93958f591b3ffc2faac31b8d20b2bc2685810941", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_rap_music.py": { + "a": 8, + "d": 7 + }, + "mne/viz/_3d.py": { + "a": 16, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 0, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4632.json b/doc/sphinxext/prs/4632.json new file mode 100644 index 00000000000..a66d546bad5 --- /dev/null +++ b/doc/sphinxext/prs/4632.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6e980003c7f9295d13a4aaf7c05794bd68e73fb8", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 109, + "d": 107 + }, + "mne/tests/test_cov.py": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4633.json b/doc/sphinxext/prs/4633.json new file mode 100644 index 00000000000..9bd988d3d9b --- /dev/null +++ b/doc/sphinxext/prs/4633.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "35eb5be96e7504c4192d608ceb4c27eb96c5ee48", + "authors": [ + { + "n": "Yousra Bekhti", + "e": "yousra.bekhti@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 15, + "d": 7 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 17, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 20, + "d": 0 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4635.json b/doc/sphinxext/prs/4635.json new file mode 100644 index 00000000000..460de2bec9e --- /dev/null +++ b/doc/sphinxext/prs/4635.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "933eccfe732b5ef3b73f40bf357d2c6cd44745b2", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_kit2fiff_gui.py": { + "a": 16, + "d": 3 + }, + "mne/io/kit/constants.py": { + "a": 7, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 52, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4636.json b/doc/sphinxext/prs/4636.json new file mode 100644 index 00000000000..b8dc4222423 --- /dev/null +++ b/doc/sphinxext/prs/4636.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e9a5c6df6751f99c1fbc83fde6b780ba4305075b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 11, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4638.json b/doc/sphinxext/prs/4638.json new file mode 100644 index 00000000000..1bca35fc6fb --- /dev/null +++ b/doc/sphinxext/prs/4638.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8bf65d5032b8bf11c4efeba6ce6d954d88a0326d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + ".gitignore": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/464.json b/doc/sphinxext/prs/464.json new file mode 100644 index 00000000000..03f13159c35 --- /dev/null +++ b/doc/sphinxext/prs/464.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d34d9fbdbd613f68813c949de4cdff323b79072c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/event.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4640.json b/doc/sphinxext/prs/4640.json new file mode 100644 index 00000000000..a21663193cd --- /dev/null +++ b/doc/sphinxext/prs/4640.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0e3579fd08d2f45c2291ff7c50a111fdcb4d512d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 45, + "d": 27 + }, + "mne/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4641.json 
b/doc/sphinxext/prs/4641.json new file mode 100644 index 00000000000..876544d011b --- /dev/null +++ b/doc/sphinxext/prs/4641.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "2a98e818567ddf0caf7b3e32d1c946f2c61a2727", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 14, + "d": 20 + }, + "doc/_static/copybutton.js": { + "a": 66, + "d": 0 + }, + "doc/_templates/layout.html": { + "a": 2, + "d": 0 + }, + "doc/advanced_setup.rst": { + "a": 50, + "d": 7 + }, + "doc/conf.py": { + "a": 0, + "d": 8 + }, + "doc/configure_git.rst": { + "a": 24, + "d": 24 + }, + "doc/contributing.rst": { + "a": 5, + "d": 5 + }, + "doc/faq.rst": { + "a": 2, + "d": 2 + }, + "doc/install_mne_c.rst": { + "a": 8, + "d": 8 + }, + "doc/install_mne_python.rst": { + "a": 49, + "d": 95 + }, + "doc/martinos.rst": { + "a": 3, + "d": 3 + }, + "doc/tutorials/report.rst": { + "a": 6, + "d": 6 + }, + "environment.yml": { + "a": 12, + "d": 4 + }, + "environment2.yml": { + "a": 38, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 0 + }, + "mne/gui/_viewer.py": { + "a": 2, + "d": 0 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 12, + "d": 0 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 10, + "d": 5 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 9, + "d": 0 + }, + "mne/tests/test_line_endings.py": { + "a": 3, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4644.json b/doc/sphinxext/prs/4644.json new file mode 100644 index 00000000000..50c881ccd09 --- /dev/null +++ b/doc/sphinxext/prs/4644.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d5a8aa2ac687d0c423a8e222dab750ba51147ec9", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 12, + "d": 8 + }, + "mne/io/reference.py": { + "a": 11, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4645.json b/doc/sphinxext/prs/4645.json new file mode 100644 index 00000000000..58cb8fcea98 --- /dev/null +++ b/doc/sphinxext/prs/4645.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9b6b0b27620111d8cc59c17a6af0486c41d9d606", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 16, + "d": 14 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 10, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4646.json b/doc/sphinxext/prs/4646.json new file mode 100644 index 00000000000..e19e31ee693 --- /dev/null +++ b/doc/sphinxext/prs/4646.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "40dfc1163fbccf7da42e342fd717e5bb59575b70", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_marker_gui.py": { + "a": 39, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4652.json b/doc/sphinxext/prs/4652.json new file mode 100644 index 00000000000..3526d035da3 --- /dev/null +++ b/doc/sphinxext/prs/4652.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b5a2a96cdde880d894d0d10560b2a007b6ea81aa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ssp.py": { + "a": 38, + "d": 33 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4654.json 
b/doc/sphinxext/prs/4654.json new file mode 100644 index 00000000000..39b3c07e0a2 --- /dev/null +++ b/doc/sphinxext/prs/4654.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "efa7b4cae0a84496ce2c6f9da9714fc73bd41ea2", + "authors": [ + { + "n": "Alejandro Weinstein", + "e": "alejandro.weinstein@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "mne/io/base.py": { + "a": 6, + "d": 6 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 70, + "d": 22 + }, + "mne/viz/tests/test_raw.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4656.json b/doc/sphinxext/prs/4656.json new file mode 100644 index 00000000000..7ab1b822c73 --- /dev/null +++ b/doc/sphinxext/prs/4656.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "64d5d222d008b84b0f1d4d341d1cd074e389ff21", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 11, + "d": 13 + }, + "mne/preprocessing/maxwell.py": { + "a": 6, + "d": 5 + }, + "mne/source_space.py": { + "a": 5, + "d": 4 + }, + "mne/utils.py": { + "a": 10, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4657.json b/doc/sphinxext/prs/4657.json new file mode 100644 index 00000000000..7b4f2bc1674 --- /dev/null +++ b/doc/sphinxext/prs/4657.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "47368ab51847c4551e9df789f0de2fc5df3e17cc", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4661.json b/doc/sphinxext/prs/4661.json new file mode 100644 index 00000000000..8208b43405f --- /dev/null +++ b/doc/sphinxext/prs/4661.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e189992d84d3e31f403afa4fa7773711ecec7dce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4662.json b/doc/sphinxext/prs/4662.json new file mode 100644 index 00000000000..b1e590f87f1 --- /dev/null +++ b/doc/sphinxext/prs/4662.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5fac33d8950acbdabfc97ffc3dde16cd49991905", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4664.json b/doc/sphinxext/prs/4664.json new file mode 100644 index 00000000000..5f8f681c07e --- /dev/null +++ b/doc/sphinxext/prs/4664.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ddc8b342c8ba2462f304a05dee954d57225863e6", + "authors": [ + { + "n": "Alexander Rudiuk", + "e": null + } + ], + "changes": { + "examples/decoding/decoding_rsa.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4665.json b/doc/sphinxext/prs/4665.json new file mode 100644 index 00000000000..e4b817d37d7 --- /dev/null +++ b/doc/sphinxext/prs/4665.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5dfd1fc65e1e8105ca280a8a422413f297840f61", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 5, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4666.json b/doc/sphinxext/prs/4666.json new file mode 100644 index 00000000000..7ea5244c17c --- /dev/null 
+++ b/doc/sphinxext/prs/4666.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ec9e82714a93529c8e1fdc98acd1614c89463b98", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 1, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 9, + "d": 9 + }, + "mne/stats/tests/test_permutations.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4668.json b/doc/sphinxext/prs/4668.json new file mode 100644 index 00000000000..3661f92fc73 --- /dev/null +++ b/doc/sphinxext/prs/4668.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d09c02fda48bce2a8bffe432d8dad191f8622ad2", + "authors": [ + { + "n": "Yousra Bekhti", + "e": "yousra.bekhti@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4669.json b/doc/sphinxext/prs/4669.json new file mode 100644 index 00000000000..6f748f2f3ab --- /dev/null +++ b/doc/sphinxext/prs/4669.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "cb9f94891ed4252c9e7d88b1d6d77f06297cb375", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/cov.py": { + "a": 9, + "d": 5 + }, + "mne/evoked.py": { + "a": 10, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 3 + }, + "mne/io/proc_history.py": { + "a": 42, + "d": 2 + }, + "mne/io/tests/test_proc_history.py": { + "a": 5, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 90, + "d": 25 + }, + "mne/viz/tests/test_evoked.py": { + "a": 15, + "d": 3 + }, + "tutorials/plot_compute_covariance.py": { + "a": 56, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/467.json b/doc/sphinxext/prs/467.json new file mode 100644 index 00000000000..d8581088c51 --- /dev/null +++ b/doc/sphinxext/prs/467.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c4456e0113b4fa5485e9272e73c68cb15589872a", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/bti/raw.py": { + "a": 13, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4672.json b/doc/sphinxext/prs/4672.json new file mode 100644 index 00000000000..84697154e51 --- /dev/null +++ b/doc/sphinxext/prs/4672.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ebfaf79e6476741b30d4c636a5c8c93ce075ae33", + "authors": [ + { + "n": "Alejandro Weinstein", + "e": "alejandro.weinstein@gmail.com" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4677.json b/doc/sphinxext/prs/4677.json new file mode 100644 index 00000000000..b24e4b259c6 --- /dev/null +++ b/doc/sphinxext/prs/4677.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "585ee85cd9148d18863cd11c0c06578f3ec6c1b9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 0, + "d": 3 + }, + "environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4679.json b/doc/sphinxext/prs/4679.json new file mode 100644 index 00000000000..7bd89823b72 --- /dev/null +++ b/doc/sphinxext/prs/4679.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "8dc6afc53f3c0ee484859d7648af8f8bc6449b2c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/realtime/rt_feedback_server.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 1 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/tests/test_base.py": { + "a": 1, + "d": 0 + }, + "mne/forward/tests/test_forward.py": { + "a": 23, + "d": 7 + }, + "mne/gui/_file_traits.py": { + "a": 2, + "d": 1 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 5, + "d": 2 + }, + "mne/io/base.py": { + "a": 2, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 4, + "d": 2 + }, + "mne/stats/tests/test_regression.py": { + "a": 7, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 6, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 8, + "d": 4 + }, + "mne/viz/tests/test_3d.py": { + "a": 8, + "d": 5 + }, + "mne/viz/tests/test_decoding.py": { + "a": 5, + "d": 5 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 4, + "d": 1 + }, + "setup.cfg": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4681.json b/doc/sphinxext/prs/4681.json new file mode 100644 index 00000000000..51ca1f20770 --- /dev/null +++ b/doc/sphinxext/prs/4681.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "14595ede292ac1db1cfcc0b85ef7a9c303a12e72", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/receptive_field.py": { + "a": 4, + "d": 2 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 8, + "d": 6 + } + } +} \ No newline at end 
of file diff --git a/doc/sphinxext/prs/4683.json b/doc/sphinxext/prs/4683.json new file mode 100644 index 00000000000..b9b4db8300d --- /dev/null +++ b/doc/sphinxext/prs/4683.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7c5e6c8061c66ed31924195efdfee9696689d7a6", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4684.json b/doc/sphinxext/prs/4684.json new file mode 100644 index 00000000000..d4cd046f37d --- /dev/null +++ b/doc/sphinxext/prs/4684.json @@ -0,0 +1,175 @@ +{ + "merge_commit_sha": "9ac6547284bbd73b4c18d7d7031042585388005c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/gen_commands.py": { + "a": 7, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 24, + "d": 99 + }, + "mne/beamformer/_rap_music.py": { + "a": 2, + "d": 9 + }, + "mne/connectivity/effective.py": { + "a": 2, + "d": 3 + }, + "mne/connectivity/spectral.py": { + "a": 2, + "d": 3 + }, + "mne/decoding/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 6, + "d": 15 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 0, + "d": 491 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 3, + "d": 5 + }, + "mne/decoding/time_frequency.py": { + "a": 2, + "d": 11 + }, + "mne/decoding/time_gen.py": { + "a": 0, + "d": 1567 + }, + "mne/decoding/transformer.py": { + "a": 10, + "d": 25 + }, + "mne/event.py": { + "a": 7, + "d": 18 + }, + "mne/evoked.py": { + "a": 11, + "d": 16 + }, + "mne/filter.py": { + "a": 22, + "d": 38 + }, + "mne/forward/forward.py": { + "a": 9, + "d": 41 + }, + "mne/forward/tests/test_forward.py": { + "a": 0, + "d": 23 + }, + "mne/io/base.py": { + "a": 12, + "d": 27 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 7 + }, + "mne/io/reference.py": { + "a": 7, + "d": 28 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 8 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 3, + "d": 4 + }, + "mne/simulation/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 3, + "d": 76 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 8 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 13 + }, + "mne/time_frequency/csd.py": { + "a": 2, + "d": 10 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 8, + "d": 14 + }, + "mne/utils.py": { + "a": 0, + "d": 16 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 448 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/viz/decoding.py": { + "a": 0, + "d": 238 + }, + "mne/viz/evoked.py": { + "a": 20, + "d": 59 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 11 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 8 + }, + "mne/viz/tests/test_decoding.py": { + "a": 0, + "d": 135 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 12 + }, + "tutorials/plot_dipole_orientations.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4685.json b/doc/sphinxext/prs/4685.json new file mode 100644 index 00000000000..c96cc0513f9 --- /dev/null +++ b/doc/sphinxext/prs/4685.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c458b903d9fbb2d4d02c5c5bc510ecb751eebd67", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 200, + 
"d": 0 + }, + ".travis.yml": { + "a": 1, + "d": 1 + }, + "circle.yml": { + "a": 0, + "d": 204 + }, + "doc/manual/datasets_index.rst": { + "a": 1, + "d": 1 + }, + "examples/datasets/spm_faces_dataset.py": { + "a": 0, + "d": 0 + }, + "examples/io/plot_objects_from_arrays.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4687.json b/doc/sphinxext/prs/4687.json new file mode 100644 index 00000000000..6b22199ce82 --- /dev/null +++ b/doc/sphinxext/prs/4687.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b33a264cca70c0e78847dfdcb42c96545e7313be", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 1, + "d": 1 + }, + "doc/_templates/navbar.html": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4688.json b/doc/sphinxext/prs/4688.json new file mode 100644 index 00000000000..a8b545e6dcc --- /dev/null +++ b/doc/sphinxext/prs/4688.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d0b959e867322c65edc52cfc35b9a00aad54cacf", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/469.json b/doc/sphinxext/prs/469.json new file mode 100644 index 00000000000..32562145886 --- /dev/null +++ b/doc/sphinxext/prs/469.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f0695c93332505a3999b582fe2368d6da170c2a8", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/bti/raw.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4692.json b/doc/sphinxext/prs/4692.json new file mode 100644 index 00000000000..5484f090134 --- /dev/null +++ b/doc/sphinxext/prs/4692.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c772ede93a7eb54608ab6f66080c6bb8622ff18b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 5, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4696.json b/doc/sphinxext/prs/4696.json new file mode 100644 index 00000000000..5a94ac25b06 --- /dev/null +++ b/doc/sphinxext/prs/4696.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f0d9b8b22257562f4475d4eb72ca73bc14d5f469", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_rereference_eeg.py": { + "a": 13, + "d": 7 + }, + "mne/io/reference.py": { + "a": 5, + "d": 3 + }, + "mne/io/tests/test_reference.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4698.json b/doc/sphinxext/prs/4698.json new file mode 100644 index 00000000000..90924ff028c --- /dev/null +++ b/doc/sphinxext/prs/4698.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "cc749f5b41ec282acaa7b09e59b61ed35ed4d59a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/gui/_kit2fiff_gui.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 1 + }, + 
"mne/misc.py": { + "a": 6, + "d": 15 + }, + "mne/realtime/client.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 8, + "d": 8 + }, + "mne/time_frequency/multitaper.py": { + "a": 3, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/47.json b/doc/sphinxext/prs/47.json new file mode 100644 index 00000000000..8f1b9505eb7 --- /dev/null +++ b/doc/sphinxext/prs/47.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "f301620f7107e3f0fac172cec0fab17ee0e9a79b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_evoked_whitening.py": { + "a": 48, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/cov.py": { + "a": 45, + "d": 0 + }, + "mne/fiff/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 24, + "d": 2 + }, + "mne/viz.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/470.json b/doc/sphinxext/prs/470.json new file mode 100644 index 00000000000..deda3ea1671 --- /dev/null +++ b/doc/sphinxext/prs/470.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dc62e4384bf93b07a9a873f6693ef23734d21056", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/bti/read.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4701.json b/doc/sphinxext/prs/4701.json new file mode 100644 index 00000000000..501330718e4 --- /dev/null +++ b/doc/sphinxext/prs/4701.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d1c039da7ef745187f45aa6c9576772183fd7fbd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/_viewer.py": { + "a": 8, + "d": 6 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 46, + "d": 44 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 31, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4702.json b/doc/sphinxext/prs/4702.json new file mode 100644 index 00000000000..8177e7e0dc5 --- /dev/null +++ b/doc/sphinxext/prs/4702.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ad870227c6d9810b1a873c60c2139aa00fdd3d14", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/filter.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4703.json b/doc/sphinxext/prs/4703.json new file mode 100644 index 00000000000..4535d3cdbf0 --- /dev/null +++ b/doc/sphinxext/prs/4703.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f58c782d120d8805d0d0f5553fd34067e85c2a73", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 24 + }, + "tutorials/plot_forward.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4704.json b/doc/sphinxext/prs/4704.json new file mode 100644 index 00000000000..f7319d70984 --- /dev/null +++ b/doc/sphinxext/prs/4704.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4f61d23742382181f3a5fb12969abb48b7f30311", + "authors": [ + { + "n": "Jen Evans", + "e": null + } + ], + "changes": { + 
"mne/io/egi/egimff.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4705.json b/doc/sphinxext/prs/4705.json new file mode 100644 index 00000000000..961587ead83 --- /dev/null +++ b/doc/sphinxext/prs/4705.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1324f79c390a8cdf874513b1bb15481e8344700f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 129, + "d": 83 + }, + "mne/viz/tests/test_3d.py": { + "a": 7, + "d": 2 + }, + "tutorials/plot_forward.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4706.json b/doc/sphinxext/prs/4706.json new file mode 100644 index 00000000000..abfe0ea0b72 --- /dev/null +++ b/doc/sphinxext/prs/4706.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "de0e785737bbff0b0ba2393e6bdfff8677a4f403", + "authors": [ + { + "n": "Alejandro Weinstein", + "e": "alejandro.weinstein@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 0 + }, + "mne/evoked.py": { + "a": 4, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4707.json b/doc/sphinxext/prs/4707.json new file mode 100644 index 00000000000..4ce7e46b84d --- /dev/null +++ b/doc/sphinxext/prs/4707.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "405ef473f9f8eda9acffc8fd0ab89cf5349bc567", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "tutorials/plot_python_intro.py": { + "a": 19, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4708.json b/doc/sphinxext/prs/4708.json new file mode 100644 index 00000000000..40e41a50cc5 --- /dev/null +++ b/doc/sphinxext/prs/4708.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "46c8f2f67f2cf7b90bdb75171f1f8d2919782391", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 5, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 24, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4711.json b/doc/sphinxext/prs/4711.json new file mode 100644 index 00000000000..02e5e4d3983 --- /dev/null +++ b/doc/sphinxext/prs/4711.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8ad8cf87ac8da137aebcd8d058ce19085cc8f21c", + "authors": [ + { + "n": "Erik Hornberger", + "e": "erik.hornberger@shi-g.com" + } + ], + "changes": { + "tutorials/plot_epoching_and_averaging.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4713.json b/doc/sphinxext/prs/4713.json new file mode 100644 index 00000000000..3dfe30ccf49 --- /dev/null +++ b/doc/sphinxext/prs/4713.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "58a188253aebaa1934fdfcc044731ce1222c9b93", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 8, + "d": 3 + }, + "mne/viz/tests/test_3d.py": { + "a": 5, + "d": 0 + }, + "tutorials/plot_source_alignment.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4716.json b/doc/sphinxext/prs/4716.json new file mode 100644 index 00000000000..78e9f6d62a8 --- /dev/null +++ b/doc/sphinxext/prs/4716.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "966e862b95861de47ffe259bd66c86feece524f6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": 
{ + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4717.json b/doc/sphinxext/prs/4717.json new file mode 100644 index 00000000000..5bde582e2b6 --- /dev/null +++ b/doc/sphinxext/prs/4717.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "062e9b39983dcb288e78041532f35f34788a6fce", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 27, + "d": 32 + }, + "mne/io/base.py": { + "a": 20, + "d": 15 + }, + "mne/io/fiff/raw.py": { + "a": 6, + "d": 9 + }, + "mne/tests/test_annotations.py": { + "a": 123, + "d": 35 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4718.json b/doc/sphinxext/prs/4718.json new file mode 100644 index 00000000000..262a5fb9d29 --- /dev/null +++ b/doc/sphinxext/prs/4718.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "867fdb5e08e159c6a7b77104f124011d773751e4", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 24, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 10, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4721.json b/doc/sphinxext/prs/4721.json new file mode 100644 index 00000000000..9bc3f55ba2c --- /dev/null +++ b/doc/sphinxext/prs/4721.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "de256c8c314e3e158371e0fb90242f86af9a4753", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/externals/h5io/_h5io.py": { + "a": 70, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4722.json b/doc/sphinxext/prs/4722.json new file mode 100644 index 00000000000..ca43d0577a2 --- /dev/null +++ b/doc/sphinxext/prs/4722.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "40b61be17116fee3f0e96b84f1b2a8eed3028092", + "authors": [ + { + "n": "Jon Houck", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 14, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 11, + "d": 0 + }, + "mne/viz/circle.py": { + "a": 14, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4725.json b/doc/sphinxext/prs/4725.json new file mode 100644 index 00000000000..78276cb3f0d --- /dev/null +++ b/doc/sphinxext/prs/4725.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "031141291063a15c24ca9f0b6e7255a0c468db00", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/bem.py": { + "a": 3, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 12, + "d": 1 + }, + "mne/source_space.py": { + "a": 15, + "d": 6 + }, + "mne/tests/test_bem.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_source_space.py": { + "a": 11, + "d": 1 + }, + "mne/transforms.py": { + "a": 19, + "d": 8 + }, + "mne/viz/_3d.py": { + "a": 59, + "d": 15 + }, + "mne/viz/tests/test_3d.py": { + "a": 8, + "d": 2 + }, + "tutorials/plot_source_alignment.py": { + "a": 154, + "d": 45 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4729.json b/doc/sphinxext/prs/4729.json new file mode 100644 index 00000000000..d57c2847417 --- /dev/null +++ b/doc/sphinxext/prs/4729.json @@ -0,0 
+1,15 @@ +{ + "merge_commit_sha": "f90ddb194df1cbb6651b921a6817c8b64fa85bb5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/institutions.png": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/473.json b/doc/sphinxext/prs/473.json new file mode 100644 index 00000000000..d4674a12e67 --- /dev/null +++ b/doc/sphinxext/prs/473.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4467b441a07a57ecda2332076b88003c731f78d8", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/pick.py": { + "a": 21, + "d": 16 + }, + "mne/fiff/tests/test_raw.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4733.json b/doc/sphinxext/prs/4733.json new file mode 100644 index 00000000000..86711815010 --- /dev/null +++ b/doc/sphinxext/prs/4733.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "73806bc0b00accd99bb2209c9d406dc11ab94bc4", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4735.json b/doc/sphinxext/prs/4735.json new file mode 100644 index 00000000000..531e815bab9 --- /dev/null +++ b/doc/sphinxext/prs/4735.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5c9d7df3b13701169212bd3772af06c65a4521db", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/baseline.py": { + "a": 44, + "d": 32 + }, + "mne/epochs.py": { + "a": 11, + "d": 18 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 26, + "d": 24 + }, + "mne/tests/test_epochs.py": { + "a": 46, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 53, + "d": 58 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4737.json b/doc/sphinxext/prs/4737.json new file mode 100644 index 00000000000..5c08f803148 --- /dev/null +++ b/doc/sphinxext/prs/4737.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "029e14a7f94e6dccb34b75ed2de408871ebbc798", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4739.json b/doc/sphinxext/prs/4739.json new file mode 100644 index 00000000000..3c20c6d5247 --- /dev/null +++ b/doc/sphinxext/prs/4739.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9b70d8ab0ce3289dd2a9f5a8f21f90ea1d592b2b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4740.json b/doc/sphinxext/prs/4740.json new file mode 100644 index 00000000000..6dc1e3d9761 --- /dev/null +++ b/doc/sphinxext/prs/4740.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5c5429ae0ebeba7dfc58edbda4388bc26304dc7a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 4, + "d": 3 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4743.json b/doc/sphinxext/prs/4743.json new file mode 100644 index 00000000000..85083b05aee --- /dev/null +++ b/doc/sphinxext/prs/4743.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": 
"14e8f6064d7c9f8bdd62ecd4d773f8b5878df0c4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 8, + "d": 1 + }, + "doc/conf.py": { + "a": 4, + "d": 3 + }, + "doc/documentation.rst": { + "a": 6, + "d": 3 + }, + "doc/manual/index.rst": { + "a": 0, + "d": 136 + }, + "doc/manual/statistics.rst": { + "a": 0, + "d": 100 + }, + "doc/python_reference.rst": { + "a": 28, + "d": 7 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 0, + "d": 1 + }, + "mne/stats/__init__.py": { + "a": 5, + "d": 8 + }, + "mne/stats/cluster_level.py": { + "a": 269, + "d": 268 + }, + "mne/stats/parametric.py": { + "a": 73, + "d": 31 + }, + "mne/stats/permutations.py": { + "a": 27, + "d": 73 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 9, + "d": 0 + }, + "mne/stats/tests/test_permutations.py": { + "a": 7, + "d": 6 + }, + "tutorials/plot_background_statistics.py": { + "a": 646, + "d": 0 + }, + "tutorials/plot_stats_cluster_methods.py": { + "a": 0, + "d": 214 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4744.json b/doc/sphinxext/prs/4744.json new file mode 100644 index 00000000000..621bf129906 --- /dev/null +++ b/doc/sphinxext/prs/4744.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9d52566923e0ab1518a5100a050acea26a9545f1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/event.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4745.json b/doc/sphinxext/prs/4745.json new file mode 100644 index 00000000000..a6dc5fd62bc --- /dev/null +++ b/doc/sphinxext/prs/4745.json @@ -0,0 +1,163 @@ +{ + "merge_commit_sha": "59f56acde198b1eba20b4411cf3d334ec328a36f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 10, + "d": 15 + }, + "doc/tutorials/report.rst": { + "a": 17, + "d": 17 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "environment2.yml": { + "a": 9, + "d": 5 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 7, + "d": 7 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/plot_megsim_data.py": { + "a": 3, + "d": 3 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 3, + "d": 3 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 18, + "d": 31 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 4, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 21, + "d": 11 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/plot_interpolate_bad_channels.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_resample.py": { + "a": 2, + "d": 2 + }, + "examples/simulation/plot_simulate_raw_data.py": { + "a": 5, + "d": 2 + }, + 
"examples/time_frequency/plot_time_frequency_simulated.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 2, + "d": 2 + }, + "mne/connectivity/spectral.py": { + "a": 12, + "d": 8 + }, + "mne/label.py": { + "a": 5, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 4, + "d": 4 + }, + "mne/report.py": { + "a": 17, + "d": 12 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 12, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_compute_covariance.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_eeg_erp.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 14, + "d": 15 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 4, + "d": 4 + }, + "tutorials/plot_visualize_epochs.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4751.json b/doc/sphinxext/prs/4751.json new file mode 100644 index 00000000000..5e1c86ab4f8 --- /dev/null +++ b/doc/sphinxext/prs/4751.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a99d78a7c3b865bd7c34a348e14e06dfebd4bb28", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4752.json b/doc/sphinxext/prs/4752.json new file mode 100644 index 00000000000..04053a9caa8 --- /dev/null +++ b/doc/sphinxext/prs/4752.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "086478e8334b38fbe02be4c0f2a147429906ba4a", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/cov.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4755.json b/doc/sphinxext/prs/4755.json new file mode 100644 index 00000000000..58a39ed56cb --- /dev/null +++ b/doc/sphinxext/prs/4755.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "44bac44efbc85f234c1cc642fbb6b6c9524fc101", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/evoked.py": { + "a": 13, + "d": 8 + }, + "mne/io/pick.py": { + "a": 27, + "d": 11 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 419, + "d": 202 + }, + "mne/viz/tests/test_evoked.py": { + "a": 72, + "d": 24 + }, + "tutorials/plot_metadata_epochs.py": { + "a": 66, + "d": 24 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4756.json b/doc/sphinxext/prs/4756.json new file mode 100644 index 00000000000..4b2b90b5387 --- /dev/null +++ b/doc/sphinxext/prs/4756.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "54897b95d624c3e86bd30ae7654aadbedb60b7a2", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/event.py": { + "a": 19, + "d": 4 + }, + "mne/io/edf/edf.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_event.py": { + "a": 12, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4758.json b/doc/sphinxext/prs/4758.json new file mode 100644 index 00000000000..2ee87a1454b --- /dev/null +++ b/doc/sphinxext/prs/4758.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"4e5a6c36d120d988939e22edd9d6a818039be3d6", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/476.json b/doc/sphinxext/prs/476.json new file mode 100644 index 00000000000..725cd4c0116 --- /dev/null +++ b/doc/sphinxext/prs/476.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1b4f703817bbff32e592e4d776593dd5364f9ff5", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4761.json b/doc/sphinxext/prs/4761.json new file mode 100644 index 00000000000..b023eb3048d --- /dev/null +++ b/doc/sphinxext/prs/4761.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "a7a62f2b77927b6e257a69fe3ce559d9d2e06fc9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/read.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 54, + "d": 54 + }, + "mne/io/tree.py": { + "a": 2, + "d": 5 + }, + "mne/io/write.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/client.py": { + "a": 3, + "d": 3 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4763.json b/doc/sphinxext/prs/4763.json new file mode 100644 index 00000000000..05f57bff7c1 --- /dev/null +++ b/doc/sphinxext/prs/4763.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e0f75c4eca651314eec63d1788b060f0e5980f0c", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 8, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4765.json b/doc/sphinxext/prs/4765.json new file mode 100644 index 00000000000..e7b26916b94 --- /dev/null +++ b/doc/sphinxext/prs/4765.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "caa5c8bbfe9042577502d837683d7033a4174b7d", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/stats/plot_sensor_regression.py": { + "a": 36, + "d": 59 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4767.json b/doc/sphinxext/prs/4767.json new file mode 100644 index 00000000000..481bbb38b30 --- /dev/null +++ b/doc/sphinxext/prs/4767.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c2d7e24d41060917da66ec94ffc866b7023ef4d5", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 48, + "d": 42 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/477.json b/doc/sphinxext/prs/477.json new file mode 100644 index 00000000000..c30829372dd --- /dev/null +++ b/doc/sphinxext/prs/477.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4c93d0381866ada142292ad9575ff4039ad4f48e", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/forward.py": { + "a": 49, + "d": 29 + }, + "mne/tests/test_forward.py": { + "a": 5, + "d": 2 + }, + "mne/utils.py": { + "a": 36, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4772.json b/doc/sphinxext/prs/4772.json new file mode 100644 index 
00000000000..a22fbf02e01 --- /dev/null +++ b/doc/sphinxext/prs/4772.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fe6fa4963cf2080160b9d435c122b4c8ba87d77a", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + "tutorials/plot_artifacts_correction_ica.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4773.json b/doc/sphinxext/prs/4773.json new file mode 100644 index 00000000000..03834ad027b --- /dev/null +++ b/doc/sphinxext/prs/4773.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2d9a08e2a59dfd32763435fd1ef96b5be62fdf43", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 7, + "d": 5 + }, + "mne/viz/utils.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4775.json b/doc/sphinxext/prs/4775.json new file mode 100644 index 00000000000..c518cf8eecb --- /dev/null +++ b/doc/sphinxext/prs/4775.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "857b1fed003004aed704db7ffaad10b2e94ac402", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/stats/plot_sensor_regression.py": { + "a": 25, + "d": 9 + }, + "mne/viz/evoked.py": { + "a": 44, + "d": 12 + }, + "mne/viz/tests/test_evoked.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4778.json b/doc/sphinxext/prs/4778.json new file mode 100644 index 00000000000..fc45f09d135 --- /dev/null +++ b/doc/sphinxext/prs/4778.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "723a5ddb885fa76f21b6a12e15e0d6725ca33528", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "tutorials/plot_artifacts_correction_ica.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/478.json b/doc/sphinxext/prs/478.json new file mode 100644 index 00000000000..476695a75a6 --- /dev/null +++ b/doc/sphinxext/prs/478.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "183b9f1806a054797b358370ec7647fcaf95ff0e", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/bti/raw.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4781.json b/doc/sphinxext/prs/4781.json new file mode 100644 index 00000000000..9dcba995f00 --- /dev/null +++ b/doc/sphinxext/prs/4781.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "57179af6e426d59591e2d6ff684e1478bc5b13f8", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 20, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4782.json b/doc/sphinxext/prs/4782.json new file mode 100644 index 00000000000..4493d06f664 --- /dev/null +++ b/doc/sphinxext/prs/4782.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0613771cbfb76022a1e9fb5594977ffbcc3e841f", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4785.json b/doc/sphinxext/prs/4785.json new file mode 100644 index 00000000000..f992ac1213e --- /dev/null +++ b/doc/sphinxext/prs/4785.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "63e4e3b98493dd488c9e5806daf83ea2bb227463", + "authors": [ + { + "n": "Christopher J. Bailey", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4786.json b/doc/sphinxext/prs/4786.json new file mode 100644 index 00000000000..e0403875739 --- /dev/null +++ b/doc/sphinxext/prs/4786.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7ff5d64cb8b772e868087e56352563ea21952f0b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/stats/plot_sensor_permutation_test.py": { + "a": 1, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 4, + "d": 3 + }, + "mne/stats/tests/test_permutations.py": { + "a": 20, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4787.json b/doc/sphinxext/prs/4787.json new file mode 100644 index 00000000000..824b6bf54f2 --- /dev/null +++ b/doc/sphinxext/prs/4787.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6c641dd74f869f316877e43dc15ebc4cfe8077e6", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/egimff.py": { + "a": 18, + "d": 0 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 37, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4789.json b/doc/sphinxext/prs/4789.json new file mode 100644 index 00000000000..d51884d38e1 --- /dev/null +++ b/doc/sphinxext/prs/4789.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c3a5cf4c6e9afec1b0706eac731af4127dfe13ea", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/479.json b/doc/sphinxext/prs/479.json new file mode 100644 index 00000000000..281bc21923b --- /dev/null +++ b/doc/sphinxext/prs/479.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"c14ee59f039927c3b7911949c43ed0c1e24769de", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ssp.py": { + "a": 18, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4790.json b/doc/sphinxext/prs/4790.json new file mode 100644 index 00000000000..688eb314896 --- /dev/null +++ b/doc/sphinxext/prs/4790.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b251871936be2d6c6489bf05412a13a9654c1f52", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4791.json b/doc/sphinxext/prs/4791.json new file mode 100644 index 00000000000..8857818bdd3 --- /dev/null +++ b/doc/sphinxext/prs/4791.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2c17fa7776497b7b021299b69ce6151adf8fe83d", + "authors": [ + { + "n": "Christopher J. Bailey", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/constants.py": { + "a": 1, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 12, + "d": 3 + }, + "mne/io/tests/test_meas_info.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4793.json b/doc/sphinxext/prs/4793.json new file mode 100644 index 00000000000..7422f4d4ec8 --- /dev/null +++ b/doc/sphinxext/prs/4793.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "fb0a8eb27b8a14fa894688e70242a895d4fc4790", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 6 + }, + "mne/report.py": { + "a": 13, + "d": 23 + }, + "mne/tests/test_report.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4794.json b/doc/sphinxext/prs/4794.json new file mode 100644 index 00000000000..48565ab6a35 --- /dev/null +++ b/doc/sphinxext/prs/4794.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "04ecb9a6cbe73ba786783196fa39fbc2ca02e47c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_decoding_spoc_CMC.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4798.json b/doc/sphinxext/prs/4798.json new file mode 100644 index 00000000000..c9bfb81da59 --- /dev/null +++ b/doc/sphinxext/prs/4798.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fd5b5fa864ebe140d6f3311e35933a51e67c41ca", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 26, + "d": 8 + }, + "mne/viz/topomap.py": { + "a": 43, + "d": 8 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4799.json b/doc/sphinxext/prs/4799.json new file mode 100644 index 00000000000..33c10960fb5 --- /dev/null +++ b/doc/sphinxext/prs/4799.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "e84b316cb188043c69a4958b74884bfb556d78d8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 
+ }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 4, + "d": 3 + }, + "examples/io/plot_read_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_evoked_whitening.py": { + "a": 12, + "d": 8 + }, + "mne/cov.py": { + "a": 2, + "d": 1 + }, + "mne/defaults.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/event.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 12, + "d": 54 + }, + "mne/io/base.py": { + "a": 3, + "d": 2 + }, + "mne/io/pick.py": { + "a": 2, + "d": 2 + }, + "mne/io/proc_history.py": { + "a": 3, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 40, + "d": 20 + }, + "mne/viz/evoked.py": { + "a": 97, + "d": 151 + }, + "mne/viz/ica.py": { + "a": 7, + "d": 10 + }, + "mne/viz/raw.py": { + "a": 41, + "d": 12 + }, + "mne/viz/tests/test_epochs.py": { + "a": 28, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 191, + "d": 168 + }, + "mne/viz/tests/test_raw.py": { + "a": 10, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 3, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 32, + "d": 10 + }, + "mne/viz/utils.py": { + "a": 200, + "d": 3 + }, + "tutorials/plot_compute_covariance.py": { + "a": 7, + "d": 8 + }, + "tutorials/plot_visualize_raw.py": { + "a": 11, + "d": 4 + }, + "tutorials/plot_whitened.py": { + "a": 71, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/48.json b/doc/sphinxext/prs/48.json new file mode 100644 index 00000000000..d5edabfe8b6 --- /dev/null +++ b/doc/sphinxext/prs/48.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "118b15b45738f49e44787d32f8520d0e95073386", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 65, + "d": 11 + }, + "mne/fiff/tests/test_raw.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/480.json b/doc/sphinxext/prs/480.json new file mode 100644 index 00000000000..9aea8e0d477 --- /dev/null +++ b/doc/sphinxext/prs/480.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3fb7465e3b4135bd5f274ea267ff84ae31784f9b", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4800.json b/doc/sphinxext/prs/4800.json new file mode 100644 index 00000000000..04e593e46ce --- /dev/null +++ b/doc/sphinxext/prs/4800.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cb3f93d45d9e4391ab2ef3e060fa69ce8a3755fa", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 4, + "d": 2 + }, + "mne/decoding/tests/test_base.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4801.json b/doc/sphinxext/prs/4801.json new file mode 100644 index 00000000000..2f84a41800a --- /dev/null +++ b/doc/sphinxext/prs/4801.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6dcc003dde543663ae11c389fbd5f830079f4056", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4803.json b/doc/sphinxext/prs/4803.json new file mode 100644 index 00000000000..71457421baf --- /dev/null +++ b/doc/sphinxext/prs/4803.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4c03ce1d7d5581c2e8ed0c96112d4b7bb35e54a1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4806.json b/doc/sphinxext/prs/4806.json new file mode 100644 index 00000000000..53d200ae150 --- /dev/null +++ b/doc/sphinxext/prs/4806.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a4d91852fcecf2b9db99884653e255ba02eeecf9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_movement_compensation.py": { + "a": 23, + "d": 8 + }, + "mne/chpi.py": { + "a": 11, + "d": 5 + }, + "mne/viz/_3d.py": { + "a": 118, + "d": 10 + }, + "mne/viz/tests/test_3d.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4807.json b/doc/sphinxext/prs/4807.json new file mode 100644 index 00000000000..3015020da6d --- /dev/null +++ b/doc/sphinxext/prs/4807.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "12e509c5384c7d5b73c056500ddc04788fb36506", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + ".travis.yml": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4808.json b/doc/sphinxext/prs/4808.json new file mode 100644 index 00000000000..f1749313a60 --- /dev/null +++ b/doc/sphinxext/prs/4808.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "5c9b89ac88c1747f50e0d665a7ccd3dd2b22c296", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 3, + "d": 3 + }, + "examples/preprocessing/plot_xdawn_denoising.py": { + "a": 3, + "d": 3 + }, + "mne/cov.py": { + "a": 233, + "d": 259 + }, + "mne/decoding/csp.py": { + "a": 36, + "d": 28 + }, + "mne/decoding/tests/test_csp.py": { + "a": 2, + "d": 1 + }, + "mne/fixes.py": { + "a": 281, + "d": 0 + }, + "mne/preprocessing/xdawn.py": { + "a": 19, + "d": 11 + }, + "mne/tests/test_cov.py": { + "a": 13, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4809.json b/doc/sphinxext/prs/4809.json new file mode 100644 index 00000000000..bff49501640 
--- /dev/null +++ b/doc/sphinxext/prs/4809.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "993240469782dd4eeb3717f088df19c70f8d3f11", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/event.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_event.py": { + "a": 22, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/481.json b/doc/sphinxext/prs/481.json new file mode 100644 index 00000000000..cbe931f7e0a --- /dev/null +++ b/doc/sphinxext/prs/481.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "affa3e884ae52fcd92350360b502b4a7543debe8", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 11, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 7, + "d": 0 + }, + "mne/connectivity/spectral.py": { + "a": 42, + "d": 31 + }, + "mne/minimum_norm/inverse.py": { + "a": 21, + "d": 8 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 6, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 232, + "d": 33 + }, + "mne/tests/test_source_estimate.py": { + "a": 69, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4810.json b/doc/sphinxext/prs/4810.json new file mode 100644 index 00000000000..af2b10c143a --- /dev/null +++ b/doc/sphinxext/prs/4810.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "20920d731f1ab6c60b8101290c5212e471f229c1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/proc_history.py": { + "a": 2, + "d": 2 + }, + "mne/io/tag.py": { + "a": 21, + "d": 14 + }, + "mne/io/tests/test_meas_info.py": { + "a": 37, + "d": 1 + }, + "mne/io/write.py": { + "a": 24, + "d": 3 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4811.json b/doc/sphinxext/prs/4811.json new file mode 100644 index 00000000000..ab33f217678 --- /dev/null +++ b/doc/sphinxext/prs/4811.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f568738b10c5186a1475d41e5d62373f9134ead6", + "authors": [ + { + "n": "Yaroslav Halchenko", + "e": null + } + ], + "changes": { + "mne/externals/tempita/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4812.json b/doc/sphinxext/prs/4812.json new file mode 100644 index 00000000000..c1e9a8d9705 --- /dev/null +++ b/doc/sphinxext/prs/4812.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "04ff09d594287d5fe6df20d124d13e7bc446477e", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 11, + "d": 11 + }, + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4814.json b/doc/sphinxext/prs/4814.json new file mode 100644 index 00000000000..84ca64565fe --- /dev/null +++ b/doc/sphinxext/prs/4814.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2d4b1d847222364fad93a4f9bc4f9d5b17542f46", + "authors": [ + { + "n": "Yaroslav Halchenko", + "e": null + } + ], + "changes": { + "mne/decoding/tests/test_receptive_field.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4825.json b/doc/sphinxext/prs/4825.json new file mode 
100644 index 00000000000..ac953f6a108 --- /dev/null +++ b/doc/sphinxext/prs/4825.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d6654b1bbd0459557b320de3674a0a877aa055f8", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4827.json b/doc/sphinxext/prs/4827.json new file mode 100644 index 00000000000..b4d4367d208 --- /dev/null +++ b/doc/sphinxext/prs/4827.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c3535731c2531b8ae5166a69ab78df9ca4c9badb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/preprocessing/plot_metadata_query.py": { + "a": 5, + "d": 2 + }, + "mne/epochs.py": { + "a": 69, + "d": 38 + }, + "mne/tests/test_epochs.py": { + "a": 46, + "d": 4 + }, + "tutorials/plot_metadata_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4830.json b/doc/sphinxext/prs/4830.json new file mode 100644 index 00000000000..7691cf4cd04 --- /dev/null +++ b/doc/sphinxext/prs/4830.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "5c9d1404904cfdcfd33d4452c6697353e2f24e35", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 3 + }, + "mne/chpi.py": { + "a": 4, + "d": 2 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 42, + "d": 34 + }, + "mne/tests/test_chpi.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_report.py": { + "a": 8, + "d": 8 + }, + "mne/utils.py": { + "a": 0, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 5 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4836.json b/doc/sphinxext/prs/4836.json new file mode 100644 index 00000000000..020a911f271 --- /dev/null +++ b/doc/sphinxext/prs/4836.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0fef95cd69f031318fe6dc339b5d3d3e2707cd6a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/externals/h5io/_h5io.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_receptive_field.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4838.json b/doc/sphinxext/prs/4838.json new file mode 100644 index 00000000000..629b340e7bc --- /dev/null +++ b/doc/sphinxext/prs/4838.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2f21c1b3a8146501fefd770e466095053651bb33", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/search_light.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_base.py": { + "a": 19, + "d": 2 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/484.json b/doc/sphinxext/prs/484.json new file mode 100644 index 00000000000..c30c50cba73 --- /dev/null +++ b/doc/sphinxext/prs/484.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9c4f57e85c55b761fe600f6ce99f61a2bbeca753", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 4 + }, + "mne/viz.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4840.json b/doc/sphinxext/prs/4840.json new file mode 100644 index 00000000000..e811a81d020 --- /dev/null +++ b/doc/sphinxext/prs/4840.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e23c3d148948bf8a720c6187fd750a6188dd09e5", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4842.json b/doc/sphinxext/prs/4842.json new file mode 100644 index 00000000000..5f8b944e000 --- /dev/null +++ b/doc/sphinxext/prs/4842.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "df7e00ecb760863bc6766e3052be625fc9a8ec5d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/pick.py": { + "a": 51, + "d": 49 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4844.json b/doc/sphinxext/prs/4844.json new file mode 100644 index 00000000000..8b29ba3941b --- /dev/null +++ b/doc/sphinxext/prs/4844.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d4350a5d617a3422c69f40501568886b304b0bf4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 82, + "d": 1 + }, + "mne/io/base.py": { + "a": 3, + "d": 12 + }, + "mne/io/fiff/raw.py": { + "a": 3, + "d": 26 + }, + "mne/tests/test_annotations.py": { + "a": 14, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4845.json b/doc/sphinxext/prs/4845.json new file mode 100644 index 00000000000..8f13fd05f16 --- /dev/null +++ b/doc/sphinxext/prs/4845.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "79959786786311aedd20da272fb4de94a20f16ef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 10, + "d": 0 + }, + "mne/annotations.py": { + "a": 25, + "d": 12 + }, + "mne/io/base.py": { + "a": 49, + "d": 18 + }, + "mne/io/constants.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/raw.py": { + "a": 3, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 61, + "d": 9 + }, + "mne/io/meas_info.py": { + "a": 10, + "d": 0 + }, + "mne/io/open.py": { + "a": 7, + "d": 3 + }, + "mne/io/tag.py": { + "a": 17, + "d": 3 + }, + "mne/io/tests/test_meas_info.py": { + "a": 4, + "d": 0 + }, + "mne/io/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 10, + "d": 5 + }, + "mne/tests/test_annotations.py": { + "a": 35, + "d": 4 + }, + "mne/viz/raw.py": { + "a": 14, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4846.json b/doc/sphinxext/prs/4846.json new file mode 100644 index 00000000000..61a2e044d53 --- /dev/null +++ b/doc/sphinxext/prs/4846.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "350416249589047f117f1194e8d8cf6061258f0c", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 7, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/4848.json b/doc/sphinxext/prs/4848.json new file mode 100644 index 00000000000..6bc160c3976 --- /dev/null +++ b/doc/sphinxext/prs/4848.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b50f0d8b198768cae3f33fd79f64271abd461f3e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 7, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/485.json b/doc/sphinxext/prs/485.json new file mode 100644 index 00000000000..7aa8564abff --- /dev/null +++ b/doc/sphinxext/prs/485.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ae20baeb3fe621164341717720dbf9bea92577a9", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "bin/mne_compute_proj_ecg.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4854.json b/doc/sphinxext/prs/4854.json new file mode 100644 index 00000000000..cc9637249bd --- /dev/null +++ b/doc/sphinxext/prs/4854.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "e62a46a1ae3200bfdfb8cb8d48c6bde3810c1fe6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "circle.yml": { + "a": 3, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_otp.py": { + "a": 84, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/otp.py": { + "a": 141, + "d": 0 + }, + "mne/preprocessing/tests/test_otp.py": { + "a": 104, + "d": 0 + }, + "mne/preprocessing/utils.py": { + "a": 59, + "d": 0 + }, + "tutorials/plot_brainstorm_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4857.json b/doc/sphinxext/prs/4857.json new file mode 100644 index 00000000000..7b621833ef9 --- /dev/null +++ b/doc/sphinxext/prs/4857.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a556375549dc45c2d1c573de935d47f846468372", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 9, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4858.json b/doc/sphinxext/prs/4858.json new file mode 100644 index 00000000000..1cbe1211c9e --- /dev/null +++ b/doc/sphinxext/prs/4858.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "548d8131d452720d9c6af7b74831c3182fd107fc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 4, + "d": 3 + }, + "mne/channels/interpolation.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/receptive_field.py": { + "a": 6, + "d": 6 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 2, + "d": 1 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 16, + "d": 0 + }, + "mne/forward/_compute_forward.py": { + "a": 8, + "d": 8 + }, + "mne/forward/_lead_dots.py": { + "a": 10, + "d": 9 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 14, + "d": 14 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/surface.py": { + "a": 11, + "d": 11 + }, + 
"mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/486.json b/doc/sphinxext/prs/486.json new file mode 100644 index 00000000000..d597e5bbc3e --- /dev/null +++ b/doc/sphinxext/prs/486.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "24952bf337564fb63cbadc7225f6b5232cfe5747", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "bin/mne_browse_raw.py": { + "a": 73, + "d": 0 + }, + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/python_tutorial.rst": { + "a": 1, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 4, + "d": 1 + }, + "examples/plot_read_and_write_raw_data.py": { + "a": 1, + "d": 6 + }, + "mne/fiff/proj.py": { + "a": 15, + "d": 5 + }, + "mne/fiff/raw.py": { + "a": 59, + "d": 0 + }, + "mne/tests/test_viz.py": { + "a": 6, + "d": 0 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + }, + "mne/viz.py": { + "a": 507, + "d": 4 + }, + "setup.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4860.json b/doc/sphinxext/prs/4860.json new file mode 100644 index 00000000000..4dad9f96827 --- /dev/null +++ b/doc/sphinxext/prs/4860.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "5551a88d7c6203483abb334d7e526f25c2885818", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 5, + "d": 4 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 3, + "d": 1 + }, + "mne/dipole.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 4, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 21, + "d": 21 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 10, + "d": 3 + }, + "mne/io/eeglab/eeglab.py": { + "a": 4, + "d": 3 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/io/tag.py": { + "a": 3, + "d": 1 + }, + "mne/source_space.py": { + "a": 6, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 5, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4861.json b/doc/sphinxext/prs/4861.json new file mode 100644 index 00000000000..66e899049fb --- /dev/null +++ b/doc/sphinxext/prs/4861.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "244a2508cf7b4bfb75cb94f55d19e137c3bf5163", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 8, + "d": 3 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 25, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4862.json b/doc/sphinxext/prs/4862.json new file mode 100644 index 00000000000..4c39c3b139f --- /dev/null +++ b/doc/sphinxext/prs/4862.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": 
"9b6f0cab5763fa5b8e8672561fb67c5d7bcdac6c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 20, + "d": 13 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 18, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4864.json b/doc/sphinxext/prs/4864.json new file mode 100644 index 00000000000..03d2a0a3d29 --- /dev/null +++ b/doc/sphinxext/prs/4864.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "fce5e9810af6df61b4fea853f9bb940fcaa3e43c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 55, + "d": 7 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4866.json b/doc/sphinxext/prs/4866.json new file mode 100644 index 00000000000..f474ad83331 --- /dev/null +++ b/doc/sphinxext/prs/4866.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e159bffa218683f62d2ea1a4fffc63f3d0d8a240", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4867.json b/doc/sphinxext/prs/4867.json new file mode 100644 index 00000000000..9dba6151836 --- /dev/null +++ b/doc/sphinxext/prs/4867.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1efbe92e6e82b48ff33a74a97c24e5c68853e0e5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 11, + "d": 16 + }, + "mne/tests/test_report.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4868.json b/doc/sphinxext/prs/4868.json new file mode 100644 index 00000000000..31f614c05f3 --- /dev/null +++ b/doc/sphinxext/prs/4868.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4b7db5e66e5ad82ac1e76d5b75705fd3c3d0dede", + "authors": [ + { + "n": "Kambiz Tavabi", + "e": "ktavabi@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4870.json b/doc/sphinxext/prs/4870.json new file mode 100644 index 00000000000..d670d41d5de --- /dev/null +++ b/doc/sphinxext/prs/4870.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "b9c712b2376b54f2ae80550945a0df8fe7ec156b", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/externals/decorator.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 40, + "d": 21 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 4, + "d": 4 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 4 + }, + "mne/io/tests/test_raw.py": { + "a": 12, + "d": 2 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + 
"mne/tests/test_annotations.py": { + "a": 20, + "d": 17 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_raw.py": { + "a": 7, + "d": 5 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4871.json b/doc/sphinxext/prs/4871.json new file mode 100644 index 00000000000..69dde1d1a17 --- /dev/null +++ b/doc/sphinxext/prs/4871.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8a302bd0659bf41c77d505d52fc7a3ae94621599", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 14, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 4, + "d": 2 + }, + "mne/source_space.py": { + "a": 5, + "d": 10 + }, + "mne/utils.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4878.json b/doc/sphinxext/prs/4878.json new file mode 100644 index 00000000000..518177e8d88 --- /dev/null +++ b/doc/sphinxext/prs/4878.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "491b412ee4f521e98a13e95d032e1c13ad67d3cb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 15, + "d": 9 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 7, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4879.json b/doc/sphinxext/prs/4879.json new file mode 100644 index 00000000000..147f10aa9e7 --- /dev/null +++ b/doc/sphinxext/prs/4879.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "329f1b10f625dc3169d82a6b36e941e10681bc96", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 61, + "d": 5 + }, + "doc/carousel.inc": { + "a": 1, + "d": 1 + }, + "doc/documentation.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_receptive_field_mtrf.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/488.json b/doc/sphinxext/prs/488.json new file mode 100644 index 00000000000..174cc4bbd27 --- /dev/null +++ b/doc/sphinxext/prs/488.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f5b4ebeb02872dfc4e855eccaf3f17fa1996fda7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4881.json b/doc/sphinxext/prs/4881.json new file mode 100644 index 00000000000..c94df87ad52 --- /dev/null +++ b/doc/sphinxext/prs/4881.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "56b9f40de9bff253afa63e7a927db4b4597d7b1a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/mne_browse_raw.py": { + "a": 5, + "d": 5 + }, + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4883.json b/doc/sphinxext/prs/4883.json new file mode 100644 index 00000000000..81def3b673a --- /dev/null +++ b/doc/sphinxext/prs/4883.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d1a1c47c9fb2cc9c57bdd119736414e31b917525", + "authors": [ + { + 
"n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4884.json b/doc/sphinxext/prs/4884.json new file mode 100644 index 00000000000..4a059a4fdca --- /dev/null +++ b/doc/sphinxext/prs/4884.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "323c2be3fd0d27fc42942d0f30df47f81df87ce3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4886.json b/doc/sphinxext/prs/4886.json new file mode 100644 index 00000000000..887f4a987cd --- /dev/null +++ b/doc/sphinxext/prs/4886.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2336e3603f219b35e2e4a9e4a3d7cf9f7132d22f", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4887.json b/doc/sphinxext/prs/4887.json new file mode 100644 index 00000000000..a5889c5262b --- /dev/null +++ b/doc/sphinxext/prs/4887.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b223ea351d9b6579aa64554748249eaa95bfba95", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 29, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4888.json b/doc/sphinxext/prs/4888.json new file mode 100644 index 00000000000..78722f79342 --- /dev/null +++ b/doc/sphinxext/prs/4888.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e6c2bc01588f11fb9f769f43451885024bcdb1f8", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 5 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/489.json b/doc/sphinxext/prs/489.json new file mode 100644 index 00000000000..1aa2404a42c --- /dev/null +++ b/doc/sphinxext/prs/489.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "64bbcd3188411034490deb8fe8cf22590d00551a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 38, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4891.json b/doc/sphinxext/prs/4891.json new file mode 100644 index 00000000000..03e6fa7e3c8 --- /dev/null +++ b/doc/sphinxext/prs/4891.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c2e88b0498d23b067374444038455c7ea2428650", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 8, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4894.json b/doc/sphinxext/prs/4894.json new file mode 100644 index 00000000000..cfb147959c3 --- /dev/null +++ b/doc/sphinxext/prs/4894.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ba7447ec2b2a3e6a006440e8151cae159b6335e0", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 24, + "d": 21 + }, + "mne/source_space.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_coreg.py": { + "a": 32, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4895.json b/doc/sphinxext/prs/4895.json new file mode 100644 
index 00000000000..e8e00466982 --- /dev/null +++ b/doc/sphinxext/prs/4895.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "412a1a65263495cdbbe42a455fd6899599adc794", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 14, + "d": 15 + }, + "mne/tests/test_transforms.py": { + "a": 28, + "d": 1 + }, + "mne/transforms.py": { + "a": 36, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/49.json b/doc/sphinxext/prs/49.json new file mode 100644 index 00000000000..5b30cba2988 --- /dev/null +++ b/doc/sphinxext/prs/49.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a26c197ea96739d8fe4345bc700623d447b83e4b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_estimate_covariance_matrix_baseline.py": { + "a": 55, + "d": 0 + }, + "examples/plot_estimate_covariance_matrix_raw.py": { + "a": 0, + "d": 0 + }, + "mne/cov.py": { + "a": 14, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 18, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4900.json b/doc/sphinxext/prs/4900.json new file mode 100644 index 00000000000..cbd46cb6555 --- /dev/null +++ b/doc/sphinxext/prs/4900.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "64d03eb6114e85168f9d4a0d5eae8cdc1470c378", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4902.json b/doc/sphinxext/prs/4902.json new file mode 100644 index 00000000000..025ef25747b --- /dev/null +++ b/doc/sphinxext/prs/4902.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a3c274c2c04346e26f1cb697b3a2561cf9480e15", + "authors": [ + { + "n": "Henrich Kolkhorst", + "e": null + } + ], + "changes": { + "mne/event.py": { + "a": 1, + "d": 0 + }, + "mne/realtime/epochs.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4903.json b/doc/sphinxext/prs/4903.json new file mode 100644 index 00000000000..01de54da2e2 --- /dev/null +++ b/doc/sphinxext/prs/4903.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ed4ef2df783669b0b3fa5095a60f84caf8f1093e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/defaults.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 16, + "d": 5 + }, + "mne/viz/tests/test_3d.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4905.json b/doc/sphinxext/prs/4905.json new file mode 100644 index 00000000000..4fe501c162a --- /dev/null +++ b/doc/sphinxext/prs/4905.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4438451fb53180529ffc511b28899658cdeeaa43", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 8, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4908.json b/doc/sphinxext/prs/4908.json new file mode 100644 index 00000000000..16948cff16f --- /dev/null +++ 
b/doc/sphinxext/prs/4908.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "94eb475d82125bfba0acade328d64fcae0f127a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 57, + "d": 7 + }, + "mne/channels/tests/test_channels.py": { + "a": 13, + "d": 0 + }, + "mne/io/base.py": { + "a": 4, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 7, + "d": 2 + }, + "mne/io/tests/test_pick.py": { + "a": 8, + "d": 6 + }, + "mne/tests/test_event.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4909.json b/doc/sphinxext/prs/4909.json new file mode 100644 index 00000000000..5dd11cf4311 --- /dev/null +++ b/doc/sphinxext/prs/4909.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ab1580266d01561b6e273ee3c3c90c042a182d6b", + "authors": [ + { + "n": "Maksymenko Kostiantyn", + "e": "makkostya@ukr.net" + } + ], + "changes": { + "mne/annotations.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4910.json b/doc/sphinxext/prs/4910.json new file mode 100644 index 00000000000..91f42023fd0 --- /dev/null +++ b/doc/sphinxext/prs/4910.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "76904ec1506f3efc0bf59e2675e071160b3e92ed", + "authors": [ + { + "n": "AnneSo", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/annotations.py": { + "a": 43, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4911.json b/doc/sphinxext/prs/4911.json new file mode 100644 index 00000000000..4633d29cf35 --- /dev/null +++ b/doc/sphinxext/prs/4911.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0e4cf70b742f36ada54550500b9e7d0133ec3d9b", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/io/ctf/eeg.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4913.json b/doc/sphinxext/prs/4913.json new file mode 100644 index 00000000000..77cb480333b --- /dev/null +++ b/doc/sphinxext/prs/4913.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "02334a9ce1ad1547d825028429fa8a5029262e91", + "authors": [ + { + "n": "Larry Eisenman", + "e": "lneisenman@hotmail.com" + } + ], + "changes": { + "examples/visualization/plot_3d_to_2d.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_ecog.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4915.json b/doc/sphinxext/prs/4915.json new file mode 100644 index 00000000000..2dcba60075d --- /dev/null +++ b/doc/sphinxext/prs/4915.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "5a9d6c608907166aab72dbf94d7bf31aeea5e81a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "circle.yml": { + "a": 4, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/manual/datasets_index.rst": { + "a": 8, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/phantom_4dbti/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/phantom_4dbti/phantom_4dbti.py": { + "a": 29, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 9, + "d": 3 + 
}, + "mne/utils.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_phantom_4DBTi.py": { + "a": 77, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4916.json b/doc/sphinxext/prs/4916.json new file mode 100644 index 00000000000..629bc8a5805 --- /dev/null +++ b/doc/sphinxext/prs/4916.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7414eb93b62e5eb8acac6a5b8902a2cb51f40614", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/defaults.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4918.json b/doc/sphinxext/prs/4918.json new file mode 100644 index 00000000000..a5934cab6df --- /dev/null +++ b/doc/sphinxext/prs/4918.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "83274909e0772e2ca00ff8826dda813d620f1e2e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 27, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4919.json b/doc/sphinxext/prs/4919.json new file mode 100644 index 00000000000..a4df99c6109 --- /dev/null +++ b/doc/sphinxext/prs/4919.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "38bd3c4fccf06445b6f5e8cdfa2a763c5bc95b5a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_phantom_4DBTi.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/492.json b/doc/sphinxext/prs/492.json new file mode 100644 index 00000000000..950666dfe6b --- /dev/null +++ b/doc/sphinxext/prs/492.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "85799b69570c4b530b48f8a0471184a469d76a62", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/kit/coreg.py": { + "a": 85, + "d": 43 + }, + "mne/fiff/kit/kit.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4920.json b/doc/sphinxext/prs/4920.json new file mode 100644 index 00000000000..f0cc067c5ed --- /dev/null +++ b/doc/sphinxext/prs/4920.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bcbde3bcdc62e1e3e1bf967f2f79c51f9fc4168e", + "authors": [ + { + "n": "Larry Eisenman", + "e": "lneisenman@hotmail.com" + } + ], + "changes": { + "doc/faq.rst": { + "a": 12, + "d": 0 + }, + "doc/install_mne_python.rst": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4921.json b/doc/sphinxext/prs/4921.json new file mode 100644 index 00000000000..ac1da051e7e --- /dev/null +++ b/doc/sphinxext/prs/4921.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "06afcc8166f9304d25a72ab159e954542f0cc177", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 7, + "d": 2 + }, + "mne/chpi.py": { + "a": 36, + "d": 35 + }, + "mne/preprocessing/maxwell.py": { + "a": 12, + "d": 5 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 8, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4924.json b/doc/sphinxext/prs/4924.json new file mode 100644 index 00000000000..075ed1e1d4d --- /dev/null +++ b/doc/sphinxext/prs/4924.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5c8f9a58c7572962659bf3cb63935c1808eaf196", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 24, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4925.json b/doc/sphinxext/prs/4925.json new file mode 100644 index 00000000000..46bcf7a6400 --- /dev/null +++ b/doc/sphinxext/prs/4925.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "107a887543447e3bf5d8f49b50378dd9cd6b87a8", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4927.json b/doc/sphinxext/prs/4927.json new file mode 100644 index 00000000000..e8b9214e594 --- /dev/null +++ b/doc/sphinxext/prs/4927.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7572b8c65bef636b3a630aa960f7b8259b721862", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4929.json b/doc/sphinxext/prs/4929.json new file mode 100644 index 00000000000..c611f6c0fc3 --- /dev/null +++ b/doc/sphinxext/prs/4929.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0db9155293dc1d402eb762bdea04748d030ba8df", + "authors": [ + { + "n": "Claire Braboszcz", + "e": "claire@guakamole.org" + } + ], + "changes": { + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/493.json b/doc/sphinxext/prs/493.json new file mode 100644 index 00000000000..4a12b9b2114 --- /dev/null +++ b/doc/sphinxext/prs/493.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6f098e5e25667b6a816380685cf25cf31ee68c9d", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/pick.py": { + "a": 15, + "d": 4 + }, + "mne/viz.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4932.json b/doc/sphinxext/prs/4932.json new file mode 100644 index 00000000000..fdab495173f --- /dev/null +++ b/doc/sphinxext/prs/4932.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "52bef7c42b32114016321819656b331cda2092ba", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".travis.yml": { + "a": 12, + "d": 8 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 10, + "d": 10 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4933.json b/doc/sphinxext/prs/4933.json new file mode 100644 index 00000000000..45fb89b26cf --- /dev/null +++ b/doc/sphinxext/prs/4933.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "21357b8f5ca4e44a568c63ac5af0d9c8923a7f5d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 117, + "d": 84 + }, + "mne/defaults.py": { + "a": 5, + "d": 4 + }, + "mne/io/pick.py": { + "a": 15, + "d": 12 + }, + "mne/tests/test_cov.py": { + "a": 7, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 5 + }, + "mne/viz/misc.py": { + "a": 21, + "d": 20 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4934.json b/doc/sphinxext/prs/4934.json new file mode 100644 index 00000000000..1f126f14bc9 --- /dev/null +++ b/doc/sphinxext/prs/4934.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f60a6ba591e45eefbe400725e27ea8a22fe52a65", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 4, + "d": 8 + }, + "mne/tests/test_cov.py": { + "a": 5, + "d": 0 + }, + "tutorials/plot_compute_covariance.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4935.json b/doc/sphinxext/prs/4935.json new file mode 100644 index 00000000000..66dbc2bbe38 --- /dev/null +++ b/doc/sphinxext/prs/4935.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f8ec6d74afa43a1053aa560605af171633ac344f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4936.json b/doc/sphinxext/prs/4936.json new file mode 100644 index 00000000000..cda52e35b21 --- /dev/null +++ b/doc/sphinxext/prs/4936.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "2b01a969f24531eb73f1695da1b6198c6df04d2e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 82, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 30, + "d": 7 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 55, + "d": 5 + }, + "mne/tests/test_annotations.py": { + "a": 172, + "d": 2 + }, + "mne/utils.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4937.json b/doc/sphinxext/prs/4937.json new file mode 100644 index 00000000000..bb129356460 --- /dev/null +++ b/doc/sphinxext/prs/4937.json @@ -0,0 +1,47 @@ +{ + 
"merge_commit_sha": "a78a355417bd46fc35b8650bf4a167d932242c83", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_coreg.py": { + "a": 19, + "d": 10 + }, + "mne/coreg.py": { + "a": 3, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 9, + "d": 2 + }, + "mne/gui/_coreg_gui.py": { + "a": 10, + "d": 3 + }, + "mne/gui/_file_traits.py": { + "a": 11, + "d": 12 + }, + "mne/gui/_viewer.py": { + "a": 57, + "d": 11 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 7, + "d": 0 + }, + "mne/surface.py": { + "a": 28, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4938.json b/doc/sphinxext/prs/4938.json new file mode 100644 index 00000000000..96317eb543f --- /dev/null +++ b/doc/sphinxext/prs/4938.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bab48aba426fc40c23cea9a9bb713e604a19d057", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/gui/_coreg_gui.py": { + "a": 1, + "d": 0 + }, + "mne/gui/_viewer.py": { + "a": 21, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4942.json b/doc/sphinxext/prs/4942.json new file mode 100644 index 00000000000..a1fb8b234c2 --- /dev/null +++ b/doc/sphinxext/prs/4942.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "318f433510afb0b0995f95614568da6e03b922df", + "authors": [ + { + "n": "Nathalie", + "e": "nat.gayraud@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 16, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4945.json b/doc/sphinxext/prs/4945.json new file mode 100644 index 00000000000..5e4e45097f5 --- /dev/null +++ b/doc/sphinxext/prs/4945.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "99f8f0400563c2c49934349b3cf0aaf8167ed0bc", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/utils.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4946.json b/doc/sphinxext/prs/4946.json new file mode 100644 index 00000000000..fe4fb2e9364 --- /dev/null +++ b/doc/sphinxext/prs/4946.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "c702c75158626fbe4dd6cdff0485037d7cf98f61", + "authors": [ + { + "n": "Katrin Leinweber", + "e": null + } + ], + "changes": { + "doc/faq.rst": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_rap_music.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_sensor_noise_level.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_dipole_orientations.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/495.json b/doc/sphinxext/prs/495.json new file mode 100644 index 00000000000..ecf6e245546 --- /dev/null +++ b/doc/sphinxext/prs/495.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d4ea654245b2ff006d168e8ed807d7456f73edce", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4951.json b/doc/sphinxext/prs/4951.json new file mode 100644 index 00000000000..2dacd539933 --- /dev/null +++ b/doc/sphinxext/prs/4951.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f31066490555870f7a5d160880126a29220cae09", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 6, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 23, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4955.json b/doc/sphinxext/prs/4955.json new file mode 100644 index 00000000000..34405126aa9 --- /dev/null +++ b/doc/sphinxext/prs/4955.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8ef95c9afacc9139922896f805395a3166bafb74", + "authors": [ + { + "n": "Maksymenko Kostiantyn", + "e": "makkostya@ukr.net" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 159, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 40, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4956.json b/doc/sphinxext/prs/4956.json new file mode 100644 index 00000000000..9c1881dd1ae --- /dev/null +++ b/doc/sphinxext/prs/4956.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c783f3f667c99d0ae015407cad220251173d8439", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 24, + "d": 8 + }, + "mne/preprocessing/xdawn.py": { + "a": 11, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/496.json b/doc/sphinxext/prs/496.json new file mode 100644 index 00000000000..5e0799c9f9c --- /dev/null +++ b/doc/sphinxext/prs/496.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "43d48ca1b9659770a5b8c312c33d4df280c858e1", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4960.json b/doc/sphinxext/prs/4960.json new file mode 100644 index 00000000000..ec4fb1d2eaa --- /dev/null +++ b/doc/sphinxext/prs/4960.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6e36f2b2094c041462a849b2222ecb26676bd308", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4962.json b/doc/sphinxext/prs/4962.json new file mode 100644 index 00000000000..1268d93418f --- /dev/null +++ b/doc/sphinxext/prs/4962.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e22d010c183371e8700a19285564270f7fb9d505", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 10, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4964.json b/doc/sphinxext/prs/4964.json new file mode 100644 index 00000000000..a1d74352852 --- /dev/null +++ b/doc/sphinxext/prs/4964.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3bb9b7458cc5d9872da1ed094fd6d527e279cc65", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4965.json b/doc/sphinxext/prs/4965.json new file mode 100644 index 00000000000..c8494061cac --- /dev/null +++ b/doc/sphinxext/prs/4965.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a4d027096fe21a35606b6fd02dfab6b1e9521cf8", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_channel_epochs_image.py": { + "a": 2, + "d": 2 + }, + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4966.json b/doc/sphinxext/prs/4966.json new file mode 100644 index 00000000000..c86f2246a6e --- /dev/null +++ b/doc/sphinxext/prs/4966.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "24668f4e9434cc6f81f05f641b2973e56a5b6b1e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/forward/plot_decimate_head_surface.py": { + "a": 1, + "d": 2 + }, + "mne/bem.py": { + "a": 4, + "d": 4 + }, + "mne/commands/mne_coreg.py": { + "a": 18, + "d": 3 + }, + "mne/coreg.py": { + "a": 1, + "d": 3 + }, + "mne/gui/__init__.py": { + "a": 38, + "d": 6 + }, + "mne/gui/_coreg_gui.py": { + "a": 181, + "d": 106 + }, + "mne/gui/_fiducials_gui.py": { + "a": 33, + "d": 25 + }, + "mne/gui/_file_traits.py": { + "a": 19, + "d": 5 + }, + "mne/gui/_viewer.py": { + "a": 91, + "d": 22 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 34, + "d": 8 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 5, + "d": 5 + }, + "mne/utils.py": { + "a": 4, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4967.json b/doc/sphinxext/prs/4967.json new file mode 100644 index 00000000000..b79761f423b --- /dev/null +++ b/doc/sphinxext/prs/4967.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fe067e403cf1d20e5ede87d274933b026284083a", + "authors": [ + { + 
"n": "Erik Hornberger", + "e": null + } + ], + "changes": { + "examples/inverse/plot_vector_mne_solution.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4969.json b/doc/sphinxext/prs/4969.json new file mode 100644 index 00000000000..28d1d60babb --- /dev/null +++ b/doc/sphinxext/prs/4969.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "239c83dfe0686084d1c96c945579dc8cc10e895c", + "authors": [ + { + "n": "Phillip Alday", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/stats/regression.py": { + "a": 5, + "d": 4 + }, + "mne/stats/tests/test_regression.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/497.json b/doc/sphinxext/prs/497.json new file mode 100644 index 00000000000..219880e6002 --- /dev/null +++ b/doc/sphinxext/prs/497.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7ec2d7355fe2e47a455625ae5321403cb8afbf3c", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4971.json b/doc/sphinxext/prs/4971.json new file mode 100644 index 00000000000..70350c4c20a --- /dev/null +++ b/doc/sphinxext/prs/4971.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "c51705e1915835fe48fd37ff040edadfcae45853", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 11, + "d": 3 + }, + "mne/commands/mne_coreg.py": { + "a": 11, + "d": 2 + }, + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 1 + }, + "mne/coreg.py": { + "a": 76, + "d": 197 + }, + "mne/gui/__init__.py": { + "a": 36, + "d": 33 + }, + "mne/gui/_coreg_gui.py": { + "a": 1252, + "d": 828 + }, + "mne/gui/_fiducials_gui.py": { + "a": 96, + "d": 39 + }, + "mne/gui/_file_traits.py": { + "a": 40, + "d": 35 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 22, + "d": 5 + }, + "mne/gui/_marker_gui.py": { + "a": 27, + "d": 49 + }, + "mne/gui/_viewer.py": { + "a": 221, + "d": 131 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 57, + "d": 45 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 11, + "d": 11 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 34, + "d": 22 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 43, + "d": 28 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 6, + "d": 5 + }, + "mne/surface.py": { + "a": 73, + "d": 40 + }, + "mne/tests/test_coreg.py": { + "a": 4, + "d": 69 + }, + "mne/tests/test_report.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 11, + "d": 6 + }, + "mne/tests/test_utils.py": { + "a": 2, + "d": 1 + }, + "mne/transforms.py": { + "a": 2, + "d": 1 + }, + "mne/utils.py": { + "a": 24, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 15, + "d": 17 + }, + "mne/viz/tests/test_3d.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4973.json b/doc/sphinxext/prs/4973.json new file mode 100644 index 00000000000..05d022e102c --- /dev/null +++ b/doc/sphinxext/prs/4973.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d2850c8a11288348cb322bf18c0e188883c98cbe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_source_estimate.py": { + "a": 29, + "d": 1 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 9, + "d": 7 + 
}, + "tutorials/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 8, + "d": 4 + }, + "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 13, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4974.json b/doc/sphinxext/prs/4974.json new file mode 100644 index 00000000000..64c043833e4 --- /dev/null +++ b/doc/sphinxext/prs/4974.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "da6d93195cdd3ce7e9a4859c0ff7cc2d259c85e8", + "authors": [ + { + "n": "Erik Hornberger", + "e": null + } + ], + "changes": { + "examples/inverse/plot_custom_inverse_solver.py": { + "a": 1, + "d": 6 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 1, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4976.json b/doc/sphinxext/prs/4976.json new file mode 100644 index 00000000000..627e6bdde50 --- /dev/null +++ b/doc/sphinxext/prs/4976.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f6e3fe522bff721114212066d9dcecd330efdfd1", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4978.json b/doc/sphinxext/prs/4978.json new file mode 100644 index 00000000000..3b91989d672 --- /dev/null +++ b/doc/sphinxext/prs/4978.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "cdffbc0f437f185bd7fbb59a53f70e29419a500b", + "authors": [ + { + "n": "Pierre Ablin", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "environment.yml": { + "a": 2, + "d": 0 + }, + "environment2.yml": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_ica_comparison.py": { + "a": 65, + "d": 0 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 0, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 54, + "d": 16 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 102, + "d": 47 + }, + "mne/utils.py": { + "a": 12, + "d": 13 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 19, + "d": 2 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 15, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/498.json b/doc/sphinxext/prs/498.json new file mode 100644 index 00000000000..9a1b7f1b559 --- /dev/null +++ b/doc/sphinxext/prs/498.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "06e22e578dfbb1d6f16e6fc83488a748e35c186c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_viz.py": { + "a": 2, + "d": 1 + }, + "mne/viz.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4981.json b/doc/sphinxext/prs/4981.json new file mode 100644 index 00000000000..62535e5d3b7 --- /dev/null +++ b/doc/sphinxext/prs/4981.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "d0f8da7d0f53a7c596b78f22d09197637b9230aa", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/pick.py": { + "a": 31, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 44, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 487, + "d": 47 + }, + "mne/viz/epochs.py": { + "a": 6, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 18, + "d": 52 + }, + 
"mne/viz/topo.py": { + "a": 8, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 29, + "d": 37 + }, + "mne/viz/utils.py": { + "a": 70, + "d": 0 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 14, + "d": 4 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4983.json b/doc/sphinxext/prs/4983.json new file mode 100644 index 00000000000..dae13c95600 --- /dev/null +++ b/doc/sphinxext/prs/4983.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a188859b57044fa158af05852bcce2870fabde91", + "authors": [ + { + "n": "Pierre Ablin", + "e": null + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4985.json b/doc/sphinxext/prs/4985.json new file mode 100644 index 00000000000..eb50ae04478 --- /dev/null +++ b/doc/sphinxext/prs/4985.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5b0ab911c897ef2f6830b97f1b366b68ff5f94de", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/manual/c_reference.rst": { + "a": 7, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4986.json b/doc/sphinxext/prs/4986.json new file mode 100644 index 00000000000..1062e2cb36f --- /dev/null +++ b/doc/sphinxext/prs/4986.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "10b9ab9ef352a5f2fca4e786046b3c7ca7cf1098", + "authors": [ + { + "n": "Dominik Krzeminski (dokato)", + "e": "raymon92@gmail.com" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4988.json b/doc/sphinxext/prs/4988.json new file mode 100644 index 00000000000..a869605b213 --- /dev/null +++ b/doc/sphinxext/prs/4988.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5f53bd96450e138bf98605d0d90d8018651926d2", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/_static/institutions.png": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/499.json b/doc/sphinxext/prs/499.json new file mode 100644 index 00000000000..8faec1617fa --- /dev/null +++ b/doc/sphinxext/prs/499.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e92672eb5837fe47a915d325ae53f2121796a8d0", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/__init__.py": { + "a": 2, + "d": 3 + }, + "mne/fiff/raw.py": { + "a": 210, + "d": 242 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4990.json b/doc/sphinxext/prs/4990.json new file mode 100644 index 00000000000..93a96c92f49 --- /dev/null +++ b/doc/sphinxext/prs/4990.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d77cc95b0cf56b1df5886c9ebc52261a55695331", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 10, + "d": 13 + }, + "mne/io/tests/test_meas_info.py": { + "a": 111, + "d": 93 + }, + "mne/io/write.py": { + "a": 8, + "d": 12 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4993.json b/doc/sphinxext/prs/4993.json new file mode 100644 index 00000000000..2f7edb3c0cf --- /dev/null +++ b/doc/sphinxext/prs/4993.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c179f30b18e1290c7d9f2d1c730631dcc67e621c", + "authors": [ + { + "n": "mathurinm", + "e": "mathurin.massias@gmail.com" + } + ], + "changes": { + 
"doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 7, + "d": 7 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 60, + "d": 17 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 132, + "d": 10 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 23, + "d": 10 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 69, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4995.json b/doc/sphinxext/prs/4995.json new file mode 100644 index 00000000000..52c3a6f9988 --- /dev/null +++ b/doc/sphinxext/prs/4995.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0afb4e2e8130a38fcb6d793629640d8865b557bc", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4996.json b/doc/sphinxext/prs/4996.json new file mode 100644 index 00000000000..d41763138ee --- /dev/null +++ b/doc/sphinxext/prs/4996.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f4eb3600c56b4ba6bd511eb947948e8b91e1ab96", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 7, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4997.json b/doc/sphinxext/prs/4997.json new file mode 100644 index 00000000000..2b3b24945a3 --- /dev/null +++ b/doc/sphinxext/prs/4997.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4887a6c05204b0a03d7f92357f004a429bd712d6", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_file_traits.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/4999.json b/doc/sphinxext/prs/4999.json new file mode 100644 index 00000000000..fff517150d2 --- /dev/null +++ b/doc/sphinxext/prs/4999.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8270c1840ea821d8307ba2e16e3f334f8913f01f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5.json b/doc/sphinxext/prs/5.json new file mode 100644 index 00000000000..5fa245a8588 --- /dev/null +++ b/doc/sphinxext/prs/5.json @@ -0,0 +1,531 @@ +{ + "merge_commit_sha": "ad5ca5f0c222c47deb3dc1d2632629de05e21976", + "authors": [ + { + "n": "Yaroslav Halchenko", + "e": null + } + ], + "changes": { + "AUTHORS.rst": { + "a": 0, + "d": 0 + }, + "LICENSE.txt": { + "a": 0, + "d": 0 + }, + "MANIFEST.in": { + "a": 0, + "d": 0 + }, + "README.rst": { + "a": 0, + "d": 0 + }, + "bin/mne_flash_bem_model.py": { + "a": 0, + "d": 0 + }, + "doc/Makefile": { + "a": 0, + "d": 0 + }, + "doc/source/conf.py": { + "a": 0, + "d": 0 + }, + "doc/source/getting_started.rst": { + "a": 0, + "d": 0 + }, + "doc/source/index.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/AppA.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/AppB.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/AppEULA.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/AppInstall.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/AppReleaseNotes.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/analyze.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/browse.rst": { + "a": 0, + "d": 0 + }, + 
"doc/source/manual/convert.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/cookbook.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/forward.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/intro.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/list.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/matlab.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/MNE_preferences.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/adjust_alignment.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/adjust_lights.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/adjust_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/cont_hpi_data.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/dipole_list.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/dipole_parameters.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/dipoles_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/epoch_selector.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/field_mapping_pref.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/file_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/hardcopy_controls.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/help_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/image_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/label_list.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/labels_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/main_window.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/movie_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/mri_viewer.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/open_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/overlay_management.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/patch_selection_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/save_label_timecourse.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/scales_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/surface_controls.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/surface_selection_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/timecourse_manager.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/view_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/viewer.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/viewer_options.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/visualize_hpi.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_analyze/windows_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/adjust_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/adust_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/average_pref.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/channel_selection.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/file_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/filter_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/help_menu.png": { + "a": 0, + "d": 0 + }, + 
"doc/source/manual/mne_browse_raw/main.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/manage_averages_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/new_selection.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/new_ssp.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/open_dialog copy.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/open_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/process_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/process_menu2.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/scales_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/scales_dialog2.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/toolbar.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-0.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-1.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-10.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-11.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-12.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-13.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-14.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-15.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-16.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-17.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-2.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-3.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-4.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-5.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-6.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-7.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-8.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu-9.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/mne_browse_raw/windows_menu.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/morph.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/Averaging-flowchart.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/CoordinateSystems.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/Digitizer-example.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/Flowchart.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/HeadCS.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/cover.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/flat.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/morphed.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/neuromag.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/orig.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/proj-off-on.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/pics/title_page.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/reading.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/sampledata.rst": { + "a": 0, + "d": 0 + }, + "doc/source/manual/utilities.rst": { + "a": 0, + "d": 0 + }, + 
"doc/source/mne-python.rst": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/README.txt": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/docscrape.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/docscrape_sphinx.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/gen_rst.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/ipython_console_highlighting.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/numpy_ext/__init__.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/numpy_ext/docscrape.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/numpy_ext/docscrape_sphinx.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/numpy_ext/numpydoc.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/numpy_ext_old/__init__.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/numpy_ext_old/docscrape.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/numpy_ext_old/docscrape_sphinx.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/numpy_ext_old/numpydoc.py": { + "a": 0, + "d": 0 + }, + "doc/sphinxext/only_directives.py": { + "a": 0, + "d": 0 + }, + "examples/README.txt": { + "a": 0, + "d": 0 + }, + "examples/inverse/README.txt": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/README.txt": { + "a": 0, + "d": 0 + }, + "examples/stats/README.txt": { + "a": 0, + "d": 0 + }, + "examples/time_frequency/README.txt": { + "a": 0, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 0, + "d": 0 + }, + "setup.cfg": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/50.json b/doc/sphinxext/prs/50.json new file mode 100644 index 00000000000..8d6211662fa --- /dev/null +++ b/doc/sphinxext/prs/50.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "82f35f7b09f05bbab9af9f64994e9364cb21d57a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 7, + "d": 5 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 6, + "d": 6 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 4, + "d": 3 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 7, + "d": 7 + }, + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 5, + "d": 6 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 3, + "d": 4 + }, + "mne/epochs.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 60, + "d": 30 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 15, + "d": 16 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 20, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/500.json b/doc/sphinxext/prs/500.json new file mode 100644 index 00000000000..ffd5337b0c1 --- /dev/null +++ b/doc/sphinxext/prs/500.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c42c1c0f1a680ab9ee1634f233b87ffcc09190c8", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_viz.py": { + "a": 2, + "d": 0 + }, + "mne/viz.py": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5000.json b/doc/sphinxext/prs/5000.json new file mode 100644 index 00000000000..125da27059d --- /dev/null +++ b/doc/sphinxext/prs/5000.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "58e59df8aa99aab046eced1374c80eff290c4f2a", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 26, + "d": 7 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 11, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5001.json b/doc/sphinxext/prs/5001.json new file mode 100644 index 00000000000..d211674a53a --- /dev/null +++ b/doc/sphinxext/prs/5001.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "dc5698e8b6e89179412699ba0237b6b24100b3f5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/dipole.py": { + "a": 14, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 12, + "d": 0 + }, + "tutorials/plot_dipole_fit.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5004.json b/doc/sphinxext/prs/5004.json new file mode 100644 index 00000000000..9cceb044967 --- /dev/null +++ b/doc/sphinxext/prs/5004.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "eb444700eed3157a6d42c129c3ccd8dfbb0babbc", + "authors": [ + { + "n": "Oleh Kozynets", + "e": null + } + ], + "changes": { + ".gitignore": { + "a": 3, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/ctf/ctf.py": { + "a": 24, + "d": 4 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 36, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5006.json b/doc/sphinxext/prs/5006.json new file mode 100644 index 00000000000..9d60fbe980a --- /dev/null +++ b/doc/sphinxext/prs/5006.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ade69aa3650712a94b40c7395f548a2109690c49", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5007.json b/doc/sphinxext/prs/5007.json new file mode 100644 index 00000000000..e15ec7b79bd --- /dev/null +++ b/doc/sphinxext/prs/5007.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "44ddced58a39977e2c7b6b0af8be59736f38dc0e", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/utils.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/501.json b/doc/sphinxext/prs/501.json new file mode 100644 index 00000000000..81da1927031 --- /dev/null +++ b/doc/sphinxext/prs/501.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a0d91e18cf71565c312001a4282c565465f09fd0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/kit/kit.py": { + "a": 3, + "d": 3 + }, + "mne/fiff/kit/test/test_kit.py": { + "a": 6, + "d": 2 + }, + "mne/forward.py": { + "a": 4, + "d": 0 + }, + "mne/proj.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_evoked.py": { 
+ "a": 8, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 17, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5010.json b/doc/sphinxext/prs/5010.json new file mode 100644 index 00000000000..2dcc966b887 --- /dev/null +++ b/doc/sphinxext/prs/5010.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "36446621d6e001ddf21fe850bf8d42ad59035930", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/stats/plot_sensor_regression.py": { + "a": 14, + "d": 1 + }, + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 8, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 127, + "d": 26 + }, + "mne/viz/tests/test_evoked.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5011.json b/doc/sphinxext/prs/5011.json new file mode 100644 index 00000000000..7a3ad7081b2 --- /dev/null +++ b/doc/sphinxext/prs/5011.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fdd38711177ffd74af93c6a9cd8f37e7cebfb823", + "authors": [ + { + "n": "Dominik Krzeminski (dokato)", + "e": "raymon92@gmail.com" + } + ], + "changes": { + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5012.json b/doc/sphinxext/prs/5012.json new file mode 100644 index 00000000000..d230a598791 --- /dev/null +++ b/doc/sphinxext/prs/5012.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "7915893dc27aea13c5ca1b550da043bc6867468b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 14, + "d": 7 + }, + ".travis.yml": { + "a": 1, + "d": 1 + }, + "doc/advanced_setup.rst": { + "a": 7, + "d": 19 + }, + "doc/documentation.rst": { + "a": 1, + "d": 1 + }, + "doc/install_mne_python.rst": { + "a": 57, + "d": 59 + }, + "doc/manual/datasets_index.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 4 + }, + "environment.yml": { + "a": 10, + "d": 7 + }, + "mne/io/eximia/eximia.py": { + "a": 2, + "d": 2 + }, + "mne/utils.py": { + "a": 11, + "d": 0 + }, + "tutorials/plot_compute_covariance.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5016.json b/doc/sphinxext/prs/5016.json new file mode 100644 index 00000000000..425af35508e --- /dev/null +++ b/doc/sphinxext/prs/5016.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "265c6696904bdb70f8f2cbf4f3c8dd93f617d8f8", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/forward/_compute_forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5019.json b/doc/sphinxext/prs/5019.json new file mode 100644 index 00000000000..f2a17ea8cb7 --- /dev/null +++ b/doc/sphinxext/prs/5019.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e85df44c0636ab0910e4de1de4f49700b68efcb5", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5021.json b/doc/sphinxext/prs/5021.json new file mode 100644 index 00000000000..ed6cbe8f506 --- /dev/null +++ b/doc/sphinxext/prs/5021.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": 
"96654288dd107afcba9ae5cede7c72daf8c66ef0", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 14, + "d": 6 + }, + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 11, + "d": 9 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 8, + "d": 10 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 6, + "d": 6 + }, + "examples/time_frequency/plot_compute_csd.py": { + "a": 88, + "d": 0 + }, + "mne/beamformer/_dics.py": { + "a": 57, + "d": 56 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 77, + "d": 86 + }, + "mne/time_frequency/__init__.py": { + "a": 4, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 1302, + "d": 235 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 679, + "d": 172 + }, + "mne/viz/__init__.py": { + "a": 4, + "d": 3 + }, + "mne/viz/misc.py": { + "a": 119, + "d": 0 + }, + "mne/viz/tests/test_misc.py": { + "a": 18, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/503.json b/doc/sphinxext/prs/503.json new file mode 100644 index 00000000000..90953715b9a --- /dev/null +++ b/doc/sphinxext/prs/503.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "eb9d1153cf9a0a14450577c8bf45ac741bb58b32", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/kit/constants.py": { + "a": 1, + "d": 0 + }, + "mne/fiff/kit/kit.py": { + "a": 244, + "d": 132 + }, + "mne/fiff/kit/test/data/test.sqd": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/test/data/test_Ykgw.mat": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/test/data/test_bin.fif": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/test/data/test_py.fif": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/test/test_kit.py": { + "a": 71, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5031.json b/doc/sphinxext/prs/5031.json new file mode 100644 index 00000000000..af58ec91000 --- /dev/null +++ b/doc/sphinxext/prs/5031.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "11ae455f3e69949c92d0fc7f98204a807a7f924f", + "authors": [ + { + "n": "mathurinm", + "e": "mathurin.massias@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 7, + "d": 3 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 83, + "d": 60 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5034.json b/doc/sphinxext/prs/5034.json new file mode 100644 index 00000000000..e86b8bd3c51 --- /dev/null +++ b/doc/sphinxext/prs/5034.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6b40ace7df1a24f94c7477abb057f77fba776dfc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/evoked.py": { + "a": 5, + "d": 5 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5035.json b/doc/sphinxext/prs/5035.json new file mode 100644 index 00000000000..e71bc7bce61 --- /dev/null +++ b/doc/sphinxext/prs/5035.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "3a109ba9d5cbf9a08912d1c34ba44ddce8329a58", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/manual/source_localization/inverse.rst": { + "a": 3, + "d": 4 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + 
"d": 0 + }, + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 1, + "d": 1 + }, + "mne/defaults.py": { + "a": 1, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 2 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 145, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 148, + "d": 58 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 7, + "d": 41 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 107, + "d": 16 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 48, + "d": 34 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 29, + "d": 46 + }, + "mne/viz/tests/test_3d.py": { + "a": 5, + "d": 1 + }, + "tutorials/plot_dipole_orientations.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_introduction.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 43, + "d": 39 + }, + "tutorials/plot_mne_solutions.py": { + "a": 112, + "d": 0 + }, + "tutorials/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 0, + "d": 2 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 1, + "d": 3 + }, + "tutorials/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 0, + "d": 2 + }, + "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 6, + "d": 8 + }, + "tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 0, + "d": 2 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5036.json b/doc/sphinxext/prs/5036.json new file mode 100644 index 00000000000..37c1c6d49c7 --- /dev/null +++ b/doc/sphinxext/prs/5036.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "59c116e0ec3718586f2a6664702a889e06e3873e", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 36, + "d": 17 + }, + "mne/time_frequency/tfr.py": { + "a": 17, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5037.json b/doc/sphinxext/prs/5037.json new file mode 100644 index 00000000000..d0ac9490aa0 --- /dev/null +++ b/doc/sphinxext/prs/5037.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ce9a165fd57ceeb7dfa8d5b75f732d95260c61bf", + "authors": [ + { + "n": "mathurinm", + "e": "mathurin.massias@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5039.json b/doc/sphinxext/prs/5039.json new file mode 100644 index 00000000000..ef3a3acfdd8 --- /dev/null +++ b/doc/sphinxext/prs/5039.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bb8dd9aa2b1c387497c396382e38defdb4698c0e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/multitaper.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/504.json b/doc/sphinxext/prs/504.json new file mode 100644 index 00000000000..1fd823d2355 --- /dev/null +++ b/doc/sphinxext/prs/504.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2225ce9909964db387d962e55f6dfe2605334e84", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 98, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/5040.json b/doc/sphinxext/prs/5040.json new file mode 100644 index 00000000000..48483c1a860 --- /dev/null +++ b/doc/sphinxext/prs/5040.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3154e6dc07b0da3b6709172b0c0348dbed6ad21d", + "authors": [ + { + "n": "Tommy Clausner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/peak_finder.py": { + "a": 10, + "d": 4 + }, + "mne/preprocessing/tests/test_peak_finder.py": { + "a": 35, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5041.json b/doc/sphinxext/prs/5041.json new file mode 100644 index 00000000000..3bde4ed4a15 --- /dev/null +++ b/doc/sphinxext/prs/5041.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "9f01effada074332ef93750138fc9deabef51ca9", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 24, + "d": 12 + }, + "mne/viz/evoked.py": { + "a": 27, + "d": 17 + }, + "mne/viz/utils.py": { + "a": 13, + "d": 10 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 35, + "d": 38 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5042.json b/doc/sphinxext/prs/5042.json new file mode 100644 index 00000000000..cef96752876 --- /dev/null +++ b/doc/sphinxext/prs/5042.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ce59305d4ffaa4c6ce2bccbf7b1fc0cd7713eda7", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/transformer.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5043.json b/doc/sphinxext/prs/5043.json new file mode 100644 index 00000000000..217e62f2cd8 --- /dev/null +++ b/doc/sphinxext/prs/5043.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "2d0d5b95c65674f9dd5b90cd8af55467e7b04c93", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 5, + "d": 6 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 8, + "d": 7 + }, + "mne/io/eximia/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/eximia/eximia.py": { + "a": 93, + "d": 0 + }, + "mne/io/eximia/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/eximia/tests/test_eximia.py": { + "a": 45, + "d": 0 + }, + "mne/io/utils.py": { + "a": 14, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5044.json b/doc/sphinxext/prs/5044.json new file mode 100644 index 00000000000..5c416b2f9bf --- /dev/null +++ b/doc/sphinxext/prs/5044.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "374d4b53e17a3aa54efb6628e443448f0c0ca9f5", + "authors": [ + { + "n": "mathurinm", + "e": "mathurin.massias@gmail.com" + } + ], + "changes": { + "mne/inverse_sparse/mxne_inverse.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5045.json b/doc/sphinxext/prs/5045.json new file mode 100644 index 00000000000..bbdc8ca86c7 --- /dev/null +++ 
b/doc/sphinxext/prs/5045.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "caf0862daf01ddbd47e6f51065b9fbedbb6ba494", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/coreg.py": { + "a": 37, + "d": 19 + }, + "mne/io/meas_info.py": { + "a": 11, + "d": 2 + }, + "mne/tests/test_coreg.py": { + "a": 38, + "d": 33 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5048.json b/doc/sphinxext/prs/5048.json new file mode 100644 index 00000000000..74631cde9cd --- /dev/null +++ b/doc/sphinxext/prs/5048.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4ef915d9af22199654a5763670b4789da35d28f8", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/channels/interpolation.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5051.json b/doc/sphinxext/prs/5051.json new file mode 100644 index 00000000000..0e53c0a4b56 --- /dev/null +++ b/doc/sphinxext/prs/5051.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9f6d4f4432eb218153d6b7da193d549a23739a3a", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5052.json b/doc/sphinxext/prs/5052.json new file mode 100644 index 00000000000..7f07fa1b1c0 --- /dev/null +++ b/doc/sphinxext/prs/5052.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "9a7f28c52799044660a9844b6b115a2438cfaa7a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 3, + "d": 20 + }, + "mne/filter.py": { + "a": 5, + "d": 12 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 14, + "d": 29 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 16 + }, + "mne/time_frequency/csd.py": { + "a": 6, + "d": 55 + }, + "mne/time_frequency/multitaper.py": { + "a": 35, + "d": 18 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 89, + "d": 61 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5055.json b/doc/sphinxext/prs/5055.json new file mode 100644 index 00000000000..24aa54645a0 --- /dev/null +++ b/doc/sphinxext/prs/5055.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "375c3b67f6b5b73d7be6ec5902c6911e0c87b504", + "authors": [ + { + "n": "mathurinm", + "e": "mathurin.massias@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 23, + "d": 3 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 111, + "d": 65 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 23, + "d": 19 + }, + "mne/time_frequency/stft.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5056.json b/doc/sphinxext/prs/5056.json new file mode 100644 index 00000000000..e074b417a67 --- /dev/null +++ b/doc/sphinxext/prs/5056.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b1448c0b08f8a9c9595b29f365c89b71503cb40a", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 40, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5057.json b/doc/sphinxext/prs/5057.json new 
file mode 100644 index 00000000000..c8d11eed268 --- /dev/null +++ b/doc/sphinxext/prs/5057.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3521f790656edbaf4a8b55e574f441f49078ff97", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/506.json b/doc/sphinxext/prs/506.json new file mode 100644 index 00000000000..87bd98b03c6 --- /dev/null +++ b/doc/sphinxext/prs/506.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e4c7db52455d484c854e72d9e82a1677a1e10d5b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 121, + "d": 0 + }, + "mne/stats/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 400, + "d": 282 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 97, + "d": 81 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5062.json b/doc/sphinxext/prs/5062.json new file mode 100644 index 00000000000..c6d1de067cc --- /dev/null +++ b/doc/sphinxext/prs/5062.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2b5aa25eb4a48bbdaec02a2b7eb53075cba186d3", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/time_frequency/csd.py": { + "a": 9, + "d": 9 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5063.json b/doc/sphinxext/prs/5063.json new file mode 100644 index 00000000000..f6a83b4e71b --- /dev/null +++ b/doc/sphinxext/prs/5063.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b130a04973548819ebcf6cd4500ce92d77e3d7e8", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 5 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5064.json b/doc/sphinxext/prs/5064.json new file mode 100644 index 00000000000..bc52f36dbdc --- /dev/null +++ b/doc/sphinxext/prs/5064.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "33161b33187113c44850d7f7a648922370d90c2b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5065.json b/doc/sphinxext/prs/5065.json new file mode 100644 index 00000000000..b9c6e76ec9c --- /dev/null +++ b/doc/sphinxext/prs/5065.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6bd1280150f4ad312265cf233273a1c77d2b30cf", + "authors": [ + { + "n": "Nathalie", + "e": "nat.gayraud@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 9, + "d": 12 + }, + "mne/defaults.py": { + "a": 1, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 26, + "d": 11 + }, + "mne/simulation/tests/test_raw.py": { + "a": 19, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of 
file diff --git a/doc/sphinxext/prs/5066.json b/doc/sphinxext/prs/5066.json new file mode 100644 index 00000000000..fe2eb044453 --- /dev/null +++ b/doc/sphinxext/prs/5066.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "aefa4343fa9ed3cde21309ea73ca471851ce0c77", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 4, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 0, + "d": 88 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 19, + "d": 24 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 3, + "d": 5 + }, + "mne/beamformer/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 767, + "d": 95 + }, + "mne/beamformer/_lcmv.py": { + "a": 93, + "d": 14 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 470, + "d": 118 + }, + "tutorials/plot_dics.py": { + "a": 309, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5068.json b/doc/sphinxext/prs/5068.json new file mode 100644 index 00000000000..3fd51bcc92f --- /dev/null +++ b/doc/sphinxext/prs/5068.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "55da4e49709ca6d5189cdceffba84c107b0ac517", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/stats/plot_sensor_regression.py": { + "a": 6, + "d": 6 + }, + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 44, + "d": 16 + }, + "mne/viz/evoked.py": { + "a": 17, + "d": 63 + }, + "mne/viz/topo.py": { + "a": 10, + "d": 66 + }, + "mne/viz/utils.py": { + "a": 138, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5069.json b/doc/sphinxext/prs/5069.json new file mode 100644 index 00000000000..e20a6a343c2 --- /dev/null +++ b/doc/sphinxext/prs/5069.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c1a0dc908a9ced0f938b3873f28b8a5998f3fd24", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 36, + "d": 20 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 88, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/507.json b/doc/sphinxext/prs/507.json new file mode 100644 index 00000000000..4ef16b2a9f8 --- /dev/null +++ b/doc/sphinxext/prs/507.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2d62a90a6cd120338542c8460fee6333036c807e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/plot_megsim_data.py": { + "a": 53, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5070.json b/doc/sphinxext/prs/5070.json new file mode 100644 index 00000000000..a29747f3830 --- /dev/null +++ b/doc/sphinxext/prs/5070.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "76b6d1e0fb2443d365adde706d75d43e1ad56883", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 7, + "d": 5 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5071.json b/doc/sphinxext/prs/5071.json new file mode 100644 index 00000000000..d31b0f013e1 --- /dev/null +++ b/doc/sphinxext/prs/5071.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"0444774dc5251465d157aaa7ccfa4b6feaf4c5aa", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "tutorials/plot_stats_cluster_erp.py": { + "a": 149, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5075.json b/doc/sphinxext/prs/5075.json new file mode 100644 index 00000000000..d911d201d42 --- /dev/null +++ b/doc/sphinxext/prs/5075.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8e0a423d9b62268e2dc8e03cbb1810a3cab34391", + "authors": [ + { + "n": "mathurinm", + "e": "mathurin.massias@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_multidict_reweighted_tfmxne.py": { + "a": 126, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 22, + "d": 8 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 313, + "d": 68 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 50, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5076.json b/doc/sphinxext/prs/5076.json new file mode 100644 index 00000000000..2589ccca6bd --- /dev/null +++ b/doc/sphinxext/prs/5076.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0813968da09d87ae21d4e88163198c2c8ecc7e55", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/manual/decoding.rst": { + "a": 2, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 10, + "d": 8 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 19, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 4, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 8, + "d": 3 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5077.json b/doc/sphinxext/prs/5077.json new file mode 100644 index 00000000000..7a48f95d81c --- /dev/null +++ b/doc/sphinxext/prs/5077.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "09b8fd3bdfec0650b390bf7c3ae9271af5c8903b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/kit/constants.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 14, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5078.json b/doc/sphinxext/prs/5078.json new file mode 100644 index 00000000000..a93342c8f45 --- /dev/null +++ b/doc/sphinxext/prs/5078.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cffd02d4065eeb23ca4798f4f674678103105096", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/egi/egimff.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5079.json b/doc/sphinxext/prs/5079.json new file mode 100644 index 00000000000..c9bf6847c14 --- /dev/null +++ b/doc/sphinxext/prs/5079.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "dba99f19e24ed3e59f72bb8eaff6174c104568ca", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/manual/cookbook.rst": { + "a": 2, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/filter.py": { + "a": 8, + "d": 4 + }, + "mne/io/base.py": { + "a": 4, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 15 + }, + "mne/viz/ica.py": { + "a": 12, + "d": 21 + }, + "mne/viz/misc.py": { + "a": 20, + "d": 23 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 5 + }, + 
"mne/viz/topo.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_artifacts_correction_filtering.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 2, + "d": 5 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_artifacts_correction_ssp.py": { + "a": 0, + "d": 3 + }, + "tutorials/plot_artifacts_detection.py": { + "a": 11, + "d": 10 + }, + "tutorials/plot_background_filtering.py": { + "a": 5, + "d": 4 + }, + "tutorials/plot_visualize_raw.py": { + "a": 0, + "d": 2 + }, + "tutorials/plot_whitened.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/508.json b/doc/sphinxext/prs/508.json new file mode 100644 index 00000000000..3450025a30a --- /dev/null +++ b/doc/sphinxext/prs/508.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0d219bf734b7837d5b3f00838eec6f818b48a709", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 13, + "d": 7 + }, + "mne/source_estimate.py": { + "a": 5, + "d": 3 + }, + "mne/tests/test_label.py": { + "a": 4, + "d": 0 + }, + "mne/viz.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5080.json b/doc/sphinxext/prs/5080.json new file mode 100644 index 00000000000..022b74d3294 --- /dev/null +++ b/doc/sphinxext/prs/5080.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5364ec5b842ab879a8fc71cf120bcd599a86260c", + "authors": [ + { + "n": "Desislava Petkova", + "e": "desislavka@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 12, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5082.json b/doc/sphinxext/prs/5082.json new file mode 100644 index 00000000000..ff87ed7f93d --- /dev/null +++ b/doc/sphinxext/prs/5082.json @@ -0,0 +1,303 @@ +{ + "merge_commit_sha": "a2dd739e3d0a1cf82433d5cb2a075ebe4b1cd647", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/tutorials/report.rst": { + "a": 1, + "d": 1 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 3, + "d": 2 + }, + "examples/datasets/plot_megsim_data.py": { + "a": 2, + "d": 2 + }, + "examples/datasets/plot_megsim_data_single_trial.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_unsupervised_spatial_filter.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_rap_music.py": { + "a": 4, + "d": 2 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/io/plot_elekta_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_objects_from_arrays.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_read_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_read_evoked.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/plot_define_target_events.py": { + "a": 5, + "d": 9 + }, + "examples/preprocessing/plot_interpolate_bad_channels.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/plot_metadata_query.py": { + "a": 1, + "d": 1 + }, + 
"examples/preprocessing/plot_movement_compensation.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/plot_rereference_eeg.py": { + "a": 6, + "d": 3 + }, + "examples/preprocessing/plot_shift_evoked.py": { + "a": 3, + "d": 3 + }, + "examples/preprocessing/plot_virtual_evoked.py": { + "a": 4, + "d": 4 + }, + "examples/realtime/ftclient_rt_average.py": { + "a": 2, + "d": 1 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/plot_simulate_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_linear_regression_raw.py": { + "a": 2, + "d": 1 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 2, + "d": 1 + }, + "examples/stats/plot_sensor_regression.py": { + "a": 3, + "d": 2 + }, + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 2, + "d": 1 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 7, + "d": 5 + }, + "examples/visualization/plot_evoked_whitening.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 8, + "d": 2 + }, + "mne/evoked.py": { + "a": 34, + "d": 26 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/stats/parametric.py": { + "a": 5, + "d": 1 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 9 + }, + "mne/viz/evoked.py": { + "a": 65, + "d": 85 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 43, + "d": 35 + }, + "mne/viz/tests/test_topo.py": { + "a": 12, + "d": 6 + }, + "mne/viz/tests/test_topomap.py": { + "a": 16, + "d": 10 + }, + "mne/viz/topomap.py": { + "a": 44, + "d": 13 + }, + "mne/viz/utils.py": { + "a": 18, + "d": 0 + }, + "tutorials/plot_artifacts_correction_maxwell_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_artifacts_correction_ssp.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_artifacts_detection.py": { + "a": 4, + "d": 2 + }, + "tutorials/plot_background_filtering.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 5, + "d": 5 + }, + "tutorials/plot_brainstorm_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_compute_covariance.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_creating_data_structures.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_dipole_fit.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_eeg_erp.py": { + "a": 12, + "d": 10 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_introduction.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_metadata_epochs.py": { + "a": 5, + "d": 3 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 4, + "d": 3 + }, + "tutorials/plot_object_epochs.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_object_evoked.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_phantom_4DBTi.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 4, + "d": 1 + }, + "tutorials/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_stats_cluster_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 9, + "d": 9 + }, + "tutorials/plot_whitened.py": { + 
"a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5083.json b/doc/sphinxext/prs/5083.json new file mode 100644 index 00000000000..a5e7741faa5 --- /dev/null +++ b/doc/sphinxext/prs/5083.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e92335a2320a98782966b89abf82d0af96ffe5b5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 32, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 44, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5084.json b/doc/sphinxext/prs/5084.json new file mode 100644 index 00000000000..0518bfc78ff --- /dev/null +++ b/doc/sphinxext/prs/5084.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "a11c4650afe01e71366ef44b89cffe4d7168f2bb", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 10, + "d": 12 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 9 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 2 + }, + "mne/utils.py": { + "a": 14, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 10, + "d": 15 + }, + "mne/viz/ica.py": { + "a": 122, + "d": 101 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5086.json b/doc/sphinxext/prs/5086.json new file mode 100644 index 00000000000..03de6dd694e --- /dev/null +++ b/doc/sphinxext/prs/5086.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "342c0598381d4a8d6743c4ab6d924f2d1da8e634", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 56, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 53, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/509.json b/doc/sphinxext/prs/509.json new file mode 100644 index 00000000000..1d57c993031 --- /dev/null +++ b/doc/sphinxext/prs/509.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "bc9f87af581de15ffb321447250dc74ff560667e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/event.py": { + "a": 8, + "d": 3 + }, + "mne/fiff/kit/kit.py": { + "a": 3, + "d": 2 + }, + "mne/fiff/kit/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/tests/data/sns.txt": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/tests/data/test.sqd": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/tests/data/test_Ykgw.mat": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/tests/data/test_bin.fif": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/tests/data/test_elp.txt": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/tests/data/test_hsp.txt": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/tests/data/test_mrk.sqd": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/tests/test_kit.py": { + "a": 2, + "d": 2 + }, + "mne/forward.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 19, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5090.json b/doc/sphinxext/prs/5090.json new file mode 
100644 index 00000000000..1b5840f2620 --- /dev/null +++ b/doc/sphinxext/prs/5090.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2318bf21f7011ef084c045f21e93b4ff752aed52", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + "mne/evoked.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5091.json b/doc/sphinxext/prs/5091.json new file mode 100644 index 00000000000..d4019c87094 --- /dev/null +++ b/doc/sphinxext/prs/5091.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b97735d87a58df107192319f042e2875b99d8470", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/channels/data/layouts/KIT-AS-2008.lout": { + "a": 158, + "d": 0 + }, + "mne/io/kit/constants.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5092.json b/doc/sphinxext/prs/5092.json new file mode 100644 index 00000000000..56875684218 --- /dev/null +++ b/doc/sphinxext/prs/5092.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "09a8da01cc1019eaa6a10dcdd4f0357805ec019d", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 6, + "d": 4 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 4 + }, + "mne/beamformer/_lcmv.py": { + "a": 8, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 65, + "d": 61 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5093.json b/doc/sphinxext/prs/5093.json new file mode 100644 index 00000000000..f649cbc7a4d --- /dev/null +++ b/doc/sphinxext/prs/5093.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3b6302205bc13fd68f9be05f85b6a5f2b2f3990a", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + "mne/evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5094.json b/doc/sphinxext/prs/5094.json new file mode 100644 index 00000000000..0d443169b53 --- /dev/null +++ b/doc/sphinxext/prs/5094.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f771b51939e8266adcb3838e0331f1d8dcd9ef25", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5095.json b/doc/sphinxext/prs/5095.json new file mode 100644 index 00000000000..890f6154aba --- /dev/null +++ b/doc/sphinxext/prs/5095.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "613e1c6931edb6da151036595789dc9923988ac7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 7, + "d": 11 + }, + "mne/utils.py": { + "a": 10, + "d": 7 + }, + "mne/viz/tests/test_topomap.py": { + "a": 14, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5096.json b/doc/sphinxext/prs/5096.json new file mode 100644 index 00000000000..71e18316b86 --- /dev/null +++ b/doc/sphinxext/prs/5096.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a5fa0ceb47cf95ec4ee2619e02a91ae49896dc5d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, 
+ "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 28, + "d": 21 + }, + "mne/stats/tests/test_permutations.py": { + "a": 14, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5097.json b/doc/sphinxext/prs/5097.json new file mode 100644 index 00000000000..f84910d5cfa --- /dev/null +++ b/doc/sphinxext/prs/5097.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "eae216d42d5975771059544b8d2487ee0c4ccc52", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 36, + "d": 38 + }, + "mne/viz/tests/test_raw.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5098.json b/doc/sphinxext/prs/5098.json new file mode 100644 index 00000000000..8a73cc98ce0 --- /dev/null +++ b/doc/sphinxext/prs/5098.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ed90f692d135535c094fa4cbc9e42eac9f2f7c78", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5099.json b/doc/sphinxext/prs/5099.json new file mode 100644 index 00000000000..e04b8616d37 --- /dev/null +++ b/doc/sphinxext/prs/5099.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "16920f11e5a643221bd9f9807195f66234ea06a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 4, + "d": 0 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/51.json b/doc/sphinxext/prs/51.json new file mode 100644 index 00000000000..79f28d34f4a --- /dev/null +++ b/doc/sphinxext/prs/51.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fea6bdc752b765bb447615b077b235a948b0f8e4", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/data/mne_analyze.sel": { + "a": 13, + "d": 0 + }, + "mne/fiff/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/pick.py": { + "a": 73, + "d": 0 + }, + "mne/fiff/tests/test_pick.py": { + "a": 15, + "d": 1 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/510.json b/doc/sphinxext/prs/510.json new file mode 100644 index 00000000000..a9547f1888d --- /dev/null +++ b/doc/sphinxext/prs/510.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "758eb05a4bfa80e6fed13c017ea88fa67dc285ad", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5100.json b/doc/sphinxext/prs/5100.json new file mode 100644 index 00000000000..a1d5fa8c6a1 --- /dev/null +++ b/doc/sphinxext/prs/5100.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6abe9fc70168e10500a1e35ef0145ff13383a8fb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 27, + "d": 15 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5101.json b/doc/sphinxext/prs/5101.json new file mode 100644 index 00000000000..f14e61d8084 --- /dev/null +++ b/doc/sphinxext/prs/5101.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "edde36d378ff535e59088fcb13490045c90bb31c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5103.json b/doc/sphinxext/prs/5103.json new file mode 100644 index 00000000000..03d04ce31f6 --- /dev/null +++ b/doc/sphinxext/prs/5103.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7e3fd16ff7b3e10c2b370b7025286f89d09cb389", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5106.json b/doc/sphinxext/prs/5106.json new file mode 100644 index 00000000000..92bf239c618 --- /dev/null +++ b/doc/sphinxext/prs/5106.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4630594e54f2d7e5be5f57092f649daabe1a9ccd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 2, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5107.json b/doc/sphinxext/prs/5107.json new file mode 100644 index 00000000000..b0f5532b129 --- /dev/null +++ b/doc/sphinxext/prs/5107.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e138e9779dc5f003670effe30a62c3abd6a9e376", + "authors": [ + { + "n": "mathurinm", + "e": "mathurin.massias@gmail.com" + } + ], + "changes": { + "tutorials/plot_forward.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/511.json b/doc/sphinxext/prs/511.json new file mode 100644 index 00000000000..68dc470f560 --- /dev/null +++ b/doc/sphinxext/prs/511.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c9a955a2db7c612e2a3dd1f335e689b757282a9a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 37, + "d": 20 + }, + "mne/tests/test_source_estimate.py": { + "a": 35, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5113.json b/doc/sphinxext/prs/5113.json new file mode 100644 index 00000000000..88c872edebf --- /dev/null +++ b/doc/sphinxext/prs/5113.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "394b2d668999232f53d3da48ffc23cce00771edf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 4, + "d": 0 + }, + "mne/channels/interpolation.py": { + 
"a": 0, + "d": 1 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 3, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 5, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_reference.py": { + "a": 7, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 4 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 5, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 0, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 5, + "d": 4 + }, + "mne/viz/tests/test_ica.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5114.json b/doc/sphinxext/prs/5114.json new file mode 100644 index 00000000000..91d98d887d8 --- /dev/null +++ b/doc/sphinxext/prs/5114.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1a09859c6e5a7749b25a6c78403c0f02b225f0f5", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 65, + "d": 84 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5119.json b/doc/sphinxext/prs/5119.json new file mode 100644 index 00000000000..b850fde19c0 --- /dev/null +++ b/doc/sphinxext/prs/5119.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f5e3399ad6d0c163f3b86f002fd3c6fb687e6315", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 66, + "d": 23 + }, + "mne/viz/utils.py": { + "a": 38, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/512.json b/doc/sphinxext/prs/512.json new file mode 100644 index 00000000000..19890759ff4 --- /dev/null +++ b/doc/sphinxext/prs/512.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "44c11f764f42726e2df31e72509e681125056495", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 3, + "d": 2 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 4, + "d": 1 + }, + "mne/forward.py": { + "a": 5, + "d": 0 + }, + "mne/label.py": { + "a": 106, + "d": 96 + }, + "mne/minimum_norm/inverse.py": { + "a": 14, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 14, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 10, + "d": 4 + }, + "mne/proj.py": { + "a": 4, + "d": 3 + }, + "mne/simulation/source.py": { + "a": 0, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 156, + "d": 71 + }, + "mne/source_space.py": { + "a": 0, + "d": 3 + }, + "mne/tests/test_label.py": { + "a": 24, + "d": 10 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 9, + "d": 10 + }, + "mne/utils.py": { + "a": 20, + "d": 0 + }, + "mne/viz.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5120.json 
b/doc/sphinxext/prs/5120.json new file mode 100644 index 00000000000..fde1ab1478b --- /dev/null +++ b/doc/sphinxext/prs/5120.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "13d6aadcd1b9e3d1524daadc5ba3318ce2658c93", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/csp.py": { + "a": 8, + "d": 8 + }, + "mne/decoding/ems.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/receptive_field.py": { + "a": 7, + "d": 7 + }, + "mne/decoding/search_light.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 8, + "d": 8 + }, + "mne/preprocessing/xdawn.py": { + "a": 5, + "d": 5 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5121.json b/doc/sphinxext/prs/5121.json new file mode 100644 index 00000000000..8b105b08134 --- /dev/null +++ b/doc/sphinxext/prs/5121.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f68f4cfd351b48add9626af5993ab475a9c9af35", + "authors": [ + { + "n": "Jen Evans", + "e": null + } + ], + "changes": { + "mne/io/egi/events.py": { + "a": 5, + "d": 3 + }, + "mne/io/egi/general.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5125.json b/doc/sphinxext/prs/5125.json new file mode 100644 index 00000000000..8615bd2f398 --- /dev/null +++ b/doc/sphinxext/prs/5125.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "21d35c8963e9c616813fae8237ee6d8c6387c2dc", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ssp.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 57, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5126.json b/doc/sphinxext/prs/5126.json new file mode 100644 index 00000000000..01819fc1646 --- /dev/null +++ b/doc/sphinxext/prs/5126.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d91a41c1cc34af7337a2d00d3b036e7cc0666636", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 13, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5130.json b/doc/sphinxext/prs/5130.json new file mode 100644 index 00000000000..0e7b4fb1a8b --- /dev/null +++ b/doc/sphinxext/prs/5130.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "96d891774553a6a11fe4332937eab072510741e2", + "authors": [ + { + "n": "mathurinm", + "e": "mathurin.massias@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5131.json b/doc/sphinxext/prs/5131.json new file mode 100644 index 00000000000..894d4264d76 --- /dev/null +++ b/doc/sphinxext/prs/5131.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "85fca344548cf6ee26cb4cf565188338569eb020", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils.py": { + "a": 33, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5132.json b/doc/sphinxext/prs/5132.json new file mode 100644 index 00000000000..a7bc8453576 --- /dev/null +++ b/doc/sphinxext/prs/5132.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": 
"74ac2f37cd62acfe44c91a0633b15ccf47868150", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/coreg.py": { + "a": 159, + "d": 23 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 14, + "d": 51 + }, + "mne/tests/test_coreg.py": { + "a": 80, + "d": 6 + }, + "mne/tests/test_transforms.py": { + "a": 43, + "d": 10 + }, + "mne/transforms.py": { + "a": 44, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5133.json b/doc/sphinxext/prs/5133.json new file mode 100644 index 00000000000..063281c5435 --- /dev/null +++ b/doc/sphinxext/prs/5133.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "786945eab780a04f9ffb9ea44de6034c09ed7bb2", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 1, + "d": 26 + }, + "mne/cov.py": { + "a": 8, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 8, + "d": 0 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 12, + "d": 5 + }, + "mne/io/meas_info.py": { + "a": 53, + "d": 0 + }, + "mne/io/pick.py": { + "a": 22, + "d": 0 + }, + "mne/io/tests/test_meas_info.py": { + "a": 36, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 18, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 8, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 43, + "d": 14 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 45, + "d": 1 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 35, + "d": 1 + }, + "mne/utils.py": { + "a": 9, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5134.json b/doc/sphinxext/prs/5134.json new file mode 100644 index 00000000000..2863c363b2d --- /dev/null +++ b/doc/sphinxext/prs/5134.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b659764133b00ee3d47eefa1214a5b14125a9ed1", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 241, + "d": 0 + }, + "mne/beamformer/_dics.py": { + "a": 3, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 6, + "d": 237 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5135.json b/doc/sphinxext/prs/5135.json new file mode 100644 index 00000000000..1f246272f07 --- /dev/null +++ b/doc/sphinxext/prs/5135.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "65e7ce0d49855f636e932697de80565dacc033eb", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 0, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 189, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 8, + "d": 99 + }, + "mne/beamformer/_lcmv.py": { + "a": 9, + "d": 125 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5137.json b/doc/sphinxext/prs/5137.json new file mode 100644 index 00000000000..045dd40211f --- /dev/null +++ b/doc/sphinxext/prs/5137.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1dc1502215a53385cda15c6c336fcc4341dc4d3b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"mne/io/edf/edf.py": { + "a": 66, + "d": 81 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5139.json b/doc/sphinxext/prs/5139.json new file mode 100644 index 00000000000..2821dbdebb3 --- /dev/null +++ b/doc/sphinxext/prs/5139.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7212c5bfb10430fd8a80341d5e923cc1e497aee4", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 19, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/514.json b/doc/sphinxext/prs/514.json new file mode 100644 index 00000000000..00c3fa9c05c --- /dev/null +++ b/doc/sphinxext/prs/514.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "397eab931514dff656cb6de434a573480265173c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/evoked.py": { + "a": 9, + "d": 7 + }, + "mne/fiff/proj.py": { + "a": 5, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/forward.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 9, + "d": 4 + }, + "mne/utils.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5140.json b/doc/sphinxext/prs/5140.json new file mode 100644 index 00000000000..936e0015d00 --- /dev/null +++ b/doc/sphinxext/prs/5140.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "248fe7eeb45d596592b4dc3277a229afc7b55a3d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 59 + }, + "mne/viz/topomap.py": { + "a": 101, + "d": 78 + }, + "tutorials/plot_dipole_fit.py": { + "a": 3, + "d": 7 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5141.json b/doc/sphinxext/prs/5141.json new file mode 100644 index 00000000000..5db59ba2e77 --- /dev/null +++ b/doc/sphinxext/prs/5141.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "fea9566c5571f40781568c1f1a43f4c28757545f", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 47, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/externals/pymatreader/pymatreader.py": { + "a": 1, + "d": 177 + }, + "mne/externals/pymatreader/utils.py": { + "a": 230, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/fieldtrip/__init__.py": { + "a": 8, + "d": 0 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 174, + "d": 0 + }, + "mne/io/fieldtrip/tests/__init__.py": { + "a": 5, + "d": 0 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 270, + "d": 0 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 225, + "d": 0 + }, + "mne/io/fieldtrip/utils.py": { + "a": 324, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5143.json b/doc/sphinxext/prs/5143.json new file mode 100644 index 00000000000..25ba50d7666 --- /dev/null +++ b/doc/sphinxext/prs/5143.json @@ -0,0 +1,19 
@@ +{ + "merge_commit_sha": "5ec3ac4a18838191047cbc2d67fabdd4f416767a", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/time_frequency/csd.py": { + "a": 6, + "d": 6 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5145.json b/doc/sphinxext/prs/5145.json new file mode 100644 index 00000000000..c6f5396cb7a --- /dev/null +++ b/doc/sphinxext/prs/5145.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "8c07f6002034041b695f80e6248586453dca44bb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 17, + "d": 14 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 34, + "d": 20 + }, + "mne/preprocessing/xdawn.py": { + "a": 10, + "d": 12 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5146.json b/doc/sphinxext/prs/5146.json new file mode 100644 index 00000000000..c99f31b4e20 --- /dev/null +++ b/doc/sphinxext/prs/5146.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "12c4d7a77e272e186564927746d8fccbe21d525d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ssp.py": { + "a": 24, + "d": 7 + }, + "mne/proj.py": { + "a": 55, + "d": 9 + }, + "mne/rank.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_proj.py": { + "a": 43, + "d": 11 + }, + "mne/tests/test_rank.py": { + "a": 15, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5147.json b/doc/sphinxext/prs/5147.json new file mode 100644 index 00000000000..a3880ecf0fa --- /dev/null +++ b/doc/sphinxext/prs/5147.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a7d3c208813732321aff4a4d7c853bf149653604", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_source_estimate.py": { + "a": 12, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5150.json b/doc/sphinxext/prs/5150.json new file mode 100644 index 00000000000..dfdff8810d2 --- /dev/null +++ b/doc/sphinxext/prs/5150.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b07993e017b33830c5f448e7e64a758aeb1853b0", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 7, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 14, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5153.json b/doc/sphinxext/prs/5153.json new file mode 100644 index 00000000000..c7da6a4a43d --- /dev/null +++ b/doc/sphinxext/prs/5153.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9865f9275f596443668a06e84e54f2e2c645bddb", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/preprocessing/peak_finder.py": { + "a": 13, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5155.json b/doc/sphinxext/prs/5155.json new file mode 100644 index 00000000000..a9d0e0b4865 --- /dev/null +++ b/doc/sphinxext/prs/5155.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "8341e066a6b11b3e9fd9d804f37b7e2cb6f81b3a", + "authors": [ + 
{ + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 9, + "d": 4 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 42, + "d": 27 + }, + "mne/preprocessing/eog.py": { + "a": 20, + "d": 4 + }, + "mne/preprocessing/ssp.py": { + "a": 67, + "d": 108 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 20, + "d": 7 + }, + "mne/preprocessing/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5156.json b/doc/sphinxext/prs/5156.json new file mode 100644 index 00000000000..19c282e771b --- /dev/null +++ b/doc/sphinxext/prs/5156.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7472168a9ba8d7b54c5fecd0ce104b18d9147a74", + "authors": [ + { + "n": "Juergen Dammers", + "e": "j.dammers@fz-juelich.de" + } + ], + "changes": { + "mne/fixes.py": { + "a": 13, + "d": 0 + }, + "mne/preprocessing/ctps_.py": { + "a": 7, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5157.json b/doc/sphinxext/prs/5157.json new file mode 100644 index 00000000000..50a2df23ed9 --- /dev/null +++ b/doc/sphinxext/prs/5157.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "75a058902bd142b0f2f5c203fd7232f02ecabf60", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/_viewer.py": { + "a": 10, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5160.json b/doc/sphinxext/prs/5160.json new file mode 100644 index 00000000000..3809b4420a2 --- /dev/null +++ b/doc/sphinxext/prs/5160.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c2ad41f27f1e92a46a4aa4794ec900b1a7b5b661", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 11, + "d": 4 + }, + "mne/io/edf/tests/data/test_bdf_stim_channel.bdf": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 22, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5166.json b/doc/sphinxext/prs/5166.json new file mode 100644 index 00000000000..2820c118c82 --- /dev/null +++ b/doc/sphinxext/prs/5166.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "057026f9a38c63607401ca568790202cf57a08c8", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/annotations.py": { + "a": 2, + "d": 1 + }, + "mne/cov.py": { + "a": 4, + "d": 2 + }, + "mne/dipole.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 4, + "d": 2 + }, + "mne/event.py": { + "a": 4, + "d": 2 + }, + "mne/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 2 + }, + "mne/proj.py": { + "a": 4, + "d": 2 + }, + "mne/source_space.py": { + "a": 6, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/517.json b/doc/sphinxext/prs/517.json new file mode 100644 index 00000000000..1600f1b05b8 --- /dev/null +++ b/doc/sphinxext/prs/517.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9f4c4ec215225d4baa15bf58843be3e8dee4f981", + "authors": [ + { 
+ "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/label.py": { + "a": 27, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5173.json b/doc/sphinxext/prs/5173.json new file mode 100644 index 00000000000..21ced61c5fe --- /dev/null +++ b/doc/sphinxext/prs/5173.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9b132399497f5a982691e2f41bb30de02e925475", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5174.json b/doc/sphinxext/prs/5174.json new file mode 100644 index 00000000000..bb480731448 --- /dev/null +++ b/doc/sphinxext/prs/5174.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "ca35ce9923ae9dc02fe55d7b321f71337903a81b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/documentation.rst": { + "a": 0, + "d": 1 + }, + "doc/install_mne_python.rst": { + "a": 31, + "d": 27 + }, + "doc/manual/time_frequency.rst": { + "a": 0, + "d": 4 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 5, + "d": 5 + }, + "tutorials/plot_dics.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5175.json b/doc/sphinxext/prs/5175.json new file mode 100644 index 00000000000..a97e03c4e3f --- /dev/null +++ b/doc/sphinxext/prs/5175.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e17d46935861447a5112085dd010c33a865f835b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/navbar.html": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5177.json b/doc/sphinxext/prs/5177.json new file mode 100644 index 00000000000..ecf9ce0bf19 --- /dev/null +++ b/doc/sphinxext/prs/5177.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "a3e412691c8cdd734514084e1b421311b662f809", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 0, + "d": 6 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 306 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 304 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 183 + }, + "mne/dipole.py": { + "a": 2, + "d": 3 + }, + "mne/evoked.py": { + "a": 7, + "d": 7 + }, + "mne/forward/forward.py": { + "a": 5, + "d": 17 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 12, + "d": 40 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 17 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 18, + "d": 26 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/ssp.py": { + "a": 4, + "d": 19 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 0, + "d": 13 + }, + "mne/simulation/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 9 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 151 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 19, + "d": 343 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + 
"mne/viz/evoked.py": { + "a": 12, + "d": 17 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 3 + }, + "mne/viz/tests/test_topomap.py": { + "a": 0, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 8, + "d": 23 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5178.json b/doc/sphinxext/prs/5178.json new file mode 100644 index 00000000000..26faebe4a8c --- /dev/null +++ b/doc/sphinxext/prs/5178.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d7ec5c8e0cb5957fe6249d72f9e0437c4777b9da", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 9, + "d": 4 + }, + "mne/simulation/raw.py": { + "a": 62, + "d": 25 + }, + "mne/simulation/tests/test_raw.py": { + "a": 49, + "d": 37 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5179.json b/doc/sphinxext/prs/5179.json new file mode 100644 index 00000000000..b9192b18d1c --- /dev/null +++ b/doc/sphinxext/prs/5179.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "eefc3fdfecf238249e1b0d8544c6fa5c53090c03", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 11, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5183.json b/doc/sphinxext/prs/5183.json new file mode 100644 index 00000000000..a4e2e18ebbf --- /dev/null +++ b/doc/sphinxext/prs/5183.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bd35715c21098a15b8bf3450a5ea2d6091e3477c", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 9, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5184.json b/doc/sphinxext/prs/5184.json new file mode 100644 index 00000000000..76cc0d139c8 --- /dev/null +++ b/doc/sphinxext/prs/5184.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a9a8027690d79f3995e8da884fefd01c4c1eb07d", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/time_frequency/csd.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5186.json b/doc/sphinxext/prs/5186.json new file mode 100644 index 00000000000..14faa68589b --- /dev/null +++ b/doc/sphinxext/prs/5186.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "f21b843da8fc85de53ce188ab6c7cec7be5795d8", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_custom_inverse_solver.py": { + "a": 2, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 5, + "d": 9 + }, + "mne/viz/misc.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_utils.py": { + "a": 9, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 47, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5188.json b/doc/sphinxext/prs/5188.json new file mode 100644 index 00000000000..49311db0491 --- /dev/null +++ b/doc/sphinxext/prs/5188.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9fefdf4ade454c0c0a40a4d8e4677c37fbef56e6", 
+ "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/advanced_setup.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5189.json b/doc/sphinxext/prs/5189.json new file mode 100644 index 00000000000..bc2d92351f5 --- /dev/null +++ b/doc/sphinxext/prs/5189.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "563867c2fb3560f06fae786e43b5e2dd8fb3cf60", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5193.json b/doc/sphinxext/prs/5193.json new file mode 100644 index 00000000000..e2c1bf205c3 --- /dev/null +++ b/doc/sphinxext/prs/5193.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "5cafa0f4287ae02fb2beafa557c0f70db8b7a1ed", + "authors": [ + { + "n": "Henrich Kolkhorst", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 8, + "d": 0 + }, + "examples/realtime/ftclient_rt_average.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/ftclient_rt_compute_psd.py": { + "a": 5, + "d": 0 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/rt_feedback_client.py": { + "a": 9, + "d": 52 + }, + "examples/realtime/rt_feedback_server.py": { + "a": 10, + "d": 6 + }, + "mne/epochs.py": { + "a": 51, + "d": 7 + }, + "mne/realtime/epochs.py": { + "a": 159, + "d": 18 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 11, + "d": 7 + }, + "mne/realtime/mockclient.py": { + "a": 6, + "d": 2 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 116, + "d": 33 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 226, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5194.json b/doc/sphinxext/prs/5194.json new file mode 100644 index 00000000000..525cd3e6fbe --- /dev/null +++ b/doc/sphinxext/prs/5194.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a277bdbe145a461d864c36555b2fff286619b009", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/base.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 29, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5196.json b/doc/sphinxext/prs/5196.json new file mode 100644 index 00000000000..c7da71ac2c4 --- /dev/null +++ b/doc/sphinxext/prs/5196.json @@ -0,0 +1,291 @@ +{ + "merge_commit_sha": "04c6256dfa87fb6d58689d383f29b9de731e378c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 0, + "d": 1 + }, + "MANIFEST.in": { + "a": 1, + "d": 1 + }, + "Makefile": { + "a": 7, + "d": 4 + }, + "dictionary.txt": { + "a": 0, + "d": 4499 + }, + "doc/install_mne_c.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/appendix/c_release_notes.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/c_reference.rst": { + "a": 12, + "d": 12 + }, + "doc/manual/cookbook.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/gui/analyze.rst": { + "a": 3, + "d": 3 + }, + "doc/manual/gui/browse.rst": { + "a": 5, + "d": 5 + }, + "doc/manual/preprocessing/maxwell.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/time_frequency.rst": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 0, + "d": 3 + }, + 
"doc/whats_new.rst": { + "a": 11, + "d": 9 + }, + "examples/io/README.txt": { + "a": 2, + "d": 2 + }, + "ignore_words.txt": { + "a": 11, + "d": 0 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 3, + "d": 3 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 3, + "d": 3 + }, + "mne/gui/_viewer.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 3 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 3, + "d": 3 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/data/test_empty_room.cov": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 7 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 5 + }, + "mne/parallel.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_filter.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 6 + }, + "mne/utils.py": { + "a": 2, + "d": 2 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_maxwell_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_rejection.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_background_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_background_statistics.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_configuration.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_eeg_erp.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5197.json b/doc/sphinxext/prs/5197.json new file mode 100644 index 00000000000..7705fcf1c5c --- /dev/null +++ b/doc/sphinxext/prs/5197.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"71cea4bf9a2ced60503272c1570b6993aa38df35", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/_eloreta.py": { + "a": 4, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5198.json b/doc/sphinxext/prs/5198.json new file mode 100644 index 00000000000..e07b7ea8f73 --- /dev/null +++ b/doc/sphinxext/prs/5198.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "c38073288f2c32fea37c5d7c3a1bb5bcabc175f9", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 9, + "d": 11 + }, + "mne/channels/channels.py": { + "a": 7, + "d": 11 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 4, + "d": 5 + }, + "mne/decoding/search_light.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 2, + "d": 3 + }, + "mne/epochs.py": { + "a": 2, + "d": 5 + }, + "mne/evoked.py": { + "a": 3, + "d": 4 + }, + "mne/filter.py": { + "a": 2, + "d": 3 + }, + "mne/io/base.py": { + "a": 1, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/io/reference.py": { + "a": 3, + "d": 7 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 5 + }, + "mne/utils.py": { + "a": 72, + "d": 61 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5199.json b/doc/sphinxext/prs/5199.json new file mode 100644 index 00000000000..e57497603d7 --- /dev/null +++ b/doc/sphinxext/prs/5199.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "37003533a68353b46b8900789dc99989350f0162", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/52.json b/doc/sphinxext/prs/52.json new file mode 100644 index 00000000000..7a2d415d3a0 --- /dev/null +++ b/doc/sphinxext/prs/52.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "23de0270b725b1eea298798d9ac927f807831f11", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/fiff/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/pick.py": { + "a": 14, + "d": 75 + }, + "mne/fiff/tests/test_pick.py": { + "a": 1, + "d": 14 + }, + "mne/selection.py": { + "a": 99, + "d": 0 + }, + "mne/tests/test_selection.py": { + "a": 29, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/520.json b/doc/sphinxext/prs/520.json new file mode 100644 index 00000000000..59aee479773 --- /dev/null +++ b/doc/sphinxext/prs/520.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a4adae4fcf8862689afaef2b0e5c9a80bbcfa3ea", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/megsim/megsim.py": { + "a": 11, + "d": 8 + } + } +} \ No newline 
at end of file diff --git a/doc/sphinxext/prs/5203.json b/doc/sphinxext/prs/5203.json new file mode 100644 index 00000000000..aee26c7b1ae --- /dev/null +++ b/doc/sphinxext/prs/5203.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dc8fab44153c7e01cfc339dc65e17de538d391ec", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "tutorials/plot_stats_cluster_erp.py": { + "a": 7, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5209.json b/doc/sphinxext/prs/5209.json new file mode 100644 index 00000000000..222034a2480 --- /dev/null +++ b/doc/sphinxext/prs/5209.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "60070e37959ad01125fa21f14f7479c07bf45dd5", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 7, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/521.json b/doc/sphinxext/prs/521.json new file mode 100644 index 00000000000..eb165f58750 --- /dev/null +++ b/doc/sphinxext/prs/521.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e4bbd64e756fb94270746db6041220b7f6faa252", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 10, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5211.json b/doc/sphinxext/prs/5211.json new file mode 100644 index 00000000000..15ff79c939e --- /dev/null +++ b/doc/sphinxext/prs/5211.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d2802775539f5200bda637a6419794bf9d107990", + "authors": [ + { + "n": "Sara Sommariva", + "e": "sommariva@dima.unige.it" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 16, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5213.json b/doc/sphinxext/prs/5213.json new file mode 100644 index 00000000000..d7866936e35 --- /dev/null +++ b/doc/sphinxext/prs/5213.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ad438f61f0ba4c455acfeffa6068b7c4a7858aeb", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "mne/externals/pymatreader/__init__.py": { + "a": 28, + "d": 0 + }, + "mne/externals/pymatreader/pymatreader.py": { + "a": 202, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5215.json b/doc/sphinxext/prs/5215.json new file mode 100644 index 00000000000..714abc44787 --- /dev/null +++ b/doc/sphinxext/prs/5215.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9e266dd7001a05ef86aa56ecc39ede205ee12cb2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_cov.py": { + "a": 3, + "d": 6 + }, + "mne/tests/test_proj.py": { + "a": 34, + "d": 27 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5216.json b/doc/sphinxext/prs/5216.json new file mode 100644 index 00000000000..71ab2fcb897 --- /dev/null +++ b/doc/sphinxext/prs/5216.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "424e768ea43fac9e4f9adf920ee30d0d5adaf949", + "authors": 
[ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "ignore_words.txt": { + "a": 2, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 1 + }, + "tutorials/plot_background_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_dipole_fit.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5219.json b/doc/sphinxext/prs/5219.json new file mode 100644 index 00000000000..766b6d051d6 --- /dev/null +++ b/doc/sphinxext/prs/5219.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "197a8418672a8e69c65a1940e45da4966decf45f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 5, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 64, + "d": 25 + }, + "mne/parallel.py": { + "a": 66, + "d": 69 + }, + "mne/stats/cluster_level.py": { + "a": 13, + "d": 22 + }, + "mne/tests/test_utils.py": { + "a": 70, + "d": 1 + }, + "mne/utils.py": { + "a": 88, + "d": 8 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 5, + "d": 4 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/522.json b/doc/sphinxext/prs/522.json new file mode 100644 index 00000000000..c03d6e78d0e --- /dev/null +++ b/doc/sphinxext/prs/522.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e014f859ebd7db68d0a41158711c9e06fb87c867", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/plot_megsim_data_single_trial.py": { + "a": 35, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5220.json b/doc/sphinxext/prs/5220.json new file mode 100644 index 00000000000..21efdeadf67 --- /dev/null +++ b/doc/sphinxext/prs/5220.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "430d0d4b0603e785308f827f39a53be1dcf55e48", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 55, + "d": 6 + }, + "mne/tests/test_source_estimate.py": { + "a": 20, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5225.json b/doc/sphinxext/prs/5225.json new file mode 100644 index 00000000000..648c556c410 --- /dev/null +++ b/doc/sphinxext/prs/5225.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "8efbe5c3034ca6fc2c96b033ab63e42081788b38", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/forward/_field_interpolation.py": { + "a": 4, + "d": 1 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 16, + "d": 1 + }, + "mne/stats/tests/test_parametric.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 7, + "d": 8 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 12, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 14, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 5 + }, + "mne/viz/ica.py": 
{ + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 11, + "d": 0 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5227.json b/doc/sphinxext/prs/5227.json new file mode 100644 index 00000000000..5d4aba807d7 --- /dev/null +++ b/doc/sphinxext/prs/5227.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9ceb57d6d1cea1f7944265940d5f4005d6cf7ef7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5228.json b/doc/sphinxext/prs/5228.json new file mode 100644 index 00000000000..9fefc8071f5 --- /dev/null +++ b/doc/sphinxext/prs/5228.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "45898628a416691a89a8d48ca38860298cfb58fc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/data/montages/mgh70.elc": { + "a": 5, + "d": 5 + }, + "mne/channels/tests/test_montage.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5230.json b/doc/sphinxext/prs/5230.json new file mode 100644 index 00000000000..661ed334d54 --- /dev/null +++ b/doc/sphinxext/prs/5230.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "05a09f7e563bb8ef96aef9b64e206af216d8d909", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 6, + "d": 5 + }, + "doc/_static/reset-syntax.css": { + "a": 12, + "d": 0 + }, + "doc/_static/style.css": { + "a": 4, + "d": 64 + }, + "doc/_templates/layout.html": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/gen_commands.py": { + "a": 12, + "d": 11 + }, + "examples/stats/plot_sensor_regression.py": { + "a": 4, + "d": 5 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 3 + }, + "tutorials/plot_configuration.py": { + "a": 1, + "d": 0 + }, + "tutorials/plot_dics.py": { + "a": 0, + "d": 3 + }, + "tutorials/plot_introduction.py": { + "a": 7, + "d": 7 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5231.json b/doc/sphinxext/prs/5231.json new file mode 100644 index 00000000000..7aa0fdbcba9 --- /dev/null +++ b/doc/sphinxext/prs/5231.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "a6ac948a4331d4560ad60dc4ae18eca3704bd576", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 0, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 8, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 3, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 85, + "d": 21 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 44, + "d": 30 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5232.json b/doc/sphinxext/prs/5232.json new file mode 100644 index 00000000000..ceb573bd1b7 --- /dev/null +++ 
b/doc/sphinxext/prs/5232.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bbda6a575b488cf92d30402a5d766afae38f80cb", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/filter.py": { + "a": 27, + "d": 15 + }, + "mne/tests/test_filter.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5233.json b/doc/sphinxext/prs/5233.json new file mode 100644 index 00000000000..7e048f634d5 --- /dev/null +++ b/doc/sphinxext/prs/5233.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "005f4ab7fc0fc8b145172b72831b2262622b4075", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/filter.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5239.json b/doc/sphinxext/prs/5239.json new file mode 100644 index 00000000000..d399797212f --- /dev/null +++ b/doc/sphinxext/prs/5239.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "62828d56ff129a271433cf673e417eec596ffabc", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "tutorials/plot_background_filtering.py": { + "a": 102, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/524.json b/doc/sphinxext/prs/524.json new file mode 100644 index 00000000000..e398c139ee8 --- /dev/null +++ b/doc/sphinxext/prs/524.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0c5263ae172a61998a41e2b26f7188c49c347905", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 55, + "d": 35 + }, + "mne/tests/test_source_space.py": { + "a": 10, + "d": 15 + }, + "mne/utils.py": { + "a": 46, + "d": 30 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5241.json b/doc/sphinxext/prs/5241.json new file mode 100644 index 00000000000..cd72b1b6aa8 --- /dev/null +++ b/doc/sphinxext/prs/5241.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a62497855cfd2703e5043ff8def72f4a9d7b97f3", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5242.json b/doc/sphinxext/prs/5242.json new file mode 100644 index 00000000000..9921b23ebc6 --- /dev/null +++ b/doc/sphinxext/prs/5242.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d8182e75d1be580e25556bb5e405b0657fc016fb", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5243.json b/doc/sphinxext/prs/5243.json new file mode 100644 index 00000000000..bd8fa434260 --- /dev/null +++ b/doc/sphinxext/prs/5243.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "eaad1b8e22a1ffeddcafaaf5473e0a181b9abcc9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/filter.py": { + "a": 5, + "d": 8 + }, + "mne/tests/test_filter.py": { + "a": 53, + "d": 50 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5245.json b/doc/sphinxext/prs/5245.json new file mode 100644 index 00000000000..e874f3c2aa2 --- /dev/null +++ b/doc/sphinxext/prs/5245.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "819d8933eeabddf5cc40632467e5518605d1e4a7", + 
"authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5246.json b/doc/sphinxext/prs/5246.json new file mode 100644 index 00000000000..dfeca03c8f6 --- /dev/null +++ b/doc/sphinxext/prs/5246.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "55689e29123dc55b6ed2259b0ece375fa7db20f5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_vector_mne_solution.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 10, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5247.json b/doc/sphinxext/prs/5247.json new file mode 100644 index 00000000000..6c9b39d72c8 --- /dev/null +++ b/doc/sphinxext/prs/5247.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "76c3662d7d036505522bc00c0a7de80d28aa75fa", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5250.json b/doc/sphinxext/prs/5250.json new file mode 100644 index 00000000000..82beabd4d9e --- /dev/null +++ b/doc/sphinxext/prs/5250.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "14ab6b69b4738007cafca005300df57139089857", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 2, + "d": 1 + }, + "mne/tests/test_import_nesting.py": { + "a": 13, + "d": 11 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5251.json b/doc/sphinxext/prs/5251.json new file mode 100644 index 00000000000..edee23d95ac --- /dev/null +++ b/doc/sphinxext/prs/5251.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "039cb1bf52770019bd48ac028795af0861792fa2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 34, + "d": 15 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "environment2.yml": { + "a": 0, + "d": 44 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 3, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 17, + "d": 18 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 4, + "d": 5 + }, + "mne/io/ctf/res4.py": { + "a": 40, + "d": 51 + }, + "mne/label.py": { + "a": 5, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 9, + "d": 6 + }, + "mne/tests/test_cov.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_dipole.py": { + "a": 8, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 1, + "d": 8 + }, + "mne/utils.py": { + "a": 4, + "d": 47 + }, + "requirements.txt": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5252.json b/doc/sphinxext/prs/5252.json new file mode 100644 index 00000000000..f6f636a8980 --- /dev/null +++ b/doc/sphinxext/prs/5252.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b784a40919ba957f48009447df9cfb5327ac84e4", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 6, + "d": 1 + }, + 
"mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5254.json b/doc/sphinxext/prs/5254.json new file mode 100644 index 00000000000..cd93c6df018 --- /dev/null +++ b/doc/sphinxext/prs/5254.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "34676a287b6af246df6b59fc9cce26ba7309fa54", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 4, + "d": 8 + }, + "mne/channels/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 59, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 29, + "d": 2 + }, + "tutorials/plot_stats_cluster_erp.py": { + "a": 6, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5255.json b/doc/sphinxext/prs/5255.json new file mode 100644 index 00000000000..2087e29a762 --- /dev/null +++ b/doc/sphinxext/prs/5255.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "1aea53a5e7b67ed82fa78c3fcf80d954bdf99a23", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 1, + "d": 0 + }, + "mne/io/ctf/ctf.py": { + "a": 6, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 35, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 9, + "d": 7 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 5, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 4, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5256.json b/doc/sphinxext/prs/5256.json new file mode 100644 index 00000000000..503d671a86b --- /dev/null +++ b/doc/sphinxext/prs/5256.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ab4571a83c4e5d5de0dcb2e9346f809f113dd619", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/install_mne_python.rst": { + "a": 38, + "d": 51 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "environment2.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5257.json b/doc/sphinxext/prs/5257.json new file mode 100644 index 00000000000..8927b5ffb70 --- /dev/null +++ b/doc/sphinxext/prs/5257.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a10845b59dcf537818e0192fbce3cef8f858dba3", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 39, + "d": 8 + }, + "mne/viz/tests/test_evoked.py": { + "a": 8, + "d": 0 + }, + "tutorials/plot_stats_cluster_erp.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5259.json b/doc/sphinxext/prs/5259.json new file mode 100644 index 00000000000..ae380c578bf --- /dev/null +++ b/doc/sphinxext/prs/5259.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6614a5be4c573bf06d2a2b19bd77d3b9adb5a84d", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 4 + }, + "mne/bem.py": { + "a": 4, + "d": 5 + }, + "mne/event.py": { + "a": 10, + "d": 16 + }, + "mne/evoked.py": { + 
"a": 1, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 10, + "d": 22 + }, + "mne/tests/test_event.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/526.json b/doc/sphinxext/prs/526.json new file mode 100644 index 00000000000..043c764b469 --- /dev/null +++ b/doc/sphinxext/prs/526.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e74cdf0e9c04da7a14e87750c9697fb075bbcd21", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 54, + "d": 21 + }, + "mne/tests/test_source_space.py": { + "a": 20, + "d": 3 + }, + "mne/utils.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5261.json b/doc/sphinxext/prs/5261.json new file mode 100644 index 00000000000..19af8fe9140 --- /dev/null +++ b/doc/sphinxext/prs/5261.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "68084ccfd8114d496bd64d28bb4f10bf792239c9", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 74, + "d": 5 + }, + "mne/viz/tests/test_evoked.py": { + "a": 9, + "d": 0 + }, + "tutorials/plot_stats_cluster_erp.py": { + "a": 14, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5262.json b/doc/sphinxext/prs/5262.json new file mode 100644 index 00000000000..88a57235e05 --- /dev/null +++ b/doc/sphinxext/prs/5262.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "106f567b2b3a16411e585373cb064d90d7928945", + "authors": [ + { + "n": "Manu Sutela", + "e": null + } + ], + "changes": { + "mne/stats/parametric.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5263.json b/doc/sphinxext/prs/5263.json new file mode 100644 index 00000000000..3751d7a8692 --- /dev/null +++ b/doc/sphinxext/prs/5263.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9af6fa422d7c5240540e4a5c020afd65f8d8f3d1", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 27, + "d": 6 + }, + "mne/viz/tests/test_topo.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5268.json b/doc/sphinxext/prs/5268.json new file mode 100644 index 00000000000..acb8a697ea5 --- /dev/null +++ b/doc/sphinxext/prs/5268.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "da1d32978b6033dd89609f12af291ed619264c7d", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 1, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 2, + "d": 5 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 2, + "d": 5 + }, + "mne/utils.py": { + "a": 4, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 20, + "d": 29 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5270.json b/doc/sphinxext/prs/5270.json new file mode 100644 index 00000000000..cdca6da45a8 --- /dev/null +++ b/doc/sphinxext/prs/5270.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5fb47e0e75e8923a67c47715a3d030f74bf8a369", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + 
"changes": { + "mne/preprocessing/ica.py": { + "a": 18, + "d": 12 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 73, + "d": 4 + }, + "mne/utils.py": { + "a": 29, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5271.json b/doc/sphinxext/prs/5271.json new file mode 100644 index 00000000000..c841ecb1fd2 --- /dev/null +++ b/doc/sphinxext/prs/5271.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e1b441460b3eb426dea9a90e1d250da18a169f12", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 12, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5272.json b/doc/sphinxext/prs/5272.json new file mode 100644 index 00000000000..704824ca514 --- /dev/null +++ b/doc/sphinxext/prs/5272.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3022961cef39b79b750bd0c64660324cb2d08a73", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 17, + "d": 16 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 0 + }, + "mne/data/helmets/ARTEMIS123.fif.gz": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5274.json b/doc/sphinxext/prs/5274.json new file mode 100644 index 00000000000..e685e91530a --- /dev/null +++ b/doc/sphinxext/prs/5274.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5c30a008cec4fe694b05294e4011460c41b206be", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 0 + }, + "mne/io/brainvision/tests/data/test.vmrk": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 17, + "d": 4 + }, + "mne/io/utils.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5278.json b/doc/sphinxext/prs/5278.json new file mode 100644 index 00000000000..8b2fa23d06b --- /dev/null +++ b/doc/sphinxext/prs/5278.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "23054e2567fbfb3c8679523d364dbe7191dfbe1e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/528.json b/doc/sphinxext/prs/528.json new file mode 100644 index 00000000000..7d91348cd1a --- /dev/null +++ b/doc/sphinxext/prs/528.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "85b0cf97f340a0ec2d98bfa03595c75d0cb46b9b", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 39, + "d": 6 + }, + "mne/tests/test_source_estimate.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5285.json b/doc/sphinxext/prs/5285.json new file mode 100644 index 00000000000..08864e7a8e1 --- /dev/null +++ b/doc/sphinxext/prs/5285.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "76294a7b2e370fb207081e3887b1d7b246238eed", + "authors": [ + { + "n": "Steven Bethard", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 56, + "d": 54 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/529.json b/doc/sphinxext/prs/529.json new file mode 100644 index 
00000000000..45ef6712b09 --- /dev/null +++ b/doc/sphinxext/prs/529.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d4b634b283083ceff8a260da967bfcb41d05944f", + "authors": [ + { + "n": "Mainak", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "bin/mne_compute_proj_eog.py": { + "a": 59, + "d": 53 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/eog.py": { + "a": 34, + "d": 14 + }, + "mne/preprocessing/ssp.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5291.json b/doc/sphinxext/prs/5291.json new file mode 100644 index 00000000000..53a56884e45 --- /dev/null +++ b/doc/sphinxext/prs/5291.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ef08c9ef02e198b443e40364f502b192ee14f70e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 6 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 2, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 13, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5295.json b/doc/sphinxext/prs/5295.json new file mode 100644 index 00000000000..79eb0a09d49 --- /dev/null +++ b/doc/sphinxext/prs/5295.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e4c995b4517089f7f105a9afb997f1b05a4e3653", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5298.json b/doc/sphinxext/prs/5298.json new file mode 100644 index 00000000000..a8efef5dc3f --- /dev/null +++ b/doc/sphinxext/prs/5298.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a44caaa3a5b89318cadd11c7f0a27301f727cf24", + "authors": [ + { + "n": "Tristan Stenner", + "e": null + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 1 + }, + "mne/io/brainvision/tests/data/test_nV.vhdr": { + "a": 151, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/53.json b/doc/sphinxext/prs/53.json new file mode 100644 index 00000000000..50757ef6477 --- /dev/null +++ b/doc/sphinxext/prs/53.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "8b8de0c0285175e18ea680899a581381b70bf615", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 73, + "d": 0 + }, + "mne/beamformer/__init__.py": { + "a": 4, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 114, + "d": 0 + }, + "mne/beamformer/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 66, + "d": 0 + }, + "mne/cov.py": { + "a": 44, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5301.json b/doc/sphinxext/prs/5301.json new file mode 100644 index 00000000000..dfe27e55bb0 --- /dev/null +++ b/doc/sphinxext/prs/5301.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8b5e48aae28efb6d0f09a57244dcced7c041396d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "CODE_OF_CONDUCT.md": { + "a": 46, + "d": 0 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/5304.json b/doc/sphinxext/prs/5304.json new file mode 100644 index 00000000000..ed12032124c --- /dev/null +++ b/doc/sphinxext/prs/5304.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "96bd8661599558024cdcd9edcc5eb0e12919722b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "README.rst": { + "a": 19, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5306.json b/doc/sphinxext/prs/5306.json new file mode 100644 index 00000000000..5d44fb71e84 --- /dev/null +++ b/doc/sphinxext/prs/5306.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8812f9309ce2e9b345461684c7a8c190db111cc2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_source_space.py": { + "a": 60, + "d": 59 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5308.json b/doc/sphinxext/prs/5308.json new file mode 100644 index 00000000000..290fe43c472 --- /dev/null +++ b/doc/sphinxext/prs/5308.json @@ -0,0 +1,515 @@ +{ + "merge_commit_sha": "1442cc4b8cd6e30595bf34dae2972093a7ac584c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 5, + "d": 5 + }, + "Makefile": { + "a": 2, + "d": 3 + }, + "environment.yml": { + "a": 0, + "d": 1 + }, + "environment2.yml": { + "a": 0, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 44, + "d": 46 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 14, + "d": 16 + }, + "mne/channels/tests/test_channels.py": { + "a": 64, + "d": 65 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 15, + "d": 16 + }, + "mne/channels/tests/test_layout.py": { + "a": 38, + "d": 38 + }, + "mne/channels/tests/test_montage.py": { + "a": 30, + "d": 30 + }, + "mne/commands/tests/test_commands.py": { + "a": 15, + "d": 15 + }, + "mne/connectivity/tests/test_effective.py": { + "a": 5, + "d": 6 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 36, + "d": 39 + }, + "mne/connectivity/tests/test_utils.py": { + "a": 6, + "d": 7 + }, + "mne/datasets/sample/sample.py": { + "a": 8, + "d": 4 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 8, + "d": 4 + }, + "mne/datasets/testing/_testing.py": { + "a": 8, + "d": 4 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 8, + "d": 9 + }, + "mne/decoding/tests/test_base.py": { + "a": 14, + "d": 13 + }, + "mne/decoding/tests/test_csp.py": { + "a": 37, + "d": 38 + }, + "mne/decoding/tests/test_ems.py": { + "a": 7, + "d": 7 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 25, + "d": 26 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 27, + "d": 25 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 4, + "d": 2 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 22, + "d": 22 + }, + "mne/fixes.py": { + "a": 0, + "d": 78 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 21, + "d": 21 + }, + "mne/forward/tests/test_forward.py": { + "a": 48, + "d": 54 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 17, + "d": 19 + }, + "mne/gui/_marker_gui.py": { + "a": 2, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 26, + "d": 26 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 
11, + "d": 11 + }, + "mne/inverse_sparse/tests/test_mxne_debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 3, + "d": 3 + }, + "mne/io/array/tests/test_array.py": { + "a": 7, + "d": 7 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 3, + "d": 4 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 14, + "d": 14 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 19, + "d": 19 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 5, + "d": 6 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 16, + "d": 18 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 2, + "d": 6 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 14, + "d": 12 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 23, + "d": 22 + }, + "mne/io/eximia/tests/test_eximia.py": { + "a": 7, + "d": 7 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 64, + "d": 67 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 5, + "d": 5 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 13, + "d": 15 + }, + "mne/io/tests/test_apply_function.py": { + "a": 11, + "d": 10 + }, + "mne/io/tests/test_compensator.py": { + "a": 4, + "d": 4 + }, + "mne/io/tests/test_pick.py": { + "a": 15, + "d": 15 + }, + "mne/io/tests/test_proc_history.py": { + "a": 20, + "d": 19 + }, + "mne/io/tests/test_raw.py": { + "a": 11, + "d": 11 + }, + "mne/io/tests/test_reference.py": { + "a": 44, + "d": 44 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 46, + "d": 49 + }, + "mne/minimum_norm/tests/test_psf_ctf.py": { + "a": 6, + "d": 9 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ctps.py": { + "a": 19, + "d": 20 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 3, + "d": 4 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 83, + "d": 84 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 3, + "d": 7 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 48, + "d": 49 + }, + "mne/preprocessing/tests/test_peak_finder.py": { + "a": 7, + "d": 5 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 10, + "d": 11 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 8, + "d": 7 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 3, + "d": 3 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 2, + "d": 4 + }, + "mne/realtime/tests/test_stim_client_server.py": { + "a": 8, + "d": 10 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 4, + "d": 5 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 8, + "d": 7 + }, + "mne/simulation/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/tests/test_source.py": { + "a": 23, + "d": 20 + }, + "mne/source_space.py": { + "a": 15, + "d": 16 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 33, + "d": 32 + }, + "mne/stats/tests/test_multi_comp.py": { + "a": 14, + "d": 15 + }, + "mne/stats/tests/test_parametric.py": { + "a": 22, + "d": 20 + }, + "mne/stats/tests/test_permutations.py": { + "a": 1, + "d": 0 + }, + "mne/stats/tests/test_regression.py": { + "a": 16, + "d": 17 + }, + "mne/tests/common.py": { + "a": 17, + "d": 19 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_bem.py": { + "a": 38, + "d": 39 + }, + "mne/tests/test_chpi.py": { + "a": 29, + "d": 32 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 2 + }, + 
"mne/tests/test_cov.py": { + "a": 44, + "d": 50 + }, + "mne/tests/test_defaults.py": { + "a": 7, + "d": 9 + }, + "mne/tests/test_dipole.py": { + "a": 28, + "d": 28 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 7, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 212, + "d": 214 + }, + "mne/tests/test_event.py": { + "a": 35, + "d": 35 + }, + "mne/tests/test_evoked.py": { + "a": 68, + "d": 70 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_fixes.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_import_nesting.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 69, + "d": 68 + }, + "mne/tests/test_line_endings.py": { + "a": 6, + "d": 5 + }, + "mne/tests/test_misc.py": { + "a": 2, + "d": 4 + }, + "mne/tests/test_proj.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_report.py": { + "a": 32, + "d": 32 + }, + "mne/tests/test_selection.py": { + "a": 9, + "d": 8 + }, + "mne/tests/test_source_estimate.py": { + "a": 81, + "d": 84 + }, + "mne/tests/test_surface.py": { + "a": 9, + "d": 9 + }, + "mne/tests/test_transforms.py": { + "a": 4, + "d": 5 + }, + "mne/tests/test_utils.py": { + "a": 141, + "d": 134 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 0, + "d": 1 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 23, + "d": 25 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 13, + "d": 12 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 65, + "d": 65 + }, + "mne/viz/tests/test_3d.py": { + "a": 32, + "d": 34 + }, + "mne/viz/tests/test_circle.py": { + "a": 4, + "d": 5 + }, + "mne/viz/tests/test_epochs.py": { + "a": 11, + "d": 12 + }, + "mne/viz/tests/test_evoked.py": { + "a": 22, + "d": 22 + }, + "mne/viz/tests/test_ica.py": { + "a": 20, + "d": 20 + }, + "mne/viz/tests/test_misc.py": { + "a": 7, + "d": 8 + }, + "mne/viz/tests/test_montage.py": { + "a": 2, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 17, + "d": 16 + }, + "mne/viz/tests/test_topo.py": { + "a": 16, + "d": 17 + }, + "mne/viz/tests/test_topomap.py": { + "a": 42, + "d": 43 + }, + "mne/viz/tests/test_utils.py": { + "a": 27, + "d": 25 + }, + "setup.cfg": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5312.json b/doc/sphinxext/prs/5312.json new file mode 100644 index 00000000000..89e0dfd1cb3 --- /dev/null +++ b/doc/sphinxext/prs/5312.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "de86b1d8f2c9cae6b5f98567dd1516457b0ed648", + "authors": [ + { + "n": null, + "e": "erkkahe@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 23, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 8, + "d": 6 + }, + "tutorials/plot_sensors_time_frequency.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5313.json b/doc/sphinxext/prs/5313.json new file mode 100644 index 00000000000..83b8aba03e2 --- /dev/null +++ b/doc/sphinxext/prs/5313.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d6f2ee670ea89d9d0d43c5d4d3aca58ee0881154", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 8, + "d": 6 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 42, + "d": 5 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/5314.json b/doc/sphinxext/prs/5314.json new file mode 100644 index 00000000000..9390df1c00c --- /dev/null +++ b/doc/sphinxext/prs/5314.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f49022a6cf0a3d9617d7050838f09bff3e2757fd", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/channels/data/layouts/KIT-160.lay": { + "a": 162, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 4, + "d": 1 + }, + "mne/io/kit/constants.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5316.json b/doc/sphinxext/prs/5316.json new file mode 100644 index 00000000000..a84c4100ed1 --- /dev/null +++ b/doc/sphinxext/prs/5316.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1f70786a6bcef53568585d4984acbb9eab5034a8", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/tutorial_links.inc": { + "a": 4, + "d": 0 + }, + "tutorials/plot_background_statistics.py": { + "a": 128, + "d": 121 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5317.json b/doc/sphinxext/prs/5317.json new file mode 100644 index 00000000000..be446856d9f --- /dev/null +++ b/doc/sphinxext/prs/5317.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e60cc0ab0be847a47f61f6f1f31a9ff7e961e611", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 10, + "d": 0 + }, + "mne/beamformer/_dics.py": { + "a": 30, + "d": 14 + }, + "mne/beamformer/_lcmv.py": { + "a": 32, + "d": 23 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 27, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 17, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 11, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5319.json b/doc/sphinxext/prs/5319.json new file mode 100644 index 00000000000..7062a75810a --- /dev/null +++ b/doc/sphinxext/prs/5319.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9d47022eaa39d5ade0654dc9bc3f0b3eb3485a7c", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5321.json b/doc/sphinxext/prs/5321.json new file mode 100644 index 00000000000..ff1b01af148 --- /dev/null +++ b/doc/sphinxext/prs/5321.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f2fcc5aec08893f2b29502992d8a283b83027b15", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/interpolation.py": { + "a": 8, + "d": 4 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5322.json b/doc/sphinxext/prs/5322.json new file mode 100644 index 00000000000..82dabe72936 --- /dev/null +++ b/doc/sphinxext/prs/5322.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1264cc711bbce07276c3fc2b79d6b64514e53d28", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5326.json b/doc/sphinxext/prs/5326.json new file mode 100644 index 00000000000..e6391d5f112 --- /dev/null +++ b/doc/sphinxext/prs/5326.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": 
"16feb1ae3ce490d6accbc078c83c0f737fe9ca9e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/known_projects.inc": { + "a": 3, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "mne/annotations.py": { + "a": 3, + "d": 1 + }, + "mne/externals/pymatreader/pymatreader.py": { + "a": 18, + "d": 7 + }, + "mne/io/eeglab/eeglab.py": { + "a": 73, + "d": 55 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 99, + "d": 60 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5327.json b/doc/sphinxext/prs/5327.json new file mode 100644 index 00000000000..b27f5819388 --- /dev/null +++ b/doc/sphinxext/prs/5327.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4df573c93fc76f42bd4bdb0568e96961e22fe7b6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 8, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5328.json b/doc/sphinxext/prs/5328.json new file mode 100644 index 00000000000..974d4e782e3 --- /dev/null +++ b/doc/sphinxext/prs/5328.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "001e5c84dca33e10d8e72558b87e33c10bfbe65a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 39, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5329.json b/doc/sphinxext/prs/5329.json new file mode 100644 index 00000000000..dd6f8933d2d --- /dev/null +++ b/doc/sphinxext/prs/5329.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "0706ad95fa091003f29bd1d568a17324453bb297", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 15, + "d": 33 + }, + "mne/fixes.py": { + "a": 27, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 11, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 309, + "d": 11 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 43, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/533.json b/doc/sphinxext/prs/533.json new file mode 100644 index 00000000000..4482982855f --- /dev/null +++ b/doc/sphinxext/prs/533.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7605647251572c12d4f873007e8bee1cac49d3cb", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5331.json b/doc/sphinxext/prs/5331.json new file mode 100644 index 00000000000..9f9179cffcc --- /dev/null +++ b/doc/sphinxext/prs/5331.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9b3883d4af81b239d6270e517f8296ec84b85f87", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 7, + "d": 16 + }, + "mne/io/reference.py": { + "a": 0, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5332.json b/doc/sphinxext/prs/5332.json new file mode 100644 index 00000000000..06ae2f1c1d8 --- /dev/null +++ b/doc/sphinxext/prs/5332.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "8da6623382c2eca6aad1e431d6da51400b0bc6ec", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 5, + "d": 5 + }, + "mne/epochs.py": { + "a": 5, + "d": 5 + }, + "mne/externals/decorator.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_field_interpolation.py": { + "a": 3, + "d": 4 + }, + "mne/forward/_make_forward.py": { + "a": 5, + "d": 5 + }, + "mne/io/constants.py": { + "a": 26, + "d": 23 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 3 + }, + "mne/io/tag.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_constants.py": { + "a": 252, + "d": 0 + }, + "mne/io/write.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 3, + "d": 4 + }, + "mne/utils.py": { + "a": 14, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/534.json b/doc/sphinxext/prs/534.json new file mode 100644 index 00000000000..25c74590251 --- /dev/null +++ b/doc/sphinxext/prs/534.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c95036a4a8c00443f52d6490b4e6986bae6d4bbe", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 6, + "d": 2 + }, + "mne/viz.py": { + "a": 16, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5340.json b/doc/sphinxext/prs/5340.json new file mode 100644 index 00000000000..44b469d968d --- /dev/null +++ b/doc/sphinxext/prs/5340.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "575164175b9c0faa506b6e87b25762c1e238403d", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "CONTRIBUTING.md": { + "a": 13, + "d": 0 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5341.json b/doc/sphinxext/prs/5341.json new file mode 100644 index 00000000000..99328e40125 --- /dev/null +++ b/doc/sphinxext/prs/5341.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1cedd32abe7b9ae1817e455788f664d2faa13e6c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 4, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 19, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5343.json b/doc/sphinxext/prs/5343.json new file mode 100644 index 00000000000..a2e075f4d98 --- /dev/null +++ b/doc/sphinxext/prs/5343.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "a75970522eaf8ea2da04085024b32669adfb43dc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/array/array.py": { + "a": 0, + "d": 2 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 10, + "d": 5 + }, + "mne/io/brainvision/brainvision.py": { + "a": 0, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 0, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 5, + "d": 3 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 2, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 0, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 0, + "d": 3 + }, + "mne/io/egi/egi.py": { + "a": 0, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 0, + "d": 1 + }, + "mne/io/eximia/eximia.py": { + "a": 0, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 8, + "d": 8 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 5 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 0, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5344.json b/doc/sphinxext/prs/5344.json new file mode 100644 index 00000000000..63a09b7791b --- /dev/null +++ b/doc/sphinxext/prs/5344.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "512fafcf40faf457bb9eae577290df7e9ec34fc9", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 27, + "d": 26 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5348.json b/doc/sphinxext/prs/5348.json new file mode 100644 
index 00000000000..9e0e2473783 --- /dev/null +++ b/doc/sphinxext/prs/5348.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5389a456c400701c7bc7c7f8452fb4e6c53a11ae", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 26 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 8, + "d": 7 + }, + "mne/tests/common.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5350.json b/doc/sphinxext/prs/5350.json new file mode 100644 index 00000000000..9d31a47d4a8 --- /dev/null +++ b/doc/sphinxext/prs/5350.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8bccec4977f0fd75e2652572d81c9cee2e959525", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 57 + }, + "mne/tests/test_utils.py": { + "a": 19, + "d": 1 + }, + "mne/utils.py": { + "a": 73, + "d": 0 + }, + "tutorials/plot_background_filtering.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5352.json b/doc/sphinxext/prs/5352.json new file mode 100644 index 00000000000..93645f26d2f --- /dev/null +++ b/doc/sphinxext/prs/5352.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e39e1f04cdaf4f7b7b1e1586e085ae790af1243f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5354.json b/doc/sphinxext/prs/5354.json new file mode 100644 index 00000000000..bd9d6b7c424 --- /dev/null +++ b/doc/sphinxext/prs/5354.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "241161d00d685c988a5937888f8c76959d0cd224", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5361.json b/doc/sphinxext/prs/5361.json new file mode 100644 index 00000000000..4cb670d08cf --- /dev/null +++ b/doc/sphinxext/prs/5361.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "7e0e988b6e8f3461855a1dd36aec7e5d17b6c379", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 31, + "d": 5 + }, + "mne/io/brainvision/tests/data/test_old_layout_latin1_software_filter.vmrk": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/tests/data/testv2.vmrk": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 20, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5363.json b/doc/sphinxext/prs/5363.json new file mode 100644 index 00000000000..929629dde03 --- /dev/null +++ b/doc/sphinxext/prs/5363.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d109dd8d438e0287dd7b98a167166879204f41a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tests/test_csd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 3 + }, + "setup.cfg": { + "a": 1, + "d": 0 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/5367.json b/doc/sphinxext/prs/5367.json new file mode 100644 index 00000000000..bc751ff2a62 --- /dev/null +++ b/doc/sphinxext/prs/5367.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9358407481379f8738782ab0b9e0c124e974a3ac", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "mne/externals/pymatreader/pymatreader.py": { + "a": 15, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/537.json b/doc/sphinxext/prs/537.json new file mode 100644 index 00000000000..b0e00163648 --- /dev/null +++ b/doc/sphinxext/prs/537.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d9fc70aaba832713e20d5bd9222da8a687b8e404", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/_static/default.css": { + "a": 11, + "d": 0 + }, + "doc/source/_static/navy.css": { + "a": 10, + "d": 0 + }, + "doc/source/_templates/layout.html": { + "a": 20, + "d": 0 + }, + "doc/source/conf.py": { + "a": 2, + "d": 1 + }, + "doc/source/mne-python.rst": { + "a": 3, + "d": 0 + }, + "doc/sphinxext/gen_rst.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5371.json b/doc/sphinxext/prs/5371.json new file mode 100644 index 00000000000..d34499a6e7d --- /dev/null +++ b/doc/sphinxext/prs/5371.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fdba42d36107a9c6388578cb7147de242b54a1d4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/proj.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5376.json b/doc/sphinxext/prs/5376.json new file mode 100644 index 00000000000..ab9adfd0e92 --- /dev/null +++ b/doc/sphinxext/prs/5376.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0af60b0598e492ff2e43037ec7c838a361d21bd4", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5378.json b/doc/sphinxext/prs/5378.json new file mode 100644 index 00000000000..0072b25576b --- /dev/null +++ b/doc/sphinxext/prs/5378.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "52c432908a7d21ffd5c844a470e0ee7d2b626949", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 6, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 28, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/538.json b/doc/sphinxext/prs/538.json new file mode 100644 index 00000000000..b3705c42496 --- /dev/null +++ b/doc/sphinxext/prs/538.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "acc704c2a4f409cb9e3ef47d695dca7557f7f47f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/fiff/pick.py": { + "a": 15, + "d": 5 + }, + "mne/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5380.json b/doc/sphinxext/prs/5380.json new file mode 100644 index 00000000000..7ef59551658 --- /dev/null +++ b/doc/sphinxext/prs/5380.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8e44a98692f6dbec059390d33cc78134a0e4f5ba", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + 
], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 53, + "d": 0 + }, + "tutorials/plot_dipole_fit.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5381.json b/doc/sphinxext/prs/5381.json new file mode 100644 index 00000000000..a42c1d8d405 --- /dev/null +++ b/doc/sphinxext/prs/5381.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5179d6c809b2000b4aeee4fe479640e4b9e0955a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 24, + "d": 38 + }, + "mne/tests/test_bem.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5385.json b/doc/sphinxext/prs/5385.json new file mode 100644 index 00000000000..53757e172b0 --- /dev/null +++ b/doc/sphinxext/prs/5385.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "609515033e9917423aa4005c294a7c64919b21e6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 11, + "d": 6 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5386.json b/doc/sphinxext/prs/5386.json new file mode 100644 index 00000000000..d12e12aaaa8 --- /dev/null +++ b/doc/sphinxext/prs/5386.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "19a44282662786b12f6246bc0921bb5b0a2afab5", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 6, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5388.json b/doc/sphinxext/prs/5388.json new file mode 100644 index 00000000000..4d8aca35d99 --- /dev/null +++ b/doc/sphinxext/prs/5388.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "83e16e22b74f3eefd8eb69a8390a63519cd501ba", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 4, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/539.json b/doc/sphinxext/prs/539.json new file mode 100644 index 00000000000..15e4ce8838a --- /dev/null +++ b/doc/sphinxext/prs/539.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2ea0b66ce7a7a499e87d7668ea0bf7ab7b6507b1", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/fiff/evoked.py": { + "a": 11, + "d": 10 + }, + "mne/fiff/raw.py": { + "a": 12, + "d": 17 + }, + "mne/tests/test_viz.py": { + "a": 2, + "d": 5 + }, + "mne/viz.py": { + "a": 76, + "d": 55 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5391.json b/doc/sphinxext/prs/5391.json new file mode 100644 index 00000000000..35111808679 --- /dev/null +++ b/doc/sphinxext/prs/5391.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3d08007b755931bcd0955b6aacc6d3fb424ff7e6", + "authors": [ + { + "n": "Henrich Kolkhorst", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 72, + "d": 17 + }, + "mne/io/brainvision/tests/data/test.vmrk": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 57, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5392.json b/doc/sphinxext/prs/5392.json new file mode 100644 index 00000000000..f9f4beb507c --- /dev/null +++ b/doc/sphinxext/prs/5392.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "83217a86b28a07ed024d9647c942df1960519411", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 19, + "d": 17 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5393.json b/doc/sphinxext/prs/5393.json new file mode 100644 index 00000000000..13560e9f244 --- /dev/null +++ b/doc/sphinxext/prs/5393.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "012b02bf402873c91987b9cac4fe0aaf56c89755", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/minimum_norm/psf_ctf.py": { + "a": 0, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5394.json b/doc/sphinxext/prs/5394.json new file mode 100644 index 00000000000..12c84ff741b --- /dev/null +++ b/doc/sphinxext/prs/5394.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5ae2ad3dce9b1289cd33439d49f94a8143a53997", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/search_light.py": { + "a": 8, + "d": 0 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 35, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5395.json b/doc/sphinxext/prs/5395.json new file mode 100644 index 00000000000..5437aea71fa --- /dev/null +++ b/doc/sphinxext/prs/5395.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2e808f876e8bc09c75b2238e6b87547604f5545c", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/glossary.rst": { + "a": 147, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5396.json b/doc/sphinxext/prs/5396.json new file mode 100644 index 00000000000..501cafcb588 --- /dev/null +++ b/doc/sphinxext/prs/5396.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a3431eff2d7143faf57bc7f6f12e8d703bab7b19", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "tutorials/plot_object_epochs.py": { + 
"a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5398.json b/doc/sphinxext/prs/5398.json new file mode 100644 index 00000000000..48f7e45d485 --- /dev/null +++ b/doc/sphinxext/prs/5398.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "80dfec2b6324c761e4b186d96586ff0d423e4bae", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/utils.py": { + "a": 7, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5399.json b/doc/sphinxext/prs/5399.json new file mode 100644 index 00000000000..d4fee8ff142 --- /dev/null +++ b/doc/sphinxext/prs/5399.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9490d528eef7d1c87b1b9b530132aee70b7c7771", + "authors": [ + { + "n": "Sam Perry", + "e": null + } + ], + "changes": { + "mne/cuda.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_filter.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/54.json b/doc/sphinxext/prs/54.json new file mode 100644 index 00000000000..349f8b429bb --- /dev/null +++ b/doc/sphinxext/prs/54.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/filter.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_utils.py": { + "a": 11, + "d": 0 + }, + "mne/utils.py": { + "a": 158, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/540.json b/doc/sphinxext/prs/540.json new file mode 100644 index 00000000000..ddec0ffcf0b --- /dev/null +++ b/doc/sphinxext/prs/540.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "fb2186201103c44d2e4adf5873ba09f4f6ea5c29", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 16, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 51, + "d": 15 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 25, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5402.json b/doc/sphinxext/prs/5402.json new file mode 100644 index 00000000000..d68e173fc8a --- /dev/null +++ b/doc/sphinxext/prs/5402.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c248bd4c0ac151c76e17b56a5649e659bad58c7e", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 51, + "d": 14 + }, + "mne/viz/epochs.py": { + "a": 11, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5403.json b/doc/sphinxext/prs/5403.json new file mode 100644 index 00000000000..d2227c89c20 --- /dev/null +++ b/doc/sphinxext/prs/5403.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "1a499ea04322036f93e7e2d46e9994b856c40beb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/manual/cookbook.rst": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 10, + "d": 8 + }, + "examples/visualization/make_report.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 51, + "d": 15 + }, + "mne/tests/test_report.py": { + "a": 30, + "d": 5 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5404.json b/doc/sphinxext/prs/5404.json new file mode 100644 index 00000000000..ac2f5419e39 --- /dev/null +++ b/doc/sphinxext/prs/5404.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "63763c7aa6f55726b5694d85a56b035b7a45c822", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5405.json b/doc/sphinxext/prs/5405.json new file mode 100644 index 00000000000..bd33aade526 --- /dev/null +++ b/doc/sphinxext/prs/5405.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3bc4bebf8cd1a2c52bea27b94c9c8c6d40bfcdd5", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5407.json b/doc/sphinxext/prs/5407.json new file mode 100644 index 00000000000..7d8a434b62b --- /dev/null +++ b/doc/sphinxext/prs/5407.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ef0b547ec9ecd50dc9fa3312a0442406bd266130", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 9, + "d": 3 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5409.json b/doc/sphinxext/prs/5409.json new file mode 100644 index 00000000000..e8b2044620e --- /dev/null +++ b/doc/sphinxext/prs/5409.json @@ -0,0 +1,375 @@ +{ + "merge_commit_sha": "1d3cb1e2441d94a724a27a415c5bfd15eee58ed2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 2, + "d": 0 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 4, + 
"d": 9 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 19, + "d": 29 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 0, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 4, + "d": 10 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 13, + "d": 15 + }, + "mne/channels/tests/test_layout.py": { + "a": 0, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 18, + "d": 27 + }, + "mne/commands/tests/test_commands.py": { + "a": 6, + "d": 8 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 0, + "d": 4 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 3, + "d": 11 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 7, + "d": 13 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 3, + "d": 4 + }, + "mne/forward/tests/test_forward.py": { + "a": 28, + "d": 36 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 6, + "d": 10 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 2, + "d": 6 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 0, + "d": 3 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 1, + "d": 6 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 20, + "d": 16 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 70, + "d": 54 + }, + "mne/io/array/tests/test_array.py": { + "a": 0, + "d": 3 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 4, + "d": 5 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 3 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 19, + "d": 22 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 0, + "d": 3 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 2, + "d": 6 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 6, + "d": 12 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 5, + "d": 16 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 3, + "d": 7 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 22, + "d": 38 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 5, + "d": 15 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 20, + "d": 43 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 1, + "d": 3 + }, + "mne/io/tests/test_meas_info.py": { + "a": 2, + "d": 9 + }, + "mne/io/tests/test_pick.py": { + "a": 1, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_reference.py": { + "a": 4, + "d": 7 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 5, + "d": 9 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 0, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/tests/test_ctps.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 0, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 74, + "d": 82 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 16, + "d": 28 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 5, + "d": 12 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 30, + "d": 35 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 1, + "d": 0 + }, + "mne/report.py": { + "a": 14, + "d": 9 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 0, + "d": 3 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 0, + "d": 3 + }, + "mne/simulation/tests/test_raw.py": { + "a": 0, + "d": 3 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 13, + "d": 23 + }, + "mne/stats/tests/test_regression.py": { + "a": 5, + "d": 11 + }, + 
"mne/tests/common.py": { + "a": 0, + "d": 20 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 12, + "d": 23 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 6 + }, + "mne/tests/test_coreg.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 27, + "d": 40 + }, + "mne/tests/test_dipole.py": { + "a": 4, + "d": 9 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 8, + "d": 8 + }, + "mne/tests/test_epochs.py": { + "a": 30, + "d": 56 + }, + "mne/tests/test_event.py": { + "a": 6, + "d": 17 + }, + "mne/tests/test_evoked.py": { + "a": 5, + "d": 13 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 9, + "d": 18 + }, + "mne/tests/test_proj.py": { + "a": 15, + "d": 33 + }, + "mne/tests/test_report.py": { + "a": 7, + "d": 20 + }, + "mne/tests/test_source_estimate.py": { + "a": 14, + "d": 26 + }, + "mne/tests/test_source_space.py": { + "a": 5, + "d": 12 + }, + "mne/tests/test_surface.py": { + "a": 6, + "d": 8 + }, + "mne/tests/test_transforms.py": { + "a": 2, + "d": 7 + }, + "mne/tests/test_utils.py": { + "a": 17, + "d": 34 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 3, + "d": 5 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 3, + "d": 4 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 4, + "d": 4 + }, + "mne/viz/misc.py": { + "a": 4, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 15, + "d": 25 + }, + "mne/viz/tests/test_epochs.py": { + "a": 32, + "d": 44 + }, + "mne/viz/tests/test_evoked.py": { + "a": 5, + "d": 9 + }, + "mne/viz/tests/test_ica.py": { + "a": 68, + "d": 74 + }, + "mne/viz/tests/test_misc.py": { + "a": 13, + "d": 9 + }, + "mne/viz/tests/test_raw.py": { + "a": 104, + "d": 101 + }, + "mne/viz/tests/test_topo.py": { + "a": 41, + "d": 39 + }, + "mne/viz/tests/test_topomap.py": { + "a": 6, + "d": 18 + }, + "mne/viz/tests/test_utils.py": { + "a": 0, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "setup.cfg": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/541.json b/doc/sphinxext/prs/541.json new file mode 100644 index 00000000000..ae98b88ac09 --- /dev/null +++ b/doc/sphinxext/prs/541.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4fcd9386f0446b5c9e224f0207e988e674681afa", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5410.json b/doc/sphinxext/prs/5410.json new file mode 100644 index 00000000000..7bd7a2033d5 --- /dev/null +++ b/doc/sphinxext/prs/5410.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "18755d22c3752b767dd493ffb4ffa5360bd322b4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/io/proj.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5412.json b/doc/sphinxext/prs/5412.json new file mode 100644 index 00000000000..bdfdcfba6c6 --- /dev/null +++ b/doc/sphinxext/prs/5412.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3b0270cad41e53da97fbaa96d2c361afc62e44a5", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 1 + }, + "mne/io/brainvision/tests/data/testv2.vmrk": { + "a": 7, + "d": 6 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5413.json b/doc/sphinxext/prs/5413.json new file mode 100644 index 00000000000..aa365d592a1 --- /dev/null +++ b/doc/sphinxext/prs/5413.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ae560615a72dea7f807983a359f9b57838c1bd9d", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + ".github/CONTRIBUTING.md": { + "a": 0, + "d": 0 + }, + ".github/ISSUE_TEMPLATE/blank.md": { + "a": 5, + "d": 0 + }, + ".github/ISSUE_TEMPLATE/bug_report.md": { + "a": 50, + "d": 0 + }, + ".github/ISSUE_TEMPLATE/feature_request.md": { + "a": 22, + "d": 0 + }, + ".github/PULL_REQUEST_TEMPLATE.md": { + "a": 23, + "d": 0 + }, + "MANIFEST.in": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5414.json b/doc/sphinxext/prs/5414.json new file mode 100644 index 00000000000..0143ee398d8 --- /dev/null +++ b/doc/sphinxext/prs/5414.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c78271f983093f2c0bb5ab89a58714e036539dd7", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/commands/mne_flash_bem.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5415.json b/doc/sphinxext/prs/5415.json new file mode 100644 index 00000000000..ea6f14b893e --- /dev/null +++ b/doc/sphinxext/prs/5415.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "beda451422b4d8016643438ce7e998c76cbd2f59", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5416.json b/doc/sphinxext/prs/5416.json new file mode 100644 index 00000000000..fe0a3d734b5 --- /dev/null +++ b/doc/sphinxext/prs/5416.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "487762415ef41c8d5dbc2f21e40bcf02b874f19a", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 3 + }, + "mne/annotations.py": { + "a": 16, + "d": 5 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 
5 + }, + "mne/io/base.py": { + "a": 19, + "d": 18 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 2, + "d": 3 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 7 + }, + "mne/io/pick.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 5, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5418.json b/doc/sphinxext/prs/5418.json new file mode 100644 index 00000000000..d29032f703a --- /dev/null +++ b/doc/sphinxext/prs/5418.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e6e61b30b616eed039959544340234f7897a2852", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 74, + "d": 1 + }, + "mne/io/base.py": { + "a": 31, + "d": 56 + }, + "mne/tests/test_annotations.py": { + "a": 54, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5422.json b/doc/sphinxext/prs/5422.json new file mode 100644 index 00000000000..62ace603779 --- /dev/null +++ b/doc/sphinxext/prs/5422.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b070c7087c2e575624515d70a6ec150f17d661e2", + "authors": [ + { + "n": "Thomas Jochmann", + "e": null + } + ], + "changes": { + "doc/faq.rst": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5423.json b/doc/sphinxext/prs/5423.json new file mode 100644 index 00000000000..e8f767209b8 --- /dev/null +++ b/doc/sphinxext/prs/5423.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ce9289fe13c2bee6506a18c63d93fbfea48f0f26", + "authors": [ + { + "n": "Thomas Jochmann", + "e": null + } + ], + "changes": { + "doc/configure_git.rst": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5424.json b/doc/sphinxext/prs/5424.json new file mode 100644 index 00000000000..079f97dee69 --- /dev/null +++ b/doc/sphinxext/prs/5424.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "62ecf9e482bf84a7f58c5fefff766336068f8bce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5426.json b/doc/sphinxext/prs/5426.json new file mode 100644 index 00000000000..0653bb65131 --- /dev/null +++ b/doc/sphinxext/prs/5426.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "eb934fb23d58b86644578240160a2c5332f469be", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 25, + "d": 17 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 12, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5428.json b/doc/sphinxext/prs/5428.json new file mode 100644 index 00000000000..aee031cc7e9 --- /dev/null +++ b/doc/sphinxext/prs/5428.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "ff8ebca4a9d2497d4f8613c11b42300ccd3c9ba4", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 6, + "d": 4 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + 
"mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 7, + "d": 5 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_reference.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5429.json b/doc/sphinxext/prs/5429.json new file mode 100644 index 00000000000..0b416b11e21 --- /dev/null +++ b/doc/sphinxext/prs/5429.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d3aba149f87adb34bdb64e75aa230f7203b0e86d", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "mne/externals/pymatreader/__init__.py": { + "a": 13, + "d": 9 + }, + "mne/externals/pymatreader/pymatreader.py": { + "a": 109, + "d": 64 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5431.json b/doc/sphinxext/prs/5431.json new file mode 100644 index 00000000000..2ffeef9d6e7 --- /dev/null +++ b/doc/sphinxext/prs/5431.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "35f54e709e8d8e33d2c5996f37699ae7e4b1a651", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 13, + "d": 5 + }, + "mne/tests/test_evoked.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5433.json b/doc/sphinxext/prs/5433.json new file mode 100644 index 00000000000..510a5d92806 --- /dev/null +++ b/doc/sphinxext/prs/5433.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "7ecc4a52faf0cc363e56f1fbb0e2109a76c7c8cb", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 74, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 0 + }, + "mne/io/base.py": { + "a": 37, + "d": 5 + }, + "mne/io/fiff/raw.py": { + "a": 19, + "d": 14 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 5, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 61, + "d": 28 + }, + "mne/io/tests/test_raw.py": { + "a": 64, + "d": 3 + }, + "mne/tests/test_annotations.py": { + "a": 23, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5434.json b/doc/sphinxext/prs/5434.json new file mode 100644 index 00000000000..0914ac30e22 --- /dev/null +++ b/doc/sphinxext/prs/5434.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5a60d29d6561204d95a4e0edbe95066c5404abdd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5435.json b/doc/sphinxext/prs/5435.json new file mode 100644 index 00000000000..a2657aab564 --- /dev/null +++ b/doc/sphinxext/prs/5435.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "dd45dba7acc7289cceaca6aa669c2f70264e6643", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cuda.py": { + "a": 
40, + "d": 53 + }, + "mne/filter.py": { + "a": 8, + "d": 15 + }, + "mne/tests/test_filter.py": { + "a": 20, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5437.json b/doc/sphinxext/prs/5437.json new file mode 100644 index 00000000000..b6708bc09fb --- /dev/null +++ b/doc/sphinxext/prs/5437.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "986e8a3004f94bb740ffe74d12dce9d222493339", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 1, + "d": 5 + }, + "mne/stats/tests/test_regression.py": { + "a": 6, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5438.json b/doc/sphinxext/prs/5438.json new file mode 100644 index 00000000000..1e2d338b8f4 --- /dev/null +++ b/doc/sphinxext/prs/5438.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a0ffcf9f63dce1f6f6af89713e98903f905dcf34", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5442.json b/doc/sphinxext/prs/5442.json new file mode 100644 index 00000000000..521e75ce0ee --- /dev/null +++ b/doc/sphinxext/prs/5442.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0d99a0d199a1f7a954003e86974ff88ed28b36d4", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 32, + "d": 28 + }, + "mne/io/brainvision/tests/data/testv2.vmrk": { + "a": 13, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5443.json b/doc/sphinxext/prs/5443.json new file mode 100644 index 00000000000..49ee1e05f06 --- /dev/null +++ b/doc/sphinxext/prs/5443.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "d232d8ec611513044d6a60d732e8f1d50fb6b457", + "authors": [ + { + "n": "Tommy Clausner", + "e": null + } + ], + "changes": { + "doc/_templates/class.rst": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/documentation.rst": { + "a": 5, + "d": 1 + }, + "doc/manual/cookbook.rst": { + "a": 2, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 5 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_morph_data.py": { + "a": 0, + "d": 59 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 85, + "d": 0 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 88, + "d": 0 + }, + "examples/visualization/plot_xhemi.py": { + "a": 8, + "d": 6 + }, + "mne/__init__.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 189, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 8, + "d": 99 + }, + "mne/beamformer/_lcmv.py": { + "a": 9, + "d": 125 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 9, + "d": 1 + }, + "mne/label.py": { + "a": 10, + "d": 6 + }, + "mne/morph.py": { + "a": 1475, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 59, + "d": 519 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 275, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 83, + "d": 65 + }, + "mne/tests/test_source_space.py": { + "a": 26, + "d": 10 + }, + "mne/utils.py": { + "a": 7, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 
2 + }, + "tutorials/plot_background_freesurfer.py": { + "a": 118, + "d": 0 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 9, + "d": 4 + }, + "tutorials/plot_morph_stc.py": { + "a": 266, + "d": 0 + }, + "tutorials/plot_object_source_estimate.py": { + "a": 229, + "d": 0 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 5, + "d": 2 + }, + "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5444.json b/doc/sphinxext/prs/5444.json new file mode 100644 index 00000000000..9aebc25fedb --- /dev/null +++ b/doc/sphinxext/prs/5444.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c6e62d6c11c557cd69ce0b4e88bb17b5ca8c47dd", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 7 + }, + "mne/transforms.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5447.json b/doc/sphinxext/prs/5447.json new file mode 100644 index 00000000000..5df82a6c546 --- /dev/null +++ b/doc/sphinxext/prs/5447.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "552b4334457eef4d617c0420803199c974059c8e", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 15, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 6, + "d": 10 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 224, + "d": 213 + }, + "mne/beamformer/_dics.py": { + "a": 51, + "d": 22 + }, + "mne/beamformer/_lcmv.py": { + "a": 44, + "d": 26 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 23, + "d": 5 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 20, + "d": 52 + }, + "mne/cov.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 48, + "d": 2 + }, + "mne/utils.py": { + "a": 128, + "d": 6 + }, + "tutorials/plot_dics.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/545.json b/doc/sphinxext/prs/545.json new file mode 100644 index 00000000000..07f9f5a5836 --- /dev/null +++ b/doc/sphinxext/prs/545.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b6ce07846a7cbde7426303eb3c5991fbd4de34c4", + "authors": [ + { + "n": "Brad Buran", + "e": null + } + ], + "changes": { + "mne/utils.py": { + "a": 93, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5452.json b/doc/sphinxext/prs/5452.json new file mode 100644 index 00000000000..5f119885baf --- /dev/null +++ b/doc/sphinxext/prs/5452.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "0fe359e9ad4002b2e4d1023e199bf7a005b27f25", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 5 + }, + "doc/advanced_setup.rst": { + "a": 16, + "d": 40 + }, + "doc/install_mne_python.rst": { + "a": 1, + "d": 2 + }, + "doc/known_projects.inc": { + "a": 3, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/cuda.py": { + "a": 74, + "d": 134 + }, + 
"mne/decoding/transformer.py": { + "a": 6, + "d": 6 + }, + "mne/filter.py": { + "a": 22, + "d": 22 + }, + "mne/io/base.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/ssp.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 12, + "d": 7 + }, + "mne/utils.py": { + "a": 3, + "d": 6 + }, + "setup.cfg": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5453.json b/doc/sphinxext/prs/5453.json new file mode 100644 index 00000000000..91fca4dfe77 --- /dev/null +++ b/doc/sphinxext/prs/5453.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0802291704a45000b04c31d696c5c84828997c27", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5454.json b/doc/sphinxext/prs/5454.json new file mode 100644 index 00000000000..4f3b60a9a76 --- /dev/null +++ b/doc/sphinxext/prs/5454.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f6d4084bf0d304fe49a23545b34c4c2cf998951b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 0 + }, + "doc/advanced_setup.rst": { + "a": 17, + "d": 1 + }, + "doc/contributing.rst": { + "a": 2, + "d": 2 + }, + "doc/faq.rst": { + "a": 3, + "d": 27 + }, + "doc/install_mne_python.rst": { + "a": 26, + "d": 2 + }, + "environment.yml": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5459.json b/doc/sphinxext/prs/5459.json new file mode 100644 index 00000000000..5d8c3b57f1e --- /dev/null +++ b/doc/sphinxext/prs/5459.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f7e2a95bf01981072c598bb806204e72630b26f6", + "authors": [ + { + "n": "Sergey Antopolskiy", + "e": "s.antopolsky@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/546.json b/doc/sphinxext/prs/546.json new file mode 100644 index 00000000000..a341636ec4a --- /dev/null +++ b/doc/sphinxext/prs/546.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "edc00a11911b6fdc872e03988840677133ff19a6", + "authors": [ + { + "n": "Brad Buran", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5460.json b/doc/sphinxext/prs/5460.json new file mode 100644 index 00000000000..9519690a79a --- /dev/null +++ b/doc/sphinxext/prs/5460.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "c5b43b0bea7017a2171d5e22b1351f3ee80e3906", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/annotations.py": { + "a": 12, + "d": 7 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 247, + "d": 133 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 97, + "d": 75 + }, + "mne/io/utils.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5461.json b/doc/sphinxext/prs/5461.json new file mode 100644 index 00000000000..54b1324897a --- /dev/null +++ b/doc/sphinxext/prs/5461.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9807547fee74b85f1db85295ae6d986523d68455", + "authors": [ + { + 
"n": "Peter J. Molfese", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/eog.py": { + "a": 14, + "d": 8 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5462.json b/doc/sphinxext/prs/5462.json new file mode 100644 index 00000000000..6d97a8c574a --- /dev/null +++ b/doc/sphinxext/prs/5462.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8cc1068aa34c0aa43cdd3d2f42ba4961d0a189e9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 1, + "d": 1 + }, + "mne/data/helmets/CTF_275.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5464.json b/doc/sphinxext/prs/5464.json new file mode 100644 index 00000000000..70d9aaca37f --- /dev/null +++ b/doc/sphinxext/prs/5464.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2dd7aa0bd05f3cae8dc5c17e951454ef6c71b17c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cuda.py": { + "a": 9, + "d": 6 + }, + "mne/decoding/receptive_field.py": { + "a": 30, + "d": 6 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 44, + "d": 29 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 109, + "d": 95 + }, + "mne/utils/progressbar.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5466.json b/doc/sphinxext/prs/5466.json new file mode 100644 index 00000000000..87482712804 --- /dev/null +++ b/doc/sphinxext/prs/5466.json @@ -0,0 +1,151 @@ +{ + "merge_commit_sha": "6104472f1101633c7a8166b36d2e9206e5aee0b3", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "doc/_templates/class.rst": { + "a": 0, + "d": 2 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/documentation.rst": { + "a": 5, + "d": 3 + }, + "doc/faq.rst": { + "a": 0, + "d": 26 + }, + "doc/glossary.rst": { + "a": 4, + "d": 0 + }, + "doc/manual/cookbook.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/source_localization/inverse.rst": { + "a": 2, + "d": 2 + }, + "doc/manual/source_localization/morph.rst": { + "a": 2, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 4 + }, + "doc/whats_new.rst": { + "a": 5, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_morph_data.py": { + "a": 0, + "d": 59 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 151, + "d": 0 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 160, + "d": 0 + }, + "examples/visualization/plot_xhemi.py": { + "a": 13, + "d": 20 + }, + "mne/__init__.py": { + "a": 4, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/label.py": { + "a": 19, + "d": 18 + }, + "mne/morph.py": { + "a": 1200, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 84, + "d": 588 + }, + "mne/surface.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 497, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 31, + "d": 123 + }, + "mne/tests/test_source_space.py": { + "a": 37, + "d": 10 + }, + "mne/utils.py": { + "a": 24, + 
"d": 2 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_background_freesurfer.py": { + "a": 4, + "d": 3 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 7, + "d": 5 + }, + "tutorials/plot_morph_stc.py": { + "a": 265, + "d": 0 + }, + "tutorials/plot_object_source_estimate.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 4, + "d": 2 + }, + "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5467.json b/doc/sphinxext/prs/5467.json new file mode 100644 index 00000000000..fcf3a8bbb0e --- /dev/null +++ b/doc/sphinxext/prs/5467.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "fd0ecac68cab7ddb6d1649b72b70042eef081550", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 2, + "d": 0 + }, + "doc/glossary.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/cookbook.rst": { + "a": 2, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 2 + }, + "tutorials/plot_background_freesurfer.py": { + "a": 117, + "d": 0 + }, + "tutorials/plot_object_source_estimate.py": { + "a": 238, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5468.json b/doc/sphinxext/prs/5468.json new file mode 100644 index 00000000000..19982089ae0 --- /dev/null +++ b/doc/sphinxext/prs/5468.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9dd6b8aa7927a86761d21992564f54697411f52c", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/io/kit/kit.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/547.json b/doc/sphinxext/prs/547.json new file mode 100644 index 00000000000..764ba85c92e --- /dev/null +++ b/doc/sphinxext/prs/547.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "acd30858a4ad4a577d002e2a8486747771df85e1", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/utils.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5470.json b/doc/sphinxext/prs/5470.json new file mode 100644 index 00000000000..a7a2e32108d --- /dev/null +++ b/doc/sphinxext/prs/5470.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "14d0c6a3e6257692fd497614d9cedfc1c3bc2a60", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/io/base.py": { + "a": 33, + "d": 8 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5473.json b/doc/sphinxext/prs/5473.json new file mode 100644 index 00000000000..b440573588b --- /dev/null +++ b/doc/sphinxext/prs/5473.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "971267567b443f5a7d6c9812711ee634bf01e874", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + "examples/visualization/plot_parcellation.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5475.json b/doc/sphinxext/prs/5475.json new file mode 100644 index 00000000000..aa3b2a9e849 --- /dev/null +++ b/doc/sphinxext/prs/5475.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d91f99f56e4beb80808f396b4404f5434bedae8e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/dipole.py": { + "a": 23, + "d": 30 + }, + "mne/tests/test_dipole.py": { + "a": 14, + "d": 15 + }, + "tutorials/plot_brainstorm_phantom_ctf.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 16, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5477.json b/doc/sphinxext/prs/5477.json new file mode 100644 index 00000000000..0465be7cfdb --- /dev/null +++ b/doc/sphinxext/prs/5477.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b7c8d9c7c9f84a62a3de28af678f32e6a314a2e0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 20, + "d": 18 + }, + "mne/minimum_norm/inverse.py": { + "a": 41, + "d": 34 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 53, + "d": 4 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 15, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5478.json b/doc/sphinxext/prs/5478.json new file mode 100644 index 00000000000..947a3b3965b --- /dev/null +++ b/doc/sphinxext/prs/5478.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cfe3b1e0705622c903e44893702396e1dd968a8a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5481.json b/doc/sphinxext/prs/5481.json new file mode 100644 index 00000000000..bcfa6c5ca75 --- /dev/null +++ b/doc/sphinxext/prs/5481.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "c80e57cd4eef1b753fa885af757ad8927430307f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 3, + "d": 2 + 
}, + "examples/visualization/plot_evoked_whitening.py": { + "a": 3, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 283, + "d": 170 + }, + "mne/decoding/csp.py": { + "a": 10, + "d": 3 + }, + "mne/decoding/tests/test_csp.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 2, + "d": 2 + }, + "mne/io/pick.py": { + "a": 13, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 8, + "d": 2 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 143, + "d": 32 + }, + "mne/tests/test_proj.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 8, + "d": 6 + }, + "tutorials/plot_compute_covariance.py": { + "a": 4, + "d": 2 + }, + "tutorials/plot_whitened.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5482.json b/doc/sphinxext/prs/5482.json new file mode 100644 index 00000000000..f15bcc48254 --- /dev/null +++ b/doc/sphinxext/prs/5482.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "aabc501d988298f286381bd94ec3ec628a9d4dac", + "authors": [ + { + "n": "Henrich Kolkhorst", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/realtime/ftclient_rt_average.py": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 9, + "d": 0 + }, + "mne/realtime/client.py": { + "a": 3, + "d": 4 + }, + "mne/realtime/epochs.py": { + "a": 7, + "d": 2 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 6, + "d": 4 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5486.json b/doc/sphinxext/prs/5486.json new file mode 100644 index 00000000000..3694e36a6cb --- /dev/null +++ b/doc/sphinxext/prs/5486.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "6cb40dbb55eb77938b9557388329bba0e53b3061", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/manual/cookbook.rst": { + "a": 3, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 9, + "d": 7 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 4, + "d": 4 + }, + "examples/visualization/plot_xhemi.py": { + "a": 4, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 7, + "d": 7 + }, + "mne/source_estimate.py": { + "a": 8, + "d": 6 + }, + "mne/tests/test_morph.py": { + "a": 26, + "d": 26 + }, + "mne/tests/test_source_space.py": { + "a": 7, + "d": 5 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 4, + "d": 2 + }, + "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5489.json b/doc/sphinxext/prs/5489.json new file mode 100644 index 00000000000..ded3ece1dad --- /dev/null +++ b/doc/sphinxext/prs/5489.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"6aff6e6cc11e7428fe6a89bf8516d9b3669d3938", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_simulated.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/549.json b/doc/sphinxext/prs/549.json new file mode 100644 index 00000000000..f73583aafba --- /dev/null +++ b/doc/sphinxext/prs/549.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b6dd77ab5d108816734b45f65e503a6a07c4b9fb", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 7, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5496.json b/doc/sphinxext/prs/5496.json new file mode 100644 index 00000000000..344a6856fc5 --- /dev/null +++ b/doc/sphinxext/prs/5496.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "613df87f097323175784f3a7fdc32bbad0e604ac", + "authors": [ + { + "n": "Jasper J.F. van den Bosch", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5497.json b/doc/sphinxext/prs/5497.json new file mode 100644 index 00000000000..317d02db050 --- /dev/null +++ b/doc/sphinxext/prs/5497.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fc6a6766b908c83dca8018b8aedba9405fb7d88f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "setup.cfg": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5498.json b/doc/sphinxext/prs/5498.json new file mode 100644 index 00000000000..f2c3c695081 --- /dev/null +++ b/doc/sphinxext/prs/5498.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "64be522285d5eeea4e3fb4c6df133984fc65b74e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/time_frequency/plot_compute_source_psd_epochs.py": { + "a": 31, + "d": 23 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 17, + "d": 6 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/multitaper.py": { + "a": 6, + "d": 5 + }, + "mne/utils.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/55.json b/doc/sphinxext/prs/55.json new file mode 100644 index 00000000000..23e0f12b2a3 --- /dev/null +++ b/doc/sphinxext/prs/55.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b2b9ff400caac1561732793110b95d6f1ee64666", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_morph_data.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 56, + "d": 28 + }, + "mne/tests/test_source_estimate.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/550.json b/doc/sphinxext/prs/550.json new file mode 100644 index 00000000000..34e9a03d135 --- /dev/null +++ b/doc/sphinxext/prs/550.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5e5de599e8010031720b45b90fc3a26ccbd5b361", + 
"authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/time_frequency/plot_compute_source_psd_epochs.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5500.json b/doc/sphinxext/prs/5500.json new file mode 100644 index 00000000000..02f3df9b5d1 --- /dev/null +++ b/doc/sphinxext/prs/5500.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "d1e2cc88b83d26d45140ea6ff7b66e04082e7728", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 3 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 2, + "d": 3 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 3, + "d": 3 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 25, + "d": 14 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5502.json b/doc/sphinxext/prs/5502.json new file mode 100644 index 00000000000..47cc97e11d4 --- /dev/null +++ b/doc/sphinxext/prs/5502.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c95051f553d31da2ec7945b1ef0afe3594672f7b", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5504.json b/doc/sphinxext/prs/5504.json new file mode 100644 index 00000000000..8e98a7b087c --- /dev/null +++ b/doc/sphinxext/prs/5504.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5a84ba9f247362da66b209634916485d1c691c13", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5505.json b/doc/sphinxext/prs/5505.json new file mode 100644 index 00000000000..39023436b79 --- /dev/null +++ b/doc/sphinxext/prs/5505.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9aaa6636c3a508df1aa87c6a38d1a388b5de0b39", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 6, + "d": 4 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 140, + "d": 23 + }, + "mne/tests/test_report.py": { + "a": 32, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5506.json b/doc/sphinxext/prs/5506.json new file mode 100644 index 00000000000..acc69f47e8f --- /dev/null +++ b/doc/sphinxext/prs/5506.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b6b0afe7b0e124f0b04465cd5b65b0a9f2651e68", + "authors": [ + { + "n": "ezemikulan", + "e": "e.mikulan@gmail.com" + } + ], + 
"changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 0 + }, + "mne/gui/_coreg_gui.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5511.json b/doc/sphinxext/prs/5511.json new file mode 100644 index 00000000000..d505c34583c --- /dev/null +++ b/doc/sphinxext/prs/5511.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "438206d4a3f41f6bd9fbc45ee756a516760ce86c", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 11, + "d": 7 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5513.json b/doc/sphinxext/prs/5513.json new file mode 100644 index 00000000000..776ab81f1b6 --- /dev/null +++ b/doc/sphinxext/prs/5513.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9445db3e18f6b4f62497a484f6cfc92d20dbf325", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5514.json b/doc/sphinxext/prs/5514.json new file mode 100644 index 00000000000..084892093c2 --- /dev/null +++ b/doc/sphinxext/prs/5514.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "9382ee8c46d2c1d56e0fd9f8b79b07cd0bc0a855", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 1, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 11, + "d": 6 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 2, + "d": 1 + }, + "examples/visualization/plot_evoked_arrowmap.py": { + "a": 17, + "d": 15 + }, + "mne/data/helmets/CTF_275.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 3, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_utils.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 41, + "d": 23 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 158, + "d": 89 + }, + "mne/source_estimate.py": { + "a": 28, + "d": 21 + }, + "mne/tests/test_source_estimate.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 9, + "d": 1 + }, + "mne/utils.py": { + "a": 4, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5518.json b/doc/sphinxext/prs/5518.json new file mode 100644 index 00000000000..9d463a62159 --- /dev/null +++ b/doc/sphinxext/prs/5518.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ee69dff86525644090f1e5a9c3587f200652f313", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/sphinxext/cited_mne.py": { + "a": 2, + "d": 2 + }, + "doc/sphinxext/ipython_console_highlighting.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/552.json b/doc/sphinxext/prs/552.json new file mode 100644 index 00000000000..94720de6a58 --- /dev/null +++ b/doc/sphinxext/prs/552.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "749715585be234efa7b6ce9ec16136e57ec2d7ed", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/5522.json b/doc/sphinxext/prs/5522.json new file mode 100644 index 00000000000..78b81938981 --- /dev/null +++ b/doc/sphinxext/prs/5522.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4aa86acaeabd91dd6175b74c25fd3fdcbac531f0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 2, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 73, + "d": 47 + }, + "mne/viz/tests/test_3d.py": { + "a": 14, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5524.json b/doc/sphinxext/prs/5524.json new file mode 100644 index 00000000000..20993d2c33d --- /dev/null +++ b/doc/sphinxext/prs/5524.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "56ca913254a551ab6cb34b3b208f44374f80d121", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 31, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 22, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5525.json b/doc/sphinxext/prs/5525.json new file mode 100644 index 00000000000..24cd65ae2b9 --- /dev/null +++ b/doc/sphinxext/prs/5525.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "7f0d522073cbe5b0920ee133471b503f45fa6d60", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 4, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/manual/datasets_index.rst": { + "a": 34, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/datasets/plot_opm_data.py": { + "a": 117, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/opm/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/opm/opm.py": { + "a": 32, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 7, + "d": 1 + }, + "mne/dipole.py": { + "a": 3, + "d": 9 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 49, + "d": 7 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 52, + "d": 13 + }, + "mne/viz/tests/test_3d.py": { + "a": 33, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5527.json b/doc/sphinxext/prs/5527.json new file mode 100644 index 00000000000..896b9b4e5b8 --- /dev/null +++ b/doc/sphinxext/prs/5527.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "445ca6951b6f92d2ae88a2ae07b410275ff0678c", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/tutorials/report.rst": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5528.json b/doc/sphinxext/prs/5528.json new file mode 100644 index 00000000000..f16cc279679 --- /dev/null +++ b/doc/sphinxext/prs/5528.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "8f6ff4237b0f9cf31053de21fed4b66cbaf39b53", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 129, + "d": 111 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 3, + "d": 2 + 
}, + "tutorials/plot_visualize_stc.py": { + "a": 140, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/553.json b/doc/sphinxext/prs/553.json new file mode 100644 index 00000000000..6fbaa3098f0 --- /dev/null +++ b/doc/sphinxext/prs/553.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8c70883aca410f5f239a362c20c2d76ceee9290d", + "authors": [ + { + "n": "Brad Buran", + "e": null + } + ], + "changes": { + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 12, + "d": 2 + }, + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 17, + "d": 5 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 21, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5530.json b/doc/sphinxext/prs/5530.json new file mode 100644 index 00000000000..491d4e5a70e --- /dev/null +++ b/doc/sphinxext/prs/5530.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d69d7ba6dec94056d78526428b2496bb5f009588", + "authors": [ + { + "n": "Hubert Banville", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5531.json b/doc/sphinxext/prs/5531.json new file mode 100644 index 00000000000..79d140f6bd7 --- /dev/null +++ b/doc/sphinxext/prs/5531.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "fa119971035fb8cf0f92acc98c846645d5f410d2", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/tutorials/report.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 147, + "d": 53 + }, + "mne/tests/test_report.py": { + "a": 64, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5532.json b/doc/sphinxext/prs/5532.json new file mode 100644 index 00000000000..27320f43ca2 --- /dev/null +++ b/doc/sphinxext/prs/5532.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "50efb02eda5f9ea14e1e9ae4b6045b5e4bc0dba0", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 20, + "d": 18 + }, + "mne/tests/test_utils.py": { + "a": 14, + "d": 1 + }, + "mne/utils.py": { + "a": 49, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5533.json b/doc/sphinxext/prs/5533.json new file mode 100644 index 00000000000..add9a9fa1b1 --- /dev/null +++ b/doc/sphinxext/prs/5533.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "02e17aed22045107553f01e88d8feed40c3d73ca", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 1, + "d": 2 + }, + "mne/io/base.py": { + "a": 6, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 4 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 1, + "d": 3 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 7, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5536.json b/doc/sphinxext/prs/5536.json new file mode 100644 index 00000000000..5a4647e45c0 --- /dev/null +++ b/doc/sphinxext/prs/5536.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "de8f3bf0d40aa4532300eee227e326f6787a3895", + 
"authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/_backend.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5537.json b/doc/sphinxext/prs/5537.json new file mode 100644 index 00000000000..c5dab730d62 --- /dev/null +++ b/doc/sphinxext/prs/5537.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "204f29b9724ae32f1983944e82e635abb2da8055", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5538.json b/doc/sphinxext/prs/5538.json new file mode 100644 index 00000000000..b7898d4bb20 --- /dev/null +++ b/doc/sphinxext/prs/5538.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "59d51f0ce128921db74a8c1076f46a488e6de4e5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/mne_coreg.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/554.json b/doc/sphinxext/prs/554.json new file mode 100644 index 00000000000..05d1888ed79 --- /dev/null +++ b/doc/sphinxext/prs/554.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "56c917bc2d234e147d3548295450dcd6f42708cb", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 115, + "d": 0 + }, + "mne/connectivity/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/connectivity/effective.py": { + "a": 164, + "d": 0 + }, + "mne/connectivity/spectral.py": { + "a": 131, + "d": 54 + }, + "mne/connectivity/tests/test_effective.py": { + "a": 42, + "d": 0 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 12, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5542.json b/doc/sphinxext/prs/5542.json new file mode 100644 index 00000000000..8a42acfc65b --- /dev/null +++ b/doc/sphinxext/prs/5542.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3c1546a97243b7bbf6901bfbbab9bdcf3b70aaf9", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 7, + "d": 2 + }, + "mne/channels/data/layouts/Neuromag_122.lout": { + "a": 123, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 42, + "d": 4 + }, + "mne/viz/tests/test_topomap.py": { + "a": 31, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 18, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5543.json b/doc/sphinxext/prs/5543.json new file mode 100644 index 00000000000..6b03d465e7a --- /dev/null +++ b/doc/sphinxext/prs/5543.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b73ea28300a2d0a1dcfb518a5de904b411d743ff", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/tests/test_raw.py": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5545.json b/doc/sphinxext/prs/5545.json new file mode 100644 index 00000000000..b7eeca3f8c3 --- /dev/null +++ b/doc/sphinxext/prs/5545.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b147f4310252040e3ad44de4e2f30c4116fdde37", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 3 + } 
+ } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5547.json b/doc/sphinxext/prs/5547.json new file mode 100644 index 00000000000..aa7e35aec24 --- /dev/null +++ b/doc/sphinxext/prs/5547.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "473b24fbb71ab946ed19268a0f10e4b51e860629", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5549.json b/doc/sphinxext/prs/5549.json new file mode 100644 index 00000000000..f2dd8e4165b --- /dev/null +++ b/doc/sphinxext/prs/5549.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "75c29da8e1e3857726156a6caea8fd727140a919", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/555.json b/doc/sphinxext/prs/555.json new file mode 100644 index 00000000000..c85d686ddb3 --- /dev/null +++ b/doc/sphinxext/prs/555.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "76c3a6df023b43fd4dfbc8fd8d411087c8473564", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5554.json b/doc/sphinxext/prs/5554.json new file mode 100644 index 00000000000..f1ad2302619 --- /dev/null +++ b/doc/sphinxext/prs/5554.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b619f1cd1f0860155b4d9ada2376b664fbce37b4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5557.json b/doc/sphinxext/prs/5557.json new file mode 100644 index 00000000000..e98015ea1a0 --- /dev/null +++ b/doc/sphinxext/prs/5557.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "6fd6dbd3350ff62f4093205df76bfda351a0ef6c", + "authors": [ + { + "n": "Sheraz Khan", + "e": "sheraz.khan@pfizer.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "examples/visualization/plot_evoked_arrowmap.py": { + "a": 74, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 18, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 175, + "d": 0 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5558.json b/doc/sphinxext/prs/5558.json new file mode 100644 index 00000000000..88813b8e9f0 --- /dev/null +++ b/doc/sphinxext/prs/5558.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fef6f063d0d81c54400f2868d91bf9c01b3ea2b3", + "authors": [ + { + "n": "Kambiz Tavabi", + "e": "ktavabi@gmail.com" + } + ], + "changes": { + "bin/mne": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_flash_bem.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5559.json b/doc/sphinxext/prs/5559.json new file mode 100644 index 00000000000..67bb2f11c54 --- /dev/null +++ b/doc/sphinxext/prs/5559.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "bcabf319eab3dcd3902125da344d13eafd1669a6", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + 
"examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/base.py": { + "a": 4, + "d": 1 + }, + "mne/decoding/tests/test_base.py": { + "a": 12, + "d": 7 + }, + "mne/decoding/tests/test_ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 27, + "d": 17 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/556.json b/doc/sphinxext/prs/556.json new file mode 100644 index 00000000000..4d88a797557 --- /dev/null +++ b/doc/sphinxext/prs/556.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f3041f5cdd64502fd8575dca53f601ea9ac33ae1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/event.py": { + "a": 15, + "d": 8 + }, + "mne/tests/test_epochs.py": { + "a": 14, + "d": 0 + }, + "mne/tests/test_event.py": { + "a": 19, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5560.json b/doc/sphinxext/prs/5560.json new file mode 100644 index 00000000000..60e5f7cdcaf --- /dev/null +++ b/doc/sphinxext/prs/5560.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1d6c5104656ba772e7527241568c228ca8f887c2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/constants.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5562.json b/doc/sphinxext/prs/5562.json new file mode 100644 index 00000000000..6879cff5e42 --- /dev/null +++ b/doc/sphinxext/prs/5562.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "04549bc89e7cb3d177ae523ef69f42208481545e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 71, + "d": 35 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 51, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5564.json b/doc/sphinxext/prs/5564.json new file mode 100644 index 00000000000..83c0ed37e11 --- /dev/null +++ b/doc/sphinxext/prs/5564.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a87ce1c5a041bc3c4aad98dbb1fce309ec963349", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 61, + "d": 13 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 40, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5566.json b/doc/sphinxext/prs/5566.json new file mode 100644 index 00000000000..ad47bbdcd76 --- /dev/null +++ b/doc/sphinxext/prs/5566.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d331cef343c116fe987ff9c22d9c0f2fc1c9d555", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 10, + "d": 0 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 43, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5569.json b/doc/sphinxext/prs/5569.json new file mode 100644 index 00000000000..75045ba0990 --- /dev/null +++ b/doc/sphinxext/prs/5569.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fd23ba8bfc1f11dd046abcfa89868f7dd8569d76", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 4, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5570.json b/doc/sphinxext/prs/5570.json new file mode 100644 index 00000000000..a1f2e313d12 --- /dev/null +++ b/doc/sphinxext/prs/5570.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f38ab2e529cb73f91dda31bdd7288b722f5c99b9", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/manual/c_reference.rst": { + "a": 2, + "d": 2 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 3, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 9, + "d": 7 + }, + "mne/io/utils.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5572.json b/doc/sphinxext/prs/5572.json new file mode 100644 index 00000000000..886edea8f3e --- /dev/null +++ b/doc/sphinxext/prs/5572.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f02f043312e70e211a1bc9f194f7b2b4ac999a60", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5573.json b/doc/sphinxext/prs/5573.json new file mode 100644 index 00000000000..64e52b68736 --- /dev/null +++ b/doc/sphinxext/prs/5573.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f5e6965a89a85098bbd389bb6fc9c90e78e192d3", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/edf/tests/test_edf.py": { + "a": 16, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5574.json b/doc/sphinxext/prs/5574.json new file mode 100644 index 00000000000..106bf6f8c55 --- /dev/null +++ b/doc/sphinxext/prs/5574.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0ddc0e4175f99105b025d879cf832cf29f25c268", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 6, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 155, + "d": 77 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 68, + "d": 4 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5575.json b/doc/sphinxext/prs/5575.json new file mode 100644 index 00000000000..38cd690522f --- /dev/null +++ b/doc/sphinxext/prs/5575.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6521476a878ac71e2ffd27a701f3fd88595fd823", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 2, + "d": 3 + }, + "mne/utils.py": { + "a": 13, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 8, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5576.json b/doc/sphinxext/prs/5576.json new file mode 100644 index 00000000000..c394b8abc6c --- /dev/null +++ b/doc/sphinxext/prs/5576.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"30d67493fb9231b3277d8470e006636e62084cc2", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/manual/c_reference.rst": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 16, + "d": 16 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/558.json b/doc/sphinxext/prs/558.json new file mode 100644 index 00000000000..29f9fe8cd73 --- /dev/null +++ b/doc/sphinxext/prs/558.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0eed1ff1033b5614726a769a2ca0ed94569798ab", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 5, + "d": 1 + }, + "mne/connectivity/tests/test_effective.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5580.json b/doc/sphinxext/prs/5580.json new file mode 100644 index 00000000000..2cabac0b036 --- /dev/null +++ b/doc/sphinxext/prs/5580.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "462695d5fbfcce6eed853dff6025076c7f616353", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 29, + "d": 15 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 41, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5581.json b/doc/sphinxext/prs/5581.json new file mode 100644 index 00000000000..a51fc62dfdd --- /dev/null +++ b/doc/sphinxext/prs/5581.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "46f3fa12d06827d975ba925f183637ad6916eaa8", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 0, + "d": 4 + }, + "mne/channels/interpolation.py": { + "a": 3, + "d": 5 + }, + "mne/cov.py": { + "a": 0, + "d": 8 + }, + "mne/forward/_make_forward.py": { + "a": 0, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 0, + "d": 8 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 20, + "d": 14 + }, + "mne/io/meas_info.py": { + "a": 0, + "d": 6 + }, + "mne/io/pick.py": { + "a": 9, + "d": 15 + }, + "mne/io/tests/test_meas_info.py": { + "a": 5, + "d": 8 + }, + "mne/io/tests/test_pick.py": { + "a": 5, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 11 + }, + "mne/preprocessing/ica.py": { + "a": 12, + "d": 28 + }, + "mne/viz/tests/test_topomap.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5582.json b/doc/sphinxext/prs/5582.json new file mode 100644 index 00000000000..90590f3d538 --- /dev/null +++ b/doc/sphinxext/prs/5582.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0db3c5776de04ec2f926295f4a191010efe49c39", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 8, + "d": 8 + }, + "mne/viz/utils.py": { + "a": 19, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5583.json b/doc/sphinxext/prs/5583.json new file mode 100644 index 00000000000..fb82bf5f043 --- /dev/null +++ b/doc/sphinxext/prs/5583.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "28ce1c39503e86f343ac3d4520881019461b2ef6", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 29, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5585.json 
b/doc/sphinxext/prs/5585.json new file mode 100644 index 00000000000..603b4c1012d --- /dev/null +++ b/doc/sphinxext/prs/5585.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "81a69cc637d7a4aa68a766a1514af14eb0ffa956", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5586.json b/doc/sphinxext/prs/5586.json new file mode 100644 index 00000000000..8fbb988bea5 --- /dev/null +++ b/doc/sphinxext/prs/5586.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a10feadcbacc350c23a79e8c14fa7eec2ebda282", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/egi/egimff.py": { + "a": 28, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5587.json b/doc/sphinxext/prs/5587.json new file mode 100644 index 00000000000..ca76f45f701 --- /dev/null +++ b/doc/sphinxext/prs/5587.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "672d66b9f3121ef3ba7f7aba1c6050094c56904c", + "authors": [ + { + "n": "Lorenz Esch", + "e": null + } + ], + "changes": { + "mne/data/coil_def.dat": { + "a": 104, + "d": 91 + }, + "mne/io/constants.py": { + "a": 8, + "d": 4 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5588.json b/doc/sphinxext/prs/5588.json new file mode 100644 index 00000000000..88538d6fd42 --- /dev/null +++ b/doc/sphinxext/prs/5588.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e64802326c9ef0b048b785adcee4f4bd26ef268d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 2 + }, + "mne/channels/interpolation.py": { + "a": 7, + "d": 2 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_field_interpolation.py": { + "a": 10, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5589.json b/doc/sphinxext/prs/5589.json new file mode 100644 index 00000000000..5fee330d1a9 --- /dev/null +++ b/doc/sphinxext/prs/5589.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "17e1c2906c64e13e1f60213b5422ef7c103eae7c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 7, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 4, + "d": 0 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 110, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 11, + "d": 11 + }, + "mne/beamformer/_lcmv.py": { + "a": 22, + "d": 16 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 19, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 24, + "d": 3 + }, + "mne/utils.py": { + "a": 2, + "d": 0 + }, + "tutorials/plot_dics.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5590.json b/doc/sphinxext/prs/5590.json new file mode 100644 index 00000000000..d42b96b5a81 --- /dev/null +++ b/doc/sphinxext/prs/5590.json @@ -0,0 
+1,19 @@ +{ + "merge_commit_sha": "6cc5c42657e33ca165bbf9e009b3d9cac57d264e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/morph.py": { + "a": 16, + "d": 7 + }, + "mne/tests/test_morph.py": { + "a": 39, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5591.json b/doc/sphinxext/prs/5591.json new file mode 100644 index 00000000000..fed9c1afb46 --- /dev/null +++ b/doc/sphinxext/prs/5591.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b5d4b6b455eeb3da5e2045696607aa5a47e7647b", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 83, + "d": 20 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 28, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5595.json b/doc/sphinxext/prs/5595.json new file mode 100644 index 00000000000..edfc55eedaf --- /dev/null +++ b/doc/sphinxext/prs/5595.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "168121c49f763187c41f2766c1cd2a1317f30ad3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 26, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 46, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5596.json b/doc/sphinxext/prs/5596.json new file mode 100644 index 00000000000..739d2c3217d --- /dev/null +++ b/doc/sphinxext/prs/5596.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3703da76ac4ba3fb40fa2f248c407ac345046a64", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 7, + "d": 5 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 12, + "d": 17 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 10, + "d": 1 + }, + "mne/utils.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5598.json b/doc/sphinxext/prs/5598.json new file mode 100644 index 00000000000..63db1b2122b --- /dev/null +++ b/doc/sphinxext/prs/5598.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f942ab14bbebe13206d76e02c4a5d6232084b36f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/tests/test_constants.py": { + "a": 108, + "d": 40 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/56.json b/doc/sphinxext/prs/56.json new file mode 100644 index 00000000000..8c791b8218a --- /dev/null +++ b/doc/sphinxext/prs/56.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c08bd5f6a40c7b2bc80571659c974a594c000178", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 17, + "d": 9 + }, + "mne/filter.py": { + "a": 69, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5601.json b/doc/sphinxext/prs/5601.json new file mode 100644 index 00000000000..e882c316947 --- /dev/null +++ b/doc/sphinxext/prs/5601.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "349636572155eced005e2105962467eabaa63261", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/glossary.rst": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + 
"doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/datasets/plot_opm_data.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 10, + "d": 7 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 18, + "d": 3 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 2 + }, + "tutorials/plot_introduction.py": { + "a": 50, + "d": 54 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5603.json b/doc/sphinxext/prs/5603.json new file mode 100644 index 00000000000..a77ff5bd13e --- /dev/null +++ b/doc/sphinxext/prs/5603.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f3a2a9085fc5a34060d8087f3dc74d71b80f47fe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5606.json b/doc/sphinxext/prs/5606.json new file mode 100644 index 00000000000..8f41ddf8055 --- /dev/null +++ b/doc/sphinxext/prs/5606.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8521fc3cdec5bfb1eb0ff20dd748cb63c4b87b6d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 35, + "d": 12 + }, + "mne/forward/_compute_forward.py": { + "a": 9, + "d": 4 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 16, + "d": 6 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5608.json b/doc/sphinxext/prs/5608.json new file mode 100644 index 00000000000..e197f5d8820 --- /dev/null +++ b/doc/sphinxext/prs/5608.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "df832f4b9b9ef41d30bf269bb056650e7566b080", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/glossary.rst": { + "a": 3, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 33, + "d": 12 + }, + "mne/tests/test_annotations.py": { + "a": 15, + "d": 0 + }, + "tutorials/plot_object_annotations.py": { + "a": 170, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5609.json b/doc/sphinxext/prs/5609.json new file mode 100644 index 00000000000..3eb0ea7d4fe --- /dev/null +++ b/doc/sphinxext/prs/5609.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "935866b466da206569cc508c65c003de9a36c65c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 18, + "d": 14 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 13, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/561.json b/doc/sphinxext/prs/561.json new file mode 100644 index 00000000000..d64cd3a8cb2 --- /dev/null +++ b/doc/sphinxext/prs/561.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "999509cb812349be69b1500baa9a716a3bc42ff4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"Makefile": { + "a": 5, + "d": 0 + }, + "dictionary.txt": { + "a": 4264, + "d": 0 + }, + "doc/source/manual/AppB.rst": { + "a": 1, + "d": 1 + }, + "doc/source/manual/AppReleaseNotes.rst": { + "a": 1, + "d": 1 + }, + "doc/source/manual/analyze.rst": { + "a": 1, + "d": 1 + }, + "doc/source/manual/convert.rst": { + "a": 3, + "d": 3 + }, + "doc/source/manual/forward.rst": { + "a": 2, + "d": 2 + }, + "doc/source/manual/mne.rst": { + "a": 8, + "d": 8 + }, + "doc/source/python_tutorial.rst": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 1, + "d": 1 + }, + "examples/plot_topo_compare_conditions.py": { + "a": 3, + "d": 3 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_fdr_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 1, + "d": 1 + }, + "mne/baseline.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/bti/raw.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/compensator.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/pick.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 3, + "d": 3 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/peak_finder.py": { + "a": 3, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/viz.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5610.json b/doc/sphinxext/prs/5610.json new file mode 100644 index 00000000000..3504348882e --- /dev/null +++ b/doc/sphinxext/prs/5610.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "66891112791f9441f9e670401c30e109b9c51615", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 16, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5613.json b/doc/sphinxext/prs/5613.json new file mode 100644 index 00000000000..6c9dee96067 --- /dev/null +++ b/doc/sphinxext/prs/5613.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9df12aa57dce2a75ba619c90abc6a6547dde9997", + "authors": [ + { + "n": "Tom Dupré la Tour", + "e": null + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/562.json b/doc/sphinxext/prs/562.json new file mode 100644 index 00000000000..62f6ccc7d60 --- /dev/null +++ b/doc/sphinxext/prs/562.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "dc4cb72e6ad8a37b9910fb7fd9ec320861241d0d", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 34, + "d": 30 + }, + "mne/fiff/evoked.py": { + "a": 12, + "d": 12 + }, + "mne/fiff/raw.py": { + "a": 10, + "d": 12 + }, + "mne/fiff/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 60, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 13, + "d": 1 + }, + "mne/utils.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5620.json b/doc/sphinxext/prs/5620.json new file mode 100644 index 00000000000..beb05cff8da --- /dev/null +++ b/doc/sphinxext/prs/5620.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cc3f02e4fa1b0fc827348374462e656f8169d4d5", + "authors": [ + { + "n": "jeythekey", + "e": "44215387+jeythekey@users.noreply.github.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 116, + "d": 57 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5622.json b/doc/sphinxext/prs/5622.json new file mode 100644 index 00000000000..d8614138c5f --- /dev/null +++ b/doc/sphinxext/prs/5622.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "517d3815a4ff519ecf9d09b9d714e87fa0ae92a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 2, + "d": 3 + }, + "mne/data/coil_def_Elekta.dat": { + "a": 2, + "d": 6 + }, + "mne/forward/_make_forward.py": { + "a": 4, + "d": 15 + }, + "mne/io/constants.py": { + "a": 0, + "d": 2 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 7 + }, + "mne/preprocessing/maxwell.py": { + "a": 15, + "d": 8 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 53, + "d": 35 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5625.json b/doc/sphinxext/prs/5625.json new file mode 100644 index 00000000000..e50bd03034e --- /dev/null +++ b/doc/sphinxext/prs/5625.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f4096adaf2646da32e6d2f5536e74c14b312ad5f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 35, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5626.json b/doc/sphinxext/prs/5626.json new file mode 100644 index 00000000000..56f81e19c48 --- /dev/null +++ b/doc/sphinxext/prs/5626.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6ec77282ce7e1d5b74d96eb44daa6e1287eadd09", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 11, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5628.json b/doc/sphinxext/prs/5628.json new file mode 100644 index 00000000000..615ddddd903 --- /dev/null +++ b/doc/sphinxext/prs/5628.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "46585fb139a15d024941eb24273f7a0ab097515c", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/manual/datasets_index.rst": { + "a": 2, + "d": 3 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 4, + "d": 3 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5633.json b/doc/sphinxext/prs/5633.json new file mode 100644 index 00000000000..9b95a581f1f --- /dev/null +++ 
b/doc/sphinxext/prs/5633.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "628b9c8ee2ae8d2912ed9a283ccb149d98084afc", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/base.py": { + "a": 37, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 26, + "d": 13 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 15, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 16, + "d": 7 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 15, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 55, + "d": 0 + }, + "mne/io/tests/test_meas_info.py": { + "a": 16, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 36, + "d": 2 + }, + "mne/io/tests/test_utils.py": { + "a": 19, + "d": 0 + }, + "mne/io/utils.py": { + "a": 48, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5634.json b/doc/sphinxext/prs/5634.json new file mode 100644 index 00000000000..5a4b85588c8 --- /dev/null +++ b/doc/sphinxext/prs/5634.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4c670aae28eb83ed1feb67525eb2b67f81b3b1f7", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 3, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 10, + "d": 11 + }, + "mne/viz/tests/test_evoked.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5638.json b/doc/sphinxext/prs/5638.json new file mode 100644 index 00000000000..aa0ebd79b7e --- /dev/null +++ b/doc/sphinxext/prs/5638.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2636aa90eb364a41931bfd31b34a03e9fc042b0e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/install_mne_c.rst": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/564.json b/doc/sphinxext/prs/564.json new file mode 100644 index 00000000000..36d74243210 --- /dev/null +++ b/doc/sphinxext/prs/564.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "52a2ce106e89446e4b693e142e2b8fc365750839", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "mne/datasets/megsim/megsim.py": { + "a": 2, + "d": 5 + }, + "mne/datasets/sample/sample.py": { + "a": 5, + "d": 6 + }, + "mne/utils.py": { + "a": 178, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5641.json b/doc/sphinxext/prs/5641.json new file mode 100644 index 00000000000..97b470b5860 --- /dev/null +++ b/doc/sphinxext/prs/5641.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "7fef2d5eb698b7a2654f1d8535778f294eff815c", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "examples/io/plot_read_proj.py": { + "a": 91, + "d": 0 + }, + "mne/viz/tests/test_utils.py": { + "a": 9, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 52, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 26, + "d": 9 + }, + "tutorials/plot_artifacts_correction_ssp.py": { + "a": 6, + "d": 0 + }, + "tutorials/plot_visualize_raw.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5647.json b/doc/sphinxext/prs/5647.json new file mode 100644 index 00000000000..b56196b8b59 --- /dev/null +++ b/doc/sphinxext/prs/5647.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "26cfc8fc85ec44021e251da40e7ac94116c0d982", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5648.json b/doc/sphinxext/prs/5648.json new file mode 100644 index 00000000000..050cb063c39 --- /dev/null +++ b/doc/sphinxext/prs/5648.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ea3549486711c50b2cc4b7b3b7fc1512f193bcc9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install_mne_python.rst": { + "a": 2, + "d": 2 + }, + "mne/utils.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5649.json b/doc/sphinxext/prs/5649.json new file mode 100644 index 00000000000..577829044ee --- /dev/null +++ b/doc/sphinxext/prs/5649.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "af89a09629689d1aea2aa64fc9d44c6195cc8561", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 3 + }, + "doc/tutorial_links.inc": { + "a": 1, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 30, + "d": 31 + }, + "mne/viz/topomap.py": { + "a": 12, + "d": 7 + }, + "tutorials/plot_background_statistics.py": { + "a": 28, + "d": 19 + }, + "tutorials/plot_stats_cluster_spatio_temporal.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_stats_cluster_time_frequency.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/565.json b/doc/sphinxext/prs/565.json new file mode 100644 index 00000000000..f8684c8c14e --- /dev/null +++ b/doc/sphinxext/prs/565.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "980acf98ced0514224ef4a9e245634fa2c9ac3b5", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/forward.py": { + "a": 3, + "d": 8 + }, + "mne/source_space.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_forward.py": { + "a": 33, + "d": 32 + }, + "mne/tests/test_label.py": { + "a": 6, + "d": 8 + }, + "mne/utils.py": { + "a": 21, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5651.json b/doc/sphinxext/prs/5651.json new file mode 100644 index 00000000000..7ed7811956c --- /dev/null +++ b/doc/sphinxext/prs/5651.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6f02d38ff83c0e078707d554054886b513ebb11e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 18, + "d": 4 + }, + "mne/filter.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 20, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5652.json b/doc/sphinxext/prs/5652.json new file mode 100644 index 00000000000..21980f44a12 --- /dev/null +++ b/doc/sphinxext/prs/5652.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bf7eba6bf372b6b5878d59aa0a241edd28a6909d", + "authors": [ + { + "n": "Johan van der Meer", + "e": null + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5656.json b/doc/sphinxext/prs/5656.json new file mode 100644 index 00000000000..c8376b8fca1 --- /dev/null +++ b/doc/sphinxext/prs/5656.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b9020db166f08093bd24215c7b6a83ed030d1402", + "authors": [ + { + "n": "Antoine 
Gauthier", + "e": "antoine.gauthier@ensta.fr" + } + ], + "changes": { + "mne/commands/mne_browse_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5658.json b/doc/sphinxext/prs/5658.json new file mode 100644 index 00000000000..13a1bb58a7f --- /dev/null +++ b/doc/sphinxext/prs/5658.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ce8df3b1a4b2c168e4dd2acddd9b43b8a2d1fb3e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 6 + }, + "tutorials/plot_background_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_background_statistics.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5659.json b/doc/sphinxext/prs/5659.json new file mode 100644 index 00000000000..550e61297fd --- /dev/null +++ b/doc/sphinxext/prs/5659.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e6ce2be09f57042e895d4affe86c62124a15f140", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 5, + "d": 1 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_import_nesting.py": { + "a": 38, + "d": 2 + }, + "mne/tests/test_report.py": { + "a": 15, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/566.json b/doc/sphinxext/prs/566.json new file mode 100644 index 00000000000..71cf5ade379 --- /dev/null +++ b/doc/sphinxext/prs/566.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bd0b01222875ad0c9b34562f25862a145bc3abe8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_viz.py": { + "a": 2, + "d": 0 + }, + "mne/viz.py": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5663.json b/doc/sphinxext/prs/5663.json new file mode 100644 index 00000000000..343c9e1a913 --- /dev/null +++ b/doc/sphinxext/prs/5663.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a77e2736f48551702ffbb7c79a50a728cfb30d22", + "authors": [ + { + "n": "Antoine Gauthier", + "e": "antoine.gauthier@ensta.fr" + } + ], + "changes": { + "mne/commands/mne_browse_raw.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5664.json b/doc/sphinxext/prs/5664.json new file mode 100644 index 00000000000..9b030e8ff27 --- /dev/null +++ b/doc/sphinxext/prs/5664.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "922d00a893cadbceae932b9c66ce952a2b890661", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 29, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 28, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5666.json b/doc/sphinxext/prs/5666.json new file mode 100644 index 00000000000..b2e91d20404 --- /dev/null +++ b/doc/sphinxext/prs/5666.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cd54354f4fc89fce84936ff9b5301d0a0d924d01", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg_gui.py": { + "a": 7, + "d": 2 + }, + "mne/gui/_viewer.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 15, + "d": 6 + 
} + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5667.json b/doc/sphinxext/prs/5667.json new file mode 100644 index 00000000000..e4c32ab1eb8 --- /dev/null +++ b/doc/sphinxext/prs/5667.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9eb86b4497125caf28fc1cf6c034ee47ccf3345b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install_mne_c.rst": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5672.json b/doc/sphinxext/prs/5672.json new file mode 100644 index 00000000000..8dc59e9ee7f --- /dev/null +++ b/doc/sphinxext/prs/5672.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e38a5737915032118a11c91abbeb62cdb99c7dd6", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 8, + "d": 6 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5676.json b/doc/sphinxext/prs/5676.json new file mode 100644 index 00000000000..4279acde07c --- /dev/null +++ b/doc/sphinxext/prs/5676.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c47f79914bc98fb042f53823b4f56e9cfe2310cf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "ignore_words.txt": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_montage.py": { + "a": 14, + "d": 6 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5677.json b/doc/sphinxext/prs/5677.json new file mode 100644 index 00000000000..7d17319e3f9 --- /dev/null +++ b/doc/sphinxext/prs/5677.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "41e308cde25bf3371e179907d117e1b7f072fd2f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "appveyor.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5679.json b/doc/sphinxext/prs/5679.json new file mode 100644 index 00000000000..206300cf2c6 --- /dev/null +++ b/doc/sphinxext/prs/5679.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b71585bac18f613ea20adf41b5ac98a09a58d98a", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 5, + "d": 0 + }, + "mne/utils.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/568.json b/doc/sphinxext/prs/568.json new file mode 100644 index 00000000000..49711f370a1 --- /dev/null +++ b/doc/sphinxext/prs/568.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "425fcf959ede4b61c60cc52bd644dda348cf779e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/open.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5681.json b/doc/sphinxext/prs/5681.json new file mode 100644 index 00000000000..4ef38603d61 --- /dev/null +++ b/doc/sphinxext/prs/5681.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "944ab2906440e248bae9670807f68a2bdfdcc98b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_opm_data.py": { + "a": 2, + "d": 2 + }, + 
"examples/visualization/plot_meg_sensors.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 6, + "d": 2 + }, + "mne/data/helmets/ARTEMIS123.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/surface.py": { + "a": 40, + "d": 8 + }, + "mne/tests/test_surface.py": { + "a": 20, + "d": 7 + }, + "mne/viz/_3d.py": { + "a": 5, + "d": 11 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5683.json b/doc/sphinxext/prs/5683.json new file mode 100644 index 00000000000..9cfa0e9b026 --- /dev/null +++ b/doc/sphinxext/prs/5683.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "daa29715cf3e4447eb3267db5a6be28aebfeb890", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/tests/test_interpolation.py": { + "a": 10, + "d": 11 + }, + "mne/cov.py": { + "a": 1, + "d": 2 + }, + "mne/epochs.py": { + "a": 1, + "d": 0 + }, + "mne/forward/_compute_forward.py": { + "a": 27, + "d": 20 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 13, + "d": 10 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/io/open.py": { + "a": 3, + "d": 3 + }, + "mne/io/pick.py": { + "a": 5, + "d": 0 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 2, + "d": 0 + }, + "mne/surface.py": { + "a": 27, + "d": 33 + }, + "mne/tests/test_coreg.py": { + "a": 12, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 4, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 67, + "d": 64 + }, + "mne/viz/tests/test_topo.py": { + "a": 8, + "d": 0 + }, + "requirements.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5687.json b/doc/sphinxext/prs/5687.json new file mode 100644 index 00000000000..6e0901c366b --- /dev/null +++ b/doc/sphinxext/prs/5687.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d28e8b5f0eb046c17acf82b8216b0d7c40189684", + "authors": [ + { + "n": "buildqa", + "e": "s1152yb68@contbay.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 3, + "d": 0 + }, + "doc/install_mne_c.rst": { + "a": 51, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5689.json b/doc/sphinxext/prs/5689.json new file mode 100644 index 00000000000..04286d6d2ce --- /dev/null +++ b/doc/sphinxext/prs/5689.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8185632046476d0ed14155b78620c922176068eb", + "authors": [ + { + "n": "Marcin Koculak", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5691.json b/doc/sphinxext/prs/5691.json new file mode 100644 index 00000000000..2c87594c025 --- /dev/null +++ b/doc/sphinxext/prs/5691.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "0f9eb55a951ff431f785b984fb8b58893ae1ebfb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/manual/matlab.rst": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 30, + "d": 1 + }, + "mne/io/proj.py": { + "a": 3, + "d": 3 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_constants.py": { + "a": 17, + "d": 3 + }, + "mne/preprocessing/maxwell.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/5692.json b/doc/sphinxext/prs/5692.json new file mode 100644 index 00000000000..cdf45344295 --- /dev/null +++ b/doc/sphinxext/prs/5692.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7ca50604d73f92ad7cfc6ea20b9b20df3e73efff", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/manual/datasets_index.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_opm_rest_data.py": { + "a": 214, + "d": 0 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5694.json b/doc/sphinxext/prs/5694.json new file mode 100644 index 00000000000..6ae2a8bb607 --- /dev/null +++ b/doc/sphinxext/prs/5694.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f0ef23913f127781d5800e22a6ed8f947d7a6e74", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5695.json b/doc/sphinxext/prs/5695.json new file mode 100644 index 00000000000..4076dc43fa5 --- /dev/null +++ b/doc/sphinxext/prs/5695.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "ae38e1b7927c8a2e4e3f46025965f0cab2e0b4ff", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 0, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 7, + "d": 3 + }, + "mne/annotations.py": { + "a": 53, + "d": 7 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 4 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 3, + "d": 4 + }, + "mne/io/edf/edf.py": { + "a": 0, + "d": 26 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 3, + "d": 3 + }, + "mne/io/eeglab/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 20, + "d": 8 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 14, + "d": 6 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5696.json b/doc/sphinxext/prs/5696.json new file mode 100644 index 00000000000..188234db916 --- /dev/null +++ b/doc/sphinxext/prs/5696.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "49f6131955676e8aec5c9b17c328483a16564276", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 1, + "d": 22 + }, + "mne/viz/tests/test_utils.py": { + "a": 19, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 46, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5697.json b/doc/sphinxext/prs/5697.json new file mode 100644 index 00000000000..ca9812c4525 --- /dev/null +++ b/doc/sphinxext/prs/5697.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a7acc27894df173c295e65eb98ecdb4d670eaa67", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "environment.yml": { + "a": 0, + "d": 1 + }, + "requirements.txt": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5698.json b/doc/sphinxext/prs/5698.json new file mode 100644 index 00000000000..b6cb3ea961f --- /dev/null 
+++ b/doc/sphinxext/prs/5698.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8886f42e8a28d4987849802bc568003c0f44b9e1", + "authors": [ + { + "n": "David Sabbagh", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 0 + }, + "examples/decoding/plot_decoding_spoc_CMC.py": { + "a": 3, + "d": 3 + }, + "mne/cov.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5699.json b/doc/sphinxext/prs/5699.json new file mode 100644 index 00000000000..e058a48113c --- /dev/null +++ b/doc/sphinxext/prs/5699.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f2be3438a8f2ea8a2622ec693c7556354485de5a", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 125, + "d": 24 + }, + "mne/tests/test_annotations.py": { + "a": 124, + "d": 1 + }, + "tutorials/plot_object_annotations.py": { + "a": 14, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/57.json b/doc/sphinxext/prs/57.json new file mode 100644 index 00000000000..fc8b52fd015 --- /dev/null +++ b/doc/sphinxext/prs/57.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2bca78be7c9834179f1faccd97752fca9fe86344", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/570.json b/doc/sphinxext/prs/570.json new file mode 100644 index 00000000000..7308f1e3775 --- /dev/null +++ b/doc/sphinxext/prs/570.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "01cbb6f4113bb069a0e1c61275af8438fef802b9", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/utils.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5700.json b/doc/sphinxext/prs/5700.json new file mode 100644 index 00000000000..4e405213a02 --- /dev/null +++ b/doc/sphinxext/prs/5700.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "0483a394f73db39b6ac28bed093aa5b5ec6c38ce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 3, + "d": 2 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/mtrf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 14, + "d": 6 + }, + "mne/datasets/utils.py": { + "a": 42, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5701.json b/doc/sphinxext/prs/5701.json new file mode 100644 index 00000000000..3da7008bfe9 --- /dev/null +++ b/doc/sphinxext/prs/5701.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "32710eef50fae631c8514bc2dcd3c4fac3f8a074", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5702.json b/doc/sphinxext/prs/5702.json new file mode 100644 index 00000000000..b2994461146 --- /dev/null +++ b/doc/sphinxext/prs/5702.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "90ebb365e77e6d34c9b2b92db2ac177f89ee2dc1", + "authors": [ + { + "n": 
"Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 21, + "d": 15 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5705.json b/doc/sphinxext/prs/5705.json new file mode 100644 index 00000000000..775f3c4d1c4 --- /dev/null +++ b/doc/sphinxext/prs/5705.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b5dcc7a89d87cca33572478e73c8dc445c47e7f0", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "tutorials/plot_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5707.json b/doc/sphinxext/prs/5707.json new file mode 100644 index 00000000000..6fbb5ca2524 --- /dev/null +++ b/doc/sphinxext/prs/5707.json @@ -0,0 +1,207 @@ +{ + "merge_commit_sha": "908ef204f4110c1b003089d60fd259fa5faaf951", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 1 + }, + ".mailmap": { + "a": 3, + "d": 0 + }, + "doc/Makefile": { + "a": 2, + "d": 8 + }, + "doc/carousel.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 67, + "d": 14 + }, + "doc/contributing.rst": { + "a": 8, + "d": 3 + }, + "doc/documentation.rst": { + "a": 0, + "d": 2 + }, + "doc/manual/channel_interpolation.rst": { + "a": 1, + "d": 5 + }, + "doc/manual/decoding.rst": { + "a": 0, + "d": 200 + }, + "doc/python_reference.rst": { + "a": 0, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 16, + "d": 2 + }, + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 9, + "d": 10 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/plot_megsim_data.py": { + "a": 2, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_space.py": { + "a": 0, + "d": 113 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 13, + "d": 10 + }, + "examples/decoding/plot_decoding_spoc_CMC.py": { + "a": 2, + "d": 1 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 2, + "d": 1 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 7, + "d": 11 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 7, + "d": 2 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 3, + "d": 2 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 31, + "d": 39 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_read_and_write_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_otp.py": { + "a": 2, + "d": 2 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 14, + "d": 14 + }, + "examples/visualization/plot_xhemi.py": { + "a": 2, + "d": 1 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 2, + "d": 2 + }, + "mne/connectivity/effective.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/csp.py": { + "a": 9, + "d": 3 + }, + "mne/decoding/transformer.py": { + "a": 7, + "d": 0 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 1, + "d": 2 + }, + "mne/io/proj.py": { + "a": 5, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 3 + }, + "mne/utils.py": { 
+ "a": 13, + "d": 4 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 9, + "d": 9 + }, + "setup.cfg": { + "a": 1, + "d": 0 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_morph_stc.py": { + "a": 0, + "d": 12 + }, + "tutorials/plot_receptive_field.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 313, + "d": 46 + }, + "tutorials/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_visualize_stc.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5710.json b/doc/sphinxext/prs/5710.json new file mode 100644 index 00000000000..ee2598437c5 --- /dev/null +++ b/doc/sphinxext/prs/5710.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a399a006b90daaf698bb5ca977dad5e703e5a54", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "tutorials/plot_object_annotations.py": { + "a": 44, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5712.json b/doc/sphinxext/prs/5712.json new file mode 100644 index 00000000000..c2d0b8a7720 --- /dev/null +++ b/doc/sphinxext/prs/5712.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "b064e1db4b4aa5aeff09b1853006c58ebdc2ef06", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 29, + "d": 13 + }, + "mne/annotations.py": { + "a": 4, + "d": 2 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 87, + "d": 49 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 51, + "d": 43 + }, + "mne/io/edf/edf.py": { + "a": 31, + "d": 10 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 13, + "d": 5 + }, + "mne/io/eeglab/eeglab.py": { + "a": 85, + "d": 51 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 26, + "d": 18 + }, + "mne/io/utils.py": { + "a": 11, + "d": 0 + }, + "mne/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5713.json b/doc/sphinxext/prs/5713.json new file mode 100644 index 00000000000..b9704f7eed1 --- /dev/null +++ b/doc/sphinxext/prs/5713.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "92ba16db1db88831a9a9ce618592943d706bad8a", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 44, + "d": 15 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5714.json b/doc/sphinxext/prs/5714.json new file mode 100644 index 00000000000..bd45f38980e --- /dev/null +++ b/doc/sphinxext/prs/5714.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9db64d818d48d91da5ec1d790822c72326fcf5e7", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 18, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5715.json b/doc/sphinxext/prs/5715.json new file mode 100644 index 00000000000..69dd5e7509b --- /dev/null +++ b/doc/sphinxext/prs/5715.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a6762914cc158cbac298f2a9f257cbb20320918a", + "authors": [ + { + "n": "Eric 
Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "setup.cfg": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5716.json b/doc/sphinxext/prs/5716.json new file mode 100644 index 00000000000..859d8ba4a76 --- /dev/null +++ b/doc/sphinxext/prs/5716.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "56511470c630305ef84895ee6e80043dbecfe8e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 3, + "d": 2 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 4, + "d": 2 + }, + "mne/utils.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 25, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5717.json b/doc/sphinxext/prs/5717.json new file mode 100644 index 00000000000..3b1677acde8 --- /dev/null +++ b/doc/sphinxext/prs/5717.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "57272be1d342e1058670ebac0e3ab292deb56767", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5718.json b/doc/sphinxext/prs/5718.json new file mode 100644 index 00000000000..61837c2a7a3 --- /dev/null +++ b/doc/sphinxext/prs/5718.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "b2de2e9a192301e4dfb106cef3963d1283d74024", + "authors": [ + { + "n": "Stanislas Chambon", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 0 + }, + ".gitignore": { + "a": 1, + "d": 0 + }, + ".travis.yml": { + "a": 3, + "d": 3 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/manual/datasets_index.rst": { + "a": 19, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 7, + "d": 0 + }, + "environment.yml": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 3, + "d": 5 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 201, + "d": 0 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 110, + "d": 0 + }, + "mne/datasets/sleep_physionet/age_records.csv": { + "a": 79, + "d": 0 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 107, + "d": 0 + }, + "mne/datasets/sleep_physionet/temazepam_records.csv": { + "a": 23, + "d": 0 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 161, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 4, + "d": 5 + }, + "mne/tests/test_utils.py": { + "a": 29, + "d": 4 + }, + "mne/utils.py": { + "a": 38, + "d": 6 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "tutorials/plot_sleep.py": { + "a": 306, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/572.json b/doc/sphinxext/prs/572.json new file mode 100644 index 00000000000..43dc921df23 --- /dev/null +++ b/doc/sphinxext/prs/572.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8c8867a55faa43ed68b4a0408f17bd9a1d5cc857", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 87, + "d": 39 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5723.json b/doc/sphinxext/prs/5723.json new file mode 100644 index 00000000000..ce2981cbb4a --- /dev/null +++ b/doc/sphinxext/prs/5723.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5b1ac27c54b280bda065f7741953e829b0f03305", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + "doc/_templates/navbar.html": { + "a": 2, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5724.json b/doc/sphinxext/prs/5724.json new file mode 100644 index 00000000000..91af75269a7 --- /dev/null +++ b/doc/sphinxext/prs/5724.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c32f1363ed76be5cd96994c620a4853f52833194", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 4, + "d": 1 + }, + "README.rst": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5725.json b/doc/sphinxext/prs/5725.json new file mode 100644 index 00000000000..b0d7c0679e2 --- /dev/null +++ b/doc/sphinxext/prs/5725.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "dcc8ccf51e4fe024668d6f5c715b50b67646076b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/datasets/spm_faces_dataset.py": { + "a": 9, + "d": 3 + }, + "examples/decoding/decoding_rsa.py": { + "a": 5, + "d": 2 + }, + "mne/data/helmets/CTF_275.fif.gz": { + "a": 0, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5726.json b/doc/sphinxext/prs/5726.json new file mode 100644 index 00000000000..35a3de058b2 --- /dev/null +++ b/doc/sphinxext/prs/5726.json @@ -0,0 +1,531 @@ +{ + "merge_commit_sha": "d7c8e865fbfe2dc8854956a446005b05a82300c6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + ".travis.yml": { + "a": 3, + "d": 3 + }, + "README.rst": { + "a": 6, + "d": 6 + }, + "doc/configure_git.rst": { + "a": 2, + "d": 1 + }, + "doc/install_mne_python.rst": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/gen_commands.py": { + "a": 0, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 10, + "d": 11 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 3 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 2 + }, + "mne/bem.py": { + "a": 12, + "d": 13 + }, + "mne/channels/channels.py": { + "a": 5, + "d": 6 + }, + "mne/channels/layout.py": { + "a": 0, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 5, + "d": 8 + }, + "mne/chpi.py": { + "a": 1, + "d": 2 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 0, + "d": 2 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 1, + "d": 4 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 1, + "d": 4 + }, + "mne/commands/mne_flash_bem.py": { + "a": 0, + "d": 2 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 0, + "d": 2 + }, 
+ "mne/commands/mne_make_scalp_surfaces.py": { + "a": 0, + "d": 2 + }, + "mne/commands/mne_surf2bem.py": { + "a": 0, + "d": 1 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 0, + "d": 1 + }, + "mne/connectivity/effective.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 2 + }, + "mne/coreg.py": { + "a": 3, + "d": 5 + }, + "mne/cov.py": { + "a": 6, + "d": 8 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 3 + }, + "mne/decoding/mixin.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 2 + }, + "mne/dipole.py": { + "a": 3, + "d": 4 + }, + "mne/epochs.py": { + "a": 11, + "d": 13 + }, + "mne/event.py": { + "a": 1, + "d": 3 + }, + "mne/evoked.py": { + "a": 2, + "d": 4 + }, + "mne/externals/__init__.py": { + "a": 0, + "d": 6 + }, + "mne/externals/six.py": { + "a": 0, + "d": 577 + }, + "mne/externals/tempita/__init__.py": { + "a": 8, + "d": 8 + }, + "mne/filter.py": { + "a": 9, + "d": 10 + }, + "mne/fixes.py": { + "a": 18, + "d": 579 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 4, + "d": 5 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 4 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 0, + "d": 1 + }, + "mne/gui/_coreg_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_fiducials_gui.py": { + "a": 0, + "d": 1 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 2, + "d": 3 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 0, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 0, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 0, + "d": 2 + }, + "mne/io/base.py": { + "a": 9, + "d": 10 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 3 + }, + "mne/io/bti/bti.py": { + "a": 13, + "d": 13 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 7, + "d": 7 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 11, + "d": 6 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 3 + }, + "mne/io/eeglab/eeglab.py": { + "a": 8, + "d": 9 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 3, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 6, + "d": 7 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 14, + "d": 15 + }, + "mne/io/open.py": { + "a": 5, + "d": 6 + }, + "mne/io/pick.py": { + "a": 2, + "d": 3 + }, + "mne/io/proc_history.py": { + "a": 3, + "d": 5 + }, + "mne/io/proj.py": { + "a": 2, + "d": 3 + }, + "mne/io/tag.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 2 + }, + "mne/io/utils.py": { + "a": 2, + "d": 3 + }, + "mne/io/write.py": { + "a": 4, + "d": 5 + }, + "mne/label.py": { + "a": 14, + "d": 16 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 3, + "d": 6 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 2 + }, + "mne/morph.py": { + "a": 3, + "d": 4 + }, + "mne/parallel.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 2 + }, + 
"mne/preprocessing/ica.py": { + "a": 5, + "d": 8 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/maxwell.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 5, + "d": 6 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/xdawn.py": { + "a": 2, + "d": 3 + }, + "mne/realtime/client.py": { + "a": 1, + "d": 3 + }, + "mne/realtime/stim_server_client.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 5, + "d": 5 + }, + "mne/realtime/tests/test_stim_client_server.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/raw.py": { + "a": 3, + "d": 4 + }, + "mne/simulation/source.py": { + "a": 1, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 3 + }, + "mne/source_space.py": { + "a": 7, + "d": 8 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 2 + }, + "mne/stats/parametric.py": { + "a": 3, + "d": 4 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 2 + }, + "mne/surface.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 4 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_fixes.py": { + "a": 2, + "d": 17 + }, + "mne/tests/test_import_nesting.py": { + "a": 0, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 4, + "d": 5 + }, + "mne/tests/test_source_space.py": { + "a": 0, + "d": 3 + }, + "mne/tests/test_surface.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 2 + }, + "mne/transforms.py": { + "a": 7, + "d": 8 + }, + "mne/utils.py": { + "a": 20, + "d": 18 + }, + "mne/viz/_3d.py": { + "a": 11, + "d": 12 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 4 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 8, + "d": 10 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 3 + }, + "mne/viz/misc.py": { + "a": 3, + "d": 6 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 0, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 5, + "d": 7 + }, + "setup.cfg": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5727.json b/doc/sphinxext/prs/5727.json new file mode 100644 index 00000000000..1c5872e2a6c --- /dev/null +++ b/doc/sphinxext/prs/5727.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "531cb0afaa0313c51fef555a3bc377b0df11585c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_kit2fiff_gui.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5729.json b/doc/sphinxext/prs/5729.json new file mode 100644 index 00000000000..ac2fcc69559 --- /dev/null +++ b/doc/sphinxext/prs/5729.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9220f501dae4ae5eef25ebeadea51be96df890c3", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "AUTHORS.rst": { + "a": 0, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/573.json b/doc/sphinxext/prs/573.json new file mode 100644 index 00000000000..173e75b2923 --- /dev/null +++ b/doc/sphinxext/prs/573.json @@ -0,0 +1,15 @@ +{ + 
"merge_commit_sha": "196f09fffd30c3b0748c49aae8599ccb319c26ab", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "setup.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5730.json b/doc/sphinxext/prs/5730.json new file mode 100644 index 00000000000..95d200c099f --- /dev/null +++ b/doc/sphinxext/prs/5730.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2a9a0261d1fc27f1972efed13ed7287e9906ac7b", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_parcellation.py": { + "a": 17, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 3, + "d": 1 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 36, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 40, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5733.json b/doc/sphinxext/prs/5733.json new file mode 100644 index 00000000000..466f7ca1453 --- /dev/null +++ b/doc/sphinxext/prs/5733.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "ac2157d7ff204cfc81771dafd195310ef1e10bd1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 2, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 5, + "d": 7 + }, + "mne/cov.py": { + "a": 165, + "d": 163 + }, + "mne/decoding/csp.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/io/base.py": { + "a": 2, + "d": 22 + }, + "mne/io/brainvision/brainvision.py": { + "a": 18, + "d": 275 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 18, + "d": 234 + }, + "mne/io/eeglab/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 17, + "d": 313 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 16, + "d": 112 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 24 + }, + "mne/io/utils.py": { + "a": 3, + "d": 8 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 4, + "d": 5 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 11 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 3 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 0, + "d": 224 + }, + "mne/stats/parametric.py": { + "a": 1, + "d": 9 + }, + "mne/tests/test_cov.py": { + "a": 41, + "d": 40 + }, + "mne/tests/test_morph.py": { + "a": 5, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/574.json b/doc/sphinxext/prs/574.json new file mode 100644 index 00000000000..5e1f260a74a --- /dev/null +++ b/doc/sphinxext/prs/574.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f91a3e163c052ad2714037f223794ab6c24d1fd2", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 155, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5741.json b/doc/sphinxext/prs/5741.json new file mode 100644 index 00000000000..611103a6746 --- /dev/null +++ b/doc/sphinxext/prs/5741.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f85de59c9e052e6282c23aeb1f71fc2847ba84cd", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 167, + "d": 339 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 115, + "d": 259 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 10, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5742.json b/doc/sphinxext/prs/5742.json new file mode 100644 index 00000000000..060e5b6930f --- /dev/null +++ b/doc/sphinxext/prs/5742.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e4ed5fd91c93a69a7d34e9d304a2fc06494f9099", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/tutorials/philosophy.rst": { + "a": 11, + "d": 11 + }, + "tutorials/plot_creating_data_structures.py": { + "a": 12, + "d": 12 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_info.py": { + "a": 3, + "d": 5 + }, + "tutorials/plot_object_raw.py": { + "a": 19, + "d": 21 + }, + "tutorials/plot_point_spread.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5743.json b/doc/sphinxext/prs/5743.json new file mode 100644 index 00000000000..8f5832df5f5 --- /dev/null +++ b/doc/sphinxext/prs/5743.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7ddeded7213f0f23707382eb1455f761f4a50957", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/contributing.rst": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5747.json b/doc/sphinxext/prs/5747.json new file mode 100644 index 00000000000..5c9616bb8f5 --- /dev/null +++ b/doc/sphinxext/prs/5747.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "400f5389c8dbaafa97e0c8f0c66c4d38b7720a60", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/io/fiff/raw.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5748.json b/doc/sphinxext/prs/5748.json new file mode 100644 index 00000000000..b909a685fa4 --- /dev/null +++ b/doc/sphinxext/prs/5748.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3bd81a08f2ba4a837d5a078c5b666d15a0434fc1", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 34, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/575.json b/doc/sphinxext/prs/575.json new file mode 100644 index 00000000000..e0243ba06e9 --- /dev/null +++ b/doc/sphinxext/prs/575.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6bd182407fda06e7bfe24d76b46dfb4046243311", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/layouts/layout.py": { + "a": 26, + "d": 0 + }, + "mne/viz.py": { + "a": 19, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5752.json b/doc/sphinxext/prs/5752.json new file 
mode 100644 index 00000000000..e1d7383fd70 --- /dev/null +++ b/doc/sphinxext/prs/5752.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d5c52ec6d978be1c6c011884b7fa17134a5ae698", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5754.json b/doc/sphinxext/prs/5754.json new file mode 100644 index 00000000000..0e37b7d704c --- /dev/null +++ b/doc/sphinxext/prs/5754.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "ef1929f4f71d1cef78eab43e67a79a2ce8f7a736", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 70, + "d": 14 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 18, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 19, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 175, + "d": 43 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 10, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5756.json b/doc/sphinxext/prs/5756.json new file mode 100644 index 00000000000..74bbbbb06a0 --- /dev/null +++ b/doc/sphinxext/prs/5756.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "8dc6485155f3571325a9f1cde3299cd2288ecf8e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 4, + "d": 4 + }, + "appveyor.yml": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/gen_commands.py": { + "a": 4, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 84, + "d": 72 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 28, + "d": 24 + }, + "mne/commands/mne_flash_bem.py": { + "a": 7, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 29, + "d": 12 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 1, + "d": 0 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_import_nesting.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_source_space.py": { + "a": 2, + "d": 0 + }, + "requirements.txt": { + "a": 2, + "d": 0 + }, + "setup.cfg": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5762.json b/doc/sphinxext/prs/5762.json new file mode 100644 index 00000000000..3544306ccf7 --- /dev/null +++ b/doc/sphinxext/prs/5762.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "87c0563c86677be75fc23f3e372492a5c9b3ce9d", + "authors": [ + { + "n": "Keith Doelling", + "e": "keith.doelling@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 14, + "d": 11 + }, + "mne/epochs.py": { + "a": 2, + "d": 346 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 64, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 38, + "d": 29 + }, + "mne/utils.py": { + "a": 354, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5765.json b/doc/sphinxext/prs/5765.json new file mode 100644 index 
00000000000..ebc26869c62 --- /dev/null +++ b/doc/sphinxext/prs/5765.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "71551ea8d0e5568473fa0865aaf0f46b0780476f", + "authors": [ + { + "n": "David Haslacher", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 8, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5768.json b/doc/sphinxext/prs/5768.json new file mode 100644 index 00000000000..fdef6faa93d --- /dev/null +++ b/doc/sphinxext/prs/5768.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d23e624ced15b9314b11199a42b39a33863aa03e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/contributing.rst": { + "a": 29, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/577.json b/doc/sphinxext/prs/577.json new file mode 100644 index 00000000000..bbd8b1c4c3f --- /dev/null +++ b/doc/sphinxext/prs/577.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "da9bf190a491c426daf40c599fbf9943e1f29cca", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5770.json b/doc/sphinxext/prs/5770.json new file mode 100644 index 00000000000..1e3430e1ac3 --- /dev/null +++ b/doc/sphinxext/prs/5770.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f830351301282f2e350ae600046a2f6169a96927", + "authors": [ + { + "n": "Cristóbal Moënne-Loccoz", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5774.json b/doc/sphinxext/prs/5774.json new file mode 100644 index 00000000000..4a8e51de5c0 --- /dev/null +++ b/doc/sphinxext/prs/5774.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e96d706b65b6c6a21821d3dcbc06e10903e9c05b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_coreg.py": { + "a": 6, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 12, + "d": 3 + }, + "mne/gui/_coreg_gui.py": { + "a": 36, + "d": 8 + }, + "mne/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5775.json b/doc/sphinxext/prs/5775.json new file mode 100644 index 00000000000..b4c0f30419c --- /dev/null +++ b/doc/sphinxext/prs/5775.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b5860aa43569acaa59368605fa1ae4988ad027ea", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 4, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 4, + "d": 2 + }, + "mne/utils.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5776.json b/doc/sphinxext/prs/5776.json new file mode 100644 index 00000000000..eb25ad0a043 --- /dev/null +++ b/doc/sphinxext/prs/5776.json @@ -0,0 +1,139 @@ +{ + "merge_commit_sha": "48aa54ad3277b58afdec20b7bcba603f0abdbddb", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 4, + "d": 2 + }, + "doc/glossary.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/appendix/bem_model.rst": { + "a": 1, + "d": 1 
+ }, + "doc/manual/appendix/c_EULA.rst": { + "a": 2, + "d": 1 + }, + "doc/manual/appendix/c_misc.rst": { + "a": 2, + "d": 0 + }, + "doc/manual/appendix/c_release_notes.rst": { + "a": 4, + "d": 4 + }, + "doc/manual/c_reference.rst": { + "a": 60, + "d": 58 + }, + "doc/manual/cookbook.rst": { + "a": 12, + "d": 6 + }, + "doc/manual/gui/analyze.rst": { + "a": 4, + "d": 4 + }, + "doc/manual/gui/browse.rst": { + "a": 3, + "d": 3 + }, + "doc/manual/io.rst": { + "a": 2, + "d": 2 + }, + "doc/manual/matlab.rst": { + "a": 5, + "d": 4 + }, + "doc/manual/sample_dataset.rst": { + "a": 4, + "d": 4 + }, + "doc/manual/source_localization/c_forward.rst": { + "a": 891, + "d": 0 + }, + "doc/manual/source_localization/c_inverse.rst": { + "a": 535, + "d": 0 + }, + "doc/manual/source_localization/c_morph.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/source_localization/coil_geometries.rst": { + "a": 266, + "d": 0 + }, + "doc/manual/source_localization/forward.rst": { + "a": 59, + "d": 516 + }, + "doc/manual/source_localization/inverse.rst": { + "a": 45, + "d": 140 + }, + "doc/manual/source_localization/morph_stc.rst": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 7, + "d": 0 + }, + "tutorials/plot_background_freesurfer.py": { + "a": 2, + "d": 3 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_ctf.py": { + "a": 6, + "d": 2 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 5, + "d": 3 + }, + "tutorials/plot_compute_covariance.py": { + "a": 4, + "d": 2 + }, + "tutorials/plot_dipole_orientations.py": { + "a": 8, + "d": 0 + }, + "tutorials/plot_forward.py": { + "a": 23, + "d": 6 + }, + "tutorials/plot_object_source_estimate.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_source_alignment.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5777.json b/doc/sphinxext/prs/5777.json new file mode 100644 index 00000000000..cc513f7b2c7 --- /dev/null +++ b/doc/sphinxext/prs/5777.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4b4e0630a6cf8577b16f66e62c544226c92ae232", + "authors": [ + { + "n": "Simon Kern", + "e": null + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/578.json b/doc/sphinxext/prs/578.json new file mode 100644 index 00000000000..31e1b1c79d6 --- /dev/null +++ b/doc/sphinxext/prs/578.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "9cc08caf7878ac52d0f528fc350c513a68b26845", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 4, + "d": 3 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 1 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 66, + "d": 0 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/__init__.py": { + "a": 8, + "d": 0 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 304, + "d": 0 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 10, + "d": 4 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 
51, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/mixed_norm/__init__.py": { + "a": 5, + "d": 5 + }, + "mne/viz.py": { + "a": 6, + "d": 2 + }, + "setup.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5780.json b/doc/sphinxext/prs/5780.json new file mode 100644 index 00000000000..cc9455a94c4 --- /dev/null +++ b/doc/sphinxext/prs/5780.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "aea196ef6dcf43dbda5ca16d40704eed9b0db1ee", + "authors": [ + { + "n": "Cristóbal Moënne-Loccoz", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/reference.py": { + "a": 9, + "d": 9 + }, + "mne/io/tests/test_reference.py": { + "a": 47, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5783.json b/doc/sphinxext/prs/5783.json new file mode 100644 index 00000000000..a89e708d6e7 --- /dev/null +++ b/doc/sphinxext/prs/5783.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "857de6e1c1e66f1f8b7adad0d98529ab744d6313", + "authors": [ + { + "n": "Jon Houck", + "e": null + } + ], + "changes": { + "examples/decoding/plot_decoding_spoc_CMC.py": { + "a": 10, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5786.json b/doc/sphinxext/prs/5786.json new file mode 100644 index 00000000000..8cc4fe499a7 --- /dev/null +++ b/doc/sphinxext/prs/5786.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "91bf597e0f18259f25d0969a2f0956b17c3d8889", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5787.json b/doc/sphinxext/prs/5787.json new file mode 100644 index 00000000000..46df346e89b --- /dev/null +++ b/doc/sphinxext/prs/5787.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "05afda7778f1746030785dccb9f86be5279c8613", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + ".travis.yml": { + "a": 1, + "d": 1 + }, + "appveyor.yml": { + "a": 2, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 2 + }, + "requirements.txt": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5788.json b/doc/sphinxext/prs/5788.json new file mode 100644 index 00000000000..7bbf445dc32 --- /dev/null +++ b/doc/sphinxext/prs/5788.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "fa91217b895957c4106d6175e27d72a48f4f05dc", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 5, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 36, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/579.json b/doc/sphinxext/prs/579.json new file mode 100644 index 00000000000..ac196d60a1d --- /dev/null +++ b/doc/sphinxext/prs/579.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "025bc08e7bcf283f780b794c69d1033a15926016", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/raw.py": { 
+ "a": 28, + "d": 31 + }, + "mne/fiff/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/stim.py": { + "a": 4, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 5, + "d": 1 + }, + "mne/viz.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5791.json b/doc/sphinxext/prs/5791.json new file mode 100644 index 00000000000..c9bb48bc0d3 --- /dev/null +++ b/doc/sphinxext/prs/5791.json @@ -0,0 +1,147 @@ +{ + "merge_commit_sha": "382205d1f2074987b2540464b83d90f4785fc6cb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 175, + "d": 63 + }, + "doc/Makefile": { + "a": 6, + "d": 6 + }, + "doc/conf.py": { + "a": 9, + "d": 3 + }, + "doc/contributing.rst": { + "a": 2, + "d": 2 + }, + "doc/documentation.rst": { + "a": 1, + "d": 1 + }, + "doc/faq.rst": { + "a": 6, + "d": 6 + }, + "doc/git_links.inc": { + "a": 9, + "d": 9 + }, + "doc/install_mne_c.rst": { + "a": 6, + "d": 6 + }, + "doc/known_projects.inc": { + "a": 3, + "d": 3 + }, + "doc/manual/appendix/bem_model.rst": { + "a": 1, + "d": 1 + }, + "doc/manual/datasets_index.rst": { + "a": 4, + "d": 4 + }, + "doc/manual/io.rst": { + "a": 2, + "d": 2 + }, + "doc/mne_cpp.rst": { + "a": 19, + "d": 19 + }, + "doc/python_reference.rst": { + "a": 2, + "d": 2 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_receptive_field_mtrf.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 2 + }, + "mne/stats/parametric.py": { + "a": 0, + "d": 1 + }, + "tutorials/plot_background_filtering.py": { + "a": 15, + "d": 13 + }, + "tutorials/plot_brainstorm_auditory.py": { + "a": 3, + "d": 2 + }, + "tutorials/plot_brainstorm_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_python_intro.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 5, + "d": 5 + }, + "tutorials/plot_source_alignment.py": { + "a": 4, + "d": 3 + }, + "tutorials/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5793.json b/doc/sphinxext/prs/5793.json new file mode 100644 index 00000000000..7e58e2f4b56 --- /dev/null +++ b/doc/sphinxext/prs/5793.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "92e8becbc2afedaf0ffde3d6933e042396d7eb73", + "authors": [ + { + "n": "Chris Holdgraf", + "e": null + } + ], + "changes": { + "logo/logo_hex.svg": { + "a": 189, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5795.json b/doc/sphinxext/prs/5795.json new file mode 100644 index 00000000000..2d20a6026d1 --- /dev/null +++ b/doc/sphinxext/prs/5795.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e3ef29e667391e6977ea6b0e0861893e91054d95", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + 
"changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/annotations.py": { + "a": 71, + "d": 40 + }, + "mne/tests/test_annotations.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5796.json b/doc/sphinxext/prs/5796.json new file mode 100644 index 00000000000..8466755147f --- /dev/null +++ b/doc/sphinxext/prs/5796.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "76ccce401f9efaf1b9f40bfe487ea04ea0486d4f", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "tutorials/plot_object_evoked.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5799.json b/doc/sphinxext/prs/5799.json new file mode 100644 index 00000000000..d9182fd3b66 --- /dev/null +++ b/doc/sphinxext/prs/5799.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "58bc99fc1704a4654198b05ff19110b9f9252189", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 3, + "d": 5 + }, + "examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 3, + "d": 5 + }, + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/58.json b/doc/sphinxext/prs/58.json new file mode 100644 index 00000000000..d0752c5e670 --- /dev/null +++ b/doc/sphinxext/prs/58.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6778ce339d577988f25d96360c3b7950a60c508c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 2, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 38, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/580.json b/doc/sphinxext/prs/580.json new file mode 100644 index 00000000000..fc03d1e8903 --- /dev/null +++ b/doc/sphinxext/prs/580.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "947bf7d8d1ea065458f81c94f6ea5382354b7259", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 3, + "d": 3 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 280, + "d": 0 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 223, + "d": 0 + }, + "mne/stats/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 7, + "d": 0 + }, + "mne/stats/parametric.py": { + "a": 171, + "d": 1 + }, + "mne/stats/tests/test_parametric.py": { + "a": 68, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5800.json b/doc/sphinxext/prs/5800.json new file mode 100644 index 00000000000..076c4353ccb --- /dev/null +++ b/doc/sphinxext/prs/5800.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e2c538b923cb05be626419ddcca3b112d7306d51", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 24, + "d": 8 + }, + "mne/tests/test_annotations.py": { + "a": 68, + "d": 0 + }, + "tutorials/plot_object_annotations.py": { + "a": 59, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5801.json b/doc/sphinxext/prs/5801.json new file mode 100644 index 00000000000..54745148782 --- /dev/null +++ b/doc/sphinxext/prs/5801.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3c0fdee5e71f5a6004c0093493c7c576753040a8", + "authors": [ + { + "n": "Keith Doelling", + "e": "keith.doelling@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 9, + "d": 37 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 22, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 19, + "d": 5 + }, + "mne/utils.py": { + "a": 53, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5804.json b/doc/sphinxext/prs/5804.json new file mode 100644 index 00000000000..8e45bd4833a --- /dev/null +++ b/doc/sphinxext/prs/5804.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "10209218a4e169453f4d9eeadd1cb8a37a80c5a4", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/faq.rst": { + "a": 11, + "d": 11 + }, + "doc/glossary.rst": { + "a": 42, + "d": 35 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 7, + "d": 7 + }, + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/plot_object_evoked.py": { + "a": 3, + "d": 2 + }, + "tutorials/source-modeling/plot_compute_covariance.py": { + "a": 10, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5805.json b/doc/sphinxext/prs/5805.json new file mode 100644 index 00000000000..243b3dcad13 --- /dev/null +++ b/doc/sphinxext/prs/5805.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "d4c24cb24cde540572d93f9541ee0c5f4f16a728", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 6, + "d": 1 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/event.py": { + "a": 12, + "d": 2 + }, + "mne/fixes.py": { + "a": 60, + "d": 0 + }, + "mne/label.py": { + "a": 183, + "d": 19 + }, + "mne/source_estimate.py": { + "a": 27, + "d": 38 + }, + "mne/tests/test_event.py": { + "a": 11, + "d": 0 + 
}, + "mne/tests/test_label.py": { + "a": 53, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5807.json b/doc/sphinxext/prs/5807.json new file mode 100644 index 00000000000..4fce2630c83 --- /dev/null +++ b/doc/sphinxext/prs/5807.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "49fd91882f52130ff66c613c2178bf7d981d1bd5", + "authors": [ + { + "n": "Jevri Hanna", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 170, + "d": 58 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 25, + "d": 10 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5808.json b/doc/sphinxext/prs/5808.json new file mode 100644 index 00000000000..82d8faf6761 --- /dev/null +++ b/doc/sphinxext/prs/5808.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6a40c41fb83d9b912eea1bfe363a383f0ef8f1c7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/filter.py": { + "a": 4, + "d": 2 + }, + "mne/fixes.py": { + "a": 156, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 7, + "d": 139 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5809.json b/doc/sphinxext/prs/5809.json new file mode 100644 index 00000000000..f9a504195c1 --- /dev/null +++ b/doc/sphinxext/prs/5809.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0ef0ffdc2aa91fe2028d3e922553c039d80dbd19", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/externals/pymatreader/utils.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/581.json b/doc/sphinxext/prs/581.json new file mode 100644 index 00000000000..e2c6d996c7f --- /dev/null +++ b/doc/sphinxext/prs/581.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5ae3e54dfb403d8d3954d297d23f6df061f37a23", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 25, + "d": 12 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 5, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 95, + "d": 9 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 24, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5810.json b/doc/sphinxext/prs/5810.json new file mode 100644 index 00000000000..8dbc50d02a8 --- /dev/null +++ b/doc/sphinxext/prs/5810.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "abe827fa9dd5334ec0dc7cc70d8381d7efd3881c", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 7, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5811.json b/doc/sphinxext/prs/5811.json new file mode 100644 index 00000000000..bb3e3edb4a0 --- /dev/null +++ b/doc/sphinxext/prs/5811.json @@ -0,0 +1,127 @@ +{ + "merge_commit_sha": "1532f98616ec875a2414b2631b0b8f7bc5e4b693", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + 
"conftest.py": { + "a": 20, + "d": 0 + }, + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 3 + }, + "mne/commands/tests/test_commands.py": { + "a": 0, + "d": 3 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 23, + "d": 25 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 4 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 48, + "d": 38 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 6 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 1, + "d": 5 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 0, + "d": 4 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_import_nesting.py": { + "a": 0, + "d": 45 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 6 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 3, + "d": 8 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 7 + }, + "mne/viz/tests/test_circle.py": { + "a": 1, + "d": 5 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 9 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 10 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 11 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 11 + }, + "mne/viz/tests/test_montage.py": { + "a": 1, + "d": 5 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 10 + }, + "mne/viz/tests/test_topo.py": { + "a": 2, + "d": 10 + }, + "mne/viz/tests/test_topomap.py": { + "a": 10, + "d": 16 + }, + "mne/viz/tests/test_utils.py": { + "a": 2, + "d": 9 + }, + "setup.cfg": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5813.json b/doc/sphinxext/prs/5813.json new file mode 100644 index 00000000000..f2e3e64e76c --- /dev/null +++ b/doc/sphinxext/prs/5813.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "feb0bec3acf334ee391711109c9a541697813a30", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/autosummary/class.rst": { + "a": 0, + "d": 0 + }, + "doc/_templates/autosummary/function.rst": { + "a": 0, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "doc/glossary.rst": { + "a": 11, + "d": 11 + }, + "doc/python_reference.rst": { + "a": 21, + "d": 144 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5814.json b/doc/sphinxext/prs/5814.json new file mode 100644 index 00000000000..4d1e26ef1c6 --- /dev/null +++ b/doc/sphinxext/prs/5814.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fd2e3d23bb41dce572d948f42a2e2990fb0e31e5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/inverse_sparse/mxne_optim.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5819.json b/doc/sphinxext/prs/5819.json new file mode 100644 index 00000000000..a0928fae965 --- /dev/null +++ b/doc/sphinxext/prs/5819.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b535a53711fe1efb954ec81e6d94c4ceb768a686", + "authors": [ + { + "n": "Antoine Gauthier", + "e": "antoine.gauthier@ensta.fr" + } + ], + 
"changes": { + "mne/preprocessing/ica.py": { + "a": 5, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 85, + "d": 9 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/582.json b/doc/sphinxext/prs/582.json new file mode 100644 index 00000000000..7b7e78bc305 --- /dev/null +++ b/doc/sphinxext/prs/582.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "34a4c873dac6e7da730ef6cf239a06cf4b2bbfc9", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 11, + "d": 5 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 19, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5821.json b/doc/sphinxext/prs/5821.json new file mode 100644 index 00000000000..4b52808b6df --- /dev/null +++ b/doc/sphinxext/prs/5821.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b2b551274071f8b21fe2906631488b161c9aa312", + "authors": [ + { + "n": "Larry Eisenman", + "e": "leisenman@wustl.edu" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 7, + "d": 2 + }, + "mne/io/edf/tests/data/duplicate_channel_labels.edf": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 12, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 32, + "d": 16 + }, + "mne/io/tests/test_meas_info.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5822.json b/doc/sphinxext/prs/5822.json new file mode 100644 index 00000000000..8c18a13b35d --- /dev/null +++ b/doc/sphinxext/prs/5822.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "78561338b1ffe376455e8a1324f55e2833979ce1", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_coreg_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_fiducials_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_viewer.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 80, + "d": 202 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 441, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 62, + "d": 0 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 91, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 12, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5823.json b/doc/sphinxext/prs/5823.json new file mode 100644 index 00000000000..266f3b87b22 --- /dev/null +++ b/doc/sphinxext/prs/5823.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "68410a1468b7fa5d72ec631a844a1b448caf8223", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "mne/externals/pymatreader/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5824.json b/doc/sphinxext/prs/5824.json new file mode 100644 index 00000000000..6a1c36163f0 --- /dev/null +++ b/doc/sphinxext/prs/5824.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d97fcc0c98981c8c36a81cd995ac2746632d14ee", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 34, + "d": 15 + }, + "mne/io/tests/test_raw.py": { + "a": 25, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5825.json b/doc/sphinxext/prs/5825.json new 
file mode 100644 index 00000000000..3dc3bafd0d4 --- /dev/null +++ b/doc/sphinxext/prs/5825.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "41fc42be6a229c7d41d2be59850fa4ba66a194f9", + "authors": [ + { + "n": "Burkhard Maess", + "e": null + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5827.json b/doc/sphinxext/prs/5827.json new file mode 100644 index 00000000000..041791b000a --- /dev/null +++ b/doc/sphinxext/prs/5827.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "25dd447f892a89fc5ba220ebf9fa10bfb532a7f4", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 7, + "d": 2 + }, + "mne/io/edf/tests/data/duplicate_channel_labels.edf": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 13, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 32, + "d": 16 + }, + "mne/io/tests/test_meas_info.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5828.json b/doc/sphinxext/prs/5828.json new file mode 100644 index 00000000000..68fed453410 --- /dev/null +++ b/doc/sphinxext/prs/5828.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "57091f02565402b2b7ba132ee7afe2b05f4647fc", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/utils.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5829.json b/doc/sphinxext/prs/5829.json new file mode 100644 index 00000000000..5695dcf8957 --- /dev/null +++ b/doc/sphinxext/prs/5829.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e0588eedc173902a9b55dfe6436ea433c19bbf30", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "tutorials/plot_eeg_erp.py": { + "a": 8, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/583.json b/doc/sphinxext/prs/583.json new file mode 100644 index 00000000000..88c235e4cfe --- /dev/null +++ b/doc/sphinxext/prs/583.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "67be230abb59dfe592bd560df537b7271b594c5b", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "mne/simulation/source.py": { + "a": 23, + "d": 17 + }, + "mne/simulation/tests/test_source.py": { + "a": 96, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5831.json b/doc/sphinxext/prs/5831.json new file mode 100644 index 00000000000..3104edcddf8 --- /dev/null +++ b/doc/sphinxext/prs/5831.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d7866514ffa4aa2e35f9989fa67262defe7588f5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 6, + "d": 6 + }, + "conftest.py": { + "a": 18, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 4, + "d": 3 + }, + "mne/utils.py": { + "a": 0, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "setup.cfg": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5832.json b/doc/sphinxext/prs/5832.json new file mode 100644 index 00000000000..7e85aa70f88 --- /dev/null +++ b/doc/sphinxext/prs/5832.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "78e9e728619193e5a41e4218bf39c12d2e130a90", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + 
".travis.yml": { + "a": 2, + "d": 2 + }, + "mne/externals/pymatreader/utils.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5835.json b/doc/sphinxext/prs/5835.json new file mode 100644 index 00000000000..70cbeca36fd --- /dev/null +++ b/doc/sphinxext/prs/5835.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6a35cd65ede14f493ce4fcfa82c0fe94c6ab5a63", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "setup.cfg": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5837.json b/doc/sphinxext/prs/5837.json new file mode 100644 index 00000000000..d29f5bdad22 --- /dev/null +++ b/doc/sphinxext/prs/5837.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f951ba3331e6db05a30057b71fcf9f9f118c0e90", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 4, + "d": 3 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "conftest.py": { + "a": 37, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 4, + "d": 3 + }, + "setup.cfg": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5838.json b/doc/sphinxext/prs/5838.json new file mode 100644 index 00000000000..d6ec89352da --- /dev/null +++ b/doc/sphinxext/prs/5838.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "658ddbe28a3dcb2d6d00bf15c3507ceb837e25d5", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/ctf/info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5840.json b/doc/sphinxext/prs/5840.json new file mode 100644 index 00000000000..8a4e5690070 --- /dev/null +++ b/doc/sphinxext/prs/5840.json @@ -0,0 +1,319 @@ +{ + "merge_commit_sha": "d7fe74851707950408d2c2b2e34dfa5fdc1d0669", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + "doc/_static/style.css": { + "a": 7, + "d": 0 + }, + "doc/conf.py": { + "a": 114, + "d": 9 + }, + "mne/annotations.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 5, + "d": 5 + }, + "mne/channels/layout.py": { + "a": 3, + "d": 3 + }, + "mne/connectivity/effective.py": { + "a": 3, + "d": 3 + }, + "mne/connectivity/spectral.py": { + "a": 4, + "d": 4 + }, + "mne/connectivity/utils.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/base.py": { + "a": 4, + "d": 3 + }, + "mne/decoding/csp.py": { + "a": 7, + "d": 7 + }, + "mne/decoding/ems.py": { + "a": 7, + "d": 6 + }, + "mne/decoding/mixin.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_frequency.py": { + "a": 10, + "d": 10 + }, + "mne/decoding/transformer.py": { + "a": 21, + "d": 21 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/event.py": { + "a": 8, + "d": 4 + }, + "mne/evoked.py": { + "a": 8, + "d": 8 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 4, + 
"d": 4 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 5, + "d": 5 + }, + "mne/io/cnt/cnt.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 5, + "d": 10 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 3, + "d": 3 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 2, + "d": 2 + }, + "mne/io/proj.py": { + "a": 4, + "d": 4 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 15, + "d": 15 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 3, + "d": 3 + }, + "mne/morph.py": { + "a": 5, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 12, + "d": 11 + }, + "mne/preprocessing/xdawn.py": { + "a": 6, + "d": 6 + }, + "mne/realtime/client.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/source.py": { + "a": 6, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 28, + "d": 28 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/stats/cluster_level.py": { + "a": 6, + "d": 6 + }, + "mne/stats/regression.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/multitaper.py": { + "a": 6, + "d": 6 + }, + "mne/time_frequency/stft.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 56, + "d": 63 + }, + "mne/utils/_logging.py": { + "a": 2, + "d": 2 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 27, + "d": 26 + }, + "mne/viz/circle.py": { + "a": 9, + "d": 9 + }, + "mne/viz/epochs.py": { + "a": 10, + "d": 10 + }, + "mne/viz/evoked.py": { + "a": 9, + "d": 9 + }, + "mne/viz/ica.py": { + "a": 13, + "d": 13 + }, + "mne/viz/misc.py": { + "a": 9, + "d": 9 + }, + "mne/viz/montage.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 11, + "d": 11 + }, + "mne/viz/topomap.py": { + "a": 9, + "d": 9 + }, + "mne/viz/utils.py": { + "a": 11, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5841.json b/doc/sphinxext/prs/5841.json new file mode 100644 index 00000000000..04cc421f6c8 --- /dev/null +++ b/doc/sphinxext/prs/5841.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "42cf6876414b9b3d1ad97882962110ae5cbd908d", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 102, + "d": 67 + }, + "mne/io/edf/tests/data/test_stim_channel.edf": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 27, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5842.json b/doc/sphinxext/prs/5842.json new file mode 100644 index 00000000000..ee2fc347ce8 --- /dev/null +++ b/doc/sphinxext/prs/5842.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "52d2c268171070643e6946497ab192dee9b68b69", + "authors": [ + { + "n": "Eric 
Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "conftest.py": { + "a": 6, + "d": 0 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 0, + "d": 9 + }, + "mne/report.py": { + "a": 5, + "d": 5 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5845.json b/doc/sphinxext/prs/5845.json new file mode 100644 index 00000000000..dcf8306bb7c --- /dev/null +++ b/doc/sphinxext/prs/5845.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "faefe1e78091d1df63c365869660441747146ca1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "appveyor.yml": { + "a": 4, + "d": 3 + }, + "environment.yml": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5851.json b/doc/sphinxext/prs/5851.json new file mode 100644 index 00000000000..e12da2ab643 --- /dev/null +++ b/doc/sphinxext/prs/5851.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7331ad6e0613f32f7dfb0c675fd8f58ee0f1d07c", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/io/base.py": { + "a": 5, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 43, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5855.json b/doc/sphinxext/prs/5855.json new file mode 100644 index 00000000000..edf83dfd2d1 --- /dev/null +++ b/doc/sphinxext/prs/5855.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d5f1de425afaec65e86c12fb695514e2f730af1b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 5, + "d": 4 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5857.json b/doc/sphinxext/prs/5857.json new file mode 100644 index 00000000000..20636611575 --- /dev/null +++ b/doc/sphinxext/prs/5857.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1567fef198b0dbbdc25365f10ba71aa91d73eab0", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 12 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5859.json b/doc/sphinxext/prs/5859.json new file mode 100644 index 00000000000..15378ef43d9 --- /dev/null +++ b/doc/sphinxext/prs/5859.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "014f87ccd14955dbacfaf52ea4bf205604937d91", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/simulation/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 81, + "d": 6 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 53, + "d": 4 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/586.json b/doc/sphinxext/prs/586.json new file mode 100644 index 00000000000..f454001c930 --- /dev/null +++ b/doc/sphinxext/prs/586.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "77d7aea5eb35a6911abe4eea85f567656d7ae273", + 
"authors": [ + { + "n": "Romain Trachel", + "e": "trachelr@gmail.com" + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 12, + "d": 6 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 33, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5861.json b/doc/sphinxext/prs/5861.json new file mode 100644 index 00000000000..512c1c9294c --- /dev/null +++ b/doc/sphinxext/prs/5861.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "647c7d3a95042b6f3d673855e89f3dc9029bddd6", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 8, + "d": 10 + }, + "mne/io/edf/tests/data/test_stim_channel.edf": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 23, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5862.json b/doc/sphinxext/prs/5862.json new file mode 100644 index 00000000000..1cccd139066 --- /dev/null +++ b/doc/sphinxext/prs/5862.json @@ -0,0 +1,123 @@ +{ + "merge_commit_sha": "203a96cbba2732d2e349a8f96065e74bbfd2a53b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 19 + }, + "mne/tests/test_utils.py": { + "a": 0, + "d": 969 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 6, + "d": 1 + }, + "mne/utils.py": { + "a": 0, + "d": 3615 + }, + "mne/utils/__init__.py": { + "a": 43, + "d": 0 + }, + "mne/utils/check.py": { + "a": 332, + "d": 0 + }, + "mne/utils/config.py": { + "a": 528, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 391, + "d": 0 + }, + "mne/utils/fetching.py": { + "a": 184, + "d": 0 + }, + "mne/utils/logging.py": { + "a": 337, + "d": 0 + }, + "mne/utils/misc.py": { + "a": 226, + "d": 0 + }, + "mne/utils/mixin.py": { + "a": 456, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 784, + "d": 0 + }, + "mne/utils/progressbar.py": { + "a": 250, + "d": 0 + }, + "mne/utils/testing.py": { + "a": 404, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 38, + "d": 0 + }, + "mne/utils/tests/test_config.py": { + "a": 95, + "d": 0 + }, + "mne/utils/tests/test_docs.py": { + "a": 174, + "d": 0 + }, + "mne/utils/tests/test_fetching.py": { + "a": 29, + "d": 0 + }, + "mne/utils/tests/test_logging.py": { + "a": 114, + "d": 0 + }, + "mne/utils/tests/test_misc.py": { + "a": 8, + "d": 0 + }, + "mne/utils/tests/test_mixin.py": { + "a": 0, + "d": 0 + }, + "mne/utils/tests/test_numerics.py": { + "a": 341, + "d": 0 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 94, + "d": 0 + }, + "mne/utils/tests/test_testing.py": { + "a": 114, + "d": 0 + }, + "requirements.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5865.json b/doc/sphinxext/prs/5865.json new file mode 100644 index 00000000000..25f8f806127 --- /dev/null +++ b/doc/sphinxext/prs/5865.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "62d32c2bb616ef9ef6fb0e83c5f4838252029125", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5866.json b/doc/sphinxext/prs/5866.json new file mode 100644 index 00000000000..5858a79b7e9 --- /dev/null +++ b/doc/sphinxext/prs/5866.json @@ -0,0 +1,47 @@ +{ + 
"merge_commit_sha": "217aecf9efad53dadf0c48aef99fd73c7b01244d", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 1, + "d": 0 + }, + "mne/io/edf/_utils.py": { + "a": 33, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 37, + "d": 24 + }, + "mne/io/edf/gdf_encodes.txt": { + "a": 284, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 11, + "d": 0 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 11, + "d": 4 + }, + "setup.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5869.json b/doc/sphinxext/prs/5869.json new file mode 100644 index 00000000000..a16092b5b24 --- /dev/null +++ b/doc/sphinxext/prs/5869.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d214798d5ab53c41150f86263758480f00f7e74c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 36, + "d": 12 + }, + "mne/simulation/tests/test_raw.py": { + "a": 45, + "d": 45 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5870.json b/doc/sphinxext/prs/5870.json new file mode 100644 index 00000000000..35095ac18e2 --- /dev/null +++ b/doc/sphinxext/prs/5870.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "c6956e7c2385712a8b87c15682aaef5f0fcb9168", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/cov.py": { + "a": 10, + "d": 178 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 0, + "d": 34 + }, + "mne/io/proc_history.py": { + "a": 1, + "d": 50 + }, + "mne/io/tests/test_proc_history.py": { + "a": 1, + "d": 11 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 4 + }, + "mne/rank.py": { + "a": 234, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 8, + "d": 124 + }, + "mne/tests/test_rank.py": { + "a": 169, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 72 + }, + "mne/utils/tests/test_numerics.py": { + "a": 39, + "d": 12 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5871.json b/doc/sphinxext/prs/5871.json new file mode 100644 index 00000000000..447b29bd782 --- /dev/null +++ b/doc/sphinxext/prs/5871.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "d5ae1c83a70dfb3c40f23914d389976c65a3cd11", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/utils/__init__.py": { + "a": 3, + "d": 3 + }, + "mne/utils/_logging.py": { + "a": 1, + "d": 3 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + }, + "mne/utils/fetching.py": { + "a": 1, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 1, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + }, + "mne/utils/progressbar.py": { + "a": 1, + "d": 1 + }, + "mne/utils/testing.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5872.json b/doc/sphinxext/prs/5872.json new file mode 100644 index 00000000000..d3a50e4aca0 --- /dev/null +++ b/doc/sphinxext/prs/5872.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "44c2ba414f2ab11d83bd5a477c599b9370f28449", 
+ "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 9, + "d": 61 + }, + "mne/beamformer/_dics.py": { + "a": 6, + "d": 7 + }, + "mne/beamformer/_lcmv.py": { + "a": 9, + "d": 9 + }, + "mne/beamformer/_rap_music.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 7, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 5, + "d": 4 + }, + "mne/utils/check.py": { + "a": 67, + "d": 2 + }, + "mne/utils/tests/test_check.py": { + "a": 83, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5875.json b/doc/sphinxext/prs/5875.json new file mode 100644 index 00000000000..618276a4897 --- /dev/null +++ b/doc/sphinxext/prs/5875.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cd53a2712c341a1641989962950f07fdb6b11a85", + "authors": [ + { + "n": "Hubert Banville", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 28, + "d": 15 + }, + "mne/io/tests/test_apply_function.py": { + "a": 20, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5876.json b/doc/sphinxext/prs/5876.json new file mode 100644 index 00000000000..f52964b1b1f --- /dev/null +++ b/doc/sphinxext/prs/5876.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "544989c7ff9922e04bddad72c1b13156ce344ace", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 4, + "d": 46 + }, + "mne/beamformer/_dics.py": { + "a": 4, + "d": 3 + }, + "mne/beamformer/_lcmv.py": { + "a": 28, + "d": 21 + }, + "mne/cov.py": { + "a": 99, + "d": 148 + }, + "mne/decoding/csp.py": { + "a": 5, + "d": 4 + }, + "mne/io/base.py": { + "a": 9, + "d": 5 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 5 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 22, + "d": 8 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 10, + "d": 11 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 6, + "d": 2 + }, + "mne/rank.py": { + "a": 214, + "d": 50 + }, + "mne/tests/test_cov.py": { + "a": 54, + "d": 18 + }, + "mne/tests/test_rank.py": { + "a": 99, + "d": 47 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 4, + "d": 2 + }, + "mne/utils/check.py": { + "a": 44, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 21, + "d": 3 + }, + "mne/utils/numerics.py": { + "a": 17, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 8 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 39 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5877.json b/doc/sphinxext/prs/5877.json new file mode 100644 index 00000000000..05f6a13c53f --- /dev/null +++ b/doc/sphinxext/prs/5877.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "c4cfc5d18dd4b8d6499d3ff4eea4b469c9b2b9e5", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 41, + "d": 47 + }, + "doc/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/__init__.py": { + "a": 1, + "d": 1 + }, + 
"mne/io/edf/edf.py": { + "a": 459, + "d": 213 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 23, + "d": 15 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/588.json b/doc/sphinxext/prs/588.json new file mode 100644 index 00000000000..81a0b595a70 --- /dev/null +++ b/doc/sphinxext/prs/588.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "df5f5509b87bb71c2dcb9992b28d7a57e7b190d9", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "mne/tests/test_utils.py": { + "a": 16, + "d": 1 + }, + "mne/utils.py": { + "a": 12, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5887.json b/doc/sphinxext/prs/5887.json new file mode 100644 index 00000000000..7ec24830ea1 --- /dev/null +++ b/doc/sphinxext/prs/5887.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "285dffd6f7a7ef19065cbffd40dc1bbd354977f9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 3 + }, + ".travis.yml": { + "a": 2, + "d": 2 + }, + "appveyor.yml": { + "a": 2, + "d": 2 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5888.json b/doc/sphinxext/prs/5888.json new file mode 100644 index 00000000000..8f9e7c26948 --- /dev/null +++ b/doc/sphinxext/prs/5888.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "49a3e84755a44ef1d30b2f35d8b46f80de1b5688", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 5, + "d": 1 + }, + "doc/install_mne_python.rst": { + "a": 15, + "d": 14 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_source_estimate.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5890.json b/doc/sphinxext/prs/5890.json new file mode 100644 index 00000000000..e5185475066 --- /dev/null +++ b/doc/sphinxext/prs/5890.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "50c5da639662ce3f4cb30e5a397f51e4bd7dd5ce", + "authors": [ + { + "n": "José C. 
García Alanis", + "e": null + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5892.json b/doc/sphinxext/prs/5892.json new file mode 100644 index 00000000000..032aece0f5a --- /dev/null +++ b/doc/sphinxext/prs/5892.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "33b317b688fc932856d874bb85400bab048f8f38", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/receptive_field.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 9, + "d": 8 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/stim_server_client.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5895.json b/doc/sphinxext/prs/5895.json new file mode 100644 index 00000000000..8dc107a7837 --- /dev/null +++ b/doc/sphinxext/prs/5895.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "8f6d2946c778ee2b131726f877e06b7a7c59d875", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/annotations.py": { + "a": 32, + "d": 8 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 12, + "d": 3 + }, + "mne/preprocessing/ecg.py": { + "a": 58, + "d": 15 + }, + "mne/preprocessing/ssp.py": { + "a": 3, + "d": 6 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 16, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 13, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5896.json b/doc/sphinxext/prs/5896.json new file mode 100644 index 00000000000..7dff75c5387 --- /dev/null +++ b/doc/sphinxext/prs/5896.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "14e45a5be062703429687385630787ae56e42abf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "appveyor.yml": { + "a": 4, + "d": 5 + }, + "doc/install_mne_python.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5897.json b/doc/sphinxext/prs/5897.json new file mode 100644 index 00000000000..eabefdefe9f --- /dev/null +++ b/doc/sphinxext/prs/5897.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6fdfb41246a1ca1a5ae2c381c5f7d97137a63265", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 22, + "d": 3 + }, + "mne/io/constants.py": { + "a": 1, + "d": 0 + }, + "mne/io/tests/test_constants.py": { + "a": 7, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 149, + "d": 129 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/5898.json b/doc/sphinxext/prs/5898.json new file mode 100644 index 00000000000..b34dc0ffa71 --- /dev/null +++ b/doc/sphinxext/prs/5898.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bee5f219ab835e33746af62cac4f188ddd987227", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/manual/datasets_index.rst": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 11, + "d": 7 + }, + "tutorials/plot_python_intro.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_sensors_decoding.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/59.json b/doc/sphinxext/prs/59.json new file mode 100644 index 00000000000..86e4048299c --- /dev/null +++ b/doc/sphinxext/prs/59.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "187d462ec2f7e47384cf61aff912dc727f558a93", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 34, + "d": 17 + }, + "mne/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/fiff/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/fiff/proj.py": { + "a": 11, + "d": 8 + }, + "mne/fiff/raw.py": { + "a": 18, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 12, + "d": 2 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5900.json b/doc/sphinxext/prs/5900.json new file mode 100644 index 00000000000..ff7b09fc623 --- /dev/null +++ b/doc/sphinxext/prs/5900.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "39fab32f4a93bc7675fce4398fe3a05346cbae09", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 61, + "d": 29 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_annotations.py": { + "a": 31, + "d": 27 + }, + "mne/utils/testing.py": { + "a": 11, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5901.json b/doc/sphinxext/prs/5901.json new file mode 100644 index 00000000000..93901d39d84 --- /dev/null +++ b/doc/sphinxext/prs/5901.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c1e353965e341919c22ece4d502720201f53b6e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_coreg.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5902.json b/doc/sphinxext/prs/5902.json new file mode 100644 index 00000000000..5a43afa609e --- /dev/null +++ b/doc/sphinxext/prs/5902.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "65e510049d81ae3517f63c27244eacbc5cc8d320", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 5, + "d": 6 + }, + "mne/utils/config.py": { + "a": 24, + "d": 15 + }, + "mne/utils/tests/test_fetching.py": { + "a": 17, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5904.json b/doc/sphinxext/prs/5904.json new file mode 100644 index 00000000000..79c9ac44b24 --- /dev/null +++ b/doc/sphinxext/prs/5904.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "fd91ebed2e1f69d742e32e40597938fd3b924706", + 
"authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 105, + "d": 0 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 12, + "d": 14 + }, + "mne/connectivity/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/connectivity/envelope.py": { + "a": 100, + "d": 0 + }, + "mne/connectivity/tests/test_envelope.py": { + "a": 60, + "d": 0 + }, + "mne/connectivity/tests/test_utils.py": { + "a": 34, + "d": 1 + }, + "mne/connectivity/utils.py": { + "a": 47, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 3 + }, + "mne/io/ctf_comp.py": { + "a": 3, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 25, + "d": 19 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/utils/check.py": { + "a": 16, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5905.json b/doc/sphinxext/prs/5905.json new file mode 100644 index 00000000000..69ecec6dae6 --- /dev/null +++ b/doc/sphinxext/prs/5905.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "3db66e03c9add0646cb722ee1d714da5c73f5c2d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/advanced_setup.rst": { + "a": 126, + "d": 66 + }, + "doc/contributing.rst": { + "a": 4, + "d": 4 + }, + "doc/documentation.rst": { + "a": 12, + "d": 5 + }, + "doc/getting_started.rst": { + "a": 12, + "d": 70 + }, + "doc/git_links.inc": { + "a": 19, + "d": 2 + }, + "doc/index.rst": { + "a": 1, + "d": 1 + }, + "doc/install_freesurfer.rst": { + "a": 25, + "d": 0 + }, + "doc/install_mne_c.rst": { + "a": 125, + "d": 236 + }, + "doc/install_mne_python.rst": { + "a": 48, + "d": 45 + }, + "doc/manual/c_reference.rst": { + "a": 4, + "d": 2 + }, + "doc/pre_install.rst": { + "a": 72, + "d": 0 + }, + "doc/this_project.inc": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5910.json b/doc/sphinxext/prs/5910.json new file mode 100644 index 00000000000..971dec68dc5 --- /dev/null +++ b/doc/sphinxext/prs/5910.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "eb20842546fe4e8d19d5659af69dbbf33d275451", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5911.json b/doc/sphinxext/prs/5911.json new file mode 100644 index 00000000000..478a2020176 --- /dev/null +++ b/doc/sphinxext/prs/5911.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3e29d773e537c502f4e6504384846e3cbef99052", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5913.json b/doc/sphinxext/prs/5913.json new file mode 100644 index 00000000000..c5e79343798 --- /dev/null +++ b/doc/sphinxext/prs/5913.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "62acff0fe93ac9abe2460ab731c4ae804461e786", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 27, + "d": 6 + } + } 
+} \ No newline at end of file diff --git a/doc/sphinxext/prs/5914.json b/doc/sphinxext/prs/5914.json new file mode 100644 index 00000000000..0931b7c4b79 --- /dev/null +++ b/doc/sphinxext/prs/5914.json @@ -0,0 +1,227 @@ +{ + "merge_commit_sha": "d54144fb725acc2b97665288b1162b4c2f1752db", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".lgtm.yml": { + "a": 13, + "d": 0 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 1, + "d": 0 + }, + "logo/generate_mne_logos.py": { + "a": 0, + "d": 1 + }, + "mne/annotations.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/_dics.py": { + "a": 0, + "d": 2 + }, + "mne/bem.py": { + "a": 0, + "d": 1 + }, + "mne/coreg.py": { + "a": 13, + "d": 12 + }, + "mne/cov.py": { + "a": 0, + "d": 3 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/megsim/megsim.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 3, + "d": 2 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/mixin.py": { + "a": 9, + "d": 3 + }, + "mne/epochs.py": { + "a": 4, + "d": 3 + }, + "mne/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/fixes.py": { + "a": 1, + "d": 5 + }, + "mne/forward/_lead_dots.py": { + "a": 6, + "d": 7 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg_gui.py": { + "a": 9, + "d": 19 + }, + "mne/gui/_fiducials_gui.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 2 + }, + "mne/io/base.py": { + "a": 5, + "d": 6 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 4, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 4, + "d": 2 + }, + "mne/io/proc_history.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 5, + "d": 6 + }, + "mne/minimum_norm/inverse.py": { + "a": 0, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/ctps_.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 17 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/peak_finder.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/xdawn.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/epochs.py": { + "a": 4, + "d": 2 + }, + "mne/source_space.py": { + "a": 0, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + }, + "mne/transforms.py": { + "a": 14, + "d": 16 + }, + "mne/utils/mixin.py": { + "a": 20, + "d": 13 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 6 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 11, + "d": 9 + }, + "mne/viz/utils.py": { + "a": 15, + "d": 15 + }, + "setup.py": { + "a": 31, + "d": 28 + }, + "tutorials/plot_info.py": { + "a": 6, + "d": 5 + }, + "tutorials/plot_sleep.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5915.json b/doc/sphinxext/prs/5915.json new file mode 100644 index 00000000000..d8e919c0e60 --- /dev/null +++ b/doc/sphinxext/prs/5915.json 
@@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "11e73a493a830f7b148413ba82d8a3399ad921cf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5917.json b/doc/sphinxext/prs/5917.json new file mode 100644 index 00000000000..17b62c3081d --- /dev/null +++ b/doc/sphinxext/prs/5917.json @@ -0,0 +1,247 @@ +{ + "merge_commit_sha": "63a8ce271eac9b2d488c353e04a3117aa1c5e772", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 3 + }, + "mne/channels/interpolation.py": { + "a": 3, + "d": 3 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 2 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 7, + "d": 7 + }, + "mne/event.py": { + "a": 3, + "d": 3 + }, + "mne/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/externals/funcsigs.py": { + "a": 20, + "d": 20 + }, + "mne/externals/h5io/_h5io.py": { + "a": 6, + "d": 6 + }, + "mne/fixes.py": { + "a": 3, + "d": 3 + }, + "mne/gui/_help.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 6, + "d": 6 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 2, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 3 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 6 + }, + "mne/io/pick.py": { + "a": 3, + "d": 3 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 1, + "d": 2 + }, + "mne/morph.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 18, + "d": 18 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 4 + }, + "mne/selection.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/stats/parametric.py": { + "a": 3, + "d": 3 + }, + "mne/stats/regression.py": { + "a": 9, + "d": 9 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 6, + "d": 6 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + }, + "mne/transforms.py": { + "a": 3, + "d": 3 + }, + "mne/utils/check.py": { + "a": 2, + "d": 2 + }, + "mne/utils/mixin.py": { + "a": 2, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 10, + "d": 11 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 
3 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 4, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 8, + "d": 8 + }, + "mne/viz/utils.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5920.json b/doc/sphinxext/prs/5920.json new file mode 100644 index 00000000000..9c150f26a1b --- /dev/null +++ b/doc/sphinxext/prs/5920.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4ec61d8710c698cecba9c2dbb7c050f1d907316c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/epochs.py": { + "a": 5, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 67, + "d": 0 + }, + "mne/utils/mixin.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5921.json b/doc/sphinxext/prs/5921.json new file mode 100644 index 00000000000..7dbae2f6c21 --- /dev/null +++ b/doc/sphinxext/prs/5921.json @@ -0,0 +1,483 @@ +{ + "merge_commit_sha": "5a0bbee4ddaaa880dadee3efd092c7f5c2168e93", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 1, + "d": 4 + }, + "mne/baseline.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 4 + }, + "mne/beamformer/_dics.py": { + "a": 5, + "d": 15 + }, + "mne/beamformer/_lcmv.py": { + "a": 5, + "d": 15 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 3 + }, + "mne/bem.py": { + "a": 10, + "d": 28 + }, + "mne/channels/channels.py": { + "a": 5, + "d": 19 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 3 + }, + "mne/chpi.py": { + "a": 4, + "d": 12 + }, + "mne/connectivity/effective.py": { + "a": 1, + "d": 3 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 3 + }, + "mne/coreg.py": { + "a": 4, + "d": 12 + }, + "mne/cov.py": { + "a": 8, + "d": 23 + }, + "mne/cuda.py": { + "a": 1, + "d": 4 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 2, + "d": 5 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/kiloword/kiloword.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/megsim/megsim.py": { + "a": 2, + "d": 4 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/utils.py": { + "a": 6, + "d": 7 + }, + "mne/datasets/visual_92_categories/visual_92_categories.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/ems.py": { + "a": 3, + "d": 5 + }, + "mne/decoding/search_light.py": { + "a": 5, + "d": 9 + }, + "mne/decoding/time_frequency.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/transformer.py": { + "a": 7, + "d": 12 + }, + "mne/dipole.py": { + "a": 13, + "d": 19 + }, + "mne/epochs.py": { + "a": 20, + "d": 51 + }, + "mne/event.py": { + "a": 1, + "d": 3 + }, + "mne/evoked.py": { + "a": 9, + "d": 16 + }, + "mne/externals/decorator.py": { + "a": 314, + "d": 113 + }, + "mne/externals/doccer.py": { + "a": 144, + "d": 0 + }, + "mne/filter.py": { + "a": 8, + "d": 30 + }, + "mne/forward/_compute_forward.py": { + "a": 2, + "d": 6 + }, + "mne/forward/_field_interpolation.py": { + "a": 2, + "d": 6 + }, + "mne/forward/_make_forward.py": { + "a": 6, + "d": 15 + }, + "mne/forward/forward.py": { + "a": 7, + "d": 21 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 3 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 2, + 
"d": 6 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 1, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 9 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 3, + "d": 9 + }, + "mne/io/array/array.py": { + "a": 3, + "d": 4 + }, + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 4 + }, + "mne/io/base.py": { + "a": 13, + "d": 43 + }, + "mne/io/brainvision/brainvision.py": { + "a": 5, + "d": 7 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 6 + }, + "mne/io/cnt/cnt.py": { + "a": 6, + "d": 9 + }, + "mne/io/ctf/ctf.py": { + "a": 5, + "d": 7 + }, + "mne/io/ctf_comp.py": { + "a": 1, + "d": 3 + }, + "mne/io/edf/edf.py": { + "a": 11, + "d": 16 + }, + "mne/io/eeglab/eeglab.py": { + "a": 8, + "d": 13 + }, + "mne/io/egi/egi.py": { + "a": 3, + "d": 5 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 2 + }, + "mne/io/eximia/eximia.py": { + "a": 5, + "d": 5 + }, + "mne/io/fiff/raw.py": { + "a": 6, + "d": 10 + }, + "mne/io/kit/kit.py": { + "a": 8, + "d": 13 + }, + "mne/io/matrix.py": { + "a": 1, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 18 + }, + "mne/io/nicolet/nicolet.py": { + "a": 4, + "d": 7 + }, + "mne/io/open.py": { + "a": 3, + "d": 6 + }, + "mne/io/pick.py": { + "a": 2, + "d": 6 + }, + "mne/io/proj.py": { + "a": 6, + "d": 19 + }, + "mne/io/reference.py": { + "a": 2, + "d": 6 + }, + "mne/label.py": { + "a": 12, + "d": 32 + }, + "mne/minimum_norm/inverse.py": { + "a": 8, + "d": 22 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 2, + "d": 6 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 12 + }, + "mne/morph.py": { + "a": 13, + "d": 24 + }, + "mne/parallel.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/ecg.py": { + "a": 2, + "d": 6 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 6 + }, + "mne/preprocessing/ica.py": { + "a": 13, + "d": 39 + }, + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/peak_finder.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 6 + }, + "mne/proj.py": { + "a": 5, + "d": 12 + }, + "mne/realtime/client.py": { + "a": 3, + "d": 4 + }, + "mne/realtime/epochs.py": { + "a": 5, + "d": 11 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 3, + "d": 4 + }, + "mne/realtime/mockclient.py": { + "a": 4, + "d": 3 + }, + "mne/realtime/stim_server_client.py": { + "a": 8, + "d": 24 + }, + "mne/report.py": { + "a": 5, + "d": 8 + }, + "mne/selection.py": { + "a": 1, + "d": 3 + }, + "mne/simulation/evoked.py": { + "a": 2, + "d": 6 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 31, + "d": 72 + }, + "mne/source_space.py": { + "a": 14, + "d": 44 + }, + "mne/stats/cluster_level.py": { + "a": 4, + "d": 12 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 3 + }, + "mne/surface.py": { + "a": 6, + "d": 18 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 8, + "d": 8 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 3 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 3 + }, + "mne/time_frequency/csd.py": { + "a": 7, + "d": 28 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 6 + }, + "mne/time_frequency/psd.py": { + "a": 3, + "d": 9 + }, + "mne/time_frequency/stft.py": { + "a": 1, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 12, + "d": 28 + }, + "mne/transforms.py": { + "a": 2, + "d": 8 + }, + 
"mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 48, + "d": 25 + }, + "mne/utils/docs.py": { + "a": 15, + "d": 2 + }, + "mne/utils/fetching.py": { + "a": 1, + "d": 3 + }, + "mne/utils/misc.py": { + "a": 1, + "d": 4 + }, + "mne/utils/tests/test_docs.py": { + "a": 11, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 8, + "d": 17 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 10 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 5 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5922.json b/doc/sphinxext/prs/5922.json new file mode 100644 index 00000000000..85e12469dfb --- /dev/null +++ b/doc/sphinxext/prs/5922.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e4848ae37bc6847af3081600b4253c061ec454a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 5, + "d": 1 + }, + "mne/externals/funcsigs.py": { + "a": 0, + "d": 815 + }, + "mne/externals/h5io/_h5io.py": { + "a": 119, + "d": 11 + }, + "mne/io/base.py": { + "a": 2, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5923.json b/doc/sphinxext/prs/5923.json new file mode 100644 index 00000000000..eb33cb83bef --- /dev/null +++ b/doc/sphinxext/prs/5923.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "6067ee7f82560429e2b1346a10283bb2065bc158", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "environment.yml": { + "a": 3, + "d": 3 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 5, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 67, + "d": 0 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5927.json b/doc/sphinxext/prs/5927.json new file mode 100644 index 00000000000..ae81713d320 --- /dev/null +++ b/doc/sphinxext/prs/5927.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c7f9624cf1af7ec9751daa32adc96f4adc02a233", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/593.json b/doc/sphinxext/prs/593.json new file mode 100644 index 00000000000..60c95e6a489 --- /dev/null +++ b/doc/sphinxext/prs/593.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9af93f1d3679c3fd624ac9cfe58c7444c7eab7e2", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/stats/tests/test_parametric.py": { + "a": 26, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5930.json b/doc/sphinxext/prs/5930.json new file mode 100644 index 00000000000..b7d9ef0804f --- /dev/null +++ b/doc/sphinxext/prs/5930.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1e532042a5877b28fe2a2ef88977d98264716a9b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 0, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5931.json b/doc/sphinxext/prs/5931.json new file mode 100644 index 00000000000..88cee016974 --- /dev/null +++ b/doc/sphinxext/prs/5931.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2502260f48b84299c00530ab09fdc9e2b2a4cddf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 8, + "d": 17 + }, + ".travis.yml": { + "a": 4, + "d": 1 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 1, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5932.json b/doc/sphinxext/prs/5932.json new file mode 100644 index 00000000000..76bfbefa0ee --- /dev/null +++ b/doc/sphinxext/prs/5932.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "90b91a313f2f96fcec046f4a254e4d2f8981a50f", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "mne/datasets/sleep_physionet/SHA1SUMS": { + "a": 394, + "d": 0 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 17, + "d": 21 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 4, + "d": 2 + }, + "mne/datasets/sleep_physionet/age_records.csv": { + "a": 228, + "d": 0 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 11, + "d": 10 + }, + "mne/datasets/sleep_physionet/temazepam_records.csv": { + "a": 22, + "d": 0 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 4, + "d": 2 + }, + "setup.py": { + "a": 2, + "d": 1 + }, + "tutorials/plot_sleep.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5934.json b/doc/sphinxext/prs/5934.json new file mode 100644 index 00000000000..edc7113d0ef --- /dev/null +++ b/doc/sphinxext/prs/5934.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fd99e1b16616af12dd99a13d5c8860a6e3094c08", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 31, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5936.json b/doc/sphinxext/prs/5936.json new file mode 100644 index 00000000000..f047d13674a --- /dev/null +++ b/doc/sphinxext/prs/5936.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a4aac69816a02337ce3448ec58a21f77ce6e2ab5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_3d_to_2d.py": { + "a": 1, + "d": 1 + }, + "ignore_words.txt": { + "a": 3, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ssp.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_ecog.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/5939.json b/doc/sphinxext/prs/5939.json new file mode 100644 index 00000000000..8a62f2ad406 --- /dev/null +++ b/doc/sphinxext/prs/5939.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "58f22d75b0f80fa643ddc8db10dbaaa607cce47a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/pick.py": { + "a": 6, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 5, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/594.json b/doc/sphinxext/prs/594.json new file mode 100644 index 00000000000..1d207dac964 --- /dev/null +++ b/doc/sphinxext/prs/594.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "200fe5660f8226799501abeca73c5b56b1f9f9b4", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 11, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5940.json b/doc/sphinxext/prs/5940.json new file mode 100644 index 00000000000..c6e85461207 --- /dev/null +++ b/doc/sphinxext/prs/5940.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0656d8eef747ecb0d86631c18675feba3ab1fcae", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 5, + "d": 3 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 7, + "d": 15 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 36, + "d": 7 + }, + "mne/simulation/tests/test_raw.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5942.json b/doc/sphinxext/prs/5942.json new file mode 100644 index 00000000000..7c5f2ef7c6f --- /dev/null +++ b/doc/sphinxext/prs/5942.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "12a85495333e07a80d676dcb0e19a562bd7d380f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/edf/_utils.py": { + "a": 8, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5946.json b/doc/sphinxext/prs/5946.json new file mode 100644 index 00000000000..56cd76b015d --- /dev/null +++ b/doc/sphinxext/prs/5946.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b9f18ea32c6e303aaa1e0b0e7aa4f7e3695dfcf4", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5947.json b/doc/sphinxext/prs/5947.json new file mode 100644 index 00000000000..10dc502d906 --- /dev/null +++ b/doc/sphinxext/prs/5947.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "8dc463497f160e0c7fde772c7effec76beaa7f32", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 0, + "d": 0 + }, + "doc/manual/c_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 4, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 2, + "d": 7 + }, + 
"mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 77, + "d": 56 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 151, + "d": 36 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 20, + "d": 56 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 176, + "d": 221 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 7, + "d": 5 + }, + "mne/preprocessing/ica.py": { + "a": 7, + "d": 7 + }, + "mne/simulation/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 32, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 5, + "d": 0 + }, + "mne/utils/check.py": { + "a": 18, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5949.json b/doc/sphinxext/prs/5949.json new file mode 100644 index 00000000000..7bd0cd44c36 --- /dev/null +++ b/doc/sphinxext/prs/5949.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8b00fc96ec8574c364007ea6b9655eb288d464bb", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5950.json b/doc/sphinxext/prs/5950.json new file mode 100644 index 00000000000..f2b418ccb81 --- /dev/null +++ b/doc/sphinxext/prs/5950.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "4fef3d02881d1b75d22c4de5399c11c736855f86", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 8 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 21, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 30, + "d": 30 + }, + "mne/rank.py": { + "a": 18, + "d": 10 + }, + "mne/tests/test_cov.py": { + "a": 39, + "d": 24 + }, + "mne/tests/test_rank.py": { + "a": 13, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5954.json b/doc/sphinxext/prs/5954.json new file mode 100644 index 00000000000..711355dd4cb --- /dev/null +++ b/doc/sphinxext/prs/5954.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5778168e9eaf597997fdd638c712fa3d01326fbc", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/glossary.rst": { + "a": 10, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 42, + "d": 14 + }, + "mne/tests/test_source_space.py": { + "a": 8, + "d": 3 + }, + "tutorials/plot_forward.py": { + "a": 46, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5955.json b/doc/sphinxext/prs/5955.json new file mode 100644 index 00000000000..12adc92b9cc --- /dev/null +++ b/doc/sphinxext/prs/5955.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4d932c2d2fea79e073715e42c08e94b06ea3cf9e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5957.json b/doc/sphinxext/prs/5957.json new file mode 100644 index 00000000000..1682137c583 --- /dev/null +++ b/doc/sphinxext/prs/5957.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c2f9adc34625990ea6ec287c158d7cbed706f7a1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 14, + "d": 14 + }, + "mne/channels/tests/test_channels.py": { + "a": 9, + "d": 0 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5958.json b/doc/sphinxext/prs/5958.json new file mode 100644 index 00000000000..e09ac587f1b --- /dev/null +++ b/doc/sphinxext/prs/5958.json @@ -0,0 +1,235 @@ +{ + "merge_commit_sha": "73ea718b82c7ea5f09b70490ca3ba7de2fd79f78", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/baseline.py": { + "a": 3, + "d": 5 + }, + "mne/beamformer/_dics.py": { + "a": 4, + "d": 9 + }, + "mne/beamformer/_lcmv.py": { + "a": 4, + "d": 9 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 7 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 5 + }, + "mne/chpi.py": { + "a": 5, + "d": 11 + }, + "mne/cov.py": { + "a": 4, + "d": 8 + }, + "mne/datasets/megsim/megsim.py": { + "a": 4, + "d": 7 + }, + "mne/decoding/csp.py": { + "a": 3, + "d": 4 + }, + "mne/decoding/time_frequency.py": { + "a": 2, + "d": 4 + }, + "mne/decoding/transformer.py": { + "a": 3, + "d": 5 + }, + "mne/dipole.py": { + "a": 2, + "d": 5 + }, + "mne/epochs.py": { + "a": 5, + "d": 12 + }, + "mne/event.py": { + "a": 2, + "d": 4 + }, + "mne/evoked.py": { + "a": 5, + "d": 15 + }, + "mne/filter.py": { + "a": 2, + "d": 4 + }, + "mne/fixes.py": { + "a": 2, + "d": 4 + }, + "mne/forward/_field_interpolation.py": { + "a": 9, + "d": 17 + }, + "mne/io/base.py": { + "a": 6, + "d": 10 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 3, + "d": 4 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 6 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/label.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 8, + "d": 14 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 2, + "d": 4 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 4 + }, + "mne/morph.py": { + "a": 2, + "d": 5 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/maxwell.py": { + "a": 5, + "d": 8 + }, + "mne/preprocessing/stim.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/xdawn.py": { + "a": 3, + "d": 3 + }, + "mne/proj.py": { + "a": 5, + "d": 10 + }, + "mne/report.py": { + "a": 9, + "d": 17 + }, + "mne/simulation/metrics.py": { + "a": 3, + "d": 4 + }, + "mne/simulation/source.py": { + "a": 2, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 10 + }, + "mne/stats/cluster_level.py": { + "a": 4, + "d": 7 + }, + "mne/stats/parametric.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 5, + "d": 13 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 
35, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 7 + }, + "mne/utils/tests/test_check.py": { + "a": 24, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 11, + "d": 36 + }, + "mne/viz/misc.py": { + "a": 3, + "d": 3 + }, + "mne/viz/montage.py": { + "a": 2, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 4, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/596.json b/doc/sphinxext/prs/596.json new file mode 100644 index 00000000000..90e576f031d --- /dev/null +++ b/doc/sphinxext/prs/596.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a78f46ff7299c0e036d3661884dd4aefdd5cbffa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/matrix.py": { + "a": 6, + "d": 0 + }, + "mne/forward.py": { + "a": 6, + "d": 5 + }, + "mne/tests/test_forward.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5962.json b/doc/sphinxext/prs/5962.json new file mode 100644 index 00000000000..caec5b00d00 --- /dev/null +++ b/doc/sphinxext/prs/5962.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b0e691f895be4d7d7dbd8a445a6242c567924ee0", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_ica_comparison.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 17, + "d": 14 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 0, + "d": 1 + }, + "tutorials/plot_artifacts_correction_ica.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_ica_from_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5963.json b/doc/sphinxext/prs/5963.json new file mode 100644 index 00000000000..92d6265e2fd --- /dev/null +++ b/doc/sphinxext/prs/5963.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c3e7571a9b698034cda94aae53004b55265057a9", + "authors": [ + { + "n": "Jevri Hanna", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 10, + "d": 7 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 54, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5964.json b/doc/sphinxext/prs/5964.json new file mode 100644 index 00000000000..13eea7d2ec3 --- /dev/null +++ b/doc/sphinxext/prs/5964.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "09901960a0b9c4254b3f8ca0ab483444445ffa31", + "authors": [ + { + "n": "Jeyhte Key", + "e": "jeythekey@tutanota.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5969.json b/doc/sphinxext/prs/5969.json new file mode 100644 index 00000000000..101203b707e --- /dev/null +++ b/doc/sphinxext/prs/5969.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4e1f0632aea44af1d0e07dba331391cee8979df1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 14, + "d": 3 + }, + "mne/viz/tests/test_topo.py": { + "a": 14, + "d": 9 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/597.json b/doc/sphinxext/prs/597.json new file mode 100644 index 00000000000..d57845b9e5a --- /dev/null +++ b/doc/sphinxext/prs/597.json @@ 
-0,0 +1,19 @@ +{ + "merge_commit_sha": "6fda3c4a6ae34fea103605551beed4bc377d85cd", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 19, + "d": 7 + }, + "mne/tests/test_epochs.py": { + "a": 14, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5971.json b/doc/sphinxext/prs/5971.json new file mode 100644 index 00000000000..922081ef401 --- /dev/null +++ b/doc/sphinxext/prs/5971.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b9ab993c6e34195499d2c4406dca7a54d58eaa4f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5973.json b/doc/sphinxext/prs/5973.json new file mode 100644 index 00000000000..7d3ccdb051a --- /dev/null +++ b/doc/sphinxext/prs/5973.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "7c21bd6c43c79d325fa47c39925daaa001882790", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 23, + "d": 8 + }, + "mne/source_estimate.py": { + "a": 273, + "d": 233 + }, + "mne/tests/test_morph.py": { + "a": 20, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 33, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5976.json b/doc/sphinxext/prs/5976.json new file mode 100644 index 00000000000..fc3e00e823d --- /dev/null +++ b/doc/sphinxext/prs/5976.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "571a792299d07c298b9707fa84ad0b31badd5deb", + "authors": [ + { + "n": "Quentin Bertrand", + "e": "quentin.bertrand@mila.quebec" + } + ], + "changes": { + "mne/simulation/source.py": { + "a": 3, + "d": 0 + }, + "mne/simulation/tests/test_source.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5978.json b/doc/sphinxext/prs/5978.json new file mode 100644 index 00000000000..e28dd1816ac --- /dev/null +++ b/doc/sphinxext/prs/5978.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2c659d59f89ff0e107c0af02d3cd5b506d8896cc", + "authors": [ + { + "n": "Simon Kern", + "e": null + } + ], + "changes": { + "mne/io/pick.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5979.json b/doc/sphinxext/prs/5979.json new file mode 100644 index 00000000000..5c196f31d93 --- /dev/null +++ b/doc/sphinxext/prs/5979.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5633254b08c6cd97c4e261c06e08459eb51f9516", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 6, + "d": 11 + }, + "mne/beamformer/_lcmv.py": { + "a": 30, + "d": 53 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 22, + "d": 10 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 57, + "d": 1 + }, + "mne/utils/check.py": { + "a": 14, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5981.json 
b/doc/sphinxext/prs/5981.json new file mode 100644 index 00000000000..2764dabc3d9 --- /dev/null +++ b/doc/sphinxext/prs/5981.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3d12b2b7b3b0ddb3f50962e4293cddf8ccf6ba52", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_custom_inverse_solver.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/plot_simulate_raw_data.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5982.json b/doc/sphinxext/prs/5982.json new file mode 100644 index 00000000000..90cb5b8da1a --- /dev/null +++ b/doc/sphinxext/prs/5982.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "46055a3a4122bf30740f6cefcfb03310ce064738", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/manual/io.rst": { + "a": 37, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5983.json b/doc/sphinxext/prs/5983.json new file mode 100644 index 00000000000..02a8290323e --- /dev/null +++ b/doc/sphinxext/prs/5983.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d3bff5a6570c071da590243b493cc59198b143f2", + "authors": [ + { + "n": "José C. García Alanis", + "e": null + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5984.json b/doc/sphinxext/prs/5984.json new file mode 100644 index 00000000000..db0a0a94a46 --- /dev/null +++ b/doc/sphinxext/prs/5984.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "f7bb2ef4a40bf3bc3e20c82feba145e0e6e35aac", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "conftest.py": { + "a": 64, + "d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 52, + "d": 0 + }, + "mne/defaults.py": { + "a": 4, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 76, + "d": 15 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 6, + "d": 4 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 22, + "d": 17 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 10, + "d": 5 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 4, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 44, + "d": 43 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 80, + "d": 139 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 8, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5987.json b/doc/sphinxext/prs/5987.json new file mode 100644 index 00000000000..0c74bb32384 --- /dev/null +++ b/doc/sphinxext/prs/5987.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "25dafcf3cc0a9e53f7ccd2d5117da47400c0a644", + "authors": [ + { + "n": "Jeyhte Key", + "e": "jeythekey@tutanota.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5988.json b/doc/sphinxext/prs/5988.json new file mode 100644 index 00000000000..0d51aaba3dd --- /dev/null +++ b/doc/sphinxext/prs/5988.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"541860518239b541eaf7be85db271327ec90085c", + "authors": [ + { + "n": "Jeyhte Key", + "e": "jeythekey@tutanota.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 7, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 33, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5991.json b/doc/sphinxext/prs/5991.json new file mode 100644 index 00000000000..70e8608d44b --- /dev/null +++ b/doc/sphinxext/prs/5991.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f9d19f48971d23113c2ef047df8910b6da37cd9f", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/datasets/sleep_physionet/_utils.py": { + "a": 1, + "d": 3 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 0, + "d": 1 + }, + "tutorials/plot_sleep.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5998.json b/doc/sphinxext/prs/5998.json new file mode 100644 index 00000000000..9f3093ee646 --- /dev/null +++ b/doc/sphinxext/prs/5998.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "57d33722dc80a5065444031068cac94c73aa9568", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 11, + "d": 1 + }, + "doc/sphinxext/gen_commands.py": { + "a": 3, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/5999.json b/doc/sphinxext/prs/5999.json new file mode 100644 index 00000000000..a0431845c9f --- /dev/null +++ b/doc/sphinxext/prs/5999.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "693bd2d8c958cd343d1c6ea4f132ad8714a0c116", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_custom_inverse_solver.py": { + "a": 3, + "d": 5 + }, + "mne/forward/forward.py": { + "a": 41, + "d": 42 + }, + "mne/forward/tests/test_forward.py": { + "a": 40, + "d": 9 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 5, + "d": 15 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 6, + "d": 21 + }, + "mne/io/pick.py": { + "a": 42, + "d": 13 + }, + "mne/minimum_norm/inverse.py": { + "a": 70, + "d": 107 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6.json b/doc/sphinxext/prs/6.json new file mode 100644 index 00000000000..f4653208fac --- /dev/null +++ b/doc/sphinxext/prs/6.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "a280a289a55bcec49e62cd3675d52f40994a2ed0", + "authors": [ + { + "n": "Yaroslav Halchenko", + "e": null + } + ], + "changes": { + "doc/source/command_line_tutorial.rst": { + "a": 1, + "d": 1 + }, + "doc/source/manual/AppInstall.rst": { + "a": 1, + "d": 1 + }, + "doc/source/manual/cookbook.rst": { + "a": 3, + "d": 3 + }, + "doc/source/manual/forward.rst": { + "a": 2, + "d": 2 + }, + "doc/source/manual/intro.rst": { + "a": 1, + "d": 1 + }, + "doc/source/manual/matlab.rst": { + "a": 3, + "d": 3 + }, + "doc/source/manual/mne.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/60.json b/doc/sphinxext/prs/60.json new file mode 100644 index 00000000000..dc762c64b0b --- /dev/null +++ b/doc/sphinxext/prs/60.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a7b4502fdbc6a41e22ced21a87a62c53146e897a", + "authors": [ + { + "n": 
"Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 63, + "d": 0 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/ar.py": { + "a": 111, + "d": 0 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 42, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/600.json b/doc/sphinxext/prs/600.json new file mode 100644 index 00000000000..d3dd1da5498 --- /dev/null +++ b/doc/sphinxext/prs/600.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ec9ec08976190404846c5357aae761ed83dc581d", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fixes.py": { + "a": 96, + "d": 0 + }, + "mne/stats/parametric.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6004.json b/doc/sphinxext/prs/6004.json new file mode 100644 index 00000000000..09d32435a83 --- /dev/null +++ b/doc/sphinxext/prs/6004.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "170e941b09ed18a7d7a83ee2554e8f9b3f40d220", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/data/coil_def.dat": { + "a": 11, + "d": 0 + }, + "mne/io/constants.py": { + "a": 4, + "d": 0 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6005.json b/doc/sphinxext/prs/6005.json new file mode 100644 index 00000000000..1bcce42de14 --- /dev/null +++ b/doc/sphinxext/prs/6005.json @@ -0,0 +1,203 @@ +{ + "merge_commit_sha": "e1e1f8bc4bbde70cb5ce48da4d8569714bdd6d7b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 1, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 0, + "d": 5 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 0, + "d": 1 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 3, + "d": 7 + }, + "examples/preprocessing/plot_resample.py": { + "a": 3, + "d": 3 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/plot_topo_compare_conditions.py": { + "a": 0, + "d": 1 + }, + "logo/generate_mne_logos.py": { + "a": 3, + "d": 3 + }, + "mne/bem.py": { + "a": 0, + "d": 3 + }, + "mne/connectivity/spectral.py": { + "a": 108, + "d": 143 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 134, + "d": 138 + }, + "mne/cov.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 0, + "d": 1 + }, + "mne/fixes.py": { + "a": 0, + "d": 10 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_backend.py": { + "a": 31, + "d": 1 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 4, + "d": 3 + }, + "mne/gui/_marker_gui.py": { + "a": 7, + "d": 7 + }, + "mne/gui/_viewer.py": { + "a": 0, + "d": 3 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 1, + "d": 1 + }, + 
"mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 5, + "d": 32 + }, + "mne/io/utils.py": { + "a": 0, + "d": 1 + }, + "mne/label.py": { + "a": 0, + "d": 4 + }, + "mne/report.py": { + "a": 4, + "d": 5 + }, + "mne/stats/cluster_level.py": { + "a": 0, + "d": 1 + }, + "mne/surface.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 9, + "d": 10 + }, + "mne/transforms.py": { + "a": 0, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 0, + "d": 29 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_background_filtering.py": { + "a": 2, + "d": 4 + }, + "tutorials/plot_background_statistics.py": { + "a": 0, + "d": 2 + }, + "tutorials/plot_configuration.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_dipole_fit.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_dipole_orientations.py": { + "a": 4, + "d": 4 + }, + "tutorials/plot_ecog.py": { + "a": 3, + "d": 3 + }, + "tutorials/plot_epoching_and_averaging.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 4, + "d": 4 + }, + "tutorials/plot_modifying_data_inplace.py": { + "a": 2, + "d": 2 + }, + "tutorials/plot_receptive_field.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6007.json b/doc/sphinxext/prs/6007.json new file mode 100644 index 00000000000..36b93de0f3f --- /dev/null +++ b/doc/sphinxext/prs/6007.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ac83a6c229da94a137d6e15cdbc3af7add7be423", + "authors": [ + { + "n": "Steve Matindi", + "e": null + } + ], + "changes": { + "LICENSE.txt": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6008.json b/doc/sphinxext/prs/6008.json new file mode 100644 index 00000000000..df852f66aba --- /dev/null +++ b/doc/sphinxext/prs/6008.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a8f17bb0fa7cc70940757c202bbffe4cf46beefc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 8, + "d": 0 + }, + ".gitignore": { + "a": 0, + "d": 1 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/601.json b/doc/sphinxext/prs/601.json new file mode 100644 index 00000000000..2644a959ace --- /dev/null +++ b/doc/sphinxext/prs/601.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c90423c06047e581ff3ac61b522bc45a96ce83c4", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6011.json b/doc/sphinxext/prs/6011.json new file mode 100644 index 00000000000..faa5f4fbb90 --- /dev/null +++ b/doc/sphinxext/prs/6011.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "ce50c2b20ac1197d79e68665900c9f6646505b13", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + ".travis.yml": { + "a": 6, + "d": 5 + }, + "azure-pipelines.yml": { + "a": 77, + "d": 0 + }, + "conftest.py": { + "a": 6, + "d": 2 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 0 + }, + "mne/utils/testing.py": { + "a": 2, + "d": 1 + }, + "requirements.txt": { + "a": 2, + "d": 2 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6012.json b/doc/sphinxext/prs/6012.json new file mode 100644 index 00000000000..857f459a019 --- /dev/null +++ b/doc/sphinxext/prs/6012.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2797c6a61087ce4b465db3360c2db7edf61dcd84", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6013.json b/doc/sphinxext/prs/6013.json new file mode 100644 index 00000000000..2247f17c43a --- /dev/null +++ b/doc/sphinxext/prs/6013.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3ced40c87a2a3007fc64e17ce1b86601836e52da", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6015.json b/doc/sphinxext/prs/6015.json new file mode 100644 index 00000000000..1d25e8a6653 --- /dev/null +++ b/doc/sphinxext/prs/6015.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b6571d655fa053a976320d48f987e5a696062656", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 1, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 5 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6017.json b/doc/sphinxext/prs/6017.json new file mode 100644 index 00000000000..b0efd296e9b --- /dev/null +++ b/doc/sphinxext/prs/6017.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "40f3fa070b82ade35d33ae3ec53e19efd92b7152", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/nihon/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/nihon/nihon.py": { + "a": 377, + "d": 0 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 65, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6019.json 
b/doc/sphinxext/prs/6019.json new file mode 100644 index 00000000000..cb43fbcda9f --- /dev/null +++ b/doc/sphinxext/prs/6019.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "194e2e3dc98dd3b545421d5750eff458263c25e7", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_custom_inverse_solver.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/602.json b/doc/sphinxext/prs/602.json new file mode 100644 index 00000000000..b4d7ed19272 --- /dev/null +++ b/doc/sphinxext/prs/602.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2c2f4223337a162cd2b39c4d001bd720d8f2cfb5", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 16, + "d": 19 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6020.json b/doc/sphinxext/prs/6020.json new file mode 100644 index 00000000000..d84051358ef --- /dev/null +++ b/doc/sphinxext/prs/6020.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bf4a580cfb05af4b5ff22e0cbc98be3d88c81fa7", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6021.json b/doc/sphinxext/prs/6021.json new file mode 100644 index 00000000000..cbefa2084cb --- /dev/null +++ b/doc/sphinxext/prs/6021.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "914f3d2def9643d5d6157381db3b6588094ceb28", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + ".gitignore": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6022.json b/doc/sphinxext/prs/6022.json new file mode 100644 index 00000000000..27bac6427df --- /dev/null +++ b/doc/sphinxext/prs/6022.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b757ff19c5556e328111661a304d46dff279d818", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6023.json b/doc/sphinxext/prs/6023.json new file mode 100644 index 00000000000..3db8ce3c5c9 --- /dev/null +++ b/doc/sphinxext/prs/6023.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "64e3f3ba7c2a82ff039a06877334fe48fb400d6d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_custom_inverse_solver.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6025.json b/doc/sphinxext/prs/6025.json new file mode 100644 index 00000000000..8c1db15896b --- /dev/null +++ b/doc/sphinxext/prs/6025.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f15e4bc322be80e58398bb23fdff40e87b566437", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 9, + "d": 30 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6026.json b/doc/sphinxext/prs/6026.json new file mode 100644 index 00000000000..eb4daffc9d2 --- /dev/null +++ b/doc/sphinxext/prs/6026.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e6a1f5e6e9640d2d90349b387b1395bf842560d7", + "authors": 
[ + { + "n": "Nicolas Legrand", + "e": "nicolas.legrand@cas.au.dk" + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6028.json b/doc/sphinxext/prs/6028.json new file mode 100644 index 00000000000..c6627327026 --- /dev/null +++ b/doc/sphinxext/prs/6028.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "56ce204fda6763458540755988f97c76a69bbcbd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_spoc_CMC.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 29 + }, + "mne/cov.py": { + "a": 16, + "d": 10 + }, + "mne/io/array/array.py": { + "a": 22, + "d": 9 + }, + "mne/io/array/tests/test_array.py": { + "a": 36, + "d": 0 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 5, + "d": 0 + }, + "mne/io/pick.py": { + "a": 159, + "d": 100 + }, + "mne/io/tests/test_pick.py": { + "a": 130, + "d": 16 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 2, + "d": 4 + }, + "mne/rank.py": { + "a": 27, + "d": 10 + }, + "mne/tests/test_rank.py": { + "a": 38, + "d": 25 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 12, + "d": 5 + }, + "mne/utils/testing.py": { + "a": 7, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6029.json b/doc/sphinxext/prs/6029.json new file mode 100644 index 00000000000..33ffd873718 --- /dev/null +++ b/doc/sphinxext/prs/6029.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "0a033205ef720f10d8f71bf0057f8b6bc5ad76eb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/externals/decorator.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 0, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 0, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_background_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_dipole_orientations.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6035.json b/doc/sphinxext/prs/6035.json new file mode 100644 index 00000000000..6e0ff7fc01b --- /dev/null +++ b/doc/sphinxext/prs/6035.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "03771c19f6275067a861381261baa4058565c31d", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "mne/externals/pymatreader/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6036.json b/doc/sphinxext/prs/6036.json new file mode 100644 index 00000000000..c9f372357c1 --- /dev/null +++ b/doc/sphinxext/prs/6036.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cffcf4b1aeaf04655f6917718029c59ff412c917", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 8, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 0 + }, + "mne/utils/check.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6037.json b/doc/sphinxext/prs/6037.json new file mode 100644 index 
00000000000..44208d450c8 --- /dev/null +++ b/doc/sphinxext/prs/6037.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "f7dc2398ec827702d1998c13b119e2e05b0f74a3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "conftest.py": { + "a": 2, + "d": 6 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 7, + "d": 1 + }, + "examples/simulation/plot_simulate_raw_data.py": { + "a": 17, + "d": 15 + }, + "mne/datasets/testing/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/testing/_testing.py": { + "a": 8, + "d": 0 + }, + "mne/filter.py": { + "a": 11, + "d": 20 + }, + "mne/forward/_field_interpolation.py": { + "a": 2, + "d": 4 + }, + "mne/forward/forward.py": { + "a": 67, + "d": 44 + }, + "mne/io/base.py": { + "a": 5, + "d": 4 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 7, + "d": 4 + }, + "mne/simulation/raw.py": { + "a": 580, + "d": 353 + }, + "mne/simulation/tests/test_raw.py": { + "a": 286, + "d": 113 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 5 + }, + "mne/source_space.py": { + "a": 15, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 7, + "d": 21 + }, + "mne/utils/docs.py": { + "a": 35, + "d": 0 + }, + "tutorials/plot_dics.py": { + "a": 27, + "d": 43 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/604.json b/doc/sphinxext/prs/604.json new file mode 100644 index 00000000000..d00031218c0 --- /dev/null +++ b/doc/sphinxext/prs/604.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "54d655264475c519589cc70a131e2edeba7bee04", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/plot_single_trial_spectra.py": { + "a": 84, + "d": 0 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 68, + "d": 6 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 51, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6040.json b/doc/sphinxext/prs/6040.json new file mode 100644 index 00000000000..3aca4843236 --- /dev/null +++ b/doc/sphinxext/prs/6040.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "208991216be438e61d6e62bdd0b8b3a5d87c7604", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6041.json b/doc/sphinxext/prs/6041.json new file mode 100644 index 00000000000..1ffcbbf85be --- /dev/null +++ b/doc/sphinxext/prs/6041.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "52cf011b50a26b05df3992530fef807a14311b40", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 4, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6042.json b/doc/sphinxext/prs/6042.json new file mode 100644 index 00000000000..99ca130e5b1 --- /dev/null +++ b/doc/sphinxext/prs/6042.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "7e0e09f4c2bdfc9d938fb709a6c07e055957ba4f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + 
"examples/inverse/plot_lcmv_beamformer.py": { + "a": 17, + "d": 10 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 83, + "d": 102 + }, + "mne/beamformer/_dics.py": { + "a": 22, + "d": 109 + }, + "mne/beamformer/_lcmv.py": { + "a": 50, + "d": 58 + }, + "mne/beamformer/_rap_music.py": { + "a": 7, + "d": 13 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 48, + "d": 38 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 53, + "d": 24 + }, + "mne/cov.py": { + "a": 3, + "d": 5 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 41, + "d": 33 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 35, + "d": 25 + }, + "mne/rank.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 9, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6044.json b/doc/sphinxext/prs/6044.json new file mode 100644 index 00000000000..50d088c00a2 --- /dev/null +++ b/doc/sphinxext/prs/6044.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "01a1aef46792d22dff39f38516268434a2497795", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6045.json b/doc/sphinxext/prs/6045.json new file mode 100644 index 00000000000..342bf279a6d --- /dev/null +++ b/doc/sphinxext/prs/6045.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b52c9d5436bd3d887750249f47a24ec77dcd3c0d", + "authors": [ + { + "n": "Sebastián Castaño-Candamil", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6046.json b/doc/sphinxext/prs/6046.json new file mode 100644 index 00000000000..a129accda7d --- /dev/null +++ b/doc/sphinxext/prs/6046.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "951df43c1ae96c5cc94043eadfd2fca0602c126f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 10, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 32, + "d": 15 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/fixes.py": { + "a": 8, + "d": 0 + }, + "mne/morph.py": { + "a": 59, + "d": 53 + }, + "mne/source_estimate.py": { + "a": 9, + "d": 6 + }, + "mne/source_space.py": { + "a": 13, + "d": 18 + }, + "mne/tests/test_morph.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_source_estimate.py": { + "a": 3, + "d": 3 + }, + "mne/transforms.py": { + "a": 25, + "d": 7 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 15, + "d": 0 + }, + "mne/utils/check.py": { + "a": 9, + "d": 7 + }, + "mne/viz/_3d.py": { + "a": 248, + "d": 134 + }, + "mne/viz/tests/test_3d.py": { + "a": 64, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6047.json b/doc/sphinxext/prs/6047.json new file mode 100644 index 00000000000..9e27a9c1e55 --- /dev/null +++ b/doc/sphinxext/prs/6047.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "9d71dc0c315190bb1572f534b55904035b54af59", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 0 + }, + "mne/annotations.py": { + 
"a": 4, + "d": 0 + }, + "mne/io/cnt/_utils.py": { + "a": 95, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 199, + "d": 81 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 35, + "d": 0 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 17, + "d": 15 + }, + "mne/io/utils.py": { + "a": 29, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6053.json b/doc/sphinxext/prs/6053.json new file mode 100644 index 00000000000..66fb663a75f --- /dev/null +++ b/doc/sphinxext/prs/6053.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c4c3abcd26bef60be757a02b90c7f7911292d0d5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 3, + "d": 2 + }, + "mne/connectivity/envelope.py": { + "a": 4, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6055.json b/doc/sphinxext/prs/6055.json new file mode 100644 index 00000000000..620d808fd0d --- /dev/null +++ b/doc/sphinxext/prs/6055.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "69a5b6aee344413a97e42aed3a6051fac8caea57", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 10, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6056.json b/doc/sphinxext/prs/6056.json new file mode 100644 index 00000000000..e68f31766f9 --- /dev/null +++ b/doc/sphinxext/prs/6056.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "39fdbdc0111ca6b80157cbee45c8e929769b094b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 72, + "d": 60 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6057.json b/doc/sphinxext/prs/6057.json new file mode 100644 index 00000000000..78bca08f8c9 --- /dev/null +++ b/doc/sphinxext/prs/6057.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7686801d1c4bf05b536b6078d51ef82d3bfe8cdb", + "authors": [ + { + "n": "Nicolas Legrand", + "e": "nicolas.legrand@cas.au.dk" + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6058.json b/doc/sphinxext/prs/6058.json new file mode 100644 index 00000000000..bbc12d17242 --- /dev/null +++ b/doc/sphinxext/prs/6058.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9324ffdaabc6c7d41785675592a65892525dad8c", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 3, + "d": 4 + }, + "appveyor.yml": { + "a": 0, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/606.json b/doc/sphinxext/prs/606.json new file mode 100644 index 00000000000..67ede0400a4 --- /dev/null +++ b/doc/sphinxext/prs/606.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5de74c891e199595123910390ebf4af9625c8149", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 48, + "d": 19 + }, + "mne/fiff/proj.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 14, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6060.json b/doc/sphinxext/prs/6060.json new file mode 100644 index 00000000000..6fc12449709 --- /dev/null +++ b/doc/sphinxext/prs/6060.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "41f6f2bcd0b8e90807837e131314be81e0cd5ca6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 1, + "d": 0 + }, + "mne/utils/fetching.py": { + "a": 18, + "d": 11 + }, + "mne/utils/tests/test_fetching.py": { + "a": 25, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6061.json b/doc/sphinxext/prs/6061.json new file mode 100644 index 00000000000..97e0625fc02 --- /dev/null +++ b/doc/sphinxext/prs/6061.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "717eedd5f9ce22431e40a15c378863de6730d3e8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 3, + "d": 4 + }, + "mne/viz/raw.py": { + "a": 44, + "d": 31 + }, + "mne/viz/tests/test_raw.py": { + "a": 13, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6063.json b/doc/sphinxext/prs/6063.json new file mode 100644 index 00000000000..7e1d52ca326 --- /dev/null +++ b/doc/sphinxext/prs/6063.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f2594e24244353a950cbb2728d07ae63bcc30115", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6064.json b/doc/sphinxext/prs/6064.json new file mode 100644 index 00000000000..c93adaac1f8 --- /dev/null +++ b/doc/sphinxext/prs/6064.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5e46b9748412ef5059f57f702954855c41f9ab49", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_flash_bem.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6065.json b/doc/sphinxext/prs/6065.json new file mode 100644 index 00000000000..d4a15cbaf50 --- /dev/null +++ b/doc/sphinxext/prs/6065.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "100f0d8580c0a75b6177a081b1216dc30fd7cbbf", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 11, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6066.json b/doc/sphinxext/prs/6066.json new file mode 100644 index 00000000000..4ef66d5afa9 --- /dev/null +++ b/doc/sphinxext/prs/6066.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "32bf58dfb3f0b6c61faba5b7b33f4565eb36a205", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/egi/egimff.py": { + "a": 3, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6067.json 
b/doc/sphinxext/prs/6067.json new file mode 100644 index 00000000000..299744f5045 --- /dev/null +++ b/doc/sphinxext/prs/6067.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "561ded7f8fa4c2c63ca8f429a2da98323d28d311", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 5, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 3, + "d": 8 + }, + "mne/viz/_3d.py": { + "a": 7, + "d": 7 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6068.json b/doc/sphinxext/prs/6068.json new file mode 100644 index 00000000000..fc5033803b4 --- /dev/null +++ b/doc/sphinxext/prs/6068.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d11419306011a484405dc1e2a882c50a71e7f6bf", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/607.json b/doc/sphinxext/prs/607.json new file mode 100644 index 00000000000..bdfbed92843 --- /dev/null +++ b/doc/sphinxext/prs/607.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c244a50e94036835221c4ff471856edea79fc696", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 6, + "d": 19 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 6, + "d": 20 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 6, + "d": 20 + }, + "mne/stats/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 55, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 16, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6070.json b/doc/sphinxext/prs/6070.json new file mode 100644 index 00000000000..0ac6d45f412 --- /dev/null +++ b/doc/sphinxext/prs/6070.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2a0a55c6a795f618cf0a1603e22a72ee8e879f62", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/constants.py": { + "a": 5, + "d": 2 + }, + "mne/io/tests/test_constants.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6073.json b/doc/sphinxext/prs/6073.json new file mode 100644 index 00000000000..4d9728bb9b5 --- /dev/null +++ b/doc/sphinxext/prs/6073.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7288928c346a49efd3489c848fadff9dbc4c9f81", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 7, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6076.json b/doc/sphinxext/prs/6076.json new file mode 100644 index 00000000000..190fe61e91d --- /dev/null +++ b/doc/sphinxext/prs/6076.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c37acb9d6166e2a5fa32698748ffdc8e3b8247ef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6079.json b/doc/sphinxext/prs/6079.json new file mode 100644 index 00000000000..858ad4180c7 --- /dev/null +++ b/doc/sphinxext/prs/6079.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"5fc58e5ddd6afb7f2bfd7f22a579fb91001815af", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/configure_git.rst": { + "a": 44, + "d": 40 + }, + "doc/git_links.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6080.json b/doc/sphinxext/prs/6080.json new file mode 100644 index 00000000000..788d85b68a5 --- /dev/null +++ b/doc/sphinxext/prs/6080.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "fd667a2e4270d42e7a4942a1746dde4fee85a7ff", + "authors": [ + { + "n": "Dirk Gütlin", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 16, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 29, + "d": 8 + }, + "mne/utils/__init__.py": { + "a": 5, + "d": 5 + }, + "mne/utils/numerics.py": { + "a": 28, + "d": 0 + }, + "mne/utils/tests/test_numerics.py": { + "a": 35, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6081.json b/doc/sphinxext/prs/6081.json new file mode 100644 index 00000000000..7bc8614510c --- /dev/null +++ b/doc/sphinxext/prs/6081.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "b74bb10d779dfd8144a4b6fd8fc5444f441f2141", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fixes.py": { + "a": 90, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 10 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 13 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 11, + "d": 10 + }, + "mne/utils/_testing.py": { + "a": 11, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 93, + "d": 0 + }, + "mne/utils/tests/test_numerics.py": { + "a": 40, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6083.json b/doc/sphinxext/prs/6083.json new file mode 100644 index 00000000000..d444fa07be1 --- /dev/null +++ b/doc/sphinxext/prs/6083.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1d31e89d7c23aca1f4b6aedec85b611237dc3f3e", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6088.json b/doc/sphinxext/prs/6088.json new file mode 100644 index 00000000000..3abafe04cc2 --- /dev/null +++ b/doc/sphinxext/prs/6088.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "c30e70c53cd25997a8a9831ab5c3e827dd5f2966", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 3, + "d": 19 + }, + "mne/filter.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/flat.py": { + "a": 99, + "d": 0 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 61, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 15, + "d": 0 + }, + "mne/utils/progressbar.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6089.json b/doc/sphinxext/prs/6089.json new file mode 100644 index 00000000000..8998f9b4a6c --- /dev/null +++ b/doc/sphinxext/prs/6089.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8610eddc10d2e32bdb42e59085aaae506819e140", + "authors": [ + { + "n": "Jean-Rémi 
KING", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/decoding/receptive_field.py": { + "a": 8, + "d": 8 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 5, + "d": 1 + }, + "tutorials/plot_receptive_field.py": { + "a": 1, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6092.json b/doc/sphinxext/prs/6092.json new file mode 100644 index 00000000000..ade0c78302c --- /dev/null +++ b/doc/sphinxext/prs/6092.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0078e1af13a92ab47498dd167bc5ec73be864427", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 6, + "d": 3 + }, + "mne/tests/test_annotations.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6095.json b/doc/sphinxext/prs/6095.json new file mode 100644 index 00000000000..d2b01c0021a --- /dev/null +++ b/doc/sphinxext/prs/6095.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d7cb4380664368ea9aa12efb5157058d66921323", + "authors": [ + { + "n": "Achilleas Koutsou", + "e": "achilleas@koutsou.net" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6098.json b/doc/sphinxext/prs/6098.json new file mode 100644 index 00000000000..01c71e56e83 --- /dev/null +++ b/doc/sphinxext/prs/6098.json @@ -0,0 +1,159 @@ +{ + "merge_commit_sha": "f384253d77d46893d91c4ae678ff50f7287cc384", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "doc/_static/style.css": { + "a": 33, + "d": 17 + }, + "doc/conf.py": { + "a": 3, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 1, + "d": 10 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 2 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 3, + "d": 4 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 2 + }, + "mne/io/constants.py": { + "a": 3, + "d": 18 + }, + "mne/io/ctf/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 39, + "d": 39 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 221, + "d": 223 + }, + "mne/io/kit/constants.py": { + "a": 4, + "d": 3 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 3, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 65, + "d": 13 + }, + "mne/io/tests/test_meas_info.py": { + "a": 11, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 5, + "d": 5 + }, + "mne/morph.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 42, + "d": 44 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 9, + "d": 10 + }, + "mne/source_space.py": { + "a": 4, + "d": 1 + }, + "mne/tests/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/tests/common.py": { + "a": 0, + "d": 106 + }, + "mne/tests/test_chpi.py": { + "a": 5, + "d": 8 + }, + "mne/tests/test_cov.py": { + "a": 10, + "d": 12 + }, + "mne/tests/test_dipole.py": { + "a": 32, + "d": 34 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 7 + }, + "mne/utils/__init__.py": { + "a": 3, + "d": 1 + }, + 
"mne/utils/_bunch.py": { + "a": 93, + "d": 0 + }, + "mne/utils/_testing.py": { + "a": 101, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6099.json b/doc/sphinxext/prs/6099.json new file mode 100644 index 00000000000..623a1f0ef58 --- /dev/null +++ b/doc/sphinxext/prs/6099.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "87cb63994a9ade4a0a0675f14ec2bb1edff88d65", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/61.json b/doc/sphinxext/prs/61.json new file mode 100644 index 00000000000..5627986d5f9 --- /dev/null +++ b/doc/sphinxext/prs/61.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7fa79864d502199866a5348b67865827aa0f9010", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/datasets/sample/__init__.py": { + "a": 44, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/610.json b/doc/sphinxext/prs/610.json new file mode 100644 index 00000000000..40d1154e05a --- /dev/null +++ b/doc/sphinxext/prs/610.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6ce8b7110aa91bee4e61219de72863aadd6b4de9", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 20, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6100.json b/doc/sphinxext/prs/6100.json new file mode 100644 index 00000000000..9aa600d1e2f --- /dev/null +++ b/doc/sphinxext/prs/6100.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "673573cabc21f7414741fee7e9c417d0e5f1d4b5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 29, + "d": 0 + }, + "doc/sphinxext/gen_commands.py": { + "a": 39, + "d": 14 + }, + "mne/commands/mne_browse_raw.py": { + "a": 6, + "d": 4 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 17, + "d": 11 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 5, + "d": 2 + }, + "mne/commands/mne_compare_fiff.py": { + "a": 5, + "d": 2 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 7, + "d": 4 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 12, + "d": 8 + }, + "mne/commands/mne_coreg.py": { + "a": 6, + "d": 1 + }, + "mne/commands/mne_flash_bem.py": { + "a": 24, + "d": 20 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 5, + "d": 2 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 11, + "d": 2 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 6, + "d": 1 + }, + "mne/commands/mne_maxfilter.py": { + "a": 4, + "d": 2 + }, + "mne/commands/mne_report.py": { + "a": 7, + "d": 4 + }, + "mne/commands/mne_show_fiff.py": { + "a": 9, + "d": 3 + }, + "mne/commands/mne_show_info.py": { + "a": 6, + "d": 2 + }, + "mne/commands/mne_surf2bem.py": { + "a": 7, + "d": 3 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6103.json b/doc/sphinxext/prs/6103.json new file mode 100644 index 00000000000..37c6d2cd418 --- /dev/null +++ b/doc/sphinxext/prs/6103.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "9d521373a4d4c1ca89e975c43348d2d5a6ff8d6f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + 
], + "changes": { + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/effective.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 26, + "d": 20 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/flat.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 12, + "d": 10 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 22, + "d": 8 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6104.json b/doc/sphinxext/prs/6104.json new file mode 100644 index 00000000000..dd2026c002e --- /dev/null +++ b/doc/sphinxext/prs/6104.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "eefbd78658664acd889887d4cb3e571db057b938", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 16, + "d": 13 + }, + "mne/time_frequency/tfr.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6105.json b/doc/sphinxext/prs/6105.json new file mode 100644 index 00000000000..8037ac68f20 --- /dev/null +++ b/doc/sphinxext/prs/6105.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "0bb4e479e385fd9c67b469f8ea0ae00bb3c54bd9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_peak_finder.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6106.json b/doc/sphinxext/prs/6106.json new file mode 100644 index 00000000000..1eda09926e3 --- /dev/null +++ b/doc/sphinxext/prs/6106.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a35bcc361778de096ba2803ed255d8acf587b3dc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/git_links.inc": { + "a": 1, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6108.json b/doc/sphinxext/prs/6108.json new file mode 100644 index 00000000000..638b2dfbb0d --- /dev/null +++ b/doc/sphinxext/prs/6108.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "027f82d79b8055bad5c997829d409035de348fe8", + "authors": [ + { + "n": "Katarina Slama", + "e": "slamakatarina@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 22, + "d": 2 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 71, + "d": 24 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6110.json b/doc/sphinxext/prs/6110.json new file mode 100644 index 00000000000..a83d948aa0d --- /dev/null +++ b/doc/sphinxext/prs/6110.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "3cc76b9f0058ade446411f5c0cfa362e073d29f6", + 
"authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 2, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation_volume.py": { + "a": 96, + "d": 0 + }, + "mne/connectivity/envelope.py": { + "a": 16, + "d": 9 + }, + "mne/epochs.py": { + "a": 5, + "d": 2 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 144, + "d": 0 + }, + "mne/io/base.py": { + "a": 4, + "d": 124 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/parallel.py": { + "a": 9, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 26, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 5, + "d": 5 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6111.json b/doc/sphinxext/prs/6111.json new file mode 100644 index 00000000000..69a376062a3 --- /dev/null +++ b/doc/sphinxext/prs/6111.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9b5ae5104ecdeaea13a88273c712382bf131162c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/topo.py": { + "a": 20, + "d": 16 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6113.json b/doc/sphinxext/prs/6113.json new file mode 100644 index 00000000000..b82af474ca4 --- /dev/null +++ b/doc/sphinxext/prs/6113.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "dd3cc4b087975357becef12a2f7258ada5b28478", + "authors": [ + { + "n": "José C. 
García Alanis", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 4, + "d": 0 + }, + "examples/datasets/plot_limo_data.py": { + "a": 178, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/limo/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/limo/limo.py": { + "a": 225, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6115.json b/doc/sphinxext/prs/6115.json new file mode 100644 index 00000000000..a9d08bb0b92 --- /dev/null +++ b/doc/sphinxext/prs/6115.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "127a75c55ca614477a927637e39cd3473535121e", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/utils/config.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6118.json b/doc/sphinxext/prs/6118.json new file mode 100644 index 00000000000..8e142f2d612 --- /dev/null +++ b/doc/sphinxext/prs/6118.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "56909cdbf35511d9837a5999ce1ffe2f47eee7bb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/autosummary/class.rst": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/612.json b/doc/sphinxext/prs/612.json new file mode 100644 index 00000000000..0c4c38e9f7f --- /dev/null +++ b/doc/sphinxext/prs/612.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "7851b08c73d83fcbdb63230de90cfc47917345b8", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 2, + "d": 2 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/epochs.py": { + "a": 8, + "d": 19 + }, + "mne/fiff/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/fiff/compensator.py": { + "a": 89, + "d": 116 + }, + "mne/fiff/ctf.py": { + "a": 7, + "d": 9 + }, + "mne/fiff/kit/kit.py": { + "a": 1, + "d": 0 + }, + "mne/fiff/raw.py": { + "a": 52, + "d": 20 + }, + "mne/fiff/tests/data/test_ctf_comp_raw.fif": { + "a": 0, + "d": 0 + }, + "mne/fiff/tests/test_compensator.py": { + "a": 20, + "d": 0 + }, + "mne/fiff/tests/test_raw.py": { + "a": 37, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6120.json b/doc/sphinxext/prs/6120.json new file mode 100644 index 00000000000..1ddaec54aed --- /dev/null +++ b/doc/sphinxext/prs/6120.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "56106059442264c43cdd489684418a35b2135fe4", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/realtime/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/realtime/base_client.py": { + "a": 199, + "d": 0 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 2, + "d": 2 + }, + "mne/realtime/lsl_client.py": { + "a": 120, + "d": 0 + }, + "mne/realtime/tests/test_lsl_client.py": { + "a": 67, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 3, + "d": 3 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 0 + }, + "mne/utils/check.py": { + "a": 13, + "d": 0 + }, + "requirements.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6122.json b/doc/sphinxext/prs/6122.json new file mode 100644 index 00000000000..bcd92d89151 --- /dev/null +++ b/doc/sphinxext/prs/6122.json @@ -0,0 +1,19 @@ +{ + 
"merge_commit_sha": "c2a5fe704e89ee4910049e7509cc17daac180e88", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 32, + "d": 24 + }, + "mne/viz/tests/test_evoked.py": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6124.json b/doc/sphinxext/prs/6124.json new file mode 100644 index 00000000000..2a171cb7208 --- /dev/null +++ b/doc/sphinxext/prs/6124.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6e1a2ba41e442a148d68bbcb06bb895099e29333", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6126.json b/doc/sphinxext/prs/6126.json new file mode 100644 index 00000000000..988659de942 --- /dev/null +++ b/doc/sphinxext/prs/6126.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8f52bc76f3992db96098ace9479c3f9c912b59bf", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "tutorials/plot_introduction.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_object_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6129.json b/doc/sphinxext/prs/6129.json new file mode 100644 index 00000000000..f4383ba5001 --- /dev/null +++ b/doc/sphinxext/prs/6129.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6b2d50e6ddbf2b274cec084b0e8ab8c29549afb9", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "tutorials/plot_object_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6131.json b/doc/sphinxext/prs/6131.json new file mode 100644 index 00000000000..acf148874d3 --- /dev/null +++ b/doc/sphinxext/prs/6131.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "205e9b2fe1e552ee6b76cfa5be0a6d64fd792fa1", + "authors": [ + { + "n": "Bruno Nicenboim", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/cuda.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6132.json b/doc/sphinxext/prs/6132.json new file mode 100644 index 00000000000..62ced5c99f1 --- /dev/null +++ b/doc/sphinxext/prs/6132.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d3747b5d05f214eb3745492fcd2160bb596c1ce9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 9, + "d": 4 + }, + "mne/tests/test_report.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6133.json b/doc/sphinxext/prs/6133.json new file mode 100644 index 00000000000..f17f73cfe83 --- /dev/null +++ b/doc/sphinxext/prs/6133.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f108848606d63434f6bb667954a3914327736b49", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 21, + "d": 11 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 15, + "d": 10 + }, + "mne/tests/test_bem.py": { + "a": 30, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6134.json b/doc/sphinxext/prs/6134.json new file mode 100644 index 00000000000..1b9a8d8d9d5 --- /dev/null +++ b/doc/sphinxext/prs/6134.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "546a2bbc3422af65def90706828d9e7f1a82ca62", + "authors": [ + { + 
"n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 2, + "d": 1 + }, + "mne/connectivity/utils.py": { + "a": 10, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6135.json b/doc/sphinxext/prs/6135.json new file mode 100644 index 00000000000..27e641dbdc0 --- /dev/null +++ b/doc/sphinxext/prs/6135.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "35d7c539c93bd0bff7788fad93ce08686e0cc760", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/source_space.py": { + "a": 40, + "d": 38 + }, + "mne/surface.py": { + "a": 43, + "d": 3 + }, + "mne/tests/test_source_space.py": { + "a": 38, + "d": 21 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6136.json b/doc/sphinxext/prs/6136.json new file mode 100644 index 00000000000..61013512388 --- /dev/null +++ b/doc/sphinxext/prs/6136.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "cd3f4fb2fe42d99d3eaa2d6a7d1dc56e08d69749", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/connectivity/envelope.py": { + "a": 15, + "d": 12 + }, + "mne/connectivity/tests/test_envelope.py": { + "a": 9, + "d": 5 + }, + "mne/epochs.py": { + "a": 3, + "d": 16 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 19, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6139.json b/doc/sphinxext/prs/6139.json new file mode 100644 index 00000000000..c39ffd9de29 --- /dev/null +++ b/doc/sphinxext/prs/6139.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "aa74f02c3be4e69963b78cf835af9b9e08a81a67", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_label.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/614.json b/doc/sphinxext/prs/614.json new file mode 100644 index 00000000000..62df19a5f0d --- /dev/null +++ b/doc/sphinxext/prs/614.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "04b6e4c7a1912f0d0e311749cf51e2cd723d8fd3", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 11, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6141.json b/doc/sphinxext/prs/6141.json new file mode 100644 index 00000000000..fcb2ab794a8 --- /dev/null +++ b/doc/sphinxext/prs/6141.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "c9fa690f0d551f4290872e1a4fba69d25ea2f6a3", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "examples/realtime/plot_lslclient_rt.py": { + "a": 55, + "d": 0 + }, + "mne/realtime/__init__.py": { + "a": 3, + "d": 1 + }, + "mne/realtime/base_client.py": { + "a": 16, + "d": 3 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 93, + "d": 200 + }, + "mne/realtime/lsl_client.py": { + "a": 5, + "d": 
5 + }, + "mne/realtime/mock_lsl_stream.py": { + "a": 83, + "d": 0 + }, + "mne/realtime/tests/test_lsl_client.py": { + "a": 24, + "d": 44 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6142.json b/doc/sphinxext/prs/6142.json new file mode 100644 index 00000000000..df47781da15 --- /dev/null +++ b/doc/sphinxext/prs/6142.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "55ac67e0862ac517daa91c7c25eb99a13ea2a578", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "conftest.py": { + "a": 17, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 46, + "d": 58 + }, + "mne/viz/__init__.py": { + "a": 3, + "d": 1 + }, + "mne/viz/backends/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 66, + "d": 47 + }, + "mne/viz/backends/_utils.py": { + "a": 21, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 47, + "d": 23 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 23, + "d": 0 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 53, + "d": 14 + }, + "mne/viz/tests/test_3d.py": { + "a": 35, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6143.json b/doc/sphinxext/prs/6143.json new file mode 100644 index 00000000000..3fdd87a7faa --- /dev/null +++ b/doc/sphinxext/prs/6143.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "aa7b3a0ba1038e6ad7fb357e60844696e234c40b", + "authors": [ + { + "n": "Nichalas", + "e": "nikos.ch01@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/event.py": { + "a": 7, + "d": 3 + }, + "mne/tests/test_event.py": { + "a": 19, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6145.json b/doc/sphinxext/prs/6145.json new file mode 100644 index 00000000000..9bfb840ce5e --- /dev/null +++ b/doc/sphinxext/prs/6145.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bc5bd2dd0ea17f87c9f887ed48fc48d3f19094c1", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6146.json b/doc/sphinxext/prs/6146.json new file mode 100644 index 00000000000..152ce0f0ccf --- /dev/null +++ b/doc/sphinxext/prs/6146.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "639b7381d0f3f57bfdeaa94513fece040a3dc8fb", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/manual/datasets_index.rst": { + "a": 4, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "tutorials/plot_eeg_no_mri.py": { + "a": 76, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6147.json b/doc/sphinxext/prs/6147.json new file mode 100644 index 00000000000..9084acdb78a --- /dev/null +++ b/doc/sphinxext/prs/6147.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b147676776d20d0027801a4ea3d83dbe8bc24d36", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6149.json b/doc/sphinxext/prs/6149.json new file mode 100644 index 00000000000..d01ce4fc2ee --- /dev/null +++ 
b/doc/sphinxext/prs/6149.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9a47e8e2b3c1f55a26683fee2f6d498cfca74802", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6150.json b/doc/sphinxext/prs/6150.json new file mode 100644 index 00000000000..bbd23773f1b --- /dev/null +++ b/doc/sphinxext/prs/6150.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "57bcaa877b896a4b7ccaca3207de81bf73e8b51b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6151.json b/doc/sphinxext/prs/6151.json new file mode 100644 index 00000000000..804b8cf2880 --- /dev/null +++ b/doc/sphinxext/prs/6151.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4555011717b9008f41eaa4926d598bb570e42fca", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 11, + "d": 3 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 22, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6152.json b/doc/sphinxext/prs/6152.json new file mode 100644 index 00000000000..a78d2f51f60 --- /dev/null +++ b/doc/sphinxext/prs/6152.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "ece75432a4d51692dfc946fc7e6615be00b6a0dd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/_static/style.css": { + "a": 4, + "d": 0 + }, + "doc/_templates/autosummary/class.rst": { + "a": 1, + "d": 3 + }, + "doc/conf.py": { + "a": 41, + "d": 69 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "ignore_words.txt": { + "a": 3, + "d": 0 + }, + "mne/bem.py": { + "a": 3, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_lead_dots.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_background_filtering.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6153.json b/doc/sphinxext/prs/6153.json new file mode 100644 index 00000000000..32b87fbfde3 --- /dev/null +++ b/doc/sphinxext/prs/6153.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "3f9399f90d6660f8dcd27b00fbd4bb0b48ecf477", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 7, + "d": 0 + }, + "conftest.py": { + "a": 9, + "d": 4 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_vtki.py": { + "a": 296, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 16, + "d": 5 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 
16, + "d": 10 + }, + "requirements.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6158.json b/doc/sphinxext/prs/6158.json new file mode 100644 index 00000000000..67349369b4f --- /dev/null +++ b/doc/sphinxext/prs/6158.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0b637d8bafeccf44ff7046cc4ef9b42bc8b7c050", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "examples/visualization/plot_montage.py": { + "a": 47, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 13, + "d": 7 + }, + "tutorials/plot_eeg_no_mri.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6160.json b/doc/sphinxext/prs/6160.json new file mode 100644 index 00000000000..19825d53284 --- /dev/null +++ b/doc/sphinxext/prs/6160.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b3183fc68982d4a7d25b6488b06adcdfad53453c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/proj.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 36, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6161.json b/doc/sphinxext/prs/6161.json new file mode 100644 index 00000000000..f9698ddf50d --- /dev/null +++ b/doc/sphinxext/prs/6161.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2c215d8f61e79f86a908da7f9e995819c5d62ddf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 15, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6169.json b/doc/sphinxext/prs/6169.json new file mode 100644 index 00000000000..8133943a286 --- /dev/null +++ b/doc/sphinxext/prs/6169.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "69dbf4bbc1045e55894750bfac8f6aece2c20b1f", + "authors": [ + { + "n": "Dirk Gütlin", + "e": null + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 12, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6172.json b/doc/sphinxext/prs/6172.json new file mode 100644 index 00000000000..f365d65d170 --- /dev/null +++ b/doc/sphinxext/prs/6172.json @@ -0,0 +1,10 @@ +{ + "merge_commit_sha": "8ac51be3e8eeaeb25adaafd8d580fee4dff0f558", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": {} +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6173.json b/doc/sphinxext/prs/6173.json new file mode 100644 index 00000000000..13abecdf432 --- /dev/null +++ b/doc/sphinxext/prs/6173.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "52949f1bea6d21fc286997838239e3b8109998de", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6174.json b/doc/sphinxext/prs/6174.json new file mode 100644 index 00000000000..375387ef9da --- /dev/null +++ b/doc/sphinxext/prs/6174.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "9cc5d1457c29151eea73fa7f14619724182960c1", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 0 + }, + "MANIFEST.in": { + "a": 2, + "d": 0 + }, + "doc/manual/datasets_index.rst": { + "a": 7, + "d": 0 + 
}, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 9, + "d": 0 + }, + "mne/datasets/_fsaverage/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 146, + "d": 0 + }, + "mne/datasets/_fsaverage/bem.txt": { + "a": 11, + "d": 0 + }, + "mne/datasets/_fsaverage/root.txt": { + "a": 179, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 67, + "d": 18 + }, + "mne/datasets/utils.py": { + "a": 35, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6175.json b/doc/sphinxext/prs/6175.json new file mode 100644 index 00000000000..cb4de9d442f --- /dev/null +++ b/doc/sphinxext/prs/6175.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8aad459ab6d1443b38c5699822886a83ba2a8388", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/datasets/_fsaverage/base.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 11, + "d": 4 + }, + "mne/utils/tests/test_config.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6176.json b/doc/sphinxext/prs/6176.json new file mode 100644 index 00000000000..a3b6f7dfa77 --- /dev/null +++ b/doc/sphinxext/prs/6176.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "b94eff339a5be230fce74ead7e562f7813b8c40d", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 18, + "d": 6 + }, + "doc/conf.py": { + "a": 5, + "d": 1 + }, + "doc/documentation.rst": { + "a": 4, + "d": 3 + }, + "examples/realtime/ftclient_rt_average.py": { + "a": 0, + "d": 94 + }, + "examples/realtime/ftclient_rt_compute_psd.py": { + "a": 0, + "d": 79 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 2, + "d": 2 + }, + "examples/realtime/plot_ftclient_rt_average.py": { + "a": 104, + "d": 0 + }, + "examples/realtime/plot_ftclient_rt_compute_psd.py": { + "a": 90, + "d": 0 + }, + "examples/realtime/plot_lslclient_rt.py": { + "a": 16, + "d": 20 + }, + "examples/realtime/plot_rt_feedback_server.py": { + "a": 124, + "d": 0 + }, + "examples/realtime/rt_feedback_client.py": { + "a": 0, + "d": 1 + }, + "examples/realtime/rt_feedback_server.py": { + "a": 0, + "d": 147 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 3, + "d": 0 + }, + "mne/realtime/mock_lsl_stream.py": { + "a": 10, + "d": 3 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 13, + "d": 42 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 50, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6181.json b/doc/sphinxext/prs/6181.json new file mode 100644 index 00000000000..4067ca4aa7c --- /dev/null +++ b/doc/sphinxext/prs/6181.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a2f4634d3bb4e00436ca6c90121d3f1e457875dd", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6182.json b/doc/sphinxext/prs/6182.json new file mode 100644 index 00000000000..8613154aac8 --- /dev/null +++ b/doc/sphinxext/prs/6182.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "287f42345aae86a134acedffae2bd64e0581f87b", + "authors": [ + { + "n": "Ivana Kojcic", + "e": null + } + ], + 
"changes": { + "doc/contributing.rst": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6184.json b/doc/sphinxext/prs/6184.json new file mode 100644 index 00000000000..d2c623875cb --- /dev/null +++ b/doc/sphinxext/prs/6184.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a3adefb9c60f7db5b8a314365244e1f519de8fe5", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/annotations.py": { + "a": 14, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 19, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6186.json b/doc/sphinxext/prs/6186.json new file mode 100644 index 00000000000..504c484077d --- /dev/null +++ b/doc/sphinxext/prs/6186.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3a207b37f5d4356b139e7ccf102b4ad95d556217", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 83, + "d": 56 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6187.json b/doc/sphinxext/prs/6187.json new file mode 100644 index 00000000000..03cee0a77be --- /dev/null +++ b/doc/sphinxext/prs/6187.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5799a185a85e69043a9b3845e1b93cd59509ce81", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6188.json b/doc/sphinxext/prs/6188.json new file mode 100644 index 00000000000..71576ba143f --- /dev/null +++ b/doc/sphinxext/prs/6188.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1e6a2a0f57ab3ced1c9f2ab44e44e24caf585aa3", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 8, + "d": 2 + }, + "mne/io/brainvision/tests/data/test_bad_date.vhdr": { + "a": 142, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_bad_date.vmrk": { + "a": 25, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6189.json b/doc/sphinxext/prs/6189.json new file mode 100644 index 00000000000..ff43aa93d0d --- /dev/null +++ b/doc/sphinxext/prs/6189.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ffa71dbd0b7890af75285848776dc192843ccddc", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 192, + "d": 132 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/619.json b/doc/sphinxext/prs/619.json new file mode 100644 index 00000000000..110741147ad --- /dev/null +++ b/doc/sphinxext/prs/619.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2e9ef2250532fc3f3da7d75f5b6fdac53b25bda4", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6191.json b/doc/sphinxext/prs/6191.json new file mode 100644 index 00000000000..b21fa5eeab9 --- /dev/null +++ b/doc/sphinxext/prs/6191.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b9a76420a4a436c8460c3d431ef2a5ae78af866a", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "doc/manual/migrating.rst": { + "a": 31, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6192.json b/doc/sphinxext/prs/6192.json new file mode 100644 index 00000000000..ecbe7f54181 --- /dev/null +++ b/doc/sphinxext/prs/6192.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "e8ea87f866b2cce26366e0964b5db416f70840a8", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/manual/datasets_index.rst": { + "a": 9, + "d": 1 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 3, + "d": 3 + }, + "examples/datasets/plot_opm_rest_data.py": { + "a": 0, + "d": 1 + }, + "mne/bem.py": { + "a": 37, + "d": 20 + }, + "mne/commands/mne_flash_bem.py": { + "a": 5, + "d": 1 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 6, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 46, + "d": 52 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6193.json b/doc/sphinxext/prs/6193.json new file mode 100644 index 00000000000..ab7ad7fc567 --- /dev/null +++ b/doc/sphinxext/prs/6193.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "42c6bf2bd822e2e81af0c8dd49b50c50fa193ba2", + "authors": [ + { + "n": "Samuel Deslauriers-Gauthier", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/label.py": { + "a": 90, + "d": 0 + }, + "mne/tests/test_label.py": { + "a": 52, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6194.json b/doc/sphinxext/prs/6194.json new file mode 100644 index 00000000000..368ea78b950 --- /dev/null +++ b/doc/sphinxext/prs/6194.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7b6998094dcbccf53594cd1e2b4662acd8dece0f", + "authors": [ + { + "n": "Deleted user", + "e": null + } + ], + "changes": { + "mne/realtime/lsl_client.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6197.json b/doc/sphinxext/prs/6197.json new file mode 100644 index 00000000000..1137977eaaa --- /dev/null +++ b/doc/sphinxext/prs/6197.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "92f61faef72a335d6de63e816bd5bcfeaceffa09", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 4, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 9, + "d": 0 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6198.json b/doc/sphinxext/prs/6198.json new file mode 100644 index 00000000000..6502b39946d --- /dev/null +++ b/doc/sphinxext/prs/6198.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "b05968e60a039ca62bb1cb5be8a10fdb161d0b40", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "examples/preprocessing/plot_eog_artifact_histogram.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { 
+ "a": 23, + "d": 5 + }, + "mne/realtime/epochs.py": { + "a": 6, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 0 + }, + "tutorials/plot_sleep.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/62.json b/doc/sphinxext/prs/62.json new file mode 100644 index 00000000000..f1467f921c5 --- /dev/null +++ b/doc/sphinxext/prs/62.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7aa71d75b86e728d956b1c8d034895810c6162ac", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/fiff/proj.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6200.json b/doc/sphinxext/prs/6200.json new file mode 100644 index 00000000000..0a865811f5b --- /dev/null +++ b/doc/sphinxext/prs/6200.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "925423c50120462bd4125aa0c0378f7cc994b5a6", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 55, + "d": 16 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 7, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 9, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 7, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 6, + "d": 0 + }, + "tutorials/plot_epochs_to_data_frame.py": { + "a": 22, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6201.json b/doc/sphinxext/prs/6201.json new file mode 100644 index 00000000000..590fd69a099 --- /dev/null +++ b/doc/sphinxext/prs/6201.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d66a8683911d217b594a82f46cedd209e52912e3", + "authors": [ + { + "n": "Nathalie", + "e": "nat.gayraud@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/simulation/source.py": { + "a": 41, + "d": 29 + }, + "mne/simulation/tests/test_source.py": { + "a": 35, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6202.json b/doc/sphinxext/prs/6202.json new file mode 100644 index 00000000000..f4dc0841a30 --- /dev/null +++ b/doc/sphinxext/prs/6202.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1889555ff187d19590c126c32fe68374ebebc9b8", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6205.json b/doc/sphinxext/prs/6205.json new file mode 100644 index 00000000000..32b473da0f9 --- /dev/null +++ b/doc/sphinxext/prs/6205.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c4a83c5a1792fa68315baa87f1d7b26d9b6d709b", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 40, + "d": 0 + }, + "mne/utils/tests/test_docs.py": { + "a": 0, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 5, + "d": 182 + }, + "mne/viz/backends/_vtki.py": { + "a": 24, + "d": 2 + }, + "mne/viz/backends/base_renderer.py": { + "a": 246, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6207.json b/doc/sphinxext/prs/6207.json new file mode 100644 index 00000000000..88ae51dcc60 --- /dev/null +++ b/doc/sphinxext/prs/6207.json @@ -0,0 +1,23 @@ +{ + 
"merge_commit_sha": "5a9ae3c3d66b6275fffec217cf560acf651913d7", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/evoked.py": { + "a": 7, + "d": 1 + }, + "tutorials/plot_eeg_erp.py": { + "a": 1, + "d": 1 + }, + "tutorials/plot_introduction.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6208.json b/doc/sphinxext/prs/6208.json new file mode 100644 index 00000000000..dd2042b7d1a --- /dev/null +++ b/doc/sphinxext/prs/6208.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ab49f9c124fe61f0d1c437ab33c2970833f2e71e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/manual/preprocessing/filter.rst": { + "a": 0, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/621.json b/doc/sphinxext/prs/621.json new file mode 100644 index 00000000000..92f774244ee --- /dev/null +++ b/doc/sphinxext/prs/621.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cc8077d73a383d0321408ef6def3bb0d4ef87ef4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 35, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6210.json b/doc/sphinxext/prs/6210.json new file mode 100644 index 00000000000..b38869acefa --- /dev/null +++ b/doc/sphinxext/prs/6210.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "bc94beb201ca0a7c7c9e6fcd86d9b793e8694194", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/defaults.py": { + "a": 9, + "d": 7 + }, + "mne/epochs.py": { + "a": 6, + "d": 4 + }, + "mne/io/pick.py": { + "a": 4, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 105, + "d": 146 + }, + "mne/viz/ica.py": { + "a": 6, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_epochs.py": { + "a": 36, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6211.json b/doc/sphinxext/prs/6211.json new file mode 100644 index 00000000000..e0ddd942eb0 --- /dev/null +++ b/doc/sphinxext/prs/6211.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3d33e8eeb3e77b50b6b405b8cdf0ebdd5d5d8658", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 272, + "d": 114 + }, + "mne/viz/tests/test_evoked.py": { + "a": 16, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 9, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 27, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 28, + "d": 10 + }, + "tutorials/plot_visualize_evoked.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6213.json b/doc/sphinxext/prs/6213.json new file mode 100644 index 00000000000..4042b705fd0 --- /dev/null +++ b/doc/sphinxext/prs/6213.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "39ae853190df11d5dcd49f142f268c815c6d260d", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 5, + "d": 0 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 87, + "d": 62 + }, + "mne/beamformer/tests/test_external.py": { + "a": 98, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 7, + "d": 7 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/6214.json b/doc/sphinxext/prs/6214.json new file mode 100644 index 00000000000..b067093de47 --- /dev/null +++ b/doc/sphinxext/prs/6214.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d0b8c005d3e14f911c6615788e4428052cbe2819", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6215.json b/doc/sphinxext/prs/6215.json new file mode 100644 index 00000000000..2025acfe5cb --- /dev/null +++ b/doc/sphinxext/prs/6215.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7a8404959f686ad7a0bcf9257836b1bd13c0fe40", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/annotations.py": { + "a": 9, + "d": 8 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6217.json b/doc/sphinxext/prs/6217.json new file mode 100644 index 00000000000..04a9c429810 --- /dev/null +++ b/doc/sphinxext/prs/6217.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "55c455b48884e4cff45219d4127c7706c30f7f71", + "authors": [ + { + "n": "Deleted user", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 20, + "d": 15 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6218.json b/doc/sphinxext/prs/6218.json new file mode 100644 index 00000000000..3b0a06923a3 --- /dev/null +++ b/doc/sphinxext/prs/6218.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "767cd9246370d25b132b4d98206095e15779044f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/_rap_music.py": { + "a": 4, + "d": 2 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/622.json b/doc/sphinxext/prs/622.json new file mode 100644 index 00000000000..e0f68d6ea04 --- /dev/null +++ b/doc/sphinxext/prs/622.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7689d310146aa908a596b250e69620f518c4f58b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 96, + "d": 23 + }, + "mne/tests/test_proj.py": { + "a": 23, + "d": 48 + }, + "mne/tests/test_source_estimate.py": { + "a": 12, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6220.json b/doc/sphinxext/prs/6220.json new file mode 100644 index 00000000000..b931cfa33e8 --- /dev/null +++ b/doc/sphinxext/prs/6220.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cc175020e8949b435a11cf9db82f6f25713508b2", + "authors": [ + { + "n": "Quentin Bertrand", + "e": "quentin.bertrand@mila.quebec" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 79, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6224.json b/doc/sphinxext/prs/6224.json new file mode 100644 index 00000000000..0f0eb835f83 --- /dev/null +++ b/doc/sphinxext/prs/6224.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "be294971ab3f6467fc6930404ce6c0166c3519b9", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/cited.rst": { + "a": 4, + "d": 191 + }, + 
"doc/sphinxext/cited_mne.py": { + "a": 0, + "d": 261 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6227.json b/doc/sphinxext/prs/6227.json new file mode 100644 index 00000000000..22aa954281b --- /dev/null +++ b/doc/sphinxext/prs/6227.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "454b69eb865e0dea7a3bd3d9795c010796946ac6", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 30, + "d": 0 + }, + "mne/beamformer/_dics.py": { + "a": 4, + "d": 11 + }, + "mne/beamformer/_lcmv.py": { + "a": 50, + "d": 7 + }, + "mne/beamformer/tests/test_external.py": { + "a": 31, + "d": 30 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6228.json b/doc/sphinxext/prs/6228.json new file mode 100644 index 00000000000..7de2ceffe0d --- /dev/null +++ b/doc/sphinxext/prs/6228.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a9bbb9eded90321545e211857eec935d387e065c", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + }, + { + "n": "Teon Brooks", + "e": "teon.brooks@gmail.com" + }, + { + "n": "Teon Brooks", + "e": "teon.brooks@gmail.com" + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 77, + "d": 121 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 14, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6229.json b/doc/sphinxext/prs/6229.json new file mode 100644 index 00000000000..8413635342f --- /dev/null +++ b/doc/sphinxext/prs/6229.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "297422c6ef2b36ce0b4ad736fa97b5f00ae2399b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6230.json b/doc/sphinxext/prs/6230.json new file mode 100644 index 00000000000..7f9c632b27b --- /dev/null +++ b/doc/sphinxext/prs/6230.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6a3a438fe23f0c459a50af8401a3e3083edd8fea", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 8, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 55, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6231.json b/doc/sphinxext/prs/6231.json new file mode 100644 index 00000000000..f0ecfea2123 --- /dev/null +++ b/doc/sphinxext/prs/6231.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d2089d28ba1dae9b0a1159046f235e1dd596f8ea", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6233.json b/doc/sphinxext/prs/6233.json new file mode 100644 index 00000000000..f62c2f29cc9 --- /dev/null +++ b/doc/sphinxext/prs/6233.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "a750523f8364c935263a6c6d154cc1248cf515ee", + "authors": [ + { + "n": "Maksymenko Kostiantyn", + "e": "makkostya@ukr.net" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Joan Massich", + "e": "sik@visor.udg.edu" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 59, + "d": 0 + }, + 
"doc/simulation.rst": { + "a": 20, + "d": 0 + }, + "examples/simulation/plot_stc_metrics.py": { + "a": 260, + "d": 0 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + }, + "mne/simulation/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/simulation/_metrics.py": { + "a": 3, + "d": 4 + }, + "mne/simulation/metrics/__init__.py": { + "a": 9, + "d": 0 + }, + "mne/simulation/metrics/metrics.py": { + "a": 572, + "d": 0 + }, + "mne/simulation/metrics/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/metrics/tests/test_metrics.py": { + "a": 241, + "d": 0 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 18, + "d": 12 + }, + "mne/utils/docs.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6234.json b/doc/sphinxext/prs/6234.json new file mode 100644 index 00000000000..99060216225 --- /dev/null +++ b/doc/sphinxext/prs/6234.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5d80e26f541d83dbd267c3e0957d8aa8e6a154f4", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "examples/realtime/plot_rt_feedback_server.py": { + "a": 3, + "d": 3 + }, + "mne/realtime/mock_lsl_stream.py": { + "a": 1, + "d": 1 + }, + "mne/realtime/tests/test_lsl_client.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6236.json b/doc/sphinxext/prs/6236.json new file mode 100644 index 00000000000..c66cb851ddd --- /dev/null +++ b/doc/sphinxext/prs/6236.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "360c51342d7de61310b5ef2935f0f68c0f9dfb81", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6237.json b/doc/sphinxext/prs/6237.json new file mode 100644 index 00000000000..85f983db63c --- /dev/null +++ b/doc/sphinxext/prs/6237.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b9dcbe1eac621e22b5cff6883e4b3a889846e303", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "setup.cfg": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6239.json b/doc/sphinxext/prs/6239.json new file mode 100644 index 00000000000..7213008dc0b --- /dev/null +++ b/doc/sphinxext/prs/6239.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "03bc75e9067483166bb8ebbb2827ccaf45425031", + "authors": [ + { + "n": "Oleh Kozynets", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 12, + "d": 10 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_vtki.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/624.json b/doc/sphinxext/prs/624.json new file mode 100644 index 00000000000..538aebb06cb --- /dev/null +++ b/doc/sphinxext/prs/624.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "389ff2de81b46424fb16af5b6a2d57be03d04646", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "AUTHORS.rst": { + "a": 11, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6240.json b/doc/sphinxext/prs/6240.json new file mode 100644 index 00000000000..86370aef3d4 --- /dev/null +++ b/doc/sphinxext/prs/6240.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ec8d35c35c8f550d3815eae8af4e9759f0d5627e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], 
+ "changes": { + "doc/whats_new.rst": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6241.json b/doc/sphinxext/prs/6241.json new file mode 100644 index 00000000000..2e3f34fb6aa --- /dev/null +++ b/doc/sphinxext/prs/6241.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ce9953a72bbd54109168f5a0fb2ff79a0969cf6c", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "tutorials/plot_epochs_to_data_frame.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6242.json b/doc/sphinxext/prs/6242.json new file mode 100644 index 00000000000..d2860060354 --- /dev/null +++ b/doc/sphinxext/prs/6242.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "24261065e59e82a84c83e4e8e006631c7d4795c3", + "authors": [ + { + "n": "Jeyhte Key", + "e": "jeythekey@tutanota.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/interpolation.py": { + "a": 5, + "d": 3 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 36, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6243.json b/doc/sphinxext/prs/6243.json new file mode 100644 index 00000000000..a36f9b1af4d --- /dev/null +++ b/doc/sphinxext/prs/6243.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2970a72758df09d4c4d901ea39bcd3e650012c37", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/simulation/plot_source_simulator.py": { + "a": 84, + "d": 0 + }, + "mne/simulation/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/simulation/source.py": { + "a": 207, + "d": 0 + }, + "mne/simulation/tests/test_source.py": { + "a": 176, + "d": 81 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6244.json b/doc/sphinxext/prs/6244.json new file mode 100644 index 00000000000..1b0bca520fd --- /dev/null +++ b/doc/sphinxext/prs/6244.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ba0b59cabf25d25989ca10bf4df5bdd2e8ef9b24", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6245.json b/doc/sphinxext/prs/6245.json new file mode 100644 index 00000000000..09be7f87c8e --- /dev/null +++ b/doc/sphinxext/prs/6245.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4e81a4857fc9b3a99ec6f409f4ddd9b63ff35154", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_cov.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6246.json b/doc/sphinxext/prs/6246.json new file mode 100644 index 00000000000..e0d3f903517 --- /dev/null +++ b/doc/sphinxext/prs/6246.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8ce77a0ad91844b0bb2a5f99bf383f1cc9d39fe6", + "authors": [ + { + "n": "Jeyhte Key", + "e": "jeythekey@tutanota.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 8, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 92, + "d": 78 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6247.json b/doc/sphinxext/prs/6247.json new file mode 100644 index 00000000000..f6ac495c92e --- /dev/null +++ b/doc/sphinxext/prs/6247.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"af340be8359c3dd2514e103b83306eca0f78c5f0", + "authors": [ + { + "n": "Jeyhte Key", + "e": "jeythekey@tutanota.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6251.json b/doc/sphinxext/prs/6251.json new file mode 100644 index 00000000000..fa24f37e08a --- /dev/null +++ b/doc/sphinxext/prs/6251.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "36fad3c74b6c6d4773287be16d848842df2741e3", + "authors": [ + { + "n": "tom", + "e": "tdonoghue.research@gmail.com" + } + ], + "changes": { + "mne/filter.py": { + "a": 7, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6252.json b/doc/sphinxext/prs/6252.json new file mode 100644 index 00000000000..71dc56d1e07 --- /dev/null +++ b/doc/sphinxext/prs/6252.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f80a8de283b45319e538b19c30e7eb9648380ca6", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/utils/_bunch.py": { + "a": 23, + "d": 13 + }, + "mne/utils/tests/test_bunch.py": { + "a": 25, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6253.json b/doc/sphinxext/prs/6253.json new file mode 100644 index 00000000000..f0f3ca11ead --- /dev/null +++ b/doc/sphinxext/prs/6253.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a96f0e0a62219b63868904e3330f1202e20cf67b", + "authors": [ + { + "n": "Jeyhte Key", + "e": "jeythekey@tutanota.com" + } + ], + "changes": { + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6254.json b/doc/sphinxext/prs/6254.json new file mode 100644 index 00000000000..837c56294a2 --- /dev/null +++ b/doc/sphinxext/prs/6254.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "39f46ade8e94c9b47acf40570c45c6c555c02820", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "CONTRIBUTING.rst": { + "a": 34, + "d": 0 + }, + "doc/_static/style.css": { + "a": 5, + "d": 1 + }, + "doc/advanced_setup.rst": { + "a": 4, + "d": 33 + }, + "doc/configure_git.rst": { + "a": 0, + "d": 613 + }, + "doc/contributing.rst": { + "a": 754, + "d": 154 + }, + "doc/customizing_git.rst": { + "a": 0, + "d": 131 + }, + "doc/documentation.rst": { + "a": 0, + "d": 2 + }, + "doc/install_mne_python.rst": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 13, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6255.json b/doc/sphinxext/prs/6255.json new file mode 100644 index 00000000000..d7981bd665b --- /dev/null +++ b/doc/sphinxext/prs/6255.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a0dbca96c3a1dfabb7bf28c298d04684b12e6cc7", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6260.json b/doc/sphinxext/prs/6260.json new file mode 100644 index 00000000000..ee694cea625 --- /dev/null +++ b/doc/sphinxext/prs/6260.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "14a0f195e7301c1504c72b3cc09476609b0b0029", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/annotations.py": { + "a": 18, + "d": 14 + }, + "mne/tests/test_annotations.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end 
of file diff --git a/doc/sphinxext/prs/6261.json b/doc/sphinxext/prs/6261.json new file mode 100644 index 00000000000..7c8c649806e --- /dev/null +++ b/doc/sphinxext/prs/6261.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6bc9643448c41aca2b628397d0d1e9d17643e139", + "authors": [ + { + "n": "Ivana Kojcic", + "e": null + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py": { + "a": 195, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6262.json b/doc/sphinxext/prs/6262.json new file mode 100644 index 00000000000..3f5e53f8f95 --- /dev/null +++ b/doc/sphinxext/prs/6262.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "c086e760dc1c1f138e4b153102574abd571f62bd", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/glossary.rst": { + "a": 10, + "d": 0 + }, + "doc/overview/roadmap.rst": { + "a": 3, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_evoked_ers_source_power.py": { + "a": 130, + "d": 0 + }, + "examples/inverse/plot_mne_cov_power.py": { + "a": 139, + "d": 0 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 92, + "d": 68 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 53, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 20, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6264.json b/doc/sphinxext/prs/6264.json new file mode 100644 index 00000000000..61092af9696 --- /dev/null +++ b/doc/sphinxext/prs/6264.json @@ -0,0 +1,435 @@ +{ + "merge_commit_sha": "37b00c4cc16982eb0d0f210acd20e0cef3fc539d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 14, + "d": 1 + }, + "doc/documentation.rst": { + "a": 54, + "d": 55 + }, + "doc/glossary.rst": { + "a": 7, + "d": 11 + }, + "doc/manual/cookbook.rst": { + "a": 2, + "d": 3 + }, + "doc/manual/datasets_index.rst": { + "a": 15, + "d": 15 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 2, + "d": 2 + }, + "doc/manual/source_localization/c_inverse.rst": { + "a": 1, + "d": 2 + }, + "doc/manual/source_localization/inverse.rst": { + "a": 4, + "d": 5 + }, + "doc/manual/source_localization/morph_stc.rst": { + "a": 8, + "d": 12 + }, + "doc/tutorials/philosophy.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 3, + "d": 3 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_megsim_data.py": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_opm_data.py": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_opm_rest_data.py": { + "a": 2, + "d": 0 + }, + "examples/datasets/spm_faces_dataset.py": { + "a": 2, + "d": 0 + }, + "examples/decoding/decoding_rsa.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_receptive_field_mtrf.py": { + "a": 2, + "d": 0 + }, + 
"examples/forward/plot_forward_sensitivity_maps.py": { + "a": 3, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 4, + "d": 3 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 2, + "d": 0 + }, + "examples/io/plot_elekta_epochs.py": { + "a": 2, + "d": 0 + }, + "examples/io/plot_objects_from_arrays.py": { + "a": 2, + "d": 0 + }, + "examples/io/plot_read_events.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_read_proj.py": { + "a": 1, + "d": 2 + }, + "examples/preprocessing/plot_otp.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_3d_to_2d.py": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 2, + "d": 0 + }, + "mne/event.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 2, + "d": 6 + }, + "mne/io/base.py": { + "a": 1, + "d": 3 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 2 + }, + "tutorials/discussions/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/discussions/plot_background_filtering.py": { + "a": 4, + "d": 4 + }, + "tutorials/discussions/plot_background_statistics.py": { + "a": 3, + "d": 1 + }, + "tutorials/epochs/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/epochs/plot_epochs_to_data_frame.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/plot_metadata_epochs.py": { + "a": 2, + "d": 1 + }, + "tutorials/epochs/plot_object_epochs.py": { + "a": 3, + "d": 3 + }, + "tutorials/epochs/plot_visualize_epochs.py": { + "a": 0, + "d": 0 + }, + "tutorials/evoked/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/evoked/plot_eeg_erp.py": { + "a": 0, + "d": 0 + }, + "tutorials/evoked/plot_object_evoked.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/plot_visualize_evoked.py": { + "a": 3, + "d": 3 + }, + "tutorials/evoked/plot_whitened.py": { + "a": 0, + "d": 0 + }, + "tutorials/intro/README.txt": { + "a": 7, + "d": 0 + }, + "tutorials/intro/plot_epoching_and_averaging.py": { + "a": 3, + "d": 4 + }, + "tutorials/intro/plot_info.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/plot_introduction.py": { + "a": 0, + "d": 0 + }, + "tutorials/intro/plot_object_annotations.py": { + "a": 5, + "d": 3 + }, + "tutorials/machine-learning/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/machine-learning/plot_receptive_field.py": { + "a": 0, + "d": 0 + }, + "tutorials/machine-learning/plot_sensors_decoding.py": { + "a": 1, + "d": 1 + }, + "tutorials/misc/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/misc/plot_configuration.py": { + "a": 0, + "d": 0 + }, + "tutorials/misc/plot_ecog.py": { + "a": 0, + "d": 0 + }, + "tutorials/misc/plot_modifying_data_inplace.py": { + "a": 0, + "d": 0 + }, + "tutorials/plot_python_intro.py": { + "a": 0, + "d": 46 + }, + "tutorials/preprocessing/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/preprocessing/plot_artifacts_correction_filtering.py": { + "a": 3, + "d": 1 + }, + "tutorials/preprocessing/plot_artifacts_correction_ica.py": { + "a": 4, + "d": 3 + }, + "tutorials/preprocessing/plot_artifacts_correction_maxwell_filtering.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/plot_artifacts_correction_rejection.py": { + "a": 4, + "d": 3 + }, + "tutorials/preprocessing/plot_artifacts_correction_ssp.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/plot_artifacts_detection.py": { + "a": 9, + "d": 10 + }, + "tutorials/preprocessing/plot_ica_from_raw.py": { + "a": 0, + "d": 0 + }, + "tutorials/raw/README.txt": { + "a": 6, + "d": 0 + }, + 
"tutorials/raw/plot_object_raw.py": { + "a": 4, + "d": 2 + }, + "tutorials/raw/plot_visualize_raw.py": { + "a": 2, + "d": 0 + }, + "tutorials/sample-datasets/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/sample-datasets/plot_brainstorm_auditory.py": { + "a": 2, + "d": 0 + }, + "tutorials/sample-datasets/plot_brainstorm_phantom_ctf.py": { + "a": 0, + "d": 0 + }, + "tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py": { + "a": 2, + "d": 0 + }, + "tutorials/sample-datasets/plot_phantom_4DBTi.py": { + "a": 0, + "d": 0 + }, + "tutorials/sample-datasets/plot_sleep.py": { + "a": 2, + "d": 0 + }, + "tutorials/simulation/README.txt": { + "a": 6, + "d": 0 + }, + "tutorials/simulation/plot_creating_data_structures.py": { + "a": 1, + "d": 2 + }, + "tutorials/simulation/plot_dics.py": { + "a": 0, + "d": 0 + }, + "tutorials/simulation/plot_point_spread.py": { + "a": 0, + "d": 0 + }, + "tutorials/source-modeling/README.txt": { + "a": 4, + "d": 0 + }, + "tutorials/source-modeling/plot_background_freesurfer.py": { + "a": 4, + "d": 3 + }, + "tutorials/source-modeling/plot_compute_covariance.py": { + "a": 0, + "d": 0 + }, + "tutorials/source-modeling/plot_dipole_fit.py": { + "a": 0, + "d": 0 + }, + "tutorials/source-modeling/plot_dipole_orientations.py": { + "a": 2, + "d": 0 + }, + "tutorials/source-modeling/plot_eeg_no_mri.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 3, + "d": 1 + }, + "tutorials/source-modeling/plot_mne_solutions.py": { + "a": 2, + "d": 0 + }, + "tutorials/source-modeling/plot_object_source_estimate.py": { + "a": 6, + "d": 6 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 0, + "d": 0 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-sensor-space/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_erp.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_time_frequency.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-source-space/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 2, + "d": 0 + }, + "tutorials/time-freq/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/time-freq/plot_sensors_time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/627.json b/doc/sphinxext/prs/627.json new file mode 100644 index 00000000000..581486c967a --- /dev/null +++ b/doc/sphinxext/prs/627.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c65515c51f3dffea9cb53b42ae6522805c365cb9", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/plot_delayed_ssp_projector.py": { + "a": 11, + "d": 7 + }, + "mne/epochs.py": { + "a": 62, + "d": 36 + }, + "mne/tests/test_epochs.py": { + "a": 19, + "d": 6 + }, + "mne/tests/test_viz.py": { + "a": 2, + "d": 1 + }, + "mne/viz.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6270.json b/doc/sphinxext/prs/6270.json new file mode 100644 index 00000000000..7911a80fb8f --- /dev/null +++ b/doc/sphinxext/prs/6270.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f39331cc51b7043ae1726561218d85a638ec1a7f", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/stats/parametric.py": { + "a": 1, + "d": 7 + }, + "mne/viz/evoked.py": { + "a": 6, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 16, + "d": 4 + }, + "mne/viz/tests/test_evoked.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6272.json b/doc/sphinxext/prs/6272.json new file mode 100644 index 00000000000..160837b783a --- /dev/null +++ b/doc/sphinxext/prs/6272.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "7d64af376f9c991b3645ca4ec95e516405b01c8d", + "authors": [ + { + "n": "Dirk Gütlin", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 13, + "d": 7 + }, + "mne/io/__init__.py": { + "a": 5, + "d": 4 + }, + "mne/io/curry/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/curry/curry.py": { + "a": 345, + "d": 0 + }, + "mne/io/curry/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 266, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6274.json b/doc/sphinxext/prs/6274.json new file mode 100644 index 00000000000..51893f46220 --- /dev/null +++ b/doc/sphinxext/prs/6274.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "86b8aeb54ff914064c96b9e21c884d06224c45f4", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/git_links.inc": { + "a": 0, + "d": 8 + }, + "doc/install_mne_c.rst": { + "a": 25, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6275.json b/doc/sphinxext/prs/6275.json new file mode 100644 index 00000000000..a7c6398b216 --- /dev/null +++ b/doc/sphinxext/prs/6275.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1293e1dfebb5ebbaa39fb6aca4c8257d0c05f792", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/628.json b/doc/sphinxext/prs/628.json new file mode 100644 index 00000000000..41dbcf163d2 --- /dev/null +++ b/doc/sphinxext/prs/628.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b01cc08273304f006f73a1382248d581d940f8f0", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 1 + }, + "examples/plot_evoked_delayed_ssp.py": { + "a": 15, + "d": 13 + }, + "examples/plot_evoked_topomap_delayed_ssp.py": { + "a": 61, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/evoked.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_viz.py": { + "a": 1, + "d": 0 + }, + "mne/viz.py": { + "a": 70, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6280.json b/doc/sphinxext/prs/6280.json new file mode 100644 index 00000000000..03733fe3ffd --- /dev/null +++ b/doc/sphinxext/prs/6280.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c61298e30af477aa9d8b645910925b8b4f96f402", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/fixes.py": { + "a": 11, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 28, + "d": 13 + }, + "mne/viz/utils.py": { + "a": 31, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6281.json b/doc/sphinxext/prs/6281.json new file mode 100644 index 00000000000..28c3ec138a3 --- /dev/null +++ b/doc/sphinxext/prs/6281.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "499ae778697c8a06e8b258a45e7de1aaabe18ab5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/dipole.py": { + "a": 4, + "d": 1 + }, + "tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py": { + "a": 12, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6285.json b/doc/sphinxext/prs/6285.json new file mode 100644 index 00000000000..bed9c3adb95 --- /dev/null +++ b/doc/sphinxext/prs/6285.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4a52511469992d2abe0686bc6d1588f7469cbb3f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/receptive_field.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 62, + "d": 50 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6286.json b/doc/sphinxext/prs/6286.json new file mode 100644 index 00000000000..2f899d9e76f --- /dev/null +++ b/doc/sphinxext/prs/6286.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2682e32be43af610bdb67f5eabfbfcb1472a3e7f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 23, + "d": 4 + }, + "doc/manual/datasets_index.rst": { + "a": 2, + "d": 0 + }, + "doc/manual/io.rst": { + "a": 3, + "d": 1 + }, + "tutorials/intro/plot_introduction.py": { + "a": 339, + "d": 366 + }, + "tutorials/time-freq/plot_sensors_time_frequency.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6288.json b/doc/sphinxext/prs/6288.json new file mode 100644 index 00000000000..323f16a14b8 --- /dev/null +++ b/doc/sphinxext/prs/6288.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5d58b3cca5e1426981820444ec0e6f8bef92044f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/io/pick.py": { + "a": 13, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 6, + "d": 9 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6289.json b/doc/sphinxext/prs/6289.json new file mode 100644 index 00000000000..c4bd799d2fa --- /dev/null +++ b/doc/sphinxext/prs/6289.json @@ 
-0,0 +1,15 @@ +{ + "merge_commit_sha": "d5752051b37f74713233929382bcc632d404f837", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/629.json b/doc/sphinxext/prs/629.json new file mode 100644 index 00000000000..1a1bc784248 --- /dev/null +++ b/doc/sphinxext/prs/629.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "dc8eccbfcee3a7e520c77552f60afb24da041955", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/tests/test_viz.py": { + "a": 76, + "d": 11 + }, + "mne/viz.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6291.json b/doc/sphinxext/prs/6291.json new file mode 100644 index 00000000000..7cd485e919e --- /dev/null +++ b/doc/sphinxext/prs/6291.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "9db91c1775617fd26c161a8ee8cf68b8a5297009", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 47, + "d": 20 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 37, + "d": 0 + }, + "mne/io/open.py": { + "a": 23, + "d": 5 + }, + "mne/io/tag.py": { + "a": 6, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6293.json b/doc/sphinxext/prs/6293.json new file mode 100644 index 00000000000..3224e25fcd8 --- /dev/null +++ b/doc/sphinxext/prs/6293.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ee0b69b176951b22c1a1761122f051619881277c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/tests/test_datasets.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6294.json b/doc/sphinxext/prs/6294.json new file mode 100644 index 00000000000..257ccf5e746 --- /dev/null +++ b/doc/sphinxext/prs/6294.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "9a5d3743a831c2b46721727fabcf7aacd8032854", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 8, + "d": 0 + }, + "mne/decoding/transformer.py": { + "a": 7, + "d": 25 + }, + "mne/filter.py": { + "a": 248, + "d": 362 + }, + "mne/fixes.py": { + "a": 14, + "d": 2 + }, + "mne/io/base.py": { + "a": 62, + "d": 171 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 3 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 4 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 148, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 117, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 90, + "d": 33 + }, + "tutorials/discussions/plot_background_filtering.py": { + "a": 212, + "d": 191 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6296.json b/doc/sphinxext/prs/6296.json new file mode 100644 index 00000000000..f3a475a0d08 --- /dev/null +++ b/doc/sphinxext/prs/6296.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fbec15ccfaba7d9ab8d817a10073450f1cec8811", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6297.json b/doc/sphinxext/prs/6297.json new 
file mode 100644 index 00000000000..e897503703b --- /dev/null +++ b/doc/sphinxext/prs/6297.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1dfbe87ef75a33aa59927c0e1692c6cbc0574189", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "setup.cfg": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6299.json b/doc/sphinxext/prs/6299.json new file mode 100644 index 00000000000..2eb7c3c750a --- /dev/null +++ b/doc/sphinxext/prs/6299.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "ae02bcba43a50a70b714ca19a62ba155e6320050", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/git_links.inc": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 0, + "d": 3 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 2, + "d": 2 + }, + "tutorials/source-modeling/plot_dipole_fit.py": { + "a": 2, + "d": 4 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/63.json b/doc/sphinxext/prs/63.json new file mode 100644 index 00000000000..d0dcd55e7c1 --- /dev/null +++ b/doc/sphinxext/prs/63.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "244c2bd28d79da67bc7c901442f5c93803b870a4", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 3 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 11, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 22, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/630.json b/doc/sphinxext/prs/630.json new file mode 100644 index 00000000000..98a85f8ce79 --- /dev/null +++ b/doc/sphinxext/prs/630.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e5fb4eff992b87f349233a3e57c85c62f931bfea", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 113, + "d": 97 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6300.json b/doc/sphinxext/prs/6300.json new file mode 100644 index 00000000000..0f5ea025e2f --- /dev/null +++ b/doc/sphinxext/prs/6300.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "022acdb68ce11c6f149f414018d00c09c3ba10bb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 5, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6301.json b/doc/sphinxext/prs/6301.json new file mode 100644 index 00000000000..e4a8b5f1714 --- /dev/null +++ b/doc/sphinxext/prs/6301.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "56c408d363a01043594b6965b5ede3e1123bdcff", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "conftest.py": { + "a": 5, + "d": 5 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 14, + "d": 14 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 7, + "d": 7 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6303.json b/doc/sphinxext/prs/6303.json new file mode 100644 index 00000000000..46829f73f01 --- /dev/null +++ b/doc/sphinxext/prs/6303.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8cbdcccd31910111263d90bf61239a24bce15fcc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/gui/_viewer.py": { + "a": 4, + "d": 0 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 27, + "d": 26 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6305.json b/doc/sphinxext/prs/6305.json new file mode 100644 index 00000000000..5daa18f4b22 --- /dev/null +++ b/doc/sphinxext/prs/6305.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "017d156706984b88a524b146ec71415c65b42391", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_dics_source_power.py": { + "a": 43, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6306.json b/doc/sphinxext/prs/6306.json new file mode 100644 index 00000000000..40d789f2577 --- /dev/null +++ b/doc/sphinxext/prs/6306.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ef7c95fd4ec9256753f81e7572d3ecd9d44eff36", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py": { + "a": 7, + "d": 8 + }, + "tutorials/sample-datasets/plot_sleep.py": { + "a": 7, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6307.json b/doc/sphinxext/prs/6307.json new file mode 100644 index 00000000000..0b7951d182a 
--- /dev/null +++ b/doc/sphinxext/prs/6307.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0ee91f3dff16bdd4da8ff8bebcabb0b143559cec", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 12, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6308.json b/doc/sphinxext/prs/6308.json new file mode 100644 index 00000000000..6b9e24d9c8b --- /dev/null +++ b/doc/sphinxext/prs/6308.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "57ad16858a6c6a94831ac293b0eb45bedd5370fd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6309.json b/doc/sphinxext/prs/6309.json new file mode 100644 index 00000000000..636fb59546e --- /dev/null +++ b/doc/sphinxext/prs/6309.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5bc0a8ff7a4abc047c845b3f0f0d0edf71bb05e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 0, + "d": 1 + }, + "mne/conftest.py": { + "a": 55, + "d": 0 + }, + "setup.cfg": { + "a": 0, + "d": 38 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6310.json b/doc/sphinxext/prs/6310.json new file mode 100644 index 00000000000..b9cdea657d5 --- /dev/null +++ b/doc/sphinxext/prs/6310.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0c76381a84153f0b44e5db132322f3fe4e14f0c9", + "authors": [ + { + "n": "Zhenya", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/flat.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_annotations.py": { + "a": 20, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6311.json b/doc/sphinxext/prs/6311.json new file mode 100644 index 00000000000..97700fdf08e --- /dev/null +++ b/doc/sphinxext/prs/6311.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "14d05270290a3c3b356cbd6de98c7146a8b0064f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/utils/_testing.py": { + "a": 8, + "d": 6 + }, + "mne/utils/misc.py": { + "a": 5, + "d": 3 + }, + "mne/viz/backends/renderer.py": { + "a": 4, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6313.json b/doc/sphinxext/prs/6313.json new file mode 100644 index 00000000000..637b631b39a --- /dev/null +++ b/doc/sphinxext/prs/6313.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0f7c7aa1d8a8b59030cb62b0fa7d770847f5635d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_annotations.py": { + "a": 19, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6314.json b/doc/sphinxext/prs/6314.json new file mode 100644 index 00000000000..4d055619386 --- /dev/null +++ b/doc/sphinxext/prs/6314.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5a8533a2ab269c65dd61d7131a9aa9ffab52142b", + "authors": [ + { + "n": "Christopher J. 
Bailey", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6316.json b/doc/sphinxext/prs/6316.json new file mode 100644 index 00000000000..6f8b259cd0b --- /dev/null +++ b/doc/sphinxext/prs/6316.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "419c8e4437ec6439f0681313ab27506fbb80ea7b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 0, + "d": 9 + }, + "mne/tests/test_epochs.py": { + "a": 27, + "d": 17 + }, + "setup.cfg": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6317.json b/doc/sphinxext/prs/6317.json new file mode 100644 index 00000000000..9603c3ca73b --- /dev/null +++ b/doc/sphinxext/prs/6317.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3b130c8dff4b766da4788a788f5401513f57d9f8", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/contributing.rst": { + "a": 23, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6318.json b/doc/sphinxext/prs/6318.json new file mode 100644 index 00000000000..7e3f8ba592b --- /dev/null +++ b/doc/sphinxext/prs/6318.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7cd131cc246f53b0754082979d3293206c8fe235", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/_make_forward.py": { + "a": 3, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 19, + "d": 10 + }, + "mne/tests/test_bem.py": { + "a": 4, + "d": 4 + }, + "mne/transforms.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6319.json b/doc/sphinxext/prs/6319.json new file mode 100644 index 00000000000..2668f0fda92 --- /dev/null +++ b/doc/sphinxext/prs/6319.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3926e12badfb5ca75aa375680f98403eba46170e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/632.json b/doc/sphinxext/prs/632.json new file mode 100644 index 00000000000..91f1d32ff6f --- /dev/null +++ b/doc/sphinxext/prs/632.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f69e35dbd7b6137fe0cf94838a1deac41245eab8", + "authors": [ + { + "n": "Yoursa BEKHTI", + "e": "ybekhti@is222485.intra.cea.fr" + } + ], + "changes": { + "mne/layouts/layout.py": { + "a": 36, + "d": 20 + }, + "mne/viz.py": { + "a": 12, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6320.json b/doc/sphinxext/prs/6320.json new file mode 100644 index 00000000000..bcb312c4f99 --- /dev/null +++ b/doc/sphinxext/prs/6320.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "805ff576565939a1cc970b7b533bb7f131bc7b3c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/defaults.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 8, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 12, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 0, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 0, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 10, + "d": 2 + }, + "mne/viz/tests/test_utils.py": { + "a": 4, + "d": 5 + }, + "mne/viz/topo.py": { + "a": 4, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 11, + "d": 12 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/6323.json b/doc/sphinxext/prs/6323.json new file mode 100644 index 00000000000..0176e13ecf1 --- /dev/null +++ b/doc/sphinxext/prs/6323.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b31e573bd1bb97ab8daf6e813a6f5fa77ba266c4", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/intro/plot_introduction.py": { + "a": 15, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6324.json b/doc/sphinxext/prs/6324.json new file mode 100644 index 00000000000..03219d42e9f --- /dev/null +++ b/doc/sphinxext/prs/6324.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9d5a4a5f87ffa7ed0e1a659ae89f76b724ff51ee", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 16, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6326.json b/doc/sphinxext/prs/6326.json new file mode 100644 index 00000000000..5bdd3887942 --- /dev/null +++ b/doc/sphinxext/prs/6326.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3454474192ec8cb770b07d4c57dc0bcc7b1640a5", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/annotations.py": { + "a": 27, + "d": 11 + }, + "mne/io/brainvision/brainvision.py": { + "a": 38, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 64, + "d": 21 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_annotations.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6327.json b/doc/sphinxext/prs/6327.json new file mode 100644 index 00000000000..7bd177ed406 --- /dev/null +++ b/doc/sphinxext/prs/6327.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3c9073f2c872af70db696e577420ab329495966f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6328.json b/doc/sphinxext/prs/6328.json new file mode 100644 index 00000000000..54709a3ff3b --- /dev/null +++ b/doc/sphinxext/prs/6328.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "184486630b05c88948fd9044f40db98b707fe2d4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "logo/generate_mne_logos.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6331.json b/doc/sphinxext/prs/6331.json new file mode 100644 index 00000000000..4780de18f06 --- /dev/null +++ b/doc/sphinxext/prs/6331.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4ff3bad13ac4a37fe0c15128d41adc789a9ca510", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_hf_sef_data.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 9, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6333.json b/doc/sphinxext/prs/6333.json new file mode 100644 index 00000000000..b2c737ed7df --- /dev/null +++ b/doc/sphinxext/prs/6333.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "53520484c6df836d4acbc0ebbad8ecc5a665fb1d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 1, + "d": 1 + }, + 
"examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 8, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 8, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6334.json b/doc/sphinxext/prs/6334.json new file mode 100644 index 00000000000..0e1ce3dbb3b --- /dev/null +++ b/doc/sphinxext/prs/6334.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "fe7ee249d09f9b69453f23d1a5114dbed47927dd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + "mne/datasets/sleep_physionet/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 3, + "d": 3 + }, + "tutorials/sample-datasets/plot_sleep.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6335.json b/doc/sphinxext/prs/6335.json new file mode 100644 index 00000000000..7846cd3ace4 --- /dev/null +++ b/doc/sphinxext/prs/6335.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "724d6defd1053e92263250524c4c9c0a4c803f63", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 4 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_receptive_field_mtrf.py": { + "a": 1, + "d": 1 + }, + "ignore_words.txt": { + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6336.json b/doc/sphinxext/prs/6336.json new file mode 100644 index 00000000000..245e43cca2f --- /dev/null +++ b/doc/sphinxext/prs/6336.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5105bad5229c93eabb09697aa3438008bf4b1f71", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 1 + }, + "tutorials/intro/plot_configuration.py": { + "a": 230, + "d": 0 + }, + "tutorials/misc/plot_configuration.py": { + "a": 0, + "d": 88 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6337.json b/doc/sphinxext/prs/6337.json new file mode 100644 index 00000000000..4bda12e3171 --- /dev/null +++ b/doc/sphinxext/prs/6337.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "441181daff6065c3ef6ee8daa9df3d19196fd129", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + "doc/_templates/navbar.html": { + "a": 2, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/634.json b/doc/sphinxext/prs/634.json new file mode 100644 index 00000000000..14a97d495bd --- /dev/null +++ b/doc/sphinxext/prs/634.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b28ec9b08101f6b4441bd486d47692c3b6b4ef28", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/plot_topo_compare_conditions.py": { + "a": 9, + "d": 5 + }, + "mne/layouts/layout.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_viz.py": { + "a": 11, + "d": 0 + }, + "mne/viz.py": { + "a": 155, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6342.json b/doc/sphinxext/prs/6342.json new file mode 100644 index 00000000000..c28c6a8eef8 --- /dev/null +++ b/doc/sphinxext/prs/6342.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "f26d81604a45577ff17bb2e63eb57cf66219eaa1", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/plot_ftclient_rt_average.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/plot_ftclient_rt_compute_psd.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/plot_lslclient_rt.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/plot_rt_feedback_server.py": { + "a": 1, + "d": 1 + }, + "examples/realtime/rt_feedback_client.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/realtime/client.py": { + "a": 5, + "d": 1 + }, + "mne/realtime/epochs.py": { + "a": 6, + "d": 1 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 6, + "d": 1 + }, + "mne/realtime/lsl_client.py": { + "a": 6, + "d": 1 + }, + "mne/realtime/mock_lsl_stream.py": { + "a": 6, + "d": 1 + }, + "mne/realtime/mockclient.py": { + "a": 6, + "d": 1 + }, + "mne/realtime/stim_server_client.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6346.json b/doc/sphinxext/prs/6346.json new file mode 100644 index 00000000000..1f8b5f05103 --- /dev/null +++ b/doc/sphinxext/prs/6346.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0e461822c052d058cdea386e08200a04451c65a1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 13, + "d": 10 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6347.json b/doc/sphinxext/prs/6347.json new file mode 100644 index 00000000000..505d18a6365 --- /dev/null +++ b/doc/sphinxext/prs/6347.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "10f52395c8b9ae8f121d9401d0219c0eadb4d77a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/event.py": { + "a": 10, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6348.json b/doc/sphinxext/prs/6348.json new file mode 100644 index 00000000000..0dd02f6022c --- /dev/null +++ b/doc/sphinxext/prs/6348.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f0a1ae5b8803dcb3c6486b4c0089ba3b1a490c48", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 4, + "d": 4 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 17 + }, + "mne/io/cnt/cnt.py": { + "a": 6, + "d": 5 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 11, + "d": 17 + }, + "mne/io/eeglab/eeglab.py": { + "a": 4, + "d": 18 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 2, + "d": 7 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/635.json b/doc/sphinxext/prs/635.json new file mode 100644 index 00000000000..dddc239c506 --- /dev/null +++ b/doc/sphinxext/prs/635.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "38f133e51eb8bd74efd7c48e690a610b4d159b99", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 0, + "d": 5 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6350.json b/doc/sphinxext/prs/6350.json new file mode 100644 index 00000000000..9a520f384d4 --- /dev/null +++ b/doc/sphinxext/prs/6350.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "d12fffa6f73c4fc8f99822d649821e71f3e200cb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 12 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 18 + }, + "doc/whats_new.rst": { + "a": 9, + "d": 9 + }, + "examples/realtime/README.txt": { + "a": 0, + "d": 5 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 0, + "d": 61 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 0, + "d": 122 + }, + "examples/realtime/plot_ftclient_rt_average.py": { + "a": 0, + "d": 104 + }, + "examples/realtime/plot_ftclient_rt_compute_psd.py": { + "a": 0, + "d": 90 + }, + "examples/realtime/plot_lslclient_rt.py": { + "a": 0, + "d": 51 + }, + "examples/realtime/plot_rt_feedback_server.py": { + "a": 0, + "d": 124 + }, + "examples/realtime/rt_feedback_client.py": { + "a": 0, + "d": 71 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6351.json b/doc/sphinxext/prs/6351.json new file mode 100644 index 00000000000..d4ee9e3d647 --- /dev/null +++ b/doc/sphinxext/prs/6351.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "ca3bb6392e75c6f80707ce36241667fbe0e8d95e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/documentation.rst": { + "a": 1, + "d": 12 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 18 + }, + "doc/whats_new.rst": { + "a": 18, + "d": 10 + }, + "examples/realtime/README.txt": { + "a": 0, + "d": 5 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 0, + "d": 61 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 0, + "d": 122 + }, + "examples/realtime/plot_ftclient_rt_average.py": { + "a": 0, + "d": 104 + }, + "examples/realtime/plot_ftclient_rt_compute_psd.py": { + "a": 0, + "d": 90 + }, + "examples/realtime/plot_lslclient_rt.py": { + "a": 0, + "d": 51 + }, + "examples/realtime/plot_rt_feedback_server.py": { + "a": 0, + "d": 124 + }, + "examples/realtime/rt_feedback_client.py": { + "a": 0, + "d": 71 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6352.json b/doc/sphinxext/prs/6352.json new file mode 100644 index 00000000000..ec10499a919 --- /dev/null +++ b/doc/sphinxext/prs/6352.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "30dbe3de08772c2b27515e7a98de988d926e8727", + "authors": [ + 
{ + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 0, + "d": 1 + }, + "mne/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/realtime/__init__.py": { + "a": 0, + "d": 17 + }, + "mne/realtime/base_client.py": { + "a": 0, + "d": 212 + }, + "mne/realtime/client.py": { + "a": 0, + "d": 372 + }, + "mne/realtime/epochs.py": { + "a": 0, + "d": 588 + }, + "mne/realtime/fieldtrip_client.py": { + "a": 0, + "d": 264 + }, + "mne/realtime/lsl_client.py": { + "a": 0, + "d": 127 + }, + "mne/realtime/mock_lsl_stream.py": { + "a": 0, + "d": 95 + }, + "mne/realtime/mockclient.py": { + "a": 0, + "d": 204 + }, + "mne/realtime/stim_server_client.py": { + "a": 0, + "d": 306 + }, + "mne/realtime/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/realtime/tests/test_fieldtrip_client.py": { + "a": 0, + "d": 141 + }, + "mne/realtime/tests/test_lsl_client.py": { + "a": 0, + "d": 49 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 0, + "d": 386 + }, + "mne/realtime/tests/test_stim_client_server.py": { + "a": 0, + "d": 82 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6353.json b/doc/sphinxext/prs/6353.json new file mode 100644 index 00000000000..32a89481e49 --- /dev/null +++ b/doc/sphinxext/prs/6353.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "34d358d24bad903d65d184ac7720d6ee56d57741", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 4 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6355.json b/doc/sphinxext/prs/6355.json new file mode 100644 index 00000000000..eb0943f23cf --- /dev/null +++ b/doc/sphinxext/prs/6355.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "06c7785aabe72fc7394cc6e6ba1c7d5aec1d2c76", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 15, + "d": 6 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 11 + }, + "mne/tests/test_annotations.py": { + "a": 3, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/utils/misc.py": { + "a": 10, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6356.json b/doc/sphinxext/prs/6356.json new file mode 100644 index 00000000000..94936dbf438 --- /dev/null +++ b/doc/sphinxext/prs/6356.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "580373859af9e65518660a0b8357003e1a5fdb95", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/discussions/plot_background_filtering.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6357.json b/doc/sphinxext/prs/6357.json new file mode 100644 index 00000000000..55ab509c9b9 --- /dev/null +++ b/doc/sphinxext/prs/6357.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "595ab7e66fcc4854fa82eb6c00d5b20d5046fb3d", + "authors": [ + { + "n": "Kambiz Tavabi", + "e": "ktavabi@gmail.com" + } + ], + "changes": { + "mne/commands/mne_browse_raw.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6358.json b/doc/sphinxext/prs/6358.json new file mode 100644 index 00000000000..65d9af02dc3 --- /dev/null +++ b/doc/sphinxext/prs/6358.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "532f6589e35c4519dc16bcf8768ed758e6b3fe45", + "authors": [ + { + "n": 
"Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/annotation-controls.png": { + "a": 0, + "d": 0 + }, + "doc/glossary.rst": { + "a": 14, + "d": 10 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 14, + "d": 6 + }, + "mne/preprocessing/infomax_.py": { + "a": 5, + "d": 4 + }, + "mne/simulation/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/source.py": { + "a": 10, + "d": 4 + }, + "mne/stats/cluster_level.py": { + "a": 20, + "d": 8 + }, + "mne/stats/permutations.py": { + "a": 5, + "d": 2 + }, + "mne/utils/check.py": { + "a": 6, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/plot_introduction.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/plot_object_annotations.py": { + "a": 262, + "d": 209 + }, + "tutorials/raw/plot_annotating_raw.py": { + "a": 249, + "d": 0 + }, + "tutorials/raw/plot_events.py": { + "a": 179, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6360.json b/doc/sphinxext/prs/6360.json new file mode 100644 index 00000000000..e1ed3b638eb --- /dev/null +++ b/doc/sphinxext/prs/6360.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b9b2c6884dc663ef42338bdccc9e7838820b29b3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 25, + "d": 7 + }, + "mne/io/constants.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 30, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6366.json b/doc/sphinxext/prs/6366.json new file mode 100644 index 00000000000..ca0de30bed8 --- /dev/null +++ b/doc/sphinxext/prs/6366.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "109312c1d3dac7f6204ec347a081adaf29793e21", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 2 + }, + "doc/documentation.rst": { + "a": 3, + "d": 3 + }, + "doc/git_links.inc": { + "a": 0, + "d": 99 + }, + "doc/glossary.rst": { + "a": 1, + "d": 3 + }, + "doc/index.rst": { + "a": 0, + "d": 3 + }, + "doc/install/advanced.rst": { + "a": 1, + "d": 1 + }, + "doc/install/contributing.rst": { + "a": 30, + "d": 30 + }, + "doc/install/freesurfer.rst": { + "a": 10, + "d": 8 + }, + "doc/install/index.rst": { + "a": 6, + "d": 7 + }, + "doc/install/mne_c.rst": { + "a": 5, + "d": 5 + }, + "doc/install/mne_python.rst": { + "a": 3, + "d": 3 + }, + "doc/install/pre_install.rst": { + "a": 17, + "d": 18 + }, + "doc/known_projects.inc": { + "a": 0, + "d": 63 + }, + "doc/links.inc": { + "a": 216, + "d": 4 + }, + "doc/this_project.inc": { + "a": 0, + "d": 12 + }, + "doc/tutorial_links.inc": { + "a": 0, + "d": 5 + }, + "doc/tutorials/philosophy.rst": { + "a": 1, + "d": 1 + }, + "tutorials/discussions/plot_background_statistics.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/plot_configuration.py": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/plot_sensors_decoding.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6368.json b/doc/sphinxext/prs/6368.json new file mode 100644 index 00000000000..1705fcdf9e1 --- /dev/null +++ b/doc/sphinxext/prs/6368.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "087a88edc27e41f63b1a3944fa5055c38893bed4", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + 
"doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/digitization/__init__.py": { + "a": 5, + "d": 0 + }, + "mne/digitization/_utils.py": { + "a": 393, + "d": 0 + }, + "mne/digitization/base.py": { + "a": 75, + "d": 0 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_marker_gui.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 17, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 1 + }, + "mne/io/artemis123/utils.py": { + "a": 5, + "d": 24 + }, + "mne/io/bti/bti.py": { + "a": 12, + "d": 52 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 6, + "d": 6 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 38, + "d": 100 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 11, + "d": 267 + }, + "mne/io/tests/test_meas_info.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6369.json b/doc/sphinxext/prs/6369.json new file mode 100644 index 00000000000..874df983875 --- /dev/null +++ b/doc/sphinxext/prs/6369.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "dbda5849082e686770c02e12d97f2ef6f048937f", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/digitization/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/digitization/base.py": { + "a": 31, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 3, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_digitization.py": { + "a": 40, + "d": 0 + }, + "mne/utils/_bunch.py": { + "a": 3, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6370.json b/doc/sphinxext/prs/6370.json new file mode 100644 index 00000000000..57e048f8161 --- /dev/null +++ b/doc/sphinxext/prs/6370.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "62c0af865811cb4a2d22e231c2519a6e06f4bac4", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 38, + "d": 35 + }, + "mne/viz/backends/renderer.py": { + "a": 49, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6372.json b/doc/sphinxext/prs/6372.json new file mode 100644 index 00000000000..ea9b2efc665 --- /dev/null +++ b/doc/sphinxext/prs/6372.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "56bc9909a35081d1bf86fbbab0bf4dfaf5e9cc33", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 6, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "doc/documentation.rst": { + "a": 361, + "d": 649 + }, + "doc/sphinxext/sphinx_bootstrap_divs/__init__.py": { + "a": 160, + "d": 0 + }, + "doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.css": { + "a": 9, + "d": 0 + }, + "doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.js": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6373.json b/doc/sphinxext/prs/6373.json new file mode 100644 index 
00000000000..e72b48f54e0 --- /dev/null +++ b/doc/sphinxext/prs/6373.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5e3b220d121f83b5edbf0d2c6fe6586b4f493d29", + "authors": [ + { + "n": "Ivana Kojcic", + "e": null + } + ], + "changes": { + "examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py": { + "a": 67, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6374.json b/doc/sphinxext/prs/6374.json new file mode 100644 index 00000000000..b044c5f835d --- /dev/null +++ b/doc/sphinxext/prs/6374.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5093d0061500787c9c8a617d8ac7f4020afb460c", + "authors": [ + { + "n": "Dirk Gütlin", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6376.json b/doc/sphinxext/prs/6376.json new file mode 100644 index 00000000000..89b182ed540 --- /dev/null +++ b/doc/sphinxext/prs/6376.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f37903d48850a8af4eb6535df7d8dd1b5219385e", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/install/pre_install.rst": { + "a": 2, + "d": 2 + }, + "doc/links.inc": { + "a": 1, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/638.json b/doc/sphinxext/prs/638.json new file mode 100644 index 00000000000..3bfedd6b856 --- /dev/null +++ b/doc/sphinxext/prs/638.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2a359d02745cb9816c376ce3d3eed9ac7719a2f0", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 6, + "d": 0 + }, + "examples/export/plot_epochs_to_nitime.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6381.json b/doc/sphinxext/prs/6381.json new file mode 100644 index 00000000000..c498fe15318 --- /dev/null +++ b/doc/sphinxext/prs/6381.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "90a7202352a1dc68177f15349ce806d262075c50", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 4, + "d": 6 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6384.json b/doc/sphinxext/prs/6384.json new file mode 100644 index 00000000000..d8f482f08d3 --- /dev/null +++ b/doc/sphinxext/prs/6384.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c5e205fa8ebdd0d0d01f7e0d3561077169a85f71", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6385.json b/doc/sphinxext/prs/6385.json new file mode 100644 index 00000000000..748d9384c5c --- /dev/null +++ b/doc/sphinxext/prs/6385.json @@ -0,0 +1,155 @@ +{ + "merge_commit_sha": "06b4a1ef458f61c2d7313a8bd4d284a547aa8229", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/cov.py": { + "a": 2, + "d": 4 + }, + "mne/decoding/base.py": { + "a": 3, + "d": 4 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/search_light.py": { + 
"a": 2, + "d": 2 + }, + "mne/decoding/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 2, + "d": 3 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 10, + "d": 13 + }, + "mne/forward/_lead_dots.py": { + "a": 5, + "d": 5 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 4 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 3, + "d": 3 + }, + "mne/label.py": { + "a": 10, + "d": 15 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 6, + "d": 8 + }, + "mne/morph.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 12 + }, + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 5 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 4 + }, + "mne/proj.py": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 3, + "d": 4 + }, + "mne/simulation/raw.py": { + "a": 4, + "d": 0 + }, + "mne/simulation/source.py": { + "a": 5, + "d": 11 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 2 + }, + "mne/source_space.py": { + "a": 4, + "d": 6 + }, + "mne/stats/cluster_level.py": { + "a": 8, + "d": 28 + }, + "mne/stats/permutations.py": { + "a": 2, + "d": 7 + }, + "mne/time_frequency/_stockwell.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/csd.py": { + "a": 7, + "d": 14 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 3 + }, + "mne/time_frequency/psd.py": { + "a": 3, + "d": 6 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 19, + "d": 5 + }, + "mne/utils/numerics.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6386.json b/doc/sphinxext/prs/6386.json new file mode 100644 index 00000000000..2155630bd0a --- /dev/null +++ b/doc/sphinxext/prs/6386.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5409a89233b764f3f3f3136cf9bf6b8d5fb0a4fe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 5, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 13, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6387.json b/doc/sphinxext/prs/6387.json new file mode 100644 index 00000000000..0a81d661463 --- /dev/null +++ b/doc/sphinxext/prs/6387.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0e71bec27a738dd3db69166264144729b9b1a8aa", + "authors": [ + { + "n": "Thomas Radman", + "e": "radman.thomas@gmail.com" + } + ], + "changes": { + ".mailmap": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6388.json b/doc/sphinxext/prs/6388.json new file mode 100644 index 00000000000..18fe999ec4b --- /dev/null +++ b/doc/sphinxext/prs/6388.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "e1c4f0c1c3c76f8fc4c04e34af92090cae6ea10b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 8, + "d": 0 + }, + "doc/data_formats.rst": { + "a": 76, + "d": 0 + }, + "doc/glossary.rst": { + "a": 9, + "d": 8 + }, + "doc/manual/io.rst": { + "a": 64, + "d": 83 + }, + "doc/precision.rst": { + "a": 27, + "d": 0 + }, + "doc/units.rst": { + "a": 35, + "d": 0 + }, + 
"tutorials/intro/plot_info.py": { + "a": 2, + "d": 2 + }, + "tutorials/intro/plot_introduction.py": { + "a": 1, + "d": 1 + }, + "tutorials/raw/plot_object_raw.py": { + "a": 527, + "d": 91 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6391.json b/doc/sphinxext/prs/6391.json new file mode 100644 index 00000000000..a674c608558 --- /dev/null +++ b/doc/sphinxext/prs/6391.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "db453878c085a2865cfe9801feabcb6d79065e5c", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/annotations.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6392.json b/doc/sphinxext/prs/6392.json new file mode 100644 index 00000000000..6640f4dfb8d --- /dev/null +++ b/doc/sphinxext/prs/6392.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "109aa4b6f5b7e7bd4c5d9cac199cdaf55c569dde", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/mne_browse_raw.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6393.json b/doc/sphinxext/prs/6393.json new file mode 100644 index 00000000000..7b027d08319 --- /dev/null +++ b/doc/sphinxext/prs/6393.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3db7d50866ae625a40749ca353b0f067b2575e23", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/mne_python.rst": { + "a": 92, + "d": 37 + }, + "doc/sphinxext/sphinx_bootstrap_divs/__init__.py": { + "a": 72, + "d": 36 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6394.json b/doc/sphinxext/prs/6394.json new file mode 100644 index 00000000000..0ac483834a3 --- /dev/null +++ b/doc/sphinxext/prs/6394.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3a89b550c95ea7d55031d68503e16b6db296809c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/misc/plot_sensor_locations.py": { + "a": 215, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6395.json b/doc/sphinxext/prs/6395.json new file mode 100644 index 00000000000..d9fff626458 --- /dev/null +++ b/doc/sphinxext/prs/6395.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0679e350cc20a07f3d043fec0be9a2153aa99a2b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6396.json b/doc/sphinxext/prs/6396.json new file mode 100644 index 00000000000..a7134729075 --- /dev/null +++ b/doc/sphinxext/prs/6396.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6e0f8d83b5f5e95ef94aba541c639b213fa104d6", + "authors": [ + { + "n": "José C. 
García Alanis", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6398.json b/doc/sphinxext/prs/6398.json new file mode 100644 index 00000000000..6616a9463fd --- /dev/null +++ b/doc/sphinxext/prs/6398.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bd2e5e9b6ab33fca2e91e91bc13cb254961d85e1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "LICENSE.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6399.json b/doc/sphinxext/prs/6399.json new file mode 100644 index 00000000000..bc9cd3e9be5 --- /dev/null +++ b/doc/sphinxext/prs/6399.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ff70048d8ba11c9a395ddb11d07dd0afd16254da", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 5, + "d": 3 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/64.json b/doc/sphinxext/prs/64.json new file mode 100644 index 00000000000..4ac391941cd --- /dev/null +++ b/doc/sphinxext/prs/64.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2fd38c04c8bd07fc231064c50133c6de76cad4b7", + "authors": [ + { + "n": "Louis Thibault", + "e": null + } + ], + "changes": { + "mne/event.py": { + "a": 10, + "d": 5 + }, + "mne/fiff/raw.py": { + "a": 21, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/640.json b/doc/sphinxext/prs/640.json new file mode 100644 index 00000000000..95f9482397d --- /dev/null +++ b/doc/sphinxext/prs/640.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "422ea3229199c7c7b9fdf70de7a4f9583850d160", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/layouts/CTF151.lay": { + "a": 153, + "d": 0 + }, + "mne/layouts/CTF275.lay": { + "a": 275, + "d": 0 + }, + "mne/layouts/layout.py": { + "a": 68, + "d": 24 + }, + "mne/layouts/tests/test_layout.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6400.json b/doc/sphinxext/prs/6400.json new file mode 100644 index 00000000000..4bced4a744c --- /dev/null +++ b/doc/sphinxext/prs/6400.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "21fc47f8c385da235fd3c9fad37882774e376b49", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/manual/datasets_index.rst": { + "a": 66, + "d": 43 + }, + "examples/datasets/plot_hf_sef_data.py": { + "a": 3, + "d": 1 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 2, + "d": 0 + }, + "tutorials/sample-datasets/plot_phantom_4DBTi.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6401.json b/doc/sphinxext/prs/6401.json new file mode 100644 index 00000000000..8eacd762e5c --- /dev/null +++ b/doc/sphinxext/prs/6401.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "43481253de2b21c2acb257115136c96bca3f4ba7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/proj.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6403.json 
b/doc/sphinxext/prs/6403.json new file mode 100644 index 00000000000..64d823b5b9f --- /dev/null +++ b/doc/sphinxext/prs/6403.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "910c1e6276eb1abd9e6e5af312c0543a2fd7dcbb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 4 + }, + "mne/viz/tests/test_3d.py": { + "a": 33, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6404.json b/doc/sphinxext/prs/6404.json new file mode 100644 index 00000000000..d6245eeef53 --- /dev/null +++ b/doc/sphinxext/prs/6404.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2191e3d403e6da5176e4fef646c454eadede2504", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/install/mne_python.rst": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6405.json b/doc/sphinxext/prs/6405.json new file mode 100644 index 00000000000..be8b0065466 --- /dev/null +++ b/doc/sphinxext/prs/6405.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d7d8c889112a40fb0bdb478dc7e393aac9c99b8d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 4, + "d": 2 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 5, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 11, + "d": 6 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 26, + "d": 30 + }, + "mne/viz/tests/test_3d.py": { + "a": 19, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6407.json b/doc/sphinxext/prs/6407.json new file mode 100644 index 00000000000..c817ee3eeb1 --- /dev/null +++ b/doc/sphinxext/prs/6407.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "3ea72bee527cc3e451733f80a8ca377144b185dc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 5, + "d": 5 + }, + "examples/time_frequency/plot_source_power_spectrum_opm.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/transformer.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 1 + }, + "tutorials/sample-datasets/plot_sleep.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6408.json b/doc/sphinxext/prs/6408.json new file mode 100644 index 00000000000..82dfc581167 --- /dev/null +++ b/doc/sphinxext/prs/6408.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e1a160c88b8409c8a582e8993cff4806fc0a921e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/pick.py": { + "a": 21, + "d": 13 + }, + "mne/viz/tests/test_epochs.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6409.json b/doc/sphinxext/prs/6409.json new file mode 100644 index 00000000000..2e99c808412 --- /dev/null +++ b/doc/sphinxext/prs/6409.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c2b917619c297110d4e1b9d907a10a16bd7473f7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + 
"a": 4, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 5 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 55, + "d": 13 + }, + "mne/viz/tests/test_raw.py": { + "a": 34, + "d": 9 + }, + "mne/viz/utils.py": { + "a": 14, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6413.json b/doc/sphinxext/prs/6413.json new file mode 100644 index 00000000000..c5e868cfb98 --- /dev/null +++ b/doc/sphinxext/prs/6413.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bd4b8064cdc7c07f0c56193e95ae63ea13ee268d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 20, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6414.json b/doc/sphinxext/prs/6414.json new file mode 100644 index 00000000000..d99a5963e8c --- /dev/null +++ b/doc/sphinxext/prs/6414.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3acb8dc8c621629bb15c9d1c686e99af3d72c106", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/manual/datasets_index.rst": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 14, + "d": 7 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 9, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 9, + "d": 8 + }, + "tutorials/time-freq/plot_sensors_time_frequency.py": { + "a": 12, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6416.json b/doc/sphinxext/prs/6416.json new file mode 100644 index 00000000000..d6742927926 --- /dev/null +++ b/doc/sphinxext/prs/6416.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b786f219b15b881ff7e9a1a74f194a0818d657b0", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/tutorials/report.rst": { + "a": 4, + "d": 4 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 3 + }, + "mne/io/fiff/raw.py": { + "a": 6, + "d": 5 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 7 + }, + "mne/utils/tests/test_check.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6417.json b/doc/sphinxext/prs/6417.json new file mode 100644 index 00000000000..f58a74ff83e --- /dev/null +++ b/doc/sphinxext/prs/6417.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a420019e1360d3b1bfad28c5040070309c341cbc", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 13, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 7, + "d": 5 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/642.json b/doc/sphinxext/prs/642.json new file mode 100644 index 00000000000..3b515d208aa --- /dev/null +++ b/doc/sphinxext/prs/642.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7a66502914ca8d9b63639900306e77b5defd5e11", + "authors": [ + { + "n": "Yousra BEKHTI", + "e": "yousra.bekhti@gmail.com" + } + ], + "changes": { + "mne/viz.py": { + "a": 14, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6420.json b/doc/sphinxext/prs/6420.json new file mode 100644 index 
00000000000..1c935efc88d --- /dev/null +++ b/doc/sphinxext/prs/6420.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "f4b66b57ba6ea6ccfa6b835a49198aa2d5778f9c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/_static/institution_logos/Aalto.svg": { + "a": 2466, + "d": 0 + }, + "doc/_static/institution_logos/Aarhus.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/BIDS.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/BU.svg": { + "a": 1, + "d": 0 + }, + "doc/_static/institution_logos/CEA.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Graz.jpg": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Harvard.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/ICM.jpg": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Ilmenau.gif": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Inserm.svg": { + "a": 46, + "d": 0 + }, + "doc/_static/institution_logos/Julich.svg": { + "a": 41, + "d": 0 + }, + "doc/_static/institution_logos/MGH.svg": { + "a": 18, + "d": 0 + }, + "doc/_static/institution_logos/MIT.svg": { + "a": 9, + "d": 0 + }, + "doc/_static/institution_logos/Martinos.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/NYU.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Telecom_Paris_Tech.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Washington.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/inria.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institutions.png": { + "a": 0, + "d": 0 + }, + "doc/_static/style.css": { + "a": 26, + "d": 6 + }, + "doc/_templates/layout.html": { + "a": 20, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6422.json b/doc/sphinxext/prs/6422.json new file mode 100644 index 00000000000..75dcfe6fd10 --- /dev/null +++ b/doc/sphinxext/prs/6422.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "5859fb1292dfd95dc1b60171c8fa99d0ac5a0fd9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 16, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 408, + "d": 413 + }, + "mne/viz/evoked.py": { + "a": 709, + "d": 760 + }, + "mne/viz/tests/test_epochs.py": { + "a": 91, + "d": 35 + }, + "mne/viz/tests/test_evoked.py": { + "a": 111, + "d": 121 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 92, + "d": 76 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6423.json b/doc/sphinxext/prs/6423.json new file mode 100644 index 00000000000..795c6a15551 --- /dev/null +++ b/doc/sphinxext/prs/6423.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3f4c82a7e7401a38c485deca7d9e3b655d605b62", + "authors": [ + { + "n": "José C. 
García Alanis", + "e": null + } + ], + "changes": { + "mne/datasets/limo/limo.py": { + "a": 21, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6426.json b/doc/sphinxext/prs/6426.json new file mode 100644 index 00000000000..d84f945a65f --- /dev/null +++ b/doc/sphinxext/prs/6426.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "36522874ca0e0f50864a135de7a7a4624661fabf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 4 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6427.json b/doc/sphinxext/prs/6427.json new file mode 100644 index 00000000000..809111455f8 --- /dev/null +++ b/doc/sphinxext/prs/6427.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "69f8db962f2956f119000fd9fc4ee2cc609b9a53", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/misc.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6428.json b/doc/sphinxext/prs/6428.json new file mode 100644 index 00000000000..abf2546a28d --- /dev/null +++ b/doc/sphinxext/prs/6428.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8dfbe7fd4d7528b5033407feba1bf0825682bf2a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/preprocessing/plot_eog_artifact_histogram.py": { + "a": 5, + "d": 4 + }, + "tutorials/misc/plot_sensor_locations.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6429.json b/doc/sphinxext/prs/6429.json new file mode 100644 index 00000000000..29672905910 --- /dev/null +++ b/doc/sphinxext/prs/6429.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9b10fba6defa0df87eacc0b28d9486a0789de933", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 10, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/643.json b/doc/sphinxext/prs/643.json new file mode 100644 index 00000000000..0f7c92388f0 --- /dev/null +++ b/doc/sphinxext/prs/643.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "430adcd0c5ec2dc819219964bdf44642e1b33fab", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 7, + "d": 7 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6430.json b/doc/sphinxext/prs/6430.json new file mode 100644 index 00000000000..d01d9be5a8f --- /dev/null +++ b/doc/sphinxext/prs/6430.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "33b6dde0eec1449eb4257321de44b56972491972", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 21, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6432.json b/doc/sphinxext/prs/6432.json new file mode 100644 index 00000000000..0f3e5477925 --- /dev/null +++ b/doc/sphinxext/prs/6432.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2fedaac463db67ac7a3435e419d6161e11a5edfb", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/documentation.rst": { 
+ "a": 20, + "d": 17 + }, + "doc/glossary.rst": { + "a": 2, + "d": 2 + }, + "tutorials/source-modeling/plot_background_freesurfer.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6433.json b/doc/sphinxext/prs/6433.json new file mode 100644 index 00000000000..cad24a052c6 --- /dev/null +++ b/doc/sphinxext/prs/6433.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "f2b7ed55cd8c80a3e3dd9bc89e2c32036bc1bd58", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 0, + "d": 1 + }, + ".gitignore": { + "a": 1, + "d": 1 + }, + "doc/_static/style.css": { + "a": 8, + "d": 0 + }, + "doc/conf.py": { + "a": 10, + "d": 2 + }, + "doc/documentation.rst": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_make_report.py": { + "a": 5, + "d": 4 + }, + "mne/html/jquery.js": { + "a": 0, + "d": 0 + }, + "mne/report.py": { + "a": 70, + "d": 5 + }, + "mne/tests/test_report.py": { + "a": 43, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6435.json b/doc/sphinxext/prs/6435.json new file mode 100644 index 00000000000..39081b00d26 --- /dev/null +++ b/doc/sphinxext/prs/6435.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1f25533df6c2b75ef54122ff245d4f5cd2284179", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 15, + "d": 7 + }, + "mne/viz/backends/renderer.py": { + "a": 2, + "d": 4 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6436.json b/doc/sphinxext/prs/6436.json new file mode 100644 index 00000000000..e5313814d96 --- /dev/null +++ b/doc/sphinxext/prs/6436.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0348292ec18f038982bf2d4e2ab99e7971933e2d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 66, + "d": 3 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6437.json b/doc/sphinxext/prs/6437.json new file mode 100644 index 00000000000..20de880468d --- /dev/null +++ b/doc/sphinxext/prs/6437.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a2c75903d4471587582bc2715985dc82eff9ac12", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 23, + "d": 2 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6438.json b/doc/sphinxext/prs/6438.json new file mode 100644 index 00000000000..d9cd087d107 --- /dev/null +++ b/doc/sphinxext/prs/6438.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8ea467d2799ddce525a59f490d72f1b50d71abc0", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 3 + }, + "mne/viz/tests/test_evoked.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6439.json b/doc/sphinxext/prs/6439.json new file mode 100644 index 00000000000..5cfcf1f6b2a --- /dev/null +++ b/doc/sphinxext/prs/6439.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"b07a871906ec4b03d40b3d8529e923b2d51037bf", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 8, + "d": 5 + }, + "mne/viz/tests/test_epochs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6440.json b/doc/sphinxext/prs/6440.json new file mode 100644 index 00000000000..4ef0ddc4d8e --- /dev/null +++ b/doc/sphinxext/prs/6440.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "033f8c13fb03772c26142a1782c18a2a65dc9e8e", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/defaults.py": { + "a": 3, + "d": 2 + }, + "mne/io/pick.py": { + "a": 3, + "d": 3 + }, + "mne/viz/tests/test_epochs.py": { + "a": 52, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6441.json b/doc/sphinxext/prs/6441.json new file mode 100644 index 00000000000..1ebed886cda --- /dev/null +++ b/doc/sphinxext/prs/6441.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d7c16766f7f32955bbd694ef0de2b1295ca6a96c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/freesurfer.rst": { + "a": 1, + "d": 1 + }, + "tutorials/raw/plot_object_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6442.json b/doc/sphinxext/prs/6442.json new file mode 100644 index 00000000000..e13b9b1e05d --- /dev/null +++ b/doc/sphinxext/prs/6442.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8e4acd7f35cf6ea34d80a8d950b7a889b10d5153", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 10, + "d": 5 + }, + "mne/viz/tests/test_3d.py": { + "a": 10, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6443.json b/doc/sphinxext/prs/6443.json new file mode 100644 index 00000000000..8030ee07594 --- /dev/null +++ b/doc/sphinxext/prs/6443.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "376c2bdc067636c38488b1f7e3502d1b4e558efc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 9 + }, + "mne/tests/test_source_estimate.py": { + "a": 20, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6446.json b/doc/sphinxext/prs/6446.json new file mode 100644 index 00000000000..661760ef0c4 --- /dev/null +++ b/doc/sphinxext/prs/6446.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1ea4b49412c81588b6f149658e3c0c066fe710e0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/tutorials/report.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6447.json b/doc/sphinxext/prs/6447.json new file mode 100644 index 00000000000..07cce11fe0a --- /dev/null +++ b/doc/sphinxext/prs/6447.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ec11e7b8a536d1177af299a5aea59666774ea5f7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/source_space.py": { + "a": 16, + "d": 4 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 65, + "d": 40 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6448.json 
b/doc/sphinxext/prs/6448.json new file mode 100644 index 00000000000..c49ae050bd8 --- /dev/null +++ b/doc/sphinxext/prs/6448.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5329dba29161aca72cb179759d258b0e0c9cd5ff", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/realtime/mock_lsl_stream.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6449.json b/doc/sphinxext/prs/6449.json new file mode 100644 index 00000000000..6fdf4517833 --- /dev/null +++ b/doc/sphinxext/prs/6449.json @@ -0,0 +1,283 @@ +{ + "merge_commit_sha": "0f9910f96c2ee82ff815ca4ab3758788c1a4dd1d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/_static/institution_logos/Aalto.svg": { + "a": 2466, + "d": 0 + }, + "doc/_static/institution_logos/Aarhus.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/BIDS.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/BU.svg": { + "a": 1, + "d": 0 + }, + "doc/_static/institution_logos/CEA.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Graz.jpg": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Harvard.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/ICM.jpg": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Ilmenau.gif": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Inserm.svg": { + "a": 46, + "d": 0 + }, + "doc/_static/institution_logos/Julich.svg": { + "a": 41, + "d": 0 + }, + "doc/_static/institution_logos/MGH.svg": { + "a": 18, + "d": 0 + }, + "doc/_static/institution_logos/MIT.svg": { + "a": 9, + "d": 0 + }, + "doc/_static/institution_logos/Martinos.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/NYU.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Telecom_Paris_Tech.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Washington.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/inria.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institutions.png": { + "a": 0, + "d": 0 + }, + "doc/_static/style.css": { + "a": 26, + "d": 6 + }, + "doc/_templates/layout.html": { + "a": 20, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 11, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 27, + "d": 6 + }, + "mne/conftest.py": { + "a": 2, + "d": 5 + }, + "mne/cov.py": { + "a": 2, + "d": 4 + }, + "mne/decoding/base.py": { + "a": 3, + "d": 4 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/search_light.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 2, + "d": 3 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 10, + "d": 13 + }, + "mne/forward/_lead_dots.py": { + "a": 5, + "d": 5 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 4 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 3, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 11, + "d": 16 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 14, + "d": 0 + }, + "mne/label.py": { + "a": 10, + "d": 12 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 6, + "d": 8 + }, + "mne/morph.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 7, + "d": 6 + }, + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 4 + }, + "mne/proj.py": { + 
"a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 3, + "d": 4 + }, + "mne/simulation/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/raw.py": { + "a": 4, + "d": 0 + }, + "mne/simulation/source.py": { + "a": 5, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 2 + }, + "mne/source_space.py": { + "a": 4, + "d": 6 + }, + "mne/stats/cluster_level.py": { + "a": 8, + "d": 16 + }, + "mne/stats/permutations.py": { + "a": 2, + "d": 4 + }, + "mne/tests/test_annotations.py": { + "a": 3, + "d": 0 + }, + "mne/time_frequency/_stockwell.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/csd.py": { + "a": 7, + "d": 14 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 3 + }, + "mne/time_frequency/psd.py": { + "a": 3, + "d": 6 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 6 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 19, + "d": 5 + }, + "mne/utils/misc.py": { + "a": 10, + "d": 6 + }, + "mne/utils/numerics.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6450.json b/doc/sphinxext/prs/6450.json new file mode 100644 index 00000000000..e24169efad7 --- /dev/null +++ b/doc/sphinxext/prs/6450.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e793105333158d5e6bcd7b87d9c4c768e3cfdce4", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 10, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6451.json b/doc/sphinxext/prs/6451.json new file mode 100644 index 00000000000..37518bc85ec --- /dev/null +++ b/doc/sphinxext/prs/6451.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "3b1704f581798cf71a88cbb5c352dc8beefa20ca", + "authors": [ + { + "n": "Jevri Hanna", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 5, + "d": 0 + }, + "examples/visualization/plot_sensor_noise_level.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 11, + "d": 7 + }, + "mne/io/base.py": { + "a": 14, + "d": 13 + }, + "mne/utils/docs.py": { + "a": 73, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 51, + "d": 60 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 46, + "d": 277 + }, + "mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 3, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 209, + "d": 2 + }, + "tutorials/sample-datasets/plot_sleep.py": { + "a": 4, + "d": 2 + }, + "tutorials/time-freq/plot_sensors_time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6452.json b/doc/sphinxext/prs/6452.json new file mode 100644 index 00000000000..8862f78e1e7 --- /dev/null +++ b/doc/sphinxext/prs/6452.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d36440176cf3f3532f64e6f046c4a6a3eca028de", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 9, + "d": 2 + }, + "mne/report.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6453.json b/doc/sphinxext/prs/6453.json new file mode 100644 index 00000000000..9b1e72a7e7e --- /dev/null +++ b/doc/sphinxext/prs/6453.json @@ 
-0,0 +1,15 @@ +{ + "merge_commit_sha": "569924b3ddcca4245725930ebca243ef213b31ee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/tests/test_datasets.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6456.json b/doc/sphinxext/prs/6456.json new file mode 100644 index 00000000000..1fa5d58185c --- /dev/null +++ b/doc/sphinxext/prs/6456.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0576ce744bb1649bab717a4a31c73d888673b32a", + "authors": [ + { + "n": "Ivana Kojcic", + "e": null + } + ], + "changes": { + "doc/manual/source_localization/forward.rst": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6457.json b/doc/sphinxext/prs/6457.json new file mode 100644 index 00000000000..9b126430f32 --- /dev/null +++ b/doc/sphinxext/prs/6457.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5d2f027758ca2f456ae266dc02da15f250e0eb06", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/io/plot_read_proj.py": { + "a": 4, + "d": 2 + }, + "tutorials/preprocessing/plot_artifacts_correction_rejection.py": { + "a": 1, + "d": 1 + }, + "tutorials/raw/README.txt": { + "a": 3, + "d": 3 + }, + "tutorials/raw/plot_visualize_raw.py": { + "a": 164, + "d": 124 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6460.json b/doc/sphinxext/prs/6460.json new file mode 100644 index 00000000000..b1a951a4daa --- /dev/null +++ b/doc/sphinxext/prs/6460.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "ad305e0d51287e66b89e66cfdb49ba7620b5fad8", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/__init__.py": { + "a": 14, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 448, + "d": 0 + }, + "mne/viz/_brain/colormap.py": { + "a": 108, + "d": 0 + }, + "mne/viz/_brain/surface.py": { + "a": 184, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 88, + "d": 0 + }, + "mne/viz/_brain/view.py": { + "a": 26, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 22, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 32, + "d": 18 + }, + "mne/viz/backends/_utils.py": { + "a": 13, + "d": 0 + }, + "mne/viz/backends/base_renderer.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6464.json b/doc/sphinxext/prs/6464.json new file mode 100644 index 00000000000..f537a4bc265 --- /dev/null +++ b/doc/sphinxext/prs/6464.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "2638d929ba01a6ee937196ee3b23492cdd7fbc5f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/documentation.rst": { + "a": 0, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 4, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 3, + "d": 64 + }, + "examples/datasets/plot_opm_data.py": { + "a": 3, + "d": 3 + }, + "examples/forward/plot_decimate_head_surface.py": { + "a": 0, + "d": 35 + }, + "examples/forward/plot_left_cerebellum_volume_source.py": { + "a": 12, + "d": 37 + }, + "examples/inverse/plot_mne_crosstalk_function.py": { + "a": 3, + "d": 4 + }, + "examples/inverse/plot_mne_point_spread_function.py": { + "a": 3, + "d": 4 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 20, + "d": 17 + }, + 
"examples/time_frequency/plot_source_power_spectrum_opm.py": { + "a": 2, + "d": 2 + }, + "examples/visualization/plot_3d_to_2d.py": { + "a": 3, + "d": 3 + }, + "examples/visualization/plot_eeg_on_scalp.py": { + "a": 2, + "d": 3 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 2, + "d": 12 + }, + "examples/visualization/plot_montage.py": { + "a": 3, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 88, + "d": 3 + }, + "mne/viz/__init__.py": { + "a": 4, + "d": 2 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 52, + "d": 5 + }, + "mne/viz/backends/_pyvista.py": { + "a": 26, + "d": 3 + }, + "mne/viz/backends/base_renderer.py": { + "a": 83, + "d": 2 + }, + "mne/viz/backends/renderer.py": { + "a": 55, + "d": 0 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 15, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 27, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6465.json b/doc/sphinxext/prs/6465.json new file mode 100644 index 00000000000..9c155611f97 --- /dev/null +++ b/doc/sphinxext/prs/6465.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ca870d2f04c20cee8957257df5f2d2c64d448b34", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "tutorials/evoked/plot_eeg_erp.py": { + "a": 4, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6469.json b/doc/sphinxext/prs/6469.json new file mode 100644 index 00000000000..058b1606261 --- /dev/null +++ b/doc/sphinxext/prs/6469.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d048fdea501502b42e6ad0a2597a3cf3c1d329b7", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 29, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 11, + "d": 2 + }, + "mne/utils/tests/test_numerics.py": { + "a": 23, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/647.json b/doc/sphinxext/prs/647.json new file mode 100644 index 00000000000..9ee626e97f5 --- /dev/null +++ b/doc/sphinxext/prs/647.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a9e074f60d58825eb22a580d3a98c48bdb0feb52", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/layouts/layout.py": { + "a": 32, + "d": 34 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6470.json b/doc/sphinxext/prs/6470.json new file mode 100644 index 00000000000..6df07ea2807 --- /dev/null +++ b/doc/sphinxext/prs/6470.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9dee0a7a37aeaf22d4ad9ddaf916f96802514340", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/io/egi/egi.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6471.json b/doc/sphinxext/prs/6471.json new file mode 100644 index 00000000000..0ac6de151c0 --- /dev/null +++ b/doc/sphinxext/prs/6471.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "352d3f04d7f2472a75e9738994bb8064f7edf2e0", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/cnt/cnt.py": { + "a": 3, + "d": 2 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 25, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6472.json b/doc/sphinxext/prs/6472.json new file mode 100644 index 00000000000..66990024e73 --- /dev/null +++ b/doc/sphinxext/prs/6472.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "f0149397503af6b585604d60bfe1d1327a4ac08a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 0, + "d": 4 + }, + "doc/documentation.rst": { + "a": 0, + "d": 2 + }, + "doc/manual/datasets_index.rst": { + "a": 0, + "d": 19 + }, + "doc/python_reference.rst": { + "a": 0, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 1 + }, + "examples/datasets/plot_megsim_data.py": { + "a": 0, + "d": 55 + }, + "examples/datasets/plot_megsim_data_single_trial.py": { + "a": 0, + "d": 36 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/megsim/__init__.py": { + "a": 0, + "d": 3 + }, + "mne/datasets/megsim/megsim.py": { + "a": 0, + "d": 162 + }, + "mne/datasets/megsim/urls.py": { + "a": 0, + "d": 179 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 0, + "d": 11 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 7 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6473.json b/doc/sphinxext/prs/6473.json new file mode 100644 index 00000000000..f74a8583b2e --- /dev/null +++ b/doc/sphinxext/prs/6473.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2272435bd21157d32b3c90d34d1bb7d15930c1e2", + "authors": [ + { + "n": "Elizabeth DuPre", + "e": null + } + ], + "changes": { + "doc/manual/preprocessing/ica.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6474.json b/doc/sphinxext/prs/6474.json new file mode 100644 index 00000000000..6ba2c23c081 --- /dev/null +++ b/doc/sphinxext/prs/6474.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4b0c4e7fe9b2a2b8ca19c0f3efd7a84b944dc9e0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/intro/README.txt": { + "a": 4, + "d": 4 + }, + "tutorials/intro/plot_introduction.py": { + "a": 2, + "d": 5 + }, + "tutorials/intro/plot_projectors_intro.py": { + "a": 428, + "d": 0 + }, + "tutorials/raw/plot_annotating_raw.py": { + "a": 4, + "d": 8 + }, + "tutorials/raw/plot_object_raw.py": { + "a": 3, + "d": 5 + }, + "tutorials/raw/plot_visualize_raw.py": { + "a": 2, + "d": 4 + } + } 
+} \ No newline at end of file diff --git a/doc/sphinxext/prs/6475.json b/doc/sphinxext/prs/6475.json new file mode 100644 index 00000000000..864f99c9458 --- /dev/null +++ b/doc/sphinxext/prs/6475.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "03d42b46c9c13d4a9205331058ee3755e72e27d3", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6476.json b/doc/sphinxext/prs/6476.json new file mode 100644 index 00000000000..828bd024bac --- /dev/null +++ b/doc/sphinxext/prs/6476.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e6dc3bf9bf3a7faf24188958d8965439caa9c4b7", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 3, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 45, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6477.json b/doc/sphinxext/prs/6477.json new file mode 100644 index 00000000000..8815b116b87 --- /dev/null +++ b/doc/sphinxext/prs/6477.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "bef2afe5260f5afb48410f0fbe40263e32a929d5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/manual/datasets_index.rst": { + "a": 1, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "ignore_words.txt": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 3 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 58, + "d": 14 + }, + "mne/io/base.py": { + "a": 16, + "d": 108 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 7, + "d": 0 + }, + "mne/tests/test_import_nesting.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6478.json b/doc/sphinxext/prs/6478.json new file mode 100644 index 00000000000..1c470d72fc7 --- /dev/null +++ b/doc/sphinxext/prs/6478.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "31ec9f401fb74f66f67d492e5ea04b636a04b5d8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 10, + "d": 3 + }, + "mne/io/reference.py": { + "a": 17, + "d": 9 + }, + "mne/io/tests/test_reference.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6480.json b/doc/sphinxext/prs/6480.json new file mode 100644 index 00000000000..cddd170e056 --- /dev/null +++ b/doc/sphinxext/prs/6480.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2549c536721dfad921940cdbd821e3596fc513fa", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 9, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 75, + "d": 58 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 34, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6483.json b/doc/sphinxext/prs/6483.json new file mode 100644 index 00000000000..7b35166d021 --- /dev/null +++ b/doc/sphinxext/prs/6483.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "6c36f8806dffe48bd82e461ad6cc8aad782e5f43", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/dipole.py": { + "a": 15, + "d": 12 + }, + 
"mne/viz/_3d.py": { + "a": 76, + "d": 36 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 5, + "d": 2 + }, + "tutorials/misc/plot_ecog.py": { + "a": 2, + "d": 3 + }, + "tutorials/misc/plot_sensor_locations.py": { + "a": 1, + "d": 2 + }, + "tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py": { + "a": 16, + "d": 20 + }, + "tutorials/sample-datasets/plot_phantom_4DBTi.py": { + "a": 11, + "d": 9 + }, + "tutorials/simulation/plot_dics.py": { + "a": 8, + "d": 5 + }, + "tutorials/simulation/plot_point_spread.py": { + "a": 2, + "d": 5 + }, + "tutorials/source-modeling/plot_dipole_orientations.py": { + "a": 38, + "d": 16 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 9, + "d": 13 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6484.json b/doc/sphinxext/prs/6484.json new file mode 100644 index 00000000000..99d34a9042f --- /dev/null +++ b/doc/sphinxext/prs/6484.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d78574cc91ce42bec1fcccca2c4c26e643038e28", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/intro/plot_projectors_intro.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6486.json b/doc/sphinxext/prs/6486.json new file mode 100644 index 00000000000..763ce74b5c6 --- /dev/null +++ b/doc/sphinxext/prs/6486.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1bec9934efc131321e4e5ad3625bf09007250166", + "authors": [ + { + "n": "Keith Doelling", + "e": "keith.doelling@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 2 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 2, + "d": 2 + }, + "mne/io/reference.py": { + "a": 34, + "d": 6 + }, + "mne/io/tests/test_reference.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6487.json b/doc/sphinxext/prs/6487.json new file mode 100644 index 00000000000..176e5d399a9 --- /dev/null +++ b/doc/sphinxext/prs/6487.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "16df3204d4c75e13b4319dbcddd22b09ad918b47", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/io/pick.py": { + "a": 8, + "d": 8 + }, + "tutorials/intro/plot_info.py": { + "a": 147, + "d": 78 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6489.json b/doc/sphinxext/prs/6489.json new file mode 100644 index 00000000000..45327b82e62 --- /dev/null +++ b/doc/sphinxext/prs/6489.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "2f9873e7759e62f8471326e37a2ad1cdd2c2e65d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/documentation.rst": { + "a": 7, + "d": 7 + }, + "tutorials/epochs/plot_epoching_and_averaging.py": { + "a": 0, + "d": 0 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 5, + "d": 4 + }, + "tutorials/intro/plot_20_events_from_raw.py": { + "a": 10, + "d": 8 + }, + "tutorials/intro/plot_30_info.py": { + "a": 3, + "d": 3 + }, + "tutorials/intro/plot_40_projectors_background.py": { + "a": 3, + "d": 1 + }, + "tutorials/intro/plot_50_configure_mne.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/plot_artifacts_correction_rejection.py": { + "a": 1, + "d": 1 + }, + 
"tutorials/raw/plot_10_raw_overview.py": { + "a": 5, + "d": 5 + }, + "tutorials/raw/plot_20_event_arrays.py": { + "a": 5, + "d": 3 + }, + "tutorials/raw/plot_30_annotate_raw.py": { + "a": 3, + "d": 1 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/649.json b/doc/sphinxext/prs/649.json new file mode 100644 index 00000000000..fc613ec0b8c --- /dev/null +++ b/doc/sphinxext/prs/649.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "65928ef2b39d035b3479f4dfe0cf53d2307f1e8a", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/plot_topo_compare_conditions.py": { + "a": 0, + "d": 1 + }, + "examples/plot_topography.py": { + "a": 1, + "d": 4 + }, + "examples/time_frequency/plot_tfr_topography.py": { + "a": 3, + "d": 5 + }, + "mne/viz.py": { + "a": 94, + "d": 58 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6490.json b/doc/sphinxext/prs/6490.json new file mode 100644 index 00000000000..cf046ff893f --- /dev/null +++ b/doc/sphinxext/prs/6490.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ec5dd172358f8b082f7d82f6f67bd150f8047362", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 8, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6491.json b/doc/sphinxext/prs/6491.json new file mode 100644 index 00000000000..acc53ea6c5b --- /dev/null +++ b/doc/sphinxext/prs/6491.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "54efecca8ec333831a13cbd288b93925c52b9fbb", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 23, + "d": 13 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 4, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6492.json b/doc/sphinxext/prs/6492.json new file mode 100644 index 00000000000..01936fe12da --- /dev/null +++ b/doc/sphinxext/prs/6492.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "226dcff0826518a385a7e62822dc7abf8e610d06", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 6, + "d": 4 + }, + "mne/evoked.py": { + "a": 35, + "d": 24 + }, + "mne/tests/test_evoked.py": { + "a": 18, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6494.json b/doc/sphinxext/prs/6494.json new file mode 100644 index 00000000000..0c2ee110229 --- /dev/null +++ b/doc/sphinxext/prs/6494.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "68f5d09da60a17b5d162cd902c4d64379ad3d87e", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 48, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6495.json b/doc/sphinxext/prs/6495.json new file mode 100644 index 00000000000..77cbc41b841 --- /dev/null +++ b/doc/sphinxext/prs/6495.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "e96b2d879c2c66bb69ede5a1519cf95a55b02ce4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 2, + "d": 2 + }, + "mne/cuda.py": { + "a": 4, + "d": 3 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 3, + "d": 3 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { 
+ "a": 9, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 6 + }, + "mne/time_frequency/csd.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/multitaper.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6496.json b/doc/sphinxext/prs/6496.json new file mode 100644 index 00000000000..3243786f2ed --- /dev/null +++ b/doc/sphinxext/prs/6496.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4939f5144775b67f8da5c9457fcd83dd6d824f67", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6497.json b/doc/sphinxext/prs/6497.json new file mode 100644 index 00000000000..2c8c815a422 --- /dev/null +++ b/doc/sphinxext/prs/6497.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "25b38f87c1a48385164396e0c5ad7a0dab85155d", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/nicolet/nicolet.py": { + "a": 2, + "d": 2 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 33, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6499.json b/doc/sphinxext/prs/6499.json new file mode 100644 index 00000000000..0adba176c47 --- /dev/null +++ b/doc/sphinxext/prs/6499.json @@ -0,0 +1,263 @@ +{ + "merge_commit_sha": "cc1f8f29b6e297397c6dac9e752e287691fb7d2d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/font-awesome.css": { + "a": 2, + "d": 2 + }, + "doc/_static/font-source-code-pro.css": { + "a": 167, + "d": 0 + }, + "doc/_static/font-source-sans-pro.css": { + "a": 131, + "d": 0 + }, + "doc/_static/fonts/fontawesome/fontawesome-webfont.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/fontawesome/fontawesome-webfont.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/fontawesome/fontawesome-webfont.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/fontawesome/fontawesome-webfont.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Bold.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-BoldIt.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-It.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Regular.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Semibold.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-SemiboldIt.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Bold.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-BoldIt.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-It.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Regular.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Semibold.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-SemiboldIt.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Bold.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-BoldIt.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-It.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Regular.ttf": { + "a": 0, + 
"d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Semibold.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-SemiboldIt.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Bold.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-BoldIt.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-It.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Regular.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Semibold.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-SemiboldIt.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Bold.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-BoldIt.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-It.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Regular.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Semibold.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-SemiboldIt.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Bold.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-BoldIt.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-It.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Regular.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Semibold.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-SemiboldIt.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Bold.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-BoldIt.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-It.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Regular.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Semibold.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-SemiboldIt.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Bold.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-BoldIt.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-It.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Regular.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Semibold.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-SemiboldIt.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Bold.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-BoldIt.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-It.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Regular.ttf.woff2": { + "a": 0, + "d": 0 + }, 
+ "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Semibold.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-SemiboldIt.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/style.css": { + "a": 23, + "d": 0 + }, + "doc/_templates/layout.html": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/65.json b/doc/sphinxext/prs/65.json new file mode 100644 index 00000000000..6fe04009a69 --- /dev/null +++ b/doc/sphinxext/prs/65.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "24c78ffc745e27c513efc11003d349bd61d1cf1c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "doc/source/python_tutorial.rst": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/650.json b/doc/sphinxext/prs/650.json new file mode 100644 index 00000000000..8b791b6e584 --- /dev/null +++ b/doc/sphinxext/prs/650.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f2fdc2363df87e3a40e878d424c407ac7fc16916", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/tests/test_viz.py": { + "a": 2, + "d": 0 + }, + "mne/viz.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6503.json b/doc/sphinxext/prs/6503.json new file mode 100644 index 00000000000..4774cfa2704 --- /dev/null +++ b/doc/sphinxext/prs/6503.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "344cf674630c644ebb4d599e53cccdc35bf847d3", + "authors": [ + { + "n": "Dirk Gütlin", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6504.json b/doc/sphinxext/prs/6504.json new file mode 100644 index 00000000000..736740f136c --- /dev/null +++ b/doc/sphinxext/prs/6504.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fc1d25497fe2c47bd9aa129bf13ffb38aaaaf335", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/evoked.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 21, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6505.json b/doc/sphinxext/prs/6505.json new file mode 100644 index 00000000000..b258da35d6b --- /dev/null +++ b/doc/sphinxext/prs/6505.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5e9d54822d99d85cf8c2d88f6438e78a909f41b8", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 43, + "d": 9 + }, + "mne/viz/tests/test_3d.py": { + "a": 12, + "d": 1 + }, + "tutorials/source-modeling/plot_dipole_orientations.py": { + "a": 7, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6506.json b/doc/sphinxext/prs/6506.json new file mode 100644 index 00000000000..994925a577d --- /dev/null +++ b/doc/sphinxext/prs/6506.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3b3c6e6565d3ec97faef18cab9a4323ba7b71b9d", + "authors": [ + { + "n": "Jevri Hanna", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6507.json b/doc/sphinxext/prs/6507.json new file mode 100644 index 00000000000..16bc5396271 --- /dev/null +++ b/doc/sphinxext/prs/6507.json @@ -0,0 +1,23 
@@ +{ + "merge_commit_sha": "ec117dfea5a0f5e3591ab5d9710500ac4b2036f4", + "authors": [ + { + "n": "Paul Roujansky", + "e": "paul@roujansky.eu" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 18, + "d": 9 + }, + "mne/tests/test_epochs.py": { + "a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6509.json b/doc/sphinxext/prs/6509.json new file mode 100644 index 00000000000..10209f9349b --- /dev/null +++ b/doc/sphinxext/prs/6509.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "831aaf42e93237e75bfd49e7d672eea38045625c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_3d.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/651.json b/doc/sphinxext/prs/651.json new file mode 100644 index 00000000000..e3ccd3f5d5e --- /dev/null +++ b/doc/sphinxext/prs/651.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4bfad1779b4c8c07cf5870182e5e6f0777026804", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/artifacts/__init__.py": { + "a": 0, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6510.json b/doc/sphinxext/prs/6510.json new file mode 100644 index 00000000000..e36c583acf7 --- /dev/null +++ b/doc/sphinxext/prs/6510.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f2a6afa8e4c44881ab8c8dfbc744c205ddf564b3", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6511.json b/doc/sphinxext/prs/6511.json new file mode 100644 index 00000000000..2d0deb8ffe8 --- /dev/null +++ b/doc/sphinxext/prs/6511.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "87bd40c0be41183c2f640627577101ce0ed40acf", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/egi/egi.py": { + "a": 4, + "d": 2 + }, + "mne/io/egi/egimff.py": { + "a": 4, + "d": 2 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 33, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6512.json b/doc/sphinxext/prs/6512.json new file mode 100644 index 00000000000..228a3515432 --- /dev/null +++ b/doc/sphinxext/prs/6512.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "897c3b2d12d0562f888784c5e18c2da31ed27ceb", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/bem.py": { + "a": 10, + "d": 8 + }, + "mne/surface.py": { + "a": 53, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 21, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 16, + "d": 2 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6513.json b/doc/sphinxext/prs/6513.json new file mode 100644 index 00000000000..37a5e86bdda --- /dev/null +++ b/doc/sphinxext/prs/6513.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7df663911b414b5f21241d3c301dc7318150c16d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + 
"mne/channels/montage.py": { + "a": 4, + "d": 4 + }, + "mne/io/edf/edf.py": { + "a": 3, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6514.json b/doc/sphinxext/prs/6514.json new file mode 100644 index 00000000000..72b074cb8b3 --- /dev/null +++ b/doc/sphinxext/prs/6514.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "23fdb4136384696b866288032ceb5c47a4e0a589", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/commands/utils.py": { + "a": 5, + "d": 20 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6515.json b/doc/sphinxext/prs/6515.json new file mode 100644 index 00000000000..34b21068769 --- /dev/null +++ b/doc/sphinxext/prs/6515.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e922e9fff8d42e996e5faa2d2b635eed76234671", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 103, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6516.json b/doc/sphinxext/prs/6516.json new file mode 100644 index 00000000000..a81d3877963 --- /dev/null +++ b/doc/sphinxext/prs/6516.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "70789c466133eaba410e1fc254c4b88723e30b77", + "authors": [ + { + "n": "Maksymenko Kostiantyn", + "e": "makkostya@ukr.net" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/simulation/source.py": { + "a": 8, + "d": 0 + }, + "mne/simulation/tests/test_source.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6517.json b/doc/sphinxext/prs/6517.json new file mode 100644 index 00000000000..a5017778b35 --- /dev/null +++ b/doc/sphinxext/prs/6517.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "73e9f22ef2e9e3092121a09b46e74356606162d9", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_read_inverse.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6519.json b/doc/sphinxext/prs/6519.json new file mode 100644 index 00000000000..4b5e467493c --- /dev/null +++ b/doc/sphinxext/prs/6519.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4684eb5f38943dca74b04bf31f2527b1b2eb4977", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 0, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/652.json b/doc/sphinxext/prs/652.json new file mode 100644 index 00000000000..4a89b3b6833 --- /dev/null +++ b/doc/sphinxext/prs/652.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "dc270325491d236d948c49c4b23fc58272d13131", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + 
"examples/inverse/plot_compute_mne_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 4, + "d": 4 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 1, + "d": 1 + }, + "mne/forward.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 53, + "d": 28 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 11, + "d": 10 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 4, + "d": 4 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 28, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6520.json b/doc/sphinxext/prs/6520.json new file mode 100644 index 00000000000..32151338f2a --- /dev/null +++ b/doc/sphinxext/prs/6520.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b3b14b91264d1089a61850b93847d44ba4bd587b", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 0, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 22, + "d": 4 + }, + "mne/viz/backends/_utils.py": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/base_renderer.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 5, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6521.json b/doc/sphinxext/prs/6521.json new file mode 100644 index 00000000000..740edc7bc31 --- /dev/null +++ b/doc/sphinxext/prs/6521.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "dd54d3c9c7c555c2c5d89f4a20a0f0d32cfc766b", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/channels/tests/test_montage.py": { + "a": 127, + "d": 5 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 0, + "d": 29 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 0, + "d": 25 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 0, + "d": 71 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 29 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 32 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 1, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6522.json b/doc/sphinxext/prs/6522.json new file mode 100644 index 00000000000..03208fd42c3 --- /dev/null +++ b/doc/sphinxext/prs/6522.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e4caaf4142d476d3f108a1a0b1c77beeb447915", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/pre_install.rst": { + "a": 35, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6524.json b/doc/sphinxext/prs/6524.json new file mode 100644 index 00000000000..e3cda7a284e --- /dev/null +++ b/doc/sphinxext/prs/6524.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "047c9a707593095e8b2dd1af5e40afc82e7742aa", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 20, + "d": 9 + }, + "mne/viz/backends/base_renderer.py": { + "a": 1, + "d": 1 + }, + 
"mne/viz/backends/tests/test_renderer.py": { + "a": 7, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6529.json b/doc/sphinxext/prs/6529.json new file mode 100644 index 00000000000..b36379cd582 --- /dev/null +++ b/doc/sphinxext/prs/6529.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5bc8457a1122da120148ee9e0dc8b09d33615c0b", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 86, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/653.json b/doc/sphinxext/prs/653.json new file mode 100644 index 00000000000..a90b761018e --- /dev/null +++ b/doc/sphinxext/prs/653.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "641f62b78ae7ee2f403b1efebf06b715477d9bc5", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/export/plot_epochs_as_data_frame.py": { + "a": 19, + "d": 17 + }, + "examples/export/plot_epochs_to_nitime.py": { + "a": 1, + "d": 1 + }, + "examples/export/plot_evoked_to_nitime.py": { + "a": 3, + "d": 3 + }, + "examples/plot_evoked_topomap_delayed_ssp.py": { + "a": 4, + "d": 4 + }, + "examples/plot_topo_compare_conditions.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 11, + "d": 18 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 17, + "d": 15 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 3, + "d": 2 + }, + "mne/epochs.py": { + "a": 9, + "d": 10 + }, + "mne/event.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 21, + "d": 17 + }, + "mne/source_estimate.py": { + "a": 8, + "d": 10 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 4, + "d": 2 + }, + "mne/viz.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6532.json b/doc/sphinxext/prs/6532.json new file mode 100644 index 00000000000..01ee579220c --- /dev/null +++ b/doc/sphinxext/prs/6532.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0bb360be833bde585f6d2cf59f9348e6b067dd48", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 12, + "d": 41 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6533.json b/doc/sphinxext/prs/6533.json new file mode 100644 index 00000000000..a9ef95c07bf --- /dev/null +++ b/doc/sphinxext/prs/6533.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "201ce50343a7a6abe33c6383a6dbb3efce1489d2", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/carousel.inc": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6534.json b/doc/sphinxext/prs/6534.json new file mode 100644 index 00000000000..ee371865b97 --- /dev/null +++ b/doc/sphinxext/prs/6534.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "8c9a3a8ecbee675c7a2285fcd6b45d343e94c523", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 8 + }, + "mne/channels/montage.py": { + "a": 3, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 7, + "d": 3 + }, + "mne/epochs.py": { + "a": 1, + "d": 4 + }, + 
"mne/io/brainvision/brainvision.py": { + "a": 7, + "d": 15 + }, + "mne/io/cnt/cnt.py": { + "a": 10, + "d": 13 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 12, + "d": 25 + }, + "mne/io/eeglab/eeglab.py": { + "a": 38, + "d": 21 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 20, + "d": 16 + }, + "mne/io/egi/egi.py": { + "a": 5, + "d": 9 + }, + "mne/io/egi/egimff.py": { + "a": 5, + "d": 9 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 1, + "d": 2 + }, + "mne/io/nicolet/nicolet.py": { + "a": 9, + "d": 9 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 0, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 6, + "d": 0 + }, + "mne/io/utils.py": { + "a": 14, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6536.json b/doc/sphinxext/prs/6536.json new file mode 100644 index 00000000000..89c8cabbf3c --- /dev/null +++ b/doc/sphinxext/prs/6536.json @@ -0,0 +1,123 @@ +{ + "merge_commit_sha": "f71468814df671700feb6460dce34c259ac77b39", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 20, + "d": 17 + }, + "mne/filter.py": { + "a": 14, + "d": 9 + }, + "mne/fixes.py": { + "a": 18, + "d": 10 + }, + "mne/io/edf/edf.py": { + "a": 3, + "d": 2 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/source.py": { + "a": 2, + "d": 1 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 5, + "d": 2 + }, + "mne/simulation/tests/test_raw.py": { + "a": 8, + "d": 2 + }, + "mne/simulation/tests/test_source.py": { + "a": 7, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 2, + "d": 2 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 6, + "d": 2 + }, + "mne/stats/tests/test_permutations.py": { + "a": 5, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 4 + }, + "mne/tests/test_filter.py": { + "a": 7, + "d": 11 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 3 + }, + "mne/time_frequency/csd.py": { + "a": 0, + "d": 3 + }, + "mne/time_frequency/stft.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 5, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 7, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 3, + "d": 1 + }, + "mne/utils/tests/test_check.py": { + "a": 10, + "d": 2 + }, + "tutorials/discussions/plot_background_filtering.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6537.json b/doc/sphinxext/prs/6537.json new file mode 100644 index 00000000000..a3f5c168a5f --- /dev/null +++ b/doc/sphinxext/prs/6537.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7f77f3c7829abb89027f6e1194392d65dd1e8b17", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/cnt/_utils.py": { + "a": 47, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 13, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/654.json 
b/doc/sphinxext/prs/654.json new file mode 100644 index 00000000000..348441ff71b --- /dev/null +++ b/doc/sphinxext/prs/654.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "53f0c5b6aa68668dfc08d94b288c6cdca5c01804", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 14, + "d": 1 + }, + "examples/plot_estimate_covariance_matrix_raw.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6540.json b/doc/sphinxext/prs/6540.json new file mode 100644 index 00000000000..fe0bb5e53b4 --- /dev/null +++ b/doc/sphinxext/prs/6540.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "eb8f19f2ad96c52ecab5d37a78da5ab426c36e94", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 17, + "d": 17 + }, + "azure-pipelines.yml": { + "a": 9, + "d": 12 + }, + "doc/install/contributing.rst": { + "a": 41, + "d": 84 + }, + "doc/install/mne_python.rst": { + "a": 44, + "d": 19 + }, + "doc/links.inc": { + "a": 0, + "d": 1 + }, + "environment.yml": { + "a": 3, + "d": 3 + }, + "requirements.txt": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6541.json b/doc/sphinxext/prs/6541.json new file mode 100644 index 00000000000..f079e9653e2 --- /dev/null +++ b/doc/sphinxext/prs/6541.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "964ea6c360b3cd89733c554671d97dc9ec0054de", + "authors": [ + { + "n": "Dirk Gütlin", + "e": null + } + ], + "changes": { + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 12 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 6 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 12 + }, + "mne/io/cnt/cnt.py": { + "a": 2, + "d": 12 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 12 + }, + "mne/io/edf/edf.py": { + "a": 5, + "d": 30 + }, + "mne/io/eeglab/eeglab.py": { + "a": 6, + "d": 14 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 6 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 6 + }, + "mne/io/eximia/eximia.py": { + "a": 2, + "d": 4 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 12 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 9 + }, + "mne/io/nicolet/nicolet.py": { + "a": 2, + "d": 12 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6545.json b/doc/sphinxext/prs/6545.json new file mode 100644 index 00000000000..30233067e9f --- /dev/null +++ b/doc/sphinxext/prs/6545.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8fa3652b3a105a07230061fb90f3ee8371173798", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 1 + }, + "mne/io/brainvision/tests/data/test.vmrk": { + "a": 3, + "d": 3 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6546.json b/doc/sphinxext/prs/6546.json new file mode 100644 index 00000000000..327e3c91349 --- /dev/null +++ b/doc/sphinxext/prs/6546.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "988986a55555f9d08b793b8b7d2b4c9b5477d5ba", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 1, + "d": 5 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6548.json b/doc/sphinxext/prs/6548.json new file 
mode 100644 index 00000000000..71e2e6d9ec2 --- /dev/null +++ b/doc/sphinxext/prs/6548.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "26231ff6c79dd8543310302abb67f9be5bc31be9", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "CONTRIBUTING.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6549.json b/doc/sphinxext/prs/6549.json new file mode 100644 index 00000000000..32d0f7a6d59 --- /dev/null +++ b/doc/sphinxext/prs/6549.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2b11463a704a017b2e75dac71c99e5335de21395", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/655.json b/doc/sphinxext/prs/655.json new file mode 100644 index 00000000000..1e62ed9de08 --- /dev/null +++ b/doc/sphinxext/prs/655.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "407355aed4da0b07c01251efd352e27c0a7cc44a", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_ecg_artifacts_from_ica.py": { + "a": 0, + "d": 109 + }, + "examples/preprocessing/plot_eog_artifacts_from_ica.py": { + "a": 0, + "d": 105 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 27, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6551.json b/doc/sphinxext/prs/6551.json new file mode 100644 index 00000000000..3703a437d83 --- /dev/null +++ b/doc/sphinxext/prs/6551.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "f64d6139464e83686cc44b15bc1b25774ffbf36c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 18, + "d": 4 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_mne_crosstalk_function.py": { + "a": 2, + "d": 5 + }, + "examples/inverse/plot_mne_point_spread_function.py": { + "a": 2, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 0, + "d": 4 + }, + "mne/viz/backends/_pyvista.py": { + "a": 163, + "d": 93 + }, + "mne/viz/tests/test_3d.py": { + "a": 0, + "d": 3 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tutorials/misc/plot_ecog.py": { + "a": 1, + "d": 1 + }, + "tutorials/simulation/plot_dics.py": { + "a": 9, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6553.json b/doc/sphinxext/prs/6553.json new file mode 100644 index 00000000000..93ecba234e1 --- /dev/null +++ b/doc/sphinxext/prs/6553.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "e2090d79154ddb3244ed4961544053612af2aa76", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 6, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/sphinxext/sphinx_bootstrap_divs/__init__.py": { + "a": 196, + "d": 0 + }, + "doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.css": { + "a": 9, + "d": 0 + }, + "doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.js": { + "a": 6, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 17, + "d": 15 + }, + "requirements.txt": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6554.json b/doc/sphinxext/prs/6554.json new file mode 100644 index 00000000000..6a108fe7c03 --- /dev/null +++ b/doc/sphinxext/prs/6554.json @@ -0,0 +1,35 @@ +{ + 
"merge_commit_sha": "60ca3f434389779eeed865b1893f3fa1caf37889", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 2, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 4, + "d": 3 + }, + "tutorials/epochs/plot_metadata_epochs.py": { + "a": 2, + "d": 2 + }, + "tutorials/epochs/plot_visualize_epochs.py": { + "a": 5, + "d": 7 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 1, + "d": 2 + }, + "tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6558.json b/doc/sphinxext/prs/6558.json new file mode 100644 index 00000000000..5e6ef8e9159 --- /dev/null +++ b/doc/sphinxext/prs/6558.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "31d356b6da8919835cfc7a69e6f07fc8680ceeb4", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/656.json b/doc/sphinxext/prs/656.json new file mode 100644 index 00000000000..1dbd22e58df --- /dev/null +++ b/doc/sphinxext/prs/656.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ad02680d49d4d827aef77cebbb0bef0c5a46c482", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/time_frequency/plot_tfr_topography.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6560.json b/doc/sphinxext/prs/6560.json new file mode 100644 index 00000000000..68bf410a46c --- /dev/null +++ b/doc/sphinxext/prs/6560.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "91b5f2aa7a51e60232c8e8501ccd3f8cb59f027a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/epochs/plot_visualize_epochs.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6561.json b/doc/sphinxext/prs/6561.json new file mode 100644 index 00000000000..3df61ed87b5 --- /dev/null +++ b/doc/sphinxext/prs/6561.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6fe6b64c6601a0eb58bf15ba6e01bb20cca786c5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_filter.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6563.json b/doc/sphinxext/prs/6563.json new file mode 100644 index 00000000000..998e9bf501d --- /dev/null +++ b/doc/sphinxext/prs/6563.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "58cc2d3d094d88a4a88f08831ae00b459d334999", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/documentation.rst": { + "a": 6, + "d": 6 + }, + "tutorials/preprocessing/plot_10_preprocessing_overview.py": { + "a": 248, + "d": 0 + }, + "tutorials/preprocessing/plot_20_rejecting_bad_data.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/plot_30_filtering_resampling.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_45_ica_from_raw.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/plot_artifacts_detection.py": { + "a": 0, + "d": 137 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/6564.json b/doc/sphinxext/prs/6564.json new file mode 100644 index 00000000000..2c6387bd7e5 --- /dev/null +++ b/doc/sphinxext/prs/6564.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0e4354ebf423b2bc9e0b9ca9f000250af43688fb", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 3, + "d": 6 + }, + "mne/channels/tests/test_montage.py": { + "a": 66, + "d": 1 + }, + "mne/digitization/_utils.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6565.json b/doc/sphinxext/prs/6565.json new file mode 100644 index 00000000000..641114b762f --- /dev/null +++ b/doc/sphinxext/prs/6565.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2828cf7952ba892ad3116752c88226e6fca008e5", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/preprocessing/ecg.py": { + "a": 7, + "d": 3 + }, + "mne/preprocessing/eog.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6567.json b/doc/sphinxext/prs/6567.json new file mode 100644 index 00000000000..5c56340e2f8 --- /dev/null +++ b/doc/sphinxext/prs/6567.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7d27ad33bdfaa59465e45c620c8bb864adc7375f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6570.json b/doc/sphinxext/prs/6570.json new file mode 100644 index 00000000000..39732dc9454 --- /dev/null +++ b/doc/sphinxext/prs/6570.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3eb246e4d71e1d6b81652224de7209213a4db3ed", + "authors": [ + { + "n": "Robert Seymour", + "e": null + } + ], + "changes": { + "mne/channels/data/layouts/KIT-125.lout": { + "a": 126, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 3, + "d": 0 + }, + "mne/io/kit/constants.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6571.json b/doc/sphinxext/prs/6571.json new file mode 100644 index 00000000000..9d6dc95517e --- /dev/null +++ b/doc/sphinxext/prs/6571.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "702c6c91af896e4ddd0b56d5e803ba2ba0badd83", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6572.json b/doc/sphinxext/prs/6572.json new file mode 100644 index 00000000000..9d1dde65fe2 --- /dev/null +++ b/doc/sphinxext/prs/6572.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bfc1b0cc2eaf08a9f73652a8efcc0fce71667fe3", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/commands/utils.py": { + "a": 19, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6573.json b/doc/sphinxext/prs/6573.json new file mode 100644 index 00000000000..0d3829136a0 --- /dev/null +++ b/doc/sphinxext/prs/6573.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cea6f69f1aba4f495ff0808f542f6980f5ef4c10", + "authors": [ + { + "n": "Dirk Gütlin", + "e": null + } + ], + "changes": { + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6574.json 
b/doc/sphinxext/prs/6574.json new file mode 100644 index 00000000000..05f95969fd8 --- /dev/null +++ b/doc/sphinxext/prs/6574.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0cc24766de2f50fa062c009d00cf71c7caaa41a1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 38, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6575.json b/doc/sphinxext/prs/6575.json new file mode 100644 index 00000000000..9898b0add91 --- /dev/null +++ b/doc/sphinxext/prs/6575.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "04997085e5f9cb67ed9a7a61d28551906f615580", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/annotation-controls.png": { + "a": 0, + "d": 0 + }, + "doc/manual/cookbook.rst": { + "a": 1, + "d": 1 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 2, + "d": 0 + }, + "tutorials/preprocessing/plot_10_preprocessing_overview.py": { + "a": 7, + "d": 14 + }, + "tutorials/preprocessing/plot_15_handling_bad_channels.py": { + "a": 268, + "d": 0 + }, + "tutorials/preprocessing/plot_20_rejecting_bad_data.py": { + "a": 199, + "d": 163 + }, + "tutorials/raw/plot_30_annotate_raw.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6577.json b/doc/sphinxext/prs/6577.json new file mode 100644 index 00000000000..2cef0aef1c2 --- /dev/null +++ b/doc/sphinxext/prs/6577.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "141f061874197b795f2e6f3c9ce146e11e598316", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/morph.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6578.json b/doc/sphinxext/prs/6578.json new file mode 100644 index 00000000000..5f2f79b9544 --- /dev/null +++ b/doc/sphinxext/prs/6578.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "dd236d928e036356019685cede2f8109b7b6fd77", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 3 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/658.json b/doc/sphinxext/prs/658.json new file mode 100644 index 00000000000..e4c4b996f82 --- /dev/null +++ b/doc/sphinxext/prs/658.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "de9fdfe2b012c8497e9f0e8a8ac78a1dfa09ce00", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "examples/plot_read_forward.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6582.json b/doc/sphinxext/prs/6582.json new file mode 100644 index 00000000000..c335e847870 --- /dev/null +++ b/doc/sphinxext/prs/6582.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a7b39388b3f5225df554cd7cf770a80b0136fe94", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 27, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6585.json b/doc/sphinxext/prs/6585.json new file mode 100644 index 00000000000..b2c7da8cd74 --- /dev/null +++ b/doc/sphinxext/prs/6585.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d54333b4411e0bd810f33b0bf7629351c7968408", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/plot_10_preprocessing_overview.py": { + "a": 7, + "d": 
6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6586.json b/doc/sphinxext/prs/6586.json new file mode 100644 index 00000000000..dd2829f6dad --- /dev/null +++ b/doc/sphinxext/prs/6586.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9d4fdc2d36aeee9dd20f713f8fd23afd2149adf0", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/base.py": { + "a": 12, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6589.json b/doc/sphinxext/prs/6589.json new file mode 100644 index 00000000000..36f8da67395 --- /dev/null +++ b/doc/sphinxext/prs/6589.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "40f1f5e652cf0f85af0d82e796a366850322abef", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/659.json b/doc/sphinxext/prs/659.json new file mode 100644 index 00000000000..ace9dd787ce --- /dev/null +++ b/doc/sphinxext/prs/659.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "054938e95566ffd71e7ab10458dbbd9e70480984", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 2, + "d": 0 + }, + "mne/fiff/bti/raw.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/kit/kit.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/meas_info.py": { + "a": 52, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 1, + "d": 0 + }, + "mne/fiff/tests/test_info.py": { + "a": 41, + "d": 0 + }, + "mne/viz.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6590.json b/doc/sphinxext/prs/6590.json new file mode 100644 index 00000000000..ed8abd4e7dc --- /dev/null +++ b/doc/sphinxext/prs/6590.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5bab96d4a76609f34b4e505b57746925b557173a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6591.json b/doc/sphinxext/prs/6591.json new file mode 100644 index 00000000000..cb0a21c8d49 --- /dev/null +++ b/doc/sphinxext/prs/6591.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "368a490f9d7a298050ee04c2aa7882629c9fab21", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/annotations.py": { + "a": 4, + "d": 0 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 15, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 9, + "d": 1 + }, + "mne/io/ctf/markers.py": { + "a": 77, + "d": 0 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 73, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6593.json b/doc/sphinxext/prs/6593.json new file mode 100644 index 00000000000..7494c783eff --- /dev/null +++ b/doc/sphinxext/prs/6593.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e5a99df7c3440a3ca188218c12b82a212f779e09", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6594.json b/doc/sphinxext/prs/6594.json new file mode 100644 index 00000000000..bfba4ef816b --- /dev/null +++ b/doc/sphinxext/prs/6594.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": 
"d75bce15717ee4cbf2abbdb5c6e5148959000a91", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/manual/datasets_index.rst": { + "a": 8, + "d": 8 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/raw/plot_10_raw_overview.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6595.json b/doc/sphinxext/prs/6595.json new file mode 100644 index 00000000000..82a58286e63 --- /dev/null +++ b/doc/sphinxext/prs/6595.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "00806ca8b7e33f65a9782bfe8eb9b08b89c9ec70", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 11, + "d": 8 + }, + "mne/viz/tests/test_ica.py": { + "a": 14, + "d": 8 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 5, + "d": 2 + }, + "tutorials/preprocessing/plot_45_ica_from_raw.py": { + "a": 12, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6596.json b/doc/sphinxext/prs/6596.json new file mode 100644 index 00000000000..1403fa089cf --- /dev/null +++ b/doc/sphinxext/prs/6596.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "060e40049f44893ffddb8a3dc28da7a5dc59f858", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 9, + "d": 0 + }, + "doc/_static/diagrams/ica.dot": { + "a": 72, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 0, + "d": 17 + }, + "mne/preprocessing/ica.py": { + "a": 43, + "d": 39 + }, + "mne/viz/ica.py": { + "a": 4, + "d": 3 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 508, + "d": 229 + }, + "tutorials/preprocessing/plot_45_ica_from_raw.py": { + "a": 0, + "d": 135 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6598.json b/doc/sphinxext/prs/6598.json new file mode 100644 index 00000000000..60c4f0d0f5f --- /dev/null +++ b/doc/sphinxext/prs/6598.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1d0e34eb53c6f1d979eaf2704d5b440ecea9962d", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/bem.py": { + "a": 8, + "d": 8 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6599.json b/doc/sphinxext/prs/6599.json new file mode 100644 index 00000000000..0cc0bf1db7a --- /dev/null +++ b/doc/sphinxext/prs/6599.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "14fe4fa5e92884dcf665fd96a71ec5bb49e4da96", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/conftest.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/66.json b/doc/sphinxext/prs/66.json new file mode 100644 index 00000000000..83c600904ff --- /dev/null +++ b/doc/sphinxext/prs/66.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "aab4e168ffb3e1446d4e9c18b54debe74aace403", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/time_frequency/stft.py": { + "a": 203, + "d": 0 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 43, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/660.json b/doc/sphinxext/prs/660.json new file mode 100644 index 00000000000..fc94ba7e874 --- /dev/null +++ b/doc/sphinxext/prs/660.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ec1d44ace6a92043872a39940ded41a422c77941", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 10, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6600.json b/doc/sphinxext/prs/6600.json new file mode 100644 index 00000000000..042255eefe8 --- /dev/null +++ b/doc/sphinxext/prs/6600.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "69d992c03b35b797407c876aa60dd7daabcce053", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 24, + "d": 6 + }, + "mne/channels/tests/test_montage.py": { + "a": 12, + "d": 5 + }, + "mne/io/brainvision/brainvision.py": { + "a": 15, + "d": 4 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 24, + "d": 1 + }, + "mne/transforms.py": { + "a": 22, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6602.json b/doc/sphinxext/prs/6602.json new file mode 100644 index 00000000000..734b6b94e7a --- /dev/null +++ b/doc/sphinxext/prs/6602.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "65c4e60444da276ed09f086b81ee45bd13cb0375", + "authors": [ + { + "n": "Theodore Papadopoulo", + "e": "Theodore.Papadopoulo@inria.fr" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 3 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6603.json b/doc/sphinxext/prs/6603.json new file mode 100644 index 00000000000..9ea49922fa1 --- /dev/null +++ b/doc/sphinxext/prs/6603.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "988653c69e37876e0ca64fc211d3937e9e42076f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".coveragerc": { + "a": 1, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 27, + "d": 10 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 0 + }, + "mne/chpi.py": { + "a": 13, + "d": 9 + }, + "mne/cov.py": { + "a": 3, + "d": 3 + }, + "mne/dipole.py": { + "a": 12, + "d": 8 + }, + "mne/fixes.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 55, + "d": 30 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 4 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 12, + "d": 14 + }, + "mne/io/pick.py": { + "a": 7, + "d": 3 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 18, + "d": 15 + }, + "mne/minimum_norm/inverse.py": { + "a": 11, + "d": 7 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 10, + "d": 8 + }, + "mne/utils/__init__.py": { + "a": 4, + "d": 1 + }, + "mne/utils/linalg.py": { + "a": 230, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 14, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/661.json b/doc/sphinxext/prs/661.json new file mode 100644 index 00000000000..566aa995685 --- /dev/null +++ b/doc/sphinxext/prs/661.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3c7e1d8fe40dedd5a319e90f0764e423b145d268", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + 
"a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 2, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 33, + "d": 15 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 40, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6611.json b/doc/sphinxext/prs/6611.json new file mode 100644 index 00000000000..777f08aa80c --- /dev/null +++ b/doc/sphinxext/prs/6611.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4624e5fe7b110d49b3d67ebdb5dfbb73a5b4ea91", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 12, + "d": 9 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 56, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6612.json b/doc/sphinxext/prs/6612.json new file mode 100644 index 00000000000..49e2f2472ea --- /dev/null +++ b/doc/sphinxext/prs/6612.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "935193090b2d101fbe5849d8392873ae0ecfd327", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "README.rst": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/install/mne_python.rst": { + "a": 4, + "d": 1 + }, + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 21, + "d": 0 + }, + "mne/forward/_compute_forward.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 103, + "d": 83 + }, + "mne/tests/test_surface.py": { + "a": 5, + "d": 2 + }, + "mne/utils/config.py": { + "a": 7, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6614.json b/doc/sphinxext/prs/6614.json new file mode 100644 index 00000000000..1140a735616 --- /dev/null +++ b/doc/sphinxext/prs/6614.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cc836c2635b7a720ec6c60074a1997fe52d7ca2f", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 0, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6617.json b/doc/sphinxext/prs/6617.json new file mode 100644 index 00000000000..af0f6dd2c2e --- /dev/null +++ b/doc/sphinxext/prs/6617.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "dec30fe5e4266898ac2f93af39f64e3ae9686fb5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 16, + "d": 13 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/662.json b/doc/sphinxext/prs/662.json new file mode 100644 index 00000000000..cbeeac58b41 --- /dev/null +++ b/doc/sphinxext/prs/662.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3b767fe77429a468b5fc9fb3a7f33a162afe1781", + "authors": [ + { + "n": "Romain Trachel", + "e": "trachelr@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_decoding_csp_space.py": { + "a": 101, + "d": 0 + }, + "mne/csp.py": { + "a": 163, + "d": 0 + }, + "mne/tests/test_csp.py": { + "a": 63, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/6620.json b/doc/sphinxext/prs/6620.json new file mode 100644 index 00000000000..ccf1ead17a5 --- /dev/null +++ b/doc/sphinxext/prs/6620.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7a790639d51d99afe9590809b05cc4b01a871aac", + "authors": [ + { + "n": "Lorenz Esch", + "e": null + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6622.json b/doc/sphinxext/prs/6622.json new file mode 100644 index 00000000000..2bef835bd08 --- /dev/null +++ b/doc/sphinxext/prs/6622.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "18f06d118a45c23fa733b218c81630d968085e73", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/autosummary/class.rst": { + "a": 5, + "d": 1 + }, + "doc/_templates/autosummary/function.rst": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 27, + "d": 3 + }, + "doc/sphinxext/README.txt": { + "a": 0, + "d": 25 + }, + "doc/sphinxext/gen_commands.py": { + "a": 10, + "d": 10 + }, + "doc/sphinxext/ipython_console_highlighting.py": { + "a": 0, + "d": 100 + }, + "doc/sphinxext/only_directives.py": { + "a": 0, + "d": 65 + }, + "examples/time_frequency/plot_compute_csd.py": { + "a": 1, + "d": 1 + }, + "tutorials/raw/.gitignore": { + "a": 1, + "d": 0 + }, + "tutorials/sample-datasets/plot_brainstorm_auditory.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6623.json b/doc/sphinxext/prs/6623.json new file mode 100644 index 00000000000..3f440ee6308 --- /dev/null +++ b/doc/sphinxext/prs/6623.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "07e605fa8ed49b1c7558051746033c2553db350e", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/brainvision/tests/data/test_bad_date.vhdr": { + "a": 0, + "d": 142 + }, + "mne/io/brainvision/tests/data/test_bad_date.vmrk": { + "a": 0, + "d": 25 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 59, + "d": 68 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6624.json b/doc/sphinxext/prs/6624.json new file mode 100644 index 00000000000..8d60c27b61b --- /dev/null +++ b/doc/sphinxext/prs/6624.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "15c2cb0da6ab76072547e2a74a6a9e4564a55e8e", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 10, + "d": 3 + }, + "mne/stats/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 28, + "d": 5 + }, + "mne/stats/tests/test_permutations.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6625.json b/doc/sphinxext/prs/6625.json new file mode 100644 index 00000000000..0dfe1e19627 --- /dev/null +++ b/doc/sphinxext/prs/6625.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4d8b50db8656bcd35d2ce84a1c8004c50cb70cb2", + "authors": [ + { + "n": "Jevri Hanna", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6628.json b/doc/sphinxext/prs/6628.json new file mode 100644 index 00000000000..aeb0253462e --- /dev/null +++ b/doc/sphinxext/prs/6628.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"d89e3ae132e6e1d49d3bf60f9e831763ec8cb155", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 3, + "d": 1 + }, + "mne/annotations.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_annotations.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/663.json b/doc/sphinxext/prs/663.json new file mode 100644 index 00000000000..f1edfa569b7 --- /dev/null +++ b/doc/sphinxext/prs/663.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4f08ce2001bc024e777d59b2936de2ba9f99b601", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 10, + "d": 24 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6630.json b/doc/sphinxext/prs/6630.json new file mode 100644 index 00000000000..0e037bb32c2 --- /dev/null +++ b/doc/sphinxext/prs/6630.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "176ccfe72d22994cef2085b1b02fbaccd850d8ad", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 17, + "d": 23 + }, + "mne/forward/_compute_forward.py": { + "a": 9, + "d": 8 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 3 + }, + "tutorials/preprocessing/plot_10_preprocessing_overview.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6631.json b/doc/sphinxext/prs/6631.json new file mode 100644 index 00000000000..7559f3161b8 --- /dev/null +++ b/doc/sphinxext/prs/6631.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "486db19e7a06e4107d37c82290f22c4a3948d44c", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 3, + "d": 6 + }, + "mne/tests/test_annotations.py": { + "a": 20, + "d": 3 + }, + "tutorials/raw/plot_30_annotate_raw.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6633.json b/doc/sphinxext/prs/6633.json new file mode 100644 index 00000000000..e57f926e185 --- /dev/null +++ b/doc/sphinxext/prs/6633.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "98e720db27a5e02117d3e2372299bb0b9770be0d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 5, + "d": 4 + }, + "mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6635.json b/doc/sphinxext/prs/6635.json new file mode 100644 index 00000000000..2e2fdbe8927 --- /dev/null +++ b/doc/sphinxext/prs/6635.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a193b52845b9484dccaec0cd68232cd33e02b258", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/reset-syntax.css": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6636.json b/doc/sphinxext/prs/6636.json new file mode 100644 index 00000000000..92f15fc958f --- /dev/null +++ b/doc/sphinxext/prs/6636.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8705374530947482dae5c07d0ed3fa6e45c08861", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 32, + "d": 40 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/6638.json b/doc/sphinxext/prs/6638.json new file mode 100644 index 00000000000..bc9757b17b5 --- /dev/null +++ b/doc/sphinxext/prs/6638.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2ceb9c70e59b5cf143f4a2d9d2f541cb387d8c5e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/links.inc": { + "a": 1, + "d": 0 + }, + "tutorials/discussions/plot_background_statistics.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6639.json b/doc/sphinxext/prs/6639.json new file mode 100644 index 00000000000..03cf7fb77e3 --- /dev/null +++ b/doc/sphinxext/prs/6639.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "0600f811dbdcbe288d4c51b1b2b4a055b193e9c3", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/visualization/plot_3d_to_2d.py": { + "a": 3, + "d": 4 + }, + "mne/channels/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 306, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 362, + "d": 258 + }, + "mne/channels/tests/test_montage.py": { + "a": 112, + "d": 47 + }, + "mne/coreg.py": { + "a": 1, + "d": 0 + }, + "mne/digitization/_utils.py": { + "a": 18, + "d": 8 + }, + "mne/gui/_file_traits.py": { + "a": 3, + "d": 11 + }, + "mne/io/array/tests/test_array.py": { + "a": 8, + "d": 8 + }, + "mne/io/fieldtrip/utils.py": { + "a": 4, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/montage.py": { + "a": 1, + "d": 1 + }, + "tutorials/misc/plot_ecog.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6640.json b/doc/sphinxext/prs/6640.json new file mode 100644 index 00000000000..bb1141c0846 --- /dev/null +++ b/doc/sphinxext/prs/6640.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c6226d792907be781095e93955e75a8f0ef0bfcb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/time_frequency/plot_source_power_spectrum_opm.py": { + "a": 8, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 13, + "d": 11 + }, + "tutorials/epochs/plot_visualize_epochs.py": { + "a": 2, + "d": 4 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6642.json b/doc/sphinxext/prs/6642.json new file mode 100644 index 00000000000..7cbacdecaa4 --- /dev/null +++ b/doc/sphinxext/prs/6642.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ae266c89814cd8f1ab3f77bbfeb7dfba8eeed295", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 10, + "d": 0 + }, + "examples/preprocessing/plot_movement_compensation.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 4 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 190, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6646.json b/doc/sphinxext/prs/6646.json new file mode 100644 index 00000000000..00a632003eb --- /dev/null +++ b/doc/sphinxext/prs/6646.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c952d71f78b63765037743ba4c641ec472516899", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + 
"tutorials/preprocessing/plot_10_preprocessing_overview.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6648.json b/doc/sphinxext/prs/6648.json new file mode 100644 index 00000000000..327dda1a73b --- /dev/null +++ b/doc/sphinxext/prs/6648.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dcf01314bc2895a60bb26cb83ade53ad50f9ad95", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/plot_55_setting_eeg_reference.py": { + "a": 211, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6650.json b/doc/sphinxext/prs/6650.json new file mode 100644 index 00000000000..626e5e4309f --- /dev/null +++ b/doc/sphinxext/prs/6650.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "f954ef64ab2ff88f4e80ecebd50a87591c3a7225", + "authors": [ + { + "n": "Milan Rybář", + "e": "kontakt@milanrybar.cz" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 5, + "d": 0 + }, + "mne/io/tests/test_compensator.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 2, + "d": 3 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6651.json b/doc/sphinxext/prs/6651.json new file mode 100644 index 00000000000..4ad584605af --- /dev/null +++ b/doc/sphinxext/prs/6651.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1f6f4f89a81df875ecb0a8c86c2542337dfbaaa9", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "environment.yml": { + "a": 0, + "d": 1 + }, + "requirements.txt": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6653.json b/doc/sphinxext/prs/6653.json new file mode 100644 index 00000000000..4c20799f4fa --- /dev/null +++ b/doc/sphinxext/prs/6653.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9e5329b267068b358bb13eda303038276964f2cb", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6655.json b/doc/sphinxext/prs/6655.json new file mode 100644 index 00000000000..d0390ba4238 --- /dev/null +++ b/doc/sphinxext/prs/6655.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e13ab53cbf8e0024774ab14d5e968fee2076121f", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 1, + "d": 0 + }, + "mne/io/edf/_utils.py": { + "a": 0, + "d": 39 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 6 + }, + "mne/io/edf/gdf_encodes.txt": { + "a": 0, + "d": 284 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 1, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6658.json b/doc/sphinxext/prs/6658.json new file mode 100644 index 00000000000..cc3fbb55eb5 --- /dev/null +++ b/doc/sphinxext/prs/6658.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "0ad37f0d156e74ed5949ad2a6046a5eb1826281d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".coveragerc": { + "a": 5, + "d": 0 + }, + "mne/fixes.py": { + "a": 3, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 119, + "d": 50 + }, + 
"mne/stats/tests/test_cluster_level.py": { + "a": 48, + "d": 25 + }, + "mne/surface.py": { + "a": 4, + "d": 4 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_erp.py": { + "a": 10, + "d": 9 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py": { + "a": 0, + "d": 1 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 0, + "d": 2 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6659.json b/doc/sphinxext/prs/6659.json new file mode 100644 index 00000000000..fc91be3ab78 --- /dev/null +++ b/doc/sphinxext/prs/6659.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "312b661e8822c2ae8e04d0e56fab5bf3779235c6", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/utils/mixin.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/666.json b/doc/sphinxext/prs/666.json new file mode 100644 index 00000000000..a291bf6c9f5 --- /dev/null +++ b/doc/sphinxext/prs/666.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8f64083a981745837dc4f52565de29b7a339abe4", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 15, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 37, + "d": 1 + }, + "mne/tests/test_viz.py": { + "a": 14, + "d": 6 + }, + "mne/viz.py": { + "a": 141, + "d": 39 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6660.json b/doc/sphinxext/prs/6660.json new file mode 100644 index 00000000000..76051e932e4 --- /dev/null +++ b/doc/sphinxext/prs/6660.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3e3becd1062aa12e89a7e662df561321191de100", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 5, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6661.json b/doc/sphinxext/prs/6661.json new file mode 100644 index 00000000000..48c1a42759b --- /dev/null +++ b/doc/sphinxext/prs/6661.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3a95f054a46abd989a529cd16f4c6eff468ee2de", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/_eloreta.py": { + "a": 20, + "d": 10 + }, + "mne/utils/linalg.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6663.json b/doc/sphinxext/prs/6663.json new file mode 100644 index 00000000000..759367a58ca --- /dev/null +++ b/doc/sphinxext/prs/6663.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "cc3473efd4a124897cb873ff482380c91c5a0cb7", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Alex Rockhill", + "e": "aprockhill206@gmail.com" + } + ], + 
"changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 66, + "d": 8 + }, + "mne/channels/tests/test_montage.py": { + "a": 40, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6664.json b/doc/sphinxext/prs/6664.json new file mode 100644 index 00000000000..efd5d32dfc6 --- /dev/null +++ b/doc/sphinxext/prs/6664.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2649f92ae577c6a25c8b943b04ecdd97bb470173", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 17, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 9, + "d": 12 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6668.json b/doc/sphinxext/prs/6668.json new file mode 100644 index 00000000000..048c32fe8f9 --- /dev/null +++ b/doc/sphinxext/prs/6668.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3db196b0e0ecee3718ae2945b9a0a977902513e5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 39, + "d": 21 + }, + "mne/commands/mne_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_report.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 21, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6669.json b/doc/sphinxext/prs/6669.json new file mode 100644 index 00000000000..fc35044b5bd --- /dev/null +++ b/doc/sphinxext/prs/6669.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cc0391dd51efd1ce0287631114f05052dc9dacf2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/coreg.py": { + "a": 8, + "d": 20 + }, + "mne/tests/test_coreg.py": { + "a": 14, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/667.json b/doc/sphinxext/prs/667.json new file mode 100644 index 00000000000..a08a276659a --- /dev/null +++ b/doc/sphinxext/prs/667.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a2da0c468ed371bfb77b7fbad76d2fe0b24672bd", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "mne/proj.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6670.json b/doc/sphinxext/prs/6670.json new file mode 100644 index 00000000000..4e54b4fc7e7 --- /dev/null +++ b/doc/sphinxext/prs/6670.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "93c75710e7b991e78466e8361bf2d15d3bffa77a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 28, + "d": 49 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 6 + }, + "mne/viz/raw.py": { + "a": 17, + "d": 38 + }, + "mne/viz/utils.py": { + "a": 119, + "d": 60 + }, + "tutorials/preprocessing/plot_15_handling_bad_channels.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6674.json b/doc/sphinxext/prs/6674.json new file mode 100644 index 00000000000..5bf45dca355 --- /dev/null +++ b/doc/sphinxext/prs/6674.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "8861432a3711a5355820b34f76ff6f1ce5d4ad43", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/_includes/data_formats.rst": { + "a": 2, + "d": 0 + }, + 
"doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 0 + }, + "mne/cov.py": { + "a": 9, + "d": 2 + }, + "mne/defaults.py": { + "a": 6, + "d": 5 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 3, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 0 + }, + "mne/io/nirx/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/nirx/nirx.py": { + "a": 283, + "d": 0 + }, + "mne/io/nirx/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 89, + "d": 0 + }, + "mne/io/pick.py": { + "a": 24, + "d": 9 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/test_pick.py": { + "a": 16, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 7, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 10, + "d": 9 + }, + "tutorials/io/plot_30_reading_fnirs_data.py": { + "a": 38, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6677.json b/doc/sphinxext/prs/6677.json new file mode 100644 index 00000000000..d723d8ff6ac --- /dev/null +++ b/doc/sphinxext/prs/6677.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "694f4f2c184a538fc95444fd492e13a367c465ec", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6678.json b/doc/sphinxext/prs/6678.json new file mode 100644 index 00000000000..a53872983cd --- /dev/null +++ b/doc/sphinxext/prs/6678.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "86ab29166f75ec92a43f0c97241e784b4c527fbc", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6679.json b/doc/sphinxext/prs/6679.json new file mode 100644 index 00000000000..b37a4eec4c2 --- /dev/null +++ b/doc/sphinxext/prs/6679.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0123589b2d8e257c348ffe999f9d6d43f0100773", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/artifact_path": { + "a": 1, + "d": 0 + }, + ".circleci/config.yml": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6680.json b/doc/sphinxext/prs/6680.json new file mode 100644 index 00000000000..f1444ebce85 --- /dev/null +++ b/doc/sphinxext/prs/6680.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "55eac8915e23fb254cdc64c68c53eb0b6b6a266f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 19, + "d": 11 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 37, + "d": 32 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6681.json b/doc/sphinxext/prs/6681.json new file mode 100644 index 00000000000..e5c4011f6e9 --- /dev/null +++ b/doc/sphinxext/prs/6681.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "bf5df349e1046e607f1b46a7d553106f02ca7f8d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "bin/mne": { + "a": 1, + "d": 36 + }, + "doc/install/contributing.rst": { + "a": 32, + "d": 0 + }, + "mne/__main__.py": { + "a": 7, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 1, 
+ "d": 3 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 2, + "d": 5 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_compare_fiff.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_coreg.py": { + "a": 1, + "d": 7 + }, + "mne/commands/mne_flash_bem.py": { + "a": 2, + "d": 3 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 2, + "d": 3 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 4 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 3, + "d": 3 + }, + "mne/commands/mne_maxfilter.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_report.py": { + "a": 2, + "d": 3 + }, + "mne/commands/mne_show_fiff.py": { + "a": 1, + "d": 11 + }, + "mne/commands/mne_show_info.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_surf2bem.py": { + "a": 1, + "d": 3 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 2, + "d": 3 + }, + "mne/commands/utils.py": { + "a": 34, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6682.json b/doc/sphinxext/prs/6682.json new file mode 100644 index 00000000000..5ba019a07ed --- /dev/null +++ b/doc/sphinxext/prs/6682.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d41949910bfc1e88d171ef89e49ce58056bdf44e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 9, + "d": 0 + }, + "mne/io/open.py": { + "a": 6, + "d": 5 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6683.json b/doc/sphinxext/prs/6683.json new file mode 100644 index 00000000000..430af0783fa --- /dev/null +++ b/doc/sphinxext/prs/6683.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "9530c96ffbe6c16764c0d3b5e87ec155df368f95", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/dipole.py": { + "a": 5, + "d": 2 + }, + "mne/epochs.py": { + "a": 5, + "d": 2 + }, + "mne/evoked.py": { + "a": 5, + "d": 2 + }, + "mne/io/base.py": { + "a": 6, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 9, + "d": 0 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 9, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 7, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6684.json b/doc/sphinxext/prs/6684.json new file mode 100644 index 00000000000..dc27f926f3f --- /dev/null +++ b/doc/sphinxext/prs/6684.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "abaaccf5572dacc91415a47f01efb567edd15132", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/progressbar.py": { + "a": 11, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6688.json b/doc/sphinxext/prs/6688.json new file mode 100644 index 00000000000..bdfc2820b13 --- /dev/null +++ b/doc/sphinxext/prs/6688.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"490e39b726a6bdc0a1ebb44b425229b049a1f342", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 120, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 61, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6689.json b/doc/sphinxext/prs/6689.json new file mode 100644 index 00000000000..c71976fce62 --- /dev/null +++ b/doc/sphinxext/prs/6689.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e3cbdb5442b9e39dfe79b685b2eb5d75871cecaa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/source_space.py": { + "a": 168, + "d": 105 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6690.json b/doc/sphinxext/prs/6690.json new file mode 100644 index 00000000000..365fac7ae0f --- /dev/null +++ b/doc/sphinxext/prs/6690.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "65fe6d6a0d30a49c30b2a5f583dfc7a4395fa354", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/parallel.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6691.json b/doc/sphinxext/prs/6691.json new file mode 100644 index 00000000000..1bb853ac768 --- /dev/null +++ b/doc/sphinxext/prs/6691.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "1189d1a755f4297dcb7212d053f7e1b0d12fd082", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.10.inc": { + "a": 148, + "d": 0 + }, + "doc/changes/0.11.inc": { + "a": 92, + "d": 0 + }, + "doc/changes/0.12.inc": { + "a": 211, + "d": 0 + }, + "doc/changes/0.13.inc": { + "a": 248, + "d": 0 + }, + "doc/changes/0.14.inc": { + "a": 250, + "d": 0 + }, + "doc/changes/0.15.inc": { + "a": 329, + "d": 0 + }, + "doc/changes/0.16.inc": { + "a": 258, + "d": 0 + }, + "doc/changes/0.17.inc": { + "a": 317, + "d": 0 + }, + "doc/changes/0.18.inc": { + "a": 268, + "d": 0 + }, + "doc/changes/0.2.inc": { + "a": 40, + "d": 0 + }, + "doc/changes/0.3.inc": { + "a": 40, + "d": 0 + }, + "doc/changes/0.4.inc": { + "a": 52, + "d": 0 + }, + "doc/changes/0.5.inc": { + "a": 125, + "d": 0 + }, + "doc/changes/0.6.inc": { + "a": 156, + "d": 0 + }, + "doc/changes/0.7.inc": { + "a": 136, + "d": 0 + }, + "doc/changes/0.8.inc": { + "a": 194, + "d": 0 + }, + "doc/changes/0.9.inc": { + "a": 239, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 154, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 256, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 20, + "d": 3531 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6692.json b/doc/sphinxext/prs/6692.json new file mode 100644 index 00000000000..7605bfb5d0e --- /dev/null +++ b/doc/sphinxext/prs/6692.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "adb70865accfbf2e473d0f78e5be69c090581c0d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".github/ISSUE_TEMPLATE/glossary.md": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6694.json b/doc/sphinxext/prs/6694.json new file mode 100644 index 00000000000..37b0d3cd340 --- /dev/null +++ b/doc/sphinxext/prs/6694.json @@ -0,0 +1,147 @@ +{ + "merge_commit_sha": "d247e5577f30e8d101f400c49f17d1a71c0cfd35", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_includes/channel_types.rst": { + 
"a": 66, + "d": 0 + }, + "doc/_includes/data_formats.rst": { + "a": 9, + "d": 5 + }, + "doc/_includes/precision.rst": { + "a": 6, + "d": 5 + }, + "doc/_includes/units.rst": { + "a": 6, + "d": 5 + }, + "doc/conf.py": { + "a": 4, + "d": 4 + }, + "doc/documentation.rst": { + "a": 0, + "d": 440 + }, + "doc/glossary.rst": { + "a": 12, + "d": 8 + }, + "doc/install/contributing.rst": { + "a": 11, + "d": 9 + }, + "doc/install/index.rst": { + "a": 4, + "d": 3 + }, + "doc/install/pre_install.rst": { + "a": 3, + "d": 9 + }, + "doc/manual/index.rst": { + "a": 39, + "d": 0 + }, + "doc/manual/io.rst": { + "a": 12, + "d": 6 + }, + "doc/overview/cite.rst": { + "a": 31, + "d": 0 + }, + "doc/overview/command_line.rst": { + "a": 2, + "d": 0 + }, + "doc/overview/design_philosophy.rst": { + "a": 94, + "d": 0 + }, + "doc/overview/faq.rst": { + "a": 110, + "d": 92 + }, + "doc/overview/get_help.rst": { + "a": 16, + "d": 0 + }, + "doc/overview/implementation.rst": { + "a": 46, + "d": 0 + }, + "doc/overview/index.rst": { + "a": 60, + "d": 0 + }, + "doc/overview/learn_python.rst": { + "a": 29, + "d": 0 + }, + "doc/sphinxext/gen_commands.py": { + "a": 3, + "d": 1 + }, + "doc/tutorials/_images/plot_read_and_write_raw_data.png": { + "a": 0, + "d": 0 + }, + "doc/tutorials/_images/plot_read_epochs.png": { + "a": 0, + "d": 0 + }, + "doc/tutorials/_images/plot_time_frequency.png": { + "a": 0, + "d": 0 + }, + "doc/tutorials/mne-report.png": { + "a": 0, + "d": 0 + }, + "doc/tutorials/philosophy.rst": { + "a": 0, + "d": 153 + }, + "doc/tutorials/report.rst": { + "a": 0, + "d": 172 + }, + "examples/README.txt": { + "a": 11, + "d": 5 + }, + "examples/visualization/plot_make_report.py": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_report.py": { + "a": 59, + "d": 5 + }, + "mne/report.py": { + "a": 20, + "d": 12 + }, + "tutorials/README.txt": { + "a": 9, + "d": 1 + }, + "tutorials/misc/plot_report.py": { + "a": 197, + "d": 0 + }, + "tutorials/raw/plot_10_raw_overview.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/67.json b/doc/sphinxext/prs/67.json new file mode 100644 index 00000000000..dbb0b0d2ee2 --- /dev/null +++ b/doc/sphinxext/prs/67.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "395e203452d16181b2da1a4dc18008734188c17c", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/artifacts/stim.py": { + "a": 61, + "d": 0 + }, + "mne/artifacts/tests/test_stim.py": { + "a": 32, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/670.json b/doc/sphinxext/prs/670.json new file mode 100644 index 00000000000..cb218cea0eb --- /dev/null +++ b/doc/sphinxext/prs/670.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6a232c948f32eaebb9c5ca0e118e803e9c37f105", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 6, + "d": 0 + }, + "mne/fiff/pick.py": { + "a": 5, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 34, + "d": 9 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 2 + }, + "mne/viz.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6700.json b/doc/sphinxext/prs/6700.json new file mode 100644 index 00000000000..7ef4212d197 --- /dev/null +++ b/doc/sphinxext/prs/6700.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "376999338ffc88e9f889ee4848273628769632c9", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/changes/0.18.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 0, + "d": 2 + }, + "doc/changes/names.inc": { + "a": 0, + "d": 1 + }, + "doc/conf.py": { + "a": 0, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 0, + "d": 2 + }, + "mne/_digitization/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/_digitization/_utils.py": { + "a": 0, + "d": 0 + }, + "mne/_digitization/base.py": { + "a": 8, + "d": 5 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 4, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_marker_gui.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 18, + "d": 7 + }, + "mne/io/tests/test_meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_digitization.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6701.json b/doc/sphinxext/prs/6701.json new file mode 100644 index 00000000000..895171c97c6 --- /dev/null +++ b/doc/sphinxext/prs/6701.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "bf03e17f323341a877dea62963c86cf140757896", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 2, + "d": 1 + }, + "mne/conftest.py": { + "a": 26, + "d": 1 + }, + "mne/fixes.py": { + "a": 17, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 2 + }, + "mne/forward/tests/test_forward.py": { + "a": 10, + "d": 14 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 9, + "d": 13 + }, + "mne/io/base.py": { + "a": 12, + "d": 28 + }, + "mne/io/fiff/raw.py": { + "a": 9, + "d": 11 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 43, + "d": 56 + }, + "mne/io/write.py": { + "a": 8, + "d": 6 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 11, + "d": 2 + }, + "mne/source_space.py": { + "a": 8, + "d": 1 + }, + "mne/surface.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 8, + "d": 10 + }, + "mne/tests/test_cov.py": { + "a": 13, + "d": 16 + }, + "mne/tests/test_source_estimate.py": { + "a": 21, + "d": 27 + }, + 
"mne/tests/test_source_space.py": { + "a": 43, + "d": 52 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 1 + }, + "mne/utils/check.py": { + "a": 34, + "d": 18 + }, + "mne/utils/misc.py": { + "a": 2, + "d": 1 + }, + "mne/utils/tests/test_check.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 5 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6702.json b/doc/sphinxext/prs/6702.json new file mode 100644 index 00000000000..7d16b2beaeb --- /dev/null +++ b/doc/sphinxext/prs/6702.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "d05dd9f9f5b7550189b4ffbd32dd5183a005fa28", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/faq.rst": { + "a": 15, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_what.py": { + "a": 25, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 9, + "d": 1 + }, + "mne/event.py": { + "a": 3, + "d": 1 + }, + "mne/io/tests/test_what.py": { + "a": 54, + "d": 0 + }, + "mne/io/what.py": { + "a": 70, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6704.json b/doc/sphinxext/prs/6704.json new file mode 100644 index 00000000000..eccb8b91f77 --- /dev/null +++ b/doc/sphinxext/prs/6704.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "bfef3b614f616afc14cad6f5e8e9330627d515b5", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 0 + }, + "mne/_digitization/_utils.py": { + "a": 4, + "d": 0 + }, + "mne/_digitization/base.py": { + "a": 16, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 24, + "d": 1 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 18, + "d": 18 + }, + "mne/channels/montage.py": { + "a": 331, + "d": 50 + }, + "mne/channels/tests/test_montage.py": { + "a": 371, + "d": 5 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 2, + "d": 4 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6706.json b/doc/sphinxext/prs/6706.json new file mode 100644 index 00000000000..1ef528b46c1 --- /dev/null +++ b/doc/sphinxext/prs/6706.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b7dd664ab27fcd1b61a15165369ad3cce51e0220", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 3, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 41, + "d": 9 + }, + "mne/channels/montage.py": { + "a": 164, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 59, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6709.json b/doc/sphinxext/prs/6709.json new file mode 100644 index 00000000000..0f600f23eb1 --- /dev/null +++ b/doc/sphinxext/prs/6709.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "27e0108b3a3d92973ceb92b339e18956499032f7", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/671.json b/doc/sphinxext/prs/671.json new file mode 100644 index 00000000000..b1147c4fefe --- /dev/null +++ b/doc/sphinxext/prs/671.json @@ -0,0 
+1,63 @@ +{ + "merge_commit_sha": "993ca205bf22e263a1e4a9ffdde99a3077eec31d", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 9, + "d": 4 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 7, + "d": 7 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 41, + "d": 33 + }, + "mne/connectivity/spectral.py": { + "a": 5, + "d": 4 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 11, + "d": 10 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 14, + "d": 9 + }, + "mne/source_estimate.py": { + "a": 460, + "d": 259 + }, + "mne/tests/test_source_estimate.py": { + "a": 59, + "d": 19 + }, + "mne/tests/test_viz.py": { + "a": 1, + "d": 1 + }, + "mne/viz.py": { + "a": 7, + "d": 92 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6711.json b/doc/sphinxext/prs/6711.json new file mode 100644 index 00000000000..e47f6b9fbf4 --- /dev/null +++ b/doc/sphinxext/prs/6711.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b97cfbe2195115772a27e7489df6b705f6216372", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/evoked.py": { + "a": 12, + "d": 25 + }, + "mne/tests/test_evoked.py": { + "a": 5, + "d": 5 + }, + "mne/utils/numerics.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6712.json b/doc/sphinxext/prs/6712.json new file mode 100644 index 00000000000..27a3771deb6 --- /dev/null +++ b/doc/sphinxext/prs/6712.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "3e60a9a45c2e48177c46348720737501dcd57918", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 0, + "d": 56 + }, + "mne/channels/montage.py": { + "a": 52, + "d": 14 + }, + "mne/channels/tests/test_montage.py": { + "a": 17, + "d": 10 + }, + "mne/gui/_file_traits.py": { + "a": 2, + "d": 2 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_montage.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6713.json b/doc/sphinxext/prs/6713.json new file mode 100644 index 00000000000..edd8cab8ea2 --- /dev/null +++ b/doc/sphinxext/prs/6713.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "cd203d3b0f3d8054490b8705e40ebd318a939c74", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 4, + "d": 2 + }, + "mne/transforms.py": { + "a": 3, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/check.py": { + "a": 20, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6714.json b/doc/sphinxext/prs/6714.json new file mode 100644 index 00000000000..023797b5452 --- /dev/null +++ b/doc/sphinxext/prs/6714.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "e99226eeed132daff799ac9544267eef38046f26", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/visualization/plot_montage.py": { + "a": 44, + "d": 
16 + }, + "mne/_digitization/_utils.py": { + "a": 72, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 2, + "d": 72 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 181, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 97, + "d": 7 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 56, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6715.json b/doc/sphinxext/prs/6715.json new file mode 100644 index 00000000000..5acef2d0f68 --- /dev/null +++ b/doc/sphinxext/prs/6715.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "751347db894da259c6c2ecaf65e81ab8c4aaffdf", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/visualization/plot_eeg_on_scalp.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 3, + "d": 1 + }, + "tutorials/source-modeling/plot_dipole_orientations.py": { + "a": 4, + "d": 4 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6719.json b/doc/sphinxext/prs/6719.json new file mode 100644 index 00000000000..ca00ac4b259 --- /dev/null +++ b/doc/sphinxext/prs/6719.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6d1aa1c3017513efaa5c2a9a37b2ed6208dc5255", + "authors": [ + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6720.json b/doc/sphinxext/prs/6720.json new file mode 100644 index 00000000000..f856e4b76a8 --- /dev/null +++ b/doc/sphinxext/prs/6720.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b3f46657fad1599262b6290b0c97a14d264ffc78", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6721.json b/doc/sphinxext/prs/6721.json new file mode 100644 index 00000000000..bf762a002d0 --- /dev/null +++ b/doc/sphinxext/prs/6721.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9e8bb9113464800971895b0dff025bfa7718de0a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/data/coil_def.dat": { + "a": 7, + "d": 7 + }, + "mne/io/constants.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6722.json b/doc/sphinxext/prs/6722.json new file mode 100644 index 00000000000..65d4eb7c8e2 --- /dev/null +++ b/doc/sphinxext/prs/6722.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "79cc67d783cd95d400f1ba9c3f176cf41aca6550", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6723.json b/doc/sphinxext/prs/6723.json new file mode 100644 index 00000000000..3e6fc90083a --- /dev/null +++ b/doc/sphinxext/prs/6723.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d9d009fa4bd7f964a8a7756b93746fe9e814038d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + 
"tutorials/intro/plot_40_projectors_background.py": { + "a": 11, + "d": 9 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 370, + "d": 62 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6725.json b/doc/sphinxext/prs/6725.json new file mode 100644 index 00000000000..6e3aa88cb81 --- /dev/null +++ b/doc/sphinxext/prs/6725.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c0d472b8894dc8f57d184d3a00debde602ce645a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_eeg_on_scalp.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_dipole_orientations.py": { + "a": 4, + "d": 4 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6726.json b/doc/sphinxext/prs/6726.json new file mode 100644 index 00000000000..57d3136025c --- /dev/null +++ b/doc/sphinxext/prs/6726.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "ef84593ab1a60065758f0bfedb7a8b2630494704", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/io/base.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 8, + "d": 3 + }, + "mne/viz/ica.py": { + "a": 10, + "d": 7 + }, + "mne/viz/raw.py": { + "a": 11, + "d": 6 + }, + "mne/viz/tests/test_raw.py": { + "a": 6, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 88, + "d": 40 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6727.json b/doc/sphinxext/prs/6727.json new file mode 100644 index 00000000000..0d998f8e51b --- /dev/null +++ b/doc/sphinxext/prs/6727.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "84cee6d70c435b520ab3a3229c6c57d5634fccaf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 4, + "d": 1 + }, + "doc/manual/c_reference.rst": { + "a": 4, + "d": 7 + }, + "doc/manual/source_localization/morph_stc.rst": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6729.json b/doc/sphinxext/prs/6729.json new file mode 100644 index 00000000000..39fbf2656b2 --- /dev/null +++ b/doc/sphinxext/prs/6729.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "68c2a57d433e3ec2dc7ebe0b642629f5df0e8f3c", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 3, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6730.json b/doc/sphinxext/prs/6730.json new file mode 100644 index 00000000000..96ea0ded7bc --- /dev/null +++ b/doc/sphinxext/prs/6730.json @@ -0,0 +1,131 @@ +{ + "merge_commit_sha": "daa0458a0ca91ebada2682fee43bb95ec388cd0d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 3, + "d": 2 + }, + "doc/manual/appendix/c_misc.rst": { + "a": 64, + "d": 61 + }, + "doc/manual/c_reference.rst": { + "a": 6, + "d": 2 + }, + "doc/manual/cookbook.rst": { + "a": 4, + "d": 4 + }, + "doc/manual/gui/analyze.rst": { + "a": 32, + "d": 24 + }, + "doc/manual/gui/browse.rst": { + "a": 95, + "d": 66 + }, + 
"doc/manual/index.rst": { + "a": 20, + "d": 21 + }, + "doc/manual/io.rst": { + "a": 6, + "d": 3 + }, + "doc/manual/matlab.rst": { + "a": 6, + "d": 6 + }, + "doc/manual/preprocessing/bads.rst": { + "a": 0, + "d": 5 + }, + "doc/manual/preprocessing/overview.rst": { + "a": 0, + "d": 5 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 48, + "d": 144 + }, + "doc/manual/source_localization/c_forward.rst": { + "a": 323, + "d": 262 + }, + "doc/manual/source_localization/c_inverse.rst": { + "a": 351, + "d": 309 + }, + "doc/manual/source_localization/c_morph.rst": { + "a": 108, + "d": 98 + }, + "doc/manual/source_localization/coil_geometries.rst": { + "a": 0, + "d": 266 + }, + "doc/manual/source_localization/forward.rst": { + "a": 0, + "d": 436 + }, + "doc/manual/source_localization/inverse.rst": { + "a": 0, + "d": 439 + }, + "doc/manual/source_localization/morph_stc.rst": { + "a": 0, + "d": 249 + }, + "doc/overview/faq.rst": { + "a": 2, + "d": 2 + }, + "doc/overview/index.rst": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 2, + "d": 1 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 2, + "d": 0 + }, + "tutorials/sample-datasets/plot_brainstorm_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_background_freesurfer.py": { + "a": 3, + "d": 2 + }, + "tutorials/source-modeling/plot_compute_covariance.py": { + "a": 3, + "d": 3 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6731.json b/doc/sphinxext/prs/6731.json new file mode 100644 index 00000000000..47855a0c92d --- /dev/null +++ b/doc/sphinxext/prs/6731.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "93c525408450b01f61cf78851247c330d67a9f9e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_xhemi.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 26, + "d": 14 + }, + "mne/viz/_brain/_brain.py": { + "a": 453, + "d": 164 + }, + "mne/viz/_brain/colormap.py": { + "a": 105, + "d": 55 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 63, + "d": 18 + }, + "mne/viz/_brain/utils.py": { + "a": 100, + "d": 0 + }, + "mne/viz/_brain/view.py": { + "a": 25, + "d": 12 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 75, + "d": 13 + }, + "mne/viz/backends/_pyvista.py": { + "a": 86, + "d": 29 + }, + "mne/viz/backends/base_renderer.py": { + "a": 20, + "d": 6 + }, + "mne/viz/backends/renderer.py": { + "a": 6, + "d": 5 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 12, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 6 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6732.json b/doc/sphinxext/prs/6732.json new file mode 100644 index 00000000000..ccce55d806f --- /dev/null +++ b/doc/sphinxext/prs/6732.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d1730caab121416351bb88d92aa6ffae0ab71c83", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/dipole.py": { 
+ "a": 3, + "d": 3 + }, + "mne/gui/_coreg_gui.py": { + "a": 72, + "d": 35 + }, + "mne/gui/_viewer.py": { + "a": 27, + "d": 21 + }, + "mne/simulation/raw.py": { + "a": 4, + "d": 4 + }, + "mne/source_space.py": { + "a": 11, + "d": 70 + }, + "mne/surface.py": { + "a": 75, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6733.json b/doc/sphinxext/prs/6733.json new file mode 100644 index 00000000000..46b8179baf2 --- /dev/null +++ b/doc/sphinxext/prs/6733.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "17fe1132ea2ad9486217be1bc9ceedd738a602e1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 10, + "d": 0 + }, + "mne/coreg.py": { + "a": 55, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_fiducials_gui.py": { + "a": 10, + "d": 1 + }, + "mne/gui/_file_traits.py": { + "a": 6, + "d": 11 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 42, + "d": 43 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 12, + "d": 11 + }, + "mne/io/tests/test_what.py": { + "a": 2, + "d": 1 + }, + "mne/io/what.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_coreg.py": { + "a": 21, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6735.json b/doc/sphinxext/prs/6735.json new file mode 100644 index 00000000000..ee148942b06 --- /dev/null +++ b/doc/sphinxext/prs/6735.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "bf7dd06bc6e693dbe53dd607e122a5424a579cbe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 1 + }, + "README.rst": { + "a": 5, + "d": 6 + }, + "doc/changes/latest.inc": { + "a": 8, + "d": 0 + }, + "mne/filter.py": { + "a": 2, + "d": 4 + }, + "mne/fixes.py": { + "a": 26, + "d": 156 + }, + "mne/tests/test_fixes.py": { + "a": 0, + "d": 25 + }, + "mne/utils/linalg.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 21 + }, + "mne/viz/circle.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 7, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 3 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 9, + "d": 13 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 10, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6736.json b/doc/sphinxext/prs/6736.json new file mode 100644 index 00000000000..6da510cf8ef --- /dev/null +++ b/doc/sphinxext/prs/6736.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "25701d80296c53c7d1250b99a483569f61202072", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 5, + "d": 1 + }, + "mne/io/base.py": { + "a": 3, + "d": 2 + }, + "mne/transforms.py": { + "a": 2, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 23, + "d": 32 + }, + "mne/viz/tests/test_raw.py": { + "a": 9, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6739.json b/doc/sphinxext/prs/6739.json new file mode 100644 index 00000000000..0d20cb0adde --- /dev/null +++ b/doc/sphinxext/prs/6739.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"ae1deeeded4d341e9991e92137c84d8590926d37", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6742.json b/doc/sphinxext/prs/6742.json new file mode 100644 index 00000000000..57b687df393 --- /dev/null +++ b/doc/sphinxext/prs/6742.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "21a224062e7f1cc088d94cdaef59aa2ed9efb675", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 2, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6743.json b/doc/sphinxext/prs/6743.json new file mode 100644 index 00000000000..089aae334ef --- /dev/null +++ b/doc/sphinxext/prs/6743.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "80471f114bdcb80be6d66f62f18a9ab060e80a1e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.15.inc": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 3, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6744.json b/doc/sphinxext/prs/6744.json new file mode 100644 index 00000000000..0f18f580ff5 --- /dev/null +++ b/doc/sphinxext/prs/6744.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8cc11b5fb08fd1aa1836f2e7f190cdb3e5dc53cc", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/viz/montage.py": { + "a": 4, + "d": 1 + }, + "mne/viz/tests/test_montage.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6745.json b/doc/sphinxext/prs/6745.json new file mode 100644 index 00000000000..dc3d0c8ad9f --- /dev/null +++ b/doc/sphinxext/prs/6745.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "e16d112026d22850a12a5db8d819f334f7ff0886", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/visualization/plot_publication_figure.py": { + "a": 165, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 44, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 84, + "d": 80 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 35, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6747.json b/doc/sphinxext/prs/6747.json new file mode 100644 index 00000000000..993d161111a --- /dev/null +++ b/doc/sphinxext/prs/6747.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "dbed4f6f0e7c3c1efd1a76ba29f3149bbf6e15d1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/advanced.rst": { + "a": 4, + "d": 2 + }, + "doc/install/contributing.rst": { + "a": 15, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6748.json b/doc/sphinxext/prs/6748.json new file mode 100644 index 00000000000..aaa25bdc3d6 --- /dev/null +++ b/doc/sphinxext/prs/6748.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "88a2e3dae4b22ca13d088db8b2213745cd9a653c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 75, + "d": 22 + }, + 
"mne/time_frequency/tests/test_psd.py": { + "a": 65, + "d": 1 + }, + "tutorials/time-freq/plot_sensors_time_frequency.py": { + "a": 45, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6749.json b/doc/sphinxext/prs/6749.json new file mode 100644 index 00000000000..9bca6be6885 --- /dev/null +++ b/doc/sphinxext/prs/6749.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "affc6854168e32e73a075a0104e7af8bddd7eefe", + "authors": [ + { + "n": "Victor Férat", + "e": "victor.ferat@live.Fr" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_setup_source_space.py": { + "a": 135, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 33, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6750.json b/doc/sphinxext/prs/6750.json new file mode 100644 index 00000000000..fbac43409d1 --- /dev/null +++ b/doc/sphinxext/prs/6750.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a7ef05dd462aa4fa1710c5cd135bd2fb4f49a3b6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6751.json b/doc/sphinxext/prs/6751.json new file mode 100644 index 00000000000..4e2fb6d86a4 --- /dev/null +++ b/doc/sphinxext/prs/6751.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "13c8e69012d0b8a9cae44530c7acf96da66efb75", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 4, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6753.json b/doc/sphinxext/prs/6753.json new file mode 100644 index 00000000000..850eade3030 --- /dev/null +++ b/doc/sphinxext/prs/6753.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "17ffea91f5034005e6b47d8c9a89647bb11a678c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6754.json b/doc/sphinxext/prs/6754.json new file mode 100644 index 00000000000..b97fd61d096 --- /dev/null +++ b/doc/sphinxext/prs/6754.json @@ -0,0 +1,763 @@ +{ + "merge_commit_sha": "76ee63ff92b0424a304a12532d0cb53c0833a0ec", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 1, + "d": 1 + }, + "examples/datasets/spm_faces_dataset.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_rsa.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 1, + "d": 1 + }, + 
"examples/inverse/plot_label_source_activations.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_mne_point_spread_function.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_stc.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_read_and_write_raw_data.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_read_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_read_events.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_read_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/io/plot_read_noise_covariance_matrix.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_rereference_eeg.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_fdr_stats_evoked.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_evoked_whitening.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_ssp_projs_sensitivity_map.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_topo_compare_conditions.py": { + "a": 1, + "d": 1 + }, + "mne/_digitization/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/_digitization/base.py": { + "a": 1, + "d": 1 + }, + "mne/baseline.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_surf2bem.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/_fake/_fake.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/misc/_misc.py": { + "a": 1, + "d": 1 + }, + 
"mne/datasets/multimodal/multimodal.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/opm/opm.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sample/sample.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/somato/somato.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/testing/_testing.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/defaults.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf_comp.py": { + "a": 1, + "d": 1 + }, + "mne/io/diff.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/matrix.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/open.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_compensator.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_reference.py": { + "a": 1, + "d": 1 + }, + "mne/io/tree.py": { + "a": 1, + "d": 1 + }, + "mne/io/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/misc.py": { + "a": 1, + "d": 1 + }, + "mne/parallel.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/selection.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/source.py": 
{ + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/multi_comp.py": { + "a": 1, + "d": 1 + }, + "mne/stats/parametric.py": { + "a": 1, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_permutations.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_digitization.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/colormap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/surface.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/view.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/base_renderer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "setup.py": { + "a": 3, + "d": 3 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 1, + "d": 1 + }, + 
"tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 1, + "d": 1 + }, + "tutorials/time-freq/plot_sensors_time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6755.json b/doc/sphinxext/prs/6755.json new file mode 100644 index 00000000000..0923d42f752 --- /dev/null +++ b/doc/sphinxext/prs/6755.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9585e93f8c2af699000bb47c2d6da1e9a6d79251", + "authors": [ + { + "n": "Abram Hindle", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 25, + "d": 20 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6756.json b/doc/sphinxext/prs/6756.json new file mode 100644 index 00000000000..ce942546f8d --- /dev/null +++ b/doc/sphinxext/prs/6756.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "edc1a48c1653f400ae4f223431c462ce5082d70a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "Makefile": { + "a": 5, + "d": 3 + }, + "setup.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6759.json b/doc/sphinxext/prs/6759.json new file mode 100644 index 00000000000..5504aee3760 --- /dev/null +++ b/doc/sphinxext/prs/6759.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "812f8c468901a7334127cc96cfb8dcacb99c9478", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 0, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6761.json b/doc/sphinxext/prs/6761.json new file mode 100644 index 00000000000..cd243201bd6 --- /dev/null +++ b/doc/sphinxext/prs/6761.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "e51e2cd5c3244f6d9b6aae4da713d838089cfa63", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 25, + "d": 14 + }, + "mne/viz/backends/_pyvista.py": { + "a": 13, + "d": 9 + }, + "mne/viz/backends/_utils.py": { + "a": 26, + "d": 1 + }, + "mne/viz/backends/base_renderer.py": { + "a": 50, + "d": 24 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 6, + "d": 6 + }, + "mne/viz/backends/tests/test_utils.py": { + "a": 41, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6764.json b/doc/sphinxext/prs/6764.json new file mode 100644 index 00000000000..56f59c6bd73 --- /dev/null +++ b/doc/sphinxext/prs/6764.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "52f064415e7c9fa8fe243d22108dcdf3d86505b9", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/_includes/dig_formats.rst": { + "a": 41, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 10, + "d": 6 + }, + "doc/glossary.rst": { + "a": 16, + "d": 1 + }, + "doc/overview/implementation.rst": { + "a": 9, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 1 + }, + "mne/channels/__init__.py": { + "a": 4, + "d": 1 + }, + 
"mne/channels/_dig_montage_utils.py": { + "a": 4, + "d": 1 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 187, + "d": 61 + }, + "mne/channels/montage.py": { + "a": 246, + "d": 44 + }, + "mne/channels/tests/test_channels.py": { + "a": 6, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 297, + "d": 72 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 3, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 7, + "d": 6 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 5, + "d": 2 + }, + "mne/io/cnt/cnt.py": { + "a": 3, + "d": 3 + }, + "mne/viz/montage.py": { + "a": 33, + "d": 17 + }, + "mne/viz/tests/test_montage.py": { + "a": 14, + "d": 11 + }, + "tutorials/evoked/plot_eeg_erp.py": { + "a": 1, + "d": 1 + }, + "tutorials/misc/plot_sensor_locations.py": { + "a": 27, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6765.json b/doc/sphinxext/prs/6765.json new file mode 100644 index 00000000000..17e174c63c2 --- /dev/null +++ b/doc/sphinxext/prs/6765.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "f1f8e5413e04913011614e17f5daaa1ee84f973e", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 7, + "d": 1 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 22, + "d": 31 + }, + "mne/channels/channels.py": { + "a": 23, + "d": 3 + }, + "mne/channels/montage.py": { + "a": 195, + "d": 38 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 274, + "d": 83 + }, + "mne/io/array/tests/test_array.py": { + "a": 4, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 27, + "d": 21 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 50, + "d": 10 + }, + "mne/io/eeglab/tests/_utils.py": { + "a": 38, + "d": 0 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 25, + "d": 33 + }, + "mne/io/fieldtrip/utils.py": { + "a": 3, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 8, + "d": 23 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 0, + "d": 1 + }, + "mne/viz/tests/test_montage.py": { + "a": 10, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6766.json b/doc/sphinxext/prs/6766.json new file mode 100644 index 00000000000..c60c2f14b68 --- /dev/null +++ b/doc/sphinxext/prs/6766.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "7ffb345f1e3acf19070552b71961858b9c921373", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".github/CONTRIBUTING.md": { + "a": 1, + "d": 1 + }, + ".github/ISSUE_TEMPLATE/bug_report.md": { + "a": 1, + "d": 1 + }, + ".github/PULL_REQUEST_TEMPLATE.md": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 6, + "d": 6 + }, + "doc/index.rst": { + "a": 8, + "d": 8 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_docs.py": { + "a": 1, + "d": 1 + }, + "setup.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6767.json b/doc/sphinxext/prs/6767.json new file mode 100644 index 00000000000..96afc253a8e --- /dev/null +++ b/doc/sphinxext/prs/6767.json @@ -0,0 +1,543 @@ +{ + "merge_commit_sha": "ff349f356edb04e1b5f0db13deda8d1a20aca351", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/glossary.rst": { + "a": 1, + 
"d": 1 + }, + "doc/install/freesurfer.rst": { + "a": 1, + "d": 1 + }, + "doc/install/index.rst": { + "a": 9, + "d": 2 + }, + "doc/install/mne_c.rst": { + "a": 2, + "d": 5 + }, + "doc/install/pre_install.rst": { + "a": 15, + "d": 17 + }, + "doc/links.inc": { + "a": 1, + "d": 0 + }, + "doc/manual/appendix/c_EULA.rst": { + "a": 0, + "d": 140 + }, + "doc/manual/appendix/c_misc.rst": { + "a": 0, + "d": 104 + }, + "doc/manual/appendix/c_release_notes.rst": { + "a": 0, + "d": 854 + }, + "doc/manual/appendix/martinos.rst": { + "a": 0, + "d": 117 + }, + "doc/manual/c_reference.rst": { + "a": 0, + "d": 6449 + }, + "doc/manual/gui/analyze.rst": { + "a": 0, + "d": 2681 + }, + "doc/manual/gui/browse.rst": { + "a": 0, + "d": 2267 + }, + "doc/manual/gui/mne_analyze/MNE_preferences.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/adjust_alignment.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/adjust_lights.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/adjust_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/cont_hpi_data.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/dipole_list.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/dipole_parameters.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/dipoles_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/epoch_selector.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/field_mapping_pref.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/file_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/hardcopy_controls.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/help_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/image_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/label_list.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/labels_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/main_window.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/movie_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/mri_viewer.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/open_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/overlay_management.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/patch_selection_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/save_label_timecourse.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/scales_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/surface_controls.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/surface_selection_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/timecourse_manager.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/view_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/viewer.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/viewer_options.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/visualize_hpi.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_analyze/windows_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/adjust_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/adust_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/average_pref.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/channel_selection.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/file_menu.png": { + 
"a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/filter_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/help_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/main.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/manage_averages_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/new_selection.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/new_ssp.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/open_dialog copy.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/open_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/process_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/process_menu2.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/scales_dialog.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/scales_dialog2.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/toolbar.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-0.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-1.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-10.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-11.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-12.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-13.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-14.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-15.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-16.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-17.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-2.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-3.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-4.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-5.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-6.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-7.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-8.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu-9.png": { + "a": 0, + "d": 0 + }, + "doc/manual/gui/mne_browse_raw/windows_menu.png": { + "a": 0, + "d": 0 + }, + "doc/manual/index.rst": { + "a": 0, + "d": 38 + }, + "doc/manual/io.rst": { + "a": 0, + "d": 514 + }, + "doc/manual/migrating.rst": { + "a": 0, + "d": 56 + }, + "doc/manual/pics/Flowchart.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/ICA_primer.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/cover.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/flat.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/morphed.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/neuromag.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/orig.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/proj-off-on.png": { + "a": 0, + "d": 0 + }, + "doc/manual/pics/title_page.png": { + "a": 0, + "d": 0 + }, + "doc/manual/preprocessing/ica.rst": { + "a": 0, + "d": 118 + }, + "doc/manual/preprocessing/ssp.rst": { + "a": 0, + "d": 103 + }, + "doc/manual/sample_dataset.rst": { + "a": 0, + "d": 708 + }, + "doc/manual/visualization.rst": { + "a": 0, + "d": 5 + }, + "doc/overview/algorithms/bem_model.rst": 
{ + "a": 15, + "d": 168 + }, + "doc/overview/algorithms/channel_interpolation.rst": { + "a": 0, + "d": 0 + }, + "doc/overview/algorithms/forward.rst": { + "a": 82, + "d": 238 + }, + "doc/overview/algorithms/images/CoordinateSystems.png": { + "a": 0, + "d": 0 + }, + "doc/overview/algorithms/images/HeadCS.png": { + "a": 0, + "d": 0 + }, + "doc/overview/algorithms/inverse.rst": { + "a": 23, + "d": 150 + }, + "doc/overview/algorithms/maxwell.rst": { + "a": 0, + "d": 0 + }, + "doc/overview/algorithms/memory.rst": { + "a": 1, + "d": 2 + }, + "doc/overview/algorithms/morph.rst": { + "a": 3, + "d": 36 + }, + "doc/overview/algorithms/ssp.rst": { + "a": 110, + "d": 0 + }, + "doc/overview/algorithms/time_frequency.rst": { + "a": 0, + "d": 0 + }, + "doc/overview/cookbook.rst": { + "a": 10, + "d": 21 + }, + "doc/overview/datasets_index.rst": { + "a": 65, + "d": 3 + }, + "doc/overview/faq.rst": { + "a": 2, + "d": 2 + }, + "doc/overview/images/Digitizer-example.png": { + "a": 0, + "d": 0 + }, + "doc/overview/images/flow_diagram.svg": { + "a": 0, + "d": 0 + }, + "doc/overview/implementation.rst": { + "a": 18, + "d": 0 + }, + "doc/overview/index.rst": { + "a": 14, + "d": 14 + }, + "doc/overview/matlab.rst": { + "a": 52, + "d": 44 + }, + "doc/overview/migrating.rst": { + "a": 76, + "d": 0 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 0, + "d": 3 + }, + "mne/coreg.py": { + "a": 2, + "d": 2 + }, + "tutorials/discussions/plot_background_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/discussions/plot_background_ica.py": { + "a": 97, + "d": 0 + }, + "tutorials/epochs/plot_visualize_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/plot_40_projectors_background.py": { + "a": 61, + "d": 7 + }, + "tutorials/sample-datasets/plot_brainstorm_auditory.py": { + "a": 1, + "d": 3 + }, + "tutorials/sample-datasets/plot_brainstorm_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "tutorials/simulation/plot_creating_data_structures.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_background_freesurfer.py": { + "a": 3, + "d": 4 + }, + "tutorials/source-modeling/plot_compute_covariance.py": { + "a": 4, + "d": 3 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6768.json b/doc/sphinxext/prs/6768.json new file mode 100644 index 00000000000..e11c874ab1f --- /dev/null +++ b/doc/sphinxext/prs/6768.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a551eb83f28c2b479718b0ed758cbb7fd57d0e7c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/cuda.py": { + "a": 32, + "d": 2 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6769.json b/doc/sphinxext/prs/6769.json new file mode 100644 index 00000000000..8221c65622b --- /dev/null +++ b/doc/sphinxext/prs/6769.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d9e278826e5301802d8ad22a91220304471b343d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/_make_forward.py": { + "a": 38, + "d": 36 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/6771.json b/doc/sphinxext/prs/6771.json new file mode 100644 index 00000000000..b3919129fa1 --- /dev/null +++ b/doc/sphinxext/prs/6771.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2545faf2f1b755d33184b9b1bcbbd8cd77a4bf4e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/links.inc": { + "a": 1, + "d": 1 + }, + "doc/overview/design_philosophy.rst": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6773.json b/doc/sphinxext/prs/6773.json new file mode 100644 index 00000000000..5d7d88cd3fb --- /dev/null +++ b/doc/sphinxext/prs/6773.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "df6684669e974b3c58be9c8e3fe5386248f2db4a", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "tutorials/source-modeling/plot_forward.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6774.json b/doc/sphinxext/prs/6774.json new file mode 100644 index 00000000000..b4761e1e04e --- /dev/null +++ b/doc/sphinxext/prs/6774.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "da5ffc58556c475db36b8c0f6bcf9df5df9a9519", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/_digitization/_utils.py": { + "a": 2, + "d": 2 + }, + "mne/bem.py": { + "a": 5, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/data/coil_def.dat": { + "a": 128, + "d": 13 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/io/constants.py": { + "a": 25, + "d": 5 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 127, + "d": 12 + }, + "mne/io/proc_history.py": { + "a": 8, + "d": 9 + }, + "mne/io/tests/test_constants.py": { + "a": 4, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 38, + "d": 24 + }, + "mne/surface.py": { + "a": 5, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 7, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6776.json b/doc/sphinxext/prs/6776.json new file mode 100644 index 00000000000..d2d572f24c3 --- /dev/null +++ b/doc/sphinxext/prs/6776.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "09a5b563e4060da6a105b208e412a56237513c08", + "authors": [ + { + "n": "Christian Clauss", + "e": "cclauss@me.com" + } + ], + "changes": { + ".travis.yml": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6780.json b/doc/sphinxext/prs/6780.json new file mode 100644 index 00000000000..4ed094ec771 --- /dev/null +++ b/doc/sphinxext/prs/6780.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "859b926b4afb287fce3bf93fff057ce54395096e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6781.json b/doc/sphinxext/prs/6781.json new file mode 100644 index 00000000000..8e62e03fd0c --- /dev/null +++ b/doc/sphinxext/prs/6781.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "864eff175dfba286ebf7458614e3ea21b912d7c3", + "authors": [ + { + "n": 
"Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6783.json b/doc/sphinxext/prs/6783.json new file mode 100644 index 00000000000..6e7d53537f0 --- /dev/null +++ b/doc/sphinxext/prs/6783.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "df2cdd4654bcf562bb75dd1eb9137103b4b0a7c6", + "authors": [ + { + "n": "José C. García Alanis", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/overview/datasets_index.rst": { + "a": 27, + "d": 0 + }, + "examples/datasets/plot_limo_data.py": { + "a": 247, + "d": 106 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 3, + "d": 2 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6785.json b/doc/sphinxext/prs/6785.json new file mode 100644 index 00000000000..e13422b12cb --- /dev/null +++ b/doc/sphinxext/prs/6785.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5efc6ecf638853a1b7c1f9922e8913fc0ce9d138", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 5, + "d": 1 + }, + "mne/report.py": { + "a": 4, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 46, + "d": 14 + }, + "tutorials/source-modeling/plot_compute_covariance.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6786.json b/doc/sphinxext/prs/6786.json new file mode 100644 index 00000000000..c5b7b2b34ed --- /dev/null +++ b/doc/sphinxext/prs/6786.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "381c409bb5ed8fdb23c2a686f89da89ede699907", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/commands/mne_browse_raw.py": { + "a": 2, + "d": 6 + }, + "mne/commands/mne_coreg.py": { + "a": 2, + "d": 3 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 2, + "d": 3 + }, + "mne/commands/mne_report.py": { + "a": 2, + "d": 3 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 2, + "d": 3 + }, + "mne/commands/utils.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6787.json b/doc/sphinxext/prs/6787.json new file mode 100644 index 00000000000..57dee42ab75 --- /dev/null +++ b/doc/sphinxext/prs/6787.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "29b05f2f593f997168d2f29a41829f68f56ad5df", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6789.json b/doc/sphinxext/prs/6789.json new file mode 100644 index 00000000000..a53748b6d7d --- /dev/null +++ b/doc/sphinxext/prs/6789.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8df597484c0fd53fdb857cc2df5ca2bb53cce2d3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 64, + "d": 48 + }, + "mne/tests/test_source_estimate.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/679.json b/doc/sphinxext/prs/679.json new file mode 100644 index 00000000000..819c5b36957 --- /dev/null +++ 
b/doc/sphinxext/prs/679.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2c9ff2fb5d4e454dd2d8c727d98a41aa06c84aca", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 29, + "d": 8 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6790.json b/doc/sphinxext/prs/6790.json new file mode 100644 index 00000000000..f4b638c6f57 --- /dev/null +++ b/doc/sphinxext/prs/6790.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5cc1331229208faf20f7e2eb00cd38ceccaf5bef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_read_source_space.py": { + "a": 0, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6791.json b/doc/sphinxext/prs/6791.json new file mode 100644 index 00000000000..d71a3709161 --- /dev/null +++ b/doc/sphinxext/prs/6791.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "e73dfe8ed1491ce2dada8ea70215229bdfc0c6eb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/visualization/plot_montage.py": { + "a": 12, + "d": 28 + }, + "mne/_digitization/_utils.py": { + "a": 4, + "d": 1 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 58, + "d": 36 + }, + "mne/channels/montage.py": { + "a": 6, + "d": 3 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 36, + "d": 11 + }, + "mne/data/coil_def.dat": { + "a": 7, + "d": 7 + }, + "mne/datasets/limo/limo.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 7 + }, + "mne/io/tests/test_meas_info.py": { + "a": 5, + "d": 2 + }, + "mne/transforms.py": { + "a": 3, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 18, + "d": 32 + }, + "setup.cfg": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_eeg_no_mri.py": { + "a": 14, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6793.json b/doc/sphinxext/prs/6793.json new file mode 100644 index 00000000000..43be2fca445 --- /dev/null +++ b/doc/sphinxext/prs/6793.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "5cd3000a6713c10361478b1b1293f687ee2781b9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_receptive_field_mtrf.py": { + "a": 2, + "d": 3 + }, + "examples/visualization/plot_3d_to_2d.py": { + "a": 6, + "d": 1 + }, + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 16, + "d": 4 + }, + "mne/channels/montage.py": { + "a": 6, + "d": 6 + }, + "mne/channels/tests/test_montage.py": { + "a": 3, + "d": 5 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 6 + }, + "tutorials/misc/plot_ecog.py": { + "a": 6, + "d": 1 + }, + "tutorials/misc/plot_sensor_locations.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 17, + "d": 1 + }, + "tutorials/source-modeling/plot_eeg_no_mri.py": { + "a": 17, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6797.json b/doc/sphinxext/prs/6797.json new file mode 100644 index 00000000000..ea2c6c639d7 --- /dev/null +++ b/doc/sphinxext/prs/6797.json @@ -0,0 +1,27 @@ +{ + 
"merge_commit_sha": "05d7eea578dbb8533dbaf34451fd6b92a80c0eb9", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 0, + "d": 7 + }, + "mne/channels/montage.py": { + "a": 4, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 27, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6798.json b/doc/sphinxext/prs/6798.json new file mode 100644 index 00000000000..555f012fc31 --- /dev/null +++ b/doc/sphinxext/prs/6798.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "42380c2d633c50b03c6c51d84a5db822a2fb9261", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_limo_data.py": { + "a": 1, + "d": 1 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 6, + "d": 5 + }, + "mne/channels/montage.py": { + "a": 7, + "d": 1 + }, + "mne/datasets/limo/limo.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6799.json b/doc/sphinxext/prs/6799.json new file mode 100644 index 00000000000..2155a224478 --- /dev/null +++ b/doc/sphinxext/prs/6799.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9bb4bf21f8284ed7074ebc5f75f673a077f7e7b1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/68.json b/doc/sphinxext/prs/68.json new file mode 100644 index 00000000000..da934d175a7 --- /dev/null +++ b/doc/sphinxext/prs/68.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "5eda6a5b832cb9bedcc25bbf0b75c19d19685f16", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 0 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 85, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/label.py": { + "a": 147, + "d": 1 + }, + "mne/simulation/__init__.py": { + "a": 6, + "d": 0 + }, + "mne/simulation/evoked.py": { + "a": 125, + "d": 0 + }, + "mne/simulation/source.py": { + "a": 178, + "d": 0 + }, + "mne/simulation/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/simulation/tests/test_source.py": { + "a": 70, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 30, + "d": 0 + }, + "mne/tests/test_label.py": { + "a": 16, + "d": 1 + }, + "mne/time_frequency/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/ar.py": { + "a": 30, + "d": 0 + }, + "mne/utils.py": { + "a": 18, + "d": 0 + }, + "mne/viz.py": { + "a": 155, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6800.json b/doc/sphinxext/prs/6800.json new file mode 100644 index 00000000000..b1112865968 --- /dev/null +++ b/doc/sphinxext/prs/6800.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "7c02660735181abc992662fcbd82c4b6f28f4775", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 0 + }, + "mne/channels/data/neighbors/KIT-NYU-2019_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 9, + "d": 2 + }, + "mne/io/kit/constants.py": { + "a": 21, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6804.json 
b/doc/sphinxext/prs/6804.json new file mode 100644 index 00000000000..d3886efb063 --- /dev/null +++ b/doc/sphinxext/prs/6804.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d123ec6f3d3a6e011469f2c04adfcd631915a178", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/filter.py": { + "a": 7, + "d": 2 + }, + "mne/io/pick.py": { + "a": 11, + "d": 8 + }, + "mne/tests/test_filter.py": { + "a": 36, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6805.json b/doc/sphinxext/prs/6805.json new file mode 100644 index 00000000000..7b7987228a8 --- /dev/null +++ b/doc/sphinxext/prs/6805.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "80817108a43f152b5fa67bc94d25e3bdb675838e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 2, + "d": 5 + }, + "mne/minimum_norm/inverse.py": { + "a": 9, + "d": 5 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 12, + "d": 5 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6806.json b/doc/sphinxext/prs/6806.json new file mode 100644 index 00000000000..ffd5c782397 --- /dev/null +++ b/doc/sphinxext/prs/6806.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1b25e8485609627abd39e31a4f1e396315211392", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6808.json b/doc/sphinxext/prs/6808.json new file mode 100644 index 00000000000..e047c6281c2 --- /dev/null +++ b/doc/sphinxext/prs/6808.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "007a7c0c2790f3bbaa301adf6f99fde90f67812a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6809.json b/doc/sphinxext/prs/6809.json new file mode 100644 index 00000000000..531a6fd001a --- /dev/null +++ b/doc/sphinxext/prs/6809.json @@ -0,0 +1,147 @@ +{ + "merge_commit_sha": "b0ea04b4217d4945c13525e81607a9061d7768dc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 6, + "d": 4 + }, + "doc/_images/CoordinateSystems.png": { + "a": 0, + "d": 0 + }, + "doc/_images/HeadCS.png": { + "a": 0, + "d": 0 + }, + "doc/_includes/bem_model.rst": { + "a": 166, + "d": 0 + }, + "doc/_includes/channel_interpolation.rst": { + "a": 18, + "d": 10 + }, + "doc/_includes/data_formats.rst": { + "a": 29, + "d": 36 + }, + "doc/_includes/forward.rst": { + "a": 164, + "d": 235 + }, + "doc/_includes/inverse.rst": { + "a": 29, + "d": 27 + }, + "doc/_includes/memory.rst": { + "a": 68, + "d": 0 + }, + "doc/_includes/morph.rst": { + "a": 18, + "d": 
10 + }, + "doc/_includes/ssp.rst": { + "a": 17, + "d": 8 + }, + "doc/conf.py": { + "a": 6, + "d": 0 + }, + "doc/overview/algorithms/bem_model.rst": { + "a": 0, + "d": 205 + }, + "doc/overview/algorithms/maxwell.rst": { + "a": 0, + "d": 76 + }, + "doc/overview/algorithms/memory.rst": { + "a": 0, + "d": 53 + }, + "doc/overview/algorithms/time_frequency.rst": { + "a": 0, + "d": 30 + }, + "doc/overview/command_line.rst": { + "a": 0, + "d": 114 + }, + "doc/overview/cookbook.rst": { + "a": 2, + "d": 2 + }, + "doc/overview/implementation.rst": { + "a": 85, + "d": 16 + }, + "doc/overview/matlab.rst": { + "a": 2, + "d": 3 + }, + "mne/bem.py": { + "a": 16, + "d": 16 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg_gui.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 8, + "d": 6 + }, + "tutorials/intro/README.txt": { + "a": 2, + "d": 2 + }, + "tutorials/intro/plot_40_sensor_locations.py": { + "a": 2, + "d": 0 + }, + "tutorials/io/README.txt": { + "a": 7, + "d": 0 + }, + "tutorials/io/plot_10_reading_meg_data.py": { + "a": 294, + "d": 0 + }, + "tutorials/io/plot_20_reading_eeg_data.py": { + "a": 189, + "d": 0 + }, + "tutorials/preprocessing/plot_15_handling_bad_channels.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_45_projectors_background.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 28, + "d": 1 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/sample-datasets/plot_brainstorm_auditory.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6811.json b/doc/sphinxext/prs/6811.json new file mode 100644 index 00000000000..158248a372d --- /dev/null +++ b/doc/sphinxext/prs/6811.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c019da4859be286999752d417b45859760a61388", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 18, + "d": 20 + }, + "mne/channels/tests/test_montage.py": { + "a": 10, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6812.json b/doc/sphinxext/prs/6812.json new file mode 100644 index 00000000000..2e36fe10e23 --- /dev/null +++ b/doc/sphinxext/prs/6812.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "50657ba05301bcf1230d5034a2ed76ccaaf52352", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 13, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6815.json b/doc/sphinxext/prs/6815.json new file mode 100644 index 00000000000..519d68c168a --- /dev/null +++ b/doc/sphinxext/prs/6815.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "dcada0c29b9de284d5b2fd323e8aa1d04929d1a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/tests/test_make_forward.py": { + "a": 18, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6817.json b/doc/sphinxext/prs/6817.json new file mode 100644 index 00000000000..58777837645 --- /dev/null +++ b/doc/sphinxext/prs/6817.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3247bd1f7aafb120a20c70dd0b348c396bb74cc5", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 
+ }, + "doc/_templates/navbar.html": { + "a": 2, + "d": 1 + }, + "doc/changes/0.19.inc": { + "a": 3, + "d": 3 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6818.json b/doc/sphinxext/prs/6818.json new file mode 100644 index 00000000000..b30d232ec01 --- /dev/null +++ b/doc/sphinxext/prs/6818.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6e332b112da0fe2b155c481c746adeeb82e3fda2", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 23, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6819.json b/doc/sphinxext/prs/6819.json new file mode 100644 index 00000000000..700474c2d22 --- /dev/null +++ b/doc/sphinxext/prs/6819.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5561394ca585b27dd7b276f85fd1584657667f5b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_import_nesting.py": { + "a": 15, + "d": 21 + }, + "mne/utils/misc.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/682.json b/doc/sphinxext/prs/682.json new file mode 100644 index 00000000000..aade8b3100d --- /dev/null +++ b/doc/sphinxext/prs/682.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9590d8c5b92d0725fdb2f307d3e47aeaac075bff", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 16, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6821.json b/doc/sphinxext/prs/6821.json new file mode 100644 index 00000000000..549e2588e63 --- /dev/null +++ b/doc/sphinxext/prs/6821.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5e1662522fd7481c6e76addded6c337f9b406364", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 5, + "d": 41 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 1, + "d": 2 + }, + "mne/io/utils.py": { + "a": 0, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6822.json b/doc/sphinxext/prs/6822.json new file mode 100644 index 00000000000..c02cb88f501 --- /dev/null +++ b/doc/sphinxext/prs/6822.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5a623c7da096017c8fe27e7f357671e19ae5e66d", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/_templates/navbar.html": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6823.json b/doc/sphinxext/prs/6823.json new file mode 100644 index 00000000000..a0c9d6fd1db --- /dev/null +++ b/doc/sphinxext/prs/6823.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "8a4cef803db165e2f8a00ad0f304ab60a79bc6f3", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "doc/changes/0.13.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.14.inc": { + "a": 6, + "d": 6 + }, + "doc/changes/0.15.inc": { + "a": 6, + "d": 6 + }, + "doc/changes/0.17.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.19.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 0, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 0, + "d": 3 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 2 + }, + 
"mne/channels/_dig_montage_utils.py": { + "a": 4, + "d": 102 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 13 + }, + "mne/channels/montage.py": { + "a": 76, + "d": 975 + }, + "mne/channels/tests/test_montage.py": { + "a": 52, + "d": 650 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 3, + "d": 9 + }, + "mne/io/brainvision/brainvision.py": { + "a": 11, + "d": 17 + }, + "mne/io/cnt/cnt.py": { + "a": 13, + "d": 23 + }, + "mne/io/edf/edf.py": { + "a": 10, + "d": 22 + }, + "mne/io/eeglab/eeglab.py": { + "a": 11, + "d": 34 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 5, + "d": 8 + }, + "mne/io/egi/egi.py": { + "a": 5, + "d": 8 + }, + "mne/io/egi/egimff.py": { + "a": 4, + "d": 7 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 18 + }, + "mne/io/nicolet/nicolet.py": { + "a": 3, + "d": 11 + }, + "mne/io/tests/test_meas_info.py": { + "a": 0, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 0 + }, + "mne/viz/montage.py": { + "a": 12, + "d": 31 + }, + "mne/viz/tests/test_montage.py": { + "a": 10, + "d": 6 + }, + "tutorials/intro/plot_40_sensor_locations.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6825.json b/doc/sphinxext/prs/6825.json new file mode 100644 index 00000000000..824a1515600 --- /dev/null +++ b/doc/sphinxext/prs/6825.json @@ -0,0 +1,155 @@ +{ + "merge_commit_sha": "222deb6f5e97036274b9ac117582c20bee18d13a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.12.inc": { + "a": 1, + "d": 1 + }, + "examples/simulation/plot_simulate_raw_data.py": { + "a": 1, + "d": 2 + }, + "examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py": { + "a": 1, + "d": 2 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 9 + }, + "mne/evoked.py": { + "a": 1, + "d": 41 + }, + "mne/forward/forward.py": { + "a": 7, + "d": 19 + }, + "mne/forward/tests/test_forward.py": { + "a": 1, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 9 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 2 + }, + "mne/io/base.py": { + "a": 1, + "d": 80 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 11 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 9, + "d": 10 + }, + "mne/morph.py": { + "a": 1, + "d": 72 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 7 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 9 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 2, + "d": 2 + }, + "mne/rank.py": { + "a": 5, + "d": 14 + }, + "mne/simulation/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 31 + }, + "mne/simulation/raw.py": { + "a": 12, + "d": 100 + }, + "mne/simulation/tests/test_raw.py": { + "a": 41, + "d": 119 + }, + "mne/tests/test_cov.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 11 + }, + "mne/tests/test_rank.py": { + "a": 0, + "d": 4 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 2, + "d": 20 + }, + "mne/utils/config.py": { + "a": 0, + "d": 6 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 28 + }, + "mne/utils/tests/test_numerics.py": { + "a": 1, + "d": 17 + }, + "mne/viz/epochs.py": { + "a": 4, + "d": 23 + }, + "mne/viz/evoked.py": { + "a": 5, + "d": 26 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 10 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 4 + }, + "mne/viz/tests/test_evoked.py": { + "a": 0, + "d": 5 + }, + 
"mne/viz/tests/test_ica.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6826.json b/doc/sphinxext/prs/6826.json new file mode 100644 index 00000000000..d9d075f2785 --- /dev/null +++ b/doc/sphinxext/prs/6826.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e154730cfe3dc65cc0715634f8e814ecd3015baa", + "authors": [ + { + "n": "Joan Massich", + "e": "mailsik@gmail.com" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 5, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6827.json b/doc/sphinxext/prs/6827.json new file mode 100644 index 00000000000..478aa988ff7 --- /dev/null +++ b/doc/sphinxext/prs/6827.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "978437d1b0eadf54c5b36d8223fb56a8c4e54881", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/defaults.py": { + "a": 8, + "d": 5 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_optical_density.py": { + "a": 47, + "d": 0 + }, + "mne/preprocessing/tests/test_optical_density.py": { + "a": 59, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/683.json b/doc/sphinxext/prs/683.json new file mode 100644 index 00000000000..7147f292d81 --- /dev/null +++ b/doc/sphinxext/prs/683.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ecc77d892f309b812daaafc1e2c6ec1a88ddc11d", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "mne/preprocessing/stim.py": { + "a": 9, + "d": 6 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 46, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6830.json b/doc/sphinxext/prs/6830.json new file mode 100644 index 00000000000..98e6f1dc5b6 --- /dev/null +++ b/doc/sphinxext/prs/6830.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b822c1c9a1925e57dab04cab0ab5dfdf1766ed06", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.15.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.18.inc": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 0, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 58 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 0, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6831.json b/doc/sphinxext/prs/6831.json new file mode 100644 index 00000000000..4b06de9c583 --- /dev/null +++ b/doc/sphinxext/prs/6831.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7ae8e611f8722554f0c93b1314233604ae787914", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 6, + "d": 1 + }, + "doc/contributing.html": { + "a": 9, + "d": 0 + }, + "doc/documentation.html": { + "a": 9, + "d": 0 + }, + "doc/getting_started.html": { + "a": 9, + "d": 0 + }, + "doc/install_mne_python.html": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6835.json b/doc/sphinxext/prs/6835.json new file mode 100644 index 00000000000..45a9335f4c7 --- /dev/null +++ b/doc/sphinxext/prs/6835.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "3fd31bb969a0073e26463682c1fafd7dad62a2cb", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "mne/event.py": { + "a": 39, + "d": 24 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 2, + "d": 2 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/data/fsaverage-fiducials.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_raw-annot.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/test_meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_show_fiff.py": { + "a": 29, + "d": 0 + }, + "mne/tests/test_event.py": { + "a": 50, + "d": 24 + }, + "mne/utils/tests/test_testing.py": { + "a": 0, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6838.json b/doc/sphinxext/prs/6838.json new file mode 100644 index 00000000000..1cf4d751ce6 --- /dev/null +++ b/doc/sphinxext/prs/6838.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "713de842ffc17be20f0d6de67b2bd6e1fd4020b5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/source.py": { + "a": 2, + "d": 3 + }, + "mne/source_space.py": { + "a": 17, + "d": 6 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6848.json b/doc/sphinxext/prs/6848.json new file mode 100644 index 00000000000..c90e3f949fc --- /dev/null +++ b/doc/sphinxext/prs/6848.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9a65a1bd5a9cec2cf5fc0a016db6d4d16e53ef3b", + "authors": [ + { + "n": "Kaisu Lankinen", + "e": null + } + ], + "changes": { + "mne/source_space.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6849.json b/doc/sphinxext/prs/6849.json new file mode 100644 index 00000000000..2dbf14457da --- /dev/null +++ b/doc/sphinxext/prs/6849.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "08d699e15f891c39f2a145801dc1eb54caf1c16b", + "authors": [ + { + "n": "Padma Sundaram", + "e": "tottochan@gmail.com" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6852.json b/doc/sphinxext/prs/6852.json new file mode 100644 index 00000000000..110add349a1 --- /dev/null +++ b/doc/sphinxext/prs/6852.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9afcbcb21ca5df53eaf26911d0199dcc04bfc4ba", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 12, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6855.json b/doc/sphinxext/prs/6855.json new file mode 100644 index 00000000000..20abde3cae5 --- /dev/null +++ b/doc/sphinxext/prs/6855.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b42a91c6771ae85c3307d3ce945d5e0be1bb250a", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 40, + "d": 5 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/6856.json b/doc/sphinxext/prs/6856.json new file mode 100644 index 00000000000..4ff9f5add1f --- /dev/null +++ b/doc/sphinxext/prs/6856.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "03b58ad2e90fa6c609289c2ae49ba6cba06279eb", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 3 + }, + "mne/coreg.py": { + "a": 5, + "d": 1 + }, + "mne/defaults.py": { + "a": 2, + "d": 0 + }, + "mne/io/nirx/nirx.py": { + "a": 31, + "d": 7 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 61, + "d": 39 + }, + "mne/viz/_3d.py": { + "a": 24, + "d": 20 + }, + "mne/viz/tests/test_3d.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6857.json b/doc/sphinxext/prs/6857.json new file mode 100644 index 00000000000..a7dd5d3d665 --- /dev/null +++ b/doc/sphinxext/prs/6857.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "36eaa1c9aca28efce74df449140e932177cf37f4", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 8, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6859.json b/doc/sphinxext/prs/6859.json new file mode 100644 index 00000000000..5fe6d116be1 --- /dev/null +++ b/doc/sphinxext/prs/6859.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "301f194e709d45065a24293b0786b3c2d821d2cf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 12, + "d": 7 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 15, + "d": 15 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 4, + "d": 7 + }, + "mne/datasets/_fsaverage/bem.txt": { + "a": 1, + "d": 0 + }, + "mne/morph.py": { + "a": 179, + "d": 101 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 5 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 31, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6863.json b/doc/sphinxext/prs/6863.json new file mode 100644 index 00000000000..31cef4fb06b --- /dev/null +++ b/doc/sphinxext/prs/6863.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8902d8b9b0d795059779eb4145932ef341177297", + "authors": [ + { + "n": "Adonay Nunes", + "e": "adonay.s.nunes@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 2, + "d": 2 + }, + "mne/connectivity/spectral.py": { + "a": 48, + "d": 11 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6864.json b/doc/sphinxext/prs/6864.json new file mode 100644 index 00000000000..28b35968165 --- /dev/null +++ b/doc/sphinxext/prs/6864.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ba5c83e55776fabe306f2a702a56f8b7391fb5bc", + "authors": [ + { + "n": "Nataliia Kozhemiako", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6865.json b/doc/sphinxext/prs/6865.json new file mode 100644 index 00000000000..ae029031ffd --- /dev/null +++ 
b/doc/sphinxext/prs/6865.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b5ac3014096e323053a7a155153dd09cf3d30b29", + "authors": [ + { + "n": "Kaisu Lankinen", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6866.json b/doc/sphinxext/prs/6866.json new file mode 100644 index 00000000000..9734732a4a9 --- /dev/null +++ b/doc/sphinxext/prs/6866.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "81ab01de410f7de45d08c21f564571a3c28f8863", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6867.json b/doc/sphinxext/prs/6867.json new file mode 100644 index 00000000000..86c1798651b --- /dev/null +++ b/doc/sphinxext/prs/6867.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "edaef78a9a8909decee02486cf7b9585117a7ab1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 3 + }, + "examples/datasets/plot_opm_data.py": { + "a": 2, + "d": 1 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 4, + "d": 2 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/687.json b/doc/sphinxext/prs/687.json new file mode 100644 index 00000000000..892be9599a2 --- /dev/null +++ b/doc/sphinxext/prs/687.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e4137fec06a78751cd39036e817c5f0d0d80074d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6871.json b/doc/sphinxext/prs/6871.json new file mode 100644 index 00000000000..2339d0aa78c --- /dev/null +++ b/doc/sphinxext/prs/6871.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1e9b065b91d70d401b49c1b1a5a16767e00d9592", + "authors": [ + { + "n": "Fahimeh Mamashli", + "e": "fahimeh.mamashli@pfizer.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 3 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 30, + "d": 12 + }, + "mne/tests/test_epochs.py": { + "a": 50, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6872.json b/doc/sphinxext/prs/6872.json new file mode 100644 index 00000000000..07911f34e0e --- /dev/null +++ b/doc/sphinxext/prs/6872.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e78ac34a81216ad3bcccfcfa1ea991dc7c8bfdc3", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/plot_30_filtering_resampling.py": { + "a": 229, + "d": 91 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6873.json b/doc/sphinxext/prs/6873.json new file mode 100644 index 00000000000..7a7ab4047f6 --- /dev/null +++ b/doc/sphinxext/prs/6873.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "18bd6eeca4a00a4956fb72e755cb1213566fad2c", + "authors": [ + { + "n": "Mohammad Daneshzand", + "e": "mdaneshz@my.bridgeport.edu" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + 
"mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 16, + "d": 11 + }, + "mne/utils/tests/test_numerics.py": { + "a": 2, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6874.json b/doc/sphinxext/prs/6874.json new file mode 100644 index 00000000000..3784489670e --- /dev/null +++ b/doc/sphinxext/prs/6874.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cfab03350b00f4b0e129ce83dfb70ed73554b8e0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 0, + "d": 2 + }, + "tutorials/discussions/plot_background_filtering.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6875.json b/doc/sphinxext/prs/6875.json new file mode 100644 index 00000000000..8937a5a1d6d --- /dev/null +++ b/doc/sphinxext/prs/6875.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "70bf11cfce28ee3d989e4842939a7ca3c8b51ab6", + "authors": [ + { + "n": "Mohamed Sherif", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6876.json b/doc/sphinxext/prs/6876.json new file mode 100644 index 00000000000..bf903342ca2 --- /dev/null +++ b/doc/sphinxext/prs/6876.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9400438ffdced3f639bccb4c46dac4fab1f78cef", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/0.19.inc": { + "a": 11, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 30, + "d": 12 + }, + "mne/tests/test_epochs.py": { + "a": 50, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6877.json b/doc/sphinxext/prs/6877.json new file mode 100644 index 00000000000..632fef26536 --- /dev/null +++ b/doc/sphinxext/prs/6877.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "63522e48f0420081b5698591f0062c86ff4bcd38", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 3, + "d": 2 + }, + "examples/inverse/plot_mne_crosstalk_function.py": { + "a": 0, + "d": 67 + }, + "examples/inverse/plot_mne_point_spread_function.py": { + "a": 0, + "d": 82 + }, + "examples/inverse/plot_psf_ctf_vertices.py": { + "a": 97, + "d": 0 + }, + "mne/minimum_norm/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 5, + "d": 1 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 284, + "d": 0 + }, + "mne/minimum_norm/tests/test_psf_ctf.py": { + "a": 10, + "d": 8 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 124, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6878.json b/doc/sphinxext/prs/6878.json new file mode 100644 index 00000000000..60d2be91a5d --- /dev/null +++ b/doc/sphinxext/prs/6878.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "474e1abc737d40d3c0a42a4362755b12321d3094", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6879.json b/doc/sphinxext/prs/6879.json new file mode 100644 
index 00000000000..49f132390c1 --- /dev/null +++ b/doc/sphinxext/prs/6879.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e1b44fa5a2fc80f52d6d19a5d7c239ec4a09a8bc", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/688.json b/doc/sphinxext/prs/688.json new file mode 100644 index 00000000000..f816e6c545d --- /dev/null +++ b/doc/sphinxext/prs/688.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5728b7a205a683851b87148dede318c5282b33f1", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/contributing.rst": { + "a": 124, + "d": 3 + }, + "doc/source/git_links.inc": { + "a": 27, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6880.json b/doc/sphinxext/prs/6880.json new file mode 100644 index 00000000000..450a404f629 --- /dev/null +++ b/doc/sphinxext/prs/6880.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d2e5b55c91bde5a0627cdd7ac2895c948cea39a4", + "authors": [ + { + "n": "Mohamed Sherif", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6881.json b/doc/sphinxext/prs/6881.json new file mode 100644 index 00000000000..41f10e44173 --- /dev/null +++ b/doc/sphinxext/prs/6881.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "311317b2fdf2c87b124c72032122914611daaede", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6882.json b/doc/sphinxext/prs/6882.json new file mode 100644 index 00000000000..4163daee4ae --- /dev/null +++ b/doc/sphinxext/prs/6882.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3a26fade683b6582e9c0693f9eb728260e266f58", + "authors": [ + { + "n": "Robert Oostenveld", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/plot_30_filtering_resampling.py": { + "a": 44, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6883.json b/doc/sphinxext/prs/6883.json new file mode 100644 index 00000000000..70384970cce --- /dev/null +++ b/doc/sphinxext/prs/6883.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a81a5f85507a8874ce2b59e9b3673e4155aa57aa", + "authors": [ + { + "n": "Nataliia Kozhemiako", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 18, + "d": 4 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_head_positions.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6885.json b/doc/sphinxext/prs/6885.json new file mode 100644 index 00000000000..3bb3ab2b5f2 --- /dev/null +++ b/doc/sphinxext/prs/6885.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f59d54ef3632fa7bde410d8550a6276b4311895b", + "authors": [ + { + "n": "Fahimeh Mamashli", + "e": "fahimeh.mamashli@pfizer.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/6887.json b/doc/sphinxext/prs/6887.json new file mode 100644 index 00000000000..2e03f6c47f8 --- /dev/null +++ b/doc/sphinxext/prs/6887.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9de684d70e489d8a66f2e33c2c5af0170b766fda", + "authors": [ + { + "n": "Fahimeh Mamashli", + "e": "fahimeh.mamashli@pfizer.com" + } + ], + "changes": { + "mne/transforms.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6889.json b/doc/sphinxext/prs/6889.json new file mode 100644 index 00000000000..3efe99cef3d --- /dev/null +++ b/doc/sphinxext/prs/6889.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a7664c58674412cf4e842960581406533edcc615", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 12, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6892.json b/doc/sphinxext/prs/6892.json new file mode 100644 index 00000000000..46c0e7c3e66 --- /dev/null +++ b/doc/sphinxext/prs/6892.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e07cc5059cb6e2fb7e5abe0ed3db0fad7f590959", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 6 + }, + "mne/commands/mne_anonymize.py": { + "a": 99, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 13, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 117, + "d": 12 + }, + "mne/io/tests/test_meas_info.py": { + "a": 93, + "d": 65 + }, + "mne/tests/test_report.py": { + "a": 18, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6894.json b/doc/sphinxext/prs/6894.json new file mode 100644 index 00000000000..400c64b0ae0 --- /dev/null +++ b/doc/sphinxext/prs/6894.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "e86d78d3f1c6c530e7b3fdae03caeb56cf127b05", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 45, + "d": 13 + }, + "mne/channels/tests/test_montage.py": { + "a": 6, + "d": 3 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/_digitization.py": { + "a": 8, + "d": 5 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/transforms.py": { + "a": 18, + "d": 6 + }, + "mne/utils/check.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 5, + "d": 11 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_eeg_mri_coords.py": { + "a": 156, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6895.json b/doc/sphinxext/prs/6895.json new file mode 100644 index 00000000000..51ef9600d79 --- /dev/null +++ b/doc/sphinxext/prs/6895.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5e52983a22a5e1727f01d4b1240c111dc09a7ff0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/preprocessing/plot_45_projectors_background.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6896.json b/doc/sphinxext/prs/6896.json new file mode 100644 index 00000000000..d68468ce1cc --- /dev/null +++ b/doc/sphinxext/prs/6896.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1fec322892911ba3a3175df378fbdc7458612845", + 
"authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 5, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6897.json b/doc/sphinxext/prs/6897.json new file mode 100644 index 00000000000..1bb6ab386ef --- /dev/null +++ b/doc/sphinxext/prs/6897.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2c2488c172f6fbcc653cb38317a9bca5e1ddb867", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 5, + "d": 4 + }, + "mne/io/brainvision/tests/data/test.hpts": { + "a": 3, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6899.json b/doc/sphinxext/prs/6899.json new file mode 100644 index 00000000000..092e4808337 --- /dev/null +++ b/doc/sphinxext/prs/6899.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0b20f152d0fdcb1c6217c60f06e9ba087d291fa8", + "authors": [ + { + "n": "Nicolas Legrand", + "e": "nicolas.legrand@cas.au.dk" + } + ], + "changes": { + "tutorials/misc/plot_modifying_data_inplace.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/69.json b/doc/sphinxext/prs/69.json new file mode 100644 index 00000000000..8f16e0cc458 --- /dev/null +++ b/doc/sphinxext/prs/69.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "edfe5063a9fb366d37af37d302822d637f86ab6d", + "authors": [ + { + "n": "Daniel Strohmeier", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 85, + "d": 10 + }, + "mne/tests/test_epochs.py": { + "a": 57, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/690.json b/doc/sphinxext/prs/690.json new file mode 100644 index 00000000000..aa5152a32bb --- /dev/null +++ b/doc/sphinxext/prs/690.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5ef1710b5074aef193983e21d31224ac07e45f11", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/decoding/plot_decoding_csp_space.py": { + "a": 2, + "d": 3 + }, + "mne/decoding/csp.py": { + "a": 12, + "d": 14 + }, + "mne/decoding/tests/test_csp.py": { + "a": 19, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6900.json b/doc/sphinxext/prs/6900.json new file mode 100644 index 00000000000..77bca536d57 --- /dev/null +++ b/doc/sphinxext/prs/6900.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "52498f95e1c56ba67e893eb974c07c2b3f755d78", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 27, + "d": 30 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6902.json b/doc/sphinxext/prs/6902.json new file mode 100644 index 00000000000..c70617d0bf5 --- /dev/null +++ b/doc/sphinxext/prs/6902.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ab7176a0fc1a9d7b81791ccb9d69f8a29ad3be8e", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 11, + "d": 13 + }, + "mne/channels/tests/test_layout.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6903.json 
b/doc/sphinxext/prs/6903.json new file mode 100644 index 00000000000..f9153cc1c5b --- /dev/null +++ b/doc/sphinxext/prs/6903.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "32156db6c0819617791c23e389c3c088cfbe6f68", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_digitization/__init__.py": { + "a": 0, + "d": 5 + }, + "mne/_digitization/base.py": { + "a": 0, + "d": 124 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 4, + "d": 6 + }, + "mne/channels/tests/test_montage.py": { + "a": 3, + "d": 4 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 1, + "d": 2 + }, + "mne/io/_digitization.py": { + "a": 127, + "d": 24 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 3 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 2 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 10 + }, + "mne/io/tests/test_digitization.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/montage.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6904.json b/doc/sphinxext/prs/6904.json new file mode 100644 index 00000000000..fadc7d781d9 --- /dev/null +++ b/doc/sphinxext/prs/6904.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "050e3f84efc2be7dafaf65408f6b3152d41938ec", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/io/proj.py": { + "a": 14, + "d": 27 + }, + "mne/utils/docs.py": { + "a": 55, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 17, + "d": 67 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 2, + "d": 2 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6906.json b/doc/sphinxext/prs/6906.json new file mode 100644 index 00000000000..167651fb254 --- /dev/null +++ b/doc/sphinxext/prs/6906.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "359e3410dd5e61829433b3c41f102332973e96fe", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_resolution_metrics.py": { + "a": 124, + "d": 0 + }, + "examples/inverse/plot_resolution_metrics_eegmeg.py": { + "a": 134, + "d": 0 + }, + "mne/minimum_norm/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 334, + "d": 0 + }, + "mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 159, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6907.json b/doc/sphinxext/prs/6907.json new file mode 100644 index 00000000000..45bf95c2133 --- /dev/null +++ b/doc/sphinxext/prs/6907.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "0563399e20c9643a98fb909aba9647bc90f4c49f", + "authors": [ + { + "n": "Adonay Nunes", + "e": "adonay.s.nunes@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 14, + "d": 5 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 10, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 16, + "d": 9 + }, + "mne/utils/check.py": { + "a": 15, + "d": 1 + }, + "tutorials/stats-source-space/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6908.json b/doc/sphinxext/prs/6908.json new file mode 100644 index 00000000000..0db117d5266 --- /dev/null +++ b/doc/sphinxext/prs/6908.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "341a078160db624bc86a9c3be26ae3e1f1adbb4a", + "authors": [ + { + "n": "Kaisu Lankinen", + "e": null + } + ], + "changes": { + "examples/inverse/plot_source_space_snr.py": { + "a": 100, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 71, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6909.json b/doc/sphinxext/prs/6909.json new file mode 100644 index 00000000000..10dbde08c4e --- /dev/null +++ b/doc/sphinxext/prs/6909.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "194d4eda74621b9a2d9a8e3e89f4e12a07c74996", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/_includes/dig_formats.rst": { + "a": 3, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 5, + "d": 5 + }, + "mne/channels/montage.py": { + "a": 74, + "d": 3 + }, + "mne/channels/tests/test_montage.py": { + "a": 43, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 15, + "d": 8 + }, + "tutorials/intro/plot_40_sensor_locations.py": { + "a": 2, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/691.json b/doc/sphinxext/prs/691.json new file mode 100644 index 00000000000..32f0574c7c1 --- /dev/null +++ b/doc/sphinxext/prs/691.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d0aeec4b8d8fb0795b527b0306c51d2cd94775ed", + "authors": [ + { + "n": "Romain Trachel", + "e": "trachelr@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_decoding_csp_space.py": { + "a": 6, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 62, + "d": 8 + }, + "mne/decoding/tests/test_csp.py": { + "a": 29, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6910.json b/doc/sphinxext/prs/6910.json new file mode 100644 index 00000000000..691a2b2b498 --- /dev/null +++ b/doc/sphinxext/prs/6910.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "3fafe437413f29b91f36ca2726a4cceb87dfbab7", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/preprocessing/plot_eeg_csd.py": { + "a": 96, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 6, + "d": 4 + }, + "mne/channels/interpolation.py": { + "a": 23, + "d": 3 + }, + "mne/cov.py": { + "a": 7, + "d": 3 + }, + "mne/defaults.py": { + "a": 7, + "d": 6 + }, + "mne/io/ctf/info.py": { + "a": 2, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 4, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/pick.py": { + "a": 23, + "d": 14 + }, + "mne/io/tests/test_pick.py": { 
+ "a": 14, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_csd.py": { + "a": 189, + "d": 0 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 195, + "d": 0 + }, + "mne/report.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 0, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 13, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6911.json b/doc/sphinxext/prs/6911.json new file mode 100644 index 00000000000..32159510919 --- /dev/null +++ b/doc/sphinxext/prs/6911.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "74ceb7dc4a4c71cd4749fc5af385d548eef57749", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/externals/jdcal.py": { + "a": 0, + "d": 116 + }, + "mne/io/tag.py": { + "a": 2, + "d": 2 + }, + "mne/io/write.py": { + "a": 2, + "d": 3 + }, + "mne/utils/__init__.py": { + "a": 3, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 96, + "d": 0 + }, + "mne/utils/tests/test_numerics.py": { + "a": 23, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6912.json b/doc/sphinxext/prs/6912.json new file mode 100644 index 00000000000..46a1c81ce95 --- /dev/null +++ b/doc/sphinxext/prs/6912.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b8862d9d7549cdab9ed8d1e216f3bb8b268ac9e5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 3, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 2 + }, + "mne/io/_digitization.py": { + "a": 1, + "d": 30 + }, + "mne/io/tests/test_digitization.py": { + "a": 0, + "d": 39 + }, + "mne/io/tests/test_raw.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6913.json b/doc/sphinxext/prs/6913.json new file mode 100644 index 00000000000..bf2e20e9475 --- /dev/null +++ b/doc/sphinxext/prs/6913.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "98ebb2e1333a491d40550e48fe2955992af7370e", + "authors": [ + { + "n": "Kaisu Lankinen", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6914.json b/doc/sphinxext/prs/6914.json new file mode 100644 index 00000000000..761f6cc549e --- /dev/null +++ b/doc/sphinxext/prs/6914.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ec1058fc03a4c5e4fb472916159a96398fe7b8cb", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6915.json b/doc/sphinxext/prs/6915.json new file mode 100644 index 00000000000..24abd2f2a8c --- /dev/null +++ b/doc/sphinxext/prs/6915.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "610ec2a50d7a0e17d1ae8a229793a51370dec81d", + "authors": [ + { + "n": "Fahimeh Mamashli", + "e": "fahimeh.mamashli@pfizer.com" + }, + { + "n": "Mainak Jas", + "e": "jasmainak@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 2, + "d": 2 + }, + "doc/changes/devel/6915.newfeature.rst": { + "a": 1, + "d": 0 + }, + "examples/datasets/brainstorm_data.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/stim.py": { + "a": 56, + "d": 9 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 35, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 8, + 
"d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6916.json b/doc/sphinxext/prs/6916.json new file mode 100644 index 00000000000..138d2bfbe62 --- /dev/null +++ b/doc/sphinxext/prs/6916.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a41dd08b695d02ffdbf0f37119728e8b5e4d1f26", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6918.json b/doc/sphinxext/prs/6918.json new file mode 100644 index 00000000000..c750b3e4d8e --- /dev/null +++ b/doc/sphinxext/prs/6918.json @@ -0,0 +1,211 @@ +{ + "merge_commit_sha": "e18950e9c8635997139c3cc4d940512a64dc5723", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".mailmap": { + "a": 2, + "d": 2 + }, + "doc/changes/0.8.inc": { + "a": 1, + "d": 1 + }, + "doc/references.rst": { + "a": 2, + "d": 2 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 2, + "d": 2 + }, + "examples/io/plot_read_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 5, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/data/coil_def.dat": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 9, + "d": 6 + }, + "mne/forward/_field_interpolation.py": { + "a": 6, + "d": 0 + }, + "mne/forward/_lead_dots.py": { + "a": 5, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 5, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 5, + "d": 2 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 3 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 6, + "d": 6 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/_digitization.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/constants.py": { + "a": 2, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 2, + "d": 1 + }, + "mne/io/ctf/res4.py": { + "a": 2, + "d": 1 + }, + "mne/io/ctf_comp.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/matrix.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/open.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/io/tree.py": { + "a": 1, + "d": 1 + }, + "mne/io/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/selection.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 5, + "d": 2 + }, + "mne/surface.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/692.json b/doc/sphinxext/prs/692.json new file mode 100644 index 00000000000..bb9d049cdd1 --- /dev/null +++ b/doc/sphinxext/prs/692.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "ab82e2ba16163fe83b26f3e332a3c9d3761cd138", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/realtime/rt_feedback_client.py": { + "a": 116, + "d": 0 + }, + "examples/realtime/rt_feedback_server.py": { + "a": 143, + "d": 0 + }, + "mne/realtime/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/realtime/mockclient.py": { + "a": 78, + "d": 2 + }, + "mne/realtime/stim_server_client.py": { + "a": 262, + "d": 0 + }, + "mne/realtime/tests/test_mockclient.py": { + "a": 26, + "d": 7 + }, + "mne/realtime/tests/test_stim_client_server.py": { + "a": 46, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6920.json b/doc/sphinxext/prs/6920.json new file mode 100644 index 00000000000..bed99f2474a --- /dev/null +++ b/doc/sphinxext/prs/6920.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3c5770522827205e65ed401dddf4abc9e50d776a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/morph.py": { + "a": 25, + "d": 32 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6921.json b/doc/sphinxext/prs/6921.json new file mode 100644 index 00000000000..a4228d3baab --- /dev/null +++ b/doc/sphinxext/prs/6921.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8fa427b8a5496115d9587fa6eaa82349c42bff69", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6922.json b/doc/sphinxext/prs/6922.json new file mode 100644 index 00000000000..7f0e8629a59 --- /dev/null +++ b/doc/sphinxext/prs/6922.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "249bdb60ffd68d325881282591a2e31485ef3455", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/proj.py": { + "a": 11, + "d": 59 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 4, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 94, + "d": 53 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6923.json b/doc/sphinxext/prs/6923.json new file mode 100644 index 00000000000..30f428d281f --- /dev/null +++ b/doc/sphinxext/prs/6923.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "475b55298715a2be04ad31c039e853456a3102a9", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 38, + "d": 3 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6924.json b/doc/sphinxext/prs/6924.json new file mode 100644 index 00000000000..c55cca86267 --- /dev/null +++ b/doc/sphinxext/prs/6924.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "26a0d938a4a532615493718a3fbb689958ce2893", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 15, + "d": 12 + }, + "mne/coreg.py": { + "a": 6, + "d": 13 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 55, + "d": 93 + }, + "mne/tests/test_morph.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 11, + "d": 16 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 7, + "d": 26 + }, + "mne/viz/_3d.py": { + "a": 73, + "d": 69 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6925.json b/doc/sphinxext/prs/6925.json new file mode 100644 index 00000000000..d9198920fd2 --- /dev/null +++ b/doc/sphinxext/prs/6925.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1d4cc2749fccf4325ffd3e4efba594bcda5e5917", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/links.inc": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6926.json b/doc/sphinxext/prs/6926.json new file mode 100644 index 00000000000..1c0cdf72457 --- /dev/null +++ b/doc/sphinxext/prs/6926.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8e770f1de183940552b3b5415b1cbcb577c43e6b", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/preprocessing/_optical_density.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/tests/test_optical_density.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6927.json b/doc/sphinxext/prs/6927.json new file mode 100644 index 00000000000..d8a4cccf7b2 --- /dev/null +++ b/doc/sphinxext/prs/6927.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9d2fcbaf054fb168d56f8250a568afd3d9555d89", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 9, + "d": 7 + }, + "mne/tests/test_evoked.py": { + "a": 11, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6928.json b/doc/sphinxext/prs/6928.json new file mode 100644 index 00000000000..1175f22d2e3 --- /dev/null +++ b/doc/sphinxext/prs/6928.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "a7db5f4924560344fa111b650dfbea5cd64c2452", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 4, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 3, + "d": 8 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 30, + "d": 13 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6930.json b/doc/sphinxext/prs/6930.json new file mode 100644 index 00000000000..dc522b8a7e9 --- /dev/null +++ b/doc/sphinxext/prs/6930.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "852a44536bfbfe2f35e43729c07d060902787991", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6931.json b/doc/sphinxext/prs/6931.json new file mode 100644 
index 00000000000..51443ce7a57 --- /dev/null +++ b/doc/sphinxext/prs/6931.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "ee20e842efc6c950e0b93757b881c3fcf867c588", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_includes/channel_interpolation.rst": { + "a": 2, + "d": 4 + }, + "doc/_includes/forward.rst": { + "a": 11, + "d": 3 + }, + "doc/_includes/inverse.rst": { + "a": 4, + "d": 6 + }, + "doc/_static/style.css": { + "a": 3, + "d": 1 + }, + "doc/bibliography.rst": { + "a": 8, + "d": 0 + }, + "doc/conf.py": { + "a": 11, + "d": 1 + }, + "doc/overview/datasets_index.rst": { + "a": 23, + "d": 29 + }, + "doc/overview/faq.rst": { + "a": 10, + "d": 14 + }, + "doc/overview/implementation.rst": { + "a": 2, + "d": 10 + }, + "doc/overview/index.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 1663, + "d": 0 + }, + "doc/references.rst": { + "a": 24, + "d": 99 + }, + "mne/beamformer/_dics.py": { + "a": 24, + "d": 39 + }, + "mne/filter.py": { + "a": 6, + "d": 7 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/plot_45_projectors_background.py": { + "a": 3, + "d": 4 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 3, + "d": 4 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 7, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6933.json b/doc/sphinxext/prs/6933.json new file mode 100644 index 00000000000..0911785fae1 --- /dev/null +++ b/doc/sphinxext/prs/6933.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cf005b93f60cd1f2aa1701e5069c892c9290c14c", + "authors": [ + { + "n": "Stefan Repplinger", + "e": "stefan.repplinger@posteo.net" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6934.json b/doc/sphinxext/prs/6934.json new file mode 100644 index 00000000000..c50862f6f29 --- /dev/null +++ b/doc/sphinxext/prs/6934.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ac4abee16e1f106055437f5b7feaa99786831a6f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 17, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 7, + "d": 6 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 0, + "d": 1 + }, + "tutorials/time-freq/plot_sensors_time_frequency.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6936.json b/doc/sphinxext/prs/6936.json new file mode 100644 index 00000000000..47f19cfd760 --- /dev/null +++ b/doc/sphinxext/prs/6936.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3557ed1028ef8d572f3da294e23899376dc9e32d", + "authors": [ + { + "n": "Adonay Nunes", + "e": "adonay.s.nunes@gmail.com" + } + ], + "changes": { + "doc/overview/faq.rst": { + "a": 23, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6938.json b/doc/sphinxext/prs/6938.json new file mode 100644 index 00000000000..db004886c0c --- /dev/null +++ b/doc/sphinxext/prs/6938.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "ec63d985c7af85db32e01e559335bebe8d32a705", + "authors": [ + { + "n": "Christian O'Reilly", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Jona Sassenhagen", + "e": "jona.sassenhagen@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 107, + "d": 41 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 89, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6939.json b/doc/sphinxext/prs/6939.json new file mode 100644 index 00000000000..0e9caa212a2 --- /dev/null +++ b/doc/sphinxext/prs/6939.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "731973aaa2e7b3b7dac1e7cc75a482586a165151", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/flat.py": { + "a": 9, + "d": 3 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 19, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6940.json b/doc/sphinxext/prs/6940.json new file mode 100644 index 00000000000..1e6cff36562 --- /dev/null +++ b/doc/sphinxext/prs/6940.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "76d84953b7d3dcbf1e1cd0ba5ded6cb1c8d96fe6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/annotations.py": { + "a": 3, + "d": 3 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/flat.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 248, + "d": 96 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 18, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 88, + "d": 0 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 25, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6941.json b/doc/sphinxext/prs/6941.json new file mode 100644 index 00000000000..aa5bfb3b1b9 --- /dev/null +++ b/doc/sphinxext/prs/6941.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6601b7ce5cd2b8caa8dda9ac913ffadc62fda5bb", + "authors": [ + { + "n": "Darlene Heliokinde", + "e": "demetres.kostas@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 15, + "d": 8 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6942.json b/doc/sphinxext/prs/6942.json new file mode 100644 index 00000000000..01e01b0b29b --- /dev/null +++ b/doc/sphinxext/prs/6942.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "050665001dd8b5f30d9a718c378a4a3598eadc51", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/0.19.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6944.json b/doc/sphinxext/prs/6944.json new file mode 100644 index 00000000000..fbacb8ac195 --- /dev/null +++ b/doc/sphinxext/prs/6944.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "30fa09c049476ec2aa17811a0521420fca8cae49", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 14, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6946.json b/doc/sphinxext/prs/6946.json new file mode 100644 index 00000000000..91d740fa5f0 --- /dev/null +++ 
b/doc/sphinxext/prs/6946.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "47796f3349ddd84abec06f02c1bc263039dd30e8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_xdawn.py": { + "a": 11, + "d": 6 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 3, + "d": 4 + }, + "mne/utils/config.py": { + "a": 23, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6948.json b/doc/sphinxext/prs/6948.json new file mode 100644 index 00000000000..c96117bc9ee --- /dev/null +++ b/doc/sphinxext/prs/6948.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5f0430f81a40803a7d0ae72ec49d8bdcd9be4475", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 10, + "d": 0 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6949.json b/doc/sphinxext/prs/6949.json new file mode 100644 index 00000000000..a6b80e7ec51 --- /dev/null +++ b/doc/sphinxext/prs/6949.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "eb8d4a9ef5147a22b6945754be8bb651dd04b99e", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "tutorials/source-modeling/plot_dipole_fit.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/695.json b/doc/sphinxext/prs/695.json new file mode 100644 index 00000000000..7b4a55678c1 --- /dev/null +++ b/doc/sphinxext/prs/695.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3a930fed6c8f00a89a939d0bac82feea69b7482e", + "authors": [ + { + "n": "Romain Trachel", + "e": "trachelr@gmail.com" + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 11, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6954.json b/doc/sphinxext/prs/6954.json new file mode 100644 index 00000000000..be9c2870955 --- /dev/null +++ b/doc/sphinxext/prs/6954.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "91af33a3035b879d1efdaf0c3b0a7bc610ff7ae1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6955.json b/doc/sphinxext/prs/6955.json new file mode 100644 index 00000000000..23d04404f4e --- /dev/null +++ b/doc/sphinxext/prs/6955.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "19524d0fca2db68e0c358cd1eaf26f939a97ab7f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6959.json b/doc/sphinxext/prs/6959.json new file mode 100644 index 00000000000..3330140782b --- /dev/null +++ b/doc/sphinxext/prs/6959.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4dce919a08d40d8b505b3505e211086ea8a975b1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 8, + "d": 6 + }, + "mne/io/curry/curry.py": { + "a": 0, + "d": 1 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 8, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6960.json b/doc/sphinxext/prs/6960.json new file mode 100644 index 00000000000..94980175da5 --- /dev/null +++ 
b/doc/sphinxext/prs/6960.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "132a72023fec55d0e3e523aeecf1a854763f3738", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6961.json b/doc/sphinxext/prs/6961.json new file mode 100644 index 00000000000..0b143a5ca1e --- /dev/null +++ b/doc/sphinxext/prs/6961.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cf5698ce79c57d69ec17c41a0808d387dc388aef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/externals/tempita/__init__.py": { + "a": 7, + "d": 2 + }, + "mne/report.py": { + "a": 3, + "d": 2 + }, + "mne/utils/_bunch.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6962.json b/doc/sphinxext/prs/6962.json new file mode 100644 index 00000000000..54331e19f98 --- /dev/null +++ b/doc/sphinxext/prs/6962.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b4b03583bcb4cbb944a5bcff60f874e1e54db975", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 8, + "d": 6 + }, + "mne/io/curry/curry.py": { + "a": 0, + "d": 1 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 8, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6964.json b/doc/sphinxext/prs/6964.json new file mode 100644 index 00000000000..20d1ac89225 --- /dev/null +++ b/doc/sphinxext/prs/6964.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b0a89dbfc4aba6384fbad0f399fa9e856a0dac87", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6966.json b/doc/sphinxext/prs/6966.json new file mode 100644 index 00000000000..3d288056d69 --- /dev/null +++ b/doc/sphinxext/prs/6966.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b7f2cdb0f19424738f0d1781c610bf2f378910e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 17, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6967.json b/doc/sphinxext/prs/6967.json new file mode 100644 index 00000000000..d9a9905c241 --- /dev/null +++ b/doc/sphinxext/prs/6967.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e2befdb95131658dadce64d3d512b39499950cc", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/io/plot_20_reading_eeg_data.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6969.json b/doc/sphinxext/prs/6969.json new file mode 100644 index 00000000000..5f0458c2ec7 --- /dev/null +++ b/doc/sphinxext/prs/6969.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fc451b57c8e4c3a0b273b25241288a517467400f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/697.json b/doc/sphinxext/prs/697.json new file mode 100644 index 
00000000000..45fcb4d2d8b --- /dev/null +++ b/doc/sphinxext/prs/697.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "adab1d0c2008021ba40e3cd8f90e805f5d27c685", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 31, + "d": 18 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 6, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 96, + "d": 108 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 52, + "d": 32 + }, + "mne/viz.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6971.json b/doc/sphinxext/prs/6971.json new file mode 100644 index 00000000000..4c77cb78059 --- /dev/null +++ b/doc/sphinxext/prs/6971.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f37219e56099020848a38ca8ca1ad07b133b2b66", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 37, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6972.json b/doc/sphinxext/prs/6972.json new file mode 100644 index 00000000000..c7aeeac08c7 --- /dev/null +++ b/doc/sphinxext/prs/6972.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3e71210f034e53427fac25b8cda7c936c077f41d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_publication_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 0 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6975.json b/doc/sphinxext/prs/6975.json new file mode 100644 index 00000000000..3acacd219c1 --- /dev/null +++ b/doc/sphinxext/prs/6975.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "a76fb4496310af8a080d27c06ca5746ec5cac834", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/data/extinction_coef.mat": { + "a": 0, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/nirx.py": { + "a": 0, + "d": 13 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 7, + "d": 5 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_beer_lambert_law.py": { + "a": 137, + "d": 0 + }, + "mne/preprocessing/tests/test_beer_lambert_law.py": { + "a": 73, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6976.json b/doc/sphinxext/prs/6976.json new file mode 100644 index 00000000000..fc6b38fd1f2 --- /dev/null +++ b/doc/sphinxext/prs/6976.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0ffb606e36ce36a98db442de9a7784fce85c5928", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 18, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6977.json b/doc/sphinxext/prs/6977.json new file mode 100644 index 00000000000..c71de59ea4d --- /dev/null +++ b/doc/sphinxext/prs/6977.json @@ -0,0 +1,331 @@ +{ + "merge_commit_sha": "26d5a49c6e44da063e0b1f3b17fe8a72f2f53ac0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + 
"mne/annotations.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 3, + "d": 5 + }, + "mne/bem.py": { + "a": 12, + "d": 12 + }, + "mne/channels/montage.py": { + "a": 4, + "d": 4 + }, + "mne/connectivity/effective.py": { + "a": 7, + "d": 7 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 2 + }, + "mne/coreg.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 2, + "d": 4 + }, + "mne/datasets/sample/sample.py": { + "a": 12, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/receptive_field.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/transformer.py": { + "a": 6, + "d": 6 + }, + "mne/dipole.py": { + "a": 0, + "d": 2 + }, + "mne/epochs.py": { + "a": 6, + "d": 8 + }, + "mne/event.py": { + "a": 17, + "d": 18 + }, + "mne/evoked.py": { + "a": 4, + "d": 4 + }, + "mne/filter.py": { + "a": 0, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 0, + "d": 2 + }, + "mne/gui/__init__.py": { + "a": 14, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 6, + "d": 7 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 6, + "d": 6 + }, + "mne/io/base.py": { + "a": 4, + "d": 4 + }, + "mne/io/brainvision/brainvision.py": { + "a": 0, + "d": 1 + }, + "mne/io/curry/curry.py": { + "a": 0, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 41, + "d": 26 + }, + "mne/io/eeglab/eeglab.py": { + "a": 16, + "d": 17 + }, + "mne/io/egi/egi.py": { + "a": 4, + "d": 4 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 14, + "d": 18 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 2 + }, + "mne/io/open.py": { + "a": 5, + "d": 0 + }, + "mne/io/pick.py": { + "a": 4, + "d": 5 + }, + "mne/io/reference.py": { + "a": 33, + "d": 33 + }, + "mne/label.py": { + "a": 4, + "d": 7 + }, + "mne/minimum_norm/inverse.py": { + "a": 7, + "d": 7 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 11, + "d": 6 + }, + "mne/misc.py": { + "a": 5, + "d": 1 + }, + "mne/morph.py": { + "a": 5, + "d": 0 + }, + "mne/parallel.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/_optical_density.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 7, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 4, + "d": 4 + }, + "mne/selection.py": { + "a": 0, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 21, + "d": 21 + }, + "mne/source_space.py": { + "a": 4, + "d": 5 + }, + "mne/stats/cluster_level.py": { + "a": 6, + "d": 6 + }, + "mne/stats/multi_comp.py": { + "a": 0, + "d": 1 + }, + "mne/stats/parametric.py": { + "a": 0, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 0, + "d": 2 + }, + "mne/surface.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 42, + "d": 84 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stft.py": { + "a": 1, + "d": 11 + }, + "mne/time_frequency/_stockwell.py": { + "a": 8, + "d": 8 + }, + "mne/time_frequency/csd.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/multitaper.py": { + "a": 0, + "d": 3 + }, + "mne/time_frequency/psd.py": { + "a": 10, + "d": 10 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 2, + "d": 1 + }, + 
"mne/time_frequency/tfr.py": { + "a": 7, + "d": 12 + }, + "mne/utils/_logging.py": { + "a": 15, + "d": 10 + }, + "mne/utils/config.py": { + "a": 2, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 7 + }, + "mne/viz/_3d.py": { + "a": 7, + "d": 3 + }, + "mne/viz/backends/renderer.py": { + "a": 12, + "d": 13 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 4, + "d": 5 + }, + "mne/viz/misc.py": { + "a": 8, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 7, + "d": 7 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 19, + "d": 14 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/698.json b/doc/sphinxext/prs/698.json new file mode 100644 index 00000000000..5701949f443 --- /dev/null +++ b/doc/sphinxext/prs/698.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a629463849b5e00bd01fc13e90d0f775ef52afd2", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/decoding/classifier.py": { + "a": 0, + "d": 73 + }, + "mne/decoding/csp.py": { + "a": 7, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6981.json b/doc/sphinxext/prs/6981.json new file mode 100644 index 00000000000..edb9dacc0c3 --- /dev/null +++ b/doc/sphinxext/prs/6981.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "309a63861723e073b133181dee857ea01c2178ed", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 41, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6982.json b/doc/sphinxext/prs/6982.json new file mode 100644 index 00000000000..065d092d5e1 --- /dev/null +++ b/doc/sphinxext/prs/6982.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e58b72fb3be7ff2b2ca7a7498e685fa599626b8e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/nirx/tests/test_nirx.py": { + "a": 8, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 17, + "d": 10 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 22, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6983.json b/doc/sphinxext/prs/6983.json new file mode 100644 index 00000000000..69fa79fbec1 --- /dev/null +++ b/doc/sphinxext/prs/6983.json @@ -0,0 +1,291 @@ +{ + "merge_commit_sha": "9e588fca0007e8bda43c8a1349d679aaa71c0768", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 6, + "d": 9 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 4 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 2 + }, + "mne/connectivity/spectral.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 9, + "d": 9 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sample/sample.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/search_light.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/transformer.py": { + "a": 3, + "d": 3 + }, + "mne/epochs.py": { + "a": 5, + "d": 5 + }, + "mne/event.py": { + "a": 9, + "d": 10 + }, + "mne/evoked.py": { + "a": 8, + "d": 8 + }, + "mne/filter.py": { + "a": 7, + "d": 6 + }, + 
"mne/forward/_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 3 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 3, + "d": 3 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/io/open.py": { + "a": 2, + "d": 2 + }, + "mne/io/pick.py": { + "a": 25, + "d": 25 + }, + "mne/label.py": { + "a": 38, + "d": 49 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 7, + "d": 7 + }, + "mne/misc.py": { + "a": 2, + "d": 2 + }, + "mne/morph.py": { + "a": 4, + "d": 7 + }, + "mne/parallel.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/eog.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + "a": 38, + "d": 37 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/stim.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 15, + "d": 15 + }, + "mne/simulation/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/simulation/source.py": { + "a": 15, + "d": 27 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 26, + "d": 31 + }, + "mne/stats/cluster_level.py": { + "a": 12, + "d": 12 + }, + "mne/stats/multi_comp.py": { + "a": 15, + "d": 16 + }, + "mne/stats/parametric.py": { + "a": 2, + "d": 2 + }, + "mne/stats/permutations.py": { + "a": 3, + "d": 5 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 6, + "d": 6 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 8 + }, + "mne/time_frequency/_stft.py": { + "a": 12, + "d": 12 + }, + "mne/time_frequency/ar.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/csd.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/multitaper.py": { + "a": 7, + "d": 7 + }, + "mne/time_frequency/psd.py": { + "a": 9, + "d": 9 + }, + "mne/time_frequency/tfr.py": { + "a": 10, + "d": 10 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 9 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 12, + "d": 12 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 6, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6985.json b/doc/sphinxext/prs/6985.json new file mode 100644 index 00000000000..cb9af09e3ba --- /dev/null +++ b/doc/sphinxext/prs/6985.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ea9137f55baf9ea96820a87098ce3313fd0f334c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 6, + "d": 0 + }, + "mne/evoked.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 4 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 2 + } + } +} 
\ No newline at end of file diff --git a/doc/sphinxext/prs/6986.json b/doc/sphinxext/prs/6986.json new file mode 100644 index 00000000000..41c01e31f42 --- /dev/null +++ b/doc/sphinxext/prs/6986.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "633689fada385d562beedacc1cafb518f5dca4ce", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/nirx.py": { + "a": 13, + "d": 11 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 51, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6989.json b/doc/sphinxext/prs/6989.json new file mode 100644 index 00000000000..8e1c322ceb5 --- /dev/null +++ b/doc/sphinxext/prs/6989.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f78c18fff5aeff05261278fdfb62c42c30736fd6", + "authors": [ + { + "n": "Victor Férat", + "e": "victor.ferat@live.Fr" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_setup_forward_model.py": { + "a": 121, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6991.json b/doc/sphinxext/prs/6991.json new file mode 100644 index 00000000000..e7c36de7574 --- /dev/null +++ b/doc/sphinxext/prs/6991.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "799c2ecb74604ad0ab432b6cee5739dca5c85ca6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6992.json b/doc/sphinxext/prs/6992.json new file mode 100644 index 00000000000..8cce4fe7117 --- /dev/null +++ b/doc/sphinxext/prs/6992.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0a45ec5f8a0fef47dea39739bd97618c310e6f34", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 72, + "d": 31 + }, + "mne/viz/tests/test_misc.py": { + "a": 8, + "d": 6 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 21, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6993.json b/doc/sphinxext/prs/6993.json new file mode 100644 index 00000000000..969d7327a18 --- /dev/null +++ b/doc/sphinxext/prs/6993.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "058f396660d763fcd175bedf917cd27d923f511d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 6, + "d": 5 + }, + "mne/channels/interpolation.py": { + "a": 6, + "d": 10 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 26, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6996.json b/doc/sphinxext/prs/6996.json new file mode 100644 index 00000000000..4d5c113d02f --- /dev/null +++ b/doc/sphinxext/prs/6996.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "eb9fa1eb489491868453ed8a3c75440708547542", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/dipole.py": { + "a": 15, + "d": 14 + }, + "mne/epochs.py": { + "a": 2, + "d": 37 + }, + "mne/evoked.py": { + "a": 19, + "d": 57 + }, + "mne/filter.py": { + "a": 1, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_evoked.py": { + 
"a": 40, + "d": 8 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 45, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6998.json b/doc/sphinxext/prs/6998.json new file mode 100644 index 00000000000..90b08f28bff --- /dev/null +++ b/doc/sphinxext/prs/6998.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9771649ba90e0129e779923d4891b80ccf0a9b6a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/renderer.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/6999.json b/doc/sphinxext/prs/6999.json new file mode 100644 index 00000000000..989ec811f6c --- /dev/null +++ b/doc/sphinxext/prs/6999.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d2e86a157608072374f898098bbd3ec64457f77b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/_beer_lambert_law.py": { + "a": 9, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7.json b/doc/sphinxext/prs/7.json new file mode 100644 index 00000000000..dd9a6d5b729 --- /dev/null +++ b/doc/sphinxext/prs/7.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "602a264c1ca3f037f7b531084123c1ac9a41da25", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 77, + "d": 11 + }, + "mne/tests/test_epochs.py": { + "a": 40, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/70.json b/doc/sphinxext/prs/70.json new file mode 100644 index 00000000000..5d343a5ae87 --- /dev/null +++ b/doc/sphinxext/prs/70.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "67524c1a03b36954407fbcdd65ca614905cd021a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 71, + "d": 0 + }, + "mne/forward.py": { + "a": 54, + "d": 5 + }, + "mne/minimum_norm/inverse.py": { + "a": 50, + "d": 36 + }, + "mne/mixed_norm/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/mixed_norm/debiasing.py": { + "a": 126, + "d": 0 + }, + "mne/mixed_norm/inverse.py": { + "a": 226, + "d": 0 + }, + "mne/mixed_norm/optim.py": { + "a": 315, + "d": 0 + }, + "mne/mixed_norm/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/mixed_norm/tests/test_debiasing.py": { + "a": 22, + "d": 0 + }, + "mne/mixed_norm/tests/test_inverse.py": { + "a": 48, + "d": 0 + }, + "mne/mixed_norm/tests/test_optim.py": { + "a": 36, + "d": 0 + }, + "mne/viz.py": { + "a": 29, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7000.json b/doc/sphinxext/prs/7000.json new file mode 100644 index 00000000000..9668ecbbb7b --- /dev/null +++ b/doc/sphinxext/prs/7000.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e0688805e90d30c74bb1fc17eff1343edada5ce0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 8, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 4 + }, + "mne/io/reference.py": { + "a": 26, + "d": 9 + }, + "mne/io/tests/test_constants.py": { + "a": 10, + "d": 5 + }, + "mne/io/tests/test_reference.py": { + "a": 16, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7001.json b/doc/sphinxext/prs/7001.json new file mode 100644 index 
00000000000..10a81f3316e --- /dev/null +++ b/doc/sphinxext/prs/7001.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4cae8234a6d2c02982f1210907059ab944f8cc56", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 5, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7002.json b/doc/sphinxext/prs/7002.json new file mode 100644 index 00000000000..8592755e79a --- /dev/null +++ b/doc/sphinxext/prs/7002.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f6a7fa74289192030dbbc5bc23b0bb9f25180a14", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/defaults.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7004.json b/doc/sphinxext/prs/7004.json new file mode 100644 index 00000000000..820dd3684db --- /dev/null +++ b/doc/sphinxext/prs/7004.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6233196394b1712f65be858227d9846b534fa150", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "mne/datasets/hf_sef/hf_sef.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7005.json b/doc/sphinxext/prs/7005.json new file mode 100644 index 00000000000..14013deda7b --- /dev/null +++ b/doc/sphinxext/prs/7005.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "54e07b3257ee44ae28c5253f47ef73909ef23bfd", + "authors": [ + { + "n": "Victor Férat", + "e": "victor.ferat@live.Fr" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_prepare_bem_model.py": { + "a": 55, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 21, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7008.json b/doc/sphinxext/prs/7008.json new file mode 100644 index 00000000000..c2f4f73f762 --- /dev/null +++ b/doc/sphinxext/prs/7008.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "6ca8ec3786d864f53ab379f59e1f6668afd1ff5e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_make_report.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 2, + "d": 7 + }, + "mne/report.py": { + "a": 20, + "d": 27 + }, + "mne/tests/test_report.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 49, + "d": 16 + }, + "mne/viz/backends/_pyvista.py": { + "a": 31, + "d": 8 + }, + "mne/viz/backends/renderer.py": { + "a": 14, + "d": 5 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7010.json b/doc/sphinxext/prs/7010.json new file mode 100644 index 00000000000..0c6d626688d --- /dev/null +++ b/doc/sphinxext/prs/7010.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2f787cecacf203cb037cd96d5840916a8a260a7c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 0, + "d": 3 + }, + "mne/bem.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7011.json b/doc/sphinxext/prs/7011.json new file mode 100644 index 00000000000..8425b084730 --- /dev/null +++ b/doc/sphinxext/prs/7011.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fbd75733e1d57f3fdbb65b01500de18d05185fc5", + 
"authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/epochs/plot_10_epochs_overview.py": { + "a": 413, + "d": 0 + }, + "tutorials/epochs/plot_object_epochs.py": { + "a": 0, + "d": 167 + }, + "tutorials/epochs/plot_visualize_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_20_rejecting_bad_data.py": { + "a": 2, + "d": 0 + }, + "tutorials/raw/plot_20_event_arrays.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7014.json b/doc/sphinxext/prs/7014.json new file mode 100644 index 00000000000..56a5d784a86 --- /dev/null +++ b/doc/sphinxext/prs/7014.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "23e10c19da1803159e24709a6dfdb9cde9413798", + "authors": [ + { + "n": "Adonay Nunes", + "e": "adonay.s.nunes@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 13, + "d": 6 + }, + "mne/tests/test_annotations.py": { + "a": 39, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7016.json b/doc/sphinxext/prs/7016.json new file mode 100644 index 00000000000..ff9d94eb6ff --- /dev/null +++ b/doc/sphinxext/prs/7016.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "bb8cd6953757f051a9781f38198e5986511d7329", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 109, + "d": 29 + }, + "mne/io/tests/test_meas_info.py": { + "a": 48, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7017.json b/doc/sphinxext/prs/7017.json new file mode 100644 index 00000000000..881f96f4700 --- /dev/null +++ b/doc/sphinxext/prs/7017.json @@ -0,0 +1,243 @@ +{ + "merge_commit_sha": "42948689ea8c3fdb926f36a211d4d83fb9382789", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 2, + "d": 2 + }, + "examples/datasets/spm_faces_dataset.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_comparison.py": { + "a": 1, + "d": 0 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 2, + "d": 1 + }, + "mne/commands/mne_coreg.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_maxfilter.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_prepare_bem_model.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_report.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_setup_source_space.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_show_fiff.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_watershed_bem.py": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_what.py": { + "a": 1, + "d": 0 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 0 + }, + "mne/commands/utils.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/_fake/_fake.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/misc/_misc.py": { + "a": 
1, + "d": 0 + }, + "mne/datasets/mtrf/mtrf.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/multimodal/multimodal.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/phantom_4dbti/phantom_4dbti.py": { + "a": 2, + "d": 0 + }, + "mne/datasets/somato/somato.py": { + "a": 2, + "d": 0 + }, + "mne/filter.py": { + "a": 1, + "d": 0 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_marker_gui.py": { + "a": 1, + "d": 1 + }, + "mne/io/_digitization.py": { + "a": 2, + "d": 1 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 0 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 0 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 5, + "d": 5 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ctps.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 0 + }, + "mne/utils/check.py": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 4, + "d": 4 + }, + "mne/viz/backends/_pyvista.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_misc.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "setup.cfg": { + "a": 1, + "d": 5 + }, + "setup.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_45_projectors_background.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7018.json b/doc/sphinxext/prs/7018.json new file mode 100644 index 00000000000..3bcbf884bd9 --- /dev/null +++ b/doc/sphinxext/prs/7018.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "25f93eb8063936c8db2fd18d35c89724d65a21fd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 0 + }, + "doc/overview/datasets_index.rst": { + "a": 18, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/fnirs_motor/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/fnirs_motor/fnirs_motor.py": { + "a": 30, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 8, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 186, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7019.json b/doc/sphinxext/prs/7019.json new file mode 100644 index 00000000000..8a38554b759 --- /dev/null +++ b/doc/sphinxext/prs/7019.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b6f82798af05b1f457663179d64771341f66fd86", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 17, + "d": 21 + }, + "azure-pipelines.yml": { + "a": 69, + "d": 1 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/702.json b/doc/sphinxext/prs/702.json new file mode 100644 index 00000000000..510630bdbb1 --- /dev/null +++ b/doc/sphinxext/prs/702.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ac3216cf5ae91af1b0d2ee24b32e675792a45403", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "bin/mne_compute_proj_ecg.py": { + "a": 2, + "d": 1 + }, + "bin/mne_compute_proj_eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 17, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7022.json b/doc/sphinxext/prs/7022.json new file mode 100644 index 00000000000..1e67df2b967 --- /dev/null +++ b/doc/sphinxext/prs/7022.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4106d6dfa3bf79d7d39e3b68edab4ae8cd67c245", + "authors": [ + { + "n": "mathurinm", + "e": "mathurin.massias@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7023.json b/doc/sphinxext/prs/7023.json new file mode 100644 index 00000000000..d88c81730e7 --- /dev/null +++ b/doc/sphinxext/prs/7023.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d37f45790ec28f85b342321e41ec50c0e43100b5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7025.json b/doc/sphinxext/prs/7025.json new file mode 100644 index 00000000000..7cfebd4691b --- /dev/null +++ b/doc/sphinxext/prs/7025.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0e09618b63085867204479bf2571a561d72f437c", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 17, + "d": 6 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 11, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7026.json b/doc/sphinxext/prs/7026.json new file mode 100644 index 00000000000..aad0838fb89 --- /dev/null +++ b/doc/sphinxext/prs/7026.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c80badd8939423d9c5dd342a60135bd8ead6cc13", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/chpi.py": { + "a": 2, + "d": 2 + }, + "mne/io/artemis123/artemis123.py": { + "a": 30, + "d": 9 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7028.json b/doc/sphinxext/prs/7028.json new file mode 100644 index 00000000000..11916ab7ebd --- /dev/null +++ b/doc/sphinxext/prs/7028.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e04a8d127bbef82a96dd6659e1754759418b10cb", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7029.json b/doc/sphinxext/prs/7029.json new file mode 100644 index 00000000000..c41861ccfff --- /dev/null +++ b/doc/sphinxext/prs/7029.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "b12a220d2b6d5b5c89b05e5536be28bb21cfc361", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 14, + "d": 0 + }, + "mne/channels/channels.py": { 
+ "a": 71, + "d": 26 + }, + "mne/channels/tests/test_channels.py": { + "a": 49, + "d": 3 + }, + "mne/cov.py": { + "a": 25, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 39, + "d": 1 + }, + "mne/forward/tests/test_forward.py": { + "a": 12, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 31, + "d": 1 + }, + "mne/io/pick.py": { + "a": 30, + "d": 8 + }, + "mne/io/tests/test_meas_info.py": { + "a": 11, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 30, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 13, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/csd.py": { + "a": 69, + "d": 35 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 11, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/703.json b/doc/sphinxext/prs/703.json new file mode 100644 index 00000000000..a7e54e8b770 --- /dev/null +++ b/doc/sphinxext/prs/703.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7e037bb095e77528571d07a22879d2415c8a73bb", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/fiff/constants.py": { + "a": 5, + "d": 1 + }, + "mne/fiff/pick.py": { + "a": 30, + "d": 3 + }, + "mne/viz.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7030.json b/doc/sphinxext/prs/7030.json new file mode 100644 index 00000000000..5e711bab32f --- /dev/null +++ b/doc/sphinxext/prs/7030.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "965cffb205b3e5d11df8ac81c019716799125ded", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 17, + "d": 0 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7031.json b/doc/sphinxext/prs/7031.json new file mode 100644 index 00000000000..9b8a1d98d09 --- /dev/null +++ b/doc/sphinxext/prs/7031.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "34b6ed1b6807d90669b2b42080f5826851ae21bd", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 25, + "d": 44 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 8, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7032.json b/doc/sphinxext/prs/7032.json new file mode 100644 index 00000000000..7b8eb145eb6 --- /dev/null +++ b/doc/sphinxext/prs/7032.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "bd8616679dbf757930e34cb4b5ddc6d3d242c57a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 98, + "d": 86 + }, + "mne/fixes.py": { + "a": 27, + "d": 10 + }, + "mne/utils/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/utils/linalg.py": { + "a": 0, + "d": 86 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7033.json b/doc/sphinxext/prs/7033.json new file mode 100644 index 00000000000..4bfd18bedec --- /dev/null +++ 
b/doc/sphinxext/prs/7033.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a2a0e2c763715d174398eb137270045bf21232c6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/_eloreta.py": { + "a": 9, + "d": 12 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/linalg.py": { + "a": 0, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7035.json b/doc/sphinxext/prs/7035.json new file mode 100644 index 00000000000..c95cf3ab912 --- /dev/null +++ b/doc/sphinxext/prs/7035.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e165f22dacd2a89c9181c2f2f97806eeea11dd37", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_beer_lambert_law.py": { + "a": 16, + "d": 3 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7036.json b/doc/sphinxext/prs/7036.json new file mode 100644 index 00000000000..83a5eb2fe7f --- /dev/null +++ b/doc/sphinxext/prs/7036.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "fb98a50de7c03fa8a95456c05f33fdc743ed66e7", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/704.json b/doc/sphinxext/prs/704.json new file mode 100644 index 00000000000..bb403fff4ad --- /dev/null +++ b/doc/sphinxext/prs/704.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2c1ddf0983d424c99c9d3cc001832d79d90a478b", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 14, + "d": 14 + }, + "mne/preprocessing/ica.py": { + "a": 18, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7040.json b/doc/sphinxext/prs/7040.json new file mode 100644 index 00000000000..841691ab031 --- /dev/null +++ b/doc/sphinxext/prs/7040.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a26834b14ba0e1a9f67f6fa831657fd1f682a278", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/matrix.py": { + "a": 2, + "d": 4 + }, + "mne/io/open.py": { + "a": 2, + "d": 2 + }, + "mne/io/tag.py": { + "a": 19, + "d": 99 + }, + "mne/source_space.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7041.json b/doc/sphinxext/prs/7041.json new file mode 100644 index 00000000000..35279722b7d --- /dev/null +++ b/doc/sphinxext/prs/7041.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3ee879a9b6ff78f592e4a2d330ee2e6382fab143", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/search_light.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/tests/test_base.py": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 10, + "d": 0 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 2, + "d": 2 + }, + "mne/stats/tests/test_regression.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7042.json b/doc/sphinxext/prs/7042.json new file mode 100644 index 00000000000..4a55d6ccc57 --- /dev/null +++ b/doc/sphinxext/prs/7042.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e84f78a7f01bd889746d3ec3c09a8d0db61c56f0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_resolution_metrics.py": { + "a": 28, + "d": 18 + }, + "examples/inverse/plot_resolution_metrics_eegmeg.py": { + "a": 27, + "d": 17 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 0, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7043.json b/doc/sphinxext/prs/7043.json new file mode 100644 index 00000000000..b5a8bddadd1 --- /dev/null +++ b/doc/sphinxext/prs/7043.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "e1e7cc1f891308261dc98144890392111bf49afa", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 6, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 26, + "d": 10 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 8, + "d": 15 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 14, + "d": 12 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 10, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7044.json b/doc/sphinxext/prs/7044.json new file mode 100644 index 00000000000..c60a427be25 --- /dev/null +++ b/doc/sphinxext/prs/7044.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "63ca985352d493ecb3a14df753dcbc8bc33b8e67", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + 
"a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7045.json b/doc/sphinxext/prs/7045.json new file mode 100644 index 00000000000..87380a85892 --- /dev/null +++ b/doc/sphinxext/prs/7045.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "793fdaa0bf5897b26bc6720ae9806a186635eb3c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/705.json b/doc/sphinxext/prs/705.json new file mode 100644 index 00000000000..9752f3ffa13 --- /dev/null +++ b/doc/sphinxext/prs/705.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f822aff865eec167ae7683cb30cb3006a601f721", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_stim.py": { + "a": 8, + "d": 8 + }, + "mne/source_estimate.py": { + "a": 13, + "d": 8 + }, + "mne/tests/test_viz.py": { + "a": 1, + "d": 1 + }, + "mne/viz.py": { + "a": 69, + "d": 57 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7050.json b/doc/sphinxext/prs/7050.json new file mode 100644 index 00000000000..84f039da36d --- /dev/null +++ b/doc/sphinxext/prs/7050.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "76f198a6dafde5930341c9b8d093cddc0d1f2593", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7051.json b/doc/sphinxext/prs/7051.json new file mode 100644 index 00000000000..d65eaa973ac --- /dev/null +++ b/doc/sphinxext/prs/7051.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ea33a0aa7adc758eb9e2f7899c03d561eec2b707", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/inverse_sparse/_gamma_map.py": { + "a": 3, + "d": 3 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 11, + "d": 3 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7054.json b/doc/sphinxext/prs/7054.json new file mode 100644 index 00000000000..0c214844f98 --- /dev/null +++ b/doc/sphinxext/prs/7054.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "87756588a69514970ff5f24295773e9829e2d9ed", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/mne_python.rst": { + "a": 27, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7055.json b/doc/sphinxext/prs/7055.json new file mode 100644 index 00000000000..a4d67e86e48 --- /dev/null +++ b/doc/sphinxext/prs/7055.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "b906de881c150850eb0d36082c79add6970b2ca4", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 2 + }, + "doc/python_reference.rst": { + "a": 13, + "d": 0 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 6, + "d": 6 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/nirs/__init__.py": { + "a": 11, + "d": 0 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 7, + "d": 34 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 55, + "d": 0 + }, + "mne/preprocessing/tests/test_beer_lambert_law.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_optical_density.py": { + "a": 1, + "d": 1 + }, + 
"tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 6, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7059.json b/doc/sphinxext/prs/7059.json new file mode 100644 index 00000000000..c3853dbdde6 --- /dev/null +++ b/doc/sphinxext/prs/7059.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "67d03cc0d0554b0d939ddd8ec2d2665add84d4ed", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/pre_install.rst": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7060.json b/doc/sphinxext/prs/7060.json new file mode 100644 index 00000000000..fccccea4246 --- /dev/null +++ b/doc/sphinxext/prs/7060.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "6dda707b0714618c8cbf4484b3aa780b048b4122", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/inverse.rst": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_vector_mne_solution.py": { + "a": 5, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 52, + "d": 24 + }, + "mne/viz/_brain/_brain.py": { + "a": 141, + "d": 21 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 35, + "d": 16 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 11, + "d": 6 + }, + "mne/viz/_brain/utils.py": { + "a": 0, + "d": 120 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 7, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 104, + "d": 15 + }, + "mne/viz/backends/base_renderer.py": { + "a": 12, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 6, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 7 + }, + "tutorials/source-modeling/plot_dipole_orientations.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7061.json b/doc/sphinxext/prs/7061.json new file mode 100644 index 00000000000..5dc97e7a77a --- /dev/null +++ b/doc/sphinxext/prs/7061.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ef7d7c1899911b821312cc5be1558cc2d6ae4816", + "authors": [ + { + "n": "ezemikulan", + "e": "e.mikulan@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7063.json b/doc/sphinxext/prs/7063.json new file mode 100644 index 00000000000..6f7128ace36 --- /dev/null +++ b/doc/sphinxext/prs/7063.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "3091cac2e780a3baf1763fc888da22660e5554a2", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 11, + "d": 11 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_beer_lambert_law.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 0 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 60, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7064.json b/doc/sphinxext/prs/7064.json new file mode 100644 index 00000000000..b8cb1bf9964 --- /dev/null +++ b/doc/sphinxext/prs/7064.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"f784fde5d4ee738eff603bdc3823cfc3ca12b5bd", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/io/nirx/nirx.py": { + "a": 3, + "d": 0 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7066.json b/doc/sphinxext/prs/7066.json new file mode 100644 index 00000000000..b763e014e23 --- /dev/null +++ b/doc/sphinxext/prs/7066.json @@ -0,0 +1,227 @@ +{ + "merge_commit_sha": "1325c1abffa0db8cf57c9cd6410a4c92fcb0586c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 7, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 5, + "d": 4 + }, + "examples/decoding/plot_decoding_spoc_CMC.py": { + "a": 1, + "d": 3 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 31, + "d": 36 + }, + "examples/forward/plot_left_cerebellum_volume_source.py": { + "a": 4, + "d": 4 + }, + "examples/io/plot_read_proj.py": { + "a": 4, + "d": 32 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 6, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 3, + "d": 2 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 2, + "d": 1 + }, + "mne/bem.py": { + "a": 6, + "d": 8 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 13, + "d": 11 + }, + "mne/channels/layout.py": { + "a": 31, + "d": 21 + }, + "mne/channels/montage.py": { + "a": 5, + "d": 6 + }, + "mne/channels/tests/test_layout.py": { + "a": 27, + "d": 22 + }, + "mne/datasets/eegbci/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 20, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 12, + "d": 40 + }, + "mne/defaults.py": { + "a": 3, + "d": 0 + }, + "mne/epochs.py": { + "a": 6, + "d": 4 + }, + "mne/evoked.py": { + "a": 16, + "d": 11 + }, + "mne/externals/doccer.py": { + "a": 5, + "d": 1 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 3, + "d": 2 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 0 + }, + "mne/io/proj.py": { + "a": 13, + "d": 9 + }, + "mne/preprocessing/ica.py": { + "a": 13, + "d": 21 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 3, + "d": 2 + }, + "mne/source_space.py": { + "a": 30, + "d": 20 + }, + "mne/tests/test_source_space.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 8, + "d": 29 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 44, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 109, + "d": 62 + }, + "mne/viz/backends/_utils.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 9, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 48, + "d": 38 + }, + "mne/viz/ica.py": { + "a": 8, + "d": 7 + }, + "mne/viz/montage.py": { + "a": 7, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 15, + "d": 12 + }, + "mne/viz/tests/test_topomap.py": { + "a": 31, + "d": 52 + }, + "mne/viz/topomap.py": { + "a": 356, + "d": 586 + }, + 
"mne/viz/utils.py": { + "a": 37, + "d": 38 + }, + "tutorials/intro/plot_40_sensor_locations.py": { + "a": 150, + "d": 125 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 23, + "d": 11 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 3, + "d": 3 + }, + "tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 5, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/707.json b/doc/sphinxext/prs/707.json new file mode 100644 index 00000000000..8a0e8fcccdc --- /dev/null +++ b/doc/sphinxext/prs/707.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f8ddaeca19ae20a4d73038de41408f9e7e13ec2d", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/parallel.py": { + "a": 49, + "d": 7 + }, + "mne/stats/cluster_level.py": { + "a": 97, + "d": 24 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 28, + "d": 12 + }, + "mne/utils.py": { + "a": 42, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7070.json b/doc/sphinxext/prs/7070.json new file mode 100644 index 00000000000..424c7986375 --- /dev/null +++ b/doc/sphinxext/prs/7070.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "d495b13bb0b4bef05118e3bd75ce6860af641337", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "vpeterson", + "e": "victoriapeterson09@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 21, + "d": 0 + }, + "examples/decoding/plot_ssd_spatial_filters.py": { + "a": 138, + "d": 0 + }, + "mne/decoding/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/ssd.py": { + "a": 288, + "d": 0 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 233, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7074.json b/doc/sphinxext/prs/7074.json new file mode 100644 index 00000000000..93f977980a3 --- /dev/null +++ b/doc/sphinxext/prs/7074.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "50cc4f28f8ed0dfae8e78139ddabe93a80b6d1f9", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7076.json b/doc/sphinxext/prs/7076.json new file mode 100644 index 00000000000..e80293173b9 --- /dev/null +++ b/doc/sphinxext/prs/7076.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "01d964670e502f497670e7653512dc05689a59d4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 0 + }, + "bin/mne": { + "a": 0, + "d": 5 + }, + "doc/install/mne_python.rst": { + "a": 24, + "d": 5 + }, + "mne/gui/_backend.py": { + "a": 17, + "d": 34 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_marker_gui.py": { + "a": 2, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/check.py": { + "a": 19, + "d": 0 + }, + "mne/utils/config.py": { + "a": 4, + "d": 6 + }, + "setup.py": 
{ + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7078.json b/doc/sphinxext/prs/7078.json new file mode 100644 index 00000000000..7e1c5825494 --- /dev/null +++ b/doc/sphinxext/prs/7078.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1e8c30d4969050a9313d720ba1feb41226a4e303", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/install/advanced.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7079.json b/doc/sphinxext/prs/7079.json new file mode 100644 index 00000000000..72252ff0ea1 --- /dev/null +++ b/doc/sphinxext/prs/7079.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "0c4c3b710eda33a2333b9dd0b6c66c03fc65146a", + "authors": [ + { + "n": "Christian O'Reilly", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 5 + }, + "mne/viz/_3d.py": { + "a": 5, + "d": 0 + }, + "mne/viz/_brain/surface.py": { + "a": 5, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 25, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7084.json b/doc/sphinxext/prs/7084.json new file mode 100644 index 00000000000..1b602aae2d1 --- /dev/null +++ b/doc/sphinxext/prs/7084.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ec6df570e59d05ab4c5d35d74f9c153ba1fc7af0", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 13, + "d": 11 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7085.json b/doc/sphinxext/prs/7085.json new file mode 100644 index 00000000000..baa240ec064 --- /dev/null +++ b/doc/sphinxext/prs/7085.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5de659d2aa1dca4a0529e022036c9ff2a40ecc33", + "authors": [ + { + "n": "Britta Westner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan.mccloy@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/references.bib": { + "a": 11, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 0, + "d": 125 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 0, + "d": 132 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 274, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7086.json b/doc/sphinxext/prs/7086.json new file mode 100644 index 00000000000..18be3ec1bc6 --- /dev/null +++ b/doc/sphinxext/prs/7086.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a62d596be99949f104360b4ba794105d35bf94c7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/misc.py": { + "a": 16, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7087.json b/doc/sphinxext/prs/7087.json new file mode 100644 index 00000000000..5945b431d18 --- /dev/null +++ b/doc/sphinxext/prs/7087.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "61b058d9b7387713bf6f165c69230cd475ded4e4", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Alex", + "e": "aprockhill206@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": 
{ + "a": 2, + "d": 0 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 182, + "d": 64 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7089.json b/doc/sphinxext/prs/7089.json new file mode 100644 index 00000000000..54d12d1d199 --- /dev/null +++ b/doc/sphinxext/prs/7089.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3459c84cc7cc76c37356dbf163c7768f03c81182", + "authors": [ + { + "n": "Chun-Hui Li", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/decoding/base.py": { + "a": 7, + "d": 2 + }, + "mne/decoding/tests/test_base.py": { + "a": 105, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/709.json b/doc/sphinxext/prs/709.json new file mode 100644 index 00000000000..267ed50ff2c --- /dev/null +++ b/doc/sphinxext/prs/709.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b94101eb1fe2b393ad076921e0ba9d5830dd818d", + "authors": [ + { + "n": "Mads Jensen", + "e": null + } + ], + "changes": { + "mne/preprocessing/maxfilter.py": { + "a": 16, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7090.json b/doc/sphinxext/prs/7090.json new file mode 100644 index 00000000000..5fcfe3a1cf3 --- /dev/null +++ b/doc/sphinxext/prs/7090.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f7e7bd3d460a609f22cca7166ddef9dd5311acea", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 10, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7091.json b/doc/sphinxext/prs/7091.json new file mode 100644 index 00000000000..3cfe5856f1e --- /dev/null +++ b/doc/sphinxext/prs/7091.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6861595ea11e30db2a4cb1262d90518f1a01bfcf", + "authors": [ + { + "n": "Adam Li", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 10, + "d": 6 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7095.json b/doc/sphinxext/prs/7095.json new file mode 100644 index 00000000000..2e455fabe6e --- /dev/null +++ b/doc/sphinxext/prs/7095.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "1689df918f18a44fd4602b691ea59c781a0c8763", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 3, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_psf_ctf_vertices.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_psf_ctf_vertices_lcmv.py": { + "a": 137, + "d": 0 + }, + "examples/inverse/plot_resolution_metrics.py": { + "a": 5, + "d": 5 + }, + "examples/inverse/plot_resolution_metrics_eegmeg.py": { + "a": 23, + "d": 17 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 104, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + 
"d": 0 + }, + "mne/beamformer/tests/test_resolution_matrix.py": { + "a": 94, + "d": 0 + }, + "mne/cov.py": { + "a": 11, + "d": 4 + }, + "mne/minimum_norm/__init__.py": { + "a": 2, + "d": 3 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 6, + "d": 3 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 12, + "d": 10 + }, + "mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 7, + "d": 7 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7096.json b/doc/sphinxext/prs/7096.json new file mode 100644 index 00000000000..319abe911f7 --- /dev/null +++ b/doc/sphinxext/prs/7096.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "cce705bcfa34226f0f3a1c0abaed94d565539095", + "authors": [ + { + "n": "Adonay Nunes", + "e": "adonay.s.nunes@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_artifact_detection.py": { + "a": 81, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 212, + "d": 0 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 49, + "d": 0 + }, + "mne/tests/test_import_nesting.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7097.json b/doc/sphinxext/prs/7097.json new file mode 100644 index 00000000000..0493481f650 --- /dev/null +++ b/doc/sphinxext/prs/7097.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "76f38bdd351627504618516be29cf03947096a85", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".github/ISSUE_TEMPLATE/bug_report.md": { + "a": 26, + "d": 30 + }, + "CONTRIBUTING.rst": { + "a": 9, + "d": 19 + }, + "doc/_static/diagrams/git_setup.dot": { + "a": 64, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 60, + "d": 39 + }, + "doc/install/mne_python.rst": { + "a": 12, + "d": 1 + }, + "doc/links.inc": { + "a": 1, + "d": 0 + }, + "doc/overview/faq.rst": { + "a": 49, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/710.json b/doc/sphinxext/prs/710.json new file mode 100644 index 00000000000..ebf30a17516 --- /dev/null +++ b/doc/sphinxext/prs/710.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ea0ddc4ac91b067b2200b374e5b860e7065862b2", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/constants.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 37, + "d": 15 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7100.json b/doc/sphinxext/prs/7100.json new file mode 100644 index 00000000000..9e6692109dd --- /dev/null +++ b/doc/sphinxext/prs/7100.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c59c5d959bd13715dc0ae7801ef95e7bc846bb4b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/epochs/plot_10_epochs_overview.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7102.json b/doc/sphinxext/prs/7102.json new file mode 100644 index 00000000000..083662dd538 --- /dev/null +++ b/doc/sphinxext/prs/7102.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "27e3f1d75f3c8605272d9071b5d5a2d77a7d1f22", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/io/plot_read_events.py": { + "a": 2, + "d": 2 + }, + "mne/event.py": { + "a": 2, + "d": 2 + }, + "tutorials/epochs/plot_10_epochs_overview.py": { + "a": 12, + "d": 0 + }, + "tutorials/epochs/plot_20_visualize_epochs.py": { + "a": 210, + "d": 0 + }, + "tutorials/epochs/plot_epoching_and_averaging.py": { + "a": 0, + "d": 166 + }, + "tutorials/epochs/plot_visualize_epochs.py": { + "a": 0, + "d": 94 + }, + "tutorials/evoked/plot_eeg_erp.py": { + "a": 0, + "d": 3 + }, + "tutorials/evoked/plot_visualize_evoked.py": { + "a": 2, + "d": 1 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7103.json b/doc/sphinxext/prs/7103.json new file mode 100644 index 00000000000..2f032091307 --- /dev/null +++ b/doc/sphinxext/prs/7103.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "701312c2f66dbe6910d0845169d259afdfbd814a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/misc.py": { + "a": 37, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7104.json b/doc/sphinxext/prs/7104.json new file mode 100644 index 00000000000..357b2a332a0 --- /dev/null +++ b/doc/sphinxext/prs/7104.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "538597485f8f69b343690cdc5911665b4b98c131", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/ctf/ctf.py": { + "a": 3, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 6, + "d": 3 + }, + "mne/io/ctf/markers.py": { + "a": 16, + "d": 10 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7105.json b/doc/sphinxext/prs/7105.json new file mode 100644 index 00000000000..fc7d38f1335 --- /dev/null +++ b/doc/sphinxext/prs/7105.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "7410696b8897c8782ae293e1c453a43b20197acd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 1 + }, + ".travis.yml": { + "a": 3, + "d": 22 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 4 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/install/mne_python.rst": { + "a": 1, + "d": 1 + }, + "doc/overview/faq.rst": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_sys_info.py": { + "a": 29, + "d": 0 + }, + "mne/commands/tests/test_commands.py": 
{ + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7106.json b/doc/sphinxext/prs/7106.json new file mode 100644 index 00000000000..c588f27da12 --- /dev/null +++ b/doc/sphinxext/prs/7106.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "fedd892e3cfd2e4243061187e164bf10aafe9ffc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/glossary.rst": { + "a": 1, + "d": 1 + }, + "doc/overview/matlab.rst": { + "a": 1, + "d": 1 + }, + "ignore_words.txt": { + "a": 2, + "d": 0 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 3 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7108.json b/doc/sphinxext/prs/7108.json new file mode 100644 index 00000000000..af1a22d2e44 --- /dev/null +++ b/doc/sphinxext/prs/7108.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "15b45a74c762f559f1c25199a67b290a5f18a26e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/coreg.py": { + "a": 4, + "d": 1 + }, + "mne/label.py": { + "a": 73, + "d": 25 + }, + "mne/source_space.py": { + "a": 62, + "d": 43 + }, + "mne/tests/test_coreg.py": { + "a": 46, + "d": 30 + }, + "mne/tests/test_label.py": { + "a": 19, + "d": 11 + }, + "mne/tests/test_source_space.py": { + "a": 38, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7109.json b/doc/sphinxext/prs/7109.json new file mode 100644 index 00000000000..722b427c011 --- /dev/null +++ b/doc/sphinxext/prs/7109.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "de3e189f24cc029db9dab047f46c7deede6a255a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/plot_10_preprocessing_overview.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7110.json b/doc/sphinxext/prs/7110.json new file mode 100644 index 00000000000..358fc227380 --- /dev/null +++ b/doc/sphinxext/prs/7110.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "38973795d3c9c9ba694d74d710ce7a48c053a45b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/rank.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7111.json b/doc/sphinxext/prs/7111.json new file mode 100644 index 00000000000..0a21c7be024 --- /dev/null +++ b/doc/sphinxext/prs/7111.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "da9cd19a9b65e792e2523f159820549ec6082f9d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 26, + "d": 21 + }, + "mne/viz/tests/test_misc.py": { + "a": 28, + "d": 8 + }, + "tutorials/epochs/plot_20_visualize_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7113.json b/doc/sphinxext/prs/7113.json new file mode 100644 
index 00000000000..9de91a2284e --- /dev/null +++ b/doc/sphinxext/prs/7113.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "bb2beaf266254e1c2870e7a4b3cbc53d7cb72214", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 22, + "d": 10 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/decoding/tests/test_base.py": { + "a": 4, + "d": 20 + }, + "mne/epochs.py": { + "a": 4, + "d": 1 + }, + "mne/label.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7117.json b/doc/sphinxext/prs/7117.json new file mode 100644 index 00000000000..695c37ea7ff --- /dev/null +++ b/doc/sphinxext/prs/7117.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9216386526b76e4a6c39d06065b04a1077aa7bdb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 14, + "d": 7 + }, + "mne/tests/test_chpi.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7118.json b/doc/sphinxext/prs/7118.json new file mode 100644 index 00000000000..e3944cab1f5 --- /dev/null +++ b/doc/sphinxext/prs/7118.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c8e1759c75d1cbec43d19b6e49cbb43c8f01d51c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.19.inc": { + "a": 7, + "d": 0 + }, + "doc/conf.py": { + "a": 5, + "d": 0 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 14, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 2 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7119.json b/doc/sphinxext/prs/7119.json new file mode 100644 index 00000000000..0a98fe62f6b --- /dev/null +++ b/doc/sphinxext/prs/7119.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f8358f7bf9920088313c08bd71d7e241f29cf7e9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 16, + "d": 5 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/712.json b/doc/sphinxext/prs/712.json new file mode 100644 index 00000000000..3f09a6aa932 --- /dev/null +++ b/doc/sphinxext/prs/712.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5983b5c44b94a24f292636f1f38f7cd18596052b", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 14, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7120.json b/doc/sphinxext/prs/7120.json new file mode 100644 index 00000000000..d71e117e239 --- /dev/null +++ b/doc/sphinxext/prs/7120.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a8e853ea3328d2d9de437d71161edc4fa729f204", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_lcmv.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7121.json b/doc/sphinxext/prs/7121.json new file mode 100644 index 00000000000..b9632aaafb0 --- /dev/null +++ b/doc/sphinxext/prs/7121.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f1c048a763d6415d0743a248cd79bab089616f67", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 5, + "d": 0 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7122.json b/doc/sphinxext/prs/7122.json new file mode 100644 index 00000000000..9381717e462 --- /dev/null +++ b/doc/sphinxext/prs/7122.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "22dfbb5c55f09b88f595dd5c6d83dac71335df97", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.19.inc": { + "a": 9, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 5, + "d": 0 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 14, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 2 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 4, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 10, + "d": 4 + }, + "mne/io/tests/test_meas_info.py": { + "a": 14, + "d": 2 + }, + "mne/label.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7124.json b/doc/sphinxext/prs/7124.json new file mode 100644 index 00000000000..55916304374 --- /dev/null +++ b/doc/sphinxext/prs/7124.json @@ -0,0 +1,127 @@ +{ + "merge_commit_sha": "b3f22f9e044443e3b448b9e38b1234d9f2dc9380", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 11, + "d": 1 + }, + "mne/annotations.py": { + "a": 129, + "d": 110 + }, + "mne/channels/channels.py": { + "a": 37, + "d": 4 + }, + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 8, + "d": 6 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 17 + }, + "mne/io/base.py": { + "a": 38, + "d": 42 + }, + "mne/io/brainvision/brainvision.py": { + "a": 5, + "d": 10 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 10, + "d": 9 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf/info.py": { + "a": 2, + "d": 3 + }, + "mne/io/ctf/markers.py": { + "a": 3, + "d": 4 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 8, + "d": 6 + }, + "mne/io/edf/edf.py": { + "a": 23, + "d": 21 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 2, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 7, + "d": 6 + }, + "mne/io/kit/kit.py": { + "a": 4, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 52, + "d": 75 + }, + 
"mne/io/tests/test_meas_info.py": { + "a": 48, + "d": 29 + }, + "mne/io/tests/test_raw.py": { + "a": 11, + "d": 16 + }, + "mne/report.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/evoked.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 99, + "d": 44 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 4 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 4, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 29, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 3, + "d": 3 + }, + "tutorials/raw/plot_30_annotate_raw.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7125.json b/doc/sphinxext/prs/7125.json new file mode 100644 index 00000000000..42b052141a3 --- /dev/null +++ b/doc/sphinxext/prs/7125.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ba810eb69b4ebac07cb8fb756a6b2b5ceee901dc", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 8, + "d": 4 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7126.json b/doc/sphinxext/prs/7126.json new file mode 100644 index 00000000000..737e7c0a2e2 --- /dev/null +++ b/doc/sphinxext/prs/7126.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9a7b5e58e52a4d4007a942fa21a72d95a7124d8c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 30, + "d": 13 + }, + "tutorials/epochs/plot_20_visualize_epochs.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7128.json b/doc/sphinxext/prs/7128.json new file mode 100644 index 00000000000..c0cbf592ab8 --- /dev/null +++ b/doc/sphinxext/prs/7128.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "558802eff2315bf60948c8087c9e983c577435fb", + "authors": [ + { + "n": "Henrich Kolkhorst", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 27, + "d": 2 + }, + "examples/preprocessing/plot_xdawn_denoising.py": { + "a": 8, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 97, + "d": 3 + }, + "mne/preprocessing/xdawn.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7129.json b/doc/sphinxext/prs/7129.json new file mode 100644 index 00000000000..d219b09a2cb --- /dev/null +++ b/doc/sphinxext/prs/7129.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "49cb69d72a57124709b5ed104b9d6255942cd939", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 9, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 22, + "d": 10 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/mixin.py": { + "a": 14, + "d": 2 + }, + "mne/decoding/search_light.py": { + "a": 8, + "d": 9 + }, + "mne/decoding/time_frequency.py": { + "a": 0, + 
"d": 1 + }, + "mne/decoding/transformer.py": { + "a": 7, + "d": 7 + }, + "mne/dipole.py": { + "a": 3, + "d": 6 + }, + "mne/epochs.py": { + "a": 14, + "d": 10 + }, + "mne/event.py": { + "a": 0, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 10, + "d": 11 + }, + "mne/io/base.py": { + "a": 19, + "d": 16 + }, + "mne/io/proj.py": { + "a": 8, + "d": 7 + }, + "mne/label.py": { + "a": 6, + "d": 4 + }, + "mne/morph.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 20, + "d": 17 + }, + "mne/report.py": { + "a": 3, + "d": 10 + }, + "mne/simulation/source.py": { + "a": 3, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 43, + "d": 30 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 37, + "d": 7 + }, + "mne/time_frequency/csd.py": { + "a": 7, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 11, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/713.json b/doc/sphinxext/prs/713.json new file mode 100644 index 00000000000..9be1e4eeedb --- /dev/null +++ b/doc/sphinxext/prs/713.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8b5d827b0d9dd49fbab53efc8f05b67e8e07bc8e", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 17, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7130.json b/doc/sphinxext/prs/7130.json new file mode 100644 index 00000000000..0e352a9e128 --- /dev/null +++ b/doc/sphinxext/prs/7130.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "74750eb88a3068790ad62400b309d11aefaa0c82", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "CODE_OF_CONDUCT.md": { + "a": 0, + "d": 46 + }, + "CONTRIBUTING.rst": { + "a": 1, + "d": 1 + }, + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7131.json b/doc/sphinxext/prs/7131.json new file mode 100644 index 00000000000..652b7b55f9d --- /dev/null +++ b/doc/sphinxext/prs/7131.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9d2abf35b6983ceaf0016d68b7c63d49e8b7a093", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 19, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7132.json b/doc/sphinxext/prs/7132.json new file mode 100644 index 00000000000..f8284e35d03 --- /dev/null +++ b/doc/sphinxext/prs/7132.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "71a854d8eafe21676e545d8286b51422f34b26c3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7133.json b/doc/sphinxext/prs/7133.json new file mode 100644 index 00000000000..51438f44add --- /dev/null +++ b/doc/sphinxext/prs/7133.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "68a7757c074e5fa51fdd13cca5bfd1559c0e2658", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/chpi.py": { + "a": 19, + "d": 14 + }, + 
"mne/fixes.py": { + "a": 14, + "d": 2 + }, + "mne/forward/_compute_forward.py": { + "a": 93, + "d": 59 + }, + "mne/surface.py": { + "a": 38, + "d": 40 + }, + "mne/transforms.py": { + "a": 13, + "d": 7 + }, + "mne/utils/_testing.py": { + "a": 5, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7135.json b/doc/sphinxext/prs/7135.json new file mode 100644 index 00000000000..d787960a30e --- /dev/null +++ b/doc/sphinxext/prs/7135.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a41ca39fcda5784cf2cd099de0f240b24b0f8cfa", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 40, + "d": 24 + }, + "mne/io/tests/test_meas_info.py": { + "a": 7, + "d": 0 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7138.json b/doc/sphinxext/prs/7138.json new file mode 100644 index 00000000000..06a005602d8 --- /dev/null +++ b/doc/sphinxext/prs/7138.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "a3a71b7d6606e8393f6e26f6a3c934b3084b5e03", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 5, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/overview/datasets_index.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 5, + "d": 5 + }, + "mne/utils/mixin.py": { + "a": 9, + "d": 8 + }, + "tutorials/epochs/plot_10_epochs_overview.py": { + "a": 6, + "d": 3 + }, + "tutorials/epochs/plot_30_epochs_metadata.py": { + "a": 177, + "d": 0 + }, + "tutorials/epochs/plot_metadata_epochs.py": { + "a": 0, + "d": 154 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/714.json b/doc/sphinxext/prs/714.json new file mode 100644 index 00000000000..984543fee29 --- /dev/null +++ b/doc/sphinxext/prs/714.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c3531e5524c66c711275ebb7f2d3a720a31e36b5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 36, + "d": 19 + }, + "mne/tests/test_label.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7142.json b/doc/sphinxext/prs/7142.json new file mode 100644 index 00000000000..72d137ad487 --- /dev/null +++ b/doc/sphinxext/prs/7142.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8ba88e2388d2ba5ff8e997401bfeac5b255d88a7", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 4 + }, + "tutorials/epochs/plot_20_visualize_epochs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7144.json b/doc/sphinxext/prs/7144.json new file mode 100644 index 00000000000..5725c9efe69 --- /dev/null +++ b/doc/sphinxext/prs/7144.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "02173651e9986977899888d383319fe10c43eba8", + "authors": [ + { + "n": "Yu-Han Luo", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 0 + }, + "mne/bem.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7145.json b/doc/sphinxext/prs/7145.json new file mode 100644 index 00000000000..e24d61ca259 --- /dev/null +++ b/doc/sphinxext/prs/7145.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": 
"bbb57464b299824c5417beb3e9fcd5e9805f293f", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 4, + "d": 3 + }, + "mne/viz/tests/test_topomap.py": { + "a": 17, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7146.json b/doc/sphinxext/prs/7146.json new file mode 100644 index 00000000000..8b718e50519 --- /dev/null +++ b/doc/sphinxext/prs/7146.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "412ce469f55528b9657847fd3aea78aa2b1709c2", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7147.json b/doc/sphinxext/prs/7147.json new file mode 100644 index 00000000000..9bb6965cd66 --- /dev/null +++ b/doc/sphinxext/prs/7147.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "6c73272e1f057e4cdfb3e43063036f3d459c5d27", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 19, + "d": 10 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 4, + "d": 4 + }, + "mne/viz/backends/_pyvista.py": { + "a": 13, + "d": 6 + }, + "mne/viz/backends/base_renderer.py": { + "a": 12, + "d": 4 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7148.json b/doc/sphinxext/prs/7148.json new file mode 100644 index 00000000000..0b0cb6d9f5e --- /dev/null +++ b/doc/sphinxext/prs/7148.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9b03152ebb9cc312e418f496d9c929ed01b902d2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/715.json b/doc/sphinxext/prs/715.json new file mode 100644 index 00000000000..a3eae34fbfa --- /dev/null +++ b/doc/sphinxext/prs/715.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "368e62b5361caa0514d7f8988b9139118a6d0e53", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "mne/time_frequency/csd.py": { + "a": 5, + "d": 0 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7150.json b/doc/sphinxext/prs/7150.json new file mode 100644 index 00000000000..325c8a8090e --- /dev/null +++ b/doc/sphinxext/prs/7150.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cd0eff12535880cd7a6551ad4ceeff771ea8b3a9", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 0 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7151.json b/doc/sphinxext/prs/7151.json new file mode 100644 index 00000000000..e02f21eda3d --- /dev/null +++ b/doc/sphinxext/prs/7151.json @@ -0,0 +1,15 @@ +{ 
+ "merge_commit_sha": "724878c5f2baa2c54ce6eaf1bdb39a47d82475b7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7153.json b/doc/sphinxext/prs/7153.json new file mode 100644 index 00000000000..13615b29bfb --- /dev/null +++ b/doc/sphinxext/prs/7153.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "9cb664813f04be66d758c4ad6321a2a58924bfc8", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 173, + "d": 22 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 228, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 34, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 35, + "d": 6 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7155.json b/doc/sphinxext/prs/7155.json new file mode 100644 index 00000000000..f99ee24f9f3 --- /dev/null +++ b/doc/sphinxext/prs/7155.json @@ -0,0 +1,151 @@ +{ + "merge_commit_sha": "361fa5fde83c192f705b9a0dd792858237c4b0b1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 4, + "d": 7 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 4, + "d": 5 + }, + "mne/externals/tqdm/__init__.py": { + "a": 12, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/__init__.py": { + "a": 40, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/__main__.py": { + "a": 2, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/_main.py": { + "a": 7, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/_monitor.py": { + "a": 99, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/_tqdm.py": { + "a": 7, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/_tqdm_gui.py": { + "a": 7, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/_tqdm_notebook.py": { + "a": 7, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/_tqdm_pandas.py": { + "a": 46, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/_utils.py": { + "a": 6, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/_version.py": { + "a": 59, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/auto.py": { + "a": 6, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/autonotebook.py": { + "a": 18, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/cli.py": { + "a": 228, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/contrib/__init__.py": { + "a": 10, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/gui.py": { + "a": 321, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/notebook.py": { + "a": 261, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/std.py": { + "a": 1464, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/tqdm.1": { + "a": 255, + "d": 0 + }, + "mne/externals/tqdm/_tqdm/utils.py": { + "a": 345, + "d": 0 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 3 + }, + "mne/parallel.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/flat.py": { + "a": 26, + "d": 26 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/utils/fetching.py": { + "a": 5, + "d": 16 + }, + "mne/utils/progressbar.py": { + "a": 73, 
+ "d": 167 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 33, + "d": 23 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7156.json b/doc/sphinxext/prs/7156.json new file mode 100644 index 00000000000..1b2d9eaa45d --- /dev/null +++ b/doc/sphinxext/prs/7156.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "7de4420d5dd79866719b52d65b88f41e3ab6f96c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 17, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 112, + "d": 32 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 207, + "d": 50 + }, + "mne/proj.py": { + "a": 0, + "d": 3 + }, + "mne/tests/test_proj.py": { + "a": 0, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7157.json b/doc/sphinxext/prs/7157.json new file mode 100644 index 00000000000..3cf1907e34b --- /dev/null +++ b/doc/sphinxext/prs/7157.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "47d1cd50392fbb6f57b7df17573080fb8f8e4b99", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 7, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 10, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7158.json b/doc/sphinxext/prs/7158.json new file mode 100644 index 00000000000..d975bf906c9 --- /dev/null +++ b/doc/sphinxext/prs/7158.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "642f4e13fedd1879bf6cad6c6d13ed28ebe68cd9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_transforms.py": { + "a": 12, + "d": 1 + }, + "mne/transforms.py": { + "a": 44, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/716.json b/doc/sphinxext/prs/716.json new file mode 100644 index 00000000000..3fa3fb2fc7a --- /dev/null +++ b/doc/sphinxext/prs/716.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "608e33bd310045ce30725e573aa6f0cd7b091b9a", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 3, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 11, + "d": 9 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 79, + "d": 0 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 161, + "d": 77 + }, + "mne/beamformer/_lcmv.py": { + "a": 60, + "d": 42 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 94, + "d": 24 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 52, + "d": 28 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 40, + "d": 21 + }, + "mne/viz.py": { + "a": 11, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7160.json b/doc/sphinxext/prs/7160.json new file mode 100644 index 00000000000..1f7a602fa14 --- /dev/null +++ b/doc/sphinxext/prs/7160.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "eba30878532e1851b74bf99009c1582ed4591e16", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/channel_interpolation.rst": { + "a": 2, + "d": 2 + }, + "doc/_includes/inverse.rst": { + "a": 226, + "d": 129 + }, + "doc/_includes/ssp.rst": { + "a": 2, + "d": 2 + }, + "doc/overview/implementation.rst": { + "a": 1, + "d": 1 + }, + "doc/overview/matlab.rst": { + "a": 1, + "d": 1 + }, + "doc/references.bib": { + "a": 11, + "d": 12 + }, + "mne/minimum_norm/inverse.py": { + "a": 8, + "d": 15 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 12, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7163.json b/doc/sphinxext/prs/7163.json new file mode 100644 index 00000000000..fde1e678840 --- /dev/null +++ b/doc/sphinxext/prs/7163.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "241e11944787476271d45bb9c6bc946bf731104a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 5, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7164.json b/doc/sphinxext/prs/7164.json new file mode 100644 index 00000000000..8ecc4e460ef --- /dev/null +++ b/doc/sphinxext/prs/7164.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c4d352adc86cff78aca589d7bae80694847d023b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 11, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7165.json b/doc/sphinxext/prs/7165.json new file mode 100644 index 00000000000..782354c68dd --- /dev/null +++ b/doc/sphinxext/prs/7165.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6f1c79f1cfaa237e4182d283950706bbe7c841f3", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7166.json b/doc/sphinxext/prs/7166.json new file mode 100644 index 00000000000..bbf59c0e9ee --- /dev/null +++ b/doc/sphinxext/prs/7166.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "96d6eef7f050de2640c5129c92c130ac70ac1071", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 14, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7167.json b/doc/sphinxext/prs/7167.json new file mode 100644 index 00000000000..993f0d94670 --- /dev/null +++ b/doc/sphinxext/prs/7167.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "13d52a7b347aeb75ff2d5977b6e7d6c6aeeb5831", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7168.json b/doc/sphinxext/prs/7168.json new file mode 100644 index 
00000000000..fb6fc45f68b --- /dev/null +++ b/doc/sphinxext/prs/7168.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "4701f2ba93c4ba53519ca9edc3f8c84a77f7a26c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 39, + "d": 20 + }, + "mne/beamformer/_dics.py": { + "a": 2, + "d": 22 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 14 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 3, + "d": 5 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 52, + "d": 48 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7171.json b/doc/sphinxext/prs/7171.json new file mode 100644 index 00000000000..89ba8ff3cc3 --- /dev/null +++ b/doc/sphinxext/prs/7171.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1403b54396618888e4475a3b32026ebe4a0c7adc", + "authors": [ + { + "n": "Nicolas Barascud", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/annotations.py": { + "a": 100, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 79, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7172.json b/doc/sphinxext/prs/7172.json new file mode 100644 index 00000000000..7cf24328b02 --- /dev/null +++ b/doc/sphinxext/prs/7172.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0b02ac91ae23fe50459bc11343c6aa2cae6d7c9c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7173.json b/doc/sphinxext/prs/7173.json new file mode 100644 index 00000000000..d6bef57b580 --- /dev/null +++ b/doc/sphinxext/prs/7173.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1672c60776fb71eeccc19ddd80a404f8763cc432", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 2, + "d": 3 + }, + "mne/fixes.py": { + "a": 34, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7174.json b/doc/sphinxext/prs/7174.json new file mode 100644 index 00000000000..65b4cef8b0a --- /dev/null +++ b/doc/sphinxext/prs/7174.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c437ed0bbcef3b6bf3a8beab2221ab5bff47722f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/reset-syntax.css": { + "a": 0, + "d": 9 + }, + "doc/_static/style.css": { + "a": 19, + "d": 1 + }, + "doc/_templates/layout.html": { + "a": 0, + "d": 2 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7175.json b/doc/sphinxext/prs/7175.json new file mode 100644 index 00000000000..de5bee19d50 --- /dev/null +++ b/doc/sphinxext/prs/7175.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2b75319adaa5711efcb5f5a8d6eca9484c359138", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 4 + }, + 
"mne/channels/montage.py": { + "a": 48, + "d": 46 + }, + "mne/channels/tests/test_montage.py": { + "a": 27, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7176.json b/doc/sphinxext/prs/7176.json new file mode 100644 index 00000000000..c12c1c01a11 --- /dev/null +++ b/doc/sphinxext/prs/7176.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d4aa1d48f1fa7c1199553101989d6311075cad73", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "doc/install/mne_python.rst": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7177.json b/doc/sphinxext/prs/7177.json new file mode 100644 index 00000000000..4b38f7d28a2 --- /dev/null +++ b/doc/sphinxext/prs/7177.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "69d271e942bf1397ccb0e8832873831a98cdbe83", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/morph.py": { + "a": 5, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 9, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7178.json b/doc/sphinxext/prs/7178.json new file mode 100644 index 00000000000..b3195f578f0 --- /dev/null +++ b/doc/sphinxext/prs/7178.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "7a825b6bf8575d6440b262d4b03f0fa2407fa363", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 14, + "d": 18 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 4, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 8, + "d": 3 + }, + "mne/io/tests/test_meas_info.py": { + "a": 3, + "d": 2 + }, + "mne/viz/montage.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/718.json b/doc/sphinxext/prs/718.json new file mode 100644 index 00000000000..83a396307b7 --- /dev/null +++ b/doc/sphinxext/prs/718.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6cd7d77fa2fef767f7514a884c3778a721aa870d", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/event.py": { + "a": 56, + "d": 36 + }, + "mne/tests/test_event.py": { + "a": 21, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7180.json b/doc/sphinxext/prs/7180.json new file mode 100644 index 00000000000..6cdf487085f --- /dev/null +++ b/doc/sphinxext/prs/7180.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9f3b76f0c9ef71e997b522492d28d0a09c76c4bd", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "tutorials/epochs/plot_10_epochs_overview.py": { + "a": 16, + "d": 11 + }, + "tutorials/epochs/plot_40_epochs_to_data_frame.py": { + "a": 155, + "d": 0 + }, + "tutorials/epochs/plot_epochs_to_data_frame.py": { + "a": 0, + "d": 252 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_erp.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7181.json b/doc/sphinxext/prs/7181.json new file 
mode 100644 index 00000000000..808c6603f33 --- /dev/null +++ b/doc/sphinxext/prs/7181.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ce15d3f918587dc885e54e444244b2becbd5b75a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 16, + "d": 2 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7182.json b/doc/sphinxext/prs/7182.json new file mode 100644 index 00000000000..162063aad08 --- /dev/null +++ b/doc/sphinxext/prs/7182.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "c13590ab42ea50ece798c5b6fe7874a0796c2b4e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_receptive_field_mtrf.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_3d_to_2d.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 43 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 3 + }, + "mne/channels/tests/test_montage.py": { + "a": 8, + "d": 10 + }, + "mne/datasets/limo/limo.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 4 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 46, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7183.json b/doc/sphinxext/prs/7183.json new file mode 100644 index 00000000000..a43a646f059 --- /dev/null +++ b/doc/sphinxext/prs/7183.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "67d5c87d7ab7fc6b49a68c75f3edbe49b80e68de", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/annotations.py": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7184.json b/doc/sphinxext/prs/7184.json new file mode 100644 index 00000000000..b9fb6981404 --- /dev/null +++ b/doc/sphinxext/prs/7184.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1ec8f801893c05ff1f5dfd01a296d271cbeacd32", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_montage.py": { + "a": 4, + "d": 2 + }, + "tutorials/misc/plot_ecog.py": { + "a": 1, + "d": 1 + }, + "tutorials/simulation/plot_creating_data_structures.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7186.json b/doc/sphinxext/prs/7186.json new file mode 100644 index 00000000000..cb8c11a0338 --- /dev/null +++ b/doc/sphinxext/prs/7186.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e49718902918d61108cc356c69a51c5e066909b3", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 3, + "d": 33 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7187.json b/doc/sphinxext/prs/7187.json new file mode 100644 index 00000000000..c3d29da7741 --- /dev/null +++ b/doc/sphinxext/prs/7187.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0527c80feeb1c81de0ca3931fc581c7f6f4b2004", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7188.json b/doc/sphinxext/prs/7188.json new file mode 100644 index 00000000000..7ef96443ee3 --- /dev/null +++ b/doc/sphinxext/prs/7188.json 
@@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "740638148806a3d73214ab6505a57a571f23204a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 58, + "d": 57 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 90, + "d": 16 + }, + "mne/viz/_brain/colormap.py": { + "a": 48, + "d": 33 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 99, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/719.json b/doc/sphinxext/prs/719.json new file mode 100644 index 00000000000..71539edc21a --- /dev/null +++ b/doc/sphinxext/prs/719.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "72f5de257385b9b5f423ea4243ac6a7130306496", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 12, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 35, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7190.json b/doc/sphinxext/prs/7190.json new file mode 100644 index 00000000000..06a331263ac --- /dev/null +++ b/doc/sphinxext/prs/7190.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9d512217cf367f0b71d69602c91839dfd96298cf", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7191.json b/doc/sphinxext/prs/7191.json new file mode 100644 index 00000000000..f6412632f2b --- /dev/null +++ b/doc/sphinxext/prs/7191.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9a2143313006d078bfeef0c69172aaef749b0263", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/utils.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7193.json b/doc/sphinxext/prs/7193.json new file mode 100644 index 00000000000..e47663bcbfd --- /dev/null +++ b/doc/sphinxext/prs/7193.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6f0847ef6ac98348a313bef03406e1e6b462fca4", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/io/proj.py": { + "a": 15, + "d": 9 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 40, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 39, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7194.json b/doc/sphinxext/prs/7194.json new file mode 100644 index 00000000000..c1e0b8cd40a --- /dev/null +++ b/doc/sphinxext/prs/7194.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bf94f340c6f8da8f48e23af3cec7f853fd6cdca3", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 25, + "d": 9 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 26, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7196.json b/doc/sphinxext/prs/7196.json new file mode 100644 index 00000000000..7d8fdf86e38 --- /dev/null +++ b/doc/sphinxext/prs/7196.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"44b44b1d324c95a36d51e76e7fc369d191118b3b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7197.json b/doc/sphinxext/prs/7197.json new file mode 100644 index 00000000000..c0445a27234 --- /dev/null +++ b/doc/sphinxext/prs/7197.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "16503f0169f8c7246ea14d56003bb09b0ea2c298", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7199.json b/doc/sphinxext/prs/7199.json new file mode 100644 index 00000000000..957d280b474 --- /dev/null +++ b/doc/sphinxext/prs/7199.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f86caafd75130fa974519b9bc859a43289d98f90", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/72.json b/doc/sphinxext/prs/72.json new file mode 100644 index 00000000000..7214230a7bc --- /dev/null +++ b/doc/sphinxext/prs/72.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "16afe1b8739c8d0f692b796f416eaf40b937b534", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 66, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/720.json b/doc/sphinxext/prs/720.json new file mode 100644 index 00000000000..c1ee00aecbf --- /dev/null +++ b/doc/sphinxext/prs/720.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e5c12b35d7a34ec5ebac73f96e7d5865e83a9883", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "mne/fiff/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 85, + "d": 0 + }, + "mne/fiff/tests/data/test_chpi_raw_hp.txt": { + "a": 5, + "d": 0 + }, + "mne/fiff/tests/data/test_chpi_raw_sss.fif": { + "a": 0, + "d": 0 + }, + "mne/fiff/tests/test_raw.py": { + "a": 21, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7200.json b/doc/sphinxext/prs/7200.json new file mode 100644 index 00000000000..f9226960483 --- /dev/null +++ b/doc/sphinxext/prs/7200.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a35f8cf303de97ebb9c90cbabe67ec70c61f3985", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 25, + "d": 6 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 104, + "d": 32 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 21, + "d": 4 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7202.json b/doc/sphinxext/prs/7202.json new file mode 100644 index 00000000000..0b0788f8ae0 --- /dev/null +++ b/doc/sphinxext/prs/7202.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "68bed7e43adb1145dad4c3c0a769fa8785dcbe6a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 3 + }, + ".travis.yml": { + 
"a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7205.json b/doc/sphinxext/prs/7205.json new file mode 100644 index 00000000000..d960cf8e447 --- /dev/null +++ b/doc/sphinxext/prs/7205.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "634e30df5fdc644684a15c43330a2abfdb5e800c", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/channels/data/neighbors/KIT-UMD-4_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/io/kit/constants.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7206.json b/doc/sphinxext/prs/7206.json new file mode 100644 index 00000000000..9fc4825cfd6 --- /dev/null +++ b/doc/sphinxext/prs/7206.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "d2a5660db534c2d67af514b3d89b08abb67aa696", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 62, + "d": 3 + }, + "mne/evoked.py": { + "a": 53, + "d": 4 + }, + "mne/io/base.py": { + "a": 59, + "d": 189 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 4, + "d": 4 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 31, + "d": 11 + }, + "mne/source_estimate.py": { + "a": 58, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 53, + "d": 28 + }, + "mne/tests/test_evoked.py": { + "a": 36, + "d": 13 + }, + "mne/tests/test_source_estimate.py": { + "a": 32, + "d": 15 + }, + "mne/utils/__init__.py": { + "a": 3, + "d": 1 + }, + "mne/utils/check.py": { + "a": 29, + "d": 7 + }, + "mne/utils/dataframe.py": { + "a": 88, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 61, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7207.json b/doc/sphinxext/prs/7207.json new file mode 100644 index 00000000000..23e2600dbab --- /dev/null +++ b/doc/sphinxext/prs/7207.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b377cfa96033bcff3f2d69c95d9a88eac6bbafe4", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7214.json b/doc/sphinxext/prs/7214.json new file mode 100644 index 00000000000..b43329b80e2 --- /dev/null +++ b/doc/sphinxext/prs/7214.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3b304eb2871e523d6b183b0603b02149841160c8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7215.json b/doc/sphinxext/prs/7215.json new file mode 100644 index 00000000000..129da8402bc --- /dev/null +++ b/doc/sphinxext/prs/7215.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "b15366d5d30805775f5849f223d058e2a59ec37f", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 10, + "d": 0 + }, + "mne/preprocessing/nirs/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 2, + "d": 32 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 65, + "d": 0 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 30, + "d": 0 + }, + "mne/preprocessing/tests/test_scalp_coupling_index.py": 
{ + "a": 60, + "d": 0 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7218.json b/doc/sphinxext/prs/7218.json new file mode 100644 index 00000000000..33a10b44dd1 --- /dev/null +++ b/doc/sphinxext/prs/7218.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "32291b774f786b358e5cb63d7c485fa21c5a7301", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 3, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 52, + "d": 1 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 40, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7219.json b/doc/sphinxext/prs/7219.json new file mode 100644 index 00000000000..928aaa74af2 --- /dev/null +++ b/doc/sphinxext/prs/7219.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2b579cb676d317d8c1ec1817583f62087bd76887", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 120, + "d": 110 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 116, + "d": 45 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 20, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7220.json b/doc/sphinxext/prs/7220.json new file mode 100644 index 00000000000..c331895aee9 --- /dev/null +++ b/doc/sphinxext/prs/7220.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "726086f29612d0fb9911196ca5127610f9e57dce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 16, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7222.json b/doc/sphinxext/prs/7222.json new file mode 100644 index 00000000000..461c0626c78 --- /dev/null +++ b/doc/sphinxext/prs/7222.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "01b90ceef3f7714d63d65e79dec860e93b68b6c0", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7223.json b/doc/sphinxext/prs/7223.json new file mode 100644 index 00000000000..8023ffca236 --- /dev/null +++ b/doc/sphinxext/prs/7223.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "82087a212796e9b607493391d03da636f57f0d20", + "authors": [ + { + "n": "FuTe Wong", + "e": "zuxfoucault@gmail.com" + } + ], + "changes": { + "doc/_includes/bem_model.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7224.json b/doc/sphinxext/prs/7224.json new file mode 100644 index 00000000000..ace70a5ceef --- /dev/null +++ b/doc/sphinxext/prs/7224.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1ef6ebeccb4361da5ea18a4a35f731cd9a1bfc74", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 5, + "d": 0 + }, + "examples/visualization/plot_montage.py": { + "a": 1, + "d": 1 + }, + 
"mne/channels/_standard_montage_utils.py": { + "a": 19, + "d": 28 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7226.json b/doc/sphinxext/prs/7226.json new file mode 100644 index 00000000000..218279e3a29 --- /dev/null +++ b/doc/sphinxext/prs/7226.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a1dea207a0c0cb00f255a705311329822a15ef1a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 20, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7227.json b/doc/sphinxext/prs/7227.json new file mode 100644 index 00000000000..063e75fb015 --- /dev/null +++ b/doc/sphinxext/prs/7227.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "f6e036d8191b00fc407cdcbc4965f71d84a9878d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 30, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 144, + "d": 68 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 16, + "d": 11 + }, + "mne/viz/backends/_pyvista.py": { + "a": 20, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 37, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/723.json b/doc/sphinxext/prs/723.json new file mode 100644 index 00000000000..bab5ef3606b --- /dev/null +++ b/doc/sphinxext/prs/723.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "73b5ba03e827aab59346109cc0995e95a555b041", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7231.json b/doc/sphinxext/prs/7231.json new file mode 100644 index 00000000000..62ca975d38e --- /dev/null +++ b/doc/sphinxext/prs/7231.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "291d8529819a454b26d4056f1f3fc069c4e8c351", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7236.json b/doc/sphinxext/prs/7236.json new file mode 100644 index 00000000000..9b0dbdbdfd8 --- /dev/null +++ b/doc/sphinxext/prs/7236.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "328e026795b787cbe6acd7ad2a8ea1535bad28a3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7237.json b/doc/sphinxext/prs/7237.json new file mode 100644 index 00000000000..6841b3f290a --- /dev/null +++ b/doc/sphinxext/prs/7237.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c45aec9bf94cadc97f7ccaaec0a6c06a12750f1c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/overview/faq.rst": { + "a": 76, + "d": 2 + }, + "mne/bem.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7238.json b/doc/sphinxext/prs/7238.json new file mode 100644 index 00000000000..b9cc944ddb9 --- /dev/null +++ b/doc/sphinxext/prs/7238.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2147495a620c0e6008ff7d34dcfdf5490f8d323a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/724.json b/doc/sphinxext/prs/724.json new file mode 100644 index 00000000000..cb7d522b212 --- /dev/null +++ b/doc/sphinxext/prs/724.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c5d3f9c23aa91342d0be1f6eb70bbf174be649e6", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 36, + "d": 50 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 16, + "d": 2 + }, + "mne/utils.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7240.json b/doc/sphinxext/prs/7240.json new file mode 100644 index 00000000000..3647cf68287 --- /dev/null +++ b/doc/sphinxext/prs/7240.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "89f453882476d3f701642ed46271e2147c0edc92", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7242.json b/doc/sphinxext/prs/7242.json new file mode 100644 index 00000000000..ac1d0adb4a3 --- /dev/null +++ b/doc/sphinxext/prs/7242.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1bab4a5a0637a04769272dd3a0f75e1661f63f1d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 4 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7245.json b/doc/sphinxext/prs/7245.json new file mode 100644 index 00000000000..f63c4ca01b2 --- /dev/null +++ b/doc/sphinxext/prs/7245.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8b3ef2ddddd0e1e351454c987df13cfc7be36f6b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/dipole.py": { + "a": 140, + "d": 44 + }, + "mne/tests/test_dipole.py": { + "a": 65, + "d": 3 + }, + "setup.cfg": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7246.json b/doc/sphinxext/prs/7246.json new file mode 100644 index 00000000000..a4d3e46e8ec --- /dev/null +++ b/doc/sphinxext/prs/7246.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "17c5670b0f52243ee9e250f93250fd61a0d8edac", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/overview/index.rst": { + "a": 1, + "d": 0 + }, + "doc/overview/roadmap.rst": { + "a": 177, + "d": 0 + }, + "doc/sphinxext/gh_substitutions.py": { + "a": 30, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7247.json b/doc/sphinxext/prs/7247.json new file mode 100644 index 00000000000..e46338a7024 --- /dev/null +++ b/doc/sphinxext/prs/7247.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "1a36309bcef356f06db8f51fd206afb1b4a30ec5", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 6, + "d": 3 + }, + "mne/source_space.py": { + "a": 4, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 32, + "d": 6 + }, + "mne/viz/_brain/_brain.py": { + "a": 27, + "d": 25 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 234, + "d": 8 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 117, + "d": 25 + }, + "mne/viz/backends/_pyvista.py": { + "a": 46, + "d": 17 + }, + "mne/viz/backends/renderer.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7249.json b/doc/sphinxext/prs/7249.json new 
file mode 100644 index 00000000000..eaeca5b07b3 --- /dev/null +++ b/doc/sphinxext/prs/7249.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "91811222a9f271f6f3c366998a43f8512a2f2c10", + "authors": [ + { + "n": "FuTe Wong", + "e": "zuxfoucault@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7252.json b/doc/sphinxext/prs/7252.json new file mode 100644 index 00000000000..47e6b18dbbf --- /dev/null +++ b/doc/sphinxext/prs/7252.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c2029d684cdc12daa7542bf9a02a1907ecfef179", + "authors": [ + { + "n": "Sebastian Major", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 11, + "d": 6 + }, + "mne/io/brainvision/tests/data/test_mixed_lowpass.vhdr": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7253.json b/doc/sphinxext/prs/7253.json new file mode 100644 index 00000000000..4a00bffd49e --- /dev/null +++ b/doc/sphinxext/prs/7253.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0e664011756a5e2f59a2dc6cc7c0327f3eaa835d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 3, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7256.json b/doc/sphinxext/prs/7256.json new file mode 100644 index 00000000000..4972249c9a0 --- /dev/null +++ b/doc/sphinxext/prs/7256.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "837d63ba9f9a43ee229306c508cad4f2eb2320cc", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7257.json b/doc/sphinxext/prs/7257.json new file mode 100644 index 00000000000..e4b3707ca68 --- /dev/null +++ b/doc/sphinxext/prs/7257.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "7f6120bf020ec39dfb58a5f172f5a9c1984093cd", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 195, + "d": 36 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 141, + "d": 9 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 27, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 2, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7258.json b/doc/sphinxext/prs/7258.json new file mode 100644 index 00000000000..0ce5d6973af --- /dev/null +++ b/doc/sphinxext/prs/7258.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "96911d657bee2077a04b8e35d32c816ef74135f3", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "examples/visualization/plot_evoked_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": 
{ + "a": 2, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7261.json b/doc/sphinxext/prs/7261.json new file mode 100644 index 00000000000..7045118e118 --- /dev/null +++ b/doc/sphinxext/prs/7261.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "634f11d9c220b3725f9eb72cd9cb254f4fc4d5cb", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7262.json b/doc/sphinxext/prs/7262.json new file mode 100644 index 00000000000..e7a19c643e7 --- /dev/null +++ b/doc/sphinxext/prs/7262.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "7b1918b0c46a8e5a8d902276552672c20ec8f2d6", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/curry/curry.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 25, + "d": 71 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 3, + "d": 7 + }, + "mne/io/utils.py": { + "a": 0, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7263.json b/doc/sphinxext/prs/7263.json new file mode 100644 index 00000000000..9a9b91f3a2d --- /dev/null +++ b/doc/sphinxext/prs/7263.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bd3d30cb1448a32fe9d6fe824c0d1265054f4bd1", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 10 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 47, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7264.json b/doc/sphinxext/prs/7264.json new file mode 100644 index 00000000000..df7b8495600 --- /dev/null +++ b/doc/sphinxext/prs/7264.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d977b5a87d8d6aaf0bcc5368fd0836c2c14d7c66", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/decoding/search_light.py": { + "a": 4, + "d": 5 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7265.json b/doc/sphinxext/prs/7265.json new file mode 100644 index 00000000000..10af597fbe5 --- /dev/null +++ b/doc/sphinxext/prs/7265.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "845e5d0cf764263d93a433a8fd0484f99c24d62f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/decoding/base.py": { + "a": 15, + "d": 3 + }, + "mne/decoding/tests/test_base.py": { + "a": 132, + "d": 27 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/transformer.py": { + "a": 17, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7266.json b/doc/sphinxext/prs/7266.json new file mode 100644 index 00000000000..64030400977 --- /dev/null +++ b/doc/sphinxext/prs/7266.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "03eaec1c3bd1a11b30d052b41d8452ca3cdda58c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"mne/commands/tests/test_commands.py": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 0, + "d": 2 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 2, + "d": 3 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 34, + "d": 14 + }, + "mne/tests/test_report.py": { + "a": 16, + "d": 20 + }, + "mne/tests/test_surface.py": { + "a": 3, + "d": 5 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 7, + "d": 7 + }, + "mne/viz/_brain/surface.py": { + "a": 8, + "d": 10 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 3, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 16, + "d": 18 + }, + "mne/viz/backends/_pyvista.py": { + "a": 24, + "d": 25 + }, + "mne/viz/backends/_utils.py": { + "a": 0, + "d": 10 + }, + "mne/viz/backends/renderer.py": { + "a": 49, + "d": 34 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 8, + "d": 8 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7267.json b/doc/sphinxext/prs/7267.json new file mode 100644 index 00000000000..7219af78b35 --- /dev/null +++ b/doc/sphinxext/prs/7267.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "26b81437485f5287daa175c256608f011c185b67", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7269.json b/doc/sphinxext/prs/7269.json new file mode 100644 index 00000000000..2474c2cb5d9 --- /dev/null +++ b/doc/sphinxext/prs/7269.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "14d0f416d82e6a876a5a33de8349a77d92e272bd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 7 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 3, + "d": 5 + }, + "mne/source_space.py": { + "a": 2, + "d": 4 + }, + "mne/surface.py": { + "a": 95, + "d": 16 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 5 + }, + "mne/tests/test_surface.py": { + "a": 21, + "d": 3 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 19, + "d": 2 + }, + "mne/utils/check.py": { + "a": 16, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7271.json b/doc/sphinxext/prs/7271.json new file mode 100644 index 00000000000..c6855b30385 --- /dev/null +++ b/doc/sphinxext/prs/7271.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6337b1e9bcd9fcdc5db15211acb8ead64015c242", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 12, + "d": 12 + }, + "mne/viz/backends/renderer.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7272.json b/doc/sphinxext/prs/7272.json new file mode 100644 index 00000000000..6a9d1f8545e --- /dev/null +++ b/doc/sphinxext/prs/7272.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1213e4c8c0c1765a56a36bc706ea9a89cbb856a1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/gui/_coreg_gui.py": { + "a": 3, + "d": 3 + }, + "mne/gui/_marker_gui.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_viewer.py": { + "a": 3, + "d": 2 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 32, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7273.json b/doc/sphinxext/prs/7273.json new file mode 100644 index 00000000000..a89897828a2 --- /dev/null +++ b/doc/sphinxext/prs/7273.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f529c5f127c62d8a4d548b8c2a113a990398ebb2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/epochs.py": { + "a": 24, + "d": 7 + }, + "mne/tests/test_epochs.py": { + "a": 14, + "d": 0 + }, + "mne/utils/mixin.py": { + "a": 17, + "d": 13 + }, + "tutorials/epochs/plot_10_epochs_overview.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7276.json b/doc/sphinxext/prs/7276.json new file mode 100644 index 00000000000..1f65ddbdcb6 --- /dev/null +++ b/doc/sphinxext/prs/7276.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "00476f86595275683ef5c734665598f823e1c332", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 7, + "d": 2 + }, + "mne/viz/backends/renderer.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7278.json b/doc/sphinxext/prs/7278.json new file mode 100644 index 00000000000..fcd9837a149 --- /dev/null +++ b/doc/sphinxext/prs/7278.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8faa5f776015c06e899737cdd0fec69233f2b8ba", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 8, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7279.json b/doc/sphinxext/prs/7279.json new file mode 100644 index 00000000000..292183846d6 --- /dev/null +++ b/doc/sphinxext/prs/7279.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f474eb196eca50104f09948c6a4215d628a971f5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/surface.py": { + "a": 3, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7280.json b/doc/sphinxext/prs/7280.json new file mode 100644 index 00000000000..18799192b2e --- /dev/null +++ b/doc/sphinxext/prs/7280.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3a5197d6fc3ca8ffcb20b5d8e25e1aa7c30961d4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 29, + "d": 6 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 14 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 11, + "d": 5 + }, + "mne/viz/backends/renderer.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7281.json b/doc/sphinxext/prs/7281.json new file mode 100644 index 
00000000000..8ca434f8f81 --- /dev/null +++ b/doc/sphinxext/prs/7281.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "5044aaebb242068da31bdd6fbb4782e8ea0adf9a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 115, + "d": 70 + }, + "mne/viz/_brain/_brain.py": { + "a": 9, + "d": 7 + }, + "mne/viz/_brain/colormap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 72, + "d": 5 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7284.json b/doc/sphinxext/prs/7284.json new file mode 100644 index 00000000000..a611c171941 --- /dev/null +++ b/doc/sphinxext/prs/7284.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "c081fbd58b8647eeeadc7c9ae11741daf8a4c2e9", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 10, + "d": 0 + }, + "mne/externals/h5io/_h5io.py": { + "a": 150, + "d": 7 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 3, + "d": 12 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 7, + "d": 12 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 8, + "d": 23 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 12, + "d": 18 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7285.json b/doc/sphinxext/prs/7285.json new file mode 100644 index 00000000000..6aaef9771dd --- /dev/null +++ b/doc/sphinxext/prs/7285.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f0f3b4d828bebff58e30841c5d4402b2823cc505", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 25, + "d": 6 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7286.json b/doc/sphinxext/prs/7286.json new file mode 100644 index 00000000000..1a459a6c62a --- /dev/null +++ b/doc/sphinxext/prs/7286.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "43098b81696f30ab5171a3b3b9064b191e961006", + "authors": [ + { + "n": "Geoff Brookshire", + "e": "Geoff.Brookshire@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7288.json b/doc/sphinxext/prs/7288.json new file mode 100644 index 00000000000..9570d0f4228 --- /dev/null +++ b/doc/sphinxext/prs/7288.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2434a41d5ccc5729371a2de1d7ad9b946b168fd2", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 9, + "d": 4 + }, + "mne/tests/test_annotations.py": { + "a": 13, + 
"d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7289.json b/doc/sphinxext/prs/7289.json new file mode 100644 index 00000000000..745f81eee71 --- /dev/null +++ b/doc/sphinxext/prs/7289.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "33f224d4486b4b13facf543aae7e1867e3606898", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/surface.py": { + "a": 20, + "d": 4 + }, + "mne/viz/_brain/surface.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7290.json b/doc/sphinxext/prs/7290.json new file mode 100644 index 00000000000..d04e180bc83 --- /dev/null +++ b/doc/sphinxext/prs/7290.json @@ -0,0 +1,127 @@ +{ + "merge_commit_sha": "178d7bf345d9dbe6248cf939a1b2f7ce63ec04ab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "doc/glossary.rst": { + "a": 3, + "d": 3 + }, + "doc/python_reference.rst": { + "a": 4, + "d": 1 + }, + "examples/preprocessing/plot_artifact_detection.py": { + "a": 6, + "d": 5 + }, + "examples/preprocessing/plot_head_positions.py": { + "a": 0, + "d": 33 + }, + "mne/chpi.py": { + "a": 581, + "d": 599 + }, + "mne/dipole.py": { + "a": 7, + "d": 5 + }, + "mne/fixes.py": { + "a": 21, + "d": 0 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 4, + "d": 4 + }, + "mne/io/artemis123/artemis123.py": { + "a": 15, + "d": 5 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 10, + "d": 6 + }, + "mne/io/base.py": { + "a": 19, + "d": 18 + }, + "mne/io/pick.py": { + "a": 14, + "d": 9 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/ecg.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/maxwell.py": { + "a": 25, + "d": 23 + }, + "mne/preprocessing/ssp.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 10, + "d": 8 + }, + "mne/simulation/tests/test_raw.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_chpi.py": { + "a": 166, + "d": 46 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 14, + "d": 1 + }, + "mne/transforms.py": { + "a": 51, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 39, + "d": 0 + }, + "mne/utils/progressbar.py": { + "a": 11, + "d": 5 + }, + "tutorials/preprocessing/plot_59_head_positions.py": { + "a": 80, + "d": 0 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7292.json b/doc/sphinxext/prs/7292.json new file mode 100644 index 00000000000..02a6daa625a --- /dev/null +++ b/doc/sphinxext/prs/7292.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4d51e1443a58b8dff8ee331e272690e4f56d499a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/renderer.py": { + "a": 31, + "d": 23 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7294.json b/doc/sphinxext/prs/7294.json new file mode 100644 index 00000000000..7304205b331 --- /dev/null +++ b/doc/sphinxext/prs/7294.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "95ad0eec8a2e1e9c7c74e25852157e650aea7df0", + "authors": 
[ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 0, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 0, + "d": 2 + }, + "environment.yml": { + "a": 0, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 6 + }, + "mne/viz/_brain/_brain.py": { + "a": 44, + "d": 198 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 9, + "d": 141 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 27 + }, + "mne/viz/backends/renderer.py": { + "a": 2, + "d": 2 + }, + "requirements.txt": { + "a": 0, + "d": 1 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7295.json b/doc/sphinxext/prs/7295.json new file mode 100644 index 00000000000..8540de7c55c --- /dev/null +++ b/doc/sphinxext/prs/7295.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9081ae329c95b4657dea833f607d7d0bded4cb8a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_3d.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7297.json b/doc/sphinxext/prs/7297.json new file mode 100644 index 00000000000..edfcdd79bc0 --- /dev/null +++ b/doc/sphinxext/prs/7297.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "0bb0eeb92928ce18c75280fb7740bb9017521fc6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 5, + "d": 7 + }, + "mne/io/ctf/trans.py": { + "a": 2, + "d": 2 + }, + "mne/io/curry/curry.py": { + "a": 183, + "d": 52 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 173, + "d": 5 + }, + "mne/surface.py": { + "a": 11, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 9, + "d": 1 + }, + "mne/utils/check.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7298.json b/doc/sphinxext/prs/7298.json new file mode 100644 index 00000000000..d7f4cc3a304 --- /dev/null +++ b/doc/sphinxext/prs/7298.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "325bfc30c6533b63b3fb675fbf34df14b5ddc6e6", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 6, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7299.json b/doc/sphinxext/prs/7299.json new file mode 100644 index 00000000000..377973a85d2 --- /dev/null +++ b/doc/sphinxext/prs/7299.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "be4dfac706828d23580162381c543ac172810bd7", + "authors": [ + { + "n": "Sophie Herbst", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/73.json b/doc/sphinxext/prs/73.json new file mode 100644 index 00000000000..9f337adfec3 --- /dev/null +++ b/doc/sphinxext/prs/73.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0e7f195aa1b48ef7a80ed7a4cb238aa48f4c43f4", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/plot_simulate_evoked_data.py": { + "a": 3, + 
"d": 3 + }, + "mne/simulation/evoked.py": { + "a": 8, + "d": 8 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 19, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/730.json b/doc/sphinxext/prs/730.json new file mode 100644 index 00000000000..55799209903 --- /dev/null +++ b/doc/sphinxext/prs/730.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "205701479f2b8f8d8707b66f71039ce6fe6a7a02", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_label_from_stc.py": { + "a": 105, + "d": 0 + }, + "mne/label.py": { + "a": 78, + "d": 27 + }, + "mne/source_estimate.py": { + "a": 15, + "d": 3 + }, + "mne/tests/test_label.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7303.json b/doc/sphinxext/prs/7303.json new file mode 100644 index 00000000000..7c926597f2a --- /dev/null +++ b/doc/sphinxext/prs/7303.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f76c7b38614af324b5884176c1d526d960bca249", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 6, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 15, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7304.json b/doc/sphinxext/prs/7304.json new file mode 100644 index 00000000000..b0cc9bcb407 --- /dev/null +++ b/doc/sphinxext/prs/7304.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "478d38ead72139bc6bb2e354e29438f7b5519ba2", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 7, + "d": 1 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 45, + "d": 3 + }, + "mne/io/fieldtrip/utils.py": { + "a": 18, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7305.json b/doc/sphinxext/prs/7305.json new file mode 100644 index 00000000000..552b7f5ef53 --- /dev/null +++ b/doc/sphinxext/prs/7305.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "18259f59917f105aea80242daba0dbf309f89abc", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 39, + "d": 11 + }, + "mne/viz/utils.py": { + "a": 14, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7307.json b/doc/sphinxext/prs/7307.json new file mode 100644 index 00000000000..fd34d3c88d8 --- /dev/null +++ b/doc/sphinxext/prs/7307.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b49528206273a5b5bcddd65cfa439ad241df1b24", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7309.json b/doc/sphinxext/prs/7309.json new file mode 100644 index 00000000000..dfb0c6c0265 --- /dev/null +++ b/doc/sphinxext/prs/7309.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "963278c135ffd7972c42d2060491177fd527a7cd", + "authors": 
[ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/731.json b/doc/sphinxext/prs/731.json new file mode 100644 index 00000000000..2a8b773e30a --- /dev/null +++ b/doc/sphinxext/prs/731.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "c4bccec6459e26b2a1f76e5e40e26d0279a9e61a", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "bin/mne_make_scalp_surfaces.py": { + "a": 115, + "d": 0 + }, + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 9, + "d": 0 + }, + "examples/plot_decimate_head_surface.py": { + "a": 40, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 56, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 22, + "d": 2 + }, + "mne/utils.py": { + "a": 21, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7312.json b/doc/sphinxext/prs/7312.json new file mode 100644 index 00000000000..76e9873d145 --- /dev/null +++ b/doc/sphinxext/prs/7312.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "740fdce08d8de0cbfc232806686fcaf45de8dc56", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/overview/roadmap.rst": { + "a": 2, + "d": 0 + }, + "mne/dipole.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7314.json b/doc/sphinxext/prs/7314.json new file mode 100644 index 00000000000..a0154246276 --- /dev/null +++ b/doc/sphinxext/prs/7314.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8de73099c98679e0d6c9a5784e2d63ec95c5d4ef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/nirx/nirx.py": { + "a": 9, + "d": 4 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7317.json b/doc/sphinxext/prs/7317.json new file mode 100644 index 00000000000..3ef249c3240 --- /dev/null +++ b/doc/sphinxext/prs/7317.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0226c5ee292f11894c57c72858e743cce3fdd008", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 2, + "d": 1 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7318.json b/doc/sphinxext/prs/7318.json new file mode 100644 index 00000000000..a1a226b9a2a --- /dev/null +++ b/doc/sphinxext/prs/7318.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b0bdabd8f1a39db62b19fc4e9a27afa012047fc6", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7319.json b/doc/sphinxext/prs/7319.json new file mode 100644 index 00000000000..ea57a47ec55 
--- /dev/null +++ b/doc/sphinxext/prs/7319.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5152d60bbc1714d4920decce3e9f8416d8d86598", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_scalp_coupling_index.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7322.json b/doc/sphinxext/prs/7322.json new file mode 100644 index 00000000000..eb37cf8528f --- /dev/null +++ b/doc/sphinxext/prs/7322.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a43db5731fad6bd1abed1faab0e723bbc38e7fff", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7324.json b/doc/sphinxext/prs/7324.json new file mode 100644 index 00000000000..59ff6bacb37 --- /dev/null +++ b/doc/sphinxext/prs/7324.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2b9a707f1600af9f120df20b15e55d6c6ce18c62", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 9, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7325.json b/doc/sphinxext/prs/7325.json new file mode 100644 index 00000000000..2ffad533c47 --- /dev/null +++ b/doc/sphinxext/prs/7325.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "ee47fc349ae1a5c907f84c8c8ee47526afc976ec", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 11, + "d": 10 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 3, + "d": 1 + }, + "mne/coreg.py": { + "a": 46, + "d": 31 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/trans.py": { + "a": 13, + "d": 69 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 116, + "d": 5 + }, + "mne/transforms.py": { + "a": 72, + "d": 10 + }, + "mne/utils/tests/test_numerics.py": { + "a": 4, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7326.json b/doc/sphinxext/prs/7326.json new file mode 100644 index 00000000000..ee03d708300 --- /dev/null +++ b/doc/sphinxext/prs/7326.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "1a3b63941c600cfa755d84f9b509bafacae11892", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 12, + "d": 5 + }, + "mne/evoked.py": { + "a": 12, + "d": 5 + }, + "mne/io/base.py": { + "a": 16, + "d": 10 + }, + "mne/source_estimate.py": { + "a": 9, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/dataframe.py": { + "a": 11, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7327.json b/doc/sphinxext/prs/7327.json new file mode 100644 index 00000000000..b4a6c1a6712 --- /dev/null +++ b/doc/sphinxext/prs/7327.json @@ -0,0 +1,27 
@@ +{ + "merge_commit_sha": "3f6dc9b496132f4673c26e4cd4e3c92de961a887", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 1, + "d": 3 + }, + "mne/channels/montage.py": { + "a": 8, + "d": 10 + }, + "mne/channels/tests/test_montage.py": { + "a": 7, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7328.json b/doc/sphinxext/prs/7328.json new file mode 100644 index 00000000000..bebc872b8d0 --- /dev/null +++ b/doc/sphinxext/prs/7328.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6c0df72a979bd871b63f53e39d0afde8d8d42aca", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 24, + "d": 10 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 32, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7329.json b/doc/sphinxext/prs/7329.json new file mode 100644 index 00000000000..d63d7b1b601 --- /dev/null +++ b/doc/sphinxext/prs/7329.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "72e22a543f4b7c3404a04270b3fc67c6f3bca8a5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/infomax_.py": { + "a": 10, + "d": 2 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/733.json b/doc/sphinxext/prs/733.json new file mode 100644 index 00000000000..467d8e7b919 --- /dev/null +++ b/doc/sphinxext/prs/733.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "620609f79b40b17cb79b3d60dc5414352198f828", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz.py": { + "a": 7, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7331.json b/doc/sphinxext/prs/7331.json new file mode 100644 index 00000000000..42842dd53fb --- /dev/null +++ b/doc/sphinxext/prs/7331.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "61382c1805efa5c6333d83ad27d24268b6fd2e13", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/overview/migrating.rst": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 12, + "d": 5 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 27, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7333.json b/doc/sphinxext/prs/7333.json new file mode 100644 index 00000000000..27e6f8f67e5 --- /dev/null +++ b/doc/sphinxext/prs/7333.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d9e729fb8c7379437f6f9647c494f76a67e489a9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + ".travis.yml": { + "a": 1, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 13, + "d": 11 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 8, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7336.json b/doc/sphinxext/prs/7336.json new file mode 100644 index 
00000000000..aebb8ce2ed9 --- /dev/null +++ b/doc/sphinxext/prs/7336.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fa38f2b24c65cdc16e65776a2912f255c2807343", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 29, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7337.json b/doc/sphinxext/prs/7337.json new file mode 100644 index 00000000000..b3882dc7180 --- /dev/null +++ b/doc/sphinxext/prs/7337.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4968e692ac4626930aac2a25035ca9afc1a940b5", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 13, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7338.json b/doc/sphinxext/prs/7338.json new file mode 100644 index 00000000000..74333a8edf6 --- /dev/null +++ b/doc/sphinxext/prs/7338.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fc49cde2f3a27d666624dc58dde1e367d272f950", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 11, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7339.json b/doc/sphinxext/prs/7339.json new file mode 100644 index 00000000000..c29e92c5d74 --- /dev/null +++ b/doc/sphinxext/prs/7339.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "da7b6c24dacdddec846cf12b33084a2cab9e78c2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 4, + "d": 4 + }, + "mne/simulation/tests/test_raw.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 36, + "d": 8 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_misc.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 12, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/734.json b/doc/sphinxext/prs/734.json new file mode 100644 index 00000000000..9ab123edc14 --- /dev/null +++ b/doc/sphinxext/prs/734.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "df97a0c35a9959c3641c7f0bfcb5e7ea7854c8be", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "bin/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 6 + }, + "examples/plot_decimate_head_surface.py": { + "a": 9, + "d": 10 + }, + "mne/surface.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7342.json b/doc/sphinxext/prs/7342.json new file mode 100644 index 00000000000..8612ac7e22c --- /dev/null +++ b/doc/sphinxext/prs/7342.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e3c4474f3b7d932ca90ae264254e3432ee5c5d18", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 20, + "d": 7 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/7343.json b/doc/sphinxext/prs/7343.json new file mode 100644 index 00000000000..bb4e0dfcd9e --- /dev/null +++ b/doc/sphinxext/prs/7343.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "cc24a220a4e421f527e7616773c79dbccd04f4ed", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 24, + "d": 14 + }, + "mne/viz/tests/test_misc.py": { + "a": 5, + "d": 3 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7344.json b/doc/sphinxext/prs/7344.json new file mode 100644 index 00000000000..f57e7b32463 --- /dev/null +++ b/doc/sphinxext/prs/7344.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dae99dfa17baba405d2d1068950426f2535ea1ed", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 6, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7346.json b/doc/sphinxext/prs/7346.json new file mode 100644 index 00000000000..85361098e53 --- /dev/null +++ b/doc/sphinxext/prs/7346.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "20a6c4d8f3a074d36850b2572216628c19b6d2d2", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/preprocessing/nirs/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 31, + "d": 3 + }, + "mne/preprocessing/tests/test_nirs.py": { + "a": 81, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7348.json b/doc/sphinxext/prs/7348.json new file mode 100644 index 00000000000..d221887ae09 --- /dev/null +++ b/doc/sphinxext/prs/7348.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "afab0e1e9122997147008437aa4f5aa0c74a57cc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/inverse.rst": { + "a": 68, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 11, + "d": 0 + }, + "mne/defaults.py": { + "a": 1, + "d": 1 + }, + "mne/fixes.py": { + "a": 0, + "d": 27 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 116, + "d": 98 + }, + "mne/minimum_norm/inverse.py": { + "a": 26, + "d": 24 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 139, + "d": 60 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 1 + }, + "mne/utils/linalg.py": { + "a": 29, + "d": 0 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 3, + "d": 5 + }, + "tutorials/source-modeling/plot_mne_solutions.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7349.json b/doc/sphinxext/prs/7349.json new file mode 100644 index 00000000000..9757a1d95db --- /dev/null +++ b/doc/sphinxext/prs/7349.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "beeb35dc8b92c55811521152c7d6ea03cbed7b56", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 17, + "d": 79 + }, + "mne/minimum_norm/inverse.py": { + "a": 46, + "d": 18 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 22, + "d": 17 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 27, + "d": 12 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 3 + }, + "mne/simulation/raw.py": 
{ + "a": 1, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 9, + "d": 3 + }, + "mne/source_space.py": { + "a": 14, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 81, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7351.json b/doc/sphinxext/prs/7351.json new file mode 100644 index 00000000000..84fda6f09df --- /dev/null +++ b/doc/sphinxext/prs/7351.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b75bfec5d184784bdc5f2f208e8b05bdc018c9a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7352.json b/doc/sphinxext/prs/7352.json new file mode 100644 index 00000000000..0acf2ba3533 --- /dev/null +++ b/doc/sphinxext/prs/7352.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "00fea96eab2198dcc1c241bd2d305e9ff0fb014d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7353.json b/doc/sphinxext/prs/7353.json new file mode 100644 index 00000000000..aa58593e82b --- /dev/null +++ b/doc/sphinxext/prs/7353.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8d22915819d85f82a27c36b9c1b06c0b2132fbe6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 315, + "d": 0 + }, + "tutorials/evoked/plot_object_evoked.py": { + "a": 0, + "d": 89 + }, + "tutorials/evoked/plot_visualize_evoked.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7356.json b/doc/sphinxext/prs/7356.json new file mode 100644 index 00000000000..4072df7b5fb --- /dev/null +++ b/doc/sphinxext/prs/7356.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5b9166cf6b2e3ba8c5455445ae21c61f875bf93c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/constants.py": { + "a": 1, + "d": 0 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7357.json b/doc/sphinxext/prs/7357.json new file mode 100644 index 00000000000..2a6633fc58a --- /dev/null +++ b/doc/sphinxext/prs/7357.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "db89fd67b04f934ce0bec0a7ffc105ede50e5b71", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7358.json b/doc/sphinxext/prs/7358.json new file mode 100644 index 00000000000..14d880bcf4c --- /dev/null +++ b/doc/sphinxext/prs/7358.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "56f98a7b7a08ebf31ffc0b0a64021619fd42c5c5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 24, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7361.json b/doc/sphinxext/prs/7361.json new file mode 
100644 index 00000000000..e72a9dbba3c --- /dev/null +++ b/doc/sphinxext/prs/7361.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e6c0a1bd8ed8a9eb9428eb01a70641f8272c3a06", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7362.json b/doc/sphinxext/prs/7362.json new file mode 100644 index 00000000000..84e8f481a27 --- /dev/null +++ b/doc/sphinxext/prs/7362.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7d691d2f3462234bb3f8fe75a75e449750708bf6", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/viz/circle.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7364.json b/doc/sphinxext/prs/7364.json new file mode 100644 index 00000000000..7a9efbb04fc --- /dev/null +++ b/doc/sphinxext/prs/7364.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8675bfaa98570e30cdb63a40dba72ecb93b8ef11", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 0 + }, + "mne/io/brainvision/tests/data/test.hpts": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7366.json b/doc/sphinxext/prs/7366.json new file mode 100644 index 00000000000..c81d8e671ce --- /dev/null +++ b/doc/sphinxext/prs/7366.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "4400c2bcc45f4e673fa011f396184486a1923ba6", + "authors": [ + { + "n": "Sophie Herbst", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 32, + "d": 0 + }, + "mne/evoked.py": { + "a": 5, + "d": 2 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 41, + "d": 17 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 22, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7367.json b/doc/sphinxext/prs/7367.json new file mode 100644 index 00000000000..643191b01aa --- /dev/null +++ b/doc/sphinxext/prs/7367.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "076f8d5a4c9f109714e3bc2e11085cae3e539928", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 6, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 57, + "d": 42 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7369.json b/doc/sphinxext/prs/7369.json new file mode 100644 index 00000000000..70ad984be38 --- /dev/null +++ b/doc/sphinxext/prs/7369.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c5aa0b88211eef54346539127fe80fede09f0ac8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 10, + "d": 9 + }, + "mne/tests/test_cov.py": { + "a": 53, + "d": 25 + }, + "mne/utils/_testing.py": { + "a": 4, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7370.json b/doc/sphinxext/prs/7370.json new file mode 100644 index 00000000000..b14ec641b08 --- /dev/null +++ 
b/doc/sphinxext/prs/7370.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "914f1ee2fb38e8e76fa0c8c7ce88b53fe68dca54", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 3, + "d": 4 + }, + "mne/commands/tests/test_commands.py": { + "a": 11, + "d": 10 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_scalp_coupling_index.py": { + "a": 2, + "d": 2 + }, + "mne/utils/check.py": { + "a": 3, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 20, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7371.json b/doc/sphinxext/prs/7371.json new file mode 100644 index 00000000000..5f92f7d27c6 --- /dev/null +++ b/doc/sphinxext/prs/7371.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1acbcfbad1a5a9d2bebb54c1c0f2b9b77c08519b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/overview/roadmap.rst": { + "a": 29, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7372.json b/doc/sphinxext/prs/7372.json new file mode 100644 index 00000000000..bef90695b44 --- /dev/null +++ b/doc/sphinxext/prs/7372.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "cb73c5601845b23dcea8b41d915e0a97afd18201", + "authors": [ + { + "n": "Ramiro Gatti", + "e": "rgatti@ingenieria.uner.edu.ar" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 1, + "d": 0 + }, + "examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 1, + "d": 0 + }, + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 1, + "d": 0 + }, + "tutorials/source-modeling/plot_eeg_no_mri.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7374.json b/doc/sphinxext/prs/7374.json new file mode 100644 index 00000000000..612fd7d24ca --- /dev/null +++ b/doc/sphinxext/prs/7374.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "66acf7e945d5d225018184add219e6f8691446cc", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/evoked.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7378.json b/doc/sphinxext/prs/7378.json new file mode 100644 index 00000000000..eaa4e0303cb --- /dev/null +++ b/doc/sphinxext/prs/7378.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "17e4cca9efbfb5938ed1a573e816787196d3b6fb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/fetching.py": { + "a": 26, + "d": 61 + }, + "mne/utils/tests/test_fetching.py": { + "a": 7, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7380.json b/doc/sphinxext/prs/7380.json new file mode 100644 index 00000000000..c8425cce93e --- /dev/null +++ b/doc/sphinxext/prs/7380.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "b884155c707c9e7d5224bb38f1854a54599a5b37", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + 
"mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 16, + "d": 6 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 7, + "d": 6 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 6, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 12, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7381.json b/doc/sphinxext/prs/7381.json new file mode 100644 index 00000000000..0215e9c2051 --- /dev/null +++ b/doc/sphinxext/prs/7381.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3bde3d23a60d5a52cceae916936ba08779e20673", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7382.json b/doc/sphinxext/prs/7382.json new file mode 100644 index 00000000000..2dd9fc13a3c --- /dev/null +++ b/doc/sphinxext/prs/7382.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bbd17ddbd332cb22f5b09e3f783d05392d3ce912", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 4, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7383.json b/doc/sphinxext/prs/7383.json new file mode 100644 index 00000000000..1eaceb3fbdc --- /dev/null +++ b/doc/sphinxext/prs/7383.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3f9ef1c3767a030074f181196d1c359e843cb3fe", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7386.json b/doc/sphinxext/prs/7386.json new file mode 100644 index 00000000000..0d003d94390 --- /dev/null +++ b/doc/sphinxext/prs/7386.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e88179aaef6c485db03e2432289a7f8cabb30775", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 10, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7387.json b/doc/sphinxext/prs/7387.json new file mode 100644 index 00000000000..7810983da37 --- /dev/null +++ b/doc/sphinxext/prs/7387.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "067b0144f3b9f8e572f53e1af1dbe417aa9e6d98", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "examples/inverse/plot_dics_source_power.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7388.json b/doc/sphinxext/prs/7388.json new file mode 100644 index 00000000000..89be4fd27a0 --- /dev/null +++ b/doc/sphinxext/prs/7388.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9afac0a8f8543922cef27bea55ca35a930241a17", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/source-modeling/plot_forward.py": { + "a": 17, + "d": 20 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 8, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7389.json b/doc/sphinxext/prs/7389.json new file mode 100644 index 00000000000..80caf90c6a3 --- /dev/null +++ b/doc/sphinxext/prs/7389.json @@ 
-0,0 +1,15 @@ +{ + "merge_commit_sha": "077d33fc909e9ae3e32b305a8b3a37461dda3c12", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7390.json b/doc/sphinxext/prs/7390.json new file mode 100644 index 00000000000..8b522e016d1 --- /dev/null +++ b/doc/sphinxext/prs/7390.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c7af21496a1ff80c9fbc9da4f1d95334bf8f9e90", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 5, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 3, + "d": 0 + }, + "mne/viz/backends/base_renderer.py": { + "a": 5, + "d": 0 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7391.json b/doc/sphinxext/prs/7391.json new file mode 100644 index 00000000000..96664f98b0c --- /dev/null +++ b/doc/sphinxext/prs/7391.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d6706509ec55f3cf4777dd6e96d2a2b42eb55c37", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/bads.py": { + "a": 6, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 6 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7392.json b/doc/sphinxext/prs/7392.json new file mode 100644 index 00000000000..3e1c471dfc8 --- /dev/null +++ b/doc/sphinxext/prs/7392.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f6c029be1a190c122ead63d0fbd0e6d44a0c4766", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7393.json b/doc/sphinxext/prs/7393.json new file mode 100644 index 00000000000..a31d2b56bba --- /dev/null +++ b/doc/sphinxext/prs/7393.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6fd65ef1324819e854d9b2758ddf510038b99be8", + "authors": [ + { + "n": "Liberty Hamilton", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/reference.py": { + "a": 30, + "d": 25 + }, + "mne/io/tests/test_reference.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7394.json b/doc/sphinxext/prs/7394.json new file mode 100644 index 00000000000..44f0872b1cd --- /dev/null +++ b/doc/sphinxext/prs/7394.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8e3fc949e177d10bb3acf9ecf93524924b346240", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 10, + "d": 4 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7396.json b/doc/sphinxext/prs/7396.json new file mode 100644 index 00000000000..ea75ff56073 --- /dev/null +++ b/doc/sphinxext/prs/7396.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "32048ae44faf65dd2f0761d25f17b6a7b160d8f0", + "authors": [ + { + "n": 
"Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7399.json b/doc/sphinxext/prs/7399.json new file mode 100644 index 00000000000..de9f56128a1 --- /dev/null +++ b/doc/sphinxext/prs/7399.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1cf95cc90f2e13a73a75d1d46799f0414e2deead", + "authors": [ + { + "n": "Dominik Welke", + "e": "dominik.welke@web.de" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/74.json b/doc/sphinxext/prs/74.json new file mode 100644 index 00000000000..085772f2f22 --- /dev/null +++ b/doc/sphinxext/prs/74.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "438dea14ed8aff90c5994ad17295a42a4d0e7e91", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 55, + "d": 0 + }, + "mne/fiff/raw.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 21, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 132, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/740.json b/doc/sphinxext/prs/740.json new file mode 100644 index 00000000000..cb66ffb3115 --- /dev/null +++ b/doc/sphinxext/prs/740.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a423272af7c686de62c0c79ab5e3f4a12c2444f3", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7400.json b/doc/sphinxext/prs/7400.json new file mode 100644 index 00000000000..4349d969d7d --- /dev/null +++ b/doc/sphinxext/prs/7400.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ef65f8f0fc74fc703d96a85ede37a1ac5dc39d3d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7401.json b/doc/sphinxext/prs/7401.json new file mode 100644 index 00000000000..0455c85bea3 --- /dev/null +++ b/doc/sphinxext/prs/7401.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3cc92424a94b7d73f3838732ccf5c8abbb72e652", + "authors": [ + { + "n": "Britta Westner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 21, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7402.json b/doc/sphinxext/prs/7402.json new file mode 100644 index 00000000000..938231ea397 --- /dev/null +++ b/doc/sphinxext/prs/7402.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4622f8836d7946435452e291dc27605688a01711", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 93, + "d": 63 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7403.json b/doc/sphinxext/prs/7403.json new file mode 100644 index 00000000000..94534cac38d --- /dev/null +++ 
b/doc/sphinxext/prs/7403.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5a89c3d322139e1ae64202df1b40ec324346a6da", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 7, + "d": 0 + }, + "doc/_templates/layout.html": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7407.json b/doc/sphinxext/prs/7407.json new file mode 100644 index 00000000000..9bc025ec1fb --- /dev/null +++ b/doc/sphinxext/prs/7407.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "e3cc4b1cb5e6ac4e452494933377858d18c1268e", + "authors": [ + { + "n": "Adonay Nunes", + "e": "adonay.s.nunes@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 10, + "d": 0 + }, + "examples/preprocessing/plot_muscle_detection.py": { + "a": 80, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 107, + "d": 2 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 28, + "d": 1 + }, + "tutorials/preprocessing/plot_30_filtering_resampling.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7408.json b/doc/sphinxext/prs/7408.json new file mode 100644 index 00000000000..f78679dcd33 --- /dev/null +++ b/doc/sphinxext/prs/7408.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a2d1461142d7d4dfdd23586c6d6fdd08d4671934", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/conftest.py": { + "a": 26, + "d": 0 + }, + "mne/viz/tests/test_topo.py": { + "a": 9, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 8, + "d": 14 + }, + "mne/viz/topo.py": { + "a": 6, + "d": 0 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7409.json b/doc/sphinxext/prs/7409.json new file mode 100644 index 00000000000..1a8a037a420 --- /dev/null +++ b/doc/sphinxext/prs/7409.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ff2442f156004e452fb7c40a6e3477c7dbe6e9d1", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/stats/multi_comp.py": { + "a": 2, + "d": 0 + }, + "mne/stats/tests/test_multi_comp.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/741.json b/doc/sphinxext/prs/741.json new file mode 100644 index 00000000000..78d038ee6dc --- /dev/null +++ b/doc/sphinxext/prs/741.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "a35cc7e505270de0ee11492bda31cd4b3b5c7a5e", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 4, + "d": 0 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 4, + "d": 25 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 7, + "d": 22 + }, + "mne/epochs.py": { + "a": 33, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 31, + "d": 33 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 0, + "d": 6 + }, + "mne/tests/test_viz.py": { + "a": 7, + "d": 0 + }, + "mne/viz.py": { + "a": 126, + "d": 61 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7411.json b/doc/sphinxext/prs/7411.json new file mode 100644 index 00000000000..542dd712d50 --- /dev/null +++ b/doc/sphinxext/prs/7411.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9f05d2405b5597743bf4a10db17032f00086987a", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 12, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7412.json b/doc/sphinxext/prs/7412.json new file mode 100644 index 00000000000..2ba983aa4fc --- /dev/null +++ b/doc/sphinxext/prs/7412.json @@ -0,0 +1,123 @@ +{ + "merge_commit_sha": "7ce4bb77e7cfbbb887a9550915777d09d5ba787e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 3 + }, + "README.rst": { + "a": 6, + "d": 6 + }, + "doc/changes/latest.inc": { + "a": 8, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 11, + "d": 29 + }, + "mne/decoding/tests/test_base.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_ems.py": { + "a": 4, + "d": 8 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 5, + "d": 7 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 4 + }, + "mne/filter.py": { + "a": 5, + "d": 6 + }, + "mne/fixes.py": { + "a": 3, + "d": 168 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 2 + }, + "mne/label.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ctps_.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 9 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 6, + "d": 8 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 2 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + }, + "mne/viz/montage.py": { + "a": 2, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 7 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7413.json b/doc/sphinxext/prs/7413.json new file mode 100644 index 00000000000..8fdcf607203 --- /dev/null +++ b/doc/sphinxext/prs/7413.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1bbe385fb4473f10fc6942ce7c38ad75deefda9e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + 
], + "changes": { + "tutorials/discussions/plot_background_filtering.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7414.json b/doc/sphinxext/prs/7414.json new file mode 100644 index 00000000000..d65e1c7fb11 --- /dev/null +++ b/doc/sphinxext/prs/7414.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4e32966c5df0f294141ebb9da16ba3756228f808", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 37, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 21, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 95, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7416.json b/doc/sphinxext/prs/7416.json new file mode 100644 index 00000000000..2837f542367 --- /dev/null +++ b/doc/sphinxext/prs/7416.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b5ec42534f7651a9bf041b2d1a31888b533c06ab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 33, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7417.json b/doc/sphinxext/prs/7417.json new file mode 100644 index 00000000000..e21a5f2f734 --- /dev/null +++ b/doc/sphinxext/prs/7417.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9e852f266025ba15464fafaad6327fcf73d5ffa1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/annotations.py": { + "a": 7, + "d": 2 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 16, + "d": 28 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 48, + "d": 17 + }, + "mne/utils/check.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7419.json b/doc/sphinxext/prs/7419.json new file mode 100644 index 00000000000..0a79a5d9af5 --- /dev/null +++ b/doc/sphinxext/prs/7419.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "34e72956f60326b883e07a9694a9808ca73f9627", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 58 + }, + "mne/utils/docs.py": { + "a": 43, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/742.json b/doc/sphinxext/prs/742.json new file mode 100644 index 00000000000..ec0ca1fe56b --- /dev/null +++ b/doc/sphinxext/prs/742.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "f3c07007862f82e4cbe171d0f3f3601f33d17cb9", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 112, + "d": 0 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 123, + "d": 0 + }, + "mne/beamformer/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 185, + "d": 8 + }, + "mne/beamformer/_lcmv.py": { + "a": 278, + "d": 5 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 102, + "d": 18 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 202, + "d": 54 + }, + "mne/epochs.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/csd.py": { + "a": 28, + "d": 14 + }, + "mne/time_frequency/multitaper.py": { + "a": 9, + "d": 3 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 29, + "d": 19 + }, + "mne/viz.py": { + "a": 87, + "d": 1 + } + } +} \ No newline at 
end of file diff --git a/doc/sphinxext/prs/7422.json b/doc/sphinxext/prs/7422.json new file mode 100644 index 00000000000..009edc5b8f4 --- /dev/null +++ b/doc/sphinxext/prs/7422.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "36fcc968e016fdb6e22f61e27e09df4926e6bea1", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7423.json b/doc/sphinxext/prs/7423.json new file mode 100644 index 00000000000..62ade2f7e4d --- /dev/null +++ b/doc/sphinxext/prs/7423.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0083c2b99f6ba9da3849dd9edef05b1063e1affa", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 25, + "d": 54 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 294, + "d": 231 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7424.json b/doc/sphinxext/prs/7424.json new file mode 100644 index 00000000000..13dbc9602a8 --- /dev/null +++ b/doc/sphinxext/prs/7424.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d8db438592b4a6e62d71692715b176834192d9aa", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7427.json b/doc/sphinxext/prs/7427.json new file mode 100644 index 00000000000..1100fd8a899 --- /dev/null +++ b/doc/sphinxext/prs/7427.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "586c09f55615ffd8933ca7e2cc543d0e6bbe1ff5", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 29, + "d": 0 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 3, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 5 + }, + "mne/viz/ica.py": { + "a": 5, + "d": 4 + }, + "mne/viz/tests/conftest.py": { + "a": 21, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 17, + "d": 16 + }, + "mne/viz/topo.py": { + "a": 6, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 36, + "d": 34 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7428.json b/doc/sphinxext/prs/7428.json new file mode 100644 index 00000000000..0f6664d57a9 --- /dev/null +++ b/doc/sphinxext/prs/7428.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "86d7988b89de6daa1575e60f42586efc5d2c6dec", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 1 + }, + "mne/channels/interpolation.py": { + "a": 56, + "d": 0 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 21, + "d": 0 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/743.json b/doc/sphinxext/prs/743.json new file mode 100644 index 00000000000..b1009dd162f --- /dev/null +++ b/doc/sphinxext/prs/743.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "65a0a7faf7e8fe7d870e03c64eb591e4a62be27b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + 
} + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/tests/test_classifier.py": { + "a": 15, + "d": 9 + }, + "mne/fiff/tests/test_compensator.py": { + "a": 2, + "d": 0 + }, + "mne/fiff/tests/test_raw.py": { + "a": 6, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 5, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 13, + "d": 5 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 6, + "d": 5 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 11, + "d": 5 + }, + "mne/tests/test_cov.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_filter.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_label.py": { + "a": 9, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 6, + "d": 3 + }, + "mne/tests/test_viz.py": { + "a": 6, + "d": 2 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 2, + "d": 0 + }, + "mne/viz.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7431.json b/doc/sphinxext/prs/7431.json new file mode 100644 index 00000000000..c5125dadfd8 --- /dev/null +++ b/doc/sphinxext/prs/7431.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7cec1e43350099358acc267ec3dd43203f63eb07", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7432.json b/doc/sphinxext/prs/7432.json new file mode 100644 index 00000000000..51851c42355 --- /dev/null +++ b/doc/sphinxext/prs/7432.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2fd229f75903fc2cca08874959369f749a864f6b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 117, + "d": 94 + }, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 6, + "d": 7 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 19, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7434.json b/doc/sphinxext/prs/7434.json new file mode 100644 index 00000000000..d8065abb788 --- /dev/null +++ b/doc/sphinxext/prs/7434.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "9ae1a832b36f8c6688e36e567e0aa129ea0a03e4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/externals/decorator.py": { + "a": 5, + "d": 6 + }, + "mne/filter.py": { + "a": 0, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 0, + "d": 1 + }, + "mne/io/base.py": { + "a": 5, + "d": 7 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 2 + }, + "mne/io/ctf/ctf.py": { + "a": 0, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 0, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 1 + }, + "mne/source_space.py": { + "a": 1, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/utils/_logging.py": { + "a": 47, + "d": 39 + }, + "mne/utils/config.py": { + "a": 0, + "d": 14 + }, + "mne/utils/misc.py": { + "a": 2, + "d": 2 + }, + 
"mne/utils/tests/test_config.py": { + "a": 1, + "d": 19 + }, + "mne/utils/tests/test_logging.py": { + "a": 40, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7435.json b/doc/sphinxext/prs/7435.json new file mode 100644 index 00000000000..609267d7714 --- /dev/null +++ b/doc/sphinxext/prs/7435.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b11c4ab22d18732fd0d9eee2a7784098ccd07965", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 12, + "d": 13 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7438.json b/doc/sphinxext/prs/7438.json new file mode 100644 index 00000000000..a9e57a4bb9c --- /dev/null +++ b/doc/sphinxext/prs/7438.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "debae7841621e69e2f6b5d99898c4dd8c57c2068", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7439.json b/doc/sphinxext/prs/7439.json new file mode 100644 index 00000000000..59efc18ba1b --- /dev/null +++ b/doc/sphinxext/prs/7439.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5d74f84574a944ed201dfab61f49e3720a0244a2", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 2, + "d": 0 + }, + "tutorials/evoked/plot_20_visualize_evoked.py": { + "a": 254, + "d": 0 + }, + "tutorials/evoked/plot_visualize_evoked.py": { + "a": 0, + "d": 214 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/744.json b/doc/sphinxext/prs/744.json new file mode 100644 index 00000000000..da58fed6ccd --- /dev/null +++ b/doc/sphinxext/prs/744.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8c2a6911bd0a066dbd760c9172ade5a5760df175", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/fiff/constants.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7440.json b/doc/sphinxext/prs/7440.json new file mode 100644 index 00000000000..538ec656910 --- /dev/null +++ b/doc/sphinxext/prs/7440.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "69429ef9de6efb299ea27e9c4a7a5af20dc06089", + "authors": [ + { + "n": "FuTe Wong", + "e": "zuxfoucault@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_dics_source_power.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_evoked_ers_source_power.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7441.json b/doc/sphinxext/prs/7441.json new file mode 100644 index 00000000000..c7e3dc8d769 --- /dev/null +++ b/doc/sphinxext/prs/7441.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "41978e88e37b254f2adc3c92acf81f57bfc9e215", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/annotations.py": { + "a": 5, + "d": 8 + }, + "mne/tests/test_annotations.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7443.json b/doc/sphinxext/prs/7443.json new file mode 100644 index 00000000000..3ba4576640e --- /dev/null +++ b/doc/sphinxext/prs/7443.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": 
"8c153f3c695ec63093bb00da0412f9ba87899405", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 3, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 10, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7444.json b/doc/sphinxext/prs/7444.json new file mode 100644 index 00000000000..4b301b551c2 --- /dev/null +++ b/doc/sphinxext/prs/7444.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "248bb02ef73dc16487998e4099bbc58b006e3119", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 7, + "d": 0 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 8, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 55, + "d": 33 + }, + "mne/viz/utils.py": { + "a": 43, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7446.json b/doc/sphinxext/prs/7446.json new file mode 100644 index 00000000000..3eb72c0ada8 --- /dev/null +++ b/doc/sphinxext/prs/7446.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d31ba496505fb1e8d8eea6cc6c7bc360dd941161", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7448.json b/doc/sphinxext/prs/7448.json new file mode 100644 index 00000000000..3551be9b8c2 --- /dev/null +++ b/doc/sphinxext/prs/7448.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "20124d8ed2ba27cd68aec3506a34a6f310af8ee2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 48, + "d": 55 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7451.json b/doc/sphinxext/prs/7451.json new file mode 100644 index 00000000000..cf0a935f863 --- /dev/null +++ b/doc/sphinxext/prs/7451.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a1d423d0f09e9b28f7ee9eaff4f8f1c8da0dcb0a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 3, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 64, + "d": 40 + }, + "mne/io/tests/test_meas_info.py": { + "a": 12, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7454.json b/doc/sphinxext/prs/7454.json new file mode 100644 index 00000000000..f4b2b786c35 --- /dev/null +++ b/doc/sphinxext/prs/7454.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fd5a8c5e9cbbbae047c39b320495525b9f5c96ba", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7455.json b/doc/sphinxext/prs/7455.json new file mode 
100644 index 00000000000..4b7736f03d5 --- /dev/null +++ b/doc/sphinxext/prs/7455.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "f6cb5110deddeee724f153ecd93bc5a5aa13adca", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/visualization/plot_eeglab_head_sphere.py": { + "a": 124, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 3, + "d": 2 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 4 + }, + "tutorials/intro/plot_40_sensor_locations.py": { + "a": 56, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7456.json b/doc/sphinxext/prs/7456.json new file mode 100644 index 00000000000..0909355e12a --- /dev/null +++ b/doc/sphinxext/prs/7456.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9646e22ed8447234a25571e759633d8fa1c4c8dd", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 24, + "d": 10 + }, + "mne/utils/tests/test_numerics.py": { + "a": 18, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7457.json b/doc/sphinxext/prs/7457.json new file mode 100644 index 00000000000..a5ddca14eaf --- /dev/null +++ b/doc/sphinxext/prs/7457.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4cb2798cac7514b8f0d2f0b6f80252d378737e76", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7458.json b/doc/sphinxext/prs/7458.json new file mode 100644 index 00000000000..aa340becbcd --- /dev/null +++ b/doc/sphinxext/prs/7458.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e1b02121eb183c683b6b796fc01998aa4512d7c3", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 2, + "d": 32 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 10, + "d": 8 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7459.json b/doc/sphinxext/prs/7459.json new file mode 100644 index 00000000000..dd18812fef6 --- /dev/null +++ b/doc/sphinxext/prs/7459.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6915a368e1b47f4a2e495cad12da65a6353117ac", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/chpi.py": { + "a": 23, + "d": 7 + }, + "mne/tests/test_chpi.py": { + "a": 57, + "d": 7 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/746.json b/doc/sphinxext/prs/746.json new file mode 100644 index 00000000000..9bcd54b74d7 --- /dev/null +++ b/doc/sphinxext/prs/746.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4504172c8bb28f5d25e074627c2a0f88ac158f67", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/viz.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7460.json b/doc/sphinxext/prs/7460.json new file mode 100644 index 00000000000..bee8ffa0589 --- /dev/null +++ b/doc/sphinxext/prs/7460.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "487759e4b8f003311c1653f2f54592609a42c029", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 1 + }, + "mne/io/proc_history.py": { + "a": 2, + "d": 5 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 178, + "d": 152 + }, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 7, + "d": 4 + }, + "mne/preprocessing/utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 6, + "d": 7 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 19, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7462.json b/doc/sphinxext/prs/7462.json new file mode 100644 index 00000000000..b8bd5b706dc --- /dev/null +++ b/doc/sphinxext/prs/7462.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "8ac813c35941f70b317a36af91a6065453a7b0d8", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 1, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 2, + "d": 2 + }, + "examples/forward/plot_forward_sensitivity_maps.py": { + "a": 10, + "d": 13 + }, + "mne/viz/_brain/_brain.py": { + "a": 11, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7465.json b/doc/sphinxext/prs/7465.json new file mode 100644 index 00000000000..c6718efa1f6 --- /dev/null +++ b/doc/sphinxext/prs/7465.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "aa7ea77b7f4be41e99665c45ae571cb7ea9e8a66", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/visualization/plot_evoked_arrowmap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7468.json b/doc/sphinxext/prs/7468.json new file mode 100644 index 00000000000..797e06f14e6 --- /dev/null +++ b/doc/sphinxext/prs/7468.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "acaaa84d3dac50f314917c981709c58962455ba0", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7469.json b/doc/sphinxext/prs/7469.json new file mode 100644 index 00000000000..9673a22786f --- /dev/null +++ b/doc/sphinxext/prs/7469.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "25f6e5e097d8492bfcc3abb6e77804d372f68a9d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + 
"a": 1, + "d": 0 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 1, + "d": 1 + }, + "examples/forward/plot_forward_sensitivity_maps.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 100, + "d": 78 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/747.json b/doc/sphinxext/prs/747.json new file mode 100644 index 00000000000..c30bf3eb0e1 --- /dev/null +++ b/doc/sphinxext/prs/747.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e97fad175d1174b7f22d27e19ecf4382f7c0c512", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 10, + "d": 10 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_2samp.py": { + "a": 10, + "d": 10 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 4 + }, + "mne/viz.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7470.json b/doc/sphinxext/prs/7470.json new file mode 100644 index 00000000000..c0f7ebf245c --- /dev/null +++ b/doc/sphinxext/prs/7470.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b1b31428b60d485a81e6d2f643b689d9ce74558a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 88, + "d": 270 + }, + "mne/utils/docs.py": { + "a": 111, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7473.json b/doc/sphinxext/prs/7473.json new file mode 100644 index 00000000000..1b52ec4ff95 --- /dev/null +++ b/doc/sphinxext/prs/7473.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "11beee2b677e6971ba013f17041d047865c20050", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_bem.py": { + "a": 46, + "d": 36 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7474.json b/doc/sphinxext/prs/7474.json new file mode 100644 index 00000000000..6b25617e792 --- /dev/null +++ b/doc/sphinxext/prs/7474.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "fcbe5aec41cb346f5a3b0add845442ab93f6fcf3", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "drammock@users.noreply.github.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "drammock@users.noreply.github.com" + } + ], + "changes": { + "examples/preprocessing/plot_rereference_eeg.py": { + "a": 8, + "d": 6 + }, + "mne/channels/channels.py": { + "a": 5, + "d": 54 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 0, + "d": 6 + }, + "mne/io/reference.py": { + "a": 9, + "d": 58 + }, + "mne/utils/docs.py": { + "a": 44, + "d": 0 + }, + "tutorials/evoked/plot_eeg_erp.py": { + "a": 11, + "d": 16 + }, + "tutorials/io/plot_20_reading_eeg_data.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/plot_55_setting_eeg_reference.py": { + "a": 3, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7476.json b/doc/sphinxext/prs/7476.json new file mode 100644 index 00000000000..f9b2a0f7863 --- /dev/null +++ b/doc/sphinxext/prs/7476.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3c51cf10ec0f573def4a8d67a739502cfc26dd0f", + 
"authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 8, + "d": 29 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7477.json b/doc/sphinxext/prs/7477.json new file mode 100644 index 00000000000..a8b7e3ffc91 --- /dev/null +++ b/doc/sphinxext/prs/7477.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "94f8ecf49fe8be427ffe9a3636fbc5cc4789da46", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/io/_digitization.py": { + "a": 7, + "d": 0 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/utils.py": { + "a": 3, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 62, + "d": 3 + }, + "mne/io/proj.py": { + "a": 14, + "d": 0 + }, + "mne/io/tests/test_meas_info.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/748.json b/doc/sphinxext/prs/748.json new file mode 100644 index 00000000000..2fcd6b90404 --- /dev/null +++ b/doc/sphinxext/prs/748.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1e6607ef574f4a57027c048bdb90517232ec9cd4", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/fiff/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/fiff/meas_info.py": { + "a": 57, + "d": 4 + }, + "mne/fiff/tests/test_meas_info.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7480.json b/doc/sphinxext/prs/7480.json new file mode 100644 index 00000000000..a2baafd65ff --- /dev/null +++ b/doc/sphinxext/prs/7480.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bb58e0346d844ecf6224248ed133901864eae538", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 58, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7481.json b/doc/sphinxext/prs/7481.json new file mode 100644 index 00000000000..373a117c2e0 --- /dev/null +++ b/doc/sphinxext/prs/7481.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "be48643efeba31cba7dcdc0c583b24dea9a77a8c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".mailmap": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7483.json b/doc/sphinxext/prs/7483.json new file mode 100644 index 00000000000..1012e4618e0 --- /dev/null +++ b/doc/sphinxext/prs/7483.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e4c1e7810f1b777793832a1ceb701855894cb161", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 26, + "d": 17 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 3, + "d": 1 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7484.json b/doc/sphinxext/prs/7484.json new file mode 100644 index 00000000000..66fafcb63fe --- /dev/null +++ b/doc/sphinxext/prs/7484.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2953c77a907905311ac81205b03f4d3138f0a07d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + 
"mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/utils/tests/test_numerics.py": { + "a": 0, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7486.json b/doc/sphinxext/prs/7486.json new file mode 100644 index 00000000000..961181d6dde --- /dev/null +++ b/doc/sphinxext/prs/7486.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "9e785061f325191b809ade41590887230c1378ec", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 14, + "d": 4 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 2, + "d": 4 + }, + "mne/io/pick.py": { + "a": 12, + "d": 6 + }, + "mne/io/tests/test_pick.py": { + "a": 10, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_rank.py": { + "a": 2, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 6, + "d": 7 + }, + "mne/viz/evoked.py": { + "a": 6, + "d": 7 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 5 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/plot_30_info.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7488.json b/doc/sphinxext/prs/7488.json new file mode 100644 index 00000000000..cbda032da68 --- /dev/null +++ b/doc/sphinxext/prs/7488.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "55b78208c360cfeb25fe09cbb7b0e5a19911463e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7489.json b/doc/sphinxext/prs/7489.json new file mode 100644 index 00000000000..0cf9e4c8fc3 --- /dev/null +++ b/doc/sphinxext/prs/7489.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "943dee405fcc68f985107a8f7c6912ee2adfa87a", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 5, + "d": 4 + }, + "mne/preprocessing/tests/test_scalp_coupling_index.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/749.json b/doc/sphinxext/prs/749.json new file mode 100644 index 00000000000..43f1eb85a6a --- /dev/null +++ b/doc/sphinxext/prs/749.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f524a10c5e0801882483ef5c6555bd06a9ee2293", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fiff/meas_info.py": { + "a": 14, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7490.json b/doc/sphinxext/prs/7490.json new file mode 100644 index 00000000000..98920c0aa40 --- /dev/null +++ b/doc/sphinxext/prs/7490.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "98057d96ced28227a1f06f3a432eb129c6442171", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 2, + "d": 0 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7491.json b/doc/sphinxext/prs/7491.json new file mode 100644 index 00000000000..3f838336087 --- /dev/null +++ 
b/doc/sphinxext/prs/7491.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e7cee41c3f30e4fedd4e6ae931cc7dd64e378d7b", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/nirs/nirs.py": { + "a": 9, + "d": 1 + }, + "mne/preprocessing/tests/test_beer_lambert_law.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7494.json b/doc/sphinxext/prs/7494.json new file mode 100644 index 00000000000..ce74f5476d2 --- /dev/null +++ b/doc/sphinxext/prs/7494.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "967cfc416bd65fad2d8f20ea3a79fc7d54d29c46", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 8, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7495.json b/doc/sphinxext/prs/7495.json new file mode 100644 index 00000000000..0579179d096 --- /dev/null +++ b/doc/sphinxext/prs/7495.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0b1bf55f8e3eca094b7928f21cedcfed7f4cf66a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 5, + "d": 4 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 18, + "d": 1 + }, + "mne/utils/check.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7496.json b/doc/sphinxext/prs/7496.json new file mode 100644 index 00000000000..37b47ed7df5 --- /dev/null +++ b/doc/sphinxext/prs/7496.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "96cfef579c7e10a040102bb10a0eadcafcaaabb8", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7497.json b/doc/sphinxext/prs/7497.json new file mode 100644 index 00000000000..50882dafea1 --- /dev/null +++ b/doc/sphinxext/prs/7497.json @@ -0,0 +1,139 @@ +{ + "merge_commit_sha": "c18283ab83c588f8dfc483bd3cd09683aa832aa0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_includes/units.rst": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 2, + "d": 2 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/defaults.py": { + "a": 3, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 6, + "d": 6 + }, + "mne/io/curry/curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_utils.py": { + "a": 5, + "d": 5 + }, + "mne/io/utils.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + 
"mne/viz/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 3 + }, + "tutorials/epochs/plot_20_visualize_epochs.py": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/plot_40_epochs_to_data_frame.py": { + "a": 3, + "d": 3 + }, + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 2, + "d": 2 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/plot_20_rejecting_bad_data.py": { + "a": 5, + "d": 5 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 3, + "d": 3 + }, + "tutorials/sample-datasets/plot_sleep.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_erp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7498.json b/doc/sphinxext/prs/7498.json new file mode 100644 index 00000000000..fcae443684c --- /dev/null +++ b/doc/sphinxext/prs/7498.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4021b0b788c459b8de4188a6668efec4daa32123", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/mne_python.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7499.json b/doc/sphinxext/prs/7499.json new file mode 100644 index 00000000000..15e6e279c16 --- /dev/null +++ b/doc/sphinxext/prs/7499.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "90f1d507f889c49987fa4c5627917cf3caec1b57", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/preprocessing/nirs/nirs.py": { + "a": 12, + "d": 0 + }, + "mne/preprocessing/tests/test_beer_lambert_law.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/75.json b/doc/sphinxext/prs/75.json new file mode 100644 index 00000000000..34603dd91ef --- /dev/null +++ b/doc/sphinxext/prs/75.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0e31c0df6e161aafe1481a052821aa7f83c70a1f", + "authors": [ + { + "n": "Brad Buran", + "e": null + } + ], + "changes": { + "mne/parallel.py": { + "a": 20, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/750.json b/doc/sphinxext/prs/750.json new file mode 100644 index 00000000000..6be5ca3cd2a --- /dev/null +++ b/doc/sphinxext/prs/750.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "be551d67972f4e3b82879e9e5c6b6eed3b37a2c1", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 14, + "d": 0 + }, + "mne/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/transforms/__init__.py": { + "a": 5, + "d": 1 + }, + "mne/transforms/coreg.py": { + "a": 843, + "d": 21 + }, + "mne/transforms/tests/test_coreg.py": { + "a": 139, + "d": 14 + }, + "mne/transforms/transforms.py": { + "a": 48, + "d": 9 + }, + "mne/utils.py": { + "a": 5, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7500.json b/doc/sphinxext/prs/7500.json new file mode 100644 index 00000000000..4e84d4bb61a --- /dev/null +++ b/doc/sphinxext/prs/7500.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2c344f61b29468eba1b4b7a12acd9d6dc2a0bafe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 4, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 10, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + 
"a": 6, + "d": 0 + }, + "mne/utils/check.py": { + "a": 4, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7501.json b/doc/sphinxext/prs/7501.json new file mode 100644 index 00000000000..ac0415748de --- /dev/null +++ b/doc/sphinxext/prs/7501.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6354f8ab496421606d043646e101da0614d97261", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "doc/install/advanced.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7505.json b/doc/sphinxext/prs/7505.json new file mode 100644 index 00000000000..00afccfaf0c --- /dev/null +++ b/doc/sphinxext/prs/7505.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6918981e2788432e4fe6b704cb0503e240e253c2", + "authors": [ + { + "n": "Dmitrii Altukhov", + "e": "dm.altukhov@ya.ru" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_report.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7506.json b/doc/sphinxext/prs/7506.json new file mode 100644 index 00000000000..c42a46984bf --- /dev/null +++ b/doc/sphinxext/prs/7506.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "68fbbeed03f7304b79f9a8077a724efeefeb2bcd", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 3, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7507.json b/doc/sphinxext/prs/7507.json new file mode 100644 index 00000000000..0af86b3426a --- /dev/null +++ b/doc/sphinxext/prs/7507.json @@ -0,0 +1,163 @@ +{ + "merge_commit_sha": "c0b139bc2f9f8c14c5263415a3c5ab56b89f9124", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation_volume.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 2, + "d": 1 + }, + "examples/decoding/plot_decoding_spoc_CMC.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/plot_eeg_csd.py": { + "a": 3, + "d": 4 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_linear_regression_raw.py": { + "a": 2, + "d": 2 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 2, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 15, + "d": 2 + }, + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 59, + "d": 20 + }, + "mne/io/brainvision/brainvision.py": { + "a": 20, + "d": 16 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 15, + "d": 18 + }, + "mne/io/curry/curry.py": { + "a": 10, + "d": 14 + }, + 
"mne/io/edf/edf.py": { + "a": 28, + "d": 20 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/egi.py": { + "a": 6, + "d": 5 + }, + "mne/io/egi/egimff.py": { + "a": 112, + "d": 101 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 6, + "d": 2 + }, + "mne/io/eximia/eximia.py": { + "a": 2, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 4, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 10, + "d": 11 + }, + "mne/io/kit/kit.py": { + "a": 7, + "d": 8 + }, + "mne/io/nicolet/nicolet.py": { + "a": 2, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 2, + "d": 1 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 32, + "d": 0 + }, + "mne/io/utils.py": { + "a": 3, + "d": 3 + }, + "tutorials/evoked/plot_eeg_erp.py": { + "a": 5, + "d": 5 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 2, + "d": 2 + }, + "tutorials/raw/plot_10_raw_overview.py": { + "a": 2, + "d": 2 + }, + "tutorials/sample-datasets/plot_brainstorm_auditory.py": { + "a": 4, + "d": 5 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_eeg_mri_coords.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7508.json b/doc/sphinxext/prs/7508.json new file mode 100644 index 00000000000..8e6a95d09bb --- /dev/null +++ b/doc/sphinxext/prs/7508.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "fdbb78ab5029e7abb7fd39186d609e468f59ce3d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/filter.py": { + "a": 8, + "d": 2 + }, + "mne/io/base.py": { + "a": 48, + "d": 30 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 19, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7509.json b/doc/sphinxext/prs/7509.json new file mode 100644 index 00000000000..657b5cccd3b --- /dev/null +++ b/doc/sphinxext/prs/7509.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b3af2f66cb4ca46a1b1e1b24e60545dbe3693227", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 14, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7510.json b/doc/sphinxext/prs/7510.json new file mode 100644 index 00000000000..ad70a954561 --- /dev/null +++ b/doc/sphinxext/prs/7510.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e2155cf1906edff3b5b8adeb59558867f07b40f6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 57, + "d": 22 + }, + "mne/tests/test_morph.py": { + "a": 29, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7511.json b/doc/sphinxext/prs/7511.json new file mode 100644 index 00000000000..6e204713d45 --- /dev/null +++ b/doc/sphinxext/prs/7511.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "009fca888ad97d7c621681da316803eae7c9fcd0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 43, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7516.json b/doc/sphinxext/prs/7516.json new file mode 100644 index 00000000000..58929a85b1b --- /dev/null +++ b/doc/sphinxext/prs/7516.json @@ -0,0 +1,27 @@ +{ + 
"merge_commit_sha": "69f8defd065801969a8cb53d702630024b9f35aa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 37, + "d": 0 + }, + "mne/utils/tests/test_numerics.py": { + "a": 34, + "d": 1 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7517.json b/doc/sphinxext/prs/7517.json new file mode 100644 index 00000000000..c08d5fb1109 --- /dev/null +++ b/doc/sphinxext/prs/7517.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "691c59b234d361e2d537e1407ef6d389e3cdbb0a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7519.json b/doc/sphinxext/prs/7519.json new file mode 100644 index 00000000000..502cb7ede66 --- /dev/null +++ b/doc/sphinxext/prs/7519.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b00f49122e053372b312053aa752a5e80a1aab04", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 10, + "d": 16 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 5 + }, + "mne/viz/backends/renderer.py": { + "a": 11, + "d": 17 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 8, + "d": 8 + }, + "mne/viz/tests/test_3d.py": { + "a": 10, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/752.json b/doc/sphinxext/prs/752.json new file mode 100644 index 00000000000..c6ae56a4610 --- /dev/null +++ b/doc/sphinxext/prs/752.json @@ -0,0 +1,259 @@ +{ + "merge_commit_sha": "a9c1604430c358c32b8c5b8580d6267be57993f5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/baseline.py": { + "a": 1, + "d": 3 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 4 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 4 + }, + "mne/connectivity/effective.py": { + "a": 1, + "d": 3 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 5 + }, + "mne/cov.py": { + "a": 2, + "d": 4 + }, + "mne/cuda.py": { + "a": 1, + "d": 4 + }, + "mne/datasets/megsim/megsim.py": { + "a": 0, + "d": 3 + }, + "mne/datasets/sample/sample.py": { + "a": 10, + "d": 5 + }, + "mne/decoding/classifier.py": { + "a": 0, + "d": 3 + }, + "mne/decoding/csp.py": { + "a": 2, + "d": 1 + }, + "mne/dipole.py": { + "a": 1, + "d": 3 + }, + "mne/epochs.py": { + "a": 1, + "d": 4 + }, + "mne/event.py": { + "a": 1, + "d": 5 + }, + "mne/fiff/bti/raw.py": { + "a": 1, + "d": 4 + }, + "mne/fiff/bti/read.py": { + "a": 0, + "d": 3 + }, + "mne/fiff/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/compensator.py": { + "a": 0, + "d": 4 + }, + "mne/fiff/cov.py": { + "a": 1, + "d": 4 + }, + "mne/fiff/ctf.py": { + "a": 1, + "d": 4 + }, + "mne/fiff/diff.py": { + "a": 1, + "d": 4 + }, + "mne/fiff/evoked.py": { + "a": 1, + "d": 6 + }, + "mne/fiff/kit/kit.py": { + "a": 1, + "d": 4 + }, + "mne/fiff/matrix.py": { + "a": 1, + "d": 4 + }, + "mne/fiff/meas_info.py": { + "a": 1, + "d": 4 + }, + "mne/fiff/open.py": { + "a": 1, + "d": 3 + }, + "mne/fiff/pick.py": { + "a": 1, + "d": 4 + }, + "mne/fiff/proj.py": { + "a": 1, + "d": 5 + }, + "mne/fiff/raw.py": { + "a": 1, + "d": 5 + }, + "mne/fiff/tree.py": { + "a": 1, + "d": 4 + }, + "mne/fiff/write.py": { + 
"a": 1, + "d": 2 + }, + "mne/filter.py": { + "a": 1, + "d": 4 + }, + "mne/forward.py": { + "a": 1, + "d": 5 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 1, + "d": 4 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 1, + "d": 5 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 4 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 4 + }, + "mne/label.py": { + "a": 1, + "d": 5 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 4 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 4 + }, + "mne/parallel.py": { + "a": 2, + "d": 3 + }, + "mne/preprocessing/ecg.py": { + "a": 6, + "d": 8 + }, + "mne/preprocessing/eog.py": { + "a": 10, + "d": 12 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/maxfilter.py": { + "a": 9, + "d": 11 + }, + "mne/preprocessing/peak_finder.py": { + "a": 2, + "d": 6 + }, + "mne/preprocessing/ssp.py": { + "a": 2, + "d": 4 + }, + "mne/proj.py": { + "a": 2, + "d": 4 + }, + "mne/realtime/client.py": { + "a": 1, + "d": 4 + }, + "mne/realtime/epochs.py": { + "a": 6, + "d": 8 + }, + "mne/selection.py": { + "a": 1, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 5 + }, + "mne/source_space.py": { + "a": 1, + "d": 5 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 5 + }, + "mne/surface.py": { + "a": 1, + "d": 4 + }, + "mne/time_frequency/csd.py": { + "a": 3, + "d": 6 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 17 + }, + "mne/time_frequency/stft.py": { + "a": 1, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 8 + }, + "mne/transforms/transforms.py": { + "a": 0, + "d": 3 + }, + "mne/utils.py": { + "a": 2, + "d": 2 + }, + "mne/viz.py": { + "a": 3, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7520.json b/doc/sphinxext/prs/7520.json new file mode 100644 index 00000000000..70901181334 --- /dev/null +++ b/doc/sphinxext/prs/7520.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1e882d05cab139530b2859bf4940c681f5b4b5e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/forward/plot_source_space_morphing.py": { + "a": 4, + "d": 4 + }, + "mne/utils/check.py": { + "a": 8, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 18, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 47, + "d": 53 + }, + "mne/viz/_brain/_brain.py": { + "a": 15, + "d": 25 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 73, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7522.json b/doc/sphinxext/prs/7522.json new file mode 100644 index 00000000000..ff86fd522f6 --- /dev/null +++ b/doc/sphinxext/prs/7522.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "84b71308b7a2e0afa2874116195ab7e9c957c61c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 3, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/glossary.rst": { + "a": 12, + "d": 1 + }, + "doc/sphinxext/mne_substitutions.py": { + "a": 48, + "d": 0 + }, + "mne/defaults.py": { + "a": 3, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 6, + "d": 3 + }, + "mne/io/tests/test_meas_info.py": { + "a": 12, + "d": 10 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7523.json b/doc/sphinxext/prs/7523.json new file mode 100644 index 00000000000..d5134d97c18 --- /dev/null +++ b/doc/sphinxext/prs/7523.json @@ -0,0 +1,15 @@ +{ + 
"merge_commit_sha": "225c90a591c6755b5660fc6d74f5f3b7c4cc1544", + "authors": [ + { + "n": "Ariel Rokem", + "e": "arokem@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7524.json b/doc/sphinxext/prs/7524.json new file mode 100644 index 00000000000..23c4ea3d084 --- /dev/null +++ b/doc/sphinxext/prs/7524.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4f66d81b5f42dcba84cbf4314fa8b947ebe988f7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7525.json b/doc/sphinxext/prs/7525.json new file mode 100644 index 00000000000..c28d553dd63 --- /dev/null +++ b/doc/sphinxext/prs/7525.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c38ccb70b8c40b28e484984f6151492da447da14", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/carousel.inc": { + "a": 2, + "d": 2 + }, + "doc/index.rst": { + "a": 0, + "d": 30 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7527.json b/doc/sphinxext/prs/7527.json new file mode 100644 index 00000000000..2e2b55e4319 --- /dev/null +++ b/doc/sphinxext/prs/7527.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9ca6b11553a8dc9eaee7e5cdbcdbe7f02d26aaa6", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_psf_ctf_vertices_lcmv.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_publication_figure.py": { + "a": 6, + "d": 4 + }, + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 5 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7529.json b/doc/sphinxext/prs/7529.json new file mode 100644 index 00000000000..921769c8074 --- /dev/null +++ b/doc/sphinxext/prs/7529.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "43315379ec273c4c20df764ffc7a5cc004a5943f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 10, + "d": 1 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 33, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7530.json b/doc/sphinxext/prs/7530.json new file mode 100644 index 00000000000..0c7db8d7dea --- /dev/null +++ b/doc/sphinxext/prs/7530.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "750c6ac6b4f5e7e0d8ff031dc76930fd7b126e39", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7531.json b/doc/sphinxext/prs/7531.json new file mode 100644 index 00000000000..fc53a734d14 --- /dev/null +++ b/doc/sphinxext/prs/7531.json @@ -0,0 +1,163 @@ +{ + "merge_commit_sha": "e42c86a1210169f5f51e98b997bea6616d4856d9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.12.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.18.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.20.inc": { + "a": 1, + "d": 1 + }, + 
"doc/python_reference.rst": { + "a": 0, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 60 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 27 + }, + "mne/chpi.py": { + "a": 1, + "d": 6 + }, + "mne/decoding/csp.py": { + "a": 8, + "d": 21 + }, + "mne/dipole.py": { + "a": 1, + "d": 9 + }, + "mne/epochs.py": { + "a": 4, + "d": 8 + }, + "mne/evoked.py": { + "a": 6, + "d": 10 + }, + "mne/io/base.py": { + "a": 2, + "d": 5 + }, + "mne/io/edf/edf.py": { + "a": 3, + "d": 12 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 2 + }, + "mne/io/fieldtrip/utils.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 29 + }, + "mne/io/pick.py": { + "a": 1, + "d": 7 + }, + "mne/io/proj.py": { + "a": 4, + "d": 9 + }, + "mne/io/tests/test_meas_info.py": { + "a": 3, + "d": 6 + }, + "mne/io/tests/test_pick.py": { + "a": 1, + "d": 7 + }, + "mne/minimum_norm/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/minimum_norm/psf_ctf.py": { + "a": 0, + "d": 407 + }, + "mne/minimum_norm/tests/test_psf_ctf.py": { + "a": 0, + "d": 81 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/bads.py": { + "a": 0, + "d": 26 + }, + "mne/preprocessing/ica.py": { + "a": 10, + "d": 178 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 14 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 5 + }, + "mne/source_space.py": { + "a": 7, + "d": 20 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 0, + "d": 3 + }, + "mne/tests/test_source_space.py": { + "a": 4, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 24, + "d": 32 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/dataframe.py": { + "a": 0, + "d": 11 + }, + "mne/utils/docs.py": { + "a": 0, + "d": 26 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 7 + }, + "mne/viz/topomap.py": { + "a": 40, + "d": 92 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7532.json b/doc/sphinxext/prs/7532.json new file mode 100644 index 00000000000..d4a247c91bc --- /dev/null +++ b/doc/sphinxext/prs/7532.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "15b9c95db383a1462719d9fbf11f735fa774a0c6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 1, + "d": 1 + }, + "doc/_includes/forward.rst": { + "a": 2, + "d": 2 + }, + "doc/_static/CoordinateSystems.png": { + "a": 0, + "d": 0 + }, + "doc/_static/HeadCS.png": { + "a": 0, + "d": 0 + }, + "doc/_static/amazon.svg": { + "a": 17, + "d": 0 + }, + "doc/_static/anr.jpeg": { + "a": 0, + "d": 0 + }, + "doc/_static/cds.png": { + "a": 0, + "d": 0 + }, + "doc/_static/doe.png": { + "a": 0, + "d": 0 + }, + "doc/_static/erc.png": { + "a": 0, + "d": 0 + }, + "doc/_static/google.svg": { + "a": 1, + "d": 0 + }, + "doc/_static/mne_logo.png": { + "a": 0, + "d": 0 + }, + "doc/_static/mne_logo.svg": { + "a": 744, + "d": 0 + }, + "doc/_static/mne_logo_small.png": { + "a": 0, + "d": 0 + }, + "doc/_static/mne_logo_small.svg": { + "a": 70, + "d": 0 + }, + "doc/_static/nih.png": { + "a": 0, + "d": 0 + }, + "doc/_static/nsf.png": { + "a": 0, + "d": 0 + }, + "doc/_static/style.css": { + "a": 314, + "d": 251 + }, + "doc/_templates/layout.html": { + "a": 25, + "d": 36 + }, + "doc/_templates/navbar.html": { + "a": 29, + "d": 21 + }, + "doc/carousel.inc": { + "a": 26, + "d": 32 + }, + "doc/conf.py": { + "a": 2, + "d": 6 + }, + "doc/index.rst": { + "a": 56, + "d": 76 + 
}, + "doc/overview/index.rst": { + "a": 1, + "d": 0 + }, + "doc/whats_new.rst": { + "a": 0, + "d": 2 + }, + "logo/generate_mne_logos.py": { + "a": 9, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7533.json b/doc/sphinxext/prs/7533.json new file mode 100644 index 00000000000..4faca4f5801 --- /dev/null +++ b/doc/sphinxext/prs/7533.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a932cbb76006329e62fb4901628db68b471774e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/cov.py": { + "a": 10, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7535.json b/doc/sphinxext/prs/7535.json new file mode 100644 index 00000000000..e48a07e8190 --- /dev/null +++ b/doc/sphinxext/prs/7535.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fdd0752de4c4ebbd34c520d5b6ace2806c27fbf0", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7536.json b/doc/sphinxext/prs/7536.json new file mode 100644 index 00000000000..be5177fd092 --- /dev/null +++ b/doc/sphinxext/prs/7536.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3d1646f9e2ba16e92a2c3ce6105da2c336a27fed", + "authors": [ + { + "n": "Maureen Shader", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 17, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7538.json b/doc/sphinxext/prs/7538.json new file mode 100644 index 00000000000..531c3dcb7e5 --- /dev/null +++ b/doc/sphinxext/prs/7538.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1639ab47dd41a73e9e95962e766c80c102cc70f1", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7539.json b/doc/sphinxext/prs/7539.json new file mode 100644 index 00000000000..a5e05f75f1d --- /dev/null +++ b/doc/sphinxext/prs/7539.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e69e581d58bdeada61c8b863885387aa28ede1da", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 1 + }, + "mne/io/brainvision/tests/data/test_units.vhdr": { + "a": 3, + "d": 3 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 18, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7541.json b/doc/sphinxext/prs/7541.json new file mode 100644 index 00000000000..5aed1ba17e9 --- /dev/null +++ b/doc/sphinxext/prs/7541.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "83dc0e1e4f772805428cce0c0c1ca45936262394", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 3, + "d": 4 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 5, + "d": 7 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 49, + "d": 42 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/7542.json b/doc/sphinxext/prs/7542.json new file mode 100644 index 00000000000..f48a6a8aff1 --- /dev/null +++ b/doc/sphinxext/prs/7542.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "09d6c58a46d48749def8b8a01422c7cf35b88a25", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/0.20.inc": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7545.json b/doc/sphinxext/prs/7545.json new file mode 100644 index 00000000000..814801abec8 --- /dev/null +++ b/doc/sphinxext/prs/7545.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e2f32970fe2ee8244c1608d962f4a851bed937d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 56, + "d": 55 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7546.json b/doc/sphinxext/prs/7546.json new file mode 100644 index 00000000000..21f2933c4c0 --- /dev/null +++ b/doc/sphinxext/prs/7546.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "caae4b2b231abf4e58cd009f813c183d7d7b0234", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 44, + "d": 47 + }, + "mne/utils/progressbar.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7547.json b/doc/sphinxext/prs/7547.json new file mode 100644 index 00000000000..0689e74ec4b --- /dev/null +++ b/doc/sphinxext/prs/7547.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3b4992ae4abca36285290741d7dcedd4823821b6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/advanced.rst": { + "a": 28, + "d": 3 + }, + "doc/overview/get_help.rst": { + "a": 14, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7550.json b/doc/sphinxext/prs/7550.json new file mode 100644 index 00000000000..33246a400bf --- /dev/null +++ b/doc/sphinxext/prs/7550.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ddc007f44c5eb840fcfd16dd3987b6535bed9f44", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 3 + }, + "mne/forward/tests/test_forward.py": { + "a": 19, + "d": 18 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 11, + "d": 1 + }, + "mne/source_space.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7551.json b/doc/sphinxext/prs/7551.json new file mode 100644 index 00000000000..f33820ed7e7 --- /dev/null +++ b/doc/sphinxext/prs/7551.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "2a74f551890bcbbc058ad3be5536f827ca9a1196", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 0, + "d": 6 + }, + "mne/dipole.py": { + "a": 14, + "d": 14 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/externals/h5io/_h5io.py": { + "a": 4, + "d": 4 + }, + "mne/fixes.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_lead_dots.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 2, + "d": 2 + }, + "mne/io/write.py": { + "a": 83, + "d": 83 + }, + "mne/source_estimate.py": { + "a": 9, + "d": 9 + }, + "mne/surface.py": { + "a": 2, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/7553.json b/doc/sphinxext/prs/7553.json new file mode 100644 index 00000000000..66b4838fd29 --- /dev/null +++ b/doc/sphinxext/prs/7553.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ab29bdbb48f5283ed4b423700071d5677f1955ec", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/egi/general.py": { + "a": 8, + "d": 0 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7554.json b/doc/sphinxext/prs/7554.json new file mode 100644 index 00000000000..26895686724 --- /dev/null +++ b/doc/sphinxext/prs/7554.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "901ffd8ce21943216375b89af1e573890e3de5ed", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7556.json b/doc/sphinxext/prs/7556.json new file mode 100644 index 00000000000..d60f1b837de --- /dev/null +++ b/doc/sphinxext/prs/7556.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "50f9f7b4539069128fc365624914b72e77767d86", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 11, + "d": 0 + }, + "examples/preprocessing/plot_fnirs_artifact_removal.py": { + "a": 92, + "d": 0 + }, + "mne/preprocessing/nirs/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/nirs/_temporal_derivative_distribution_repair.py": { + "a": 144, + "d": 0 + }, + "mne/preprocessing/tests/test_temporal_derivative_distribution_repair.py": { + "a": 35, + "d": 0 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 15, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7557.json b/doc/sphinxext/prs/7557.json new file mode 100644 index 00000000000..0cab45940de --- /dev/null +++ b/doc/sphinxext/prs/7557.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "01929ae628236bdb886c5f0a81a81b90c04beb67", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7558.json b/doc/sphinxext/prs/7558.json new file mode 100644 index 00000000000..989a548b83a --- /dev/null +++ b/doc/sphinxext/prs/7558.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "59925bb5adc8dc1b616cd2aaed50276ffd2fc530", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/dipole.py": { + "a": 6, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 18, + "d": 9 + }, + "mne/viz/tests/test_3d.py": { + "a": 14, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7559.json b/doc/sphinxext/prs/7559.json new file mode 100644 index 00000000000..2ec4be4cb02 --- /dev/null +++ b/doc/sphinxext/prs/7559.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ef1c86f4a9e022009e936468ec25d504e2b28d8a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 
1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7560.json b/doc/sphinxext/prs/7560.json new file mode 100644 index 00000000000..a96c37dd3b8 --- /dev/null +++ b/doc/sphinxext/prs/7560.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0c6f22001b617124a8188e94d845c627128b9a77", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 30, + "d": 13 + }, + "mne/io/tag.py": { + "a": 7, + "d": 6 + }, + "mne/tests/test_epochs.py": { + "a": 27, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7562.json b/doc/sphinxext/prs/7562.json new file mode 100644 index 00000000000..8b77e5cd8d5 --- /dev/null +++ b/doc/sphinxext/prs/7562.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "036c2fd71f929a7c34c503691a927da81cf8fde7", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7563.json b/doc/sphinxext/prs/7563.json new file mode 100644 index 00000000000..76f11bd6652 --- /dev/null +++ b/doc/sphinxext/prs/7563.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4aba0b78ac9ec229da09cd5d12a3a38d04727d72", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7565.json b/doc/sphinxext/prs/7565.json new file mode 100644 index 00000000000..efc2dcced61 --- /dev/null +++ b/doc/sphinxext/prs/7565.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9193ce2dd8b892132613fb65b8a4baf959d16230", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 2, + "d": 2 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7566.json b/doc/sphinxext/prs/7566.json new file mode 100644 index 00000000000..f3fb3b269bd --- /dev/null +++ b/doc/sphinxext/prs/7566.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "50d65402a8234c6b97b66dc1b75ff6249c348129", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/parallel.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7567.json b/doc/sphinxext/prs/7567.json new file mode 100644 index 00000000000..e47ef032369 --- /dev/null +++ b/doc/sphinxext/prs/7567.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fd349f5c079c905ac3ab65a72719f2512ecd6d43", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7569.json b/doc/sphinxext/prs/7569.json new file mode 100644 index 00000000000..de1617c34c4 --- /dev/null +++ b/doc/sphinxext/prs/7569.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cf1f8c663a2341319c4e36fcee049b0b64419900", + "authors": [ + { + "n": "Adonay Nunes", + "e": 
"adonay.s.nunes@gmail.com" + } + ], + "changes": { + "examples/preprocessing/plot_movement_detection.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/757.json b/doc/sphinxext/prs/757.json new file mode 100644 index 00000000000..8aba8a0c292 --- /dev/null +++ b/doc/sphinxext/prs/757.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "30b95046462b50b78993abfd1bae7a7a0a91d868", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "bin/mne_make_scalp_surfaces.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7572.json b/doc/sphinxext/prs/7572.json new file mode 100644 index 00000000000..ee7d1d6466f --- /dev/null +++ b/doc/sphinxext/prs/7572.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ab50b2320e6aa40f2c087c6d27b309935ad79e9d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7573.json b/doc/sphinxext/prs/7573.json new file mode 100644 index 00000000000..d9ed8a3863c --- /dev/null +++ b/doc/sphinxext/prs/7573.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1867bfeb6119778f5697294244e0ad24eeb86b46", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_publication_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7574.json b/doc/sphinxext/prs/7574.json new file mode 100644 index 00000000000..1cd59da845f --- /dev/null +++ b/doc/sphinxext/prs/7574.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "a720b8f4d5e615a45f0d1691e2bf204798cb67fb", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "environment.yml": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 129, + "d": 5 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 13, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 2, + "d": 2 + }, + "requirements.txt": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7578.json b/doc/sphinxext/prs/7578.json new file mode 100644 index 00000000000..0bdea6ced5d --- /dev/null +++ b/doc/sphinxext/prs/7578.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "d839a6ce32da840d7ed4f4cec5340e479f8bde55", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 4, + "d": 0 + }, + "doc/overview/cookbook.rst": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/io/bti/bti.py": { + "a": 4, + "d": 2 + }, + "mne/io/constants.py": { + "a": 93, + "d": 0 + }, + "mne/io/kit/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 7, + "d": 5 + }, + "mne/io/tag.py": { + "a": 15, + "d": 5 + }, + "mne/io/tests/test_compensator.py": { + "a": 11, + "d": 14 + }, + "mne/io/tests/test_constants.py": { + "a": 24, + "d": 1 
+ }, + "mne/io/tests/test_raw.py": { + "a": 12, + "d": 0 + }, + "mne/io/utils.py": { + "a": 1, + "d": 2 + }, + "mne/report.py": { + "a": 3, + "d": 2 + }, + "mne/source_space.py": { + "a": 40, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 16, + "d": 5 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 3 + }, + "mne/viz/misc.py": { + "a": 3, + "d": 16 + }, + "tutorials/source-modeling/plot_background_freesurfer.py": { + "a": 49, + "d": 34 + }, + "tutorials/source-modeling/plot_background_freesurfer_mne.py": { + "a": 431, + "d": 0 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 11, + "d": 0 + }, + "tutorials/source-modeling/plot_object_source_estimate.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7579.json b/doc/sphinxext/prs/7579.json new file mode 100644 index 00000000000..4847e61fd8d --- /dev/null +++ b/doc/sphinxext/prs/7579.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ef9c259733fd5928dc7cf78fdb461d7a30d5fe02", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "tutorials/source-modeling/plot_eeg_mri_coords.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/758.json b/doc/sphinxext/prs/758.json new file mode 100644 index 00000000000..e5d3ed9761d --- /dev/null +++ b/doc/sphinxext/prs/758.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cd5ddf7e15f8d6c16957ff79afaad49e8aed7ba0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7580.json b/doc/sphinxext/prs/7580.json new file mode 100644 index 00000000000..c4d3b8be865 --- /dev/null +++ b/doc/sphinxext/prs/7580.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f3eb193897a5bbb8ebb501b649a89f2183cc6555", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 11, + "d": 6 + }, + "mne/viz/_brain/_brain.py": { + "a": 10, + "d": 3 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7581.json b/doc/sphinxext/prs/7581.json new file mode 100644 index 00000000000..609f28bc026 --- /dev/null +++ b/doc/sphinxext/prs/7581.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8801dcb53262022a221738b96dda248ea786fe61", + "authors": [ + { + "n": "Svea Marie Meyer", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7583.json b/doc/sphinxext/prs/7583.json new file mode 100644 index 00000000000..598e4c3a3d3 --- /dev/null +++ b/doc/sphinxext/prs/7583.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "09ac18f621efc177fc0a07e1f1f3eec4831a2ae8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 6, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 10, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7584.json b/doc/sphinxext/prs/7584.json new file mode 100644 index 00000000000..9060b46308c --- /dev/null +++ 
b/doc/sphinxext/prs/7584.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bc876405aeabb2e6ebcc47b468eecc14eb0f7ef9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 0, + "d": 3 + }, + "doc/install/mne_python.rst": { + "a": 1, + "d": 2 + }, + "environment.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7587.json b/doc/sphinxext/prs/7587.json new file mode 100644 index 00000000000..c5da83c01b2 --- /dev/null +++ b/doc/sphinxext/prs/7587.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3badeef66a6b7fcc91bf4280cb933fa2e2f6a727", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7588.json b/doc/sphinxext/prs/7588.json new file mode 100644 index 00000000000..f9f23ef1c27 --- /dev/null +++ b/doc/sphinxext/prs/7588.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1c9295163319e2a55ec5ebbab77cf33727b2e00f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7589.json b/doc/sphinxext/prs/7589.json new file mode 100644 index 00000000000..86c121d72bf --- /dev/null +++ b/doc/sphinxext/prs/7589.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "550dd1b5e24aa4820cefbb1b101d03bf713bc182", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 3, + "d": 0 + }, + "mne.qrc": { + "a": 13, + "d": 0 + }, + "mne/images/clear-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/images/help-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/images/pause-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/images/play-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/images/restore-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/images/scale-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/images/screenshot-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/images/visibility_off-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/images/visibility_on-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 74, + "d": 0 + }, + "mne/viz/_brain/resources.py": { + "a": 394, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/759.json b/doc/sphinxext/prs/759.json new file mode 100644 index 00000000000..cccaef08901 --- /dev/null +++ b/doc/sphinxext/prs/759.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f2c1d8c6566ee5ddd95470278d7178a699273b3e", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fiff/kit/kit.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7590.json b/doc/sphinxext/prs/7590.json new file mode 100644 index 00000000000..c176abcaa5f --- /dev/null +++ b/doc/sphinxext/prs/7590.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "688db27e0445b2edc7076b3a3c9443a2f2e12910", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 25, + "d": 19 + }, + "mne/io/_digitization.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7593.json b/doc/sphinxext/prs/7593.json new file mode 100644 index 00000000000..c388b032274 --- 
/dev/null +++ b/doc/sphinxext/prs/7593.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "7b3d86907cc1bed2ba4b61b65d3fe06b6f6fd8ab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Roeland Hancock", + "e": "rhancock@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/egimff.py": { + "a": 137, + "d": 75 + }, + "mne/io/egi/events.py": { + "a": 5, + "d": 7 + }, + "mne/io/egi/general.py": { + "a": 8, + "d": 5 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 77, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7594.json b/doc/sphinxext/prs/7594.json new file mode 100644 index 00000000000..a399a85875e --- /dev/null +++ b/doc/sphinxext/prs/7594.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "83f84ba2c914ce1cf2bde8dfe97f93742f194e6d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_brainstorm_data.py": { + "a": 2, + "d": 1 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 41, + "d": 34 + }, + "examples/time_frequency/plot_source_power_spectrum_opm.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7596.json b/doc/sphinxext/prs/7596.json new file mode 100644 index 00000000000..28bb994a16b --- /dev/null +++ b/doc/sphinxext/prs/7596.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8aa26ac5612e014fb839abe544a5748b85073231", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "tutorials/intro/plot_20_events_from_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 1, + "d": 1 + }, + "tutorials/raw/plot_20_event_arrays.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7597.json b/doc/sphinxext/prs/7597.json new file mode 100644 index 00000000000..f9755edeb74 --- /dev/null +++ b/doc/sphinxext/prs/7597.json @@ -0,0 +1,131 @@ +{ + "merge_commit_sha": "dd2a1f40d0d8e3499b941c5a3403ddd550fd4c2d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 11, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 32, + "d": 11 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/conftest.py": { + "a": 104, + "d": 3 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 7, + "d": 6 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 21, + "d": 31 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 35, + "d": 1 + }, + "mne/morph.py": { + "a": 12, + "d": 8 + }, + "mne/proj.py": { + "a": 4, + "d": 10 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 6, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 248, + "d": 202 + }, + "mne/source_space.py": { + "a": 68, + "d": 26 + 
}, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_morph.py": { + "a": 6, + "d": 5 + }, + "mne/tests/test_source_estimate.py": { + "a": 95, + "d": 48 + }, + "mne/tests/test_source_space.py": { + "a": 22, + "d": 11 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 11, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 15, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 5 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7598.json b/doc/sphinxext/prs/7598.json new file mode 100644 index 00000000000..80fb2977078 --- /dev/null +++ b/doc/sphinxext/prs/7598.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b50a78fb3e099e148927734009ca2152b4635585", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_ola.py": { + "a": 436, + "d": 0 + }, + "mne/filter.py": { + "a": 0, + "d": 96 + }, + "mne/preprocessing/otp.py": { + "a": 52, + "d": 49 + }, + "mne/preprocessing/utils.py": { + "a": 0, + "d": 59 + }, + "mne/simulation/raw.py": { + "a": 102, + "d": 105 + }, + "mne/tests/test_filter.py": { + "a": 8, + "d": 23 + }, + "mne/tests/test_ola.py": { + "a": 112, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/76.json b/doc/sphinxext/prs/76.json new file mode 100644 index 00000000000..05c8ce5681b --- /dev/null +++ b/doc/sphinxext/prs/76.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "559121948c95463bb3a1a916fd43ec0fd74b4630", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 233, + "d": 41 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 66, + "d": 5 + }, + "mne/cov.py": { + "a": 24, + "d": 5 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 25 + }, + "mne/source_space.py": { + "a": 41, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7600.json b/doc/sphinxext/prs/7600.json new file mode 100644 index 00000000000..b887b5a3bf1 --- /dev/null +++ b/doc/sphinxext/prs/7600.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "51c4cbaf3d3c3fb7a5d9371ba89b54e3a9dbe4e6", + "authors": [ + { + "n": "alexandra.corneyllie", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7603.json b/doc/sphinxext/prs/7603.json new file mode 100644 index 00000000000..b03dd5e88a0 --- /dev/null +++ b/doc/sphinxext/prs/7603.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "093ac9cdb8075b6bdd4ed62302be0353ffb5fd51", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 19, + "d": 12 + }, + "mne/defaults.py": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/io/proj.py": { + "a": 6, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 24, + "d": 8 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 93, + "d": 54 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7606.json 
b/doc/sphinxext/prs/7606.json new file mode 100644 index 00000000000..498722b4124 --- /dev/null +++ b/doc/sphinxext/prs/7606.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "90c5c8efddfcd55210317e64852c46c2def7bd64", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 2 + }, + "mne.qrc": { + "a": 0, + "d": 14 + }, + "mne/icons/README.rst": { + "a": 18, + "d": 0 + }, + "mne/icons/clear-black-18dp.svg": { + "a": 0, + "d": 0 + }, + "mne/icons/help-black-18dp.svg": { + "a": 0, + "d": 0 + }, + "mne/icons/mne.qrc": { + "a": 14, + "d": 0 + }, + "mne/icons/movie-black-18dp.svg": { + "a": 0, + "d": 0 + }, + "mne/icons/pause-black-18dp.svg": { + "a": 0, + "d": 0 + }, + "mne/icons/play-black-18dp.svg": { + "a": 0, + "d": 0 + }, + "mne/icons/resources.py": { + "a": 13, + "d": 23 + }, + "mne/icons/restore-black-18dp.svg": { + "a": 0, + "d": 0 + }, + "mne/icons/scale-black-18dp.svg": { + "a": 0, + "d": 0 + }, + "mne/icons/screenshot-black-18dp.svg": { + "a": 0, + "d": 0 + }, + "mne/icons/visibility_off-black-18dp.svg": { + "a": 0, + "d": 0 + }, + "mne/icons/visibility_on-black-18dp.svg": { + "a": 0, + "d": 0 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 14, + "d": 6 + }, + "setup.cfg": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7607.json b/doc/sphinxext/prs/7607.json new file mode 100644 index 00000000000..73a6e57d484 --- /dev/null +++ b/doc/sphinxext/prs/7607.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0dbfe7f7070d82077b703a1626d5638f6a44d687", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7609.json b/doc/sphinxext/prs/7609.json new file mode 100644 index 00000000000..8b9d3c7b359 --- /dev/null +++ b/doc/sphinxext/prs/7609.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "58984593fc3b8bbc51c8b9732b118f7ef8916677", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/filter.py": { + "a": 134, + "d": 72 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 51, + "d": 26 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + }, + "tutorials/preprocessing/plot_30_filtering_resampling.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7610.json b/doc/sphinxext/prs/7610.json new file mode 100644 index 00000000000..f48810b180d --- /dev/null +++ b/doc/sphinxext/prs/7610.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "18d2cee8e96b83a5a08fe433a2eeffddf24276ae", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 10, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7612.json b/doc/sphinxext/prs/7612.json new file mode 100644 index 00000000000..1fd45c2a538 --- /dev/null +++ b/doc/sphinxext/prs/7612.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bfbb8cbecaea6620f2625507bbf4cc321b3bd3ce", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, 
+ "d": 1 + }, + "mne.qrc": { + "a": 1, + "d": 0 + }, + "mne/images/movie-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 95, + "d": 3 + }, + "mne/viz/_brain/resources.py": { + "a": 189, + "d": 161 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7613.json b/doc/sphinxext/prs/7613.json new file mode 100644 index 00000000000..6fa279e3645 --- /dev/null +++ b/doc/sphinxext/prs/7613.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fe1d1c6901e61a0578c76388a37af7b142597669", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7614.json b/doc/sphinxext/prs/7614.json new file mode 100644 index 00000000000..d64e35bb972 --- /dev/null +++ b/doc/sphinxext/prs/7614.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "54177ca854ebff50b645239d781afbce441ecf2e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_vector_mne_solution.py": { + "a": 27, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 0, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 0, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 3, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7616.json b/doc/sphinxext/prs/7616.json new file mode 100644 index 00000000000..fb760b0e945 --- /dev/null +++ b/doc/sphinxext/prs/7616.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7157222b112d01133d49065f0f6e4824c6910ce2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_setup_forward_model.py": { + "a": 4, + "d": 0 + }, + "mne/commands/mne_setup_source_space.py": { + "a": 13, + "d": 3 + }, + "mne/commands/tests/test_commands.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7618.json b/doc/sphinxext/prs/7618.json new file mode 100644 index 00000000000..a610e11793f --- /dev/null +++ b/doc/sphinxext/prs/7618.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c4119388444e5867ee1a054100c78f57c9f2da25", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 4, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 69, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 21, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 94, + "d": 101 + }, + "tutorials/epochs/plot_20_visualize_epochs.py": { + "a": 30, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7619.json b/doc/sphinxext/prs/7619.json new file mode 100644 index 00000000000..53189fe2bc9 --- /dev/null +++ b/doc/sphinxext/prs/7619.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "972ca8c980246fc0245504889e5087d0bc14919b", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 28, + "d": 7 + }, + 
"mne/channels/tests/test_montage.py": { + "a": 38, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 4, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/762.json b/doc/sphinxext/prs/762.json new file mode 100644 index 00000000000..ca7bf75f256 --- /dev/null +++ b/doc/sphinxext/prs/762.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3e79e6279853a1dd9ac2232cd7b7079c5e25a166", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/fiff/raw.py": { + "a": 8, + "d": 2 + }, + "mne/viz.py": { + "a": 38, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7620.json b/doc/sphinxext/prs/7620.json new file mode 100644 index 00000000000..c3a6ecc4795 --- /dev/null +++ b/doc/sphinxext/prs/7620.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1d44c29cb0123f72176f251bf688be709284f522", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 24, + "d": 5 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 27, + "d": 15 + }, + "mne/viz/tests/test_3d.py": { + "a": 19, + "d": 0 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 22, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7621.json b/doc/sphinxext/prs/7621.json new file mode 100644 index 00000000000..2ccf46340a1 --- /dev/null +++ b/doc/sphinxext/prs/7621.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "db6f16531d0aaf826ce1fea6b47fd7e340913ba5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7623.json b/doc/sphinxext/prs/7623.json new file mode 100644 index 00000000000..18de73802f5 --- /dev/null +++ b/doc/sphinxext/prs/7623.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "26e38513ca7be1a96b012f0853c832b2ec3b2e9f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 76, + "d": 102 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7624.json b/doc/sphinxext/prs/7624.json new file mode 100644 index 00000000000..71e843384e7 --- /dev/null +++ b/doc/sphinxext/prs/7624.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "df0ace5ba0cec9cd339b87f6efb0cf97ccf79242", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/utils/config.py": { + "a": 3, + "d": 2 + }, + "mne/utils/tests/test_config.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7625.json b/doc/sphinxext/prs/7625.json new file mode 100644 index 00000000000..95fc8e45fa7 --- /dev/null +++ b/doc/sphinxext/prs/7625.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d058bb54eb2733c12ab2edfe8893ca72b4869931", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/surface.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7626.json b/doc/sphinxext/prs/7626.json new file mode 100644 index 00000000000..fd7f9393ad7 --- /dev/null +++ b/doc/sphinxext/prs/7626.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"1e791d703768045f34b4e0f25433979c3faba11b", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7627.json b/doc/sphinxext/prs/7627.json new file mode 100644 index 00000000000..9cd8e8ccf1d --- /dev/null +++ b/doc/sphinxext/prs/7627.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f1e2927d7d894935860676927d3993bce488fa8f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 63, + "d": 71 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7629.json b/doc/sphinxext/prs/7629.json new file mode 100644 index 00000000000..42fd2bda8d9 --- /dev/null +++ b/doc/sphinxext/prs/7629.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "86218c899db7e875d24d82d9f450f9036aa073e7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/tests/test_external.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 4, + "d": 2 + }, + "mne/epochs.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 46, + "d": 27 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/763.json b/doc/sphinxext/prs/763.json new file mode 100644 index 00000000000..26d62c22f81 --- /dev/null +++ b/doc/sphinxext/prs/763.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "65324a60211320696e44b7e5ca9ca4e3efb292da", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "bin/mne_kit2fiff.py": { + "a": 11, + "d": 11 + }, + "mne/fiff/kit/constants.py": { + "a": 11, + "d": 11 + }, + "mne/fiff/kit/coreg.py": { + "a": 147, + "d": 175 + }, + "mne/fiff/kit/kit.py": { + "a": 277, + "d": 83 + }, + "mne/fiff/kit/tests/test_coreg.py": { + "a": 75, + "d": 0 + }, + "mne/fiff/kit/tests/test_kit.py": { + "a": 42, + "d": 40 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7630.json b/doc/sphinxext/prs/7630.json new file mode 100644 index 00000000000..681935391e9 --- /dev/null +++ b/doc/sphinxext/prs/7630.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "46197d4926f4654dc1df2e0cabbfb26642fd884d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 8, + "d": 3 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 17, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7634.json b/doc/sphinxext/prs/7634.json new file mode 100644 index 00000000000..cd71729916c --- /dev/null +++ b/doc/sphinxext/prs/7634.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7f8f5b595d9f4203c66e56c9ab9f7a9cc2a07759", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7638.json b/doc/sphinxext/prs/7638.json new file mode 100644 index 00000000000..9fd877e72c6 --- /dev/null +++ b/doc/sphinxext/prs/7638.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "24eae37c4098b897bfa170524e4788c5d8d5283e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { 
+ "a": 1, + "d": 1 + }, + "README.rst": { + "a": 4, + "d": 4 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + }, + "doc/install/mne_python.rst": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 5, + "d": 4 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 2, + "d": 4 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 7, + "d": 10 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 9 + }, + "mne/channels/tests/test_montage.py": { + "a": 11, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 23 + }, + "mne/datasets/utils.py": { + "a": 1, + "d": 2 + }, + "mne/fixes.py": { + "a": 0, + "d": 16 + }, + "mne/io/_digitization.py": { + "a": 6, + "d": 10 + }, + "mne/io/what.py": { + "a": 1, + "d": 3 + }, + "mne/io/write.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7639.json b/doc/sphinxext/prs/7639.json new file mode 100644 index 00000000000..4b0de5a59ee --- /dev/null +++ b/doc/sphinxext/prs/7639.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "676ed97f4500511b1dca88ecb1f1ee25dc705d19", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 7, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 21, + "d": 1 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 317, + "d": 173 + }, + "mne/source_space.py": { + "a": 321, + "d": 229 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 167, + "d": 7 + }, + "mne/tests/test_source_space.py": { + "a": 71, + "d": 33 + }, + "mne/transforms.py": { + "a": 7, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 78, + "d": 14 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7640.json b/doc/sphinxext/prs/7640.json new file mode 100644 index 00000000000..def5a1be366 --- /dev/null +++ b/doc/sphinxext/prs/7640.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2376ad38fe1aac01ac9bc2c327100bba4a86b6d0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 6, + "d": 6 + }, + "mne/tests/test_report.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7641.json b/doc/sphinxext/prs/7641.json new file mode 100644 index 00000000000..5f461e52952 --- /dev/null +++ b/doc/sphinxext/prs/7641.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6004b08a5f5880c4c74c2d5dbe078096e9c229db", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 8, + "d": 5 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 17, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7642.json b/doc/sphinxext/prs/7642.json new file mode 100644 index 00000000000..a37567fcb4b --- /dev/null 
+++ b/doc/sphinxext/prs/7642.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "d6a4eb056617056d31d2c71ecf1b6d9287015139", + "authors": [ + { + "n": "Britta Westner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 10, + "d": 24 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 9, + "d": 8 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/linalg.py": { + "a": 16, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 2, + "d": 1 + }, + "mne/utils/tests/test_linalg.py": { + "a": 72, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7643.json b/doc/sphinxext/prs/7643.json new file mode 100644 index 00000000000..9c8a060769a --- /dev/null +++ b/doc/sphinxext/prs/7643.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "87e7157a932d4625a5aeb788bd35514d2bdb9f5a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 12, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7646.json b/doc/sphinxext/prs/7646.json new file mode 100644 index 00000000000..dfd78008c7f --- /dev/null +++ b/doc/sphinxext/prs/7646.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "fd43bbe2ca013d742b3d082f0284c2d73d06a52a", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/_includes/dig_formats.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/0.19.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "mne/channels/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 40, + "d": 10 + }, + "mne/channels/tests/test_montage.py": { + "a": 13, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7647.json b/doc/sphinxext/prs/7647.json new file mode 100644 index 00000000000..f11a9bcc8c4 --- /dev/null +++ b/doc/sphinxext/prs/7647.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1270bb8d3d842f9e7c27f7a9855280e06033958c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 42, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 87, + "d": 78 + }, + "mne/tests/test_source_estimate.py": { + "a": 45, + "d": 25 + }, + "mne/utils/docs.py": { + "a": 13, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7649.json b/doc/sphinxext/prs/7649.json new file mode 100644 index 00000000000..48bbebe3a34 --- /dev/null +++ b/doc/sphinxext/prs/7649.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3cb776d78d9286eab9d83959fcd9ee0169a7516d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 34, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/765.json b/doc/sphinxext/prs/765.json new file mode 100644 index 00000000000..bbc17c501be --- /dev/null +++ b/doc/sphinxext/prs/765.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b10acb752ca725c28db33af0bf97e4c6a4ea75f0", + "authors": [ + { + "n": "Yousra BEKHTI", + "e": 
"yousra.bekhti@gmail.com" + } + ], + "changes": { + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7650.json b/doc/sphinxext/prs/7650.json new file mode 100644 index 00000000000..2289c5e9e18 --- /dev/null +++ b/doc/sphinxext/prs/7650.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b111b2ebdbbb98e1fc80c70ac6b30d96c4133953", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/_ola.py": { + "a": 33, + "d": 26 + }, + "mne/tests/test_ola.py": { + "a": 11, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7651.json b/doc/sphinxext/prs/7651.json new file mode 100644 index 00000000000..cfd16708dab --- /dev/null +++ b/doc/sphinxext/prs/7651.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "066db2d13e4520db3b04c6610f0dfb508ff54871", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_envelope_correlation_volume.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7653.json b/doc/sphinxext/prs/7653.json new file mode 100644 index 00000000000..fcafed6b2ad --- /dev/null +++ b/doc/sphinxext/prs/7653.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "beaff02d91a80c2cf12e626d47b3f1678188472d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/source_space.py": { + "a": 11, + "d": 13 + }, + "mne/tests/test_morph.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7656.json b/doc/sphinxext/prs/7656.json new file mode 100644 index 00000000000..d6c3fabe309 --- /dev/null +++ b/doc/sphinxext/prs/7656.json @@ -0,0 +1,127 @@ +{ + "merge_commit_sha": "1391ad239bfbc1ad989bda0f578a397b96db4c66", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "britta-wstnr", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Alex", + "e": "aprockhill206@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 11, + "d": 0 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 19, + "d": 14 + }, + "examples/inverse/plot_tf_lcmv.py": { + "a": 0, + "d": 145 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 164, + "d": 167 + }, + "mne/beamformer/_dics.py": { + "a": 113, + "d": 105 + }, + "mne/beamformer/_lcmv.py": { + "a": 88, + "d": 53 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 130, + "d": 78 + }, + "mne/beamformer/tests/test_external.py": { + "a": 22, + "d": 15 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 178, + "d": 26 + }, + "mne/cov.py": { + "a": 14, + "d": 8 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 60, + "d": 0 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/rank.py": { + "a": 10, + "d": 8 + }, + "mne/time_frequency/csd.py": { + "a": 26, + "d": 6 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 3 + }, + "mne/utils/check.py": { + "a": 14, + "d": 14 + }, + "mne/utils/docs.py": { + "a": 58, + "d": 0 + }, + 
"mne/utils/linalg.py": { + "a": 21, + "d": 20 + }, + "mne/utils/numerics.py": { + "a": 32, + "d": 37 + }, + "mne/utils/tests/test_linalg.py": { + "a": 43, + "d": 12 + }, + "mne/utils/tests/test_numerics.py": { + "a": 8, + "d": 4 + }, + "tutorials/simulation/plot_dics.py": { + "a": 6, + "d": 4 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7658.json b/doc/sphinxext/prs/7658.json new file mode 100644 index 00000000000..2ef39aac3d6 --- /dev/null +++ b/doc/sphinxext/prs/7658.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b437776e8ebd8bd8295e90b50f0fd8c257ed7f2e", + "authors": [ + { + "n": "Yu-Han Luo", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 16, + "d": 21 + }, + "mne/commands/tests/test_commands.py": { + "a": 23, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7659.json b/doc/sphinxext/prs/7659.json new file mode 100644 index 00000000000..4379ebd38d5 --- /dev/null +++ b/doc/sphinxext/prs/7659.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "76f871511db93f11c27177a712df48b259092bdd", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/visualization/plot_parcellation.py": { + "a": 1, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 197, + "d": 18 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 19, + "d": 3 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 3, + "d": 2 + }, + "mne/viz/backends/base_renderer.py": { + "a": 16, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/766.json b/doc/sphinxext/prs/766.json new file mode 100644 index 00000000000..c5c481090be --- /dev/null +++ b/doc/sphinxext/prs/766.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "ae5165b2eb96d8adef298148f015cca1f7e5979a", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 9, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 6, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/coreg.py": { + "a": 5, + "d": 0 + }, + "mne/fiff/kit/tests/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/fiff/kit/tests/data/test_mrk_post.sqd": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/tests/data/test_mrk_pre.sqd": { + "a": 0, + "d": 0 + }, + "mne/fiff/tests/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/gui/__init__.py": { + "a": 89, + "d": 0 + }, + "mne/gui/_coreg_gui.py": { + "a": 1372, + "d": 0 + }, + "mne/gui/_fiducials_gui.py": { + "a": 462, + "d": 0 + }, + "mne/gui/_file_traits.py": { + "a": 427, + "d": 0 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 518, + "d": 0 + }, + "mne/gui/_marker_gui.py": { + "a": 447, + "d": 0 + }, + "mne/gui/_viewer.py": { + "a": 340, + "d": 0 + }, + "mne/gui/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 157, + "d": 0 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 68, + "d": 0 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 110, + "d": 0 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 65, + "d": 0 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 47, + "d": 0 + }, + "mne/utils.py": { + "a": 7, + "d": 0 + }, + 
"setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7660.json b/doc/sphinxext/prs/7660.json new file mode 100644 index 00000000000..3ac730938f1 --- /dev/null +++ b/doc/sphinxext/prs/7660.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "58b42b381561f0256fda54385a903701ab4f0247", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 4, + "d": 6 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7663.json b/doc/sphinxext/prs/7663.json new file mode 100644 index 00000000000..54f491ec5b9 --- /dev/null +++ b/doc/sphinxext/prs/7663.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "a0691c8c8c0431b14fee57fecd35986e6f34e07f", + "authors": [ + { + "n": "Yu-Han Luo", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 5, + "d": 9 + }, + "mne/commands/tests/test_commands.py": { + "a": 18, + "d": 13 + }, + "mne/datasets/sample/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/datasets/sample/sample.py": { + "a": 1, + "d": 27 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 5, + "d": 44 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7664.json b/doc/sphinxext/prs/7664.json new file mode 100644 index 00000000000..5fa8a67bc8f --- /dev/null +++ b/doc/sphinxext/prs/7664.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0ccd8028f8a2bbcf74595cce3649d1756b77dfdd", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 77, + "d": 27 + }, + "mne/viz/tests/test_misc.py": { + "a": 32, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7665.json b/doc/sphinxext/prs/7665.json new file mode 100644 index 00000000000..edee70a3d3a --- /dev/null +++ b/doc/sphinxext/prs/7665.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b8960d8d46fd4db6b1c4f07fdc94353942235e8d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7667.json b/doc/sphinxext/prs/7667.json new file mode 100644 index 00000000000..6ddbc7d0822 --- /dev/null +++ b/doc/sphinxext/prs/7667.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "01750aad31508519ecfe3e7945e54a3fbe1c9bbe", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 41, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 47, + "d": 6 + }, + "mne/channels/tests/test_montage.py": { + "a": 85, + "d": 1 + }, + "mne/io/_digitization.py": { + "a": 61, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7668.json b/doc/sphinxext/prs/7668.json new file mode 100644 index 
00000000000..65523a765c2 --- /dev/null +++ b/doc/sphinxext/prs/7668.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "16ee599cba2758a6c645f79ce018f226ebcd303a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 12, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/767.json b/doc/sphinxext/prs/767.json new file mode 100644 index 00000000000..50ff3f46572 --- /dev/null +++ b/doc/sphinxext/prs/767.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a94e1347ee5c083098e83f3591bc83ffa8a6b7f1", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + "changes": { + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7670.json b/doc/sphinxext/prs/7670.json new file mode 100644 index 00000000000..633e1d752e9 --- /dev/null +++ b/doc/sphinxext/prs/7670.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4415abb6388f8abd2249a0f09056de2f537736ac", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 3, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7672.json b/doc/sphinxext/prs/7672.json new file mode 100644 index 00000000000..7e79091c09f --- /dev/null +++ b/doc/sphinxext/prs/7672.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d0b8bcf5201d7b5c278e7410e4035dc5aead30b8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 10, + "d": 4 + }, + ".travis.yml": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7674.json b/doc/sphinxext/prs/7674.json new file mode 100644 index 00000000000..646891be724 --- /dev/null +++ b/doc/sphinxext/prs/7674.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b10e14592689f70e5f6cab48a1e0d439b92733c3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 6, + "d": 3 + }, + ".travis.yml": { + "a": 4, + "d": 4 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "doc/install/mne_python.rst": { + "a": 21, + "d": 19 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 29, + "d": 26 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7675.json b/doc/sphinxext/prs/7675.json new file mode 100644 index 00000000000..c8fd14a9c19 --- /dev/null +++ b/doc/sphinxext/prs/7675.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5ce8a086a67be5ac433f56023efab366310c9932", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7677.json b/doc/sphinxext/prs/7677.json new file mode 100644 index 00000000000..06945d13088 --- /dev/null +++ b/doc/sphinxext/prs/7677.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "df14ae94ff76572cf1896392910eb76f34a9f9da", + "authors": [ + { + "n": "Mainak Jas", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + 
], + "changes": { + "tutorials/preprocessing/plot_20_rejecting_bad_data.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7682.json b/doc/sphinxext/prs/7682.json new file mode 100644 index 00000000000..b48f8493a52 --- /dev/null +++ b/doc/sphinxext/prs/7682.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0adc1699cc235c0d3b9a5588cbd2dec818a59fd1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/overview/roadmap.rst": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 12 + }, + "mne/utils/docs.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7685.json b/doc/sphinxext/prs/7685.json new file mode 100644 index 00000000000..d285e215653 --- /dev/null +++ b/doc/sphinxext/prs/7685.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c166d0fe07c12fe859d6fe3cdd09449f70e450ba", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 6, + "d": 9 + }, + "mne/channels/montage.py": { + "a": 18, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 44, + "d": 7 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7686.json b/doc/sphinxext/prs/7686.json new file mode 100644 index 00000000000..3d8b8b2b373 --- /dev/null +++ b/doc/sphinxext/prs/7686.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "68acfab7b6ffa3b5fb90f0d7e23915c482d7e9cb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/stats/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 8, + "d": 7 + }, + "mne/stats/parametric.py": { + "a": 49, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 40, + "d": 1 + }, + "mne/stats/tests/test_parametric.py": { + "a": 46, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7687.json b/doc/sphinxext/prs/7687.json new file mode 100644 index 00000000000..10151c17712 --- /dev/null +++ b/doc/sphinxext/prs/7687.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cef447520ed78dc3849ed78f906a8106eb0885fb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7688.json b/doc/sphinxext/prs/7688.json new file mode 100644 index 00000000000..28954a4eab7 --- /dev/null +++ b/doc/sphinxext/prs/7688.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5f4c9046fb07cf75a316d56d809e4b7266753ceb", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 16, + "d": 16 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 4, + "d": 4 + }, + "mne/viz/backends/renderer.py": { + "a": 14, + "d": 4 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7690.json b/doc/sphinxext/prs/7690.json new file mode 100644 index 00000000000..1e3807ef832 --- /dev/null +++ b/doc/sphinxext/prs/7690.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "9e0e02fa2f5995d4a41cb539fe1813a2e4b9d649", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 16, + "d": 0 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 4, + "d": 3 + }, + "mne/utils/check.py": { + "a": 1, + "d": 0 + }, + "mne/utils/tests/test_logging.py": { + "a": 72, + "d": 70 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7695.json b/doc/sphinxext/prs/7695.json new file mode 100644 index 00000000000..1472a411f5c --- /dev/null +++ b/doc/sphinxext/prs/7695.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9eeb1e2e3758e894cac05f7139d19c6e1d61072a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 8, + "d": 28 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7698.json b/doc/sphinxext/prs/7698.json new file mode 100644 index 00000000000..de66ff8b24e --- /dev/null +++ b/doc/sphinxext/prs/7698.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7096a3dbf38e6722f1e6383baea24df55e615aad", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/icons/mne.qrc": { + "a": 10, + "d": 10 + }, + "mne/icons/movie-black-18dp.svg": { + "a": 72, + "d": 1 + }, + "mne/icons/resources.py": { + "a": 332, + "d": 182 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7699.json b/doc/sphinxext/prs/7699.json new file mode 100644 index 00000000000..9bf1a81f277 --- /dev/null +++ b/doc/sphinxext/prs/7699.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e9c8a90b770cf345d3f5dca8d9623922e0cd0298", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/multitaper.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/77.json b/doc/sphinxext/prs/77.json new file mode 100644 index 00000000000..cd9d840fc05 --- /dev/null +++ b/doc/sphinxext/prs/77.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1c24cddb93f15771a8a5b50d2c9d128d4564d20d", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/manual/convert.rst": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7700.json b/doc/sphinxext/prs/7700.json new file mode 100644 index 00000000000..979d7eb4c19 --- /dev/null +++ b/doc/sphinxext/prs/7700.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a2fab967c637309caa07b9e58f3b6294cb58902f", + "authors": [ + { + "n": "Anton Nikolas Waniek", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7701.json b/doc/sphinxext/prs/7701.json new file mode 100644 index 00000000000..925d8fc3f26 --- /dev/null +++ b/doc/sphinxext/prs/7701.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "163ffb90958f997d3b95180b1f11f0255abb8247", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 7, + "d": 7 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7702.json b/doc/sphinxext/prs/7702.json new file mode 
100644 index 00000000000..c0e581ab164 --- /dev/null +++ b/doc/sphinxext/prs/7702.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3b5a497235b80688b6609f266c0386b05d83340c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7703.json b/doc/sphinxext/prs/7703.json new file mode 100644 index 00000000000..62d50059004 --- /dev/null +++ b/doc/sphinxext/prs/7703.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c66e1a262ee192627c39a77a85c7b3494a8949b9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_vector_mne_solution.py": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 47, + "d": 30 + }, + "mne/tests/test_source_estimate.py": { + "a": 51, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7704.json b/doc/sphinxext/prs/7704.json new file mode 100644 index 00000000000..aa697aa8ea6 --- /dev/null +++ b/doc/sphinxext/prs/7704.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5b45394a3a0c16097ceda56ee2500348b9b1827a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 6 + }, + ".travis.yml": { + "a": 1, + "d": 12 + }, + "MANIFEST.in": { + "a": 2, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 61, + "d": 6 + }, + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 2 + }, + "mne/utils/config.py": { + "a": 0, + "d": 1 + }, + "tools/get_minimal_commands.sh": { + "a": 19, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7705.json b/doc/sphinxext/prs/7705.json new file mode 100644 index 00000000000..fdb74297b2d --- /dev/null +++ b/doc/sphinxext/prs/7705.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a837aa3206e7826eb20c92cacebbddda8b0a6f2a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 17, + "d": 1 + }, + "examples/decoding/plot_linear_model_patterns.py": { + "a": 12, + "d": 13 + }, + "examples/decoding/plot_receptive_field_mtrf.py": { + "a": 25, + "d": 23 + }, + "mne/decoding/base.py": { + "a": 4, + "d": 10 + }, + "tutorials/machine-learning/plot_sensors_decoding.py": { + "a": 20, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7706.json b/doc/sphinxext/prs/7706.json new file mode 100644 index 00000000000..c19dded20c1 --- /dev/null +++ b/doc/sphinxext/prs/7706.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "9ad12c935539d9ca0e6af0de90113f2fa8fc483e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 4, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 117, + "d": 123 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7707.json b/doc/sphinxext/prs/7707.json new file mode 100644 index 00000000000..b3442168772 --- /dev/null +++ b/doc/sphinxext/prs/7707.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "30646229f23cee554884fe144726baf7947ff0a1", + 
"authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7708.json b/doc/sphinxext/prs/7708.json new file mode 100644 index 00000000000..72971ef74b6 --- /dev/null +++ b/doc/sphinxext/prs/7708.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2dc244f7bb55a6f4bc119aab055290f601270d02", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/backends/renderer.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/771.json b/doc/sphinxext/prs/771.json new file mode 100644 index 00000000000..62c11235616 --- /dev/null +++ b/doc/sphinxext/prs/771.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "85cb206d94758ff747e5a37eecf5e4094b2ff187", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 5, + "d": 3 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 80 + }, + "mne/surface.py": { + "a": 266, + "d": 4 + }, + "mne/tests/test_surface.py": { + "a": 17, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7712.json b/doc/sphinxext/prs/7712.json new file mode 100644 index 00000000000..11c6b7e55c8 --- /dev/null +++ b/doc/sphinxext/prs/7712.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6d044fedb3841ee3c65fb8e7dcc05225a0773268", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7714.json b/doc/sphinxext/prs/7714.json new file mode 100644 index 00000000000..85a4cba7620 --- /dev/null +++ b/doc/sphinxext/prs/7714.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "52fc7a9c2147c2f28ebe2d17eecb20978e859082", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 6, + "d": 4 + }, + "tutorials/source-modeling/plot_dipole_fit.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7717.json b/doc/sphinxext/prs/7717.json new file mode 100644 index 00000000000..8fcee156800 --- /dev/null +++ b/doc/sphinxext/prs/7717.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "832f48c28f212a9bb0312d2555e1f0f4754e9f36", + "authors": [ + { + "n": "Kyle Mathewson", + "e": "kylemath@gmail.com" + }, + { + "n": "Kyle Mathewson", + "e": "kylemath@gmail.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Jonathan Kuziek", + "e": "kuziek@ualberta.ca" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + } + ], + "changes": { + "doc/_includes/data_formats.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + 
"mne/channels/channels.py": { + "a": 8, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 13, + "d": 4 + }, + "mne/defaults.py": { + "a": 8, + "d": 2 + }, + "mne/evoked.py": { + "a": 8, + "d": 8 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/boxy/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/boxy/boxy.py": { + "a": 262, + "d": 0 + }, + "mne/io/boxy/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/boxy/tests/test_boxy.py": { + "a": 190, + "d": 0 + }, + "mne/io/pick.py": { + "a": 37, + "d": 20 + }, + "mne/io/utils.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_defaults.py": { + "a": 3, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + }, + "tutorials/io/plot_30_reading_fnirs_data.py": { + "a": 29, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/772.json b/doc/sphinxext/prs/772.json new file mode 100644 index 00000000000..25cbcd95346 --- /dev/null +++ b/doc/sphinxext/prs/772.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2478b3173310439d04edd6a117e71ed6909810db", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 26, + "d": 18 + }, + "mne/utils.py": { + "a": 13, + "d": 0 + }, + "mne/viz.py": { + "a": 169, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7722.json b/doc/sphinxext/prs/7722.json new file mode 100644 index 00000000000..af950158acc --- /dev/null +++ b/doc/sphinxext/prs/7722.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "282754ec94cf018f62feb91750296a5dfb537187", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7724.json b/doc/sphinxext/prs/7724.json new file mode 100644 index 00000000000..02da041c741 --- /dev/null +++ b/doc/sphinxext/prs/7724.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "44ea73e9bf02fab3152331d462ddeaad1a06baa2", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/index.rst": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7725.json b/doc/sphinxext/prs/7725.json new file mode 100644 index 00000000000..b00e2d8fa9f --- /dev/null +++ b/doc/sphinxext/prs/7725.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5b7d8170804167b81965cb30c65f8abbd8ebedf5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 33, + "d": 42 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7726.json b/doc/sphinxext/prs/7726.json new file mode 100644 index 00000000000..24f977f9cba --- /dev/null +++ b/doc/sphinxext/prs/7726.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ba988750f43314760a1af15761f6dac776bc51ba", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 61, + "d": 97 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7728.json b/doc/sphinxext/prs/7728.json new file mode 100644 
index 00000000000..4714550232e --- /dev/null +++ b/doc/sphinxext/prs/7728.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "96420e71ab6fad2d4cc702c230b342d3743d2ebc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/morph.py": { + "a": 66, + "d": 44 + }, + "mne/tests/test_morph.py": { + "a": 46, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7729.json b/doc/sphinxext/prs/7729.json new file mode 100644 index 00000000000..8bae0d17445 --- /dev/null +++ b/doc/sphinxext/prs/7729.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4ef1a84b2c8a81b04c1e018bd36963697723ff63", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 3, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7730.json b/doc/sphinxext/prs/7730.json new file mode 100644 index 00000000000..855d9bb4226 --- /dev/null +++ b/doc/sphinxext/prs/7730.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6f59cd9e2a39ef5783aa67856e251226d24fad43", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7731.json b/doc/sphinxext/prs/7731.json new file mode 100644 index 00000000000..af26d6894d5 --- /dev/null +++ b/doc/sphinxext/prs/7731.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ef16a2d7f38f78ceb4401d41dfa4afee1846889e", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nirx/nirx.py": { + "a": 6, + "d": 2 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7733.json b/doc/sphinxext/prs/7733.json new file mode 100644 index 00000000000..5ea9cb2516f --- /dev/null +++ b/doc/sphinxext/prs/7733.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2cad786d17c3126e4cfbe9dc140671b5c5d51bcc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 25, + "d": 11 + }, + "mne/tests/test_epochs.py": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7734.json b/doc/sphinxext/prs/7734.json new file mode 100644 index 00000000000..91cbcdcf382 --- /dev/null +++ b/doc/sphinxext/prs/7734.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "92914823931c1e330f79b91f4ff6d7f3ff397d7c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 37, + "d": 24 + }, + "mne/morph.py": { + "a": 152, + "d": 96 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_morph.py": { + "a": 97, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7735.json b/doc/sphinxext/prs/7735.json new file mode 100644 index 00000000000..910ab6abd42 --- /dev/null +++ b/doc/sphinxext/prs/7735.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "12fc0e7686be7fdf07e28ca97370c2fafed9c6be", + 
"authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 5, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7736.json b/doc/sphinxext/prs/7736.json new file mode 100644 index 00000000000..abb9b339c3d --- /dev/null +++ b/doc/sphinxext/prs/7736.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "426ab6f954b54c224dd5f41582c19d461f91ca78", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/rank.py": { + "a": 25, + "d": 19 + }, + "mne/tests/test_rank.py": { + "a": 33, + "d": 13 + }, + "mne/utils/docs.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7737.json b/doc/sphinxext/prs/7737.json new file mode 100644 index 00000000000..b39001b1c62 --- /dev/null +++ b/doc/sphinxext/prs/7737.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7618a58eb45b75c3f00f08ebf18738f0f41e0886", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 29, + "d": 11 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7740.json b/doc/sphinxext/prs/7740.json new file mode 100644 index 00000000000..4e2a40fb7b1 --- /dev/null +++ b/doc/sphinxext/prs/7740.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "d0dc8f3d51cfd0db8bcec8f1adee98050152d2df", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/decoding/transformer.py": { + "a": 4, + "d": 2 + }, + "mne/epochs.py": { + "a": 40, + "d": 8 + }, + "mne/io/base.py": { + "a": 4, + "d": 5 + }, + "mne/io/write.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 34, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7742.json b/doc/sphinxext/prs/7742.json new file mode 100644 index 00000000000..d1dc8d45e67 --- /dev/null +++ b/doc/sphinxext/prs/7742.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "fdb2eaf26bc4b55bf89140eb6832b93bd9d40971", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 12, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7747.json b/doc/sphinxext/prs/7747.json new file mode 100644 index 00000000000..063f03027e7 --- /dev/null +++ b/doc/sphinxext/prs/7747.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4f493e1fd240acab1a3838a8c3f82fa062b76e97", + "authors": [ + { + "n": "Kyle Mathewson", + "e": "kylemath@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/defaults.py": { + "a": 5, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 30, + "d": 7 + }, + "mne/viz/tests/test_3d.py": { + "a": 29, + "d": 0 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7748.json b/doc/sphinxext/prs/7748.json new file mode 100644 index 00000000000..d0cbb266f88 --- /dev/null 
+++ b/doc/sphinxext/prs/7748.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "899c40f7cf9b2bc4ca1ffab0ee3dd875c893b921", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/autosummary/class.rst": { + "a": 4, + "d": 3 + }, + "doc/_templates/autosummary/function.rst": { + "a": 3, + "d": 2 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 4, + "d": 0 + }, + "mne/morph.py": { + "a": 4, + "d": 2 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 4, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7749.json b/doc/sphinxext/prs/7749.json new file mode 100644 index 00000000000..0dc1de78666 --- /dev/null +++ b/doc/sphinxext/prs/7749.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a2095faa49b115b9668dd4c94b94590088a18ac5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "README.rst": { + "a": 8, + "d": 3 + }, + "codecov.yml": { + "a": 18, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/775.json b/doc/sphinxext/prs/775.json new file mode 100644 index 00000000000..dc06ddcc0af --- /dev/null +++ b/doc/sphinxext/prs/775.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "911e95f4cc8825f07395e6cb55d6a3d2a37bca59", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/fiff/bti/raw.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ecg.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 10, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 2, + "d": 1 + }, + "mne/utils.py": { + "a": 17, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7750.json b/doc/sphinxext/prs/7750.json new file mode 100644 index 00000000000..ed46d353896 --- /dev/null +++ b/doc/sphinxext/prs/7750.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d61b28174b8ada91774c99bb636f51170446c5f4", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7752.json b/doc/sphinxext/prs/7752.json new file mode 100644 index 00000000000..851f084b368 --- /dev/null +++ b/doc/sphinxext/prs/7752.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "09530fdb7a8c197f852bd0f91c9ddeaaca964ffc", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 15, + "d": 16 + }, + "mne/channels/tests/test_montage.py": { + "a": 0, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7754.json b/doc/sphinxext/prs/7754.json new file mode 100644 index 00000000000..8f28429bf1d --- /dev/null +++ b/doc/sphinxext/prs/7754.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"807b02405750d7173298f7d1defdf244f70d5488", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7755.json b/doc/sphinxext/prs/7755.json new file mode 100644 index 00000000000..728643be16e --- /dev/null +++ b/doc/sphinxext/prs/7755.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "de3128b5f3434490943eff92dd9a4efbb5a8dca2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 14, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 7, + "d": 3 + }, + "mne/label.py": { + "a": 34, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7756.json b/doc/sphinxext/prs/7756.json new file mode 100644 index 00000000000..85b0a56a535 --- /dev/null +++ b/doc/sphinxext/prs/7756.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "60b2b2448173bb6d0fbd1c332bbf7669c6213bfa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 7, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 14, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7758.json b/doc/sphinxext/prs/7758.json new file mode 100644 index 00000000000..ff557acad6b --- /dev/null +++ b/doc/sphinxext/prs/7758.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "64499b764a77bd2ccdfcf397c3cd0a6b7b7f6e6d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 51, + "d": 12 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/install/advanced.rst": { + "a": 13, + "d": 0 + }, + "doc/install/mne_python.rst": { + "a": 7, + "d": 0 + }, + "mne/conftest.py": { + "a": 20, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 14, + "d": 8 + }, + "mne/viz/_brain/_brain.py": { + "a": 16, + "d": 1 + }, + "mne/viz/_brain/_notebook.py": { + "a": 40, + "d": 0 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 25, + "d": 11 + }, + "mne/viz/_brain/tests/test.ipynb": { + "a": 62, + "d": 0 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 22, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 164, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 14, + "d": 3 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 5, + "d": 1 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 19, + "d": 1 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 1, + "d": 2 + }, + "server_environment.yml": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7759.json b/doc/sphinxext/prs/7759.json new file mode 100644 index 00000000000..56b22e12aa1 --- /dev/null +++ b/doc/sphinxext/prs/7759.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "861e82505ce59e7ea84887227d6066776bdc6401", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nirx/nirx.py": { + "a": 31, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/776.json b/doc/sphinxext/prs/776.json new file mode 100644 index 00000000000..1ce5dfb38b8 --- /dev/null +++ b/doc/sphinxext/prs/776.json @@ -0,0 +1,127 @@ +{ + "merge_commit_sha": "f9fc2a38040605e4972bc64647d7ab5a3e446267", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 3, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 4, + "d": 0 + }, + "examples/plot_make_forward.py": { + "a": 54, + "d": 0 + }, + "examples/plot_read_forward.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 4, + "d": 2 + }, + "mne/data/coil_def.dat": { + "a": 416, + "d": 0 + }, + "mne/fiff/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/compensator.py": { + "a": 5, + "d": 6 + }, + "mne/fiff/constants.py": { + "a": 42, + "d": 0 + }, + "mne/fiff/meas_info.py": { + "a": 24, + "d": 2 + }, + "mne/fiff/pick.py": { + "a": 2, + "d": 0 + }, + "mne/fiff/tag.py": { + "a": 5, + "d": 1 + }, + "mne/fiff/tests/data/sample-audvis-raw-trans.txt": { + "a": 4, + "d": 0 + }, + "mne/fiff/write.py": { + "a": 22, + "d": 1 + }, + "mne/forward/__init__.py": { + "a": 12, + "d": 0 + }, + "mne/forward/_compute_forward.py": { + "a": 346, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 474, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 172, + "d": 96 + }, + "mne/forward/tests/test_forward.py": { + "a": 51, + "d": 92 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 172, + "d": 0 + }, + "mne/proj.py": { + "a": 11, + "d": 7 + }, + "mne/source_space.py": { + "a": 28, + "d": 14 + }, + "mne/surface.py": { + "a": 323, + "d": 187 + }, + "mne/tests/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 16 + }, + "mne/tests/test_source_space.py": { + "a": 9, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 28, + "d": 5 + }, + "mne/tests/test_transforms.py": { + "a": 14, + "d": 1 + }, + "mne/transforms.py": { + "a": 22, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7761.json b/doc/sphinxext/prs/7761.json new file mode 100644 index 00000000000..291a95f0fce --- /dev/null +++ b/doc/sphinxext/prs/7761.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9455fa845d393b6f083d266d337173826867a500", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 20, + "d": 18 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 48, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7762.json b/doc/sphinxext/prs/7762.json new file mode 100644 index 00000000000..886864513b0 --- /dev/null +++ b/doc/sphinxext/prs/7762.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "224cce85577ab8fc4f66a48eba684c1fd31bfc5b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/overview/roadmap.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7763.json b/doc/sphinxext/prs/7763.json new file mode 100644 index 00000000000..6b6e3392eed --- /dev/null +++ b/doc/sphinxext/prs/7763.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "cd84e02cf7ae2462272e1b2226a562694daa2763", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 2, + "d": 2 + }, + "examples/visualization/plot_make_report.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 23, + "d": 39 + }, + "mne/tests/test_report.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 119 + }, + "mne/viz/misc.py": { + "a": 109, + "d": 48 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7764.json b/doc/sphinxext/prs/7764.json new file mode 100644 index 00000000000..6d1e103990d --- /dev/null +++ b/doc/sphinxext/prs/7764.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3e12ef4d12f5178b13631bbaf667e2a97dc62525", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7766.json b/doc/sphinxext/prs/7766.json new file mode 100644 index 00000000000..76635aefbd8 --- /dev/null +++ b/doc/sphinxext/prs/7766.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4140bc82aed2cb9d032782ab989e26e01879e3c7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/index.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7767.json b/doc/sphinxext/prs/7767.json new file mode 100644 index 00000000000..50065346712 --- /dev/null +++ b/doc/sphinxext/prs/7767.json @@ -0,0 +1,119 @@ +{ + "merge_commit_sha": "ed6f12f3a292665b41554ddb590d18e3d8d44e7a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 4, + "d": 4 + }, + "mne/surface.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 8, + "d": 8 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 2 + }, + "mne/utils/numerics.py": { + "a": 4, + "d": 4 + }, + "mne/utils/tests/test_logging.py": { + "a": 2, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 17, + "d": 17 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 4, + "d": 4 + }, + "mne/viz/raw.py": { + "a": 5, + "d": 4 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 3 + }, + "tutorials/simulation/plot_point_spread.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7768.json b/doc/sphinxext/prs/7768.json new file mode 100644 index 00000000000..08467bdbe9f --- /dev/null +++ b/doc/sphinxext/prs/7768.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "65b0c6ed590dc88a135ce3f7e78946bf5bbcf305", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": 
"Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "tutorials/misc/plot_ecog.py": { + "a": 110, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7770.json b/doc/sphinxext/prs/7770.json new file mode 100644 index 00000000000..498fbe4f49d --- /dev/null +++ b/doc/sphinxext/prs/7770.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a367fef86c916cc0bb9272c41d4d81af324c3c7b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7772.json b/doc/sphinxext/prs/7772.json new file mode 100644 index 00000000000..1a6473f6bb6 --- /dev/null +++ b/doc/sphinxext/prs/7772.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "153963826b5bd16ed4fe9a96c52e13c079b24513", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 10, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7775.json b/doc/sphinxext/prs/7775.json new file mode 100644 index 00000000000..83a03354eca --- /dev/null +++ b/doc/sphinxext/prs/7775.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "de3237c9d91c64fe94330fb8d485f7c2837146f5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 21, + "d": 4 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 6, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 20, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7777.json b/doc/sphinxext/prs/7777.json new file mode 100644 index 00000000000..23b19745062 --- /dev/null +++ b/doc/sphinxext/prs/7777.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3e551c3e07b94655866ab69df4657b0d4e8cb2f9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/stats/parametric.py": { + "a": 14, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7780.json b/doc/sphinxext/prs/7780.json new file mode 100644 index 00000000000..3642e8b9a1b --- /dev/null +++ b/doc/sphinxext/prs/7780.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e68d1963fee51c78969eb2523e9877a62024fb8a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + ".travis.yml": { + "a": 4, + "d": 3 + }, + "azure-pipelines.yml": { + "a": 5, + "d": 5 + }, + "mne/cov.py": { + "a": 2, + "d": 7 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 2, + "d": 4 + }, + 
"mne/viz/tests/test_3d.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7781.json b/doc/sphinxext/prs/7781.json new file mode 100644 index 00000000000..d3b9185eed8 --- /dev/null +++ b/doc/sphinxext/prs/7781.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f8276a8c6aebcb1f0cffcdfef5879342cf737fc1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 14, + "d": 8 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 41, + "d": 29 + }, + "mne/stats/tests/test_permutations.py": { + "a": 2, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7783.json b/doc/sphinxext/prs/7783.json new file mode 100644 index 00000000000..49836f3058e --- /dev/null +++ b/doc/sphinxext/prs/7783.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "99536bc419d64c5815b3d82e5abc0b14c10edbeb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 6, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7784.json b/doc/sphinxext/prs/7784.json new file mode 100644 index 00000000000..c9880871d5e --- /dev/null +++ b/doc/sphinxext/prs/7784.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "292a4784f05ae607e97686d3ca05ae4c3852f8db", + "authors": [ + { + "n": "Yu-Han Luo", + "e": null + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 10, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7786.json b/doc/sphinxext/prs/7786.json new file mode 100644 index 00000000000..502b19943c6 --- /dev/null +++ b/doc/sphinxext/prs/7786.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "07cdaf9f30aa8c421f43a7c75e92ae3fe9ccdc5f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 2, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 1, + "d": 1 + }, + "tutorials/discussions/plot_background_statistics.py": { + "a": 4, + "d": 4 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 2, + "d": 1 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_time_frequency.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7791.json b/doc/sphinxext/prs/7791.json new file mode 100644 index 00000000000..b7a2b3f8e77 --- /dev/null +++ b/doc/sphinxext/prs/7791.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "9800cf3dbda3485f14d0ee4e92e70517abb520a4", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 3, + "d": 0 + }, + "doc/install/mne_python.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 4, + "d": 2 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/utils/config.py": { + "a": 10, + "d": 3 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 10, + "d": 3 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 1 + }, + 
"mne/viz/backends/_pyvista.py": { + "a": 96, + "d": 50 + }, + "requirements.txt": { + "a": 4, + "d": 2 + }, + "tutorials/machine-learning/plot_receptive_field.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7794.json b/doc/sphinxext/prs/7794.json new file mode 100644 index 00000000000..8aba3b4a691 --- /dev/null +++ b/doc/sphinxext/prs/7794.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a214d66520c3c111d4010e98de80627cbbd6f1a0", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/io/utils.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/plot_20_reading_eeg_data.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7795.json b/doc/sphinxext/prs/7795.json new file mode 100644 index 00000000000..63871a830d8 --- /dev/null +++ b/doc/sphinxext/prs/7795.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5874885c1736edc321ec202cb6e15d88e4324a5c", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/externals/pymatreader/pymatreader.py": { + "a": 1, + "d": 1 + }, + "mne/externals/pymatreader/utils.py": { + "a": 36, + "d": 8 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7796.json b/doc/sphinxext/prs/7796.json new file mode 100644 index 00000000000..909fb352c31 --- /dev/null +++ b/doc/sphinxext/prs/7796.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a6afbaa3f38219c842ead9bd1beac11c97286f66", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/mne_python.rst": { + "a": 46, + "d": 0 + }, + "doc/install/pre_install.rst": { + "a": 4, + "d": 2 + }, + "environment.yml": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7797.json b/doc/sphinxext/prs/7797.json new file mode 100644 index 00000000000..b00b4f52732 --- /dev/null +++ b/doc/sphinxext/prs/7797.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bb908f197823c3ae27da493d2cd89895136717bf", + "authors": [ + { + "n": "Christian O'Reilly", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 0, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/780.json b/doc/sphinxext/prs/780.json new file mode 100644 index 00000000000..b26b7673d99 --- /dev/null +++ b/doc/sphinxext/prs/780.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c90e8a2330e0db99c1b8d4c2791d08c06753d13c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/fiff/pick.py": { + "a": 2, + "d": 7 + }, + "mne/fiff/proj.py": { + "a": 1, + "d": 33 + }, + "mne/fiff/raw.py": { + "a": 4, + "d": 24 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 101 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7800.json b/doc/sphinxext/prs/7800.json new file mode 100644 index 00000000000..a836a848691 --- /dev/null +++ b/doc/sphinxext/prs/7800.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": 
"bfe65c72be7ff6b21f64c75f066b9c0fb8629c90", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_fnirs_artifact_removal.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/__init__.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 16, + "d": 7 + }, + "mne/preprocessing/tests/test_temporal_derivative_distribution_repair.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7801.json b/doc/sphinxext/prs/7801.json new file mode 100644 index 00000000000..e60bd8bf548 --- /dev/null +++ b/doc/sphinxext/prs/7801.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7726509b9b19eefc5e1908d1ec9bd34168f7a452", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 4, + "d": 2 + }, + "requirements.txt": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7802.json b/doc/sphinxext/prs/7802.json new file mode 100644 index 00000000000..fc0b4d55c43 --- /dev/null +++ b/doc/sphinxext/prs/7802.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ba960fd7752d2f041185ff35f139d511d98f3434", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 8, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 4, + "d": 1 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 40, + "d": 2 + }, + "mne/io/fieldtrip/utils.py": { + "a": 37, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7804.json b/doc/sphinxext/prs/7804.json new file mode 100644 index 00000000000..b88ea5c4167 --- /dev/null +++ b/doc/sphinxext/prs/7804.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "683613a04fccf59826f1c9bfb984ef984a109632", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 5, + "d": 0 + }, + "doc/_templates/layout.html": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7806.json b/doc/sphinxext/prs/7806.json new file mode 100644 index 00000000000..41877f220ad --- /dev/null +++ b/doc/sphinxext/prs/7806.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5701738ba10d65e2c5a713e0c6480aa04cd1a4ae", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 6, + "d": 8 + }, + "doc/_templates/autosummary/class.rst": { + "a": 2, + "d": 5 + }, + "doc/_templates/autosummary/function.rst": { + "a": 2, + "d": 5 + }, + "doc/conf.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7809.json b/doc/sphinxext/prs/7809.json new file mode 100644 index 00000000000..39e7cebdb33 --- /dev/null +++ b/doc/sphinxext/prs/7809.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "432d5b77270e2900c8034cbc1f7f576935b7de68", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + 
"changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/_read_raw.py": { + "a": 84, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_read_raw.py": { + "a": 38, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/781.json b/doc/sphinxext/prs/781.json new file mode 100644 index 00000000000..3fa398e0f19 --- /dev/null +++ b/doc/sphinxext/prs/781.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "3a5c623e2d709febd8a5337a9cb0ef97bfd4b9e8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 4, + "d": 3 + }, + "mne/coreg/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/coreg/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/forward.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 43 + }, + "mne/source_space.py": { + "a": 612, + "d": 36 + }, + "mne/surface.py": { + "a": 108, + "d": 13 + }, + "mne/tests/test_source_space.py": { + "a": 93, + "d": 16 + }, + "mne/tests/test_surface.py": { + "a": 11, + "d": 1 + }, + "mne/tests/test_utils.py": { + "a": 22, + "d": 1 + }, + "mne/transforms/__init__.py": { + "a": 2, + "d": 3 + }, + "mne/transforms/transforms.py": { + "a": 55, + "d": 2 + }, + "mne/utils.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7810.json b/doc/sphinxext/prs/7810.json new file mode 100644 index 00000000000..65ece580e89 --- /dev/null +++ b/doc/sphinxext/prs/7810.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "7afa43778a0ef2096a61437eb30d2c98ad8c7598", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "jeff", + "e": "jeff.hanna@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 0 + }, + "doc/overview/datasets_index.rst": { + "a": 13, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 8, + "d": 0 + }, + "examples/preprocessing/plot_find_ref_artifacts.py": { + "a": 139, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/refmeg_noise/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/refmeg_noise/refmeg_noise.py": { + "a": 30, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 9, + "d": 2 + }, + "mne/preprocessing/bads.py": { + "a": 32, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 103, + "d": 30 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 17, + "d": 11 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7811.json b/doc/sphinxext/prs/7811.json new file mode 100644 index 00000000000..bdd73415eb7 --- /dev/null +++ b/doc/sphinxext/prs/7811.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "15866de8908d37337930a2afa2a54277a8a78ebe", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 11, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 21, + "d": 15 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/7812.json b/doc/sphinxext/prs/7812.json new file mode 100644 index 00000000000..754cc538445 --- /dev/null +++ b/doc/sphinxext/prs/7812.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "18b8f40c37850748fae4bea30498c8d533b00134", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7817.json b/doc/sphinxext/prs/7817.json new file mode 100644 index 00000000000..85fedfc43fe --- /dev/null +++ b/doc/sphinxext/prs/7817.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bb0b50c6be08961b10dfff8535c966f4c6675f46", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7819.json b/doc/sphinxext/prs/7819.json new file mode 100644 index 00000000000..7ee4f872d4b --- /dev/null +++ b/doc/sphinxext/prs/7819.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bbc853ec2560a741f8afbc08379a51a66db5056f", + "authors": [ + { + "n": "Yu-Han Luo", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 10, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 60, + "d": 7 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 19, + "d": 11 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/782.json b/doc/sphinxext/prs/782.json new file mode 100644 index 00000000000..8b09f06ce98 --- /dev/null +++ b/doc/sphinxext/prs/782.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b3dcbc9dca9d1c198f621143479bfbc7ba3f9b6a", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7820.json b/doc/sphinxext/prs/7820.json new file mode 100644 index 00000000000..d188d9e7d9b --- /dev/null +++ b/doc/sphinxext/prs/7820.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5f95ebb543dac03ca3b6cde31342da4d80bf4451", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 16, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7822.json b/doc/sphinxext/prs/7822.json new file mode 100644 index 00000000000..8b54402f7c5 --- /dev/null +++ b/doc/sphinxext/prs/7822.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "a1638516b22ae128153087d72d601afdf7622ce0", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 5, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7823.json b/doc/sphinxext/prs/7823.json new file mode 100644 index 00000000000..fd886283d5e --- /dev/null +++ b/doc/sphinxext/prs/7823.json @@ -0,0 +1,139 @@ +{ + "merge_commit_sha": "002a5cfc9baecdd0ecdaff1f38555b44a141de35", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "examples/preprocessing/plot_find_ref_artifacts.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_ica_comparison.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 9, + "d": 7 + }, + "mne/beamformer/tests/test_resolution_matrix.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 7 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 3, + "d": 3 + }, + "mne/io/pick.py": { + "a": 35, + "d": 19 + }, + "mne/io/tests/test_pick.py": { + "a": 21, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + 
"mne/preprocessing/tests/test_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_rank.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_check.py": { + "a": 2, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_30_filtering_resampling.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7824.json b/doc/sphinxext/prs/7824.json new file mode 100644 index 00000000000..f4cb1400ed2 --- /dev/null +++ b/doc/sphinxext/prs/7824.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "78bc2a4c6d7f2a0ad545d242fa715b711510eb02", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/blender_import_obj/blender_import_obj1.jpg": { + "a": 0, + "d": 0 + }, + "doc/_static/blender_import_obj/blender_import_obj2.jpg": { + "a": 0, + "d": 0 + }, + "doc/_static/blender_import_obj/blender_import_obj3.jpg": { + "a": 0, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/surface.py": { + "a": 117, + "d": 31 + }, + "mne/tests/test_surface.py": { + "a": 8, + "d": 0 + }, + "tutorials/source-modeling/plot_fix_bem_in_blender.py": { + "a": 136, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7825.json b/doc/sphinxext/prs/7825.json new file mode 100644 index 00000000000..23e72b78d94 --- /dev/null +++ b/doc/sphinxext/prs/7825.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "68b93fcca5231506837eb4be1b2ceb035fde89cb", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 37, + "d": 11 + }, + "mne/viz/tests/test_ica.py": { + "a": 21, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7826.json b/doc/sphinxext/prs/7826.json new file mode 100644 index 00000000000..b4206c3a98b --- /dev/null +++ b/doc/sphinxext/prs/7826.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d96532567f4f8f00e3629123eb4a7de8d01a85ed", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/mne_c.rst": { + "a": 4, + "d": 4 + }, + "tutorials/io/plot_20_reading_eeg_data.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7827.json b/doc/sphinxext/prs/7827.json new file mode 100644 index 00000000000..6c4696d7cfd --- /dev/null +++ b/doc/sphinxext/prs/7827.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "19c4b877fba3810341354c6445f21c9e1cd27b20", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/mne_python.rst": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/7830.json b/doc/sphinxext/prs/7830.json new file mode 100644 index 00000000000..9117e6a82c8 --- /dev/null +++ b/doc/sphinxext/prs/7830.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f4982c28429d24f4f7dee772e46347b3806d9a44", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_snr_estimate.py": { + "a": 5, + "d": 4 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/viz/evoked.py": { + "a": 21, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7831.json b/doc/sphinxext/prs/7831.json new file mode 100644 index 00000000000..c807b4b670f --- /dev/null +++ b/doc/sphinxext/prs/7831.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a610fbcacda540c1ebff6c9500c85644aaef8ae7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 3, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 20, + "d": 28 + }, + "mne/viz/tests/test_evoked.py": { + "a": 18, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7832.json b/doc/sphinxext/prs/7832.json new file mode 100644 index 00000000000..35a3669ca31 --- /dev/null +++ b/doc/sphinxext/prs/7832.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fe801b12256c798bdbcea32d444b9320c400bfcc", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7833.json b/doc/sphinxext/prs/7833.json new file mode 100644 index 00000000000..ffa02a803e3 --- /dev/null +++ b/doc/sphinxext/prs/7833.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a9b3a69094a8d84327d873375b19f48e91d8e79a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/tests/test_meas_info.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7835.json b/doc/sphinxext/prs/7835.json new file mode 100644 index 00000000000..5d304c42e98 --- /dev/null +++ b/doc/sphinxext/prs/7835.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d1662c1e74c738f5e2f177df82683174c73f1648", + "authors": [ + { + "n": "sarasommariva", + "e": "sommariva@dima.unige.it" + } + ], + "changes": { + "doc/install/pre_install.rst": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7839.json b/doc/sphinxext/prs/7839.json new file mode 100644 index 00000000000..66411d0f167 --- /dev/null +++ b/doc/sphinxext/prs/7839.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "7d1a25088fe2b2c64acd27af8672b2abd884cdce", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/anr.jpeg": { + "a": 0, + "d": 0 + }, + "doc/_static/doe.png": { + "a": 0, + "d": 0 + }, + "doc/_static/erc.png": { + "a": 0, + "d": 0 + }, + "doc/_static/funding/amazon.svg": { + "a": 0, + "d": 0 + }, + "doc/_static/funding/anr.svg": { + "a": 6, + "d": 0 + }, + "doc/_static/funding/cds.png": { + "a": 0, + "d": 0 + }, + "doc/_static/funding/czi.svg": { + "a": 19, + "d": 0 + }, + "doc/_static/funding/doe.svg": { + "a": 1659, + "d": 0 + }, + "doc/_static/funding/erc.svg": { + "a": 662, + "d": 0 + }, + "doc/_static/funding/google.svg": { + "a": 0, + "d": 0 + }, + "doc/_static/funding/nih.png": { + "a": 0, + "d": 0 + }, 
+ "doc/_static/funding/nsf.png": { + "a": 0, + "d": 0 + }, + "doc/index.rst": { + "a": 9, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7840.json b/doc/sphinxext/prs/7840.json new file mode 100644 index 00000000000..8f8cff3eb36 --- /dev/null +++ b/doc/sphinxext/prs/7840.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1fdacb362d765c76f22bbd6d00fe9ecbd8de12d2", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 2 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7841.json b/doc/sphinxext/prs/7841.json new file mode 100644 index 00000000000..f2952f31c17 --- /dev/null +++ b/doc/sphinxext/prs/7841.json @@ -0,0 +1,339 @@ +{ + "merge_commit_sha": "f641957dba4fbd4bfed8e3329cb33891792d3de5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/bem_model.rst": { + "a": 5, + "d": 5 + }, + "doc/_includes/channel_interpolation.rst": { + "a": 1, + "d": 1 + }, + "doc/_includes/forward.rst": { + "a": 4, + "d": 4 + }, + "doc/_includes/inverse.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/0.10.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.12.inc": { + "a": 4, + "d": 4 + }, + "doc/changes/0.13.inc": { + "a": 4, + "d": 4 + }, + "doc/changes/0.14.inc": { + "a": 3, + "d": 3 + }, + "doc/changes/0.15.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.16.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.17.inc": { + "a": 7, + "d": 7 + }, + "doc/changes/0.18.inc": { + "a": 10, + "d": 10 + }, + "doc/changes/0.19.inc": { + "a": 8, + "d": 8 + }, + "doc/changes/0.20.inc": { + "a": 9, + "d": 9 + }, + "doc/changes/0.6.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.7.inc": { + "a": 6, + "d": 6 + }, + "doc/changes/0.8.inc": { + "a": 3, + "d": 3 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 2 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/install/pre_install.rst": { + "a": 2, + "d": 2 + }, + "doc/overview/cookbook.rst": { + "a": 1, + "d": 1 + }, + "doc/overview/faq.rst": { + "a": 10, + "d": 10 + }, + "doc/overview/matlab.rst": { + "a": 2, + "d": 2 + }, + "doc/overview/roadmap.rst": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/sphinxext/gen_commands.py": { + "a": 7, + "d": 8 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 1, + "d": 1 + }, + "examples/stats/plot_sensor_regression.py": { + "a": 3, + "d": 2 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 4, + "d": 4 + }, + "examples/visualization/plot_topo_customized.py": { + "a": 2, + "d": 2 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 7, + "d": 7 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 3, + "d": 3 + }, + "mne/commands/mne_anonymize.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/csp.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 22, + "d": 19 + }, + "mne/evoked.py": { + "a": 7, + "d": 7 + }, + "mne/forward/_field_interpolation.py": { + "a": 3, + "d": 3 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 
1, + "d": 1 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 26, + "d": 25 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 1, + "d": 1 + }, + "mne/rank.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 2 + }, + "mne/source_space.py": { + "a": 13, + "d": 13 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/permutations.py": { + "a": 3, + "d": 3 + }, + "mne/stats/regression.py": { + "a": 4, + "d": 4 + }, + "mne/surface.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 22, + "d": 22 + }, + "mne/utils/docs.py": { + "a": 16, + "d": 8 + }, + "mne/viz/_3d.py": { + "a": 10, + "d": 8 + }, + "mne/viz/epochs.py": { + "a": 5, + "d": 4 + }, + "mne/viz/evoked.py": { + "a": 29, + "d": 28 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 3, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 4, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 9, + "d": 29 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 6 + }, + "tutorials/discussions/plot_background_filtering.py": { + "a": 4, + "d": 4 + }, + "tutorials/intro/plot_20_events_from_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/plot_10_reading_meg_data.py": { + "a": 1, + "d": 1 + }, + "tutorials/misc/plot_report.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_15_handling_bad_channels.py": { + "a": 1, + "d": 1 + }, + "tutorials/sample-datasets/plot_sleep.py": { + "a": 3, + "d": 2 + }, + "tutorials/simulation/plot_creating_data_structures.py": { + "a": 14, + "d": 13 + }, + "tutorials/simulation/plot_point_spread.py": { + "a": 4, + "d": 4 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 5, + "d": 9 + }, + "tutorials/source-modeling/plot_object_source_estimate.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7843.json b/doc/sphinxext/prs/7843.json new file mode 100644 index 00000000000..648a85a5b53 --- /dev/null +++ b/doc/sphinxext/prs/7843.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "bbeb2ae768b1310094a59d42f09c55f7d5412429", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 5 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 1 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 6, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 5, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 10, + "d": 4 + }, + "mne/viz/backends/base_renderer.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7845.json b/doc/sphinxext/prs/7845.json new file mode 100644 index 00000000000..2f95e1bf2c1 --- /dev/null +++ b/doc/sphinxext/prs/7845.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3cda35975958e1bdc2b7e7b2ddef9f60e315b48f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + 
"mne/preprocessing/maxwell.py": { + "a": 98, + "d": 10 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 81, + "d": 13 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 94, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7846.json b/doc/sphinxext/prs/7846.json new file mode 100644 index 00000000000..d0036873af5 --- /dev/null +++ b/doc/sphinxext/prs/7846.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "342dc600d7a5b1e1d63cb5c60ec110ef66c9f858", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/testing/_testing.py": { + "a": 6, + "d": 3 + }, + "mne/io/fiff/raw.py": { + "a": 11, + "d": 3 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 19, + "d": 0 + }, + "mne/io/tag.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7847.json b/doc/sphinxext/prs/7847.json new file mode 100644 index 00000000000..21ae18d970f --- /dev/null +++ b/doc/sphinxext/prs/7847.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4d02d026030c921b814bbc0ad47187dff4b96f17", + "authors": [ + { + "n": "Simeon Wong", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/785.json b/doc/sphinxext/prs/785.json new file mode 100644 index 00000000000..55a33df60da --- /dev/null +++ b/doc/sphinxext/prs/785.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3e28b3d473f185181ab7319f1e9474b396416b31", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fixes.py": { + "a": 13, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7851.json b/doc/sphinxext/prs/7851.json new file mode 100644 index 00000000000..1d3ae7bcf9e --- /dev/null +++ b/doc/sphinxext/prs/7851.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "508cfbc13bc2e068efb3bf8b7999b83047851729", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 22, + "d": 7 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 5, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7852.json b/doc/sphinxext/prs/7852.json new file mode 100644 index 00000000000..c2522a22f20 --- /dev/null +++ b/doc/sphinxext/prs/7852.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7e2fbcc8a40bb4357543e8745c26c8edd65b60e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_movement_compensation.py": { + "a": 11, + "d": 2 + }, + "mne/epochs.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7855.json b/doc/sphinxext/prs/7855.json new file mode 100644 index 00000000000..cb85dced748 --- /dev/null +++ b/doc/sphinxext/prs/7855.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "89647f3363fcb5de306cc18e55e7b9fa89fe0315", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7863.json b/doc/sphinxext/prs/7863.json new file mode 100644 index 00000000000..cb2626f1a35 --- /dev/null +++ b/doc/sphinxext/prs/7863.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "a6ea90c7619c4294f5ae5731ea622cebd45923a8", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 47, + "d": 10 + }, + "examples/preprocessing/plot_eeg_csd.py": { + "a": 4, + "d": 12 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/defaults.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/_csd.py": { + "a": 22, + "d": 32 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 77, + "d": 103 + }, + "mne/tests/test_defaults.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7864.json b/doc/sphinxext/prs/7864.json new file mode 100644 index 00000000000..097549ce182 --- /dev/null +++ b/doc/sphinxext/prs/7864.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "121364d0c59e36f6b71c25f5905297f1ead36116", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/references.bib": { + "a": 11, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 7, + "d": 10 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7866.json b/doc/sphinxext/prs/7866.json new file mode 100644 index 00000000000..5137e4201b5 --- /dev/null +++ b/doc/sphinxext/prs/7866.json @@ -0,0 +1,35 @@ +{ + 
"merge_commit_sha": "110b57006d2aef39963a444036e1f458ba1e0582", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_external.py": { + "a": 8, + "d": 3 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/gui/_coreg_gui.py": { + "a": 6, + "d": 1 + }, + "mne/gui/_viewer.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_defaults.py": { + "a": 62, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7869.json b/doc/sphinxext/prs/7869.json new file mode 100644 index 00000000000..4f6246a1c3a --- /dev/null +++ b/doc/sphinxext/prs/7869.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "4bfeb85c6b9e9f59c314d37ad42483f21b7fd5ff", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/datasets/plot_limo_data.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_linear_regression_raw.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 18, + "d": 17 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 6 + }, + "mne/tests/test_evoked.py": { + "a": 42, + "d": 57 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 3 + }, + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 4, + "d": 5 + }, + "tutorials/evoked/plot_eeg_erp.py": { + "a": 13, + "d": 11 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 4, + "d": 5 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 1, + "d": 1 + }, + "tutorials/sample-datasets/plot_brainstorm_auditory.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_erp.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/787.json b/doc/sphinxext/prs/787.json new file mode 100644 index 00000000000..35b9b492d28 --- /dev/null +++ b/doc/sphinxext/prs/787.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2d622cd3db26da9a6005d4db60359b2b7281075f", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/tests/test_source_space.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 12, + "d": 6 + }, + "mne/utils.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7870.json b/doc/sphinxext/prs/7870.json new file mode 100644 index 00000000000..94a3861db8d --- /dev/null +++ b/doc/sphinxext/prs/7870.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "27341bb95a119b555234df13162b79c273b864aa", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7875.json b/doc/sphinxext/prs/7875.json new file mode 100644 index 00000000000..b3417213fac --- /dev/null +++ b/doc/sphinxext/prs/7875.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b2615a3cfe0dcc7f867fae576506fc520577bd2e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 13, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7877.json b/doc/sphinxext/prs/7877.json new file mode 100644 index 00000000000..3189966a08d --- /dev/null +++ b/doc/sphinxext/prs/7877.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "3fdd5024e8e45708fc807b6edc3ff9856bf0bfbb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/install/advanced.rst": { + "a": 11, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/gui/_coreg_gui.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 0, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 53, + "d": 5 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 3, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7878.json b/doc/sphinxext/prs/7878.json new file mode 100644 index 00000000000..6959f3a7537 --- /dev/null +++ b/doc/sphinxext/prs/7878.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f8d2ce12fef6db5f8ec35d978797249ec3937a17", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/io/pick.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/test_pick.py": { + "a": 22, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7879.json b/doc/sphinxext/prs/7879.json new file mode 100644 index 00000000000..2a686eb423e --- /dev/null +++ b/doc/sphinxext/prs/7879.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5b2275657c8b79200c7d852cba8e02043cc2a68f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 36, + "d": 46 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 34, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/788.json b/doc/sphinxext/prs/788.json new file mode 100644 index 00000000000..cb9fcf453c3 
--- /dev/null +++ b/doc/sphinxext/prs/788.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1dfeb5c635240418ffea4a391cc0a8e36bf52be4", + "authors": [ + { + "n": "Romain Trachel", + "e": "trachelr@gmail.com" + } + ], + "changes": { + "mne/stats/cluster_level.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7880.json b/doc/sphinxext/prs/7880.json new file mode 100644 index 00000000000..56118d45599 --- /dev/null +++ b/doc/sphinxext/prs/7880.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "30ef40adc8ab9d6911ca90292ec6f9948bca7411", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 5, + "d": 0 + }, + "doc/_templates/layout.html": { + "a": 6, + "d": 1 + }, + "environment.yml": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 4, + "d": 2 + }, + "mne/gui/_coreg_gui.py": { + "a": 6, + "d": 1 + }, + "mne/gui/_viewer.py": { + "a": 6, + "d": 2 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 3, + "d": 1 + }, + "requirements.txt": { + "a": 2, + "d": 2 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7881.json b/doc/sphinxext/prs/7881.json new file mode 100644 index 00000000000..e1aca6b49ac --- /dev/null +++ b/doc/sphinxext/prs/7881.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fdd68c3fa037d058aae9f6dd76bbd6512b0fe658", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 12, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7882.json b/doc/sphinxext/prs/7882.json new file mode 100644 index 00000000000..f4a4e83de7a --- /dev/null +++ b/doc/sphinxext/prs/7882.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "98b0bb332076ca49b40abde0856cee169a9ed94e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7883.json b/doc/sphinxext/prs/7883.json new file mode 100644 index 00000000000..e6e2122a5e1 --- /dev/null +++ b/doc/sphinxext/prs/7883.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "181a57270f1f7aead8570bc8aa9a915c5322d39e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "examples/inverse/plot_vector_mne_solution.py": { + "a": 16, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 19, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 90, + "d": 5 + }, + "mne/tests/test_source_estimate.py": { + "a": 70, + "d": 13 + }, + "mne/utils/check.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7887.json b/doc/sphinxext/prs/7887.json new file mode 100644 index 00000000000..ff4a6ec5947 --- /dev/null +++ b/doc/sphinxext/prs/7887.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "f6c6ff2842ff020816bfc203f00e03410c9e0ea3", + "authors": [ + { + 
"n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 0, + "d": 1 + }, + "mne/io/egi/events.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 23, + "d": 8 + }, + "mne/io/tests/test_raw.py": { + "a": 11, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7888.json b/doc/sphinxext/prs/7888.json new file mode 100644 index 00000000000..2aabb7565a7 --- /dev/null +++ b/doc/sphinxext/prs/7888.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e5f57cb36a316ba04aea7eba2da44dd5b58fe4c3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 3, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 120, + "d": 142 + }, + "mne/tests/test_source_space.py": { + "a": 25, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7889.json b/doc/sphinxext/prs/7889.json new file mode 100644 index 00000000000..881e483ee48 --- /dev/null +++ b/doc/sphinxext/prs/7889.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ddd54e11d6e11dbed9c9c7c4bf52a6bb1a9368e4", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/index.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/789.json b/doc/sphinxext/prs/789.json new file mode 100644 index 00000000000..312151a6b23 --- /dev/null +++ b/doc/sphinxext/prs/789.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fbb9cb1137927f24848c9cd9a8bd92334fcf80ac", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "examples/export/plot_epochs_as_data_frame.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/evoked.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7890.json b/doc/sphinxext/prs/7890.json new file mode 100644 index 00000000000..b7d9c84daca --- /dev/null +++ b/doc/sphinxext/prs/7890.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "03276c7c3d1d5a6cf7ca042f97d6cef51b14422e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/ctf/info.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egi.py": { + "a": 0, + "d": 1 + }, + "mne/io/egi/events.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 40, + "d": 10 + }, + "mne/io/tests/test_raw.py": { + "a": 11, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 6, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 5, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7891.json b/doc/sphinxext/prs/7891.json new file mode 100644 index 00000000000..eb068ff64a5 --- /dev/null +++ b/doc/sphinxext/prs/7891.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0f9cf21c8ba6a8c30ed9c9d6f546fa50b1ba1a34", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/nirx/nirx.py": { + "a": 40, + "d": 11 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 21, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7895.json b/doc/sphinxext/prs/7895.json new file mode 100644 index 00000000000..32718506e19 --- /dev/null +++ b/doc/sphinxext/prs/7895.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "52128c2fa5f556048f0d3c49f3e77e5ce3f3bdd2", + "authors": [ + { + "n": "Alejandro Weinstein", + "e": "alejandro.weinstein@gmail.com" + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7896.json b/doc/sphinxext/prs/7896.json new file mode 100644 index 00000000000..a4db5db2b76 --- /dev/null +++ b/doc/sphinxext/prs/7896.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "126cf98acaa42a226729da8a90ae0ea3431b7da5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 8, + "d": 0 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 5, + "d": 8 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/morph.py": { + "a": 61, + "d": 39 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 100, + "d": 6 + }, + "mne/viz/_3d.py": { + "a": 19, + "d": 9 + }, + "mne/viz/tests/test_3d.py": { + "a": 12, + "d": 7 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7898.json b/doc/sphinxext/prs/7898.json new file mode 100644 index 00000000000..7eb1052c5b2 --- /dev/null +++ b/doc/sphinxext/prs/7898.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6a7636a3b3ac15b9381048a31b9cfde142ca4036", + "authors": [ + { + "n": "Rob Luke", + "e": 
null + } + ], + "changes": { + "mne/io/nirx/nirx.py": { + "a": 7, + "d": 1 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7899.json b/doc/sphinxext/prs/7899.json new file mode 100644 index 00000000000..6e12947b9b2 --- /dev/null +++ b/doc/sphinxext/prs/7899.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0bf19a7bbb28cc399470690b31714f54efe11acc", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/79.json b/doc/sphinxext/prs/79.json new file mode 100644 index 00000000000..88fcd444448 --- /dev/null +++ b/doc/sphinxext/prs/79.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "15d2d83e606cca85a5502f741a88c4810476b255", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 3, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 123, + "d": 0 + }, + "mne/fiff/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/mixed_norm/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/mixed_norm/inverse.py": { + "a": 178, + "d": 7 + }, + "mne/mixed_norm/optim.py": { + "a": 265, + "d": 15 + }, + "mne/mixed_norm/tests/test_inverse.py": { + "a": 46, + "d": 13 + }, + "mne/mixed_norm/tests/test_optim.py": { + "a": 54, + "d": 4 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/stft.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7900.json b/doc/sphinxext/prs/7900.json new file mode 100644 index 00000000000..d9a2ca45b1d --- /dev/null +++ b/doc/sphinxext/prs/7900.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7a16f2c4dce3d565cb2403091b7c7e7b2cc618f4", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/overview/faq.rst": { + "a": 2, + "d": 2 + }, + "tutorials/source-modeling/plot_fix_bem_in_blender.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7902.json b/doc/sphinxext/prs/7902.json new file mode 100644 index 00000000000..864526ecc65 --- /dev/null +++ b/doc/sphinxext/prs/7902.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "408c5c803a6ad634a48829310b5b92ee4d907872", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 2, + "d": 2 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7904.json b/doc/sphinxext/prs/7904.json new file mode 100644 index 00000000000..2bdba33650d --- /dev/null +++ b/doc/sphinxext/prs/7904.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "676e627f3f9b9e292e131480c46aa7ab12429cdb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 0 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 3, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 3, + "d": 0 + }, + 
"mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 1, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/_digitization.py": { + "a": 9, + "d": 7 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 2, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 2, + "d": 1 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 1, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 47, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 2 + }, + "mne/io/utils.py": { + "a": 2, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7905.json b/doc/sphinxext/prs/7905.json new file mode 100644 index 00000000000..773da89ca1e --- /dev/null +++ b/doc/sphinxext/prs/7905.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e4ace23bf16b2ed3ac66ead5c8b38e57fbad577c", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/io/nirx/nirx.py": { + "a": 38, + "d": 29 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7906.json b/doc/sphinxext/prs/7906.json new file mode 100644 index 00000000000..08b3b44619e --- /dev/null +++ b/doc/sphinxext/prs/7906.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "59b0307e551f76e1c5f7bb7fc907fd828ca34245", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 5, + "d": 2 + }, + ".travis.yml": { + "a": 14, + "d": 14 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/plot_sensors_decoding.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7908.json b/doc/sphinxext/prs/7908.json new file mode 100644 index 00000000000..4c449a376ef --- /dev/null +++ b/doc/sphinxext/prs/7908.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "437ae4763bf843c9c84f823fec3abba3f6d79886", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/fixes.py": { + "a": 13, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/791.json b/doc/sphinxext/prs/791.json new file mode 100644 index 00000000000..3031e0eddcc --- /dev/null +++ b/doc/sphinxext/prs/791.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5d379d65f8378dc3b39b4234e224efe3a02eedca", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7910.json b/doc/sphinxext/prs/7910.json new file mode 100644 index 00000000000..4271abc71c2 --- /dev/null +++ b/doc/sphinxext/prs/7910.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "171917801f6d197d31af82fda7350d3553d8300f", + "authors": [ + { + 
"n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 12, + "d": 10 + }, + "mne/evoked.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 24, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7916.json b/doc/sphinxext/prs/7916.json new file mode 100644 index 00000000000..48e69105f28 --- /dev/null +++ b/doc/sphinxext/prs/7916.json @@ -0,0 +1,139 @@ +{ + "merge_commit_sha": "d122dca68ccd7c8681481b4e624ad1553cc60c6b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.15.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.19.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 9, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 13, + "d": 12 + }, + "mne/__init__.py": { + "a": 25, + "d": 8 + }, + "mne/_version.py": { + "a": 6, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 8, + "d": 4 + }, + "mne/channels/channels.py": { + "a": 52, + "d": 52 + }, + "mne/channels/tests/test_channels.py": { + "a": 30, + "d": 30 + }, + "mne/label.py": { + "a": 5, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 68, + "d": 68 + }, + "mne/source_space.py": { + "a": 3, + "d": 3 + }, + "mne/stats/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/stats/_adjacency.py": { + "a": 93, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 122, + "d": 107 + }, + "mne/stats/tests/test_adjacency.py": { + "a": 44, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 70, + "d": 68 + }, + "mne/stats/tests/test_permutations.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 42, + "d": 42 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 30, + "d": 11 + }, + "mne/utils/tests/test_docs.py": { + "a": 13, + "d": 1 + }, + "setup.py": { + "a": 1, + "d": 1 + }, + "tutorials/discussions/plot_background_statistics.py": { + "a": 21, + "d": 21 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 43, + "d": 14 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_erp.py": { + "a": 2, + "d": 2 + }, + "tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.py": { + "a": 8, + "d": 8 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py": { + "a": 4, + "d": 4 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_2samp.py": { + "a": 5, + "d": 5 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7917.json b/doc/sphinxext/prs/7917.json new file mode 100644 index 00000000000..15fe420049b --- /dev/null +++ b/doc/sphinxext/prs/7917.json @@ -0,0 +1,295 @@ +{ + "merge_commit_sha": "959d191e09294bb5238c369043af5b151876a0ab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_simulated.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + 
"mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 2 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 5, + "d": 0 + }, + "mne/connectivity/effective.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/spectral.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/event.py": { + "a": 5, + "d": 5 + }, + "mne/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 3, + "d": 3 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 4 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 13, + "d": 13 + }, + "mne/inverse_sparse/tests/test_mxne_debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/utils.py": { + "a": 3, + "d": 3 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 4 + }, + "mne/io/bti/read.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 2, + "d": 2 + }, + "mne/io/ctf_comp.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_compensator.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 4, + "d": 4 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 6, + "d": 6 + }, + "mne/morph.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/_peak_finder.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/bads.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_source.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 3, + "d": 3 + }, + "mne/stats/cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stft.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/_stockwell.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/colormap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/surface.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 5, + "d": 4 + }, + "mne/viz/circle.py": { + "a": 3, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + 
"a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7923.json b/doc/sphinxext/prs/7923.json new file mode 100644 index 00000000000..ebd4bc65c19 --- /dev/null +++ b/doc/sphinxext/prs/7923.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "c8364811813708ed13112b065b7d337ddd6f5282", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + } + ], + "changes": { + "examples/preprocessing/plot_fnirs_artifact_removal.py": { + "a": 3, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 7, + "d": 6 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 6, + "d": 5 + }, + "mne/defaults.py": { + "a": 8, + "d": 7 + }, + "mne/evoked.py": { + "a": 5, + "d": 4 + }, + "mne/io/constants.py": { + "a": 4, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 11, + "d": 0 + }, + "mne/io/pick.py": { + "a": 32, + "d": 15 + }, + "mne/io/tests/test_constants.py": { + "a": 8, + "d": 5 + }, + "mne/io/tests/test_pick.py": { + "a": 6, + "d": 5 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_beer_lambert_law.py": { + "a": 10, + "d": 3 + }, + "mne/preprocessing/tests/test_optical_density.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7924.json b/doc/sphinxext/prs/7924.json new file mode 100644 index 00000000000..d6639c76e04 --- /dev/null +++ b/doc/sphinxext/prs/7924.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4353b2c0e1c00320ff743f3d1f3deece11bc49da", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/time_frequency/csd.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7925.json b/doc/sphinxext/prs/7925.json new file mode 100644 index 00000000000..69438968574 --- /dev/null +++ b/doc/sphinxext/prs/7925.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dfcbd29086d5420accc81572927a922b29f31d23", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 18, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7927.json b/doc/sphinxext/prs/7927.json new file mode 100644 index 00000000000..659e8d51cf8 --- /dev/null +++ b/doc/sphinxext/prs/7927.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "bd6f426aa564522b21470ab7510b74a226cc507b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "doc/Makefile": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 5, + "d": 0 + }, + "mne/tests/test_import_nesting.py": { + "a": 1, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 4 + }, + 
"mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 2 + }, + "mne/viz/_brain/_scraper.py": { + "a": 63, + "d": 0 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 74, + "d": 68 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 20, + "d": 3 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 0 + }, + "requirements_testing.txt": { + "a": 1, + "d": 0 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 0 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7928.json b/doc/sphinxext/prs/7928.json new file mode 100644 index 00000000000..ad0756cbb2a --- /dev/null +++ b/doc/sphinxext/prs/7928.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "969c7d8c870d10760728362195398db9dcda7d5e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 7, + "d": 5 + }, + "mne/viz/_brain/_notebook.py": { + "a": 12, + "d": 1 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7929.json b/doc/sphinxext/prs/7929.json new file mode 100644 index 00000000000..fa21385af70 --- /dev/null +++ b/doc/sphinxext/prs/7929.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "a4088816648d07606b4b498ed08fca3d6f5949d2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 15, + "d": 5 + }, + "mne/chpi.py": { + "a": 3, + "d": 3 + }, + "mne/epochs.py": { + "a": 7, + "d": 7 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 11, + "d": 10 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7930.json b/doc/sphinxext/prs/7930.json new file mode 100644 index 00000000000..a39d48edb03 --- /dev/null +++ b/doc/sphinxext/prs/7930.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c9064e763f4b4befd7c34a02a23ab061fcb03def", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/surface.py": { + "a": 3, + "d": 4 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7931.json b/doc/sphinxext/prs/7931.json new file mode 100644 index 00000000000..a72399df7f9 --- /dev/null +++ b/doc/sphinxext/prs/7931.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "95a88e2884e44585f352744f35d86667195cdad6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 2 + }, + ".travis.yml": { + "a": 0, + "d": 1 + }, + "environment.yml": { + "a": 0, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7934.json b/doc/sphinxext/prs/7934.json new file mode 100644 index 00000000000..db7ed2df850 --- /dev/null +++ b/doc/sphinxext/prs/7934.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "31a82ecb3813e0016295ba1b8ec79fc10d2e2d6f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], 
+ "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 39, + "d": 114 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 11, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7935.json b/doc/sphinxext/prs/7935.json new file mode 100644 index 00000000000..841798af0c3 --- /dev/null +++ b/doc/sphinxext/prs/7935.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1d9058074ba6ae41cf35188380b38ff48c8d4d1b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 5, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 14, + "d": 7 + }, + "mne/viz/tests/test_evoked.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/794.json b/doc/sphinxext/prs/794.json new file mode 100644 index 00000000000..df98bea9c0c --- /dev/null +++ b/doc/sphinxext/prs/794.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d29cdaa7e8a1b98084f4691828a1a180240c5553", + "authors": [ + { + "n": "Emanuele Olivetti", + "e": null + } + ], + "changes": { + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7940.json b/doc/sphinxext/prs/7940.json new file mode 100644 index 00000000000..2426262e6bb --- /dev/null +++ b/doc/sphinxext/prs/7940.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ce30458703d31c855d791546602a9c28df54b5ba", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_notebook.py": { + "a": 35, + "d": 18 + }, + "mne/viz/backends/_notebook.py": { + "a": 11, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7943.json b/doc/sphinxext/prs/7943.json new file mode 100644 index 00000000000..4d0fd8d44b0 --- /dev/null +++ b/doc/sphinxext/prs/7943.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "b04ff04df17852a8d933075c4c5e3f4b5ee78b12", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 29, + "d": 30 + }, + "doc/conftest.py": { + "a": 1, + "d": 1 + }, + "doc/sphinxext/flow_diagram.py": { + "a": 2, + "d": 2 + }, + "environment.yml": { + "a": 1, + "d": 3 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 2, + "d": 1 + }, + "mne/bem.py": { + "a": 2, + "d": 2 + }, + "mne/conftest.py": { + "a": 1, + "d": 7 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 58, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "mne/io/proj.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 3, + "d": 2 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 2 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 7, + "d": 6 + }, + "mne/viz/backends/_pyvista.py": { + "a": 3, + "d": 1 + }, + "server_environment.yml": { + "a": 2, + "d": 1 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7948.json b/doc/sphinxext/prs/7948.json new file mode 100644 index 00000000000..d9757d5bd9e --- /dev/null +++ b/doc/sphinxext/prs/7948.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": 
"8fa7eb322e256c908c4cd0d234ae922941a1b421", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 0 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 7, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7949.json b/doc/sphinxext/prs/7949.json new file mode 100644 index 00000000000..c1370a16a98 --- /dev/null +++ b/doc/sphinxext/prs/7949.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0de98a86938917eab0667249f54eb62719c73c6d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/mne_python.rst": { + "a": 2, + "d": 2 + }, + "tutorials/sample-datasets/plot_sleep.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7950.json b/doc/sphinxext/prs/7950.json new file mode 100644 index 00000000000..6e74e7b8b39 --- /dev/null +++ b/doc/sphinxext/prs/7950.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "54b88d2c4c83a3dcd8afc403d017c3c0e0b33e79", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/source_space.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7951.json b/doc/sphinxext/prs/7951.json new file mode 100644 index 00000000000..377752986bd --- /dev/null +++ b/doc/sphinxext/prs/7951.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5acdd588e89d0cf2ed21e0240795fc7504b9e173", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/io/nirx/nirx.py": { + "a": 15, + "d": 14 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7952.json b/doc/sphinxext/prs/7952.json new file mode 100644 index 00000000000..1259c8ffef5 --- /dev/null +++ b/doc/sphinxext/prs/7952.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a8a82697f6773e70203f71b4db3c75a87fada0b6", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 3, + "d": 1 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7953.json b/doc/sphinxext/prs/7953.json new file mode 100644 index 00000000000..2d338ec4cad --- /dev/null +++ b/doc/sphinxext/prs/7953.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c7273e48cddfe547f62f78ee340e74e712e28228", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 0, + "d": 2 + }, + "mne/conftest.py": { + "a": 0, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7954.json b/doc/sphinxext/prs/7954.json new file mode 100644 index 00000000000..1da518f90e0 --- /dev/null +++ b/doc/sphinxext/prs/7954.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "517c49612da33491fe4f147588deb00072526874", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + 
"mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7955.json b/doc/sphinxext/prs/7955.json new file mode 100644 index 00000000000..b3c79b0c469 --- /dev/null +++ b/doc/sphinxext/prs/7955.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "b1cc4651e5d23b914329eb3b2b11a2de682408d6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/defaults.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/selection.py": { + "a": 5, + "d": 3 + }, + "mne/viz/_figure.py": { + "a": 1848, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 202, + "d": 398 + }, + "mne/viz/tests/test_raw.py": { + "a": 305, + "d": 121 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 76, + "d": 119 + }, + "tutorials/raw/plot_30_annotate_raw.py": { + "a": 27, + "d": 27 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7957.json b/doc/sphinxext/prs/7957.json new file mode 100644 index 00000000000..d9e40834e0a --- /dev/null +++ b/doc/sphinxext/prs/7957.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "666d169921897047b37b21d8f05cfbe02acd7f58", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/796.json b/doc/sphinxext/prs/796.json new file mode 100644 index 00000000000..0ff73dd17c1 --- /dev/null +++ b/doc/sphinxext/prs/796.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1ff24815855e7152b90cf1d6a0599d974d899294", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7961.json b/doc/sphinxext/prs/7961.json new file mode 100644 index 00000000000..8855afc820c --- /dev/null +++ b/doc/sphinxext/prs/7961.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "556ce8ed7f1990b1b164f1f20c45f3481035a7f6", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/stats-sensor-space/plot_stats_cluster_erp.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7962.json b/doc/sphinxext/prs/7962.json new file mode 100644 index 00000000000..0425af6d7eb --- /dev/null +++ b/doc/sphinxext/prs/7962.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "6e5beb64aa9abea19c89e321962856cfdb12ea5a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 6, + "d": 2 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 55, + "d": 20 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 5, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 4, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7963.json b/doc/sphinxext/prs/7963.json new file mode 100644 index 00000000000..01b9aa37772 --- /dev/null +++ b/doc/sphinxext/prs/7963.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a00ab1686ab84c9037d546305715cb4d4325eee5", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 9, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7964.json b/doc/sphinxext/prs/7964.json new file mode 100644 index 00000000000..e2857bf15da --- /dev/null +++ b/doc/sphinxext/prs/7964.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "7267af6eb4e64567be160ab98674602af3b9beb7", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 30, + "d": 11 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 5, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 6, + "d": 1 + }, + "mne/viz/backends/base_renderer.py": { + "a": 16, + "d": 0 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7965.json b/doc/sphinxext/prs/7965.json new file mode 100644 index 00000000000..c7d7d51a892 --- /dev/null +++ b/doc/sphinxext/prs/7965.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5ff1cb5fb9b2f105de2b7a750cf63c087e310dab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/ctf/ctf.py": { + "a": 7, + "d": 4 + }, + "mne/io/ctf/info.py": { + "a": 6, + "d": 14 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 44, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7967.json b/doc/sphinxext/prs/7967.json new file mode 100644 index 00000000000..093370f1ccb --- /dev/null +++ 
b/doc/sphinxext/prs/7967.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "85bc0063c7582a4deb2e453ea6c41e49036254a4", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 22, + "d": 5 + }, + "mne/io/base.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 26, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7968.json b/doc/sphinxext/prs/7968.json new file mode 100644 index 00000000000..61cd7c38381 --- /dev/null +++ b/doc/sphinxext/prs/7968.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c53ce68999b1d460ec8a2e120a213b9a34949807", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 71, + "d": 54 + }, + "mne/tests/test_epochs.py": { + "a": 50, + "d": 39 + }, + "mne/utils/mixin.py": { + "a": 4, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7970.json b/doc/sphinxext/prs/7970.json new file mode 100644 index 00000000000..0369c3eab28 --- /dev/null +++ b/doc/sphinxext/prs/7970.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "c3795a2fe5fbea43ca008d91e239f226ac9c9a85", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 9, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 2 + }, + "mne/bem.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 0, + "d": 58 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 2 + }, + "mne/io/proj.py": { + "a": 1, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 3 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7971.json b/doc/sphinxext/prs/7971.json new file mode 100644 index 00000000000..9c7ce381e44 --- /dev/null +++ b/doc/sphinxext/prs/7971.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "2965cc3644538c97b297f131bbb46ce9f954ea6f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + ".travis.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/install/advanced.rst": { + "a": 7, + "d": 0 + }, + "doc/install/mne_python.rst": { + "a": 94, + "d": 57 + }, + "doc/install/pre_install.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7972.json b/doc/sphinxext/prs/7972.json new file mode 100644 index 00000000000..bfb0917059e --- /dev/null +++ b/doc/sphinxext/prs/7972.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "1439ff81d5fe6da13ab59bb1152bbf261123452b", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Robert Luke", + "e": "mail@robertluke.net" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 
0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/snirf/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 203, + "d": 0 + }, + "mne/io/snirf/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 100, + "d": 0 + }, + "mne/utils/check.py": { + "a": 7, + "d": 0 + }, + "tutorials/io/plot_30_reading_fnirs_data.py": { + "a": 17, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7974.json b/doc/sphinxext/prs/7974.json new file mode 100644 index 00000000000..a474212c896 --- /dev/null +++ b/doc/sphinxext/prs/7974.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "05df66f4441980211ba5e17279c1d89fd5e7b38c", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Jan Sedivy", + "e": "sedivy@rtsoft.cz" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 108, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_highpass.vhdr": { + "a": 1, + "d": 4 + }, + "mne/io/brainvision/tests/data/test_mixed_lowpass.vhdr": { + "a": 49, + "d": 6 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 55, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 4, + "d": 1 + }, + "mne/utils/tests/test_numerics.py": { + "a": 2, + "d": 0 + }, + "tutorials/io/plot_20_reading_eeg_data.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7976.json b/doc/sphinxext/prs/7976.json new file mode 100644 index 00000000000..f93453452bc --- /dev/null +++ b/doc/sphinxext/prs/7976.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "75fc445cbf0fb406da94fb44df6bd2190214c05d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 27, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7979.json b/doc/sphinxext/prs/7979.json new file mode 100644 index 00000000000..5ac230c1686 --- /dev/null +++ b/doc/sphinxext/prs/7979.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "76fc838e954e70697f33cab0edbca61872ca1c23", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 10, + "d": 0 + }, + "doc/install/advanced.rst": { + "a": 7, + "d": 0 + }, + "doc/install/mne_python.rst": { + "a": 158, + "d": 55 + }, + "doc/install/pre_install.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7980.json b/doc/sphinxext/prs/7980.json new file mode 100644 index 00000000000..cf6de6226ca --- /dev/null +++ b/doc/sphinxext/prs/7980.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b75032c8a0713e6ff80f7b7127d085857f319b2f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/plot_20_visualize_evoked.py": { + "a": 7, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7982.json b/doc/sphinxext/prs/7982.json new file mode 100644 index 00000000000..9a4037b69b5 --- /dev/null +++ b/doc/sphinxext/prs/7982.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "13be7776156cfab917daedf6e4fd45000f9128a1", + 
"authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/preprocessing/maxwell.py": { + "a": 37, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7983.json b/doc/sphinxext/prs/7983.json new file mode 100644 index 00000000000..dff7532b2d5 --- /dev/null +++ b/doc/sphinxext/prs/7983.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "31f4923342c112a1d9affcd580971af6ff09f0cc", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 19, + "d": 5 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 15, + "d": 8 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 12, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7986.json b/doc/sphinxext/prs/7986.json new file mode 100644 index 00000000000..84980ede632 --- /dev/null +++ b/doc/sphinxext/prs/7986.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6bfa9c8d95d2ebcd6b3fcc5cf82c394777d8377a", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 59, + "d": 32 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7987.json b/doc/sphinxext/prs/7987.json new file mode 100644 index 00000000000..5dcf0793a04 --- /dev/null +++ b/doc/sphinxext/prs/7987.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0b907234c66cef2fd6e9ab135633bcf07d5f2842", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/cited.rst": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7988.json b/doc/sphinxext/prs/7988.json new file mode 100644 index 00000000000..5866d904371 --- /dev/null +++ b/doc/sphinxext/prs/7988.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4c4b9278c7232583a58c61b3dd243333a3c49bd6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 13, + "d": 3 + }, + "mne/viz/tests/test_misc.py": { + "a": 13, + "d": 6 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/799.json b/doc/sphinxext/prs/799.json new file mode 100644 index 00000000000..af51395f39d --- /dev/null +++ b/doc/sphinxext/prs/799.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a508da2e18b059460003dc984ce5db7b373a14d6", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/label.py": { + "a": 4, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 21, + "d": 1 + }, + "mne/surface.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7990.json b/doc/sphinxext/prs/7990.json new file mode 100644 index 00000000000..5b4bd66cae1 --- /dev/null +++ b/doc/sphinxext/prs/7990.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4903cf2b9c5aef7fabb01ce84bf7d20203568d94", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": 
"richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 44, + "d": 31 + }, + "tutorials/misc/plot_report.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7991.json b/doc/sphinxext/prs/7991.json new file mode 100644 index 00000000000..afe683d88a6 --- /dev/null +++ b/doc/sphinxext/prs/7991.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "492afc2c0aef39f0b822923adca1111037814428", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 87, + "d": 15 + }, + "mne/tests/test_report.py": { + "a": 11, + "d": 2 + }, + "tutorials/misc/plot_report.py": { + "a": 33, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7994.json b/doc/sphinxext/prs/7994.json new file mode 100644 index 00000000000..b92fb69b743 --- /dev/null +++ b/doc/sphinxext/prs/7994.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ca3595f0b073ed4c75470eae634d046236007838", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 29, + "d": 11 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7996.json b/doc/sphinxext/prs/7996.json new file mode 100644 index 00000000000..e3aff2ba6eb --- /dev/null +++ b/doc/sphinxext/prs/7996.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4c32643b996e6306018b49401a22c3c1b3feea7f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/_stockwell.py": { + "a": 9, + "d": 5 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 12, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7997.json b/doc/sphinxext/prs/7997.json new file mode 100644 index 00000000000..3a7c9b35a05 --- /dev/null +++ b/doc/sphinxext/prs/7997.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "bab6cb31f59341c3cf02cfe6f96d10e9adcf52cc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 5 + }, + ".travis.yml": { + "a": 2, + "d": 4 + }, + "MANIFEST.in": { + "a": 2, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 5, + "d": 29 + }, + "doc/install/contributing.rst": { + "a": 10, + "d": 4 + }, + "environment.yml": { + "a": 1, + "d": 10 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 6, + "d": 6 + }, + "mne/viz/backends/_pyvista.py": { + "a": 4, + "d": 4 + }, + "requirements.txt": { + "a": 1, + "d": 11 + }, + "requirements_doc.txt": { + "a": 9, + "d": 0 + }, + "requirements_testing.txt": { + "a": 14, + "d": 0 + }, + "setup.cfg": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/7998.json b/doc/sphinxext/prs/7998.json new file mode 100644 index 00000000000..01e976f420a --- /dev/null +++ b/doc/sphinxext/prs/7998.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "18ffbfa47005223dd343257daa69b4540f72b9b5", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + 
"mne/time_frequency/tests/test_tfr.py": { + "a": 35, + "d": 30 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 8 + }, + "mne/utils/docs.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8.json b/doc/sphinxext/prs/8.json new file mode 100644 index 00000000000..c0c200dab2d --- /dev/null +++ b/doc/sphinxext/prs/8.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1c6805e6866a78edaea4d1d8e22bd46146ac2a70", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "Makefile": { + "a": 4, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 79, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/80.json b/doc/sphinxext/prs/80.json new file mode 100644 index 00000000000..5981cc60ebf --- /dev/null +++ b/doc/sphinxext/prs/80.json @@ -0,0 +1,187 @@ +{ + "merge_commit_sha": "70973e15373c8b70b5117b3d09d714633a30ae62", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/artifacts/stim.py": { + "a": 1, + "d": 1 + }, + "mne/artifacts/tests/test_ecg.py": { + "a": 2, + "d": 3 + }, + "mne/artifacts/tests/test_eog.py": { + "a": 2, + "d": 2 + }, + "mne/artifacts/tests/test_peak_finder.py": { + "a": 2, + "d": 2 + }, + "mne/artifacts/tests/test_stim.py": { + "a": 5, + "d": 5 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 6, + "d": 3 + }, + "mne/fiff/matrix.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/tests/test_pick.py": { + "a": 1, + "d": 2 + }, + "mne/fiff/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 7, + "d": 7 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 6, + "d": 6 + }, + "mne/mixed_norm/debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/mixed_norm/tests/test_debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/mixed_norm/tests/test_inverse.py": { + "a": 4, + "d": 4 + }, + "mne/mixed_norm/tests/test_optim.py": { + "a": 1, + "d": 1 + }, + "mne/selection.py": { + "a": 0, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_source.py": { + "a": 3, + "d": 5 + }, + "mne/source_space.py": { + "a": 1, + "d": 0 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_permutations.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 8 + }, + "mne/tests/test_event.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_forward.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_misc.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_selection.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_source_space.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_utils.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_viz.py": { + "a": 3, + "d": 3 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 5, + "d": 4 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 1, + "d": 1 + 
}, + "mne/time_frequency/tests/test_tfr.py": { + "a": 9, + "d": 9 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + }, + "mne/utils.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8002.json b/doc/sphinxext/prs/8002.json new file mode 100644 index 00000000000..8a9d500f38c --- /dev/null +++ b/doc/sphinxext/prs/8002.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7cc37e2f6a98a62d4d405fb88a9380ec754519a6", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 19, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 23, + "d": 13 + }, + "mne/io/proj.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8003.json b/doc/sphinxext/prs/8003.json new file mode 100644 index 00000000000..a3c2968b5cf --- /dev/null +++ b/doc/sphinxext/prs/8003.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b1dfd53f4c017b5b0e1e51af1d98d38e318c3676", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 12, + "d": 0 + }, + "mne/io/proj.py": { + "a": 13, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8005.json b/doc/sphinxext/prs/8005.json new file mode 100644 index 00000000000..b568b95e675 --- /dev/null +++ b/doc/sphinxext/prs/8005.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "327de96a146d89aaaa612137bc7afdb3bf4cc8f7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8006.json b/doc/sphinxext/prs/8006.json new file mode 100644 index 00000000000..5de816c2e59 --- /dev/null +++ b/doc/sphinxext/prs/8006.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "de43183adb18c744f25cb3461d1b32c6d04c8ae8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 19, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8007.json b/doc/sphinxext/prs/8007.json new file mode 100644 index 00000000000..772c5deadef --- /dev/null +++ b/doc/sphinxext/prs/8007.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "859796c0ae0ca23d3750c113e8ea8eb3b822b8f1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_topomap.py": { + "a": 5, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/801.json b/doc/sphinxext/prs/801.json new file mode 100644 index 00000000000..ed8f991b4ba --- /dev/null +++ b/doc/sphinxext/prs/801.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a8287ddc73bd02c3424e00f44a88444d81952234", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/tests/test_viz.py": { + "a": 2, + "d": 0 + }, + "mne/viz.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8010.json b/doc/sphinxext/prs/8010.json new file mode 100644 index 00000000000..87c7538d093 --- /dev/null +++ b/doc/sphinxext/prs/8010.json @@ 
-0,0 +1,15 @@ +{ + "merge_commit_sha": "b20223fb430778a5eda4a6de7d5c5728138c1a22", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "server_environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8011.json b/doc/sphinxext/prs/8011.json new file mode 100644 index 00000000000..12160755e81 --- /dev/null +++ b/doc/sphinxext/prs/8011.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "638847ea5539be32919b6ac4dd85ef8c4c3b79aa", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8012.json b/doc/sphinxext/prs/8012.json new file mode 100644 index 00000000000..f5bf194b9f3 --- /dev/null +++ b/doc/sphinxext/prs/8012.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "056c273f1bb96bb3229e60874122f830f946746c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 5, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8014.json b/doc/sphinxext/prs/8014.json new file mode 100644 index 00000000000..2a226b765ce --- /dev/null +++ b/doc/sphinxext/prs/8014.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "1a3a392f63216f010de1658949a2bff5439eda46", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@inria.fr" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@inria.fr" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 142, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 68, + "d": 2 + }, + "tutorials/evoked/plot_eeg_erp.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8017.json b/doc/sphinxext/prs/8017.json new file mode 100644 index 00000000000..c46cb39cf28 --- /dev/null +++ b/doc/sphinxext/prs/8017.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c70260fead8b17c5f84bcf706702836010e05501", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 16, + "d": 5 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 6, + "d": 2 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8018.json b/doc/sphinxext/prs/8018.json new file mode 100644 index 00000000000..7686c3ac176 --- /dev/null +++ b/doc/sphinxext/prs/8018.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6db1b3344c0f0d3643361961b89aa32a1a3dd623", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 70, + 
"d": 47 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 7, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 42, + "d": 17 + }, + "mne/viz/backends/base_renderer.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/802.json b/doc/sphinxext/prs/802.json new file mode 100644 index 00000000000..79d2ee0e941 --- /dev/null +++ b/doc/sphinxext/prs/802.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5eaceafb3fac57ee24fa284d0c211819a17e7131", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 2, + "d": 0 + }, + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/source_space.py": { + "a": 105, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 79, + "d": 2 + }, + "mne/utils.py": { + "a": 42, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8020.json b/doc/sphinxext/prs/8020.json new file mode 100644 index 00000000000..6468399e270 --- /dev/null +++ b/doc/sphinxext/prs/8020.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8b3ca8a88909dcd14fd2619c08c9587effe232e0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/mne_python.rst": { + "a": 6, + "d": 6 + }, + "environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8021.json b/doc/sphinxext/prs/8021.json new file mode 100644 index 00000000000..760ba9931d5 --- /dev/null +++ b/doc/sphinxext/prs/8021.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "adc70d404e88dad1f13c004c7283ae80220a9983", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "server_environment.yml": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8022.json b/doc/sphinxext/prs/8022.json new file mode 100644 index 00000000000..be332331a3f --- /dev/null +++ b/doc/sphinxext/prs/8022.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aab9bae4cad04783d9457d2ae0edcbccf527e7b0", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8023.json b/doc/sphinxext/prs/8023.json new file mode 100644 index 00000000000..70bc4b0ba64 --- /dev/null +++ b/doc/sphinxext/prs/8023.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "de5e36939f37c7e8aef7e1674f393b5b258297d4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/forward/plot_forward_sensitivity_maps.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_time_frequency_simulated.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 0, + "d": 1 + }, + "mne/viz/tests/test_utils.py": { + "a": 9, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 0, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 37, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8024.json b/doc/sphinxext/prs/8024.json new file mode 100644 index 00000000000..40f54c04707 --- /dev/null +++ 
b/doc/sphinxext/prs/8024.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "9613c71f24b8f3f289fcbace86be57dcc66360ed", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_interpolate_bad_channels.py": { + "a": 23, + "d": 17 + }, + "mne/channels/channels.py": { + "a": 34, + "d": 7 + }, + "mne/channels/interpolation.py": { + "a": 52, + "d": 38 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 49, + "d": 24 + }, + "mne/defaults.py": { + "a": 1, + "d": 0 + }, + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_field_interpolation.py": { + "a": 84, + "d": 40 + }, + "mne/forward/_lead_dots.py": { + "a": 4, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8029.json b/doc/sphinxext/prs/8029.json new file mode 100644 index 00000000000..baa276eafa0 --- /dev/null +++ b/doc/sphinxext/prs/8029.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bf08aca0bf47ee3d7a085c0a262371545e7eae2d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 19, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 34 + }, + "mne/utils/docs.py": { + "a": 40, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8033.json b/doc/sphinxext/prs/8033.json new file mode 100644 index 00000000000..ca3e0ff7f14 --- /dev/null +++ b/doc/sphinxext/prs/8033.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "a12cbb70471766abc6ff8a322acb7663c37ea8cd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/proj.py": { + "a": 24, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 26, + "d": 9 + }, + "mne/viz/tests/test_evoked.py": { + "a": 71, + "d": 8 + }, + "mne/viz/tests/test_topo.py": { + "a": 15, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 7, + "d": 7 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 59, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8034.json b/doc/sphinxext/prs/8034.json new file mode 100644 index 00000000000..7b8e0f8140e --- /dev/null +++ b/doc/sphinxext/prs/8034.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4b282c833b9d6b86ab390b374863e2323fee131d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/visualization/plot_evoked_arrowmap.py": { + "a": 4, + "d": 7 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8035.json b/doc/sphinxext/prs/8035.json new file mode 100644 index 00000000000..c0373662885 --- /dev/null +++ b/doc/sphinxext/prs/8035.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0b3ad0344d041590635bb380c3880849a0d21881", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 3 + }, + "azure-pipelines.yml": { + "a": 9, + "d": 7 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 3, + "d": 1 + }, + "requirements_testing.txt": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8037.json b/doc/sphinxext/prs/8037.json new file mode 100644 index 00000000000..2fb24e088bc --- /dev/null +++ 
b/doc/sphinxext/prs/8037.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "607fb4613fb5a80dd225132a4a53fe43b8fde0fb", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8041.json b/doc/sphinxext/prs/8041.json new file mode 100644 index 00000000000..3322495ecef --- /dev/null +++ b/doc/sphinxext/prs/8041.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "4acbad2cf9dff6704a975d42539aead9f327cc2b", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 7, + "d": 11 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 0, + "d": 6 + }, + "mne/viz/backends/_pyvista.py": { + "a": 82, + "d": 77 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8042.json b/doc/sphinxext/prs/8042.json new file mode 100644 index 00000000000..66d1dbb6c37 --- /dev/null +++ b/doc/sphinxext/prs/8042.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a525db9e7c6f8f0939094699dd01ab98d791dce6", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "environment.yml": { + "a": 7, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8046.json b/doc/sphinxext/prs/8046.json new file mode 100644 index 00000000000..78b0d4cc007 --- /dev/null +++ b/doc/sphinxext/prs/8046.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0e56673d9032eee88156ca5af4dd4b95098a9d15", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/linux_conda.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 0, + "d": 1 + }, + "environment.yml": { + "a": 13, + "d": 16 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8047.json b/doc/sphinxext/prs/8047.json new file mode 100644 index 00000000000..05d103f2531 --- /dev/null +++ b/doc/sphinxext/prs/8047.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "98b87d1e3e745aab3f7c56571f444162209b37eb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8049.json b/doc/sphinxext/prs/8049.json new file mode 100644 index 00000000000..8ca8a80ad32 --- /dev/null +++ b/doc/sphinxext/prs/8049.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0681845f4e7ffaa280c4fc0ba1e0bb1730790d62", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/805.json b/doc/sphinxext/prs/805.json new file mode 100644 index 00000000000..cb99e242943 --- /dev/null +++ b/doc/sphinxext/prs/805.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "872c0a14cfbb4a6b1bfc0ebf0fb27d05e932fe24", + 
"authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 25, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8050.json b/doc/sphinxext/prs/8050.json new file mode 100644 index 00000000000..a68832294c3 --- /dev/null +++ b/doc/sphinxext/prs/8050.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "3545c3f26d4c1cffbe4b3b925eb059fb01663967", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 3, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 5, + "d": 5 + }, + "mne/conftest.py": { + "a": 8, + "d": 0 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 1, + "d": 1 + }, + "mne/io/open.py": { + "a": 11, + "d": 0 + }, + "mne/io/tests/test_apply_function.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_what.py": { + "a": 1, + "d": 1 + }, + "mne/io/what.py": { + "a": 3, + "d": 4 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 3, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 3 + }, + "mne/tests/test_chpi.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 4, + "d": 3 + }, + "mne/transforms.py": { + "a": 3, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 13, + "d": 2 + }, + "tools/get_minimal_commands.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8051.json b/doc/sphinxext/prs/8051.json new file mode 100644 index 00000000000..f3a6f6fde5a --- /dev/null +++ b/doc/sphinxext/prs/8051.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "18ca39583175a75fe19177520b0858942e78611f", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8054.json b/doc/sphinxext/prs/8054.json new file mode 100644 index 00000000000..c51bde8f529 --- /dev/null +++ b/doc/sphinxext/prs/8054.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8b732abd096430bdf95d250823c5413f2342a3b4", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 17, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8055.json b/doc/sphinxext/prs/8055.json new file mode 100644 index 00000000000..c29231dbda6 --- /dev/null +++ b/doc/sphinxext/prs/8055.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b5d651bd128c9009f6266896f8a18b59ad19f1fc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 2, + "d": 1 + }, + "requirements.txt": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8056.json b/doc/sphinxext/prs/8056.json new file mode 100644 index 00000000000..280af87ed16 --- /dev/null +++ b/doc/sphinxext/prs/8056.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "b62d8297f60c4b8c8f3ec44e499353b4a73efee1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 3 + }, + "azure-pipelines.yml": { + "a": 3, + "d": 3 + }, + 
"mne/commands/tests/test_commands.py": { + "a": 2, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 5 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 11, + "d": 9 + }, + "mne/utils/_testing.py": { + "a": 8, + "d": 3 + }, + "mne/utils/config.py": { + "a": 2, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 2, + "d": 0 + }, + "mne/utils/tests/test_config.py": { + "a": 3, + "d": 3 + }, + "mne/utils/tests/test_logging.py": { + "a": 4, + "d": 1 + }, + "mne/viz/topo.py": { + "a": 1, + "d": 1 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8057.json b/doc/sphinxext/prs/8057.json new file mode 100644 index 00000000000..011bafe28f5 --- /dev/null +++ b/doc/sphinxext/prs/8057.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8af5846a8ae47fea351d7624f5d9a48b038ace65", + "authors": [ + { + "n": "Zhenya", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/install/contributing.rst": { + "a": 25, + "d": 0 + }, + "doc/sphinxext/gen_commands.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8058.json b/doc/sphinxext/prs/8058.json new file mode 100644 index 00000000000..5ff9415da87 --- /dev/null +++ b/doc/sphinxext/prs/8058.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f7c898acb3d594d85177d1fc4a6130f7f0ff6481", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8059.json b/doc/sphinxext/prs/8059.json new file mode 100644 index 00000000000..efd2af3fd5a --- /dev/null +++ b/doc/sphinxext/prs/8059.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4c2f1c69747a2a9bbbbd54b1d009765c9b0129b1", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/806.json b/doc/sphinxext/prs/806.json new file mode 100644 index 00000000000..0caeef0d4cc --- /dev/null +++ b/doc/sphinxext/prs/806.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f263f3f470ac607254f53cd0125e435fccd5c6b9", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fiff/kit/kit.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8061.json b/doc/sphinxext/prs/8061.json new file mode 100644 index 00000000000..750d0a2c786 --- /dev/null +++ b/doc/sphinxext/prs/8061.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b4e47862011f891cd9888930fde18897cc4df0e6", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8063.json b/doc/sphinxext/prs/8063.json new file mode 100644 index 00000000000..b087b78255f --- /dev/null +++ b/doc/sphinxext/prs/8063.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "6bf644f79e019a171ca4cf15dcb59a639f6c8090", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 3, 
+ "d": 2 + }, + "mne/conftest.py": { + "a": 8, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 79, + "d": 22 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 1, + "d": 7 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 2, + "d": 14 + }, + "mne/minimum_norm/inverse.py": { + "a": 9, + "d": 41 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 107, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 8, + "d": 9 + }, + "mne/utils/docs.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8064.json b/doc/sphinxext/prs/8064.json new file mode 100644 index 00000000000..fe8dcfcf6c1 --- /dev/null +++ b/doc/sphinxext/prs/8064.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "a78e20d99f7d51f6b1617dc0330d139def76bc35", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 12, + "d": 2 + }, + "mne/conftest.py": { + "a": 16, + "d": 0 + }, + "mne/defaults.py": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 97, + "d": 45 + }, + "mne/utils/docs.py": { + "a": 33, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 80, + "d": 32 + }, + "mne/viz/_brain/_brain.py": { + "a": 411, + "d": 180 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 253, + "d": 109 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 201, + "d": 63 + }, + "mne/viz/_brain/view.py": { + "a": 34, + "d": 29 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 13, + "d": 11 + }, + "mne/viz/backends/_pyvista.py": { + "a": 45, + "d": 19 + }, + "mne/viz/backends/base_renderer.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 66, + "d": 27 + }, + "setup.cfg": { + "a": 2, + "d": 1 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 22, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8066.json b/doc/sphinxext/prs/8066.json new file mode 100644 index 00000000000..17854379187 --- /dev/null +++ b/doc/sphinxext/prs/8066.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7f30d1d840f953cbb0b01c77610f4acab6e09493", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Robert Luke", + "e": "mail@robertluke.net" + } + ], + "changes": { + "mne/io/nirx/nirx.py": { + "a": 4, + "d": 9 + }, + "mne/io/snirf/_snirf.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8068.json b/doc/sphinxext/prs/8068.json new file mode 100644 index 00000000000..248907a9736 --- /dev/null +++ b/doc/sphinxext/prs/8068.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "000dac95c0f7a4439ee60fb92f1007fd288f9611", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 131, + "d": 174 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/807.json b/doc/sphinxext/prs/807.json new file mode 100644 index 00000000000..e5dc178820d --- /dev/null +++ b/doc/sphinxext/prs/807.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "da1c1c44f162ec4a5cc6ed7cc862dcfe1cffbc0d", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/simulation/__init__.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8072.json b/doc/sphinxext/prs/8072.json new file mode 100644 index 00000000000..76723910db6 --- /dev/null +++ b/doc/sphinxext/prs/8072.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "01f504710baaec959ec3296287faa31005fe0554", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 21, + "d": 21 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8073.json b/doc/sphinxext/prs/8073.json new file mode 100644 index 00000000000..a228afa9af9 --- /dev/null +++ b/doc/sphinxext/prs/8073.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "1783b970b39b5cf70363549d728cb108aff22286", + "authors": [ + { + "n": "Rahul Nadkarni", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 17, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8075.json b/doc/sphinxext/prs/8075.json new file mode 100644 index 00000000000..d74db05c70d --- /dev/null +++ b/doc/sphinxext/prs/8075.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0e5ec90ebc3a5d428ef3a80aa9c3f14ee5329c0d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/io/fieldtrip/fieldtrip.py": { + "a": 3, + "d": 3 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 14, + "d": 1 + }, + "mne/io/fieldtrip/utils.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8077.json b/doc/sphinxext/prs/8077.json new file mode 100644 index 00000000000..19e0c65be9f --- /dev/null +++ b/doc/sphinxext/prs/8077.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f52272a4cacc12624f0e92270495c22191367bc9", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 5 + }, + "mne/evoked.py": { + "a": 2, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8079.json b/doc/sphinxext/prs/8079.json new file mode 100644 index 00000000000..b30e5b84ee6 --- /dev/null +++ b/doc/sphinxext/prs/8079.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d82a2e7fe36cb6d1374df74ab667d89d88d9496c", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Robert Luke", + "e": "mail@robertluke.net" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/nirx/nirx.py": { + "a": 2, + "d": 0 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 3, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 45, + "d": 0 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 38, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/808.json b/doc/sphinxext/prs/808.json new file mode 100644 index 00000000000..0ab3bae948e --- /dev/null +++ b/doc/sphinxext/prs/808.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e21546e88e75dc5e0e88099a70e37b69948bf2cf", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/getting_started.rst": { + "a": 44, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8082.json b/doc/sphinxext/prs/8082.json new file mode 100644 index 00000000000..e37a27dd82d --- /dev/null +++ b/doc/sphinxext/prs/8082.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "64e3dd09ec3307586514bb8c063ff2d06d8b7986", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 15, + "d": 10 + }, + "mne/surface.py": { + "a": 47, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 7, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 15, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 58, + "d": 14 + }, + "mne/viz/_brain/_brain.py": { + "a": 42, + "d": 18 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 12, + "d": 5 + }, + "mne/viz/_brain/colormap.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/surface.py": { + "a": 18, + "d": 14 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 10, + "d": 8 + }, + "mne/viz/_brain/view.py": { + "a": 28, + "d": 22 + }, + "mne/viz/backends/_pyvista.py": { + "a": 27, + "d": 18 + }, + "mne/viz/tests/test_3d.py": { + "a": 29, + "d": 3 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8083.json b/doc/sphinxext/prs/8083.json new file mode 100644 index 00000000000..0826dd3fbe8 --- /dev/null +++ b/doc/sphinxext/prs/8083.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "8fd3c1096c07c1a6dfaa7bb2a4021ba97f02b4fb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 40, + "d": 0 + }, + "mne/epochs.py": { + "a": 9, + "d": 18 + }, + "mne/evoked.py": { + "a": 6, + "d": 13 + }, + "mne/externals/doccer.py": { + "a": 1, + "d": 1 + 
}, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_regress.py": { + "a": 87, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/eog.py": { + "a": 6, + "d": 2 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 38, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 5, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 37, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 0 + }, + "tutorials/preprocessing/plot_35_artifact_correction_regression.py": { + "a": 172, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8084.json b/doc/sphinxext/prs/8084.json new file mode 100644 index 00000000000..3045622b6cd --- /dev/null +++ b/doc/sphinxext/prs/8084.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bb56d3e6c10925540ce4a970e3c3811c50d2a26e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8086.json b/doc/sphinxext/prs/8086.json new file mode 100644 index 00000000000..5b0e5c46707 --- /dev/null +++ b/doc/sphinxext/prs/8086.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "20963b79a57adab8a8058fc29d46ac037208fbe5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 13, + "d": 0 + }, + "mne/baseline.py": { + "a": 2, + "d": 9 + }, + "mne/epochs.py": { + "a": 9, + "d": 28 + }, + "mne/evoked.py": { + "a": 5, + "d": 18 + }, + "mne/report.py": { + "a": 24, + "d": 11 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_report.py": { + "a": 6, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 20, + "d": 0 + }, + "tutorials/misc/plot_report.py": { + "a": 29, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8089.json b/doc/sphinxext/prs/8089.json new file mode 100644 index 00000000000..610551815a3 --- /dev/null +++ b/doc/sphinxext/prs/8089.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "38f3c2bc34a604d4f39c88697e846389581ea50e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 7, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 13, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8090.json b/doc/sphinxext/prs/8090.json new file mode 100644 index 00000000000..37f90f19086 --- /dev/null +++ b/doc/sphinxext/prs/8090.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b16e81957731b1373a1819702a1c6d1a220dc2ac", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/morph.py": { + "a": 17, + "d": 14 + }, + "mne/source_space.py": { + "a": 23, + "d": 4 + }, + "mne/tests/test_source_space.py": { + "a": 9, + "d": 0 + }, + "mne/utils/_logging.py": { + "a": 16, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8091.json b/doc/sphinxext/prs/8091.json new file mode 100644 index 00000000000..e6e1a21f68a --- /dev/null +++ b/doc/sphinxext/prs/8091.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8f6b548229ffd0f6a92f1b72e357a5000fbfd6c2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": 
{ + "a": 2, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8095.json b/doc/sphinxext/prs/8095.json new file mode 100644 index 00000000000..d7ee107dc39 --- /dev/null +++ b/doc/sphinxext/prs/8095.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "256589149882fab099f01ec1dcea1da3bf5a0a8e", + "authors": [ + { + "n": "Yu-Han Luo", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8098.json b/doc/sphinxext/prs/8098.json new file mode 100644 index 00000000000..31750bbd69e --- /dev/null +++ b/doc/sphinxext/prs/8098.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "81c10a0f1cba9f1ba3c38d9da0c48a626b1147e7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 6, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 23, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8099.json b/doc/sphinxext/prs/8099.json new file mode 100644 index 00000000000..2f5831b98cc --- /dev/null +++ b/doc/sphinxext/prs/8099.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9dbb10156a8faf6610f3a8d979ffe9b853dd20dd", + "authors": [ + { + "n": "Martin", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/filter.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/81.json b/doc/sphinxext/prs/81.json new file mode 100644 index 00000000000..d70d5f2cb36 --- /dev/null +++ b/doc/sphinxext/prs/81.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "7581f380b790d169af687b37c286dd4880ab1b8d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/Makefile": { + "a": 5, + "d": 1 + }, + "doc/source/_images/mne_helmet.png": { + "a": 0, + "d": 0 + }, + "doc/source/manual/list.rst": { + "a": 4, + "d": 1 + }, + "doc/source/python_reference.rst": { + "a": 55, + "d": 2 + }, + "doc/sphinxext/gen_rst.py": { + "a": 254, + "d": 52 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 3, + "d": 4 + }, + "examples/inverse/plot_read_inverse.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/plot_read_source_space.py": { + "a": 1, + "d": 0 + }, + "examples/plot_read_bem_surfaces.py": { + "a": 1, + "d": 2 + }, + "examples/plot_read_forward.py": { + "a": 1, + "d": 0 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/label.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/maxfilter.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/psd.py": { + "a": 2, + "d": 2 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + }, + "mne/viz.py": { + "a": 12, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8100.json b/doc/sphinxext/prs/8100.json new file mode 100644 index 00000000000..2d2783c970b --- /dev/null +++ b/doc/sphinxext/prs/8100.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "9a80b610299fe798285296682fc5c84558302d07", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre 
Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 8, + "d": 0 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 2, + "d": 3 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 2 + }, + "mne/channels/layout.py": { + "a": 5, + "d": 4 + }, + "mne/channels/tests/test_layout.py": { + "a": 6, + "d": 4 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/_digitization.py": { + "a": 8, + "d": 5 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/constants.py": { + "a": 34, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 198, + "d": 74 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 66, + "d": 2 + }, + "mne/surface.py": { + "a": 14, + "d": 5 + }, + "mne/tests/test_surface.py": { + "a": 3, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 8, + "d": 4 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 2 + }, + "tutorials/io/plot_10_reading_meg_data.py": { + "a": 13, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8102.json b/doc/sphinxext/prs/8102.json new file mode 100644 index 00000000000..67a1e4ce497 --- /dev/null +++ b/doc/sphinxext/prs/8102.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0441154d287f3c5593843082389cf188df740887", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 5, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8103.json b/doc/sphinxext/prs/8103.json new file mode 100644 index 00000000000..d22bba156d6 --- /dev/null +++ b/doc/sphinxext/prs/8103.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "9c7788a1a40e1476f9d7d94cfdd978812c75d65b", + "authors": [ + { + "n": "Yu-Han Luo", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/cov.py": { + "a": 1, + "d": 4 + }, + "mne/epochs.py": { + "a": 9, + "d": 9 + }, + "mne/preprocessing/ecg.py": { + "a": 2, + "d": 6 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 5 + }, + "mne/preprocessing/ica.py": { + "a": 9, + "d": 15 + }, + "mne/tests/test_epochs.py": { + "a": 16, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 18, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 17, + "d": 7 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 5 + }, + "mne/viz/tests/test_ica.py": { + "a": 19, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8107.json b/doc/sphinxext/prs/8107.json new file mode 100644 index 00000000000..12948145cde --- /dev/null +++ b/doc/sphinxext/prs/8107.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "270b77768e4d2cf0f3f7ae4defd229c9f39eb2af", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 2, + "d": 1 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 8, + "d": 7 + }, + "mne/channels/montage.py": { + "a": 21, + "d": 20 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 1 + }, + "mne/io/what.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 2, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8108.json b/doc/sphinxext/prs/8108.json new file mode 100644 index 00000000000..85319227061 --- /dev/null +++ b/doc/sphinxext/prs/8108.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"c85cf38a2c4550caf5520295756654ce6bb890a5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 11, + "d": 41 + }, + "mne/tests/test_report.py": { + "a": 13, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8109.json b/doc/sphinxext/prs/8109.json new file mode 100644 index 00000000000..460c33858ff --- /dev/null +++ b/doc/sphinxext/prs/8109.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "421cb1bc2f432f8eb93a49a3c38be1ac48838497", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/811.json b/doc/sphinxext/prs/811.json new file mode 100644 index 00000000000..88c03b9745a --- /dev/null +++ b/doc/sphinxext/prs/811.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8dc2d3e627fbf19e9dce7240928519d9c4d8485e", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/stats/plot_sensor_permutation_test.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8111.json b/doc/sphinxext/prs/8111.json new file mode 100644 index 00000000000..878b8ca367a --- /dev/null +++ b/doc/sphinxext/prs/8111.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "989f2d4d6a58c73963fb907bf9da3a53587c9b62", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 4, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 38, + "d": 97 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 122, + "d": 160 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 23, + "d": 10 + }, + "mne/viz/backends/_pyvista.py": { + "a": 82, + "d": 12 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8112.json b/doc/sphinxext/prs/8112.json new file mode 100644 index 00000000000..791ccef5aca --- /dev/null +++ b/doc/sphinxext/prs/8112.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "dc3bffdbbbd9dd4249561792f73328ced0b5c2c0", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 11, + "d": 2 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 60, + "d": 15 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8113.json b/doc/sphinxext/prs/8113.json new file mode 100644 index 00000000000..dd87f6450ad --- /dev/null +++ b/doc/sphinxext/prs/8113.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "c997dc0d0ebed9f4f1fede5c869e700c8d687a6a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "wronk", + "e": "wronk.mark@gmail.com" + } + ], + "changes": { + "doc/python_reference.rst": { + "a": 3, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 8, + "d": 40 + }, + "mne/externals/doccer.py": { + "a": 5, + "d": 4 + }, + "mne/io/egi/egimff.py": { + "a": 2, + "d": 2 + }, + "mne/io/tag.py": { + "a": 6, + "d": 4 + }, + "mne/preprocessing/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 434, + "d": 19 + }, + "mne/preprocessing/maxwell.py": { + "a": 68, + "d": 46 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 116, + "d": 17 + }, + "mne/utils/docs.py": { 
+ "a": 26, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8116.json b/doc/sphinxext/prs/8116.json new file mode 100644 index 00000000000..c5bfa594998 --- /dev/null +++ b/doc/sphinxext/prs/8116.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "13a5ea40f005e9d9f45ab0057c8faaefadb10eb5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_mne_helmet.py": { + "a": 32, + "d": 0 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/icons/mne_icon.png": { + "a": 0, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 25, + "d": 10 + }, + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 12 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 5, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 15, + "d": 6 + }, + "mne/viz/backends/base_renderer.py": { + "a": 9, + "d": 3 + }, + "mne/viz/backends/renderer.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8117.json b/doc/sphinxext/prs/8117.json new file mode 100644 index 00000000000..5950b7fdfd4 --- /dev/null +++ b/doc/sphinxext/prs/8117.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1c29127598282be7f177406c02acde3fd38eb4d8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "logo/generate_mne_logos.py": { + "a": 19, + "d": 2 + }, + "mne/icons/mne.qrc": { + "a": 1, + "d": 0 + }, + "mne/icons/resources.py": { + "a": 824, + "d": 151 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 1, + "d": 14 + }, + "mne/viz/backends/_pyvista.py": { + "a": 27, + "d": 1 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/812.json b/doc/sphinxext/prs/812.json new file mode 100644 index 00000000000..9cb891be52f --- /dev/null +++ b/doc/sphinxext/prs/812.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8683b15314d53ff3cbd141a80ab7c8afe4b9e56b", + "authors": [ + { + "n": "Andrew R. 
Dykstra", + "e": null + } + ], + "changes": { + "examples/inverse/plot_tf_lcmv.py": { + "a": 9, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 11, + "d": 4 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8122.json b/doc/sphinxext/prs/8122.json new file mode 100644 index 00000000000..0f4e24a329a --- /dev/null +++ b/doc/sphinxext/prs/8122.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "6d0b634bda6488919feb468855563674d430a421", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/nirx.py": { + "a": 2, + "d": 3 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 90, + "d": 4 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_beer_lambert_law.py": { + "a": 6, + "d": 4 + }, + "mne/preprocessing/tests/test_nirs.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/tests/test_optical_density.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_scalp_coupling_index.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/tests/test_temporal_derivative_distribution_repair.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8123.json b/doc/sphinxext/prs/8123.json new file mode 100644 index 00000000000..e392c6ef1e8 --- /dev/null +++ b/doc/sphinxext/prs/8123.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bfbdc83d87135890ef18f3663394e96302fb816b", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8125.json b/doc/sphinxext/prs/8125.json new file mode 100644 index 00000000000..7b78fc906bc --- /dev/null +++ b/doc/sphinxext/prs/8125.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "0686c1424dd46b9275791734cb3cd1b5fab4b40d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/icons/mne-circle-black.png": { + "a": 0, + "d": 0 + }, + "mne/icons/mne.qrc": { + "a": 1, + "d": 0 + }, + "mne/icons/reset-black-18dp.svg": { + "a": 1, + "d": 0 + }, + "mne/icons/resources.py": { + "a": 368, + "d": 335 + }, + "mne/viz/_brain/_brain.py": { + "a": 11, + "d": 1 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 16, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8126.json b/doc/sphinxext/prs/8126.json new file mode 100644 index 00000000000..8aee5511b00 --- /dev/null +++ b/doc/sphinxext/prs/8126.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c3283d3b0dfd8f28851b27ee6ae745c4b0404aba", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 4, + "d": 2 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 38, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8129.json b/doc/sphinxext/prs/8129.json new file mode 100644 index 00000000000..b0f98fcb343 --- /dev/null +++ b/doc/sphinxext/prs/8129.json @@ -0,0 
+1,27 @@ +{ + "merge_commit_sha": "19358a187ad405866df4bdd8c74c226afa1bc31b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/fixes.py": { + "a": 3, + "d": 4 + }, + "mne/morph.py": { + "a": 31, + "d": 17 + }, + "mne/tests/test_morph.py": { + "a": 27, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/813.json b/doc/sphinxext/prs/813.json new file mode 100644 index 00000000000..3b8de0ecaaa --- /dev/null +++ b/doc/sphinxext/prs/813.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "b873db2c77ca57bd95203eb23a36162f83a8ebb5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 4, + "d": 1 + }, + "mne/baseline.py": { + "a": 5, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 8, + "d": 5 + }, + "mne/cov.py": { + "a": 15, + "d": 9 + }, + "mne/fiff/raw.py": { + "a": 29, + "d": 15 + }, + "mne/fiff/tests/test_raw.py": { + "a": 4, + "d": 4 + }, + "mne/filter.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 10, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 18, + "d": 3 + }, + "mne/preprocessing/peak_finder.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 5, + "d": 3 + }, + "mne/stats/cluster_level.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/multitaper.py": { + "a": 6, + "d": 6 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8132.json b/doc/sphinxext/prs/8132.json new file mode 100644 index 00000000000..b7bb00d19a4 --- /dev/null +++ b/doc/sphinxext/prs/8132.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "8b0a515ccd2c6389fc62c99bc60a4f1b7136b49e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 14, + "d": 0 + }, + "examples/preprocessing/plot_rereference_eeg.py": { + "a": 20, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 7, + "d": 23 + }, + "mne/io/reference.py": { + "a": 41, + "d": 74 + }, + "mne/io/tests/test_reference.py": { + "a": 62, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 46, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8134.json b/doc/sphinxext/prs/8134.json new file mode 100644 index 00000000000..3c79d0deb8b --- /dev/null +++ b/doc/sphinxext/prs/8134.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4287eda8985e9131634cd0abee89e444d14aeedd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 16, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 28, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8135.json b/doc/sphinxext/prs/8135.json new file mode 100644 index 00000000000..a1833f2fc84 --- /dev/null +++ b/doc/sphinxext/prs/8135.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9502518b5214ea7fdf1e835d849fd9cbdefc884f", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8137.json b/doc/sphinxext/prs/8137.json new file mode 100644 index 00000000000..80accf1ce07 --- /dev/null +++ b/doc/sphinxext/prs/8137.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "f2d019339ad786d8704d63a52871ade6d05f3248", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 0 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 6, + "d": 8 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 7, + "d": 1 + }, + "mne/beamformer/_rap_music.py": { + "a": 60, + "d": 35 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 22, + "d": 15 + }, + "mne/forward/forward.py": { + "a": 30, + "d": 17 + }, + "mne/forward/tests/test_forward.py": { + "a": 10, + "d": 1 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 16, + "d": 23 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 71, + "d": 23 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 46, + "d": 24 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 129, + "d": 24 + }, + "mne/io/proj.py": { + "a": 6, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 10, + "d": 5 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 44, + "d": 29 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8138.json b/doc/sphinxext/prs/8138.json new file mode 100644 index 00000000000..c7cff1034fb --- /dev/null +++ b/doc/sphinxext/prs/8138.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ae68fc4b23d505ec3b57f6a65a7c4fd2203784d8", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_channels.py": { + "a": 8, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/814.json b/doc/sphinxext/prs/814.json new file mode 100644 index 00000000000..c99c96e2b0c --- /dev/null +++ b/doc/sphinxext/prs/814.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c3614e11dc054edb481a757106c83a1970657f37", + "authors": [ + { + "n": "Andrew R. 
Dykstra", + "e": null + } + ], + "changes": { + "examples/inverse/plot_tf_dics.py": { + "a": 5, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 10, + "d": 3 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8141.json b/doc/sphinxext/prs/8141.json new file mode 100644 index 00000000000..8b789ddb4eb --- /dev/null +++ b/doc/sphinxext/prs/8141.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4c44bd6ae90dd4d5c6c2e925d93f1b2fbf7bda6a", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8146.json b/doc/sphinxext/prs/8146.json new file mode 100644 index 00000000000..7c6b00c7c18 --- /dev/null +++ b/doc/sphinxext/prs/8146.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "56cc7bb8dd0a955c319e800a14eba4146d471866", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8151.json b/doc/sphinxext/prs/8151.json new file mode 100644 index 00000000000..3c8dbacc88f --- /dev/null +++ b/doc/sphinxext/prs/8151.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "9f4ace5a51174f3922fdbbf887ad3d866faa8e24", + "authors": [ + { + "n": null, + "e": "flKazemakase@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 9, + "d": 0 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 2, + "d": 2 + }, + "examples/decoding/plot_decoding_csp_timefreq.py": { + "a": 5, + "d": 5 + }, + "mne/decoding/csp.py": { + "a": 166, + "d": 127 + }, + "mne/decoding/tests/test_csp.py": { + "a": 78, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8153.json b/doc/sphinxext/prs/8153.json new file mode 100644 index 00000000000..174c4ab5220 --- /dev/null +++ b/doc/sphinxext/prs/8153.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3168a9aa68c4989420c5a447ac1dc38bba07fc7e", + "authors": [ + { + "n": "Lau Møller Andersen", + "e": "lmandersen@cfin.au.dk" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 11, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 8, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 37, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8155.json b/doc/sphinxext/prs/8155.json new file mode 100644 index 00000000000..8a7d6f85895 --- /dev/null +++ b/doc/sphinxext/prs/8155.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1cf59be1e6454f6ef5aa81209866346c1c0fbd65", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/pick.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8156.json b/doc/sphinxext/prs/8156.json new file mode 100644 index 00000000000..2a9a9ca662b --- /dev/null +++ b/doc/sphinxext/prs/8156.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "7f34517b9444a5d6c0585699a27d8c94e3840ab7", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 6, + "d": 0 + }, + "mne/baseline.py": { + "a": 41, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 3, + "d": 2 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 5, + "d": 2 + }, + "mne/conftest.py": { + "a": 2, + "d": 1 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 58, + "d": 55 + }, + "mne/evoked.py": { + "a": 164, + "d": 27 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 5, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 4, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 6, + "d": 2 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 6, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 22, + "d": 5 + }, + "mne/tests/test_evoked.py": { + "a": 36, + "d": 3 + }, + "mne/tests/test_morph.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 6, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 44, + "d": 15 + }, + "mne/viz/tests/test_evoked.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 1 + }, + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 19, + "d": 0 + }, + "tutorials/evoked/plot_20_visualize_evoked.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8161.json b/doc/sphinxext/prs/8161.json new file mode 100644 index 00000000000..400b53a0577 --- /dev/null +++ b/doc/sphinxext/prs/8161.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e09fa30b86ba2af0574e01abe45f9674cdd5cc4d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".github/workflows/ci.yml": { + "a": 75, + "d": 0 + }, + ".travis.yml": { + "a": 2, + "d": 17 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 6, + "d": 6 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8164.json b/doc/sphinxext/prs/8164.json new file mode 100644 index 00000000000..a2b4f4aeca5 --- /dev/null +++ b/doc/sphinxext/prs/8164.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "3bd482e446d7fcd0751dfcf666afb0393a24809f", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 49, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8165.json b/doc/sphinxext/prs/8165.json new file mode 100644 index 00000000000..1c2d0d5747c --- /dev/null +++ b/doc/sphinxext/prs/8165.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fbc546240c1123d2942cc87ad5d9336fbfbea39f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_timeviewer.py": { + "a": 14, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8166.json b/doc/sphinxext/prs/8166.json new file mode 100644 index 00000000000..50cd17441aa --- /dev/null +++ b/doc/sphinxext/prs/8166.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7ce47156f6e7405977d4fc9c470b3358768049a5", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/renderer.py": { + "a": 24, + "d": 4 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 2, + "d": 1 + }, + "tutorials/source-modeling/plot_background_freesurfer_mne.py": { + "a": 10, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8169.json b/doc/sphinxext/prs/8169.json new file mode 100644 index 00000000000..2c73ac30d7a --- /dev/null +++ b/doc/sphinxext/prs/8169.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "7cf71560436e8ca6156a9fe95cb87cbe3ab655b3", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/report.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/817.json b/doc/sphinxext/prs/817.json new file mode 100644 index 00000000000..86ff6a148b1 --- /dev/null +++ b/doc/sphinxext/prs/817.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "603e479f4d50938927759ee39fa227b97c132762", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fiff/kit/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/fiff/kit/coreg.py": { + "a": 6, + "d": 4 + }, + "mne/fiff/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/kit/tests/test_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/kit/tests/test_kit.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8170.json b/doc/sphinxext/prs/8170.json new file mode 100644 index 00000000000..194e59010da --- /dev/null +++ b/doc/sphinxext/prs/8170.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d4b76bd7ca518d6fbd56e7b266fbb804ab4eee61", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 22, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8174.json b/doc/sphinxext/prs/8174.json new file mode 100644 index 00000000000..1ff2fd6142e --- /dev/null +++ b/doc/sphinxext/prs/8174.json @@ -0,0 
+1,27 @@ +{ + "merge_commit_sha": "61feb1686b8859bd666866f62c1179e6c0ddbe08", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 3, + "d": 5 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8175.json b/doc/sphinxext/prs/8175.json new file mode 100644 index 00000000000..7d2fef7e9d9 --- /dev/null +++ b/doc/sphinxext/prs/8175.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cabc08f9ee07eccd8e9ee0c610b2f7a0db90b755", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8176.json b/doc/sphinxext/prs/8176.json new file mode 100644 index 00000000000..651f4183785 --- /dev/null +++ b/doc/sphinxext/prs/8176.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "1cda89a6dc845f717a6498e685a938392efb01aa", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_includes/data_formats.rst": { + "a": 3, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 0, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/io/persyst/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/persyst/persyst.py": { + "a": 410, + "d": 0 + }, + "mne/io/persyst/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 215, + "d": 0 + }, + "tutorials/io/plot_20_reading_eeg_data.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8177.json b/doc/sphinxext/prs/8177.json new file mode 100644 index 00000000000..40e151b7e4b --- /dev/null +++ b/doc/sphinxext/prs/8177.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "92254198f02e19ca9227888fd2b1d5f8a90c8084", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/utils/progressbar.py": { + "a": 13, + "d": 3 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8178.json b/doc/sphinxext/prs/8178.json new file mode 100644 index 00000000000..c94c925aee9 --- /dev/null +++ b/doc/sphinxext/prs/8178.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "54259a8e151163381b0bb72de691607839c27236", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 32, + "d": 10 + }, + "mne/channels/montage.py": { + "a": 5, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 81, + "d": 19 + }, + "mne/preprocessing/_csd.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 20, + "d": 2 + }, + "mne/viz/montage.py": { + "a": 2, + 
"d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8179.json b/doc/sphinxext/prs/8179.json new file mode 100644 index 00000000000..9c85b087e8d --- /dev/null +++ b/doc/sphinxext/prs/8179.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "dfe419501b2c77d8938f80845e1df283cebd0fb8", + "authors": [ + { + "n": "Hubert Banville", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/sample-datasets/plot_sleep.py": { + "a": 16, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/818.json b/doc/sphinxext/prs/818.json new file mode 100644 index 00000000000..ae76730859f --- /dev/null +++ b/doc/sphinxext/prs/818.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6d156371994920c6b7294b9eeef4712da4582589", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 198, + "d": 25 + }, + "mne/tests/test_label.py": { + "a": 47, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8180.json b/doc/sphinxext/prs/8180.json new file mode 100644 index 00000000000..5bda83b3597 --- /dev/null +++ b/doc/sphinxext/prs/8180.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "db0bacc5a01bf1d45ede7d21b5cdedfa234ded36", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/viz/tests/test_epochs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8181.json b/doc/sphinxext/prs/8181.json new file mode 100644 index 00000000000..3cb26becb29 --- /dev/null +++ b/doc/sphinxext/prs/8181.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "0d88b2ef0ec924c1bcfdc6c1a9fe7b8cd7a752bd", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 160, + "d": 0 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 5, + "d": 443 + }, + "mne/viz/_brain/callback.py": { + "a": 185, + "d": 0 + }, + "mne/viz/_brain/mplcanvas.py": { + "a": 116, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 20, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8182.json b/doc/sphinxext/prs/8182.json new file mode 100644 index 00000000000..c839f6147b8 --- /dev/null +++ b/doc/sphinxext/prs/8182.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ff6751d8ef3b4b1e495ac3e68c59c3e1a41f2edf", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 40, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8185.json b/doc/sphinxext/prs/8185.json new file mode 100644 index 00000000000..f95067f15d1 --- /dev/null +++ b/doc/sphinxext/prs/8185.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b76890bb4f8202282a70cffb5dabed11977dc832", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 17, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8187.json b/doc/sphinxext/prs/8187.json new file mode 100644 index 00000000000..cf97db33bdc --- /dev/null +++ b/doc/sphinxext/prs/8187.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"174ab824bb99dc7d4ba8246d16f179f973554256", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/constants.py": { + "a": 12, + "d": 4 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8188.json b/doc/sphinxext/prs/8188.json new file mode 100644 index 00000000000..7fdac32886b --- /dev/null +++ b/doc/sphinxext/prs/8188.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e77de58173b0c2b3bd1a85ec42f55fd808d8b20e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 2, + "d": 3 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 10, + "d": 4 + }, + "tutorials/source-modeling/plot_dipole_fit.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8189.json b/doc/sphinxext/prs/8189.json new file mode 100644 index 00000000000..7c326929dfc --- /dev/null +++ b/doc/sphinxext/prs/8189.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "a240a55f78382baae269b2bc834fdfbec3a68899", + "authors": [ + { + "n": "Hubert Banville", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 3, + "d": 12 + }, + "mne/conftest.py": { + "a": 10, + "d": 0 + }, + "mne/datasets/sleep_physionet/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 25, + "d": 6 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 27, + "d": 8 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 69, + "d": 26 + }, + "mne/epochs.py": { + "a": 8, + "d": 14 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 7 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 3 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/check.py": { + "a": 29, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 15, + "d": 1 + }, + "requirements_testing.txt": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8190.json b/doc/sphinxext/prs/8190.json new file mode 100644 index 00000000000..1e17f7625ab --- /dev/null +++ b/doc/sphinxext/prs/8190.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "f6b58d4ad1ee23a695b986b2e12e84af186be5cc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 105, + "d": 1 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 68, + "d": 5 + }, + "tutorials/misc/plot_ecog.py": { + "a": 22, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8191.json b/doc/sphinxext/prs/8191.json new file mode 100644 index 00000000000..3b79800725f --- /dev/null +++ b/doc/sphinxext/prs/8191.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "09d50eb26721930d660c8c0a09893588f64aab4e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + 
], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/bem.py": { + "a": 122, + "d": 60 + }, + "mne/tests/test_bem.py": { + "a": 22, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8193.json b/doc/sphinxext/prs/8193.json new file mode 100644 index 00000000000..f4986ad5cac --- /dev/null +++ b/doc/sphinxext/prs/8193.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "af1120c4cdea7086c2af661fefd37ef93ac8f410", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 9, + "d": 6 + }, + "mne/channels/tests/test_montage.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8194.json b/doc/sphinxext/prs/8194.json new file mode 100644 index 00000000000..60e2a07b995 --- /dev/null +++ b/doc/sphinxext/prs/8194.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "62fabdbdd4453a52d9f87d15109ffe2e3eee8b87", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 21, + "d": 12 + }, + "mne/viz/backends/_pyvista.py": { + "a": 45, + "d": 59 + }, + "mne/viz/backends/_utils.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 17, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8197.json b/doc/sphinxext/prs/8197.json new file mode 100644 index 00000000000..2b9305a0c5f --- /dev/null +++ b/doc/sphinxext/prs/8197.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "f0e20c05511145927b666b40b774fb99cc64d2f2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_mne_cov_power.py": { + "a": 8, + "d": 22 + }, + "mne/cov.py": { + "a": 86, + "d": 2 + }, + "mne/externals/doccer.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 111, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 32, + "d": 26 + }, + "mne/viz/tests/test_topomap.py": { + "a": 10, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 21, + "d": 82 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8198.json b/doc/sphinxext/prs/8198.json new file mode 100644 index 00000000000..f773502562b --- /dev/null +++ b/doc/sphinxext/prs/8198.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "fbbf85b44bd3ce000a6777eb3dc9cac2f34d3e2f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 3, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 6, + "d": 3 + }, + "mne/utils/check.py": { + "a": 7, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 16, + "d": 9 + }, + "mne/viz/misc.py": { + "a": 16, + "d": 5 + }, + "mne/viz/tests/test_misc.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/82.json b/doc/sphinxext/prs/82.json new file mode 100644 index 00000000000..d65ea180aa0 --- /dev/null +++ b/doc/sphinxext/prs/82.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9244365010531de07ac37252f8d5a80cab6d0f56", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": 
"alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/artifacts/stim.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/forward.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8200.json b/doc/sphinxext/prs/8200.json new file mode 100644 index 00000000000..a950fdebd38 --- /dev/null +++ b/doc/sphinxext/prs/8200.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c1f90498dec08227c19cf7277745454f1551257f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8202.json b/doc/sphinxext/prs/8202.json new file mode 100644 index 00000000000..f3304986e3f --- /dev/null +++ b/doc/sphinxext/prs/8202.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "97498364165e8702e07d582cd13cfb48cea51b06", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_vector_mne_solution.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 1 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8206.json b/doc/sphinxext/prs/8206.json new file mode 100644 index 00000000000..b728922b970 --- /dev/null +++ b/doc/sphinxext/prs/8206.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5e5c2e102d06eedcef19af799e22741afec92a68", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/backends/renderer.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8207.json b/doc/sphinxext/prs/8207.json new file mode 100644 index 00000000000..ea99195659c --- /dev/null +++ b/doc/sphinxext/prs/8207.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "053faee1d4151d4dee9b82680e93772ed880faa3", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8209.json b/doc/sphinxext/prs/8209.json new file mode 100644 index 00000000000..03e91525453 --- /dev/null +++ b/doc/sphinxext/prs/8209.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "48823ad1dd1138b8a07e0770ef64368ae24de64e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_annotations.py": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/821.json b/doc/sphinxext/prs/821.json new file mode 100644 index 00000000000..408bd95118a --- /dev/null +++ b/doc/sphinxext/prs/821.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e8a6bab0af6466788453f9467ce62c258b597924", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 2, + "d": 1 + }, + "mne/utils.py": { + "a": 18, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8210.json 
b/doc/sphinxext/prs/8210.json new file mode 100644 index 00000000000..37efe01ea29 --- /dev/null +++ b/doc/sphinxext/prs/8210.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "06f232cd75c203765b7165a8d94df1a52d5c7cea", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 6, + "d": 2 + }, + "mne/channels/tests/test_channels.py": { + "a": 25, + "d": 3 + }, + "mne/io/base.py": { + "a": 31, + "d": 9 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 5 + }, + "mne/io/cnt/cnt.py": { + "a": 19, + "d": 30 + }, + "mne/io/edf/edf.py": { + "a": 56, + "d": 69 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 8, + "d": 2 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 3, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 11, + "d": 9 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 14, + "d": 4 + }, + "mne/io/eximia/tests/test_eximia.py": { + "a": 3, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 7, + "d": 4 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 2, + "d": 2 + }, + "mne/io/snirf/_snirf.py": { + "a": 3, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 143, + "d": 3 + }, + "mne/io/utils.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8212.json b/doc/sphinxext/prs/8212.json new file mode 100644 index 00000000000..6c91c120c82 --- /dev/null +++ b/doc/sphinxext/prs/8212.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "039e712156066d5a4d2a10b5d8797f7b72614dce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/viz/raw.py": { + "a": 17, + "d": 7 + }, + "mne/viz/tests/test_raw.py": { + "a": 13, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 9, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8214.json b/doc/sphinxext/prs/8214.json new file mode 100644 index 00000000000..3b4b2cde063 --- /dev/null +++ b/doc/sphinxext/prs/8214.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3d16144b9e8779abbfdb7b66192d5ab339601996", + "authors": [ + { + "n": "Jeroen Van Der Donckt", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/822.json b/doc/sphinxext/prs/822.json new file mode 100644 index 00000000000..025a8e354aa --- /dev/null +++ b/doc/sphinxext/prs/822.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "061f1b0bd69ac4306c0fbaace8af4c9b5e48f033", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 23, + "d": 38 + }, + "mne/fiff/raw.py": { + "a": 44, + "d": 1 + }, + "mne/tests/test_viz.py": { + "a": 15, + "d": 0 + }, + "mne/viz.py": { + "a": 114, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8220.json b/doc/sphinxext/prs/8220.json new file mode 100644 index 00000000000..e5c74df34d5 --- /dev/null +++ b/doc/sphinxext/prs/8220.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": 
"6c882c68f18ef7321c18372b1baf11007bf673ba", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_utils.py": { + "a": 5, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8223.json b/doc/sphinxext/prs/8223.json new file mode 100644 index 00000000000..6377930d519 --- /dev/null +++ b/doc/sphinxext/prs/8223.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "84517fd90a5e57f02cc84157b086562e074efa1f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/coreg.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_coreg.py": { + "a": 7, + "d": 4 + }, + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8224.json b/doc/sphinxext/prs/8224.json new file mode 100644 index 00000000000..6ca0d199ffc --- /dev/null +++ b/doc/sphinxext/prs/8224.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "2a423f9aa7613e0809bbe2bafcd7469d313234bb", + "authors": [ + { + "n": "Nikolai M Chapochnikov", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/time_frequency/_stockwell.py": { + "a": 17, + "d": 19 + }, + "mne/time_frequency/multitaper.py": { + "a": 4, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 14, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8225.json b/doc/sphinxext/prs/8225.json new file mode 100644 index 00000000000..4223c85f015 --- /dev/null +++ b/doc/sphinxext/prs/8225.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b90ca523cef1b3260b4a3d69db4b7871b64d11e5", + "authors": [ + { + "n": "Steven Bierer", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/chpi.py": { + "a": 28, + "d": 28 + }, + "mne/tests/test_chpi.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8227.json b/doc/sphinxext/prs/8227.json new file mode 100644 index 00000000000..b6fc3cdfed6 --- /dev/null +++ b/doc/sphinxext/prs/8227.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ce24bdca2ed34aee6a123e572769913032921cd9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/glossary.rst": { + "a": 11, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/plot_whitened.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8228.json b/doc/sphinxext/prs/8228.json new file mode 100644 index 00000000000..5efaf94a4dc --- /dev/null +++ b/doc/sphinxext/prs/8228.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d87f7f715900c149517153a05cf664af2442c20e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 27, + "d": 4 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 34, + "d": 4 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/8229.json b/doc/sphinxext/prs/8229.json new file mode 100644 index 00000000000..29715bc84ca --- /dev/null +++ b/doc/sphinxext/prs/8229.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "9bb050bdf61a4a3c7f4d6514323fa4a638ef6874", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.18.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.20.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/flat.py": { + "a": 61, + "d": 11 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 22, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/823.json b/doc/sphinxext/prs/823.json new file mode 100644 index 00000000000..8cc7b3cdce0 --- /dev/null +++ b/doc/sphinxext/prs/823.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0ae7b614db435a4e8643e7d082024b1a5b935a58", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_csp.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8230.json b/doc/sphinxext/prs/8230.json new file mode 100644 index 00000000000..df2ac694ff7 --- /dev/null +++ b/doc/sphinxext/prs/8230.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bbe61820638c347f86982e56717be8d224e6c8aa", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/io/nirx/nirx.py": { + "a": 0, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8234.json b/doc/sphinxext/prs/8234.json new file mode 100644 index 00000000000..99f2190eaf8 --- /dev/null +++ b/doc/sphinxext/prs/8234.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bd1d12255deaa5b4b064aba9b7af428d27258e8a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 2, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 70, + "d": 6 + }, + "mne/utils/tests/test_logging.py": { + "a": 29, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8235.json b/doc/sphinxext/prs/8235.json new file mode 100644 index 00000000000..69cf8b0cdef --- /dev/null +++ b/doc/sphinxext/prs/8235.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "821f186c3bdf58e0d2b71c4d6653b39fa390e0a6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_notebook.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8239.json b/doc/sphinxext/prs/8239.json new file mode 100644 index 00000000000..4d668b9bc86 --- /dev/null +++ b/doc/sphinxext/prs/8239.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b3e28c12f812eb81bd9ef453dc368c40bc504717", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/824.json b/doc/sphinxext/prs/824.json new file mode 100644 index 00000000000..1061d503502 --- /dev/null +++ b/doc/sphinxext/prs/824.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b732c2c42cfb03978b7eee9810492742309ebac3", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/getting_started.rst": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8240.json b/doc/sphinxext/prs/8240.json new file mode 100644 index 00000000000..7851e1f8fcc --- /dev/null +++ b/doc/sphinxext/prs/8240.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e63190c3b63ed080d7e7442405277515823d9d18", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".coveragerc": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 39, + "d": 0 + }, + "requirements_testing.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8243.json b/doc/sphinxext/prs/8243.json new file mode 100644 index 00000000000..beb4c9a9c2c --- /dev/null +++ b/doc/sphinxext/prs/8243.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "0bfa27fc31ec614b2046122ec4ded77b377006e3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/layout.py": { + "a": 2, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 12, + "d": 11 + }, + "mne/channels/tests/test_montage.py": { + "a": 18, + "d": 2 + }, + "mne/coreg.py": { + "a": 2, + "d": 1 + }, + "mne/io/_digitization.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 0 + }, + "tutorials/misc/plot_ecog.py": { + "a": 22, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8244.json b/doc/sphinxext/prs/8244.json new file mode 100644 index 00000000000..2f41da24d7f --- /dev/null +++ b/doc/sphinxext/prs/8244.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2806dd8f7bfba7302568466cc35a83ebb8018804", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_includes/forward.rst": { + "a": 2, + "d": 2 + }, + "doc/install/mne_c.rst": { + "a": 1, + "d": 1 + }, + "doc/overview/faq.rst": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8245.json b/doc/sphinxext/prs/8245.json new file mode 100644 index 00000000000..aae93bd0387 --- /dev/null +++ b/doc/sphinxext/prs/8245.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "132c05128be3b94a2d1bccdbcc69f8456166ae82", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/defaults.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 29, + "d": 3 + }, + "mne/viz/_brain/view.py": { + "a": 2, + "d": 4 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 2 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8246.json b/doc/sphinxext/prs/8246.json new file mode 100644 index 00000000000..46433b7fd7f --- /dev/null +++ b/doc/sphinxext/prs/8246.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ca3e787ca40059ee72b93e517adbe7506688c2a3", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 8, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/8248.json b/doc/sphinxext/prs/8248.json new file mode 100644 index 00000000000..2d9843a7546 --- /dev/null +++ b/doc/sphinxext/prs/8248.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "018320e72107c6cb6691772edc4f78b15cc73703", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 5, + "d": 97 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 115, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8252.json b/doc/sphinxext/prs/8252.json new file mode 100644 index 00000000000..14893845616 --- /dev/null +++ b/doc/sphinxext/prs/8252.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e5a963411e48c717da98c51565a816395bdfb70d", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 35, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8253.json b/doc/sphinxext/prs/8253.json new file mode 100644 index 00000000000..d55ea8df8ca --- /dev/null +++ b/doc/sphinxext/prs/8253.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f5080a8f6a2c0c3ffc1d2d36bebf34c630a6a5c7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/proj.py": { + "a": 31, + "d": 26 + }, + "mne/tests/test_proj.py": { + "a": 12, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8254.json b/doc/sphinxext/prs/8254.json new file mode 100644 index 00000000000..dc3228ebb5d --- /dev/null +++ b/doc/sphinxext/prs/8254.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "b911a769d098b17bf5d40c472a2bed16db8b371d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 2, + "d": 5 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 2 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 10, + "d": 5 + }, + "tutorials/source-modeling/plot_mne_solutions.py": { + "a": 3, + "d": 4 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8255.json b/doc/sphinxext/prs/8255.json new file mode 100644 index 00000000000..bf7f5494099 --- /dev/null +++ b/doc/sphinxext/prs/8255.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d5ea2b00c45a780b985e7a600546a45201461d3d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 7, + "d": 3 + }, + "mne/time_frequency/tfr.py": { + "a": 7, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 11, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8256.json b/doc/sphinxext/prs/8256.json new file mode 100644 index 00000000000..128e94396b8 --- /dev/null +++ b/doc/sphinxext/prs/8256.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "66314aca8df5e13deef848fd0aebea3c9c64c34d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/datasets/plot_opm_data.py": { + "a": 10, + "d": 4 + }, + 
"examples/inverse/plot_custom_inverse_solver.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_multidict_reweighted_tfmxne.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_rap_music.py": { + "a": 2, + "d": 1 + }, + "examples/visualization/plot_make_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/826.json b/doc/sphinxext/prs/826.json new file mode 100644 index 00000000000..fc59a6c2f47 --- /dev/null +++ b/doc/sphinxext/prs/826.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d54cf6cc3803a9996b087dd66186003e1765e4b8", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/realtime/stim_server_client.py": { + "a": 54, + "d": 27 + }, + "mne/realtime/tests/test_stim_client_server.py": { + "a": 25, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8260.json b/doc/sphinxext/prs/8260.json new file mode 100644 index 00000000000..79fa65e8915 --- /dev/null +++ b/doc/sphinxext/prs/8260.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "0facbb48f719122acf215d27e7c703954a1c4ba9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_dics_source_power.py": { + "a": 12, + "d": 6 + }, + "examples/inverse/plot_source_space_snr.py": { + "a": 5, + "d": 18 + }, + "mne/fixes.py": { + "a": 19, + "d": 7 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 33, + "d": 22 + }, + "mne/minimum_norm/inverse.py": { + "a": 17, + "d": 23 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 28, + "d": 27 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 1, + "d": 0 + }, + "tutorials/source-modeling/plot_mne_solutions.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8261.json b/doc/sphinxext/prs/8261.json new file mode 100644 index 00000000000..b41c9b1c7fc --- /dev/null +++ b/doc/sphinxext/prs/8261.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "1c3a4288871ff87b69256f84e404adb11f3e6ca5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 0, + "d": 6 + }, + "mne/baseline.py": { + "a": 2, + "d": 41 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 3 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 2, + "d": 5 + }, + "mne/conftest.py": { + "a": 1, + "d": 2 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 55, + "d": 58 + }, + "mne/evoked.py": { + "a": 28, + "d": 165 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 2, + "d": 5 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 2, + "d": 4 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 6 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 6 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 22 + }, + "mne/tests/test_evoked.py": { + "a": 3, + "d": 36 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 3 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 15, + "d": 44 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 2 + }, + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 0, + "d": 19 + }, + 
"tutorials/evoked/plot_20_visualize_evoked.py": { + "a": 2, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8262.json b/doc/sphinxext/prs/8262.json new file mode 100644 index 00000000000..fcf5f111e16 --- /dev/null +++ b/doc/sphinxext/prs/8262.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "cb11a650a9836cf0b3193179851522d638ffa23a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/glossary.rst": { + "a": 5, + "d": 0 + }, + "mne/conftest.py": { + "a": 10, + "d": 9 + }, + "mne/minimum_norm/inverse.py": { + "a": 35, + "d": 11 + }, + "mne/source_estimate.py": { + "a": 16, + "d": 10 + }, + "mne/source_space.py": { + "a": 8, + "d": 3 + }, + "mne/tests/test_source_estimate.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 6, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 2 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8264.json b/doc/sphinxext/prs/8264.json new file mode 100644 index 00000000000..1612523d224 --- /dev/null +++ b/doc/sphinxext/prs/8264.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b96a760bc133f31715fa383b0a5b5999ae9aa564", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 24, + "d": 4 + }, + "mne/tests/test_evoked.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8265.json b/doc/sphinxext/prs/8265.json new file mode 100644 index 00000000000..6acd849bd3e --- /dev/null +++ b/doc/sphinxext/prs/8265.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "724f9ae3e9bbf36eec60bca71ccbb26f20961f37", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 3, + "d": 0 + }, + "doc/carousel.inc": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_vector_mne_solution.py": { + "a": 6, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 4, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 31, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 171, + "d": 88 + }, + "mne/viz/_brain/_scraper.py": { + "a": 68, + "d": 43 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 23, + "d": 10 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 8 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 5, + "d": 1 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8266.json b/doc/sphinxext/prs/8266.json new file mode 100644 index 00000000000..6b5c8c31f00 --- /dev/null +++ b/doc/sphinxext/prs/8266.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "8c1c30b7a5140f4a2cfaf1a2348cd04649aa1d68", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + ".gitignore": { + "a": 1, + "d": 0 + }, + "doc/carousel.inc": { + "a": 1, + "d": 1 + }, + "examples/forward/plot_left_cerebellum_volume_source.py": { + "a": 14, + "d": 17 + }, + "examples/inverse/plot_covariance_whitening_dspm.py": { + "a": 3, + "d": 0 + }, + "examples/inverse/plot_custom_inverse_solver.py": { 
+ "a": 1, + "d": 1 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_multidict_reweighted_tfmxne.py": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_rap_music.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_vector_mne_solution.py": { + "a": 3, + "d": 0 + }, + "examples/visualization/montage.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_make_report.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 7, + "d": 3 + }, + "tutorials/intro/plot_40_sensor_locations.py": { + "a": 1, + "d": 2 + }, + "tutorials/misc/plot_report.py": { + "a": 10, + "d": 10 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 2, + "d": 2 + }, + "tutorials/source-modeling/plot_compute_covariance.py": { + "a": 2, + "d": 1 + }, + "tutorials/source-modeling/plot_dipole_fit.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_forward.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 24, + "d": 49 + }, + "tutorials/source-modeling/plot_mne_solutions.py": { + "a": 2, + "d": 2 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 2, + "d": 4 + }, + "tutorials/stats-source-space/plot_stats_cluster_time_frequency_repeated_measures_anova.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8268.json b/doc/sphinxext/prs/8268.json new file mode 100644 index 00000000000..844fcb4331b --- /dev/null +++ b/doc/sphinxext/prs/8268.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "eec85d4b864bb18fa0a5fa5b36a13b722d383bc8", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/references.bib": { + "a": 9, + "d": 0 + }, + "examples/inverse/plot_mne_cov_power.py": { + "a": 8, + "d": 1 + }, + "mne/cov.py": { + "a": 11, + "d": 22 + }, + "mne/minimum_norm/inverse.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8269.json b/doc/sphinxext/prs/8269.json new file mode 100644 index 00000000000..b1cfef7036c --- /dev/null +++ b/doc/sphinxext/prs/8269.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7a713186f216b1e80c3c01b15c6d47eb5af4daff", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/ci.yml": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 49, + "d": 36 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8275.json b/doc/sphinxext/prs/8275.json new file mode 100644 index 00000000000..26e1e114fdd --- /dev/null +++ b/doc/sphinxext/prs/8275.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "edb868390d0e2aadc66dc837a6eca109026fac81", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_psf_ctf_label_leakage.py": { + "a": 187, + "d": 0 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 182, + "d": 43 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 62, + "d": 19 + }, + 
"mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 1, + "d": 11 + }, + "mne/utils/docs.py": { + "a": 61, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8276.json b/doc/sphinxext/prs/8276.json new file mode 100644 index 00000000000..07e1abf0df0 --- /dev/null +++ b/doc/sphinxext/prs/8276.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3ea9a661c1a33a7dbd2f029a529ba1a22817e46d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 3, + "d": 3 + }, + "mne/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 23, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8278.json b/doc/sphinxext/prs/8278.json new file mode 100644 index 00000000000..6c05a1a30c9 --- /dev/null +++ b/doc/sphinxext/prs/8278.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e50d215454c88727007db2f5598c355e5c775a8a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 0, + "d": 9 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8279.json b/doc/sphinxext/prs/8279.json new file mode 100644 index 00000000000..4ed6e75ae09 --- /dev/null +++ b/doc/sphinxext/prs/8279.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "678b5cb72e319cfb17d6d58ab01d2f0e72fda896", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 14, + "d": 1 + }, + "mne/utils/tests/test_config.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8282.json b/doc/sphinxext/prs/8282.json new file mode 100644 index 00000000000..fcb5b7168dd --- /dev/null +++ b/doc/sphinxext/prs/8282.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d056af3d463edb2beb237a563a14a60af35c53a7", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 3 + }, + ".mailmap": { + "a": 2, + "d": 0 + }, + "doc/_templates/navbar.html": { + "a": 2, + "d": 1 + }, + "doc/changes/0.21.inc": { + "a": 54, + "d": 3 + }, + "doc/cited.rst": { + "a": 2, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 1 + }, + "mne/_version.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8283.json b/doc/sphinxext/prs/8283.json new file mode 100644 index 00000000000..c8b9459d0e3 --- /dev/null +++ b/doc/sphinxext/prs/8283.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e6a22e8950a4aa665eee4450170d94f02013b4d7", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 1 + }, + "doc/sphinxext/gh_substitutions.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 7, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 25, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8285.json b/doc/sphinxext/prs/8285.json new file mode 100644 index 00000000000..7cbcc9b8f02 --- /dev/null +++ b/doc/sphinxext/prs/8285.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"f81fbe4c9118f6633f80a15fdd8e2a490a9c46d7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8286.json b/doc/sphinxext/prs/8286.json new file mode 100644 index 00000000000..0360ed865cf --- /dev/null +++ b/doc/sphinxext/prs/8286.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a95ecdcb57172d284c5d7c4c1b672254d18903b3", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/0.21.inc": { + "a": 12, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8287.json b/doc/sphinxext/prs/8287.json new file mode 100644 index 00000000000..bc7ae2fce7d --- /dev/null +++ b/doc/sphinxext/prs/8287.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cffdc0843428b9841bd493a961e795e570e58549", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 27, + "d": 4 + }, + "doc/index.rst": { + "a": 62, + "d": 27 + }, + "doc/overview/index.rst": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8288.json b/doc/sphinxext/prs/8288.json new file mode 100644 index 00000000000..8c6e86f9617 --- /dev/null +++ b/doc/sphinxext/prs/8288.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fa168dccc221b12ad91d919c5d2b1afc9465a7ba", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/machine-learning/plot_sensors_decoding.py": { + "a": 17, + "d": 35 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8297.json b/doc/sphinxext/prs/8297.json new file mode 100644 index 00000000000..cec917a7666 --- /dev/null +++ b/doc/sphinxext/prs/8297.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5227c19a5f88835659785226148fbeb8c64f43bf", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "setup.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8298.json b/doc/sphinxext/prs/8298.json new file mode 100644 index 00000000000..1fb39d0a1b6 --- /dev/null +++ b/doc/sphinxext/prs/8298.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "14bbb047ab2f188c5916397bf1ed4752c8d0b160", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8299.json b/doc/sphinxext/prs/8299.json new file mode 100644 index 00000000000..126f05d3aa8 --- /dev/null +++ b/doc/sphinxext/prs/8299.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9e8943b629a6edd1e947ce0838cd055597c3e4af", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "setup.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/83.json b/doc/sphinxext/prs/83.json new file mode 100644 index 00000000000..214e695d560 --- /dev/null +++ b/doc/sphinxext/prs/83.json @@ -0,0 
+1,15 @@ +{ + "merge_commit_sha": "0141232b6509ac733de1f337a13ed9749229005c", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/simulation/tests/test_evoked.py": { + "a": 70, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/830.json b/doc/sphinxext/prs/830.json new file mode 100644 index 00000000000..a4d1a5e5a7c --- /dev/null +++ b/doc/sphinxext/prs/830.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1a806b0d7882581a0709e538502e9f52da020a36", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "bin/mne_make_scalp_surfaces.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8301.json b/doc/sphinxext/prs/8301.json new file mode 100644 index 00000000000..7fbffa649d5 --- /dev/null +++ b/doc/sphinxext/prs/8301.json @@ -0,0 +1,167 @@ +{ + "merge_commit_sha": "ecf639298f7bc9b9e9406a712eb852cb95586c74", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.15.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.19.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.20.inc": { + "a": 2, + "d": 2 + }, + "doc/python_reference.rst": { + "a": 0, + "d": 1 + }, + "mne/__init__.py": { + "a": 0, + "d": 17 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 3, + "d": 12 + }, + "mne/beamformer/_lcmv.py": { + "a": 2, + "d": 293 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 174 + }, + "mne/bem.py": { + "a": 7, + "d": 17 + }, + "mne/channels/__init__.py": { + "a": 0, + "d": 5 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 31 + }, + "mne/channels/tests/test_montage.py": { + "a": 0, + "d": 9 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/filter.py": { + "a": 1, + "d": 4 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 8 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 1, + "d": 2 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 3, + "d": 5 + }, + "mne/io/pick.py": { + "a": 5, + "d": 34 + }, + "mne/io/tests/test_pick.py": { + "a": 6, + "d": 6 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/flat.py": { + "a": 1, + "d": 50 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 16 + }, + "mne/preprocessing/tests/test_beer_lambert_law.py": { + "a": 0, + "d": 6 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 1, + "d": 6 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 3 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 105 + }, + "mne/source_space.py": { + "a": 1, + "d": 6 + }, + "mne/stats/cluster_level.py": { + "a": 6, + "d": 33 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 0, + "d": 5 + }, + "mne/tests/test_bem.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 6, + "d": 10 + }, + "mne/tests/test_source_estimate.py": { + "a": 0, + "d": 5 + }, + "mne/utils/check.py": { + "a": 0, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 6 + }, + "mne/viz/tests/test_epochs.py": { + "a": 0, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 6, + "d": 30 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8303.json b/doc/sphinxext/prs/8303.json new file mode 100644 index 00000000000..7002cca08e0 --- /dev/null +++ b/doc/sphinxext/prs/8303.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"7c1233ba612bdb08d640c924ef4220b2fe13fb71", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/reference.py": { + "a": 4, + "d": 5 + }, + "mne/io/tests/test_reference.py": { + "a": 30, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8306.json b/doc/sphinxext/prs/8306.json new file mode 100644 index 00000000000..26cf127c1dc --- /dev/null +++ b/doc/sphinxext/prs/8306.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b2d759a8231003b0555283617f6d319e0657089e", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8310.json b/doc/sphinxext/prs/8310.json new file mode 100644 index 00000000000..2f0891e9b8d --- /dev/null +++ b/doc/sphinxext/prs/8310.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c7f9773ccab6fcf8f5fe38334a99aa1c6a43d97b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 28 + }, + "mne/utils/docs.py": { + "a": 35, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8314.json b/doc/sphinxext/prs/8314.json new file mode 100644 index 00000000000..b6419da6ac0 --- /dev/null +++ b/doc/sphinxext/prs/8314.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "695ab958376a3e284f6ecbf76555a3db59276f6a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 65, + "d": 61 + }, + "mne/viz/_brain/_scraper.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 12, + "d": 12 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8317.json b/doc/sphinxext/prs/8317.json new file mode 100644 index 00000000000..5e53d737d5e --- /dev/null +++ b/doc/sphinxext/prs/8317.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "82df946e29a8b66da0184d08b9a016ae6371545f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 10, + "d": 0 + }, + "examples/inverse/plot_dics_source_power.py": { + "a": 5, + "d": 5 + }, + "examples/inverse/plot_evoked_ers_source_power.py": { + "a": 4, + "d": 2 + }, + "examples/inverse/plot_multidict_reweighted_tfmxne.py": { + "a": 4, + "d": 14 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 6, + "d": 21 + }, + "tutorials/time-freq/plot_sensors_time_frequency.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8318.json b/doc/sphinxext/prs/8318.json new file mode 100644 index 00000000000..33dac2f56a9 --- /dev/null +++ b/doc/sphinxext/prs/8318.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5a04a45aca85d2ff32c3eb13c4eb4eff0a0181bf", + "authors": [ + { + "n": 
"Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 4, + "d": 2 + }, + "mne/io/tests/test_compensator.py": { + "a": 18, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/832.json b/doc/sphinxext/prs/832.json new file mode 100644 index 00000000000..040a153235b --- /dev/null +++ b/doc/sphinxext/prs/832.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "1e2d0be9595a9d6f1f823d4d8bbec61ef9052b23", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/edf/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/fiff/edf/edf.py": { + "a": 509, + "d": 0 + }, + "mne/fiff/edf/tests/data/biosemi.hpts": { + "a": 82, + "d": 0 + }, + "mne/fiff/edf/tests/data/test.bdf": { + "a": 3222, + "d": 0 + }, + "mne/fiff/edf/tests/data/test.edf": { + "a": 9591, + "d": 0 + }, + "mne/fiff/edf/tests/data/test_bdf_eeglab.mat": { + "a": 0, + "d": 0 + }, + "mne/fiff/edf/tests/data/test_edf_eeglab.mat": { + "a": 0, + "d": 0 + }, + "mne/fiff/edf/tests/data/test_eeglab.mat": { + "a": 0, + "d": 0 + }, + "mne/fiff/edf/tests/test_edf.py": { + "a": 88, + "d": 0 + }, + "mne/fiff/meas_info.py": { + "a": 1, + "d": 1 + }, + "setup.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8320.json b/doc/sphinxext/prs/8320.json new file mode 100644 index 00000000000..47e78a35beb --- /dev/null +++ b/doc/sphinxext/prs/8320.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "ff8ee2bbdd9d6b5f542b04e2dce76c31af46a2d3", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nihon/nihon.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/plot_20_reading_eeg_data.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8321.json b/doc/sphinxext/prs/8321.json new file mode 100644 index 00000000000..91272a3a4a2 --- /dev/null +++ b/doc/sphinxext/prs/8321.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4270859dc3ba2208bb3eed4f73403b5de26125b8", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 7, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 104, + "d": 30 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 62, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8323.json b/doc/sphinxext/prs/8323.json new file mode 100644 index 00000000000..ee2cd2737b2 --- /dev/null +++ b/doc/sphinxext/prs/8323.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5fb608a82d5c82ced7fc66abc7781a9162d9b8ac", + 
"authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "examples/decoding/plot_receptive_field_mtrf.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 20, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8326.json b/doc/sphinxext/prs/8326.json new file mode 100644 index 00000000000..a2372ed2de7 --- /dev/null +++ b/doc/sphinxext/prs/8326.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "47ce37e3cacd63fed04ca4e078dba60f0330f161", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/glossary.rst": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 98, + "d": 84 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 125, + "d": 63 + }, + "mne/utils/tests/test_numerics.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8329.json b/doc/sphinxext/prs/8329.json new file mode 100644 index 00000000000..bd0a7b76d3f --- /dev/null +++ b/doc/sphinxext/prs/8329.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4483677c5d203897ad343754d8fa6701591d3bae", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 23, + "d": 19 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/833.json b/doc/sphinxext/prs/833.json new file mode 100644 index 00000000000..079e892c7f1 --- /dev/null +++ b/doc/sphinxext/prs/833.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "48477b4803e1f4d1059bd480fe00f0c6dfddaeee", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/datasets/sample/sample.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8330.json b/doc/sphinxext/prs/8330.json new file mode 100644 index 00000000000..cd1a9fbfc17 --- /dev/null +++ b/doc/sphinxext/prs/8330.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "74f6f41f827df552d258c4db4d4c99a5b71863d6", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + } + ], + "changes": { + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8331.json b/doc/sphinxext/prs/8331.json new file mode 100644 index 00000000000..57022e06505 --- /dev/null +++ b/doc/sphinxext/prs/8331.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "145a2b6eb60d5700d3cdc427e73bcd0eaa2212d4", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8333.json b/doc/sphinxext/prs/8333.json new file mode 100644 index 00000000000..f49fec012f8 --- /dev/null +++ b/doc/sphinxext/prs/8333.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "3d8816c0571050aacee8dcb26ebad65a85f590dd", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 5, + "d": 6 + }, + "mne/decoding/time_frequency.py": { + "a": 2, + "d": 2 + }, + 
"mne/preprocessing/xdawn.py": { + "a": 3, + "d": 3 + }, + "mne/utils/check.py": { + "a": 10, + "d": 4 + }, + "mne/utils/tests/test_check.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 10, + "d": 10 + }, + "mne/viz/_brain/surface.py": { + "a": 1, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8335.json b/doc/sphinxext/prs/8335.json new file mode 100644 index 00000000000..dc2f59d95f0 --- /dev/null +++ b/doc/sphinxext/prs/8335.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "957556e19be1c5ee899b30065816a8e9dbe79f6c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 33, + "d": 18 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 291, + "d": 96 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 8, + "d": 8 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 118, + "d": 16 + }, + "mne/viz/backends/renderer.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/834.json b/doc/sphinxext/prs/834.json new file mode 100644 index 00000000000..97263e99f63 --- /dev/null +++ b/doc/sphinxext/prs/834.json @@ -0,0 +1,291 @@ +{ + "merge_commit_sha": "317ebed92115799af5af8dfdd5516a5c14ac5329", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "bin/mne_browse_raw.py": { + "a": 6, + "d": 6 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 4, + "d": 4 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 3, + "d": 3 + }, + "examples/decoding/plot_decoding_sensors.py": { + "a": 12, + "d": 12 + }, + "examples/export/plot_epochs_as_data_frame.py": { + "a": 3, + "d": 3 + }, + "examples/export/plot_epochs_to_nitime.py": { + "a": 2, + "d": 2 + }, + "examples/export/plot_evoked_to_nitime.py": { + "a": 2, + "d": 2 + }, + "examples/export/plot_raw_to_nitime.py": { + "a": 7, + "d": 7 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 5, + "d": 5 + }, + "examples/inverse/plot_compute_mne_inverse_epochs_in_label.py": { + "a": 10, + "d": 10 + }, + "examples/inverse/plot_compute_mne_inverse_raw_in_label.py": { + "a": 5, + "d": 5 + }, + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 10, + "d": 11 + }, + "examples/inverse/plot_dics_beamformer.py": { + "a": 8, + "d": 8 + }, + "examples/inverse/plot_gamma_map_inverse.py": { + "a": 5, + "d": 4 + }, + "examples/inverse/plot_label_activation_from_stc.py": { + "a": 12, + "d": 12 + }, + "examples/inverse/plot_label_from_stc.py": { + "a": 8, + "d": 8 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 12, + "d": 12 + }, + "examples/inverse/plot_lcmv_beamformer.py": { + "a": 10, + "d": 10 + }, + "examples/inverse/plot_lcmv_beamformer_volume.py": { + "a": 14, + "d": 14 + }, + "examples/inverse/plot_make_inverse_operator.py": { + "a": 15, + "d": 15 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 6, + "d": 6 + }, + "examples/inverse/plot_morph_data.py": { + "a": 8, + "d": 8 + }, + "examples/inverse/plot_read_stc.py": { + "a": 5, + "d": 5 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 7, + "d": 6 + }, + 
"examples/plot_channel_epochs_image.py": { + "a": 3, + "d": 3 + }, + "examples/plot_define_target_events.py": { + "a": 10, + "d": 10 + }, + "examples/plot_evoked_delayed_ssp.py": { + "a": 6, + "d": 6 + }, + "examples/plot_evoked_whitening.py": { + "a": 2, + "d": 2 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 7, + "d": 7 + }, + "examples/plot_from_raw_to_multiple_epochs_to_evoked.py": { + "a": 9, + "d": 9 + }, + "examples/plot_make_forward.py": { + "a": 12, + "d": 12 + }, + "examples/plot_megsim_data.py": { + "a": 3, + "d": 3 + }, + "examples/plot_megsim_data_single_trial.py": { + "a": 0, + "d": 1 + }, + "examples/plot_read_forward.py": { + "a": 12, + "d": 12 + }, + "examples/plot_read_noise_covariance_matrix.py": { + "a": 4, + "d": 4 + }, + "examples/plot_shift_evoked.py": { + "a": 2, + "d": 2 + }, + "examples/plot_simulate_evoked_data.py": { + "a": 5, + "d": 5 + }, + "examples/plot_ssp_projs_sensitivity_map.py": { + "a": 3, + "d": 3 + }, + "examples/plot_ssp_projs_topomaps.py": { + "a": 2, + "d": 2 + }, + "examples/plot_topo_channel_epochs_image.py": { + "a": 2, + "d": 2 + }, + "examples/plot_topo_compare_conditions.py": { + "a": 3, + "d": 3 + }, + "examples/plot_topography.py": { + "a": 2, + "d": 2 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 6, + "d": 6 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 5, + "d": 5 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 4, + "d": 4 + }, + "examples/preprocessing/plot_ica_from_raw.py": { + "a": 25, + "d": 25 + }, + "examples/realtime/plot_compute_rt_decoder.py": { + "a": 13, + "d": 13 + }, + "examples/realtime/rt_feedback_server.py": { + "a": 14, + "d": 13 + }, + "examples/stats/plot_cluster_1samp_test_time_frequency.py": { + "a": 24, + "d": 26 + }, + "examples/stats/plot_cluster_methods_tutorial.py": { + "a": 9, + "d": 9 + }, + "examples/stats/plot_cluster_stats_evoked.py": { + "a": 18, + "d": 17 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 14, + "d": 14 + }, + "examples/stats/plot_cluster_stats_time_frequency.py": { + "a": 22, + "d": 22 + }, + "examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py": { + "a": 34, + "d": 34 + }, + "examples/stats/plot_fdr_stats_evoked.py": { + "a": 14, + "d": 14 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 10, + "d": 10 + }, + "examples/time_frequency/plot_compute_source_psd_epochs.py": { + "a": 6, + "d": 6 + }, + "examples/time_frequency/plot_source_label_time_frequency.py": { + "a": 22, + "d": 22 + }, + "examples/time_frequency/plot_source_power_spectrum.py": { + "a": 6, + "d": 6 + }, + "examples/time_frequency/plot_source_space_time_frequency.py": { + "a": 8, + "d": 8 + }, + "examples/time_frequency/plot_temporal_whitening.py": { + "a": 13, + "d": 13 + }, + "examples/time_frequency/plot_tfr_topography.py": { + "a": 3, + "d": 3 + }, + "examples/time_frequency/plot_time_frequency.py": { + "a": 29, + "d": 29 + }, + "mne/fiff/evoked.py": { + "a": 18, + "d": 16 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_viz.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 4, + "d": 4 + }, + "mne/viz.py": { + "a": 223, + "d": 216 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8340.json b/doc/sphinxext/prs/8340.json new file mode 100644 index 00000000000..dc957d3447d --- /dev/null +++ b/doc/sphinxext/prs/8340.json @@ -0,0 +1,31 @@ +{ + 
"merge_commit_sha": "629db04a49b5dec6aedad5a2857d3ea9781c1c2e", + "authors": [ + { + "n": "examplename", + "e": "e.ort@vu.nl" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 22, + "d": 1 + }, + "mne/io/open.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8341.json b/doc/sphinxext/prs/8341.json new file mode 100644 index 00000000000..baef78d4daf --- /dev/null +++ b/doc/sphinxext/prs/8341.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "666669ae2b2d5147d8cd58290e2b4d5de2f08ce0", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8343.json b/doc/sphinxext/prs/8343.json new file mode 100644 index 00000000000..bcd336879e9 --- /dev/null +++ b/doc/sphinxext/prs/8343.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "697f7e9a4730c2e7e1dc21e101cefb1e5da8c57f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 60, + "d": 48 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 86, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8344.json b/doc/sphinxext/prs/8344.json new file mode 100644 index 00000000000..14bfcffb77e --- /dev/null +++ b/doc/sphinxext/prs/8344.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ce4d514ad022c685084947c3e098cf5b16ba2821", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_evoked.py": { + "a": 13, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8345.json b/doc/sphinxext/prs/8345.json new file mode 100644 index 00000000000..d1a8917465d --- /dev/null +++ b/doc/sphinxext/prs/8345.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ad43de7ce030189e6b595eb3c44c32b9ae9d4567", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/835.json b/doc/sphinxext/prs/835.json new file mode 100644 index 00000000000..1f22e6671ed --- /dev/null +++ b/doc/sphinxext/prs/835.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5dfeb56593d0103687efe354ae929fec25e46748", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 0 + }, + "mne/viz.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8350.json b/doc/sphinxext/prs/8350.json new file mode 100644 index 00000000000..7a4936cdef8 --- /dev/null +++ b/doc/sphinxext/prs/8350.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "09513aa1a7cf1c1d6fe3bc71ba5001cd8725a6e1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + 
"mne/utils/_logging.py": { + "a": 28, + "d": 25 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 1 + }, + "mne/utils/tests/test_docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8351.json b/doc/sphinxext/prs/8351.json new file mode 100644 index 00000000000..cdaa46794b3 --- /dev/null +++ b/doc/sphinxext/prs/8351.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "91473147e57464173dedb00ce20f3d5aefd883bd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/_static/diagrams/ica.dot": { + "a": 9, + "d": 4 + }, + "doc/changes/latest.inc": { + "a": 8, + "d": 4 + }, + "mne/epochs.py": { + "a": 10, + "d": 35 + }, + "mne/preprocessing/ica.py": { + "a": 201, + "d": 193 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 114, + "d": 150 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 22, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 15, + "d": 9 + }, + "mne/viz/tests/test_ica.py": { + "a": 35, + "d": 39 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 22, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8352.json b/doc/sphinxext/prs/8352.json new file mode 100644 index 00000000000..38b4b5bb538 --- /dev/null +++ b/doc/sphinxext/prs/8352.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "af671e78561d7eb9e05eb69170149142ae336855", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/icons/README.rst": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 8 + }, + "mne/viz/_brain/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 1134, + "d": 29 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 53, + "d": 57 + }, + "mne/viz/_brain/_notebook.py": { + "a": 5, + "d": 6 + }, + "mne/viz/_brain/_scraper.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_timeviewer.py": { + "a": 0, + "d": 1049 + }, + "mne/viz/_brain/mplcanvas.py": { + "a": 8, + "d": 8 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 48, + "d": 55 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8354.json b/doc/sphinxext/prs/8354.json new file mode 100644 index 00000000000..0efb483c627 --- /dev/null +++ b/doc/sphinxext/prs/8354.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "734e6ace282f510e017bc67851142c02954be440", + "authors": [ + { + "n": "Evan Hathaway", + "e": null + }, + { + "n": "Justus Schwabedal", + "e": "jschwabedal@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/egimff.py": { + "a": 237, + "d": 34 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 82, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "server_environment.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8356.json b/doc/sphinxext/prs/8356.json new file mode 100644 index 00000000000..807613986c7 --- /dev/null +++ b/doc/sphinxext/prs/8356.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": 
"c2d9a3fab4de5cc9974cc4bc23ec84c0a3e5bfca", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 20, + "d": 25 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 98, + "d": 104 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 7, + "d": 11 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8357.json b/doc/sphinxext/prs/8357.json new file mode 100644 index 00000000000..6cbbb5421fa --- /dev/null +++ b/doc/sphinxext/prs/8357.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "c0388970e1d267713f5ead4e28c7ae297c8d36a1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "examplename", + "e": "e.ort@vu.nl" + }, + { + "n": "eort", + "e": "eduardxort@gmail.com" + }, + { + "n": "examplename", + "e": "e.ort@vu.nl" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/raw.py": { + "a": 23, + "d": 6 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 11, + "d": 6 + }, + "mne/utils/check.py": { + "a": 3, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 14, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8358.json b/doc/sphinxext/prs/8358.json new file mode 100644 index 00000000000..6918af267c3 --- /dev/null +++ b/doc/sphinxext/prs/8358.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7781b9eb8604cc9e2c466a10e90ea83bf6202cd6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "SECURITY.md": { + "a": 30, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8359.json b/doc/sphinxext/prs/8359.json new file mode 100644 index 00000000000..70cb1c35d3b --- /dev/null +++ b/doc/sphinxext/prs/8359.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "b20e7964c12b5e8e34e4feba368a1a30d8f36903", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/references.bib": { + "a": 26, + "d": 0 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 7, + "d": 5 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 3, + "d": 7 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation_volume.py": { + "a": 3, + "d": 7 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 7, + "d": 7 + }, + "examples/datasets/plot_brainstorm_data.py": { + "a": 6, + "d": 8 + }, + "examples/datasets/plot_limo_data.py": { + "a": 12, + "d": 18 + }, + "examples/decoding/decoding_rsa.py": { + "a": 9, + "d": 18 + }, + "examples/decoding/plot_decoding_csp_eeg.py": { + "a": 9, + "d": 16 + }, + "examples/decoding/plot_decoding_spoc_CMC.py": { + "a": 7, + "d": 7 + }, + "examples/decoding/plot_decoding_time_generalization_conditions.py": { + "a": 7, + "d": 8 + }, + "examples/decoding/plot_decoding_xdawn_eeg.py": { + "a": 5, + "d": 12 + }, + "examples/decoding/plot_ems_filtering.py": { + "a": 6, + "d": 8 + }, + "mne/datasets/utils.py": { + "a": 4, + "d": 5 + }, + "mne/decoding/csp.py": { + "a": 3, + "d": 5 + }, + "mne/decoding/ems.py": { + "a": 6, + "d": 8 + }, + "mne/decoding/receptive_field.py": { + "a": 6, 
+ "d": 25 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8361.json b/doc/sphinxext/prs/8361.json new file mode 100644 index 00000000000..4b666b879a1 --- /dev/null +++ b/doc/sphinxext/prs/8361.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3c8f49dd750c425d564d13e5481e1448233e2fcb", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 4, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8365.json b/doc/sphinxext/prs/8365.json new file mode 100644 index 00000000000..d5e9e356cc0 --- /dev/null +++ b/doc/sphinxext/prs/8365.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "6dff3f2102f5cc4c8b9cb35a64b0ead88b21ecac", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/ISSUE_TEMPLATE/blank.md": { + "a": 0, + "d": 5 + }, + ".github/ISSUE_TEMPLATE/bug_report.md": { + "a": 24, + "d": 16 + }, + ".github/ISSUE_TEMPLATE/config.yml": { + "a": 8, + "d": 0 + }, + ".github/ISSUE_TEMPLATE/documentation.md": { + "a": 15, + "d": 0 + }, + ".github/ISSUE_TEMPLATE/feature_request.md": { + "a": 16, + "d": 11 + }, + ".github/ISSUE_TEMPLATE/glossary.md": { + "a": 0, + "d": 13 + }, + ".github/ISSUE_TEMPLATE/question.md": { + "a": 19, + "d": 0 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "SECURITY.md": { + "a": 30, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8366.json b/doc/sphinxext/prs/8366.json new file mode 100644 index 00000000000..28ba710adc2 --- /dev/null +++ b/doc/sphinxext/prs/8366.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f2cdcc8a87a87a8c13fbb7f355660017a0ee56ee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_morph_volume_stc.py": { + "a": 15, + "d": 1 + }, + "mne/morph.py": { + "a": 171, + "d": 92 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_morph.py": { + "a": 118, + "d": 48 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8367.json b/doc/sphinxext/prs/8367.json new file mode 100644 index 00000000000..2db79b5199b --- /dev/null +++ b/doc/sphinxext/prs/8367.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6f16c3771d79dda19b5ff0a058396f468a3e57f6", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 4 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 11, + "d": 5 + }, + "mne/viz/backends/_pyvista.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8368.json b/doc/sphinxext/prs/8368.json new file mode 100644 index 00000000000..22239dee643 --- /dev/null +++ b/doc/sphinxext/prs/8368.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "092a1268ba0cff0b184797e6457dd14c5244a22b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/837.json b/doc/sphinxext/prs/837.json new file mode 100644 index 00000000000..7e5b5fe9cc1 --- /dev/null +++ b/doc/sphinxext/prs/837.json @@ -0,0 +1,171 @@ +{ + "merge_commit_sha": "8bddf07d25443889c1864819e4e1c9d795d3b2c8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 2, + "d": 2 + }, + "examples/stats/plot_cluster_stats_spatio_temporal.py": { + "a": 4, + "d": 4 + }, + "examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py": { + "a": 4, + "d": 4 + }, + "mne/__init__.py": { + "a": 36, + "d": 36 + }, + "mne/connectivity/spectral.py": { + "a": 3, + "d": 3 + }, + "mne/datasets/megsim/megsim.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/classifier.py": { + "a": 2, + "d": 2 + }, + "mne/epochs.py": { + "a": 7, + "d": 8 + }, + "mne/fiff/__init__.py": { + "a": 7, + "d": 7 + }, + "mne/fiff/bti/raw.py": { + "a": 3, + "d": 3 + }, + "mne/fiff/bti/tests/test_bti.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/cov.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/evoked.py": { + "a": 3, + "d": 3 + }, + "mne/fiff/matrix.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/fiff/proj.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 2, + "d": 2 + }, + "mne/fiff/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 14, + "d": 13 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/__init__.py": { + "a": 6, + "d": 6 + }, + "mne/minimum_norm/inverse.py": { + "a": 11, + "d": 11 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 6, + "d": 5 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 13, + "d": 13 + }, + "mne/stats/__init__.py": { + "a": 7, + "d": 7 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 7, + "d": 7 + }, + "mne/stats/tests/test_parametric.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_coreg.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_cov.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_filter.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_label.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_source_estimate.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_utils.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_viz.py": { + "a": 7, + "d": 7 + }, + "mne/time_frequency/csd.py": { + "a": 2, + "d": 2 + }, + "mne/viz.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8371.json b/doc/sphinxext/prs/8371.json new file mode 100644 index 00000000000..080ccf8a392 --- /dev/null +++ b/doc/sphinxext/prs/8371.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "59e6c546020d3ec6e123e46a72cbb803a33591f3", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8372.json b/doc/sphinxext/prs/8372.json new file mode 100644 index 00000000000..a6a0038b88a --- /dev/null +++ 
b/doc/sphinxext/prs/8372.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "56bf0c9c49cfeac1aec6f1a2782c919506c4346a", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/0.21.inc": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8373.json b/doc/sphinxext/prs/8373.json new file mode 100644 index 00000000000..688847f5d68 --- /dev/null +++ b/doc/sphinxext/prs/8373.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "db510cc35604bd0d72cae6a1bb7d1d05eb18c0ac", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/fixes.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8374.json b/doc/sphinxext/prs/8374.json new file mode 100644 index 00000000000..e9a11ee5186 --- /dev/null +++ b/doc/sphinxext/prs/8374.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "864426c4839bab05fd0d142ee20938c336c0b78e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 2 + }, + "README.rst": { + "a": 6, + "d": 7 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 4, + "d": 19 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 5 + }, + "mne/decoding/tests/test_base.py": { + "a": 2, + "d": 5 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/fixes.py": { + "a": 3, + "d": 183 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 4, + "d": 9 + }, + "mne/time_frequency/multitaper.py": { + "a": 3, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 18, + "d": 15 + }, + "mne/viz/epochs.py": { + "a": 5, + "d": 11 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 4 + }, + "mne/viz/raw.py": { + "a": 0, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 0, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 4 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8377.json b/doc/sphinxext/prs/8377.json new file mode 100644 index 00000000000..2ecfd4852ac --- /dev/null +++ b/doc/sphinxext/prs/8377.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6f2465dbe5e9baa53ffddb8503d673d4f995376d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 13, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8379.json b/doc/sphinxext/prs/8379.json new file mode 100644 index 00000000000..a951e5ceef7 --- /dev/null +++ b/doc/sphinxext/prs/8379.json @@ -0,0 +1,187 @@ +{ + "merge_commit_sha": "06298716dc48db52f69496e4248927f90925ee65", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 19, + "d": 111 + }, + ".gitignore": { + "a": 2, + "d": 0 + }, + "doc/Makefile": { + "a": 17, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "doc/conf.py": { + "a": 23, + "d": 5 + }, + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 4, + "d": 4 
+ }, + "examples/decoding/plot_receptive_field_mtrf.py": { + "a": 0, + "d": 3 + }, + "examples/inverse/plot_evoked_ers_source_power.py": { + "a": 23, + "d": 10 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 14, + "d": 14 + }, + "examples/inverse/plot_psf_ctf_vertices.py": { + "a": 13, + "d": 9 + }, + "examples/inverse/plot_psf_ctf_vertices_lcmv.py": { + "a": 17, + "d": 14 + }, + "examples/inverse/plot_resolution_metrics.py": { + "a": 14, + "d": 4 + }, + "examples/inverse/plot_resolution_metrics_eegmeg.py": { + "a": 12, + "d": 2 + }, + "examples/inverse/plot_vector_mne_solution.py": { + "a": 4, + "d": 0 + }, + "examples/preprocessing/plot_find_ref_artifacts.py": { + "a": 7, + "d": 4 + }, + "examples/time_frequency/plot_source_power_spectrum_opm.py": { + "a": 28, + "d": 14 + }, + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 50, + "d": 22 + }, + "mne/conftest.py": { + "a": 34, + "d": 1 + }, + "mne/fixes.py": { + "a": 12, + "d": 3 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 14, + "d": 15 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 9, + "d": 20 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 48, + "d": 17 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 26, + "d": 0 + }, + "mne/utils/numerics.py": { + "a": 16, + "d": 6 + }, + "mne/utils/tests/test_numerics.py": { + "a": 15, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 71, + "d": 19 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 26, + "d": 8 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 9, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 1 + }, + "tools/circleci_download.sh": { + "a": 111, + "d": 0 + }, + "tutorials/misc/plot_ecog.py": { + "a": 8, + "d": 7 + }, + "tutorials/simulation/plot_dics.py": { + "a": 28, + "d": 26 + }, + "tutorials/source-modeling/plot_dipole_orientations.py": { + "a": 18, + "d": 22 + }, + "tutorials/source-modeling/plot_eeg_mri_coords.py": { + "a": 4, + "d": 3 + }, + "tutorials/source-modeling/plot_eeg_no_mri.py": { + "a": 2, + "d": 2 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/838.json b/doc/sphinxext/prs/838.json new file mode 100644 index 00000000000..c062e3b8bec --- /dev/null +++ b/doc/sphinxext/prs/838.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3438259e1bc336ec9be4db87ec046a45418b6e46", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fixes.py": { + "a": 14, + "d": 1 + }, + "mne/tests/test_viz.py": { + "a": 16, + "d": 1 + }, + "mne/viz.py": { + "a": 17, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8381.json b/doc/sphinxext/prs/8381.json new file mode 100644 index 00000000000..3ced551bf85 --- /dev/null +++ b/doc/sphinxext/prs/8381.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "8cf161eee8c7e938eee2e1c16bc3fc9e9322d28d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 5, + "d": 0 + }, + "mne/epochs.py": { + "a": 15, + "d": 5 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 20, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 23, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 440, + "d": 153 + }, + "mne/viz/epochs.py": { + "a": 164, + "d": 1214 + }, + "mne/viz/ica.py": { + "a": 172, + "d": 259 + }, + "mne/viz/raw.py": { + "a": 16, + "d": 339 + }, + "mne/viz/tests/test_epochs.py": { + "a": 116, + "d": 104 + }, + "mne/viz/tests/test_figure.py": { + "a": 12, + "d": 0 + }, + "mne/viz/tests/test_ica.py": { + "a": 41, + "d": 24 + }, + "mne/viz/tests/test_raw.py": { + "a": 42, + "d": 52 + }, + "mne/viz/tests/test_utils.py": { + "a": 23, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 44, + "d": 965 + }, + "tutorials/epochs/plot_20_visualize_epochs.py": { + "a": 16, + "d": 9 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 27, + "d": 26 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 21, + "d": 21 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8384.json b/doc/sphinxext/prs/8384.json new file mode 100644 index 00000000000..832491090c9 --- /dev/null +++ b/doc/sphinxext/prs/8384.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "108e165b600635ad1990d612c3503f5cc1001432", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 5, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8389.json b/doc/sphinxext/prs/8389.json new file mode 100644 index 00000000000..57b0d920242 --- /dev/null +++ b/doc/sphinxext/prs/8389.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "59cfcd7a7ffb0740deecbcea587b3fc55c3c5e63", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 27, + "d": 15 + }, + "mne/tests/test_source_estimate.py": { + "a": 7, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/839.json b/doc/sphinxext/prs/839.json new file mode 100644 index 00000000000..882053a51bd --- /dev/null +++ b/doc/sphinxext/prs/839.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "2504fee2d311003a049acb636798f66c047fc2ce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 4, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 5, + "d": 5 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 4, + "d": 1 + }, + 
"mne/beamformer/tests/test_lcmv.py": { + "a": 13, + "d": 5 + }, + "mne/datasets/sample/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sample/sample.py": { + "a": 27, + "d": 1 + }, + "mne/forward/tests/test_forward.py": { + "a": 7, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 6, + "d": 3 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 39, + "d": 8 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 6, + "d": 3 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 3, + "d": 2 + }, + "mne/simulation/tests/test_source.py": { + "a": 17, + "d": 11 + }, + "mne/tests/test_dipole.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 11, + "d": 1 + }, + "mne/tests/test_proj.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 10, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 14, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_transforms.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_viz.py": { + "a": 64, + "d": 21 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 3, + "d": 1 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 4, + "d": 4 + }, + "mne/utils.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8390.json b/doc/sphinxext/prs/8390.json new file mode 100644 index 00000000000..0dcb696c3d8 --- /dev/null +++ b/doc/sphinxext/prs/8390.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "6b7352f2f9e4c7d26e06d1977984392ea61896ac", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/references.bib": { + "a": 15, + "d": 3 + }, + "examples/inverse/plot_mixed_norm_inverse.py": { + "a": 3, + "d": 10 + }, + "examples/inverse/plot_morph_surface_stc.py": { + "a": 7, + "d": 8 + }, + "examples/inverse/plot_rap_music.py": { + "a": 6, + "d": 8 + }, + "examples/inverse/plot_resolution_metrics.py": { + "a": 5, + "d": 8 + }, + "examples/inverse/plot_resolution_metrics_eegmeg.py": { + "a": 6, + "d": 9 + }, + "examples/inverse/plot_source_space_snr.py": { + "a": 3, + "d": 6 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 7, + "d": 7 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 7, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8392.json b/doc/sphinxext/prs/8392.json new file mode 100644 index 00000000000..4a46d674f72 --- /dev/null +++ b/doc/sphinxext/prs/8392.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8fa5aa57947274a4bda7de3381ae898037aac504", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test.ipynb": { + "a": 27, + "d": 5 + }, + "mne/viz/backends/_notebook.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8393.json b/doc/sphinxext/prs/8393.json new file mode 100644 index 00000000000..9be0fdd2d61 --- /dev/null +++ b/doc/sphinxext/prs/8393.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "641e9d161987de1e5ff270b1c9828f9990f2ba2b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 20, + "d": 13 + }, + "mne/viz/tests/test_3d.py": { + "a": 20, + "d": 51 + }, + "tutorials/misc/plot_ecog.py": { + "a": 2, + "d": 
2 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8395.json b/doc/sphinxext/prs/8395.json new file mode 100644 index 00000000000..e9fff136ed1 --- /dev/null +++ b/doc/sphinxext/prs/8395.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "71dd449e24824a7f855060f209d5ad25ac304069", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 8, + "d": 4 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8396.json b/doc/sphinxext/prs/8396.json new file mode 100644 index 00000000000..9689824715f --- /dev/null +++ b/doc/sphinxext/prs/8396.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "69340341e44decadb3b3df1a34f6b9489f54b797", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + } + ], + "changes": { + "mne/io/nihon/nihon.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/84.json b/doc/sphinxext/prs/84.json new file mode 100644 index 00000000000..066d523f284 --- /dev/null +++ b/doc/sphinxext/prs/84.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "67a72ea7856f05010201a1d9fa7d60793d4011a8", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 8, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/840.json b/doc/sphinxext/prs/840.json new file mode 100644 index 00000000000..be13144eec9 --- /dev/null +++ b/doc/sphinxext/prs/840.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "75cdcf3546bd084595c847a9f010e39d266a5e1c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 5, + "d": 2 + }, + "mne/fiff/tests/test_raw.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8400.json b/doc/sphinxext/prs/8400.json new file mode 100644 index 00000000000..06aac175217 --- /dev/null +++ b/doc/sphinxext/prs/8400.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f4077c0582ed70520e51315a6f9bc025c3bc4cce", + "authors": [ + { + "n": "Tod Flak", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/curry/curry.py": { + "a": 67, + "d": 3 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 52, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8402.json b/doc/sphinxext/prs/8402.json new file mode 100644 index 00000000000..02e905c1b21 --- /dev/null +++ b/doc/sphinxext/prs/8402.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "cdeb44348efb1f73f90619b4dd6365a64a06ad00", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 100, + "d": 28 + }, + "mne/tests/test_source_estimate.py": { + "a": 33, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 5, + "d": 2 + }, + "mne/viz/_brain/_scraper.py": { + "a": 1, + "d": 0 + }, + "tutorials/misc/plot_ecog.py": { + "a": 9, + "d": 0 + }, + "tutorials/misc/plot_seeg.py": { + "a": 163, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8404.json b/doc/sphinxext/prs/8404.json new file mode 100644 index 00000000000..125550f32a8 --- /dev/null +++ b/doc/sphinxext/prs/8404.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "539d5c973fb01ced7cd0c8b26272bc49b3955a60", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/bem.py": { + "a": 15, + "d": 15 + }, + "mne/forward/_compute_forward.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8405.json b/doc/sphinxext/prs/8405.json new file mode 100644 index 00000000000..583b5a64e6f --- /dev/null +++ b/doc/sphinxext/prs/8405.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2cc6d7e275a60ae27c377de9c09272db5705155d", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_3d_to_2d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8406.json b/doc/sphinxext/prs/8406.json new file mode 100644 index 00000000000..6e8d2f26183 --- /dev/null +++ b/doc/sphinxext/prs/8406.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "529dd9ce448d2cbb2d987e4caa524c72cb2ab273", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/misc.py": { + "a": 5, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 15, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8408.json b/doc/sphinxext/prs/8408.json new file mode 100644 index 00000000000..5ba17cea8d5 --- /dev/null +++ b/doc/sphinxext/prs/8408.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "86db55a81fb7a0cb5e9d43ad63919c69af17b86a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8409.json b/doc/sphinxext/prs/8409.json new file mode 100644 index 00000000000..cc043cfe99a --- /dev/null +++ b/doc/sphinxext/prs/8409.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a60424def32dd7ae4f91a82df1be8ed656e84d55", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/841.json b/doc/sphinxext/prs/841.json new file mode 100644 index 00000000000..c142b531f31 --- /dev/null +++ b/doc/sphinxext/prs/841.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cd4e480db06ed39bcfb125f037b3d3be9c13e333", + "authors": [ + { + "n": "Andrew R. 
Dykstra", + "e": null + } + ], + "changes": { + "mne/label.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8412.json b/doc/sphinxext/prs/8412.json new file mode 100644 index 00000000000..1a83f8cc193 --- /dev/null +++ b/doc/sphinxext/prs/8412.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0c8d48c1b0ac4b6d79643fed0b26b8db0b6ddc79", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/reference.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8413.json b/doc/sphinxext/prs/8413.json new file mode 100644 index 00000000000..1e2810a8a24 --- /dev/null +++ b/doc/sphinxext/prs/8413.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d882353358ea174bf3c7da3bafc5cda229f2032d", + "authors": [ + { + "n": "Tod Flak", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8414.json b/doc/sphinxext/prs/8414.json new file mode 100644 index 00000000000..e1f3263c0f4 --- /dev/null +++ b/doc/sphinxext/prs/8414.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "5cb467c54c4f37935f1de86fad35e664aa9de2db", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 4, + "d": 11 + }, + "mne/source_space.py": { + "a": 30, + "d": 23 + }, + "mne/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_source_space.py": { + "a": 8, + "d": 9 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 4, + "d": 4 + }, + "tutorials/source-modeling/plot_background_freesurfer_mne.py": { + "a": 45, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8415.json b/doc/sphinxext/prs/8415.json new file mode 100644 index 00000000000..c7a1172f36e --- /dev/null +++ b/doc/sphinxext/prs/8415.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "21a3a08476c33ac90bf16437fd6bbbf35964fc8c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 19, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8416.json b/doc/sphinxext/prs/8416.json new file mode 100644 index 00000000000..11d47c39238 --- /dev/null +++ b/doc/sphinxext/prs/8416.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "9ffaaf7fe60cd41061a1221ed080078281ec20cf", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_layout.py": { + "a": 2, + "d": 2 + }, + "mne/io/meas_info.py": { + "a": 22, + "d": 3 + }, + "mne/io/reference.py": { + "a": 4, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 10, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 8, + "d": 6 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8417.json b/doc/sphinxext/prs/8417.json new file mode 100644 index 
00000000000..3f7624a66c6 --- /dev/null +++ b/doc/sphinxext/prs/8417.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "74041b7175828badd6fc2f2896bbad0c35f5401c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/0.21.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 8, + "d": 4 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8418.json b/doc/sphinxext/prs/8418.json new file mode 100644 index 00000000000..df039c40ebc --- /dev/null +++ b/doc/sphinxext/prs/8418.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fae559c520cd010bde64f9d8c3b09d64d8d5bd45", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test.ipynb": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/842.json b/doc/sphinxext/prs/842.json new file mode 100644 index 00000000000..40a95a841ed --- /dev/null +++ b/doc/sphinxext/prs/842.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "aef6c57aadb5baddcaf219901238f12bdaa611eb", + "authors": [ + { + "n": "Andrew R. Dykstra", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 107, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 52, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8420.json b/doc/sphinxext/prs/8420.json new file mode 100644 index 00000000000..4fac6675584 --- /dev/null +++ b/doc/sphinxext/prs/8420.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d759c60d718811988998e9c1c24d5326f7da7a22", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/visualization/plot_publication_figure.py": { + "a": 127, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8422.json b/doc/sphinxext/prs/8422.json new file mode 100644 index 00000000000..262a32459c3 --- /dev/null +++ b/doc/sphinxext/prs/8422.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "360e62d2d353fe158de758c1b8fbb5e44409a877", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 18, + "d": 0 + }, + "tutorials/intro/plot_20_events_from_raw.py": { + "a": 13, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8427.json b/doc/sphinxext/prs/8427.json new file mode 100644 index 00000000000..b3d43151316 --- /dev/null +++ b/doc/sphinxext/prs/8427.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "92bcf294ed543a2569ed1bbaae27a63d23050ee4", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 0 + }, + ".travis.yml": { + "a": 2, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 10, + "d": 4 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8431.json b/doc/sphinxext/prs/8431.json new file mode 100644 index 00000000000..b8009a7546f --- /dev/null +++ b/doc/sphinxext/prs/8431.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": 
"010d4c6363a36193d7fa2ddb2139a155e33f693c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 2, + "d": 1 + }, + "tutorials/intro/plot_20_events_from_raw.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8433.json b/doc/sphinxext/prs/8433.json new file mode 100644 index 00000000000..91548409e9c --- /dev/null +++ b/doc/sphinxext/prs/8433.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3a7e354a2eb120274b475a30a391aabce9e4dcfc", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/0.21.inc": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8434.json b/doc/sphinxext/prs/8434.json new file mode 100644 index 00000000000..8395ad86a39 --- /dev/null +++ b/doc/sphinxext/prs/8434.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "87553ab47e6b0fb0a3be09ba5302029f549b3ee7", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 8, + "d": 3 + }, + "mne/io/brainvision/tests/data/test.vhdr": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8435.json b/doc/sphinxext/prs/8435.json new file mode 100644 index 00000000000..3385c75749f --- /dev/null +++ b/doc/sphinxext/prs/8435.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cc9e53cb96ceea205a500273af0b427ebe54d67b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 6, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8437.json b/doc/sphinxext/prs/8437.json new file mode 100644 index 00000000000..af9893e6460 --- /dev/null +++ b/doc/sphinxext/prs/8437.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2690cc0a464a1a268d92e83d2cf314b2c21f1f03", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/0.21.inc": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 8, + "d": 3 + }, + "mne/io/brainvision/tests/data/test.vhdr": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8439.json b/doc/sphinxext/prs/8439.json new file mode 100644 index 00000000000..4592a821a19 --- /dev/null +++ b/doc/sphinxext/prs/8439.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ff5eaffe9c39ad6adc434a484b3c49df278f9390", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/844.json b/doc/sphinxext/prs/844.json new file mode 100644 index 00000000000..75c09ada13c --- /dev/null +++ b/doc/sphinxext/prs/844.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "c02ca49596eed8144947074ce0e7ca47947c97ee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "bin/mne_compute_proj_ecg.py": { + "a": 76, + "d": 57 + }, + 
"doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "mne/fiff/constants.py": { + "a": 0, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 5, + "d": 1 + }, + "mne/filter.py": { + "a": 7, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 68, + "d": 40 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 3 + }, + "mne/preprocessing/ssp.py": { + "a": 31, + "d": 22 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 17, + "d": 16 + }, + "mne/proj.py": { + "a": 9, + "d": 8 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_proj.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8442.json b/doc/sphinxext/prs/8442.json new file mode 100644 index 00000000000..6de91e6f489 --- /dev/null +++ b/doc/sphinxext/prs/8442.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "157a0409de8790a105b60266b8970496b21da895", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/style.css": { + "a": 4, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 50, + "d": 50 + }, + "doc/install/contributing.rst": { + "a": 70, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8444.json b/doc/sphinxext/prs/8444.json new file mode 100644 index 00000000000..59eac2e8fd1 --- /dev/null +++ b/doc/sphinxext/prs/8444.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1ba8cfe3a1a5dab21033fb44f762e9abe77fb3e0", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 2 + }, + "mne/epochs.py": { + "a": 124, + "d": 57 + }, + "mne/io/tests/test_pick.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 81, + "d": 15 + }, + "tutorials/epochs/plot_10_epochs_overview.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8446.json b/doc/sphinxext/prs/8446.json new file mode 100644 index 00000000000..418a7a6d571 --- /dev/null +++ b/doc/sphinxext/prs/8446.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "67d5013d513c48f71780fba340bd5a2bdce29158", + "authors": [ + { + "n": "Keith Doelling", + "e": "keith.doelling@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "examples/datasets/plot_opm_data.py": { + "a": 5, + "d": 4 + }, + "examples/preprocessing/plot_movement_detection.py": { + "a": 4, + "d": 6 + }, + "examples/visualization/plot_eeg_on_scalp.py": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 34, + "d": 5 + }, + "mne/viz/tests/test_3d.py": { + "a": 12, + "d": 2 + }, + "tutorials/intro/plot_40_sensor_locations.py": { + "a": 1, + "d": 2 + }, + "tutorials/misc/plot_seeg.py": { + "a": 2, + "d": 1 + }, + "tutorials/source-modeling/plot_eeg_no_mri.py": { + "a": 1, + "d": 2 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8447.json b/doc/sphinxext/prs/8447.json new file mode 100644 index 00000000000..410bfb701d3 --- /dev/null +++ b/doc/sphinxext/prs/8447.json @@ -0,0 +1,31 @@ +{ + 
"merge_commit_sha": "29a32f49b943616de4c72bf6a5e17fcad1ff8501", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 48, + "d": 11 + }, + "mne/morph.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 194, + "d": 3 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8448.json b/doc/sphinxext/prs/8448.json new file mode 100644 index 00000000000..d4ec6cf807e --- /dev/null +++ b/doc/sphinxext/prs/8448.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "606163ef1372b4f96e1245eb3312a0f87f6e3f43", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 57, + "d": 24 + }, + "mne/io/base.py": { + "a": 5, + "d": 0 + }, + "mne/io/tests/test_write.py": { + "a": 21, + "d": 0 + }, + "mne/io/write.py": { + "a": 16, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 109, + "d": 31 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8449.json b/doc/sphinxext/prs/8449.json new file mode 100644 index 00000000000..22af48a78c9 --- /dev/null +++ b/doc/sphinxext/prs/8449.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1922bbdc9b4008c5d14dbef69462f761214953f2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 66, + "d": 17 + }, + "mne/tests/test_epochs.py": { + "a": 53, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8450.json b/doc/sphinxext/prs/8450.json new file mode 100644 index 00000000000..05f5003a3ad --- /dev/null +++ b/doc/sphinxext/prs/8450.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a96f65415a55f7f22da516a2bdf43e84b7c9510b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 3, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8452.json b/doc/sphinxext/prs/8452.json new file mode 100644 index 00000000000..76ed69141b1 --- /dev/null +++ b/doc/sphinxext/prs/8452.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9e15df76f003677b8ddc9fdfa4be32c10f7620c3", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 24, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 29, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8454.json b/doc/sphinxext/prs/8454.json new file mode 100644 index 00000000000..58c2e0e185f --- /dev/null +++ b/doc/sphinxext/prs/8454.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8d6905d35dbae5cc16769989bc666e01c4e289ef", + "authors": [ + { + "n": "Jussi Nurminen", + "e": "jnu@iki.fi" + } + ], + "changes": { + "examples/visualization/plot_topo_customized.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8458.json b/doc/sphinxext/prs/8458.json new file mode 100644 index 00000000000..154c4000cab --- /dev/null +++ b/doc/sphinxext/prs/8458.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "0e31cd647c6e273f96ed94749f2b9968748ced48", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + 
"e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/annotations.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8459.json b/doc/sphinxext/prs/8459.json new file mode 100644 index 00000000000..a894ae80425 --- /dev/null +++ b/doc/sphinxext/prs/8459.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a738bab71c17fdb18621794bcf3aee84f9be8e32", + "authors": [ + { + "n": "Mads Jensen", + "e": null + } + ], + "changes": { + "tutorials/machine-learning/plot_sensors_decoding.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/846.json b/doc/sphinxext/prs/846.json new file mode 100644 index 00000000000..0786a8d67a5 --- /dev/null +++ b/doc/sphinxext/prs/846.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4361537350956926bc9c82568198c13ac1eb6def", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/tests/data/test-lh.label": { + "a": 101, + "d": 0 + }, + "mne/fiff/tests/data/test-rh.label": { + "a": 102, + "d": 0 + }, + "mne/tests/test_label.py": { + "a": 24, + "d": 15 + }, + "mne/tests/test_viz.py": { + "a": 4, + "d": 2 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 4, + "d": 6 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 3, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8460.json b/doc/sphinxext/prs/8460.json new file mode 100644 index 00000000000..f69173c3d8c --- /dev/null +++ b/doc/sphinxext/prs/8460.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "844847cd70afa78e21b6262f940ee0f1aecdcf3e", + "authors": [ + { + "n": "Adam Li", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 45, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8462.json b/doc/sphinxext/prs/8462.json new file mode 100644 index 00000000000..e8a73f36ed6 --- /dev/null +++ b/doc/sphinxext/prs/8462.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "111302b2bcc046556183485ef7342b0233f8d21e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 0 + }, + "mne/morph.py": { + "a": 66, + "d": 43 + }, + "mne/tests/test_morph.py": { + "a": 7, + "d": 13 + }, + "mne/tests/test_source_estimate.py": { + "a": 34, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8463.json b/doc/sphinxext/prs/8463.json new file mode 100644 index 00000000000..ad27da0f16d --- /dev/null +++ b/doc/sphinxext/prs/8463.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9f393dead191f60504ab7a215ae74d1a8ca0d89e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 0, + "d": 1 + }, + "mne/label.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8464.json b/doc/sphinxext/prs/8464.json new file mode 100644 index 00000000000..76a40dd2070 --- /dev/null +++ b/doc/sphinxext/prs/8464.json @@ 
-0,0 +1,31 @@ +{ + "merge_commit_sha": "6e3a560ba7d620acb0c7cac1a6eef74d93005708", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 30, + "d": 21 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 26, + "d": 5 + }, + "mne/preprocessing/stim.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8466.json b/doc/sphinxext/prs/8466.json new file mode 100644 index 00000000000..13165912cda --- /dev/null +++ b/doc/sphinxext/prs/8466.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9f73f5a3c857d21dc2fb6e8f14509f79d8126ad6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/visualization/plot_publication_figure.py": { + "a": 15, + "d": 28 + }, + "mne/viz/utils.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8467.json b/doc/sphinxext/prs/8467.json new file mode 100644 index 00000000000..0bda1cfd39a --- /dev/null +++ b/doc/sphinxext/prs/8467.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4bc95d273ab34df9b1d1b5ca3ba9d007f069317d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/morph.py": { + "a": 64, + "d": 144 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8468.json b/doc/sphinxext/prs/8468.json new file mode 100644 index 00000000000..7698087baa7 --- /dev/null +++ b/doc/sphinxext/prs/8468.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "57248c9ad7b17e76cf3bcfe25e9b2ffc3cff38d6", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/inverse/plot_label_source_activations.py": { + "a": 1, + "d": 1 + }, + "tutorials/misc/plot_seeg.py": { + "a": 20, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8469.json b/doc/sphinxext/prs/8469.json new file mode 100644 index 00000000000..aebb767fa92 --- /dev/null +++ b/doc/sphinxext/prs/8469.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7a87d0c627c98c840cc86664355c9fbb38229e28", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/847.json b/doc/sphinxext/prs/847.json new file mode 100644 index 00000000000..e6e48c09193 --- /dev/null +++ b/doc/sphinxext/prs/847.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "83d9e2aa2438fdc491d2b3493c9f08b497f6818c", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8471.json b/doc/sphinxext/prs/8471.json new file mode 100644 index 00000000000..653ff089f51 --- /dev/null +++ b/doc/sphinxext/prs/8471.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3930aec85edcb8a63a664f7658ddb33579a00f62", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8474.json b/doc/sphinxext/prs/8474.json new file mode 100644 index 00000000000..33e9fb9eabc --- /dev/null +++ b/doc/sphinxext/prs/8474.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8db510fd078bb58fae8cf0fa13aa5b7ed13c3d7e", + "authors": [ + { + "n": "Aniket Pradhan", + "e": "aniketpradhan1999@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 1 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8475.json b/doc/sphinxext/prs/8475.json new file mode 100644 index 00000000000..049f81ac3a6 --- /dev/null +++ b/doc/sphinxext/prs/8475.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "dd1f8b7563ec962694aad301b580c4a63c3faace", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 9, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8477.json b/doc/sphinxext/prs/8477.json new file mode 100644 index 00000000000..753ea19f2b8 --- /dev/null +++ b/doc/sphinxext/prs/8477.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "a1a3e27b437c3eab6b029d89ea4b2b50464ef073", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 3, + "d": 3 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 1, + "d": 1 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 2, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 2, + "d": 0 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 1, + "d": 0 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 1, + "d": 0 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 5, + "d": 0 + }, + "mne/tests/test_source_space.py": { + "a": 3, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/conftest.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8478.json b/doc/sphinxext/prs/8478.json new file mode 100644 index 00000000000..293a026c69e --- /dev/null +++ b/doc/sphinxext/prs/8478.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0e080a5a645b5934445ef4da6da744d7dfa7f68d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" 
+ } + ], + "changes": { + "mne/io/base.py": { + "a": 42, + "d": 13 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 28, + "d": 5 + }, + "mne/io/utils.py": { + "a": 10, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/848.json b/doc/sphinxext/prs/848.json new file mode 100644 index 00000000000..2c0e7857e52 --- /dev/null +++ b/doc/sphinxext/prs/848.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "89dc2f9b6fc69c0acc574a91081620c7a1abb13a", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8482.json b/doc/sphinxext/prs/8482.json new file mode 100644 index 00000000000..0649bddf7ff --- /dev/null +++ b/doc/sphinxext/prs/8482.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ef42e43c1583b1e50fbafaba65bcb1b9918abaf2", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/preprocessing/stim.py": { + "a": 8, + "d": 4 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 10, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8483.json b/doc/sphinxext/prs/8483.json new file mode 100644 index 00000000000..75343ad10f9 --- /dev/null +++ b/doc/sphinxext/prs/8483.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6ed368d4ac8b607dcc0dd7ceb86e4f9cca726332", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 23, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8484.json b/doc/sphinxext/prs/8484.json new file mode 100644 index 00000000000..8b0b5a424d2 --- /dev/null +++ b/doc/sphinxext/prs/8484.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2db6ffac6016346adb97eccd121bfcba482e0e2d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/io/reference.py": { + "a": 3, + "d": 1 + }, + "mne/io/tests/test_reference.py": { + "a": 15, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8485.json b/doc/sphinxext/prs/8485.json new file mode 100644 index 00000000000..acbd02cd96f --- /dev/null +++ b/doc/sphinxext/prs/8485.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0febcfa17efc651d9539fcc7be3498ab58d5b877", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8486.json b/doc/sphinxext/prs/8486.json new file mode 100644 index 00000000000..504c86d2958 --- /dev/null +++ b/doc/sphinxext/prs/8486.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7cc7766df0d27208b9a345680206a1a84aa41e64", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 24, + "d": 0 + }, + "mne/tests/test_report.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8489.json b/doc/sphinxext/prs/8489.json new file mode 100644 index 00000000000..ba512b7418f --- /dev/null +++ b/doc/sphinxext/prs/8489.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "3b909b7361f3f00f2b15b03e7057e59dc1027c44", + "authors": [ + { + "n": "Guillaume Favelier", + "e": 
"guillaume.favelier@gmail.com" + } + ], + "changes": { + ".github/workflows/ci.yml": { + "a": 0, + "d": 76 + }, + ".github/workflows/linux.yml": { + "a": 61, + "d": 0 + }, + ".github/workflows/macos.yml": { + "a": 56, + "d": 0 + }, + ".travis.yml": { + "a": 1, + "d": 11 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 2, + "d": 4 + }, + "tools/get_minimal_commands.sh": { + "a": 1, + "d": 0 + }, + "tools/github_actions_dependencies.sh": { + "a": 5, + "d": 0 + }, + "tools/github_actions_download.sh": { + "a": 3, + "d": 0 + }, + "tools/github_actions_infos.sh": { + "a": 4, + "d": 0 + }, + "tools/github_actions_install.sh": { + "a": 4, + "d": 0 + }, + "tools/github_actions_locale.sh": { + "a": 5, + "d": 0 + }, + "tools/github_actions_test.sh": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/849.json b/doc/sphinxext/prs/849.json new file mode 100644 index 00000000000..1708a965fd9 --- /dev/null +++ b/doc/sphinxext/prs/849.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d3c9079798dc9529462fe6f92b41e80bd92e5abc", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 4, + "d": 4 + }, + "Makefile": { + "a": 6, + "d": 0 + }, + "mne/parallel.py": { + "a": 11, + "d": 2 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_viz.py": { + "a": 1, + "d": 0 + }, + "mne/utils.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8490.json b/doc/sphinxext/prs/8490.json new file mode 100644 index 00000000000..e2eea046ca0 --- /dev/null +++ b/doc/sphinxext/prs/8490.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "28b36c06a744074b54993128fa1908ea10464ff2", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 11, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8491.json b/doc/sphinxext/prs/8491.json new file mode 100644 index 00000000000..e90de531d8f --- /dev/null +++ b/doc/sphinxext/prs/8491.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b9a7fb7a5e1fdb823a10ead989b1bd74ed117154", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 0, + "d": 4 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8492.json b/doc/sphinxext/prs/8492.json new file mode 100644 index 00000000000..a72a99419e5 --- /dev/null +++ b/doc/sphinxext/prs/8492.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "bb16a3e6ea99024e468fa84c2ec55c344b47df7e", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 6, + "d": 1 + }, + "mne/io/brainvision/tests/data/test.vmrk": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 60, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8494.json b/doc/sphinxext/prs/8494.json new 
file mode 100644 index 00000000000..ab30e1b7ab0 --- /dev/null +++ b/doc/sphinxext/prs/8494.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e70247800458ff0a36fbe90b134841087ce95c6e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8495.json b/doc/sphinxext/prs/8495.json new file mode 100644 index 00000000000..5e888ee837a --- /dev/null +++ b/doc/sphinxext/prs/8495.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "dacca023885184a5063152de6cf6705ceb721c16", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".travis.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 3, + "d": 4 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 6 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 0 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8496.json b/doc/sphinxext/prs/8496.json new file mode 100644 index 00000000000..5512db25a9e --- /dev/null +++ b/doc/sphinxext/prs/8496.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "49bb34f993b93a9ca28dda26f7bc23cb3af2bcb5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.21.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_label_source_activations.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 9, + "d": 20 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 2, + "d": 7 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8498.json b/doc/sphinxext/prs/8498.json new file mode 100644 index 00000000000..d270ba86d74 --- /dev/null +++ b/doc/sphinxext/prs/8498.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0ca1c5c8eb3a46b8a170ed21e8a98d2dfa533a04", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/overview/faq.rst": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/filter.py": { + "a": 3, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/85.json b/doc/sphinxext/prs/85.json new file mode 100644 index 00000000000..a30817acb25 --- /dev/null +++ b/doc/sphinxext/prs/85.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a18146827bfe216375f4e48066d36004903183e4", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/constants.py": { + "a": 29, + "d": 30 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/850.json b/doc/sphinxext/prs/850.json new file mode 100644 index 00000000000..461c5a35cf5 --- /dev/null +++ b/doc/sphinxext/prs/850.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e85f39b06783eaecb34bb7d84d8895a4cadcecba", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "README.rst": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8500.json b/doc/sphinxext/prs/8500.json new file mode 100644 index 00000000000..e584793d6c9 --- /dev/null +++ b/doc/sphinxext/prs/8500.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6734953dd15070550cf792004b5ffa1079001541", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/utils/mixin.py": { + "a": 48, + "d": 34 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8502.json b/doc/sphinxext/prs/8502.json new file mode 100644 index 00000000000..9d656efba5a --- /dev/null +++ b/doc/sphinxext/prs/8502.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "3e1b5819ab640c3f349baa856d56d2c65c125d98", + "authors": [ + { + "n": "Zhenya", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 9, + "d": 0 + }, + "mne/datasets/limo/limo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_scraper.py": { + "a": 5, + "d": 1 + }, + "tutorials/evoked/plot_eeg_erp.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8503.json b/doc/sphinxext/prs/8503.json new file mode 100644 index 00000000000..2f7d5c7f566 --- /dev/null +++ b/doc/sphinxext/prs/8503.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "cab14251d246268997c27a07fb9260b5f173b149", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 141, + "d": 86 + }, + "mne/viz/_brain/_notebook.py": { + "a": 0, + "d": 67 + }, + "mne/viz/_brain/mplcanvas.py": { + "a": 24, + "d": 12 + }, + "mne/viz/_brain/tests/test.ipynb": { + "a": 12, + "d": 7 + }, + "mne/viz/backends/_notebook.py": { + "a": 5, + "d": 148 + }, + "server_environment.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8504.json b/doc/sphinxext/prs/8504.json new file mode 100644 index 00000000000..4506f713cfe --- /dev/null +++ b/doc/sphinxext/prs/8504.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b8f5e5ce0da8acfeb7298c8eb1d26a75d5526eac", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/preprocessing/nirs/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/nirs/tests/test_optical_density.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/nirs/tests/test_scalp_coupling_index.py": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8506.json b/doc/sphinxext/prs/8506.json new file mode 100644 index 00000000000..c63fbdadad4 --- /dev/null +++ b/doc/sphinxext/prs/8506.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": 
"db49d5dad2fb077d2a9bc8a30cfc5569e2671144", + "authors": [ + { + "n": "Thomas Hartmann", + "e": "thomas.hartmann@th-ht.de" + } + ], + "changes": { + "mne/externals/pymatreader/pymatreader.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8508.json b/doc/sphinxext/prs/8508.json new file mode 100644 index 00000000000..0bee29729b4 --- /dev/null +++ b/doc/sphinxext/prs/8508.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1fa17ddf895b42c08aea3120d127572834ac5c96", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 36, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/851.json b/doc/sphinxext/prs/851.json new file mode 100644 index 00000000000..0f818a76fb8 --- /dev/null +++ b/doc/sphinxext/prs/851.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "7742303f25ef9784031c0a0db9b82c94e69217db", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 1 + }, + "doc/source/whats_new.rst": { + "a": 3, + "d": 0 + }, + "examples/datasets/README.txt": { + "a": 5, + "d": 0 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 118, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/sample/sample.py": { + "a": 15, + "d": 152 + }, + "mne/datasets/spm_face/__init__.py": { + "a": 4, + "d": 0 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 30, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 185, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 1 + }, + "mne/utils.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8514.json b/doc/sphinxext/prs/8514.json new file mode 100644 index 00000000000..39c627173d2 --- /dev/null +++ b/doc/sphinxext/prs/8514.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "c046c9c84a0f7f9874fd0f58ef259eaa5e2f6c08", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 38, + "d": 0 + }, + ".github/workflows/compat_minimal.yml": { + "a": 65, + "d": 0 + }, + ".github/workflows/compat_old.yml": { + "a": 60, + "d": 0 + }, + ".github/workflows/linux_conda.yml": { + "a": 6, + "d": 5 + }, + ".github/workflows/linux_pip.yml": { + "a": 60, + "d": 0 + }, + ".github/workflows/macos_conda.yml": { + "a": 4, + "d": 3 + }, + ".travis.yml": { + "a": 0, + "d": 135 + }, + "azure-pipelines.yml": { + "a": 244, + "d": 230 + }, + "tools/get_minimal_commands.sh": { + "a": 19, + "d": 17 + }, + "tools/github_actions_dependencies.sh": { + "a": 15, + "d": 2 + }, + "tools/github_actions_download.sh": { + "a": 3, + "d": 1 + }, + "tools/github_actions_test.sh": { + "a": 22, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8515.json b/doc/sphinxext/prs/8515.json new file mode 100644 index 00000000000..44d04bfa3a4 --- /dev/null +++ b/doc/sphinxext/prs/8515.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "d73e0ab014c1fe6125d80f4649a5c066858d8c5f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 6, + 
"d": 6 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/utils.py": { + "a": 25, + "d": 22 + }, + "mne/decoding/ssd.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 2, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 12, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + }, + "mne/utils/progressbar.py": { + "a": 3, + "d": 3 + }, + "mne/utils/tests/test_check.py": { + "a": 13, + "d": 1 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 7, + "d": 0 + }, + "tools/circleci_download.sh": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8516.json b/doc/sphinxext/prs/8516.json new file mode 100644 index 00000000000..9872abcd6d5 --- /dev/null +++ b/doc/sphinxext/prs/8516.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e82aee041d55fc0f18dd18a94939646be3fcca7f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/io/plot_objects_from_arrays.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 3, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 16, + "d": 2 + }, + "tutorials/evoked/plot_whitened.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8519.json b/doc/sphinxext/prs/8519.json new file mode 100644 index 00000000000..281bd624a25 --- /dev/null +++ b/doc/sphinxext/prs/8519.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "23acc99e65aae9180ee72eddc2ba0ae2842e5e26", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/label.py": { + "a": 30, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/852.json b/doc/sphinxext/prs/852.json new file mode 100644 index 00000000000..03f08c1fc6f --- /dev/null +++ b/doc/sphinxext/prs/852.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4d1a6617239cdd21ed7aa7d9270dafd3f3c0a504", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 53, + "d": 0 + }, + "mne/fiff/kit/coreg.py": { + "a": 0, + "d": 53 + }, + "mne/fiff/kit/kit.py": { + "a": 3, + "d": 2 + }, + "mne/fiff/kit/tests/test_coreg.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8520.json b/doc/sphinxext/prs/8520.json new file mode 100644 index 00000000000..61c1f96f211 --- /dev/null +++ b/doc/sphinxext/prs/8520.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5efddb9e80dbf9935ae9340d1708737b867e4237", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 20, + "d": 17 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8521.json b/doc/sphinxext/prs/8521.json new file mode 100644 index 00000000000..52bcf5dd719 --- /dev/null +++ b/doc/sphinxext/prs/8521.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "aacafb171a13ad7325fb854d5603efd6bb951e00", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/source_space.py": { + "a": 103, + "d": 71 + }, + 
"mne/tests/test_morph.py": { + "a": 98, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8522.json b/doc/sphinxext/prs/8522.json new file mode 100644 index 00000000000..b57b669f3fc --- /dev/null +++ b/doc/sphinxext/prs/8522.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9775db1407c7c91f1b020d0c63f868430130b77c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8524.json b/doc/sphinxext/prs/8524.json new file mode 100644 index 00000000000..cb2d3f3dfe9 --- /dev/null +++ b/doc/sphinxext/prs/8524.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5c3e7424b63798a5093ff3a1971e841d23d4f61a", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8526.json b/doc/sphinxext/prs/8526.json new file mode 100644 index 00000000000..3497f4d5631 --- /dev/null +++ b/doc/sphinxext/prs/8526.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cedae25792da3dede4fa7fed943f4b7b7b1bb7c5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/forward/plot_left_cerebellum_volume_source.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 57, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8527.json b/doc/sphinxext/prs/8527.json new file mode 100644 index 00000000000..27e5a949a77 --- /dev/null +++ b/doc/sphinxext/prs/8527.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5d43084ffc12ae4c8d73c5db991856ea8e2e47e5", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/decoding/plot_ssd_spatial_filters.py": { + "a": 3, + "d": 2 + }, + "mne/decoding/ssd.py": { + "a": 13, + "d": 14 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8528.json b/doc/sphinxext/prs/8528.json new file mode 100644 index 00000000000..2a92011a82c --- /dev/null +++ b/doc/sphinxext/prs/8528.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0473162138943de2675d9a8cce0bb4766c3d5523", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 1, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8529.json b/doc/sphinxext/prs/8529.json new file mode 100644 index 00000000000..18074fa2d37 --- /dev/null +++ b/doc/sphinxext/prs/8529.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5db8876c5ca038c5cc5f0277e2bf17a19ddde717", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_morph_volume_stc.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 54, + "d": 27 + }, + "mne/tests/test_morph.py": { + "a": 40, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8532.json b/doc/sphinxext/prs/8532.json new file mode 100644 index 00000000000..8471a474749 --- /dev/null +++ b/doc/sphinxext/prs/8532.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "40872a8b836a86a48d013a1654b2bfffb59ea5b8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 7, + "d": 7 + }, + "mne/channels/tests/test_montage.py": { + "a": 15, + "d": 22 + }, + "mne/io/_digitization.py": { + "a": 7, + "d": 2 + }, + "mne/utils/_testing.py": { + "a": 18, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8533.json b/doc/sphinxext/prs/8533.json new file mode 100644 index 00000000000..58dbd7dad03 --- /dev/null +++ b/doc/sphinxext/prs/8533.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "7b0468f805c027818e864b92137664364d2a9a1b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/circle_artifacts.yml": { + "a": 0, + "d": 0 + }, + ".github/workflows/linux_conda.yml": { + "a": 1, + "d": 3 + }, + ".github/workflows/linux_pip.yml": { + "a": 4, + "d": 6 + }, + "README.rst": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 5, + "d": 7 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 14, + "d": 7 + }, + "tools/setup_xvfb.sh": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8534.json b/doc/sphinxext/prs/8534.json new file mode 100644 index 00000000000..75a077c6fad --- /dev/null +++ b/doc/sphinxext/prs/8534.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "fe8eb45abb779e5f8ec5739486a0dbadf8d408de", + "authors": [ + { + "n": null, + "e": 
"olaf.hauk@mrc-cbu.cam.ac.uk" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "examples/forward/plot_forward_sensitivity_maps.py": { + "a": 40, + "d": 3 + }, + "mne/source_space.py": { + "a": 64, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 66, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8536.json b/doc/sphinxext/prs/8536.json new file mode 100644 index 00000000000..e476c2998a8 --- /dev/null +++ b/doc/sphinxext/prs/8536.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5305feef94a1b8df9355900cb5281f90eeb762f4", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/utils.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8537.json b/doc/sphinxext/prs/8537.json new file mode 100644 index 00000000000..de5a200f096 --- /dev/null +++ b/doc/sphinxext/prs/8537.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "02db0b82d3cc42c1f30fc27a850340d890c4a538", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 6, + "d": 36 + }, + "mne/channels/tests/test_channels.py": { + "a": 8, + "d": 7 + }, + "mne/io/array/tests/test_array.py": { + "a": 4, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 4, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 19, + "d": 36 + }, + "mne/io/pick.py": { + "a": 62, + "d": 19 + }, + "mne/io/tests/test_pick.py": { + "a": 7, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8538.json b/doc/sphinxext/prs/8538.json new file mode 100644 index 00000000000..d6c62e1549d --- /dev/null +++ b/doc/sphinxext/prs/8538.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "8a2a765ff0b959de55ff851d279b2ec3774f8be8", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 16, + "d": 1 + }, + ".github/workflows/compat_minimal.yml": { + "a": 17, + "d": 4 + }, + ".github/workflows/compat_old.yml": { + "a": 17, + "d": 6 + }, + ".github/workflows/linux_conda.yml": { + "a": 16, + "d": 3 + }, + ".github/workflows/linux_pip.yml": { + "a": 16, + "d": 3 + }, + ".github/workflows/macos_conda.yml": { + "a": 16, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8539.json b/doc/sphinxext/prs/8539.json new file mode 100644 index 00000000000..7b4fe6542ce --- /dev/null +++ b/doc/sphinxext/prs/8539.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d120a32dca2a1dc66e2827a1586d86ab3212d94e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + 
"examples/visualization/plot_eeglab_head_sphere.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/854.json b/doc/sphinxext/prs/854.json new file mode 100644 index 00000000000..f09ee375a36 --- /dev/null +++ b/doc/sphinxext/prs/854.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ac07d28fb314c0856eb67a7f5aa15d97d6912f53", + "authors": [ + { + "n": "Yaroslav Halchenko", + "e": null + } + ], + "changes": { + "debian/changelog": { + "a": 1, + "d": 1 + }, + "debian/compat": { + "a": 1, + "d": 1 + }, + "debian/control": { + "a": 2, + "d": 2 + }, + "debian/gbp.conf": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8540.json b/doc/sphinxext/prs/8540.json new file mode 100644 index 00000000000..6b3a3995ac1 --- /dev/null +++ b/doc/sphinxext/prs/8540.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "50d5863e1a823db06b369547a5452b9cd9a67a85", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/realign.py": { + "a": 107, + "d": 0 + }, + "mne/preprocessing/tests/test_realign.py": { + "a": 116, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8541.json b/doc/sphinxext/prs/8541.json new file mode 100644 index 00000000000..054cb0ba4cf --- /dev/null +++ b/doc/sphinxext/prs/8541.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "344f5ab46d3dee155837cf3299fecfa4a884040c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/circle_artifacts.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/codespell_and_flake.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/compat_minimal.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/linux_conda.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/linux_pip.yml": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/setup_xvfb.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8542.json b/doc/sphinxext/prs/8542.json new file mode 100644 index 00000000000..ab30ffbeb5b --- /dev/null +++ b/doc/sphinxext/prs/8542.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "92a29ff3c4e95fdae546cafd193cd0dc899a43bd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/constants.py": { + "a": 6, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 0 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 17, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8543.json b/doc/sphinxext/prs/8543.json new file mode 100644 index 00000000000..6b835bfdab7 --- /dev/null +++ b/doc/sphinxext/prs/8543.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8b0b8a967b1a8e2fd88c47221f535d73ca7d6fb2", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 15, + "d": 7 + }, + "mne/tests/test_label.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8545.json b/doc/sphinxext/prs/8545.json 
new file mode 100644 index 00000000000..4f015d30319 --- /dev/null +++ b/doc/sphinxext/prs/8545.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "75866351036ea00d36450dfa754be907d1e9c36a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 1 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 88, + "d": 95 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8546.json b/doc/sphinxext/prs/8546.json new file mode 100644 index 00000000000..d4023fd7b57 --- /dev/null +++ b/doc/sphinxext/prs/8546.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "2b4605fbf50d1d03edc34ad013b5b4577f948b81", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_figure.py": { + "a": 171, + "d": 27 + }, + "mne/viz/epochs.py": { + "a": 11, + "d": 24 + }, + "mne/viz/raw.py": { + "a": 11, + "d": 18 + }, + "mne/viz/tests/test_figure.py": { + "a": 7, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 22, + "d": 19 + }, + "mne/viz/utils.py": { + "a": 5, + "d": 85 + }, + "tutorials/epochs/plot_20_visualize_epochs.py": { + "a": 25, + "d": 25 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8547.json b/doc/sphinxext/prs/8547.json new file mode 100644 index 00000000000..1237115c795 --- /dev/null +++ b/doc/sphinxext/prs/8547.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "54ac2d86a63a5a2ee556944df3f091cc612c4b8a", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 16, + "d": 29 + }, + "mne/viz/_brain/_brain.py": { + "a": 35, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8548.json b/doc/sphinxext/prs/8548.json new file mode 100644 index 00000000000..4be8b071487 --- /dev/null +++ b/doc/sphinxext/prs/8548.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e280d1d2309d4bf22daf54066f05cc251840a31c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 8 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 24, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8549.json b/doc/sphinxext/prs/8549.json new file mode 100644 index 00000000000..39e6792d483 --- /dev/null +++ b/doc/sphinxext/prs/8549.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ce6713c2df66d57974eb822cacebbc31425b0d12", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/tests/test_3d.py": { + "a": 5, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/855.json b/doc/sphinxext/prs/855.json new file mode 100644 index 00000000000..96a866635d2 --- /dev/null +++ b/doc/sphinxext/prs/855.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0070ef4308c22426583d1d62724f05535f8036b2", + "authors": [ + { + "n": "Yaroslav Halchenko", + "e": null + } + ], + "changes": { + "debian/rules": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8552.json b/doc/sphinxext/prs/8552.json new file mode 100644 index 00000000000..8efc77b47a1 --- /dev/null +++ b/doc/sphinxext/prs/8552.json @@ -0,0 
+1,15 @@ +{ + "merge_commit_sha": "0eefe3145d17fa5bbac75baf83a21b3f3ff16da9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/references.bib": { + "a": 0, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8553.json b/doc/sphinxext/prs/8553.json new file mode 100644 index 00000000000..d81014a3738 --- /dev/null +++ b/doc/sphinxext/prs/8553.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "9139c345ef40a6b5c4a1ee4e6943ae76e25d8276", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/defaults.py": { + "a": 3, + "d": 3 + }, + "mne/gui/_coreg_gui.py": { + "a": 3, + "d": 3 + }, + "mne/gui/_viewer.py": { + "a": 12, + "d": 6 + }, + "mne/viz/_3d.py": { + "a": 40, + "d": 16 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 35, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 16, + "d": 3 + }, + "mne/viz/backends/_utils.py": { + "a": 2, + "d": 1 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8554.json b/doc/sphinxext/prs/8554.json new file mode 100644 index 00000000000..6a4ce83a559 --- /dev/null +++ b/doc/sphinxext/prs/8554.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "622f910ac9c54cc2ccba89013176be788fb2e016", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/gui/_backend.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8557.json b/doc/sphinxext/prs/8557.json new file mode 100644 index 00000000000..1fbcf8d0c4c --- /dev/null +++ b/doc/sphinxext/prs/8557.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d34b797bf2d5df0356a2c6f6ef1cf2f549fcdac4", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + }, + "ignore_words.txt": { + "a": 1, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8558.json b/doc/sphinxext/prs/8558.json new file mode 100644 index 00000000000..e659710bed3 --- /dev/null +++ b/doc/sphinxext/prs/8558.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2c5bf45edf3f9039d5a77baf53db1ef22434ce9f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 42, + "d": 14 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 49, + "d": 13 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/856.json b/doc/sphinxext/prs/856.json new file mode 100644 index 00000000000..1b8f841dc4d --- /dev/null +++ b/doc/sphinxext/prs/856.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e2db91ccfe440bc11a1c6985b2247e68db5501a2", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/fiff/meas_info.py": { + "a": 17, + "d": 1 + }, + "mne/fiff/tag.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/tests/data/fsaverage-fiducials.fif": { + "a": 0, + "d": 0 + }, + "mne/fiff/tests/test_meas_info.py": { + "a": 13, + "d": 4 + } + } +} \ No newline at end of 
file diff --git a/doc/sphinxext/prs/8560.json b/doc/sphinxext/prs/8560.json new file mode 100644 index 00000000000..999c79020fc --- /dev/null +++ b/doc/sphinxext/prs/8560.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "895ebf905adc22fd7ff7839ff73647b3ce293314", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 4, + "d": 0 + }, + "mne/viz/tests/test_epochs.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8561.json b/doc/sphinxext/prs/8561.json new file mode 100644 index 00000000000..e120fe6b846 --- /dev/null +++ b/doc/sphinxext/prs/8561.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bd3fa021b2161ddf77418c35a139e3546ca5a986", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/install/pre_install.rst": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8564.json b/doc/sphinxext/prs/8564.json new file mode 100644 index 00000000000..9145a5c30fc --- /dev/null +++ b/doc/sphinxext/prs/8564.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ea8120f4a516124b4bd5fd884efb47bfd848ad3e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 2 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_dipole_orientations.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8567.json b/doc/sphinxext/prs/8567.json new file mode 100644 index 00000000000..f2334173823 --- /dev/null +++ b/doc/sphinxext/prs/8567.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "245d455bbf8b32ce96a445862029710ca5564489", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 20, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8569.json b/doc/sphinxext/prs/8569.json new file mode 100644 index 00000000000..bd03dc5e7f8 --- /dev/null +++ b/doc/sphinxext/prs/8569.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fe9361ad49985c1f97984df494a93ce2fdf25ef9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "requirements.txt": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/857.json b/doc/sphinxext/prs/857.json new file mode 100644 index 00000000000..d7a919f730e --- /dev/null +++ b/doc/sphinxext/prs/857.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1c67a41c74424c90af131ffd9afab7a92857cfa5", + "authors": [ + { + "n": "Romain Trachel", + "e": "trachelr@gmail.com" + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8570.json b/doc/sphinxext/prs/8570.json new file mode 100644 index 00000000000..633fc43ded6 --- /dev/null +++ b/doc/sphinxext/prs/8570.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ff45e9fc45fbb18776c4a5162a1b437d2e3b9345", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/discussions/plot_background_filtering.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_background_freesurfer.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8572.json b/doc/sphinxext/prs/8572.json new file mode 100644 index 00000000000..85ee0927a57 --- 
/dev/null +++ b/doc/sphinxext/prs/8572.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7206f8e434106982e64728939794bee46adfc5f2", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 3, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 4 + }, + "mne/viz/backends/base_renderer.py": { + "a": 108, + "d": 104 + }, + "mne/viz/backends/renderer.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8574.json b/doc/sphinxext/prs/8574.json new file mode 100644 index 00000000000..4b8c2d51ee2 --- /dev/null +++ b/doc/sphinxext/prs/8574.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "f9e62e04cbb4e0f2f5019919be58a470c8e9289e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 21, + "d": 17 + }, + "mne/channels/montage.py": { + "a": 3, + "d": 3 + }, + "mne/channels/tests/test_channels.py": { + "a": 0, + "d": 4 + }, + "mne/cov.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 4, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 13, + "d": 11 + }, + "mne/io/array/tests/test_array.py": { + "a": 5, + "d": 1 + }, + "mne/io/constants.py": { + "a": 18, + "d": 0 + }, + "mne/io/ctf_comp.py": { + "a": 27, + "d": 0 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 3, + "d": 2 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 6, + "d": 7 + }, + "mne/io/meas_info.py": { + "a": 140, + "d": 47 + }, + "mne/io/proj.py": { + "a": 8, + "d": 20 + }, + "mne/io/tag.py": { + "a": 9, + "d": 1 + }, + "mne/io/tests/test_constants.py": { + "a": 5, + "d": 4 + }, + "mne/io/tests/test_meas_info.py": { + "a": 157, + "d": 3 + }, + "mne/io/tests/test_reference.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 0, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8575.json b/doc/sphinxext/prs/8575.json new file mode 100644 index 00000000000..c107c543f73 --- /dev/null +++ b/doc/sphinxext/prs/8575.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "606e88fb976b0a320154e5912629705957f7b71b", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8576.json b/doc/sphinxext/prs/8576.json new file mode 100644 index 00000000000..cb03b0d9ad5 --- /dev/null +++ b/doc/sphinxext/prs/8576.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5274110cb3be610beed591cba6bab633a0cc4211", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_source_space_snr.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 249, + "d": 187 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/callback.py": { + "a": 8, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 37, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8577.json b/doc/sphinxext/prs/8577.json new file mode 100644 index 
00000000000..5d1c8369ec4 --- /dev/null +++ b/doc/sphinxext/prs/8577.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "679f954b51ae6d51be29423eab53e500be9d8755", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 24, + "d": 29 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8580.json b/doc/sphinxext/prs/8580.json new file mode 100644 index 00000000000..eb221057173 --- /dev/null +++ b/doc/sphinxext/prs/8580.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "34c944a6c2e67f7ba464a5520726d3890dc73f07", + "authors": [ + { + "n": "Hongjiang YE", + "e": "hongjiang.ye@outlook.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8584.json b/doc/sphinxext/prs/8584.json new file mode 100644 index 00000000000..42ff4acff11 --- /dev/null +++ b/doc/sphinxext/prs/8584.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "01a6ec08ee90266435cbccc460396e137313251e", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/datasets/testing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/testing/_testing.py": { + "a": 8, + "d": 5 + }, + "mne/io/edf/edf.py": { + "a": 9, + "d": 2 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 53, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8585.json b/doc/sphinxext/prs/8585.json new file mode 100644 index 00000000000..c56ed6a57a4 --- /dev/null +++ b/doc/sphinxext/prs/8585.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "25524596480868d83fa0adb9748b4091c3568d55", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8586.json b/doc/sphinxext/prs/8586.json new file mode 100644 index 00000000000..b69804c9093 --- /dev/null +++ b/doc/sphinxext/prs/8586.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "021fc1c7113b688586e2908c52a65ce056025020", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/overview/faq.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8589.json b/doc/sphinxext/prs/8589.json new file mode 100644 index 00000000000..1b3a6aa7590 --- /dev/null +++ b/doc/sphinxext/prs/8589.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2b0f0c5a4cd1a3fe68ba04a0af2f1d141e1ec750", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + }, + "tutorials/epochs/plot_20_visualize_epochs.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8590.json b/doc/sphinxext/prs/8590.json new file mode 100644 index 00000000000..3e99000b0e4 --- /dev/null +++ b/doc/sphinxext/prs/8590.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6e627f964f30d18b1af286bc4526a66b1cac24fe", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], 
+ "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 10, + "d": 8 + }, + "tutorials/evoked/plot_20_visualize_evoked.py": { + "a": 46, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8594.json b/doc/sphinxext/prs/8594.json new file mode 100644 index 00000000000..d860fbeb00f --- /dev/null +++ b/doc/sphinxext/prs/8594.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c23df983dbd0e8f5bd481af07ceeb5453de8ca84", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_dics.py": { + "a": 22, + "d": 3 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 69, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8595.json b/doc/sphinxext/prs/8595.json new file mode 100644 index 00000000000..cfbc3e5129e --- /dev/null +++ b/doc/sphinxext/prs/8595.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "73e2e7a8d6c71d6abe4f5c3e8d88f8efcade89ff", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8596.json b/doc/sphinxext/prs/8596.json new file mode 100644 index 00000000000..4a369058941 --- /dev/null +++ b/doc/sphinxext/prs/8596.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "5a62f4719f28733687b5dd1f8fb9674f9f641c2f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "examples/io/README.txt": { + "a": 3, + "d": 2 + }, + "examples/io/plot_objects_from_arrays.py": { + "a": 0, + "d": 166 + }, + "examples/io/plot_read_and_write_raw_data.py": { + "a": 0, + "d": 45 + }, + "examples/io/plot_read_epochs.py": { + "a": 0, + "d": 48 + }, + "examples/io/plot_read_events.py": { + "a": 0, + "d": 77 + }, + "examples/io/plot_read_evoked.py": { + "a": 0, + "d": 36 + }, + "examples/io/plot_read_neo_format.py": { + "a": 34, + "d": 0 + }, + "examples/io/plot_read_proj.py": { + "a": 0, + "d": 64 + }, + "examples/preprocessing/plot_find_ecg_artifacts.py": { + "a": 0, + "d": 51 + }, + "examples/preprocessing/plot_find_eog_artifacts.py": { + "a": 0, + "d": 49 + }, + "examples/preprocessing/plot_metadata_query.py": { + "a": 0, + "d": 111 + }, + "examples/preprocessing/plot_rereference_eeg.py": { + "a": 0, + "d": 90 + }, + "examples/preprocessing/plot_resample.py": { + "a": 0, + "d": 83 + }, + "examples/time_frequency/plot_compute_raw_data_spectrum.py": { + "a": 0, + "d": 125 + }, + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/plot_10_reading_meg_data.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/plot_10_preprocessing_overview.py": { + "a": 3, + "d": 3 + }, + "tutorials/preprocessing/plot_55_setting_eeg_reference.py": { + "a": 27, + "d": 0 + }, + "tutorials/raw/plot_20_event_arrays.py": { + "a": 5, + "d": 0 + }, + "tutorials/simulation/plot_creating_data_structures.py": { + "a": 151, + "d": 148 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8598.json b/doc/sphinxext/prs/8598.json new file mode 100644 index 00000000000..bd413e63a56 --- /dev/null +++ b/doc/sphinxext/prs/8598.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "63dabf5165a1ccf0800267f2c56ed942a41d96be", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + 
"a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 46, + "d": 35 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 16, + "d": 0 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 1 + }, + "tutorials/misc/plot_ecog.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/86.json b/doc/sphinxext/prs/86.json new file mode 100644 index 00000000000..f0c1190c72f --- /dev/null +++ b/doc/sphinxext/prs/86.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ccf0bc9a3ecdebf20f3368920bf74285f775f3cd", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/constants.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8601.json b/doc/sphinxext/prs/8601.json new file mode 100644 index 00000000000..bc4a76177dd --- /dev/null +++ b/doc/sphinxext/prs/8601.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ce4c5ad2055d69d95131e9a215e6d142b5c58d0d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/fixes.py": { + "a": 25, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8603.json b/doc/sphinxext/prs/8603.json new file mode 100644 index 00000000000..71b295b6387 --- /dev/null +++ b/doc/sphinxext/prs/8603.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1e622deb5257ea59dd7929eadf549ce8ca564a17", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 98, + "d": 42 + }, + "mne/viz/tests/test_figure.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8605.json b/doc/sphinxext/prs/8605.json new file mode 100644 index 00000000000..ac65332b4b9 --- /dev/null +++ b/doc/sphinxext/prs/8605.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d45a85a841709e48861db73a3d4fde0619c243a9", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 12, + "d": 10 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 11, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8608.json b/doc/sphinxext/prs/8608.json new file mode 100644 index 00000000000..a5ee7418331 --- /dev/null +++ b/doc/sphinxext/prs/8608.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ef2a2e491763db89d7411eee4c0dd49b1c8032ee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/csd.py": { + "a": 13, + "d": 13 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 37, + "d": 23 + }, + "mne/utils/tests/test_progressbar.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/861.json b/doc/sphinxext/prs/861.json new file mode 100644 index 00000000000..0bd05b7ddad --- /dev/null +++ b/doc/sphinxext/prs/861.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bcb98eb48dfe3944c67786d712ab3bbc0c98195f", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 13, + "d": 2 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8610.json b/doc/sphinxext/prs/8610.json new 
file mode 100644 index 00000000000..c99668f7fcf --- /dev/null +++ b/doc/sphinxext/prs/8610.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "5ba77c8b947001da1598671503b165f232687dca", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 24, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 13, + "d": 10 + }, + "mne/beamformer/_lcmv.py": { + "a": 4, + "d": 22 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 64, + "d": 10 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 101, + "d": 32 + }, + "mne/conftest.py": { + "a": 15, + "d": 4 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 44, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8611.json b/doc/sphinxext/prs/8611.json new file mode 100644 index 00000000000..5708cab5b02 --- /dev/null +++ b/doc/sphinxext/prs/8611.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a00ed0ab22852941b1cd6288590d25b88086bfaf", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8612.json b/doc/sphinxext/prs/8612.json new file mode 100644 index 00000000000..b3429da70b0 --- /dev/null +++ b/doc/sphinxext/prs/8612.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "76e386ca0d588c30a58519b125448162f735a8e9", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 18 + }, + "mne/preprocessing/ecg.py": { + "a": 28, + "d": 66 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ssp.py": { + "a": 8, + "d": 4 + }, + "mne/proj.py": { + "a": 9, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 86, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8618.json b/doc/sphinxext/prs/8618.json new file mode 100644 index 00000000000..4a3194f2c8d --- /dev/null +++ b/doc/sphinxext/prs/8618.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2ae5c408513293d5f9f32a379afc5f99540398d9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 81, + "d": 71 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 3, + "d": 3 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 7, + "d": 4 + }, + "mne/io/kit/kit.py": { + "a": 46, + "d": 42 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8619.json b/doc/sphinxext/prs/8619.json new file mode 100644 index 00000000000..3dfb99bb1ac --- /dev/null +++ b/doc/sphinxext/prs/8619.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3db29c459f6b612b3981e2c54cc05111038d292c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8621.json b/doc/sphinxext/prs/8621.json new file mode 100644 index 00000000000..4b5f10652e5 --- 
/dev/null +++ b/doc/sphinxext/prs/8621.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "35fcbca549a1a7b91cbda18768b8dc8dcf32b547", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8622.json b/doc/sphinxext/prs/8622.json new file mode 100644 index 00000000000..aefd3140e16 --- /dev/null +++ b/doc/sphinxext/prs/8622.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "90f4dec944d6096b0a8860e6923572fedd7728cd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 16, + "d": 5 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 4, + "d": 3 + }, + "mne/io/_digitization.py": { + "a": 3, + "d": 155 + }, + "mne/io/kit/coreg.py": { + "a": 115, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 3 + }, + "mne/io/tests/test_meas_info.py": { + "a": 12, + "d": 13 + }, + "mne/utils/docs.py": { + "a": 14, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8624.json b/doc/sphinxext/prs/8624.json new file mode 100644 index 00000000000..225675b4fcd --- /dev/null +++ b/doc/sphinxext/prs/8624.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "cfd5659ba5639b1af9b64edd9e9701a58c3262c3", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 123, + "d": 50 + }, + "mne/viz/raw.py": { + "a": 0, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 12, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8626.json b/doc/sphinxext/prs/8626.json new file mode 100644 index 00000000000..810a0efb0f8 --- /dev/null +++ b/doc/sphinxext/prs/8626.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cfada9d37b4f9e2f01657779c1f2f89e7832f763", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 12, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8627.json b/doc/sphinxext/prs/8627.json new file mode 100644 index 00000000000..a0fa2635469 --- /dev/null +++ b/doc/sphinxext/prs/8627.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "46f3c151ba8c431a882daa609403d4f996c0b1e7", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_source_space_snr.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 19, + "d": 13 + }, + "tutorials/misc/plot_seeg.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8629.json b/doc/sphinxext/prs/8629.json new file mode 100644 index 00000000000..aad65441b6f --- /dev/null +++ b/doc/sphinxext/prs/8629.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c4c96152d2e28b95f2b3f644bd2fc5f2a3cf006a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/863.json b/doc/sphinxext/prs/863.json new file mode 100644 index 00000000000..fc34e76c928 --- /dev/null +++ 
b/doc/sphinxext/prs/863.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fe29b83339d418baf06b9b133622f0ec6b580d37", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/python_tutorial.rst": { + "a": 17, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8630.json b/doc/sphinxext/prs/8630.json new file mode 100644 index 00000000000..f9cd067af82 --- /dev/null +++ b/doc/sphinxext/prs/8630.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d282fbec34de472136dfd3d5449c591e84fc9300", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 4 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8631.json b/doc/sphinxext/prs/8631.json new file mode 100644 index 00000000000..b518f7c6b77 --- /dev/null +++ b/doc/sphinxext/prs/8631.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b3fd2f16895ecd785d02f2cbf7762919819cfa71", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 3, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8633.json b/doc/sphinxext/prs/8633.json new file mode 100644 index 00000000000..d2e425a44b8 --- /dev/null +++ b/doc/sphinxext/prs/8633.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5eeca84cef086a3ee6edab577b138ffa43ff965c", + "authors": [ + { + "n": "Jeff Stout", + "e": null + }, + { + "n": "Jeff Stout", + "e": "stoutjd@nih.gov" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/tests/test_raw.py": { + "a": 38, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8636.json b/doc/sphinxext/prs/8636.json new file mode 100644 index 00000000000..c1e137d844b --- /dev/null +++ b/doc/sphinxext/prs/8636.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "89cb6bbdc9547594a6a3ff5f524b9e68289588f3", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + } + ], + "changes": { + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8638.json b/doc/sphinxext/prs/8638.json new file mode 100644 index 00000000000..a222a1d1ff3 --- /dev/null +++ b/doc/sphinxext/prs/8638.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "74e098581f06cbf078cdbb0eb3379e7e02d40bd4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 0, + "d": 1 + }, + "examples/visualization/plot_evoked_topomap.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 7, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 4, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 12, + "d": 6 + }, + "mne/viz/tests/test_topomap.py": { + "a": 9, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 60, + "d": 53 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8639.json 
b/doc/sphinxext/prs/8639.json new file mode 100644 index 00000000000..b5e52e968ca --- /dev/null +++ b/doc/sphinxext/prs/8639.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "4e4e70798535c6df881e7d840547be7f905fa3b0", + "authors": [ + { + "n": null, + "e": "olaf.hauk@mrc-cbu.cam.ac.uk" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 0, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 65, + "d": 20 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 18, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 22, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/864.json b/doc/sphinxext/prs/864.json new file mode 100644 index 00000000000..d2acf1f42d3 --- /dev/null +++ b/doc/sphinxext/prs/864.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e1bb84310f30e250573f43f17763eb15c39801b8", + "authors": [ + { + "n": "Andrew R. Dykstra", + "e": null + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 9, + "d": 6 + }, + "mne/source_estimate.py": { + "a": 25, + "d": 39 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8643.json b/doc/sphinxext/prs/8643.json new file mode 100644 index 00000000000..78de47f4f1a --- /dev/null +++ b/doc/sphinxext/prs/8643.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "e2357ad7848a3c3968678557ef5ded4602bed0c8", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/inverse/plot_source_space_snr.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_vector_mne_solution.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 5 + }, + "mne/viz/backends/_pyvista.py": { + "a": 14, + "d": 12 + }, + "mne/viz/backends/_utils.py": { + "a": 7, + "d": 0 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 2, + "d": 1 + }, + "tutorials/misc/plot_seeg.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8644.json b/doc/sphinxext/prs/8644.json new file mode 100644 index 00000000000..7cf93b07a4d --- /dev/null +++ b/doc/sphinxext/prs/8644.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a087895ebd69d98afc3e236677d7bf17a76d5921", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8645.json b/doc/sphinxext/prs/8645.json new file mode 100644 index 00000000000..3e0a8b6c162 --- /dev/null +++ b/doc/sphinxext/prs/8645.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ae9ed2332cf24209ec2a178e61c0d02fdd35b03c", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 9, + "d": 9 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8647.json b/doc/sphinxext/prs/8647.json new 
file mode 100644 index 00000000000..769fa0fd291 --- /dev/null +++ b/doc/sphinxext/prs/8647.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "76b64ff884474c7b1569cf74d6183e2ab13b7e9f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 33, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8648.json b/doc/sphinxext/prs/8648.json new file mode 100644 index 00000000000..7597744fec6 --- /dev/null +++ b/doc/sphinxext/prs/8648.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "52e97df12b6da69a66e645c951a35795733ac9a5", + "authors": [ + { + "n": "dengemann", + "e": "denis.engemann@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/decoding/plot_ssd_spatial_filters.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ssd.py": { + "a": 22, + "d": 16 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 94, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8651.json b/doc/sphinxext/prs/8651.json new file mode 100644 index 00000000000..334289f4a81 --- /dev/null +++ b/doc/sphinxext/prs/8651.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5d7ddd2bb64f22a150ff49e7f2c61f7dab2a6e53", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 2, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 8, + "d": 2 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 13, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8653.json b/doc/sphinxext/prs/8653.json new file mode 100644 index 00000000000..e385a3aeba7 --- /dev/null +++ b/doc/sphinxext/prs/8653.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e40e592d3d2c6aeaa5c44bab2602d4aa486fc830", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 0, + "d": 1 + }, + "examples/preprocessing/plot_eeg_csd.py": { + "a": 1, + "d": 1 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8655.json b/doc/sphinxext/prs/8655.json new file mode 100644 index 00000000000..5e79be68c63 --- /dev/null +++ b/doc/sphinxext/prs/8655.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "51f38b25034cbe24ebcdf0a1d4158597a64e04e0", + "authors": [ + { + "n": "Josh Koen", + "e": "koen.joshua@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/references.bib": { + "a": 35, + "d": 0 + }, + "tutorials/evoked/30_eeg_erp.py": { + "a": 247, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8656.json b/doc/sphinxext/prs/8656.json new file mode 100644 index 00000000000..04c6a394be4 --- /dev/null +++ b/doc/sphinxext/prs/8656.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "52c4fffc32fdcce39e3c8b654f77b2ba5f89543f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_coreg.py": { + "a": 1, + "d": 
1 + }, + "mne/utils/config.py": { + "a": 3, + "d": 2 + }, + "server_environment.yml": { + "a": 10, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8657.json b/doc/sphinxext/prs/8657.json new file mode 100644 index 00000000000..0fc117a1838 --- /dev/null +++ b/doc/sphinxext/prs/8657.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "391f462317e5487b6dceeaa3c5361fe97f9deba2", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_notebook.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8658.json b/doc/sphinxext/prs/8658.json new file mode 100644 index 00000000000..e13f4b8ef4c --- /dev/null +++ b/doc/sphinxext/prs/8658.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d4f99de184f80a069a42302e9427f2e9b0aad8fb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 1, + "d": 0 + }, + "mne/connectivity/envelope.py": { + "a": 33, + "d": 6 + }, + "mne/connectivity/tests/test_envelope.py": { + "a": 40, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/866.json b/doc/sphinxext/prs/866.json new file mode 100644 index 00000000000..0870ca58e30 --- /dev/null +++ b/doc/sphinxext/prs/866.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "5995a3a5cbd1fa935c106fc72a688d59b21c7f65", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 1 + }, + "bin/mne": { + "a": 46, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_flash_bem_model.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_maxfilter.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_surf2bem.py": { + "a": 0, + "d": 0 + }, + "setup.py": { + "a": 5, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8662.json b/doc/sphinxext/prs/8662.json new file mode 100644 index 00000000000..8047680a507 --- /dev/null +++ b/doc/sphinxext/prs/8662.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "ae750c60b3a6f97b8ba610fecf272c10c5a4a227", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 2 + }, + "mne/time_frequency/psd.py": { + "a": 18, + "d": 14 + }, + "mne/time_frequency/tests/test_psd.py": { + "a": 16, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 2, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 4, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 7, + "d": 2 + }, + "mne/viz/tests/test_figure.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8663.json b/doc/sphinxext/prs/8663.json new file mode 100644 index 00000000000..92b6ec5f231 --- /dev/null +++ b/doc/sphinxext/prs/8663.json @@ -0,0 +1,43 @@ +{ 
+ "merge_commit_sha": "88d9be9f8694911c2a923154411f24229b437310", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 10, + "d": 5 + }, + "mne/viz/_brain/_brain.py": { + "a": 7, + "d": 4 + }, + "mne/viz/_brain/colormap.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 16, + "d": 8 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 2 + }, + "tutorials/misc/plot_seeg.py": { + "a": 1, + "d": 0 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8665.json b/doc/sphinxext/prs/8665.json new file mode 100644 index 00000000000..0cea7def619 --- /dev/null +++ b/doc/sphinxext/prs/8665.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "1b9d2228bc40f817ef1765686bc2ec6e81079d13", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 130, + "d": 130 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 31, + "d": 33 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 9, + "d": 5 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 5, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 23, + "d": 18 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 13, + "d": 15 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 192, + "d": 164 + }, + "mne/preprocessing/tests/test_ssp.py": { + "a": 124, + "d": 117 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8668.json b/doc/sphinxext/prs/8668.json new file mode 100644 index 00000000000..8429c6e3d08 --- /dev/null +++ b/doc/sphinxext/prs/8668.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "92a41a284093a845697b81dab89afed8bc3eeb4b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 2, + "d": 12 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 8 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ica.py": { + "a": 14, + "d": 62 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 12, + "d": 86 + }, + "mne/source_estimate.py": { + "a": 9, + "d": 20 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 9 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 12 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8669.json b/doc/sphinxext/prs/8669.json new file mode 100644 index 00000000000..ab3af8371f3 --- /dev/null +++ b/doc/sphinxext/prs/8669.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "415ae684e35a8facbe9e8c5c049b2461de119fe1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".mailmap": { + "a": 48, + "d": 6 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "codemeta.json": { + "a": 1413, + "d": 0 + }, + "requirements.txt": { + "a": 2, + "d": 2 + }, + "setup.py": { + "a": 10, + "d": 1 + }, + "tools/generate_codemeta.py": { + "a": 137, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8672.json b/doc/sphinxext/prs/8672.json new file mode 100644 index 00000000000..0a0ef1c92af --- /dev/null +++ 
b/doc/sphinxext/prs/8672.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3cc39e31a9f74f72e27dd64855d533c89cd74c58", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 2, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 7 + }, + "tutorials/evoked/plot_20_visualize_evoked.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 45, + "d": 45 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 19, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8673.json b/doc/sphinxext/prs/8673.json new file mode 100644 index 00000000000..2594958aba9 --- /dev/null +++ b/doc/sphinxext/prs/8673.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7a689df121d1193d0efec099fbc26b63db40e583", + "authors": [ + { + "n": "Mainak Jas", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/install/advanced.rst": { + "a": 37, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8675.json b/doc/sphinxext/prs/8675.json new file mode 100644 index 00000000000..d9a4836ee68 --- /dev/null +++ b/doc/sphinxext/prs/8675.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1371d440e9e162c6b734276dad501f363363172e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 7, + "d": 7 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8676.json b/doc/sphinxext/prs/8676.json new file mode 100644 index 00000000000..41711cf06d2 --- /dev/null +++ b/doc/sphinxext/prs/8676.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8204c90211bf6a0d9385cd9e8f5fe41741a54ea2", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/868.json b/doc/sphinxext/prs/868.json new file mode 100644 index 00000000000..033fde52b7d --- /dev/null +++ b/doc/sphinxext/prs/868.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "23d0f05c64ab129d05f08cbabd5cb28926505032", + "authors": [ + { + "n": "Yaroslav Halchenko", + "e": null + } + ], + "changes": { + "bin/mne": { + "a": 1, + "d": 11 + }, + "mne/commands/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 3, + "d": 3 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_maxfilter.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_surf2bem.py": { + "a": 7, + "d": 11 + }, + "mne/commands/utils.py": { + "a": 39, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8682.json b/doc/sphinxext/prs/8682.json new file mode 100644 index 00000000000..d906307f596 --- /dev/null +++ b/doc/sphinxext/prs/8682.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "40629b6709e7352eecdd948e4b96307cef4a55c7", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + 
"requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8683.json b/doc/sphinxext/prs/8683.json new file mode 100644 index 00000000000..26b38c7846c --- /dev/null +++ b/doc/sphinxext/prs/8683.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "14a0e9a81350ee42a5f7a9eee6bf3408701433d0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 0, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8688.json b/doc/sphinxext/prs/8688.json new file mode 100644 index 00000000000..773a2b42a20 --- /dev/null +++ b/doc/sphinxext/prs/8688.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "53b4c1807dc16189db82d2ffc9a45160c2ee3c7a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 14, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8689.json b/doc/sphinxext/prs/8689.json new file mode 100644 index 00000000000..d6973893295 --- /dev/null +++ b/doc/sphinxext/prs/8689.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "303fabe4438c366194b1ea5bf7f507813ff5bab1", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8690.json b/doc/sphinxext/prs/8690.json new file mode 100644 index 00000000000..9feb180a5b4 --- /dev/null +++ b/doc/sphinxext/prs/8690.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a36cd9e27f893476a7961d77203a96be2bc6a01c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 71, + "d": 38 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8695.json b/doc/sphinxext/prs/8695.json new file mode 100644 index 00000000000..831121cb6ba --- /dev/null +++ b/doc/sphinxext/prs/8695.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "c4215cb7bcf0e9709549fec09d3ed751b3678fba", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 8, + "d": 0 + }, + ".github/workflows/compat_old.yml": { + "a": 8, + "d": 0 + }, + ".github/workflows/linux_conda.yml": { + "a": 8, + "d": 0 + }, + ".github/workflows/linux_pip.yml": { + "a": 8, + "d": 0 + }, + ".github/workflows/macos_conda.yml": { + "a": 8, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 23, + "d": 0 + }, + "tools/get_testing_version.sh": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8696.json b/doc/sphinxext/prs/8696.json new file mode 100644 index 00000000000..f2ad1dfcbf6 --- /dev/null +++ b/doc/sphinxext/prs/8696.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "1386e1444f2097d247592ec7e6a6c7190aafceb6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/tests/test_datasets.py": { + "a": 54, + "d": 12 + }, + "mne/datasets/utils.py": { + "a": 13, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8697.json b/doc/sphinxext/prs/8697.json new file mode 100644 index 00000000000..eb3aa8bd040 --- /dev/null +++ b/doc/sphinxext/prs/8697.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "9f231cfa8416b5bfe06dc996fa6d50c87b488d37", + "authors": [ + { + "n": "Eric 
Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 0, + "d": 6 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_multidict_reweighted_tfmxne.py": { + "a": 26, + "d": 39 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 36, + "d": 25 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/_stft.py": { + "a": 19, + "d": 20 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 13, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8698.json b/doc/sphinxext/prs/8698.json new file mode 100644 index 00000000000..b452795c236 --- /dev/null +++ b/doc/sphinxext/prs/8698.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "77e7735e2a5b02c597ae5df353179e83f888859b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".github/ISSUE_TEMPLATE/bug_report.md": { + "a": 3, + "d": 5 + }, + ".github/ISSUE_TEMPLATE/config.yml": { + "a": 2, + "d": 5 + }, + ".github/ISSUE_TEMPLATE/question.md": { + "a": 3, + "d": 7 + }, + "CONTRIBUTING.rst": { + "a": 3, + "d": 6 + }, + "README.rst": { + "a": 3, + "d": 3 + }, + "doc/install/advanced.rst": { + "a": 1, + "d": 1 + }, + "doc/install/contributing.rst": { + "a": 3, + "d": 5 + }, + "doc/install/mne_c.rst": { + "a": 2, + "d": 2 + }, + "doc/install/mne_python.rst": { + "a": 2, + "d": 2 + }, + "doc/install/pre_install.rst": { + "a": 2, + "d": 2 + }, + "doc/links.inc": { + "a": 1, + "d": 2 + }, + "doc/overview/datasets_index.rst": { + "a": 2, + "d": 2 + }, + "doc/overview/faq.rst": { + "a": 9, + "d": 18 + }, + "doc/overview/get_help.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8702.json b/doc/sphinxext/prs/8702.json new file mode 100644 index 00000000000..8bec860e938 --- /dev/null +++ b/doc/sphinxext/prs/8702.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "68e02f51918b038979e4bbd1ed295e412fbb7a8c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 137, + "d": 61 + }, + "mne/utils/config.py": { + "a": 6, + "d": 1 + }, + "requirements_doc.txt": { + "a": 2, + "d": 2 + }, + "requirements_testing.txt": { + "a": 3, + "d": 3 + }, + "tools/circleci_download.sh": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8707.json b/doc/sphinxext/prs/8707.json new file mode 100644 index 00000000000..fb1335755a5 --- /dev/null +++ b/doc/sphinxext/prs/8707.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "01b8b364698f86630d22fad573058233ab6a58a5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/rank.py": { + "a": 0, + "d": 19 + }, + "mne/utils/docs.py": { + "a": 41, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8708.json b/doc/sphinxext/prs/8708.json new file mode 100644 index 00000000000..46a1cf1d156 --- /dev/null +++ b/doc/sphinxext/prs/8708.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "72c8f61e615b04b2f00e9e36202572cd23ae0c47", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" 
+ }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 3, + "d": 1 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 83, + "d": 59 + }, + "mne/viz/_brain/tests/test.ipynb": { + "a": 39, + "d": 5 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 52, + "d": 1 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 0, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 42, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 13, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 44, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8709.json b/doc/sphinxext/prs/8709.json new file mode 100644 index 00000000000..708b9be5e24 --- /dev/null +++ b/doc/sphinxext/prs/8709.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d9a47232e7eaf6382a7c89c8cc6688f6a6695731", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 17, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8711.json b/doc/sphinxext/prs/8711.json new file mode 100644 index 00000000000..110b7aa0572 --- /dev/null +++ b/doc/sphinxext/prs/8711.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "22770ec3d2fbd83e40fa98377ff1a86eac52e6e2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8712.json b/doc/sphinxext/prs/8712.json new file mode 100644 index 00000000000..0b6c4d45a57 --- /dev/null +++ b/doc/sphinxext/prs/8712.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8756d81963f0fe742667bb40d2c66ca37722a69f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nicolet/nicolet.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8713.json b/doc/sphinxext/prs/8713.json new file mode 100644 index 00000000000..c8983ef4365 --- /dev/null +++ b/doc/sphinxext/prs/8713.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "7d5cd307bc1c7de4e7587624fda263c4b4230ce5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 21, + "d": 67 + }, + "mne/tests/test_report.py": { + "a": 38, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_topomap.py": { + "a": 1, + "d": 0 + }, + "tools/setup_xvfb.sh": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8720.json b/doc/sphinxext/prs/8720.json new file mode 100644 index 00000000000..1ff9c76e74a --- /dev/null +++ b/doc/sphinxext/prs/8720.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "48d848e8ca0972da9edfaa7701003b3735bdf40e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + 
"n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + ".github/ISSUE_TEMPLATE/bug_report.md": { + "a": 3, + "d": 5 + }, + ".github/ISSUE_TEMPLATE/config.yml": { + "a": 2, + "d": 5 + }, + ".github/ISSUE_TEMPLATE/question.md": { + "a": 3, + "d": 7 + }, + "CONTRIBUTING.rst": { + "a": 3, + "d": 6 + }, + "README.rst": { + "a": 3, + "d": 3 + }, + "doc/install/advanced.rst": { + "a": 1, + "d": 1 + }, + "doc/install/contributing.rst": { + "a": 3, + "d": 5 + }, + "doc/install/mne_c.rst": { + "a": 2, + "d": 2 + }, + "doc/install/mne_python.rst": { + "a": 2, + "d": 2 + }, + "doc/install/pre_install.rst": { + "a": 2, + "d": 2 + }, + "doc/links.inc": { + "a": 1, + "d": 2 + }, + "doc/overview/datasets_index.rst": { + "a": 2, + "d": 2 + }, + "doc/overview/faq.rst": { + "a": 9, + "d": 18 + }, + "doc/overview/get_help.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8721.json b/doc/sphinxext/prs/8721.json new file mode 100644 index 00000000000..34b432294a0 --- /dev/null +++ b/doc/sphinxext/prs/8721.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f2caf79ae5db82378ef0ed717992312b8bdefff4", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/rank.py": { + "a": 0, + "d": 19 + }, + "mne/utils/docs.py": { + "a": 41, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8723.json b/doc/sphinxext/prs/8723.json new file mode 100644 index 00000000000..6fc06d3a2a2 --- /dev/null +++ b/doc/sphinxext/prs/8723.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "700d7c0a06655933adb9d7806153a7d00bbdde25", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "examples/visualization/plot_make_report.py": { + "a": 0, + "d": 53 + }, + "mne/report.py": { + "a": 64, + "d": 29 + }, + "tutorials/misc/plot_report.py": { + "a": 20, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8725.json b/doc/sphinxext/prs/8725.json new file mode 100644 index 00000000000..71631358e10 --- /dev/null +++ b/doc/sphinxext/prs/8725.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "8808612e787773118fcc3a0e5b8b321da0e35c8a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/report.py": { + "a": 79, + "d": 61 + }, + "mne/viz/_brain/mplcanvas.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 6 + }, + "mne/viz/misc.py": { + "a": 13, + "d": 8 + }, + "mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 5 + }, + "mne/viz/utils.py": { + "a": 17, + "d": 0 + }, + "tutorials/misc/plot_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8727.json b/doc/sphinxext/prs/8727.json new file mode 100644 index 00000000000..85efac4144f --- /dev/null +++ b/doc/sphinxext/prs/8727.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "038832403ae7d2cf3ea4d05a944b2bb354bb386f", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/forward/_compute_forward.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8728.json b/doc/sphinxext/prs/8728.json new file mode 100644 index 00000000000..f1f45236812 --- /dev/null +++ b/doc/sphinxext/prs/8728.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"99f957fa3a3289da8bedc113ec187f7397811277", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/io/reference.py": { + "a": 3, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 36, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8730.json b/doc/sphinxext/prs/8730.json new file mode 100644 index 00000000000..226be4db901 --- /dev/null +++ b/doc/sphinxext/prs/8730.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1a990c8c0404824b6aea0daa338e5d6113d4975e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 18, + "d": 7 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 13, + "d": 1 + }, + "tutorials/misc/plot_report.py": { + "a": 28, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8731.json b/doc/sphinxext/prs/8731.json new file mode 100644 index 00000000000..c52212b306f --- /dev/null +++ b/doc/sphinxext/prs/8731.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f394bbd643c49544bffd157ae42e341568d54c6e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8734.json b/doc/sphinxext/prs/8734.json new file mode 100644 index 00000000000..b02006084c0 --- /dev/null +++ b/doc/sphinxext/prs/8734.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "81051e6eb9ec1a58c1b1f1c956dddfedfcf6b47f", + "authors": [ + { + "n": "Tristan Stenner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/nedf/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/nedf/nedf.py": { + "a": 217, + "d": 0 + }, + "mne/io/nedf/tests/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/nedf/tests/test_nedf.py": { + "a": 131, + "d": 0 + }, + "mne/io/utils.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8736.json b/doc/sphinxext/prs/8736.json new file mode 100644 index 00000000000..c83b7a1b4bf --- /dev/null +++ b/doc/sphinxext/prs/8736.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "826ddd2f669fb35e574698042378c83e9b1217ca", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 9, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8738.json b/doc/sphinxext/prs/8738.json new file mode 100644 index 00000000000..825f93a5759 --- /dev/null +++ b/doc/sphinxext/prs/8738.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "5f17b357bbd549626d92c73f82daede8751bb711", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" 
+ }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/overview/datasets_index.rst": { + "a": 6, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 33, + "d": 1 + }, + "mne/datasets/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 13, + "d": 24 + }, + "mne/datasets/_infant/ANTS1-0Months3T.txt": { + "a": 117, + "d": 0 + }, + "mne/datasets/_infant/ANTS10-5Months3T.txt": { + "a": 115, + "d": 0 + }, + "mne/datasets/_infant/ANTS12-0Months3T.txt": { + "a": 121, + "d": 0 + }, + "mne/datasets/_infant/ANTS15-0Months3T.txt": { + "a": 121, + "d": 0 + }, + "mne/datasets/_infant/ANTS18-0Months3T.txt": { + "a": 121, + "d": 0 + }, + "mne/datasets/_infant/ANTS2-0Weeks3T.txt": { + "a": 117, + "d": 0 + }, + "mne/datasets/_infant/ANTS2-0Years3T.txt": { + "a": 121, + "d": 0 + }, + "mne/datasets/_infant/ANTS3-0Months3T.txt": { + "a": 121, + "d": 0 + }, + "mne/datasets/_infant/ANTS4-5Months3T.txt": { + "a": 121, + "d": 0 + }, + "mne/datasets/_infant/ANTS6-0Months3T.txt": { + "a": 121, + "d": 0 + }, + "mne/datasets/_infant/ANTS7-5Months3T.txt": { + "a": 121, + "d": 0 + }, + "mne/datasets/_infant/ANTS9-0Months3T.txt": { + "a": 121, + "d": 0 + }, + "mne/datasets/_infant/base.py": { + "a": 94, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 18, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 4, + "d": 2 + }, + "setup.py": { + "a": 2, + "d": 0 + }, + "tutorials/source-modeling/plot_eeg_no_mri.py": { + "a": 65, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8739.json b/doc/sphinxext/prs/8739.json new file mode 100644 index 00000000000..8b5f28d1871 --- /dev/null +++ b/doc/sphinxext/prs/8739.json @@ -0,0 +1,187 @@ +{ + "merge_commit_sha": "e4fcd77a60bc50085640aec34e9fba1f1c74a8b3", + "authors": [ + { + "n": "Richard M. 
Köhler", + "e": "koehler.richard@charite.de" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "codemeta.json": { + "a": 2, + "d": 1 + }, + "doc/_includes/channel_types.rst": { + "a": 2, + "d": 0 + }, + "doc/_includes/units.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 14, + "d": 12 + }, + "mne/channels/montage.py": { + "a": 4, + "d": 4 + }, + "mne/channels/tests/test_channels.py": { + "a": 7, + "d": 3 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 2 + }, + "mne/cov.py": { + "a": 10, + "d": 6 + }, + "mne/defaults.py": { + "a": 17, + "d": 15 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 6, + "d": 4 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 4, + "d": 2 + }, + "mne/io/constants.py": { + "a": 2, + "d": 0 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 4, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 3 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 23, + "d": 14 + }, + "mne/io/reference.py": { + "a": 5, + "d": 5 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 25, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 22, + "d": 6 + }, + "mne/preprocessing/ica.py": { + "a": 8, + "d": 5 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 24, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 5, + "d": 5 + }, + "mne/tests/test_epochs.py": { + "a": 20, + "d": 9 + }, + "mne/tests/test_filter.py": { + "a": 3, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 10, + "d": 7 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 8, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 5 + }, + "tools/generate_codemeta.py": { + "a": 2, + "d": 1 + }, + "tutorials/raw/plot_10_raw_overview.py": { + "a": 3, + "d": 3 + }, + "tutorials/simulation/plot_creating_data_structures.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/874.json b/doc/sphinxext/prs/874.json new file mode 100644 index 00000000000..c4b2980c418 --- /dev/null +++ b/doc/sphinxext/prs/874.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "301e1767ccc7d1f72e2412079e451a42db08807a", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8741.json b/doc/sphinxext/prs/8741.json new file mode 100644 index 00000000000..fabacfe92c8 --- /dev/null +++ b/doc/sphinxext/prs/8741.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "203d6fe6e13e6f7beac5258368784f24fc084b73", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 18, + "d": 4 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 3, 
+ "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 6, + "d": 1 + }, + "mne/viz/backends/_notebook.py": { + "a": 24, + "d": 9 + }, + "mne/viz/backends/_qt.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8742.json b/doc/sphinxext/prs/8742.json new file mode 100644 index 00000000000..4ebe99c03ab --- /dev/null +++ b/doc/sphinxext/prs/8742.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0c4db6f8ef12999d119085aa7c496cadb32411bd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8744.json b/doc/sphinxext/prs/8744.json new file mode 100644 index 00000000000..c02812493e3 --- /dev/null +++ b/doc/sphinxext/prs/8744.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d528da8e94f5b4754e529a194c7393958b7a493b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 58, + "d": 27 + }, + "mne/tests/test_report.py": { + "a": 47, + "d": 8 + }, + "server_environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8752.json b/doc/sphinxext/prs/8752.json new file mode 100644 index 00000000000..1cbf898c743 --- /dev/null +++ b/doc/sphinxext/prs/8752.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3a83e2cc6b5323e7b8bf97d1f9b0daa3698c1f1d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8754.json b/doc/sphinxext/prs/8754.json new file mode 100644 index 00000000000..8651d014390 --- /dev/null +++ b/doc/sphinxext/prs/8754.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "63927883e90cd65bff4ee947edd6322b175d2ed6", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 3 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 3, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8757.json b/doc/sphinxext/prs/8757.json new file mode 100644 index 00000000000..9ea9d84afc9 --- /dev/null +++ b/doc/sphinxext/prs/8757.json @@ -0,0 +1,651 @@ +{ + "merge_commit_sha": "b1d4124b29bde8c5c0ea97fc0ac34d38eb05fb58", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Joris Van den Bossche", + "e": "jorisvandenbossche@gmail.com" + } + ], + "changes": { + ".mailmap": { + "a": 1, + "d": 0 + }, + "doc/_includes/bem_model.rst": { + "a": 1, + "d": 6 + }, + "doc/_includes/channel_interpolation.rst": { + "a": 0, + "d": 6 + }, + "doc/_includes/forward.rst": { + "a": 0, + "d": 9 + }, + "doc/_includes/inverse.rst": { + "a": 0, + "d": 9 + }, + "doc/_includes/morph.rst": { + "a": 0, + "d": 5 + }, + "doc/_includes/ssp.rst": { + "a": 1, + "d": 1 + }, + "doc/_static/flag-icon.css": { + "a": 0, + "d": 23 + }, + "doc/_static/font-awesome.css": { + "a": 0, + "d": 2337 + }, + "doc/_static/fonts/fontawesome/fontawesome-webfont.eot": { + "a": 0, + "d": 0 + }, + 
"doc/_static/fonts/fontawesome/fontawesome-webfont.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/fontawesome/fontawesome-webfont.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/fontawesome/fontawesome-webfont.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Light.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-LightIt.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Light.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-LightIt.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Light.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-LightIt.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Light.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-LightIt.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fr.svg": { + "a": 0, + "d": 7 + }, + "doc/_static/mne_logo_small.svg": { + "a": 78, + "d": 66 + }, + "doc/_static/scrollfix.js": { + "a": 31, + "d": 0 + }, + "doc/_static/style.css": { + "a": 144, + "d": 550 + }, + "doc/_static/us.svg": { + "a": 0, + "d": 18 + }, + "doc/_templates/docs-navbar.html": { + "a": 68, + "d": 0 + }, + "doc/_templates/docs-toc.html": { + "a": 17, + "d": 0 + }, + "doc/_templates/layout.html": { + "a": 60, + "d": 74 + }, + "doc/_templates/navbar.html": { + "a": 0, + "d": 35 + }, + "doc/_templates/sidebar-funders.html": { + "a": 17, + "d": 0 + }, + "doc/_templates/sidebar-quicklinks.html": { + "a": 12, + "d": 0 + }, + "doc/bibliography.rst": { + "a": 4, + "d": 0 + }, + "doc/carousel.inc": { + "a": 0, + "d": 81 + }, + "doc/cited.rst": { + "a": 5, + "d": 7 + }, + "doc/conf.py": { + "a": 537, + "d": 486 + }, + "doc/connectivity.rst": { + "a": 20, + "d": 0 + }, + "doc/covariance.rst": { + "a": 19, + "d": 0 + }, + "doc/creating_from_arrays.rst": { + "a": 13, + "d": 0 + }, + "doc/datasets.rst": { + "a": 40, + "d": 0 + }, + "doc/decoding.rst": { + "a": 40, + "d": 0 + }, + "doc/events.rst": { + "a": 55, + "d": 0 + }, + "doc/file_io.rst": { + "a": 63, + "d": 0 + }, + "doc/forward.rst": { + "a": 60, + "d": 0 + }, + "doc/glossary.rst": { + "a": 85, + "d": 60 + }, + "doc/index.rst": { + "a": 21, + "d": 81 + }, + "doc/install/advanced.rst": { + "a": 29, + "d": 33 + }, + "doc/install/contributing.rst": { + "a": 7, + "d": 32 + }, + "doc/install/freesurfer.rst": { + "a": 0, + "d": 2 + }, + "doc/install/index.rst": { + "a": 33, + "d": 17 + }, + "doc/install/mne_c.rst": { + "a": 2, + "d": 4 + }, + "doc/install/mne_python.rst": { + "a": 15, + "d": 21 + }, + "doc/install/pre_install.rst": { + "a": 17, + "d": 19 + }, + "doc/inverse.rst": { + "a": 98, + "d": 0 + }, + "doc/logging.rst": { + "a": 47, + "d": 0 + }, + "doc/most_used_classes.rst": { + "a": 12, + "d": 0 + }, + "doc/mri.rst": { + "a": 24, + "d": 0 + }, + "doc/overview/cite.rst": { + "a": 35, + "d": 17 + }, + "doc/overview/cookbook.rst": { + "a": 11, + "d": 11 + }, + "doc/overview/datasets_index.rst": { + "a": 13, + "d": 18 + }, + "doc/overview/design_philosophy.rst": { + "a": 3, + "d": 3 + }, + "doc/overview/development.rst": { + "a": 28, + "d": 0 + }, + "doc/overview/faq.rst": { + "a": 2, + "d": 5 + }, + "doc/overview/get_help.rst": { + "a": 12, + "d": 7 + }, + "doc/overview/implementation.rst": { + "a": 0, + "d": 5 + }, + "doc/overview/index.rst": { + "a": 19, + "d": 21 + }, + 
"doc/overview/learn_python.rst": { + "a": 0, + "d": 2 + }, + "doc/overview/matlab.rst": { + "a": 0, + "d": 4 + }, + "doc/overview/roadmap.rst": { + "a": 0, + "d": 7 + }, + "doc/preprocessing.rst": { + "a": 181, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 33, + "d": 1099 + }, + "doc/reading_raw_data.rst": { + "a": 58, + "d": 0 + }, + "doc/realtime.rst": { + "a": 5, + "d": 0 + }, + "doc/references.bib": { + "a": 23, + "d": 1 + }, + "doc/references.rst": { + "a": 0, + "d": 70 + }, + "doc/report.rst": { + "a": 13, + "d": 0 + }, + "doc/sensor_space.rst": { + "a": 37, + "d": 0 + }, + "doc/simulation.rst": { + "a": 25, + "d": 0 + }, + "doc/source_space.rst": { + "a": 44, + "d": 0 + }, + "doc/sphinxext/gen_commands.py": { + "a": 0, + "d": 4 + }, + "doc/sphinxext/sphinx_bootstrap_divs/__init__.py": { + "a": 7, + "d": 7 + }, + "doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.css": { + "a": 2, + "d": 2 + }, + "doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.js": { + "a": 2, + "d": 2 + }, + "doc/statistics.rst": { + "a": 66, + "d": 0 + }, + "doc/time_frequency.rst": { + "a": 71, + "d": 0 + }, + "doc/upload_html.sh": { + "a": 0, + "d": 5 + }, + "doc/visualization.rst": { + "a": 78, + "d": 0 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/plot_source_power_spectrum_opm.py": { + "a": 0, + "d": 4 + }, + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 3, + "d": 3 + }, + "examples/visualization/plot_eeglab_head_sphere.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/plot_meg_sensors.py": { + "a": 0, + "d": 4 + }, + "examples/visualization/plot_publication_figure.py": { + "a": 0, + "d": 4 + }, + "examples/visualization/plot_roi_erpimage_by_rt.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 17, + "d": 10 + }, + "requirements_doc.txt": { + "a": 2, + "d": 3 + }, + "tools/generate_codemeta.py": { + "a": 1, + "d": 0 + }, + "tutorials/discussions/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/discussions/plot_background_filtering.py": { + "a": 0, + "d": 3 + }, + "tutorials/discussions/plot_background_ica.py": { + "a": 0, + "d": 4 + }, + "tutorials/discussions/plot_background_statistics.py": { + "a": 0, + "d": 5 + }, + "tutorials/epochs/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/plot_10_epochs_overview.py": { + "a": 0, + "d": 4 + }, + "tutorials/epochs/plot_20_visualize_epochs.py": { + "a": 0, + "d": 4 + }, + "tutorials/epochs/plot_30_epochs_metadata.py": { + "a": 0, + "d": 4 + }, + "tutorials/epochs/plot_40_epochs_to_data_frame.py": { + "a": 0, + "d": 4 + }, + "tutorials/evoked/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 1, + "d": 5 + }, + "tutorials/evoked/plot_20_visualize_evoked.py": { + "a": 1, + "d": 5 + }, + "tutorials/evoked/plot_eeg_erp.py": { + "a": 0, + "d": 4 + }, + "tutorials/intro/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/intro/plot_10_overview.py": { + "a": 0, + "d": 4 + }, + "tutorials/intro/plot_20_events_from_raw.py": { + "a": 0, + "d": 4 + }, + "tutorials/intro/plot_30_info.py": { + "a": 0, + "d": 4 + }, + "tutorials/intro/plot_40_sensor_locations.py": { + "a": 1, + "d": 5 + }, + "tutorials/intro/plot_50_configure_mne.py": { + "a": 0, + "d": 4 + }, + "tutorials/io/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/io/plot_10_reading_meg_data.py": { + "a": 0, + "d": 5 + }, + 
"tutorials/io/plot_20_reading_eeg_data.py": { + "a": 0, + "d": 5 + }, + "tutorials/io/plot_30_reading_fnirs_data.py": { + "a": 0, + "d": 5 + }, + "tutorials/machine-learning/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/plot_sensors_decoding.py": { + "a": 0, + "d": 4 + }, + "tutorials/misc/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/misc/plot_report.py": { + "a": 0, + "d": 4 + }, + "tutorials/preprocessing/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/plot_10_preprocessing_overview.py": { + "a": 0, + "d": 4 + }, + "tutorials/preprocessing/plot_15_handling_bad_channels.py": { + "a": 1, + "d": 5 + }, + "tutorials/preprocessing/plot_20_rejecting_bad_data.py": { + "a": 0, + "d": 4 + }, + "tutorials/preprocessing/plot_30_filtering_resampling.py": { + "a": 0, + "d": 4 + }, + "tutorials/preprocessing/plot_35_artifact_correction_regression.py": { + "a": 0, + "d": 4 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 0, + "d": 4 + }, + "tutorials/preprocessing/plot_45_projectors_background.py": { + "a": 1, + "d": 5 + }, + "tutorials/preprocessing/plot_50_artifact_correction_ssp.py": { + "a": 0, + "d": 4 + }, + "tutorials/preprocessing/plot_55_setting_eeg_reference.py": { + "a": 0, + "d": 4 + }, + "tutorials/preprocessing/plot_60_maxwell_filtering_sss.py": { + "a": 1, + "d": 5 + }, + "tutorials/preprocessing/plot_70_fnirs_processing.py": { + "a": 0, + "d": 4 + }, + "tutorials/raw/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/raw/plot_10_raw_overview.py": { + "a": 0, + "d": 4 + }, + "tutorials/raw/plot_20_event_arrays.py": { + "a": 0, + "d": 4 + }, + "tutorials/raw/plot_30_annotate_raw.py": { + "a": 0, + "d": 4 + }, + "tutorials/raw/plot_40_visualize_raw.py": { + "a": 0, + "d": 4 + }, + "tutorials/sample-datasets/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/sample-datasets/plot_sleep.py": { + "a": 0, + "d": 4 + }, + "tutorials/simulation/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/simulation/plot_creating_data_structures.py": { + "a": 0, + "d": 4 + }, + "tutorials/source-modeling/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_background_freesurfer.py": { + "a": 0, + "d": 3 + }, + "tutorials/source-modeling/plot_background_freesurfer_mne.py": { + "a": 0, + "d": 3 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 0, + "d": 5 + }, + "tutorials/source-modeling/plot_compute_covariance.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_eeg_mri_coords.py": { + "a": 0, + "d": 5 + }, + "tutorials/source-modeling/plot_eeg_no_mri.py": { + "a": 6, + "d": 9 + }, + "tutorials/source-modeling/plot_object_source_estimate.py": { + "a": 0, + "d": 3 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 0, + "d": 4 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 1, + "d": 4 + }, + "tutorials/stats-sensor-space/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/stats-source-space/README.txt": { + "a": 1, + "d": 1 + }, + "tutorials/time-freq/README.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8758.json b/doc/sphinxext/prs/8758.json new file mode 100644 index 00000000000..960c8bfb4ac --- /dev/null +++ b/doc/sphinxext/prs/8758.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "baaf9e33f2b54f6a40454a8aa146577bde1d6f58", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 7, + "d": 7 + }, + "CONTRIBUTING.rst": { + "a": 1, + 
"d": 1 + }, + "README.rst": { + "a": 9, + "d": 9 + }, + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/_includes/forward.rst": { + "a": 1, + "d": 1 + }, + "doc/_templates/layout.html": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/glossary.rst": { + "a": 1, + "d": 1 + }, + "doc/install/advanced.rst": { + "a": 6, + "d": 6 + }, + "doc/install/contributing.rst": { + "a": 17, + "d": 17 + }, + "doc/install/mne_python.rst": { + "a": 4, + "d": 4 + }, + "doc/links.inc": { + "a": 1, + "d": 1 + }, + "doc/overview/faq.rst": { + "a": 1, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_fetching.py": { + "a": 1, + "d": 1 + }, + "tools/circleci_download.sh": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8759.json b/doc/sphinxext/prs/8759.json new file mode 100644 index 00000000000..9adc2102528 --- /dev/null +++ b/doc/sphinxext/prs/8759.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "5feaee815b65ad4bf779987ee2ff9c5b154720c9", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/io/_read_raw.py": { + "a": 22, + "d": 12 + }, + "mne/io/egi/egi.py": { + "a": 10, + "d": 4 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/876.json b/doc/sphinxext/prs/876.json new file mode 100644 index 00000000000..e4e0db69437 --- /dev/null +++ b/doc/sphinxext/prs/876.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6626d2e6c29607636115640a81530d5870ff6531", + "authors": [ + { + "n": "Andrew R. Dykstra", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_viz.py": { + "a": 1, + "d": 1 + }, + "mne/viz.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8760.json b/doc/sphinxext/prs/8760.json new file mode 100644 index 00000000000..765cb901c6f --- /dev/null +++ b/doc/sphinxext/prs/8760.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "51fd3cd93f2017c7c30ac6f47c43ae38c4d7603b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 87, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 40, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8762.json b/doc/sphinxext/prs/8762.json new file mode 100644 index 00000000000..4f8fbb6a9ce --- /dev/null +++ b/doc/sphinxext/prs/8762.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "52011825822c9c79ded515c952077176f309e56e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 16, + "d": 0 + }, + "mne/tests/test_report.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8763.json b/doc/sphinxext/prs/8763.json new file mode 100644 index 00000000000..f886dadc662 --- /dev/null +++ b/doc/sphinxext/prs/8763.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a46546ba86bfea5903d306b5598ca20b9aa6296d", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": 
"Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/glossary.rst": { + "a": 16, + "d": 3 + }, + "examples/visualization/plot_eeglab_head_sphere.py": { + "a": 2, + "d": 3 + }, + "tutorials/intro/plot_40_sensor_locations.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8768.json b/doc/sphinxext/prs/8768.json new file mode 100644 index 00000000000..71fa9926e45 --- /dev/null +++ b/doc/sphinxext/prs/8768.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "94a75a5c6c4124c950d96c9eec7e1d69c068b7ef", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/event.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8769.json b/doc/sphinxext/prs/8769.json new file mode 100644 index 00000000000..81746c6a5af --- /dev/null +++ b/doc/sphinxext/prs/8769.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9a2031897735de9b7ffc0a9658b7c4f89b36d88c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 31, + "d": 54 + }, + "mne/viz/backends/_pyvista.py": { + "a": 180, + "d": 192 + }, + "mne/viz/backends/_utils.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8770.json b/doc/sphinxext/prs/8770.json new file mode 100644 index 00000000000..cdf3bbcee11 --- /dev/null +++ b/doc/sphinxext/prs/8770.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "250eabfb3e5ca4806d302d6de12935daea418cc6", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8771.json b/doc/sphinxext/prs/8771.json new file mode 100644 index 00000000000..2473edd88ee --- /dev/null +++ b/doc/sphinxext/prs/8771.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "fa779fc945dba0b77c466ad91aefe213a5be9681", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 10, + "d": 8 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 12, + "d": 5 + }, + "mne/viz/_brain/_brain.py": { + "a": 28, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 6, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 3, + "d": 1 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 2, + "d": 1 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8774.json b/doc/sphinxext/prs/8774.json new file mode 100644 index 00000000000..b694d470420 --- /dev/null +++ b/doc/sphinxext/prs/8774.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ae8a466051a43ac929eaf23e02b40c27a6d62e1a", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 11, + "d": 3 + }, + "mne/viz/tests/test_evoked.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8775.json b/doc/sphinxext/prs/8775.json new file mode 100644 index 00000000000..030a9daada9 --- /dev/null 
+++ b/doc/sphinxext/prs/8775.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "21b3c10445675b9578f2efd11b76719a74402587", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "doc/references.bib": { + "a": 45, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 30, + "d": 8 + }, + "mne/viz/tests/test_evoked.py": { + "a": 41, + "d": 17 + }, + "tutorials/evoked/plot_20_visualize_evoked.py": { + "a": 6, + "d": 1 + }, + "tutorials/evoked/plot_eeg_erp.py": { + "a": 47, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8777.json b/doc/sphinxext/prs/8777.json new file mode 100644 index 00000000000..b30336c122e --- /dev/null +++ b/doc/sphinxext/prs/8777.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "33f76fae73f37dac8b402fcf6dd4899e8eafe1c8", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/bem.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8778.json b/doc/sphinxext/prs/8778.json new file mode 100644 index 00000000000..f62a919161d --- /dev/null +++ b/doc/sphinxext/prs/8778.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6ae3b22033c745cce5cd5de9b92da54c13c36484", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 0, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 11 + }, + "mne/viz/tests/test_evoked.py": { + "a": 0, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8780.json b/doc/sphinxext/prs/8780.json new file mode 100644 index 00000000000..2592d5d3e90 --- /dev/null +++ b/doc/sphinxext/prs/8780.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f70f508a1bd5f73bdb4797e657340c11cc138d64", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 24, + "d": 8 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 29, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8783.json b/doc/sphinxext/prs/8783.json new file mode 100644 index 00000000000..a9e2ddba43f --- /dev/null +++ b/doc/sphinxext/prs/8783.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9db75c108ae2d475050c395e57dd0062f78ee44d", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 21, + "d": 9 + }, + "mne/tests/test_annotations.py": { + "a": 22, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8787.json b/doc/sphinxext/prs/8787.json new file mode 100644 index 00000000000..18355f9a6b6 --- /dev/null +++ b/doc/sphinxext/prs/8787.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d8d5474fa5ce7b81b3abef0359235e231f39a53b", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8789.json b/doc/sphinxext/prs/8789.json new file mode 100644 index 00000000000..e489a2e172f --- /dev/null +++ b/doc/sphinxext/prs/8789.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a36c74f8b88b93e41806c14465ff1308bf585c4a", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/egi/egimff.py": { + "a": 47, + "d": 10 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8790.json b/doc/sphinxext/prs/8790.json new file mode 100644 index 00000000000..92ee01fb53c --- /dev/null +++ b/doc/sphinxext/prs/8790.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b45fc5cdb83114ed855b5aed7fd2a0c3a01d4cb5", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/gui/_file_traits.py": { + "a": 74, + "d": 64 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 20, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8792.json b/doc/sphinxext/prs/8792.json new file mode 100644 index 00000000000..60bb0ad796e --- /dev/null +++ b/doc/sphinxext/prs/8792.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "74a7f32d561e84f918a4f42bd6471d204cafe2f9", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 59, + "d": 4 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8793.json b/doc/sphinxext/prs/8793.json new file mode 100644 index 00000000000..852443269eb --- /dev/null +++ b/doc/sphinxext/prs/8793.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "9fc8623eebd62d31039e90927744e376f5ee611c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.9.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/commands/mne_surf2bem.py": { + "a": 1, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 12, + "d": 2 + }, + "mne/gui/_coreg_gui.py": { + "a": 26, + "d": 10 + }, + "mne/gui/_fiducials_gui.py": { + "a": 21, + "d": 53 + }, + "mne/gui/_viewer.py": { + "a": 4, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8794.json b/doc/sphinxext/prs/8794.json new file mode 100644 index 00000000000..98e5d1967b0 --- /dev/null +++ b/doc/sphinxext/prs/8794.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "982b0e093865ec4cc8116c47a1fc7349fbdf682e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/label.py": { + "a": 10, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 45, + "d": 11 + }, + "mne/viz/_brain/surface.py": { + "a": 15, + "d": 15 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 1 + }, + 
"mne/viz/backends/_pyvista.py": { + "a": 21, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8799.json b/doc/sphinxext/prs/8799.json new file mode 100644 index 00000000000..6d3195028d2 --- /dev/null +++ b/doc/sphinxext/prs/8799.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "bd72072b1ff4690cdb2c4a7b36a15c93e25e8fee", + "authors": [ + { + "n": "ZHANG Zhi", + "e": "zhi271.zhang@connect.polyu.hk" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 25, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 29, + "d": 0 + }, + "mne/io/constants.py": { + "a": 20, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 3, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/88.json b/doc/sphinxext/prs/88.json new file mode 100644 index 00000000000..08da0198629 --- /dev/null +++ b/doc/sphinxext/prs/88.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "04188fbd948cf7dac0bae3035d83e9b236c5a04f", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_dipole_fit_result.py": { + "a": 76, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/dipole.py": { + "a": 40, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/880.json b/doc/sphinxext/prs/880.json new file mode 100644 index 00000000000..1b2d12bfb45 --- /dev/null +++ b/doc/sphinxext/prs/880.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d34a4b09382ff764a9ab4c7216b71740a1253206", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "setup.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8800.json b/doc/sphinxext/prs/8800.json new file mode 100644 index 00000000000..72ea4607b77 --- /dev/null +++ b/doc/sphinxext/prs/8800.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cff4c3709aaa9f2a50e10fca29c199cd2f5bb526", + "authors": [ + { + "n": "Yu-Han Luo", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8801.json b/doc/sphinxext/prs/8801.json new file mode 100644 index 00000000000..47a1f333ce8 --- /dev/null +++ b/doc/sphinxext/prs/8801.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "59429c3b8022f61cf3539dc421cadff4ab675d24", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/baseline.py": { + "a": 5, + "d": 3 + }, + "mne/conftest.py": { + "a": 22, + "d": 13 + }, + "mne/epochs.py": { + "a": 32, + "d": 21 + }, + "mne/forward/forward.py": { + "a": 0, + "d": 1 + }, + "mne/io/base.py": { + "a": 20, + "d": 17 + }, + "mne/io/pick.py": { + "a": 7, + "d": 12 + }, + "mne/preprocessing/ica.py": { + "a": 11, + "d": 20 + }, + "mne/preprocessing/realign.py": { + "a": 0, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 18 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 0, 
+ "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 10, + "d": 3 + }, + "mne/utils/numerics.py": { + "a": 19, + "d": 1 + }, + "mne/utils/tests/test_numerics.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8804.json b/doc/sphinxext/prs/8804.json new file mode 100644 index 00000000000..1074b24376a --- /dev/null +++ b/doc/sphinxext/prs/8804.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "f31643185741aa6e46ef4869673e5178740c4980", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 17, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 13, + "d": 4 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 3, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8805.json b/doc/sphinxext/prs/8805.json new file mode 100644 index 00000000000..4de5cd7e920 --- /dev/null +++ b/doc/sphinxext/prs/8805.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "7e867ce57cc035c32f1362734031d250b2ac0734", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 23, + "d": 12 + }, + "mne/rank.py": { + "a": 53, + "d": 15 + }, + "mne/tests/test_cov.py": { + "a": 28, + "d": 8 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 6, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 13, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 7, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8806.json b/doc/sphinxext/prs/8806.json new file mode 100644 index 00000000000..cb488304c18 --- /dev/null +++ b/doc/sphinxext/prs/8806.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "9b57cbd8ec711e3af33846e62ad7d1fbbdcaf5cd", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Andres Rodriguez", + "e": "rodriguezandr@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Andres Rodriguez", + "e": "rodriguezandr@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/persyst/persyst.py": { + "a": 63, + "d": 15 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 64, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8807.json b/doc/sphinxext/prs/8807.json new file mode 100644 index 00000000000..835348689d1 --- /dev/null +++ b/doc/sphinxext/prs/8807.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c5c6d2306194393cbbbc4d7235d9dcc91d3851bc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 7, + "d": 2 + }, + "mne/io/_read_raw.py": { + "a": 20, + "d": 17 + }, + "mne/io/tests/test_read_raw.py": { + "a": 12, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 5, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 1 
+ } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8808.json b/doc/sphinxext/prs/8808.json new file mode 100644 index 00000000000..e347f98070f --- /dev/null +++ b/doc/sphinxext/prs/8808.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d8201c9bbb2ade3e20dae0284f0333a80a62d3a9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 5, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 23, + "d": 18 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 6, + "d": 4 + }, + "mne/viz/tests/test_3d.py": { + "a": 41, + "d": 20 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8809.json b/doc/sphinxext/prs/8809.json new file mode 100644 index 00000000000..d03b7b432b3 --- /dev/null +++ b/doc/sphinxext/prs/8809.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "32402f9000aba9ca7ad62fc0439ba8a8c9f8020f", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/tests/test_datasets.py": { + "a": 13, + "d": 9 + }, + "mne/datasets/utils.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/881.json b/doc/sphinxext/prs/881.json new file mode 100644 index 00000000000..caf0a9bf89d --- /dev/null +++ b/doc/sphinxext/prs/881.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7883ecbb4c81e076be6bd514cb81b4847947e8f8", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 18, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8811.json b/doc/sphinxext/prs/8811.json new file mode 100644 index 00000000000..de219cf14ba --- /dev/null +++ b/doc/sphinxext/prs/8811.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5a6688b94f97d365a0abf256d86109d6a092cc6a", + "authors": [ + { + "n": "Christopher J. 
Bailey", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8813.json b/doc/sphinxext/prs/8813.json new file mode 100644 index 00000000000..ba95ed05b83 --- /dev/null +++ b/doc/sphinxext/prs/8813.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "d4a1ce5d8a7f5db605543f09d29ccdd1ec92676f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "monkeyman192", + "e": "monkey_man_192@yahoo.com.au" + }, + { + "n": "monkeyman192", + "e": "monkey_man_192@yahoo.com.au" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 5, + "d": 1 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 89, + "d": 5 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/constants.py": { + "a": 1, + "d": 0 + }, + "mne/io/kit/coreg.py": { + "a": 18, + "d": 9 + }, + "mne/io/kit/kit.py": { + "a": 13, + "d": 7 + }, + "mne/tests/test_chpi.py": { + "a": 40, + "d": 15 + }, + "mne/viz/_3d.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8817.json b/doc/sphinxext/prs/8817.json new file mode 100644 index 00000000000..3a8e190676b --- /dev/null +++ b/doc/sphinxext/prs/8817.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9069a696fa9b55e3376cb8f4f013b8e8a40c1929", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 13, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8818.json b/doc/sphinxext/prs/8818.json new file mode 100644 index 00000000000..98710dbd8fa --- /dev/null +++ b/doc/sphinxext/prs/8818.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dbe91585fb505de2acf9ade215aa05bb1400c5bb", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 14, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8819.json b/doc/sphinxext/prs/8819.json new file mode 100644 index 00000000000..f96beb8d981 --- /dev/null +++ b/doc/sphinxext/prs/8819.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "02572faaeba4c8e33f0e7cde0f4a336224271d8b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/edf/tests/test_edf.py": { + "a": 7, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 10, + "d": 2 + }, + "mne/utils/tests/test_testing.py": { + "a": 10, + "d": 13 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 5, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/882.json b/doc/sphinxext/prs/882.json new file mode 100644 index 00000000000..a363ec5784d --- /dev/null +++ b/doc/sphinxext/prs/882.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "d9c062402b7a5efb89d606e132b1f70b91325eb8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/bti/raw.py": { + "a": 18, + "d": 16 + }, + "mne/fiff/bti/tests/test_bti.py": { + "a": 4, + "d": 3 + }, + "mne/fiff/kit/kit.py": { + "a": 8, + "d": 6 + }, + 
"mne/fiff/kit/tests/data/trans-sample.fif": { + "a": 0, + "d": 0 + }, + "mne/fiff/kit/tests/test_kit.py": { + "a": 3, + "d": 0 + }, + "mne/fiff/pick.py": { + "a": 12, + "d": 0 + }, + "mne/fiff/raw.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/tag.py": { + "a": 11, + "d": 6 + }, + "mne/fiff/write.py": { + "a": 11, + "d": 7 + }, + "mne/forward/_make_forward.py": { + "a": 30, + "d": 12 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 76, + "d": 25 + }, + "mne/source_space.py": { + "a": 12, + "d": 0 + }, + "mne/tests/test_source_space.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8820.json b/doc/sphinxext/prs/8820.json new file mode 100644 index 00000000000..59f3e63a517 --- /dev/null +++ b/doc/sphinxext/prs/8820.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9458a30bacc5fa96ca56c3a31b4cceb5925990a7", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 10, + "d": 6 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8821.json b/doc/sphinxext/prs/8821.json new file mode 100644 index 00000000000..ab3bdd97355 --- /dev/null +++ b/doc/sphinxext/prs/8821.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "6bcf041825abde39aefce603022f900c0be80024", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 40, + "d": 11 + }, + "mne/tests/test_epochs.py": { + "a": 37, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8822.json b/doc/sphinxext/prs/8822.json new file mode 100644 index 00000000000..d4c2741cd28 --- /dev/null +++ b/doc/sphinxext/prs/8822.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "1bc407a2814edd0537b08c6ffdbc8fbd60749bce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 68, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 31, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8823.json b/doc/sphinxext/prs/8823.json new file mode 100644 index 00000000000..892cea5cf59 --- /dev/null +++ b/doc/sphinxext/prs/8823.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "971c290a0dd2fc564607c6eeff042bde371e3608", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 17, + "d": 1 + }, + "mne/annotations.py": { + "a": 7, + "d": 1 + }, + "mne/bem.py": { + "a": 5, + "d": 10 + }, + "mne/coreg.py": { + "a": 2, + "d": 5 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 19, + "d": 17 + }, + "mne/datasets/kiloword/kiloword.py": { + "a": 6, + "d": 2 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 7, + "d": 5 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 7, + "d": 6 + }, + "mne/decoding/base.py": { + "a": 0, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 3, + "d": 2 + }, + "mne/dipole.py": { + "a": 0, + "d": 1 + }, + "mne/epochs.py": { + "a": 7, + "d": 4 + }, + "mne/event.py": { + "a": 11, + "d": 5 + }, + "mne/fixes.py": { + "a": 21, + "d": 20 + }, + "mne/io/base.py": { + 
"a": 0, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 10, + "d": 2 + }, + "mne/label.py": { + "a": 16, + "d": 5 + }, + "mne/preprocessing/_regress.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 3, + "d": 2 + }, + "mne/report.py": { + "a": 7, + "d": 1 + }, + "mne/simulation/source.py": { + "a": 2, + "d": 2 + }, + "mne/utils/mixin.py": { + "a": 0, + "d": 1 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8827.json b/doc/sphinxext/prs/8827.json new file mode 100644 index 00000000000..75ae44a2b49 --- /dev/null +++ b/doc/sphinxext/prs/8827.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8178cca9fa63011b4113b9f84a7bbe8a5a06285d", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "mne/chpi.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8828.json b/doc/sphinxext/prs/8828.json new file mode 100644 index 00000000000..6a713bc8326 --- /dev/null +++ b/doc/sphinxext/prs/8828.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2f7df8843401433b28af0278666fe629fd9f0c28", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 8 + }, + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 6, + "d": 4 + }, + "mne/viz/backends/_utils.py": { + "a": 11, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8829.json b/doc/sphinxext/prs/8829.json new file mode 100644 index 00000000000..55057f65517 --- /dev/null +++ b/doc/sphinxext/prs/8829.json @@ -0,0 +1,323 @@ +{ + "merge_commit_sha": "0735446dd5bfe265346d08aa64d6b7cee680cff8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 7, + "d": 7 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/_rap_music.py": { + "a": 4, + "d": 2 + }, + "mne/bem.py": { + "a": 2, + "d": 2 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 1 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 3, + "d": 3 + }, + "mne/connectivity/spectral.py": { + "a": 2, + "d": 1 + }, + "mne/cov.py": { + "a": 2, + "d": 1 + }, + "mne/cuda.py": { + "a": 3, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 3, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ssd.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 2, + "d": 1 + }, + "mne/dipole.py": { + "a": 14, + "d": 4 + }, + "mne/externals/h5io/_h5io.py": { + "a": 11, + "d": 5 + }, + "mne/filter.py": { + "a": 3, + "d": 1 + }, + "mne/fixes.py": { + "a": 28, + "d": 8 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 2, + "d": 1 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 2, + "d": 3 + }, + "mne/inverse_sparse/_gamma_map.py": { + "a": 4, + "d": 5 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 4, + "d": 5 + }, + 
"mne/inverse_sparse/mxne_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 19, + "d": 13 + }, + "mne/io/compensator.py": { + "a": 1, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 2, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 2 + }, + "mne/io/open.py": { + "a": 1, + "d": 1 + }, + "mne/io/proc_history.py": { + "a": 2, + "d": 2 + }, + "mne/io/proj.py": { + "a": 2, + "d": 2 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 2, + "d": 2 + }, + "mne/label.py": { + "a": 3, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 2, + "d": 3 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 9, + "d": 4 + }, + "mne/preprocessing/_csd.py": { + "a": 1, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 3, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/xdawn.py": { + "a": 2, + "d": 1 + }, + "mne/proj.py": { + "a": 3, + "d": 3 + }, + "mne/rank.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/metrics.py": { + "a": 1, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 8, + "d": 3 + }, + "mne/source_space.py": { + "a": 6, + "d": 3 + }, + "mne/stats/_adjacency.py": { + "a": 1, + "d": 1 + }, + "mne/stats/cluster_level.py": { + "a": 4, + "d": 1 + }, + "mne/stats/regression.py": { + "a": 3, + "d": 2 + }, + "mne/surface.py": { + "a": 4, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_filter.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_import_nesting.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/_stft.py": { + "a": 4, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 14, + "d": 10 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 3, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 4, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 5 + }, + "mne/transforms.py": { + "a": 8, + "d": 6 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/utils/_logging.py": { + "a": 2, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 51, + "d": 6 + }, + "mne/utils/fetching.py": { + "a": 4, + "d": 2 + }, + "mne/utils/linalg.py": { + "a": 40, + "d": 35 + }, + "mne/utils/numerics.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 3 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/883.json b/doc/sphinxext/prs/883.json new file mode 100644 index 00000000000..42fcf7e927b --- /dev/null +++ b/doc/sphinxext/prs/883.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b658f0636df1f118c1831eab4aac72bfd08608e4", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/kit/kit.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8831.json b/doc/sphinxext/prs/8831.json new file mode 100644 index 00000000000..548c2c4b2f0 --- /dev/null +++ b/doc/sphinxext/prs/8831.json @@ -0,0 +1,27 @@ +{ + 
"merge_commit_sha": "74750e6b53a26e92a0d509c8458790b5d42d5117", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 5, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 16, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8834.json b/doc/sphinxext/prs/8834.json new file mode 100644 index 00000000000..064eb6824ec --- /dev/null +++ b/doc/sphinxext/prs/8834.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "91f60810771db6cc24268be18eb910161779dea2", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/datasets.rst": { + "a": 1, + "d": 0 + }, + "doc/events.rst": { + "a": 1, + "d": 0 + }, + "doc/overview/datasets_index.rst": { + "a": 26, + "d": 0 + }, + "doc/references.bib": { + "a": 13, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 3, + "d": 2 + }, + "mne/datasets/erp_core/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/erp_core/erp_core.py": { + "a": 26, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 13, + "d": 5 + }, + "mne/epochs.py": { + "a": 295, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 76, + "d": 2 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "tools/circleci_download.sh": { + "a": 3, + "d": 0 + }, + "tutorials/epochs/plot_40_autogenerate_metadata.py": { + "a": 432, + "d": 0 + }, + "tutorials/epochs/plot_50_epochs_to_data_frame.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8835.json b/doc/sphinxext/prs/8835.json new file mode 100644 index 00000000000..f5c293c3b1e --- /dev/null +++ b/doc/sphinxext/prs/8835.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7b6b2c53d6f14fee8fa4515b9ca2a30ec0e7daeb", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Mainak Jas", + "e": "jasmainak@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Mainak Jas", + "e": "jasmainak@users.noreply.github.com" + } + ], + "changes": { + "mne/viz/backends/renderer.py": { + "a": 45, + "d": 45 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8838.json b/doc/sphinxext/prs/8838.json new file mode 100644 index 00000000000..c52e7c910cc --- /dev/null +++ b/doc/sphinxext/prs/8838.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fa83dadb9cd2d3607b91638aedd5b400059b149f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "codecov.yml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8839.json b/doc/sphinxext/prs/8839.json new file mode 100644 index 00000000000..9372170d437 --- /dev/null +++ b/doc/sphinxext/prs/8839.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c5f58fb48972127dcfc262a10591de2d7e390e98", + 
"authors": [ + { + "n": "apadee", + "e": "anna.padee@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/connectivity/spectral.py": { + "a": 25, + "d": 14 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 46, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/884.json b/doc/sphinxext/prs/884.json new file mode 100644 index 00000000000..dc82f3831c7 --- /dev/null +++ b/doc/sphinxext/prs/884.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "73a35fb025e7656901ecad508da5167744d5ecd3", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/forward/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8841.json b/doc/sphinxext/prs/8841.json new file mode 100644 index 00000000000..72aa205dab6 --- /dev/null +++ b/doc/sphinxext/prs/8841.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "19fb479de44865d5ce79ad5af539a06081fae0c5", + "authors": [ + { + "n": "Yu-Han Luo", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 31, + "d": 2 + }, + "mne/tests/test_bem.py": { + "a": 29, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 0 + }, + "tutorials/source-modeling/plot_fix_bem_in_blender.py": { + "a": 78, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8842.json b/doc/sphinxext/prs/8842.json new file mode 100644 index 00000000000..e7e7fe81e4a --- /dev/null +++ b/doc/sphinxext/prs/8842.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "a3719e58b8b8891e58571ac01513916964e8002d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/stats/cluster_level.py": { + "a": 15, + "d": 6 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 41, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 9, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8843.json b/doc/sphinxext/prs/8843.json new file mode 100644 index 00000000000..1eaa52b8666 --- /dev/null +++ b/doc/sphinxext/prs/8843.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bf57aa0fa63e1605355b4f89b76cadb6e88b94ff", + "authors": [ + { + "n": "Manorama Kadwani", + "e": "54645550+mkadwani@users.noreply.github.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/source-modeling/plot_fix_bem_in_blender.py": { + "a": 66, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8844.json b/doc/sphinxext/prs/8844.json new file mode 100644 index 00000000000..fb26e94d19f --- /dev/null +++ b/doc/sphinxext/prs/8844.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3034b4d2384f652cad4a44205a3db904620c20ec", + "authors": [ + { + "n": "enricovara", + "e": "enricovarano@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 1 + }, + 
"mne/io/fiff/tests/test_raw_fiff.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8845.json b/doc/sphinxext/prs/8845.json new file mode 100644 index 00000000000..740bb2ef14b --- /dev/null +++ b/doc/sphinxext/prs/8845.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c304072d1c67912da9b4ac62fe64e07ed41f2dd6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8846.json b/doc/sphinxext/prs/8846.json new file mode 100644 index 00000000000..c6fa659c78d --- /dev/null +++ b/doc/sphinxext/prs/8846.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4f5ccc3e3af7f3bd1dad8589f8639e33051f7b6f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 18, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8847.json b/doc/sphinxext/prs/8847.json new file mode 100644 index 00000000000..45bb4fb1db9 --- /dev/null +++ b/doc/sphinxext/prs/8847.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d8ce89703471bec29a43d3413ec69587625720b5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/linux_conda.yml": { + "a": 3, + "d": 0 + }, + ".github/workflows/linux_pip.yml": { + "a": 3, + "d": 0 + }, + ".github/workflows/macos_conda.yml": { + "a": 3, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 0, + "d": 6 + }, + "tools/get_minimal_commands.sh": { + "a": 29, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8849.json b/doc/sphinxext/prs/8849.json new file mode 100644 index 00000000000..c6abc1d942e --- /dev/null +++ b/doc/sphinxext/prs/8849.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6a7d660b72b3535ada54517c3a60f8d11bd6578a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 3 + }, + "tools/get_minimal_commands.sh": { + "a": 58, + "d": 44 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/885.json b/doc/sphinxext/prs/885.json new file mode 100644 index 00000000000..b171a56eda5 --- /dev/null +++ b/doc/sphinxext/prs/885.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2d903aa84fbcd719a17032dc4a7e8dcfdd54fb9d", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/tests/test_transforms.py": { + "a": 17, + "d": 1 + }, + "mne/transforms.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8851.json b/doc/sphinxext/prs/8851.json new file mode 100644 index 00000000000..648e30183de --- /dev/null +++ b/doc/sphinxext/prs/8851.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9c7bb88b324e8c94df0538fd7e165f8b469ace5f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8852.json b/doc/sphinxext/prs/8852.json new file mode 100644 index 00000000000..455f7134971 --- /dev/null +++ b/doc/sphinxext/prs/8852.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"5faf63705cd795a846f305d9545fc2fd72b1fdee", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 0 + }, + "tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8856.json b/doc/sphinxext/prs/8856.json new file mode 100644 index 00000000000..39a696b874e --- /dev/null +++ b/doc/sphinxext/prs/8856.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2c6b0dd6407c7c6b30f4a2437d85b88edbe08c62", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 2 + }, + "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8857.json b/doc/sphinxext/prs/8857.json new file mode 100644 index 00000000000..15d26406f33 --- /dev/null +++ b/doc/sphinxext/prs/8857.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "851885cb713b7e5b44775d40efdc4ad935ff1aa9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/bem.py": { + "a": 99, + "d": 47 + }, + "mne/dipole.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 7, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 5, + "d": 20 + }, + "mne/viz/_3d.py": { + "a": 33, + "d": 93 + }, + "mne/viz/tests/test_3d.py": { + "a": 8, + "d": 3 + }, + "tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8858.json b/doc/sphinxext/prs/8858.json new file mode 100644 index 00000000000..021bbcdb345 --- /dev/null +++ b/doc/sphinxext/prs/8858.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "97dc6e18e06bc39072db37b63feff51b76a413e8", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 7, + "d": 1 + }, + "mne/io/pick.py": { + "a": 4, + "d": 4 + }, + "tutorials/sample-datasets/plot_sleep.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8860.json b/doc/sphinxext/prs/8860.json new file mode 100644 index 00000000000..4cee7e803f6 --- /dev/null +++ b/doc/sphinxext/prs/8860.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7efad3b52c7b79460b5aa05475e1287c2c6acc4e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/_peak_finder.py": { + "a": 4, + "d": 3 + }, + "mne/preprocessing/eog.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8861.json b/doc/sphinxext/prs/8861.json new file mode 100644 index 00000000000..96148d9f93b --- 
/dev/null +++ b/doc/sphinxext/prs/8861.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "204720ea7b92831cdf2ca15602d96cf3839e85b4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/plot_sensor_connectivity.py": { + "a": 2, + "d": 3 + }, + "mne/connectivity/spectral.py": { + "a": 19, + "d": 10 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 17, + "d": 2 + }, + "mne/utils/_logging.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8862.json b/doc/sphinxext/prs/8862.json new file mode 100644 index 00000000000..cf3ce071775 --- /dev/null +++ b/doc/sphinxext/prs/8862.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "940a1fe97be5744bae9eba21b8aafbef02cd2a13", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".coveragerc": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 372, + "d": 370 + }, + "mne/viz/_brain/_linkviewer.py": { + "a": 25, + "d": 25 + }, + "mne/viz/_brain/callback.py": { + "a": 94, + "d": 112 + }, + "mne/viz/_brain/tests/test.ipynb": { + "a": 3, + "d": 3 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 23, + "d": 20 + }, + "mne/viz/backends/_abstract.py": { + "a": 92, + "d": 6 + }, + "mne/viz/backends/_notebook.py": { + "a": 201, + "d": 39 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 45 + }, + "mne/viz/backends/_qt.py": { + "a": 251, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8863.json b/doc/sphinxext/prs/8863.json new file mode 100644 index 00000000000..923a48a6118 --- /dev/null +++ b/doc/sphinxext/prs/8863.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "13d2dd486d12f711594bca828358cf89e56ffd40", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/utils/_logging.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8867.json b/doc/sphinxext/prs/8867.json new file mode 100644 index 00000000000..3e59239ef42 --- /dev/null +++ b/doc/sphinxext/prs/8867.json @@ -0,0 +1,139 @@ +{ + "merge_commit_sha": "4514ea7887ddef6357d0197b5271cb5020c669ce", + "authors": [ + { + "n": "Dominik Welke", + "e": "dominik.welke@web.de" + }, + { + "n": "kalenkovich", + "e": "e.kalenkovich@gmail.com" + }, + { + "n": "Zhenya", + "e": "e.kalenkovich@gmail.com" + }, + { + "n": "Zhenya", + "e": "e.kalenkovich@gmail.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { 
+ "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "kalenkovich", + "e": "e.kalenkovich@gmail.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/datasets.rst": { + "a": 1, + "d": 0 + }, + "doc/overview/datasets_index.rst": { + "a": 18, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/ssvep/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/ssvep/ssvep.py": { + "a": 30, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 13, + "d": 5 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "tools/circleci_download.sh": { + "a": 3, + "d": 0 + }, + "tutorials/time-freq/plot_ssvep.py": { + "a": 675, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8869.json b/doc/sphinxext/prs/8869.json new file mode 100644 index 00000000000..b124633fce7 --- /dev/null +++ b/doc/sphinxext/prs/8869.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b487dcf662a9249e67ab01823a75f227a637fd10", + "authors": [ + { + "n": "Christian O'Reilly", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/datasets/_infant/ANTS2-0Months3T.txt": { + "a": 117, + "d": 0 + }, + "mne/datasets/_infant/base.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8870.json b/doc/sphinxext/prs/8870.json new file mode 100644 index 00000000000..653bb45f33e --- /dev/null +++ b/doc/sphinxext/prs/8870.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "53f1b859d2bea2a432e153a02c386b428c71d566", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.12.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 3, + "d": 2 + }, + "doc/sensor_space.rst": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 6, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/channels/__init__.py": { + "a": 4, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 166, + "d": 2 + }, + "mne/selection.py": { + "a": 0, + "d": 189 + }, + "mne/tests/test_read_vectorview_selection.py": { + "a": 13, + "d": 12 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_check.py": { + "a": 3, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 3, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 4 + }, + "tutorials/simulation/plot_dics.py": { + "a": 2, + "d": 1 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_1samp_test_time_frequency.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8874.json b/doc/sphinxext/prs/8874.json new file mode 100644 index 00000000000..104822c622e --- /dev/null +++ b/doc/sphinxext/prs/8874.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": 
"dfc05947ebdd047aa078776328ae456e9d0fba77", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 3 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8875.json b/doc/sphinxext/prs/8875.json new file mode 100644 index 00000000000..3a48fa452d4 --- /dev/null +++ b/doc/sphinxext/prs/8875.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3810dde8d5c0e580a08ea3d81e2e23f042592887", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8879.json b/doc/sphinxext/prs/8879.json new file mode 100644 index 00000000000..e0692c8e961 --- /dev/null +++ b/doc/sphinxext/prs/8879.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "2b09a1752dd227640cc4234115676d05bba6350a", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/fixes.py": { + "a": 15, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 42, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 34, + "d": 3 + }, + "mne/utils/check.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/888.json b/doc/sphinxext/prs/888.json new file mode 100644 index 00000000000..7192b88cb03 --- /dev/null +++ b/doc/sphinxext/prs/888.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a8cf342b0eb6da7abd448f84cd79b544baa14d80", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/source_space.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8884.json b/doc/sphinxext/prs/8884.json new file mode 100644 index 00000000000..323cfe4de74 --- /dev/null +++ b/doc/sphinxext/prs/8884.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "24377ad3200b6099ed47576e9cf8b27578d571ef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 4, + "d": 3 + }, + "mne/io/base.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 2, + "d": 1 + }, + "mne/utils/tests/test_logging.py": { + "a": 59, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8886.json b/doc/sphinxext/prs/8886.json new file mode 100644 index 00000000000..2b5de656290 --- /dev/null +++ b/doc/sphinxext/prs/8886.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "3f1c589c93b891d3afe8cf5871cbe8e67c9887fe", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/commands/mne_report.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 10, + "d": 5 + }, + "mne/io/fiff/raw.py": { + "a": 9, + "d": 7 + }, + "mne/report.py": { + "a": 1, + "d": 0 + }, + 
"mne/utils/tests/test_check.py": { + "a": 11, + "d": 5 + }, + "tutorials/misc/plot_report.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8889.json b/doc/sphinxext/prs/8889.json new file mode 100644 index 00000000000..d8880c41676 --- /dev/null +++ b/doc/sphinxext/prs/8889.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "08baf0d82d11a0829dd3cff3e02953f8b7bd9937", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 0, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/889.json b/doc/sphinxext/prs/889.json new file mode 100644 index 00000000000..9c2ea44f4ec --- /dev/null +++ b/doc/sphinxext/prs/889.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7f44bbcc4789b7c7aae71fccf96a57a404fbfe14", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/edf/tests/__init__.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8891.json b/doc/sphinxext/prs/8891.json new file mode 100644 index 00000000000..aad56df3b78 --- /dev/null +++ b/doc/sphinxext/prs/8891.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "dc86ec0519632ac86eb8f1f640972d0f6df45562", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 6, + "d": 4 + }, + "mne/channels/data/neighbors/neuromag122_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 13, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8896.json b/doc/sphinxext/prs/8896.json new file mode 100644 index 00000000000..fea9294bb0f --- /dev/null +++ b/doc/sphinxext/prs/8896.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "0de802aab6d858132e92c4dd74534ca0299f960f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 5, + "d": 1 + }, + "mne/annotations.py": { + "a": 185, + "d": 57 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 2 + }, + "mne/bem.py": { + "a": 4, + "d": 8 + }, + "mne/channels/channels.py": { + "a": 14, + "d": 12 + }, + "mne/dipole.py": { + "a": 4, + "d": 3 + }, + "mne/epochs.py": { + "a": 1, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 4, + "d": 8 + }, + "mne/io/base.py": { + "a": 8, + "d": 5 + }, + "mne/morph.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 2 + }, + "mne/report.py": { + "a": 5, + "d": 3 + }, + "mne/source_space.py": { + "a": 6, + "d": 9 + }, + "mne/surface.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_annotations.py": { + "a": 143, + "d": 113 + }, + "mne/time_frequency/tfr.py": { + "a": 8, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 24, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8898.json b/doc/sphinxext/prs/8898.json new file mode 100644 index 00000000000..3e22fa5d111 --- /dev/null +++ b/doc/sphinxext/prs/8898.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9b01f00073c7f232dbbee9c3ea187cda59776480", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + 
"mne/viz/_figure.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8899.json b/doc/sphinxext/prs/8899.json new file mode 100644 index 00000000000..d9907719463 --- /dev/null +++ b/doc/sphinxext/prs/8899.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5909e1bb0044e558536e62d441f0d694be65377a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/89.json b/doc/sphinxext/prs/89.json new file mode 100644 index 00000000000..9ce54b366a4 --- /dev/null +++ b/doc/sphinxext/prs/89.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "3ed097155ce04da18fab2873ea2574e0c7b8576f", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/plot_compute_source_psd_epochs.py": { + "a": 88, + "d": 0 + }, + "mne/minimum_norm/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 64, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 202, + "d": 0 + }, + "mne/time_frequency/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/time_frequency/multitaper.py": { + "a": 480, + "d": 0 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 43, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/891.json b/doc/sphinxext/prs/891.json new file mode 100644 index 00000000000..cbda5d5274f --- /dev/null +++ b/doc/sphinxext/prs/891.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b6c6fd2ec6e2f381e473b335c1fde54700a83134", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/coreg.py": { + "a": 196, + "d": 98 + }, + "mne/tests/test_coreg.py": { + "a": 10, + "d": 1 + }, + "mne/utils.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/892.json b/doc/sphinxext/prs/892.json new file mode 100644 index 00000000000..ab8127fb121 --- /dev/null +++ b/doc/sphinxext/prs/892.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6237972a4a8be14979cdde7d4e957b467492dc6b", + "authors": [ + { + "n": "Mainak Jas", + "e": "mainakjas@gmail.com" + } + ], + "changes": { + "mne/__init__.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/893.json b/doc/sphinxext/prs/893.json new file mode 100644 index 00000000000..8d535366e7e --- /dev/null +++ b/doc/sphinxext/prs/893.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d08e8b8028ba88dfa412b27f4fd1750ce18f3a08", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/source_space.py": { + "a": 18, + "d": 4 + }, + "mne/tests/test_source_space.py": { + "a": 7, + "d": 0 + }, + "mne/utils.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/895.json b/doc/sphinxext/prs/895.json new file mode 100644 index 00000000000..aaabd844f3f --- /dev/null +++ b/doc/sphinxext/prs/895.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "67d65c4ae68c2267a2a5042c87ca93f3570e0c66", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/mne-python.rst": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8953.json b/doc/sphinxext/prs/8953.json new file mode 100644 index 
00000000000..1184ed5f486 --- /dev/null +++ b/doc/sphinxext/prs/8953.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0b784b857de259d8ddce59b207533052483014bb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 2, + "d": 2 + }, + "requirements.txt": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8954.json b/doc/sphinxext/prs/8954.json new file mode 100644 index 00000000000..63782ba7c22 --- /dev/null +++ b/doc/sphinxext/prs/8954.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "52ebe7258a2430b4f826f4f471dfeb148ad8038a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 3, + "d": 1 + }, + "mne/viz/tests/test_raw.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8956.json b/doc/sphinxext/prs/8956.json new file mode 100644 index 00000000000..0f2fb614a71 --- /dev/null +++ b/doc/sphinxext/prs/8956.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4928c5cc7d7f37668eb9b69c07d898fd39f27303", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/chpi.py": { + "a": 12, + "d": 2 + }, + "mne/tests/test_chpi.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8959.json b/doc/sphinxext/prs/8959.json new file mode 100644 index 00000000000..7f6e8edd501 --- /dev/null +++ b/doc/sphinxext/prs/8959.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "5ea4bda51244d36b5af8bf198af4672e66fab6ee", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/896.json b/doc/sphinxext/prs/896.json new file mode 100644 index 00000000000..ab04fd9fa30 --- /dev/null +++ b/doc/sphinxext/prs/896.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "934fc1827441ac80bb83a7b59b2b0418592b36bc", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/edf/edf.py": { + "a": 19, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8960.json b/doc/sphinxext/prs/8960.json new file mode 100644 index 00000000000..80ce8d8a516 --- /dev/null +++ b/doc/sphinxext/prs/8960.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8e3bcefe2da6b47d1725eee5a487468277e8f5dd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/edf/tests/test_gdf.py": { + "a": 9, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8961.json b/doc/sphinxext/prs/8961.json new file mode 100644 index 00000000000..ea20a1cf9d3 --- /dev/null +++ b/doc/sphinxext/prs/8961.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3a0812f1ef4823cc2b9e012ee948c82ebc0a2e7f", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 0 + }, + "doc/_static/scrollfix.js": { + "a": 0, + "d": 31 + }, + "doc/_static/style.css": { + "a": 0, + "d": 21 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/8963.json b/doc/sphinxext/prs/8963.json new file mode 100644 index 00000000000..a1237e21794 --- /dev/null +++ b/doc/sphinxext/prs/8963.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "df227e7a9f67f61cf1322686308a78627d2289f4", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/dipole.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8965.json b/doc/sphinxext/prs/8965.json new file mode 100644 index 00000000000..fd0db4837ae --- /dev/null +++ b/doc/sphinxext/prs/8965.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "fe6775b7069812e8589bbf6819d4d55e72d19fb6", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 11, + "d": 9 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8967.json b/doc/sphinxext/prs/8967.json new file mode 100644 index 00000000000..ba8ee575bcb --- /dev/null +++ b/doc/sphinxext/prs/8967.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6f64726adffead86cc9397d390f3c0396a3a3615", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/preprocessing/plot_run_ica.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 4, + "d": 3 + }, + "mne/viz/tests/test_ica.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/897.json b/doc/sphinxext/prs/897.json new file mode 100644 index 00000000000..3bf3ca32de4 --- /dev/null +++ b/doc/sphinxext/prs/897.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "fdd7d3b901825c5c8981124e86790cd51d28bde4", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 2, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 6, + "d": 3 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 1 + }, + "mne/cov.py": { + "a": 14, + "d": 8 + }, + "mne/decoding/classifier.py": { + "a": 8, + "d": 4 + }, + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/fiff/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/fiff/kit/kit.py": { + "a": 4, + "d": 2 + }, + "mne/fiff/kit/tests/test_kit.py": { + "a": 4, + "d": 2 + }, + "mne/fiff/pick.py": { + "a": 9, + "d": 3 + }, + "mne/fiff/proj.py": { + "a": 4, + "d": 2 + }, + "mne/fiff/raw.py": { + "a": 11, + "d": 9 + }, + "mne/forward/_make_forward.py": { + "a": 4, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 6, + "d": 4 + }, + "mne/layouts/layout.py": { + "a": 7, + "d": 3 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 15, + "d": 14 + }, + "mne/preprocessing/ssp.py": { + "a": 10, + "d": 9 + }, + "mne/proj.py": { + "a": 3, + "d": 3 + }, + "mne/realtime/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/csd.py": 
{ + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 2, + "d": 1 + }, + "mne/viz.py": { + "a": 20, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8970.json b/doc/sphinxext/prs/8970.json new file mode 100644 index 00000000000..6ac6535a4d8 --- /dev/null +++ b/doc/sphinxext/prs/8970.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f20c60ec23481e8c9888fb9f7da1544378dd2489", + "authors": [ + { + "n": "kimcoco", + "e": "kimjico@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/beamformer/_dics.py": { + "a": 6, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8971.json b/doc/sphinxext/prs/8971.json new file mode 100644 index 00000000000..70405752102 --- /dev/null +++ b/doc/sphinxext/prs/8971.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "22ea75b6d981b6233462ed511c9865cb74e7fdbc", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8972.json b/doc/sphinxext/prs/8972.json new file mode 100644 index 00000000000..2a33b96ca6b --- /dev/null +++ b/doc/sphinxext/prs/8972.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "04d31007afc081512146f2879937840819f00ced", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 0, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 17, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8973.json b/doc/sphinxext/prs/8973.json new file mode 100644 index 00000000000..8c721328798 --- /dev/null +++ b/doc/sphinxext/prs/8973.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "01bf14e0cb60fd73270f3f91d81a3b4698afdc26", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 8 + }, + ".github/workflows/linux_pip.yml": { + "a": 2, + "d": 2 + }, + "azure-pipelines.yml": { + "a": 4, + "d": 29 + }, + "tools/azure_dependencies.sh": { + "a": 24, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 10, + "d": 0 + }, + "tools/github_actions_dependencies.sh": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8975.json b/doc/sphinxext/prs/8975.json new file mode 100644 index 00000000000..a39e85479ec --- /dev/null +++ b/doc/sphinxext/prs/8975.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "d6476f81fcb5639af507ef15ec911818ca8e1163", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 34, + "d": 22 + }, + "mne/viz/_brain/callback.py": { + "a": 5, + "d": 7 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 3, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 1, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 14, + "d": 28 + }, + "mne/viz/backends/_qt.py": { + "a": 67, + "d": 45 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8977.json b/doc/sphinxext/prs/8977.json new file mode 100644 index 00000000000..3396759b4e8 --- /dev/null +++ 
b/doc/sphinxext/prs/8977.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fc6ebb04890cf53b3be85e43f33d4adfaada4fe2", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 6, + "d": 1 + }, + "tutorials/epochs/plot_30_epochs_metadata.py": { + "a": 9, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8978.json b/doc/sphinxext/prs/8978.json new file mode 100644 index 00000000000..2d42d243212 --- /dev/null +++ b/doc/sphinxext/prs/8978.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "632f11915d3027dbba9cd156e6817455c2c74831", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 12, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/898.json b/doc/sphinxext/prs/898.json new file mode 100644 index 00000000000..9824c2cd2a7 --- /dev/null +++ b/doc/sphinxext/prs/898.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "95b49c1caead546a5cd45f7e21de3e8e9b6cfd9b", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "setup.py": { + "a": 12, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8980.json b/doc/sphinxext/prs/8980.json new file mode 100644 index 00000000000..783ac613fb0 --- /dev/null +++ b/doc/sphinxext/prs/8980.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "951dbb8562f3f071f254adfdb6b3a334480bb8b4", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/overview/index.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8983.json b/doc/sphinxext/prs/8983.json new file mode 100644 index 00000000000..2bdff582ddf --- /dev/null +++ b/doc/sphinxext/prs/8983.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f68d047d4305cc0622241e2886c8632059192d64", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 9, + "d": 16 + }, + "tutorials/misc/plot_ecog.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8984.json b/doc/sphinxext/prs/8984.json new file mode 100644 index 00000000000..e2a7a70c719 --- /dev/null +++ b/doc/sphinxext/prs/8984.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e98991e2c328c558854bc37933e6ff34471e8acc", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/0.22.inc": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8985.json b/doc/sphinxext/prs/8985.json new file mode 100644 index 00000000000..9f0aaceee0c --- /dev/null +++ b/doc/sphinxext/prs/8985.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "0135fcabe720801d4bf00442a366a9f59eeeb1c7", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 7, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 89, + "d": 43 + }, + "mne/viz/_brain/callback.py": { + "a": 13, + "d": 42 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 22, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8988.json b/doc/sphinxext/prs/8988.json new file mode 100644 index 
00000000000..b26426a4804 --- /dev/null +++ b/doc/sphinxext/prs/8988.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "20ed020d32092fd38269962974be861006340f42", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8989.json b/doc/sphinxext/prs/8989.json new file mode 100644 index 00000000000..ad291d536cc --- /dev/null +++ b/doc/sphinxext/prs/8989.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "31d1d0c042fa7bb25702708eb489a129adbcc1a9", + "authors": [ + { + "n": "Judy D Zhu", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/kit/coreg.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/899.json b/doc/sphinxext/prs/899.json new file mode 100644 index 00000000000..f6d5faede38 --- /dev/null +++ b/doc/sphinxext/prs/899.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d89655b94dd590e544f2195062fdf8292b14f6e3", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/edf/edf.py": { + "a": 98, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8996.json b/doc/sphinxext/prs/8996.json new file mode 100644 index 00000000000..1ffd8b61e0a --- /dev/null +++ b/doc/sphinxext/prs/8996.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "75fa7b125edea476802a571517e7fbef8534acc9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 7, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8998.json b/doc/sphinxext/prs/8998.json new file mode 100644 index 00000000000..d3cf7cf95a3 --- /dev/null +++ b/doc/sphinxext/prs/8998.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4cecd9d09c0f4219bfd525f804c49c81a78ae98c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 13, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/8999.json b/doc/sphinxext/prs/8999.json new file mode 100644 index 00000000000..d4e416113b9 --- /dev/null +++ b/doc/sphinxext/prs/8999.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "14a418fff0889be6cfa9b38a158c50bdcf7c0915", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 4, + "d": 4 + }, + "mne/viz/backends/_pyvista.py": { + "a": 46, + "d": 41 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9.json b/doc/sphinxext/prs/9.json new file mode 100644 index 00000000000..c80630e985b --- /dev/null +++ b/doc/sphinxext/prs/9.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "777bdac4668e072a5897b6e495e6c0784e0eb627", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/inverse/plot_compute_mne_inverse_volume.py": { + "a": 56, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/constants.py": { + "a": 6, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 28, + "d": 30 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 17, + 
"d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 5 + }, + "mne/parallel.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 134, + "d": 27 + }, + "mne/source_space.py": { + "a": 63, + "d": 1 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/90.json b/doc/sphinxext/prs/90.json new file mode 100644 index 00000000000..f30d6211e75 --- /dev/null +++ b/doc/sphinxext/prs/90.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "b72c2db75f0d21d80c03502b3fbc942f50c86b3d", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_dipole_fit_result.py": { + "a": 78, + "d": 0 + }, + "examples/inverse/plot_morph_data.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/plot_read_stc.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_lcmv.py": { + "a": 3, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 40, + "d": 0 + }, + "mne/fiff/constants.py": { + "a": 42, + "d": 0 + }, + "mne/fiff/tag.py": { + "a": 6, + "d": 5 + }, + "mne/label.py": { + "a": 279, + "d": 89 + }, + "mne/minimum_norm/inverse.py": { + "a": 3, + "d": 13 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 6, + "d": 3 + }, + "mne/mixed_norm/inverse.py": { + "a": 3, + "d": 2 + }, + "mne/mixed_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/source.py": { + "a": 18, + "d": 13 + }, + "mne/simulation/tests/test_source.py": { + "a": 9, + "d": 5 + }, + "mne/source_estimate.py": { + "a": 248, + "d": 128 + }, + "mne/source_space.py": { + "a": 6, + "d": 4 + }, + "mne/tests/test_dipole.py": { + "a": 21, + "d": 0 + }, + "mne/tests/test_forward.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 75, + "d": 18 + }, + "mne/tests/test_source_estimate.py": { + "a": 22, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9009.json b/doc/sphinxext/prs/9009.json new file mode 100644 index 00000000000..b8749853916 --- /dev/null +++ b/doc/sphinxext/prs/9009.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2d2e2ce2dc95813c88ee13d4452be4118952c170", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/viz/epochs.py": { + "a": 4, + "d": 6 + }, + "tutorials/epochs/plot_10_epochs_overview.py": { + "a": 10, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/901.json b/doc/sphinxext/prs/901.json new file mode 100644 index 00000000000..65fc8a9c2bb --- /dev/null +++ b/doc/sphinxext/prs/901.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "44f3c09324fbfbfaa46e88daf545cfe53ba14f62", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "examples/plot_ssp_projs_sensitivity_map.py": { + "a": 3, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9010.json b/doc/sphinxext/prs/9010.json new file mode 100644 index 00000000000..9996e42090b --- /dev/null +++ 
b/doc/sphinxext/prs/9010.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4f0ba23e8df4bbd875e3040c72a079da3628c1a0", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/connectivity/sensor_connectivity.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 19, + "d": 2 + }, + "mne/viz/_brain/view.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 16, + "d": 11 + }, + "tutorials/forward/10_background_freesurfer.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9012.json b/doc/sphinxext/prs/9012.json new file mode 100644 index 00000000000..241e82f4265 --- /dev/null +++ b/doc/sphinxext/prs/9012.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a40ad4bc22a707135c924b001c9d7a3644f00cca", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 10, + "d": 10 + }, + "tools/github_actions_dependencies.sh": { + "a": 5, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9013.json b/doc/sphinxext/prs/9013.json new file mode 100644 index 00000000000..a048962ef7a --- /dev/null +++ b/doc/sphinxext/prs/9013.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9d2c2b6c04331c3bb445204bf14c2ecf86f4bbd7", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 15, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9014.json b/doc/sphinxext/prs/9014.json new file mode 100644 index 00000000000..2bf18d43825 --- /dev/null +++ b/doc/sphinxext/prs/9014.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "97a69e6004ca0647e0f26d9024e8f144a504262b", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 24, + "d": 29 + }, + "mne/viz/backends/_abstract.py": { + "a": 30, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 29, + "d": 5 + }, + "mne/viz/backends/_pyvista.py": { + "a": 8, + "d": 8 + }, + "mne/viz/backends/_qt.py": { + "a": 41, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9015.json b/doc/sphinxext/prs/9015.json new file mode 100644 index 00000000000..2648fafcd73 --- /dev/null +++ b/doc/sphinxext/prs/9015.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "633bee397ed2f63dded0e8f3169556893408976b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 4, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 10, + "d": 3 + }, + "tutorials/misc/plot_report.py": { + "a": 42, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9016.json b/doc/sphinxext/prs/9016.json new file mode 100644 index 00000000000..38025ba5061 --- /dev/null +++ b/doc/sphinxext/prs/9016.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "9e5916e6ad3951e43106fae55f9312e7de1d59b2", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 124, + 
"d": 280 + }, + "mne/viz/_brain/callback.py": { + "a": 0, + "d": 32 + }, + "mne/viz/_brain/mplcanvas.py": { + "a": 0, + "d": 128 + }, + "mne/viz/_brain/tests/test.ipynb": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_abstract.py": { + "a": 189, + "d": 9 + }, + "mne/viz/backends/_notebook.py": { + "a": 141, + "d": 52 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 6, + "d": 3 + }, + "mne/viz/backends/_qt.py": { + "a": 194, + "d": 26 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9017.json b/doc/sphinxext/prs/9017.json new file mode 100644 index 00000000000..43b233f7e69 --- /dev/null +++ b/doc/sphinxext/prs/9017.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "64c90ce224b96358eb7d083e5b89c8faf226c909", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + } + ], + "changes": { + "tutorials/epochs/plot_10_epochs_overview.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9019.json b/doc/sphinxext/prs/9019.json new file mode 100644 index 00000000000..471d5f54113 --- /dev/null +++ b/doc/sphinxext/prs/9019.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aabdda6f443ca1699b9bc047bffdf820835730f8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/_file_traits.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/902.json b/doc/sphinxext/prs/902.json new file mode 100644 index 00000000000..1afcbceccaf --- /dev/null +++ b/doc/sphinxext/prs/902.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9423f28a8998ec659cbbb32ff2770e35ef3e3f81", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/Makefile": { + "a": 5, + "d": 0 + }, + "doc/source/_static/logo.png": { + "a": 0, + "d": 0 + }, + "doc/source/_templates/layout.html": { + "a": 1, + "d": 0 + }, + "doc/source/cite.rst": { + "a": 10, + "d": 0 + }, + "doc/source/index.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9023.json b/doc/sphinxext/prs/9023.json new file mode 100644 index 00000000000..421fec7f24b --- /dev/null +++ b/doc/sphinxext/prs/9023.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e94b439fb5f507488de311dd8556249daf7de364", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 8, + "d": 6 + }, + "tutorials/misc/plot_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9024.json b/doc/sphinxext/prs/9024.json new file mode 100644 index 00000000000..6860f8fa351 --- /dev/null +++ b/doc/sphinxext/prs/9024.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7b4d4e41910473db53f97f0aaa8fe699e87f0331", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9026.json b/doc/sphinxext/prs/9026.json new file mode 100644 index 00000000000..56871918393 --- /dev/null +++ b/doc/sphinxext/prs/9026.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b707d37dc880205154773de703b2e47a438cc051", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "codecov.yml": { + "a": 2, + 
"d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9027.json b/doc/sphinxext/prs/9027.json new file mode 100644 index 00000000000..7587fe64f9d --- /dev/null +++ b/doc/sphinxext/prs/9027.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "c2d4e4bd276760b895acbee5597ad2834aa2691f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/evoked.py": { + "a": 25, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 21, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9028.json b/doc/sphinxext/prs/9028.json new file mode 100644 index 00000000000..f82d0eafb58 --- /dev/null +++ b/doc/sphinxext/prs/9028.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e889e26285aacc5a20caaeaacaa90e7d98578d76", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 0, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9029.json b/doc/sphinxext/prs/9029.json new file mode 100644 index 00000000000..23dd59d2ecf --- /dev/null +++ b/doc/sphinxext/prs/9029.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6cfd3040aed69d9970c618b782e6126ed56adb65", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 55, + "d": 42 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 11, + "d": 29 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 18 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9032.json b/doc/sphinxext/prs/9032.json new file mode 100644 index 00000000000..16eba63d95c --- /dev/null +++ b/doc/sphinxext/prs/9032.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "623553f46701ce679f4c62d28d3b4bc3a1df97a4", + "authors": [ + { + "n": "Gensollen", + "e": "nicolas.gensollen@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_parcellation.py": { + "a": 9, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9033.json b/doc/sphinxext/prs/9033.json new file mode 100644 index 00000000000..fa31355817f --- /dev/null +++ b/doc/sphinxext/prs/9033.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "014e9526306a07f8cd47bbcb30ac33cd7779392e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 50, + "d": 10 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 45, + "d": 7 + }, + "mne/viz/tests/test_ica.py": { + "a": 3, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 6, + "d": 0 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 24, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9034.json b/doc/sphinxext/prs/9034.json new file mode 100644 index 00000000000..7e45848b3ef --- /dev/null +++ b/doc/sphinxext/prs/9034.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": 
"4add7d137958468c5accc2a1877664ebb9603c23", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 0, + "d": 1 + }, + "environment.yml": { + "a": 3, + "d": 0 + }, + "mne/conftest.py": { + "a": 5, + "d": 3 + }, + "mne/viz/_brain/tests/test.ipynb": { + "a": 0, + "d": 123 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 108, + "d": 16 + }, + "mne/viz/conftest.py": { + "a": 129, + "d": 0 + }, + "mne/viz/tests/conftest.py": { + "a": 0, + "d": 45 + }, + "requirements.txt": { + "a": 2, + "d": 0 + }, + "requirements_testing.txt": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9035.json b/doc/sphinxext/prs/9035.json new file mode 100644 index 00000000000..49155fb710d --- /dev/null +++ b/doc/sphinxext/prs/9035.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1ae8e10cf49b5aecb3d0b21f1e2b86052a98471c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 1, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9036.json b/doc/sphinxext/prs/9036.json new file mode 100644 index 00000000000..ea6a46b051f --- /dev/null +++ b/doc/sphinxext/prs/9036.json @@ -0,0 +1,287 @@ +{ + "merge_commit_sha": "d309e3dc37657e0dfbc0a663e5292db6f49e4e5b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + "doc/_static/font-source-code-pro.css": { + "a": 0, + "d": 167 + }, + "doc/_static/font-source-sans-pro.css": { + "a": 0, + "d": 131 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Bold.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-BoldIt.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-It.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Regular.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Semibold.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/EOT/SourceCodePro-SemiboldIt.eot": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Bold.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-BoldIt.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-It.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Regular.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Semibold.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/OTF/SourceCodePro-SemiboldIt.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Bold.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-BoldIt.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-It.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Regular.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Semibold.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/TTF/SourceCodePro-SemiboldIt.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Bold.otf.woff": { + "a": 0, + "d": 0 + }, + 
"doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-BoldIt.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-It.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Regular.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Semibold.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-SemiboldIt.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Bold.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-BoldIt.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-It.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Regular.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Semibold.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-SemiboldIt.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Bold.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-BoldIt.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-It.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Light.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-LightIt.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Regular.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Semibold.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-SemiboldIt.otf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Bold.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-BoldIt.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-It.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Light.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-LightIt.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Regular.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Semibold.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-SemiboldIt.ttf": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Bold.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-BoldIt.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-It.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Light.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-LightIt.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Regular.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Semibold.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-SemiboldIt.otf.woff": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Bold.ttf.woff2": { + "a": 0, + "d": 0 + }, + 
"doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-BoldIt.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-It.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Light.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-LightIt.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Regular.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Semibold.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-SemiboldIt.ttf.woff2": { + "a": 0, + "d": 0 + }, + "doc/_static/style.css": { + "a": 36, + "d": 13 + }, + "doc/_templates/docs-navbar.html": { + "a": 18, + "d": 66 + }, + "doc/conf.py": { + "a": 15, + "d": 4 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9037.json b/doc/sphinxext/prs/9037.json new file mode 100644 index 00000000000..aeafe9eabdf --- /dev/null +++ b/doc/sphinxext/prs/9037.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2114c06d8f6f8901ec03d1d03ae17ad75101bb64", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 16, + "d": 0 + }, + "mne/tests/test_report.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9039.json b/doc/sphinxext/prs/9039.json new file mode 100644 index 00000000000..3b564bda211 --- /dev/null +++ b/doc/sphinxext/prs/9039.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3319fafd30d3d68c10ae07307ad330f56580dcea", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "tutorials/source-modeling/plot_beamformer_lcmv.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9041.json b/doc/sphinxext/prs/9041.json new file mode 100644 index 00000000000..46b557eec57 --- /dev/null +++ b/doc/sphinxext/prs/9041.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f751fe390b775926a72f63c8f217021deea14965", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9042.json b/doc/sphinxext/prs/9042.json new file mode 100644 index 00000000000..76b7faeb9b1 --- /dev/null +++ b/doc/sphinxext/prs/9042.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b0778b86404506414501bf83a4629d3a73970ae9", + "authors": [ + { + "n": "Eduard", + "e": "eduardxort@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_evoked_whitening.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9043.json b/doc/sphinxext/prs/9043.json new file mode 100644 index 00000000000..33c7fefcfbb --- /dev/null +++ b/doc/sphinxext/prs/9043.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d489bcf68f02a8535a1ec7bc323fcd7107ab84e9", + "authors": [ + { + "n": "Valerii", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + 
"mne/preprocessing/maxwell.py": { + "a": 19, + "d": 24 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9044.json b/doc/sphinxext/prs/9044.json new file mode 100644 index 00000000000..54760dff951 --- /dev/null +++ b/doc/sphinxext/prs/9044.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0d8a7b0717dc33e75dc20c9cdd356f51a515309d", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "tutorials/sample-datasets/plot_sleep.py": { + "a": 10, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9046.json b/doc/sphinxext/prs/9046.json new file mode 100644 index 00000000000..cd3034fc66d --- /dev/null +++ b/doc/sphinxext/prs/9046.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2f908df809f3c099d26bce373286e3bf16efa6fa", + "authors": [ + { + "n": "Silvia Cotroneo", + "e": "silvia.cotroneo@aalto.fi" + } + ], + "changes": { + "tutorials/discussions/plot_background_statistics.py": { + "a": 9, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9047.json b/doc/sphinxext/prs/9047.json new file mode 100644 index 00000000000..e46199a45ec --- /dev/null +++ b/doc/sphinxext/prs/9047.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9798a638a50ed5a034d81f6619deee483fe57154", + "authors": [ + { + "n": "kimcoco", + "e": "kimjico@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9048.json b/doc/sphinxext/prs/9048.json new file mode 100644 index 00000000000..46d4be4d1e8 --- /dev/null +++ b/doc/sphinxext/prs/9048.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "51677154fea57d90cbe7a989c5b59803ec901064", + "authors": [ + { + "n": "Ram PARI", + "e": null + } + ], + "changes": { + "mne/simulation/raw.py": { + "a": 4, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9049.json b/doc/sphinxext/prs/9049.json new file mode 100644 index 00000000000..a0de79f9d21 --- /dev/null +++ b/doc/sphinxext/prs/9049.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1171b4a12f91756a1576077c181cf37b267da446", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "examples/preprocessing/plot_otp.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9050.json b/doc/sphinxext/prs/9050.json new file mode 100644 index 00000000000..7819eb38cb8 --- /dev/null +++ b/doc/sphinxext/prs/9050.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f98e9222700e47308c0cd759410d82f6da058dda", + "authors": [ + { + "n": "Matteo Anelli", + "e": "matteanelli.96.11@gmail.com" + } + ], + "changes": { + "examples/time_frequency/plot_source_power_spectrum_opm.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9051.json b/doc/sphinxext/prs/9051.json new file mode 100644 index 00000000000..e846c407b14 --- /dev/null +++ b/doc/sphinxext/prs/9051.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "18cd16b971902b5bed979fb5a5c88a3e318245b5", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 4, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9052.json b/doc/sphinxext/prs/9052.json new file mode 100644 index 00000000000..9c9f1a22d4a --- /dev/null +++ b/doc/sphinxext/prs/9052.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c25aeee579fabdc2533c5ee591d9654e6d40f063", + "authors": [ + { + "n": "Giorgio_M", + "e": "giorgio.marinato@unitn.it" + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_global_field_power.py": { + "a": 8, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9053.json b/doc/sphinxext/prs/9053.json new file mode 100644 index 00000000000..557df9c6bb1 --- /dev/null +++ b/doc/sphinxext/prs/9053.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b495090403202aebb8bbf6dd02a81a439cbcfcb5", + "authors": [ + { + "n": null, + "e": "timon.merk@charite.de" + } + ], + "changes": { + "examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9054.json b/doc/sphinxext/prs/9054.json new file mode 100644 index 00000000000..6beaf55a177 --- /dev/null +++ b/doc/sphinxext/prs/9054.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "eec545f5c6a94877cd87060a7f2f12adc1c7fcba", + "authors": [ + { + "n": "Felix Klotzsche", + "e": "klotzsche@cbs.mpg.de" + } + ], + "changes": { + "examples/time_frequency/plot_time_frequency_erds.py": { + "a": 5, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9055.json b/doc/sphinxext/prs/9055.json new file mode 100644 index 00000000000..a8479ecad3b --- /dev/null +++ b/doc/sphinxext/prs/9055.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4dacaa33542d028820b96caba99e44d0f04a0d7b", + "authors": [ + { + "n": "DataFox", + "e": "sfox@riseup.net" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/references.bib": { + "a": 14, + "d": 0 + }, + "mne/stats/multi_comp.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9056.json b/doc/sphinxext/prs/9056.json new file mode 100644 index 00000000000..ea54e0d397d --- /dev/null +++ b/doc/sphinxext/prs/9056.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3ca43776bc2a380a10b1c1f7aef33c619c5a2af2", + "authors": [ + { + "n": "Falach", + "e": "rotemfa0@gmail.com" + } + ], + "changes": { + "mne/label.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9058.json 
b/doc/sphinxext/prs/9058.json new file mode 100644 index 00000000000..34251183a5b --- /dev/null +++ b/doc/sphinxext/prs/9058.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c8f0f51b4a747dc3f08128c7c232ce3ac005cdba", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/stats/plot_sensor_regression.py": { + "a": 6, + "d": 10 + }, + "mne/datasets/kiloword/kiloword.py": { + "a": 2, + "d": 4 + }, + "tutorials/stats-sensor-space/plot_stats_cluster_erp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9059.json b/doc/sphinxext/prs/9059.json new file mode 100644 index 00000000000..0e6c90e62ab --- /dev/null +++ b/doc/sphinxext/prs/9059.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "428e73badd512e528aa7eb236785b30a2eaac53e", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + } + ], + "changes": { + "examples/visualization/plot_mne_helmet.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/906.json b/doc/sphinxext/prs/906.json new file mode 100644 index 00000000000..056cb6b56f6 --- /dev/null +++ b/doc/sphinxext/prs/906.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0b01386260f71eff2252d3ca81ba5ca143ba77da", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/getting_started.rst": { + "a": 9, + "d": 4 + }, + "examples/plot_evoked_whitening.py": { + "a": 0, + "d": 1 + }, + "mne/viz.py": { + "a": 22, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9061.json b/doc/sphinxext/prs/9061.json new file mode 100644 index 00000000000..3d55e8d1d62 --- /dev/null +++ b/doc/sphinxext/prs/9061.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "77cf51cf70c8b1a891d9dd0dbd93e70ca4ed2f34", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9062.json b/doc/sphinxext/prs/9062.json new file mode 100644 index 00000000000..0b1bf95b58f --- /dev/null +++ b/doc/sphinxext/prs/9062.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "04bb93a2e8bf47e39049b9b2b874a23d1f079ee3", + "authors": [ + { + "n": "Valerii", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9063.json b/doc/sphinxext/prs/9063.json new file mode 100644 index 00000000000..6ee1f48bbe5 --- /dev/null +++ b/doc/sphinxext/prs/9063.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d167f95aa5534136f8390e3c971e668b5bb02ef2", + "authors": [ + { + "n": "Adam Li", + "e": null + } + ], + "changes": { + "mne/stats/permutations.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9064.json b/doc/sphinxext/prs/9064.json new file mode 100644 index 00000000000..88ac8c55c6a --- /dev/null +++ b/doc/sphinxext/prs/9064.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1fb6e41e7de3c896114453880267e301659c7105", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "tutorials/discussions/plot_background_filtering.py": { + "a": 46, + "d": 68 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9065.json b/doc/sphinxext/prs/9065.json new file mode 100644 index 00000000000..d36dba995ed --- /dev/null +++ b/doc/sphinxext/prs/9065.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8be65850012d54155758545594d671d9cc93e04b", + "authors": [ + { + "n": "Silvia Cotroneo", + "e": "silvia.cotroneo@aalto.fi" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 9, + "d": 9 + }, + "tutorials/discussions/plot_background_statistics.py": { + "a": 9, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9066.json b/doc/sphinxext/prs/9066.json new file mode 100644 index 00000000000..e2a95b7ea4c --- /dev/null +++ b/doc/sphinxext/prs/9066.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8e6858e0cfda78923cd745bb83d2f53bc6f6b02f", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "tutorials/source-modeling/plot_compute_covariance.py": { + "a": 5, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9067.json b/doc/sphinxext/prs/9067.json new file mode 100644 index 00000000000..d3f38097f76 --- /dev/null +++ b/doc/sphinxext/prs/9067.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5e63884ceff9bc7658f56c0699d5bf115d20723a", + "authors": [ + { + "n": "Matteo Anelli", + "e": "matteanelli.96.11@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/preprocessing/xdawn.py": { + "a": 7, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9068.json b/doc/sphinxext/prs/9068.json new file mode 100644 index 00000000000..a4aaa026e2f --- /dev/null +++ b/doc/sphinxext/prs/9068.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c22263c70c85d755ccb424b98d226a43c73534c3", + "authors": [ + { + "n": "Felix Klotzsche", + "e": "klotzsche@cbs.mpg.de" + } + ], + "changes": { + "tutorials/stats-sensor-space/plot_stats_cluster_erp.py": { + "a": 4, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/907.json b/doc/sphinxext/prs/907.json new file mode 100644 index 00000000000..3933ee10ec5 --- /dev/null +++ b/doc/sphinxext/prs/907.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "437e3d08a1f09e53e0605e41ee329e74dfa13f8c", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 3 + }, + "mne/epochs.py": { + "a": 7, + "d": 17 + }, + "mne/fiff/compensator.py": { + "a": 11, + "d": 2 + }, + "mne/fiff/ctf.py": { + "a": 12, + "d": 8 + }, + "mne/fiff/evoked.py": { + "a": 4, + "d": 3 + }, + "mne/fiff/raw.py": { + "a": 14, + "d": 9 + }, + "mne/fiff/tests/test_compensator.py": { + "a": 51, + "d": 2 + }, + "mne/fiff/tests/test_raw.py": { + "a": 19, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 18, + "d": 15 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 15, + "d": 1 + }, + "mne/realtime/epochs.py": { + "a": 7, + "d": 10 + }, + "mne/tests/test_viz.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9070.json b/doc/sphinxext/prs/9070.json new file mode 100644 index 00000000000..7242e48854b --- /dev/null +++ b/doc/sphinxext/prs/9070.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f0aa3841b0023d83c96bd2b27ec6f0515cb82719", + "authors": [ + { + "n": "vpeterson", + "e": 
"victoriapeterson09@gmail.com" + } + ], + "changes": { + "examples/visualization/plot_sensor_noise_level.py": { + "a": 6, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9071.json b/doc/sphinxext/prs/9071.json new file mode 100644 index 00000000000..3b3b5eebdfb --- /dev/null +++ b/doc/sphinxext/prs/9071.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0974fc51f0f9bc76c8ebeec3695726f2680b11ff", + "authors": [ + { + "n": "Eduard", + "e": "eduardxort@gmail.com" + } + ], + "changes": { + "tutorials/preprocessing/plot_15_handling_bad_channels.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9072.json b/doc/sphinxext/prs/9072.json new file mode 100644 index 00000000000..4ee144991b9 --- /dev/null +++ b/doc/sphinxext/prs/9072.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6d1ee18a4cd7e753a0c42306a89630d7e8d092cb", + "authors": [ + { + "n": "Catalina M. Galván", + "e": null + } + ], + "changes": { + "mne/connectivity/envelope.py": { + "a": 2, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9074.json b/doc/sphinxext/prs/9074.json new file mode 100644 index 00000000000..420f58472ce --- /dev/null +++ b/doc/sphinxext/prs/9074.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f1ff6ba253c1a09dfbbb7369ad34e1a34a7865d6", + "authors": [ + { + "n": "nordme", + "e": "nordme@uw.edu" + } + ], + "changes": { + "mne/datasets/limo/limo.py": { + "a": 8, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9075.json b/doc/sphinxext/prs/9075.json new file mode 100644 index 00000000000..4f4c44a4b4b --- /dev/null +++ b/doc/sphinxext/prs/9075.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "271eb1076f9ce5c1afc40fc9cd5356bdc7f37df2", + "authors": [ + { + "n": "ChristinaZhao", + "e": null + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/datasets/eegbci/eegbci.py": { + "a": 5, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9077.json b/doc/sphinxext/prs/9077.json new file mode 100644 index 00000000000..d1df2347759 --- /dev/null +++ b/doc/sphinxext/prs/9077.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "505e7a03e389e0552cf521dc044cf7ee93614be6", + "authors": [ + { + "n": "Maggie Clarke", + "e": "mdclarke@uw.edu" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9079.json b/doc/sphinxext/prs/9079.json new file mode 100644 index 00000000000..55ea0497775 --- /dev/null +++ b/doc/sphinxext/prs/9079.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b465989273481d3b6264964a0baf877d1e913873", + "authors": [ + { + "n": "Steven Bierer", + "e": null + }, + { + "n": "Steven Bierer", + "e": "neurolaunch@gmail.copm" + } + ], + "changes": { + "doc/references.bib": { + "a": 12, + "d": 0 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 7, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/908.json b/doc/sphinxext/prs/908.json new file mode 100644 index 00000000000..6f6b7d95b9b --- /dev/null +++ b/doc/sphinxext/prs/908.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6d2ff00952ebc332ef514af397f65c66778c2c55", + "authors": [ + { + "n": "Romek Goj", + "e": "roman.goj@gmail.com" + } + ], + 
"changes": { + "mne/viz.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9080.json b/doc/sphinxext/prs/9080.json new file mode 100644 index 00000000000..97ade673957 --- /dev/null +++ b/doc/sphinxext/prs/9080.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c29a32d474809785225c24053dcb41bf054a6e32", + "authors": [ + { + "n": "nordme", + "e": "nordme@uw.edu" + } + ], + "changes": { + "mne/datasets/hf_sef/hf_sef.py": { + "a": 3, + "d": 3 + }, + "mne/datasets/limo/limo.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9081.json b/doc/sphinxext/prs/9081.json new file mode 100644 index 00000000000..1c559f27121 --- /dev/null +++ b/doc/sphinxext/prs/9081.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "94870a5532a43a0ca850d58789a77ceeb532a3b5", + "authors": [ + { + "n": "Ram PARI", + "e": null + } + ], + "changes": { + "tutorials/machine-learning/plot_receptive_field.py": { + "a": 9, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9083.json b/doc/sphinxext/prs/9083.json new file mode 100644 index 00000000000..269c1ee9263 --- /dev/null +++ b/doc/sphinxext/prs/9083.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fabd29fc8e0d2508399ed006a74f5fd3463b38b4", + "authors": [ + { + "n": "Maggie Clarke", + "e": "mdclarke@uw.edu" + } + ], + "changes": { + "tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9084.json b/doc/sphinxext/prs/9084.json new file mode 100644 index 00000000000..540c2abffb1 --- /dev/null +++ b/doc/sphinxext/prs/9084.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e1dacb5e63ea97d89ff71016a4edcf35e7edb1dc", + "authors": [ + { + "n": "Maggie Clarke", + "e": "mdclarke@uw.edu" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/otp.py": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9085.json b/doc/sphinxext/prs/9085.json new file mode 100644 index 00000000000..489fb195945 --- /dev/null +++ b/doc/sphinxext/prs/9085.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7168e683e319f99a3c57ec68f7f0e844eafcb463", + "authors": [ + { + "n": "Maggie Clarke", + "e": "mdclarke@uw.edu" + } + ], + "changes": { + "examples/io/plot_read_noise_covariance_matrix.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9087.json b/doc/sphinxext/prs/9087.json new file mode 100644 index 00000000000..cc179cbcb43 --- /dev/null +++ b/doc/sphinxext/prs/9087.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "59008cfa021f617c9c6de50593f55cdbdbf17538", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Adam Li", + "e": "adam2392@gmail.com" + }, + { + "n": "Liberty Hamilton", + "e": "libertyhamilton@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 3, + "d": 0 + }, + "doc/datasets.rst": { + "a": 1, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/epilepsy_ecog/__init__.py": { + "a": 3, + "d": 0 + }, + "mne/datasets/epilepsy_ecog/_data.py": { + "a": 31, + "d": 0 + }, + 
"mne/datasets/utils.py": { + "a": 14, + "d": 8 + }, + "mne/source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/utils/config.py": { + "a": 2, + "d": 0 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + }, + "tools/circleci_download.sh": { + "a": 3, + "d": 0 + }, + "tutorials/misc/plot_ecog.py": { + "a": 118, + "d": 130 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9088.json b/doc/sphinxext/prs/9088.json new file mode 100644 index 00000000000..11affaf4b7d --- /dev/null +++ b/doc/sphinxext/prs/9088.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "49536e31fdd3a69dcfe21a0b2efd69958dead5cd", + "authors": [ + { + "n": "vpeterson", + "e": "victoriapeterson09@gmail.com" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "nordme", + "e": "nordme@uw.edu" + } + ], + "changes": { + "mne/epochs.py": { + "a": 55, + "d": 1 + }, + "mne/evoked.py": { + "a": 54, + "d": 2 + }, + "mne/io/base.py": { + "a": 7, + "d": 36 + }, + "mne/tests/test_epochs.py": { + "a": 35, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 21, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 55, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9089.json b/doc/sphinxext/prs/9089.json new file mode 100644 index 00000000000..adfaabcdc24 --- /dev/null +++ b/doc/sphinxext/prs/9089.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "727807d2922b473a43cd7665fd56a5d7a645eed9", + "authors": [ + { + "n": "ChristinaZhao", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9090.json b/doc/sphinxext/prs/9090.json new file mode 100644 index 00000000000..874cbfcede2 --- /dev/null +++ b/doc/sphinxext/prs/9090.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cc80458ba6aec4f790a56cc445e3f024740732de", + "authors": [ + { + "n": "Christian Mista", + "e": "cmista@ingenieria.uner.edu.ar" + } + ], + "changes": { + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 15, + "d": 42 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9091.json b/doc/sphinxext/prs/9091.json new file mode 100644 index 00000000000..166b13facef --- /dev/null +++ b/doc/sphinxext/prs/9091.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "53757d01c8f5e86fa6c2c0bd1934cb0a1f58aa3f", + "authors": [ + { + "n": "Valerii", + "e": null + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/dipole.py": { + "a": 22, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9092.json b/doc/sphinxext/prs/9092.json new file mode 100644 index 00000000000..bf10f19ec5f --- /dev/null +++ b/doc/sphinxext/prs/9092.json @@ -0,0 +1,15 @@ +{ 
+ "merge_commit_sha": "04c58d73112916444b71710bd2b60f77723028fc", + "authors": [ + { + "n": "Eduard", + "e": "eduardxort@gmail.com" + } + ], + "changes": { + "examples/preprocessing/plot_virtual_evoked.py": { + "a": 18, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9095.json b/doc/sphinxext/prs/9095.json new file mode 100644 index 00000000000..63118640501 --- /dev/null +++ b/doc/sphinxext/prs/9095.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "80b029d305872aef82e03d1f9353a191e17da7c6", + "authors": [ + { + "n": "Giorgio_M", + "e": "giorgio.marinato@unitn.it" + } + ], + "changes": { + "doc/references.bib": { + "a": 13, + "d": 0 + }, + "examples/visualization/plot_channel_epochs_image.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9096.json b/doc/sphinxext/prs/9096.json new file mode 100644 index 00000000000..0c4fd99793d --- /dev/null +++ b/doc/sphinxext/prs/9096.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "a6abf3649bdf754c86bb983f393f690791816048", + "authors": [ + { + "n": "Silvia Cotroneo", + "e": "silvia.cotroneo@aalto.fi" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 8, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9097.json b/doc/sphinxext/prs/9097.json new file mode 100644 index 00000000000..586f6837f6d --- /dev/null +++ b/doc/sphinxext/prs/9097.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "523413f911e23bfef98ca92e3cfa00c80501f94d", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 1, + "d": 1 + }, + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/glossary.rst": { + "a": 8, + "d": 0 + }, + "doc/references.bib": { + "a": 10, + "d": 0 + }, + "mne/epochs.py": { + "a": 9, + "d": 4 + }, + "tutorials/epochs/plot_40_autogenerate_metadata.py": { + "a": 13, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9098.json b/doc/sphinxext/prs/9098.json new file mode 100644 index 00000000000..4e45214c54b --- /dev/null +++ b/doc/sphinxext/prs/9098.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "602a94f62127736834fa65e0c33fe037763efdb2", + "authors": [ + { + "n": "Ram PARI", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9099.json b/doc/sphinxext/prs/9099.json new file mode 100644 index 00000000000..46ab3abf988 --- /dev/null +++ b/doc/sphinxext/prs/9099.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "56e9fbe67a554272a71dfd2b9ae96eb7fd58d104", + "authors": [ + { + "n": "kimcoco", + "e": "kimjico@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 30, + "d": 10 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 51, + "d": 0 + }, + "mne/viz/tests/test_ica.py": { + "a": 10, + "d": 0 + }, + 
"mne/viz/tests/test_topomap.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/91.json b/doc/sphinxext/prs/91.json new file mode 100644 index 00000000000..2090d5b980c --- /dev/null +++ b/doc/sphinxext/prs/91.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a77d9f05ee4b0b964e2098c0e247913f2412a900", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/minimum_norm/time_frequency.py": { + "a": 5, + "d": 3 + }, + "mne/time_frequency/multitaper.py": { + "a": 13, + "d": 12 + }, + "mne/time_frequency/tests/test_multitaper.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/910.json b/doc/sphinxext/prs/910.json new file mode 100644 index 00000000000..aaa652e25b8 --- /dev/null +++ b/doc/sphinxext/prs/910.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "88f6478aedd6bc1285be07b1b5aa3dec50dff29f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "Makefile": { + "a": 1, + "d": 0 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 0, + "d": 1 + }, + "mne/fixes.py": { + "a": 42, + "d": 63 + }, + "mne/tests/test_fixes.py": { + "a": 63, + "d": 7 + }, + "mne/tests/test_utils.py": { + "a": 54, + "d": 4 + }, + "mne/tests/test_viz.py": { + "a": 37, + "d": 2 + }, + "mne/utils.py": { + "a": 8, + "d": 14 + }, + "mne/viz.py": { + "a": 18, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9100.json b/doc/sphinxext/prs/9100.json new file mode 100644 index 00000000000..1ab71051df5 --- /dev/null +++ b/doc/sphinxext/prs/9100.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1703bfd2a159feb105e145699764c54b2592e5cf", + "authors": [ + { + "n": "Ram PARI", + "e": null + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + } + ], + "changes": { + "tutorials/time-freq/plot_sensors_time_frequency.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9102.json b/doc/sphinxext/prs/9102.json new file mode 100644 index 00000000000..a77ac78256f --- /dev/null +++ b/doc/sphinxext/prs/9102.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0a61e1c7c271829bb3cc9480ea8fb08142c91e7b", + "authors": [ + { + "n": "Catalina M. 
Galván", + "e": null + } + ], + "changes": { + "examples/preprocessing/plot_xdawn_denoising.py": { + "a": 6, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9103.json b/doc/sphinxext/prs/9103.json new file mode 100644 index 00000000000..3def439a642 --- /dev/null +++ b/doc/sphinxext/prs/9103.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "050a1f35fa91e013973ee3df2c3ca8a4ad362264", + "authors": [ + { + "n": "Falach", + "e": "rotemfa0@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 7, + "d": 2 + }, + "mne/io/tests/test_meas_info.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9104.json b/doc/sphinxext/prs/9104.json new file mode 100644 index 00000000000..dd60a3a10ba --- /dev/null +++ b/doc/sphinxext/prs/9104.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3da7e332c3ecd75613feb31c6f34c3b362e32d97", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + } + ], + "changes": { + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 6, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9105.json b/doc/sphinxext/prs/9105.json new file mode 100644 index 00000000000..405780bec17 --- /dev/null +++ b/doc/sphinxext/prs/9105.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1026ad762371b0a802c67af89e163434ac9640b6", + "authors": [ + { + "n": "Catalina M. Galván", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/datasets/sleep_physionet/age.py": { + "a": 3, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9106.json b/doc/sphinxext/prs/9106.json new file mode 100644 index 00000000000..760158ded0c --- /dev/null +++ b/doc/sphinxext/prs/9106.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b239ad314fb7d74fb43fdf1e34eada94ed32659d", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + } + ], + "changes": { + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9109.json b/doc/sphinxext/prs/9109.json new file mode 100644 index 00000000000..350258b1184 --- /dev/null +++ b/doc/sphinxext/prs/9109.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "88cbfa8f42f885a6e4a400d4c9eece839cee0c77", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9113.json b/doc/sphinxext/prs/9113.json new file mode 100644 index 00000000000..1e5c644e2fa --- /dev/null +++ b/doc/sphinxext/prs/9113.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fc3f2b56bc172eb3b747751e636d55b0cbb56861", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Mainak Jas", + "e": "jasmainak@users.noreply.github.com" + }, + { + "n": "Mainak Jas", + "e": "jasmainak@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/report.py": { + "a": 48, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9114.json b/doc/sphinxext/prs/9114.json new file mode 100644 index 00000000000..b4f7983e9ae --- /dev/null +++ b/doc/sphinxext/prs/9114.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "0ddde8c7703331644dbe06b1b023d979738eebb8", + "authors": [ + { + "n": "Valerii", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_mixed_source_space_inverse.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 31, + "d": 18 + }, + "mne/viz/tests/test_3d.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9115.json b/doc/sphinxext/prs/9115.json new file mode 100644 index 00000000000..e9e007faa69 --- /dev/null +++ b/doc/sphinxext/prs/9115.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "afce272d889feac294c2c1cd27e28ceb3e68f880", + "authors": [ + { + "n": "Matteo Anelli", + "e": "matteanelli.96.11@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 5, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9117.json b/doc/sphinxext/prs/9117.json new file mode 100644 index 00000000000..060a7474f16 --- /dev/null +++ b/doc/sphinxext/prs/9117.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c638f74ac727aeaca3b9616e56115b990a84c7ba", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/datasets/limo/limo.py": { + "a": 6, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9118.json b/doc/sphinxext/prs/9118.json new file mode 100644 index 00000000000..6c5f734cbdb --- /dev/null +++ b/doc/sphinxext/prs/9118.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "ee187764ca1ecaddcb4c5f4bd94b192ccb5886e2", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 55, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 32, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9121.json b/doc/sphinxext/prs/9121.json new file mode 100644 index 00000000000..7b671e4c76e --- /dev/null +++ b/doc/sphinxext/prs/9121.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bd6dcf71e0ac975363491ddc2136845fd5a11e11", + "authors": [ + { + "n": "Guillaume Favelier", + 
"e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 12, + "d": 24 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9122.json b/doc/sphinxext/prs/9122.json new file mode 100644 index 00000000000..8e723fb4c57 --- /dev/null +++ b/doc/sphinxext/prs/9122.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b9859ed6cc20cc5f006e00ae72621aa513f0cc9a", + "authors": [ + { + "n": "Britta Westner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "examples/inverse/plot_tf_dics.py": { + "a": 0, + "d": 124 + }, + "mne/beamformer/_dics.py": { + "a": 4, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9123.json b/doc/sphinxext/prs/9123.json new file mode 100644 index 00000000000..0c82d2fd281 --- /dev/null +++ b/doc/sphinxext/prs/9123.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5f0beb93f325ecd53e88fca500f1d08fbead863e", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 14, + "d": 6 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 4, + "d": 5 + }, + "mne/viz/backends/_abstract.py": { + "a": 49, + "d": 22 + }, + "mne/viz/backends/_notebook.py": { + "a": 22, + "d": 10 + }, + "mne/viz/backends/_qt.py": { + "a": 25, + "d": 13 + }, + "mne/viz/utils.py": { + "a": 14, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9124.json b/doc/sphinxext/prs/9124.json new file mode 100644 index 00000000000..43e7259cf1a --- /dev/null +++ b/doc/sphinxext/prs/9124.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "5ae29cd69aa6075dc9ecd97402571ff7b2a03d21", + "authors": [ + { + "n": "Felix Klotzsche", + "e": "klotzsche@cbs.mpg.de" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "eioe", + "e": "klotzsche@cbs.mpg.de" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 252, + "d": 65 + }, + "mne/time_frequency/tfr.py": { + "a": 135, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9127.json b/doc/sphinxext/prs/9127.json new file mode 100644 index 00000000000..547b1a66f5d --- /dev/null +++ b/doc/sphinxext/prs/9127.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "ab0f417b94767890e06ba62e6f770e46e912be6d", + "authors": [ + { + "n": "Ram PARI", + "e": null + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + 
"doc/references.bib": { + "a": 12, + "d": 0 + }, + "tutorials/preprocessing/plot_55_setting_eeg_reference.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9128.json b/doc/sphinxext/prs/9128.json new file mode 100644 index 00000000000..ba71facb5b2 --- /dev/null +++ b/doc/sphinxext/prs/9128.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "82f8476316385233dd35a4dc4e448730f4c39726", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/cancel.yml": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/913.json b/doc/sphinxext/prs/913.json new file mode 100644 index 00000000000..2226f7aedb9 --- /dev/null +++ b/doc/sphinxext/prs/913.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cf980143f2302af3d452d0d7615878674570a6d6", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 11, + "d": 55 + }, + "mne/tests/test_source_estimate.py": { + "a": 0, + "d": 25 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9130.json b/doc/sphinxext/prs/9130.json new file mode 100644 index 00000000000..e41b35ce1af --- /dev/null +++ b/doc/sphinxext/prs/9130.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "34d338bcff0f21fae35114e14c692ccbe7213e10", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + } + ], + "changes": { + ".gitignore": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9132.json b/doc/sphinxext/prs/9132.json new file mode 100644 index 00000000000..b2962fd2c4e --- /dev/null +++ b/doc/sphinxext/prs/9132.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "d8dbf98c005822f99112f5927975e13894f9c2ca", + "authors": [ + { + "n": "Matteo Anelli", + "e": "matteanelli.96.11@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 7, + "d": 0 + }, + "mne/viz/tests/test_evoked.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9135.json b/doc/sphinxext/prs/9135.json new file mode 100644 index 00000000000..a5ea9f94612 --- /dev/null +++ b/doc/sphinxext/prs/9135.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "62ca227ef00086cd644b2a1ddcb291501a506485", + "authors": [ + { + "n": "Falach", + "e": "rotemfa0@gmail.com" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/channels/channels.py": { + "a": 21, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 25, + "d": 0 + }, + "mne/io/reference.py": { + "a": 3, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9136.json b/doc/sphinxext/prs/9136.json new file mode 100644 index 00000000000..3e6a3ad4e0f --- /dev/null +++ b/doc/sphinxext/prs/9136.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "98fb5c26dee0b5c36a666e945fecd4423e0b712e", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Stefan Appelhoff", + "e": 
"stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/defaults.py": { + "a": 7, + "d": 0 + }, + "mne/io/base.py": { + "a": 122, + "d": 2 + }, + "mne/io/tests/test_raw.py": { + "a": 73, + "d": 0 + }, + "mne/tests/test_defaults.py": { + "a": 5, + "d": 47 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9139.json b/doc/sphinxext/prs/9139.json new file mode 100644 index 00000000000..52f042eebdf --- /dev/null +++ b/doc/sphinxext/prs/9139.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "598cde5e7b6304a06d80fd68dbd3c163d2563711", + "authors": [ + { + "n": "Catalina M. Galván", + "e": null + } + ], + "changes": { + "tutorials/source-modeling/plot_dipole_orientations.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/914.json b/doc/sphinxext/prs/914.json new file mode 100644 index 00000000000..3f81a99a9a6 --- /dev/null +++ b/doc/sphinxext/prs/914.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2181d56d29adbe6a23f0d9f5fd744b8fb810beef", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/classifier.py": { + "a": 6, + "d": 5 + }, + "mne/fiff/raw.py": { + "a": 8, + "d": 7 + }, + "mne/filter.py": { + "a": 54, + "d": 37 + }, + "mne/preprocessing/ssp.py": { + "a": 13, + "d": 10 + }, + "mne/tests/test_filter.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9140.json b/doc/sphinxext/prs/9140.json new file mode 100644 index 00000000000..e2c8677cc16 --- /dev/null +++ b/doc/sphinxext/prs/9140.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b27e06fb1d7f2d8445f7d26f3b4be907fe066335", + "authors": [ + { + "n": "Catalina M. 
Galván", + "e": null + } + ], + "changes": { + "tutorials/sample-datasets/plot_brainstorm_auditory.py": { + "a": 4, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9141.json b/doc/sphinxext/prs/9141.json new file mode 100644 index 00000000000..b1d745bc018 --- /dev/null +++ b/doc/sphinxext/prs/9141.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "e4a157174c3ae386072a6ead360efb8b32a5f729", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Johann Benerradi", + "e": "johann.benerradi@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 6, + "d": 2 + }, + "mne/channels/data/montages/artinis-brite23.elc": { + "a": 48, + "d": 0 + }, + "mne/channels/data/montages/artinis-octamon.elc": { + "a": 32, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 59, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 173, + "d": 4 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 138, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9142.json b/doc/sphinxext/prs/9142.json new file mode 100644 index 00000000000..f9d8307d5f3 --- /dev/null +++ b/doc/sphinxext/prs/9142.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f09dfb38010e1d0bfbb251409ae53730b5787014", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + } + ], + "changes": { + "mne/stats/parametric.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9143.json b/doc/sphinxext/prs/9143.json new file mode 100644 index 00000000000..45e7e7d5aa6 --- /dev/null +++ b/doc/sphinxext/prs/9143.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "56e41c942af6b06082f2756b19cf2e499674289c", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + } + ], + "changes": { + "tutorials/sample-datasets/plot_brainstorm_phantom_ctf.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9144.json b/doc/sphinxext/prs/9144.json new file mode 100644 index 00000000000..ba7e5ad7643 --- /dev/null +++ b/doc/sphinxext/prs/9144.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4ca89e6322a583fd16bfe957d98314b2ce8f0f72", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + } + ], + "changes": { + "tutorials/source-modeling/plot_dipole_orientations.py": { + "a": 10, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9145.json b/doc/sphinxext/prs/9145.json new file mode 100644 index 00000000000..6faf506563d --- /dev/null +++ b/doc/sphinxext/prs/9145.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b24f531d39c954d7923d49ff454c50bb4dc8e43d", + "authors": [ + { + "n": "Giorgio_M", + "e": "giorgio.marinato@unitn.it" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/preprocessing/plot_run_ica.py": { + "a": 0, + "d": 64 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 51, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9146.json b/doc/sphinxext/prs/9146.json new file mode 100644 index 00000000000..59d86013302 --- /dev/null +++ 
b/doc/sphinxext/prs/9146.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "0816e31439a01e7ba6a1488fb006ae75dd25ac57", + "authors": [ + { + "n": "Catalina M. Galván", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9147.json b/doc/sphinxext/prs/9147.json new file mode 100644 index 00000000000..c49e3937494 --- /dev/null +++ b/doc/sphinxext/prs/9147.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "82b3459a35c1c61c3c2d1ab724aaf9ee831d791d", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + } + ], + "changes": { + "mne/connectivity/spectral.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9149.json b/doc/sphinxext/prs/9149.json new file mode 100644 index 00000000000..016643c3ee6 --- /dev/null +++ b/doc/sphinxext/prs/9149.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "acf25ef4d0b781aeff573797238f422a048cb9c8", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 11, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 4, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 3, + "d": 0 + }, + "mne/viz/backends/_qt.py": { + "a": 42, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9150.json b/doc/sphinxext/prs/9150.json new file mode 100644 index 00000000000..4a8e90fcd3d --- /dev/null +++ b/doc/sphinxext/prs/9150.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "10e1869f39a41250ef4706949b906361a9bc33fd", + "authors": [ + { + "n": "Eduard", + "e": "eduardxort@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 42, + "d": 9 + }, + "mne/time_frequency/tfr.py": { + "a": 56, + "d": 45 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9151.json b/doc/sphinxext/prs/9151.json new file mode 100644 index 00000000000..c21ceda509d --- /dev/null +++ b/doc/sphinxext/prs/9151.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "39524d220bafae4935c9224140179b767ebd2689", + "authors": [ + { + "n": "Matteo Anelli", + "e": "matteanelli.96.11@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/check.py": { + "a": 10, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_evoked.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9152.json b/doc/sphinxext/prs/9152.json new file mode 100644 index 00000000000..66727bec0d9 --- /dev/null +++ b/doc/sphinxext/prs/9152.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "46d00e571a2bc7511f17ab103d64b4d6fafbde3d", + "authors": [ + { + "n": "Maggie Clarke", + "e": "mdclarke@uw.edu" + }, + { + "n": "Daniel McCloy", + "e": 
"dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 5, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 11, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9153.json b/doc/sphinxext/prs/9153.json new file mode 100644 index 00000000000..d397d1aedf0 --- /dev/null +++ b/doc/sphinxext/prs/9153.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "916f364ebacc0ac6ed8ee7854dbf904d143ee984", + "authors": [ + { + "n": "Ana Radanovic", + "e": null + } + ], + "changes": { + "doc/references.bib": { + "a": 12, + "d": 0 + }, + "mne/connectivity/spectral.py": { + "a": 14, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9154.json b/doc/sphinxext/prs/9154.json new file mode 100644 index 00000000000..18096e410d5 --- /dev/null +++ b/doc/sphinxext/prs/9154.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b1148741a95ac102e250d47c4f965c0df3e62618", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/datasets/spm_faces_dataset.py": { + "a": 2, + "d": 1 + }, + "examples/preprocessing/plot_find_ref_artifacts.py": { + "a": 4, + "d": 2 + }, + "examples/preprocessing/plot_ica_comparison.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/plot_run_ica.py": { + "a": 3, + "d": 1 + }, + "tutorials/preprocessing/plot_40_artifact_correction_ica.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9156.json b/doc/sphinxext/prs/9156.json new file mode 100644 index 00000000000..6afcccda90b --- /dev/null +++ b/doc/sphinxext/prs/9156.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d885e4a43dd407a3d4e137e57e7df34683ed9390", + "authors": [ + { + "n": "nordme", + "e": "nordme@uw.edu" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/epochs/plot_60_make_fixed_length_epochs.py": { + "a": 132, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9157.json b/doc/sphinxext/prs/9157.json new file mode 100644 index 00000000000..850610419e5 --- /dev/null +++ b/doc/sphinxext/prs/9157.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8c5f92d112b3c01a1fe96ee31f75552d0781d067", + "authors": [ + { + "n": "Apoorva Karekal", + "e": null + } + ], + "changes": { + "mne/transforms.py": { + "a": 8, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/916.json b/doc/sphinxext/prs/916.json new file mode 100644 index 00000000000..eb6b98f6e32 --- /dev/null +++ b/doc/sphinxext/prs/916.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4a1f46b77a384564e47cf3a69d8891f4f38969ab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_viz.py": { + "a": 37, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9160.json b/doc/sphinxext/prs/9160.json new file mode 100644 index 00000000000..8036f4baca6 --- /dev/null +++ b/doc/sphinxext/prs/9160.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fd1723fecccdbe01150b033a2943b03d9fe5292b", + "authors": [ + { + "n": "Eduard", + "e": "eduardxort@gmail.com" + } + ], + "changes": { + "mne/source_estimate.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9161.json b/doc/sphinxext/prs/9161.json new file mode 100644 index 00000000000..0444e3ab3b5 --- /dev/null +++ 
b/doc/sphinxext/prs/9161.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "bc78db538a6cd761742c614d716904a571aa8ee1", + "authors": [ + { + "n": "Valerii", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/data/html_templates.py": { + "a": 67, + "d": 0 + }, + "mne/io/base.py": { + "a": 8, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 26, + "d": 1 + }, + "mne/report.py": { + "a": 3, + "d": 85 + }, + "tutorials/intro/plot_30_info.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9162.json b/doc/sphinxext/prs/9162.json new file mode 100644 index 00000000000..aeb4a24d535 --- /dev/null +++ b/doc/sphinxext/prs/9162.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "bbcba957bcef31af9b49262ff1435a3c45c2e1df", + "authors": [ + { + "n": "Ram PARI", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9163.json b/doc/sphinxext/prs/9163.json new file mode 100644 index 00000000000..c08c6c1d0dd --- /dev/null +++ b/doc/sphinxext/prs/9163.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e9dc11fd496c1085bab519e3881cf5764c8645ee", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/channels/layout.py": { + "a": 6, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 10, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9164.json b/doc/sphinxext/prs/9164.json new file mode 100644 index 00000000000..80ed20db9ee --- /dev/null +++ b/doc/sphinxext/prs/9164.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2500586c484a759062c802fa5679322afaf8237c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_transformer.py": { + "a": 44, + "d": 33 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9165.json b/doc/sphinxext/prs/9165.json new file mode 100644 index 00000000000..9f8abf26668 --- /dev/null +++ b/doc/sphinxext/prs/9165.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "856f6122001f34436b2b11bdf7ebcf471c7cef63", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 1, + "d": 2 + }, + "mne/coreg.py": { + "a": 4, + "d": 51 + }, + "mne/source_space.py": { + "a": 49, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9166.json b/doc/sphinxext/prs/9166.json new file mode 100644 index 00000000000..68aa4127127 --- /dev/null +++ b/doc/sphinxext/prs/9166.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "0c225f44f990b13c42389c56719f2008b0f9b4f6", + "authors": [ + { + "n": "Steven Bierer", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/simulation/raw.py": { + "a": 11, + "d": 4 + }, 
+ "mne/simulation/source.py": { + "a": 63, + "d": 38 + }, + "mne/simulation/tests/test_raw.py": { + "a": 28, + "d": 1 + }, + "mne/simulation/tests/test_source.py": { + "a": 36, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9169.json b/doc/sphinxext/prs/9169.json new file mode 100644 index 00000000000..c893985026b --- /dev/null +++ b/doc/sphinxext/prs/9169.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "dc4545c7d0a80791a26d9e3c3651d336202b2bb1", + "authors": [ + { + "n": "Apoorva Karekal", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/overview/migrating.rst": { + "a": 33, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/917.json b/doc/sphinxext/prs/917.json new file mode 100644 index 00000000000..d472e11c02e --- /dev/null +++ b/doc/sphinxext/prs/917.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "397036343047346375672e8e78b9591914ae4a9a", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 9, + "d": 1 + }, + "examples/connectivity/plot_cwt_sensor_connectivity.py": { + "a": 3, + "d": 3 + }, + "examples/datasets/plot_spm_faces_dataset.py": { + "a": 0, + "d": 2 + }, + "examples/plot_ssp_projs_topomaps.py": { + "a": 5, + "d": 5 + }, + "examples/plot_topo_channel_epochs_image.py": { + "a": 1, + "d": 2 + }, + "examples/stats/plot_sensor_permutation_test.py": { + "a": 1, + "d": 2 + }, + "examples/time_frequency/plot_tfr_topography.py": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 2, + "d": 0 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 2 + }, + "mne/layouts/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/layouts/layout.py": { + "a": 98, + "d": 16 + }, + "mne/layouts/tests/test_layout.py": { + "a": 99, + "d": 3 + }, + "mne/utils.py": { + "a": 28, + "d": 0 + }, + "mne/viz.py": { + "a": 10, + "d": 54 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9170.json b/doc/sphinxext/prs/9170.json new file mode 100644 index 00000000000..47e4c45045c --- /dev/null +++ b/doc/sphinxext/prs/9170.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "12b43d605332b567382d2a9ed350d37d1e0042a2", + "authors": [ + { + "n": "nordme", + "e": "nordme@uw.edu" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9171.json b/doc/sphinxext/prs/9171.json new file mode 100644 index 00000000000..9beb6af30e6 --- /dev/null +++ b/doc/sphinxext/prs/9171.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8eda56f0c0ca106194f81e9a27064291d1ffa245", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/epochs/plot_60_make_fixed_length_epochs.py": { + "a": 31, + "d": 51 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9172.json b/doc/sphinxext/prs/9172.json new file mode 100644 index 00000000000..08aaae88774 --- /dev/null +++ b/doc/sphinxext/prs/9172.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "1e403b2f5708f624871eed7661a2b81b6259114c", + "authors": [ + { + "n": "Valerii", + "e": null + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + 
}, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/data/html_templates.py": { + "a": 13, + "d": 3 + }, + "mne/io/base.py": { + "a": 5, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9174.json b/doc/sphinxext/prs/9174.json new file mode 100644 index 00000000000..7c2fc3c6da0 --- /dev/null +++ b/doc/sphinxext/prs/9174.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7db3238ce80d094bc65518486c4f3d1c83421640", + "authors": [ + { + "n": "Valerii", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 5, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 24, + "d": 35 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9175.json b/doc/sphinxext/prs/9175.json new file mode 100644 index 00000000000..a535f2ecedf --- /dev/null +++ b/doc/sphinxext/prs/9175.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b5e9c38eef38565799596205ecc0e1a2dc967f0b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 3 + }, + "mne/io/tests/test_meas_info.py": { + "a": 30, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9178.json b/doc/sphinxext/prs/9178.json new file mode 100644 index 00000000000..a2abbc62bf4 --- /dev/null +++ b/doc/sphinxext/prs/9178.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "66430ed77e2f08745b9f4622801c5dc4ca1a5d73", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/io/pick.py": { + "a": 10, + "d": 8 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 5, + "d": 2 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 112, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9179.json b/doc/sphinxext/prs/9179.json new file mode 100644 index 00000000000..699e742433f --- /dev/null +++ b/doc/sphinxext/prs/9179.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "75a408ed7069bdcd6c6fd12ae12385328ef13e70", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_qt.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/918.json b/doc/sphinxext/prs/918.json new file mode 100644 index 00000000000..ee8abe59321 --- /dev/null +++ b/doc/sphinxext/prs/918.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "29b509dd5045cf62cbf092e95e358dd77e45938f", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/datasets/sample/sample.py": { + "a": 3, + "d": 6 + }, + "mne/datasets/utils.py": { + "a": 10, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9180.json b/doc/sphinxext/prs/9180.json new file mode 100644 index 00000000000..3eae20754a9 --- /dev/null +++ b/doc/sphinxext/prs/9180.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "2c888c5e68af64a8babe62539d34704c9ffca2bd", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 6 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_abstract.py": { + "a": 14, + "d": 6 + }, + "mne/viz/backends/_notebook.py": { + "a": 6, + "d": 12 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 37 + }, + "mne/viz/backends/_qt.py": { + "a": 69, + "d": 60 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9183.json b/doc/sphinxext/prs/9183.json new file mode 100644 index 00000000000..ee8f7814033 --- /dev/null +++ b/doc/sphinxext/prs/9183.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e430158cee998776551efffbb343e4e42a45ae37", + "authors": [ + { + "n": "Valerii", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 9, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 94, + "d": 46 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9184.json b/doc/sphinxext/prs/9184.json new file mode 100644 index 00000000000..22af9ad1ba6 --- /dev/null +++ b/doc/sphinxext/prs/9184.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e365bfb18f48f863975ed3e099c7c290787c4b45", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/preprocessing/nirs/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 3, + "d": 0 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 21, + "d": 2 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 120, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9185.json b/doc/sphinxext/prs/9185.json new file mode 100644 index 00000000000..2bc8d97087b --- /dev/null +++ b/doc/sphinxext/prs/9185.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "70709f5069ddb413c2719f4d2b41fd6bfea2349a", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 66, + "d": 24 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 56, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9187.json b/doc/sphinxext/prs/9187.json new file mode 100644 index 00000000000..ac7599a071c --- /dev/null +++ b/doc/sphinxext/prs/9187.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "19db73552fda221499f652b7c7b30335fccae045", + "authors": [ + { + "n": "Sumalyo Datta", + "e": null + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": 
"clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9189.json b/doc/sphinxext/prs/9189.json new file mode 100644 index 00000000000..b5836711a98 --- /dev/null +++ b/doc/sphinxext/prs/9189.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "a98c4f5ce9d8272b0b7528b3054c74b8d43373ae", + "authors": [ + { + "n": "Valerii", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/data/html_templates.py": { + "a": 25, + "d": 0 + }, + "mne/epochs.py": { + "a": 26, + "d": 0 + }, + "mne/report.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 0 + }, + "tutorials/misc/plot_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9190.json b/doc/sphinxext/prs/9190.json new file mode 100644 index 00000000000..49a97a8163d --- /dev/null +++ b/doc/sphinxext/prs/9190.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "176680a238a7c4f132fdd9198fc7bbe7797d764f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 63, + "d": 83 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 8, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 26, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 61, + "d": 38 + }, + "mne/viz/backends/_pyvista.py": { + "a": 0, + "d": 6 + }, + "mne/viz/backends/_qt.py": { + "a": 59, + "d": 39 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9191.json b/doc/sphinxext/prs/9191.json new file mode 100644 index 00000000000..66adfee0ef9 --- /dev/null +++ b/doc/sphinxext/prs/9191.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "13a0de51bd0f13113d866e73522fb60bdde37809", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9192.json b/doc/sphinxext/prs/9192.json new file mode 100644 index 00000000000..bc67c40683a --- /dev/null +++ b/doc/sphinxext/prs/9192.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "c6b22e82b64da0ed0f52a5f28bd4711e47090f9e", + "authors": [ + { + "n": "Jack Zhang", + "e": "me@jackzhang.me" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 52, + "d": 2 + }, + "mne/io/base.py": { + "a": 57, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 24, + "d": 1 + 
}, + "mne/io/utils.py": { + "a": 21, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 29, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/check.py": { + "a": 56, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 21, + "d": 0 + }, + "requirements_testing_extra.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9194.json b/doc/sphinxext/prs/9194.json new file mode 100644 index 00000000000..e44c1dad713 --- /dev/null +++ b/doc/sphinxext/prs/9194.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7350ca8697d149ead8454045fcd4fd1f7db50192", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + ".github/config.yml": { + "a": 21, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9196.json b/doc/sphinxext/prs/9196.json new file mode 100644 index 00000000000..c342398208f --- /dev/null +++ b/doc/sphinxext/prs/9196.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "acffc2ec986de456792503b5a6901fd68889bf8a", + "authors": [ + { + "n": "Sumalyo Datta", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9197.json b/doc/sphinxext/prs/9197.json new file mode 100644 index 00000000000..77cbf243e57 --- /dev/null +++ b/doc/sphinxext/prs/9197.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4ba386be39394ac1406e0e4366b27a5e416e8f03", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/simulation/plot_source_simulator.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 2, + "d": 12 + }, + "mne/simulation/source.py": { + "a": 8, + "d": 5 + }, + "mne/simulation/tests/test_source.py": { + "a": 2, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9200.json b/doc/sphinxext/prs/9200.json new file mode 100644 index 00000000000..f08f77d9da8 --- /dev/null +++ b/doc/sphinxext/prs/9200.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "148de1661d5e43cc88d62e27731ce44e78892951", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 7, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 12, + "d": 13 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9202.json b/doc/sphinxext/prs/9202.json new file mode 100644 index 00000000000..7c1575556d9 --- /dev/null +++ b/doc/sphinxext/prs/9202.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "88df35fc65015a65b806d825ea7ea39488ea597c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 8, + "d": 20 + }, + "doc/_templates/docs-toc.html": { + "a": 0, + "d": 17 + }, + "doc/_templates/layout.html": { + "a": 28, + "d": 11 + }, + "doc/_templates/sidebar-funders.html": { + "a": 0, + "d": 17 + }, + "doc/_templates/sidebar-quicklinks.html": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 56, + "d": 18 + }, + "doc/funding.rst": { + "a": 50, + "d": 0 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/9203.json b/doc/sphinxext/prs/9203.json new file mode 100644 index 00000000000..e75cb797073 --- /dev/null +++ b/doc/sphinxext/prs/9203.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2897d4b2609300da29f86707caf06fe29ecee74f", + "authors": [ + { + "n": "Jack Zhang", + "e": "me@jackzhang.me" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 41, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 7, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 53, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9204.json b/doc/sphinxext/prs/9204.json new file mode 100644 index 00000000000..bc6d1f60c0f --- /dev/null +++ b/doc/sphinxext/prs/9204.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3afa03029f7237ff26b35b4fa4ae962661dcfd1d", + "authors": [ + { + "n": "Jack Zhang", + "e": "me@jackzhang.me" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/eeglab/eeglab.py": { + "a": 5, + "d": 0 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9207.json b/doc/sphinxext/prs/9207.json new file mode 100644 index 00000000000..c019aa64013 --- /dev/null +++ b/doc/sphinxext/prs/9207.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a067a3fbf2c19e60237e1b7ee76ed3ee9038a366", + "authors": [ + { + "n": "Ram PARI", + "e": null + }, + { + "n": "Mainak Jas", + "e": "jasmainak@users.noreply.github.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Mainak Jas", + "e": "jasmainak@users.noreply.github.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 16, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9208.json b/doc/sphinxext/prs/9208.json new file mode 100644 index 00000000000..07aa44b7dd3 --- /dev/null +++ b/doc/sphinxext/prs/9208.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2a8d56b453ccf4e7b3b22a80bdd42d4ac604474e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 28, + "d": 101 + }, + "mne/utils/docs.py": { + "a": 75, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/921.json b/doc/sphinxext/prs/921.json new file mode 100644 index 00000000000..2e098380fdf --- /dev/null +++ b/doc/sphinxext/prs/921.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "bb7cbaf14b8e1be34fc671ed760b32621beab3e0", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "doc/source/getting_started.rst": { + "a": 4, + "d": 5 + }, + "doc/source/whats_new.rst": { + "a": 6, + "d": 2 + }, + "examples/inverse/plot_mixed_norm_L21_inverse.py": { + "a": 0, + "d": 3 + }, + "examples/inverse/plot_time_frequency_mixed_norm_inverse.py": { + "a": 0, + "d": 3 + }, + "examples/plot_evoked_delayed_ssp.py": { + "a": 1, + "d": 3 + }, + "examples/plot_megsim_data.py": { + "a": 0, + "d": 2 + }, + "examples/plot_shift_evoked.py": { + "a": 7, + "d": 4 + }, + "examples/preprocessing/plot_ica_from_epochs.py": { + "a": 0, + "d": 1 + }, + "examples/realtime/plot_compute_rt_average.py": { + "a": 7, + "d": 14 + }, + "mne/tests/test_viz.py": { + "a": 86, + "d": 77 + }, + "mne/viz.py": { + "a": 26, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9210.json b/doc/sphinxext/prs/9210.json new file mode 100644 index 00000000000..978305d455b --- /dev/null +++ b/doc/sphinxext/prs/9210.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "53a86345321d5289e5895de2d59a1c188a182a74", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/baseline.py": { + "a": 77, + "d": 0 + }, + "mne/dipole.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 7, + "d": 80 + }, + "mne/evoked.py": { + "a": 127, + "d": 18 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_evoked.py": { + "a": 46, + "d": 4 + }, + "mne/tests/test_report.py": { + "a": 4, + "d": 2 + }, + "tutorials/evoked/plot_10_evoked_overview.py": { + "a": 20, + "d": 0 + }, + "tutorials/evoked/plot_20_visualize_evoked.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9211.json b/doc/sphinxext/prs/9211.json new file mode 100644 index 00000000000..249e27a1e6b --- /dev/null +++ b/doc/sphinxext/prs/9211.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8d6b115fa3475e87c2b2dbf638724a1697df09cd", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/preprocessing/nirs/nirs.py": { + "a": 5, + "d": 0 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9212.json b/doc/sphinxext/prs/9212.json new file mode 100644 index 00000000000..6109b0d80a0 --- /dev/null +++ b/doc/sphinxext/prs/9212.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5e38787306c147595252dbcf59572c017f4c8795", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 1, + "d": 0 + }, + ".github/workflows/compat_minimal.yml": { + "a": 1, + "d": 0 + }, + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 0 + }, + ".github/workflows/linux_conda.yml": { + "a": 1, + "d": 0 + }, + ".github/workflows/linux_pip.yml": { + "a": 1, + "d": 0 + }, + ".github/workflows/macos_conda.yml": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9213.json b/doc/sphinxext/prs/9213.json new file mode 100644 index 00000000000..dc9d089afd3 --- /dev/null +++ b/doc/sphinxext/prs/9213.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "02bb98b35a34d49d22037e3d494ba1e4f83994de", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + ".github/workflows/cancel.yml": { + "a": 0, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9217.json 
b/doc/sphinxext/prs/9217.json new file mode 100644 index 00000000000..abd36ceea47 --- /dev/null +++ b/doc/sphinxext/prs/9217.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b619f54aa80d248e353b54c8be1f6c4932dc3d80", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/utils/_testing.py": { + "a": 6, + "d": 4 + }, + "mne/viz/tests/test_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9218.json b/doc/sphinxext/prs/9218.json new file mode 100644 index 00000000000..b7701e754bf --- /dev/null +++ b/doc/sphinxext/prs/9218.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "58ccac70efcea3c4c785a14b4ca593523e466237", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/references.bib": { + "a": 10, + "d": 0 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 3, + "d": 9 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 21, + "d": 57 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9227.json b/doc/sphinxext/prs/9227.json new file mode 100644 index 00000000000..f8999f82714 --- /dev/null +++ b/doc/sphinxext/prs/9227.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "53ce5871b6ceb466f665d41bc8db2938a2aa0084", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 2, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/boxy/boxy.py": { + "a": 2, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 3, + "d": 6 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 6, + "d": 4 + }, + "mne/io/curry/curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 5, + "d": 4 + }, + "mne/io/egi/egi.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/egimff.py": { + "a": 3, + "d": 1 + }, + "mne/io/eximia/eximia.py": { + "a": 2, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 7, + "d": 7 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 29, + "d": 6 + }, + "mne/io/nedf/nedf.py": { + "a": 2, + "d": 1 + }, + "mne/io/nihon/nihon.py": { + "a": 2, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 5, + "d": 4 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/persyst/persyst.py": { + "a": 2, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 2, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 36, + "d": 3 + }, + "mne/utils/check.py": { + "a": 19, + "d": 8 + }, + "mne/utils/tests/test_check.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9229.json b/doc/sphinxext/prs/9229.json new file mode 100644 index 00000000000..ae7c65c604d --- /dev/null +++ b/doc/sphinxext/prs/9229.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e243ee0e3e055c4e20a3f426bc9709fe5529010e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 57, + "d": 8 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 17, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9230.json b/doc/sphinxext/prs/9230.json new file mode 100644 index 00000000000..3327894b91b --- /dev/null +++ b/doc/sphinxext/prs/9230.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b6542f5bbf4b5f5b444b8a9c601e32cd0b667de8", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + 
"doc/_static/style.css": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9235.json b/doc/sphinxext/prs/9235.json new file mode 100644 index 00000000000..eb244e608c0 --- /dev/null +++ b/doc/sphinxext/prs/9235.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5cc6eb8d7ebd102049843336d9d4a342f0f3e966", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 8, + "d": 8 + }, + "mne/evoked.py": { + "a": 3, + "d": 5 + }, + "mne/io/base.py": { + "a": 4, + "d": 5 + }, + "mne/io/tests/test_apply_function.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 24, + "d": 28 + }, + "mne/tests/test_evoked.py": { + "a": 8, + "d": 15 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9236.json b/doc/sphinxext/prs/9236.json new file mode 100644 index 00000000000..de32a7edc7b --- /dev/null +++ b/doc/sphinxext/prs/9236.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "067d25ab95b097f41de05c6b05a7b0967bafccbb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 26, + "d": 15 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9237.json b/doc/sphinxext/prs/9237.json new file mode 100644 index 00000000000..3b40c4c832c --- /dev/null +++ b/doc/sphinxext/prs/9237.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5cc8f2912329ea2aaf92bd02b6f144a37972d5c1", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 15, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9239.json b/doc/sphinxext/prs/9239.json new file mode 100644 index 00000000000..eaf104d9c60 --- /dev/null +++ b/doc/sphinxext/prs/9239.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a055120031b46ada317ebcf24b6eecd6a57cb322", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/stats/regression.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/924.json b/doc/sphinxext/prs/924.json new file mode 100644 index 00000000000..8bdeeab0773 --- /dev/null +++ b/doc/sphinxext/prs/924.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ad3378490c66fa749873b295de3087646bf59890", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/forward/forward.py": { + "a": 8, + "d": 5 + }, + "mne/forward/tests/test_forward.py": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/inverse.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 0, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9240.json b/doc/sphinxext/prs/9240.json new file mode 100644 index 00000000000..b2d9bc3681b --- /dev/null +++ b/doc/sphinxext/prs/9240.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5f676980648e755f3fb84415eb44a504a27e3a8b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 1, + "d": 0 + }, + "tutorials/source-modeling/plot_mne_dspm_source_localization.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/9241.json b/doc/sphinxext/prs/9241.json new file mode 100644 index 00000000000..8e9b41e9564 --- /dev/null +++ b/doc/sphinxext/prs/9241.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "4080b4366fd5465a3f27df59f20cee050c430502", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 11, + "d": 5 + }, + "mne/channels/interpolation.py": { + "a": 8, + "d": 48 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 40, + "d": 2 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/interpolate.py": { + "a": 67, + "d": 0 + }, + "mne/preprocessing/tests/test_interpolate.py": { + "a": 60, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9243.json b/doc/sphinxext/prs/9243.json new file mode 100644 index 00000000000..5d11e248054 --- /dev/null +++ b/doc/sphinxext/prs/9243.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "7fbbef4dacff82d249826f99b2ec12474cef8f08", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/icons/README.rst": { + "a": 1, + "d": 1 + }, + "mne/icons/mne-bigsur-white.png": { + "a": 0, + "d": 0 + }, + "mne/icons/mne.qrc": { + "a": 1, + "d": 0 + }, + "mne/icons/resources.py": { + "a": 3445, + "d": 461 + }, + "mne/viz/backends/_pyvista.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9246.json b/doc/sphinxext/prs/9246.json new file mode 100644 index 00000000000..253b551f348 --- /dev/null +++ b/doc/sphinxext/prs/9246.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ed485f9651c8d0a4ca116e3e7a6b6ad95741f364", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 4 + }, + "mne/channels/channels.py": { + "a": 8, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 34, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9247.json b/doc/sphinxext/prs/9247.json new file mode 100644 index 00000000000..a376ef4b450 --- /dev/null +++ b/doc/sphinxext/prs/9247.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "dcdbd071b117746d68972821da668d67fd225841", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 130, + "d": 78 + }, + "doc/conf.py": { + "a": 24, + "d": 8 + }, + "doc/install/mne_c.rst": { + "a": 2, + "d": 3 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/morph.py": { + "a": 5, + "d": 6 + }, + "tools/circleci_dependencies.sh": { + "a": 38, + "d": 9 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9248.json b/doc/sphinxext/prs/9248.json new file mode 100644 index 00000000000..041602c6b77 --- /dev/null +++ b/doc/sphinxext/prs/9248.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "2bfac6534e07a1b6f6f2e41aea446cdfbc4a4b32", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 3 + }, + "mne/viz/tests/test_3d.py": { + "a": 15, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/925.json b/doc/sphinxext/prs/925.json new file mode 100644 index 00000000000..8bc104ef549 --- /dev/null +++ b/doc/sphinxext/prs/925.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "04a0e30246ea617ec8c8be7dee455476b0016ee5", + "authors": [ + { + "n": "Praveen Sripad", + "e": "pravsripad@gmail.com" + } + ], + "changes": { + "examples/time_frequency/plot_single_trial_spectra.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9251.json b/doc/sphinxext/prs/9251.json new file mode 100644 index 00000000000..17ba464b993 --- /dev/null +++ b/doc/sphinxext/prs/9251.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f6b4dd8326975aac3f1a1ec9899dd38c04879c4a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/tests/test_file_traits.py": { + "a": 4, + "d": 4 + }, + "mne/io/_digitization.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 31, + "d": 43 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 68, + "d": 38 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9252.json b/doc/sphinxext/prs/9252.json new file mode 100644 index 00000000000..4316f67b84e --- /dev/null +++ b/doc/sphinxext/prs/9252.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d33e105e8f75965ae5171167032525e2e6b5501b", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9253.json b/doc/sphinxext/prs/9253.json new file mode 100644 index 00000000000..7cf4a31b8ec --- /dev/null +++ b/doc/sphinxext/prs/9253.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4891268d6e846324ca17884a5ac55354589b23e8", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9254.json b/doc/sphinxext/prs/9254.json new file mode 100644 index 00000000000..a30d11a6d7d --- /dev/null +++ b/doc/sphinxext/prs/9254.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "96b1d89364f867e9f901a2dcdd63d681371b6f98", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "doc/_static/style.css": { + "a": 2, + "d": 1 + }, + "doc/_templates/layout.html": { + "a": 0, + "d": 12 + }, + "doc/references.bib": { + "a": 104, + "d": 134 + }, + "tutorials/epochs/plot_40_autogenerate_metadata.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9259.json b/doc/sphinxext/prs/9259.json new file mode 100644 index 00000000000..72e702e9302 --- /dev/null +++ b/doc/sphinxext/prs/9259.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ee639afd82c6ef2f04dc26eb156226a2fd0f1f2a", + "authors": [ + { + "n": "Richard Höchenberger", + "e": 
"richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 9, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9261.json b/doc/sphinxext/prs/9261.json new file mode 100644 index 00000000000..ac0d6287a27 --- /dev/null +++ b/doc/sphinxext/prs/9261.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3ea17053b81df811e4a13f01985bce2f2d0291e6", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 56, + "d": 27 + }, + "mne/tests/test_epochs.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9262.json b/doc/sphinxext/prs/9262.json new file mode 100644 index 00000000000..ecaea1b4c6e --- /dev/null +++ b/doc/sphinxext/prs/9262.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3893c4e61d5d17526ecd8cb4b4cfbbf5799a195b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 5, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 30, + "d": 23 + }, + "mne/viz/tests/test_raw.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9266.json b/doc/sphinxext/prs/9266.json new file mode 100644 index 00000000000..eec26e09fb8 --- /dev/null +++ b/doc/sphinxext/prs/9266.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3bc8f4e35fb09f135a1b2c0e5ba9519cba470080", + "authors": [ + { + "n": "Mainak Jas", + "e": null + } + ], + "changes": { + "mne/channels/interpolation.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9268.json b/doc/sphinxext/prs/9268.json new file mode 100644 index 00000000000..641f4f6f4c1 --- /dev/null +++ b/doc/sphinxext/prs/9268.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3d00c4567f59315ea253fb53f2fcfecdb00f31eb", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 7, + "d": 7 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 22, + "d": 0 + }, + "mne/io/open.py": { + "a": 10, + "d": 4 + }, + "mne/io/tag.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9269.json b/doc/sphinxext/prs/9269.json new file mode 100644 index 00000000000..1cb75d4df94 --- /dev/null +++ b/doc/sphinxext/prs/9269.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e57f087e8bb368f75dcc5dad95f70417f7b10172", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/eog.py": { + "a": 37, + "d": 31 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 18, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/927.json b/doc/sphinxext/prs/927.json new file mode 100644 index 00000000000..45d776bf206 --- /dev/null +++ b/doc/sphinxext/prs/927.json @@ -0,0 +1,27 @@ +{ + 
"merge_commit_sha": "e391254f632e170ae14906de4fd26e2e7a8197e2", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/layouts/layout.py": { + "a": 5, + "d": 0 + }, + "mne/layouts/tests/test_layout.py": { + "a": 3, + "d": 0 + }, + "mne/tests/test_viz.py": { + "a": 8, + "d": 0 + }, + "mne/viz.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9270.json b/doc/sphinxext/prs/9270.json new file mode 100644 index 00000000000..0c4231934ab --- /dev/null +++ b/doc/sphinxext/prs/9270.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "350f76bff4b590e3662830cf0308492638c27703", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Mainak Jas", + "e": "jasmainak@users.noreply.github.com" + }, + { + "n": "Mainak Jas", + "e": "jasmainak@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/reference.py": { + "a": 80, + "d": 43 + }, + "mne/io/tests/test_reference.py": { + "a": 6, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9274.json b/doc/sphinxext/prs/9274.json new file mode 100644 index 00000000000..80310177134 --- /dev/null +++ b/doc/sphinxext/prs/9274.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "392d5cab6e9460f177b5832d3201366ce58967e4", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 7, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 6, + "d": 2 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9275.json b/doc/sphinxext/prs/9275.json new file mode 100644 index 00000000000..1312449294b --- /dev/null +++ b/doc/sphinxext/prs/9275.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "dece6792280a7e3fe566b58e0e3c917b75edbcb5", + "authors": [ + { + "n": "Felix Klotzsche", + "e": "klotzsche@cbs.mpg.de" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 79, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9277.json b/doc/sphinxext/prs/9277.json new file mode 100644 index 00000000000..3208e0ecb6f --- /dev/null +++ b/doc/sphinxext/prs/9277.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "eaeef19153154c2e087961fd6d1fdcc6613d0b88", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ssp.py": { + "a": 4, + "d": 4 + }, + 
"mne/preprocessing/tests/test_ssp.py": { + "a": 12, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9278.json b/doc/sphinxext/prs/9278.json new file mode 100644 index 00000000000..3aa959a144e --- /dev/null +++ b/doc/sphinxext/prs/9278.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "7b428b96c2a719902ae72a498618062f879d674c", + "authors": [ + { + "n": "Ram PARI", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/evoked.py": { + "a": 7, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 10, + "d": 6 + }, + "mne/viz/tests/test_topo.py": { + "a": 7, + "d": 0 + }, + "mne/viz/topo.py": { + "a": 17, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/928.json b/doc/sphinxext/prs/928.json new file mode 100644 index 00000000000..8312c410c87 --- /dev/null +++ b/doc/sphinxext/prs/928.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9044b1fa76fa17de4a5e98e1709fa82323279841", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9280.json b/doc/sphinxext/prs/9280.json new file mode 100644 index 00000000000..45567ad5a31 --- /dev/null +++ b/doc/sphinxext/prs/9280.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "53a79ec1b6d3c42951263a1e1aa6e7f9a4c2c6be", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/channels/interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 19, + "d": 19 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 23, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9282.json b/doc/sphinxext/prs/9282.json new file mode 100644 index 00000000000..22a424ec79b --- /dev/null +++ b/doc/sphinxext/prs/9282.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "df6115ead7474c0374eda5958266fd02c9b5b1e0", + "authors": [ + { + "n": "Johann Benerradi", + "e": null + } + ], + "changes": { + "mne/channels/interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 6, + "d": 7 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 10, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9283.json b/doc/sphinxext/prs/9283.json new file mode 100644 index 00000000000..f4d96c80afe --- /dev/null +++ b/doc/sphinxext/prs/9283.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "dcec6c68a887c06541ddee1cf0ecd253982b5936", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/edf/edf.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/9284.json b/doc/sphinxext/prs/9284.json new file mode 100644 index 00000000000..6f87b52a2bc --- /dev/null +++ b/doc/sphinxext/prs/9284.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "e8b28f25c000bc2ea7e730c170eecf65acc18b34", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/connectivity/plot_mixed_source_space_connectivity.py": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_coherence_epochs.py": { + "a": 0, + "d": 1 + }, + "examples/connectivity/plot_mne_inverse_connectivity_spectrum.py": { + "a": 1, + "d": 0 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation.py": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_envelope_correlation_volume.py": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_label_connectivity.py": { + "a": 2, + "d": 2 + }, + "examples/connectivity/plot_mne_inverse_psi_visual.py": { + "a": 0, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 16, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 51, + "d": 0 + }, + "mne/utils/check.py": { + "a": 16, + "d": 13 + }, + "mne/viz/_3d.py": { + "a": 5, + "d": 5 + }, + "tools/get_minimal_commands.sh": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9285.json b/doc/sphinxext/prs/9285.json new file mode 100644 index 00000000000..acc8866d6f2 --- /dev/null +++ b/doc/sphinxext/prs/9285.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "36f5dcd355ab15dbc5b5e86fa7cc679997078f45", + "authors": [ + { + "n": "Ram PARI", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topo.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9287.json b/doc/sphinxext/prs/9287.json new file mode 100644 index 00000000000..cdb20d4492d --- /dev/null +++ b/doc/sphinxext/prs/9287.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "02978dfa847d5b089064897c2a22796ea0b1b88d", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/nicolet/nicolet.py": { + "a": 12, + "d": 5 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9288.json b/doc/sphinxext/prs/9288.json new file mode 100644 index 00000000000..cd27cab92d3 --- /dev/null +++ b/doc/sphinxext/prs/9288.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "f6d9c737cd3eb83b17b7cfead9e8fe78b3ffee53", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/copybutton.js": { + "a": 0, + "d": 66 + }, + "doc/_static/institution_logos/Telecom_Paris_Tech.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/Telecom_Paris_Tech.svg": { + "a": 1, + "d": 0 + }, + "doc/_static/mne_logo.svg": { + "a": 172, + "d": 740 + }, + "doc/_static/style.css": { + "a": 6, + "d": 5 + }, + 
"doc/_templates/copyright.html": { + "a": 1, + "d": 0 + }, + "doc/_templates/docs-navbar.html": { + "a": 0, + "d": 20 + }, + "doc/_templates/homepage.html": { + "a": 44, + "d": 0 + }, + "doc/_templates/layout.html": { + "a": 6, + "d": 53 + }, + "doc/_templates/version-switcher.html": { + "a": 11, + "d": 0 + }, + "doc/conf.py": { + "a": 8, + "d": 5 + }, + "doc/funding.rst": { + "a": 1, + "d": 1 + }, + "doc/index.rst": { + "a": 1, + "d": 1 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9289.json b/doc/sphinxext/prs/9289.json new file mode 100644 index 00000000000..e3ac18e6603 --- /dev/null +++ b/doc/sphinxext/prs/9289.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "c5b12979e4f3b0dd6861b71d26d4302e2288a027", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/references.bib": { + "a": 24, + "d": 14 + }, + "examples/io/plot_read_xdf.py": { + "a": 42, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + }, + "tutorials/io/plot_20_reading_eeg_data.py": { + "a": 101, + "d": 79 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/929.json b/doc/sphinxext/prs/929.json new file mode 100644 index 00000000000..5247917cf75 --- /dev/null +++ b/doc/sphinxext/prs/929.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bf0afc451c7967c03d8000786382ff194dedf4dc", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/_static/institutions.png": { + "a": 0, + "d": 0 + }, + "doc/source/_static/logo.png": { + "a": 0, + "d": 0 + }, + "doc/source/_static/mne_logo.png": { + "a": 0, + "d": 0 + }, + "doc/source/_templates/layout.html": { + "a": 10, + "d": 2 + }, + "doc/upload_html.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9291.json b/doc/sphinxext/prs/9291.json new file mode 100644 index 00000000000..f80ef5c84d0 --- /dev/null +++ b/doc/sphinxext/prs/9291.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "d0a069547470ecbbf8210abae4ef06fff32d1e59", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 3, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 9, + "d": 12 + }, + "mne/viz/tests/test_3d.py": { + "a": 6, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9292.json b/doc/sphinxext/prs/9292.json new file mode 100644 index 00000000000..fe9d672b2c1 --- /dev/null +++ b/doc/sphinxext/prs/9292.json @@ -0,0 +1,819 @@ +{ + "merge_commit_sha": 
"507b3a4ed46325f1505ff2277ec2df58df36d875", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/_includes/channel_interpolation.rst": { + "a": 1, + "d": 1 + }, + "doc/_templates/layout.html": { + "a": 13, + "d": 0 + }, + "doc/conf.py": { + "a": 210, + "d": 4 + }, + "doc/overview/design_philosophy.rst": { + "a": 2, + "d": 0 + }, + "doc/overview/faq.rst": { + "a": 1, + "d": 2 + }, + "examples/connectivity/cwt_sensor_connectivity.py": { + "a": 0, + "d": 0 + }, + "examples/connectivity/mixed_source_space_connectivity.py": { + "a": 0, + "d": 0 + }, + "examples/connectivity/mne_inverse_coherence_epochs.py": { + "a": 0, + "d": 0 + }, + "examples/connectivity/mne_inverse_connectivity_spectrum.py": { + "a": 0, + "d": 0 + }, + "examples/connectivity/mne_inverse_envelope_correlation.py": { + "a": 0, + "d": 0 + }, + "examples/connectivity/mne_inverse_envelope_correlation_volume.py": { + "a": 0, + "d": 0 + }, + "examples/connectivity/mne_inverse_label_connectivity.py": { + "a": 0, + "d": 0 + }, + "examples/connectivity/mne_inverse_psi_visual.py": { + "a": 0, + "d": 0 + }, + "examples/connectivity/sensor_connectivity.py": { + "a": 0, + "d": 0 + }, + "examples/datasets/brainstorm_data.py": { + "a": 0, + "d": 0 + }, + "examples/datasets/hf_sef_data.py": { + "a": 0, + "d": 0 + }, + "examples/datasets/limo_data.py": { + "a": 0, + "d": 0 + }, + "examples/datasets/opm_data.py": { + "a": 0, + "d": 0 + }, + "examples/datasets/spm_faces_dataset_sgskip.py": { + "a": 0, + "d": 0 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 0, + "d": 0 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 2, + "d": 0 + }, + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 0, + "d": 0 + }, + "examples/decoding/decoding_spatio_temporal_source.py": { + "a": 0, + "d": 0 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 2, + "d": 0 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 0, + "d": 0 + }, + "examples/decoding/decoding_unsupervised_spatial_filter.py": { + "a": 0, + "d": 0 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 2, + "d": 0 + }, + "examples/decoding/ems_filtering.py": { + "a": 2, + "d": 0 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 2, + "d": 0 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 0, + "d": 0 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 0, + "d": 0 + }, + "examples/forward/forward_sensitivity_maps.py": { + "a": 0, + "d": 0 + }, + "examples/forward/left_cerebellum_volume_source.py": { + "a": 0, + "d": 0 + }, + "examples/forward/source_space_morphing.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/compute_mne_inverse_epochs_in_label.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/compute_mne_inverse_raw_in_label.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/compute_mne_inverse_volume.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/covariance_whitening_dspm.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/custom_inverse_solver.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/dics_source_power.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/gamma_map_inverse.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/label_activation_from_stc.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/label_from_stc.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/label_source_activations.py": { + "a": 0, + "d": 
0 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/mne_cov_power.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/morph_surface_stc.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/morph_volume_stc.py": { + "a": 1, + "d": 2 + }, + "examples/inverse/multidict_reweighted_tfmxne.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/psf_ctf_label_leakage.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/psf_ctf_vertices.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/psf_ctf_vertices_lcmv.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/rap_music.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/read_inverse.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/read_stc.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/resolution_metrics.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/resolution_metrics_eegmeg.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/snr_estimate.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/source_space_snr.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/time_frequency_mixed_norm_inverse.py": { + "a": 0, + "d": 0 + }, + "examples/inverse/vector_mne_solution.py": { + "a": 0, + "d": 0 + }, + "examples/io/elekta_epochs.py": { + "a": 0, + "d": 0 + }, + "examples/io/read_neo_format.py": { + "a": 0, + "d": 0 + }, + "examples/io/read_noise_covariance_matrix.py": { + "a": 0, + "d": 0 + }, + "examples/io/read_xdf.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/define_target_events.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/find_ref_artifacts.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/fnirs_artifact_removal.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/ica_comparison.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/interpolate_bad_channels.py": { + "a": 2, + "d": 0 + }, + "examples/preprocessing/movement_compensation.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/movement_detection.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/muscle_detection.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/otp.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/shift_evoked.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/virtual_evoked.py": { + "a": 0, + "d": 0 + }, + "examples/preprocessing/xdawn_denoising.py": { + "a": 2, + "d": 0 + }, + "examples/simulation/simulate_evoked_data.py": { + "a": 0, + "d": 0 + }, + "examples/simulation/simulate_raw_data.py": { + "a": 0, + "d": 0 + }, + "examples/simulation/simulated_raw_data_using_subject_anatomy.py": { + "a": 0, + "d": 0 + }, + "examples/simulation/source_simulator.py": { + "a": 0, + "d": 0 + }, + "examples/stats/cluster_stats_evoked.py": { + "a": 0, + "d": 0 + }, + "examples/stats/fdr_stats_evoked.py": { + "a": 0, + "d": 0 + }, + "examples/stats/linear_regression_raw.py": { + "a": 0, + "d": 0 + }, + "examples/stats/sensor_permutation_test.py": { + "a": 0, + "d": 0 + }, + "examples/stats/sensor_regression.py": { + "a": 0, + "d": 0 + }, + "examples/time_frequency/compute_csd.py": { + "a": 0, + "d": 0 + }, + "examples/time_frequency/compute_source_psd_epochs.py": { + "a": 0, + "d": 0 + }, + "examples/time_frequency/source_label_time_frequency.py": { + "a": 0, + "d": 0 + }, + "examples/time_frequency/source_power_spectrum.py": { + "a": 0, + "d": 0 + }, + 
"examples/time_frequency/source_power_spectrum_opm.py": { + "a": 0, + "d": 0 + }, + "examples/time_frequency/source_space_time_frequency.py": { + "a": 0, + "d": 0 + }, + "examples/time_frequency/temporal_whitening.py": { + "a": 0, + "d": 0 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 0, + "d": 0 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 0, + "d": 0 + }, + "examples/time_frequency/time_frequency_simulated.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/3d_to_2d.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/channel_epochs_image.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/eeg_on_scalp.py": { + "a": 2, + "d": 0 + }, + "examples/visualization/eeglab_head_sphere.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/evoked_arrowmap.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/evoked_topomap.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/evoked_whitening.py": { + "a": 2, + "d": 0 + }, + "examples/visualization/meg_sensors.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/mne_helmet.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/montage_sgskip.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/parcellation.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/publication_figure.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/roi_erpimage_by_rt.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/sensor_noise_level.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/ssp_projs_sensitivity_map.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/topo_compare_conditions.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/topo_customized.py": { + "a": 0, + "d": 0 + }, + "examples/visualization/xhemi.py": { + "a": 0, + "d": 0 + }, + "tutorials/clinical/20_seeg.py": { + "a": 0, + "d": 0 + }, + "tutorials/clinical/30_ecog.py": { + "a": 0, + "d": 0 + }, + "tutorials/clinical/60_sleep.py": { + "a": 0, + "d": 0 + }, + "tutorials/clinical/README.txt": { + "a": 4, + "d": 0 + }, + "tutorials/discussions/README.txt": { + "a": 0, + "d": 5 + }, + "tutorials/discussions/plot_background_ica.py": { + "a": 0, + "d": 93 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 0, + "d": 0 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 0, + "d": 0 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 0, + "d": 0 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 0, + "d": 0 + }, + "tutorials/epochs/50_epochs_to_data_frame.py": { + "a": 0, + "d": 0 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 0, + "d": 0 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 0, + "d": 0 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 0, + "d": 0 + }, + "tutorials/evoked/30_eeg_erp.py": { + "a": 0, + "d": 0 + }, + "tutorials/evoked/40_whitened.py": { + "a": 0, + "d": 0 + }, + "tutorials/forward/10_background_freesurfer.py": { + "a": 0, + "d": 0 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 0, + "d": 0 + }, + "tutorials/forward/30_forward.py": { + "a": 1, + "d": 2 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 0, + "d": 0 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 0, + "d": 0 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 2, + "d": 0 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 2, + "d": 3 + }, + "tutorials/forward/README.txt": { + "a": 5, + "d": 0 + }, + "tutorials/intro/10_overview.py": { + "a": 0, + "d": 0 + }, + 
"tutorials/intro/15_inplace.py": { + "a": 101, + "d": 0 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/30_info.py": { + "a": 0, + "d": 0 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 1, + "d": 2 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 0, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 13, + "d": 5 + }, + "tutorials/inverse/10_stc_class.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/40_mne_fixed_free.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/90_phantom_4DBTi.py": { + "a": 0, + "d": 0 + }, + "tutorials/inverse/README.txt": { + "a": 2, + "d": 2 + }, + "tutorials/io/10_reading_meg_data.py": { + "a": 0, + "d": 0 + }, + "tutorials/io/20_reading_eeg_data.py": { + "a": 0, + "d": 0 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 0, + "d": 0 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 3, + "d": 3 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 0, + "d": 0 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 7, + "d": 8 + }, + "tutorials/misc/README.txt": { + "a": 0, + "d": 5 + }, + "tutorials/misc/plot_modifying_data_inplace.py": { + "a": 0, + "d": 78 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 9, + "d": 2 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 3, + "d": 1 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 0, + "d": 0 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 0, + "d": 0 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 0, + "d": 0 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 0, + "d": 0 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 0, + "d": 0 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 0, + "d": 0 + }, + "tutorials/sample-datasets/README.txt": { + "a": 0, + "d": 5 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 0, + "d": 0 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 0, + "d": 0 + }, + "tutorials/simulation/80_dics.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 0, + "d": 0 + }, + 
"tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 0, + "d": 0 + }, + "tutorials/stats-source-space/70_cluster_rmANOVA_time_freq.py": { + "a": 0, + "d": 0 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 0, + "d": 0 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9296.json b/doc/sphinxext/prs/9296.json new file mode 100644 index 00000000000..0a79a9309b0 --- /dev/null +++ b/doc/sphinxext/prs/9296.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "70bc735b3f95574fe93394c57bfebff15e12cf0c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_epochs.py": { + "a": 21, + "d": 0 + }, + "mne/utils/mixin.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9298.json b/doc/sphinxext/prs/9298.json new file mode 100644 index 00000000000..0fa525c90f6 --- /dev/null +++ b/doc/sphinxext/prs/9298.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8b2394b3ad6b18461c989b872396cc6228be6662", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9299.json b/doc/sphinxext/prs/9299.json new file mode 100644 index 00000000000..3d3626eff4a --- /dev/null +++ b/doc/sphinxext/prs/9299.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "19b62e9054e954f147273a146e8e1d95f7339fa1", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 7, + "d": 2 + }, + "mne/io/brainvision/tests/data/test_old_layout_latin1_software_filter_longname.vhdr": { + "a": 156, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 13, + "d": 2 + }, + "mne/tests/test_line_endings.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/93.json b/doc/sphinxext/prs/93.json new file mode 100644 index 00000000000..32f5f89fc0c --- /dev/null +++ b/doc/sphinxext/prs/93.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5e3c3c65356d1a01c22d5a17dba5653f8fd7483d", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/raw.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/930.json b/doc/sphinxext/prs/930.json new file mode 100644 index 00000000000..c7ddf63c952 --- /dev/null +++ b/doc/sphinxext/prs/930.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "1cbb07e2a47e5321453dff540ec547baf2646cbf", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/source/_static/institutions.png": { + "a": 0, + "d": 0 + }, + "doc/source/_static/logo.png": { + "a": 0, + "d": 0 + }, + "doc/source/_static/mne_logo.png": { + "a": 0, + "d": 0 + }, + "doc/source/_templates/layout.html": { + "a": 10, + "d": 2 + }, + "doc/source/index.rst": { + "a": 7, + "d": 0 + }, + "doc/source/python_reference.rst": { + "a": 2, + "d": 2 + }, + "doc/upload_html.sh": { + "a": 2, + "d": 2 + }, + "mne/gui/__init__.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9300.json b/doc/sphinxext/prs/9300.json new file mode 100644 index 00000000000..fc482ca6e02 --- /dev/null +++ b/doc/sphinxext/prs/9300.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bc9ed3f48d7fe1619af033a80ba0fef89abbcb1b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_transformer.py": { + "a": 0, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9305.json b/doc/sphinxext/prs/9305.json new file mode 100644 index 00000000000..6301b8dab05 --- /dev/null +++ b/doc/sphinxext/prs/9305.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ead743b4e6fbeb37aa8eca5547b958931f150c90", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9313.json b/doc/sphinxext/prs/9313.json new file mode 100644 index 00000000000..5c589d6e510 --- /dev/null +++ b/doc/sphinxext/prs/9313.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e8207aecf6bff7dda7b4290d2e30daed6b776669", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 2, + "d": 1 + }, + "tutorials/sample-datasets/plot_phantom_4DBTi.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9314.json b/doc/sphinxext/prs/9314.json new file mode 100644 index 00000000000..0d0e1e00f79 --- /dev/null +++ b/doc/sphinxext/prs/9314.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "208a081ff36f867d33df1b0472d9a2e3b5156cb9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9315.json b/doc/sphinxext/prs/9315.json new file mode 100644 index 00000000000..5de32c89a21 --- /dev/null +++ b/doc/sphinxext/prs/9315.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "71e6476a471a548e72a9506ce70beaf7337b88fd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + 
"mne/viz/_brain/_brain.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/surface.py": { + "a": 4, + "d": 0 + }, + "mne/viz/_brain/view.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 7, + "d": 3 + }, + "tools/circleci_download.sh": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_visualize_stc.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9316.json b/doc/sphinxext/prs/9316.json new file mode 100644 index 00000000000..a713d0ed254 --- /dev/null +++ b/doc/sphinxext/prs/9316.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6cd38e39dfc95ae99958373575f8d607fa6d1029", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9317.json b/doc/sphinxext/prs/9317.json new file mode 100644 index 00000000000..7ae8aedd82e --- /dev/null +++ b/doc/sphinxext/prs/9317.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7c5448a3408a045f9e3bd872f8a2d58645bc4454", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 7, + "d": 4 + }, + "mne/tests/test_label.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9318.json b/doc/sphinxext/prs/9318.json new file mode 100644 index 00000000000..bc24c4586c3 --- /dev/null +++ b/doc/sphinxext/prs/9318.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0afbf07446a9b76d8094ea3c3377cf02833754de", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "examples/simulation/plot_simulate_evoked_data.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 2, + "d": 0 + }, + "tutorials/sample-datasets/plot_phantom_4DBTi.py": { + "a": 1, + "d": 1 + }, + "tutorials/source-modeling/plot_source_alignment.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/932.json b/doc/sphinxext/prs/932.json new file mode 100644 index 00000000000..4a5e188e889 --- /dev/null +++ b/doc/sphinxext/prs/932.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "882fb9a5a06dc54cdc09df418490a76d065574ac", + "authors": [ + { + "n": "Hari Bharadwaj", + "e": null + } + ], + "changes": { + "mne/fiff/edf/edf.py": { + "a": 5, + "d": 4 + }, + "mne/fiff/edf/tests/data/biosemi.hpts": { + "a": 1, + "d": 1 + }, + "mne/fiff/edf/tests/test_edf.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9320.json b/doc/sphinxext/prs/9320.json new file mode 100644 index 00000000000..4d2a7583441 --- /dev/null +++ b/doc/sphinxext/prs/9320.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "87c9eafdaf8fdab33ff1063d0d7bb03afac60bb5", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/preprocessing/_csd.py": { + "a": 1, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9321.json b/doc/sphinxext/prs/9321.json new file mode 100644 index 00000000000..989b56bc12c --- /dev/null +++ b/doc/sphinxext/prs/9321.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f10d0d34a7a9e01c8d9f7db3fb35df528de68989", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9323.json 
b/doc/sphinxext/prs/9323.json new file mode 100644 index 00000000000..ccc612ad14b --- /dev/null +++ b/doc/sphinxext/prs/9323.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ec1d69950f94047c91a92e9c53b521304cba6a9c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 15, + "d": 15 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/sphinxext/gen_names.py": { + "a": 30, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9324.json b/doc/sphinxext/prs/9324.json new file mode 100644 index 00000000000..690ae4ad829 --- /dev/null +++ b/doc/sphinxext/prs/9324.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "fe0d3e3317eef48dd7f18bacda620c43abc8cb0c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 11, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 4, + "d": 4 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 3, + "d": 1 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 6, + "d": 3 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9325.json b/doc/sphinxext/prs/9325.json new file mode 100644 index 00000000000..ae7671ae967 --- /dev/null +++ b/doc/sphinxext/prs/9325.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9b2fc9cac7e96244d52f961ab734fe3dbf3348ab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 12, + "d": 12 + }, + "mne/report.py": { + "a": 5, + "d": 2 + }, + "tools/circleci_download.sh": { + "a": 3, + "d": 3 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9326.json b/doc/sphinxext/prs/9326.json new file mode 100644 index 00000000000..042344ee8a9 --- /dev/null +++ b/doc/sphinxext/prs/9326.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f96dfe27b2ccf9b21a513478fb96e62e133f0b38", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 32, + "d": 34 + }, + "mne/viz/epochs.py": { + "a": 0, + "d": 4 + }, + "mne/viz/ica.py": { + "a": 0, + "d": 4 + }, + "mne/viz/raw.py": { + "a": 0, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9329.json b/doc/sphinxext/prs/9329.json new file mode 100644 index 00000000000..157ee1d584f --- /dev/null +++ b/doc/sphinxext/prs/9329.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aa526c8ed7049046734ca28493d99e841672b0eb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "tutorials/raw/30_annotate_raw.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9331.json b/doc/sphinxext/prs/9331.json new file mode 100644 index 00000000000..91f074a5cf2 --- /dev/null +++ b/doc/sphinxext/prs/9331.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "723ac8fe6f32ae2fbd6775983cfbe9885fcd67e6", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9332.json b/doc/sphinxext/prs/9332.json new file mode 100644 index 00000000000..9f559fa80ec --- /dev/null +++ b/doc/sphinxext/prs/9332.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f90c5957bd99a98f7cfa286f62ccf2501f6ae6ed", + "authors": [ + { + 
"n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/tests/test_report.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9337.json b/doc/sphinxext/prs/9337.json new file mode 100644 index 00000000000..442b7cff88d --- /dev/null +++ b/doc/sphinxext/prs/9337.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "73bf7083d9b931d89227b28d5aab76e1f94a8332", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 3, + "d": 3 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "requirements_doc.txt": { + "a": 2, + "d": 1 + }, + "requirements_testing.txt": { + "a": 1, + "d": 3 + }, + "requirements_testing_extra.txt": { + "a": 4, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9340.json b/doc/sphinxext/prs/9340.json new file mode 100644 index 00000000000..532fdf795fe --- /dev/null +++ b/doc/sphinxext/prs/9340.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6ac763359a7fb2e9ee9aa610d02c475aedb44a7d", + "authors": [ + { + "n": "Britta Westner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/beamformer/_dics.py": { + "a": 14, + "d": 2 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 14, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9341.json b/doc/sphinxext/prs/9341.json new file mode 100644 index 00000000000..e5857c5f953 --- /dev/null +++ b/doc/sphinxext/prs/9341.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "844d53c866bbea932dd6c89ab444bb7f882f0b6f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 2, + "d": 2 + }, + "environment.yml": { + "a": 3, + "d": 3 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 6 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 6 + }, + "mne/viz/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 6 + }, + "requirements.txt": { + "a": 3, + "d": 3 + }, + "server_environment.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9344.json b/doc/sphinxext/prs/9344.json new file mode 100644 index 00000000000..242d98b6bde --- /dev/null +++ b/doc/sphinxext/prs/9344.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "b3cbe52713075f79087de3239133f25ad1606cf1", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 3 + }, + "mne/epochs.py": { + "a": 9, + "d": 7 + }, + "mne/io/base.py": { + "a": 6, + "d": 5 + }, + "mne/io/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9345.json b/doc/sphinxext/prs/9345.json new file mode 100644 index 00000000000..675099606d1 --- /dev/null +++ b/doc/sphinxext/prs/9345.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": 
"307c99722201649873867ecb5ddda96384af1942", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/evoked/30_eeg_erp.py": { + "a": 286, + "d": 151 + }, + "tutorials/intro/15_inplace.py": { + "a": 16, + "d": 14 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 19, + "d": 17 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9346.json b/doc/sphinxext/prs/9346.json new file mode 100644 index 00000000000..8d65a971c10 --- /dev/null +++ b/doc/sphinxext/prs/9346.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8a7115b731df45c0b997f68822eb94bff94f18ce", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9347.json b/doc/sphinxext/prs/9347.json new file mode 100644 index 00000000000..3039f0242c3 --- /dev/null +++ b/doc/sphinxext/prs/9347.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "e9924c0a693b4d5f0788168967c9e02c1d59fda1", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 9 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 69, + "d": 14 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 65, + "d": 12 + }, + "mne/preprocessing/nirs/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 11, + "d": 0 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 0 + }, + "tools/get_testing_version.sh": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9348.json b/doc/sphinxext/prs/9348.json new file mode 100644 index 00000000000..8aa0032b094 --- /dev/null +++ b/doc/sphinxext/prs/9348.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "2d1e3dfe569c569bf0331b7faca41c483e8b8237", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "David JULIEN", + "e": "david.julien@ifsttar.fr" + }, + { + "n": "Romain Derollepot", + "e": "romain.derollepot@univ-eiffel.fr" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 4, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/nirx.py": { + "a": 96, + "d": 31 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 91, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/annotate_nan.py": { + "a": 35, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 30, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9349.json b/doc/sphinxext/prs/9349.json new file mode 100644 index 00000000000..8e6be3f9e8f --- /dev/null +++ b/doc/sphinxext/prs/9349.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "26abeb6b56cd1705d70acb09c804ff2c35bdb336", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/report.py": { + "a": 38, + "d": 13 + }, + "tutorials/intro/70_report.py": { + "a": 14, + 
"d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/935.json b/doc/sphinxext/prs/935.json new file mode 100644 index 00000000000..6d15be9ae49 --- /dev/null +++ b/doc/sphinxext/prs/935.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d22d8640d4864cf0ec6c778c5c8a6edb255f8fc7", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + ".travis.yml": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9351.json b/doc/sphinxext/prs/9351.json new file mode 100644 index 00000000000..cc7dfe21d9f --- /dev/null +++ b/doc/sphinxext/prs/9351.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "255092b586a7aaded68c311bac4258bd75374eb9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 21, + "d": 8 + }, + "mne/viz/backends/_qt.py": { + "a": 17, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9352.json b/doc/sphinxext/prs/9352.json new file mode 100644 index 00000000000..6bbe5ecd710 --- /dev/null +++ b/doc/sphinxext/prs/9352.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a352a8db3e6e57dddec4c0fb25c5dc31bd37c749", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".mailmap": { + "a": 27, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9353.json b/doc/sphinxext/prs/9353.json new file mode 100644 index 00000000000..f8d5280c4b4 --- /dev/null +++ b/doc/sphinxext/prs/9353.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "286d39c91e79542e79d7ef1707726e860400111d", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9356.json b/doc/sphinxext/prs/9356.json new file mode 100644 index 00000000000..4201ab0fec1 --- /dev/null +++ b/doc/sphinxext/prs/9356.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "96fffbb3f4796ea116fe1775abdaf30c35c88c22", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 36, + "d": 0 + }, + "doc/file_io.rst": { + "a": 0, + "d": 1 + }, + "doc/inverse.rst": { + "a": 0, + "d": 1 + }, + "mne/__init__.py": { + "a": 0, + "d": 4 + }, + "mne/beamformer/__init__.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/_dics.py": { + "a": 5, + "d": 339 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 148 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 8 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 10 + }, + "mne/preprocessing/tests/test_eog.py": { + "a": 0, + "d": 6 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 37 + }, + "mne/viz/tests/test_ica.py": { + "a": 0, + "d": 10 + }, + "mne/viz/tests/test_topomap.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9357.json b/doc/sphinxext/prs/9357.json new file mode 100644 index 00000000000..7380bbb060d --- /dev/null +++ b/doc/sphinxext/prs/9357.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bc49d19a824d431aad3b1e8c6eacd93411fb9a11", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "environment.yml": { + "a": 2, + "d": 0 + } + } +} \ No newline at 
end of file diff --git a/doc/sphinxext/prs/9358.json b/doc/sphinxext/prs/9358.json new file mode 100644 index 00000000000..f8cbdcccd14 --- /dev/null +++ b/doc/sphinxext/prs/9358.json @@ -0,0 +1,575 @@ +{ + "merge_commit_sha": "f1e6097c36e7232514d67d4b04af568e36ac5758", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/compat_old.yml": { + "a": 3, + "d": 3 + }, + "README.rst": { + "a": 11, + "d": 10 + }, + "doc/_templates/layout.html": { + "a": 0, + "d": 13 + }, + "doc/conf.py": { + "a": 2, + "d": 2 + }, + "doc/overview/matlab.rst": { + "a": 3, + "d": 2 + }, + "doc/overview/roadmap.rst": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 3, + "d": 5 + }, + "mne/beamformer/tests/test_external.py": { + "a": 0, + "d": 4 + }, + "mne/bem.py": { + "a": 5, + "d": 6 + }, + "mne/channels/_dig_montage_utils.py": { + "a": 1, + "d": 9 + }, + "mne/channels/montage.py": { + "a": 2, + "d": 18 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 6 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 0, + "d": 4 + }, + "mne/channels/tests/test_layout.py": { + "a": 8, + "d": 13 + }, + "mne/channels/tests/test_montage.py": { + "a": 7, + "d": 9 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 3, + "d": 3 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 4, + "d": 4 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 4 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/coreg.py": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 2, + "d": 9 + }, + "mne/decoding/base.py": { + "a": 2, + "d": 6 + }, + "mne/decoding/tests/test_ems.py": { + "a": 1, + "d": 4 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 4 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 4 + }, + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/fixes.py": { + "a": 20, + "d": 76 + }, + "mne/forward/_field_interpolation.py": { + "a": 15, + "d": 33 + }, + "mne/forward/_lead_dots.py": { + "a": 9, + "d": 10 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 2 + }, + "mne/forward/tests/test_field_interpolation.py": { + "a": 1, + "d": 5 + }, + "mne/forward/tests/test_forward.py": { + "a": 1, + "d": 5 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 5 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 1, + "d": 5 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 1, + "d": 4 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 1, + "d": 5 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 1, + "d": 5 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 1, + "d": 5 + }, + "mne/inverse_sparse/tests/test_gamma_map.py": { + "a": 1, + "d": 4 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 4 + }, + "mne/io/_digitization.py": { + "a": 0, + "d": 2 + }, + "mne/io/array/tests/test_array.py": { + "a": 0, + "d": 4 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 2, + "d": 6 + }, + "mne/io/base.py": { + "a": 1, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 4 + }, + "mne/io/bti/bti.py": { + "a": 2, + "d": 2 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 4 + }, + "mne/io/cnt/cnt.py": { + "a": 0, + "d": 3 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 1, + "d": 5 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 5 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 
1, + "d": 4 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 0, + "d": 4 + }, + "mne/io/eeglab/tests/_utils.py": { + "a": 0, + "d": 38 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 2, + "d": 2 + }, + "mne/io/eximia/tests/test_eximia.py": { + "a": 0, + "d": 4 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 2, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 0, + "d": 1 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 0, + "d": 4 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 0, + "d": 4 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 4, + "d": 9 + }, + "mne/io/proc_history.py": { + "a": 1, + "d": 2 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 1, + "d": 4 + }, + "mne/io/tests/test_compensator.py": { + "a": 1, + "d": 4 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_pick.py": { + "a": 1, + "d": 4 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 2 + }, + "mne/io/tests/test_reference.py": { + "a": 2, + "d": 5 + }, + "mne/io/tests/test_what.py": { + "a": 1, + "d": 4 + }, + "mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 14, + "d": 15 + }, + "mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 0, + "d": 4 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/tests/test_time_frequency.py": { + "a": 0, + "d": 4 + }, + "mne/morph.py": { + "a": 4, + "d": 2 + }, + "mne/morph_map.py": { + "a": 232, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/tests/test_eeglab_infomax.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 3, + "d": 6 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/tests/test_otp.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/tests/test_peak_finder.py": { + "a": 0, + "d": 4 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 1, + "d": 2 + }, + "mne/report.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/raw.py": { + "a": 3, + "d": 4 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 4 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 0, + "d": 4 + }, + "mne/simulation/tests/test_source.py": { + "a": 1, + "d": 4 + }, + "mne/source_estimate.py": { + "a": 11, + "d": 15 + }, + "mne/stats/tests/test_permutations.py": { + "a": 1, + "d": 4 + }, + "mne/stats/tests/test_regression.py": { + "a": 1, + "d": 4 + }, + "mne/surface.py": { + "a": 11, + "d": 225 + }, + "mne/tests/test_annotations.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_coreg.py": { + "a": 1, + "d": 5 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_event.py": { + "a": 0, + "d": 4 + }, + "mne/tests/test_evoked.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_filter.py": { + "a": 1, + "d": 5 + }, + "mne/tests/test_label.py": { + "a": 7, + "d": 11 + }, + "mne/tests/test_line_endings.py": { + "a": 3, + "d": 6 + }, + "mne/tests/test_misc.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_morph_map.py": { + "a": 60, + "d": 0 + }, + "mne/tests/test_proj.py": { + "a": 0, + "d": 4 + }, + 
"mne/tests/test_read_vectorview_selection.py": { + "a": 0, + "d": 4 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 5 + }, + "mne/tests/test_source_estimate.py": { + "a": 5, + "d": 4 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_surface.py": { + "a": 13, + "d": 57 + }, + "mne/tests/test_transforms.py": { + "a": 30, + "d": 34 + }, + "mne/time_frequency/_stockwell.py": { + "a": 0, + "d": 1 + }, + "mne/time_frequency/tests/test_ar.py": { + "a": 1, + "d": 4 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 5, + "d": 8 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 0, + "d": 4 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 4, + "d": 7 + }, + "mne/transforms.py": { + "a": 2, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 4 + }, + "mne/utils/_testing.py": { + "a": 0, + "d": 28 + }, + "mne/utils/check.py": { + "a": 1, + "d": 11 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "mne/viz/_figure.py": { + "a": 1, + "d": 2 + }, + "mne/viz/backends/_abstract.py": { + "a": 3, + "d": 2 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 1, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 7 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 4 + }, + "mne/viz/tests/test_ica.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_topo.py": { + "a": 0, + "d": 4 + }, + "mne/viz/tests/test_utils.py": { + "a": 0, + "d": 4 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 3 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "setup.py": { + "a": 1, + "d": 1 + }, + "tools/github_actions_test.sh": { + "a": 0, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9359.json b/doc/sphinxext/prs/9359.json new file mode 100644 index 00000000000..347283d5e2b --- /dev/null +++ b/doc/sphinxext/prs/9359.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "61349981d3de5d749043e604790422b6754081b8", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "tutorials/evoked/20_visualize_evoked.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/10_background_freesurfer.py": { + "a": 96, + "d": 84 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 38, + "d": 40 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 3, + "d": 5 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9361.json b/doc/sphinxext/prs/9361.json new file mode 100644 index 00000000000..1c88c53f848 --- /dev/null +++ b/doc/sphinxext/prs/9361.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a3655f33e38d9703fe51b90d39d880557a2fc168", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 0, + "d": 16 + }, + ".github/workflows/compat_old.yml": { + "a": 0, + "d": 17 + }, + ".github/workflows/linux_conda.yml": { + "a": 0, + "d": 16 + }, + ".github/workflows/linux_pip.yml": { + "a": 0, + "d": 16 + }, + ".github/workflows/macos_conda.yml": { + "a": 0, + "d": 16 + }, + "doc/install/contributing.rst": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9362.json b/doc/sphinxext/prs/9362.json 
new file mode 100644 index 00000000000..06331e6ceda --- /dev/null +++ b/doc/sphinxext/prs/9362.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "7e241cce1f78d1a21a0607ae9e2186e7f964e402", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/install/advanced.rst": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9363.json b/doc/sphinxext/prs/9363.json new file mode 100644 index 00000000000..6a6df1e142c --- /dev/null +++ b/doc/sphinxext/prs/9363.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "674b8bd7f832d765266e048762074bc25cd100da", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/evoked/30_eeg_erp.py": { + "a": 8, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9365.json b/doc/sphinxext/prs/9365.json new file mode 100644 index 00000000000..dbfcdf0bcd3 --- /dev/null +++ b/doc/sphinxext/prs/9365.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "68f1ec9670fdb38fd19020df4708c35c280bbc90", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/stats/_adjacency.py": { + "a": 24, + "d": 9 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9366.json b/doc/sphinxext/prs/9366.json new file mode 100644 index 00000000000..3b764aafe20 --- /dev/null +++ b/doc/sphinxext/prs/9366.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4ea049e5e825eec545b9aac4acf5156da6da5f2d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/report.py": { + "a": 1, + "d": 2 + }, + "tutorials/intro/70_report.py": { + "a": 16, + "d": 14 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9367.json b/doc/sphinxext/prs/9367.json new file mode 100644 index 00000000000..2c9248b19c0 --- /dev/null +++ b/doc/sphinxext/prs/9367.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "cb3c34f9a32fdc78777e2e4a0ecf0f7258860182", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/data/html_templates.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9368.json b/doc/sphinxext/prs/9368.json new file mode 100644 index 00000000000..f156e49f15a --- /dev/null +++ b/doc/sphinxext/prs/9368.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f3ff4f795a0248e757d025528ae80d3b02a73d78", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/io/nirx/nirx.py": { + "a": 3, + "d": 1 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 9, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9369.json b/doc/sphinxext/prs/9369.json new file mode 100644 index 00000000000..bcf99d80f9e --- /dev/null +++ b/doc/sphinxext/prs/9369.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "8ccc3999da6c15efa03840230c13aeb7bab5618d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.23.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 39, + "d": 9 + }, + 
"mne/simulation/raw.py": { + "a": 2, + "d": 2 + }, + "mne/simulation/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_chpi.py": { + "a": 33, + "d": 7 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 0 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9375.json b/doc/sphinxext/prs/9375.json new file mode 100644 index 00000000000..03b34aadba8 --- /dev/null +++ b/doc/sphinxext/prs/9375.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3149819463d4690146b3f038dca8441157abb36b", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 11, + "d": 10 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 19, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9376.json b/doc/sphinxext/prs/9376.json new file mode 100644 index 00000000000..09d01ec353b --- /dev/null +++ b/doc/sphinxext/prs/9376.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "a68e3dbf816946ba357e202d9f3d9dfe2a558636", + "authors": [ + { + "n": "mdovgialo", + "e": "marian.dowgialo@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/source_space.rst": { + "a": 1, + "d": 0 + }, + "mne/label.py": { + "a": 55, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 20, + "d": 2 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9378.json b/doc/sphinxext/prs/9378.json new file mode 100644 index 00000000000..f96629407f0 --- /dev/null +++ b/doc/sphinxext/prs/9378.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "908f52a6e2c98308b4860e724ea3daa0a1d7ee3a", + "authors": [ + { + "n": "Jan Sosulski", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 1, + "d": 0 + }, + "mne/evoked.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/938.json b/doc/sphinxext/prs/938.json new file mode 100644 index 00000000000..fa660e58f17 --- /dev/null +++ b/doc/sphinxext/prs/938.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "dc015f8f2500751a28f3404c880c74561633c7f4", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 3, + "d": 3 + }, + "mne/gui/_coreg_gui.py": { + "a": 8, + "d": 5 + }, + "mne/gui/_file_traits.py": { + "a": 3, + "d": 0 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9381.json b/doc/sphinxext/prs/9381.json new file mode 100644 index 00000000000..43f5c1a4a84 --- /dev/null +++ b/doc/sphinxext/prs/9381.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6afc7224de9e7423e57604ca307941cf0b3449f1", + "authors": [ + { + "n": "Xiaokai Xia", + "e": "xia@xiaokai.me" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 1 + }, + 
"doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/curry/curry.py": { + "a": 9, + "d": 6 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9384.json b/doc/sphinxext/prs/9384.json new file mode 100644 index 00000000000..ea948b566d4 --- /dev/null +++ b/doc/sphinxext/prs/9384.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5e2a6d8eb0edefe3c46a5b3250a3a546224d1ce2", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nihon/nihon.py": { + "a": 12, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9386.json b/doc/sphinxext/prs/9386.json new file mode 100644 index 00000000000..508fd99d667 --- /dev/null +++ b/doc/sphinxext/prs/9386.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e8727cb016d034263a4f17e68e12b1ebf5dacc8d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 2, + "d": 2 + }, + "mne/dipole.py": { + "a": 2, + "d": 1 + }, + "mne/fixes.py": { + "a": 30, + "d": 0 + }, + "mne/io/reference.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9387.json b/doc/sphinxext/prs/9387.json new file mode 100644 index 00000000000..19cfa7149b5 --- /dev/null +++ b/doc/sphinxext/prs/9387.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "ca991d8f6ec9121c6da00147baa961c04c74fa2a", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + }, + "doc/install/index.rst": { + "a": 1, + "d": 1 + }, + "doc/install/mne_python.rst": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9388.json b/doc/sphinxext/prs/9388.json new file mode 100644 index 00000000000..4049873d42c --- /dev/null +++ b/doc/sphinxext/prs/9388.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "ed64dc7bd0750bab2e99a1cfcbce2928eb37e202", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "CONTRIBUTING.rst": { + "a": 1, + "d": 1 + }, + "doc/_includes/institutional-partners.rst": { + "a": 41, + "d": 0 + }, + "doc/_static/institution_logos/CHOP.svg": { + "a": 1, + "d": 0 + }, + "doc/_static/institution_logos/MPIB.svg": { + "a": 3, + "d": 0 + }, + "doc/_static/institution_logos/Macquarie.png": { + "a": 0, + "d": 0 + }, + "doc/_static/institution_logos/SWPS.svg": { + "a": 151, + "d": 0 + }, + "doc/_templates/version-switcher.html": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 18, + "d": 1 + }, + "doc/funding.rst": { + "a": 19, + "d": 23 + }, + "doc/glossary.rst": { + "a": 5, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 3, + "d": 3 + }, + "doc/links.inc": { + "a": 10, + "d": 5 + }, + "doc/overview/development.rst": { + "a": 1, + "d": 0 + }, + "doc/overview/governance.rst": { + "a": 314, + "d": 0 + }, + "doc/overview/people.rst": { + "a": 44, + "d": 0 + }, + "doc/overview/roadmap.rst": { + "a": 140, + "d": 121 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/939.json 
b/doc/sphinxext/prs/939.json new file mode 100644 index 00000000000..6fec6e29864 --- /dev/null +++ b/doc/sphinxext/prs/939.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "948584a6662cb82d7018aeb81e6f488328e387df", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9390.json b/doc/sphinxext/prs/9390.json new file mode 100644 index 00000000000..3ee6be52563 --- /dev/null +++ b/doc/sphinxext/prs/9390.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e0ec4a0de8d9722f2f8488797bafdf9334cb4b0a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tools/generate_codemeta.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9391.json b/doc/sphinxext/prs/9391.json new file mode 100644 index 00000000000..b22f7bfa41b --- /dev/null +++ b/doc/sphinxext/prs/9391.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "5bc18f950aa4fec908873c528f68fcfc63f48f16", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/reading_raw_data.rst": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 6, + "d": 0 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/io/hitachi/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/io/hitachi/hitachi.py": { + "a": 283, + "d": 0 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 274, + "d": 0 + }, + "mne/io/nirx/nirx.py": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 5, + "d": 3 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 18, + "d": 26 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 13, + "d": 0 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9392.json b/doc/sphinxext/prs/9392.json new file mode 100644 index 00000000000..b7aef5a35e5 --- /dev/null +++ b/doc/sphinxext/prs/9392.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8aeb4ac07c9b2a2694badb33656fa0b510c8bbcd", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "README.rst": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9394.json b/doc/sphinxext/prs/9394.json new file mode 100644 index 00000000000..e2bbc2b0d81 --- /dev/null +++ b/doc/sphinxext/prs/9394.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "323ab3fd683d81be55bf328d7c2bc8083c4ec104", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/data/coil_def.dat": { + "a": 64, + "d": 8 + }, + "mne/io/constants.py": { + "a": 10, + "d": 0 + }, + "mne/io/tests/test_constants.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9396.json b/doc/sphinxext/prs/9396.json new file mode 100644 index 00000000000..4473d191a06 --- /dev/null +++ 
b/doc/sphinxext/prs/9396.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "052ff0131250e68bf2869cbe088f70b29dbbf084", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nirx/nirx.py": { + "a": 7, + "d": 8 + }, + "mne/morph.py": { + "a": 8, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 26, + "d": 4 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 30, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9398.json b/doc/sphinxext/prs/9398.json new file mode 100644 index 00000000000..29baccb044d --- /dev/null +++ b/doc/sphinxext/prs/9398.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c56228da616ae3fc3c6f900d37fed9e52e5714f1", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 10, + "d": 4 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9399.json b/doc/sphinxext/prs/9399.json new file mode 100644 index 00000000000..8bcc728d62c --- /dev/null +++ b/doc/sphinxext/prs/9399.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f1ea915b3f27349411d5c6231d7223e4b46445c6", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/epochs.py": { + "a": 4, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 22, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9401.json b/doc/sphinxext/prs/9401.json new file mode 100644 index 00000000000..6bc7018f83e --- /dev/null +++ b/doc/sphinxext/prs/9401.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "b3f4b05b3933a3b73458205c7e786166f90ad1f7", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 0, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 112, + "d": 54 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 94, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 30, + "d": 6 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 84, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9403.json b/doc/sphinxext/prs/9403.json new file mode 100644 index 00000000000..ce59af4bcd7 --- /dev/null +++ b/doc/sphinxext/prs/9403.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ddacc565962bab96d163e2cdb790304febf22582", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/data/html_templates.py": { + "a": 7, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 8, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 8, + "d": 0 + }, + "tutorials/intro/30_info.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9404.json b/doc/sphinxext/prs/9404.json new file mode 100644 index 00000000000..d80ad181174 --- /dev/null +++ 
b/doc/sphinxext/prs/9404.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "22c570577840eaa510af06278e687fda67515801", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/data/html_templates.py": { + "a": 2, + "d": 0 + }, + "mne/io/base.py": { + "a": 3, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9405.json b/doc/sphinxext/prs/9405.json new file mode 100644 index 00000000000..ddcac88bd58 --- /dev/null +++ b/doc/sphinxext/prs/9405.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4de23ac16f120442d5fd322436ce6e746a94236e", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/overview/people.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9406.json b/doc/sphinxext/prs/9406.json new file mode 100644 index 00000000000..ae711ca2147 --- /dev/null +++ b/doc/sphinxext/prs/9406.json @@ -0,0 +1,59 @@ +{ + "merge_commit_sha": "8702d0e25f737070315f59a395b441662c7911e0", + "authors": [ + { + "n": "Evan Hathaway", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/export.rst": { + "a": 2, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/export/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/export/_egimff.py": { + "a": 150, + "d": 0 + }, + "mne/export/_export.py": { + "a": 59, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 83, + "d": 3 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 19, + "d": 10 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 3, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9407.json b/doc/sphinxext/prs/9407.json new file mode 100644 index 00000000000..68d586dafdb --- /dev/null +++ b/doc/sphinxext/prs/9407.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "65f74fa1c4609d7e5219bca2cc3b840650cf978f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 10, + "d": 7 + }, + "mne/surface.py": { + "a": 29, + "d": 20 + }, + "mne/tests/test_coreg.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_source_space.py": { + "a": 6, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 12, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 60, + "d": 34 + }, + "mne/viz/_brain/surface.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 21, + "d": 6 + }, + "mne/viz/backends/_abstract.py": { + "a": 6, + "d": 4 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 5, + "d": 4 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9408.json b/doc/sphinxext/prs/9408.json new file mode 100644 index 00000000000..7a1b697d798 --- /dev/null +++ b/doc/sphinxext/prs/9408.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "81034518a5d96db2905d9230175c46991404271e", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "tutorials/io/30_reading_fnirs_data.py": { + "a": 66, + "d": 22 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9411.json b/doc/sphinxext/prs/9411.json new file mode 100644 index 00000000000..e2cdcc0c4f1 --- /dev/null +++ b/doc/sphinxext/prs/9411.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "9a56c47d5bed3e6f98c9940d2611468d4b90d5d0", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9412.json b/doc/sphinxext/prs/9412.json new file mode 100644 index 00000000000..f7fa0fa3de8 --- /dev/null +++ b/doc/sphinxext/prs/9412.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d8768e2c0b4fbd65f92c9c7d625b03f4ec171d34", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9414.json b/doc/sphinxext/prs/9414.json new file mode 100644 index 00000000000..ef089869c12 --- /dev/null +++ b/doc/sphinxext/prs/9414.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0079f5b2973b979743ac97d9ba8ea882e50e3548", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9416.json b/doc/sphinxext/prs/9416.json new file mode 100644 index 00000000000..f1cc123b5df --- /dev/null +++ b/doc/sphinxext/prs/9416.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "162368a48429c848e083072a9ab39205928e9e22", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "tutorials/io/30_reading_fnirs_data.py": { + "a": 47, + "d": 48 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9419.json b/doc/sphinxext/prs/9419.json new file mode 100644 index 00000000000..afe774d1748 --- /dev/null +++ b/doc/sphinxext/prs/9419.json @@ -0,0 +1,95 @@ +{ + "merge_commit_sha": "bd644550776f9cf343118deb6551237c06b6ebed", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/io/base.py": { + "a": 6, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 4, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 10, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 74, + "d": 9 + }, + "mne/viz/epochs.py": { + "a": 1, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 7, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 6, + "d": 4 + }, + "mne/viz/tests/test_epochs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_raw.py": { + "a": 18, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/942.json b/doc/sphinxext/prs/942.json new file mode 100644 index 00000000000..18cc911e030 --- /dev/null +++ b/doc/sphinxext/prs/942.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "40b9ce0d74da647960f7ca25c6507e204461dd79", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/event.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9420.json b/doc/sphinxext/prs/9420.json new file mode 100644 index 00000000000..c5980c3c73c --- /dev/null +++ b/doc/sphinxext/prs/9420.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "208968d83181ef55af6f31da82ae02d67b1e321c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/egi/egimff.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9422.json b/doc/sphinxext/prs/9422.json new file mode 100644 index 00000000000..41725578030 --- /dev/null +++ b/doc/sphinxext/prs/9422.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3384bbcc12595a89f2ceaa1f0c12abc4af920687", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_epochs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/tests/test_notebook.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9423.json b/doc/sphinxext/prs/9423.json new file mode 100644 index 00000000000..aae76686c24 --- /dev/null +++ b/doc/sphinxext/prs/9423.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4ecca868906479eba9f90428b616e4408e781a35", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 2, + "d": 3 + }, + "tools/github_actions_dependencies.sh": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9424.json b/doc/sphinxext/prs/9424.json new file mode 100644 index 00000000000..5bd6bcee231 --- /dev/null +++ b/doc/sphinxext/prs/9424.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "cd2fd28134826438c4f308fb1c90f0f1ab4215a9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + 
"changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/label.py": { + "a": 114, + "d": 44 + }, + "mne/surface.py": { + "a": 8, + "d": 0 + }, + "mne/tests/test_dipole.py": { + "a": 4, + "d": 0 + }, + "mne/tests/test_label.py": { + "a": 59, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 11, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 16, + "d": 22 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 12, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9426.json b/doc/sphinxext/prs/9426.json new file mode 100644 index 00000000000..9acffc7985a --- /dev/null +++ b/doc/sphinxext/prs/9426.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3b69e9b936ee01fcb357e732ee35cb75086a6bf8", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 12 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9427.json b/doc/sphinxext/prs/9427.json new file mode 100644 index 00000000000..82ca7d2c392 --- /dev/null +++ b/doc/sphinxext/prs/9427.json @@ -0,0 +1,83 @@ +{ + "merge_commit_sha": "abfe37b2c046375bebb210cd711025c455068a3e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/export.rst": { + "a": 17, + "d": 0 + }, + "doc/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 3, + "d": 0 + }, + "mne/epochs.py": { + "a": 4, + "d": 35 + }, + "mne/export/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/export/_eeglab.py": { + "a": 69, + "d": 0 + }, + "mne/export/_export.py": { + "a": 121, + "d": 0 + }, + "mne/export/tests/test_export.py": { + "a": 64, + "d": 0 + }, + "mne/io/base.py": { + "a": 4, + "d": 39 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 23 + }, + "mne/io/utils.py": { + "a": 0, + "d": 21 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 29 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 0, + "d": 43 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9430.json b/doc/sphinxext/prs/9430.json new file mode 100644 index 00000000000..3849e6ae25e --- /dev/null +++ b/doc/sphinxext/prs/9430.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "ea1212dfb044f138bf2cd2b290483e28fd68b2fd", + "authors": [ + { + "n": "PAB", + "e": null + }, + { + "n": "Pierre-Antoine Bannier", + "e": "pierre-antoine@macbook-pro-de-pierre-antoine.home" + }, + { + "n": "Pierre-Antoine Bannier", + "e": "pierre-antoine@mbpdepieantoine.home" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 6, + "d": 2 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 12, + "d": 0 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 5, + "d": 5 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 209, + "d": 19 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 26, + "d": 9 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 81, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9431.json b/doc/sphinxext/prs/9431.json 
new file mode 100644 index 00000000000..6df55a2c497 --- /dev/null +++ b/doc/sphinxext/prs/9431.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f42f61284c280145c4f396aa3e9fa9ed504d7114", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/interpolation.py": { + "a": 2, + "d": 4 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9434.json b/doc/sphinxext/prs/9434.json new file mode 100644 index 00000000000..688385fcb6e --- /dev/null +++ b/doc/sphinxext/prs/9434.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e6a029802bb2174eaee21f0ab544b4b64c71c1da", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg_gui.py": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9435.json b/doc/sphinxext/prs/9435.json new file mode 100644 index 00000000000..31b5bda9682 --- /dev/null +++ b/doc/sphinxext/prs/9435.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f3fa1ca666419893def1635f16065f502739604b", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/rank.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_rank.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9437.json b/doc/sphinxext/prs/9437.json new file mode 100644 index 00000000000..f4a6015c544 --- /dev/null +++ b/doc/sphinxext/prs/9437.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "52ea86beac5df90eb067776cf8226b72ae6df42f", + "authors": [ + { + "n": "Fede Raimondo", + "e": "f.raimondo@fz-juelich.de" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nihon/nihon.py": { + "a": 98, + "d": 26 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9438.json b/doc/sphinxext/prs/9438.json new file mode 100644 index 00000000000..a9155d175ef --- /dev/null +++ b/doc/sphinxext/prs/9438.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "3b85ebb33c6f1f54671ee129c64ea6836fc874d1", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 16, + "d": 39 + }, + "mne/evoked.py": { + "a": 13, + "d": 6 + }, + "mne/io/base.py": { + "a": 10, + "d": 4 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 19, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 50, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9440.json 
b/doc/sphinxext/prs/9440.json new file mode 100644 index 00000000000..e115f4e136c --- /dev/null +++ b/doc/sphinxext/prs/9440.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "e009e5c538c70df4b2dd7e9d3b8315437ef71413", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/connectivity/mne_inverse_envelope_correlation.py": { + "a": 12, + "d": 10 + }, + "examples/connectivity/mne_inverse_envelope_correlation_volume.py": { + "a": 4, + "d": 3 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 13, + "d": 7 + }, + "examples/inverse/psf_ctf_vertices.py": { + "a": 1, + "d": 0 + }, + "examples/inverse/psf_ctf_vertices_lcmv.py": { + "a": 6, + "d": 7 + }, + "mne/label.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 4, + "d": 3 + }, + "mne/viz/_3d.py": { + "a": 5, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 7, + "d": 13 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 6, + "d": 4 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9442.json b/doc/sphinxext/prs/9442.json new file mode 100644 index 00000000000..6f6691c6009 --- /dev/null +++ b/doc/sphinxext/prs/9442.json @@ -0,0 +1,107 @@ +{ + "merge_commit_sha": "deec72a2d14c3ecacc0f7d67b1a71593cb3ed0ce", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/0.1.inc": { + "a": 18, + "d": 0 + }, + "doc/changes/0.10.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.11.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.12.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.13.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.14.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.15.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.16.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.17.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.18.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.19.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.2.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.20.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.21.inc": { + "a": 4, + "d": 4 + }, + "doc/changes/0.22.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.23.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.3.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.4.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.5.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.6.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.7.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.8.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/0.9.inc": { + "a": 2, + "d": 2 + }, + "doc/whats_new.rst": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9443.json b/doc/sphinxext/prs/9443.json new file mode 100644 index 00000000000..6cc004133e2 --- /dev/null +++ b/doc/sphinxext/prs/9443.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "04fd78be051ec4fa193d29e532fd36ba8bac5408", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/_read_raw.py": { + "a": 2, + "d": 1 + }, + "mne/report.py": { + "a": 3, + "d": 2 + }, + "mne/tests/test_report.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9444.json b/doc/sphinxext/prs/9444.json new file mode 100644 index 00000000000..8242be54a2b --- /dev/null +++ b/doc/sphinxext/prs/9444.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": 
"a0d5d76ee35d6a59120f91500dddfda05b02da6d", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 19, + "d": 19 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9445.json b/doc/sphinxext/prs/9445.json new file mode 100644 index 00000000000..d474f4da00f --- /dev/null +++ b/doc/sphinxext/prs/9445.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "07af2f95fa92427120575f923b588160046a7ca3", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 218, + "d": 2 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 138, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 90, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9447.json b/doc/sphinxext/prs/9447.json new file mode 100644 index 00000000000..350fd7c381f --- /dev/null +++ b/doc/sphinxext/prs/9447.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6953e748bad6cf4c55df71c1fb6bb702c0dd35fc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 5, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 163, + "d": 151 + }, + "mne/channels/tests/test_montage.py": { + "a": 36, + "d": 1 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 25, + "d": 66 + }, + "mne/io/fieldtrip/utils.py": { + "a": 23, + "d": 55 + }, + "mne/utils/numerics.py": { + "a": 17, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9448.json b/doc/sphinxext/prs/9448.json new file mode 100644 index 00000000000..01e87438562 --- /dev/null +++ b/doc/sphinxext/prs/9448.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b05983ea589836c6f63ef665fcf12df659fc8930", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 40, + "d": 31 + }, + "mne/utils/misc.py": { + "a": 6, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 15, + "d": 7 + }, + "mne/viz/backends/_qt.py": { + "a": 5, + "d": 0 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 0, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9449.json b/doc/sphinxext/prs/9449.json new file mode 100644 index 00000000000..fa8934761ed --- /dev/null +++ b/doc/sphinxext/prs/9449.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0d5068dbbce76598c9c6433e1ace1265e1955564", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/time_frequency/multitaper.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/psd.py": { + "a": 4, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9450.json b/doc/sphinxext/prs/9450.json new 
file mode 100644 index 00000000000..5ea5e00ab52 --- /dev/null +++ b/doc/sphinxext/prs/9450.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a759bb35cb08cc5b5eb9e7fc177f12bcdc3b86db", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/CONTRIBUTING.md": { + "a": 2, + "d": 2 + }, + "CONTRIBUTING.md": { + "a": 9, + "d": 0 + }, + "CONTRIBUTING.rst": { + "a": 0, + "d": 21 + }, + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "doc/overview/development.rst": { + "a": 18, + "d": 12 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9453.json b/doc/sphinxext/prs/9453.json new file mode 100644 index 00000000000..c897390e615 --- /dev/null +++ b/doc/sphinxext/prs/9453.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "903f66aabd769db88a7ba7973abd624ed666695a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 2 + }, + ".github/workflows/circle_artifacts.yml": { + "a": 1, + "d": 0 + }, + ".gitignore": { + "a": 1, + "d": 0 + }, + "azure-pipelines.yml": { + "a": 27, + "d": 18 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9456.json b/doc/sphinxext/prs/9456.json new file mode 100644 index 00000000000..eec89175e2c --- /dev/null +++ b/doc/sphinxext/prs/9456.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "43777569587b7ba5f07210eabe72ab1e87226576", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + ".github/ISSUE_TEMPLATE/question.md": { + "a": 0, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9458.json b/doc/sphinxext/prs/9458.json new file mode 100644 index 00000000000..806d20682bc --- /dev/null +++ b/doc/sphinxext/prs/9458.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c0d431acf0307a8bf92ab01995397194563f6920", + "authors": [ + { + "n": "Ross", + "e": "ross.maddox@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9459.json b/doc/sphinxext/prs/9459.json new file mode 100644 index 00000000000..01a0fef09c5 --- /dev/null +++ b/doc/sphinxext/prs/9459.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8d9b3521bd1a06847304fed9b8f36277f05d4607", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 11, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9460.json b/doc/sphinxext/prs/9460.json new file mode 100644 index 00000000000..01d0e522421 --- /dev/null +++ b/doc/sphinxext/prs/9460.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "651720ea6954f458332d03f609c91c9c67bc8cab", + "authors": [ + { + "n": "Ryan M.C. 
Law", + "e": "ryan.law@mrc-cbu.cam.ac.uk" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Ryan Law", + "e": "ryalaw@dccn.nl" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/ctf/ctf.py": { + "a": 5, + "d": 0 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9461.json b/doc/sphinxext/prs/9461.json new file mode 100644 index 00000000000..3ccc0a1da08 --- /dev/null +++ b/doc/sphinxext/prs/9461.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8432c29c193d96f13f521b5059a1dd616c942eaa", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/io/snirf/_snirf.py": { + "a": 11, + "d": 10 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 18, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9463.json b/doc/sphinxext/prs/9463.json new file mode 100644 index 00000000000..b6ab97611a4 --- /dev/null +++ b/doc/sphinxext/prs/9463.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "c40056c11023ff2d57b4e7bd16904deda280880a", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/forward.rst": { + "a": 1, + "d": 0 + }, + "mne/bem.py": { + "a": 102, + "d": 2 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 10, + "d": 85 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9464.json b/doc/sphinxext/prs/9464.json new file mode 100644 index 00000000000..0b5550a6f0f --- /dev/null +++ b/doc/sphinxext/prs/9464.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "51f25d33231e1e05c0f54e05d0e9f8c84d3603d6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9465.json b/doc/sphinxext/prs/9465.json new file mode 100644 index 00000000000..35166be4216 --- /dev/null +++ b/doc/sphinxext/prs/9465.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "6d7dadd73d1243bf650985aca0e7123017653da9", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/check.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 5, + "d": 0 + }, + "mne/utils/tests/test_config.py": { + "a": 16, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9467.json b/doc/sphinxext/prs/9467.json new file mode 100644 index 00000000000..8676da72cf5 --- /dev/null +++ b/doc/sphinxext/prs/9467.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "66d839032bd66c0fd1d6f2761509b38ee88e5ec9", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/utils/tests/test_config.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9468.json b/doc/sphinxext/prs/9468.json new file mode 100644 index 
00000000000..798af005c2e --- /dev/null +++ b/doc/sphinxext/prs/9468.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "b6a36089a963a97380488845491fe38e8d3b610b", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 31, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 51, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/947.json b/doc/sphinxext/prs/947.json new file mode 100644 index 00000000000..8fb02f007d7 --- /dev/null +++ b/doc/sphinxext/prs/947.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "bb5fcd25fca2abb16f34c0cad5dc57ce65b03c57", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "doc/source/python_reference.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 111, + "d": 44 + }, + "mne/gui/_coreg_gui.py": { + "a": 18, + "d": 7 + }, + "mne/gui/_fiducials_gui.py": { + "a": 1, + "d": 0 + }, + "mne/gui/_file_traits.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9471.json b/doc/sphinxext/prs/9471.json new file mode 100644 index 00000000000..bae8b9e0f57 --- /dev/null +++ b/doc/sphinxext/prs/9471.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ad443737b81949a79ff8330c95705ae90aaafefc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9472.json b/doc/sphinxext/prs/9472.json new file mode 100644 index 00000000000..4c72ef81ffc --- /dev/null +++ b/doc/sphinxext/prs/9472.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "dfc673936734a418d1b2d23380c591e551f6136c", + "authors": [ + { + "n": "Lukas Hecker", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/install/pre_install.rst": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9474.json b/doc/sphinxext/prs/9474.json new file mode 100644 index 00000000000..7cebe840175 --- /dev/null +++ b/doc/sphinxext/prs/9474.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "fa6738da08480ed5142851b0dcf38e78d3eb2fee", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/montage.py": { + "a": 12, + "d": 5 + }, + "mne/channels/tests/test_montage.py": { + "a": 7, + "d": 0 + }, + "mne/io/meas_info.py": { + "a": 7, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9476.json b/doc/sphinxext/prs/9476.json new file mode 100644 index 00000000000..10cad30bedc --- /dev/null +++ b/doc/sphinxext/prs/9476.json @@ 
-0,0 +1,15 @@ +{ + "merge_commit_sha": "25f60579aca14e879a98399d1c083c70f9ae1327", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9477.json b/doc/sphinxext/prs/9477.json new file mode 100644 index 00000000000..a0766984110 --- /dev/null +++ b/doc/sphinxext/prs/9477.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8b9616dfbc377ce468a6d52b43f7a973eac47cab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/overview/roadmap.rst": { + "a": 23, + "d": 0 + }, + "examples/datasets/opm_data.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9478.json b/doc/sphinxext/prs/9478.json new file mode 100644 index 00000000000..5373e757c16 --- /dev/null +++ b/doc/sphinxext/prs/9478.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f5460789ec67f59bdd21d0f29898d38683b7d7d4", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 7, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "tutorials/clinical/20_seeg.py": { + "a": 33, + "d": 9 + }, + "tutorials/clinical/30_ecog.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/948.json b/doc/sphinxext/prs/948.json new file mode 100644 index 00000000000..a7e3e5a3a20 --- /dev/null +++ b/doc/sphinxext/prs/948.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "117e322f75cf00cf6c2647a033de07a6d46a3395", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/epochs.py": { + "a": 15, + "d": 0 + }, + "mne/tests/test_epochs.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9480.json b/doc/sphinxext/prs/9480.json new file mode 100644 index 00000000000..5ab2a0d158c --- /dev/null +++ b/doc/sphinxext/prs/9480.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "fb7cf920c18a41d64aaa452bfb1b3bda5d9ae6a7", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/_includes/inverse.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/0.13.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.21.inc": { + "a": 1, + "d": 1 + }, + "doc/overview/datasets_index.rst": { + "a": 1, + "d": 1 + }, + "examples/decoding/decoding_spatio_temporal_source.py": { + "a": 1, + "d": 1 + }, + "examples/io/README.txt": { + "a": 1, + "d": 1 + }, + "examples/io/read_neo_format.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 2, + "d": 2 + }, + "tutorials/clinical/30_ecog.py": { + "a": 2, + "d": 2 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/evoked/30_eeg_erp.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/90_phantom_4DBTi.py": { + "a": 1, + "d": 1 + }, + 
"tutorials/io/10_reading_meg_data.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 1, + "d": 1 + }, + "tutorials/machine-learning/50_decoding.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 7, + "d": 7 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9481.json b/doc/sphinxext/prs/9481.json new file mode 100644 index 00000000000..3d03ab23765 --- /dev/null +++ b/doc/sphinxext/prs/9481.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "080e7a879d325ad8f0c11fe28a8ff9f5983dfd2f", + "authors": [ + { + "n": "PAB", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Pierre-Antoine Bannier", + "e": "pierre-antoine@macbook-pro-de-pierre-antoine.home" + }, + { + "n": "Pierre-Antoine Bannier", + "e": "pierre-antoine@mbpdepieantoine.home" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Pierre-Antoine Bannier", + "e": "pierre-antoine@222-194.wifi-inria-saclay.saclay.inria.fr" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 15, + "d": 0 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 94, + "d": 19 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 3, + "d": 3 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9484.json b/doc/sphinxext/prs/9484.json new file mode 100644 index 00000000000..a332dcd4663 --- /dev/null +++ b/doc/sphinxext/prs/9484.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "44f5ecb6b19d15bddd13582e489bd2a5a88827f6", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "doc/mri.rst": { + "a": 2, + "d": 0 + }, + "doc/references.bib": { + "a": 12, + "d": 0 + }, + "mne/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/morph.py": { + "a": 9, + "d": 2 + }, + "mne/surface.py": { + "a": 109, + "d": 0 + }, + "mne/tests/test_surface.py": { + "a": 27, + "d": 3 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 0 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 469, + "d": 0 + }, + "tutorials/clinical/20_seeg.py": { + "a": 3, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9485.json b/doc/sphinxext/prs/9485.json new file mode 100644 index 00000000000..4ce176921d5 --- /dev/null +++ b/doc/sphinxext/prs/9485.json @@ -0,0 +1,31 @@ +{ 
+ "merge_commit_sha": "22b95895860ffe7cb907c0e8ac8929140c170a8c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 20, + "d": 12 + }, + "mne/tests/test_epochs.py": { + "a": 27, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9486.json b/doc/sphinxext/prs/9486.json new file mode 100644 index 00000000000..8d55ccd8eac --- /dev/null +++ b/doc/sphinxext/prs/9486.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b3768fe8347039e90d8e5a0bfd648315f9f345fa", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/datasets/_fsaverage/base.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9487.json b/doc/sphinxext/prs/9487.json new file mode 100644 index 00000000000..03463467adf --- /dev/null +++ b/doc/sphinxext/prs/9487.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "03affdcd7a7e706471aeb757932d465f93e8259e", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9489.json b/doc/sphinxext/prs/9489.json new file mode 100644 index 00000000000..adfd39b9fb4 --- /dev/null +++ b/doc/sphinxext/prs/9489.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bdaa1d460201a3bc3cec95b67fc2b8d31a933652", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9490.json b/doc/sphinxext/prs/9490.json new file mode 100644 index 00000000000..485fb1188da --- /dev/null +++ b/doc/sphinxext/prs/9490.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f8a51e0afb6c2eba37b06a18eaaabf62ac98a6a9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9491.json b/doc/sphinxext/prs/9491.json new file mode 100644 index 00000000000..9b33a6fe08c --- /dev/null +++ b/doc/sphinxext/prs/9491.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "869bf9e3ad78f3e7e5626351ad34cc0d5a25e62f", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9493.json b/doc/sphinxext/prs/9493.json new file mode 100644 index 00000000000..58753c1ec34 --- /dev/null +++ b/doc/sphinxext/prs/9493.json @@ -0,0 +1,139 @@ +{ + "merge_commit_sha": "702bdaccee9131798c68a82d81ad497c286aac4a", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Adam Li", + "e": "adam2392@Adams-MBP-2.home" + }, + { + "n": "Adam Li", + "e": "adam2392@new-host-2.home" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": 
{ + "a": 34, + "d": 15 + }, + "doc/overview/datasets_index.rst": { + "a": 1, + "d": 1 + }, + "examples/connectivity/README.txt": { + "a": 4, + "d": 0 + }, + "examples/connectivity/cwt_sensor_connectivity.py": { + "a": 0, + "d": 85 + }, + "examples/connectivity/mixed_source_space_connectivity.py": { + "a": 0, + "d": 187 + }, + "examples/connectivity/mne_inverse_coherence_epochs.py": { + "a": 0, + "d": 149 + }, + "examples/connectivity/mne_inverse_connectivity_spectrum.py": { + "a": 0, + "d": 99 + }, + "examples/connectivity/mne_inverse_envelope_correlation.py": { + "a": 0, + "d": 110 + }, + "examples/connectivity/mne_inverse_envelope_correlation_volume.py": { + "a": 0, + "d": 95 + }, + "examples/connectivity/mne_inverse_label_connectivity.py": { + "a": 0, + "d": 181 + }, + "examples/connectivity/mne_inverse_psi_visual.py": { + "a": 0, + "d": 118 + }, + "examples/connectivity/sensor_connectivity.py": { + "a": 0, + "d": 57 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/connectivity/effective.py": { + "a": 7, + "d": 8 + }, + "mne/connectivity/envelope.py": { + "a": 3, + "d": 1 + }, + "mne/connectivity/spectral.py": { + "a": 3, + "d": 2 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 3, + "d": 1 + }, + "mne/connectivity/utils.py": { + "a": 4, + "d": 0 + }, + "mne/surface.py": { + "a": 2, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 3, + "d": 1 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 8, + "d": 1 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9494.json b/doc/sphinxext/prs/9494.json new file mode 100644 index 00000000000..2d6d282196c --- /dev/null +++ b/doc/sphinxext/prs/9494.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e4b16dc57a6a188aa06332b73d911e8131972522", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/backends/_pyvista.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9497.json b/doc/sphinxext/prs/9497.json new file mode 100644 index 00000000000..061da5f2ca7 --- /dev/null +++ b/doc/sphinxext/prs/9497.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "0af111f4238d735fc05a260b4347ddb9958f39f1", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9498.json b/doc/sphinxext/prs/9498.json new file mode 100644 index 00000000000..c5df12b9102 --- /dev/null +++ b/doc/sphinxext/prs/9498.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0a0f3cef00ccf61278530ded0b98d90b2a751d45", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/0.23.inc": { + "a": 2, + "d": 2 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9499.json b/doc/sphinxext/prs/9499.json new file mode 100644 index 00000000000..a7d304b2933 --- /dev/null +++ b/doc/sphinxext/prs/9499.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8c9c078e38cc4c7d67aa4e4f8a9b19eb32bb075e", 
+ "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/io/meas_info.py": { + "a": 4, + "d": 5 + }, + "mne/io/tests/test_meas_info.py": { + "a": 2, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9500.json b/doc/sphinxext/prs/9500.json new file mode 100644 index 00000000000..4430f9c52fe --- /dev/null +++ b/doc/sphinxext/prs/9500.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "15168c50a362af6753851eeb2aff4ac06fa022b2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 7, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 18, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9504.json b/doc/sphinxext/prs/9504.json new file mode 100644 index 00000000000..4f2b4237b64 --- /dev/null +++ b/doc/sphinxext/prs/9504.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "9690573def6fd672c7a18bce5fe7ba94a5e64c01", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/surface.py": { + "a": 4, + "d": 0 + }, + "requirements_doc.txt": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9505.json b/doc/sphinxext/prs/9505.json new file mode 100644 index 00000000000..8fb2a838c44 --- /dev/null +++ b/doc/sphinxext/prs/9505.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3078533f8704820a168797578f5b47ddff4be347", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 23, + "d": 10 + }, + "mne/viz/tests/test_epochs.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/951.json b/doc/sphinxext/prs/951.json new file mode 100644 index 00000000000..4d55c262052 --- /dev/null +++ b/doc/sphinxext/prs/951.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b3f227ca3b0f39d973db85e56bf0eb0b95dcfe2b", + "authors": [ + { + "n": "Praveen Sripad", + "e": "pravsripad@gmail.com" + } + ], + "changes": { + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 3, + "d": 3 + }, + "mne/commands/utils.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9512.json b/doc/sphinxext/prs/9512.json new file mode 100644 index 00000000000..48a5f1b71b7 --- /dev/null +++ b/doc/sphinxext/prs/9512.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f1ff361a6426513bba4110b122ab867dea44e012", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/linux_conda.yml": { + "a": 2, + "d": 2 + }, + "azure-pipelines.yml": { + "a": 6, + "d": 6 + }, + "requirements.txt": { + "a": 4, + "d": 3 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9513.json b/doc/sphinxext/prs/9513.json new file mode 100644 index 00000000000..d806e8fb73e --- /dev/null +++ b/doc/sphinxext/prs/9513.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "99e39ef2a268da0b8f89db14858671d0314eafb0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + 
"n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/forward/forward.py": { + "a": 9, + "d": 5 + }, + "mne/simulation/evoked.py": { + "a": 7, + "d": 4 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9514.json b/doc/sphinxext/prs/9514.json new file mode 100644 index 00000000000..146ba7d3612 --- /dev/null +++ b/doc/sphinxext/prs/9514.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2e200aa7778a1462b4da6ce7bf4253b99cde3d6a", + "authors": [ + { + "n": "nordme", + "e": "nordme@uw.edu" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/realign.py": { + "a": 14, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9516.json b/doc/sphinxext/prs/9516.json new file mode 100644 index 00000000000..5b790c401d6 --- /dev/null +++ b/doc/sphinxext/prs/9516.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "f15663010c468a2abeda6ba44833e089374a2349", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 1 + }, + "doc/mri.rst": { + "a": 1, + "d": 0 + }, + "mne/coreg.py": { + "a": 645, + "d": 2 + }, + "mne/io/_digitization.py": { + "a": 3, + "d": 3 + }, + "mne/io/tests/test_meas_info.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 86, + "d": 7 + }, + "tutorials/forward/25_automated_coreg.py": { + "a": 82, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9517.json b/doc/sphinxext/prs/9517.json new file mode 100644 index 00000000000..634dda9b201 --- /dev/null +++ b/doc/sphinxext/prs/9517.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "630ab79374b9f71a978f6ddd15f1590ada5270fe", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 2, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9518.json b/doc/sphinxext/prs/9518.json new file mode 100644 index 00000000000..6835591e0ae --- /dev/null +++ b/doc/sphinxext/prs/9518.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "bdd7a12f94aadbfb547d97b02f4de1cc02197e33", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 6, + "d": 5 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 2, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 5, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9519.json b/doc/sphinxext/prs/9519.json new file mode 100644 index 00000000000..f2ca8203937 --- /dev/null +++ b/doc/sphinxext/prs/9519.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "4656e131bd04c85335531f475dffdb35388d0d4e", 
+ "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 3, + "d": 2 + }, + "mne/viz/epochs.py": { + "a": 8, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 8, + "d": 2 + }, + "mne/viz/tests/test_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9521.json b/doc/sphinxext/prs/9521.json new file mode 100644 index 00000000000..c2c41ba2ff5 --- /dev/null +++ b/doc/sphinxext/prs/9521.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "9fb78760f0fa367db02574feb981978353f845e9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alex", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 4 + }, + "doc/mri.rst": { + "a": 3, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/defaults.py": { + "a": 8, + "d": 1 + }, + "mne/morph.py": { + "a": 13, + "d": 94 + }, + "mne/tests/test_morph.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_transforms.py": { + "a": 31, + "d": 1 + }, + "mne/transforms.py": { + "a": 248, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 74, + "d": 0 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 116, + "d": 192 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9522.json b/doc/sphinxext/prs/9522.json new file mode 100644 index 00000000000..445d2b9a839 --- /dev/null +++ b/doc/sphinxext/prs/9522.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "8b4ed3cc0ef7c1e6ddac899992cddcdbc86a9a7b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 18, + "d": 10 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 14, + "d": 11 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/nirs/tests/test_optical_density.py": { + "a": 4, + "d": 2 + }, + "mne/preprocessing/nirs/tests/test_scalp_coupling_index.py": { + "a": 7, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9524.json b/doc/sphinxext/prs/9524.json new file mode 100644 index 00000000000..ae62c6e4b65 --- /dev/null +++ b/doc/sphinxext/prs/9524.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e1801abea4cb18f08a34e0a017825ab53175298d", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 3, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 24, + "d": 3 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9525.json b/doc/sphinxext/prs/9525.json new file mode 100644 index 00000000000..0ce06df96e3 --- /dev/null +++ b/doc/sphinxext/prs/9525.json @@ -0,0 +1,55 @@ +{ + 
"merge_commit_sha": "ae98d7e5aa0a7c1030526e2bdc288389ee5461a1", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 2 + }, + "mne/annotations.py": { + "a": 36, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 54, + "d": 1 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9528.json b/doc/sphinxext/prs/9528.json new file mode 100644 index 00000000000..983f06dfd12 --- /dev/null +++ b/doc/sphinxext/prs/9528.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3c60678f631978dec042ec518e570c9ef195406e", + "authors": [ + { + "n": "Mikolaj Magnuski", + "e": "mmagnuski@swps.edu.pl" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 19, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9529.json b/doc/sphinxext/prs/9529.json new file mode 100644 index 00000000000..29ba9fd1fe7 --- /dev/null +++ b/doc/sphinxext/prs/9529.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a4025a9eb5dd88874dc6942f23313bb81fd98795", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/annotations.py": { + "a": 44, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 30, + "d": 0 + }, + "mne/utils/check.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/953.json b/doc/sphinxext/prs/953.json new file mode 100644 index 00000000000..6a1c4451b21 --- /dev/null +++ b/doc/sphinxext/prs/953.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "b48e9adac2103b6951bd091cea951c3879dd9958", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/decoding/plot_decoding_time_generalization.py": { + "a": 93, + "d": 0 + }, + "mne/decoding/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/decoding/tests/test_time_gen.py": { + "a": 41, + "d": 0 + }, + "mne/decoding/time_gen.py": { + "a": 123, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9530.json b/doc/sphinxext/prs/9530.json new file mode 100644 index 00000000000..9ee301286c9 --- /dev/null +++ b/doc/sphinxext/prs/9530.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8a0f5cdd8039245e84aec1312c0e302326ad15e3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9531.json b/doc/sphinxext/prs/9531.json new file mode 100644 index 00000000000..814fd509893 --- /dev/null +++ b/doc/sphinxext/prs/9531.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "7e27ce91070569f76bc5ffa0e0732c7cf4297821", + "authors": [ + { + "n": "Eric Larson", + "e": 
"larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/maxwell.py": { + "a": 8, + "d": 0 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 47, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9532.json b/doc/sphinxext/prs/9532.json new file mode 100644 index 00000000000..7d432a074e5 --- /dev/null +++ b/doc/sphinxext/prs/9532.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "032d6112d1a141ae00c6807713106a880d46e8ad", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "tutorials/intro/40_sensor_locations.py": { + "a": 131, + "d": 141 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9534.json b/doc/sphinxext/prs/9534.json new file mode 100644 index 00000000000..1b921293aa1 --- /dev/null +++ b/doc/sphinxext/prs/9534.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b377a687466dc10b7b342d236dcba9c6eb8b9b5b", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/time_frequency/time_frequency_erds.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9535.json b/doc/sphinxext/prs/9535.json new file mode 100644 index 00000000000..a4d61732f8e --- /dev/null +++ b/doc/sphinxext/prs/9535.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "e7cb1b795016ef5d88ab167e011ded1f543fd79d", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 16, + "d": 19 + }, + "mne/tests/test_epochs.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9537.json b/doc/sphinxext/prs/9537.json new file mode 100644 index 00000000000..b2a5a9e88d7 --- /dev/null +++ b/doc/sphinxext/prs/9537.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "720a95c7599d933526b5db7fae823e4349176a01", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/viz/raw.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9538.json b/doc/sphinxext/prs/9538.json new file mode 100644 index 00000000000..a3dbf5c864f --- /dev/null +++ b/doc/sphinxext/prs/9538.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "14c139d492b8826764645a17cd3a3bb69e576819", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/annotations.py": { + "a": 6, + "d": 14 + }, + "mne/channels/channels.py": { + "a": 3, + "d": 6 + }, + "mne/tests/test_annotations.py": { + "a": 2, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 35, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9539.json b/doc/sphinxext/prs/9539.json new file mode 100644 index 00000000000..8da4b11abf0 --- /dev/null +++ b/doc/sphinxext/prs/9539.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "4308cc3912808d12f65e595221cc36ac60017503", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric 
Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "mne/annotations.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_annotations.py": { + "a": 5, + "d": 0 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 19, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9540.json b/doc/sphinxext/prs/9540.json new file mode 100644 index 00000000000..7bbf29f7dda --- /dev/null +++ b/doc/sphinxext/prs/9540.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "3a4a048c5fc5ee77875a705848eb4be8354732f7", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/glossary.rst": { + "a": 10, + "d": 0 + }, + "doc/links.inc": { + "a": 0, + "d": 9 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/defaults.py": { + "a": 38, + "d": 0 + }, + "mne/surface.py": { + "a": 21, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 109, + "d": 15 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 6, + "d": 0 + }, + "mne/viz/backends/_pyvista.py": { + "a": 13, + "d": 7 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 0, + "d": 2 + }, + "tutorials/clinical/20_seeg.py": { + "a": 21, + "d": 3 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 20, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9541.json b/doc/sphinxext/prs/9541.json new file mode 100644 index 00000000000..0e9df5a01c4 --- /dev/null +++ b/doc/sphinxext/prs/9541.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ba053a6843a0acae892d6046ce9bc2c752fbbcb9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 51, + "d": 8 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 10, + "d": 3 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 6, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 39, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9543.json b/doc/sphinxext/prs/9543.json new file mode 100644 index 00000000000..4702c311e6b --- /dev/null +++ b/doc/sphinxext/prs/9543.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": "3b9ec94e43aed102fe759752b899514c80afc5f5", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/links.inc": { + "a": 7, + "d": 0 + }, + "doc/mri.rst": { + "a": 5, + "d": 0 + }, + "doc/source_space.rst": { + "a": 0, + "d": 4 + }, + "mne/__init__.py": { + "a": 5, + "d": 4 + }, + "mne/_freesurfer.py": { + "a": 499, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 4, + "d": 2 + }, + "mne/datasets/_fsaverage/base.py": { + "a": 1, + "d": 1 + }, + "mne/dipole.py": { + "a": 2, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 4, + "d": 3 + }, + "mne/io/nirx/nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 1 + }, + 
"mne/label.py": { + "a": 2, + "d": 2 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 3, + "d": 3 + }, + "mne/source_space.py": { + "a": 9, + "d": 495 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_freesurfer.py": { + "a": 143, + "d": 0 + }, + "mne/tests/test_morph.py": { + "a": 3, + "d": 4 + }, + "mne/tests/test_source_space.py": { + "a": 4, + "d": 128 + }, + "mne/utils/__init__.py": { + "a": 6, + "d": 5 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9544.json b/doc/sphinxext/prs/9544.json new file mode 100644 index 00000000000..eb3e61093cc --- /dev/null +++ b/doc/sphinxext/prs/9544.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "4875e04cdebba132fc637ca65b3cb50584aee8b7", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/mri.rst": { + "a": 1, + "d": 1 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/_digitization.py": { + "a": 3, + "d": 2 + }, + "mne/surface.py": { + "a": 208, + "d": 4 + }, + "mne/tests/test_surface.py": { + "a": 103, + "d": 5 + }, + "mne/tests/test_transforms.py": { + "a": 16, + "d": 1 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 57, + "d": 93 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9545.json b/doc/sphinxext/prs/9545.json new file mode 100644 index 00000000000..df97d466701 --- /dev/null +++ b/doc/sphinxext/prs/9545.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "b91e4f77d31f434dfcf496c3115ab1168539c514", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/links.inc": { + "a": 8, + "d": 0 + }, + "doc/mri.rst": { + "a": 1, + "d": 0 + }, + "doc/visualization.rst": { + "a": 1, + "d": 0 + }, + "mne/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 28, + "d": 1 + }, + "mne/surface.py": { + "a": 120, + "d": 47 + }, + "mne/tests/test_freesurfer.py": { + "a": 9, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 34, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 10, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 79, + "d": 0 + }, + "mne/viz/tests/test_circle.py": { + "a": 21, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 40, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9547.json b/doc/sphinxext/prs/9547.json new file mode 100644 index 00000000000..938af4f8207 --- /dev/null +++ b/doc/sphinxext/prs/9547.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f367c0b4d8d8153150e53b9fe54b92066b508aa7", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/io/_digitization.py": { + "a": 5, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 13, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9549.json b/doc/sphinxext/prs/9549.json new file mode 100644 index 00000000000..3f85dad6b7c --- /dev/null +++ b/doc/sphinxext/prs/9549.json @@ -0,0 +1,35 @@ +{ 
+ "merge_commit_sha": "ff02cb1806d5671cc93edb0ceb16b05085681dae", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 0 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9550.json b/doc/sphinxext/prs/9550.json new file mode 100644 index 00000000000..c78cc0c9b53 --- /dev/null +++ b/doc/sphinxext/prs/9550.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "0156284d20f0b1ae2ca8630acf3a507b4b50b21d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fixes.py": { + "a": 3, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 14, + "d": 7 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_topo.py": { + "a": 4, + "d": 1 + }, + "mne/viz/tests/test_topomap.py": { + "a": 20, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 5, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9552.json b/doc/sphinxext/prs/9552.json new file mode 100644 index 00000000000..c8a41433e57 --- /dev/null +++ b/doc/sphinxext/prs/9552.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6a074844ccd1fd8eaf4f9b84095aaaaff357d532", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9553.json b/doc/sphinxext/prs/9553.json new file mode 100644 index 00000000000..838c48cc824 --- /dev/null +++ b/doc/sphinxext/prs/9553.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "19bdd7941e3dedaf4fd8dcc49805d2e7dacfbecc", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Johann Benerradi", + "e": "johann.benerradi@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 34, + "d": 15 + }, + "mne/io/base.py": { + "a": 53, + "d": 46 + }, + "mne/tests/test_epochs.py": { + "a": 35, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 20, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9554.json b/doc/sphinxext/prs/9554.json new file mode 100644 index 00000000000..fc837c7d05f --- /dev/null +++ b/doc/sphinxext/prs/9554.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a8c197d47cb32d07b16cf8e257ff168b05d9fe1a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9555.json b/doc/sphinxext/prs/9555.json new file mode 100644 index 00000000000..658211d2c52 --- /dev/null +++ b/doc/sphinxext/prs/9555.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "3843c36cec7310eb7b7c793e825be608f81af852", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + 
"doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 2, + "d": 8 + }, + "mne/evoked.py": { + "a": 21, + "d": 0 + }, + "mne/io/tests/test_reference.py": { + "a": 2, + "d": 3 + }, + "mne/tests/test_evoked.py": { + "a": 14, + "d": 0 + }, + "setup.cfg": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9556.json b/doc/sphinxext/prs/9556.json new file mode 100644 index 00000000000..58d7825b34a --- /dev/null +++ b/doc/sphinxext/prs/9556.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "08b4b0ec8494b85977802872ee539b08e30a7685", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/epochs.py": { + "a": 23, + "d": 3 + }, + "mne/evoked.py": { + "a": 9, + "d": 2 + }, + "mne/io/base.py": { + "a": 64, + "d": 3 + }, + "mne/io/tests/test_raw.py": { + "a": 39, + "d": 0 + }, + "mne/preprocessing/ecg.py": { + "a": 30, + "d": 6 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 11, + "d": 2 + }, + "mne/rank.py": { + "a": 1, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 14, + "d": 0 + }, + "mne/tests/test_evoked.py": { + "a": 14, + "d": 0 + }, + "mne/utils/check.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9557.json b/doc/sphinxext/prs/9557.json new file mode 100644 index 00000000000..3bbda895ceb --- /dev/null +++ b/doc/sphinxext/prs/9557.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "308a0abf7c8b8b14c9afc6db2e3ed5b57787be31", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/visualization/evoked_topomap.py": { + "a": 1, + "d": 0 + }, + "examples/visualization/evoked_whitening.py": { + "a": 0, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9558.json b/doc/sphinxext/prs/9558.json new file mode 100644 index 00000000000..5d8efa91c1c --- /dev/null +++ b/doc/sphinxext/prs/9558.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "bd9bf85b28beb3e488289cf532dc0e30919b3ba2", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/edf/edf.py": { + "a": 40, + "d": 34 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 13, + "d": 0 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 5, + "d": 0 + }, + "mne/time_frequency/psd.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9561.json b/doc/sphinxext/prs/9561.json new file mode 100644 index 00000000000..691f112127a --- /dev/null +++ b/doc/sphinxext/prs/9561.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "80e4910831d7eec822786314daccd43bdd558b87", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 4, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 7, + "d": 5 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/9564.json b/doc/sphinxext/prs/9564.json new file mode 100644 index 00000000000..9700267e37c --- /dev/null +++ b/doc/sphinxext/prs/9564.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c0d99c262ac5d042e5cf48e4ff9679d8ea6d4f5c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 3 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9565.json b/doc/sphinxext/prs/9565.json new file mode 100644 index 00000000000..b70f734ea19 --- /dev/null +++ b/doc/sphinxext/prs/9565.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "155540e5325b43dac44c7f35575ef10f305e4e94", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/egimff.py": { + "a": 19, + "d": 7 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 18, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9567.json b/doc/sphinxext/prs/9567.json new file mode 100644 index 00000000000..1e6ebeadbae --- /dev/null +++ b/doc/sphinxext/prs/9567.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "688c4cf9753c357666b0428a3974d379a8ce8a3d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9568.json b/doc/sphinxext/prs/9568.json new file mode 100644 index 00000000000..c811854d916 --- /dev/null +++ b/doc/sphinxext/prs/9568.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "03dc756c9fa93f3929cc47d1012fb103959a766c", + "authors": [ + { + "n": "Jean-Rémi KING", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/coreg.py": { + "a": 38, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9569.json b/doc/sphinxext/prs/9569.json new file mode 100644 index 00000000000..4c93fd90210 --- /dev/null +++ b/doc/sphinxext/prs/9569.json @@ -0,0 +1,1979 @@ +{ + "merge_commit_sha": "a22a3ca42687fb63975070cc46ac7754af409732", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "examples/connectivity/cwt_sensor_connectivity.py": { + "a": 5, + "d": 3 + }, + "examples/connectivity/mixed_source_space_connectivity.py": { + "a": 4, + "d": 2 + }, + "examples/connectivity/mne_inverse_coherence_epochs.py": { + "a": 8, + "d": 6 + }, + "examples/connectivity/mne_inverse_connectivity_spectrum.py": { + "a": 3, + "d": 1 + }, + "examples/connectivity/mne_inverse_envelope_correlation.py": { + "a": 3, + "d": 1 + }, + "examples/connectivity/mne_inverse_envelope_correlation_volume.py": { + "a": 3, + "d": 1 + }, + "examples/connectivity/mne_inverse_label_connectivity.py": { + "a": 8, + "d": 6 + }, + "examples/connectivity/mne_inverse_psi_visual.py": { + "a": 4, + "d": 2 + }, + "examples/connectivity/sensor_connectivity.py": { + "a": 4, + "d": 2 + }, + "examples/datasets/brainstorm_data.py": { + "a": 4, + "d": 2 + }, + "examples/datasets/hf_sef_data.py": { + "a": 6, + "d": 4 + }, + "examples/datasets/limo_data.py": { + "a": 20, + 
"d": 18 + }, + "examples/datasets/opm_data.py": { + "a": 6, + "d": 4 + }, + "examples/datasets/spm_faces_dataset_sgskip.py": { + "a": 8, + "d": 6 + }, + "examples/decoding/decoding_csp_eeg.py": { + "a": 5, + "d": 3 + }, + "examples/decoding/decoding_csp_timefreq.py": { + "a": 8, + "d": 6 + }, + "examples/decoding/decoding_rsa_sgskip.py": { + "a": 3, + "d": 1 + }, + "examples/decoding/decoding_spatio_temporal_source.py": { + "a": 7, + "d": 5 + }, + "examples/decoding/decoding_spoc_CMC.py": { + "a": 3, + "d": 1 + }, + "examples/decoding/decoding_time_generalization_conditions.py": { + "a": 6, + "d": 4 + }, + "examples/decoding/decoding_unsupervised_spatial_filter.py": { + "a": 3, + "d": 1 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 6, + "d": 4 + }, + "examples/decoding/ems_filtering.py": { + "a": 3, + "d": 1 + }, + "examples/decoding/linear_model_patterns.py": { + "a": 7, + "d": 5 + }, + "examples/decoding/receptive_field_mtrf.py": { + "a": 10, + "d": 8 + }, + "examples/decoding/ssd_spatial_filters.py": { + "a": 8, + "d": 6 + }, + "examples/forward/forward_sensitivity_maps.py": { + "a": 8, + "d": 6 + }, + "examples/forward/left_cerebellum_volume_source.py": { + "a": 6, + "d": 4 + }, + "examples/forward/source_space_morphing.py": { + "a": 3, + "d": 1 + }, + "examples/inverse/compute_mne_inverse_epochs_in_label.py": { + "a": 5, + "d": 3 + }, + "examples/inverse/compute_mne_inverse_raw_in_label.py": { + "a": 4, + "d": 2 + }, + "examples/inverse/compute_mne_inverse_volume.py": { + "a": 3, + "d": 1 + }, + "examples/inverse/covariance_whitening_dspm.py": { + "a": 5, + "d": 3 + }, + "examples/inverse/custom_inverse_solver.py": { + "a": 6, + "d": 4 + }, + "examples/inverse/dics_source_power.py": { + "a": 11, + "d": 9 + }, + "examples/inverse/evoked_ers_source_power.py": { + "a": 9, + "d": 7 + }, + "examples/inverse/gamma_map_inverse.py": { + "a": 8, + "d": 6 + }, + "examples/inverse/label_activation_from_stc.py": { + "a": 3, + "d": 1 + }, + "examples/inverse/label_from_stc.py": { + "a": 5, + "d": 3 + }, + "examples/inverse/label_source_activations.py": { + "a": 6, + "d": 4 + }, + "examples/inverse/mixed_norm_inverse.py": { + "a": 10, + "d": 8 + }, + "examples/inverse/mixed_source_space_inverse.py": { + "a": 14, + "d": 12 + }, + "examples/inverse/mne_cov_power.py": { + "a": 10, + "d": 8 + }, + "examples/inverse/morph_surface_stc.py": { + "a": 12, + "d": 10 + }, + "examples/inverse/morph_volume_stc.py": { + "a": 10, + "d": 8 + }, + "examples/inverse/multidict_reweighted_tfmxne.py": { + "a": 8, + "d": 6 + }, + "examples/inverse/psf_ctf_label_leakage.py": { + "a": 12, + "d": 10 + }, + "examples/inverse/psf_ctf_vertices.py": { + "a": 5, + "d": 3 + }, + "examples/inverse/psf_ctf_vertices_lcmv.py": { + "a": 5, + "d": 3 + }, + "examples/inverse/rap_music.py": { + "a": 4, + "d": 2 + }, + "examples/inverse/read_inverse.py": { + "a": 4, + "d": 2 + }, + "examples/inverse/read_stc.py": { + "a": 3, + "d": 1 + }, + "examples/inverse/resolution_metrics.py": { + "a": 12, + "d": 10 + }, + "examples/inverse/resolution_metrics_eegmeg.py": { + "a": 12, + "d": 10 + }, + "examples/inverse/snr_estimate.py": { + "a": 3, + "d": 1 + }, + "examples/inverse/source_space_snr.py": { + "a": 5, + "d": 3 + }, + "examples/inverse/time_frequency_mixed_norm_inverse.py": { + "a": 10, + "d": 8 + }, + "examples/inverse/vector_mne_solution.py": { + "a": 7, + "d": 5 + }, + "examples/io/elekta_epochs.py": { + "a": 8, + "d": 6 + }, + "examples/io/read_neo_format.py": { + "a": 3, + "d": 1 + }, + 
"examples/io/read_noise_covariance_matrix.py": { + "a": 4, + "d": 2 + }, + "examples/io/read_xdf.py": { + "a": 4, + "d": 2 + }, + "examples/preprocessing/define_target_events.py": { + "a": 6, + "d": 4 + }, + "examples/preprocessing/eeg_csd.py": { + "a": 10, + "d": 8 + }, + "examples/preprocessing/eog_artifact_histogram.py": { + "a": 5, + "d": 3 + }, + "examples/preprocessing/find_ref_artifacts.py": { + "a": 11, + "d": 9 + }, + "examples/preprocessing/fnirs_artifact_removal.py": { + "a": 9, + "d": 7 + }, + "examples/preprocessing/ica_comparison.py": { + "a": 9, + "d": 7 + }, + "examples/preprocessing/interpolate_bad_channels.py": { + "a": 6, + "d": 4 + }, + "examples/preprocessing/movement_compensation.py": { + "a": 10, + "d": 8 + }, + "examples/preprocessing/movement_detection.py": { + "a": 5, + "d": 3 + }, + "examples/preprocessing/muscle_detection.py": { + "a": 7, + "d": 5 + }, + "examples/preprocessing/otp.py": { + "a": 7, + "d": 5 + }, + "examples/preprocessing/shift_evoked.py": { + "a": 3, + "d": 1 + }, + "examples/preprocessing/virtual_evoked.py": { + "a": 7, + "d": 5 + }, + "examples/preprocessing/xdawn_denoising.py": { + "a": 7, + "d": 5 + }, + "examples/simulation/simulate_evoked_data.py": { + "a": 7, + "d": 5 + }, + "examples/simulation/simulate_raw_data.py": { + "a": 3, + "d": 1 + }, + "examples/simulation/simulated_raw_data_using_subject_anatomy.py": { + "a": 10, + "d": 8 + }, + "examples/simulation/source_simulator.py": { + "a": 3, + "d": 1 + }, + "examples/stats/cluster_stats_evoked.py": { + "a": 7, + "d": 5 + }, + "examples/stats/fdr_stats_evoked.py": { + "a": 7, + "d": 5 + }, + "examples/stats/linear_regression_raw.py": { + "a": 3, + "d": 1 + }, + "examples/stats/sensor_permutation_test.py": { + "a": 5, + "d": 3 + }, + "examples/stats/sensor_regression.py": { + "a": 3, + "d": 1 + }, + "examples/time_frequency/compute_csd.py": { + "a": 9, + "d": 7 + }, + "examples/time_frequency/compute_source_psd_epochs.py": { + "a": 6, + "d": 4 + }, + "examples/time_frequency/source_label_time_frequency.py": { + "a": 4, + "d": 2 + }, + "examples/time_frequency/source_power_spectrum.py": { + "a": 5, + "d": 3 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 9, + "d": 7 + }, + "examples/time_frequency/source_space_time_frequency.py": { + "a": 5, + "d": 3 + }, + "examples/time_frequency/temporal_whitening.py": { + "a": 4, + "d": 2 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 7, + "d": 5 + }, + "examples/time_frequency/time_frequency_global_field_power.py": { + "a": 6, + "d": 4 + }, + "examples/time_frequency/time_frequency_simulated.py": { + "a": 11, + "d": 9 + }, + "examples/visualization/3d_to_2d.py": { + "a": 6, + "d": 4 + }, + "examples/visualization/channel_epochs_image.py": { + "a": 6, + "d": 4 + }, + "examples/visualization/eeg_on_scalp.py": { + "a": 3, + "d": 1 + }, + "examples/visualization/eeglab_head_sphere.py": { + "a": 7, + "d": 5 + }, + "examples/visualization/evoked_arrowmap.py": { + "a": 6, + "d": 4 + }, + "examples/visualization/evoked_topomap.py": { + "a": 12, + "d": 10 + }, + "examples/visualization/evoked_whitening.py": { + "a": 7, + "d": 5 + }, + "examples/visualization/meg_sensors.py": { + "a": 8, + "d": 6 + }, + "examples/visualization/mne_helmet.py": { + "a": 2, + "d": 0 + }, + "examples/visualization/montage_sgskip.py": { + "a": 5, + "d": 3 + }, + "examples/visualization/parcellation.py": { + "a": 6, + "d": 4 + }, + "examples/visualization/publication_figure.py": { + "a": 14, + "d": 14 + }, + 
"examples/visualization/roi_erpimage_by_rt.py": { + "a": 7, + "d": 5 + }, + "examples/visualization/sensor_noise_level.py": { + "a": 5, + "d": 3 + }, + "examples/visualization/ssp_projs_sensitivity_map.py": { + "a": 4, + "d": 2 + }, + "examples/visualization/topo_compare_conditions.py": { + "a": 5, + "d": 3 + }, + "examples/visualization/topo_customized.py": { + "a": 3, + "d": 1 + }, + "examples/visualization/xhemi.py": { + "a": 3, + "d": 1 + }, + "logo/generate_mne_logos.py": { + "a": 1, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 1, + "d": 1 + }, + "mne/_ola.py": { + "a": 1, + "d": 1 + }, + "mne/_version.py": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 1, + "d": 1 + }, + "mne/baseline.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_compute_beamformer.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_lcmv.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_external.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_resolution_matrix.py": { + "a": 1, + "d": 1 + }, + "mne/bem.py": { + "a": 1, + "d": 1 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_channels.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_surf2bem.py": { + "a": 1, + "d": 1 + }, + "mne/commands/utils.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/effective.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/envelope.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/spectral.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/tests/test_envelope.py": { + "a": 1, + "d": 1 + }, + "mne/connectivity/utils.py": { + "a": 1, + "d": 1 + }, + "mne/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/cuda.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/limo/limo.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/search_light.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/ssd.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_base.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_csp.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_ems.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 1, + "d": 1 + }, + 
"mne/decoding/tests/test_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 1 + }, + "mne/defaults.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/event.py": { + "a": 1, + "d": 1 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/export/_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/export/_egimff.py": { + "a": 1, + "d": 1 + }, + "mne/export/_export.py": { + "a": 1, + "d": 1 + }, + "mne/export/tests/test_export.py": { + "a": 1, + "d": 1 + }, + "mne/externals/h5io/_h5io.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_compute_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_lead_dots.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/forward.py": { + "a": 1, + "d": 1 + }, + "mne/gui/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_backend.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_fiducials_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_file_traits.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_help.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_marker_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_viewer.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_debiasing.py": { + "a": 1, + "d": 1 + }, + "mne/io/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/_digitization.py": { + "a": 1, + "d": 1 + }, + "mne/io/_read_raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 1 + }, + "mne/io/array/tests/test_array.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/io/boxy/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/boxy/boxy.py": { + "a": 1, + "d": 1 + }, + "mne/io/boxy/tests/test_boxy.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/bti/tests/test_bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/tests/test_cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/eeg.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/hc.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/markers.py": { + "a": 1, + "d": 1 + }, + 
"mne/io/ctf/res4.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/trans.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf_comp.py": { + "a": 1, + "d": 1 + }, + "mne/io/curry/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/curry/curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/events.py": { + "a": 1, + "d": 1 + }, + "mne/io/egi/general.py": { + "a": 1, + "d": 1 + }, + "mne/io/eximia/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/eximia/eximia.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/fieldtrip.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/tests/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/tests/test_fieldtrip.py": { + "a": 1, + "d": 1 + }, + "mne/io/fieldtrip/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/hitachi/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/hitachi/hitachi.py": { + "a": 1, + "d": 1 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/matrix.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/nedf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/nedf/tests/test_nedf.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 1 + }, + "mne/io/nicolet/tests/test_nicolet.py": { + "a": 1, + "d": 1 + }, + "mne/io/nihon/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/nihon/nihon.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/open.py": { + "a": 1, + "d": 1 + }, + "mne/io/persyst/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/persyst/persyst.py": { + "a": 1, + "d": 1 + }, + "mne/io/pick.py": { + "a": 1, + "d": 1 + }, + "mne/io/proj.py": { + "a": 1, + "d": 1 + }, + "mne/io/reference.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 1 + }, + "mne/io/tag.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_apply_function.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_compensator.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_read_raw.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_reference.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_show_fiff.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_utils.py": 
{ + "a": 1, + "d": 1 + }, + "mne/io/tests/test_write.py": { + "a": 1, + "d": 1 + }, + "mne/io/tree.py": { + "a": 1, + "d": 1 + }, + "mne/io/utils.py": { + "a": 1, + "d": 1 + }, + "mne/io/what.py": { + "a": 1, + "d": 1 + }, + "mne/io/write.py": { + "a": 1, + "d": 1 + }, + "mne/label.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/_eloreta.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/inverse.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/spatial_resolution.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_resolution_matrix.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_resolution_metrics.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 1, + "d": 1 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/misc.py": { + "a": 1, + "d": 1 + }, + "mne/morph.py": { + "a": 1, + "d": 1 + }, + "mne/morph_map.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_csd.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/_regress.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/annotate_nan.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/bads.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/flat.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/infomax_.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxfilter.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_optical_density.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_scalp_coupling_index.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_tddr.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_optical_density.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_scalp_coupling_index.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/otp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/realign.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ssp.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/stim.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ctps.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_infomax.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_otp.py": { + "a": 1, + "d": 
1 + }, + "mne/preprocessing/tests/test_realign.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_regress.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_stim.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/xdawn.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 1, + "d": 1 + }, + "mne/rank.py": { + "a": 1, + "d": 1 + }, + "mne/report.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/metrics.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/source.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_metrics.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 1, + "d": 1 + }, + "mne/simulation/tests/test_source.py": { + "a": 1, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/source_space.py": { + "a": 1, + "d": 1 + }, + "mne/stats/multi_comp.py": { + "a": 1, + "d": 1 + }, + "mne/stats/regression.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_permutations.py": { + "a": 1, + "d": 1 + }, + "mne/stats/tests/test_regression.py": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_event.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_import_nesting.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_line_endings.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_misc.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_morph_map.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_estimate.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_surface.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/ar.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/multitaper.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_stft.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tests/test_stockwell.py": { + "a": 1, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_bunch.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_logging.py": { + "a": 1, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + }, + "mne/utils/dataframe.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/utils/fetching.py": { + "a": 1, + "d": 1 + }, + "mne/utils/linalg.py": { + "a": 1, + "d": 1 + }, + "mne/utils/misc.py": { + "a": 1, + "d": 1 + }, + "mne/utils/mixin.py": { + "a": 
1, + "d": 1 + }, + "mne/utils/numerics.py": { + "a": 1, + "d": 1 + }, + "mne/utils/progressbar.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_bunch.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_check.py": { + "a": 1, + "d": 1 + }, + "mne/utils/tests/test_linalg.py": { + "a": 1, + "d": 1 + }, + "mne/viz/conftest.py": { + "a": 2, + "d": 2 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "setup.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 15, + "d": 13 + }, + "tutorials/clinical/20_seeg.py": { + "a": 12, + "d": 10 + }, + "tutorials/clinical/30_ecog.py": { + "a": 8, + "d": 6 + }, + "tutorials/clinical/60_sleep.py": { + "a": 3, + "d": 1 + }, + "tutorials/epochs/10_epochs_overview.py": { + "a": 30, + "d": 28 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 17, + "d": 15 + }, + "tutorials/epochs/30_epochs_metadata.py": { + "a": 13, + "d": 11 + }, + "tutorials/epochs/40_autogenerate_metadata.py": { + "a": 18, + "d": 16 + }, + "tutorials/epochs/50_epochs_to_data_frame.py": { + "a": 9, + "d": 7 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 8, + "d": 6 + }, + "tutorials/evoked/10_evoked_overview.py": { + "a": 17, + "d": 15 + }, + "tutorials/evoked/20_visualize_evoked.py": { + "a": 19, + "d": 17 + }, + "tutorials/evoked/30_eeg_erp.py": { + "a": 28, + "d": 26 + }, + "tutorials/evoked/40_whitened.py": { + "a": 7, + "d": 5 + }, + "tutorials/forward/10_background_freesurfer.py": { + "a": 3, + "d": 1 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 11, + "d": 9 + }, + "tutorials/forward/30_forward.py": { + "a": 16, + "d": 14 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 3, + "d": 1 + }, + "tutorials/forward/50_background_freesurfer_mne.py": { + "a": 22, + "d": 20 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 9, + "d": 7 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 11, + "d": 9 + }, + "tutorials/intro/10_overview.py": { + "a": 16, + "d": 14 + }, + "tutorials/intro/15_inplace.py": { + "a": 6, + "d": 4 + }, + "tutorials/intro/20_events_from_raw.py": { + "a": 13, + "d": 11 + }, + "tutorials/intro/30_info.py": { + "a": 15, + "d": 13 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 19, + "d": 17 + }, + "tutorials/intro/50_configure_mne.py": { + "a": 17, + "d": 15 + }, + "tutorials/intro/70_report.py": { + "a": 17, + "d": 15 + }, + "tutorials/inverse/10_stc_class.py": { + "a": 10, + "d": 8 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 6, + "d": 4 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 13, + "d": 11 + }, + "tutorials/inverse/35_dipole_orientations.py": { + "a": 10, + "d": 10 + }, + "tutorials/inverse/40_mne_fixed_free.py": { + "a": 13, + "d": 11 + }, + "tutorials/inverse/50_beamformer_lcmv.py": { + "a": 18, + "d": 16 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 17, + "d": 15 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 3, + "d": 1 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 13, + "d": 11 + }, + "tutorials/inverse/85_brainstorm_phantom_ctf.py": { + "a": 12, + "d": 10 + }, + "tutorials/inverse/90_phantom_4DBTi.py": { + "a": 6, + "d": 4 + }, + "tutorials/io/10_reading_meg_data.py": { + "a": 2, + "d": 0 + }, + "tutorials/io/20_reading_eeg_data.py": { + "a": 2, + "d": 0 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 8, + "d": 6 + }, + "tutorials/io/60_ctf_bst_auditory.py": { + "a": 30, + "d": 28 + }, + "tutorials/machine-learning/30_strf.py": { + "a": 12, + "d": 10 + }, + 
"tutorials/machine-learning/50_decoding.py": { + "a": 12, + "d": 10 + }, + "tutorials/preprocessing/10_preprocessing_overview.py": { + "a": 11, + "d": 9 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 12, + "d": 10 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 15, + "d": 13 + }, + "tutorials/preprocessing/25_background_filtering.py": { + "a": 28, + "d": 28 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 12, + "d": 10 + }, + "tutorials/preprocessing/35_artifact_correction_regression.py": { + "a": 9, + "d": 7 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 30, + "d": 28 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 15, + "d": 13 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 23, + "d": 21 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 12, + "d": 10 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 10, + "d": 8 + }, + "tutorials/preprocessing/60_maxwell_filtering_sss.py": { + "a": 12, + "d": 10 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 22, + "d": 20 + }, + "tutorials/raw/10_raw_overview.py": { + "a": 26, + "d": 24 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 12, + "d": 10 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 14, + "d": 12 + }, + "tutorials/raw/40_visualize_raw.py": { + "a": 10, + "d": 8 + }, + "tutorials/simulation/10_array_objs.py": { + "a": 11, + "d": 9 + }, + "tutorials/simulation/70_point_spread.py": { + "a": 12, + "d": 10 + }, + "tutorials/simulation/80_dics.py": { + "a": 14, + "d": 14 + }, + "tutorials/stats-sensor-space/10_background_stats.py": { + "a": 19, + "d": 17 + }, + "tutorials/stats-sensor-space/20_erp_stats.py": { + "a": 3, + "d": 1 + }, + "tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py": { + "a": 7, + "d": 5 + }, + "tutorials/stats-sensor-space/50_cluster_between_time_freq.py": { + "a": 7, + "d": 5 + }, + "tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py": { + "a": 9, + "d": 7 + }, + "tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py": { + "a": 11, + "d": 9 + }, + "tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py": { + "a": 6, + "d": 4 + }, + "tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py": { + "a": 15, + "d": 13 + }, + "tutorials/stats-source-space/70_cluster_rmANOVA_time_freq.py": { + "a": 14, + "d": 12 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 15, + "d": 13 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 13, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/957.json b/doc/sphinxext/prs/957.json new file mode 100644 index 00000000000..8b5945260d0 --- /dev/null +++ b/doc/sphinxext/prs/957.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f8ee3acee790b249d52015afbeece81db6ced5ed", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + } + ], + "changes": { + "mne/tests/test_viz.py": { + "a": 1, + "d": 1 + }, + "mne/viz.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9570.json b/doc/sphinxext/prs/9570.json new file mode 100644 index 00000000000..8e3db8a535f --- /dev/null +++ b/doc/sphinxext/prs/9570.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "0446fcf6efb4a04e51f38b3f72b1d1a187e50d26", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "doc/visualization.rst": { + "a": 1, + "d": 0 + }, + "mne/chpi.py": { + "a": 149, + "d": 32 + }, + "mne/tests/test_chpi.py": { + "a": 17, + "d": 1 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 77, + "d": 0 + }, + "mne/viz/tests/test_misc.py": { + "a": 27, + "d": 9 + }, + "tutorials/preprocessing/59_head_positions.py": { + "a": 17, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9574.json b/doc/sphinxext/prs/9574.json new file mode 100644 index 00000000000..953a0908a2a --- /dev/null +++ b/doc/sphinxext/prs/9574.json @@ -0,0 +1,247 @@ +{ + "merge_commit_sha": "ec2aaa7b1ff40fa2d7a38bd1c376b217d251376f", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/beamformer/_dics.py": { + "a": 1, + "d": 2 + }, + "mne/beamformer/_lcmv.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/_rap_music.py": { + "a": 3, + "d": 3 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 4, + "d": 5 + }, + "mne/bem.py": { + "a": 3, + "d": 7 + }, + "mne/channels/channels.py": { + "a": 14, + "d": 17 + }, + "mne/channels/layout.py": { + "a": 13, + "d": 15 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 2 + }, + "mne/chpi.py": { + "a": 3, + "d": 6 + }, + "mne/coreg.py": { + "a": 6, + "d": 3 + }, + "mne/cov.py": { + "a": 5, + "d": 10 + }, + "mne/decoding/csp.py": { + "a": 4, + "d": 6 + }, + "mne/decoding/ssd.py": { + "a": 1, + "d": 3 + }, + "mne/decoding/transformer.py": { + "a": 3, + "d": 5 + }, + "mne/dipole.py": { + "a": 1, + "d": 2 + }, + "mne/epochs.py": { + "a": 4, + "d": 7 + }, + "mne/event.py": { + "a": 3, + "d": 3 + }, + "mne/evoked.py": { + "a": 3, + "d": 5 + }, + "mne/forward/_field_interpolation.py": { + "a": 1, + "d": 2 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 8 + }, + "mne/forward/forward.py": { + "a": 7, + "d": 10 + }, + "mne/io/array/array.py": { + "a": 1, + "d": 2 + }, + "mne/io/base.py": { + "a": 3, + "d": 4 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 2 + }, + "mne/io/compensator.py": { + "a": 3, + "d": 2 + }, + "mne/io/fiff/raw.py": { + "a": 1, + "d": 2 + }, + 
"mne/io/kit/kit.py": { + "a": 3, + "d": 4 + }, + "mne/io/meas_info.py": { + "a": 11, + "d": 15 + }, + "mne/io/pick.py": { + "a": 10, + "d": 12 + }, + "mne/io/proj.py": { + "a": 7, + "d": 10 + }, + "mne/minimum_norm/inverse.py": { + "a": 4, + "d": 5 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 2 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 3, + "d": 4 + }, + "mne/rank.py": { + "a": 6, + "d": 9 + }, + "mne/simulation/evoked.py": { + "a": 1, + "d": 2 + }, + "mne/simulation/raw.py": { + "a": 1, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 1, + "d": 2 + }, + "mne/source_space.py": { + "a": 1, + "d": 2 + }, + "mne/surface.py": { + "a": 2, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 4, + "d": 8 + }, + "mne/utils/config.py": { + "a": 3, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 15, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 5, + "d": 9 + }, + "mne/viz/misc.py": { + "a": 5, + "d": 5 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 3 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9575.json b/doc/sphinxext/prs/9575.json new file mode 100644 index 00000000000..c08ac0a6c20 --- /dev/null +++ b/doc/sphinxext/prs/9575.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "144a58c9466563de2d379f3d51b7c3404729e37a", + "authors": [ + { + "n": "Darin Erat Sleiter", + "e": "darin@ae.studio" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 2, + "d": 1 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 10, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9578.json b/doc/sphinxext/prs/9578.json new file mode 100644 index 00000000000..55fa232a7f8 --- /dev/null +++ b/doc/sphinxext/prs/9578.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "c659bde91d22c994570571d84d61b960acbd09bb", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/0.22.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.23.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/epochs.py": { + "a": 0, + "d": 2 + }, + "mne/evoked.py": { + "a": 8, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 3, + "d": 3 + }, + "mne/tests/test_event.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 13, + "d": 2 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9579.json b/doc/sphinxext/prs/9579.json new file mode 100644 index 00000000000..76ec1291743 --- /dev/null +++ b/doc/sphinxext/prs/9579.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "f3a7dece8d4361265e055f617e98c197c7cba07e", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/source_space.rst": { + "a": 0, + "d": 1 + }, + "mne/dipole.py": { + "a": 63, + "d": 2 + }, + "mne/label.py": { + "a": 0, + "d": 52 + }, + "mne/tests/test_dipole.py": { + "a": 11, + "d": 1 + }, + "mne/tests/test_label.py": { + "a": 3, + "d": 21 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 12, + "d": 15 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/958.json b/doc/sphinxext/prs/958.json new file mode 100644 index 00000000000..9a84b2b2d6d --- /dev/null +++ b/doc/sphinxext/prs/958.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6ce00d9c5bd57d2d461ef450a6b6b6878bb65121", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "examples/plot_estimate_covariance_matrix_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9582.json b/doc/sphinxext/prs/9582.json new file mode 100644 index 00000000000..4378656a62f --- /dev/null +++ b/doc/sphinxext/prs/9582.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "b5767013e44a6b47393bc55d4534ea17a6b7b8bf", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "tutorials/machine-learning/50_decoding.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9585.json b/doc/sphinxext/prs/9585.json new file mode 100644 index 00000000000..9f8319c0ef8 --- /dev/null +++ b/doc/sphinxext/prs/9585.json @@ -0,0 +1,123 @@ +{ + "merge_commit_sha": "6756eab3dc55dc9c8fc5d30c3e4a5f5ad18659bd", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/_includes/forward.rst": { + "a": 1, + "d": 1 + }, + "doc/changes/0.21.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 8, + "d": 0 + }, + "doc/mri.rst": { + "a": 1, + "d": 0 + }, + "doc/overview/cookbook.rst": { + "a": 1, + "d": 1 + }, + "mne/_freesurfer.py": { + "a": 83, + "d": 2 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 1, + "d": 3 + }, + "mne/coreg.py": { + "a": 2, + "d": 1 + }, + "mne/defaults.py": { + "a": 2, + "d": 0 + }, + "mne/io/tag.py": { + "a": 4, + "d": 0 + }, + "mne/io/tests/test_meas_info.py": { + "a": 12, + "d": 0 + }, + "mne/surface.py": { + "a": 33, + "d": 42 + }, + "mne/tests/test_surface.py": { + "a": 42, + "d": 39 + }, + "mne/transforms.py": { + "a": 16, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 42, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 389, + "d": 419 + }, + "mne/viz/_brain/_brain.py": { + "a": 101, + "d": 12 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 32, + "d": 23 + }, + "mne/viz/tests/test_3d.py": { + "a": 55, + "d": 40 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 41, + "d": 35 + }, + "tutorials/forward/20_source_alignment.py": { + "a": 8, + "d": 7 + }, + "tutorials/intro/40_sensor_locations.py": { + "a": 2, + "d": 2 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 10, + "d": 13 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 8, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9586.json b/doc/sphinxext/prs/9586.json new file mode 100644 index 00000000000..90df6c37e88 --- /dev/null +++ b/doc/sphinxext/prs/9586.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "4375989ef05990574bfe70c34cef111340ac2383", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/mri.rst": { + "a": 1, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + 
"mne/gui/__init__.py": { + "a": 41, + "d": 0 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 883, + "d": 0 + }, + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 155, + "d": 0 + }, + "mne/surface.py": { + "a": 54, + "d": 34 + }, + "mne/tests/test_surface.py": { + "a": 8, + "d": 5 + }, + "mne/utils/_testing.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 4 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 121, + "d": 44 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9587.json b/doc/sphinxext/prs/9587.json new file mode 100644 index 00000000000..114f40563e2 --- /dev/null +++ b/doc/sphinxext/prs/9587.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "ace9e127a76e46883daa302ac3dacf80a9cbd675", + "authors": [ + { + "n": "PAB", + "e": null + }, + { + "n": "Pierre-Antoine Bannier", + "e": "pierre-antoine@macbook-pro-de-pierre-antoine.home" + }, + { + "n": "Pierre-Antoine Bannier", + "e": "pierre-antoine@mbpdepieantoine.home" + } + ], + "changes": { + "mne/inverse_sparse/mxne_optim.py": { + "a": 3, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 23, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9588.json b/doc/sphinxext/prs/9588.json new file mode 100644 index 00000000000..581f1d35383 --- /dev/null +++ b/doc/sphinxext/prs/9588.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "35e867ad6506d50a4fa1bef71cc2610bf1aad6f3", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/overview/roadmap.rst": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9589.json b/doc/sphinxext/prs/9589.json new file mode 100644 index 00000000000..b6fd0a92dfd --- /dev/null +++ b/doc/sphinxext/prs/9589.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "49253e74308137e14187561a204d784ea28f12a7", + "authors": [ + { + "n": "Gansheng TAN", + "e": null + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/959.json b/doc/sphinxext/prs/959.json new file mode 100644 index 00000000000..087d4e16b79 --- /dev/null +++ b/doc/sphinxext/prs/959.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3652601497564e5c1235a10771b2908489161729", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/epochs.py": { + "a": 16, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9590.json b/doc/sphinxext/prs/9590.json new file mode 100644 index 00000000000..c50ba013aa2 --- /dev/null +++ b/doc/sphinxext/prs/9590.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "575fae2221bd02f37603308b7151d03f81dfe7a9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_figure.py": { + "a": 4, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9592.json b/doc/sphinxext/prs/9592.json new file mode 100644 index 00000000000..fd5f3d35739 --- /dev/null +++ b/doc/sphinxext/prs/9592.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "57d792ff0a2240fc87599faac8e17eda33f04143", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 5, + "d": 0 + }, + 
".github/workflows/compat_minimal.yml": { + "a": 3, + "d": 0 + }, + ".github/workflows/compat_old.yml": { + "a": 3, + "d": 0 + }, + ".github/workflows/linux_conda.yml": { + "a": 3, + "d": 0 + }, + ".github/workflows/linux_pip.yml": { + "a": 3, + "d": 0 + }, + ".github/workflows/macos_conda.yml": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9593.json b/doc/sphinxext/prs/9593.json new file mode 100644 index 00000000000..9b83937f8bb --- /dev/null +++ b/doc/sphinxext/prs/9593.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "c1557623904877274607b6b7af7706c759d50571", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "doc/mri.rst": { + "a": 0, + "d": 1 + }, + "mne/surface.py": { + "a": 57, + "d": 44 + }, + "mne/tests/test_surface.py": { + "a": 25, + "d": 6 + }, + "mne/transforms.py": { + "a": 4, + "d": 3 + }, + "mne/viz/_brain/_brain.py": { + "a": 12, + "d": 13 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 0, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 0, + "d": 2 + }, + "tutorials/clinical/20_seeg.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9596.json b/doc/sphinxext/prs/9596.json new file mode 100644 index 00000000000..12aafdc6c5a --- /dev/null +++ b/doc/sphinxext/prs/9596.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "6c896a649d0de38dd68e98d2836aabe6fc17a123", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 103, + "d": 2 + }, + "mne/utils/config.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 235, + "d": 2244 + }, + "mne/viz/_mpl_figure.py": { + "a": 2352, + "d": 0 + }, + "mne/viz/backends/_utils.py": { + "a": 5, + "d": 0 + }, + "mne/viz/backends/renderer.py": { + "a": 6, + "d": 6 + }, + "mne/viz/epochs.py": { + "a": 6, + "d": 3 + }, + "mne/viz/ica.py": { + "a": 4, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 5, + "d": 3 + }, + "mne/viz/tests/test_epochs.py": { + "a": 83, + "d": 125 + }, + "mne/viz/tests/test_figure.py": { + "a": 3, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 14, + "d": 14 + }, + "mne/viz/tests/test_raw.py": { + "a": 217, + "d": 221 + }, + "mne/viz/utils.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9598.json b/doc/sphinxext/prs/9598.json new file mode 100644 index 00000000000..71ce5b9212c --- /dev/null +++ b/doc/sphinxext/prs/9598.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1df66682b6d460ac9b22d4c678034cc8706ff680", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 27, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 86, + "d": 60 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 18, + "d": 12 + }, + "mne/viz/backends/renderer.py": { + "a": 7, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9599.json b/doc/sphinxext/prs/9599.json new file mode 
100644 index 00000000000..675ada0adac --- /dev/null +++ b/doc/sphinxext/prs/9599.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "fb074dd23f3a3364349415c4612d6401e1affece", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "examples/connectivity/mne_inverse_coherence_epochs.py": { + "a": 7, + "d": 5 + }, + "examples/inverse/covariance_whitening_dspm.py": { + "a": 10, + "d": 6 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 1, + "d": 1 + }, + "tutorials/simulation/80_dics.py": { + "a": 2, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/96.json b/doc/sphinxext/prs/96.json new file mode 100644 index 00000000000..1abdede2e79 --- /dev/null +++ b/doc/sphinxext/prs/96.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6a1dc4d8784ff7dbbec063b066fefb89437ed47d", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/fiff/ctf.py": { + "a": 8, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9600.json b/doc/sphinxext/prs/9600.json new file mode 100644 index 00000000000..554bb4831e1 --- /dev/null +++ b/doc/sphinxext/prs/9600.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cd735c4d18918c4e0b1fa336d7d91fb828b32c4e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/tests/test_brain.py": { + "a": 1, + "d": 3 + }, + "server_environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9601.json b/doc/sphinxext/prs/9601.json new file mode 100644 index 00000000000..c98a932111a --- /dev/null +++ b/doc/sphinxext/prs/9601.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "64d51d6b122c5520a13825a0a360623993075d66", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 18, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 9, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 14, + "d": 26 + }, + "tutorials/clinical/20_seeg.py": { + "a": 33, + "d": 63 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9603.json b/doc/sphinxext/prs/9603.json new file mode 100644 index 00000000000..ff4d3744541 --- /dev/null +++ b/doc/sphinxext/prs/9603.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9b43b2beeca84931049bdc83717aa579a747f2ea", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 1 + }, + "mne/conftest.py": { + "a": 5, + "d": 4 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 4, + "d": 3 + }, + "mne/source_space.py": { + "a": 7, + "d": 2 + }, + "mne/tests/test_source_space.py": { + "a": 11, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9605.json b/doc/sphinxext/prs/9605.json new file mode 100644 index 00000000000..79294c41680 --- /dev/null +++ b/doc/sphinxext/prs/9605.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "5815375bf84f941e64d5338b7f159076dad3b010", + "authors": [ + { + "n": "Richard Höchenberger", + "e": 
"richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 50, + "d": 29 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 27, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9607.json b/doc/sphinxext/prs/9607.json new file mode 100644 index 00000000000..43725252fd6 --- /dev/null +++ b/doc/sphinxext/prs/9607.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "fe1086261e1849e983f36338aeb6a9b104b9a7a0", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 20, + "d": 19 + }, + "mne/utils/config.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 7, + "d": 7 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 10, + "d": 10 + }, + "mne/viz/backends/_qt.py": { + "a": 1, + "d": 4 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 88, + "d": 65 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 14, + "d": 3 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 40, + "d": 12 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 16 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9608.json b/doc/sphinxext/prs/9608.json new file mode 100644 index 00000000000..c0e80b84d99 --- /dev/null +++ b/doc/sphinxext/prs/9608.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "4d37ad3ced0f1a653b019a73f4a8fdaa2e49cab0", + "authors": [ + { + "n": "PAB", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Pierre-Antoine Bannier", + "e": "pierre-antoine@mbpdepieantoine.home" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 8, + "d": 1 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 5, + "d": 4 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9612.json b/doc/sphinxext/prs/9612.json new file mode 100644 index 00000000000..29c9039b42e --- /dev/null +++ b/doc/sphinxext/prs/9612.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "6c9f5af6dc0e07c209d3bc8f0d1b964a80e7ad71", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 3, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 8, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 8, + "d": 3 + }, + "mne/tests/test_chpi.py": { + "a": 1, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 0 + }, + "tools/github_actions_test.sh": { + "a": 0, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9613.json b/doc/sphinxext/prs/9613.json new file mode 100644 index 00000000000..cfcb0bf756f --- /dev/null +++ 
b/doc/sphinxext/prs/9613.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "96e830b3b02394f2c6aaea619ab46664580ed72b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/cov.py": { + "a": 4, + "d": 2 + }, + "mne/epochs.py": { + "a": 17, + "d": 17 + }, + "mne/evoked.py": { + "a": 7, + "d": 2 + }, + "mne/forward/forward.py": { + "a": 5, + "d": 5 + }, + "mne/io/base.py": { + "a": 8, + "d": 4 + }, + "mne/io/eeglab/eeglab.py": { + "a": 2, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 20, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 2 + }, + "mne/minimum_norm/inverse.py": { + "a": 5, + "d": 2 + }, + "mne/report/report.py": { + "a": 4, + "d": 4 + }, + "mne/report/tests/test_report.py": { + "a": 2, + "d": 1 + }, + "mne/source_estimate.py": { + "a": 19, + "d": 9 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/time_frequency/csd.py": { + "a": 1, + "d": 0 + }, + "mne/transforms.py": { + "a": 4, + "d": 2 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 9, + "d": 2 + }, + "mne/utils/config.py": { + "a": 7, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 0 + }, + "mne/utils/tests/test_check.py": { + "a": 6, + "d": 6 + }, + "mne/utils/tests/test_config.py": { + "a": 11, + "d": 5 + }, + "mne/viz/tests/test_evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9614.json b/doc/sphinxext/prs/9614.json new file mode 100644 index 00000000000..64f0a04a067 --- /dev/null +++ b/doc/sphinxext/prs/9614.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f73907c01ce6a18e4941969d3491cc561617c32d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "mne/gui/_file_traits.py": { + "a": 3, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9617.json b/doc/sphinxext/prs/9617.json new file mode 100644 index 00000000000..d569ba289a6 --- /dev/null +++ b/doc/sphinxext/prs/9617.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "474da964415c5233e955663b56e85531093be0a4", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/_freesurfer.py": { + "a": 13, + "d": 0 + }, + "mne/source_space.py": { + "a": 1, + "d": 14 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9618.json b/doc/sphinxext/prs/9618.json new file mode 100644 index 00000000000..5c10340e667 --- /dev/null +++ b/doc/sphinxext/prs/9618.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a7be363558c2126a88812e373408fa3e3cb69b26", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/_freesurfer.py": { + "a": 69, + "d": 4 + }, + "mne/viz/_3d.py": { + "a": 11, + "d": 62 + }, + "mne/viz/_brain/_brain.py": { + "a": 42, + "d": 3 + }, + "mne/viz/_brain/surface.py": { + "a": 3, + "d": 0 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 5, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9621.json b/doc/sphinxext/prs/9621.json new file mode 100644 index 00000000000..3281e629f80 --- /dev/null 
+++ b/doc/sphinxext/prs/9621.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3c720e8f7a93d86e1db20fd949eee315cbced981", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/bem.py": { + "a": 1, + "d": 4 + }, + "mne/tests/test_bem.py": { + "a": 3, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 10, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9622.json b/doc/sphinxext/prs/9622.json new file mode 100644 index 00000000000..ba798c06183 --- /dev/null +++ b/doc/sphinxext/prs/9622.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "33d2aea4a3790953c38dcc28582d614c20419649", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/transforms.py": { + "a": 33, + "d": 30 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9625.json b/doc/sphinxext/prs/9625.json new file mode 100644 index 00000000000..f4ff54eb37c --- /dev/null +++ b/doc/sphinxext/prs/9625.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b66734cf6f174b6806b52d4162830adb8d2a1825", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/_freesurfer.py": { + "a": 46, + "d": 22 + }, + "mne/report.py": { + "a": 5, + "d": 7 + }, + "mne/viz/misc.py": { + "a": 21, + "d": 34 + }, + "tutorials/forward/30_forward.py": { + "a": 4, + "d": 3 + }, + "tutorials/intro/70_report.py": { + "a": 21, + "d": 20 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9626.json b/doc/sphinxext/prs/9626.json new file mode 100644 index 00000000000..d491805d447 --- /dev/null +++ b/doc/sphinxext/prs/9626.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "57e5e7f73b4a10d1cf3bfeb91685df7f88fcdbd9", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "examples/visualization/brain.py": { + "a": 113, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9629.json b/doc/sphinxext/prs/9629.json new file mode 100644 index 00000000000..f437ae50376 --- /dev/null +++ b/doc/sphinxext/prs/9629.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2328a7fad54c40cbc0cea03e3fc60d594f036b93", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_figure.py": { + "a": 2, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 15, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9630.json b/doc/sphinxext/prs/9630.json new file mode 100644 index 00000000000..2c15858014e --- /dev/null +++ b/doc/sphinxext/prs/9630.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8d8609ade86b6daca5173faa7414885bde04df29", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9634.json b/doc/sphinxext/prs/9634.json new file mode 100644 index 00000000000..1fd35574d6f --- /dev/null +++ b/doc/sphinxext/prs/9634.json @@ -0,0 +1,79 @@ +{ + "merge_commit_sha": "8f62deb0f34b8f1abb40558cc804d412c716cb16", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + 
{ + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Robert Luke", + "e": "748691+rob-luke@users.noreply.github.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 13, + "d": 4 + }, + "doc/install/advanced.rst": { + "a": 22, + "d": 22 + }, + "doc/install/index.rst": { + "a": 22, + "d": 12 + }, + "doc/install/install_python.rst": { + "a": 132, + "d": 0 + }, + "doc/install/mne_python.rst": { + "a": 58, + "d": 140 + }, + "doc/install/updating.rst": { + "a": 77, + "d": 0 + }, + "doc/links.inc": { + "a": 1, + "d": 0 + }, + "doc/overview/faq.rst": { + "a": 2, + "d": 2 + }, + "doc/overview/get_help.rst": { + "a": 2, + "d": 1 + }, + "doc/overview/index.rst": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9637.json b/doc/sphinxext/prs/9637.json new file mode 100644 index 00000000000..a718c5f3f35 --- /dev/null +++ b/doc/sphinxext/prs/9637.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "89a975df53bea691398000b5a8e5261c3fb9ac0d", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/reference.py": { + "a": 22, + "d": 9 + }, + "mne/io/tests/test_reference.py": { + "a": 7, + "d": 7 + }, + "mne/utils/docs.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9640.json b/doc/sphinxext/prs/9640.json new file mode 100644 index 00000000000..42e7fd3266f --- /dev/null +++ b/doc/sphinxext/prs/9640.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "53d0f5de8a8a99e3f4211a8dc1361930e3cc3cd8", + "authors": [ + { + "n": "PAB", + "e": null + }, + { + "n": "Pierre-Antoine Bannier", + "e": "pierre-antoine@MacBook-Pro-de-Pierre-Antoine.local" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 22, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9642.json b/doc/sphinxext/prs/9642.json new file mode 100644 index 00000000000..11c56354396 --- /dev/null +++ b/doc/sphinxext/prs/9642.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8df2aeebb87e0b34937c9a70c2b8d0025470f1a4", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9643.json b/doc/sphinxext/prs/9643.json new file mode 100644 index 00000000000..4f84636916e --- /dev/null +++ b/doc/sphinxext/prs/9643.json @@ -0,0 +1,111 @@ +{ + "merge_commit_sha": "59c3b921cf7609d0f2ffc19c1a7dc276110100a3", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre 
Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/conftest.py": { + "a": 4, + "d": 1 + }, + "mne/export/_edf.py": { + "a": 225, + "d": 0 + }, + "mne/export/_export.py": { + "a": 5, + "d": 2 + }, + "mne/export/tests/test_export.py": { + "a": 132, + "d": 4 + }, + "mne/io/base.py": { + "a": 5, + "d": 2 + }, + "mne/io/tests/data/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/utils/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/utils/check.py": { + "a": 14, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 12, + "d": 0 + }, + "requirements_testing_extra.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9644.json b/doc/sphinxext/prs/9644.json new file mode 100644 index 00000000000..bd891cf5340 --- /dev/null +++ b/doc/sphinxext/prs/9644.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "759d1f9630c82109210da1eb41b45a7cd30a0f12", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "examples/visualization/eeglab_head_sphere.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9646.json b/doc/sphinxext/prs/9646.json new file mode 100644 index 00000000000..4334b5bc57c --- /dev/null +++ b/doc/sphinxext/prs/9646.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c1891279801e1a9690017c53d211e0d7e4a45a90", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9647.json b/doc/sphinxext/prs/9647.json new file mode 100644 index 00000000000..998a25c99bf --- /dev/null +++ b/doc/sphinxext/prs/9647.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c9752455b79de10fac672d3ed25911f2618f71cc", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/decoding/csp.py": { + "a": 2, + "d": 9 + }, + "mne/utils/docs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/topomap.py": { + "a": 7, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9648.json b/doc/sphinxext/prs/9648.json new file mode 100644 index 00000000000..58e70232356 --- /dev/null +++ b/doc/sphinxext/prs/9648.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c556894a79e94c3a079725cb2ba9f5cc00076bb9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 10, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9650.json b/doc/sphinxext/prs/9650.json new file mode 100644 index 00000000000..2a0a73a899b --- /dev/null +++ b/doc/sphinxext/prs/9650.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "855edbf48835bcee80e773eb7047d0a4caa14a1c", + "authors": [ + { + "n": "Jeff Stout", + "e": null + }, + { + "n": "Jeff Stout", + "e": "stoutjd@nih.gov" + } + ], + "changes": { + "mne/_freesurfer.py": { + "a": 6, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9651.json 
b/doc/sphinxext/prs/9651.json new file mode 100644 index 00000000000..76e7a33beae --- /dev/null +++ b/doc/sphinxext/prs/9651.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4fd5b1d8dc0e89de4b0953cb4ca1825774e27588", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "examples/visualization/evoked_topomap.py": { + "a": 31, + "d": 1 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 16, + "d": 5 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9656.json b/doc/sphinxext/prs/9656.json new file mode 100644 index 00000000000..d851e071d46 --- /dev/null +++ b/doc/sphinxext/prs/9656.json @@ -0,0 +1,307 @@ +{ + "merge_commit_sha": "9b16524bc8b98211d351afb19efd264f04c18e60", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/_templates/layout.html": { + "a": 0, + "d": 0 + }, + "examples/inverse/psf_ctf_label_leakage.py": { + "a": 0, + "d": 0 + }, + "mne/beamformer/resolution_matrix.py": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/CTF-275.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/KIT-AS-2008.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/Vectorview-all.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/Vectorview-grad.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/Vectorview-mag.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/layouts/magnesWH3600.lout": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/biosemi16_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/biosemi32_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/biosemi64_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/bti148_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/bti248_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/bti248grad_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/ctf151_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/ctf275_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/ctf64_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycap128ch-avg_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycap32ch-avg_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycap64ch-avg_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycapM11_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycapM14_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycapM15_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/easycapM1_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/neuromag306mag_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/channels/data/neighbors/neuromag306planar_neighb.mat": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_bti2fiff.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_clean_eog_ecg.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_compare_fiff.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_compute_proj_ecg.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_compute_proj_eog.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_coreg.py": { + 
"a": 0, + "d": 0 + }, + "mne/commands/mne_freeview_bem_surfaces.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_make_scalp_surfaces.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_maxfilter.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_report.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_show_fiff.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_show_info.py": { + "a": 0, + "d": 0 + }, + "mne/commands/mne_surf2bem.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/mxne_debiasing.py": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/tests/test_mxne_debiasing.py": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test.eeg": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test.vhdr": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test.vmrk": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_highpass.vhdr": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_lowpass_s.vhdr": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_mixed_highpass.vhdr": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_partially_disabled_hw_filter.vhdr": { + "a": 0, + "d": 0 + }, + "mne/io/brainvision/tests/data/test_units.vhdr": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/test_config_linux": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/test_hs_linux": { + "a": 0, + "d": 0 + }, + "mne/io/bti/tests/data/test_pdf_linux": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/test_edf_stim_channel.edf": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/test_edf_stim_channel.txt": { + "a": 0, + "d": 0 + }, + "mne/io/edf/tests/data/test_uneven_samp.edf": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_elp.txt": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_hsp.txt": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_mrk.sqd": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_mrk_post.sqd": { + "a": 0, + "d": 0 + }, + "mne/io/kit/tests/data/test_mrk_pre.sqd": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-ave.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-cov.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-eve.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test-no-reject.ave": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test.ave": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test.cov": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_keepmean.cov": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_raw-eve.fif": { + "a": 0, + "d": 0 + }, + "mne/io/tests/data/test_raw.fif": { + "a": 0, + "d": 0 + }, + "mne/preprocessing/tests/test_artifact_detection.py": { + "a": 0, + "d": 0 + }, + "setup.py": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9657.json b/doc/sphinxext/prs/9657.json new file mode 100644 index 00000000000..5bf0a1a5294 --- /dev/null +++ b/doc/sphinxext/prs/9657.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "95facbd1a28e471cf81e1d86735fa272a66d13d1", + "authors": [ + { + "n": "Hubert Banville", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "mne/datasets/sleep_physionet/_utils.py": { + "a": 4, + "d": 10 + }, + "mne/datasets/sleep_physionet/age.py": { + "a": 6, + "d": 10 + }, + "mne/datasets/sleep_physionet/temazepam.py": { + "a": 7, + "d": 10 + }, + 
"mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 17, + "d": 19 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9658.json b/doc/sphinxext/prs/9658.json new file mode 100644 index 00000000000..650709ecf32 --- /dev/null +++ b/doc/sphinxext/prs/9658.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "2f6d6b4667af1a3214e1a84ae21f8109429770a3", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/__init__.py": { + "a": 3, + "d": 3 + }, + "mne/channels/montage.py": { + "a": 55, + "d": 0 + }, + "mne/channels/tests/test_montage.py": { + "a": 33, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9663.json b/doc/sphinxext/prs/9663.json new file mode 100644 index 00000000000..04c88b02d36 --- /dev/null +++ b/doc/sphinxext/prs/9663.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "977397f575f8f37177bb62a05d272f2aa1874a6e", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_evoked.py": { + "a": 16, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9665.json b/doc/sphinxext/prs/9665.json new file mode 100644 index 00000000000..f21de316da5 --- /dev/null +++ b/doc/sphinxext/prs/9665.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "4f510337d5008a6fcfbaa134fbba921a49ec18b9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 4, + "d": 0 + }, + "tools/circleci_dependencies.sh": { + "a": 3, + "d": 4 + }, + "tools/setup_xvfb.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9666.json b/doc/sphinxext/prs/9666.json new file mode 100644 index 00000000000..33a6a0b4aec --- /dev/null +++ b/doc/sphinxext/prs/9666.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8e5ab68d05cb59a7e7d95275ca01d548432d2c9a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/datasets/utils.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9667.json b/doc/sphinxext/prs/9667.json new file mode 100644 index 00000000000..6b788730d3d --- /dev/null +++ b/doc/sphinxext/prs/9667.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7636a58a744bb95b59816144cce7d01746fe6db7", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/50_epochs_to_data_frame.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/967.json b/doc/sphinxext/prs/967.json new file mode 100644 index 00000000000..91b8887ec87 --- /dev/null +++ b/doc/sphinxext/prs/967.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "d6a2a01f1c0db51ca8d397cb238add43afd035a8", + "authors": [ + { + "n": "Hari Bharadwaj", + "e": null + } + ], + "changes": { + "mne/utils.py": { + "a": 236, + "d": 44 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9670.json b/doc/sphinxext/prs/9670.json new file mode 100644 index 00000000000..7ba3c4ccaf9 --- /dev/null +++ b/doc/sphinxext/prs/9670.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "9d75a529c6a30e9de47409c8e6e9730dd7026bcc", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + 
"changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/psf_ctf_label_leakage.py": { + "a": 3, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9671.json b/doc/sphinxext/prs/9671.json new file mode 100644 index 00000000000..3554237fe30 --- /dev/null +++ b/doc/sphinxext/prs/9671.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a14e7e35cbbc5f55585492a8993196198dc392b7", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9672.json b/doc/sphinxext/prs/9672.json new file mode 100644 index 00000000000..2e3168442b2 --- /dev/null +++ b/doc/sphinxext/prs/9672.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e0e98c0bd9c73f1a84430e66923c0e414ff320d9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/coreg.py": { + "a": 29, + "d": 15 + }, + "mne/tests/test_coreg.py": { + "a": 12, + "d": 9 + }, + "tools/circleci_dependencies.sh": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9674.json b/doc/sphinxext/prs/9674.json new file mode 100644 index 00000000000..52e6a00f2ad --- /dev/null +++ b/doc/sphinxext/prs/9674.json @@ -0,0 +1,75 @@ +{ + "merge_commit_sha": "68c19a6fd81f101999ef910d8907c765e7c61392", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 0 + }, + "doc/changes/0.12.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.15.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.18.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.19.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.20.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.22.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.23.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 2, + "d": 2 + }, + "doc/connectivity.rst": { + "a": 2, + "d": 16 + }, + "doc/visualization.rst": { + "a": 0, + "d": 1 + }, + "examples/io/read_neo_format.py": { + "a": 1, + "d": 1 + }, + "mne/viz/circle.py": { + "a": 21, + "d": 2 + }, + "tutorials/clinical/20_seeg.py": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9676.json b/doc/sphinxext/prs/9676.json new file mode 100644 index 00000000000..c252d4caaf0 --- /dev/null +++ b/doc/sphinxext/prs/9676.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6f8947d5a611297cd32298af9f55d974ba2cdee0", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9677.json b/doc/sphinxext/prs/9677.json new file mode 100644 index 00000000000..8fcbd2f6ad1 --- /dev/null +++ b/doc/sphinxext/prs/9677.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "9b4ec7e68a6bb5ada2296058dce131397ebbeec4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/artifact_detection.py": { + "a": 2, + "d": 2 + }, + "mne/stats/cluster_level.py": { + "a": 3, + "d": 4 + }, + "mne/tests/test_rank.py": { 
+ "a": 1, + "d": 1 + }, + "mne/utils/tests/test_numerics.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 3, + "d": 3 + }, + "tutorials/inverse/20_dipole_fit.py": { + "a": 4, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9678.json b/doc/sphinxext/prs/9678.json new file mode 100644 index 00000000000..9674ed5b0ef --- /dev/null +++ b/doc/sphinxext/prs/9678.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "043e7d5e5dabccb5504c59f54b44b6c0e0cb5694", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "examples/inverse/compute_mne_inverse_volume.py": { + "a": 2, + "d": 2 + }, + "examples/visualization/publication_figure.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 6, + "d": 1 + }, + "tutorials/forward/30_forward.py": { + "a": 3, + "d": 3 + }, + "tutorials/inverse/30_mne_dspm_loreta.py": { + "a": 2, + "d": 1 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 10, + "d": 8 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 2, + "d": 1 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9682.json b/doc/sphinxext/prs/9682.json new file mode 100644 index 00000000000..9159a5bf83a --- /dev/null +++ b/doc/sphinxext/prs/9682.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "cf307a48e130ca27868c3706100bf2c402c9de1f", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/datasets/opm_data.py": { + "a": 1, + "d": 1 + }, + "examples/forward/left_cerebellum_volume_source.py": { + "a": 5, + "d": 4 + }, + "examples/time_frequency/source_power_spectrum_opm.py": { + "a": 3, + "d": 3 + }, + "examples/visualization/3d_to_2d.py": { + "a": 20, + "d": 11 + }, + "mne/channels/montage.py": { + "a": 45, + "d": 3 + }, + "mne/channels/tests/test_montage.py": { + "a": 14, + "d": 3 + }, + "mne/data/image/custom_layout.lout": { + "a": 65, + "d": 23 + }, + "mne/viz/_brain/_brain.py": { + "a": 15, + "d": 11 + }, + "tutorials/clinical/20_seeg.py": { + "a": 1, + "d": 3 + }, + "tutorials/clinical/30_ecog.py": { + "a": 18, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9684.json b/doc/sphinxext/prs/9684.json new file mode 100644 index 00000000000..35035c70873 --- /dev/null +++ b/doc/sphinxext/prs/9684.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "041068d510cc746b0a994609a59213f279be0c81", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/montage.py": { + "a": 28, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9685.json b/doc/sphinxext/prs/9685.json new file mode 100644 index 00000000000..1902cf94e28 --- /dev/null +++ b/doc/sphinxext/prs/9685.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5ac71c35403983091c6fb6ebe552b73d79c3d9db", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 15, + "d": 12 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9687.json b/doc/sphinxext/prs/9687.json new 
file mode 100644 index 00000000000..9a8de586597 --- /dev/null +++ b/doc/sphinxext/prs/9687.json @@ -0,0 +1,143 @@ +{ + "merge_commit_sha": "5bafe5287dd27ef6e07a64609615326110536578", + "authors": [ + { + "n": "Martin G. Schulz", + "e": "dev@mgschulz.de" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + ".github/workflows/compat_minimal.yml": { + "a": 1, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 7, + "d": 5 + }, + "doc/changes/latest.inc": { + "a": 5, + "d": 1 + }, + "doc/cited.rst": { + "a": 3, + "d": 3 + }, + "doc/visualization.rst": { + "a": 3, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/commands/mne_browse_raw.py": { + "a": 2, + "d": 3 + }, + "mne/conftest.py": { + "a": 48, + "d": 6 + }, + "mne/io/base.py": { + "a": 5, + "d": 4 + }, + "mne/utils/config.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 23, + "d": 0 + }, + "mne/utils/tests/test_config.py": { + "a": 3, + "d": 1 + }, + "mne/viz/__init__.py": { + "a": 4, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 305, + "d": 56 + }, + "mne/viz/_mpl_figure.py": { + "a": 53, + "d": 187 + }, + "mne/viz/epochs.py": { + "a": 2, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 25, + "d": 10 + }, + "mne/viz/tests/test_epochs.py": { + "a": 16, + "d": 16 + }, + "mne/viz/tests/test_ica.py": { + "a": 4, + "d": 4 + }, + "mne/viz/tests/test_raw.py": { + "a": 327, + "d": 226 + }, + "mne/viz/utils.py": { + "a": 32, + "d": 1 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "tools/github_actions_infos.sh": { + "a": 1, + "d": 0 + }, + "tools/github_actions_test.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9688.json b/doc/sphinxext/prs/9688.json new file mode 100644 index 00000000000..62f3c785139 --- /dev/null +++ b/doc/sphinxext/prs/9688.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "27c5d536808d9c8226047a825582b43df1218c95", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 23, + "d": 13 + }, + "mne/viz/_brain/_brain.py": { + "a": 123, + "d": 15 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 14, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 9, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 9, + "d": 6 + }, + "mne/viz/backends/_pyvista.py": { + "a": 4, + "d": 4 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9689.json b/doc/sphinxext/prs/9689.json new file mode 100644 index 00000000000..f5b88e10fc1 --- /dev/null +++ b/doc/sphinxext/prs/9689.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "53335f68227374b584ede6f4300f0f97f5efb0aa", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/gui/__init__.py": { + "a": 54, + "d": 16 + }, + 
"mne/gui/_coreg.py": { + "a": 976, + "d": 0 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 91, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 7, + "d": 3 + }, + "requirements.txt": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/969.json b/doc/sphinxext/prs/969.json new file mode 100644 index 00000000000..f4fa7739247 --- /dev/null +++ b/doc/sphinxext/prs/969.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "5c97f2c476dc45fc7eccf167f7201c42696b7fb6", + "authors": [ + { + "n": "Teon L Brooks", + "e": null + } + ], + "changes": { + "mne/fiff/kit/kit.py": { + "a": 20, + "d": 13 + }, + "mne/fiff/kit/tests/test_coreg.py": { + "a": 3, + "d": 4 + }, + "mne/fiff/kit/tests/test_kit.py": { + "a": 7, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9690.json b/doc/sphinxext/prs/9690.json new file mode 100644 index 00000000000..b0fcee856b3 --- /dev/null +++ b/doc/sphinxext/prs/9690.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e2ea2d093e0b3fded4251794ed7998bc7a51c53c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".gitignore": { + "a": 2, + "d": 0 + }, + "mne/coreg.py": { + "a": 25, + "d": 0 + }, + "mne/tests/test_coreg.py": { + "a": 88, + "d": 6 + }, + "tutorials/forward/25_automated_coreg.py": { + "a": 10, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9691.json b/doc/sphinxext/prs/9691.json new file mode 100644 index 00000000000..7c20c669ace --- /dev/null +++ b/doc/sphinxext/prs/9691.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c890338c82c1da589221f41c59ee50cd2e185373", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/references.bib": { + "a": 6, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9692.json b/doc/sphinxext/prs/9692.json new file mode 100644 index 00000000000..f0f29a1a7d3 --- /dev/null +++ b/doc/sphinxext/prs/9692.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "accfe88c75eebac39c85f85ceb648de999198e41", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Britta Westner", + "e": "britta.wstnr@gmail.com" + } + ], + "changes": { + ".github/CODEOWNERS": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9693.json b/doc/sphinxext/prs/9693.json new file mode 100644 index 00000000000..ce1b6548412 --- /dev/null +++ b/doc/sphinxext/prs/9693.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9d00983119a41e5e0292577e746c03474f981f8e", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 3, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9694.json b/doc/sphinxext/prs/9694.json new file mode 100644 index 00000000000..736def681ce --- /dev/null +++ b/doc/sphinxext/prs/9694.json @@ -0,0 +1,99 @@ +{ + "merge_commit_sha": "a2036aaaf1f34a5fdfd84a6c1eb75986dd2608b7", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": 
"Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 5, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 2, + "d": 2 + }, + "mne/export/_edf.py": { + "a": 55, + "d": 13 + }, + "mne/export/_export.py": { + "a": 8, + "d": 4 + }, + "mne/export/tests/test_export.py": { + "a": 137, + "d": 16 + }, + "mne/io/base.py": { + "a": 4, + "d": 2 + }, + "mne/io/edf/edf.py": { + "a": 101, + "d": 3 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 19, + "d": 3 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 23, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9695.json b/doc/sphinxext/prs/9695.json new file mode 100644 index 00000000000..2657be68d27 --- /dev/null +++ b/doc/sphinxext/prs/9695.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bcf2b2f7d5479060d6cff9a227ffbb795cbe7f85", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/nihon/nihon.py": { + "a": 2, + "d": 1 + }, + "mne/io/nihon/tests/test_nihon.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9696.json b/doc/sphinxext/prs/9696.json new file mode 100644 index 00000000000..c809dc9a91a --- /dev/null +++ b/doc/sphinxext/prs/9696.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2e672de3de6599320c11480f41e353e16877ba24", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_mpl_figure.py": { + "a": 11, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9697.json b/doc/sphinxext/prs/9697.json new file mode 100644 index 00000000000..e98e05c2720 --- /dev/null +++ b/doc/sphinxext/prs/9697.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2291946a916798dff4e26a0f3a0c7a35d395fe6c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 8, + "d": 2 + }, + "mne/viz/tests/test_3d_mpl.py": { + "a": 9, + "d": 0 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9698.json b/doc/sphinxext/prs/9698.json new file mode 100644 index 00000000000..9f1b382d152 --- /dev/null +++ b/doc/sphinxext/prs/9698.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "89dba30445e759ca98102998f6b39eaca292b0fd", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fixes.py": { + "a": 20, + "d": 0 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 5 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 1 + }, + "mne/viz/backends/_pyvista.py": { + "a": 11, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9699.json b/doc/sphinxext/prs/9699.json new file mode 100644 index 00000000000..2f5b2d7593d --- 
/dev/null +++ b/doc/sphinxext/prs/9699.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "22cd7747529db7895345917c4d3884a482ed8ab3", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/forward/25_automated_coreg.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/970.json b/doc/sphinxext/prs/970.json new file mode 100644 index 00000000000..f3e0efcd3f2 --- /dev/null +++ b/doc/sphinxext/prs/970.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "13483b856318e3866a9ae1064a0d29decc17b2a9", + "authors": [ + { + "n": "Alan Leggitt", + "e": "leggitta3@gmail.com" + } + ], + "changes": { + "mne/fiff/edf/edf.py": { + "a": 6, + "d": 0 + }, + "mne/fiff/edf/tests/test_edf.py": { + "a": 11, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9705.json b/doc/sphinxext/prs/9705.json new file mode 100644 index 00000000000..c0e6911379a --- /dev/null +++ b/doc/sphinxext/prs/9705.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3ff69f61d692ce34731ce94c277d16ae18319516", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/tests/test_raw.py": { + "a": 4, + "d": 1 + }, + "mne/utils/tests/test_numerics.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9706.json b/doc/sphinxext/prs/9706.json new file mode 100644 index 00000000000..188fc77c7f6 --- /dev/null +++ b/doc/sphinxext/prs/9706.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "2606a9bbf53f038e3c79d2799442ea9edb7fd2dc", + "authors": [ + { + "n": "Tim Gates", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/io/constants.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 1, + "d": 1 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9708.json b/doc/sphinxext/prs/9708.json new file mode 100644 index 00000000000..0fbd1be4e13 --- /dev/null +++ b/doc/sphinxext/prs/9708.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "6ab3c551910fd21d6da7c2a9b9aa3712b75c9352", + "authors": [ + { + "n": "Evgeny Goldstein", + "e": "evgenygoldstein@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_channels.py": { + "a": 20, + "d": 0 + }, + "mne/io/pick.py": { + "a": 10, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9711.json b/doc/sphinxext/prs/9711.json new file mode 100644 index 00000000000..66a34392f1e --- /dev/null +++ b/doc/sphinxext/prs/9711.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "b20f6b314b57d53c1c0f0277c1aaee5658004e06", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/defaults.py": { + "a": 4, + "d": 4 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9712.json b/doc/sphinxext/prs/9712.json new file mode 100644 index 00000000000..5a622ad8bd0 --- /dev/null +++ b/doc/sphinxext/prs/9712.json @@ -0,0 +1,27 @@ +{ + 
"merge_commit_sha": "8983996fdeb679157db102de075852bce97796b0", + "authors": [ + { + "n": "Reza Shoorangiz", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/io/curry/curry.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9714.json b/doc/sphinxext/prs/9714.json new file mode 100644 index 00000000000..80d6943b36f --- /dev/null +++ b/doc/sphinxext/prs/9714.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3e7914e18549d28087440c3f87d88a5520064bd4", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 161, + "d": 113 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + }, + "tutorials/forward/35_eeg_no_mri.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9719.json b/doc/sphinxext/prs/9719.json new file mode 100644 index 00000000000..e22ed81415e --- /dev/null +++ b/doc/sphinxext/prs/9719.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "db0219e5f6e083f7e377beb5301d725d03ec3526", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/cov.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 34, + "d": 60 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 26, + "d": 13 + }, + "mne/rank.py": { + "a": 5, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 19, + "d": 0 + }, + "mne/tests/test_rank.py": { + "a": 39, + "d": 0 + }, + "mne/viz/tests/test_ica.py": { + "a": 12, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9721.json b/doc/sphinxext/prs/9721.json new file mode 100644 index 00000000000..deb48041e73 --- /dev/null +++ b/doc/sphinxext/prs/9721.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ff3ba8629496bed2fc5b34e77abac48f930abe1c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 9, + "d": 0 + }, + "tutorials/inverse/40_mne_fixed_free.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9723.json b/doc/sphinxext/prs/9723.json new file mode 100644 index 00000000000..0d33a66c832 --- /dev/null +++ b/doc/sphinxext/prs/9723.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f20ffa0eb46e76baf4e30437bc03d765da83b38a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/funding.rst": { + "a": 2, + "d": 2 + }, + "doc/links.inc": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9724.json b/doc/sphinxext/prs/9724.json new file mode 100644 index 00000000000..27097abf016 --- /dev/null +++ b/doc/sphinxext/prs/9724.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9a9782c46a1c09991941d1392ffa3d8b493e7b93", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 0 + }, + "doc/funding.rst": { + "a": 28, + "d": 8 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at 
end of file diff --git a/doc/sphinxext/prs/9725.json b/doc/sphinxext/prs/9725.json new file mode 100644 index 00000000000..ca8fae597b0 --- /dev/null +++ b/doc/sphinxext/prs/9725.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "e43136c22014cde2a0567f9e332faacb75f876dd", + "authors": [ + { + "n": "Valerii", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/data/html_templates.py": { + "a": 2, + "d": 5 + }, + "mne/defaults.py": { + "a": 3, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 44, + "d": 19 + }, + "mne/io/tests/test_meas_info.py": { + "a": 10, + "d": 0 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9728.json b/doc/sphinxext/prs/9728.json new file mode 100644 index 00000000000..19b43c0aaad --- /dev/null +++ b/doc/sphinxext/prs/9728.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "4327725e18a87e6fa3c75c637fa9297d053d95ed", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9729.json b/doc/sphinxext/prs/9729.json new file mode 100644 index 00000000000..6401092cc9f --- /dev/null +++ b/doc/sphinxext/prs/9729.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "2adf9a63218e932399710618a095305ae0bd5d0d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9731.json b/doc/sphinxext/prs/9731.json new file mode 100644 index 00000000000..4a47347757d --- /dev/null +++ b/doc/sphinxext/prs/9731.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b527dc6683165d1e5982036d6144010fd710860f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_cov.py": { + "a": 4, + "d": 2 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 4 + }, + "tools/github_actions_dependencies.sh": { + "a": 3, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9732.json b/doc/sphinxext/prs/9732.json new file mode 100644 index 00000000000..9e5afe8d638 --- /dev/null +++ b/doc/sphinxext/prs/9732.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "82487bc6a8976b0fa55ab9c7a2d8af56fd8b7ae3", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/utils/_logging.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9735.json b/doc/sphinxext/prs/9735.json new file mode 100644 index 00000000000..3779adc496a --- /dev/null +++ b/doc/sphinxext/prs/9735.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a000c574a1a3aec40cffff6a0a8ad6499625fa5f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/_logging.py": { + "a": 10, + "d": 4 + }, + "mne/utils/tests/test_logging.py": { + "a": 22, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9739.json b/doc/sphinxext/prs/9739.json new file mode 100644 index 00000000000..47f7ac5223a --- /dev/null +++ 
b/doc/sphinxext/prs/9739.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e4ed2a607c9090f153da7c1558cb56edb975c1a7", + "authors": [ + { + "n": "Steve Matindi", + "e": null + } + ], + "changes": { + "LICENSE.txt": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9741.json b/doc/sphinxext/prs/9741.json new file mode 100644 index 00000000000..43d69f4f8b6 --- /dev/null +++ b/doc/sphinxext/prs/9741.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "41033b0edd7d770625006b908c5131307fef9fa8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 9, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 54, + "d": 23 + }, + "tutorials/clinical/30_ecog.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9742.json b/doc/sphinxext/prs/9742.json new file mode 100644 index 00000000000..b6456e8efe9 --- /dev/null +++ b/doc/sphinxext/prs/9742.json @@ -0,0 +1,155 @@ +{ + "merge_commit_sha": "c42a7b7a6dee8a48239c293c32d21d88040b9adf", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "MANIFEST.in": { + "a": 2, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 2, + "d": 1 + }, + "mne/data/dataset_checksums.txt": { + "a": 46, + "d": 0 + }, + "mne/data/eegbci_checksums.txt": { + "a": 3058, + "d": 0 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 4, + "d": 5 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 4, + "d": 5 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 4, + "d": 6 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 4, + "d": 5 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 4, + "d": 5 + }, + "mne/datasets/config.py": { + "a": 217, + "d": 0 + }, + "mne/datasets/eegbci/eegbci.py": { + "a": 55, + "d": 8 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 19, + "d": 54 + }, + "mne/datasets/limo/limo.py": { + "a": 116, + "d": 68 + }, + "mne/datasets/sleep_physionet/_utils.py": { + "a": 34, + "d": 13 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 14, + "d": 11 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 23, + "d": 17 + }, + "mne/datasets/utils.py": { + "a": 238, + "d": 419 + }, + "mne/export/_edf.py": { + "a": 6, + "d": 0 + }, + "mne/export/tests/test_export.py": { + "a": 3, + "d": 8 + }, + "mne/io/tests/test_constants.py": { + "a": 14, + "d": 4 + }, + "mne/tests/test_import_nesting.py": { + "a": 1, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 4, + "d": 4 + }, + "mne/utils/check.py": { + "a": 23, + "d": 36 + }, + "mne/utils/fetching.py": { + "a": 0, + "d": 127 + }, + "mne/utils/misc.py": { + "a": 24, + "d": 0 + }, + "mne/utils/tests/test_fetching.py": { + "a": 0, + "d": 44 + }, + "requirements.txt": { + "a": 1, + "d": 0 + }, + "requirements_testing.txt": { + "a": 1, + "d": 0 + }, + "server_environment.yml": { + "a": 1, + "d": 0 + }, + "setup.py": { + "a": 2, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 0 + }, + "tools/get_testing_version.sh": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9743.json b/doc/sphinxext/prs/9743.json new file mode 100644 index 
00000000000..f10148f30eb --- /dev/null +++ b/doc/sphinxext/prs/9743.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "533f5895e8686a41285ad2f15289e23ff6762df2", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/io/bti/bti.py": { + "a": 16, + "d": 13 + }, + "mne/io/edf/edf.py": { + "a": 9, + "d": 15 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 0 + }, + "tools/circleci_download.sh": { + "a": 1, + "d": 1 + }, + "tutorials/clinical/60_sleep.py": { + "a": 6, + "d": 12 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9746.json b/doc/sphinxext/prs/9746.json new file mode 100644 index 00000000000..1f347fe626f --- /dev/null +++ b/doc/sphinxext/prs/9746.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a0faac5bbf5d43f5b264444fd7138ea10af1fd13", + "authors": [ + { + "n": "Adam Li", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/persyst/persyst.py": { + "a": 5, + "d": 2 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9749.json b/doc/sphinxext/prs/9749.json new file mode 100644 index 00000000000..d5720aae424 --- /dev/null +++ b/doc/sphinxext/prs/9749.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "c74cc7180b25f20b4b99d0144cbb6ef99c597e8a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 2, + "d": 0 + }, + "mne/report/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 24, + "d": 24 + }, + "mne/report/tests/test_report.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9750.json b/doc/sphinxext/prs/9750.json new file mode 100644 index 00000000000..cb93552aefc --- /dev/null +++ b/doc/sphinxext/prs/9750.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "aea40ff54c976a1166b573da5ec3a6188ba7b725", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/visualization/eeg_on_scalp.py": { + "a": 9, + "d": 0 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/utils/check.py": { + "a": 12, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 13, + "d": 14 + }, + "mne/viz/_brain/_brain.py": { + "a": 116, + "d": 111 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 6, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 4, + "d": 5 + }, + "mne/viz/tests/test_3d.py": { + "a": 17, + "d": 7 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 7 + }, + "mne/viz/utils.py": { + "a": 5, + "d": 5 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9751.json b/doc/sphinxext/prs/9751.json new file mode 100644 index 00000000000..ed67c79fd4a --- /dev/null +++ b/doc/sphinxext/prs/9751.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3aeee826450c823464282657a38c6751d9cdc5a2", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9754.json b/doc/sphinxext/prs/9754.json new file mode 100644 index 00000000000..7a3e145edb9 --- /dev/null 
+++ b/doc/sphinxext/prs/9754.json @@ -0,0 +1,299 @@ +{ + "merge_commit_sha": "6763cac6b4b3d379a9e5319dd3575f1f124c0060", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Adam Li", + "e": "adam2392@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Valerii", + "e": "42982039+vagechirkov@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 26, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 7, + "d": 3 + }, + "mne/data/html_templates.py": { + "a": 40, + "d": 2 + }, + "mne/defaults.py": { + "a": 10, + "d": 0 + }, + "mne/evoked.py": { + "a": 1, + "d": 0 + }, + "mne/fixes.py": { + "a": 35, + "d": 3 + }, + "mne/forward/forward.py": { + "a": 36, + "d": 20 + }, + "mne/html/bootstrap.min.css": { + "a": 0, + "d": 7 + }, + "mne/html/bootstrap.min.js": { + "a": 0, + "d": 7 + }, + "mne/html/jquery-ui.min.css": { + "a": 0, + "d": 6 + }, + "mne/html/jquery-ui.min.js": { + "a": 0, + "d": 12 + }, + "mne/html/jquery.js": { + "a": 0, + "d": 6 + }, + "mne/icons/mne_icon-cropped.png": { + "a": 0, + "d": 0 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 0, + "d": 16 + }, + "mne/io/meas_info.py": { + "a": 12, + "d": 6 + }, + "mne/minimum_norm/inverse.py": { + "a": 33, + "d": 14 + }, + "mne/report/js_and_css/bootstrap.bundle.min.js": { + "a": 7, + "d": 0 + }, + "mne/report/js_and_css/bootstrap.min.css": { + "a": 7, + "d": 0 + }, + "mne/report/js_and_css/highlightjs/atom-one-dark-reasonable.min.css": { + "a": 1, + "d": 0 + }, + "mne/report/js_and_css/highlightjs/atom-one-light.min.css": { + "a": 1, + "d": 0 + }, + "mne/report/js_and_css/highlightjs/default.min.css": { + "a": 9, + "d": 0 + }, + "mne/report/js_and_css/highlightjs/highlight.min.js": { + "a": 1282, + "d": 0 + }, + "mne/report/js_and_css/jquery-3.6.0.min.js": { + "a": 2, + "d": 0 + }, + "mne/report/js_and_css/report.js": { + "a": 224, + "d": 0 + }, + "mne/report/js_and_css/report.sass": { + "a": 19, + "d": 0 + }, + "mne/report/report.py": { + "a": 2448, + "d": 1401 + }, + "mne/report/templates/code.html": { + "a": 24, + "d": 0 + }, + "mne/report/templates/cov.html": { + "a": 23, + "d": 0 + }, + "mne/report/templates/epochs.html": { + "a": 25, + "d": 0 + }, + 
"mne/report/templates/evoked.html": { + "a": 26, + "d": 0 + }, + "mne/report/templates/footer.html": { + "a": 10, + "d": 0 + }, + "mne/report/templates/forward.html": { + "a": 23, + "d": 0 + }, + "mne/report/templates/header.html": { + "a": 56, + "d": 0 + }, + "mne/report/templates/html.html": { + "a": 23, + "d": 0 + }, + "mne/report/templates/image.html": { + "a": 36, + "d": 0 + }, + "mne/report/templates/inverse.html": { + "a": 23, + "d": 0 + }, + "mne/report/templates/raw.html": { + "a": 25, + "d": 0 + }, + "mne/report/templates/slider.html": { + "a": 60, + "d": 0 + }, + "mne/report/templates/toc.html": { + "a": 12, + "d": 0 + }, + "mne/report/tests/test_report.py": { + "a": 318, + "d": 170 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 3, + "d": 5 + }, + "mne/utils/docs.py": { + "a": 69, + "d": 52 + }, + "mne/viz/_3d.py": { + "a": 10, + "d": 19 + }, + "mne/viz/backends/renderer.py": { + "a": 9, + "d": 3 + }, + "mne/viz/evoked.py": { + "a": 9, + "d": 1 + }, + "mne/viz/misc.py": { + "a": 89, + "d": 48 + }, + "mne/viz/tests/test_topo.py": { + "a": 1, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "mne/viz/utils.py": { + "a": 6, + "d": 3 + }, + "tutorials/intro/70_report.py": { + "a": 441, + "d": 207 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9758.json b/doc/sphinxext/prs/9758.json new file mode 100644 index 00000000000..ec0d5a35a2e --- /dev/null +++ b/doc/sphinxext/prs/9758.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "034ff3f03ae1ae01a6fe52e3e91e10682c977aff", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 9, + "d": 8 + }, + "mne/tests/test_epochs.py": { + "a": 17, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9760.json b/doc/sphinxext/prs/9760.json new file mode 100644 index 00000000000..9493fcbe3df --- /dev/null +++ b/doc/sphinxext/prs/9760.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "05382cf02d49a5f8fbaf6030979e9ebb9d126608", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/export/_edf.py": { + "a": 142, + "d": 134 + }, + "tools/azure_dependencies.sh": { + "a": 0, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9762.json b/doc/sphinxext/prs/9762.json new file mode 100644 index 00000000000..6a26e89aaab --- /dev/null +++ b/doc/sphinxext/prs/9762.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9627d0965ced345fc5339caeb828e4180afacaa6", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/data/dataset_checksums.txt": { + "a": 0, + "d": 46 + }, + "mne/datasets/config.py": { + "a": 267, + "d": 120 + }, + "mne/datasets/testing/_testing.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 63, + "d": 38 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9763.json b/doc/sphinxext/prs/9763.json new file mode 100644 index 00000000000..7952c9cc486 --- /dev/null +++ b/doc/sphinxext/prs/9763.json @@ -0,0 +1,167 @@ +{ + "merge_commit_sha": "0b503d8b64bd1383eee0450c3998700ad354a1ef", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric 
Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "doc/datasets.rst": { + "a": 2, + "d": 1 + }, + "examples/visualization/3d_to_2d.py": { + "a": 15, + "d": 7 + }, + "mne/datasets/__init__.py": { + "a": 3, + "d": 1 + }, + "mne/datasets/_fake/_fake.py": { + "a": 5, + "d": 4 + }, + "mne/datasets/_fetch.py": { + "a": 292, + "d": 0 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 7, + "d": 6 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/config.py": { + "a": 5, + "d": 4 + }, + "mne/datasets/epilepsy_ecog/_data.py": { + "a": 6, + "d": 5 + }, + "mne/datasets/erp_core/erp_core.py": { + "a": 6, + "d": 5 + }, + "mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/fnirs_motor/fnirs_motor.py": { + "a": 6, + "d": 5 + }, + "mne/datasets/hf_sef/hf_sef.py": { + "a": 24, + "d": 10 + }, + "mne/datasets/kiloword/kiloword.py": { + "a": 5, + "d": 4 + }, + "mne/datasets/misc/_misc.py": { + "a": 5, + "d": 4 + }, + "mne/datasets/mtrf/mtrf.py": { + "a": 6, + "d": 5 + }, + "mne/datasets/multimodal/multimodal.py": { + "a": 6, + "d": 5 + }, + "mne/datasets/opm/opm.py": { + "a": 6, + "d": 5 + }, + "mne/datasets/phantom_4dbti/phantom_4dbti.py": { + "a": 6, + "d": 5 + }, + "mne/datasets/refmeg_noise/refmeg_noise.py": { + "a": 7, + "d": 6 + }, + "mne/datasets/sample/sample.py": { + "a": 7, + "d": 5 + }, + "mne/datasets/somato/somato.py": { + "a": 7, + "d": 5 + }, + "mne/datasets/spm_face/spm_data.py": { + "a": 7, + "d": 5 + }, + "mne/datasets/ssvep/ssvep.py": { + "a": 8, + "d": 5 + }, + "mne/datasets/testing/_testing.py": { + "a": 7, + "d": 5 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 49, + "d": 235 + }, + "mne/datasets/visual_92_categories/visual_92_categories.py": { + "a": 6, + "d": 4 + }, + "mne/export/tests/test_export.py": { + "a": 2, + "d": 4 + }, + "setup.py": { + "a": 0, + "d": 1 + }, + "tutorials/clinical/20_seeg.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9764.json b/doc/sphinxext/prs/9764.json new file mode 100644 index 00000000000..1d048f75b8a --- /dev/null +++ b/doc/sphinxext/prs/9764.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6c0006dfc73588927936b95a1ae364556a55fbc0", + "authors": [ + { + "n": "Adam Li", + "e": null + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 32, + "d": 72 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9766.json b/doc/sphinxext/prs/9766.json new file mode 100644 index 00000000000..0c51d05ccc8 --- /dev/null +++ b/doc/sphinxext/prs/9766.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "45b257d96a352310b00f1478b7053b46bb57e0b7", + "authors": [ + { + "n": "Jan Sosulski", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 10, + "d": 9 + }, + "mne/tests/test_epochs.py": { + "a": 8, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/977.json b/doc/sphinxext/prs/977.json new file mode 100644 index 00000000000..bd3dc68ecc5 --- /dev/null +++ b/doc/sphinxext/prs/977.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": 
"2ff835f644f3fc751f84398e1758da06ea631fb8", + "authors": [ + { + "n": "Martin Luessi", + "e": null + } + ], + "changes": { + "doc/Makefile": { + "a": 12, + "d": 1 + }, + "doc/source/_templates/layout.html": { + "a": 6, + "d": 3 + }, + "doc/source/_templates/sidebar.html": { + "a": 5, + "d": 0 + }, + "doc/source/conf.py": { + "a": 5, + "d": 2 + }, + "doc/sphinxext/gen_rst.py": { + "a": 14, + "d": 10 + }, + "doc/upload_html.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9770.json b/doc/sphinxext/prs/9770.json new file mode 100644 index 00000000000..2060439de4f --- /dev/null +++ b/doc/sphinxext/prs/9770.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "59560dfe179ffbb30d79ad88898121043539a2c1", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/gui/_ieeg_locate_gui.py": { + "a": 5, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9771.json b/doc/sphinxext/prs/9771.json new file mode 100644 index 00000000000..393b31f7b5c --- /dev/null +++ b/doc/sphinxext/prs/9771.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f967b3dd4fb6f3db1af09cf1a708774acde24a0d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/data/html_templates.py": { + "a": 1, + "d": 1 + }, + "mne/defaults.py": { + "a": 8, + "d": 5 + }, + "mne/tests/test_defaults.py": { + "a": 16, + "d": 0 + }, + "mne/viz/tests/test_raw.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9772.json b/doc/sphinxext/prs/9772.json new file mode 100644 index 00000000000..872ab365c94 --- /dev/null +++ b/doc/sphinxext/prs/9772.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "52c7007b461181a15720438d46eaffc81385f0cc", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 45, + "d": 9 + }, + "doc/conf.py": { + "a": 13, + "d": 3 + }, + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + }, + "mne/tests/test_cov.py": { + "a": 1, + "d": 1 + }, + "requirements_doc.txt": { + "a": 1, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9777.json b/doc/sphinxext/prs/9777.json new file mode 100644 index 00000000000..20826e8e1b8 --- /dev/null +++ b/doc/sphinxext/prs/9777.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "a39a0e8caa340ff22eba7441bec2399c93fa496d", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9780.json b/doc/sphinxext/prs/9780.json new file mode 100644 index 00000000000..b687454517e --- /dev/null +++ b/doc/sphinxext/prs/9780.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9c7e837f59e480fff1b6ea5841630e089aa5a235", + "authors": [ + { + "n": "Adam Li", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/annotations.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 7, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9781.json b/doc/sphinxext/prs/9781.json new file mode 100644 index 00000000000..f1a3563cd77 --- /dev/null +++ b/doc/sphinxext/prs/9781.json @@ -0,0 +1,115 @@ +{ + "merge_commit_sha": 
"040a40509ec600e83d572d6bc03a209a05a72a07", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/datasets.rst": { + "a": 1, + "d": 0 + }, + "mne/datasets/__init__.py": { + "a": 2, + "d": 2 + }, + "mne/datasets/_fetch.py": { + "a": 44, + "d": 42 + }, + "mne/datasets/brainstorm/bst_auditory.py": { + "a": 2, + "d": 7 + }, + "mne/datasets/brainstorm/bst_phantom_ctf.py": { + "a": 2, + "d": 7 + }, + "mne/datasets/brainstorm/bst_phantom_elekta.py": { + "a": 1, + "d": 7 + }, + "mne/datasets/brainstorm/bst_raw.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/brainstorm/bst_resting.py": { + "a": 2, + "d": 6 + }, + "mne/datasets/epilepsy_ecog/_data.py": { + "a": 11, + "d": 5 + }, + "mne/datasets/erp_core/erp_core.py": { + "a": 1, + "d": 5 + }, + "mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py": { + "a": 5, + "d": 12 + }, + "mne/datasets/fnirs_motor/fnirs_motor.py": { + "a": 10, + "d": 5 + }, + "mne/datasets/mtrf/mtrf.py": { + "a": 2, + "d": 5 + }, + "mne/datasets/multimodal/multimodal.py": { + "a": 9, + "d": 5 + }, + "mne/datasets/opm/opm.py": { + "a": 9, + "d": 5 + }, + "mne/datasets/phantom_4dbti/phantom_4dbti.py": { + "a": 10, + "d": 5 + }, + "mne/datasets/refmeg_noise/refmeg_noise.py": { + "a": 12, + "d": 7 + }, + "mne/datasets/sample/sample.py": { + "a": 9, + "d": 5 + }, + "mne/datasets/somato/somato.py": { + "a": 9, + "d": 5 + }, + "mne/datasets/ssvep/ssvep.py": { + "a": 10, + "d": 6 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 8, + "d": 8 + }, + "mne/datasets/utils.py": { + "a": 31, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9782.json b/doc/sphinxext/prs/9782.json new file mode 100644 index 00000000000..cc9336a3a24 --- /dev/null +++ b/doc/sphinxext/prs/9782.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "c74e60a00825bbe3bbb977369df53b1f2287b00f", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/epochs.py": { + "a": 7, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 4, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9784.json b/doc/sphinxext/prs/9784.json new file mode 100644 index 00000000000..93b350ee426 --- /dev/null +++ b/doc/sphinxext/prs/9784.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "08a02e3ce7af03604cde74b2d2b4352f00ab879d", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 5, + "d": 2 + }, + "doc/overview/datasets_index.rst": { + "a": 18, + "d": 9 + }, + "examples/visualization/3d_to_2d.py": { + "a": 25, + "d": 39 + }, + "mne/data/image/custom_layout.lout": { + "a": 257, + "d": 65 + }, + "mne/gui/__init__.py": { + "a": 40, + "d": 0 + }, + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 30, + "d": 1 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 26, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9789.json b/doc/sphinxext/prs/9789.json new file mode 100644 index 00000000000..995a65d6f7d --- /dev/null +++ b/doc/sphinxext/prs/9789.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": 
"51a1ea3f370bcdadd2306bfd27ac4f2b5d6e4a7f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 3, + "d": 3 + }, + "azure-pipelines.yml": { + "a": 26, + "d": 28 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 8, + "d": 0 + }, + "doc/install/contributing.rst": { + "a": 2, + "d": 3 + }, + "doc/install/index.rst": { + "a": 1, + "d": 1 + }, + "doc/links.inc": { + "a": 4, + "d": 4 + }, + "doc/references.bib": { + "a": 2, + "d": 4 + }, + "mne/commands/mne_sys_info.py": { + "a": 7, + "d": 1 + }, + "mne/conftest.py": { + "a": 4, + "d": 1 + }, + "mne/utils/config.py": { + "a": 20, + "d": 6 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 3 + }, + "tools/github_actions_infos.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9794.json b/doc/sphinxext/prs/9794.json new file mode 100644 index 00000000000..881bf155cb5 --- /dev/null +++ b/doc/sphinxext/prs/9794.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "d973a1fa714534eab6e553f8073ceb9abce35fae", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/time_frequency/tests/test_tfr.py": { + "a": 72, + "d": 1 + }, + "mne/time_frequency/tfr.py": { + "a": 46, + "d": 12 + }, + "mne/utils/check.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9795.json b/doc/sphinxext/prs/9795.json new file mode 100644 index 00000000000..f35481fbf36 --- /dev/null +++ b/doc/sphinxext/prs/9795.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "66ac824c7f2a04baccd33699f69a5f2443768184", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/brainvision/brainvision.py": { + "a": 22, + "d": 2 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 10, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9796.json b/doc/sphinxext/prs/9796.json new file mode 100644 index 00000000000..5e0c7f6a00c --- /dev/null +++ b/doc/sphinxext/prs/9796.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "fb98000c6b9998b3a05ab3d887d0e65c4d3d72a0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "MANIFEST.in": { + "a": 1, + "d": 0 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/datasets.rst": { + "a": 2, + "d": 1 + }, + "mne/datasets/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/datasets/_phantom/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/datasets/_phantom/base.py": { + "a": 62, + "d": 0 + }, + "mne/datasets/_phantom/phantom_otaniemi.txt": { + "a": 3, + "d": 0 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 33, + "d": 6 + }, + "mne/dipole.py": { + "a": 4, + "d": 0 + }, + "setup.py": { + "a": 1, + "d": 0 + }, + "tutorials/inverse/80_brainstorm_phantom_elekta.py": { + "a": 21, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9799.json b/doc/sphinxext/prs/9799.json new file mode 100644 index 00000000000..82fe346aa55 --- /dev/null +++ b/doc/sphinxext/prs/9799.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "4f37eb6f895728cdf8e4ebf320427136bba76896", + "authors": [ + { + "n": "Eric 
Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/utils.py": { + "a": 4, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 8, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/980.json b/doc/sphinxext/prs/980.json new file mode 100644 index 00000000000..7fc54894150 --- /dev/null +++ b/doc/sphinxext/prs/980.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c493a447f7415b961bf187db8615dc77bc00ffbf", + "authors": [ + { + "n": "Brad Buran", + "e": null + } + ], + "changes": { + "mne/utils.py": { + "a": 11, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9800.json b/doc/sphinxext/prs/9800.json new file mode 100644 index 00000000000..ba512b183cd --- /dev/null +++ b/doc/sphinxext/prs/9800.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "6bb3eb742b880a520431e10bfb5b113872ff67a0", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 13, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ieeg/__init__.py": { + "a": 7, + "d": 0 + }, + "mne/preprocessing/ieeg/_projection.py": { + "a": 104, + "d": 0 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 68, + "d": 0 + }, + "mne/preprocessing/nirs/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 31, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9801.json b/doc/sphinxext/prs/9801.json new file mode 100644 index 00000000000..000aa6687e9 --- /dev/null +++ b/doc/sphinxext/prs/9801.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "8863c1bed699d0ca428a6840101fd44730173347", + "authors": [ + { + "n": "Joshua Teves", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/utils/config.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9802.json b/doc/sphinxext/prs/9802.json new file mode 100644 index 00000000000..655ca2e802b --- /dev/null +++ b/doc/sphinxext/prs/9802.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "32f4ee82bf6178c287539f72323ab470ef711cb4", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/datasets/_fetch.py": { + "a": 26, + "d": 35 + }, + "mne/datasets/config.py": { + "a": 11, + "d": 13 + }, + "mne/datasets/tests/test_datasets.py": { + "a": 16, + "d": 2 + }, + "mne/datasets/utils.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9803.json b/doc/sphinxext/prs/9803.json new file mode 100644 index 00000000000..27637760eb1 --- /dev/null +++ b/doc/sphinxext/prs/9803.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ccc8f5cd09fd5fd7c766c10cf12b30853d1884e9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "mne/utils/_testing.py": { + "a": 8, + "d": 5 + }, + "mne/viz/evoked.py": { + "a": 1, + "d": 0 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 2 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/9804.json b/doc/sphinxext/prs/9804.json new file mode 100644 index 00000000000..a5f2c0337b7 --- /dev/null +++ b/doc/sphinxext/prs/9804.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "3c84129576713d74e1fd889c2691240909e44161", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 4, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 35, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 12, + "d": 3 + }, + "mne/viz/tests/test_3d.py": { + "a": 13, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9805.json b/doc/sphinxext/prs/9805.json new file mode 100644 index 00000000000..81f04d12364 --- /dev/null +++ b/doc/sphinxext/prs/9805.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "61608aa4038a888d1c1f7c9dd5bb1a02203719de", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "requirements_testing_extra.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9807.json b/doc/sphinxext/prs/9807.json new file mode 100644 index 00000000000..cd2440d9958 --- /dev/null +++ b/doc/sphinxext/prs/9807.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "aabdefdc97c035f8d269b6acdc4065a4ee21786c", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/datasets/_fetch.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9808.json b/doc/sphinxext/prs/9808.json new file mode 100644 index 00000000000..27274564fd0 --- /dev/null +++ b/doc/sphinxext/prs/9808.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "2e3aca50802415686a4223c711f8f1d535d407c9", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/nirx/nirx.py": { + "a": 8, + "d": 5 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 15, + "d": 4 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 18, + "d": 4 + }, + "mne/io/tests/test_raw.py": { + "a": 9, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/981.json b/doc/sphinxext/prs/981.json new file mode 100644 index 00000000000..2b4f3a3b999 --- /dev/null +++ b/doc/sphinxext/prs/981.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3ce0cdea0bfd52d90fb71435c802c8b082b0ae43", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/fiff/kit/coreg.py": { + "a": 1, + "d": 1 + }, + "mne/fiff/kit/kit.py": { + "a": 2, + "d": 2 + }, + "mne/source_estimate.py": { + "a": 14, + "d": 17 + }, + "mne/surface.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9810.json b/doc/sphinxext/prs/9810.json new file mode 100644 index 00000000000..bf7c6abef98 --- /dev/null +++ b/doc/sphinxext/prs/9810.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "8cb783813a6691fa9b55d12aa8c90690228868e9", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/dipole.py": { + "a": 24, + "d": 12 + }, + "mne/evoked.py": { + "a": 1, + "d": 1 + }, + "mne/forward/_make_forward.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/maxwell.py": { + "a": 1, + "d": 1 + }, + 
"mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_dipole.py": { + "a": 8, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9814.json b/doc/sphinxext/prs/9814.json new file mode 100644 index 00000000000..54bc289ce25 --- /dev/null +++ b/doc/sphinxext/prs/9814.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "dd5ca5b7c5a66c5a6afc31089e0bbb58e9e1f2c5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 6, + "d": 5 + }, + "mne/report/report.py": { + "a": 18, + "d": 23 + }, + "mne/utils/misc.py": { + "a": 6, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 2, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 15 + }, + "tutorials/io/10_reading_meg_data.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9815.json b/doc/sphinxext/prs/9815.json new file mode 100644 index 00000000000..53b62057041 --- /dev/null +++ b/doc/sphinxext/prs/9815.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "48a33e376843b7ecc0279805b0b9e24c996ba972", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/epochs.py": { + "a": 3, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 0 + }, + "mne/viz/epochs.py": { + "a": 6, + "d": 2 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9817.json b/doc/sphinxext/prs/9817.json new file mode 100644 index 00000000000..4e9308c4607 --- /dev/null +++ b/doc/sphinxext/prs/9817.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cfd743aa33350c94a9b41dd51e72f53977939de6", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/io/snirf/_snirf.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9818.json b/doc/sphinxext/prs/9818.json new file mode 100644 index 00000000000..9bb2c04c872 --- /dev/null +++ b/doc/sphinxext/prs/9818.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f3f0e086b4fbc18ebd3018770e0650604d70b9c5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/report/report.py": { + "a": 13, + "d": 8 + }, + "mne/viz/misc.py": { + "a": 44, + "d": 28 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9820.json b/doc/sphinxext/prs/9820.json new file mode 100644 index 00000000000..b9753ef1da0 --- /dev/null +++ b/doc/sphinxext/prs/9820.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "194ba0f8a849dbe16c66c3425e919b9a9f9f59eb", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/utils/docs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/ica.py": { + "a": 14, + "d": 15 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9822.json b/doc/sphinxext/prs/9822.json new file mode 100644 index 00000000000..de7bb2b8697 --- /dev/null +++ b/doc/sphinxext/prs/9822.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7154c6d16389c38447f16534847f7ae7503d42fc", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 0 + }, + "requirements_testing_extra.txt": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/9823.json b/doc/sphinxext/prs/9823.json new file mode 100644 index 00000000000..8005def2c8f --- /dev/null +++ b/doc/sphinxext/prs/9823.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "42c5a72d1f7e1981fb60fa98a75fddf46418ca0d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 3, + "d": 1 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9824.json b/doc/sphinxext/prs/9824.json new file mode 100644 index 00000000000..bf56a462c40 --- /dev/null +++ b/doc/sphinxext/prs/9824.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "118f37901e348aaed3972085b9c20a4f06241e5d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 21, + "d": 7 + }, + "mne/viz/tests/test_ica.py": { + "a": 9, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9825.json b/doc/sphinxext/prs/9825.json new file mode 100644 index 00000000000..d71569ab0bb --- /dev/null +++ b/doc/sphinxext/prs/9825.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "832b71dca7148bf68e6891cf9d6c308f1bc3fabc", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/ica.py": { + "a": 8, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9826.json b/doc/sphinxext/prs/9826.json new file mode 100644 index 00000000000..db719b568eb --- /dev/null +++ b/doc/sphinxext/prs/9826.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "adca0e6c4b25ad84406301c204d48a413fa657e1", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/topomap.py": { + "a": 10, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9827.json b/doc/sphinxext/prs/9827.json new file mode 100644 index 00000000000..6ae5b8056c1 --- /dev/null +++ b/doc/sphinxext/prs/9827.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "11864803848d08ce2abf1d379f4a6b0f03e133ee", + "authors": [ + { + "n": "Dominique Makowski", + "e": "D.Makowski@sussex.ac.uk" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 17, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9828.json b/doc/sphinxext/prs/9828.json new file mode 100644 index 00000000000..66b092c4288 --- /dev/null +++ b/doc/sphinxext/prs/9828.json @@ -0,0 +1,71 @@ +{ + "merge_commit_sha": "1b0f8280f9b3a93b58daacffd85598b63511e5b1", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/data/html_templates.py": { + "a": 35, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 92, + "d": 15 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 11, + "d": 0 + }, + "mne/report/report.py": { + "a": 370, + "d": 9 + }, + "mne/report/templates/ica.html": { + "a": 29, + "d": 0 + }, + "mne/report/tests/test_report.py": { + "a": 26, + 
"d": 3 + }, + "mne/viz/ica.py": { + "a": 3, + "d": 0 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 3, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 72, + "d": 0 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9829.json b/doc/sphinxext/prs/9829.json new file mode 100644 index 00000000000..af8323a611c --- /dev/null +++ b/doc/sphinxext/prs/9829.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4f74ee2a96c618b506afeda3c09e0bf578cc607a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "environment.yml": { + "a": 1, + "d": 0 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/report/report.py": { + "a": 14, + "d": 19 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 3 + }, + "requirements.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9831.json b/doc/sphinxext/prs/9831.json new file mode 100644 index 00000000000..15d5862a26e --- /dev/null +++ b/doc/sphinxext/prs/9831.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e285e91bb33c9b5855e09563ed8cb2e7a2b997ab", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/datasets/config.py": { + "a": 20, + "d": 18 + }, + "mne/io/edf/edf.py": { + "a": 8, + "d": 3 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 28, + "d": 8 + }, + "mne/tests/test_annotations.py": { + "a": 15, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9834.json b/doc/sphinxext/prs/9834.json new file mode 100644 index 00000000000..261e99fbae0 --- /dev/null +++ b/doc/sphinxext/prs/9834.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "22728cee967bdf8bdfb69002d92df49bd9de3948", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/minimum_norm/tests/test_inverse.py": { + "a": 4, + "d": 3 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 3 + }, + "mne/viz/backends/_pyvista.py": { + "a": 17, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9835.json b/doc/sphinxext/prs/9835.json new file mode 100644 index 00000000000..7f1f39aa8e3 --- /dev/null +++ b/doc/sphinxext/prs/9835.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "9a474f51fffe4e5da4179cedcd649dcf2b86d351", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 4, + "d": 5 + }, + "doc/install/index.rst": { + "a": 23, + "d": 8 + }, + "doc/install/mne_python.rst": { + "a": 4, + "d": 2 + }, + "doc/links.inc": { + "a": 2, + "d": 42 + }, + "requirements.txt": { + "a": 1, + "d": 1 + }, + "setup.py": { + "a": 30, + "d": 9 + }, + "tools/azure_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9837.json b/doc/sphinxext/prs/9837.json new file mode 100644 index 00000000000..e7cbbcc312f --- /dev/null +++ b/doc/sphinxext/prs/9837.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a04b6e518ce20ec0835f8813d5b7155b6bd7e5d5", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/9839.json b/doc/sphinxext/prs/9839.json new file mode 100644 index 00000000000..52521740f53 --- /dev/null +++ b/doc/sphinxext/prs/9839.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "718a14901f81de8a0261e1f7e2529378cce783c1", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/viz/backends/renderer.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/984.json b/doc/sphinxext/prs/984.json new file mode 100644 index 00000000000..a5a6b70647b --- /dev/null +++ b/doc/sphinxext/prs/984.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "dc9bfe6970f6b0bd9ff3c8bcbe4147c56b13201b", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/inverse/plot_compute_mne_inverse.py": { + "a": 9, + "d": 1 + }, + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 10, + "d": 0 + }, + "mne/fiff/evoked.py": { + "a": 146, + "d": 0 + }, + "mne/fiff/tests/test_evoked.py": { + "a": 45, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 74, + "d": 0 + }, + "mne/tests/test_source_estimate.py": { + "a": 28, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9840.json b/doc/sphinxext/prs/9840.json new file mode 100644 index 00000000000..77b1f9740a8 --- /dev/null +++ b/doc/sphinxext/prs/9840.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d32125d5da1996dd7537f652ecd022cc24055204", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 0 + }, + "mne/export/tests/test_export.py": { + "a": 1, + "d": 1 + }, + "mne/report/tests/test_report.py": { + "a": 2, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_misc.py": { + "a": 1, + "d": 0 + }, + "setup.cfg": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9841.json b/doc/sphinxext/prs/9841.json new file mode 100644 index 00000000000..3de962114b2 --- /dev/null +++ b/doc/sphinxext/prs/9841.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "402077808100f5c0f6e90ba64176bab38ec32295", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 12, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9843.json b/doc/sphinxext/prs/9843.json new file mode 100644 index 00000000000..07272830916 --- /dev/null +++ b/doc/sphinxext/prs/9843.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "44f4b0f027f8f926d155ea0dc23c17de21d0488e", + "authors": [ + { + "n": "Rob Luke", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/tests/test_interpolation.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_standard_montage.py": { + "a": 1, + "d": 1 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 5, + "d": 5 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 7, + "d": 1 + 
}, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 4, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_scalp_coupling_index.py": { + "a": 1, + "d": 1 + }, + "mne/viz/conftest.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9845.json b/doc/sphinxext/prs/9845.json new file mode 100644 index 00000000000..562e46ab09a --- /dev/null +++ b/doc/sphinxext/prs/9845.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "022f1a8f69c52062f956f015fba3c3b6f5ee7a1d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 11, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9847.json b/doc/sphinxext/prs/9847.json new file mode 100644 index 00000000000..7dfb9f36ee1 --- /dev/null +++ b/doc/sphinxext/prs/9847.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "138f0459ba19de9a84ea0a8b5799df49d45bce54", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9851.json b/doc/sphinxext/prs/9851.json new file mode 100644 index 00000000000..302548d336d --- /dev/null +++ b/doc/sphinxext/prs/9851.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "556751d8e23025deb281902ad04f2c8b40b27171", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "examples/time_frequency/time_frequency_erds.py": { + "a": 46, + "d": 49 + }, + "mne/time_frequency/tfr.py": { + "a": 10, + "d": 4 + }, + "mne/viz/tests/test_utils.py": { + "a": 1, + "d": 19 + }, + "mne/viz/topo.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 10, + "d": 11 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9858.json b/doc/sphinxext/prs/9858.json new file mode 100644 index 00000000000..d6ad12811be --- /dev/null +++ b/doc/sphinxext/prs/9858.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b5b3f78c1b8b7ffacf27f04149428d6ea541ff84", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 120, + "d": 81 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 5, + "d": 6 + }, + "mne/utils/docs.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9859.json b/doc/sphinxext/prs/9859.json new file mode 100644 index 00000000000..af73e2b11ea --- /dev/null +++ b/doc/sphinxext/prs/9859.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "b8d1cb9512d8a04061180fc063e5e2256a16bd55", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 42, + "d": 8 + }, + "mne/tests/test_epochs.py": { + "a": 14, + "d": 1 + } + } +} \ No newline at end of file diff 
--git a/doc/sphinxext/prs/9860.json b/doc/sphinxext/prs/9860.json new file mode 100644 index 00000000000..fa6b11e8f5e --- /dev/null +++ b/doc/sphinxext/prs/9860.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "75a13167e3d89fae36aeda7da2f34cd00cd3e995", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9861.json b/doc/sphinxext/prs/9861.json new file mode 100644 index 00000000000..6efb83cee2d --- /dev/null +++ b/doc/sphinxext/prs/9861.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "668c28b22d05a334d3d5202d9cb882eac2f038e0", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 3, + "d": 1 + }, + "mne/report/tests/test_report.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9862.json b/doc/sphinxext/prs/9862.json new file mode 100644 index 00000000000..84b4a75f2ff --- /dev/null +++ b/doc/sphinxext/prs/9862.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "4c286e9159539e1e51163e01c0ae241cbdd79fb5", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 6, + "d": 8 + }, + "mne/report/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 3 + }, + "tutorials/intro/70_report.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9863.json b/doc/sphinxext/prs/9863.json new file mode 100644 index 00000000000..88b506685e3 --- /dev/null +++ b/doc/sphinxext/prs/9863.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "e9a5dbab56785bfe2cd56c06abcd0c73b3c2f80f", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 59, + "d": 38 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9864.json b/doc/sphinxext/prs/9864.json new file mode 100644 index 00000000000..3471e801d2e --- /dev/null +++ b/doc/sphinxext/prs/9864.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "bf6a510c9f1c96f6f043a0d1d1dfb02d7d6dc34d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 0, + "d": 1 + }, + "mne/viz/_mpl_figure.py": { + "a": 1, + "d": 0 + }, + "tutorials/epochs/20_visualize_epochs.py": { + "a": 5, + "d": 5 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9866.json b/doc/sphinxext/prs/9866.json new file mode 100644 index 00000000000..55962342003 --- /dev/null +++ b/doc/sphinxext/prs/9866.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "9e6b3c8e670f0415116b94f2631eae781dbede38", + "authors": [ + { + "n": "Riessarius", + "e": "rie.acad@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 3, + "d": 1 + }, + 
"mne/time_frequency/multitaper.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/psd.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9867.json b/doc/sphinxext/prs/9867.json new file mode 100644 index 00000000000..b3d3f6ea065 --- /dev/null +++ b/doc/sphinxext/prs/9867.json @@ -0,0 +1,419 @@ +{ + "merge_commit_sha": "fde89c53dedd89338346bebd1ac67335fb80375f", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/decoding/decoding_xdawn_eeg.py": { + "a": 4, + "d": 4 + }, + "examples/inverse/mne_cov_power.py": { + "a": 1, + "d": 1 + }, + "examples/simulation/simulate_evoked_data.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/temporal_whitening.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 1 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 2, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 6, + "d": 3 + }, + "mne/channels/tests/test_layout.py": { + "a": 6, + "d": 2 + }, + "mne/channels/tests/test_montage.py": { + "a": 2, + "d": 1 + }, + "mne/chpi.py": { + "a": 4, + "d": 2 + }, + "mne/decoding/csp.py": { + "a": 4, + "d": 2 + }, + "mne/decoding/transformer.py": { + "a": 4, + "d": 2 + }, + "mne/dipole.py": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 2, + "d": 1 + }, + "mne/evoked.py": { + "a": 5, + "d": 2 + }, + "mne/filter.py": { + "a": 7, + "d": 4 + }, + "mne/forward/forward.py": { + "a": 15, + "d": 13 + }, + "mne/gui/_file_traits.py": { + "a": 1, + "d": 0 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 6, + "d": 5 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 2 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 2, + "d": 1 + }, + "mne/io/artemis123/artemis123.py": { + "a": 18, + "d": 11 + }, + "mne/io/base.py": { + "a": 7, + "d": 5 + }, + "mne/io/brainvision/brainvision.py": { + "a": 4, + "d": 2 + }, + "mne/io/bti/bti.py": { + "a": 16, + "d": 13 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 0 + }, + "mne/io/ctf/ctf.py": { + "a": 3, + "d": 2 + }, + "mne/io/ctf/info.py": { + "a": 1, + "d": 0 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 2, + "d": 1 + }, + "mne/io/curry/curry.py": { + "a": 12, + "d": 8 + }, + "mne/io/edf/edf.py": { + "a": 9, + "d": 5 + }, + "mne/io/egi/egi.py": { + "a": 1, + "d": 0 + }, + "mne/io/egi/egimff.py": { + "a": 8, + "d": 4 + }, + "mne/io/fieldtrip/tests/helpers.py": { + "a": 10, + "d": 6 + }, + "mne/io/fieldtrip/utils.py": { + "a": 2, + "d": 1 + }, + "mne/io/fiff/raw.py": { + "a": 2, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 7, + "d": 4 + }, + "mne/io/hitachi/hitachi.py": { + "a": 4, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 4, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 5, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 216, + "d": 37 + }, + "mne/io/nedf/nedf.py": { + "a": 2, + "d": 1 + }, + "mne/io/nicolet/nicolet.py": { + "a": 1, + "d": 0 + }, + "mne/io/nihon/nihon.py": { + "a": 2, + "d": 1 + }, + "mne/io/nirx/nirx.py": { + "a": 2, + "d": 1 + }, + "mne/io/persyst/persyst.py": { + "a": 5, + "d": 4 + }, + "mne/io/pick.py": { + "a": 9, + "d": 5 + }, + "mne/io/proj.py": { + "a": 17, + "d": 9 + }, + "mne/io/reference.py": { + "a": 8, + "d": 4 + }, + "mne/io/snirf/_snirf.py": { + "a": 17, + "d": 13 + }, + "mne/io/tests/test_meas_info.py": { + 
"a": 106, + "d": 44 + }, + "mne/io/tests/test_reference.py": { + "a": 10, + "d": 5 + }, + "mne/minimum_norm/resolution_matrix.py": { + "a": 3, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 11, + "d": 8 + }, + "mne/minimum_norm/time_frequency.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/_csd.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 5, + "d": 4 + }, + "mne/preprocessing/ica.py": { + "a": 11, + "d": 8 + }, + "mne/preprocessing/maxwell.py": { + "a": 10, + "d": 6 + }, + "mne/preprocessing/realign.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_csd.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_ecg.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_flat.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 14, + "d": 7 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 2, + "d": 1 + }, + "mne/report/tests/test_report.py": { + "a": 3, + "d": 1 + }, + "mne/simulation/tests/test_evoked.py": { + "a": 2, + "d": 1 + }, + "mne/simulation/tests/test_raw.py": { + "a": 5, + "d": 3 + }, + "mne/stats/regression.py": { + "a": 2, + "d": 1 + }, + "mne/stats/tests/test_regression.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_chpi.py": { + "a": 22, + "d": 15 + }, + "mne/tests/test_cov.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 43, + "d": 25 + }, + "mne/tests/test_event.py": { + "a": 2, + "d": 1 + }, + "mne/tests/test_evoked.py": { + "a": 10, + "d": 5 + }, + "mne/tests/test_proj.py": { + "a": 10, + "d": 5 + }, + "mne/tests/test_source_estimate.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 2, + "d": 1 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 7, + "d": 5 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 1 + }, + "mne/utils/_testing.py": { + "a": 2, + "d": 2 + }, + "mne/utils/check.py": { + "a": 2, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 3, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 4, + "d": 2 + }, + "mne/viz/ica.py": { + "a": 4, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 2, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_3d.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_epochs.py": { + "a": 2, + "d": 1 + }, + "mne/viz/tests/test_evoked.py": { + "a": 4, + "d": 2 + }, + "mne/viz/tests/test_ica.py": { + "a": 6, + "d": 3 + }, + "mne/viz/tests/test_raw.py": { + "a": 14, + "d": 7 + }, + "mne/viz/tests/test_topomap.py": { + "a": 6, + "d": 3 + }, + "mne/viz/topo.py": { + "a": 3, + "d": 4 + }, + "mne/viz/topomap.py": { + "a": 4, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 4 + }, + "tutorials/time-freq/50_ssvep.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9869.json b/doc/sphinxext/prs/9869.json new file mode 100644 index 00000000000..c7cb38f3ed2 --- /dev/null +++ b/doc/sphinxext/prs/9869.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "c3a483fbc0418f41d8046fca68ee9452931cd082", + "authors": [ + { + "n": "Denis A. 
Engemann", + "e": null + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 23, + "d": 9 + }, + "mne/io/base.py": { + "a": 1, + "d": 2 + }, + "mne/io/utils.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_epochs.py": { + "a": 27, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 7, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9871.json b/doc/sphinxext/prs/9871.json new file mode 100644 index 00000000000..e3449f9f404 --- /dev/null +++ b/doc/sphinxext/prs/9871.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "3ed5afdb484122d5bb0d829ae4b48cd1101ae9c4", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 59, + "d": 25 + }, + "mne/report/templates/epochs.html": { + "a": 1, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9873.json b/doc/sphinxext/prs/9873.json new file mode 100644 index 00000000000..3a9cf50775b --- /dev/null +++ b/doc/sphinxext/prs/9873.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2385d91b3681d1ce6f02d0916c2b39929150a3f4", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 3, + "d": 0 + }, + "tutorials/time-freq/20_sensors_time_frequency.py": { + "a": 13, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9874.json b/doc/sphinxext/prs/9874.json new file mode 100644 index 00000000000..dab29ec5662 --- /dev/null +++ b/doc/sphinxext/prs/9874.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "2dd65c0cf74667b0dc5978fd1cdfa0afc465d927", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "doc/visualization.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 16, + "d": 0 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 5, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9875.json b/doc/sphinxext/prs/9875.json new file mode 100644 index 00000000000..84488b631f0 --- /dev/null +++ b/doc/sphinxext/prs/9875.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "598b7253228c27eeb953d15d7cdab908b1bcf83c", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/coreg.py": { + "a": 52, + "d": 28 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9876.json b/doc/sphinxext/prs/9876.json new file mode 100644 index 00000000000..1cc9f96c9ab --- /dev/null +++ b/doc/sphinxext/prs/9876.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f87be3000ce333ff9ccfddc45b47a2da7d92d69c", + "authors": [ + { + 
"n": "Clemens Brunner", + "e": null + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "mne/io/reference.py": { + "a": 8, + "d": 9 + }, + "mne/io/tests/test_reference.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9877.json b/doc/sphinxext/prs/9877.json new file mode 100644 index 00000000000..aa7ecfb23b5 --- /dev/null +++ b/doc/sphinxext/prs/9877.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "5226862369e28682e919af9eb69fa5014565d7eb", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 83, + "d": 1 + }, + "mne/gui/tests/test_gui_api.py": { + "a": 117, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 43, + "d": 4 + }, + "mne/viz/backends/_notebook.py": { + "a": 114, + "d": 10 + }, + "mne/viz/backends/_qt.py": { + "a": 147, + "d": 18 + }, + "mne/viz/conftest.py": { + "a": 0, + "d": 84 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9878.json b/doc/sphinxext/prs/9878.json new file mode 100644 index 00000000000..5bf70f7c7dd --- /dev/null +++ b/doc/sphinxext/prs/9878.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6ca1752cc3ad49cd8cd9b162e98ec3e4e07da99f", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/viz/_3d.py": { + "a": 107, + "d": 32 + }, + "mne/viz/tests/test_3d.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/988.json b/doc/sphinxext/prs/988.json new file mode 100644 index 00000000000..ba3aa5c2b3c --- /dev/null +++ b/doc/sphinxext/prs/988.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f114073096311ba775eaaa82f8500ca28df51ac1", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/fiff/evoked.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9880.json b/doc/sphinxext/prs/9880.json new file mode 100644 index 00000000000..9597ee53381 --- /dev/null +++ b/doc/sphinxext/prs/9880.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ce04060614315ae048473c387e1f74eea2ed1d2d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/tests/test_transforms.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9881.json b/doc/sphinxext/prs/9881.json new file mode 100644 index 00000000000..2fd01b4c6b4 --- /dev/null +++ b/doc/sphinxext/prs/9881.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "44ac64b42a942846061e42a2f0064f430df56153", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/channels/layout.py": { + "a": 1, + "d": 1 + }, + "mne/channels/tests/test_layout.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 59, + "d": 25 + }, + "mne/report/tests/test_report.py": { + "a": 42, + "d": 0 + }, + "mne/utils/check.py": { + "a": 20, + "d": 3 + }, + "mne/utils/tests/test_check.py": { + "a": 29, + "d": 2 + }, + "mne/viz/evoked.py": { + "a": 3, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 3, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9884.json b/doc/sphinxext/prs/9884.json new file mode 100644 index 00000000000..dfe968e902b --- /dev/null +++ b/doc/sphinxext/prs/9884.json @@ -0,0 
+1,55 @@ +{ + "merge_commit_sha": "0a70b126e12315a713f56f0151620dbb218c219a", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + ".gitignore": { + "a": 0, + "d": 1 + }, + "brain.png": { + "a": 0, + "d": 0 + }, + "figures/Figure_1.png": { + "a": 0, + "d": 0 + }, + "figures/Figure_2.png": { + "a": 0, + "d": 0 + }, + "figures/Figure_3.png": { + "a": 0, + "d": 0 + }, + "figures/Figure_4.png": { + "a": 0, + "d": 0 + }, + "figures/Figure_5.png": { + "a": 0, + "d": 0 + }, + "figures/Figure_6.png": { + "a": 0, + "d": 0 + }, + "paper.bib": { + "a": 110, + "d": 0 + }, + "paper.md": { + "a": 87, + "d": 0 + }, + "test.png": { + "a": 0, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9886.json b/doc/sphinxext/prs/9886.json new file mode 100644 index 00000000000..480aca1b4e6 --- /dev/null +++ b/doc/sphinxext/prs/9886.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "549096c35b775cbdddbc1936f72a1fc843055508", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".coveragerc": { + "a": 0, + "d": 1 + }, + "azure-pipelines.yml": { + "a": 4, + "d": 2 + }, + "mne/utils/config.py": { + "a": 4, + "d": 17 + }, + "requirements_testing_extra.txt": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9887.json b/doc/sphinxext/prs/9887.json new file mode 100644 index 00000000000..e6bdcf328f7 --- /dev/null +++ b/doc/sphinxext/prs/9887.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "68dcfd7d9a46d1d6f04151aac3877bcebf0c5c6e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "mne/report/report.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9888.json b/doc/sphinxext/prs/9888.json new file mode 100644 index 00000000000..7114a626f18 --- /dev/null +++ b/doc/sphinxext/prs/9888.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "6920db460f9bb709f2c78ffcfaf625106d66a7ad", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_chpi.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9889.json b/doc/sphinxext/prs/9889.json new file mode 100644 index 00000000000..f8cb26bb788 --- /dev/null +++ b/doc/sphinxext/prs/9889.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8e435cf5d19ce2f56180360633180181248f6ad7", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/io/base.py": { + "a": 7, + "d": 7 + }, + "mne/io/tests/test_raw.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9890.json b/doc/sphinxext/prs/9890.json new file mode 100644 index 00000000000..c069a109b91 --- /dev/null +++ b/doc/sphinxext/prs/9890.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "babf7780c8479a7d260a98ae11b67372c25bdb7d", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "tutorials/evoked/30_eeg_erp.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9891.json b/doc/sphinxext/prs/9891.json new file mode 100644 index 00000000000..fc7ba51d483 --- /dev/null +++ b/doc/sphinxext/prs/9891.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "3887bb2900f3eb1a8101658e8bd87d38b761e301", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + 
], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 29, + "d": 34 + }, + "tutorials/intro/70_report.py": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9892.json b/doc/sphinxext/prs/9892.json new file mode 100644 index 00000000000..386ce326ba2 --- /dev/null +++ b/doc/sphinxext/prs/9892.json @@ -0,0 +1,67 @@ +{ + "merge_commit_sha": "c7734446c054e95485a27bf9cd132e4b19a1e281", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + }, + { + "n": "johnsam7", + "e": "johnsam@mit.edu" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 2, + "d": 0 + }, + "doc/preprocessing.rst": { + "a": 1, + "d": 0 + }, + "doc/references.bib": { + "a": 10, + "d": 0 + }, + "examples/preprocessing/css.py": { + "a": 95, + "d": 0 + }, + "mne/preprocessing/__init__.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/_css.py": { + "a": 91, + "d": 0 + }, + "mne/preprocessing/tests/test_css.py": { + "a": 37, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9893.json b/doc/sphinxext/prs/9893.json new file mode 100644 index 00000000000..26df274e4af --- /dev/null +++ b/doc/sphinxext/prs/9893.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "e3c46808b3f116c94ba5866179499a55420f0c2c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 34, + "d": 7 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 29, + "d": 1 + }, + "mne/source_space.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9896.json b/doc/sphinxext/prs/9896.json new file mode 100644 index 00000000000..eb5d2a52a59 --- /dev/null +++ b/doc/sphinxext/prs/9896.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "d2b4de1811e9ba146b2f6b86c1b510e31640745b", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/annotations.py": { + "a": 2, + "d": 2 + }, + "mne/io/brainvision/brainvision.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9897.json b/doc/sphinxext/prs/9897.json new file mode 100644 index 00000000000..2e66a29e80c --- /dev/null +++ b/doc/sphinxext/prs/9897.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "131c67682a2b1f1343afdefeb814db22543d539f", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/filter.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_filter.py": { + "a": 26, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9898.json b/doc/sphinxext/prs/9898.json new file mode 100644 index 00000000000..073d9a756f2 --- /dev/null +++ b/doc/sphinxext/prs/9898.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "940125813cda9212767f8a97774841c49d35f6c4", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + 
"doc/changes/names.inc": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9899.json b/doc/sphinxext/prs/9899.json new file mode 100644 index 00000000000..fe47c1ee48c --- /dev/null +++ b/doc/sphinxext/prs/9899.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "d336828ffaba5095950cbcb7691073a466f7a258", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/conf.py": { + "a": 3, + "d": 1 + }, + "mne/fixes.py": { + "a": 0, + "d": 1 + }, + "mne/utils/linalg.py": { + "a": 6, + "d": 2 + }, + "mne/viz/_brain/_brain.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/990.json b/doc/sphinxext/prs/990.json new file mode 100644 index 00000000000..7faac6f7be6 --- /dev/null +++ b/doc/sphinxext/prs/990.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a9c5a18c1e2dbb801edc08081e33dafcfe05ff66", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "doc/source/whats_new.rst": { + "a": 2, + "d": 0 + }, + "examples/plot_topo_customized.py": { + "a": 58, + "d": 0 + }, + "mne/viz.py": { + "a": 162, + "d": 86 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9900.json b/doc/sphinxext/prs/9900.json new file mode 100644 index 00000000000..c578d6dc8f0 --- /dev/null +++ b/doc/sphinxext/prs/9900.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "4b47a143ac787fc5791e43be148e5aa498db19a8", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/glossary.rst": { + "a": 7, + "d": 3 + }, + "mne/beamformer/tests/test_dics.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ecg.py": { + "a": 11, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 2 + }, + "mne/report/report.py": { + "a": 6, + "d": 6 + }, + "mne/report/tests/test_report.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 27, + "d": 17 + }, + "mne/viz/misc.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9901.json b/doc/sphinxext/prs/9901.json new file mode 100644 index 00000000000..ce2d1c689a2 --- /dev/null +++ b/doc/sphinxext/prs/9901.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "616b9430392d0d2bbf73063e738c7b8c378defcd", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 27, + "d": 23 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9902.json b/doc/sphinxext/prs/9902.json new file mode 100644 index 00000000000..6506d0b29bd --- /dev/null +++ b/doc/sphinxext/prs/9902.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "cd76bf2d839c6003d919a385d6daf3ebb808f340", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/decoding/search_light.py": { + "a": 9, + "d": 26 + }, + "mne/utils/docs.py": { + "a": 19, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9903.json b/doc/sphinxext/prs/9903.json new file mode 100644 index 00000000000..ea892f98abd --- /dev/null +++ b/doc/sphinxext/prs/9903.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "cd50d8e68708b8667f04b1efb9b2cacf21001a28", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/tests/test_epochs.py": { + "a": 31, + "d": 27 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9904.json 
b/doc/sphinxext/prs/9904.json new file mode 100644 index 00000000000..fe775e9fa53 --- /dev/null +++ b/doc/sphinxext/prs/9904.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "3a9b06652f644a8bda3c99f91705396b7373368a", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 8, + "d": 0 + }, + "doc/mri.rst": { + "a": 0, + "d": 1 + }, + "examples/datasets/spm_faces_dataset_sgskip.py": { + "a": 1, + "d": 1 + }, + "examples/inverse/label_from_stc.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 11, + "d": 2 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/gui/__init__.py": { + "a": 7, + "d": 1 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 5, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 12, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9907.json b/doc/sphinxext/prs/9907.json new file mode 100644 index 00000000000..b74088a4817 --- /dev/null +++ b/doc/sphinxext/prs/9907.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bf945f19d221a4955b59758fdb9777d35315c042", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/surface.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9909.json b/doc/sphinxext/prs/9909.json new file mode 100644 index 00000000000..421aaec7795 --- /dev/null +++ b/doc/sphinxext/prs/9909.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "b005ea698a1ef53e0a4bbc2e3d411201bd94307c", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 3, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 2, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 19, + "d": 9 + }, + "mne/utils/tests/test_docs.py": { + "a": 16, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/991.json b/doc/sphinxext/prs/991.json new file mode 100644 index 00000000000..8975dd96ce4 --- /dev/null +++ b/doc/sphinxext/prs/991.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "7870269da65ec9341a09d9c0ef60bd4eb760d409", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": { + "mne/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/datasets/utils.py": { + "a": 8, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9911.json b/doc/sphinxext/prs/9911.json new file mode 100644 index 00000000000..78017c724df --- /dev/null +++ b/doc/sphinxext/prs/9911.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f6b1ba7a52a0907e1f04782e055409a2d12c330a", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9913.json b/doc/sphinxext/prs/9913.json new file mode 100644 index 00000000000..71159738c6b --- /dev/null +++ b/doc/sphinxext/prs/9913.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "3eca0adfadbe1a9ac0e45845c57278cf7d19ec21", + "authors": [ + { + "n": "Guillaume Favelier", + "e": "guillaume.favelier@gmail.com" + } + ], + "changes": { + "mne/gui/_coreg.py": { + "a": 1, + "d": 1 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 18, 
+ "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9914.json b/doc/sphinxext/prs/9914.json new file mode 100644 index 00000000000..44569617c17 --- /dev/null +++ b/doc/sphinxext/prs/9914.json @@ -0,0 +1,55 @@ +{ + "merge_commit_sha": "dac4f16903171c5b8a03b1a35b9c99e7bb753e25", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 1 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 1 + }, + "mne/io/fieldtrip/utils.py": { + "a": 2, + "d": 2 + }, + "mne/io/hitachi/hitachi.py": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 10, + "d": 0 + }, + "mne/io/nirx/nirx.py": { + "a": 1, + "d": 1 + }, + "mne/io/reference.py": { + "a": 2, + "d": 1 + }, + "mne/simulation/raw.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_cov.py": { + "a": 2, + "d": 1 + }, + "tutorials/simulation/80_dics.py": { + "a": 1, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9915.json b/doc/sphinxext/prs/9915.json new file mode 100644 index 00000000000..700670f066e --- /dev/null +++ b/doc/sphinxext/prs/9915.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "ee164e673af146c60fa9cd6cb9925316cec9117a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/versions.json": { + "a": 46, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9916.json b/doc/sphinxext/prs/9916.json new file mode 100644 index 00000000000..1588ed6a2fe --- /dev/null +++ b/doc/sphinxext/prs/9916.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "4679e87ece4bfd5174e56b595671088a5da6e925", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 0, + "d": 11 + }, + "doc/_static/versions.json": { + "a": 6, + "d": 3 + }, + "doc/_templates/version-switcher.html": { + "a": 0, + "d": 11 + }, + "doc/conf.py": { + "a": 6, + "d": 17 + }, + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9917.json b/doc/sphinxext/prs/9917.json new file mode 100644 index 00000000000..22cbad89c93 --- /dev/null +++ b/doc/sphinxext/prs/9917.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e8dd72b3920e6f7d9ee6de315e76cec51ba9153c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 15, + "d": 11 + }, + "mne/conftest.py": { + "a": 1, + "d": 0 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 1, + "d": 0 + }, + "mne/report/tests/test_report.py": { + "a": 2, + "d": 0 + }, + "mne/surface.py": { + "a": 1, + "d": 0 + }, + "mne/tests/test_bem.py": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9918.json b/doc/sphinxext/prs/9918.json new file mode 100644 index 00000000000..0229aca817a --- /dev/null +++ b/doc/sphinxext/prs/9918.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "f10502b4fa861738d72f581cfb93b5b58117de48", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 7, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/992.json b/doc/sphinxext/prs/992.json new file mode 100644 index 00000000000..8b5c3e36b49 --- /dev/null +++ b/doc/sphinxext/prs/992.json @@ -0,0 +1,15 
@@ +{ + "merge_commit_sha": "45eafe53e5974ed62710ea5bada516f9e20e2de1", + "authors": [ + { + "n": "Denis A. Engemann", + "e": null + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 8, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9920.json b/doc/sphinxext/prs/9920.json new file mode 100644 index 00000000000..37760062488 --- /dev/null +++ b/doc/sphinxext/prs/9920.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "f078b9076d0d9ca83bb5e973315066a3c4911e24", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/channels/channels.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_channels.py": { + "a": 15, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9921.json b/doc/sphinxext/prs/9921.json new file mode 100644 index 00000000000..bb0fec5635a --- /dev/null +++ b/doc/sphinxext/prs/9921.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "47c011266cc4df88ca92ad122317f96886347538", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/_static/blender_import_obj/blender_import_obj4.jpg": { + "a": 0, + "d": 0 + }, + "tutorials/forward/80_fix_bem_in_blender.py": { + "a": 22, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9922.json b/doc/sphinxext/prs/9922.json new file mode 100644 index 00000000000..fa1bc1c6ab1 --- /dev/null +++ b/doc/sphinxext/prs/9922.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "870d867c8b60697a5fef8cb353e6a51e209d99c3", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/kit/kit.py": { + "a": 3, + "d": 3 + }, + "mne/io/meas_info.py": { + "a": 101, + "d": 67 + }, + "mne/io/tests/test_meas_info.py": { + "a": 0, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9924.json b/doc/sphinxext/prs/9924.json new file mode 100644 index 00000000000..32c3515c138 --- /dev/null +++ b/doc/sphinxext/prs/9924.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5d1f34ce0ae4f5b2d6175f67eb598daf70df30f7", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9925.json b/doc/sphinxext/prs/9925.json new file mode 100644 index 00000000000..37e2e03c994 --- /dev/null +++ b/doc/sphinxext/prs/9925.json @@ -0,0 +1,339 @@ +{ + "merge_commit_sha": "b97da4357f9ffb584eef5949c958187b3a7cc566", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_dics.py": { + "a": 2, + "d": 2 + }, + "mne/beamformer/tests/test_lcmv.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_channels.py": { + "a": 2, + "d": 2 + }, + "mne/channels/tests/test_layout.py": { + "a": 8, + "d": 8 + }, + "mne/channels/tests/test_montage.py": { + "a": 22, + "d": 21 + }, + "mne/commands/tests/test_commands.py": { + "a": 20, + "d": 20 + }, + "mne/conftest.py": { + "a": 3, + "d": 3 + }, + "mne/datasets/sleep_physionet/tests/test_physionet.py": { + "a": 6, + "d": 6 + }, + "mne/datasets/tests/test_datasets.py": { + 
"a": 22, + "d": 22 + }, + "mne/export/tests/test_export.py": { + "a": 6, + "d": 6 + }, + "mne/forward/tests/test_forward.py": { + "a": 9, + "d": 9 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 15, + "d": 15 + }, + "mne/gui/_coreg.py": { + "a": 2, + "d": 0 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 6, + "d": 9 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 4, + "d": 4 + }, + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 4, + "d": 4 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 4, + "d": 4 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 2, + "d": 2 + }, + "mne/io/artemis123/tests/test_artemis123.py": { + "a": 2, + "d": 2 + }, + "mne/io/base.py": { + "a": 3, + "d": 0 + }, + "mne/io/brainvision/tests/test_brainvision.py": { + "a": 23, + "d": 23 + }, + "mne/io/ctf/tests/test_ctf.py": { + "a": 5, + "d": 5 + }, + "mne/io/curry/tests/test_curry.py": { + "a": 9, + "d": 9 + }, + "mne/io/edf/tests/test_edf.py": { + "a": 18, + "d": 16 + }, + "mne/io/edf/tests/test_gdf.py": { + "a": 2, + "d": 2 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 18, + "d": 18 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 4, + "d": 4 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 73, + "d": 71 + }, + "mne/io/hitachi/tests/test_hitachi.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/tests/test_coreg.py": { + "a": 2, + "d": 2 + }, + "mne/io/kit/tests/test_kit.py": { + "a": 4, + "d": 4 + }, + "mne/io/nirx/tests/test_nirx.py": { + "a": 12, + "d": 12 + }, + "mne/io/persyst/tests/test_persyst.py": { + "a": 8, + "d": 8 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 3, + "d": 3 + }, + "mne/io/tests/test_compensator.py": { + "a": 4, + "d": 4 + }, + "mne/io/tests/test_constants.py": { + "a": 11, + "d": 10 + }, + "mne/io/tests/test_meas_info.py": { + "a": 28, + "d": 28 + }, + "mne/io/tests/test_what.py": { + "a": 2, + "d": 2 + }, + "mne/io/tests/test_write.py": { + "a": 2, + "d": 2 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 12, + "d": 11 + }, + "mne/minimum_norm/tests/test_snr.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/ieeg/tests/test_projection.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 3, + "d": 3 + }, + "mne/preprocessing/nirs/tests/test_scalp_coupling_index.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_fine_cal.py": { + "a": 2, + "d": 2 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 17, + "d": 17 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 8, + "d": 8 + }, + "mne/report/tests/test_report.py": { + "a": 40, + "d": 40 + }, + "mne/simulation/tests/test_raw.py": { + "a": 2, + "d": 2 + }, + "mne/stats/tests/test_cluster_level.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_annotations.py": { + "a": 34, + "d": 28 + }, + "mne/tests/test_bem.py": { + "a": 15, + "d": 15 + }, + "mne/tests/test_chpi.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_coreg.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_cov.py": { + "a": 17, + "d": 17 + }, + "mne/tests/test_dipole.py": { + "a": 14, + "d": 14 + }, + "mne/tests/test_epochs.py": { + "a": 47, + "d": 47 + }, + "mne/tests/test_event.py": { + "a": 7, + "d": 7 + }, + "mne/tests/test_evoked.py": { + "a": 19, + "d": 19 + }, + "mne/tests/test_filter.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_freesurfer.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_label.py": { + "a": 8, + "d": 8 + }, + 
"mne/tests/test_line_endings.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_morph.py": { + "a": 10, + "d": 10 + }, + "mne/tests/test_morph_map.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_proj.py": { + "a": 4, + "d": 4 + }, + "mne/tests/test_source_estimate.py": { + "a": 27, + "d": 26 + }, + "mne/tests/test_source_space.py": { + "a": 30, + "d": 29 + }, + "mne/tests/test_surface.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_transforms.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tests/test_csd.py": { + "a": 4, + "d": 4 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 2, + "d": 2 + }, + "mne/utils/tests/test_check.py": { + "a": 4, + "d": 5 + }, + "mne/utils/tests/test_config.py": { + "a": 2, + "d": 2 + }, + "mne/utils/tests/test_logging.py": { + "a": 4, + "d": 4 + }, + "mne/utils/tests/test_numerics.py": { + "a": 2, + "d": 2 + }, + "mne/utils/tests/test_testing.py": { + "a": 6, + "d": 5 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 18, + "d": 16 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 2, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 5, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9927.json b/doc/sphinxext/prs/9927.json new file mode 100644 index 00000000000..d7040edd664 --- /dev/null +++ b/doc/sphinxext/prs/9927.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8ba1888704d7611be3a0a22aa38a024287b2de46", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "mne/viz/evoked.py": { + "a": 39, + "d": 4 + }, + "mne/viz/topo.py": { + "a": 11, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9928.json b/doc/sphinxext/prs/9928.json new file mode 100644 index 00000000000..5c0a5de5063 --- /dev/null +++ b/doc/sphinxext/prs/9928.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "9cbff29afd2cd7bb4063108dd243998149134257", + "authors": [ + { + "n": "Luke Bloy", + "e": "luke.bloy@gmail.com" + } + ], + "changes": { + "mne/coreg.py": { + "a": 11, + "d": 5 + }, + "mne/gui/_coreg.py": { + "a": 46, + "d": 12 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 45, + "d": 0 + }, + "mne/io/snirf/_snirf.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_coreg.py": { + "a": 25, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9929.json b/doc/sphinxext/prs/9929.json new file mode 100644 index 00000000000..0edd364cae7 --- /dev/null +++ b/doc/sphinxext/prs/9929.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "f6213029576560493a3c5c5f4bcef63e24ffc108", + "authors": [ + { + "n": "Rob Luke", + "e": null + } + ], + "changes": { + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/snirf/_snirf.py": { + "a": 88, + "d": 53 + }, + "mne/io/snirf/tests/test_snirf.py": { + "a": 48, + "d": 19 + }, + "mne/io/tests/test_raw.py": { + "a": 4, + "d": 0 + }, + "mne/preprocessing/nirs/nirs.py": { + "a": 47, + "d": 29 + }, + "tutorials/io/30_reading_fnirs_data.py": { + "a": 20, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/993.json b/doc/sphinxext/prs/993.json new file mode 100644 index 00000000000..307de033f15 --- /dev/null +++ b/doc/sphinxext/prs/993.json @@ -0,0 +1,10 @@ +{ + "merge_commit_sha": "fc15283fcbcdc6c6dad5764a9c27720910e2d6ff", + "authors": [ + { + "n": "Tal Linzen", + "e": "linzen@nyu.edu" + } + ], + "changes": {} +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9933.json b/doc/sphinxext/prs/9933.json new file mode 100644 index 00000000000..a1922996521 --- 
/dev/null +++ b/doc/sphinxext/prs/9933.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a1d4eb907b1ed30c8bc713c0f315c7d481e0dd57", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "examples/visualization/3d_to_2d.py": { + "a": 0, + "d": 2 + }, + "mne/viz/_3d.py": { + "a": 8, + "d": 5 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 13, + "d": 61 + }, + "mne/viz/backends/_pyvista.py": { + "a": 4, + "d": 2 + }, + "tutorials/clinical/30_ecog.py": { + "a": 3, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9934.json b/doc/sphinxext/prs/9934.json new file mode 100644 index 00000000000..00f7bac2fc0 --- /dev/null +++ b/doc/sphinxext/prs/9934.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "f8283e89d4d4464b84c68d3eee7e4be0168aed56", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/preprocessing/ica.py": { + "a": 1, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 16, + "d": 17 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9936.json b/doc/sphinxext/prs/9936.json new file mode 100644 index 00000000000..e6b8963e2e4 --- /dev/null +++ b/doc/sphinxext/prs/9936.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "93da0582c09eebd6778df97e994e4dc27d89202b", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/decoding/receptive_field.py": { + "a": 3, + "d": 5 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 1, + "d": 3 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9937.json b/doc/sphinxext/prs/9937.json new file mode 100644 index 00000000000..06bbb513900 --- /dev/null +++ b/doc/sphinxext/prs/9937.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b8b49ab0ea4bbf0f18942c293445be3bf7b0d565", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/beamformer/tests/test_lcmv.py": { + "a": 4, + "d": 2 + }, + "mne/beamformer/tests/test_rap_music.py": { + "a": 3, + "d": 1 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 40, + "d": 38 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 21, + "d": 9 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 4, + "d": 3 + }, + "mne/tests/test_epochs.py": { + "a": 9, + "d": 1 + }, + "mne/tests/test_rank.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9940.json b/doc/sphinxext/prs/9940.json new file mode 100644 index 00000000000..2a30a25d72e --- /dev/null +++ b/doc/sphinxext/prs/9940.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "27de73142d5f35c019fe962e6a0d6c90eb677ac5", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/surface.py": { + "a": 6, + "d": 2 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9943.json b/doc/sphinxext/prs/9943.json new file mode 100644 index 00000000000..48de5e377cc --- /dev/null +++ b/doc/sphinxext/prs/9943.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "f873c3f2059ead9dbc1588a2f0a022ecc2e4b0e5", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 2, + "d": 1 + }, + "README.rst": { + "a": 1, + 
"d": 1 + }, + "azure-pipelines.yml": { + "a": 2, + "d": 2 + }, + "doc/install/contributing.rst": { + "a": 1, + "d": 13 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9944.json b/doc/sphinxext/prs/9944.json new file mode 100644 index 00000000000..a3fe00c060d --- /dev/null +++ b/doc/sphinxext/prs/9944.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "1221b9ca280de1df1c52b6ab3960a3c99ca34968", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/conftest.py": { + "a": 7, + "d": 0 + }, + "mne/coreg.py": { + "a": 5, + "d": 0 + }, + "mne/gui/_coreg.py": { + "a": 13, + "d": 4 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 5, + "d": 2 + }, + "mne/viz/backends/_abstract.py": { + "a": 6, + "d": 0 + }, + "mne/viz/backends/_notebook.py": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_qt.py": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9945.json b/doc/sphinxext/prs/9945.json new file mode 100644 index 00000000000..c3f0df3a723 --- /dev/null +++ b/doc/sphinxext/prs/9945.json @@ -0,0 +1,255 @@ +{ + "merge_commit_sha": "a7bbdf0d5f2a1e228ed8c0240f97e1236fb81c76", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + }, + "doc/conf.py": { + "a": 5, + "d": 19 + }, + "doc/install/advanced.rst": { + "a": 2, + "d": 24 + }, + "doc/install/contributing.rst": { + "a": 1, + "d": 1 + }, + "doc/install/mne_python.rst": { + "a": 0, + "d": 24 + }, + "doc/links.inc": { + "a": 0, + "d": 1 + }, + "doc/overview/design_philosophy.rst": { + "a": 5, + "d": 4 + }, + "doc/overview/faq.rst": { + "a": 4, + "d": 5 + }, + "doc/overview/roadmap.rst": { + "a": 0, + "d": 14 + }, + "environment.yml": { + "a": 0, + "d": 3 + }, + "examples/visualization/3d_to_2d.py": { + "a": 1, + "d": 1 + }, + "mne/channels/channels.py": { + "a": 1, + "d": 1 + }, + "mne/commands/mne_coreg.py": { + "a": 14, + "d": 19 + }, + "mne/commands/mne_kit2fiff.py": { + "a": 4, + "d": 9 + }, + "mne/commands/tests/test_commands.py": { + "a": 2, + "d": 5 + }, + "mne/conftest.py": { + "a": 7, + "d": 39 + }, + "mne/dipole.py": { + "a": 4, + "d": 4 + }, + "mne/gui/__init__.py": { + "a": 35, + "d": 120 + }, + "mne/gui/_backend.py": { + "a": 0, + "d": 53 + }, + "mne/gui/_coreg_gui.py": { + "a": 0, + "d": 2147 + }, + "mne/gui/_fiducials_gui.py": { + "a": 0, + "d": 515 + }, + "mne/gui/_file_traits.py": { + "a": 0, + "d": 577 + }, + "mne/gui/_help.py": { + "a": 0, + "d": 16 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 0, + "d": 6 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 0, + "d": 758 + }, + "mne/gui/_marker_gui.py": { + "a": 0, + "d": 455 + }, + "mne/gui/_viewer.py": { + "a": 0, + "d": 564 + }, + "mne/gui/help/kit2fiff.json": { + "a": 0, + "d": 10 + }, + "mne/gui/tests/test_coreg_gui.py": { + "a": 1, + "d": 320 + }, + "mne/gui/tests/test_fiducials_gui.py": { + "a": 0, + "d": 68 + }, + "mne/gui/tests/test_file_traits.py": { + "a": 0, + "d": 137 + }, + "mne/gui/tests/test_kit2fiff_gui.py": { + "a": 0, + "d": 162 + }, + "mne/gui/tests/test_marker_gui.py": { + "a": 0, + "d": 78 + }, + "mne/io/egi/tests/test_egi.py": { + "a": 1, + "d": 1 + }, + "mne/report/report.py": { + "a": 8, + "d": 8 + }, + "mne/report/tests/test_report.py": { + "a": 2, + "d": 2 + }, + "mne/source_space.py": { + "a": 2, + "d": 2 + }, + "mne/surface.py": { + "a": 8, + "d": 1 + }, + "mne/tests/test_docstring_parameters.py": { + 
"a": 4, + "d": 28 + }, + "mne/tests/test_import_nesting.py": { + "a": 1, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 5, + "d": 6 + }, + "mne/utils/_logging.py": { + "a": 0, + "d": 14 + }, + "mne/utils/_testing.py": { + "a": 0, + "d": 48 + }, + "mne/utils/check.py": { + "a": 5, + "d": 8 + }, + "mne/utils/config.py": { + "a": 1, + "d": 5 + }, + "mne/utils/tests/test_check.py": { + "a": 4, + "d": 13 + }, + "mne/viz/_3d.py": { + "a": 44, + "d": 111 + }, + "mne/viz/_brain/_brain.py": { + "a": 5, + "d": 9 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 2, + "d": 8 + }, + "mne/viz/backends/_abstract.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_pysurfer_mayavi.py": { + "a": 0, + "d": 511 + }, + "mne/viz/backends/_utils.py": { + "a": 0, + "d": 1 + }, + "mne/viz/backends/renderer.py": { + "a": 45, + "d": 54 + }, + "mne/viz/backends/tests/_utils.py": { + "a": 0, + "d": 12 + }, + "mne/viz/backends/tests/test_renderer.py": { + "a": 2, + "d": 5 + }, + "mne/viz/tests/test_3d.py": { + "a": 27, + "d": 64 + }, + "mne/viz/utils.py": { + "a": 7, + "d": 5 + }, + "requirements.txt": { + "a": 0, + "d": 3 + }, + "setup.py": { + "a": 0, + "d": 1 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/60_visualize_stc.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9946.json b/doc/sphinxext/prs/9946.json new file mode 100644 index 00000000000..46a60069c92 --- /dev/null +++ b/doc/sphinxext/prs/9946.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6c9fca05e49f23da2794f4a5f112e90f6552e882", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/0.24.inc": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9947.json b/doc/sphinxext/prs/9947.json new file mode 100644 index 00000000000..ffb9a8ba8d5 --- /dev/null +++ b/doc/sphinxext/prs/9947.json @@ -0,0 +1,203 @@ +{ + "merge_commit_sha": "c69ec3db5e5f7945b3be4f0143716d747b0d1573", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 1 + }, + "README.rst": { + "a": 7, + "d": 7 + }, + "mne/__init__.py": { + "a": 0, + "d": 1 + }, + "mne/conftest.py": { + "a": 0, + "d": 1 + }, + "mne/connectivity/__init__.py": { + "a": 0, + "d": 6 + }, + "mne/connectivity/effective.py": { + "a": 0, + "d": 159 + }, + "mne/connectivity/envelope.py": { + "a": 0, + "d": 154 + }, + "mne/connectivity/spectral.py": { + "a": 0, + "d": 1094 + }, + "mne/connectivity/tests/__init__.py": { + "a": 0, + "d": 0 + }, + "mne/connectivity/tests/test_effective.py": { + "a": 0, + "d": 39 + }, + "mne/connectivity/tests/test_envelope.py": { + "a": 0, + "d": 100 + }, + "mne/connectivity/tests/test_spectral.py": { + "a": 0, + "d": 274 + }, + "mne/connectivity/tests/test_utils.py": { + "a": 0, + "d": 56 + }, + "mne/connectivity/utils.py": { + "a": 0, + "d": 97 + }, + "mne/cuda.py": { + "a": 2, + "d": 3 + }, + "mne/datasets/epilepsy_ecog/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/epilepsy_ecog/_data.py": { + "a": 3, + "d": 13 + }, + "mne/datasets/fnirs_motor/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/fnirs_motor/fnirs_motor.py": { + "a": 3, + "d": 13 + }, + "mne/datasets/multimodal/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/multimodal/multimodal.py": { + "a": 3, + "d": 12 + }, + "mne/datasets/opm/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/opm/opm.py": { + "a": 3, + "d": 12 + }, + 
"mne/datasets/phantom_4dbti/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/phantom_4dbti/phantom_4dbti.py": { + "a": 3, + "d": 13 + }, + "mne/datasets/refmeg_noise/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/refmeg_noise/refmeg_noise.py": { + "a": 3, + "d": 13 + }, + "mne/datasets/sample/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/sample/sample.py": { + "a": 3, + "d": 12 + }, + "mne/datasets/somato/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/somato/somato.py": { + "a": 3, + "d": 12 + }, + "mne/datasets/ssvep/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/datasets/ssvep/ssvep.py": { + "a": 3, + "d": 12 + }, + "mne/datasets/utils.py": { + "a": 0, + "d": 11 + }, + "mne/filter.py": { + "a": 2, + "d": 3 + }, + "mne/fixes.py": { + "a": 4, + "d": 28 + }, + "mne/inverse_sparse/mxne_inverse.py": { + "a": 3, + "d": 4 + }, + "mne/inverse_sparse/mxne_optim.py": { + "a": 7, + "d": 196 + }, + "mne/inverse_sparse/tests/test_mxne_inverse.py": { + "a": 0, + "d": 9 + }, + "mne/inverse_sparse/tests/test_mxne_optim.py": { + "a": 0, + "d": 33 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 1, + "d": 2 + }, + "mne/time_frequency/_stft.py": { + "a": 3, + "d": 4 + }, + "mne/time_frequency/_stockwell.py": { + "a": 4, + "d": 5 + }, + "mne/time_frequency/csd.py": { + "a": 2, + "d": 3 + }, + "mne/time_frequency/multitaper.py": { + "a": 3, + "d": 4 + }, + "mne/time_frequency/tfr.py": { + "a": 1, + "d": 2 + }, + "mne/viz/tests/test_3d.py": { + "a": 1, + "d": 27 + }, + "mne/viz/tests/test_circle.py": { + "a": 1, + "d": 81 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9948.json b/doc/sphinxext/prs/9948.json new file mode 100644 index 00000000000..0db1ea092eb --- /dev/null +++ b/doc/sphinxext/prs/9948.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "490b0b8ec2c5b0d2c0a36f0a5151d1b7683d7805", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9949.json b/doc/sphinxext/prs/9949.json new file mode 100644 index 00000000000..273348ee6c8 --- /dev/null +++ b/doc/sphinxext/prs/9949.json @@ -0,0 +1,103 @@ +{ + "merge_commit_sha": "7892c081b7b26f100a40e934c22971cbd8bcda46", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.10.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.16.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.18.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.20.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.21.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.23.inc": { + "a": 4, + "d": 4 + }, + "doc/changes/0.24.inc": { + "a": 5, + "d": 5 + }, + "doc/changes/0.9.inc": { + "a": 1, + "d": 1 + }, + "mne/io/meas_info.py": { + "a": 5, + "d": 7 + }, + "mne/io/tests/test_meas_info.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/eog.py": { + "a": 2, + "d": 9 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 175 + }, + "mne/preprocessing/nirs/_beer_lambert_law.py": { + "a": 3, + "d": 8 + }, + "mne/preprocessing/nirs/tests/test_beer_lambert_law.py": { + "a": 1, + "d": 5 + }, + "mne/preprocessing/nirs/tests/test_nirs.py": { + "a": 1, + "d": 4 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 0, + "d": 44 + }, + "mne/report/report.py": { + "a": 4, + "d": 374 + }, + "mne/report/tests/test_report.py": { + "a": 0, + "d": 51 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + 
"a": 1, + "d": 1 + }, + "mne/viz/_brain/_brain.py": { + "a": 6, + "d": 22 + }, + "mne/viz/_brain/tests/test_brain.py": { + "a": 0, + "d": 3 + }, + "mne/viz/utils.py": { + "a": 2, + "d": 48 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/995.json b/doc/sphinxext/prs/995.json new file mode 100644 index 00000000000..803784a2d4c --- /dev/null +++ b/doc/sphinxext/prs/995.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "1d0412f3b7b0109b45c65eb1ca1c2bc89567894e", + "authors": [ + { + "n": "Alexandre Gramfort", + "e": "alexandre.gramfort@m4x.org" + } + ], + "changes": { + "examples/plot_from_raw_to_epochs_to_evoked.py": { + "a": 1, + "d": 2 + }, + "mne/epochs.py": { + "a": 78, + "d": 63 + }, + "mne/fiff/constants.py": { + "a": 2, + "d": 2 + }, + "mne/tests/test_epochs.py": { + "a": 61, + "d": 30 + }, + "mne/viz.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9950.json b/doc/sphinxext/prs/9950.json new file mode 100644 index 00000000000..7615ad4217e --- /dev/null +++ b/doc/sphinxext/prs/9950.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c2ee7ae2d50f4de4558f305f53d1bbc7b64a6193", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "mne/tests/test_surface.py": { + "a": 6, + "d": 6 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9951.json b/doc/sphinxext/prs/9951.json new file mode 100644 index 00000000000..71d103de47c --- /dev/null +++ b/doc/sphinxext/prs/9951.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0eb4aa64228bb55b79d521a827bc3d2644539564", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9952.json b/doc/sphinxext/prs/9952.json new file mode 100644 index 00000000000..d3fb312d6e9 --- /dev/null +++ b/doc/sphinxext/prs/9952.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "c2aaaa8aad83d62ee36b7030bca56d50315020e4", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/gui/_ieeg_locate_gui.py": { + "a": 51, + "d": 3 + }, + "mne/gui/tests/test_ieeg_locate_gui.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9955.json b/doc/sphinxext/prs/9955.json new file mode 100644 index 00000000000..97be1d5465b --- /dev/null +++ b/doc/sphinxext/prs/9955.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "67c7f3e6f33f0ed91fe818754488ab9645b5c30f", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/utils/config.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9956.json b/doc/sphinxext/prs/9956.json new file mode 100644 index 00000000000..2b8f756f973 --- /dev/null +++ b/doc/sphinxext/prs/9956.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4a7d2a9b1e69da8ac14a5e6de7510d79126431d6", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "requirements_doc.txt": { + "a": 1, + "d": 1 + }, + "tutorials/epochs/60_make_fixed_length_epochs.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9957.json b/doc/sphinxext/prs/9957.json new file mode 100644 index 
00000000000..71ab6c93d4f --- /dev/null +++ b/doc/sphinxext/prs/9957.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "81fa0411b8ec68985c92b182d91a561d5407ad8c", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 4, + "d": 3 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9959.json b/doc/sphinxext/prs/9959.json new file mode 100644 index 00000000000..d52bf27f5e6 --- /dev/null +++ b/doc/sphinxext/prs/9959.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "4d1510388a66b2d8fffa0992e12bb8611aa09a9e", + "authors": [ + { + "n": "Adam Li", + "e": null + } + ], + "changes": { + "doc/changes/0.19.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.23.inc": { + "a": 1, + "d": 1 + }, + "doc/visualization.rst": { + "a": 0, + "d": 1 + }, + "mne/utils/__init__.py": { + "a": 0, + "d": 7 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 90 + }, + "mne/viz/__init__.py": { + "a": 2, + "d": 4 + }, + "mne/viz/backends/renderer.py": { + "a": 0, + "d": 2 + }, + "mne/viz/circle.py": { + "a": 1, + "d": 112 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9960.json b/doc/sphinxext/prs/9960.json new file mode 100644 index 00000000000..da16190a6a5 --- /dev/null +++ b/doc/sphinxext/prs/9960.json @@ -0,0 +1,187 @@ +{ + "merge_commit_sha": "ad095ba6e0e5b97ddbbb0def3846fa8551add30d", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Martin Schulz", + "e": "dev@earthman-music.de" + } + ], + "changes": { + ".circleci/config.yml": { + "a": 10, + "d": 4 + }, + ".gitignore": { + "a": 1, + "d": 0 + }, + "Makefile": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 16, + "d": 0 + }, + "examples/inverse/compute_mne_inverse_raw_in_label.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/source_power_spectrum.py": { + "a": 1, + "d": 1 + }, + "examples/time_frequency/source_space_time_frequency.py": { + "a": 1, + "d": 1 + }, + "mne/conftest.py": { + "a": 20, + "d": 2 + }, + "mne/epochs.py": { + "a": 1, + "d": 1 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 1 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 6, + "d": 7 + }, + "mne/report/report.py": { + "a": 8, + "d": 6 + }, + "mne/tests/test_epochs.py": { + "a": 1, + "d": 3 + }, + "mne/utils/config.py": { + "a": 2, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 3 + }, + "mne/utils/tests/test_config.py": { + "a": 2, + "d": 2 + }, + "mne/viz/__init__.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_figure.py": { + "a": 24, + "d": 9 + }, + "mne/viz/_mpl_figure.py": { + "a": 5, + "d": 1 + }, + "mne/viz/_scraper.py": { + "a": 57, + "d": 0 + }, + "mne/viz/backends/_utils.py": { + "a": 1, + "d": 1 + }, + "mne/viz/epochs.py": { + "a": 5, + "d": 5 + }, + "mne/viz/ica.py": { + "a": 6, + "d": 5 + }, + "mne/viz/raw.py": { + "a": 5, + "d": 11 + }, + "mne/viz/tests/test_epochs.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_figure.py": { + "a": 1, + "d": 1 + }, + "mne/viz/tests/test_ica.py": { + "a": 15, + "d": 15 + }, + "mne/viz/tests/test_scraper.py": { + "a": 24, + "d": 0 + }, + "mne/viz/utils.py": { + "a": 33, + "d": 10 + }, + "tools/circleci_dependencies.sh": { + "a": 1, + "d": 0 + }, + "tutorials/epochs/50_epochs_to_data_frame.py": { + "a": 6, + "d": 0 + }, + 
"tutorials/evoked/30_eeg_erp.py": { + "a": 2, + "d": 1 + }, + "tutorials/intro/10_overview.py": { + "a": 2, + "d": 2 + }, + "tutorials/preprocessing/15_handling_bad_channels.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/20_rejecting_bad_data.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/30_filtering_resampling.py": { + "a": 3, + "d": 2 + }, + "tutorials/preprocessing/40_artifact_correction_ica.py": { + "a": 2, + "d": 1 + }, + "tutorials/preprocessing/45_projectors_background.py": { + "a": 4, + "d": 2 + }, + "tutorials/preprocessing/50_artifact_correction_ssp.py": { + "a": 6, + "d": 3 + }, + "tutorials/preprocessing/55_setting_eeg_reference.py": { + "a": 4, + "d": 2 + }, + "tutorials/raw/30_annotate_raw.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9962.json b/doc/sphinxext/prs/9962.json new file mode 100644 index 00000000000..69179252733 --- /dev/null +++ b/doc/sphinxext/prs/9962.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "0dd33332144db31a087ccd535fc52bef7e3ada89", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "azure-pipelines.yml": { + "a": 8, + "d": 2 + }, + "environment.yml": { + "a": 2, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9963.json b/doc/sphinxext/prs/9963.json new file mode 100644 index 00000000000..bfc7e8e1712 --- /dev/null +++ b/doc/sphinxext/prs/9963.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "1be2f5619ab4cd9661a8de9442db47eadeab22ab", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_templates/homepage.html": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9964.json b/doc/sphinxext/prs/9964.json new file mode 100644 index 00000000000..541062cd32e --- /dev/null +++ b/doc/sphinxext/prs/9964.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6ab3cfb90d330d1779dc4df061149362042645d9", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_templates/homepage.html": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9966.json b/doc/sphinxext/prs/9966.json new file mode 100644 index 00000000000..d841fa8f703 --- /dev/null +++ b/doc/sphinxext/prs/9966.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8d3f48b776459affc0a7ce1314f1abeb65d03986", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9968.json b/doc/sphinxext/prs/9968.json new file mode 100644 index 00000000000..4641a7bef55 --- /dev/null +++ b/doc/sphinxext/prs/9968.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "8ee595239fb0b5a04da04d189a4c708b4fc0d4f4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/forward/_make_forward.py": { + "a": 1, + "d": 1 + }, + "mne/forward/tests/test_make_forward.py": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9969.json b/doc/sphinxext/prs/9969.json new file mode 100644 index 00000000000..98ba9d47240 --- /dev/null +++ b/doc/sphinxext/prs/9969.json @@ -0,0 +1,51 @@ +{ + "merge_commit_sha": "8d24f39220d6b2080f4837ccb9a37e2e37035e35", + "authors": [ + { + "n": "Adam Li", + "e": null + }, + { + "n": "Daniel McCloy", 
+ "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Adam Li", + "e": "adam2392@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 187, + "d": 1 + }, + "mne/tests/test_epochs.py": { + "a": 194, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9970.json b/doc/sphinxext/prs/9970.json new file mode 100644 index 00000000000..934f3d01970 --- /dev/null +++ b/doc/sphinxext/prs/9970.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "b470ddbd0eafcf18fa1af40fafa641b10b82c2d6", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/0.24.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 53, + "d": 17 + }, + "mne/report/tests/test_report.py": { + "a": 36, + "d": 0 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9972.json b/doc/sphinxext/prs/9972.json new file mode 100644 index 00000000000..b7748b95f99 --- /dev/null +++ b/doc/sphinxext/prs/9972.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "83d25a31d715b086f6c6e6fad3d082c5815562fa", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/gui/__init__.py": { + "a": 8, + "d": 8 + }, + "mne/gui/_coreg.py": { + "a": 7, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 20, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 2, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9973.json b/doc/sphinxext/prs/9973.json new file mode 100644 index 00000000000..33990833a97 --- /dev/null +++ b/doc/sphinxext/prs/9973.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "09621ea3d14b40392744d21b47bc40943e756987", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/externals/pymatreader/pymatreader.py": { + "a": 2, + "d": 3 + }, + "mne/externals/pymatreader/utils.py": { + "a": 11, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 5, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/viz/_3d.py": { + "a": 4, + "d": 1 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9975.json b/doc/sphinxext/prs/9975.json new file mode 100644 index 00000000000..5d121ab564f --- /dev/null +++ b/doc/sphinxext/prs/9975.json @@ -0,0 +1,63 @@ +{ + "merge_commit_sha": "7169435c716581d51fd407f96efa79eb6170a48e", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + 
"n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 5, + "d": 2 + }, + "mne/export/_egimff.py": { + "a": 14, + "d": 2 + }, + "mne/export/_export.py": { + "a": 18, + "d": 5 + }, + "mne/export/tests/test_export.py": { + "a": 35, + "d": 5 + }, + "mne/io/base.py": { + "a": 6, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9976.json b/doc/sphinxext/prs/9976.json new file mode 100644 index 00000000000..47435c0d491 --- /dev/null +++ b/doc/sphinxext/prs/9976.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "0335fc7cb480dc8e6cd70ac2cb400678dabbac1a", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9977.json b/doc/sphinxext/prs/9977.json new file mode 100644 index 00000000000..a79706d6320 --- /dev/null +++ b/doc/sphinxext/prs/9977.json @@ -0,0 +1,131 @@ +{ + "merge_commit_sha": "d995aa2aa8361ef29d2ffb896fc2387ed1cde2cc", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Mathieu Scheltienne", + "e": "73893616+mscheltienne@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/0.18.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/0.24.inc": { + "a": 4, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 0, + "d": 2 + }, + "doc/conf.py": { + "a": 1, + "d": 0 + }, + "mne/epochs.py": { + "a": 5, + "d": 2 + }, + "mne/export/_egimff.py": { + "a": 14, + "d": 2 + }, + "mne/export/_export.py": { + "a": 18, + "d": 5 + }, + "mne/export/tests/test_export.py": { + "a": 35, + "d": 5 + }, + "mne/externals/pymatreader/pymatreader.py": { + "a": 2, + "d": 3 + }, + "mne/externals/pymatreader/utils.py": { + "a": 11, + "d": 2 + }, + "mne/gui/__init__.py": { + "a": 2, + "d": 5 + }, + "mne/gui/_coreg.py": { + "a": 7, + "d": 1 + }, + "mne/io/base.py": { + "a": 6, + "d": 2 + }, + "mne/io/eeglab/eeglab.py": { + "a": 5, + "d": 3 + }, + "mne/minimum_norm/tests/test_inverse.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 20, + "d": 0 + }, + "mne/viz/_3d.py": { + "a": 6, + "d": 6 + }, + "mne/viz/topomap.py": { + "a": 1, + "d": 1 + }, + "tools/github_actions_dependencies.sh": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9978.json b/doc/sphinxext/prs/9978.json new file mode 100644 index 00000000000..611bf00f8cc --- /dev/null +++ b/doc/sphinxext/prs/9978.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4c7fa46e20c071968ef305195738d414925d3442", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + 
"tutorials/clinical/10_ieeg_localize.py": { + "a": 27, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9979.json b/doc/sphinxext/prs/9979.json new file mode 100644 index 00000000000..056d90c8ced --- /dev/null +++ b/doc/sphinxext/prs/9979.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "bdedc788b79f7b620167ba6358965ce312ec51be", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/changes/0.18.inc": { + "a": 1, + "d": 1 + }, + "doc/changes/names.inc": { + "a": 0, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/998.json b/doc/sphinxext/prs/998.json new file mode 100644 index 00000000000..03715b62897 --- /dev/null +++ b/doc/sphinxext/prs/998.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d6a3ae446dc58aa750655681868797d08a89995d", + "authors": [ + { + "n": "Christian Brodbeck", + "e": null + } + ], + "changes": { + "mne/coreg.py": { + "a": 24, + "d": 0 + }, + "mne/fiff/brainvision/brainvision.py": { + "a": 229, + "d": 134 + }, + "mne/fiff/brainvision/tests/test_brainvision.py": { + "a": 64, + "d": 8 + }, + "mne/fiff/kit/__init__.py": { + "a": 2, + "d": 1 + }, + "mne/fiff/kit/coreg.py": { + "a": 0, + "d": 28 + }, + "mne/fiff/kit/kit.py": { + "a": 9, + "d": 4 + }, + "mne/gui/_kit2fiff_gui.py": { + "a": 4, + "d": 2 + }, + "mne/tests/test_coreg.py": { + "a": 14, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9980.json b/doc/sphinxext/prs/9980.json new file mode 100644 index 00000000000..6fc5259796a --- /dev/null +++ b/doc/sphinxext/prs/9980.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "84dd3ff13cf9fad9212456cdb9066ff6e4122142", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/source_estimate.py": { + "a": 31, + "d": 18 + }, + "mne/tests/test_source_estimate.py": { + "a": 4, + "d": 4 + }, + "tutorials/clinical/30_ecog.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9981.json b/doc/sphinxext/prs/9981.json new file mode 100644 index 00000000000..4dbe9df66de --- /dev/null +++ b/doc/sphinxext/prs/9981.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "a8522d213cf4b54ef682f2a874065bf62af65fdb", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "doc/conf.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9982.json b/doc/sphinxext/prs/9982.json new file mode 100644 index 00000000000..ca5802d2c59 --- /dev/null +++ b/doc/sphinxext/prs/9982.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "a50535b709d02acad0d48b9ac4bc701dc5659022", + "authors": [ + { + "n": "Martin G. 
Schulz", + "e": "dev@mgschulz.de" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 1, + "d": 1 + }, + "mne/utils/config.py": { + "a": 2, + "d": 2 + }, + "mne/utils/docs.py": { + "a": 3, + "d": 2 + }, + "mne/viz/_figure.py": { + "a": 12, + "d": 12 + }, + "mne/viz/raw.py": { + "a": 2, + "d": 2 + }, + "mne/viz/utils.py": { + "a": 0, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9984.json b/doc/sphinxext/prs/9984.json new file mode 100644 index 00000000000..8388f0d6d09 --- /dev/null +++ b/doc/sphinxext/prs/9984.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "43bef6607a48fb272428d2f4a931df97cae688e1", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/cuda.py": { + "a": 4, + "d": 6 + }, + "mne/filter.py": { + "a": 3, + "d": 2 + }, + "mne/io/base.py": { + "a": 1, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 6, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9986.json b/doc/sphinxext/prs/9986.json new file mode 100644 index 00000000000..c2f776d59e9 --- /dev/null +++ b/doc/sphinxext/prs/9986.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "aa6ad2f6e6d946425ad53ca4b3bb3f8e14080a1d", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "paper.md": { + "a": 7, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9987.json b/doc/sphinxext/prs/9987.json new file mode 100644 index 00000000000..84effb8082b --- /dev/null +++ b/doc/sphinxext/prs/9987.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "faf4569ff3d26e79cc439727254460c6bae3aae1", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + "changes": { + "tutorials/clinical/10_ieeg_localize.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9988.json b/doc/sphinxext/prs/9988.json new file mode 100644 index 00000000000..69a81a56409 --- /dev/null +++ b/doc/sphinxext/prs/9988.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "bf116a35a68e00bee351f2ac7f36ff103d64df0d", + "authors": [ + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "mne/utils/check.py": { + "a": 6, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9990.json b/doc/sphinxext/prs/9990.json new file mode 100644 index 00000000000..f1fd43cb9e7 --- /dev/null +++ b/doc/sphinxext/prs/9990.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "7491370569855921981b993c9ca63c3e72046589", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/io/eeglab/eeglab.py": { + "a": 34, + "d": 8 + }, + "mne/io/eeglab/tests/test_eeglab.py": { + "a": 27, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9991.json b/doc/sphinxext/prs/9991.json new file mode 100644 index 00000000000..1705ba1eae4 --- /dev/null +++ b/doc/sphinxext/prs/9991.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "e44caebb8f50127af268ba152b183267be9a4483", + "authors": [ + { + "n": "Alex Rockhill", + "e": "aprockhill@mailbox.org" + } + ], + 
"changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/tests/test_morph.py": { + "a": 3, + "d": 4 + }, + "mne/tests/test_transforms.py": { + "a": 1, + "d": 1 + }, + "mne/transforms.py": { + "a": 17, + "d": 22 + }, + "tutorials/clinical/10_ieeg_localize.py": { + "a": 4, + "d": 9 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9992.json b/doc/sphinxext/prs/9992.json new file mode 100644 index 00000000000..04fbfeb7b55 --- /dev/null +++ b/doc/sphinxext/prs/9992.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "8c611886826db50f42b4ccbd6fd5718269d1fb31", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9994.json b/doc/sphinxext/prs/9994.json new file mode 100644 index 00000000000..ab8a570e8e3 --- /dev/null +++ b/doc/sphinxext/prs/9994.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "6509aa8c75b5d1015983ccc9d229d2d9954a21f7", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/epochs.py": { + "a": 5, + "d": 1 + }, + "mne/export/_egimff.py": { + "a": 10, + "d": 1 + }, + "mne/export/_export.py": { + "a": 24, + "d": 3 + }, + "mne/export/tests/test_export.py": { + "a": 14, + "d": 2 + }, + "mne/io/base.py": { + "a": 5, + "d": 1 + }, + "mne/utils/docs.py": { + "a": 15, + "d": 3 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9995.json b/doc/sphinxext/prs/9995.json new file mode 100644 index 00000000000..7c7cabf1f95 --- /dev/null +++ b/doc/sphinxext/prs/9995.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "ca40d9a44b9864cdeb745e7abef3111a610e2652", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/workflows/codespell_and_flake.yml": { + "a": 2, + "d": 1 + }, + ".github/workflows/compat_minimal.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/compat_old.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/linux_conda.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/linux_pip.yml": { + "a": 1, + "d": 1 + }, + ".github/workflows/macos_conda.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9996.json b/doc/sphinxext/prs/9996.json new file mode 100644 index 00000000000..fe7acb08052 --- /dev/null +++ b/doc/sphinxext/prs/9996.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "6cbf4708dcaa196c7a5ddf232ef80c79ea87aa98", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "requirements_doc.txt": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/9998.json b/doc/sphinxext/prs/9998.json new file mode 100644 index 00000000000..ab1a0cb79c4 --- /dev/null +++ b/doc/sphinxext/prs/9998.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "a2ab1e25fde473dc1e47dcc83e4426ce60397461", + "authors": [ + { + "n": "Marijn van Vliet", + "e": "w.m.vanvliet@gmail.com" + }, + { + "n": "Richard Höchenberger", + "e": "richard.hoechenberger@gmail.com" + } + ], + "changes": { + "doc/changes/latest.inc": { + "a": 2, + "d": 0 + }, + "mne/report/report.py": { + "a": 4, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/related_software.py b/doc/sphinxext/related_software.py new file mode 100644 index 00000000000..01ad35632b7 --- 
/dev/null +++ b/doc/sphinxext/related_software.py @@ -0,0 +1,269 @@ +"""Create a list of related software. + +To add a package to the list: + +1. Add it to the MNE-installers if possible, and it will automatically appear. +2. If it's on PyPI and not in the MNE-installers, add it to the PYPI_PACKAGES set. +3. If it's not on PyPI, add it to the MANUAL_PACKAGES dictionary. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import functools +import importlib.metadata +import os +import pathlib +import urllib.error +import urllib.request + +import joblib +from docutils import nodes +from docutils.parsers.rst import Directive +from mne_doc_utils import sphinx_logger +from sphinx.errors import ExtensionError +from sphinx.util.display import status_iterator + +# If a package is in MNE-Installers, it will be automatically added! + +# If it's available on PyPI, add it to this set: +PYPI_PACKAGES = { + "cross-domain-saliency-maps", + "meggie", + "niseq", + "sesameeg", +} + +# If it's not available on PyPI, add it to this dict: +MANUAL_PACKAGES = { + # TODO: These packages are not pip-installable as of 2024/07/17, so we have to + # manually populate them -- should open issues on their package repos. + "best-python": { + "Home-page": "https://github.com/multifunkim/best-python", + "Summary": "The goal of this project is to provide a way to use the best-brainstorm Matlab solvers in Python, compatible with MNE-Python.", # noqa: E501 + }, + "mne-hcp": { + "Home-page": "https://github.com/mne-tools/mne-hcp", + "Summary": "We provide Python tools for seamless integration of MEG data from the Human Connectome Project into the Python ecosystem", # noqa: E501 + }, + "posthoc": { + "Home-page": "https://users.aalto.fi/~vanvlm1/posthoc/python", + "Summary": "post-hoc modification of linear models", + }, + # This package does not provide wheels, so don't force CircleCI to build it. + # If it eventually provides binary wheels we could add it to + # `tools/circleci_dependencies.sh` and remove from here. 
+ "eelbrain": { + "Home-page": "https://eelbrain.readthedocs.io/en/stable/", + "Summary": "Open-source Python toolkit for MEG and EEG data analysis.", + }, + # mne-kit-gui requires mayavi (ugh) + "mne-kit-gui": { + "Home-page": "https://github.com/mne-tools/mne-kit-gui", + "Summary": "A module for KIT MEG coregistration.", + }, + # fsleyes requires wxpython, which needs to build + "fsleyes": { + "Home-page": "https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FSLeyes", + "Summary": "FSLeyes is the FSL image viewer.", + }, + # dcm2niix must be built from source + "dcm2niix": { + "Home-page": "https://github.com/rordenlab/dcm2niix", + "Summary": "DICOM to NIfTI converter", + }, + # TODO: these do not set a valid homepage or documentation page on PyPI + "python-picard": { # https://github.com/mind-inria/picard/issues/60 + "Home-page": "https://github.com/mind-inria/picard", + "Summary": "Preconditioned ICA for Real Data", + }, + "eeg_positions": { + "Home-page": "https://eeg-positions.readthedocs.io/", + "Summary": "Compute and plot standard EEG electrode positions.", + }, + "mne-faster": { + "Home-page": "https://github.com/wmvanvliet/mne-faster", + "Summary": "MNE-FASTER: automatic bad channel/epoch/component detection.", # noqa: E501 + }, + "mne-features": { + "Home-page": "https://mne.tools/mne-features", + "Summary": "MNE-Features software for extracting features from multivariate time series", # noqa: E501 + }, + "mne-rsa": { + "Home-page": "https://users.aalto.fi/~vanvlm1/mne-rsa", + "Summary": "Code for performing Representational Similarity Analysis on MNE-Python data structures.", # noqa: E501 + }, + "mffpy": { + "Home-page": "https://github.com/BEL-Public/mffpy", + "Summary": "Reader and Writer for Philips' MFF file format.", + }, + "emd": { + "Home-page": "https://emd.readthedocs.io/en/stable", + "Summary": "Empirical Mode Decomposition in Python.", + }, + # Needs a release with homepage set properly + "meegkit": { + "Home-page": "https://nbara.github.io/python-meegkit", + "Summary": "Denoising tools for M/EEG processing.", + }, + # not on PyPI + "conpy": { + "Home-page": "https://github.com/aaltoimaginglanguage/conpy", + "Summary": "Functions and classes for performing connectivity analysis on MEG data.", # noqa: E501 + }, +} + +REQUIRE_METADATA = os.getenv("MNE_REQUIRE_RELATED_SOFTWARE_INSTALLED", "false").lower() +REQUIRE_METADATA = REQUIRE_METADATA in ("true", "1") + +# These packages pip-install with a different name than the package name +RENAMES = { + "python-neo": "neo", + "matplotlib-base": "matplotlib", +} + +_memory = joblib.Memory(location=pathlib.Path(__file__).parent / ".joblib", verbose=0) + + +@_memory.cache(cache_validation_callback=joblib.expires_after(days=7)) +def _get_installer_packages(): + """Get the MNE-Python installer package list YAML.""" + with urllib.request.urlopen( + "https://raw.githubusercontent.com/mne-tools/mne-installers/main/recipes/mne-python/construct.yaml" + ) as url: + data = url.read().decode("utf-8") + # Parse data for list of names of packages + lines = [line.strip() for line in data.splitlines()] + start_idx = lines.index("# <<< BEGIN RELATED SOFTWARE LIST >>>") + 1 + stop_idx = lines.index("# <<< END RELATED SOFTWARE LIST >>>") + packages = [ + # Lines look like + # - mne-ari =0.0.0 + # or similar. 
+ line.split()[1] + for line in lines[start_idx:stop_idx] + if not line.startswith("#") + ] + return packages + + +@functools.lru_cache +def _get_packages() -> dict[str, str]: + try: + packages = _get_installer_packages() + except urllib.error.URLError as exc: # e.g., bad internet connection + if not REQUIRE_METADATA: + sphinx_logger.warning(f"Could not fetch package list, got: {exc}") + return dict() + raise + # There can be duplicates in manual and installer packages because some of the + # PyPI entries for installer packages are incorrect or unusable (see above), so + # we don't enforce that. But PyPI and manual should be disjoint: + dups = set(MANUAL_PACKAGES) & set(PYPI_PACKAGES) + assert not dups, f"Duplicates in MANUAL_PACKAGES and PYPI_PACKAGES: {sorted(dups)}" + # And the installer and PyPI-only should be disjoint: + dups = set(PYPI_PACKAGES) & set(packages) + assert not dups, ( + f"Duplicates in PYPI_PACKAGES and installer packages: {sorted(dups)}" + ) + for name in PYPI_PACKAGES | set(MANUAL_PACKAGES): + if name not in packages: + packages.append(name) + # Simple alphabetical order + packages = sorted(packages, key=lambda x: x.lower()) + packages = [RENAMES.get(package, package) for package in packages] + out = dict() + reasons = [] + for package in status_iterator( + packages, f"Adding {len(packages)} related software packages: " + ): + out[package] = dict() + try: + if package in MANUAL_PACKAGES: + md = MANUAL_PACKAGES[package] + else: + md = importlib.metadata.metadata(package) + except importlib.metadata.PackageNotFoundError: + reasons.append(f"{package}: not found, needs to be installed") + continue # raise a complete error later + else: + # Every project should really have this + do_continue = False + for key in ("Summary",): + if key not in md: + reasons.extend(f"{package}: missing {repr(key)}") + do_continue = True + if do_continue: + continue + # It is annoying to find the home page + url = None + if "Home-page" in md: + url = md["Home-page"] + else: + for prefix in ("homepage", "documentation"): + for key, val in md.items(): + if key == "Project-URL" and val.lower().startswith( + f"{prefix}, " + ): + url = val.split(", ", 1)[1] + break + if url is not None: + break + else: + reasons.append( + f"{package}: could not find Home-page in {sorted(md)}" + ) + continue + out[package]["url"] = url + out[package]["description"] = md["Summary"].replace("\n", "") + reason_str = "\n".join(reasons) + if reason_str and REQUIRE_METADATA: + raise ExtensionError( + f"Could not find suitable metadata for related software:\n{reason_str}" + ) + + return out + + +class RelatedSoftwareDirective(Directive): + """Create a directive that inserts a bullet list of related software.""" + + def run(self): + """Run the directive.""" + my_list = nodes.bullet_list(bullet="*") + for package, data in _get_packages().items(): + item = nodes.list_item() + if "description" not in data: + para = nodes.paragraph(text=f"{package}") + else: + para = nodes.paragraph(text=f": {data['description']}") + refnode = nodes.reference( + "url", + package, + internal=False, + refuri=data["url"], + ) + para.insert(0, refnode) + item += para + my_list.append(item) + return [my_list] + + +def setup(app): + app.add_directive("related-software", RelatedSoftwareDirective) + # Run it as soon as this is added as a Sphinx extension so that any errors + # / new packages are reported early. The next call in run() will be cached. 
+ _get_packages() + return { + "version": "0.1", + "parallel_read_safe": True, + "parallel_write_safe": True, + } + + +if __name__ == "__main__": # pragma: no cover + items = list(RelatedSoftwareDirective.run(None)[0].children) + print(f"Got {len(items)} related software packages:") + for item in items: + print(f"- {item.astext()}") diff --git a/doc/sphinxext/related_software.txt b/doc/sphinxext/related_software.txt new file mode 100644 index 00000000000..f55e0afbd42 --- /dev/null +++ b/doc/sphinxext/related_software.txt @@ -0,0 +1,37 @@ +# cross-domain-saliency-maps requirements are onerous (torch and tensorflow) +# so we don't add it here, and install it separately in circleci_dependencies.sh +alphaCSC +autoreject +bycycle +conpy +curryreader +eeg_positions +emd +fooof +meegkit +meggie +mne-ari +mne-bids-pipeline +mne-faster +mne-features +mne-icalabel +mne-lsl +mne-microstates +mne-nirs +mne-rsa +mnelab +neurodsp +neurokit2 +niseq +nitime +pactools +plotly +pycrostates +pyprep +pyriemann +python-picard +sesameeg +sleepecg +tensorpac +wfdb +yasa diff --git a/doc/sphinxext/unit_role.py b/doc/sphinxext/unit_role.py index d912786b474..bf31ddf76c4 100644 --- a/doc/sphinxext/unit_role.py +++ b/doc/sphinxext/unit_role.py @@ -1,13 +1,18 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + from docutils import nodes -def unit_role(name, rawtext, text, lineno, inliner, options={}, content=[]): +def unit_role(name, rawtext, text, lineno, inliner, options={}, content=[]): # noqa: B006 parts = text.split() def pass_error_to_sphinx(rawtext, text, lineno, inliner): msg = inliner.reporter.error( - 'The :unit: role requires a space-separated number and unit; ' - f'got {text}', line=lineno) + f"The :unit: role requires a space-separated number and unit; got {text}", + line=lineno, + ) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] @@ -20,10 +25,10 @@ def pass_error_to_sphinx(rawtext, text, lineno, inliner): except ValueError: return pass_error_to_sphinx(rawtext, text, lineno, inliner) # input is well-formatted: proceed - node = nodes.Text('\u202F'.join(parts)) + node = nodes.Text("\u202f".join(parts)) return [node], [] def setup(app): - app.add_role('unit', unit_role) + app.add_role("unit", unit_role) return diff --git a/doc/visualization.rst b/doc/visualization.rst deleted file mode 100644 index 62fa54a8cee..00000000000 --- a/doc/visualization.rst +++ /dev/null @@ -1,88 +0,0 @@ - -Visualization -============= - -.. currentmodule:: mne.viz - -:py:mod:`mne.viz`: - -.. automodule:: mne.viz - :no-members: - :no-inherited-members: - -.. 
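Editorial note on the new doc/sphinxext/related_software.py added above (illustrative only, not part of the diff): the module docstring describes how a package gets onto the related-software list, and the registered directive is presumably invoked from a documentation page as a plain "related-software" directive. A minimal sketch of steps 2 and 3 of that docstring, using a hypothetical package whose name, URL, and summary are placeholders and reusing the PYPI_PACKAGES / MANUAL_PACKAGES objects defined in the file above:

    # Step 3: a package that is not on PyPI is described by hand (placeholder values).
    MANUAL_PACKAGES["my-meg-tool"] = {
        "Home-page": "https://example.org/my-meg-tool",
        "Summary": "Placeholder summary for a package that cannot be found on PyPI.",
    }
    # Step 2: a package that is on PyPI but not in the MNE installers is only named here;
    # its URL and summary are then pulled from importlib.metadata at doc-build time.
    PYPI_PACKAGES.add("my-pypi-meg-tool")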
autosummary:: - :toctree: generated/ - - Brain - ClickableImage - Figure3D - add_background_image - centers_to_edges - compare_fiff - circular_layout - iter_topography - mne_analyze_colormap - plot_bem - plot_brain_colorbar - plot_bridged_electrodes - plot_chpi_snr - plot_cov - plot_channel_labels_circle - plot_ch_adjacency - plot_csd - plot_dipole_amplitudes - plot_dipole_locations - plot_drop_log - plot_epochs - plot_epochs_psd_topomap - plot_events - plot_evoked - plot_evoked_image - plot_evoked_topo - plot_evoked_topomap - plot_evoked_joint - plot_evoked_field - plot_evoked_white - plot_filter - plot_head_positions - plot_ideal_filter - plot_compare_evokeds - plot_ica_sources - plot_ica_components - plot_ica_properties - plot_ica_scores - plot_ica_overlay - plot_epochs_image - plot_layout - plot_montage - plot_projs_topomap - plot_projs_joint - plot_raw - plot_raw_psd - plot_regression_weights - plot_sensors - plot_snr_estimate - plot_source_estimates - link_brains - plot_volume_source_estimates - plot_vector_source_estimates - plot_sparse_source_estimates - plot_tfr_topomap - plot_topo_image_epochs - plot_topomap - plot_alignment - snapshot_brain_montage - plot_arrowmap - set_3d_backend - get_3d_backend - use_3d_backend - set_3d_options - set_3d_view - set_3d_title - create_3d_figure - close_3d_figure - close_all_3d_figures - get_brain_class - set_browser_backend - get_browser_backend - use_browser_backend diff --git a/doc/whats_new.rst b/doc/whats_new.rst deleted file mode 100644 index 8cf178a3571..00000000000 --- a/doc/whats_new.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. include:: links.inc - -.. _whats_new: - -What's new -========== - -.. currentmodule:: mne - -.. include:: changes/latest.inc -.. include:: changes/1.3.inc -.. include:: changes/1.2.inc -.. include:: changes/1.1.inc -.. include:: changes/1.0.inc -.. include:: changes/0.24.inc -.. include:: changes/0.23.inc -.. include:: changes/0.22.inc -.. include:: changes/0.21.inc -.. include:: changes/0.20.inc -.. include:: changes/0.19.inc -.. include:: changes/0.18.inc -.. include:: changes/0.17.inc -.. include:: changes/0.16.inc -.. include:: changes/0.15.inc -.. include:: changes/0.14.inc -.. include:: changes/0.13.inc -.. include:: changes/0.12.inc -.. include:: changes/0.11.inc -.. include:: changes/0.10.inc -.. include:: changes/0.9.inc -.. include:: changes/0.8.inc -.. include:: changes/0.7.inc -.. include:: changes/0.6.inc -.. include:: changes/0.5.inc -.. include:: changes/0.4.inc -.. include:: changes/0.3.inc -.. include:: changes/0.2.inc -.. include:: changes/0.1.inc -.. include:: changes/names.inc -.. 
include:: links.inc diff --git a/environment.yml b/environment.yml index bbd8deb3c1a..b72d39925e4 100644 --- a/environment.yml +++ b/environment.yml @@ -1,58 +1,67 @@ +# THIS FILE IS AUTO-GENERATED BY tools/hooks/update_environment_file.py AND WILL BE OVERWRITTEN name: mne channels: -- conda-forge + - conda-forge dependencies: -- python>=3.8 -- pip -- numpy -- scipy -- matplotlib -- tqdm -- pooch>=1.5 -- decorator -- h5io -- packaging -- numba -- pandas -- xlrd -- scikit-learn -- h5py -- jinja2 -- pillow -- statsmodels -- jupyter -- ipython !=8.7.0 -- joblib -- psutil -- numexpr -- imageio -- spyder-kernels>=1.10.0 -- imageio-ffmpeg>=0.4.1 -- vtk>=9.2 -- traitlets -- pyvista>=0.32,!=0.35.2,<0.38.0 -- pyvistaqt>=0.4 -- qdarkstyle -- darkdetect -- dipy -- nibabel -- openmeeg>=2.5.5 -- nilearn -- python-picard -- qtpy -- pyqt!=5.15.3,!=5.15.4 -- mne -- mffpy>=0.5.7 -- ipyevents -- ipywidgets -- ipympl -- ipyvtklink -- jupyter_client -- nbformat -- nbclient -- mne-qt-browser -- pymatreader -- eeglabio -- edflib-python -- pybv -- mamba + - python >=3.10 + - antio >=0.5.0 + - curryreader >=0.1.2 + - darkdetect + - decorator + - defusedxml + - dipy + - edfio >=0.4.10 + - eeglabio + - filelock >=3.18.0 + - h5io >=0.2.4 + - h5py + - imageio >=2.6.1 + - imageio-ffmpeg >=0.4.1 + - ipyevents + - ipympl + - ipython !=8.7.0 + - ipywidgets + - jinja2 + - joblib + - jupyter + - lazy_loader >=0.3 + - mamba + - matplotlib >=3.8 + - mffpy >=0.5.7 + - mne-qt-browser + - nibabel + - nilearn + - nomkl + - numba + - numpy >=1.26,<3 + - openmeeg >=2.5.7 + - packaging + - pandas >=2.1 + - pillow + - pip + - pooch >=1.5 + - pyarrow + - pybv + - pymatreader + - PySide6 !=6.9.1 + - python-neo + - python-picard + - pyvista >=0.42.1 + - pyvistaqt >=0.11 + - qdarkstyle !=3.2.2 + - qtpy + - scikit-learn >=1.3 + - scipy >=1.11 + - sip + - snirf + - statsmodels + - threadpoolctl + - tqdm + - traitlets + - trame + - trame-vtk + - trame-vuetify + - vtk >=9.2 + - xlrd + - pip: + - pyobjc-framework-Cocoa >=5.2.0;platform_system=='Darwin' diff --git a/examples/datasets/brainstorm_data.py b/examples/datasets/brainstorm_data.py index 61bf9087476..ab5499fea71 100644 --- a/examples/datasets/brainstorm_data.py +++ b/examples/datasets/brainstorm_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-brainstorm-raw: @@ -7,14 +6,14 @@ ===================================== Here we compute the evoked from raw for the Brainstorm -tutorial dataset. For comparison, see :footcite:`TadelEtAl2011` and: - - https://neuroimage.usc.edu/brainstorm/Tutorials/MedianNerveCtf +tutorial dataset. For comparison, see :footcite:`TadelEtAl2011` and +https://neuroimage.usc.edu/brainstorm/Tutorials/MedianNerveCtf. """ # Authors: Mainak Jas # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% @@ -31,29 +30,40 @@ data_path = bst_raw.data_path() -raw_path = (data_path / 'MEG' / 'bst_raw' / - 'subj001_somatosensory_20111109_01_AUX-f.ds') +raw_path = data_path / "MEG" / "bst_raw" / "subj001_somatosensory_20111109_01_AUX-f.ds" # Here we crop to half the length to save memory raw = read_raw_ctf(raw_path).crop(0, 120).load_data() raw.plot() # set EOG channel -raw.set_channel_types({'EEG058': 'eog'}) -raw.set_eeg_reference('average', projection=True) +raw.set_channel_types({"EEG058": "eog"}) +raw.set_eeg_reference("average", projection=True) # show power line interference and remove it -raw.compute_psd(tmax=60).plot(average=False) -raw.notch_filter(np.arange(60, 181, 60), fir_design='firwin') +raw.compute_psd(tmax=60).plot( + average=False, amplitude=False, picks="data", exclude="bads" +) +raw.notch_filter(np.arange(60, 181, 60), fir_design="firwin") -events = mne.find_events(raw, stim_channel='UPPT001') +events = mne.find_events(raw, stim_channel="UPPT001") # pick MEG channels -picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, - exclude='bads') +picks = mne.pick_types( + raw.info, meg=True, eeg=False, stim=False, eog=True, exclude="bads" +) # Compute epochs -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), reject=reject, preload=False) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=picks, + baseline=(None, 0), + reject=reject, + preload=False, +) # compute evoked evoked = epochs.average() @@ -69,11 +79,10 @@ evoked.shift_time(-0.004) # plot the result -evoked.plot(time_unit='s') +evoked.plot(time_unit="s") # show topomaps -evoked.plot_topomap(times=np.array([0.016, 0.030, 0.060, 0.070]), - time_unit='s') +evoked.plot_topomap(times=np.array([0.016, 0.030, 0.060, 0.070]), time_unit="s") # %% # References diff --git a/examples/datasets/hf_sef_data.py b/examples/datasets/hf_sef_data.py index fee1630277c..44aa6e8f9a4 100644 --- a/examples/datasets/hf_sef_data.py +++ b/examples/datasets/hf_sef_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-hf-sef-data: @@ -11,16 +10,16 @@ # Author: Jussi Nurminen (jnu@iki.fi) # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% +import os import mne -import os from mne.datasets import hf_sef -fname_evoked = os.path.join(hf_sef.data_path(), - 'MEG/subject_b/hf_sef_15min-ave.fif') +fname_evoked = os.path.join(hf_sef.data_path(), "MEG/subject_b/hf_sef_15min-ave.fif") print(__doc__) @@ -35,7 +34,7 @@ # %% # Compare high-pass filtered and unfiltered data on a single channel -ch = 'MEG0443' +ch = "MEG0443" pick = evoked.ch_names.index(ch) -edi = {'HF': evoked_hp, 'Regular': evoked} +edi = {"HF": evoked_hp, "Regular": evoked} mne.viz.plot_compare_evokeds(edi, picks=pick) diff --git a/examples/datasets/kernel_phantom.py b/examples/datasets/kernel_phantom.py new file mode 100644 index 00000000000..da17f708454 --- /dev/null +++ b/examples/datasets/kernel_phantom.py @@ -0,0 +1,108 @@ +""" +.. _ex-kernel-opm-phantom: + +Kernel OPM phantom data +======================= + +In this dataset, a Neuromag phantom was placed inside the Kernel OPM helmet and +stimulated with 7 modules active (121 channels). Here we show some example traces. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
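Further down, this new Kernel phantom example builds a "ground truth" mne.Dipole from the known phantom positions, padding the amplitude and goodness-of-fit arguments with placeholder arrays. A minimal standalone sketch of that container (every value below is a synthetic placeholder, not phantom geometry):

import numpy as np

import mne

n = 3
times = np.arange(n) / 1000.0
pos = np.zeros((n, 3))                   # dipole positions (m), synthetic
amplitude = np.ones(n)                   # placeholder amplitudes
ori = np.tile([0.0, 0.0, 1.0], (n, 1))   # unit orientations
gof = np.ones(n)                         # placeholder goodness-of-fit values
dip = mne.Dipole(times, pos, amplitude, ori, gof)
print(dip)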
+ +import numpy as np + +import mne + +data_path = mne.datasets.phantom_kernel.data_path() +fname = data_path / "phantom_32_100nam_raw.fif" +raw = mne.io.read_raw_fif(fname).load_data() +events = mne.find_events(raw, stim_channel="STI101") + +# Bads identified by inspecting averages +raw.info["bads"] = [ + "RC2.bx.ave", + "LC3.bx.ave", + "RC2.by.7", + "RC2.bz.7", + "RC2.bx.4", + "RC2.by.4", + "LC3.bx.5", +] +# Drop the module-average channels +raw.drop_channels([ch_name for ch_name in raw.ch_names if ".ave" in ch_name]) +# Add field correction projectors +raw.add_proj(mne.preprocessing.compute_proj_hfc(raw.info, order=2)) +raw.pick("meg", exclude="bads") +raw.filter(0.5, 40) +epochs = mne.Epochs( + raw, + events, + tmin=-0.1, + tmax=0.25, + decim=5, + preload=True, + baseline=(None, 0), +) +evoked = epochs["17"].average() # a high-SNR dipole for these data +fig = evoked.plot() +t_peak = 0.016 # based on visual inspection of evoked +fig.axes[0].axvline(t_peak, color="k", ls=":", lw=3, zorder=2) + +# %% +# The data covariance has an interesting structure because of densely packed sensors: + +cov = mne.compute_covariance(epochs, tmax=-0.01) +mne.viz.plot_cov(cov, raw.info) + +# %% +# So let's be careful and compute rank ahead of time and regularize: + +rank = mne.compute_rank(epochs, tol=1e-3, tol_kind="relative") +cov = mne.compute_covariance(epochs, tmax=-0.01, rank=rank, method="shrunk") +mne.viz.plot_cov(cov, raw.info) + +# %% +# Look at our alignment: + +sphere = mne.make_sphere_model(r0=(0.0, 0.0, 0.0), head_radius=0.08) +trans = mne.transforms.Transform("head", "mri", np.eye(4)) +align_kwargs = dict( + trans=trans, + bem=sphere, + surfaces={"outer_skin": 0.2}, + show_axes=True, +) +mne.viz.plot_alignment( + raw.info, + coord_frame="meg", + meg=dict(sensors=1.0, helmet=0.05), + **align_kwargs, +) + +# %% +# Let's do dipole fits, which are not great because the dev_head_t is approximate and +# the sensor coverage is sparse: + +data = list() +for ii in range(1, 33): + evoked = epochs[str(ii)][1:-1].average().crop(t_peak, t_peak) + data.append(evoked.data[:, 0]) +evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.0) +del epochs +dip, residual = mne.fit_dipole(evoked, cov, sphere, n_jobs=None) +actual_pos, actual_ori = mne.dipole.get_phantom_dipoles() +actual_amp = np.ones(len(dip)) # fake amp, needed to create Dipole instance +actual_gof = np.ones(len(dip)) # fake GOF, needed to create Dipole instance +dip_true = mne.Dipole(dip.times, actual_pos, actual_amp, actual_ori, actual_gof) + +fig = mne.viz.plot_alignment( + evoked.info, coord_frame="head", meg="sensors", **align_kwargs +) +mne.viz.plot_dipole_locations( + dipoles=dip_true, mode="arrow", color=(0.0, 0.0, 0.0), fig=fig +) +mne.viz.plot_dipole_locations(dipoles=dip, mode="arrow", color=(0.2, 1.0, 0.5), fig=fig) +mne.viz.set_3d_view(figure=fig, azimuth=30, elevation=70, distance=0.4) diff --git a/examples/datasets/limo_data.py b/examples/datasets/limo_data.py index c897bc69cf8..f7f6d58cf19 100644 --- a/examples/datasets/limo_data.py +++ b/examples/datasets/limo_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-limo-data: @@ -36,17 +35,16 @@ # Authors: Jose C. Garcia Alanis # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% - -import numpy as np import matplotlib.pyplot as plt +import numpy as np +from mne import combine_evoked from mne.datasets.limo import load_data from mne.stats import linear_regression -from mne.viz import plot_events, plot_compare_evokeds -from mne import combine_evoked - +from mne.viz import plot_compare_evokeds, plot_events print(__doc__) @@ -109,11 +107,11 @@ print(limo_epochs.metadata.head()) # %% -# Now let's take a closer look at the information in the epochs +# Now let us take a closer look at the information in the epochs # metadata. # We want include all columns in the summary table -epochs_summary = limo_epochs.metadata.describe(include='all').round(3) +epochs_summary = limo_epochs.metadata.describe(include="all").round(3) print(epochs_summary) # %% @@ -138,13 +136,13 @@ ts_args = dict(xlim=(-0.25, 0.5)) # plot evoked response for face A -limo_epochs['Face/A'].average().plot_joint(times=[0.15], - title='Evoked response: Face A', - ts_args=ts_args) +limo_epochs["Face/A"].average().plot_joint( + times=[0.15], title="Evoked response: Face A", ts_args=ts_args +) # and face B -limo_epochs['Face/B'].average().plot_joint(times=[0.15], - title='Evoked response: Face B', - ts_args=ts_args) +limo_epochs["Face/B"].average().plot_joint( + times=[0.15], title="Evoked response: Face B", ts_args=ts_args +) # %% # We can also compute the difference wave contrasting Face A and Face B. @@ -152,12 +150,12 @@ # differences among these face-stimuli. # Face A minus Face B -difference_wave = combine_evoked([limo_epochs['Face/A'].average(), - limo_epochs['Face/B'].average()], - weights=[1, -1]) +difference_wave = combine_evoked( + [limo_epochs["Face/A"].average(), limo_epochs["Face/B"].average()], weights=[1, -1] +) # plot difference wave -difference_wave.plot_joint(times=[0.15], title='Difference Face A - Face B') +difference_wave.plot_joint(times=[0.15], title="Difference Face A - Face B") # %% # As expected, no clear pattern appears when contrasting @@ -168,11 +166,10 @@ # Create a dictionary containing the evoked responses conditions = ["Face/A", "Face/B"] -evokeds = {condition: limo_epochs[condition].average() - for condition in conditions} +evokeds = {condition: limo_epochs[condition].average() for condition in conditions} # concentrate analysis an occipital electrodes (e.g. B11) -pick = evokeds["Face/A"].ch_names.index('B11') +pick = evokeds["Face/A"].ch_names.index("B11") # compare evoked responses plot_compare_evokeds(evokeds, picks=pick, ylim=dict(eeg=(-15, 7.5))) @@ -189,26 +186,30 @@ # one could expect that faces with high phase-coherence should evoke stronger # activation patterns along occipital electrodes. 
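The hunk that follows selects epochs by comparing the metadata column directly (limo_epochs[phase_coh == level]). When metadata is attached, Epochs also accept pandas-style query strings, which can make such selections easier to read; a minimal sketch on synthetic data (the column name "coherence" and all values are invented):

import numpy as np
import pandas as pd

import mne

rng = np.random.default_rng(0)
info = mne.create_info(["EEG 001", "EEG 002"], sfreq=100.0, ch_types="eeg")
data = rng.normal(size=(6, 2, 50))  # 6 epochs, 2 channels, 50 samples
metadata = pd.DataFrame({"coherence": [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]})
epochs = mne.EpochsArray(data, info, metadata=metadata, verbose="error")

# pandas-style query strings select epochs through the attached metadata
high = epochs["coherence > 0.5"]
print(len(high))  # 3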
-phase_coh = limo_epochs.metadata['phase-coherence'] +phase_coh = limo_epochs.metadata["phase-coherence"] # get levels of phase coherence levels = sorted(phase_coh.unique()) # create labels for levels of phase coherence (i.e., 0 - 85%) -labels = ["{0:.2f}".format(i) for i in np.arange(0., 0.90, 0.05)] +labels = [f"{i:.2f}" for i in np.arange(0.0, 0.90, 0.05)] # create dict of evokeds for each level of phase-coherence -evokeds = {label: limo_epochs[phase_coh == level].average() - for level, label in zip(levels, labels)} +evokeds = { + label: limo_epochs[phase_coh == level].average() + for level, label in zip(levels, labels) +} # pick channel to plot -electrodes = ['C22', 'B11'] +electrodes = ["C22", "B11"] # create figures for electrode in electrodes: fig, ax = plt.subplots(figsize=(8, 4)) - plot_compare_evokeds(evokeds, - axes=ax, - ylim=dict(eeg=(-20, 15)), - picks=electrode, - cmap=("Phase coherence", "magma")) + plot_compare_evokeds( + evokeds, + axes=ax, + ylim=dict(eeg=(-20, 15)), + picks=electrode, + cmap=("Phase coherence", "magma"), + ) # %% # As shown above, there are some considerable differences between the @@ -226,7 +227,7 @@ # present in the data: limo_epochs.interpolate_bads(reset_bads=True) -limo_epochs.drop_channels(['EXG1', 'EXG2', 'EXG3', 'EXG4']) +limo_epochs.drop_channels(["EXG1", "EXG2", "EXG3", "EXG4"]) # %% # Define predictor variables and design matrix @@ -239,21 +240,19 @@ # ``limo_epochs.metadata``: phase-coherence and Face A vs. Face B. # name of predictors + intercept -predictor_vars = ['face a - face b', 'phase-coherence', 'intercept'] +predictor_vars = ["face a - face b", "phase-coherence", "intercept"] # create design matrix -design = limo_epochs.metadata[['phase-coherence', 'face']].copy() -design['face a - face b'] = np.where(design['face'] == 'A', 1, -1) -design['intercept'] = 1 +design = limo_epochs.metadata[["phase-coherence", "face"]].copy() +design["face a - face b"] = np.where(design["face"] == "A", 1, -1) +design["intercept"] = 1 design = design[predictor_vars] # %% # Now we can set up the linear model to be used in the analysis using # MNE-Python's func:`~mne.stats.linear_regression` function. -reg = linear_regression(limo_epochs, - design_matrix=design, - names=predictor_vars) +reg = linear_regression(limo_epochs, design_matrix=design, names=predictor_vars) # %% # Extract regression coefficients @@ -263,8 +262,8 @@ # which is a dictionary of evoked objects containing # multiple inferential measures for each predictor in the design matrix. -print('predictors are:', list(reg)) -print('fields are:', [field for field in getattr(reg['intercept'], '_fields')]) +print("predictors are:", list(reg)) +print("fields are:", [field for field in getattr(reg["intercept"], "_fields")]) # %% # Plot model results @@ -280,25 +279,23 @@ # the activity measured at occipital electrodes around 200 to 250 ms following # stimulus onset. -reg['phase-coherence'].beta.plot_joint(ts_args=ts_args, - title='Effect of Phase-coherence', - times=[0.23]) +reg["phase-coherence"].beta.plot_joint( + ts_args=ts_args, title="Effect of Phase-coherence", times=[0.23] +) # %% # We can also plot the corresponding T values. # use unit=False and scale=1 to keep values at their original # scale (i.e., avoid conversion to micro-volt). 
-ts_args = dict(xlim=(-0.25, 0.5), - unit=False) -topomap_args = dict(scalings=dict(eeg=1), - average=0.05) +ts_args = dict(xlim=(-0.25, 0.5), unit=False) +topomap_args = dict(scalings=dict(eeg=1), average=0.05) # sphinx_gallery_thumbnail_number = 9 -fig = reg['phase-coherence'].t_val.plot_joint(ts_args=ts_args, - topomap_args=topomap_args, - times=[0.23]) -fig.axes[0].set_ylabel('T-value') +fig = reg["phase-coherence"].t_val.plot_joint( + ts_args=ts_args, topomap_args=topomap_args, times=[0.23] +) +fig.axes[0].set_ylabel("T-value") # %% # Conversely, there appears to be no (or very small) systematic effects when @@ -306,9 +303,9 @@ # difference wave approach presented above. ts_args = dict(xlim=(-0.25, 0.5)) -reg['face a - face b'].beta.plot_joint(ts_args=ts_args, - title='Effect of Face A vs. Face B', - times=[0.23]) +reg["face a - face b"].beta.plot_joint( + ts_args=ts_args, title="Effect of Face A vs. Face B", times=[0.23] +) # %% # References diff --git a/examples/datasets/opm_data.py b/examples/datasets/opm_data.py index b849c5b098c..8f68e29d318 100644 --- a/examples/datasets/opm_data.py +++ b/examples/datasets/opm_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-opm-somatosensory: @@ -11,19 +10,23 @@ we demonstrate how to localize these custom OPM data in MNE. """ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + # sphinx_gallery_thumbnail_number = 4 import numpy as np + import mne data_path = mne.datasets.opm.data_path() -subject = 'OPM_sample' -subjects_dir = data_path / 'subjects' -raw_fname = data_path / 'MEG' / 'OPM' / 'OPM_SEF_raw.fif' -bem_fname = (subjects_dir / subject / 'bem' / - f'{subject}-5120-5120-5120-bem-sol.fif') -fwd_fname = data_path / 'MEG' / 'OPM' / 'OPM_sample-fwd.fif' -coil_def_fname = data_path / 'MEG' / 'OPM' / 'coil_def.dat' +subject = "OPM_sample" +subjects_dir = data_path / "subjects" +raw_fname = data_path / "MEG" / "OPM" / "OPM_SEF_raw.fif" +bem_fname = subjects_dir / subject / "bem" / f"{subject}-5120-5120-5120-bem-sol.fif" +fwd_fname = data_path / "MEG" / "OPM" / "OPM_sample-fwd.fif" +coil_def_fname = data_path / "MEG" / "OPM" / "coil_def.dat" # %% # Prepare data for localization @@ -31,8 +34,8 @@ # First we filter and epoch the data: raw = mne.io.read_raw_fif(raw_fname, preload=True) -raw.filter(None, 90, h_trans_bandwidth=10.) -raw.notch_filter(50., notch_widths=1) +raw.filter(None, 90, h_trans_bandwidth=10.0) +raw.notch_filter(50.0, notch_widths=1) # Set epoch rejection threshold a bit larger than for SQUIDs @@ -41,16 +44,26 @@ # Find median nerve stimulator trigger event_id = dict(Median=257) -events = mne.find_events(raw, stim_channel='STI101', mask=257, mask_type='and') +events = mne.find_events(raw, stim_channel="STI101", mask=257, mask_type="and") picks = mne.pick_types(raw.info, meg=True, eeg=False) # We use verbose='error' to suppress warning about decimation causing aliasing, # ideally we would low-pass and then decimate instead -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, verbose='error', - reject=reject, picks=picks, proj=False, decim=10, - preload=True) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + verbose="error", + reject=reject, + picks=picks, + proj=False, + decim=10, + preload=True, +) evoked = epochs.average() evoked.plot() -cov = mne.compute_covariance(epochs, tmax=0.) +cov = mne.compute_covariance(epochs, tmax=0.0) del epochs, raw # %% @@ -64,7 +77,7 @@ # but should be fine for these analyses. 
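The next hunk keeps the identity head-to-MRI transform used by this OPM example. For reference, the same kind of transform can be constructed with the matrix spelled out, as the Kernel phantom example earlier in this patch does (a sketch; np.eye(4) simply makes the identity explicit):

import numpy as np

import mne

# Identity head->MRI transform, equivalent to Transform("head", "mri") with no matrix
trans = mne.transforms.Transform("head", "mri", np.eye(4))
print(trans)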
bem = mne.read_bem_solution(bem_fname) -trans = mne.transforms.Transform('head', 'mri') # identity transformation +trans = mne.transforms.Transform("head", "mri") # identity transformation # To compute the forward solution, we must # provide our temporary/custom coil definitions, which can be done as:: @@ -79,12 +92,18 @@ mne.convert_forward_solution(fwd, force_fixed=True, copy=False) with mne.use_coil_def(coil_def_fname): - fig = mne.viz.plot_alignment(evoked.info, trans=trans, subject=subject, - subjects_dir=subjects_dir, - surfaces=('head', 'pial'), bem=bem) - -mne.viz.set_3d_view(figure=fig, azimuth=45, elevation=60, distance=0.4, - focalpoint=(0.02, 0, 0.04)) + fig = mne.viz.plot_alignment( + evoked.info, + trans=trans, + subject=subject, + subjects_dir=subjects_dir, + surfaces=("head", "pial"), + bem=bem, + ) + +mne.viz.set_3d_view( + figure=fig, azimuth=45, elevation=60, distance=0.4, focalpoint=(0.02, 0, 0.04) +) # %% # Perform dipole fitting @@ -92,15 +111,17 @@ # Fit dipoles on a subset of time points with mne.use_coil_def(coil_def_fname): - dip_opm, _ = mne.fit_dipole(evoked.copy().crop(0.040, 0.080), - cov, bem, trans, verbose=True) + dip_opm, _ = mne.fit_dipole( + evoked.copy().crop(0.040, 0.080), cov, bem, trans, verbose=True + ) idx = np.argmax(dip_opm.gof) -print('Best dipole at t=%0.1f ms with %0.1f%% GOF' - % (1000 * dip_opm.times[idx], dip_opm.gof[idx])) +print( + f"Best dipole at t={1000 * dip_opm.times[idx]:0.1f} ms with " + f"{dip_opm.gof[idx]:0.1f}% GOF" +) # Plot N20m dipole as an example -dip_opm.plot_locations(trans, subject, subjects_dir, - mode='orthoview', idx=idx) +dip_opm.plot_locations(trans, subject, subjects_dir, mode="orthoview", idx=idx) # %% # Perform minimum-norm localization @@ -110,18 +131,24 @@ # areas we are sensitive to might be a good idea. inverse_operator = mne.minimum_norm.make_inverse_operator( - evoked.info, fwd, cov, loose=0., depth=None) + evoked.info, fwd, cov, loose=0.0, depth=None +) del fwd, cov method = "MNE" -snr = 3. -lambda2 = 1. / snr ** 2 +snr = 3.0 +lambda2 = 1.0 / snr**2 stc = mne.minimum_norm.apply_inverse( - evoked, inverse_operator, lambda2, method=method, - pick_ori=None, verbose=True) + evoked, inverse_operator, lambda2, method=method, pick_ori=None, verbose=True +) # Plot source estimate at time of best dipole fit -brain = stc.plot(hemi='rh', views='lat', subjects_dir=subjects_dir, - initial_time=dip_opm.times[idx], - clim=dict(kind='percent', lims=[99, 99.9, 99.99]), - size=(400, 300), background='w') +brain = stc.plot( + hemi="rh", + views="lat", + subjects_dir=subjects_dir, + initial_time=dip_opm.times[idx], + clim=dict(kind="percent", lims=[99, 99.9, 99.99]), + size=(800, 600), + background="w", +) diff --git a/examples/datasets/spm_faces_dataset.py b/examples/datasets/spm_faces_dataset.py new file mode 100644 index 00000000000..32df7d1a9ed --- /dev/null +++ b/examples/datasets/spm_faces_dataset.py @@ -0,0 +1,102 @@ +""" +.. _ex-spm-faces: + +========================================== +From raw data to dSPM on SPM Faces dataset +========================================== + +Runs a full pipeline using MNE-Python. This example does quite a bit of processing, so +even on a fast machine it can take several minutes to complete. +""" +# Authors: Alexandre Gramfort +# Denis Engemann +# +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
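The rewritten SPM faces example below builds its contrast with mne.combine_evoked and explicit weights. A tiny self-contained sketch of that weighting on synthetic one-channel evokeds (channel name and values invented; the convention here is simply first minus second):

import numpy as np

import mne

info = mne.create_info(["MEG 001"], sfreq=1000.0, ch_types="mag")
faces = mne.EvokedArray(np.ones((1, 10)), info, comment="faces")
scrambled = mne.EvokedArray(np.zeros((1, 10)), info, comment="scrambled")

# Explicit weights give a difference wave (here: faces minus scrambled)
contrast = mne.combine_evoked([faces, scrambled], weights=[1, -1])
print(contrast.data.mean())  # 1.0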
+ +import mne +from mne import combine_evoked, io +from mne.datasets import spm_face +from mne.minimum_norm import apply_inverse, make_inverse_operator +from mne.preprocessing import ICA, create_eog_epochs + +print(__doc__) + +data_path = spm_face.data_path() +subjects_dir = data_path / "subjects" +spm_path = data_path / "MEG" / "spm" + +# %% +# Load data, filter it, and fit ICA. + +raw_fname = spm_path / "SPM_CTF_MEG_example_faces1_3D.ds" +raw = io.read_raw_ctf(raw_fname, preload=True) # Take first run +# Here to save memory and time we'll downsample heavily -- this is not +# advised for real data as it can effectively jitter events! +raw.resample(100) +raw.filter(1.0, None) # high-pass +reject = dict(mag=5e-12) +ica = ICA(n_components=0.95, max_iter="auto", random_state=0) +ica.fit(raw, reject=reject) +# compute correlation scores, get bad indices sorted by score +eog_epochs = create_eog_epochs(raw, ch_name="MRT31-2908", reject=reject) +eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name="MRT31-2908") +ica.plot_scores(eog_scores, eog_inds) # see scores the selection is based on +ica.plot_components(eog_inds) # view topographic sensitivity of components +ica.exclude += eog_inds[:1] # we saw the 2nd ECG component looked too dipolar +ica.plot_overlay(eog_epochs.average()) # inspect artifact removal + +# %% +# Epoch data and apply ICA. +events = mne.find_events(raw, stim_channel="UPPT001") +event_ids = {"faces": 1, "scrambled": 2} +tmin, tmax = -0.2, 0.6 +epochs = mne.Epochs( + raw, + events, + event_ids, + tmin, + tmax, + picks="meg", + baseline=None, + preload=True, + reject=reject, +) +del raw +ica.apply(epochs) # clean data, default in place +evoked = [epochs[k].average() for k in event_ids] +contrast = combine_evoked(evoked, weights=[-1, 1]) # Faces - scrambled +evoked.append(contrast) +for e in evoked: + e.plot(ylim=dict(mag=[-400, 400])) + +# %% +# Estimate noise covariance and look at the whitened evoked data + +noise_cov = mne.compute_covariance(epochs, tmax=0, method="shrunk", rank=None) +evoked[0].plot_white(noise_cov) + +# %% +# Compute forward model + +trans_fname = spm_path / "SPM_CTF_MEG_example_faces1_3D_raw-trans.fif" +src = subjects_dir / "spm" / "bem" / "spm-oct-6-src.fif" +bem = subjects_dir / "spm" / "bem" / "spm-5120-5120-5120-bem-sol.fif" +forward = mne.make_forward_solution(contrast.info, trans_fname, src, bem) + +# %% +# Compute inverse solution and plot + +# sphinx_gallery_thumbnail_number = 8 + +snr = 3.0 +lambda2 = 1.0 / snr**2 +inverse_operator = make_inverse_operator(contrast.info, forward, noise_cov) +stc = apply_inverse(contrast, inverse_operator, lambda2, method="dSPM", pick_ori=None) +brain = stc.plot( + hemi="both", + subjects_dir=subjects_dir, + initial_time=0.170, + views=["ven"], + clim={"kind": "value", "lims": [3.0, 6.0, 9.0]}, +) diff --git a/examples/datasets/spm_faces_dataset_sgskip.py b/examples/datasets/spm_faces_dataset_sgskip.py deleted file mode 100644 index ce538d4b382..00000000000 --- a/examples/datasets/spm_faces_dataset_sgskip.py +++ /dev/null @@ -1,138 +0,0 @@ -# -*- coding: utf-8 -*- -""" -.. _ex-spm-faces: - -========================================== -From raw data to dSPM on SPM Faces dataset -========================================== - -Runs a full pipeline using MNE-Python: - - - artifact removal - - averaging Epochs - - forward model computation - - source reconstruction using dSPM on the contrast : "faces - scrambled" - -.. 
note:: This example does quite a bit of processing, so even on a - fast machine it can take several minutes to complete. -""" -# Authors: Alexandre Gramfort -# Denis Engemann -# -# License: BSD-3-Clause - -# %% - -# sphinx_gallery_thumbnail_number = 10 - -import matplotlib.pyplot as plt - -import mne -from mne.datasets import spm_face -from mne.preprocessing import ICA, create_eog_epochs -from mne import io, combine_evoked -from mne.minimum_norm import make_inverse_operator, apply_inverse - -print(__doc__) - -data_path = spm_face.data_path() -subjects_dir = data_path / 'subjects' -spm_path = data_path / 'MEG' / 'spm' - -# %% -# Load and filter data, set up epochs - -raw_fname = spm_path / 'SPM_CTF_MEG_example_faces%d_3D.ds' - -raw = io.read_raw_ctf(raw_fname % 1, preload=True) # Take first run -# Here to save memory and time we'll downsample heavily -- this is not -# advised for real data as it can effectively jitter events! -raw.resample(120., npad='auto') - -picks = mne.pick_types(raw.info, meg=True, exclude='bads') -raw.filter(1, 30, method='fir', fir_design='firwin') - -events = mne.find_events(raw, stim_channel='UPPT001') - -# plot the events to get an idea of the paradigm -mne.viz.plot_events(events, raw.info['sfreq']) - -event_ids = {"faces": 1, "scrambled": 2} - -tmin, tmax = -0.2, 0.6 -baseline = None # no baseline as high-pass is applied -reject = dict(mag=5e-12) - -epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks, - baseline=baseline, preload=True, reject=reject) - -# Fit ICA, find and remove major artifacts -ica = ICA(n_components=0.95, max_iter='auto', random_state=0) -ica.fit(raw, decim=1, reject=reject) - -# compute correlation scores, get bad indices sorted by score -eog_epochs = create_eog_epochs(raw, ch_name='MRT31-2908', reject=reject) -eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name='MRT31-2908') -ica.plot_scores(eog_scores, eog_inds) # see scores the selection is based on -ica.plot_components(eog_inds) # view topographic sensitivity of components -ica.exclude += eog_inds[:1] # we saw the 2nd ECG component looked too dipolar -ica.plot_overlay(eog_epochs.average()) # inspect artifact removal -ica.apply(epochs) # clean data, default in place - -evoked = [epochs[k].average() for k in event_ids] - -contrast = combine_evoked(evoked, weights=[-1, 1]) # Faces - scrambled - -evoked.append(contrast) - -for e in evoked: - e.plot(ylim=dict(mag=[-400, 400])) - -plt.show() - -# estimate noise covarariance -noise_cov = mne.compute_covariance(epochs, tmax=0, method='shrunk', - rank=None) - -# %% -# Visualize fields on MEG helmet - -# The transformation here was aligned using the dig-montage. It's included in -# the spm_faces dataset and is named SPM_dig_montage.fif. 
-trans_fname = spm_path / 'SPM_CTF_MEG_example_faces1_3D_raw-trans.fif' - -maps = mne.make_field_map(evoked[0], trans_fname, subject='spm', - subjects_dir=subjects_dir, n_jobs=None) - -evoked[0].plot_field(maps, time=0.170) - -# %% -# Look at the whitened evoked daat - -evoked[0].plot_white(noise_cov) - -# %% -# Compute forward model - -src = subjects_dir / 'spm' / 'bem' / 'spm-oct-6-src.fif' -bem = subjects_dir / 'spm' / 'bem' / 'spm-5120-5120-5120-bem-sol.fif' -forward = mne.make_forward_solution(contrast.info, trans_fname, src, bem) - -# %% -# Compute inverse solution - -snr = 3.0 -lambda2 = 1.0 / snr ** 2 -method = 'dSPM' - -inverse_operator = make_inverse_operator(contrast.info, forward, noise_cov, - loose=0.2, depth=0.8) - -# Compute inverse solution on contrast -stc = apply_inverse(contrast, inverse_operator, lambda2, method, pick_ori=None) -# stc.save('spm_%s_dSPM_inverse' % contrast.comment) - -# Plot contrast in 3D with mne.viz.Brain if available -brain = stc.plot(hemi='both', subjects_dir=subjects_dir, initial_time=0.170, - views=['ven'], clim={'kind': 'value', 'lims': [3., 6., 9.]}) -# brain.save_image('dSPM_map.png') diff --git a/examples/decoding/decoding_csp_eeg.py b/examples/decoding/decoding_csp_eeg.py index dcb91e66ad4..758c674e16e 100644 --- a/examples/decoding/decoding_csp_eeg.py +++ b/examples/decoding/decoding_csp_eeg.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-decoding-csp-eeg: @@ -11,28 +10,29 @@ See https://en.wikipedia.org/wiki/Common_spatial_pattern and :footcite:`Koles1991`. The EEGBCI dataset is documented in -:footcite:`SchalkEtAl2004` and is available at PhysioNet -:footcite:`GoldbergerEtAl2000`. +:footcite:`SchalkEtAl2004` and on the +`PhysioNet documentation page `_. +The dataset is available at PhysioNet :footcite:`GoldbergerEtAl2000`. """ + # Authors: Martin Billinger # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% - -import numpy as np import matplotlib.pyplot as plt - -from sklearn.pipeline import Pipeline +import numpy as np from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.model_selection import ShuffleSplit, cross_val_score +from sklearn.pipeline import Pipeline -from mne import Epochs, pick_types, events_from_annotations +from mne import Epochs, pick_types from mne.channels import make_standard_montage -from mne.io import concatenate_raws, read_raw_edf from mne.datasets import eegbci -from mne.decoding import CSP +from mne.decoding import CSP, get_spatial_filter_from_estimator +from mne.io import concatenate_raws, read_raw_edf print(__doc__) @@ -41,30 +41,36 @@ # avoid classification of evoked responses by using epochs that start 1s after # cue onset. -tmin, tmax = -1., 4. 
-event_id = dict(hands=2, feet=3) -subject = 1 +tmin, tmax = -1.0, 4.0 +subjects = 1 runs = [6, 10, 14] # motor imagery: hands vs feet -raw_fnames = eegbci.load_data(subject, runs) +raw_fnames = eegbci.load_data(subjects, runs) raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames]) eegbci.standardize(raw) # set channel names -montage = make_standard_montage('standard_1005') +montage = make_standard_montage("standard_1005") raw.set_montage(montage) +raw.annotations.rename(dict(T1="hands", T2="feet")) # as documented on PhysioNet +raw.set_eeg_reference(projection=True) # Apply band-pass filter -raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge') +raw.filter(7.0, 30.0, fir_design="firwin", skip_by_annotation="edge") -events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3)) - -picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, - exclude='bads') +picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, exclude="bads") # Read epochs (train will be done only between 1 and 2s) # Testing will be done with a running classifier -epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, - baseline=None, preload=True) -epochs_train = epochs.copy().crop(tmin=1., tmax=2.) +epochs = Epochs( + raw, + event_id=["hands", "feet"], + tmin=tmin, + tmax=tmax, + proj=True, + picks=picks, + baseline=None, + preload=True, +) +epochs_train = epochs.copy().crop(tmin=1.0, tmax=2.0) labels = epochs.events[:, -1] - 2 # %% @@ -72,8 +78,8 @@ # Define a monte-carlo cross-validation generator (reduce variance): scores = [] -epochs_data = epochs.get_data() -epochs_data_train = epochs_train.get_data() +epochs_data = epochs.get_data(copy=False) +epochs_data_train = epochs_train.get_data(copy=False) cv = ShuffleSplit(10, test_size=0.2, random_state=42) cv_split = cv.split(epochs_data_train) @@ -82,25 +88,25 @@ csp = CSP(n_components=4, reg=None, log=True, norm_trace=False) # Use scikit-learn Pipeline with cross_val_score function -clf = Pipeline([('CSP', csp), ('LDA', lda)]) +clf = Pipeline([("CSP", csp), ("LDA", lda)]) scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=None) # Printing the results class_balance = np.mean(labels == labels[0]) -class_balance = max(class_balance, 1. 
- class_balance) -print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores), - class_balance)) +class_balance = max(class_balance, 1.0 - class_balance) +print(f"Classification accuracy: {np.mean(scores)} / Chance level: {class_balance}") -# plot CSP patterns estimated on full data for visualization +# plot eigenvalues and patterns estimated on full data for visualization csp.fit_transform(epochs_data, labels) - -csp.plot_patterns(epochs.info, ch_type='eeg', units='Patterns (AU)', size=1.5) +spf = get_spatial_filter_from_estimator(csp, info=epochs.info) +spf.plot_scree() +spf.plot_patterns(components=np.arange(4)) # %% # Look at performance over time -sfreq = raw.info['sfreq'] -w_length = int(sfreq * 0.5) # running classifier: window length +sfreq = raw.info["sfreq"] +w_length = int(sfreq * 0.5) # running classifier: window length w_step = int(sfreq * 0.1) # running classifier: window step size w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step) @@ -118,21 +124,21 @@ # running classifier: test classifier on sliding window score_this_window = [] for n in w_start: - X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)]) + X_test = csp.transform(epochs_data[test_idx][:, :, n : (n + w_length)]) score_this_window.append(lda.score(X_test, y_test)) scores_windows.append(score_this_window) # Plot scores over time -w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin +w_times = (w_start + w_length / 2.0) / sfreq + epochs.tmin plt.figure() -plt.plot(w_times, np.mean(scores_windows, 0), label='Score') -plt.axvline(0, linestyle='--', color='k', label='Onset') -plt.axhline(0.5, linestyle='-', color='k', label='Chance') -plt.xlabel('time (s)') -plt.ylabel('classification accuracy') -plt.title('Classification score over time') -plt.legend(loc='lower right') +plt.plot(w_times, np.mean(scores_windows, 0), label="Score") +plt.axvline(0, linestyle="--", color="k", label="Onset") +plt.axhline(0.5, linestyle="-", color="k", label="Chance") +plt.xlabel("time (s)") +plt.ylabel("classification accuracy") +plt.title("Classification score over time") +plt.legend(loc="lower right") plt.show() ############################################################################## diff --git a/examples/decoding/decoding_csp_timefreq.py b/examples/decoding/decoding_csp_timefreq.py index a5cb84d6f4f..9c26bf05444 100644 --- a/examples/decoding/decoding_csp_timefreq.py +++ b/examples/decoding/decoding_csp_timefreq.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-decoding-csp-eeg-timefreq: @@ -13,54 +12,54 @@ signals. """ # Authors: Laura Gwilliams -# Jean-Remi King +# Jean-Rémi King # Alex Barachant # Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
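In the time-frequency decoding example that follows, the sliding-window width is derived from n_cycles and each band's centre frequency, while the window spacing is derived from the highest frequency. The arithmetic is easy to sanity-check in isolation; the frequency grid below uses illustrative values in the same 8-20 Hz range rather than the example's exact freqs array:

import numpy as np

tmin, tmax = -0.200, 2.000
n_cycles = 10.0
freqs = np.linspace(8.0, 20.0, 7)  # illustrative 6-band grid between 8 and 20 Hz
window_spacing = n_cycles / np.max(freqs) / 2.0
centered_w_times = np.arange(tmin, tmax, window_spacing)[1:]
for fmin, fmax in zip(freqs[:-1], freqs[1:]):
    w_size = n_cycles / ((fmax + fmin) / 2.0)  # window width in seconds
    print(f"{fmin:.1f}-{fmax:.1f} Hz: window {w_size:.2f} s")
print(f"{len(centered_w_times)} windows, spaced {window_spacing:.3f} s apart")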
# %% - -import numpy as np import matplotlib.pyplot as plt - -from mne import Epochs, create_info, events_from_annotations -from mne.io import concatenate_raws, read_raw_edf -from mne.datasets import eegbci -from mne.decoding import CSP -from mne.time_frequency import AverageTFR - +import numpy as np from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.model_selection import StratifiedKFold, cross_val_score from sklearn.pipeline import make_pipeline from sklearn.preprocessing import LabelEncoder +from mne import Epochs, create_info +from mne.datasets import eegbci +from mne.decoding import CSP +from mne.io import concatenate_raws, read_raw_edf +from mne.time_frequency import AverageTFRArray + # %% # Set parameters and read data -event_id = dict(hands=2, feet=3) # motor imagery: hands vs feet subject = 1 runs = [6, 10, 14] raw_fnames = eegbci.load_data(subject, runs) raw = concatenate_raws([read_raw_edf(f) for f in raw_fnames]) +raw.annotations.rename(dict(T1="hands", T2="feet")) # Extract information from the raw file -sfreq = raw.info['sfreq'] -events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3)) -raw.pick_types(meg=False, eeg=True, stim=False, eog=False, exclude='bads') +sfreq = raw.info["sfreq"] +raw.pick(picks="eeg", exclude="bads") raw.load_data() # Assemble the classifier using scikit-learn pipeline -clf = make_pipeline(CSP(n_components=4, reg=None, log=True, norm_trace=False), - LinearDiscriminantAnalysis()) +clf = make_pipeline( + CSP(n_components=4, reg=None, log=True, norm_trace=False), + LinearDiscriminantAnalysis(), +) n_splits = 3 # for cross-validation, 5 is better, here we use 3 for speed cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42) # Classification & time-frequency parameters -tmin, tmax = -.200, 2.000 -n_cycles = 10. # how many complete cycles: used to define window size -min_freq = 8. -max_freq = 20. +tmin, tmax = -0.200, 2.000 +n_cycles = 10.0 # how many complete cycles: used to define window size +min_freq = 8.0 +max_freq = 20.0 n_freqs = 6 # how many frequency bins to use # Assemble list of frequency range tuples @@ -68,7 +67,7 @@ freq_ranges = list(zip(freqs[:-1], freqs[1:])) # make freqs list of tuples # Infer window spacing from the max freq and number of cycles to avoid gaps -window_spacing = (n_cycles / np.max(freqs) / 2.) +window_spacing = n_cycles / np.max(freqs) / 2.0 centered_w_times = np.arange(tmin, tmax, window_spacing)[1:] n_windows = len(centered_w_times) @@ -83,39 +82,49 @@ # Loop through each frequency range of interest for freq, (fmin, fmax) in enumerate(freq_ranges): - # Infer window size based on the frequency being used - w_size = n_cycles / ((fmax + fmin) / 2.) 
# in seconds + w_size = n_cycles / ((fmax + fmin) / 2.0) # in seconds # Apply band-pass filter to isolate the specified frequencies - raw_filter = raw.copy().filter(fmin, fmax, fir_design='firwin', - skip_by_annotation='edge') + raw_filter = raw.copy().filter( + fmin, fmax, fir_design="firwin", skip_by_annotation="edge" + ) # Extract epochs from filtered data, padded by window size - epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size, - proj=False, baseline=None, preload=True) + epochs = Epochs( + raw_filter, + event_id=["hands", "feet"], + tmin=tmin - w_size, + tmax=tmax + w_size, + proj=False, + baseline=None, + preload=True, + ) epochs.drop_bad() y = le.fit_transform(epochs.events[:, 2]) - X = epochs.get_data() + X = epochs.get_data(copy=False) # Save mean scores over folds for each frequency and time window - freq_scores[freq] = np.mean(cross_val_score( - estimator=clf, X=X, y=y, scoring='roc_auc', cv=cv), axis=0) + freq_scores[freq] = np.mean( + cross_val_score(estimator=clf, X=X, y=y, scoring="roc_auc", cv=cv), axis=0 + ) # %% # Plot frequency results -plt.bar(freqs[:-1], freq_scores, width=np.diff(freqs)[0], - align='edge', edgecolor='black') +plt.bar( + freqs[:-1], freq_scores, width=np.diff(freqs)[0], align="edge", edgecolor="black" +) plt.xticks(freqs) plt.ylim([0, 1]) -plt.axhline(len(epochs['feet']) / len(epochs), color='k', linestyle='--', - label='chance level') +plt.axhline( + len(epochs["feet"]) / len(epochs), color="k", linestyle="--", label="chance level" +) plt.legend() -plt.xlabel('Frequency (Hz)') -plt.ylabel('Decoding Scores') -plt.title('Frequency Decoding Scores') +plt.xlabel("Frequency (Hz)") +plt.ylabel("Decoding Scores") +plt.title("Frequency Decoding Scores") # %% # Loop through frequencies and time, apply classifier and save scores @@ -125,41 +134,54 @@ # Loop through each frequency range of interest for freq, (fmin, fmax) in enumerate(freq_ranges): - # Infer window size based on the frequency being used - w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds + w_size = n_cycles / ((fmax + fmin) / 2.0) # in seconds # Apply band-pass filter to isolate the specified frequencies - raw_filter = raw.copy().filter(fmin, fmax, fir_design='firwin', - skip_by_annotation='edge') + raw_filter = raw.copy().filter( + fmin, fmax, fir_design="firwin", skip_by_annotation="edge" + ) # Extract epochs from filtered data, padded by window size - epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size, - proj=False, baseline=None, preload=True) + epochs = Epochs( + raw_filter, + event_id=["hands", "feet"], + tmin=tmin - w_size, + tmax=tmax + w_size, + proj=False, + baseline=None, + preload=True, + ) epochs.drop_bad() y = le.fit_transform(epochs.events[:, 2]) # Roll covariance, csp and lda over time for t, w_time in enumerate(centered_w_times): - # Center the min and max of the window - w_tmin = w_time - w_size / 2. - w_tmax = w_time + w_size / 2. 
+ w_tmin = w_time - w_size / 2.0 + w_tmax = w_time + w_size / 2.0 # Crop data into time-window of interest - X = epochs.copy().crop(w_tmin, w_tmax).get_data() + X = epochs.get_data(tmin=w_tmin, tmax=w_tmax, copy=False) # Save mean scores over folds for each frequency and time window - tf_scores[freq, t] = np.mean(cross_val_score( - estimator=clf, X=X, y=y, scoring='roc_auc', cv=cv), axis=0) + tf_scores[freq, t] = np.mean( + cross_val_score(estimator=clf, X=X, y=y, scoring="roc_auc", cv=cv), axis=0 + ) # %% # Plot time-frequency results # Set up time frequency object -av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :], - centered_w_times, freqs[1:], 1) +av_tfr = AverageTFRArray( + info=create_info(["freq"], sfreq), + data=tf_scores[np.newaxis, :], + times=centered_w_times, + freqs=freqs[1:], + nave=1, +) chance = np.mean(y) # set chance level to white in the plot -av_tfr.plot([0], vmin=chance, title="Time-Frequency Decoding Scores", - cmap=plt.cm.Reds) +av_tfr.plot( + [0], vlim=(chance, None), title="Time-Frequency Decoding Scores", cmap=plt.cm.Reds +) diff --git a/examples/decoding/decoding_rsa_sgskip.py b/examples/decoding/decoding_rsa_sgskip.py index cd53cf5382e..1daaabb9619 100644 --- a/examples/decoding/decoding_rsa_sgskip.py +++ b/examples/decoding/decoding_rsa_sgskip.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-rsa-noplot: @@ -22,36 +21,36 @@ build the images below. """ -# Authors: Jean-Remi King +# Authors: Jean-Rémi King # Jaakko Leppakangas # Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% +import matplotlib.pyplot as plt import numpy as np from pandas import read_csv -import matplotlib.pyplot as plt - +from sklearn.linear_model import LogisticRegression +from sklearn.manifold import MDS +from sklearn.metrics import roc_auc_score from sklearn.model_selection import StratifiedKFold +from sklearn.multiclass import OneVsRestClassifier from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler -from sklearn.linear_model import LogisticRegression -from sklearn.metrics import roc_auc_score -from sklearn.manifold import MDS import mne -from mne.io import read_raw_fif, concatenate_raws from mne.datasets import visual_92_categories - +from mne.io import concatenate_raws, read_raw_fif print(__doc__) data_path = visual_92_categories.data_path() # Define stimulus - trigger mapping -fname = data_path / 'visual_stimuli.csv' +fname = data_path / "visual_stimuli.csv" conds = read_csv(fname) print(conds.head(5)) @@ -65,38 +64,48 @@ conditions = [] for c in conds.values: cond_tags = list(c[:2]) - cond_tags += [('not-' if i == 0 else '') + conds.columns[k] - for k, i in enumerate(c[2:], 2)] - conditions.append('/'.join(map(str, cond_tags))) + cond_tags += [ + ("not-" if i == 0 else "") + conds.columns[k] for k, i in enumerate(c[2:], 2) + ] + conditions.append("/".join(map(str, cond_tags))) print(conditions[:10]) ############################################################################## # Let's make the event_id dictionary event_id = dict(zip(conditions, conds.trigger + 1)) -event_id['0/human bodypart/human/not-face/animal/natural'] +event_id["0/human bodypart/human/not-face/animal/natural"] ############################################################################## # Read MEG data n_runs = 4 # 4 for full data (use less to speed up computations) -fnames = [data_path / f'sample_subject_{b}_tsss_mc.fif' for b in range(n_runs)] -raws = [read_raw_fif(fname, verbose='error', 
on_split_missing='ignore') - for fname in fnames] # ignore filename warnings +fnames = [data_path / f"sample_subject_{b}_tsss_mc.fif" for b in range(n_runs)] +raws = [ + read_raw_fif(fname, verbose="error", on_split_missing="ignore") for fname in fnames +] # ignore filename warnings raw = concatenate_raws(raws) -events = mne.find_events(raw, min_duration=.002) +events = mne.find_events(raw, min_duration=0.002) events = events[events[:, 2] <= max_trigger] ############################################################################## # Epoch data picks = mne.pick_types(raw.info, meg=True) -epochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None, - picks=picks, tmin=-.1, tmax=.500, preload=True) +epochs = mne.Epochs( + raw, + events=events, + event_id=event_id, + baseline=None, + picks=picks, + tmin=-0.1, + tmax=0.500, + preload=True, +) ############################################################################## # Let's plot some conditions -epochs['face'].average().plot() -epochs['not-face'].average().plot() +epochs["face"].average().plot() +epochs["not-face"].average().plot() ############################################################################## # Representational Similarity Analysis (RSA) is a neuroimaging-specific @@ -113,9 +122,10 @@ # Classify using the average signal in the window 50ms to 300ms # to focus the classifier on the time interval with best SNR. -clf = make_pipeline(StandardScaler(), - LogisticRegression(C=1, solver='liblinear', - multi_class='auto')) +clf = make_pipeline( + StandardScaler(), + OneVsRestClassifier(LogisticRegression(C=1)), +) X = epochs.copy().crop(0.05, 0.3).get_data().mean(axis=2) y = epochs.events[:, 2] @@ -140,38 +150,42 @@ ############################################################################## # Plot -labels = [''] * 5 + ['face'] + [''] * 11 + ['bodypart'] + [''] * 6 -fig, ax = plt.subplots(1) -im = ax.matshow(confusion, cmap='RdBu_r', clim=[0.3, 0.7]) +labels = [""] * 5 + ["face"] + [""] * 11 + ["bodypart"] + [""] * 6 +fig, ax = plt.subplots(1, layout="constrained") +im = ax.matshow(confusion, cmap="RdBu_r", clim=[0.3, 0.7]) ax.set_yticks(range(len(classes))) ax.set_yticklabels(labels) ax.set_xticks(range(len(classes))) -ax.set_xticklabels(labels, rotation=40, ha='left') -ax.axhline(11.5, color='k') -ax.axvline(11.5, color='k') +ax.set_xticklabels(labels, rotation=40, ha="left") +ax.axhline(11.5, color="k") +ax.axvline(11.5, color="k") plt.colorbar(im) -plt.tight_layout() plt.show() ############################################################################## # Confusion matrix related to mental representations have been historically # summarized with dimensionality reduction using multi-dimensional scaling [1]. # See how the face samples cluster together. 
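The plotting hunk below feeds (chance - confusion) to scikit-learn's MDS with dissimilarity="precomputed". As a standalone sketch of that call on a synthetic symmetric dissimilarity matrix (sizes and values are made up):

import numpy as np
from sklearn.manifold import MDS

rng = np.random.RandomState(0)
# Synthetic symmetric dissimilarity matrix standing in for the confusion-based one
d = rng.rand(20, 20)
dissimilarity = (d + d.T) / 2.0
np.fill_diagonal(dissimilarity, 0.0)

mds = MDS(n_components=2, dissimilarity="precomputed", random_state=0)
embedding = mds.fit_transform(dissimilarity)
print(embedding.shape)  # (20, 2)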
-fig, ax = plt.subplots(1) -mds = MDS(2, random_state=0, dissimilarity='precomputed') +fig, ax = plt.subplots(1, layout="constrained") +mds = MDS(2, random_state=0, dissimilarity="precomputed") chance = 0.5 summary = mds.fit_transform(chance - confusion) -cmap = plt.colormaps['rainbow'] -colors = ['r', 'b'] -names = list(conds['condition'].values) +cmap = plt.colormaps["rainbow"] +colors = ["r", "b"] +names = list(conds["condition"].values) for color, name in zip(colors, set(names)): sel = np.where([this_name == name for this_name in names])[0] - size = 500 if name == 'human face' else 100 - ax.scatter(summary[sel, 0], summary[sel, 1], s=size, - facecolors=color, label=name, edgecolors='k') -ax.axis('off') -ax.legend(loc='lower right', scatterpoints=1, ncol=2) -plt.tight_layout() + size = 500 if name == "human face" else 100 + ax.scatter( + summary[sel, 0], + summary[sel, 1], + s=size, + facecolors=color, + label=name, + edgecolors="k", + ) +ax.axis("off") +ax.legend(loc="lower right", scatterpoints=1, ncol=2) plt.show() ############################################################################## diff --git a/examples/decoding/decoding_spatio_temporal_source.py b/examples/decoding/decoding_spatio_temporal_source.py index 01187ea1e47..f724ea97b3b 100644 --- a/examples/decoding/decoding_spatio_temporal_source.py +++ b/examples/decoding/decoding_spatio_temporal_source.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-dec-st-source: @@ -15,59 +14,68 @@ # Author: Denis A. Engemann # Alexandre Gramfort -# Jean-Remi King +# Jean-Rémi King # Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% -import numpy as np import matplotlib.pyplot as plt - -from sklearn.pipeline import make_pipeline -from sklearn.preprocessing import StandardScaler +import numpy as np from sklearn.feature_selection import SelectKBest, f_classif from sklearn.linear_model import LogisticRegression +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler import mne +from mne.decoding import LinearModel, SlidingEstimator, cross_val_multiscore, get_coef from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator -from mne.decoding import (cross_val_multiscore, LinearModel, SlidingEstimator, - get_coef) print(__doc__) data_path = mne.datasets.sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fname_fwd = meg_path / 'sample_audvis-meg-oct-6-fwd.fif' -fname_evoked = meg_path / 'sample_audvis-ave.fif' -subjects_dir = data_path / 'subjects' +meg_path = data_path / "MEG" / "sample" +fname_fwd = meg_path / "sample_audvis-meg-oct-6-fwd.fif" +fname_evoked = meg_path / "sample_audvis-ave.fif" +subjects_dir = data_path / "subjects" # %% # Set parameters -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' -fname_cov = meg_path / 'sample_audvis-cov.fif' -fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif' +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" +fname_cov = meg_path / "sample_audvis-cov.fif" +fname_inv = meg_path / "sample_audvis-meg-oct-6-meg-inv.fif" tmin, tmax = -0.2, 0.8 event_id = dict(aud_r=2, vis_r=4) # load contra-lateral conditions # Setup for reading the raw data raw = mne.io.read_raw_fif(raw_fname, preload=True) -raw.filter(None, 10., fir_design='firwin') +raw.filter(None, 10.0, fir_design="firwin") events = mne.read_events(event_fname) # Set up pick list: MEG - bad channels 
(modify to your needs) -raw.info['bads'] += ['MEG 2443'] # mark bads -picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True, - exclude='bads') +raw.info["bads"] += ["MEG 2443"] # mark bads +picks = mne.pick_types( + raw.info, meg=True, eeg=False, stim=True, eog=True, exclude="bads" +) # Read epochs -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, - picks=picks, baseline=(None, 0), preload=True, - reject=dict(grad=4000e-13, eog=150e-6), - decim=5) # decimate to save memory and increase speed +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + proj=True, + picks=picks, + baseline=(None, 0), + preload=True, + reject=dict(grad=4000e-13, eog=150e-6), + decim=5, +) # decimate to save memory and increase speed # %% # Compute inverse solution @@ -75,9 +83,14 @@ noise_cov = mne.read_cov(fname_cov) inverse_operator = read_inverse_operator(fname_inv) -stcs = apply_inverse_epochs(epochs, inverse_operator, - lambda2=1.0 / snr ** 2, verbose=False, - method="dSPM", pick_ori="normal") +stcs = apply_inverse_epochs( + epochs, + inverse_operator, + lambda2=1.0 / snr**2, + verbose=False, + method="dSPM", + pick_ori="normal", +) # %% # Decoding in sensor space using a logistic regression @@ -87,19 +100,21 @@ y = epochs.events[:, 2] # prepare a series of classifier applied at each time sample -clf = make_pipeline(StandardScaler(), # z-score normalization - SelectKBest(f_classif, k=500), # select features for speed - LinearModel(LogisticRegression(C=1, solver='liblinear'))) -time_decod = SlidingEstimator(clf, scoring='roc_auc') +clf = make_pipeline( + StandardScaler(), # z-score normalization + SelectKBest(f_classif, k=500), # select features for speed + LinearModel(LogisticRegression(C=1, solver="liblinear")), +) +time_decod = SlidingEstimator(clf, scoring="roc_auc") # Run cross-validated decoding analyses: scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=None) # Plot average decoding scores of 5 splits fig, ax = plt.subplots(1) -ax.plot(epochs.times, scores.mean(0), label='score') -ax.axhline(.5, color='k', linestyle='--', label='chance') -ax.axvline(0, color='k') +ax.plot(epochs.times, scores.mean(0), label="score") +ax.axhline(0.5, color="k", linestyle="--", label="chance") +ax.axvline(0, color="k") plt.legend() # %% @@ -110,13 +125,22 @@ time_decod.fit(X, y) # Retrieve patterns after inversing the z-score normalization step: -patterns = get_coef(time_decod, 'patterns_', inverse_transform=True) +patterns = get_coef(time_decod, "patterns_", inverse_transform=True) stc = stcs[0] # for convenience, lookup parameters from first stc vertices = [stc.lh_vertno, np.array([], int)] # empty array for right hemi -stc_feat = mne.SourceEstimate(np.abs(patterns), vertices=vertices, - tmin=stc.tmin, tstep=stc.tstep, subject='sample') - -brain = stc_feat.plot(views=['lat'], transparent=True, - initial_time=0.1, time_unit='s', - subjects_dir=subjects_dir) +stc_feat = mne.SourceEstimate( + np.abs(patterns), + vertices=vertices, + tmin=stc.tmin, + tstep=stc.tstep, + subject="sample", +) + +brain = stc_feat.plot( + views=["lat"], + transparent=True, + initial_time=0.1, + time_unit="s", + subjects_dir=subjects_dir, +) diff --git a/examples/decoding/decoding_spoc_CMC.py b/examples/decoding/decoding_spoc_CMC.py index 78a6918f22a..3accd5b2cd6 100644 --- a/examples/decoding/decoding_spoc_CMC.py +++ b/examples/decoding/decoding_spoc_CMC.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. 
_ex-spoc-cmc: @@ -19,75 +18,82 @@ """ # Author: Alexandre Barachant -# Jean-Remi King +# Jean-Rémi King # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import matplotlib.pyplot as plt +from sklearn.linear_model import Ridge +from sklearn.model_selection import KFold, cross_val_predict +from sklearn.pipeline import make_pipeline import mne from mne import Epochs -from mne.decoding import SPoC from mne.datasets.fieldtrip_cmc import data_path - -from sklearn.pipeline import make_pipeline -from sklearn.linear_model import Ridge -from sklearn.model_selection import KFold, cross_val_predict +from mne.decoding import SPoC, get_spatial_filter_from_estimator # Define parameters -fname = data_path() / 'SubjectCMC.ds' +fname = data_path() / "SubjectCMC.ds" raw = mne.io.read_raw_ctf(fname) -raw.crop(50., 200.) # crop for memory purposes +raw.crop(50.0, 200.0) # crop for memory purposes # Filter muscular activity to only keep high frequencies -emg = raw.copy().pick_channels(['EMGlft']).load_data() -emg.filter(20., None) +emg = raw.copy().pick(["EMGlft"]).load_data() +emg.filter(20.0, None) # Filter MEG data to focus on beta band -raw.pick_types(meg=True, ref_meg=True, eeg=False, eog=False).load_data() -raw.filter(15., 30.) +raw.pick(picks=["meg", "ref_meg"]).load_data() +raw.filter(15.0, 30.0) # Build epochs as sliding windows over the continuous raw file events = mne.make_fixed_length_events(raw, id=1, duration=0.75) # Epoch length is 1.5 second -meg_epochs = Epochs(raw, events, tmin=0., tmax=1.5, baseline=None, - detrend=1, decim=12) -emg_epochs = Epochs(emg, events, tmin=0., tmax=1.5, baseline=None) +meg_epochs = Epochs(raw, events, tmin=0.0, tmax=1.5, baseline=None, detrend=1, decim=12) +emg_epochs = Epochs(emg, events, tmin=0.0, tmax=1.5, baseline=None) # Prepare classification X = meg_epochs.get_data() y = emg_epochs.get_data().var(axis=2)[:, 0] # target is EMG power # Classification pipeline with SPoC spatial filtering and Ridge Regression -spoc = SPoC(n_components=2, log=True, reg='oas', rank='full') +spoc = SPoC(n_components=2, log=True, reg="oas", rank="full") clf = make_pipeline(spoc, Ridge()) # Define a two fold cross-validation cv = KFold(n_splits=2, shuffle=False) -# Run cross validaton +# Run cross validation y_preds = cross_val_predict(clf, X, y, cv=cv) # Plot the True EMG power and the EMG power predicted from MEG data -fig, ax = plt.subplots(1, 1, figsize=[10, 4]) +fig, ax = plt.subplots(1, 1, figsize=[10, 4], layout="constrained") times = raw.times[meg_epochs.events[:, 0] - raw.first_samp] -ax.plot(times, y_preds, color='b', label='Predicted EMG') -ax.plot(times, y, color='r', label='True EMG') -ax.set_xlabel('Time (s)') -ax.set_ylabel('EMG Power') -ax.set_title('SPoC MEG Predictions') +ax.plot(times, y_preds, color="b", label="Predicted EMG") +ax.plot(times, y, color="r", label="True EMG") +ax.set_xlabel("Time (s)") +ax.set_ylabel("EMG Power") +ax.set_title("SPoC MEG Predictions") plt.legend() -mne.viz.tight_layout() plt.show() ############################################################################## # Plot the contributions to the detected components (i.e., the forward model) spoc.fit(X, y) -spoc.plot_patterns(meg_epochs.info) +spf = get_spatial_filter_from_estimator(spoc, info=meg_epochs.info) +spf.plot_scree() + +# Plot patterns for the first three components +# with largest absolute generalized eigenvalues, +# as we can see on the scree plot +spf.plot_patterns(components=[0, 1, 2]) + 
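The SPoC pipeline above regresses continuous EMG power on MEG covariance structure via make_pipeline(SPoC(...), Ridge()) and cross_val_predict. The same pattern runs on purely synthetic arrays, which can be handy for checking shapes before touching real data (everything below is random noise; n_components, reg and the fold count mirror the example but are otherwise arbitrary):

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.pipeline import make_pipeline

from mne.decoding import SPoC

rng = np.random.default_rng(42)
X = rng.normal(size=(40, 12, 200))  # epochs x channels x times, synthetic
y = rng.normal(size=40)             # continuous target, e.g. EMG power

clf = make_pipeline(SPoC(n_components=2, log=True, reg="oas"), Ridge())
y_pred = cross_val_predict(clf, X, y, cv=KFold(n_splits=2, shuffle=False))
print(np.corrcoef(y, y_pred)[0, 1])  # near zero for random data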
############################################################################## # References # ---------- # .. footbibliography:: + +# %% diff --git a/examples/decoding/decoding_time_generalization_conditions.py b/examples/decoding/decoding_time_generalization_conditions.py index 01f6d1da00d..e71112e8375 100644 --- a/examples/decoding/decoding_time_generalization_conditions.py +++ b/examples/decoding/decoding_time_generalization_conditions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-linear-sensor-decoding: @@ -12,19 +11,19 @@ instant and subsequently assess whether this linear model can accurately predict all of the time samples of a second set of conditions. """ -# Authors: Jean-Remi King +# Authors: Jean-Rémi King # Alexandre Gramfort # Denis Engemann # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import matplotlib.pyplot as plt - +from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler -from sklearn.linear_model import LogisticRegression import mne from mne.datasets import sample @@ -35,56 +34,78 @@ # Preprocess data data_path = sample.data_path() # Load and filter data, set up epochs -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -events_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +events_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" raw = mne.io.read_raw_fif(raw_fname, preload=True) -picks = mne.pick_types(raw.info, meg=True, exclude='bads') # Pick MEG channels -raw.filter(1., 30., fir_design='firwin') # Band pass filtering signals +picks = mne.pick_types(raw.info, meg=True, exclude="bads") # Pick MEG channels +raw.filter(1.0, 30.0, fir_design="firwin") # Band pass filtering signals events = mne.read_events(events_fname) -event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, - 'Visual/Left': 3, 'Visual/Right': 4} +event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, +} tmin = -0.050 tmax = 0.400 # decimate to make the example faster to run, but then use verbose='error' in # the Epochs constructor to suppress warning about decimation causing aliasing decim = 2 -epochs = mne.Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax, - proj=True, picks=picks, baseline=None, preload=True, - reject=dict(mag=5e-12), decim=decim, verbose='error') +epochs = mne.Epochs( + raw, + events, + event_id=event_id, + tmin=tmin, + tmax=tmax, + proj=True, + picks=picks, + baseline=None, + preload=True, + reject=dict(mag=5e-12), + decim=decim, + verbose="error", +) # %% # We will train the classifier on all left visual vs auditory trials # and test on all right visual vs auditory trials. clf = make_pipeline( StandardScaler(), - LogisticRegression(solver='liblinear') # liblinear is faster than lbfgs + LogisticRegression(solver="liblinear"), # liblinear is faster than lbfgs ) -time_gen = GeneralizingEstimator(clf, scoring='roc_auc', n_jobs=None, - verbose=True) +time_gen = GeneralizingEstimator(clf, scoring="roc_auc", n_jobs=None, verbose=True) # Fit classifiers on the epochs where the stimulus was presented to the left. 
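The example below fits a GeneralizingEstimator on the "Left" trials and scores it on the "Right" trials, yielding a training-time by testing-time score matrix. A compact synthetic sketch of that train-on-one-condition, test-on-another pattern (all data and labels are random, so scores hover around chance):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

from mne.decoding import GeneralizingEstimator

rng = np.random.default_rng(0)
X_train = rng.normal(size=(60, 8, 20))  # condition A: epochs x channels x times
y_train = rng.integers(0, 2, 60)
X_test = rng.normal(size=(50, 8, 20))   # condition B
y_test = rng.integers(0, 2, 50)

clf = make_pipeline(StandardScaler(), LogisticRegression(solver="liblinear"))
time_gen = GeneralizingEstimator(clf, scoring="roc_auc", n_jobs=None)
time_gen.fit(X_train, y_train)
scores = time_gen.score(X_test, y_test)
print(scores.shape)  # (20, 20): training time x testing time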
# Note that the experimental condition y indicates auditory or visual -time_gen.fit(X=epochs['Left'].get_data(), - y=epochs['Left'].events[:, 2] > 2) +time_gen.fit(X=epochs["Left"].get_data(copy=False), y=epochs["Left"].events[:, 2] > 2) # %% # Score on the epochs where the stimulus was presented to the right. -scores = time_gen.score(X=epochs['Right'].get_data(), - y=epochs['Right'].events[:, 2] > 2) +scores = time_gen.score( + X=epochs["Right"].get_data(copy=False), y=epochs["Right"].events[:, 2] > 2 +) # %% # Plot -fig, ax = plt.subplots(constrained_layout=True) -im = ax.matshow(scores, vmin=0, vmax=1., cmap='RdBu_r', origin='lower', - extent=epochs.times[[0, -1, 0, -1]]) -ax.axhline(0., color='k') -ax.axvline(0., color='k') -ax.xaxis.set_ticks_position('bottom') -ax.set_xlabel('Condition: "Right"\nTesting Time (s)',) +fig, ax = plt.subplots(layout="constrained") +im = ax.matshow( + scores, + vmin=0, + vmax=1.0, + cmap="RdBu_r", + origin="lower", + extent=epochs.times[[0, -1, 0, -1]], +) +ax.axhline(0.0, color="k") +ax.axvline(0.0, color="k") +ax.xaxis.set_ticks_position("bottom") +ax.set_xlabel( + 'Condition: "Right"\nTesting Time (s)', +) ax.set_ylabel('Condition: "Left"\nTraining Time (s)') -ax.set_title('Generalization across time and condition', fontweight='bold') -fig.colorbar(im, ax=ax, label='Performance (ROC AUC)') +ax.set_title("Generalization across time and condition", fontweight="bold") +fig.colorbar(im, ax=ax, label="Performance (ROC AUC)") plt.show() ############################################################################## diff --git a/examples/decoding/decoding_unsupervised_spatial_filter.py b/examples/decoding/decoding_unsupervised_spatial_filter.py index a7514625842..2fb1a8fec46 100644 --- a/examples/decoding/decoding_unsupervised_spatial_filter.py +++ b/examples/decoding/decoding_unsupervised_spatial_filter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-ica-pca-decoding: @@ -11,64 +10,77 @@ the channel data to new sources / virtual channels. The output is visualized on the average of all the epochs. """ -# Authors: Jean-Remi King +# Authors: Jean-Rémi King # Asish Panda # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% -import numpy as np import matplotlib.pyplot as plt +import numpy as np +from sklearn.decomposition import PCA, FastICA import mne from mne.datasets import sample from mne.decoding import UnsupervisedSpatialFilter -from sklearn.decomposition import PCA, FastICA - print(__doc__) # Preprocess data data_path = sample.data_path() # Load and filter data, set up epochs -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" tmin, tmax = -0.1, 0.3 event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4) raw = mne.io.read_raw_fif(raw_fname, preload=True) -raw.filter(1, 20, fir_design='firwin') +raw.filter(1, 20, fir_design="firwin") events = mne.read_events(event_fname) -picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, - exclude='bads') - -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False, - picks=picks, baseline=None, preload=True, - verbose=False) - -X = epochs.get_data() +picks = mne.pick_types( + raw.info, meg=False, eeg=True, stim=False, eog=False, exclude="bads" +) + +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + proj=False, + picks=picks, + baseline=None, + preload=True, + verbose=False, +) + +X = epochs.get_data(copy=False) ############################################################################## # Transform data with PCA computed on the average ie evoked response pca = UnsupervisedSpatialFilter(PCA(30), average=False) pca_data = pca.fit_transform(X) -ev = mne.EvokedArray(np.mean(pca_data, axis=0), - mne.create_info(30, epochs.info['sfreq'], - ch_types='eeg'), tmin=tmin) -ev.plot(show=False, window_title="PCA", time_unit='s') +ev = mne.EvokedArray( + np.mean(pca_data, axis=0), + mne.create_info(30, epochs.info["sfreq"], ch_types="eeg"), + tmin=tmin, +) +ev.plot(show=False, window_title="PCA", time_unit="s") ############################################################################## # Transform data with ICA computed on the raw epochs (no averaging) -ica = UnsupervisedSpatialFilter( - FastICA(30, whiten='unit-variance'), average=False) +ica = UnsupervisedSpatialFilter(FastICA(30, whiten="unit-variance"), average=False) ica_data = ica.fit_transform(X) -ev1 = mne.EvokedArray(np.mean(ica_data, axis=0), - mne.create_info(30, epochs.info['sfreq'], - ch_types='eeg'), tmin=tmin) -ev1.plot(show=False, window_title='ICA', time_unit='s') +ev1 = mne.EvokedArray( + np.mean(ica_data, axis=0), + mne.create_info(30, epochs.info["sfreq"], ch_types="eeg"), + tmin=tmin, +) +ev1.plot(show=False, window_title="ICA", time_unit="s") plt.show() diff --git a/examples/decoding/decoding_xdawn_eeg.py b/examples/decoding/decoding_xdawn_eeg.py index 484082b6085..1d1bf3f8760 100644 --- a/examples/decoding/decoding_xdawn_eeg.py +++ b/examples/decoding/decoding_xdawn_eeg.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-xdawn-decoding: @@ -11,26 +10,26 @@ Channels are concatenated and rescaled to create features vectors that will be fed into a logistic regression. """ + # Authors: Alexandre Barachant # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% -import numpy as np import matplotlib.pyplot as plt - -from sklearn.model_selection import StratifiedKFold -from sklearn.pipeline import make_pipeline +import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report, confusion_matrix +from sklearn.model_selection import StratifiedKFold +from sklearn.multiclass import OneVsRestClassifier +from sklearn.pipeline import make_pipeline from sklearn.preprocessing import MinMaxScaler -from mne import io, pick_types, read_events, Epochs, EvokedArray, create_info +from mne import Epochs, io, pick_types, read_events from mne.datasets import sample -from mne.preprocessing import Xdawn -from mne.decoding import Vectorizer - +from mne.decoding import Vectorizer, XdawnTransformer, get_spatial_filter_from_estimator print(__doc__) @@ -38,83 +37,109 @@ # %% # Set parameters and read data -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" tmin, tmax = -0.1, 0.3 -event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, - 'Visual/Left': 3, 'Visual/Right': 4} +event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, +} n_filter = 3 # Setup for reading the raw data raw = io.read_raw_fif(raw_fname, preload=True) -raw.filter(1, 20, fir_design='firwin') +raw.filter(1, 20, fir_design="firwin") events = read_events(event_fname) -picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, - exclude='bads') - -epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False, - picks=picks, baseline=None, preload=True, - verbose=False) +picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, exclude="bads") + +epochs = Epochs( + raw, + events, + event_id, + tmin, + tmax, + proj=False, + picks=picks, + baseline=None, + preload=True, + verbose=False, +) # Create classification pipeline -clf = make_pipeline(Xdawn(n_components=n_filter), - Vectorizer(), - MinMaxScaler(), - LogisticRegression(penalty='l1', solver='liblinear', - multi_class='auto')) - -# Get the labels -labels = epochs.events[:, -1] +clf = make_pipeline( + XdawnTransformer(n_components=n_filter), + Vectorizer(), + MinMaxScaler(), + OneVsRestClassifier(LogisticRegression(penalty="l1", solver="liblinear")), +) + +# Get the data and labels +# X is of shape (n_epochs, n_channels, n_times) +X = epochs.get_data(copy=False) +y = epochs.events[:, -1] # Cross validator cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42) # Do cross-validation -preds = np.empty(len(labels)) -for train, test in cv.split(epochs, labels): - clf.fit(epochs[train], labels[train]) - preds[test] = clf.predict(epochs[test]) +preds = np.empty(len(y)) +for train, test in cv.split(epochs, y): + clf.fit(X[train], y[train]) + preds[test] = clf.predict(X[test]) # Classification report -target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r'] -report = classification_report(labels, preds, target_names=target_names) +target_names = ["aud_l", "aud_r", "vis_l", "vis_r"] +report = classification_report(y, preds, target_names=target_names) print(report) # Normalized confusion matrix -cm = confusion_matrix(labels, preds) +cm = confusion_matrix(y, preds) cm_normalized = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis] # Plot confusion matrix -fig, ax = 
plt.subplots(1) -im = ax.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues) -ax.set(title='Normalized Confusion matrix') +fig, ax = plt.subplots(1, layout="constrained") +im = ax.imshow(cm_normalized, interpolation="nearest", cmap=plt.cm.Blues) +ax.set(title="Normalized Confusion matrix") fig.colorbar(im) tick_marks = np.arange(len(target_names)) plt.xticks(tick_marks, target_names, rotation=45) plt.yticks(tick_marks, target_names) -fig.tight_layout() -ax.set(ylabel='True label', xlabel='Predicted label') +ax.set(ylabel="True label", xlabel="Predicted label") # %% -# The ``patterns_`` attribute of a fitted Xdawn instance (here from the last -# cross-validation fold) can be used for visualization. - -fig, axes = plt.subplots(nrows=len(event_id), ncols=n_filter, - figsize=(n_filter, len(event_id) * 2)) -fitted_xdawn = clf.steps[0][1] -info = create_info(epochs.ch_names, 1, epochs.get_channel_types()) -info.set_montage(epochs.get_montage()) -for ii, cur_class in enumerate(sorted(event_id)): - cur_patterns = fitted_xdawn.patterns_[cur_class] - pattern_evoked = EvokedArray(cur_patterns[:n_filter].T, info, tmin=0) - pattern_evoked.plot_topomap( - times=np.arange(n_filter), - time_format='Component %d' if ii == 0 else '', colorbar=False, - show_names=False, axes=axes[ii], show=False) - axes[ii, 0].set(ylabel=cur_class) -fig.tight_layout(h_pad=1.0, w_pad=1.0, pad=0.1) +# Patterns of a fitted XdawnTransformer instance (here from the last +# cross-validation fold) can be visualized using the SpatialFilter container. + +# Instantiate SpatialFilter +spf = get_spatial_filter_from_estimator( + clf, info=epochs.info, step_name="xdawntransformer" +) + +# Let's first examine the scree plot of generalized eigenvalues +# for each class. +spf.plot_scree(title="") + +# We can see that for all four classes ~five largest components +# capture most of the variance, let's plot their patterns. +# Each class will now return its own figure +components_to_plot = np.arange(5) +figs = spf.plot_patterns( + # Indices of patterns to plot, + # we will plot the first five for each class + components=components_to_plot, + show=False, # to set the titles below +) + +# Set the class titles +event_id_reversed = {v: k for k, v in event_id.items()} +for fig, class_idx in zip(figs, clf[0].classes_): + class_name = event_id_reversed[class_idx] + fig.suptitle(class_name, fontsize=16) + # %% # References diff --git a/examples/decoding/ems_filtering.py b/examples/decoding/ems_filtering.py index 8f40837b9d4..d1e04e2e096 100644 --- a/examples/decoding/ems_filtering.py +++ b/examples/decoding/ems_filtering.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-ems-filtering: @@ -20,47 +19,53 @@ evolution of the spatial filters. """ # Author: Denis Engemann -# Jean-Remi King +# Jean-Rémi King # # License: BSD-3-Clause +# Copyright the MNE-Python contributors.
# %% -import numpy as np import matplotlib.pyplot as plt +import numpy as np +from sklearn.model_selection import StratifiedKFold import mne -from mne import io, EvokedArray +from mne import EvokedArray, io from mne.datasets import sample from mne.decoding import EMS, compute_ems -from sklearn.model_selection import StratifiedKFold print(__doc__) data_path = sample.data_path() # Preprocess the data -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' -event_ids = {'AudL': 1, 'VisL': 3} +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" +event_ids = {"AudL": 1, "VisL": 3} # Read data and create epochs raw = io.read_raw_fif(raw_fname, preload=True) -raw.filter(0.5, 45, fir_design='firwin') +raw.filter(0.5, 45, fir_design="firwin") events = mne.read_events(event_fname) -picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True, - exclude='bads') +raw.pick(["grad", "eog"], exclude="bads") -epochs = mne.Epochs(raw, events, event_ids, tmin=-0.2, tmax=0.5, picks=picks, - baseline=None, reject=dict(grad=4000e-13, eog=150e-6), - preload=True) -epochs.drop_bad() -epochs.pick_types(meg='grad') +epochs = mne.Epochs( + raw, + events, + event_ids, + tmin=-0.2, + tmax=0.5, + baseline=None, + reject=dict(grad=4000e-13, eog=150e-6), + preload=True, +) +epochs.pick("grad") # Setup the data to use it a scikit-learn way: -X = epochs.get_data() # The MEG data +X = epochs.get_data(copy=False) # The MEG data y = epochs.events[:, 2] # The conditions indices n_epochs, n_channels, n_times = X.shape @@ -99,23 +104,27 @@ # Plot individual trials plt.figure() -plt.title('single trial surrogates') -plt.imshow(X_transform[y.argsort()], origin='lower', aspect='auto', - extent=[epochs.times[0], epochs.times[-1], 1, len(X_transform)], - cmap='RdBu_r') -plt.xlabel('Time (ms)') -plt.ylabel('Trials (reordered by condition)') +plt.title("single trial surrogates") +plt.imshow( + X_transform[y.argsort()], + origin="lower", + aspect="auto", + extent=[epochs.times[0], epochs.times[-1], 1, len(X_transform)], + cmap="RdBu_r", +) +plt.xlabel("Time (ms)") +plt.ylabel("Trials (reordered by condition)") # Plot average response plt.figure() -plt.title('Average EMS signal') +plt.title("Average EMS signal") mappings = [(key, value) for key, value in event_ids.items()] for key, value in mappings: ems_ave = X_transform[y == value] plt.plot(epochs.times, ems_ave.mean(0), label=key) -plt.xlabel('Time (ms)') -plt.ylabel('a.u.') -plt.legend(loc='best') +plt.xlabel("Time (ms)") +plt.ylabel("a.u.") +plt.legend(loc="best") plt.show() # Visualize spatial filters across time diff --git a/examples/decoding/linear_model_patterns.py b/examples/decoding/linear_model_patterns.py index 3bf30e11161..48d679ed1fd 100644 --- a/examples/decoding/linear_model_patterns.py +++ b/examples/decoding/linear_model_patterns.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-linear-patterns: @@ -15,59 +14,66 @@ Note patterns/filters in MEG data are more similar than EEG data because the noise is less spatially correlated in MEG than EEG. """ + # Authors: Alexandre Gramfort # Romain Trachel -# Jean-Remi King +# Jean-Rémi King # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
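# Before turning to the real MEG/EEG data, here is a tiny self-contained sketch of
# the filter/pattern relationship that this example illustrates: following Haufe et
# al. (2014), the pattern associated with a spatial filter ``w`` is proportional to
# the data covariance applied to that filter, i.e. ``a ~ Cov(X) @ w``. The toy
# names below are purely illustrative and not part of the example data.
import numpy as np

rng = np.random.default_rng(0)
X_toy = rng.standard_normal((200, 5))  # 200 samples, 5 "channels"
w_toy = rng.standard_normal(5)  # an arbitrary spatial filter
a_toy = np.cov(X_toy, rowvar=False) @ w_toy  # corresponding pattern, up to scaling
print(a_toy)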
# %% -import mne -from mne import io, EvokedArray -from mne.datasets import sample -from mne.decoding import Vectorizer, get_coef - -from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler + +import mne +from mne import io +from mne.datasets import sample # import a linear classifier from mne.decoding -from mne.decoding import LinearModel +from mne.decoding import ( + LinearModel, + SpatialFilter, + Vectorizer, + get_spatial_filter_from_estimator, +) print(__doc__) data_path = sample.data_path() -sample_path = data_path / 'MEG' / 'sample' +sample_path = data_path / "MEG" / "sample" # %% # Set parameters -raw_fname = sample_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = sample_path / 'sample_audvis_filt-0-40_raw-eve.fif' +raw_fname = sample_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = sample_path / "sample_audvis_filt-0-40_raw-eve.fif" tmin, tmax = -0.1, 0.4 event_id = dict(aud_l=1, vis_l=3) # Setup for reading the raw data raw = io.read_raw_fif(raw_fname, preload=True) -raw.filter(.5, 25, fir_design='firwin') +raw.filter(0.5, 25, fir_design="firwin") events = mne.read_events(event_fname) # Read epochs -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, - decim=2, baseline=None, preload=True) +epochs = mne.Epochs( + raw, events, event_id, tmin, tmax, proj=True, decim=2, baseline=None, preload=True +) del raw labels = epochs.events[:, -1] -# get MEG and EEG data -meg_epochs = epochs.copy().pick_types(meg=True, eeg=False) -meg_data = meg_epochs.get_data().reshape(len(labels), -1) +# get MEG data +meg_epochs = epochs.copy().pick(picks="meg", exclude="bads") +meg_data = meg_epochs.get_data(copy=False).reshape(len(labels), -1) # %% # Decoding in sensor space using a LogisticRegression classifier # -------------------------------------------------------------- -clf = LogisticRegression(solver='liblinear') # liblinear is faster than lbfgs +clf = LogisticRegression(solver="liblinear") # liblinear is faster than lbfgs scaler = StandardScaler() # create a linear model with LogisticRegression @@ -77,45 +83,74 @@ X = scaler.fit_transform(meg_data) model.fit(X, labels) -# Extract and plot spatial filters and spatial patterns -for name, coef in (('patterns', model.patterns_), ('filters', model.filters_)): - # We fitted the linear model onto Z-scored data. To make the filters +coefs = dict() +for name, coef in (("patterns", model.patterns_), ("filters", model.filters_)): + # We fit the linear model on Z-scored data. To make the filters # interpretable, we must reverse this normalization step coef = scaler.inverse_transform([coef])[0] # The data was vectorized to fit a single model across all time points and # all channels. 
We thus reshape it: - coef = coef.reshape(len(meg_epochs.ch_names), -1) - - # Plot - evoked = EvokedArray(coef, meg_epochs.info, tmin=epochs.tmin) - fig = evoked.plot_topomap() - fig.suptitle(f'MEG {name}') + coefs[name] = coef.reshape(len(meg_epochs.ch_names), -1).T + +# Now we can instantiate the visualization container +spf = SpatialFilter(info=meg_epochs.info, **coefs) +fig = spf.plot_patterns( + # we will automatically select patterns + components="auto", + # as our filters and patterns correspond to actual times + # we can align them + tmin=epochs.tmin, + units="fT", # it's physical - we inversed the scaling + show=False, # to set the title below + name_format=None, # to plot actual times +) +fig.suptitle("MEG patterns") +# Same for filters +fig = spf.plot_filters( + components="auto", + tmin=epochs.tmin, + units="fT", + show=False, + name_format=None, +) +fig.suptitle("MEG filters") # %% # Let's do the same on EEG data using a scikit-learn pipeline -X = epochs.pick_types(meg=False, eeg=True) +X = epochs.pick(picks="eeg", exclude="bads") y = epochs.events[:, 2] # Define a unique pipeline to sequentially: clf = make_pipeline( - Vectorizer(), # 1) vectorize across time and channels - StandardScaler(), # 2) normalize features across trials - LinearModel( # 3) fits a logistic regression - LogisticRegression(solver='liblinear') - ) + Vectorizer(), # 1) vectorize across time and channels + StandardScaler(), # 2) normalize features across trials + LinearModel( # 3) fits a logistic regression + LogisticRegression(solver="liblinear") + ), ) clf.fit(X, y) - -# Extract and plot patterns and filters -for name in ('patterns_', 'filters_'): - # The `inverse_transform` parameter will call this method on any estimator - # contained in the pipeline, in reverse order. - coef = get_coef(clf, name, inverse_transform=True) - evoked = EvokedArray(coef, epochs.info, tmin=epochs.tmin) - fig = evoked.plot_topomap() - fig.suptitle(f'EEG {name[:-1]}') +spf = get_spatial_filter_from_estimator( + clf, info=epochs.info, inverse_transform=True, step_name="linearmodel" +) +fig = spf.plot_patterns( + components="auto", + tmin=epochs.tmin, + units="uV", + show=False, + name_format=None, +) +fig.suptitle("EEG patterns") +# Same for filters +fig = spf.plot_filters( + components="auto", + tmin=epochs.tmin, + units="uV", + show=False, + name_format=None, +) +fig.suptitle("EEG filters") # %% # References diff --git a/examples/decoding/receptive_field_mtrf.py b/examples/decoding/receptive_field_mtrf.py index d4aa6a9b4df..89a97956559 100644 --- a/examples/decoding/receptive_field_mtrf.py +++ b/examples/decoding/receptive_field_mtrf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-receptive-field-mtrf: @@ -18,27 +17,25 @@ .. _figure 1: https://www.frontiersin.org/articles/10.3389/fnhum.2016.00604/full#F1 .. _figure 2: https://www.frontiersin.org/articles/10.3389/fnhum.2016.00604/full#F2 .. _figure 5: https://www.frontiersin.org/articles/10.3389/fnhum.2016.00604/full#F5 -""" # noqa: E501 +""" # Authors: Chris Holdgraf # Eric Larson # Nicolas Barascud # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
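# A small worked number for the delay count used further below in this example:
# the model keeps ``int((tmax - tmin) * sfreq) + 2`` delays, the extra 2 because
# both edge indices are included. Assuming, purely for illustration, that
# tmin, tmax = -0.2, 0.4 and the decimated sampling rate is 64 Hz, this gives
# int(0.6 * 64) + 2 = 40 delays.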
-# %% -# sphinx_gallery_thumbnail_number = 3 +from os.path import join -import numpy as np import matplotlib.pyplot as plt +import numpy as np from scipy.io import loadmat -from os.path import join - -import mne -from mne.decoding import ReceptiveField from sklearn.model_selection import KFold from sklearn.preprocessing import scale +import mne +from mne.decoding import ReceptiveField # %% # Load the data from the publication @@ -53,27 +50,26 @@ path = mne.datasets.mtrf.data_path() decim = 2 -data = loadmat(join(path, 'speech_data.mat')) -raw = data['EEG'].T -speech = data['envelope'].T -sfreq = float(data['Fs']) +data = loadmat(join(path, "speech_data.mat")) +raw = data["EEG"].T +speech = data["envelope"].T +sfreq = float(data["Fs"].item()) sfreq /= decim -speech = mne.filter.resample(speech, down=decim, npad='auto') -raw = mne.filter.resample(raw, down=decim, npad='auto') +speech = mne.filter.resample(speech, down=decim, method="polyphase") +raw = mne.filter.resample(raw, down=decim, method="polyphase") # Read in channel positions and create our MNE objects from the raw data -montage = mne.channels.make_standard_montage('biosemi128') -info = mne.create_info(montage.ch_names, sfreq, 'eeg').set_montage(montage) +montage = mne.channels.make_standard_montage("biosemi128") +info = mne.create_info(montage.ch_names, sfreq, "eeg").set_montage(montage) raw = mne.io.RawArray(raw, info) n_channels = len(raw.ch_names) # Plot a sample of brain and stimulus activity -fig, ax = plt.subplots() -lns = ax.plot(scale(raw[:, :800][0].T), color='k', alpha=.1) -ln1 = ax.plot(scale(speech[0, :800]), color='r', lw=2) -ax.legend([lns[0], ln1[0]], ['EEG', 'Speech Envelope'], frameon=False) +fig, ax = plt.subplots(layout="constrained") +lns = ax.plot(scale(raw[:, :800][0].T), color="k", alpha=0.1) +ln1 = ax.plot(scale(speech[0, :800]), color="r", lw=2) +ax.legend([lns[0], ln1[0]], ["EEG", "Speech Envelope"], frameon=False) ax.set(title="Sample activity", xlabel="Time (s)") -mne.viz.tight_layout() # %% # Create and fit a receptive field model @@ -84,11 +80,12 @@ # us to make predictions about the response to new stimuli. # Define the delays that we will use in the receptive field -tmin, tmax = -.2, .4 +tmin, tmax = -0.2, 0.4 # Initialize the model -rf = ReceptiveField(tmin, tmax, sfreq, feature_names=['envelope'], - estimator=1., scoring='corrcoef') +rf = ReceptiveField( + tmin, tmax, sfreq, feature_names=["envelope"], estimator=1.0, scoring="corrcoef" +) # We'll have (tmax - tmin) * sfreq delays # and an extra 2 delays since we are inclusive on the beginning / end index n_delays = int((tmax - tmin) * sfreq) + 2 @@ -105,7 +102,7 @@ coefs = np.zeros((n_splits, n_channels, n_delays)) scores = np.zeros((n_splits, n_channels)) for ii, (train, test) in enumerate(cv.split(speech)): - print('split %s / %s' % (ii + 1, n_splits)) + print(f"split {ii + 1} / {n_splits}") rf.fit(speech[train], Y[train]) scores[ii] = rf.score(speech[test], Y[test]) # coef_ is shape (n_outputs, n_features, n_delays). we only have 1 feature @@ -117,12 +114,11 @@ mean_scores = scores.mean(axis=0) # Plot mean prediction scores across all channels -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ix_chs = np.arange(n_channels) ax.plot(ix_chs, mean_scores) -ax.axhline(0, ls='--', color='r') +ax.axhline(0, ls="--", color="r") ax.set(title="Mean prediction score", xlabel="Channel", ylabel="Score ($r$)") -mne.viz.tight_layout() # %% # Investigate model coefficients @@ -132,26 +128,39 @@ # across the scalp. 
We will recreate `figure 1`_ and `figure 2`_ from # :footcite:`CrosseEtAl2016`. +# sphinx_gallery_thumbnail_number = 3 + # Print mean coefficients across all time delays / channels (see Fig 1) time_plot = 0.180 # For highlighting a specific time. -fig, ax = plt.subplots(figsize=(4, 8)) +fig, ax = plt.subplots(figsize=(4, 8), layout="constrained") max_coef = mean_coefs.max() -ax.pcolormesh(times, ix_chs, mean_coefs, cmap='RdBu_r', - vmin=-max_coef, vmax=max_coef, shading='gouraud') -ax.axvline(time_plot, ls='--', color='k', lw=2) -ax.set(xlabel='Delay (s)', ylabel='Channel', title="Mean Model\nCoefficients", - xlim=times[[0, -1]], ylim=[len(ix_chs) - 1, 0], - xticks=np.arange(tmin, tmax + .2, .2)) +ax.pcolormesh( + times, + ix_chs, + mean_coefs, + cmap="RdBu_r", + vmin=-max_coef, + vmax=max_coef, + shading="gouraud", +) +ax.axvline(time_plot, ls="--", color="k", lw=2) +ax.set( + xlabel="Delay (s)", + ylabel="Channel", + title="Mean Model\nCoefficients", + xlim=times[[0, -1]], + ylim=[len(ix_chs) - 1, 0], + xticks=np.arange(tmin, tmax + 0.2, 0.2), +) plt.setp(ax.get_xticklabels(), rotation=45) -mne.viz.tight_layout() # Make a topographic map of coefficients for a given delay (see Fig 2C) ix_plot = np.argmin(np.abs(time_plot - times)) -fig, ax = plt.subplots() -mne.viz.plot_topomap(mean_coefs[:, ix_plot], pos=info, axes=ax, show=False, - vlim=(-max_coef, max_coef)) -ax.set(title="Topomap of model coefficients\nfor delay %s" % time_plot) -mne.viz.tight_layout() +fig, ax = plt.subplots(layout="constrained") +mne.viz.plot_topomap( + mean_coefs[:, ix_plot], pos=info, axes=ax, show=False, vlim=(-max_coef, max_coef) +) +ax.set(title=f"Topomap of model coefficients\nfor delay {time_plot}") # %% # Create and fit a stimulus reconstruction model @@ -175,15 +184,22 @@ # positive lags would index how a unit change in the amplitude of the EEG would # affect later stimulus activity (obviously this should have an amplitude of # zero). -tmin, tmax = -.2, 0. +tmin, tmax = -0.2, 0.0 # Initialize the model. Here the features are the EEG data. We also specify # ``patterns=True`` to compute inverse-transformed coefficients during model # fitting (cf. next section and :footcite:`HaufeEtAl2014`). # We'll use a ridge regression estimator with an alpha value similar to # Crosse et al. -sr = ReceptiveField(tmin, tmax, sfreq, feature_names=raw.ch_names, - estimator=1e4, scoring='corrcoef', patterns=True) +sr = ReceptiveField( + tmin, + tmax, + sfreq, + feature_names=raw.ch_names, + estimator=1e4, + scoring="corrcoef", + patterns=True, +) # We'll have (tmax - tmin) * sfreq delays # and an extra 2 delays since we are inclusive on the beginning / end index n_delays = int((tmax - tmin) * sfreq) + 2 @@ -196,7 +212,7 @@ patterns = coefs.copy() scores = np.zeros((n_splits,)) for ii, (train, test) in enumerate(cv.split(speech)): - print('split %s / %s' % (ii + 1, n_splits)) + print(f"split {ii + 1} / {n_splits}") sr.fit(Y[train], speech[train]) scores[ii] = sr.score(Y[test], speech[test])[0] # coef_ is shape (n_outputs, n_features, n_delays). We have 128 features @@ -219,15 +235,15 @@ # stimulus envelopes side by side. 
y_pred = sr.predict(Y[test]) -time = np.linspace(0, 2., 5 * int(sfreq)) -fig, ax = plt.subplots(figsize=(8, 4)) -ax.plot(time, speech[test][sr.valid_samples_][:int(5 * sfreq)], - color='grey', lw=2, ls='--') -ax.plot(time, y_pred[sr.valid_samples_][:int(5 * sfreq)], color='r', lw=2) -ax.legend([lns[0], ln1[0]], ['Envelope', 'Reconstruction'], frameon=False) +time = np.linspace(0, 2.0, 5 * int(sfreq)) +fig, ax = plt.subplots(figsize=(8, 4), layout="constrained") +ax.plot( + time, speech[test][sr.valid_samples_][: int(5 * sfreq)], color="grey", lw=2, ls="--" +) +ax.plot(time, y_pred[sr.valid_samples_][: int(5 * sfreq)], color="r", lw=2) +ax.legend([lns[0], ln1[0]], ["Envelope", "Reconstruction"], frameon=False) ax.set(title="Stimulus reconstruction") -ax.set_xlabel('Time (s)') -mne.viz.tight_layout() +ax.set_xlabel("Time (s)") # %% # Investigate model coefficients @@ -244,22 +260,33 @@ # interpretation as their value (and sign) directly relates to the stimulus # signal's strength (and effect direction). -time_plot = (-.140, -.125) # To average between two timepoints. -ix_plot = np.arange(np.argmin(np.abs(time_plot[0] - times)), - np.argmin(np.abs(time_plot[1] - times))) +time_plot = (-0.140, -0.125) # To average between two timepoints. +ix_plot = np.arange( + np.argmin(np.abs(time_plot[0] - times)), np.argmin(np.abs(time_plot[1] - times)) +) fig, ax = plt.subplots(1, 2) -mne.viz.plot_topomap(np.mean(mean_coefs[:, ix_plot], axis=1), - pos=info, axes=ax[0], show=False, - vlim=(-max_coef, max_coef)) -ax[0].set(title="Model coefficients\nbetween delays %s and %s" - % (time_plot[0], time_plot[1])) - -mne.viz.plot_topomap(np.mean(mean_patterns[:, ix_plot], axis=1), - pos=info, axes=ax[1], - show=False, vlim=(-max_patterns, max_patterns)) -ax[1].set(title="Inverse-transformed coefficients\nbetween delays %s and %s" - % (time_plot[0], time_plot[1])) -mne.viz.tight_layout() +mne.viz.plot_topomap( + np.mean(mean_coefs[:, ix_plot], axis=1), + pos=info, + axes=ax[0], + show=False, + vlim=(-max_coef, max_coef), +) +ax[0].set(title=f"Model coefficients\nbetween delays {time_plot[0]} and {time_plot[1]}") + +mne.viz.plot_topomap( + np.mean(mean_patterns[:, ix_plot], axis=1), + pos=info, + axes=ax[1], + show=False, + vlim=(-max_patterns, max_patterns), +) +ax[1].set( + title=( + f"Inverse-transformed coefficients\nbetween delays {time_plot[0]} and " + f"{time_plot[1]}" + ) +) # %% # References diff --git a/examples/decoding/ssd_spatial_filters.py b/examples/decoding/ssd_spatial_filters.py index eb780192456..7938fe6ad2a 100644 --- a/examples/decoding/ssd_spatial_filters.py +++ b/examples/decoding/ssd_spatial_filters.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- """ .. _ex-ssd-spatial-filters: -=========================================================== -Compute Spectro-Spatial Decomposition (SSD) spatial filters -=========================================================== +================================================================ +Compute spatial filters with Spatio-Spectral Decomposition (SSD) +================================================================ In this example, we will compute spatial filters for retaining oscillatory brain activity and down-weighting 1/f background signals @@ -14,54 +13,65 @@ frequency band of interest and the noise covariance based on surrounding frequencies. """ + # Author: Denis A. Engemann # Victoria Peterson # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% - import matplotlib.pyplot as plt + import mne from mne import Epochs from mne.datasets.fieldtrip_cmc import data_path -from mne.decoding import SSD +from mne.decoding import SSD, get_spatial_filter_from_estimator # %% # Define parameters -fname = data_path() / 'SubjectCMC.ds' +fname = data_path() / "SubjectCMC.ds" # Prepare data raw = mne.io.read_raw_ctf(fname) -raw.crop(50., 110.).load_data() # crop for memory purposes +raw.crop(tmin=50.0, tmax=110.0).load_data() # crop for memory purposes raw.resample(sfreq=250) -raw.pick_types(meg=True, eeg=False, ref_meg=False) +raw.pick_types(meg=True, ref_meg=False) freqs_sig = 9, 12 freqs_noise = 8, 13 -ssd = SSD(info=raw.info, - reg='oas', - sort_by_spectral_ratio=False, # False for purpose of example. - filt_params_signal=dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], - l_trans_bandwidth=1, h_trans_bandwidth=1), - filt_params_noise=dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], - l_trans_bandwidth=1, h_trans_bandwidth=1)) +ssd = SSD( + info=raw.info, + reg="oas", + sort_by_spectral_ratio=False, # False for purpose of example. + filt_params_signal=dict( + l_freq=freqs_sig[0], + h_freq=freqs_sig[1], + l_trans_bandwidth=1, + h_trans_bandwidth=1, + ), + filt_params_noise=dict( + l_freq=freqs_noise[0], + h_freq=freqs_noise[1], + l_trans_bandwidth=1, + h_trans_bandwidth=1, + ), +) ssd.fit(X=raw.get_data()) # %% -# Let's investigate spatial filter with max power ratio. +# Let's investigate spatial filter with the max power ratio. # We will first inspect the topographies. -# According to Nikulin et al. 2011 this is done by either inverting the filters +# According to Nikulin et al. (2011), this is done by either inverting the filters # (W^{-1}) or by multiplying the noise cov with the filters Eq. (22) (C_n W)^t. # We rely on the inversion approach here. -pattern = mne.EvokedArray(data=ssd.patterns_[:4].T, - info=ssd.info) -pattern.plot_topomap(units=dict(mag='A.U.'), time_format='') +spf = get_spatial_filter_from_estimator(ssd, info=ssd.info) +spf.plot_patterns(components=list(range(4))) # The topographies suggest that we picked up a parietal alpha generator. @@ -70,39 +80,40 @@ # Get psd of SSD-filtered signals. psd, freqs = mne.time_frequency.psd_array_welch( - ssd_sources, sfreq=raw.info['sfreq'], n_fft=4096) + ssd_sources, sfreq=raw.info["sfreq"], n_fft=4096 +) -# Get spec_ratio information (already sorted). -# Note that this is not necessary if sort_by_spectral_ratio=True (default). +# Get spec_ratio information (already sorted) +# Note that this is not necessary if sort_by_spectral_ratio=True (default) spec_ratio, sorter = ssd.get_spectral_ratio(ssd_sources) -# Plot spectral ratio (see Eq. 24 in Nikulin 2011). +# Plot spectral ratio (see Eq. 24 in Nikulin et al., 2011). fig, ax = plt.subplots(1) -ax.plot(spec_ratio, color='black') -ax.plot(spec_ratio[sorter], color='orange', label='sorted eigenvalues') +ax.plot(spec_ratio, color="black") +ax.plot(spec_ratio[sorter], color="orange", label="sorted eigenvalues") ax.set_xlabel("Eigenvalue Index") ax.set_ylabel(r"Spectral Ratio $\frac{P_f}{P_{sf}}$") ax.legend() -ax.axhline(1, linestyle='--') +ax.axhline(1, linestyle="--") # We can see that the initial sorting based on the eigenvalues # was already quite good. However, when using few components only # the sorting might make a difference. # %% -# Let's also look at the power spectrum of that source and compare it to +# Let's also look at the power spectrum of that source and compare it # to the power spectrum of the source with lowest SNR. 
below50 = freqs < 50 # for highlighting the freq. band of interest bandfilt = (freqs_sig[0] <= freqs) & (freqs <= freqs_sig[1]) fig, ax = plt.subplots(1) -ax.loglog(freqs[below50], psd[0, below50], label='max SNR') -ax.loglog(freqs[below50], psd[-1, below50], label='min SNR') -ax.loglog(freqs[below50], psd[:, below50].mean(axis=0), label='mean') -ax.fill_between(freqs[bandfilt], 0, 10000, color='green', alpha=0.15) -ax.set_xlabel('log(frequency)') -ax.set_ylabel('log(power)') +ax.loglog(freqs[below50], psd[0, below50], label="max SNR") +ax.loglog(freqs[below50], psd[-1, below50], label="min SNR") +ax.loglog(freqs[below50], psd[:, below50].mean(axis=0), label="mean") +ax.fill_between(freqs[bandfilt], 0, 10000, color="green", alpha=0.15) +ax.set_xlabel("log(frequency)") +ax.set_ylabel("log(power)") ax.legend() # We can clearly see that the selected component enjoys an SNR that is @@ -111,32 +122,36 @@ # %% # Epoched data # ------------ -# Although we suggest to use this method before epoching, there might be some +# Although we suggest using this method before epoching, there might be some # situations in which data can only be treated by chunks. # Build epochs as sliding windows over the continuous raw file. events = mne.make_fixed_length_events(raw, id=1, duration=5.0, overlap=0.0) # Epoch length is 5 seconds. -epochs = Epochs(raw, events, tmin=0., tmax=5, - baseline=None, preload=True) - -ssd_epochs = SSD(info=epochs.info, - reg='oas', - filt_params_signal=dict(l_freq=freqs_sig[0], - h_freq=freqs_sig[1], - l_trans_bandwidth=1, - h_trans_bandwidth=1), - filt_params_noise=dict(l_freq=freqs_noise[0], - h_freq=freqs_noise[1], - l_trans_bandwidth=1, - h_trans_bandwidth=1)) -ssd_epochs.fit(X=epochs.get_data()) +epochs = Epochs(raw, events, tmin=0.0, tmax=5, baseline=None, preload=True) + +ssd_epochs = SSD( + info=epochs.info, + reg="oas", + filt_params_signal=dict( + l_freq=freqs_sig[0], + h_freq=freqs_sig[1], + l_trans_bandwidth=1, + h_trans_bandwidth=1, + ), + filt_params_noise=dict( + l_freq=freqs_noise[0], + h_freq=freqs_noise[1], + l_trans_bandwidth=1, + h_trans_bandwidth=1, + ), +) +ssd_epochs.fit(X=epochs.get_data(copy=False)) # Plot topographies. -pattern_epochs = mne.EvokedArray(data=ssd_epochs.patterns_[:4].T, - info=ssd_epochs.info) -pattern_epochs.plot_topomap(units=dict(mag='A.U.'), time_format='') +spf = get_spatial_filter_from_estimator(ssd_epochs, info=ssd_epochs.info) +spf.plot_patterns(components=list(range(4))) # %% # References # ---------- diff --git a/examples/forward/forward_sensitivity_maps.py b/examples/forward/forward_sensitivity_maps.py index e2ad5acae4b..c9163b5b792 100644 --- a/examples/forward/forward_sensitivity_maps.py +++ b/examples/forward/forward_sensitivity_maps.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-sensitivity-maps: @@ -16,93 +15,99 @@ # Author: Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% +import matplotlib.pyplot as plt import numpy as np + import mne from mne.datasets import sample -from mne.source_space import compute_distance_to_sensors from mne.source_estimate import SourceEstimate -import matplotlib.pyplot as plt +from mne.source_space import compute_distance_to_sensors print(__doc__) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -subjects_dir = data_path / 'subjects' +meg_path = data_path / "MEG" / "sample" +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +subjects_dir = data_path / "subjects" # Read the forward solutions with surface orientation fwd = mne.read_forward_solution(fwd_fname) mne.convert_forward_solution(fwd, surf_ori=True, copy=False) -leadfield = fwd['sol']['data'] -print("Leadfield size : %d x %d" % leadfield.shape) +leadfield = fwd["sol"]["data"] +print(f"Leadfield shape : {leadfield.shape}") # %% # Compute sensitivity maps -grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed') -mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed') -eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed') +grad_map = mne.sensitivity_map(fwd, ch_type="grad", mode="fixed") +mag_map = mne.sensitivity_map(fwd, ch_type="mag", mode="fixed") +eeg_map = mne.sensitivity_map(fwd, ch_type="eeg", mode="fixed") # %% # Show gain matrix a.k.a. leadfield matrix with sensitivity map -picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False) -picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True) +picks_meg = mne.pick_types(fwd["info"], meg=True, eeg=False) +picks_eeg = mne.pick_types(fwd["info"], meg=False, eeg=True) fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True) -fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14) -for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']): - im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto', - cmap='RdBu_r') +fig.suptitle("Lead field matrix (500 dipoles only)", fontsize=14) +for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ["meg", "eeg"]): + im = ax.imshow(leadfield[picks, :500], origin="lower", aspect="auto", cmap="RdBu_r") ax.set_title(ch_type.upper()) - ax.set_xlabel('sources') - ax.set_ylabel('sensors') + ax.set_xlabel("sources") + ax.set_ylabel("sensors") fig.colorbar(im, ax=ax) fig_2, ax = plt.subplots() -ax.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()], - bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'], - color=['c', 'b', 'k']) +ax.hist( + [grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()], + bins=20, + label=["Gradiometers", "Magnetometers", "EEG"], + color=["c", "b", "k"], +) fig_2.legend() -ax.set(title='Normal orientation sensitivity', - xlabel='sensitivity', ylabel='count') +ax.set(title="Normal orientation sensitivity", xlabel="sensitivity", ylabel="count") # sphinx_gallery_thumbnail_number = 3 brain_sens = grad_map.plot( - subjects_dir=subjects_dir, clim=dict(lims=[0, 50, 100]), figure=1) -brain_sens.add_text(0.1, 0.9, 'Gradiometer sensitivity', 'title', font_size=16) + subjects_dir=subjects_dir, clim=dict(lims=[0, 50, 100]), figure=1 +) +brain_sens.add_text(0.1, 0.9, "Gradiometer sensitivity", "title", font_size=16) # %% # Compare sensitivity map with distribution of source depths # source space with vertices -src = fwd['src'] +src = fwd["src"] # Compute minimum Euclidean distances between vertices and MEG sensors -depths = compute_distance_to_sensors(src=src,
info=fwd['info'], - picks=picks_meg).min(axis=1) +depths = compute_distance_to_sensors(src=src, info=fwd["info"], picks=picks_meg).min( + axis=1 +) maxdep = depths.max() # for scaling -vertices = [src[0]['vertno'], src[1]['vertno']] +vertices = [src[0]["vertno"], src[1]["vertno"]] -depths_map = SourceEstimate(data=depths, vertices=vertices, tmin=0., - tstep=1.) +depths_map = SourceEstimate(data=depths, vertices=vertices, tmin=0.0, tstep=1.0) brain_dep = depths_map.plot( - subject='sample', subjects_dir=subjects_dir, - clim=dict(kind='value', lims=[0, maxdep / 2., maxdep]), figure=2) -brain_dep.add_text(0.1, 0.9, 'Source depth (m)', 'title', font_size=16) + subject="sample", + subjects_dir=subjects_dir, + clim=dict(kind="value", lims=[0, maxdep / 2.0, maxdep]), + figure=2, +) +brain_dep.add_text(0.1, 0.9, "Source depth (m)", "title", font_size=16) # %% # Sensitivity is likely to co-vary with the distance between sources and # sensors. To determine the strength of this relationship, we can compute the # correlation between source depth and sensitivity values. corr = np.corrcoef(depths, grad_map.data[:, 0])[0, 1] -print('Correlation between source depth and gradiomter sensitivity values: %f.' - % corr) +print(f"Correlation between source depth and gradiometer sensitivity values: {corr:f}.") # %% # Gradiometer sensitivity is highest close to the sensors, and decreases rapidly diff --git a/examples/forward/left_cerebellum_volume_source.py b/examples/forward/left_cerebellum_volume_source.py index f2b9353b755..ff810493e99 100644 --- a/examples/forward/left_cerebellum_volume_source.py +++ b/examples/forward/left_cerebellum_volume_source.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-cerebellum-source-space: @@ -14,6 +13,7 @@ # Author: Alan Leggitt # # License: BSD-3-Clause +# Copyright the MNE-Python contributors.
# %% @@ -24,9 +24,9 @@ print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -subject = 'sample' -aseg_fname = subjects_dir / 'sample' / 'mri' / 'aseg.mgz' +subjects_dir = data_path / "subjects" +subject = "sample" +aseg_fname = subjects_dir / "sample" / "mri" / "aseg.mgz" # %% # Setup the source spaces @@ -36,11 +36,16 @@ lh_surf = surf[0] # setup a volume source space of the left cerebellum cortex -volume_label = 'Left-Cerebellum-Cortex' +volume_label = "Left-Cerebellum-Cortex" sphere = (0, 0, 0, 0.12) lh_cereb = setup_volume_source_space( - subject, mri=aseg_fname, sphere=sphere, volume_label=volume_label, - subjects_dir=subjects_dir, sphere_units='m') + subject, + mri=aseg_fname, + sphere=sphere, + volume_label=volume_label, + subjects_dir=subjects_dir, + sphere_units="m", +) # Combine the source spaces src = surf + lh_cereb @@ -48,11 +53,16 @@ # %% # Plot the positions of each source space -fig = mne.viz.plot_alignment(subject=subject, subjects_dir=subjects_dir, - surfaces='white', coord_frame='mri', - src=src) -mne.viz.set_3d_view(fig, azimuth=180, elevation=90, - distance=0.30, focalpoint=(-0.03, -0.01, 0.03)) +fig = mne.viz.plot_alignment( + subject=subject, + subjects_dir=subjects_dir, + surfaces="white", + coord_frame="mri", + src=src, +) +mne.viz.set_3d_view( + fig, azimuth=180, elevation=90, distance=0.30, focalpoint=(-0.03, -0.01, 0.03) +) # %% # You can export source positions to a NIfTI file:: @@ -63,8 +73,8 @@ # And display source positions in freeview:: # # >>> from mne.utils import run_subprocess -# >>> mri_fname = subjects_dir + '/sample/mri/brain.mgz' -# >>> run_subprocess(['freeview', '-v', mri_fname, '-v', -# '%s:colormap=lut:opacity=0.5' % aseg_fname, '-v', -# '%s:colormap=jet:colorscale=0,2' % nii_fname, -# '-slice', '157 75 105']) +# >>> mri_fname = subjects_dir / "sample" / "mri" / "brain.mgz" +# >>> run_subprocess(["freeview", "-v", str(mri_fname), "-v", +# f"{aseg_fname}:colormap=lut:opacity=0.5", +# "-v", f"{nii_fname}:colormap=jet:colorscale=0,2", +# "--slice", "157", "75", "105"]) diff --git a/examples/forward/source_space_morphing.py b/examples/forward/source_space_morphing.py index 712562ee358..fd5b992696e 100644 --- a/examples/forward/source_space_morphing.py +++ b/examples/forward/source_space_morphing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-source-space-morphing: @@ -19,44 +18,45 @@ # Eric larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% import mne data_path = mne.datasets.sample.data_path() -subjects_dir = data_path / 'subjects' -fname_trans = ( - data_path / 'MEG' / 'sample' / 'sample_audvis_raw-trans.fif') -fname_bem = ( - subjects_dir / 'sample' / 'bem' / 'sample-5120-bem-sol.fif') -fname_src_fs = ( - subjects_dir / 'fsaverage' / 'bem' / 'fsaverage-ico-5-src.fif') -raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif' +subjects_dir = data_path / "subjects" +fname_trans = data_path / "MEG" / "sample" / "sample_audvis_raw-trans.fif" +fname_bem = subjects_dir / "sample" / "bem" / "sample-5120-bem-sol.fif" +fname_src_fs = subjects_dir / "fsaverage" / "bem" / "fsaverage-ico-5-src.fif" +raw_fname = data_path / "MEG" / "sample" / "sample_audvis_raw.fif" # Get relevant channel information info = mne.io.read_info(raw_fname) -info = mne.pick_info(info, mne.pick_types(info, meg=True, eeg=False, - exclude=[])) +info = mne.pick_info(info, mne.pick_types(info, meg=True, eeg=False, exclude=[])) # Morph fsaverage's source space to sample src_fs = mne.read_source_spaces(fname_src_fs) -src_morph = mne.morph_source_spaces(src_fs, subject_to='sample', - subjects_dir=subjects_dir) +src_morph = mne.morph_source_spaces( + src_fs, subject_to="sample", subjects_dir=subjects_dir +) # Compute the forward with our morphed source space -fwd = mne.make_forward_solution(info, trans=fname_trans, - src=src_morph, bem=fname_bem) -mag_map = mne.sensitivity_map(fwd, ch_type='mag') +fwd = mne.make_forward_solution(info, trans=fname_trans, src=src_morph, bem=fname_bem) +mag_map = mne.sensitivity_map(fwd, ch_type="mag") # Return this SourceEstimate (on sample's surfaces) to fsaverage's surfaces mag_map_fs = mag_map.to_original_src(src_fs, subjects_dir=subjects_dir) # Plot the result, which tracks the sulcal-gyral folding # outliers may occur, we'll place the cutoff at 99 percent. -kwargs = dict(clim=dict(kind='percent', lims=[0, 50, 99]), - # no smoothing, let's see the dipoles on the cortex. - smoothing_steps=1, hemi='rh', views=['lat']) +kwargs = dict( + clim=dict(kind="percent", lims=[0, 50, 99]), + # no smoothing, let's see the dipoles on the cortex. + smoothing_steps=1, + hemi="rh", + views=["lat"], +) # Now note that the dipoles on fsaverage are almost equidistant while # morphing will distribute the dipoles unevenly across the given subject's @@ -64,7 +64,9 @@ # Our testing code suggests a correlation of higher than 0.99. brain_subject = mag_map.plot( # plot forward in subject source space (morphed) - time_label='Morphed', subjects_dir=subjects_dir, **kwargs) + time_label="Morphed", subjects_dir=subjects_dir, **kwargs +) brain_fs = mag_map_fs.plot( # plot forward in original source space (remapped) - time_label='Remapped', subjects_dir=subjects_dir, **kwargs) + time_label="Remapped", subjects_dir=subjects_dir, **kwargs +) diff --git a/examples/inverse/compute_mne_inverse_epochs_in_label.py b/examples/inverse/compute_mne_inverse_epochs_in_label.py index 4a5129d9ca8..ca15c80efcc 100644 --- a/examples/inverse/compute_mne_inverse_epochs_in_label.py +++ b/examples/inverse/compute_mne_inverse_epochs_in_label.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-dSPM-epochs: @@ -12,32 +11,32 @@ # Author: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% -import numpy as np import matplotlib.pyplot as plt +import numpy as np import mne from mne.datasets import sample -from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator -from mne.minimum_norm import apply_inverse +from mne.minimum_norm import apply_inverse, apply_inverse_epochs, read_inverse_operator print(__doc__) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif' -fname_raw = meg_path / 'sample_audvis_filt-0-40_raw.fif' -fname_event = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' -label_name = 'Aud-lh' -fname_label = meg_path / 'labels' / f'{label_name}.label' +meg_path = data_path / "MEG" / "sample" +fname_inv = meg_path / "sample_audvis-meg-oct-6-meg-inv.fif" +fname_raw = meg_path / "sample_audvis_filt-0-40_raw.fif" +fname_event = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" +label_name = "Aud-lh" +fname_label = meg_path / "labels" / f"{label_name}.label" event_id, tmin, tmax = 1, -0.2, 0.5 # Using the same inverse operator when inspecting single trials Vs. evoked snr = 3.0 # Standard assumption for average data but using it for single trial -lambda2 = 1.0 / snr ** 2 +lambda2 = 1.0 / snr**2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) @@ -51,15 +50,23 @@ include = [] # Add a bad channel -raw.info['bads'] += ['EEG 053'] # bads + 1 more +raw.info["bads"] += ["EEG 053"] # bads + 1 more # pick MEG channels -picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, - include=include, exclude='bads') +picks = mne.pick_types( + raw.info, meg=True, eeg=False, stim=False, eog=True, include=include, exclude="bads" +) # Read epochs -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13, - eog=150e-6)) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=picks, + baseline=(None, 0), + reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6), +) # Get evoked data (averaging across trials in sensor space) evoked = epochs.average() @@ -67,21 +74,27 @@ # Compute inverse solution and stcs for each epoch # Use the same inverse operator as with evoked data (i.e., set nave) # If you use a different nave, dSPM just scales by a factor sqrt(nave) -stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, label, - pick_ori="normal", nave=evoked.nave) +stcs = apply_inverse_epochs( + epochs, + inverse_operator, + lambda2, + method, + label, + pick_ori="normal", + nave=evoked.nave, +) # Mean across trials but not across vertices in label mean_stc = sum(stcs) / len(stcs) # compute sign flip to avoid signal cancellation when averaging signed values -flip = mne.label_sign_flip(label, inverse_operator['src']) +flip = mne.label_sign_flip(label, inverse_operator["src"]) label_mean = np.mean(mean_stc.data, axis=0) label_mean_flip = np.mean(flip[:, np.newaxis] * mean_stc.data, axis=0) # Get inverse solution by inverting evoked data -stc_evoked = apply_inverse(evoked, inverse_operator, lambda2, method, - pick_ori="normal") +stc_evoked = apply_inverse(evoked, inverse_operator, lambda2, method, pick_ori="normal") # apply_inverse() does whole brain, so sub-select label of interest stc_evoked_label = stc_evoked.in_label(label) @@ -95,13 +108,12 @@ times = 1e3 * stcs[0].times # times in ms plt.figure() -h0 = plt.plot(times, mean_stc.data.T, 'k') -h1, = plt.plot(times, label_mean, 'r', linewidth=3) -h2, = plt.plot(times, label_mean_flip, 'g', linewidth=3) -plt.legend((h0[0], 
h1, h2), ('all dipoles in label', 'mean', - 'mean with sign flip')) -plt.xlabel('time (ms)') -plt.ylabel('dSPM value') +h0 = plt.plot(times, mean_stc.data.T, "k") +(h1,) = plt.plot(times, label_mean, "r", linewidth=3) +(h2,) = plt.plot(times, label_mean_flip, "g", linewidth=3) +plt.legend((h0[0], h1, h2), ("all dipoles in label", "mean", "mean with sign flip")) +plt.xlabel("time (ms)") +plt.ylabel("dSPM value") plt.show() # %% @@ -111,19 +123,21 @@ # Single trial plt.figure() for k, stc_trial in enumerate(stcs): - plt.plot(times, np.mean(stc_trial.data, axis=0).T, 'k--', - label='Single Trials' if k == 0 else '_nolegend_', - alpha=0.5) + plt.plot( + times, + np.mean(stc_trial.data, axis=0).T, + "k--", + label="Single Trials" if k == 0 else "_nolegend_", + alpha=0.5, + ) # Single trial inverse then average.. making linewidth large to not be masked -plt.plot(times, label_mean, 'b', linewidth=6, - label='dSPM first, then average') +plt.plot(times, label_mean, "b", linewidth=6, label="dSPM first, then average") # Evoked and then inverse -plt.plot(times, label_mean_evoked, 'r', linewidth=2, - label='Average first, then dSPM') +plt.plot(times, label_mean_evoked, "r", linewidth=2, label="Average first, then dSPM") -plt.xlabel('time (ms)') -plt.ylabel('dSPM value') +plt.xlabel("time (ms)") +plt.ylabel("dSPM value") plt.legend() plt.show() diff --git a/examples/inverse/compute_mne_inverse_raw_in_label.py b/examples/inverse/compute_mne_inverse_raw_in_label.py index c38199035fb..b462c09e180 100644 --- a/examples/inverse/compute_mne_inverse_raw_in_label.py +++ b/examples/inverse/compute_mne_inverse_raw_in_label.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _example-sLORETA: @@ -14,6 +13,7 @@ # Author: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -26,14 +26,13 @@ print(__doc__) data_path = sample.data_path() -fname_inv = ( - data_path / 'MEG' / 'sample' / 'sample_audvis-meg-oct-6-meg-inv.fif') -fname_raw = data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif' -label_name = 'Aud-lh' -fname_label = data_path / 'MEG' / 'sample' / 'labels' / f'{label_name}.label' +fname_inv = data_path / "MEG" / "sample" / "sample_audvis-meg-oct-6-meg-inv.fif" +fname_raw = data_path / "MEG" / "sample" / "sample_audvis_raw.fif" +label_name = "Aud-lh" +fname_label = data_path / "MEG" / "sample" / "labels" / f"{label_name}.label" snr = 1.0 # use smaller SNR for raw data -lambda2 = 1.0 / snr ** 2 +lambda2 = 1.0 / snr**2 method = "sLORETA" # use sLORETA method (could also be MNE or dSPM) # Load data @@ -41,19 +40,20 @@ inverse_operator = read_inverse_operator(fname_inv) label = mne.read_label(fname_label) -raw.set_eeg_reference('average', projection=True) # set average reference. +raw.set_eeg_reference("average", projection=True) # set average reference. 
start, stop = raw.time_as_index([0, 15]) # read the first 15s of data # Compute inverse solution -stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label, - start, stop, pick_ori=None) +stc = apply_inverse_raw( + raw, inverse_operator, lambda2, method, label, start, stop, pick_ori=None +) # Save result in stc files -stc.save('mne_%s_raw_inverse_%s' % (method, label_name), overwrite=True) +stc.save(f"mne_{method}_raw_inverse_{label_name}", overwrite=True) # %% # View activation time-series plt.plot(1e3 * stc.times, stc.data[::100, :].T) -plt.xlabel('time (ms)') -plt.ylabel('%s value' % method) +plt.xlabel("time (ms)") +plt.ylabel(f"{method} value") plt.show() diff --git a/examples/inverse/compute_mne_inverse_volume.py b/examples/inverse/compute_mne_inverse_volume.py index 02c5af5744c..b4889df67a6 100644 --- a/examples/inverse/compute_mne_inverse_volume.py +++ b/examples/inverse/compute_mne_inverse_volume.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-inverse-volume: @@ -12,46 +11,50 @@ # Author: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% -from nilearn.plotting import plot_stat_map from nilearn.image import index_img +from nilearn.plotting import plot_stat_map -from mne.datasets import sample from mne import read_evokeds +from mne.datasets import sample from mne.minimum_norm import apply_inverse, read_inverse_operator print(__doc__) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fname_inv = meg_path / 'sample_audvis-meg-vol-7-meg-inv.fif' -fname_evoked = meg_path / 'sample_audvis-ave.fif' +meg_path = data_path / "MEG" / "sample" +fname_inv = meg_path / "sample_audvis-meg-vol-7-meg-inv.fif" +fname_evoked = meg_path / "sample_audvis-ave.fif" snr = 3.0 -lambda2 = 1.0 / snr ** 2 +lambda2 = 1.0 / snr**2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) # Load data evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0)) inverse_operator = read_inverse_operator(fname_inv) -src = inverse_operator['src'] +src = inverse_operator["src"] # Compute inverse solution stc = apply_inverse(evoked, inverse_operator, lambda2, method) stc.crop(0.0, 0.2) # Export result as a 4D nifti object -img = stc.as_volume(src, - mri_resolution=False) # set True for full MRI resolution +img = stc.as_volume(src, mri_resolution=False) # set True for full MRI resolution # Save it as a nifti file -# nib.save(img, 'mne_%s_inverse.nii.gz' % method) +# nib.save(img, f"mne_{method}_inverse.nii.gz") -t1_fname = data_path / 'subjects' / 'sample' / 'mri' / 'T1.mgz' +t1_fname = data_path / "subjects" / "sample" / "mri" / "T1.mgz" # %% # Plot with nilearn: -plot_stat_map(index_img(img, 61), str(t1_fname), threshold=8., - title='%s (t=%.1f s.)' % (method, stc.times[61])) +plot_stat_map( + index_img(img, 61), + str(t1_fname), + threshold=8.0, + title=f"{method} (t={stc.times[61]:.1f} s.)", +) diff --git a/examples/inverse/custom_inverse_solver.py b/examples/inverse/custom_inverse_solver.py index f5ad92b3daf..e8444dcd046 100644 --- a/examples/inverse/custom_inverse_solver.py +++ b/examples/inverse/custom_inverse_solver.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-custom-inverse: @@ -20,22 +19,26 @@ in order to try out another inverse algorithm. """ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
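# As a bare-bones illustration of the solver interface this example builds on
# (a hypothetical toy, not the regularized solver defined below): the callable
# receives the whitened data ``M`` and the gain matrix ``G`` together with the
# number of orientations, and returns the source time courses plus a boolean
# active-set mask over the columns of ``G``.
import numpy as np


def toy_solver(M, G, n_orient):
    """Plain (unregularized) minimum-norm fit that keeps every source active."""
    X = np.linalg.pinv(G) @ M  # shape (n_dipoles, n_times)
    active_set = np.ones(G.shape[1], dtype=bool)
    return X, active_set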
+ # %% import numpy as np from scipy import linalg + import mne from mne.datasets import sample from mne.viz import plot_sparse_source_estimates - data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -ave_fname = meg_path / 'sample_audvis-ave.fif' -cov_fname = meg_path / 'sample_audvis-shrunk-cov.fif' -subjects_dir = data_path / 'subjects' -condition = 'Left Auditory' +meg_path = data_path / "MEG" / "sample" +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +ave_fname = meg_path / "sample_audvis-ave.fif" +cov_fname = meg_path / "sample_audvis-shrunk-cov.fif" +subjects_dir = data_path / "subjects" +condition = "Left Auditory" # Read noise covariance matrix noise_cov = mne.read_cov(cov_fname) @@ -43,7 +46,7 @@ evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0)) evoked.crop(tmin=0.04, tmax=0.18) -evoked = evoked.pick_types(eeg=False, meg=True) +evoked = evoked.pick(picks="meg", exclude="bads") # Handling forward solution forward = mne.read_forward_solution(fwd_fname) @@ -51,6 +54,7 @@ # %% # Auxiliary function to run the solver + def apply_solver(solver, evoked, forward, noise_cov, loose=0.2, depth=0.8): """Call a custom solver on evoked data. @@ -94,19 +98,30 @@ def apply_solver(solver, evoked, forward, noise_cov, loose=0.2, depth=0.8): The source estimates. """ # Import the necessary private functions - from mne.inverse_sparse.mxne_inverse import \ - (_prepare_gain, is_fixed_orient, - _reapply_source_weighting, _make_sparse_stc) + from mne.inverse_sparse.mxne_inverse import ( + _make_sparse_stc, + _prepare_gain, + _reapply_source_weighting, + is_fixed_orient, + ) all_ch_names = evoked.ch_names # Handle depth weighting and whitening (here is no weights) forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain( - forward, evoked.info, noise_cov, pca=False, depth=depth, - loose=loose, weights=None, weights_min=None, rank=None) + forward, + evoked.info, + noise_cov, + pca=False, + depth=depth, + loose=loose, + weights=None, + weights_min=None, + rank=None, + ) # Select channels of interest - sel = [all_ch_names.index(name) for name in gain_info['ch_names']] + sel = [all_ch_names.index(name) for name in gain_info["ch_names"]] M = evoked.data[sel] # Whiten data @@ -116,8 +131,9 @@ def apply_solver(solver, evoked, forward, noise_cov, loose=0.2, depth=0.8): X, active_set = solver(M, gain, n_orient) X = _reapply_source_weighting(X, source_weighting, active_set) - stc = _make_sparse_stc(X, active_set, forward, tmin=evoked.times[0], - tstep=1. / evoked.info['sfreq']) + stc = _make_sparse_stc( + X, active_set, forward, tmin=evoked.times[0], tstep=1.0 / evoked.info["sfreq"] + ) return stc @@ -125,6 +141,7 @@ def apply_solver(solver, evoked, forward, noise_cov, loose=0.2, depth=0.8): # %% # Define your solver + def solver(M, G, n_orient): """Run L2 penalized regression and keep 10 strongest locations. @@ -156,11 +173,11 @@ def solver(M, G, n_orient): K /= np.linalg.norm(K, axis=1)[:, None] X = np.dot(K, M) - indices = np.argsort(np.sum(X ** 2, axis=1))[-10:] + indices = np.argsort(np.sum(X**2, axis=1))[-10:] active_set = np.zeros(G.shape[1], dtype=bool) for idx in indices: idx -= idx % n_orient - active_set[idx:idx + n_orient] = True + active_set[idx : idx + n_orient] = True X = X[active_set] return X, active_set @@ -169,10 +186,9 @@ def solver(M, G, n_orient): # Apply your custom solver # loose, depth = 0.2, 0.8 # corresponds to loose orientation -loose, depth = 1., 0. 
# corresponds to free orientation +loose, depth = 1.0, 0.0 # corresponds to free orientation stc = apply_solver(solver, evoked, forward, noise_cov, loose, depth) # %% # View in 2D and 3D ("glass" brain like 3D plot) -plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1), - opacity=0.1) +plot_sparse_source_estimates(forward["src"], stc, bgcolor=(1, 1, 1), opacity=0.1) diff --git a/examples/inverse/dics_epochs.py b/examples/inverse/dics_epochs.py index 30039c86ad3..c359c30c0fb 100644 --- a/examples/inverse/dics_epochs.py +++ b/examples/inverse/dics_epochs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-inverse-dics-epochs: @@ -16,12 +15,14 @@ # Alex Rockhill # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np + import mne +from mne.beamformer import apply_dics_tfr_epochs, make_dics from mne.datasets import somato -from mne.time_frequency import tfr_morlet, csd_tfr -from mne.beamformer import make_dics, apply_dics_tfr_epochs +from mne.time_frequency import csd_tfr print(__doc__) @@ -29,13 +30,13 @@ # Organize the data that we will use for this example. data_path = somato.data_path() -subject = '01' -task = 'somato' -raw_fname = (data_path / f'sub-{subject}' / 'meg' / - f'sub-{subject}_task-{task}_meg.fif') -fname_fwd = (data_path / 'derivatives' / f'sub-{subject}' / - f'sub-{subject}_task-{task}-fwd.fif') -subjects_dir = data_path / 'derivatives' / 'freesurfer' / 'subjects' +subject = "01" +task = "somato" +raw_fname = data_path / f"sub-{subject}" / "meg" / f"sub-{subject}_task-{task}_meg.fif" +fname_fwd = ( + data_path / "derivatives" / f"sub-{subject}" / f"sub-{subject}_task-{task}-fwd.fif" +) +subjects_dir = data_path / "derivatives" / "freesurfer" / "subjects" # %% # First, we load the data and compute for each epoch the time-frequency @@ -44,12 +45,19 @@ # Load raw data and make epochs. raw = mne.io.read_raw_fif(raw_fname) events = mne.find_events(raw) -epochs = mne.Epochs(raw, events, event_id=1, tmin=-1, tmax=2.5, - reject=dict(grad=5000e-13, # unit: T / m (gradiometers) - mag=5e-12, # unit: T (magnetometers) - eog=250e-6, # unit: V (EOG channels) - ), preload=True) -epochs = epochs[:10] # just for speed of execution for the tutorial +epochs = mne.Epochs( + raw, + events[:22], # just for execution speed of the tutorial + event_id=1, + tmin=-1, + tmax=2.5, + reject=dict( + grad=5000e-13, # unit: T / m (gradiometers) + mag=5e-12, # unit: T (magnetometers) + eog=250e-6, # unit: V (EOG channels) + ), + preload=True, +) # We are mostly interested in the beta band since it has been shown to be # active for somatosensory stimulation @@ -59,8 +67,9 @@ # decomposition for each epoch. We must pass ``output='complex'`` if we wish to # use this TFR later with a DICS beamformer. We also pass ``average=False`` to # compute the TFR for each individual epoch. 
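
# --- Editorial sketch (not part of the diff) ---------------------------------
# A hedged illustration of the per-epoch TFR pattern the next hunk switches to:
# `Epochs.compute_tfr()` replaces the legacy `tfr_morlet()` call. It assumes
# `epochs` is the somato Epochs object built earlier in dics_epochs.py; the
# frequency grid and the csd_tfr() window are plausible beta-band choices, not
# necessarily the exact values used elsewhere in the example.
import numpy as np

from mne.time_frequency import csd_tfr

freqs = np.logspace(np.log10(12), np.log10(30), 9)  # beta band
epochs_tfr = epochs.compute_tfr(
    "morlet", freqs, n_cycles=5, return_itc=False, output="complex", average=False
)
# keeping the complex output is what lets csd_tfr() build the cross-spectral
# density that make_dics() needs later on
csd = csd_tfr(epochs_tfr, tmin=-0.5, tmax=2)
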
-epochs_tfr = tfr_morlet(epochs, freqs, n_cycles=5, return_itc=False, - output='complex', average=False) +epochs_tfr = epochs.compute_tfr( + "morlet", freqs, n_cycles=5, return_itc=False, output="complex", average=False +) # crop either side to use a buffer to remove edge artifact epochs_tfr.crop(tmin=-0.5, tmax=2) @@ -79,15 +88,21 @@ fwd = mne.read_forward_solution(fname_fwd) # compute scalar DICS beamfomer -filters = make_dics(epochs.info, fwd, csd, noise_csd=baseline_csd, - pick_ori='max-power', reduce_rank=True, real_filter=True) +filters = make_dics( + epochs.info, + fwd, + csd, + noise_csd=baseline_csd, + pick_ori="max-power", + reduce_rank=True, + real_filter=True, +) # project the TFR for each epoch to source space -epochs_stcs = apply_dics_tfr_epochs( - epochs_tfr, filters, return_generator=True) +epochs_stcs = apply_dics_tfr_epochs(epochs_tfr, filters, return_generator=True) # average across frequencies and epochs -data = np.zeros((fwd['nsource'], epochs_tfr.times.size)) +data = np.zeros((fwd["nsource"], epochs_tfr.times.size)) for epoch_stcs in epochs_stcs: for stc in epoch_stcs: data += (stc.data * np.conj(stc.data)).real @@ -105,15 +120,15 @@ fmax = 4500 brain = stc.plot( subjects_dir=subjects_dir, - hemi='both', - views='dorsal', - initial_time=0.55, + hemi="both", + views="dorsal", + initial_time=1.2, brain_kwargs=dict(show=False), - add_data_kwargs=dict(fmin=fmax / 10, fmid=fmax / 2, fmax=fmax, - scale_factor=0.0001, - colorbar_kwargs=dict(label_font_size=10)) + add_data_kwargs=dict( + fmin=fmax / 10, + fmid=fmax / 2, + fmax=fmax, + scale_factor=0.0001, + colorbar_kwargs=dict(label_font_size=10), + ), ) - -# You can save a movie like the one on our documentation website with: -# brain.save_movie(tmin=0.55, tmax=1.5, interpolation='linear', -# time_viewer=True) diff --git a/examples/inverse/dics_source_power.py b/examples/inverse/dics_source_power.py index d9ae604b5ba..586044cdd9f 100644 --- a/examples/inverse/dics_source_power.py +++ b/examples/inverse/dics_source_power.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-inverse-source-power: @@ -18,24 +17,25 @@ # Stefan Appelhoff # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import numpy as np + import mne +from mne.beamformer import apply_dics_csd, make_dics from mne.datasets import somato from mne.time_frequency import csd_morlet -from mne.beamformer import make_dics, apply_dics_csd print(__doc__) # %% # Reading the raw data and creating epochs: data_path = somato.data_path() -subject = '01' -task = 'somato' -raw_fname = (data_path / f'sub-{subject}' / 'meg' / - f'sub-{subject}_task-{task}_meg.fif') +subject = "01" +task = "somato" +raw_fname = data_path / f"sub-{subject}" / "meg" / f"sub-{subject}_task-{task}_meg.fif" # Use a shorter segment of raw just for speed here raw = mne.io.read_raw_fif(raw_fname) @@ -48,10 +48,11 @@ del raw # Paths to forward operator and FreeSurfer subject directory -fname_fwd = (data_path / 'derivatives' / f'sub-{subject}' / - f'sub-{subject}_task-{task}-fwd.fif') +fname_fwd = ( + data_path / "derivatives" / f"sub-{subject}" / f"sub-{subject}_task-{task}-fwd.fif" +) -subjects_dir = data_path / 'derivatives' / 'freesurfer' / 'subjects' +subjects_dir = data_path / "derivatives" / "freesurfer" / "subjects" # %% # We are interested in the beta band. Define a range of frequencies, using a @@ -80,8 +81,15 @@ # Computing DICS spatial filters using the CSD that was computed on the entire # timecourse. 
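
# --- Editorial sketch (not part of the diff) ---------------------------------
# The DICS source-power recipe this example follows, gathered in one place;
# `info`, `fwd`, `csd` and `csd_baseline` are assumed to be the objects built
# above in dics_source_power.py, and the `.mean()` calls collapse the
# per-frequency CSDs into a single estimate (drop them if already averaged).
from mne.beamformer import apply_dics_csd, make_dics

filters = make_dics(
    info,
    fwd,
    csd,
    noise_csd=csd_baseline,
    pick_ori="max-power",
    reduce_rank=True,
    real_filter=True,
)
# one power estimate per source for each period; their ratio is what gets
# plotted relative to baseline further below
beta_source_power, beta_freqs = apply_dics_csd(csd.mean(), filters)
baseline_source_power, _ = apply_dics_csd(csd_baseline.mean(), filters)
stc_relative = beta_source_power / baseline_source_power
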
fwd = mne.read_forward_solution(fname_fwd) -filters = make_dics(info, fwd, csd, noise_csd=csd_baseline, - pick_ori='max-power', reduce_rank=True, real_filter=True) +filters = make_dics( + info, + fwd, + csd, + noise_csd=csd_baseline, + pick_ori="max-power", + reduce_rank=True, + real_filter=True, +) del fwd # %% @@ -93,9 +101,14 @@ # %% # Visualizing source power during ERS activity relative to the baseline power. stc = beta_source_power / baseline_source_power -message = 'DICS source power in the 12-30 Hz frequency band' -brain = stc.plot(hemi='both', views='axial', subjects_dir=subjects_dir, - subject=subject, time_label=message) +message = "DICS source power in the 12-30 Hz frequency band" +brain = stc.plot( + hemi="both", + views="axial", + subjects_dir=subjects_dir, + subject=subject, + time_label=message, +) # %% # References diff --git a/examples/inverse/evoked_ers_source_power.py b/examples/inverse/evoked_ers_source_power.py index 5fa07d3ab52..7e12b3c8028 100644 --- a/examples/inverse/evoked_ers_source_power.py +++ b/examples/inverse/evoked_ers_source_power.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-source-loc-methods: @@ -15,27 +14,27 @@ # Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import numpy as np + import mne +from mne.beamformer import apply_dics_csd, apply_lcmv_cov, make_dics, make_lcmv from mne.cov import compute_covariance from mne.datasets import somato +from mne.minimum_norm import apply_inverse_cov, make_inverse_operator from mne.time_frequency import csd_morlet -from mne.beamformer import (make_dics, apply_dics_csd, make_lcmv, - apply_lcmv_cov) -from mne.minimum_norm import (make_inverse_operator, apply_inverse_cov) print(__doc__) # %% # Reading the raw data and creating epochs: data_path = somato.data_path() -subject = '01' -task = 'somato' -raw_fname = (data_path / 'sub-{}'.format(subject) / 'meg' / - 'sub-{}_task-{}_meg.fif'.format(subject, task)) +subject = "01" +task = "somato" +raw_fname = data_path / f"sub-{subject}" / "meg" / f"sub-{subject}_task-{task}_meg.fif" # crop to 5 minutes to save memory raw = mne.io.read_raw_fif(raw_fname).crop(0, 300) @@ -45,17 +44,19 @@ # The DICS beamformer currently only supports a single sensor type. # We'll use the gradiometers in this example. -picks = mne.pick_types(raw.info, meg='grad', exclude='bads') +picks = mne.pick_types(raw.info, meg="grad", exclude="bads") # Read epochs events = mne.find_events(raw) -epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, picks=picks, - preload=True, decim=3) +epochs = mne.Epochs( + raw, events, event_id=1, tmin=-1.5, tmax=2, picks=picks, preload=True, decim=3 +) # Read forward operator and point to freesurfer subject directory -fname_fwd = (data_path / 'derivatives' / 'sub-{}'.format(subject) / - 'sub-{}_task-{}-fwd.fif'.format(subject, task)) -subjects_dir = data_path / 'derivatives' / 'freesurfer' / 'subjects' +fname_fwd = ( + data_path / "derivatives" / f"sub-{subject}" / f"sub-{subject}_task-{task}-fwd.fif" +) +subjects_dir = data_path / "derivatives" / "freesurfer" / "subjects" fwd = mne.read_forward_solution(fname_fwd) @@ -69,18 +70,29 @@ # combination with an advanced covariance estimator like "shrunk", the rank # will be correctly preserved. 
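
# --- Editorial sketch (not part of the diff) ---------------------------------
# What the rank comment above boils down to, assuming `epochs` from this
# example: estimate the (deficient) data rank once, then hand it to every
# "shrunk" covariance estimate so the regularization respects that rank.
import mne

rank = mne.compute_rank(epochs, tol=1e-6, tol_kind="relative")
baseline_cov = mne.compute_covariance(
    epochs, tmin=-1, tmax=0, method="shrunk", rank=rank
)
active_cov = mne.compute_covariance(
    epochs, tmin=0.5, tmax=1.5, method="shrunk", rank=rank
)
common_cov = baseline_cov + active_cov  # weighted average of the two windows
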
-rank = mne.compute_rank(epochs, tol=1e-6, tol_kind='relative') +rank = mne.compute_rank(epochs, tol=1e-6, tol_kind="relative") active_win = (0.5, 1.5) baseline_win = (-1, 0) -baseline_cov = compute_covariance(epochs, tmin=baseline_win[0], - tmax=baseline_win[1], method='shrunk', - rank=rank, verbose=True) -active_cov = compute_covariance(epochs, tmin=active_win[0], tmax=active_win[1], - method='shrunk', rank=rank, verbose=True) +baseline_cov = compute_covariance( + epochs, + tmin=baseline_win[0], + tmax=baseline_win[1], + method="shrunk", + rank=rank, + verbose=True, +) +active_cov = compute_covariance( + epochs, + tmin=active_win[0], + tmax=active_win[1], + method="shrunk", + rank=rank, + verbose=True, +) # Weighted averaging is already in the addition of covariance objects. common_cov = baseline_cov + active_cov -mne.viz.plot_cov(baseline_cov, epochs.info) +baseline_cov.plot(epochs.info) # %% # Compute some source estimates @@ -93,12 +105,21 @@ def _gen_dics(active_win, baseline_win, epochs): freqs = np.logspace(np.log10(12), np.log10(30), 9) csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20) - csd_baseline = csd_morlet(epochs, freqs, tmin=baseline_win[0], - tmax=baseline_win[1], decim=20) - csd_ers = csd_morlet(epochs, freqs, tmin=active_win[0], tmax=active_win[1], - decim=20) - filters = make_dics(epochs.info, fwd, csd.mean(), pick_ori='max-power', - reduce_rank=True, real_filter=True, rank=rank) + csd_baseline = csd_morlet( + epochs, freqs, tmin=baseline_win[0], tmax=baseline_win[1], decim=20 + ) + csd_ers = csd_morlet( + epochs, freqs, tmin=active_win[0], tmax=active_win[1], decim=20 + ) + filters = make_dics( + epochs.info, + fwd, + csd.mean(), + pick_ori="max-power", + reduce_rank=True, + real_filter=True, + rank=rank, + ) stc_base, freqs = apply_dics_csd(csd_baseline.mean(), filters) stc_act, freqs = apply_dics_csd(csd_ers.mean(), filters) stc_act /= stc_base @@ -107,8 +128,9 @@ def _gen_dics(active_win, baseline_win, epochs): # generate lcmv source estimate def _gen_lcmv(active_cov, baseline_cov, common_cov): - filters = make_lcmv(epochs.info, fwd, common_cov, reg=0.05, - noise_cov=None, pick_ori='max-power') + filters = make_lcmv( + epochs.info, fwd, common_cov, reg=0.05, noise_cov=None, pick_ori="max-power" + ) stc_base = apply_lcmv_cov(baseline_cov, filters) stc_act = apply_lcmv_cov(active_cov, filters) stc_act /= stc_base @@ -116,12 +138,14 @@ def _gen_lcmv(active_cov, baseline_cov, common_cov): # generate mne/dSPM source estimate -def _gen_mne(active_cov, baseline_cov, common_cov, fwd, info, method='dSPM'): +def _gen_mne(active_cov, baseline_cov, common_cov, fwd, info, method="dSPM"): inverse_operator = make_inverse_operator(info, fwd, common_cov) - stc_act = apply_inverse_cov(active_cov, info, inverse_operator, - method=method, verbose=True) - stc_base = apply_inverse_cov(baseline_cov, info, inverse_operator, - method=method, verbose=True) + stc_act = apply_inverse_cov( + active_cov, info, inverse_operator, method=method, verbose=True + ) + stc_base = apply_inverse_cov( + baseline_cov, info, inverse_operator, method=method, verbose=True + ) stc_act /= stc_base return stc_act @@ -136,20 +160,34 @@ def _gen_mne(active_cov, baseline_cov, common_cov, fwd, info, method='dSPM'): # --------------------- # DICS: +# sphinx_gallery_thumbnail_number = 3 brain_dics = stc_dics.plot( - hemi='rh', subjects_dir=subjects_dir, subject=subject, - time_label='DICS source power in the 12-30 Hz frequency band') + hemi="rh", + subjects_dir=subjects_dir, + subject=subject, + 
time_label="DICS source power in the 12-30 Hz frequency band", +) # %% # LCMV: brain_lcmv = stc_lcmv.plot( - hemi='rh', subjects_dir=subjects_dir, subject=subject, - time_label='LCMV source power in the 12-30 Hz frequency band') + hemi="rh", + subjects_dir=subjects_dir, + subject=subject, + time_label="LCMV source power in the 12-30 Hz frequency band", +) # %% # dSPM: brain_dspm = stc_dspm.plot( - hemi='rh', subjects_dir=subjects_dir, subject=subject, - time_label='dSPM source power in the 12-30 Hz frequency band') + hemi="rh", + subjects_dir=subjects_dir, + subject=subject, + time_label="dSPM source power in the 12-30 Hz frequency band", +) + +# %% +# For more advanced usage, see +# :ref:`mne-gui-addons:sphx_glr_auto_examples_evoked_ers_source_power.py`. diff --git a/examples/inverse/gamma_map_inverse.py b/examples/inverse/gamma_map_inverse.py index dd5b8343fb2..1085a6bb749 100644 --- a/examples/inverse/gamma_map_inverse.py +++ b/examples/inverse/gamma_map_inverse.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-gamma-map: @@ -12,6 +11,7 @@ # Daniel Strohmeier # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -20,22 +20,22 @@ import mne from mne.datasets import sample from mne.inverse_sparse import gamma_map, make_stc_from_dipoles -from mne.viz import (plot_sparse_source_estimates, - plot_dipole_locations, plot_dipole_amplitudes) - -print(__doc__) +from mne.viz import ( + plot_dipole_amplitudes, + plot_dipole_locations, + plot_sparse_source_estimates, +) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -evoked_fname = meg_path / 'sample_audvis-ave.fif' -cov_fname = meg_path / 'sample_audvis-cov.fif' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +evoked_fname = meg_path / "sample_audvis-ave.fif" +cov_fname = meg_path / "sample_audvis-cov.fif" # Read the evoked response and crop it -condition = 'Left visual' -evoked = mne.read_evokeds(evoked_fname, condition=condition, - baseline=(None, 0)) +condition = "Left visual" +evoked = mne.read_evokeds(evoked_fname, condition=condition, baseline=(None, 0)) evoked.crop(tmin=-50e-3, tmax=300e-3) # Read the forward solution @@ -48,8 +48,14 @@ # Run the Gamma-MAP method with dipole output alpha = 0.5 dipoles, residual = gamma_map( - evoked, forward, cov, alpha, xyz_same_gamma=True, return_residual=True, - return_as_dipoles=True) + evoked, + forward, + cov, + alpha, + xyz_same_gamma=True, + return_residual=True, + return_as_dipoles=True, +) # %% # Plot dipole activations @@ -57,9 +63,14 @@ # Plot dipole location of the strongest dipole with MRI slices idx = np.argmax([np.max(np.abs(dip.amplitude)) for dip in dipoles]) -plot_dipole_locations(dipoles[idx], forward['mri_head_t'], 'sample', - subjects_dir=subjects_dir, mode='orthoview', - idx='amplitude') +plot_dipole_locations( + dipoles[idx], + forward["mri_head_t"], + "sample", + subjects_dir=subjects_dir, + mode="orthoview", + idx="amplitude", +) # # Plot dipole locations of all dipoles with MRI slices # for dip in dipoles: @@ -70,17 +81,22 @@ # %% # Show the evoked response and the residual for gradiometers ylim = dict(grad=[-120, 120]) -evoked.pick_types(meg='grad', exclude='bads') -evoked.plot(titles=dict(grad='Evoked Response Gradiometers'), ylim=ylim, - proj=True, time_unit='s') - -residual.pick_types(meg='grad', exclude='bads') 
-residual.plot(titles=dict(grad='Residuals Gradiometers'), ylim=ylim, - proj=True, time_unit='s') +evoked.pick(picks="grad", exclude="bads") +evoked.plot( + titles=dict(grad="Evoked Response Gradiometers"), + ylim=ylim, + proj=True, + time_unit="s", +) + +residual.pick(picks="grad", exclude="bads") +residual.plot( + titles=dict(grad="Residuals Gradiometers"), ylim=ylim, proj=True, time_unit="s" +) # %% # Generate stc from dipoles -stc = make_stc_from_dipoles(dipoles, forward['src']) +stc = make_stc_from_dipoles(dipoles, forward["src"]) # %% # View in 2D and 3D ("glass" brain like 3D plot) @@ -89,9 +105,14 @@ scale_factors = 0.5 * (1 + scale_factors / np.max(scale_factors)) plot_sparse_source_estimates( - forward['src'], stc, bgcolor=(1, 1, 1), - modes=['sphere'], opacity=0.1, scale_factors=(scale_factors, None), - fig_name="Gamma-MAP") + forward["src"], + stc, + bgcolor=(1, 1, 1), + modes=["sphere"], + opacity=0.1, + scale_factors=(scale_factors, None), + fig_name="Gamma-MAP", +) # %% # References diff --git a/examples/inverse/label_activation_from_stc.py b/examples/inverse/label_activation_from_stc.py index f67821e81f6..daaf4c4ae12 100644 --- a/examples/inverse/label_activation_from_stc.py +++ b/examples/inverse/label_activation_from_stc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-label-time-course: @@ -15,25 +14,27 @@ # Author: Christian Brodbeck # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% +import matplotlib.pyplot as plt + import mne from mne.datasets import sample -import matplotlib.pyplot as plt print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" # load the stc -stc = mne.read_source_estimate(meg_path / 'sample_audvis-meg') +stc = mne.read_source_estimate(meg_path / "sample_audvis-meg") # load the labels -aud_lh = mne.read_label(meg_path / 'labels' / 'Aud-lh.label') -aud_rh = mne.read_label(meg_path / 'labels' / 'Aud-rh.label') +aud_lh = mne.read_label(meg_path / "labels" / "Aud-lh.label") +aud_rh = mne.read_label(meg_path / "labels" / "Aud-rh.label") # extract the time course for different labels from the stc stc_lh = stc.in_label(aud_lh) @@ -41,25 +42,27 @@ stc_bh = stc.in_label(aud_lh + aud_rh) # calculate center of mass and transform to mni coordinates -vtx, _, t_lh = stc_lh.center_of_mass('sample', subjects_dir=subjects_dir) -mni_lh = mne.vertex_to_mni(vtx, 0, 'sample', subjects_dir=subjects_dir)[0] -vtx, _, t_rh = stc_rh.center_of_mass('sample', subjects_dir=subjects_dir) -mni_rh = mne.vertex_to_mni(vtx, 1, 'sample', subjects_dir=subjects_dir)[0] +vtx, _, t_lh = stc_lh.center_of_mass("sample", subjects_dir=subjects_dir) +mni_lh = mne.vertex_to_mni(vtx, 0, "sample", subjects_dir=subjects_dir)[0] +vtx, _, t_rh = stc_rh.center_of_mass("sample", subjects_dir=subjects_dir) +mni_rh = mne.vertex_to_mni(vtx, 1, "sample", subjects_dir=subjects_dir)[0] # plot the activation plt.figure() -plt.axes([.1, .275, .85, .625]) -hl = plt.plot(stc.times, stc_lh.data.mean(0), 'b')[0] -hr = plt.plot(stc.times, stc_rh.data.mean(0), 'g')[0] -hb = plt.plot(stc.times, stc_bh.data.mean(0), 'r')[0] -plt.xlabel('Time (s)') -plt.ylabel('Source amplitude (dSPM)') +plt.axes([0.1, 0.275, 0.85, 0.625]) +hl = plt.plot(stc.times, stc_lh.data.mean(0), "b")[0] +hr = plt.plot(stc.times, stc_rh.data.mean(0), "g")[0] +hb = plt.plot(stc.times, stc_bh.data.mean(0), "r")[0] +plt.xlabel("Time (s)") +plt.ylabel("Source amplitude 
(dSPM)") plt.xlim(stc.times[0], stc.times[-1]) # add a legend including center-of-mass mni coordinates to the plot -labels = ['LH: center of mass = %s' % mni_lh.round(2), - 'RH: center of mass = %s' % mni_rh.round(2), - 'Combined LH & RH'] -plt.figlegend([hl, hr, hb], labels, loc='lower center') -plt.suptitle('Average activation in auditory cortex labels', fontsize=20) +labels = [ + f"LH: center of mass = {mni_lh.round(2)}", + f"RH: center of mass = {mni_rh.round(2)}", + "Combined LH & RH", +] +plt.figlegend([hl, hr, hb], labels, loc="lower center") +plt.suptitle("Average activation in auditory cortex labels", fontsize=20) plt.show() diff --git a/examples/inverse/label_from_stc.py b/examples/inverse/label_from_stc.py index 88b45faabd7..76545d4895f 100644 --- a/examples/inverse/label_from_stc.py +++ b/examples/inverse/label_from_stc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-functional-label: @@ -16,42 +15,41 @@ # Author: Luke Bloy # Alex Gramfort # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% -import numpy as np import matplotlib.pyplot as plt +import numpy as np import mne -from mne.minimum_norm import read_inverse_operator, apply_inverse from mne.datasets import sample +from mne.minimum_norm import apply_inverse, read_inverse_operator print(__doc__) data_path = sample.data_path() -fname_inv = ( - data_path / 'MEG' / 'sample' / 'sample_audvis-meg-oct-6-meg-inv.fif') -fname_evoked = data_path / 'MEG' / 'sample' / 'sample_audvis-ave.fif' -subjects_dir = data_path / 'subjects' -subject = 'sample' +fname_inv = data_path / "MEG" / "sample" / "sample_audvis-meg-oct-6-meg-inv.fif" +fname_evoked = data_path / "MEG" / "sample" / "sample_audvis-ave.fif" +subjects_dir = data_path / "subjects" +subject = "sample" snr = 3.0 -lambda2 = 1.0 / snr ** 2 +lambda2 = 1.0 / snr**2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) # Compute a label/ROI based on the peak power between 80 and 120 ms. # The label bankssts-lh is used for the comparison. -aparc_label_name = 'bankssts-lh' +aparc_label_name = "bankssts-lh" tmin, tmax = 0.080, 0.120 # Load data evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0)) inverse_operator = read_inverse_operator(fname_inv) -src = inverse_operator['src'] # get the source space +src = inverse_operator["src"] # get the source space # Compute inverse solution -stc = apply_inverse(evoked, inverse_operator, lambda2, method, - pick_ori='normal') +stc = apply_inverse(evoked, inverse_operator, lambda2, method, pick_ori="normal") # Make an STC in the time interval of interest and take the mean stc_mean = stc.copy().crop(tmin, tmax).mean() @@ -59,33 +57,38 @@ # use the stc_mean to generate a functional label # region growing is halted at 60% of the peak value within the # anatomical label / ROI specified by aparc_label_name -label = mne.read_labels_from_annot(subject, parc='aparc', - subjects_dir=subjects_dir, - regexp=aparc_label_name)[0] +label = mne.read_labels_from_annot( + subject, parc="aparc", subjects_dir=subjects_dir, regexp=aparc_label_name +)[0] stc_mean_label = stc_mean.in_label(label) data = np.abs(stc_mean_label.data) -stc_mean_label.data[data < 0.6 * np.max(data)] = 0. 
+stc_mean_label.data[data < 0.6 * np.max(data)] = 0.0 # 8.5% of original source space vertices were omitted during forward # calculation, suppress the warning here with verbose='error' -func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True, - subjects_dir=subjects_dir, connected=True, - verbose='error') +func_labels, _ = mne.stc_to_label( + stc_mean_label, + src=src, + smooth=True, + subjects_dir=subjects_dir, + connected=True, + verbose="error", +) # take first as func_labels are ordered based on maximum values in stc func_label = func_labels[0] # load the anatomical ROI for comparison -anat_label = mne.read_labels_from_annot(subject, parc='aparc', - subjects_dir=subjects_dir, - regexp=aparc_label_name)[0] +anat_label = mne.read_labels_from_annot( + subject, parc="aparc", subjects_dir=subjects_dir, regexp=aparc_label_name +)[0] # extract the anatomical time course for each label stc_anat_label = stc.in_label(anat_label) -pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0] +pca_anat = stc.extract_label_time_course(anat_label, src, mode="pca_flip")[0] stc_func_label = stc.in_label(func_label) -pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0] +pca_func = stc.extract_label_time_course(func_label, src, mode="pca_flip")[0] # flip the pca so that the max power between tmin and tmax is positive pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))]) @@ -94,18 +97,20 @@ # %% # plot the time courses.... plt.figure() -plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k', - label='Anatomical %s' % aparc_label_name) -plt.plot(1e3 * stc_func_label.times, pca_func, 'b', - label='Functional %s' % aparc_label_name) +plt.plot( + 1e3 * stc_anat_label.times, pca_anat, "k", label=f"Anatomical {aparc_label_name}" +) +plt.plot( + 1e3 * stc_func_label.times, pca_func, "b", label=f"Functional {aparc_label_name}" +) plt.legend() plt.show() # %% # plot brain in 3D with mne.viz.Brain if available -brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir) -brain.show_view('lateral') +brain = stc_mean.plot(hemi="lh", subjects_dir=subjects_dir) +brain.show_view("lateral") # show both labels -brain.add_label(anat_label, borders=True, color='k') -brain.add_label(func_label, borders=True, color='b') +brain.add_label(anat_label, borders=True, color="k") +brain.add_label(func_label, borders=True, color="b") diff --git a/examples/inverse/label_source_activations.py b/examples/inverse/label_source_activations.py index 9818cb07d3b..74d338486b0 100644 --- a/examples/inverse/label_source_activations.py +++ b/examples/inverse/label_source_activations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-label-time-series: @@ -16,68 +15,73 @@ # Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
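
# --- Editorial sketch (not part of the diff) ---------------------------------
# A compact restatement of the functional-label workflow from label_from_stc.py
# just above; `stc`, `stc_mean_label`, `src` and `subjects_dir` are assumed to
# be the objects defined in that example.
import mne

func_labels, _ = mne.stc_to_label(
    stc_mean_label,
    src=src,
    smooth=True,
    subjects_dir=subjects_dir,
    connected=True,
    verbose="error",  # silence the warning about vertices omitted from the forward
)
func_label = func_labels[0]  # labels are ordered by peak STC value
pca_func = stc.extract_label_time_course(func_label, src, mode="pca_flip")[0]
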
# %% -import matplotlib.pyplot as plt import matplotlib.patheffects as path_effects +import matplotlib.pyplot as plt import mne from mne.datasets import sample -from mne.minimum_norm import read_inverse_operator, apply_inverse +from mne.minimum_norm import apply_inverse, read_inverse_operator print(__doc__) data_path = sample.data_path() -label = 'Aud-lh' -meg_path = data_path / 'MEG' / 'sample' -label_fname = meg_path / 'labels' / f'{label}.label' -fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif' -fname_evoked = meg_path / 'sample_audvis-ave.fif' +label = "Aud-lh" +meg_path = data_path / "MEG" / "sample" +label_fname = meg_path / "labels" / f"{label}.label" +fname_inv = meg_path / "sample_audvis-meg-oct-6-meg-inv.fif" +fname_evoked = meg_path / "sample_audvis-ave.fif" snr = 3.0 -lambda2 = 1.0 / snr ** 2 +lambda2 = 1.0 / snr**2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) # Load data evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0)) inverse_operator = read_inverse_operator(fname_inv) -src = inverse_operator['src'] +src = inverse_operator["src"] # %% # Compute inverse solution # ------------------------ pick_ori = "normal" # Get signed values to see the effect of sign flip -stc = apply_inverse(evoked, inverse_operator, lambda2, method, - pick_ori=pick_ori) +stc = apply_inverse(evoked, inverse_operator, lambda2, method, pick_ori=pick_ori) label = mne.read_label(label_fname) stc_label = stc.in_label(label) -modes = ('mean', 'mean_flip', 'pca_flip') +modes = ("mean", "mean_flip", "pca_flip") tcs = dict() for mode in modes: tcs[mode] = stc.extract_label_time_course(label, src, mode=mode) -print("Number of vertices : %d" % len(stc_label.data)) +print(f"Number of vertices : {len(stc_label.data)}") # %% # View source activations # ----------------------- -fig, ax = plt.subplots(1) +fig, ax = plt.subplots(1, layout="constrained") t = 1e3 * stc_label.times -ax.plot(t, stc_label.data.T, 'k', linewidth=0.5, alpha=0.5) -pe = [path_effects.Stroke(linewidth=5, foreground='w', alpha=0.5), - path_effects.Normal()] +ax.plot(t, stc_label.data.T, "k", linewidth=0.5, alpha=0.5) +pe = [ + path_effects.Stroke(linewidth=5, foreground="w", alpha=0.5), + path_effects.Normal(), +] for mode, tc in tcs.items(): ax.plot(t, tc[0], linewidth=3, label=str(mode), path_effects=pe) xlim = t[[0, -1]] ylim = [-27, 22] -ax.legend(loc='upper right') -ax.set(xlabel='Time (ms)', ylabel='Source amplitude', - title='Activations in Label %r' % (label.name), - xlim=xlim, ylim=ylim) -mne.viz.tight_layout() +ax.legend(loc="upper right") +ax.set( + xlabel="Time (ms)", + ylabel="Source amplitude", + title=f"Activations in Label {label.name!r}", + xlim=xlim, + ylim=ylim, +) # %% # Using vector solutions @@ -85,21 +89,31 @@ # It's also possible to compute label time courses for a # :class:`mne.VectorSourceEstimate`, but only with ``mode='mean'``. 
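
# --- Editorial sketch (not part of the diff) ---------------------------------
# Illustrating the restriction stated above: for a VectorSourceEstimate the
# label time course can only be a plain mean, giving one X/Y/Z component set
# per label. `evoked`, `inverse_operator`, `lambda2`, `method`, `label` and
# `src` are the objects from this example.
from mne.minimum_norm import apply_inverse

stc_vec = apply_inverse(evoked, inverse_operator, lambda2, method, pick_ori="vector")
tc = stc_vec.extract_label_time_course(label, src, mode="mean")
print(tc.shape)  # expected: (n_labels, 3, n_times)
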
-pick_ori = 'vector' -stc_vec = apply_inverse(evoked, inverse_operator, lambda2, method, - pick_ori=pick_ori) +pick_ori = "vector" +stc_vec = apply_inverse(evoked, inverse_operator, lambda2, method, pick_ori=pick_ori) data = stc_vec.extract_label_time_course(label, src) -fig, ax = plt.subplots(1) +fig, ax = plt.subplots(1, layout="constrained") stc_vec_label = stc_vec.in_label(label) -colors = ['#EE6677', '#228833', '#4477AA'] -for ii, name in enumerate('XYZ'): +colors = ["#EE6677", "#228833", "#4477AA"] +for ii, name in enumerate("XYZ"): color = colors[ii] - ax.plot(t, stc_vec_label.data[:, ii].T, color=color, lw=0.5, alpha=0.5, - zorder=5 - ii) - ax.plot(t, data[0, ii], lw=3, color=color, label='+' + name, zorder=8 - ii, - path_effects=pe) -ax.legend(loc='upper right') -ax.set(xlabel='Time (ms)', ylabel='Source amplitude', - title='Mean vector activations in Label %r' % (label.name,), - xlim=xlim, ylim=ylim) -mne.viz.tight_layout() + ax.plot( + t, stc_vec_label.data[:, ii].T, color=color, lw=0.5, alpha=0.5, zorder=5 - ii + ) + ax.plot( + t, + data[0, ii], + lw=3, + color=color, + label="+" + name, + zorder=8 - ii, + path_effects=pe, + ) +ax.legend(loc="upper right") +ax.set( + xlabel="Time (ms)", + ylabel="Source amplitude", + title=f"Mean vector activations in Label {label.name!r}", + xlim=xlim, + ylim=ylim, +) diff --git a/examples/inverse/mixed_norm_inverse.py b/examples/inverse/mixed_norm_inverse.py index ed2a425025f..70764a53973 100644 --- a/examples/inverse/mixed_norm_inverse.py +++ b/examples/inverse/mixed_norm_inverse.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-mixed-norm-inverse: @@ -15,6 +14,7 @@ # Daniel Strohmeier # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -24,24 +24,27 @@ import mne from mne.datasets import sample -from mne.inverse_sparse import mixed_norm, make_stc_from_dipoles -from mne.minimum_norm import make_inverse_operator, apply_inverse -from mne.viz import (plot_sparse_source_estimates, - plot_dipole_locations, plot_dipole_amplitudes) +from mne.inverse_sparse import make_stc_from_dipoles, mixed_norm +from mne.minimum_norm import apply_inverse, make_inverse_operator +from mne.viz import ( + plot_dipole_amplitudes, + plot_dipole_locations, + plot_sparse_source_estimates, +) print(__doc__) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -ave_fname = meg_path / 'sample_audvis-ave.fif' -cov_fname = meg_path / 'sample_audvis-shrunk-cov.fif' -subjects_dir = data_path / 'subjects' +meg_path = data_path / "MEG" / "sample" +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +ave_fname = meg_path / "sample_audvis-ave.fif" +cov_fname = meg_path / "sample_audvis-shrunk-cov.fif" +subjects_dir = data_path / "subjects" # Read noise covariance matrix cov = mne.read_cov(cov_fname) # Handling average file -condition = 'Left Auditory' +condition = "Left Auditory" evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0)) evoked.crop(tmin=0, tmax=0.3) # Handling forward solution @@ -55,28 +58,39 @@ # if n_mxne_iter > 1 dSPM weighting can be avoided. # Compute dSPM solution to be used as weights in MxNE -inverse_operator = make_inverse_operator(evoked.info, forward, cov, - depth=depth, fixed=True, - use_cps=True) -stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. 
/ 9., - method='dSPM') +inverse_operator = make_inverse_operator( + evoked.info, forward, cov, depth=depth, fixed=True, use_cps=True +) +stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1.0 / 9.0, method="dSPM") # Compute (ir)MxNE inverse solution with dipole output dipoles, residual = mixed_norm( - evoked, forward, cov, alpha, loose=loose, depth=depth, maxit=3000, - tol=1e-4, active_set_size=10, debias=False, weights=stc_dspm, - weights_min=8., n_mxne_iter=n_mxne_iter, return_residual=True, - return_as_dipoles=True, verbose=True, random_state=0, + evoked, + forward, + cov, + alpha, + loose=loose, + depth=depth, + maxit=3000, + tol=1e-4, + active_set_size=10, + debias=False, + weights=stc_dspm, + weights_min=8.0, + n_mxne_iter=n_mxne_iter, + return_residual=True, + return_as_dipoles=True, + verbose=True, + random_state=0, # for this dataset we know we should use a high alpha, so avoid some # of the slower (lower) alpha values sure_alpha_grid=np.linspace(100, 40, 10), ) t = 0.083 -tidx = evoked.time_as_index(t) +tidx = evoked.time_as_index(t).item() for di, dip in enumerate(dipoles, 1): - print(f'Dipole #{di} GOF at {1000 * t:0.1f} ms: ' - f'{float(dip.gof[tidx]):0.1f}%') + print(f"Dipole #{di} GOF at {1000 * t:0.1f} ms: {float(dip.gof[tidx]):0.1f}%") # %% # Plot dipole activations @@ -84,48 +98,70 @@ # Plot dipole location of the strongest dipole with MRI slices idx = np.argmax([np.max(np.abs(dip.amplitude)) for dip in dipoles]) -plot_dipole_locations(dipoles[idx], forward['mri_head_t'], 'sample', - subjects_dir=subjects_dir, mode='orthoview', - idx='amplitude') +plot_dipole_locations( + dipoles[idx], + forward["mri_head_t"], + "sample", + subjects_dir=subjects_dir, + mode="orthoview", + idx="amplitude", +) # Plot dipole locations of all dipoles with MRI slices for dip in dipoles: - plot_dipole_locations(dip, forward['mri_head_t'], 'sample', - subjects_dir=subjects_dir, mode='orthoview', - idx='amplitude') + plot_dipole_locations( + dip, + forward["mri_head_t"], + "sample", + subjects_dir=subjects_dir, + mode="orthoview", + idx="amplitude", + ) # %% # Plot residual ylim = dict(eeg=[-10, 10], grad=[-400, 400], mag=[-600, 600]) -evoked.pick_types(meg=True, eeg=True, exclude='bads') -evoked.plot(ylim=ylim, proj=True, time_unit='s') -residual.pick_types(meg=True, eeg=True, exclude='bads') -residual.plot(ylim=ylim, proj=True, time_unit='s') +evoked.pick(picks=["meg", "eeg"], exclude="bads") +evoked.plot(ylim=ylim, proj=True, time_unit="s") +residual.pick(picks=["meg", "eeg"], exclude="bads") +residual.plot(ylim=ylim, proj=True, time_unit="s") # %% # Generate stc from dipoles -stc = make_stc_from_dipoles(dipoles, forward['src']) +stc = make_stc_from_dipoles(dipoles, forward["src"]) # %% # View in 2D and 3D ("glass" brain like 3D plot) solver = "MxNE" if n_mxne_iter == 1 else "irMxNE" -plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1), - fig_name="%s (cond %s)" % (solver, condition), - opacity=0.1) +plot_sparse_source_estimates( + forward["src"], + stc, + bgcolor=(1, 1, 1), + fig_name=f"{solver} (cond {condition})", + opacity=0.1, +) # %% # Morph onto fsaverage brain and view -morph = mne.compute_source_morph(stc, subject_from='sample', - subject_to='fsaverage', spacing=None, - sparse=True, subjects_dir=subjects_dir) +morph = mne.compute_source_morph( + stc, + subject_from="sample", + subject_to="fsaverage", + spacing=None, + sparse=True, + subjects_dir=subjects_dir, +) stc_fsaverage = morph.apply(stc) -src_fsaverage_fname = ( - subjects_dir / 'fsaverage' / 'bem' / 
'fsaverage-ico-5-src.fif') +src_fsaverage_fname = subjects_dir / "fsaverage" / "bem" / "fsaverage-ico-5-src.fif" src_fsaverage = mne.read_source_spaces(src_fsaverage_fname) -plot_sparse_source_estimates(src_fsaverage, stc_fsaverage, bgcolor=(1, 1, 1), - fig_name="Morphed %s (cond %s)" % (solver, - condition), opacity=0.1) +plot_sparse_source_estimates( + src_fsaverage, + stc_fsaverage, + bgcolor=(1, 1, 1), + fig_name=f"Morphed {solver} (cond {condition})", + opacity=0.1, +) # %% # References diff --git a/examples/inverse/mixed_source_space_inverse.py b/examples/inverse/mixed_source_space_inverse.py index 485fac0d26d..a339c3ac667 100644 --- a/examples/inverse/mixed_source_space_inverse.py +++ b/examples/inverse/mixed_source_space_inverse.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-mixed-source-space-inverse: @@ -12,34 +11,34 @@ # Author: Annalisa Pascarella # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import matplotlib.pyplot as plt - from nilearn import plotting import mne -from mne.minimum_norm import make_inverse_operator, apply_inverse +from mne.minimum_norm import apply_inverse, make_inverse_operator # Set dir data_path = mne.datasets.sample.data_path() -subject = 'sample' -data_dir = data_path / 'MEG' / subject -subjects_dir = data_path / 'subjects' -bem_dir = subjects_dir / subject / 'bem' +subject = "sample" +data_dir = data_path / "MEG" / subject +subjects_dir = data_path / "subjects" +bem_dir = subjects_dir / subject / "bem" # Set file names -fname_mixed_src = bem_dir / f'{subject}-oct-6-mixed-src.fif' -fname_aseg = subjects_dir / subject / 'mri' / 'aseg.mgz' +fname_mixed_src = bem_dir / f"{subject}-oct-6-mixed-src.fif" +fname_aseg = subjects_dir / subject / "mri" / "aseg.mgz" -fname_model = bem_dir / f'{subject}-5120-bem.fif' -fname_bem = bem_dir / f'{subject}-5120-bem-sol.fif' +fname_model = bem_dir / f"{subject}-5120-bem.fif" +fname_bem = bem_dir / f"{subject}-5120-bem-sol.fif" -fname_evoked = data_dir / f'{subject}_audvis-ave.fif' -fname_trans = data_dir / f'{subject}_audvis_raw-trans.fif' -fname_fwd = data_dir / f'{subject}_audvis-meg-oct-6-mixed-fwd.fif' -fname_cov = data_dir / f'{subject}_audvis-shrunk-cov.fif' +fname_evoked = data_dir / f"{subject}_audvis-ave.fif" +fname_trans = data_dir / f"{subject}_audvis_raw-trans.fif" +fname_fwd = data_dir / f"{subject}_audvis-meg-oct-6-mixed-fwd.fif" +fname_cov = data_dir / f"{subject}_audvis-shrunk-cov.fif" # %% # Set up our source space @@ -47,19 +46,22 @@ # List substructures we are interested in. We select only the # sub structures we want to include in the source space: -labels_vol = ['Left-Amygdala', - 'Left-Thalamus-Proper', - 'Left-Cerebellum-Cortex', - 'Brain-Stem', - 'Right-Amygdala', - 'Right-Thalamus-Proper', - 'Right-Cerebellum-Cortex'] +labels_vol = [ + "Left-Amygdala", + "Left-Thalamus-Proper", + "Left-Cerebellum-Cortex", + "Brain-Stem", + "Right-Amygdala", + "Right-Thalamus-Proper", + "Right-Cerebellum-Cortex", +] # %% # Get a surface-based source space, here with few source points for speed # in this demonstration, in general you should use oct6 spacing! 
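
# --- Editorial sketch (not part of the diff) ---------------------------------
# The "real analysis" settings the comment above recommends, assuming
# `subjects_dir`, `fname_aseg`, `fname_model` and `labels_vol` as defined in
# this example: an oct6 cortical source space plus the volume labels, combined
# into one mixed SourceSpaces object.
import mne

src = mne.setup_source_space(
    "sample", spacing="oct6", add_dist=False, subjects_dir=subjects_dir
)
vol_src = mne.setup_volume_source_space(
    "sample",
    mri=fname_aseg,
    pos=5.0,  # finer grid than the pos=10.0 the example uses for speed
    bem=fname_model,
    volume_label=labels_vol,
    subjects_dir=subjects_dir,
)
src += vol_src  # the mixed source space now holds surface + volume spaces
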
-src = mne.setup_source_space(subject, spacing='oct5', - add_dist=False, subjects_dir=subjects_dir) +src = mne.setup_source_space( + subject, spacing="oct5", add_dist=False, subjects_dir=subjects_dir +) # %% # Now we create a mixed src space by adding the volume regions specified in the @@ -68,15 +70,22 @@ # we recommend something smaller like 5.0 in actual analyses): vol_src = mne.setup_volume_source_space( - subject, mri=fname_aseg, pos=10.0, bem=fname_model, - volume_label=labels_vol, subjects_dir=subjects_dir, + subject, + mri=fname_aseg, + pos=10.0, + bem=fname_model, + volume_label=labels_vol, + subjects_dir=subjects_dir, add_interpolator=False, # just for speed, usually this should be True - verbose=True) + verbose=True, +) # Generate the mixed source space src += vol_src -print(f"The source space contains {len(src)} spaces and " - f"{sum(s['nuse'] for s in src)} vertices") +print( + f"The source space contains {len(src)} spaces and " + f"{sum(s['nuse'] for s in src)} vertices" +) # %% # View the source space @@ -91,47 +100,55 @@ # # We can also export source positions to NIfTI file and visualize it again: -nii_fname = bem_dir / f'{subject}-mixed-src.nii' +nii_fname = bem_dir / f"{subject}-mixed-src.nii" src.export_volume(nii_fname, mri_resolution=True, overwrite=True) -plotting.plot_img(str(nii_fname), cmap='nipy_spectral') +plotting.plot_img(str(nii_fname), cmap="nipy_spectral") # %% # Compute the fwd matrix # ---------------------- fwd = mne.make_forward_solution( - fname_evoked, fname_trans, src, fname_bem, + fname_evoked, + fname_trans, + src, + fname_bem, mindist=5.0, # ignore sources<=5mm from innerskull - meg=True, eeg=False, n_jobs=None) + meg=True, + eeg=False, + n_jobs=None, +) del src # save memory -leadfield = fwd['sol']['data'] -print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape) -print(f"The fwd source space contains {len(fwd['src'])} spaces and " - f"{sum(s['nuse'] for s in fwd['src'])} vertices") +leadfield = fwd["sol"]["data"] +ns, nd = leadfield.shape +print(f"Leadfield size : {ns} sensors x {nd} dipoles") +print( + f"The fwd source space contains {len(fwd['src'])} spaces and " + f"{sum(s['nuse'] for s in fwd['src'])} vertices" +) # Load data -condition = 'Left Auditory' -evoked = mne.read_evokeds(fname_evoked, condition=condition, - baseline=(None, 0)) +condition = "Left Auditory" +evoked = mne.read_evokeds(fname_evoked, condition=condition, baseline=(None, 0)) noise_cov = mne.read_cov(fname_cov) # %% # Compute inverse solution # ------------------------ -snr = 3.0 # use smaller SNR for raw data -inv_method = 'dSPM' # sLORETA, MNE, dSPM -parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s' -loose = dict(surface=0.2, volume=1.) 
+snr = 3.0 # use smaller SNR for raw data +inv_method = "dSPM" # sLORETA, MNE, dSPM +parc = "aparc" # the parcellation to use, e.g., 'aparc' 'aparc.a2009s' +loose = dict(surface=0.2, volume=1.0) -lambda2 = 1.0 / snr ** 2 +lambda2 = 1.0 / snr**2 inverse_operator = make_inverse_operator( - evoked.info, fwd, noise_cov, depth=None, loose=loose, verbose=True) + evoked.info, fwd, noise_cov, depth=None, loose=loose, verbose=True +) del fwd -stc = apply_inverse(evoked, inverse_operator, lambda2, inv_method, - pick_ori=None) -src = inverse_operator['src'] +stc = apply_inverse(evoked, inverse_operator, lambda2, inv_method, pick_ori=None) +src = inverse_operator["src"] # %% # Plot the mixed source estimate @@ -139,24 +156,30 @@ # sphinx_gallery_thumbnail_number = 3 initial_time = 0.1 -stc_vec = apply_inverse(evoked, inverse_operator, lambda2, inv_method, - pick_ori='vector') +stc_vec = apply_inverse( + evoked, inverse_operator, lambda2, inv_method, pick_ori="vector" +) brain = stc_vec.plot( - hemi='both', src=inverse_operator['src'], views='coronal', - initial_time=initial_time, subjects_dir=subjects_dir, - brain_kwargs=dict(silhouette=True), smoothing_steps=7) + hemi="both", + src=inverse_operator["src"], + views="coronal", + initial_time=initial_time, + subjects_dir=subjects_dir, + brain_kwargs=dict(silhouette=True), + smoothing_steps=7, +) # %% # Plot the surface # ---------------- -brain = stc.surface().plot(initial_time=initial_time, - subjects_dir=subjects_dir, smoothing_steps=7) +brain = stc.surface().plot( + initial_time=initial_time, subjects_dir=subjects_dir, smoothing_steps=7 +) # %% # Plot the volume # --------------- -fig = stc.volume().plot(initial_time=initial_time, src=src, - subjects_dir=subjects_dir) +fig = stc.volume().plot(initial_time=initial_time, src=src, subjects_dir=subjects_dir) # %% # Process labels @@ -165,16 +188,15 @@ # and each sub structure contained in the src space # Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi -labels_parc = mne.read_labels_from_annot( - subject, parc=parc, subjects_dir=subjects_dir) +labels_parc = mne.read_labels_from_annot(subject, parc=parc, subjects_dir=subjects_dir) label_ts = mne.extract_label_time_course( - [stc], labels_parc, src, mode='mean', allow_empty=True) + [stc], labels_parc, src, mode="mean", allow_empty=True +) # plot the times series of 2 labels -fig, axes = plt.subplots(1) -axes.plot(1e3 * stc.times, label_ts[0][0, :], 'k', label='bankssts-lh') -axes.plot(1e3 * stc.times, label_ts[0][-1, :].T, 'r', label='Brain-stem') -axes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)') +fig, axes = plt.subplots(1, layout="constrained") +axes.plot(1e3 * stc.times, label_ts[0][0, :], "k", label="bankssts-lh") +axes.plot(1e3 * stc.times, label_ts[0][-1, :].T, "r", label="Brain-stem") +axes.set(xlabel="Time (ms)", ylabel="MNE current (nAm)") axes.legend() -mne.viz.tight_layout() diff --git a/examples/inverse/mne_cov_power.py b/examples/inverse/mne_cov_power.py index b7c5137e3b7..a6cf0df181f 100644 --- a/examples/inverse/mne_cov_power.py +++ b/examples/inverse/mne_cov_power.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-cov-power: @@ -21,6 +20,7 @@ # Luke Bloy # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% @@ -28,12 +28,12 @@ import mne from mne.datasets import sample -from mne.minimum_norm import make_inverse_operator, apply_inverse_cov +from mne.minimum_norm import apply_inverse_cov, make_inverse_operator data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" raw = mne.io.read_raw_fif(raw_fname) # %% @@ -42,28 +42,39 @@ # First we compute an empty-room covariance, which captures noise from the # sensors and environment. -raw_empty_room_fname = data_path / 'MEG' / 'sample' / 'ernoise_raw.fif' +raw_empty_room_fname = data_path / "MEG" / "sample" / "ernoise_raw.fif" raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname) raw_empty_room.crop(0, 30) # cropped just for speed -raw_empty_room.info['bads'] = ['MEG 2443'] -raw_empty_room.add_proj(raw.info['projs']) -noise_cov = mne.compute_raw_covariance(raw_empty_room, method='shrunk') +raw_empty_room.info["bads"] = ["MEG 2443"] +raw_empty_room.add_proj(raw.info["projs"]) +noise_cov = mne.compute_raw_covariance(raw_empty_room, method="shrunk") del raw_empty_room # %% # Epoch the data # -------------- -raw.pick(['meg', 'stim', 'eog']).load_data().filter(4, 12) -raw.info['bads'] = ['MEG 2443'] -events = mne.find_events(raw, stim_channel='STI 014') +raw.pick(["meg", "stim", "eog"]).load_data().filter(4, 12) +raw.info["bads"] = ["MEG 2443"] +events = mne.find_events(raw, stim_channel="STI 014") event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4) tmin, tmax = -0.2, 0.5 baseline = (None, 0) # means from the first instant to t = 0 reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6) -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, - proj=True, picks=('meg', 'eog'), baseline=None, - reject=reject, preload=True, decim=5, verbose='error') +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + proj=True, + picks=("meg", "eog"), + baseline=None, + reject=reject, + preload=True, + decim=5, + verbose="error", +) del raw # %% @@ -79,9 +90,11 @@ # to noise sources). 
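
# --- Editorial sketch (not part of the diff) ---------------------------------
# The covariance-to-source-power step described above, written out end to end;
# `epochs`, `inverse_operator` and the dSPM settings are assumed from this
# example (the inverse operator itself is only built a few hunks further down).
import mne
from mne.minimum_norm import apply_inverse_cov

base_cov = mne.compute_covariance(epochs, tmin=-0.2, tmax=0, method="shrunk")
data_cov = mne.compute_covariance(epochs, tmin=0.0, tmax=0.2, method="shrunk")
stc_data = apply_inverse_cov(
    data_cov, epochs.info, inverse_operator, nave=len(epochs), method="dSPM"
)
stc_base = apply_inverse_cov(
    base_cov, epochs.info, inverse_operator, nave=len(epochs), method="dSPM"
)
stc_data /= stc_base  # power relative to the pre-stimulus baseline
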
base_cov = mne.compute_covariance( - epochs, tmin=-0.2, tmax=0, method='shrunk', verbose=True) + epochs, tmin=-0.2, tmax=0, method="shrunk", verbose=True +) data_cov = mne.compute_covariance( - epochs, tmin=0., tmax=0.2, method='shrunk', verbose=True) + epochs, tmin=0.0, tmax=0.2, method="shrunk", verbose=True +) fig_noise_cov = mne.viz.plot_cov(noise_cov, epochs.info, show_svd=False) fig_base_cov = mne.viz.plot_cov(base_cov, epochs.info, show_svd=False) @@ -92,16 +105,18 @@ # baseline and data covariances, followed by the data covariance whitened # by the baseline covariance: -evoked = epochs.average().pick('meg') -evoked.drop_channels(evoked.info['bads']) -evoked.plot(time_unit='s') -evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag') +evoked = epochs.average().pick("meg") +evoked.drop_channels(evoked.info["bads"]) +evoked.plot(time_unit="s") +evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type="mag") -loop = {'Noise': (noise_cov, dict()), - 'Data': (data_cov, dict()), - 'Whitened data': (data_cov, dict(noise_cov=noise_cov))} +loop = { + "Noise": (noise_cov, dict()), + "Data": (data_cov, dict()), + "Whitened data": (data_cov, dict(noise_cov=noise_cov)), +} for title, (_cov, _kw) in loop.items(): - fig = _cov.plot_topomap(evoked.info, 'grad', **_kw) + fig = _cov.plot_topomap(evoked.info, "grad", **_kw) fig.suptitle(title) # %% @@ -110,20 +125,31 @@ # Finally, we can construct an inverse using the empty-room noise covariance: # Read the forward solution and compute the inverse operator -fname_fwd = meg_path / 'sample_audvis-meg-oct-6-fwd.fif' +fname_fwd = meg_path / "sample_audvis-meg-oct-6-fwd.fif" fwd = mne.read_forward_solution(fname_fwd) # make an MEG inverse operator info = evoked.info -inverse_operator = make_inverse_operator(info, fwd, noise_cov, - loose=0.2, depth=0.8) +inverse_operator = make_inverse_operator(info, fwd, noise_cov, loose=0.2, depth=0.8) # %% # Project our data and baseline covariance to source space: -stc_data = apply_inverse_cov(data_cov, evoked.info, inverse_operator, - nave=len(epochs), method='dSPM', verbose=True) -stc_base = apply_inverse_cov(base_cov, evoked.info, inverse_operator, - nave=len(epochs), method='dSPM', verbose=True) +stc_data = apply_inverse_cov( + data_cov, + evoked.info, + inverse_operator, + nave=len(epochs), + method="dSPM", + verbose=True, +) +stc_base = apply_inverse_cov( + base_cov, + evoked.info, + inverse_operator, + nave=len(epochs), + method="dSPM", + verbose=True, +) # %% # And visualize power is relative to the baseline: @@ -131,6 +157,9 @@ # sphinx_gallery_thumbnail_number = 9 stc_data /= stc_base -brain = stc_data.plot(subject='sample', subjects_dir=subjects_dir, - clim=dict(kind='percent', lims=(50, 90, 98)), - smoothing_steps=7) +brain = stc_data.plot( + subject="sample", + subjects_dir=subjects_dir, + clim=dict(kind="percent", lims=(50, 90, 98)), + smoothing_steps=7, +) diff --git a/examples/inverse/morph_surface_stc.py b/examples/inverse/morph_surface_stc.py index c918a6e6f3b..abf84345e14 100644 --- a/examples/inverse/morph_surface_stc.py +++ b/examples/inverse/morph_surface_stc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-morph-surface: @@ -27,6 +26,7 @@ # Author: Tommy Clausner # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
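
# --- Editorial sketch (not part of the diff) ---------------------------------
# The core of the surface-morphing example that follows, condensed to the
# "one-liner" form the example itself ends with; `stc` is the sample_audvis-meg
# SourceEstimate read below and `subjects_dir` the sample subjects directory.
import mne

stc_fsaverage = mne.compute_source_morph(
    stc, subject_from="sample", subject_to="fsaverage", subjects_dir=subjects_dir
).apply(stc)
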
# %% import mne @@ -38,19 +38,18 @@ # Setup paths data_path = sample.data_path() -sample_dir = data_path / 'MEG' / 'sample' -subjects_dir = data_path / 'subjects' -fname_src = subjects_dir / 'sample' / 'bem' / 'sample-oct-6-src.fif' -fname_fwd = sample_dir / 'sample_audvis-meg-oct-6-fwd.fif' -fname_fsaverage_src = (subjects_dir / 'fsaverage' / 'bem' / - 'fsaverage-ico-5-src.fif') -fname_stc = sample_dir / 'sample_audvis-meg' +sample_dir = data_path / "MEG" / "sample" +subjects_dir = data_path / "subjects" +fname_src = subjects_dir / "sample" / "bem" / "sample-oct-6-src.fif" +fname_fwd = sample_dir / "sample_audvis-meg-oct-6-fwd.fif" +fname_fsaverage_src = subjects_dir / "fsaverage" / "bem" / "fsaverage-ico-5-src.fif" +fname_stc = sample_dir / "sample_audvis-meg" # %% # Load example data # Read stc from file -stc = mne.read_source_estimate(fname_stc, subject='sample') +stc = mne.read_source_estimate(fname_stc, subject="sample") # %% # Setting up SourceMorph for SourceEstimate @@ -67,7 +66,7 @@ src_orig = mne.read_source_spaces(fname_src) print(src_orig) # n_used=4098, 4098 fwd = mne.read_forward_solution(fname_fwd) -print(fwd['src']) # n_used=3732, 3766 +print(fwd["src"]) # n_used=3732, 3766 print([len(v) for v in stc.vertices]) # %% @@ -87,10 +86,14 @@ # Initialize SourceMorph for SourceEstimate src_to = mne.read_source_spaces(fname_fsaverage_src) -print(src_to[0]['vertno']) # special, np.arange(10242) -morph = mne.compute_source_morph(stc, subject_from='sample', - subject_to='fsaverage', src_to=src_to, - subjects_dir=subjects_dir) +print(src_to[0]["vertno"]) # special, np.arange(10242) +morph = mne.compute_source_morph( + stc, + subject_from="sample", + subject_to="fsaverage", + src_to=src_to, + subjects_dir=subjects_dir, +) # %% # Apply morph to (Vector) SourceEstimate @@ -107,25 +110,28 @@ # Define plotting parameters surfer_kwargs = dict( - hemi='lh', subjects_dir=subjects_dir, - clim=dict(kind='value', lims=[8, 12, 15]), views='lateral', - initial_time=0.09, time_unit='s', size=(800, 800), - smoothing_steps=5) + hemi="lh", + subjects_dir=subjects_dir, + clim=dict(kind="value", lims=[8, 12, 15]), + views="lateral", + initial_time=0.09, + time_unit="s", + size=(800, 800), + smoothing_steps=5, +) # As spherical surface -brain = stc_fsaverage.plot(surface='sphere', **surfer_kwargs) +brain = stc_fsaverage.plot(surface="sphere", **surfer_kwargs) # Add title -brain.add_text(0.1, 0.9, 'Morphed to fsaverage (spherical)', 'title', - font_size=16) +brain.add_text(0.1, 0.9, "Morphed to fsaverage (spherical)", "title", font_size=16) # %% # As inflated surface -brain_inf = stc_fsaverage.plot(surface='inflated', **surfer_kwargs) +brain_inf = stc_fsaverage.plot(surface="inflated", **surfer_kwargs) # Add title -brain_inf.add_text(0.1, 0.9, 'Morphed to fsaverage (inflated)', 'title', - font_size=16) +brain_inf.add_text(0.1, 0.9, "Morphed to fsaverage (inflated)", "title", font_size=16) # %% # Reading and writing SourceMorph from and to disk @@ -154,8 +160,7 @@ # easily chained into a handy one-liner. 
Taking this together the shortest # possible way to morph data directly would be: -stc_fsaverage = mne.compute_source_morph(stc, - subjects_dir=subjects_dir).apply(stc) +stc_fsaverage = mne.compute_source_morph(stc, subjects_dir=subjects_dir).apply(stc) # %% # For more examples, check out :ref:`examples using SourceMorph.apply diff --git a/examples/inverse/morph_volume_stc.py b/examples/inverse/morph_volume_stc.py index 5be18ba6218..83d27691a46 100644 --- a/examples/inverse/morph_volume_stc.py +++ b/examples/inverse/morph_volume_stc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-morph-volume: @@ -7,48 +6,47 @@ ================================ This example demonstrates how to morph an individual subject's -:class:`mne.VolSourceEstimate` to a common reference space. We achieve this -using :class:`mne.SourceMorph`. Data will be morphed based on -an affine transformation and a nonlinear registration method -known as Symmetric Diffeomorphic Registration (SDR) by +:class:`mne.VolSourceEstimate` to a common reference space. We achieve this using +:class:`mne.SourceMorph`. Data will be morphed based on an affine transformation and +a nonlinear registration method known as Symmetric Diffeomorphic Registration (SDR) by :footcite:`AvantsEtAl2008`. Transformation is estimated from the subject's anatomical T1 weighted MRI (brain) to `FreeSurfer's 'fsaverage' T1 weighted MRI (brain) `__. -Afterwards the transformation will be applied to the volumetric source -estimate. The result will be plotted, showing the fsaverage T1 weighted -anatomical MRI, overlaid with the morphed volumetric source estimate. +Afterwards the transformation will be applied to the volumetric source estimate. The +result will be plotted, showing the fsaverage T1 weighted anatomical MRI, overlaid with +the morphed volumetric source estimate. """ + # Author: Tommy Clausner # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% -import os - import nibabel as nib +from nilearn.plotting import plot_glass_brain + import mne -from mne.datasets import sample, fetch_fsaverage +from mne.datasets import fetch_fsaverage, sample from mne.minimum_norm import apply_inverse, read_inverse_operator -from nilearn.plotting import plot_glass_brain print(__doc__) # %% # Setup paths sample_dir_raw = sample.data_path() -sample_dir = os.path.join(sample_dir_raw, 'MEG', 'sample') -subjects_dir = os.path.join(sample_dir_raw, 'subjects') +sample_dir = sample_dir_raw / "MEG" / "sample" +subjects_dir = sample_dir_raw / "subjects" -fname_evoked = os.path.join(sample_dir, 'sample_audvis-ave.fif') -fname_inv = os.path.join(sample_dir, 'sample_audvis-meg-vol-7-meg-inv.fif') +fname_evoked = sample_dir / "sample_audvis-ave.fif" +fname_inv = sample_dir / "sample_audvis-meg-vol-7-meg-inv.fif" -fname_t1_fsaverage = os.path.join(subjects_dir, 'fsaverage', 'mri', - 'brain.mgz') +fname_t1_fsaverage = subjects_dir / "fsaverage" / "mri" / "brain.mgz" fetch_fsaverage(subjects_dir) # ensure fsaverage src exists -fname_src_fsaverage = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif' +fname_src_fsaverage = subjects_dir / "fsaverage" / "bem" / "fsaverage-vol-5-src.fif" # %% # Compute example data. For reference see :ref:`ex-inverse-volume`. 
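Editor's note (not part of the patch): the surface-morph example above mentions a "Reading and writing SourceMorph from and to disk" section whose hunks are not touched by this diff. As a reminder of that round trip, here is a minimal sketch using the public MNE API; the output file name is just a placeholder.

import mne
from mne.datasets import sample

data_path = sample.data_path()
subjects_dir = data_path / "subjects"
fname_stc = data_path / "MEG" / "sample" / "sample_audvis-meg"
stc = mne.read_source_estimate(fname_stc, subject="sample")

# compute the sample -> fsaverage morph and write it to disk
morph = mne.compute_source_morph(stc, subjects_dir=subjects_dir)
morph.save("sample-to-fsaverage-morph.h5", overwrite=True)

# later (or in another script): read it back and apply it to a compatible stc
morph = mne.read_source_morph("sample-to-fsaverage-morph.h5")
stc_fsaverage = morph.apply(stc)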
@@ -58,7 +56,7 @@ inverse_operator = read_inverse_operator(fname_inv) # Apply inverse operator -stc = apply_inverse(evoked, inverse_operator, 1.0 / 3.0 ** 2, "dSPM") +stc = apply_inverse(evoked, inverse_operator, 1.0 / 3.0**2, "dSPM") # To save time stc.crop(0.09, 0.09) @@ -85,9 +83,14 @@ src_fs = mne.read_source_spaces(fname_src_fsaverage) morph = mne.compute_source_morph( - inverse_operator['src'], subject_from='sample', subjects_dir=subjects_dir, - niter_affine=[10, 10, 5], niter_sdr=[10, 10, 5], # just for speed - src_to=src_fs, verbose=True) + inverse_operator["src"], + subject_from="sample", + subjects_dir=subjects_dir, + niter_affine=[10, 10, 5], + niter_sdr=[10, 10, 5], # just for speed + src_to=src_fs, + verbose=True, +) # %% # Apply morph to VolSourceEstimate @@ -120,7 +123,7 @@ # :meth:`morph.apply(..., output='nifti1') `. # Create mri-resolution volume of results -img_fsaverage = morph.apply(stc, mri_resolution=2, output='nifti1') +img_fsaverage = morph.apply(stc, mri_resolution=2, output="nifti1") # %% # Plot results @@ -130,13 +133,12 @@ t1_fsaverage = nib.load(fname_t1_fsaverage) # Plot glass brain (change to plot_anat to display an overlaid anatomical T1) -display = plot_glass_brain(t1_fsaverage, - title='subject results to fsaverage', - draw_cross=False, - annotate=True) +display = plot_glass_brain( + t1_fsaverage, title="subject results to fsaverage", draw_cross=False, annotate=True +) # Add functional data as overlay -display.add_overlay(img_fsaverage, alpha=0.75) +display.add_overlay(img_fsaverage, transparency=0.75) # %% diff --git a/examples/inverse/multi_dipole_model.py b/examples/inverse/multi_dipole_model.py index fe8f6acecf3..b6985bcc182 100644 --- a/examples/inverse/multi_dipole_model.py +++ b/examples/inverse/multi_dipole_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-multi-dipole: @@ -27,42 +26,49 @@ # Author: Marijn van Vliet # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. ############################################################################### # Importing everything and setting up the data paths for the MNE-Sample # dataset. -import mne -from mne.datasets import sample -from mne.channels import read_vectorview_selection -from mne.minimum_norm import (make_inverse_operator, apply_inverse, - apply_inverse_epochs) import matplotlib.pyplot as plt import numpy as np +import mne +from mne.channels import read_vectorview_selection +from mne.datasets import sample +from mne.minimum_norm import apply_inverse, apply_inverse_epochs, make_inverse_operator + data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_raw.fif' -cov_fname = meg_path / 'sample_audvis-shrunk-cov.fif' -bem_dir = data_path / 'subjects' / 'sample' / 'bem' -bem_fname = bem_dir / 'sample-5120-5120-5120-bem-sol.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_raw.fif" +cov_fname = meg_path / "sample_audvis-shrunk-cov.fif" +bem_dir = data_path / "subjects" / "sample" / "bem" +bem_fname = bem_dir / "sample-5120-5120-5120-bem-sol.fif" ############################################################################### # Read the MEG data from the audvis experiment. Make epochs and evokeds for the # left and right auditory conditions. 
raw = mne.io.read_raw_fif(raw_fname) -raw = raw.pick_types(meg=True, eog=True, stim=True) +raw = raw.pick(picks=["meg", "eog", "stim"]) info = raw.info # Create epochs for auditory events events = mne.find_events(raw) event_id = dict(right=1, left=2) -epochs = mne.Epochs(raw, events, event_id, - tmin=-0.1, tmax=0.3, baseline=(None, 0), - reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6)) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin=-0.1, + tmax=0.3, + baseline=(None, 0), + reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6), +) # Create evokeds for left and right auditory stimulation -evoked_left = epochs['left'].average() -evoked_right = epochs['right'].average() +evoked_left = epochs["left"].average() +evoked_right = epochs["right"].average() ############################################################################### # Guided dipole modeling, meaning fitting dipoles to a manually selected subset @@ -71,28 +77,29 @@ # in MNE-Python. # Setup conductor model -cov = mne.read_cov(cov_fname) +cov = mne.read_cov(cov_fname) # bad channels were already excluded here bem = mne.read_bem_solution(bem_fname) # Fit two dipoles at t=80ms. The first dipole is fitted using only the sensors # on the left side of the helmet. The second dipole is fitted using only the # sensors on the right side of the helmet. -picks_left = read_vectorview_selection('Left', info=info) +picks_left = read_vectorview_selection("Left", info=info) evoked_fit_left = evoked_left.copy().crop(0.08, 0.08) -evoked_fit_left.pick_channels(picks_left) -cov_fit_left = cov.copy().pick_channels(picks_left) +evoked_fit_left.pick(picks_left) +cov_fit_left = cov.copy().pick_channels(picks_left, ordered=True) -picks_right = read_vectorview_selection('Right', info=info) +picks_right = read_vectorview_selection("Right", info=info) +picks_right = list(set(picks_right) - set(info["bads"])) evoked_fit_right = evoked_right.copy().crop(0.08, 0.08) -evoked_fit_right.pick_channels(picks_right) -cov_fit_right = cov.copy().pick_channels(picks_right) +evoked_fit_right.pick(picks_right) +cov_fit_right = cov.copy().pick_channels(picks_right, ordered=True) # Any SSS projections that are active on this data need to be re-normalized # after picking channels. evoked_fit_left.info.normalize_proj() evoked_fit_right.info.normalize_proj() -cov_fit_left['projs'] = evoked_fit_left.info['projs'] -cov_fit_right['projs'] = evoked_fit_right.info['projs'] +cov_fit_left["projs"] = evoked_fit_left.info["projs"] +cov_fit_right["projs"] = evoked_fit_right.info["projs"] # Fit the dipoles with the subset of sensors. 
dip_left, _ = mne.fit_dipole(evoked_fit_left, cov_fit_left, bem) @@ -108,27 +115,25 @@ # Apply MNE inverse inv = make_inverse_operator(info, fwd, cov, fixed=True, depth=0) -stc_left = apply_inverse(evoked_left, inv, method='MNE', lambda2=1E-6) -stc_right = apply_inverse(evoked_right, inv, method='MNE', lambda2=1E-6) +stc_left = apply_inverse(evoked_left, inv, method="MNE", lambda2=1e-6) +stc_right = apply_inverse(evoked_right, inv, method="MNE", lambda2=1e-6) # Plot the timecourses of the resulting source estimate fig, axes = plt.subplots(nrows=2, sharex=True, sharey=True) axes[0].plot(stc_left.times, stc_left.data.T) -axes[0].set_title('Left auditory stimulation') -axes[0].legend(['Dipole 1', 'Dipole 2']) +axes[0].set_title("Left auditory stimulation") +axes[0].legend(["Dipole 1", "Dipole 2"]) axes[1].plot(stc_right.times, stc_right.data.T) -axes[1].set_title('Right auditory stimulation') -axes[1].set_xlabel('Time (s)') -fig.supylabel('Dipole amplitude') +axes[1].set_title("Right auditory stimulation") +axes[1].set_xlabel("Time (s)") +fig.supylabel("Dipole amplitude") ############################################################################### # We can also fit the timecourses to single epochs. Here, we do it for each # experimental condition separately. -stcs_left = apply_inverse_epochs(epochs['left'], inv, lambda2=1E-6, - method='MNE') -stcs_right = apply_inverse_epochs(epochs['right'], inv, lambda2=1E-6, - method='MNE') +stcs_left = apply_inverse_epochs(epochs["left"], inv, lambda2=1e-6, method="MNE") +stcs_right = apply_inverse_epochs(epochs["right"], inv, lambda2=1e-6, method="MNE") ############################################################################### # To summarize and visualize the single-epoch dipole amplitudes, we will create @@ -152,17 +157,17 @@ mean_right = np.mean(amplitudes_right, axis=0) fig, ax = plt.subplots(figsize=(8, 4)) -ax.scatter(np.arange(n), amplitudes[:, 0], label='Dipole 1') -ax.scatter(np.arange(n), amplitudes[:, 1], label='Dipole 2') +ax.scatter(np.arange(n), amplitudes[:, 0], label="Dipole 1") +ax.scatter(np.arange(n), amplitudes[:, 1], label="Dipole 2") transition_point = n_left - 0.5 -ax.plot([0, transition_point], [mean_left[0], mean_left[0]], color='C0') -ax.plot([0, transition_point], [mean_left[1], mean_left[1]], color='C1') -ax.plot([transition_point, n], [mean_right[0], mean_right[0]], color='C0') -ax.plot([transition_point, n], [mean_right[1], mean_right[1]], color='C1') -ax.axvline(transition_point, color='black') -ax.set_xlabel('Epochs') -ax.set_ylabel('Dipole amplitude') +ax.plot([0, transition_point], [mean_left[0], mean_left[0]], color="C0") +ax.plot([0, transition_point], [mean_left[1], mean_left[1]], color="C1") +ax.plot([transition_point, n], [mean_right[0], mean_right[0]], color="C0") +ax.plot([transition_point, n], [mean_right[1], mean_right[1]], color="C1") +ax.axvline(transition_point, color="black") +ax.set_xlabel("Epochs") +ax.set_ylabel("Dipole amplitude") ax.legend() -fig.suptitle('Single epoch dipole amplitudes') -fig.text(0.30, 0.9, 'Left auditory stimulation', ha='center') -fig.text(0.70, 0.9, 'Right auditory stimulation', ha='center') +fig.suptitle("Single epoch dipole amplitudes") +fig.text(0.30, 0.9, "Left auditory stimulation", ha="center") +fig.text(0.70, 0.9, "Right auditory stimulation", ha="center") diff --git a/examples/inverse/multidict_reweighted_tfmxne.py b/examples/inverse/multidict_reweighted_tfmxne.py index 1cb111834b6..aed27ea880b 100644 --- a/examples/inverse/multidict_reweighted_tfmxne.py +++ 
b/examples/inverse/multidict_reweighted_tfmxne.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-iterative-reweighted-tf-mxne: @@ -24,12 +23,13 @@ # Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import mne from mne.datasets import somato -from mne.inverse_sparse import tf_mixed_norm, make_stc_from_dipoles +from mne.inverse_sparse import make_stc_from_dipoles, tf_mixed_norm from mne.viz import plot_sparse_source_estimates print(__doc__) @@ -39,28 +39,29 @@ # Load somatosensory MEG data data_path = somato.data_path() -subject = '01' -task = 'somato' -raw_fname = (data_path / f'sub-{subject}' / 'meg' / - f'sub-{subject}_task-{task}_meg.fif') -fwd_fname = (data_path / 'derivatives' / f'sub-{subject}' / - f'sub-{subject}_task-{task}-fwd.fif') +subject = "01" +task = "somato" +raw_fname = data_path / f"sub-{subject}" / "meg" / f"sub-{subject}_task-{task}_meg.fif" +fwd_fname = ( + data_path / "derivatives" / f"sub-{subject}" / f"sub-{subject}_task-{task}-fwd.fif" +) # Read evoked raw = mne.io.read_raw_fif(raw_fname) -raw.pick_types(meg=True, eog=True, stim=True) -events = mne.find_events(raw, stim_channel='STI 014') +raw.pick(picks=["meg", "eog", "stim"]) +events = mne.find_events(raw, stim_channel="STI 014") reject = dict(grad=4000e-13, eog=350e-6) event_id, tmin, tmax = dict(unknown=1), -0.5, 0.5 -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=reject, - baseline=(None, 0)) +epochs = mne.Epochs( + raw, events, event_id, tmin, tmax, reject=reject, baseline=(None, 0) +) evoked = epochs.average() evoked.crop(tmin=0.0, tmax=0.2) # Compute noise covariance matrix -cov = mne.compute_covariance(epochs, rank='info', tmax=0.) +cov = mne.compute_covariance(epochs, rank="info", tmax=0.0) del epochs, raw # Handling forward solution @@ -70,7 +71,7 @@ # Run iterative reweighted multidict TF-MxNE solver alpha, l1_ratio = 20, 0.05 -loose, depth = 0.9, 1. 
+loose, depth = 0.9, 1.0 # Use a multiscale time-frequency dictionary wsize, tstep = [4, 16], [2, 4] @@ -78,27 +79,42 @@ n_tfmxne_iter = 10 # Compute TF-MxNE inverse solution with dipole output dipoles, residual = tf_mixed_norm( - evoked, forward, cov, alpha=alpha, l1_ratio=l1_ratio, - n_tfmxne_iter=n_tfmxne_iter, loose=loose, - depth=depth, tol=1e-3, - wsize=wsize, tstep=tstep, return_as_dipoles=True, - return_residual=True) + evoked, + forward, + cov, + alpha=alpha, + l1_ratio=l1_ratio, + n_tfmxne_iter=n_tfmxne_iter, + loose=loose, + depth=depth, + tol=1e-3, + wsize=wsize, + tstep=tstep, + return_as_dipoles=True, + return_residual=True, +) # %% # Generate stc from dipoles -stc = make_stc_from_dipoles(dipoles, forward['src']) +stc = make_stc_from_dipoles(dipoles, forward["src"]) plot_sparse_source_estimates( - forward['src'], stc, bgcolor=(1, 1, 1), opacity=0.1, - fig_name=f"irTF-MxNE (cond {evoked.comment})") + forward["src"], + stc, + bgcolor=(1, 1, 1), + opacity=0.1, + fig_name=f"irTF-MxNE (cond {evoked.comment})", +) # %% # Show the evoked response and the residual for gradiometers ylim = dict(grad=[-300, 300]) -evoked.copy().pick_types(meg='grad').plot( - titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim) -residual.copy().pick_types(meg='grad').plot( - titles=dict(grad='Residuals: Gradiometers'), ylim=ylim) +evoked.copy().pick(picks="grad").plot( + titles=dict(grad="Evoked Response: Gradiometers"), ylim=ylim +) +residual.copy().pick(picks="grad").plot( + titles=dict(grad="Residuals: Gradiometers"), ylim=ylim +) # %% # References diff --git a/examples/inverse/psf_ctf_label_leakage.py b/examples/inverse/psf_ctf_label_leakage.py index ce174faee29..4991ae6eaf3 100644 --- a/examples/inverse/psf_ctf_label_leakage.py +++ b/examples/inverse/psf_ctf_label_leakage.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-source-leakage: @@ -18,21 +17,22 @@ # Nicolas P. Rougier (graph code borrowed from his matplotlib gallery) # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% -import numpy as np import matplotlib.pyplot as plt +import numpy as np +from mne_connectivity.viz import plot_connectivity_circle import mne from mne.datasets import sample -from mne.minimum_norm import (read_inverse_operator, - make_inverse_resolution_matrix, - get_point_spread) - +from mne.minimum_norm import ( + get_point_spread, + make_inverse_resolution_matrix, + read_inverse_operator, +) from mne.viz import circular_layout -from mne_connectivity.viz import plot_connectivity_circle - print(__doc__) @@ -44,20 +44,20 @@ # resolution matrices for different methods. data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fname_fwd = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-fixed-inv.fif' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fname_fwd = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +fname_inv = meg_path / "sample_audvis-meg-oct-6-meg-fixed-inv.fif" forward = mne.read_forward_solution(fname_fwd) # Convert forward solution to fixed source orientations -mne.convert_forward_solution( - forward, surf_ori=True, force_fixed=True, copy=False) +mne.convert_forward_solution(forward, surf_ori=True, force_fixed=True, copy=False) inverse_operator = read_inverse_operator(fname_inv) # Compute resolution matrices for MNE -rm_mne = make_inverse_resolution_matrix(forward, inverse_operator, - method='MNE', lambda2=1. 
/ 3.**2) -src = inverse_operator['src'] +rm_mne = make_inverse_resolution_matrix( + forward, inverse_operator, method="MNE", lambda2=1.0 / 3.0**2 +) +src = inverse_operator["src"] del forward, inverse_operator # save memory # %% @@ -65,13 +65,12 @@ # -------------------------------------------------- # # Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi -labels = mne.read_labels_from_annot('sample', parc='aparc', - subjects_dir=subjects_dir) +labels = mne.read_labels_from_annot("sample", parc="aparc", subjects_dir=subjects_dir) n_labels = len(labels) label_colors = [label.color for label in labels] # First, we reorder the labels based on their location in the left hemi label_names = [label.name for label in labels] -lh_labels = [name for name in label_names if name.endswith('lh')] +lh_labels = [name for name in label_names if name.endswith("lh")] # Get the y-location of the label label_ypos = list() @@ -84,7 +83,7 @@ lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))] # For the right hemi -rh_labels = [label[:-2] + 'rh' for label in lh_labels] +rh_labels = [label[:-2] + "rh" for label in lh_labels] # %% # Compute point-spread function summaries (PCA) for all labels @@ -98,8 +97,8 @@ # spatial extents of labels. n_comp = 5 stcs_psf_mne, pca_vars_mne = get_point_spread( - rm_mne, src, labels, mode='pca', n_comp=n_comp, norm=None, - return_pca_vars=True) + rm_mne, src, labels, mode="pca", n_comp=n_comp, norm=None, return_pca_vars=True +) n_verts = rm_mne.shape[0] del rm_mne @@ -110,7 +109,7 @@ with np.printoptions(precision=1): for [name, var] in zip(label_names, pca_vars_mne): - print(f'{name}: {var.sum():.1f}% {var}') + print(f"{name}: {var.sum():.1f}% {var}") # %% # The output shows the summed variance explained by the first five principal @@ -133,15 +132,23 @@ # Save the plot order and create a circular layout node_order = lh_labels[::-1] + rh_labels # mirror label order across hemis -node_angles = circular_layout(label_names, node_order, start_pos=90, - group_boundaries=[0, len(label_names) / 2]) +node_angles = circular_layout( + label_names, node_order, start_pos=90, group_boundaries=[0, len(label_names) / 2] +) # Plot the graph using node colors from the FreeSurfer parcellation. We only # show the 200 strongest connections. fig, ax = plt.subplots( - figsize=(8, 8), facecolor='black', subplot_kw=dict(projection='polar')) -plot_connectivity_circle(leakage_mne, label_names, n_lines=200, - node_angles=node_angles, node_colors=label_colors, - title='MNE Leakage', ax=ax) + figsize=(8, 8), facecolor="black", subplot_kw=dict(projection="polar") +) +plot_connectivity_circle( + leakage_mne, + label_names, + n_lines=200, + node_angles=node_angles, + node_colors=label_colors, + title="MNE Leakage", + ax=ax, +) # %% # Most leakage occurs for neighbouring regions, but also for deeper regions @@ -176,20 +183,26 @@ # %% # Point-spread function for the lateral occipital label in the left hemisphere -brain_lh = stc_lh.plot(subjects_dir=subjects_dir, subject='sample', - hemi='both', views='caudal', - clim=dict(kind='value', - pos_lims=(0, max_val / 2., max_val))) -brain_lh.add_text(0.1, 0.9, label_names[idx[0]], 'title', font_size=16) +brain_lh = stc_lh.plot( + subjects_dir=subjects_dir, + subject="sample", + hemi="both", + views="caudal", + clim=dict(kind="value", pos_lims=(0, max_val / 2.0, max_val)), +) +brain_lh.add_text(0.1, 0.9, label_names[idx[0]], "title", font_size=16) # %% # and in the right hemisphere. 
-brain_rh = stc_rh.plot(subjects_dir=subjects_dir, subject='sample', - hemi='both', views='caudal', - clim=dict(kind='value', - pos_lims=(0, max_val / 2., max_val))) -brain_rh.add_text(0.1, 0.9, label_names[idx[1]], 'title', font_size=16) +brain_rh = stc_rh.plot( + subjects_dir=subjects_dir, + subject="sample", + hemi="both", + views="caudal", + clim=dict(kind="value", pos_lims=(0, max_val / 2.0, max_val)), +) +brain_rh.add_text(0.1, 0.9, label_names[idx[1]], "title", font_size=16) # %% # Both summary PSFs are confined to their respective hemispheres, indicating diff --git a/examples/inverse/psf_ctf_vertices.py b/examples/inverse/psf_ctf_vertices.py index f6616106e6b..e6ab88a5fef 100644 --- a/examples/inverse/psf_ctf_vertices.py +++ b/examples/inverse/psf_ctf_vertices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-psd-ctf: @@ -12,28 +11,31 @@ # Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import mne from mne.datasets import sample -from mne.minimum_norm import (make_inverse_resolution_matrix, get_cross_talk, - get_point_spread) +from mne.minimum_norm import ( + get_cross_talk, + get_point_spread, + make_inverse_resolution_matrix, +) print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fname_fwd = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -fname_cov = meg_path / 'sample_audvis-cov.fif' -fname_evo = meg_path / 'sample_audvis-ave.fif' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fname_fwd = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +fname_cov = meg_path / "sample_audvis-cov.fif" +fname_evo = meg_path / "sample_audvis-ave.fif" # read forward solution forward = mne.read_forward_solution(fname_fwd) # forward operator with fixed source orientations -mne.convert_forward_solution(forward, surf_ori=True, - force_fixed=True, copy=False) +mne.convert_forward_solution(forward, surf_ori=True, force_fixed=True, copy=False) # noise covariance matrix noise_cov = mne.read_cov(fname_cov) @@ -44,23 +46,24 @@ # make inverse operator from forward solution # free source orientation inverse_operator = mne.minimum_norm.make_inverse_operator( - info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0., - depth=None) + info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0.0, depth=None +) # regularisation parameter snr = 3.0 -lambda2 = 1.0 / snr ** 2 +lambda2 = 1.0 / snr**2 # compute resolution matrix for sLORETA -rm_lor = make_inverse_resolution_matrix(forward, inverse_operator, - method='sLORETA', lambda2=lambda2) +rm_lor = make_inverse_resolution_matrix( + forward, inverse_operator, method="sLORETA", lambda2=lambda2 +) # get PSF and CTF for sLORETA at one vertex sources = [1000] -stc_psf = get_point_spread(rm_lor, forward['src'], sources, norm=True) +stc_psf = get_point_spread(rm_lor, forward["src"], sources, norm=True) -stc_ctf = get_cross_talk(rm_lor, forward['src'], sources, norm=True) +stc_ctf = get_cross_talk(rm_lor, forward["src"], sources, norm=True) del rm_lor ############################################################################## @@ -69,37 +72,41 @@ # PSF: # Which vertex corresponds to selected source -vertno_lh = forward['src'][0]['vertno'] +vertno_lh = forward["src"][0]["vertno"] verttrue = [vertno_lh[sources[0]]] # just one vertex # find vertices with maxima in PSF and CTF vert_max_psf = vertno_lh[stc_psf.data.argmax()] vert_max_ctf = vertno_lh[stc_ctf.data.argmax()] -brain_psf = stc_psf.plot('sample', 
'inflated', 'lh', subjects_dir=subjects_dir) -brain_psf.show_view('ventral') -brain_psf.add_text(0.1, 0.9, 'sLORETA PSF', 'title', font_size=16) +brain_psf = stc_psf.plot("sample", "inflated", "lh", subjects_dir=subjects_dir) +brain_psf.show_view("ventral") +brain_psf.add_text(0.1, 0.9, "sLORETA PSF", "title", font_size=16) # True source location for PSF -brain_psf.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh', - color='green') +brain_psf.add_foci( + verttrue, coords_as_verts=True, scale_factor=1.0, hemi="lh", color="green" +) # Maximum of PSF -brain_psf.add_foci(vert_max_psf, coords_as_verts=True, scale_factor=1., - hemi='lh', color='black') +brain_psf.add_foci( + vert_max_psf, coords_as_verts=True, scale_factor=1.0, hemi="lh", color="black" +) # %% # CTF: -brain_ctf = stc_ctf.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir) -brain_ctf.add_text(0.1, 0.9, 'sLORETA CTF', 'title', font_size=16) -brain_ctf.show_view('ventral') -brain_ctf.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh', - color='green') +brain_ctf = stc_ctf.plot("sample", "inflated", "lh", subjects_dir=subjects_dir) +brain_ctf.add_text(0.1, 0.9, "sLORETA CTF", "title", font_size=16) +brain_ctf.show_view("ventral") +brain_ctf.add_foci( + verttrue, coords_as_verts=True, scale_factor=1.0, hemi="lh", color="green" +) # Maximum of CTF -brain_ctf.add_foci(vert_max_ctf, coords_as_verts=True, scale_factor=1., - hemi='lh', color='black') +brain_ctf.add_foci( + vert_max_ctf, coords_as_verts=True, scale_factor=1.0, hemi="lh", color="black" +) # %% diff --git a/examples/inverse/psf_ctf_vertices_lcmv.py b/examples/inverse/psf_ctf_vertices_lcmv.py index fc8f740029e..bf7009374e0 100644 --- a/examples/inverse/psf_ctf_vertices_lcmv.py +++ b/examples/inverse/psf_ctf_vertices_lcmv.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-psf-ctf-lcmv: @@ -13,86 +12,103 @@ # Author: Olaf Hauk # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
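Editor's note (not part of the patch): looking back at the psf_ctf_label_leakage.py hunks a little earlier, the leakage matrix passed to plot_connectivity_circle is built in a hunk this diff does not touch. One common construction is to correlate the first PCA component of each label's point-spread function with every other label's; the numpy sketch below illustrates that idea with random stand-in data, and the exact recipe in the example may differ.

import numpy as np

rng = np.random.default_rng(0)
n_labels, n_verts = 68, 7498  # e.g. 34 'aparc' labels per hemisphere
psf_first_comp = rng.standard_normal((n_labels, n_verts))  # stand-in for PSF PCA comp. 1

# leakage between labels i and j as the absolute correlation of their leading PSF components
leakage = np.abs(np.corrcoef(psf_first_comp))
print(leakage.shape)  # (68, 68), with ones on the diagonal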
# %% import mne -from mne.datasets import sample from mne.beamformer import make_lcmv, make_lcmv_resolution_matrix +from mne.datasets import sample from mne.minimum_norm import get_cross_talk print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fname_fwd = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -fname_cov = meg_path / 'sample_audvis-cov.fif' -fname_evo = meg_path / 'sample_audvis-ave.fif' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fname_fwd = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +fname_cov = meg_path / "sample_audvis-cov.fif" +fname_evo = meg_path / "sample_audvis-ave.fif" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" # Read raw data raw = mne.io.read_raw_fif(raw_fname) # only pick good EEG/MEG sensors -raw.info['bads'] += ['EEG 053'] # bads + 1 more -picks = mne.pick_types(raw.info, meg=True, eeg=True, exclude='bads') +raw.info["bads"] += ["EEG 053"] # bads + 1 more +picks = mne.pick_types(raw.info, meg=True, eeg=True, exclude="bads") # Find events events = mne.find_events(raw) # event_id = {'aud/l': 1, 'aud/r': 2, 'vis/l': 3, 'vis/r': 4} -event_id = {'vis/l': 3, 'vis/r': 4} - -tmin, tmax = -.2, .25 # epoch duration -epochs = mne.Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax, - picks=picks, baseline=(-.2, 0.), preload=True) +event_id = {"vis/l": 3, "vis/r": 4} + +tmin, tmax = -0.2, 0.25 # epoch duration +epochs = mne.Epochs( + raw, + events, + event_id=event_id, + tmin=tmin, + tmax=tmax, + picks=picks, + baseline=(-0.2, 0.0), + preload=True, +) del raw # covariance matrix for pre-stimulus interval -tmin, tmax = -.2, 0. -cov_pre = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax, - method='empirical') +tmin, tmax = -0.2, 0.0 +cov_pre = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax, method="empirical") # covariance matrix for post-stimulus interval (around main evoked responses) -tmin, tmax = 0.05, .25 -cov_post = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax, - method='empirical') +tmin, tmax = 0.05, 0.25 +cov_post = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax, method="empirical") info = epochs.info del epochs # read forward solution forward = mne.read_forward_solution(fname_fwd) # use forward operator with fixed source orientations -mne.convert_forward_solution(forward, surf_ori=True, - force_fixed=True, copy=False) +mne.convert_forward_solution(forward, surf_ori=True, force_fixed=True, copy=False) # read noise covariance matrix noise_cov = mne.read_cov(fname_cov) # regularize noise covariance (we used 'empirical' above) -noise_cov = mne.cov.regularize(noise_cov, info, mag=0.1, grad=0.1, - eeg=0.1, rank='info') +noise_cov = mne.cov.regularize(noise_cov, info, mag=0.1, grad=0.1, eeg=0.1, rank="info") ############################################################################## # Compute LCMV filters with different data covariance matrices # ------------------------------------------------------------ # compute LCMV beamformer filters for pre-stimulus interval -filters_pre = make_lcmv(info, forward, cov_pre, reg=0.05, - noise_cov=noise_cov, - pick_ori=None, rank=None, - weight_norm=None, - reduce_rank=False, - verbose=False) +filters_pre = make_lcmv( + info, + forward, + cov_pre, + reg=0.05, + noise_cov=noise_cov, + pick_ori=None, + rank=None, + weight_norm=None, + reduce_rank=False, + verbose=False, +) # compute LCMV beamformer filters for 
post-stimulus interval -filters_post = make_lcmv(info, forward, cov_post, reg=0.05, - noise_cov=noise_cov, - pick_ori=None, rank=None, - weight_norm=None, - reduce_rank=False, - verbose=False) +filters_post = make_lcmv( + info, + forward, + cov_post, + reg=0.05, + noise_cov=noise_cov, + pick_ori=None, + rank=None, + weight_norm=None, + reduce_rank=False, + verbose=False, +) ############################################################################## # Compute resolution matrices for the two LCMV beamformers @@ -100,14 +116,14 @@ # compute cross-talk functions (CTFs) for one target vertex sources = [3000] -verttrue = [forward['src'][0]['vertno'][sources[0]]] # pick one vertex +verttrue = [forward["src"][0]["vertno"][sources[0]]] # pick one vertex rm_pre = make_lcmv_resolution_matrix(filters_pre, forward, info) -stc_pre = get_cross_talk(rm_pre, forward['src'], sources, norm=True) +stc_pre = get_cross_talk(rm_pre, forward["src"], sources, norm=True) del rm_pre ############################################################################## rm_post = make_lcmv_resolution_matrix(filters_post, forward, info) -stc_post = get_cross_talk(rm_post, forward['src'], sources, norm=True) +stc_post = get_cross_talk(rm_post, forward["src"], sources, norm=True) del rm_post ############################################################################## @@ -115,28 +131,51 @@ # --------- # Pre: -brain_pre = stc_pre.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir, - figure=1, clim=dict(kind='value', lims=(0, .2, .4))) - -brain_pre.add_text(0.1, 0.9, 'LCMV beamformer with pre-stimulus\ndata ' - 'covariance matrix', 'title', font_size=16) +brain_pre = stc_pre.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=1, + clim=dict(kind="value", lims=(0, 0.2, 0.4)), +) + +brain_pre.add_text( + 0.1, + 0.9, + "LCMV beamformer with pre-stimulus\ndata covariance matrix", + "title", + font_size=16, +) # mark true source location for CTFs -brain_pre.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh', - color='green') +brain_pre.add_foci( + verttrue, coords_as_verts=True, scale_factor=1.0, hemi="lh", color="green" +) # %% # Post: -brain_post = stc_post.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, - figure=2, clim=dict(kind='value', lims=(0, .2, .4))) - -brain_post.add_text(0.1, 0.9, 'LCMV beamformer with post-stimulus\ndata ' - 'covariance matrix', 'title', font_size=16) - -brain_post.add_foci(verttrue, coords_as_verts=True, scale_factor=1., - hemi='lh', color='green') +brain_post = stc_post.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=2, + clim=dict(kind="value", lims=(0, 0.2, 0.4)), +) + +brain_post.add_text( + 0.1, + 0.9, + "LCMV beamformer with post-stimulus\ndata covariance matrix", + "title", + font_size=16, +) + +brain_post.add_foci( + verttrue, coords_as_verts=True, scale_factor=1.0, hemi="lh", color="green" +) # %% # The pre-stimulus beamformer's CTF has lower values in parietal regions diff --git a/examples/inverse/psf_volume.py b/examples/inverse/psf_volume.py index 042efb7be4a..91af78b9dfc 100644 --- a/examples/inverse/psf_volume.py +++ b/examples/inverse/psf_volume.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-psd-vol: @@ -13,6 +12,7 @@ # Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
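Editor's note (not part of the patch): for readers skimming the PSF/CTF examples in this diff, both quantities are just slices of a resolution matrix R = (linear inverse) x (forward operator): columns are point-spread functions, rows are cross-talk functions. A toy numpy sketch of that relationship (random matrices, not MNE objects):

import numpy as np

rng = np.random.default_rng(42)
n_chan, n_src = 10, 20
leadfield = rng.standard_normal((n_chan, n_src))  # toy forward operator
inverse = np.linalg.pinv(leadfield)               # toy linear inverse, shape (n_src, n_chan)

R = inverse @ leadfield  # resolution matrix, shape (n_src, n_src)
j = 5
psf_j = R[:, j]          # point-spread function of source j (a column of R)
ctf_j = R[j, :]          # cross-talk function of estimate j (a row of R)
print(R.shape, psf_j.shape, ctf_j.shape)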
# %% @@ -20,18 +20,17 @@ import mne from mne.datasets import sample -from mne.minimum_norm import make_inverse_resolution_matrix, get_point_spread +from mne.minimum_norm import get_point_spread, make_inverse_resolution_matrix print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fname_cov = meg_path / 'sample_audvis-cov.fif' -fname_evo = meg_path / 'sample_audvis-ave.fif' -fname_trans = meg_path / 'sample_audvis_raw-trans.fif' -fname_bem = ( - subjects_dir / 'sample' / 'bem' / 'sample-5120-bem-sol.fif') +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fname_cov = meg_path / "sample_audvis-cov.fif" +fname_evo = meg_path / "sample_audvis-ave.fif" +fname_trans = meg_path / "sample_audvis_raw-trans.fif" +fname_bem = subjects_dir / "sample" / "bem" / "sample-5120-bem-sol.fif" # %% # For the volume, create a coarse source space for speed (don't do this in @@ -43,27 +42,29 @@ # create a coarse source space src_vol = mne.setup_volume_source_space( # this is a very course resolution! - 'sample', pos=15., subjects_dir=subjects_dir, - add_interpolator=False) # usually you want True, this is just for speed + "sample", pos=15.0, subjects_dir=subjects_dir, add_interpolator=False +) # usually you want True, this is just for speed # compute the forward forward_vol = mne.make_forward_solution( # MEG-only for speed - evoked.info, fname_trans, src_vol, fname_bem, eeg=False) + evoked.info, fname_trans, src_vol, fname_bem, eeg=False +) del src_vol # %% # Now make an inverse operator and compute the PSF at a source. inverse_operator_vol = mne.minimum_norm.make_inverse_operator( - info=evoked.info, forward=forward_vol, noise_cov=noise_cov) + info=evoked.info, forward=forward_vol, noise_cov=noise_cov +) # compute resolution matrix for sLORETA rm_lor_vol = make_inverse_resolution_matrix( - forward_vol, inverse_operator_vol, method='sLORETA', lambda2=1. / 9.) 
+ forward_vol, inverse_operator_vol, method="sLORETA", lambda2=1.0 / 9.0 +) # get PSF and CTF for sLORETA at one vertex sources_vol = [100] -stc_psf_vol = get_point_spread( - rm_lor_vol, forward_vol['src'], sources_vol, norm=True) +stc_psf_vol = get_point_spread(rm_lor_vol, forward_vol["src"], sources_vol, norm=True) del rm_lor_vol ############################################################################## @@ -72,23 +73,30 @@ # PSF: # Which vertex corresponds to selected source -src_vol = forward_vol['src'] -verttrue_vol = src_vol[0]['vertno'][sources_vol] +src_vol = forward_vol["src"] +verttrue_vol = src_vol[0]["vertno"][sources_vol] # find vertex with maximum in PSF -max_vert_idx, _ = np.unravel_index( - stc_psf_vol.data.argmax(), stc_psf_vol.data.shape) -vert_max_ctf_vol = src_vol[0]['vertno'][[max_vert_idx]] +max_vert_idx, _ = np.unravel_index(stc_psf_vol.data.argmax(), stc_psf_vol.data.shape) +vert_max_ctf_vol = src_vol[0]["vertno"][[max_vert_idx]] # plot them brain_psf_vol = stc_psf_vol.plot_3d( - 'sample', src=forward_vol['src'], views='ven', subjects_dir=subjects_dir, - volume_options=dict(alpha=0.5)) -brain_psf_vol.add_text( - 0.1, 0.9, 'Volumetric sLORETA PSF', 'title', font_size=16) + "sample", + src=forward_vol["src"], + views="ven", + subjects_dir=subjects_dir, + volume_options=dict(alpha=0.5), +) +brain_psf_vol.add_text(0.1, 0.9, "Volumetric sLORETA PSF", "title", font_size=16) brain_psf_vol.add_foci( - verttrue_vol, coords_as_verts=True, - scale_factor=1, hemi='vol', color='green') + verttrue_vol, coords_as_verts=True, scale_factor=1, hemi="vol", color="green" +) brain_psf_vol.add_foci( - vert_max_ctf_vol, coords_as_verts=True, - scale_factor=1.25, hemi='vol', color='black', alpha=0.3) + vert_max_ctf_vol, + coords_as_verts=True, + scale_factor=1.25, + hemi="vol", + color="black", + alpha=0.3, +) diff --git a/examples/inverse/rap_music.py b/examples/inverse/rap_music.py index fc5751afe09..cc386605dc1 100644 --- a/examples/inverse/rap_music.py +++ b/examples/inverse/rap_music.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-rap-music: @@ -13,32 +12,31 @@ # Author: Yousra Bekhti # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% import mne - -from mne.datasets import sample from mne.beamformer import rap_music -from mne.viz import plot_dipole_locations, plot_dipole_amplitudes +from mne.datasets import sample +from mne.viz import plot_dipole_amplitudes, plot_dipole_locations print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -evoked_fname = meg_path / 'sample_audvis-ave.fif' -cov_fname = meg_path / 'sample_audvis-cov.fif' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +evoked_fname = meg_path / "sample_audvis-ave.fif" +cov_fname = meg_path / "sample_audvis-cov.fif" # Read the evoked response and crop it -condition = 'Right Auditory' -evoked = mne.read_evokeds(evoked_fname, condition=condition, - baseline=(None, 0)) +condition = "Right Auditory" +evoked = mne.read_evokeds(evoked_fname, condition=condition, baseline=(None, 0)) # select N100 evoked.crop(tmin=0.05, tmax=0.15) -evoked.pick_types(meg=True, eeg=False) +evoked.pick(picks="meg", exclude="bads") # Read the forward solution forward = mne.read_forward_solution(fwd_fname) @@ -46,17 +44,16 @@ # Read noise covariance matrix noise_cov = mne.read_cov(cov_fname) -dipoles, residual = rap_music(evoked, forward, noise_cov, n_dipoles=2, - return_residual=True, verbose=True) -trans = forward['mri_head_t'] -plot_dipole_locations(dipoles, trans, 'sample', subjects_dir=subjects_dir) +dipoles, residual = rap_music( + evoked, forward, noise_cov, n_dipoles=2, return_residual=True, verbose=True +) +trans = forward["mri_head_t"] +plot_dipole_locations(dipoles, trans, "sample", subjects_dir=subjects_dir) plot_dipole_amplitudes(dipoles) # Plot the evoked data and the residual. -evoked.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]), - time_unit='s') -residual.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]), - time_unit='s') +evoked.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]), time_unit="s") +residual.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]), time_unit="s") # %% # References diff --git a/examples/inverse/read_inverse.py b/examples/inverse/read_inverse.py index 730fec92668..148d09d84af 100644 --- a/examples/inverse/read_inverse.py +++ b/examples/inverse/read_inverse.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-read-inverse: @@ -11,6 +10,7 @@ # Author: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
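Editor's note (not part of the patch): a small aside on the rap_music.py example just above — the returned dipoles are ordinary mne.Dipole objects, so their fitted parameters can be inspected directly. This sketch assumes the `dipoles` list from that example and is not something the patch adds.

# assumes `dipoles` as returned by rap_music() in the example above
for ii, dip in enumerate(dipoles, start=1):
    # goodness of fit (percent) and amplitude are arrays over the cropped time window
    print(f"Dipole {ii}: mean GOF {dip.gof.mean():.1f}%, peak amplitude {dip.amplitude.max():.2e}")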
# %% @@ -22,30 +22,35 @@ print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fname_trans = meg_path / 'sample_audvis_raw-trans.fif' -inv_fname = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fname_trans = meg_path / "sample_audvis_raw-trans.fif" +inv_fname = meg_path / "sample_audvis-meg-oct-6-meg-inv.fif" inv = read_inverse_operator(inv_fname) -print("Method: %s" % inv['methods']) -print("fMRI prior: %s" % inv['fmri_prior']) -print("Number of sources: %s" % inv['nsource']) -print("Number of channels: %s" % inv['nchan']) +print(f"Method: {inv['methods']}") +print(f"fMRI prior: {inv['fmri_prior']}") +print(f"Number of sources: {inv['nsource']}") +print(f"Number of channels: {inv['nchan']}") -src = inv['src'] # get the source space +src = inv["src"] # get the source space # Get access to the triangulation of the cortex -print("Number of vertices on the left hemisphere: %d" % len(src[0]['rr'])) -print("Number of triangles on left hemisphere: %d" % len(src[0]['use_tris'])) -print("Number of vertices on the right hemisphere: %d" % len(src[1]['rr'])) -print("Number of triangles on right hemisphere: %d" % len(src[1]['use_tris'])) +print(f"Number of vertices on the left hemisphere: {len(src[0]['rr'])}") +print(f"Number of triangles on left hemisphere: {len(src[0]['use_tris'])}") +print(f"Number of vertices on the right hemisphere: {len(src[1]['rr'])}") +print(f"Number of triangles on right hemisphere: {len(src[1]['use_tris'])}") # %% # Show the 3D source space -fig = mne.viz.plot_alignment(subject='sample', subjects_dir=subjects_dir, - trans=fname_trans, surfaces='white', src=src) -set_3d_view(fig, focalpoint=(0., 0., 0.06)) +fig = mne.viz.plot_alignment( + subject="sample", + subjects_dir=subjects_dir, + trans=fname_trans, + surfaces="white", + src=src, +) +set_3d_view(fig, focalpoint=(0.0, 0.0, 0.06)) diff --git a/examples/inverse/read_stc.py b/examples/inverse/read_stc.py index b421671ce73..b06f61d14f8 100644 --- a/examples/inverse/read_stc.py +++ b/examples/inverse/read_stc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-read-stc: @@ -12,6 +11,7 @@ # Author: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -23,17 +23,16 @@ print(__doc__) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fname = meg_path / 'sample_audvis-meg' +meg_path = data_path / "MEG" / "sample" +fname = meg_path / "sample_audvis-meg" stc = mne.read_source_estimate(fname) n_vertices, n_samples = stc.data.shape -print("stc data size: %s (nb of vertices) x %s (nb of samples)" - % (n_vertices, n_samples)) +print(f"stc data size: {n_vertices} (nb of vertices) x {n_samples} (nb of samples)") # View source activations plt.plot(stc.times, stc.data[::100, :].T) -plt.xlabel('time (ms)') -plt.ylabel('Source amplitude') +plt.xlabel("time (ms)") +plt.ylabel("Source amplitude") plt.show() diff --git a/examples/inverse/resolution_metrics.py b/examples/inverse/resolution_metrics.py index aa795ccc972..5ab39d3c645 100644 --- a/examples/inverse/resolution_metrics.py +++ b/examples/inverse/resolution_metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-res-metrics: @@ -15,28 +14,27 @@ # Author: Olaf Hauk # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% import mne from mne.datasets import sample -from mne.minimum_norm import make_inverse_resolution_matrix -from mne.minimum_norm import resolution_metrics +from mne.minimum_norm import make_inverse_resolution_matrix, resolution_metrics print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fname_fwd = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -fname_cov = meg_path / 'sample_audvis-cov.fif' -fname_evo = meg_path / 'sample_audvis-ave.fif' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fname_fwd = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +fname_cov = meg_path / "sample_audvis-cov.fif" +fname_evo = meg_path / "sample_audvis-ave.fif" # read forward solution forward = mne.read_forward_solution(fname_fwd) # forward operator with fixed source orientations -mne.convert_forward_solution(forward, surf_ori=True, - force_fixed=True, copy=False) +mne.convert_forward_solution(forward, surf_ori=True, force_fixed=True, copy=False) # noise covariance matrix noise_cov = mne.read_cov(fname_cov) @@ -47,12 +45,12 @@ # make inverse operator from forward solution # free source orientation inverse_operator = mne.minimum_norm.make_inverse_operator( - info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0., - depth=None) + info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0.0, depth=None +) # regularisation parameter snr = 3.0 -lambda2 = 1.0 / snr ** 2 +lambda2 = 1.0 / snr**2 # %% # MNE @@ -60,12 +58,15 @@ # Compute resolution matrices, peak localisation error (PLE) for point spread # functions (PSFs), spatial deviation (SD) for PSFs: -rm_mne = make_inverse_resolution_matrix(forward, inverse_operator, - method='MNE', lambda2=lambda2) -ple_mne_psf = resolution_metrics(rm_mne, inverse_operator['src'], - function='psf', metric='peak_err') -sd_mne_psf = resolution_metrics(rm_mne, inverse_operator['src'], - function='psf', metric='sd_ext') +rm_mne = make_inverse_resolution_matrix( + forward, inverse_operator, method="MNE", lambda2=lambda2 +) +ple_mne_psf = resolution_metrics( + rm_mne, inverse_operator["src"], function="psf", metric="peak_err" +) +sd_mne_psf = resolution_metrics( + rm_mne, inverse_operator["src"], function="psf", metric="sd_ext" +) del rm_mne # %% @@ -73,39 +74,57 @@ # ---- # Do the same for dSPM: -rm_dspm = make_inverse_resolution_matrix(forward, inverse_operator, - method='dSPM', lambda2=lambda2) -ple_dspm_psf = resolution_metrics(rm_dspm, inverse_operator['src'], - function='psf', metric='peak_err') -sd_dspm_psf = resolution_metrics(rm_dspm, inverse_operator['src'], - function='psf', metric='sd_ext') +rm_dspm = make_inverse_resolution_matrix( + forward, inverse_operator, method="dSPM", lambda2=lambda2 +) +ple_dspm_psf = resolution_metrics( + rm_dspm, inverse_operator["src"], function="psf", metric="peak_err" +) +sd_dspm_psf = resolution_metrics( + rm_dspm, inverse_operator["src"], function="psf", metric="sd_ext" +) del rm_dspm, forward # %% # Visualize results # ----------------- # Visualise peak localisation error (PLE) across the whole cortex for MNE PSF: -brain_ple_mne = ple_mne_psf.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=1, - clim=dict(kind='value', lims=(0, 2, 4))) -brain_ple_mne.add_text(0.1, 0.9, 'PLE MNE', 'title', font_size=16) +brain_ple_mne = ple_mne_psf.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=1, + clim=dict(kind="value", lims=(0, 2, 4)), +) +brain_ple_mne.add_text(0.1, 0.9, "PLE 
MNE", "title", font_size=16) # %% # And dSPM: -brain_ple_dspm = ple_dspm_psf.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=2, - clim=dict(kind='value', lims=(0, 2, 4))) -brain_ple_dspm.add_text(0.1, 0.9, 'PLE dSPM', 'title', font_size=16) +brain_ple_dspm = ple_dspm_psf.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=2, + clim=dict(kind="value", lims=(0, 2, 4)), +) +brain_ple_dspm.add_text(0.1, 0.9, "PLE dSPM", "title", font_size=16) # %% # Subtract the two distributions and plot this difference diff_ple = ple_mne_psf - ple_dspm_psf -brain_ple_diff = diff_ple.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=3, - clim=dict(kind='value', pos_lims=(0., 1., 2.))) -brain_ple_diff.add_text(0.1, 0.9, 'PLE MNE-dSPM', 'title', font_size=16) +brain_ple_diff = diff_ple.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=3, + clim=dict(kind="value", pos_lims=(0.0, 1.0, 2.0)), +) +brain_ple_diff.add_text(0.1, 0.9, "PLE MNE-dSPM", "title", font_size=16) # %% # These plots show that dSPM has generally lower peak localization error (red @@ -115,28 +134,43 @@ # Next we'll visualise spatial deviation (SD) across the whole cortex for MNE # PSF: -brain_sd_mne = sd_mne_psf.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=4, - clim=dict(kind='value', lims=(0, 2, 4))) -brain_sd_mne.add_text(0.1, 0.9, 'SD MNE', 'title', font_size=16) +brain_sd_mne = sd_mne_psf.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=4, + clim=dict(kind="value", lims=(0, 2, 4)), +) +brain_sd_mne.add_text(0.1, 0.9, "SD MNE", "title", font_size=16) # %% # And dSPM: -brain_sd_dspm = sd_dspm_psf.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=5, - clim=dict(kind='value', lims=(0, 2, 4))) -brain_sd_dspm.add_text(0.1, 0.9, 'SD dSPM', 'title', font_size=16) +brain_sd_dspm = sd_dspm_psf.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=5, + clim=dict(kind="value", lims=(0, 2, 4)), +) +brain_sd_dspm.add_text(0.1, 0.9, "SD dSPM", "title", font_size=16) # %% # Subtract the two distributions and plot this difference: diff_sd = sd_mne_psf - sd_dspm_psf -brain_sd_diff = diff_sd.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=6, - clim=dict(kind='value', pos_lims=(0., 1., 2.))) -brain_sd_diff.add_text(0.1, 0.9, 'SD MNE-dSPM', 'title', font_size=16) +brain_sd_diff = diff_sd.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=6, + clim=dict(kind="value", pos_lims=(0.0, 1.0, 2.0)), +) +brain_sd_diff.add_text(0.1, 0.9, "SD MNE-dSPM", "title", font_size=16) # %% # These plots show that dSPM has generally higher spatial deviation than MNE diff --git a/examples/inverse/resolution_metrics_eegmeg.py b/examples/inverse/resolution_metrics_eegmeg.py index 3c3b67ca926..51acd51fd94 100644 --- a/examples/inverse/resolution_metrics_eegmeg.py +++ b/examples/inverse/resolution_metrics_eegmeg.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-res-metrics-meeg: @@ -17,6 +16,7 @@ # Author: Olaf Hauk # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% @@ -28,17 +28,18 @@ print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects/' -meg_path = data_path / 'MEG' / 'sample' -fname_fwd_emeg = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -fname_cov = meg_path / 'sample_audvis-cov.fif' -fname_evo = meg_path / 'sample_audvis-ave.fif' +subjects_dir = data_path / "subjects/" +meg_path = data_path / "MEG" / "sample" +fname_fwd_emeg = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +fname_cov = meg_path / "sample_audvis-cov.fif" +fname_evo = meg_path / "sample_audvis-ave.fif" # read forward solution with EEG and MEG forward_emeg = mne.read_forward_solution(fname_fwd_emeg) # forward operator with fixed source orientations -forward_emeg = mne.convert_forward_solution(forward_emeg, surf_ori=True, - force_fixed=True) +forward_emeg = mne.convert_forward_solution( + forward_emeg, surf_ori=True, force_fixed=True +) # create a forward solution with MEG only forward_meg = mne.pick_types_forward(forward_emeg, meg=True, eeg=False) @@ -51,16 +52,16 @@ # make inverse operator from forward solution for MEG and EEGMEG inv_emeg = mne.minimum_norm.make_inverse_operator( - info=evoked.info, forward=forward_emeg, noise_cov=noise_cov, loose=0., - depth=None) + info=evoked.info, forward=forward_emeg, noise_cov=noise_cov, loose=0.0, depth=None +) inv_meg = mne.minimum_norm.make_inverse_operator( - info=evoked.info, forward=forward_meg, noise_cov=noise_cov, loose=0., - depth=None) + info=evoked.info, forward=forward_meg, noise_cov=noise_cov, loose=0.0, depth=None +) # regularisation parameter snr = 3.0 -lambda2 = 1.0 / snr ** 2 +lambda2 = 1.0 / snr**2 # %% # EEGMEG @@ -68,12 +69,15 @@ # Compute resolution matrices, localization error, and spatial deviations # for MNE: -rm_emeg = make_inverse_resolution_matrix(forward_emeg, inv_emeg, - method='MNE', lambda2=lambda2) -ple_psf_emeg = resolution_metrics(rm_emeg, inv_emeg['src'], - function='psf', metric='peak_err') -sd_psf_emeg = resolution_metrics(rm_emeg, inv_emeg['src'], - function='psf', metric='sd_ext') +rm_emeg = make_inverse_resolution_matrix( + forward_emeg, inv_emeg, method="MNE", lambda2=lambda2 +) +ple_psf_emeg = resolution_metrics( + rm_emeg, inv_emeg["src"], function="psf", metric="peak_err" +) +sd_psf_emeg = resolution_metrics( + rm_emeg, inv_emeg["src"], function="psf", metric="sd_ext" +) del rm_emeg # %% @@ -81,12 +85,13 @@ # --- # Do the same for MEG: -rm_meg = make_inverse_resolution_matrix(forward_meg, inv_meg, - method='MNE', lambda2=lambda2) -ple_psf_meg = resolution_metrics(rm_meg, inv_meg['src'], - function='psf', metric='peak_err') -sd_psf_meg = resolution_metrics(rm_meg, inv_meg['src'], - function='psf', metric='sd_ext') +rm_meg = make_inverse_resolution_matrix( + forward_meg, inv_meg, method="MNE", lambda2=lambda2 +) +ple_psf_meg = resolution_metrics( + rm_meg, inv_meg["src"], function="psf", metric="peak_err" +) +sd_psf_meg = resolution_metrics(rm_meg, inv_meg["src"], function="psf", metric="sd_ext") del rm_meg # %% @@ -94,64 +99,94 @@ # ------------- # Look at peak localisation error (PLE) across the whole cortex for PSF: -brain_ple_emeg = ple_psf_emeg.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=1, - clim=dict(kind='value', lims=(0, 2, 4))) +brain_ple_emeg = ple_psf_emeg.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=1, + clim=dict(kind="value", lims=(0, 2, 4)), +) -brain_ple_emeg.add_text(0.1, 0.9, 'PLE PSF EMEG', 'title', font_size=16) +brain_ple_emeg.add_text(0.1, 0.9, "PLE PSF EMEG", "title", 
font_size=16) # %% # For MEG only: -brain_ple_meg = ple_psf_meg.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=2, - clim=dict(kind='value', lims=(0, 2, 4))) +brain_ple_meg = ple_psf_meg.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=2, + clim=dict(kind="value", lims=(0, 2, 4)), +) -brain_ple_meg.add_text(0.1, 0.9, 'PLE PSF MEG', 'title', font_size=16) +brain_ple_meg.add_text(0.1, 0.9, "PLE PSF MEG", "title", font_size=16) # %% # Subtract the two distributions and plot this difference: diff_ple = ple_psf_emeg - ple_psf_meg -brain_ple_diff = diff_ple.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=3, - clim=dict(kind='value', pos_lims=(0., .5, 1.)), - smoothing_steps=20) +brain_ple_diff = diff_ple.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=3, + clim=dict(kind="value", pos_lims=(0.0, 0.5, 1.0)), + smoothing_steps=20, +) -brain_ple_diff.add_text(0.1, 0.9, 'PLE EMEG-MEG', 'title', font_size=16) +brain_ple_diff.add_text(0.1, 0.9, "PLE EMEG-MEG", "title", font_size=16) # %% # These plots show that with respect to peak localization error, adding EEG to # MEG does not bring much benefit. Next let's visualise spatial deviation (SD) # across the whole cortex for PSF: -brain_sd_emeg = sd_psf_emeg.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=4, - clim=dict(kind='value', lims=(0, 2, 4))) +brain_sd_emeg = sd_psf_emeg.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=4, + clim=dict(kind="value", lims=(0, 2, 4)), +) -brain_sd_emeg.add_text(0.1, 0.9, 'SD PSF EMEG', 'title', font_size=16) +brain_sd_emeg.add_text(0.1, 0.9, "SD PSF EMEG", "title", font_size=16) # %% # For MEG only: -brain_sd_meg = sd_psf_meg.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=5, - clim=dict(kind='value', lims=(0, 2, 4))) +brain_sd_meg = sd_psf_meg.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=5, + clim=dict(kind="value", lims=(0, 2, 4)), +) -brain_sd_meg.add_text(0.1, 0.9, 'SD PSF MEG', 'title', font_size=16) +brain_sd_meg.add_text(0.1, 0.9, "SD PSF MEG", "title", font_size=16) # %% # Subtract the two distributions and plot this difference: diff_sd = sd_psf_emeg - sd_psf_meg -brain_sd_diff = diff_sd.plot('sample', 'inflated', 'lh', - subjects_dir=subjects_dir, figure=6, - clim=dict(kind='value', pos_lims=(0., .5, 1.)), - smoothing_steps=20) - -brain_sd_diff.add_text(0.1, 0.9, 'SD EMEG-MEG', 'title', font_size=16) +brain_sd_diff = diff_sd.plot( + "sample", + "inflated", + "lh", + subjects_dir=subjects_dir, + figure=6, + clim=dict(kind="value", pos_lims=(0.0, 0.5, 1.0)), + smoothing_steps=20, +) + +brain_sd_diff.add_text(0.1, 0.9, "SD EMEG-MEG", "title", font_size=16) # %% # Adding EEG to MEG decreases the spatial extent of point-spread diff --git a/examples/inverse/snr_estimate.py b/examples/inverse/snr_estimate.py index ccf6385e14e..fda8ada5d0c 100644 --- a/examples/inverse/snr_estimate.py +++ b/examples/inverse/snr_estimate.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-snr-estimate: @@ -12,19 +11,20 @@ # Author: Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
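Editor's note (not part of the patch): the snr_estimate.py hunks that follow only touch the imports and path handling; for context, the core of that example is a single plotting call, sketched here from the public API rather than copied from the patch.

import mne
from mne.datasets.sample import data_path
from mne.minimum_norm import read_inverse_operator
from mne.viz import plot_snr_estimate

data_dir = data_path() / "MEG" / "sample"
inv = read_inverse_operator(data_dir / "sample_audvis-meg-oct-6-meg-inv.fif")
evoked = mne.read_evokeds(data_dir / "sample_audvis-ave.fif", baseline=(None, 0))[0]

# estimated SNR (and SNR in dB) of the evoked data as a function of time
plot_snr_estimate(evoked, inv)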
# %% +from mne import read_evokeds from mne.datasets.sample import data_path from mne.minimum_norm import read_inverse_operator -from mne import read_evokeds from mne.viz import plot_snr_estimate print(__doc__) -data_dir = data_path() / 'MEG' / 'sample' -fname_inv = data_dir / 'sample_audvis-meg-oct-6-meg-inv.fif' -fname_evoked = data_dir / 'sample_audvis-ave.fif' +data_dir = data_path() / "MEG" / "sample" +fname_inv = data_dir / "sample_audvis-meg-oct-6-meg-inv.fif" +fname_evoked = data_dir / "sample_audvis-ave.fif" inv = read_inverse_operator(fname_inv) evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0] diff --git a/examples/inverse/source_space_snr.py b/examples/inverse/source_space_snr.py index cdcdbb6351c..965c57d86ca 100644 --- a/examples/inverse/source_space_snr.py +++ b/examples/inverse/source_space_snr.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-source-space-snr: @@ -9,33 +8,35 @@ This example shows how to compute and plot source space SNR as in :footcite:`GoldenholzEtAl2009`. """ + # Author: Padma Sundaram # Kaisu Lankinen # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% # sphinx_gallery_thumbnail_number = 2 +import matplotlib.pyplot as plt +import numpy as np + import mne from mne.datasets import sample -from mne.minimum_norm import make_inverse_operator, apply_inverse -import numpy as np -import matplotlib.pyplot as plt +from mne.minimum_norm import apply_inverse, make_inverse_operator print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' +subjects_dir = data_path / "subjects" # Read data -meg_path = data_path / 'MEG' / 'sample' -fname_evoked = meg_path / 'sample_audvis-ave.fif' -evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory', - baseline=(None, 0)) -fname_fwd = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -fname_cov = meg_path / 'sample_audvis-cov.fif' +meg_path = data_path / "MEG" / "sample" +fname_evoked = meg_path / "sample_audvis-ave.fif" +evoked = mne.read_evokeds(fname_evoked, condition="Left Auditory", baseline=(None, 0)) +fname_fwd = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +fname_cov = meg_path / "sample_audvis-cov.fif" fwd = mne.read_forward_solution(fname_fwd) cov = mne.read_cov(fname_cov) @@ -44,8 +45,8 @@ # Calculate MNE: snr = 3.0 -lambda2 = 1.0 / snr ** 2 -stc = apply_inverse(evoked, inv_op, lambda2, 'MNE', verbose=True) +lambda2 = 1.0 / snr**2 +stc = apply_inverse(evoked, inv_op, lambda2, "MNE", verbose=True) # Calculate SNR in source space: snr_stc = stc.estimate_snr(evoked.info, fwd, cov) @@ -53,19 +54,24 @@ # Plot an average SNR across source points over time: ave = np.mean(snr_stc.data, axis=0) -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ax.plot(evoked.times, ave) -ax.set(xlabel='Time (s)', ylabel='SNR MEG-EEG') -fig.tight_layout() +ax.set(xlabel="Time (s)", ylabel="SNR MEG-EEG") # Find time point of maximum SNR maxidx = np.argmax(ave) # Plot SNR on source space at the time point of maximum SNR: -kwargs = dict(initial_time=evoked.times[maxidx], hemi='split', - views=['lat', 'med'], subjects_dir=subjects_dir, size=(600, 600), - clim=dict(kind='value', lims=(-100, -70, -40)), - transparent=True, colormap='viridis') +kwargs = dict( + initial_time=evoked.times[maxidx], + hemi="split", + views=["lat", "med"], + subjects_dir=subjects_dir, + size=(600, 600), + clim=dict(kind="value", lims=(-100, -70, -40)), + transparent=True, + colormap="viridis", +) brain = snr_stc.plot(**kwargs) # %% @@ -73,10 +79,9 @@ # --- # Next we do the same 
for EEG and plot the result on the cortex: -evoked_eeg = evoked.copy().pick_types(eeg=True, meg=False) -inv_op_eeg = make_inverse_operator(evoked_eeg.info, fwd, cov, fixed=True, - verbose=True) -stc_eeg = apply_inverse(evoked_eeg, inv_op_eeg, lambda2, 'MNE', verbose=True) +evoked_eeg = evoked.copy().pick(picks="eeg", exclude="bads") +inv_op_eeg = make_inverse_operator(evoked_eeg.info, fwd, cov, fixed=True, verbose=True) +stc_eeg = apply_inverse(evoked_eeg, inv_op_eeg, lambda2, "MNE", verbose=True) snr_stc_eeg = stc_eeg.estimate_snr(evoked_eeg.info, fwd, cov) brain = snr_stc_eeg.plot(**kwargs) diff --git a/examples/inverse/time_frequency_mixed_norm_inverse.py b/examples/inverse/time_frequency_mixed_norm_inverse.py index db55fa60b8f..bdd1134f39a 100644 --- a/examples/inverse/time_frequency_mixed_norm_inverse.py +++ b/examples/inverse/time_frequency_mixed_norm_inverse.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-tfr-mixed-norm: @@ -24,6 +23,7 @@ # Daniel Strohmeier # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -31,27 +31,29 @@ import mne from mne.datasets import sample -from mne.minimum_norm import make_inverse_operator, apply_inverse -from mne.inverse_sparse import tf_mixed_norm, make_stc_from_dipoles -from mne.viz import (plot_sparse_source_estimates, - plot_dipole_locations, plot_dipole_amplitudes) +from mne.inverse_sparse import make_stc_from_dipoles, tf_mixed_norm +from mne.minimum_norm import apply_inverse, make_inverse_operator +from mne.viz import ( + plot_dipole_amplitudes, + plot_dipole_locations, + plot_sparse_source_estimates, +) print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -ave_fname = meg_path / 'sample_audvis-no-filter-ave.fif' -cov_fname = meg_path / 'sample_audvis-shrunk-cov.fif' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +ave_fname = meg_path / "sample_audvis-no-filter-ave.fif" +cov_fname = meg_path / "sample_audvis-shrunk-cov.fif" # Read noise covariance matrix cov = mne.read_cov(cov_fname) # Handling average file -condition = 'Left visual' +condition = "Left visual" evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0)) -evoked = mne.pick_channels_evoked(evoked) # We make the window slightly larger than what you'll eventually be interested # in ([-0.05, 0.3]) to avoid edge effects. evoked.crop(tmin=-0.1, tmax=0.4) @@ -63,7 +65,7 @@ # Run solver # alpha parameter is between 0 and 100 (100 gives 0 active source) -alpha = 40. # general regularization parameter +alpha = 40.0 # general regularization parameter # l1_ratio parameter between 0 and 1 promotes temporal smoothness # (0 means no temporal regularization) l1_ratio = 0.03 # temporal regularization parameter @@ -71,17 +73,31 @@ loose, depth = 0.2, 0.9 # loose orientation & depth weighting # Compute dSPM solution to be used as weights in MxNE -inverse_operator = make_inverse_operator(evoked.info, forward, cov, - loose=loose, depth=depth) -stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. 
/ 9., - method='dSPM') +inverse_operator = make_inverse_operator( + evoked.info, forward, cov, loose=loose, depth=depth +) +stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1.0 / 9.0, method="dSPM") # Compute TF-MxNE inverse solution with dipole output dipoles, residual = tf_mixed_norm( - evoked, forward, cov, alpha=alpha, l1_ratio=l1_ratio, loose=loose, - depth=depth, maxit=200, tol=1e-6, weights=stc_dspm, weights_min=8., - debias=True, wsize=16, tstep=4, window=0.05, return_as_dipoles=True, - return_residual=True) + evoked, + forward, + cov, + alpha=alpha, + l1_ratio=l1_ratio, + loose=loose, + depth=depth, + maxit=200, + tol=1e-6, + weights=stc_dspm, + weights_min=8.0, + debias=True, + wsize=16, + tstep=4, + window=0.05, + return_as_dipoles=True, + return_residual=True, +) # Crop to remove edges for dip in dipoles: @@ -96,9 +112,14 @@ # %% # Plot location of the strongest dipole with MRI slices idx = np.argmax([np.max(np.abs(dip.amplitude)) for dip in dipoles]) -plot_dipole_locations(dipoles[idx], forward['mri_head_t'], 'sample', - subjects_dir=subjects_dir, mode='orthoview', - idx='amplitude') +plot_dipole_locations( + dipoles[idx], + forward["mri_head_t"], + "sample", + subjects_dir=subjects_dir, + mode="orthoview", + idx="amplitude", +) # # Plot dipole locations of all dipoles with MRI slices: # for dip in dipoles: @@ -109,31 +130,51 @@ # %% # Show the evoked response and the residual for gradiometers ylim = dict(grad=[-120, 120]) -evoked.pick_types(meg='grad', exclude='bads') -evoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim, - proj=True, time_unit='s') - -residual.pick_types(meg='grad', exclude='bads') -residual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim, - proj=True, time_unit='s') +evoked.pick(picks="grad", exclude="bads") +evoked.plot( + titles=dict(grad="Evoked Response: Gradiometers"), + ylim=ylim, + proj=True, + time_unit="s", +) + +residual.pick(picks="grad", exclude="bads") +residual.plot( + titles=dict(grad="Residuals: Gradiometers"), ylim=ylim, proj=True, time_unit="s" +) # %% # Generate stc from dipoles -stc = make_stc_from_dipoles(dipoles, forward['src']) +stc = make_stc_from_dipoles(dipoles, forward["src"]) # %% # View in 2D and 3D ("glass" brain like 3D plot) -plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1), - opacity=0.1, fig_name="TF-MxNE (cond %s)" - % condition, modes=['sphere'], scale_factors=[1.]) - -time_label = 'TF-MxNE time=%0.2f ms' -clim = dict(kind='value', lims=[10e-9, 15e-9, 20e-9]) -brain = stc.plot('sample', 'inflated', 'rh', views='medial', - clim=clim, time_label=time_label, smoothing_steps=5, - subjects_dir=subjects_dir, initial_time=150, time_unit='ms') -brain.add_label("V1", color="yellow", scalar_thresh=.5, borders=True) -brain.add_label("V2", color="red", scalar_thresh=.5, borders=True) +plot_sparse_source_estimates( + forward["src"], + stc, + bgcolor=(1, 1, 1), + opacity=0.1, + fig_name=f"TF-MxNE (cond {condition})", + modes=["sphere"], + scale_factors=[1.0], +) + +time_label = "TF-MxNE time=%0.2f ms" +clim = dict(kind="value", lims=[10e-9, 15e-9, 20e-9]) +brain = stc.plot( + "sample", + "inflated", + "rh", + views="medial", + clim=clim, + time_label=time_label, + smoothing_steps=5, + subjects_dir=subjects_dir, + initial_time=150, + time_unit="ms", +) +brain.add_label("V1", color="yellow", scalar_thresh=0.5, borders=True) +brain.add_label("V2", color="red", scalar_thresh=0.5, borders=True) # %% # References diff --git a/examples/inverse/trap_music.py 
b/examples/inverse/trap_music.py new file mode 100644 index 00000000000..08dee1e54a9 --- /dev/null +++ b/examples/inverse/trap_music.py @@ -0,0 +1,61 @@ +""" +.. _ex-trap-music: + +================================= +Compute Trap-Music on evoked data +================================= + +Compute a Truncated Recursively Applied and Projected MUltiple Signal Classification +(TRAP-MUSIC) :footcite:`Makela2018` on evoked data. +""" + +# Author: Théodore Papadopoulo +# +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# %% + +import mne +from mne.beamformer import trap_music +from mne.datasets import sample +from mne.viz import plot_dipole_amplitudes, plot_dipole_locations + +print(__doc__) + +data_path = sample.data_path() +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +evoked_fname = meg_path / "sample_audvis-ave.fif" +cov_fname = meg_path / "sample_audvis-cov.fif" + +# Read the evoked response and crop it +condition = "Right Auditory" +evoked = mne.read_evokeds(evoked_fname, condition=condition, baseline=(None, 0)) +# select N100 +evoked.crop(tmin=0.05, tmax=0.15) + +evoked.pick(picks="meg", exclude="bads") + +# Read the forward solution +forward = mne.read_forward_solution(fwd_fname) + +# Read noise covariance matrix +noise_cov = mne.read_cov(cov_fname) + +dipoles, residual = trap_music( + evoked, forward, noise_cov, n_dipoles=2, return_residual=True, verbose=True +) +trans = forward["mri_head_t"] +plot_dipole_locations(dipoles, trans, "sample", subjects_dir=subjects_dir) +plot_dipole_amplitudes(dipoles) + +# Plot the evoked data and the residual. +evoked.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]), time_unit="s") +residual.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]), time_unit="s") + +# %% +# References +# ---------- +# .. footbibliography:: diff --git a/examples/inverse/vector_mne_solution.py b/examples/inverse/vector_mne_solution.py index baa0f65e335..f6ae788c145 100644 --- a/examples/inverse/vector_mne_solution.py +++ b/examples/inverse/vector_mne_solution.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-vector-mne-solution: @@ -23,45 +22,50 @@ # Author: Marijn van Vliet # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% import numpy as np + import mne from mne.datasets import sample -from mne.minimum_norm import read_inverse_operator, apply_inverse +from mne.minimum_norm import apply_inverse, read_inverse_operator print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' +subjects_dir = data_path / "subjects" smoothing_steps = 7 # Read evoked data -meg_path = data_path / 'MEG' / 'sample' -fname_evoked = meg_path / 'sample_audvis-ave.fif' +meg_path = data_path / "MEG" / "sample" +fname_evoked = meg_path / "sample_audvis-ave.fif" evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0)) # Read inverse solution -fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif' +fname_inv = meg_path / "sample_audvis-meg-oct-6-meg-inv.fif" inv = read_inverse_operator(fname_inv) # Apply inverse solution, set pick_ori='vector' to obtain a # :class:`mne.VectorSourceEstimate` object snr = 3.0 -lambda2 = 1.0 / snr ** 2 -stc = apply_inverse(evoked, inv, lambda2, 'dSPM', pick_ori='vector') +lambda2 = 1.0 / snr**2 +stc = apply_inverse(evoked, inv, lambda2, "dSPM", pick_ori="vector") # Use peak getter to move visualization to the time point of the peak magnitude -_, peak_time = stc.magnitude().get_peak(hemi='lh') +_, peak_time = stc.magnitude().get_peak(hemi="lh") # %% # Plot the source estimate: # sphinx_gallery_thumbnail_number = 2 brain = stc.plot( - initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir, - smoothing_steps=smoothing_steps) + initial_time=peak_time, + hemi="lh", + subjects_dir=subjects_dir, + smoothing_steps=smoothing_steps, +) # You can save a brain movie with: # brain.save_movie(time_dilation=20, tmin=0.05, tmax=0.16, framerate=10, @@ -70,32 +74,43 @@ # %% # Plot the activation in the direction of maximal power for this data: -stc_max, directions = stc.project('pca', src=inv['src']) +stc_max, directions = stc.project("pca", src=inv["src"]) # These directions must by design be close to the normals because this # inverse was computed with loose=0.2 -print('Absolute cosine similarity between source normals and directions: ' - f'{np.abs(np.sum(directions * inv["source_nn"][2::3], axis=-1)).mean()}') +print( + "Absolute cosine similarity between source normals and directions: " + f"{np.abs(np.sum(directions * inv['source_nn'][2::3], axis=-1)).mean()}" +) brain_max = stc_max.plot( - initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir, - time_label='Max power', smoothing_steps=smoothing_steps) + initial_time=peak_time, + hemi="lh", + subjects_dir=subjects_dir, + time_label="Max power", + smoothing_steps=smoothing_steps, +) # %% # The normal is very similar: -brain_normal = stc.project('normal', inv['src'])[0].plot( - initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir, - time_label='Normal', smoothing_steps=smoothing_steps) +brain_normal = stc.project("normal", inv["src"])[0].plot( + initial_time=peak_time, + hemi="lh", + subjects_dir=subjects_dir, + time_label="Normal", + smoothing_steps=smoothing_steps, +) # %% # You can also do this with a fixed-orientation inverse. 
It looks a lot like # the result above because the ``loose=0.2`` orientation constraint keeps # sources close to fixed orientation: -fname_inv_fixed = ( - meg_path / 'sample_audvis-meg-oct-6-meg-fixed-inv.fif') +fname_inv_fixed = meg_path / "sample_audvis-meg-oct-6-meg-fixed-inv.fif" inv_fixed = read_inverse_operator(fname_inv_fixed) -stc_fixed = apply_inverse( - evoked, inv_fixed, lambda2, 'dSPM', pick_ori='vector') +stc_fixed = apply_inverse(evoked, inv_fixed, lambda2, "dSPM", pick_ori="vector") brain_fixed = stc_fixed.plot( - initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir, - smoothing_steps=smoothing_steps) + initial_time=peak_time, + hemi="lh", + subjects_dir=subjects_dir, + smoothing_steps=smoothing_steps, +) diff --git a/examples/io/elekta_epochs.py b/examples/io/elekta_epochs.py index 6a3fff183ad..35922b69531 100644 --- a/examples/io/elekta_epochs.py +++ b/examples/io/elekta_epochs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-io-ave-fiff: @@ -6,22 +5,22 @@ Getting averaging info from .fif files ====================================== -Parse averaging information defined in Elekta Vectorview/TRIUX DACQ (data -acquisition). Extract and average epochs accordingly. Modify some -averaging parameters and get epochs. +Parse averaging information defined in Elekta Vectorview/TRIUX DACQ (data acquisition). +Extract and average epochs accordingly. Modify some averaging parameters and get epochs. """ # Author: Jussi Nurminen (jnu@iki.fi) # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% +import os import mne -import os from mne.datasets import multimodal -fname_raw = os.path.join(multimodal.data_path(), 'multimodal_raw.fif') +fname_raw = os.path.join(multimodal.data_path(), "multimodal_raw.fif") print(__doc__) @@ -36,9 +35,9 @@ # %% # Extract epochs corresponding to a category -cond = raw.acqparser.get_condition(raw, 'Auditory right') +cond = raw.acqparser.get_condition(raw, "Auditory right") epochs = mne.Epochs(raw, **cond) -epochs.average().plot_topo(background_color='w') +epochs.average().plot_topo(background_color="w") # %% # Get epochs from all conditions, average @@ -46,28 +45,25 @@ for cat in raw.acqparser.categories: cond = raw.acqparser.get_condition(raw, cat) # copy (supported) rejection parameters from DACQ settings - epochs = mne.Epochs(raw, reject=raw.acqparser.reject, - flat=raw.acqparser.flat, **cond) + epochs = mne.Epochs( + raw, reject=raw.acqparser.reject, flat=raw.acqparser.flat, **cond + ) evoked = epochs.average() - evoked.comment = cat['comment'] + evoked.comment = cat["comment"] evokeds.append(evoked) -# save all averages to an evoked fiff file -# fname_out = 'multimodal-ave.fif' -# mne.write_evokeds(fname_out, evokeds) # %% # Make a new averaging category newcat = dict() -newcat['comment'] = 'Visual lower left, longer epochs' -newcat['event'] = 3 # reference event -newcat['start'] = -.2 # epoch start rel. to ref. event (in seconds) -newcat['end'] = .7 # epoch end -newcat['reqevent'] = 0 # additional required event; 0 if none -newcat['reqwithin'] = .5 # ...required within .5 s (before or after) -newcat['reqwhen'] = 2 # ...required before (1) or after (2) ref. event -newcat['index'] = 9 # can be set freely +newcat["comment"] = "Visual lower left, longer epochs" +newcat["event"] = 3 # reference event +newcat["start"] = -0.2 # epoch start rel. to ref. 
event (in seconds) +newcat["end"] = 0.7 # epoch end +newcat["reqevent"] = 0 # additional required event; 0 if none +newcat["reqwithin"] = 0.5 # ...required within .5 s (before or after) +newcat["reqwhen"] = 2 # ...required before (1) or after (2) ref. event +newcat["index"] = 9 # can be set freely cond = raw.acqparser.get_condition(raw, newcat) -epochs = mne.Epochs(raw, reject=raw.acqparser.reject, - flat=raw.acqparser.flat, **cond) -epochs.average().plot(time_unit='s') +epochs = mne.Epochs(raw, reject=raw.acqparser.reject, flat=raw.acqparser.flat, **cond) +epochs.average().plot(time_unit="s") diff --git a/examples/io/read_impedances.py b/examples/io/read_impedances.py new file mode 100644 index 00000000000..e1b7061c4c9 --- /dev/null +++ b/examples/io/read_impedances.py @@ -0,0 +1,77 @@ +""" +.. _ex-io-impedances: + +================================= +Getting impedances from raw files +================================= + +Many EEG systems provide impedance measurements for each channel within their file +format. MNE does not parse this information and does not store it in the +:class:`~mne.io.Raw` object. However, it is possible to extract this information from +the raw data and store it in a separate data structure. + +ANT Neuro +--------- + +The ``.cnt`` file format from ANT Neuro stores impedance information in the form of +triggers. The function :func:`mne.io.read_raw_ant` reads this information and marks the +time-segment during which an impedance measurement was performed as +:class:`~mne.Annotations` with the description set in the argument +``impedance_annotation``. However, it doesn't extract the impedance values themselves. +To do so, use the function ``antio.parser.read_triggers``. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from antio import read_cnt +from antio.parser import read_triggers +from matplotlib import pyplot as plt + +from mne.datasets import testing +from mne.io import read_raw_ant +from mne.viz import plot_topomap + +fname = testing.data_path() / "antio" / "CA_208" / "test_CA_208.cnt" +cnt = read_cnt(fname) +_, _, _, impedances, _ = read_triggers(cnt) + +raw = read_raw_ant(fname, eog=r"EOG") +impedances = [{ch: imp[k] for k, ch in enumerate(raw.ch_names)} for imp in impedances] +print(impedances[0]) # impedances measurement at the beginning of the recording + +# %% +# Note that the impedance measurement contains all channels, including the bipolar ones. +# We can visualize the impedances on a topographic map; below we show a topography of +# impedances before and after the recording for the EEG channels only. + +raw.pick("eeg").set_montage("standard_1020") +impedances = [{ch: imp[ch] for ch in raw.ch_names} for imp in impedances] + +f, ax = plt.subplots(1, 2, layout="constrained", figsize=(10, 5)) +f.suptitle("Impedances (kOhm)") +impedance = list(impedances[0].values()) +plot_topomap( + impedance, + raw.info, + vlim=(0, 50), + axes=ax[0], + show=False, + names=[f"{elt:.1f}" for elt in impedance], +) +ax[0].set_title("Impedances at the beginning of the recording") +impedance = list(impedances[-1].values()) +plot_topomap( + impedance, + raw.info, + vlim=(0, 50), + axes=ax[1], + show=False, + names=[f"{elt:.1f}" for elt in impedance], +) +ax[1].set_title("Impedances at the end of the recording") +plt.show() + +# %% +# In this very short test file, the impedances are stable over time. 
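A minimal follow-up sketch, reusing the ``impedances`` list of per-channel dictionaries built above and assuming an arbitrary 5 kOhm cut-off: one way to quantify that stability is to compare the first and last measurements channel by channel and flag anything that drifted by more than the cut-off.

drift_threshold = 5.0  # kOhm; arbitrary cut-off chosen purely for illustration
# per-channel change between the last and the first impedance measurement
drift = {ch: impedances[-1][ch] - impedances[0][ch] for ch in impedances[0]}
drifted = {ch: round(val, 1) for ch, val in drift.items() if abs(val) > drift_threshold}
print(f"Channels drifting by more than {drift_threshold} kOhm: {drifted or 'none'}")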
diff --git a/examples/io/read_neo_format.py b/examples/io/read_neo_format.py index d000a49a7b1..a34ff0cd4f4 100644 --- a/examples/io/read_neo_format.py +++ b/examples/io/read_neo_format.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-read-neo: @@ -7,31 +6,34 @@ =============================================== This example shows how to create an MNE-Python `~mne.io.Raw` object from data -in the `neural ensemble `__ format. For general +in the `neural ensemble `_ format. For general information on creating MNE-Python's data objects from NumPy arrays, see :ref:`tut-creating-data-structures`. """ -# %% +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import neo + import mne # %% -# This example uses NEO's ``ExampleIO`` object for creating fake data. The data -# will be all zeros, so the plot won't be very interesting, but it should -# demonstrate the steps to using NEO data. For actual data and different file -# formats, consult the NEO documentation. +# This example uses NEO's ``ExampleIO`` object for creating fake data. The data will be +# all zeros, so the plot won't be very interesting, but it should demonstrate the steps +# to using NEO data. For actual data and different file formats, consult the NEO +# documentation. -reader = neo.io.ExampleIO('fakedata.nof') +reader = neo.io.ExampleIO("fakedata.nof") block = reader.read(lazy=False)[0] # get the first block -segment = block.segments[0] # get data from first (and only) segment +segment = block.segments[0] # get data from first (and only) segment signals = segment.analogsignals[0] # get first (multichannel) signal -data = signals.rescale('V').magnitude.T +data = signals.rescale("V").magnitude.T sfreq = signals.sampling_rate.magnitude -ch_names = [f'Neo {(idx + 1):02}' for idx in range(signals.shape[1])] -ch_types = ['eeg'] * len(ch_names) # if not specified, type 'misc' is assumed +ch_names = [f"Neo {(idx + 1):02}" for idx in range(signals.shape[1])] +ch_types = ["eeg"] * len(ch_names) # if not specified, type 'misc' is assumed info = mne.create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq) raw = mne.io.RawArray(data, info) diff --git a/examples/io/read_noise_covariance_matrix.py b/examples/io/read_noise_covariance_matrix.py index 1cf604f521f..b8b1dc5832c 100644 --- a/examples/io/read_noise_covariance_matrix.py +++ b/examples/io/read_noise_covariance_matrix.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-read-noise-cov: @@ -11,6 +10,7 @@ # Author: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -18,8 +18,8 @@ from mne.datasets import sample data_path = sample.data_path() -fname_cov = data_path / 'MEG' / 'sample' / 'sample_audvis-cov.fif' -fname_evo = data_path / 'MEG' / 'sample' / 'sample_audvis-ave.fif' +fname_cov = data_path / "MEG" / "sample" / "sample_audvis-cov.fif" +fname_evo = data_path / "MEG" / "sample" / "sample_audvis-ave.fif" cov = mne.read_cov(fname_cov) print(cov) @@ -28,4 +28,4 @@ # %% # Plot covariance -cov.plot(ev_info, exclude='bads', show_svd=False) +cov.plot(ev_info, exclude="bads", show_svd=False) diff --git a/examples/io/read_xdf.py b/examples/io/read_xdf.py index 475dd3c1ee0..ee8524702a5 100644 --- a/examples/io/read_xdf.py +++ b/examples/io/read_xdf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-read-xdf: @@ -8,13 +7,13 @@ Here we read some sample XDF data. 
Although we do not analyze it here, this recording is of a short parallel auditory response (pABR) experiment -:footcite:`PolonenkoMaddox2019` and was provided by the `Maddox Lab -`__. +:footcite:`PolonenkoMaddox2019` and was provided by the `Maddox Lab `_. """ # Authors: Clemens Brunner # Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -23,15 +22,13 @@ import mne from mne.datasets import misc -fname = ( - misc.data_path() / 'xdf' / - 'sub-P001_ses-S004_task-Default_run-001_eeg_a2.xdf') +fname = misc.data_path() / "xdf" / "sub-P001_ses-S004_task-Default_run-001_eeg_a2.xdf" streams, header = pyxdf.load_xdf(fname) data = streams[0]["time_series"].T assert data.shape[0] == 5 # four raw EEG plus one stim channel data[:4:2] -= data[1:4:2] # subtract (rereference) to get two bipolar EEG data = data[::2] # subselect -data[:2] *= (1e-6 / 50 / 2) # uV -> V and preamp gain +data[:2] *= 1e-6 / 50 / 2 # uV -> V and preamp gain sfreq = float(streams[0]["info"]["nominal_srate"][0]) info = mne.create_info(3, sfreq, ["eeg", "eeg", "stim"]) raw = mne.io.RawArray(data, info) diff --git a/examples/preprocessing/README.txt b/examples/preprocessing/README.txt index ff3ee6ed6c2..bd197e073b5 100644 --- a/examples/preprocessing/README.txt +++ b/examples/preprocessing/README.txt @@ -4,3 +4,26 @@ Preprocessing Examples related to data preprocessing (artifact detection / rejection etc.) +.. raw:: html + +
+    <div class="sphx-glr-thumbnails">
+
+.. raw:: html
+
+    <div class="sphx-glr-thumbcontainer" tooltip="Locating micro-scale intracranial electrode contacts">
+
+.. only:: html
+
+  .. image:: https://mne.tools/mne-gui-addons/_images/sphx_glr_locate_ieeg_micro_001.png
+    :alt:
+
+  :ref:`ex-ieeg-micro`
+
+.. raw:: html
+
+      <div class="sphx-glr-thumbnail-title">Locating micro-scale intracranial electrode contacts</div>
+    </div>
+
+.. raw:: html
+
+    </div>
\ No newline at end of file diff --git a/examples/preprocessing/contralateral_referencing.py b/examples/preprocessing/contralateral_referencing.py new file mode 100644 index 00000000000..b2ae199817d --- /dev/null +++ b/examples/preprocessing/contralateral_referencing.py @@ -0,0 +1,66 @@ +""" +.. _ex-contralateral-referencing: + +======================================= +Using contralateral referencing for EEG +======================================= + +Instead of using a single reference electrode for all channels, some +researchers reference the EEG electrodes in each hemisphere to an electrode in +the contralateral hemisphere (often an electrode over the mastoid bone; this is +common in sleep research for example). Here we demonstrate how to set a +contralateral EEG reference. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import mne + +ssvep_folder = mne.datasets.ssvep.data_path() +ssvep_data_raw_path = ( + ssvep_folder / "sub-02" / "ses-01" / "eeg" / "sub-02_ses-01_task-ssvep_eeg.vhdr" +) +raw = mne.io.read_raw(ssvep_data_raw_path, preload=True) +_ = raw.set_montage("easycap-M1") + +# %% +# The electrodes TP9 and TP10 are near the mastoids so we'll use them as our +# contralateral reference channels. Then we'll create our hemisphere groups. + +raw.rename_channels({"TP9": "M1", "TP10": "M2"}) + +# this splits electrodes into 3 groups; left, midline, and right +ch_names = mne.channels.make_1020_channel_selections(raw.info, return_ch_names=True) + +# remove the ref channels from the lists of to-be-rereferenced channels +ch_names["Left"].remove("M1") +ch_names["Right"].remove("M2") + +# %% +# Finally we do the referencing. For the midline channels we'll reference them +# to the mean of the two mastoid channels; the left and right hemispheres we'll +# reference to the single contralateral mastoid channel. + +# midline referencing to mean of mastoids: +mastoids = ["M1", "M2"] +rereferenced_midline_chs = ( + raw.copy() + .pick(mastoids + ch_names["Midline"]) + .set_eeg_reference(mastoids) + .drop_channels(mastoids) +) + +# contralateral referencing (alters channels in `raw` in-place): +for ref, hemi in dict(M2=ch_names["Left"], M1=ch_names["Right"]).items(): + mne.set_bipolar_reference(raw, anode=hemi, cathode=[ref] * len(hemi), copy=False) +# strip off '-M1' and '-M2' suffixes added to each bipolar-referenced channel +raw.rename_channels(lambda ch_name: ch_name.split("-")[0]) + +# replace unreferenced midline with rereferenced midline +_ = raw.drop_channels(ch_names["Midline"]).add_channels([rereferenced_midline_chs]) + +# %% +# Make sure the channel locations still look right: +fig = raw.plot_sensors(show_names=True, sphere="eeglab") diff --git a/examples/preprocessing/css.py b/examples/preprocessing/css.py index 47447b06736..c6684b74273 100644 --- a/examples/preprocessing/css.py +++ b/examples/preprocessing/css.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-css: @@ -17,37 +16,38 @@ """ # Author: John G Samuelsson +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-import numpy as np import matplotlib.pyplot as plt +import numpy as np import mne from mne.datasets import sample -from mne.simulation import simulate_sparse_stc, simulate_evoked +from mne.simulation import simulate_evoked, simulate_sparse_stc ############################################################################### # Load sample subject data data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -ave_fname = meg_path / 'sample_audvis-no-filter-ave.fif' -cov_fname = meg_path / 'sample_audvis-cov.fif' -trans_fname = meg_path / 'sample_audvis_raw-trans.fif' -bem_fname = subjects_dir / 'sample' / 'bem' / '/sample-5120-bem-sol.fif' - -raw = mne.io.read_raw_fif(meg_path / 'sample_audvis_raw.fif') +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +ave_fname = meg_path / "sample_audvis-no-filter-ave.fif" +cov_fname = meg_path / "sample_audvis-cov.fif" +trans_fname = meg_path / "sample_audvis_raw-trans.fif" +bem_fname = subjects_dir / "sample" / "bem" / "/sample-5120-bem-sol.fif" + +raw = mne.io.read_raw_fif(meg_path / "sample_audvis_raw.fif") fwd = mne.read_forward_solution(fwd_fname) fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True) -fwd = mne.pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads']) +fwd = mne.pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info["bads"]) cov = mne.read_cov(cov_fname) ############################################################################### # Find patches (labels) to activate -all_labels = mne.read_labels_from_annot(subject='sample', - subjects_dir=subjects_dir) +all_labels = mne.read_labels_from_annot(subject="sample", subjects_dir=subjects_dir) labels = [] -for select_label in ['parahippocampal-lh', 'postcentral-rh']: +for select_label in ["parahippocampal-lh", "postcentral-rh"]: labels.append([lab for lab in all_labels if lab.name in select_label][0]) hiplab, postcenlab = labels @@ -65,34 +65,41 @@ def subcortical_waveform(times): return 10e-9 * np.cos(times * 2 * np.pi * 239) -times = np.linspace(0, 0.5, int(0.5 * raw.info['sfreq'])) -stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times, - location='center', subjects_dir=subjects_dir, - labels=[postcenlab, hiplab], - data_fun=cortical_waveform) -stc.data[np.where(np.isin(stc.vertices[0], hiplab.vertices))[0], :] = \ +times = np.linspace(0, 0.5, int(0.5 * raw.info["sfreq"])) +stc = simulate_sparse_stc( + fwd["src"], + n_dipoles=2, + times=times, + location="center", + subjects_dir=subjects_dir, + labels=[postcenlab, hiplab], + data_fun=cortical_waveform, +) +stc.data[np.where(np.isin(stc.vertices[0], hiplab.vertices))[0], :] = ( subcortical_waveform(times) +) evoked = simulate_evoked(fwd, stc, raw.info, cov, nave=15) ############################################################################### # Process with CSS and plot PSD of EEG data before and after processing -evoked_subcortical = mne.preprocessing.cortical_signal_suppression(evoked, - n_proj=6) +evoked_subcortical = mne.preprocessing.cortical_signal_suppression(evoked, n_proj=6) chs = mne.pick_types(evoked.info, meg=False, eeg=True) -psd = np.mean(np.abs(np.fft.rfft(evoked.data))**2, axis=0) -psd_proc = np.mean(np.abs(np.fft.rfft(evoked_subcortical.data))**2, axis=0) -freq = np.arange(0, stop=int(evoked.info['sfreq'] / 2), - step=evoked.info['sfreq'] / (2 * len(psd))) +psd = 
np.mean(np.abs(np.fft.rfft(evoked.data)) ** 2, axis=0) +psd_proc = np.mean(np.abs(np.fft.rfft(evoked_subcortical.data)) ** 2, axis=0) +freq = np.arange( + 0, stop=int(evoked.info["sfreq"] / 2), step=evoked.info["sfreq"] / (2 * len(psd)) +) fig, ax = plt.subplots() -ax.plot(freq, psd, label='raw') -ax.plot(freq, psd_proc, label='processed') -ax.text(.2, .7, 'cortical', transform=ax.transAxes) -ax.text(.8, .25, 'subcortical', transform=ax.transAxes) -ax.set(ylabel='EEG Power spectral density', xlabel='Frequency (Hz)') +ax.plot(freq, psd, label="raw") +ax.plot(freq, psd_proc, label="processed") +ax.text(0.2, 0.7, "cortical", transform=ax.transAxes) +ax.text(0.8, 0.25, "subcortical", transform=ax.transAxes) +ax.set(ylabel="EEG Power spectral density", xlabel="Frequency (Hz)") ax.legend() +############################################################################### # References # ^^^^^^^^^^ # diff --git a/examples/preprocessing/define_target_events.py b/examples/preprocessing/define_target_events.py index 356bc9e89ff..f438d07c753 100644 --- a/examples/preprocessing/define_target_events.py +++ b/examples/preprocessing/define_target_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-tag-events: @@ -19,14 +18,16 @@ # Authors: Denis Engemann # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% +import matplotlib.pyplot as plt + import mne from mne import io -from mne.event import define_target_events from mne.datasets import sample -import matplotlib.pyplot as plt +from mne.event import define_target_events print(__doc__) @@ -34,9 +35,9 @@ # %% # Set parameters -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) @@ -44,25 +45,33 @@ # Set up pick list: EEG + STI 014 - bad channels (modify to your needs) include = [] # or stim channels ['STI 014'] -raw.info['bads'] += ['EEG 053'] # bads +raw.info["bads"] += ["EEG 053"] # bads # pick MEG channels -picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=True, - include=include, exclude='bads') +picks = mne.pick_types( + raw.info, + meg="mag", + eeg=False, + stim=False, + eog=True, + include=include, + exclude="bads", +) # %% # Find stimulus event followed by quick button presses reference_id = 5 # presentation of a smiley face target_id = 32 # button press -sfreq = raw.info['sfreq'] # sampling rate +sfreq = raw.info["sfreq"] # sampling rate tmin = 0.1 # trials leading to very early responses will be rejected tmax = 0.59 # ignore face stimuli followed by button press later than 590 ms new_id = 42 # the new event id for a hit. If None, reference_id is used. 
fill_na = 99 # the fill value for misses -events_, lag = define_target_events(events, reference_id, target_id, - sfreq, tmin, tmax, new_id, fill_na) +events_, lag = define_target_events( + events, reference_id, target_id, sfreq, tmin, tmax, new_id, fill_na +) print(events_) # The 99 indicates missing or too late button presses @@ -78,23 +87,30 @@ tmax_ = 0.4 event_id = dict(early=new_id, late=fill_na) -epochs = mne.Epochs(raw, events_, event_id, tmin_, - tmax_, picks=picks, baseline=(None, 0), - reject=dict(mag=4e-12)) +epochs = mne.Epochs( + raw, + events_, + event_id, + tmin_, + tmax_, + picks=picks, + baseline=(None, 0), + reject=dict(mag=4e-12), +) # average epochs and get an Evoked dataset. -early, late = [epochs[k].average() for k in event_id] +early, late = (epochs[k].average() for k in event_id) # %% # View evoked response times = 1e3 * epochs.times # time in milliseconds -title = 'Evoked response followed by %s button press' +title = "Evoked response followed by {} button press" fig, axes = plt.subplots(2, 1) -early.plot(axes=axes[0], time_unit='s') -axes[0].set(title=title % 'late', ylabel='Evoked field (fT)') -late.plot(axes=axes[1], time_unit='s') -axes[1].set(title=title % 'early', ylabel='Evoked field (fT)') +early.plot(axes=axes[0], time_unit="s") +axes[0].set(title=title.format("late"), ylabel="Evoked field (fT)") +late.plot(axes=axes[1], time_unit="s") +axes[1].set(title=title.format("early"), ylabel="Evoked field (fT)") plt.show() diff --git a/examples/preprocessing/eeg_bridging.py b/examples/preprocessing/eeg_bridging.py index f25d526e42d..37d85c55df6 100644 --- a/examples/preprocessing/eeg_bridging.py +++ b/examples/preprocessing/eeg_bridging.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-eeg-bridging: @@ -11,7 +10,7 @@ electrode connects with the gel conducting signal from another electrode "bridging" the two signals. This is undesirable because the signals from the two (or more) electrodes are not as independent as they would otherwise be; -they are very similar to each other introducting additional +they are very similar to each other introducing additional spatial smearing. An algorithm has been developed to detect electrode bridging :footcite:`TenkeKayser2001`, which has been implemented in EEGLAB :footcite:`DelormeMakeig2004`. Unfortunately, there is not a lot to be @@ -23,21 +22,21 @@ effect and exclude subjects with bridging that might effect the outcome of a study. Preventing electrode bridging is ideal but awareness of the problem at least will mitigate its potential as a confound to a study. This tutorial -follows -https://psychophysiology.cpmc.columbia.edu/software/eBridge/tutorial.html. +follows the eBridge tutorial from https://psychophysiology.cpmc.columbia.edu. .. _electrodes.tsv: https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/03-electroencephalography.html#electrodes-description-_electrodestsv """ # noqa: E501 # Authors: Alex Rockhill # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% # sphinx_gallery_thumbnail_number = 2 -import numpy as np import matplotlib.pyplot as plt +import numpy as np from matplotlib.colors import LinearSegmentedColormap import mne @@ -61,12 +60,12 @@ # bridging so using the last segment of the data will # give the most conservative estimate. 
-montage = mne.channels.make_standard_montage('standard_1005') +montage = mne.channels.make_standard_montage("standard_1005") ed_data = dict() # electrical distance/bridging data raw_data = dict() # store infos for electrode positions for sub in range(1, 11): - print(f'Computing electrode bridges for subject {sub}') - raw_fname = mne.datasets.eegbci.load_data(subject=sub, runs=(1,))[0] + print(f"Computing electrode bridges for subject {sub}") + raw_fname = mne.datasets.eegbci.load_data(subjects=sub, runs=(1,))[0] raw = mne.io.read_raw(raw_fname, preload=True, verbose=False) mne.datasets.eegbci.standardize(raw) # set channel names raw.set_montage(montage, verbose=False) @@ -89,8 +88,8 @@ bridged_idx, ed_matrix = ed_data[6] -fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4)) -fig.suptitle('Subject 6 Electrical Distance Matrix') +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), layout="constrained") +fig.suptitle("Subject 6 Electrical Distance Matrix") # take median across epochs, only use upper triangular, lower is NaNs ed_plot = np.zeros(ed_matrix.shape[1:]) * np.nan @@ -99,19 +98,17 @@ ed_plot[idx0, idx1] = np.nanmedian(ed_matrix[:, idx0, idx1]) # plot full distribution color range -im1 = ax1.imshow(ed_plot, aspect='auto') +im1 = ax1.imshow(ed_plot, aspect="auto") cax1 = fig.colorbar(im1, ax=ax1) -cax1.set_label(r'Electrical Distance ($\mu$$V^2$)') +cax1.set_label(r"Electrical Distance ($\mu$$V^2$)") # plot zoomed in colors -im2 = ax2.imshow(ed_plot, aspect='auto', vmax=5) +im2 = ax2.imshow(ed_plot, aspect="auto", vmax=5) cax2 = fig.colorbar(im2, ax=ax2) -cax2.set_label(r'Electrical Distance ($\mu$$V^2$)') +cax2.set_label(r"Electrical Distance ($\mu$$V^2$)") for ax in (ax1, ax2): - ax.set_xlabel('Channel Index') - ax.set_ylabel('Channel Index') - -fig.tight_layout() + ax.set_xlabel("Channel Index") + ax.set_ylabel("Channel Index") # %% # Examine the Distribution of Electrical Distances @@ -126,10 +123,10 @@ # without bridged electrodes do not have a peak near zero. fig, ax = plt.subplots(figsize=(5, 5)) -fig.suptitle('Subject 6 Electrical Distance Matrix Distribution') +fig.suptitle("Subject 6 Electrical Distance Matrix Distribution") ax.hist(ed_matrix[~np.isnan(ed_matrix)], bins=np.linspace(0, 500, 51)) -ax.set_xlabel(r'Electrical Distance ($\mu$$V^2$)') -ax.set_ylabel('Count (channel pairs for all epochs)') +ax.set_xlabel(r"Electrical Distance ($\mu$$V^2$)") +ax.set_ylabel("Count (channel pairs for all epochs)") # %% # Plot Electrical Distances on a Topomap @@ -146,8 +143,12 @@ # may have inserted the gel syringe tip in too far). mne.viz.plot_bridged_electrodes( - raw_data[6].info, bridged_idx, ed_matrix, - title='Subject 6 Bridged Electrodes', topomap_args=dict(vlim=(None, 5))) + raw_data[6].info, + bridged_idx, + ed_matrix, + title="Subject 6 Bridged Electrodes", + topomap_args=dict(vlim=(None, 5)), +) # %% # Plot the Raw Voltage Time Series for Bridged Electrodes @@ -161,18 +162,30 @@ # pairs, meaning that it is unlikely that all four of these electrodes are # bridged. 
-raw = raw_data[6].copy().pick_channels(['F2', 'F4', 'FC2', 'FC4']) -raw.add_channels([mne.io.RawArray( - raw.get_data(ch1) - raw.get_data(ch2), - mne.create_info([f'{ch1}-{ch2}'], raw.info['sfreq'], 'eeg'), - raw.first_samp) for ch1, ch2 in [('F2', 'F4'), ('FC2', 'FC4')]]) +raw = raw_data[6].copy().pick(["FC2", "FC4", "F2", "F4"]) +raw.add_channels( + [ + mne.io.RawArray( + raw.get_data(ch1) - raw.get_data(ch2), + mne.create_info([f"{ch1}-{ch2}"], raw.info["sfreq"], "eeg"), + raw.first_samp, + ) + for ch1, ch2 in [("F2", "F4"), ("FC2", "FC4")] + ] +) raw.plot(duration=20, scalings=dict(eeg=2e-4)) -raw = raw_data[1].copy().pick_channels(['F2', 'F4', 'FC2', 'FC4']) -raw.add_channels([mne.io.RawArray( - raw.get_data(ch1) - raw.get_data(ch2), - mne.create_info([f'{ch1}-{ch2}'], raw.info['sfreq'], 'eeg'), - raw.first_samp) for ch1, ch2 in [('F2', 'F4'), ('FC2', 'FC4')]]) +raw = raw_data[1].copy().pick(["FC2", "FC4", "F2", "F4"]) +raw.add_channels( + [ + mne.io.RawArray( + raw.get_data(ch1) - raw.get_data(ch2), + mne.create_info([f"{ch1}-{ch2}"], raw.info["sfreq"], "eeg"), + raw.first_samp, + ) + for ch1, ch2 in [("F2", "F4"), ("FC2", "FC4")] + ] +) raw.plot(duration=20, scalings=dict(eeg=2e-4)) # %% @@ -193,26 +206,27 @@ # reflect neural or at least anatomical differences as well (i.e. the # distance from the sensors to the brain). -fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4)) -fig.suptitle('Electrical Distance Distribution for EEGBCI Subjects') +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), layout="constrained") +fig.suptitle("Electrical Distance Distribution for EEGBCI Subjects") for ax in (ax1, ax2): - ax.set_ylabel('Count') - ax.set_xlabel(r'Electrical Distance ($\mu$$V^2$)') + ax.set_ylabel("Count") + ax.set_xlabel(r"Electrical Distance ($\mu$$V^2$)") for sub, (bridged_idx, ed_matrix) in ed_data.items(): # ed_matrix is upper triangular so exclude bottom half of NaNs - hist, edges = np.histogram(ed_matrix[~np.isnan(ed_matrix)].flatten(), - bins=np.linspace(0, 1000, 101)) + hist, edges = np.histogram( + ed_matrix[~np.isnan(ed_matrix)].flatten(), bins=np.linspace(0, 1000, 101) + ) centers = (edges[1:] + edges[:-1]) / 2 ax1.plot(centers, hist) - hist, edges = np.histogram(ed_matrix[~np.isnan(ed_matrix)].flatten(), - bins=np.linspace(0, 30, 21)) + hist, edges = np.histogram( + ed_matrix[~np.isnan(ed_matrix)].flatten(), bins=np.linspace(0, 30, 21) + ) centers = (edges[1:] + edges[:-1]) / 2 - ax2.plot(centers, hist, label=f'Sub {sub} #={len(bridged_idx)}') + ax2.plot(centers, hist, label=f"Sub {sub} #={len(bridged_idx)}") -ax1.axvspan(0, 30, color='r', alpha=0.5) +ax1.axvspan(0, 30, color="r", alpha=0.5) ax2.legend(loc=(1.04, 0)) -fig.subplots_adjust(right=0.725, bottom=0.15, wspace=0.4) # %% # For the group of subjects, let's look at their electrical distances @@ -224,9 +238,12 @@ for sub, (bridged_idx, ed_matrix) in ed_data.items(): mne.viz.plot_bridged_electrodes( - raw_data[sub].info, bridged_idx, ed_matrix, - title=f'Subject {sub} Bridged Electrodes', - topomap_args=dict(vlim=(None, 5))) + raw_data[sub].info, + bridged_idx, + ed_matrix, + title=f"Subject {sub} Bridged Electrodes", + topomap_args=dict(vlim=(None, 5)), + ) # %% # For subjects with many bridged channels like Subject 6 shown in the example @@ -243,7 +260,8 @@ # use subject 2, only one bridged electrode pair bridged_idx = ed_data[2][0] raw = mne.preprocessing.interpolate_bridged_electrodes( - raw_data[2].copy(), bridged_idx=bridged_idx) + raw_data[2].copy(), bridged_idx=bridged_idx +) # %% # Let's make sure that 
our virtual channel aided the interpolation. We can do @@ -275,41 +293,70 @@ bridged_data[0] += 1e-7 * rng.normal(size=raw.times.size) bridged_data[1] += 1e-7 * rng.normal(size=raw.times.size) # add back simulated data -raw_sim = raw_sim.add_channels([mne.io.RawArray( - bridged_data, mne.create_info([ch0, ch1], raw.info['sfreq'], 'eeg'), - raw.first_samp)]) +raw_sim = raw_sim.add_channels( + [ + mne.io.RawArray( + bridged_data, + mne.create_info([ch0, ch1], raw.info["sfreq"], "eeg"), + raw.first_samp, + ) + ] +) raw_sim.set_montage(montage) # add back channel positions # use virtual channel method raw_virtual = mne.preprocessing.interpolate_bridged_electrodes( - raw_sim.copy(), bridged_idx=bridged_idx_simulated) + raw_sim.copy(), bridged_idx=bridged_idx_simulated +) data_virtual = raw_virtual.get_data(picks=(idx0, idx1)) # set bads to be bridged electrodes to interpolate without a virtual channel raw_comp = raw_sim.copy() -raw_comp.info['bads'] = [raw_sim.ch_names[idx0], raw_sim.ch_names[idx1]] +raw_comp.info["bads"] = [raw_sim.ch_names[idx0], raw_sim.ch_names[idx1]] raw_comp.interpolate_bads() data_comp = raw_comp.get_data(picks=(idx0, idx1)) # compute variance of residuals -print('Variance of residual (interpolated data - original data)\n\n' - 'With adding virtual channel: {}\n' - 'Compared to interpolation only using other channels: {}' - ''.format(np.mean(np.var(data_virtual - data_orig, axis=1)), - np.mean(np.var(data_comp - data_orig, axis=1)))) +print( + "Variance of residual (interpolated data - original data)\n\n" + f"With adding virtual channel: {np.mean(np.var(data_virtual - data_orig, axis=1))}\n" + f"Compared to interpolation only using other channels: {np.mean(np.var(data_comp - data_orig, axis=1))}" + "" +) # plot results -raw = raw.pick_channels([ch0, ch1]) -raw = raw.add_channels([mne.io.RawArray( - np.concatenate([data_virtual, data_virtual - data_orig]), - mne.create_info([f'{ch0} virtual', f'{ch1} virtual', - f'{ch0} virtual diff', f'{ch1} virtual diff'], - raw.info['sfreq'], 'eeg'), raw.first_samp)]) -raw = raw.add_channels([mne.io.RawArray( - np.concatenate([data_comp, data_comp - data_orig]), - mne.create_info([f'{ch0} comp', f'{ch1} comp', - f'{ch0} comp diff', f'{ch1} comp diff'], - raw.info['sfreq'], 'eeg'), raw.first_samp)]) +raw = raw.pick([ch0, ch1]) +raw = raw.add_channels( + [ + mne.io.RawArray( + np.concatenate([data_virtual, data_virtual - data_orig]), + mne.create_info( + [ + f"{ch0} virtual", + f"{ch1} virtual", + f"{ch0} virtual diff", + f"{ch1} virtual diff", + ], + raw.info["sfreq"], + "eeg", + ), + raw.first_samp, + ) + ] +) +raw = raw.add_channels( + [ + mne.io.RawArray( + np.concatenate([data_comp, data_comp - data_orig]), + mne.create_info( + [f"{ch0} comp", f"{ch1} comp", f"{ch0} comp diff", f"{ch1} comp diff"], + raw.info["sfreq"], + "eeg", + ), + raw.first_samp, + ) + ] +) raw.plot(scalings=dict(eeg=7e-5)) # %% @@ -333,17 +380,17 @@ raw = raw_data[1] # typically impedances < 25 kOhm are acceptable for active systems and # impedances < 5 kOhm are desirable for a passive system -impedances = rng.random((len(raw.ch_names,))) * 30 +impedances = rng.random(len(raw.ch_names)) * 30 impedances[10] = 80 # set a few bad impendances impedances[25] = 99 -cmap = LinearSegmentedColormap.from_list(name='impedance_cmap', - colors=['g', 'y', 'r'], N=256) +cmap = LinearSegmentedColormap.from_list( + name="impedance_cmap", colors=["g", "y", "r"], N=256 +) fig, ax = plt.subplots(figsize=(5, 5)) -im, cn = mne.viz.plot_topomap(impedances, raw.info, axes=ax, - 
cmap=cmap, vlim=(25, 75)) -ax.set_title('Electrode Impendances') +im, cn = mne.viz.plot_topomap(impedances, raw.info, axes=ax, cmap=cmap, vlim=(25, 75)) +ax.set_title("Electrode Impendances") cax = fig.colorbar(im, ax=ax) -cax.set_label(r'Impedance (k$\Omega$)') +cax.set_label(r"Impedance (k$\Omega$)") # %% # Summary diff --git a/examples/preprocessing/eeg_csd.py b/examples/preprocessing/eeg_csd.py index d4176de1ceb..e5c6086c068 100644 --- a/examples/preprocessing/eeg_csd.py +++ b/examples/preprocessing/eeg_csd.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-eeg-csd: @@ -16,13 +15,14 @@ # Authors: Alex Rockhill # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% # sphinx_gallery_thumbnail_number = 6 -import numpy as np import matplotlib.pyplot as plt +import numpy as np import mne from mne.datasets import sample @@ -33,10 +33,9 @@ # %% # Load sample subject data -meg_path = data_path / 'MEG' / 'sample' -raw = mne.io.read_raw_fif(meg_path / 'sample_audvis_raw.fif') -raw = raw.pick_types(meg=False, eeg=True, eog=True, ecg=True, stim=True, - exclude=raw.info['bads']).load_data() +meg_path = data_path / "MEG" / "sample" +raw = mne.io.read_raw_fif(meg_path / "sample_audvis_raw.fif") +raw = raw.pick(picks=["eeg", "eog", "ecg", "stim"], exclude="bads").load_data() events = mne.find_events(raw) raw.set_eeg_reference(projection=True).apply_proj() @@ -50,42 +49,47 @@ # %% # Also look at the power spectral densities: -raw.compute_psd().plot() -raw_csd.compute_psd().plot() +raw.compute_psd().plot(picks="data", exclude="bads", amplitude=False) +raw_csd.compute_psd().plot(picks="data", exclude="bads", amplitude=False) # %% # CSD can also be computed on Evoked (averaged) data. # Here we epoch and average the data so we can demonstrate that. -event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3, - 'visual/right': 4, 'smiley': 5, 'button': 32} -epochs = mne.Epochs(raw, events, event_id=event_id, tmin=-0.2, tmax=.5, - preload=True) -evoked = epochs['auditory'].average() +event_id = { + "auditory/left": 1, + "auditory/right": 2, + "visual/left": 3, + "visual/right": 4, + "smiley": 5, + "button": 32, +} +epochs = mne.Epochs(raw, events, event_id=event_id, tmin=-0.2, tmax=0.5, preload=True) +evoked = epochs["auditory"].average() # %% # First let's look at how CSD affects scalp topography: -times = np.array([-0.1, 0., 0.05, 0.1, 0.15]) +times = np.array([-0.1, 0.0, 0.05, 0.1, 0.15]) evoked_csd = mne.preprocessing.compute_current_source_density(evoked) -evoked.plot_joint(title='Average Reference', show=False) -evoked_csd.plot_joint(title='Current Source Density') +evoked.plot_joint(title="Average Reference", show=False) +evoked_csd.plot_joint(title="Current Source Density") # %% # CSD has parameters ``stiffness`` and ``lambda2`` affecting smoothing and # spline flexibility, respectively. 
Let's see how they affect the solution: -fig, ax = plt.subplots(4, 4) -fig.subplots_adjust(hspace=0.5) +fig, ax = plt.subplots(4, 4, layout="constrained") fig.set_size_inches(10, 10) for i, lambda2 in enumerate([0, 1e-7, 1e-5, 1e-3]): for j, m in enumerate([5, 4, 3, 2]): this_evoked_csd = mne.preprocessing.compute_current_source_density( - evoked, stiffness=m, lambda2=lambda2) + evoked, stiffness=m, lambda2=lambda2 + ) this_evoked_csd.plot_topomap( - 0.1, axes=ax[i, j], contours=4, time_unit='s', - colorbar=False, show=False) - ax[i, j].set_title('stiffness=%i\nλ²=%s' % (m, lambda2)) + 0.1, axes=ax[i, j], contours=4, time_unit="s", colorbar=False, show=False + ) + ax[i, j].set_title(f"stiffness={m}\nλ²={lambda2}") # %% # References diff --git a/examples/preprocessing/eog_artifact_histogram.py b/examples/preprocessing/eog_artifact_histogram.py index d14eee57802..ac51d8b1f39 100644 --- a/examples/preprocessing/eog_artifact_histogram.py +++ b/examples/preprocessing/eog_artifact_histogram.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-eog: @@ -12,12 +11,12 @@ # Authors: Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% - -import numpy as np import matplotlib.pyplot as plt +import numpy as np import mne from mne import io @@ -29,31 +28,28 @@ # %% # Set parameters -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" # Setup for reading the raw data raw = io.read_raw_fif(raw_fname, preload=True) -events = mne.find_events(raw, 'STI 014') +events = mne.find_events(raw, "STI 014") eog_event_id = 512 eog_events = mne.preprocessing.find_eog_events(raw, eog_event_id) -raw.add_events(eog_events, 'STI 014') +raw.add_events(eog_events, "STI 014") # Read epochs picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=True, eog=False) tmin, tmax = -0.2, 0.5 -event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4} +event_ids = {"AudL": 1, "AudR": 2, "VisL": 3, "VisR": 4} epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks) # Get the stim channel data -pick_ch = mne.pick_channels(epochs.ch_names, ['STI 014'])[0] -data = epochs.get_data()[:, pick_ch, :] +data = epochs.get_data(picks="STI 014").squeeze() data = np.sum((data.astype(int) & eog_event_id) == eog_event_id, axis=0) # %% # Plot EOG artifact distribution -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ax.stem(1e3 * epochs.times, data) -ax.set(xlabel='Times (ms)', - ylabel='Blink counts (from %s trials)' % len(epochs)) -fig.tight_layout() +ax.set(xlabel="Times (ms)", ylabel=f"Blink counts (from {len(epochs)} trials)") diff --git a/examples/preprocessing/eog_regression.py b/examples/preprocessing/eog_regression.py index 62c52c396ac..e3b8341e744 100644 --- a/examples/preprocessing/eog_regression.py +++ b/examples/preprocessing/eog_regression.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ ======================================= Reduce EOG artifacts through regression @@ -14,7 +13,8 @@ # Author: Marijn van Vliet # -# License: BSD (3-clause) +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% # Import packages and load data @@ -23,22 +23,23 @@ # We begin as always by importing the necessary Python modules and loading some # data, in this case the :ref:`MNE sample dataset `. 
+from matplotlib import pyplot as plt + import mne from mne.datasets import sample from mne.preprocessing import EOGRegression -from matplotlib import pyplot as plt print(__doc__) data_path = sample.data_path() -raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_filt-0-40_raw.fif' +raw_fname = data_path / "MEG" / "sample" / "sample_audvis_filt-0-40_raw.fif" # Read raw data raw = mne.io.read_raw_fif(raw_fname, preload=True) -events = mne.find_events(raw, 'STI 014') +events = mne.find_events(raw, "STI 014") # Highpass filter to eliminate slow drifts -raw.filter(0.3, None, picks='all') +raw.filter(0.3, None, picks="all") # %% # Perform regression and remove EOG @@ -58,21 +59,21 @@ # is best visualized by extracting epochs and plotting the evoked potential. tmin, tmax = -0.1, 0.5 -event_id = {'visual/left': 3, 'visual/right': 4} -evoked_before = mne.Epochs(raw, events, event_id, tmin, tmax, - baseline=(tmin, 0)).average() -evoked_after = mne.Epochs(raw_clean, events, event_id, tmin, tmax, - baseline=(tmin, 0)).average() +event_id = {"visual/left": 3, "visual/right": 4} +evoked_before = mne.Epochs( + raw, events, event_id, tmin, tmax, baseline=(tmin, 0) +).average() +evoked_after = mne.Epochs( + raw_clean, events, event_id, tmin, tmax, baseline=(tmin, 0) +).average() # Create epochs after EOG correction -epochs_after = mne.Epochs(raw_clean, events, event_id, tmin, tmax, - baseline=(tmin, 0)) +epochs_after = mne.Epochs(raw_clean, events, event_id, tmin, tmax, baseline=(tmin, 0)) evoked_after = epochs_after.average() -fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(10, 7), - sharex=True, sharey='row') +fig, ax = plt.subplots( + nrows=3, ncols=2, figsize=(10, 7), sharex=True, sharey="row", layout="constrained" +) evoked_before.plot(axes=ax[:, 0], spatial_colors=True) evoked_after.plot(axes=ax[:, 1], spatial_colors=True) -fig.subplots_adjust(top=0.905, bottom=0.09, left=0.08, right=0.975, - hspace=0.325, wspace=0.145) -fig.suptitle('Before --> After') +fig.suptitle("Before --> After") diff --git a/examples/preprocessing/epochs_metadata.py b/examples/preprocessing/epochs_metadata.py new file mode 100644 index 00000000000..9c46368afa0 --- /dev/null +++ b/examples/preprocessing/epochs_metadata.py @@ -0,0 +1,171 @@ +""" +.. _epochs-metadata: + +=============================================================== +Automated epochs metadata generation with variable time windows +=============================================================== + +When working with :class:`~mne.Epochs`, :ref:`metadata ` can be +invaluable. There is an extensive tutorial on +:ref:`how it can be generated automatically `. +In the brief examples below, we will demonstrate different ways to bound the time +windows used to generate the metadata. + +""" +# Authors: Richard Höchenberger +# +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# %% +# We will use data from an EEG recording during an Eriksen flanker task. For the +# purpose of demonstration, we'll only load the first 60 seconds of data. + +import mne + +data_dir = mne.datasets.erp_core.data_path() +infile = data_dir / "ERP-CORE_Subject-001_Task-Flankers_eeg.fif" + +raw = mne.io.read_raw(infile, preload=True) +raw.crop(tmax=60).filter(l_freq=0.1, h_freq=40) + +# %% +# Visualizing the events +# ^^^^^^^^^^^^^^^^^^^^^^ +# +# All experimental events are stored in the :class:`~mne.io.Raw` instance as +# :class:`~mne.Annotations`. We first need to convert these to events and the +# corresponding mapping from event codes to event names (``event_id``). 
+# We then visualize the events.
+all_events, all_event_id = mne.events_from_annotations(raw)
+mne.viz.plot_events(events=all_events, event_id=all_event_id, sfreq=raw.info["sfreq"])
+
+
+# %%
+# As you can see, there are four types of ``stimulus`` and two types of ``response``
+# events.
+#
+# Declaring "row events"
+# ^^^^^^^^^^^^^^^^^^^^^^
+#
+# For the sake of this example, we will assume that during analysis our epochs will be
+# time-locked to the stimulus onset events. Hence, we would like to create metadata with
+# one row per ``stimulus``. We can achieve this by specifying all stimulus event names
+# as ``row_events``.
+
+row_events = [
+    "stimulus/compatible/target_left",
+    "stimulus/compatible/target_right",
+    "stimulus/incompatible/target_left",
+    "stimulus/incompatible/target_right",
+]
+
+# %%
+# Specifying metadata time windows
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#
+# Now, we will explore different ways of specifying the time windows around the
+# ``row_events`` when generating metadata. Any events falling within the same time
+# window will be added to the same row in the metadata table.
+#
+# Fixed time window
+# ~~~~~~~~~~~~~~~~~
+#
+# A simple way to specify the time window extent is by specifying the time in seconds
+# relative to the row event. In the following example, the time window spans from the
+# row event (time point zero) up until three seconds later.
+
+metadata_tmin = 0.0
+metadata_tmax = 3.0
+
+metadata, events, event_id = mne.epochs.make_metadata(
+    events=all_events,
+    event_id=all_event_id,
+    tmin=metadata_tmin,
+    tmax=metadata_tmax,
+    sfreq=raw.info["sfreq"],
+    row_events=row_events,
+)
+
+metadata
+
+# %%
+# This looks good at first glance. However, in the 2nd and 3rd rows, for example, we
+# have two responses listed (left and right). This is because the 3-second time window
+# is a bit too wide and captures more than one trial. While we could make it narrower,
+# this could lead to a loss of events if the window becomes **too** narrow. Ultimately,
+# this problem arises because the response time varies from trial to trial, so it's
+# difficult to set a fixed upper bound for the time window.
+#
+# Fixed time window with ``keep_first``
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# One workaround is using the ``keep_first`` parameter, which will create a new column
+# containing the first event of the specified type.
+
+metadata_tmin = 0.0
+metadata_tmax = 3.0
+keep_first = "response"  # <-- new
+
+metadata, events, event_id = mne.epochs.make_metadata(
+    events=all_events,
+    event_id=all_event_id,
+    tmin=metadata_tmin,
+    tmax=metadata_tmax,
+    sfreq=raw.info["sfreq"],
+    row_events=row_events,
+    keep_first=keep_first,  # <-- new
+)
+
+metadata
+
+# %%
+# As you can see, a new column ``response`` was created with the time of the first
+# response event falling inside the time window. The ``first_response`` column specifies
+# **which** response occurred first (left or right).
+#
+# Variable time window
+# ~~~~~~~~~~~~~~~~~~~~
+#
+# Another way to address the challenge of variable time windows **without** the need to
+# create new columns is by specifying ``tmin`` and ``tmax`` as event names. In this
+# example, we use ``tmin=row_events``, because we want the time window to start
+# with the time-locked event. ``tmax``, on the other hand, is set to the response
+# events: the first response event following ``tmin`` will be used to determine the
+# duration of the time window.
+
+metadata_tmin = row_events
+metadata_tmax = ["response/left", "response/right"]
+
+metadata, events, event_id = mne.epochs.make_metadata(
+    events=all_events,
+    event_id=all_event_id,
+    tmin=metadata_tmin,
+    tmax=metadata_tmax,
+    sfreq=raw.info["sfreq"],
+    row_events=row_events,
+)
+
+metadata
+
+# %%
+# Variable time window (simplified)
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# We can slightly simplify the above code: Since ``tmin`` shall be set to the
+# ``row_events``, we can pass ``tmin=None``, which is a more convenient way to express
+# ``tmin=row_events``. The resulting metadata looks the same as in the previous example.
+
+metadata_tmin = None  # <-- new
+metadata_tmax = ["response/left", "response/right"]
+
+metadata, events, event_id = mne.epochs.make_metadata(
+    events=all_events,
+    event_id=all_event_id,
+    tmin=metadata_tmin,
+    tmax=metadata_tmax,
+    sfreq=raw.info["sfreq"],
+    row_events=row_events,
+)
+
+metadata
diff --git a/examples/preprocessing/esg_rm_heart_artefact_pcaobs.py b/examples/preprocessing/esg_rm_heart_artefact_pcaobs.py
new file mode 100755
index 00000000000..a6c6bb3c2ba
--- /dev/null
+++ b/examples/preprocessing/esg_rm_heart_artefact_pcaobs.py
@@ -0,0 +1,196 @@
+"""
+.. _ex-pcaobs:
+
+=====================================================================================
+Principal Component Analysis - Optimal Basis Sets (PCA-OBS) removing cardiac artefact
+=====================================================================================
+
+This script shows an example of how to use an adaptation of PCA-OBS
+:footcite:`NiazyEtAl2005`. PCA-OBS was originally designed to remove
+the ballistocardiographic artefact in simultaneous EEG-fMRI. Here, it
+has been adapted to remove the delay between the detected R-peak and the
+ballistocardiographic artefact such that the algorithm can be applied to
+remove the cardiac artefact in EEG (electroencephalography) and ESG
+(electrospinography) data. We will illustrate how it works by applying the
+algorithm to ESG data, where the effect of removal is most pronounced.
+
+See: https://www.biorxiv.org/content/10.1101/2024.09.05.611423v1
+for more details on the dataset and application for ESG data.
+
+"""
+
+# Authors: Emma Bailey ,
+#          Steinn Hauser Magnusson
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import glob
+
+import numpy as np
+
+# %%
+# Download sample subject data from OpenNeuro if you haven't already.
+# This will download simultaneous EEG and ESG data from a single run of a
+# single participant after median nerve stimulation of the left wrist.
+import openneuro
+from matplotlib import pyplot as plt
+
+import mne
+from mne import Epochs, events_from_annotations
+from mne.io import read_raw_eeglab
+from mne.preprocessing import find_ecg_events, fix_stim_artifact
+
+# add the path where you want the OpenNeuro data downloaded. Each run is ~2GB of data
+ds = "ds004388"
+target_dir = mne.datasets.default_path() / ds
+run_name = "sub-001/eeg/*median_run-03_eeg*.set"
+if not glob.glob(str(target_dir / run_name)):
+    target_dir.mkdir(exist_ok=True)
+    openneuro.download(dataset=ds, target_dir=target_dir, include=run_name[:-4])
+block_files = glob.glob(str(target_dir / run_name))
+assert len(block_files) == 1
+
+# %%
+# Define the esg channels (arranged in two patches over the neck and lower back).
+ +esg_chans = [ + "S35", + "S24", + "S36", + "Iz", + "S17", + "S15", + "S32", + "S22", + "S19", + "S26", + "S28", + "S9", + "S13", + "S11", + "S7", + "SC1", + "S4", + "S18", + "S8", + "S31", + "SC6", + "S12", + "S16", + "S5", + "S30", + "S20", + "S34", + "S21", + "S25", + "L1", + "S29", + "S14", + "S33", + "S3", + "L4", + "S6", + "S23", +] + +# Interpolation window in seconds for ESG data to remove stimulation artefact +tstart_esg = -7e-3 +tmax_esg = 7e-3 + +# Define timing of heartbeat epochs in seconds relative to R-peaks +iv_baseline = [-400e-3, -300e-3] +iv_epoch = [-400e-3, 600e-3] + +# %% +# Next, we perform minimal preprocessing including removing the +# stimulation artefact, downsampling and filtering. + +raw = read_raw_eeglab(block_files[0], verbose="error") +raw.set_channel_types(dict(ECG="ecg")) +# Isolate the ESG channels (include the ECG channel for R-peak detection) +raw.pick(esg_chans + ["ECG"]) +# Trim duration and downsample (from 10kHz) to improve example speed +raw.crop(0, 60).load_data().resample(2000) + +# Find trigger timings to remove the stimulation artefact +events, event_dict = events_from_annotations(raw) +trigger_name = "Median - Stimulation" + +fix_stim_artifact( + raw, + events=events, + event_id=event_dict[trigger_name], + tmin=tstart_esg, + tmax=tmax_esg, + mode="linear", + stim_channel=None, +) + +# %% +# Find ECG events and add to the raw structure as event annotations. + +ecg_events, ch_ecg, average_pulse = find_ecg_events(raw, ch_name="ECG") +ecg_event_samples = np.asarray( + [[ecg_event[0] for ecg_event in ecg_events]] +) # Samples only + +qrs_event_time = [ + x / raw.info["sfreq"] for x in ecg_event_samples.reshape(-1) +] # Divide by sampling rate to make times +duration = np.repeat(0.0, len(ecg_event_samples)) +description = ["qrs"] * len(ecg_event_samples) + +raw.annotations.append( + qrs_event_time, duration, description, ch_names=[esg_chans] * len(qrs_event_time) +) + +# %% +# Create evoked response around the detected R-peaks +# before and after cardiac artefact correction. + +events, event_ids = events_from_annotations(raw) +event_id_dict = {key: value for key, value in event_ids.items() if key == "qrs"} +epochs = Epochs( + raw, + events, + event_id=event_id_dict, + tmin=iv_epoch[0], + tmax=iv_epoch[1], + baseline=tuple(iv_baseline), +) +evoked_before = epochs.average() + +# Apply function - modifies the data in place. Optionally high-pass filter +# the data before applying PCA-OBS to remove low frequency drifts +raw = mne.preprocessing.apply_pca_obs( + raw, picks=esg_chans, n_jobs=5, qrs_times=raw.times[ecg_event_samples.reshape(-1)] +) + +epochs = Epochs( + raw, + events, + event_id=event_id_dict, + tmin=iv_epoch[0], + tmax=iv_epoch[1], + baseline=tuple(iv_baseline), +) +evoked_after = epochs.average() + +# %% +# Compare evoked responses to assess completeness of artefact removal. + +fig, axes = plt.subplots(1, 1, layout="constrained") +data_before = evoked_before.get_data(units=dict(eeg="uV")).T +data_after = evoked_after.get_data(units=dict(eeg="uV")).T +hs = list() +hs.append(axes.plot(epochs.times, data_before, color="k")[0]) +hs.append(axes.plot(epochs.times, data_after, color="green", label="after")[0]) +axes.set(ylim=[-500, 1000], ylabel="Amplitude (µV)", xlabel="Time (s)") +axes.set(title="ECG artefact removal using PCA-OBS") +axes.legend(hs, ["before", "after"]) +plt.show() + +# %% +# References +# ---------- +# .. 
footbibliography:: diff --git a/examples/preprocessing/find_ref_artifacts.py b/examples/preprocessing/find_ref_artifacts.py index 969e714f684..90e3d1fb0da 100644 --- a/examples/preprocessing/find_ref_artifacts.py +++ b/examples/preprocessing/find_ref_artifacts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-megnoise_processing: @@ -30,14 +29,16 @@ # Authors: Jeff Hanna # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% +import numpy as np + import mne from mne import io from mne.datasets import refmeg_noise from mne.preprocessing import ICA -import numpy as np print(__doc__) @@ -46,7 +47,7 @@ # %% # Read raw data, cropping to 5 minutes to save memory -raw_fname = data_path / 'sample_reference_MEG_noise-raw.fif' +raw_fname = data_path / "sample_reference_MEG_noise-raw.fif" raw = io.read_raw_fif(raw_fname).crop(300, 600).load_data() # %% @@ -54,27 +55,32 @@ # been applied to these data, much of the noise in the reference channels # (bottom of the plot) can still be seen in the standard channels. select_picks = np.concatenate( - (mne.pick_types(raw.info, meg=True)[-32:], - mne.pick_types(raw.info, meg=False, ref_meg=True))) + ( + mne.pick_types(raw.info, meg=True)[-32:], + mne.pick_types(raw.info, meg=False, ref_meg=True), + ) +) plot_kwargs = dict( - duration=100, order=select_picks, n_channels=len(select_picks), - scalings={"mag": 8e-13, "ref_meg": 2e-11}) + duration=100, + order=select_picks, + n_channels=len(select_picks), + scalings={"mag": 8e-13, "ref_meg": 2e-11}, +) raw.plot(**plot_kwargs) # %% # The PSD of these data show the noise as clear peaks. -raw.compute_psd(fmax=30).plot() +raw.compute_psd(fmax=30).plot(picks="data", exclude="bads", amplitude=False) # %% # Run the "together" algorithm. raw_tog = raw.copy() ica_kwargs = dict( - method='picard', + method="picard", fit_params=dict(tol=1e-4), # use a high tol here for speed ) all_picks = mne.pick_types(raw_tog.info, meg=True, ref_meg=True) -ica_tog = ICA(n_components=60, max_iter='auto', allow_ref_meg=True, - **ica_kwargs) +ica_tog = ICA(n_components=60, max_iter="auto", allow_ref_meg=True, **ica_kwargs) ica_tog.fit(raw_tog, picks=all_picks) # low threshold (2.0) here because of cropped data, entire recording can use # a higher threshold (2.5) @@ -93,7 +99,7 @@ # %% # Cleaned data: -raw_tog.compute_psd(fmax=30).plot() +raw_tog.compute_psd(fmax=30).plot(picks="data", exclude="bads", amplitude=False) # %% # Now try the "separate" algorithm. @@ -101,8 +107,7 @@ # Do ICA only on the reference channels. ref_picks = mne.pick_types(raw_sep.info, meg=False, ref_meg=True) -ica_ref = ICA(n_components=2, max_iter='auto', allow_ref_meg=True, - **ica_kwargs) +ica_ref = ICA(n_components=2, max_iter="auto", allow_ref_meg=True, **ica_kwargs) ica_ref.fit(raw_sep, picks=ref_picks) # Do ICA on both reference and standard channels. Here, we can just reuse @@ -138,7 +143,7 @@ # %% # Cleaned raw data PSD: -raw_sep.compute_psd(fmax=30).plot() +raw_sep.compute_psd(fmax=30).plot(picks="data", exclude="bads", amplitude=False) ############################################################################## # References diff --git a/examples/preprocessing/fnirs_artifact_removal.py b/examples/preprocessing/fnirs_artifact_removal.py index c6882d38a09..7c4855086a7 100644 --- a/examples/preprocessing/fnirs_artifact_removal.py +++ b/examples/preprocessing/fnirs_artifact_removal.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. 
_ex-fnirs-artifacts: @@ -13,14 +12,17 @@ # Authors: Robert Luke # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import os -import mne -from mne.preprocessing.nirs import (optical_density, - temporal_derivative_distribution_repair) +import mne +from mne.preprocessing.nirs import ( + optical_density, + temporal_derivative_distribution_repair, +) # %% # Import data @@ -32,12 +34,13 @@ # and plot these signals. fnirs_data_folder = mne.datasets.fnirs_motor.data_path() -fnirs_cw_amplitude_dir = os.path.join(fnirs_data_folder, 'Participant-1') +fnirs_cw_amplitude_dir = os.path.join(fnirs_data_folder, "Participant-1") raw_intensity = mne.io.read_raw_nirx(fnirs_cw_amplitude_dir, verbose=True) raw_intensity.load_data().resample(3, npad="auto") raw_od = optical_density(raw_intensity) -new_annotations = mne.Annotations([31, 187, 317], [8, 8, 8], - ["Movement", "Movement", "Movement"]) +new_annotations = mne.Annotations( + [31, 187, 317], [8, 8, 8], ["Movement", "Movement", "Movement"] +) raw_od.set_annotations(new_annotations) raw_od.plot(n_channels=15, duration=400, show_scrollbars=False) @@ -62,10 +65,10 @@ corrupted_data = raw_od.get_data() corrupted_data[:, 298:302] = corrupted_data[:, 298:302] - 0.06 corrupted_data[:, 450:750] = corrupted_data[:, 450:750] + 0.03 -corrupted_od = mne.io.RawArray(corrupted_data, raw_od.info, - first_samp=raw_od.first_samp) -new_annotations.append([95, 145, 245], [10, 10, 10], - ["Spike", "Baseline", "Baseline"]) +corrupted_od = mne.io.RawArray( + corrupted_data, raw_od.info, first_samp=raw_od.first_samp +) +new_annotations.append([95, 145, 245], [10, 10, 10], ["Spike", "Baseline", "Baseline"]) corrupted_od.set_annotations(new_annotations) corrupted_od.plot(n_channels=15, duration=400, show_scrollbars=False) diff --git a/examples/preprocessing/ica_comparison.py b/examples/preprocessing/ica_comparison.py index f9fb7d75764..d4246b80362 100644 --- a/examples/preprocessing/ica_comparison.py +++ b/examples/preprocessing/ica_comparison.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-ica-comp: @@ -13,15 +12,15 @@ # Authors: Pierre Ablin # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
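+
+# %%
+# Every comparison below goes through the same :class:`~mne.preprocessing.ICA`
+# interface; only ``method`` (and, for extended Infomax, ``fit_params``) changes.
+# As a minimal sketch of one such fit -- assuming a band-pass filtered ``raw`` is
+# already loaded, and reusing the parameter values of the helper defined below::
+#
+#     ica = ICA(n_components=20, method="picard", max_iter="auto", random_state=0)
+#     ica.fit(raw, reject=dict(mag=5e-12, grad=4000e-13))
+#     ica.plot_components()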
# %% from time import time import mne -from mne.preprocessing import ICA from mne.datasets import sample - +from mne.preprocessing import ICA print(__doc__) @@ -32,13 +31,13 @@ # - 1-30 Hz band-pass filter data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" -raw = mne.io.read_raw_fif(raw_fname).crop(0, 60).pick('meg').load_data() +raw = mne.io.read_raw_fif(raw_fname).crop(0, 60).pick("meg").load_data() reject = dict(mag=5e-12, grad=4000e-13) -raw.filter(1, 30, fir_design='firwin') +raw.filter(1, 30, fir_design="firwin") # %% @@ -46,27 +45,32 @@ def run_ica(method, fit_params=None): - ica = ICA(n_components=20, method=method, fit_params=fit_params, - max_iter='auto', random_state=0) + ica = ICA( + n_components=20, + method=method, + fit_params=fit_params, + max_iter="auto", + random_state=0, + ) t0 = time() ica.fit(raw, reject=reject) fit_time = time() - t0 - title = ('ICA decomposition using %s (took %.1fs)' % (method, fit_time)) + title = f"ICA decomposition using {method} (took {fit_time:.1f}s)" ica.plot_components(title=title) # %% # FastICA -run_ica('fastica') +run_ica("fastica") # %% # Picard -run_ica('picard') +run_ica("picard") # %% # Infomax -run_ica('infomax') +run_ica("infomax") # %% # Extended Infomax -run_ica('infomax', fit_params=dict(extended=True)) +run_ica("infomax", fit_params=dict(extended=True)) diff --git a/examples/preprocessing/interpolate_bad_channels.py b/examples/preprocessing/interpolate_bad_channels.py index b2893152e1f..a56aec7d8f7 100644 --- a/examples/preprocessing/interpolate_bad_channels.py +++ b/examples/preprocessing/interpolate_bad_channels.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-interpolate-bad-channels: @@ -18,6 +17,7 @@ # Mainak Jas # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -29,24 +29,24 @@ print(__doc__) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fname = meg_path / 'sample_audvis-ave.fif' -evoked = mne.read_evokeds(fname, condition='Left Auditory', - baseline=(None, 0)) +meg_path = data_path / "MEG" / "sample" +fname = meg_path / "sample_audvis-ave.fif" +evoked = mne.read_evokeds(fname, condition="Left Auditory", baseline=(None, 0)) # plot with bads -evoked.plot(exclude=[], picks=('grad', 'eeg')) +evoked.plot(exclude=[], picks=("grad", "eeg")) # %% # Compute interpolation (also works with Raw and Epochs objects) evoked_interp = evoked.copy().interpolate_bads(reset_bads=False) -evoked_interp.plot(exclude=[], picks=('grad', 'eeg')) +evoked_interp.plot(exclude=[], picks=("grad", "eeg")) # %% # You can also use minimum-norm for EEG as well as MEG evoked_interp_mne = evoked.copy().interpolate_bads( - reset_bads=False, method=dict(eeg='MNE'), verbose=True) -evoked_interp_mne.plot(exclude=[], picks=('grad', 'eeg')) + reset_bads=False, method=dict(eeg="MNE"), verbose=True +) +evoked_interp_mne.plot(exclude=[], picks=("grad", "eeg")) # %% # References diff --git a/examples/preprocessing/interpolate_to.py b/examples/preprocessing/interpolate_to.py new file mode 100644 index 00000000000..b97a7251cbb --- /dev/null +++ b/examples/preprocessing/interpolate_to.py @@ -0,0 +1,81 @@ +""" +.. 
_ex-interpolate-to-any-montage:
+
+======================================================
+Interpolate EEG data to any montage
+======================================================
+
+This example demonstrates how to interpolate EEG channels to match a given montage.
+This can be useful for standardizing
+EEG channel layouts across different datasets (see :footcite:`MellotEtAl2024`).
+
+- Using field interpolation for EEG data.
+- Using the target montage "biosemi16".
+
+In this example, the data from the original EEG channels will be
+interpolated onto the positions defined by the "biosemi16" montage.
+"""
+
+# Authors: Antoine Collas
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import matplotlib.pyplot as plt
+
+import mne
+from mne.channels import make_standard_montage
+from mne.datasets import sample
+
+print(__doc__)
+ylim = (-10, 10)
+
+# %%
+# Load EEG data
+data_path = sample.data_path()
+eeg_file_path = data_path / "MEG" / "sample" / "sample_audvis-ave.fif"
+evoked = mne.read_evokeds(eeg_file_path, condition="Left Auditory", baseline=(None, 0))
+
+# Select only EEG channels
+evoked.pick("eeg")
+
+# Plot the original EEG layout
+evoked.plot(exclude=[], picks="eeg", ylim=dict(eeg=ylim))
+
+# %%
+# Define the target montage
+standard_montage = make_standard_montage("biosemi16")
+
+# %%
+# Use interpolate_to to project EEG data to the target montage (spline interpolation)
+evoked_interpolated_spline = evoked.copy().interpolate_to(
+    standard_montage, method="spline"
+)
+
+# Plot the interpolated EEG layout
+evoked_interpolated_spline.plot(exclude=[], picks="eeg", ylim=dict(eeg=ylim))
+
+# %%
+# Use interpolate_to to project EEG data to the target montage (MNE interpolation)
+evoked_interpolated_mne = evoked.copy().interpolate_to(standard_montage, method="MNE")
+
+# Plot the interpolated EEG layout
+evoked_interpolated_mne.plot(exclude=[], picks="eeg", ylim=dict(eeg=ylim))
+
+# %%
+# Comparing before and after interpolation
+fig, axs = plt.subplots(3, 1, figsize=(8, 6), constrained_layout=True)
+evoked.plot(exclude=[], picks="eeg", axes=axs[0], show=False, ylim=dict(eeg=ylim))
+axs[0].set_title("Original EEG Layout")
+evoked_interpolated_spline.plot(
+    exclude=[], picks="eeg", axes=axs[1], show=False, ylim=dict(eeg=ylim)
+)
+axs[1].set_title("Interpolated to the biosemi16 montage using spline interpolation")
+evoked_interpolated_mne.plot(
+    exclude=[], picks="eeg", axes=axs[2], show=False, ylim=dict(eeg=ylim)
+)
+axs[2].set_title("Interpolated to the biosemi16 montage using MNE interpolation")
+
+# %%
+# References
+# ----------
+# .. footbibliography::
diff --git a/examples/preprocessing/locate_ieeg_micro.py b/examples/preprocessing/locate_ieeg_micro.py
deleted file mode 100644
index 6433d2f6829..00000000000
--- a/examples/preprocessing/locate_ieeg_micro.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-.. _ex-ieeg-micro:
-
-====================================================
-Locating micro-scale intracranial electrode contacts
-====================================================
-
-When intracranial electrode contacts are very small, sometimes
-the computed tomography (CT) scan is higher resolution than the
-magnetic resonance (MR) image and so you want to find the contacts
-on the CT without downsampling to the MR resolution. This example
-shows how to do this.
-""" - -# Authors: Alex Rockhill -# -# License: BSD-3-Clause - -import numpy as np -import nibabel as nib -import mne - -# path to sample sEEG -misc_path = mne.datasets.misc.data_path() -subjects_dir = misc_path / 'seeg' - -# GUI requires pyvista backend -mne.viz.set_3d_backend('pyvistaqt') - -# we need three things: -# 1) The electrophysiology file which contains the channels names -# that we would like to associate with positions in the brain -# 2) The CT where the electrode contacts show up with high intensity -# 3) The MR where the brain is best visible (low contrast in CT) -raw = mne.io.read_raw(misc_path / 'seeg' / 'sample_seeg_ieeg.fif') -CT_orig = nib.load(misc_path / 'seeg' / 'sample_seeg_CT.mgz') -T1 = nib.load(misc_path / 'seeg' / 'sample_seeg' / 'mri' / 'T1.mgz') - -# we'll also need a head-CT surface RAS transform, this can be faked with an -# identify matrix but we'll find the fiducials on the CT in freeview (be sure -# to find them in surface RAS (TkReg RAS in freeview) and not scanner RAS -# (RAS in freeview)) (also be sure to note left is generally on the right in -# freeview) and reproduce them here: -montage = mne.channels.make_dig_montage( - nasion=[-28.97, -5.88, -76.40], lpa=[-96.35, -16.26, 17.63], - rpa=[31.28, -52.95, -0.69], coord_frame='mri') -raw.set_montage(montage, on_missing='ignore') # haven't located yet! -head_ct_t = mne.transforms.invert_transform( - mne.channels.compute_native_head_t(montage)) - -# note: coord_frame = 'mri' is a bit of a misnormer, it is a reference to -# the surface RAS coordinate frame, here it is of the CT - - -# launch the viewer with only the CT (note, we won't be able to use -# the MR in this case to help determine which brain area the contact is -# in), and use the user interface to find the locations of the contacts -gui = mne.gui.locate_ieeg(raw.info, head_ct_t, CT_orig) - -# we'll programmatically mark all the contacts on one electrode shaft -for i, pos in enumerate([(-52.66, -40.84, -26.99), (-55.47, -38.03, -27.92), - (-57.68, -36.27, -28.85), (-59.89, -33.81, -29.32), - (-62.57, -31.35, -30.37), (-65.13, -29.07, -31.30), - (-67.57, -26.26, -31.88)]): - gui.set_RAS(pos) - gui.mark_channel(f'LENT {i + 1}') - -# finally, the coordinates will be in "head" (unless the trans was faked -# as the identity, in which case they will be in surface RAS of the CT already) -# so we need to convert them to scanner RAS of the CT, apply the alignment so -# that they are in scanner RAS of the MRI and from there to surface RAS -# of the MRI for viewing using freesurfer recon-all surfaces--fortunately -# that is done for us in `mne.transforms.apply_volume_registration_points` - -# note that since we didn't fake the head->CT surface RAS transform, we -# could apply the head->mri transform directly but that relies of the -# fiducial points being marked exactly the same on the CT as on the MRI-- -# the error from this is not precise enough for intracranial electrophysiology, -# better is to rely on the precision of the CT-MR image registration - -reg_affine = np.array([ # CT-MR registration - [0.99270756, -0.03243313, 0.11610254, -133.094156], - [0.04374389, 0.99439665, -0.09623816, -97.58320673], - [-0.11233068, 0.10061512, 0.98856381, -84.45551601], - [0., 0., 0., 1.]]) - -raw.info, head_mri_t = mne.transforms.apply_volume_registration_points( - raw.info, head_ct_t, CT_orig, T1, reg_affine) - -brain = mne.viz.Brain(subject='sample_seeg', subjects_dir=subjects_dir, - alpha=0.5) -brain.add_sensors(raw.info, head_mri_t) -brain.show_view(azimuth=120, 
elevation=100) diff --git a/examples/preprocessing/movement_compensation.py b/examples/preprocessing/movement_compensation.py index 51913e30ec0..4577a7e0a51 100644 --- a/examples/preprocessing/movement_compensation.py +++ b/examples/preprocessing/movement_compensation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-movement-comp: @@ -17,6 +16,7 @@ # Authors: Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -25,11 +25,11 @@ print(__doc__) -data_path = mne.datasets.misc.data_path(verbose=True) / 'movement' +data_path = mne.datasets.misc.data_path(verbose=True) / "movement" -head_pos = mne.chpi.read_head_pos(data_path / 'simulated_quats.pos') -raw = mne.io.read_raw_fif(data_path / 'simulated_movement_raw.fif') -raw_stat = mne.io.read_raw_fif(data_path / 'simulated_stationary_raw.fif') +head_pos = mne.chpi.read_head_pos(data_path / "simulated_quats.pos") +raw = mne.io.read_raw_fif(data_path / "simulated_movement_raw.fif") +raw_stat = mne.io.read_raw_fif(data_path / "simulated_stationary_raw.fif") # %% # Visualize the "subject" head movements. By providing the measurement @@ -38,29 +38,31 @@ # be shown in blue, and the destination (if given) shown in red. mne.viz.plot_head_positions( - head_pos, mode='traces', destination=raw.info['dev_head_t'], info=raw.info) + head_pos, mode="traces", destination=raw.info["dev_head_t"], info=raw.info +) # %% # This can also be visualized using a quiver. mne.viz.plot_head_positions( - head_pos, mode='field', destination=raw.info['dev_head_t'], info=raw.info) + head_pos, mode="field", destination=raw.info["dev_head_t"], info=raw.info +) # %% # Process our simulated raw data (taking into account head movements). # extract our resulting events -events = mne.find_events(raw, stim_channel='STI 014') +events = mne.find_events(raw, stim_channel="STI 014") events[:, 2] = 1 raw.plot(events=events) -topo_kwargs = dict(times=[0, 0.1, 0.2], ch_type='mag', vlim=(-500, 500)) +topo_kwargs = dict(times=[0, 0.1, 0.2], ch_type="mag", vlim=(-500, 500)) # %% # First, take the average of stationary data (bilateral auditory patterns). evoked_stat = mne.Epochs(raw_stat, events, 1, -0.2, 0.8).average() fig = evoked_stat.plot_topomap(**topo_kwargs) -fig.suptitle('Stationary') +fig.suptitle("Stationary") # %% # Second, take a naive average, which averages across epochs that have been @@ -69,18 +71,18 @@ epochs = mne.Epochs(raw, events, 1, -0.2, 0.8) evoked = epochs.average() fig = evoked.plot_topomap(**topo_kwargs) -fig.suptitle('Moving: naive average') +fig.suptitle("Moving: naive average") # %% # Third, use raw movement compensation (restores pattern). -raw_sss = maxwell_filter(raw, head_pos=head_pos) +raw_sss = maxwell_filter(raw, head_pos=head_pos, mc_interp="hann") evoked_raw_mc = mne.Epochs(raw_sss, events, 1, -0.2, 0.8).average() fig = evoked_raw_mc.plot_topomap(**topo_kwargs) -fig.suptitle('Moving: movement compensated (raw)') +fig.suptitle("Moving: movement compensated (raw)") # %% # Fourth, use evoked movement compensation. For these data, which contain # very large rotations, it does not as cleanly restore the pattern. 
evoked_evo_mc = mne.epochs.average_movements(epochs, head_pos=head_pos) fig = evoked_evo_mc.plot_topomap(**topo_kwargs) -fig.suptitle('Moving: movement compensated (evoked)') +fig.suptitle("Moving: movement compensated (evoked)") diff --git a/examples/preprocessing/movement_detection.py b/examples/preprocessing/movement_detection.py index d84548792df..dd468feb464 100644 --- a/examples/preprocessing/movement_detection.py +++ b/examples/preprocessing/movement_detection.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-movement-detect: @@ -20,6 +19,7 @@ # Authors: Adonay Nunes # Luke Bloy # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -30,16 +30,17 @@ # Load data data_path = bst_auditory.data_path() -data_path_MEG = data_path / 'MEG' -subject = 'bst_auditory' -subjects_dir = data_path / 'subjects' -trans_fname = data_path / 'MEG' / 'bst_auditory' / 'bst_auditory-trans.fif' -raw_fname1 = data_path_MEG / 'bst_auditory' / 'S01_AEF_20131218_01.ds' -raw_fname2 = data_path_MEG / 'bst_auditory' / 'S01_AEF_20131218_02.ds' +data_path_MEG = data_path / "MEG" +subject = "bst_auditory" +subjects_dir = data_path / "subjects" +trans_fname = data_path / "MEG" / "bst_auditory" / "bst_auditory-trans.fif" +raw_fname1 = data_path_MEG / "bst_auditory" / "S01_AEF_20131218_01.ds" +raw_fname2 = data_path_MEG / "bst_auditory" / "S01_AEF_20131218_02.ds" # read and concatenate two files, ignoring device<->head mismatch raw = read_raw_ctf(raw_fname1, preload=False) mne.io.concatenate_raws( - [raw, read_raw_ctf(raw_fname2, preload=False)], on_mismatch='ignore') + [raw, read_raw_ctf(raw_fname2, preload=False)], on_mismatch="ignore" +) raw.crop(350, 410).load_data() raw.resample(100, npad="auto") @@ -50,15 +51,18 @@ # Get cHPI time series and compute average chpi_locs = mne.chpi.extract_chpi_locs_ctf(raw) head_pos = mne.chpi.compute_head_pos(raw.info, chpi_locs) -original_head_dev_t = mne.transforms.invert_transform( - raw.info['dev_head_t']) +original_head_dev_t = mne.transforms.invert_transform(raw.info["dev_head_t"]) average_head_dev_t = mne.transforms.invert_transform( - compute_average_dev_head_t(raw, head_pos)) + compute_average_dev_head_t(raw, head_pos) +) fig = mne.viz.plot_head_positions(head_pos) -for ax, val, val_ori in zip(fig.axes[::2], average_head_dev_t['trans'][:3, 3], - original_head_dev_t['trans'][:3, 3]): - ax.axhline(1000 * val, color='r') - ax.axhline(1000 * val_ori, color='g') +for ax, val, val_ori in zip( + fig.axes[::2], + average_head_dev_t["trans"][:3, 3], + original_head_dev_t["trans"][:3, 3], +): + ax.axhline(1000 * val, color="r") + ax.axhline(1000 * val_ori, color="g") # The green horizontal lines represent the original head position, whereas the # red lines are the new head position averaged over all the time points. 
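+
+# %%
+# As a quick numerical check of the two transforms compared above, one can also
+# print how far the averaged head position lies from the original device-to-head
+# transform (a minimal sketch using the variables already defined in this example):
+
+shift_mm = 1000 * (
+    average_head_dev_t["trans"][:3, 3] - original_head_dev_t["trans"][:3, 3]
+)
+print("Average minus original head position (mm):", shift_mm)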
@@ -67,17 +71,24 @@ # Plot raw data with annotated movement # ------------------------------------------------------------------ -mean_distance_limit = .0015 # in meters +mean_distance_limit = 0.0015 # in meters annotation_movement, hpi_disp = annotate_movement( - raw, head_pos, mean_distance_limit=mean_distance_limit) + raw, head_pos, mean_distance_limit=mean_distance_limit +) raw.set_annotations(annotation_movement) raw.plot(n_channels=100, duration=20) ############################################################################## # After checking the annotated movement artifacts, calculate the new transform # and plot it: + new_dev_head_t = compute_average_dev_head_t(raw, head_pos) -raw.info['dev_head_t'] = new_dev_head_t -fig = mne.viz.plot_alignment(raw.info, show_axes=True, subject=subject, - trans=trans_fname, subjects_dir=subjects_dir) +raw.info["dev_head_t"] = new_dev_head_t +fig = mne.viz.plot_alignment( + raw.info, + show_axes=True, + subject=subject, + trans=trans_fname, + subjects_dir=subjects_dir, +) mne.viz.set_3d_view(fig, azimuth=90, elevation=60) diff --git a/examples/preprocessing/muscle_detection.py b/examples/preprocessing/muscle_detection.py index d2369b87cd1..3e0e140c802 100644 --- a/examples/preprocessing/muscle_detection.py +++ b/examples/preprocessing/muscle_detection.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-muscle-artifacts: @@ -28,19 +27,20 @@ # Authors: Adonay Nunes # Luke Bloy # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import matplotlib.pyplot as plt import numpy as np + from mne.datasets.brainstorm import bst_auditory from mne.io import read_raw_ctf from mne.preprocessing import annotate_muscle_zscore - # Load data data_path = bst_auditory.data_path() -raw_fname = data_path / 'MEG' / 'bst_auditory' / 'S01_AEF_20131218_01.ds' +raw_fname = data_path / "MEG" / "bst_auditory" / "S01_AEF_20131218_01.ds" raw = read_raw_ctf(raw_fname, preload=False) @@ -65,8 +65,12 @@ # Choose one channel type, if there are axial gradiometers and magnetometers, # select magnetometers as they are more sensitive to muscle activity. annot_muscle, scores_muscle = annotate_muscle_zscore( - raw, ch_type="mag", threshold=threshold_muscle, min_length_good=0.2, - filter_freq=[110, 140]) + raw, + ch_type="mag", + threshold=threshold_muscle, + min_length_good=0.2, + filter_freq=[110, 140], +) # %% # Plot muscle z-scores across recording @@ -74,8 +78,8 @@ fig, ax = plt.subplots() ax.plot(raw.times, scores_muscle) -ax.axhline(y=threshold_muscle, color='r') -ax.set(xlabel='time, (s)', ylabel='zscore', title='Muscle activity') +ax.axhline(y=threshold_muscle, color="r") +ax.set(xlabel="time, (s)", ylabel="zscore", title="Muscle activity") # %% # View the annotations # -------------------------------------------------------------------------- diff --git a/examples/preprocessing/muscle_ica.py b/examples/preprocessing/muscle_ica.py index 14960a761b9..f61d1e22bc4 100644 --- a/examples/preprocessing/muscle_ica.py +++ b/examples/preprocessing/muscle_ica.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-muscle-ica: @@ -12,34 +11,35 @@ artifact is produced during postural maintenance. This is more appropriately removed by ICA otherwise there wouldn't be any epochs left! Note that muscle artifacts of this kind are much more pronounced in EEG than they are in MEG. - """ # Authors: Alex Rockhill # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
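+
+# %%
+# In outline, the approach in this example is: high-pass filter the EEG, fit ICA,
+# identify the muscle-related components, and mark them for exclusion. As a rough
+# sketch (assuming a preloaded, filtered ``raw``; the parameter values mirror the
+# ones used below)::
+#
+#     ica = mne.preprocessing.ICA(n_components=15, method="picard", max_iter="auto")
+#     ica.fit(raw)
+#     muscle_idx, scores = ica.find_bads_muscle(raw)
+#     ica.exclude = muscle_idx
+#     raw_clean = ica.apply(raw.copy())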
# %% import mne data_path = mne.datasets.sample.data_path() -raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif' +raw_fname = data_path / "MEG" / "sample" / "sample_audvis_raw.fif" raw = mne.io.read_raw_fif(raw_fname) raw.crop(tmin=100, tmax=130) # take 30 seconds for speed # pick only EEG channels, muscle artifact is basically not picked up by MEG # if you have a simultaneous recording, you may want to do ICA on MEG and EEG # separately -raw.pick_types(eeg=True) +raw.pick(picks="eeg", exclude="bads") # ICA works best with a highpass filter applied raw.load_data() -raw.filter(l_freq=1., h_freq=None) +raw.filter(l_freq=1.0, h_freq=None) # %% # Run ICA ica = mne.preprocessing.ICA( - n_components=15, method='picard', max_iter='auto', random_state=97) + n_components=15, method="picard", max_iter="auto", random_state=97 +) ica.fit(raw) # %% @@ -86,8 +86,10 @@ # and ensure that it gets the same components we did manually. muscle_idx_auto, scores = ica.find_bads_muscle(raw) ica.plot_scores(scores, exclude=muscle_idx_auto) -print(f'Manually found muscle artifact ICA components: {muscle_idx}\n' - f'Automatically found muscle artifact ICA components: {muscle_idx_auto}') +print( + f"Manually found muscle artifact ICA components: {muscle_idx}\n" + f"Automatically found muscle artifact ICA components: {muscle_idx_auto}" +) # %% # Let's now replicate this on the EEGBCI dataset @@ -95,24 +97,28 @@ for sub in (1, 2): raw = mne.io.read_raw_edf( - mne.datasets.eegbci.load_data(subject=sub, runs=(1,))[0], preload=True) + mne.datasets.eegbci.load_data(subjects=sub, runs=(1,))[0], preload=True + ) mne.datasets.eegbci.standardize(raw) # set channel names - montage = mne.channels.make_standard_montage('standard_1005') + montage = mne.channels.make_standard_montage("standard_1005") raw.set_montage(montage) - raw.filter(l_freq=1., h_freq=None) + raw.filter(l_freq=1.0, h_freq=None) # Run ICA ica = mne.preprocessing.ICA( - n_components=15, method='picard', max_iter='auto', random_state=97) + n_components=15, method="picard", max_iter="auto", random_state=97 + ) ica.fit(raw) ica.plot_sources(raw) muscle_idx_auto, scores = ica.find_bads_muscle(raw) ica.plot_properties(raw, picks=muscle_idx_auto, log_scale=True) ica.plot_scores(scores, exclude=muscle_idx_auto) - print(f'Manually found muscle artifact ICA components: {muscle_idx}\n' - 'Automatically found muscle artifact ICA components: ' - f'{muscle_idx_auto}') + print( + f"Manually found muscle artifact ICA components: {muscle_idx}\n" + "Automatically found muscle artifact ICA components: " + f"{muscle_idx_auto}" + ) # %% # References diff --git a/examples/preprocessing/otp.py b/examples/preprocessing/otp.py index e2bf81f7480..a7d45d4cbc7 100644 --- a/examples/preprocessing/otp.py +++ b/examples/preprocessing/otp.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-otp: @@ -6,18 +5,18 @@ Plot sensor denoising using oversampled temporal projection =========================================================== -This demonstrates denoising using the OTP algorithm :footcite:`LarsonTaulu2018` -on data with with sensor artifacts (flux jumps) and random noise. +This demonstrates denoising using the OTP algorithm :footcite:`LarsonTaulu2018` on data +with with sensor artifacts (flux jumps) and random noise. """ # Author: Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
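+
+# %%
+# Roughly speaking, OTP rebuilds each channel from a temporally oversampled
+# projection onto the remaining channels, so noise that is uncorrelated across
+# sensors (such as the flux jumps shown below) is suppressed while signals shared
+# across sensors are preserved. The core call is a one-liner (sketched here for an
+# already-loaded ``raw``)::
+#
+#     raw_clean = mne.preprocessing.oversampled_temporal_projection(raw)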
# %% - -import mne import numpy as np +import mne from mne import find_events, fit_dipole from mne.datasets.brainstorm import bst_phantom_elekta from mne.io import read_raw_fif @@ -33,17 +32,17 @@ dipole_number = 1 data_path = bst_phantom_elekta.data_path() -raw = read_raw_fif(data_path / 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif') -raw.crop(40., 50.).load_data() +raw = read_raw_fif(data_path / "kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif") +raw.crop(40.0, 50.0).load_data() order = list(range(160, 170)) -raw.copy().filter(0., 40.).plot(order=order, n_channels=10) +raw.copy().filter(0.0, 40.0).plot(order=order, n_channels=10) # %% # Now we can clean the data with OTP, lowpass, and plot. The flux jumps have # been suppressed alongside the random sensor noise. raw_clean = mne.preprocessing.oversampled_temporal_projection(raw) -raw_clean.filter(0., 40.) +raw_clean.filter(0.0, 40.0) raw_clean.plot(order=order, n_channels=10) @@ -53,31 +52,36 @@ # for more information. Here we use a version that does single-trial # localization across the 17 trials are in our 10-second window: + def compute_bias(raw): - events = find_events(raw, 'STI201', verbose=False) + events = find_events(raw, "STI201", verbose=False) events = events[1:] # first one has an artifact tmin, tmax = -0.2, 0.1 - epochs = mne.Epochs(raw, events, dipole_number, tmin, tmax, - baseline=(None, -0.01), preload=True, verbose=False) - sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None, - verbose=False) - cov = mne.compute_covariance(epochs, tmax=0, method='oas', - rank=None, verbose=False) + epochs = mne.Epochs( + raw, + events, + dipole_number, + tmin, + tmax, + baseline=(None, -0.01), + preload=True, + verbose=False, + ) + sphere = mne.make_sphere_model(r0=(0.0, 0.0, 0.0), head_radius=None, verbose=False) + cov = mne.compute_covariance(epochs, tmax=0, method="oas", rank=None, verbose=False) idx = epochs.time_as_index(0.036)[0] - data = epochs.get_data()[:, :, idx].T - evoked = mne.EvokedArray(data, epochs.info, tmin=0.) - dip = fit_dipole(evoked, cov, sphere, n_jobs=None, verbose=False)[0] + data = epochs.get_data(copy=False)[:, :, idx].T + evoked = mne.EvokedArray(data, epochs.info, tmin=0.0) + dip = fit_dipole(evoked, cov, sphere, verbose=False)[0] actual_pos = mne.dipole.get_phantom_dipoles()[0][dipole_number - 1] misses = 1000 * np.linalg.norm(dip.pos - actual_pos, axis=-1) return misses bias = compute_bias(raw) -print('Raw bias: %0.1fmm (worst: %0.1fmm)' - % (np.mean(bias), np.max(bias))) +print(f"Raw bias: {np.mean(bias):0.1f}mm (worst: {np.max(bias):0.1f}mm)") bias_clean = compute_bias(raw_clean) -print('OTP bias: %0.1fmm (worst: %0.1fmm)' - % (np.mean(bias_clean), np.max(bias_clean),)) +print(f"OTP bias: {np.mean(bias_clean):0.1f}mm (worst: {np.max(bias_clean):0.1f}m)") # %% # References diff --git a/examples/preprocessing/shift_evoked.py b/examples/preprocessing/shift_evoked.py index ba938d88993..0e8c52676fe 100644 --- a/examples/preprocessing/shift_evoked.py +++ b/examples/preprocessing/shift_evoked.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-shift-evoked: @@ -10,43 +9,55 @@ # Author: Mainak Jas # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
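+
+# %%
+# The key call in this example is :meth:`mne.Evoked.shift_time`: with
+# ``relative=True`` the given shift is added to every time point, whereas with
+# ``relative=False`` the first sample is moved to the requested time. As a minimal
+# sketch (assuming an ``evoked`` object is already loaded)::
+#
+#     evoked.shift_time(0.5, relative=True)   # add 0.5 s to the time axis
+#     evoked.shift_time(0.0, relative=False)  # place the first sample at t=0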
# %% import matplotlib.pyplot as plt + import mne -from mne.viz import tight_layout from mne.datasets import sample print(__doc__) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fname = meg_path / 'sample_audvis-ave.fif' +meg_path = data_path / "MEG" / "sample" +fname = meg_path / "sample_audvis-ave.fif" # Reading evoked data -condition = 'Left Auditory' -evoked = mne.read_evokeds(fname, condition=condition, baseline=(None, 0), - proj=True) +condition = "Left Auditory" +evoked = mne.read_evokeds(fname, condition=condition, baseline=(None, 0), proj=True) -ch_names = evoked.info['ch_names'] -picks = mne.pick_channels(ch_names=ch_names, include=["MEG 2332"]) +picks = ["MEG 2332"] # Create subplots f, (ax1, ax2, ax3) = plt.subplots(3) -evoked.plot(exclude=[], picks=picks, axes=ax1, - titles=dict(grad='Before time shifting'), time_unit='s') +evoked.plot( + exclude=[], + picks=picks, + axes=ax1, + titles=dict(grad="Before time shifting"), + time_unit="s", +) # Apply relative time-shift of 500 ms evoked.shift_time(0.5, relative=True) -evoked.plot(exclude=[], picks=picks, axes=ax2, - titles=dict(grad='Relative shift: 500 ms'), time_unit='s') +evoked.plot( + exclude=[], + picks=picks, + axes=ax2, + titles=dict(grad="Relative shift: 500 ms"), + time_unit="s", +) # Apply absolute time-shift of 500 ms evoked.shift_time(0.5, relative=False) -evoked.plot(exclude=[], picks=picks, axes=ax3, - titles=dict(grad='Absolute shift: 500 ms'), time_unit='s') - -tight_layout() +evoked.plot( + exclude=[], + picks=picks, + axes=ax3, + titles=dict(grad="Absolute shift: 500 ms"), + time_unit="s", +) diff --git a/examples/preprocessing/virtual_evoked.py b/examples/preprocessing/virtual_evoked.py index 32c93a4929a..20f7527f1da 100644 --- a/examples/preprocessing/virtual_evoked.py +++ b/examples/preprocessing/virtual_evoked.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-virtual-evoked: @@ -18,6 +17,7 @@ # Author: Mainak Jas # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -28,35 +28,35 @@ # read the evoked data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fname = meg_path / 'sample_audvis-ave.fif' -evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0)) +meg_path = data_path / "MEG" / "sample" +fname = meg_path / "sample_audvis-ave.fif" +evoked = mne.read_evokeds(fname, condition="Left Auditory", baseline=(None, 0)) # %% # First, let's call remap gradiometers to magnometers, and plot # the original and remapped topomaps of the magnetometers. 
# go from grad + mag to mag and plot original mag -virt_evoked = evoked.as_type('mag') -fig = evoked.plot_topomap(ch_type='mag') -fig.suptitle('mag (original)') +virt_evoked = evoked.as_type("mag") +fig = evoked.plot_topomap(ch_type="mag") +fig.suptitle("mag (original)") # %% # plot interpolated grad + mag -fig = virt_evoked.plot_topomap(ch_type='mag') -fig.suptitle('mag (interpolated from mag + grad)') +fig = virt_evoked.plot_topomap(ch_type="mag") +fig.suptitle("mag (interpolated from mag + grad)") # %% # Now, we remap magnometers to gradiometers, and plot # the original and remapped topomaps of the gradiometers # go from grad + mag to grad and plot original grad -virt_evoked = evoked.as_type('grad') -fig = evoked.plot_topomap(ch_type='grad') -fig.suptitle('grad (original)') +virt_evoked = evoked.as_type("grad") +fig = evoked.plot_topomap(ch_type="grad") +fig.suptitle("grad (original)") # %% # plot interpolated grad + mag -fig = virt_evoked.plot_topomap(ch_type='grad') -fig.suptitle('grad (interpolated from mag + grad)') +fig = virt_evoked.plot_topomap(ch_type="grad") +fig.suptitle("grad (interpolated from mag + grad)") diff --git a/examples/preprocessing/xdawn_denoising.py b/examples/preprocessing/xdawn_denoising.py index 10699c41998..20a6abc72fb 100644 --- a/examples/preprocessing/xdawn_denoising.py +++ b/examples/preprocessing/xdawn_denoising.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-xdawn-denoising: @@ -22,11 +21,11 @@ # Authors: Alexandre Barachant # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% - -from mne import (io, compute_raw_covariance, read_events, pick_types, Epochs) +from mne import Epochs, compute_raw_covariance, io, pick_types, read_events from mne.datasets import sample from mne.preprocessing import Xdawn from mne.viz import plot_epochs_image @@ -37,27 +36,35 @@ # %% # Set parameters and read data -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" tmin, tmax = -0.1, 0.3 event_id = dict(vis_r=4) # Setup for reading the raw data raw = io.read_raw_fif(raw_fname, preload=True) -raw.filter(1, 20, fir_design='firwin') # replace baselining with high-pass +raw.filter(1, 20, fir_design="firwin") # replace baselining with high-pass events = read_events(event_fname) -raw.info['bads'] = ['MEG 2443'] # set bad channels -picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False, - exclude='bads') +raw.info["bads"] = ["MEG 2443"] # set bad channels +picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False, exclude="bads") # Epoching -epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False, - picks=picks, baseline=None, preload=True, - verbose=False) +epochs = Epochs( + raw, + events, + event_id, + tmin, + tmax, + proj=False, + picks=picks, + baseline=None, + preload=True, + verbose=False, +) # Plot image epoch before xdawn -plot_epochs_image(epochs['vis_r'], picks=[230], vmin=-500, vmax=500) +plot_epochs_image(epochs["vis_r"], picks=[230], vmin=-500, vmax=500) # %% # Now, we estimate a set of xDAWN filters for the epochs (which contain only @@ -79,7 +86,7 @@ epochs_denoised = xd.apply(epochs) # Plot image epoch after Xdawn -plot_epochs_image(epochs_denoised['vis_r'], picks=[230], vmin=-500, vmax=500) +plot_epochs_image(epochs_denoised["vis_r"], 
picks=[230], vmin=-500, vmax=500) # %% # References diff --git a/examples/simulation/plot_stc_metrics.py b/examples/simulation/plot_stc_metrics.py index 2e53c6bcd02..8b481aed9e6 100644 --- a/examples/simulation/plot_stc_metrics.py +++ b/examples/simulation/plot_stc_metrics.py @@ -11,40 +11,47 @@ """ # Author: Kostiantyn Maksymenko # -# License: BSD (3-clause) +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import numpy as np -import matplotlib.pyplot as plt from functools import partial +import matplotlib.pyplot as plt +import numpy as np + import mne from mne.datasets import sample -from mne.minimum_norm import make_inverse_operator, apply_inverse -from mne.simulation.metrics import (region_localization_error, - f1_score, precision_score, - recall_score, cosine_score, - peak_position_error, - spatial_deviation_error) +from mne.minimum_norm import apply_inverse, make_inverse_operator +from mne.simulation.metrics import ( + cosine_score, + f1_score, + peak_position_error, + precision_score, + recall_score, + region_localization_error, + spatial_deviation_error, +) random_state = 42 # set random state to make this example deterministic # Import sample data data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -subject = 'sample' -evoked_fname = data_path / 'MEG' / subject / 'sample_audvis-ave.fif' +subjects_dir = data_path / "subjects" +subject = "sample" +evoked_fname = data_path / "MEG" / subject / "sample_audvis-ave.fif" info = mne.io.read_info(evoked_fname) -tstep = 1. / info['sfreq'] +tstep = 1.0 / info["sfreq"] # Import forward operator and source space -fwd_fname = data_path / 'MEG' / subject / 'sample_audvis-meg-eeg-oct-6-fwd.fif' +fwd_fname = data_path / "MEG" / subject / "sample_audvis-meg-eeg-oct-6-fwd.fif" fwd = mne.read_forward_solution(fwd_fname) -src = fwd['src'] +src = fwd["src"] # To select source, we use the caudal middle frontal to grow # a region of interest. selected_label = mne.read_labels_from_annot( - subject, regexp='caudalmiddlefrontal-lh', subjects_dir=subjects_dir)[0] + subject, regexp="caudalmiddlefrontal-lh", subjects_dir=subjects_dir +)[0] ############################################################################### @@ -61,22 +68,32 @@ # WHERE? # Region -location = 'center' # Use the center of the label as a seed. -extent = 20. # Extent in mm of the region. +location = "center" # Use the center of the label as a seed. +extent = 20.0 # Extent in mm of the region. label_region = mne.label.select_sources( - subject, selected_label, location=location, extent=extent, - subjects_dir=subjects_dir, random_state=random_state) + subject, + selected_label, + location=location, + extent=extent, + subjects_dir=subjects_dir, + random_state=random_state, +) # Dipole location = 1915 # Use the index of the vertex as a seed -extent = 0. # One dipole source +extent = 0.0 # One dipole source label_dipole = mne.label.select_sources( - subject, selected_label, location=location, extent=extent, - subjects_dir=subjects_dir, random_state=random_state) + subject, + selected_label, + location=location, + extent=extent, + subjects_dir=subjects_dir, + random_state=random_state, +) # WHAT? # Define the time course of the activity -source_time_series = np.sin(2. * np.pi * 18. * np.arange(100) * tstep) * 10e-9 +source_time_series = np.sin(2.0 * np.pi * 18.0 * np.arange(100) * tstep) * 10e-9 # WHEN? # Define when the activity occurs using events. @@ -107,20 +124,20 @@ # noise obtained from the noise covariance from the sample data. 
# Region -raw_region = mne.simulation.simulate_raw(info, source_simulator_region, - forward=fwd) -raw_region = raw_region.pick_types(meg=False, eeg=True, stim=True) +raw_region = mne.simulation.simulate_raw(info, source_simulator_region, forward=fwd) +raw_region = raw_region.pick(picks=["eeg", "stim"], exclude="bads") cov = mne.make_ad_hoc_cov(raw_region.info) -mne.simulation.add_noise(raw_region, cov, iir_filter=[0.2, -0.2, 0.04], - random_state=random_state) +mne.simulation.add_noise( + raw_region, cov, iir_filter=[0.2, -0.2, 0.04], random_state=random_state +) # Dipole -raw_dipole = mne.simulation.simulate_raw(info, source_simulator_dipole, - forward=fwd) -raw_dipole = raw_dipole.pick_types(meg=False, eeg=True, stim=True) +raw_dipole = mne.simulation.simulate_raw(info, source_simulator_dipole, forward=fwd) +raw_dipole = raw_dipole.pick(picks=["eeg", "stim"], exclude="bads") cov = mne.make_ad_hoc_cov(raw_dipole.info) -mne.simulation.add_noise(raw_dipole, cov, iir_filter=[0.2, -0.2, 0.04], - random_state=random_state) +mne.simulation.add_noise( + raw_dipole, cov, iir_filter=[0.2, -0.2, 0.04], random_state=random_state +) ############################################################################### # Compute evoked from raw data @@ -149,14 +166,14 @@ # same number of time samples. # Region -stc_true_region = \ - source_simulator_region.get_stc(start_sample=0, - stop_sample=len(source_time_series)) +stc_true_region = source_simulator_region.get_stc( + start_sample=0, stop_sample=len(source_time_series) +) # Dipole -stc_true_dipole = \ - source_simulator_dipole.get_stc(start_sample=0, - stop_sample=len(source_time_series)) +stc_true_dipole = source_simulator_dipole.get_stc( + start_sample=0, stop_sample=len(source_time_series) +) ############################################################################### # Reconstruct simulated sources @@ -166,27 +183,29 @@ # Region snr = 30.0 -inv_method = 'sLORETA' -lambda2 = 1.0 / snr ** 2 +inv_method = "sLORETA" +lambda2 = 1.0 / snr**2 -inverse_operator = make_inverse_operator(evoked_region.info, fwd, cov, - loose='auto', depth=0.8, - fixed=True) +inverse_operator = make_inverse_operator( + evoked_region.info, fwd, cov, loose="auto", depth=0.8, fixed=True +) -stc_est_region = apply_inverse(evoked_region, inverse_operator, lambda2, - inv_method, pick_ori=None) +stc_est_region = apply_inverse( + evoked_region, inverse_operator, lambda2, inv_method, pick_ori=None +) # Dipole snr = 3.0 -inv_method = 'sLORETA' -lambda2 = 1.0 / snr ** 2 +inv_method = "sLORETA" +lambda2 = 1.0 / snr**2 -inverse_operator = make_inverse_operator(evoked_dipole.info, fwd, cov, - loose='auto', depth=0.8, - fixed=True) +inverse_operator = make_inverse_operator( + evoked_dipole.info, fwd, cov, loose="auto", depth=0.8, fixed=True +) -stc_est_dipole = apply_inverse(evoked_dipole, inverse_operator, lambda2, - inv_method, pick_ori=None) +stc_est_dipole = apply_inverse( + evoked_dipole, inverse_operator, lambda2, inv_method, pick_ori=None +) ############################################################################### # Compute performance scores for different source amplitude thresholds @@ -201,32 +220,34 @@ # # create a set of scorers -scorers = {'RLE': partial(region_localization_error, src=src), - 'Precision': precision_score, 'Recall': recall_score, - 'F1 score': f1_score} +scorers = { + "RLE": partial(region_localization_error, src=src), + "Precision": precision_score, + "Recall": recall_score, + "F1 score": f1_score, +} # compute results region_results = {} for name, scorer 
in scorers.items(): - region_results[name] = [scorer(stc_true_region, stc_est_region, - threshold=f'{thx}%', per_sample=False) - for thx in thresholds] + region_results[name] = [ + scorer(stc_true_region, stc_est_region, threshold=f"{thx}%", per_sample=False) + for thx in thresholds + ] # Plot the results -f, ((ax1, ax2), (ax3, ax4)) = plt.subplots( - 2, 2, sharex='col', constrained_layout=True) +f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex="col", layout="constrained") for ax, (title, results) in zip([ax1, ax2, ax3, ax4], region_results.items()): - ax.plot(thresholds, results, '.-') - ax.set(title=title, ylabel='score', xlabel='Threshold', - xticks=thresholds) + ax.plot(thresholds, results, ".-") + ax.set(title=title, ylabel="score", xlabel="Threshold", xticks=thresholds) -f.suptitle('Performance scores per threshold') # Add Super title -ax1.ticklabel_format(axis='y', style='sci', scilimits=(0, 1)) # tweak RLE +f.suptitle("Performance scores per threshold") # Add Super title +ax1.ticklabel_format(axis="y", style="sci", scilimits=(0, 1)) # tweak RLE # Cosine score with respect to time -f, ax1 = plt.subplots(constrained_layout=True) +f, ax1 = plt.subplots(layout="constrained") ax1.plot(stc_true_region.times, cosine_score(stc_true_region, stc_est_region)) -ax1.set(title='Cosine score', xlabel='Time', ylabel='Score') +ax1.set(title="Cosine score", xlabel="Time", ylabel="Score") ############################################################################### @@ -236,22 +257,28 @@ # create a set of scorers scorers = { - 'Peak Position Error': peak_position_error, - 'Spatial Deviation Error': spatial_deviation_error, + "Peak Position Error": peak_position_error, + "Spatial Deviation Error": spatial_deviation_error, } # compute results dipole_results = {} for name, scorer in scorers.items(): - dipole_results[name] = [scorer(stc_true_dipole, stc_est_dipole, src=src, - threshold=f'{thx}%', per_sample=False) - for thx in thresholds] + dipole_results[name] = [ + scorer( + stc_true_dipole, + stc_est_dipole, + src=src, + threshold=f"{thx}%", + per_sample=False, + ) + for thx in thresholds + ] # Plot the results for name, results in dipole_results.items(): - f, ax1 = plt.subplots(constrained_layout=True) - ax1.plot(thresholds, 100 * np.array(results), '.-') - ax1.set(title=name, ylabel='Error (cm)', xlabel='Threshold', - xticks=thresholds) + f, ax1 = plt.subplots(layout="constrained") + ax1.plot(thresholds, 100 * np.array(results), ".-") + ax1.set(title=name, ylabel="Error (cm)", xlabel="Threshold", xticks=thresholds) diff --git a/examples/simulation/simulate_evoked_data.py b/examples/simulation/simulate_evoked_data.py index 037b1dcbbc7..447f548e779 100644 --- a/examples/simulation/simulate_evoked_data.py +++ b/examples/simulation/simulate_evoked_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-sim-evoked: @@ -12,72 +11,83 @@ # Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
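The plotting hunks above (and several later in this patch) replace constrained_layout=True with layout="constrained". A small standalone Matplotlib check, assuming Matplotlib >= 3.6 where the string spelling and get_layout_engine() exist, shows the two spellings configure the same layout engine; the Agg backend is picked only so the sketch runs headless.

import matplotlib

matplotlib.use("Agg")  # headless backend so this sketch runs anywhere
import matplotlib.pyplot as plt

fig_old, _ = plt.subplots(2, 2, constrained_layout=True)  # older spelling
fig_new, _ = plt.subplots(2, 2, layout="constrained")     # spelling used in this patch
print(type(fig_old.get_layout_engine()).__name__)  # ConstrainedLayoutEngine
print(type(fig_new.get_layout_engine()).__name__)  # ConstrainedLayoutEngine
plt.close("all")
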
# %% -import numpy as np import matplotlib.pyplot as plt +import numpy as np import mne from mne.datasets import sample +from mne.simulation import simulate_evoked, simulate_sparse_stc from mne.time_frequency import fit_iir_model_raw from mne.viz import plot_sparse_source_estimates -from mne.simulation import simulate_sparse_stc, simulate_evoked print(__doc__) # %% # Load real data as templates data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw = mne.io.read_raw_fif(meg_path / 'sample_audvis_raw.fif') -proj = mne.read_proj(meg_path / 'sample_audvis_ecg-proj.fif') +meg_path = data_path / "MEG" / "sample" +raw = mne.io.read_raw_fif(meg_path / "sample_audvis_raw.fif") +proj = mne.read_proj(meg_path / "sample_audvis_ecg-proj.fif") raw.add_proj(proj) -raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels +raw.info["bads"] = ["MEG 2443", "EEG 053"] # mark bad channels -fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -ave_fname = meg_path / 'sample_audvis-no-filter-ave.fif' -cov_fname = meg_path / 'sample_audvis-cov.fif' +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +ave_fname = meg_path / "sample_audvis-no-filter-ave.fif" +cov_fname = meg_path / "sample_audvis-cov.fif" fwd = mne.read_forward_solution(fwd_fname) -fwd = mne.pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads']) +fwd = mne.pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info["bads"]) cov = mne.read_cov(cov_fname) info = mne.io.read_info(ave_fname) -label_names = ['Aud-lh', 'Aud-rh'] -labels = [mne.read_label(meg_path / 'labels' / f'{ln}.label') - for ln in label_names] +label_names = ["Aud-lh", "Aud-rh"] +labels = [mne.read_label(meg_path / "labels" / f"{ln}.label") for ln in label_names] # %% # Generate source time courses from 2 dipoles and the corresponding evoked data -times = np.arange(300, dtype=np.float64) / raw.info['sfreq'] - 0.1 +times = np.arange(300, dtype=np.float64) / raw.info["sfreq"] - 0.1 rng = np.random.RandomState(42) def data_fun(times): - """Function to generate random source time courses""" - return (50e-9 * np.sin(30. 
* times) * - np.exp(- (times - 0.15 + 0.05 * rng.randn(1)) ** 2 / 0.01)) - - -stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times, - random_state=42, labels=labels, data_fun=data_fun) + """Generate random source time courses.""" + return ( + 50e-9 + * np.sin(30.0 * times) + * np.exp(-((times - 0.15 + 0.05 * rng.randn(1)) ** 2) / 0.01) + ) + + +stc = simulate_sparse_stc( + fwd["src"], + n_dipoles=2, + times=times, + random_state=42, + labels=labels, + data_fun=data_fun, +) # %% # Generate noisy evoked data -picks = mne.pick_types(raw.info, meg=True, exclude='bads') +picks = mne.pick_types(raw.info, meg=True, exclude="bads") iir_filter = fit_iir_model_raw(raw, order=5, picks=picks, tmin=60, tmax=180)[1] nave = 100 # simulate average of 100 epochs -evoked = simulate_evoked(fwd, stc, info, cov, nave=nave, use_cps=True, - iir_filter=iir_filter) +evoked = simulate_evoked( + fwd, stc, info, cov, nave=nave, use_cps=True, iir_filter=iir_filter +) # %% # Plot -plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1), - opacity=0.5, high_resolution=True) +plot_sparse_source_estimates( + fwd["src"], stc, bgcolor=(1, 1, 1), opacity=0.5, high_resolution=True +) plt.figure() plt.psd(evoked.data[0]) -evoked.plot(time_unit='s') +evoked.plot(time_unit="s") diff --git a/examples/simulation/simulate_raw_data.py b/examples/simulation/simulate_raw_data.py index 641f0171707..0fbefca4480 100644 --- a/examples/simulation/simulate_raw_data.py +++ b/examples/simulation/simulate_raw_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-sim-raw: @@ -14,24 +13,28 @@ # Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% -import numpy as np import matplotlib.pyplot as plt +import numpy as np import mne -from mne import find_events, Epochs, compute_covariance, make_ad_hoc_cov +from mne import Epochs, compute_covariance, find_events, make_ad_hoc_cov from mne.datasets import sample -from mne.simulation import (simulate_sparse_stc, simulate_raw, - add_noise, add_ecg, add_eog) - -print(__doc__) +from mne.simulation import ( + add_ecg, + add_eog, + add_noise, + simulate_raw, + simulate_sparse_stc, +) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_raw.fif' -fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_raw.fif" +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" # Load real data as the template raw = mne.io.read_raw_fif(raw_fname) @@ -40,34 +43,36 @@ ############################################################################## # Generate dipole time series n_dipoles = 4 # number of dipoles to create -epoch_duration = 2. # duration of each epoch/event +epoch_duration = 2.0 # duration of each epoch/event n = 0 # harmonic number rng = np.random.RandomState(0) # random state (make reproducible) def data_fun(times): - """Generate time-staggered sinusoids at harmonics of 10Hz""" + """Generate time-staggered sinusoids at harmonics of 10Hz.""" global n n_samp = len(times) window = np.zeros(n_samp) - start, stop = [int(ii * float(n_samp) / (2 * n_dipoles)) - for ii in (2 * n, 2 * n + 1)] - window[start:stop] = 1. + start, stop = ( + int(ii * float(n_samp) / (2 * n_dipoles)) for ii in (2 * n, 2 * n + 1) + ) + window[start:stop] = 1.0 n += 1 - data = 25e-9 * np.sin(2. * np.pi * 10. 
* n * times) + data = 25e-9 * np.sin(2.0 * np.pi * 10.0 * n * times) data *= window return data -times = raw.times[:int(raw.info['sfreq'] * epoch_duration)] +times = raw.times[: int(raw.info["sfreq"] * epoch_duration)] fwd = mne.read_forward_solution(fwd_fname) -src = fwd['src'] -stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times, - data_fun=data_fun, random_state=rng) +src = fwd["src"] +stc = simulate_sparse_stc( + src, n_dipoles=n_dipoles, times=times, data_fun=data_fun, random_state=rng +) # look at our source data fig, ax = plt.subplots(1) ax.plot(times, 1e9 * stc.data.T) -ax.set(ylabel='Amplitude (nAm)', xlabel='Time (s)') +ax.set(ylabel="Amplitude (nAm)", xlabel="Time (s)") mne.viz.utils.plt_show() ############################################################################## @@ -83,7 +88,8 @@ def data_fun(times): # Plot evoked data events = find_events(raw_sim) # only 1 pos, so event number == 1 epochs = Epochs(raw_sim, events, 1, tmin=-0.2, tmax=epoch_duration) -cov = compute_covariance(epochs, tmax=0., method='empirical', - verbose='error') # quick calc +cov = compute_covariance( + epochs, tmax=0.0, method="empirical", verbose="error" +) # quick calc evoked = epochs.average() -evoked.plot_white(cov, time_unit='s') +evoked.plot_white(cov, time_unit="s") diff --git a/examples/simulation/simulated_raw_data_using_subject_anatomy.py b/examples/simulation/simulated_raw_data_using_subject_anatomy.py index 393fb66d0b1..ce6803e2ebe 100644 --- a/examples/simulation/simulated_raw_data_using_subject_anatomy.py +++ b/examples/simulation/simulated_raw_data_using_subject_anatomy.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-sim-raw-sub: @@ -18,6 +17,7 @@ # Samuel Deslauriers-Gauthier # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -26,8 +26,6 @@ import mne from mne.datasets import sample -print(__doc__) - # %% # In this example, raw data will be simulated for the sample subject, so its # information needs to be loaded. This step will download the data if it not @@ -35,24 +33,24 @@ # to be given to functions. data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -subject = 'sample' -meg_path = data_path / 'MEG' / subject +subjects_dir = data_path / "subjects" +subject = "sample" +meg_path = data_path / "MEG" / subject # %% # First, we get an info structure from the sample subject. -fname_info = meg_path / 'sample_audvis_raw.fif' +fname_info = meg_path / "sample_audvis_raw.fif" info = mne.io.read_info(fname_info) -tstep = 1 / info['sfreq'] +tstep = 1 / info["sfreq"] # %% # To simulate sources, we also need a source space. It can be obtained from the # forward solution of the sample subject. -fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' +fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" fwd = mne.read_forward_solution(fwd_fname) -src = fwd['src'] +src = fwd["src"] # %% # To simulate raw data, we need to define when the activity occurs using events @@ -61,16 +59,22 @@ # Here, both are loaded from the sample dataset, but they can also be specified # by the user. -fname_event = meg_path / 'sample_audvis_raw-eve.fif' -fname_cov = meg_path / 'sample_audvis-cov.fif' +fname_event = meg_path / "sample_audvis_raw-eve.fif" +fname_cov = meg_path / "sample_audvis-cov.fif" events = mne.read_events(fname_event) noise_cov = mne.read_cov(fname_cov) # Standard sample event IDs. These values will correspond to the third column # in the events matrix. 
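The data_fun rewrites above wrap the squared term of each Gaussian envelope in explicit parentheses, e.g. np.exp(-((times - 0.15 + ...) ** 2) / 0.01). A tiny precedence check confirms this is purely cosmetic:

# ** binds tighter than unary minus, so the extra parentheses added by the
# formatter do not change the value; they only make the intent explicit.
print(-3 ** 2)    # -9, parsed as -(3 ** 2)
print((-3) ** 2)  # 9

x, s = 0.2, 0.01
assert -x ** 2 / s == -((x) ** 2) / s
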
-event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3, - 'visual/right': 4, 'smiley': 5, 'button': 32} +event_id = { + "auditory/left": 1, + "auditory/right": 2, + "visual/left": 3, + "visual/right": 4, + "smiley": 5, + "button": 32, +} # Take only a few events for speed @@ -93,26 +97,28 @@ # times more) than the ipsilateral. activations = { - 'auditory/left': - [('G_temp_sup-G_T_transv-lh', 30), # label, activation (nAm) - ('G_temp_sup-G_T_transv-rh', 60)], - 'auditory/right': - [('G_temp_sup-G_T_transv-lh', 60), - ('G_temp_sup-G_T_transv-rh', 30)], - 'visual/left': - [('S_calcarine-lh', 30), - ('S_calcarine-rh', 60)], - 'visual/right': - [('S_calcarine-lh', 60), - ('S_calcarine-rh', 30)], + "auditory/left": [ + ("G_temp_sup-G_T_transv-lh", 30), # label, activation (nAm) + ("G_temp_sup-G_T_transv-rh", 60), + ], + "auditory/right": [ + ("G_temp_sup-G_T_transv-lh", 60), + ("G_temp_sup-G_T_transv-rh", 30), + ], + "visual/left": [("S_calcarine-lh", 30), ("S_calcarine-rh", 60)], + "visual/right": [("S_calcarine-lh", 60), ("S_calcarine-rh", 30)], } -annot = 'aparc.a2009s' +annot = "aparc.a2009s" # Load the 4 necessary label names. -label_names = sorted(set(activation[0] - for activation_list in activations.values() - for activation in activation_list)) +label_names = sorted( + set( + activation[0] + for activation_list in activations.values() + for activation in activation_list + ) +) region_names = list(activations.keys()) # %% @@ -125,13 +131,13 @@ def data_fun(times, latency, duration): - """Function to generate source time courses for evoked responses, - parametrized by latency and duration.""" + """Generate source time courses for evoked responses.""" f = 15 # oscillating frequency, beta band [Hz] sigma = 0.375 * duration sinusoid = np.sin(2 * np.pi * f * (times - latency)) - gf = np.exp(- (times - latency - (sigma / 4.) * rng.rand(1)) ** 2 / - (2 * (sigma ** 2))) + gf = np.exp( + -((times - latency - (sigma / 4.0) * rng.rand(1)) ** 2) / (2 * (sigma**2)) + ) return 1e-9 * sinusoid * gf @@ -154,7 +160,7 @@ def data_fun(times, latency, duration): # event, the second is not used. The third one is the event id, which is # different for each of the 4 areas. -times = np.arange(150, dtype=np.float64) / info['sfreq'] +times = np.arange(150, dtype=np.float64) / info["sfreq"] duration = 0.03 rng = np.random.RandomState(7) source_simulator = mne.simulation.SourceSimulator(src, tstep=tstep) @@ -163,20 +169,17 @@ def data_fun(times, latency, duration): events_tmp = events[np.where(events[:, 2] == region_id)[0], :] for i in range(2): label_name = activations[region_name][i][0] - label_tmp = mne.read_labels_from_annot(subject, annot, - subjects_dir=subjects_dir, - regexp=label_name, - verbose=False) + label_tmp = mne.read_labels_from_annot( + subject, annot, subjects_dir=subjects_dir, regexp=label_name, verbose=False + ) label_tmp = label_tmp[0] amplitude_tmp = activations[region_name][i][1] - if region_name.split('/')[1][0] == label_tmp.hemi[0]: + if region_name.split("/")[1][0] == label_tmp.hemi[0]: latency_tmp = 0.115 else: latency_tmp = 0.1 wf_tmp = data_fun(times, latency_tmp, duration) - source_simulator.add_data(label_tmp, - amplitude_tmp * wf_tmp, - events_tmp) + source_simulator.add_data(label_tmp, amplitude_tmp * wf_tmp, events_tmp) # To obtain a SourceEstimate object, we need to use `get_stc()` method of # SourceSimulator class. @@ -205,17 +208,16 @@ def data_fun(times, latency, duration): mne.simulation.add_ecg(raw_sim, random_state=0) # Plot original and simulated raw data. 
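The reflowed label_names comprehension above is easiest to read when rerun on a trimmed copy of the activations mapping (two entries copied from the hunk; the structure, not the data, is the point):

activations = {
    "auditory/left": [("G_temp_sup-G_T_transv-lh", 30), ("G_temp_sup-G_T_transv-rh", 60)],
    "visual/right": [("S_calcarine-lh", 60), ("S_calcarine-rh", 30)],
}
# Collect every label name used by any condition, deduplicate, and sort
label_names = sorted(
    set(
        activation[0]
        for activation_list in activations.values()
        for activation in activation_list
    )
)
print(label_names)
# ['G_temp_sup-G_T_transv-lh', 'G_temp_sup-G_T_transv-rh', 'S_calcarine-lh', 'S_calcarine-rh']
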
-raw_sim.plot(title='Simulated raw data') +raw_sim.plot(title="Simulated raw data") # %% # Extract epochs and compute evoked responsses # -------------------------------------------- # -epochs = mne.Epochs(raw_sim, events, event_id, tmin=-0.2, tmax=0.3, - baseline=(None, 0)) -evoked_aud_left = epochs['auditory/left'].average() -evoked_vis_right = epochs['visual/right'].average() +epochs = mne.Epochs(raw_sim, events, event_id, tmin=-0.2, tmax=0.3, baseline=(None, 0)) +evoked_aud_left = epochs["auditory/left"].average() +evoked_vis_right = epochs["visual/right"].average() # Visualize the evoked data evoked_aud_left.plot(spatial_colors=True) @@ -231,16 +233,15 @@ def data_fun(times, latency, duration): # As expected, when high activations appear in primary auditory areas, primary # visual areas will have low activations and vice versa. -method, lambda2 = 'dSPM', 1. / 9. +method, lambda2 = "dSPM", 1.0 / 9.0 inv = mne.minimum_norm.make_inverse_operator(epochs.info, fwd, noise_cov) -stc_aud = mne.minimum_norm.apply_inverse( - evoked_aud_left, inv, lambda2, method) -stc_vis = mne.minimum_norm.apply_inverse( - evoked_vis_right, inv, lambda2, method) +stc_aud = mne.minimum_norm.apply_inverse(evoked_aud_left, inv, lambda2, method) +stc_vis = mne.minimum_norm.apply_inverse(evoked_vis_right, inv, lambda2, method) stc_diff = stc_aud - stc_vis -brain = stc_diff.plot(subjects_dir=subjects_dir, initial_time=0.1, - hemi='split', views=['lat', 'med']) +brain = stc_diff.plot( + subjects_dir=subjects_dir, initial_time=0.1, hemi="split", views=["lat", "med"] +) # %% # References diff --git a/examples/simulation/source_simulator.py b/examples/simulation/source_simulator.py index 9a1ced9d7ff..f417b96f181 100644 --- a/examples/simulation/source_simulator.py +++ b/examples/simulation/source_simulator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-sim-source: @@ -15,6 +14,7 @@ class to generate source estimates and raw data. It is meant to be a brief # Samuel Deslauriers-Gauthier # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -30,38 +30,39 @@ class to generate source estimates and raw data. It is meant to be a brief # This will download the data if it not already on your machine. We also set # the subjects directory so we don't need to give it to functions. data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -subject = 'sample' +subjects_dir = data_path / "subjects" +subject = "sample" # %% # First, we get an info structure from the test subject. -evoked_fname = data_path / 'MEG' / subject / 'sample_audvis-ave.fif' +evoked_fname = data_path / "MEG" / subject / "sample_audvis-ave.fif" info = mne.io.read_info(evoked_fname) -tstep = 1. / info['sfreq'] +tstep = 1.0 / info["sfreq"] # %% # To simulate sources, we also need a source space. It can be obtained from the # forward solution of the sample subject. -fwd_fname = data_path / 'MEG' / subject / 'sample_audvis-meg-eeg-oct-6-fwd.fif' +fwd_fname = data_path / "MEG" / subject / "sample_audvis-meg-eeg-oct-6-fwd.fif" fwd = mne.read_forward_solution(fwd_fname) -src = fwd['src'] +src = fwd["src"] # %% # To select a region to activate, we use the caudal middle frontal to grow # a region of interest. selected_label = mne.read_labels_from_annot( - subject, regexp='caudalmiddlefrontal-lh', subjects_dir=subjects_dir)[0] -location = 'center' # Use the center of the region as a seed. -extent = 10. # Extent in mm of the region. 
+ subject, regexp="caudalmiddlefrontal-lh", subjects_dir=subjects_dir +)[0] +location = "center" # Use the center of the region as a seed. +extent = 10.0 # Extent in mm of the region. label = mne.label.select_sources( - subject, selected_label, location=location, extent=extent, - subjects_dir=subjects_dir) + subject, selected_label, location=location, extent=extent, subjects_dir=subjects_dir +) # %% # Define the time course of the activity for each source of the region to # activate. Here we use a sine wave at 18 Hz with a peak amplitude # of 10 nAm. -source_time_series = np.sin(2. * np.pi * 18. * np.arange(100) * tstep) * 10e-9 +source_time_series = np.sin(2.0 * np.pi * 18.0 * np.arange(100) * tstep) * 10e-9 # %% # Define when the activity occurs using events. The first column is the sample diff --git a/examples/stats/cluster_stats_evoked.py b/examples/stats/cluster_stats_evoked.py index e50a5018058..b51601f2f32 100644 --- a/examples/stats/cluster_stats_evoked.py +++ b/examples/stats/cluster_stats_evoked.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-cluster-evoked: @@ -14,6 +13,7 @@ # Authors: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -21,17 +21,17 @@ import mne from mne import io -from mne.stats import permutation_cluster_test from mne.datasets import sample +from mne.stats import permutation_cluster_test print(__doc__) # %% # Set parameters data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" tmin = -0.2 tmax = 0.5 @@ -39,22 +39,23 @@ raw = io.read_raw_fif(raw_fname) events = mne.read_events(event_fname) -channel = 'MEG 1332' # include only this channel in analysis +channel = "MEG 1332" # include only this channel in analysis include = [channel] # %% # Read epochs for the channel of interest -picks = mne.pick_types(raw.info, meg=False, eog=True, include=include, - exclude='bads') +picks = mne.pick_types(raw.info, meg=False, eog=True, include=include, exclude="bads") event_id = 1 reject = dict(grad=4000e-13, eog=150e-6) -epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), reject=reject) +epochs1 = mne.Epochs( + raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=reject +) condition1 = epochs1.get_data() # as 3D matrix event_id = 2 -epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), reject=reject) +epochs2 = mne.Epochs( + raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=reject +) condition2 = epochs2.get_data() # as 3D matrix condition1 = condition1[:, 0, :] # take only one channel to get a 2D array @@ -63,31 +64,36 @@ # %% # Compute statistic threshold = 6.0 -T_obs, clusters, cluster_p_values, H0 = \ - permutation_cluster_test([condition1, condition2], n_permutations=1000, - threshold=threshold, tail=1, n_jobs=None, - out_type='mask') +T_obs, clusters, cluster_p_values, H0 = permutation_cluster_test( + [condition1, condition2], + n_permutations=1000, + threshold=threshold, + tail=1, + n_jobs=None, + out_type="mask", +) # %% # Plot times = epochs1.times fig, (ax, ax2) = plt.subplots(2, 1, figsize=(8, 4)) -ax.set_title('Channel : ' + channel) -ax.plot(times, condition1.mean(axis=0) - 
condition2.mean(axis=0), - label="ERF Contrast (Event 1 - Event 2)") +ax.set_title("Channel : " + channel) +ax.plot( + times, + condition1.mean(axis=0) - condition2.mean(axis=0), + label="ERF Contrast (Event 1 - Event 2)", +) ax.set_ylabel("MEG (T / m)") ax.legend() for i_c, c in enumerate(clusters): c = c[0] if cluster_p_values[i_c] <= 0.05: - h = ax2.axvspan(times[c.start], times[c.stop - 1], - color='r', alpha=0.3) + h = ax2.axvspan(times[c.start], times[c.stop - 1], color="r", alpha=0.3) else: - ax2.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3), - alpha=0.3) + ax2.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3), alpha=0.3) -hf = plt.plot(times, T_obs, 'g') -ax2.legend((h, ), ('cluster p-value < 0.05', )) +hf = plt.plot(times, T_obs, "g") +ax2.legend((h,), ("cluster p-value < 0.05",)) ax2.set_xlabel("time (ms)") ax2.set_ylabel("f-values") diff --git a/examples/stats/fdr_stats_evoked.py b/examples/stats/fdr_stats_evoked.py index 5007042fccc..f7b78f7c559 100644 --- a/examples/stats/fdr_stats_evoked.py +++ b/examples/stats/fdr_stats_evoked.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-fdr-evoked: @@ -14,12 +13,13 @@ # Authors: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% +import matplotlib.pyplot as plt import numpy as np from scipy import stats -import matplotlib.pyplot as plt import mne from mne import io @@ -31,26 +31,26 @@ # %% # Set parameters data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" event_id, tmin, tmax = 1, -0.2, 0.5 # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) events = mne.read_events(event_fname)[:30] -channel = 'MEG 1332' # include only this channel in analysis +channel = "MEG 1332" # include only this channel in analysis include = [channel] # %% # Read epochs for the channel of interest -picks = mne.pick_types(raw.info, meg=False, eog=True, include=include, - exclude='bads') +picks = mne.pick_types(raw.info, meg=False, eog=True, include=include, exclude="bads") event_id = 1 reject = dict(grad=4000e-13, eog=150e-6) -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), reject=reject) +epochs = mne.Epochs( + raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=reject +) X = epochs.get_data() # as 3D matrix X = X[:, 0, :] # take only one channel to get a 2D array @@ -65,22 +65,43 @@ reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha) threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1) -reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep') +reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method="indep") threshold_fdr = np.min(np.abs(T)[reject_fdr]) # %% # Plot times = 1e3 * epochs.times -plt.close('all') -plt.plot(times, T, 'k', label='T-stat') +plt.close("all") +plt.plot(times, T, "k", label="T-stat") xmin, xmax = plt.xlim() -plt.hlines(threshold_uncorrected, xmin, xmax, linestyle='--', colors='k', - label='p=0.05 (uncorrected)', linewidth=2) -plt.hlines(threshold_bonferroni, xmin, xmax, linestyle='--', colors='r', - label='p=0.05 (Bonferroni)', linewidth=2) -plt.hlines(threshold_fdr, xmin, xmax, linestyle='--', colors='b', - 
label='p=0.05 (FDR)', linewidth=2) +plt.hlines( + threshold_uncorrected, + xmin, + xmax, + linestyle="--", + colors="k", + label="p=0.05 (uncorrected)", + linewidth=2, +) +plt.hlines( + threshold_bonferroni, + xmin, + xmax, + linestyle="--", + colors="r", + label="p=0.05 (Bonferroni)", + linewidth=2, +) +plt.hlines( + threshold_fdr, + xmin, + xmax, + linestyle="--", + colors="b", + label="p=0.05 (FDR)", + linewidth=2, +) plt.legend() plt.xlabel("Time (ms)") plt.ylabel("T-stat") diff --git a/examples/stats/linear_regression_raw.py b/examples/stats/linear_regression_raw.py index 53b288fc5ee..5c09e5a9443 100644 --- a/examples/stats/linear_regression_raw.py +++ b/examples/stats/linear_regression_raw.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-linear-regression-raw: @@ -15,14 +14,12 @@ is inapplicable, but regression can estimate effects, including those of continuous predictors. -rERPs are described in: -Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP -waveforms: II. Non-linear effects, overlap correction, and practical -considerations. Psychophysiology, 52(2), 169-189. -""" +rERPs are described in :footcite:t:`SmithKutas2015`. +""" # noqa D400 # Authors: Jona Sassenhagen # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -34,39 +31,52 @@ # Load and preprocess data data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" raw = mne.io.read_raw_fif(raw_fname) -raw.pick_types(meg='grad', stim=True, eeg=False).load_data() -raw.filter(1, None, fir_design='firwin') # high-pass +raw.pick(["grad", "stim"], exclude="bads").load_data() +raw.filter(1, None, fir_design="firwin") # high-pass # Set up events events = mne.find_events(raw) -event_id = {'Aud/L': 1, 'Aud/R': 2} -tmin, tmax = -.1, .5 +event_id = {"Aud/L": 1, "Aud/R": 2} +tmin, tmax = -0.1, 0.5 # regular epoching picks = mne.pick_types(raw.info, meg=True) -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None, - baseline=None, preload=True, verbose=False) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + reject=None, + baseline=None, + preload=True, + verbose=False, +) # rERF -evokeds = linear_regression_raw(raw, events=events, event_id=event_id, - reject=None, tmin=tmin, tmax=tmax) +evokeds = linear_regression_raw( + raw, events=events, event_id=event_id, reject=None, tmin=tmin, tmax=tmax +) # linear_regression_raw returns a dict of evokeds # select conditions similarly to mne.Epochs objects # plot both results, and their difference cond = "Aud/L" fig, (ax1, ax2, ax3) = plt.subplots(3, 1) -params = dict(spatial_colors=True, show=False, ylim=dict(grad=(-200, 200)), - time_unit='s') +params = dict( + spatial_colors=True, show=False, ylim=dict(grad=(-200, 200)), time_unit="s" +) epochs[cond].average().plot(axes=ax1, **params) evokeds[cond].plot(axes=ax2, **params) -contrast = mne.combine_evoked([evokeds[cond], epochs[cond].average()], - weights=[1, -1]) +contrast = mne.combine_evoked([evokeds[cond], epochs[cond].average()], weights=[1, -1]) contrast.plot(axes=ax3, **params) ax1.set_title("Traditional averaging") ax2.set_title("rERF") ax3.set_title("Difference") plt.show() + +# %% +# .. 
footbibliography:: diff --git a/examples/stats/sensor_permutation_test.py b/examples/stats/sensor_permutation_test.py index 0cbb0e855e7..ded8cb9c314 100644 --- a/examples/stats/sensor_permutation_test.py +++ b/examples/stats/sensor_permutation_test.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-perm-test: @@ -14,6 +13,7 @@ # Authors: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -21,17 +21,17 @@ import mne from mne import io -from mne.stats import permutation_t_test from mne.datasets import sample +from mne.stats import permutation_t_test print(__doc__) # %% # Set parameters data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" event_id = 1 tmin = -0.2 tmax = 0.5 @@ -41,10 +41,19 @@ events = mne.read_events(event_fname) # pick MEG Gradiometers -picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True, - exclude='bads') -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6)) +picks = mne.pick_types( + raw.info, meg="grad", eeg=False, stim=False, eog=True, exclude="bads" +) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=picks, + baseline=(None, 0), + reject=dict(grad=4000e-13, eog=150e-6), +) data = epochs.get_data() times = epochs.times @@ -57,21 +66,28 @@ significant_sensors = picks[p_values <= 0.05] significant_sensors_names = [raw.ch_names[k] for k in significant_sensors] -print("Number of significant sensors : %d" % len(significant_sensors)) -print("Sensors names : %s" % significant_sensors_names) +print(f"Number of significant sensors : {len(significant_sensors)}") +print(f"Sensors names : {significant_sensors_names}") # %% # View location of significantly active sensors -evoked = mne.EvokedArray(-np.log10(p_values)[:, np.newaxis], - epochs.info, tmin=0.) +evoked = mne.EvokedArray(-np.log10(p_values)[:, np.newaxis], epochs.info, tmin=0.0) # Extract mask and indices of active sensors in the layout -stats_picks = mne.pick_channels(evoked.ch_names, significant_sensors_names) mask = p_values[:, np.newaxis] <= 0.05 -evoked.plot_topomap(ch_type='grad', times=[0], scalings=1, - time_format=None, cmap='Reds', vlim=(0., np.max), - units='-log10(p)', cbar_fmt='-%0.1f', mask=mask, - size=3, show_names=lambda x: x[4:] + ' ' * 20, - time_unit='s') +evoked.plot_topomap( + ch_type="grad", + times=[0], + scalings=1, + time_format=None, + cmap="Reds", + vlim=(0.0, np.max), + units="-log10(p)", + cbar_fmt="-%0.1f", + mask=mask, + size=3, + show_names=lambda x: x[4:] + " " * 20, + time_unit="s", +) diff --git a/examples/stats/sensor_regression.py b/examples/stats/sensor_regression.py index 5bb68a61f77..e3f1452badb 100644 --- a/examples/stats/sensor_regression.py +++ b/examples/stats/sensor_regression.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-sensor-regression: @@ -19,31 +18,29 @@ of the words for which we have EEG activity. For the general methodology, see e.g. :footcite:`HaukEtAl2006`. - -References ----------- -.. footbibliography:: """ # Authors: Tal Linzen # Denis A. Engemann # Jona Sassenhagen # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
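The fdr_stats_evoked.py hunks earlier and the sensor_regression.py file that follows both rely on mne.stats.fdr_correction. A standalone sketch on invented p-values (no MEG data needed) shows how it compares with bonferroni_correction:

import numpy as np

from mne.stats import bonferroni_correction, fdr_correction

pvals = np.array([0.001, 0.009, 0.012, 0.04, 0.2, 0.6])  # invented, for illustration
reject_bonf, p_bonf = bonferroni_correction(pvals, alpha=0.05)
reject_fdr, p_fdr = fdr_correction(pvals, alpha=0.05, method="indep")
print(reject_bonf)  # [ True False False False False False]  only p < 0.05/6 survives
print(reject_fdr)   # [ True  True  True False False False]  Benjamini-Hochberg is less strict
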
# %% import pandas as pd + import mne -from mne.stats import linear_regression, fdr_correction -from mne.viz import plot_compare_evokeds from mne.datasets import kiloword +from mne.stats import fdr_correction, linear_regression +from mne.viz import plot_compare_evokeds # Load the data -path = kiloword.data_path() / 'kword_metadata-epo.fif' +path = kiloword.data_path() / "kword_metadata-epo.fif" epochs = mne.read_epochs(path) print(epochs.metadata.head()) -############################################################################## +# %% # Psycholinguistically relevant word characteristics are continuous. I.e., # concreteness or imaginability is a graded property. In the metadata, # we have concreteness ratings on a 5-point scale. We can show the dependence @@ -55,10 +52,11 @@ colors = {str(val): val for val in df[name].unique()} epochs.metadata = df.assign(Intercept=1) # Add an intercept for later evokeds = {val: epochs[name + " == " + val].average() for val in colors} -plot_compare_evokeds(evokeds, colors=colors, split_legend=True, - cmap=(name + " Percentile", "viridis")) +plot_compare_evokeds( + evokeds, colors=colors, split_legend=True, cmap=(name + " Percentile", "viridis") +) -############################################################################## +# %% # We observe that there appears to be a monotonic dependence of EEG on # concreteness. We can also conduct a continuous analysis: single-trial level # regression with concreteness as a continuous (although here, binned) @@ -67,10 +65,11 @@ names = ["Intercept", name] res = linear_regression(epochs, epochs.metadata[names], names=names) for cond in names: - res[cond].beta.plot_joint(title=cond, ts_args=dict(time_unit='s'), - topomap_args=dict(time_unit='s')) + res[cond].beta.plot_joint( + title=cond, ts_args=dict(time_unit="s"), topomap_args=dict(time_unit="s") + ) -############################################################################## +# %% # Because the :func:`~mne.stats.linear_regression` function also estimates # p values, we can -- # after applying FDR correction for multiple comparisons -- also visualise the @@ -82,4 +81,9 @@ # by dark contour lines. reject_H0, fdr_pvals = fdr_correction(res["Concreteness"].p_val.data) evoked = res["Concreteness"].beta -evoked.plot_image(mask=reject_H0, time_unit='s') +evoked.plot_image(mask=reject_H0, time_unit="s") + +# %% +# References +# ---------- +# .. footbibliography:: diff --git a/examples/time_frequency/compute_csd.py b/examples/time_frequency/compute_csd.py index 6c1b0fc2d48..7d46770c14c 100644 --- a/examples/time_frequency/compute_csd.py +++ b/examples/time_frequency/compute_csd.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-csd-matrix: @@ -19,11 +18,12 @@ """ # Author: Marijn van Vliet # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import mne from mne.datasets import sample -from mne.time_frequency import csd_fourier, csd_multitaper, csd_morlet +from mne.time_frequency import csd_fourier, csd_morlet, csd_multitaper print(__doc__) @@ -36,9 +36,9 @@ # %% # Loading the sample dataset. 
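The sensor_regression.py hunks above pass a metadata-derived design matrix to mne.stats.linear_regression. A minimal sketch on random EpochsArray data (channel names, regressor name, and values all invented here) shows the shape of what comes back: a dict keyed by regressor name whose entries carry Evoked-like beta and p_val attributes.

import numpy as np

import mne
from mne.stats import linear_regression

rng = np.random.default_rng(0)
n_epochs, n_channels, n_times = 40, 4, 50
info = mne.create_info([f"EEG {k:03d}" for k in range(n_channels)], 100.0, "eeg")
epochs = mne.EpochsArray(
    rng.normal(size=(n_epochs, n_channels, n_times)) * 1e-6, info, verbose="error"
)
# Design matrix: an intercept column plus one invented continuous predictor
design = np.column_stack([np.ones(n_epochs), rng.normal(size=n_epochs)])
res = linear_regression(epochs, design, names=["Intercept", "Concreteness"])
print(sorted(res))                     # ['Concreteness', 'Intercept']
print(type(res["Concreteness"].beta))  # an Evoked carrying the regression betas
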
data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fname_raw = meg_path / 'sample_audvis_raw.fif' -fname_event = meg_path / 'sample_audvis_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +fname_raw = meg_path / "sample_audvis_raw.fif" +fname_event = meg_path / "sample_audvis_raw-eve.fif" raw = mne.io.read_raw_fif(fname_raw) events = mne.read_events(fname_event) @@ -48,12 +48,20 @@ # measurement units, and thus the scalings, differ across sensors. In this # example, for speed and clarity, we select a single channel type: # gradiometers. -picks = mne.pick_types(raw.info, meg='grad') +picks = mne.pick_types(raw.info, meg="grad") # Make some epochs, based on events with trigger code 1 -epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=1, - picks=picks, baseline=(None, 0), - reject=dict(grad=4000e-13), preload=True) +epochs = mne.Epochs( + raw, + events, + event_id=1, + tmin=-0.2, + tmax=1, + picks=picks, + baseline=(None, 0), + reject=dict(grad=4000e-13), + preload=True, +) # %% # Computing CSD matrices using short-term Fourier transform and (adaptive) @@ -86,9 +94,11 @@ # created figures; in this case, each returned list has only one figure # so we use a Python trick of including a comma after our variable name # to assign the figure (not the list) to our ``fig`` variable: -plot_dict = {'Short-time Fourier transform': csd_fft, - 'Adaptive multitapers': csd_mt, - 'Morlet wavelet transform': csd_wav} +plot_dict = { + "Short-time Fourier transform": csd_fft, + "Adaptive multitapers": csd_mt, + "Morlet wavelet transform": csd_wav, +} for title, csd in plot_dict.items(): - fig, = csd.mean().plot() + (fig,) = csd.mean().plot() fig.suptitle(title) diff --git a/examples/time_frequency/compute_source_psd_epochs.py b/examples/time_frequency/compute_source_psd_epochs.py index d93b5e77bd0..21901e82eaa 100644 --- a/examples/time_frequency/compute_source_psd_epochs.py +++ b/examples/time_frequency/compute_source_psd_epochs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-psd-inverse: @@ -13,6 +12,7 @@ # Author: Martin Luessi # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
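The (fig,) = csd.mean().plot() change above leans on one-element tuple unpacking: CSD.plot returns a list of created figures, and the unpacking both extracts the single figure and asserts the list really has length one. A pure-Python sketch of the idiom:

returned = ["only_figure"]     # stand-in for the list that plot() returns
(fig,) = returned              # same meaning as: fig, = returned
print(fig)                     # only_figure

try:
    (fig,) = ["fig1", "fig2"]  # would fail if plot() ever returned two figures
except ValueError as err:
    print(err)                 # too many values to unpack (expected 1)
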
# %% @@ -20,22 +20,22 @@ import mne from mne.datasets import sample -from mne.minimum_norm import read_inverse_operator, compute_source_psd_epochs +from mne.minimum_norm import compute_source_psd_epochs, read_inverse_operator print(__doc__) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif' -fname_raw = meg_path / 'sample_audvis_raw.fif' -fname_event = meg_path / 'sample_audvis_raw-eve.fif' -label_name = 'Aud-lh' -fname_label = meg_path / 'labels' / f'{label_name}.label' -subjects_dir = data_path / 'subjects' +meg_path = data_path / "MEG" / "sample" +fname_inv = meg_path / "sample_audvis-meg-oct-6-meg-inv.fif" +fname_raw = meg_path / "sample_audvis_raw.fif" +fname_event = meg_path / "sample_audvis_raw-eve.fif" +label_name = "Aud-lh" +fname_label = meg_path / "labels" / f"{label_name}.label" +subjects_dir = data_path / "subjects" event_id, tmin, tmax = 1, -0.2, 0.5 snr = 1.0 # use smaller SNR for raw data -lambda2 = 1.0 / snr ** 2 +lambda2 = 1.0 / snr**2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) # Load data @@ -46,37 +46,52 @@ # Set up pick list include = [] -raw.info['bads'] += ['EEG 053'] # bads + 1 more +raw.info["bads"] += ["EEG 053"] # bads + 1 more # pick MEG channels -picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, - include=include, exclude='bads') +picks = mne.pick_types( + raw.info, meg=True, eeg=False, stim=False, eog=True, include=include, exclude="bads" +) # Read epochs -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13, - eog=150e-6)) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=picks, + baseline=(None, 0), + reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6), +) # define frequencies of interest -fmin, fmax = 0., 70. -bandwidth = 4. # bandwidth of the windows in Hz +fmin, fmax = 0.0, 70.0 +bandwidth = 4.0 # bandwidth of the windows in Hz # %% # Compute source space PSD in label # --------------------------------- # -# ..note:: By using "return_generator=True" stcs will be a generator object +# .. note:: By using "return_generator=True" stcs will be a generator object # instead of a list. This allows us so to iterate without having to # keep everything in memory. n_epochs_use = 10 -stcs = compute_source_psd_epochs(epochs[:n_epochs_use], inverse_operator, - lambda2=lambda2, - method=method, fmin=fmin, fmax=fmax, - bandwidth=bandwidth, label=label, - return_generator=True, verbose=True) +stcs = compute_source_psd_epochs( + epochs[:n_epochs_use], + inverse_operator, + lambda2=lambda2, + method=method, + fmin=fmin, + fmax=fmax, + bandwidth=bandwidth, + label=label, + return_generator=True, + verbose=True, +) # compute average PSD over the first 10 epochs -psd_avg = 0. 
+psd_avg = 0.0 for i, stc in enumerate(stcs): psd_avg += stc.data psd_avg /= n_epochs_use @@ -86,16 +101,21 @@ # %% # Visualize the 10 Hz PSD: -brain = stc.plot(initial_time=10., hemi='lh', views='lat', # 10 HZ - clim=dict(kind='value', lims=(20, 40, 60)), - smoothing_steps=3, subjects_dir=subjects_dir) -brain.add_label(label, borders=True, color='k') +brain = stc.plot( + initial_time=10.0, + hemi="lh", + views="lat", # 10 HZ + clim=dict(kind="value", lims=(20, 40, 60)), + smoothing_steps=3, + subjects_dir=subjects_dir, +) +brain.add_label(label, borders=True, color="k") # %% # Visualize the entire spectrum: fig, ax = plt.subplots() ax.plot(freqs, psd_avg.mean(axis=0)) -ax.set_xlabel('Freq (Hz)') +ax.set_xlabel("Freq (Hz)") ax.set_xlim(stc.times[[0, -1]]) -ax.set_ylabel('Power Spectral Density') +ax.set_ylabel("Power Spectral Density") diff --git a/examples/time_frequency/source_label_time_frequency.py b/examples/time_frequency/source_label_time_frequency.py index 736abed8499..80a25fffab9 100644 --- a/examples/time_frequency/source_label_time_frequency.py +++ b/examples/time_frequency/source_label_time_frequency.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-source-space-power-phase-locking: @@ -17,11 +16,12 @@ # Authors: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% -import numpy as np import matplotlib.pyplot as plt +import numpy as np import mne from mne import io @@ -33,50 +33,66 @@ # %% # Set parameters data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_raw.fif' -fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif' -label_name = 'Aud-rh' -fname_label = meg_path / 'labels' / f'{label_name}.label' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_raw.fif" +fname_inv = meg_path / "sample_audvis-meg-oct-6-meg-inv.fif" +label_names = ["Aud-lh", "Aud-rh"] +fname_labels = [meg_path / "labels" / f"{ln}.label" for ln in label_names] tmin, tmax, event_id = -0.2, 0.5, 2 # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) -events = mne.find_events(raw, stim_channel='STI 014') +events = mne.find_events(raw, stim_channel="STI 014") inverse_operator = read_inverse_operator(fname_inv) include = [] -raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more +raw.info["bads"] += ["MEG 2443", "EEG 053"] # bads + 2 more # Picks MEG channels -picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True, - stim=False, include=include, exclude='bads') +picks = mne.pick_types( + raw.info, meg=True, eeg=False, eog=True, stim=False, include=include, exclude="bads" +) reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6) # Load epochs -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), reject=reject, - preload=True) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=picks, + baseline=(None, 0), + reject=reject, + preload=True, +) # Compute a source estimate per frequency band including and excluding the # evoked response freqs = np.arange(7, 30, 2) # define frequencies of interest -label = mne.read_label(fname_label) -n_cycles = freqs / 3. 
# different number of cycle per frequency +labels = [mne.read_label(fl) for fl in fname_labels] +label = labels[0] +n_cycles = freqs / 3.0 # different number of cycle per frequency # subtract the evoked response in order to exclude evoked activity epochs_induced = epochs.copy().subtract_evoked() -plt.close('all') - -for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced], - ['evoked + induced', - 'induced only'])): +fig, axes = plt.subplots(2, 2, layout="constrained") +for ii, (this_epochs, title) in enumerate( + zip([epochs, epochs_induced], ["evoked + induced", "induced only"]) +): # compute the source space power and the inter-trial coherence power, itc = source_induced_power( - this_epochs, inverse_operator, freqs, label, baseline=(-0.1, 0), - baseline_mode='percent', n_cycles=n_cycles, n_jobs=None) + this_epochs, + inverse_operator, + freqs, + label, + baseline=(-0.1, 0), + baseline_mode="percent", + n_cycles=n_cycles, + n_jobs=None, + ) power = np.mean(power, axis=0) # average over sources itc = np.mean(itc, axis=0) # average over sources @@ -84,24 +100,68 @@ ########################################################################## # View time-frequency plots - plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43) - plt.subplot(2, 2, 2 * ii + 1) - plt.imshow(20 * power, - extent=[times[0], times[-1], freqs[0], freqs[-1]], - aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r') - plt.xlabel('Time (s)') - plt.ylabel('Frequency (Hz)') - plt.title('Power (%s)' % title) - plt.colorbar() - - plt.subplot(2, 2, 2 * ii + 2) - plt.imshow(itc, - extent=[times[0], times[-1], freqs[0], freqs[-1]], - aspect='auto', origin='lower', vmin=0, vmax=0.7, - cmap='RdBu_r') - plt.xlabel('Time (s)') - plt.ylabel('Frequency (Hz)') - plt.title('ITC (%s)' % title) - plt.colorbar() - -plt.show() + ax = axes[ii, 0] + ax.imshow( + 20 * power, + extent=[times[0], times[-1], freqs[0], freqs[-1]], + aspect="auto", + origin="lower", + vmin=0.0, + vmax=30.0, + cmap="RdBu_r", + ) + ax.set(xlabel="Time (s)", ylabel="Frequency (Hz)", title=f"Power ({title})") + + ax = axes[ii, 1] + ax.imshow( + itc, + extent=[times[0], times[-1], freqs[0], freqs[-1]], + aspect="auto", + origin="lower", + vmin=0, + vmax=0.7, + cmap="RdBu_r", + ) + ax.set(xlabel="Time (s)", ylabel="Frequency (Hz)", title=f"ITC ({title})") + fig.colorbar(ax.images[0], ax=axes[ii]) + +# %% + +############################################################################## +# In the example above, we averaged power across vertices after calculating +# power because we provided a single label for power calculation and therefore +# power of all sources within the single label were returned separately. When +# we provide a list of labels, power is averaged across sources within each +# label automatically. With a list of labels, averaging is performed before +# rescaling, so choose a baseline method appropriately. 
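The epochs.copy().subtract_evoked() call above strips the phase-locked (evoked) part before induced power is computed. A toy EpochsArray with one invented magnetometer channel makes the effect visible: after subtraction, the average across epochs is numerically zero.

import numpy as np

import mne

rng = np.random.default_rng(0)
info = mne.create_info(["MEG 0001"], sfreq=100.0, ch_types="mag")
# 20 epochs sharing a common "evoked" offset plus epoch-specific noise
data = 1e-13 * (1.0 + rng.normal(size=(20, 1, 50)))
epochs = mne.EpochsArray(data, info, verbose="error")

induced = epochs.copy().subtract_evoked()  # subtracts the average across epochs
print(np.abs(epochs.get_data().mean(axis=0)).max())   # ~1e-13: evoked part present
print(np.abs(induced.get_data().mean(axis=0)).max())  # ~0: only induced/noise left
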
+ + +# Get power from multiple labels +multi_label_power = source_induced_power( + epochs, + inverse_operator, + freqs, + labels, + baseline=(-0.1, 0), + baseline_mode="mean", + n_cycles=n_cycles, + n_jobs=None, + return_plv=False, +) + +# visually compare evoked power in left and right auditory regions +fig, axes = plt.subplots(ncols=2, layout="constrained") +for l_idx, l_power in enumerate(multi_label_power): + ax = axes[l_idx] + ax.imshow( + l_power, + extent=[epochs.times[0], epochs.times[-1], freqs[0], freqs[-1]], + aspect="auto", + origin="lower", + vmin=multi_label_power.min(), + vmax=multi_label_power.max(), + cmap="RdBu_r", + ) + title = f"{labels[l_idx].hemi.upper()} Evoked Power" + ax.set(xlabel="Time (s)", ylabel="Frequency (Hz)", title=title) + fig.colorbar(ax.images[0], ax=ax) diff --git a/examples/time_frequency/source_power_spectrum.py b/examples/time_frequency/source_power_spectrum.py index 8c32e8e78df..77af97a8726 100644 --- a/examples/time_frequency/source_power_spectrum.py +++ b/examples/time_frequency/source_power_spectrum.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-label-psd: @@ -12,6 +11,7 @@ # Authors: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -20,44 +20,55 @@ import mne from mne import io from mne.datasets import sample -from mne.minimum_norm import read_inverse_operator, compute_source_psd +from mne.minimum_norm import compute_source_psd, read_inverse_operator print(__doc__) # %% # Set parameters data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_raw.fif' -fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif' -fname_label = meg_path / 'labels' / 'Aud-lh.label' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_raw.fif" +fname_inv = meg_path / "sample_audvis-meg-oct-6-meg-inv.fif" +fname_label = meg_path / "labels" / "Aud-lh.label" # Setup for reading the raw data raw = io.read_raw_fif(raw_fname, verbose=False) -events = mne.find_events(raw, stim_channel='STI 014') +events = mne.find_events(raw, stim_channel="STI 014") inverse_operator = read_inverse_operator(fname_inv) -raw.info['bads'] = ['MEG 2443', 'EEG 053'] +raw.info["bads"] = ["MEG 2443", "EEG 053"] # picks MEG gradiometers -picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True, - stim=False, exclude='bads') +picks = mne.pick_types( + raw.info, meg=True, eeg=False, eog=True, stim=False, exclude="bads" +) tmin, tmax = 0, 120 # use the first 120s of data fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2 label = mne.read_label(fname_label) -stc = compute_source_psd(raw, inverse_operator, lambda2=1. 
/ 9., method="dSPM", - tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, - pick_ori="normal", n_fft=n_fft, label=label, - dB=True) +stc = compute_source_psd( + raw, + inverse_operator, + lambda2=1.0 / 9.0, + method="dSPM", + tmin=tmin, + tmax=tmax, + fmin=fmin, + fmax=fmax, + pick_ori="normal", + n_fft=n_fft, + label=label, + dB=True, +) -stc.save('psd_dSPM', overwrite=True) +stc.save("psd_dSPM", overwrite=True) # %% # View PSD of sources in label plt.plot(stc.times, stc.data.T) -plt.xlabel('Frequency (Hz)') -plt.ylabel('PSD (dB)') -plt.title('Source Power Spectrum (PSD)') +plt.xlabel("Frequency (Hz)") +plt.ylabel("PSD (dB)") +plt.title("Source Power Spectrum (PSD)") plt.show() diff --git a/examples/time_frequency/source_power_spectrum_opm.py b/examples/time_frequency/source_power_spectrum_opm.py index 813a08244e1..ae8152670f2 100644 --- a/examples/time_frequency/source_power_spectrum_opm.py +++ b/examples/time_frequency/source_power_spectrum_opm.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-opm-resting-state: @@ -27,6 +26,7 @@ # Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -36,19 +36,19 @@ print(__doc__) data_path = mne.datasets.opm.data_path() -subject = 'OPM_sample' - -subjects_dir = data_path / 'subjects' -bem_dir = subjects_dir / subject / 'bem' -bem_fname = bem_dir / f'{subject}-5120-5120-5120-bem-sol.fif' -src_fname = bem_dir / f'{subject}-oct6-src.fif' -vv_fname = data_path / 'MEG' / 'SQUID' / 'SQUID_resting_state.fif' -vv_erm_fname = data_path / 'MEG' / 'SQUID' / 'SQUID_empty_room.fif' -vv_trans_fname = data_path / 'MEG' / 'SQUID' / 'SQUID-trans.fif' -opm_fname = data_path / 'MEG' / 'OPM' / 'OPM_resting_state_raw.fif' -opm_erm_fname = data_path / 'MEG' / 'OPM' / 'OPM_empty_room_raw.fif' -opm_trans = mne.transforms.Transform('head', 'mri') # use identity transform -opm_coil_def_fname = data_path / 'MEG' / 'OPM' / 'coil_def.dat' +subject = "OPM_sample" + +subjects_dir = data_path / "subjects" +bem_dir = subjects_dir / subject / "bem" +bem_fname = bem_dir / f"{subject}-5120-5120-5120-bem-sol.fif" +src_fname = bem_dir / f"{subject}-oct6-src.fif" +vv_fname = data_path / "MEG" / "SQUID" / "SQUID_resting_state.fif" +vv_erm_fname = data_path / "MEG" / "SQUID" / "SQUID_empty_room.fif" +vv_trans_fname = data_path / "MEG" / "SQUID" / "SQUID-trans.fif" +opm_fname = data_path / "MEG" / "OPM" / "OPM_resting_state_raw.fif" +opm_erm_fname = data_path / "MEG" / "OPM" / "OPM_empty_room_raw.fif" +opm_trans = mne.transforms.Transform("head", "mri") # use identity transform +opm_coil_def_fname = data_path / "MEG" / "OPM" / "coil_def.dat" ############################################################################## # Load data, resample. We will store the raw objects in dicts with entries @@ -56,69 +56,83 @@ raws = dict() raw_erms = dict() -new_sfreq = 60. 
# Nyquist frequency (30 Hz) < line noise freq (50 Hz) -raws['vv'] = mne.io.read_raw_fif(vv_fname, verbose='error') # ignore naming -raws['vv'].load_data().resample(new_sfreq) -raws['vv'].info['bads'] = ['MEG2233', 'MEG1842'] -raw_erms['vv'] = mne.io.read_raw_fif(vv_erm_fname, verbose='error') -raw_erms['vv'].load_data().resample(new_sfreq) -raw_erms['vv'].info['bads'] = ['MEG2233', 'MEG1842'] - -raws['opm'] = mne.io.read_raw_fif(opm_fname) -raws['opm'].load_data().resample(new_sfreq) -raw_erms['opm'] = mne.io.read_raw_fif(opm_erm_fname) -raw_erms['opm'].load_data().resample(new_sfreq) +new_sfreq = 60.0 # Nyquist frequency (30 Hz) < line noise freq (50 Hz) +raws["vv"] = mne.io.read_raw_fif(vv_fname, verbose="error") # ignore naming +raws["vv"].load_data().resample(new_sfreq, method="polyphase") +raws["vv"].info["bads"] = ["MEG2233", "MEG1842"] +raw_erms["vv"] = mne.io.read_raw_fif(vv_erm_fname, verbose="error") +raw_erms["vv"].load_data().resample(new_sfreq, method="polyphase") +raw_erms["vv"].info["bads"] = ["MEG2233", "MEG1842"] + +raws["opm"] = mne.io.read_raw_fif(opm_fname) +raws["opm"].load_data().resample(new_sfreq, method="polyphase") +raw_erms["opm"] = mne.io.read_raw_fif(opm_erm_fname) +raw_erms["opm"].load_data().resample(new_sfreq, method="polyphase") # Make sure our assumptions later hold -assert raws['opm'].info['sfreq'] == raws['vv'].info['sfreq'] +assert raws["opm"].info["sfreq"] == raws["vv"].info["sfreq"] ############################################################################## # Explore data -titles = dict(vv='VectorView', opm='OPM') -kinds = ('vv', 'opm') +titles = dict(vv="VectorView", opm="OPM") +kinds = ("vv", "opm") n_fft = next_fast_len(int(round(4 * new_sfreq))) -print('Using n_fft=%d (%0.1f s)' % (n_fft, n_fft / raws['vv'].info['sfreq'])) +print(f"Using n_fft={n_fft} ({n_fft / raws['vv'].info['sfreq']:0.1f} s)") for kind in kinds: - fig = raws[kind].compute_psd(n_fft=n_fft, proj=True).plot() + fig = ( + raws[kind] + .compute_psd(n_fft=n_fft, proj=True) + .plot(picks="data", exclude="bads", amplitude=True) + ) fig.suptitle(titles[kind]) - fig.subplots_adjust(0.1, 0.1, 0.95, 0.85) ############################################################################## # Alignment and forward # --------------------- # Here we use a reduced size source space (oct5) just for speed -src = mne.setup_source_space( - subject, 'oct5', add_dist=False, subjects_dir=subjects_dir) +src = mne.setup_source_space(subject, "oct5", add_dist=False, subjects_dir=subjects_dir) # This line removes source-to-source distances that we will not need. # We only do it here to save a bit of memory, in general this is not required. 
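The resample calls above now pass method="polyphase", which requires a recent MNE release; conceptually this is the difference between FFT-based and polyphase-filter resampling, sketched here with SciPy on an invented 600 Hz signal downsampled to the 60 Hz target used in the example.

import numpy as np
from scipy.signal import resample, resample_poly

sfreq, new_sfreq = 600.0, 60.0
t = np.arange(0, 2, 1 / sfreq)
# invented signal: 10 Hz of interest plus a 50 Hz "line noise" component
x = np.sin(2 * np.pi * 10 * t) + 0.1 * np.sin(2 * np.pi * 50 * t)

x_fft = resample(x, int(len(x) * new_sfreq / sfreq))          # FFT-based resampling
x_poly = resample_poly(x, up=1, down=int(sfreq / new_sfreq))  # polyphase filtering
print(x_fft.shape, x_poly.shape)  # (120,) (120,): 50 Hz now lies above the 30 Hz Nyquist
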
-del src[0]['dist'], src[1]['dist'] +del src[0]["dist"], src[1]["dist"] bem = mne.read_bem_solution(bem_fname) # For speed, let's just use a 1-layer BEM -bem = mne.make_bem_solution(bem['surfs'][-1:]) +bem = mne.make_bem_solution(bem["surfs"][-1:]) fwd = dict() # check alignment and generate forward for VectorView -kwargs = dict(azimuth=0, elevation=90, distance=0.6, focalpoint=(0., 0., 0.)) +kwargs = dict(azimuth=0, elevation=90, distance=0.6, focalpoint=(0.0, 0.0, 0.0)) fig = mne.viz.plot_alignment( - raws['vv'].info, trans=vv_trans_fname, subject=subject, - subjects_dir=subjects_dir, dig=True, coord_frame='mri', - surfaces=('head', 'white')) + raws["vv"].info, + trans=vv_trans_fname, + subject=subject, + subjects_dir=subjects_dir, + dig=True, + coord_frame="mri", + surfaces=("head", "white"), +) mne.viz.set_3d_view(figure=fig, **kwargs) -fwd['vv'] = mne.make_forward_solution( - raws['vv'].info, vv_trans_fname, src, bem, eeg=False, verbose=True) +fwd["vv"] = mne.make_forward_solution( + raws["vv"].info, vv_trans_fname, src, bem, eeg=False, verbose=True +) ############################################################################## # And for OPM: with mne.use_coil_def(opm_coil_def_fname): fig = mne.viz.plot_alignment( - raws['opm'].info, trans=opm_trans, subject=subject, - subjects_dir=subjects_dir, dig=False, coord_frame='mri', - surfaces=('head', 'white')) + raws["opm"].info, + trans=opm_trans, + subject=subject, + subjects_dir=subjects_dir, + dig=False, + coord_frame="mri", + surfaces=("head", "white"), + ) mne.viz.set_3d_view(figure=fig, **kwargs) - fwd['opm'] = mne.make_forward_solution( - raws['opm'].info, opm_trans, src, bem, eeg=False, verbose=True) + fwd["opm"] = mne.make_forward_solution( + raws["opm"].info, opm_trans, src, bem, eeg=False, verbose=True + ) del src, bem @@ -132,24 +146,29 @@ topos = dict(vv=dict(), opm=dict()) stcs = dict(vv=dict(), opm=dict()) -snr = 3. -lambda2 = 1. 
/ snr ** 2 +snr = 3.0 +lambda2 = 1.0 / snr**2 for kind in kinds: noise_cov = mne.compute_raw_covariance(raw_erms[kind]) inverse_operator = mne.minimum_norm.make_inverse_operator( - raws[kind].info, forward=fwd[kind], noise_cov=noise_cov, verbose=True) + raws[kind].info, forward=fwd[kind], noise_cov=noise_cov, verbose=True + ) stc_psd, sensor_psd = mne.minimum_norm.compute_source_psd( - raws[kind], inverse_operator, lambda2=lambda2, - n_fft=n_fft, dB=False, return_sensor=True, verbose=True) + raws[kind], + inverse_operator, + lambda2=lambda2, + n_fft=n_fft, + dB=False, + return_sensor=True, + verbose=True, + ) topo_norm = sensor_psd.data.sum(axis=1, keepdims=True) stc_norm = stc_psd.sum() # same operation on MNE object, sum across freqs # Normalize each source point by the total power across freqs for band, limits in freq_bands.items(): data = sensor_psd.copy().crop(*limits).data.sum(axis=1, keepdims=True) - topos[kind][band] = mne.EvokedArray( - 100 * data / topo_norm, sensor_psd.info) - stcs[kind][band] = \ - 100 * stc_psd.copy().crop(*limits).sum() / stc_norm.data + topos[kind][band] = mne.EvokedArray(100 * data / topo_norm, sensor_psd.info) + stcs[kind][band] = 100 * stc_psd.copy().crop(*limits).sum() / stc_norm.data del inverse_operator del fwd, raws, raw_erms @@ -162,22 +181,37 @@ # Alpha # ----- + def plot_band(kind, band): """Plot activity within a frequency band on the subject's brain.""" - title = "%s %s\n(%d-%d Hz)" % ((titles[kind], band,) + freq_bands[band]) + lf, hf = freq_bands[band] + title = f"{titles[kind]} {band}\n({lf:d}-{hf:d} Hz)" topos[kind][band].plot_topomap( - times=0., scalings=1., cbar_fmt='%0.1f', vlim=(0, None), - cmap='inferno', time_format=title) + times=0.0, + scalings=1.0, + cbar_fmt="%0.1f", + vlim=(0, None), + cmap="inferno", + time_format=title, + ) brain = stcs[kind][band].plot( - subject=subject, subjects_dir=subjects_dir, views='cau', hemi='both', - time_label=title, title=title, colormap='inferno', - time_viewer=False, show_traces=False, - clim=dict(kind='percent', lims=(70, 85, 99)), smoothing_steps=10) + subject=subject, + subjects_dir=subjects_dir, + views="cau", + hemi="both", + time_label=title, + title=title, + colormap="inferno", + time_viewer=False, + show_traces=False, + clim=dict(kind="percent", lims=(70, 85, 99)), + smoothing_steps=10, + ) brain.show_view(azimuth=0, elevation=0, roll=0) return fig, brain -fig_alpha, brain_alpha = plot_band('vv', 'alpha') +fig_alpha, brain_alpha = plot_band("vv", "alpha") # %% # Beta @@ -185,13 +219,13 @@ def plot_band(kind, band): # Here we also show OPM data, which shows a profile similar to the VectorView # data beneath the sensors. VectorView first: -fig_beta, brain_beta = plot_band('vv', 'beta') +fig_beta, brain_beta = plot_band("vv", "beta") # %% # Then OPM: # sphinx_gallery_thumbnail_number = 10 -fig_beta_opm, brain_beta_opm = plot_band('opm', 'beta') +fig_beta_opm, brain_beta_opm = plot_band("opm", "beta") # %% # References diff --git a/examples/time_frequency/source_space_time_frequency.py b/examples/time_frequency/source_space_time_frequency.py index 7573dabbb2f..c5ca425dd4c 100644 --- a/examples/time_frequency/source_space_time_frequency.py +++ b/examples/time_frequency/source_space_time_frequency.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-source-space-tfr: @@ -14,6 +13,7 @@ # Authors: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
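# %%
# A minimal, self-contained sketch (synthetic data, not taken from the OPM
# resting-state example above) of the band-power normalization it performs:
# power within a band is expressed as a percentage of the total power across
# all frequencies, so values are comparable across sensors and source points.

import numpy as np

rng = np.random.default_rng(0)
freqs = np.linspace(1, 30, 59)          # hypothetical frequency axis (Hz)
psd = rng.random((10, freqs.size))      # hypothetical (n_sensors, n_freqs) PSD

total = psd.sum(axis=1, keepdims=True)  # total power per sensor
band = (freqs >= 8) & (freqs <= 12)     # e.g. an alpha band
alpha_percent = 100 * psd[:, band].sum(axis=1, keepdims=True) / total
print(alpha_percent.round(1))           # percent of total power in 8-12 Hz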
# %% @@ -29,46 +29,57 @@ # %% # Set parameters data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_raw.fif' -fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_raw.fif" +fname_inv = meg_path / "sample_audvis-meg-oct-6-meg-inv.fif" tmin, tmax, event_id = -0.2, 0.5, 1 # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) -events = mne.find_events(raw, stim_channel='STI 014') +events = mne.find_events(raw, stim_channel="STI 014") inverse_operator = read_inverse_operator(fname_inv) include = [] -raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more +raw.info["bads"] += ["MEG 2443", "EEG 053"] # bads + 2 more # picks MEG gradiometers -picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True, - stim=False, include=include, exclude='bads') +picks = mne.pick_types( + raw.info, meg=True, eeg=False, eog=True, stim=False, include=include, exclude="bads" +) # Load condition 1 event_id = 1 events = events[:10] # take 10 events to keep the computation time low # Use linear detrend to reduce any edge artifacts -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6), - preload=True, detrend=1) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=picks, + baseline=(None, 0), + reject=dict(grad=4000e-13, eog=150e-6), + preload=True, + detrend=1, +) # Compute a source estimate per frequency band bands = dict(alpha=[9, 11], beta=[18, 22]) -stcs = source_band_induced_power(epochs, inverse_operator, bands, n_cycles=2, - use_fft=False, n_jobs=None) +stcs = source_band_induced_power( + epochs, inverse_operator, bands, n_cycles=2, use_fft=False, n_jobs=None +) for b, stc in stcs.items(): - stc.save('induced_power_%s' % b, overwrite=True) + stc.save(f"induced_power_{b}", overwrite=True) # %% # plot mean power -plt.plot(stcs['alpha'].times, stcs['alpha'].data.mean(axis=0), label='Alpha') -plt.plot(stcs['beta'].times, stcs['beta'].data.mean(axis=0), label='Beta') -plt.xlabel('Time (ms)') -plt.ylabel('Power') +plt.plot(stcs["alpha"].times, stcs["alpha"].data.mean(axis=0), label="Alpha") +plt.plot(stcs["beta"].times, stcs["beta"].data.mean(axis=0), label="Beta") +plt.xlabel("Time (ms)") +plt.ylabel("Power") plt.legend() -plt.title('Mean source induced power') +plt.title("Mean source induced power") plt.show() diff --git a/examples/time_frequency/temporal_whitening.py b/examples/time_frequency/temporal_whitening.py index 0b85e1695fa..3a0a04c01c5 100644 --- a/examples/time_frequency/temporal_whitening.py +++ b/examples/time_frequency/temporal_whitening.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-temporal-whitening: @@ -13,31 +12,32 @@ # Authors: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
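# %%
# A minimal synthetic sketch (fake arrays, not the real ``SourceEstimate``
# objects) of what the final plot in ``source_space_time_frequency.py`` above
# relies on: ``source_band_induced_power`` returns one source estimate per
# requested band, and averaging its data over axis 0 (the source points)
# yields a single power time course per band.

import matplotlib.pyplot as plt
import numpy as np

rng = np.random.default_rng(0)
times = np.linspace(-0.2, 0.5, 141)  # hypothetical time axis (s)
fake_bands = {  # band -> (n_sources, n_times) power array
    "alpha": rng.random((50, times.size)),
    "beta": rng.random((50, times.size)),
}
for band, data in fake_bands.items():
    plt.plot(times, data.mean(axis=0), label=band)  # mean across source points
plt.xlabel("Time (s)")
plt.ylabel("Power")
plt.legend()
plt.show()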
# %% +import matplotlib.pyplot as plt import numpy as np from scipy import signal -import matplotlib.pyplot as plt import mne -from mne.time_frequency import fit_iir_model_raw from mne.datasets import sample +from mne.time_frequency import fit_iir_model_raw print(__doc__) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_raw.fif' -proj_fname = meg_path / 'sample_audvis_ecg-proj.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_raw.fif" +proj_fname = meg_path / "sample_audvis_ecg-proj.fif" raw = mne.io.read_raw_fif(raw_fname) proj = mne.read_proj(proj_fname) raw.add_proj(proj) -raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels +raw.info["bads"] = ["MEG 2443", "EEG 053"] # mark bad channels # Set up pick list: Gradiometers - bad channels -picks = mne.pick_types(raw.info, meg='grad', exclude='bads') +picks = mne.pick_types(raw.info, meg="grad", exclude="bads") order = 5 # define model order picks = picks[:1] @@ -46,21 +46,21 @@ b, a = fit_iir_model_raw(raw, order=order, picks=picks, tmin=60, tmax=180) d, times = raw[0, 10000:20000] # look at one channel from now on d = d.ravel() # make flat vector -innovation = signal.convolve(d, a, 'valid') +innovation = signal.convolve(d, a, "valid") d_ = signal.lfilter(b, a, innovation) # regenerate the signal d_ = np.r_[d_[0] * np.ones(order), d_] # dummy samples to keep signal length # %% # Plot the different time series and PSDs -plt.close('all') +plt.close("all") plt.figure() -plt.plot(d[:100], label='signal') -plt.plot(d_[:100], label='regenerated signal') +plt.plot(d[:100], label="signal") +plt.plot(d_[:100], label="regenerated signal") plt.legend() plt.figure() -plt.psd(d, Fs=raw.info['sfreq'], NFFT=2048) -plt.psd(innovation, Fs=raw.info['sfreq'], NFFT=2048) -plt.psd(d_, Fs=raw.info['sfreq'], NFFT=2048, linestyle='--') -plt.legend(('Signal', 'Innovation', 'Regenerated signal')) +plt.psd(d, Fs=raw.info["sfreq"], NFFT=2048) +plt.psd(innovation, Fs=raw.info["sfreq"], NFFT=2048) +plt.psd(d_, Fs=raw.info["sfreq"], NFFT=2048, linestyle="--") +plt.legend(("Signal", "Innovation", "Regenerated signal")) plt.show() diff --git a/examples/time_frequency/time_frequency_erds.py b/examples/time_frequency/time_frequency_erds.py index 69bd0bba98f..93272eb7aa3 100644 --- a/examples/time_frequency/time_frequency_erds.py +++ b/examples/time_frequency/time_frequency_erds.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-tfr-erds: @@ -31,39 +30,48 @@ # Felix Klotzsche # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% # As usual, we import everything we need. -import numpy as np import matplotlib.pyplot as plt -from matplotlib.colors import TwoSlopeNorm +import numpy as np import pandas as pd import seaborn as sns +from matplotlib.colors import TwoSlopeNorm + import mne from mne.datasets import eegbci from mne.io import concatenate_raws, read_raw_edf -from mne.time_frequency import tfr_multitaper from mne.stats import permutation_cluster_1samp_test as pcluster_test - # %% # First, we load and preprocess the data. We use runs 6, 10, and 14 from # subject 1 (these runs contains hand and feet motor imagery). 
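# %%
# A minimal standalone sketch (a synthetic AR(1) signal, not part of
# ``temporal_whitening.py`` above) of the whitening / regeneration round trip
# that example performs: convolving the signal with the AR coefficients ``a``
# yields the (approximately white) innovation, and filtering the innovation
# with ``lfilter`` regenerates the signal.

import numpy as np
from scipy import signal

rng = np.random.default_rng(42)
a = np.array([1.0, -0.9])      # known AR(1) coefficients (pole at 0.9)
e = rng.standard_normal(1000)  # white innovation sequence
x = signal.lfilter([1.0], a, e)  # simulate the AR process

innovation = signal.convolve(x, a, "valid")   # whiten: x[n] - 0.9 * x[n - 1]
x_rec = signal.lfilter([1.0], a, innovation)  # regenerate from the innovation

print(np.allclose(innovation, e[1:]))  # True: whitening recovers the innovation
# Regeneration matches the original once the filter transient has died out
print(np.max(np.abs(x_rec[500:] - x[1:][500:])))  # ~0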
-fnames = eegbci.load_data(subject=1, runs=(6, 10, 14)) -raw = concatenate_raws([read_raw_edf(f, preload=True) for f in fnames]) -raw.rename_channels(lambda x: x.strip('.')) # remove dots from channel names +fnames = eegbci.load_data(subjects=1, runs=(6, 10, 14)) +raw = concatenate_raws([read_raw_edf(f, preload=True) for f in fnames]) -events, _ = mne.events_from_annotations(raw, event_id=dict(T1=2, T2=3)) +raw.rename_channels(lambda x: x.strip(".")) # remove dots from channel names +# rename descriptions to be more easily interpretable +raw.annotations.rename(dict(T1="hands", T2="feet")) # %% # Now we can create 5-second epochs around events of interest. + tmin, tmax = -1, 4 event_ids = dict(hands=2, feet=3) # map event IDs to tasks -epochs = mne.Epochs(raw, events, event_ids, tmin - 0.5, tmax + 0.5, - picks=('C3', 'Cz', 'C4'), baseline=None, preload=True) +epochs = mne.Epochs( + raw, + event_id=["hands", "feet"], + tmin=tmin - 0.5, + tmax=tmax + 0.5, + picks=("C3", "Cz", "C4"), + baseline=None, + preload=True, +) # %% # .. _cnorm-example: @@ -81,20 +89,29 @@ baseline = (-1, 0) # baseline interval (in s) cnorm = TwoSlopeNorm(vmin=vmin, vcenter=0, vmax=vmax) # min, center & max ERDS -kwargs = dict(n_permutations=100, step_down_p=0.05, seed=1, - buffer_size=None, out_type='mask') # for cluster test +kwargs = dict( + n_permutations=100, step_down_p=0.05, seed=1, buffer_size=None, out_type="mask" +) # for cluster test # %% # Finally, we perform time/frequency decomposition over all epochs. -tfr = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs, use_fft=True, - return_itc=False, average=False, decim=2) +tfr = epochs.compute_tfr( + method="multitaper", + freqs=freqs, + n_cycles=freqs, + use_fft=True, + return_itc=False, + average=False, + decim=2, +) tfr.crop(tmin, tmax).apply_baseline(baseline, mode="percent") for event in event_ids: # select desired epochs for visualization tfr_ev = tfr[event] - fig, axes = plt.subplots(1, 4, figsize=(12, 4), - gridspec_kw={"width_ratios": [10, 10, 10, 1]}) + fig, axes = plt.subplots( + 1, 4, figsize=(12, 4), gridspec_kw={"width_ratios": [10, 10, 10, 1]} + ) for ch, ax in enumerate(axes[:-1]): # for each channel # positive clusters _, c1, p1, _ = pcluster_test(tfr_ev.data[:, ch], tail=1, **kwargs) @@ -109,9 +126,16 @@ mask = c[..., p <= 0.05].any(axis=-1) # plot TFR (ERDS map with masking) - tfr_ev.average().plot([ch], cmap="RdBu", cnorm=cnorm, axes=ax, - colorbar=False, show=False, mask=mask, - mask_style="mask") + tfr_ev.average().plot( + [ch], + cmap="RdBu", + cnorm=cnorm, + axes=ax, + colorbar=False, + show=False, + mask=mask, + mask_style="mask", + ) ax.set_title(epochs.ch_names[ch], fontsize=10) ax.axvline(0, linewidth=1, color="black", linestyle=":") # event @@ -140,33 +164,28 @@ df = tfr.to_data_frame(time_format=None, long_format=True) # Map to frequency bands: -freq_bounds = {'_': 0, - 'delta': 3, - 'theta': 7, - 'alpha': 13, - 'beta': 35, - 'gamma': 140} -df['band'] = pd.cut(df['freq'], list(freq_bounds.values()), - labels=list(freq_bounds)[1:]) +freq_bounds = {"_": 0, "delta": 3, "theta": 7, "alpha": 13, "beta": 35, "gamma": 140} +df["band"] = pd.cut( + df["freq"], list(freq_bounds.values()), labels=list(freq_bounds)[1:] +) # Filter to retain only relevant frequency bands: -freq_bands_of_interest = ['delta', 'theta', 'alpha', 'beta'] +freq_bands_of_interest = ["delta", "theta", "alpha", "beta"] df = df[df.band.isin(freq_bands_of_interest)] -df['band'] = df['band'].cat.remove_unused_categories() +df["band"] = 
df["band"].cat.remove_unused_categories() # Order channels for plotting: -df['channel'] = df['channel'].cat.reorder_categories(('C3', 'Cz', 'C4'), - ordered=True) +df["channel"] = df["channel"].cat.reorder_categories(("C3", "Cz", "C4"), ordered=True) -g = sns.FacetGrid(df, row='band', col='channel', margin_titles=True) -g.map(sns.lineplot, 'time', 'value', 'condition', n_boot=10) -axline_kw = dict(color='black', linestyle='dashed', linewidth=0.5, alpha=0.5) +g = sns.FacetGrid(df, row="band", col="channel", margin_titles=True) +g.map(sns.lineplot, "time", "value", "condition", n_boot=10) +axline_kw = dict(color="black", linestyle="dashed", linewidth=0.5, alpha=0.5) g.map(plt.axhline, y=0, **axline_kw) g.map(plt.axvline, x=0, **axline_kw) g.set(ylim=(None, 1.5)) -g.set_axis_labels("Time (s)", "ERDS (%)") +g.set_axis_labels("Time (s)", "ERDS") g.set_titles(col_template="{col_name}", row_template="{row_name}") -g.add_legend(ncol=2, loc='lower center') +g.add_legend(ncol=2, loc="lower center") g.fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.08) # %% @@ -175,20 +194,30 @@ # Here, we use seaborn to plot the average ERDS in the motor imagery interval # as a function of frequency band and imagery condition: -df_mean = (df.query('time > 1') - .groupby(['condition', 'epoch', 'band', 'channel'])[['value']] - .mean() - .reset_index()) - -g = sns.FacetGrid(df_mean, col='condition', col_order=['hands', 'feet'], - margin_titles=True) -g = (g.map(sns.violinplot, 'channel', 'value', 'band', n_boot=10, - palette='deep', order=['C3', 'Cz', 'C4'], - hue_order=freq_bands_of_interest, - linewidth=0.5).add_legend(ncol=4, loc='lower center')) +df_mean = ( + df.query("time > 1") + .groupby(["condition", "epoch", "band", "channel"], observed=False)[["value"]] + .mean() + .reset_index() +) + +g = sns.FacetGrid( + df_mean, col="condition", col_order=["hands", "feet"], margin_titles=True +) +g = g.map( + sns.violinplot, + "channel", + "value", + "band", + cut=0, + palette="deep", + order=["C3", "Cz", "C4"], + hue_order=freq_bands_of_interest, + linewidth=0.5, +).add_legend(ncol=4, loc="lower center") g.map(plt.axhline, **axline_kw) -g.set_axis_labels("", "ERDS (%)") +g.set_axis_labels("", "ERDS") g.set_titles(col_template="{col_name}", row_template="{row_name}") g.fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.3) diff --git a/examples/time_frequency/time_frequency_global_field_power.py b/examples/time_frequency/time_frequency_global_field_power.py index ff3031d4623..cc4ff14ce2a 100644 --- a/examples/time_frequency/time_frequency_global_field_power.py +++ b/examples/time_frequency/time_frequency_global_field_power.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-time-freq-global-field-power: @@ -42,6 +41,7 @@ # Stefan Appelhoff # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import matplotlib.pyplot as plt @@ -55,47 +55,52 @@ # %% # Set parameters data_path = somato.data_path() -subject = '01' -task = 'somato' -raw_fname = (data_path / f'sub-{subject}' / 'meg' / - f'sub-{subject}_task-{task}_meg.fif') +subject = "01" +task = "somato" +raw_fname = data_path / f"sub-{subject}" / "meg" / f"sub-{subject}_task-{task}_meg.fif" # let's explore some frequency bands -iter_freqs = [ - ('Theta', 4, 7), - ('Alpha', 8, 12), - ('Beta', 13, 25), - ('Gamma', 30, 45) -] +iter_freqs = [("Theta", 4, 7), ("Alpha", 8, 12), ("Beta", 13, 25), ("Gamma", 30, 45)] # %% # We create average power time courses for each frequency band # set epoching parameters -event_id, tmin, tmax = 1, -1., 3. 
+event_id, tmin, tmax = 1, -1.0, 3.0 baseline = None # get the header to extract events raw = mne.io.read_raw_fif(raw_fname) -events = mne.find_events(raw, stim_channel='STI 014') +events = mne.find_events(raw, stim_channel="STI 014") frequency_map = list() for band, fmin, fmax in iter_freqs: # (re)load the data to save memory raw = mne.io.read_raw_fif(raw_fname) - raw.pick_types(meg='grad', eog=True) # we just look at gradiometers + raw.pick(picks=["grad", "eog"]) # we just look at gradiometers raw.load_data() # bandpass filter - raw.filter(fmin, fmax, n_jobs=None, # use more jobs to speed up. - l_trans_bandwidth=1, # make sure filter params are the same - h_trans_bandwidth=1) # in each band and skip "auto" option. + raw.filter( + fmin, + fmax, + n_jobs=None, # use more jobs to speed up. + l_trans_bandwidth=1, # make sure filter params are the same + h_trans_bandwidth=1, + ) # in each band and skip "auto" option. # epoch - epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=baseline, - reject=dict(grad=4000e-13, eog=350e-6), - preload=True) + epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + baseline=baseline, + reject=dict(grad=4000e-13, eog=350e-6), + preload=True, + ) # remove evoked response epochs.subtract_evoked() @@ -116,30 +121,34 @@ # Helper function for plotting spread def stat_fun(x): """Return sum of squares.""" - return np.sum(x ** 2, axis=0) + return np.sum(x**2, axis=0) # Plot fig, axes = plt.subplots(4, 1, figsize=(10, 7), sharex=True, sharey=True) -colors = plt.colormaps['winter_r'](np.linspace(0, 1, 4)) +colors = plt.colormaps["winter_r"](np.linspace(0, 1, 4)) for ((freq_name, fmin, fmax), average), color, ax in zip( - frequency_map, colors, axes.ravel()[::-1]): + frequency_map, colors, axes.ravel()[::-1] +): times = average.times * 1e3 - gfp = np.sum(average.data ** 2, axis=0) + gfp = np.sum(average.data**2, axis=0) gfp = mne.baseline.rescale(gfp, times, baseline=(None, 0)) ax.plot(times, gfp, label=freq_name, color=color, linewidth=2.5) - ax.axhline(0, linestyle='--', color='grey', linewidth=2) - ci_low, ci_up = bootstrap_confidence_interval(average.data, random_state=0, - stat_fun=stat_fun) + ax.axhline(0, linestyle="--", color="grey", linewidth=2) + ci_low, ci_up = bootstrap_confidence_interval( + average.data, random_state=0, stat_fun=stat_fun + ) ci_low = rescale(ci_low, average.times, baseline=(None, 0)) ci_up = rescale(ci_up, average.times, baseline=(None, 0)) ax.fill_between(times, gfp + ci_up, gfp - ci_low, color=color, alpha=0.3) ax.grid(True) - ax.set_ylabel('GFP') - ax.annotate('%s (%d-%dHz)' % (freq_name, fmin, fmax), - xy=(0.95, 0.8), - horizontalalignment='right', - xycoords='axes fraction') + ax.set_ylabel("GFP") + ax.annotate( + f"{freq_name} ({fmin:d}-{fmax:d}Hz)", + xy=(0.95, 0.8), + horizontalalignment="right", + xycoords="axes fraction", + ) ax.set_xlim(-1000, 3000) -axes.ravel()[-1].set_xlabel('Time [ms]') +axes.ravel()[-1].set_xlabel("Time [ms]") diff --git a/examples/time_frequency/time_frequency_simulated.py b/examples/time_frequency/time_frequency_simulated.py index 48c74e7607c..dc42f16da3a 100644 --- a/examples/time_frequency/time_frequency_simulated.py +++ b/examples/time_frequency/time_frequency_simulated.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-tfr-comparison: @@ -18,18 +17,16 @@ # Alex Rockhill # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
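# %%
# A minimal synthetic sketch (random data, not part of the global field power
# example above) of its two core operations: global field power as the sum of
# squares across channels, and a simple baseline correction that subtracts the
# mean of the pre-stimulus interval (roughly what ``mne.baseline.rescale``
# does with ``mode='mean'``).

import numpy as np

rng = np.random.default_rng(0)
times = np.linspace(-1.0, 3.0, 401)           # hypothetical time axis (s)
data = rng.standard_normal((20, times.size))  # hypothetical (n_channels, n_times)

gfp = np.sum(data**2, axis=0)                    # sum of squares across channels
baseline_mask = times <= 0
gfp_corrected = gfp - gfp[baseline_mask].mean()  # subtract pre-stimulus mean
print(gfp_corrected[baseline_mask].mean())       # ~0 by construction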
# %% import numpy as np from matplotlib import pyplot as plt -from mne import create_info, Epochs -from mne.baseline import rescale +from mne import Epochs, create_info from mne.io import RawArray -from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet, - tfr_array_morlet, AverageTFR) -from mne.viz import centers_to_edges +from mne.time_frequency import AverageTFRArray, EpochsTFRArray, tfr_array_morlet print(__doc__) @@ -40,8 +37,8 @@ # We'll simulate data with a known spectro-temporal structure. sfreq = 1000.0 -ch_names = ['SIM0001', 'SIM0002'] -ch_types = ['grad', 'grad'] +ch_names = ["SIM0001", "SIM0002"] +ch_types = ["grad", "grad"] info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) n_times = 1024 # Just over 1 second epochs @@ -52,17 +49,24 @@ # Add a 50 Hz sinusoidal burst to the noise and ramp it. t = np.arange(n_times, dtype=np.float64) / sfreq -signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal -signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing +signal = np.sin(np.pi * 2.0 * 50.0 * t) # 50 Hz sinusoid signal +signal[np.logical_or(t < 0.45, t > 0.55)] = 0.0 # hard windowing on_time = np.logical_and(t >= 0.45, t <= 0.55) -signal[on_time] *= np.hanning(on_time.sum()) # Ramping +signal[on_time] *= np.hanning(on_time.sum()) # ramping data[:, 100:-100] += np.tile(signal, n_epochs) # add signal raw = RawArray(data, info) events = np.zeros((n_epochs, 3), dtype=int) events[:, 0] = np.arange(n_epochs) * n_times -epochs = Epochs(raw, events, dict(sin50hz=0), tmin=0, tmax=n_times / sfreq, - reject=dict(grad=4000), baseline=None) +epochs = Epochs( + raw, + events, + dict(sin50hz=0), + tmin=0, + tmax=n_times / sfreq, + reject=dict(grad=4000), + baseline=None, +) epochs.average().plot() @@ -86,24 +90,39 @@ # properties, and thus a different TFR. You can trade time resolution or # frequency resolution or both in order to get a reduction in variance. -freqs = np.arange(5., 100., 3.) -vmin, vmax = -3., 3. # Define our color limits. +freqs = np.arange(5.0, 100.0, 3.0) +vmin, vmax = -3.0, 3.0 # Define our color limits. -fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True) +fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True, layout="constrained") for n_cycles, time_bandwidth, ax, title in zip( - [freqs / 2, freqs, freqs / 2], # number of cycles - [2.0, 4.0, 8.0], # time bandwidth - axs, - ['Sim: Least smoothing, most variance', - 'Sim: Less frequency smoothing,\nmore time smoothing', - 'Sim: Less time smoothing,\nmore frequency smoothing']): - power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles, - time_bandwidth=time_bandwidth, return_itc=False) + [freqs / 2, freqs, freqs / 2], # number of cycles + [2.0, 4.0, 8.0], # time bandwidth + axs, + [ + "Sim: Least smoothing, most variance", + "Sim: Less frequency smoothing,\nmore time smoothing", + "Sim: Less time smoothing,\nmore frequency smoothing", + ], +): + power = epochs.compute_tfr( + method="multitaper", + freqs=freqs, + n_cycles=n_cycles, + time_bandwidth=time_bandwidth, + return_itc=False, + average=True, + ) ax.set_title(title) # Plot results. Baseline correct based on first 100 ms. 
- power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax, - axes=ax, show=False, colorbar=False) -plt.tight_layout() + power.plot( + [0], + baseline=(0.0, 0.1), + mode="mean", + vlim=(vmin, vmax), + axes=ax, + show=False, + colorbar=False, + ) ############################################################################## # Stockwell (S) transform @@ -116,14 +135,14 @@ # we control the spectral / temporal resolution by specifying different widths # of the gaussian window using the ``width`` parameter. -fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True) +fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True, layout="constrained") fmin, fmax = freqs[[0, -1]] for width, ax in zip((0.2, 0.7, 3.0), axs): - power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width) - power.plot([0], baseline=(0., 0.1), mode='mean', axes=ax, show=False, - colorbar=False) - ax.set_title('Sim: Using S transform, width = {:0.1f}'.format(width)) -plt.tight_layout() + power = epochs.compute_tfr(method="stockwell", freqs=(fmin, fmax), width=width) + power.plot( + [0], baseline=(0.0, 0.1), mode="mean", axes=ax, show=False, colorbar=False + ) + ax.set_title(f"Sim: Using S transform, width = {width:0.1f}") # %% # Morlet Wavelets @@ -134,16 +153,23 @@ # temporal resolution with the ``n_cycles`` parameter, which defines the # number of cycles to include in the window. -fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True) -all_n_cycles = [1, 3, freqs / 2.] +fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True, layout="constrained") +all_n_cycles = [1, 3, freqs / 2.0] for n_cycles, ax in zip(all_n_cycles, axs): - power = tfr_morlet(epochs, freqs=freqs, - n_cycles=n_cycles, return_itc=False) - power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax, - axes=ax, show=False, colorbar=False) - n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles - ax.set_title(f'Sim: Using Morlet wavelet, n_cycles = {n_cycles}') -plt.tight_layout() + power = epochs.compute_tfr( + method="morlet", freqs=freqs, n_cycles=n_cycles, return_itc=False, average=True + ) + power.plot( + [0], + baseline=(0.0, 0.1), + mode="mean", + vlim=(vmin, vmax), + axes=ax, + show=False, + colorbar=False, + ) + n_cycles = "scaled by freqs" if not isinstance(n_cycles, int) else n_cycles + ax.set_title(f"Sim: Using Morlet wavelet, n_cycles = {n_cycles}") # %% # Narrow-bandpass Filter and Hilbert Transform @@ -154,11 +180,12 @@ # important so that you isolate only one oscillation of interest, generally # the width of this filter is recommended to be about 2 Hz. -fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True) -bandwidths = [1., 2., 4.] +fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True, layout="constrained") +bandwidths = [1.0, 2.0, 4.0] for bandwidth, ax in zip(bandwidths, axs): - data = np.zeros((len(ch_names), freqs.size, epochs.times.size), - dtype=complex) + data = np.zeros( + (len(epochs), len(ch_names), freqs.size, epochs.times.size), dtype=complex + ) for idx, freq in enumerate(freqs): # Filter raw data and re-epoch to avoid the filter being longer than # the epoch data for low frequencies and short epochs, such as here. @@ -168,25 +195,33 @@ # these are all very similar because the filters are almost the same. # In practice, using the default is usually a wise choice. 
raw_filter.filter( - l_freq=freq - bandwidth / 2, h_freq=freq + bandwidth / 2, + l_freq=freq - bandwidth / 2, + h_freq=freq + bandwidth / 2, # no negative values for large bandwidth and low freq l_trans_bandwidth=min([4 * bandwidth, freq - bandwidth]), - h_trans_bandwidth=4 * bandwidth) + h_trans_bandwidth=4 * bandwidth, + ) raw_filter.apply_hilbert() - epochs_hilb = Epochs(raw_filter, events, tmin=0, tmax=n_times / sfreq, - baseline=(0, 0.1)) - tfr_data = epochs_hilb.get_data() - tfr_data = tfr_data * tfr_data.conj() # compute power - tfr_data = np.mean(tfr_data, axis=0) # average over epochs - data[:, idx] = tfr_data - power = AverageTFR(info, data, epochs.times, freqs, nave=n_epochs) - power.plot([0], baseline=(0., 0.1), mode='mean', vmin=-0.1, vmax=0.1, - axes=ax, show=False, colorbar=False) - n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles - ax.set_title('Sim: Using narrow bandpass filter Hilbert,\n' - f'bandwidth = {bandwidth}, ' - f'transition bandwidth = {4 * bandwidth}') -plt.tight_layout() + epochs_hilb = Epochs( + raw_filter, events, tmin=0, tmax=n_times / sfreq, baseline=(0, 0.1) + ) + data[:, :, idx] = epochs_hilb.get_data() + power = EpochsTFRArray(epochs.info, data, epochs.times, freqs, method="hilbert") + power.average().plot( + [0], + baseline=(0.0, 0.1), + mode="mean", + vlim=(0, 0.1), + axes=ax, + show=False, + colorbar=False, + ) + n_cycles = "scaled by freqs" if not isinstance(n_cycles, int) else n_cycles + ax.set_title( + "Sim: Using narrow bandpass filter Hilbert,\n" + f"bandwidth = {bandwidth}, " + f"transition bandwidth = {4 * bandwidth}" + ) # %% # Calculating a TFR without averaging over epochs @@ -196,34 +231,48 @@ # We can do this by using ``average=False``. In this case, an instance of # :class:`mne.time_frequency.EpochsTFR` is returned. -n_cycles = freqs / 2. -power = tfr_morlet(epochs, freqs=freqs, - n_cycles=n_cycles, return_itc=False, average=False) +n_cycles = freqs / 2.0 +power = epochs.compute_tfr( + method="morlet", freqs=freqs, n_cycles=n_cycles, return_itc=False, average=False +) print(type(power)) avgpower = power.average() -avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax, - title='Using Morlet wavelets and EpochsTFR', show=False) +avgpower.plot( + [0], + baseline=(0.0, 0.1), + mode="mean", + vlim=(vmin, vmax), + title="Using Morlet wavelets and EpochsTFR", + show=False, +) # %% # Operating on arrays # ------------------- # -# MNE also has versions of the functions above which operate on numpy arrays -# instead of MNE objects. They expect inputs of the shape -# ``(n_epochs, n_channels, n_times)``. They will also return a numpy array -# of shape ``(n_epochs, n_channels, n_freqs, n_times)``. - -power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'], - freqs=freqs, n_cycles=n_cycles, - output='avg_power') -# Baseline the output -rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False) -fig, ax = plt.subplots() -x, y = centers_to_edges(epochs.times * 1000, freqs) -mesh = ax.pcolormesh(x, y, power[0], cmap='RdBu_r', vmin=vmin, vmax=vmax) -ax.set_title('TFR calculated on a numpy array') -ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)') -fig.colorbar(mesh) -plt.tight_layout() - -plt.show() +# MNE-Python also has functions that operate on :class:`NumPy arrays ` +# instead of MNE-Python objects. These are :func:`~mne.time_frequency.tfr_array_morlet` +# and :func:`~mne.time_frequency.tfr_array_multitaper`. 
They expect inputs of the shape +# ``(n_epochs, n_channels, n_times)`` and return an array of shape +# ``(n_epochs, n_channels, n_freqs, n_times)`` (or optionally, can collapse the epochs +# dimension if you want average power or inter-trial coherence; see ``output`` param). + +power = tfr_array_morlet( + epochs.get_data(), + sfreq=epochs.info["sfreq"], + freqs=freqs, + n_cycles=n_cycles, + output="avg_power", + zero_mean=False, +) +# Put it into a TFR container for easy plotting +tfr = AverageTFRArray( + info=epochs.info, data=power, times=epochs.times, freqs=freqs, nave=len(epochs) +) +tfr.plot( + baseline=(0.0, 0.1), + picks=[0], + mode="mean", + vlim=(vmin, vmax), + title="TFR calculated on a NumPy array", +) diff --git a/examples/visualization/3d_to_2d.py b/examples/visualization/3d_to_2d.py index ebe592e9dd4..47b223e8396 100644 --- a/examples/visualization/3d_to_2d.py +++ b/examples/visualization/3d_to_2d.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-electrode-pos-2d: @@ -21,26 +20,28 @@ # Alex Rockhill # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% -from os.path import dirname -from pathlib import Path import numpy as np from matplotlib import pyplot as plt import mne from mne.io.fiff.raw import read_raw_fif -from mne.viz import ClickableImage # noqa: F401 -from mne.viz import plot_alignment, set_3d_view, snapshot_brain_montage +from mne.viz import ( + ClickableImage, # noqa: F401 + plot_alignment, + set_3d_view, + snapshot_brain_montage, +) misc_path = mne.datasets.misc.data_path() -subjects_dir = misc_path / 'ecog' -ecog_data_fname = subjects_dir / 'sample_ecog_ieeg.fif' +subjects_dir = misc_path / "ecog" +ecog_data_fname = subjects_dir / "sample_ecog_ieeg.fif" # We've already clicked and exported -layout_path = Path(dirname(mne.__file__)) / 'data' / 'image' -layout_name = 'custom_layout.lout' +layout_name = subjects_dir / "custom_layout.lout" # %% # Load data @@ -50,14 +51,14 @@ # a 2D snapshot. raw = read_raw_fif(ecog_data_fname) -raw.pick_channels([f'G{i}' for i in range(1, 257)]) # pick just one grid +raw.pick([f"G{i}" for i in range(1, 257)]) # pick just one grid # Since we loaded in the ecog data from FIF, the coordinates # are in 'head' space, but we actually want them in 'mri' space. # So we will apply the head->mri transform that was used when # generating the dataset (the estimated head->mri transform). montage = raw.get_montage() -trans = mne.coreg.estimate_head_mri_t('sample_ecog', subjects_dir) +trans = mne.coreg.estimate_head_mri_t("sample_ecog", subjects_dir) montage.apply_trans(trans) # %% @@ -69,8 +70,13 @@ # with the electrode positions on that image. We use this in conjunction with # :func:`mne.viz.plot_alignment`, which visualizes electrode positions. 
-fig = plot_alignment(raw.info, trans=trans, subject='sample_ecog', - subjects_dir=subjects_dir, surfaces=dict(pial=0.9)) +fig = plot_alignment( + raw.info, + trans=trans, + subject="sample_ecog", + subjects_dir=subjects_dir, + surfaces=dict(pial=0.9), +) set_3d_view(figure=fig, azimuth=20, elevation=80) xy, im = snapshot_brain_montage(fig, montage) @@ -85,9 +91,9 @@ # This allows us to use matplotlib to create arbitrary 2d scatterplots fig2, ax = plt.subplots(figsize=(10, 10)) ax.imshow(im) -cmap = ax.scatter(*xy_pts.T, c=beta_power, s=100, cmap='coolwarm') +cmap = ax.scatter(*xy_pts.T, c=beta_power, s=100, cmap="coolwarm") cbar = fig2.colorbar(cmap) -cbar.ax.set_ylabel('Beta Power') +cbar.ax.set_ylabel("Beta Power") ax.set_axis_off() # fig2.savefig('./brain.png', bbox_inches='tight') # For ClickableImage @@ -119,14 +125,13 @@ # # Generate a layout from our clicks and normalize by the image # print('Generating and saving layout...') # lt = click.to_layout() -# lt.save(layout_path / layout_name) # save if we want +# lt.save(layout_name) # save if we want # # We've already got the layout, load it -lt = mne.channels.read_layout(layout_path / layout_name, scale=False) +lt = mne.channels.read_layout(layout_name, scale=False) x = lt.pos[:, 0] * float(im.shape[1]) y = (1 - lt.pos[:, 1]) * float(im.shape[0]) # Flip the y-position -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ax.imshow(im) -ax.scatter(x, y, s=80, color='r') -fig.tight_layout() +ax.scatter(x, y, s=80, color="r") ax.set_axis_off() diff --git a/examples/visualization/brain.py b/examples/visualization/brain.py index 71e53e1f8c8..004842f0052 100644 --- a/examples/visualization/brain.py +++ b/examples/visualization/brain.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-brain: @@ -11,6 +10,7 @@ # Author: Alex Rockhill # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% # Load data @@ -30,8 +30,8 @@ print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -sample_dir = data_path / 'MEG' / 'sample' +subjects_dir = data_path / "subjects" +sample_dir = data_path / "MEG" / "sample" # %% # Add source information @@ -39,16 +39,21 @@ # # Plot source information. -brain_kwargs = dict(alpha=0.1, background='white', cortex='low_contrast') -brain = mne.viz.Brain('sample', subjects_dir=subjects_dir, **brain_kwargs) +brain_kwargs = dict(alpha=0.1, background="white", cortex="low_contrast") +brain = mne.viz.Brain("sample", subjects_dir=subjects_dir, **brain_kwargs) -stc = mne.read_source_estimate(sample_dir / 'sample_audvis-meg') +stc = mne.read_source_estimate(sample_dir / "sample_audvis-meg") stc.crop(0.09, 0.1) -kwargs = dict(fmin=stc.data.min(), fmax=stc.data.max(), alpha=0.25, - smoothing_steps='nearest', time=stc.times) -brain.add_data(stc.lh_data, hemi='lh', vertices=stc.lh_vertno, **kwargs) -brain.add_data(stc.rh_data, hemi='rh', vertices=stc.rh_vertno, **kwargs) +kwargs = dict( + fmin=stc.data.min(), + fmax=stc.data.max(), + alpha=0.25, + smoothing_steps="nearest", + time=stc.times, +) +brain.add_data(stc.lh_data, hemi="lh", vertices=stc.lh_vertno, **kwargs) +brain.add_data(stc.rh_data, hemi="rh", vertices=stc.rh_vertno, **kwargs) # %% # Modify the view of the brain @@ -56,7 +61,7 @@ # # You can adjust the view of the brain using ``show_view`` method. 
-brain = mne.viz.Brain('sample', subjects_dir=subjects_dir, **brain_kwargs) +brain = mne.viz.Brain("sample", subjects_dir=subjects_dir, **brain_kwargs) brain.show_view(azimuth=190, elevation=70, distance=350, focalpoint=(0, 0, 20)) # %% @@ -74,8 +79,8 @@ # .. note:: The MNE sample dataset contains only a subselection of the # Freesurfer labels created during the ``recon-all``. -brain = mne.viz.Brain('sample', subjects_dir=subjects_dir, **brain_kwargs) -brain.add_label('BA44', hemi='lh', color='green', borders=True) +brain = mne.viz.Brain("sample", subjects_dir=subjects_dir, **brain_kwargs) +brain.add_label("BA44", hemi="lh", color="green", borders=True) brain.show_view(azimuth=190, elevation=70, distance=350, focalpoint=(0, 0, 20)) # %% @@ -84,7 +89,7 @@ # # Add a head image using the ``add_head`` method. -brain = mne.viz.Brain('sample', subjects_dir=subjects_dir, **brain_kwargs) +brain = mne.viz.Brain("sample", subjects_dir=subjects_dir, **brain_kwargs) brain.add_head(alpha=0.5) # %% @@ -94,9 +99,9 @@ # To put into context the data that generated the source time course, # the sensor positions can be displayed as well. -brain = mne.viz.Brain('sample', subjects_dir=subjects_dir, **brain_kwargs) -evoked = mne.read_evokeds(sample_dir / 'sample_audvis-ave.fif')[0] -trans = mne.read_trans(sample_dir / 'sample_audvis_raw-trans.fif') +brain = mne.viz.Brain("sample", subjects_dir=subjects_dir, **brain_kwargs) +evoked = mne.read_evokeds(sample_dir / "sample_audvis-ave.fif")[0] +trans = mne.read_trans(sample_dir / "sample_audvis_raw-trans.fif") brain.add_sensors(evoked.info, trans) brain.show_view(distance=500) # move back to show sensors @@ -107,9 +112,9 @@ # Dipole modeling as in :ref:`tut-dipole-orientations` can be plotted on the # brain as well. -brain = mne.viz.Brain('sample', subjects_dir=subjects_dir, **brain_kwargs) -dip = mne.read_dipole(sample_dir / 'sample_audvis_set1.dip') -cmap = plt.colormaps['YlOrRd'] +brain = mne.viz.Brain("sample", subjects_dir=subjects_dir, **brain_kwargs) +dip = mne.read_dipole(sample_dir / "sample_audvis_set1.dip") +cmap = plt.colormaps["YlOrRd"] colors = [cmap(gof / dip.gof.max()) for gof in dip.gof] brain.add_dipole(dip, trans, colors=colors, scales=list(dip.amplitude * 1e8)) brain.show_view(azimuth=-20, elevation=60, distance=300) @@ -124,8 +129,8 @@ fig, ax = plt.subplots() ax.imshow(img) -ax.axis('off') +ax.axis("off") cax = fig.add_axes([0.9, 0.1, 0.05, 0.8]) norm = Normalize(vmin=0, vmax=dip.gof.max()) fig.colorbar(ScalarMappable(norm=norm, cmap=cmap), cax=cax) -fig.suptitle('Dipole Fits Scaled by Amplitude and Colored by GOF') +fig.suptitle("Dipole Fits Scaled by Amplitude and Colored by GOF") diff --git a/examples/visualization/channel_epochs_image.py b/examples/visualization/channel_epochs_image.py index ecbe0a789dc..9281270a8c1 100644 --- a/examples/visualization/channel_epochs_image.py +++ b/examples/visualization/channel_epochs_image.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-epochs-image: @@ -18,11 +17,12 @@ # Authors: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% -import numpy as np import matplotlib.pyplot as plt +import numpy as np import mne from mne import io @@ -34,9 +34,9 @@ # %% # Set parameters -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" event_id, tmin, tmax = 1, -0.2, 0.4 # Setup for reading the raw data @@ -44,12 +44,21 @@ events = mne.read_events(event_fname) # Set up pick list: EEG + MEG - bad channels (modify to your needs) -raw.info['bads'] = ['MEG 2443', 'EEG 053'] +raw.info["bads"] = ["MEG 2443", "EEG 053"] # Create epochs, here for gradiometers + EOG only for simplicity -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, - picks=('grad', 'eog'), baseline=(None, 0), preload=True, - reject=dict(grad=4000e-13, eog=150e-6)) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + proj=True, + picks=("grad", "eog"), + baseline=(None, 0), + preload=True, + reject=dict(grad=4000e-13, eog=150e-6), +) # %% # Show event-related fields images @@ -57,26 +66,36 @@ # and order with spectral reordering # If you don't have scikit-learn installed set order_func to None from sklearn.manifold import spectral_embedding # noqa -from sklearn.metrics.pairwise import rbf_kernel # noqa +from sklearn.metrics.pairwise import rbf_kernel # noqa def order_func(times, data): this_data = data[:, (times > 0.0) & (times < 0.350)] - this_data /= np.sqrt(np.sum(this_data ** 2, axis=1))[:, np.newaxis] - return np.argsort(spectral_embedding(rbf_kernel(this_data, gamma=1.), - n_components=1, random_state=0).ravel()) + this_data /= np.sqrt(np.sum(this_data**2, axis=1))[:, np.newaxis] + return np.argsort( + spectral_embedding( + rbf_kernel(this_data, gamma=1.0), n_components=1, random_state=0 + ).ravel() + ) good_pick = 97 # channel with a clear evoked response bad_pick = 98 # channel with no evoked response # We'll also plot a sample time onset for each trial -plt_times = np.linspace(0, .2, len(epochs)) - -plt.close('all') -mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5, - order=order_func, vmin=-250, vmax=250, - overlay_times=plt_times, show=True) +plt_times = np.linspace(0, 0.2, len(epochs)) + +plt.close("all") +mne.viz.plot_epochs_image( + epochs, + [good_pick, bad_pick], + sigma=0.5, + order=order_func, + vmin=-250, + vmax=250, + overlay_times=plt_times, + show=True, +) # %% # References diff --git a/examples/visualization/eeg_on_scalp.py b/examples/visualization/eeg_on_scalp.py index 96fff94a523..f3201bd39f1 100644 --- a/examples/visualization/eeg_on_scalp.py +++ b/examples/visualization/eeg_on_scalp.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-eeg-on-scalp: @@ -6,11 +5,12 @@ Plotting EEG sensors on the scalp ================================= -In this example, digitized EEG sensor locations are shown on the scalp. +In this example, digitized EEG sensor locations are shown on the scalp surface. """ # Author: Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
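# %%
# A minimal standalone sketch (synthetic values, not part of the ``brain.py``
# example above) of the coloring trick it uses for the dipoles: map a quantity
# (here, fake goodness-of-fit values) through a Matplotlib colormap, then build
# a matching colorbar from a ``ScalarMappable`` so the figure stays
# interpretable.

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.cm import ScalarMappable
from matplotlib.colors import Normalize

gof = np.array([20.0, 45.0, 60.0, 85.0])     # hypothetical goodness-of-fit (%)
cmap = plt.colormaps["YlOrRd"]
colors = [cmap(g / gof.max()) for g in gof]  # one RGBA color per value

fig, ax = plt.subplots()
ax.scatter(np.arange(len(gof)), gof, c=colors, s=100)
norm = Normalize(vmin=0, vmax=gof.max())
fig.colorbar(ScalarMappable(norm=norm, cmap=cmap), ax=ax, label="GOF (%)")
plt.show()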
# %% @@ -20,15 +20,22 @@ print(__doc__) data_path = mne.datasets.sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -trans = mne.read_trans(meg_path / 'sample_audvis_raw-trans.fif') -raw = mne.io.read_raw_fif(meg_path / 'sample_audvis_raw.fif') +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +trans = mne.read_trans(meg_path / "sample_audvis_raw-trans.fif") +raw = mne.io.read_raw_fif(meg_path / "sample_audvis_raw.fif") # Plot electrode locations on scalp -fig = plot_alignment(raw.info, trans, subject='sample', dig=False, - eeg=['original', 'projected'], meg=[], - coord_frame='head', subjects_dir=subjects_dir) +fig = plot_alignment( + raw.info, + trans, + subject="sample", + dig=False, + eeg=["original", "projected"], + meg=[], + coord_frame="head", + subjects_dir=subjects_dir, +) # Set viewing angle set_3d_view(figure=fig, azimuth=135, elevation=80) diff --git a/examples/visualization/evoked_arrowmap.py b/examples/visualization/evoked_arrowmap.py index df0c1123481..bdad8d4ad79 100644 --- a/examples/visualization/evoked_arrowmap.py +++ b/examples/visualization/evoked_arrowmap.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-arrowmap: @@ -21,26 +20,28 @@ # Authors: Sheraz Khan # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import numpy as np + import mne +from mne import read_evokeds from mne.datasets import sample from mne.datasets.brainstorm import bst_raw -from mne import read_evokeds from mne.viz import plot_arrowmap print(__doc__) path = sample.data_path() -fname = path / 'MEG' / 'sample' / 'sample_audvis-ave.fif' +fname = path / "MEG" / "sample" / "sample_audvis-ave.fif" # load evoked data -condition = 'Left Auditory' +condition = "Left Auditory" evoked = read_evokeds(fname, condition=condition, baseline=(None, 0)) -evoked_mag = evoked.copy().pick_types(meg='mag') -evoked_grad = evoked.copy().pick_types(meg='grad') +evoked_mag = evoked.copy().pick(picks="mag", exclude="bads") +evoked_grad = evoked.copy().pick(picks="grad", exclude="bads") # %% # Plot magnetometer data as an arrowmap along with the topoplot at the time @@ -58,8 +59,11 @@ # %% # Plot gradiometer data as an arrowmap along with the topoplot at the time # of the maximum sensor space activity: -plot_arrowmap(evoked_grad.data[:, max_time_idx], info_from=evoked_grad.info, - info_to=evoked_mag.info) +plot_arrowmap( + evoked_grad.data[:, max_time_idx], + info_from=evoked_grad.info, + info_to=evoked_mag.info, +) # %% # Since Vectorview 102 system perform sparse spatial sampling of the magnetic @@ -69,10 +73,14 @@ # Plot gradiometer data as an arrowmap along with the topoplot at the time # of the maximum sensor space activity: path = bst_raw.data_path() -raw_fname = (path / 'MEG' / 'bst_raw' / - 'subj001_somatosensory_20111109_01_AUX-f.ds') +raw_fname = path / "MEG" / "bst_raw" / "subj001_somatosensory_20111109_01_AUX-f.ds" raw_ctf = mne.io.read_raw_ctf(raw_fname) raw_ctf_info = mne.pick_info( - raw_ctf.info, mne.pick_types(raw_ctf.info, meg=True, ref_meg=False)) -plot_arrowmap(evoked_grad.data[:, max_time_idx], info_from=evoked_grad.info, - info_to=raw_ctf_info, scale=6e-10) + raw_ctf.info, mne.pick_types(raw_ctf.info, meg=True, ref_meg=False) +) +plot_arrowmap( + evoked_grad.data[:, max_time_idx], + info_from=evoked_grad.info, + info_to=raw_ctf_info, + scale=6e-10, +) diff --git a/examples/visualization/evoked_topomap.py b/examples/visualization/evoked_topomap.py index 7b576ab2376..53b7a60dbba 100644 --- 
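# %%
# A minimal standalone sketch (random data, not part of the epochs-image
# example above) of the spectral-reordering idea behind its ``order_func``:
# embed the single-trial responses on a one-dimensional manifold (via an RBF
# affinity matrix) and sort trials along that embedding, so similar trials end
# up next to each other in the image. Requires scikit-learn.

import numpy as np
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.default_rng(0)
trials = rng.standard_normal((30, 100))  # hypothetical (n_trials, n_times)
trials /= np.sqrt(np.sum(trials**2, axis=1))[:, np.newaxis]  # unit-norm trials

embedding = spectral_embedding(
    rbf_kernel(trials, gamma=1.0), n_components=1, random_state=0
).ravel()
order = np.argsort(embedding)  # new trial order for plotting
print(order)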
a/examples/visualization/evoked_topomap.py +++ b/examples/visualization/evoked_topomap.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-evoked-topomap: @@ -6,8 +5,8 @@ Plotting topographic maps of evoked data ======================================== -Load evoked data and plot topomaps for selected time points using multiple -additional options. +Load evoked data and plot topomaps for selected time points using +multiple additional options. """ # Authors: Christian Brodbeck # Tal Linzen @@ -17,24 +16,26 @@ # Alex Rockhill # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% + # sphinx_gallery_thumbnail_number = 5 -import numpy as np import matplotlib.pyplot as plt +import numpy as np -from mne.datasets import sample from mne import read_evokeds +from mne.datasets import sample print(__doc__) path = sample.data_path() -fname = path / 'MEG' / 'sample' / 'sample_audvis-ave.fif' +fname = path / "MEG" / "sample" / "sample_audvis-ave.fif" # load evoked corresponding to a specific condition # from the fif file and subtract baseline -condition = 'Left Auditory' +condition = "Left Auditory" evoked = read_evokeds(fname, condition=condition, baseline=(None, 0)) # %% @@ -46,28 +47,28 @@ # topographies will be shown. We select timepoints from 50 to 150 ms with a # step of 20ms and plot magnetometer data: times = np.arange(0.05, 0.151, 0.02) -evoked.plot_topomap(times, ch_type='mag') +evoked.plot_topomap(times, ch_type="mag") # %% # If times is set to None at most 10 regularly spaced topographies will be # shown: -evoked.plot_topomap(ch_type='mag') +evoked.plot_topomap(ch_type="mag") # %% # We can use ``nrows`` and ``ncols`` parameter to create multiline plots # with more timepoints. all_times = np.arange(-0.2, 0.5, 0.03) -evoked.plot_topomap(all_times, ch_type='mag', ncols=8, nrows='auto') +evoked.plot_topomap(all_times, ch_type="mag", ncols=8, nrows="auto") # %% # Instead of showing topographies at specific time points we can compute # averages of 50 ms bins centered on these time points to reduce the noise in # the topographies: -evoked.plot_topomap(times, ch_type='mag', average=0.05) +evoked.plot_topomap(times, ch_type="mag", average=0.05) # %% # We can plot gradiometer data (plots the RMS for each pair of gradiometers) -evoked.plot_topomap(times, ch_type='grad') +evoked.plot_topomap(times, ch_type="grad") # %% # Additional :func:`~mne.viz.plot_topomap` options @@ -80,8 +81,7 @@ # * ``res`` - to control the resolution of the topographies (lower resolution # means faster plotting) # * ``contours`` to define how many contour lines should be plotted -evoked.plot_topomap(times, ch_type='mag', cmap='Spectral_r', res=32, - contours=4) +evoked.plot_topomap(times, ch_type="mag", cmap="Spectral_r", res=32, contours=4) # %% # If you look at the edges of the head circle of a single topomap you'll see @@ -95,18 +95,24 @@ # The default value ``extrapolate='auto'`` will use ``'local'`` for MEG sensors # and ``'head'`` otherwise. 
Here we show each option: -extrapolations = ['local', 'head', 'box'] -fig, axes = plt.subplots(figsize=(7.5, 4.5), nrows=2, ncols=3) +extrapolations = ["local", "head", "box"] +fig, axes = plt.subplots(figsize=(7.5, 4.5), nrows=2, ncols=3, layout="constrained") # Here we look at EEG channels, and use a custom head sphere to get all the # sensors to be well within the drawn head surface -for axes_row, ch_type in zip(axes, ('mag', 'eeg')): +for axes_row, ch_type in zip(axes, ("mag", "eeg")): for ax, extr in zip(axes_row, extrapolations): - evoked.plot_topomap(0.1, ch_type=ch_type, size=2, extrapolate=extr, - axes=ax, show=False, colorbar=False, - sphere=(0., 0., 0., 0.09)) - ax.set_title('%s %s' % (ch_type.upper(), extr), fontsize=14) -fig.tight_layout() + evoked.plot_topomap( + 0.1, + ch_type=ch_type, + size=2, + extrapolate=extr, + axes=ax, + show=False, + colorbar=False, + sphere=(0.0, 0.0, 0.0, 0.09), + ) + ax.set_title(f"{ch_type.upper()} {extr}", fontsize=14) # %% # More advanced usage @@ -115,10 +121,10 @@ # Now we plot magnetometer data as topomap at a single time point: 100 ms # post-stimulus, add channel labels, title and adjust plot margins: -fig = evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False, - size=6, res=128) -fig.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88) -fig.suptitle('Auditory response') +fig = evoked.plot_topomap( + 0.1, ch_type="mag", show_names=True, colorbar=False, size=6, res=128 +) +fig.suptitle("Auditory response") # %% # We can also highlight specific channels by adding a mask, to e.g. mark @@ -129,8 +135,8 @@ # Select times and plot times = (0.09, 0.1, 0.11) -mask_params = dict(markersize=10, markerfacecolor='y') -evoked.plot_topomap(times, ch_type='mag', mask=mask, mask_params=mask_params) +mask_params = dict(markersize=10, markerfacecolor="y") +evoked.plot_topomap(times, ch_type="mag", mask=mask, mask_params=mask_params) # %% # Or by manually picking the channels to highlight at different times: @@ -138,16 +144,17 @@ times = (0.09, 0.1, 0.11) _times = ((np.abs(evoked.times - t)).argmin() for t in times) significant_channels = [ - ('MEG 0231', 'MEG 1611', 'MEG 1621', 'MEG 1631', 'MEG 1811'), - ('MEG 2411', 'MEG 2421'), - ('MEG 1621')] -_channels = [np.in1d(evoked.ch_names, ch) for ch in significant_channels] + ("MEG 0231", "MEG 1611", "MEG 1621", "MEG 1631", "MEG 1811"), + ("MEG 2411", "MEG 2421"), + ("MEG 1621"), +] +_channels = [np.isin(evoked.ch_names, ch) for ch in significant_channels] -mask = np.zeros(evoked.data.shape, dtype='bool') +mask = np.zeros(evoked.data.shape, dtype="bool") for _chs, _time in zip(_channels, _times): mask[_chs, _time] = True -evoked.plot_topomap(times, ch_type='mag', mask=mask, mask_params=mask_params) +evoked.plot_topomap(times, ch_type="mag", mask=mask, mask_params=mask_params) # %% # Interpolating topomaps @@ -163,18 +170,18 @@ # The default cubic interpolation is the smoothest and is great for # publications. -evoked.plot_topomap(times, ch_type='eeg', image_interp='cubic') +evoked.plot_topomap(times, ch_type="eeg", image_interp="cubic") # %% # The linear interpolation might be helpful in some cases. -evoked.plot_topomap(times, ch_type='eeg', image_interp='linear') +evoked.plot_topomap(times, ch_type="eeg", image_interp="linear") # %% # The nearest (Voronoi, no interpolation) interpolation is especially helpful # for debugging and seeing the values assigned to the topomap unaltered. 
-evoked.plot_topomap(times, ch_type='eeg', image_interp='nearest', contours=0) +evoked.plot_topomap(times, ch_type="eeg", image_interp="nearest", contours=0) # %% # Animating the topomap @@ -185,5 +192,4 @@ # sphinx_gallery_thumbnail_number = 9 times = np.arange(0.05, 0.151, 0.01) -fig, anim = evoked.animate_topomap( - times=times, ch_type='mag', frame_rate=2, blit=False) +fig, anim = evoked.animate_topomap(times=times, ch_type="mag", frame_rate=2, blit=False) diff --git a/examples/visualization/evoked_whitening.py b/examples/visualization/evoked_whitening.py index 73d88013de4..4bcb4bc8c04 100644 --- a/examples/visualization/evoked_whitening.py +++ b/examples/visualization/evoked_whitening.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-evoked-whitening: @@ -17,18 +16,19 @@ ---------- .. footbibliography:: """ + # Authors: Alexandre Gramfort # Denis A. Engemann # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import mne - from mne import io -from mne.datasets import sample from mne.cov import compute_covariance +from mne.datasets import sample print(__doc__) @@ -36,21 +36,30 @@ # Set parameters data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" raw = io.read_raw_fif(raw_fname, preload=True) -raw.filter(1, 40, fir_design='firwin') -raw.info['bads'] += ['MEG 2443'] # bads + 1 more +raw.filter(1, 40, fir_design="firwin") +raw.info["bads"] += ["MEG 2443"] # bads + 1 more events = mne.read_events(event_fname) # let's look at rare events, button presses event_id, tmin, tmax = 2, -0.2, 0.5 reject = dict(mag=4e-12, grad=4000e-13, eeg=80e-6) -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=('meg', 'eeg'), - baseline=None, reject=reject, preload=True) +epochs = mne.Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=("meg", "eeg"), + baseline=None, + reject=reject, + preload=True, +) # Uncomment next line to use fewer samples and study regularization effects # epochs = epochs[:20] # For your data, use as many samples as you can! @@ -58,24 +67,32 @@ # %% # Compute covariance using automated regularization method_params = dict(diagonal_fixed=dict(mag=0.01, grad=0.01, eeg=0.01)) -noise_covs = compute_covariance(epochs, tmin=None, tmax=0, method='auto', - return_estimators=True, n_jobs=None, - projs=None, rank=None, - method_params=method_params, verbose=True) +noise_covs = compute_covariance( + epochs, + tmin=None, + tmax=0, + method="auto", + return_estimators=True, + n_jobs=None, + projs=None, + rank=None, + method_params=method_params, + verbose=True, +) # With "return_estimator=True" all estimated covariances sorted # by log-likelihood are returned. -print('Covariance estimates sorted from best to worst') +print("Covariance estimates sorted from best to worst") for c in noise_covs: - print("%s : %s" % (c['method'], c['loglik'])) + print(f"{c['method']} : {c['loglik']}") # %% # Show the evoked data: evoked = epochs.average() -evoked.plot(time_unit='s') # plot evoked response +evoked.plot(time_unit="s") # plot evoked response # %% # We can then show whitening for our various noise covariance estimates. @@ -86,4 +103,4 @@ # # For the Global field power we expect a value of 1. 
-evoked.plot_white(noise_covs, time_unit='s') +evoked.plot_white(noise_covs, time_unit="s") diff --git a/examples/visualization/eyetracking_plot_heatmap.py b/examples/visualization/eyetracking_plot_heatmap.py new file mode 100644 index 00000000000..07983685b5e --- /dev/null +++ b/examples/visualization/eyetracking_plot_heatmap.py @@ -0,0 +1,112 @@ +""" +.. _tut-eyetrack-heatmap: + +============================================= +Plotting eye-tracking heatmaps in MNE-Python +============================================= + +This tutorial covers plotting eye-tracking position data as a heatmap. + +.. seealso:: + + :ref:`tut-importing-eyetracking-data` + :ref:`tut-eyetrack` + +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# %% +# Data loading +# ------------ +# +# As usual we start by importing the modules we need and loading some +# :ref:`example data `: eye-tracking data recorded from SR research's +# ``'.asc'`` file format. + +import matplotlib.pyplot as plt + +import mne +from mne.viz.eyetracking import plot_gaze + +task_fpath = mne.datasets.eyelink.data_path() / "freeviewing" +et_fpath = task_fpath / "sub-01_task-freeview_eyetrack.asc" +stim_fpath = task_fpath / "stim" / "naturalistic.png" + +raw = mne.io.read_raw_eyelink(et_fpath) +calibration = mne.preprocessing.eyetracking.read_eyelink_calibration( + et_fpath, + screen_resolution=(1920, 1080), + screen_size=(0.53, 0.3), + screen_distance=0.9, +)[0] + +# %% +# Process and epoch the data +# -------------------------- +# +# First we will interpolate missing data during blinks and epoch the data. + +mne.preprocessing.eyetracking.interpolate_blinks(raw, interpolate_gaze=True) +raw.annotations.rename({"dvns": "natural"}) # more intuitive + +epochs = mne.Epochs(raw, event_id=["natural"], tmin=0, tmax=20, baseline=None) + + +# %% +# Plot a heatmap of the eye-tracking data +# --------------------------------------- +# +# To make a heatmap of the eye-tracking data, we can use the function +# :func:`~mne.viz.eyetracking.plot_gaze`. We will need to define the dimensions of our +# canvas; for this file, the eye position data are reported in pixels, so we'll use the +# screen resolution of the participant screen (1920x1080) as the width and height. We +# can also use the sigma parameter to smooth the plot. + +cmap = plt.get_cmap("viridis") +plot_gaze(epochs["natural"], calibration=calibration, cmap=cmap, sigma=50) + +# %% +# .. note:: The (0, 0) pixel coordinates are at the top-left of the +# trackable area of the screen for many eye trackers. + +# %% +# Overlaying plots with images +# ---------------------------- +# +# We can use matplotlib to plot gaze heatmaps on top of stimuli images. We'll +# customize a :class:`~matplotlib.colors.Colormap` to make some values of the heatmap +# completely transparent. We'll then use the ``vlim`` parameter to force the heatmap to +# start at a value greater than the darkest value in our previous heatmap, which will +# make the darkest colors of the heatmap transparent. 
+ +cmap.set_under("k", alpha=0) # make the lowest values transparent +ax = plt.subplot() +ax.imshow(plt.imread(stim_fpath)) +plot_gaze( + epochs["natural"], + calibration=calibration, + vlim=(0.0003, None), + sigma=50, + cmap=cmap, + axes=ax, +) + +# %% +# Displaying the heatmap in units of visual angle +# ----------------------------------------------- +# +# In scientific publications it is common to report gaze data as the visual angle +# from the participants eye to the screen. We can convert the units of our gaze data to +# radians of visual angle before plotting the heatmap: + +# %% +epochs.load_data() +mne.preprocessing.eyetracking.convert_units(epochs, calibration, to="radians") +plot_gaze( + epochs["natural"], + calibration=calibration, + sigma=50, +) diff --git a/examples/visualization/meg_sensors.py b/examples/visualization/meg_sensors.py index 28cea87b215..182a8ee8940 100644 --- a/examples/visualization/meg_sensors.py +++ b/examples/visualization/meg_sensors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-plot-meg-sensors: @@ -11,6 +10,7 @@ # Author: Eric Larson # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% @@ -18,8 +18,13 @@ import mne from mne.datasets import sample, spm_face, testing -from mne.io import (read_raw_artemis123, read_raw_bti, read_raw_ctf, - read_raw_fif, read_raw_kit) +from mne.io import ( + read_raw_artemis123, + read_raw_bti, + read_raw_ctf, + read_raw_fif, + read_raw_kit, +) from mne.viz import plot_alignment, set_3d_title print(__doc__) @@ -30,48 +35,52 @@ # Neuromag # -------- -kwargs = dict(eeg=False, coord_frame='meg', show_axes=True, verbose=True) +kwargs = dict(eeg=False, coord_frame="meg", show_axes=True, verbose=True) -raw = read_raw_fif( - sample.data_path() / 'MEG' / 'sample' / 'sample_audvis_raw.fif') -fig = plot_alignment(raw.info, meg=('helmet', 'sensors'), **kwargs) -set_3d_title(figure=fig, title='Neuromag') +raw = read_raw_fif(sample.data_path() / "MEG" / "sample" / "sample_audvis_raw.fif") +fig = plot_alignment(raw.info, meg=("helmet", "sensors"), **kwargs) +set_3d_title(figure=fig, title="Neuromag") # %% # CTF # --- raw = read_raw_ctf( - spm_face.data_path() / 'MEG' / 'spm' / 'SPM_CTF_MEG_example_faces1_3D.ds') -fig = plot_alignment(raw.info, meg=('helmet', 'sensors', 'ref'), **kwargs) -set_3d_title(figure=fig, title='CTF 275') + spm_face.data_path() / "MEG" / "spm" / "SPM_CTF_MEG_example_faces1_3D.ds" +) +fig = plot_alignment(raw.info, meg=("helmet", "sensors", "ref"), **kwargs) +set_3d_title(figure=fig, title="CTF 275") # %% # BTi # --- -bti_path = root_path / 'io' / 'bti' / 'tests' / 'data' -raw = read_raw_bti(bti_path / 'test_pdf_linux', - bti_path / 'test_config_linux', - bti_path / 'test_hs_linux') -fig = plot_alignment(raw.info, meg=('helmet', 'sensors', 'ref'), **kwargs) -set_3d_title(figure=fig, title='Magnes 3600wh') +bti_path = root_path / "io" / "bti" / "tests" / "data" +raw = read_raw_bti( + bti_path / "test_pdf_linux", + bti_path / "test_config_linux", + bti_path / "test_hs_linux", +) +fig = plot_alignment(raw.info, meg=("helmet", "sensors", "ref"), **kwargs) +set_3d_title(figure=fig, title="Magnes 3600wh") # %% # KIT # --- -kit_path = root_path / 'io' / 'kit' / 'tests' / 'data' -raw = read_raw_kit(kit_path / 'test.sqd') -fig = plot_alignment(raw.info, meg=('helmet', 'sensors'), **kwargs) -set_3d_title(figure=fig, title='KIT') +kit_path = root_path / "io" / "kit" / "tests" / "data" +raw = read_raw_kit(kit_path / "test.sqd") +fig = plot_alignment(raw.info, meg=("helmet", "sensors"), **kwargs) 
+set_3d_title(figure=fig, title="KIT") # %% # Artemis123 # ---------- raw = read_raw_artemis123( - testing.data_path() / 'ARTEMIS123' / - 'Artemis_Data_2017-04-14-10h-38m-59s_Phantom_1k_HPI_1s.bin') -fig = plot_alignment(raw.info, meg=('helmet', 'sensors', 'ref'), **kwargs) -set_3d_title(figure=fig, title='Artemis123') + testing.data_path() + / "ARTEMIS123" + / "Artemis_Data_2017-04-14-10h-38m-59s_Phantom_1k_HPI_1s.bin" +) +fig = plot_alignment(raw.info, meg=("helmet", "sensors", "ref"), **kwargs) +set_3d_title(figure=fig, title="Artemis123") diff --git a/examples/visualization/mne_helmet.py b/examples/visualization/mne_helmet.py index 9653a705981..ceb149d77ba 100644 --- a/examples/visualization/mne_helmet.py +++ b/examples/visualization/mne_helmet.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-mne-helmet: @@ -9,28 +8,62 @@ This tutorial shows how to make the MNE helmet + brain image. """ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + # %% import mne sample_path = mne.datasets.sample.data_path() -subjects_dir = sample_path / 'subjects' -fname_evoked = sample_path / 'MEG' / 'sample' / 'sample_audvis-ave.fif' -fname_inv = (sample_path / 'MEG' / 'sample' / - 'sample_audvis-meg-oct-6-meg-inv.fif') -fname_trans = sample_path / 'MEG' / 'sample' / 'sample_audvis_raw-trans.fif' +subjects_dir = sample_path / "subjects" +fname_evoked = sample_path / "MEG" / "sample" / "sample_audvis-ave.fif" +fname_inv = sample_path / "MEG" / "sample" / "sample_audvis-meg-oct-6-meg-inv.fif" +fname_trans = sample_path / "MEG" / "sample" / "sample_audvis_raw-trans.fif" inv = mne.minimum_norm.read_inverse_operator(fname_inv) -evoked = mne.read_evokeds(fname_evoked, baseline=(None, 0), - proj=True, verbose=False, condition='Left Auditory') -maps = mne.make_field_map(evoked, trans=fname_trans, ch_type='meg', - subject='sample', subjects_dir=subjects_dir) +evoked = mne.read_evokeds( + fname_evoked, + baseline=(None, 0), + proj=True, + verbose=False, + condition="Left Auditory", +) +maps = mne.make_field_map( + evoked, + trans=fname_trans, + ch_type="meg", + subject="sample", + subjects_dir=subjects_dir, + upsampling=2, + origin="auto", +) time = 0.083 -fig = mne.viz.create_3d_figure((256, 256)) +fig = mne.viz.create_3d_figure((512, 512), bgcolor="w", title="MNE helmet") mne.viz.plot_alignment( - evoked.info, subject='sample', subjects_dir=subjects_dir, fig=fig, - trans=fname_trans, meg='sensors', eeg=False, surfaces='pial', - coord_frame='mri') -evoked.plot_field(maps, time=time, fig=fig, time_label=None, vmax=5e-13) + evoked.info, + subject="sample", + subjects_dir=subjects_dir, + fig=fig, + trans=fname_trans, + meg="sensors", + eeg=False, + surfaces="pial", + coord_frame="mri", +) +evoked.plot_field( + maps, + time=time, + fig=fig, + time_label=None, + vmax=5e-13, + time_viewer=False, +) mne.viz.set_3d_view( - fig, azimuth=40, elevation=87, focalpoint=(0., -0.01, 0.04), roll=-25, - distance=0.55) + fig, + azimuth=40, + elevation=87, + focalpoint=(0.0, -0.01, 0.04), + roll=-105, + distance=0.55, +) diff --git a/examples/visualization/montage_sgskip.py b/examples/visualization/montage_sgskip.py index 17bfc686e36..b3d05247855 100644 --- a/examples/visualization/montage_sgskip.py +++ b/examples/visualization/montage_sgskip.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _plot_montage: @@ -12,10 +11,12 @@ # Joan Massich # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% import os.path as op + import numpy as np import mne @@ -23,21 +24,23 @@ from mne.datasets import fetch_fsaverage from mne.viz import set_3d_title, set_3d_view - # %% # Check all montages against a sphere for current_montage in get_builtin_montages(): montage = mne.channels.make_standard_montage(current_montage) - info = mne.create_info( - ch_names=montage.ch_names, sfreq=100., ch_types='eeg') + info = mne.create_info(ch_names=montage.ch_names, sfreq=100.0, ch_types="eeg") info.set_montage(montage) - sphere = mne.make_sphere_model(r0='auto', head_radius='auto', info=info) + sphere = mne.make_sphere_model(r0="auto", head_radius="auto", info=info) fig = mne.viz.plot_alignment( # Plot options - show_axes=True, dig='fiducials', surfaces='head', + show_axes=True, + dig="fiducials", + surfaces="head", trans=mne.Transform("head", "mri", trans=np.eye(4)), # identity - bem=sphere, info=info) + bem=sphere, + info=info, + ) set_3d_view(figure=fig, azimuth=135, elevation=80) set_3d_title(figure=fig, title=current_montage) @@ -50,15 +53,19 @@ for current_montage in get_builtin_montages(): montage = mne.channels.make_standard_montage(current_montage) # Create dummy info - info = mne.create_info( - ch_names=montage.ch_names, sfreq=100., ch_types='eeg') + info = mne.create_info(ch_names=montage.ch_names, sfreq=100.0, ch_types="eeg") info.set_montage(montage) fig = mne.viz.plot_alignment( # Plot options - show_axes=True, dig='fiducials', surfaces='head', mri_fiducials=True, - subject='fsaverage', subjects_dir=subjects_dir, info=info, - coord_frame='mri', - trans='fsaverage', # transform from head coords to fsaverage's MRI + show_axes=True, + dig="fiducials", + surfaces="head", + mri_fiducials=True, + subject="fsaverage", + subjects_dir=subjects_dir, + info=info, + coord_frame="mri", + trans="fsaverage", # transform from head coords to fsaverage's MRI ) set_3d_view(figure=fig, azimuth=135, elevation=80) set_3d_title(figure=fig, title=current_montage) diff --git a/examples/visualization/parcellation.py b/examples/visualization/parcellation.py index 63f1dd9d177..d92a849b970 100644 --- a/examples/visualization/parcellation.py +++ b/examples/visualization/parcellation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-parcellation: @@ -23,41 +22,63 @@ # Denis Engemann # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% import mne + Brain = mne.viz.get_brain_class() -subjects_dir = mne.datasets.sample.data_path() / 'subjects' -mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=subjects_dir, - verbose=True) +subjects_dir = mne.datasets.sample.data_path() / "subjects" +mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=subjects_dir, verbose=True) -mne.datasets.fetch_aparc_sub_parcellation(subjects_dir=subjects_dir, - verbose=True) +mne.datasets.fetch_aparc_sub_parcellation(subjects_dir=subjects_dir, verbose=True) labels = mne.read_labels_from_annot( - 'fsaverage', 'HCPMMP1', 'lh', subjects_dir=subjects_dir) - -brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir, - cortex='low_contrast', background='white', size=(800, 600)) -brain.add_annotation('HCPMMP1') -aud_label = [label for label in labels if label.name == 'L_A1_ROI-lh'][0] + "fsaverage", "HCPMMP1", "lh", subjects_dir=subjects_dir +) + +brain = Brain( + "fsaverage", + "lh", + "inflated", + subjects_dir=subjects_dir, + cortex="low_contrast", + background="white", + size=(800, 600), +) +brain.add_annotation("HCPMMP1") +aud_label = [label for label in labels if label.name == "L_A1_ROI-lh"][0] brain.add_label(aud_label, borders=False) # %% # We can also plot a combined set of labels (23 per hemisphere). -brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir, - cortex='low_contrast', background='white', size=(800, 600)) -brain.add_annotation('HCPMMP1_combined') +brain = Brain( + "fsaverage", + "lh", + "inflated", + subjects_dir=subjects_dir, + cortex="low_contrast", + background="white", + size=(800, 600), +) +brain.add_annotation("HCPMMP1_combined") # %% # We can add another custom parcellation -brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir, - cortex='low_contrast', background='white', size=(800, 600)) -brain.add_annotation('aparc_sub') +brain = Brain( + "fsaverage", + "lh", + "inflated", + subjects_dir=subjects_dir, + cortex="low_contrast", + background="white", + size=(800, 600), +) +brain.add_annotation("aparc_sub") # %% # References diff --git a/examples/visualization/publication_figure.py b/examples/visualization/publication_figure.py deleted file mode 100644 index fc9385c385f..00000000000 --- a/examples/visualization/publication_figure.py +++ /dev/null @@ -1,274 +0,0 @@ -# -*- coding: utf-8 -*- -""" -.. _ex-publication-figure: - -=================================== -Make figures more publication ready -=================================== - -In this example, we show several use cases to take MNE plots and -customize them for a more publication-ready look. -""" - -# Authors: Eric Larson -# Daniel McCloy -# Stefan Appelhoff -# -# License: BSD-3-Clause - -# %% -# Imports -# ------- -# We are importing everything we need for this example: - -import numpy as np -import matplotlib.pyplot as plt -from mpl_toolkits.axes_grid1 import (make_axes_locatable, ImageGrid, - inset_locator) - -import mne - -# %% -# Evoked plot with brain activation -# --------------------------------- -# -# Suppose we want a figure with an evoked plot on top, and the brain activation -# below, with the brain subplot slightly bigger than the evoked plot. Let's -# start by loading some :ref:`example data `. 
- -data_path = mne.datasets.sample.data_path() -subjects_dir = data_path / 'subjects' -fname_stc = data_path / 'MEG' / 'sample' / 'sample_audvis-meg-eeg-lh.stc' -fname_evoked = data_path / 'MEG' / 'sample' / 'sample_audvis-ave.fif' - -evoked = mne.read_evokeds(fname_evoked, 'Left Auditory') -evoked.pick_types(meg='grad').apply_baseline((None, 0.)) -max_t = evoked.get_peak()[1] - -stc = mne.read_source_estimate(fname_stc) - -# %% -# During interactive plotting, we might see figures like this: - -evoked.plot() - -stc.plot(views='lat', hemi='split', size=(800, 400), subject='sample', - subjects_dir=subjects_dir, initial_time=max_t, - time_viewer=False, show_traces=False) - -# %% -# To make a publication-ready figure, first we'll re-plot the brain on a white -# background, take a screenshot of it, and then crop out the white margins. -# While we're at it, let's change the colormap, set custom colormap limits and -# remove the default colorbar (so we can add a smaller, vertical one later): - -colormap = 'viridis' -clim = dict(kind='value', lims=[4, 8, 12]) - -# Plot the STC, get the brain image, crop it: -brain = stc.plot(views='lat', hemi='split', size=(800, 400), subject='sample', - subjects_dir=subjects_dir, initial_time=max_t, background='w', - colorbar=False, clim=clim, colormap=colormap, - time_viewer=False, show_traces=False) -screenshot = brain.screenshot() -brain.close() - -# %% -# Now let's crop out the white margins and the white gap between hemispheres. -# The screenshot has dimensions ``(h, w, 3)``, with the last axis being R, G, B -# values for each pixel, encoded as integers between ``0`` and ``255``. ``(255, -# 255, 255)`` encodes a white pixel, so we'll detect any pixels that differ -# from that: - -nonwhite_pix = (screenshot != 255).any(-1) -nonwhite_row = nonwhite_pix.any(1) -nonwhite_col = nonwhite_pix.any(0) -cropped_screenshot = screenshot[nonwhite_row][:, nonwhite_col] - -# before/after results -fig = plt.figure(figsize=(4, 4)) -axes = ImageGrid(fig, 111, nrows_ncols=(2, 1), axes_pad=0.5) -for ax, image, title in zip(axes, [screenshot, cropped_screenshot], - ['Before', 'After']): - ax.imshow(image) - ax.set_title('{} cropping'.format(title)) - -# %% -# A lot of figure settings can be adjusted after the figure is created, but -# many can also be adjusted in advance by updating the -# :data:`~matplotlib.rcParams` dictionary. This is especially useful when your -# script generates several figures that you want to all have the same style: - -# Tweak the figure style -plt.rcParams.update({ - 'ytick.labelsize': 'small', - 'xtick.labelsize': 'small', - 'axes.labelsize': 'small', - 'axes.titlesize': 'medium', - 'grid.color': '0.75', - 'grid.linestyle': ':', -}) - -# %% -# Now let's create our custom figure. There are lots of ways to do this step. -# Here we'll create the figure and the subplot axes in one step, specifying -# overall figure size, number and arrangement of subplots, and the ratio of -# subplot heights for each row using :mod:`GridSpec keywords -# `. Other approaches (using -# :func:`~matplotlib.pyplot.subplot2grid`, or adding each axes manually) are -# shown commented out, for reference. 
- -# sphinx_gallery_thumbnail_number = 4 -# figsize unit is inches -fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(4.5, 3.), - gridspec_kw=dict(height_ratios=[3, 4])) - -# alternate way #1: using subplot2grid -# fig = plt.figure(figsize=(4.5, 3.)) -# axes = [plt.subplot2grid((7, 1), (0, 0), rowspan=3), -# plt.subplot2grid((7, 1), (3, 0), rowspan=4)] - -# alternate way #2: using figure-relative coordinates -# fig = plt.figure(figsize=(4.5, 3.)) -# axes = [fig.add_axes([0.125, 0.58, 0.775, 0.3]), # left, bot., width, height -# fig.add_axes([0.125, 0.11, 0.775, 0.4])] - -# we'll put the evoked plot in the upper axes, and the brain below -evoked_idx = 0 -brain_idx = 1 - -# plot the evoked in the desired subplot, and add a line at peak activation -evoked.plot(axes=axes[evoked_idx]) -peak_line = axes[evoked_idx].axvline(max_t, color='#66CCEE', ls='--') -# custom legend -axes[evoked_idx].legend( - [axes[evoked_idx].lines[0], peak_line], ['MEG data', 'Peak time'], - frameon=True, columnspacing=0.1, labelspacing=0.1, - fontsize=8, fancybox=True, handlelength=1.8) -# remove the "N_ave" annotation -for text in list(axes[evoked_idx].texts): - text.remove() -# Remove spines and add grid -axes[evoked_idx].grid(True) -axes[evoked_idx].set_axisbelow(True) -for key in ('top', 'right'): - axes[evoked_idx].spines[key].set(visible=False) -# Tweak the ticks and limits -axes[evoked_idx].set( - yticks=np.arange(-200, 201, 100), xticks=np.arange(-0.2, 0.51, 0.1)) -axes[evoked_idx].set( - ylim=[-225, 225], xlim=[-0.2, 0.5]) - -# now add the brain to the lower axes -axes[brain_idx].imshow(cropped_screenshot) -axes[brain_idx].axis('off') -# add a vertical colorbar with the same properties as the 3D one -divider = make_axes_locatable(axes[brain_idx]) -cax = divider.append_axes('right', size='5%', pad=0.2) -cbar = mne.viz.plot_brain_colorbar(cax, clim, colormap, label='Activation (F)') - -# tweak margins and spacing -fig.subplots_adjust( - left=0.15, right=0.9, bottom=0.01, top=0.9, wspace=0.1, hspace=0.5) - -# add subplot labels -for ax, label in zip(axes, 'AB'): - ax.text(0.03, ax.get_position().ymax, label, transform=fig.transFigure, - fontsize=12, fontweight='bold', va='top', ha='left') - -# %% -# Custom timecourse with montage inset -# ------------------------------------ -# -# Suppose we want a figure with some mean timecourse extracted from a number of -# sensors, and we want a smaller panel within the figure to show a head outline -# with the positions of those sensors clearly marked. -# If you are familiar with MNE, you know that this is something that -# :func:`mne.viz.plot_compare_evokeds` does, see an example output in -# :ref:`ex-hf-sef-data` at the bottom. -# -# In this part of the example, we will show you how to achieve this result on -# your own figure, without having to use :func:`mne.viz.plot_compare_evokeds`! -# -# Let's start by loading some :ref:`example data `. - -data_path = mne.datasets.sample.data_path() -fname_raw = data_path / "MEG" / "sample" / "sample_audvis_raw.fif" -raw = mne.io.read_raw_fif(fname_raw) - -# For the sake of the example, we focus on EEG data -raw.pick_types(meg=False, eeg=True) - - -# %% -# Let's make a plot. 
- -# channels to plot: -to_plot = [f"EEG {i:03}" for i in range(1, 5)] - -# get the data for plotting in a short time interval from 10 to 20 seconds -start = int(raw.info['sfreq'] * 10) -stop = int(raw.info['sfreq'] * 20) -data, times = raw.get_data(picks=to_plot, - start=start, stop=stop, return_times=True) - -# Scale the data from the MNE internal unit V to µV -data *= 1e6 -# Take the mean of the channels -mean = np.mean(data, axis=0) -# make a figure -fig, ax = plt.subplots(figsize=(4.5, 3)) -# plot some EEG data -ax.plot(times, mean) - -# %% -# So far so good. Now let's add the smaller figure within the figure to show -# exactly, which sensors we used to make the timecourse. -# For that, we use an "inset_axes" that we plot into our existing axes. -# The head outline with the sensor positions can be plotted using the -# `~mne.io.Raw` object that is the source of our data. -# Specifically, that object already contains all the sensor positions, -# and we can plot them using the ``plot_sensors`` method. - -# recreate the figure (only necessary for our documentation server) -fig, ax = plt.subplots(figsize=(4.5, 3)) -ax.plot(times, mean) -axins = inset_locator.inset_axes(ax, width="30%", height="30%", loc=2) - -# pick_channels() edits the raw object in place, so we'll make a copy here -# so that our raw object stays intact for potential later analysis -raw.copy().pick_channels(to_plot).plot_sensors(title="", axes=axins) - -# %% -# That looks nice. But the sensor dots are way too big for our taste. Luckily, -# all MNE-Python plots use Matplotlib under the hood and we can customize -# each and every facet of them. -# To make the sensor dots smaller, we need to first get a handle on them to -# then apply a ``*.set_*`` method on them. - -# If we inspect our axes we find the objects contained in our plot: -print(axins.get_children()) - -# %% -# That's quite a a lot of objects, but we know that we want to change the -# sensor dots, and those are most certainly a "PathCollection" object. -# So let's have a look at how many "collections" we have in the axes. -print(axins.collections) - -# %% -# There is only one! Those must be the sensor dots we were looking for. -# We finally found exactly what we needed. Sometimes this can take a bit of -# experimentation. - -sensor_dots = axins.collections[0] - -# Recreate the figure once more; shrink the sensor dots; add axis labels -fig, ax = plt.subplots(figsize=(4.5, 3)) -ax.plot(times, mean) -axins = inset_locator.inset_axes(ax, width="30%", height="30%", loc=2) -raw.copy().pick_channels(to_plot).plot_sensors(title="", axes=axins) -sensor_dots = axins.collections[0] -sensor_dots.set_sizes([1]) -# add axis labels, and adjust bottom figure margin to make room for them -ax.set(xlabel="Time (s)", ylabel="Amplitude (µV)") -fig.subplots_adjust(bottom=0.2) diff --git a/examples/visualization/roi_erpimage_by_rt.py b/examples/visualization/roi_erpimage_by_rt.py index 26a07ff07a8..f9cd9f708cf 100644 --- a/examples/visualization/roi_erpimage_by_rt.py +++ b/examples/visualization/roi_erpimage_by_rt.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _roi-erp: @@ -10,8 +9,7 @@ potential / field (ERP/ERF) image. The EEGLAB example file, which contains an experiment with button press -responses to simple visual stimuli, is read in and response times are -calculated. +responses to simple visual stimuli, is read in and response times are calculated. 
Regions of Interest are determined by the channel types (in 10/20 channel notation, even channels are right, odd are left, and 'z' are central). The median and the Global Field Power within each channel group is calculated, @@ -20,36 +18,61 @@ # Authors: Jona Sassenhagen # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import mne -from mne.event import define_target_events from mne.channels import make_1020_channel_selections +from mne.event import define_target_events print(__doc__) # %% # Load EEGLAB example data (a small EEG dataset) data_path = mne.datasets.testing.data_path() -fname = data_path / 'EEGLAB' / 'test_raw.set' +fname = data_path / "EEGLAB" / "test_raw.set" event_id = {"rt": 1, "square": 2} # must be specified for str events raw = mne.io.read_raw_eeglab(fname) mapping = { - 'EEG 000': 'Fpz', 'EEG 001': 'EOG1', 'EEG 002': 'F3', 'EEG 003': 'Fz', - 'EEG 004': 'F4', 'EEG 005': 'EOG2', 'EEG 006': 'FC5', 'EEG 007': 'FC1', - 'EEG 008': 'FC2', 'EEG 009': 'FC6', 'EEG 010': 'T7', 'EEG 011': 'C3', - 'EEG 012': 'C4', 'EEG 013': 'Cz', 'EEG 014': 'T8', 'EEG 015': 'CP5', - 'EEG 016': 'CP1', 'EEG 017': 'CP2', 'EEG 018': 'CP6', 'EEG 019': 'P7', - 'EEG 020': 'P3', 'EEG 021': 'Pz', 'EEG 022': 'P4', 'EEG 023': 'P8', - 'EEG 024': 'PO7', 'EEG 025': 'PO3', 'EEG 026': 'POz', 'EEG 027': 'PO4', - 'EEG 028': 'PO8', 'EEG 029': 'O1', 'EEG 030': 'Oz', 'EEG 031': 'O2' + "EEG 000": "Fpz", + "EEG 001": "EOG1", + "EEG 002": "F3", + "EEG 003": "Fz", + "EEG 004": "F4", + "EEG 005": "EOG2", + "EEG 006": "FC5", + "EEG 007": "FC1", + "EEG 008": "FC2", + "EEG 009": "FC6", + "EEG 010": "T7", + "EEG 011": "C3", + "EEG 012": "C4", + "EEG 013": "Cz", + "EEG 014": "T8", + "EEG 015": "CP5", + "EEG 016": "CP1", + "EEG 017": "CP2", + "EEG 018": "CP6", + "EEG 019": "P7", + "EEG 020": "P3", + "EEG 021": "Pz", + "EEG 022": "P4", + "EEG 023": "P8", + "EEG 024": "PO7", + "EEG 025": "PO3", + "EEG 026": "POz", + "EEG 027": "PO4", + "EEG 028": "PO8", + "EEG 029": "O1", + "EEG 030": "Oz", + "EEG 031": "O2", } raw.rename_channels(mapping) -raw.set_channel_types({"EOG1": 'eog', "EOG2": 'eog'}) -raw.set_montage('standard_1020') +raw.set_channel_types({"EOG1": "eog", "EOG2": "eog"}) +raw.set_montage("standard_1020") events = mne.events_from_annotations(raw, event_id)[0] @@ -62,11 +85,11 @@ tmax = 0.7 sfreq = raw.info["sfreq"] reference_id, target_id = 2, 1 -new_events, rts = define_target_events(events, reference_id, target_id, sfreq, - tmin=0., tmax=tmax, new_id=2) +new_events, rts = define_target_events( + events, reference_id, target_id, sfreq, tmin=0.0, tmax=tmax, new_id=2 +) -epochs = mne.Epochs(raw, events=new_events, tmax=tmax + 0.1, - event_id={"square": 2}) +epochs = mne.Epochs(raw, events=new_events, tmax=tmax + 0.1, event_id={"square": 2}) # %% # Plot using :term:`global field power` @@ -77,13 +100,23 @@ selections = make_1020_channel_selections(epochs.info, midline="12z") # The actual plots (GFP) -epochs.plot_image(group_by=selections, order=order, sigma=1.5, - overlay_times=rts / 1000., combine='gfp', - ts_args=dict(vlines=[0, rts.mean() / 1000.])) +epochs.plot_image( + group_by=selections, + order=order, + sigma=1.5, + overlay_times=rts / 1000.0, + combine="gfp", + ts_args=dict(vlines=[0, rts.mean() / 1000.0]), +) # %% # Plot using median -epochs.plot_image(group_by=selections, order=order, sigma=1.5, - overlay_times=rts / 1000., combine='median', - ts_args=dict(vlines=[0, rts.mean() / 1000.])) +epochs.plot_image( + group_by=selections, + order=order, + sigma=1.5, + overlay_times=rts / 1000.0, + 
combine="median", + ts_args=dict(vlines=[0, rts.mean() / 1000.0]), +) diff --git a/examples/visualization/sensor_noise_level.py b/examples/visualization/sensor_noise_level.py deleted file mode 100644 index 17b4dbd354a..00000000000 --- a/examples/visualization/sensor_noise_level.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- -""" -.. _ex-noise-level: - -====================================== -Show noise levels from empty room data -====================================== - -This shows how to use :meth:`mne.io.Raw.plot_psd` to examine noise levels -of systems. See :footcite:`KhanCohen2013` for an example. -""" -# Author: Eric Larson -# -# License: BSD-3-Clause - -# %% - -import mne - -data_path = mne.datasets.sample.data_path() - -raw_erm = mne.io.read_raw_fif( - data_path / 'MEG' / 'sample' / 'ernoise_raw.fif', preload=True -) - -# %% -# We can plot the absolute noise levels: -raw_erm.compute_psd(tmax=10).plot(average=True, spatial_colors=False, - dB=False, xscale='log') -# %% -# References -# ---------- -# -# .. footbibliography:: diff --git a/examples/visualization/ssp_projs_sensitivity_map.py b/examples/visualization/ssp_projs_sensitivity_map.py index a4b3e4bc341..65a96cd9908 100644 --- a/examples/visualization/ssp_projs_sensitivity_map.py +++ b/examples/visualization/ssp_projs_sensitivity_map.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-ssp-proj: @@ -12,23 +11,23 @@ # Author: Alexandre Gramfort # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import matplotlib.pyplot as plt from mne import read_forward_solution, read_proj, sensitivity_map - from mne.datasets import sample print(__doc__) data_path = sample.data_path() -subjects_dir = data_path / 'subjects' -meg_path = data_path / 'MEG' / 'sample' -fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif' -ecg_fname = meg_path / 'sample_audvis_ecg-proj.fif' +subjects_dir = data_path / "subjects" +meg_path = data_path / "MEG" / "sample" +fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif" +ecg_fname = meg_path / "sample_audvis_ecg-proj.fif" fwd = read_forward_solution(fname) @@ -37,7 +36,7 @@ projs = projs[::2] # Compute sensitivity map -ssp_ecg_map = sensitivity_map(fwd, ch_type='grad', projs=projs, mode='angle') +ssp_ecg_map = sensitivity_map(fwd, ch_type="grad", projs=projs, mode="angle") # %% # Show sensitivity map @@ -45,6 +44,10 @@ plt.hist(ssp_ecg_map.data.ravel()) plt.show() -args = dict(clim=dict(kind='value', lims=(0.2, 0.6, 1.)), smoothing_steps=7, - hemi='rh', subjects_dir=subjects_dir) -ssp_ecg_map.plot(subject='sample', time_label='ECG SSP sensitivity', **args) +args = dict( + clim=dict(kind="value", lims=(0.2, 0.6, 1.0)), + smoothing_steps=7, + hemi="rh", + subjects_dir=subjects_dir, +) +ssp_ecg_map.plot(subject="sample", time_label="ECG SSP sensitivity", **args) diff --git a/examples/visualization/topo_compare_conditions.py b/examples/visualization/topo_compare_conditions.py index eb5c9d13b0b..eb2699eb262 100644 --- a/examples/visualization/topo_compare_conditions.py +++ b/examples/visualization/topo_compare_conditions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-topo-compare: @@ -16,50 +15,50 @@ # Alexandre Gramfort # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
# %% - import matplotlib.pyplot as plt -import mne -from mne.viz import plot_evoked_topo +import mne from mne.datasets import sample +from mne.viz import plot_evoked_topo print(__doc__) data_path = sample.data_path() # %% -# Set parameters -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' -event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' +# Set parameters. +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" +event_fname = meg_path / "sample_audvis_filt-0-40_raw-eve.fif" tmin = -0.2 tmax = 0.5 -# Setup for reading the raw data +# Setup for reading the raw data. raw = mne.io.read_raw_fif(raw_fname) events = mne.read_events(event_fname) -# Set up amplitude-peak rejection values for MEG channels +# Set up amplitude-peak rejection values for MEG channels. reject = dict(grad=4000e-13, mag=4e-12) -# Create epochs including different events -event_id = {'audio/left': 1, 'audio/right': 2, - 'visual/left': 3, 'visual/right': 4} -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, - picks='meg', baseline=(None, 0), reject=reject) +# Create epochs including different events. +event_id = {"audio/left": 1, "audio/right": 2, "visual/left": 3, "visual/right": 4} +epochs = mne.Epochs( + raw, events, event_id, tmin, tmax, picks="meg", baseline=(None, 0), reject=reject +) # Generate list of evoked objects from conditions names -evokeds = [epochs[name].average() for name in ('left', 'right')] +evokeds = [epochs[name].average() for name in ("left", "right")] # %% -# Show topography for two different conditions +# Show topography for two different conditions. -colors = 'blue', 'red' -title = 'MNE sample data\nleft vs right (A/V combined)' +colors = "blue", "red" +title = "MNE sample data\nleft vs right (A/V combined)" -plot_evoked_topo(evokeds, color=colors, title=title, background_color='w') +plot_evoked_topo(evokeds, color=colors, title=title, background_color="w") plt.show() diff --git a/examples/visualization/topo_customized.py b/examples/visualization/topo_customized.py index 6e864802ba5..2303961f9da 100644 --- a/examples/visualization/topo_customized.py +++ b/examples/visualization/topo_customized.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-topo-custom: @@ -16,54 +15,56 @@ # Author: Denis A. Engemann # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% - -import numpy as np import matplotlib.pyplot as plt +import numpy as np import mne -from mne.viz import iter_topography from mne import io from mne.datasets import sample +from mne.viz import iter_topography print(__doc__) data_path = sample.data_path() -meg_path = data_path / 'MEG' / 'sample' -raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' +meg_path = data_path / "MEG" / "sample" +raw_fname = meg_path / "sample_audvis_filt-0-40_raw.fif" raw = io.read_raw_fif(raw_fname, preload=True) -raw.filter(1, 20, fir_design='firwin') +raw.filter(1, 20, fir_design="firwin") picks = mne.pick_types(raw.info, meg=True, exclude=[]) tmin, tmax = 0, 120 # use the first 120s of data fmin, fmax = 2, 20 # look at frequencies between 2 and 20Hz n_fft = 2048 # the FFT size (n_fft). 
Ideally a power of 2 -spectrum = raw.compute_psd( - picks=picks, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax) +spectrum = raw.compute_psd(picks=picks, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax) psds, freqs = spectrum.get_data(exclude=(), return_freqs=True) psds = 20 * np.log10(psds) # scale to dB def my_callback(ax, ch_idx): - """ + """Handle axes callback. + This block of code is executed once you click on one of the channel axes in the plot. To work with the viz internals, this function should only take two parameters, the axis and the channel or data index. """ - ax.plot(freqs, psds[ch_idx], color='red') - ax.set_xlabel('Frequency (Hz)') - ax.set_ylabel('Power (dB)') + ax.plot(freqs, psds[ch_idx], color="red") + ax.set_xlabel("Frequency (Hz)") + ax.set_ylabel("Power (dB)") -for ax, idx in iter_topography(raw.info, - fig_facecolor='white', - axis_facecolor='white', - axis_spinecolor='white', - on_pick=my_callback): - ax.plot(psds[idx], color='red') +for ax, idx in iter_topography( + raw.info, + fig_facecolor="white", + axis_facecolor="white", + axis_spinecolor="white", + on_pick=my_callback, +): + ax.plot(psds[idx], color="red") -plt.gcf().suptitle('Power spectral densities') +plt.gcf().suptitle("Power spectral densities") plt.show() diff --git a/examples/visualization/xhemi.py b/examples/visualization/xhemi.py index c6bf6e5a961..e0974a30a58 100644 --- a/examples/visualization/xhemi.py +++ b/examples/visualization/xhemi.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. _ex-xhemi: @@ -14,32 +13,38 @@ # Author: Christian Brodbeck # # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # %% import mne data_dir = mne.datasets.sample.data_path() -subjects_dir = data_dir / 'subjects' -stc_path = data_dir / 'MEG' / 'sample' / 'sample_audvis-meg-eeg' -stc = mne.read_source_estimate(stc_path, 'sample') +subjects_dir = data_dir / "subjects" +stc_path = data_dir / "MEG" / "sample" / "sample_audvis-meg-eeg" +stc = mne.read_source_estimate(stc_path, "sample") # First, morph the data to fsaverage_sym, for which we have left_right # registrations: -stc = mne.compute_source_morph(stc, 'sample', 'fsaverage_sym', smooth=5, - warn=False, - subjects_dir=subjects_dir).apply(stc) +stc = mne.compute_source_morph( + stc, "sample", "fsaverage_sym", smooth=5, warn=False, subjects_dir=subjects_dir +).apply(stc) # Compute a morph-matrix mapping the right to the left hemisphere, # and vice-versa. 
-morph = mne.compute_source_morph(stc, 'fsaverage_sym', 'fsaverage_sym', - spacing=stc.vertices, warn=False, - subjects_dir=subjects_dir, xhemi=True, - verbose='error') # creating morph map +morph = mne.compute_source_morph( + stc, + "fsaverage_sym", + "fsaverage_sym", + spacing=stc.vertices, + warn=False, + subjects_dir=subjects_dir, + xhemi=True, + verbose="error", +) # creating morph map stc_xhemi = morph.apply(stc) # Now we can subtract them and plot the result: diff = stc - stc_xhemi -diff.plot(hemi='lh', subjects_dir=subjects_dir, initial_time=0.07, - size=(800, 600)) +diff.plot(hemi="lh", subjects_dir=subjects_dir, initial_time=0.07, size=(800, 600)) diff --git a/ignore_words.txt b/ignore_words.txt index 8dde5403c07..12e1a14ae0e 100644 --- a/ignore_words.txt +++ b/ignore_words.txt @@ -14,6 +14,7 @@ nd cas thes ba +bu ist od fo @@ -33,3 +34,12 @@ recuse ro nam shs +pres +aas +vor +connec +sme +tim +whitelists +gotcha +uner diff --git a/logo/generate_mne_logos.py b/logo/generate_mne_logos.py index f9d62ddc581..3dbca9918aa 100644 --- a/logo/generate_mne_logos.py +++ b/logo/generate_mne_logos.py @@ -1,41 +1,45 @@ -# -*- coding: utf-8 -*- -""" -=============================================================================== -Script 'mne logo' -=============================================================================== - -This script makes the logo for MNE. -""" -# @author: drmccloy -# Created on Mon Jul 20 11:28:16 2015 +"""Generate the MNE-Python logos.""" + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import pathlib -import numpy as np -import os.path as op import matplotlib.pyplot as plt -from matplotlib import rcParams -from scipy.stats import multivariate_normal +import numpy as np +from matplotlib import font_manager, rcParams +from matplotlib.colors import LinearSegmentedColormap +from matplotlib.patches import Ellipse, FancyBboxPatch, PathPatch, Rectangle from matplotlib.path import Path from matplotlib.text import TextPath -from matplotlib.patches import PathPatch, Ellipse, FancyBboxPatch, Rectangle -from matplotlib.colors import LinearSegmentedColormap +from scipy.stats import multivariate_normal # manually set values dpi = 300 center_fudge = np.array([15, 30]) # compensate for font bounding box padding tagline_scale_fudge = 0.97 # to get justification right -tagline_offset_fudge = np.array([0, -100.]) +tagline_offset_fudge = np.array([0, -100.0]) -# font, etc -rcp = {'font.sans-serif': ['Primetime'], 'font.style': 'normal', - 'font.weight': 'black', 'font.variant': 'normal', 'figure.dpi': dpi, - 'savefig.dpi': dpi, 'contour.negative_linestyle': 'solid'} +# font, etc (default to MNE font) +rcp = { + "font.sans-serif": ["Primetime"], + "font.style": "normal", + "font.weight": "black", + "font.variant": "normal", + "figure.dpi": dpi, + "savefig.dpi": dpi, + "contour.negative_linestyle": "solid", +} plt.rcdefaults() rcParams.update(rcp) +# %% +# mne_logo.svg and mne_logo_dark.svg + # initialize figure (no axes, margins, etc) fig = plt.figure(1, figsize=(5, 2.25), frameon=False, dpi=dpi) -ax = plt.Axes(fig, [0., 0., 1., 1.]) +ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0]) ax.set_axis_off() fig.add_axes(ax) @@ -45,10 +49,12 @@ y = np.arange(-3.0, 3.0, delta) X, Y = np.meshgrid(x, y) xy = np.array([X, Y]).transpose(1, 2, 0) -Z1 = multivariate_normal.pdf(xy, mean=[-5.0, 0.9], - cov=np.array([[8.0, 1.0], [1.0, 7.0]]) ** 2) -Z2 = multivariate_normal.pdf(xy, mean=[2.6, -2.5], - cov=np.array([[15.0, 2.5], [2.5, 2.5]]) ** 2) +Z1 
= multivariate_normal.pdf( + xy, mean=[-5.0, 0.9], cov=np.array([[8.0, 1.0], [1.0, 7.0]]) ** 2 +) +Z2 = multivariate_normal.pdf( + xy, mean=[2.6, -2.5], cov=np.array([[15.0, 2.5], [2.5, 2.5]]) ** 2 +) Z = Z2 - 0.7 * Z1 # color map: field gradient (yellow-red-gray-blue-cyan) @@ -57,105 +63,160 @@ # 'blue': ((0, 0, 0), (0.4, 0, 0), (0.5, 0.5, 0.5), (0.6, 1, 1), (1, 1, 1)), # noqa # 'green': ((0, 1, 1), (0.4, 0, 0), (0.5, 0.5, 0.5), (0.6, 0, 0), (1, 1, 1)), # noqa # } -yrtbc = {'red': ((0.0, 1.0, 1.0), (0.5, 1.0, 0.0), (1.0, 0.0, 0.0)), - 'blue': ((0.0, 0.0, 0.0), (0.5, 0.0, 1.0), (1.0, 1.0, 1.0)), - 'green': ((0.0, 1.0, 1.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)), - 'alpha': ((0.0, 1.0, 1.0), (0.4, 0.8, 0.8), (0.5, 0.2, 0.2), - (0.6, 0.8, 0.8), (1.0, 1.0, 1.0))} +yrtbc = { + "red": ((0.0, 1.0, 1.0), (0.5, 1.0, 0.0), (1.0, 0.0, 0.0)), + "blue": ((0.0, 0.0, 0.0), (0.5, 0.0, 1.0), (1.0, 1.0, 1.0)), + "green": ((0.0, 1.0, 1.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)), + "alpha": ( + (0.0, 1.0, 1.0), + (0.4, 0.8, 0.8), + (0.5, 0.2, 0.2), + (0.6, 0.8, 0.8), + (1.0, 1.0, 1.0), + ), +} # color map: field lines (red | blue) -redbl = {'red': ((0., 1., 1.), (0.5, 1., 0.), (1., 0., 0.)), - 'blue': ((0., 0., 0.), (0.5, 0., 1.), (1., 1., 1.)), - 'green': ((0., 0., 0.), (1., 0., 0.)), - 'alpha': ((0., 0.4, 0.4), (1., 0.4, 0.4))} -mne_field_grad_cols = LinearSegmentedColormap('mne_grad', yrtbc) -mne_field_line_cols = LinearSegmentedColormap('mne_line', redbl) +redbl = { + "red": ((0.0, 1.0, 1.0), (0.5, 1.0, 0.0), (1.0, 0.0, 0.0)), + "blue": ((0.0, 0.0, 0.0), (0.5, 0.0, 1.0), (1.0, 1.0, 1.0)), + "green": ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)), + "alpha": ((0.0, 0.4, 0.4), (1.0, 0.4, 0.4)), +} +mne_field_grad_cols = LinearSegmentedColormap("mne_grad", yrtbc) +mne_field_line_cols = LinearSegmentedColormap("mne_line", redbl) # plot gradient and contour lines -im = ax.imshow(Z, cmap=mne_field_grad_cols, aspect='equal', zorder=1) +im = ax.imshow(Z, cmap=mne_field_grad_cols, aspect="equal", zorder=1) cs = ax.contour(Z, 9, cmap=mne_field_line_cols, linewidths=1, zorder=1) xlim, ylim = ax.get_xbound(), ax.get_ybound() plot_dims = np.r_[np.diff(xlim), np.diff(ylim)] rect = Rectangle( - [xlim[0], ylim[0]], plot_dims[0], plot_dims[1], facecolor='w', zorder=0.5) + [xlim[0], ylim[0]], plot_dims[0], plot_dims[1], facecolor="w", zorder=0.5 +) # create MNE clipping mask -mne_path = TextPath((0, 0), 'MNE') +mne_path = TextPath((0, 0), "MNE") dims = mne_path.vertices.max(0) - mne_path.vertices.min(0) -vert = mne_path.vertices - dims / 2. +vert = mne_path.vertices - dims / 2.0 mult = (plot_dims / dims).min() mult = [mult, -mult] # y axis is inverted (origin at top left) -offset = plot_dims / 2. 
- center_fudge +offset = plot_dims / 2.0 - center_fudge mne_clip = Path(offset + vert * mult, mne_path.codes) -ax.add_patch(PathPatch(mne_clip, color='w', zorder=0, linewidth=0)) +ax.add_patch(PathPatch(mne_clip, color="w", zorder=0, linewidth=0)) # apply clipping mask to field gradient and lines im.set_clip_path(mne_clip, transform=im.get_transform()) ax.add_patch(rect) rect.set_clip_path(mne_clip, transform=im.get_transform()) -for coll in cs.collections: - coll.set_clip_path(mne_clip, transform=im.get_transform()) +cs.set_clip_path(mne_clip, transform=im.get_transform()) # get final position of clipping mask mne_corners = mne_clip.get_extents().corners() +# For this make sure that this gives something like "" +fnt = font_manager.findfont("Cooper Hewitt:style=normal:weight=book") +if "Book" not in fnt or "CooperHewitt" not in fnt: + print( + f"WARNING: Might not use correct Cooper Hewitt, got {fnt} but want " + "CooperHewitt-Book.otf or similar" + ) + # add tagline -rcParams.update({'font.sans-serif': ['Cooper Hewitt'], 'font.weight': '300'}) -tag_path = TextPath((0, 0), 'MEG + EEG ANALYSIS & VISUALIZATION') +with plt.rc_context({"font.sans-serif": ["Cooper Hewitt"], "font.weight": "book"}): + tag_path = TextPath((0, 0), "MEG + EEG ANALYSIS & VISUALIZATION") dims = tag_path.vertices.max(0) - tag_path.vertices.min(0) -vert = tag_path.vertices - dims / 2. +vert = tag_path.vertices - dims / 2.0 mult = tagline_scale_fudge * (plot_dims / dims).min() mult = [mult, -mult] # y axis is inverted -offset = mne_corners[-1] - np.array([mne_clip.get_extents().size[0] / 2., - -dims[1]]) - tagline_offset_fudge +offset = ( + mne_corners[-1] + - np.array([mne_clip.get_extents().size[0] / 2.0, -dims[1]]) + - tagline_offset_fudge +) tag_clip = Path(offset + vert * mult, tag_path.codes) -tag_patch = PathPatch(tag_clip, facecolor='k', edgecolor='none', zorder=10) +tag_patch = PathPatch(tag_clip, facecolor="0.6", edgecolor="none", zorder=10) ax.add_patch(tag_patch) yl = ax.get_ylim() -yy = np.max([tag_clip.vertices.max(0)[-1], - tag_clip.vertices.min(0)[-1]]) +yy = np.max([tag_clip.vertices.max(0)[-1], tag_clip.vertices.min(0)[-1]]) ax.set_ylim(np.ceil(yy), yl[-1]) # only save actual image extent plus a bit of padding -plt.draw() -static_dir = op.join(op.dirname(__file__), '..', 'doc', '_static') -assert op.isdir(static_dir) -plt.savefig(op.join(static_dir, 'mne_logo.svg'), transparent=True) -tag_patch.set_facecolor('w') -rect.set_facecolor('0.5') -plt.savefig(op.join(static_dir, 'mne_logo_dark.svg'), transparent=True) -tag_patch.set_facecolor('k') -rect.set_facecolor('w') +fig.canvas.draw_idle() +static_dir = pathlib.Path(__file__).parents[1] / "doc" / "_static" +assert static_dir.is_dir() +kind_color = dict( + mne_logo_dark=("0.8", "0.5"), + mne_logo_gray=("0.6", "0.75"), + mne_logo=("0.3", "w"), # always last +) +for kind, (tag_color, rect_color) in kind_color.items(): + tag_patch.set_facecolor(tag_color) + rect.set_facecolor(rect_color) + fig.savefig( + static_dir / f"{kind}.svg", + transparent=True, + ) + +# %% +# mne_splash.png # modify to make the splash screen -data_dir = op.join(op.dirname(__file__), '..', 'mne', 'icons') -ax.patches[-1].set_facecolor('w') +data_dir = pathlib.Path(__file__).parents[1] / "mne" / "icons" +assert data_dir.is_dir() +tag_patch.set_facecolor("0.7") for coll in list(ax.collections): coll.remove() -bounds = np.array([ - [mne_path.vertices[:, ii].min(), mne_path.vertices[:, ii].max()] - for ii in range(2)]) -bounds *= (plot_dims / dims) +bounds = np.array( + [ + 
[mne_path.vertices[:, ii].min(), mne_path.vertices[:, ii].max()] + for ii in range(2) + ] +) +bounds *= plot_dims / dims xy = np.mean(bounds, axis=1) - [100, 0] r = np.diff(bounds, axis=1).max() * 1.2 w, h = r, r * (2 / 3) box_xy = [xy[0] - w * 0.5, xy[1] - h * (2 / 5)] -ax.set_ylim(box_xy[1] + h * 1.001, box_xy[1] - h * 0.001) +ax.set( + ylim=(box_xy[1] + h * 1.001, box_xy[1] - h * 0.001), + xlim=(box_xy[0] - w * 0.001, box_xy[0] + w * 1.001), +) patch = FancyBboxPatch( - box_xy, w, h, clip_on=False, zorder=-1, fc='k', ec='none', alpha=0.75, - boxstyle="round,rounding_size=200.0", mutation_aspect=1) + box_xy, + w, + h, + clip_on=False, + zorder=-1, + fc="k", + ec="none", + alpha=0.75, + boxstyle="round,rounding_size=200.0", + mutation_aspect=1, +) ax.add_patch(patch) fig.set_size_inches((512 / dpi, 512 * (h / w) / dpi)) -plt.savefig(op.join(data_dir, 'mne_splash.png'), transparent=True) +fig.savefig( + data_dir / "mne_splash.png", + transparent=True, +) patch.remove() +# %% +# mne_default_icon.png + # modify to make an icon -ax.patches.pop(-1) # no tag line for our icon -patch = Ellipse(xy, r, r, clip_on=False, zorder=-1, fc='k') +ax.patches[-1].remove() # no tag line for our icon +patch = Ellipse(xy, r, r, clip_on=False, zorder=-1, fc="k") ax.add_patch(patch) -ax.set_ylim(xy[1] + r / 1.9, xy[1] - r / 1.9) +ax.set_ylim(xy[1] + r / 1.99, xy[1] - r / 1.99) fig.set_size_inches((256 / dpi, 256 / dpi)) # Qt does not support clip paths in SVG rendering so we have to use PNG here # then use "optipng -o7" on it afterward (14% reduction in file size) -plt.savefig(op.join(data_dir, 'mne_default_icon.png'), transparent=True) -plt.close() +fig.savefig( + data_dir / "mne_default_icon.png", + transparent=True, +) + +# %% +# mne_logo_small.svg # 188x45 image dpi = 96 # for SVG it's different @@ -163,31 +224,32 @@ h_px = 45 center_fudge = np.array([60, 0]) scale_fudge = 2.1 -rcParams.update({'font.sans-serif': ['Primetime'], 'font.weight': 'black'}) -x = np.linspace(-1., 1., w_px // 2) -y = np.linspace(-1., 1., h_px // 2) +x = np.linspace(-1.0, 1.0, w_px // 2) +y = np.linspace(-1.0, 1.0, h_px // 2) X, Y = np.meshgrid(x, y) # initialize figure (no axes, margins, etc) -fig = plt.figure(1, figsize=(w_px / dpi, h_px / dpi), facecolor='k', - frameon=False, dpi=dpi) -ax = plt.Axes(fig, [0., 0., 1., 1.]) +fig = plt.figure( + 2, figsize=(w_px / dpi, h_px / dpi), facecolor="k", frameon=False, dpi=dpi +) +ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0]) ax.set_axis_off() fig.add_axes(ax) # plot rainbow -ax.imshow(X, cmap=mne_field_grad_cols, aspect='equal', zorder=1) -ax.imshow(np.ones_like(X) * 0.5, cmap='Greys', aspect='equal', zorder=0, - clim=[0, 1]) +ax.imshow(X, cmap=mne_field_grad_cols, aspect="equal", zorder=1) +ax.imshow(np.ones_like(X) * 0.5, cmap="Greys", aspect="equal", zorder=0, clim=[0, 1]) plot_dims = np.r_[np.diff(ax.get_xbound()), np.diff(ax.get_ybound())] # MNE text in white -mne_path = TextPath((0, 0), 'MNE') +mne_path = TextPath((0, 0), "MNE") dims = mne_path.vertices.max(0) - mne_path.vertices.min(0) -vert = mne_path.vertices - dims / 2. +vert = mne_path.vertices - dims / 2.0 mult = scale_fudge * (plot_dims / dims).min() mult = [mult, -mult] # y axis is inverted (origin at top left) -offset = np.array([scale_fudge, 1.]) * \ - np.array([-dims[0], plot_dims[-1]]) / 2. 
- center_fudge +offset = ( + np.array([scale_fudge, 1.0]) * np.array([-dims[0], plot_dims[-1]]) / 2.0 + - center_fudge +) mne_clip = Path(offset + vert * mult, mne_path.codes) -mne_patch = PathPatch(mne_clip, facecolor='0.5', edgecolor='none', zorder=10) +mne_patch = PathPatch(mne_clip, facecolor="0.5", edgecolor="none", zorder=10) ax.add_patch(mne_patch) # adjust xlim and ylim mne_corners = mne_clip.get_extents().corners() @@ -195,11 +257,13 @@ xmax, ymax = np.max(mne_corners, axis=0) xl = ax.get_xlim() yl = ax.get_ylim() -xpad = np.abs(np.diff([xmin, xl[1]])) / 20. -ypad = np.abs(np.diff([ymax, ymin])) / 20. +xpad = np.abs(np.diff([xmin, xl[1]])) / 20.0 +ypad = np.abs(np.diff([ymax, ymin])) / 20.0 ax.set_xlim(xmin - xpad, xl[1] + xpad) ax.set_ylim(ymax + ypad, ymin - ypad) -plt.draw() -plt.savefig(op.join(static_dir, 'mne_logo_small.svg'), - dpi=dpi, transparent=True) -plt.close() +fig.canvas.draw_idle() +fig.savefig( + static_dir / "mne_logo_small.svg", + dpi=dpi, + transparent=True, +) diff --git a/mne/__init__.py b/mne/__init__.py index fc799f9d59a..6abe2bccb62 100644 --- a/mne/__init__.py +++ b/mne/__init__.py @@ -1,5 +1,8 @@ -"""MNE software for MEG and EEG data analysis.""" +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. +"""MNE software for MEG and EEG data analysis.""" # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # @@ -15,124 +18,19 @@ # # Dev branch marker is: 'X.Y.devN' where N is an integer. # +import lazy_loader as lazy try: from importlib.metadata import version + __version__ = version("mne") except Exception: - try: - from ._version import __version__ - except ImportError: - __version__ = '0.0.0' + __version__ = "0.0.0" -# have to import verbose first since it's needed by many things -from .utils import (set_log_level, set_log_file, verbose, set_config, - get_config, get_config_path, set_cache_dir, - set_memmap_min_size, grand_average, sys_info, open_docs, - use_log_level) -from .io.pick import (pick_types, pick_channels, - pick_channels_regexp, pick_channels_forward, - pick_types_forward, pick_channels_cov, - pick_channels_evoked, pick_info, - channel_type, channel_indices_by_type) -from .io.base import concatenate_raws -from .io.meas_info import create_info, Info -from .io.proj import Projection -from .io.kit import read_epochs_kit -from .io.eeglab import read_epochs_eeglab -from .io.reference import (set_eeg_reference, set_bipolar_reference, - add_reference_channels) -from .io.what import what -from .bem import (make_sphere_model, make_bem_model, make_bem_solution, - read_bem_surfaces, write_bem_surfaces, write_head_bem, - read_bem_solution, write_bem_solution) -from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance, - compute_covariance, whiten_evoked, make_ad_hoc_cov) -from .event import (read_events, write_events, find_events, merge_events, - pick_events, make_fixed_length_events, concatenate_events, - find_stim_steps, AcqParserFIF, count_events) -from ._freesurfer import (head_to_mni, head_to_mri, read_talxfm, - get_volume_labels_from_aseg, read_freesurfer_lut, - vertex_to_mni, read_lta) -from .forward import (read_forward_solution, apply_forward, apply_forward_raw, - average_forward_solutions, Forward, - write_forward_solution, make_forward_solution, - convert_forward_solution, make_field_map, - make_forward_dipole, use_coil_def) -from .source_estimate import (read_source_estimate, - SourceEstimate, VectorSourceEstimate, - VolSourceEstimate, 
VolVectorSourceEstimate, - MixedSourceEstimate, MixedVectorSourceEstimate, - grade_to_tris, - spatial_src_adjacency, - spatial_tris_adjacency, - spatial_dist_adjacency, - spatial_inter_hemi_adjacency, - spatio_temporal_src_adjacency, - spatio_temporal_tris_adjacency, - spatio_temporal_dist_adjacency, - extract_label_time_course, stc_near_sensors) -from .surface import (read_surface, write_surface, decimate_surface, read_tri, - get_head_surf, get_meg_helmet_surf, dig_mri_distances, - warp_montage_volume, get_montage_volume_labels) -from .morph_map import read_morph_map -from .morph import (SourceMorph, read_source_morph, grade_to_vertices, - compute_source_morph) -from .source_space import (read_source_spaces, - write_source_spaces, setup_source_space, - setup_volume_source_space, SourceSpaces, - add_source_space_distances, morph_source_spaces, - get_volume_labels_from_src) -from .annotations import (Annotations, read_annotations, annotations_from_events, - events_from_annotations) -from .epochs import (BaseEpochs, Epochs, EpochsArray, read_epochs, - concatenate_epochs, make_fixed_length_epochs) -from .evoked import (Evoked, EvokedArray, read_evokeds, write_evokeds, - combine_evoked) -from .label import (read_label, label_sign_flip, - write_label, stc_to_label, grow_labels, Label, split_label, - BiHemiLabel, read_labels_from_annot, write_labels_to_annot, - random_parcellation, morph_labels, labels_to_stc) -from .misc import parse_config, read_reject_parameters -from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels, - scale_source_space) -from .transforms import (read_trans, write_trans, - transform_surface_to, Transform) -from .proj import (read_proj, write_proj, compute_proj_epochs, - compute_proj_evoked, compute_proj_raw, sensitivity_map) -from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole -from .channels import (equalize_channels, rename_channels, find_layout, - read_vectorview_selection) -from .report import Report, open_report - -from .io import (read_epochs_fieldtrip, read_evoked_besa, - read_evoked_fieldtrip, read_evokeds_mff) -from .rank import compute_rank - -from . import beamformer -from . import channels -from . import chpi -from . import commands -from . import coreg -from . import cuda -from . import datasets -from . import dipole -from . import epochs -from . import event -from . import io -from . import filter -from . import gui -from . import inverse_sparse -from . import minimum_norm -from . import preprocessing -from . import simulation -from . import stats -from . import surface -from . import time_frequency -from . import viz -from . import decoding -from . 
import export +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) # initialize logging +from .utils import set_log_level, set_log_file + set_log_level(None, False) set_log_file() diff --git a/mne/__init__.pyi b/mne/__init__.pyi new file mode 100644 index 00000000000..d50b5209346 --- /dev/null +++ b/mne/__init__.pyi @@ -0,0 +1,430 @@ +__all__ = [ + "AcqParserFIF", + "Annotations", + "BaseEpochs", + "BiHemiLabel", + "Covariance", + "Dipole", + "DipoleFixed", + "Epochs", + "EpochsArray", + "Evoked", + "EvokedArray", + "Forward", + "Info", + "Label", + "MixedSourceEstimate", + "MixedVectorSourceEstimate", + "Projection", + "Report", + "SourceEstimate", + "SourceMorph", + "SourceSpaces", + "Transform", + "VectorSourceEstimate", + "VolSourceEstimate", + "VolVectorSourceEstimate", + "add_reference_channels", + "add_source_space_distances", + "annotations_from_events", + "apply_forward", + "apply_forward_raw", + "average_forward_solutions", + "beamformer", + "channel_indices_by_type", + "channel_type", + "channels", + "chpi", + "combine_evoked", + "commands", + "compute_covariance", + "compute_proj_epochs", + "compute_proj_evoked", + "compute_proj_raw", + "compute_rank", + "compute_raw_covariance", + "compute_source_morph", + "concatenate_epochs", + "concatenate_events", + "concatenate_raws", + "convert_forward_solution", + "coreg", + "count_annotations", + "count_events", + "create_default_subject", + "create_info", + "cuda", + "datasets", + "decimate_surface", + "decoding", + "defaults", + "dig_mri_distances", + "dipole", + "epochs", + "equalize_channels", + "event", + "events_from_annotations", + "export", + "extract_label_time_course", + "filter", + "find_events", + "find_layout", + "find_stim_steps", + "fit_dipole", + "forward", + "get_config", + "get_config_path", + "get_head_surf", + "get_meg_helmet_surf", + "get_montage_volume_labels", + "get_volume_labels_from_aseg", + "get_volume_labels_from_src", + "grade_to_tris", + "grade_to_vertices", + "grand_average", + "grow_labels", + "gui", + "head_to_mni", + "head_to_mri", + "inverse_sparse", + "io", + "label_sign_flip", + "labels_to_stc", + "make_ad_hoc_cov", + "make_bem_model", + "make_bem_solution", + "make_field_map", + "make_fixed_length_epochs", + "make_fixed_length_events", + "make_forward_dipole", + "make_forward_solution", + "make_sphere_model", + "match_channel_orders", + "merge_events", + "minimum_norm", + "morph_labels", + "morph_source_spaces", + "open_docs", + "open_report", + "parse_config", + "pick_channels", + "pick_channels_cov", + "pick_channels_forward", + "pick_channels_regexp", + "pick_events", + "pick_info", + "pick_types", + "pick_types_forward", + "preprocessing", + "random_parcellation", + "read_annotations", + "read_bem_solution", + "read_bem_surfaces", + "read_cov", + "read_dipole", + "read_epochs", + "read_epochs_eeglab", + "read_epochs_fieldtrip", + "read_epochs_kit", + "read_events", + "read_evoked_besa", + "read_evoked_fieldtrip", + "read_evokeds", + "read_evokeds_mff", + "read_forward_solution", + "read_freesurfer_lut", + "read_label", + "read_labels_from_annot", + "read_lta", + "read_morph_map", + "read_proj", + "read_reject_parameters", + "read_source_estimate", + "read_source_morph", + "read_source_spaces", + "read_surface", + "read_talxfm", + "read_trans", + "read_tri", + "read_vectorview_selection", + "rename_channels", + "report", + "scale_bem", + "scale_labels", + "scale_mri", + "scale_source_space", + "sensitivity_map", + "set_bipolar_reference", + "set_cache_dir", + "set_config", + 
"set_eeg_reference", + "set_log_file", + "set_log_level", + "set_memmap_min_size", + "setup_source_space", + "setup_volume_source_space", + "simulation", + "source_space", + "spatial_dist_adjacency", + "spatial_inter_hemi_adjacency", + "spatial_src_adjacency", + "spatial_tris_adjacency", + "spatio_temporal_dist_adjacency", + "spatio_temporal_src_adjacency", + "spatio_temporal_tris_adjacency", + "split_label", + "stats", + "stc_near_sensors", + "stc_to_label", + "surface", + "sys_info", + "time_frequency", + "transform_surface_to", + "use_coil_def", + "use_log_level", + "verbose", + "vertex_to_mni", + "viz", + "what", + "whiten_evoked", + "write_bem_solution", + "write_bem_surfaces", + "write_cov", + "write_events", + "write_evokeds", + "write_forward_solution", + "write_head_bem", + "write_label", + "write_labels_to_annot", + "write_proj", + "write_source_spaces", + "write_surface", + "write_trans", +] +from . import ( + beamformer, + channels, + chpi, + commands, + coreg, + cuda, + datasets, + decoding, + defaults, + dipole, + epochs, + event, + export, + filter, + forward, + gui, + inverse_sparse, + io, + minimum_norm, + preprocessing, + report, + simulation, + source_space, + stats, + surface, + time_frequency, + viz, +) +from ._fiff.meas_info import Info, create_info +from ._fiff.pick import ( + channel_indices_by_type, + channel_type, + pick_channels, + pick_channels_cov, + pick_channels_forward, + pick_channels_regexp, + pick_info, + pick_types, + pick_types_forward, +) +from ._fiff.proj import Projection +from ._fiff.reference import ( + add_reference_channels, + set_bipolar_reference, + set_eeg_reference, +) +from ._fiff.what import what +from ._freesurfer import ( + get_volume_labels_from_aseg, + head_to_mni, + head_to_mri, + read_freesurfer_lut, + read_lta, + read_talxfm, + vertex_to_mni, +) +from .annotations import ( + Annotations, + annotations_from_events, + count_annotations, + events_from_annotations, + read_annotations, +) +from .bem import ( + make_bem_model, + make_bem_solution, + make_sphere_model, + read_bem_solution, + read_bem_surfaces, + write_bem_solution, + write_bem_surfaces, + write_head_bem, +) +from .channels import ( + equalize_channels, + find_layout, + read_vectorview_selection, + rename_channels, +) +from .coreg import ( + create_default_subject, + scale_bem, + scale_labels, + scale_mri, + scale_source_space, +) +from .cov import ( + Covariance, + compute_covariance, + compute_raw_covariance, + make_ad_hoc_cov, + read_cov, + whiten_evoked, + write_cov, +) +from .dipole import Dipole, DipoleFixed, fit_dipole, read_dipole +from .epochs import ( + BaseEpochs, + Epochs, + EpochsArray, + concatenate_epochs, + make_fixed_length_epochs, + read_epochs, +) +from .event import ( + AcqParserFIF, + concatenate_events, + count_events, + find_events, + find_stim_steps, + make_fixed_length_events, + merge_events, + pick_events, + read_events, + write_events, +) +from .evoked import Evoked, EvokedArray, combine_evoked, read_evokeds, write_evokeds +from .forward import ( + Forward, + apply_forward, + apply_forward_raw, + average_forward_solutions, + convert_forward_solution, + make_field_map, + make_forward_dipole, + make_forward_solution, + read_forward_solution, + use_coil_def, + write_forward_solution, +) +from .io import ( + read_epochs_fieldtrip, + read_evoked_besa, + read_evoked_fieldtrip, + read_evokeds_mff, +) +from .io.base import concatenate_raws, match_channel_orders +from .io.eeglab import read_epochs_eeglab +from .io.kit import read_epochs_kit +from .label 
import ( + BiHemiLabel, + Label, + grow_labels, + label_sign_flip, + labels_to_stc, + morph_labels, + random_parcellation, + read_label, + read_labels_from_annot, + split_label, + stc_to_label, + write_label, + write_labels_to_annot, +) +from .misc import parse_config, read_reject_parameters +from .morph import ( + SourceMorph, + compute_source_morph, + grade_to_vertices, + read_source_morph, +) +from .morph_map import read_morph_map +from .proj import ( + compute_proj_epochs, + compute_proj_evoked, + compute_proj_raw, + read_proj, + sensitivity_map, + write_proj, +) +from .rank import compute_rank +from .report import Report, open_report +from .source_estimate import ( + MixedSourceEstimate, + MixedVectorSourceEstimate, + SourceEstimate, + VectorSourceEstimate, + VolSourceEstimate, + VolVectorSourceEstimate, + extract_label_time_course, + grade_to_tris, + read_source_estimate, + spatial_dist_adjacency, + spatial_inter_hemi_adjacency, + spatial_src_adjacency, + spatial_tris_adjacency, + spatio_temporal_dist_adjacency, + spatio_temporal_src_adjacency, + spatio_temporal_tris_adjacency, + stc_near_sensors, +) +from .source_space._source_space import ( + SourceSpaces, + add_source_space_distances, + get_volume_labels_from_src, + morph_source_spaces, + read_source_spaces, + setup_source_space, + setup_volume_source_space, + write_source_spaces, +) +from .surface import ( + decimate_surface, + dig_mri_distances, + get_head_surf, + get_meg_helmet_surf, + get_montage_volume_labels, + read_surface, + read_tri, + write_surface, +) +from .transforms import Transform, read_trans, transform_surface_to, write_trans +from .utils import ( + get_config, + get_config_path, + grand_average, + open_docs, + set_cache_dir, + set_config, + set_log_file, + set_log_level, + set_memmap_min_size, + sys_info, + use_log_level, + verbose, +) diff --git a/mne/__main__.py b/mne/__main__.py index 414754c1885..e0a0dc552d2 100644 --- a/mne/__main__.py +++ b/mne/__main__.py @@ -1,7 +1,8 @@ -# Authors: Eric Larson -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .commands.utils import main -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/mne/_edf/open.py b/mne/_edf/open.py new file mode 100644 index 00000000000..2fd97833b29 --- /dev/null +++ b/mne/_edf/open.py @@ -0,0 +1,23 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# Maybe we can move this one to utils or something like that. +from pathlib import Path + +from mne._fiff.open import _NoCloseRead + +from ..utils import _file_like, _validate_type, logger + + +def _gdf_edf_get_fid(fname, **kwargs): + """Open a EDF/BDF/GDF file with no additional parsing.""" + if _file_like(fname): + logger.debug("Using file-like I/O") + fid = _NoCloseRead(fname) + fid.seek(0) + else: + _validate_type(fname, [Path, str], "fname", extra="or file-like") + logger.debug("Using normal I/O") + fid = open(fname, "rb", **kwargs) # Open in binary mode + return fid diff --git a/mne/_fiff/__init__.py b/mne/_fiff/__init__.py new file mode 100644 index 00000000000..48a5871c58b --- /dev/null +++ b/mne/_fiff/__init__.py @@ -0,0 +1,8 @@ +"""Private module for FIF basic I/O routines.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# All imports should be done directly to submodules, so we don't import +# anything here or use lazy_loader. 
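For orientation, the lazy-import pattern adopted in mne/__init__.py above reduces to the sketch below. It assumes the third-party lazy_loader package and uses a hypothetical package name ("mypkg"); the adjacent .pyi stub (like mne/__init__.pyi above) is what tells lazy_loader which submodules and attributes exist.

# mypkg/__init__.py -- minimal sketch of the attach_stub pattern (hypothetical package)
import lazy_loader as lazy

# Defer every submodule/attribute import until first access; lazy_loader reads
# the adjacent mypkg/__init__.pyi stub (its __all__ and "from . import ..."
# lines) to learn which names to resolve lazily.
(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__)

At runtime "import mypkg" stays cheap because heavy submodules are only imported when first touched, while type checkers and IDEs read the stub file directly.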
diff --git a/mne/_fiff/_digitization.py b/mne/_fiff/_digitization.py new file mode 100644 index 00000000000..c198d606dee --- /dev/null +++ b/mne/_fiff/_digitization.py @@ -0,0 +1,608 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import heapq +from collections import Counter + +import numpy as np + +from ..utils import Bunch, _check_fname, _validate_type, logger, verbose, warn +from .constants import FIFF, _coord_frame_named +from .tag import read_tag +from .tree import dir_tree_find +from .write import _safe_name_list, start_and_end_file, write_dig_points + +_dig_kind_dict = { + "cardinal": FIFF.FIFFV_POINT_CARDINAL, + "hpi": FIFF.FIFFV_POINT_HPI, + "eeg": FIFF.FIFFV_POINT_EEG, + "extra": FIFF.FIFFV_POINT_EXTRA, +} +_dig_kind_ints = tuple(sorted(_dig_kind_dict.values())) +_dig_kind_proper = { + "cardinal": "Cardinal", + "hpi": "HPI", + "eeg": "EEG", + "extra": "Extra", + "unknown": "Unknown", +} +_dig_kind_rev = {val: key for key, val in _dig_kind_dict.items()} +_cardinal_kind_rev = {1: "LPA", 2: "Nasion", 3: "RPA", 4: "Inion"} + + +def _format_dig_points(dig, enforce_order=False): + """Format the dig points nicely.""" + if enforce_order and dig is not None: + # reorder points based on type: + # Fiducials/HPI, EEG, extra (headshape) + fids_digpoints = [] + hpi_digpoints = [] + eeg_digpoints = [] + extra_digpoints = [] + head_digpoints = [] + + # use a heap to enforce order on FIDS, EEG, Extra + for idx, digpoint in enumerate(dig): + ident = digpoint["ident"] + kind = digpoint["kind"] + + # push onto heap based on 'ident' (for the order) for + # each of the possible DigPoint 'kind's + # keep track of 'idx' in case of any clashes in + # the 'ident' variable, which can occur when + # user passes in DigMontage + DigMontage + if kind == FIFF.FIFFV_POINT_CARDINAL: + heapq.heappush(fids_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_HPI: + heapq.heappush(hpi_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_EEG: + heapq.heappush(eeg_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_EXTRA: + heapq.heappush(extra_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_HEAD: + heapq.heappush(head_digpoints, (ident, idx, digpoint)) + + # now recreate dig based on sorted order + fids_digpoints.sort(), hpi_digpoints.sort() + eeg_digpoints.sort() + extra_digpoints.sort(), head_digpoints.sort() + new_dig = [] + for idx, d in enumerate( + fids_digpoints + + hpi_digpoints + + extra_digpoints + + eeg_digpoints + + head_digpoints + ): + new_dig.append(d[-1]) + dig = new_dig + + return [DigPoint(d) for d in dig] if dig is not None else dig + + +def _get_dig_eeg(dig): + return [d for d in dig if d["kind"] == FIFF.FIFFV_POINT_EEG] + + +def _count_points_by_type(dig): + """Get the number of points of each type.""" + occurrences = Counter([d["kind"] for d in dig]) + return dict( + fid=occurrences[FIFF.FIFFV_POINT_CARDINAL], + hpi=occurrences[FIFF.FIFFV_POINT_HPI], + eeg=occurrences[FIFF.FIFFV_POINT_EEG], + extra=occurrences[FIFF.FIFFV_POINT_EXTRA], + ) + + +_dig_keys = {"kind", "ident", "r", "coord_frame"} + + +class DigPoint(dict): + """Container for a digitization point. + + This is a simple subclass of the standard dict type designed to provide + a readable string representation. + + Parameters + ---------- + kind : int + The kind of channel, + e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``. + r : array, shape (3,) + 3D position in m. and coord_frame. 
+ ident : int + Number specifying the identity of the point. + e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``, + or 42 if kind is ``FIFFV_POINT_EEG``. + coord_frame : int + The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``. + """ + + def __repr__(self): # noqa: D105 + from ..transforms import _coord_frame_name + + if self["kind"] == FIFF.FIFFV_POINT_CARDINAL: + id_ = _cardinal_kind_rev.get(self["ident"], "Unknown cardinal") + else: + id_ = _dig_kind_proper[_dig_kind_rev.get(self["kind"], "unknown")] + id_ = f"{id_} #{self['ident']}" + id_ = id_.rjust(10) + cf = _coord_frame_name(self["coord_frame"]) + x, y, z = self["r"] + if "voxel" in cf: + pos = (f"({x:0.1f}, {y:0.1f}, {z:0.1f})").ljust(25) + else: + pos = (f"({x * 1e3:0.1f}, {y * 1e3:0.1f}, {z * 1e3:0.1f}) mm").ljust(25) + return f"<DigPoint | {id_} : {pos} : {cf} frame>" + + # speed up info copy by only deep copying the mutable item + def __deepcopy__(self, memodict): + """Make a deepcopy.""" + return DigPoint( + kind=self["kind"], + r=self["r"].copy(), + ident=self["ident"], + coord_frame=self["coord_frame"], + ) + + def __eq__(self, other): # noqa: D105 + """Compare two DigPoints. + + Two digpoints are equal if they are the same kind, share the same + coordinate frame and position. + """ + my_keys = ["kind", "ident", "coord_frame"] + if set(self.keys()) != set(other.keys()): + return False + elif any(self[_] != other[_] for _ in my_keys): + return False + else: + return np.allclose(self["r"], other["r"]) + + +def _read_dig_fif(fid, meas_info, *, return_ch_names=False): + """Read digitizer data from a FIFF file.""" + isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK) + dig = None + ch_names = None + if len(isotrak) == 0: + logger.info("Isotrak not found") + elif len(isotrak) > 1: + warn("Multiple Isotrak found") + else: + isotrak = isotrak[0] + coord_frame = FIFF.FIFFV_COORD_HEAD + dig = [] + for k in range(isotrak["nent"]): + kind = isotrak["directory"][k].kind + pos = isotrak["directory"][k].pos + if kind == FIFF.FIFF_DIG_POINT: + tag = read_tag(fid, pos) + dig.append(tag.data) + elif kind == FIFF.FIFF_DIG_STRING: + tag = read_tag(fid, pos) + dig.extend(tag.data) + elif kind == FIFF.FIFF_MNE_COORD_FRAME: + tag = read_tag(fid, pos) + coord_frame = int(tag.data.item()) + coord_frame = _coord_frame_named.get(coord_frame, coord_frame) + elif kind == FIFF.FIFF_MNE_CH_NAME_LIST: + tag = read_tag(fid, pos) + ch_names = _safe_name_list(tag.data, "read", "ch_names") + for d in dig: + d["coord_frame"] = coord_frame + out = _format_dig_points(dig) + if return_ch_names: + out = (out, ch_names) + return out + + +@verbose +def write_dig( + fname, pts, coord_frame=None, *, ch_names=None, overwrite=False, verbose=None +): + """Write digitization data to a FIF file. + + Parameters + ---------- + fname : path-like + Destination file name. + pts : iterator of dict + Iterator through digitizer points. Each point is a dictionary with + the keys 'kind', 'ident' and 'r'. + coord_frame : int | str | None + If all the points have the same coordinate frame, specify the type + here. Can be None (default) if the points could have varying + coordinate frames. + ch_names : list of str | None + Channel names associated with the digitization points, if available. + + .. versionadded:: 1.9 + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + + ..
versionadded:: 1.0 + """ + from ..transforms import _to_const + + fname = _check_fname(fname, overwrite=overwrite) + if coord_frame is not None: + coord_frame = _to_const(coord_frame) + pts_frames = {pt.get("coord_frame", coord_frame) for pt in pts} + bad_frames = pts_frames - {coord_frame} + if len(bad_frames) > 0: + raise ValueError( + "Points have coord_frame entries that are incompatible with " + f"coord_frame={coord_frame}: {tuple(bad_frames)}." + ) + _validate_type(ch_names, (None, list, tuple), "ch_names") + if ch_names is not None: + for ci, ch_name in enumerate(ch_names): + _validate_type(ch_name, str, f"ch_names[{ci}]") + + with start_and_end_file(fname) as fid: + write_dig_points( + fid, pts, block=True, coord_frame=coord_frame, ch_names=ch_names + ) + + +_cardinal_ident_mapping = { + FIFF.FIFFV_POINT_NASION: "nasion", + FIFF.FIFFV_POINT_LPA: "lpa", + FIFF.FIFFV_POINT_RPA: "rpa", +} + + +def _ensure_fiducials_head(dig): + # Ensure that there are all three fiducials in the head coord frame + fids = dict() + for d in dig: + if d["kind"] == FIFF.FIFFV_POINT_CARDINAL: + name = _cardinal_ident_mapping.get(d["ident"], None) + if name is not None: + fids[name] = d + radius = None + mults = dict( + lpa=[-1, 0, 0], + rpa=[1, 0, 0], + nasion=[0, 1, 0], + ) + for ident, name in _cardinal_ident_mapping.items(): + if name not in fids: + if radius is None: + radius = [ + np.linalg.norm(d["r"]) + for d in dig + if d["coord_frame"] == FIFF.FIFFV_COORD_HEAD + and not np.isnan(d["r"]).any() + ] + if not radius: + return # can't complete, no head points + radius = np.mean(radius) + dig.append( + DigPoint( + kind=FIFF.FIFFV_POINT_CARDINAL, + ident=ident, + r=np.array(mults[name], float) * radius, + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + ) + + +# XXXX: +# This does something really similar to _read_dig_montage_fif but: +# - does not check coord_frame +# - does not do any operation that implies assumptions with the names +def _get_data_as_dict_from_dig(dig, exclude_ref_channel=True): + """Obtain coordinate data from a Dig. + + Parameters + ---------- + dig : list of dicts + A container of DigPoints to be added to the info['dig']. + + Returns + ------- + ch_pos : dict + The container of all relevant channel positions inside dig. 
+ """ + # Split up the dig points by category + hsp, hpi, elp = list(), list(), list() + fids, dig_ch_pos_location = dict(), list() + dig = [] if dig is None else dig + + for d in dig: + if d["kind"] == FIFF.FIFFV_POINT_CARDINAL: + fids[_cardinal_ident_mapping[d["ident"]]] = d["r"] + elif d["kind"] == FIFF.FIFFV_POINT_HPI: + hpi.append(d["r"]) + elp.append(d["r"]) + elif d["kind"] == FIFF.FIFFV_POINT_EXTRA: + hsp.append(d["r"]) + elif d["kind"] == FIFF.FIFFV_POINT_EEG: + if d["ident"] != 0 or not exclude_ref_channel: + dig_ch_pos_location.append(d["r"]) + + dig_coord_frames = set([d["coord_frame"] for d in dig]) + if len(dig_coord_frames) == 0: + dig_coord_frames = set([FIFF.FIFFV_COORD_HEAD]) + if len(dig_coord_frames) != 1: + raise RuntimeError( + f"Only single coordinate frame in dig is supported, got {dig_coord_frames}" + ) + dig_ch_pos_location = np.array(dig_ch_pos_location) + dig_ch_pos_location.shape = (-1, 3) # empty will be (0, 3) + return Bunch( + nasion=fids.get("nasion", None), + lpa=fids.get("lpa", None), + rpa=fids.get("rpa", None), + hsp=np.array(hsp) if len(hsp) else None, + hpi=np.array(hpi) if len(hpi) else None, + elp=np.array(elp) if len(elp) else None, + dig_ch_pos_location=dig_ch_pos_location, + coord_frame=dig_coord_frames.pop(), + ) + + +def _get_fid_coords(dig, raise_error=True): + fid_coords = Bunch(nasion=None, lpa=None, rpa=None) + fid_coord_frames = dict() + + for d in dig: + if d["kind"] == FIFF.FIFFV_POINT_CARDINAL: + key = _cardinal_ident_mapping[d["ident"]] + fid_coords[key] = d["r"] + fid_coord_frames[key] = d["coord_frame"] + + if len(fid_coord_frames) > 0 and raise_error: + if set(fid_coord_frames.keys()) != set(["nasion", "lpa", "rpa"]): + raise ValueError( + f"Some fiducial points are missing (got {fid_coord_frames.keys()})." + ) + + if len(set(fid_coord_frames.values())) > 1: + raise ValueError( + "All fiducial points must be in the same coordinate system " + f"(got {len(fid_coord_frames)})" + ) + + coord_frame = fid_coord_frames.popitem()[1] if fid_coord_frames else None + + return fid_coords, coord_frame + + +def _coord_frame_const(coord_frame): + from ..transforms import _str_to_frame + + if not isinstance(coord_frame, str) or coord_frame not in _str_to_frame: + raise ValueError( + f"coord_frame must be one of {sorted(_str_to_frame.keys())}, got " + f"{coord_frame}" + ) + return _str_to_frame[coord_frame] + + +def _make_dig_points( + nasion=None, + lpa=None, + rpa=None, + hpi=None, + extra_points=None, + dig_ch_pos=None, + *, + coord_frame="head", + add_missing_fiducials=False, +): + """Construct digitizer info for the info. + + Parameters + ---------- + nasion : array-like | numpy.ndarray, shape (3,) | None + Point designated as the nasion point. + lpa : array-like | numpy.ndarray, shape (3,) | None + Point designated as the left auricular point. + rpa : array-like | numpy.ndarray, shape (3,) | None + Point designated as the right auricular point. + hpi : array-like | numpy.ndarray, shape (n_points, 3) | None + Points designated as head position indicator points. + extra_points : array-like | numpy.ndarray, shape (n_points, 3) + Points designed as the headshape points. + dig_ch_pos : dict + Dict of EEG channel positions. + coord_frame : str + The coordinate frame of the points. Usually this is "unknown" + for native digitizer space. Defaults to "head". + add_missing_fiducials : bool + If True, add fiducials to the dig points if they are not present. + Requires that coord_frame='head' and that lpa, nasion, and rpa are all + None. 
+ + Returns + ------- + dig : list of dicts + A container of DigPoints to be added to the info['dig']. + """ + coord_frame = _coord_frame_const(coord_frame) + + dig = [] + if lpa is not None: + lpa = np.asarray(lpa) + if lpa.shape != (3,): + raise ValueError(f"LPA should have the shape (3,) instead of {lpa.shape}") + dig.append( + { + "r": lpa, + "ident": FIFF.FIFFV_POINT_LPA, + "kind": FIFF.FIFFV_POINT_CARDINAL, + "coord_frame": coord_frame, + } + ) + if nasion is not None: + nasion = np.asarray(nasion) + if nasion.shape != (3,): + raise ValueError( + f"Nasion should have the shape (3,) instead of {nasion.shape}" + ) + dig.append( + { + "r": nasion, + "ident": FIFF.FIFFV_POINT_NASION, + "kind": FIFF.FIFFV_POINT_CARDINAL, + "coord_frame": coord_frame, + } + ) + if rpa is not None: + rpa = np.asarray(rpa) + if rpa.shape != (3,): + raise ValueError(f"RPA should have the shape (3,) instead of {rpa.shape}") + dig.append( + { + "r": rpa, + "ident": FIFF.FIFFV_POINT_RPA, + "kind": FIFF.FIFFV_POINT_CARDINAL, + "coord_frame": coord_frame, + } + ) + if hpi is not None: + hpi = np.asarray(hpi) + if hpi.ndim != 2 or hpi.shape[1] != 3: + raise ValueError( + f"HPI should have the shape (n_points, 3) instead of {hpi.shape}" + ) + for idx, point in enumerate(hpi): + dig.append( + { + "r": point, + "ident": idx + 1, + "kind": FIFF.FIFFV_POINT_HPI, + "coord_frame": coord_frame, + } + ) + if extra_points is not None: + extra_points = np.asarray(extra_points) + if len(extra_points) and extra_points.shape[1] != 3: + raise ValueError( + "Points should have the shape (n_points, 3) instead of " + f"{extra_points.shape}" + ) + for idx, point in enumerate(extra_points): + dig.append( + { + "r": point, + "ident": idx + 1, + "kind": FIFF.FIFFV_POINT_EXTRA, + "coord_frame": coord_frame, + } + ) + if dig_ch_pos is not None: + idents = [] + use_arange = False + for key, value in dig_ch_pos.items(): + _validate_type(key, str, "dig_ch_pos") + try: + idents.append(int(key[-3:])) + except ValueError: + use_arange = True + _validate_type(value, (np.ndarray, list, tuple), "dig_ch_pos") + value = np.array(value, dtype=float) + dig_ch_pos[key] = value + if value.shape != (3,): + raise RuntimeError( + "The position should be a 1D array of 3 floats. " + f"Provided shape {value.shape}." + ) + if use_arange: + idents = np.arange(1, len(dig_ch_pos) + 1) + for key, ident in zip(dig_ch_pos, idents): + dig.append( + { + "r": dig_ch_pos[key], + "ident": int(ident), + "kind": FIFF.FIFFV_POINT_EEG, + "coord_frame": coord_frame, + } + ) + if add_missing_fiducials: + assert coord_frame == FIFF.FIFFV_COORD_HEAD + # These being none is really an assumption that if you have one you + # should have all three. But we can relax this later if necessary. 
+ assert lpa is None + assert rpa is None + assert nasion is None + _ensure_fiducials_head(dig) + + return _format_dig_points(dig) + + +def _call_make_dig_points(nasion, lpa, rpa, hpi, extra, convert=True): + from ..transforms import ( + Transform, + apply_trans, + get_ras_to_neuromag_trans, + ) + + if convert: + neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa) + nasion = apply_trans(neuromag_trans, nasion) + lpa = apply_trans(neuromag_trans, lpa) + rpa = apply_trans(neuromag_trans, rpa) + + if hpi is not None: + hpi = apply_trans(neuromag_trans, hpi) + + extra = apply_trans(neuromag_trans, extra).astype(np.float32) + else: + neuromag_trans = None + + ctf_head_t = Transform(fro="ctf_head", to="head", trans=neuromag_trans) + + info_dig = _make_dig_points( + nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, extra_points=extra + ) + + return info_dig, ctf_head_t + + +############################################################################## +# From artemis123 (we have modified the function a bit) +def _artemis123_read_pos(nas, lpa, rpa, hpi, extra): + # move into MNE head coords + dig_points, _ = _call_make_dig_points(nas, lpa, rpa, hpi, extra) + return dig_points + + +############################################################################## +# From bti +def _make_bti_dig_points( + nasion, + lpa, + rpa, + hpi, + extra, + convert=False, + use_hpi=False, + bti_dev_t=False, + dev_ctf_t=False, +): + from ..transforms import ( + Transform, + combine_transforms, + invert_transform, + ) + + _hpi = hpi if use_hpi else None + info_dig, ctf_head_t = _call_make_dig_points(nasion, lpa, rpa, _hpi, extra, convert) + + if convert: + t = combine_transforms( + invert_transform(bti_dev_t), dev_ctf_t, "meg", "ctf_head" + ) + dev_head_t = combine_transforms(t, ctf_head_t, "meg", "head") + else: + dev_head_t = Transform("meg", "head", trans=None) + + return info_dig, dev_head_t, ctf_head_t # ctf_head_t should not be needed diff --git a/mne/_fiff/compensator.py b/mne/_fiff/compensator.py new file mode 100644 index 00000000000..914dc1bb82b --- /dev/null +++ b/mne/_fiff/compensator.py @@ -0,0 +1,168 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
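As a usage sketch for the digitization helpers defined in _digitization.py above (these are private helpers, so the exact signatures may change; the coordinates and channel name below are made-up values in meters):

import numpy as np
from mne._fiff._digitization import _make_dig_points

# Hypothetical fiducials plus one EEG electrode, in meters, head frame.
dig = _make_dig_points(
    nasion=[0.0, 0.10, 0.0],
    lpa=[-0.08, 0.0, 0.0],
    rpa=[0.08, 0.0, 0.0],
    dig_ch_pos={"EEG001": np.array([0.0, 0.09, 0.04])},
    coord_frame="head",
)
# Each entry is a DigPoint dict with 'kind', 'ident', 'r' and 'coord_frame';
# LPA is appended first by _make_dig_points, so dig[0] is the LPA point.
print(dig[0])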
+ +import numpy as np + +from ..utils import fill_doc +from .constants import FIFF + + +def get_current_comp(info): + """Get the current compensation in effect in the data.""" + comp = None + first_comp = -1 + for k, chan in enumerate(info["chs"]): + if chan["kind"] == FIFF.FIFFV_MEG_CH: + comp = int(chan["coil_type"]) >> 16 + if first_comp < 0: + first_comp = comp + elif comp != first_comp: + raise ValueError("Compensation is not set equally on all MEG channels") + return comp + + +def set_current_comp(info, comp): + """Set the current compensation in effect in the data.""" + comp_now = get_current_comp(info) + for k, chan in enumerate(info["chs"]): + if chan["kind"] == FIFF.FIFFV_MEG_CH: + rem = chan["coil_type"] - (comp_now << 16) + chan["coil_type"] = int(rem + (comp << 16)) + + +def _make_compensator(info, grade): + """Auxiliary function for make_compensator.""" + for k in range(len(info["comps"])): + if info["comps"][k]["kind"] == grade: + this_data = info["comps"][k]["data"] + + # Create the preselector + presel = np.zeros((this_data["ncol"], info["nchan"])) + for col, col_name in enumerate(this_data["col_names"]): + ind = [k for k, ch in enumerate(info["ch_names"]) if ch == col_name] + if len(ind) == 0: + raise ValueError(f"Channel {col_name} is not available in data") + elif len(ind) > 1: + raise ValueError(f"Ambiguous channel {col_name}") + presel[col, ind[0]] = 1.0 + + # Create the postselector (zero entries for channels not found) + postsel = np.zeros((info["nchan"], this_data["nrow"])) + for c, ch_name in enumerate(info["ch_names"]): + ind = [ + k for k, ch in enumerate(this_data["row_names"]) if ch == ch_name + ] + if len(ind) > 1: + raise ValueError(f"Ambiguous channel {ch_name}") + elif len(ind) == 1: + postsel[c, ind[0]] = 1.0 + # else, don't use it at all (postsel[c, ?] = 0.0) by allocation + this_comp = np.dot(postsel, np.dot(this_data["data"], presel)) + return this_comp + + raise ValueError(f"Desired compensation matrix (grade = {grade:d}) not found") + + +@fill_doc +def make_compensator(info, from_, to, exclude_comp_chs=False): + """Return compensation matrix eg. for CTF system. + + Create a compensation matrix to bring the data from one compensation + state to another. + + Parameters + ---------- + %(info_not_none)s + from_ : int + Compensation in the input data. + to : int + Desired compensation in the output. + exclude_comp_chs : bool + Exclude compensation channels from the output. + + Returns + ------- + comp : array | None. + The compensation matrix. Might be None if no compensation + is needed (from == to). 
+ """ + if from_ == to: + return None + + # s_orig = s_from + C1*s_from = (I + C1)*s_from + # s_to = s_orig - C2*s_orig = (I - C2)*s_orig + # s_to = (I - C2)*(I + C1)*s_from = (I + C1 - C2 - C2*C1)*s_from + if from_ != 0: + C1 = _make_compensator(info, from_) + comp_from_0 = np.linalg.inv(np.eye(info["nchan"]) - C1) + if to != 0: + C2 = _make_compensator(info, to) + comp_0_to = np.eye(info["nchan"]) - C2 + if from_ != 0: + if to != 0: + # This is mathematically equivalent, but has higher numerical + # error than using the inverse to always go to zero and back + # comp = np.eye(info['nchan']) + C1 - C2 - np.dot(C2, C1) + comp = np.dot(comp_0_to, comp_from_0) + else: + comp = comp_from_0 + else: + # from == 0, to != 0 guaranteed here + comp = comp_0_to + + if exclude_comp_chs: + pick = [ + k for k, c in enumerate(info["chs"]) if c["kind"] != FIFF.FIFFV_REF_MEG_CH + ] + + if len(pick) == 0: + raise ValueError( + "Nothing remains after excluding the compensation channels" + ) + + comp = comp[pick, :] + + return comp + + +# @verbose +# def compensate_to(data, to, verbose=None): +# """ +# % +# % [newdata] = mne_compensate_to(data,to) +# % +# % Apply compensation to the data as desired +# % +# """ +# +# newdata = data.copy() +# now = get_current_comp(newdata['info']) +# +# # Are we there already? +# if now == to: +# logger.info('Data are already compensated as desired') +# +# # Make the compensator and apply it to all data sets +# comp = make_compensator(newdata['info'], now, to) +# for k in range(len(newdata['evoked'])): +# newdata['evoked'][k]['epochs'] = np.dot(comp, +# newdata['evoked'][k]['epochs']) +# +# # Update the compensation info in the channel descriptors +# newdata['info']['chs'] = set_current_comp(newdata['info']['chs'], to) +# return newdata + + +# def set_current_comp(chs, value): +# """Set the current compensation value in the channel info structures +# """ +# new_chs = chs +# +# lower_half = int('FFFF', 16) # hex2dec('FFFF') +# for k in range(len(chs)): +# if chs[k]['kind'] == FIFF.FIFFV_MEG_CH: +# coil_type = float(chs[k]['coil_type']) & lower_half +# new_chs[k]['coil_type'] = int(coil_type | (value << 16)) +# +# return new_chs diff --git a/mne/_fiff/constants.py b/mne/_fiff/constants.py new file mode 100644 index 00000000000..cf604db530c --- /dev/null +++ b/mne/_fiff/constants.py @@ -0,0 +1,1230 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from ..utils._bunch import BunchConstNamed + +FIFF = BunchConstNamed() + +# +# FIFF version number in use +# +FIFF.FIFFC_MAJOR_VERSION = 1 +FIFF.FIFFC_MINOR_VERSION = 4 +FIFF.FIFFC_VERSION = FIFF.FIFFC_MAJOR_VERSION << 16 | FIFF.FIFFC_MINOR_VERSION + +# +# Blocks +# +FIFF.FIFFB_ROOT = 999 +FIFF.FIFFB_MEAS = 100 +FIFF.FIFFB_MEAS_INFO = 101 +FIFF.FIFFB_RAW_DATA = 102 +FIFF.FIFFB_PROCESSED_DATA = 103 +FIFF.FIFFB_EVOKED = 104 +FIFF.FIFFB_ASPECT = 105 +FIFF.FIFFB_SUBJECT = 106 +FIFF.FIFFB_ISOTRAK = 107 +FIFF.FIFFB_HPI_MEAS = 108 # HPI measurement +FIFF.FIFFB_HPI_RESULT = 109 # Result of a HPI fitting procedure +FIFF.FIFFB_HPI_COIL = 110 # Data acquired from one HPI coil +FIFF.FIFFB_PROJECT = 111 +FIFF.FIFFB_CONTINUOUS_DATA = 112 +FIFF.FIFFB_CH_INFO = 113 # Extra channel information +FIFF.FIFFB_VOID = 114 +FIFF.FIFFB_EVENTS = 115 +FIFF.FIFFB_INDEX = 116 +FIFF.FIFFB_DACQ_PARS = 117 +FIFF.FIFFB_REF = 118 +FIFF.FIFFB_IAS_RAW_DATA = 119 +FIFF.FIFFB_IAS_ASPECT = 120 +FIFF.FIFFB_HPI_SUBSYSTEM = 121 +# FIFF.FIFFB_PHANTOM_SUBSYSTEM = 122 +# FIFF.FIFFB_STATUS_SUBSYSTEM = 123 +FIFF.FIFFB_DEVICE = 124 +FIFF.FIFFB_HELIUM = 125 +FIFF.FIFFB_CHANNEL_INFO = 126 + +FIFF.FIFFB_SPHERE = 300 # Concentric sphere model related +FIFF.FIFFB_BEM = 310 # Boundary-element method +FIFF.FIFFB_BEM_SURF = 311 # Boundary-element method surfaces +FIFF.FIFFB_CONDUCTOR_MODEL = 312 # One conductor model definition +FIFF.FIFFB_PROJ = 313 +FIFF.FIFFB_PROJ_ITEM = 314 +FIFF.FIFFB_MRI = 200 +FIFF.FIFFB_MRI_SET = 201 +FIFF.FIFFB_MRI_SLICE = 202 +FIFF.FIFFB_MRI_SCENERY = 203 # These are for writing unrelated 'slices' +FIFF.FIFFB_MRI_SCENE = 204 # Which are actually 3D scenes... +FIFF.FIFFB_MRI_SEG = 205 # MRI segmentation data +FIFF.FIFFB_MRI_SEG_REGION = 206 # One MRI segmentation region +FIFF.FIFFB_PROCESSING_HISTORY = 900 +FIFF.FIFFB_PROCESSING_RECORD = 901 + +FIFF.FIFFB_DATA_CORRECTION = 500 +FIFF.FIFFB_CHANNEL_DECOUPLER = 501 +FIFF.FIFFB_SSS_INFO = 502 +FIFF.FIFFB_SSS_CAL = 503 +FIFF.FIFFB_SSS_ST_INFO = 504 +FIFF.FIFFB_SSS_BASES = 505 +FIFF.FIFFB_IAS = 510 +# +# Of general interest +# +FIFF.FIFF_FILE_ID = 100 +FIFF.FIFF_DIR_POINTER = 101 +FIFF.FIFF_BLOCK_ID = 103 +FIFF.FIFF_BLOCK_START = 104 +FIFF.FIFF_BLOCK_END = 105 +FIFF.FIFF_FREE_LIST = 106 +FIFF.FIFF_FREE_BLOCK = 107 +FIFF.FIFF_NOP = 108 +FIFF.FIFF_PARENT_FILE_ID = 109 +FIFF.FIFF_PARENT_BLOCK_ID = 110 +FIFF.FIFF_BLOCK_NAME = 111 +FIFF.FIFF_BLOCK_VERSION = 112 +FIFF.FIFF_CREATOR = 113 # Program that created the file (string) +FIFF.FIFF_MODIFIER = 114 # Program that modified the file (string) +FIFF.FIFF_REF_ROLE = 115 +FIFF.FIFF_REF_FILE_ID = 116 +FIFF.FIFF_REF_FILE_NUM = 117 +FIFF.FIFF_REF_FILE_NAME = 118 +# +# Megacq saves the parameters in these tags +# +FIFF.FIFF_DACQ_PARS = 150 +FIFF.FIFF_DACQ_STIM = 151 + +FIFF.FIFF_DEVICE_TYPE = 152 +FIFF.FIFF_DEVICE_MODEL = 153 +FIFF.FIFF_DEVICE_SERIAL = 154 +FIFF.FIFF_DEVICE_SITE = 155 + +FIFF.FIFF_HE_LEVEL_RAW = 156 +FIFF.FIFF_HELIUM_LEVEL = 157 +FIFF.FIFF_ORIG_FILE_GUID = 158 +FIFF.FIFF_UTC_OFFSET = 159 + +FIFF.FIFF_NCHAN = 200 +FIFF.FIFF_SFREQ = 201 +FIFF.FIFF_DATA_PACK = 202 +FIFF.FIFF_CH_INFO = 203 +FIFF.FIFF_MEAS_DATE = 204 +FIFF.FIFF_SUBJECT = 205 +FIFF.FIFF_COMMENT = 206 +FIFF.FIFF_NAVE = 207 +FIFF.FIFF_FIRST_SAMPLE = 208 # The first sample of an epoch +FIFF.FIFF_LAST_SAMPLE = 209 # The last sample of an epoch +FIFF.FIFF_ASPECT_KIND = 210 +FIFF.FIFF_REF_EVENT = 211 +FIFF.FIFF_EXPERIMENTER = 212 +FIFF.FIFF_DIG_POINT = 213 +FIFF.FIFF_CH_POS = 214 +FIFF.FIFF_HPI_SLOPES = 215 # HPI data +FIFF.FIFF_HPI_NCOIL = 216 
+FIFF.FIFF_REQ_EVENT = 217 +FIFF.FIFF_REQ_LIMIT = 218 +FIFF.FIFF_LOWPASS = 219 +FIFF.FIFF_BAD_CHS = 220 +FIFF.FIFF_ARTEF_REMOVAL = 221 +FIFF.FIFF_COORD_TRANS = 222 +FIFF.FIFF_HIGHPASS = 223 +FIFF.FIFF_CH_CALS = 224 # This will not occur in new files +FIFF.FIFF_HPI_BAD_CHS = 225 # List of channels considered to be bad in hpi +FIFF.FIFF_HPI_CORR_COEFF = 226 # HPI curve fit correlations +FIFF.FIFF_EVENT_COMMENT = 227 # Comment about the events used in averaging +FIFF.FIFF_NO_SAMPLES = 228 # Number of samples in an epoch +FIFF.FIFF_FIRST_TIME = 229 # Time scale minimum + +FIFF.FIFF_SUBAVE_SIZE = 230 # Size of a subaverage +FIFF.FIFF_SUBAVE_FIRST = 231 # The first epoch # contained in the subaverage +FIFF.FIFF_NAME = 233 # Intended to be a short name. +FIFF.FIFF_DESCRIPTION = FIFF.FIFF_COMMENT # (Textual) Description of an object +FIFF.FIFF_DIG_STRING = 234 # String of digitized points +FIFF.FIFF_LINE_FREQ = 235 # Line frequency +FIFF.FIFF_GANTRY_ANGLE = 282 # Tilt angle of the gantry in degrees. + +# +# HPI fitting program tags +# +FIFF.FIFF_HPI_COIL_FREQ = 236 # HPI coil excitation frequency +FIFF.FIFF_HPI_COIL_MOMENTS = ( + 240 # Estimated moment vectors for the HPI coil magnetic dipoles +) +FIFF.FIFF_HPI_FIT_GOODNESS = 241 # Three floats indicating the goodness of fit +FIFF.FIFF_HPI_FIT_ACCEPT = 242 # Bitmask indicating acceptance (see below) +FIFF.FIFF_HPI_FIT_GOOD_LIMIT = 243 # Limit for the goodness-of-fit +FIFF.FIFF_HPI_FIT_DIST_LIMIT = 244 # Limit for the coil distance difference +FIFF.FIFF_HPI_COIL_NO = 245 # Coil number listed by HPI measurement +FIFF.FIFF_HPI_COILS_USED = ( + 246 # List of coils finally used when the transformation was computed +) +FIFF.FIFF_HPI_DIGITIZATION_ORDER = ( + 247 # Which Isotrak digitization point corresponds to each of the coils energized +) + + +# +# Tags used for storing channel info +# +FIFF.FIFF_CH_SCAN_NO = ( + 250 # Channel scan number. Corresponds to fiffChInfoRec.scanNo field +) +FIFF.FIFF_CH_LOGICAL_NO = ( + 251 # Channel logical number. Corresponds to fiffChInfoRec.logNo field +) +FIFF.FIFF_CH_KIND = 252 # Channel type. Corresponds to fiffChInfoRec.kind field" +FIFF.FIFF_CH_RANGE = ( + 253 # Conversion from recorded number to (possibly virtual) voltage at the output" +) +FIFF.FIFF_CH_CAL = 254 # Calibration coefficient from output voltage to some real units +FIFF.FIFF_CH_LOC = 255 # Channel loc +FIFF.FIFF_CH_UNIT = 256 # Unit of the data +FIFF.FIFF_CH_UNIT_MUL = 257 # Unit multiplier exponent +FIFF.FIFF_CH_DACQ_NAME = 258 # Name of the channel in the data acquisition system. Corresponds to fiffChInfoRec.name. 
+FIFF.FIFF_CH_COIL_TYPE = 350 # Coil type in coil_def.dat +FIFF.FIFF_CH_COORD_FRAME = 351 # Coordinate frame (integer) + +# +# Pointers +# +FIFF.FIFFV_NEXT_SEQ = 0 +FIFF.FIFFV_NEXT_NONE = -1 +# +# Channel types +# +FIFF.FIFFV_BIO_CH = 102 +FIFF.FIFFV_MEG_CH = 1 +FIFF.FIFFV_REF_MEG_CH = 301 +FIFF.FIFFV_EEG_CH = 2 +FIFF.FIFFV_MCG_CH = 201 +FIFF.FIFFV_STIM_CH = 3 +FIFF.FIFFV_EOG_CH = 202 +FIFF.FIFFV_EMG_CH = 302 +FIFF.FIFFV_ECG_CH = 402 +FIFF.FIFFV_MISC_CH = 502 +FIFF.FIFFV_RESP_CH = 602 # Respiration monitoring +FIFF.FIFFV_SEEG_CH = 802 # stereotactic EEG +FIFF.FIFFV_DBS_CH = 803 # deep brain stimulation +FIFF.FIFFV_SYST_CH = 900 # some system status information (on Triux systems only) +FIFF.FIFFV_ECOG_CH = 902 +FIFF.FIFFV_IAS_CH = 910 # Internal Active Shielding data (maybe on Triux only) +FIFF.FIFFV_EXCI_CH = 920 # flux excitation channel used to be a stimulus channel +FIFF.FIFFV_DIPOLE_WAVE = 1000 # Dipole time curve (xplotter/xfit) +FIFF.FIFFV_GOODNESS_FIT = 1001 # Goodness of fit (xplotter/xfit) +FIFF.FIFFV_FNIRS_CH = 1100 # Functional near-infrared spectroscopy +FIFF.FIFFV_TEMPERATURE_CH = 1200 # Functional near-infrared spectroscopy +FIFF.FIFFV_GALVANIC_CH = 1300 # Galvanic skin response +FIFF.FIFFV_EYETRACK_CH = 1400 # Eye-tracking + +_ch_kind_named = { + key: key + for key in ( + FIFF.FIFFV_BIO_CH, + FIFF.FIFFV_MEG_CH, + FIFF.FIFFV_REF_MEG_CH, + FIFF.FIFFV_EEG_CH, + FIFF.FIFFV_MCG_CH, + FIFF.FIFFV_STIM_CH, + FIFF.FIFFV_EOG_CH, + FIFF.FIFFV_EMG_CH, + FIFF.FIFFV_ECG_CH, + FIFF.FIFFV_MISC_CH, + FIFF.FIFFV_RESP_CH, + FIFF.FIFFV_SEEG_CH, + FIFF.FIFFV_DBS_CH, + FIFF.FIFFV_SYST_CH, + FIFF.FIFFV_ECOG_CH, + FIFF.FIFFV_IAS_CH, + FIFF.FIFFV_EXCI_CH, + FIFF.FIFFV_DIPOLE_WAVE, + FIFF.FIFFV_GOODNESS_FIT, + FIFF.FIFFV_FNIRS_CH, + FIFF.FIFFV_GALVANIC_CH, + FIFF.FIFFV_TEMPERATURE_CH, + FIFF.FIFFV_EYETRACK_CH, + ) +} + +# +# Quaternion channels for head position monitoring +# +FIFF.FIFFV_QUAT_0 = 700 # Quaternion param q0 obsolete for unit quaternion +FIFF.FIFFV_QUAT_1 = 701 # Quaternion param q1 rotation +FIFF.FIFFV_QUAT_2 = 702 # Quaternion param q2 rotation +FIFF.FIFFV_QUAT_3 = 703 # Quaternion param q3 rotation +FIFF.FIFFV_QUAT_4 = 704 # Quaternion param q4 translation +FIFF.FIFFV_QUAT_5 = 705 # Quaternion param q5 translation +FIFF.FIFFV_QUAT_6 = 706 # Quaternion param q6 translation +FIFF.FIFFV_HPI_G = 707 # Goodness-of-fit in continuous hpi +FIFF.FIFFV_HPI_ERR = 708 # Estimation error in continuous hpi +FIFF.FIFFV_HPI_MOV = 709 # Estimated head movement speed in continuous hpi +# +# Coordinate frames +# +FIFF.FIFFV_COORD_UNKNOWN = 0 +FIFF.FIFFV_COORD_DEVICE = 1 +FIFF.FIFFV_COORD_ISOTRAK = 2 +FIFF.FIFFV_COORD_HPI = 3 +FIFF.FIFFV_COORD_HEAD = 4 +FIFF.FIFFV_COORD_MRI = 5 +FIFF.FIFFV_COORD_MRI_SLICE = 6 +FIFF.FIFFV_COORD_MRI_DISPLAY = 7 +FIFF.FIFFV_COORD_DICOM_DEVICE = 8 +FIFF.FIFFV_COORD_IMAGING_DEVICE = 9 +_coord_frame_named = { + key: key + for key in ( + FIFF.FIFFV_COORD_UNKNOWN, + FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_COORD_ISOTRAK, + FIFF.FIFFV_COORD_HPI, + FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_COORD_MRI, + # We never use these but could add at some point + # FIFF.FIFFV_COORD_MRI_SLICE, + # FIFF.FIFFV_COORD_MRI_DISPLAY, + # FIFF.FIFFV_COORD_DICOM_DEVICE, + # FIFF.FIFFV_COORD_IMAGING_DEVICE, + ) +} +# +# Needed for raw and evoked-response data +# +FIFF.FIFF_DATA_BUFFER = 300 # Buffer containing measurement data +FIFF.FIFF_DATA_SKIP = 301 # Data skip in buffers +FIFF.FIFF_EPOCH = 302 # Buffer containing one epoch and channel +FIFF.FIFF_DATA_SKIP_SAMP = 303 # Data skip in samples + +# +# Info on 
subject +# +FIFF.FIFF_SUBJ_ID = 400 # Subject ID +FIFF.FIFF_SUBJ_FIRST_NAME = 401 # First name of the subject +FIFF.FIFF_SUBJ_MIDDLE_NAME = 402 # Middle name of the subject +FIFF.FIFF_SUBJ_LAST_NAME = 403 # Last name of the subject +FIFF.FIFF_SUBJ_BIRTH_DAY = 404 # Birthday of the subject +FIFF.FIFF_SUBJ_SEX = 405 # Sex of the subject +FIFF.FIFF_SUBJ_HAND = 406 # Handedness of the subject +FIFF.FIFF_SUBJ_WEIGHT = 407 # Weight of the subject in kg +FIFF.FIFF_SUBJ_HEIGHT = 408 # Height of the subject in m +FIFF.FIFF_SUBJ_COMMENT = 409 # Comment about the subject +FIFF.FIFF_SUBJ_HIS_ID = 410 # ID used in the Hospital Information System + +FIFF.FIFFV_SUBJ_HAND_RIGHT = 1 # Righthanded +FIFF.FIFFV_SUBJ_HAND_LEFT = 2 # Lefthanded +FIFF.FIFFV_SUBJ_HAND_AMBI = 3 # Ambidextrous + +FIFF.FIFFV_SUBJ_SEX_UNKNOWN = 0 # Unknown gender +FIFF.FIFFV_SUBJ_SEX_MALE = 1 # Male +FIFF.FIFFV_SUBJ_SEX_FEMALE = 2 # Female + +FIFF.FIFF_PROJ_ID = 500 +FIFF.FIFF_PROJ_NAME = 501 +FIFF.FIFF_PROJ_AIM = 502 +FIFF.FIFF_PROJ_PERSONS = 503 +FIFF.FIFF_PROJ_COMMENT = 504 + +FIFF.FIFF_EVENT_CHANNELS = 600 # Event channel numbers +FIFF.FIFF_EVENT_LIST = 601 # List of events (integers: +FIFF.FIFF_EVENT_CHANNEL = 602 # Event channel +FIFF.FIFF_EVENT_BITS = 603 # Event bits array + +# +# Tags used in saving SQUID characteristics etc. +# +FIFF.FIFF_SQUID_BIAS = 701 +FIFF.FIFF_SQUID_OFFSET = 702 +FIFF.FIFF_SQUID_GATE = 703 +# +# Aspect values used to save characteristic curves of SQUIDs. (mjk) +# +FIFF.FIFFV_ASPECT_IFII_LOW = 1100 +FIFF.FIFFV_ASPECT_IFII_HIGH = 1101 +FIFF.FIFFV_ASPECT_GATE = 1102 + +# +# Values for file references +# +FIFF.FIFFV_ROLE_PREV_FILE = 1 +FIFF.FIFFV_ROLE_NEXT_FILE = 2 + +# +# References +# +FIFF.FIFF_REF_PATH = 1101 + +# +# Different aspects of data +# +FIFF.FIFFV_ASPECT_AVERAGE = 100 # Normal average of epochs +FIFF.FIFFV_ASPECT_STD_ERR = 101 # Std. 
error of mean +FIFF.FIFFV_ASPECT_SINGLE = 102 # Single epoch cut out from the continuous data +FIFF.FIFFV_ASPECT_SUBAVERAGE = 103 # Partial average (subaverage) +FIFF.FIFFV_ASPECT_ALTAVERAGE = 104 # Alternating subaverage +FIFF.FIFFV_ASPECT_SAMPLE = 105 # A sample cut out by graph +FIFF.FIFFV_ASPECT_POWER_DENSITY = 106 # Power density spectrum +FIFF.FIFFV_ASPECT_DIPOLE_WAVE = 200 # Dipole amplitude curve + +# +# BEM surface IDs +# +FIFF.FIFFV_BEM_SURF_ID_UNKNOWN = -1 +FIFF.FIFFV_BEM_SURF_ID_NOT_KNOWN = 0 +FIFF.FIFFV_BEM_SURF_ID_BRAIN = 1 +FIFF.FIFFV_BEM_SURF_ID_CSF = 2 +FIFF.FIFFV_BEM_SURF_ID_SKULL = 3 +FIFF.FIFFV_BEM_SURF_ID_HEAD = 4 + +FIFF.FIFF_SPHERE_ORIGIN = 3001 +FIFF.FIFF_SPHERE_RADIUS = 3002 + +FIFF.FIFF_BEM_SURF_ID = 3101 # int surface number +FIFF.FIFF_BEM_SURF_NAME = 3102 # string surface name +FIFF.FIFF_BEM_SURF_NNODE = 3103 # int number of nodes on a surface +FIFF.FIFF_BEM_SURF_NTRI = 3104 # int number of triangles on a surface +FIFF.FIFF_BEM_SURF_NODES = 3105 # float surface nodes (nnode,3) +FIFF.FIFF_BEM_SURF_TRIANGLES = 3106 # int surface triangles (ntri,3) +FIFF.FIFF_BEM_SURF_NORMALS = 3107 # float surface node normal unit vectors + +FIFF.FIFF_BEM_POT_SOLUTION = 3110 # float ** The solution matrix +FIFF.FIFF_BEM_APPROX = 3111 # int approximation method, see below +FIFF.FIFF_BEM_COORD_FRAME = 3112 # The coordinate frame of the model +FIFF.FIFF_BEM_SIGMA = 3113 # Conductivity of a compartment +FIFF.FIFFV_BEM_APPROX_CONST = 1 # The constant potential approach +FIFF.FIFFV_BEM_APPROX_LINEAR = 2 # The linear potential approach + +# +# More of those defined in MNE +# +FIFF.FIFFV_MNE_SURF_UNKNOWN = -1 +FIFF.FIFFV_MNE_SURF_LEFT_HEMI = 101 +FIFF.FIFFV_MNE_SURF_RIGHT_HEMI = 102 +FIFF.FIFFV_MNE_SURF_MEG_HELMET = 201 # Use this irrespective of the system +# +# These relate to the Isotrak data (enum(point)) +# +FIFF.FIFFV_POINT_CARDINAL = 1 +FIFF.FIFFV_POINT_HPI = 2 +FIFF.FIFFV_POINT_EEG = 3 +FIFF.FIFFV_POINT_ECG = FIFF.FIFFV_POINT_EEG +FIFF.FIFFV_POINT_EXTRA = 4 +FIFF.FIFFV_POINT_HEAD = 5 # Point on the surface of the head +_dig_kind_named = { + key: key + for key in ( + FIFF.FIFFV_POINT_CARDINAL, + FIFF.FIFFV_POINT_HPI, + FIFF.FIFFV_POINT_EEG, + FIFF.FIFFV_POINT_EXTRA, + FIFF.FIFFV_POINT_HEAD, + ) +} +# +# Cardinal point types (enum(cardinal_point)) +# +FIFF.FIFFV_POINT_LPA = 1 +FIFF.FIFFV_POINT_NASION = 2 +FIFF.FIFFV_POINT_RPA = 3 +FIFF.FIFFV_POINT_INION = 4 +_dig_cardinal_named = { + key: key + for key in ( + FIFF.FIFFV_POINT_LPA, + FIFF.FIFFV_POINT_NASION, + FIFF.FIFFV_POINT_RPA, + FIFF.FIFFV_POINT_INION, + ) +} +# +# SSP +# +FIFF.FIFF_PROJ_ITEM_KIND = 3411 +FIFF.FIFF_PROJ_ITEM_TIME = 3412 +FIFF.FIFF_PROJ_ITEM_NVEC = 3414 +FIFF.FIFF_PROJ_ITEM_VECTORS = 3415 +FIFF.FIFF_PROJ_ITEM_DEFINITION = 3416 +FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST = 3417 +# XPlotter +FIFF.FIFF_XPLOTTER_LAYOUT = 3501 # string - "Xplotter layout tag" +# +# MRIs +# +FIFF.FIFF_MRI_SOURCE_PATH = FIFF.FIFF_REF_PATH +FIFF.FIFF_MRI_SOURCE_FORMAT = 2002 +FIFF.FIFF_MRI_PIXEL_ENCODING = 2003 +FIFF.FIFF_MRI_PIXEL_DATA_OFFSET = 2004 +FIFF.FIFF_MRI_PIXEL_SCALE = 2005 +FIFF.FIFF_MRI_PIXEL_DATA = 2006 +FIFF.FIFF_MRI_PIXEL_OVERLAY_ENCODING = 2007 +FIFF.FIFF_MRI_PIXEL_OVERLAY_DATA = 2008 +FIFF.FIFF_MRI_BOUNDING_BOX = 2009 +FIFF.FIFF_MRI_WIDTH = 2010 +FIFF.FIFF_MRI_WIDTH_M = 2011 +FIFF.FIFF_MRI_HEIGHT = 2012 +FIFF.FIFF_MRI_HEIGHT_M = 2013 +FIFF.FIFF_MRI_DEPTH = 2014 +FIFF.FIFF_MRI_DEPTH_M = 2015 +FIFF.FIFF_MRI_THICKNESS = 2016 +FIFF.FIFF_MRI_SCENE_AIM = 2017 +FIFF.FIFF_MRI_ORIG_SOURCE_PATH = 2020 +FIFF.FIFF_MRI_ORIG_SOURCE_FORMAT = 
2021 +FIFF.FIFF_MRI_ORIG_PIXEL_ENCODING = 2022 +FIFF.FIFF_MRI_ORIG_PIXEL_DATA_OFFSET = 2023 +FIFF.FIFF_MRI_VOXEL_DATA = 2030 +FIFF.FIFF_MRI_VOXEL_ENCODING = 2031 +FIFF.FIFF_MRI_MRILAB_SETUP = 2100 +FIFF.FIFF_MRI_SEG_REGION_ID = 2200 +# +FIFF.FIFFV_MRI_PIXEL_UNKNOWN = 0 +FIFF.FIFFV_MRI_PIXEL_BYTE = 1 +FIFF.FIFFV_MRI_PIXEL_WORD = 2 +FIFF.FIFFV_MRI_PIXEL_SWAP_WORD = 3 +FIFF.FIFFV_MRI_PIXEL_FLOAT = 4 +FIFF.FIFFV_MRI_PIXEL_BYTE_INDEXED_COLOR = 5 +FIFF.FIFFV_MRI_PIXEL_BYTE_RGB_COLOR = 6 +FIFF.FIFFV_MRI_PIXEL_BYTE_RLE_RGB_COLOR = 7 +FIFF.FIFFV_MRI_PIXEL_BIT_RLE = 8 +# +# These are the MNE fiff definitions (range 350-390 reserved for MNE) +# +FIFF.FIFFB_MNE = 350 +FIFF.FIFFB_MNE_SOURCE_SPACE = 351 +FIFF.FIFFB_MNE_FORWARD_SOLUTION = 352 +FIFF.FIFFB_MNE_PARENT_MRI_FILE = 353 +FIFF.FIFFB_MNE_PARENT_MEAS_FILE = 354 +FIFF.FIFFB_MNE_COV = 355 +FIFF.FIFFB_MNE_INVERSE_SOLUTION = 356 +FIFF.FIFFB_MNE_NAMED_MATRIX = 357 +FIFF.FIFFB_MNE_ENV = 358 +FIFF.FIFFB_MNE_BAD_CHANNELS = 359 +FIFF.FIFFB_MNE_VERTEX_MAP = 360 +FIFF.FIFFB_MNE_EVENTS = 361 +FIFF.FIFFB_MNE_MORPH_MAP = 362 +FIFF.FIFFB_MNE_SURFACE_MAP = 363 +FIFF.FIFFB_MNE_SURFACE_MAP_GROUP = 364 + +# +# CTF compensation data +# +FIFF.FIFFB_MNE_CTF_COMP = 370 +FIFF.FIFFB_MNE_CTF_COMP_DATA = 371 +FIFF.FIFFB_MNE_DERIVATIONS = 372 + +FIFF.FIFFB_MNE_EPOCHS = 373 +FIFF.FIFFB_MNE_ICA = 374 +# +# Fiff tags associated with MNE computations (3500...) +# +# +# 3500... Bookkeeping +# +FIFF.FIFF_MNE_ROW_NAMES = 3502 +FIFF.FIFF_MNE_COL_NAMES = 3503 +FIFF.FIFF_MNE_NROW = 3504 +FIFF.FIFF_MNE_NCOL = 3505 +FIFF.FIFF_MNE_COORD_FRAME = 3506 # Coordinate frame employed. Defaults: +# FIFFB_MNE_SOURCE_SPACE FIFFV_COORD_MRI +# FIFFB_MNE_FORWARD_SOLUTION FIFFV_COORD_HEAD +# FIFFB_MNE_INVERSE_SOLUTION FIFFV_COORD_HEAD +FIFF.FIFF_MNE_CH_NAME_LIST = 3507 +FIFF.FIFF_MNE_FILE_NAME = ( + 3508 # This removes the collision with fiff_file.h (used to be 3501) +) +# +# 3510... 3590... 
Source space or surface +# +FIFF.FIFF_MNE_SOURCE_SPACE_POINTS = 3510 # The vertices +FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS = 3511 # The vertex normals +FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS = 3512 # How many vertices +FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION = 3513 # Which are selected to the source space +FIFF.FIFF_MNE_SOURCE_SPACE_NUSE = 3514 # How many are in use +FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST = ( + 3515 # Nearest source space vertex for all vertices +) +FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST = ( + 3516 # Distance to the Nearest source space vertex for all vertices +) +FIFF.FIFF_MNE_SOURCE_SPACE_ID = 3517 # Identifier +FIFF.FIFF_MNE_SOURCE_SPACE_TYPE = 3518 # Surface or volume +FIFF.FIFF_MNE_SOURCE_SPACE_VERTICES = 3519 # List of vertices (zero based) + +FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS = ( + 3596 # Voxel space dimensions in a volume source space +) +FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR = ( + 3597 # Matrix to interpolate a volume source space into a mri volume +) +FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE = 3598 # MRI file used in the interpolation + +FIFF.FIFF_MNE_SOURCE_SPACE_NTRI = 3590 # Number of triangles +FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES = 3591 # The triangulation +FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI = ( + 3592 # Number of triangles corresponding to the number of vertices in use +) +FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = ( + 3593 # The triangulation of the used vertices in the source space +) +FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS = 3594 # Number of neighbors for each source space point (used for volume source spaces) +FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS = ( + 3595 # Neighbors for each source space point (used for volume source spaces) +) + +FIFF.FIFF_MNE_SOURCE_SPACE_DIST = ( + 3599 # Distances between vertices in use (along the surface) +) +FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT = ( + 3600 # If distance is above this limit (in the volume) it has not been calculated +) + +FIFF.FIFF_MNE_SURFACE_MAP_DATA = 3610 # Surface map data +FIFF.FIFF_MNE_SURFACE_MAP_KIND = 3611 # Type of map + +# +# 3520... Forward solution +# +FIFF.FIFF_MNE_FORWARD_SOLUTION = 3520 +FIFF.FIFF_MNE_SOURCE_ORIENTATION = 3521 # Fixed or free +FIFF.FIFF_MNE_INCLUDED_METHODS = 3522 +FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD = 3523 +# +# 3530... Covariance matrix +# +FIFF.FIFF_MNE_COV_KIND = 3530 # What kind of a covariance matrix +FIFF.FIFF_MNE_COV_DIM = 3531 # Matrix dimension +FIFF.FIFF_MNE_COV = 3532 # Full matrix in packed representation (lower triangle) +FIFF.FIFF_MNE_COV_DIAG = 3533 # Diagonal matrix +FIFF.FIFF_MNE_COV_EIGENVALUES = 3534 # Eigenvalues and eigenvectors of the above +FIFF.FIFF_MNE_COV_EIGENVECTORS = 3535 +FIFF.FIFF_MNE_COV_NFREE = 3536 # Number of degrees of freedom +FIFF.FIFF_MNE_COV_METHOD = 3537 # The estimator used +FIFF.FIFF_MNE_COV_SCORE = 3538 # Negative log-likelihood + +# +# 3540... 
Inverse operator +# +# We store the inverse operator as the eigenleads, eigenfields, +# and weights +# +FIFF.FIFF_MNE_INVERSE_LEADS = 3540 # The eigenleads +FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED = ( + 3546 # The eigenleads (already weighted with R^0.5) +) +FIFF.FIFF_MNE_INVERSE_FIELDS = 3541 # The eigenfields +FIFF.FIFF_MNE_INVERSE_SING = 3542 # The singular values +FIFF.FIFF_MNE_PRIORS_USED = ( + 3543 # Which kind of priors have been used for the source covariance matrix +) +FIFF.FIFF_MNE_INVERSE_FULL = 3544 # Inverse operator as one matrix +# This matrix includes the whitening operator as well +# The regularization is applied +FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS = ( + 3545 # Contains the orientation of one source per row +) +# The source orientations must be expressed in the coordinate system +# given by FIFF_MNE_COORD_FRAME +FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT = 3547 # Are the sources given in Am or Am/m^2 ? +# +# 3550... Saved environment info +# +FIFF.FIFF_MNE_ENV_WORKING_DIR = 3550 # Working directory where the file was created +FIFF.FIFF_MNE_ENV_COMMAND_LINE = 3551 # The command used to create the file +FIFF.FIFF_MNE_EXTERNAL_BIG_ENDIAN = ( + 3552 # Reference to an external binary file (big-endian) */ +) +FIFF.FIFF_MNE_EXTERNAL_LITTLE_ENDIAN = ( + 3553 # Reference to an external binary file (little-endian) */ +) +# +# 3560... Miscellaneous +# +FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE = 3560 # Is this projection item active? +FIFF.FIFF_MNE_EVENT_LIST = 3561 # An event list (for STI101 / STI 014) +FIFF.FIFF_MNE_HEMI = 3562 # Hemisphere association for general purposes +FIFF.FIFF_MNE_DATA_SKIP_NOP = 3563 # A data skip turned off in the raw data +FIFF.FIFF_MNE_ORIG_CH_INFO = 3564 # Channel information before any changes +FIFF.FIFF_MNE_EVENT_TRIGGER_MASK = 3565 # Mask applied to the trigger channel values +FIFF.FIFF_MNE_EVENT_COMMENTS = 3566 # Event comments merged into one long string +FIFF.FIFF_MNE_CUSTOM_REF = 3567 # Whether a custom reference was applied to the data +FIFF.FIFF_MNE_BASELINE_MIN = 3568 # Time of baseline beginning +FIFF.FIFF_MNE_BASELINE_MAX = 3569 # Time of baseline end +# +# 3570... Morphing maps +# +FIFF.FIFF_MNE_MORPH_MAP = 3570 # Mapping of closest vertices on the sphere +FIFF.FIFF_MNE_MORPH_MAP_FROM = 3571 # Which subject is this map from +FIFF.FIFF_MNE_MORPH_MAP_TO = 3572 # Which subject is this map to +# +# 3580... CTF compensation data +# +FIFF.FIFF_MNE_CTF_COMP_KIND = 3580 # What kind of compensation +FIFF.FIFF_MNE_CTF_COMP_DATA = 3581 # The compensation data itself +FIFF.FIFF_MNE_CTF_COMP_CALIBRATED = 3582 # Are the coefficients calibrated? + +FIFF.FIFF_MNE_DERIVATION_DATA = ( + 3585 # Used to store information about EEG and other derivations +) +# +# 3601... 
values associated with ICA decomposition +# +FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS = 3601 # ICA interface parameters +FIFF.FIFF_MNE_ICA_CHANNEL_NAMES = 3602 # ICA channel names +FIFF.FIFF_MNE_ICA_WHITENER = 3603 # ICA whitener +FIFF.FIFF_MNE_ICA_PCA_COMPONENTS = 3604 # PCA components +FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR = 3605 # PCA explained variance +FIFF.FIFF_MNE_ICA_PCA_MEAN = 3606 # PCA mean +FIFF.FIFF_MNE_ICA_MATRIX = 3607 # ICA unmixing matrix +FIFF.FIFF_MNE_ICA_BADS = 3608 # ICA bad sources +FIFF.FIFF_MNE_ICA_MISC_PARAMS = 3609 # ICA misc params +# +# Miscellaneous +# +FIFF.FIFF_MNE_KIT_SYSTEM_ID = 3612 # Unique ID assigned to KIT systems +# +# Maxfilter tags +# +FIFF.FIFF_SSS_FRAME = 263 +FIFF.FIFF_SSS_JOB = 264 +FIFF.FIFF_SSS_ORIGIN = 265 +FIFF.FIFF_SSS_ORD_IN = 266 +FIFF.FIFF_SSS_ORD_OUT = 267 +FIFF.FIFF_SSS_NMAG = 268 +FIFF.FIFF_SSS_COMPONENTS = 269 +FIFF.FIFF_SSS_CAL_CHANS = 270 +FIFF.FIFF_SSS_CAL_CORRS = 271 +FIFF.FIFF_SSS_ST_CORR = 272 +FIFF.FIFF_SSS_NFREE = 278 +FIFF.FIFF_SSS_ST_LENGTH = 279 +FIFF.FIFF_DECOUPLER_MATRIX = 800 +# +# Fiff values associated with MNE computations +# +FIFF.FIFFV_MNE_UNKNOWN_ORI = 0 +FIFF.FIFFV_MNE_FIXED_ORI = 1 +FIFF.FIFFV_MNE_FREE_ORI = 2 + +FIFF.FIFFV_MNE_MEG = 1 +FIFF.FIFFV_MNE_EEG = 2 +FIFF.FIFFV_MNE_MEG_EEG = 3 + +FIFF.FIFFV_MNE_PRIORS_NONE = 0 +FIFF.FIFFV_MNE_PRIORS_DEPTH = 1 +FIFF.FIFFV_MNE_PRIORS_LORETA = 2 +FIFF.FIFFV_MNE_PRIORS_SULCI = 3 + +FIFF.FIFFV_MNE_UNKNOWN_COV = 0 +FIFF.FIFFV_MNE_SENSOR_COV = 1 +FIFF.FIFFV_MNE_NOISE_COV = 1 # This is what it should have been called +FIFF.FIFFV_MNE_SOURCE_COV = 2 +FIFF.FIFFV_MNE_FMRI_PRIOR_COV = 3 +FIFF.FIFFV_MNE_SIGNAL_COV = 4 # This will be potentially employed in beamformers +FIFF.FIFFV_MNE_DEPTH_PRIOR_COV = 5 # The depth weighting prior +FIFF.FIFFV_MNE_ORIENT_PRIOR_COV = 6 # The orientation prior + +# +# Output map types +# +FIFF.FIFFV_MNE_MAP_UNKNOWN = -1 # Unspecified +FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT = 1 # Scalar current value +FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_SIZE = 2 # Absolute value of the above +FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT = 3 # Current vector components +FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_SIZE = 4 # Vector current size +FIFF.FIFFV_MNE_MAP_T_STAT = 5 # Student's t statistic +FIFF.FIFFV_MNE_MAP_F_STAT = 6 # F statistic +FIFF.FIFFV_MNE_MAP_F_STAT_SQRT = 7 # Square root of the F statistic +FIFF.FIFFV_MNE_MAP_CHI2_STAT = 8 # (Approximate) chi^2 statistic +FIFF.FIFFV_MNE_MAP_CHI2_STAT_SQRT = ( + 9 # Square root of the (approximate) chi^2 statistic +) +FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_NOISE = 10 # Current noise approximation (scalar) +FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_NOISE = 11 # Current noise approximation (vector) +# +# Source space types (values of FIFF_MNE_SOURCE_SPACE_TYPE) +# +FIFF.FIFFV_MNE_SPACE_UNKNOWN = -1 +FIFF.FIFFV_MNE_SPACE_SURFACE = 1 +FIFF.FIFFV_MNE_SPACE_VOLUME = 2 +FIFF.FIFFV_MNE_SPACE_DISCRETE = 3 +# +# Covariance matrix channel classification +# +FIFF.FIFFV_MNE_COV_CH_UNKNOWN = -1 # No idea +FIFF.FIFFV_MNE_COV_CH_MEG_MAG = 0 # Axial gradiometer or magnetometer [T] +FIFF.FIFFV_MNE_COV_CH_MEG_GRAD = 1 # Planar gradiometer [T/m] +FIFF.FIFFV_MNE_COV_CH_EEG = 2 # EEG [V] +# +# Projection item kinds +# +FIFF.FIFFV_PROJ_ITEM_NONE = 0 +FIFF.FIFFV_PROJ_ITEM_FIELD = 1 +FIFF.FIFFV_PROJ_ITEM_DIP_FIX = 2 +FIFF.FIFFV_PROJ_ITEM_DIP_ROT = 3 +FIFF.FIFFV_PROJ_ITEM_HOMOG_GRAD = 4 +FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD = 5 +FIFF.FIFFV_PROJ_ITEM_EEG_AVREF = ( + 10 # Linear projection related to EEG average reference +) +FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF = ( + FIFF.FIFFV_PROJ_ITEM_EEG_AVREF 
+) # backward compat alias +# +# Custom EEG references +# +FIFF.FIFFV_MNE_CUSTOM_REF_OFF = 0 +FIFF.FIFFV_MNE_CUSTOM_REF_ON = 1 +FIFF.FIFFV_MNE_CUSTOM_REF_CSD = 2 +# +# SSS job options +# +FIFF.FIFFV_SSS_JOB_NOTHING = 0 # No SSS, just copy input to output +FIFF.FIFFV_SSS_JOB_CTC = 1 # No SSS, only cross-talk correction +FIFF.FIFFV_SSS_JOB_FILTER = 2 # Spatial maxwell filtering +FIFF.FIFFV_SSS_JOB_VIRT = 3 # Transform data to another sensor array +FIFF.FIFFV_SSS_JOB_HEAD_POS = 4 # Estimate head positions, no SSS +FIFF.FIFFV_SSS_JOB_MOVEC_FIT = 5 # Estimate and compensate head movement +FIFF.FIFFV_SSS_JOB_MOVEC_QUA = ( + 6 # Compensate head movement from previously estimated head positions +) +FIFF.FIFFV_SSS_JOB_REC_ALL = 7 # Reconstruct inside and outside signals +FIFF.FIFFV_SSS_JOB_REC_IN = 8 # Reconstruct inside signals +FIFF.FIFFV_SSS_JOB_REC_OUT = 9 # Reconstruct outside signals +FIFF.FIFFV_SSS_JOB_ST = 10 # Spatio-temporal maxwell filtering +FIFF.FIFFV_SSS_JOB_TPROJ = 11 # Temporal projection, no SSS +FIFF.FIFFV_SSS_JOB_XSSS = 12 # Cross-validation SSS +FIFF.FIFFV_SSS_JOB_XSUB = 13 # Cross-validation subtraction, no SSS +FIFF.FIFFV_SSS_JOB_XWAV = 14 # Cross-validation noise waveforms +FIFF.FIFFV_SSS_JOB_NCOV = 15 # Noise covariance estimation +FIFF.FIFFV_SSS_JOB_SCOV = 16 # SSS sample covariance estimation +# } + +# +# Additional coordinate frames +# +FIFF.FIFFV_MNE_COORD_TUFTS_EEG = 300 # For Tufts EEG data +FIFF.FIFFV_MNE_COORD_CTF_DEVICE = 1001 # CTF device coordinates +FIFF.FIFFV_MNE_COORD_CTF_HEAD = 1004 # CTF head coordinates +FIFF.FIFFV_MNE_COORD_DIGITIZER = ( + FIFF.FIFFV_COORD_ISOTRAK +) # Original (Polhemus) digitizer coordinates +FIFF.FIFFV_MNE_COORD_SURFACE_RAS = FIFF.FIFFV_COORD_MRI # The surface RAS coordinates +FIFF.FIFFV_MNE_COORD_MRI_VOXEL = 2001 # The MRI voxel coordinates +FIFF.FIFFV_MNE_COORD_RAS = 2002 # Surface RAS coordinates with non-zero origin +FIFF.FIFFV_MNE_COORD_MNI_TAL = 2003 # MNI Talairach coordinates +FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ = 2004 # FreeSurfer Talairach coordinates (MNI z > 0) +FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ = 2005 # FreeSurfer Talairach coordinates (MNI z < 0) +FIFF.FIFFV_MNE_COORD_FS_TAL = 2006 # FreeSurfer Talairach coordinates +# +# 4D and KIT use the same head coordinate system definition as CTF +# +FIFF.FIFFV_MNE_COORD_4D_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD +FIFF.FIFFV_MNE_COORD_KIT_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD +_coord_frame_named.update({ + key: key + for key in ( + FIFF.FIFFV_MNE_COORD_CTF_DEVICE, + FIFF.FIFFV_MNE_COORD_MRI_VOXEL, + FIFF.FIFFV_MNE_COORD_RAS, + FIFF.FIFFV_MNE_COORD_MNI_TAL, + FIFF.FIFFV_MNE_COORD_FS_TAL, + FIFF.FIFFV_MNE_COORD_KIT_HEAD, + ) +}) + +# +# FWD Types +# + +FWD = BunchConstNamed() + +FWD.COIL_UNKNOWN = 0 +FWD.COILC_UNKNOWN = 0 +FWD.COILC_EEG = 1000 +FWD.COILC_MAG = 1 +FWD.COILC_AXIAL_GRAD = 2 +FWD.COILC_PLANAR_GRAD = 3 +FWD.COILC_AXIAL_GRAD2 = 4 + +FWD.COIL_ACCURACY_POINT = 0 +FWD.COIL_ACCURACY_NORMAL = 1 +FWD.COIL_ACCURACY_ACCURATE = 2 + +FWD.BEM_IP_APPROACH_LIMIT = 0.1 + +FWD.BEM_LIN_FIELD_SIMPLE = 1 +FWD.BEM_LIN_FIELD_FERGUSON = 2 +FWD.BEM_LIN_FIELD_URANKAR = 3 + +# +# Data types +# +FIFF.FIFFT_VOID = 0 +FIFF.FIFFT_BYTE = 1 +FIFF.FIFFT_SHORT = 2 +FIFF.FIFFT_INT = 3 +FIFF.FIFFT_FLOAT = 4 +FIFF.FIFFT_DOUBLE = 5 +FIFF.FIFFT_JULIAN = 6 +FIFF.FIFFT_USHORT = 7 +FIFF.FIFFT_UINT = 8 +FIFF.FIFFT_ULONG = 9 +FIFF.FIFFT_STRING = 10 +FIFF.FIFFT_LONG = 11 +FIFF.FIFFT_DAU_PACK13 = 13 +FIFF.FIFFT_DAU_PACK14 = 14 +FIFF.FIFFT_DAU_PACK16 = 16 +FIFF.FIFFT_COMPLEX_FLOAT = 20 +FIFF.FIFFT_COMPLEX_DOUBLE = 21 
+FIFF.FIFFT_OLD_PACK = 23 +FIFF.FIFFT_CH_INFO_STRUCT = 30 +FIFF.FIFFT_ID_STRUCT = 31 +FIFF.FIFFT_DIR_ENTRY_STRUCT = 32 +FIFF.FIFFT_DIG_POINT_STRUCT = 33 +FIFF.FIFFT_CH_POS_STRUCT = 34 +FIFF.FIFFT_COORD_TRANS_STRUCT = 35 +FIFF.FIFFT_DIG_STRING_STRUCT = 36 +FIFF.FIFFT_STREAM_SEGMENT_STRUCT = 37 +FIFF.FIFFT_MATRIX = 0x40000000 # 1073741824, 1 << 30 +FIFF.FIFFT_SPARSE_CCS_MATRIX = 0x00100000 # 1048576 +FIFF.FIFFT_SPARSE_RCS_MATRIX = 0x00200000 # 2097152 + +# +# Units of measurement +# +FIFF.FIFF_UNIT_NONE = -1 +# +# SI base units +# +FIFF.FIFF_UNIT_UNITLESS = 0 +FIFF.FIFF_UNIT_M = 1 # meter +FIFF.FIFF_UNIT_KG = 2 # kilogram +FIFF.FIFF_UNIT_SEC = 3 # second +FIFF.FIFF_UNIT_A = 4 # ampere +FIFF.FIFF_UNIT_K = 5 # Kelvin +FIFF.FIFF_UNIT_MOL = 6 # mole +# +# SI Supplementary units +# +FIFF.FIFF_UNIT_RAD = 7 # radian +FIFF.FIFF_UNIT_SR = 8 # steradian +# +# SI base candela +# +FIFF.FIFF_UNIT_CD = 9 # candela +# +# SI derived units +# +FIFF.FIFF_UNIT_MOL_M3 = 10 # mol/m^3 +FIFF.FIFF_UNIT_HZ = 101 # hertz +FIFF.FIFF_UNIT_N = 102 # Newton +FIFF.FIFF_UNIT_PA = 103 # pascal +FIFF.FIFF_UNIT_J = 104 # joule +FIFF.FIFF_UNIT_W = 105 # watt +FIFF.FIFF_UNIT_C = 106 # coulomb +FIFF.FIFF_UNIT_V = 107 # volt +FIFF.FIFF_UNIT_F = 108 # farad +FIFF.FIFF_UNIT_OHM = 109 # ohm +FIFF.FIFF_UNIT_S = 110 # Siemens (same as Moh, what fiff-constants calls it) +FIFF.FIFF_UNIT_WB = 111 # weber +FIFF.FIFF_UNIT_T = 112 # tesla +FIFF.FIFF_UNIT_H = 113 # Henry +FIFF.FIFF_UNIT_CEL = 114 # celsius +FIFF.FIFF_UNIT_LM = 115 # lumen +FIFF.FIFF_UNIT_LX = 116 # lux +FIFF.FIFF_UNIT_V_M2 = 117 # V/m^2 +# +# Others we need +# +FIFF.FIFF_UNIT_T_M = 201 # T/m +FIFF.FIFF_UNIT_AM = 202 # Am +FIFF.FIFF_UNIT_AM_M2 = 203 # Am/m^2 +FIFF.FIFF_UNIT_AM_M3 = 204 # Am/m^3 + +FIFF.FIFF_UNIT_PX = 210 # Pixel +_ch_unit_named = { + key: key + for key in ( + FIFF.FIFF_UNIT_NONE, + FIFF.FIFF_UNIT_UNITLESS, + FIFF.FIFF_UNIT_M, + FIFF.FIFF_UNIT_KG, + FIFF.FIFF_UNIT_SEC, + FIFF.FIFF_UNIT_A, + FIFF.FIFF_UNIT_K, + FIFF.FIFF_UNIT_MOL, + FIFF.FIFF_UNIT_RAD, + FIFF.FIFF_UNIT_SR, + FIFF.FIFF_UNIT_CD, + FIFF.FIFF_UNIT_MOL_M3, + FIFF.FIFF_UNIT_HZ, + FIFF.FIFF_UNIT_N, + FIFF.FIFF_UNIT_PA, + FIFF.FIFF_UNIT_J, + FIFF.FIFF_UNIT_W, + FIFF.FIFF_UNIT_C, + FIFF.FIFF_UNIT_V, + FIFF.FIFF_UNIT_F, + FIFF.FIFF_UNIT_OHM, + FIFF.FIFF_UNIT_S, + FIFF.FIFF_UNIT_WB, + FIFF.FIFF_UNIT_T, + FIFF.FIFF_UNIT_H, + FIFF.FIFF_UNIT_CEL, + FIFF.FIFF_UNIT_LM, + FIFF.FIFF_UNIT_LX, + FIFF.FIFF_UNIT_V_M2, + FIFF.FIFF_UNIT_T_M, + FIFF.FIFF_UNIT_AM, + FIFF.FIFF_UNIT_AM_M2, + FIFF.FIFF_UNIT_AM_M3, + FIFF.FIFF_UNIT_PX, + ) +} +# +# Multipliers +# +FIFF.FIFF_UNITM_E = 18 +FIFF.FIFF_UNITM_PET = 15 +FIFF.FIFF_UNITM_T = 12 +FIFF.FIFF_UNITM_GIG = 9 +FIFF.FIFF_UNITM_MEG = 6 +FIFF.FIFF_UNITM_K = 3 +FIFF.FIFF_UNITM_H = 2 +FIFF.FIFF_UNITM_DA = 1 +FIFF.FIFF_UNITM_NONE = 0 +FIFF.FIFF_UNITM_D = -1 +FIFF.FIFF_UNITM_C = -2 +FIFF.FIFF_UNITM_M = -3 +FIFF.FIFF_UNITM_MU = -6 +FIFF.FIFF_UNITM_N = -9 +FIFF.FIFF_UNITM_P = -12 +FIFF.FIFF_UNITM_F = -15 +FIFF.FIFF_UNITM_A = -18 +_ch_unit_mul_named = { + key: key + for key in ( + FIFF.FIFF_UNITM_E, + FIFF.FIFF_UNITM_PET, + FIFF.FIFF_UNITM_T, + FIFF.FIFF_UNITM_GIG, + FIFF.FIFF_UNITM_MEG, + FIFF.FIFF_UNITM_K, + FIFF.FIFF_UNITM_H, + FIFF.FIFF_UNITM_DA, + FIFF.FIFF_UNITM_NONE, + FIFF.FIFF_UNITM_D, + FIFF.FIFF_UNITM_C, + FIFF.FIFF_UNITM_M, + FIFF.FIFF_UNITM_MU, + FIFF.FIFF_UNITM_N, + FIFF.FIFF_UNITM_P, + FIFF.FIFF_UNITM_F, + FIFF.FIFF_UNITM_A, + ) +} + +# +# Coil types +# +FIFF.FIFFV_COIL_NONE = 0 # The location info contains no data +FIFF.FIFFV_COIL_EEG = 1 # EEG electrode position in 
r0 +FIFF.FIFFV_COIL_NM_122 = 2 # Neuromag 122 coils +FIFF.FIFFV_COIL_NM_24 = 3 # Old 24 channel system in HUT +FIFF.FIFFV_COIL_NM_MCG_AXIAL = 4 # The axial devices in the HUCS MCG system +FIFF.FIFFV_COIL_EEG_BIPOLAR = 5 # Bipolar EEG lead +FIFF.FIFFV_COIL_EEG_CSD = 6 # CSD-transformed EEG lead + +FIFF.FIFFV_COIL_DIPOLE = 200 # Time-varying dipole definition +# The coil info contains dipole location (r0) and +# direction (ex) +FIFF.FIFFV_COIL_FNIRS_HBO = 300 # fNIRS oxyhemoglobin +FIFF.FIFFV_COIL_FNIRS_HBR = 301 # fNIRS deoxyhemoglobin +FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE = 302 # fNIRS continuous wave amplitude +FIFF.FIFFV_COIL_FNIRS_OD = 303 # fNIRS optical density +FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE = 304 # fNIRS frequency domain AC amplitude +FIFF.FIFFV_COIL_FNIRS_FD_PHASE = 305 # fNIRS frequency domain phase +FIFF.FIFFV_COIL_FNIRS_RAW = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE # old alias +FIFF.FIFFV_COIL_FNIRS_TD_GATED_AMPLITUDE = 306 # fNIRS time-domain gated amplitude +FIFF.FIFFV_COIL_FNIRS_TD_MOMENTS_AMPLITUDE = 307 # fNIRS time-domain moments amplitude + +FIFF.FIFFV_COIL_EYETRACK_POS = 400 # Eye-tracking gaze position +FIFF.FIFFV_COIL_EYETRACK_PUPIL = 401 # Eye-tracking pupil size + +FIFF.FIFFV_COIL_MCG_42 = 1000 # For testing the MCG software + +FIFF.FIFFV_COIL_POINT_MAGNETOMETER = 2000 # Simple point magnetometer +FIFF.FIFFV_COIL_AXIAL_GRAD_5CM = 2001 # Generic axial gradiometer + +FIFF.FIFFV_COIL_VV_PLANAR_W = 3011 # VV prototype wirewound planar sensor +FIFF.FIFFV_COIL_VV_PLANAR_T1 = 3012 # Vectorview SQ20483N planar gradiometer +FIFF.FIFFV_COIL_VV_PLANAR_T2 = 3013 # Vectorview SQ20483N-A planar gradiometer +FIFF.FIFFV_COIL_VV_PLANAR_T3 = 3014 # Vectorview SQ20950N planar gradiometer +FIFF.FIFFV_COIL_VV_PLANAR_T4 = 3015 # Vectorview planar gradiometer (MEG-MRI) +FIFF.FIFFV_COIL_VV_MAG_W = 3021 # VV prototype wirewound magnetometer +FIFF.FIFFV_COIL_VV_MAG_T1 = 3022 # Vectorview SQ20483N magnetometer +FIFF.FIFFV_COIL_VV_MAG_T2 = 3023 # Vectorview SQ20483-A magnetometer +FIFF.FIFFV_COIL_VV_MAG_T3 = 3024 # Vectorview SQ20950N magnetometer +FIFF.FIFFV_COIL_VV_MAG_T4 = 3025 # Vectorview magnetometer (MEG-MRI) + +FIFF.FIFFV_COIL_MAGNES_MAG = 4001 # Magnes WH magnetometer +FIFF.FIFFV_COIL_MAGNES_GRAD = 4002 # Magnes WH gradiometer +# +# Magnes reference sensors +# +FIFF.FIFFV_COIL_MAGNES_REF_MAG = 4003 +FIFF.FIFFV_COIL_MAGNES_REF_GRAD = 4004 +FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD = 4005 +FIFF.FIFFV_COIL_MAGNES_R_MAG = FIFF.FIFFV_COIL_MAGNES_REF_MAG +FIFF.FIFFV_COIL_MAGNES_R_GRAD = FIFF.FIFFV_COIL_MAGNES_REF_GRAD +FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF = FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD + +# +# CTF coil and channel types +# +FIFF.FIFFV_COIL_CTF_GRAD = 5001 +FIFF.FIFFV_COIL_CTF_REF_MAG = 5002 +FIFF.FIFFV_COIL_CTF_REF_GRAD = 5003 +FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD = 5004 +# +# KIT system coil types +# +FIFF.FIFFV_COIL_KIT_GRAD = 6001 +FIFF.FIFFV_COIL_KIT_REF_MAG = 6002 +# +# BabySQUID sensors +# +FIFF.FIFFV_COIL_BABY_GRAD = 7001 +# +# BabyMEG sensors +# +FIFF.FIFFV_COIL_BABY_MAG = 7002 +FIFF.FIFFV_COIL_BABY_REF_MAG = 7003 +FIFF.FIFFV_COIL_BABY_REF_MAG2 = 7004 +# +# Artemis123 sensors +# +FIFF.FIFFV_COIL_ARTEMIS123_GRAD = 7501 +FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG = 7502 +FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD = 7503 +# +# QuSpin sensors +# +FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG = 8001 +FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2 = 8002 +# +# FieldLine sensors +# +FIFF.FIFFV_COIL_FIELDLINE_OPM_MAG_GEN1 = 8101 +# +# Kernel sensors +# +FIFF.FIFFV_COIL_KERNEL_OPM_MAG_GEN1 = 8201 +# +# KRISS sensors +# 
+FIFF.FIFFV_COIL_KRISS_GRAD = 9001 +# +# Compumedics adult/pediatric gradiometer +# +FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD = 9101 +FIFF.FIFFV_COIL_COMPUMEDICS_PEDIATRIC_GRAD = 9102 +_ch_coil_type_named = { + key: key + for key in ( + FIFF.FIFFV_COIL_NONE, + FIFF.FIFFV_COIL_EEG, + FIFF.FIFFV_COIL_NM_122, + FIFF.FIFFV_COIL_NM_24, + FIFF.FIFFV_COIL_NM_MCG_AXIAL, + FIFF.FIFFV_COIL_EEG_BIPOLAR, + FIFF.FIFFV_COIL_EEG_CSD, + FIFF.FIFFV_COIL_DIPOLE, + FIFF.FIFFV_COIL_FNIRS_HBO, + FIFF.FIFFV_COIL_FNIRS_HBR, + FIFF.FIFFV_COIL_FNIRS_RAW, + FIFF.FIFFV_COIL_FNIRS_OD, + FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE, + FIFF.FIFFV_COIL_FNIRS_FD_PHASE, + FIFF.FIFFV_COIL_FNIRS_TD_GATED_AMPLITUDE, + FIFF.FIFFV_COIL_FNIRS_TD_MOMENTS_AMPLITUDE, + FIFF.FIFFV_COIL_MCG_42, + FIFF.FIFFV_COIL_EYETRACK_POS, + FIFF.FIFFV_COIL_EYETRACK_PUPIL, + FIFF.FIFFV_COIL_POINT_MAGNETOMETER, + FIFF.FIFFV_COIL_AXIAL_GRAD_5CM, + FIFF.FIFFV_COIL_VV_PLANAR_W, + FIFF.FIFFV_COIL_VV_PLANAR_T1, + FIFF.FIFFV_COIL_VV_PLANAR_T2, + FIFF.FIFFV_COIL_VV_PLANAR_T3, + FIFF.FIFFV_COIL_VV_PLANAR_T4, + FIFF.FIFFV_COIL_VV_MAG_W, + FIFF.FIFFV_COIL_VV_MAG_T1, + FIFF.FIFFV_COIL_VV_MAG_T2, + FIFF.FIFFV_COIL_VV_MAG_T3, + FIFF.FIFFV_COIL_VV_MAG_T4, + FIFF.FIFFV_COIL_MAGNES_MAG, + FIFF.FIFFV_COIL_MAGNES_GRAD, + FIFF.FIFFV_COIL_MAGNES_REF_MAG, + FIFF.FIFFV_COIL_MAGNES_REF_GRAD, + FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD, + FIFF.FIFFV_COIL_CTF_GRAD, + FIFF.FIFFV_COIL_CTF_REF_MAG, + FIFF.FIFFV_COIL_CTF_REF_GRAD, + FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD, + FIFF.FIFFV_COIL_KIT_GRAD, + FIFF.FIFFV_COIL_KIT_REF_MAG, + FIFF.FIFFV_COIL_BABY_GRAD, + FIFF.FIFFV_COIL_BABY_MAG, + FIFF.FIFFV_COIL_BABY_REF_MAG, + FIFF.FIFFV_COIL_BABY_REF_MAG2, + FIFF.FIFFV_COIL_ARTEMIS123_GRAD, + FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG, + FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD, + FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG, + FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2, + FIFF.FIFFV_COIL_FIELDLINE_OPM_MAG_GEN1, + FIFF.FIFFV_COIL_KERNEL_OPM_MAG_GEN1, + FIFF.FIFFV_COIL_KRISS_GRAD, + FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD, + FIFF.FIFFV_COIL_COMPUMEDICS_PEDIATRIC_GRAD, + ) +} + +# MNE RealTime +FIFF.FIFF_MNE_RT_COMMAND = 3700 # realtime command +FIFF.FIFF_MNE_RT_CLIENT_ID = 3701 # realtime client + +# MNE epochs bookkeeping +FIFF.FIFF_MNE_EPOCHS_SELECTION = 3800 # the epochs selection +FIFF.FIFF_MNE_EPOCHS_DROP_LOG = 3801 # the drop log +FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT = 3802 # rejection and flat params +FIFF.FIFF_MNE_EPOCHS_RAW_SFREQ = 3803 # original raw sfreq + +# MNE annotations +FIFF.FIFFB_MNE_ANNOTATIONS = 3810 # annotations block + +# MNE Metadata Dataframes +FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block + +# Table to match unrecognized channel location names to their known aliases +CHANNEL_LOC_ALIASES = { + # this set of aliases are published in doi:10.1097/WNP.0000000000000316 and + # doi:10.1016/S1388-2457(00)00527-7. + "Cb1": "POO7", + "Cb2": "POO8", + "CB1": "POO7", + "CB2": "POO8", + "T1": "T9", + "T2": "T10", + "T3": "T7", + "T4": "T8", + "T5": "T9", + "T6": "T10", + "M1": "TP9", + "M2": "TP10", + # EGI ref chan is named VREF/Vertex Ref. + # In the standard montages for EGI, the ref is named Cz + "VREF": "Cz", + "Vertex Reference": "Cz" + # add a comment here (with doi of a published source) above any new + # aliases, as they are added +} diff --git a/mne/_fiff/ctf_comp.py b/mne/_fiff/ctf_comp.py new file mode 100644 index 00000000000..87269badb8e --- /dev/null +++ b/mne/_fiff/ctf_comp.py @@ -0,0 +1,189 @@ +# Authors: The MNE-Python contributors. 
+# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from copy import deepcopy + +import numpy as np + +from ..utils import _pl, logger, verbose +from .constants import FIFF +from .matrix import _read_named_matrix, write_named_matrix +from .tag import read_tag +from .tree import dir_tree_find +from .write import end_block, start_block, write_int + + +def _add_kind(one): + """Convert CTF kind to MNE kind.""" + if one["ctfkind"] == int("47314252", 16): + one["kind"] = 1 + elif one["ctfkind"] == int("47324252", 16): + one["kind"] = 2 + elif one["ctfkind"] == int("47334252", 16): + one["kind"] = 3 + else: + one["kind"] = int(one["ctfkind"]) + + +def _calibrate_comp( + comp, chs, row_names, col_names, mult_keys=("range", "cal"), flip=False +): + """Get row and column cals.""" + ch_names = [c["ch_name"] for c in chs] + row_cals = np.zeros(len(row_names)) + col_cals = np.zeros(len(col_names)) + for names, cals, inv in zip( + (row_names, col_names), (row_cals, col_cals), (False, True) + ): + for ii in range(len(cals)): + p = ch_names.count(names[ii]) + if p != 1: + raise RuntimeError( + f"Channel {names[ii]} does not appear exactly once " + f"in data, found {p:d} instance{_pl(p)}" + ) + idx = ch_names.index(names[ii]) + val = chs[idx][mult_keys[0]] * chs[idx][mult_keys[1]] + val = float(1.0 / val) if inv else float(val) + val = 1.0 / val if flip else val + cals[ii] = val + comp["rowcals"] = row_cals + comp["colcals"] = col_cals + comp["data"]["data"] = row_cals[:, None] * comp["data"]["data"] * col_cals[None, :] + + +@verbose +def read_ctf_comp(fid, node, chs, verbose=None): + """Read the CTF software compensation data from the given node. + + Parameters + ---------- + fid : file + The file descriptor. + node : dict + The node in the FIF tree. + chs : list + The list of channels from info['chs'] to match with + compensators that are read. + %(verbose)s + + Returns + ------- + compdata : list + The compensation data + """ + return _read_ctf_comp(fid, node, chs, None) + + +def _read_ctf_comp(fid, node, chs, ch_names_mapping): + """Read the CTF software compensation data from the given node. + + Parameters + ---------- + fid : file + The file descriptor. + node : dict + The node in the FIF tree. + chs : list + The list of channels from info['chs'] to match with + compensators that are read. + ch_names_mapping : dict | None + The channel renaming to use. + %(verbose)s + + Returns + ------- + compdata : list + The compensation data + """ + from .meas_info import _rename_comps + + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping + compdata = [] + comps = dir_tree_find(node, FIFF.FIFFB_MNE_CTF_COMP_DATA) + + for node in comps: + # Read the data we need + mat = _read_named_matrix(fid, node, FIFF.FIFF_MNE_CTF_COMP_DATA) + for p in range(node["nent"]): + kind = node["directory"][p].kind + pos = node["directory"][p].pos + if kind == FIFF.FIFF_MNE_CTF_COMP_KIND: + tag = read_tag(fid, pos) + break + else: + raise Exception("Compensation type not found") + + # Get the compensation kind and map it to a simple number + one = dict(ctfkind=tag.data.item()) + del tag + _add_kind(one) + for p in range(node["nent"]): + kind = node["directory"][p].kind + pos = node["directory"][p].pos + if kind == FIFF.FIFF_MNE_CTF_COMP_CALIBRATED: + tag = read_tag(fid, pos) + calibrated = tag.data + break + else: + calibrated = False + + one["save_calibrated"] = bool(calibrated) + one["data"] = mat + _rename_comps([one], ch_names_mapping) + if not calibrated: + # Calibrate... 
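+ # _calibrate_comp() above scales the compensation matrix in place by each
+ # channel's total calibration factor (range * cal): rows are multiplied by it
+ # and columns are divided by it, with the per-channel factors kept in
+ # comp["rowcals"] and comp["colcals"].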
+ _calibrate_comp(one, chs, mat["row_names"], mat["col_names"]) + else: + one["rowcals"] = np.ones(mat["data"].shape[0], dtype=np.float64) + one["colcals"] = np.ones(mat["data"].shape[1], dtype=np.float64) + + compdata.append(one) + + if len(compdata) > 0: + logger.info(f" Read {len(compdata)} compensation matrices") + + return compdata + + +############################################################################### +# Writing + + +def write_ctf_comp(fid, comps): + """Write the CTF compensation data into a fif file. + + Parameters + ---------- + fid : file + The open FIF file descriptor + + comps : list + The compensation data to write + """ + if len(comps) <= 0: + return + + # This is very simple in fact + start_block(fid, FIFF.FIFFB_MNE_CTF_COMP) + for comp in comps: + start_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA) + # Write the compensation kind + write_int(fid, FIFF.FIFF_MNE_CTF_COMP_KIND, comp["ctfkind"]) + if comp.get("save_calibrated", False): + write_int(fid, FIFF.FIFF_MNE_CTF_COMP_CALIBRATED, comp["save_calibrated"]) + + if not comp.get("save_calibrated", True): + # Undo calibration + comp = deepcopy(comp) + data = ( + (1.0 / comp["rowcals"][:, None]) + * comp["data"]["data"] + * (1.0 / comp["colcals"][None, :]) + ) + comp["data"]["data"] = data + write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp["data"]) + end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA) + + end_block(fid, FIFF.FIFFB_MNE_CTF_COMP) diff --git a/mne/_fiff/matrix.py b/mne/_fiff/matrix.py new file mode 100644 index 00000000000..f27d86808f6 --- /dev/null +++ b/mne/_fiff/matrix.py @@ -0,0 +1,137 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ..utils import logger +from .constants import FIFF +from .tag import find_tag, has_tag +from .write import ( + end_block, + start_block, + write_float_matrix, + write_int, + write_name_list, +) + + +def _transpose_named_matrix(mat): + """Transpose mat inplace (no copy).""" + mat["nrow"], mat["ncol"] = mat["ncol"], mat["nrow"] + mat["row_names"], mat["col_names"] = mat["col_names"], mat["row_names"] + mat["data"] = mat["data"].T + + +def _read_named_matrix(fid, node, matkind, indent=" ", transpose=False): + """Read named matrix from the given node. + + Parameters + ---------- + fid : file + The opened file descriptor. + node : dict + The node in the tree. + matkind : int + The type of matrix. + transpose : bool + If True, transpose the matrix. Default is False. 
+ %(verbose)s + + Returns + ------- + mat: dict + The matrix data + """ + # Descend one level if necessary + if node["block"] != FIFF.FIFFB_MNE_NAMED_MATRIX: + for k in range(node["nchild"]): + if node["children"][k]["block"] == FIFF.FIFFB_MNE_NAMED_MATRIX: + if has_tag(node["children"][k], matkind): + node = node["children"][k] + break + else: + logger.info( + f"{indent}Desired named matrix (kind = {matkind}) not available" + ) + return None + else: + if not has_tag(node, matkind): + logger.info( + f"{indent}Desired named matrix (kind = {matkind}) not available" + ) + return None + + # Read everything we need + tag = find_tag(fid, node, matkind) + if tag is None: + raise ValueError("Matrix data missing") + else: + data = tag.data + + nrow, ncol = data.shape + tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW) + if tag is not None and tag.data != nrow: + raise ValueError( + "Number of rows in matrix data and FIFF_MNE_NROW tag do not match" + ) + + tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL) + if tag is not None and tag.data != ncol: + raise ValueError( + "Number of columns in matrix data and FIFF_MNE_NCOL tag do not match" + ) + + tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES) + row_names = tag.data.split(":") if tag is not None else [] + + tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES) + col_names = tag.data.split(":") if tag is not None else [] + + mat = dict( + nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names, data=data + ) + if transpose: + _transpose_named_matrix(mat) + return mat + + +def write_named_matrix(fid, kind, mat): + """Write named matrix from the given node. + + Parameters + ---------- + fid : file + The opened file descriptor. + kind : int + The kind of the matrix. + matkind : int + The type of matrix. + """ + # let's save ourselves from disaster + n_tot = mat["nrow"] * mat["ncol"] + if mat["data"].size != n_tot: + ratio = n_tot / float(mat["data"].size) + if n_tot < mat["data"].size and ratio > 0: + ratio = 1 / ratio + raise ValueError( + f"Cannot write matrix: row ({mat['nrow']}) and column ({mat['ncol']}) " + f"total element ({n_tot}) mismatch with data size ({mat['data'].size}), " + f"appears to be off by a factor of {ratio:g}x" + ) + start_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX) + write_int(fid, FIFF.FIFF_MNE_NROW, mat["nrow"]) + write_int(fid, FIFF.FIFF_MNE_NCOL, mat["ncol"]) + + if len(mat["row_names"]) > 0: + # let's prevent unintentional stupidity + if len(mat["row_names"]) != mat["nrow"]: + raise ValueError('len(mat["row_names"]) != mat["nrow"]') + write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, mat["row_names"]) + + if len(mat["col_names"]) > 0: + # let's prevent unintentional stupidity + if len(mat["col_names"]) != mat["ncol"]: + raise ValueError('len(mat["col_names"]) != mat["ncol"]') + write_name_list(fid, FIFF.FIFF_MNE_COL_NAMES, mat["col_names"]) + + write_float_matrix(fid, kind, mat["data"]) + end_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX) diff --git a/mne/_fiff/meas_info.py b/mne/_fiff/meas_info.py new file mode 100644 index 00000000000..95a9f04e6f1 --- /dev/null +++ b/mne/_fiff/meas_info.py @@ -0,0 +1,3791 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
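+ # How Info key validation works (see ValidatedDict.__setitem__ and
+ # Info._attributes below): keys mapped to a checker function are validated and
+ # coerced on assignment, while keys mapped to a plain string raise that string
+ # as a RuntimeError unless the instance is temporarily unlocked. A minimal
+ # sketch, assuming an existing Info instance ``info``:
+ #
+ #     info["line_freq"] = 50        # checked as numeric/None, cast to 50.0
+ #     info["description"] = "rest"  # checked as str/None
+ #     info["sfreq"] = 1000.0        # RuntimeError: "sfreq cannot be set
+ #                                   # directly. ..."
+ #     with info._unlock():
+ #         info["sfreq"] = 1000.0    # allowed while unlocked (as methods such
+ #                                   # as set_meas_date() do internally)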
+ +import contextlib +import datetime +import operator +import re +import string +from collections import Counter, OrderedDict +from collections.abc import Mapping +from copy import deepcopy +from functools import partial +from io import BytesIO +from textwrap import shorten + +import numpy as np + +from ..defaults import _handle_default +from ..html_templates import _get_html_template +from ..utils import ( + _check_fname, + _check_on_missing, + _check_option, + _dt_to_stamp, + _on_missing, + _pl, + _stamp_to_dt, + _validate_type, + check_fname, + fill_doc, + logger, + object_diff, + repr_html, + verbose, + warn, +) +from ._digitization import ( + DigPoint, + _dig_kind_ints, + _dig_kind_proper, + _dig_kind_rev, + _format_dig_points, + _get_data_as_dict_from_dig, + _read_dig_fif, + write_dig, +) +from .compensator import get_current_comp +from .constants import FIFF, _ch_unit_mul_named +from .ctf_comp import _read_ctf_comp, write_ctf_comp +from .open import fiff_open +from .pick import ( + _DATA_CH_TYPES_SPLIT, + _contains_ch_type, + _picks_to_idx, + channel_type, + get_channel_type_constants, + pick_types, +) +from .proc_history import _read_proc_history, _write_proc_history +from .proj import ( + Projection, + _normalize_proj, + _proj_equal, + _read_proj, + _uniquify_projs, + _write_proj, +) +from .tag import ( + _ch_coord_dict, + _float_item, + _int_item, + _rename_list, + _update_ch_info_named, + find_tag, + read_tag, +) +from .tree import dir_tree_find +from .write import ( + DATE_NONE, + _safe_name_list, + end_block, + start_and_end_file, + start_block, + write_ch_info, + write_coord_trans, + write_dig_points, + write_float, + write_float_matrix, + write_id, + write_int, + write_julian, + write_name_list_sanitized, + write_string, +) + +b = bytes # alias + +_SCALAR_CH_KEYS = ( + "scanno", + "logno", + "kind", + "range", + "cal", + "coil_type", + "unit", + "unit_mul", + "coord_frame", +) +_ALL_CH_KEYS_SET = set(_SCALAR_CH_KEYS + ("loc", "ch_name")) +# XXX we need to require these except when doing simplify_info +_MIN_CH_KEYS_SET = set(("kind", "cal", "unit", "loc", "ch_name")) + + +def _get_valid_units(): + """Get valid units according to the International System of Units (SI). + + The International System of Units (SI, :footcite:`WikipediaSI`) is the + default system for describing units in the Brain Imaging Data Structure + (BIDS). For more information, see the BIDS specification + :footcite:`BIDSdocs` and the appendix "Units" therein. + + References + ---------- + .. 
footbibliography:: + """ + valid_prefix_names = [ + "yocto", + "zepto", + "atto", + "femto", + "pico", + "nano", + "micro", + "milli", + "centi", + "deci", + "deca", + "hecto", + "kilo", + "mega", + "giga", + "tera", + "peta", + "exa", + "zetta", + "yotta", + ] + valid_prefix_symbols = [ + "y", + "z", + "a", + "f", + "p", + "n", + "µ", + "m", + "c", + "d", + "da", + "h", + "k", + "M", + "G", + "T", + "P", + "E", + "Z", + "Y", + ] + valid_unit_names = [ + "metre", + "kilogram", + "second", + "ampere", + "kelvin", + "mole", + "candela", + "radian", + "steradian", + "hertz", + "newton", + "pascal", + "joule", + "watt", + "coulomb", + "volt", + "farad", + "ohm", + "siemens", + "weber", + "tesla", + "henry", + "degree Celsius", + "lumen", + "lux", + "becquerel", + "gray", + "sievert", + "katal", + ] + valid_unit_symbols = [ + "m", + "kg", + "s", + "A", + "K", + "mol", + "cd", + "rad", + "sr", + "Hz", + "N", + "Pa", + "J", + "W", + "C", + "V", + "F", + "Ω", + "S", + "Wb", + "T", + "H", + "°C", + "lm", + "lx", + "Bq", + "Gy", + "Sv", + "kat", + ] + + # Valid units are all possible combinations of either prefix name or prefix + # symbol together with either unit name or unit symbol. E.g., nV for + # nanovolt + valid_units = [] + valid_units += [ + "".join([prefix, unit]) + for prefix in valid_prefix_names + for unit in valid_unit_names + ] + valid_units += [ + "".join([prefix, unit]) + for prefix in valid_prefix_names + for unit in valid_unit_symbols + ] + valid_units += [ + "".join([prefix, unit]) + for prefix in valid_prefix_symbols + for unit in valid_unit_names + ] + valid_units += [ + "".join([prefix, unit]) + for prefix in valid_prefix_symbols + for unit in valid_unit_symbols + ] + + # units are also valid without a prefix + valid_units += valid_unit_names + valid_units += valid_unit_symbols + + # we also accept "n/a" as a unit, which is the default missing value in + # BIDS + valid_units += ["n/a"] + + return tuple(valid_units) + + +@verbose +def _unique_channel_names(ch_names, max_length=None, verbose=None): + """Ensure unique channel names.""" + suffixes = tuple(string.ascii_lowercase) + if max_length is not None: + ch_names[:] = [name[:max_length] for name in ch_names] + unique_ids = np.unique(ch_names, return_index=True)[1] + if len(unique_ids) != len(ch_names): + dups = {ch_names[x] for x in np.setdiff1d(range(len(ch_names)), unique_ids)} + warn( + "Channel names are not unique, found duplicates for: " + f"{dups}. Applying running numbers for duplicates." + ) + for ch_stem in dups: + overlaps = np.where(np.array(ch_names) == ch_stem)[0] + # We need an extra character since we append '-'. + # np.ceil(...) is the maximum number of appended digits. + if max_length is not None: + n_keep = max_length - 1 - int(np.ceil(np.log10(len(overlaps)))) + else: + n_keep = np.inf + n_keep = min(len(ch_stem), n_keep) + ch_stem = ch_stem[:n_keep] + for idx, ch_idx in enumerate(overlaps): + # try idx first, then loop through lower case chars + for suffix in (idx,) + suffixes: + ch_name = ch_stem + f"-{suffix}" + if ch_name not in ch_names: + break + if ch_name not in ch_names: + ch_names[ch_idx] = ch_name + else: + raise ValueError( + "Adding a single alphanumeric for a " + "duplicate resulted in another " + f"duplicate name {ch_name}" + ) + return ch_names + + +# %% Mixin classes + + +class MontageMixin: + """Mixin for Montage getting and setting.""" + + @fill_doc + def get_montage(self): + """Get a DigMontage from instance. 
+ + Returns + ------- + montage : None | DigMontage + A copy of the channel positions, if available, otherwise ``None``. + """ + from ..channels.montage import make_dig_montage + from ..transforms import _frame_to_str + + info = self if isinstance(self, Info) else self.info + if info["dig"] is None: + return None + # obtain coord_frame, and landmark coords + # (nasion, lpa, rpa, hsp, hpi) from DigPoints + montage_bunch = _get_data_as_dict_from_dig(info["dig"]) + coord_frame = _frame_to_str.get(montage_bunch.coord_frame) + + # get the channel names and chs data structure + ch_names, chs = info["ch_names"], info["chs"] + picks = pick_types( + info, + meg=False, + eeg=True, + seeg=True, + ecog=True, + dbs=True, + fnirs=True, + exclude=[], + ) + + # channel positions from dig do not match ch_names one to one, + # so use loc[:3] instead + ch_pos = {ch_names[ii]: chs[ii]["loc"][:3] for ii in picks} + + # fNIRS uses multiple channels for the same sensors, we use + # a private function to format these for dig montage. + fnirs_picks = pick_types(info, fnirs=True, exclude=[]) + if len(ch_pos) == len(fnirs_picks): + ch_pos = _get_fnirs_ch_pos(info) + elif len(fnirs_picks) > 0: + raise ValueError( + "MNE does not support getting the montage " + "for a mix of fNIRS and other data types. " + "Please raise a GitHub issue if you " + "require this feature." + ) + + # create montage + montage = make_dig_montage( + ch_pos=ch_pos, + coord_frame=coord_frame, + nasion=montage_bunch.nasion, + lpa=montage_bunch.lpa, + rpa=montage_bunch.rpa, + hsp=montage_bunch.hsp, + hpi=montage_bunch.hpi, + ) + return montage + + @verbose + def set_montage( + self, + montage, + match_case=True, + match_alias=False, + on_missing="raise", + verbose=None, + ): + """Set %(montage_types)s channel positions and digitization points. + + Parameters + ---------- + %(montage)s + %(match_case)s + %(match_alias)s + %(on_missing_montage)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The instance, modified in-place. + + See Also + -------- + mne.channels.make_standard_montage + mne.channels.make_dig_montage + mne.channels.read_custom_montage + + Notes + ----- + .. warning:: + Only %(montage_types)s channels can have their positions set using + a montage. Other channel types (e.g., MEG channels) should have + their positions defined properly using their data reading + functions. + .. warning:: + Applying a montage will only set locations of channels that exist + at the time it is applied. 
This means when + :ref:`re-referencing ` + make sure to apply the montage only after calling + :func:`mne.add_reference_channels` + """ + # How to set up a montage to old named fif file (walk through example) + # https://gist.github.com/massich/f6a9f4799f1fbeb8f5e8f8bc7b07d3df + + from ..channels.montage import _set_montage + + info = self if isinstance(self, Info) else self.info + _set_montage(info, montage, match_case, match_alias, on_missing) + return self + + +channel_type_constants = get_channel_type_constants(include_defaults=True) +_human2fiff = { + k: v.get("kind", FIFF.FIFFV_COIL_NONE) for k, v in channel_type_constants.items() +} +_human2unit = { + k: v.get("unit", FIFF.FIFF_UNIT_NONE) for k, v in channel_type_constants.items() +} +_unit2human = { + FIFF.FIFF_UNIT_V: "V", + FIFF.FIFF_UNIT_T: "T", + FIFF.FIFF_UNIT_T_M: "T/m", + FIFF.FIFF_UNIT_MOL: "M", + FIFF.FIFF_UNIT_NONE: "NA", + FIFF.FIFF_UNIT_CEL: "C", + FIFF.FIFF_UNIT_S: "S", + FIFF.FIFF_UNIT_PX: "px", +} + + +def _check_set(ch, projs, ch_type): + """Ensure type change is compatible with projectors.""" + new_kind = _human2fiff[ch_type] + if ch["kind"] != new_kind: + for proj in projs: + if ch["ch_name"] in proj["data"]["col_names"]: + raise RuntimeError( + f"Cannot change channel type for channel {ch['ch_name']} in " + f'projector "{proj["desc"]}"' + ) + ch["kind"] = new_kind + + +class SetChannelsMixin(MontageMixin): + """Mixin class for Raw, Evoked, Epochs.""" + + def _get_channel_positions(self, picks=None): + """Get channel locations from info. + + Parameters + ---------- + picks : str | list | slice | None + None gets good data indices. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + info = self if isinstance(self, Info) else self.info + picks = _picks_to_idx(info, picks) + chs = info["chs"] + pos = np.array([chs[k]["loc"][:3] for k in picks]) + n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0) + if n_zero > 1: # XXX some systems have origin (0, 0, 0) + raise ValueError( + f"Could not extract channel positions for {n_zero} channels" + ) + return pos + + def _set_channel_positions(self, pos, names): + """Update channel locations in info. + + Parameters + ---------- + pos : array-like | np.ndarray, shape (n_points, 3) + The channel positions to be set. + names : list of str + The names of the channels to be set. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + info = self if isinstance(self, Info) else self.info + if len(pos) != len(names): + raise ValueError( + "Number of channel positions not equal to the number of names given." + ) + pos = np.asarray(pos, dtype=np.float64) + if pos.shape[-1] != 3 or pos.ndim != 2: + msg = ( + f"Channel positions must have the shape (n_points, 3) not {pos.shape}." + ) + raise ValueError(msg) + for name, p in zip(names, pos): + if name in self.ch_names: + idx = self.ch_names.index(name) + info["chs"][idx]["loc"][:3] = p + else: + msg = f"{name} was not found in the info. Cannot be updated." + raise ValueError(msg) + + @verbose + def set_channel_types(self, mapping, *, on_unit_change="warn", verbose=None): + """Specify the sensor types of channels. + + Parameters + ---------- + mapping : dict + A dictionary mapping channel names to sensor types, e.g., + ``{'EEG061': 'eog'}``. + on_unit_change : ``'raise'`` | ``'warn'`` | ``'ignore'`` + What to do if the measurement unit of a channel is changed + automatically to match the new sensor type. + + .. versionadded:: 1.4 + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The instance (modified in place). 
+ + .. versionchanged:: 0.20 + Return the instance. + + Notes + ----- + The following :term:`sensor types` are accepted: + + bio, chpi, csd, dbs, dipole, ecg, ecog, eeg, emg, eog, exci, + eyegaze, fnirs_cw_amplitude, fnirs_fd_ac_amplitude, fnirs_fd_phase, + fnirs_od, gof, gsr, hbo, hbr, ias, misc, pupil, ref_meg, resp, + seeg, stim, syst, temperature. + + When working with eye-tracking data, see + :func:`mne.preprocessing.eyetracking.set_channel_types_eyetrack`. + + .. versionadded:: 0.9.0 + """ + info = self if isinstance(self, Info) else self.info + ch_names = info["ch_names"] + + # first check and assemble clean mappings of index and name + unit_changes = dict() + for ch_name, ch_type in mapping.items(): + if ch_name not in ch_names: + raise ValueError( + f"This channel name ({ch_name}) doesn't exist in info." + ) + + c_ind = ch_names.index(ch_name) + if ch_type not in _human2fiff: + raise ValueError( + f"This function cannot change to this channel type: {ch_type}. " + "Accepted channel types are " + f"{', '.join(sorted(_human2unit.keys()))}." + ) + # Set sensor type + _check_set(info["chs"][c_ind], info["projs"], ch_type) + unit_old = info["chs"][c_ind]["unit"] + unit_new = _human2unit[ch_type] + if unit_old not in _unit2human: + raise ValueError( + f"Channel '{ch_name}' has unknown unit ({unit_old}). Please fix the" + " measurement info of your data." + ) + if unit_old != _human2unit[ch_type]: + this_change = (_unit2human[unit_old], _unit2human[unit_new]) + if this_change not in unit_changes: + unit_changes[this_change] = list() + unit_changes[this_change].append(ch_name) + # reset unit multiplication factor since the unit has now changed + info["chs"][c_ind]["unit_mul"] = _ch_unit_mul_named[0] + info["chs"][c_ind]["unit"] = _human2unit[ch_type] + if ch_type in ["eeg", "seeg", "ecog", "dbs"]: + coil_type = FIFF.FIFFV_COIL_EEG + elif ch_type == "hbo": + coil_type = FIFF.FIFFV_COIL_FNIRS_HBO + elif ch_type == "hbr": + coil_type = FIFF.FIFFV_COIL_FNIRS_HBR + elif ch_type == "fnirs_cw_amplitude": + coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE + elif ch_type == "fnirs_fd_ac_amplitude": + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE + elif ch_type == "fnirs_fd_phase": + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_PHASE + elif ch_type == "fnirs_od": + coil_type = FIFF.FIFFV_COIL_FNIRS_OD + elif ch_type == "eyetrack_pos": + coil_type = FIFF.FIFFV_COIL_EYETRACK_POS + elif ch_type == "eyetrack_pupil": + coil_type = FIFF.FIFFV_COIL_EYETRACK_PUPIL + else: + coil_type = FIFF.FIFFV_COIL_NONE + info["chs"][c_ind]["coil_type"] = coil_type + + msg = "The unit for channel(s) {0} has changed from {1} to {2}." + for this_change, names in unit_changes.items(): + _on_missing( + on_missing=on_unit_change, + msg=msg.format(", ".join(sorted(names)), *this_change), + name="on_unit_change", + ) + + return self + + @verbose + def rename_channels( + self, mapping, allow_duplicates=False, *, on_missing="raise", verbose=None + ): + """Rename channels. + + Parameters + ---------- + %(mapping_rename_channels_duplicates)s + %(on_missing_ch_names)s + + .. versionadded:: 1.11.0 + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The instance (modified in place). + + .. versionchanged:: 0.20 + Return the instance. + + Notes + ----- + .. 
versionadded:: 0.9.0 + """ + from ..channels.channels import rename_channels + from ..io import BaseRaw + + info = self if isinstance(self, Info) else self.info + + ch_names_orig = list(info["ch_names"]) + rename_channels(info, mapping, allow_duplicates, on_missing=on_missing) + + # Update self._orig_units for Raw + if isinstance(self, BaseRaw): + # whatever mapping was provided, now we can just use a dict + mapping = dict(zip(ch_names_orig, info["ch_names"])) + for old_name, new_name in mapping.items(): + if old_name in self._orig_units: + self._orig_units[new_name] = self._orig_units.pop(old_name) + ch_names = self.annotations.ch_names + for ci, ch in enumerate(ch_names): + ch_names[ci] = tuple(mapping.get(name, name) for name in ch) + + return self + + @verbose + def plot_sensors( + self, + kind="topomap", + ch_type=None, + title=None, + show_names=False, + ch_groups=None, + to_sphere=True, + axes=None, + block=False, + show=True, + sphere=None, + *, + verbose=None, + ): + """Plot sensor positions. + + Parameters + ---------- + kind : str + Whether to plot the sensors as 3d, topomap or as an interactive + sensor selection dialog. Available options 'topomap', '3d', + 'select'. If 'select', a set of channels can be selected + interactively by using lasso selector or clicking while holding + control key. The selected channels are returned along with the + figure instance. Defaults to 'topomap'. + ch_type : None | str + The channel type to plot. Available options ``'mag'``, ``'grad'``, + ``'eeg'``, ``'seeg'``, ``'dbs'``, ``'ecog'``, ``'all'``. If ``'all'``, all + the available mag, grad, eeg, seeg, dbs, and ecog channels are plotted. If + None (default), then channels are chosen in the order given above. + title : str | None + Title for the figure. If None (default), equals to ``'Sensor + positions (%%s)' %% ch_type``. + show_names : bool | array of str + Whether to display all channel names. If an array, only the channel + names in the array are shown. Defaults to False. + ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None + Channel groups for coloring the sensors. If None (default), default + coloring scheme is used. If 'position', the sensors are divided + into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If + array, the channels are divided by picks given in the array. + + .. versionadded:: 0.13.0 + to_sphere : bool + Whether to project the 3d locations to a sphere. When False, the + sensor array appears similar as to looking downwards straight above + the subject's head. Has no effect when kind='3d'. Defaults to True. + + .. versionadded:: 0.14.0 + axes : instance of Axes | instance of Axes3D | None + Axes to draw the sensors to. If ``kind='3d'``, axes must be an + instance of Axes3D. If None (default), a new axes will be created. + + .. versionadded:: 0.13.0 + block : bool + Whether to halt program execution until the figure is closed. + Defaults to False. + + .. versionadded:: 0.13.0 + show : bool + Show figure if True. Defaults to True. + %(sphere_topomap_auto)s + %(verbose)s + + Returns + ------- + fig : instance of Figure + Figure containing the sensor topography. + selection : list + A list of selected channels. Only returned if ``kind=='select'``. + + See Also + -------- + mne.viz.plot_layout + + Notes + ----- + This function plots the sensor locations from the info structure using + matplotlib. For drawing the sensors using PyVista see + :func:`mne.viz.plot_alignment`. + + .. 
versionadded:: 0.12.0 + """ + from ..viz.utils import plot_sensors + + return plot_sensors( + self if isinstance(self, Info) else self.info, + kind=kind, + ch_type=ch_type, + title=title, + show_names=show_names, + ch_groups=ch_groups, + to_sphere=to_sphere, + axes=axes, + block=block, + show=show, + sphere=sphere, + verbose=verbose, + ) + + @verbose + def anonymize(self, daysback=None, keep_his=False, verbose=None): + """Anonymize measurement information in place. + + Parameters + ---------- + %(daysback_anonymize_info)s + %(keep_his_anonymize_info)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The modified instance. + + Notes + ----- + %(anonymize_info_notes)s + + .. versionadded:: 0.13.0 + """ + info = self if isinstance(self, Info) else self.info + anonymize_info(info, daysback=daysback, keep_his=keep_his, verbose=verbose) + self.set_meas_date(info["meas_date"]) # unify annot update + return self + + def set_meas_date(self, meas_date): + """Set the measurement start date. + + Parameters + ---------- + meas_date : datetime | float | tuple | None + The new measurement date. + If datetime object, it must be timezone-aware and in UTC. + A tuple of (seconds, microseconds) or float (alias for + ``(meas_date, 0)``) can also be passed and a datetime + object will be automatically created. If None, will remove + the time reference. + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The modified raw instance. Operates in place. + + See Also + -------- + mne.io.Raw.anonymize + + Notes + ----- + If you want to remove all time references in the file, call + :func:`mne.io.anonymize_info(inst.info) ` + after calling ``inst.set_meas_date(None)``. + + .. versionadded:: 0.20 + """ + from ..annotations import _handle_meas_date + + info = self if isinstance(self, Info) else self.info + + meas_date = _handle_meas_date(meas_date) + with info._unlock(): + info["meas_date"] = meas_date + + # clear file_id and meas_id if needed + if meas_date is None: + for key in ("file_id", "meas_id"): + value = info.get(key) + if value is not None: + assert "msecs" not in value + value["secs"] = DATE_NONE[0] + value["usecs"] = DATE_NONE[1] + # The following copy is needed for a test CTF dataset + # otherwise value['machid'][:] = 0 would suffice + _tmp = value["machid"].copy() + _tmp[:] = 0 + value["machid"] = _tmp + + if hasattr(self, "annotations"): + self.annotations._orig_time = meas_date + return self + + +class ContainsMixin: + """Mixin class for Raw, Evoked, Epochs and Info.""" + + def __contains__(self, ch_type): + """Check channel type membership. + + Parameters + ---------- + ch_type : str + Channel type to check for. Can be e.g. ``'meg'``, ``'eeg'``, + ``'stim'``, etc. + + Returns + ------- + in : bool + Whether or not the instance contains the given channel type. + + Examples + -------- + Channel type membership can be tested as:: + + >>> 'meg' in inst # doctest: +SKIP + True + >>> 'seeg' in inst # doctest: +SKIP + False + + """ + # this method is not supported by Info object. An Info object inherits from a + # dictionary and the 'key' in Info call is present all across MNE codebase, e.g. 
+ # to check for the presence of a key: + # >>> 'bads' in info + if ch_type == "meg": + has_ch_type = _contains_ch_type(self.info, "mag") or _contains_ch_type( + self.info, "grad" + ) + else: + has_ch_type = _contains_ch_type(self.info, ch_type) + return has_ch_type + + @property + def compensation_grade(self): + """The current gradient compensation grade.""" + info = self if isinstance(self, Info) else self.info + return get_current_comp(info) + + @fill_doc + def get_channel_types(self, picks=None, unique=False, only_data_chs=False): + """Get a list of channel type for each channel. + + Parameters + ---------- + %(picks_all)s + unique : bool + Whether to return only unique channel types. Default is ``False``. + only_data_chs : bool + Whether to ignore non-data channels. Default is ``False``. + + Returns + ------- + channel_types : list + The channel types. + """ + info = self if isinstance(self, Info) else self.info + none = "data" if only_data_chs else "all" + picks = _picks_to_idx(info, picks, none, (), allow_empty=False) + ch_types = [channel_type(info, pick) for pick in picks] + if only_data_chs: + ch_types = [ + ch_type for ch_type in ch_types if ch_type in _DATA_CH_TYPES_SPLIT + ] + if unique: + # set does not preserve order but dict does, so let's just use it + ch_types = list({k: k for k in ch_types}.keys()) + return ch_types + + +# %% ValidatedDict class + + +class ValidatedDict(dict): + _attributes = {} # subclasses should set this to validated attributes + + def __init__(self, *args, **kwargs): + self._unlocked = True + super().__init__(*args, **kwargs) + self._unlocked = False + + def __getstate__(self): + """Get state (for pickling).""" + return {"_unlocked": self._unlocked} + + def __setstate__(self, state): + """Set state (for pickling).""" + self._unlocked = state["_unlocked"] + + def __setitem__(self, key, val): + """Attribute setter.""" + # During unpickling, the _unlocked attribute has not been set, so + # let __setstate__ do it later and act unlocked now + unlocked = getattr(self, "_unlocked", True) + if key in self._attributes: + if isinstance(self._attributes[key], str): + if not unlocked: + raise RuntimeError(self._attributes[key]) + else: + val = self._attributes[key]( + val, info=self + ) # attribute checker function + else: + class_name = self.__class__.__name__ + extra = "" + if "temp" in self._attributes: + var_name = _camel_to_snake(class_name) + extra = ( + f"You can set {var_name}['temp'] to store temporary objects in " + f"{class_name} instances, but these will not survive an I/O " + "round-trip." + ) + raise RuntimeError( + f"{class_name} does not support directly setting the key {repr(key)}. " + + extra + ) + super().__setitem__(key, val) + + def update(self, other=None, **kwargs): + """Update method using __setitem__().""" + iterable = other.items() if isinstance(other, Mapping) else other + if other is not None: + for key, val in iterable: + self[key] = val + for key, val in kwargs.items(): + self[key] = val + + def copy(self): + """Copy the instance. + + Returns + ------- + info : instance of Info + The copied info. 
+ """ + return deepcopy(self) + + def __repr__(self): + """Return a string representation.""" + mapping = ", ".join(f"{key}: {val}" for key, val in self.items()) + return f"<{_camel_to_snake(self.__class__.__name__)} | {mapping}>" + + +# %% Subject info + + +def _check_types(x, *, info, name, types, cast=None): + _validate_type(x, types, name) + if cast is not None and x is not None: + x = cast(x) + return x + + +def _check_bday(birthday_input, *, info): + date = _check_types( + birthday_input, + info=info, + name='subject_info["birthday"]', + types=(datetime.date, None), + ) + # test if we have a pd.Timestamp + if hasattr(date, "date"): + date = date.date() + return date + + +class SubjectInfo(ValidatedDict): + _attributes = { + "id": partial(_check_types, name='subject_info["id"]', types=int), + "his_id": partial(_check_types, name='subject_info["his_id"]', types=str), + "last_name": partial(_check_types, name='subject_info["last_name"]', types=str), + "first_name": partial( + _check_types, name='subject_info["first_name"]', types=str + ), + "middle_name": partial( + _check_types, name='subject_info["middle_name"]', types=str + ), + "birthday": partial(_check_bday), + "sex": partial(_check_types, name='subject_info["sex"]', types=int), + "hand": partial(_check_types, name='subject_info["hand"]', types=int), + "weight": partial( + _check_types, name='subject_info["weight"]', types="numeric", cast=float + ), + "height": partial( + _check_types, name='subject_info["height"]', types="numeric", cast=float + ), + } + + def __init__(self, initial): + _validate_type(initial, dict, "subject_info") + super().__init__() + for key, val in initial.items(): + self[key] = val + + +class HeliumInfo(ValidatedDict): + _attributes = { + "he_level_raw": partial( + _check_types, + name='helium_info["he_level_raw"]', + types="numeric", + cast=float, + ), + "helium_level": partial( + _check_types, + name='helium_info["helium_level"]', + types="numeric", + cast=float, + ), + "orig_file_guid": partial( + _check_types, name='helium_info["orig_file_guid"]', types=str + ), + "meas_date": partial( + _check_types, + name='helium_info["meas_date"]', + types=(datetime.datetime, None), + ), + } + + def __init__(self, initial): + _validate_type(initial, dict, "helium_info") + super().__init__() + for key, val in initial.items(): + self[key] = val + + +# %% Info class and helpers + + +def _format_trans(obj, key): + from ..transforms import Transform + + try: + t = obj[key] + except KeyError: + pass + else: + if t is not None: + obj[key] = Transform(t["from"], t["to"], t["trans"]) + + +def _check_ch_keys(ch, ci, name='info["chs"]', check_min=True): + ch_keys = set(ch) + bad = sorted(ch_keys.difference(_ALL_CH_KEYS_SET)) + if bad: + raise KeyError(f"key{_pl(bad)} errantly present for {name}[{ci}]: {bad}") + if check_min: + bad = sorted(_MIN_CH_KEYS_SET.difference(ch_keys)) + if bad: + raise KeyError( + f"key{_pl(bad)} missing for {name}[{ci}]: {bad}", + ) + + +def _check_bads_info_compat(bads, info): + _validate_type(bads, list, "bads") + if not len(bads): + return # e.g. in empty_info + for bi, bad in enumerate(bads): + _validate_type(bad, str, f"bads[{bi}]") + if "ch_names" not in info: # somewhere in init, or deepcopy, or _empty_info, etc. 
+ return + missing = [bad for bad in bads if bad not in info["ch_names"]] + if len(missing) > 0: + raise ValueError(f"bad channel(s) {missing} marked do not exist in info") + + +class MNEBadsList(list): + """Subclass of bads that checks inplace operations.""" + + def __init__(self, *, bads, info): + _check_bads_info_compat(bads, info) + self._mne_info = info + super().__init__(bads) + + def extend(self, iterable): + if not isinstance(iterable, list): + iterable = list(iterable) + # can happen during pickling + try: + info = self._mne_info + except AttributeError: + pass # can happen during pickling + else: + _check_bads_info_compat(iterable, info) + return super().extend(iterable) + + def append(self, x): + return self.extend([x]) + + def __iadd__(self, x): + self.extend(x) + return self + + +# As options are added here, test_meas_info.py:test_info_bad should be updated +def _check_bads(bads, *, info): + return MNEBadsList(bads=bads, info=info) + + +def _check_dev_head_t(dev_head_t, *, info): + from ..transforms import Transform, _ensure_trans + + _validate_type(dev_head_t, (Transform, None), "info['dev_head_t']") + if dev_head_t is not None: + dev_head_t = _ensure_trans(dev_head_t, "meg", "head") + return dev_head_t + + +# TODO: Add fNIRS convention to loc +class Info(ValidatedDict, SetChannelsMixin, MontageMixin, ContainsMixin): + """Measurement information. + + This data structure behaves like a dictionary. It contains all metadata + that is available for a recording. However, its keys are restricted to + those provided by the + `FIF format specification `__, + so new entries should not be manually added. + + .. note:: + This class should not be instantiated directly via + ``mne.Info(...)``. Instead, use :func:`mne.create_info` to create + measurement information from scratch. + + .. warning:: + The only entries that should be manually changed by the user are: + ``info['bads']``, ``info['description']``, ``info['device_info']``, + ``info['proj_id']``, ``info['proj_name']``, ``info['dev_head_t']``, + ``info['experimenter']``, ``info['helium_info']``, + ``info['line_freq']``, ``info['temp']``, and ``info['subject_info']``. + + All other entries should be considered read-only, though they can be + modified by various MNE-Python functions or methods (which have + safeguards to ensure all fields remain in sync). + + Parameters + ---------- + *args : list + Arguments. + **kwargs : dict + Keyword arguments. + + Attributes + ---------- + acq_pars : str | None + MEG system acquisition parameters. + See :class:`mne.AcqParserFIF` for details. + acq_stim : str | None + MEG system stimulus parameters. + bads : list of str + List of bad (noisy/broken) channels, by name. These channels will by + default be ignored by many processing steps. + ch_names : list of str + The names of the channels. + chs : list of dict + A list of channel information dictionaries, one per channel. + See Notes for more information. + command_line : str + Contains the command and arguments used to create the source space + (used for source estimation). + comps : list of dict + CTF software gradient compensation data. + See Notes for more information. + ctf_head_t : Transform | None + The transformation from 4D/CTF head coordinates to Neuromag head + coordinates. This is only present in 4D/CTF data. + custom_ref_applied : int + Whether a custom (=other than an average projector) reference has been + applied to the EEG data. This flag is checked by some algorithms that + require an average reference to be set. 
+ description : str | None + String description of the recording. + dev_ctf_t : Transform | None + The transformation from device coordinates to 4D/CTF head coordinates. + This is only present in 4D/CTF data. + dev_head_t : Transform | None + The device to head transformation. + device_info : dict | None + Information about the acquisition device. See Notes for details. + + .. versionadded:: 0.19 + dig : list of dict | None + The Polhemus digitization data in head coordinates. + See Notes for more information. + events : list of dict + Event list, sometimes extracted from the stim channels by Neuromag + systems. In general this should not be used and + :func:`mne.find_events` should be used for event processing. + See Notes for more information. + experimenter : str | None + Name of the person that ran the experiment. + file_id : dict | None + The FIF globally unique ID. See Notes for more information. + gantry_angle : float | None + Tilt angle of the gantry in degrees. + helium_info : dict | None + Information about the device helium. See Notes for details. + + .. versionadded:: 0.19 + highpass : float + Highpass corner frequency in Hertz. Zero indicates a DC recording. + hpi_meas : list of dict + HPI measurements that were taken at the start of the recording + (e.g. coil frequencies). + See Notes for details. + hpi_results : list of dict + Head position indicator (HPI) digitization points and fit information + (e.g., the resulting transform). + See Notes for details. + hpi_subsystem : dict | None + Information about the HPI subsystem that was used (e.g., event + channel used for cHPI measurements). + See Notes for details. + kit_system_id : int + Identifies the KIT system. + line_freq : float | None + Frequency of the power line in Hertz. + lowpass : float + Lowpass corner frequency in Hertz. + It is automatically set to half the sampling rate if there is + otherwise no low-pass applied to the data. + maxshield : bool + True if active shielding (IAS) was active during recording. + meas_date : datetime + The time (UTC) of the recording. + + .. versionchanged:: 0.20 + This is stored as a :class:`~python:datetime.datetime` object + instead of a tuple of seconds/microseconds. + meas_file : str | None + Raw measurement file (used for source estimation). + meas_id : dict | None + The ID assigned to this measurement by the acquisition system or + during file conversion. Follows the same format as ``file_id``. + mri_file : str | None + File containing the MRI to head transformation (used for source + estimation). + mri_head_t : dict | None + Transformation from MRI to head coordinates (used for source + estimation). + mri_id : dict | None + MRI unique ID (used for source estimation). + nchan : int + Number of channels. + proc_history : list of dict + The MaxFilter processing history. + See Notes for details. + proj_id : int | None + ID number of the project the experiment belongs to. + proj_name : str | None + Name of the project the experiment belongs to. + projs : list of Projection + List of SSP operators that operate on the data. + See :class:`mne.Projection` for details. + sfreq : float + Sampling frequency in Hertz. + subject_info : dict | None + Information about the subject. + See Notes for details. + temp : object | None + Can be used to store temporary objects in an Info instance. It will not + survive an I/O roundtrip. + + .. versionadded:: 0.24 + utc_offset : str + "UTC offset of related meas_date (sHH:MM). + + .. 
versionadded:: 0.19 + working_dir : str + Working directory used when the source space was created (used for + source estimation). + xplotter_layout : str + Layout of the Xplotter (Neuromag system only). + + See Also + -------- + mne.create_info + mne.pick_info + + Notes + ----- + The following parameters have a nested structure. + + * ``chs`` list of dict: + + cal : float + The calibration factor to bring the channels to physical + units. Used in product with ``range`` to scale the data read + from disk. + ch_name : str + The channel name. + coil_type : int + Coil type, e.g. ``FIFFV_COIL_MEG``. + coord_frame : int + The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``. + kind : int + The kind of channel, e.g. ``FIFFV_EEG_CH``. + loc : array, shape (12,) + Channel location information. The first three elements ``[:3]`` always store + the nominal channel position. The remaining 9 elements store different + information based on the channel type: + + MEG + Remaining 9 elements ``[3:]``, contain the EX, EY, and EZ normal + triplets (columns) of the coil rotation/orientation matrix. + EEG + Elements ``[3:6]`` contain the reference channel position. + Eyetrack + Element ``[3]`` contains information about which eye was tracked + (-1 for left, 1 for right), and element ``[4]`` contains information + about the the axis of coordinate data (-1 for x-coordinate data, 1 for + y-coordinate data). + Dipole + Elements ``[3:6]`` contain dipole orientation information. + logno : int + Logical channel number, conventions in the usage of this + number vary. + range : float + The hardware-oriented part of the calibration factor. + This should be only applied to the continuous raw data. + Used in product with ``cal`` to scale data read from disk. + scanno : int + Scanning order number, starting from 1. + unit : int + The unit to use, e.g. ``FIFF_UNIT_T_M``. + unit_mul : int + Unit multipliers, most commonly ``FIFF_UNITM_NONE``. + + * ``comps`` list of dict: + + ctfkind : int + CTF compensation grade. + colcals : ndarray + Column calibrations. + mat : dict + A named matrix dictionary (with entries "data", "col_names", etc.) + containing the compensation matrix. + rowcals : ndarray + Row calibrations. + save_calibrated : bool + Were the compensation data saved in calibrated form. + + * ``device_info`` dict: + + type : str + Device type. + model : str + Device model. + serial : str + Device serial. + site : str + Device site. + + * ``dig`` list of dict: + + kind : int + The kind of channel, + e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``. + r : array, shape (3,) + 3D position in m. and coord_frame. + ident : int + Number specifying the identity of the point. + e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``, or + 42 if kind is ``FIFFV_POINT_EEG``. + coord_frame : int + The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``. + + * ``events`` list of dict: + + channels : list of int + Channel indices for the events. + list : ndarray, shape (n_events * 3,) + Events in triplets as number of samples, before, after. + + * ``file_id`` dict: + + version : int + FIF format version, i.e. ``FIFFC_VERSION``. + machid : ndarray, shape (2,) + Unique machine ID, usually derived from the MAC address. + secs : int + Time in seconds. + usecs : int + Time in microseconds. + + * ``helium_info`` dict: + + he_level_raw : float + Helium level (%) before position correction. + helium_level : float + Helium level (%) after position correction. + orig_file_guid : str + Original file GUID. 
+ meas_date : datetime.datetime | None + The helium level meas date. + + .. versionchanged:: 1.8 + This is stored as a :class:`~python:datetime.datetime` object + instead of a tuple of seconds/microseconds. + + * ``hpi_meas`` list of dict: + + creator : str + Program that did the measurement. + sfreq : float + Sample rate. + nchan : int + Number of channels used. + nave : int + Number of averages used. + ncoil : int + Number of coils used. + first_samp : int + First sample used. + last_samp : int + Last sample used. + hpi_coils : list of dict + Coils, containing: + + number: int + Coil number + epoch : ndarray + Buffer containing one epoch and channel. + slopes : ndarray, shape (n_channels,) + HPI data. + corr_coeff : ndarray, shape (n_channels,) + HPI curve fit correlations. + coil_freq : float + HPI coil excitation frequency + + * ``hpi_results`` list of dict: + + dig_points : list + Digitization points (see ``dig`` definition) for the HPI coils. + order : ndarray, shape (ncoil,) + The determined digitization order. + used : ndarray, shape (nused,) + The indices of the used coils. + moments : ndarray, shape (ncoil, 3) + The coil moments. + goodness : ndarray, shape (ncoil,) + The goodness of fits. + good_limit : float + The goodness of fit limit. + dist_limit : float + The distance limit. + accept : int + Whether or not the fit was accepted. + coord_trans : instance of Transform + The resulting MEG<->head transformation. + + * ``hpi_subsystem`` dict: + + ncoil : int + The number of coils. + event_channel : str + The event channel used to encode cHPI status (e.g., STI201). + hpi_coils : list of ndarray + List of length ``ncoil``, each 4-element ndarray contains the + event bits used on the event channel to indicate cHPI status + (using the first element of these arrays is typically + sufficient). + + * ``mri_id`` dict: + + version : int + FIF format version, i.e. ``FIFFC_VERSION``. + machid : ndarray, shape (2,) + Unique machine ID, usually derived from the MAC address. + secs : int + Time in seconds. + usecs : int + Time in microseconds. + + * ``proc_history`` list of dict: + + block_id : dict + See ``id`` above. + date : ndarray, shape (2,) + 2-element tuple of seconds and microseconds. + experimenter : str + Name of the person who ran the program. + creator : str + Program that did the processing. + max_info : dict + Maxwel filtering info, can contain: + + sss_info : dict + SSS processing information. + max_st + tSSS processing information. + sss_ctc : dict + Cross-talk processing information. + sss_cal : dict + Fine-calibration information. + smartshield : dict + MaxShield information. This dictionary is (always?) empty, + but its presence implies that MaxShield was used during + acquisition. + + * ``subject_info`` dict: + + id : int + Integer subject identifier. + his_id : str + String subject identifier. + last_name : str + Last name. + first_name : str + First name. + middle_name : str + Middle name. + birthday : datetime.date + The subject birthday. + + .. versionchanged:: 1.8 + This is stored as a :class:`~python:datetime.date` object + instead of a tuple of seconds/microseconds. + sex : int + Subject sex (0=unknown, 1=male, 2=female). + hand : int + Handedness (1=right, 2=left, 3=ambidextrous). + weight : float + Weight in kilograms. + height : float + Height in meters. + """ + + _attributes = { + "acq_pars": "acq_pars cannot be set directly. 
" + "See mne.AcqParserFIF() for details.", + "acq_stim": "acq_stim cannot be set directly.", + "bads": _check_bads, + "ch_names": "ch_names cannot be set directly. " + "Please use methods inst.add_channels(), " + "inst.drop_channels(), inst.pick(), " + "inst.rename_channels(), inst.reorder_channels() " + "and inst.set_channel_types() instead.", + "chs": "chs cannot be set directly. " + "Please use methods inst.add_channels(), " + "inst.drop_channels(), inst.pick(), " + "inst.rename_channels(), inst.reorder_channels() " + "and inst.set_channel_types() instead.", + "command_line": "command_line cannot be set directly.", + "comps": "comps cannot be set directly. " + "Please use method Raw.apply_gradient_compensation() " + "instead.", + "ctf_head_t": "ctf_head_t cannot be set directly.", + "custom_ref_applied": "custom_ref_applied cannot be set directly. " + "Please use method inst.set_eeg_reference() " + "instead.", + "description": partial(_check_types, name="description", types=(str, None)), + "dev_ctf_t": "dev_ctf_t cannot be set directly.", + "dev_head_t": _check_dev_head_t, + "device_info": partial(_check_types, name="device_info", types=(dict, None)), + "dig": "dig cannot be set directly. " + "Please use method inst.set_montage() instead.", + "events": "events cannot be set directly.", + "experimenter": partial(_check_types, name="experimenter", types=(str, None)), + "file_id": "file_id cannot be set directly.", + "gantry_angle": "gantry_angle cannot be set directly.", + "helium_info": partial( + _check_types, name="helium_info", types=(dict, None), cast=HeliumInfo + ), + "highpass": "highpass cannot be set directly. " + "Please use method inst.filter() instead.", + "hpi_meas": "hpi_meas can not be set directly.", + "hpi_results": "hpi_results cannot be set directly.", + "hpi_subsystem": "hpi_subsystem cannot be set directly.", + "kit_system_id": "kit_system_id cannot be set directly.", + "line_freq": partial( + _check_types, name="line_freq", types=("numeric", None), cast=float + ), + "lowpass": "lowpass cannot be set directly. " + "Please use method inst.filter() instead.", + "maxshield": "maxshield cannot be set directly.", + "meas_date": "meas_date cannot be set directly. " + "Please use method inst.set_meas_date() instead.", + "meas_file": "meas_file cannot be set directly.", + "meas_id": "meas_id cannot be set directly.", + "mri_file": "mri_file cannot be set directly.", + "mri_head_t": "mri_head_t cannot be set directly.", + "mri_id": "mri_id cannot be set directly.", + "nchan": "nchan cannot be set directly. " + "Please use methods inst.add_channels(), " + "inst.drop_channels(), and inst.pick() instead.", + "proc_history": "proc_history cannot be set directly.", + "proj_id": partial(_check_types, name="proj_id", types=(int, None), cast=int), + "proj_name": partial(_check_types, name="proj_name", types=(str, None)), + "projs": "projs cannot be set directly. " + "Please use methods inst.add_proj() and inst.del_proj() " + "instead.", + "sfreq": "sfreq cannot be set directly. 
" + "Please use method inst.resample() instead.", + "subject_info": partial( + _check_types, name="subject_info", types=(dict, None), cast=SubjectInfo + ), + "temp": lambda x, info=None: x, + "utc_offset": "utc_offset cannot be set directly.", + "working_dir": "working_dir cannot be set directly.", + "xplotter_layout": "xplotter_layout cannot be set directly.", + } + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._unlocked = True + # Deal with h5io writing things as dict + if "bads" in self: + self["bads"] = MNEBadsList(bads=self["bads"], info=self) + for key in ("dev_head_t", "ctf_head_t", "dev_ctf_t"): + _format_trans(self, key) + for res in self.get("hpi_results", []): + _format_trans(res, "coord_trans") + if self.get("dig", None) is not None and len(self["dig"]): + if isinstance(self["dig"], dict): # needs to be unpacked + self["dig"] = _dict_unpack(self["dig"], _DIG_CAST) + if not isinstance(self["dig"][0], DigPoint): + self["dig"] = _format_dig_points(self["dig"]) + if isinstance(self.get("chs", None), dict): + self["chs"]["ch_name"] = [ + str(x) for x in np.char.decode(self["chs"]["ch_name"], encoding="utf8") + ] + self["chs"] = _dict_unpack(self["chs"], _CH_CAST) + for pi, proj in enumerate(self.get("projs", [])): + if not isinstance(proj, Projection): + self["projs"][pi] = Projection(**proj) + # Old files could have meas_date as tuple instead of datetime + try: + meas_date = self["meas_date"] + except KeyError: + pass + else: + self["meas_date"] = _ensure_meas_date_none_or_dt(meas_date) + self._unlocked = False + # with validation and casting + for key in ("helium_info", "subject_info"): + if key in self: + self[key] = self[key] + + def __setstate__(self, state): + """Set state (for pickling).""" + super().__setstate__(state) + self["bads"] = MNEBadsList(bads=self["bads"], info=self) + + @contextlib.contextmanager + def _unlock(self, *, update_redundant=False, check_after=False): + """Context manager unlocking access to attributes.""" + # needed for nested _unlock() + state = self._unlocked if hasattr(self, "_unlocked") else False + + self._unlocked = True + try: + yield + except Exception: + raise + else: + if update_redundant: + self._update_redundant() + if check_after: + self._check_consistency() + finally: + self._unlocked = state + + def normalize_proj(self): + """(Re-)Normalize projection vectors after subselection. + + Applying projection after sub-selecting a set of channels that + were originally used to compute the original projection vectors + can be dangerous (e.g., if few channels remain, most power was + in channels that are no longer picked, etc.). By default, mne + will emit a warning when this is done. + + This function will re-normalize projectors to use only the + remaining channels, thus avoiding that warning. Only use this + function if you're confident that the projection vectors still + adequately capture the original signal of interest. 
+ """ + _normalize_proj(self) + + def __repr__(self): + """Summarize info instead of printing all.""" + from ..io.kit.constants import KIT_SYSNAMES + from ..transforms import Transform, _coord_frame_name + + MAX_WIDTH = 68 + strs = [" {frame2} transform" + else: + entr = "" + elif k in ["sfreq", "lowpass", "highpass"]: + entr = f"{v:.1f} Hz" + elif isinstance(v, str): + entr = shorten(v, MAX_WIDTH, placeholder=" ...") + elif k == "chs": + # TODO someday we should refactor with _repr_html_ with + # bad vs good + ch_types = [channel_type(self, idx) for idx in range(len(v))] + ch_counts = Counter(ch_types) + entr = ", ".join( + f"{count} {titles.get(ch_type, ch_type.upper())}" + for ch_type, count in ch_counts.items() + ) + elif k == "custom_ref_applied": + entr = str(bool(v)) + if not v: + non_empty -= 1 # don't count if 0 + elif isinstance(v, ValidatedDict): + entr = repr(v) + else: + try: + this_len = len(v) + except TypeError: + entr = f"{v}" if v is not None else "" + else: + if this_len > 0: + entr = f"{this_len} item{_pl(this_len)} ({type(v).__name__})" + else: + entr = "" + if entr != "": + non_empty += 1 + strs.append(f"{k}: {entr}") + st = "\n ".join(sorted(strs)) + st += "\n>" + st %= non_empty + return st + + def __deepcopy__(self, memodict): + """Make a deepcopy.""" + result = Info.__new__(Info) + result._unlocked = True + for k, v in self.items(): + # chs is roughly half the time but most are immutable + if k == "chs": + # dict shallow copy is fast, so use it then overwrite + result[k] = list() + for ch in v: + ch = ch.copy() # shallow + ch["loc"] = ch["loc"].copy() + result[k].append(ch) + elif k == "ch_names": + # we know it's list of str, shallow okay and saves ~100 µs + result[k] = v.copy() + elif k == "hpi_meas": + hms = list() + for hm in v: + hm = hm.copy() + # the only mutable thing here is some entries in coils + hm["hpi_coils"] = [coil.copy() for coil in hm["hpi_coils"]] + # There is a *tiny* risk here that someone could write + # raw.info['hpi_meas'][0]['hpi_coils'][1]['epoch'] = ... 
+ # and assume that info.copy() will make an actual copy, + # but copying these entries has a 2x slowdown penalty so + # probably not worth it for such a deep corner case: + # for coil in hpi_coils: + # for key in ('epoch', 'slopes', 'corr_coeff'): + # coil[key] = coil[key].copy() + hms.append(hm) + result[k] = hms + else: + result[k] = deepcopy(v, memodict) + result._unlocked = False + return result + + def _check_consistency(self, prepend_error=""): + """Do some self-consistency checks and datatype tweaks.""" + meas_date = self.get("meas_date") + if meas_date is not None: + if ( + not isinstance(self["meas_date"], datetime.datetime) + or self["meas_date"].tzinfo is None + or self["meas_date"].tzinfo is not datetime.timezone.utc + ): + raise RuntimeError( + f'{prepend_error}info["meas_date"] must be a datetime object in UTC' + f" or None, got {repr(self['meas_date'])!r}" + ) + + chs = [ch["ch_name"] for ch in self["chs"]] + if ( + len(self["ch_names"]) != len(chs) + or any(ch_1 != ch_2 for ch_1, ch_2 in zip(self["ch_names"], chs)) + or self["nchan"] != len(chs) + ): + raise RuntimeError( + f"{prepend_error}info channel name inconsistency detected, please " + "notify MNE-Python developers" + ) + + # make sure we have the proper datatypes + with self._unlock(): + for key in ("sfreq", "highpass", "lowpass"): + if self.get(key) is not None: + self[key] = float(self[key]) + + for pi, proj in enumerate(self.get("projs", [])): + _validate_type(proj, Projection, f'info["projs"][{pi}]') + for key in ("kind", "active", "desc", "data", "explained_var"): + if key not in proj: + raise RuntimeError(f"Projection incomplete, missing {key}") + + # Ensure info['chs'] has immutable entries (copies much faster) + for ci, ch in enumerate(self["chs"]): + _check_ch_keys(ch, ci) + ch_name = ch["ch_name"] + _validate_type(ch_name, str, f'info["chs"][{ci}]["ch_name"]') + for key in _SCALAR_CH_KEYS: + val = ch.get(key, 1) + _validate_type(val, "numeric", f'info["chs"][{ci}][{key}]') + loc = ch["loc"] + if not (isinstance(loc, np.ndarray) and loc.shape == (12,)): + raise TypeError( + f'Bad info: info["chs"][{ci}]["loc"] must be ndarray with ' + f"12 elements, got {repr(loc)}" + ) + + # make sure channel names are unique + with self._unlock(): + self["ch_names"] = _unique_channel_names(self["ch_names"]) + for idx, ch_name in enumerate(self["ch_names"]): + self["chs"][idx]["ch_name"] = ch_name + + def _update_redundant(self): + """Update the redundant entries.""" + with self._unlock(): + self["ch_names"] = [ch["ch_name"] for ch in self["chs"]] + self["nchan"] = len(self["chs"]) + + @property + def ch_names(self): + try: + ch_names = self["ch_names"] + except KeyError: + ch_names = [] + + return ch_names + + @repr_html + def _repr_html_(self): + """Summarize info for HTML representation.""" + info_template = _get_html_template("repr", "info.html.jinja") + return info_template.render(info=self) + + @verbose + def save(self, fname, *, overwrite=False, verbose=None): + """Write measurement info in fif file. + + Parameters + ---------- + fname : path-like + The name of the file. Should end by ``'-info.fif'``. + %(overwrite)s + + .. 
versionadded:: 1.10 + %(verbose)s + + See Also + -------- + mne.io.write_info + """ + write_info(fname, self, overwrite=overwrite) + + +def _simplify_info(info, *, keep=()): + """Return a simplified info structure to speed up picking.""" + chs = [ + {key: ch[key] for key in ("ch_name", "kind", "unit", "coil_type", "loc", "cal")} + for ch in info["chs"] + ] + keys = ("bads", "comps", "projs", "custom_ref_applied") + keep + sub_info = Info((key, info[key]) for key in keys if key in info) + with sub_info._unlock(): + sub_info["chs"] = chs + sub_info._update_redundant() + return sub_info + + +@verbose +def read_fiducials(fname, *, verbose=None): + """Read fiducials from a fiff file. + + Parameters + ---------- + fname : path-like + The filename to read. + %(verbose)s + + Returns + ------- + pts : list of dict + List of digitizer points (each point in a dict). + coord_frame : int + The coordinate frame of the points (one of + ``mne.io.constants.FIFF.FIFFV_COORD_...``). + """ + fname = _check_fname(fname=fname, overwrite="read", must_exist=True) + fid, tree, _ = fiff_open(fname) + with fid: + pts = _read_dig_fif(fid, tree) + return pts, pts[0]["coord_frame"] + + +@verbose +def write_fiducials( + fname, pts, coord_frame="unknown", *, overwrite=False, verbose=None +): + """Write fiducials to a fiff file. + + Parameters + ---------- + fname : path-like + Destination file name. + pts : iterator of dict + Iterator through digitizer points. Each point is a dictionary with + the keys 'kind', 'ident' and 'r'. + coord_frame : str | int + The coordinate frame of the points. If a string, must be one of + ``'meg'``, ``'mri'``, ``'mri_voxel'``, ``'head'``, + ``'mri_tal'``, ``'ras'``, ``'fs_tal'``, ``'ctf_head'``, + ``'ctf_meg'``, and ``'unknown'`` + If an integer, must be one of the constants defined as + ``mne.io.constants.FIFF.FIFFV_COORD_...``. + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + """ + write_dig(fname, pts, coord_frame, overwrite=overwrite) + + +@verbose +def read_info(fname, verbose=None): + """Read measurement info from a file. + + Parameters + ---------- + fname : path-like + File name. + %(verbose)s + + Returns + ------- + %(info_not_none)s + """ + check_fname(fname, "Info", (".fif", ".fif.gz")) + fname = _check_fname(fname, must_exist=True, overwrite="read") + f, tree, _ = fiff_open(fname) + with f as fid: + info = read_meas_info(fid, tree)[0] + return info + + +def read_bad_channels(fid, node): + """Read bad channels. + + Parameters + ---------- + fid : file + The file descriptor. + node : dict + The node of the FIF tree that contains info on the bad channels. + + Returns + ------- + bads : list + A list of bad channel's names. 
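+
+    Notes
+    -----
+    This is a low-level helper; in user code the same information is normally
+    obtained through :func:`read_info` (``fname`` below is a hypothetical FIF
+    file path)::
+
+        info = read_info(fname)
+        info["bads"]  # list of bad channel names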
+ """ + return _read_bad_channels(fid, node) + + +def _read_bad_channels(fid, node, ch_names_mapping): + ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping + nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS) + + bads = [] + if len(nodes) > 0: + for node in nodes: + tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST) + if tag is not None and tag.data is not None: + bads = _safe_name_list(tag.data, "read", "bads") + bads[:] = _rename_list(bads, ch_names_mapping) + return bads + + +def _write_bad_channels(fid, bads, ch_names_mapping): + if bads is not None and len(bads) > 0: + ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping + bads = _rename_list(bads, ch_names_mapping) + start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) + write_name_list_sanitized(fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads, "bads") + end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) + + +@verbose +def read_meas_info(fid, tree, clean_bads=False, verbose=None): + """Read the measurement info. + + Parameters + ---------- + fid : file + Open file descriptor. + tree : tree + FIF tree structure. + clean_bads : bool + If True, clean info['bads'] before running consistency check. + Should only be needed for old files where we did not check bads + before saving. + %(verbose)s + + Returns + ------- + %(info_not_none)s + meas : dict + Node in tree that contains the info. + """ + from ..transforms import Transform, invert_transform + + # Find the desired blocks + meas = dir_tree_find(tree, FIFF.FIFFB_MEAS) + if len(meas) == 0: + raise ValueError("Could not find measurement data") + if len(meas) > 1: + raise ValueError("Cannot read more that 1 measurement data") + meas = meas[0] + + meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO) + if len(meas_info) == 0: + raise ValueError("Could not find measurement info") + if len(meas_info) > 1: + raise ValueError("Cannot read more that 1 measurement info") + meas_info = meas_info[0] + + # Read measurement info + dev_head_t = None + ctf_head_t = None + dev_ctf_t = None + meas_date = None + utc_offset = None + highpass = None + lowpass = None + nchan = None + sfreq = None + chs = [] + experimenter = None + description = None + proj_id = None + proj_name = None + line_freq = None + gantry_angle = None + custom_ref_applied = FIFF.FIFFV_MNE_CUSTOM_REF_OFF + xplotter_layout = None + kit_system_id = None + for k in range(meas_info["nent"]): + kind = meas_info["directory"][k].kind + pos = meas_info["directory"][k].pos + if kind == FIFF.FIFF_NCHAN: + tag = read_tag(fid, pos) + nchan = int(tag.data.item()) + elif kind == FIFF.FIFF_SFREQ: + tag = read_tag(fid, pos) + sfreq = float(tag.data.item()) + elif kind == FIFF.FIFF_CH_INFO: + tag = read_tag(fid, pos) + chs.append(tag.data) + elif kind == FIFF.FIFF_LOWPASS: + tag = read_tag(fid, pos) + if not np.isnan(tag.data.item()): + lowpass = float(tag.data.item()) + elif kind == FIFF.FIFF_HIGHPASS: + tag = read_tag(fid, pos) + if not np.isnan(tag.data): + highpass = float(tag.data.item()) + elif kind == FIFF.FIFF_MEAS_DATE: + tag = read_tag(fid, pos) + meas_date = tuple(tag.data) + if len(meas_date) == 1: # can happen from old C conversions + meas_date = (meas_date[0], 0) + elif kind == FIFF.FIFF_UTC_OFFSET: + tag = read_tag(fid, pos) + utc_offset = str(tag.data) + elif kind == FIFF.FIFF_COORD_TRANS: + tag = read_tag(fid, pos) + cand = tag.data + + if ( + cand["from"] == FIFF.FIFFV_COORD_DEVICE + and cand["to"] == FIFF.FIFFV_COORD_HEAD + ): + dev_head_t = cand + elif ( + cand["from"] == FIFF.FIFFV_COORD_HEAD + and 
cand["to"] == FIFF.FIFFV_COORD_DEVICE + ): + # this reversal can happen with BabyMEG data + dev_head_t = invert_transform(cand) + elif ( + cand["from"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD + and cand["to"] == FIFF.FIFFV_COORD_HEAD + ): + ctf_head_t = cand + elif ( + cand["from"] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE + and cand["to"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD + ): + dev_ctf_t = cand + elif kind == FIFF.FIFF_EXPERIMENTER: + tag = read_tag(fid, pos) + experimenter = tag.data + elif kind == FIFF.FIFF_DESCRIPTION: + tag = read_tag(fid, pos) + description = tag.data + elif kind == FIFF.FIFF_PROJ_ID: + tag = read_tag(fid, pos) + proj_id = int(tag.data.item()) + elif kind == FIFF.FIFF_PROJ_NAME: + tag = read_tag(fid, pos) + proj_name = tag.data + elif kind == FIFF.FIFF_LINE_FREQ: + tag = read_tag(fid, pos) + line_freq = float(tag.data.item()) + elif kind == FIFF.FIFF_GANTRY_ANGLE: + tag = read_tag(fid, pos) + gantry_angle = float(tag.data.item()) + elif kind in [FIFF.FIFF_MNE_CUSTOM_REF, 236]: # 236 used before v0.11 + tag = read_tag(fid, pos) + custom_ref_applied = int(tag.data.item()) + elif kind == FIFF.FIFF_XPLOTTER_LAYOUT: + tag = read_tag(fid, pos) + xplotter_layout = str(tag.data) + elif kind == FIFF.FIFF_MNE_KIT_SYSTEM_ID: + tag = read_tag(fid, pos) + kit_system_id = int(tag.data.item()) + ch_names_mapping = _read_extended_ch_info(chs, meas_info, fid) + + # Check that we have everything we need + if nchan is None: + raise ValueError("Number of channels is not defined") + + if sfreq is None: + raise ValueError("Sampling frequency is not defined") + + if len(chs) == 0: + raise ValueError("Channel information not defined") + + if len(chs) != nchan: + raise ValueError("Incorrect number of channel definitions found") + + if dev_head_t is None or ctf_head_t is None: + hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT) + if len(hpi_result) == 1: + hpi_result = hpi_result[0] + for k in range(hpi_result["nent"]): + kind = hpi_result["directory"][k].kind + pos = hpi_result["directory"][k].pos + if kind == FIFF.FIFF_COORD_TRANS: + tag = read_tag(fid, pos) + cand = tag.data + if ( + cand["from"] == FIFF.FIFFV_COORD_DEVICE + and cand["to"] == FIFF.FIFFV_COORD_HEAD + and dev_head_t is None + ): + dev_head_t = cand + elif ( + cand["from"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD + and cand["to"] == FIFF.FIFFV_COORD_HEAD + and ctf_head_t is None + ): + ctf_head_t = cand + + # Locate the Polhemus data + dig = _read_dig_fif(fid, meas_info) + + # Locate the acquisition information + acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS) + acq_pars = None + acq_stim = None + if len(acqpars) == 1: + acqpars = acqpars[0] + for k in range(acqpars["nent"]): + kind = acqpars["directory"][k].kind + pos = acqpars["directory"][k].pos + if kind == FIFF.FIFF_DACQ_PARS: + tag = read_tag(fid, pos) + acq_pars = tag.data + elif kind == FIFF.FIFF_DACQ_STIM: + tag = read_tag(fid, pos) + acq_stim = tag.data + + # Load the SSP data + projs = _read_proj(fid, meas_info, ch_names_mapping=ch_names_mapping) + + # Load the CTF compensation data + comps = _read_ctf_comp(fid, meas_info, chs, ch_names_mapping=ch_names_mapping) + + # Load the bad channel list + bads = _read_bad_channels(fid, meas_info, ch_names_mapping=ch_names_mapping) + + # + # Put the data together + # + info = Info(file_id=tree["id"]) + info._unlocked = True + + # Locate events list + events = dir_tree_find(meas_info, FIFF.FIFFB_EVENTS) + evs = list() + for event in events: + ev = dict() + for k in range(event["nent"]): + kind = event["directory"][k].kind + pos 
= event["directory"][k].pos + if kind == FIFF.FIFF_EVENT_CHANNELS: + ev["channels"] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_EVENT_LIST: + ev["list"] = read_tag(fid, pos).data + evs.append(ev) + info["events"] = evs + + # Locate HPI result + hpi_results = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT) + hrs = list() + for hpi_result in hpi_results: + hr = dict() + hr["dig_points"] = [] + for k in range(hpi_result["nent"]): + kind = hpi_result["directory"][k].kind + pos = hpi_result["directory"][k].pos + if kind == FIFF.FIFF_DIG_POINT: + hr["dig_points"].append(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_HPI_DIGITIZATION_ORDER: + hr["order"] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_COILS_USED: + hr["used"] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_COIL_MOMENTS: + hr["moments"] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_FIT_GOODNESS: + hr["goodness"] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_FIT_GOOD_LIMIT: + hr["good_limit"] = float(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_HPI_FIT_DIST_LIMIT: + hr["dist_limit"] = float(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_HPI_FIT_ACCEPT: + hr["accept"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_COORD_TRANS: + hr["coord_trans"] = read_tag(fid, pos).data + hrs.append(hr) + info["hpi_results"] = hrs + + # Locate HPI Measurement + hpi_meass = dir_tree_find(meas_info, FIFF.FIFFB_HPI_MEAS) + hms = list() + for hpi_meas in hpi_meass: + hm = dict() + for k in range(hpi_meas["nent"]): + kind = hpi_meas["directory"][k].kind + pos = hpi_meas["directory"][k].pos + if kind == FIFF.FIFF_CREATOR: + hm["creator"] = str(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_SFREQ: + hm["sfreq"] = float(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_NCHAN: + hm["nchan"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_NAVE: + hm["nave"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_HPI_NCOIL: + hm["ncoil"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_FIRST_SAMPLE: + hm["first_samp"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_LAST_SAMPLE: + hm["last_samp"] = int(read_tag(fid, pos).data.item()) + hpi_coils = dir_tree_find(hpi_meas, FIFF.FIFFB_HPI_COIL) + hcs = [] + for hpi_coil in hpi_coils: + hc = dict() + for k in range(hpi_coil["nent"]): + kind = hpi_coil["directory"][k].kind + pos = hpi_coil["directory"][k].pos + if kind == FIFF.FIFF_HPI_COIL_NO: + hc["number"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_EPOCH: + hc["epoch"] = read_tag(fid, pos).data + hc["epoch"].flags.writeable = False + elif kind == FIFF.FIFF_HPI_SLOPES: + hc["slopes"] = read_tag(fid, pos).data + hc["slopes"].flags.writeable = False + elif kind == FIFF.FIFF_HPI_CORR_COEFF: + hc["corr_coeff"] = read_tag(fid, pos).data + hc["corr_coeff"].flags.writeable = False + elif kind == FIFF.FIFF_HPI_COIL_FREQ: + hc["coil_freq"] = float(read_tag(fid, pos).data.item()) + hcs.append(hc) + hm["hpi_coils"] = hcs + hms.append(hm) + info["hpi_meas"] = hms + del hms + + subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT) + si = None + if len(subject_info) == 1: + subject_info = subject_info[0] + si = dict() + for k in range(subject_info["nent"]): + kind = subject_info["directory"][k].kind + pos = subject_info["directory"][k].pos + if kind == FIFF.FIFF_SUBJ_ID: + tag = read_tag(fid, pos) + si["id"] = int(tag.data.item()) + elif kind == FIFF.FIFF_SUBJ_HIS_ID: + tag = read_tag(fid, pos) + si["his_id"] = 
str(tag.data) + elif kind == FIFF.FIFF_SUBJ_LAST_NAME: + tag = read_tag(fid, pos) + si["last_name"] = str(tag.data) + elif kind == FIFF.FIFF_SUBJ_FIRST_NAME: + tag = read_tag(fid, pos) + si["first_name"] = str(tag.data) + elif kind == FIFF.FIFF_SUBJ_MIDDLE_NAME: + tag = read_tag(fid, pos) + si["middle_name"] = str(tag.data) + elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY: + try: + tag = read_tag(fid, pos) + except OverflowError: + warn( + "Encountered an error while trying to read the " + "birthday from the input data. No birthday will be " + "set. Please check the integrity of the birthday " + "information in the input data." + ) + continue + si["birthday"] = tag.data + elif kind == FIFF.FIFF_SUBJ_SEX: + tag = read_tag(fid, pos) + si["sex"] = int(tag.data.item()) + elif kind == FIFF.FIFF_SUBJ_HAND: + tag = read_tag(fid, pos) + si["hand"] = int(tag.data.item()) + elif kind == FIFF.FIFF_SUBJ_WEIGHT: + tag = read_tag(fid, pos) + si["weight"] = float(tag.data.item()) + elif kind == FIFF.FIFF_SUBJ_HEIGHT: + tag = read_tag(fid, pos) + si["height"] = float(tag.data.item()) + info["subject_info"] = si + del si + + device_info = dir_tree_find(meas_info, FIFF.FIFFB_DEVICE) + di = None + if len(device_info) == 1: + device_info = device_info[0] + di = dict() + for k in range(device_info["nent"]): + kind = device_info["directory"][k].kind + pos = device_info["directory"][k].pos + if kind == FIFF.FIFF_DEVICE_TYPE: + tag = read_tag(fid, pos) + di["type"] = str(tag.data) + elif kind == FIFF.FIFF_DEVICE_MODEL: + tag = read_tag(fid, pos) + di["model"] = str(tag.data) + elif kind == FIFF.FIFF_DEVICE_SERIAL: + tag = read_tag(fid, pos) + di["serial"] = str(tag.data) + elif kind == FIFF.FIFF_DEVICE_SITE: + tag = read_tag(fid, pos) + di["site"] = str(tag.data) + info["device_info"] = di + del di + + helium_info = dir_tree_find(meas_info, FIFF.FIFFB_HELIUM) + hi = None + if len(helium_info) == 1: + helium_info = helium_info[0] + hi = dict() + for k in range(helium_info["nent"]): + kind = helium_info["directory"][k].kind + pos = helium_info["directory"][k].pos + if kind == FIFF.FIFF_HE_LEVEL_RAW: + tag = read_tag(fid, pos) + hi["he_level_raw"] = float(tag.data.item()) + elif kind == FIFF.FIFF_HELIUM_LEVEL: + tag = read_tag(fid, pos) + hi["helium_level"] = float(tag.data.item()) + elif kind == FIFF.FIFF_ORIG_FILE_GUID: + tag = read_tag(fid, pos) + hi["orig_file_guid"] = str(tag.data) + elif kind == FIFF.FIFF_MEAS_DATE: + tag = read_tag(fid, pos) + hi["meas_date"] = _ensure_meas_date_none_or_dt( + tuple(int(t) for t in tag.data), + ) + if "meas_date" not in hi: + hi["meas_date"] = None + info["helium_info"] = hi + del hi + + hpi_subsystem = dir_tree_find(meas_info, FIFF.FIFFB_HPI_SUBSYSTEM) + hs = None + if len(hpi_subsystem) == 1: + hpi_subsystem = hpi_subsystem[0] + hs = dict() + for k in range(hpi_subsystem["nent"]): + kind = hpi_subsystem["directory"][k].kind + pos = hpi_subsystem["directory"][k].pos + if kind == FIFF.FIFF_HPI_NCOIL: + tag = read_tag(fid, pos) + hs["ncoil"] = int(tag.data.item()) + elif kind == FIFF.FIFF_EVENT_CHANNEL: + tag = read_tag(fid, pos) + hs["event_channel"] = str(tag.data) + hpi_coils = dir_tree_find(hpi_subsystem, FIFF.FIFFB_HPI_COIL) + hc = [] + for coil in hpi_coils: + this_coil = dict() + for j in range(coil["nent"]): + kind = coil["directory"][j].kind + pos = coil["directory"][j].pos + if kind == FIFF.FIFF_EVENT_BITS: + tag = read_tag(fid, pos) + this_coil["event_bits"] = np.array(tag.data) + hc.append(this_coil) + hs["hpi_coils"] = hc + info["hpi_subsystem"] = hs + + # Read processing 
history + info["proc_history"] = _read_proc_history(fid, tree) + + # Make the most appropriate selection for the measurement id + if meas_info["parent_id"] is None: + if meas_info["id"] is None: + if meas["id"] is None: + if meas["parent_id"] is None: + info["meas_id"] = info["file_id"] + else: + info["meas_id"] = meas["parent_id"] + else: + info["meas_id"] = meas["id"] + else: + info["meas_id"] = meas_info["id"] + else: + info["meas_id"] = meas_info["parent_id"] + info["experimenter"] = experimenter + info["description"] = description + info["proj_id"] = proj_id + info["proj_name"] = proj_name + if meas_date is None: + meas_date = (info["meas_id"]["secs"], info["meas_id"]["usecs"]) + info["meas_date"] = _ensure_meas_date_none_or_dt(meas_date) + info["utc_offset"] = utc_offset + + info["sfreq"] = sfreq + info["highpass"] = highpass if highpass is not None else 0.0 + info["lowpass"] = lowpass if lowpass is not None else info["sfreq"] / 2.0 + info["line_freq"] = line_freq + info["gantry_angle"] = gantry_angle + + # Add the channel information and make a list of channel names + # for convenience + info["chs"] = chs + + # + # Add the coordinate transformations + # + info["dev_head_t"] = dev_head_t + info["ctf_head_t"] = ctf_head_t + info["dev_ctf_t"] = dev_ctf_t + if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None: + head_ctf_trans = np.linalg.inv(ctf_head_t["trans"]) + dev_ctf_trans = np.dot(head_ctf_trans, info["dev_head_t"]["trans"]) + info["dev_ctf_t"] = Transform("meg", "ctf_head", dev_ctf_trans) + + # All kinds of auxliary stuff + info["dig"] = _format_dig_points(dig) + info["bads"] = bads + info._update_redundant() + if clean_bads: + info["bads"] = [b for b in bads if b in info["ch_names"]] + info["projs"] = projs + info["comps"] = comps + info["acq_pars"] = acq_pars + info["acq_stim"] = acq_stim + info["custom_ref_applied"] = custom_ref_applied + info["xplotter_layout"] = xplotter_layout + info["kit_system_id"] = kit_system_id + info._check_consistency() + info._unlocked = False + return info, meas + + +def _read_extended_ch_info(chs, parent, fid): + ch_infos = dir_tree_find(parent, FIFF.FIFFB_CH_INFO) + if len(ch_infos) == 0: + return + _check_option("length of channel infos", len(ch_infos), [len(chs)]) + logger.info(" Reading extended channel information") + + # Here we assume that ``remap`` is in the same order as the channels + # themselves, which is hopefully safe enough. 
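+    # _CH_READ_MAP (derived from _CH_INFO_MAP near the end of this module)
+    # maps each FIFF tag kind to a (ch-dict key, cast) pair, e.g.
+    # FIFF.FIFF_CH_DACQ_NAME -> ("ch_name", str); kinds that are not in the
+    # map are warned about and skipped in the loop below.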
+ ch_names_mapping = dict() + for new, ch in zip(ch_infos, chs): + for k in range(new["nent"]): + kind = new["directory"][k].kind + try: + key, cast = _CH_READ_MAP[kind] + except KeyError: + # This shouldn't happen if we're up to date with the FIFF + # spec + warn(f"Discarding extra channel information kind {kind}") + continue + assert key in ch + data = read_tag(fid, new["directory"][k].pos).data + if data is not None: + data = cast(data) + if key == "ch_name": + ch_names_mapping[ch[key]] = data + ch[key] = data + _update_ch_info_named(ch) + # we need to return ch_names_mapping so that we can also rename the + # bad channels + return ch_names_mapping + + +def _rename_comps(comps, ch_names_mapping): + if not (comps and ch_names_mapping): + return + for comp in comps: + data = comp["data"] + for key in ("row_names", "col_names"): + data[key][:] = _rename_list(data[key], ch_names_mapping) + + +def _ensure_meas_date_none_or_dt(meas_date): + if meas_date is None or np.array_equal(meas_date, DATE_NONE): + meas_date = None + elif not isinstance(meas_date, datetime.datetime): + meas_date = _stamp_to_dt(meas_date) + return meas_date + + +def _check_dates(info, prepend_error=""): + """Check dates before writing as fif files. + + It's needed because of the limited integer precision + of the fix standard. + """ + for key in ("file_id", "meas_id"): + value = info.get(key) + if value is not None: + assert "msecs" not in value + for key_2 in ("secs", "usecs"): + if ( + value[key_2] < np.iinfo(">i4").min + or value[key_2] > np.iinfo(">i4").max + ): + raise RuntimeError( + f"{prepend_error}info[{key}][{key_2}] must be between " + f'"{np.iinfo(">i4").min!r}" and "{np.iinfo(">i4").max!r}", got ' + f'"{value[key_2]!r}"' + ) + + meas_date = info.get("meas_date") + if meas_date is None: + return + + meas_date_stamp = _dt_to_stamp(meas_date) + if ( + meas_date_stamp[0] < np.iinfo(">i4").min + or meas_date_stamp[0] > np.iinfo(">i4").max + ): + raise RuntimeError( + f'{prepend_error}info["meas_date"] seconds must be between ' + f'"{(np.iinfo(">i4").min, 0)!r}" and "{(np.iinfo(">i4").max, 0)!r}", got ' + f'"{meas_date_stamp[0]!r}"' + ) + + +@fill_doc +def write_meas_info(fid, info, data_type=None, reset_range=True): + """Write measurement info into a file id (from a fif file). + + Parameters + ---------- + fid : file + Open file descriptor. + %(info_not_none)s + data_type : int + The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), + 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for + raw data. + reset_range : bool + If True, info['chs'][k]['range'] will be set to unity. + + Notes + ----- + Tags are written in a particular order for compatibility with maxfilter. 
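+
+    A minimal sketch of how this function is typically driven (this mirrors
+    what :func:`write_info` below does; ``fname`` is a hypothetical output
+    path)::
+
+        with start_and_end_file(fname, overwrite=True) as fid:
+            start_block(fid, FIFF.FIFFB_MEAS)
+            write_meas_info(fid, info, data_type=None, reset_range=True)
+            end_block(fid, FIFF.FIFFB_MEAS)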
+ """ + info._check_consistency() + _check_dates(info) + + # Measurement info + start_block(fid, FIFF.FIFFB_MEAS_INFO) + + # Add measurement id + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info["meas_id"]) + + for event in info["events"]: + start_block(fid, FIFF.FIFFB_EVENTS) + if event.get("channels") is not None: + write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event["channels"]) + if event.get("list") is not None: + write_int(fid, FIFF.FIFF_EVENT_LIST, event["list"]) + end_block(fid, FIFF.FIFFB_EVENTS) + + # HPI Result + for hpi_result in info["hpi_results"]: + start_block(fid, FIFF.FIFFB_HPI_RESULT) + write_dig_points(fid, hpi_result["dig_points"]) + if "order" in hpi_result: + write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER, hpi_result["order"]) + if "used" in hpi_result: + write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result["used"]) + if "moments" in hpi_result: + write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS, hpi_result["moments"]) + if "goodness" in hpi_result: + write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS, hpi_result["goodness"]) + if "good_limit" in hpi_result: + write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT, hpi_result["good_limit"]) + if "dist_limit" in hpi_result: + write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT, hpi_result["dist_limit"]) + if "accept" in hpi_result: + write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result["accept"]) + if "coord_trans" in hpi_result: + write_coord_trans(fid, hpi_result["coord_trans"]) + end_block(fid, FIFF.FIFFB_HPI_RESULT) + + # HPI Measurement + for hpi_meas in info["hpi_meas"]: + start_block(fid, FIFF.FIFFB_HPI_MEAS) + if hpi_meas.get("creator") is not None: + write_string(fid, FIFF.FIFF_CREATOR, hpi_meas["creator"]) + if hpi_meas.get("sfreq") is not None: + write_float(fid, FIFF.FIFF_SFREQ, hpi_meas["sfreq"]) + if hpi_meas.get("nchan") is not None: + write_int(fid, FIFF.FIFF_NCHAN, hpi_meas["nchan"]) + if hpi_meas.get("nave") is not None: + write_int(fid, FIFF.FIFF_NAVE, hpi_meas["nave"]) + if hpi_meas.get("ncoil") is not None: + write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas["ncoil"]) + if hpi_meas.get("first_samp") is not None: + write_int(fid, FIFF.FIFF_FIRST_SAMPLE, hpi_meas["first_samp"]) + if hpi_meas.get("last_samp") is not None: + write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas["last_samp"]) + for hpi_coil in hpi_meas["hpi_coils"]: + start_block(fid, FIFF.FIFFB_HPI_COIL) + if hpi_coil.get("number") is not None: + write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil["number"]) + if hpi_coil.get("epoch") is not None: + write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil["epoch"]) + if hpi_coil.get("slopes") is not None: + write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil["slopes"]) + if hpi_coil.get("corr_coeff") is not None: + write_float(fid, FIFF.FIFF_HPI_CORR_COEFF, hpi_coil["corr_coeff"]) + if hpi_coil.get("coil_freq") is not None: + write_float(fid, FIFF.FIFF_HPI_COIL_FREQ, hpi_coil["coil_freq"]) + end_block(fid, FIFF.FIFFB_HPI_COIL) + end_block(fid, FIFF.FIFFB_HPI_MEAS) + + # Polhemus data + write_dig_points(fid, info["dig"], block=True) + + # megacq parameters + if info["acq_pars"] is not None or info["acq_stim"] is not None: + start_block(fid, FIFF.FIFFB_DACQ_PARS) + if info["acq_pars"] is not None: + write_string(fid, FIFF.FIFF_DACQ_PARS, info["acq_pars"]) + + if info["acq_stim"] is not None: + write_string(fid, FIFF.FIFF_DACQ_STIM, info["acq_stim"]) + + end_block(fid, FIFF.FIFFB_DACQ_PARS) + + # Coordinate transformations if the HPI result block was not there + if info["dev_head_t"] is not None: + 
write_coord_trans(fid, info["dev_head_t"]) + + if info["ctf_head_t"] is not None: + write_coord_trans(fid, info["ctf_head_t"]) + + if info["dev_ctf_t"] is not None: + write_coord_trans(fid, info["dev_ctf_t"]) + + # Projectors + ch_names_mapping = _make_ch_names_mapping(info["chs"]) + _write_proj(fid, info["projs"], ch_names_mapping=ch_names_mapping) + + # Bad channels + _write_bad_channels(fid, info["bads"], ch_names_mapping=ch_names_mapping) + + # General + if info.get("experimenter") is not None: + write_string(fid, FIFF.FIFF_EXPERIMENTER, info["experimenter"]) + if info.get("description") is not None: + write_string(fid, FIFF.FIFF_DESCRIPTION, info["description"]) + if info.get("proj_id") is not None: + write_int(fid, FIFF.FIFF_PROJ_ID, info["proj_id"]) + if info.get("proj_name") is not None: + write_string(fid, FIFF.FIFF_PROJ_NAME, info["proj_name"]) + if info.get("meas_date") is not None: + write_int(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(info["meas_date"])) + if info.get("utc_offset") is not None: + write_string(fid, FIFF.FIFF_UTC_OFFSET, info["utc_offset"]) + write_int(fid, FIFF.FIFF_NCHAN, info["nchan"]) + write_float(fid, FIFF.FIFF_SFREQ, info["sfreq"]) + if info["lowpass"] is not None: + write_float(fid, FIFF.FIFF_LOWPASS, info["lowpass"]) + if info["highpass"] is not None: + write_float(fid, FIFF.FIFF_HIGHPASS, info["highpass"]) + if info.get("line_freq") is not None: + write_float(fid, FIFF.FIFF_LINE_FREQ, info["line_freq"]) + if info.get("gantry_angle") is not None: + write_float(fid, FIFF.FIFF_GANTRY_ANGLE, info["gantry_angle"]) + if data_type is not None: + write_int(fid, FIFF.FIFF_DATA_PACK, data_type) + if info.get("custom_ref_applied"): + write_int(fid, FIFF.FIFF_MNE_CUSTOM_REF, info["custom_ref_applied"]) + if info.get("xplotter_layout"): + write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info["xplotter_layout"]) + + # Channel information + _write_ch_infos(fid, info["chs"], reset_range, ch_names_mapping) + + # Subject information + if info.get("subject_info") is not None: + start_block(fid, FIFF.FIFFB_SUBJECT) + si = info["subject_info"] + if si.get("id") is not None: + write_int(fid, FIFF.FIFF_SUBJ_ID, si["id"]) + if si.get("his_id") is not None: + write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si["his_id"]) + if si.get("last_name") is not None: + write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si["last_name"]) + if si.get("first_name") is not None: + write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si["first_name"]) + if si.get("middle_name") is not None: + write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si["middle_name"]) + if si.get("birthday") is not None: + write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si["birthday"]) + if si.get("sex") is not None: + write_int(fid, FIFF.FIFF_SUBJ_SEX, si["sex"]) + if si.get("hand") is not None: + write_int(fid, FIFF.FIFF_SUBJ_HAND, si["hand"]) + if si.get("weight") is not None: + write_float(fid, FIFF.FIFF_SUBJ_WEIGHT, si["weight"]) + if si.get("height") is not None: + write_float(fid, FIFF.FIFF_SUBJ_HEIGHT, si["height"]) + end_block(fid, FIFF.FIFFB_SUBJECT) + del si + + if info.get("device_info") is not None: + start_block(fid, FIFF.FIFFB_DEVICE) + di = info["device_info"] + if di.get("type") is not None: + write_string(fid, FIFF.FIFF_DEVICE_TYPE, di["type"]) + for key in ("model", "serial", "site"): + if di.get(key) is not None: + write_string(fid, getattr(FIFF, "FIFF_DEVICE_" + key.upper()), di[key]) + end_block(fid, FIFF.FIFFB_DEVICE) + del di + + if info.get("helium_info") is not None: + start_block(fid, FIFF.FIFFB_HELIUM) + hi = 
info["helium_info"] + if hi.get("he_level_raw") is not None: + write_float(fid, FIFF.FIFF_HE_LEVEL_RAW, hi["he_level_raw"]) + if hi.get("helium_level") is not None: + write_float(fid, FIFF.FIFF_HELIUM_LEVEL, hi["helium_level"]) + if hi.get("orig_file_guid") is not None: + write_string(fid, FIFF.FIFF_ORIG_FILE_GUID, hi["orig_file_guid"]) + if hi.get("meas_date", None) is not None: + write_int(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(hi["meas_date"])) + end_block(fid, FIFF.FIFFB_HELIUM) + del hi + + if info.get("hpi_subsystem") is not None: + hs = info["hpi_subsystem"] + start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM) + if hs.get("ncoil") is not None: + write_int(fid, FIFF.FIFF_HPI_NCOIL, hs["ncoil"]) + if hs.get("event_channel") is not None: + write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs["event_channel"]) + if hs.get("hpi_coils") is not None: + for coil in hs["hpi_coils"]: + start_block(fid, FIFF.FIFFB_HPI_COIL) + if coil.get("event_bits") is not None: + write_int(fid, FIFF.FIFF_EVENT_BITS, coil["event_bits"]) + end_block(fid, FIFF.FIFFB_HPI_COIL) + end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM) + del hs + + # CTF compensation info + comps = info["comps"] + if ch_names_mapping: + comps = deepcopy(comps) + _rename_comps(comps, ch_names_mapping) + write_ctf_comp(fid, comps) + + # KIT system ID + if info.get("kit_system_id") is not None: + write_int(fid, FIFF.FIFF_MNE_KIT_SYSTEM_ID, info["kit_system_id"]) + + end_block(fid, FIFF.FIFFB_MEAS_INFO) + + # Processing history + _write_proc_history(fid, info) + + +@verbose +def write_info( + fname, info, *, data_type=None, reset_range=True, overwrite=False, verbose=None +): + """Write measurement info in fif file. + + Parameters + ---------- + fname : path-like + The name of the file. Should end by ``-info.fif``. + %(info_not_none)s + data_type : int + The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), + 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for + raw data. + reset_range : bool + If True, info['chs'][k]['range'] will be set to unity. + %(overwrite)s + %(verbose)s + """ + with start_and_end_file(fname, overwrite=overwrite) as fid: + start_block(fid, FIFF.FIFFB_MEAS) + write_meas_info(fid, info, data_type, reset_range) + end_block(fid, FIFF.FIFFB_MEAS) + + +@verbose +def _merge_info_values(infos, key, verbose=None): + """Merge things together. + + Fork for {'dict', 'list', 'array', 'other'} + and consider cases where one or all are of the same type. + + Does special things for "projs", "bads", and "meas_date". + """ + values = [d[key] for d in infos] + msg = ( + f"Don't know how to merge '{key}'. 
Make sure values are compatible, got types:" + f"\n {[type(v) for v in values]}" + ) + + def _flatten(lists): + return [item for sublist in lists for item in sublist] + + def _check_isinstance(values, kind, func): + return func([isinstance(v, kind) for v in values]) + + def _where_isinstance(values, kind): + """Get indices of instances.""" + return np.where([isinstance(v, type) for v in values])[0] + + # list + if _check_isinstance(values, list, all): + lists = (d[key] for d in infos) + if key == "projs": + return _uniquify_projs(_flatten(lists)) + elif key == "bads": + return sorted(set(_flatten(lists))) + else: + return _flatten(lists) + elif _check_isinstance(values, list, any): + idx = _where_isinstance(values, list) + if len(idx) == 1: + return values[int(idx)] + elif len(idx) > 1: + lists = (d[key] for d in infos if isinstance(d[key], list)) + return _flatten(lists) + # dict + elif _check_isinstance(values, dict, all): + is_qual = all(object_diff(values[0], v) == "" for v in values[1:]) + if is_qual: + return values[0] + else: + RuntimeError(msg) + elif _check_isinstance(values, dict, any): + idx = _where_isinstance(values, dict) + if len(idx) == 1: + return values[int(idx)] + elif len(idx) > 1: + raise RuntimeError(msg) + # ndarray + elif _check_isinstance(values, np.ndarray, all) or _check_isinstance( + values, tuple, all + ): + is_qual = all(np.array_equal(values[0], x) for x in values[1:]) + if is_qual: + return values[0] + elif key == "meas_date": + logger.info(f"Found multiple entries for {key}. Setting value to `None`") + return None + else: + raise RuntimeError(msg) + elif _check_isinstance(values, (np.ndarray, tuple), any): + idx = _where_isinstance(values, np.ndarray) + if len(idx) == 1: + return values[int(idx)] + elif len(idx) > 1: + raise RuntimeError(msg) + # proj_id + elif _check_isinstance(values, (int, type(None)), all) and key == "proj_id": + unique_values = set(values) + if len(unique_values) != 1: + logger.info("Found multiple proj_ids, using the first one.") + return list(unique_values)[0] + + elif key == "experimenter" or key == "proj_name": + if _check_isinstance(values, (str, type(None)), all): + unique_values = set(values) + unique_values.discard(None) + if len(unique_values) == 1: + return list(unique_values)[0] + else: + return None + # other + else: + unique_values = set(values) + if len(unique_values) == 1: + return list(values)[0] + elif isinstance(list(unique_values)[0], BytesIO): + logger.info("Found multiple StringIO instances. Setting value to `None`") + return None + elif isinstance(list(unique_values)[0], str): + logger.info(f"Found multiple {key}. Setting value to `None`") + return None + else: + raise RuntimeError(msg) + + +@verbose +def _merge_info(infos, force_update_to_first=False, verbose=None): + """Merge multiple measurement info dictionaries. + + - Fields that are present in only one info object will be used in the + merged info. + - Fields that are present in multiple info objects and are the same + will be used in the merged info. + - Fields that are present in multiple info objects and are different + will result in a None value in the merged info. + - Channels will be concatenated. If multiple info objects contain + channels with the same name, an exception is raised. + + Parameters + ---------- + infos | list of instance of Info + Info objects to merge into one info object. + force_update_to_first : bool + If True, force the fields for objects in `info` will be updated + to match those in the first item. 
Use at your own risk, as this + may overwrite important metadata. + %(verbose)s + + Returns + ------- + info : instance of Info + The merged info object. + """ + for info in infos: + info._check_consistency() + if force_update_to_first is True: + infos = deepcopy(infos) + _force_update_info(infos[0], infos[1:]) + info = Info() + info._unlocked = True + info["chs"] = [] + for this_info in infos: + info["chs"].extend(this_info["chs"]) + info._update_redundant() + duplicates = {ch for ch in info["ch_names"] if info["ch_names"].count(ch) > 1} + if len(duplicates) > 0: + msg = ( + "The following channels are present in more than one input " + f"measurement info objects: {list(duplicates)}" + ) + raise ValueError(msg) + + transforms = ["ctf_head_t", "dev_head_t", "dev_ctf_t"] + for trans_name in transforms: + trans = [i[trans_name] for i in infos if i[trans_name]] + if len(trans) == 0: + info[trans_name] = None + elif len(trans) == 1: + info[trans_name] = trans[0] + elif all( + np.all(trans[0]["trans"] == x["trans"]) + and trans[0]["from"] == x["from"] + and trans[0]["to"] == x["to"] + for x in trans[1:] + ): + info[trans_name] = trans[0] + else: + msg = f"Measurement infos provide mutually inconsistent {trans_name}" + raise ValueError(msg) + + # KIT system-IDs + kit_sys_ids = [i["kit_system_id"] for i in infos if i["kit_system_id"]] + if len(kit_sys_ids) == 0: + info["kit_system_id"] = None + elif len(set(kit_sys_ids)) == 1: + info["kit_system_id"] = kit_sys_ids[0] + else: + raise ValueError("Trying to merge channels from different KIT systems") + + # hpi infos and digitization data: + fields = ["hpi_results", "hpi_meas", "dig"] + for k in fields: + values = [i[k] for i in infos if i[k]] + if len(values) == 0: + info[k] = [] + elif len(values) == 1: + info[k] = values[0] + elif all(object_diff(values[0], v) == "" for v in values[1:]): + info[k] = values[0] + else: + msg = f"Measurement infos are inconsistent for {k}" + raise ValueError(msg) + + # other fields + other_fields = [ + "acq_pars", + "acq_stim", + "bads", + "comps", + "custom_ref_applied", + "description", + "experimenter", + "file_id", + "highpass", + "utc_offset", + "hpi_subsystem", + "events", + "device_info", + "helium_info", + "line_freq", + "lowpass", + "meas_id", + "proj_id", + "proj_name", + "projs", + "sfreq", + "gantry_angle", + "subject_info", + "sfreq", + "xplotter_layout", + "proc_history", + ] + + for k in other_fields: + info[k] = _merge_info_values(infos, k) + + info["meas_date"] = infos[0]["meas_date"] + info._unlocked = False + + return info + + +@verbose +def create_info(ch_names, sfreq, ch_types="misc", verbose=None): + """Create a basic Info instance suitable for use with create_raw. + + Parameters + ---------- + ch_names : list of str | int + Channel names. If an int, a list of channel names will be created + from ``range(ch_names)``. + sfreq : float + Sample rate of the data. + ch_types : list of str | str + Channel types, default is ``'misc'`` which is a + :term:`non-data channel `. + Currently supported fields are 'bio', 'chpi', 'csd', 'dbs', 'dipole', + 'ecg', 'ecog', 'eeg', 'emg', 'eog', 'exci', 'eyegaze', + 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', + 'fnirs_od', 'gof', 'gsr', 'hbo', 'hbr', 'ias', 'misc', 'pupil', + 'ref_meg', 'resp', 'seeg', 'stim', 'syst', 'temperature' (see also + :term:`sensor types`). + If str, then all channels are assumed to be of the same type. 
+ %(verbose)s + + Returns + ------- + %(info_not_none)s + + Notes + ----- + The info dictionary will be sparsely populated to enable functionality + within the rest of the package. Advanced functionality such as source + localization can only be obtained through substantial, proper + modifications of the info structure (not recommended). + + Note that the MEG device-to-head transform ``info['dev_head_t']`` will + be initialized to the identity transform. + + Proper units of measure: + + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog, resp, fnirs_fd_ac_amplitude, + fnirs_cw_amplitude, fnirs_od + * T: mag, chpi, ref_meg + * T/m: grad + * M: hbo, hbr + * rad: fnirs_fd_phase + * Am: dipole + * S: gsr + * C: temperature + * V/m²: csd + * GOF: gof + * AU: misc, stim, eyegaze, pupil + """ + try: + ch_names = operator.index(ch_names) # int-like + except TypeError: + pass + else: + ch_names = list(np.arange(ch_names).astype(str)) + _validate_type(ch_names, (list, tuple), "ch_names", ("list, tuple, or int")) + sfreq = float(sfreq) + if sfreq <= 0: + raise ValueError("sfreq must be positive") + nchan = len(ch_names) + if isinstance(ch_types, str): + ch_types = [ch_types] * nchan + ch_types = np.atleast_1d(np.array(ch_types, np.str_)) + if ch_types.ndim != 1 or len(ch_types) != nchan: + raise ValueError( + f"ch_types and ch_names must be the same length ({len(ch_types)} != " + f"{nchan}) for ch_types={ch_types}" + ) + info = _empty_info(sfreq) + ch_types_dict = get_channel_type_constants(include_defaults=True) + for ci, (ch_name, ch_type) in enumerate(zip(ch_names, ch_types)): + _validate_type(ch_name, "str", "each entry in ch_names") + _validate_type(ch_type, "str", "each entry in ch_types") + if ch_type not in ch_types_dict: + raise KeyError(f"kind must be one of {list(ch_types_dict)}, not {ch_type}") + this_ch_dict = ch_types_dict[ch_type] + kind = this_ch_dict["kind"] + # handle chpi, where kind is a *list* of FIFF constants: + kind = kind[0] if isinstance(kind, list | tuple) else kind + # mirror what tag.py does here + coord_frame = _ch_coord_dict.get(kind, FIFF.FIFFV_COORD_UNKNOWN) + coil_type = this_ch_dict.get("coil_type", FIFF.FIFFV_COIL_NONE) + unit = this_ch_dict.get("unit", FIFF.FIFF_UNIT_NONE) + chan_info = dict( + loc=np.full(12, np.nan), + unit_mul=FIFF.FIFF_UNITM_NONE, + range=1.0, + cal=1.0, + kind=kind, + coil_type=coil_type, + unit=unit, + coord_frame=coord_frame, + ch_name=str(ch_name), + scanno=ci + 1, + logno=ci + 1, + ) + info["chs"].append(chan_info) + + info._update_redundant() + info._check_consistency() + info._unlocked = False + return info + + +RAW_INFO_FIELDS = ( + "acq_pars", + "acq_stim", + "bads", + "ch_names", + "chs", + "comps", + "ctf_head_t", + "custom_ref_applied", + "description", + "dev_ctf_t", + "dev_head_t", + "dig", + "experimenter", + "events", + "utc_offset", + "device_info", + "file_id", + "highpass", + "hpi_meas", + "hpi_results", + "helium_info", + "hpi_subsystem", + "kit_system_id", + "line_freq", + "lowpass", + "meas_date", + "meas_id", + "nchan", + "proj_id", + "proj_name", + "projs", + "sfreq", + "subject_info", + "xplotter_layout", + "proc_history", + "gantry_angle", +) + + +def _empty_info(sfreq): + """Create an empty info dictionary.""" + _none_keys = ( + "acq_pars", + "acq_stim", + "ctf_head_t", + "description", + "dev_head_t", + "dev_ctf_t", + "dig", + "experimenter", + "utc_offset", + "device_info", + "file_id", + "highpass", + "hpi_subsystem", + "kit_system_id", + "helium_info", + "line_freq", + "lowpass", + "meas_date", + "meas_id", + 
"proj_id", + "proj_name", + "subject_info", + "xplotter_layout", + "gantry_angle", + ) + _list_keys = ( + "bads", + "chs", + "comps", + "events", + "hpi_meas", + "hpi_results", + "projs", + "proc_history", + ) + info = Info() + info._unlocked = True + for k in _none_keys: + info[k] = None + for k in _list_keys: + info[k] = list() + info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF + info["highpass"] = 0.0 + info["sfreq"] = float(sfreq) + info["lowpass"] = info["sfreq"] / 2.0 + info._update_redundant() + info._check_consistency() + return info + + +def _force_update_info(info_base, info_target): + """Update target info objects with values from info base. + + Note that values in info_target will be overwritten by those in info_base. + This will overwrite all fields except for: 'chs', 'ch_names', 'nchan'. + + Parameters + ---------- + info_base : mne.Info + The Info object you want to use for overwriting values + in target Info objects. + info_target : mne.Info | list of mne.Info + The Info object(s) you wish to overwrite using info_base. These objects + will be modified in-place. + """ + exclude_keys = ["chs", "ch_names", "nchan", "bads"] + info_target = np.atleast_1d(info_target).ravel() + all_infos = np.hstack([info_base, info_target]) + for ii in all_infos: + if not isinstance(ii, Info): + raise ValueError(f"Inputs must be of type Info. Found type {type(ii)}") + for key, val in info_base.items(): + if key in exclude_keys: + continue + for i_targ in info_target: + with i_targ._unlock(): + i_targ[key] = val + + +def _add_timedelta_to_stamp(meas_date_stamp, delta_t): + """Add a timedelta to a meas_date tuple.""" + if meas_date_stamp is not None: + meas_date_stamp = _dt_to_stamp(_stamp_to_dt(meas_date_stamp) + delta_t) + return meas_date_stamp + + +@verbose +def anonymize_info(info, daysback=None, keep_his=False, verbose=None): + """Anonymize measurement information in place. + + .. warning:: If ``info`` is part of an object like + :class:`raw.info `, you should directly use + the method :meth:`raw.anonymize() ` + to ensure that all parts of the data are anonymized and + stay synchronized (e.g., + :class:`raw.annotations `). + + Parameters + ---------- + %(info_not_none)s + %(daysback_anonymize_info)s + %(keep_his_anonymize_info)s + %(verbose)s + + Returns + ------- + info : instance of Info + The anonymized measurement information. + + Notes + ----- + %(anonymize_info_notes)s + """ + _validate_type(info, "info", "self") + + default_anon_dos = datetime.datetime( + 2000, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc + ) + default_str = "mne_anonymize" + default_subject_id = 0 + default_sex = 0 + default_desc = "Anonymized using a time shift to preserve age at acquisition" + + none_meas_date = info["meas_date"] is None + + if none_meas_date: + if daysback is not None: + warn( + 'Input info has "meas_date" set to None. ' + "Removing all information from time/date structures, " + "*NOT* performing any time shifts!" 
+ ) + else: + # compute timeshift delta + if daysback is None: + delta_t = info["meas_date"] - default_anon_dos + else: + delta_t = datetime.timedelta(days=daysback) + with info._unlock(): + info["meas_date"] = info["meas_date"] - delta_t + + # file_id and meas_id + for key in ("file_id", "meas_id"): + value = info.get(key) + if value is not None: + assert "msecs" not in value + if none_meas_date or ((value["secs"], value["usecs"]) == DATE_NONE): + # Don't try to shift backwards in time when no measurement + # date is available or when file_id is already a place holder + tmp = DATE_NONE + else: + tmp = _add_timedelta_to_stamp((value["secs"], value["usecs"]), -delta_t) + value["secs"] = tmp[0] + value["usecs"] = tmp[1] + # The following copy is needed for a test CTF dataset + # otherwise value['machid'][:] = 0 would suffice + _tmp = value["machid"].copy() + _tmp[:] = 0 + value["machid"] = _tmp + + # subject info + subject_info = info.get("subject_info") + if subject_info is not None: + if subject_info.get("id") is not None: + subject_info["id"] = default_subject_id + if keep_his: + logger.info( + "Not fully anonymizing info - keeping his_id, sex, and hand info" + ) + else: + if subject_info.get("his_id") is not None: + subject_info["his_id"] = str(default_subject_id) + if subject_info.get("sex") is not None: + subject_info["sex"] = default_sex + if subject_info.get("hand") is not None: + del subject_info["hand"] # there's no "unknown" setting + + for key in ("last_name", "first_name", "middle_name"): + if subject_info.get(key) is not None: + subject_info[key] = default_str + + # anonymize the subject birthday + if none_meas_date: + subject_info.pop("birthday", None) + elif subject_info.get("birthday") is not None: + subject_info["birthday"] = subject_info["birthday"] - delta_t + + for key in ("weight", "height"): + if subject_info.get(key) is not None: + subject_info[key] = 0 + + info["experimenter"] = default_str + info["description"] = default_desc + with info._unlock(): + if info["proj_id"] is not None: + info["proj_id"] = 0 + if info["proj_name"] is not None: + info["proj_name"] = default_str + if info["utc_offset"] is not None: + info["utc_offset"] = None + + proc_hist = info.get("proc_history") + if proc_hist is not None: + for record in proc_hist: + record["block_id"]["machid"][:] = 0 + record["experimenter"] = default_str + if none_meas_date: + record["block_id"]["secs"] = DATE_NONE[0] + record["block_id"]["usecs"] = DATE_NONE[1] + record["date"] = DATE_NONE + else: + this_t0 = (record["block_id"]["secs"], record["block_id"]["usecs"]) + this_t1 = _add_timedelta_to_stamp(this_t0, -delta_t) + record["block_id"]["secs"] = this_t1[0] + record["block_id"]["usecs"] = this_t1[1] + record["date"] = _add_timedelta_to_stamp(record["date"], -delta_t) + + hi = info.get("helium_info") + if hi is not None: + if hi.get("orig_file_guid") is not None: + hi["orig_file_guid"] = default_str + if none_meas_date and hi.get("meas_date") is not None: + hi["meas_date"] = _ensure_meas_date_none_or_dt(DATE_NONE) + elif hi.get("meas_date") is not None: + hi["meas_date"] = hi["meas_date"] - delta_t + + di = info.get("device_info") + if di is not None: + for k in ("serial", "site"): + if di.get(k) is not None: + di[k] = default_str + + err_mesg = ( + "anonymize_info generated an inconsistent info object. Underlying Error:\n" + ) + info._check_consistency(prepend_error=err_mesg) + err_mesg = ( + "anonymize_info generated an inconsistent info object. " + "daysback parameter was too large. 
" + "Underlying Error:\n" + ) + _check_dates(info, prepend_error=err_mesg) + + return info + + +@fill_doc +def _bad_chans_comp(info, ch_names): + """Check if channel names are consistent with current compensation status. + + Parameters + ---------- + %(info_not_none)s + + ch_names : list of str + The channel names to check. + + Returns + ------- + status : bool + True if compensation is *currently* in use but some compensation + channels are not included in picks + + False if compensation is *currently* not being used + or if compensation is being used and all compensation channels + in info and included in picks. + + missing_ch_names: array-like of str, shape (n_missing,) + The names of compensation channels not included in picks. + Returns [] if no channels are missing. + + """ + if "comps" not in info: + # should this be thought of as a bug? + return False, [] + + # only include compensation channels that would affect selected channels + ch_names_s = set(ch_names) + comp_names = [] + for comp in info["comps"]: + if len(ch_names_s.intersection(comp["data"]["row_names"])) > 0: + comp_names.extend(comp["data"]["col_names"]) + comp_names = sorted(set(comp_names)) + + missing_ch_names = sorted(set(comp_names).difference(ch_names)) + + if get_current_comp(info) != 0 and len(missing_ch_names) > 0: + return True, missing_ch_names + + return False, missing_ch_names + + +_DIG_CAST = dict(kind=int, ident=int, r=lambda x: x, coord_frame=int) +# key -> const, cast, write +_CH_INFO_MAP = OrderedDict( + scanno=(FIFF.FIFF_CH_SCAN_NO, _int_item, write_int), + logno=(FIFF.FIFF_CH_LOGICAL_NO, _int_item, write_int), + kind=(FIFF.FIFF_CH_KIND, _int_item, write_int), + range=(FIFF.FIFF_CH_RANGE, _float_item, write_float), + cal=(FIFF.FIFF_CH_CAL, _float_item, write_float), + coil_type=(FIFF.FIFF_CH_COIL_TYPE, _int_item, write_int), + loc=(FIFF.FIFF_CH_LOC, lambda x: x, write_float), + unit=(FIFF.FIFF_CH_UNIT, _int_item, write_int), + unit_mul=(FIFF.FIFF_CH_UNIT_MUL, _int_item, write_int), + ch_name=(FIFF.FIFF_CH_DACQ_NAME, str, write_string), + coord_frame=(FIFF.FIFF_CH_COORD_FRAME, _int_item, write_int), +) +# key -> cast +_CH_CAST = OrderedDict((key, val[1]) for key, val in _CH_INFO_MAP.items()) +# const -> key, cast +_CH_READ_MAP = OrderedDict((val[0], (key, val[1])) for key, val in _CH_INFO_MAP.items()) + + +@contextlib.contextmanager +def _writing_info_hdf5(info): + # Make info writing faster by packing chs and dig into numpy arrays + orig_dig = info.get("dig", None) + orig_chs = info["chs"] + with info._unlock(): + try: + if orig_dig is not None and len(orig_dig) > 0: + info["dig"] = _dict_pack(info["dig"], _DIG_CAST) + info["chs"] = _dict_pack(info["chs"], _CH_CAST) + info["chs"]["ch_name"] = np.char.encode( + info["chs"]["ch_name"], encoding="utf8" + ) + yield + finally: + if orig_dig is not None: + info["dig"] = orig_dig + info["chs"] = orig_chs + + +def _dict_pack(obj, casts): + # pack a list of dict into dict of array + return {key: np.array([o[key] for o in obj]) for key in casts} + + +def _dict_unpack(obj, casts): + # unpack a dict of array into a list of dict + n = len(obj[list(casts)[0]]) + return [{key: cast(obj[key][ii]) for key, cast in casts.items()} for ii in range(n)] + + +def _make_ch_names_mapping(chs): + orig_ch_names = [c["ch_name"] for c in chs] + ch_names = orig_ch_names.copy() + _unique_channel_names(ch_names, max_length=15, verbose="error") + ch_names_mapping = dict() + if orig_ch_names != ch_names: + ch_names_mapping.update(zip(orig_ch_names, ch_names)) + return ch_names_mapping 
+ + +def _write_ch_infos(fid, chs, reset_range, ch_names_mapping): + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping + for k, c in enumerate(chs): + # Scan numbers may have been messed up + c = c.copy() + c["ch_name"] = ch_names_mapping.get(c["ch_name"], c["ch_name"]) + assert len(c["ch_name"]) <= 15 + c["scanno"] = k + 1 + # for float/double, the "range" param is unnecessary + if reset_range: + c["range"] = 1.0 + write_ch_info(fid, c) + # only write new-style channel information if necessary + if len(ch_names_mapping): + logger.info( + " Writing channel names to FIF truncated to 15 characters with remapping" + ) + for ch in chs: + start_block(fid, FIFF.FIFFB_CH_INFO) + assert set(ch) == set(_CH_INFO_MAP) + for key, (const, _, write) in _CH_INFO_MAP.items(): + write(fid, const, ch[key]) + end_block(fid, FIFF.FIFFB_CH_INFO) + + +def _ensure_infos_match(info1, info2, name, *, on_mismatch="raise"): + """Check if infos match. + + Parameters + ---------- + info1, info2 : instance of Info + The infos to compare. + name : str + The name of the object appearing in the error message of the comparison + fails. + on_mismatch : 'raise' | 'warn' | 'ignore' + What to do in case of a mismatch of ``dev_head_t`` between ``info1`` + and ``info2``. + """ + _check_on_missing(on_missing=on_mismatch, name="on_mismatch") + + info1._check_consistency() + info2._check_consistency() + + if info1["nchan"] != info2["nchan"]: + raise ValueError(f"{name}.info['nchan'] must match") + if set(info1["bads"]) != set(info2["bads"]): + raise ValueError(f"{name}.info['bads'] must match") + if info1["sfreq"] != info2["sfreq"]: + raise ValueError(f"{name}.info['sfreq'] must match") + if set(info1["ch_names"]) != set(info2["ch_names"]): + raise ValueError(f"{name}.info['ch_names'] must match") + if info1["ch_names"] != info2["ch_names"]: + msg = ( + f"{name}.info['ch_names']: Channel order must match. Use " + '"mne.match_channel_orders()" to sort channels.' + ) + raise ValueError(msg) + if len(info2["projs"]) != len(info1["projs"]): + raise ValueError(f"SSP projectors in {name} must be the same") + if any(not _proj_equal(p1, p2) for p1, p2 in zip(info2["projs"], info1["projs"])): + raise ValueError(f"SSP projectors in {name} must be the same") + if (info1["dev_head_t"] is None) ^ (info2["dev_head_t"] is None) or ( + info1["dev_head_t"] is not None + and not np.allclose( + info1["dev_head_t"]["trans"], + info2["dev_head_t"]["trans"], + rtol=1e-6, + equal_nan=True, + ) + ): + msg = ( + f"{name}.info['dev_head_t'] differs. The " + f"instances probably come from different runs, and " + f"are therefore associated with different head " + f"positions. Manually change info['dev_head_t'] to " + f"avoid this message but beware that this means the " + f"MEG sensors will not be properly spatially aligned. " + f"See mne.preprocessing.maxwell_filter to realign the " + f"runs to a common head position." + ) + _on_missing(on_missing=on_mismatch, msg=msg, name="on_mismatch") + + +def _get_fnirs_ch_pos(info): + """Return positions of each fNIRS optode. + + fNIRS uses two types of optodes, sources and detectors. + There can be multiple connections between each source + and detector at different wavelengths. This function + returns the location of each source and detector. 
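As a concrete illustration of what `_ensure_infos_match` compares, the same checks can be reproduced by hand on two `Info` objects; this is only a sketch with made-up channel names, not part of the diff:

```python
import numpy as np
import mne

info1 = mne.create_info(["EEG 001", "EEG 002"], sfreq=100.0, ch_types="eeg")
info2 = mne.create_info(["EEG 002", "EEG 001"], sfreq=100.0, ch_types="eeg")

# The same fields the helper inspects, checked manually:
assert info1["nchan"] == info2["nchan"]
assert info1["sfreq"] == info2["sfreq"]
assert set(info1["ch_names"]) == set(info2["ch_names"])
assert np.allclose(info1["dev_head_t"]["trans"], info2["dev_head_t"]["trans"])

# Channel *order* differs, which the helper treats as an error and points
# users at mne.match_channel_orders() to fix:
print(info1["ch_names"] == info2["ch_names"])  # False
```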
+ """ + from ..preprocessing.nirs import _fnirs_optode_names, _optode_position + + srcs, dets = _fnirs_optode_names(info) + ch_pos = {} + for optode in [*srcs, *dets]: + ch_pos[optode] = _optode_position(info, optode) + return ch_pos + + +def _camel_to_snake(s): + return re.sub(r"(? 0: + dir_tag = read_tag(fid, dirpos) + if dir_tag is None or dir_tag.data is None: + fid.seek(0, 2) # move to end of file + size = fid.tell() + extra = "" if size > dirpos else f" > file size {size}" + warn( + "FIF tag directory missing at the end of the file " + f"(at byte {dirpos}{extra}), possibly corrupted file: {fname}" + ) + else: + directory = dir_tag.data + read_slow = False + if read_slow: + pos = 0 + fid.seek(pos, 0) + directory = list() + while pos is not None: + tag = _read_tag_header(fid, pos) + if tag is None: + break # HACK : to fix file ending with empty tag... + pos = tag.next_pos + directory.append(tag) + + tree, _ = make_dir_tree(fid, directory, indent=1) + + logger.debug("[done]") + + # Back to the beginning + fid.seek(0) + + return fid, tree, directory + + +@verbose +def show_fiff( + fname, + indent=" ", + read_limit=np.inf, + max_str=30, + output=str, + tag=None, + *, + show_bytes=False, + verbose=None, +): + """Show FIFF information. + + This function is similar to mne_show_fiff. + + Parameters + ---------- + fname : path-like + Filename to evaluate. + indent : str + How to indent the lines. + read_limit : int + Max number of bytes of data to read from a tag. Can be np.inf + to always read all data (helps test read completion). + max_str : int + Max number of characters of string representation to print for + each tag's data. + output : type + Either str or list. str is a convenience output for printing. + tag : int | None + Provide information about this tag. If None (default), all information + is shown. + show_bytes : bool + If True (default False), print the byte offsets of each tag. + %(verbose)s + + Returns + ------- + contents : str + The contents of the file. 
+ """ + if output not in [list, str]: + raise ValueError("output must be list or str") + if isinstance(tag, str): # command mne show_fiff passes string + tag = int(tag) + fname = _check_fname(fname, "read", True) + f, tree, _ = fiff_open(fname) + # This gets set to 0 (unknown) by fiff_open, but FIFFB_ROOT probably + # makes more sense for display + tree["block"] = FIFF.FIFFB_ROOT + with f as fid: + out = _show_tree( + fid, + tree, + indent=indent, + level=0, + read_limit=read_limit, + max_str=max_str, + tag_id=tag, + show_bytes=show_bytes, + ) + if output is str: + out = "\n".join(out) + return out + + +def _find_type(value, fmts=("FIFF_",), exclude=("FIFF_UNIT",)): + """Find matching values.""" + value = int(value) + vals = [ + k + for k, v in FIFF.items() + if v == value + and any(fmt in k for fmt in fmts) + and not any(exc in k for exc in exclude) + ] + if len(vals) == 0: + vals = ["???"] + return vals + + +def _show_tree( + fid, + tree, + indent, + level, + read_limit, + max_str, + tag_id, + *, + show_bytes=False, +): + """Show FIFF tree.""" + this_idt = indent * level + next_idt = indent * (level + 1) + # print block-level information + found_types = "/".join(_find_type(tree["block"], fmts=["FIFFB_"])) + out = [f"{this_idt}{str(int(tree['block'])).ljust(4)} = {found_types}"] + tag_found = False + if tag_id is None or out[0].strip().startswith(str(tag_id)): + tag_found = True + + if tree["directory"] is not None: + kinds = [ent.kind for ent in tree["directory"]] + [-1] + types = [ent.type for ent in tree["directory"]] + sizes = [ent.size for ent in tree["directory"]] + poss = [ent.pos for ent in tree["directory"]] + counter = 0 + good = True + for k, kn, size, pos, type_ in zip(kinds[:-1], kinds[1:], sizes, poss, types): + if not tag_found and k != tag_id: + continue + tag = Tag(kind=k, type=type_, size=size, next=FIFF.FIFFV_NEXT_NONE, pos=pos) + if read_limit is None or size <= read_limit: + try: + tag = read_tag(fid, pos) + except Exception: + good = False + + if kn == k: + # don't print if the next item is the same type (count 'em) + counter += 1 + else: + if show_bytes: + at = f" @{pos}" + else: + at = "" + # find the tag type + this_type = _find_type(k, fmts=["FIFF_"]) + # prepend a count if necessary + prepend = "x" + str(counter + 1) + ": " if counter > 0 else "" + postpend = "" + # print tag data nicely + if tag.data is not None: + postpend = " = " + str(tag.data)[:max_str] + if isinstance(tag.data, np.ndarray): + if tag.data.size > 1: + postpend += " ... array size=" + str(tag.data.size) + elif isinstance(tag.data, dict): + postpend += " ... dict len=" + str(len(tag.data)) + elif isinstance(tag.data, str): + postpend += " ... str len=" + str(len(tag.data)) + elif isinstance(tag.data, list | tuple): + postpend += " ... list len=" + str(len(tag.data)) + elif issparse(tag.data): + postpend += ( + f" ... sparse ({tag.data.__class__.__name__}) shape=" + f"{tag.data.shape}" + ) + else: + postpend += " ... 
type=" + str(type(tag.data)) + postpend = ">" * 20 + f"BAD @{pos}" if not good else postpend + matrix_info = _matrix_info(tag) + if matrix_info is not None: + _, type_, _, _ = matrix_info + type_ = _call_dict_names.get(type_, f"?{type_}?") + this_type = "/".join(this_type) + out += [ + f"{next_idt}{prepend}{str(k).ljust(4)} = " + f"{this_type}{at} ({size}b {type_}) {postpend}" + ] + out[-1] = out[-1].replace("\n", "¶") + counter = 0 + good = True + if tag_id in kinds: + tag_found = True + if not tag_found: + out = [""] + level = -1 # removes extra indent + # deal with children + for branch in tree["children"]: + out += _show_tree( + fid, + branch, + indent, + level + 1, + read_limit, + max_str, + tag_id, + show_bytes=show_bytes, + ) + return out diff --git a/mne/_fiff/pick.py b/mne/_fiff/pick.py new file mode 100644 index 00000000000..3ffd6792d84 --- /dev/null +++ b/mne/_fiff/pick.py @@ -0,0 +1,1418 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import re +from copy import deepcopy + +import numpy as np + +from ..utils import ( + _check_option, + _ensure_int, + _validate_type, + fill_doc, + logger, + verbose, +) +from .constants import FIFF + + +def get_channel_type_constants(include_defaults=False): + """Return all known channel types, and associated FIFF constants. + + Parameters + ---------- + include_defaults : bool + Whether to include default values for "unit" and "coil_type" for all + entries (see Notes). Defaults are generally based on values normally + present for a VectorView MEG system. Defaults to ``False``. + + Returns + ------- + channel_types : dict + The keys are channel type strings, and the values are dictionaries of + FIFF constants for "kind", and possibly "unit" and "coil_type". + + Notes + ----- + Values which might vary within a channel type across real data + recordings are excluded unless ``include_defaults=True``. For example, + "ref_meg" channels may have coil type + ``FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD``, ``FIFFV_COIL_VV_MAG_T3``, etc + (depending on the recording system), so no "coil_type" entry is given + for "ref_meg" unless ``include_defaults`` is requested. 
+ """ + base = dict( + grad=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T_M), + mag=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T), + ref_meg=dict(kind=FIFF.FIFFV_REF_MEG_CH), + eeg=dict( + kind=FIFF.FIFFV_EEG_CH, unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_EEG + ), + seeg=dict( + kind=FIFF.FIFFV_SEEG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG, + ), + dbs=dict( + kind=FIFF.FIFFV_DBS_CH, unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_EEG + ), + ecog=dict( + kind=FIFF.FIFFV_ECOG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG, + ), + eog=dict(kind=FIFF.FIFFV_EOG_CH, unit=FIFF.FIFF_UNIT_V), + emg=dict(kind=FIFF.FIFFV_EMG_CH, unit=FIFF.FIFF_UNIT_V), + ecg=dict(kind=FIFF.FIFFV_ECG_CH, unit=FIFF.FIFF_UNIT_V), + resp=dict(kind=FIFF.FIFFV_RESP_CH, unit=FIFF.FIFF_UNIT_V), + bio=dict(kind=FIFF.FIFFV_BIO_CH, unit=FIFF.FIFF_UNIT_V), + misc=dict(kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V), + stim=dict(kind=FIFF.FIFFV_STIM_CH), + exci=dict(kind=FIFF.FIFFV_EXCI_CH), + syst=dict(kind=FIFF.FIFFV_SYST_CH), + ias=dict(kind=FIFF.FIFFV_IAS_CH), + gof=dict(kind=FIFF.FIFFV_GOODNESS_FIT), + dipole=dict(kind=FIFF.FIFFV_DIPOLE_WAVE), + chpi=dict( + kind=[ + FIFF.FIFFV_QUAT_0, + FIFF.FIFFV_QUAT_1, + FIFF.FIFFV_QUAT_2, + FIFF.FIFFV_QUAT_3, + FIFF.FIFFV_QUAT_4, + FIFF.FIFFV_QUAT_5, + FIFF.FIFFV_QUAT_6, + FIFF.FIFFV_HPI_G, + FIFF.FIFFV_HPI_ERR, + FIFF.FIFFV_HPI_MOV, + ] + ), + fnirs_cw_amplitude=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE, + ), + fnirs_fd_ac_amplitude=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE, + ), + fnirs_fd_phase=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_RAD, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_PHASE, + ), + fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_OD), + hbo=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_MOL, + coil_type=FIFF.FIFFV_COIL_FNIRS_HBO, + ), + hbr=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_MOL, + coil_type=FIFF.FIFFV_COIL_FNIRS_HBR, + ), + csd=dict( + kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V_M2, + coil_type=FIFF.FIFFV_COIL_EEG_CSD, + ), + temperature=dict(kind=FIFF.FIFFV_TEMPERATURE_CH, unit=FIFF.FIFF_UNIT_CEL), + gsr=dict(kind=FIFF.FIFFV_GALVANIC_CH, unit=FIFF.FIFF_UNIT_S), + eyegaze=dict( + kind=FIFF.FIFFV_EYETRACK_CH, coil_type=FIFF.FIFFV_COIL_EYETRACK_POS + ), + pupil=dict( + kind=FIFF.FIFFV_EYETRACK_CH, coil_type=FIFF.FIFFV_COIL_EYETRACK_PUPIL + ), + ) + if include_defaults: + coil_none = dict(coil_type=FIFF.FIFFV_COIL_NONE) + unit_none = dict(unit=FIFF.FIFF_UNIT_NONE) + defaults = dict( + grad=dict(coil_type=FIFF.FIFFV_COIL_VV_PLANAR_T1), + mag=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3), + ref_meg=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3, unit=FIFF.FIFF_UNIT_T), + misc=dict(**coil_none, **unit_none), # NB: overwrites UNIT_V + stim=dict(unit=FIFF.FIFF_UNIT_V, **coil_none), + eog=coil_none, + ecg=coil_none, + emg=coil_none, + bio=coil_none, + fnirs_od=unit_none, + pupil=unit_none, + eyegaze=dict(unit=FIFF.FIFF_UNIT_PX), + ) + for key, value in defaults.items(): + base[key].update(value) + return base + + +_first_rule = { + FIFF.FIFFV_MEG_CH: "meg", + FIFF.FIFFV_REF_MEG_CH: "ref_meg", + FIFF.FIFFV_EEG_CH: "eeg", + FIFF.FIFFV_STIM_CH: "stim", + FIFF.FIFFV_EOG_CH: "eog", + FIFF.FIFFV_EMG_CH: "emg", + FIFF.FIFFV_ECG_CH: "ecg", + FIFF.FIFFV_RESP_CH: "resp", + FIFF.FIFFV_MISC_CH: "misc", + FIFF.FIFFV_EXCI_CH: "exci", + FIFF.FIFFV_IAS_CH: 
"ias", + FIFF.FIFFV_SYST_CH: "syst", + FIFF.FIFFV_SEEG_CH: "seeg", + FIFF.FIFFV_DBS_CH: "dbs", + FIFF.FIFFV_BIO_CH: "bio", + FIFF.FIFFV_QUAT_0: "chpi", + FIFF.FIFFV_QUAT_1: "chpi", + FIFF.FIFFV_QUAT_2: "chpi", + FIFF.FIFFV_QUAT_3: "chpi", + FIFF.FIFFV_QUAT_4: "chpi", + FIFF.FIFFV_QUAT_5: "chpi", + FIFF.FIFFV_QUAT_6: "chpi", + FIFF.FIFFV_HPI_G: "chpi", + FIFF.FIFFV_HPI_ERR: "chpi", + FIFF.FIFFV_HPI_MOV: "chpi", + FIFF.FIFFV_DIPOLE_WAVE: "dipole", + FIFF.FIFFV_GOODNESS_FIT: "gof", + FIFF.FIFFV_ECOG_CH: "ecog", + FIFF.FIFFV_FNIRS_CH: "fnirs", + FIFF.FIFFV_TEMPERATURE_CH: "temperature", + FIFF.FIFFV_GALVANIC_CH: "gsr", + FIFF.FIFFV_EYETRACK_CH: "eyetrack", +} +# How to reduce our categories in channel_type (originally) +_second_rules = { + "meg": ("unit", {FIFF.FIFF_UNIT_T_M: "grad", FIFF.FIFF_UNIT_T: "mag"}), + "fnirs": ( + "coil_type", + { + FIFF.FIFFV_COIL_FNIRS_HBO: "hbo", + FIFF.FIFFV_COIL_FNIRS_HBR: "hbr", + FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE: "fnirs_cw_amplitude", + FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE: "fnirs_fd_ac_amplitude", + FIFF.FIFFV_COIL_FNIRS_FD_PHASE: "fnirs_fd_phase", + FIFF.FIFFV_COIL_FNIRS_OD: "fnirs_od", + }, + ), + "eeg": ( + "coil_type", + { + FIFF.FIFFV_COIL_EEG: "eeg", + FIFF.FIFFV_COIL_EEG_BIPOLAR: "eeg", + FIFF.FIFFV_COIL_NONE: "eeg", # MNE-C backward compat + FIFF.FIFFV_COIL_EEG_CSD: "csd", + }, + ), + "eyetrack": ( + "coil_type", + { + FIFF.FIFFV_COIL_EYETRACK_POS: "eyegaze", + FIFF.FIFFV_COIL_EYETRACK_PUPIL: "pupil", + }, + ), +} + + +@fill_doc +def channel_type(info, idx): + """Get channel type. + + Parameters + ---------- + %(info_not_none)s + idx : int + Index of channel. + + Returns + ------- + type : str + Type of channel. Will be one of:: + + {'bio', 'chpi', 'dbs', 'dipole', 'ecg', 'ecog', 'eeg', 'emg', + 'eog', 'exci', 'eyetrack', 'fnirs', 'gof', 'gsr', 'ias', 'misc', + 'meg', 'ref_meg', 'resp', 'seeg', 'stim', 'syst', 'temperature'} + """ + # This is faster than the original _channel_type_old now in test_pick.py + # because it uses (at most!) two dict lookups plus one conditional + # to get the channel type string. + ch = info["chs"][idx] + try: + first_kind = _first_rule[ch["kind"]] + except KeyError: + raise ValueError( + f'Unknown channel type ({ch["kind"]}) for channel "{ch["ch_name"]}"' + ) + if first_kind in _second_rules: + key, second_rule = _second_rules[first_kind] + first_kind = second_rule[ch[key]] + return first_kind + + +@verbose +def pick_channels(ch_names, include, exclude=(), ordered=True, *, verbose=None): + """Pick channels by names. + + Returns the indices of ``ch_names`` in ``include`` but not in ``exclude``. + + Parameters + ---------- + ch_names : list of str + List of channels. + include : list of str + List of channels to include (if empty include all available). + + .. note:: This is to be treated as a set. The order of this list + is not used or maintained in ``sel``. + + exclude : list of str + List of channels to exclude (if empty do not exclude any channel). + Defaults to []. + %(ordered)s + %(verbose)s + + Returns + ------- + sel : array of int + Indices of good channels. 
+ + See Also + -------- + pick_channels_regexp, pick_types + """ + if len(np.unique(ch_names)) != len(ch_names): + raise RuntimeError("ch_names is not a unique list, picking is unsafe") + _validate_type(ordered, bool, "ordered") + _check_excludes_includes(include) + _check_excludes_includes(exclude) + if not isinstance(include, list): + include = list(include) + if len(include) == 0: + include = list(ch_names) + if not isinstance(exclude, list): + exclude = list(exclude) + sel, missing = list(), list() + for name in include: + if name in ch_names: + if name not in exclude: + sel.append(ch_names.index(name)) + else: + missing.append(name) + if len(missing) and ordered: + raise ValueError( + f"Missing channels from ch_names required by include:\n{missing}" + ) + if not ordered: + sel = np.unique(sel) + return np.array(sel, int) + + +def pick_channels_regexp(ch_names, regexp): + """Pick channels using regular expression. + + Returns the indices of the good channels in ch_names. + + Parameters + ---------- + ch_names : list of str + List of channels. + + regexp : str + The regular expression. See python standard module for regular + expressions. + + Returns + ------- + sel : array of int + Indices of good channels. + + See Also + -------- + pick_channels + + Examples + -------- + >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1') + [0] + >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG *') + [0, 1, 2] + """ + r = re.compile(regexp) + return [k for k, name in enumerate(ch_names) if r.match(name)] + + +def _triage_meg_pick(ch, meg): + """Triage an MEG pick type.""" + if meg is True: + return True + elif ch["unit"] == FIFF.FIFF_UNIT_T_M: + if meg == "grad": + return True + elif meg == "planar1" and ch["ch_name"].endswith("2"): + return True + elif meg == "planar2" and ch["ch_name"].endswith("3"): + return True + elif meg == "mag" and ch["unit"] == FIFF.FIFF_UNIT_T: + return True + return False + + +def _triage_fnirs_pick(ch, fnirs, warned): + """Triage an fNIRS pick type.""" + if fnirs is True: + return True + elif ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_HBO and "hbo" in fnirs: + return True + elif ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_HBR and "hbr" in fnirs: + return True + elif ( + ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE + and "fnirs_cw_amplitude" in fnirs + ): + return True + elif ( + ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE + and "fnirs_fd_ac_amplitude" in fnirs + ): + return True + elif ( + ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_FD_PHASE and "fnirs_fd_phase" in fnirs + ): + return True + elif ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_OD and "fnirs_od" in fnirs: + return True + return False + + +def _triage_eyetrack_pick(ch, eyetrack): + """Triage an eyetrack pick type.""" + if eyetrack is False: + return False + elif eyetrack is True: + return True + elif ch["coil_type"] == FIFF.FIFFV_COIL_EYETRACK_PUPIL and "pupil" in eyetrack: + return True + elif ch["coil_type"] == FIFF.FIFFV_COIL_EYETRACK_POS and "eyegaze" in eyetrack: + return True + return False + + +def _check_meg_type(meg, allow_auto=False): + """Ensure a valid meg type.""" + if isinstance(meg, str): + allowed_types = ["grad", "mag", "planar1", "planar2"] + allowed_types += ["auto"] if allow_auto else [] + if meg not in allowed_types: + raise ValueError( + f"meg value must be one of {allowed_types} or bool, not {meg}" + ) + + +def _check_info_exclude(info, exclude): + _validate_type(info, "info") + info._check_consistency() + if exclude is None: + raise 
ValueError('exclude must be a list of strings or "bads"') + elif exclude == "bads": + exclude = info.get("bads", []) + elif not isinstance(exclude, list | tuple): + raise ValueError( + 'exclude must either be "bads" or a list of strings.' + " If only one channel is to be excluded, use " + "[ch_name] instead of passing ch_name." + ) + return exclude + + +@fill_doc +def pick_types( + info, + meg=False, + eeg=False, + stim=False, + eog=False, + ecg=False, + emg=False, + ref_meg="auto", + *, + misc=False, + resp=False, + chpi=False, + exci=False, + ias=False, + syst=False, + seeg=False, + dipole=False, + gof=False, + bio=False, + ecog=False, + fnirs=False, + csd=False, + dbs=False, + temperature=False, + gsr=False, + eyetrack=False, + include=(), + exclude="bads", + selection=None, +): + """Pick channels by type and names. + + Parameters + ---------- + %(info_not_none)s + %(pick_types_params)s + + Returns + ------- + sel : array of int + Indices of good channels. + """ + # NOTE: Changes to this function's signature should also be changed in + # PickChannelsMixin + _validate_type(meg, (bool, str), "meg") + + exclude = _check_info_exclude(info, exclude) + nchan = info["nchan"] + pick = np.zeros(nchan, dtype=bool) + + _check_meg_type(ref_meg, allow_auto=True) + _check_meg_type(meg) + if isinstance(ref_meg, str) and ref_meg == "auto": + ref_meg = ( + "comps" in info + and info["comps"] is not None + and len(info["comps"]) > 0 + and meg is not False + ) + + for param in ( + eeg, + stim, + eog, + ecg, + emg, + misc, + resp, + chpi, + exci, + ias, + syst, + seeg, + dipole, + gof, + bio, + ecog, + csd, + dbs, + temperature, + gsr, + ): + if not isinstance(param, bool): + w = ( + "Parameters for all channel types (with the exception of " + '"meg", "ref_meg", "fnirs", and "eyetrack") must be of type ' + "bool, not {}." 
+ ) + raise ValueError(w.format(type(param))) + + param_dict = dict( + eeg=eeg, + stim=stim, + eog=eog, + ecg=ecg, + emg=emg, + misc=misc, + resp=resp, + chpi=chpi, + exci=exci, + ias=ias, + syst=syst, + seeg=seeg, + dbs=dbs, + dipole=dipole, + gof=gof, + bio=bio, + ecog=ecog, + csd=csd, + temperature=temperature, + gsr=gsr, + eyetrack=eyetrack, + ) + + # avoid triage if possible + if isinstance(meg, bool): + for key in ("grad", "mag"): + param_dict[key] = meg + if isinstance(fnirs, bool): + for key in _FNIRS_CH_TYPES_SPLIT: + param_dict[key] = fnirs + warned = [False] + for k in range(nchan): + ch_type = channel_type(info, k) + try: + pick[k] = param_dict[ch_type] + except KeyError: # not so simple + assert ( + ch_type + in ("grad", "mag", "ref_meg") + + _FNIRS_CH_TYPES_SPLIT + + _EYETRACK_CH_TYPES_SPLIT + ) + if ch_type in ("grad", "mag"): + pick[k] = _triage_meg_pick(info["chs"][k], meg) + elif ch_type == "ref_meg": + pick[k] = _triage_meg_pick(info["chs"][k], ref_meg) + elif ch_type in ("eyegaze", "pupil"): + pick[k] = _triage_eyetrack_pick(info["chs"][k], eyetrack) + else: # ch_type in ('hbo', 'hbr') + pick[k] = _triage_fnirs_pick(info["chs"][k], fnirs, warned) + + # restrict channels to selection if provided + if selection is not None: + # the selection only restricts these types of channels + sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH, FIFF.FIFFV_EEG_CH] + for k in np.where(pick)[0]: + if ( + info["chs"][k]["kind"] in sel_kind + and info["ch_names"][k] not in selection + ): + pick[k] = False + + myinclude = [info["ch_names"][k] for k in range(nchan) if pick[k]] + myinclude += include + + if len(myinclude) == 0: + sel = np.array([], int) + else: + sel = pick_channels(info["ch_names"], myinclude, exclude, ordered=False) + + return sel + + +@verbose +def pick_info(info, sel=(), copy=True, verbose=None): + """Restrict an info structure to a selection of channels. + + Parameters + ---------- + %(info_not_none)s + sel : list of int | None + Indices of channels to include. If None, all channels + are included. + copy : bool + If copy is False, info is modified inplace. + %(verbose)s + + Returns + ------- + res : dict + Info structure restricted to a selection of channels. + """ + # avoid circular imports + from .meas_info import _bad_chans_comp + + info._check_consistency() + info = info.copy() if copy else info + if sel is None: + return info + elif len(sel) == 0: + raise ValueError("No channels match the selection.") + ch_set = set(info["ch_names"][k] for k in sel) + n_unique = len(ch_set) + if n_unique != len(sel): + raise ValueError( + f"Found {n_unique} / {len(sel)} unique names, sel is not unique" + ) + + # make sure required the compensation channels are present + if len(info.get("comps", [])) > 0: + ch_names = [info["ch_names"][idx] for idx in sel] + _, comps_missing = _bad_chans_comp(info, ch_names) + if len(comps_missing) > 0: + logger.info( + f"Removing {len(info['comps'])} compensators from info because " + "not all compensation channels were picked." 
+ ) + with info._unlock(): + info["comps"] = [] + with info._unlock(): + info["chs"] = [info["chs"][k] for k in sel] + info._update_redundant() + info["bads"] = [ch for ch in info["bads"] if ch in info["ch_names"]] + if "comps" in info: + comps = deepcopy(info["comps"]) + for c in comps: + row_idx = [ + k for k, n in enumerate(c["data"]["row_names"]) if n in info["ch_names"] + ] + row_names = [c["data"]["row_names"][i] for i in row_idx] + rowcals = c["rowcals"][row_idx] + c["rowcals"] = rowcals + c["data"]["nrow"] = len(row_names) + c["data"]["row_names"] = row_names + c["data"]["data"] = c["data"]["data"][row_idx] + with info._unlock(): + info["comps"] = comps + if info.get("custom_ref_applied", False) and not _electrode_types(info): + with info._unlock(): + info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF + # remove unused projectors + if info.get("projs", False): + projs = list() + for p in info["projs"]: + if any(ch_name in ch_set for ch_name in p["data"]["col_names"]): + projs.append(p) + if len(projs) != len(info["projs"]): + with info._unlock(): + info["projs"] = projs + info._check_consistency() + + return info + + +def _has_kit_refs(info, picks): + """Determine if KIT ref channels are chosen. + + This is currently only used by make_forward_solution, which cannot + run when KIT reference channels are included. + """ + for p in picks: + if info["chs"][p]["coil_type"] == FIFF.FIFFV_COIL_KIT_REF_MAG: + return True + return False + + +@verbose +def pick_channels_forward( + orig, include=(), exclude=(), ordered=True, copy=True, *, verbose=None +): + """Pick channels from forward operator. + + Parameters + ---------- + orig : dict + A forward solution. + include : list of str + List of channels to include (if empty, include all available). + Defaults to []. + exclude : list of str | 'bads' + Channels to exclude (if empty, do not exclude any). Defaults to []. + If 'bads', then exclude bad channels in orig. + %(ordered)s + copy : bool + If True (default), make a copy. + + .. versionadded:: 0.19 + %(verbose)s + + Returns + ------- + res : dict + Forward solution restricted to selected channels. If include and + exclude are empty it returns orig without copy. + """ + orig["info"]._check_consistency() + if len(include) == 0 and len(exclude) == 0: + return orig.copy() if copy else orig + exclude = _check_excludes_includes(exclude, info=orig["info"], allow_bads=True) + + # Allow for possibility of channel ordering in forward solution being + # different from that of the M/EEG file it is based on. + sel_sol = pick_channels( + orig["sol"]["row_names"], include=include, exclude=exclude, ordered=ordered + ) + sel_info = pick_channels( + orig["info"]["ch_names"], include=include, exclude=exclude, ordered=ordered + ) + + fwd = deepcopy(orig) if copy else orig + + # Check that forward solution and original data file agree on #channels + if len(sel_sol) != len(sel_info): + raise ValueError( + "Forward solution and functional data appear to " + "have different channel names, please check." + ) + + # Do we have something? 
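The two entry points above combine naturally: `pick_types` selects indices by channel type and `pick_info` restricts an `Info` to that selection. A minimal sketch with made-up channels:

```python
import mne

info = mne.create_info(
    ["EEG 001", "EEG 002", "EOG 061", "STI 014"],
    sfreq=250.0,
    ch_types=["eeg", "eeg", "eog", "stim"],
)

# Boolean flags per channel type; exclude="bads" (the default) drops info["bads"].
picks = mne.pick_types(info, meg=False, eeg=True, eog=True)
print(picks)  # [0 1 2]

# pick_info returns a copy of the Info restricted to the selection.
info_eeg = mne.pick_info(info, picks)
print(info_eeg["ch_names"])  # ['EEG 001', 'EEG 002', 'EOG 061']
```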
+ nuse = len(sel_sol) + if nuse == 0: + raise ValueError("Nothing remains after picking") + + logger.info(f" {nuse:d} out of {fwd['nchan']} channels remain after picking") + + # Pick the correct rows of the forward operator using sel_sol + fwd["sol"]["data"] = fwd["sol"]["data"][sel_sol, :] + fwd["_orig_sol"] = fwd["_orig_sol"][sel_sol, :] + fwd["sol"]["nrow"] = nuse + + ch_names = [fwd["sol"]["row_names"][k] for k in sel_sol] + fwd["nchan"] = nuse + fwd["sol"]["row_names"] = ch_names + + # Pick the appropriate channel names from the info-dict using sel_info + with fwd["info"]._unlock(): + fwd["info"]["chs"] = [fwd["info"]["chs"][k] for k in sel_info] + fwd["info"]._update_redundant() + fwd["info"]["bads"] = [b for b in fwd["info"]["bads"] if b in ch_names] + + if fwd["sol_grad"] is not None: + fwd["sol_grad"]["data"] = fwd["sol_grad"]["data"][sel_sol, :] + fwd["_orig_sol_grad"] = fwd["_orig_sol_grad"][sel_sol, :] + fwd["sol_grad"]["nrow"] = nuse + fwd["sol_grad"]["row_names"] = [ + fwd["sol_grad"]["row_names"][k] for k in sel_sol + ] + + return fwd + + +def pick_types_forward( + orig, + meg=False, + eeg=False, + ref_meg=True, + seeg=False, + ecog=False, + dbs=False, + include=(), + exclude=(), +): + """Pick by channel type and names from a forward operator. + + Parameters + ---------- + orig : dict + A forward solution. + meg : bool | str + If True include MEG channels. If string it can be 'mag', 'grad', + 'planar1' or 'planar2' to select only magnetometers, all gradiometers, + or a specific type of gradiometer. + eeg : bool + If True include EEG channels. + ref_meg : bool + If True include CTF / 4D reference channels. + seeg : bool + If True include stereotactic EEG channels. + ecog : bool + If True include electrocorticography channels. + dbs : bool + If True include deep brain stimulation channels. + include : list of str + List of additional channels to include. If empty do not include any. + exclude : list of str | str + List of channels to exclude. If empty do not exclude any (default). + If 'bads', exclude channels in orig['info']['bads']. + + Returns + ------- + res : dict + Forward solution restricted to selected channel types. + """ + info = orig["info"] + sel = pick_types( + info, + meg, + eeg, + ref_meg=ref_meg, + seeg=seeg, + ecog=ecog, + dbs=dbs, + include=include, + exclude=exclude, + ) + if len(sel) == 0: + raise ValueError("No valid channels found") + include_ch_names = [info["ch_names"][k] for k in sel] + + return pick_channels_forward(orig, include_ch_names) + + +@fill_doc +def channel_indices_by_type(info, picks=None, *, exclude=()): + """Get indices of channels by type. + + Parameters + ---------- + %(info_not_none)s + %(picks_all)s + exclude : list | str + Set of channels to exclude, only used when picking based on + types (e.g., exclude="bads" when picks="meg"). + + .. versionadded:: 1.10.0 + + Returns + ------- + idx_by_type : dict + A dictionary that maps each channel type to a (possibly empty) list of + channel indices. 
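To see the shape of the return value described above, here is a sketch calling `channel_indices_by_type` on a small made-up `Info`; the import path is the module added in this diff:

```python
import mne
from mne._fiff.pick import channel_indices_by_type  # module added in this diff

info = mne.create_info(
    ["EEG 001", "EOG 061", "MISC 01"], sfreq=100.0, ch_types=["eeg", "eog", "misc"]
)
idx_by_type = channel_indices_by_type(info)

# Every known type is present, possibly with an empty list of indices:
print(idx_by_type["eeg"], idx_by_type["eog"], idx_by_type["grad"])  # [0] [1] []
```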
+ """ + idx_by_type = { + key: list() + for key in _PICK_TYPES_KEYS + if key not in ("meg", "fnirs", "eyetrack") + } + idx_by_type.update( + mag=list(), + grad=list(), + hbo=list(), + hbr=list(), + fnirs_cw_amplitude=list(), + fnirs_fd_ac_amplitude=list(), + fnirs_fd_phase=list(), + fnirs_od=list(), + eyegaze=list(), + pupil=list(), + ) + picks = _picks_to_idx(info, picks, none="all", exclude=exclude, allow_empty=True) + for k in picks: + ch_type = channel_type(info, k) + for key in idx_by_type.keys(): + if ch_type == key: + idx_by_type[key].append(k) + return idx_by_type + + +@verbose +def pick_channels_cov( + orig, include=(), exclude="bads", ordered=True, copy=True, *, verbose=None +): + """Pick channels from covariance matrix. + + Parameters + ---------- + orig : Covariance + A covariance. + include : list of str, (optional) + List of channels to include (if empty, include all available). + exclude : list of str, (optional) | 'bads' + Channels to exclude (if empty, do not exclude any). Defaults to 'bads'. + %(ordered)s + copy : bool + If True (the default), return a copy of the covariance matrix with the + modified channels. If False, channels are modified in-place. + + .. versionadded:: 0.20.0 + %(verbose)s + + Returns + ------- + res : dict + Covariance solution restricted to selected channels. + """ + if copy: + orig = orig.copy() + # A little peculiarity of the cov objects is that these two fields + # should not be copied over when None. + if "method" in orig and orig["method"] is None: + del orig["method"] + if "loglik" in orig and orig["loglik"] is None: + del orig["loglik"] + + exclude = orig["bads"] if exclude == "bads" else exclude + sel = pick_channels( + orig["names"], include=include, exclude=exclude, ordered=ordered + ) + data = orig["data"][sel][:, sel] if not orig["diag"] else orig["data"][sel] + names = [orig["names"][k] for k in sel] + bads = [name for name in orig["bads"] if name in orig["names"]] + + orig["data"] = data + orig["names"] = names + orig["bads"] = bads + orig["dim"] = len(data) + + return orig + + +def _mag_grad_dependent(info): + """Determine of mag and grad should be dealt with jointly.""" + # right now just uses SSS, could be computed / checked from cov + # but probably overkill + return any( + ph.get("max_info", {}).get("sss_info", {}).get("in_order", 0) + for ph in info.get("proc_history", []) + ) + + +@fill_doc +def _contains_ch_type(info, ch_type): + """Check whether a certain channel type is in an info object. + + Parameters + ---------- + %(info_not_none)s + ch_type : str + the channel type to be checked for + + Returns + ------- + has_ch_type : bool + Whether the channel type is present or not. + """ + _validate_type(ch_type, "str", "ch_type") + + meg_extras = list(_MEG_CH_TYPES_SPLIT) + fnirs_extras = list(_FNIRS_CH_TYPES_SPLIT) + et_extras = list(_EYETRACK_CH_TYPES_SPLIT) + valid_channel_types = sorted( + [key for key in _PICK_TYPES_KEYS if key != "meg"] + + meg_extras + + fnirs_extras + + et_extras + ) + _check_option("ch_type", ch_type, valid_channel_types) + if info is None: + raise ValueError( + f'Cannot check for channels of type "{ch_type}" because info is None' + ) + return any(ch_type == channel_type(info, ii) for ii in range(info["nchan"])) + + +@fill_doc +def _picks_by_type(info, meg_combined=False, ref_meg=False, exclude="bads"): + """Get data channel indices as separate list of tuples. + + Parameters + ---------- + %(info_not_none)s + meg_combined : bool | 'auto' + Whether to return combined picks for grad and mag. 
+ Can be 'auto' to choose based on Maxwell filtering status. + ref_meg : bool + If True include CTF / 4D reference channels + exclude : list of str | str + List of channels to exclude. If 'bads' (default), exclude channels + in info['bads']. + + Returns + ------- + picks_list : list of tuples + The list of tuples of picks and the type string. + """ + _validate_type(ref_meg, bool, "ref_meg") + exclude = _check_info_exclude(info, exclude) + if meg_combined == "auto": + meg_combined = _mag_grad_dependent(info) + + picks_list = {ch_type: list() for ch_type in _DATA_CH_TYPES_SPLIT} + for k in range(info["nchan"]): + if info["chs"][k]["ch_name"] not in exclude: + this_type = channel_type(info, k) + try: + picks_list[this_type].append(k) + except KeyError: + # This annoyance is due to differences in pick_types + # and channel_type behavior + if this_type == "ref_meg": + ch = info["chs"][k] + if _triage_meg_pick(ch, ref_meg): + if ch["unit"] == FIFF.FIFF_UNIT_T: + picks_list["mag"].append(k) + elif ch["unit"] == FIFF.FIFF_UNIT_T_M: + picks_list["grad"].append(k) + else: + pass # not a data channel type + picks_list = [ + (ch_type, np.array(picks_list[ch_type], int)) + for ch_type in _DATA_CH_TYPES_SPLIT + ] + assert _DATA_CH_TYPES_SPLIT[:2] == ("mag", "grad") + if meg_combined and len(picks_list[0][1]) and len(picks_list[1][1]): + picks_list.insert( + 0, + ( + "meg", + np.unique(np.concatenate([picks_list.pop(0)[1], picks_list.pop(0)[1]])), + ), + ) + picks_list = [p for p in picks_list if len(p[1])] + return picks_list + + +def _check_excludes_includes(chs, info=None, allow_bads=False): + """Ensure that inputs to exclude/include are list-like or "bads". + + Parameters + ---------- + chs : any input, should be list, tuple, set, str + The channels passed to include or exclude. + allow_bads : bool + Allow the user to supply "bads" as a string for auto exclusion. + + Returns + ------- + chs : list + Channels to be excluded/excluded. If allow_bads, and chs=="bads", + this will be the bad channels found in 'info'. + """ + from .meas_info import Info + + if not isinstance(chs, list | tuple | set | np.ndarray): + if allow_bads is True: + if not isinstance(info, Info): + raise ValueError("Supply an info object if allow_bads is true") + elif chs != "bads": + raise ValueError('If chs is a string, it must be "bads"') + else: + chs = info["bads"] + else: + raise ValueError( + 'include/exclude must be list, tuple, ndarray, or "bads". You provided ' + f"type {type(chs)}." 
+ ) + return chs + + +_PICK_TYPES_DATA_DICT = dict( + meg=True, + eeg=True, + csd=True, + stim=False, + eog=False, + ecg=False, + emg=False, + misc=False, + resp=False, + chpi=False, + exci=False, + ias=False, + syst=False, + seeg=True, + dipole=False, + gof=False, + bio=False, + ecog=True, + fnirs=True, + dbs=True, + temperature=False, + gsr=False, + eyetrack=True, +) +_PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ["ref_meg"]) +_MEG_CH_TYPES_SPLIT = ("mag", "grad", "planar1", "planar2") +_FNIRS_CH_TYPES_SPLIT = ( + "hbo", + "hbr", + "fnirs_cw_amplitude", + "fnirs_fd_ac_amplitude", + "fnirs_fd_phase", + "fnirs_od", +) +_EYETRACK_CH_TYPES_SPLIT = ("eyegaze", "pupil") +_DATA_CH_TYPES_ORDER_DEFAULT = ( + ( + "mag", + "grad", + "eeg", + "csd", + "eog", + "ecg", + "resp", + "emg", + "ref_meg", + "misc", + "stim", + "chpi", + "exci", + "ias", + "syst", + "seeg", + "bio", + "ecog", + "dbs", + "temperature", + "gsr", + "gof", + "dipole", + ) + + _FNIRS_CH_TYPES_SPLIT + + _EYETRACK_CH_TYPES_SPLIT + + ("whitened",) +) + +# Valid data types, ordered for consistency, used in viz/evoked. +_VALID_CHANNEL_TYPES = ( + ( + "eeg", + "grad", + "mag", + "seeg", + "eog", + "ecg", + "resp", + "emg", + "dipole", + "gof", + "bio", + "ecog", + "dbs", + ) + + _FNIRS_CH_TYPES_SPLIT + + _EYETRACK_CH_TYPES_SPLIT + + ("misc", "csd") +) +_DATA_CH_TYPES_SPLIT = ( + "mag", + "grad", + "eeg", + "csd", + "seeg", + "ecog", + "dbs", +) + _FNIRS_CH_TYPES_SPLIT +# Electrode types (e.g., can be average-referenced together or separately) +_ELECTRODE_CH_TYPES = ("eeg", "ecog", "seeg", "dbs") + + +def _electrode_types(info, *, exclude="bads"): + return [ + ch_type + for ch_type in _ELECTRODE_CH_TYPES + if len(pick_types(info, exclude=exclude, **{ch_type: True})) + ] + + +def _pick_data_channels(info, exclude="bads", with_ref_meg=True, with_aux=False): + """Pick only data channels.""" + kwargs = _PICK_TYPES_DATA_DICT + if with_aux: + kwargs = kwargs.copy() + kwargs.update(eog=True, ecg=True, emg=True, bio=True) + return pick_types(info, ref_meg=with_ref_meg, exclude=exclude, **kwargs) + + +def _pick_data_or_ica(info, exclude=()): + """Pick only data or ICA channels.""" + if any(ch_name.startswith("ICA") for ch_name in info["ch_names"]): + picks = pick_types(info, exclude=exclude, misc=True) + else: + picks = _pick_data_channels(info, exclude=exclude, with_ref_meg=True) + return picks + + +def _picks_to_idx( + info, + picks, + none="data", + exclude="bads", + allow_empty=False, + with_ref_meg=True, + return_kind=False, + picks_on="channels", +): + """Convert and check pick validity. + + Parameters + ---------- + picks_on : str + 'channels' (default) for error messages about selection of channels. + 'components' for error messages about selection of components. 
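Because `_picks_to_idx` is the single place where all accepted `picks` forms are normalized, it is instructive to call it directly; it is an internal helper defined above (under `mne._fiff.pick` in this diff), so the sketch below is for illustration only and the channel names are made up:

```python
from mne import create_info
from mne._fiff.pick import _picks_to_idx  # internal helper defined above

info = create_info(
    ["EEG 001", "EEG 002", "EOG 061", "STI 014"],
    sfreq=100.0,
    ch_types=["eeg", "eeg", "eog", "stim"],
)

print(_picks_to_idx(info, None))                    # None -> "data"   -> [0 1]
print(_picks_to_idx(info, "all"))                   # generic string   -> [0 1 2 3]
print(_picks_to_idx(info, ["EOG 061", "STI 014"]))  # channel names    -> [2 3]
print(_picks_to_idx(info, "eeg"))                   # channel type     -> [0 1]
print(_picks_to_idx(info, slice(1, 3)))             # slice of indices -> [1 2]
```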
+ """ + from .meas_info import Info + + picked_ch_type_or_generic = False + # + # None -> all, data, or data_or_ica (ndarray of int) + # + if isinstance(info, Info): + n_chan = info["nchan"] + else: + info = _ensure_int(info, "info", "an int or Info") + n_chan = info + assert n_chan >= 0 + + orig_picks = picks + # We do some extra_repr gymnastics to avoid calling repr(orig_picks) too + # soon as it can be a performance bottleneck (repr on ndarray is slow) + extra_repr = "" + if picks is None: + if isinstance(info, int): # special wrapper for no real info + picks = np.arange(n_chan) + extra_repr = ", treated as range({n_chan})" + else: + picks = none # let _picks_str_to_idx handle it + extra_repr = f'None, treated as "{none}"' + + # + # slice + # + if isinstance(picks, slice): + picks = np.arange(n_chan)[picks] + + # + # -> ndarray of int (and make a copy) + # + picks = np.atleast_1d(picks) # this works even for picks == 'something' + picks = np.array([], dtype=int) if len(picks) == 0 else picks + if picks.ndim != 1: + raise ValueError(f"picks must be 1D, got {picks.ndim}D") + if picks.dtype.char in ("S", "U"): + picks = _picks_str_to_idx( + info, + picks, + exclude, + with_ref_meg, + return_kind, + extra_repr, + allow_empty, + orig_picks, + ) + if return_kind: + picked_ch_type_or_generic = picks[1] + picks = picks[0] + if picks.dtype.kind not in ["i", "u"]: + extra_ch = " or list of str (names)" if picks_on == "channels" else "" + msg = ( + f"picks must be a list of int (indices){extra_ch}. " + f"The provided data type {picks.dtype} is invalid." + ) + raise TypeError(msg) + del extra_repr + picks = picks.astype(int) + + # + # ensure we have (optionally non-empty) ndarray of valid int + # + if len(picks) == 0 and not allow_empty: + raise ValueError( + f"No appropriate {picks_on} found for the given picks ({orig_picks!r})" + ) + if (picks < -n_chan).any(): + raise IndexError(f"All picks must be >= {-n_chan}, got {repr(orig_picks)}") + if (picks >= n_chan).any(): + raise IndexError( + f"All picks must be < n_{picks_on} ({n_chan}), got {repr(orig_picks)}" + ) + picks %= n_chan # ensure positive + if return_kind: + return picks, picked_ch_type_or_generic + return picks + + +def _picks_str_to_idx( + info, picks, exclude, with_ref_meg, return_kind, extra_repr, allow_empty, orig_picks +): + """Turn a list of str into ndarray of int.""" + # special case for _picks_to_idx w/no info: shouldn't really happen + if isinstance(info, int): + raise ValueError( + "picks as str can only be used when measurement info is available" + ) + + # + # first: check our special cases + # + + picks_generic = list() + if len(picks) == 1: + if picks[0] in ("all", "data", "data_or_ica"): + if picks[0] == "all": + use_exclude = info["bads"] if exclude == "bads" else exclude + picks_generic = pick_channels( + info["ch_names"], info["ch_names"], exclude=use_exclude + ) + elif picks[0] == "data": + picks_generic = _pick_data_channels( + info, exclude=exclude, with_ref_meg=with_ref_meg + ) + elif picks[0] == "data_or_ica": + picks_generic = _pick_data_or_ica(info, exclude=exclude) + if len(picks_generic) == 0 and orig_picks is None and not allow_empty: + raise ValueError( + f"picks ({repr(orig_picks) + extra_repr}) yielded no channels, " + "consider passing picks explicitly" + ) + + # + # second: match all to channel names + # + + bad_names = [] + picks_name = list() + for pick in picks: + try: + picks_name.append(info["ch_names"].index(pick)) + except ValueError: + bad_names.append(pick) + + # + # third: match all to types + 
# + bad_type = None + picks_type = list() + kwargs = dict(meg=False) + meg, fnirs, eyetrack = set(), set(), set() + for pick in picks: + if pick in _PICK_TYPES_KEYS: + kwargs[pick] = True + elif pick in _MEG_CH_TYPES_SPLIT: + meg |= {pick} + elif pick in _FNIRS_CH_TYPES_SPLIT: + fnirs |= {pick} + elif pick in _EYETRACK_CH_TYPES_SPLIT: + eyetrack |= {pick} + else: + bad_type = pick + break + else: + # bad_type is None but this could still be empty + bad_type = list(picks) + # triage MEG, FNIRS, and eyetrack, which are complicated due to non-bool entries + extra_picks = set() + if "ref_meg" not in picks and not with_ref_meg: + kwargs["ref_meg"] = False + if len(meg) > 0 and not kwargs.get("meg", False): + # easiest just to iterate + for use_meg in meg: + extra_picks |= set( + pick_types(info, meg=use_meg, ref_meg=False, exclude=exclude) + ) + if len(fnirs) and not kwargs.get("fnirs", False): + idx = 0 if len(fnirs) == 1 else slice(None) + kwargs["fnirs"] = list(fnirs)[idx] + if len(eyetrack) and not kwargs.get("eyetrack", False): + idx = 0 if len(eyetrack) == 1 else slice(None) + kwargs["eyetrack"] = list(eyetrack)[idx] # slice(None) is equivalent to all + picks_type = pick_types(info, exclude=exclude, **kwargs) + if len(extra_picks) > 0: + picks_type = sorted(set(picks_type) | set(extra_picks)) + + # + # finally: ensure we have exactly one usable list + # + all_picks = (picks_generic, picks_name, picks_type) + any_found = [len(p) > 0 for p in all_picks] + if sum(any_found) == 0: + if not allow_empty: + raise ValueError( + f"picks ({repr(orig_picks) + extra_repr}) could not be interpreted as " + f'channel names (no channel "{bad_names}"), channel types (no type' + f' "{bad_type}" present), or a generic type (just "all" or "data")' + ) + picks = np.array([], int) + elif sum(any_found) > 1: + raise RuntimeError( + "Some channel names are ambiguously equivalent to " + "channel types, cannot use string-based " + "picks for these" + ) + else: + picks = np.array(all_picks[np.where(any_found)[0][0]]) + + picked_ch_type_or_generic = not len(picks_name) + if len(bad_names) > 0 and not picked_ch_type_or_generic: + raise ValueError( + f"Channel(s) {bad_names} could not be picked, because " + "they are not present in the info instance." + ) + + if return_kind: + return picks, picked_ch_type_or_generic + return picks diff --git a/mne/_fiff/proc_history.py b/mne/_fiff/proc_history.py new file mode 100644 index 00000000000..caa2d3de554 --- /dev/null +++ b/mne/_fiff/proc_history.py @@ -0,0 +1,345 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +import numpy as np + +from ..fixes import _csc_array_cast +from ..utils import _check_fname, warn +from .constants import FIFF +from .open import fiff_open, read_tag +from .tag import _float_item, _int_item, find_tag +from .tree import dir_tree_find +from .write import ( + _safe_name_list, + end_block, + start_block, + write_float, + write_float_matrix, + write_float_sparse, + write_id, + write_int, + write_int_matrix, + write_name_list_sanitized, + write_string, +) + +_proc_map = dict( # ID, caster, writer + parent_file_id=( + FIFF.FIFF_PARENT_FILE_ID, + dict, + write_id, + ), + block_id=( + FIFF.FIFF_BLOCK_ID, + dict, + write_id, + ), + parent_block_id=( + FIFF.FIFF_PARENT_BLOCK_ID, + dict, + write_id, + ), + date=( + FIFF.FIFF_MEAS_DATE, + lambda d: tuple(int(dd) for dd in d), + write_int, + ), + experimenter=( + FIFF.FIFF_EXPERIMENTER, + str, + write_string, + ), + creator=( + FIFF.FIFF_CREATOR, + str, + write_string, + ), +) + + +def _read_proc_history(fid, tree): + """Read processing history from fiff file. + + This function reads the SSS info, the CTC correction and the + calibaraions from the SSS processing logs inside af a raw file + (C.f. Maxfilter v2.2 manual (October 2010), page 21):: + + 104 = { 900 = proc. history + 104 = { 901 = proc. record + 103 = block ID + 204 = date + 212 = scientist + 113 = creator program + 104 = { 502 = SSS info + 264 = SSS task + 263 = SSS coord frame + 265 = SSS origin + 266 = SSS ins.order + 267 = SSS outs.order + 268 = SSS nr chnls + 269 = SSS components + 278 = SSS nfree + 243 = HPI g limit 0.98 + 244 = HPI dist limit 0.005 + 105 = } 502 = SSS info + 104 = { 504 = MaxST info + 264 = SSS task + 272 = SSST subspace correlation + 279 = SSST buffer length + 105 = } + 104 = { 501 = CTC correction + 103 = block ID + 204 = date + 113 = creator program + 800 = CTC matrix + 3417 = proj item chs + 105 = } 501 = CTC correction + 104 = { 503 = SSS finecalib. + 270 = SSS cal chnls + 271 = SSS cal coeff + 105 = } 503 = SSS finecalib. + 105 = } 901 = proc. record + 105 = } 900 = proc. 
history + """ + proc_history = dir_tree_find(tree, FIFF.FIFFB_PROCESSING_HISTORY) + out = list() + if len(proc_history) > 0: + proc_history = proc_history[0] + proc_records = dir_tree_find(proc_history, FIFF.FIFFB_PROCESSING_RECORD) + for proc_record in proc_records: + record = dict() + for i_ent in range(proc_record["nent"]): + kind = proc_record["directory"][i_ent].kind + pos = proc_record["directory"][i_ent].pos + for key, (id_, cast, _) in _proc_map.items(): + if kind == id_: + tag = read_tag(fid, pos) + record[key] = cast(tag.data) + break + else: + warn(f"Unknown processing history item {kind}") + record["max_info"] = _read_maxfilter_record(fid, proc_record) + iass = dir_tree_find(proc_record, FIFF.FIFFB_IAS) + if len(iass) > 0: + # XXX should eventually populate this + ss = [dict() for _ in range(len(iass))] + record["ias"] = ss + if len(record["max_info"]) > 0: + out.append(record) + return out + + +def _write_proc_history(fid, info): + """Write processing history to file.""" + if len(info["proc_history"]) > 0: + start_block(fid, FIFF.FIFFB_PROCESSING_HISTORY) + for record in info["proc_history"]: + start_block(fid, FIFF.FIFFB_PROCESSING_RECORD) + for key, (id_, _, writer) in _proc_map.items(): + if key in record: + writer(fid, id_, record[key]) + _write_maxfilter_record(fid, record["max_info"]) + if "ias" in record: + for _ in record["ias"]: + start_block(fid, FIFF.FIFFB_IAS) + # XXX should eventually populate this + end_block(fid, FIFF.FIFFB_IAS) + end_block(fid, FIFF.FIFFB_PROCESSING_RECORD) + end_block(fid, FIFF.FIFFB_PROCESSING_HISTORY) + + +_sss_info_keys = ( + "job", + "frame", + "origin", + "in_order", + "out_order", + "nchan", + "components", + "nfree", + "hpi_g_limit", + "hpi_dist_limit", +) +_sss_info_ids = ( + FIFF.FIFF_SSS_JOB, + FIFF.FIFF_SSS_FRAME, + FIFF.FIFF_SSS_ORIGIN, + FIFF.FIFF_SSS_ORD_IN, + FIFF.FIFF_SSS_ORD_OUT, + FIFF.FIFF_SSS_NMAG, + FIFF.FIFF_SSS_COMPONENTS, + FIFF.FIFF_SSS_NFREE, + FIFF.FIFF_HPI_FIT_GOOD_LIMIT, + FIFF.FIFF_HPI_FIT_DIST_LIMIT, +) +_sss_info_writers = ( + write_int, + write_int, + write_float, + write_int, + write_int, + write_int, + write_int, + write_int, + write_float, + write_float, +) +_sss_info_casters = ( + _int_item, + _int_item, + np.array, + _int_item, + _int_item, + _int_item, + np.array, + _int_item, + _float_item, + _float_item, +) + +_max_st_keys = ("job", "subspcorr", "buflen") +_max_st_ids = (FIFF.FIFF_SSS_JOB, FIFF.FIFF_SSS_ST_CORR, FIFF.FIFF_SSS_ST_LENGTH) +_max_st_writers = (write_int, write_float, write_float) +_max_st_casters = (_int_item, _float_item, _float_item) + +_sss_ctc_keys = ("block_id", "date", "creator", "decoupler") +_sss_ctc_ids = ( + FIFF.FIFF_BLOCK_ID, + FIFF.FIFF_MEAS_DATE, + FIFF.FIFF_CREATOR, + FIFF.FIFF_DECOUPLER_MATRIX, +) +_sss_ctc_writers = (write_id, write_int, write_string, write_float_sparse) +_sss_ctc_casters = (dict, np.array, str, _csc_array_cast) + +_sss_cal_keys = ("cal_chans", "cal_corrs") +_sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS) +_sss_cal_writers = (write_int_matrix, write_float_matrix) +_sss_cal_casters = (np.array, np.array) + + +def _read_ctc(fname): + """Read cross-talk correction matrix.""" + fname = _check_fname(fname, overwrite="read", must_exist=True) + f, tree, _ = fiff_open(fname) + with f as fid: + sss_ctc = _read_maxfilter_record(fid, tree)["sss_ctc"] + bad_str = f"Invalid cross-talk FIF: {fname}" + if len(sss_ctc) == 0: + raise ValueError(bad_str) + node = dir_tree_find(tree, FIFF.FIFFB_DATA_CORRECTION)[0] + comment = find_tag(fid, node, 
FIFF.FIFF_COMMENT).data + if comment != "cross-talk compensation matrix": + raise ValueError(bad_str) + sss_ctc["creator"] = find_tag(fid, node, FIFF.FIFF_CREATOR).data + sss_ctc["date"] = find_tag(fid, node, FIFF.FIFF_MEAS_DATE).data + return sss_ctc + + +def _read_maxfilter_record(fid, tree): + """Read maxfilter processing record from file.""" + sss_info_block = dir_tree_find(tree, FIFF.FIFFB_SSS_INFO) # 502 + sss_info = dict() + if len(sss_info_block) > 0: + sss_info_block = sss_info_block[0] + for i_ent in range(sss_info_block["nent"]): + kind = sss_info_block["directory"][i_ent].kind + pos = sss_info_block["directory"][i_ent].pos + for key, id_, cast in zip(_sss_info_keys, _sss_info_ids, _sss_info_casters): + if kind == id_: + tag = read_tag(fid, pos) + sss_info[key] = cast(tag.data) + break + + max_st_block = dir_tree_find(tree, FIFF.FIFFB_SSS_ST_INFO) # 504 + max_st = dict() + if len(max_st_block) > 0: + max_st_block = max_st_block[0] + for i_ent in range(max_st_block["nent"]): + kind = max_st_block["directory"][i_ent].kind + pos = max_st_block["directory"][i_ent].pos + for key, id_, cast in zip(_max_st_keys, _max_st_ids, _max_st_casters): + if kind == id_: + tag = read_tag(fid, pos) + max_st[key] = cast(tag.data) + break + + sss_ctc_block = dir_tree_find(tree, FIFF.FIFFB_CHANNEL_DECOUPLER) # 501 + sss_ctc = dict() + if len(sss_ctc_block) > 0: + sss_ctc_block = sss_ctc_block[0] + for i_ent in range(sss_ctc_block["nent"]): + kind = sss_ctc_block["directory"][i_ent].kind + pos = sss_ctc_block["directory"][i_ent].pos + for key, id_, cast in zip(_sss_ctc_keys, _sss_ctc_ids, _sss_ctc_casters): + if kind == id_: + tag = read_tag(fid, pos) + sss_ctc[key] = cast(tag.data) + break + else: + if kind == FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST: + tag = read_tag(fid, pos) + chs = _safe_name_list(tag.data, "read", "proj_items_chs") + # This list can null chars in the last entry, e.g.: + # [..., 'MEG2642', 'MEG2643', 'MEG2641\x00 ... 
\x00'] + chs[-1] = chs[-1].split("\x00")[0] + sss_ctc["proj_items_chs"] = chs + + sss_cal_block = dir_tree_find(tree, FIFF.FIFFB_SSS_CAL) # 503 + sss_cal = dict() + if len(sss_cal_block) > 0: + sss_cal_block = sss_cal_block[0] + for i_ent in range(sss_cal_block["nent"]): + kind = sss_cal_block["directory"][i_ent].kind + pos = sss_cal_block["directory"][i_ent].pos + for key, id_, cast in zip(_sss_cal_keys, _sss_cal_ids, _sss_cal_casters): + if kind == id_: + tag = read_tag(fid, pos) + sss_cal[key] = cast(tag.data) + break + + max_info = dict(sss_info=sss_info, sss_ctc=sss_ctc, sss_cal=sss_cal, max_st=max_st) + return max_info + + +def _write_maxfilter_record(fid, record): + """Write maxfilter processing record to file.""" + sss_info = record["sss_info"] + if len(sss_info) > 0: + start_block(fid, FIFF.FIFFB_SSS_INFO) + for key, id_, writer in zip(_sss_info_keys, _sss_info_ids, _sss_info_writers): + if key in sss_info: + writer(fid, id_, sss_info[key]) + end_block(fid, FIFF.FIFFB_SSS_INFO) + + max_st = record["max_st"] + if len(max_st) > 0: + start_block(fid, FIFF.FIFFB_SSS_ST_INFO) + for key, id_, writer in zip(_max_st_keys, _max_st_ids, _max_st_writers): + if key in max_st: + writer(fid, id_, max_st[key]) + end_block(fid, FIFF.FIFFB_SSS_ST_INFO) + + sss_ctc = record["sss_ctc"] + if len(sss_ctc) > 0: # dict has entries + start_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER) + for key, id_, writer in zip(_sss_ctc_keys, _sss_ctc_ids, _sss_ctc_writers): + if key in sss_ctc: + writer(fid, id_, sss_ctc[key]) + if "proj_items_chs" in sss_ctc: + write_name_list_sanitized( + fid, + FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST, + sss_ctc["proj_items_chs"], + "proj_items_chs", + ) + end_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER) + + sss_cal = record["sss_cal"] + if len(sss_cal) > 0: + start_block(fid, FIFF.FIFFB_SSS_CAL) + for key, id_, writer in zip(_sss_cal_keys, _sss_cal_ids, _sss_cal_writers): + if key in sss_cal: + writer(fid, id_, sss_cal[key]) + end_block(fid, FIFF.FIFFB_SSS_CAL) diff --git a/mne/_fiff/proj.py b/mne/_fiff/proj.py new file mode 100644 index 00000000000..aa010085904 --- /dev/null +++ b/mne/_fiff/proj.py @@ -0,0 +1,1187 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import re +from copy import deepcopy +from itertools import count + +import numpy as np + +from ..defaults import _BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT, _INTERPOLATION_DEFAULT +from ..fixes import _safe_svd +from ..utils import ( + _check_option, + _validate_type, + fill_doc, + logger, + object_diff, + verbose, + warn, +) +from .constants import FIFF +from .pick import _ELECTRODE_CH_TYPES, _electrode_types, pick_info, pick_types +from .tag import _rename_list, find_tag +from .tree import dir_tree_find +from .write import ( + _safe_name_list, + end_block, + start_block, + write_float, + write_float_matrix, + write_int, + write_name_list_sanitized, + write_string, +) + + +class Projection(dict): + """Dictionary-like object holding a projection vector. + + Projection vectors are stored in a list in ``inst.info["projs"]``. Each projection + vector has 5 keys: ``active``, ``data``, ``desc``, ``explained_var``, ``kind``. + + .. warning:: This class is generally not meant to be instantiated + directly, use ``compute_proj_*`` functions instead. + + Parameters + ---------- + data : dict + The data dictionary. + desc : str + The projector description. + kind : int + The projector kind. + active : bool + Whether or not the projector has been applied. 
+    explained_var : float | None
+        The proportion of explained variance.
+    """
+
+    def __init__(
+        self,
+        *,
+        data,
+        desc="",
+        kind=FIFF.FIFFV_PROJ_ITEM_FIELD,
+        active=False,
+        explained_var=None,
+    ):
+        super().__init__(
+            desc=desc, kind=kind, active=active, data=data, explained_var=explained_var
+        )
+
+    def __repr__(self):  # noqa: D105
+        s = str(self["desc"])
+        s += f", active : {self['active']}"
+        s += f", n_channels : {len(self['data']['col_names'])}"
+        if self["explained_var"] is not None:
+            s += f", exp. var : {self['explained_var'] * 100:0.2f}%"
+        return f"<Projection | {s}>"
+
+    # speed up info copy by taking advantage of mutability
+    def __deepcopy__(self, memodict):
+        """Make a deepcopy."""
+        cls = self.__class__
+        result = cls.__new__(cls)
+        for k, v in self.items():
+            if k == "data":
+                v = v.copy()
+                v["data"] = v["data"].copy()
+                result[k] = v
+            else:
+                result[k] = v  # kind, active, desc, explained_var immutable
+        return result
+
+    def __eq__(self, other):
+        """Equality == method."""
+        return True if len(object_diff(self, other)) == 0 else False
+
+    def __ne__(self, other):
+        """Different != method."""
+        return not self.__eq__(other)
+
+    @fill_doc
+    def plot_topomap(
+        self,
+        info,
+        *,
+        sensors=True,
+        show_names=False,
+        contours=6,
+        outlines="head",
+        sphere=None,
+        image_interp=_INTERPOLATION_DEFAULT,
+        extrapolate=_EXTRAPOLATE_DEFAULT,
+        border=_BORDER_DEFAULT,
+        res=64,
+        size=1,
+        cmap=None,
+        vlim=(None, None),
+        cnorm=None,
+        colorbar=False,
+        cbar_fmt="%3.1f",
+        units=None,
+        axes=None,
+        show=True,
+    ):
+        """Plot topographic maps of SSP projections.
+
+        Parameters
+        ----------
+        %(info_not_none)s Used to determine the layout.
+        %(sensors_topomap)s
+        %(show_names_topomap)s
+
+            .. versionadded:: 1.2
+        %(contours_topomap)s
+        %(outlines_topomap)s
+        %(sphere_topomap_auto)s
+        %(image_interp_topomap)s
+        %(extrapolate_topomap)s
+
+            .. versionadded:: 1.2
+        %(border_topomap)s
+
+            .. versionadded:: 0.20
+        %(res_topomap)s
+        %(size_topomap)s
+        %(cmap_topomap)s
+        %(vlim_plot_topomap_proj)s
+        %(cnorm)s
+
+            .. versionadded:: 1.2
+        %(colorbar_topomap)s
+        %(cbar_fmt_topomap)s
+
+            .. versionadded:: 1.2
+        %(units_topomap)s
+
+            .. versionadded:: 1.2
+        %(axes_plot_projs_topomap)s
+        %(show)s
+
+        Returns
+        -------
+        fig : instance of Figure
+            Figure distributing one image per channel across sensor topography.
+
+        Notes
+        -----
+        .. versionadded:: 0.15.0
+        """  # noqa: E501
+        from ..viz.topomap import plot_projs_topomap
+
+        return plot_projs_topomap(
+            self,
+            info,
+            sensors=sensors,
+            show_names=show_names,
+            contours=contours,
+            outlines=outlines,
+            sphere=sphere,
+            image_interp=image_interp,
+            extrapolate=extrapolate,
+            border=border,
+            res=res,
+            size=size,
+            cmap=cmap,
+            vlim=vlim,
+            cnorm=cnorm,
+            colorbar=colorbar,
+            cbar_fmt=cbar_fmt,
+            units=units,
+            axes=axes,
+            show=show,
+        )
+
+
+class ProjMixin:
+    """Mixin class for Raw, Evoked, Epochs.
+
+    Notes
+    -----
+    This mixin adds a proj attribute as a property to data containers.
+    It is True if at least one proj is present and all of them are active.
+    The projs might not be applied yet if data are not preloaded. In
+    this case it's the _projector attribute that does the job.
+    If a private _data attribute is present then the projs applied
+    to it are the ones marked as active.
+
+    A proj parameter passed in the constructor of Raw or Epochs calls
+    apply_proj, so afterwards the .proj attribute is True.
+
+    As soon as you've applied the projs it will stay active in the
+    remaining pipeline.
+ + The suggested pipeline is proj=True in epochs (it's cheaper than for raw). + + When you use delayed SSP in Epochs, projs are applied when you call + get_data() method. They are not applied to the evoked._data unless you call + apply_proj(). The reason is that you want to reject with projs although + it's not stored in proj mode. + """ + + @property + def proj(self): + """Whether or not projections are active.""" + return len(self.info["projs"]) > 0 and all( + p["active"] for p in self.info["projs"] + ) + + @verbose + def add_proj(self, projs, remove_existing=False, verbose=None): + """Add SSP projection vectors. + + Parameters + ---------- + projs : list + List with projection vectors. + remove_existing : bool + Remove the projection vectors currently in the file. + %(verbose)s + + Returns + ------- + self : instance of Raw | Epochs | Evoked + The data container. + """ + if isinstance(projs, Projection): + projs = [projs] + + if not isinstance(projs, list) and not all( + isinstance(p, Projection) for p in projs + ): + raise ValueError("Only projs can be added. You supplied something else.") + + # mark proj as inactive, as they have not been applied + projs = deactivate_proj(projs, copy=True) + if remove_existing: + # we cannot remove the proj if they are active + if any(p["active"] for p in self.info["projs"]): + raise ValueError( + "Cannot remove projectors that have already been applied" + ) + with self.info._unlock(): + self.info["projs"] = projs + else: + self.info["projs"].extend(projs) + # We don't want to add projectors that are activated again. + with self.info._unlock(): + self.info["projs"] = _uniquify_projs( + self.info["projs"], check_active=False, sort=False + ) + return self + + @verbose + def apply_proj(self, verbose=None): + """Apply the signal space projection (SSP) operators to the data. + + Parameters + ---------- + %(verbose)s + + Returns + ------- + self : instance of Raw | Epochs | Evoked + The instance. + + Notes + ----- + Once the projectors have been applied, they can no longer be + removed. It is usually not recommended to apply the projectors at + too early stages, as they are applied automatically later on + (e.g. when computing inverse solutions). + Hint: using the copy method individual projection vectors + can be tested without affecting the original data. + With evoked data, consider the following example:: + + projs_a = mne.read_proj('proj_a.fif') + projs_b = mne.read_proj('proj_b.fif') + # add the first, copy, apply and see ... + evoked.add_proj(a).copy().apply_proj().plot() + # add the second, copy, apply and see ... + evoked.add_proj(b).copy().apply_proj().plot() + # drop the first and see again + evoked.copy().del_proj(0).apply_proj().plot() + evoked.apply_proj() # finally keep both + """ + from ..epochs import BaseEpochs + from ..evoked import Evoked + from ..io import BaseRaw + + if self.info["projs"] is None or len(self.info["projs"]) == 0: + logger.info( + "No projector specified for this dataset. " + "Please consider the method self.add_proj." + ) + return self + + # Exit delayed mode if you apply proj + if isinstance(self, BaseEpochs) and self._do_delayed_proj: + logger.info("Leaving delayed SSP mode.") + self._do_delayed_proj = False + + if all(p["active"] for p in self.info["projs"]): + logger.info( + "Projections have already been applied. Setting proj attribute to True." 
+ ) + return self + + _projector, info = setup_proj( + deepcopy(self.info), add_eeg_ref=False, activate=True + ) + # let's not raise a RuntimeError here, otherwise interactive plotting + if _projector is None: # won't be fun. + logger.info("The projections don't apply to these data. Doing nothing.") + return self + self._projector, self.info = _projector, info + if isinstance(self, BaseRaw | Evoked): + if self.preload: + self._data = np.dot(self._projector, self._data) + else: # BaseEpochs + if self.preload: + for ii, e in enumerate(self._data): + self._data[ii] = self._project_epoch(e) + else: + self.load_data() # will automatically apply + logger.info("SSP projectors applied...") + return self + + def del_proj(self, idx="all"): + """Remove SSP projection vector. + + .. note:: The projection vector can only be removed if it is inactive + (has not been applied to the data). + + Parameters + ---------- + idx : int | list of int | str + Index of the projector to remove. Can also be "all" (default) + to remove all projectors. + + Returns + ------- + self : instance of Raw | Epochs | Evoked + The instance. + """ + if isinstance(idx, str) and idx == "all": + idx = list(range(len(self.info["projs"]))) + idx = np.atleast_1d(np.array(idx, int)).ravel() + + for ii in idx: + proj = self.info["projs"][ii] + if proj["active"] and set(self.info["ch_names"]) & set( + proj["data"]["col_names"] + ): + msg = ( + f"Cannot remove projector that has already been " + f"applied, unless you first remove all channels it " + f"applies to. The problematic projector is: {proj}" + ) + raise ValueError(msg) + + keep = np.ones(len(self.info["projs"])) + keep[idx] = False # works with negative indexing and does checks + with self.info._unlock(): + self.info["projs"] = [p for p, k in zip(self.info["projs"], keep) if k] + return self + + @fill_doc + def plot_projs_topomap( + self, + ch_type=None, + *, + sensors=True, + show_names=False, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap=None, + vlim=(None, None), + cnorm=None, + colorbar=False, + cbar_fmt="%3.1f", + units=None, + axes=None, + show=True, + ): + """Plot SSP vector. + + Parameters + ---------- + %(ch_type_topomap_proj)s + %(sensors_topomap)s + %(show_names_topomap)s + + .. versionadded:: 1.2 + %(contours_topomap)s + %(outlines_topomap)s + %(sphere_topomap_auto)s + %(image_interp_topomap)s + %(extrapolate_topomap)s + + .. versionadded:: 0.20 + + .. versionchanged:: 0.21 + + - The default was changed to ``'local'`` for MEG sensors. + - ``'local'`` was changed to use a convex hull mask + - ``'head'`` was changed to extrapolate out to the clipping circle. + %(border_topomap)s + + .. versionadded:: 0.20 + %(res_topomap)s + %(size_topomap)s + Only applies when plotting multiple topomaps at a time. + %(cmap_topomap)s + %(vlim_plot_topomap_proj)s + %(cnorm)s + + .. versionadded:: 1.2 + %(colorbar_topomap)s + %(cbar_fmt_topomap)s + + .. versionadded:: 1.2 + %(units_topomap)s + + .. versionadded:: 1.2 + %(axes_plot_projs_topomap)s + %(show)s + + Returns + ------- + fig : instance of Figure + Figure distributing one image per channel across sensor topography. 
+ """ + _projs = [deepcopy(_proj) for _proj in self.info["projs"]] + if _projs is None or len(_projs) == 0: + raise ValueError("No projectors in Info; nothing to plot.") + if ch_type is not None: + # make sure the requested channel type(s) exist + _validate_type(ch_type, (str, list, tuple), "ch_type") + if isinstance(ch_type, str): + ch_type = [ch_type] + bad_ch_types = [_type not in self for _type in ch_type] + if any(bad_ch_types): + raise ValueError( + f"ch_type {ch_type[bad_ch_types]} not " + f"present in {self.__class__.__name__}." + ) + # remove projs from unrequested channel types. This is a bit + # convoluted because Projection objects don't store channel types, + # only channel names + available_ch_types = np.array(self.get_channel_types()) + for _proj in _projs[::-1]: + idx = np.isin(self.ch_names, _proj["data"]["col_names"]) + proj_ch_type = np.unique(available_ch_types[idx]) + err_msg = "Projector contains multiple channel types" + assert len(proj_ch_type) == 1, err_msg + if proj_ch_type[0] != ch_type: + _projs.remove(_proj) + if len(_projs) == 0: + raise ValueError( + f"Nothing to plot (no projectors for channel type {ch_type})." + ) + # now we have non-empty _projs list with correct channel type(s) + from ..viz.topomap import plot_projs_topomap + + fig = plot_projs_topomap( + _projs, + self.info, + sensors=sensors, + show_names=show_names, + contours=contours, + outlines=outlines, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + border=border, + res=res, + size=size, + cmap=cmap, + vlim=vlim, + cnorm=cnorm, + colorbar=colorbar, + cbar_fmt=cbar_fmt, + units=units, + axes=axes, + show=show, + ) + return fig + + def _reconstruct_proj(self, mode="accurate", origin="auto"): + from ..forward import _map_meg_or_eeg_channels + + if len(self.info["projs"]) == 0: + return self + self.apply_proj() + for kind in ("meg", "eeg"): + kwargs = dict(meg=False) + kwargs[kind] = True + picks = pick_types(self.info, **kwargs) + if len(picks) == 0: + continue + info_from = pick_info(self.info, picks) + info_to = info_from.copy() + with info_to._unlock(): + info_to["projs"] = [] + if kind == "eeg" and _has_eeg_average_ref_proj(info_from): + info_to["projs"] = [ + make_eeg_average_ref_proj(info_to, verbose=False) + ] + mapping = _map_meg_or_eeg_channels( + info_from, info_to, mode=mode, origin=origin + ) + self.data[..., picks, :] = np.matmul(mapping, self.data[..., picks, :]) + return self + + +def _proj_equal(a, b, check_active=True): + """Test if two projectors are equal.""" + equal = ( + (a["active"] == b["active"] or not check_active) + and a["kind"] == b["kind"] + and a["desc"] == b["desc"] + and a["data"]["col_names"] == b["data"]["col_names"] + and a["data"]["row_names"] == b["data"]["row_names"] + and a["data"]["ncol"] == b["data"]["ncol"] + and a["data"]["nrow"] == b["data"]["nrow"] + and np.all(a["data"]["data"] == b["data"]["data"]) + ) + return equal + + +@verbose +def _read_proj(fid, node, *, ch_names_mapping=None, verbose=None): + ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping + projs = list() + + # Locate the projection data + nodes = dir_tree_find(node, FIFF.FIFFB_PROJ) + if len(nodes) == 0: + return projs + + # This might exist but we won't use it: + # global_nchan = None + # tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN) + # if tag is not None: + # global_nchan = int(tag.data.item()) + + items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM) + for item in items: + # Find all desired tags in one item + + # This probably also exists 
but used to be written incorrectly + # sometimes + # tag = find_tag(fid, item, FIFF.FIFF_NCHAN) + # if tag is not None: + # nchan = int(tag.data.item()) + # else: + # nchan = global_nchan + + tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION) + if tag is not None: + desc = tag.data + else: + tag = find_tag(fid, item, FIFF.FIFF_NAME) + if tag is not None: + desc = tag.data + else: + raise ValueError("Projection item description missing") + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND) + if tag is not None: + kind = int(tag.data.item()) + else: + raise ValueError("Projection item kind missing") + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC) + if tag is not None: + nvec = int(tag.data.item()) + else: + raise ValueError("Number of projection vectors not specified") + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST) + if tag is not None: + names = _safe_name_list(tag.data, "read", "names") + else: + raise ValueError("Projection item channel list missing") + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS) + if tag is not None: + data = tag.data + else: + raise ValueError("Projection item data missing") + + tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE) + if tag is not None: + active = bool(tag.data.item()) + else: + active = False + + tag = find_tag(fid, item, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR) + if tag is not None: + explained_var = float(tag.data.item()) + else: + explained_var = None + + # handle the case when data is transposed for some reason + if data.shape[0] == len(names) and data.shape[1] == nvec: + data = data.T + + if data.shape[1] != len(names): + raise ValueError( + "Number of channel names does not match the size of data matrix" + ) + + # just always use this, we used to have bugs with writing the + # number correctly... + nchan = len(names) + names[:] = _rename_list(names, ch_names_mapping) + # Use exactly the same fields in data as in a named matrix + one = Projection( + kind=kind, + active=active, + desc=desc, + data=dict( + nrow=nvec, ncol=nchan, row_names=None, col_names=names, data=data + ), + explained_var=explained_var, + ) + + projs.append(one) + + if len(projs) > 0: + logger.info(f" Read a total of {len(projs)} projection items:") + for proj in projs: + misc = "active" if proj["active"] else " idle" + logger.info( + f" {proj['desc']} " + f"({proj['data']['nrow']} x " + f"{len(proj['data']['col_names'])}) {misc}" + ) + + return projs + + +############################################################################### +# Write + + +def _write_proj(fid, projs, *, ch_names_mapping=None): + """Write a projection operator to a file. + + Parameters + ---------- + fid : file + The file descriptor of the open file. + projs : dict + The projection operator. 
+ """ + if len(projs) == 0: + return + + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping + # validation + _validate_type(projs, (list, tuple), "projs") + for pi, proj in enumerate(projs): + _validate_type(proj, Projection, f"projs[{pi}]") + + start_block(fid, FIFF.FIFFB_PROJ) + + for proj in projs: + start_block(fid, FIFF.FIFFB_PROJ_ITEM) + write_int(fid, FIFF.FIFF_NCHAN, len(proj["data"]["col_names"])) + names = _rename_list(proj["data"]["col_names"], ch_names_mapping) + write_name_list_sanitized( + fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST, names, "col_names" + ) + write_string(fid, FIFF.FIFF_NAME, proj["desc"]) + write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj["kind"]) + if proj["kind"] == FIFF.FIFFV_PROJ_ITEM_FIELD: + write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0) + + write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj["data"]["nrow"]) + write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj["active"]) + write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS, proj["data"]["data"]) + if proj["explained_var"] is not None: + write_float(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR, proj["explained_var"]) + end_block(fid, FIFF.FIFFB_PROJ_ITEM) + + end_block(fid, FIFF.FIFFB_PROJ) + + +############################################################################### +# Utils + + +def _check_projs(projs, copy=True): + """Check that projs is a list of Projection.""" + _validate_type(projs, (list, tuple), "projs") + for pi, p in enumerate(projs): + _validate_type(p, Projection, f"projs[{pi}]") + return deepcopy(projs) if copy else projs + + +def make_projector(projs, ch_names, bads=(), include_active=True): + """Create an SSP operator from SSP projection vectors. + + Parameters + ---------- + projs : list + List of projection vectors. + ch_names : list of str + List of channels to include in the projection matrix. + bads : list of str + Some bad channels to exclude. If bad channels were marked + in the raw file when projs were calculated using mne-python, + they should not need to be included here as they will + have been automatically omitted from the projectors. + include_active : bool + Also include projectors that are already active. + + Returns + ------- + proj : array of shape [n_channels, n_channels] + The projection operator to apply to the data. + nproj : int + How many items in the projector. + U : array + The orthogonal basis of the projection vectors. + """ + return _make_projector(projs, ch_names, bads, include_active) + + +def _make_projector(projs, ch_names, bads=(), include_active=True, inplace=False): + """Subselect projs based on ch_names and bads. + + Use inplace=True mode to modify ``projs`` inplace so that no + warning will be raised next time projectors are constructed with + the given inputs. If inplace=True, no meaningful data are returned. 
+ """ + nchan = len(ch_names) + if nchan == 0: + raise ValueError("No channel names specified") + + default_return = (np.eye(nchan, nchan), 0, np.empty((nchan, 0))) + + # Check trivial cases first + if projs is None: + return default_return + + nvec = 0 + nproj = 0 + for p in projs: + if not p["active"] or include_active: + nproj += 1 + nvec += p["data"]["nrow"] + + if nproj == 0: + return default_return + + # Pick the appropriate entries + vecs = np.zeros((nchan, nvec)) + nvec = 0 + nonzero = 0 + bads = set(bads) + for k, p in enumerate(projs): + if not p["active"] or include_active: + if len(p["data"]["col_names"]) != len(np.unique(p["data"]["col_names"])): + raise ValueError( + f"Channel name list in projection item {k} contains duplicate items" + ) + + # Get the two selection vectors to pick correct elements from + # the projection vectors omitting bad channels + sel = [] + vecsel = [] + p_set = set(p["data"]["col_names"]) # faster membership access + for c, name in enumerate(ch_names): + if name not in bads and name in p_set: + sel.append(c) + vecsel.append(p["data"]["col_names"].index(name)) + + # If there is something to pick, pickit + nrow = p["data"]["nrow"] + this_vecs = vecs[:, nvec : nvec + nrow] + if len(sel) > 0: + this_vecs[sel] = p["data"]["data"][:, vecsel].T + + # Rescale for better detection of small singular values + for v in range(p["data"]["nrow"]): + psize = np.linalg.norm(this_vecs[:, v]) + if psize > 0: + orig_n = p["data"]["data"].any(axis=0).sum() + # Average ref still works if channels are removed + # Use relative power to determine if we're in trouble. + # 10% loss is hopefully a reasonable threshold. + if ( + psize < 0.9 + and not inplace + and ( + p["kind"] != FIFF.FIFFV_PROJ_ITEM_EEG_AVREF + or len(vecsel) == 1 + ) + ): + warn( + f"Projection vector {repr(p['desc'])} has been " + f"reduced to {100 * psize:0.2f}% of its " + "original magnitude by subselecting " + f"{len(vecsel)}/{orig_n} of the original " + "channels. If the ignored channels were bad " + "during SSP computation, we recommend " + "recomputing proj (via compute_proj_raw " + "or related functions) with the bad channels " + "properly marked, because computing SSP with bad " + "channels present in the data but unmarked is " + "dangerous (it can bias the PCA used by SSP). " + "On the other hand, if you know that all channels " + "were good during SSP computation, you can safely " + "use info.normalize_proj() to suppress this " + "warning during projection." + ) + this_vecs[:, v] /= psize + nonzero += 1 + # If doing "inplace" mode, "fix" the projectors to only operate + # on this subset of channels. + if inplace: + p["data"]["data"] = this_vecs[sel].T + p["data"]["col_names"] = [p["data"]["col_names"][ii] for ii in vecsel] + p["data"]["ncol"] = len(p["data"]["col_names"]) + nvec += p["data"]["nrow"] + + # Check whether all of the vectors are exactly zero + if nonzero == 0 or inplace: + return default_return + + # Reorthogonalize the vectors + U, S, _ = _safe_svd(vecs[:, :nvec], full_matrices=False) + + # Throw away the linearly dependent guys + nproj = np.sum((S / S[0]) > 1e-2) + U = U[:, :nproj] + + # Here is the celebrated result + proj = np.eye(nchan, nchan) - np.dot(U, U.T) + if nproj >= nchan: # e.g., 3 channels and 3 projectors + raise RuntimeError( + f"Application of {nproj} projectors for {nchan} channels " + "will yield no components." + ) + + return proj, nproj, U + + +def _normalize_proj(info): + """Normalize proj after subselection to avoid warnings. 
+ + This is really only useful for tests, and might not be needed + eventually if we change or improve our handling of projectors + with picks. + """ + # Here we do info.get b/c info can actually be a noise cov + _make_projector( + info["projs"], + info.get("ch_names", info.get("names")), + info["bads"], + include_active=True, + inplace=True, + ) + + +@fill_doc +def make_projector_info(info, include_active=True): + """Make an SSP operator using the measurement info. + + Calls make_projector on good channels. + + Parameters + ---------- + %(info_not_none)s + include_active : bool + Also include projectors that are already active. + + Returns + ------- + proj : array of shape [n_channels, n_channels] + The projection operator to apply to the data. + nproj : int + How many items in the projector. + """ + proj, nproj, _ = make_projector( + info["projs"], info["ch_names"], info["bads"], include_active + ) + return proj, nproj + + +@verbose +def activate_proj(projs, copy=True, verbose=None): + """Set all projections to active. + + Useful before passing them to make_projector. + + Parameters + ---------- + projs : list + The projectors. + copy : bool + Modify projs in place or operate on a copy. + %(verbose)s + + Returns + ------- + projs : list + The projectors. + """ + if copy: + projs = deepcopy(projs) + + # Activate the projection items + for proj in projs: + proj["active"] = True + + logger.info(f"{len(projs)} projection items activated") + + return projs + + +@verbose +def deactivate_proj(projs, copy=True, verbose=None): + """Set all projections to inactive. + + Useful before saving raw data without projectors applied. + + Parameters + ---------- + projs : list + The projectors. + copy : bool + Modify projs in place or operate on a copy. + %(verbose)s + + Returns + ------- + projs : list + The projectors. + """ + if copy: + projs = deepcopy(projs) + + # Deactivate the projection items + for proj in projs: + proj["active"] = False + + logger.info(f"{len(projs)} projection items deactivated") + + return projs + + +# Keep in sync with doc below +_EEG_AVREF_PICK_DICT = {k: True for k in _ELECTRODE_CH_TYPES} + + +@verbose +def make_eeg_average_ref_proj(info, activate=True, *, ch_type="eeg", verbose=None): + """Create an EEG average reference SSP projection vector. + + Parameters + ---------- + %(info_not_none)s + activate : bool + If True projections are activated. + ch_type : str + The channel type to use for reference projection. + Valid types are ``'eeg'``, ``'ecog'``, ``'seeg'`` and ``'dbs'``. + + .. versionadded:: 1.2 + %(verbose)s + + Returns + ------- + proj: instance of Projection + The SSP/PCA projector. + """ + if info.get("custom_ref_applied", False): + raise RuntimeError( + "A custom reference has been applied to the " + "data earlier. Please use the " + "mne.io.set_eeg_reference function to move from " + "one EEG reference to another." 
+ ) + + _validate_type(ch_type, (list, tuple, str), "ch_type") + singleton = False + if isinstance(ch_type, str): + ch_type = [ch_type] + singleton = True + for ci, this_ch_type in enumerate(ch_type): + _check_option( + "ch_type" + ("" if singleton else f"[{ci}]"), + this_ch_type, + list(_EEG_AVREF_PICK_DICT), + ) + + ch_type_name = "/".join(c.upper() for c in ch_type) + logger.info(f"Adding average {ch_type_name} reference projection.") + + ch_dict = {c: True for c in ch_type} + for c in ch_type: + one_picks = pick_types(info, exclude="bads", **{c: True}) + if len(one_picks) == 0: + raise ValueError( + f"Cannot create {ch_type_name} average reference " + f"projector (no {c.upper()} data found)" + ) + del ch_type + ch_sel = pick_types(info, **ch_dict, exclude="bads") + ch_names = info["ch_names"] + ch_names = [ch_names[k] for k in ch_sel] + n_chs = len(ch_sel) + vec = np.ones((1, n_chs)) + vec /= np.sqrt(n_chs) + explained_var = None + proj_data = dict(col_names=ch_names, row_names=None, data=vec, nrow=1, ncol=n_chs) + proj = Projection( + active=activate, + data=proj_data, + explained_var=explained_var, + desc=f"Average {ch_type_name} reference", + kind=FIFF.FIFFV_PROJ_ITEM_EEG_AVREF, + ) + return proj + + +@verbose +def _has_eeg_average_ref_proj( + info, *, projs=None, check_active=False, ch_type=None, verbose=None +): + """Determine if a list of projectors has an average EEG ref. + + Optionally, set check_active=True to additionally check if the CAR + has already been applied. + """ + from .meas_info import Info + + _validate_type(info, Info, "info") + projs = info.get("projs", []) if projs is None else projs + if ch_type is None: + pick_kwargs = _EEG_AVREF_PICK_DICT + else: + ch_type = [ch_type] if isinstance(ch_type, str) else ch_type + pick_kwargs = {ch_type: True for ch_type in ch_type} + ch_type = "/".join(c.upper() for c in pick_kwargs) + want_names = [ + info["ch_names"][pick] + for pick in pick_types(info, exclude="bads", **pick_kwargs) + ] + if not want_names: + return False + found_names = list() + for proj in projs: + if proj["kind"] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF or re.match( + "^Average .* reference$", proj["desc"] + ): + if not check_active or proj["active"]: + found_names.extend(proj["data"]["col_names"]) + # If some are missing we have a problem (keep order for the message, + # otherwise we could use set logic) + missing = [name for name in want_names if name not in found_names] + if missing: + if found_names: # found some but not all: warn + warn(f"Incomplete {ch_type} projector, missing channel(s) {missing}") + return False + return True + + +def _needs_eeg_average_ref_proj(info): + """Determine if the EEG needs an average EEG reference. + + This returns True if no custom reference has been applied and no average + reference projection is present in the list of projections. + """ + if info["custom_ref_applied"]: + return False + if not _electrode_types(info): + return False + if _has_eeg_average_ref_proj(info): + return False + return True + + +@verbose +def setup_proj( + info, add_eeg_ref=True, activate=True, *, eeg_ref_ch_type="eeg", verbose=None +): + """Set up projection for Raw and Epochs. + + Parameters + ---------- + %(info_not_none)s Warning: will be modified in-place. + add_eeg_ref : bool + If True, an EEG average reference will be added (unless one + already exists). + activate : bool + If True projections are activated. + eeg_ref_ch_type : str + The channel type to use for reference projection. + Valid types are 'eeg', 'ecog', 'seeg' and 'dbs'. + + .. 
versionadded:: 1.2 + %(verbose)s + + Returns + ------- + projector : array of shape [n_channels, n_channels] + The projection operator to apply to the data. + info : mne.Info + The modified measurement info. + """ + # Add EEG ref reference proj if necessary + if add_eeg_ref and _needs_eeg_average_ref_proj(info): + eeg_proj = make_eeg_average_ref_proj( + info, activate=activate, ch_type=eeg_ref_ch_type + ) + info["projs"].append(eeg_proj) + + # Create the projector + projector, nproj = make_projector_info(info) + if nproj == 0: + if verbose: + logger.info("The projection vectors do not apply to these channels") + projector = None + else: + logger.info(f"Created an SSP operator (subspace dimension = {nproj})") + + # The projection items have been activated + if activate: + with info._unlock(): + info["projs"] = activate_proj(info["projs"], copy=False) + + return projector, info + + +def _uniquify_projs(projs, check_active=True, sort=True): + """Make unique projs.""" + final_projs = [] + for proj in projs: # flatten + if not any(_proj_equal(p, proj, check_active) for p in final_projs): + final_projs.append(proj) + + my_count = count(len(final_projs)) + + def sorter(x): + """Sort in a nice way.""" + digits = [s for s in x["desc"] if s.isdigit()] + if digits: + sort_idx = int(digits[-1]) + else: + sort_idx = next(my_count) + return (sort_idx, x["desc"]) + + return sorted(final_projs, key=sorter) if sort else final_projs diff --git a/mne/_fiff/reference.py b/mne/_fiff/reference.py new file mode 100644 index 00000000000..13fed0eebd2 --- /dev/null +++ b/mne/_fiff/reference.py @@ -0,0 +1,738 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from ..defaults import DEFAULTS +from ..utils import ( + _check_option, + _check_preload, + _on_missing, + _validate_type, + fill_doc, + logger, + pinv, + verbose, + warn, +) +from .constants import FIFF +from .meas_info import _check_ch_keys +from .pick import _ELECTRODE_CH_TYPES, pick_channels, pick_channels_forward, pick_types +from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj, setup_proj + + +def _check_before_reference(inst, ref_from, ref_to, ch_type): + """Prepare instance for referencing.""" + # Check to see that data is preloaded + _check_preload(inst, "Applying a reference") + + ch_type = _get_ch_type(inst, ch_type) + ch_dict = {**{type_: True for type_ in ch_type}, "meg": False, "ref_meg": False} + eeg_idx = pick_types(inst.info, **ch_dict) + + if ref_to is None: + ref_to = [inst.ch_names[i] for i in eeg_idx] + extra = "EEG channels found" + else: + extra = "channels supplied" + if len(ref_to) == 0: + raise ValueError(f"No {extra} to apply the reference to") + + _check_ssp(inst, ref_from + ref_to) + + # If the reference touches EEG/ECoG/sEEG/DBS electrodes, note in the + # info that a non-CAR has been applied. 
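+    # Setting FIFF.FIFFV_MNE_CUSTOM_REF_ON here matters downstream:
+    # _needs_eeg_average_ref_proj() returns False once a custom reference is
+    # flagged, so setup_proj() will no longer add an average-reference
+    # projector automatically for these data.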
+ ref_to_channels = pick_channels(inst.ch_names, ref_to, ordered=True) + if len(np.intersect1d(ref_to_channels, eeg_idx)) > 0: + with inst.info._unlock(): + inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_ON + + return ref_to + + +def _check_ssp(inst, ref_items): + """Check for SSPs that may block re-referencing.""" + projs_to_remove = [] + for i, proj in enumerate(inst.info["projs"]): + # Remove any average reference projections + if ( + proj["desc"] == "Average EEG reference" + or proj["kind"] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF + ): + logger.info("Removing existing average EEG reference projection.") + # Don't remove the projection right away, but do this at the end of + # this loop. + projs_to_remove.append(i) + + # Inactive SSPs may block re-referencing + elif ( + not proj["active"] + and len([ch for ch in ref_items if ch in proj["data"]["col_names"]]) > 0 + ): + raise RuntimeError( + "Inactive signal space projection (SSP) operators are " + "present that operate on sensors involved in the desired " + "referencing scheme. These projectors need to be applied " + "using the apply_proj() method function before the desired " + "reference can be set." + ) + + for i in projs_to_remove: + del inst.info["projs"][i] + + # Need to call setup_proj after changing the projs: + inst._projector, _ = setup_proj(inst.info, add_eeg_ref=False, activate=False) + + +def _check_before_dict_reference(inst, ref_dict): + """Prepare instance for dict-based referencing.""" + # Check to see that data is preloaded + _check_preload(inst, "Applying a reference") + + # Promote all values to list-like. This simplifies our logic and also helps catch + # self-referencing cases like `{"Cz": ["Cz"]}` + _refdict = {k: [v] if isinstance(v, str) else list(v) for k, v in ref_dict.items()} + + # Check that keys are strings and values are lists-of-strings + key_types = {type(k) for k in _refdict} + value_types = {type(v) for val in _refdict.values() for v in val} + for elem_name, elem in dict(key=key_types, value=value_types).items(): + if bad_elem := elem - {str}: + raise TypeError( + f"{elem_name.capitalize()}s in the ref_channels dict must be strings. " + f"Your dict has {elem_name}s of type " + f"{', '.join(map(lambda x: x.__name__, bad_elem))}." + ) + + # Check that keys are valid channels and values are lists-of-valid-channels + ch_set = set(inst.ch_names) + bad_ch_set = set(inst.info["bads"]) + keys = set(_refdict) + values = set(sum(_refdict.values(), [])) + for elem_name, elem in dict(key=keys, value=values).items(): + if bad_elem := elem - ch_set: + raise ValueError( + f"ref_channels dict contains invalid {elem_name}(s) " + f"({', '.join(bad_elem)}) " + "that are not names of channels in the instance." + ) + # Check that values are not bad channels + if bad_elem := elem.intersection(bad_ch_set): + warn( + f"ref_channels dict contains {elem_name}(s) " + f"({', '.join(bad_elem)}) " + "that are marked as bad channels." + ) + + _check_ssp(inst, keys.union(values)) + + # Check for self-referencing + self_ref = [[k] == v for k, v in _refdict.items()] + if any(self_ref): + which = np.array(list(_refdict))[np.nonzero(self_ref)] + for ch in which: + warn(f"Channel {ch} is self-referenced, which will nullify the channel.") + + # Check that channel types match. 
First unpack list-like vals into separate items: + pairs = [(k, v) for k in _refdict for v in _refdict[k]] + ch_type_map = dict(zip(inst.ch_names, inst.get_channel_types())) + mismatch = [ch_type_map[k] != ch_type_map[v] for k, v in pairs] + if any(mismatch): + mismatch_pairs = np.array(pairs)[mismatch] + for k, v in mismatch_pairs: + warn( + f"Channel {k} ({ch_type_map[k]}) is referenced to channel {v} which is " + f"a different channel type ({ch_type_map[v]})." + ) + + # convert channel names to indices + keys_ix = pick_channels(inst.ch_names, list(_refdict), ordered=True) + vals_ix = (pick_channels(inst.ch_names, v, ordered=True) for v in _refdict.values()) + return dict(zip(keys_ix, vals_ix)) + + +def _apply_reference(inst, ref_from, ref_to=None, forward=None, ch_type="auto"): + """Apply a custom EEG referencing scheme.""" + ref_to = _check_before_reference(inst, ref_from, ref_to, ch_type) + + # Compute reference + if len(ref_from) > 0: + # this is guaranteed below, but we should avoid the crazy pick_channels + # behavior that [] gives all. Also use ordered=True just to make sure + # that all supplied channels actually exist. + assert len(ref_to) > 0 + ref_names = ref_from + ref_from = pick_channels(inst.ch_names, ref_from, ordered=True) + ref_to = pick_channels(inst.ch_names, ref_to, ordered=True) + + data = inst._data + ref_data = data[..., ref_from, :].mean(-2, keepdims=True) + data[..., ref_to, :] -= ref_data + ref_data = ref_data[..., 0, :] + + # REST + if forward is not None: + # use ch_sel and the given forward + forward = pick_channels_forward(forward, ref_names, ordered=True) + # 1-3. Compute a forward (G) and avg-ref'ed data (done above) + G = forward["sol"]["data"] + assert G.shape[0] == len(ref_names) + # 4. Compute the forward (G) and average-reference it (Ga): + Ga = G - np.mean(G, axis=0, keepdims=True) + # 5. Compute the Ga_inv by SVD + Ga_inv = pinv(Ga, rtol=1e-6) + # 6. Compute Ra = (G @ Ga_inv) in eq (8) from G and Ga_inv + Ra = G @ Ga_inv + # 7-8. Compute Vp = Ra @ Va; then Vpa=average(Vp) + Vpa = np.mean(Ra @ data[..., ref_from, :], axis=-2, keepdims=True) + data[..., ref_to, :] += Vpa + else: + ref_data = None + + return inst, ref_data + + +def _apply_dict_reference(inst, ref_dict): + """Apply a dict-based custom EEG referencing scheme.""" + # this converts all keys to channel indices and all values to arrays of ch. indices: + ref_dict = _check_before_dict_reference(inst, ref_dict) + + data = inst._data + orig_data = data.copy() + for ref_to, ref_from in ref_dict.items(): + ref_data = orig_data[..., ref_from, :].mean(-2, keepdims=True) + data[..., [ref_to], :] -= ref_data + + with inst.info._unlock(): + inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_ON + return inst, None + + +@fill_doc +def add_reference_channels(inst, ref_channels, copy=True): + """Add reference channels to data that consists of all zeros. + + Adds reference channels to data that were not included during recording. + This is useful when you need to re-reference your data to different + channels. These added channels will consist of all zeros. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + Instance of Raw or Epochs with EEG channels and reference channel(s). + %(ref_channels)s + copy : bool + Specifies whether the data will be copied (True) or modified in-place + (False). Defaults to True. + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + Data with added EEG reference channels. + + Notes + ----- + .. 
warning:: + When :ref:`re-referencing `, + make sure to apply the montage using :meth:`mne.io.Raw.set_montage` + only after calling this function. Applying a montage will only set + locations of channels that exist at the time it is applied. + """ + from ..epochs import BaseEpochs + from ..evoked import Evoked + from ..io import BaseRaw + + # Check to see that data is preloaded + _check_preload(inst, "add_reference_channels") + _validate_type(ref_channels, (list, tuple, str), "ref_channels") + if isinstance(ref_channels, str): + ref_channels = [ref_channels] + for ch in ref_channels: + if ch in inst.info["ch_names"]: + raise ValueError(f"Channel {ch} already specified in inst.") + + # Once CAR is applied (active), don't allow adding channels + if _has_eeg_average_ref_proj(inst.info, check_active=True): + raise RuntimeError("Average reference already applied to data.") + + if copy: + inst = inst.copy() + + if isinstance(inst, BaseRaw | Evoked): + data = inst._data + refs = np.zeros((len(ref_channels), data.shape[1])) + data = np.vstack((data, refs)) + inst._data = data + elif isinstance(inst, BaseEpochs): + data = inst._data + x, y, z = data.shape + refs = np.zeros((x * len(ref_channels), z)) + data = np.vstack((data.reshape((x * y, z), order="F"), refs)) + data = data.reshape(x, y + len(ref_channels), z, order="F") + inst._data = data + else: + raise TypeError( + f"inst should be Raw, Epochs, or Evoked instead of {type(inst)}." + ) + nchan = len(inst.info["ch_names"]) + + if inst.info.get("dig", None) is not None: + # A montage has been set. Try to infer location of reference channels. + # "zeroth" EEG electrode dig points is reference + ref_dig_loc = [ + dl + for dl in inst.info["dig"] + if (dl["kind"] == FIFF.FIFFV_POINT_EEG and dl["ident"] == 0) + ] + if len(ref_channels) > 1 or len(ref_dig_loc) != len(ref_channels): + ref_dig_array = np.full(12, np.nan) + warn( + "Location for this channel is unknown or ambiguous; consider calling " + "set_montage() after adding new reference channels if needed. " + "Applying a montage will only set locations of channels that " + "exist at the time it is applied." + ) + else: # n_ref_channels == 1 and a single ref digitization exists + ref_dig_array = np.concatenate( + (ref_dig_loc[0]["r"], ref_dig_loc[0]["r"], np.zeros(6)) + ) + # Replace the (possibly new) Ref location for each channel + for idx in pick_types(inst.info, meg=False, eeg=True, exclude=[]): + inst.info["chs"][idx]["loc"][3:6] = ref_dig_loc[0]["r"] + else: + # If no montage has ever been set, we cannot even try to infer a location. 
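+        # The 12 NaN values mirror the "loc" layout used in info["chs"]
+        # (channel position in loc[:3], reference position in loc[3:6],
+        # remaining entries unused for EEG); all-NaN marks the location as
+        # unknown until a montage is applied.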
+ ref_dig_array = np.full(12, np.nan) + + for ch in ref_channels: + chan_info = { + "ch_name": ch, + "coil_type": FIFF.FIFFV_COIL_EEG, + "kind": FIFF.FIFFV_EEG_CH, + "logno": nchan + 1, + "scanno": nchan + 1, + "cal": 1, + "range": 1.0, + "unit_mul": FIFF.FIFF_UNITM_NONE, + "unit": FIFF.FIFF_UNIT_V, + "coord_frame": FIFF.FIFFV_COORD_HEAD, + "loc": ref_dig_array.copy(), + } + inst.info["chs"].append(chan_info) + inst.info._update_redundant() + range_ = np.arange(1, len(ref_channels) + 1) + if isinstance(inst, BaseRaw): + inst._cals = np.hstack((inst._cals, [1] * len(ref_channels))) + for pi, picks in enumerate(inst._read_picks): + inst._read_picks[pi] = np.concatenate([picks, np.max(picks) + range_]) + elif isinstance(inst, BaseEpochs): + picks = inst.picks + inst.picks = np.concatenate([picks, np.max(picks) + range_]) + inst.info._check_consistency() + set_eeg_reference(inst, ref_channels=ref_channels, copy=False, verbose=False) + return inst + + +_ref_dict = { + FIFF.FIFFV_MNE_CUSTOM_REF_ON: "on", + FIFF.FIFFV_MNE_CUSTOM_REF_OFF: "off", + FIFF.FIFFV_MNE_CUSTOM_REF_CSD: "CSD", +} + + +def _check_can_reref(inst): + from ..epochs import BaseEpochs + from ..evoked import Evoked + from ..io import BaseRaw + + _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "Instance") + current_custom = inst.info["custom_ref_applied"] + if current_custom not in ( + FIFF.FIFFV_MNE_CUSTOM_REF_ON, + FIFF.FIFFV_MNE_CUSTOM_REF_OFF, + ): + raise RuntimeError( + "Cannot set new reference on data with custom reference type " + f"{_ref_dict[current_custom]!r}" + ) + + +@verbose +def set_eeg_reference( + inst, + ref_channels="average", + copy=True, + projection=False, + ch_type="auto", + forward=None, + *, + joint=False, + verbose=None, +): + """Specify which reference to use for EEG data. + + Use this function to explicitly specify the desired reference for EEG. + This can be either an existing electrode or a new virtual channel. + This function will re-reference the data according to the desired + reference. + + Note that it is also possible to re-reference the signal using a + Laplacian (LAP) "reference-free" transformation using the + :func:`.compute_current_source_density` function. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + Instance of Raw or Epochs with EEG channels and reference channel(s). + %(ref_channels_set_eeg_reference)s + copy : bool + Specifies whether the data will be copied (True) or modified in-place + (False). Defaults to True. + %(projection_set_eeg_reference)s + %(ch_type_set_eeg_reference)s + %(forward_set_eeg_reference)s + %(joint_set_eeg_reference)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + Data with EEG channels re-referenced. If ``ref_channels="average"`` and + ``projection=True`` a projection will be added instead of directly + re-referencing the data. + ref_data : array + Array of reference data subtracted from EEG channels. This will be + ``None`` if ``projection=True``, or if ``ref_channels`` is ``"REST"`` or a + :class:`dict`. + %(set_eeg_reference_see_also_notes)s + """ + from ..forward import Forward + + _check_can_reref(inst) + + if isinstance(ref_channels, dict): + logger.info("Applying a custom dict-based reference.") + return _apply_dict_reference(inst, ref_channels) + + ch_type = _get_ch_type(inst, ch_type) + + if projection: # average reference projector + if ref_channels != "average": + raise ValueError( + 'Setting projection=True is only supported for ref_channels="average", ' + f"got {ref_channels!r}." 
+ ) + # We need verbose='error' here in case we add projs sequentially + if _has_eeg_average_ref_proj(inst.info, ch_type=ch_type, verbose="error"): + warn( + "An average reference projection was already added. The data " + "has been left untouched." + ) + else: + # Creating an average reference may fail. In this case, make + # sure that the custom_ref_applied flag is left untouched. + custom_ref_applied = inst.info["custom_ref_applied"] + + try: + with inst.info._unlock(): + inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF + if joint: + inst.add_proj( + make_eeg_average_ref_proj( + inst.info, ch_type=ch_type, activate=False + ) + ) + else: + for this_ch_type in ch_type: + inst.add_proj( + make_eeg_average_ref_proj( + inst.info, ch_type=this_ch_type, activate=False + ) + ) + except Exception: + with inst.info._unlock(): + inst.info["custom_ref_applied"] = custom_ref_applied + raise + # If the data has been preloaded, projections will no + # longer be automatically applied. + if inst.preload: + logger.info( + "Average reference projection was added, " + "but has not been applied yet. Use the " + "apply_proj method to apply it." + ) + return inst, None + del projection # not used anymore + + inst = inst.copy() if copy else inst + ch_dict = {**{type_: True for type_ in ch_type}, "meg": False, "ref_meg": False} + ch_sel = [inst.ch_names[i] for i in pick_types(inst.info, **ch_dict)] + + if ref_channels == "REST": + _validate_type(forward, Forward, 'forward when ref_channels="REST"') + else: + forward = None # signal to _apply_reference not to do REST + + if ref_channels in ("average", "REST"): + logger.info(f"Applying {ref_channels} reference.") + ref_channels = ch_sel + + if ref_channels == []: + logger.info("EEG data marked as already having the desired reference.") + else: + logger.info( + "Applying a custom " + f"{tuple(DEFAULTS['titles'][type_] for type_ in ch_type)} " + "reference." + ) + + return _apply_reference(inst, ref_channels, ch_sel, forward, ch_type=ch_type) + + +def _get_ch_type(inst, ch_type): + _validate_type(ch_type, (str, list, tuple), "ch_type") + valid_ch_types = ("auto",) + _ELECTRODE_CH_TYPES + if isinstance(ch_type, str): + _check_option("ch_type", ch_type, valid_ch_types) + if ch_type != "auto": + ch_type = [ch_type] + elif isinstance(ch_type, list | tuple): + for type_ in ch_type: + _validate_type(type_, str, "ch_type") + _check_option("ch_type", type_, valid_ch_types[1:]) + ch_type = list(ch_type) + + # if ch_type is 'auto', search through list to find first reasonable + # reference-able channel type. + if ch_type == "auto": + for type_ in _ELECTRODE_CH_TYPES: + if type_ in inst: + ch_type = [type_] + logger.info( + f"{DEFAULTS['titles'][type_]} channel type selected for " + "re-referencing" + ) + break + # if auto comes up empty, or the user specifies a bad ch_type. + else: + raise ValueError("No EEG, ECoG, sEEG or DBS channels found to rereference.") + return ch_type + + +@verbose +def set_bipolar_reference( + inst, + anode, + cathode, + ch_name=None, + ch_info=None, + drop_refs=True, + copy=True, + on_bad="warn", + verbose=None, +): + """Re-reference selected channels using a bipolar referencing scheme. + + A bipolar reference takes the difference between two channels (the anode + minus the cathode) and adds it as a new virtual channel. The original + channels will be dropped by default. + + Multiple anodes and cathodes can be specified, in which case multiple + virtual channels will be created. 
The 1st cathode will be subtracted + from the 1st anode, the 2nd cathode from the 2nd anode, etc. + + By default, the virtual channels will be annotated with channel-info and + -location of the anodes and coil types will be set to EEG_BIPOLAR. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + Data containing the unreferenced channels. + anode : str | list of str + The name(s) of the channel(s) to use as anode in the bipolar reference. + cathode : str | list of str + The name(s) of the channel(s) to use as cathode in the bipolar + reference. + ch_name : str | list of str | None + The channel name(s) for the virtual channel(s) containing the resulting + signal. By default, bipolar channels are named after the anode and + cathode, but it is recommended to supply a more meaningful name. + ch_info : dict | list of dict | None + This parameter can be used to supply a dictionary (or a dictionary for + each bipolar channel) containing channel information to merge in, + overwriting the default values. Defaults to None. + drop_refs : bool + Whether to drop the anode/cathode channels from the instance. + copy : bool + Whether to operate on a copy of the data (True) or modify it in-place + (False). Defaults to True. + on_bad : str + If a bipolar channel is created from a bad anode or a bad cathode, mne + warns if on_bad="warns", raises ValueError if on_bad="raise", and does + nothing if on_bad="ignore". For "warn" and "ignore", the new bipolar + channel will be marked as bad. Defaults to on_bad="warns". + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + Data with the specified channels re-referenced. + + See Also + -------- + set_eeg_reference : Convenience function for creating an EEG reference. + + Notes + ----- + 1. If the anodes contain any EEG channels, this function removes + any pre-existing average reference projections. + + 2. During source localization, the EEG signal should have an average + reference. + + 3. The data must be preloaded. + + .. versionadded:: 0.9.0 + """ + from ..epochs import BaseEpochs, EpochsArray + from ..evoked import EvokedArray + from ..io import BaseRaw, RawArray + from .meas_info import create_info + + _check_can_reref(inst) + if not isinstance(anode, list): + anode = [anode] + + if not isinstance(cathode, list): + cathode = [cathode] + + if len(anode) != len(cathode): + raise ValueError( + f"Number of anodes (got {len(anode)}) must equal the number " + f"of cathodes (got {len(cathode)})." + ) + + if ch_name is None: + ch_name = [f"{a}-{c}" for (a, c) in zip(anode, cathode)] + elif not isinstance(ch_name, list): + ch_name = [ch_name] + if len(ch_name) != len(anode): + raise ValueError( + "Number of channel names must equal the number of " + f"anodes/cathodes (got {len(ch_name)})." + ) + + # Check for duplicate channel names (it is allowed to give the name of the + # anode or cathode channel, as they will be replaced). + for ch, a, c in zip(ch_name, anode, cathode): + if ch not in [a, c] and ch in inst.ch_names: + raise ValueError( + f'There is already a channel named "{ch}", please ' + "specify a different name for the bipolar " + "channel using the ch_name parameter." + ) + + if ch_info is None: + ch_info = [{} for _ in anode] + elif not isinstance(ch_info, list): + ch_info = [ch_info] + if len(ch_info) != len(anode): + raise ValueError( + "Number of channel info dictionaries must equal the " + "number of anodes/cathodes." 
+ ) + + if copy: + inst = inst.copy() + + anode = _check_before_reference( + inst, ref_from=cathode, ref_to=anode, ch_type="auto" + ) + + # Create bipolar reference channels by multiplying the data + # (channels x time) with a matrix (n_virtual_channels x channels) + # and add them to the instance. + multiplier = np.zeros((len(anode), len(inst.ch_names))) + for idx, (a, c) in enumerate(zip(anode, cathode)): + multiplier[idx, inst.ch_names.index(a)] = 1 + multiplier[idx, inst.ch_names.index(c)] = -1 + + ref_info = create_info( + ch_names=ch_name, + sfreq=inst.info["sfreq"], + ch_types=inst.get_channel_types(picks=anode), + ) + + # Update "chs" in Reference-Info. + for ch_idx, (an, info) in enumerate(zip(anode, ch_info)): + _check_ch_keys(info, ch_idx, name="ch_info", check_min=False) + an_idx = inst.ch_names.index(an) + # Copy everything from anode (except ch_name). + an_chs = {k: v for k, v in inst.info["chs"][an_idx].items() if k != "ch_name"} + ref_info["chs"][ch_idx].update(an_chs) + # Set coil-type to bipolar. + ref_info["chs"][ch_idx]["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR + # Update with info from ch_info-parameter. + ref_info["chs"][ch_idx].update(info) + + # Set other info-keys from original instance. + pick_info = { + k: v + for k, v in inst.info.items() + if k not in ["chs", "ch_names", "bads", "nchan", "sfreq"] + } + + with ref_info._unlock(): + ref_info.update(pick_info) + + # Rereferencing of data. + ref_data = multiplier @ inst._data + + if isinstance(inst, BaseRaw): + ref_inst = RawArray(ref_data, ref_info, first_samp=inst.first_samp, copy=None) + elif isinstance(inst, BaseEpochs): + ref_inst = EpochsArray( + ref_data, + ref_info, + events=inst.events, + tmin=inst.tmin, + event_id=inst.event_id, + metadata=inst.metadata, + ) + else: + ref_inst = EvokedArray( + ref_data, + ref_info, + tmin=inst.tmin, + comment=inst.comment, + nave=inst.nave, + kind="average", + ) + + # Add referenced instance to original instance. + inst.add_channels([ref_inst], force_update_info=True) + + # Handle bad channels. + bad_bipolar_chs = [] + for ch_idx, (a, c) in enumerate(zip(anode, cathode)): + if a in inst.info["bads"] or c in inst.info["bads"]: + bad_bipolar_chs.append(ch_name[ch_idx]) + + # Add warnings if bad channels are present. + if bad_bipolar_chs: + msg = f"Bipolar channels are based on bad channels: {bad_bipolar_chs}." + _on_missing(on_bad, msg) + inst.info["bads"] += bad_bipolar_chs + + added_channels = ", ".join([name for name in ch_name]) + logger.info(f"Added the following bipolar channels:\n{added_channels}") + + for attr_name in ["picks", "_projector"]: + setattr(inst, attr_name, None) + + # Drop remaining channels. + if drop_refs: + drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names)) + inst.drop_channels(drop_channels) + + return inst diff --git a/mne/_fiff/tag.py b/mne/_fiff/tag.py new file mode 100644 index 00000000000..96fee81da20 --- /dev/null +++ b/mne/_fiff/tag.py @@ -0,0 +1,532 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
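# ---------------------------------------------------------------------------
# Illustrative usage sketch for the referencing helpers in the hunk above
# (not part of the patch itself). The channel names and zero-valued data are
# invented purely for the example.
import numpy as np
import mne

info = mne.create_info(["EEG 001", "EEG 002", "EEG 003"], sfreq=100.0, ch_types="eeg")
raw = mne.io.RawArray(np.zeros((3, 100)), info)

# Average reference as a projection that is added but not yet applied:
raw.set_eeg_reference("average", projection=True)

# Bipolar re-referencing; with the default drop_refs=True the anode/cathode
# channels are dropped and the new virtual channel is appended:
raw_bip = mne.set_bipolar_reference(
    raw, anode="EEG 001", cathode="EEG 002", ch_name="EEG 001-002"
)
print(raw_bip.ch_names)  # expected: ['EEG 003', 'EEG 001-002']
# ---------------------------------------------------------------------------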
+ +import html +import re +import struct +from dataclasses import dataclass +from functools import partial +from typing import Any + +import numpy as np +from scipy.sparse import csc_array, csr_array + +from ..utils import _check_option, warn +from ..utils.numerics import _julian_to_date +from .constants import ( + FIFF, + _ch_coil_type_named, + _ch_kind_named, + _ch_unit_mul_named, + _ch_unit_named, + _dig_cardinal_named, + _dig_kind_named, +) + +############################################################################## +# HELPERS + + +@dataclass +class Tag: + """Tag in FIF tree structure.""" + + kind: int + type: int + size: int + next: int + pos: int + data: Any = None + + def __eq__(self, tag): # noqa: D105 + return int( + self.kind == tag.kind + and self.type == tag.type + and self.size == tag.size + and self.next == tag.next + and self.pos == tag.pos + and self.data == tag.data + ) + + @property + def next_pos(self): + """The next tag position.""" + if self.next == FIFF.FIFFV_NEXT_SEQ: # 0 + return self.pos + 16 + self.size + elif self.next > 0: + return self.next + else: # self.next should be -1 if we get here + return None # safest to return None so that things like fid.seek die + + +def _frombuffer_rows(fid, tag_size, dtype=None, shape=None, rlims=None): + """Get a range of rows from a large tag.""" + if shape is not None: + item_size = np.dtype(dtype).itemsize + if not len(shape) == 2: + raise ValueError("Only implemented for 2D matrices") + want_shape = np.prod(shape) + have_shape = tag_size // item_size + if want_shape != have_shape: + raise ValueError( + f"Wrong shape specified, requested {want_shape} but got {have_shape}" + ) + if not len(rlims) == 2: + raise ValueError("rlims must have two elements") + n_row_out = rlims[1] - rlims[0] + if n_row_out <= 0: + raise ValueError("rlims must yield at least one output") + row_size = item_size * shape[1] + # # of bytes to skip at the beginning, # to read, where to end + start_skip = int(rlims[0] * row_size) + read_size = int(n_row_out * row_size) + end_pos = int(fid.tell() + tag_size) + # Move the pointer ahead to the read point + fid.seek(start_skip, 1) + # Do the reading + out = np.frombuffer(fid.read(read_size), dtype=dtype) + # Move the pointer ahead to the end of the tag + fid.seek(end_pos) + else: + out = np.frombuffer(fid.read(tag_size), dtype=dtype) + return out + + +def _loc_to_coil_trans(loc): + """Convert loc vector to coil_trans.""" + assert loc.shape[-1] == 12 + coil_trans = np.zeros(loc.shape[:-1] + (4, 4)) + coil_trans[..., :3, 3] = loc[..., :3] + coil_trans[..., :3, :3] = np.reshape( + loc[..., 3:], loc.shape[:-1] + (3, 3) + ).swapaxes(-1, -2) + coil_trans[..., -1, -1] = 1.0 + return coil_trans + + +def _coil_trans_to_loc(coil_trans): + """Convert coil_trans to loc.""" + coil_trans = coil_trans.astype(np.float64) + return np.roll(coil_trans.T[:, :3], 1, 0).flatten() + + +def _loc_to_eeg_loc(loc): + """Convert a loc to an EEG loc.""" + if not np.isfinite(loc[:3]).all(): + raise RuntimeError("Missing EEG channel location") + if np.isfinite(loc[3:6]).all() and (loc[3:6]).any(): + return np.array([loc[0:3], loc[3:6]]).T + else: + return loc[0:3][:, np.newaxis].copy() + + +############################################################################## +# READING FUNCTIONS + +# None of these functions have docstring because it's more compact that way, +# and hopefully it's clear what they do by their names and variable values. +# See ``read_tag`` for variable descriptions. 
Return values are implied +# by the function names. + + +def _read_tag_header(fid, pos): + """Read only the header of a Tag.""" + fid.seek(pos, 0) + s = fid.read(16) + if len(s) != 16: + where = fid.tell() - len(s) + extra = f" in file {fid.name}" if hasattr(fid, "name") else "" + warn(f"Invalid tag with only {len(s)}/16 bytes at position {where}{extra}") + return None + # struct.unpack faster than np.frombuffer, saves ~10% of time some places + kind, type_, size, next_ = struct.unpack(">iIii", s) + return Tag(kind, type_, size, next_, pos) + + +def _read_matrix(fid, tag, shape, rlims): + """Read a matrix (dense or sparse) tag.""" + # This should be easy to implement (see _frombuffer_rows) + # if we need it, but for now, it's not... + if shape is not None or rlims is not None: + raise ValueError("Row reading not implemented for matrices yet") + + matrix_coding, matrix_type, bit, dtype = _matrix_info(tag) + + pos = tag.pos + 16 + fid.seek(pos + tag.size - 4, 0) + if matrix_coding == "dense": + # Find dimensions and return to the beginning of tag data + ndim = int(np.frombuffer(fid.read(4), dtype=">i4").item()) + fid.seek(-(ndim + 1) * 4, 1) + dims = np.frombuffer(fid.read(4 * ndim), dtype=">i4")[::-1] + # + # Back to where the data start + # + fid.seek(pos, 0) + + if ndim > 3: + raise Exception( + "Only 2 or 3-dimensional matrices are supported at this time" + ) + + data = fid.read(int(bit * dims.prod())) + data = np.frombuffer(data, dtype=dtype) + # Note: we need the non-conjugate transpose here + if matrix_type == FIFF.FIFFT_COMPLEX_FLOAT: + data = data.view(">c8") + elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE: + data = data.view(">c16") + data.shape = dims + else: + # Find dimensions and return to the beginning of tag data + ndim = int(np.frombuffer(fid.read(4), dtype=">i4").item()) + fid.seek(-(ndim + 2) * 4, 1) + dims = np.frombuffer(fid.read(4 * (ndim + 1)), dtype=">i4") + if ndim != 2: + raise Exception("Only two-dimensional matrices are supported at this time") + + # Back to where the data start + fid.seek(pos, 0) + nnz = int(dims[0]) + nrow = int(dims[1]) + ncol = int(dims[2]) + # We need to make a copy so that we can own the data, otherwise we get: + # _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr, + # E ValueError: WRITEBACKIFCOPY base is read-only + data = np.frombuffer(fid.read(bit * nnz), dtype=dtype).astype(np.float32) + shape = (dims[1], dims[2]) + if matrix_coding == "sparse CCS": + tmp_indices = fid.read(4 * nnz) + indices = np.frombuffer(tmp_indices, dtype=">i4") + tmp_ptr = fid.read(4 * (ncol + 1)) + indptr = np.frombuffer(tmp_ptr, dtype=">i4") + swap = nrow + klass = csc_array + else: + assert matrix_coding == "sparse RCS", matrix_coding + tmp_indices = fid.read(4 * nnz) + indices = np.frombuffer(tmp_indices, dtype=">i4") + tmp_ptr = fid.read(4 * (nrow + 1)) + indptr = np.frombuffer(tmp_ptr, dtype=">i4") + swap = ncol + klass = csr_array + if indptr[-1] > len(indices) or np.any(indptr < 0): + # There was a bug in MNE-C that caused some data to be + # stored without byte swapping + indices = np.concatenate( + ( + np.frombuffer(tmp_indices[: 4 * (swap + 1)], dtype=">i4"), + np.frombuffer(tmp_indices[4 * (swap + 1) :], dtype="c8") + return d + + +def _read_complex_double(fid, tag, shape, rlims): + """Read complex double tag.""" + # data gets stored twice as large + if shape is not None: + shape = (shape[0], shape[1] * 2) + d = _frombuffer_rows(fid, tag.size, dtype=">f8", shape=shape, rlims=rlims) + d = d.view(">c16") + return d + + +def 
_read_id_struct(fid, tag, shape, rlims): + """Read ID struct tag.""" + return dict( + version=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + machid=np.frombuffer(fid.read(8), dtype=">i4"), + secs=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + usecs=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + ) + + +def _read_dig_point_struct(fid, tag, shape, rlims, *, string=False): + """Read dig point struct tag.""" + kind = int(np.frombuffer(fid.read(4), dtype=">i4").item()) + kind = _dig_kind_named.get(kind, kind) + ident = int(np.frombuffer(fid.read(4), dtype=">i4").item()) + if kind == FIFF.FIFFV_POINT_CARDINAL: + ident = _dig_cardinal_named.get(ident, ident) + n = 1 if not string else int(np.frombuffer(fid.read(4), dtype=">i4").item()) + out = [ + dict( + kind=kind, + ident=ident, + r=np.frombuffer(fid.read(12), dtype=">f4"), + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + ) + for _ in range(n) + ] + if not string: + out = out[0] + return out + + +def _read_coord_trans_struct(fid, tag, shape, rlims): + """Read coord trans struct tag.""" + from ..transforms import Transform + + fro = int(np.frombuffer(fid.read(4), dtype=">i4").item()) + to = int(np.frombuffer(fid.read(4), dtype=">i4").item()) + rot = np.frombuffer(fid.read(36), dtype=">f4").reshape(3, 3) + move = np.frombuffer(fid.read(12), dtype=">f4") + trans = np.r_[np.c_[rot, move], np.array([[0], [0], [0], [1]]).T] + data = Transform(fro, to, trans) + fid.seek(48, 1) # Skip over the inverse transformation + return data + + +_ch_coord_dict = { + FIFF.FIFFV_MEG_CH: FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_REF_MEG_CH: FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_EEG_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_ECOG_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_SEEG_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_DBS_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_FNIRS_CH: FIFF.FIFFV_COORD_HEAD, +} + + +def _read_ch_info_struct(fid, tag, shape, rlims): + """Read channel info struct tag.""" + d = dict( + scanno=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + logno=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + kind=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + range=float(np.frombuffer(fid.read(4), dtype=">f4").item()), + cal=float(np.frombuffer(fid.read(4), dtype=">f4").item()), + coil_type=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + # deal with really old OSX Anaconda bug by casting to float64 + loc=np.frombuffer(fid.read(48), dtype=">f4").astype(np.float64), + # unit and exponent + unit=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + unit_mul=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + ) + # channel name + ch_name = np.frombuffer(fid.read(16), dtype=">c") + ch_name = ch_name[: np.argmax(ch_name == b"")].tobytes() + d["ch_name"] = ch_name.decode() + # coil coordinate system definition + _update_ch_info_named(d) + return d + + +def _update_ch_info_named(d): + d["coord_frame"] = _ch_coord_dict.get(d["kind"], FIFF.FIFFV_COORD_UNKNOWN) + d["kind"] = _ch_kind_named.get(d["kind"], d["kind"]) + d["coil_type"] = _ch_coil_type_named.get(d["coil_type"], d["coil_type"]) + d["unit"] = _ch_unit_named.get(d["unit"], d["unit"]) + d["unit_mul"] = _ch_unit_mul_named.get(d["unit_mul"], d["unit_mul"]) + + +def _read_old_pack(fid, tag, shape, rlims): + """Read old pack tag.""" + offset = float(np.frombuffer(fid.read(4), dtype=">f4").item()) + scale = float(np.frombuffer(fid.read(4), dtype=">f4").item()) + data = np.frombuffer(fid.read(tag.size - 8), dtype=">i2") + data = data * scale # to float64 + data += offset + return data + + 
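# ---------------------------------------------------------------------------
# Illustrative aside (not part of the patch): the FIFFT_OLD_PACK format read
# by _read_old_pack() above stores int16 samples plus a float32 offset and
# scale; decoding is just the affine transform below. The scale/offset values
# here are invented for the example.
import numpy as np

packed = np.array([-32768, 0, 32767], dtype=">i2")
offset, scale = 0.0, 1.5e-13  # hypothetical values read from the tag
decoded = packed * scale + offset  # promoted to float64, as in _read_old_pack
# ---------------------------------------------------------------------------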
+def _read_dir_entry_struct(fid, tag, shape, rlims): + """Read dir entry struct tag.""" + pos = tag.pos + 16 + entries = list() + for offset in range(tag.size // 16): + ent = _read_tag_header(fid, pos + offset * 16) + # The position of the real tag on disk is stored in the "next" entry within the + # directory, so we need to overwrite ent.pos. For safety let's also overwrite + # ent.next to point nowhere + ent.pos, ent.next = ent.next, FIFF.FIFFV_NEXT_NONE + entries.append(ent) + return entries + + +def _read_julian(fid, tag, shape, rlims): + """Read julian tag.""" + return _julian_to_date(int(np.frombuffer(fid.read(4), dtype=">i4").item())) + + +# Read types call dict +_call_dict = { + FIFF.FIFFT_STRING: _read_string, + FIFF.FIFFT_COMPLEX_FLOAT: _read_complex_float, + FIFF.FIFFT_COMPLEX_DOUBLE: _read_complex_double, + FIFF.FIFFT_ID_STRUCT: _read_id_struct, + FIFF.FIFFT_DIG_POINT_STRUCT: _read_dig_point_struct, + FIFF.FIFFT_DIG_STRING_STRUCT: partial(_read_dig_point_struct, string=True), + FIFF.FIFFT_COORD_TRANS_STRUCT: _read_coord_trans_struct, + FIFF.FIFFT_CH_INFO_STRUCT: _read_ch_info_struct, + FIFF.FIFFT_OLD_PACK: _read_old_pack, + FIFF.FIFFT_DIR_ENTRY_STRUCT: _read_dir_entry_struct, + FIFF.FIFFT_JULIAN: _read_julian, + FIFF.FIFFT_VOID: lambda fid, tag, shape, rlims: None, +} +_call_dict_names = { + FIFF.FIFFT_STRING: "str", + FIFF.FIFFT_COMPLEX_FLOAT: "c8", + FIFF.FIFFT_COMPLEX_DOUBLE: "c16", + FIFF.FIFFT_ID_STRUCT: "ids", + FIFF.FIFFT_DIG_POINT_STRUCT: "dps", + FIFF.FIFFT_DIG_STRING_STRUCT: "dss", + FIFF.FIFFT_COORD_TRANS_STRUCT: "cts", + FIFF.FIFFT_CH_INFO_STRUCT: "cis", + FIFF.FIFFT_OLD_PACK: "op_", + FIFF.FIFFT_DIR_ENTRY_STRUCT: "dir", + FIFF.FIFFT_JULIAN: "jul", + FIFF.FIFFT_VOID: "nul", # 0 +} + +# Append the simple types +_simple_dict = { + FIFF.FIFFT_BYTE: ">B", + FIFF.FIFFT_SHORT: ">i2", + FIFF.FIFFT_INT: ">i4", + FIFF.FIFFT_USHORT: ">u2", + FIFF.FIFFT_UINT: ">u4", + FIFF.FIFFT_FLOAT: ">f4", + FIFF.FIFFT_DOUBLE: ">f8", + FIFF.FIFFT_DAU_PACK16: ">i2", +} +for key, dtype in _simple_dict.items(): + _call_dict[key] = partial(_read_simple, dtype=dtype) + _call_dict_names[key] = dtype + + +def read_tag(fid, pos, shape=None, rlims=None): + """Read a Tag from a file at a given position. + + Parameters + ---------- + fid : file + The open FIF file descriptor. + pos : int + The position of the Tag in the file. + shape : tuple | None + If tuple, the shape of the stored matrix. Only to be used with + data stored as a vector (not implemented for matrices yet). + rlims : tuple | None + If tuple, the first (inclusive) and last (exclusive) rows to retrieve. + Note that data are assumed to be stored row-major in the file. Only to + be used with data stored as a vector (not implemented for matrices + yet). + + Returns + ------- + tag : Tag + The Tag read. + """ + tag = _read_tag_header(fid, pos) + if tag is None: + return tag + if tag.size > 0: + if _matrix_info(tag) is not None: + tag.data = _read_matrix(fid, tag, shape, rlims) + else: + # All other data types + try: + fun = _call_dict[tag.type] + except KeyError: + raise Exception(f"Unimplemented tag data type {tag.type}") from None + tag.data = fun(fid, tag, shape, rlims) + return tag + + +def find_tag(fid, node, findkind): + """Find Tag in an open FIF file descriptor. + + Parameters + ---------- + fid : file-like + Open file. + node : dict + Node to search. + findkind : int + Tag kind to find. + + Returns + ------- + tag : instance of Tag + The first tag found. 
+ """ + if node["directory"] is not None: + for subnode in node["directory"]: + if subnode.kind == findkind: + return read_tag(fid, subnode.pos) + return None + + +def has_tag(node, kind): + """Check if the node contains a Tag of a given kind.""" + for d in node["directory"]: + if d.kind == kind: + return True + return False + + +def _rename_list(bads, ch_names_mapping): + return [ch_names_mapping.get(bad, bad) for bad in bads] + + +def _int_item(x): + return int(x.item()) + + +def _float_item(x): + return float(x.item()) + + +def _matrix_info(tag): + matrix_coding = tag.type & 0xFFFF0000 + if matrix_coding == 0 or tag.size == 0: + return None + matrix_type = tag.type & 0x0000FFFF + matrix_coding_dict = { + FIFF.FIFFT_MATRIX: "dense", + FIFF.FIFFT_MATRIX | FIFF.FIFFT_SPARSE_CCS_MATRIX: "sparse CCS", + FIFF.FIFFT_MATRIX | FIFF.FIFFT_SPARSE_RCS_MATRIX: "sparse RCS", + } + _check_option("matrix_coding", matrix_coding, list(matrix_coding_dict)) + matrix_coding = matrix_coding_dict[matrix_coding] + matrix_bit_dtype = { + FIFF.FIFFT_INT: (4, ">i4"), + FIFF.FIFFT_JULIAN: (4, ">i4"), + FIFF.FIFFT_FLOAT: (4, ">f4"), + FIFF.FIFFT_DOUBLE: (8, ">f8"), + FIFF.FIFFT_COMPLEX_FLOAT: (8, ">f4"), + FIFF.FIFFT_COMPLEX_DOUBLE: (16, ">f8"), + } + _check_option("matrix_type", matrix_type, list(matrix_bit_dtype)) + bit, dtype = matrix_bit_dtype[matrix_type] + return matrix_coding, matrix_type, bit, dtype diff --git a/mne/_fiff/tests/__init__.py b/mne/_fiff/tests/__init__.py new file mode 100644 index 00000000000..c45203dcc03 --- /dev/null +++ b/mne/_fiff/tests/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os.path as op + +data_dir = op.join(op.dirname(__file__), "data") diff --git a/mne/_fiff/tests/test_compensator.py b/mne/_fiff/tests/test_compensator.py new file mode 100644 index 00000000000..4854cc1831c --- /dev/null +++ b/mne/_fiff/tests/test_compensator.py @@ -0,0 +1,118 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
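# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the patch): typical driving code for the
# low-level readers added in mne/_fiff/tag.py above, assuming the companion
# helpers fiff_open and dir_tree_find live under mne._fiff as in this refactor
# and that ``fname`` points at a FIF file containing a measurement-info block.
from mne._fiff.constants import FIFF
from mne._fiff.open import fiff_open
from mne._fiff.tag import find_tag
from mne._fiff.tree import dir_tree_find

fname = "sample_raw.fif"  # hypothetical path
fid, tree, directory = fiff_open(fname)
with fid:
    meas_info = dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)[0]
    tag = find_tag(fid, meas_info, FIFF.FIFF_SFREQ)
    print(None if tag is None else float(tag.data.item()))
# ---------------------------------------------------------------------------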
+ +from pathlib import Path + +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from mne import Epochs, pick_types, read_evokeds +from mne._fiff.compensator import get_current_comp, make_compensator +from mne.io import read_raw_fif +from mne.utils import requires_mne, run_subprocess + +base_dir = Path(__file__).parents[2] / "io" / "tests" / "data" +ctf_comp_fname = base_dir / "test_ctf_comp_raw.fif" + + +def test_compensation_identity(): + """Test compensation identity.""" + raw = read_raw_fif(ctf_comp_fname) + assert get_current_comp(raw.info) == 3 + comp1 = make_compensator(raw.info, 3, 1, exclude_comp_chs=False) + assert comp1.shape == (340, 340) + comp2 = make_compensator(raw.info, 3, 1, exclude_comp_chs=True) + assert comp2.shape == (311, 340) + + # round-trip + desired = np.eye(340) + for from_ in range(3): + for to in range(3): + if from_ == to: + continue + comp1 = make_compensator(raw.info, from_, to) + comp2 = make_compensator(raw.info, to, from_) + # To get 1e-12 here (instead of 1e-6) we must use the linalg.inv + # method mentioned in compensator.py + assert_allclose(np.dot(comp1, comp2), desired, atol=1e-12) + assert_allclose(np.dot(comp2, comp1), desired, atol=1e-12) + + +@pytest.mark.parametrize("preload", (True, False)) +@pytest.mark.parametrize("pick", (False, True)) +def test_compensation_apply(tmp_path, preload, pick): + """Test applying compensation.""" + # make sure that changing the comp doesn't modify the original data + raw = read_raw_fif(ctf_comp_fname, preload=preload) + assert raw._comp is None + raw2 = raw.copy() + raw2.apply_gradient_compensation(2) + if pick: + raw2.pick([0] + list(range(2, len(raw.ch_names)))) + raw.pick([0] + list(range(2, len(raw.ch_names)))) + assert get_current_comp(raw2.info) == 2 + if preload: + assert raw2._comp is None + else: + assert raw2._comp.shape == (len(raw2.ch_names),) * 2 + fname = tmp_path / "ctf-raw.fif" + raw2.save(fname) + raw2 = read_raw_fif(fname) + assert raw2.compensation_grade == 2 + raw2.apply_gradient_compensation(3) + assert raw2.compensation_grade == 3 + data, _ = raw[:, :] + data2, _ = raw2[:, :] + # channels have norm ~1e-12 + assert_allclose(data, data2, rtol=1e-9, atol=1e-18) + for ch1, ch2 in zip(raw.info["chs"], raw2.info["chs"]): + assert ch1["coil_type"] == ch2["coil_type"] + + +@requires_mne +def test_compensation_mne(tmp_path): + """Test comensation by comparing with MNE.""" + + def make_evoked(fname, comp): + """Make evoked data.""" + raw = read_raw_fif(fname) + if comp is not None: + raw.apply_gradient_compensation(comp) + picks = pick_types(raw.info, meg=True, ref_meg=True) + events = np.array([[0, 0, 1]], dtype=np.int64) + evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks, baseline=None).average() + return evoked + + def compensate_mne(fname, comp): + """Compensate using MNE-C.""" + tmp_fname = f"{fname.stem}-{comp}-ave.fif" + cmd = [ + "mne_compensate_data", + "--in", + str(fname), + "--out", + tmp_fname, + "--grad", + str(comp), + ] + run_subprocess(cmd) + return read_evokeds(tmp_fname)[0] + + # save evoked response with default compensation + fname_default = tmp_path / "ctf_default-ave.fif" + make_evoked(ctf_comp_fname, None).save(fname_default) + + for comp in [0, 1, 2, 3]: + evoked_py = make_evoked(ctf_comp_fname, comp) + evoked_c = compensate_mne(fname_default, comp) + picks_py = pick_types(evoked_py.info, meg=True, ref_meg=True) + picks_c = pick_types(evoked_c.info, meg=True, ref_meg=True) + assert_allclose( + evoked_py.data[picks_py], 
evoked_c.data[picks_c], rtol=1e-3, atol=1e-17 + ) + chs_py = [evoked_py.info["chs"][ii] for ii in picks_py] + chs_c = [evoked_c.info["chs"][ii] for ii in picks_c] + for ch_py, ch_c in zip(chs_py, chs_c): + assert ch_py["coil_type"] == ch_c["coil_type"] diff --git a/mne/_fiff/tests/test_constants.py b/mne/_fiff/tests/test_constants.py new file mode 100644 index 00000000000..43ea2290346 --- /dev/null +++ b/mne/_fiff/tests/test_constants.py @@ -0,0 +1,457 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +import re +import shutil +import zipfile + +import numpy as np +import pooch +import pytest + +from mne._fiff.constants import ( + FIFF, + FWD, + _ch_coil_type_named, + _ch_kind_named, + _ch_unit_mul_named, + _ch_unit_named, + _coord_frame_named, + _dig_cardinal_named, + _dig_kind_named, +) +from mne.forward._make_forward import _read_coil_defs +from mne.transforms import _frame_to_str, _verbose_frames +from mne.utils import requires_good_network + +# https://github.com/mne-tools/fiff-constants/commits/master +REPO = "mne-tools" +COMMIT = "e27f68cbf74dbfc5193ad429cc77900a59475181" + +# These are oddities that we won't address: +iod_dups = (355, 359) # these are in both MEGIN and MNE files +tag_dups = (3501,) # in both MEGIN and MNE files + +_dir_ignore_names = ( + "clear", + "copy", + "fromkeys", + "get", + "items", + "keys", + "pop", + "popitem", + "setdefault", + "update", + "values", + "has_key", + "iteritems", + "iterkeys", + "itervalues", # Py2 + "viewitems", + "viewkeys", + "viewvalues", # Py2 +) +_tag_ignore_names = () # for fiff-constants pending updates +_ignore_incomplete_enums = ( # XXX eventually we could complete these + "bem_surf_id", + "cardinal_point_cardiac", + "cond_model", + "coord", + "dacq_system", + "diffusion_param", + "gantry_type", + "map_surf", + "mne_lin_proj", + "mne_ori", + "mri_format", + "mri_pixel", + "proj_by", + "tags", + "type", + "iod", + "volume_type", + "vol_type", +) +# not in coil_def.dat but in DictionaryTypes:enum(coil) +_missing_coil_def = ( + 0, # The location info contains no data + 1, # EEG electrode position in r0 + 3, # Old 24 channel system in HUT + 4, # The axial devices in the HUCS MCG system + 5, # Bipolar EEG electrode position + 6, # CSD-transformed EEG electrodes + 200, # Time-varying dipole definition + 300, # fNIRS oxyhemoglobin + 301, # fNIRS deoxyhemoglobin + 302, # fNIRS continuous wave + 303, # fNIRS optical density + 304, # fNIRS frequency domain AC amplitude + 305, # fNIRS frequency domain phase + 306, # fNIRS time domain gated amplitude + 307, # fNIRS time domain moments amplitude + 400, # Eye-tracking gaze position + 401, # Eye-tracking pupil size + 1000, # For testing the MCG software + 2001, # Generic axial gradiometer + 3011, # VV prototype wirewound planar sensor + 3014, # Vectorview SQ20950N planar gradiometer + 3021, # VV prototype wirewound magnetometer +) +# explicit aliases in constants.py +_aliases = dict( + FIFFV_COIL_MAGNES_R_MAG="FIFFV_COIL_MAGNES_REF_MAG", + FIFFV_COIL_MAGNES_R_GRAD="FIFFV_COIL_MAGNES_REF_GRAD", + FIFFV_COIL_MAGNES_R_GRAD_OFF="FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD", + FIFFV_COIL_FNIRS_RAW="FIFFV_COIL_FNIRS_CW_AMPLITUDE", + FIFFV_MNE_COORD_CTF_HEAD="FIFFV_MNE_COORD_4D_HEAD", + FIFFV_MNE_COORD_KIT_HEAD="FIFFV_MNE_COORD_4D_HEAD", + FIFFV_MNE_COORD_DIGITIZER="FIFFV_COORD_ISOTRAK", + FIFFV_MNE_COORD_SURFACE_RAS="FIFFV_COORD_MRI", + FIFFV_MNE_SENSOR_COV="FIFFV_MNE_NOISE_COV", + FIFFV_POINT_EEG="FIFFV_POINT_ECG", + 
FIFF_DESCRIPTION="FIFF_COMMENT", + FIFF_REF_PATH="FIFF_MRI_SOURCE_PATH", +) + + +@pytest.mark.flaky +@requires_good_network +@pytest.mark.ultraslowtest # not that slow, just doesn't need to run very often +def test_constants(tmp_path): + """Test compensation.""" + fname = "fiff.zip" + dest = tmp_path / fname + pooch.retrieve( + url=f"https://codeload.github.com/%7BREPO%7D/fiff-constants/zip/%7BCOMMIT%7D", + path=tmp_path, + fname=fname, + known_hash=None, + ) + names = list() + with zipfile.ZipFile(dest, "r") as ff: + for name in ff.namelist(): + if "Dictionary" in name: + ff.extract(name, tmp_path) + names.append(os.path.basename(name)) + shutil.move(tmp_path / name, tmp_path / names[-1]) + names = sorted(names) + assert names == [ + "DictionaryIOD.txt", + "DictionaryIOD_MNE.txt", + "DictionaryStructures.txt", + "DictionaryTags.txt", + "DictionaryTags_MNE.txt", + "DictionaryTypes.txt", + "DictionaryTypes_MNE.txt", + ] + # IOD (MEGIN and MNE) + fif = dict(iod=dict(), tags=dict(), types=dict(), defines=dict()) + con = dict(iod=dict(), tags=dict(), types=dict(), defines=dict()) + fiff_version = None + for name in ["DictionaryIOD.txt", "DictionaryIOD_MNE.txt"]: + with open(tmp_path / name, "rb") as fid: + for line in fid: + line = line.decode("latin1").strip() + if line.startswith("# Packing revision"): + assert fiff_version is None + fiff_version = line.split()[-1] + if line.startswith("#") or line.startswith("alias") or len(line) == 0: + continue + line = line.split('"') + assert len(line) in (1, 2, 3) + desc = "" if len(line) == 1 else line[1] + line = line[0].split() + assert len(line) in (2, 3) + if len(line) == 2: + kind, id_ = line + else: + kind, id_, tagged = line + assert tagged in ("tagged",) + id_ = int(id_) + if id_ not in iod_dups: + assert id_ not in fif["iod"] + fif["iod"][id_] = [kind, desc] + # Tags (MEGIN) + with open(tmp_path / "DictionaryTags.txt", "rb") as fid: + for line in fid: + line = line.decode("ISO-8859-1").strip() + if ( + line.startswith("#") + or line.startswith("alias") + or line.startswith(":") + or len(line) == 0 + ): + continue + line = line.split('"') + assert len(line) in (1, 2, 3), line + desc = "" if len(line) == 1 else line[1] + line = line[0].split() + assert len(line) == 4, line + kind, id_, dtype, unit = line + id_ = int(id_) + val = [kind, dtype, unit] + assert id_ not in fif["tags"], (fif["tags"].get(id_), val) + fif["tags"][id_] = val + # Tags (MNE) + with open(tmp_path / "DictionaryTags_MNE.txt", "rb") as fid: + for li, line in enumerate(fid): + line = line.decode("ISO-8859-1").strip() + # ignore continuation lines (*) + if ( + line.startswith("#") + or line.startswith("alias") + or line.startswith(":") + or line.startswith("*") + or len(line) == 0 + ): + continue + # weird syntax around line 80: + if line in ("/*", '"'): + continue + line = line.split('"') + assert len(line) in (1, 2, 3), line + if len(line) == 3 and len(line[2]) > 0: + l2 = line[2].strip() + assert l2.startswith("/*") and l2.endswith("*/"), l2 + desc = "" if len(line) == 1 else line[1] + line = line[0].split() + assert len(line) == 3, (li + 1, line) + kind, id_, dtype = line + unit = "-" + id_ = int(id_) + val = [kind, dtype, unit] + if id_ not in tag_dups: + assert id_ not in fif["tags"], (fif["tags"].get(id_), val) + fif["tags"][id_] = val + + # Types and enums + in_ = None + re_prim = re.compile(r'^primitive\((.*)\)\s*(\S*)\s*"(.*)"$') + re_enum = re.compile(r'^enum\((\S*)\)\s*".*"$') + re_enum_entry = re.compile(r'\s*(\S*)\s*(\S*)\s*"(.*)"$') + re_deri = 
re.compile(r"^derived_type\((.*)\)\s*(\S*)\s*$") + re_defi = re.compile(r'#define\s*(\S*)\s*(\S*)\s*"(.*)"$') + used_enums = list() + for extra in ("", "_MNE"): + with open(tmp_path / f"DictionaryTypes{extra}.txt", "rb") as fid: + for li, line in enumerate(fid): + line = line.decode("ISO-8859-1").strip() + if in_ is None: + p = re_prim.match(line) + e = re_enum.match(line) + d = re_defi.match(line) + r = re_deri.match(line) + if p is not None: + t, s, d = p.groups() + s = int(s) + assert s not in fif["types"] + fif["types"][s] = [t, d] + elif e is not None: + # entering an enum + this_enum = e.group(1) + if this_enum not in fif: + used_enums.append(this_enum) + fif[this_enum] = dict() + con[this_enum] = dict() + in_ = fif[this_enum] + elif d is not None: + t, s, d = d.groups() + s = int(s) + fif["defines"][t] = [s, d] + elif r is not None: + t, s = r.groups() + s = int(s, 16) + fif["types"][s] = t + else: + assert not line.startswith("enum(") + assert not line.startswith("derived_type") + else: # in an enum + if line == "{": + continue + elif line == "}": + in_ = None + continue + t, s, d = re_enum_entry.match(line).groups() + s = int(s) + if t != "ecg" and s != 3: # ecg defined the same way + assert s not in in_ + in_[s] = [t, d] + + # + # Assertions + # + + # Version + mne_version = f"{FIFF.FIFFC_MAJOR_VERSION:d}.{FIFF.FIFFC_MINOR_VERSION:d}" + assert fiff_version == mne_version + unknowns = list() + + # Assert that all our constants are in the FIF def + assert "FIFFV_SSS_JOB_NOTHING" in dir(FIFF) + for name in sorted(dir(FIFF)): + if name.startswith("_") or name in _dir_ignore_names: + continue + check = None + val = getattr(FIFF, name) + if name in fif["defines"]: + assert fif["defines"][name][0] == val + elif name.startswith("FIFFC_"): + # Checked above + assert name in ( + "FIFFC_MAJOR_VERSION", + "FIFFC_MINOR_VERSION", + "FIFFC_VERSION", + ) + elif name.startswith("FIFFB_"): + check = "iod" + elif name.startswith("FIFFT_"): + check = "types" + elif name.startswith("FIFFV_"): + if name.startswith("FIFFV_MNE_") and name.endswith("_ORI"): + check = "mne_ori" + elif name.startswith("FIFFV_MNE_") and name.endswith("_COV"): + check = "covariance_type" + elif name.startswith("FIFFV_MNE_COORD"): + check = "coord" # weird wrapper + elif ( + name.endswith("_CH") + or "_QUAT_" in name + or name + in ( + "FIFFV_DIPOLE_WAVE", + "FIFFV_GOODNESS_FIT", + "FIFFV_HPI_ERR", + "FIFFV_HPI_G", + "FIFFV_HPI_MOV", + ) + ): + check = "ch_type" + elif name.startswith("FIFFV_SUBJ_"): + check = name.split("_")[2].lower() + elif name in ( + "FIFFV_POINT_LPA", + "FIFFV_POINT_NASION", + "FIFFV_POINT_RPA", + "FIFFV_POINT_INION", + ): + check = "cardinal_point" + else: + for check in used_enums: + if name.startswith("FIFFV_" + check.upper()): + break + else: + if name not in _tag_ignore_names: + raise RuntimeError(f"Could not find {name}") + assert check in used_enums, name + if "SSS" in check: + raise RuntimeError + elif name.startswith("FIFF_UNIT"): # units and multipliers + check = name.split("_")[1].lower() + elif name.startswith("FIFF_"): + check = "tags" + else: + unknowns.append((name, val)) + if check is not None and name not in _tag_ignore_names: + assert val in fif[check], f"{check}: {val}, {name}" + if val in con[check]: + msg = f"{name}='{con[check][val]}' ?" 
+ assert _aliases.get(name) == con[check][val], msg + else: + con[check][val] = name + unknowns = "\n\t".join(f"{u[0]} ({u[1]})" for u in unknowns) + assert len(unknowns) == 0, f"Unknown types\n\t{unknowns}" + + # Assert that all the FIF defs are in our constants + assert set(fif.keys()) == set(con.keys()) + for key in sorted(set(fif.keys()) - {"defines"}): + this_fif, this_con = fif[key], con[key] + assert len(set(this_fif.keys())) == len(this_fif) + assert len(set(this_con.keys())) == len(this_con) + missing_from_con = sorted(set(this_con.keys()) - set(this_fif.keys())) + assert missing_from_con == [], key + if key not in _ignore_incomplete_enums: + missing_from_fif = sorted(set(this_fif.keys()) - set(this_con.keys())) + assert missing_from_fif == [], key + + # Assert that `coil_def.dat` has accurate descriptions of all enum(coil) + coil_def = _read_coil_defs() + coil_desc = np.array([c["desc"] for c in coil_def]) + coil_def = np.array([(c["coil_type"], c["accuracy"]) for c in coil_def], int) + mask = coil_def[:, 1] == FWD.COIL_ACCURACY_ACCURATE + coil_def = coil_def[mask, 0] + coil_desc = coil_desc[mask] + bad_list = [] + for key in fif["coil"]: + if key not in _missing_coil_def and key not in coil_def: + bad_list.append((f" {key},").ljust(10) + " # " + fif["coil"][key][1]) + assert len(bad_list) == 0, ( + "\nIn fiff-constants, missing from coil_def:\n" + "\n".join(bad_list) + ) + # Assert that enum(coil) has all `coil_def.dat` entries + for key, desc in zip(coil_def, coil_desc): + if key not in fif["coil"]: + bad_list.append((f" {key},").ljust(10) + " # " + desc) + assert len(bad_list) == 0, ( + "In coil_def, missing from fiff-constants:\n" + "\n".join(bad_list) + ) + + +@pytest.mark.parametrize( + "dict_, match, extras", + [ + ({**_dig_kind_named, **_dig_cardinal_named}, "FIFFV_POINT_", ()), + ( + _ch_kind_named, + "^FIFFV_.*_CH$", + (FIFF.FIFFV_DIPOLE_WAVE, FIFF.FIFFV_GOODNESS_FIT), + ), + pytest.param( + _coord_frame_named, + "FIFFV_(MNE_)?COORD_", + (), + marks=pytest.mark.xfail( + reason="Intentional mismatch tested by test_coord_frame_consistency", + ), + ), + (_ch_unit_named, "FIFF_UNIT_", ()), + (_ch_unit_mul_named, "FIFF_UNITM_", ()), + (_ch_coil_type_named, "FIFFV_COIL_", ()), + ], +) +def test_dict_completion(dict_, match, extras): + """Test readable dict completions.""" + regex = re.compile(match) + got = set(FIFF[key] for key in FIFF if regex.search(key) is not None) + for e in extras: + got.add(e) + want = set(dict_) + assert got == want, match + + +def test_coord_frame_consistency(): + """Test consistency between coord frame mappings.""" + all_frames = set( + key for key in dir(FIFF) if key.startswith(("FIFFV_COORD_", "FIFFV_MNE_COORD")) + ) + # ... 
but there are some frames that we never work in so let's cull those for now + ignore_frames = set( + f"FIFFV_COORD_{name}" + for name in """ + MRI_SLICE MRI_DISPLAY DICOM_DEVICE IMAGING_DEVICE + """.strip().split() + ) + ignore_frames |= set( + f"FIFFV_MNE_COORD_{name}" + for name in """ + DIGITIZER TUFTS_EEG FS_TAL_GTZ FS_TAL_LTZ + """.strip().split() + ) + assert ignore_frames.issubset(all_frames) + all_frames -= ignore_frames + all_ints = set(FIFF[key] for key in all_frames) + assert set(_frame_to_str) == all_ints + assert set(_verbose_frames) == all_ints + assert set(_coord_frame_named) == all_ints diff --git a/mne/_fiff/tests/test_meas_info.py b/mne/_fiff/tests/test_meas_info.py new file mode 100644 index 00000000000..119829fce0a --- /dev/null +++ b/mne/_fiff/tests/test_meas_info.py @@ -0,0 +1,1328 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import pickle +import string +from datetime import date, datetime, timedelta, timezone +from pathlib import Path + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal +from scipy import sparse + +from mne import ( + Annotations, + Epochs, + compute_covariance, + make_forward_solution, + make_sphere_model, + pick_info, + pick_types, + read_cov, + read_epochs, + read_events, + read_evokeds, + read_forward_solution, + setup_volume_source_space, + write_cov, + write_forward_solution, +) +from mne._fiff import meas_info, tag +from mne._fiff._digitization import DigPoint, _make_dig_points +from mne._fiff.constants import FIFF +from mne._fiff.meas_info import ( + RAW_INFO_FIELDS, + Info, + MNEBadsList, + _add_timedelta_to_stamp, + _bad_chans_comp, + _dt_to_stamp, + _force_update_info, + _get_valid_units, + _merge_info, + _read_extended_ch_info, + _stamp_to_dt, + anonymize_info, + create_info, + read_fiducials, + read_info, + write_fiducials, + write_info, +) +from mne._fiff.proj import Projection +from mne._fiff.tag import _coil_trans_to_loc, _loc_to_coil_trans +from mne._fiff.write import DATE_NONE, _generate_meas_id +from mne.channels import ( + equalize_channels, + make_standard_montage, + read_polhemus_fastscan, +) +from mne.datasets import testing +from mne.event import make_fixed_length_events +from mne.io import BaseRaw, RawArray, read_raw_ctf, read_raw_fif +from mne.minimum_norm import ( + apply_inverse, + make_inverse_operator, + read_inverse_operator, + write_inverse_operator, +) +from mne.transforms import Transform +from mne.utils import ( + _empty_hash, + _record_warnings, + assert_object_equal, + catch_logging, + object_diff, +) + +root_dir = Path(__file__).parents[2] +fiducials_fname = root_dir / "data" / "fsaverage" / "fsaverage-fiducials.fif" +base_dir = root_dir / "io" / "tests" / "data" +raw_fname = base_dir / "test_raw.fif" +chpi_fname = base_dir / "test_chpi_raw_sss.fif" +event_name = base_dir / "test-eve.fif" + +kit_data_dir = root_dir / "io" / "kit" / "tests" / "data" +hsp_fname = kit_data_dir / "test_hsp.txt" +elp_fname = kit_data_dir / "test_elp.txt" + +data_path = testing.data_path(download=False) +sss_path = data_path / "SSS" +sss_ctc_fname = sss_path / "test_move_anon_crossTalk_raw_sss.fif" +ctf_fname = data_path / "CTF" / "testdata_ctf.ds" +raw_invalid_bday_fname = data_path / "misc" / "sample_invalid_birthday_raw.fif" + + +@pytest.mark.parametrize( + "kwargs, want", + [ + (dict(meg=False, eeg=True), [0]), + (dict(meg=False, fnirs=True), [5]), + (dict(meg=False, fnirs="hbo"), [5]), + (dict(meg=False, 
fnirs="hbr"), []), + (dict(meg=False, misc=True), [1]), + (dict(meg=True), [2, 3, 4]), + (dict(meg="grad"), [2, 3]), + (dict(meg="planar1"), [2]), + (dict(meg="planar2"), [3]), + (dict(meg="mag"), [4]), + ], +) +def test_create_info_grad(kwargs, want): + """Test create_info behavior with grad coils.""" + info = create_info(6, 256, ["eeg", "misc", "grad", "grad", "mag", "hbo"]) + # Put these in an order such that grads get named "2" and "3", since + # they get picked based first on coil_type then ch_name... + assert [ + ch["ch_name"] + for ch in info["chs"] + if ch["coil_type"] == FIFF.FIFFV_COIL_VV_PLANAR_T1 + ] == ["2", "3"] + picks = pick_types(info, **kwargs) + assert_array_equal(picks, want) + + +def test_get_valid_units(): + """Test the valid units.""" + valid_units = _get_valid_units() + assert isinstance(valid_units, tuple) + assert all(isinstance(unit, str) for unit in valid_units) + assert "n/a" in valid_units + + +def test_coil_trans(): + """Test loc<->coil_trans functions.""" + rng = np.random.RandomState(0) + x = rng.randn(4, 4) + x[3] = [0, 0, 0, 1] + assert_allclose(_loc_to_coil_trans(_coil_trans_to_loc(x)), x) + x = rng.randn(12) + assert_allclose(_coil_trans_to_loc(_loc_to_coil_trans(x)), x) + + +def test_make_info(): + """Test some create_info properties.""" + n_ch = np.longlong(1) + info = create_info(n_ch, 1000.0, "eeg") + assert set(info.keys()) == set(RAW_INFO_FIELDS) + + coil_types = {ch["coil_type"] for ch in info["chs"]} + assert FIFF.FIFFV_COIL_EEG in coil_types + + pytest.raises(TypeError, create_info, ch_names="Test Ch", sfreq=1000) + pytest.raises(ValueError, create_info, ch_names=["Test Ch"], sfreq=-1000) + pytest.raises( + ValueError, + create_info, + ch_names=["Test Ch"], + sfreq=1000, + ch_types=["eeg", "eeg"], + ) + pytest.raises(TypeError, create_info, ch_names=[np.array([1])], sfreq=1000) + pytest.raises( + KeyError, create_info, ch_names=["Test Ch"], sfreq=1000, ch_types=np.array([1]) + ) + pytest.raises( + KeyError, create_info, ch_names=["Test Ch"], sfreq=1000, ch_types="awesome" + ) + pytest.raises( + TypeError, create_info, ["Test Ch"], sfreq=1000, montage=np.array([1]) + ) + m = make_standard_montage("biosemi32") + info = create_info(ch_names=m.ch_names, sfreq=1000.0, ch_types="eeg") + info.set_montage(m) + ch_pos = [ch["loc"][:3] for ch in info["chs"]] + ch_pos_mon = m._get_ch_pos() + ch_pos_mon = np.array([ch_pos_mon[ch_name] for ch_name in info["ch_names"]]) + # transform to head + ch_pos_mon += (0.0, 0.0, 0.04014) + assert_allclose(ch_pos, ch_pos_mon, atol=1e-5) + + +def test_duplicate_name_correction(): + """Test duplicate channel names with running number.""" + # When running number is possible + info = create_info(["A", "A", "A"], 1000.0, verbose="error") + assert info["ch_names"] == ["A-0", "A-1", "A-2"] + + # When running number is not possible but alpha numeric is + info = create_info(["A", "A", "A-0"], 1000.0, verbose="error") + assert info["ch_names"] == ["A-a", "A-1", "A-0"] + + # When a single addition is not sufficient + with pytest.raises(ValueError, match="Adding a single alphanumeric"): + ch_n = ["A", "A"] + # add all options for first duplicate channel (0) + ch_n.extend([f"{ch_n[0]}-{c}" for c in string.ascii_lowercase + "0"]) + create_info(ch_n, 1000.0, verbose="error") + + +def test_fiducials_io(tmp_path): + """Test fiducials i/o.""" + pts, coord_frame = read_fiducials(fiducials_fname) + assert pts[0]["coord_frame"] == FIFF.FIFFV_COORD_MRI + assert pts[0]["ident"] == FIFF.FIFFV_POINT_CARDINAL + + temp_fname = tmp_path / 
"test.fif" + write_fiducials(temp_fname, pts, coord_frame) + pts_1, coord_frame_1 = read_fiducials(temp_fname) + assert coord_frame == coord_frame_1 + for pt, pt_1 in zip(pts, pts_1): + assert pt["kind"] == pt_1["kind"] + assert pt["ident"] == pt_1["ident"] + assert pt["coord_frame"] == pt_1["coord_frame"] + assert_array_equal(pt["r"], pt_1["r"]) + assert isinstance(pt, DigPoint) + assert isinstance(pt_1, DigPoint) + + # test safeguards + pts[0]["coord_frame"] += 1 + with pytest.raises(ValueError, match="coord_frame entries that are incom"): + write_fiducials(temp_fname, pts, coord_frame, overwrite=True) + + +def test_info(): + """Test info object.""" + raw = read_raw_fif(raw_fname) + event_id, tmin, tmax = 1, -0.2, 0.5 + events = read_events(event_name) + event_id = int(events[0, 2]) + epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None) + + evoked = epochs.average() + + # Test subclassing was successful. + info = Info(a=7, b="aaaaa") + assert "a" in info + assert "b" in info + + # Test info attribute in API objects + for obj in [raw, epochs, evoked]: + assert isinstance(obj.info, Info) + rep = repr(obj.info) + assert "2002-12-03 19:01:10 UTC" in rep, rep + assert "146 items (3 Cardinal, 4 HPI, 61 EEG, 78 Extra)" in rep + dig_rep = repr(obj.info["dig"][0]) + assert "LPA" in dig_rep, dig_rep + assert "(-71.4, 0.0, 0.0) mm" in dig_rep, dig_rep + assert "head frame" in dig_rep, dig_rep + # Test our BunchConstNamed support + for func in (str, repr): + assert "4 (FIFFV_COORD_HEAD)" == func(obj.info["dig"][0]["coord_frame"]) + + # Test read-only fields + info = raw.info.copy() + nchan = len(info["chs"]) + ch_names = [ch["ch_name"] for ch in info["chs"]] + assert info["nchan"] == nchan + assert list(info["ch_names"]) == ch_names + + # Deleting of regular fields should work + info["experimenter"] = "bar" + del info["experimenter"] + + # Test updating of fields + del info["chs"][-1] + info._update_redundant() + assert info["nchan"] == nchan - 1 + assert list(info["ch_names"]) == ch_names[:-1] + + info["chs"][0]["ch_name"] = "foo" + info._update_redundant() + assert info["ch_names"][0] == "foo" + + # Test casting to and from a dict + info_dict = dict(info) + info2 = Info(info_dict) + assert info == info2 + + +def test_read_write_info(tmp_path): + """Test IO of info.""" + info = read_info(raw_fname) + temp_file = tmp_path / "info.fif" + # check for bug `#1198` + info["dev_head_t"]["trans"] = np.eye(4) + t1 = info["dev_head_t"]["trans"] + write_info(temp_file, info) + info2 = read_info(temp_file) + t2 = info2["dev_head_t"]["trans"] + assert len(info["chs"]) == len(info2["chs"]) + assert_array_equal(t1, t2) + # proc_history (e.g., GH#1875) + creator = "é" + info = read_info(chpi_fname) + info["proc_history"][0]["creator"] = creator + info["hpi_meas"][0]["creator"] = creator + info["subject_info"]["his_id"] = creator + info["subject_info"]["weight"] = 11.1 + info["subject_info"]["height"] = 2.3 + + with info._unlock(): + if info["gantry_angle"] is None: # future testing data may include it + info["gantry_angle"] = 0.0 # Elekta supine position + gantry_angle = info["gantry_angle"] + + meas_id = info["meas_id"] + with pytest.raises(FileExistsError, match="Destination file exists"): + write_info(temp_file, info) + write_info(temp_file, info, overwrite=True) + info = read_info(temp_file) + assert info["proc_history"][0]["creator"] == creator + assert info["hpi_meas"][0]["creator"] == creator + assert info["subject_info"]["his_id"] == creator + assert info["gantry_angle"] == gantry_angle + 
assert_allclose(info["subject_info"]["height"], 2.3) + assert_allclose(info["subject_info"]["weight"], 11.1) + for key in ["secs", "usecs", "version"]: + assert info["meas_id"][key] == meas_id[key] + assert_array_equal(info["meas_id"]["machid"], meas_id["machid"]) + + # Test that writing twice produces the same file + m1 = _empty_hash() + with open(temp_file, "rb") as fid: + m1.update(fid.read()) + m1 = m1.hexdigest() + temp_file_2 = tmp_path / "info2.fif" + assert temp_file_2 != temp_file + write_info(temp_file_2, info) + m2 = _empty_hash() + with open(str(temp_file_2), "rb") as fid: + m2.update(fid.read()) + m2 = m2.hexdigest() + assert m1 == m2 + + info = read_info(raw_fname) + with info._unlock(): + info["meas_date"] = None + anonymize_info(info, verbose="error") + assert info["meas_date"] is None + tmp_fname_3 = tmp_path / "info3.fif" + write_info(tmp_fname_3, info) + assert info["meas_date"] is None + info2 = read_info(tmp_fname_3) + assert info2["meas_date"] is None + + # Check that having a very old date in fine until you try to save it to fif + with info._unlock(check_after=True): + info["meas_date"] = datetime(1800, 1, 1, 0, 0, 0, tzinfo=timezone.utc) + fname = tmp_path / "test.fif" + with pytest.raises(RuntimeError, match="must be between "): + write_info(fname, info, overwrite=True) + + +@testing.requires_testing_data +def test_dir_warning(): + """Test that trying to read a bad filename emits a warning before an error.""" + with ( + pytest.raises(OSError, match="directory"), + pytest.warns(RuntimeWarning, match="does not conform"), + ): + read_info(ctf_fname) + + +def test_io_dig_points(tmp_path): + """Test Writing for dig files.""" + dest = tmp_path / "test.txt" + points2 = np.array([[-106.93, 99.80], [99.80, 68.81]]) + np.savetxt(dest, points2, delimiter="\t", newline="\n") + with pytest.raises(ValueError, match="must be of shape"): + with pytest.warns(RuntimeWarning, match="FastSCAN header"): + read_polhemus_fastscan(dest, on_header_missing="warn") + + +def test_io_coord_frame(tmp_path): + """Test round trip for coordinate frame.""" + fname = tmp_path / "test.fif" + for ch_type in ("eeg", "seeg", "ecog", "dbs", "hbo", "hbr"): + info = create_info(ch_names=["Test Ch"], sfreq=1000.0, ch_types=[ch_type]) + info["chs"][0]["loc"][:3] = [0.05, 0.01, -0.03] + write_info(fname, info, overwrite=True) + info2 = read_info(fname) + assert info2["chs"][0]["coord_frame"] == FIFF.FIFFV_COORD_HEAD + + +def test_make_dig_points(): + """Test application of Polhemus HSP to info.""" + extra_points = read_polhemus_fastscan(hsp_fname, on_header_missing="ignore") + info = create_info(ch_names=["Test Ch"], sfreq=1000.0) + assert info["dig"] is None + + with info._unlock(): + info["dig"] = _make_dig_points(extra_points=extra_points) + assert info["dig"] + assert_allclose(info["dig"][0]["r"], [-0.10693, 0.09980, 0.06881]) + + elp_points = read_polhemus_fastscan(elp_fname, on_header_missing="ignore") + nasion, lpa, rpa = elp_points[:3] + info = create_info(ch_names=["Test Ch"], sfreq=1000.0) + assert info["dig"] is None + + with info._unlock(): + info["dig"] = _make_dig_points(nasion, lpa, rpa, elp_points[3:], None) + assert info["dig"] + idx = [d["ident"] for d in info["dig"]].index(FIFF.FIFFV_POINT_NASION) + assert_allclose(info["dig"][idx]["r"], [0.0013930, 0.0131613, -0.0046967]) + pytest.raises(ValueError, _make_dig_points, nasion[:2]) + pytest.raises(ValueError, _make_dig_points, None, lpa[:2]) + pytest.raises(ValueError, _make_dig_points, None, None, rpa[:2]) + pytest.raises(ValueError, 
_make_dig_points, None, None, None, elp_points[:, :2]) + pytest.raises( + ValueError, _make_dig_points, None, None, None, None, elp_points[:, :2] + ) + + +def test_redundant(): + """Test some of the redundant properties of info.""" + # Indexing + info = create_info(ch_names=["a", "b", "c"], sfreq=1000.0) + assert info["ch_names"][0] == "a" + assert info["ch_names"][1] == "b" + assert info["ch_names"][2] == "c" + + # Equality + assert info["ch_names"] == info["ch_names"] + assert info["ch_names"] == ["a", "b", "c"] + + # No channels in info + info = create_info(ch_names=[], sfreq=1000.0) + assert info["ch_names"] == [] + + # List should be read-only + info = create_info(ch_names=["a", "b", "c"], sfreq=1000.0) + + +def test_merge_info(): + """Test merging of multiple Info objects.""" + info_a = create_info(ch_names=["a", "b", "c"], sfreq=1000.0) + info_b = create_info(ch_names=["d", "e", "f"], sfreq=1000.0) + info_merged = _merge_info([info_a, info_b]) + assert info_merged["nchan"], 6 + assert info_merged["ch_names"], ["a", "b", "c", "d", "e", "f"] + pytest.raises(ValueError, _merge_info, [info_a, info_a]) + + # Testing for force updates before merging + info_c = create_info(ch_names=["g", "h", "i"], sfreq=500.0) + # This will break because sfreq is not equal + pytest.raises(RuntimeError, _merge_info, [info_a, info_c]) + _force_update_info(info_a, info_c) + assert info_c["sfreq"] == info_a["sfreq"] + assert info_c["ch_names"][0] != info_a["ch_names"][0] + # Make sure it works now + _merge_info([info_a, info_c]) + # Check that you must supply Info + pytest.raises(ValueError, _force_update_info, info_a, dict([("sfreq", 1000.0)])) + # KIT System-ID + info_a._unlocked = info_b._unlocked = True + info_a["kit_system_id"] = 50 + assert _merge_info((info_a, info_b))["kit_system_id"] == 50 + info_b["kit_system_id"] = 50 + assert _merge_info((info_a, info_b))["kit_system_id"] == 50 + info_b["kit_system_id"] = 60 + pytest.raises(ValueError, _merge_info, (info_a, info_b)) + + # hpi infos + info_d = create_info(ch_names=["d", "e", "f"], sfreq=1000.0) + info_merged = _merge_info([info_a, info_d]) + assert not info_merged["hpi_meas"] + assert not info_merged["hpi_results"] + info_a["hpi_meas"] = [{"f1": 3, "f2": 4}] + assert _merge_info([info_a, info_d])["hpi_meas"] == info_a["hpi_meas"] + info_d._unlocked = True + info_d["hpi_meas"] = [{"f1": 3, "f2": 4}] + assert _merge_info([info_a, info_d])["hpi_meas"] == info_d["hpi_meas"] + # This will break because of inconsistency + info_d["hpi_meas"] = [{"f1": 3, "f2": 5}] + pytest.raises(ValueError, _merge_info, [info_a, info_d]) + + info_0 = read_info(raw_fname) + info_0["bads"] = ["MEG 2443", "EEG 053"] + assert len(info_0["chs"]) == 376 + assert len(info_0["dig"]) == 146 + info_1 = create_info(["STI YYY"], info_0["sfreq"], ["stim"]) + assert info_1["bads"] == [] + info_out = _merge_info([info_0, info_1], force_update_to_first=True) + assert len(info_out["chs"]) == 377 + assert len(info_out["bads"]) == 2 + assert len(info_out["dig"]) == 146 + assert len(info_0["chs"]) == 376 + assert len(info_0["bads"]) == 2 + assert len(info_0["dig"]) == 146 + + +def test_check_consistency(): + """Test consistency check of Info objects.""" + info = create_info(ch_names=["a", "b", "c"], sfreq=1000.0) + + # This should pass + info._check_consistency() + + # Info without any channels + info_empty = create_info(ch_names=[], sfreq=1000.0) + info_empty._check_consistency() + + # Bad channels that are not in the info object + info2 = info.copy() + with pytest.raises(ValueError, 
match="do not exist"): + info2["bads"] = ["b", "foo", "bar"] + + # Bad data types + info2 = info.copy() + with info2._unlock(): + info2["sfreq"] = "foo" + pytest.raises(ValueError, info2._check_consistency) + + info2 = info.copy() + with info2._unlock(): + info2["highpass"] = "foo" + pytest.raises(ValueError, info2._check_consistency) + + info2 = info.copy() + with info2._unlock(): + info2["lowpass"] = "foo" + pytest.raises(ValueError, info2._check_consistency) + + # Silent type conversion to float + info2 = info.copy() + with info2._unlock(check_after=True): + info2["sfreq"] = 1 + info2["highpass"] = 2 + info2["lowpass"] = 2 + assert isinstance(info2["sfreq"], float) + assert isinstance(info2["highpass"], float) + assert isinstance(info2["lowpass"], float) + + # Duplicate channel names + info2 = info.copy() + with info2._unlock(): + info2["chs"][2]["ch_name"] = "b" + pytest.raises(RuntimeError, info2._check_consistency) + + # Duplicates appended with running numbers + with pytest.warns(RuntimeWarning, match="Channel names are not"): + info3 = create_info(ch_names=["a", "b", "b", "c", "b"], sfreq=1000.0) + assert_array_equal(info3["ch_names"], ["a", "b-0", "b-1", "c", "b-2"]) + + # a few bad ones + idx = 0 + ch = info["chs"][idx] + for key, bad, match in ( + ("ch_name", 1.0, "must be an instance"), + ("loc", np.zeros(15), "12 elements"), + ("cal", np.ones(1), "numeric"), + ): + info._check_consistency() # okay + old = ch[key] + ch[key] = bad + if key == "ch_name": + info["ch_names"][idx] = bad + with pytest.raises(TypeError, match=match): + info._check_consistency() + ch[key] = old + if key == "ch_name": + info["ch_names"][idx] = old + + # bad channel entries + info2 = info.copy() + info2["chs"][0]["foo"] = "bar" + with pytest.raises(KeyError, match="key errantly present"): + info2._check_consistency() + info2 = info.copy() + del info2["chs"][0]["loc"] + with pytest.raises(KeyError, match="key missing"): + info2._check_consistency() + + # bad subject_info entries + info2 = info.copy() + with pytest.raises(TypeError, match="must be an instance"): + info2["subject_info"] = "bad" + info2["subject_info"] = dict() + with pytest.raises(TypeError, match="must be an instance"): + info2["subject_info"]["height"] = "bad" + with pytest.raises(TypeError, match="must be an instance"): + info2["subject_info"]["weight"] = [0] + with pytest.raises(TypeError, match=r'subject_info\["height"\] must be an .*'): + info2["subject_info"] = {"height": "bad"} + + +def _test_anonymize_info(base_info, tmp_path): + """Test that sensitive information can be anonymized.""" + pytest.raises(TypeError, anonymize_info, "foo") + assert isinstance(base_info, Info) + base_info = base_info.copy() + + default_anon_dos = datetime(2000, 1, 1, 0, 0, 0, tzinfo=timezone.utc) + default_str = "mne_anonymize" + default_subject_id = 0 + default_desc = "Anonymized using a time shift" + " to preserve age at acquisition" + + # Test no error for incomplete info + bad_info = base_info.copy() + bad_info.pop("file_id") + anonymize_info(bad_info) + del bad_info + + # Fake some additional data + _complete_info(base_info) + meas_date = datetime(2010, 1, 1, 0, 0, 0, tzinfo=timezone.utc) + with base_info._unlock(): + base_info["meas_date"] = meas_date + base_info["subject_info"].update( + birthday=date(1987, 4, 8), + his_id="foobar", + sex=0, + ) + + # generate expected info... + # first expected result with no options. + # will move DOS from 2010/1/1 to 2000/1/1 which is 3653 days. 
+ exp_info = base_info.copy() + exp_info._unlocked = True + exp_info["description"] = default_desc + erase_strs = ( + ("experimenter",), + ("proj_name",), + ("subject_info", "first_name"), + ("subject_info", "middle_name"), + ("subject_info", "last_name"), + ("device_info", "site"), + ("device_info", "serial"), + ("helium_info", "orig_file_guid"), + ("proc_history", 0, "experimenter"), + ) + for tp in erase_strs: + this = exp_info + for lev in tp[:-1]: + this = this[lev] + this[tp[-1]] = default_str + exp_info["proj_id"] = 0 + for key in ("sex", "id", "height", "weight"): + exp_info["subject_info"][key] = 0 + exp_info["subject_info"]["his_id"] = str(default_subject_id) + del exp_info["subject_info"]["hand"] # there's no "unknown" setting + exp_info["utc_offset"] = None + exp_info["proc_history"][0]["block_id"]["machid"][:] = 0 + + # this bday is 3653 days different. the change in day is due to a + # different number of leap days between 1987 and 1977 than between + # 2010 and 2000. + exp_info["subject_info"]["birthday"] = date(1977, 4, 7) + exp_info["meas_date"] = default_anon_dos + exp_info._unlocked = False + + # make copies + exp_info_3 = exp_info.copy() + + # adjust each expected outcome + delta_t = timedelta(days=3653) + + def _adjust_back(e_i, dt): + for key in ("file_id", "meas_id"): + value = e_i.get(key) + if value is not None: + assert "msecs" not in value + tmp = _add_timedelta_to_stamp((value["secs"], value["usecs"]), -dt) + value["secs"] = tmp[0] + value["usecs"] = tmp[1] + value["machid"][:] = 0 + e_i["helium_info"]["meas_date"] -= dt + ds = int(round(dt.total_seconds())) + e_i["proc_history"][0]["date"] = ( + e_i["proc_history"][0]["date"][0] - ds, + e_i["proc_history"][0]["date"][1], + ) + e_i["proc_history"][0]["block_id"]["secs"] -= ds + + _adjust_back(exp_info, delta_t) + + # exp 2 tests the keep_his option + exp_info_2 = exp_info.copy() + with exp_info_2._unlock(): + exp_info_2["subject_info"]["his_id"] = "foobar" + exp_info_2["subject_info"]["sex"] = 0 + exp_info_2["subject_info"]["hand"] = 1 + + # exp 3 tests is a supplied daysback + delta_t_2 = timedelta(days=43) + with exp_info_3._unlock(): + exp_info_3["subject_info"]["birthday"] = date(1987, 2, 24) + exp_info_3["meas_date"] = meas_date - delta_t_2 + _adjust_back(exp_info_3, delta_t_2) + + # exp 4 tests is a supplied daysback + delta_t_3 = timedelta(days=223 + 364 * 500) + + def _check_equiv(got, want, err_msg): + __tracebackhide__ = True + fname_temp = tmp_path / "test.fif" + assert_object_equal(got, want, err_msg=err_msg) + write_info(fname_temp, got, reset_range=False, overwrite=True) + got = read_info(fname_temp) + # this gets changed on write but that's expected + with got._unlock(): + got["file_id"] = want["file_id"] + assert_object_equal(got, want, err_msg=f"{err_msg} (on I/O round trip)") + + new_info = anonymize_info(base_info.copy()) + _check_equiv(new_info, exp_info, err_msg="anon mismatch") + + new_info = anonymize_info(base_info.copy(), keep_his=True) + _check_equiv(new_info, exp_info_2, err_msg="anon keep_his mismatch") + + new_info = anonymize_info(base_info.copy(), daysback=delta_t_2.days) + _check_equiv(new_info, exp_info_3, err_msg="anon daysback mismatch") + + with pytest.raises(RuntimeError, match="anonymize_info generated"): + anonymize_info(base_info.copy(), daysback=delta_t_3.days) + # assert_object_equal(new_info, exp_info_4) + + # test with meas_date = None + with base_info._unlock(): + base_info["meas_date"] = None + with exp_info_3._unlock(): + exp_info_3["meas_date"] = None + 
exp_info_3["helium_info"]["meas_date"] = None + for var in ( + exp_info_3["file_id"], + exp_info_3["meas_id"], + exp_info_3["proc_history"][0]["block_id"], + ): + var["secs"] = DATE_NONE[0] + var["usecs"] = DATE_NONE[1] + exp_info_3["subject_info"].pop("birthday", None) + exp_info_3["proc_history"][0]["date"] = DATE_NONE + + if base_info["meas_date"] is None: + with pytest.warns(RuntimeWarning, match="all information"): + new_info = anonymize_info(base_info.copy(), daysback=delta_t_2.days) + else: + new_info = anonymize_info(base_info.copy(), daysback=delta_t_2.days) + _check_equiv( + new_info, + exp_info_3, + err_msg="meas_date=None daysback mismatch", + ) + + with _record_warnings(): # meas_date is None + new_info = anonymize_info(base_info.copy()) + _check_equiv(new_info, exp_info_3, err_msg="meas_date=None mismatch") + + +@pytest.mark.parametrize( + "stamp, dt", + [ + [(1346981585, 835782), (2012, 9, 7, 1, 33, 5, 835782)], + # test old dates for BIDS anonymization + [(-1533443343, 24382), (1921, 5, 29, 19, 30, 57, 24382)], + # gh-7116 + [(-908196946, 988669), (1941, 3, 22, 11, 4, 14, 988669)], + ], +) +def test_meas_date_convert(stamp, dt): + """Test conversions of meas_date to datetime objects.""" + meas_datetime = _stamp_to_dt(stamp) + stamp2 = _dt_to_stamp(meas_datetime) + assert stamp == stamp2 + assert meas_datetime == datetime(*dt, tzinfo=timezone.utc) + # smoke test for info __repr__ + info = create_info(1, 1000.0, "eeg") + with info._unlock(): + info["meas_date"] = meas_datetime + assert str(dt[0]) in repr(info) + + +def test_birthday_input(): + """Test that birthday input is handled correctly.""" + pd = pytest.importorskip("pandas") + + # Test valid date + info = create_info(ch_names=["EEG 001"], sfreq=1000.0, ch_types="eeg") + info["subject_info"] = {} + info["subject_info"]["birthday"] = date(2000, 1, 1) + assert info["subject_info"]["birthday"] == date(2000, 1, 1) + + # pandas Timestamp should convert to datetime date + info["subject_info"]["birthday"] = pd.Timestamp("2000-01-01") + assert info["subject_info"]["birthday"] == date(2000, 1, 1) + # Ensure we've converted it during setting + assert not isinstance(info["subject_info"]["birthday"], pd.Timestamp) + + # Test invalid date raises error + with pytest.raises(TypeError, match="must be an instance of date"): + info["subject_info"]["birthday"] = "not a date" + + +def _complete_info(info): + """Complete the meas info fields.""" + for key in ("file_id", "meas_id"): + assert info[key] is not None + info["subject_info"] = dict( + id=1, + sex=1, + hand=1, + first_name="a", + middle_name="b", + last_name="c", + his_id="d", + birthday=date(2000, 1, 1), + weight=1.0, + height=2.0, + ) + info["helium_info"] = dict( + he_level_raw=np.float32(12.34), + helium_level=np.float32(45.67), + meas_date=datetime(2024, 11, 14, 14, 8, 2, tzinfo=timezone.utc), + orig_file_guid="e", + ) + info["experimenter"] = "f" + info["description"] = "g" + with info._unlock(): + info["proj_id"] = 1 + info["proj_name"] = "h" + info["utc_offset"] = "i" + d = (1717707794, 2) + info["proc_history"] = [ + dict( + block_id=dict( + version=4, + machid=np.ones(2, int), + secs=d[0], + usecs=d[1], + ), + experimenter="j", + max_info=dict( + max_st=dict(), + sss_ctc=dict(), + sss_cal=dict(), + sss_info=dict(in_order=8), + ), + date=d, + ), + ] + info["device_info"] = dict(serial="k", site="l") + info._check_consistency() + + +def test_anonymize(tmp_path): + """Test that sensitive information can be anonymized.""" + pytest.raises(TypeError, anonymize_info, "foo") + + 
# Fake some subject data + raw = read_raw_fif(raw_fname) + _complete_info(raw.info) + raw.set_annotations( + Annotations(onset=[0, 1], duration=[1, 1], description="dummy", orig_time=None) + ) + first_samp = raw.first_samp + expected_onset = np.arange(2) + raw._first_time + assert raw.first_samp == first_samp + assert_allclose(raw.annotations.onset, expected_onset) + + # test mne.anonymize_info() + events = read_events(event_name) + epochs = Epochs(raw, events[:1], 2, 0.0, 0.1, baseline=None) + _test_anonymize_info(raw.info, tmp_path) + _test_anonymize_info(epochs.info, tmp_path) + + # test instance methods & I/O roundtrip + for inst, keep_his in zip((raw, epochs), (True, False)): + inst = inst.copy() + + subject_info = dict(his_id="Volunteer", sex=2, hand=1) + inst.info["subject_info"] = subject_info + inst.anonymize(keep_his=keep_his) + + si = inst.info["subject_info"] + if keep_his: + assert si == subject_info + else: + assert si["his_id"] == "0" + assert si["sex"] == 0 + assert "hand" not in si + + # write to disk & read back + inst_type = "raw" if isinstance(inst, BaseRaw) else "epo" + fname = "tmp_raw.fif" if inst_type == "raw" else "tmp_epo.fif" + out_path = tmp_path / fname + inst.save(out_path, overwrite=True) + if inst_type == "raw": + read_raw_fif(out_path) + else: + read_epochs(out_path) + + # test that annotations are correctly zeroed + raw.anonymize() + assert raw.first_samp == first_samp + assert_allclose(raw.annotations.onset, expected_onset) + assert raw.annotations.orig_time == raw.info["meas_date"] + stamp = _dt_to_stamp(raw.info["meas_date"]) + assert raw.annotations.orig_time == _stamp_to_dt(stamp) + + with raw.info._unlock(): + raw.info["meas_date"] = None + raw.anonymize(daysback=None) + with pytest.warns(RuntimeWarning, match="None"): + raw.anonymize(daysback=123) + assert raw.annotations.orig_time is None + assert raw.first_samp == first_samp + assert_allclose(raw.annotations.onset, expected_onset) + + +@pytest.mark.parametrize("daysback", [None, 28826]) +def test_anonymize_with_io(tmp_path, daysback): + """Test that IO does not break anonymization and all fields.""" + raw = read_raw_fif(raw_fname).crop(0, 1) + _complete_info(raw.info) + temp_path = tmp_path / "tmp_raw.fif" + raw.save(temp_path) + raw2 = read_raw_fif(temp_path).load_data() + raw2.anonymize(daysback=daysback) + raw2.save(temp_path, overwrite=True) + raw3 = read_raw_fif(temp_path) + d = object_diff(raw2.info, raw3.info) + assert d == "['file_id']['machid'] array mismatch\n" + + +@testing.requires_testing_data +def test_csr_csc(tmp_path): + """Test CSR and CSC.""" + info = read_info(sss_ctc_fname) + info = pick_info(info, pick_types(info, meg=True, exclude=[])) + sss_ctc = info["proc_history"][0]["max_info"]["sss_ctc"] + ct = sss_ctc["decoupler"].copy() + # CSC + assert isinstance(ct, sparse.csc_array) + fname = tmp_path / "test.fif" + write_info(fname, info) + info_read = read_info(fname) + ct_read = info_read["proc_history"][0]["max_info"]["sss_ctc"]["decoupler"] + assert isinstance(ct_read, sparse.csc_array) + assert_array_equal(ct_read.toarray(), ct.toarray()) + # Now CSR + csr = ct.tocsr() + assert isinstance(csr, sparse.csr_array) + assert_array_equal(csr.toarray(), ct.toarray()) + info["proc_history"][0]["max_info"]["sss_ctc"]["decoupler"] = csr + fname = tmp_path / "test1.fif" + write_info(fname, info) + info_read = read_info(fname) + ct_read = info_read["proc_history"][0]["max_info"]["sss_ctc"]["decoupler"] + assert isinstance(ct_read, sparse.csc_array) # this gets cast to CSC + 
assert_array_equal(ct_read.toarray(), ct.toarray()) + + +@pytest.mark.slowtest +@testing.requires_testing_data +def test_check_compensation_consistency(): + """Test check picks compensation.""" + raw = read_raw_ctf(ctf_fname, preload=False) + events = make_fixed_length_events(raw, 99999) + picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=True) + pick_ch_names = [raw.info["ch_names"][idx] for idx in picks] + for comp, expected_result in zip([0, 1], [False, False]): + raw.apply_gradient_compensation(comp) + ret, missing = _bad_chans_comp(raw.info, pick_ch_names) + assert ret == expected_result + assert len(missing) == 0 + Epochs(raw, events, None, -0.2, 0.2, preload=False, picks=picks) + + picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=False) + pick_ch_names = [raw.info["ch_names"][idx] for idx in picks] + + for comp, expected_result in zip([0, 1], [False, True]): + raw.apply_gradient_compensation(comp) + ret, missing = _bad_chans_comp(raw.info, pick_ch_names) + assert ret == expected_result + assert len(missing) == 17 + with catch_logging() as log: + Epochs( + raw, events, None, -0.2, 0.2, preload=False, picks=picks, verbose=True + ) + assert "Removing 5 compensators" in log.getvalue() + + +def test_field_round_trip(tmp_path): + """Test round-trip for new fields.""" + info = create_info(1, 1000.0, "eeg") + with info._unlock(): + for key in ("file_id", "meas_id"): + info[key] = _generate_meas_id() + info["device_info"] = dict(type="a", model="b", serial="c", site="d") + info["helium_info"] = dict( + he_level_raw=1.0, + helium_level=2.0, + orig_file_guid="e", + meas_date=_stamp_to_dt((1, 2)), + ) + fname = tmp_path / "temp-info.fif" + info.save(fname) + info_read = read_info(fname) + assert_object_equal(info, info_read) + with pytest.raises(TypeError, match="datetime"): + info["helium_info"]["meas_date"] = (1, 2) + # should allow it to be None, though (checking gh-13154) + info["helium_info"]["meas_date"] = None + info.save(fname, overwrite=True) + info_read = read_info(fname) + assert_object_equal(info, info_read) + assert info_read["helium_info"]["meas_date"] is None + # not 100% sure how someone could end up with it deleted, but should still be + # writeable + del info["helium_info"]["meas_date"] + info.save(fname, overwrite=True) + info_read = read_info(fname) + info["helium_info"]["meas_date"] = None # we always set it (which is reasonable) + assert_object_equal(info, info_read) + + +def test_equalize_channels(): + """Test equalization of channels for instances of Info.""" + info1 = create_info(["CH1", "CH2", "CH3"], sfreq=1.0) + info2 = create_info(["CH4", "CH2", "CH1"], sfreq=1.0) + info1, info2 = equalize_channels([info1, info2]) + + assert info1.ch_names == ["CH1", "CH2"] + assert info2.ch_names == ["CH1", "CH2"] + + +def test_repr(): + """Test Info repr.""" + info = create_info(1, 1000, "eeg") + assert "7 non-empty values" in repr(info) + + t = Transform("meg", "head", np.ones((4, 4))) + info["dev_head_t"] = t + assert "dev_head_t: MEG device -> head transform" in repr(info) + + +def test_repr_html(): + """Test Info HTML repr.""" + info = read_info(raw_fname) + assert "Projections" in info._repr_html_() + with info._unlock(): + info["projs"] = [] + assert "Projections" not in info._repr_html_() + info["bads"] = [] + assert "Bad " not in info._repr_html_() + info["bads"] = ["MEG 2443", "EEG 053"] + assert "Bad " in info._repr_html_() # 1 for each channel type + + html = info._repr_html_() + for ch in [ # good channel counts + "203", # grad + "102", # mag + "9", 
# stim + "59", # eeg + "1", # eog + ]: + assert ch in html + + +@testing.requires_testing_data +def test_invalid_subject_birthday(): + """Test handling of an invalid birthday in the raw file.""" + with pytest.warns(RuntimeWarning, match="No birthday will be set"): + raw = read_raw_fif(raw_invalid_bday_fname) + assert "birthday" not in raw.info["subject_info"] + + +@pytest.mark.slowtest +@pytest.mark.parametrize( + "fname", + [ + pytest.param(ctf_fname, marks=testing._pytest_mark()), + raw_fname, + ], +) +def test_channel_name_limit(tmp_path, monkeypatch, fname): + """Test that our remapping works properly.""" + # + # raw + # + if fname.suffix == ".fif": + raw = read_raw_fif(fname) + raw.pick(raw.ch_names[:3]) + ref_names = [] + data_names = raw.ch_names + else: + assert fname.suffix == ".ds" + raw = read_raw_ctf(fname) + ref_names = [ + raw.ch_names[pick] for pick in pick_types(raw.info, meg=False, ref_meg=True) + ] + data_names = raw.ch_names[32:35] + proj = dict( + data=np.ones((1, len(data_names))), + col_names=data_names[:2].copy(), + row_names=None, + nrow=1, + ) + proj = Projection(data=proj, active=False, desc="test", kind=0, explained_var=0.0) + raw.add_proj(proj, remove_existing=True) + raw.info.normalize_proj() + raw.pick(data_names + ref_names).crop(0, 2) + long_names = ["123456789abcdefg" + name for name in raw.ch_names] + fname = tmp_path / "test-raw.fif" + with catch_logging() as log: + raw.save(fname) + log = log.getvalue() + assert "truncated" not in log + rename = dict(zip(raw.ch_names, long_names)) + long_data_names = [rename[name] for name in data_names] + long_proj_names = long_data_names[:2] + raw.rename_channels(rename) + for comp in raw.info["comps"]: + for key in ("row_names", "col_names"): + for name in comp["data"][key]: + assert name in raw.ch_names + if raw.info["comps"]: + assert raw.compensation_grade == 0 + raw.apply_gradient_compensation(3) + assert raw.compensation_grade == 3 + assert len(raw.info["projs"]) == 1 + assert raw.info["projs"][0]["data"]["col_names"] == long_proj_names + raw.info["bads"] = bads = long_data_names[2:3] + good_long_data_names = [name for name in long_data_names if name not in bads] + with catch_logging() as log: + raw.save(fname, overwrite=True, verbose=True) + log = log.getvalue() + assert "truncated to 15" in log + for name in raw.ch_names: + assert len(name) > 15 + # first read the full way + with catch_logging() as log: + raw_read = read_raw_fif(fname, verbose=True) + log = log.getvalue() + assert "Reading extended channel information" in log + for ra in (raw, raw_read): + assert ra.ch_names == long_names + assert raw_read.info["projs"][0]["data"]["col_names"] == long_proj_names + del raw_read + # next read as if no longer names could be read + monkeypatch.setattr(meas_info, "_read_extended_ch_info", lambda x, y, z: None) + with catch_logging() as log: + raw_read = read_raw_fif(fname, verbose=True) + log = log.getvalue() + assert "extended" not in log + if raw.info["comps"]: + assert raw_read.compensation_grade == 3 + raw_read.apply_gradient_compensation(0) + assert raw_read.compensation_grade == 0 + monkeypatch.setattr( # restore + meas_info, "_read_extended_ch_info", _read_extended_ch_info + ) + short_proj_names = [ + f"{name[: 13 - bool(len(ref_names))]}-{ni}" + for ni, name in enumerate(long_proj_names) + ] + assert raw_read.info["projs"][0]["data"]["col_names"] == short_proj_names + # + # epochs + # + epochs = Epochs(raw, make_fixed_length_events(raw)) + fname = tmp_path / "test-epo.fif" + epochs.save(fname) + epochs_read 
= read_epochs(fname) + for ep in (epochs, epochs_read): + assert ep.info["ch_names"] == long_names + assert ep.ch_names == long_names + del raw, epochs_read + # cov + epochs.info["bads"] = [] + cov = compute_covariance(epochs, verbose="error") + fname = tmp_path / "test-cov.fif" + write_cov(fname, cov) + cov_read = read_cov(fname) + for co in (cov, cov_read): + assert co["names"] == long_data_names + assert co["bads"] == [] + del cov_read + + # + # evoked + # + evoked = epochs.average() + evoked.info["bads"] = bads + assert evoked.nave == 1 + fname = tmp_path / "test-ave.fif" + evoked.save(fname) + evoked_read = read_evokeds(fname)[0] + for ev in (evoked, evoked_read): + assert ev.ch_names == long_names + assert ev.info["bads"] == bads + del evoked_read, epochs + + # + # forward + # + with _record_warnings(): # not enough points for CTF + sphere = make_sphere_model("auto", "auto", evoked.info) + src = setup_volume_source_space(pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1.0, 0.0]])) + fwd = make_forward_solution(evoked.info, None, src, sphere) + fname = tmp_path / "temp-fwd.fif" + write_forward_solution(fname, fwd) + fwd_read = read_forward_solution(fname) + for fw in (fwd, fwd_read): + assert fw["sol"]["row_names"] == long_data_names + assert fw["info"]["ch_names"] == long_data_names + assert fw["info"]["bads"] == bads + del fwd_read + + # + # inv + # + inv = make_inverse_operator(evoked.info, fwd, cov) + fname = tmp_path / "test-inv.fif" + write_inverse_operator(fname, inv) + inv_read = read_inverse_operator(fname) + for iv in (inv, inv_read): + assert iv["info"]["ch_names"] == good_long_data_names + apply_inverse(evoked, inv) # smoke test + + +@pytest.mark.parametrize("protocol", ("highest", "default")) +@pytest.mark.parametrize("fname_info", (raw_fname, "create_info")) +@pytest.mark.parametrize("unlocked", (True, False)) +def test_pickle(fname_info, unlocked, protocol): + """Test that Info can be (un)pickled.""" + if fname_info == "create_info": + info = create_info(3, 1000.0, "eeg") + else: + info = read_info(fname_info) + protocol = getattr(pickle, f"{protocol.upper()}_PROTOCOL") + assert isinstance(info["bads"], MNEBadsList) + info["bads"] = info["ch_names"][:1] + assert not info._unlocked + info._unlocked = unlocked + data = pickle.dumps(info, protocol=protocol) + info_un = pickle.loads(data) # nosec B301 + assert isinstance(info_un, Info) + assert_object_equal(info, info_un) + assert info_un._unlocked == unlocked + assert isinstance(info_un["bads"], MNEBadsList) + assert info_un["bads"]._mne_info is info_un + + +def test_info_bad(): + """Test our info sanity checkers.""" + info = create_info(5, 1000.0, "eeg") + info["description"] = "foo" + info["experimenter"] = "bar" + info["line_freq"] = 50.0 + info["bads"] = info["ch_names"][:1] + info["temp"] = ("whatever", 1.0) + + with pytest.raises(RuntimeError, match=r"info\['temp'\]"): + info["bad_key"] = 1.0 + for key, match in [("sfreq", r"inst\.resample"), ("chs", r"inst\.add_channels")]: + with pytest.raises(RuntimeError, match=match): + info[key] = info[key] + with pytest.raises(ValueError, match="between meg<->head"): + info["dev_head_t"] = Transform("mri", "head", np.eye(4)) + assert isinstance(info["bads"], MNEBadsList) + with pytest.raises(ValueError, match="do not exist in info"): + info["bads"] = ["foo"] + assert isinstance(info["bads"], MNEBadsList) + with pytest.raises(ValueError, match="do not exist in info"): + info["bads"] += ["foo"] + assert isinstance(info["bads"], MNEBadsList) + with pytest.raises(ValueError, match="do not 
exist in info"): + info["bads"].append("foo") + assert isinstance(info["bads"], MNEBadsList) + with pytest.raises(ValueError, match="do not exist in info"): + info["bads"].extend(["foo"]) + assert isinstance(info["bads"], MNEBadsList) + x = info["bads"] + with pytest.raises(ValueError, match="do not exist in info"): + x.append("foo") + assert info["bads"] == info["ch_names"][:1] # unchonged + x = info["bads"] + info["ch_names"][1:2] + assert x == info["ch_names"][:2] + assert not isinstance(x, MNEBadsList) # plain list + x = info["ch_names"][1:2] + info["bads"] + assert x == info["ch_names"][1::-1] # like [1, 0] in fancy indexing + assert not isinstance(x, MNEBadsList) # plain list + + +def test_get_montage(): + """Test ContainsMixin.get_montage().""" + ch_names = make_standard_montage("standard_1020").ch_names + sfreq = 512 + data = np.zeros((len(ch_names), sfreq * 2)) + raw = RawArray(data, create_info(ch_names, sfreq, "eeg")) + raw.set_montage("standard_1020") + + assert len(raw.get_montage().ch_names) == len(ch_names) + raw.info["bads"] = [ch_names[0]] + assert len(raw.get_montage().ch_names) == len(ch_names) + + # test info + raw = RawArray(data, create_info(ch_names, sfreq, "eeg")) + raw.set_montage("standard_1020") + + assert len(raw.info.get_montage().ch_names) == len(ch_names) + raw.info["bads"] = [ch_names[0]] + assert len(raw.info.get_montage().ch_names) == len(ch_names) + + +def test_tag_consistency(): + """Test that structures for tag reading are consistent.""" + call_set = set(tag._call_dict) + call_names = set(tag._call_dict_names) + assert call_set == call_names, "Mismatch between _call_dict and _call_dict_names" + # TODO: This was inspired by FIFF_DIG_STRING gh-13083, we should ideally add a test + # that those dig points can actually be read in correctly at some point. + + +def test_proj_id_entries(): + """Test that proj_id entries are the right type.""" + info = create_info(5, 1000.0, "eeg") + info["proj_id"] = 123 + # Boolean should be cast into an int + info["proj_id"] = True + with pytest.raises(TypeError, match="must be an instance"): + info["proj_id"] = "bad" + with pytest.raises(TypeError, match="must be an instance"): + info["proj_id"] = np.array([123]) diff --git a/mne/_fiff/tests/test_pick.py b/mne/_fiff/tests/test_pick.py new file mode 100644 index 00000000000..5d1b24247ab --- /dev/null +++ b/mne/_fiff/tests/test_pick.py @@ -0,0 +1,764 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from copy import deepcopy +from pathlib import Path + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from mne import ( + Epochs, + channel_indices_by_type, + channel_type, + create_info, + make_ad_hoc_cov, + pick_channels, + pick_channels_cov, + pick_channels_regexp, + pick_info, + pick_types, + pick_types_forward, + read_forward_solution, + rename_channels, +) +from mne._fiff.constants import FIFF +from mne._fiff.pick import ( + _DATA_CH_TYPES_SPLIT, + _contains_ch_type, + _picks_by_type, + _picks_to_idx, + get_channel_type_constants, +) +from mne.channels import make_standard_montage +from mne.datasets import testing +from mne.io import ( + RawArray, + read_info, + read_raw_bti, + read_raw_fif, + read_raw_kit, +) +from mne.preprocessing import compute_current_source_density +from mne.utils import assert_object_equal, catch_logging + +data_path = testing.data_path(download=False) +fname_meeg = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" +fname_mc = data_path / "SSS" / "test_move_anon_movecomp_raw_sss.fif" + +io_dir = Path(__file__).parents[2] / "io" +ctf_fname = io_dir / "tests" / "data" / "test_ctf_raw.fif" +fif_fname = io_dir / "tests" / "data" / "test_raw.fif" + + +def _picks_by_type_old(info, meg_combined=False, ref_meg=False, exclude="bads"): + """Use the old, slower _picks_by_type code.""" + picks_list = [] + has = [_contains_ch_type(info, k) for k in _DATA_CH_TYPES_SPLIT] + has = dict(zip(_DATA_CH_TYPES_SPLIT, has)) + if has["mag"] and (meg_combined is not True or not has["grad"]): + picks_list.append( + ( + "mag", + pick_types( + info, + meg="mag", + eeg=False, + stim=False, + ref_meg=ref_meg, + exclude=exclude, + ), + ) + ) + if has["grad"] and (meg_combined is not True or not has["mag"]): + picks_list.append( + ( + "grad", + pick_types( + info, + meg="grad", + eeg=False, + stim=False, + ref_meg=ref_meg, + exclude=exclude, + ), + ) + ) + if has["mag"] and has["grad"] and meg_combined is True: + picks_list.append( + ( + "meg", + pick_types( + info, + meg=True, + eeg=False, + stim=False, + ref_meg=ref_meg, + exclude=exclude, + ), + ) + ) + for ch_type in _DATA_CH_TYPES_SPLIT: + if ch_type in ["grad", "mag"]: # exclude just MEG channels + continue + if has[ch_type]: + picks_list.append( + ( + ch_type, + pick_types( + info, + meg=False, + stim=False, + ref_meg=ref_meg, + exclude=exclude, + **{ch_type: True}, + ), + ) + ) + return picks_list + + +def _channel_type_old(info, idx): + """Get channel type using old, slower scheme.""" + ch = info["chs"][idx] + + # iterate through all defined channel types until we find a match with ch + # go in order from most specific (most rules entries) to least specific + channel_types = sorted( + get_channel_type_constants().items(), key=lambda x: len(x[1]), reverse=True + ) + for t, rules in channel_types: + for key, vals in rules.items(): # all keys must match the values + if ch.get(key, None) not in np.array(vals): + break # not channel type t, go to next iteration + else: + return t + + raise ValueError(f"Unknown channel type for {ch['ch_name']}") + + +def _assert_channel_types(info): + for k in range(info["nchan"]): + a, b = channel_type(info, k), _channel_type_old(info, k) + assert a == b + + +def test_pick_refs(): + """Test picking of reference sensors.""" + infos = list() + # KIT + kit_dir = io_dir / "kit" / "tests" / "data" + sqd_path = kit_dir / "test.sqd" + mrk_path = kit_dir / "test_mrk.sqd" + elp_path = kit_dir / "test_elp.txt" + hsp_path = kit_dir / "test_hsp.txt" + 
raw_kit = read_raw_kit(sqd_path, str(mrk_path), str(elp_path), str(hsp_path)) + infos.append(raw_kit.info) + # BTi + bti_dir = io_dir / "bti" / "tests" / "data" + bti_pdf = bti_dir / "test_pdf_linux" + bti_config = bti_dir / "test_config_linux" + bti_hs = bti_dir / "test_hs_linux" + raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False) + infos.append(raw_bti.info) + # CTF + fname_ctf_raw = io_dir / "tests" / "data" / "test_ctf_comp_raw.fif" + raw_ctf = read_raw_fif(fname_ctf_raw) + raw_ctf.apply_gradient_compensation(2) + for info in infos: + info["bads"] = [] + _assert_channel_types(info) + with pytest.raises(ValueError, match="'planar2'] or bool, not foo"): + pick_types(info, meg="foo") + with pytest.raises(ValueError, match="'planar2', 'auto'] or bool,"): + pick_types(info, ref_meg="foo") + picks_meg_ref = pick_types(info, meg=True, ref_meg=True) + picks_meg = pick_types(info, meg=True, ref_meg=False) + picks_ref = pick_types(info, meg=False, ref_meg=True) + assert_array_equal( + picks_meg_ref, np.sort(np.concatenate([picks_meg, picks_ref])) + ) + picks_grad = pick_types(info, meg="grad", ref_meg=False) + picks_ref_grad = pick_types(info, meg=False, ref_meg="grad") + picks_meg_ref_grad = pick_types(info, meg="grad", ref_meg="grad") + assert_array_equal( + picks_meg_ref_grad, np.sort(np.concatenate([picks_grad, picks_ref_grad])) + ) + picks_mag = pick_types(info, meg="mag", ref_meg=False) + picks_ref_mag = pick_types(info, meg=False, ref_meg="mag") + picks_meg_ref_mag = pick_types(info, meg="mag", ref_meg="mag") + assert_array_equal( + picks_meg_ref_mag, np.sort(np.concatenate([picks_mag, picks_ref_mag])) + ) + assert_array_equal(picks_meg, np.sort(np.concatenate([picks_mag, picks_grad]))) + assert_array_equal( + picks_ref, np.sort(np.concatenate([picks_ref_mag, picks_ref_grad])) + ) + assert_array_equal( + picks_meg_ref, + np.sort( + np.concatenate([picks_grad, picks_mag, picks_ref_grad, picks_ref_mag]) + ), + ) + + for pick in ( + picks_meg_ref, + picks_meg, + picks_ref, + picks_grad, + picks_ref_grad, + picks_meg_ref_grad, + picks_mag, + picks_ref_mag, + picks_meg_ref_mag, + ): + if len(pick) > 0: + pick_info(info, pick) + + # test CTF expected failures directly + info = raw_ctf.info + info["bads"] = [] + picks_meg_ref = pick_types(info, meg=True, ref_meg=True) + picks_meg = pick_types(info, meg=True, ref_meg=False) + picks_ref = pick_types(info, meg=False, ref_meg=True) + picks_mag = pick_types(info, meg="mag", ref_meg=False) + picks_ref_mag = pick_types(info, meg=False, ref_meg="mag") + picks_meg_ref_mag = pick_types(info, meg="mag", ref_meg="mag") + for pick in (picks_meg_ref, picks_ref, picks_ref_mag, picks_meg_ref_mag): + if len(pick) > 0: + pick_info(info, pick) + + for pick in (picks_meg, picks_mag): + if len(pick) > 0: + with catch_logging() as log: + pick_info(info, pick, verbose=True) + assert ( + "Removing {} compensators".format(len(info["comps"])) in log.getvalue() + ) + picks_ref_grad = pick_types(info, meg=False, ref_meg="grad") + assert set(picks_ref_mag) == set(picks_ref) + assert len(picks_ref_grad) == 0 + all_meg = np.arange(3, 306) + assert_array_equal(np.concatenate([picks_ref, picks_meg]), all_meg) + assert_array_equal(picks_meg_ref_mag, all_meg) + + +def test_pick_channels_regexp(): + """Test pick with regular expression.""" + ch_names = ["MEG 2331", "MEG 2332", "MEG 2333"] + assert_array_equal(pick_channels_regexp(ch_names, "MEG ...1"), [0]) + assert_array_equal(pick_channels_regexp(ch_names, "MEG ...[2-3]"), [1, 2]) + 
assert_array_equal(pick_channels_regexp(ch_names, "MEG *"), [0, 1, 2]) + + +def assert_indexing(info, picks_by_type, ref_meg=False, all_data=True): + """Assert our indexing functions work properly.""" + # First that our old and new channel typing functions are equivalent + _assert_channel_types(info) + # Next that channel_indices_by_type works + if not ref_meg: + idx = channel_indices_by_type(info) + for key in idx: + for p in picks_by_type: + if key == p[0]: + assert_array_equal(idx[key], p[1]) + break + else: + assert len(idx[key]) == 0 + # Finally, picks_by_type (if relevant) + if not all_data: + picks_by_type = [p for p in picks_by_type if p[0] in _DATA_CH_TYPES_SPLIT] + picks_by_type = [(p[0], np.array(p[1], int)) for p in picks_by_type] + actual = _picks_by_type(info, ref_meg=ref_meg) + assert_object_equal(actual, picks_by_type) + if not ref_meg and idx["hbo"]: # our old code had a bug + with pytest.raises(TypeError, match="unexpected keyword argument"): + _picks_by_type_old(info, ref_meg=ref_meg) + else: + old = _picks_by_type_old(info, ref_meg=ref_meg) + assert_object_equal(old, picks_by_type) + # test bads + info = info.copy() + info["bads"] = [info["chs"][picks_by_type[0][1][0]]["ch_name"]] + picks_by_type = deepcopy(picks_by_type) + picks_by_type[0] = (picks_by_type[0][0], picks_by_type[0][1][1:]) + actual = _picks_by_type(info, ref_meg=ref_meg) + assert_object_equal(actual, picks_by_type) + + +def test_pick_seeg_ecog(): + """Test picking with sEEG and ECoG.""" + names = "A1 A2 Fz O OTp1 OTp2 E1 OTp3 E2 E3".split() + types = "mag mag eeg eeg seeg seeg ecog seeg ecog ecog".split() + info = create_info(names, 1024.0, types) + picks_by_type = [ + ("mag", [0, 1]), + ("eeg", [2, 3]), + ("seeg", [4, 5, 7]), + ("ecog", [6, 8, 9]), + ] + assert_indexing(info, picks_by_type) + assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 7]) + for i, t in enumerate(types): + assert channel_type(info, i) == types[i] + raw = RawArray(np.zeros((len(names), 10)), info) + events = np.array([[1, 0, 0], [2, 0, 0]]) + epochs = Epochs( + raw, + events=events, + event_id={"event": 0}, + tmin=-1e-5, + tmax=1e-5, + baseline=(0, 0), + ) # only one sample + evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True)) + e_seeg = evoked.copy().pick(picks="seeg") + for lt, rt in zip(e_seeg.ch_names, [names[4], names[5], names[7]]): + assert lt == rt + # Deal with constant debacle + raw = read_raw_fif(io_dir / "tests" / "data" / "test_chpi_raw_sss.fif") + assert len(pick_types(raw.info, meg=False, seeg=True, ecog=True)) == 0 + + +def test_pick_dbs(): + """Test picking with DBS.""" + # gh-8739 + names = "A1 A2 Fz O OTp1 OTp2 OTp3".split() + types = "mag mag eeg eeg dbs dbs dbs".split() + info = create_info(names, 1024.0, types) + picks_by_type = [("mag", [0, 1]), ("eeg", [2, 3]), ("dbs", [4, 5, 6])] + assert_indexing(info, picks_by_type) + assert_array_equal(pick_types(info, meg=False, dbs=True), [4, 5, 6]) + for i, t in enumerate(types): + assert channel_type(info, i) == types[i] + raw = RawArray(np.zeros((len(names), 7)), info) + events = np.array([[1, 0, 0], [2, 0, 0]]) + epochs = Epochs( + raw, + events=events, + event_id={"event": 0}, + tmin=-1e-5, + tmax=1e-5, + baseline=(0, 0), + ) # only one sample + evoked = epochs.average(pick_types(epochs.info, meg=True, dbs=True)) + e_dbs = evoked.copy().pick(picks="dbs") + for lt, rt in zip(e_dbs.ch_names, [names[4], names[5], names[6]]): + assert lt == rt + raw = read_raw_fif(io_dir / "tests" / "data" / "test_chpi_raw_sss.fif") + assert 
len(pick_types(raw.info, meg=False, dbs=True)) == 0
+
+
+def test_pick_chpi():
+    """Test picking cHPI."""
+    # Make sure we don't mis-classify cHPI channels
+    info = read_info(io_dir / "tests" / "data" / "test_chpi_raw_sss.fif")
+    _assert_channel_types(info)
+    channel_types = info.get_channel_types()
+    assert "chpi" in channel_types
+    assert "seeg" not in channel_types
+    assert "ecog" not in channel_types
+
+
+def test_pick_csd():
+    """Test picking current source density channels."""
+    # Make sure we don't mis-classify CSD channels
+    names = ["MEG 2331", "MEG 2332", "MEG 2333", "A1", "A2", "Fz"]
+    types = "mag mag grad csd csd csd".split()
+    info = create_info(names, 1024.0, types)
+    picks_by_type = [("mag", [0, 1]), ("grad", [2]), ("csd", [3, 4, 5])]
+    assert_indexing(info, picks_by_type, all_data=False)
+
+
+def test_pick_bio():
+    """Test picking BIO channels."""
+    names = "A1 A2 Fz O BIO1 BIO2 BIO3".split()
+    types = "mag mag eeg eeg bio bio bio".split()
+    info = create_info(names, 1024.0, types)
+    picks_by_type = [("mag", [0, 1]), ("eeg", [2, 3]), ("bio", [4, 5, 6])]
+    assert_indexing(info, picks_by_type, all_data=False)
+
+
+def test_pick_fnirs():
+    """Test picking fNIRS channels."""
+    names = "A1 A2 Fz O hbo1 hbo2 hbr1 fnirsRaw1 fnirsRaw2 fnirsOD1".split()
+    types = (
+        "mag mag eeg eeg hbo hbo hbr fnirs_cw_"
+        "amplitude fnirs_cw_amplitude fnirs_od".split()
+    )
+    info = create_info(names, 1024.0, types)
+    picks_by_type = [
+        ("mag", [0, 1]),
+        ("eeg", [2, 3]),
+        ("hbo", [4, 5]),
+        ("hbr", [6]),
+        ("fnirs_cw_amplitude", [7, 8]),
+        ("fnirs_od", [9]),
+    ]
+    assert_indexing(info, picks_by_type)
+
+
+def test_pick_ref():
+    """Test picking ref_meg channels."""
+    info = read_info(ctf_fname)
+    picks_by_type = [
+        ("stim", [0]),
+        ("eog", [306, 307]),
+        ("ecg", [308]),
+        ("misc", [1]),
+        ("mag", np.arange(31, 306)),
+        ("ref_meg", np.arange(2, 31)),
+    ]
+    assert_indexing(info, picks_by_type, all_data=False)
+    picks_by_type.append(
+        ("mag", np.concatenate([picks_by_type.pop(-1)[1], picks_by_type.pop(-1)[1]]))
+    )
+    assert_indexing(info, picks_by_type, ref_meg=True, all_data=False)
+
+
+def _check_fwd_n_chan_consistent(fwd, n_expected):
+    n_ok = len(fwd["info"]["ch_names"])
+    n_sol = fwd["sol"]["data"].shape[0]
+    assert n_expected == n_sol
+    assert n_expected == n_ok
+
+
+@testing.requires_testing_data
+def test_pick_forward_seeg_ecog():
+    """Test picking forward with SEEG and ECoG."""
+    fwd = read_forward_solution(fname_meeg)
+    counts = channel_indices_by_type(fwd["info"])
+    for key in counts.keys():
+        counts[key] = len(counts[key])
+    counts["meg"] = counts["mag"] + counts["grad"]
+    fwd_ = pick_types_forward(fwd, meg=True)
+    _check_fwd_n_chan_consistent(fwd_, counts["meg"])
+    fwd_ = pick_types_forward(fwd, meg=False, eeg=True)
+    _check_fwd_n_chan_consistent(fwd_, counts["eeg"])
+    # should raise exception related to emptiness
+    pytest.raises(ValueError, pick_types_forward, fwd, meg=False, seeg=True)
+    pytest.raises(ValueError, pick_types_forward, fwd, meg=False, ecog=True)
+    # change last chan from EEG to sEEG, second-to-last to ECoG
+    ecog_name = "E1"
+    seeg_name = "OTp1"
+    rename_channels(fwd["info"], {"EEG 059": ecog_name})
+    rename_channels(fwd["info"], {"EEG 060": seeg_name})
+    for ch in fwd["info"]["chs"]:
+        if ch["ch_name"] == seeg_name:
+            ch["kind"] = FIFF.FIFFV_SEEG_CH
+            ch["coil_type"] = FIFF.FIFFV_COIL_EEG
+        elif ch["ch_name"] == ecog_name:
+            ch["kind"] = FIFF.FIFFV_ECOG_CH
+            ch["coil_type"] = FIFF.FIFFV_COIL_EEG
+    fwd["sol"]["row_names"][-1] = fwd["info"]["chs"][-1]["ch_name"]
+
fwd["sol"]["row_names"][-2] = fwd["info"]["chs"][-2]["ch_name"] + counts["eeg"] -= 2 + counts["seeg"] += 1 + counts["ecog"] += 1 + # repick & check + fwd_seeg = pick_types_forward(fwd, meg=False, seeg=True) + assert fwd_seeg["sol"]["row_names"] == [seeg_name] + assert fwd_seeg["info"]["ch_names"] == [seeg_name] + # should work fine + fwd_ = pick_types_forward(fwd, meg=True) + _check_fwd_n_chan_consistent(fwd_, counts["meg"]) + fwd_ = pick_types_forward(fwd, meg=False, eeg=True) + _check_fwd_n_chan_consistent(fwd_, counts["eeg"]) + fwd_ = pick_types_forward(fwd, meg=False, seeg=True) + _check_fwd_n_chan_consistent(fwd_, counts["seeg"]) + fwd_ = pick_types_forward(fwd, meg=False, ecog=True) + _check_fwd_n_chan_consistent(fwd_, counts["ecog"]) + + +def test_picks_by_channels(): + """Test creating pick_lists.""" + rng = np.random.RandomState(909) + + test_data = rng.random_sample((4, 2000)) + ch_names = [f"MEG {i:03d}" for i in [1, 2, 3, 4]] + ch_types = ["grad", "mag", "mag", "eeg"] + sfreq = 250.0 + info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) + _assert_channel_types(info) + raw = RawArray(test_data, info) + + pick_list = _picks_by_type(raw.info) + assert len(pick_list) == 3 + assert pick_list[0][0] == "mag" + pick_list2 = _picks_by_type(raw.info, meg_combined=False) + assert len(pick_list) == len(pick_list2) + assert pick_list2[0][0] == "mag" + + pick_list2 = _picks_by_type(raw.info, meg_combined=True) + assert len(pick_list) == len(pick_list2) + 1 + assert pick_list2[0][0] == "meg" + + test_data = rng.random_sample((4, 2000)) + ch_names = [f"MEG {i:03d}" for i in [1, 2, 3, 4]] + ch_types = ["mag", "mag", "mag", "mag"] + sfreq = 250.0 + info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) + raw = RawArray(test_data, info) + # This acts as a set, not an order + assert_array_equal( + pick_channels(info["ch_names"], ["MEG 002", "MEG 001"], ordered=False), [0, 1] + ) + + # Make sure checks for list input work. 
+ pytest.raises(ValueError, pick_channels, ch_names, "MEG 001") + pytest.raises(ValueError, pick_channels, ch_names, ["MEG 001"], "hi") + + pick_list = _picks_by_type(raw.info) + assert len(pick_list) == 1 + assert pick_list[0][0] == "mag" + pick_list2 = _picks_by_type(raw.info, meg_combined=True) + assert len(pick_list) == len(pick_list2) + assert pick_list2[0][0] == "mag" + + # pick_types type check + with pytest.raises(ValueError, match="must be of type"): + raw.pick_types(eeg="string") + + # duplicate check + names = ["MEG 002", "MEG 002"] + assert len(pick_channels(raw.info["ch_names"], names, ordered=False)) == 1 + assert len(raw.copy().pick_channels(names, ordered=False)[0][0]) == 1 + + # missing ch_name + bad_names = names + ["BAD"] + with pytest.raises(ValueError, match="Missing channels"): + pick_channels(raw.info["ch_names"], bad_names, ordered=True) + with pytest.raises(ValueError, match="Missing channels"): + raw.copy().pick_channels(bad_names, ordered=True) # legacy method OK here + with pytest.raises(ValueError, match="could not be picked"): + raw.copy().pick(bad_names) + + +def test_clean_info_bads(): + """Test cleaning info['bads'] when bad_channels are excluded.""" + raw_file = io_dir / "tests" / "data" / "test_raw.fif" + raw = read_raw_fif(raw_file) + _assert_channel_types(raw.info) + + # select eeg channels + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + + # select 3 eeg channels as bads + idx_eeg_bad_ch = picks_eeg[[1, 5, 14]] + eeg_bad_ch = [raw.info["ch_names"][k] for k in idx_eeg_bad_ch] + + # select meg channels + picks_meg = pick_types(raw.info, meg=True, eeg=False) + + # select randomly 3 meg channels as bads + idx_meg_bad_ch = picks_meg[[0, 15, 34]] + meg_bad_ch = [raw.info["ch_names"][k] for k in idx_meg_bad_ch] + + # simulate the bad channels + raw.info["bads"] = eeg_bad_ch + meg_bad_ch + + assert len(raw.info["projs"]) == 3 + raw.set_eeg_reference(projection=True) + assert len(raw.info["projs"]) == 4 + + # simulate the call to pick_info excluding the bad eeg channels + info_eeg = pick_info(raw.info, picks_eeg) + assert len(info_eeg["projs"]) == 1 + + # simulate the call to pick_info excluding the bad meg channels + info_meg = pick_info(raw.info, picks_meg) + assert len(info_meg["projs"]) == 3 + + assert info_eeg["bads"] == eeg_bad_ch + assert info_meg["bads"] == meg_bad_ch + + info = pick_info(raw.info, picks_meg) + info._check_consistency() + with pytest.raises(ValueError, match="do not exist"): + info["bads"] += ["EEG 053"] + with pytest.raises(ValueError, match="unique"): + pick_info(raw.info, [0, 0]) + + +@testing.requires_testing_data +def test_picks_to_idx(): + """Test checking type integrity checks of picks.""" + info = create_info(12, 1000.0, "eeg") + _assert_channel_types(info) + picks = np.arange(info["nchan"]) + # Array and list + assert_array_equal(picks, _picks_to_idx(info, picks)) + assert_array_equal(picks, _picks_to_idx(info, list(picks))) + with pytest.raises(TypeError, match="data type float64 is invalid"): + _picks_to_idx(info, 1.0) + # None + assert_array_equal(picks, _picks_to_idx(info, None)) + # Type indexing + assert_array_equal(picks, _picks_to_idx(info, "eeg")) + assert_array_equal(picks, _picks_to_idx(info, ["eeg"])) + # Negative indexing + assert_array_equal([len(picks) - 1], _picks_to_idx(info, len(picks) - 1)) + assert_array_equal([len(picks) - 1], _picks_to_idx(info, -1)) + assert_array_equal([len(picks) - 1], _picks_to_idx(info, [-1])) + # Name indexing + assert_array_equal([2], _picks_to_idx(info, 
info["ch_names"][2])) + assert_array_equal(np.arange(5, 9), _picks_to_idx(info, info["ch_names"][5:9])) + with pytest.raises(IndexError, match="must be >= "): + _picks_to_idx(info, -len(picks) - 1) + with pytest.raises(IndexError, match="must be < "): + _picks_to_idx(info, len(picks)) + with pytest.raises(ValueError, match="could not be interpreted"): + _picks_to_idx(info, ["a", "b"]) + with pytest.raises(ValueError, match="could not be interpreted"): + _picks_to_idx(info, "b") + # bads behavior + info["bads"] = info["ch_names"][1:2] + picks_good = np.array([0] + list(range(2, 12))) + assert_array_equal(picks_good, _picks_to_idx(info, None)) + assert_array_equal(picks_good, _picks_to_idx(info, None, exclude=info["bads"])) + assert_array_equal(picks, _picks_to_idx(info, None, exclude=())) + with pytest.raises(ValueError, match=" 1D, got"): + _picks_to_idx(info, [[1]]) + # MEG types + info = read_info(fname_mc) + meg_picks = np.arange(306) + mag_picks = np.arange(2, 306, 3) + grad_picks = np.setdiff1d(meg_picks, mag_picks) + assert_array_equal(meg_picks, _picks_to_idx(info, "meg")) + assert_array_equal(meg_picks, _picks_to_idx(info, ("mag", "grad"))) + assert_array_equal(mag_picks, _picks_to_idx(info, "mag")) + assert_array_equal(grad_picks, _picks_to_idx(info, "grad")) + + info = create_info(["eeg", "foo"], 1000.0, "eeg") + with pytest.raises(RuntimeError, match="equivalent to channel types"): + _picks_to_idx(info, "eeg") + with pytest.raises(ValueError, match="same length"): + create_info(["a", "b"], 1000.0, dict(hbo=["a"], hbr=["b"])) + info = create_info(["a", "b"], 1000.0, ["hbo", "hbr"]) + assert_array_equal(np.arange(2), _picks_to_idx(info, "fnirs")) + assert_array_equal([0], _picks_to_idx(info, "hbo")) + assert_array_equal([1], _picks_to_idx(info, "hbr")) + info = create_info(["a", "b"], 1000.0, ["hbo", "misc"]) + assert_array_equal(np.arange(len(info["ch_names"])), _picks_to_idx(info, "all")) + assert_array_equal([0], _picks_to_idx(info, "data")) + info = create_info(["a", "b"], 1000.0, ["fnirs_cw_amplitude", "fnirs_od"]) + assert_array_equal(np.arange(2), _picks_to_idx(info, "fnirs")) + assert_array_equal([0], _picks_to_idx(info, "fnirs_cw_amplitude")) + assert_array_equal([1], _picks_to_idx(info, "fnirs_od")) + info = create_info(["a", "b"], 1000.0, ["fnirs_cw_amplitude", "misc"]) + assert_array_equal(np.arange(len(info["ch_names"])), _picks_to_idx(info, "all")) + assert_array_equal([0], _picks_to_idx(info, "data")) + info = create_info(["a", "b"], 1000.0, ["fnirs_od", "misc"]) + assert_array_equal(np.arange(len(info["ch_names"])), _picks_to_idx(info, "all")) + assert_array_equal([0], _picks_to_idx(info, "data")) + # MEG reference sensors + info_ref = read_info(ctf_fname) + picks_meg = pick_types(info_ref, meg=True, ref_meg=False) + assert len(picks_meg) == 275 + picks_ref = pick_types(info_ref, meg=False, ref_meg=True) + assert len(picks_ref) == 29 + picks_meg_ref = np.sort(np.concatenate([picks_meg, picks_ref])) + assert len(picks_meg_ref) == 275 + 29 + assert_array_equal(picks_meg_ref, pick_types(info_ref, meg=True, ref_meg=True)) + assert_array_equal(picks_meg, _picks_to_idx(info_ref, "meg", with_ref_meg=False)) + assert_array_equal( # explicit trumps implicit + picks_ref, _picks_to_idx(info_ref, "ref_meg", with_ref_meg=False) + ) + assert_array_equal(picks_meg_ref, _picks_to_idx(info_ref, "meg", with_ref_meg=True)) + # Eyetrack + info = create_info(["a", "b"], 1000.0, ["eyegaze", "pupil"]) + assert_array_equal(np.arange(2), _picks_to_idx(info, "eyetrack")) + 
assert_array_equal([0], _picks_to_idx(info, "eyegaze")) + assert_array_equal([1], _picks_to_idx(info, "pupil")) + + +def test_pick_channels_cov(): + """Test picking channels from a Covariance object.""" + info = create_info(["CH1", "CH2", "CH3"], 1.0, ch_types="eeg") + cov = make_ad_hoc_cov(info) + cov["data"] = np.array([1.0, 2.0, 3.0]) + + cov_copy = pick_channels_cov(cov, ["CH2", "CH1"], ordered=False, copy=True) + assert cov_copy.ch_names == ["CH1", "CH2"] + assert_array_equal(cov_copy["data"], [1.0, 2.0]) + + # Test re-ordering channels + cov_copy = pick_channels_cov(cov, ["CH2", "CH1"], ordered=True, copy=True) + assert cov_copy.ch_names == ["CH2", "CH1"] + assert_array_equal(cov_copy["data"], [2.0, 1.0]) + + # Test picking in-place + pick_channels_cov(cov, ["CH2", "CH1"], copy=False, ordered=False) + assert cov.ch_names == ["CH1", "CH2"] + assert_array_equal(cov["data"], [1.0, 2.0]) + + # Test whether `method` and `loglik` are dropped when None + cov["method"] = None + cov["loglik"] = None + cov_copy = pick_channels_cov(cov, ["CH1", "CH2"], copy=True) + assert "method" not in cov_copy + assert "loglik" not in cov_copy + + +def test_pick_types_meg(): + """Test pick_types(meg=True).""" + # info with MEG channels at indices 1, 2, and 4 + info1 = create_info(6, 256, ["eeg", "mag", "grad", "misc", "grad", "hbo"]) + + assert list(pick_types(info1, meg=True)) == [1, 2, 4] + assert list(pick_types(info1, meg=True, eeg=True)) == [0, 1, 2, 4] + + assert list(pick_types(info1, meg=True)) == [1, 2, 4] + assert not list(pick_types(info1, meg=False)) # empty + assert list(pick_types(info1, meg="planar1")) == [2] + assert not list(pick_types(info1, meg="planar2")) # empty + + # info without any MEG channels + info2 = create_info(6, 256, ["eeg", "eeg", "eog", "misc", "stim", "hbo"]) + + assert not list(pick_types(info2)) # empty + assert list(pick_types(info2, eeg=True)) == [0, 1] + + +def test_pick_types_csd(): + """Test pick_types(csd=True).""" + # info with laplacian/CSD channels at indices 1, 2 + names = ["F1", "F2", "C1", "C2", "A1", "A2", "misc1", "CSD1"] + info1 = create_info( + names, 256, ["eeg", "eeg", "eeg", "eeg", "mag", "mag", "misc", "csd"] + ) + raw = RawArray(np.zeros((8, 512)), info1) + raw.set_montage(make_standard_montage("standard_1020"), verbose="error") + raw_csd = compute_current_source_density(raw, verbose="error") + + assert_array_equal(pick_types(info1, csd=True), [7]) + + # pick from the raw object + assert raw_csd.copy().pick("csd").ch_names == [ + "F1", + "F2", + "C1", + "C2", + "CSD1", + ] + + +@pytest.mark.parametrize("meg", [True, False, "grad", "mag"]) +@pytest.mark.parametrize("eeg", [True, False]) +@pytest.mark.parametrize("ordered", [True, False]) +def test_get_channel_types_equiv(meg, eeg, ordered): + """Test equivalence of get_channel_types.""" + raw = read_raw_fif(fif_fname) + pick_types(raw.info, meg=meg, eeg=eeg) + picks = pick_types(raw.info, meg=meg, eeg=eeg) + if not ordered: + picks = np.random.RandomState(0).permutation(picks) + if not meg and not eeg: + with pytest.raises(ValueError, match="No appropriate channels"): + raw.get_channel_types(picks=picks) + return + types = np.array(raw.get_channel_types(picks=picks)) + types_iter = np.array([channel_type(raw.info, idx) for idx in picks]) + assert_array_equal(types, types_iter) diff --git a/mne/_fiff/tests/test_proc_history.py b/mne/_fiff/tests/test_proc_history.py new file mode 100644 index 00000000000..82f2d6262be --- /dev/null +++ b/mne/_fiff/tests/test_proc_history.py @@ -0,0 +1,42 @@ +# Authors: The 
MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from pathlib import Path + +import numpy as np +from numpy.testing import assert_array_equal + +from mne._fiff.constants import FIFF +from mne.io import read_info + +base_dir = Path(__file__).parents[2] / "io" / "tests" / "data" +raw_fname = base_dir / "test_chpi_raw_sss.fif" + + +def test_maxfilter_io(): + """Test maxfilter io.""" + info = read_info(raw_fname) + mf = info["proc_history"][1]["max_info"] + + assert mf["sss_info"]["frame"] == FIFF.FIFFV_COORD_HEAD + # based on manual 2.0, rev. 5.0 page 23 + assert 5 <= mf["sss_info"]["in_order"] <= 11 + assert mf["sss_info"]["out_order"] <= 5 + assert mf["sss_info"]["nchan"] > len(mf["sss_info"]["components"]) + + assert ( + info["ch_names"][: mf["sss_info"]["nchan"]] == mf["sss_ctc"]["proj_items_chs"] + ) + assert mf["sss_ctc"]["decoupler"].shape == ( + mf["sss_info"]["nchan"], + mf["sss_info"]["nchan"], + ) + assert_array_equal( + np.unique(np.diag(mf["sss_ctc"]["decoupler"].toarray())), + np.array([1.0], dtype=np.float32), + ) + assert mf["sss_cal"]["cal_corrs"].shape == (306, 14) + assert mf["sss_cal"]["cal_chans"].shape == (306, 2) + vv_coils = [v for k, v in FIFF.items() if "FIFFV_COIL_VV" in k] + assert all(k in vv_coils for k in set(mf["sss_cal"]["cal_chans"][:, 1])) diff --git a/mne/_fiff/tests/test_reference.py b/mne/_fiff/tests/test_reference.py new file mode 100644 index 00000000000..085f34e1bcb --- /dev/null +++ b/mne/_fiff/tests/test_reference.py @@ -0,0 +1,975 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import itertools +from contextlib import nullcontext +from pathlib import Path + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal, assert_equal + +from mne import ( + Epochs, + Projection, + add_reference_channels, + create_info, + find_events, + make_forward_solution, + make_sphere_model, + pick_channels, + pick_channels_forward, + pick_types, + read_events, + read_evokeds, + set_bipolar_reference, + set_eeg_reference, + setup_volume_source_space, +) +from mne._fiff.constants import FIFF +from mne._fiff.proj import _has_eeg_average_ref_proj +from mne._fiff.reference import _apply_reference +from mne.datasets import testing +from mne.epochs import BaseEpochs, make_fixed_length_epochs +from mne.io import RawArray, read_raw_fif +from mne.utils import _record_warnings, catch_logging + +base_dir = Path(__file__).parents[2] / "io" / "tests" / "data" +raw_fname = base_dir / "test_raw.fif" +data_dir = testing.data_path(download=False) / "MEG" / "sample" +fif_fname = data_dir / "sample_audvis_trunc_raw.fif" +eve_fname = data_dir / "sample_audvis_trunc_raw-eve.fif" +ave_fname = data_dir / "sample_audvis-ave.fif" + + +def _test_reference(raw, reref, ref_data, ref_from): + """Test whether a reference has been correctly applied.""" + # Separate EEG channels from other channel types + picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude="bads") + picks_other = pick_types( + raw.info, meg=True, eeg=False, eog=True, stim=True, exclude="bads" + ) + + # Calculate indices of reference channesl + picks_ref = [raw.ch_names.index(ch) for ch in ref_from] + + # Get data + _data = raw._data + _reref = reref._data + + # Check that the ref has been properly computed + if ref_data is not None: + assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2)) + + # Get the raw EEG data and other channel data + raw_eeg_data = _data[..., 
picks_eeg, :] + raw_other_data = _data[..., picks_other, :] + + # Get the rereferenced EEG data + reref_eeg_data = _reref[..., picks_eeg, :] + reref_other_data = _reref[..., picks_other, :] + + # Check that non-EEG channels are untouched + assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15) + + # Undo rereferencing of EEG channels if possible + if ref_data is not None: + if isinstance(raw, BaseEpochs): + unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :] + else: + unref_eeg_data = reref_eeg_data + ref_data + assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15) + + +@testing.requires_testing_data +def test_apply_reference(): + """Test base function for rereferencing.""" + raw = read_raw_fif(fif_fname, preload=True) + + # Rereference raw data by creating a copy of original data + reref, ref_data = _apply_reference(raw.copy(), ref_from=["EEG 001", "EEG 002"]) + assert reref.info["custom_ref_applied"] + _test_reference(raw, reref, ref_data, ["EEG 001", "EEG 002"]) + + # The CAR reference projection should have been removed by the function + assert not _has_eeg_average_ref_proj(reref.info) + + # Test that data is modified in place when copy=False + reref, ref_data = _apply_reference(raw, ["EEG 001", "EEG 002"]) + assert raw is reref + + # Test that disabling the reference does not change anything + reref, ref_data = _apply_reference(raw.copy(), []) + assert_array_equal(raw._data, reref._data) + + # Test re-referencing Epochs object + raw = read_raw_fif(fif_fname, preload=False) + events = read_events(eve_fname) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + epochs = Epochs( + raw, + events=events, + event_id=1, + tmin=-0.2, + tmax=0.5, + picks=picks_eeg, + preload=True, + ) + reref, ref_data = _apply_reference(epochs.copy(), ref_from=["EEG 001", "EEG 002"]) + assert reref.info["custom_ref_applied"] + _test_reference(epochs, reref, ref_data, ["EEG 001", "EEG 002"]) + + # Test re-referencing Evoked object + evoked = epochs.average() + reref, ref_data = _apply_reference(evoked.copy(), ref_from=["EEG 001", "EEG 002"]) + assert reref.info["custom_ref_applied"] + _test_reference(evoked, reref, ref_data, ["EEG 001", "EEG 002"]) + + # Referencing needs data to be preloaded + raw_np = read_raw_fif(fif_fname, preload=False) + pytest.raises(RuntimeError, _apply_reference, raw_np, ["EEG 001"]) + + # Test having inactive SSP projections that deal with channels involved + # during re-referencing + raw = read_raw_fif(fif_fname, preload=True) + raw.add_proj( + Projection( + active=False, + data=dict( + col_names=["EEG 001", "EEG 002"], + row_names=None, + data=np.array([[1, 1]]), + ncol=2, + nrow=1, + ), + desc="test", + kind=1, + ) + ) + # Projection concerns channels mentioned in projector + with pytest.raises(RuntimeError, match="Inactive signal space"): + _apply_reference(raw, ["EEG 001"]) + + # Projection does not concern channels mentioned in projector, no error + _apply_reference(raw, ["EEG 003"], ["EEG 004"]) + + # CSD cannot be rereferenced + with raw.info._unlock(): + raw.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_CSD + with pytest.raises(RuntimeError, match="Cannot set.* type 'CSD'"): + raw.set_eeg_reference() + + +@testing.requires_testing_data +def test_set_eeg_reference(): + """Test rereference eeg data.""" + raw = read_raw_fif(fif_fname, preload=True) + with raw.info._unlock(): + raw.info["projs"] = [] + + # Test setting an average reference projection + assert not _has_eeg_average_ref_proj(raw.info) + reref, ref_data = 
set_eeg_reference(raw, projection=True) + assert _has_eeg_average_ref_proj(reref.info) + assert not reref.info["projs"][0]["active"] + assert ref_data is None + reref.apply_proj() + eeg_chans = [raw.ch_names[ch] for ch in pick_types(raw.info, meg=False, eeg=True)] + _test_reference( + raw, reref, ref_data, [ch for ch in eeg_chans if ch not in raw.info["bads"]] + ) + + # Test setting an average reference when one was already present + with pytest.warns(RuntimeWarning, match="untouched"): + reref, ref_data = set_eeg_reference(raw, copy=False, projection=True) + assert ref_data is None + + # Test setting an average reference on non-preloaded data + raw_nopreload = read_raw_fif(fif_fname, preload=False) + with raw_nopreload.info._unlock(): + raw_nopreload.info["projs"] = [] + reref, ref_data = set_eeg_reference(raw_nopreload, projection=True) + assert _has_eeg_average_ref_proj(reref.info) + assert not reref.info["projs"][0]["active"] + + # Rereference raw data by creating a copy of original data + reref, ref_data = set_eeg_reference(raw, ["EEG 001", "EEG 002"], copy=True) + assert reref.info["custom_ref_applied"] + _test_reference(raw, reref, ref_data, ["EEG 001", "EEG 002"]) + + # Test that data is modified in place when copy=False + reref, ref_data = set_eeg_reference(raw, ["EEG 001", "EEG 002"], copy=False) + assert raw is reref + + # Test moving from custom to average reference + reref, ref_data = set_eeg_reference(raw, ["EEG 001", "EEG 002"]) + reref, _ = set_eeg_reference(reref, projection=True) + assert _has_eeg_average_ref_proj(reref.info) + assert not reref.info["custom_ref_applied"] + + # When creating an average reference fails, make sure the + # custom_ref_applied flag remains untouched. + reref = raw.copy() + with reref.info._unlock(): + reref.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_ON + reref.pick(picks="meg") # Cause making average ref fail + # should have turned it off + assert reref.info["custom_ref_applied"] == FIFF.FIFFV_MNE_CUSTOM_REF_OFF + with pytest.raises(ValueError, match="found to rereference"): + set_eeg_reference(reref, projection=True) + + # Test moving from average to custom reference + reref, ref_data = set_eeg_reference(raw, projection=True) + reref, _ = set_eeg_reference(reref, ["EEG 001", "EEG 002"]) + assert not _has_eeg_average_ref_proj(reref.info) + assert len(reref.info["projs"]) == 0 + assert reref.info["custom_ref_applied"] == FIFF.FIFFV_MNE_CUSTOM_REF_ON + + # Test that disabling the reference does not change the data + assert _has_eeg_average_ref_proj(raw.info) + reref, _ = set_eeg_reference(raw, []) + assert_array_equal(raw._data, reref._data) + assert not _has_eeg_average_ref_proj(reref.info) + + # make sure ref_channels=[] removes average reference projectors + assert _has_eeg_average_ref_proj(raw.info) + reref, _ = set_eeg_reference(raw, []) + assert not _has_eeg_average_ref_proj(reref.info) + + # Test that average reference gives identical results when calculated + # via SSP projection (projection=True) or directly (projection=False) + with raw.info._unlock(): + raw.info["projs"] = [] + reref_1, _ = set_eeg_reference(raw.copy(), projection=True) + reref_1.apply_proj() + reref_2, _ = set_eeg_reference(raw.copy(), projection=False) + assert_allclose(reref_1._data, reref_2._data, rtol=1e-6, atol=1e-15) + + # Test average reference without projection + reref, ref_data = set_eeg_reference( + raw.copy(), ref_channels="average", projection=False + ) + _test_reference(raw, reref, ref_data, eeg_chans) + + with pytest.raises(ValueError, 
match='supported for ref_channels="averag'): + set_eeg_reference(raw, [], True, True) + with pytest.raises(ValueError, match='supported for ref_channels="averag'): + set_eeg_reference(raw, ["EEG 001"], True, True) + + +@pytest.mark.parametrize( + "ch_type, msg", + [ + ("auto", ("ECoG",)), + ("ecog", ("ECoG",)), + ("dbs", ("DBS",)), + (["ecog", "dbs"], ("ECoG", "DBS")), + ], +) +@pytest.mark.parametrize("projection", [False, True]) +def test_set_eeg_reference_ch_type(ch_type, msg, projection): + """Test setting EEG reference for ECoG or DBS.""" + # gh-6454 + # gh-8739 added DBS + ch_names = ["ECOG01", "ECOG02", "DBS01", "DBS02", "MISC"] + rng = np.random.RandomState(0) + data = rng.randn(5, 1000) + raw = RawArray( + data, create_info(ch_names, 1000.0, ["ecog"] * 2 + ["dbs"] * 2 + ["misc"]) + ) + + if ch_type == "auto": + ref_ch = ch_names[:2] + else: + ref_ch = raw.copy().pick(picks=ch_type).ch_names + + with catch_logging() as log: + reref, ref_data = set_eeg_reference( + raw.copy(), ch_type=ch_type, projection=projection, verbose=True + ) + + if not projection: + assert f"Applying a custom {msg}" in log.getvalue() + assert reref.info["custom_ref_applied"] # gh-7350 + _test_reference(raw, reref, ref_data, ref_ch) + match = "no EEG data found" if projection else "No channels supplied" + with pytest.raises(ValueError, match=match): + set_eeg_reference(raw, ch_type="eeg", projection=projection) + # gh-8739 + raw2 = RawArray(data, create_info(5, 1000.0, ["mag"] * 4 + ["misc"])) + with pytest.raises( + ValueError, match="No EEG, ECoG, sEEG or DBS channels found to rereference." + ): + set_eeg_reference(raw2, ch_type="auto", projection=projection) + + +@testing.requires_testing_data +def test_set_eeg_reference_rest(): + """Test setting a REST reference.""" + raw = read_raw_fif(fif_fname).crop(0, 1).pick(picks="eeg").load_data() + raw.info["bads"] = ["EEG 057"] # should be excluded + same = [raw.ch_names.index(raw.info["bads"][0])] + picks = np.setdiff1d(np.arange(len(raw.ch_names)), same) + trans = None + # Use fixed values from old sphere fit to reduce lines changed with fixed algorithm + sphere = make_sphere_model( + [-0.00413508, 0.01598787, 0.05175598], + 0.09100286249131773, + ) + src = setup_volume_source_space(pos=20.0, sphere=sphere, exclude=30.0) + assert src[0]["nuse"] == 223 # low but fast + fwd = make_forward_solution(raw.info, trans, src, sphere) + orig_data = raw.get_data() + avg_data = raw.copy().set_eeg_reference("average").get_data() + assert_array_equal(avg_data[same], orig_data[same]) # not processed + raw.set_eeg_reference("REST", forward=fwd) + rest_data = raw.get_data() + assert_array_equal(rest_data[same], orig_data[same]) + # should be more similar to an avg ref than nose ref + orig_corr = np.corrcoef(rest_data[picks].ravel(), orig_data[picks].ravel())[0, 1] + avg_corr = np.corrcoef(rest_data[picks].ravel(), avg_data[picks].ravel())[0, 1] + assert -0.6 < orig_corr < -0.5 + assert 0.1 < avg_corr < 0.2 + # and applying an avg ref after should work + avg_after = raw.set_eeg_reference("average").get_data() + assert_allclose(avg_after, avg_data, atol=1e-12) + with pytest.raises(TypeError, match='forward when ref_channels="REST"'): + raw.set_eeg_reference("REST") + fwd_bad = pick_channels_forward(fwd, raw.ch_names[:-1]) + with pytest.raises(ValueError, match="Missing channels"): + raw.set_eeg_reference("REST", forward=fwd_bad) + # compare to FieldTrip + evoked = read_evokeds(ave_fname, baseline=(None, 0))[0] + evoked.info["bads"] = [] + evoked.pick(picks="eeg") + assert 
len(evoked.ch_names) == 60 + # Data obtained from FieldTrip with something like (after evoked.save'ing + # then scipy.io.savemat'ing fwd['sol']['data']): + # dat = ft_read_data('ft-ave.fif'); + # load('leadfield.mat', 'G'); + # dat_ref = ft_preproc_rereference(dat, 'all', 'rest', true, G); + # sprintf('%g ', dat_ref(:, 171)); + data_array = "-3.3265e-05 -3.2419e-05 -3.18758e-05 -3.24079e-05 -3.39801e-05 -3.40573e-05 -3.24163e-05 -3.26896e-05 -3.33814e-05 -3.54734e-05 -3.51289e-05 -3.53229e-05 -3.51532e-05 -3.53149e-05 -3.4505e-05 -3.03462e-05 -2.81848e-05 -3.08895e-05 -3.27158e-05 -3.4605e-05 -3.47728e-05 -3.2459e-05 -3.06552e-05 -2.53255e-05 -2.69671e-05 -2.83425e-05 -3.12836e-05 -3.30965e-05 -3.34099e-05 -3.32766e-05 -3.32256e-05 -3.36385e-05 -3.20796e-05 -2.7108e-05 -2.47054e-05 -2.49589e-05 -2.7382e-05 -3.09774e-05 -3.12003e-05 -3.1246e-05 -3.07572e-05 -2.64942e-05 -2.25505e-05 -2.67194e-05 -2.86e-05 -2.94903e-05 -2.96249e-05 -2.92653e-05 -2.86472e-05 -2.81016e-05 -2.69737e-05 -2.48076e-05 -3.00473e-05 -2.73404e-05 -2.60153e-05 -2.41608e-05 -2.61937e-05 -2.5539e-05 -2.47104e-05 -2.35194e-05" # noqa: E501 + want = np.array(data_array.split(" "), float) + norm = np.linalg.norm(want) + idx = np.argmin(np.abs(evoked.times - 0.083)) + assert idx == 170 + old = evoked.data[:, idx].ravel() + exp_var = 1 - np.linalg.norm(want - old) / norm + assert 0.006 < exp_var < 0.008 + evoked.set_eeg_reference("REST", forward=fwd) + exp_var_old = 1 - np.linalg.norm(evoked.data[:, idx] - old) / norm + assert 0.005 < exp_var_old <= 0.009 + exp_var = 1 - np.linalg.norm(evoked.data[:, idx] - want) / norm + assert 0.995 < exp_var <= 1 + + +@testing.requires_testing_data +@pytest.mark.parametrize("inst_type", ["raw", "epochs"]) +@pytest.mark.parametrize( + "ref_channels, expectation", + [ + ( + {2: "EEG 001"}, + pytest.raises( + TypeError, + match="Keys in the ref_channels dict must be strings. " + "Your dict has keys of type int.", + ), + ), + ( + {"EEG 001": (1, 2)}, + pytest.raises( + TypeError, + match="Values in the ref_channels dict must be strings. " + "Your dict has values of type int.", + ), + ), + ( + {"EEG 001": [1, 2]}, + pytest.raises( + TypeError, + match="Values in the ref_channels dict must be strings. " + "Your dict has values of type int.", + ), + ), + ( + {"EEG 999": "EEG 001"}, + pytest.raises( + ValueError, + match=r"ref_channels dict contains invalid key\(s\) \(EEG 999\) " + "that are not names of channels in the instance.", + ), + ), + ( + {"EEG 001": "EEG 999"}, + pytest.raises( + ValueError, + match=r"ref_channels dict contains invalid value\(s\) \(EEG 999\) " + "that are not names of channels in the instance.", + ), + ), + ( + {"EEG 001": "EEG 057"}, + pytest.warns( + RuntimeWarning, + match=r"ref_channels dict contains value\(s\) \(EEG 057\) " + "that are marked as bad channels.", + ), + ), + ( + {"EEG 001": "STI 001"}, + pytest.warns( + RuntimeWarning, + match=( + r"Channel EEG 001 \(eeg\) is referenced to channel " + r"STI 001 which is a different channel type \(stim\)." + ), + ), + ), + ( + {"EEG 001": "EEG 001"}, + pytest.warns( + RuntimeWarning, + match=( + "Channel EEG 001 is self-referenced, " + "which will nullify the channel." 
+ ), + ), + ), + ( + {"EEG 001": "EEG 002", "EEG 002": "EEG 003", "EEG 003": "EEG 005"}, + nullcontext(), + ), + ( + { + "EEG 001": ["EEG 002", "EEG 003"], + "EEG 002": "EEG 003", + "EEG 003": "EEG 005", + }, + nullcontext(), + ), + ], +) +def test_set_eeg_reference_dict(ref_channels, inst_type, expectation): + """Test setting dict-based reference.""" + if inst_type == "raw": + inst = read_raw_fif(fif_fname).crop(0, 1).pick(picks=["eeg", "stim"]) + # Test re-referencing Epochs object + elif inst_type == "epochs": + raw = read_raw_fif(fif_fname, preload=False) + events = read_events(eve_fname) + inst = Epochs( + raw, + events=events, + event_id=1, + tmin=-0.2, + tmax=0.5, + preload=False, + ) + with pytest.raises( + RuntimeError, + match="By default, MNE does not load data.*Applying a reference requires.*", + ): + inst.set_eeg_reference(ref_channels=ref_channels) + inst.load_data() + inst.info["bads"] = ["EEG 057"] + with expectation: + reref, _ = set_eeg_reference(inst.copy(), ref_channels, copy=False) + + if isinstance(expectation, nullcontext): + # Check that the custom_ref_applied is set correctly: + assert reref.info["custom_ref_applied"] == FIFF.FIFFV_MNE_CUSTOM_REF_ON + + # Get raw data + _data = inst._data + + # Get that channels that were and weren't re-referenced: + ch_raw = pick_channels( + inst.ch_names, + [ch for ch in inst.ch_names if ch not in list(ref_channels.keys())], + ) + ch_reref = pick_channels(inst.ch_names, list(ref_channels.keys()), ordered=True) + + # Check that the non re-reference channels are untouched: + assert_allclose( + _data[..., ch_raw, :], reref._data[..., ch_raw, :], 1e-6, atol=1e-15 + ) + + # Compute the reference data: + ref_data = [] + for val in ref_channels.values(): + if isinstance(val, str): + val = [val] # pick_channels expects a list + ref_data.append( + _data[..., pick_channels(inst.ch_names, val, ordered=True), :].mean( + -2, keepdims=True + ) + ) + if inst_type == "epochs": + ref_data = np.concatenate(ref_data, axis=1) + else: + ref_data = np.squeeze(np.array(ref_data)) + assert_allclose( + _data[..., ch_reref, :], + reref._data[..., ch_reref, :] + ref_data, + 1e-6, + atol=1e-15, + ) + + +@testing.requires_testing_data +@pytest.mark.parametrize("inst_type", ("raw", "epochs", "evoked")) +def test_set_bipolar_reference(inst_type): + """Test bipolar referencing.""" + raw = read_raw_fif(fif_fname, preload=True) + raw.apply_proj() + + if inst_type == "raw": + inst = raw + del raw + elif inst_type in ["epochs", "evoked"]: + events = find_events(raw, stim_channel="STI 014") + epochs = Epochs(raw, events, tmin=-0.3, tmax=0.7, preload=True) + inst = epochs + if inst_type == "evoked": + inst = epochs.average() + del epochs + + ch_info = {"kind": FIFF.FIFFV_EOG_CH, "extra": "some extra value"} + with pytest.raises(KeyError, match="key errantly present"): + set_bipolar_reference(inst, "EEG 001", "EEG 002", "bipolar", ch_info) + ch_info.pop("extra") + reref = set_bipolar_reference(inst, "EEG 001", "EEG 002", "bipolar", ch_info) + assert reref.info["custom_ref_applied"] + + # Compare result to a manual calculation + a = inst.copy().pick(["EEG 001", "EEG 002"]) + a = a._data[..., 0, :] - a._data[..., 1, :] + b = reref.copy().pick(["bipolar"])._data[..., 0, :] + assert_allclose(a, b) + + # Original channels should be replaced by a virtual one + assert "EEG 001" not in reref.ch_names + assert "EEG 002" not in reref.ch_names + assert "bipolar" in reref.ch_names + + # Check channel information + bp_info = reref.info["chs"][reref.ch_names.index("bipolar")] + 
an_info = inst.info["chs"][inst.ch_names.index("EEG 001")] + for key in bp_info: + if key == "coil_type": + assert bp_info[key] == FIFF.FIFFV_COIL_EEG_BIPOLAR, key + elif key == "kind": + assert bp_info[key] == FIFF.FIFFV_EOG_CH, key + elif key != "ch_name": + assert_equal(bp_info[key], an_info[key], err_msg=key) + + # Minimalist call + reref = set_bipolar_reference(inst, "EEG 001", "EEG 002") + assert "EEG 001-EEG 002" in reref.ch_names + + # Minimalist call with twice the same anode + reref = set_bipolar_reference( + inst, ["EEG 001", "EEG 001", "EEG 002"], ["EEG 002", "EEG 003", "EEG 003"] + ) + assert "EEG 001-EEG 002" in reref.ch_names + assert "EEG 001-EEG 003" in reref.ch_names + + # Set multiple references at once + reref = set_bipolar_reference( + inst, + ["EEG 001", "EEG 003"], + ["EEG 002", "EEG 004"], + ["bipolar1", "bipolar2"], + [{"kind": FIFF.FIFFV_EOG_CH}, {"kind": FIFF.FIFFV_EOG_CH}], + ) + a = inst.copy().pick(["EEG 001", "EEG 002", "EEG 003", "EEG 004"]) + a = np.concatenate( + [ + a._data[..., :1, :] - a._data[..., 1:2, :], + a._data[..., 2:3, :] - a._data[..., 3:4, :], + ], + axis=-2, + ) + b = reref.copy().pick(["bipolar1", "bipolar2"])._data + assert_allclose(a, b) + + # Test creating a bipolar reference that doesn't involve EEG channels: + # it should not set the custom_ref_applied flag + reref = set_bipolar_reference( + inst, + "MEG 0111", + "MEG 0112", + ch_info={"kind": FIFF.FIFFV_MEG_CH}, + verbose="error", + ) + assert not reref.info["custom_ref_applied"] + assert "MEG 0111-MEG 0112" in reref.ch_names + + # Test a battery of invalid inputs + pytest.raises( + ValueError, + set_bipolar_reference, + inst, + "EEG 001", + ["EEG 002", "EEG 003"], + "bipolar", + ) + pytest.raises( + ValueError, + set_bipolar_reference, + inst, + ["EEG 001", "EEG 002"], + "EEG 003", + "bipolar", + ) + pytest.raises( + ValueError, + set_bipolar_reference, + inst, + "EEG 001", + "EEG 002", + ["bipolar1", "bipolar2"], + ) + pytest.raises( + ValueError, + set_bipolar_reference, + inst, + "EEG 001", + "EEG 002", + "bipolar", + ch_info=[{"foo": "bar"}, {"foo": "bar"}], + ) + pytest.raises( + ValueError, set_bipolar_reference, inst, "EEG 001", "EEG 002", ch_name="EEG 003" + ) + + # Test if bad anode/cathode raises error if on_bad="raise" + inst.info["bads"] = ["EEG 001"] + pytest.raises( + ValueError, set_bipolar_reference, inst, "EEG 001", "EEG 002", on_bad="raise" + ) + inst.info["bads"] = ["EEG 002"] + pytest.raises( + ValueError, set_bipolar_reference, inst, "EEG 001", "EEG 002", on_bad="raise" + ) + + # Test if bad anode/cathode raises warning if on_bad="warn" + inst.info["bads"] = ["EEG 001"] + pytest.warns( + RuntimeWarning, set_bipolar_reference, inst, "EEG 001", "EEG 002", on_bad="warn" + ) + inst.info["bads"] = ["EEG 002"] + pytest.warns( + RuntimeWarning, set_bipolar_reference, inst, "EEG 001", "EEG 002", on_bad="warn" + ) + + +def _check_channel_names(inst, ref_names): + """Check channel names.""" + if isinstance(ref_names, str): + ref_names = [ref_names] + + # Test that the names of the reference channels are present in `ch_names` + ref_idx = pick_channels(inst.info["ch_names"], ref_names) + assert len(ref_idx) == len(ref_names) + + # Test that the names of the reference channels are present in the `chs` + # list + inst.info._check_consistency() # Should raise no exceptions + + +@testing.requires_testing_data +def test_add_reference(): + """Test adding a reference.""" + raw = read_raw_fif(fif_fname, preload=True) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + # check 
if channel already exists + pytest.raises(ValueError, add_reference_channels, raw, raw.info["ch_names"][0]) + # add reference channel to Raw + raw_ref = add_reference_channels(raw, "Ref", copy=True) + assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1) + assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) + _check_channel_names(raw_ref, "Ref") + + orig_nchan = raw.info["nchan"] + raw = add_reference_channels(raw, "Ref", copy=False) + assert_array_equal(raw._data, raw_ref._data) + assert_equal(raw.info["nchan"], orig_nchan + 1) + _check_channel_names(raw, "Ref") + + # for Neuromag fif's, the reference electrode location is placed in + # elements [3:6] of each "data" electrode location + assert_allclose( + raw.info["chs"][-1]["loc"][:3], raw.info["chs"][picks_eeg[0]]["loc"][3:6], 1e-6 + ) + + ref_idx = raw.ch_names.index("Ref") + ref_data, _ = raw[ref_idx] + assert_array_equal(ref_data, 0) + + # add reference channel to Raw when no digitization points exist + raw = read_raw_fif(fif_fname).crop(0, 1).load_data() + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + del raw.info["dig"] + + raw_ref = add_reference_channels(raw, "Ref", copy=True) + + assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1) + assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) + _check_channel_names(raw_ref, "Ref") + + orig_nchan = raw.info["nchan"] + raw = add_reference_channels(raw, "Ref", copy=False) + assert_array_equal(raw._data, raw_ref._data) + assert_equal(raw.info["nchan"], orig_nchan + 1) + _check_channel_names(raw, "Ref") + + # Test adding an existing channel as reference channel + pytest.raises(ValueError, add_reference_channels, raw, raw.info["ch_names"][0]) + + # add two reference channels to Raw + raw_ref = add_reference_channels(raw, ["M1", "M2"], copy=True) + _check_channel_names(raw_ref, ["M1", "M2"]) + assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2) + assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) + assert_array_equal(raw_ref._data[-2:, :], 0) + + raw = add_reference_channels(raw, ["M1", "M2"], copy=False) + _check_channel_names(raw, ["M1", "M2"]) + ref_idx = raw.ch_names.index("M1") + ref_idy = raw.ch_names.index("M2") + ref_data, _ = raw[[ref_idx, ref_idy]] + assert_array_equal(ref_data, 0) + + loc1 = raw.info["chs"][ref_idx]["loc"] + loc2 = raw.info["chs"][ref_idy]["loc"] + assert loc1 is not loc2 + + # add reference channel to epochs + raw = read_raw_fif(fif_fname, preload=True) + events = read_events(eve_fname) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + epochs = Epochs( + raw, + events=events, + event_id=1, + tmin=-0.2, + tmax=0.5, + picks=picks_eeg, + preload=True, + ) + # default: proj=True, after which adding a Ref channel is prohibited + pytest.raises(RuntimeError, add_reference_channels, epochs, "Ref") + + # create epochs in delayed mode, allowing removal of CAR when re-reffing + epochs = Epochs( + raw, + events=events, + event_id=1, + tmin=-0.2, + tmax=0.5, + picks=picks_eeg, + preload=True, + proj="delayed", + ) + epochs_ref = add_reference_channels(epochs, "Ref", copy=True) + + assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1) + _check_channel_names(epochs_ref, "Ref") + ref_idx = epochs_ref.ch_names.index("Ref") + ref_data = epochs_ref.get_data(picks=[ref_idx])[:, 0] + assert_array_equal(ref_data, 0) + picks_eeg = pick_types(epochs.info, meg=False, eeg=True) + assert_array_equal(epochs.get_data(picks_eeg), epochs_ref.get_data(picks_eeg)) + + # add two 
reference channels to epochs + raw = read_raw_fif(fif_fname, preload=True) + events = read_events(eve_fname) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + # create epochs in delayed mode, allowing removal of CAR when re-reffing + epochs = Epochs( + raw, + events=events, + event_id=1, + tmin=-0.2, + tmax=0.5, + picks=picks_eeg, + preload=True, + proj="delayed", + ) + with pytest.warns(RuntimeWarning, match="for this channel is unknown or ambiguous"): + epochs_ref = add_reference_channels(epochs, ["M1", "M2"], copy=True) + assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2) + _check_channel_names(epochs_ref, ["M1", "M2"]) + ref_idx = epochs_ref.ch_names.index("M1") + ref_idy = epochs_ref.ch_names.index("M2") + assert_equal(epochs_ref.info["chs"][ref_idx]["ch_name"], "M1") + assert_equal(epochs_ref.info["chs"][ref_idy]["ch_name"], "M2") + ref_data = epochs_ref.get_data([ref_idx, ref_idy]) + assert_array_equal(ref_data, 0) + picks_eeg = pick_types(epochs.info, meg=False, eeg=True) + assert_array_equal(epochs.get_data(picks_eeg), epochs_ref.get_data(picks_eeg)) + + # add reference channel to evoked + raw = read_raw_fif(fif_fname, preload=True) + events = read_events(eve_fname) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + # create epochs in delayed mode, allowing removal of CAR when re-reffing + epochs = Epochs( + raw, + events=events, + event_id=1, + tmin=-0.2, + tmax=0.5, + picks=picks_eeg, + preload=True, + proj="delayed", + ) + evoked = epochs.average() + evoked_ref = add_reference_channels(evoked, "Ref", copy=True) + assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1) + _check_channel_names(evoked_ref, "Ref") + ref_idx = evoked_ref.ch_names.index("Ref") + ref_data = evoked_ref.data[ref_idx, :] + assert_array_equal(ref_data, 0) + picks_eeg = pick_types(evoked.info, meg=False, eeg=True) + assert_array_equal(evoked.data[picks_eeg, :], evoked_ref.data[picks_eeg, :]) + + # add two reference channels to evoked + raw = read_raw_fif(fif_fname, preload=True) + events = read_events(eve_fname) + picks_eeg = pick_types(raw.info, meg=False, eeg=True) + # create epochs in delayed mode, allowing removal of CAR when re-reffing + epochs = Epochs( + raw, + events=events, + event_id=1, + tmin=-0.2, + tmax=0.5, + picks=picks_eeg, + preload=True, + proj="delayed", + ) + evoked = epochs.average() + with pytest.warns(RuntimeWarning, match="for this channel is unknown or ambiguous"): + evoked_ref = add_reference_channels(evoked, ["M1", "M2"], copy=True) + assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2) + _check_channel_names(evoked_ref, ["M1", "M2"]) + ref_idx = evoked_ref.ch_names.index("M1") + ref_idy = evoked_ref.ch_names.index("M2") + ref_data = evoked_ref.data[[ref_idx, ref_idy], :] + assert_array_equal(ref_data, 0) + picks_eeg = pick_types(evoked.info, meg=False, eeg=True) + assert_array_equal(evoked.data[picks_eeg, :], evoked_ref.data[picks_eeg, :]) + + # Test invalid inputs + raw = read_raw_fif(fif_fname, preload=False) + with pytest.raises(RuntimeError, match="loaded"): + add_reference_channels(raw, ["Ref"]) + raw.load_data() + with pytest.raises(ValueError, match="Channel.*already.*"): + add_reference_channels(raw, raw.ch_names[:1]) + with pytest.raises(TypeError, match="instance of"): + add_reference_channels(raw, 1) + + # gh-10878 + raw = read_raw_fif(raw_fname).crop(0, 1, include_tmax=False).load_data() + data = raw.copy().add_reference_channels(["REF"]).pick(picks="eeg") + data = data.get_data() + epochs = 
make_fixed_length_epochs(raw).load_data() + data_2 = epochs.copy().add_reference_channels(["REF"]).pick(picks="eeg") + data_2 = data_2.get_data(copy=False)[0] + assert_allclose(data, data_2) + evoked = epochs.average() + data_3 = evoked.copy().add_reference_channels(["REF"]).pick(picks="eeg") + data_3 = data_3.get_data() + assert_allclose(data, data_3) + + +@pytest.mark.parametrize("n_ref", (1, 2)) +def test_add_reorder(n_ref): + """Test that a reference channel can be added and then data reordered.""" + # gh-8300 + raw = read_raw_fif(raw_fname).crop(0, 0.1).del_proj().pick("eeg") + assert len(raw.ch_names) == 60 + chs = [f"EEG {60 + ii:03}" for ii in range(1, n_ref)] + ["EEG 000"] + with pytest.raises(RuntimeError, match="preload"): + with _record_warnings(): # ignore multiple warning + add_reference_channels(raw, chs, copy=False) + raw.load_data() + if n_ref == 1: + ctx = nullcontext() + else: + assert n_ref == 2 + ctx = pytest.warns(RuntimeWarning, match="this channel is unknown or ambiguous") + with ctx: + add_reference_channels(raw, chs, copy=False) + data = raw.get_data() + assert_array_equal(data[-1], 0.0) + assert raw.ch_names[-n_ref:] == chs + raw.reorder_channels(raw.ch_names[-1:] + raw.ch_names[:-1]) + assert raw.ch_names == [f"EEG {ii:03}" for ii in range(60 + n_ref)] + data_new = raw.get_data() + data_new = np.concatenate([data_new[1:], data_new[:1]]) + assert_allclose(data, data_new) + + +def test_bipolar_combinations(): + """Test bipolar channel generation.""" + ch_names = ["CH" + str(ni + 1) for ni in range(10)] + info = create_info( + ch_names=ch_names, sfreq=1000.0, ch_types=["eeg"] * len(ch_names) + ) + raw_data = np.random.randn(len(ch_names), 1000) + raw = RawArray(raw_data, info) + + def _check_bipolar(raw_test, ch_a, ch_b): + picks = [raw_test.ch_names.index(ch_a + "-" + ch_b)] + get_data_res = raw_test.get_data(picks=picks)[0, :] + manual_a = raw_data[ch_names.index(ch_a), :] + manual_b = raw_data[ch_names.index(ch_b), :] + assert_array_equal(get_data_res, manual_a - manual_b) + + # test classic EOG/ECG bipolar reference (only two channels per pair). + raw_test = set_bipolar_reference(raw, ["CH2"], ["CH1"], copy=True) + _check_bipolar(raw_test, "CH2", "CH1") + + # test all combinations. + a_channels, b_channels = zip(*itertools.combinations(ch_names, 2)) + a_channels, b_channels = list(a_channels), list(b_channels) + raw_test = set_bipolar_reference(raw, a_channels, b_channels, copy=True) + for ch_a, ch_b in zip(a_channels, b_channels): + _check_bipolar(raw_test, ch_a, ch_b) + # check if reference channels have been dropped. + assert len(raw_test.ch_names) == len(a_channels) + + raw_test = set_bipolar_reference( + raw, a_channels, b_channels, drop_refs=False, copy=True + ) + # check if reference channels have been kept correctly. + assert len(raw_test.ch_names) == len(a_channels) + len(ch_names) + for idx, ch_label in enumerate(ch_names): + manual_ch = raw_data[np.newaxis, idx] + assert_array_equal(raw_test.get_data(ch_label), manual_ch) + + # test bipolars with a channel in both list (anode & cathode). 
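    # Illustrative sketch (not lines from the PR): when a channel appears as
    # both anode and cathode, the two derived channels are exact negatives of
    # each other, i.e. roughly:
    #   ch21 = raw_data[1] - raw_data[0]   # expected content of "CH2-CH1"
    #   ch12 = raw_data[0] - raw_data[1]   # expected content of "CH1-CH2"
    #   np.testing.assert_allclose(ch21, -ch12)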
+ raw_test = set_bipolar_reference(raw, ["CH2", "CH1"], ["CH1", "CH2"], copy=True) + _check_bipolar(raw_test, "CH2", "CH1") + _check_bipolar(raw_test, "CH1", "CH2") + + # test if bipolar channel is bad if anode is a bad channel + raw.info["bads"] = ["CH1"] + raw_test = set_bipolar_reference( + raw, ["CH1"], ["CH2"], on_bad="ignore", ch_name="bad_bipolar", copy=True + ) + assert raw_test.info["bads"] == ["bad_bipolar"] + + # test if bipolar channel is bad if cathode is a bad channel + raw.info["bads"] = ["CH2"] + raw_test = set_bipolar_reference( + raw, ["CH1"], ["CH2"], on_bad="ignore", ch_name="bad_bipolar", copy=True + ) + assert raw_test.info["bads"] == ["bad_bipolar"] diff --git a/mne/_fiff/tests/test_show_fiff.py b/mne/_fiff/tests/test_show_fiff.py new file mode 100644 index 00000000000..b771d3bd081 --- /dev/null +++ b/mne/_fiff/tests/test_show_fiff.py @@ -0,0 +1,35 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from pathlib import Path + +from mne.io import show_fiff + +base_dir = Path(__file__).parents[2] / "io" / "tests" / "data" +fname_evoked = base_dir / "test-ave.fif" +fname_raw = base_dir / "test_raw.fif" +fname_c_annot = base_dir / "test_raw-annot.fif" + + +def test_show_fiff(): + """Test show_fiff.""" + # this is not exhaustive, but hopefully bugs will be found in use + info = show_fiff(fname_evoked) + assert "BAD" not in info + keys = [ + "FIFF_EPOCH", + "FIFFB_HPI_COIL", + "FIFFB_PROJ_ITEM", + "FIFFB_PROCESSED_DATA", + "FIFFB_EVOKED", + "FIFF_NAVE", + "FIFF_EPOCH", + "COORD_TRANS", + ] + assert all(key in info for key in keys) + info = show_fiff(fname_raw, read_limit=1024) + assert "BAD" not in info + info = show_fiff(fname_c_annot) + assert "BAD" not in info + assert ">B" in info, info diff --git a/mne/_fiff/tests/test_utils.py b/mne/_fiff/tests/test_utils.py new file mode 100644 index 00000000000..ba6748826a3 --- /dev/null +++ b/mne/_fiff/tests/test_utils.py @@ -0,0 +1,18 @@ +"""Run tests for the utilities.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from mne._fiff.utils import _check_orig_units + + +def test_check_orig_units(): + """Test the checking of original units.""" + orig_units = dict(FC1="nV", Hfp3erz="n/a", Pz="uV", greekMu="μV", microSign="µV") + orig_units = _check_orig_units(orig_units) + assert orig_units["FC1"] == "nV" + assert orig_units["Hfp3erz"] == "n/a" + assert orig_units["Pz"] == "µV" + assert orig_units["greekMu"] == "µV" + assert orig_units["microSign"] == "µV" diff --git a/mne/_fiff/tests/test_what.py b/mne/_fiff/tests/test_what.py new file mode 100644 index 00000000000..80bebdceeba --- /dev/null +++ b/mne/_fiff/tests/test_what.py @@ -0,0 +1,67 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
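# Orientation sketch (hypothetical calls mirroring the test below): the
# expected kind is taken from the last dash-separated piece of the file stem
# and then looked up in ``want_dict``, e.g.
#   Path("sample_audvis-ave.fif").stem.split("-")[-1]            # "ave" -> "evoked"
#   Path("sample_audvis_trunc_raw-eve.fif").stem.split("-")[-1]  # "eve" -> "events"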
+
+import glob
+from pathlib import Path
+
+import numpy as np
+import pytest
+
+from mne import create_info, what
+from mne.datasets import testing
+from mne.io import RawArray
+from mne.preprocessing import ICA
+from mne.utils import _record_warnings
+
+data_path = testing.data_path(download=False)
+
+
+@pytest.mark.slowtest
+@testing.requires_testing_data
+def test_what(tmp_path, verbose_debug):
+    """Test mne.what."""
+    pytest.importorskip("sklearn")
+    # ICA
+    ica = ICA(max_iter=1, random_state=0)
+    raw = RawArray(np.random.RandomState(0).randn(3, 10), create_info(3, 1000.0, "eeg"))
+    with _record_warnings():  # convergence sometimes
+        ica.fit(raw)
+    fname = tmp_path / "x-ica.fif"
+    ica.save(fname)
+    assert what(fname) == "ica"
+    # test files
+    fnames = glob.glob(str(data_path / "MEG" / "sample" / "*.fif"))
+    fnames += glob.glob(str(data_path / "subjects" / "sample" / "bem" / "*.fif"))
+    fnames += [str(fname)]
+    fnames = sorted(fnames)
+    want_dict = dict(
+        eve="events",
+        ave="evoked",
+        cov="cov",
+        ica="ica",
+        inv="inverse",
+        fwd="forward",
+        trans="transform",
+        proj="proj",
+        raw="raw",
+        sol="bem solution",
+        bem="bem surfaces",
+        src="src",
+        dense="bem surfaces",
+        head="bem surfaces",
+        fiducials="fiducials",
+    )
+    got = set()
+    for fname in fnames:
+        print(fname)
+        kind = Path(fname).stem.split("-")[-1]
+        if len(kind) > 5:
+            kind = kind.split("_")[-1]
+        this = what(fname)
+        assert this == want_dict[kind], fname
+        print()
+        got.add(kind)
+    assert set(want_dict) == got
+    fname = data_path / "MEG" / "sample" / "sample_audvis-ave_xfit.dip"
+    assert what(fname) == "unknown"
diff --git a/mne/_fiff/tests/test_write.py b/mne/_fiff/tests/test_write.py
new file mode 100644
index 00000000000..43087ec0dc8
--- /dev/null
+++ b/mne/_fiff/tests/test_write.py
@@ -0,0 +1,21 @@
+"""Run tests for writing."""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import pytest
+
+from mne._fiff.constants import FIFF
+from mne._fiff.write import start_file, write_int
+
+
+def test_write_int(tmp_path):
+    """Test that write_int raises an error on bad values."""
+    with start_file(tmp_path / "temp.fif") as fid:
+        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [2147483647])  # 2 ** 31 - 1
+        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [])  # 2 ** 31 - 1
+        with pytest.raises(TypeError, match=r".*exceeds max.*EVENT_LIST\)"):
+            write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [2147483648])  # 2 ** 31
+        with pytest.raises(TypeError, match="Cannot safely write"):
+            write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [0.0])  # float
diff --git a/mne/_fiff/tree.py b/mne/_fiff/tree.py
new file mode 100644
index 00000000000..142c40aa5a8
--- /dev/null
+++ b/mne/_fiff/tree.py
@@ -0,0 +1,108 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+
+from ..utils import logger, verbose
+from .constants import FIFF
+from .tag import read_tag
+
+
+def dir_tree_find(tree, kind):
+    """Find nodes of the given kind from a directory tree structure.
+
+    Parameters
+    ----------
+    tree : dict
+        Directory tree.
+    kind : int
+        Kind to find.
+
+    Returns
+    -------
+    nodes : list
+        List of matching nodes.
+    """
+    nodes = []
+
+    if isinstance(tree, list):
+        for t in tree:
+            nodes += dir_tree_find(t, kind)
+    else:
+        # Am I desirable myself?
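        # Node layout sketch (inferred from make_dir_tree() below, for
        # orientation only): each node is a dict roughly of the form
        #   {"block": <FIFF block kind>, "id": ..., "parent_id": ...,
        #    "nent": <number of tags>, "nchild": <number of children>,
        #    "directory": [<tag directory entries>] or None,
        #    "children": [<nested nodes of the same shape>]}
        # so a node matches when its "block" equals the requested kind.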
+ if tree["block"] == kind: + nodes.append(tree) + + # Search the subtrees + for child in tree["children"]: + nodes += dir_tree_find(child, kind) + return nodes + + +@verbose +def make_dir_tree(fid, directory, start=0, indent=0, verbose=None): + """Create the directory tree structure.""" + if directory[start].kind == FIFF.FIFF_BLOCK_START: + tag = read_tag(fid, directory[start].pos) + block = tag.data.item() + else: + block = 0 + + start_separate = False + + this = start + + tree = dict() + tree["block"] = block + tree["id"] = None + tree["parent_id"] = None + tree["nent"] = 0 + tree["nchild"] = 0 + tree["directory"] = directory[this] + tree["children"] = [] + + while this < len(directory): + if directory[this].kind == FIFF.FIFF_BLOCK_START: + if this != start: + if not start_separate: + start_separate = True + logger.debug(" " * indent + f"start {{ {block}") + child, this = make_dir_tree(fid, directory, this, indent + 1) + tree["nchild"] += 1 + tree["children"].append(child) + elif directory[this].kind == FIFF.FIFF_BLOCK_END: + tag = read_tag(fid, directory[start].pos) + if tag.data == block: + break + else: + tree["nent"] += 1 + if tree["nent"] == 1: + tree["directory"] = list() + tree["directory"].append(directory[this]) + + # Add the id information if available + if block == 0: + if directory[this].kind == FIFF.FIFF_FILE_ID: + tag = read_tag(fid, directory[this].pos) + tree["id"] = tag.data + else: + if directory[this].kind == FIFF.FIFF_BLOCK_ID: + tag = read_tag(fid, directory[this].pos) + tree["id"] = tag.data + elif directory[this].kind == FIFF.FIFF_PARENT_BLOCK_ID: + tag = read_tag(fid, directory[this].pos) + tree["parent_id"] = tag.data + + this += 1 + + # Eliminate the empty directory + if tree["nent"] == 0: + tree["directory"] = None + + content = f"block = {tree['block']} nent = {tree['nent']} nchild = {tree['nchild']}" + if start_separate: + logger.debug(" " * indent + f"end }} {content}") + else: + logger.debug(" " * indent + content) + last = this + return tree, last diff --git a/mne/_fiff/utils.py b/mne/_fiff/utils.py new file mode 100644 index 00000000000..b158914bb88 --- /dev/null +++ b/mne/_fiff/utils.py @@ -0,0 +1,331 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +import os.path as op +from pathlib import Path + +import numpy as np + +from .constants import FIFF +from .meas_info import _get_valid_units + + +def _check_orig_units(orig_units): + """Check original units from a raw file. + + Units that are close to a valid_unit but not equal can be remapped to fit + into the valid_units. All other units that are not valid will be replaced + with "n/a". + + Parameters + ---------- + orig_units : dict + Dictionary mapping channel names to their units as specified in + the header file. Example: {'FC1': 'nV'} + + Returns + ------- + orig_units_remapped : dict + Dictionary mapping channel names to their VALID units as specified in + the header file. Invalid units are now labeled "n/a". + Example: {'FC1': 'nV', 'Hfp3erz': 'n/a'} + """ + if orig_units is None: + return + valid_units = _get_valid_units() + valid_units_lowered = [unit.lower() for unit in valid_units] + orig_units_remapped = dict(orig_units) + for ch_name, unit in orig_units.items(): + # Be lenient: we ignore case for now. 
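        # Worked example (hypothetical channel names): for
        # orig_units={"FC1": "uV", "X1": "foo"} neither "uv" nor "foo" is a
        # valid unit, so "uV" is rescued by the remap table below (-> "µV")
        # while "foo" falls through to "n/a".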
+ if unit.lower() in valid_units_lowered: + continue + + # Common "invalid units" can be remapped to their valid equivalent + remap_dict = dict() + remap_dict["uv"] = "µV" + remap_dict["μv"] = "µV" # greek letter mu vs micro sign. use micro + remap_dict["\x83\xeav"] = "µV" # for shift-jis mu, use micro + if unit.lower() in remap_dict: + orig_units_remapped[ch_name] = remap_dict[unit.lower()] + continue + + # Some units cannot be saved, they are invalid: assign "n/a" + orig_units_remapped[ch_name] = "n/a" + + return orig_units_remapped + + +def _find_channels(ch_names, ch_type="EOG"): + """Find EOG channel.""" + substrings = (ch_type,) + substrings = [s.upper() for s in substrings] + if ch_type == "EOG": + substrings = ("EOG", "EYE") + eog_idx = [ + idx + for idx, ch in enumerate(ch_names) + if any(substring in ch.upper() for substring in substrings) + ] + return eog_idx + + +def _mult_cal_one(data_view, one, idx, cals, mult): + """Take a chunk of raw data, multiply by mult or cals, and store.""" + one = np.asarray(one, dtype=data_view.dtype) + assert data_view.shape[1] == one.shape[1], ( + data_view.shape[1], + one.shape[1], + ) # noqa: E501 + if mult is not None: + assert mult.ndim == one.ndim == 2 + data_view[:] = mult @ one[idx] + else: + assert cals is not None + if isinstance(idx, slice): + data_view[:] = one[idx] + else: + # faster than doing one = one[idx] + np.take(one, idx, axis=0, out=data_view) + data_view *= cals + + +def _blk_read_lims(start, stop, buf_len): + """Deal with indexing in the middle of a data block. + + Parameters + ---------- + start : int + Starting index. + stop : int + Ending index (exclusive). + buf_len : int + Buffer size in samples. + + Returns + ------- + block_start_idx : int + The first block to start reading from. + r_lims : list + The read limits. + d_lims : list + The write limits. + + Notes + ----- + Consider this example:: + + >>> start, stop, buf_len = 2, 27, 10 + + +---------+---------+--------- + File structure: | buf0 | buf1 | buf2 | + +---------+---------+--------- + File time: 0 10 20 30 + +---------+---------+--------- + Requested time: 2 27 + + | | + blockstart blockstop + | | + start stop + + We need 27 - 2 = 25 samples (per channel) to store our data, and + we need to read from 3 buffers (30 samples) to get all of our data. + + On all reads but the first, the data we read starts at + the first sample of the buffer. On all reads but the last, + the data we read ends on the last sample of the buffer. + + We call ``this_data`` the variable that stores the current buffer's data, + and ``data`` the variable that stores the total output. 
+ + On the first read, we need to do this:: + + >>> data[0:buf_len-2] = this_data[2:buf_len] # doctest: +SKIP + + On the second read, we need to do:: + + >>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len] # doctest: +SKIP + + On the final read, we need to do:: + + >>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3] # doctest: +SKIP + + This function encapsulates this logic to allow a loop over blocks, where + data is stored using the following limits:: + + >>> data[d_lims[ii, 0]:d_lims[ii, 1]] = this_data[r_lims[ii, 0]:r_lims[ii, 1]] # doctest: +SKIP + + """ # noqa: E501 + # this is used to deal with indexing in the middle of a sampling period + assert all(isinstance(x, int) for x in (start, stop, buf_len)) + block_start_idx = start // buf_len + block_start = block_start_idx * buf_len + last_used_samp = stop - 1 + block_stop = last_used_samp - last_used_samp % buf_len + buf_len + read_size = block_stop - block_start + n_blk = read_size // buf_len + (read_size % buf_len != 0) + start_offset = start - block_start + end_offset = block_stop - stop + d_lims = np.empty((n_blk, 2), int) + r_lims = np.empty((n_blk, 2), int) + for bi in range(n_blk): + # Triage start (sidx) and end (eidx) indices for + # data (d) and read (r) + if bi == 0: + d_sidx = 0 + r_sidx = start_offset + else: + d_sidx = bi * buf_len - start_offset + r_sidx = 0 + if bi == n_blk - 1: + d_eidx = stop - start + r_eidx = buf_len - end_offset + else: + d_eidx = (bi + 1) * buf_len - start_offset + r_eidx = buf_len + d_lims[bi] = [d_sidx, d_eidx] + r_lims[bi] = [r_sidx, r_eidx] + return block_start_idx, r_lims, d_lims + + +def _file_size(fname): + """Get the file size in bytes.""" + with open(fname, "rb") as f: + f.seek(0, os.SEEK_END) + return f.tell() + + +def _read_segments_file( + raw, + data, + idx, + fi, + start, + stop, + cals, + mult, + dtype, + n_channels=None, + offset=0, + trigger_ch=None, +): + """Read a chunk of raw data.""" + if n_channels is None: + n_channels = raw._raw_extras[fi]["orig_nchan"] + + n_bytes = np.dtype(dtype).itemsize + # data_offset and data_left count data samples (channels x time points), + # not bytes. 
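    # Worked example (hypothetical numbers, not from the PR): with
    # n_channels=64, start=1000, dtype "<f4" (n_bytes=4) and offset=0,
    #   data_offset = 64 * 1000 * 4 = 256000   # bytes skipped before reading
    #   data_left   = (stop - start) * 64      # samples still to be read
    # and block_size below is a multiple of n_channels capped so that each
    # np.fromfile call reads at most ~100 MB.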
+ data_offset = n_channels * start * n_bytes + offset + data_left = (stop - start) * n_channels + + # Read up to 100 MB of data at a time, block_size is in data samples + block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels + block_size = min(data_left, block_size) + with open(raw.filenames[fi], "rb", buffering=0) as fid: + fid.seek(data_offset) + # extract data in chunks + for sample_start in np.arange(0, data_left, block_size) // n_channels: + count = min(block_size, data_left - sample_start * n_channels) + block = np.fromfile(fid, dtype, count) + if block.size != count: + raise RuntimeError( + f"Incorrect number of samples ({block.size} != {count}), please " + "report this error to MNE-Python developers" + ) + block = block.reshape(n_channels, -1, order="F") + n_samples = block.shape[1] # = count // n_channels + sample_stop = sample_start + n_samples + if trigger_ch is not None: + stim_ch = trigger_ch[start:stop][sample_start:sample_stop] + block = np.vstack((block, stim_ch)) + data_view = data[:, sample_start:sample_stop] + _mult_cal_one(data_view, block, idx, cals, mult) + + +def read_str(fid, count=1): + """Read string from a binary file in a python version compatible way.""" + dtype = np.dtype(f">S{count}") + string = fid.read(dtype.itemsize) + data = np.frombuffer(string, dtype=dtype)[0] + bytestr = b"".join([data[0 : data.index(b"\x00") if b"\x00" in data else count]]) + + return str(bytestr.decode("ascii")) # Return native str type for Py2/3 + + +def _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc): + """Initialize info['chs'] for eeg channels.""" + chs = list() + for idx, ch_name in enumerate(ch_names): + if ch_name in eog or idx in eog: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_EOG_CH + elif ch_name in ecg or idx in ecg: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_ECG_CH + elif ch_name in emg or idx in emg: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_EMG_CH + elif ch_name in misc or idx in misc: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_MISC_CH + else: + coil_type = ch_coil + kind = ch_kind + + chan_info = { + "cal": cals[idx], + "logno": idx + 1, + "scanno": idx + 1, + "range": 1.0, + "unit_mul": FIFF.FIFF_UNITM_NONE, + "ch_name": ch_name, + "unit": FIFF.FIFF_UNIT_V, + "coord_frame": FIFF.FIFFV_COORD_HEAD, + "coil_type": coil_type, + "kind": kind, + "loc": np.zeros(12), + } + if coil_type == FIFF.FIFFV_COIL_EEG: + chan_info["loc"][:3] = np.nan + chs.append(chan_info) + return chs + + +def _construct_bids_filename(base, ext, part_idx, validate=True): + """Construct a BIDS compatible filename for split files.""" + # insert index in filename + dirname = op.dirname(base) + base = op.basename(base) + deconstructed_base = base.split("_") + if len(deconstructed_base) < 2 and validate: + raise ValueError( + "Filename base must end with an underscore followed " + f"by the modality (e.g., _eeg or _meg), got {base}" + ) + suffix = deconstructed_base[-1] + base = "_".join(deconstructed_base[:-1]) + use_fname = f"{base}_split-{part_idx + 1:02}_{suffix}{ext}" + if dirname: + use_fname = op.join(dirname, use_fname) + return use_fname + + +def _make_split_fnames(fname, n_splits, split_naming): + """Make a list of split filenames.""" + if n_splits == 1: + fname = Path(fname) + return [fname] + res = [] + base, ext = op.splitext(fname) + for i in range(n_splits): + if split_naming == "neuromag": + path = Path(f"{base}-{i:d}{ext}" if i else fname) + res.append(path) + else: + assert split_naming == "bids" + path = 
Path(_construct_bids_filename(base, ext, i)) + res.append(path) + return res diff --git a/mne/_fiff/what.py b/mne/_fiff/what.py new file mode 100644 index 00000000000..d91f79da5cf --- /dev/null +++ b/mne/_fiff/what.py @@ -0,0 +1,70 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from collections import OrderedDict +from inspect import signature + +from ..utils import _check_fname, logger + + +def what(fname): + """Try to determine the type of the FIF file. + + Parameters + ---------- + fname : path-like + The filename. Should end in ``.fif`` or ``.fif.gz``. + + Returns + ------- + what : str | None + The type of the file. Will be 'unknown' if it could not be determined. + + Notes + ----- + .. versionadded:: 0.19 + """ + from ..bem import read_bem_solution, read_bem_surfaces + from ..cov import read_cov + from ..epochs import read_epochs + from ..event import read_events + from ..evoked import read_evokeds + from ..forward import read_forward_solution + from ..io import read_raw_fif + from ..minimum_norm import read_inverse_operator + from ..preprocessing import read_ica + from ..proj import read_proj + from ..source_space import read_source_spaces + from ..transforms import read_trans + from .meas_info import read_fiducials + + fname = _check_fname(fname, overwrite="read", must_exist=True) + checks = OrderedDict() + checks["raw"] = read_raw_fif + checks["ica"] = read_ica + checks["epochs"] = read_epochs + checks["evoked"] = read_evokeds + checks["forward"] = read_forward_solution + checks["inverse"] = read_inverse_operator + checks["src"] = read_source_spaces + checks["bem solution"] = read_bem_solution + checks["bem surfaces"] = read_bem_surfaces + checks["cov"] = read_cov + checks["transform"] = read_trans + checks["events"] = read_events + checks["fiducials"] = read_fiducials + checks["proj"] = read_proj + for what, func in checks.items(): + args = signature(func).parameters + assert "verbose" in args, func + kwargs = dict(verbose="error") + if "preload" in args: + kwargs["preload"] = False + try: + func(fname, **kwargs) + except Exception as exp: + logger.debug(f"Not {what}: {exp}") + else: + return what + return "unknown" diff --git a/mne/_fiff/write.py b/mne/_fiff/write.py new file mode 100644 index 00000000000..8486ca13121 --- /dev/null +++ b/mne/_fiff/write.py @@ -0,0 +1,455 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import datetime +import os.path as op +import re +import time +import uuid +from contextlib import contextmanager +from gzip import GzipFile + +import numpy as np +from scipy.sparse import csc_array, csr_array + +from ..utils import _check_fname, _file_like, _validate_type, logger +from ..utils.numerics import _date_to_julian +from .constants import FIFF + +# We choose a "magic" date to store (because meas_date is obligatory) +# to treat as meas_date=None. This one should be impossible for systems +# to write -- the second field is microseconds, so anything >= 1e6 +# should be moved into the first field (seconds). +DATE_NONE = (0, 2**31 - 1) + + +def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype): + """Write data.""" + if isinstance(data, np.ndarray): + data_size *= data.size + + # XXX for string types the data size is used as + # computed in ``write_string``. 
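    # Wire-format sketch (follows directly from the writes below): every FIFF
    # tag is a 16-byte big-endian header followed by the payload,
    #   int32 kind | int32 type | int32 size | int32 next | <size bytes of data>
    # so e.g. write_int(fid, FIFF.FIFF_NCHAN, [1]) emits 16 + 4 = 20 bytes.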
+ + fid.write(np.array(kind, dtype=">i4").tobytes()) + fid.write(np.array(FIFFT_TYPE, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + fid.write(np.array(data, dtype=dtype).tobytes()) + + +def _get_split_size(split_size): + """Convert human-readable bytes to machine-readable bytes.""" + if isinstance(split_size, str): + exp = dict(MB=20, GB=30).get(split_size[-2:], None) + if exp is None: + raise ValueError('split_size has to end with either "MB" or "GB"') + split_size = int(float(split_size[:-2]) * 2**exp) + + if split_size > 2147483648: + raise ValueError("split_size cannot be larger than 2GB") + return split_size + + +_NEXT_FILE_BUFFER = 1048576 # 2 ** 20 extra cushion for last post-data tags + + +def write_nop(fid, last=False): + """Write a FIFF_NOP.""" + fid.write(np.array(FIFF.FIFF_NOP, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFT_VOID, dtype=">i4").tobytes()) + fid.write(np.array(0, dtype=">i4").tobytes()) + next_ = FIFF.FIFFV_NEXT_NONE if last else FIFF.FIFFV_NEXT_SEQ + fid.write(np.array(next_, dtype=">i4").tobytes()) + + +INT32_MAX = 2147483647 + + +def write_int(fid, kind, data): + """Write a 32-bit integer tag to a fif file.""" + data_size = 4 + data = np.asarray(data) + if data.dtype.kind not in "uib" and data.size > 0: + raise TypeError( + f"Cannot safely write data kind {kind} with dtype {data.dtype} as int", + ) + max_val = data.max() if data.size > 0 else 0 + if max_val > INT32_MAX: + raise TypeError( + f"Value {max_val} exceeds maximum allowed ({INT32_MAX}) for tag {kind}" + ) + data = data.astype(">i4").T + _write(fid, data, kind, data_size, FIFF.FIFFT_INT, ">i4") + + +def write_double(fid, kind, data): + """Write a double-precision floating point tag to a fif file.""" + data_size = 8 + data = np.array(data, dtype=">f8").T + _write(fid, data, kind, data_size, FIFF.FIFFT_DOUBLE, ">f8") + + +def write_float(fid, kind, data): + """Write a single-precision floating point tag to a fif file.""" + data_size = 4 + data = np.array(data, dtype=">f4").T + _write(fid, data, kind, data_size, FIFF.FIFFT_FLOAT, ">f4") + + +def write_dau_pack16(fid, kind, data): + """Write a dau_pack16 tag to a fif file.""" + data_size = 2 + data = np.array(data, dtype=">i2").T + _write(fid, data, kind, data_size, FIFF.FIFFT_DAU_PACK16, ">i2") + + +def write_complex64(fid, kind, data): + """Write a 64 bit complex floating point tag to a fif file.""" + data_size = 8 + data = np.array(data, dtype=">c8").T + _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, ">c8") + + +def write_complex128(fid, kind, data): + """Write a 128 bit complex floating point tag to a fif file.""" + data_size = 16 + data = np.array(data, dtype=">c16").T + _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, ">c16") + + +def write_julian(fid, kind, data): + """Write a Julian-formatted date to a FIF file.""" + assert isinstance(data, datetime.date), type(data) + data_size = 4 + jd = _date_to_julian(data) + data = np.array(jd, dtype=">i4") + _write(fid, data, kind, data_size, FIFF.FIFFT_JULIAN, ">i4") + + +def write_string(fid, kind, data): + """Write a string tag.""" + try: + str_data = str(data).encode("latin1") + except UnicodeEncodeError: + str_data = str(data).encode("latin1", errors="xmlcharrefreplace") + data_size = len(str_data) # therefore compute size here + if data_size > 0: + _write(fid, str_data, kind, data_size, FIFF.FIFFT_STRING, ">S") + + +def write_name_list(fid, kind, data): + """Write a 
colon-separated list of names. + + Parameters + ---------- + data : list of strings + """ + write_string(fid, kind, ":".join(data)) + + +def write_name_list_sanitized(fid, kind, lst, name): + """Write a sanitized, colon-separated list of names.""" + write_string(fid, kind, _safe_name_list(lst, "write", name)) + + +def _safe_name_list(lst, operation, name): + if operation == "write": + assert isinstance(lst, list | tuple | np.ndarray), type(lst) + if any("{COLON}" in val for val in lst): + raise ValueError(f'The substring "{{COLON}}" in {name} not supported.') + return ":".join(val.replace(":", "{COLON}") for val in lst) + else: + # take a sanitized string and return a list of strings + assert operation == "read" + assert lst is None or isinstance(lst, str) + if not lst: # None or empty string + return [] + return [val.replace("{COLON}", ":") for val in lst.split(":")] + + +def write_float_matrix(fid, kind, mat): + """Write a single-precision floating-point matrix tag.""" + _write_matrix_data(fid, kind, mat, FIFF.FIFFT_FLOAT) + + +def write_double_matrix(fid, kind, mat): + """Write a double-precision floating-point matrix tag.""" + _write_matrix_data(fid, kind, mat, FIFF.FIFFT_DOUBLE) + + +def write_int_matrix(fid, kind, mat): + """Write integer 32 matrix tag.""" + _write_matrix_data(fid, kind, mat, FIFF.FIFFT_INT) + + +def write_complex_float_matrix(fid, kind, mat): + """Write complex 64 matrix tag.""" + _write_matrix_data(fid, kind, mat, FIFF.FIFFT_COMPLEX_FLOAT) + + +def write_complex_double_matrix(fid, kind, mat): + """Write complex 128 matrix tag.""" + _write_matrix_data(fid, kind, mat, FIFF.FIFFT_COMPLEX_DOUBLE) + + +def _write_matrix_data(fid, kind, mat, data_type): + dtype = { + FIFF.FIFFT_FLOAT: ">f4", + FIFF.FIFFT_DOUBLE: ">f8", + FIFF.FIFFT_COMPLEX_FLOAT: ">c8", + FIFF.FIFFT_COMPLEX_DOUBLE: ">c16", + FIFF.FIFFT_INT: ">i4", + }[data_type] + dtype = np.dtype(dtype) + data_size = dtype.itemsize * mat.size + 4 * (mat.ndim + 1) + matrix_type = data_type | FIFF.FIFFT_MATRIX + fid.write(np.array(kind, dtype=">i4").tobytes()) + fid.write(np.array(matrix_type, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + fid.write(np.array(mat, dtype=dtype).tobytes()) + dims = np.empty(mat.ndim + 1, dtype=np.int32) + dims[: mat.ndim] = mat.shape[::-1] + dims[-1] = mat.ndim + fid.write(np.array(dims, dtype=">i4").tobytes()) + check_fiff_length(fid) + + +def get_machid(): + """Get (mostly) unique machine ID. + + Returns + ------- + ids : array (length 2, int32) + The machine identifier used in MNE. 
+ """ + mac = f"{uuid.getnode():012x}".encode() # byte conversion for Py3 + mac = re.findall(b"..", mac) # split string + mac += [b"00", b"00"] # add two more fields + + # Convert to integer in reverse-order (for some reason) + from codecs import encode + + mac = b"".join([encode(h, "hex_codec") for h in mac[::-1]]) + ids = np.flipud(np.frombuffer(mac, np.int32, count=2)) + return ids + + +def get_new_file_id(): + """Create a new file ID tag.""" + secs, usecs = divmod(time.time(), 1.0) + secs, usecs = int(secs), int(usecs * 1e6) + return { + "machid": get_machid(), + "version": FIFF.FIFFC_VERSION, + "secs": secs, + "usecs": usecs, + } + + +def write_id(fid, kind, id_=None): + """Write fiff id.""" + id_ = _generate_meas_id() if id_ is None else id_ + + data_size = 5 * 4 # The id comprises five integers + fid.write(np.array(kind, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFT_ID_STRUCT, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + + # Collect the bits together for one write + arr = np.array( + [id_["version"], id_["machid"][0], id_["machid"][1], id_["secs"], id_["usecs"]], + dtype=">i4", + ) + fid.write(arr.tobytes()) + + +def start_block(fid, kind): + """Write a FIFF_BLOCK_START tag.""" + write_int(fid, FIFF.FIFF_BLOCK_START, kind) + + +def end_block(fid, kind): + """Write a FIFF_BLOCK_END tag.""" + write_int(fid, FIFF.FIFF_BLOCK_END, kind) + + +def start_file(fname, id_=None, *, overwrite=True): + """Open a fif file for writing and writes the compulsory header tags. + + Parameters + ---------- + fname : path-like | fid + The name of the file to open. It is recommended + that the name ends with .fif or .fif.gz. Can also be an + already opened file. + id_ : dict | None + ID to use for the FIFF_FILE_ID. + """ + if _file_like(fname): + logger.debug(f"Writing using {type(fname)} I/O") + fid = fname + fid.seek(0) + else: + fname = _check_fname(fname, overwrite=overwrite) + fname = str(fname) + if op.splitext(fname)[1].lower() == ".gz": + logger.debug("Writing using gzip") + # defaults to compression level 9, which is barely smaller but much + # slower. 2 offers a good compromise. 
+ fid = GzipFile(fname, "wb", compresslevel=2) + else: + logger.debug("Writing using normal I/O") + fid = open(fname, "wb") + # Write the compulsory items + write_id(fid, FIFF.FIFF_FILE_ID, id_) + write_int(fid, FIFF.FIFF_DIR_POINTER, -1) + write_int(fid, FIFF.FIFF_FREE_LIST, -1) + return fid + + +@contextmanager +def start_and_end_file(fname, id_=None, *, overwrite=True): + """Start and (if successfully written) close the file.""" + with start_file(fname, id_=id_, overwrite=overwrite) as fid: + yield fid + end_file(fid) # we only hit this line if the yield does not err + + +def check_fiff_length(fid, close=True): + """Ensure our file hasn't grown too large to work properly.""" + if fid.tell() > 2147483648: # 2 ** 31, FIFF uses signed 32-bit locations + if close: + fid.close() + raise OSError( + "FIFF file exceeded 2GB limit, please split file, reduce" + " split_size (if possible), or save to a different " + "format" + ) + + +def end_file(fid): + """Write the closing tags to a fif file and closes the file.""" + write_nop(fid, last=True) + check_fiff_length(fid) + fid.close() + + +def write_coord_trans(fid, trans): + """Write a coordinate transformation structure.""" + data_size = 4 * 2 * 12 + 4 * 2 + fid.write(np.array(FIFF.FIFF_COORD_TRANS, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFT_COORD_TRANS_STRUCT, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + fid.write(np.array(trans["from"], dtype=">i4").tobytes()) + fid.write(np.array(trans["to"], dtype=">i4").tobytes()) + + # The transform... + rot = trans["trans"][:3, :3] + move = trans["trans"][:3, 3] + fid.write(np.array(rot, dtype=">f4").tobytes()) + fid.write(np.array(move, dtype=">f4").tobytes()) + + # ...and its inverse + trans_inv = np.linalg.inv(trans["trans"]) + rot = trans_inv[:3, :3] + move = trans_inv[:3, 3] + fid.write(np.array(rot, dtype=">f4").tobytes()) + fid.write(np.array(move, dtype=">f4").tobytes()) + + +def write_ch_info(fid, ch): + """Write a channel information record to a fif file.""" + data_size = 4 * 13 + 4 * 7 + 16 + + fid.write(np.array(FIFF.FIFF_CH_INFO, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFT_CH_INFO_STRUCT, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + + # Start writing fiffChInfoRec + fid.write(np.array(ch["scanno"], dtype=">i4").tobytes()) + fid.write(np.array(ch["logno"], dtype=">i4").tobytes()) + fid.write(np.array(ch["kind"], dtype=">i4").tobytes()) + fid.write(np.array(ch["range"], dtype=">f4").tobytes()) + fid.write(np.array(ch["cal"], dtype=">f4").tobytes()) + fid.write(np.array(ch["coil_type"], dtype=">i4").tobytes()) + fid.write(np.array(ch["loc"], dtype=">f4").tobytes()) # writing 12 values + + # unit and unit multiplier + fid.write(np.array(ch["unit"], dtype=">i4").tobytes()) + fid.write(np.array(ch["unit_mul"], dtype=">i4").tobytes()) + + # Finally channel name + ch_name = ch["ch_name"][:15] + fid.write(np.array(ch_name, dtype=">c").tobytes()) + fid.write(b"\0" * (16 - len(ch_name))) + + +def write_dig_points(fid, dig, block=False, coord_frame=None, *, ch_names=None): + """Write a set of digitizer data points into a fif file.""" + if dig is not None: + data_size = 5 * 4 + if block: + start_block(fid, FIFF.FIFFB_ISOTRAK) + if coord_frame is not None: + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame) + for d in dig: + fid.write(np.array(FIFF.FIFF_DIG_POINT, 
">i4").tobytes()) + fid.write(np.array(FIFF.FIFFT_DIG_POINT_STRUCT, ">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, ">i4").tobytes()) + # Start writing fiffDigPointRec + fid.write(np.array(d["kind"], ">i4").tobytes()) + fid.write(np.array(d["ident"], ">i4").tobytes()) + fid.write(np.array(d["r"][:3], ">f4").tobytes()) + if ch_names is not None: + write_name_list_sanitized( + fid, FIFF.FIFF_MNE_CH_NAME_LIST, ch_names, "ch_names" + ) + if block: + end_block(fid, FIFF.FIFFB_ISOTRAK) + + +def write_float_sparse_rcs(fid, kind, mat): + """Write a single-precision sparse compressed row matrix tag.""" + return write_float_sparse(fid, kind, mat, fmt="csr") + + +def write_float_sparse(fid, kind, mat, fmt="auto"): + """Write a single-precision floating-point sparse matrix tag.""" + if fmt == "auto": + fmt = "csr" if isinstance(mat, csr_array) else "csc" + need = csr_array if fmt == "csr" else csc_array + matrix_type = getattr(FIFF, f"FIFFT_SPARSE_{fmt[-1].upper()}CS_MATRIX") + _validate_type(mat, need, "sparse") + matrix_type = matrix_type | FIFF.FIFFT_MATRIX | FIFF.FIFFT_FLOAT + nnzm = mat.nnz + nrow = mat.shape[0] + data_size = 4 * nnzm + 4 * nnzm + 4 * (nrow + 1) + 4 * 4 + + fid.write(np.array(kind, dtype=">i4").tobytes()) + fid.write(np.array(matrix_type, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + + fid.write(np.array(mat.data, dtype=">f4").tobytes()) + fid.write(np.array(mat.indices, dtype=">i4").tobytes()) + fid.write(np.array(mat.indptr, dtype=">i4").tobytes()) + + dims = [nnzm, mat.shape[0], mat.shape[1], 2] + fid.write(np.array(dims, dtype=">i4").tobytes()) + check_fiff_length(fid) + + +def _generate_meas_id(): + """Generate a new meas_id dict.""" + id_ = dict() + id_["version"] = FIFF.FIFFC_VERSION + id_["machid"] = get_machid() + id_["secs"], id_["usecs"] = DATE_NONE + return id_ diff --git a/mne/_freesurfer.py b/mne/_freesurfer.py index 64442166d5c..9b4ec3d4a74 100644 --- a/mne/_freesurfer.py +++ b/mne/_freesurfer.py @@ -1,54 +1,71 @@ -# -*- coding: utf-8 -*- """Freesurfer handling functions.""" -# Authors: Alex Rockhill -# Eric Larson -# + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
import os.path as op -import numpy as np from gzip import GzipFile from pathlib import Path -from .bem import _bem_find_surface, read_bem_surfaces -from .io.constants import FIFF -from .io.meas_info import read_fiducials -from .transforms import (apply_trans, invert_transform, combine_transforms, - _ensure_trans, read_ras_mni_t, Transform) -from .surface import read_surface, _read_mri_surface -from .utils import (verbose, _validate_type, _check_fname, _check_option, - get_subjects_dir, _import_nibabel, logger) +import numpy as np + +from ._fiff.constants import FIFF +from ._fiff.meas_info import read_fiducials +from .surface import _read_mri_surface, read_surface +from .transforms import ( + Transform, + _ensure_trans, + apply_trans, + combine_transforms, + invert_transform, + read_ras_mni_t, +) +from .utils import ( + _check_fname, + _check_option, + _import_nibabel, + _validate_type, + get_subjects_dir, + logger, + verbose, +) def _check_subject_dir(subject, subjects_dir): """Check that the Freesurfer subject directory is as expected.""" subjects_dir = Path(get_subjects_dir(subjects_dir, raise_error=True)) - for img_name in ('T1', 'brain', 'aseg'): + for img_name in ("T1", "brain", "aseg"): if not (subjects_dir / subject / "mri" / f"{img_name}.mgz").is_file(): - raise ValueError('Freesurfer recon-all subject folder ' - 'is incorrect or improperly formatted, ' - f'got {subjects_dir / subject}') + raise ValueError( + "Freesurfer recon-all subject folder " + "is incorrect or improperly formatted, " + f"got {subjects_dir / subject}" + ) return subjects_dir / subject def _get_aseg(aseg, subject, subjects_dir): """Check that the anatomical segmentation file exists and load it.""" - nib = _import_nibabel('load aseg') + nib = _import_nibabel("load aseg") subjects_dir = Path(get_subjects_dir(subjects_dir, raise_error=True)) - if not aseg.endswith('aseg'): - raise RuntimeError( - f'`aseg` file path must end with "aseg", got {aseg}') - aseg = _check_fname( - subjects_dir / subject / "mri" / (aseg + ".mgz"), - overwrite="read", - must_exist=True, - ) + if aseg == "auto": # use aparc+aseg if auto + aseg = _check_fname( + subjects_dir / subject / "mri" / "aparc+aseg.mgz", + overwrite="read", + must_exist=False, + ) + if not aseg: # if doesn't exist use wmparc + aseg = subjects_dir / subject / "mri" / "wmparc.mgz" + else: + aseg = subjects_dir / subject / "mri" / f"{aseg}.mgz" + _check_fname(aseg, overwrite="read", must_exist=True) aseg = nib.load(aseg) aseg_data = np.array(aseg.dataobj) return aseg, aseg_data -def _reorient_image(img, axcodes='RAS'): +def _reorient_image(img, axcodes="RAS"): """Reorient an image to a given orientation. Parameters @@ -70,11 +87,12 @@ def _reorient_image(img, axcodes='RAS'): ----- .. versionadded:: 0.24 """ - nib = _import_nibabel('reorient MRI image') + nib = _import_nibabel("reorient MRI image") orig_data = np.array(img.dataobj).astype(np.float32) # reorient data to RAS ornt = nib.orientations.axcodes2ornt( - nib.orientations.aff2axcodes(img.affine)).astype(int) + nib.orientations.aff2axcodes(img.affine) + ).astype(int) ras_ornt = nib.orientations.axcodes2ornt(axcodes) ornt_trans = nib.orientations.ornt_transform(ornt, ras_ornt) img_data = nib.orientations.apply_orientation(orig_data, ornt_trans) @@ -106,7 +124,7 @@ def _mri_orientation(orientation): .. versionadded:: 0.21 .. 
versionchanged:: 0.24 """ - _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal')) + _check_option("orientation", orientation, ("coronal", "axial", "sagittal")) axis = dict(coronal=1, axial=2, sagittal=0)[orientation] x, y = sorted(set([0, 1, 2]).difference(set([axis]))) return axis, x, y @@ -115,72 +133,81 @@ def _mri_orientation(orientation): def _get_mri_info_data(mri, data): # Read the segmentation data using nibabel if data: - _import_nibabel('load MRI atlas data') + _import_nibabel("load MRI atlas data") out = dict() - _, out['vox_mri_t'], out['mri_ras_t'], dims, _, mgz = _read_mri_info( - mri, return_img=True) + _, out["vox_mri_t"], out["mri_ras_t"], dims, _, mgz = _read_mri_info( + mri, return_img=True + ) out.update( - mri_width=dims[0], mri_height=dims[1], - mri_depth=dims[1], mri_volume_name=mri) + mri_width=dims[0], mri_height=dims[1], mri_depth=dims[1], mri_volume_name=mri + ) if data: assert mgz is not None - out['mri_vox_t'] = invert_transform(out['vox_mri_t']) - out['data'] = np.asarray(mgz.dataobj) + out["mri_vox_t"] = invert_transform(out["vox_mri_t"]) + out["data"] = np.asarray(mgz.dataobj) return out def _get_mgz_header(fname): """Adapted from nibabel to quickly extract header info.""" - fname = _check_fname(fname, overwrite='read', must_exist=True, - name='MRI image') + fname = _check_fname(fname, overwrite="read", must_exist=True, name="MRI image") if fname.suffix != ".mgz": - raise IOError('Filename must end with .mgz') - header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)), - ('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'), - ('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)), - ('Pxyz_c', '>f4', (3,))] + raise OSError("Filename must end with .mgz") + header_dtd = [ + ("version", ">i4"), + ("dims", ">i4", (4,)), + ("type", ">i4"), + ("dof", ">i4"), + ("goodRASFlag", ">i2"), + ("delta", ">f4", (3,)), + ("Mdc", ">f4", (3, 3)), + ("Pxyz_c", ">f4", (3,)), + ] header_dtype = np.dtype(header_dtd) - with GzipFile(fname, 'rb') as fid: + with GzipFile(fname, "rb") as fid: hdr_str = fid.read(header_dtype.itemsize) - header = np.ndarray(shape=(), dtype=header_dtype, - buffer=hdr_str) + header = np.ndarray(shape=(), dtype=header_dtype, buffer=hdr_str) # dims - dims = header['dims'].astype(int) + dims = header["dims"].astype(int) dims = dims[:3] if len(dims) == 4 else dims # vox2ras_tkr - delta = header['delta'] + delta = header["delta"] ds = np.array(delta, float) ns = np.array(dims * ds) / 2.0 - v2rtkr = np.array([[-ds[0], 0, 0, ns[0]], - [0, 0, ds[2], -ns[2]], - [0, -ds[1], 0, ns[1]], - [0, 0, 0, 1]], dtype=np.float32) + v2rtkr = np.array( + [ + [-ds[0], 0, 0, ns[0]], + [0, 0, ds[2], -ns[2]], + [0, -ds[1], 0, ns[1]], + [0, 0, 0, 1], + ], + dtype=np.float32, + ) # ras2vox d = np.diag(delta) pcrs_c = dims / 2.0 - Mdc = header['Mdc'].T - pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c)) + Mdc = header["Mdc"].T + pxyz_0 = header["Pxyz_c"] - np.dot(Mdc, np.dot(d, pcrs_c)) M = np.eye(4, 4) M[0:3, 0:3] = np.dot(Mdc, d) M[0:3, 3] = pxyz_0.T - header = dict(dims=dims, vox2ras_tkr=v2rtkr, vox2ras=M, - zooms=header['delta']) + header = dict(dims=dims, vox2ras_tkr=v2rtkr, vox2ras=M, zooms=header["delta"]) return header def _get_atlas_values(vol_info, rr): # Transform MRI coordinates (where our surfaces live) to voxels - rr_vox = apply_trans(vol_info['mri_vox_t'], rr) - good = ((rr_vox >= -.5) & - (rr_vox < np.array(vol_info['data'].shape, int) - 0.5)).all(-1) + rr_vox = apply_trans(vol_info["mri_vox_t"], rr) + good = ( + (rr_vox >= -0.5) & (rr_vox < 
np.array(vol_info["data"].shape, int) - 0.5) + ).all(-1) idx = np.round(rr_vox[good].T).astype(np.int64) values = np.full(rr.shape[0], np.nan) - values[good] = vol_info['data'][tuple(idx)] + values[good] = vol_info["data"][tuple(idx)] return values -def get_volume_labels_from_aseg(mgz_fname, return_colors=False, - atlas_ids=None): +def get_volume_labels_from_aseg(mgz_fname, return_colors=False, atlas_ids=None): """Return a list of names and colors of segmented volumes. Parameters @@ -215,7 +242,7 @@ def get_volume_labels_from_aseg(mgz_fname, return_colors=False, .. versionadded:: 0.9.0 """ - nib = _import_nibabel('load MRI atlas data') + nib = _import_nibabel("load MRI atlas data") mgz_fname = _check_fname( mgz_fname, overwrite="read", must_exist=True, name="mgz_fname" ) @@ -225,12 +252,13 @@ def get_volume_labels_from_aseg(mgz_fname, return_colors=False, if atlas_ids is None: atlas_ids, colors = read_freesurfer_lut() elif return_colors: - raise ValueError('return_colors must be False if atlas_ids are ' - 'provided') + raise ValueError("return_colors must be False if atlas_ids are provided") # restrict to the ones in the MRI, sorted by label name - keep = np.in1d(list(atlas_ids.values()), want) - keys = sorted((key for ki, key in enumerate(atlas_ids.keys()) if keep[ki]), - key=lambda x: atlas_ids[x]) + keep = np.isin(list(atlas_ids.values()), want) + keys = sorted( + (key for ki, key in enumerate(atlas_ids.keys()) if keep[ki]), + key=lambda x: atlas_ids[x], + ) if return_colors: colors = [colors[k] for k in keys] out = keys, colors @@ -244,8 +272,16 @@ def get_volume_labels_from_aseg(mgz_fname, return_colors=False, @verbose -def head_to_mri(pos, subject, mri_head_t, subjects_dir=None, *, - kind='mri', unscale=False, verbose=None): +def head_to_mri( + pos, + subject, + mri_head_t, + subjects_dir=None, + *, + kind="mri", + unscale=False, + verbose=None, +): """Convert pos from head coordinate system to MRI ones. Parameters @@ -280,23 +316,24 @@ def head_to_mri(pos, subject, mri_head_t, subjects_dir=None, *, This function requires nibabel. """ from .coreg import read_mri_cfg - _validate_type(kind, str, 'kind') - _check_option('kind', kind, ('ras', 'mri')) + + _validate_type(kind, str, "kind") + _check_option("kind", kind, ("ras", "mri")) subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) t1_fname = subjects_dir / subject / "mri" / "T1.mgz" - head_mri_t = _ensure_trans(mri_head_t, 'head', 'mri') - if kind == 'ras': + head_mri_t = _ensure_trans(mri_head_t, "head", "mri") + if kind == "ras": _, _, mri_ras_t, _, _ = _read_mri_info(t1_fname) - head_ras_t = combine_transforms(head_mri_t, mri_ras_t, 'head', 'ras') + head_ras_t = combine_transforms(head_mri_t, mri_ras_t, "head", "ras") head_dest_t = head_ras_t else: - assert kind == 'mri' + assert kind == "mri" head_dest_t = head_mri_t pos_dest = apply_trans(head_dest_t, pos) # unscale if requested if unscale: params = read_mri_cfg(subject, subjects_dir) - pos_dest /= params['scale'] + pos_dest /= params["scale"] pos_dest *= 1e3 # mm return pos_dest @@ -304,6 +341,7 @@ def head_to_mri(pos, subject, mri_head_t, subjects_dir=None, *, ############################################################################## # Surface to MNI conversion + @verbose def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, verbose=None): """Convert the array of vertices for a hemisphere to MNI coordinates. 
@@ -333,33 +371,30 @@ def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, verbose=None): hemis = [hemis] * len(vertices) if not len(hemis) == len(vertices): - raise ValueError('hemi and vertices must match in length') + raise ValueError("hemi and vertices must match in length") subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) - surfs = [ - subjects_dir / subject / "surf" / f"{h}.white" - for h in ["lh", "rh"] - ] + surfs = [subjects_dir / subject / "surf" / f"{h}.white" for h in ["lh", "rh"]] # read surface locations in MRI space rr = [read_surface(s)[0] for s in surfs] # take point locations in MRI space and convert to MNI coordinates xfm = read_talxfm(subject, subjects_dir) - xfm['trans'][:3, 3] *= 1000. # m->mm + xfm["trans"][:3, 3] *= 1000.0 # m->mm data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)]) if singleton: data = data[0] - return apply_trans(xfm['trans'], data) + return apply_trans(xfm["trans"], data) ############################################################################## # Volume to MNI conversion + @verbose -def head_to_mni(pos, subject, mri_head_t, subjects_dir=None, - verbose=None): +def head_to_mni(pos, subject, mri_head_t, subjects_dir=None, verbose=None): """Convert pos from head coordinate system to MNI ones. Parameters @@ -385,9 +420,12 @@ def head_to_mni(pos, subject, mri_head_t, subjects_dir=None, # before we go from head to MRI (surface RAS) head_mni_t = combine_transforms( - _ensure_trans(mri_head_t, 'head', 'mri'), - read_talxfm(subject, subjects_dir), 'head', 'mni_tal') - return apply_trans(head_mni_t, pos) * 1000. + _ensure_trans(mri_head_t, "head", "mri"), + read_talxfm(subject, subjects_dir), + "head", + "mni_tal", + ) + return apply_trans(head_mni_t, pos) * 1000.0 @verbose @@ -425,20 +463,17 @@ def get_mni_fiducials(subject, subjects_dir=None, verbose=None): # transformation, and/or project the points onto the head surface # (if available). fname_fids_fs = ( - Path(__file__).parent - / "data" - / "fsaverage" - / "fsaverage-fiducials.fif" + Path(__file__).parent / "data" / "fsaverage" / "fsaverage-fiducials.fif" ) # Read fsaverage fiducials file and subject Talairach. 
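vertex_to_mni above reads the subject's white surfaces and pushes the chosen vertices through the Talairach transform, returning millimeter MNI coordinates. A hedged usage sketch, assuming the MNE sample dataset (and its FreeSurfer "subjects" folder) is available locally; data_path() will download it otherwise:

import mne

subjects_dir = mne.datasets.sample.data_path() / "subjects"
# vertex 0 of the left hemisphere (hemi index 0) of subject 'sample'
mni_pos = mne.vertex_to_mni(
    vertices=[0], hemis=0, subject="sample", subjects_dir=subjects_dir
)
print(mni_pos.shape)  # (1, 3): x, y, z in MNI space, in millimeters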
fids, coord_frame = read_fiducials(fname_fids_fs) assert coord_frame == FIFF.FIFFV_COORD_MRI - if subject == 'fsaverage': + if subject == "fsaverage": return fids # special short-circuit for fsaverage mni_mri_t = invert_transform(read_talxfm(subject, subjects_dir)) for f in fids: - f['r'] = apply_trans(mni_mri_t, f['r']) + f["r"] = apply_trans(mni_mri_t, f["r"]) return fids @@ -460,38 +495,27 @@ def estimate_head_mri_t(subject, subjects_dir=None, verbose=None): ------- %(trans_not_none)s """ - from .channels.montage import make_dig_montage, compute_native_head_t + from .channels.montage import compute_native_head_t, make_dig_montage subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) lpa, nasion, rpa = get_mni_fiducials(subject, subjects_dir) - montage = make_dig_montage(lpa=lpa['r'], nasion=nasion['r'], rpa=rpa['r'], - coord_frame='mri') + montage = make_dig_montage( + lpa=lpa["r"], nasion=nasion["r"], rpa=rpa["r"], coord_frame="mri" + ) return invert_transform(compute_native_head_t(montage)) -def _ensure_image_in_surface_RAS(image, subject, subjects_dir): - """Check if the image is in Freesurfer surface RAS space.""" - nib = _import_nibabel('load a volume image') - if not isinstance(image, nib.spatialimages.SpatialImage): - image = nib.load(image) - image = nib.MGHImage(image.dataobj.astype(np.float32), image.affine) - fs_img = nib.load(op.join(subjects_dir, subject, 'mri', 'brain.mgz')) - if not np.allclose(image.affine, fs_img.affine, atol=1e-6): - raise RuntimeError('The `image` is not aligned to Freesurfer ' - 'surface RAS space. This space is required as ' - 'it is the space where the anatomical ' - 'segmentation and reconstructed surfaces are') - return image # returns MGH image for header - - def _get_affine_from_lta_info(lines): """Get the vox2ras affine from lta file info.""" - volume_data = np.loadtxt( - [line.split('=')[1] for line in lines]) + volume_data = np.loadtxt([line.split("=")[1] for line in lines]) # get the size of the volume (number of voxels), slice resolution. # the matrix of directional cosines and the ras at the center of the bore - dims, deltas, dir_cos, center_ras = \ - volume_data[0], volume_data[1], volume_data[2:5], volume_data[5] + dims, deltas, dir_cos, center_ras = ( + volume_data[0], + volume_data[1], + volume_data[2:5], + volume_data[5], + ) dir_cos_delta = dir_cos.T * deltas vol_center = (dir_cos_delta @ dims[:3]) / 2 affine = np.eye(4) @@ -515,11 +539,11 @@ def read_lta(fname, verbose=None): affine : ndarray The affine transformation described by the lta file. """ - _check_fname(fname, 'read', must_exist=True) - with open(fname, 'r') as fid: + _check_fname(fname, "read", must_exist=True) + with open(fname) as fid: lines = fid.readlines() # 0 is linear vox2vox, 1 is linear ras2ras - trans_type = int(lines[0].split('=')[1].strip()[0]) + trans_type = int(lines[0].split("=")[1].strip()[0]) assert trans_type in (0, 1) affine = np.loadtxt(lines[5:9]) if trans_type == 1: @@ -557,7 +581,7 @@ def read_talxfm(subject, subjects_dir=None, verbose=None): subjects_dir = get_subjects_dir(subjects_dir) # Setup the RAS to MNI transform ras_mni_t = read_ras_mni_t(subject, subjects_dir) - ras_mni_t['trans'][:3, 3] /= 1000. # mm->m + ras_mni_t["trans"][:3, 3] /= 1000.0 # mm->m # We want to get from Freesurfer surface RAS ('mri') to MNI ('mni_tal'). # This file only gives us RAS (non-zero origin) ('ras') to MNI ('mni_tal'). 
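_get_affine_from_lta_info above rebuilds the vox2ras affine from the volume geometry stored in an LTA file: scale the directional cosines by the voxel sizes, then translate so the center of the voxel grid lands on the stored RAS center. A NumPy sketch of that assembly with made-up geometry:

import numpy as np

dims = np.array([256.0, 256.0, 256.0])    # voxel counts
deltas = np.array([1.0, 1.0, 1.0])        # voxel sizes (mm)
dir_cos = np.eye(3)                       # directional cosines
center_ras = np.array([2.0, -10.0, 5.0])  # RAS coords of the volume center

dir_cos_delta = dir_cos.T * deltas
vol_center = (dir_cos_delta @ dims[:3]) / 2
affine = np.eye(4)
affine[:3, :3] = dir_cos_delta
affine[:3, 3] = center_ras - vol_center

# the voxel at the center of the grid maps onto center_ras
center_vox = np.concatenate([dims / 2, [1.0]])
print(affine @ center_vox)  # [  2. -10.   5.   1.]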
@@ -569,33 +593,39 @@ def read_talxfm(subject, subjects_dir=None, verbose=None): if not path.is_file(): path = subjects_dir / subject / "mri" / "T1.mgz" if not path.is_file(): - raise IOError('mri not found: %s' % path) + raise OSError(f"mri not found: {path}") _, _, mri_ras_t, _, _ = _read_mri_info(path) - mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal') + mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, "mri", "mni_tal") return mri_mni_t -def _check_mri(mri, subject, subjects_dir): +def _check_mri(mri, subject, subjects_dir) -> str: """Check whether an mri exists in the Freesurfer subject directory.""" - _validate_type(mri, 'path-like', 'mri') - if op.isfile(mri) and op.basename(mri) != mri: - return mri - if not op.isfile(mri): + _validate_type(mri, "path-like", mri) + mri = Path(mri) + if mri.is_file() and mri.name != mri: + return str(mri) + elif not mri.is_file(): if subject is None: raise FileNotFoundError( - f'MRI file {mri!r} not found and no subject provided') - subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) - mri = op.join(subjects_dir, subject, 'mri', mri) - if not op.isfile(mri): - raise FileNotFoundError(f'MRI file {mri!r} not found') - if op.basename(mri) == mri: - err = (f'Ambiguous filename - found {mri!r} in current folder.\n' - 'If this is correct prefix name with relative or absolute path') - raise IOError(err) - return mri - - -def _read_mri_info(path, units='m', return_img=False, use_nibabel=False): + f"MRI file {mri!r} not found and no subject provided." + ) + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + mri = subjects_dir / subject / "mri" / mri + if not mri.is_file(): + raise FileNotFoundError( + f"MRI file {mri!r} not found in the subjects directory " + f"{subjects_dir!r} for subject {subject}." + ) + if mri.name == mri: + raise OSError( + f"Ambiguous filename - found {mri!r} in current folder. " + "If this is correct prefix name with relative or absolute path." 
+ ) + return str(mri) + + +def _read_mri_info(path, units="m", return_img=False, use_nibabel=False): # This is equivalent but 100x slower, so only use nibabel if we need to # (later): if use_nibabel: @@ -607,29 +637,28 @@ def _read_mri_info(path, units='m', return_img=False, use_nibabel=False): zooms = hdr.get_zooms()[:3] else: hdr = _get_mgz_header(path) - n_orig = hdr['vox2ras'] - t_orig = hdr['vox2ras_tkr'] - dims = hdr['dims'] - zooms = hdr['zooms'] + n_orig = hdr["vox2ras"] + t_orig = hdr["vox2ras_tkr"] + dims = hdr["dims"] + zooms = hdr["zooms"] # extract the MRI_VOXEL to RAS (non-zero origin) transform - vox_ras_t = Transform('mri_voxel', 'ras', n_orig) + vox_ras_t = Transform("mri_voxel", "ras", n_orig) # extract the MRI_VOXEL to MRI transform - vox_mri_t = Transform('mri_voxel', 'mri', t_orig) + vox_mri_t = Transform("mri_voxel", "mri", t_orig) # construct the MRI to RAS (non-zero origin) transform - mri_ras_t = combine_transforms( - invert_transform(vox_mri_t), vox_ras_t, 'mri', 'ras') + mri_ras_t = combine_transforms(invert_transform(vox_mri_t), vox_ras_t, "mri", "ras") - assert units in ('m', 'mm') - if units == 'm': + assert units in ("m", "mm") + if units == "m": conv = np.array([[1e-3, 1e-3, 1e-3, 1]]).T # scaling and translation terms - vox_ras_t['trans'] *= conv - vox_mri_t['trans'] *= conv + vox_ras_t["trans"] *= conv + vox_mri_t["trans"] *= conv # just the translation term - mri_ras_t['trans'][:, 3:4] *= conv + mri_ras_t["trans"][:, 3:4] *= conv out = (vox_ras_t, vox_mri_t, mri_ras_t, dims, zooms) if return_img: @@ -654,8 +683,8 @@ def read_freesurfer_lut(fname=None): Mapping from label names to colors. """ lut = _get_lut(fname) - names, ids = lut['name'], lut['id'] - colors = np.array([lut['R'], lut['G'], lut['B'], lut['A']], float).T + names, ids = lut["name"], lut["id"] + colors = np.array([lut["R"], lut["G"], lut["B"], lut["A"]], float).T atlas_ids = dict(zip(names, ids)) colors = dict(zip(names, colors)) return atlas_ids, colors @@ -665,22 +694,29 @@ def _get_lut(fname=None): """Get a FreeSurfer LUT.""" if fname is None: fname = Path(__file__).parent / "data" / "FreeSurferColorLUT.txt" - _check_fname(fname, 'read', must_exist=True) - dtype = [('id', ' 0 + assert len(lut["name"]) > 0 + lut["name"] = [str(name) for name in lut["name"]] return lut @@ -710,44 +746,52 @@ def _get_head_surface(surf, subject, subjects_dir, bem=None, verbose=None): ----- .. versionadded: 0.24 """ - _check_option( - 'surf', surf, ('auto', 'head', 'outer_skin', 'head-dense', 'seghead')) - if surf in ('auto', 'head', 'outer_skin'): + from .bem import _bem_find_surface, read_bem_surfaces + + _check_option("surf", surf, ("auto", "head", "outer_skin", "head-dense", "seghead")) + if surf in ("auto", "head", "outer_skin"): if bem is not None: try: - return _bem_find_surface(bem, 'head') + return _bem_find_surface(bem, "head") except RuntimeError: - logger.info('Could not find the surface for ' - 'head in the provided BEM model, ' - 'looking in the subject directory.') + logger.info( + "Could not find the surface for " + "head in the provided BEM model, " + "looking in the subject directory." 
+ ) if subject is None: - if surf == 'auto': + if surf == "auto": return - raise ValueError('To plot the head surface, the BEM/sphere' - ' model must contain a head surface ' - 'or "subject" must be provided (got ' - 'None)') - subject_dir = op.join( - get_subjects_dir(subjects_dir, raise_error=True), subject) - if surf in ('head-dense', 'seghead'): - try_fnames = [op.join(subject_dir, 'bem', f'{subject}-head-dense.fif'), - op.join(subject_dir, 'surf', 'lh.seghead')] + raise ValueError( + "To plot the head surface, the BEM/sphere" + " model must contain a head surface " + 'or "subject" must be provided (got ' + "None)" + ) + subject_dir = op.join(get_subjects_dir(subjects_dir, raise_error=True), subject) + if surf in ("head-dense", "seghead"): + try_fnames = [ + op.join(subject_dir, "bem", f"{subject}-head-dense.fif"), + op.join(subject_dir, "surf", "lh.seghead"), + ] else: try_fnames = [ - op.join(subject_dir, 'bem', 'outer_skin.surf'), - op.join(subject_dir, 'bem', 'flash', 'outer_skin.surf'), - op.join(subject_dir, 'bem', f'{subject}-head-sparse.fif'), - op.join(subject_dir, 'bem', f'{subject}-head.fif'), + op.join(subject_dir, "bem", "outer_skin.surf"), + op.join(subject_dir, "bem", "flash", "outer_skin.surf"), + op.join(subject_dir, "bem", f"{subject}-head-sparse.fif"), + op.join(subject_dir, "bem", f"{subject}-head.fif"), ] for fname in try_fnames: if op.exists(fname): - logger.info(f'Using {op.basename(fname)} for head surface.') - if op.splitext(fname)[-1] == '.fif': - return read_bem_surfaces(fname, on_defects='warn')[0] + logger.info(f"Using {op.basename(fname)} for head surface.") + if op.splitext(fname)[-1] == ".fif": + return read_bem_surfaces(fname, on_defects="warn")[0] else: return _read_mri_surface(fname) - raise IOError('No head surface found for subject ' - f'{subject} after trying:\n' + '\n'.join(try_fnames)) + raise OSError( + "No head surface found for subject " + f"{subject} after trying:\n" + "\n".join(try_fnames) + ) @verbose @@ -775,31 +819,36 @@ def _get_skull_surface(surf, subject, subjects_dir, bem=None, verbose=None): ----- .. versionadded: 0.24 """ + from .bem import _bem_find_surface + if bem is not None: try: - return _bem_find_surface(bem, surf + '_skull') + return _bem_find_surface(bem, surf + "_skull") except RuntimeError: - logger.info('Could not find the surface for ' - 'skull in the provided BEM model, ' - 'looking in the subject directory.') + logger.info( + "Could not find the surface for " + "skull in the provided BEM model, " + "looking in the subject directory." 
+ ) subjects_dir = Path(get_subjects_dir(subjects_dir, raise_error=True)) fname = _check_fname( subjects_dir / subject / "bem" / (surf + "_skull.surf"), overwrite="read", must_exist=True, - name=f"{surf} skull surface" + name=f"{surf} skull surface", ) return _read_mri_surface(fname) def _estimate_talxfm_rigid(subject, subjects_dir): - from .coreg import fit_matched_points, _trans_from_params + from .coreg import _trans_from_params, fit_matched_points + xfm = read_talxfm(subject, subjects_dir) # XYZ+origin + halfway pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5]) pts_subj = apply_trans(invert_transform(xfm), pts_tal) # we fit with scaling enabled, but then discard it (we just need # the rigid-body components) - params = fit_matched_points(pts_subj, pts_tal, scale=3, out='params') + params = fit_matched_points(pts_subj, pts_tal, scale=3, out="params") rigid = _trans_from_params((True, True, False), params[:6]) return rigid diff --git a/mne/_ola.py b/mne/_ola.py index d1ab34c235f..939aff39646 100644 --- a/mne/_ola.py +++ b/mne/_ola.py @@ -1,17 +1,17 @@ -# -*- coding: utf-8 -*- -# Authors: Eric Larson - +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np +from scipy.signal import get_window -from .utils import _ensure_int, verbose, logger - +from .utils import _ensure_int, _validate_type, logger, verbose ############################################################################### # Class for interpolation between adjacent points -class _Interp2(object): + +class _Interp2: r"""Interpolate between two points. Parameters @@ -42,56 +42,59 @@ class _Interp2(object): """ - def __init__(self, control_points, values, interp='hann'): + def __init__(self, control_points, values, interp="hann", *, name="Interp2"): # set up interpolation self.control_points = np.array(control_points, int).ravel() - if not np.array_equal(np.unique(self.control_points), - self.control_points): - raise ValueError('Control points must be sorted and unique') + if not np.array_equal(np.unique(self.control_points), self.control_points): + raise ValueError("Control points must be sorted and unique") if len(self.control_points) == 0: - raise ValueError('Must be at least one control point') + raise ValueError("Must be at least one control point") if not (self.control_points >= 0).all(): - raise ValueError('All control points must be positive (got %s)' - % (self.control_points[:3],)) + raise ValueError( + f"All control points must be positive (got {self.control_points[:3]})" + ) if isinstance(values, np.ndarray): values = [values] - if isinstance(values, (list, tuple)): + if isinstance(values, list | tuple): for v in values: if not (v is None or isinstance(v, np.ndarray)): - raise TypeError('All entries in "values" must be ndarray ' - 'or None, got %s' % (type(v),)) + raise TypeError( + 'All entries in "values" must be ndarray or None, got ' + f"{type(v)}" + ) if v is not None and v.shape[0] != len(self.control_points): - raise ValueError('Values, if provided, must be the same ' - 'length as the number of control points ' - '(%s), got %s' - % (len(self.control_points), v.shape[0])) + raise ValueError( + "Values, if provided, must be the same length as the number of " + f"control points ({len(self.control_points)}), got {v.shape[0]}" + ) use_values = values def val(pt): idx = np.where(control_points == pt)[0][0] return [v[idx] if v is not None else None for v in use_values] + values = val self.values = values self.n_last = None self._position 
= 0 # start at zero self._left_idx = 0 self._left = self._right = self._use_interp = None - known_types = ('cos2', 'linear', 'zero', 'hann') + self.name = name + known_types = ("cos2", "linear", "zero", "hann") if interp not in known_types: - raise ValueError('interp must be one of %s, got "%s"' - % (known_types, interp)) + raise ValueError(f'interp must be one of {known_types}, got "{interp}"') self._interp = interp def feed_generator(self, n_pts): """Feed data and get interpolators as a generator.""" self.n_last = 0 - n_pts = _ensure_int(n_pts, 'n_pts') + n_pts = _ensure_int(n_pts, "n_pts") original_position = self._position stop = self._position + n_pts - logger.debug('Feed %s (%s-%s)' % (n_pts, self._position, stop)) + logger.debug(f" ~ {self.name} Feed {n_pts} ({self._position}-{stop})") used = np.zeros(n_pts, bool) if self._left is None: # first one - logger.debug(' Eval @ %s (%s)' % (0, self.control_points[0])) + logger.debug(f" ~ {self.name} Eval @ 0 ({self.control_points[0]})") self._left = self.values(self.control_points[0]) if len(self.control_points) == 1: self._right = self._left @@ -99,9 +102,8 @@ def feed_generator(self, n_pts): # Left zero-order hold condition if self._position < self.control_points[self._left_idx]: - n_use = min(self.control_points[self._left_idx] - self._position, - n_pts) - logger.debug(' Left ZOH %s' % n_use) + n_use = min(self.control_points[self._left_idx] - self._position, n_pts) + logger.debug(f" ~ {self.name} Left ZOH {n_use}") this_sl = slice(None, n_use) assert used[this_sl].size == n_use assert not used[this_sl].any() @@ -126,35 +128,38 @@ def feed_generator(self, n_pts): self._left_idx += 1 self._use_interp = None # need to recreate it eval_pt = self.control_points[self._left_idx + 1] - logger.debug(' Eval @ %s (%s)' - % (self._left_idx + 1, eval_pt)) + logger.debug( + f" ~ {self.name} Eval @ {self._left_idx + 1} ({eval_pt})" + ) self._right = self.values(eval_pt) assert self._right is not None left_point = self.control_points[self._left_idx] right_point = self.control_points[self._left_idx + 1] if self._use_interp is None: interp_span = right_point - left_point - if self._interp == 'zero': + if self._interp == "zero": self._use_interp = None - elif self._interp == 'linear': - self._use_interp = np.linspace(1., 0., interp_span, - endpoint=False) + elif self._interp == "linear": + self._use_interp = np.linspace( + 1.0, 0.0, interp_span, endpoint=False + ) else: # self._interp in ('cos2', 'hann'): self._use_interp = np.cos( - np.linspace(0, np.pi / 2., interp_span, - endpoint=False)) + np.linspace(0, np.pi / 2.0, interp_span, endpoint=False) + ) self._use_interp *= self._use_interp n_use = min(stop, right_point) - self._position if n_use > 0: - logger.debug(' Interp %s %s (%s-%s)' % (self._interp, n_use, - left_point, right_point)) + logger.debug( + f" ~ {self.name} Interp {self._interp} {n_use} " + f"({left_point}-{right_point})" + ) interp_start = self._position - left_point assert interp_start >= 0 if self._use_interp is None: this_interp = None else: - this_interp = \ - self._use_interp[interp_start:interp_start + n_use] + this_interp = self._use_interp[interp_start : interp_start + n_use] assert this_interp.size == n_use this_sl = slice(n_used, n_used + n_use) assert used[this_sl].size == n_use @@ -168,7 +173,7 @@ def feed_generator(self, n_pts): if self.control_points[self._left_idx] <= self._position: n_use = stop - self._position if n_use > 0: - logger.debug(' Right ZOH %s' % n_use) + logger.debug(f" ~ {self.name} Right ZOH %s" % n_use) 
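In _Interp2, the "hann" option crossfades between the values at two neighbouring control points using a squared-cosine ramp, so the left and right weights always sum to one. A pure-NumPy sketch of that weighting over a single span; the values and span length are made up:

import numpy as np

left_val, right_val = 10.0, 20.0
span = 8  # samples between the two control points
w = np.cos(np.linspace(0, np.pi / 2.0, span, endpoint=False)) ** 2
interpolated = left_val * w + right_val * (1.0 - w)
print(interpolated)  # starts at 10.0 and ramps smoothly toward 20.0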
this_sl = slice(n_pts - n_use, None) assert not used[this_sl].any() used[this_sl] = True @@ -188,16 +193,18 @@ def feed(self, n_pts): out_arrays = None for o in self.feed_generator(n_pts): if out_arrays is None: - out_arrays = [np.empty(v.shape + (n_pts,)) - if v is not None else None for v in o[1]] + out_arrays = [ + np.empty(v.shape + (n_pts,)) if v is not None else None + for v in o[1] + ] for ai, arr in enumerate(out_arrays): if arr is not None: if o[3] is None: arr[..., o[0]] = o[1][ai][..., np.newaxis] else: - arr[..., o[0]] = ( - o[1][ai][..., np.newaxis] * o[3] + - o[2][ai][..., np.newaxis] * (1. - o[3])) + arr[..., o[0]] = o[1][ai][..., np.newaxis] * o[3] + o[2][ai][ + ..., np.newaxis + ] * (1.0 - o[3]) assert out_arrays is not None return out_arrays @@ -207,14 +214,13 @@ def feed(self, n_pts): def _check_store(store): + _validate_type(store, (np.ndarray, list, tuple, _Storer), "store") if isinstance(store, np.ndarray): store = [store] - if isinstance(store, (list, tuple)) and all(isinstance(s, np.ndarray) - for s in store): + if not isinstance(store, _Storer): + if not all(isinstance(s, np.ndarray) for s in store): + raise TypeError("All instances must be ndarrays") store = _Storer(*store) - if not callable(store): - raise TypeError('store must be callable, got type %s' - % (type(store),)) return store @@ -226,10 +232,8 @@ class _COLA: process : callable A function that takes a chunk of input data with shape ``(n_channels, n_samples)`` and processes it. - store : callable | ndarray - A function that takes a completed chunk of output data. - Can also be an ``ndarray``, in which case it is treated as the - output data in which to store the results. + store : ndarray | list of ndarray | _Storer + The output data in which to store the results. n_total : int The total number of samples. 
n_samples : int @@ -262,54 +266,70 @@ class _COLA: """ @verbose - def __init__(self, process, store, n_total, n_samples, n_overlap, - sfreq, window='hann', tol=1e-10, *, verbose=None): - from scipy.signal import get_window - n_samples = _ensure_int(n_samples, 'n_samples') - n_overlap = _ensure_int(n_overlap, 'n_overlap') - n_total = _ensure_int(n_total, 'n_total') + def __init__( + self, + process, + store, + n_total, + n_samples, + n_overlap, + sfreq, + window="hann", + tol=1e-10, + *, + name="COLA", + verbose=None, + ): + n_samples = _ensure_int(n_samples, "n_samples") + n_overlap = _ensure_int(n_overlap, "n_overlap") + n_total = _ensure_int(n_total, "n_total") if n_samples <= 0: - raise ValueError('n_samples must be > 0, got %s' % (n_samples,)) + raise ValueError(f"n_samples must be > 0, got {n_samples}") if n_overlap < 0: - raise ValueError('n_overlap must be >= 0, got %s' % (n_overlap,)) + raise ValueError(f"n_overlap must be >= 0, got {n_overlap}") if n_total < 0: - raise ValueError('n_total must be >= 0, got %s' % (n_total,)) + raise ValueError(f"n_total must be >= 0, got {n_total}") self._n_samples = int(n_samples) self._n_overlap = int(n_overlap) del n_samples, n_overlap if n_total < self._n_samples: - raise ValueError('Number of samples per window (%d) must be at ' - 'most the total number of samples (%s)' - % (self._n_samples, n_total)) + raise ValueError( + f"Number of samples per window ({self._n_samples}) must be at " + f"most the total number of samples ({n_total})" + ) if not callable(process): - raise TypeError('process must be callable, got type %s' - % (type(process),)) + raise TypeError(f"process must be callable, got type {type(process)}") self._process = process self._step = self._n_samples - self._n_overlap self._store = _check_store(store) self._idx = 0 self._in_buffers = self._out_buffers = None + self.name = name # Create our window boundaries - window_name = window if isinstance(window, str) else 'custom' - self._window = get_window(window, self._n_samples, - fftbins=(self._n_samples - 1) % 2) - self._window /= _check_cola(self._window, self._n_samples, self._step, - window_name, tol=tol) + window_name = window if isinstance(window, str) else "custom" + self._window = get_window( + window, self._n_samples, fftbins=bool((self._n_samples - 1) % 2) + ) + self._window /= _check_cola( + self._window, self._n_samples, self._step, window_name, tol=tol + ) self.starts = np.arange(0, n_total - self._n_samples + 1, self._step) self.stops = self.starts + self._n_samples delta = n_total - self.stops[-1] self.stops[-1] = n_total sfreq = float(sfreq) - pl = 's' if len(self.starts) != 1 else '' - logger.info(' Processing %4d data chunk%s of (at least) %0.1f s ' - 'with %0.1f s overlap and %s windowing' - % (len(self.starts), pl, self._n_samples / sfreq, - self._n_overlap / sfreq, window_name)) + pl = "s" if len(self.starts) != 1 else "" + logger.info( + f" Processing {len(self.starts):4d} data chunk{pl} of (at least) " + f"{self._n_samples / sfreq:0.1f} s with " + f"{self._n_overlap / sfreq:0.1f} s overlap and {window_name} windowing" + ) del window, window_name if delta > 0: - logger.info(' The final %0.3f s will be lumped into the ' - 'final window' % (delta / sfreq,)) + logger.info( + f" The final {delta / sfreq} s will be lumped into the final window" + ) @property def _in_offset(self): @@ -323,115 +343,131 @@ def feed(self, *datas, verbose=None, **kwargs): if self._in_buffers is None: self._in_buffers = [None] * len(datas) if len(datas) != len(self._in_buffers): - raise 
ValueError('Got %d array(s), needed %d' - % (len(datas), len(self._in_buffers))) + raise ValueError( + f"Got {len(datas)} array(s), needed {len(self._in_buffers)}" + ) + current_offset = 0 # should be updated below for di, data in enumerate(datas): if not isinstance(data, np.ndarray) or data.ndim < 1: - raise TypeError('data entry %d must be an 2D ndarray, got %s' - % (di, type(data),)) + raise TypeError( + f"data entry {di} must be an 2D ndarray, got {type(data)}" + ) if self._in_buffers[di] is None: # In practice, users can give large chunks, so we use # dynamic allocation of the in buffer. We could save some # memory allocation by only ever processing max_len at once, # but this would increase code complexity. - self._in_buffers[di] = np.empty( - data.shape[:-1] + (0,), data.dtype) - if data.shape[:-1] != self._in_buffers[di].shape[:-1] or \ - self._in_buffers[di].dtype != data.dtype: - raise TypeError('data must dtype %s and shape[:-1]==%s, ' - 'got dtype %s shape[:-1]=%s' - % (self._in_buffers[di].dtype, - self._in_buffers[di].shape[:-1], - data.dtype, data.shape[:-1])) - logger.debug(' + Appending %d->%d' - % (self._in_offset, self._in_offset + data.shape[-1])) - self._in_buffers[di] = np.concatenate( - [self._in_buffers[di], data], -1) + self._in_buffers[di] = np.empty(data.shape[:-1] + (0,), data.dtype) + if ( + data.shape[:-1] != self._in_buffers[di].shape[:-1] + or self._in_buffers[di].dtype != data.dtype + ): + raise TypeError( + f"data must dtype {self._in_buffers[di].dtype} and " + f"shape[:-1]=={self._in_buffers[di].shape[:-1]}, got dtype " + f"{data.dtype} shape[:-1]={data.shape[:-1]}" + ) + # This gets updated on first iteration, so store it before it updates + if di == 0: + current_offset = self._in_offset + logger.debug( + f" + {self.name}[{di}] Appending " + f"{current_offset}:{current_offset + data.shape[-1]}" + ) + self._in_buffers[di] = np.concatenate([self._in_buffers[di], data], -1) if self._in_offset > self.stops[-1]: - raise ValueError('data (shape %s) exceeded expected total ' - 'buffer size (%s > %s)' - % (data.shape, self._in_offset, - self.stops[-1])) + raise ValueError( + f"data (shape {data.shape}) exceeded expected total buffer size (" + f"{self._in_offset} > {self.stops[-1]})" + ) # Check to see if we can process the next chunk and dump outputs - while self._idx < len(self.starts) and \ - self._in_offset >= self.stops[self._idx]: + while self._idx < len(self.starts) and self._in_offset >= self.stops[self._idx]: start, stop = self.starts[self._idx], self.stops[self._idx] this_len = stop - start this_window = self._window.copy() if self._idx == len(self.starts) - 1: this_window = np.pad( - self._window, (0, this_len - len(this_window)), 'constant') + self._window, (0, this_len - len(this_window)), "constant" + ) for offset in range(self._step, len(this_window), self._step): n_use = len(this_window) - offset this_window[offset:] += self._window[:n_use] if self._idx == 0: - for offset in range(self._n_samples - self._step, 0, - -self._step): + for offset in range(self._n_samples - self._step, 0, -self._step): this_window[:offset] += self._window[-offset:] - logger.debug(' * Processing %d->%d' % (start, stop)) - this_proc = [in_[..., :this_len].copy() - for in_ in self._in_buffers] - if not all(proc.shape[-1] == this_len == this_window.size - for proc in this_proc): - raise RuntimeError('internal indexing error') - outs = self._process(*this_proc, **kwargs) + this_proc = [in_[..., :this_len].copy() for in_ in self._in_buffers] + logger.debug( + f" * 
{self.name}[:] Processing {start}:{stop} " + f"(e.g., {this_proc[0].flat[[0, -1]]})" + ) + if not all( + proc.shape[-1] == this_len == this_window.size for proc in this_proc + ): + raise RuntimeError("internal indexing error") + start = self._store.idx + stop = self._store.idx + this_len + outs = self._process(*this_proc, start=start, stop=stop, **kwargs) if self._out_buffers is None: max_len = np.max(self.stops - self.starts) - self._out_buffers = [np.zeros(o.shape[:-1] + (max_len,), - o.dtype) for o in outs] + self._out_buffers = [ + np.zeros(o.shape[:-1] + (max_len,), o.dtype) for o in outs + ] for oi, out in enumerate(outs): out *= this_window - self._out_buffers[oi][..., :stop - start] += out + self._out_buffers[oi][..., : stop - start] += out self._idx += 1 if self._idx < len(self.starts): next_start = self.starts[self._idx] else: next_start = self.stops[-1] delta = next_start - self.starts[self._idx - 1] + logger.debug( + f" + {self.name}[:] Shifting input and output buffers by " + f"{delta} samples (storing {start}:{stop})" + ) for di in range(len(self._in_buffers)): self._in_buffers[di] = self._in_buffers[di][..., delta:] - logger.debug(' - Shifting input/output buffers by %d samples' - % (delta,)) self._store(*[o[..., :delta] for o in self._out_buffers]) for ob in self._out_buffers: ob[..., :-delta] = ob[..., delta:] - ob[..., -delta:] = 0. + ob[..., -delta:] = 0.0 def _check_cola(win, nperseg, step, window_name, tol=1e-10): """Check whether the Constant OverLap Add (COLA) constraint is met.""" # adapted from SciPy - binsums = np.sum([win[ii * step:(ii + 1) * step] - for ii in range(nperseg // step)], axis=0) + binsums = np.sum( + [win[ii * step : (ii + 1) * step] for ii in range(nperseg // step)], axis=0 + ) if nperseg % step != 0: - binsums[:nperseg % step] += win[-(nperseg % step):] + binsums[: nperseg % step] += win[-(nperseg % step) :] const = np.median(binsums) deviation = np.max(np.abs(binsums - const)) if deviation > tol: - raise ValueError('segment length %d with step %d for %s window ' - 'type does not provide a constant output ' - '(%g%% deviation)' - % (nperseg, step, window_name, - 100 * deviation / const)) + raise ValueError( + f"segment length {nperseg} with step {step} for {window_name} " + "window type does not provide a constant output " + f"({100 * deviation / const:g}% deviation)" + ) return const -class _Storer(object): +class _Storer: """Store data in chunks.""" def __init__(self, *outs, picks=None): for oi, out in enumerate(outs): if not isinstance(out, np.ndarray) or out.ndim < 1: - raise TypeError('outs[oi] must be >= 1D ndarray, got %s' - % (out,)) + raise TypeError(f"outs[oi] must be >= 1D ndarray, got {out}") self.outs = outs self.idx = 0 self.picks = picks def __call__(self, *outs): - if (len(outs) != len(self.outs) or - not all(out.shape[-1] == outs[0].shape[-1] for out in outs)): - raise ValueError('Bad outs') + if len(outs) != len(self.outs) or not all( + out.shape[-1] == outs[0].shape[-1] for out in outs + ): + raise ValueError("Bad outs") idx = (Ellipsis,) if self.picks is not None: idx += (self.picks,) diff --git a/mne/annotations.py b/mne/annotations.py index cf45d951c04..e298e80918c 100644 --- a/mne/annotations.py +++ b/mne/annotations.py @@ -1,79 +1,206 @@ -# Authors: Jaakko Leppakangas -# Robert Luke -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
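_check_cola verifies the constant overlap-add property by summing the window over each hop and measuring the deviation from the median. A standalone version of that check for a periodic Hann window at 50% overlap, which satisfies COLA:

import numpy as np
from scipy.signal import get_window

nperseg, step = 256, 128  # 50% overlap
win = get_window("hann", nperseg, fftbins=True)
binsums = np.sum(
    [win[ii * step : (ii + 1) * step] for ii in range(nperseg // step)], axis=0
)
const = np.median(binsums)
deviation = np.max(np.abs(binsums - const))
print(const, deviation)  # ~1.0 and ~0.0, so the COLA condition holds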
-from collections import OrderedDict -from datetime import datetime, timedelta, timezone -import os.path as op +import json import re +import warnings +from collections import Counter, OrderedDict, UserDict, UserList +from collections.abc import Iterable from copy import deepcopy +from datetime import datetime, timedelta, timezone from itertools import takewhile -import json -from collections import Counter -from collections.abc import Iterable -import warnings from textwrap import shorten -import numpy as np -from .utils import (_pl, check_fname, _validate_type, verbose, warn, logger, - _check_pandas_installed, _mask_to_onsets_offsets, - _DefaultEventParser, _check_dt, _stamp_to_dt, _dt_to_stamp, - _check_fname, int_like, _check_option, fill_doc, - _on_missing, _is_numeric, _check_dict_keys) - -from .io.write import (start_block, end_block, write_float, - write_name_list_sanitized, _safe_name_list, - write_double, start_file, write_string) -from .io.constants import FIFF -from .io.open import fiff_open -from .io.tree import dir_tree_find -from .io.tag import read_tag +import numpy as np +from scipy.io import loadmat + +from ._fiff.constants import FIFF +from ._fiff.open import fiff_open +from ._fiff.tag import read_tag +from ._fiff.tree import dir_tree_find +from ._fiff.write import ( + _safe_name_list, + end_block, + start_and_end_file, + start_block, + write_double, + write_float, + write_name_list_sanitized, + write_string, +) +from .utils import ( + _check_dict_keys, + _check_dt, + _check_fname, + _check_option, + _check_pandas_installed, + _check_time_format, + _convert_times, + _DefaultEventParser, + _dt_to_stamp, + _is_numeric, + _mask_to_onsets_offsets, + _on_missing, + _pl, + _stamp_to_dt, + _validate_type, + check_fname, + fill_doc, + int_like, + logger, + verbose, + warn, +) # For testing windows_like_datetime, we monkeypatch "datetime" in this module. # Keep the true datetime object around for _validate_type use. _datetime = datetime -def _check_o_d_s_c(onset, duration, description, ch_names): +class _AnnotationsExtrasDict(UserDict): + """A dictionary for storing extra fields of annotations. + + The keys of the dictionary are strings, and the values can be + strings, integers, floats, or None. + """ + + def __setitem__(self, key: str, value: str | int | float | None) -> None: + _validate_type(key, str, "key") + if key in ("onset", "duration", "description", "ch_names"): + raise ValueError(f"Key '{key}' is reserved and cannot be used in extras.") + _validate_type( + value, + (str, int, float, None), + "value", + ) + super().__setitem__(key, value) + + +class _AnnotationsExtrasList(UserList): + """A list of dictionaries for storing extra fields of annotations. + + Each dictionary in the list corresponds to an annotation and contains + extra fields. + The keys of the dictionaries are strings, and the values can be + strings, integers, floats, or None. 
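_AnnotationsExtrasDict below centralizes its validation in __setitem__, which also covers construction and update() because UserDict routes both through item assignment. A standalone sketch of the same pattern; the class and variable names here are illustrative, not MNE's:

from collections import UserDict

RESERVED = ("onset", "duration", "description", "ch_names")

class ValidatedExtras(UserDict):
    def __setitem__(self, key, value):
        if not isinstance(key, str):
            raise TypeError(f"key must be str, got {type(key)}")
        if key in RESERVED:
            raise ValueError(f"Key {key!r} is reserved and cannot be used.")
        if not (value is None or isinstance(value, (str, int, float))):
            raise TypeError(f"unsupported value type: {type(value)}")
        super().__setitem__(key, value)

extras = ValidatedExtras({"sleep_stage": "N2", "confidence": 0.9})
try:
    extras["onset"] = 1.0
except ValueError as err:
    print(err)  # Key 'onset' is reserved and cannot be used.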
+ """ + + def __repr__(self): + return repr(self.data) + + @staticmethod + def _validate_value( + value: dict | _AnnotationsExtrasDict | None, + ) -> _AnnotationsExtrasDict: + _validate_type( + value, + (dict, _AnnotationsExtrasDict, None), + "extras dict value", + "dict or None", + ) + return ( + value + if isinstance(value, _AnnotationsExtrasDict) + else _AnnotationsExtrasDict(value or {}) + ) + + def __init__(self, initlist=None): + if not (isinstance(initlist, _AnnotationsExtrasList) or initlist is None): + initlist = [self._validate_value(v) for v in initlist] + super().__init__(initlist) + + def __setitem__( # type: ignore[override] + self, + key: int | slice, + value, + ) -> None: + _validate_type(key, (int, slice), "key", "int or slice") + if isinstance(key, int): + iterable = False + value = [value] + else: + _validate_type(value, Iterable, "value", "Iterable when key is a slice") + iterable = True + + new_values = [self._validate_value(v) for v in value] + if not iterable: + new_values = new_values[0] + super().__setitem__(key, new_values) + + def __iadd__(self, other): + if not isinstance(other, _AnnotationsExtrasList): + other = _AnnotationsExtrasList(other) + super().__iadd__(other) + + def append(self, item): + super().append(self._validate_value(item)) + + def insert(self, i, item): + super().insert(i, self._validate_value(item)) + + def extend(self, other): + if not isinstance(other, _AnnotationsExtrasList): + other = _AnnotationsExtrasList(other) + super().extend(other) + + +def _validate_extras(extras, length: int): + _validate_type(extras, (None, list, _AnnotationsExtrasList), "extras") + if extras is not None and len(extras) != length: + raise ValueError( + f"extras must be None or a list of length {length}, got {len(extras)}." + ) + if isinstance(extras, _AnnotationsExtrasList): + return extras + return _AnnotationsExtrasList(extras or [None] * length) + + +def _check_o_d_s_c_e(onset, duration, description, ch_names, extras): onset = np.atleast_1d(np.array(onset, dtype=float)) if onset.ndim != 1: - raise ValueError('Onset must be a one dimensional array, got %s ' - '(shape %s).' - % (onset.ndim, onset.shape)) + raise ValueError( + f"Onset must be a one dimensional array, got {onset.ndim} (shape " + f"{onset.shape})." + ) duration = np.array(duration, dtype=float) if duration.ndim == 0 or duration.shape == (1,): duration = np.repeat(duration, len(onset)) if duration.ndim != 1: - raise ValueError('Duration must be a one dimensional array, ' - 'got %d.' % (duration.ndim,)) + raise ValueError( + f"Duration must be a one dimensional array, got {duration.ndim}." + ) description = np.array(description, dtype=str) if description.ndim == 0 or description.shape == (1,): description = np.repeat(description, len(onset)) if description.ndim != 1: - raise ValueError('Description must be a one dimensional array, ' - 'got %d.' % (description.ndim,)) - _safe_name_list(description, 'write', 'description') + raise ValueError( + f"Description must be a one dimensional array, got {description.ndim}." 
+ ) + _safe_name_list(description, "write", "description") # ch_names: convert to ndarray of tuples - _validate_type(ch_names, (None, tuple, list, np.ndarray), 'ch_names') + _validate_type(ch_names, (None, tuple, list, np.ndarray), "ch_names") if ch_names is None: ch_names = [()] * len(onset) ch_names = list(ch_names) for ai, ch in enumerate(ch_names): - _validate_type(ch, (list, tuple, np.ndarray), f'ch_names[{ai}]') + _validate_type(ch, (list, tuple, np.ndarray), f"ch_names[{ai}]") ch_names[ai] = tuple(ch) for ci, name in enumerate(ch_names[ai]): - _validate_type(name, str, f'ch_names[{ai}][{ci}]') + _validate_type(name, str, f"ch_names[{ai}][{ci}]") ch_names = _ndarray_ch_names(ch_names) if not (len(onset) == len(duration) == len(description) == len(ch_names)): raise ValueError( - 'Onset, duration, description, and ch_names must be ' - f'equal in sizes, got {len(onset)}, {len(duration)}, ' - f'{len(description)}, and {len(ch_names)}.') - return onset, duration, description, ch_names + "Onset, duration, description, and ch_names must be " + f"equal in sizes, got {len(onset)}, {len(duration)}, " + f"{len(description)}, and {len(ch_names)}." + ) + + extras = _validate_extras(extras, len(onset)) + return onset, duration, description, ch_names, extras def _ndarray_ch_names(ch_names): @@ -87,7 +214,7 @@ def _ndarray_ch_names(ch_names): @fill_doc -class Annotations(object): +class Annotations: """Annotation object for annotating segments of raw data. .. note:: @@ -115,10 +242,16 @@ class Annotations(object): the annotations with raw data if their acquisition is started at the same time. If it is a string, it should conform to the ISO8601 format. More precisely to this '%%Y-%%m-%%d %%H:%%M:%%S.%%f' particular case of - the ISO8601 format where the delimiter between date and time is ' '. + the ISO8601 format where the delimiter between date and time is ' ' and at most + microsecond precision (nanoseconds are not supported). %(ch_names_annot)s .. versionadded:: 0.23 + extras : list[dict[str, int | float | str | None] | None] | None + Optional list of dicts containing extra fields for each annotation. + The number of items must match the number of annotations. + + .. versionadded:: 1.10 See Also -------- @@ -247,11 +380,34 @@ class Annotations(object): :meth:`Raw.save() ` notes for details. """ # noqa: E501 - def __init__(self, onset, duration, description, - orig_time=None, ch_names=None): # noqa: D102 + def __init__( + self, + onset, + duration, + description, + orig_time=None, + ch_names=None, + *, + extras=None, + ): self._orig_time = _handle_meas_date(orig_time) - self.onset, self.duration, self.description, self.ch_names = \ - _check_o_d_s_c(onset, duration, description, ch_names) + if isinstance(orig_time, str) and self._orig_time is None: + try: # only warn if `orig_time` is not the default '1970-01-01 00:00:00' + if _handle_meas_date(0) == datetime.strptime( + orig_time, "%Y-%m-%d %H:%M:%S" + ).replace(tzinfo=timezone.utc): + pass + except ValueError: # error if incorrect datetime format AND not the default + warn( + "The format of the `orig_time` string is not recognised. It " + "must conform to the ISO8601 format with at most microsecond " + "precision and where the delimiter between date and time is " + f"' '. Got: {orig_time}. 
Defaulting `orig_time` to None.", + RuntimeWarning, + ) + self.onset, self.duration, self.description, self.ch_names, self._extras = ( + _check_o_d_s_c_e(onset, duration, description, ch_names, extras) + ) self._sort() # ensure we're sorted @property @@ -259,25 +415,48 @@ def orig_time(self): """The time base of the Annotations.""" return self._orig_time + @property + def extras(self): + """The extras of the Annotations. + + The ``extras`` attribute is a list of dictionaries. + It can easily be converted to a pandas DataFrame using: + ``pd.DataFrame(extras)``. + """ + return self._extras + + @extras.setter + def extras(self, extras): + self._extras = _validate_extras(extras, len(self.onset)) + + @property + def _extras_columns(self) -> set[str]: + """The set containing all the keys in all extras dicts.""" + return {k for d in self.extras for k in d} + def __eq__(self, other): """Compare to another Annotations instance.""" if not isinstance(other, Annotations): return False - return (np.array_equal(self.onset, other.onset) and - np.array_equal(self.duration, other.duration) and - np.array_equal(self.description, other.description) and - np.array_equal(self.ch_names, other.ch_names) and - self.orig_time == other.orig_time) + return ( + np.array_equal(self.onset, other.onset) + and np.array_equal(self.duration, other.duration) + and np.array_equal(self.description, other.description) + and np.array_equal(self.ch_names, other.ch_names) + and self.orig_time == other.orig_time + ) def __repr__(self): """Show the representation.""" counter = Counter(self.description) - kinds = ', '.join(['%s (%s)' % k for k in sorted(counter.items())]) - kinds = (': ' if len(kinds) > 0 else '') + kinds - ch_specific = ', channel-specific' if self._any_ch_names() else '' - s = ('Annotations | %s segment%s%s%s' % - (len(self.onset), _pl(len(self.onset)), ch_specific, kinds)) - return '<' + shorten(s, width=77, placeholder=' ...') + '>' + kinds = ", ".join(["{} ({})".format(*k) for k in sorted(counter.items())]) + kinds = (": " if len(kinds) > 0 else "") + kinds + ch_specific = ", channel-specific" if self._any_ch_names() else "" + s = ( + f"Annotations | {len(self.onset)} segment" + f"{_pl(len(self.onset))}{ch_specific}{kinds}" + ) + return "<" + shorten(s, width=77, placeholder=" ...") + ">" def __len__(self): """Return the number of annotations. 
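A minimal usage sketch of the ``extras`` API added in the hunks above (illustrative only, not part of the patch; the field names ``amplitude`` and ``reviewer`` are hypothetical, and the printed values follow from the validation logic shown above):

import pandas as pd

import mne

# Two annotations; the second has no extras, which the validator stores as an
# empty dict rather than None.
annot = mne.Annotations(
    onset=[1.0, 5.0],
    duration=[0.5, 0.25],
    description=["blink", "saccade"],
    extras=[{"amplitude": 80.0, "reviewer": "A"}, None],
)
print(annot.extras[0]["amplitude"])  # 80.0
print(annot.extras[1])  # empty extras dict
# As the property docstring notes, extras convert directly to a DataFrame:
print(pd.DataFrame(annot.extras))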
@@ -303,38 +482,56 @@ def __iadd__(self, other): if len(self) == 0: self._orig_time = other.orig_time if self.orig_time != other.orig_time: - raise ValueError("orig_time should be the same to " - "add/concatenate 2 annotations " - "(got %s != %s)" % (self.orig_time, - other.orig_time)) - return self.append(other.onset, other.duration, other.description, - other.ch_names) + raise ValueError( + "orig_time should be the same to add/concatenate 2 annotations (got " + f"{self.orig_time} != {other.orig_time})" + ) + return self.append( + other.onset, + other.duration, + other.description, + other.ch_names, + extras=other.extras, + ) def __iter__(self): """Iterate over the annotations.""" + # Figure this out once ahead of time for consistency and speed (for + # thousands of annotations) + with_ch_names = self._any_ch_names() for idx in range(len(self.onset)): - yield self.__getitem__(idx) + yield self.__getitem__(idx, with_ch_names=with_ch_names) - def __getitem__(self, key): + def __getitem__(self, key, *, with_ch_names=None, with_extras=True): """Propagate indexing and slicing to the underlying numpy structure.""" if isinstance(key, int_like): - out_keys = ('onset', 'duration', 'description', 'orig_time') - out_vals = (self.onset[key], self.duration[key], - self.description[key], self.orig_time) - if self._any_ch_names(): - out_keys += ('ch_names',) + out_keys = ("onset", "duration", "description", "orig_time") + out_vals = ( + self.onset[key], + self.duration[key], + self.description[key], + self.orig_time, + ) + if with_ch_names or (with_ch_names is None and self._any_ch_names()): + out_keys += ("ch_names",) out_vals += (self.ch_names[key],) + if with_extras: + out_keys += ("extras",) + out_vals += (self.extras[key],) return OrderedDict(zip(out_keys, out_vals)) else: key = list(key) if isinstance(key, tuple) else key - return Annotations(onset=self.onset[key], - duration=self.duration[key], - description=self.description[key], - orig_time=self.orig_time, - ch_names=self.ch_names[key]) + return Annotations( + onset=self.onset[key], + duration=self.duration[key], + description=self.description[key], + orig_time=self.orig_time, + ch_names=self.ch_names[key], + extras=[self.extras[i] for i in np.arange(len(self.extras))[key]], + ) @fill_doc - def append(self, onset, duration, description, ch_names=None): + def append(self, onset, duration, description, ch_names=None, *, extras=None): """Add an annotated segment. Operates inplace. Parameters @@ -350,6 +547,11 @@ def append(self, onset, duration, description, ch_names=None): %(ch_names_annot)s .. versionadded:: 0.23 + extras : list[dict[str, int | float | str | None] | None] | None + Optional list of dicts containing extras fields for each annotation. + The number of items must match the number of annotations. + + .. versionadded:: 1.10 Returns ------- @@ -362,12 +564,14 @@ def append(self, onset, duration, description, ch_names=None): to not only ``list.append``, but also `list.extend `__. 
""" # noqa: E501 - onset, duration, description, ch_names = _check_o_d_s_c( - onset, duration, description, ch_names) + onset, duration, description, ch_names, extras = _check_o_d_s_c_e( + onset, duration, description, ch_names, extras + ) self.onset = np.append(self.onset, onset) self.duration = np.append(self.duration, duration) self.description = np.append(self.description, description) self.ch_names = np.append(self.ch_names, ch_names) + self.extras.extend(extras) self._sort() return self @@ -394,10 +598,24 @@ def delete(self, idx): self.duration = np.delete(self.duration, idx) self.description = np.delete(self.description, idx) self.ch_names = np.delete(self.ch_names, idx) + if isinstance(idx, int_like): + del self.extras[idx] + elif len(idx) > 0: + # convert slice-like idx to ints, and delete list items in reverse order + for i in np.sort(np.arange(len(self.extras))[idx])[::-1]: + del self.extras[i] - def to_data_frame(self): + @fill_doc + def to_data_frame(self, time_format="datetime"): """Export annotations in tabular structure as a pandas DataFrame. + Parameters + ---------- + %(time_format_df_raw)s + Default is ``datetime``. + + .. versionadded:: 1.7 + Returns ------- result : pandas.DataFrame @@ -406,25 +624,39 @@ def to_data_frame(self): annotations are channel-specific. """ pd = _check_pandas_installed(strict=True) + valid_time_formats = ["ms", "timedelta", "datetime"] dt = _handle_meas_date(self.orig_time) if dt is None: dt = _handle_meas_date(0) + time_format = _check_time_format(time_format, valid_time_formats, dt) dt = dt.replace(tzinfo=None) - onsets_dt = [dt + timedelta(seconds=o) for o in self.onset] - df = dict(onset=onsets_dt, duration=self.duration, - description=self.description) + times = _convert_times(self.onset, time_format, meas_date=dt, drop_nano=True) + df = dict(onset=times, duration=self.duration, description=self.description) if self._any_ch_names(): df.update(ch_names=self.ch_names) df = pd.DataFrame(df) + extras_df = pd.DataFrame(self.extras) + df = pd.concat([df, extras_df], axis=1) return df + def count(self): + """Count annotations. + + Returns + ------- + counts : dict + A dictionary containing unique annotation descriptions as keys with their + counts as values. + """ + return count_annotations(self) + def _any_ch_names(self): return any(len(ch) for ch in self.ch_names) def _prune_ch_names(self, info, on_missing): # this prunes channel names and if a given channel-specific annotation # no longer has any channels left, it gets dropped - keep = set(info['ch_names']) + keep = set(info["ch_names"]) ch_names = self.ch_names warned = False drop_idx = list() @@ -435,8 +667,10 @@ def _prune_ch_names(self, info, on_missing): if name not in keep: if not warned: _on_missing( - on_missing, 'At least one channel name in ' - f'annotations missing from info: {name}') + on_missing, + "At least one channel name in " + f"annotations missing from info: {name}", + ) warned = True else: names.append(name) @@ -473,16 +707,25 @@ def save(self, fname, *, overwrite=False, verbose=None): whereas :file:`.txt` files store onset as seconds since start of the recording (e.g., ``45.95597082905339``). 
""" - check_fname(fname, 'annotations', ('-annot.fif', '-annot.fif.gz', - '_annot.fif', '_annot.fif.gz', - '.txt', '.csv')) + check_fname( + fname, + "annotations", + ( + "-annot.fif", + "-annot.fif.gz", + "_annot.fif", + "_annot.fif.gz", + ".txt", + ".csv", + ), + ) fname = _check_fname(fname, overwrite=overwrite) if fname.suffix == ".txt": _write_annotations_txt(fname, self) elif fname.suffix == ".csv": _write_annotations_csv(fname, self) else: - with start_file(fname) as fid: + with start_and_end_file(fname) as fid: _write_annotations(fid, self) def _sort(self): @@ -495,10 +738,12 @@ def _sort(self): self.duration = self.duration[order] self.description = self.description[order] self.ch_names = self.ch_names[order] + self.extras = [self.extras[i] for i in order] @verbose - def crop(self, tmin=None, tmax=None, emit_warning=False, - use_orig_time=True, verbose=None): + def crop( + self, tmin=None, tmax=None, emit_warning=False, use_orig_time=True, verbose=None + ): """Remove all annotation that are outside of [tmin, tmax]. The method operates inplace. @@ -531,39 +776,42 @@ def crop(self, tmin=None, tmax=None, emit_warning=False, if tmin is None: tmin = timedelta(seconds=self.onset.min()) + offset if tmax is None: - tmax = timedelta( - seconds=(self.onset + self.duration).max()) + offset - for key, val in [('tmin', tmin), ('tmax', tmax)]: - _validate_type(val, ('numeric', _datetime), key, - 'numeric, datetime, or None') + tmax = timedelta(seconds=(self.onset + self.duration).max()) + offset + for key, val in [("tmin", tmin), ("tmax", tmax)]: + _validate_type( + val, ("numeric", _datetime), key, "numeric, datetime, or None" + ) absolute_tmin = _handle_meas_date(tmin) absolute_tmax = _handle_meas_date(tmax) del tmin, tmax if absolute_tmin > absolute_tmax: - raise ValueError('tmax should be greater than or equal to tmin ' - '(%s < %s).' % (absolute_tmin, absolute_tmax)) - logger.debug('Cropping annotations %s - %s' % (absolute_tmin, - absolute_tmax)) + raise ValueError( + f"tmax should be greater than or equal to tmin ({absolute_tmin} < " + f"{absolute_tmax})." + ) + logger.debug(f"Cropping annotations {absolute_tmin} - {absolute_tmax}") - onsets, durations, descriptions, ch_names = [], [], [], [] + onsets, durations, descriptions, ch_names, extras = [], [], [], [], [] out_of_bounds, clip_left_elem, clip_right_elem = [], [], [] - for idx, (onset, duration, description, ch) in enumerate(zip( - self.onset, self.duration, self.description, self.ch_names)): + for idx, (onset, duration, description, ch, extra) in enumerate( + zip(self.onset, self.duration, self.description, self.ch_names, self.extras) + ): # if duration is NaN behave like a zero if np.isnan(duration): - duration = 0. 
+ duration = 0.0 # convert to absolute times absolute_onset = timedelta(seconds=onset) + offset absolute_offset = absolute_onset + timedelta(seconds=duration) out_of_bounds.append( - absolute_onset > absolute_tmax or - absolute_offset < absolute_tmin) + absolute_onset > absolute_tmax or absolute_offset < absolute_tmin + ) if out_of_bounds[-1]: clip_left_elem.append(False) clip_right_elem.append(False) logger.debug( - f' [{idx}] Dropping ' - f'({absolute_onset} - {absolute_offset}: {description})') + f" [{idx}] Dropping " + f"({absolute_onset} - {absolute_offset}: {description})" + ) else: # clip the left side clip_left_elem.append(absolute_onset < absolute_tmin) @@ -573,35 +821,36 @@ def crop(self, tmin=None, tmax=None, emit_warning=False, if clip_right_elem[-1]: absolute_offset = absolute_tmax if clip_left_elem[-1] or clip_right_elem[-1]: - durations.append( - (absolute_offset - absolute_onset).total_seconds()) + durations.append((absolute_offset - absolute_onset).total_seconds()) else: durations.append(duration) - onsets.append( - (absolute_onset - offset).total_seconds()) + onsets.append((absolute_onset - offset).total_seconds()) logger.debug( - f' [{idx}] Keeping ' - f'({absolute_onset} - {absolute_offset} -> ' - f'{onset} - {onset + duration})') + f" [{idx}] Keeping " + f"({absolute_onset} - {absolute_offset} -> " + f"{onset} - {onset + duration})" + ) descriptions.append(description) ch_names.append(ch) - logger.debug(f'Cropping complete (kept {len(onsets)})') + extras.append(extra) + logger.debug(f"Cropping complete (kept {len(onsets)})") self.onset = np.array(onsets, float) self.duration = np.array(durations, float) assert (self.duration >= 0).all() self.description = np.array(descriptions, dtype=str) self.ch_names = _ndarray_ch_names(ch_names) + self.extras = extras if emit_warning: omitted = np.array(out_of_bounds).sum() if omitted > 0: - warn('Omitted %s annotation(s) that were outside data' - ' range.' % omitted) - limited = (np.array(clip_left_elem) | - np.array(clip_right_elem)).sum() + warn(f"Omitted {omitted} annotation(s) that were outside data range.") + limited = (np.array(clip_left_elem) | np.array(clip_right_elem)).sum() if limited > 0: - warn('Limited %s annotation(s) that were expanding outside the' - ' data range.' % limited) + warn( + f"Limited {limited} annotation(s) that were expanding outside the" + " data range." + ) return self @@ -630,9 +879,12 @@ def set_durations(self, mapping, verbose=None): _validate_type(mapping, (int, float, dict)) if isinstance(mapping, dict): - _check_dict_keys(mapping, self.description, - valid_key_source="data", - key_description="Annotation description(s)") + _check_dict_keys( + mapping, + self.description, + valid_key_source="data", + key_description="Annotation description(s)", + ) for stim in mapping: map_idx = [desc == stim for desc in self.description] self.duration[map_idx] = mapping[stim] @@ -641,9 +893,11 @@ def set_durations(self, mapping, verbose=None): self.duration = np.ones(self.description.shape) * mapping else: - raise ValueError("Setting durations requires the mapping of " - "descriptions to times to be provided as a dict. " - f"Instead {type(mapping)} was provided.") + raise ValueError( + "Setting durations requires the mapping of " + "descriptions to times to be provided as a dict. " + f"Instead {type(mapping)} was provided." + ) return self @@ -668,13 +922,13 @@ def rename(self, mapping, verbose=None): .. 
versionadded:: 0.24.0 """ _validate_type(mapping, dict) - _check_dict_keys(mapping, self.description, valid_key_source="data", - key_description="Annotation description(s)") - - for old, new in mapping.items(): - self.description = [d.replace(old, new) for d in self.description] - - self.description = np.array(self.description) + _check_dict_keys( + mapping, + self.description, + valid_key_source="data", + key_description="Annotation description(s)", + ) + self.description = np.array([str(mapping.get(d, d)) for d in self.description]) return self @@ -686,8 +940,7 @@ def annotations(self): # noqa: D102 return self._annotations @verbose - def set_annotations(self, annotations, on_missing='raise', *, - verbose=None): + def set_annotations(self, annotations, on_missing="raise", *, verbose=None): """Setter for Epoch annotations from Raw. This method does not handle offsetting the times based @@ -727,24 +980,33 @@ def set_annotations(self, annotations, on_missing='raise', *, .. versionadded:: 1.0 """ - _validate_type(annotations, (Annotations, None), 'annotations') + _validate_type(annotations, (Annotations, None), "annotations") if annotations is None: self._annotations = None else: - if getattr(self, '_unsafe_annot_add', False): - warn('Adding annotations to Epochs created (and saved to ' - 'disk) before 1.0 will yield incorrect results if ' - 'decimation or resampling was performed on the instance, ' - 'we recommend regenerating the Epochs and re-saving them ' - 'to disk') + if getattr(self, "_unsafe_annot_add", False): + warn( + "Adding annotations to Epochs created (and saved to disk) before " + "1.0 will yield incorrect results if decimation or resampling was " + "performed on the instance, we recommend regenerating the Epochs " + "and re-saving them to disk." + ) new_annotations = annotations.copy() new_annotations._prune_ch_names(self.info, on_missing) self._annotations = new_annotations return self - def get_annotations_per_epoch(self): + def get_annotations_per_epoch(self, *, with_extras=False): """Get a list of annotations that occur during each epoch. + Parameters + ---------- + with_extras : bool + Whether to include the annotations extra fields in the output, + as an additional last element of the tuple. Default is False. + + .. versionadded:: 1.10 + Returns ------- epoch_annots : list @@ -765,8 +1027,9 @@ def get_annotations_per_epoch(self): # when each epoch and annotation starts/stops # no need to account for first_samp here... epoch_tzeros = self.events[:, 0] / self._raw_sfreq - epoch_starts, epoch_stops = np.atleast_2d( - epoch_tzeros) + np.atleast_2d(self.times[[0, -1]]).T + epoch_starts, epoch_stops = ( + np.atleast_2d(epoch_tzeros) + np.atleast_2d(self.times[[0, -1]]).T + ) # ... because first_samp isn't accounted for here either annot_starts = self._annotations.onset annot_stops = annot_starts + self._annotations.duration @@ -778,38 +1041,47 @@ def get_annotations_per_epoch(self): # we care about is presence/absence of overlap). 
annot_straddles_epoch_start = np.logical_and( np.atleast_2d(epoch_starts) >= np.atleast_2d(annot_starts).T, - np.atleast_2d(epoch_starts) < np.atleast_2d(annot_stops).T) + np.atleast_2d(epoch_starts) < np.atleast_2d(annot_stops).T, + ) annot_straddles_epoch_end = np.logical_and( np.atleast_2d(epoch_stops) > np.atleast_2d(annot_starts).T, - np.atleast_2d(epoch_stops) <= np.atleast_2d(annot_stops).T) + np.atleast_2d(epoch_stops) <= np.atleast_2d(annot_stops).T, + ) # this captures the only remaining case we care about: annotations # fully contained within an epoch (or exactly coextensive with it). annot_fully_within_epoch = np.logical_and( np.atleast_2d(epoch_starts) <= np.atleast_2d(annot_starts).T, - np.atleast_2d(epoch_stops) >= np.atleast_2d(annot_stops).T) + np.atleast_2d(epoch_stops) >= np.atleast_2d(annot_stops).T, + ) # combine all cases to get array of shape (n_annotations, n_epochs). # Nonzero entries indicate overlap between the corresponding # annotation (row index) and epoch (column index). - all_cases = (annot_straddles_epoch_start + - annot_straddles_epoch_end + - annot_fully_within_epoch) + all_cases = ( + annot_straddles_epoch_start + + annot_straddles_epoch_end + + annot_fully_within_epoch + ) # for each Epoch-Annotation overlap occurrence: for annot_ix, epo_ix in zip(*np.nonzero(all_cases)): this_annot = self._annotations[annot_ix] this_tzero = epoch_tzeros[epo_ix] # adjust annotation onset to be relative to epoch tzero... - annot = (this_annot['onset'] - this_tzero, - this_annot['duration'], - this_annot['description']) + annot = ( + this_annot["onset"] - this_tzero, + this_annot["duration"], + this_annot["description"], + ) + if with_extras: + annot += (this_annot["extras"],) # ...then add it to the correct sublist of `epoch_annot_list` epoch_annot_list[epo_ix].append(annot) return epoch_annot_list - def add_annotations_to_metadata(self, overwrite=False): + def add_annotations_to_metadata(self, overwrite=False, *, with_extras=True): """Add raw annotations into the Epochs metadata data frame. Adds three columns to the ``metadata`` consisting of a list @@ -826,6 +1098,11 @@ def add_annotations_to_metadata(self, overwrite=False): overwrite : bool Whether to overwrite existing columns in metadata or not. Default is False. + with_extras : bool + Whether to include the annotations extra fields in the output, + as an additional last element of the tuple. Default is True. + + .. versionadded:: 1.10 Returns ------- @@ -840,8 +1117,10 @@ def add_annotations_to_metadata(self, overwrite=False): # check if annotations exist if self.annotations is None: - warn(f'There were no Annotations stored in {self}, so ' - 'metadata was not modified.') + warn( + f"There were no Annotations stored in {self}, so " + "metadata was not modified." + ) return self # get existing metadata DataFrame or instantiate an empty one @@ -851,17 +1130,23 @@ def add_annotations_to_metadata(self, overwrite=False): data = np.empty((len(self.events), 0)) metadata = pd.DataFrame(data=data) - if any(name in metadata.columns for name in - ['annot_onset', 'annot_duration', 'annot_description']) and \ - not overwrite: + if ( + any( + name in metadata.columns + for name in ["annot_onset", "annot_duration", "annot_description"] + ) + and not overwrite + ): raise RuntimeError( - 'Metadata for Epochs already contains columns ' - '"annot_onset", "annot_duration", or "annot_description".') + "Metadata for Epochs already contains columns " + '"annot_onset", "annot_duration", or "annot_description".' 
+ ) # get the Epoch annotations, then convert to separate lists for # onsets, durations, and descriptions - epoch_annot_list = self.get_annotations_per_epoch() + epoch_annot_list = self.get_annotations_per_epoch(with_extras=with_extras) onset, duration, description = [], [], [] + extras = {k: [] for k in self.annotations._extras_columns} for epoch_annot in epoch_annot_list: for ix, annot_prop in enumerate((onset, duration, description)): entry = [annot[ix] for annot in epoch_annot] @@ -871,20 +1156,26 @@ def add_annotations_to_metadata(self, overwrite=False): entry = np.round(entry, decimals=12).tolist() annot_prop.append(entry) + for k in extras.keys(): + entry = [annot[3].get(k, None) for annot in epoch_annot] + extras[k].append(entry) # Create a new Annotations column that is instantiated as an empty # list per Epoch. - metadata['annot_onset'] = pd.Series(onset) - metadata['annot_duration'] = pd.Series(duration) - metadata['annot_description'] = pd.Series(description) + metadata["annot_onset"] = pd.Series(onset) + metadata["annot_duration"] = pd.Series(duration) + metadata["annot_description"] = pd.Series(description) + for k, v in extras.items(): + metadata[f"annot_{k}"] = pd.Series(v) # reset the metadata self.metadata = metadata return self -def _combine_annotations(one, two, one_n_samples, one_first_samp, - two_first_samp, sfreq): +def _combine_annotations( + one, two, one_n_samples, one_first_samp, two_first_samp, sfreq +): """Combine a tuple of annotations.""" assert one is not None assert two is not None @@ -908,7 +1199,7 @@ def _handle_meas_date(meas_date): time. """ if isinstance(meas_date, str): - ACCEPTED_ISO8601 = '%Y-%m-%d %H:%M:%S.%f' + ACCEPTED_ISO8601 = "%Y-%m-%d %H:%M:%S.%f" try: meas_date = datetime.strptime(meas_date, ACCEPTED_ISO8601) except ValueError: @@ -936,13 +1227,12 @@ def _handle_meas_date(meas_date): def _sync_onset(raw, onset, inverse=False): """Adjust onsets in relation to raw data.""" offset = (-1 if inverse else 1) * raw._first_time - assert raw.info['meas_date'] == raw.annotations.orig_time + assert raw.info["meas_date"] == raw.annotations.orig_time annot_start = onset - offset return annot_start -def _annotations_starts_stops(raw, kinds, name='skip_by_annotation', - invert=False): +def _annotations_starts_stops(raw, kinds, name="skip_by_annotation", invert=False): """Get starts and stops from given kinds. onsets and ends are inclusive. 
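To make the extras-aware metadata flow in the hunk above concrete, here is a short sketch (not part of the patch) of how the new ``annot_*`` columns end up in ``Epochs.metadata``; the channel name, sampling rate, and the ``intensity`` field are invented for illustration:

import numpy as np

import mne

# Hypothetical 10 s single-channel recording with two annotated events.
info = mne.create_info(["EEG 001"], sfreq=100.0, ch_types="eeg")
raw = mne.io.RawArray(np.zeros((1, 1000)), info)
raw.set_annotations(
    mne.Annotations(
        onset=[2.0, 6.0],
        duration=[1.0, 0.5],
        description=["stim", "stim"],
        extras=[{"intensity": 0.7}, {"intensity": 0.9}],
    )
)
epochs = mne.make_fixed_length_epochs(raw, duration=5.0, preload=True)
epochs.add_annotations_to_metadata()  # with_extras=True by default
# Per the code above, expect annot_onset, annot_duration, annot_description,
# plus one annot_<key> column per extras key (here annot_intensity):
print(list(epochs.metadata.columns))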
@@ -952,14 +1242,16 @@ def _annotations_starts_stops(raw, kinds, name='skip_by_annotation', kinds = [kinds] else: for kind in kinds: - _validate_type(kind, 'str', "All entries") + _validate_type(kind, "str", "All entries") if len(raw.annotations) == 0: onsets, ends = np.array([], int), np.array([], int) else: - idxs = [idx for idx, desc in enumerate(raw.annotations.description) - if any(desc.upper().startswith(kind.upper()) - for kind in kinds)] + idxs = [ + idx + for idx, desc in enumerate(raw.annotations.description) + if any(desc.upper().startswith(kind.upper()) for kind in kinds) + ] # onsets are already sorted onsets = raw.annotations.onset[idxs] onsets = _sync_onset(raw, onsets) @@ -974,7 +1266,7 @@ def _annotations_starts_stops(raw, kinds, name='skip_by_annotation', for onset, end in zip(onsets, ends): mask[onset:end] = True mask = ~mask - extras = (onsets == ends) + extras = onsets == ends extra_onsets, extra_ends = onsets[extras], ends[extras] onsets, ends = _mask_to_onsets_offsets(mask) # Keep ones where things were exactly equal @@ -991,25 +1283,46 @@ def _write_annotations(fid, annotations): """Write annotations.""" start_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS) write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, annotations.onset) - write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, - annotations.duration + annotations.onset) + write_float( + fid, FIFF.FIFF_MNE_BASELINE_MAX, annotations.duration + annotations.onset + ) write_name_list_sanitized( - fid, FIFF.FIFF_COMMENT, annotations.description, name='description') + fid, FIFF.FIFF_COMMENT, annotations.description, name="description" + ) if annotations.orig_time is not None: - write_double(fid, FIFF.FIFF_MEAS_DATE, - _dt_to_stamp(annotations.orig_time)) + write_double(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(annotations.orig_time)) if annotations._any_ch_names(): - write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG, - json.dumps(tuple(annotations.ch_names))) + write_string( + fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG, json.dumps(tuple(annotations.ch_names)) + ) + if any(d is not None for d in annotations.extras): + write_string( + fid, + FIFF.FIFF_FREE_LIST, + json.dumps([extra.data for extra in annotations.extras]), + ) end_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS) def _write_annotations_csv(fname, annot): annot = annot.to_data_frame() - if 'ch_names' in annot: - annot['ch_names'] = [ - _safe_name_list(ch, 'write', name=f'annot["ch_names"][{ci}') - for ci, ch in enumerate(annot['ch_names'])] + if "ch_names" in annot: + annot["ch_names"] = [ + _safe_name_list(ch, "write", name=f'annot["ch_names"][{ci}') + for ci, ch in enumerate(annot["ch_names"]) + ] + extras_columns = set(annot.columns) - { + "onset", + "duration", + "description", + "ch_names", + } + for col in extras_columns: + if len(dtypes := annot[col].apply(type).unique()) > 1: + warn( + f"Extra field '{col}' contains heterogeneous dtypes ({dtypes}). " + "Loading these CSV annotations may not return the original dtypes." 
+ ) annot.to_csv(fname, index=False) @@ -1019,28 +1332,47 @@ def _write_annotations_txt(fname, annot): # for backward compat, we do not write tzinfo (assumed UTC) content += f"# orig_time : {annot.orig_time.replace(tzinfo=None)}\n" content += "# onset, duration, description" + n_cols = 3 data = [annot.onset, annot.duration, annot.description] if annot._any_ch_names(): - content += ', ch_names' - data.append([ - _safe_name_list(ch, 'write', f'annot.ch_names[{ci}]') - for ci, ch in enumerate(annot.ch_names)]) - content += '\n' + n_cols += 1 + content += ", ch_names" + data.append( + [ + _safe_name_list(ch, "write", f"annot.ch_names[{ci}]") + for ci, ch in enumerate(annot.ch_names) + ] + ) + if len(extras_columns := annot._extras_columns) > 0: + n_cols += len(extras_columns) + for column in extras_columns: + content += f", {column}" + values = [extra.get(column, None) for extra in annot.extras] + if len(dtypes := set(type(v) for v in values)) > 1: + warn( + f"Extra field '{column}' contains heterogeneous dtypes ({dtypes}). " + "Loading these TXT annotations may not return the original dtypes." + ) + data.append([val if val is not None else "" for val in values]) + content += "\n" data = np.array(data, dtype=str).T assert data.ndim == 2 assert data.shape[0] == len(annot.onset) - assert data.shape[1] in (3, 4) - with open(fname, 'wb') as fid: + assert data.shape[1] == n_cols + with open(fname, "wb") as fid: fid.write(content.encode()) - np.savetxt(fid, data, delimiter=',', fmt="%s") + np.savetxt(fid, data, delimiter=",", fmt="%s") -def read_annotations(fname, sfreq='auto', uint16_codec=None): +@fill_doc +def read_annotations( + fname, sfreq="auto", uint16_codec=None, encoding="utf8", ignore_marker_types=False +) -> Annotations: r"""Read annotations from a file. This function reads a ``.fif``, ``.fif.gz``, ``.vmrk``, ``.amrk``, - ``.edf``, ``.txt``, ``.csv``, ``.cnt``, ``.cef``, or ``.set`` file and - makes an :class:`mne.Annotations` object. + ``.edf``, ``.bdf``, ``.gdf``, ``.txt``, ``.csv``, ``.cnt``, ``.cef``, or + ``.set`` file and makes an :class:`mne.Annotations` object. Parameters ---------- @@ -1063,10 +1395,15 @@ def read_annotations(fname, sfreq='auto', uint16_codec=None): too small". ``uint16_codec`` allows to specify what codec (for example: ``'latin1'`` or ``'utf-8'``) should be used when reading character arrays and can therefore help you solve this problem. + %(encoding_edf)s + Only used when reading EDF annotations. + ignore_marker_types : bool + If ``True``, ignore marker types in BrainVision files (and only use their + descriptions). Defaults to ``False``. Returns ------- - annot : instance of Annotations | None + annot : instance of Annotations The annotations. Notes @@ -1076,74 +1413,72 @@ def read_annotations(fname, sfreq='auto', uint16_codec=None): ``.txt`` extension. 
""" from .io.brainvision.brainvision import _read_annotations_brainvision - from .io.eeglab.eeglab import _read_annotations_eeglab - from .io.edf.edf import _read_annotations_edf from .io.cnt.cnt import _read_annotations_cnt - from .io.curry.curry import _read_annotations_curry from .io.ctf.markers import _read_annotations_ctf + from .io.curry.curry import _read_annotations_curry + from .io.edf.edf import _read_annotations_edf + from .io.eeglab.eeglab import _read_annotations_eeglab - fname = str( - _check_fname( - fname, - overwrite="read", - must_exist=True, - need_dir=str(fname).endswith(".ds"), # for CTF - name="fname", - ) + fname = _check_fname( + fname, + overwrite="read", + must_exist=True, + need_dir=str(fname).endswith(".ds"), # for CTF + name="fname", ) - name = op.basename(fname) - if name.endswith(('fif', 'fif.gz')): + readers = { + ".csv": _read_annotations_csv, + ".cnt": _read_annotations_cnt, + ".ds": _read_annotations_ctf, + ".dat": _read_annotations_curry, + ".cdt": _read_annotations_curry, + ".cef": _read_annotations_curry, + ".set": _read_annotations_eeglab, + ".edf": _read_annotations_edf, + ".bdf": _read_annotations_edf, + ".gdf": _read_annotations_edf, + ".vmrk": _read_annotations_brainvision, + ".amrk": _read_annotations_brainvision, + ".txt": _read_annotations_txt, + } + kwargs = { + ".vmrk": {"sfreq": sfreq, "ignore_marker_types": ignore_marker_types}, + ".amrk": {"sfreq": sfreq, "ignore_marker_types": ignore_marker_types}, + ".dat": {"sfreq": sfreq}, + ".cdt": {"sfreq": sfreq}, + ".cef": {"sfreq": sfreq}, + ".set": {"uint16_codec": uint16_codec}, + ".edf": {"encoding": encoding}, + ".bdf": {"encoding": encoding}, + ".gdf": {"encoding": encoding}, + } + if fname.suffix in readers: + annotations = readers[fname.suffix](fname, **kwargs.get(fname.suffix, {})) + elif fname.name.endswith(("fif", "fif.gz")): # Read FiF files ff, tree, _ = fiff_open(fname, preload=False) with ff as fid: annotations = _read_annotations_fif(fid, tree) - elif name.endswith('txt'): - orig_time = _read_annotations_txt_parse_header(fname) - onset, duration, description, ch_names = _read_annotations_txt(fname) - annotations = Annotations(onset=onset, duration=duration, - description=description, orig_time=orig_time, - ch_names=ch_names) - - elif name.endswith(('vmrk', 'amrk')): - annotations = _read_annotations_brainvision(fname, sfreq=sfreq) - - elif name.endswith('csv'): - annotations = _read_annotations_csv(fname) - - elif name.endswith('cnt'): - annotations = _read_annotations_cnt(fname) - - elif name.endswith('ds'): - annotations = _read_annotations_ctf(fname) - - elif name.endswith('cef'): - annotations = _read_annotations_curry(fname, sfreq=sfreq) - - elif name.endswith('set'): - annotations = _read_annotations_eeglab(fname, - uint16_codec=uint16_codec) - - elif name.endswith(('edf', 'bdf', 'gdf')): - onset, duration, description = _read_annotations_edf(fname) - onset = np.array(onset, dtype=float) - duration = np.array(duration, dtype=float) - annotations = Annotations(onset=onset, duration=duration, - description=description, - orig_time=None) - - elif name.startswith('events_') and fname.endswith('mat'): + elif fname.name.startswith("events_") and fname.suffix == ".mat": annotations = _read_brainstorm_annotations(fname) else: - raise IOError('Unknown annotation file format "%s"' % fname) + raise OSError(f'Unknown annotation file format "{fname}"') if annotations is None: - raise IOError('No annotation data found in file "%s"' % fname) + raise OSError(f'No annotation data found in file 
"{fname}"') return annotations def _read_annotations_csv(fname): """Read annotations from csv. + The dtypes of the extra fields will automatically be inferred + by pandas. If some fields have heterogeneous types on the + different rows, this automatic inference may return unexpected + types. + If you need to save heterogeneous extra dtypes, we recommend + saving to FIF. + Parameters ---------- fname : path-like @@ -1156,24 +1491,40 @@ def _read_annotations_csv(fname): """ pd = _check_pandas_installed(strict=True) df = pd.read_csv(fname, keep_default_na=False) - orig_time = df['onset'].values[0] + orig_time = df["onset"].values[0] try: float(orig_time) - warn('It looks like you have provided annotation onsets as floats. ' - 'These will be interpreted as MILLISECONDS. If that is not what ' - 'you want, save your CSV as a TXT file; the TXT reader accepts ' - 'onsets in seconds.') + warn( + "It looks like you have provided annotation onsets as floats. " + "These will be interpreted as MILLISECONDS. If that is not what " + "you want, save your CSV as a TXT file; the TXT reader accepts " + "onsets in seconds." + ) except ValueError: - pass - onset_dt = pd.to_datetime(df['onset']) + # remove nanoseconds for ISO8601 (microsecond) compliance + timestamp = pd.Timestamp(orig_time) + timespec = "microseconds" + if timestamp == pd.Timestamp(_handle_meas_date(0)).astimezone(None): + timespec = "auto" # use default timespec for `orig_time=None` + orig_time = timestamp.isoformat(sep=" ", timespec=timespec) + + onset_dt = pd.to_datetime(df["onset"]) onset = (onset_dt - onset_dt[0]).dt.total_seconds() - duration = df['duration'].values.astype(float) - description = df['description'].values + duration = df["duration"].values.astype(float) + description = df["description"].values ch_names = None - if 'ch_names' in df.columns: - ch_names = [_safe_name_list(val, 'read', 'annotation channel name') - for val in df['ch_names'].values] - return Annotations(onset, duration, description, orig_time, ch_names) + if "ch_names" in df.columns: + ch_names = [ + _safe_name_list(val, "read", "annotation channel name") + for val in df["ch_names"].values + ] + extra_columns = list( + df.columns.difference(["onset", "duration", "description", "ch_names"]) + ) + extras = None + if len(extra_columns) > 0: + extras = df[extra_columns].to_dict(orient="records") + return Annotations(onset, duration, description, orig_time, ch_names, extras=extras) def _read_brainstorm_annotations(fname, orig_time=None): @@ -1197,67 +1548,138 @@ def _read_brainstorm_annotations(fname, orig_time=None): annot : instance of Annotations | None The annotations. 
""" - from scipy import io def get_duration_from_times(t): return t[1] - t[0] if t.shape[0] == 2 else np.zeros(len(t[0])) - annot_data = io.loadmat(fname) + annot_data = loadmat(fname) onsets, durations, descriptions = (list(), list(), list()) - for label, _, _, _, times, _, _ in annot_data['events'][0]: + for label, _, _, _, times, _, _ in annot_data["events"][0]: onsets.append(times[0]) durations.append(get_duration_from_times(times)) n_annot = len(times[0]) descriptions += [str(label[0])] * n_annot - return Annotations(onset=np.concatenate(onsets), - duration=np.concatenate(durations), - description=descriptions, - orig_time=orig_time) + return Annotations( + onset=np.concatenate(onsets), + duration=np.concatenate(durations), + description=descriptions, + orig_time=orig_time, + ) def _is_iso8601(candidate_str): - ISO8601 = r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}\.\d{6}$' + ISO8601 = r"^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}\.\d{6}$" return re.compile(ISO8601).match(candidate_str) is not None def _read_annotations_txt_parse_header(fname): def is_orig_time(x): - return x.startswith('# orig_time :') + return x.startswith("# orig_time :") + + def is_columns(x): + return x.startswith("# onset, duration, description") with open(fname) as fid: - header = list(takewhile(lambda x: x.startswith('#'), fid)) + header = list(takewhile(lambda x: x.startswith("#"), fid)) orig_values = [h[13:].strip() for h in header if is_orig_time(h)] - orig_values = [_handle_meas_date(orig) for orig in orig_values - if _is_iso8601(orig)] + orig_values = [_handle_meas_date(orig) for orig in orig_values if _is_iso8601(orig)] - return None if not orig_values else orig_values[0] + columns = [[c.strip() for c in h[2:].split(",")] for h in header if is_columns(h)] + + return ( + None if not orig_values else orig_values[0], + (None if not columns else columns[0]), + len(header), + ) def _read_annotations_txt(fname): with warnings.catch_warnings(record=True): warnings.simplefilter("ignore") - out = np.loadtxt(fname, delimiter=',', - dtype=np.bytes_, unpack=True) - ch_names = None + out = np.loadtxt(fname, delimiter=",", dtype=np.bytes_, unpack=True) + orig_time, columns, n_rows_header = _read_annotations_txt_parse_header(fname) + ch_names = extras = None if len(out) == 0: onset, duration, desc = [], [], [] else: - _check_option('text header', len(out), (3, 4)) - if len(out) == 3: - onset, duration, desc = out - else: - onset, duration, desc, ch_names = out + if columns is None: + # No column names were present in the header + # We assume the first three columns are onset, duration, description + # And eventually a fourth column with ch_names + _check_option("text header", len(out), (3, 4)) + columns = ["onset", "duration", "description"] + ( + ["ch_names"] if len(out) == 4 else [] + ) + col_map = {col: i for i, col in enumerate(columns)} + if len(col_map) != len(columns): + raise ValueError( + "Duplicate column names found in header. Please check the file format." + ) + if missing := {"onset", "duration", "description"} - set(col_map.keys()): + raise ValueError( + f"Column(s) {missing} not found in header. " + "Please check the file format." 
+ ) + _check_option("text header len", len(out), (len(columns),)) + onset = out[col_map["onset"]] + duration = out[col_map["duration"]] + desc = out[col_map["description"]] + if "ch_names" in col_map: + ch_names = out[col_map["ch_names"]] + extra_columns = set(col_map.keys()) - { + "onset", + "duration", + "description", + "ch_names", + } + if extra_columns: + pd = _check_pandas_installed(strict=False) + if pd: + df = pd.read_csv( + fname, + delimiter=",", + names=columns, + usecols=extra_columns, + skiprows=n_rows_header, + header=None, + keep_default_na=False, + ) + extras = df.to_dict(orient="records") + else: + warn( + "Extra fields found in the header but pandas is not installed. " + "Therefore the dtypes of the extra fields can not automatically " + "be inferred so they will be loaded as strings." + ) + extras = [ + { + col_name: out[col_map[col_name]][i].decode("UTF-8") + for col_name in extra_columns + } + for i in range(len(onset)) + ] onset = [float(o.decode()) for o in np.atleast_1d(onset)] duration = [float(d.decode()) for d in np.atleast_1d(duration)] desc = [str(d.decode()).strip() for d in np.atleast_1d(desc)] if ch_names is not None: ch_names = [ - _safe_name_list(ch.decode().strip(), 'read', f'ch_names[{ci}]') - for ci, ch in enumerate(ch_names)] - return onset, duration, desc, ch_names + _safe_name_list(ch.decode().strip(), "read", f"ch_names[{ci}]") + for ci, ch in enumerate(ch_names) + ] + + annotations = Annotations( + onset=onset, + duration=duration, + description=desc, + orig_time=orig_time, + ch_names=ch_names, + extras=extras, + ) + + return annotations def _read_annotations_fif(fid, tree): @@ -1267,9 +1689,9 @@ def _read_annotations_fif(fid, tree): annotations = None else: annot_data = annot_data[0] - orig_time = ch_names = None + orig_time = ch_names = extras = None onset, duration, description = list(), list(), list() - for ent in annot_data['directory']: + for ent in annot_data["directory"]: kind = ent.kind pos = ent.pos tag = read_tag(fid, pos) @@ -1280,7 +1702,7 @@ def _read_annotations_fif(fid, tree): duration = tag.data duration = list() if duration is None else duration - onset elif kind == FIFF.FIFF_COMMENT: - description = _safe_name_list(tag.data, 'read', 'description') + description = _safe_name_list(tag.data, "read", "description") elif kind == FIFF.FIFF_MEAS_DATE: orig_time = tag.data try: @@ -1289,15 +1711,20 @@ def _read_annotations_fif(fid, tree): orig_time = tuple(orig_time) # new way elif kind == FIFF.FIFF_MNE_EPOCHS_DROP_LOG: ch_names = tuple(tuple(x) for x in json.loads(tag.data)) + elif kind == FIFF.FIFF_FREE_LIST: + extras = json.loads(tag.data) assert len(onset) == len(duration) == len(description) - annotations = Annotations(onset, duration, description, - orig_time, ch_names) + if extras is not None: + assert len(extras) == len(onset) + annotations = Annotations( + onset, duration, description, orig_time, ch_names, extras=extras + ) return annotations def _select_annotations_based_on_description(descriptions, event_id, regexp): """Get a collection of descriptions and returns index of selected.""" - regexp_comp = re.compile('.*' if regexp is None else regexp) + regexp_comp = re.compile(".*" if regexp is None else regexp) event_id_ = dict() dropped = [] @@ -1322,11 +1749,10 @@ def _select_annotations_based_on_description(descriptions, event_id, regexp): else: dropped.append(desc) - event_sel = [ii for ii, kk in enumerate(descriptions) - if kk in event_id_] + event_sel = [ii for ii, kk in enumerate(descriptions) if kk in event_id_] if 
len(event_sel) == 0 and regexp is not None: - raise ValueError('Could not find any of the events you specified.') + raise ValueError("Could not find any of the events you specified.") return event_sel, event_id_ @@ -1344,33 +1770,38 @@ def _select_events_based_on_id(events, event_desc): event_sel = [ii for ii, e in enumerate(events) if e[2] in event_desc_] if len(event_sel) == 0: - raise ValueError('Could not find any of the events you specified.') + raise ValueError("Could not find any of the events you specified.") return event_sel, event_desc_ def _check_event_id(event_id, raw): - from .io.brainvision.brainvision import _BVEventParser - from .io.brainvision.brainvision import _check_bv_annot - from .io.brainvision.brainvision import RawBrainVision - from .io import RawFIF, RawArray + from .io import Raw, RawArray + from .io.brainvision.brainvision import ( + RawBrainVision, + _BVEventParser, + _check_bv_annot, + ) if event_id is None: return _DefaultEventParser() - elif event_id == 'auto': + elif event_id == "auto": if isinstance(raw, RawBrainVision): return _BVEventParser() - elif (isinstance(raw, (RawFIF, RawArray)) and - _check_bv_annot(raw.annotations.description)): - logger.info('Non-RawBrainVision raw using branvision markers') + elif isinstance(raw, Raw | RawArray) and _check_bv_annot( + raw.annotations.description + ): + logger.info("Non-RawBrainVision raw using branvision markers") return _BVEventParser() else: return _DefaultEventParser() elif callable(event_id) or isinstance(event_id, dict): return event_id else: - raise ValueError('Invalid type for event_id (should be None, str, ' - 'dict or callable). Got {}'.format(type(event_id))) + raise ValueError( + "Invalid type for event_id (should be None, str, " + f"dict or callable). Got {type(event_id)}." + ) def _check_event_description(event_desc, events): @@ -1380,28 +1811,33 @@ def _check_event_description(event_desc, events): if isinstance(event_desc, dict): for val in event_desc.values(): - _validate_type(val, (str, None), 'Event names') + _validate_type(val, (str, None), "Event names") elif isinstance(event_desc, Iterable): event_desc = np.asarray(event_desc) if event_desc.ndim != 1: - raise ValueError('event_desc must be 1D, got shape {}'.format( - event_desc.shape)) + raise ValueError(f"event_desc must be 1D, got shape {event_desc.shape}") event_desc = dict(zip(event_desc, map(str, event_desc))) elif callable(event_desc): pass else: - raise ValueError('Invalid type for event_desc (should be None, list, ' - '1darray, dict or callable). Got {}'.format( - type(event_desc))) + raise ValueError( + "Invalid type for event_desc (should be None, list, " + f"1darray, dict or callable). Got {type(event_desc)}." + ) return event_desc @verbose -def events_from_annotations(raw, event_id="auto", - regexp=r'^(?![Bb][Aa][Dd]|[Ee][Dd][Gg][Ee]).*$', - use_rounding=True, chunk_duration=None, - verbose=None): +def events_from_annotations( + raw, + event_id="auto", + regexp=r"^(?![Bb][Aa][Dd]|[Ee][Dd][Gg][Ee]).*$", + use_rounding=True, + chunk_duration=None, + tol=1e-8, + verbose=None, +): """Get :term:`events` and ``event_id`` from an Annotations object. Parameters @@ -1443,6 +1879,11 @@ def events_from_annotations(raw, event_id="auto", they fit within the annotation duration spaced according to ``chunk_duration``. As a consequence annotations with duration shorter than ``chunk_duration`` will not contribute events. + tol : float + The tolerance used to check if a chunk fits within an annotation when + ``chunk_duration`` is not ``None``. 
If the duration from a computed + chunk onset to the end of the annotation is smaller than + ``chunk_duration`` minus ``tol``, the onset will be discarded. %(verbose)s Returns @@ -1472,11 +1913,13 @@ def events_from_annotations(raw, event_id="auto", event_id = _check_event_id(event_id, raw) event_sel, event_id_ = _select_annotations_based_on_description( - annotations.description, event_id=event_id, regexp=regexp) + annotations.description, event_id=event_id, regexp=regexp + ) if chunk_duration is None: - inds = raw.time_as_index(annotations.onset, use_rounding=use_rounding, - origin=annotations.orig_time) + inds = raw.time_as_index( + annotations.onset, use_rounding=use_rounding, origin=annotations.orig_time + ) if annotations.orig_time is not None: inds += raw.first_samp values = [event_id_[kk] for kk in annotations.description[event_sel]] @@ -1484,33 +1927,34 @@ def events_from_annotations(raw, event_id="auto", else: inds = values = np.array([]).astype(int) for annot in annotations[event_sel]: - annot_offset = annot['onset'] + annot['duration'] - _onsets = np.arange(start=annot['onset'], stop=annot_offset, - step=chunk_duration) - good_events = annot_offset - _onsets >= chunk_duration + annot_offset = annot["onset"] + annot["duration"] + _onsets = np.arange(annot["onset"], annot_offset, chunk_duration) + good_events = annot_offset - _onsets >= chunk_duration - tol if good_events.any(): _onsets = _onsets[good_events] - _inds = raw.time_as_index(_onsets, - use_rounding=use_rounding, - origin=annotations.orig_time) + _inds = raw.time_as_index( + _onsets, use_rounding=use_rounding, origin=annotations.orig_time + ) _inds += raw.first_samp inds = np.append(inds, _inds) - _values = np.full(shape=len(_inds), - fill_value=event_id_[annot['description']], - dtype=int) + _values = np.full( + shape=len(_inds), + fill_value=event_id_[annot["description"]], + dtype=int, + ) values = np.append(values, _values) events = np.c_[inds, np.zeros(len(inds)), values].astype(int) - logger.info('Used Annotations descriptions: %s' % - (list(event_id_.keys()),)) + logger.info(f"Used Annotations descriptions: {list(event_id_.keys())}") return events, event_id_ @verbose -def annotations_from_events(events, sfreq, event_desc=None, first_samp=0, - orig_time=None, verbose=None): +def annotations_from_events( + events, sfreq, event_desc=None, first_samp=0, orig_time=None, verbose=None +): """Convert an event array to an Annotations object. Parameters @@ -1568,10 +2012,9 @@ def annotations_from_events(events, sfreq, event_desc=None, first_samp=0, durations = np.zeros(len(events_sel)) # dummy durations # Create annotations - annots = Annotations(onset=onsets, - duration=durations, - description=descriptions, - orig_time=orig_time) + annots = Annotations( + onset=onsets, duration=durations, description=descriptions, orig_time=orig_time + ) return annots @@ -1580,5 +2023,29 @@ def _adjust_onset_meas_date(annot, raw): """Adjust the annotation onsets based on raw meas_date.""" # If there is a non-None meas date, then the onset should take into # account the first_samp / first_time. - if raw.info['meas_date'] is not None: + if raw.info["meas_date"] is not None: annot.onset += raw.first_time + + +def count_annotations(annotations): + """Count annotations. + + Parameters + ---------- + annotations : mne.Annotations + The annotations instance. + + Returns + ------- + counts : dict + A dictionary containing unique annotation descriptions as keys with their + counts as values. 
+ + Examples + -------- + >>> annotations = mne.Annotations([0, 1, 2], [1, 2, 1], ["T0", "T1", "T0"]) + >>> count_annotations(annotations) + {'T0': 2, 'T1': 1} + """ + types, counts = np.unique(annotations.description, return_counts=True) + return {str(t): int(count) for t, count in zip(types, counts)} diff --git a/mne/baseline.py b/mne/baseline.py index 10b868b46f9..4e73ed0ce95 100644 --- a/mne/baseline.py +++ b/mne/baseline.py @@ -1,28 +1,30 @@ """Utility functions to baseline-correct data.""" -# Authors: Alexandre Gramfort -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np -from .utils import logger, verbose, _check_option +from .utils import _check_option, _validate_type, logger, verbose -def _log_rescale(baseline, mode='mean'): +def _log_rescale(baseline, mode="mean"): """Log the rescaling method.""" if baseline is not None: - _check_option('mode', mode, ['logratio', 'ratio', 'zscore', 'mean', - 'percent', 'zlogratio']) - msg = 'Applying baseline correction (mode: %s)' % mode + _check_option( + "mode", + mode, + ["logratio", "ratio", "zscore", "mean", "percent", "zlogratio"], + ) + msg = f"Applying baseline correction (mode: {mode})" else: - msg = 'No baseline correction applied' + msg = "No baseline correction applied" return msg @verbose -def rescale(data, times, baseline, mode='mean', copy=True, picks=None, - verbose=None): +def rescale(data, times, baseline, mode="mean", copy=True, picks=None, verbose=None): """Rescale (baseline correct) data. Parameters @@ -73,44 +75,59 @@ def rescale(data, times, baseline, mode='mean', copy=True, picks=None, else: imin = np.where(times >= bmin)[0] if len(imin) == 0: - raise ValueError('bmin is too large (%s), it exceeds the largest ' - 'time value' % (bmin,)) + raise ValueError( + f"bmin is too large ({bmin}), it exceeds the largest time value" + ) imin = int(imin[0]) if bmax is None: imax = len(times) else: imax = np.where(times <= bmax)[0] if len(imax) == 0: - raise ValueError('bmax is too small (%s), it is smaller than the ' - 'smallest time value' % (bmax,)) + raise ValueError( + f"bmax is too small ({bmax}), it is smaller than the smallest time " + "value" + ) imax = int(imax[-1]) + 1 if imin >= imax: - raise ValueError('Bad rescaling slice (%s:%s) from time values %s, %s' - % (imin, imax, bmin, bmax)) + raise ValueError( + f"Bad rescaling slice ({imin}:{imax}) from time values {bmin}, {bmax}" + ) # technically this is inefficient when `picks` is given, but assuming # that we generally pick most channels for rescaling, it's not so bad mean = np.mean(data[..., imin:imax], axis=-1, keepdims=True) - if mode == 'mean': + if mode == "mean": + def fun(d, m): d -= m - elif mode == 'ratio': + + elif mode == "ratio": + def fun(d, m): d /= m - elif mode == 'logratio': + + elif mode == "logratio": + def fun(d, m): d /= m np.log10(d, out=d) - elif mode == 'percent': + + elif mode == "percent": + def fun(d, m): d -= m d /= m - elif mode == 'zscore': + + elif mode == "zscore": + def fun(d, m): d -= m d /= np.std(d[..., imin:imax], axis=-1, keepdims=True) - elif mode == 'zlogratio': + + elif mode == "zlogratio": + def fun(d, m): d /= m np.log10(d, out=d) @@ -124,15 +141,15 @@ def fun(d, m): return data -def _check_baseline(baseline, times, sfreq, on_baseline_outside_data='raise'): - """Check if the baseline is valid, and adjust it if requested. 
+def _check_baseline(baseline, times, sfreq, on_baseline_outside_data="raise"): + """Check if the baseline is valid and adjust it if requested. - ``None`` values inside the baseline parameter will be replaced with - ``times[0]`` and ``times[-1]``. + ``None`` values inside ``baseline`` will be replaced with ``times[0]`` and + ``times[-1]``. Parameters ---------- - baseline : tuple | None + baseline : array-like, shape (2,) | None Beginning and end of the baseline period, in seconds. If ``None``, assume no baseline and return immediately. times : array @@ -140,34 +157,38 @@ def _check_baseline(baseline, times, sfreq, on_baseline_outside_data='raise'): sfreq : float The sampling rate. on_baseline_outside_data : 'raise' | 'info' | 'adjust' - What do do if the baseline period exceeds the data. + What to do if the baseline period exceeds the data. If ``'raise'``, raise an exception (default). If ``'info'``, log an info message. - If ``'adjust'``, adjust the baseline such that it's within the data - range again. + If ``'adjust'``, adjust the baseline such that it is within the data range. Returns ------- (baseline_tmin, baseline_tmax) | None - The baseline with ``None`` values replaced with times, and with - adjusted times if ``on_baseline_outside_data='adjust'``; or ``None`` - if the ``baseline`` parameter is ``None``. - + The baseline with ``None`` values replaced with times, and with adjusted times + if ``on_baseline_outside_data='adjust'``; or ``None``, if ``baseline`` is + ``None``. """ if baseline is None: return None - if not isinstance(baseline, tuple) or len(baseline) != 2: - raise ValueError(f'`baseline={baseline}` is an invalid argument, must ' - f'be a tuple of length 2 or None') + _validate_type(baseline, "array-like") + baseline = tuple(baseline) + + if len(baseline) != 2: + raise ValueError( + f"baseline must have exactly two elements (got {len(baseline)})." + ) tmin, tmax = times[0], times[-1] - tstep = 1. / float(sfreq) + tstep = 1.0 / float(sfreq) # check default value of baseline and `tmin=0` if baseline == (None, 0) and tmin == 0: - raise ValueError('Baseline interval is only one sample. Use ' - '`baseline=(0, 0)` if this is desired.') + raise ValueError( + "Baseline interval is only one sample. Use `baseline=(0, 0)` if this is " + "desired." + ) baseline_tmin, baseline_tmax = baseline @@ -181,18 +202,20 @@ def _check_baseline(baseline, times, sfreq, on_baseline_outside_data='raise'): if baseline_tmin > baseline_tmax: raise ValueError( - "Baseline min (%s) must be less than baseline max (%s)" - % (baseline_tmin, baseline_tmax)) + f"Baseline min ({baseline_tmin}) must be less than baseline max (" + f"{baseline_tmax})" + ) if (baseline_tmin < tmin - tstep) or (baseline_tmax > tmax + tstep): - msg = (f"Baseline interval [{baseline_tmin}, {baseline_tmax}] s " - f"is outside of epochs data [{tmin}, {tmax}] s. Epochs were " - f"probably cropped.") - if on_baseline_outside_data == 'raise': + msg = ( + f"Baseline interval [{baseline_tmin}, {baseline_tmax}] s is outside of " + f"epochs data [{tmin}, {tmax}] s. Epochs were probably cropped." 
+ ) + if on_baseline_outside_data == "raise": raise ValueError(msg) - elif on_baseline_outside_data == 'info': + elif on_baseline_outside_data == "info": logger.info(msg) - elif on_baseline_outside_data == 'adjust': + elif on_baseline_outside_data == "adjust": if baseline_tmin < tmin - tstep: baseline_tmin = tmin if baseline_tmax > tmax + tstep: diff --git a/mne/beamformer/__init__.py b/mne/beamformer/__init__.py index b82add2a7cc..16a5b363eb6 100644 --- a/mne/beamformer/__init__.py +++ b/mne/beamformer/__init__.py @@ -1,9 +1,8 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Beamformers for source localization.""" +import lazy_loader as lazy -from ._lcmv import (make_lcmv, apply_lcmv, apply_lcmv_epochs, apply_lcmv_raw, - apply_lcmv_cov) -from ._dics import (make_dics, apply_dics, apply_dics_epochs, - apply_dics_tfr_epochs, apply_dics_csd) -from ._rap_music import rap_music -from ._compute_beamformer import Beamformer, read_beamformer -from .resolution_matrix import make_lcmv_resolution_matrix +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/beamformer/__init__.pyi b/mne/beamformer/__init__.pyi new file mode 100644 index 00000000000..f7d6eb9d203 --- /dev/null +++ b/mne/beamformer/__init__.pyi @@ -0,0 +1,34 @@ +__all__ = [ + "Beamformer", + "apply_dics", + "apply_dics_csd", + "apply_dics_epochs", + "apply_dics_tfr_epochs", + "apply_lcmv", + "apply_lcmv_cov", + "apply_lcmv_epochs", + "apply_lcmv_raw", + "make_dics", + "make_lcmv", + "make_lcmv_resolution_matrix", + "rap_music", + "read_beamformer", + "trap_music", +] +from ._compute_beamformer import Beamformer, read_beamformer +from ._dics import ( + apply_dics, + apply_dics_csd, + apply_dics_epochs, + apply_dics_tfr_epochs, + make_dics, +) +from ._lcmv import ( + apply_lcmv, + apply_lcmv_cov, + apply_lcmv_epochs, + apply_lcmv_raw, + make_lcmv, +) +from ._rap_music import rap_music, trap_music +from .resolution_matrix import make_lcmv_resolution_matrix diff --git a/mne/beamformer/_compute_beamformer.py b/mne/beamformer/_compute_beamformer.py index bfb547e9712..16bedc2c317 100644 --- a/mne/beamformer/_compute_beamformer.py +++ b/mne/beamformer/_compute_beamformer.py @@ -1,92 +1,135 @@ """Functions shared between different beamformer types.""" -# Authors: Alexandre Gramfort -# Roman Goj -# Britta Westner -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
from copy import deepcopy import numpy as np +from .._fiff.proj import Projection, make_projector from ..cov import Covariance, make_ad_hoc_cov -from ..forward.forward import is_fixed_orient, _restrict_forward_to_src_sel -from ..io.proj import make_projector, Projection +from ..forward.forward import _restrict_forward_to_src_sel, is_fixed_orient from ..minimum_norm.inverse import _get_vertno, _prepare_forward -from ..source_space import label_src_vertno_sel -from ..utils import (verbose, check_fname, _reg_pinv, _check_option, logger, - _pl, _check_src_normal, _sym_mat_pow, warn, - _import_h5io_funcs) +from ..source_space._source_space import label_src_vertno_sel from ..time_frequency.csd import CrossSpectralDensity +from ..utils import ( + _check_option, + _check_src_normal, + _import_h5io_funcs, + _pl, + _reg_pinv, + _sym_mat_pow, + check_fname, + logger, + verbose, + warn, +) def _check_proj_match(proj, filters): """Check whether SSP projections in data and spatial filter match.""" - proj_data, _, _ = make_projector(proj, filters['ch_names']) - if not np.allclose(proj_data, filters['proj'], - atol=np.finfo(float).eps, rtol=1e-13): - raise ValueError('The SSP projections present in the data ' - 'do not match the projections used when ' - 'calculating the spatial filter.') + proj_data, _, _ = make_projector(proj, filters["ch_names"]) + if not np.allclose( + proj_data, filters["proj"], atol=np.finfo(float).eps, rtol=1e-13 + ): + raise ValueError( + "The SSP projections present in the data " + "do not match the projections used when " + "calculating the spatial filter." + ) def _check_src_type(filters): """Check whether src_type is in filters and set custom warning.""" - if 'src_type' not in filters: - filters['src_type'] = None - warn_text = ('The spatial filter does not contain src_type and a robust ' - 'guess of src_type is not possible without src. Consider ' - 'recomputing the filter.') + if "src_type" not in filters: + filters["src_type"] = None + warn_text = ( + "The spatial filter does not contain src_type and a robust " + "guess of src_type is not possible without src. Consider " + "recomputing the filter." + ) return filters, warn_text -def _prepare_beamformer_input(info, forward, label=None, pick_ori=None, - noise_cov=None, rank=None, pca=False, loose=None, - combine_xyz='fro', exp=None, limit=None, - allow_fixed_depth=True, limit_depth_chs=False): +def _prepare_beamformer_input( + info, + forward, + label=None, + pick_ori=None, + noise_cov=None, + rank=None, + pca=False, + loose=None, + combine_xyz="fro", + exp=None, + limit=None, + allow_fixed_depth=True, + limit_depth_chs=False, +): """Input preparation common for LCMV, DICS, and RAP-MUSIC.""" - _check_option('pick_ori', pick_ori, - ('normal', 'max-power', 'vector', None)) + _check_option("pick_ori", pick_ori, ("normal", "max-power", "vector", None)) # Restrict forward solution to selected vertices if label is not None: - _, src_sel = label_src_vertno_sel(label, forward['src']) + _, src_sel = label_src_vertno_sel(label, forward["src"]) forward = _restrict_forward_to_src_sel(forward, src_sel) if loose is None: - loose = 0. if is_fixed_orient(forward) else 1. + loose = 0.0 if is_fixed_orient(forward) else 1.0 # TODO: Deduplicate with _check_one_ch_type, should not be necessary # (DICS hits this code path, LCMV does not) if noise_cov is None: - noise_cov = make_ad_hoc_cov(info, std=1.) 
- forward, info_picked, gain, _, orient_prior, _, trace_GRGT, noise_cov, \ - whitener = _prepare_forward( - forward, info, noise_cov, 'auto', loose, rank=rank, pca=pca, - use_cps=True, exp=exp, limit_depth_chs=limit_depth_chs, - combine_xyz=combine_xyz, limit=limit, - allow_fixed_depth=allow_fixed_depth) + noise_cov = make_ad_hoc_cov(info, std=1.0) + ( + forward, + info_picked, + gain, + _, + orient_prior, + _, + trace_GRGT, + noise_cov, + whitener, + ) = _prepare_forward( + forward, + info, + noise_cov, + "auto", + loose, + rank=rank, + pca=pca, + use_cps=True, + exp=exp, + limit_depth_chs=limit_depth_chs, + combine_xyz=combine_xyz, + limit=limit, + allow_fixed_depth=allow_fixed_depth, + ) is_free_ori = not is_fixed_orient(forward) # could have been changed - nn = forward['source_nn'] + nn = forward["source_nn"] if is_free_ori: # take Z coordinate nn = nn[2::3] nn = nn.copy() - vertno = _get_vertno(forward['src']) - if forward['surf_ori']: + vertno = _get_vertno(forward["src"]) + if forward["surf_ori"]: nn[...] = [0, 0, 1] # align to local +Z coordinate if pick_ori is not None and not is_free_ori: raise ValueError( - 'Normal or max-power orientation (got %r) can only be picked when ' - 'a forward operator with free orientation is used.' % (pick_ori,)) - if pick_ori == 'normal' and not forward['surf_ori']: - raise ValueError('Normal orientation can only be picked when a ' - 'forward operator oriented in surface coordinates is ' - 'used.') - _check_src_normal(pick_ori, forward['src']) + f"Normal or max-power orientation (got {pick_ori!r}) can only be picked " + "when a forward operator with free orientation is used." + ) + if pick_ori == "normal" and not forward["surf_ori"]: + raise ValueError( + "Normal orientation can only be picked when a forward operator oriented in " + "surface coordinates is used." + ) + _check_src_normal(pick_ori, forward["src"]) del forward, info # Undo the scaling that MNE prefers - scale = np.sqrt((noise_cov['eig'] > 0).sum() / trace_GRGT) + scale = np.sqrt((noise_cov["eig"] > 0).sum() / trace_GRGT) gain /= scale if orient_prior is not None: orient_std = np.sqrt(orient_prior) @@ -94,10 +137,8 @@ def _prepare_beamformer_input(info, forward, label=None, pick_ori=None, orient_std = np.ones(gain.shape[1]) # Get the projector - proj, _, _ = make_projector( - info_picked['projs'], info_picked['ch_names']) - return (is_free_ori, info_picked, proj, vertno, gain, whitener, nn, - orient_std) + proj, _, _ = make_projector(info_picked["projs"], info_picked["ch_names"]) + return (is_free_ori, info_picked, proj, vertno, gain, whitener, nn, orient_std) def _reduce_leadfield_rank(G): @@ -115,12 +156,12 @@ def _reduce_leadfield_rank(G): def _sym_inv_sm(x, reduce_rank, inversion, sk): """Symmetric inversion with single- or matrix-style inversion.""" if x.shape[1:] == (1, 1): - with np.errstate(divide='ignore', invalid='ignore'): - x_inv = 1. / x - x_inv[~np.isfinite(x_inv)] = 1. + with np.errstate(divide="ignore", invalid="ignore"): + x_inv = 1.0 / x + x_inv[~np.isfinite(x_inv)] = 1.0 else: assert x.shape[1:] == (3, 3) - if inversion == 'matrix': + if inversion == "matrix": x_inv = _sym_mat_pow(x, -1, reduce_rank=reduce_rank) # Reapply source covariance after inversion x_inv *= sk[:, :, np.newaxis] @@ -128,22 +169,33 @@ def _sym_inv_sm(x, reduce_rank, inversion, sk): else: # Invert for each dipole separately using plain division diags = np.diagonal(x, axis1=1, axis2=2) - assert not reduce_rank # guaranteed earlier - with np.errstate(divide='ignore'): - diags = 1. 
/ diags + assert not reduce_rank # guaranteed earlier + with np.errstate(divide="ignore"): + diags = 1.0 / diags # set the diagonal of each 3x3 x_inv = np.zeros_like(x) for k in range(x.shape[0]): this = diags[k] # Reapply source covariance after inversion - this *= (sk[k] * sk[k]) + this *= sk[k] * sk[k] x_inv[k].flat[::4] = this return x_inv -def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, - reduce_rank, rank, inversion, nn, orient_std, - whitener): +def _compute_beamformer( + G, + Cm, + reg, + n_orient, + weight_norm, + pick_ori, + reduce_rank, + rank, + inversion, + nn, + orient_std, + whitener, +): """Compute a spatial beamformer filter (LCMV or DICS). For more detailed information on the parameters, see the docstrings of @@ -181,22 +233,26 @@ def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, W : ndarray, shape (n_dipoles, n_channels) The beamformer filter weights. """ - _check_option('weight_norm', weight_norm, - ['unit-noise-gain-invariant', 'unit-noise-gain', - 'nai', None]) + _check_option( + "weight_norm", + weight_norm, + ["unit-noise-gain-invariant", "unit-noise-gain", "nai", None], + ) # Whiten the data covariance Cm = whitener @ Cm @ whitener.T.conj() # Restore to properly Hermitian as large whitening coefs can have bad # rounding error - Cm[:] = (Cm + Cm.T.conj()) / 2. + Cm[:] = (Cm + Cm.T.conj()) / 2.0 assert Cm.shape == (G.shape[0],) * 2 s, _ = np.linalg.eigh(Cm) if not (s >= -s.max() * 1e-7).all(): # This shouldn't ever happen, but just in case - warn('data covariance does not appear to be positive semidefinite, ' - 'results will likely be incorrect') + warn( + "data covariance does not appear to be positive semidefinite, " + "results will likely be incorrect" + ) # Tikhonov regularization using reg parameter to control for # trade-off between spatial resolution and noise sensitivity # eq. 25 in Gross and Ioannides, 1999 Phys. Med. Biol. 
44 2081 @@ -206,8 +262,7 @@ def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, n_sources = G.shape[1] // n_orient assert nn.shape == (n_sources, 3) - logger.info('Computing beamformer filters for %d source%s' - % (n_sources, _pl(n_sources))) + logger.info(f"Computing beamformer filters for {n_sources} source{_pl(n_sources)}") n_channels = G.shape[0] assert n_orient in (3, 1) Gk = np.reshape(G.T, (n_sources, n_orient, n_channels)).transpose(0, 2, 1) @@ -215,29 +270,37 @@ def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, sk = np.reshape(orient_std, (n_sources, n_orient)) del G, orient_std - _check_option('reduce_rank', reduce_rank, (True, False)) + _check_option("reduce_rank", reduce_rank, (True, False)) # inversion of the denominator - _check_option('inversion', inversion, ('matrix', 'single')) - if inversion == 'single' and n_orient > 1 and pick_ori == 'vector' and \ - weight_norm == 'unit-noise-gain-invariant': + _check_option("inversion", inversion, ("matrix", "single")) + if ( + inversion == "single" + and n_orient > 1 + and pick_ori == "vector" + and weight_norm == "unit-noise-gain-invariant" + ): raise ValueError( 'Cannot use pick_ori="vector" with inversion="single" and ' - 'weight_norm="unit-noise-gain-invariant"') - if reduce_rank and inversion == 'single': - raise ValueError('reduce_rank cannot be used with inversion="single"; ' - 'consider using inversion="matrix" if you have a ' - 'rank-deficient forward model (i.e., from a sphere ' - 'model with MEG channels), otherwise consider using ' - 'reduce_rank=False') + 'weight_norm="unit-noise-gain-invariant"' + ) + if reduce_rank and inversion == "single": + raise ValueError( + 'reduce_rank cannot be used with inversion="single"; ' + 'consider using inversion="matrix" if you have a ' + "rank-deficient forward model (i.e., from a sphere " + "model with MEG channels), otherwise consider using " + "reduce_rank=False" + ) if n_orient > 1: _, Gk_s, _ = np.linalg.svd(Gk, full_matrices=False) assert Gk_s.shape == (n_sources, n_orient) if not reduce_rank and (Gk_s[:, 0] > 1e6 * Gk_s[:, 2]).any(): raise ValueError( - 'Singular matrix detected when estimating spatial filters. ' - 'Consider reducing the rank of the forward operator by using ' - 'reduce_rank=True.') + "Singular matrix detected when estimating spatial filters. " + "Consider reducing the rank of the forward operator by using " + "reduce_rank=True." + ) del Gk_s # @@ -254,7 +317,7 @@ def _compute_bf_terms(Gk, Cm_inv): # # 2. Reorient lead field in direction of max power or normal # - if pick_ori == 'max-power': + if pick_ori == "max-power": assert n_orient == 3 _, bf_denom = _compute_bf_terms(Gk, Cm_inv) if weight_norm is None: @@ -265,7 +328,8 @@ def _compute_bf_terms(Gk, Cm_inv): ori_numer = bf_denom # Cm_inv should be Hermitian so no need for .T.conj() ori_denom = np.matmul( - np.matmul(Gk.swapaxes(-2, -1).conj(), Cm_inv @ Cm_inv), Gk) + np.matmul(Gk.swapaxes(-2, -1).conj(), Cm_inv @ Cm_inv), Gk + ) ori_denom_inv = _sym_inv_sm(ori_denom, reduce_rank, inversion, sk) ori_pick = np.matmul(ori_denom_inv, ori_numer) assert ori_pick.shape == (n_sources, n_orient, n_orient) @@ -280,7 +344,7 @@ def _compute_bf_terms(Gk, Cm_inv): # set the (otherwise arbitrary) sign to match the normal signs = np.sign(np.sum(max_power_ori * nn, axis=1, keepdims=True)) - signs[signs == 0] = 1. 
+ signs[signs == 0] = 1.0 max_power_ori *= signs # Compute the lead field for the optimal orientation, @@ -289,7 +353,7 @@ def _compute_bf_terms(Gk, Cm_inv): n_orient = 1 else: max_power_ori = None - if pick_ori == 'normal': + if pick_ori == "normal": Gk = Gk[..., 2:3] n_orient = 1 @@ -338,16 +402,17 @@ def _compute_bf_terms(Gk, Cm_inv): # # Sekihara 2008 says to use sqrt(diag(W_ug @ W_ug.T)), which is not # rotation invariant: - if weight_norm in ('unit-noise-gain', 'nai'): + if weight_norm in ("unit-noise-gain", "nai"): noise_norm = np.matmul(W, W.swapaxes(-2, -1).conj()).real noise_norm = np.reshape( # np.diag operation over last two axes - noise_norm, (n_sources, -1, 1))[:, ::n_orient + 1] + noise_norm, (n_sources, -1, 1) + )[:, :: n_orient + 1] np.sqrt(noise_norm, out=noise_norm) noise_norm[noise_norm == 0] = np.inf assert noise_norm.shape == (n_sources, n_orient, 1) W /= noise_norm else: - assert weight_norm == 'unit-noise-gain-invariant' + assert weight_norm == "unit-noise-gain-invariant" # Here we use sqrtm. The shortcut: # # use = W @@ -357,9 +422,9 @@ def _compute_bf_terms(Gk, Cm_inv): use = bf_numer inner = np.matmul(use, use.swapaxes(-2, -1).conj()) W = np.matmul(_sym_mat_pow(inner, -0.5), use) - noise_norm = 1. + noise_norm = 1.0 - if weight_norm == 'nai': + if weight_norm == "nai": # Estimate noise level based on covariance matrix, taking the # first eigenvalue that falls outside the signal subspace or the # loading factor used during regularization, whichever is largest. @@ -368,10 +433,11 @@ def _compute_bf_terms(Gk, Cm_inv): # Use the loading factor as noise ceiling. if loading_factor == 0: raise RuntimeError( - 'Cannot compute noise subspace with a full-rank ' - 'covariance matrix and no regularization. Try ' - 'manually specifying the rank of the covariance ' - 'matrix or using regularization.') + "Cannot compute noise subspace with a full-rank " + "covariance matrix and no regularization. Try " + "manually specifying the rank of the covariance " + "matrix or using regularization." + ) noise = loading_factor else: noise, _ = np.linalg.eigh(Cm) @@ -380,7 +446,7 @@ def _compute_bf_terms(Gk, Cm_inv): W /= np.sqrt(noise) W = W.reshape(n_sources * n_orient, n_channels) - logger.info('Filter computation complete') + logger.info("Filter computation complete") return W, max_power_ori @@ -402,8 +468,9 @@ def _compute_power(Cm, W, n_orient): n_sources = W.shape[0] // n_orient Wk = W.reshape(n_sources, n_orient, W.shape[1]) - source_power = np.trace((Wk @ Cm @ Wk.conj().transpose(0, 2, 1)).real, - axis1=1, axis2=2) + source_power = np.trace( + (Wk @ Cm @ Wk.conj().transpose(0, 2, 1)).real, axis1=1, axis2=2 + ) return source_power @@ -427,23 +494,27 @@ def copy(self): return deepcopy(self) def __repr__(self): # noqa: D105 - n_verts = sum(len(v) for v in self['vertices']) - n_channels = len(self['ch_names']) - if self['subject'] is None: - subject = 'unknown' + n_verts = sum(len(v) for v in self["vertices"]) + n_channels = len(self["ch_names"]) + if self["subject"] is None: + subject = "unknown" else: - subject = '"%s"' % (self['subject'],) - out = (' -# Britta Westner -# Susanna Aro -# Roman Goj -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
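(Editor's aside, not part of the diff.) Before the DICS-specific changes, a self-contained NumPy sketch of what `_compute_power` above computes: per-source power as trace(W_k Cm W_k^H), where W_k is the (n_orient x n_channels) weight block of source k. The shapes and the random toy covariance below are assumptions made purely for illustration:

import numpy as np

rng = np.random.default_rng(0)
n_channels, n_sources, n_orient = 8, 5, 3
W = rng.standard_normal((n_sources * n_orient, n_channels))  # stacked filter weights
Cm = rng.standard_normal((n_channels, n_channels))
Cm = Cm @ Cm.T                                               # toy (PSD) data covariance

Wk = W.reshape(n_sources, n_orient, n_channels)
source_power = np.trace((Wk @ Cm @ Wk.conj().transpose(0, 2, 1)).real,
                        axis1=1, axis2=2)                    # one power value per source
assert source_power.shape == (n_sources,)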
+ import numpy as np +from .._fiff.pick import pick_channels, pick_info from ..channels import equalize_channels -from ..io.pick import pick_info, pick_channels -from ..utils import (logger, verbose, _check_one_ch_type, - _check_channels_spatial_filter, _check_rank, - _check_option, _validate_type, warn) from ..forward import _subject_from_forward -from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth +from ..minimum_norm.inverse import _check_depth, _check_reference, combine_xyz from ..rank import compute_rank -from ..source_estimate import _make_stc, _get_src_type +from ..source_estimate import _get_src_type, _make_stc from ..time_frequency import EpochsTFR from ..time_frequency.tfr import _check_tfr_complex -from ._compute_beamformer import (_prepare_beamformer_input, - _compute_beamformer, _check_src_type, - Beamformer, _compute_power, - _proj_whiten_data) +from ..utils import ( + _check_channels_spatial_filter, + _check_one_ch_type, + _check_option, + _check_rank, + _validate_type, + logger, + verbose, + warn, +) +from ._compute_beamformer import ( + Beamformer, + _check_src_type, + _compute_beamformer, + _compute_power, + _prepare_beamformer_input, + _proj_whiten_data, +) @verbose -def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, - pick_ori=None, rank=None, weight_norm=None, - reduce_rank=False, depth=1., real_filter=True, - inversion='matrix', verbose=None): +def make_dics( + info, + forward, + csd, + reg=0.05, + noise_csd=None, + label=None, + pick_ori=None, + rank=None, + weight_norm=None, + reduce_rank=False, + depth=1.0, + real_filter=True, + inversion="matrix", + verbose=None, +): """Compute a Dynamic Imaging of Coherent Sources (DICS) spatial filter. This is a beamformer filter that can be used to estimate the source power @@ -166,57 +187,73 @@ def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, .. 
footbibliography:: """ # noqa: E501 rank = _check_rank(rank) - _check_option('pick_ori', pick_ori, - [None, 'vector', 'normal', 'max-power']) - _check_option('inversion', inversion, ['single', 'matrix']) - _validate_type(weight_norm, (str, None), 'weight_norm') + _check_option("pick_ori", pick_ori, [None, "vector", "normal", "max-power"]) + _check_option("inversion", inversion, ["single", "matrix"]) + _validate_type(weight_norm, (str, None), "weight_norm") frequencies = [np.mean(freq_bin) for freq_bin in csd.frequencies] n_freqs = len(frequencies) - _, _, allow_mismatch = _check_one_ch_type('dics', info, forward, csd, - noise_csd) + _, _, allow_mismatch = _check_one_ch_type("dics", info, forward, csd, noise_csd) # remove bads so that equalize_channels only keeps all good - info = pick_info(info, pick_channels(info['ch_names'], [], info['bads'])) + info = pick_info(info, pick_channels(info["ch_names"], [], info["bads"])) info, forward, csd = equalize_channels([info, forward, csd]) csd, noise_csd = _prepare_noise_csd(csd, noise_csd, real_filter) - depth = _check_depth(depth, 'depth_sparse') - if inversion == 'single': - depth['combine_xyz'] = False - - is_free_ori, info, proj, vertices, G, whitener, nn, orient_std = \ - _prepare_beamformer_input( - info, forward, label, pick_ori, noise_cov=noise_csd, rank=rank, - pca=False, **depth) + depth = _check_depth(depth, "depth_sparse") + if inversion == "single": + depth["combine_xyz"] = False + + ( + is_free_ori, + info, + proj, + vertices, + G, + whitener, + nn, + orient_std, + ) = _prepare_beamformer_input( + info, + forward, + label, + pick_ori, + noise_cov=noise_csd, + rank=rank, + pca=False, + **depth, + ) # Compute ranks csd_int_rank = [] if not allow_mismatch: noise_rank = compute_rank(noise_csd, info=info, rank=rank) for i in range(len(frequencies)): - csd_rank = compute_rank(csd.get_data(index=i, as_cov=True), - info=info, rank=rank) + csd_rank = compute_rank( + csd.get_data(index=i, as_cov=True), info=info, rank=rank + ) if not allow_mismatch: for key in csd_rank: if key not in noise_rank or csd_rank[key] != noise_rank[key]: - raise ValueError('%s data rank (%s) did not match the ' - 'noise rank (%s)' - % (key, csd_rank[key], - noise_rank.get(key, None))) + raise ValueError( + f"{key} data rank ({csd_rank[key]}) did not match the noise " + f"rank ({noise_rank.get(key, None)})" + ) csd_int_rank.append(sum(csd_rank.values())) del noise_csd - ch_names = list(info['ch_names']) + ch_names = list(info["ch_names"]) - logger.info('Computing DICS spatial filters...') + logger.info("Computing DICS spatial filters...") Ws = [] max_oris = [] for i, freq in enumerate(frequencies): if n_freqs > 1: - logger.info(' computing DICS spatial filter at ' - f'{round(freq, 2)} Hz ({i + 1}/{n_freqs})') + logger.info( + " computing DICS spatial filter at " + f"{round(freq, 2)} Hz ({i + 1}/{n_freqs})" + ) Cm = csd.get_data(index=i) @@ -228,29 +265,51 @@ def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, # compute spatial filter n_orient = 3 if is_free_ori else 1 W, max_power_ori = _compute_beamformer( - G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, - rank=csd_int_rank[i], inversion=inversion, nn=nn, - orient_std=orient_std, whitener=whitener) + G, + Cm, + reg, + n_orient, + weight_norm, + pick_ori, + reduce_rank, + rank=csd_int_rank[i], + inversion=inversion, + nn=nn, + orient_std=orient_std, + whitener=whitener, + ) Ws.append(W) max_oris.append(max_power_ori) Ws = np.array(Ws) - if pick_ori == 'max-power': + if pick_ori == 
"max-power": max_oris = np.array(max_oris) else: max_oris = None - src_type = _get_src_type(forward['src'], vertices) + src_type = _get_src_type(forward["src"], vertices) subject = _subject_from_forward(forward) - is_free_ori = is_free_ori if pick_ori in [None, 'vector'] else False + is_free_ori = is_free_ori if pick_ori in [None, "vector"] else False n_sources = np.sum([len(v) for v in vertices]) filters = Beamformer( - kind='DICS', weights=Ws, csd=csd, ch_names=ch_names, proj=proj, - vertices=vertices, n_sources=n_sources, subject=subject, - pick_ori=pick_ori, inversion=inversion, weight_norm=weight_norm, - src_type=src_type, source_nn=forward['source_nn'].copy(), - is_free_ori=is_free_ori, whitener=whitener, max_power_ori=max_oris) + kind="DICS", + weights=Ws, + csd=csd, + ch_names=ch_names, + proj=proj, + vertices=vertices, + n_sources=n_sources, + subject=subject, + pick_ori=pick_ori, + inversion=inversion, + weight_norm=weight_norm, + src_type=src_type, + source_nn=forward["source_nn"].copy(), + is_free_ori=is_free_ori, + whitener=whitener, + max_power_ori=max_oris, + ) return filters @@ -263,7 +322,7 @@ def _prepare_noise_csd(csd, noise_csd, real_filter): noise_csd = noise_csd.mean() noise_csd = noise_csd.get_data(as_cov=True) if real_filter: - noise_csd['data'] = noise_csd['data'].real + noise_csd["data"] = noise_csd["data"].real return csd, noise_csd @@ -275,48 +334,54 @@ def _apply_dics(data, filters, info, tmin, tfr=False): else: one_epoch = False - Ws = filters['weights'] + Ws = filters["weights"] one_freq = len(Ws) == 1 - subject = filters['subject'] + subject = filters["subject"] # compatibility with 0.16, add src_type as None if not present: filters, warn_text = _check_src_type(filters) for i, M in enumerate(data): if not one_epoch: - logger.info("Processing epoch : %d" % (i + 1)) + logger.info(f"Processing epoch : {i + 1}") # Apply SSPs if not tfr: # save computation, only compute once - M_w = _proj_whiten_data(M, info['projs'], filters) + M_w = _proj_whiten_data(M, info["projs"], filters) stcs = [] for j, W in enumerate(Ws): - if tfr: # must compute for each frequency - M_w = _proj_whiten_data(M[:, j], info['projs'], filters) + M_w = _proj_whiten_data(M[:, j], info["projs"], filters) # project to source space using beamformer weights sol = np.dot(W, M_w) - if filters['is_free_ori'] and filters['pick_ori'] != 'vector': - logger.info('combining the current components...') + if filters["is_free_ori"] and filters["pick_ori"] != "vector": + logger.info("combining the current components...") sol = combine_xyz(sol) - tstep = 1.0 / info['sfreq'] - - stcs.append(_make_stc(sol, vertices=filters['vertices'], - src_type=filters['src_type'], tmin=tmin, - tstep=tstep, subject=subject, - vector=(filters['pick_ori'] == 'vector'), - source_nn=filters['source_nn'], - warn_text=warn_text)) + tstep = 1.0 / info["sfreq"] + + stcs.append( + _make_stc( + sol, + vertices=filters["vertices"], + src_type=filters["src_type"], + tmin=tmin, + tstep=tstep, + subject=subject, + vector=(filters["pick_ori"] == "vector"), + source_nn=filters["source_nn"], + warn_text=warn_text, + ) + ) if one_freq: yield stcs[0] else: yield stcs - logger.info('[done]') + logger.info("[done]") @verbose @@ -413,19 +478,19 @@ def apply_dics_epochs(epochs, filters, return_generator=False, verbose=None): """ _check_reference(epochs) - if len(filters['weights']) > 1: + if len(filters["weights"]) > 1: raise ValueError( - 'This function only works on DICS beamformer weights that have ' - 'been computed for a single frequency. 
When calling make_dics(), ' - 'make sure to use a CSD object with only a single frequency (or ' - 'frequency-bin) defined.' + "This function only works on DICS beamformer weights that have " + "been computed for a single frequency. When calling make_dics(), " + "make sure to use a CSD object with only a single frequency (or " + "frequency-bin) defined." ) info = epochs.info tmin = epochs.times[0] sel = _check_channels_spatial_filter(epochs.ch_names, filters) - data = epochs.get_data()[:, sel, :] + data = epochs.get_data(sel) stcs = _apply_dics(data=data, filters=filters, info=info, tmin=tmin) @@ -436,8 +501,7 @@ def apply_dics_epochs(epochs, filters, return_generator=False, verbose=None): @verbose -def apply_dics_tfr_epochs(epochs_tfr, filters, return_generator=False, - verbose=None): +def apply_dics_tfr_epochs(epochs_tfr, filters, return_generator=False, verbose=None): """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights. Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights @@ -466,22 +530,23 @@ def apply_dics_tfr_epochs(epochs_tfr, filters, return_generator=False, apply_dics apply_dics_epochs apply_dics_csd - """ # noqa E501 + """ # noqa E501 _validate_type(epochs_tfr, EpochsTFR) _check_tfr_complex(epochs_tfr) - if filters['pick_ori'] == 'vector': - warn('Using a vector solution to compute power will lead to ' - 'inaccurate directions (only in the first quadrent) ' - 'because power is a strictly positive (squared) metric. ' - 'Using singular value decomposition (SVD) to determine ' - 'the direction is not yet supported in MNE.') + if filters["pick_ori"] == "vector": + warn( + "Using a vector solution to compute power will lead to " + "inaccurate directions (only in the first quadrent) " + "because power is a strictly positive (squared) metric. " + "Using singular value decomposition (SVD) to determine " + "the direction is not yet supported in MNE." + ) sel = _check_channels_spatial_filter(epochs_tfr.ch_names, filters) data = epochs_tfr.data[:, sel, :, :] - stcs = _apply_dics(data, filters, epochs_tfr.info, - epochs_tfr.tmin, tfr=True) + stcs = _apply_dics(data, filters, epochs_tfr.info, epochs_tfr.tmin, tfr=True) if not return_generator: stcs = [[stc for stc in tfr_stcs] for tfr_stcs in stcs] return stcs @@ -531,12 +596,12 @@ def apply_dics_csd(csd, filters, verbose=None): ---------- .. 
footbibliography:: """ # noqa: E501 - ch_names = filters['ch_names'] - vertices = filters['vertices'] - n_orient = 3 if filters['is_free_ori'] else 1 - subject = filters['subject'] - whitener = filters['whitener'] - n_sources = filters['n_sources'] + ch_names = filters["ch_names"] + vertices = filters["vertices"] + n_orient = 3 if filters["is_free_ori"] else 1 + subject = filters["subject"] + whitener = filters["whitener"] + n_sources = filters["n_sources"] # If CSD is summed over multiple frequencies, take the average frequency frequencies = [np.mean(dfreq) for dfreq in csd.frequencies] @@ -547,27 +612,37 @@ def apply_dics_csd(csd, filters, verbose=None): # Ensure the CSD is in the same order as the weights csd_picks = [csd.ch_names.index(ch) for ch in ch_names] - logger.info('Computing DICS source power...') + logger.info("Computing DICS source power...") for i, freq in enumerate(frequencies): if n_freqs > 1: - logger.info(' applying DICS spatial filter at ' - f'{round(freq, 2)} Hz ({i + 1}/{n_freqs})') + logger.info( + " applying DICS spatial filter at " + f"{round(freq, 2)} Hz ({i + 1}/{n_freqs})" + ) Cm = csd.get_data(index=i) Cm = Cm[csd_picks, :][:, csd_picks] - W = filters['weights'][i] + W = filters["weights"][i] # Whiten the CSD Cm = np.dot(whitener, np.dot(Cm, whitener.conj().T)) source_power[:, i] = _compute_power(Cm, W, n_orient) - logger.info('[done]') + logger.info("[done]") # compatibility with 0.16, add src_type as None if not present: filters, warn_text = _check_src_type(filters) - return (_make_stc(source_power, vertices=vertices, - src_type=filters['src_type'], tmin=0., tstep=1., - subject=subject, warn_text=warn_text), - frequencies) + return ( + _make_stc( + source_power, + vertices=vertices, + src_type=filters["src_type"], + tmin=0.0, + tstep=1.0, + subject=subject, + warn_text=warn_text, + ), + frequencies, + ) diff --git a/mne/beamformer/_lcmv.py b/mne/beamformer/_lcmv.py index 61c45a8ec66..cd3b2910cce 100644 --- a/mne/beamformer/_lcmv.py +++ b/mne/beamformer/_lcmv.py @@ -1,30 +1,50 @@ """Compute Linearly constrained minimum variance (LCMV) beamformer.""" -# Authors: Alexandre Gramfort -# Roman Goj -# Britta Westner -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
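(Editor's aside, not part of the diff.) The DICS code above is exercised roughly as follows; a minimal usage sketch assuming an `epochs` object and a forward solution `fwd` already exist, with placeholder frequency and regularization values rather than recommendations:

from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd

csd = csd_morlet(epochs, frequencies=[20], n_cycles=10, decim=5)
filters = make_dics(epochs.info, fwd, csd, reg=0.05,
                    pick_ori="max-power", weight_norm="unit-noise-gain")
stc_power, freqs = apply_dics_csd(csd, filters)  # source power around 20 Hz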
+ import numpy as np -from ..rank import compute_rank -from ..io.meas_info import _simplify_info -from ..io.pick import pick_channels_cov, pick_info +from .._fiff.meas_info import _simplify_info +from .._fiff.pick import pick_channels_cov, pick_info from ..forward import _subject_from_forward -from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth -from ..source_estimate import _make_stc, _get_src_type -from ..utils import (logger, verbose, _check_channels_spatial_filter, - _check_one_ch_type, _check_info_inv) +from ..minimum_norm.inverse import _check_depth, _check_reference, combine_xyz +from ..rank import compute_rank +from ..source_estimate import _get_src_type, _make_stc +from ..utils import ( + _check_channels_spatial_filter, + _check_info_inv, + _check_one_ch_type, + logger, + verbose, +) from ._compute_beamformer import ( - _prepare_beamformer_input, _compute_power, - _compute_beamformer, _check_src_type, Beamformer, _proj_whiten_data) + Beamformer, + _check_src_type, + _compute_beamformer, + _compute_power, + _prepare_beamformer_input, + _proj_whiten_data, +) @verbose -def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, - pick_ori=None, rank='info', - weight_norm='unit-noise-gain-invariant', - reduce_rank=False, depth=None, inversion='matrix', verbose=None): +def make_lcmv( + info, + forward, + data_cov, + reg=0.05, + noise_cov=None, + label=None, + pick_ori=None, + rank="info", + weight_norm="unit-noise-gain-invariant", + reduce_rank=False, + depth=None, + inversion="matrix", + verbose=None, +): """Compute LCMV spatial filter. Parameters @@ -142,9 +162,10 @@ def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, .. footbibliography:: """ # check number of sensor types present in the data and ensure a noise cov - info = _simplify_info(info) + info = _simplify_info(info, keep=("proc_history",)) noise_cov, _, allow_mismatch = _check_one_ch_type( - 'lcmv', info, forward, data_cov, noise_cov) + "lcmv", info, forward, data_cov, noise_cov + ) # XXX we need this extra picking step (can't just rely on minimum norm's # because there can be a mismatch. 
Should probably add an extra arg to # _prepare_beamformer_input at some point (later) @@ -153,58 +174,97 @@ def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, data_rank = compute_rank(data_cov, rank=rank, info=info) noise_rank = compute_rank(noise_cov, rank=rank, info=info) for key in data_rank: - if (key not in noise_rank or data_rank[key] != noise_rank[key]) and \ - not allow_mismatch: - raise ValueError('%s data rank (%s) did not match the noise ' - 'rank (%s)' - % (key, data_rank[key], - noise_rank.get(key, None))) + if ( + key not in noise_rank or data_rank[key] != noise_rank[key] + ) and not allow_mismatch: + raise ValueError( + f"{key} data rank ({data_rank[key]}) did not match the noise rank (" + f"{noise_rank.get(key, None)})" + ) del noise_rank rank = data_rank - logger.info('Making LCMV beamformer with rank %s' % (rank,)) + logger.info(f"Making LCMV beamformer with rank {rank}") del data_rank - depth = _check_depth(depth, 'depth_sparse') - if inversion == 'single': - depth['combine_xyz'] = False - - is_free_ori, info, proj, vertno, G, whitener, nn, orient_std = \ - _prepare_beamformer_input( - info, forward, label, pick_ori, noise_cov=noise_cov, rank=rank, - pca=False, **depth) - ch_names = list(info['ch_names']) + depth = _check_depth(depth, "depth_sparse") + if inversion == "single": + depth["combine_xyz"] = False + + ( + is_free_ori, + info, + proj, + vertno, + G, + whitener, + nn, + orient_std, + ) = _prepare_beamformer_input( + info, + forward, + label, + pick_ori, + noise_cov=noise_cov, + rank=rank, + pca=False, + **depth, + ) + ch_names = list(info["ch_names"]) data_cov = pick_channels_cov(data_cov, include=ch_names) Cm = data_cov._get_square() - if 'estimator' in data_cov: - del data_cov['estimator'] + if "estimator" in data_cov: + del data_cov["estimator"] rank_int = sum(rank.values()) del rank # compute spatial filter n_orient = 3 if is_free_ori else 1 W, max_power_ori = _compute_beamformer( - G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, rank_int, - inversion=inversion, nn=nn, orient_std=orient_std, - whitener=whitener) + G, + Cm, + reg, + n_orient, + weight_norm, + pick_ori, + reduce_rank, + rank_int, + inversion=inversion, + nn=nn, + orient_std=orient_std, + whitener=whitener, + ) # get src type to store with filters for _make_stc - src_type = _get_src_type(forward['src'], vertno) + src_type = _get_src_type(forward["src"], vertno) # get subject to store with filters subject_from = _subject_from_forward(forward) # Is the computed beamformer a scalar or vector beamformer? 
- is_free_ori = is_free_ori if pick_ori in [None, 'vector'] else False - is_ssp = bool(info['projs']) + is_free_ori = is_free_ori if pick_ori in [None, "vector"] else False + is_ssp = bool(info["projs"]) filters = Beamformer( - kind='LCMV', weights=W, data_cov=data_cov, noise_cov=noise_cov, - whitener=whitener, weight_norm=weight_norm, pick_ori=pick_ori, - ch_names=ch_names, proj=proj, is_ssp=is_ssp, vertices=vertno, - is_free_ori=is_free_ori, n_sources=forward['nsource'], - src_type=src_type, source_nn=forward['source_nn'].copy(), - subject=subject_from, rank=rank_int, max_power_ori=max_power_ori, - inversion=inversion) + kind="LCMV", + weights=W, + data_cov=data_cov, + noise_cov=noise_cov, + whitener=whitener, + weight_norm=weight_norm, + pick_ori=pick_ori, + ch_names=ch_names, + proj=proj, + is_ssp=is_ssp, + vertices=vertno, + is_free_ori=is_free_ori, + n_sources=forward["nsource"], + src_type=src_type, + source_nn=forward["source_nn"].copy(), + subject=subject_from, + rank=rank_int, + max_power_ori=max_power_ori, + inversion=inversion, + ) return filters @@ -217,45 +277,51 @@ def _apply_lcmv(data, filters, info, tmin): else: return_single = False - W = filters['weights'] + W = filters["weights"] for i, M in enumerate(data): - if len(M) != len(filters['ch_names']): - raise ValueError('data and picks must have the same length') + if len(M) != len(filters["ch_names"]): + raise ValueError("data and picks must have the same length") if not return_single: - logger.info("Processing epoch : %d" % (i + 1)) + logger.info(f"Processing epoch : {i + 1}") - M = _proj_whiten_data(M, info['projs'], filters) + M = _proj_whiten_data(M, info["projs"], filters) # project to source space using beamformer weights vector = False - if filters['is_free_ori']: + if filters["is_free_ori"]: sol = np.dot(W, M) - if filters['pick_ori'] == 'vector': + if filters["pick_ori"] == "vector": vector = True else: - logger.info('combining the current components...') + logger.info("combining the current components...") sol = combine_xyz(sol) else: # Linear inverse: do computation here or delayed - if (M.shape[0] < W.shape[0] and - filters['pick_ori'] != 'max-power'): + if M.shape[0] < W.shape[0] and filters["pick_ori"] != "max-power": sol = (W, M) else: sol = np.dot(W, M) - tstep = 1.0 / info['sfreq'] + tstep = 1.0 / info["sfreq"] # compatibility with 0.16, add src_type as None if not present: filters, warn_text = _check_src_type(filters) - yield _make_stc(sol, vertices=filters['vertices'], tmin=tmin, - tstep=tstep, subject=filters['subject'], - vector=vector, source_nn=filters['source_nn'], - src_type=filters['src_type'], warn_text=warn_text) + yield _make_stc( + sol, + vertices=filters["vertices"], + tmin=tmin, + tstep=tstep, + subject=filters["subject"], + vector=vector, + source_nn=filters["source_nn"], + src_type=filters["src_type"], + warn_text=warn_text, + ) - logger.info('[done]') + logger.info("[done]") @verbose @@ -296,15 +362,13 @@ def apply_lcmv(evoked, filters, *, verbose=None): sel = _check_channels_spatial_filter(evoked.ch_names, filters) data = data[sel] - stc = _apply_lcmv(data=data, filters=filters, info=info, - tmin=tmin) + stc = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin) return next(stc) @verbose -def apply_lcmv_epochs(epochs, filters, *, return_generator=False, - verbose=None): +def apply_lcmv_epochs(epochs, filters, *, return_generator=False, verbose=None): """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. 
Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights @@ -337,9 +401,8 @@ def apply_lcmv_epochs(epochs, filters, *, return_generator=False, tmin = epochs.times[0] sel = _check_channels_spatial_filter(epochs.ch_names, filters) - data = epochs.get_data()[:, sel, :] - stcs = _apply_lcmv(data=data, filters=filters, info=info, - tmin=tmin) + data = epochs.get_data(sel) + stcs = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin) if not return_generator: stcs = [s for s in stcs] @@ -418,17 +481,23 @@ def apply_lcmv_cov(data_cov, filters, verbose=None): sel_names = [data_cov.ch_names[ii] for ii in sel] data_cov = pick_channels_cov(data_cov, sel_names) - n_orient = filters['weights'].shape[0] // filters['n_sources'] + n_orient = filters["weights"].shape[0] // filters["n_sources"] # Need to project and whiten along both dimensions - data = _proj_whiten_data(data_cov['data'].T, data_cov['projs'], filters) - data = _proj_whiten_data(data.T, data_cov['projs'], filters) + data = _proj_whiten_data(data_cov["data"].T, data_cov["projs"], filters) + data = _proj_whiten_data(data.T, data_cov["projs"], filters) del data_cov - source_power = _compute_power(data, filters['weights'], n_orient) + source_power = _compute_power(data, filters["weights"], n_orient) # compatibility with 0.16, add src_type as None if not present: filters, warn_text = _check_src_type(filters) - return _make_stc(source_power, vertices=filters['vertices'], - src_type=filters['src_type'], tmin=0., tstep=1., - subject=filters['subject'], - source_nn=filters['source_nn'], warn_text=warn_text) + return _make_stc( + source_power, + vertices=filters["vertices"], + src_type=filters["src_type"], + tmin=0.0, + tstep=1.0, + subject=filters["subject"], + source_nn=filters["source_nn"], + warn_text=warn_text, + ) diff --git a/mne/beamformer/_rap_music.py b/mne/beamformer/_rap_music.py index 827b085385f..2b8c1a4ee84 100644 --- a/mne/beamformer/_rap_music.py +++ b/mne/beamformer/_rap_music.py @@ -1,25 +1,26 @@ """Compute a Recursively Applied and Projected MUltiple Signal Classification (RAP-MUSIC).""" # noqa -# Authors: Yousra Bekhti -# Alexandre Gramfort -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np +from scipy import linalg -from ..forward import is_fixed_orient, convert_forward_solution -from ..io.pick import pick_channels_evoked, pick_info, pick_channels_forward +from .._fiff.pick import pick_channels_forward, pick_info +from ..fixes import _safe_svd +from ..forward import convert_forward_solution, is_fixed_orient from ..inverse_sparse.mxne_inverse import _make_dipoles_sparse from ..minimum_norm.inverse import _log_exp_var -from ..utils import logger, verbose, _check_info_inv, fill_doc -from ..dipole import Dipole +from ..utils import _check_info_inv, fill_doc, logger, verbose from ._compute_beamformer import _prepare_beamformer_input @fill_doc -def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, - picks=None): - """RAP-MUSIC for evoked data. +def _apply_rap_music( + data, info, times, forward, noise_cov, n_dipoles=2, picks=None, use_trap=False +): + """RAP-MUSIC or TRAP-MUSIC for evoked data. Parameters ---------- @@ -36,6 +37,8 @@ def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, The number of dipoles to estimate. The default value is 2. picks : list of int Caller ensures this is a list of int. + use_trap : bool + Use the TRAP-MUSIC variant if True (default False). 
Returns ------- @@ -44,18 +47,17 @@ def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, explained_data : array | None Data explained by the dipoles using a least square fitting with the selected active dipoles and their estimated orientation. - Computed only if return_explained_data is True. """ - from scipy import linalg info = pick_info(info, picks) del picks # things are much simpler if we avoid surface orientation - align = forward['source_nn'].copy() - if forward['surf_ori'] and not is_fixed_orient(forward): + align = forward["source_nn"].copy() + if forward["surf_ori"] and not is_fixed_orient(forward): forward = convert_forward_solution(forward, surf_ori=False) is_free_ori, info, _, _, G, whitener, _, _ = _prepare_beamformer_input( - info, forward, noise_cov=noise_cov, rank=None) - forward = pick_channels_forward(forward, info['ch_names'], ordered=True) + info, forward, noise_cov=noise_cov, rank=None + ) + forward = pick_channels_forward(forward, info["ch_names"], ordered=True) del info # whiten the data (leadfield already whitened) @@ -67,7 +69,7 @@ def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, n_orient = 3 if is_free_ori else 1 G.shape = (G.shape[0], -1, n_orient) - gain = forward['sol']['data'].copy() + gain = forward["sol"]["data"].copy() gain.shape = G.shape n_channels = G.shape[0] A = np.empty((n_channels, n_dipoles)) @@ -80,7 +82,7 @@ def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, idxs = list() for k in range(n_dipoles): - subcorr_max = -1. + subcorr_max = -1.0 source_idx, source_ori, source_pos = 0, [0, 0, 0], [0, 0, 0] for i_source in range(G.shape[1]): Gk = G_proj[:, i_source] @@ -89,13 +91,13 @@ def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, subcorr_max = subcorr source_idx = i_source source_ori = ori - source_pos = forward['source_rr'][i_source] + source_pos = forward["source_rr"][i_source] if n_orient == 3 and align is not None: - surf_normal = forward['source_nn'][3 * i_source + 2] + surf_normal = forward["source_nn"][3 * i_source + 2] # make sure ori is aligned to the surface orientation - source_ori *= np.sign(source_ori @ surf_normal) or 1. 
+ source_ori *= np.sign(source_ori @ surf_normal) or 1.0 if n_orient == 1: - source_ori = forward['source_nn'][i_source] + source_ori = forward["source_nn"][i_source] idxs.append(source_idx) if n_orient == 3: @@ -106,13 +108,15 @@ def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, oris[k] = source_ori poss[k] = source_pos - logger.info("source %s found: p = %s" % (k + 1, source_idx)) + logger.info(f"source {k + 1} found: p = {source_idx}") if n_orient == 3: - logger.info("ori = %s %s %s" % tuple(oris[k])) + logger.info("ori = {} {} {}".format(*tuple(oris[k]))) - projection = _compute_proj(A[:, :k + 1]) - G_proj = np.einsum('ab,bso->aso', projection, G) + projection = _compute_proj(A[:, : k + 1]) + G_proj = np.einsum("ab,bso->aso", projection, G) phi_sig_proj = np.dot(projection, phi_sig) + if use_trap: + phi_sig_proj = phi_sig_proj[:, -(n_dipoles - k) :] del G, G_proj sol = linalg.lstsq(A, M)[0] @@ -126,8 +130,7 @@ def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, if n_orient == 3: gain_dip = (oris * gain_active).sum(-1) idxs = np.array(idxs) - active_set = np.array( - [[3 * idxs, 3 * idxs + 1, 3 * idxs + 2]]).T.ravel() + active_set = np.array([[3 * idxs, 3 * idxs + 1, 3 * idxs + 2]]).T.ravel() else: gain_dip = gain_active[:, :, 0] active_set = idxs @@ -137,55 +140,21 @@ def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, explained_data = gain_dip @ sol M_estimate = whitener @ explained_data _log_exp_var(M, M_estimate) - tstep = np.median(np.diff(times)) if len(times) > 1 else 1. + tstep = np.median(np.diff(times)) if len(times) > 1 else 1.0 dipoles = _make_dipoles_sparse( - X, active_set, forward, times[0], tstep, M, - gain_active, active_is_idx=True) + X, active_set, forward, times[0], tstep, M, gain_active, active_is_idx=True + ) for dipole, ori in zip(dipoles, oris): signs = np.sign((dipole.ori * ori).sum(-1, keepdims=True)) - dipole.ori *= signs - dipole.amplitude *= signs[:, 0] - logger.info('[done]') + dipole._ori *= signs + dipole._amplitude *= signs[:, 0] + logger.info("[done]") return dipoles, explained_data -def _make_dipoles(times, poss, oris, sol, gof): - """Instantiate a list of Dipoles. - - Parameters - ---------- - times : array, shape (n_times,) - The time instants. - poss : array, shape (n_dipoles, 3) - The dipoles' positions. - oris : array, shape (n_dipoles, 3) - The dipoles' orientations. - sol : array, shape (n_times,) - The dipoles' amplitudes over time. - gof : array, shape (n_times,) - The goodness of fit of the dipoles. - Shared between all dipoles. - - Returns - ------- - dipoles : list - The list of Dipole instances. - """ - oris = np.array(oris) - - dipoles = [] - for i_dip in range(poss.shape[0]): - i_pos = poss[i_dip][np.newaxis, :].repeat(len(times), axis=0) - i_ori = oris[i_dip][np.newaxis, :].repeat(len(times), axis=0) - dipoles.append(Dipole(times, i_pos, sol[i_dip], i_ori, gof)) - - return dipoles - - def _compute_subcorr(G, phi_sig): """Compute the subspace correlation.""" - from scipy import linalg - Ug, Sg, Vg = linalg.svd(G, full_matrices=False) + Ug, Sg, Vg = _safe_svd(G, full_matrices=False) # Now we look at the actual rank of the forward fields # in G and handle the fact that it might be rank defficient # eg. 
when using MEG and a sphere model for which the @@ -196,25 +165,58 @@ def _compute_subcorr(G, phi_sig): rank = max(rank, 2) # rank cannot be 1 Ug, Sg, Vg = Ug[:, :rank], Sg[:rank], Vg[:rank] tmp = np.dot(Ug.T.conjugate(), phi_sig) - Uc, Sc, _ = linalg.svd(tmp, full_matrices=False) + Uc, Sc, _ = _safe_svd(tmp, full_matrices=False) X = np.dot(Vg.T / Sg[None, :], Uc[:, 0]) # subcorr return Sc[0], X / np.linalg.norm(X) def _compute_proj(A): """Compute the orthogonal projection operation for a manifold vector A.""" - from scipy import linalg - U, _, _ = linalg.svd(A, full_matrices=False) + U, _, _ = _safe_svd(A, full_matrices=False) return np.identity(A.shape[0]) - np.dot(U, U.T.conjugate()) +def _rap_music(evoked, forward, noise_cov, n_dipoles, return_residual, use_trap): + """RAP-/TRAP-MUSIC implementation.""" + info = evoked.info + data = evoked.data + times = evoked.times + + picks = _check_info_inv(info, forward, data_cov=None, noise_cov=noise_cov) + + data = data[picks] + + dipoles, explained_data = _apply_rap_music( + data, info, times, forward, noise_cov, n_dipoles, picks, use_trap + ) + + if return_residual: + residual = evoked.copy().pick([info["ch_names"][p] for p in picks]) + residual.data -= explained_data + active_projs = [p for p in residual.info["projs"] if p["active"]] + for p in active_projs: + p["active"] = False + residual.add_proj(active_projs, remove_existing=True) + residual.apply_proj() + return dipoles, residual + else: + return dipoles + + @verbose -def rap_music(evoked, forward, noise_cov, n_dipoles=5, return_residual=False, - verbose=None): +def rap_music( + evoked, + forward, + noise_cov, + n_dipoles=5, + return_residual=False, + *, + verbose=None, +): """RAP-MUSIC source localization method. Compute Recursively Applied and Projected MUltiple SIgnal Classification - (RAP-MUSIC) on evoked data. + (RAP-MUSIC) :footcite:`MosherLeahy1999,MosherLeahy1996` on evoked data. .. note:: The goodness of fit (GOF) of all the returned dipoles is the same and corresponds to the GOF of the full set of dipoles. @@ -244,47 +246,70 @@ def rap_music(evoked, forward, noise_cov, n_dipoles=5, return_residual=False, See Also -------- mne.fit_dipole + mne.beamformer.trap_music Notes ----- - The references are: + .. versionadded:: 0.9.0 - J.C. Mosher and R.M. Leahy. 1999. Source localization using recursively - applied and projected (RAP) MUSIC. Signal Processing, IEEE Trans. 47, 2 - (February 1999), 332-340. - DOI=10.1109/78.740118 https://doi.org/10.1109/78.740118 + References + ---------- + .. footbibliography:: + """ + return _rap_music(evoked, forward, noise_cov, n_dipoles, return_residual, False) - Mosher, J.C.; Leahy, R.M., EEG and MEG source localization using - recursively applied (RAP) MUSIC, Signals, Systems and Computers, 1996. - pp.1201,1207 vol.2, 3-6 Nov. 1996 - doi: 10.1109/ACSSC.1996.599135 - .. versionadded:: 0.9.0 - """ - info = evoked.info - data = evoked.data - times = evoked.times +@verbose +def trap_music( + evoked, + forward, + noise_cov, + n_dipoles=5, + return_residual=False, + *, + verbose=None, +): + """TRAP-MUSIC source localization method. + + Compute Truncated Recursively Applied and Projected MUltiple SIgnal Classification + (TRAP-MUSIC) :footcite:`Makela2018` on evoked data. - picks = _check_info_inv(info, forward, data_cov=None, noise_cov=noise_cov) + .. note:: The goodness of fit (GOF) of all the returned dipoles is the + same and corresponds to the GOF of the full set of dipoles. 
- data = data[picks] + Parameters + ---------- + evoked : instance of Evoked + Evoked data to localize. + forward : instance of Forward + Forward operator. + noise_cov : instance of Covariance + The noise covariance. + n_dipoles : int + The number of dipoles to look for. The default value is 5. + return_residual : bool + If True, the residual is returned as an Evoked instance. + %(verbose)s - dipoles, explained_data = _apply_rap_music(data, info, times, forward, - noise_cov, n_dipoles, - picks) + Returns + ------- + dipoles : list of instance of Dipole + The dipole fits. + residual : instance of Evoked + The residual a.k.a. data not explained by the dipoles. + Only returned if return_residual is True. - if return_residual: - residual = evoked.copy() - selection = [info['ch_names'][p] for p in picks] + See Also + -------- + mne.fit_dipole + mne.beamformer.rap_music - residual = pick_channels_evoked(residual, - include=selection) - residual.data -= explained_data - active_projs = [p for p in residual.info['projs'] if p['active']] - for p in active_projs: - p['active'] = False - residual.add_proj(active_projs, remove_existing=True) - residual.apply_proj() - return dipoles, residual - else: - return dipoles + Notes + ----- + .. versionadded:: 1.4 + + References + ---------- + .. footbibliography:: + """ + return _rap_music(evoked, forward, noise_cov, n_dipoles, return_residual, True) diff --git a/mne/beamformer/resolution_matrix.py b/mne/beamformer/resolution_matrix.py index b0e7c450ed6..e2dd258a2ab 100644 --- a/mne/beamformer/resolution_matrix.py +++ b/mne/beamformer/resolution_matrix.py @@ -1,13 +1,14 @@ -# -*- coding: utf-8 -*- """Compute resolution matrix for beamformers.""" -# Authors: olaf.hauk@mrc-cbu.cam.ac.uk -# + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. + import numpy as np -from ..io.pick import pick_channels, pick_info, pick_channels_forward +from .._fiff.pick import pick_channels, pick_channels_forward, pick_info from ..evoked import EvokedArray -from ..utils import logger, fill_doc +from ..utils import fill_doc, logger from ._lcmv import apply_lcmv @@ -34,8 +35,8 @@ def make_lcmv_resolution_matrix(filters, forward, info): for free dipole orientation versus factor 1 for scalar beamformers). """ # don't include bad channels from noise covariance matrix - bads_filt = filters['noise_cov']['bads'] - ch_names = filters['noise_cov']['names'] + bads_filt = filters["noise_cov"]["bads"] + ch_names = filters["noise_cov"]["names"] # good channels ch_names = [c for c in ch_names if (c not in bads_filt)] @@ -44,7 +45,7 @@ def make_lcmv_resolution_matrix(filters, forward, info): forward = pick_channels_forward(forward, ch_names, ordered=True) # get leadfield matrix from forward solution - leadfield = forward['sol']['data'] + leadfield = forward["sol"]["data"] # get the filter weights for beamformer as matrix filtmat = _get_matrix_from_lcmv(filters, forward, info) @@ -52,9 +53,7 @@ def make_lcmv_resolution_matrix(filters, forward, info): # compute resolution matrix resmat = filtmat.dot(leadfield) - shape = resmat.shape - - logger.info('Dimensions of LCMV resolution matrix: %d by %d.' % shape) + logger.info(f"Dimensions of LCMV resolution matrix: {resmat.shape}.") return resmat @@ -68,16 +67,15 @@ def _get_matrix_from_lcmv(filters, forward, info, verbose=None): Inverse matrix associated with LCMV beamformer filters. 
""" # number of channels for identity matrix - info = pick_info( - info, pick_channels(info['ch_names'], filters['ch_names'])) - n_chs = len(info['ch_names']) + info = pick_info(info, pick_channels(info["ch_names"], filters["ch_names"])) + n_chs = len(info["ch_names"]) # create identity matrix as input for inverse operator # set elements to zero for non-selected channels id_mat = np.eye(n_chs) # convert identity matrix to evoked data type (pretending it's an epochs - evo_ident = EvokedArray(id_mat, info=info, tmin=0.) + evo_ident = EvokedArray(id_mat, info=info, tmin=0.0) # apply beamformer to identity matrix stc_lcmv = apply_lcmv(evo_ident, filters, verbose=verbose) diff --git a/mne/beamformer/tests/__init__.py b/mne/beamformer/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/beamformer/tests/__init__.py +++ b/mne/beamformer/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index 74d273a0b66..cebc0bb4057 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -1,70 +1,67 @@ -# Authors: Marijn van Vliet -# Britta Westner -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import copy as cp -import pytest -from numpy.testing import (assert_array_equal, assert_allclose, - assert_array_less) import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal, assert_array_less import mne from mne import pick_types -from mne.beamformer import (make_dics, apply_dics, apply_dics_epochs, - apply_dics_tfr_epochs, apply_dics_csd, - read_beamformer, Beamformer) +from mne._fiff.constants import FIFF +from mne._fiff.pick import pick_info +from mne.beamformer import ( + Beamformer, + apply_dics, + apply_dics_csd, + apply_dics_epochs, + apply_dics_tfr_epochs, + make_dics, + read_beamformer, +) from mne.beamformer._compute_beamformer import _prepare_beamformer_input from mne.beamformer._dics import _prepare_noise_csd from mne.beamformer.tests.test_lcmv import _assert_weight_norm from mne.datasets import testing -from mne.io.constants import FIFF from mne.io import read_info -from mne.io.pick import pick_info from mne.proj import compute_proj_evoked, make_projector from mne.surface import _compute_nearest -from mne.time_frequency import (CrossSpectralDensity, csd_morlet, EpochsTFR, - csd_tfr) +from mne.time_frequency import CrossSpectralDensity, EpochsTFRArray, csd_morlet, csd_tfr from mne.time_frequency.csd import _sym_mat_to_vector -from mne.transforms import invert_transform, apply_trans -from mne.utils import object_diff, requires_version, catch_logging +from mne.transforms import apply_trans, invert_transform +from mne.utils import catch_logging, object_diff data_path = testing.data_path(download=False) fname_raw = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" -fname_fwd = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" -) -fname_fwd_vol = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-vol-7-fwd.fif" -) +fname_fwd = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" +fname_fwd_vol = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-vol-7-fwd.fif" fname_event = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw-eve.fif" subjects_dir = data_path / "subjects" -@pytest.fixture(scope='module', 
params=[testing._pytest_param()]) +@pytest.fixture(scope="module", params=[testing._pytest_param()]) def _load_forward(): """Load forward models.""" fwd_free = mne.read_forward_solution(fname_fwd) fwd_free = mne.pick_types_forward(fwd_free, meg=True, eeg=False) fwd_free = mne.convert_forward_solution(fwd_free, surf_ori=False) - fwd_surf = mne.convert_forward_solution(fwd_free, surf_ori=True, - use_cps=False) - fwd_fixed = mne.convert_forward_solution(fwd_free, force_fixed=True, - use_cps=False) + fwd_surf = mne.convert_forward_solution(fwd_free, surf_ori=True, use_cps=False) + fwd_fixed = mne.convert_forward_solution(fwd_free, force_fixed=True, use_cps=False) fwd_vol = mne.read_forward_solution(fname_fwd_vol) return fwd_free, fwd_surf, fwd_fixed, fwd_vol def _simulate_data(fwd, idx): # Somewhere on the frontal lobe by default """Simulate an oscillator on the cortex.""" - pytest.importorskip('nibabel') - source_vertno = fwd['src'][0]['vertno'][idx] + pytest.importorskip("nibabel") + source_vertno = fwd["src"][0]["vertno"][idx] - sfreq = 50. # Hz. + sfreq = 50.0 # Hz. times = np.arange(10 * sfreq) / sfreq # 10 seconds of data signal = np.sin(20 * 2 * np.pi * times) # 20 Hz oscillator - signal[:len(times) // 2] *= 2 # Make signal louder at the beginning + signal[: len(times) // 2] *= 2 # Make signal louder at the beginning signal *= 1e-9 # Scale to be in the ballpark of MEG data # Construct a SourceEstimate object that describes the signal at the @@ -74,16 +71,16 @@ def _simulate_data(fwd, idx): # Somewhere on the frontal lobe by default vertices=[[source_vertno], []], tmin=0, tstep=1 / sfreq, - subject='sample', + subject="sample", ) # Create an info object that holds information about the sensors - info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad') + info = mne.create_info(fwd["info"]["ch_names"], sfreq, ch_types="grad") with info._unlock(): - info.update(fwd['info']) # Merge in sensor position information + info.update(fwd["info"]) # Merge in sensor position information # heavily decimate sensors to make it much faster - info = mne.pick_info(info, np.arange(info['nchan'])[::5]) - fwd = mne.pick_channels_forward(fwd, info['ch_names']) + info = mne.pick_info(info, np.arange(info["nchan"])[::5]) + fwd = mne.pick_channels_forward(fwd, info["ch_names"]) # Run the simulated signal through the forward model, obtaining # simulated sensor data. 
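The next hunk finishes `_simulate_data` by projecting the source estimate through the forward model, adding noise, epoching, and computing a cross-spectral density. As a rough guide to that pattern only, here is a minimal sketch assuming the `fwd`, `stc`, and `info` objects built above and MNE's public `apply_forward_raw` and `csd_morlet` helpers; the noise injection and label handling done by the test are omitted, and `raw_sim`/`csd_sim` are illustrative names, not part of the patch:

    import numpy as np
    import mne
    from mne.time_frequency import csd_morlet

    raw_sim = mne.apply_forward_raw(fwd, stc, info)  # project the simulated source to sensor space
    events = np.array([[0, 0, 1]])                   # one dummy event at the first sample
    epochs_sim = mne.Epochs(
        raw_sim, events, event_id=1, tmin=0, tmax=raw_sim.times[-1],
        baseline=None, preload=True,
    )
    csd_sim = csd_morlet(epochs_sim, frequencies=[10, 20], n_cycles=[5, 10], decim=5)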
@@ -95,31 +92,39 @@ def _simulate_data(fwd, idx): # Somewhere on the frontal lobe by default raw._data += noise # Define a single epoch (weird baseline but shouldn't matter) - epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0, - tmax=raw.times[-1], baseline=(0., 0.), preload=True) + epochs = mne.Epochs( + raw, + [[0, 0, 1]], + event_id=1, + tmin=0, + tmax=raw.times[-1], + baseline=(0.0, 0.0), + preload=True, + ) evoked = epochs.average() # Compute the cross-spectral density matrix csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=5) - labels = mne.read_labels_from_annot( - 'sample', hemi='lh', subjects_dir=subjects_dir) - label = [ - label for label in labels if np.in1d(source_vertno, label.vertices)[0]] + labels = mne.read_labels_from_annot("sample", hemi="lh", subjects_dir=subjects_dir) + label = [label for label in labels if np.isin(source_vertno, label.vertices)] assert len(label) == 1 label = label[0] - vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno']) + vertices = np.intersect1d(label.vertices, fwd["src"][0]["vertno"]) source_ind = vertices.tolist().index(source_vertno) assert vertices[source_ind] == source_vertno return epochs, evoked, csd, source_vertno, label, vertices, source_ind -idx_param = pytest.mark.parametrize('idx', [ - 0, - pytest.param(100, marks=pytest.mark.slowtest), - 200, - pytest.param(233, marks=pytest.mark.slowtest), -]) +idx_param = pytest.mark.parametrize( + "idx", + [ + 0, + pytest.param(100, marks=pytest.mark.slowtest), + 200, + pytest.param(233, marks=pytest.mark.slowtest), + ], +) def _rand_csd(rng, info): @@ -130,7 +135,7 @@ def _rand_csd(rng, info): data = data @ data.conj().T data *= scales data *= scales[:, np.newaxis] - data.flat[::n + 1] = scales + data.flat[:: n + 1] = scales return data @@ -141,67 +146,74 @@ def _make_rand_csd(info, csd): s, u = np.linalg.eigh(csd.get_data(csd.frequencies[0])) mask = np.abs(s) >= s[-1] * 1e-7 rank = mask.sum() - assert rank == len(data) == len(info['ch_names']) + assert rank == len(data) == len(info["ch_names"]) noise_csd = CrossSpectralDensity( - _sym_mat_to_vector(data), info['ch_names'], 0., csd.n_fft) + _sym_mat_to_vector(data), info["ch_names"], 0.0, csd.n_fft + ) return noise_csd, rank @pytest.mark.slowtest @testing.requires_testing_data -@requires_version('h5io') @idx_param -@pytest.mark.parametrize('whiten', [ - pytest.param(False, marks=pytest.mark.slowtest), - True, -]) +@pytest.mark.parametrize( + "whiten", + [ + pytest.param(False, marks=pytest.mark.slowtest), + True, + ], +) def test_make_dics(tmp_path, _load_forward, idx, whiten): """Test making DICS beamformer filters.""" + pytest.importorskip("h5io") # We only test proper handling of parameters here. Testing the results is # done in test_apply_dics_timeseries and test_apply_dics_csd. 
fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward - epochs, _, csd, _, label, vertices, source_ind = \ - _simulate_data(fwd_fixed, idx) - with pytest.raises(ValueError, match='several sensor types'): + epochs, _, csd, _, label, vertices, source_ind = _simulate_data(fwd_fixed, idx) + with pytest.raises(ValueError, match="several sensor types"): make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None) if whiten: noise_csd, rank = _make_rand_csd(epochs.info, csd) - assert rank == len(epochs.info['ch_names']) == 62 + assert rank == len(epochs.info["ch_names"]) == 62 else: noise_csd = None - epochs.pick_types(meg='grad') + epochs.pick(picks="grad") with pytest.raises(ValueError, match="Invalid value for the 'pick_ori'"): - make_dics(epochs.info, fwd_fixed, csd, pick_ori="notexistent", - noise_csd=noise_csd) - with pytest.raises(ValueError, match='rank, if str'): - make_dics(epochs.info, fwd_fixed, csd, rank='foo', noise_csd=noise_csd) - with pytest.raises(TypeError, match='rank must be'): - make_dics(epochs.info, fwd_fixed, csd, rank=1., noise_csd=noise_csd) + make_dics( + epochs.info, fwd_fixed, csd, pick_ori="notexistent", noise_csd=noise_csd + ) + with pytest.raises(ValueError, match="rank, if str"): + make_dics(epochs.info, fwd_fixed, csd, rank="foo", noise_csd=noise_csd) + with pytest.raises(TypeError, match="rank must be"): + make_dics(epochs.info, fwd_fixed, csd, rank=1.0, noise_csd=noise_csd) # Test if fixed forward operator is detected when picking normal # orientation - with pytest.raises(ValueError, match='forward operator with free ori'): - make_dics(epochs.info, fwd_fixed, csd, pick_ori="normal", - noise_csd=noise_csd) + with pytest.raises(ValueError, match="forward operator with free ori"): + make_dics(epochs.info, fwd_fixed, csd, pick_ori="normal", noise_csd=noise_csd) # Test if non-surface oriented forward operator is detected when picking # normal orientation - with pytest.raises(ValueError, match='oriented in surface coordinates'): - make_dics(epochs.info, fwd_free, csd, pick_ori="normal", - noise_csd=noise_csd) + with pytest.raises(ValueError, match="oriented in surface coordinates"): + make_dics(epochs.info, fwd_free, csd, pick_ori="normal", noise_csd=noise_csd) # Test if volume forward operator is detected when picking normal # orientation - with pytest.raises(ValueError, match='oriented in surface coordinates'): - make_dics(epochs.info, fwd_vol, csd, pick_ori="normal", - noise_csd=noise_csd) + with pytest.raises(ValueError, match="oriented in surface coordinates"): + make_dics(epochs.info, fwd_vol, csd, pick_ori="normal", noise_csd=noise_csd) # Test invalid combinations of parameters - with pytest.raises(ValueError, match='reduce_rank cannot be used with'): - make_dics(epochs.info, fwd_free, csd, inversion='single', - reduce_rank=True, noise_csd=noise_csd) + with pytest.raises(ValueError, match="reduce_rank cannot be used with"): + make_dics( + epochs.info, + fwd_free, + csd, + inversion="single", + reduce_rank=True, + noise_csd=noise_csd, + ) # TODO: Restore this? 
# with pytest.raises(ValueError, match='not stable with depth'): # make_dics(epochs.info, fwd_free, csd, weight_norm='unit-noise-gain', @@ -209,83 +221,136 @@ def test_make_dics(tmp_path, _load_forward, idx, whiten): # Sanity checks on the returned filters n_freq = len(csd.frequencies) - vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno']) + vertices = np.intersect1d(label.vertices, fwd_free["src"][0]["vertno"]) n_verts = len(vertices) n_orient = 3 n_channels = len(epochs.ch_names) # Test return values - weight_norm = 'unit-noise-gain' - inversion = 'single' - filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, - weight_norm=weight_norm, depth=None, real_filter=False, - noise_csd=noise_csd, inversion=inversion) - assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels) - assert np.iscomplexobj(filters['weights']) - assert filters['csd'].ch_names == epochs.ch_names - assert isinstance(filters['csd'], CrossSpectralDensity) - assert filters['ch_names'] == epochs.ch_names - assert_array_equal(filters['proj'], np.eye(n_channels)) - assert_array_equal(filters['vertices'][0], vertices) - assert_array_equal(filters['vertices'][1], []) # Label was on the LH - assert filters['subject'] == fwd_free['src']._subject - assert filters['pick_ori'] is None - assert filters['is_free_ori'] - assert filters['inversion'] == inversion - assert filters['weight_norm'] == weight_norm - assert 'DICS' in repr(filters) + weight_norm = "unit-noise-gain" + inversion = "single" + filters = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + pick_ori=None, + weight_norm=weight_norm, + depth=None, + real_filter=False, + noise_csd=noise_csd, + inversion=inversion, + ) + assert filters["weights"].shape == (n_freq, n_verts * n_orient, n_channels) + assert np.iscomplexobj(filters["weights"]) + assert filters["csd"].ch_names == epochs.ch_names + assert isinstance(filters["csd"], CrossSpectralDensity) + assert filters["ch_names"] == epochs.ch_names + assert_array_equal(filters["proj"], np.eye(n_channels)) + assert_array_equal(filters["vertices"][0], vertices) + assert_array_equal(filters["vertices"][1], []) # Label was on the LH + assert filters["subject"] == fwd_free["src"]._subject + assert filters["pick_ori"] is None + assert filters["is_free_ori"] + assert filters["inversion"] == inversion + assert filters["weight_norm"] == weight_norm + assert "DICS" in repr(filters) assert 'subject "sample"' in repr(filters) assert str(len(vertices)) in repr(filters) assert str(n_channels) in repr(filters) - assert 'rank' not in repr(filters) + assert "rank" not in repr(filters) _, noise_cov = _prepare_noise_csd(csd, noise_csd, real_filter=False) _, _, _, _, G, _, _, _ = _prepare_beamformer_input( - epochs.info, fwd_surf, label, 'vector', combine_xyz=False, exp=None, - noise_cov=noise_cov) + epochs.info, + fwd_surf, + label, + "vector", + combine_xyz=False, + exp=None, + noise_cov=noise_cov, + ) G.shape = (n_channels, n_verts, n_orient) G = G.transpose(1, 2, 0).conj() # verts, orient, ch _assert_weight_norm(filters, G) - inversion = 'matrix' - filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, - weight_norm=weight_norm, depth=None, - noise_csd=noise_csd, inversion=inversion) + inversion = "matrix" + filters = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + pick_ori=None, + weight_norm=weight_norm, + depth=None, + noise_csd=noise_csd, + inversion=inversion, + ) _assert_weight_norm(filters, G) - weight_norm = 
'unit-noise-gain-invariant' - inversion = 'single' - filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, - weight_norm=weight_norm, depth=None, - noise_csd=noise_csd, inversion=inversion) + weight_norm = "unit-noise-gain-invariant" + inversion = "single" + filters = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + pick_ori=None, + weight_norm=weight_norm, + depth=None, + noise_csd=noise_csd, + inversion=inversion, + ) _assert_weight_norm(filters, G) # Test picking orientations. Also test weight norming under these different # conditions. - weight_norm = 'unit-noise-gain' - filters = make_dics(epochs.info, fwd_surf, csd, label=label, - pick_ori='normal', weight_norm=weight_norm, - depth=None, noise_csd=noise_csd, inversion=inversion) + weight_norm = "unit-noise-gain" + filters = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + pick_ori="normal", + weight_norm=weight_norm, + depth=None, + noise_csd=noise_csd, + inversion=inversion, + ) n_orient = 1 - assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels) - assert not filters['is_free_ori'] + assert filters["weights"].shape == (n_freq, n_verts * n_orient, n_channels) + assert not filters["is_free_ori"] _assert_weight_norm(filters, G) - filters = make_dics(epochs.info, fwd_surf, csd, label=label, - pick_ori='max-power', weight_norm=weight_norm, - depth=None, noise_csd=noise_csd, inversion=inversion) + filters = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + pick_ori="max-power", + weight_norm=weight_norm, + depth=None, + noise_csd=noise_csd, + inversion=inversion, + ) n_orient = 1 - assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels) - assert not filters['is_free_ori'] + assert filters["weights"].shape == (n_freq, n_verts * n_orient, n_channels) + assert not filters["is_free_ori"] _assert_weight_norm(filters, G) # From here on, only work on a single frequency csd = csd[0] # Test using a real-valued filter - filters = make_dics(epochs.info, fwd_surf, csd, label=label, - pick_ori='normal', real_filter=True, - noise_csd=noise_csd) - assert not np.iscomplexobj(filters['weights']) + filters = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + pick_ori="normal", + real_filter=True, + noise_csd=noise_csd, + ) + assert not np.iscomplexobj(filters["weights"]) # Test forward normalization. When inversion='single', the power of a # unit-noise CSD should be 1, even without weight normalization. 
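To make the assertions above easier to follow, this is the core workflow `test_make_dics` exercises, sketched with the `epochs`, `fwd_surf`, `csd`, and `noise_csd` objects from this test; the parameter values are illustrative rather than the ones any particular assertion pins down:

    from mne.beamformer import apply_dics_csd, make_dics

    filters = make_dics(
        epochs.info, fwd_surf, csd,
        reg=0.05, noise_csd=noise_csd,
        pick_ori="max-power", weight_norm="unit-noise-gain",
        inversion="matrix",
    )
    power, freqs = apply_dics_csd(csd, filters)  # one source-power estimate per CSD frequency

`power.data` then has shape (n_sources, n_frequencies), which is what the `_fwd_dist` helper defined below indexes into.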
@@ -294,105 +359,151 @@ def test_make_dics(tmp_path, _load_forward, idx, whiten): inds = np.triu_indices(csd.n_channels) # Using [:, :] syntax for in-place broadcasting csd_noise._data[:, :] = np.eye(csd.n_channels)[inds][:, np.newaxis] - filters = make_dics(epochs.info, fwd_surf, csd_noise, label=label, - weight_norm=None, depth=1., noise_csd=noise_csd, - inversion='single') - w = filters['weights'][0][:3] - assert_allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=1e-6, - atol=0) + filters = make_dics( + epochs.info, + fwd_surf, + csd_noise, + label=label, + weight_norm=None, + depth=1.0, + noise_csd=noise_csd, + inversion="single", + ) + w = filters["weights"][0][:3] + assert_allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=1e-6, atol=0) # Test turning off both forward and weight normalization - filters = make_dics(epochs.info, fwd_surf, csd, label=label, - weight_norm=None, depth=None, noise_csd=noise_csd) - w = filters['weights'][0][:3] - assert not np.allclose(np.diag(w.dot(w.conjugate().T)), 1.0, - rtol=1e-2, atol=0) + filters = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + weight_norm=None, + depth=None, + noise_csd=noise_csd, + ) + w = filters["weights"][0][:3] + assert not np.allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=1e-2, atol=0) # Test neural-activity-index weight normalization. It should be a scaled # version of the unit-noise-gain beamformer. filters_nai = make_dics( - epochs.info, fwd_surf, csd, label=label, pick_ori='max-power', - weight_norm='nai', depth=None, noise_csd=noise_csd) - w_nai = filters_nai['weights'][0] + epochs.info, + fwd_surf, + csd, + label=label, + pick_ori="max-power", + weight_norm="nai", + depth=None, + noise_csd=noise_csd, + ) + w_nai = filters_nai["weights"][0] filters_ung = make_dics( - epochs.info, fwd_surf, csd, label=label, pick_ori='max-power', - weight_norm='unit-noise-gain', depth=None, noise_csd=noise_csd) - w_ung = filters_ung['weights'][0] - assert_allclose(np.corrcoef(np.abs(w_nai).ravel(), - np.abs(w_ung).ravel()), 1, atol=1e-7) + epochs.info, + fwd_surf, + csd, + label=label, + pick_ori="max-power", + weight_norm="unit-noise-gain", + depth=None, + noise_csd=noise_csd, + ) + w_ung = filters_ung["weights"][0] + assert_allclose( + np.corrcoef(np.abs(w_nai).ravel(), np.abs(w_ung).ravel()), 1, atol=1e-7 + ) # Test whether spatial filter contains src_type - assert 'src_type' in filters + assert "src_type" in filters fname = tmp_path / "filters-dics.h5" filters.save(fname) filters_read = read_beamformer(fname) assert isinstance(filters, Beamformer) assert isinstance(filters_read, Beamformer) - for key in ['tmin', 'tmax']: # deal with strictness of object_diff - setattr(filters['csd'], key, np.float64(getattr(filters['csd'], key))) - assert object_diff(filters, filters_read) == '' + for key in ["tmin", "tmax"]: # deal with strictness of object_diff + setattr(filters["csd"], key, np.float64(getattr(filters["csd"], key))) + assert object_diff(filters, filters_read) == "" def _fwd_dist(power, fwd, vertices, source_ind, tidx=1): idx = np.argmax(power.data[:, tidx]) - rr_got = fwd['src'][0]['rr'][vertices[idx]] - rr_want = fwd['src'][0]['rr'][vertices[source_ind]] + rr_got = fwd["src"][0]["rr"][vertices[idx]] + rr_want = fwd["src"][0]["rr"][vertices[source_ind]] return np.linalg.norm(rr_got - rr_want) @idx_param -@pytest.mark.parametrize('inversion, weight_norm', [ - ('single', None), - ('matrix', 'unit-noise-gain'), -]) +@pytest.mark.parametrize( + "inversion, weight_norm", + [ + ("single", None), + ("matrix", 
"unit-noise-gain"), + ], +) def test_apply_dics_csd(_load_forward, idx, inversion, weight_norm): """Test applying a DICS beamformer to a CSD matrix.""" fwd_free, fwd_surf, fwd_fixed, _ = _load_forward - epochs, _, csd, source_vertno, label, vertices, source_ind = \ - _simulate_data(fwd_fixed, idx) + epochs, _, csd, source_vertno, label, vertices, source_ind = _simulate_data( + fwd_fixed, idx + ) reg = 1 # Lots of regularization for our toy dataset - with pytest.raises(ValueError, match='several sensor types'): + with pytest.raises(ValueError, match="several sensor types"): make_dics(epochs.info, fwd_free, csd) - epochs.pick_types(meg='grad') + epochs.pick(picks="grad") # Try different types of forward models - assert label.hemi == 'lh' + assert label.hemi == "lh" for fwd in [fwd_free, fwd_surf, fwd_fixed]: - filters = make_dics(epochs.info, fwd, csd, label=label, reg=reg, - inversion=inversion, weight_norm=weight_norm) + filters = make_dics( + epochs.info, + fwd, + csd, + label=label, + reg=reg, + inversion=inversion, + weight_norm=weight_norm, + ) power, f = apply_dics_csd(csd, filters) assert f == [10, 20] # Did we find the true source at 20 Hz? dist = _fwd_dist(power, fwd_free, vertices, source_ind) - assert dist == 0. + assert dist == 0.0 # Is the signal stronger at 20 Hz than 10? assert power.data[source_ind, 1] > power.data[source_ind, 0] -@pytest.mark.parametrize('pick_ori', [None, 'normal', 'max-power', 'vector']) -@pytest.mark.parametrize('inversion', ['single', 'matrix']) +@pytest.mark.parametrize("pick_ori", [None, "normal", "max-power", "vector"]) +@pytest.mark.parametrize("inversion", ["single", "matrix"]) @idx_param def test_apply_dics_ori_inv(_load_forward, pick_ori, inversion, idx): """Test picking different orientations and inversion modes.""" fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward - epochs, _, csd, source_vertno, label, vertices, source_ind = \ - _simulate_data(fwd_fixed, idx) - epochs.pick_types(meg='grad') - - reg_ = 5 if inversion == 'matrix' else 1 - filters = make_dics(epochs.info, fwd_surf, csd, label=label, - reg=reg_, pick_ori=pick_ori, - inversion=inversion, depth=None, - weight_norm='unit-noise-gain') + epochs, _, csd, source_vertno, label, vertices, source_ind = _simulate_data( + fwd_fixed, idx + ) + epochs.pick(picks="grad") + + reg_ = 5 if inversion == "matrix" else 1 + filters = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + reg=reg_, + pick_ori=pick_ori, + inversion=inversion, + depth=None, + weight_norm="unit-noise-gain", + ) power, f = apply_dics_csd(csd, filters) assert f == [10, 20] dist = _fwd_dist(power, fwd_surf, vertices, source_ind) # This is 0. for unit-noise-gain-invariant: - assert dist <= (0.02 if inversion == 'matrix' else 0.) + assert dist <= (0.02 if inversion == "matrix" else 0.0) assert power.data[source_ind, 1] > power.data[source_ind, 0] # Test unit-noise-gain weighting @@ -400,40 +511,55 @@ def test_apply_dics_ori_inv(_load_forward, pick_ori, inversion, idx): inds = np.triu_indices(csd.n_channels) csd_noise._data[...] = np.eye(csd.n_channels)[inds][:, np.newaxis] noise_power, f = apply_dics_csd(csd_noise, filters) - want_norm = 3 if pick_ori in (None, 'vector') else 1 + want_norm = 3 if pick_ori in (None, "vector") else 1 assert_allclose(noise_power.data, want_norm, atol=1e-7) # Test filter with forward normalization instead of weight # normalization - filters = make_dics(epochs.info, fwd_surf, csd, label=label, - reg=reg_, pick_ori=pick_ori, - inversion=inversion, weight_norm=None, - depth=1.) 
+ filters = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + reg=reg_, + pick_ori=pick_ori, + inversion=inversion, + weight_norm=None, + depth=1.0, + ) power, f = apply_dics_csd(csd, filters) assert f == [10, 20] dist = _fwd_dist(power, fwd_surf, vertices, source_ind) mat_tol = {0: 0.055, 100: 0.20, 200: 0.015, 233: 0.035}[idx] - max_ = (mat_tol if inversion == 'matrix' else 0.) + max_ = mat_tol if inversion == "matrix" else 0.0 assert 0 <= dist <= max_ assert power.data[source_ind, 1] > power.data[source_ind, 0] def _nearest_vol_ind(fwd_vol, fwd, vertices, source_ind): return _compute_nearest( - fwd_vol['source_rr'], - fwd['src'][0]['rr'][vertices][source_ind][np.newaxis])[0] + fwd_vol["source_rr"], fwd["src"][0]["rr"][vertices][source_ind][np.newaxis] + )[0] @idx_param def test_real(_load_forward, idx): """Test using a real-valued filter.""" fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward - epochs, _, csd, source_vertno, label, vertices, source_ind = \ - _simulate_data(fwd_fixed, idx) - epochs.pick_types(meg='grad') + epochs, _, csd, source_vertno, label, vertices, source_ind = _simulate_data( + fwd_fixed, idx + ) + epochs.pick(picks="grad") reg = 1 # Lots of regularization for our toy dataset - filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg, - real_filter=True, inversion='single') + filters_real = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + reg=reg, + real_filter=True, + inversion="single", + ) # Also test here that no warnings are thrown - implemented to check whether # src should not be None warning occurs: power, f = apply_dics_csd(csd, filters_real) @@ -444,9 +570,16 @@ def test_real(_load_forward, idx): assert power.data[source_ind, 1] > power.data[source_ind, 0] # Test rank reduction - filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=5, - pick_ori='max-power', inversion='matrix', - reduce_rank=True) + filters_real = make_dics( + epochs.info, + fwd_surf, + csd, + label=label, + reg=5, + pick_ori="max-power", + inversion="matrix", + reduce_rank=True, + ) power, f = apply_dics_csd(csd, filters_real) assert f == [10, 20] dist = _fwd_dist(power, fwd_surf, vertices, source_ind) @@ -454,57 +587,56 @@ def test_real(_load_forward, idx): assert power.data[source_ind, 1] > power.data[source_ind, 0] # Test computing source power on a volume source space - filters_vol = make_dics(epochs.info, fwd_vol, csd, reg=reg, - inversion='single') + filters_vol = make_dics(epochs.info, fwd_vol, csd, reg=reg, inversion="single") power, f = apply_dics_csd(csd, filters_vol) vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind) assert f == [10, 20] - dist = _fwd_dist( - power, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind) + dist = _fwd_dist(power, fwd_vol, fwd_vol["src"][0]["vertno"], vol_source_ind) vol_tols = {100: 0.008, 200: 0.008} - assert dist <= vol_tols.get(idx, 0.) 
+ assert dist <= vol_tols.get(idx, 0.0) assert power.data[vol_source_ind, 1] > power.data[vol_source_ind, 0] # check whether a filters object without src_type throws expected warning - del filters_vol['src_type'] # emulate 0.16 behaviour to cause warning - with pytest.warns(RuntimeWarning, match='spatial filter does not contain ' - 'src_type'): + del filters_vol["src_type"] # emulate 0.16 behaviour to cause warning + with pytest.warns(RuntimeWarning, match="spatial filter does not contain src_type"): apply_dics_csd(csd, filters_vol) -@pytest.mark.filterwarnings("ignore:The use of several sensor types with the" - ":RuntimeWarning") +@pytest.mark.filterwarnings( + "ignore:The use of several sensor types with the:RuntimeWarning" +) @idx_param def test_apply_dics_timeseries(_load_forward, idx): """Test DICS applied to timeseries data.""" fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward - epochs, evoked, csd, source_vertno, label, vertices, source_ind = \ - _simulate_data(fwd_fixed, idx) + epochs, evoked, csd, source_vertno, label, vertices, source_ind = _simulate_data( + fwd_fixed, idx + ) reg = 5 # Lots of regularization for our toy dataset - with pytest.raises(ValueError, match='several sensor types'): + with pytest.raises(ValueError, match="several sensor types"): make_dics(evoked.info, fwd_surf, csd) - evoked.pick_types(meg='grad') + evoked.pick(picks="grad") - multiple_filters = make_dics(evoked.info, fwd_surf, csd, label=label, - reg=reg) + multiple_filters = make_dics(evoked.info, fwd_surf, csd, label=label, reg=reg) # Sanity checks on the resulting STC after applying DICS on evoked stcs = apply_dics(evoked, multiple_filters) assert isinstance(stcs, list) - assert len(stcs) == len(multiple_filters['weights']) - assert_array_equal(stcs[0].vertices[0], multiple_filters['vertices'][0]) - assert_array_equal(stcs[0].vertices[1], multiple_filters['vertices'][1]) + assert len(stcs) == len(multiple_filters["weights"]) + assert_array_equal(stcs[0].vertices[0], multiple_filters["vertices"][0]) + assert_array_equal(stcs[0].vertices[1], multiple_filters["vertices"][1]) assert_allclose(stcs[0].times, evoked.times) # Applying filters for multiple frequencies on epoch data should fail - with pytest.raises(ValueError, match='computed for a single frequency'): + with pytest.raises(ValueError, match="computed for a single frequency"): apply_dics_epochs(epochs, multiple_filters) # From now on, only apply filters with a single frequency (20 Hz). csd20 = csd.pick_frequency(20) - filters = make_dics(evoked.info, fwd_surf, csd20, label=label, reg=reg, - inversion='single') + filters = make_dics( + evoked.info, fwd_surf, csd20, label=label, reg=reg, inversion="single" + ) # Sanity checks on the resulting STC after applying DICS on epochs. # Also test here that no warnings are thrown - implemented to check whether @@ -513,8 +645,8 @@ def test_apply_dics_timeseries(_load_forward, idx): assert isinstance(stcs, list) assert len(stcs) == 1 - assert_array_equal(stcs[0].vertices[0], filters['vertices'][0]) - assert_array_equal(stcs[0].vertices[1], filters['vertices'][1]) + assert_array_equal(stcs[0].vertices[0], filters["vertices"][0]) + assert_array_equal(stcs[0].vertices[1], filters["vertices"][1]) assert_allclose(stcs[0].times, epochs.times) # Did we find the source? 
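For orientation while reading the time-series assertions: the apply-to-data entry points require single-frequency filters, which is what the checks above enforce. A hedged sketch using the objects from this test (`csd20`, `filters20`, `stc_evoked`, and `stcs_epochs` are illustrative names):

    from mne.beamformer import apply_dics, apply_dics_epochs, make_dics

    csd20 = csd.pick_frequency(20)  # keep a single frequency before applying to time series
    filters20 = make_dics(evoked.info, fwd_surf, csd20, reg=5, inversion="single")
    stc_evoked = apply_dics(evoked, filters20)          # a single STC for the evoked response
    stcs_epochs = apply_dics_epochs(epochs, filters20)  # a list with one STC per epoch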
@@ -524,14 +656,14 @@ def test_apply_dics_timeseries(_load_forward, idx): # Apply filters to evoked stc = apply_dics(evoked, filters) - stc = (stc ** 2).mean() + stc = (stc**2).mean() dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0) assert dist == 0 # Test if wrong channel selection is detected in application of filter evoked_ch = cp.deepcopy(evoked) - evoked_ch.pick_channels(evoked_ch.ch_names[:-1]) - with pytest.raises(ValueError, match='MEG 2633 which is not present'): + evoked_ch.pick(evoked_ch.ch_names[:-1]) + with pytest.raises(ValueError, match="MEG 2633 which is not present"): apply_dics(evoked_ch, filters) # Test whether projections are applied, by adding a custom projection @@ -542,13 +674,13 @@ def test_apply_dics_timeseries(_load_forward, idx): proj_matrix = make_projector(p, evoked_proj.ch_names)[0] evoked_proj.add_proj(p) filters_proj = make_dics(evoked_proj.info, fwd_surf, csd20, label=label) - assert_array_equal(filters_proj['proj'], proj_matrix) + assert_allclose(filters_proj["proj"], proj_matrix, rtol=1e-7) stc_proj = apply_dics(evoked_proj, filters_proj) assert np.any(np.not_equal(stc_noproj.data, stc_proj.data)) # Test detecting incompatible projections - filters_proj['proj'] = filters_proj['proj'][:-1, :-1] - with pytest.raises(ValueError, match='operands could not be broadcast'): + filters_proj["proj"] = filters_proj["proj"][:-1, :-1] + with pytest.raises(ValueError, match="operands could not be broadcast"): apply_dics(evoked_proj, filters_proj) # Test returning a generator @@ -557,30 +689,29 @@ def test_apply_dics_timeseries(_load_forward, idx): assert_array_equal(stcs[0].data, next(stcs_gen).data) # Test computing timecourses on a volume source space - filters_vol = make_dics(evoked.info, fwd_vol, csd20, reg=reg, - inversion='single') + filters_vol = make_dics(evoked.info, fwd_vol, csd20, reg=reg, inversion="single") stc = apply_dics(evoked, filters_vol) - stc = (stc ** 2).mean() + stc = (stc**2).mean() assert stc.data.shape[1] == 1 vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind) - dist = _fwd_dist(stc, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind, - tidx=0) + dist = _fwd_dist(stc, fwd_vol, fwd_vol["src"][0]["vertno"], vol_source_ind, tidx=0) vol_tols = {100: 0.008, 200: 0.015} - vol_tol = vol_tols.get(idx, 0.) 
+ vol_tol = vol_tols.get(idx, 0.0) assert dist <= vol_tol # check whether a filters object without src_type throws expected warning - del filters_vol['src_type'] # emulate 0.16 behaviour to cause warning - with pytest.warns(RuntimeWarning, match='filter does not contain src_typ'): + del filters_vol["src_type"] # emulate 0.16 behaviour to cause warning + with pytest.warns(RuntimeWarning, match="filter does not contain src_typ"): apply_dics_epochs(epochs, filters_vol) +@pytest.mark.slowtest @testing.requires_testing_data -@pytest.mark.parametrize('return_generator', (True, False)) +@pytest.mark.parametrize("return_generator", (True, False)) def test_apply_dics_tfr(return_generator): """Test DICS applied to time-frequency objects.""" info = read_info(fname_raw) - info = pick_info(info, pick_types(info, meg='grad')) + info = pick_info(info, pick_types(info, meg="grad")) forward = mne.read_forward_solution(fname_fwd) rng = np.random.default_rng(11) @@ -589,11 +720,11 @@ def test_apply_dics_tfr(return_generator): n_chans = len(info.ch_names) freqs = [8, 9] n_times = 300 - times = np.arange(n_times) / info['sfreq'] + times = np.arange(n_times) / info["sfreq"] data = rng.random((n_epochs, n_chans, len(freqs), n_times)) data *= 1e-6 data = data + data * 1j # add imag. component to simulate phase - epochs_tfr = EpochsTFR(info, data, times=times, freqs=freqs) + epochs_tfr = EpochsTFRArray(info=info, data=data, times=times, freqs=freqs) # Create a DICS beamformer and convert the EpochsTFR to source space. csd = csd_tfr(epochs_tfr) @@ -606,18 +737,23 @@ def test_apply_dics_tfr(return_generator): assert_allclose(stcs[0][0].times, times) assert len(stcs) == len(epochs_tfr) # check same number of epochs assert all([len(s) == len(freqs) for s in stcs]) # check nested freqs - assert all([s.data.shape == (forward['nsource'], n_times) - for these_stcs in stcs for s in these_stcs]) + assert all( + [ + s.data.shape == (forward["nsource"], n_times) + for these_stcs in stcs + for s in these_stcs + ] + ) # Compute power from the source space TFR. This should yield the same # result as the apply_dics_csd function. - source_power = np.zeros((forward['nsource'], len(freqs))) + source_power = np.zeros((forward["nsource"], len(freqs))) for stcs_epoch in stcs: for i, stc_freq in enumerate(stcs_epoch): power = (stc_freq.data * np.conj(stc_freq.data)).real power = power.mean(axis=-1) # mean over time # Scaling by sampling frequency for compatibility with Matlab - power /= epochs_tfr.info['sfreq'] + power /= epochs_tfr.info["sfreq"] source_power[:, i] += power.T source_power /= n_epochs @@ -628,86 +764,112 @@ def test_apply_dics_tfr(return_generator): # Test that real-value only data fails, due to non-linearity of computing # power, it is recommended to transform to source-space first before # converting to power. 
- with pytest.raises(RuntimeError, - match='Time-frequency data must be complex'): + with pytest.raises(RuntimeError, match="Time-frequency data must be complex"): epochs_tfr_real = epochs_tfr.copy() epochs_tfr_real.data = epochs_tfr_real.data.real stcs = apply_dics_tfr_epochs(epochs_tfr_real, filters) filters_vector = filters.copy() - filters_vector['pick_ori'] = 'vector' - with pytest.warns(match='vector solution'): + filters_vector["pick_ori"] = "vector" + with pytest.warns(match="vector solution"): apply_dics_tfr_epochs(epochs_tfr, filters_vector) def _cov_as_csd(cov, info): rng = np.random.RandomState(0) - assert cov['data'].ndim == 2 - assert len(cov['data']) == len(cov['names']) + assert cov["data"].ndim == 2 + assert len(cov["data"]) == len(cov["names"]) # we need to make this have at least some complex structure - data = cov['data'] + 1e-1 * _rand_csd(rng, info) + data = cov["data"] + 1e-1 * _rand_csd(rng, info) assert data.dtype == np.complex128 - return CrossSpectralDensity(_sym_mat_to_vector(data), cov['names'], 0., 16) + return CrossSpectralDensity(_sym_mat_to_vector(data), cov["names"], 0.0, 16) # Just test free ori here (assume fixed is same as LCMV if these are) # Changes here should be synced with test_lcmv.py @pytest.mark.slowtest @pytest.mark.parametrize( - 'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, real_filter', [ - (0.05, 'vector', 'unit-noise-gain-invariant', - False, None, 26, 28, True), - (0.05, 'vector', 'unit-noise-gain', False, None, 13, 15, True), - (0.05, 'vector', 'nai', False, None, 13, 15, True), - (0.05, None, 'unit-noise-gain-invariant', False, None, 26, 28, False), - (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, False), - (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, True), - (0.05, None, 'unit-noise-gain', False, None, 13, 14, False), - (0.05, None, 'unit-noise-gain', True, None, 35, 37, False), - (0.05, None, 'nai', True, None, 35, 37, False), + "reg, pick_ori, weight_norm, use_cov, depth, lower, upper, real_filter", + [ + (0.05, "vector", "unit-noise-gain-invariant", False, None, 26, 28, True), + (0.05, "vector", "unit-noise-gain", False, None, 13, 15, True), + (0.05, "vector", "nai", False, None, 13, 15, True), + (0.05, None, "unit-noise-gain-invariant", False, None, 26, 28, False), + (0.05, None, "unit-noise-gain-invariant", True, None, 40, 42, False), + (0.05, None, "unit-noise-gain-invariant", True, None, 40, 42, True), + (0.05, None, "unit-noise-gain", False, None, 13, 14, False), + (0.05, None, "unit-noise-gain", True, None, 35, 37, False), + (0.05, None, "nai", True, None, 35, 37, False), (0.05, None, None, True, None, 12, 14, False), (0.05, None, None, True, 0.8, 39, 43, False), - (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20, - False), - (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, False), - (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, True), - (0.05, 'max-power', 'nai', True, None, 21, 24, False), - (0.05, 'max-power', None, True, None, 7, 10, False), - (0.05, 'max-power', None, True, 0.8, 15, 18, False), + (0.05, "max-power", "unit-noise-gain-invariant", False, None, 17, 20, False), + (0.05, "max-power", "unit-noise-gain", False, None, 17, 20, False), + (0.05, "max-power", "unit-noise-gain", False, None, 17, 20, True), + (0.05, "max-power", "nai", True, None, 21, 24, False), + (0.05, "max-power", None, True, None, 7, 10, False), + (0.05, "max-power", None, True, 0.8, 15, 18, False), # skip most no-reg tests, assume others are equal to LCMV if 
these are (0.00, None, None, True, None, 21, 32, False), - (0.00, 'max-power', None, True, None, 13, 19, False), - ]) -def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm, - use_cov, depth, lower, upper, real_filter): + (0.00, "max-power", None, True, None, 13, 19, False), + ], +) +def test_localization_bias_free( + bias_params_free, + reg, + pick_ori, + weight_norm, + use_cov, + depth, + lower, + upper, + real_filter, +): """Test localization bias for free-orientation DICS.""" evoked, fwd, noise_cov, data_cov, want = bias_params_free noise_csd = _cov_as_csd(noise_cov, evoked.info) data_csd = _cov_as_csd(data_cov, evoked.info) del noise_cov, data_cov if not use_cov: - evoked.pick_types(meg='grad') + evoked.pick(picks="grad") noise_csd = None filters = make_dics( - evoked.info, fwd, data_csd, reg, noise_csd, pick_ori=pick_ori, - weight_norm=weight_norm, depth=depth, real_filter=real_filter) + evoked.info, + fwd, + data_csd, + reg, + noise_csd, + pick_ori=pick_ori, + weight_norm=weight_norm, + depth=depth, + real_filter=real_filter, + ) loc = apply_dics(evoked, filters).data - loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc) + loc = np.linalg.norm(loc, axis=1) if pick_ori == "vector" else np.abs(loc) # Compute the percentage of sources for which there is no loc bias: perc = (want == np.argmax(loc, axis=0)).mean() * 100 assert lower <= perc <= upper +@pytest.mark.slowtest @pytest.mark.parametrize( - 'weight_norm, lower, upper, lower_ori, upper_ori, real_filter', [ - ('unit-noise-gain-invariant', 57, 58, 0.60, 0.61, False), - ('unit-noise-gain', 57, 58, 0.60, 0.61, False), - ('unit-noise-gain', 57, 58, 0.60, 0.61, True), + "weight_norm, lower, upper, lower_ori, upper_ori, real_filter", + [ + ("unit-noise-gain-invariant", 57, 58, 0.60, 0.61, False), + ("unit-noise-gain", 57, 58, 0.60, 0.61, False), + ("unit-noise-gain", 57, 58, 0.60, 0.61, True), (None, 27, 28, 0.56, 0.57, False), - ]) -def test_orientation_max_power(bias_params_fixed, bias_params_free, - weight_norm, lower, upper, lower_ori, upper_ori, - real_filter): + ], +) +def test_orientation_max_power( + bias_params_fixed, + bias_params_free, + weight_norm, + lower, + upper, + lower_ori, + upper_ori, + real_filter, +): """Test orientation selection for bias for max-power DICS.""" # we simulate data for the fixed orientation forward and beamform using # the free orientation forward, and check the orientation match at the end @@ -716,11 +878,19 @@ def test_orientation_max_power(bias_params_fixed, bias_params_free, data_csd = _cov_as_csd(data_cov, evoked.info) del data_cov, noise_cov fwd = bias_params_free[1] - filters = make_dics(evoked.info, fwd, data_csd, 0.05, noise_csd, - pick_ori='max-power', weight_norm=weight_norm, - depth=None, real_filter=real_filter) + filters = make_dics( + evoked.info, + fwd, + data_csd, + 0.05, + noise_csd, + pick_ori="max-power", + weight_norm=weight_norm, + depth=None, + real_filter=real_filter, + ) loc = np.abs(apply_dics(evoked, filters).data) - ori = filters['max_power_ori'][0] + ori = filters["max_power_ori"][0] assert ori.shape == (246, 3) loc = np.abs(loc) # Compute the percentage of sources for which there is no loc bias: @@ -730,11 +900,10 @@ def test_orientation_max_power(bias_params_fixed, bias_params_free, assert lower <= perc <= upper # Compute the dot products of our forward normals and # assert we get some hopefully reasonable agreement - assert fwd['coord_frame'] == FIFF.FIFFV_COORD_HEAD - nn = np.concatenate( - [s['nn'][v] for s, v in 
zip(fwd['src'], filters['vertices'])]) + assert fwd["coord_frame"] == FIFF.FIFFV_COORD_HEAD + nn = np.concatenate([s["nn"][v] for s, v in zip(fwd["src"], filters["vertices"])]) nn = nn[want] - nn = apply_trans(invert_transform(fwd['mri_head_t']), nn, move=False) + nn = apply_trans(invert_transform(fwd["mri_head_t"]), nn, move=False) assert_allclose(np.linalg.norm(nn, axis=1), 1, atol=1e-6) assert_allclose(np.linalg.norm(ori, axis=1), 1, atol=1e-12) dots = np.abs((nn[mask] * ori[mask]).sum(-1)) @@ -746,40 +915,46 @@ def test_orientation_max_power(bias_params_fixed, bias_params_free, @testing.requires_testing_data @idx_param -@pytest.mark.parametrize('whiten', (False, True)) +@pytest.mark.parametrize("whiten", (False, True)) def test_make_dics_rank(_load_forward, idx, whiten): """Test making DICS beamformer filters with rank param.""" _, fwd_surf, fwd_fixed, _ = _load_forward epochs, _, csd, _, label, _, _ = _simulate_data(fwd_fixed, idx) if whiten: noise_csd, want_rank = _make_rand_csd(epochs.info, csd) - kind = 'mag + grad' + kind = "mag + grad" else: noise_csd = None - epochs.pick_types(meg='grad') + epochs.pick(picks="grad") want_rank = len(epochs.ch_names) assert want_rank == 41 - kind = 'grad' + kind = "grad" with catch_logging() as log: filters = make_dics( - epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, - verbose=True) + epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, verbose=True + ) log = log.getvalue() - assert f'Estimated rank ({kind}): {want_rank}' in log, log + assert f"Estimated rank ({kind}): {want_rank}" in log, log stc, _ = apply_dics_csd(csd, filters) other_rank = want_rank - 1 # shouldn't make a huge difference use_rank = dict(meg=other_rank) if not whiten: # XXX it's a bug that our rank functions don't treat "meg" # properly here... - use_rank['grad'] = use_rank.pop('meg') + use_rank["grad"] = use_rank.pop("meg") with catch_logging() as log: filters_2 = make_dics( - epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, - rank=use_rank, verbose=True) + epochs.info, + fwd_surf, + csd, + label=label, + noise_csd=noise_csd, + rank=use_rank, + verbose=True, + ) log = log.getvalue() - assert f'Computing rank from covariance with rank={use_rank}' in log, log + assert f"Computing rank from covariance with rank={use_rank}" in log, log stc_2, _ = apply_dics_csd(csd, filters_2) corr = np.corrcoef(stc_2.data.ravel(), stc.data.ravel())[0, 1] assert 0.8 < corr < 0.999999 @@ -787,10 +962,15 @@ def test_make_dics_rank(_load_forward, idx, whiten): # degenerate conditions if whiten: # make rank deficient - data = noise_csd.get_data(0.) + data = noise_csd.get_data(0.0) data[0] = data[:0] = 0 noise_csd._data[:, 0] = _sym_mat_to_vector(data) - with pytest.raises(ValueError, match='meg data rank.*the noise rank'): + with pytest.raises(ValueError, match="meg data rank.*the noise rank"): filters = make_dics( - epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, - verbose=True) + epochs.info, + fwd_surf, + csd, + label=label, + noise_csd=noise_csd, + verbose=True, + ) diff --git a/mne/beamformer/tests/test_external.py b/mne/beamformer/tests/test_external.py index a20cb3b3e79..e4373693496 100644 --- a/mne/beamformer/tests/test_external.py +++ b/mne/beamformer/tests/test_external.py @@ -1,35 +1,30 @@ -# Authors: Britta Westner -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-import pytest import numpy as np -from numpy.testing import assert_array_equal, assert_allclose +import pytest +from numpy.testing import assert_allclose, assert_array_equal from scipy.io import savemat import mne -from mne.beamformer import make_lcmv, apply_lcmv, apply_lcmv_cov +from mne.beamformer import apply_lcmv, apply_lcmv_cov, make_lcmv from mne.beamformer.tests.test_lcmv import _get_data from mne.datasets import testing -from mne.utils import requires_version data_path = testing.data_path(download=False) ft_data_path = data_path / "fieldtrip" / "beamformer" fname_raw = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" fname_cov = data_path / "MEG" / "sample" / "sample_audvis_trunc-cov.fif" -fname_fwd = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" -) -fname_fwd_vol = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-vol-7-fwd.fif" -) +fname_fwd = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" +fname_fwd_vol = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-vol-7-fwd.fif" fname_event = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw-eve.fif" fname_label = data_path / "MEG" / "sample" / "labels" / "Aud-lh.label" reject = dict(grad=4000e-13, mag=4e-12) -@pytest.fixture(scope='function', params=[testing._pytest_param()]) +@pytest.fixture(scope="function", params=[testing._pytest_param()]) def _get_bf_data(save_fieldtrip=False): raw, epochs, evoked, data_cov, _, _, _, _, _, fwd = _get_data(proj=False) @@ -38,28 +33,29 @@ def _get_bf_data(save_fieldtrip=False): raw.save(ft_data_path / "raw.fif", overwrite=True) # src (tris are not available in fwd['src'] once imported into MATLAB) - src = fwd['src'].copy() + src = fwd["src"].copy() mne.write_source_spaces( - ft_data_path / "src.fif", src, verbose='error', overwrite=True + ft_data_path / "src.fif", src, verbose="error", overwrite=True ) # pick gradiometers only: - epochs.pick_types(meg='grad') - evoked.pick_types(meg='grad') + epochs.pick(picks="grad") + evoked.pick(picks="grad") # compute covariance matrix (ignore false alarm about no baseline) - data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145, - method='empirical', verbose='error') + data_cov = mne.compute_covariance( + epochs, tmin=0.04, tmax=0.145, method="empirical", verbose="error" + ) if save_fieldtrip is True: # if the covariance matrix and epochs need resaving: # data covariance: cov_savepath = ft_data_path / "sample_cov.mat" - sample_cov = {'sample_cov': data_cov['data']} + sample_cov = {"sample_cov": data_cov["data"]} savemat(cov_savepath, sample_cov) # evoked data: ev_savepath = ft_data_path / "sample_evoked.mat" - data_ev = {'sample_evoked': evoked.data} + data_ev = {"sample_evoked": evoked.data} savemat(ev_savepath, data_ev) return evoked, data_cov, fwd @@ -67,23 +63,32 @@ def _get_bf_data(save_fieldtrip=False): # beamformer types to be tested: unit-gain (vector and scalar) and # unit-noise-gain (time series and power output [apply_lcmv_cov]) -@requires_version('pymatreader') -@pytest.mark.parametrize('bf_type, weight_norm, pick_ori, pwr', [ - ['ug_scal', None, 'max-power', False], - ['ung', 'unit-noise-gain', 'max-power', False], - ['ung_pow', 'unit-noise-gain', 'max-power', True], - ['ug_vec', None, 'vector', False], - ['ung_vec', 'unit-noise-gain', 'vector', False], -]) +@pytest.mark.parametrize( + "bf_type, weight_norm, pick_ori, pwr", + [ + ["ug_scal", None, "max-power", False], + ["ung", "unit-noise-gain", "max-power", False], + ["ung_pow", 
"unit-noise-gain", "max-power", True], + ["ug_vec", None, "vector", False], + ["ung_vec", "unit-noise-gain", "vector", False], + ], +) def test_lcmv_fieldtrip(_get_bf_data, bf_type, weight_norm, pick_ori, pwr): """Test LCMV vs fieldtrip output.""" - from pymatreader import read_mat + pymatreader = pytest.importorskip("pymatreader") + evoked, data_cov, fwd = _get_bf_data # run the MNE-Python beamformer - filters = make_lcmv(evoked.info, fwd, data_cov=data_cov, - noise_cov=None, pick_ori=pick_ori, reg=0.05, - weight_norm=weight_norm) + filters = make_lcmv( + evoked.info, + fwd, + data_cov=data_cov, + noise_cov=None, + pick_ori=pick_ori, + reg=0.05, + weight_norm=weight_norm, + ) if pwr: stc_mne = apply_lcmv_cov(data_cov, filters) else: @@ -91,18 +96,21 @@ def test_lcmv_fieldtrip(_get_bf_data, bf_type, weight_norm, pick_ori, pwr): # load the FieldTrip output ft_fname = ft_data_path / ("ft_source_" + bf_type + "-vol.mat") - stc_ft_data = read_mat(ft_fname)['stc'] + stc_ft_data = pymatreader.read_mat(ft_fname)["stc"] if stc_ft_data.ndim == 1: stc_ft_data.shape = (stc_ft_data.size, 1) if stc_mne.data.ndim == 2: signs = np.sign((stc_mne.data * stc_ft_data).sum(-1, keepdims=True)) if pwr: - assert_array_equal(signs, 1.) + assert_array_equal(signs, 1.0) stc_mne.data *= signs assert stc_ft_data.shape == stc_mne.data.shape - if pick_ori == 'vector': + if pick_ori == "vector": # compare norms first - assert_allclose(np.linalg.norm(stc_mne.data, axis=1), - np.linalg.norm(stc_ft_data, axis=1), rtol=1e-6) + assert_allclose( + np.linalg.norm(stc_mne.data, axis=1), + np.linalg.norm(stc_ft_data, axis=1), + rtol=1e-6, + ) assert_allclose(stc_mne.data, stc_ft_data, rtol=1e-6) diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 9e564dbc432..35b9943d54e 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -1,41 +1,58 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from contextlib import nullcontext from copy import deepcopy from inspect import signature -import pytest import numpy as np +import pytest +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, +) from scipy import linalg from scipy.spatial.distance import cdist -from numpy.testing import (assert_array_almost_equal, assert_array_equal, - assert_allclose, assert_array_less) import mne -from mne.transforms import apply_trans, invert_transform -from mne import (convert_forward_solution, read_forward_solution, compute_rank, - VolVectorSourceEstimate, VolSourceEstimate, EvokedArray, - pick_channels_cov, read_vectorview_selection) -from mne.beamformer import (make_lcmv, apply_lcmv, apply_lcmv_epochs, - apply_lcmv_raw, Beamformer, - read_beamformer, apply_lcmv_cov, make_dics) +from mne import ( + EvokedArray, + VolSourceEstimate, + VolVectorSourceEstimate, + compute_rank, + convert_forward_solution, + pick_channels_cov, + read_forward_solution, + read_vectorview_selection, +) +from mne._fiff.compensator import set_current_comp +from mne._fiff.constants import FIFF +from mne.beamformer import ( + Beamformer, + apply_lcmv, + apply_lcmv_cov, + apply_lcmv_epochs, + apply_lcmv_raw, + make_dics, + make_lcmv, + read_beamformer, +) from mne.beamformer._compute_beamformer import _prepare_beamformer_input from mne.datasets import testing -from mne.io.compensator import set_current_comp -from mne.io.constants import FIFF -from mne.minimum_norm import make_inverse_operator, apply_inverse +from mne.minimum_norm import apply_inverse, make_inverse_operator from mne.minimum_norm.tests.test_inverse import _assert_free_ori_match from mne.simulation import simulate_evoked -from mne.utils import (object_diff, requires_version, catch_logging, - _record_warnings) - +from mne.transforms import apply_trans, invert_transform +from mne.utils import _record_warnings, catch_logging, object_diff data_path = testing.data_path(download=False) fname_raw = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" fname_cov = data_path / "MEG" / "sample" / "sample_audvis_trunc-cov.fif" -fname_fwd = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" -) -fname_fwd_vol = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-vol-7-fwd.fif" -) +fname_fwd = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" +fname_fwd_vol = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-vol-7-fwd.fif" fname_event = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw-eve.fif" fname_label = data_path / "MEG" / "sample" / "labels" / "Aud-lh.label" ctf_fname = data_path / "CTF" / "somMDYO-18av.ds" @@ -49,18 +66,25 @@ def _read_forward_solution_meg(*args, **kwargs): return mne.pick_types_forward(fwd, meg=True, eeg=False) -def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, - epochs_preload=True, data_cov=True, proj=True): +def _get_data( + tmin=-0.1, + tmax=0.15, + all_forward=True, + epochs=True, + epochs_preload=True, + data_cov=True, + proj=True, +): """Read in data used in tests.""" label = mne.read_label(fname_label) events = mne.read_events(fname_event) raw = mne.io.read_raw_fif(fname_raw, preload=True) forward = mne.read_forward_solution(fname_fwd) if all_forward: - forward_surf_ori = _read_forward_solution_meg( - fname_fwd, surf_ori=True) + forward_surf_ori = _read_forward_solution_meg(fname_fwd, surf_ori=True) forward_fixed = _read_forward_solution_meg( - fname_fwd, force_fixed=True, 
surf_ori=True, use_cps=False) + fname_fwd, force_fixed=True, surf_ori=True, use_cps=False + ) forward_vol = _read_forward_solution_meg(fname_fwd_vol) else: forward_surf_ori = None @@ -70,21 +94,20 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, event_id, tmin, tmax = 1, tmin, tmax # Setup for reading the raw data - raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bad channels + raw.info["bads"] = ["MEG 2443", "EEG 053"] # 2 bad channels # Set up pick list: MEG - bad channels - left_temporal_channels = read_vectorview_selection('Left-temporal') - picks = mne.pick_types(raw.info, meg=True, - selection=left_temporal_channels) + left_temporal_channels = read_vectorview_selection("Left-temporal") + picks = mne.pick_types(raw.info, meg=True, selection=left_temporal_channels) picks = picks[::2] # decimate for speed # add a couple channels we will consider bad bad_picks = [100, 101] bads = [raw.ch_names[pick] for pick in bad_picks] assert not any(pick in picks for pick in bad_picks) picks = np.concatenate([picks, bad_picks]) - raw.pick_channels([raw.ch_names[ii] for ii in picks]) + raw.pick([raw.ch_names[ii] for ii in picks]) del picks - raw.info['bads'] = bads # add more bads + raw.info["bads"] = bads # add more bads if proj: raw.info.normalize_proj() # avoid projection warnings else: @@ -93,8 +116,16 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, if epochs: # Read epochs epochs = mne.Epochs( - raw, events, event_id, tmin, tmax, proj=True, - baseline=(None, 0), preload=epochs_preload, reject=reject) + raw, + events, + event_id, + tmin, + tmax, + proj=True, + baseline=(None, 0), + preload=epochs_preload, + reject=reject, + ) if epochs_preload: epochs.resample(200, npad=0) epochs.crop(0, None) @@ -106,17 +137,29 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, info = raw.info noise_cov = mne.read_cov(fname_cov) - noise_cov['projs'] = [] # avoid warning - noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05, - eeg=0.1, proj=True, rank=None) + noise_cov["projs"] = [] # avoid warning + noise_cov = mne.cov.regularize( + noise_cov, info, mag=0.05, grad=0.05, eeg=0.1, proj=True, rank=None + ) if data_cov: data_cov = mne.compute_covariance( - epochs, tmin=0.04, tmax=0.145, verbose='error') # baseline warning + epochs, tmin=0.04, tmax=0.145, verbose="error" + ) # baseline warning else: data_cov = None - return raw, epochs, evoked, data_cov, noise_cov, label, forward,\ - forward_surf_ori, forward_fixed, forward_vol + return ( + raw, + epochs, + evoked, + data_cov, + noise_cov, + label, + forward, + forward_surf_ori, + forward_fixed, + forward_vol, + ) @pytest.mark.slowtest @@ -127,70 +170,79 @@ def test_lcmv_vector(): # For speed and for rank-deficiency calculation simplicity, # just use grads - info = mne.pick_info(info, mne.pick_types(info, meg='grad', exclude=())) + info = mne.pick_info(info, mne.pick_types(info, meg="grad", exclude=())) with info._unlock(): info.update(bads=[], projs=[]) forward = mne.read_forward_solution(fname_fwd) - forward = mne.pick_channels_forward(forward, info['ch_names']) - vertices = [s['vertno'][::200] for s in forward['src']] + forward = mne.pick_channels_forward(forward, info["ch_names"]) + vertices = [s["vertno"][::200] for s in forward["src"]] n_vertices = sum(len(v) for v in vertices) assert n_vertices == 4 amplitude = 100e-9 - stc = mne.SourceEstimate(amplitude * np.eye(n_vertices), vertices, - 0, 1. 
/ info['sfreq']) - forward_sim = mne.convert_forward_solution(forward, force_fixed=True, - use_cps=True, copy=True) + stc = mne.SourceEstimate( + amplitude * np.eye(n_vertices), vertices, 0, 1.0 / info["sfreq"] + ) + forward_sim = mne.convert_forward_solution( + forward, force_fixed=True, use_cps=True, copy=True + ) forward_sim = mne.forward.restrict_forward_to_stc(forward_sim, stc) noise_cov = mne.make_ad_hoc_cov(info) - noise_cov.update(data=np.diag(noise_cov['data']), diag=False) + noise_cov.update(data=np.diag(noise_cov["data"]), diag=False) evoked = simulate_evoked(forward_sim, stc, info, noise_cov, nave=1) - source_nn = forward_sim['source_nn'] - source_rr = forward_sim['source_rr'] + source_nn = forward_sim["source_nn"] + source_rr = forward_sim["source_rr"] # Figure out our indices - mask = np.concatenate([np.in1d(s['vertno'], v) - for s, v in zip(forward['src'], vertices)]) + mask = np.concatenate( + [np.isin(s["vertno"], v) for s, v in zip(forward["src"], vertices)] + ) mapping = np.where(mask)[0] - assert_array_equal(source_rr, forward['source_rr'][mapping]) + assert_array_equal(source_rr, forward["source_rr"][mapping]) # Don't check NN because we didn't rotate to surf ori del forward_sim # Let's do minimum norm as a sanity check (dipole_fit is slower) - inv = make_inverse_operator(info, forward, noise_cov, loose=1.) - stc_vector_mne = apply_inverse(evoked, inv, pick_ori='vector') + inv = make_inverse_operator(info, forward, noise_cov, loose=1.0) + stc_vector_mne = apply_inverse(evoked, inv, pick_ori="vector") mne_ori = stc_vector_mne.data[mapping, :, np.arange(n_vertices)] mne_ori /= np.linalg.norm(mne_ori, axis=-1)[:, np.newaxis] mne_angles = np.rad2deg(np.arccos(np.sum(mne_ori * source_nn, axis=-1))) - assert np.mean(mne_angles) < 35 + assert np.mean(mne_angles) < 36 # Now let's do LCMV data_cov = mne.make_ad_hoc_cov(info) # just a stub for later with pytest.raises(ValueError, match="pick_ori"): - make_lcmv(info, forward, data_cov, 0.05, noise_cov, pick_ori='bad') + make_lcmv(info, forward, data_cov, 0.05, noise_cov, pick_ori="bad") lcmv_ori = list() for ti in range(n_vertices): this_evoked = evoked.copy().crop(evoked.times[ti], evoked.times[ti]) - data_cov['diag'] = False - data_cov['data'] = (np.outer(this_evoked.data, this_evoked.data) + - noise_cov['data']) - vals = linalg.svdvals(data_cov['data']) + data_cov["diag"] = False + data_cov["data"] = ( + np.outer(this_evoked.data, this_evoked.data) + noise_cov["data"] + ) + vals = linalg.svdvals(data_cov["data"]) assert vals[0] / vals[-1] < 1e5 # not rank deficient with catch_logging() as log: - filters = make_lcmv(info, forward, data_cov, 0.05, noise_cov, - verbose=True) + filters = make_lcmv(info, forward, data_cov, 0.05, noise_cov, verbose=True) log = log.getvalue() - assert '498 sources' in log + assert "498 sources" in log with catch_logging() as log: - filters_vector = make_lcmv(info, forward, data_cov, 0.05, - noise_cov, pick_ori='vector', - verbose=True) + filters_vector = make_lcmv( + info, + forward, + data_cov, + 0.05, + noise_cov, + pick_ori="vector", + verbose=True, + ) log = log.getvalue() - assert '498 sources' in log + assert "498 sources" in log stc = apply_lcmv(this_evoked, filters) stc_vector = apply_lcmv(this_evoked, filters_vector) assert isinstance(stc, mne.SourceEstimate) @@ -199,7 +251,7 @@ def test_lcmv_vector(): # Check the orientation by pooling across some neighbors, as LCMV can # have some "holes" at the points of interest - idx = np.where(cdist(forward['source_rr'], source_rr[[ti]]) < 0.02)[0] + 
idx = np.where(cdist(forward["source_rr"], source_rr[[ti]]) < 0.02)[0] lcmv_ori.append(np.mean(stc_vector.data[idx, :, 0], axis=0)) lcmv_ori[-1] /= np.linalg.norm(lcmv_ori[-1]) @@ -208,30 +260,51 @@ def test_lcmv_vector(): @pytest.mark.slowtest -@requires_version('h5io') @testing.requires_testing_data -@pytest.mark.parametrize('reg, proj, kind', [ - (0.01, True, 'volume'), - (0., False, 'volume'), - (0.01, False, 'surface'), - (0., True, 'surface'), -]) +@pytest.mark.parametrize( + "reg, proj, kind", + [ + (0.01, True, "volume"), + (0.0, False, "volume"), + (0.01, False, "surface"), + (0.0, True, "surface"), + ], +) def test_make_lcmv_bem(tmp_path, reg, proj, kind): """Test LCMV with evoked data and single trials.""" - raw, epochs, evoked, data_cov, noise_cov, label, forward,\ - forward_surf_ori, forward_fixed, forward_vol = _get_data(proj=proj) - - if kind == 'surface': + pytest.importorskip("h5io") + ( + raw, + epochs, + evoked, + data_cov, + noise_cov, + label, + forward, + forward_surf_ori, + forward_fixed, + forward_vol, + ) = _get_data(proj=proj) + + if kind == "surface": fwd = forward else: fwd = forward_vol - assert kind == 'volume' + assert kind == "volume" - filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, - noise_cov=noise_cov) + filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, noise_cov=noise_cov) stc = apply_lcmv(evoked, filters) stc.crop(0.02, None) + # Smoke test for label= support for surfaces only + label = mne.read_label(fname_label) + if kind == "volume": + ctx = pytest.raises(ValueError, match="volume source space") + else: + ctx = nullcontext() + with ctx: + make_lcmv(evoked.info, fwd, data_cov, reg=reg, noise_cov=noise_cov, label=label) + stc_pow = np.sum(np.abs(stc.data), axis=1) idx = np.argmax(stc_pow) max_stc = stc.data[idx] @@ -240,11 +313,17 @@ def test_make_lcmv_bem(tmp_path, reg, proj, kind): assert 0.08 < tmax < 0.15, tmax assert 0.9 < np.max(max_stc) < 3.5, np.max(max_stc) - if kind == 'surface': + if kind == "surface": # Test picking normal orientation (surface source space only). 
- filters = make_lcmv(evoked.info, forward_surf_ori, data_cov, - reg=reg, noise_cov=noise_cov, - pick_ori='normal', weight_norm=None) + filters = make_lcmv( + evoked.info, + forward_surf_ori, + data_cov, + reg=reg, + noise_cov=noise_cov, + pick_ori="normal", + weight_norm=None, + ) stc_normal = apply_lcmv(evoked, filters) stc_normal.crop(0.02, None) @@ -264,8 +343,9 @@ def test_make_lcmv_bem(tmp_path, reg, proj, kind): assert (np.abs(stc_normal.data) <= stc.data).all() # Test picking source orientation maximizing output source power - filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, - noise_cov=noise_cov, pick_ori='max-power') + filters = make_lcmv( + evoked.info, fwd, data_cov, reg=reg, noise_cov=noise_cov, pick_ori="max-power" + ) stc_max_power = apply_lcmv(evoked, filters) stc_max_power.crop(0.02, None) stc_pow = np.sum(np.abs(stc_max_power.data), axis=1) @@ -275,85 +355,125 @@ def test_make_lcmv_bem(tmp_path, reg, proj, kind): lower = 0.08 if proj else 0.04 assert lower < tmax < 0.15, tmax - assert 0.8 < np.max(max_stc) < 3., np.max(max_stc) + assert 0.8 < np.max(max_stc) < 3.0, np.max(max_stc) stc_max_power.data[:, :] = np.abs(stc_max_power.data) - if kind == 'surface': + if kind == "surface": # Maximum output source power orientation results should be # similar to free orientation results in areas with channel # coverage label = mne.read_label(fname_label) - mean_stc = stc.extract_label_time_course( - label, fwd['src'], mode='mean') - mean_stc_max_pow = \ - stc_max_power.extract_label_time_course( - label, fwd['src'], mode='mean') + mean_stc = stc.extract_label_time_course(label, fwd["src"], mode="mean") + mean_stc_max_pow = stc_max_power.extract_label_time_course( + label, fwd["src"], mode="mean" + ) assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 1.0) # Test if spatial filter contains src_type - assert filters['src_type'] == kind + assert filters["src_type"] == kind # __repr__ assert len(evoked.ch_names) == 22 - assert len(evoked.info['projs']) == (3 if proj else 0) - assert len(evoked.info['bads']) == 2 + assert len(evoked.info["projs"]) == (3 if proj else 0) + assert len(evoked.info["bads"]) == 2 rank = 17 if proj else 20 - assert 'LCMV' in repr(filters) - assert 'unknown subject' not in repr(filters) - assert f'{fwd["nsource"]} vert' in repr(filters) - assert '20 ch' in repr(filters) - assert 'rank %s' % rank in repr(filters) + assert "LCMV" in repr(filters) + assert "unknown subject" not in repr(filters) + assert f"{fwd['nsource']} vert" in repr(filters) + assert "20 ch" in repr(filters) + assert f"rank {rank}" in repr(filters) # I/O fname = tmp_path / "filters.h5" - with pytest.warns(RuntimeWarning, match='-lcmv.h5'): + with pytest.warns(RuntimeWarning, match="-lcmv.h5"): filters.save(fname) filters_read = read_beamformer(fname) assert isinstance(filters, Beamformer) assert isinstance(filters_read, Beamformer) # deal with object_diff strictness - filters_read['rank'] = int(filters_read['rank']) - filters['rank'] = int(filters['rank']) - assert object_diff(filters, filters_read) == '' + filters_read["rank"] = int(filters_read["rank"]) + filters["rank"] = int(filters["rank"]) + assert object_diff(filters, filters_read) == "" - if kind != 'surface': + if kind != "surface": return # Test if fixed forward operator is detected when picking normal or # max-power orientation - pytest.raises(ValueError, make_lcmv, evoked.info, forward_fixed, data_cov, - reg=0.01, noise_cov=noise_cov, pick_ori='normal') - pytest.raises(ValueError, make_lcmv, evoked.info, forward_fixed, 
data_cov, - reg=0.01, noise_cov=noise_cov, pick_ori='max-power') + pytest.raises( + ValueError, + make_lcmv, + evoked.info, + forward_fixed, + data_cov, + reg=0.01, + noise_cov=noise_cov, + pick_ori="normal", + ) + pytest.raises( + ValueError, + make_lcmv, + evoked.info, + forward_fixed, + data_cov, + reg=0.01, + noise_cov=noise_cov, + pick_ori="max-power", + ) # Test if non-surface oriented forward operator is detected when picking # normal orientation - pytest.raises(ValueError, make_lcmv, evoked.info, forward, data_cov, - reg=0.01, noise_cov=noise_cov, pick_ori='normal') + pytest.raises( + ValueError, + make_lcmv, + evoked.info, + forward, + data_cov, + reg=0.01, + noise_cov=noise_cov, + pick_ori="normal", + ) # Test if volume forward operator is detected when picking normal # orientation - pytest.raises(ValueError, make_lcmv, evoked.info, forward_vol, data_cov, - reg=0.01, noise_cov=noise_cov, pick_ori='normal') + pytest.raises( + ValueError, + make_lcmv, + evoked.info, + forward_vol, + data_cov, + reg=0.01, + noise_cov=noise_cov, + pick_ori="normal", + ) # Test if missing of noise covariance matrix is detected when more than # one channel type is present in the data - pytest.raises(ValueError, make_lcmv, evoked.info, forward_vol, - data_cov=data_cov, reg=0.01, noise_cov=None, - pick_ori='max-power') + pytest.raises( + ValueError, + make_lcmv, + evoked.info, + forward_vol, + data_cov=data_cov, + reg=0.01, + noise_cov=None, + pick_ori="max-power", + ) # Test if wrong channel selection is detected in application of filter evoked_ch = deepcopy(evoked) - evoked_ch.pick_channels(evoked_ch.ch_names[1:]) - filters = make_lcmv(evoked.info, forward_vol, data_cov, reg=0.01, - noise_cov=noise_cov) + evoked_ch.pick(evoked_ch.ch_names[1:]) + filters = make_lcmv( + evoked.info, forward_vol, data_cov, reg=0.01, noise_cov=noise_cov + ) # Test if discrepancies in channel selection of data and fwd model are # handled correctly in apply_lcmv # make filter with data where first channel was removed - filters = make_lcmv(evoked_ch.info, forward_vol, data_cov, reg=0.01, - noise_cov=noise_cov) + filters = make_lcmv( + evoked_ch.info, forward_vol, data_cov, reg=0.01, noise_cov=noise_cov + ) # applying that filter to the full data set should automatically exclude # this channel from the data # also test here that no warnings are thrown - implemented to check whether @@ -368,34 +488,34 @@ def test_make_lcmv_bem(tmp_path, reg, proj, kind): # Test if non-matching SSP projection is detected in application of filter if proj: raw_proj = raw.copy().del_proj() - with pytest.raises(ValueError, match='do not match the projections'): + with pytest.raises(ValueError, match="do not match the projections"): apply_lcmv_raw(raw_proj, filters) # Test apply_lcmv_raw use_raw = raw.copy().crop(0, 1) stc = apply_lcmv_raw(use_raw, filters) assert_allclose(stc.times, use_raw.times) - assert_array_equal(stc.vertices[0], forward_vol['src'][0]['vertno']) + assert_array_equal(stc.vertices[0], forward_vol["src"][0]["vertno"]) # Test if spatial filter contains src_type - assert 'src_type' in filters + assert "src_type" in filters # check whether a filters object without src_type throws expected warning - del filters['src_type'] # emulate 0.16 behaviour to cause warning - with pytest.warns(RuntimeWarning, match='spatial filter does not contain ' - 'src_type'): + del filters["src_type"] # emulate 0.16 behaviour to cause warning + with pytest.warns(RuntimeWarning, match="spatial filter does not contain src_type"): apply_lcmv(evoked, filters) 
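Because an LCMV spatial filter is a fixed linear operator once computed, averaging per-epoch source estimates is equivalent to beamforming the averaged (evoked) data, which is the identity the single-trial block below verifies. A minimal sketch of that check, assuming `epochs`, `forward_fixed`, `data_cov`, and `noise_cov` as built by `_get_data()` above:

    import numpy as np
    from mne.beamformer import apply_lcmv, apply_lcmv_epochs, make_lcmv

    filters = make_lcmv(epochs.info, forward_fixed, data_cov, reg=0.01, noise_cov=noise_cov)
    stcs = apply_lcmv_epochs(epochs, filters)                 # one SourceEstimate per epoch
    trial_mean = np.mean([stc.data for stc in stcs], axis=0)  # average in source space
    stc_evoked = apply_lcmv(epochs.average(), filters)        # beamform the evoked average
    np.testing.assert_array_almost_equal(trial_mean, stc_evoked.data)
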
# Now test single trial using fixed orientation forward solution # so we can compare it to the evoked solution - filters = make_lcmv(epochs.info, forward_fixed, data_cov, reg=0.01, - noise_cov=noise_cov) + filters = make_lcmv( + epochs.info, forward_fixed, data_cov, reg=0.01, noise_cov=noise_cov + ) stcs = apply_lcmv_epochs(epochs, filters) stcs_ = apply_lcmv_epochs(epochs, filters, return_generator=True) assert_array_equal(stcs[0].data, next(stcs_).data) epochs.drop_bad() - assert (len(epochs.events) == len(stcs)) + assert len(epochs.events) == len(stcs) # average the single trial estimates stc_avg = np.zeros_like(stcs[0].data) @@ -404,15 +524,17 @@ def test_make_lcmv_bem(tmp_path, reg, proj, kind): stc_avg /= len(stcs) # compare it to the solution using evoked with fixed orientation - filters = make_lcmv(evoked.info, forward_fixed, data_cov, reg=0.01, - noise_cov=noise_cov) + filters = make_lcmv( + evoked.info, forward_fixed, data_cov, reg=0.01, noise_cov=noise_cov + ) stc_fixed = apply_lcmv(evoked, filters) assert_array_almost_equal(stc_avg, stc_fixed.data) # use a label so we have few source vertices and delayed computation is # not used - filters = make_lcmv(epochs.info, forward_fixed, data_cov, reg=0.01, - noise_cov=noise_cov, label=label) + filters = make_lcmv( + epochs.info, forward_fixed, data_cov, reg=0.01, noise_cov=noise_cov, label=label + ) stcs_label = apply_lcmv_epochs(epochs, filters) assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data) @@ -420,54 +542,81 @@ def test_make_lcmv_bem(tmp_path, reg, proj, kind): # Test condition where the filters weights are zero. There should not be # any divide-by-zero errors zero_cov = data_cov.copy() - zero_cov['data'][:] = 0 - filters = make_lcmv(epochs.info, forward_fixed, zero_cov, reg=0.01, - noise_cov=noise_cov) - assert_array_equal(filters['weights'], 0) + zero_cov["data"][:] = 0 + filters = make_lcmv( + epochs.info, forward_fixed, zero_cov, reg=0.01, noise_cov=noise_cov + ) + assert_array_equal(filters["weights"], 0) # Test condition where one channel type is picked # (avoid "grad data rank (13) did not match the noise rank (None)") data_cov_grad = pick_channels_cov( - data_cov, [ch_name for ch_name in epochs.info['ch_names'] - if ch_name.endswith(('2', '3'))]) - assert len(data_cov_grad['names']) > 4 - make_lcmv(epochs.info, forward_fixed, data_cov_grad, reg=0.01, - noise_cov=noise_cov) + data_cov, + [ + ch_name + for ch_name in epochs.info["ch_names"] + if ch_name.endswith(("2", "3")) + ], + ordered=False, + ) + assert len(data_cov_grad["names"]) > 4 + make_lcmv(epochs.info, forward_fixed, data_cov_grad, reg=0.01, noise_cov=noise_cov) @testing.requires_testing_data @pytest.mark.slowtest -@pytest.mark.parametrize('weight_norm, pick_ori', [ - ('unit-noise-gain', 'max-power'), - ('unit-noise-gain', 'vector'), - ('unit-noise-gain', None), - ('nai', 'vector'), - (None, 'max-power'), -]) +@pytest.mark.parametrize( + "weight_norm, pick_ori", + [ + ("unit-noise-gain", "max-power"), + ("unit-noise-gain", "vector"), + ("unit-noise-gain", None), + ("nai", "vector"), + (None, "max-power"), + ], +) def test_make_lcmv_sphere(pick_ori, weight_norm): """Test LCMV with sphere head model.""" # unit-noise gain beamformer and orientation # selection and rank reduction of the leadfield _, _, evoked, data_cov, noise_cov, _, _, _, _, _ = _get_data(proj=True) - assert 'eeg' not in evoked - assert 'meg' in evoked - sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080) + assert "eeg" not in evoked + assert "meg" in evoked 
+ sphere = mne.make_sphere_model(r0=(0.0, 0.0, 0.0), head_radius=0.080) src = mne.setup_volume_source_space( - pos=25., sphere=sphere, mindist=5.0, exclude=2.0) + pos=25.0, sphere=sphere, mindist=5.0, exclude=2.0 + ) fwd_sphere = mne.make_forward_solution(evoked.info, None, src, sphere) # Test that we get an error if not reducing rank - with pytest.raises(ValueError, match='Singular matrix detected'): - with pytest.warns(RuntimeWarning, match='positive semidefinite'): - make_lcmv( - evoked.info, fwd_sphere, data_cov, reg=0.1, - noise_cov=noise_cov, weight_norm=weight_norm, - pick_ori=pick_ori, reduce_rank=False, rank='full') + with ( + pytest.raises(ValueError, match="Singular matrix detected"), + _record_warnings(), + pytest.warns(RuntimeWarning, match="positive semidefinite"), + ): + make_lcmv( + evoked.info, + fwd_sphere, + data_cov, + reg=0.1, + noise_cov=noise_cov, + weight_norm=weight_norm, + pick_ori=pick_ori, + reduce_rank=False, + rank="full", + ) # Now let's reduce it - filters = make_lcmv(evoked.info, fwd_sphere, data_cov, reg=0.1, - noise_cov=noise_cov, weight_norm=weight_norm, - pick_ori=pick_ori, reduce_rank=True) + filters = make_lcmv( + evoked.info, + fwd_sphere, + data_cov, + reg=0.1, + noise_cov=noise_cov, + weight_norm=weight_norm, + pick_ori=pick_ori, + reduce_rank=True, + ) stc_sphere = apply_lcmv(evoked, filters) if isinstance(stc_sphere, VolVectorSourceEstimate): stc_sphere = stc_sphere.magnitude() @@ -489,20 +638,36 @@ def test_make_lcmv_sphere(pick_ori, weight_norm): @testing.requires_testing_data -@pytest.mark.parametrize('weight_norm', (None, 'unit-noise-gain')) -@pytest.mark.parametrize('pick_ori', ('max-power', 'normal')) +@pytest.mark.parametrize("weight_norm", (None, "unit-noise-gain")) +@pytest.mark.parametrize("pick_ori", ("max-power", "normal")) def test_lcmv_cov(weight_norm, pick_ori): """Test LCMV source power computation.""" - raw, epochs, evoked, data_cov, noise_cov, label, forward,\ - forward_surf_ori, forward_fixed, forward_vol = _get_data() + ( + raw, + epochs, + evoked, + data_cov, + noise_cov, + label, + forward, + forward_surf_ori, + forward_fixed, + forward_vol, + ) = _get_data() convert_forward_solution(forward, surf_ori=True, copy=False) - filters = make_lcmv(evoked.info, forward, data_cov, noise_cov=noise_cov, - weight_norm=weight_norm, pick_ori=pick_ori) + filters = make_lcmv( + evoked.info, + forward, + data_cov, + noise_cov=noise_cov, + weight_norm=weight_norm, + pick_ori=pick_ori, + ) for cov in (data_cov, noise_cov): - this_cov = pick_channels_cov(cov, evoked.ch_names) - this_evoked = evoked.copy().pick_channels(this_cov['names']) - this_cov['projs'] = this_evoked.info['projs'] - assert this_evoked.ch_names == this_cov['names'] + this_cov = pick_channels_cov(cov, evoked.ch_names, ordered=False) + this_evoked = evoked.copy().pick(this_cov["names"]) + this_cov["projs"] = this_evoked.info["projs"] + assert this_evoked.ch_names == this_cov["names"] stc = apply_lcmv_cov(this_cov, filters) assert stc.data.min() > 0 assert stc.shape == (498, 1) @@ -529,88 +694,106 @@ def test_lcmv_ctf_comp(): evoked = epochs.average() data_cov = mne.compute_covariance(epochs) - fwd = mne.make_forward_solution(evoked.info, None, - mne.setup_volume_source_space(pos=30.0), - mne.make_sphere_model()) - with pytest.raises(ValueError, match='reduce_rank'): + fwd = mne.make_forward_solution( + evoked.info, + None, + mne.setup_volume_source_space(pos=30.0), + mne.make_sphere_model(), + ) + with pytest.raises(ValueError, match="reduce_rank"): make_lcmv(evoked.info, fwd, 
data_cov) filters = make_lcmv(evoked.info, fwd, data_cov, reduce_rank=True) - assert 'weights' in filters + assert "weights" in filters # test whether different compensations throw error info_comp = evoked.info.copy() set_current_comp(info_comp, 1) - with pytest.raises(RuntimeError, match='Compensation grade .* not match'): + with pytest.raises(RuntimeError, match="Compensation grade .* not match"): make_lcmv(info_comp, fwd, data_cov) @pytest.mark.slowtest @testing.requires_testing_data -@pytest.mark.parametrize('proj, weight_norm', [ - (True, 'unit-noise-gain'), - (False, 'unit-noise-gain'), - (True, None), - (True, 'nai'), -]) +@pytest.mark.parametrize( + "proj, weight_norm", + [ + (True, "unit-noise-gain"), + (False, "unit-noise-gain"), + (True, None), + (True, "nai"), + ], +) def test_lcmv_reg_proj(proj, weight_norm): """Test LCMV with and without proj.""" raw = mne.io.read_raw_fif(fname_raw, preload=True) events = mne.find_events(raw) - raw.pick_types(meg=True) + raw.pick(picks="meg", exclude="bads") assert len(raw.ch_names) == 305 epochs = mne.Epochs(raw, events, None, preload=True, proj=proj) - with pytest.warns(RuntimeWarning, match='Too few samples'): + with pytest.warns(RuntimeWarning, match="Too few samples"): noise_cov = mne.compute_covariance(epochs, tmax=0) data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15) forward = mne.read_forward_solution(fname_fwd) - filters = make_lcmv(epochs.info, forward, data_cov, reg=0.05, - noise_cov=noise_cov, pick_ori='max-power', - weight_norm='nai', rank=None, verbose=True) + filters = make_lcmv( + epochs.info, + forward, + data_cov, + reg=0.05, + noise_cov=noise_cov, + pick_ori="max-power", + weight_norm="nai", + rank=None, + verbose=True, + ) want_rank = 302 # 305 good channels - 3 MEG projs - assert filters['rank'] == want_rank + assert filters["rank"] == want_rank # And also with and without noise_cov - with pytest.raises(ValueError, match='several sensor types'): - make_lcmv(epochs.info, forward, data_cov, reg=0.05, - noise_cov=None) - epochs.pick_types(meg='grad') + with pytest.raises(ValueError, match="several sensor types"): + make_lcmv(epochs.info, forward, data_cov, reg=0.05, noise_cov=None) + epochs.pick(picks="grad") kwargs = dict(reg=0.05, pick_ori=None, weight_norm=weight_norm) - filters_cov = make_lcmv(epochs.info, forward, data_cov, - noise_cov=noise_cov, **kwargs) - filters_nocov = make_lcmv(epochs.info, forward, data_cov, - noise_cov=None, **kwargs) + filters_cov = make_lcmv( + epochs.info, forward, data_cov, noise_cov=noise_cov, **kwargs + ) + filters_nocov = make_lcmv(epochs.info, forward, data_cov, noise_cov=None, **kwargs) ad_hoc = mne.make_ad_hoc_cov(epochs.info) - filters_adhoc = make_lcmv(epochs.info, forward, data_cov, - noise_cov=ad_hoc, **kwargs) + filters_adhoc = make_lcmv( + epochs.info, forward, data_cov, noise_cov=ad_hoc, **kwargs + ) evoked = epochs.average() stc_cov = apply_lcmv(evoked, filters_cov) stc_nocov = apply_lcmv(evoked, filters_nocov) stc_adhoc = apply_lcmv(evoked, filters_adhoc) # Compare adhoc and nocov: scale difference is necessitated by using std=1. - if weight_norm == 'unit-noise-gain': - scale = np.sqrt(ad_hoc['data'][0]) + if weight_norm == "unit-noise-gain": + scale = np.sqrt(ad_hoc["data"][0]) else: - scale = 1. 
+ scale = 1.0 assert_allclose(stc_nocov.data, stc_adhoc.data * scale) - a = np.dot(filters_nocov['weights'], filters_nocov['whitener']) - b = np.dot(filters_adhoc['weights'], filters_adhoc['whitener']) * scale + a = np.dot(filters_nocov["weights"], filters_nocov["whitener"]) + b = np.dot(filters_adhoc["weights"], filters_adhoc["whitener"]) * scale atol = np.mean(np.sqrt(a * a)) * 1e-7 assert_allclose(a, b, atol=atol, rtol=1e-7) # Compare adhoc and cov: locs might not be equivalent, but the same # general profile should persist, so look at the std and be lenient: - if weight_norm == 'unit-noise-gain': + if weight_norm == "unit-noise-gain": adhoc_scale = 0.12 else: - adhoc_scale = 1. + adhoc_scale = 1.0 assert_allclose( np.linalg.norm(stc_adhoc.data, axis=0) * adhoc_scale, - np.linalg.norm(stc_cov.data, axis=0), rtol=0.3) + np.linalg.norm(stc_cov.data, axis=0), + rtol=0.3, + ) assert_allclose( np.linalg.norm(stc_nocov.data, axis=0) / scale * adhoc_scale, - np.linalg.norm(stc_cov.data, axis=0), rtol=0.3) + np.linalg.norm(stc_cov.data, axis=0), + rtol=0.3, + ) - if weight_norm == 'nai': + if weight_norm == "nai": # NAI is always normalized by noise-level (based on eigenvalues) for stc in (stc_nocov, stc_cov): assert_allclose(stc.data.std(), 0.584, rtol=0.2) @@ -620,34 +803,48 @@ def test_lcmv_reg_proj(proj, weight_norm): for stc in (stc_nocov, stc_cov): assert_allclose(stc.data.std(), 2.8e-8, rtol=0.1) else: - assert weight_norm == 'unit-noise-gain' + assert weight_norm == "unit-noise-gain" # Channel scalings depend on presence of noise_cov assert_allclose(stc_nocov.data.std(), 7.8e-13, rtol=0.1) assert_allclose(stc_cov.data.std(), 0.187, rtol=0.2) -@pytest.mark.parametrize('reg, weight_norm, use_cov, depth, lower, upper', [ - (0.05, 'unit-noise-gain', True, None, 97, 98), - (0.05, 'nai', True, None, 96, 98), - (0.05, 'nai', True, 0.8, 96, 98), - (0.05, None, True, None, 74, 76), - (0.05, None, True, 0.8, 90, 93), # depth improves weight_norm=None - (0.05, 'unit-noise-gain', False, None, 83, 86), - (0.05, 'unit-noise-gain', False, 0.8, 83, 86), # depth same for wn != None - # no reg - (0.00, 'unit-noise-gain', True, None, 35, 99), # TODO: Still not stable -]) -def test_localization_bias_fixed(bias_params_fixed, reg, weight_norm, use_cov, - depth, lower, upper): +@pytest.mark.slowtest +@pytest.mark.parametrize( + "reg, weight_norm, use_cov, depth, lower, upper", + [ + (0.05, "unit-noise-gain", True, None, 97, 98), + (0.05, "nai", True, None, 96, 98), + (0.05, "nai", True, 0.8, 96, 98), + (0.05, None, True, None, 74, 76), + (0.05, None, True, 0.8, 90, 93), # depth improves weight_norm=None + (0.05, "unit-noise-gain", False, None, 83, 86), + (0.05, "unit-noise-gain", False, 0.8, 83, 86), # depth same for wn != None + # no reg + (0.00, "unit-noise-gain", True, None, 35, 99), # TODO: Still not stable + ], +) +def test_localization_bias_fixed( + bias_params_fixed, reg, weight_norm, use_cov, depth, lower, upper +): """Test localization bias for fixed-orientation LCMV.""" evoked, fwd, noise_cov, data_cov, want = bias_params_fixed if not use_cov: - evoked.pick_types(meg='grad') + evoked.pick(picks="grad") noise_cov = None - assert data_cov['data'].shape[0] == len(data_cov['names']) - loc = apply_lcmv(evoked, make_lcmv(evoked.info, fwd, data_cov, reg, - noise_cov, depth=depth, - weight_norm=weight_norm)).data + assert data_cov["data"].shape[0] == len(data_cov["names"]) + loc = apply_lcmv( + evoked, + make_lcmv( + evoked.info, + fwd, + data_cov, + reg, + noise_cov, + depth=depth, + 
weight_norm=weight_norm, + ), + ).data loc = np.abs(loc) # Compute the percentage of sources for which there is no loc bias: perc = (want == np.argmax(loc, axis=0)).mean() * 100 @@ -655,52 +852,119 @@ def test_localization_bias_fixed(bias_params_fixed, reg, weight_norm, use_cov, # Changes here should be synced with test_dics.py +@pytest.mark.slowtest @pytest.mark.parametrize( - 'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, ' - 'lower_ori, upper_ori', [ - (0.05, 'vector', 'unit-noise-gain-invariant', False, None, 26, 28, 0.82, 0.84), # noqa: E501 - (0.05, 'vector', 'unit-noise-gain-invariant', True, None, 40, 42, 0.96, 0.98), # noqa: E501 - (0.05, 'vector', 'unit-noise-gain', False, None, 13, 14, 0.79, 0.81), - (0.05, 'vector', 'unit-noise-gain', True, None, 35, 37, 0.98, 0.99), - (0.05, 'vector', 'nai', True, None, 35, 37, 0.98, 0.99), - (0.05, 'vector', None, True, None, 12, 14, 0.97, 0.98), - (0.05, 'vector', None, True, 0.8, 39, 43, 0.97, 0.98), - (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20, 0, 0), # noqa: E501 - (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, 0, 0), - (0.05, 'max-power', 'nai', True, None, 21, 24, 0, 0), - (0.05, 'max-power', None, True, None, 7, 10, 0, 0), - (0.05, 'max-power', None, True, 0.8, 15, 18, 0, 0), + "reg, pick_ori, weight_norm, use_cov, depth, lower, upper, lower_ori, upper_ori", + [ + ( + 0.05, + "vector", + "unit-noise-gain-invariant", + False, + None, + 26, + 28, + 0.82, + 0.84, + ), # noqa: E501 + ( + 0.05, + "vector", + "unit-noise-gain-invariant", + True, + None, + 40, + 42, + 0.96, + 0.98, + ), # noqa: E501 + (0.05, "vector", "unit-noise-gain", False, None, 13, 14, 0.79, 0.81), + (0.05, "vector", "unit-noise-gain", True, None, 35, 37, 0.98, 0.99), + (0.05, "vector", "nai", True, None, 35, 37, 0.98, 0.99), + (0.05, "vector", None, True, None, 12, 14, 0.97, 0.98), + (0.05, "vector", None, True, 0.8, 39, 43, 0.97, 0.98), + ( + 0.05, + "max-power", + "unit-noise-gain-invariant", + False, + None, + 17, + 20, + 0, + 0, + ), # noqa: E501 + (0.05, "max-power", "unit-noise-gain", False, None, 17, 20, 0, 0), + (0.05, "max-power", "nai", True, None, 21, 24, 0, 0), + (0.05, "max-power", None, True, None, 7, 10, 0, 0), + (0.05, "max-power", None, True, 0.8, 15, 18, 0, 0), (0.05, None, None, True, 0.8, 40, 42, 0, 0), # no reg - (0.00, 'vector', None, True, None, 23, 24, 0.96, 0.97), - (0.00, 'vector', 'unit-noise-gain-invariant', True, None, 52, 54, 0.95, 0.96), # noqa: E501 - (0.00, 'vector', 'unit-noise-gain', True, None, 44, 48, 0.97, 0.99), - (0.00, 'vector', 'nai', True, None, 44, 48, 0.97, 0.99), - (0.00, 'max-power', None, True, None, 14, 15, 0, 0), - (0.00, 'max-power', 'unit-noise-gain-invariant', True, None, 35, 37, 0, 0), # noqa: E501 - (0.00, 'max-power', 'unit-noise-gain', True, None, 35, 37, 0, 0), - (0.00, 'max-power', 'nai', True, None, 35, 37, 0, 0), - ]) -def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm, - use_cov, depth, lower, upper, - lower_ori, upper_ori): + (0.00, "vector", None, True, None, 23, 24, 0.96, 0.97), + ( + 0.00, + "vector", + "unit-noise-gain-invariant", + True, + None, + 52, + 54, + 0.95, + 0.96, + ), # noqa: E501 + (0.00, "vector", "unit-noise-gain", True, None, 44, 48, 0.97, 0.99), + (0.00, "vector", "nai", True, None, 44, 48, 0.97, 0.99), + (0.00, "max-power", None, True, None, 14, 15, 0, 0), + ( + 0.00, + "max-power", + "unit-noise-gain-invariant", + True, + None, + 35, + 37, + 0, + 0, + ), # noqa: E501 + (0.00, "max-power", "unit-noise-gain", 
True, None, 35, 37, 0, 0), + (0.00, "max-power", "nai", True, None, 35, 37, 0, 0), + ], +) +def test_localization_bias_free( + bias_params_free, + reg, + pick_ori, + weight_norm, + use_cov, + depth, + lower, + upper, + lower_ori, + upper_ori, +): """Test localization bias for free-orientation LCMV.""" evoked, fwd, noise_cov, data_cov, want = bias_params_free if not use_cov: - evoked.pick_types(meg='grad') + evoked.pick(picks="grad") noise_cov = None with _record_warnings(): # rank deficiency of data_cov - filters = make_lcmv(evoked.info, fwd, data_cov, reg, - noise_cov, pick_ori=pick_ori, - weight_norm=weight_norm, - depth=depth) + filters = make_lcmv( + evoked.info, + fwd, + data_cov, + reg, + noise_cov, + pick_ori=pick_ori, + weight_norm=weight_norm, + depth=depth, + ) loc = apply_lcmv(evoked, filters).data - if pick_ori == 'vector': + if pick_ori == "vector": ori = loc.copy() / np.linalg.norm(loc, axis=1, keepdims=True) else: # doesn't make sense for pooled (None) or max-power (can't be all 3) ori = None - loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc) + loc = np.linalg.norm(loc, axis=1) if pick_ori == "vector" else np.abs(loc) # Compute the percentage of sources for which there is no loc bias: max_idx = np.argmax(loc, axis=0) perc = (want == max_idx).mean() * 100 @@ -711,35 +975,52 @@ def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm, # Changes here should be synced with the ones above, but these have meaningful # orientation values @pytest.mark.parametrize( - 'reg, weight_norm, use_cov, depth, lower, upper, lower_ori, upper_ori', [ - (0.05, 'unit-noise-gain-invariant', False, None, 38, 40, 0.54, 0.55), - (0.05, 'unit-noise-gain', False, None, 38, 40, 0.54, 0.55), - (0.05, 'nai', True, None, 56, 57, 0.59, 0.61), + "reg, weight_norm, use_cov, depth, lower, upper, lower_ori, upper_ori", + [ + (0.05, "unit-noise-gain-invariant", False, None, 38, 40, 0.54, 0.55), + (0.05, "unit-noise-gain", False, None, 38, 40, 0.54, 0.55), + (0.05, "nai", True, None, 56, 57, 0.59, 0.61), (0.05, None, True, None, 27, 28, 0.56, 0.57), (0.05, None, True, 0.8, 42, 43, 0.56, 0.57), # no reg (0.00, None, True, None, 50, 51, 0.58, 0.59), - (0.00, 'unit-noise-gain-invariant', True, None, 73, 75, 0.59, 0.61), - (0.00, 'unit-noise-gain', True, None, 73, 75, 0.59, 0.61), - (0.00, 'nai', True, None, 73, 75, 0.59, 0.61), - ]) -def test_orientation_max_power(bias_params_fixed, bias_params_free, - reg, weight_norm, use_cov, depth, lower, upper, - lower_ori, upper_ori): + (0.00, "unit-noise-gain-invariant", True, None, 73, 75, 0.59, 0.61), + (0.00, "unit-noise-gain", True, None, 73, 75, 0.59, 0.61), + (0.00, "nai", True, None, 73, 75, 0.59, 0.61), + ], +) +def test_orientation_max_power( + bias_params_fixed, + bias_params_free, + reg, + weight_norm, + use_cov, + depth, + lower, + upper, + lower_ori, + upper_ori, +): """Test orientation selection for bias for max-power LCMV.""" # we simulate data for the fixed orientation forward and beamform using # the free orientation forward, and check the orientation match at the end evoked, _, noise_cov, data_cov, want = bias_params_fixed fwd = bias_params_free[1] if not use_cov: - evoked.pick_types(meg='grad') + evoked.pick(picks="grad") noise_cov = None - filters = make_lcmv(evoked.info, fwd, data_cov, reg, - noise_cov, pick_ori='max-power', - weight_norm=weight_norm, - depth=depth) + filters = make_lcmv( + evoked.info, + fwd, + data_cov, + reg, + noise_cov, + pick_ori="max-power", + weight_norm=weight_norm, + depth=depth, + ) 
loc = apply_lcmv(evoked, filters).data - ori = filters['max_power_ori'] + ori = filters["max_power_ori"] assert ori.shape == (246, 3) loc = np.abs(loc) # Compute the percentage of sources for which there is no loc bias: @@ -748,11 +1029,10 @@ def test_orientation_max_power(bias_params_fixed, bias_params_free, perc = mask.mean() * 100 assert lower <= perc <= upper # Compute the dot products of our forward normals and - assert fwd['coord_frame'] == FIFF.FIFFV_COORD_HEAD - nn = np.concatenate( - [s['nn'][v] for s, v in zip(fwd['src'], filters['vertices'])]) + assert fwd["coord_frame"] == FIFF.FIFFV_COORD_HEAD + nn = np.concatenate([s["nn"][v] for s, v in zip(fwd["src"], filters["vertices"])]) nn = nn[want] - nn = apply_trans(invert_transform(fwd['mri_head_t']), nn, move=False) + nn = apply_trans(invert_transform(fwd["mri_head_t"]), nn, move=False) assert_allclose(np.linalg.norm(nn, axis=1), 1, atol=1e-6) assert_allclose(np.linalg.norm(ori, axis=1), 1, atol=1e-12) dots = np.abs((nn[mask] * ori[mask]).sum(-1)) @@ -762,21 +1042,44 @@ def test_orientation_max_power(bias_params_fixed, bias_params_free, assert lower_ori < got < upper_ori -@pytest.mark.parametrize('weight_norm, pick_ori', [ - pytest.param('nai', 'max-power', marks=pytest.mark.slowtest), - ('unit-noise-gain', 'vector'), - ('unit-noise-gain', 'max-power'), - pytest.param('unit-noise-gain', None, marks=pytest.mark.slowtest), -]) +@pytest.mark.parametrize( + "weight_norm, pick_ori", + [ + pytest.param("nai", "max-power", marks=pytest.mark.slowtest), + pytest.param("unit-noise-gain", "vector", marks=pytest.mark.slowtest), + ("unit-noise-gain", "max-power"), + pytest.param("unit-noise-gain", None, marks=pytest.mark.slowtest), + ], +) def test_depth_does_not_matter(bias_params_free, weight_norm, pick_ori): """Test that depth weighting does not matter for normalized filters.""" evoked, fwd, noise_cov, data_cov, _ = bias_params_free - data = apply_lcmv(evoked, make_lcmv( - evoked.info, fwd, data_cov, 0.05, noise_cov, pick_ori=pick_ori, - weight_norm=weight_norm, depth=0.)).data - data_depth = apply_lcmv(evoked, make_lcmv( - evoked.info, fwd, data_cov, 0.05, noise_cov, pick_ori=pick_ori, - weight_norm=weight_norm, depth=1.)).data + data = apply_lcmv( + evoked, + make_lcmv( + evoked.info, + fwd, + data_cov, + 0.05, + noise_cov, + pick_ori=pick_ori, + weight_norm=weight_norm, + depth=0.0, + ), + ).data + data_depth = apply_lcmv( + evoked, + make_lcmv( + evoked.info, + fwd, + data_cov, + 0.05, + noise_cov, + pick_ori=pick_ori, + weight_norm=weight_norm, + depth=1.0, + ), + ).data assert data.shape == data_depth.shape for d1, d2 in zip(data, data_depth): # Sign flips can change when nearly orthogonal to the normal direction @@ -785,66 +1088,101 @@ def test_depth_does_not_matter(bias_params_free, weight_norm, pick_ori): assert_allclose(d1, d2, atol=atol) -@testing.requires_testing_data -def test_lcmv_maxfiltered(): - """Test LCMV on maxfiltered data.""" +@pytest.fixture(scope="session") +def mf_data(): + """Produce Maxwell filtered data for beamforming.""" raw = mne.io.read_raw_fif(fname_raw).fix_mag_coil_types() raw_sss = mne.preprocessing.maxwell_filter(raw) events = mne.find_events(raw_sss) del raw - raw_sss.pick_types(meg='mag') + raw_sss.pick(picks="mag") assert len(raw_sss.ch_names) == 102 epochs = mne.Epochs(raw_sss, events) data_cov = mne.compute_covariance(epochs, tmin=0) fwd = mne.read_forward_solution(fname_fwd) + return epochs, data_cov, fwd + + +@testing.requires_testing_data +@pytest.mark.parametrize("use_rank", ("info", "computed", 
"full", None)) +def test_lcmv_maxfiltered(mf_data, use_rank): + """Test LCMV on maxfiltered data.""" + epochs, data_cov, fwd = mf_data rank = compute_rank(data_cov, info=epochs.info) - assert rank == {'mag': 71} - for use_rank in ('info', rank, 'full', None): - make_lcmv(epochs.info, fwd, data_cov, rank=use_rank) + assert rank == {"mag": 71} + ctx = nullcontext() + if use_rank == "computed": + use_rank = rank + elif use_rank is None: + ctx = pytest.warns(RuntimeWarning, match="rank as it exceeds") + with catch_logging() as log, ctx: + make_lcmv(epochs.info, fwd, data_cov, rank=use_rank, verbose=True) + log = log.getvalue() + n = 102 if use_rank == "full" else 71 + assert f"Making LCMV beamformer with rank {{'mag': {n}}}" in log # To reduce test time, only test combinations that should matter rather than # all of them @testing.requires_testing_data -@pytest.mark.parametrize('pick_ori, weight_norm, reg, inversion', [ - ('vector', 'unit-noise-gain-invariant', 0.05, 'matrix'), - ('vector', 'unit-noise-gain-invariant', 0.05, 'single'), - ('vector', 'unit-noise-gain', 0.05, 'matrix'), - ('vector', 'unit-noise-gain', 0.05, 'single'), - ('vector', 'unit-noise-gain', 0.0, 'matrix'), - ('vector', 'unit-noise-gain', 0.0, 'single'), - ('vector', 'nai', 0.05, 'matrix'), - ('max-power', 'unit-noise-gain', 0.05, 'matrix'), - ('max-power', 'unit-noise-gain', 0.0, 'single'), - ('max-power', 'unit-noise-gain', 0.05, 'single'), - ('max-power', 'unit-noise-gain-invariant', 0.05, 'matrix'), - ('normal', 'unit-noise-gain', 0.05, 'matrix'), - ('normal', 'nai', 0.0, 'matrix'), -]) +@pytest.mark.parametrize( + "pick_ori, weight_norm, reg, inversion", + [ + ("vector", "unit-noise-gain-invariant", 0.05, "matrix"), + ("vector", "unit-noise-gain-invariant", 0.05, "single"), + ("vector", "unit-noise-gain", 0.05, "matrix"), + ("vector", "unit-noise-gain", 0.05, "single"), + ("vector", "unit-noise-gain", 0.0, "matrix"), + ("vector", "unit-noise-gain", 0.0, "single"), + ("vector", "nai", 0.05, "matrix"), + ("max-power", "unit-noise-gain", 0.05, "matrix"), + ("max-power", "unit-noise-gain", 0.0, "single"), + ("max-power", "unit-noise-gain", 0.05, "single"), + ("max-power", "unit-noise-gain-invariant", 0.05, "matrix"), + ("normal", "unit-noise-gain", 0.05, "matrix"), + ("normal", "nai", 0.0, "matrix"), + ], +) def test_unit_noise_gain_formula(pick_ori, weight_norm, reg, inversion): """Test unit-noise-gain filter against formula.""" raw = mne.io.read_raw_fif(fname_raw, preload=True) events = mne.find_events(raw) - raw.pick_types(meg='mag') + raw.pick(picks="mag") assert len(raw.ch_names) == 102 epochs = mne.Epochs(raw, events, None, preload=True) data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15) # for now, avoid whitening to make life easier - noise_cov = mne.make_ad_hoc_cov(epochs.info, std=dict(grad=1., mag=1.)) + noise_cov = mne.make_ad_hoc_cov(epochs.info, std=dict(grad=1.0, mag=1.0)) forward = mne.read_forward_solution(fname_fwd) convert_forward_solution(forward, surf_ori=True, copy=False) rank = None - kwargs = dict(reg=reg, noise_cov=noise_cov, pick_ori=pick_ori, - weight_norm=weight_norm, rank=rank, inversion=inversion) - if inversion == 'single' and pick_ori == 'vector' and \ - weight_norm == 'unit-noise-gain-invariant': - with pytest.raises(ValueError, match='Cannot use'): + kwargs = dict( + reg=reg, + noise_cov=noise_cov, + pick_ori=pick_ori, + weight_norm=weight_norm, + rank=rank, + inversion=inversion, + ) + if ( + inversion == "single" + and pick_ori == "vector" + and weight_norm == 
"unit-noise-gain-invariant" + ): + with pytest.raises(ValueError, match="Cannot use"): make_lcmv(epochs.info, forward, data_cov, **kwargs) return filters = make_lcmv(epochs.info, forward, data_cov, **kwargs) _, _, _, _, G, _, _, _ = _prepare_beamformer_input( - epochs.info, forward, None, 'vector', noise_cov=noise_cov, rank=rank, - pca=False, exp=None) + epochs.info, + forward, + None, + "vector", + noise_cov=noise_cov, + rank=rank, + pca=False, + exp=None, + ) n_channels, n_sources = G.shape n_sources //= 3 G.shape = (n_channels, n_sources, 3) @@ -854,26 +1192,26 @@ def test_unit_noise_gain_formula(pick_ori, weight_norm, reg, inversion): def _assert_weight_norm(filters, G): """Check the result of the chosen weight normalization strategy.""" - weights, max_power_ori = filters['weights'], filters['max_power_ori'] + weights, max_power_ori = filters["weights"], filters["max_power_ori"] # Make the dimensions of the weight matrix equal for both DICS (which # defines weights for multiple frequencies) and LCMV (which does not). - if filters['kind'] == 'LCMV': + if filters["kind"] == "LCMV": weights = weights[np.newaxis] if max_power_ori is not None: max_power_ori = max_power_ori[np.newaxis] if max_power_ori is not None: max_power_ori = max_power_ori[..., np.newaxis] - weight_norm = filters['weight_norm'] - inversion = filters['inversion'] + weight_norm = filters["weight_norm"] + inversion = filters["inversion"] n_channels = weights.shape[2] - if inversion == 'matrix': + if inversion == "matrix": # Dipoles are grouped in groups with size n_orient - n_sources = filters['n_sources'] - n_orient = 3 if filters['is_free_ori'] else 1 - elif inversion == 'single': + n_sources = filters["n_sources"] + n_orient = 3 if filters["is_free_ori"] else 1 + elif inversion == "single": # Every dipole is treated as a unique source n_sources = weights.shape[1] n_orient = 1 @@ -883,13 +1221,13 @@ def _assert_weight_norm(filters, G): # Compute leadfield in the direction chosen during the computation of # the beamformer. - if filters['pick_ori'] == 'max-power': + if filters["pick_ori"] == "max-power": use_G = np.sum(G * max_power_ori[wi], axis=1, keepdims=True) - elif filters['pick_ori'] == 'normal': + elif filters["pick_ori"] == "normal": use_G = G[:, -1:] else: use_G = G - if inversion == 'single': + if inversion == "single": # Every dipole is treated as a unique source use_G = use_G.reshape(n_sources, 1, n_channels) assert w.shape == use_G.shape == (n_sources, n_orient, n_channels) @@ -897,32 +1235,32 @@ def _assert_weight_norm(filters, G): # Test weight normalization scheme got = np.matmul(w, w.conj().swapaxes(-2, -1)) desired = np.repeat(np.eye(n_orient)[np.newaxis], w.shape[0], axis=0) - if n_orient == 3 and weight_norm in ('unit-noise-gain', 'nai'): + if n_orient == 3 and weight_norm in ("unit-noise-gain", "nai"): # only the diagonal is correct! 
assert not np.allclose(got, desired, atol=1e-7) - got = got.reshape(n_sources, -1)[:, ::n_orient + 1] + got = got.reshape(n_sources, -1)[:, :: n_orient + 1] desired = np.ones_like(got) - if weight_norm == 'nai': # additional scale factor, should be fixed + if weight_norm == "nai": # additional scale factor, should be fixed atol = 1e-7 * got.flat[0] desired *= got.flat[0] else: atol = 1e-7 - assert_allclose(got, desired, atol=atol, err_msg='w @ w.conj().T = I') + assert_allclose(got, desired, atol=atol, err_msg="w @ w.conj().T = I") # Check that the result here is a diagonal matrix for Sekihara - if n_orient > 1 and weight_norm != 'unit-noise-gain-invariant': + if n_orient > 1 and weight_norm != "unit-noise-gain-invariant": got = w @ use_G.swapaxes(-2, -1) diags = np.diagonal(got, 0, -2, -1) want = np.apply_along_axis(np.diagflat, 1, diags) atol = np.mean(diags).real * 1e-12 - assert_allclose(got, want, atol=atol, err_msg='G.T @ w = θI') + assert_allclose(got, want, atol=atol, err_msg="G.T @ w = θI") def test_api(): """Test LCMV/DICS API equivalence.""" lcmv_names = list(signature(make_lcmv).parameters) dics_names = list(signature(make_dics).parameters) - dics_names[dics_names.index('csd')] = 'data_cov' - dics_names[dics_names.index('noise_csd')] = 'noise_cov' - dics_names.pop(dics_names.index('real_filter')) # not a thing for LCMV + dics_names[dics_names.index("csd")] = "data_cov" + dics_names[dics_names.index("noise_csd")] = "noise_cov" + dics_names.pop(dics_names.index("real_filter")) # not a thing for LCMV assert lcmv_names == dics_names diff --git a/mne/beamformer/tests/test_rap_music.py b/mne/beamformer/tests/test_rap_music.py index 6595b792dcb..594e11bca09 100644 --- a/mne/beamformer/tests/test_rap_music.py +++ b/mne/beamformer/tests/test_rap_music.py @@ -1,46 +1,42 @@ -# Authors: Yousra Bekhti -# Alexandre Gramfort -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
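For context, the estimators exercised below share one calling convention; the newly covered `trap_music` is a truncated variant of RAP-MUSIC. A minimal sketch, assuming an `evoked` and `noise_cov` like those returned by `_get_data()` below and a `forward` read from `fname_fwd`:

    from mne.beamformer import rap_music, trap_music

    dipoles, residual = rap_music(evoked, forward, noise_cov, n_dipoles=2, return_residual=True)
    dipoles = trap_music(evoked, forward, noise_cov, n_dipoles=2)  # same signature
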
-import pytest import numpy as np -from scipy import linalg +import pytest from numpy.testing import assert_allclose +from scipy import linalg import mne -from mne.beamformer import rap_music +from mne.beamformer import rap_music, trap_music from mne.cov import regularize from mne.datasets import testing from mne.minimum_norm.tests.test_inverse import assert_var_exp_log from mne.utils import catch_logging - data_path = testing.data_path(download=False) fname_ave = data_path / "MEG" / "sample" / "sample_audvis-ave.fif" fname_cov = data_path / "MEG" / "sample" / "sample_audvis_trunc-cov.fif" -fname_fwd = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" -) +fname_fwd = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" def _get_data(ch_decim=1): """Read in data used in tests.""" # Read evoked evoked = mne.read_evokeds(fname_ave, 0, baseline=(None, 0)) - evoked.info['bads'] = ['MEG 2443'] + evoked.info["bads"] = ["MEG 2443"] with evoked.info._unlock(): - evoked.info['lowpass'] = 16 # fake for decim + evoked.info["lowpass"] = 16 # fake for decim evoked.decimate(12) evoked.crop(0.0, 0.3) picks = mne.pick_types(evoked.info, meg=True, eeg=False) picks = picks[::ch_decim] - evoked.pick_channels([evoked.ch_names[pick] for pick in picks]) + evoked.pick([evoked.ch_names[pick] for pick in picks]) evoked.info.normalize_proj() noise_cov = mne.read_cov(fname_cov) - noise_cov['projs'] = [] - noise_cov = regularize(noise_cov, evoked.info, rank='full', proj=False) + noise_cov["projs"] = [] + noise_cov = regularize(noise_cov, evoked.info, rank="full", proj=False) return evoked, noise_cov @@ -51,66 +47,67 @@ def simu_data(evoked, forward, noise_cov, n_dipoles, times, nave=1): """ # Generate the two dipoles data mu, sigma = 0.1, 0.005 - s1 = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 / - (2 * sigma ** 2)) + s1 = ( + 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-((times - mu) ** 2) / (2 * sigma**2)) + ) mu, sigma = 0.075, 0.008 - s2 = -1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 / - (2 * sigma ** 2)) + s2 = ( + -1 + / (sigma * np.sqrt(2 * np.pi)) + * np.exp(-((times - mu) ** 2) / (2 * sigma**2)) + ) data = np.array([s1, s2]) * 1e-9 - src = forward['src'] + src = forward["src"] rng = np.random.RandomState(42) - rndi = rng.randint(len(src[0]['vertno'])) - lh_vertno = src[0]['vertno'][[rndi]] + rndi = rng.randint(len(src[0]["vertno"])) + lh_vertno = src[0]["vertno"][[rndi]] - rndi = rng.randint(len(src[1]['vertno'])) - rh_vertno = src[1]['vertno'][[rndi]] + rndi = rng.randint(len(src[1]["vertno"])) + rh_vertno = src[1]["vertno"][[rndi]] vertices = [lh_vertno, rh_vertno] - tmin, tstep = times.min(), 1 / evoked.info['sfreq'] + tmin, tstep = times.min(), 1 / evoked.info["sfreq"] stc = mne.SourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep) - sim_evoked = mne.simulation.simulate_evoked(forward, stc, evoked.info, - noise_cov, nave=nave, - random_state=rng) + sim_evoked = mne.simulation.simulate_evoked( + forward, stc, evoked.info, noise_cov, nave=nave, random_state=rng + ) return sim_evoked, stc def _check_dipoles(dipoles, fwd, stc, evoked, residual=None): - src = fwd['src'] - pos1 = fwd['source_rr'][np.where(src[0]['vertno'] == - stc.vertices[0])] - pos2 = fwd['source_rr'][np.where(src[1]['vertno'] == - stc.vertices[1])[0] + - len(src[0]['vertno'])] + src = fwd["src"] + pos1 = fwd["source_rr"][np.where(src[0]["vertno"] == stc.vertices[0])] + pos2 = fwd["source_rr"][ + np.where(src[1]["vertno"] == stc.vertices[1])[0] + 
len(src[0]["vertno"]) + ] # Check the position of the two dipoles - assert (dipoles[0].pos[0] in np.array([pos1, pos2])) - assert (dipoles[1].pos[0] in np.array([pos1, pos2])) + assert dipoles[0].pos[0] in np.array([pos1, pos2]) + assert dipoles[1].pos[0] in np.array([pos1, pos2]) - ori1 = fwd['source_nn'][np.where(src[0]['vertno'] == - stc.vertices[0])[0]][0] - ori2 = fwd['source_nn'][np.where(src[1]['vertno'] == - stc.vertices[1])[0] + - len(src[0]['vertno'])][0] + ori1 = fwd["source_nn"][np.where(src[0]["vertno"] == stc.vertices[0])[0]][0] + ori2 = fwd["source_nn"][ + np.where(src[1]["vertno"] == stc.vertices[1])[0] + len(src[0]["vertno"]) + ][0] # Check the orientation of the dipoles - assert (np.max(np.abs(np.dot(dipoles[0].ori[0], - np.array([ori1, ori2]).T))) > 0.99) + assert np.max(np.abs(np.dot(dipoles[0].ori[0], np.array([ori1, ori2]).T))) > 0.99 - assert (np.max(np.abs(np.dot(dipoles[1].ori[0], - np.array([ori1, ori2]).T))) > 0.99) + assert np.max(np.abs(np.dot(dipoles[1].ori[0], np.array([ori1, ori2]).T))) > 0.99 if residual is not None: - picks_grad = mne.pick_types(residual.info, meg='grad') - picks_mag = mne.pick_types(residual.info, meg='mag') + picks_grad = mne.pick_types(residual.info, meg="grad") + picks_mag = mne.pick_types(residual.info, meg="mag") rel_tol = 0.02 for picks in [picks_grad, picks_mag]: - assert (linalg.norm(residual.data[picks], ord='fro') < - rel_tol * linalg.norm(evoked.data[picks], ord='fro')) + assert linalg.norm(residual.data[picks], ord="fro") < rel_tol * linalg.norm( + evoked.data[picks], ord="fro" + ) @testing.requires_testing_data @@ -120,37 +117,48 @@ def test_rap_music_simulated(): forward = mne.read_forward_solution(fname_fwd) forward = mne.pick_channels_forward(forward, evoked.ch_names) forward_surf_ori = mne.convert_forward_solution(forward, surf_ori=True) - forward_fixed = mne.convert_forward_solution(forward, force_fixed=True, - surf_ori=True, use_cps=True) + forward_fixed = mne.convert_forward_solution( + forward, force_fixed=True, surf_ori=True, use_cps=True + ) n_dipoles = 2 - sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov, - n_dipoles, evoked.times, nave=evoked.nave) + sim_evoked, stc = simu_data( + evoked, forward_fixed, noise_cov, n_dipoles, evoked.times, nave=evoked.nave + ) # Check dipoles for fixed ori with catch_logging() as log: - dipoles = rap_music(sim_evoked, forward_fixed, noise_cov, - n_dipoles=n_dipoles, verbose=True) + dipoles = rap_music( + sim_evoked, forward_fixed, noise_cov, n_dipoles=n_dipoles, verbose=True + ) assert_var_exp_log(log.getvalue(), 89, 91) _check_dipoles(dipoles, forward_fixed, stc, sim_evoked) assert 97 < dipoles[0].gof.max() < 100 assert 91 < dipoles[1].gof.max() < 93 - assert dipoles[0].gof.min() >= 0. 
+ assert dipoles[0].gof.min() >= 0.0 nave = 100000 # add a tiny amount of noise to the simulated evokeds - sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov, - n_dipoles, evoked.times, nave=nave) - dipoles, residual = rap_music(sim_evoked, forward_fixed, noise_cov, - n_dipoles=n_dipoles, return_residual=True) + sim_evoked, stc = simu_data( + evoked, forward_fixed, noise_cov, n_dipoles, evoked.times, nave=nave + ) + dipoles, residual = rap_music( + sim_evoked, forward_fixed, noise_cov, n_dipoles=n_dipoles, return_residual=True + ) _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual) # Check dipoles for free ori - dipoles, residual = rap_music(sim_evoked, forward, noise_cov, - n_dipoles=n_dipoles, return_residual=True) + dipoles, residual = rap_music( + sim_evoked, forward, noise_cov, n_dipoles=n_dipoles, return_residual=True + ) _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual) # Check dipoles for free surface ori - dipoles, residual = rap_music(sim_evoked, forward_surf_ori, noise_cov, - n_dipoles=n_dipoles, return_residual=True) + dipoles, residual = rap_music( + sim_evoked, + forward_surf_ori, + noise_cov, + n_dipoles=n_dipoles, + return_residual=True, + ) _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual) @@ -159,17 +167,19 @@ def test_rap_music_simulated(): def test_rap_music_sphere(): """Test RAP-MUSIC with real data, sphere model, MEG only.""" evoked, noise_cov = _get_data(ch_decim=8) - sphere = mne.make_sphere_model(r0=(0., 0., 0.04)) - src = mne.setup_volume_source_space(subject=None, pos=10., - sphere=(0.0, 0.0, 40, 65.0), - mindist=5.0, exclude=0.0, - sphere_units='mm') - forward = mne.make_forward_solution(evoked.info, trans=None, src=src, - bem=sphere) + sphere = mne.make_sphere_model(r0=(0.0, 0.0, 0.04)) + src = mne.setup_volume_source_space( + subject=None, + pos=10.0, + sphere=(0.0, 0.0, 40, 65.0), + mindist=5.0, + exclude=0.0, + sphere_units="mm", + ) + forward = mne.make_forward_solution(evoked.info, trans=None, src=src, bem=sphere) with catch_logging() as log: - dipoles = rap_music(evoked, forward, noise_cov, n_dipoles=2, - verbose=True) + dipoles = rap_music(evoked, forward, noise_cov, n_dipoles=2, verbose=True) assert_var_exp_log(log.getvalue(), 47, 49) # Test that there is one dipole on each hemisphere pos = np.array([dip.pos[0] for dip in dipoles]) @@ -177,11 +187,11 @@ def test_rap_music_sphere(): assert (pos[:, 0] < 0).sum() == 1 assert (pos[:, 0] > 0).sum() == 1 # Check the amplitude scale - assert (1e-10 < dipoles[0].amplitude[0] < 1e-7) + assert 1e-10 < dipoles[0].amplitude[0] < 1e-7 # Check the orientation dip_fit = mne.fit_dipole(evoked, noise_cov, sphere)[0] - assert (np.max(np.abs(np.dot(dip_fit.ori, dipoles[0].ori[0]))) > 0.99) - assert (np.max(np.abs(np.dot(dip_fit.ori, dipoles[1].ori[0]))) > 0.99) + assert np.max(np.abs(np.dot(dip_fit.ori, dipoles[0].ori[0]))) > 0.99 + assert np.max(np.abs(np.dot(dip_fit.ori, dipoles[1].ori[0]))) > 0.99 idx = dip_fit.gof.argmax() dist = np.linalg.norm(dipoles[0].pos[idx] - dip_fit.pos[idx]) assert 0.004 <= dist < 0.007 @@ -191,11 +201,22 @@ def test_rap_music_sphere(): @testing.requires_testing_data def test_rap_music_picks(): """Test RAP-MUSIC with picking.""" - evoked = mne.read_evokeds(fname_ave, condition='Right Auditory', - baseline=(None, 0)) + evoked = mne.read_evokeds(fname_ave, condition="Right Auditory", baseline=(None, 0)) evoked.crop(tmin=0.05, tmax=0.15) # select N100 - evoked.pick_types(meg=True, eeg=False) + evoked.pick(picks="meg") forward = 
mne.read_forward_solution(fname_fwd) noise_cov = mne.read_cov(fname_cov) dipoles = rap_music(evoked, forward, noise_cov, n_dipoles=2) assert len(dipoles) == 2 + + +@testing.requires_testing_data +def test_trap_music(): + """Test TRAP-MUSIC.""" + evoked = mne.read_evokeds(fname_ave, condition="Right Auditory", baseline=(None, 0)) + evoked.crop(tmin=0.05, tmax=0.15) # select N100 + evoked.pick(picks="meg") + forward = mne.read_forward_solution(fname_fwd) + noise_cov = mne.read_cov(fname_cov) + dipoles = trap_music(evoked, forward, noise_cov, n_dipoles=2) + assert len(dipoles) == 2 diff --git a/mne/beamformer/tests/test_resolution_matrix.py b/mne/beamformer/tests/test_resolution_matrix.py index 09ef7fe1118..a02222a86f2 100755 --- a/mne/beamformer/tests/test_resolution_matrix.py +++ b/mne/beamformer/tests/test_resolution_matrix.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- -# Authors: Olaf Hauk -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """ Test computation of resolution matrix for LCMV beamformers. @@ -10,26 +10,22 @@ """ from copy import deepcopy + import numpy as np from numpy.testing import assert_allclose import mne -from mne.datasets import testing from mne.beamformer import make_lcmv, make_lcmv_resolution_matrix +from mne.datasets import testing data_path = testing.data_path(download=False) subjects_dir = data_path / "subjects" fname_inv = ( - data_path - / "MEG" - / "sample" - / "sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif" + data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif" ) fname_evoked = data_path / "MEG" / "sample" / "sample_audvis_trunc-ave.fif" fname_raw = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" -fname_fwd = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" -) +fname_fwd = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" fname_cov = data_path / "MEG" / "sample" / "sample_audvis_trunc-cov.fif" @@ -40,11 +36,10 @@ def test_resolution_matrix_lcmv(): forward = mne.read_forward_solution(fname_fwd) # remove bad channels - forward = mne.pick_channels_forward(forward, exclude='bads') + forward = mne.pick_channels_forward(forward, exclude="bads") # forward operator with fixed source orientations - forward_fxd = mne.convert_forward_solution(forward, surf_ori=True, - force_fixed=True) + forward_fxd = mne.convert_forward_solution(forward, surf_ori=True, force_fixed=True) # evoked info info = mne.io.read_info(fname_evoked) @@ -60,12 +55,18 @@ def test_resolution_matrix_lcmv(): # compute beamformer filters # reg=0. 
to make sure noise_cov and data_cov are as similar as possible - filters = make_lcmv(info, forward_fxd, data_cov, reg=0., - noise_cov=noise_cov, - pick_ori=None, rank=None, - weight_norm=None, - reduce_rank=False, - verbose=False) + filters = make_lcmv( + info, + forward_fxd, + data_cov, + reg=0.0, + noise_cov=noise_cov, + pick_ori=None, + rank=None, + weight_norm=None, + reduce_rank=False, + verbose=False, + ) # Compute resolution matrix for beamformer resmat_lcmv = make_lcmv_resolution_matrix(filters, forward_fxd, info) @@ -74,9 +75,9 @@ def test_resolution_matrix_lcmv(): # transpose of leadfield # create filters with transposed whitened leadfield as weights - forward_fxd = mne.pick_channels_forward(forward_fxd, info['ch_names']) + forward_fxd = mne.pick_channels_forward(forward_fxd, info["ch_names"]) filters_lfd = deepcopy(filters) - filters_lfd['weights'][:] = forward_fxd['sol']['data'].T + filters_lfd["weights"][:] = forward_fxd["sol"]["data"].T # compute resolution matrix for filters with transposed leadfield resmat_fwd = make_lcmv_resolution_matrix(filters_lfd, forward_fxd, info) @@ -86,12 +87,11 @@ def test_resolution_matrix_lcmv(): # Some rows are off by about 0.1 - not yet clear why corr = [] - for (f, l) in zip(resmat_fwd, resmat_lcmv): - - corr.append(np.corrcoef(f, l)[0, 1]) + for f, lf in zip(resmat_fwd, resmat_lcmv): + corr.append(np.corrcoef(f, lf)[0, 1]) # all row correlations should at least be above ~0.8 - assert_allclose(corr, 1., atol=0.2) + assert_allclose(corr, 1.0, atol=0.2) # Maximum row correlation should at least be close to 1 - assert_allclose(np.max(corr), 1., atol=0.01) + assert_allclose(np.max(corr), 1.0, atol=0.01) diff --git a/mne/bem.py b/mne/bem.py index 6a60ce065f5..233b005cfe7 100644 --- a/mne/bem.py +++ b/mne/bem.py @@ -1,45 +1,75 @@ -# Authors: Matti Hämäläinen -# Alexandre Gramfort -# Eric Larson -# Lorenzo De Santis -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # The computations in this code were primarily derived from Matti Hämäläinen's # C code. 
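The `ConductorModel` container documented below is normally produced by the public helpers rather than constructed directly; a minimal sketch, assuming a FreeSurfer-reconstructed subject `'sample'` under a hypothetical `subjects_dir`:

    import mne

    subjects_dir = "/path/to/subjects_dir"  # hypothetical FreeSurfer output location
    surfs = mne.make_bem_model(
        "sample", ico=4, conductivity=(0.3, 0.006, 0.3), subjects_dir=subjects_dir
    )
    bem = mne.make_bem_solution(surfs)  # returns an mne.bem.ConductorModel
    print(bem)  # something like <ConductorModel | BEM (3 layers) solver=mne>
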
-from collections import OrderedDict -from copy import deepcopy -from functools import partial import glob import json import os import os.path as op -from pathlib import Path import shutil -import tempfile +from collections import OrderedDict +from copy import deepcopy +from functools import partial +from pathlib import Path import numpy as np - -from .io.constants import FIFF, FWD -from .io._digitization import _dig_kind_dict, _dig_kind_rev, _dig_kind_ints -from .io.write import (start_and_end_file, start_block, write_float, write_int, - write_float_matrix, write_int_matrix, end_block, - write_string) -from .io.tag import find_tag -from .io.tree import dir_tree_find -from .io.open import fiff_open -from .surface import (read_surface, write_surface, complete_surface_info, - _compute_nearest, _get_ico_surface, read_tri, - _fast_cross_nd_sum, _get_solids, _complete_sphere_surf, - decimate_surface, transform_surface_to) -from .transforms import _ensure_trans, apply_trans, Transform -from .utils import (verbose, logger, run_subprocess, get_subjects_dir, warn, - _pl, _validate_type, _TempDir, _check_freesurfer_home, - _check_fname, _check_option, path_like, _import_nibabel, - _on_missing, _import_h5io_funcs, _ensure_int, - _path_like, _verbose_safe_false, _check_head_radius) - +from scipy.optimize import fmin_cobyla + +from ._fiff._digitization import _dig_kind_dict, _dig_kind_ints, _dig_kind_rev +from ._fiff.constants import FIFF, FWD +from ._fiff.open import fiff_open +from ._fiff.tag import find_tag +from ._fiff.tree import dir_tree_find +from ._fiff.write import ( + end_block, + start_and_end_file, + start_block, + write_float, + write_float_matrix, + write_int, + write_int_matrix, + write_string, +) +from .fixes import _compare_version, _safe_svd +from .surface import ( + _complete_sphere_surf, + _compute_nearest, + _fast_cross_nd_sum, + _get_ico_surface, + _get_solids, + complete_surface_info, + decimate_surface, + read_surface, + read_tri, + transform_surface_to, + write_surface, +) +from .transforms import Transform, _ensure_trans, apply_trans +from .utils import ( + _check_fname, + _check_freesurfer_home, + _check_head_radius, + _check_option, + _ensure_int, + _import_h5io_funcs, + _import_nibabel, + _on_missing, + _path_like, + _pl, + _TempDir, + _validate_type, + _verbose_safe_false, + get_subjects_dir, + logger, + path_like, + run_subprocess, + verbose, + warn, +) +from .viz.misc import plot_bem # ############################################################################ # Compute BEM solution @@ -53,23 +83,27 @@ class ConductorModel(dict): - """BEM or sphere model.""" + """BEM or sphere model. + + See :func:`~mne.make_bem_model` and :func:`~mne.make_bem_solution` to create a + :class:`mne.bem.ConductorModel`. + """ def __repr__(self): # noqa: D105 - if self['is_sphere']: - center = ', '.join('%0.1f' % (x * 1000.) 
for x in self['r0'])
+        if self["is_sphere"]:
+            center = ", ".join(f"{x * 1000.0:.1f}" for x in self["r0"])
             rad = self.radius
             if rad is None:  # no radius / MEG only
-                extra = 'Sphere (no layers): r0=[%s] mm' % center
+                extra = f"Sphere (no layers): r0=[{center}] mm"
             else:
-                extra = ('Sphere (%s layer%s): r0=[%s] R=%1.f mm'
-                         % (len(self['layers']) - 1, _pl(self['layers']),
-                            center, rad * 1000.))
+                extra = (
+                    f"Sphere ({len(self['layers']) - 1} layer{_pl(self['layers'])}): "
+                    f"r0=[{center}] R={rad * 1000.0:1.0f} mm"
+                )
         else:
-            extra = ('BEM (%s layer%s)' % (len(self['surfs']),
-                                           _pl(self['surfs'])))
-            extra += " solver=%s" % self['solver']
-        return '<ConductorModel | %s>' % extra
+            extra = f"BEM ({len(self['surfs'])} layer{_pl(self['surfs'])})"
+            extra += f" solver={self['solver']}"
+        return f"<ConductorModel | {extra}>"

     def copy(self):
         """Return copy of ConductorModel instance."""
@@ -78,9 +112,9 @@ def copy(self):

     @property
     def radius(self):
         """Sphere radius if an EEG sphere model."""
-        if not self['is_sphere']:
-            raise RuntimeError('radius undefined for BEM')
-        return None if len(self['layers']) == 0 else self['layers'][-1]['rad']
+        if not self["is_sphere"]:
+            raise RuntimeError("radius undefined for BEM")
+        return None if len(self["layers"]) == 0 else self["layers"][-1]["rad"]


 def _calc_beta(rk, rk_norm, rk1, rk1_norm):
@@ -108,9 +142,9 @@ def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):
     l2 = np.linalg.norm(v2, axis=1)
     l3 = np.linalg.norm(v3, axis=1)
     ss = l1 * l2 * l3
-    ss += np.einsum('ij,ij,i->i', v1, v2, l3)
-    ss += np.einsum('ij,ij,i->i', v1, v3, l2)
-    ss += np.einsum('ij,ij,i->i', v2, v3, l1)
+    ss += np.einsum("ij,ij,i->i", v1, v2, l3)
+    ss += np.einsum("ij,ij,i->i", v1, v3, l2)
+    ss += np.einsum("ij,ij,i->i", v2, v3, l1)
     solids = np.arctan2(triples, ss)

     # We *could* subselect the good points from v1, v2, v3, triples, solids,
@@ -119,14 +153,16 @@ def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):
     # solution. These three lines ensure we don't get invalid values in
     # _calc_beta.
     bad_mask = np.abs(solids) < np.pi / 1e6
-    l1[bad_mask] = 1.
-    l2[bad_mask] = 1.
-    l3[bad_mask] = 1.
+    l1[bad_mask] = 1.0
+    l2[bad_mask] = 1.0
+    l3[bad_mask] = 1.0

     # Calculate the magic vector vec_omega
-    beta = [_calc_beta(v1, l1, v2, l2)[:, np.newaxis],
-            _calc_beta(v2, l2, v3, l3)[:, np.newaxis],
-            _calc_beta(v3, l3, v1, l1)[:, np.newaxis]]
+    beta = [
+        _calc_beta(v1, l1, v2, l2)[:, np.newaxis],
+        _calc_beta(v2, l2, v3, l3)[:, np.newaxis],
+        _calc_beta(v3, l3, v1, l1)[:, np.newaxis],
+    ]
     vec_omega = (beta[2] - beta[0]) * v1
     vec_omega += (beta[0] - beta[1]) * v2
     vec_omega += (beta[1] - beta[2]) * v3
@@ -140,26 +176,27 @@ def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):
     for k in range(3):
         diff = yys[idx[k - 1]] - yys[idx[k + 1]]
         zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn)
-        omega[:, k] = -n2 * (area2 * zdots * 2. * solids -
-                             triples * (diff * vec_omega).sum(axis=-1))
+        omega[:, k] = -n2 * (
+            area2 * zdots * 2.0 * solids - triples * (diff * vec_omega).sum(axis=-1)
+        )

     # omit the bad points from the solution
-    omega[bad_mask] = 0.
+    omega[bad_mask] = 0.0

     return omega


 def _correct_auto_elements(surf, mat):
     """Improve auto-element approximation."""
     pi2 = 2.0 * np.pi
-    tris_flat = surf['tris'].ravel()
+    tris_flat = surf["tris"].ravel()
     misses = pi2 - mat.sum(axis=1)
     for j, miss in enumerate(misses):
         # How much is missing?
- n_memb = len(surf['neighbor_tri'][j]) + n_memb = len(surf["neighbor_tri"][j]) assert n_memb > 0 # should be guaranteed by our surface checks # The node itself receives one half mat[j, j] = miss / 2.0 # The rest is divided evenly among the member nodes... - miss /= (4.0 * n_memb) + miss /= 4.0 * n_memb members = np.where(j == tris_flat)[0] mods = members % 3 offsets = np.array([[1, 2], [-1, 1], [-1, -2]]) @@ -174,27 +211,29 @@ def _correct_auto_elements(surf, mat): def _fwd_bem_lin_pot_coeff(surfs): """Calculate the coefficients for linear collocation approach.""" # taken from fwd_bem_linear_collocation.c - nps = [surf['np'] for surf in surfs] + nps = [surf["np"] for surf in surfs] np_tot = sum(nps) coeff = np.zeros((np_tot, np_tot)) offsets = np.cumsum(np.concatenate(([0], nps))) for si_1, surf1 in enumerate(surfs): rr_ord = np.arange(nps[si_1]) for si_2, surf2 in enumerate(surfs): - logger.info(" %s (%d) -> %s (%d) ..." % - (_bem_surf_name[surf1['id']], nps[si_1], - _bem_surf_name[surf2['id']], nps[si_2])) - tri_rr = surf2['rr'][surf2['tris']] - tri_nn = surf2['tri_nn'] - tri_area = surf2['tri_area'] - submat = coeff[offsets[si_1]:offsets[si_1 + 1], - offsets[si_2]:offsets[si_2 + 1]] # view - for k in range(surf2['ntri']): - tri = surf2['tris'][k] + logger.info( + f" {_bem_surf_name[surf1['id']]} ({nps[si_1]:d}) -> " + f"{_bem_surf_name[surf2['id']]} ({nps[si_2]}) ..." + ) + tri_rr = surf2["rr"][surf2["tris"]] + tri_nn = surf2["tri_nn"] + tri_area = surf2["tri_area"] + submat = coeff[ + offsets[si_1] : offsets[si_1 + 1], offsets[si_2] : offsets[si_2 + 1] + ] # view + for k in range(surf2["ntri"]): + tri = surf2["tris"][k] if si_1 == si_2: - skip_idx = ((rr_ord == tri[0]) | - (rr_ord == tri[1]) | - (rr_ord == tri[2])) + skip_idx = ( + (rr_ord == tri[0]) | (rr_ord == tri[1]) | (rr_ord == tri[2]) + ) else: skip_idx = list() # No contribution from a triangle that @@ -202,9 +241,13 @@ def _fwd_bem_lin_pot_coeff(surfs): # if sidx1 == sidx2 and (tri == j).any(): # continue # Otherwise do the hard job - coeffs = _lin_pot_coeff(fros=surf1['rr'], tri_rr=tri_rr[k], - tri_nn=tri_nn[k], tri_area=tri_area[k]) - coeffs[skip_idx] = 0. 
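+                # the potential coefficients of triangle k of surf2, evaluated
+                # at every vertex of surf1 (one column per triangle vertex)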
+ coeffs = _lin_pot_coeff( + fros=surf1["rr"], + tri_rr=tri_rr[k], + tri_nn=tri_nn[k], + tri_area=tri_area[k], + ) + coeffs[skip_idx] = 0.0 submat[:, tri] -= coeffs if si_1 == si_2: _correct_auto_elements(surf1, submat) @@ -246,11 +289,11 @@ def _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri): n_last = n_tri[-1] mult = (1.0 + ip_mult) / ip_mult - logger.info(' Combining...') + logger.info(" Combining...") offsets = np.cumsum(np.concatenate(([0], n_tri))) for si in range(len(n_tri)): # Pick the correct submatrix (right column) and multiply - sub = solution[offsets[si]:offsets[si + 1], np.sum(n_tri[:-1]):] + sub = solution[offsets[si] : offsets[si + 1], np.sum(n_tri[:-1]) :] # Multiply sub -= 2 * np.dot(sub, ip_solution) @@ -258,61 +301,63 @@ def _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri): sub[-n_last:, -n_last:] += mult * ip_solution # Final scaling - logger.info(' Scaling...') + logger.info(" Scaling...") solution *= ip_mult return -def _check_complete_surface(surf, copy=False, incomplete='raise', extra=''): - surf = complete_surface_info( - surf, copy=copy, verbose=_verbose_safe_false()) - fewer = np.where([len(t) < 3 for t in surf['neighbor_tri']])[0] +def _check_complete_surface(surf, copy=False, incomplete="raise", extra=""): + surf = complete_surface_info(surf, copy=copy, verbose=_verbose_safe_false()) + fewer = np.where([len(t) < 3 for t in surf["neighbor_tri"]])[0] if len(fewer) > 0: fewer = list(fewer) - fewer = (fewer[:80] + ['...']) if len(fewer) > 80 else fewer - fewer = ', '.join(str(f) for f in fewer) - msg = ('Surface {} has topological defects: {:.0f} / {:.0f} vertices ' - 'have fewer than three neighboring triangles [{}]{}' - .format(_bem_surf_name[surf['id']], len(fewer), len(surf['rr']), - fewer, extra)) - _on_missing(on_missing=incomplete, msg=msg, name='on_defects') + fewer = (fewer[:80] + ["..."]) if len(fewer) > 80 else fewer + fewer = ", ".join(str(f) for f in fewer) + msg = ( + f"Surface {_bem_surf_name[surf['id']]} has topological defects: " + f"{len(fewer)} / {len(surf['rr'])} vertices have fewer than three " + f"neighboring triangles [{fewer}]{extra}" + ) + _on_missing(on_missing=incomplete, msg=msg, name="on_defects") return surf def _fwd_bem_linear_collocation_solution(bem): """Compute the linear collocation potential solution.""" # first, add surface geometries - logger.info('Computing the linear collocation solution...') - logger.info(' Matrix coefficients...') - coeff = _fwd_bem_lin_pot_coeff(bem['surfs']) - bem['nsol'] = len(coeff) + logger.info("Computing the linear collocation solution...") + logger.info(" Matrix coefficients...") + coeff = _fwd_bem_lin_pot_coeff(bem["surfs"]) + bem["nsol"] = len(coeff) logger.info(" Inverting the coefficient matrix...") - nps = [surf['np'] for surf in bem['surfs']] - bem['solution'] = _fwd_bem_multi_solution(coeff, bem['gamma'], nps) - if len(bem['surfs']) == 3: - ip_mult = bem['sigma'][1] / bem['sigma'][2] + nps = [surf["np"] for surf in bem["surfs"]] + bem["solution"] = _fwd_bem_multi_solution(coeff, bem["gamma"], nps) + if len(bem["surfs"]) == 3: + ip_mult = bem["sigma"][1] / bem["sigma"][2] if ip_mult <= FWD.BEM_IP_APPROACH_LIMIT: - logger.info('IP approach required...') - logger.info(' Matrix coefficients (homog)...') - coeff = _fwd_bem_lin_pot_coeff([bem['surfs'][-1]]) - logger.info(' Inverting the coefficient matrix (homog)...') - ip_solution = _fwd_bem_homog_solution(coeff, - [bem['surfs'][-1]['np']]) - logger.info(' Modify the original solution to incorporate ' - 'IP 
approach...') - _fwd_bem_ip_modify_solution(bem['solution'], ip_solution, ip_mult, - nps) - bem['bem_method'] = FIFF.FIFFV_BEM_APPROX_LINEAR - bem['solver'] = 'mne' - - -def _import_openmeeg(what='compute a BEM solution using OpenMEEG'): + logger.info("IP approach required...") + logger.info(" Matrix coefficients (homog)...") + coeff = _fwd_bem_lin_pot_coeff([bem["surfs"][-1]]) + logger.info(" Inverting the coefficient matrix (homog)...") + ip_solution = _fwd_bem_homog_solution(coeff, [bem["surfs"][-1]["np"]]) + logger.info( + " Modify the original solution to incorporate IP approach..." + ) + _fwd_bem_ip_modify_solution(bem["solution"], ip_solution, ip_mult, nps) + bem["bem_method"] = FIFF.FIFFV_BEM_APPROX_LINEAR + bem["solver"] = "mne" + + +def _import_openmeeg(what="compute a BEM solution using OpenMEEG"): try: import openmeeg as om except Exception as exc: raise ImportError( - f'The OpenMEEG module must be installed to {what}, but ' - f'"import openmeeg" resulted in: {exc}') from None + f"The OpenMEEG module must be installed to {what}, but " + f'"import openmeeg" resulted in: {exc}' + ) from None + if not _compare_version(om.__version__, ">=", "2.5.6"): + raise ImportError(f"OpenMEEG 2.5.6+ is required, got {om.__version__}") return om @@ -320,126 +365,37 @@ def _make_openmeeg_geometry(bem, mri_head_t=None): # OpenMEEG om = _import_openmeeg() meshes = [] - for surf in bem['surfs'][::-1]: + for surf in bem["surfs"][::-1]: if mri_head_t is not None: surf = transform_surface_to(surf, "head", mri_head_t, copy=True) - points, faces = surf['rr'], surf['tris'] + points, faces = surf["rr"], surf["tris"] faces = faces[:, [1, 0, 2]] # swap faces meshes.append((points, faces)) - conductivity = bem['sigma'][::-1] - # We should be able to do this: - # - # geom = om.make_nested_geometry(meshes, conductivity) - # - # But OpenMEEG's NumPy support is iffy. So let's use file IO for now :( - - def _write_tris(fname, mesh): - from .surface import complete_surface_info - mesh = dict(rr=mesh[0], tris=mesh[1]) - complete_surface_info(mesh, copy=False, do_neighbor_tri=False) - with open(fname, 'w') as fid: - fid.write(f'- {len(mesh["rr"])}\n') - for r, n in zip(mesh['rr'], mesh['nn']): - fid.write(f'{r[0]:.8f} {r[1]:.8f} {r[2]:.8f} ' - f'{n[0]:.8f} {n[1]:.8f} {n[2]:.8f}\n') - n_tri = len(mesh['tris']) - fid.write(f'- {n_tri} {n_tri} {n_tri}\n') - for t in mesh['tris']: - fid.write(f'{t[0]} {t[1]} {t[2]}\n') - - assert len(conductivity) in (1, 3) - # on Windows, the dir can't be cleaned up, presumably because OpenMEEG - # does not let go of the file pointer (?). This is not great but hopefully - # writing files is temporary, and/or we can fix the file pointer bug - # in OpenMEEG soon. - tmp_dir = tempfile.TemporaryDirectory(prefix='openmeeg-io-') - tmp_path = Path(tmp_dir.name) - # In 3.10+ we could use this as a context manager as there is a - # ignore_cleanup_errors arg, but before this there is not. 
- # so let's just try/finally - try: - tmp_path = Path(tmp_path) - # write geom_file and three .tri files - geom_file = tmp_path / 'tmp.geom' - names = ['inner_skull', 'outer_skull', 'outer_skin'] - lines = [ - '# Domain Description 1.1', - '', - f'Interfaces {len(conductivity)}' - '', - f'Interface Cortex: "{names[0]}.tri"', - ] - if len(conductivity) == 3: - lines.extend([ - f'Interface Skull: "{names[1]}.tri"', - f'Interface Head: "{names[2]}.tri"', - ]) - lines.extend([ - '', - f'Domains {len(conductivity) + 1}', - '', - 'Domain Brain: -Cortex', - ]) - if len(conductivity) == 1: - lines.extend([ - 'Domain Air: Cortex', - ]) - else: - lines.extend([ - 'Domain Skull: Cortex -Skull', - 'Domain Scalp: Skull -Head', - 'Domain Air: Head', - ]) - with open(geom_file, 'w') as fid: - fid.write('\n'.join(lines)) - for mesh, name in zip(meshes, names): - _write_tris(tmp_path / f'{name}.tri', mesh) - # write cond_file - cond_file = tmp_path / 'tmp.cond' - lines = [ - '# Properties Description 1.0 (Conductivities)', - '', - f'Brain {conductivity[0]}', - ] - if len(conductivity) == 3: - lines.extend([ - f'Skull {conductivity[1]}', - f'Scalp {conductivity[2]}', - ]) - lines.append('Air 0.0') - with open(cond_file, 'w') as fid: - fid.write('\n'.join(lines)) - geom = om.Geometry(str(geom_file), str(cond_file)) - finally: - try: - tmp_dir.cleanup() - except Exception: - pass # ignore any cleanup errors (esp. on Windows) - - return geom + conductivity = bem["sigma"][::-1] + return om.make_nested_geometry(meshes, conductivity) def _fwd_bem_openmeeg_solution(bem): om = _import_openmeeg() - logger.info('Creating BEM solution using OpenMEEG') - logger.info('Computing the openmeeg head matrix solution...') - logger.info(' Matrix coefficients...') + logger.info("Creating BEM solution using OpenMEEG") + logger.info("Computing the openmeeg head matrix solution...") + logger.info(" Matrix coefficients...") geom = _make_openmeeg_geometry(bem) hm = om.HeadMat(geom) - bem['nsol'] = hm.nlin() + bem["nsol"] = hm.nlin() logger.info(" Inverting the coefficient matrix...") hm.invert() # invert inplace - bem['solution'] = hm.array_flat() - bem['bem_method'] = FIFF.FIFFV_BEM_APPROX_LINEAR - bem['solver'] = 'openmeeg' + bem["solution"] = hm.array_flat() + bem["bem_method"] = FIFF.FIFFV_BEM_APPROX_LINEAR + bem["solver"] = "openmeeg" @verbose -def make_bem_solution(surfs, *, solver='mne', verbose=None): +def make_bem_solution(surfs, *, solver="mne", verbose=None): """Create a BEM solution using the linear collocation approach. Parameters @@ -447,8 +403,8 @@ def make_bem_solution(surfs, *, solver='mne', verbose=None): surfs : list of dict The BEM surfaces to use (from :func:`mne.make_bem_model`). solver : str - Can be ``'mne'`` (default) to use MNE-Python, or ``'openmeeg'`` to use - the :doc:`OpenMEEG ` package. + Can be ``'mne'`` (default) to use MNE-Python, or ``'openmeeg'`` to use the + `OpenMEEG `__ package. .. versionadded:: 1.2 %(verbose)s @@ -470,76 +426,82 @@ def make_bem_solution(surfs, *, solver='mne', verbose=None): ----- .. 
versionadded:: 0.10.0 """ - _validate_type(solver, str, 'solver') - _check_option('method', solver.lower(), ('mne', 'openmeeg')) + _validate_type(solver, str, "solver") + _check_option("method", solver.lower(), ("mne", "openmeeg")) bem = _ensure_bem_surfaces(surfs) _add_gamma_multipliers(bem) - if len(bem['surfs']) == 3: - logger.info('Three-layer model surfaces loaded.') - elif len(bem['surfs']) == 1: - logger.info('Homogeneous model surface loaded.') + if len(bem["surfs"]) == 3: + logger.info("Three-layer model surfaces loaded.") + elif len(bem["surfs"]) == 1: + logger.info("Homogeneous model surface loaded.") else: - raise RuntimeError('Only 1- or 3-layer BEM computations supported') - _check_bem_size(bem['surfs']) - for surf in bem['surfs']: + raise RuntimeError("Only 1- or 3-layer BEM computations supported") + _check_bem_size(bem["surfs"]) + for surf in bem["surfs"]: _check_complete_surface(surf) - if solver.lower() == 'openmeeg': + if solver.lower() == "openmeeg": _fwd_bem_openmeeg_solution(bem) else: - assert solver.lower() == 'mne' + assert solver.lower() == "mne" _fwd_bem_linear_collocation_solution(bem) logger.info("Solution ready.") - logger.info('BEM geometry computations complete.') + logger.info("BEM geometry computations complete.") return bem # ############################################################################ # Make BEM model + def _ico_downsample(surf, dest_grade): """Downsample the surface if isomorphic to a subdivided icosahedron.""" - n_tri = len(surf['tris']) - bad_msg = ("Cannot decimate to requested ico grade %d. The provided " - "BEM surface has %d triangles, which cannot be isomorphic with " - "a subdivided icosahedron. Consider manually decimating the " - "surface to a suitable density and then use ico=None in " - "make_bem_model." % (dest_grade, n_tri)) + n_tri = len(surf["tris"]) + bad_msg = ( + f"Cannot decimate to requested ico grade {dest_grade}. The provided " + f"BEM surface has {n_tri} triangles, which cannot be isomorphic with " + "a subdivided icosahedron. Consider manually decimating the surface to " + "a suitable density and then use ico=None in make_bem_model." + ) if n_tri % 20 != 0: raise RuntimeError(bad_msg) n_tri = n_tri // 20 found = int(round(np.log(n_tri) / np.log(4))) - if n_tri != 4 ** found: + if n_tri != 4**found: raise RuntimeError(bad_msg) del n_tri if dest_grade > found: - raise RuntimeError('For this surface, decimation grade should be %d ' - 'or less, not %s.' % (found, dest_grade)) + raise RuntimeError( + f"For this surface, decimation grade should be {found} or less, " + f"not {dest_grade}." 
+ ) source = _get_ico_surface(found) dest = _get_ico_surface(dest_grade, patch_stats=True) - del dest['tri_cent'] - del dest['tri_nn'] - del dest['neighbor_tri'] - del dest['tri_area'] - if not np.array_equal(source['tris'], surf['tris']): - raise RuntimeError('The source surface has a matching number of ' - 'triangles but ordering is wrong') - logger.info('Going from %dth to %dth subdivision of an icosahedron ' - '(n_tri: %d -> %d)' % (found, dest_grade, len(surf['tris']), - len(dest['tris']))) + del dest["tri_cent"] + del dest["tri_nn"] + del dest["neighbor_tri"] + del dest["tri_area"] + if not np.array_equal(source["tris"], surf["tris"]): + raise RuntimeError( + "The source surface has a matching number of " + "triangles but ordering is wrong" + ) + logger.info( + f"Going from {found}th to {dest_grade}th subdivision of an icosahedron " + f"(n_tri: {len(surf['tris'])} -> {len(dest['tris'])})" + ) # Find the mapping - dest['rr'] = surf['rr'][_get_ico_map(source, dest)] + dest["rr"] = surf["rr"][_get_ico_map(source, dest)] return dest def _get_ico_map(fro, to): """Get a mapping between ico surfaces.""" - nearest, dists = _compute_nearest(fro['rr'], to['rr'], return_dists=True) + nearest, dists = _compute_nearest(fro["rr"], to["rr"], return_dists=True) n_bads = (dists > 5e-3).sum() if n_bads > 0: - raise RuntimeError('No matching vertex for %d destination vertices' - % (n_bads)) + raise RuntimeError(f"No matching vertex for {n_bads} destination vertices") return nearest @@ -548,32 +510,38 @@ def _order_surfaces(surfs): if len(surfs) != 3: return surfs # we have three surfaces - surf_order = [FIFF.FIFFV_BEM_SURF_ID_HEAD, - FIFF.FIFFV_BEM_SURF_ID_SKULL, - FIFF.FIFFV_BEM_SURF_ID_BRAIN] - ids = np.array([surf['id'] for surf in surfs]) + surf_order = [ + FIFF.FIFFV_BEM_SURF_ID_HEAD, + FIFF.FIFFV_BEM_SURF_ID_SKULL, + FIFF.FIFFV_BEM_SURF_ID_BRAIN, + ] + ids = np.array([surf["id"] for surf in surfs]) if set(ids) != set(surf_order): - raise RuntimeError('bad surface ids: %s' % ids) + raise RuntimeError(f"bad surface ids: {ids}") order = [np.where(ids == id_)[0][0] for id_ in surf_order] surfs = [surfs[idx] for idx in order] return surfs -def _assert_complete_surface(surf, incomplete='raise'): +def _assert_complete_surface(surf, incomplete="raise"): """Check the sum of solid angles as seen from inside.""" # from surface_checks.c # Center of mass.... 
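+    # Completeness check: viewed from a point inside a closed, consistently
+    # oriented surface, the solid angles of its triangles add up to a full
+    # sphere, so ``prop`` computed below should be 1 within numerical error.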
- cm = surf['rr'].mean(axis=0) - logger.info('%s CM is %6.2f %6.2f %6.2f mm' % - (_bem_surf_name[surf['id']], - 1000 * cm[0], 1000 * cm[1], 1000 * cm[2])) - tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0] + cm = surf["rr"].mean(axis=0) + logger.info( + f"{_bem_surf_name[surf['id']]} CM is " + f"{1000 * cm[0]:6.2f} " + f"{1000 * cm[1]:6.2f} " + f"{1000 * cm[2]:6.2f} mm" + ) + tot_angle = _get_solids(surf["rr"][surf["tris"]], cm[np.newaxis, :])[0] prop = tot_angle / (2 * np.pi) if np.abs(prop - 1.0) > 1e-5: - msg = (f'Surface {_bem_surf_name[surf["id"]]} is not complete (sum of ' - f'solid angles yielded {prop}, should be 1.)') - _on_missing( - incomplete, msg, name='incomplete', error_klass=RuntimeError) + msg = ( + f"Surface {_bem_surf_name[surf['id']]} is not complete (sum of " + f"solid angles yielded {prop}, should be 1.)" + ) + _on_missing(incomplete, msg, name="incomplete", error_klass=RuntimeError) def _assert_inside(fro, to): @@ -581,15 +549,15 @@ def _assert_inside(fro, to): # this is "is_inside" in surface_checks.c fro_name = _bem_surf_name[fro["id"]] to_name = _bem_surf_name[to["id"]] - logger.info( - f'Checking that surface {fro_name} is inside surface {to_name} ...') - tot_angle = _get_solids(to['rr'][to['tris']], fro['rr']) + logger.info(f"Checking that surface {fro_name} is inside surface {to_name} ...") + tot_angle = _get_solids(to["rr"][to["tris"]], fro["rr"]) if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any(): raise RuntimeError( - f'Surface {fro_name} is not completely inside surface {to_name}') + f"Surface {fro_name} is not completely inside surface {to_name}" + ) -def _check_surfaces(surfs, incomplete='raise'): +def _check_surfaces(surfs, incomplete="raise"): """Check that the surfaces are complete and non-intersecting.""" for surf in surfs: _assert_complete_surface(surf, incomplete=incomplete) @@ -600,38 +568,41 @@ def _check_surfaces(surfs, incomplete='raise'): def _check_surface_size(surf): """Check that the coordinate limits are reasonable.""" - sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0) + sizes = surf["rr"].max(axis=0) - surf["rr"].min(axis=0) if (sizes < 0.05).any(): raise RuntimeError( - f'Dimensions of the surface {_bem_surf_name[surf["id"]]} seem too ' - f'small ({1000 * sizes.min():9.5f}). Maybe the unit of measure' - ' is meters instead of mm') + f"Dimensions of the surface {_bem_surf_name[surf['id']]} seem too " + f"small ({1000 * sizes.min():9.5f}). 
Maybe the unit of measure" + " is meters instead of mm" + ) def _check_thicknesses(surfs): """Compute how close we are.""" for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]): - min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'], - return_dists=True)[1] + min_dist = _compute_nearest(surf_1["rr"], surf_2["rr"], return_dists=True)[1] min_dist = min_dist.min() - fro = _bem_surf_name[surf_1['id']] - to = _bem_surf_name[surf_2['id']] - logger.info(f'Checking distance between {fro} and {to} surfaces...') - logger.info(f'Minimum distance between the {fro} and {to} surfaces is ' - f'approximately {1000 * min_dist:6.1f} mm') - - -def _surfaces_to_bem(surfs, ids, sigmas, ico=None, rescale=True, - incomplete='raise', extra=''): + fro = _bem_surf_name[surf_1["id"]] + to = _bem_surf_name[surf_2["id"]] + logger.info(f"Checking distance between {fro} and {to} surfaces...") + logger.info( + f"Minimum distance between the {fro} and {to} surfaces is " + f"approximately {1000 * min_dist:6.1f} mm" + ) + + +def _surfaces_to_bem( + surfs, ids, sigmas, ico=None, rescale=True, incomplete="raise", extra="" +): """Convert surfaces to a BEM.""" # equivalent of mne_surf2bem # surfs can be strings (filenames) or surface dicts - if len(surfs) not in (1, 3) or not (len(surfs) == len(ids) == - len(sigmas)): - raise ValueError('surfs, ids, and sigmas must all have the same ' - 'number of elements (1 or 3)') + if len(surfs) not in (1, 3) or not (len(surfs) == len(ids) == len(sigmas)): + raise ValueError( + "surfs, ids, and sigmas must all have the same number of elements (1 or 3)" + ) for si, surf in enumerate(surfs): - if isinstance(surf, (str, Path, os.PathLike)): + if isinstance(surf, str | Path | os.PathLike): surfs[si] = surf = read_surface(surf, return_dict=True)[-1] # Downsampling if the surface is isomorphic with a subdivided icosahedron if ico is not None: @@ -639,19 +610,18 @@ def _surfaces_to_bem(surfs, ids, sigmas, ico=None, rescale=True, surfs[si] = _ico_downsample(surf, ico) for surf, id_ in zip(surfs, ids): # Do topology checks (but don't save data) to fail early - surf['id'] = id_ - _check_complete_surface(surf, copy=True, incomplete=incomplete, - extra=extra) - surf['coord_frame'] = surf.get('coord_frame', FIFF.FIFFV_COORD_MRI) - surf.update(np=len(surf['rr']), ntri=len(surf['tris'])) + surf["id"] = id_ + _check_complete_surface(surf, copy=True, incomplete=incomplete, extra=extra) + surf["coord_frame"] = surf.get("coord_frame", FIFF.FIFFV_COORD_MRI) + surf.update(np=len(surf["rr"]), ntri=len(surf["tris"])) if rescale: - surf['rr'] /= 1000. # convert to meters + surf["rr"] /= 1000.0 # convert to meters # Shifting surfaces is not implemented here... # Order the surfaces for the benefit of the topology checks for surf, sigma in zip(surfs, sigmas): - surf['sigma'] = sigma + surf["sigma"] = sigma surfs = _order_surfaces(surfs) # Check topology as best we can @@ -659,39 +629,42 @@ def _surfaces_to_bem(surfs, ids, sigmas, ico=None, rescale=True, for surf in surfs: _check_surface_size(surf) _check_thicknesses(surfs) - logger.info('Surfaces passed the basic topology checks.') + logger.info("Surfaces passed the basic topology checks.") return surfs @verbose -def make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3), - subjects_dir=None, verbose=None): +def make_bem_model( + subject, ico=4, conductivity=(0.3, 0.006, 0.3), subjects_dir=None, verbose=None +): """Create a BEM model for a subject. 
+ Use :func:`~mne.make_bem_solution` to turn the returned surfaces into a + :class:`~mne.bem.ConductorModel` suitable for forward calculation. + .. note:: To get a single layer bem corresponding to the --homog flag in the command line tool set the ``conductivity`` parameter - to a list/tuple with a single value (e.g. [0.3]). + to a float (e.g. ``0.3``). Parameters ---------- - subject : str - The subject. + %(subject)s ico : int | None The surface ico downsampling to use, e.g. ``5=20484``, ``4=5120``, ``3=1280``. If None, no subsampling is applied. - conductivity : array of int, shape (3,) or (1,) + conductivity : float | array of float of shape (3,) or (1,) The conductivities to use for each shell. Should be a single element for a one-layer model, or three elements for a three-layer model. Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a - single-layer model would be ``[0.3]``. + single-layer model is ``[0.3]``. %(subjects_dir)s %(verbose)s Returns ------- surfaces : list of dict - The BEM surfaces. Use `make_bem_solution` to turn these into a - `~mne.bem.ConductorModel` suitable for forward calculation. + The BEM surfaces. Use :func:`~mne.make_bem_solution` to turn these into a + :class:`~mne.bem.ConductorModel` suitable for forward calculation. See Also -------- @@ -704,10 +677,11 @@ def make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3), ----- .. versionadded:: 0.10.0 """ - conductivity = np.array(conductivity, float) + conductivity = np.atleast_1d(conductivity).astype(float) if conductivity.ndim != 1 or conductivity.size not in (1, 3): - raise ValueError('conductivity must be 1D array-like with 1 or 3 ' - 'elements') + raise ValueError( + "conductivity must be a float or a 1D array-like with 1 or 3 elements" + ) subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) subject_dir = subjects_dir / subject bem_dir = subject_dir / "bem" @@ -715,27 +689,30 @@ def make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3), outer_skull = bem_dir / "outer_skull.surf" outer_skin = bem_dir / "outer_skin.surf" surfaces = [inner_skull, outer_skull, outer_skin] - ids = [FIFF.FIFFV_BEM_SURF_ID_BRAIN, - FIFF.FIFFV_BEM_SURF_ID_SKULL, - FIFF.FIFFV_BEM_SURF_ID_HEAD] - logger.info('Creating the BEM geometry...') + ids = [ + FIFF.FIFFV_BEM_SURF_ID_BRAIN, + FIFF.FIFFV_BEM_SURF_ID_SKULL, + FIFF.FIFFV_BEM_SURF_ID_HEAD, + ] + logger.info("Creating the BEM geometry...") if len(conductivity) == 1: surfaces = surfaces[:1] ids = ids[:1] surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico) _check_bem_size(surfaces) - logger.info('Complete.\n') + logger.info("Complete.\n") return surfaces # ############################################################################ # Compute EEG sphere model + def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms): """Get the model depended weighting factor for n.""" - nlayer = len(m['layers']) + nlayer = len(m["layers"]) if nlayer in (0, 1): - return 1. 
+ return 1.0 # Initialize the arrays c1 = np.zeros(nlayer - 1) @@ -743,9 +720,9 @@ def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms): cr = np.zeros(nlayer - 1) cr_mult = np.zeros(nlayer - 1) for k in range(nlayer - 1): - c1[k] = m['layers'][k]['sigma'] / m['layers'][k + 1]['sigma'] + c1[k] = m["layers"][k]["sigma"] / m["layers"][k + 1]["sigma"] c2[k] = c1[k] - 1.0 - cr_mult[k] = m['layers'][k]['rel_rad'] + cr_mult[k] = m["layers"][k]["rel_rad"] cr[k] = cr_mult[k] cr_mult[k] *= cr_mult[k] @@ -759,8 +736,13 @@ def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms): M = np.eye(2) n1 = n + 1.0 for k in range(nlayer - 2, -1, -1): - M = np.dot([[n + n1 * c1[k], n1 * c2[k] / cr[k]], - [n * c2[k] * cr[k], n1 + n * c1[k]]], M) + M = np.dot( + [ + [n + n1 * c1[k], n1 * c2[k] / cr[k]], + [n * c2[k] * cr[k], n1 + n * c1[k]], + ], + M, + ) num = n * (2.0 * n + 1.0) ** (nlayer - 1) coeffs[n - 1] = num / (n * M[1, 1] + n1 * M[1, 0]) return coeffs @@ -768,16 +750,14 @@ def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms): def _compose_linear_fitting_data(mu, u): """Get the linear fitting data.""" - from scipy import linalg - k1 = np.arange(1, u['nterms']) + k1 = np.arange(1, u["nterms"]) mu1ns = mu[0] ** k1 # data to be fitted - y = u['w'][:-1] * (u['fn'][1:] - mu1ns * u['fn'][0]) + y = u["w"][:-1] * (u["fn"][1:] - mu1ns * u["fn"][0]) # model matrix - M = u['w'][:-1, np.newaxis] * (mu[1:] ** k1[:, np.newaxis] - - mu1ns[:, np.newaxis]) - uu, sing, vv = linalg.svd(M, full_matrices=False) - ncomp = u['nfit'] - 1 + M = u["w"][:-1, np.newaxis] * (mu[1:] ** k1[:, np.newaxis] - mu1ns[:, np.newaxis]) + uu, sing, vv = _safe_svd(M, full_matrices=False) + ncomp = u["nfit"] - 1 uu, sing, vv = uu[:, :ncomp], sing[:ncomp], vv[:ncomp] return y, uu, sing, vv @@ -791,9 +771,9 @@ def _compute_linear_parameters(mu, u): resi = y - np.dot(uu, vec) vec /= sing - lambda_ = np.zeros(u['nfit']) + lambda_ = np.zeros(u["nfit"]) lambda_[1:] = np.dot(vec, vv) - lambda_[0] = u['fn'][0] - np.sum(lambda_[1:]) + lambda_[0] = u["fn"][0] - np.sum(lambda_[1:]) rv = np.dot(resi, resi) / np.dot(y, y) return rv, lambda_ @@ -811,28 +791,27 @@ def _one_step(mu, u): def _fwd_eeg_fit_berg_scherg(m, nterms, nfit): """Fit the Berg-Scherg equivalent spherical model dipole parameters.""" - from scipy.optimize import fmin_cobyla assert nfit >= 2 u = dict(nfit=nfit, nterms=nterms) # (1) Calculate the coefficients of the true expansion - u['fn'] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1) + u["fn"] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1) # (2) Calculate the weighting - f = (min([layer['rad'] for layer in m['layers']]) / - max([layer['rad'] for layer in m['layers']])) + f = min([layer["rad"] for layer in m["layers"]]) / max( + [layer["rad"] for layer in m["layers"]] + ) # correct weighting k = np.arange(1, nterms + 1) - u['w'] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) / - k) * np.power(f, (k - 1.0)) - u['w'][-1] = 0 + u["w"] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) / k) * np.power(f, (k - 1.0)) + u["w"][-1] = 0 # Do the nonlinear minimization, constraining mu to the interval [-1, +1] mu_0 = np.zeros(3) fun = partial(_one_step, u=u) catol = 1e-6 - max_ = 1. 
- 2 * catol + max_ = 1.0 - 2 * catol def cons(x): return max_ - np.abs(x) @@ -844,17 +823,22 @@ def cons(x): order = np.argsort(mu)[::-1] mu, lambda_ = mu[order], lambda_[order] # sort: largest mu first - m['mu'] = mu + m["mu"] = mu # This division takes into account the actual conductivities - m['lambda'] = lambda_ / m['layers'][-1]['sigma'] - m['nfit'] = nfit + m["lambda"] = lambda_ / m["layers"][-1]["sigma"] + m["nfit"] = nfit return rv @verbose -def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None, - relative_radii=(0.90, 0.92, 0.97, 1.0), - sigmas=(0.33, 1.0, 0.004, 0.33), verbose=None): +def make_sphere_model( + r0=(0.0, 0.0, 0.04), + head_radius=0.09, + info=None, + relative_radii=(0.90, 0.92, 0.97, 1.0), + sigmas=(0.33, 1.0, 0.004, 0.33), + verbose=None, +): """Create a spherical model for forward solution calculation. Parameters @@ -864,7 +848,8 @@ def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None, center will be calculated from the digitization points in info. head_radius : float | str | None If float, compute spherical shells for EEG using the given radius. - If 'auto', estimate an appropriate radius from the dig points in Info, + If ``'auto'``, estimate an appropriate radius from the dig points in the + :class:`~mne.Info` provided by the argument ``info``. If None, exclude shells (single layer sphere model). %(info)s Only needed if ``r0`` or ``head_radius`` are ``'auto'``. relative_radii : array-like @@ -896,33 +881,37 @@ def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None, .. versionadded:: 0.9.0 """ - for name in ('r0', 'head_radius'): + for name in ("r0", "head_radius"): param = locals()[name] if isinstance(param, str): - if param != 'auto': - raise ValueError('%s, if str, must be "auto" not "%s"' - % (name, param)) + if param != "auto": + raise ValueError(f'{name}, if str, must be "auto" not "{param}"') relative_radii = np.array(relative_radii, float).ravel() sigmas = np.array(sigmas, float).ravel() if len(relative_radii) != len(sigmas): - raise ValueError('relative_radii length (%s) must match that of ' - 'sigmas (%s)' % (len(relative_radii), - len(sigmas))) + raise ValueError( + f"relative_radii length ({len(relative_radii)}) must match that of sigmas (" + f"{len(sigmas)})" + ) if len(sigmas) <= 1 and head_radius is not None: - raise ValueError('at least 2 sigmas must be supplied if ' - 'head_radius is not None, got %s' % (len(sigmas),)) - if (isinstance(r0, str) and r0 == 'auto') or \ - (isinstance(head_radius, str) and head_radius == 'auto'): + raise ValueError( + "at least 2 sigmas must be supplied if head_radius is not None, got " + f"{len(sigmas)}" + ) + if (isinstance(r0, str) and r0 == "auto") or ( + isinstance(head_radius, str) and head_radius == "auto" + ): if info is None: - raise ValueError('Info must not be None for auto mode') - head_radius_fit, r0_fit = fit_sphere_to_headshape(info, units='m')[:2] + raise ValueError("Info must not be None for auto mode") + head_radius_fit, r0_fit = fit_sphere_to_headshape(info, units="m")[:2] if isinstance(r0, str): r0 = r0_fit if isinstance(head_radius, str): head_radius = head_radius_fit - sphere = ConductorModel(is_sphere=True, r0=np.array(r0), - coord_frame=FIFF.FIFFV_COORD_HEAD) - sphere['layers'] = list() + sphere = ConductorModel( + is_sphere=True, r0=np.array(r0), coord_frame=FIFF.FIFFV_COORD_HEAD + ) + sphere["layers"] = list() if head_radius is not None: # Eventually these could be configurable... 
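+        # The radii supplied here are relative; the layers are first normalized
+        # so the outermost one has rad == rel_rad == 1, and absolute radii are
+        # restored further down as head_radius * rel_rad.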
relative_radii = np.array(relative_radii, float) @@ -933,15 +922,15 @@ def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None, for rel_rad, sig in zip(relative_radii, sigmas): # sort layers by (relative) radius, and scale radii layer = dict(rad=rel_rad, sigma=sig) - layer['rel_rad'] = layer['rad'] = rel_rad - sphere['layers'].append(layer) + layer["rel_rad"] = layer["rad"] = rel_rad + sphere["layers"].append(layer) # scale the radii - R = sphere['layers'][-1]['rad'] - rR = sphere['layers'][-1]['rel_rad'] - for layer in sphere['layers']: - layer['rad'] /= R - layer['rel_rad'] /= rR + R = sphere["layers"][-1]["rad"] + rR = sphere["layers"][-1]["rel_rad"] + for layer in sphere["layers"]: + layer["rad"] /= R + layer["rel_rad"] /= rR # # Setup the EEG sphere model calculations @@ -949,25 +938,24 @@ def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None, # Scale the relative radii for k in range(len(relative_radii)): - sphere['layers'][k]['rad'] = (head_radius * - sphere['layers'][k]['rel_rad']) + sphere["layers"][k]["rad"] = head_radius * sphere["layers"][k]["rel_rad"] rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3) - logger.info('\nEquiv. model fitting -> RV = %g %%' % (100 * rv)) + logger.info(f"\nEquiv. model fitting -> RV = {100 * rv:g} %%") for k in range(3): - logger.info('mu%d = %g lambda%d = %g' - % (k + 1, sphere['mu'][k], k + 1, - sphere['layers'][-1]['sigma'] * - sphere['lambda'][k])) - logger.info('Set up EEG sphere model with scalp radius %7.1f mm\n' - % (1000 * head_radius,)) + s_k = sphere["layers"][-1]["sigma"] * sphere["lambda"][k] + logger.info(f"mu{k + 1} = {sphere['mu'][k]:g} lambda{k + 1} = {s_k:g}") + logger.info( + f"Set up EEG sphere model with scalp radius {1000 * head_radius:7.1f} mm\n" + ) return sphere # ############################################################################# # Sphere fitting + @verbose -def fit_sphere_to_headshape(info, dig_kinds='auto', units='m', verbose=None): +def fit_sphere_to_headshape(info, dig_kinds="auto", units="m", verbose=None): """Fit a sphere to the headshape points to determine head center. Parameters @@ -994,11 +982,10 @@ def fit_sphere_to_headshape(info, dig_kinds='auto', units='m', verbose=None): This function excludes any points that are low and frontal (``z < 0 and y > 0``) to improve the fit. """ - if not isinstance(units, str) or units not in ('m', 'mm'): + if not isinstance(units, str) or units not in ("m", "mm"): raise ValueError('units must be a "m" or "mm"') - radius, origin_head, origin_device = _fit_sphere_to_headshape( - info, dig_kinds) - if units == 'mm': + radius, origin_head, origin_device = _fit_sphere_to_headshape(info, dig_kinds) + if units == "mm": radius *= 1e3 origin_head *= 1e3 origin_device *= 1e3 @@ -1006,8 +993,7 @@ def fit_sphere_to_headshape(info, dig_kinds='auto', units='m', verbose=None): @verbose -def get_fitting_dig(info, dig_kinds='auto', exclude_frontal=True, - verbose=None): +def get_fitting_dig(info, dig_kinds="auto", exclude_frontal=True, verbose=None): """Get digitization points suitable for sphere fitting. Parameters @@ -1033,17 +1019,18 @@ def get_fitting_dig(info, dig_kinds='auto', exclude_frontal=True, .. 
versionadded:: 0.14 """ _validate_type(info, "info") - if info['dig'] is None: - raise RuntimeError('Cannot fit headshape without digitization ' - ', info["dig"] is None') + if info.get("dig", None) is None: # "dig" can be missing for fwd/inv + raise RuntimeError( + 'Cannot fit headshape without digitization, info["dig"] is None' + ) if isinstance(dig_kinds, str): - if dig_kinds == 'auto': + if dig_kinds == "auto": # try "extra" first try: - return get_fitting_dig(info, 'extra') + return get_fitting_dig(info, "extra") except ValueError: pass - return get_fitting_dig(info, ('extra', 'eeg')) + return get_fitting_dig(info, ("extra", "eeg")) else: dig_kinds = (dig_kinds,) # convert string args to ints (first make dig_kinds mutable in case tuple) @@ -1051,19 +1038,20 @@ def get_fitting_dig(info, dig_kinds='auto', exclude_frontal=True, for di, d in enumerate(dig_kinds): dig_kinds[di] = _dig_kind_dict.get(d, d) if dig_kinds[di] not in _dig_kind_ints: - raise ValueError('dig_kinds[#%d] (%s) must be one of %s' - % (di, d, sorted(list(_dig_kind_dict.keys())))) + raise ValueError( + f"dig_kinds[{di}] ({d}) must be one of {sorted(_dig_kind_dict)}" + ) # get head digization points of the specified kind(s) - dig = [p for p in info['dig'] if p['kind'] in dig_kinds] + dig = [p for p in info["dig"] if p["kind"] in dig_kinds] if len(dig) == 0: - raise ValueError( - f'No digitization points found for dig_kinds={dig_kinds}') - if any(p['coord_frame'] != FIFF.FIFFV_COORD_HEAD for p in dig): + raise ValueError(f"No digitization points found for dig_kinds={dig_kinds}") + if any(p["coord_frame"] != FIFF.FIFFV_COORD_HEAD for p in dig): raise RuntimeError( - f'Digitization points dig_kinds={dig_kinds} not in head ' - 'coordinates, contact mne-python developers') - hsp = [p['r'] for p in dig] + f"Digitization points dig_kinds={dig_kinds} not in head " + "coordinates, contact mne-python developers" + ) + hsp = [p["r"] for p in dig] del dig # exclude some frontal points (nose etc.) 
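The hunk that follows replaces the COBYLA-based ``_fit_sphere`` with a closed-form linear least-squares fit. A self-contained numeric sketch of that formulation on synthetic head-shape points (all coordinates made up for illustration; no MNE objects involved):

```python
import numpy as np

# Synthetic digitization points on a sphere of known geometry:
# radius 0.09 m, centered slightly above the head-frame origin.
rng = np.random.default_rng(42)
true_center = np.array([0.0, 0.0, 0.04])
true_radius = 0.09
pts = rng.standard_normal((200, 3))
pts = true_center + true_radius * pts / np.linalg.norm(pts, axis=1, keepdims=True)
pts += 1e-4 * rng.standard_normal(pts.shape)  # small measurement noise

# ||p - c||^2 = r^2  <=>  2 p . c + (r^2 - ||c||^2) = ||p||^2, which is linear
# in the unknowns x = [c, r^2 - ||c||^2]; solve A x = b in the least-squares sense.
A = np.c_[2 * pts, np.ones((len(pts), 1))]
b = (pts**2).sum(axis=1)
x, *_ = np.linalg.lstsq(A, b, rcond=None)
center, radius = x[:3], np.sqrt((x[:3] ** 2).sum() + x[3])

print(np.round(center, 3), np.round(radius, 3))  # ~[0. 0. 0.04] and ~0.09
```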
@@ -1072,96 +1060,89 @@ def get_fitting_dig(info, dig_kinds='auto', exclude_frontal=True, hsp = np.array(hsp) if len(hsp) <= 10: - kinds_str = ', '.join(['"%s"' % _dig_kind_rev[d] - for d in sorted(dig_kinds)]) - msg = ('Only %s head digitization points of the specified kind%s (%s,)' - % (len(hsp), _pl(dig_kinds), kinds_str)) + kinds_str = ", ".join([f'"{_dig_kind_rev[d]}"' for d in sorted(dig_kinds)]) + msg = ( + f"Only {len(hsp)} head digitization points of the specified " + f"kind{_pl(dig_kinds)} ({kinds_str},)" + ) if len(hsp) < 4: - raise ValueError(msg + ', at least 4 required') + raise ValueError(msg + ", at least 4 required") else: - warn(msg + ', fitting may be inaccurate') + warn(msg + ", fitting may be inaccurate") return hsp @verbose -def _fit_sphere_to_headshape(info, dig_kinds, verbose=None): +def _fit_sphere_to_headshape(info, dig_kinds, *, verbose=None): """Fit a sphere to the given head shape.""" hsp = get_fitting_dig(info, dig_kinds) - radius, origin_head = _fit_sphere(np.array(hsp), disp=False) + radius, origin_head = _fit_sphere(np.array(hsp)) # compute origin in device coordinates - dev_head_t = info['dev_head_t'] + dev_head_t = info["dev_head_t"] if dev_head_t is None: - dev_head_t = Transform('meg', 'head') - head_to_dev = _ensure_trans(dev_head_t, 'head', 'meg') + dev_head_t = Transform("meg", "head") + head_to_dev = _ensure_trans(dev_head_t, "head", "meg") origin_device = apply_trans(head_to_dev, origin_head) - logger.info('Fitted sphere radius:'.ljust(30) + '%0.1f mm' - % (radius * 1e3,)) + logger.info("Fitted sphere radius:".ljust(30) + f"{radius * 1e3:0.1f} mm") _check_head_radius(radius) # > 2 cm away from head center in X or Y is strange + o_mm = origin_head * 1e3 + o_d = origin_device * 1e3 if np.linalg.norm(origin_head[:2]) > 0.02: - warn('(X, Y) fit (%0.1f, %0.1f) more than 20 mm from ' - 'head frame origin' % tuple(1e3 * origin_head[:2])) - logger.info('Origin head coordinates:'.ljust(30) + - '%0.1f %0.1f %0.1f mm' % tuple(1e3 * origin_head)) - logger.info('Origin device coordinates:'.ljust(30) + - '%0.1f %0.1f %0.1f mm' % tuple(1e3 * origin_device)) + warn( + f"(X, Y) fit ({o_mm[0]:0.1f}, {o_mm[1]:0.1f}) " + "more than 20 mm from head frame origin" + ) + logger.info( + "Origin head coordinates:".ljust(30) + + f"{o_mm[0]:0.1f} {o_mm[1]:0.1f} {o_mm[2]:0.1f} mm" + ) + logger.info( + "Origin device coordinates:".ljust(30) + + f"{o_d[0]:0.1f} {o_d[1]:0.1f} {o_d[2]:0.1f} mm" + ) return radius, origin_head, origin_device -def _fit_sphere(points, disp='auto'): +def _fit_sphere(points): """Fit a sphere to an arbitrary set of points.""" - from scipy.optimize import fmin_cobyla - if isinstance(disp, str) and disp == 'auto': - disp = True if logger.level <= 20 else False - # initial guess for center and radius - radii = (np.max(points, axis=1) - np.min(points, axis=1)) / 2. - radius_init = radii.mean() - center_init = np.median(points, axis=0) - - # optimization - x0 = np.concatenate([center_init, [radius_init]]) - - def cost_fun(center_rad): - d = np.linalg.norm(points - center_rad[:3], axis=1) - center_rad[3] - d *= d - return d.sum() - - def constraint(center_rad): - return center_rad[3] # radius must be >= 0 - - x_opt = fmin_cobyla(cost_fun, x0, constraint, rhobeg=radius_init, - rhoend=radius_init * 1e-6, disp=disp) - - origin, radius = x_opt[:3], x_opt[3] + # linear least-squares sphere fit, see for example + # https://stackoverflow.com/a/78909044 + # TODO: At some point we should maybe reject outliers first... 
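+    # Linearization: ||p - c||**2 == r**2 expands to
+    #     2 * (p @ c) + (r**2 - ||c||**2) == ||p||**2,
+    # i.e. A @ x = b with x = [c_x, c_y, c_z, r**2 - ||c||**2]; the radius is
+    # recovered below as sqrt(||c||**2 + x[3]).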
+ A = np.c_[2 * points, np.ones((len(points), 1))] + b = (points**2).sum(axis=1) + x, _, _, _ = np.linalg.lstsq(A, b, rcond=1e-6) + origin = x[:3] + radius = np.sqrt(x[0] ** 2 + x[1] ** 2 + x[2] ** 2 + x[3]) return radius, origin -def _check_origin(origin, info, coord_frame='head', disp=False): +def _check_origin(origin, info, coord_frame="head", disp=False): """Check or auto-determine the origin.""" if isinstance(origin, str): - if origin != 'auto': - raise ValueError('origin must be a numerical array, or "auto", ' - 'not %s' % (origin,)) - if coord_frame == 'head': + if origin != "auto": + raise ValueError( + f'origin must be a numerical array, or "auto", not {origin}' + ) + if coord_frame == "head": R, origin = fit_sphere_to_headshape( - info, verbose=_verbose_safe_false(), units='m')[:2] - logger.info(' Automatic origin fit: head of radius %0.1f mm' - % (R * 1000.,)) + info, verbose=_verbose_safe_false(), units="m" + )[:2] + logger.info(f" Automatic origin fit: head of radius {R * 1000:0.1f} mm") del R else: - origin = (0., 0., 0.) + origin = (0.0, 0.0, 0.0) origin = np.array(origin, float) if origin.shape != (3,): - raise ValueError('origin must be a 3-element array') + raise ValueError("origin must be a 3-element array") if disp: - origin_str = ', '.join(['%0.1f' % (o * 1000) for o in origin]) - msg = (' Using origin %s mm in the %s frame' - % (origin_str, coord_frame)) - if coord_frame == 'meg' and info['dev_head_t'] is not None: - o_dev = apply_trans(info['dev_head_t'], origin) - origin_str = ', '.join('%0.1f' % (o * 1000,) for o in o_dev) - msg += ' (%s mm in the head frame)' % (origin_str,) + origin_str = ", ".join([f"{o * 1000:0.1f}" for o in origin]) + msg = f" Using origin {origin_str} mm in the {coord_frame} frame" + if coord_frame == "meg" and info["dev_head_t"] is not None: + o_dev = apply_trans(info["dev_head_t"], origin) + origin_str = ", ".join(f"{o * 1000:0.1f}" for o in o_dev) + msg += f" ({origin_str} mm in the head frame)" logger.info(msg) return origin @@ -1169,13 +1150,26 @@ def _check_origin(origin, info, coord_frame='head', disp=False): # ############################################################################ # Create BEM surfaces + @verbose -def make_watershed_bem(subject, subjects_dir=None, overwrite=False, - volume='T1', atlas=False, gcaatlas=False, preflood=None, - show=False, copy=True, T1=None, brainmask='ws.mgz', - verbose=None): +def make_watershed_bem( + subject, + subjects_dir=None, + overwrite=False, + volume="T1", + atlas=False, + gcaatlas=False, + preflood=None, + show=False, + copy=True, + T1=None, + brainmask="ws.mgz", + verbose=None, +): """Create BEM surfaces using the FreeSurfer watershed algorithm. + See :ref:`bem_watershed_algorithm` for additional information. + Parameters ---------- subject : str @@ -1185,9 +1179,9 @@ def make_watershed_bem(subject, subjects_dir=None, overwrite=False, volume : str Defaults to T1. atlas : bool - Specify the --atlas option for mri_watershed. + Specify the ``--atlas option`` for ``mri_watershed``. gcaatlas : bool - Specify the --brain_atlas option for mri_watershed. + Specify the ``--brain_atlas`` option for ``mri_watershed``. preflood : int Change the preflood height. show : bool @@ -1227,79 +1221,113 @@ def make_watershed_bem(subject, subjects_dir=None, overwrite=False, .. 
versionadded:: 0.10 """ - from .viz.misc import plot_bem env, mri_dir, bem_dir = _prepare_env(subject, subjects_dir) tempdir = _TempDir() # fsl and Freesurfer create some random junk in CWD - run_subprocess_env = partial(run_subprocess, env=env, - cwd=tempdir) + run_subprocess_env = partial(run_subprocess, env=env, cwd=tempdir) - subjects_dir = env['SUBJECTS_DIR'] # Set by _prepare_env() above. + subjects_dir = env["SUBJECTS_DIR"] # Set by _prepare_env() above. subject_dir = op.join(subjects_dir, subject) - ws_dir = op.join(bem_dir, 'watershed') + ws_dir = op.join(bem_dir, "watershed") T1_dir = op.join(mri_dir, volume) T1_mgz = T1_dir - if not T1_dir.endswith('.mgz'): - T1_mgz += '.mgz' + if not T1_dir.endswith(".mgz"): + T1_mgz += ".mgz" if not op.isdir(bem_dir): os.makedirs(bem_dir) - _check_fname(T1_mgz, overwrite='read', must_exist=True, name='MRI data') + _check_fname(T1_mgz, overwrite="read", must_exist=True, name="MRI data") if op.isdir(ws_dir): if not overwrite: - raise RuntimeError('%s already exists. Use the --overwrite option' - ' to recreate it.' % ws_dir) + raise RuntimeError( + f"{ws_dir} already exists. Use the --overwrite option to recreate it." + ) else: shutil.rmtree(ws_dir) # put together the command - cmd = ['mri_watershed'] + cmd = ["mri_watershed"] if preflood: - cmd += ["-h", "%s" % int(preflood)] + cmd += ["-h", f"{int(preflood)}"] if T1 is None: T1 = gcaatlas if T1: - cmd += ['-T1'] + cmd += ["-T1"] if gcaatlas: - fname = op.join(env['FREESURFER_HOME'], 'average', - 'RB_all_withskull_*.gca') + fname = op.join(env["FREESURFER_HOME"], "average", "RB_all_withskull_*.gca") fname = sorted(glob.glob(fname))[::-1][0] - logger.info('Using GCA atlas: %s' % (fname,)) - cmd += ['-atlas', '-brain_atlas', fname, - subject_dir + '/mri/transforms/talairach_with_skull.lta'] + + # check if FS>8 didn't generate talairach_with_skull.lta + talairach_with_skull_path = os.path.join( + subject_dir, "mri/transforms/talairach_with_skull.lta" + ) + if not os.path.exists(talairach_with_skull_path): + logger.info( + f"{talairach_with_skull_path} does not exist. Running mri_em_register." + ) + em_reg_cmd = [ + "mri_em_register", + "-skull", + subject_dir + "/mri/nu.mgz", + fname, + talairach_with_skull_path, + ] + + run_subprocess_env(em_reg_cmd) + + logger.info(f"Using GCA atlas: {fname}") + cmd += [ + "-atlas", + "-brain_atlas", + fname, + subject_dir + "/mri/transforms/talairach_with_skull.lta", + ] elif atlas: - cmd += ['-atlas'] + cmd += ["-atlas"] if op.exists(T1_mgz): - cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_mgz, - op.join(ws_dir, brainmask)] + cmd += [ + "-useSRAS", + "-surf", + op.join(ws_dir, subject), + T1_mgz, + op.join(ws_dir, brainmask), + ] else: - cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_dir, - op.join(ws_dir, brainmask)] + cmd += [ + "-useSRAS", + "-surf", + op.join(ws_dir, subject), + T1_dir, + op.join(ws_dir, brainmask), + ] # report and run - logger.info('\nRunning mri_watershed for BEM segmentation with the ' - 'following parameters:\n\nResults dir = %s\nCommand = %s\n' - % (ws_dir, ' '.join(cmd))) + logger.info( + "\nRunning mri_watershed for BEM segmentation with the following parameters:\n" + f"\nResults dir = {ws_dir}\nCommand = {' '.join(cmd)}\n" + ) os.makedirs(op.join(ws_dir)) run_subprocess_env(cmd) del tempdir # clean up directory if op.isfile(T1_mgz): new_info = _extract_volume_info(T1_mgz) if not new_info: - warn('nibabel is not available or the volume info is invalid.' 
- 'Volume info not updated in the written surface.') - surfs = ['brain', 'inner_skull', 'outer_skull', 'outer_skin'] + warn( + "nibabel is not available or the volume info is invalid. Volume info " + "not updated in the written surface." + ) + surfs = ["brain", "inner_skull", "outer_skull", "outer_skin"] for s in surfs: - surf_ws_out = op.join(ws_dir, '%s_%s_surface' % (subject, s)) + surf_ws_out = op.join(ws_dir, f"{subject}_{s}_surface") - rr, tris, volume_info = read_surface(surf_ws_out, - read_metadata=True) + rr, tris, volume_info = read_surface(surf_ws_out, read_metadata=True) # replace volume info, 'head' stays volume_info.update(new_info) - write_surface(surf_ws_out, rr, tris, volume_info=volume_info, - overwrite=True) + write_surface( + surf_ws_out, rr, tris, volume_info=volume_info, overwrite=True + ) # Create symbolic links - surf_out = op.join(bem_dir, '%s.surf' % s) + surf_out = op.join(bem_dir, f"{s}.surf") if not overwrite and op.exists(surf_out): skip_symlink = True else: @@ -1309,48 +1337,59 @@ def make_watershed_bem(subject, subjects_dir=None, overwrite=False, skip_symlink = False if skip_symlink: - logger.info("Unable to create all symbolic links to .surf files " - "in bem folder. Use --overwrite option to recreate " - "them.") - dest = op.join(bem_dir, 'watershed') + logger.info( + "Unable to create all symbolic links to .surf files in bem folder. Use " + "--overwrite option to recreate them." + ) + dest = op.join(bem_dir, "watershed") else: logger.info("Symbolic links to .surf files created in bem folder") dest = bem_dir - logger.info("\nThank you for waiting.\nThe BEM triangulations for this " - "subject are now available at:\n%s." % dest) + logger.info( + "\nThank you for waiting.\nThe BEM triangulations for this subject are now " + f"available at:\n{dest}." + ) # Write a head file for coregistration - fname_head = op.join(bem_dir, subject + '-head.fif') + fname_head = op.join(bem_dir, subject + "-head.fif") if op.isfile(fname_head): os.remove(fname_head) - surf = _surfaces_to_bem([op.join(ws_dir, subject + '_outer_skin_surface')], - [FIFF.FIFFV_BEM_SURF_ID_HEAD], sigmas=[1]) + surf = _surfaces_to_bem( + [op.join(ws_dir, subject + "_outer_skin_surface")], + [FIFF.FIFFV_BEM_SURF_ID_HEAD], + sigmas=[1], + ) write_bem_surfaces(fname_head, surf) # Show computed BEM surfaces if show: - plot_bem(subject=subject, subjects_dir=subjects_dir, - orientation='coronal', slices=None, show=True) + plot_bem( + subject=subject, + subjects_dir=subjects_dir, + orientation="coronal", + slices=None, + show=True, + ) - logger.info('Created %s\n\nComplete.' 
% (fname_head,)) + logger.info(f"Created {fname_head}\n\nComplete.") def _extract_volume_info(mgz): """Extract volume info from a mgz file.""" nib = _import_nibabel() header = nib.load(mgz).header - version = header['version'] + version = header["version"] vol_info = dict() if version == 1: - version = '%s # volume info valid' % version - vol_info['valid'] = version - vol_info['filename'] = mgz - vol_info['volume'] = header['dims'][:3] - vol_info['voxelsize'] = header['delta'] - vol_info['xras'], vol_info['yras'], vol_info['zras'] = header['Mdc'] - vol_info['cras'] = header['Pxyz_c'] + version = f"{version} # volume info valid" + vol_info["valid"] = version + vol_info["filename"] = mgz + vol_info["volume"] = header["dims"][:3] + vol_info["voxelsize"] = header["delta"] + vol_info["xras"], vol_info["yras"], vol_info["zras"] = header["Mdc"] + vol_info["cras"] = header["Pxyz_c"] return vol_info @@ -1358,9 +1397,11 @@ def _extract_volume_info(mgz): # ############################################################################ # Read + @verbose -def read_bem_surfaces(fname, patch_stats=False, s_id=None, on_defects='raise', - verbose=None): +def read_bem_surfaces( + fname, patch_stats=False, s_id=None, on_defects="raise", verbose=None +): """Read the BEM surfaces from a FIF file. Parameters @@ -1370,7 +1411,7 @@ def read_bem_surfaces(fname, patch_stats=False, s_id=None, on_defects='raise', patch_stats : bool, optional (default False) Calculate and add cortical patch statistics to the surfaces. s_id : int | None - If int, only read and return the surface with the given s_id. + If int, only read and return the surface with the given ``s_id``. An error will be raised if it doesn't exist. If None, all surfaces are read and returned. %(on_defects)s @@ -1381,7 +1422,7 @@ def read_bem_surfaces(fname, patch_stats=False, s_id=None, on_defects='raise', Returns ------- surf: list | dict - A list of dictionaries that each contain a surface. If s_id + A list of dictionaries that each contain a surface. If ``s_id`` is not None, only the requested surface will be returned. 
See Also @@ -1389,16 +1430,16 @@ def read_bem_surfaces(fname, patch_stats=False, s_id=None, on_defects='raise', write_bem_surfaces, write_bem_solution, make_bem_model """ # Open the file, create directory - _validate_type(s_id, ('int-like', None), 's_id') - fname = _check_fname(fname, 'read', True, 'fname') + _validate_type(s_id, ("int-like", None), "s_id") + fname = _check_fname(fname, "read", True, "fname") if fname.suffix == ".h5": surf = _read_bem_surfaces_h5(fname, s_id) else: surf = _read_bem_surfaces_fif(fname, s_id) if s_id is not None and len(surf) != 1: - raise ValueError('surface with id %d not found' % s_id) + raise ValueError(f"surface with id {s_id} not found") for this in surf: - if patch_stats or this['nn'] is None: + if patch_stats or this["nn"] is None: _check_complete_surface(this, incomplete=on_defects) return surf[0] if s_id is not None else surf @@ -1407,12 +1448,12 @@ def _read_bem_surfaces_h5(fname, s_id): read_hdf5, _ = _import_h5io_funcs() bem = read_hdf5(fname) try: - [s['id'] for s in bem['surfs']] + [s["id"] for s in bem["surfs"]] except Exception: # not our format - raise ValueError('BEM data not found') - surf = bem['surfs'] + raise ValueError("BEM data not found") + surf = bem["surfs"] if s_id is not None: - surf = [s for s in surf if s['id'] == s_id] + surf = [s for s in surf if s["id"] == s_id] return surf @@ -1424,32 +1465,33 @@ def _read_bem_surfaces_fif(fname, s_id): # Find BEM bem = dir_tree_find(tree, FIFF.FIFFB_BEM) if bem is None or len(bem) == 0: - raise ValueError('BEM data not found') + raise ValueError("BEM data not found") bem = bem[0] # Locate all surfaces bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF) if bemsurf is None: - raise ValueError('BEM surface data not found') + raise ValueError("BEM surface data not found") - logger.info(' %d BEM surfaces found' % len(bemsurf)) + logger.info(f" {len(bemsurf)} BEM surfaces found") # Coordinate frame possibly at the top level tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME) if tag is not None: coord_frame = tag.data # Read all surfaces if s_id is not None: - surf = [_read_bem_surface(fid, bsurf, coord_frame, s_id) - for bsurf in bemsurf] + surf = [ + _read_bem_surface(fid, bsurf, coord_frame, s_id) for bsurf in bemsurf + ] surf = [s for s in surf if s is not None] else: surf = list() for bsurf in bemsurf: - logger.info(' Reading a surface...') + logger.info(" Reading a surface...") this = _read_bem_surface(fid, bsurf, coord_frame) surf.append(this) - logger.info('[done]') - logger.info(' %d BEM surfaces read' % len(surf)) + logger.info("[done]") + logger.info(f" {len(surf)} BEM surfaces read") return surf @@ -1461,63 +1503,63 @@ def _read_bem_surface(fid, this, def_coord_frame, s_id=None): tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID) if tag is None: - res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN + res["id"] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN else: - res['id'] = int(tag.data) + res["id"] = int(tag.data.item()) - if s_id is not None and res['id'] != s_id: + if s_id is not None and res["id"] != s_id: return None tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA) - res['sigma'] = 1.0 if tag is None else float(tag.data) + res["sigma"] = 1.0 if tag is None else float(tag.data.item()) tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NNODE) if tag is None: - raise ValueError('Number of vertices not found') + raise ValueError("Number of vertices not found") - res['np'] = int(tag.data) + res["np"] = int(tag.data.item()) tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI) if tag is None: - raise ValueError('Number 
of triangles not found') - res['ntri'] = int(tag.data) + raise ValueError("Number of triangles not found") + res["ntri"] = int(tag.data.item()) tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME) if tag is None: tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME) if tag is None: - res['coord_frame'] = def_coord_frame + res["coord_frame"] = def_coord_frame else: - res['coord_frame'] = tag.data + res["coord_frame"] = int(tag.data.item()) else: - res['coord_frame'] = tag.data + res["coord_frame"] = int(tag.data.item()) # Vertices, normals, and triangles tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES) if tag is None: - raise ValueError('Vertex data not found') + raise ValueError("Vertex data not found") - res['rr'] = tag.data.astype(np.float64) - if res['rr'].shape[0] != res['np']: - raise ValueError('Vertex information is incorrect') + res["rr"] = tag.data.astype(np.float64) + if res["rr"].shape[0] != res["np"]: + raise ValueError("Vertex information is incorrect") tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS) if tag is None: tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS) if tag is None: - res['nn'] = None + res["nn"] = None else: - res['nn'] = tag.data.astype(np.float64) - if res['nn'].shape[0] != res['np']: - raise ValueError('Vertex normal information is incorrect') + res["nn"] = tag.data.astype(np.float64) + if res["nn"].shape[0] != res["np"]: + raise ValueError("Vertex normal information is incorrect") tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES) if tag is None: - raise ValueError('Triangulation not found') + raise ValueError("Triangulation not found") - res['tris'] = tag.data - 1 # index start at 0 in Python - if res['tris'].shape[0] != res['ntri']: - raise ValueError('Triangulation information is incorrect') + res["tris"] = tag.data - 1 # index start at 0 in Python + if res["tris"].shape[0] != res["ntri"]: + raise ValueError("Triangulation information is incorrect") return res @@ -1544,97 +1586,104 @@ def read_bem_solution(fname, *, verbose=None): make_bem_solution write_bem_solution """ - fname = _check_fname(fname, 'read', True, 'fname') + fname = _check_fname(fname, "read", True, "fname") # mirrors fwd_bem_load_surfaces from fwd_bem_model.c if fname.suffix == ".h5": read_hdf5, _ = _import_h5io_funcs() - logger.info('Loading surfaces and solution...') + logger.info("Loading surfaces and solution...") bem = read_hdf5(fname) - if 'solver' not in bem: - bem['solver'] = 'mne' + if "solver" not in bem: + bem["solver"] = "mne" else: bem = _read_bem_solution_fif(fname) - if len(bem['surfs']) == 3: - logger.info('Three-layer model surfaces loaded.') - needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD, - FIFF.FIFFV_BEM_SURF_ID_SKULL, - FIFF.FIFFV_BEM_SURF_ID_BRAIN]) - if not all(x['id'] in needed for x in bem['surfs']): - raise RuntimeError('Could not find necessary BEM surfaces') + if len(bem["surfs"]) == 3: + logger.info("Three-layer model surfaces loaded.") + needed = np.array( + [ + FIFF.FIFFV_BEM_SURF_ID_HEAD, + FIFF.FIFFV_BEM_SURF_ID_SKULL, + FIFF.FIFFV_BEM_SURF_ID_BRAIN, + ] + ) + if not all(x["id"] in needed for x in bem["surfs"]): + raise RuntimeError("Could not find necessary BEM surfaces") # reorder surfaces as necessary (shouldn't need to?) 
reorder = [None] * 3 - for x in bem['surfs']: - reorder[np.where(x['id'] == needed)[0][0]] = x - bem['surfs'] = reorder - elif len(bem['surfs']) == 1: - if not bem['surfs'][0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN: - raise RuntimeError('BEM Surfaces not found') - logger.info('Homogeneous model surface loaded.') - - assert set(bem.keys()) == set( - ('surfs', 'solution', 'bem_method', 'solver')) + for x in bem["surfs"]: + reorder[np.where(x["id"] == needed)[0][0]] = x + bem["surfs"] = reorder + elif len(bem["surfs"]) == 1: + if not bem["surfs"][0]["id"] == FIFF.FIFFV_BEM_SURF_ID_BRAIN: + raise RuntimeError("BEM Surfaces not found") + logger.info("Homogeneous model surface loaded.") + + assert set(bem.keys()) == set(("surfs", "solution", "bem_method", "solver")) bem = ConductorModel(bem) - bem['is_sphere'] = False + bem["is_sphere"] = False # sanity checks and conversions _check_option( - 'BEM approximation method', bem['bem_method'], - (FIFF.FIFFV_BEM_APPROX_LINEAR,)) # CONSTANT not supported + "BEM approximation method", bem["bem_method"], (FIFF.FIFFV_BEM_APPROX_LINEAR,) + ) # CONSTANT not supported dim = 0 - solver = bem.get('solver', 'mne') - _check_option('BEM solver', solver, ('mne', 'openmeeg')) - for si, surf in enumerate(bem['surfs']): - assert bem['bem_method'] == FIFF.FIFFV_BEM_APPROX_LINEAR - dim += surf['np'] - if solver == 'openmeeg' and si != 0: - dim += surf['ntri'] - dims = bem['solution'].shape + solver = bem.get("solver", "mne") + _check_option("BEM solver", solver, ("mne", "openmeeg")) + for si, surf in enumerate(bem["surfs"]): + assert bem["bem_method"] == FIFF.FIFFV_BEM_APPROX_LINEAR + dim += surf["np"] + if solver == "openmeeg" and si != 0: + dim += surf["ntri"] + dims = bem["solution"].shape if solver == "openmeeg": sz = (dim * (dim + 1)) // 2 if len(dims) != 1 or dims[0] != sz: raise RuntimeError( - 'For the given BEM surfaces, OpenMEEG should produce a ' - f'solution matrix of shape ({sz},) but got {dims}') - bem['nsol'] = dim + "For the given BEM surfaces, OpenMEEG should produce a " + f"solution matrix of shape ({sz},) but got {dims}" + ) + bem["nsol"] = dim else: if len(dims) != 2 and solver != "openmeeg": - raise RuntimeError('Expected a two-dimensional solution matrix ' - 'instead of a %d dimensional one' % dims[0]) + raise RuntimeError( + "Expected a two-dimensional solution matrix " + f"instead of a {dims[0]} dimensional one" + ) if dims[0] != dim or dims[1] != dim: - raise RuntimeError('Expected a %d x %d solution matrix instead of ' - 'a %d x %d one' % (dim, dim, dims[1], dims[0])) - bem['nsol'] = bem['solution'].shape[0] + raise RuntimeError( + f"Expected a {dim} x {dim} solution matrix instead of " + f"a {dims[1]} x {dims[0]} one" + ) + bem["nsol"] = bem["solution"].shape[0] # Gamma factors and multipliers _add_gamma_multipliers(bem) - extra = f'made by {solver}' if solver != 'mne' else '' - logger.info(f'Loaded linear collocation BEM solution{extra} from {fname}') + extra = f"made by {solver}" if solver != "mne" else "" + logger.info(f"Loaded linear collocation BEM solution{extra} from {fname}") return bem def _read_bem_solution_fif(fname): - logger.info('Loading surfaces...') - surfs = read_bem_surfaces( - fname, patch_stats=True, verbose=_verbose_safe_false()) + logger.info("Loading surfaces...") + surfs = read_bem_surfaces(fname, patch_stats=True, verbose=_verbose_safe_false()) # convert from surfaces to solution - logger.info('\nLoading the solution matrix...\n') - solver = 'mne' + logger.info("\nLoading the solution matrix...\n") + solver = "mne" f, 
tree, _ = fiff_open(fname) with f as fid: # Find the BEM data nodes = dir_tree_find(tree, FIFF.FIFFB_BEM) if len(nodes) == 0: - raise RuntimeError('No BEM data in %s' % fname) + raise RuntimeError(f"No BEM data in {fname}") bem_node = nodes[0] # Approximation method tag = find_tag(f, bem_node, FIFF.FIFF_DESCRIPTION) if tag is not None: tag = json.loads(tag.data) - solver = tag['solver'] + solver = tag["solver"] tag = find_tag(f, bem_node, FIFF.FIFF_BEM_APPROX) if tag is None: - raise RuntimeError('No BEM solution found in %s' % fname) + raise RuntimeError(f"No BEM solution found in {fname}") method = tag.data[0] tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION) sol = tag.data @@ -1644,73 +1693,79 @@ def _read_bem_solution_fif(fname): def _add_gamma_multipliers(bem): """Add gamma and multipliers in-place.""" - bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']]) + bem["sigma"] = np.array([surf["sigma"] for surf in bem["surfs"]]) # Dirty trick for the zero conductivity outside - sigma = np.r_[0.0, bem['sigma']] - bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1]) - bem['field_mult'] = sigma[1:] - sigma[:-1] + sigma = np.r_[0.0, bem["sigma"]] + bem["source_mult"] = 2.0 / (sigma[1:] + sigma[:-1]) + bem["field_mult"] = sigma[1:] - sigma[:-1] # make sure subsequent "zip"s work correctly - assert len(bem['surfs']) == len(bem['field_mult']) - bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] / - (sigma[1:] + sigma[:-1])[:, np.newaxis]) + assert len(bem["surfs"]) == len(bem["field_mult"]) + bem["gamma"] = (sigma[1:] - sigma[:-1])[np.newaxis, :] / (sigma[1:] + sigma[:-1])[ + :, np.newaxis + ] # In our BEM code we do not model the CSF so we assign the innermost surface # the id BRAIN. Our 4-layer sphere we model CSF (at least by default), so when # searching for and referring to surfaces we need to keep track of this. 
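The `_add_gamma_multipliers` hunk above derives the per-surface source/field multipliers and the gamma matrix purely from the layer conductivities. A minimal standalone sketch of those formulas, assuming illustrative three-layer conductivities (these values are example inputs, not taken from this diff):

import numpy as np

# Illustrative conductivities (inner skull, outer skull, scalp), in S/m.
layer_sigma = np.array([0.3, 0.006, 0.3])

# Prepend zero conductivity for the non-conducting space outside the head,
# as the code above does.
sigma = np.r_[0.0, layer_sigma]

source_mult = 2.0 / (sigma[1:] + sigma[:-1])  # one multiplier per surface
field_mult = sigma[1:] - sigma[:-1]           # conductivity jump across each surface
gamma = (sigma[1:] - sigma[:-1])[np.newaxis, :] / (sigma[1:] + sigma[:-1])[:, np.newaxis]

print(source_mult, field_mult, gamma.shape)   # gamma is (n_surfaces, n_surfaces)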
-_sm_surf_dict = OrderedDict([ - ('brain', FIFF.FIFFV_BEM_SURF_ID_BRAIN), - ('inner_skull', FIFF.FIFFV_BEM_SURF_ID_CSF), - ('outer_skull', FIFF.FIFFV_BEM_SURF_ID_SKULL), - ('head', FIFF.FIFFV_BEM_SURF_ID_HEAD), -]) +_sm_surf_dict = OrderedDict( + [ + ("brain", FIFF.FIFFV_BEM_SURF_ID_BRAIN), + ("inner_skull", FIFF.FIFFV_BEM_SURF_ID_CSF), + ("outer_skull", FIFF.FIFFV_BEM_SURF_ID_SKULL), + ("head", FIFF.FIFFV_BEM_SURF_ID_HEAD), + ] +) _bem_surf_dict = { - 'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN, - 'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL, - 'head': FIFF.FIFFV_BEM_SURF_ID_HEAD, + "inner_skull": FIFF.FIFFV_BEM_SURF_ID_BRAIN, + "outer_skull": FIFF.FIFFV_BEM_SURF_ID_SKULL, + "head": FIFF.FIFFV_BEM_SURF_ID_HEAD, } _bem_surf_name = { - FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull', - FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull', - FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ', - FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ', + FIFF.FIFFV_BEM_SURF_ID_BRAIN: "inner skull", + FIFF.FIFFV_BEM_SURF_ID_SKULL: "outer skull", + FIFF.FIFFV_BEM_SURF_ID_HEAD: "outer skin ", + FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: "unknown ", + FIFF.FIFFV_MNE_SURF_MEG_HELMET: "MEG helmet ", } _sm_surf_name = { - FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'brain', - FIFF.FIFFV_BEM_SURF_ID_CSF: 'csf', - FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull', - FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ', - FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ', + FIFF.FIFFV_BEM_SURF_ID_BRAIN: "brain", + FIFF.FIFFV_BEM_SURF_ID_CSF: "csf", + FIFF.FIFFV_BEM_SURF_ID_SKULL: "outer skull", + FIFF.FIFFV_BEM_SURF_ID_HEAD: "outer skin ", + FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: "unknown ", + FIFF.FIFFV_MNE_SURF_MEG_HELMET: "helmet", } def _bem_find_surface(bem, id_): """Find surface from already-loaded conductor model.""" - if bem['is_sphere']: + if bem["is_sphere"]: _surf_dict = _sm_surf_dict _name_dict = _sm_surf_name - kind = 'Sphere model' - tri = 'boundary' + kind = "Sphere model" + tri = "boundary" else: _surf_dict = _bem_surf_dict _name_dict = _bem_surf_name - kind = 'BEM' - tri = 'triangulation' + kind = "BEM" + tri = "triangulation" if isinstance(id_, str): name = id_ id_ = _surf_dict[id_] else: name = _name_dict[id_] - kind = 'Sphere model' if bem['is_sphere'] else 'BEM' - idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0] + kind = "Sphere model" if bem["is_sphere"] else "BEM" + idx = np.where(np.array([s["id"] for s in bem["surfs"]]) == id_)[0] if len(idx) != 1: - raise RuntimeError(f'{kind} does not have the {name} {tri}') - return bem['surfs'][idx[0]] + raise RuntimeError(f"{kind} does not have the {name} {tri}") + return bem["surfs"][idx[0]] # ############################################################################ # Write + @verbose def write_bem_surfaces(fname, surfs, overwrite=False, *, verbose=None): """Write BEM surfaces to a FIF file. 
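As a usage note for the I/O functions touched above (read_bem_surfaces, read_bem_solution, write_bem_surfaces), a minimal sketch; the file names are placeholders, not paths from this diff:

import mne

surfs = mne.read_bem_surfaces("sample-bem.fif", patch_stats=True)
print(f"{len(surfs)} surface(s), first has {surfs[0]['np']} vertices")
mne.write_bem_surfaces("copy-bem.fif", surfs, overwrite=True)

bem = mne.read_bem_solution("sample-bem-sol.fif")
print(bem["nsol"], bem.get("solver", "mne"))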
@@ -1726,7 +1781,7 @@ def write_bem_surfaces(fname, surfs, overwrite=False, *, verbose=None): """ if isinstance(surfs, dict): surfs = [surfs] - fname = _check_fname(fname, overwrite=overwrite, name='fname') + fname = _check_fname(fname, overwrite=overwrite, name="fname") if fname.suffix == ".h5": _, write_hdf5 = _import_h5io_funcs() @@ -1734,14 +1789,15 @@ def write_bem_surfaces(fname, surfs, overwrite=False, *, verbose=None): else: with start_and_end_file(fname) as fid: start_block(fid, FIFF.FIFFB_BEM) - write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]['coord_frame']) + write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]["coord_frame"]) _write_bem_surfaces_block(fid, surfs) end_block(fid, FIFF.FIFFB_BEM) @verbose -def write_head_bem(fname, rr, tris, on_defects='raise', overwrite=False, - *, verbose=None): +def write_head_bem( + fname, rr, tris, on_defects="raise", overwrite=False, *, verbose=None +): """Write a head surface to a FIF file. Parameters @@ -1757,9 +1813,13 @@ def write_head_bem(fname, rr, tris, on_defects='raise', overwrite=False, %(overwrite)s %(verbose)s """ - surf = _surfaces_to_bem([dict(rr=rr, tris=tris)], - [FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], rescale=False, - incomplete=on_defects) + surf = _surfaces_to_bem( + [dict(rr=rr, tris=tris)], + [FIFF.FIFFV_BEM_SURF_ID_HEAD], + [1], + rescale=False, + incomplete=on_defects, + ) write_bem_surfaces(fname, surf, overwrite=overwrite) @@ -1767,17 +1827,17 @@ def _write_bem_surfaces_block(fid, surfs): """Write bem surfaces to open file handle.""" for surf in surfs: start_block(fid, FIFF.FIFFB_BEM_SURF) - write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma']) - write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id']) - write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf['coord_frame']) - write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np']) - write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri']) - write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr']) + if "sigma" in surf: + write_float(fid, FIFF.FIFF_BEM_SIGMA, surf["sigma"]) + write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf["id"]) + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf["coord_frame"]) + write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf["np"]) + write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf["ntri"]) + write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf["rr"]) # index start at 0 in Python - write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES, - surf['tris'] + 1) - if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0: - write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf['nn']) + write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES, surf["tris"] + 1) + if "nn" in surf and surf["nn"] is not None and len(surf["nn"]) > 0: + write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf["nn"]) end_block(fid, FIFF.FIFFB_BEM_SURF) @@ -1798,42 +1858,40 @@ def write_bem_solution(fname, bem, overwrite=False, *, verbose=None): -------- read_bem_solution """ - fname = _check_fname(fname, overwrite=overwrite, name='fname') + fname = _check_fname(fname, overwrite=overwrite, name="fname") if fname.suffix == ".h5": _, write_hdf5 = _import_h5io_funcs() - bem = {k: bem[k] for k in ('surfs', 'solution', 'bem_method')} + bem = {k: bem[k] for k in ("surfs", "solution", "bem_method")} write_hdf5(fname, bem, overwrite=True) else: _write_bem_solution_fif(fname, bem) def _write_bem_solution_fif(fname, bem): - _check_bem_size(bem['surfs']) + _check_bem_size(bem["surfs"]) with start_and_end_file(fname) as fid: start_block(fid, FIFF.FIFFB_BEM) # Coordinate frame (mainly for backward compatibility) - 
write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, - bem['surfs'][0]['coord_frame']) - solver = bem.get('solver', 'mne') - if solver != 'mne': - write_string( - fid, FIFF.FIFF_DESCRIPTION, json.dumps(dict(solver=solver))) + write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, bem["surfs"][0]["coord_frame"]) + solver = bem.get("solver", "mne") + if solver != "mne": + write_string(fid, FIFF.FIFF_DESCRIPTION, json.dumps(dict(solver=solver))) # Surfaces - _write_bem_surfaces_block(fid, bem['surfs']) + _write_bem_surfaces_block(fid, bem["surfs"]) # The potential solution - if 'solution' in bem: + if "solution" in bem: _check_option( - 'bem_method', bem['bem_method'], - (FIFF.FIFFV_BEM_APPROX_LINEAR,)) + "bem_method", bem["bem_method"], (FIFF.FIFFV_BEM_APPROX_LINEAR,) + ) write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR) - write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION, - bem['solution']) + write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION, bem["solution"]) end_block(fid, FIFF.FIFFB_BEM) # ############################################################################# # Create 3-Layers BEM model from Flash MRI images + def _prepare_env(subject, subjects_dir): """Prepare an env object for subprocess calls.""" env = os.environ.copy() @@ -1845,18 +1903,17 @@ def _prepare_env(subject, subjects_dir): subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) subject_dir = subjects_dir / subject if not subject_dir.is_dir(): - raise RuntimeError('Could not find the subject data directory "%s"' - % (subject_dir,)) - env.update(SUBJECT=subject, SUBJECTS_DIR=str(subjects_dir), - FREESURFER_HOME=fs_home) + raise RuntimeError(f'Could not find the subject data directory "{subject_dir}"') + env.update(SUBJECT=subject, SUBJECTS_DIR=str(subjects_dir), FREESURFER_HOME=fs_home) mri_dir = subject_dir / "mri" bem_dir = subject_dir / "bem" return env, mri_dir, bem_dir def _write_echos(mri_dir, flash_echos, angle): - nib = _import_nibabel('write echoes') + nib = _import_nibabel("write echoes") from nibabel.spatialimages import SpatialImage + if _path_like(flash_echos): flash_echos = nib.load(flash_echos) if isinstance(flash_echos, SpatialImage): @@ -1867,8 +1924,7 @@ def _write_echos(mri_dir, flash_echos, angle): data = data[..., np.newaxis] for echo_idx in range(data.shape[3]): this_echo_img = flash_echos.__class__( - data[..., echo_idx], affine=affine, - header=deepcopy(flash_echos.header) + data[..., echo_idx], affine=affine, header=deepcopy(flash_echos.header) ) flash_echo_imgs.append(this_echo_img) flash_echos = flash_echo_imgs @@ -1876,13 +1932,13 @@ def _write_echos(mri_dir, flash_echos, angle): for idx, flash_echo in enumerate(flash_echos, 1): if _path_like(flash_echo): flash_echo = nib.load(flash_echo) - nib.save(flash_echo, - op.join(mri_dir, 'flash', f'mef{angle}_{idx:03d}.mgz')) + nib.save(flash_echo, op.join(mri_dir, "flash", f"mef{angle}_{idx:03d}.mgz")) @verbose -def convert_flash_mris(subject, flash30=True, unwarp=False, - subjects_dir=None, flash5=True, verbose=None): +def convert_flash_mris( + subject, flash30=True, unwarp=False, subjects_dir=None, flash5=True, verbose=None +): """Synthesize the flash 5 files for use with make_flash_bem. This function aims to produce a synthesized flash 5 MRI from @@ -1895,8 +1951,7 @@ def convert_flash_mris(subject, flash30=True, unwarp=False, Parameters ---------- - subject : str - Subject name. + %(subject)s flash30 : bool | list of SpatialImage or path-like | SpatialImage | path-like If False do not use 30-degree flip angle data. 
The list of flash 5 echos to use. If True it will look for files @@ -1930,32 +1985,30 @@ def convert_flash_mris(subject, flash30=True, unwarp=False, """ # noqa: E501 env, mri_dir = _prepare_env(subject, subjects_dir)[:2] tempdir = _TempDir() # fsl and Freesurfer create some random junk in CWD - run_subprocess_env = partial(run_subprocess, env=env, - cwd=tempdir) + run_subprocess_env = partial(run_subprocess, env=env, cwd=tempdir) mri_dir = Path(mri_dir) # Step 1a : Data conversion to mgz format flash_dir = mri_dir / "flash" - pm_dir = flash_dir / 'parameter_maps' + pm_dir = flash_dir / "parameter_maps" pm_dir.mkdir(parents=True, exist_ok=True) echos_done = 0 if not isinstance(flash5, bool): - _write_echos(mri_dir, flash5, angle='05') + _write_echos(mri_dir, flash5, angle="05") if not isinstance(flash30, bool): - _write_echos(mri_dir, flash30, angle='30') + _write_echos(mri_dir, flash30, angle="30") # Step 1b : Run grad_unwarp on converted files template = op.join(flash_dir, "mef*_*.mgz") files = sorted(glob.glob(template)) if len(files) == 0: - raise ValueError('No suitable source files found (%s)' % template) + raise ValueError(f"No suitable source files found ({template})") if unwarp: logger.info("\n---- Unwarp mgz data sets ----") for infile in files: outfile = infile.replace(".mgz", "u.mgz") - cmd = ['grad_unwarp', '-i', infile, '-o', outfile, '-unwarp', - 'true'] + cmd = ["grad_unwarp", "-i", infile, "-o", outfile, "-unwarp", "true"] run_subprocess_env(cmd) # Clear parameter maps if some of the data were reconverted if echos_done > 0 and pm_dir.exists(): @@ -1969,20 +2022,24 @@ def convert_flash_mris(subject, flash30=True, unwarp=False, if unwarp: files = sorted(glob.glob(op.join(flash_dir, "mef05_*u.mgz"))) if len(os.listdir(pm_dir)) == 0: - cmd = (['mri_ms_fitparms'] + files + [str(pm_dir)]) + cmd = ["mri_ms_fitparms"] + files + [str(pm_dir)] run_subprocess_env(cmd) else: logger.info("Parameter maps were already computed") # Step 3 : Synthesize the flash 5 images logger.info("\n---- Synthesizing flash 5 images ----") - if not (pm_dir / 'flash5.mgz').exists(): - cmd = ['mri_synthesize', '20', '5', '5', - (pm_dir / 'T1.mgz'), - (pm_dir / 'PD.mgz'), - (pm_dir / 'flash5.mgz') - ] + if not (pm_dir / "flash5.mgz").exists(): + cmd = [ + "mri_synthesize", + "20", + "5", + "5", + (pm_dir / "T1.mgz"), + (pm_dir / "PD.mgz"), + (pm_dir / "flash5.mgz"), + ] run_subprocess_env(cmd) - (pm_dir / 'flash5_reg.mgz').unlink() + (pm_dir / "flash5_reg.mgz").unlink(missing_ok=True) else: logger.info("Synthesized flash 5 volume is already there") else: @@ -1990,24 +2047,34 @@ def convert_flash_mris(subject, flash30=True, unwarp=False, template = "mef05_*u.mgz" if unwarp else "mef05_*.mgz" files = sorted(flash_dir.glob(template)) if len(files) == 0: - raise ValueError('No suitable source files found (%s)' % template) - cmd = (['mri_average', '-noconform'] + files + [pm_dir / 'flash5.mgz']) + raise ValueError(f"No suitable source files found ({template})") + cmd = ["mri_average", "-noconform"] + files + [pm_dir / "flash5.mgz"] run_subprocess_env(cmd) - (pm_dir / 'flash5_reg.mgz').unlink(missing_ok=True) + (pm_dir / "flash5_reg.mgz").unlink(missing_ok=True) del tempdir # finally done running subprocesses - assert (pm_dir / 'flash5.mgz').exists() - return pm_dir / 'flash5.mgz' + assert (pm_dir / "flash5.mgz").exists() + return pm_dir / "flash5.mgz" @verbose -def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None, - copy=True, *, flash5_img=None, register=True, verbose=None): +def 
make_flash_bem( + subject, + overwrite=False, + show=True, + subjects_dir=None, + copy=True, + *, + flash5_img=None, + register=True, + verbose=None, +): """Create 3-Layer BEM model from prepared flash MRI images. + See :ref:`bem_flash_algorithm` for additional information. + Parameters ---------- - subject : str - Subject name. + %(subject)s overwrite : bool Write over existing .surf files in bem folder. show : bool @@ -2046,50 +2113,53 @@ def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None, outer skin) from a FLASH 5 MRI image synthesized from multiecho FLASH images acquired with spin angles of 5 and 30 degrees. """ - from .viz.misc import plot_bem - env, mri_dir, bem_dir = _prepare_env(subject, subjects_dir) tempdir = _TempDir() # fsl and Freesurfer create some random junk in CWD - run_subprocess_env = partial(run_subprocess, env=env, - cwd=tempdir) + run_subprocess_env = partial(run_subprocess, env=env, cwd=tempdir) mri_dir = Path(mri_dir) bem_dir = Path(bem_dir) - subjects_dir = env['SUBJECTS_DIR'] - flash_path = (mri_dir / 'flash' / 'parameter_maps').resolve() + subjects_dir = env["SUBJECTS_DIR"] + flash_path = (mri_dir / "flash" / "parameter_maps").resolve() flash_path.mkdir(exist_ok=True, parents=True) - logger.info('\nProcessing the flash MRI data to produce BEM meshes with ' - 'the following parameters:\n' - 'SUBJECTS_DIR = %s\n' - 'SUBJECT = %s\n' - 'Result dir = %s\n' % (subjects_dir, subject, - bem_dir / 'flash')) + logger.info( + "\nProcessing the flash MRI data to produce BEM meshes with the following " + f"parameters:\nSUBJECTS_DIR = {subjects_dir}\nSUBJECT = {subject}\nResult dir =" + f"{bem_dir / 'flash'}\n" + ) # Step 4 : Register with MPRAGE - flash5 = flash_path / 'flash5.mgz' + flash5 = flash_path / "flash5.mgz" if _path_like(flash5_img): logger.info(f"Copying flash 5 image {flash5_img} to {flash5}") - cmd = ['mri_convert', Path(flash5_img).resolve(), flash5] + cmd = ["mri_convert", Path(flash5_img).resolve(), flash5] run_subprocess_env(cmd) elif flash5_img is None: if not flash5.exists(): - raise ValueError(f'Flash 5 image cannot be found at {flash5}.') + raise ValueError(f"Flash 5 image cannot be found at {flash5}.") else: logger.info(f"Writing flash 5 image at {flash5}") - nib = _import_nibabel('write an MRI image') + nib = _import_nibabel("write an MRI image") nib.save(flash5_img, flash5) if register: logger.info("\n---- Registering flash 5 with T1 MPRAGE ----") - flash5_reg = flash_path / 'flash5_reg.mgz' + flash5_reg = flash_path / "flash5_reg.mgz" if not flash5_reg.exists(): - if (mri_dir / 'T1.mgz').exists(): - ref_volume = mri_dir / 'T1.mgz' + if (mri_dir / "T1.mgz").exists(): + ref_volume = mri_dir / "T1.mgz" else: - ref_volume = mri_dir / 'T1' - cmd = ['fsl_rigid_register', '-r', str(ref_volume), '-i', - str(flash5), '-o', str(flash5_reg)] + ref_volume = mri_dir / "T1" + cmd = [ + "fsl_rigid_register", + "-r", + str(ref_volume), + "-i", + str(flash5), + "-o", + str(flash5_reg), + ] run_subprocess_env(cmd) else: logger.info("Registered flash 5 image is already there") @@ -2098,62 +2168,61 @@ def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None, # Step 5a : Convert flash5 into COR logger.info("\n---- Converting flash5 volume into COR format ----") - flash5_dir = mri_dir / 'flash5' + flash5_dir = mri_dir / "flash5" shutil.rmtree(flash5_dir, ignore_errors=True) flash5_dir.mkdir(exist_ok=True, parents=True) - cmd = ['mri_convert', flash5_reg, flash5_dir] + cmd = ["mri_convert", flash5_reg, flash5_dir] 
run_subprocess_env(cmd) # Step 5b and c : Convert the mgz volumes into COR convert_T1 = False - T1_dir = mri_dir / 'T1' - if not T1_dir.is_dir() or next(T1_dir.glob('COR*')) is None: + T1_dir = mri_dir / "T1" + if not T1_dir.is_dir() or next(T1_dir.glob("COR*")) is None: convert_T1 = True convert_brain = False - brain_dir = mri_dir / 'brain' - if not brain_dir.is_dir() or next(brain_dir.glob('COR*')) is None: + brain_dir = mri_dir / "brain" + if not brain_dir.is_dir() or next(brain_dir.glob("COR*")) is None: convert_brain = True logger.info("\n---- Converting T1 volume into COR format ----") if convert_T1: - T1_fname = mri_dir / 'T1.mgz' + T1_fname = mri_dir / "T1.mgz" if not T1_fname.is_file(): raise RuntimeError("Both T1 mgz and T1 COR volumes missing.") T1_dir.mkdir(exist_ok=True, parents=True) - cmd = ['mri_convert', T1_fname, T1_dir] + cmd = ["mri_convert", T1_fname, T1_dir] run_subprocess_env(cmd) else: logger.info("T1 volume is already in COR format") logger.info("\n---- Converting brain volume into COR format ----") if convert_brain: - brain_fname = mri_dir / 'brain.mgz' + brain_fname = mri_dir / "brain.mgz" if not brain_fname.is_file(): raise RuntimeError("Both brain mgz and brain COR volumes missing.") brain_dir.mkdir(exist_ok=True, parents=True) - cmd = ['mri_convert', brain_fname, brain_dir] + cmd = ["mri_convert", brain_fname, brain_dir] run_subprocess_env(cmd) else: logger.info("Brain volume is already in COR format") # Finally ready to go logger.info("\n---- Creating the BEM surfaces ----") - cmd = ['mri_make_bem_surfaces', subject] + cmd = ["mri_make_bem_surfaces", subject] run_subprocess_env(cmd) del tempdir # ran our last subprocess; clean up directory logger.info("\n---- Converting the tri files into surf files ----") - flash_bem_dir = bem_dir / 'flash' + flash_bem_dir = bem_dir / "flash" flash_bem_dir.mkdir(exist_ok=True, parents=True) - surfs = ['inner_skull', 'outer_skull', 'outer_skin'] + surfs = ["inner_skull", "outer_skull", "outer_skin"] for surf in surfs: - out_fname = flash_bem_dir / (surf + '.tri') - shutil.move(bem_dir / (surf + '.tri'), out_fname) + out_fname = flash_bem_dir / (surf + ".tri") + shutil.move(bem_dir / (surf + ".tri"), out_fname) nodes, tris = read_tri(out_fname, swap=True) # Do not write volume info here because the tris are already in # standard Freesurfer coords - write_surface(op.splitext(out_fname)[0] + '.surf', nodes, tris, - overwrite=True) + write_surface(op.splitext(out_fname)[0] + ".surf", nodes, tris, overwrite=True) # Cleanup section logger.info("\n---- Cleaning up ----") - (bem_dir / 'inner_skull_tmp.tri').unlink() + (bem_dir / "inner_skull_tmp.tri").unlink() if convert_T1: shutil.rmtree(T1_dir) logger.info("Deleted the T1 COR volume") @@ -2166,7 +2235,7 @@ def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None, logger.info("\n---- Creating symbolic links ----") # os.chdir(bem_dir) for surf in surfs: - surf = bem_dir / (surf + '.surf') + surf = bem_dir / (surf + ".surf") if not overwrite and surf.exists(): skip_symlink = True else: @@ -2175,28 +2244,37 @@ def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None, _symlink(flash_bem_dir / surf.name, surf, copy) skip_symlink = False if skip_symlink: - logger.info("Unable to create all symbolic links to .surf files " - "in bem folder. Use --overwrite option to recreate them.") - dest = bem_dir / 'flash' + logger.info( + "Unable to create all symbolic links to .surf files " + "in bem folder. Use --overwrite option to recreate them." 
+ ) + dest = bem_dir / "flash" else: logger.info("Symbolic links to .surf files created in bem folder") dest = bem_dir - logger.info("\nThank you for waiting.\nThe BEM triangulations for this " - "subject are now available at:\n%s.\nWe hope the BEM meshes " - "created will facilitate your MEG and EEG data analyses." - % dest) + logger.info( + "\nThank you for waiting.\nThe BEM triangulations for this " + f"subject are now available at:\n{dest}.\nWe hope the BEM meshes " + "created will facilitate your MEG and EEG data analyses." + ) # Show computed BEM surfaces if show: - plot_bem(subject=subject, subjects_dir=subjects_dir, - orientation='coronal', slices=None, show=True) + plot_bem( + subject=subject, + subjects_dir=subjects_dir, + orientation="coronal", + slices=None, + show=True, + ) def _check_bem_size(surfs): """Check bem surface sizes.""" - if len(surfs) > 1 and surfs[0]['np'] > 10000: - warn('The bem surfaces have %s data points. 5120 (ico grade=4) ' - 'should be enough. Dense 3-layer bems may not save properly.' % - surfs[0]['np']) + if len(surfs) > 1 and surfs[0]["np"] > 10000: + warn( + f"The bem surfaces have {surfs[0]['np']} data points. 5120 (ico grade=4) " + "should be enough. Dense 3-layer bems may not save properly." + ) def _symlink(src, dest, copy=False): @@ -2206,40 +2284,41 @@ def _symlink(src, dest, copy=False): try: os.symlink(src_link, dest) except OSError: - warn('Could not create symbolic link %s. Check that your ' - 'partition handles symbolic links. The file will be copied ' - 'instead.' % dest) + warn( + f"Could not create symbolic link {dest}. Check that your " + "partition handles symbolic links. The file will be copied " + "instead." + ) copy = True if copy: shutil.copy(src, dest) -def _ensure_bem_surfaces(bem, extra_allow=(), name='bem'): +def _ensure_bem_surfaces(bem, extra_allow=(), name="bem"): # by default only allow path-like and list, but handle None and # ConductorModel properly if need be. Always return a ConductorModel # even though it's incomplete (and might have is_sphere=True). 
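For the FLASH workflow in the hunks above (convert_flash_mris followed by make_flash_bem), a sketch of the intended call order; it assumes a configured FreeSurfer environment, and the subject name and subjects_dir are placeholders:

import mne

subjects_dir = "/path/to/subjects_dir"  # placeholder
flash5 = mne.bem.convert_flash_mris("sample", flash30=True, subjects_dir=subjects_dir)
mne.bem.make_flash_bem(
    "sample",
    subjects_dir=subjects_dir,
    flash5_img=flash5,   # path returned by convert_flash_mris
    register=True,
    show=False,
)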
assert all(extra in (None, ConductorModel) for extra in extra_allow) - allowed = ('path-like', list) + extra_allow + allowed = ("path-like", list) + extra_allow _validate_type(bem, allowed, name) if isinstance(bem, path_like): # Load the surfaces - logger.info(f'Loading BEM surfaces from {str(bem)}...') + logger.info(f"Loading BEM surfaces from {bem}...") bem = read_bem_surfaces(bem) bem = ConductorModel(is_sphere=False, surfs=bem) elif isinstance(bem, list): for ii, this_surf in enumerate(bem): - _validate_type(this_surf, dict, f'{name}[{ii}]') + _validate_type(this_surf, dict, f"{name}[{ii}]") if isinstance(bem, list): bem = ConductorModel(is_sphere=False, surfs=bem) # add surfaces in the spherical case - if isinstance(bem, ConductorModel) and bem['is_sphere']: + if isinstance(bem, ConductorModel) and bem["is_sphere"]: bem = bem.copy() - bem['surfs'] = [] - if len(bem['layers']) == 4: + bem["surfs"] = [] + if len(bem["layers"]) == 4: for idx, id_ in enumerate(_sm_surf_dict.values()): - bem['surfs'].append(_complete_sphere_surf( - bem, idx, 4, complete=False)) - bem['surfs'][-1]['id'] = id_ + bem["surfs"].append(_complete_sphere_surf(bem, idx, 4, complete=False)) + bem["surfs"][-1]["id"] = id_ return bem @@ -2247,7 +2326,7 @@ def _ensure_bem_surfaces(bem, extra_allow=(), name='bem'): def _check_file(fname, overwrite): """Prevent overwrites.""" if op.isfile(fname) and not overwrite: - raise IOError(f'File {fname} exists, use --overwrite to overwrite it') + raise OSError(f"File {fname} exists, use --overwrite to overwrite it") _tri_levels = dict( @@ -2257,9 +2336,17 @@ def _check_file(fname, overwrite): @verbose -def make_scalp_surfaces(subject, subjects_dir=None, force=True, - overwrite=False, no_decimate=False, *, - threshold=20, mri='T1.mgz', verbose=None): +def make_scalp_surfaces( + subject, + subjects_dir=None, + force=True, + overwrite=False, + no_decimate=False, + *, + threshold=20, + mri="T1.mgz", + verbose=None, +): """Create surfaces of the scalp and neck. The scalp surfaces are required for using the MNE coregistration GUI, and @@ -2291,22 +2378,23 @@ def make_scalp_surfaces(subject, subjects_dir=None, force=True, %(verbose)s """ subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) - incomplete = 'warn' if force else 'raise' + incomplete = "warn" if force else "raise" subj_path = subjects_dir / subject if not subj_path.exists(): - raise RuntimeError('%s does not exist. Please check your subject ' - 'directory path.' % subj_path) + raise RuntimeError( + f"{subj_path} does not exist. Please check your subject directory path." + ) # Backward compat for old FreeSurfer (?) - _validate_type(mri, str, 'mri') - if mri == 'T1.mgz': + _validate_type(mri, str, "mri") + if mri == "T1.mgz": mri = mri if (subj_path / "mri" / mri).exists() else "T1" - logger.info('1. Creating a dense scalp tessellation with mkheadsurf...') + logger.info("1. 
Creating a dense scalp tessellation with mkheadsurf...") def check_seghead(surf_path=subj_path / "surf"): surf = None - for k in ['lh.seghead', 'lh.smseghead']: + for k in ["lh.seghead", "lh.smseghead"]: this_surf = surf_path / k if this_surf.exists(): surf = this_surf @@ -2314,61 +2402,79 @@ def check_seghead(surf_path=subj_path / "surf"): return surf my_seghead = check_seghead() - threshold = _ensure_int(threshold, 'threshold') + threshold = _ensure_int(threshold, "threshold") if my_seghead is None: this_env = deepcopy(os.environ) - this_env['SUBJECTS_DIR'] = str(subjects_dir) - this_env['SUBJECT'] = subject - this_env['subjdir'] = str(subj_path) - if 'FREESURFER_HOME' not in this_env: + this_env["SUBJECTS_DIR"] = str(subjects_dir) + this_env["SUBJECT"] = subject + this_env["subjdir"] = str(subj_path) + if "FREESURFER_HOME" not in this_env: raise RuntimeError( - 'The FreeSurfer environment needs to be set up to use ' - 'make_scalp_surfaces to create the outer skin surface ' - 'lh.seghead') - run_subprocess([ - 'mkheadsurf', '-subjid', subject, '-srcvol', mri, - '-thresh1', str(threshold), - '-thresh2', str(threshold)], env=this_env) + "The FreeSurfer environment needs to be set up to use " + "make_scalp_surfaces to create the outer skin surface " + "lh.seghead" + ) + run_subprocess( + [ + "mkheadsurf", + "-subjid", + subject, + "-srcvol", + mri, + "-thresh1", + str(threshold), + "-thresh2", + str(threshold), + ], + env=this_env, + ) surf = check_seghead() if surf is None: - raise RuntimeError('mkheadsurf did not produce the standard output ' - 'file.') + raise RuntimeError("mkheadsurf did not produce the standard output file.") bem_dir = subjects_dir / subject / "bem" if not bem_dir.is_dir(): os.mkdir(bem_dir) - fname_template = bem_dir / ("%s-head-{}.fif" % subject) - dense_fname = str(fname_template).format('dense') - logger.info('2. Creating %s ...' % dense_fname) + fname_template = bem_dir / (f"{subject}-head-{{}}.fif") + dense_fname = str(fname_template).format("dense") + logger.info(f"2. Creating {dense_fname} ...") _check_file(dense_fname, overwrite) # Helpful message if we get a topology error - msg = ('\n\nConsider using pymeshfix directly to fix the mesh, or --force ' - 'to ignore the problem.') + msg = ( + "\n\nConsider using pymeshfix directly to fix the mesh, or --force " + "to ignore the problem." + ) surf = _surfaces_to_bem( - [surf], [FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], - incomplete=incomplete, extra=msg)[0] + [surf], [FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], incomplete=incomplete, extra=msg + )[0] write_bem_surfaces(dense_fname, surf, overwrite=overwrite) - if os.getenv('_MNE_TESTING_SCALP', 'false') == 'true': - tris = [len(surf['tris'])] # don't actually decimate + if os.getenv("_MNE_TESTING_SCALP", "false") == "true": + tris = [len(surf["tris"])] # don't actually decimate for ii, (level, n_tri) in enumerate(_tri_levels.items(), 3): if no_decimate: break - logger.info(f'{ii}. Creating {level} tessellation...') - logger.info(f'{ii}.1 Decimating the dense tessellation ' - f'({len(surf["tris"])} -> {n_tri} triangles)...') - points, tris = decimate_surface(points=surf['rr'], - triangles=surf['tris'], - n_triangles=n_tri) + logger.info(f"{ii}. Creating {level} tessellation...") + logger.info( + f"{ii}.1 Decimating the dense tessellation " + f"({len(surf['tris'])} -> {n_tri} triangles)..." 
+ ) + points, tris = decimate_surface( + points=surf["rr"], triangles=surf["tris"], n_triangles=n_tri + ) dec_fname = str(fname_template).format(level) - logger.info('%i.2 Creating %s' % (ii, dec_fname)) + logger.info(f"{ii}.2 Creating {dec_fname}") _check_file(dec_fname, overwrite) dec_surf = _surfaces_to_bem( [dict(rr=points, tris=tris)], - [FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], rescale=False, - incomplete=incomplete, extra=msg) + [FIFF.FIFFV_BEM_SURF_ID_HEAD], + [1], + rescale=False, + incomplete=incomplete, + extra=msg, + ) write_bem_surfaces(dec_fname, dec_surf, overwrite=overwrite) - logger.info('[done]') + logger.info("[done]") @verbose @@ -2405,28 +2511,23 @@ def distance_to_bem(pos, bem, trans=None, verbose=None): distance = np.zeros((n,)) logger.info( - 'Computing distance to inner skull surface for ' + - f'{n} position{_pl(n)}...' + "Computing distance to inner skull surface for " + f"{n} position{_pl(n)}..." ) - if bem['is_sphere']: - center = bem['r0'] + if bem["is_sphere"]: + center = bem["r0"] if trans: center = apply_trans(trans, center, move=True) - radius = bem['layers'][0]['rad'] + radius = bem["layers"][0]["rad"] - distance = np.abs(radius - np.linalg.norm( - pos - center, axis=1 - )) + distance = np.abs(radius - np.linalg.norm(pos - center, axis=1)) else: # is BEM - surface_points = bem['surfs'][0]['rr'] + surface_points = bem["surfs"][0]["rr"] if trans: - surface_points = apply_trans( - trans, surface_points, move=True - ) + surface_points = apply_trans(trans, surface_points, move=True) _, distance = _compute_nearest(surface_points, pos, return_dists=True) diff --git a/mne/channels/__init__.py b/mne/channels/__init__.py index c5701c7b2b1..901a237da24 100644 --- a/mne/channels/__init__.py +++ b/mne/channels/__init__.py @@ -3,42 +3,10 @@ Can be used for setting of sensor locations used for processing and plotting. """ -from ..defaults import HEAD_SIZE_DEFAULT -from .layout import (Layout, make_eeg_layout, make_grid_layout, read_layout, - find_layout, generate_2d_layout) -from .montage import (DigMontage, - get_builtin_montages, make_dig_montage, read_dig_dat, - read_dig_egi, read_dig_captrak, read_dig_fif, - read_dig_polhemus_isotrak, read_polhemus_fastscan, - compute_dev_head_t, make_standard_montage, - read_custom_montage, read_dig_hpts, read_dig_localite, - compute_native_head_t) -from .channels import (equalize_channels, rename_channels, fix_mag_coil_types, - read_ch_adjacency, _get_ch_type, find_ch_adjacency, - make_1020_channel_selections, combine_channels, - read_vectorview_selection, _SELECTIONS, _EEG_SELECTIONS, - _divide_to_regions, get_builtin_ch_adjacencies) +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-__all__ = [ - # Data Structures - 'DigMontage', 'Layout', +import lazy_loader as lazy - # Factory Methods - 'make_dig_montage', 'make_eeg_layout', 'make_grid_layout', - 'make_standard_montage', - - # Readers - 'read_ch_adjacency', 'read_dig_captrak', 'read_dig_dat', - 'read_dig_egi', 'read_dig_fif', 'read_dig_localite', - 'read_dig_polhemus_isotrak', 'read_layout', - 'read_polhemus_fastscan', 'read_custom_montage', 'read_dig_hpts', - - # Helpers - 'rename_channels', 'make_1020_channel_selections', - '_get_ch_type', 'equalize_channels', 'find_ch_adjacency', 'find_layout', - 'fix_mag_coil_types', 'generate_2d_layout', 'get_builtin_montages', - 'combine_channels', 'read_vectorview_selection', - - # Other - 'compute_dev_head_t', 'compute_native_head_t', -] +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/channels/__init__.pyi b/mne/channels/__init__.pyi new file mode 100644 index 00000000000..470bc0cfd0b --- /dev/null +++ b/mne/channels/__init__.pyi @@ -0,0 +1,80 @@ +__all__ = [ + "DigMontage", + "Layout", + "_EEG_SELECTIONS", + "_SELECTIONS", + "_divide_to_regions", + "combine_channels", + "compute_dev_head_t", + "compute_native_head_t", + "equalize_channels", + "find_ch_adjacency", + "find_layout", + "fix_mag_coil_types", + "generate_2d_layout", + "get_builtin_ch_adjacencies", + "get_builtin_montages", + "make_1020_channel_selections", + "make_dig_montage", + "make_eeg_layout", + "make_grid_layout", + "make_standard_montage", + "read_ch_adjacency", + "read_custom_montage", + "read_dig_captrak", + "read_dig_curry", + "read_dig_dat", + "read_dig_egi", + "read_dig_fif", + "read_dig_hpts", + "read_dig_localite", + "read_dig_polhemus_isotrak", + "read_layout", + "read_polhemus_fastscan", + "read_vectorview_selection", + "rename_channels", + "transform_to_head", + "unify_bad_channels", +] +from .channels import ( + _EEG_SELECTIONS, + _SELECTIONS, + _divide_to_regions, + combine_channels, + equalize_channels, + find_ch_adjacency, + fix_mag_coil_types, + get_builtin_ch_adjacencies, + make_1020_channel_selections, + read_ch_adjacency, + read_vectorview_selection, + rename_channels, + unify_bad_channels, +) +from .layout import ( + Layout, + find_layout, + generate_2d_layout, + make_eeg_layout, + make_grid_layout, + read_layout, +) +from .montage import ( + DigMontage, + compute_dev_head_t, + compute_native_head_t, + get_builtin_montages, + make_dig_montage, + make_standard_montage, + read_custom_montage, + read_dig_captrak, + read_dig_curry, + read_dig_dat, + read_dig_egi, + read_dig_fif, + read_dig_hpts, + read_dig_localite, + read_dig_polhemus_isotrak, + read_polhemus_fastscan, + transform_to_head, +) diff --git a/mne/channels/_dig_montage_utils.py b/mne/channels/_dig_montage_utils.py index a60418e84d4..31bee83648d 100644 --- a/mne/channels/_dig_montage_utils.py +++ b/mne/channels/_dig_montage_utils.py @@ -1,90 +1,85 @@ -# Authors: Alexandre Gramfort -# Denis Engemann -# Martin Luessi -# Eric Larson -# Marijn van Vliet -# Jona Sassenhagen -# Teon Brooks -# Christian Brodbeck -# Stefan Appelhoff -# Joan Massich -# -# License: Simplified BSD - -import xml.etree.ElementTree as ElementTree +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-import numpy as np +import re +import numpy as np -from ..utils import _check_fname, Bunch, warn +from ..utils import Bunch, _check_fname, _soft_import, warn def _read_dig_montage_egi( - fname, - _scaling, - _all_data_kwargs_are_none, + fname, + _scaling, + _all_data_kwargs_are_none, ): - if not _all_data_kwargs_are_none: - raise ValueError('hsp, hpi, elp, point_names, fif must all be ' - 'None if egi is not None') - _check_fname(fname, overwrite='read', must_exist=True) - - root = ElementTree.parse(fname).getroot() - ns = root.tag[root.tag.index('{'):root.tag.index('}') + 1] - sensors = root.find('%ssensorLayout/%ssensors' % (ns, ns)) + raise ValueError( + "hsp, hpi, elp, point_names, fif must all be None if egi is not None" + ) + _check_fname(fname, overwrite="read", must_exist=True) + defusedxml = _soft_import("defusedxml", "reading EGI montages") + root = defusedxml.ElementTree.parse(fname).getroot() + ns = root.tag[root.tag.index("{") : root.tag.index("}") + 1] + sensors = root.find(f"{ns}sensorLayout/{ns}sensors") fids = dict() dig_ch_pos = dict() - fid_name_map = {'Nasion': 'nasion', - 'Right periauricular point': 'rpa', - 'Left periauricular point': 'lpa'} + fid_name_map = { + "Nasion": "nasion", + "Right periauricular point": "rpa", + "Left periauricular point": "lpa", + } for s in sensors: name, number, kind = s[0].text, int(s[1].text), int(s[2].text) - coordinates = np.array([float(s[3].text), float(s[4].text), - float(s[5].text)]) + coordinates = np.array([float(s[3].text), float(s[4].text), float(s[5].text)]) coordinates *= _scaling # EEG Channels if kind == 0: - dig_ch_pos['EEG %03d' % number] = coordinates + dig_ch_pos[f"EEG {number:03d}"] = coordinates # Reference elif kind == 1: - dig_ch_pos['EEG %03d' % - (len(dig_ch_pos.keys()) + 1)] = coordinates + dig_ch_pos[f"EEG {len(dig_ch_pos) + 1:03d}"] = coordinates # Fiducials elif kind == 2: fid_name = fid_name_map[name] fids[fid_name] = coordinates # Unknown else: - warn('Unknown sensor type %s detected. Skipping sensor...' - 'Proceed with caution!' % kind) + warn( + f"Unknown sensor type {kind} detected. Skipping sensor..." + "Proceed with caution!" 
+ ) return Bunch( # EGI stuff - nasion=fids['nasion'], lpa=fids['lpa'], rpa=fids['rpa'], - ch_pos=dig_ch_pos, coord_frame='unknown', + nasion=fids["nasion"], + lpa=fids["lpa"], + rpa=fids["rpa"], + ch_pos=dig_ch_pos, + coord_frame="unknown", ) def _parse_brainvision_dig_montage(fname, scale): - FID_NAME_MAP = {'Nasion': 'nasion', 'RPA': 'rpa', 'LPA': 'lpa'} - - root = ElementTree.parse(fname).getroot() - sensors = root.find('CapTrakElectrodeList') + FID_NAME_MAP = {"Nasion": "nasion", "RPA": "rpa", "LPA": "lpa"} + defusedxml = _soft_import("defusedxml", "reading BrainVision montages") + root = defusedxml.ElementTree.parse(fname).getroot() + sensors = root.find("CapTrakElectrodeList") fids, dig_ch_pos = dict(), dict() for s in sensors: - name = s.find('Name').text + name = s.find("Name").text is_fid = name in FID_NAME_MAP - coordinates = scale * np.array([float(s.find('X').text), - float(s.find('Y').text), - float(s.find('Z').text)]) + coordinates = scale * np.array( + [float(s.find("X").text), float(s.find("Y").text), float(s.find("Z").text)] + ) # Fiducials if is_fid: @@ -95,6 +90,52 @@ def _parse_brainvision_dig_montage(fname, scale): return dict( # BVCT stuff - nasion=fids['nasion'], lpa=fids['lpa'], rpa=fids['rpa'], - ch_pos=dig_ch_pos, coord_frame='unknown' + nasion=fids["nasion"], + lpa=fids["lpa"], + rpa=fids["rpa"], + ch_pos=dig_ch_pos, + coord_frame="unknown", ) + + +def _read_dig_montage_curry(ch_names, ch_types, ch_pos, landmarks, landmarkslabels): + # scale ch_pos to m?! + ch_pos /= 1000.0 + landmarks /= 1000.0 + # channel locations + # what about misc without pos? can they mess things up if unordered? + assert len(ch_pos) >= (ch_types.count("mag") + ch_types.count("eeg")) + assert len(ch_pos) == (ch_types.count("mag") + ch_types.count("eeg")) + ch_pos_eeg = { + ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "eeg" + } + # landmarks and headshape + landmark_dict = dict(zip(landmarkslabels, landmarks)) + for k in ["Nas", "RPA", "LPA"]: + if k not in landmark_dict.keys(): + landmark_dict[k] = None + if len(landmarkslabels) > 0: + hpi_pos = landmarks[ + [i for i, n in enumerate(landmarkslabels) if re.match("HPI[1-99]", n)], : + ] + else: + hpi_pos = None + if len(landmarkslabels) > 0: + hsp_pos = landmarks[ + [i for i, n in enumerate(landmarkslabels) if re.match("H[1-99]", n)], : + ] + else: + hsp_pos = None + # compile dig montage positions for eeg + if len(ch_pos_eeg) > 0: + return dict( + ch_pos=ch_pos_eeg, + nasion=landmark_dict["Nas"], + lpa=landmark_dict["LPA"], + rpa=landmark_dict["RPA"], + hsp=hsp_pos, + hpi=hpi_pos, + coord_frame="unknown", + ) + else: # not recorded? + raise ValueError("No eeg sensor locations found in header file.") diff --git a/mne/channels/_standard_montage_utils.py b/mne/channels/_standard_montage_utils.py index b83252c0dc1..eb3dc10d10e 100644 --- a/mne/channels/_standard_montage_utils.py +++ b/mne/channels/_standard_montage_utils.py @@ -1,44 +1,46 @@ -# Authors: Joan Massich -# Alexandre Gramfort -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause -from collections import OrderedDict -import csv +# Copyright the MNE-Python contributors. 
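The montage readers above swap xml.etree for defusedxml (pulled in via _soft_import) so vendor XML is parsed with a hardened parser. A small sketch of the drop-in API, assuming defusedxml is installed; the XML string is an illustrative stand-in, not a real EGI/CapTrak file:

import defusedxml.ElementTree as ElementTree

xml = "<CapTrakElectrodeList><e><Name>Fp1</Name></e></CapTrakElectrodeList>"
root = ElementTree.fromstring(xml)
print(root.find("e/Name").text)  # same ElementTree API, safer parser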
+import csv import os.path as op -import numpy as np - +from collections import OrderedDict from functools import partial -import xml.etree.ElementTree as ElementTree -from .montage import make_dig_montage +import numpy as np + from .._freesurfer import get_mni_fiducials from ..transforms import _sph_to_cart -from ..utils import warn, _pl +from ..utils import _pl, _soft_import, warn from . import __file__ as _CHANNELS_INIT_FILE +from .montage import make_dig_montage -MONTAGE_PATH = op.join(op.dirname(_CHANNELS_INIT_FILE), 'data', 'montages') +MONTAGE_PATH = op.join(op.dirname(_CHANNELS_INIT_FILE), "data", "montages") -_str = 'U100' +_str = "U100" # In standard_1020, T9=LPA, T10=RPA, Nasion is the same as Iz with a # sign-flipped Y value + def _egi_256(head_size): - fname = op.join(MONTAGE_PATH, 'EGI_256.csd') + fname = op.join(MONTAGE_PATH, "EGI_256.csd") montage = _read_csd(fname, head_size) ch_pos = montage._get_ch_pos() # For this cap, the Nasion is the frontmost electrode, # LPA/RPA we approximate by putting 75% of the way (toward the front) # between the two electrodes that are halfway down the ear holes - nasion = ch_pos['E31'] - lpa = 0.75 * ch_pos['E67'] + 0.25 * ch_pos['E94'] - rpa = 0.75 * ch_pos['E219'] + 0.25 * ch_pos['E190'] + nasion = ch_pos["E31"] + lpa = 0.75 * ch_pos["E67"] + 0.25 * ch_pos["E94"] + rpa = 0.75 * ch_pos["E219"] + 0.25 * ch_pos["E190"] fids_montage = make_dig_montage( - coord_frame='unknown', nasion=nasion, lpa=lpa, rpa=rpa, + coord_frame="unknown", + nasion=nasion, + lpa=lpa, + rpa=rpa, ) montage += fids_montage # add fiducials to montage @@ -63,119 +65,117 @@ def _str_names(ch_names): def _safe_np_loadtxt(fname, **kwargs): out = np.genfromtxt(fname, **kwargs) - ch_names = _str_names(out['f0']) - others = tuple(out['f%d' % ii] for ii in range(1, len(out.dtype.fields))) + ch_names = _str_names(out["f0"]) + others = tuple(out[f"f{ii}"] for ii in range(1, len(out.dtype.fields))) return (ch_names,) + others def _biosemi(basename, head_size): fname = op.join(MONTAGE_PATH, basename) - fid_names = ('Nz', 'LPA', 'RPA') + fid_names = ("Nz", "LPA", "RPA") return _read_theta_phi_in_degrees(fname, head_size, fid_names) -def _mgh_or_standard(basename, head_size, coord_frame='unknown'): - fid_names = ('Nz', 'LPA', 'RPA') +def _mgh_or_standard(basename, head_size, coord_frame="unknown"): + fid_names = ("Nz", "LPA", "RPA") fname = op.join(MONTAGE_PATH, basename) ch_names_, pos = [], [] with open(fname) as fid: # Ignore units as we will scale later using the norms anyway for line in fid: - if 'Positions\n' in line: + if "Positions\n" in line: break pos = [] for line in fid: - if 'Labels\n' in line: + if "Labels\n" in line: break pos.append(list(map(float, line.split()))) for line in fid: - if not line or not set(line) - {' '}: + if not line or not set(line) - {" "}: break - ch_names_.append(line.strip(' ').strip('\n')) + ch_names_.append(line.strip(" ").strip("\n")) - pos = np.array(pos) / 1000. + pos = np.array(pos) / 1000.0 ch_pos = _check_dupes_odict(ch_names_, pos) - nasion, lpa, rpa = [ch_pos.pop(n) for n in fid_names] + nasion, lpa, rpa = (ch_pos.pop(n) for n in fid_names) if head_size is None: - scale = 1. 
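A quick numeric illustration of the head-size scaling these readers apply (positions are rescaled so their median distance from the origin equals head_size); the coordinates below are made up, and 0.095 m is only an example in the ballpark of MNE's default head radius:

import numpy as np

pos = np.array([[0.08, 0.00, 0.02],
                [0.00, 0.09, 0.03],
                [-0.07, -0.02, 0.05]])
head_size = 0.095  # meters (assumed example value)

scale = head_size / np.median(np.linalg.norm(pos, axis=1))
print(np.median(np.linalg.norm(pos * scale, axis=1)))  # -> 0.095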
+ scale = 1.0 else: scale = head_size / np.median(np.linalg.norm(pos, axis=1)) for value in ch_pos.values(): value *= scale # if we are in MRI/MNI coordinates, we need to replace nasion, LPA, and RPA # with those of fsaverage for ``trans='fsaverage'`` to work - if coord_frame == 'mri': - lpa, nasion, rpa = [ - x['r'].copy() for x in get_mni_fiducials('fsaverage')] + if coord_frame == "mri": + lpa, nasion, rpa = (x["r"].copy() for x in get_mni_fiducials("fsaverage")) nasion *= scale lpa *= scale rpa *= scale - return make_dig_montage(ch_pos=ch_pos, coord_frame=coord_frame, - nasion=nasion, lpa=lpa, rpa=rpa) + return make_dig_montage( + ch_pos=ch_pos, coord_frame=coord_frame, nasion=nasion, lpa=lpa, rpa=rpa + ) standard_montage_look_up_table = { - 'EGI_256': _egi_256, - - 'easycap-M1': partial(_easycap, basename='easycap-M1.txt'), - 'easycap-M10': partial(_easycap, basename='easycap-M10.txt'), - - 'GSN-HydroCel-128': partial(_hydrocel, basename='GSN-HydroCel-128.sfp'), - 'GSN-HydroCel-129': partial(_hydrocel, basename='GSN-HydroCel-129.sfp'), - 'GSN-HydroCel-256': partial(_hydrocel, basename='GSN-HydroCel-256.sfp'), - 'GSN-HydroCel-257': partial(_hydrocel, basename='GSN-HydroCel-257.sfp'), - 'GSN-HydroCel-32': partial(_hydrocel, basename='GSN-HydroCel-32.sfp'), - 'GSN-HydroCel-64_1.0': partial(_hydrocel, - basename='GSN-HydroCel-64_1.0.sfp'), - 'GSN-HydroCel-65_1.0': partial(_hydrocel, - basename='GSN-HydroCel-65_1.0.sfp'), - - 'biosemi128': partial(_biosemi, basename='biosemi128.txt'), - 'biosemi16': partial(_biosemi, basename='biosemi16.txt'), - 'biosemi160': partial(_biosemi, basename='biosemi160.txt'), - 'biosemi256': partial(_biosemi, basename='biosemi256.txt'), - 'biosemi32': partial(_biosemi, basename='biosemi32.txt'), - 'biosemi64': partial(_biosemi, basename='biosemi64.txt'), - - 'mgh60': partial(_mgh_or_standard, basename='mgh60.elc', - coord_frame='mri'), - 'mgh70': partial(_mgh_or_standard, basename='mgh70.elc', - coord_frame='mri'), - 'standard_1005': partial(_mgh_or_standard, - basename='standard_1005.elc', coord_frame='mri'), - 'standard_1020': partial(_mgh_or_standard, - basename='standard_1020.elc', coord_frame='mri'), - 'standard_alphabetic': partial(_mgh_or_standard, - basename='standard_alphabetic.elc', - coord_frame='mri'), - 'standard_postfixed': partial(_mgh_or_standard, - basename='standard_postfixed.elc', - coord_frame='mri'), - 'standard_prefixed': partial(_mgh_or_standard, - basename='standard_prefixed.elc', - coord_frame='mri'), - 'standard_primed': partial(_mgh_or_standard, - basename='standard_primed.elc', - coord_frame='mri'), - 'artinis-octamon': partial(_mgh_or_standard, coord_frame='mri', - basename='artinis-octamon.elc'), - 'artinis-brite23': partial(_mgh_or_standard, coord_frame='mri', - basename='artinis-brite23.elc'), - 'brainproducts-RNP-BA-128': partial( - _easycap, basename='brainproducts-RNP-BA-128.txt') + "EGI_256": _egi_256, + "easycap-M1": partial(_easycap, basename="easycap-M1.txt"), + "easycap-M10": partial(_easycap, basename="easycap-M10.txt"), + "easycap-M43": partial(_easycap, basename="easycap-M43.txt"), + "GSN-HydroCel-128": partial(_hydrocel, basename="GSN-HydroCel-128.sfp"), + "GSN-HydroCel-129": partial(_hydrocel, basename="GSN-HydroCel-129.sfp"), + "GSN-HydroCel-256": partial(_hydrocel, basename="GSN-HydroCel-256.sfp"), + "GSN-HydroCel-257": partial(_hydrocel, basename="GSN-HydroCel-257.sfp"), + "GSN-HydroCel-32": partial(_hydrocel, basename="GSN-HydroCel-32.sfp"), + "GSN-HydroCel-64_1.0": partial(_hydrocel, 
basename="GSN-HydroCel-64_1.0.sfp"), + "GSN-HydroCel-65_1.0": partial(_hydrocel, basename="GSN-HydroCel-65_1.0.sfp"), + "biosemi128": partial(_biosemi, basename="biosemi128.txt"), + "biosemi16": partial(_biosemi, basename="biosemi16.txt"), + "biosemi160": partial(_biosemi, basename="biosemi160.txt"), + "biosemi256": partial(_biosemi, basename="biosemi256.txt"), + "biosemi32": partial(_biosemi, basename="biosemi32.txt"), + "biosemi64": partial(_biosemi, basename="biosemi64.txt"), + "mgh60": partial(_mgh_or_standard, basename="mgh60.elc", coord_frame="mri"), + "mgh70": partial(_mgh_or_standard, basename="mgh70.elc", coord_frame="mri"), + "standard_1005": partial( + _mgh_or_standard, basename="standard_1005.elc", coord_frame="mri" + ), + "standard_1020": partial( + _mgh_or_standard, basename="standard_1020.elc", coord_frame="mri" + ), + "standard_alphabetic": partial( + _mgh_or_standard, basename="standard_alphabetic.elc", coord_frame="mri" + ), + "standard_postfixed": partial( + _mgh_or_standard, basename="standard_postfixed.elc", coord_frame="mri" + ), + "standard_prefixed": partial( + _mgh_or_standard, basename="standard_prefixed.elc", coord_frame="mri" + ), + "standard_primed": partial( + _mgh_or_standard, basename="standard_primed.elc", coord_frame="mri" + ), + "artinis-octamon": partial( + _mgh_or_standard, coord_frame="mri", basename="artinis-octamon.elc" + ), + "artinis-brite23": partial( + _mgh_or_standard, coord_frame="mri", basename="artinis-brite23.elc" + ), + "brainproducts-RNP-BA-128": partial( + _easycap, basename="brainproducts-RNP-BA-128.txt" + ), } def _read_sfp(fname, head_size): """Read .sfp BESA/EGI files.""" # fname has been already checked - fid_names = ('FidNz', 'FidT9', 'FidT10') - options = dict(dtype=(_str, 'f4', 'f4', 'f4')) + fid_names = ("FidNz", "FidT9", "FidT10") + options = dict(dtype=(_str, "f4", "f4", "f4")) ch_names, xs, ys, zs = _safe_np_loadtxt(fname, **options) # deal with "headshape" - mask = np.array([ch_name == 'headshape' for ch_name in ch_names], bool) + mask = np.array([ch_name == "headshape" for ch_name in ch_names], bool) hsp = np.stack([xs[mask], ys[mask], zs[mask]], axis=-1) mask = ~mask pos = np.stack([xs[mask], ys[mask], zs[mask]], axis=-1) @@ -183,7 +183,7 @@ def _read_sfp(fname, head_size): ch_pos = _check_dupes_odict(ch_names, pos) del xs, ys, zs, ch_names # no one grants that fid names are there. 
- nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names] + nasion, lpa, rpa = (ch_pos.pop(n, None) for n in fid_names) if head_size is not None: scale = head_size / np.median(np.linalg.norm(pos, axis=-1)) @@ -193,14 +193,16 @@ def _read_sfp(fname, head_size): lpa = lpa * scale if lpa is not None else None rpa = rpa * scale if rpa is not None else None - return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown', - nasion=nasion, rpa=rpa, lpa=lpa, hsp=hsp) + return make_dig_montage( + ch_pos=ch_pos, coord_frame="unknown", nasion=nasion, rpa=rpa, lpa=lpa, hsp=hsp + ) def _read_csd(fname, head_size): # Label, Theta, Phi, Radius, X, Y, Z, off sphere surface - options = dict(comments='//', - dtype=(_str, 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')) + options = dict( + comments="//", dtype=(_str, "f4", "f4", "f4", "f4", "f4", "f4", "f4") + ) ch_names, _, _, _, xs, ys, zs, _ = _safe_np_loadtxt(fname, **options) pos = np.stack([xs, ys, zs], axis=-1) @@ -213,22 +215,26 @@ def _read_csd(fname, head_size): def _check_dupes_odict(ch_names, pos): """Warn if there are duplicates, then turn to ordered dict.""" ch_names = list(ch_names) - dups = OrderedDict((ch_name, ch_names.count(ch_name)) - for ch_name in ch_names) - dups = OrderedDict((ch_name, count) for ch_name, count in dups.items() - if count > 1) + dups = OrderedDict((ch_name, ch_names.count(ch_name)) for ch_name in ch_names) + dups = OrderedDict((ch_name, count) for ch_name, count in dups.items() if count > 1) n = len(dups) if n: - dups = ', '.join( - f'{ch_name} ({count})' for ch_name, count in dups.items()) - warn(f'Duplicate channel position{_pl(n)} found, the last will be ' - f'used for {dups}') + dups = ", ".join(f"{ch_name} ({count})" for ch_name, count in dups.items()) + warn( + f"Duplicate channel position{_pl(n)} found, the last will be " + f"used for {dups}" + ) return OrderedDict(zip(ch_names, pos)) def _read_elc(fname, head_size): """Read .elc files. + The `.elc` files are so-called "asa electrode files". ASA here stands for + Advances Source Analysis, and is a software package developed and sold by + the ANT Neuro company. They provide a device for sensor digitization, called + 'xensor', which produces the `.elc` files. + Parameters ---------- fname : str @@ -240,48 +246,67 @@ def _read_elc(fname, head_size): Returns ------- montage : instance of DigMontage - The montage in [m]. + The montage units are [m]. """ - fid_names = ('Nz', 'LPA', 'RPA') + fid_names = ("Nz", "LPA", "RPA") - ch_names_, pos = [], [] with open(fname) as fid: + # Read units # _read_elc does require to detect the units. 
(see _mgh_or_standard) for line in fid: - if 'UnitPosition' in line: + if "UnitPosition" in line: units = line.split()[1] - scale = dict(m=1., mm=1e-3)[units] + scale = dict(m=1.0, mm=1e-3)[units] break else: - raise RuntimeError('Could not detect units in file %s' % fname) + raise RuntimeError(f"Could not detect units in file {fname}") for line in fid: - if 'Positions\n' in line: + if "Positions\n" in line: break + + # Read positions + new_style = False pos = [] for line in fid: - if 'Labels\n' in line: + if "Labels\n" in line: break - pos.append(list(map(float, line.split()))) + if ":" in line: + # Of the 'new' format: `E01 : 5.288 -3.658 119.693` + pos.append(list(map(float, line.split(":")[1].split()))) + new_style = True + else: + # Of the 'old' format: `5.288 -3.658 119.693` + pos.append(list(map(float, line.split()))) + + # Read labels + ch_names_ = [] for line in fid: - if not line or not set(line) - {' '}: + if not line or not set(line) - {" "}: break - ch_names_.append(line.strip(' ').strip('\n')) + if new_style: + # Not sure how this format would deal with spaces in channel labels, + # but none of my test files had this, so let's wait until it comes up. + parsed = line.strip(" ").strip("\n").split() + else: + parsed = [line.strip(" ").strip("\n")] + ch_names_.extend(parsed) pos = np.array(pos) * scale if head_size is not None: pos *= head_size / np.median(np.linalg.norm(pos, axis=1)) ch_pos = _check_dupes_odict(ch_names_, pos) - nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names] + nasion, lpa, rpa = (ch_pos.pop(n, None) for n in fid_names) - return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown', - nasion=nasion, lpa=lpa, rpa=rpa) + return make_dig_montage( + ch_pos=ch_pos, coord_frame="unknown", nasion=nasion, lpa=lpa, rpa=rpa + ) -def _read_theta_phi_in_degrees(fname, head_size, fid_names=None, - add_fiducials=False): - ch_names, theta, phi = _safe_np_loadtxt(fname, skip_header=1, - dtype=(_str, 'i4', 'i4')) +def _read_theta_phi_in_degrees(fname, head_size, fid_names=None, add_fiducials=False): + ch_names, theta, phi = _safe_np_loadtxt( + fname, skip_header=1, dtype=(_str, "i4", "i4") + ) if add_fiducials: # Add fiducials based on 10/20 spherical coordinate definitions # http://chgd.umich.edu/wp-content/uploads/2014/06/ @@ -290,7 +315,7 @@ def _read_theta_phi_in_degrees(fname, head_size, fid_names=None, # https://www.easycap.de/wp-content/uploads/2018/02/ # Easycap-Equidistant-Layouts.pdf assert fid_names is None - fid_names = ['Nasion', 'LPA', 'RPA'] + fid_names = ["Nasion", "LPA", "RPA"] ch_names.extend(fid_names) theta = np.append(theta, [115, -115, 115]) phi = np.append(phi, [90, 0, 0]) @@ -301,25 +326,25 @@ def _read_theta_phi_in_degrees(fname, head_size, fid_names=None, nasion, lpa, rpa = None, None, None if fid_names is not None: - nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names] + nasion, lpa, rpa = (ch_pos.pop(n, None) for n in fid_names) - return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown', - nasion=nasion, lpa=lpa, rpa=rpa) + return make_dig_montage( + ch_pos=ch_pos, coord_frame="unknown", nasion=nasion, lpa=lpa, rpa=rpa + ) def _read_elp_besa(fname, head_size): # This .elp is not the same as polhemus elp. see _read_isotrak_elp_points - dtype = np.dtype('S8, S8, f8, f8, f8') + dtype = np.dtype("S8, S8, f8, f8, f8") data = np.loadtxt(fname, dtype=dtype) - ch_names = data['f1'].astype(str).tolist() - az = data['f2'] - horiz = data['f3'] - radius = np.abs(az / 180.) - az = np.deg2rad(np.array([h if a >= 0. 
else 180 + h - for h, a in zip(horiz, az)])) + ch_names = data["f1"].astype(str).tolist() + az = data["f2"] + horiz = data["f3"] + radius = np.abs(az / 180.0) + az = np.deg2rad(np.array([h if a >= 0.0 else 180 + h for h, a in zip(horiz, az)])) pol = radius * np.pi - rad = data['f4'] / 100 + rad = data["f4"] / 100 pos = _sph_to_cart(np.array([rad, az, pol]).T) if head_size is not None: @@ -327,9 +352,9 @@ def _read_elp_besa(fname, head_size): ch_pos = _check_dupes_odict(ch_names, pos) - fid_names = ('Nz', 'LPA', 'RPA') + fid_names = ("Nz", "LPA", "RPA") # No one grants that the fid names actually exist. - nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names] + nasion, lpa, rpa = (ch_pos.pop(n, None) for n in fid_names) return make_dig_montage(ch_pos=ch_pos, nasion=nasion, lpa=lpa, rpa=rpa) @@ -340,7 +365,8 @@ def _read_brainvision(fname, head_size): # standard electrode positions: X-axis from T7 to T8, Y-axis from Oz to # Fpz, Z-axis orthogonal from XY-plane through Cz, fit to a sphere if # idealized (when radius=1), specified in millimeters - root = ElementTree.parse(fname).getroot() + defusedxml = _soft_import("defusedxml", "reading BrainVision montages") + root = defusedxml.ElementTree.parse(fname).getroot() ch_names = [s.text for s in root.findall("./Electrode/Name")] theta = [float(s.text) for s in root.findall("./Electrode/Theta")] pol = np.deg2rad(np.array(theta)) @@ -379,7 +405,7 @@ def _read_xyz(fname): ch_names = [] pos = [] file_format = op.splitext(fname)[1].lower() - with open(fname, "r") as f: + with open(fname) as f: if file_format != ".xyz": f.readline() # skip header delimiter = "," if file_format == ".csv" else "\t" diff --git a/mne/channels/channels.py b/mne/channels/channels.py index 12e797a5c10..ef98efd1731 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -1,109 +1,112 @@ -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Martin Luessi -# Denis Engemann -# Andrew Dykstra -# Teon Brooks -# Daniel McCloy -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
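# Hedged illustration, not part of the diff: the updated _read_elc above now accepts
# two position-line styles. The tiny parser below mirrors the branch on ":" added in
# that change; the sample lines are made up for the example.
def _parse_elc_position_line(line):
    if ":" in line:  # 'new' style: "E01 : 5.288 -3.658 119.693"
        return [float(v) for v in line.split(":")[1].split()]
    return [float(v) for v in line.split()]  # 'old' style: "5.288 -3.658 119.693"

print(_parse_elc_position_line("E01 : 5.288 -3.658 119.693"))
print(_parse_elc_position_line("5.288 -3.658 119.693"))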
+from __future__ import annotations # only needed for Python ≤ 3.9 + import os.path as op -from pathlib import Path +import string import sys from collections import OrderedDict -from dataclasses import dataclass from copy import deepcopy +from dataclasses import dataclass from functools import partial -import string -from typing import Union +from pathlib import Path import numpy as np - +from scipy.io import loadmat +from scipy.sparse import csr_array, lil_array +from scipy.spatial import Delaunay +from scipy.stats import zscore + +from .._fiff.constants import FIFF +from .._fiff.meas_info import ( # noqa F401 + Info, + MontageMixin, + _merge_info, + _rename_comps, + _unit2human, # TODO: pybv relies on this, should be made public + create_info, +) +from .._fiff.pick import ( + _check_excludes_includes, + _pick_data_channels, + _picks_by_type, + _picks_to_idx, + _second_rules, + channel_indices_by_type, + channel_type, + pick_channels, + pick_info, + pick_types, +) +from .._fiff.proj import _has_eeg_average_ref_proj, setup_proj +from .._fiff.reference import add_reference_channels, set_eeg_reference +from .._fiff.tag import _rename_list +from ..bem import _check_origin from ..defaults import HEAD_SIZE_DEFAULT, _handle_default -from ..utils import (verbose, logger, warn, - _check_preload, _validate_type, fill_doc, _check_option, - _get_stim_channel, _check_fname, _check_dict_keys, - _on_missing) -from ..io.constants import FIFF -from ..io.meas_info import (anonymize_info, Info, MontageMixin, create_info, - _rename_comps) -from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type, - _check_excludes_includes, _contains_ch_type, - channel_indices_by_type, pick_channels, _picks_to_idx, - get_channel_type_constants, - _pick_data_channels) -from ..io.tag import _rename_list -from ..io.write import DATE_NONE -from ..io.proj import setup_proj +from ..utils import ( + _check_dict_keys, + _check_fname, + _check_option, + _check_preload, + _get_stim_channel, + _on_missing, + _validate_type, + fill_doc, + legacy, + logger, + verbose, + warn, +) def _get_meg_system(info): """Educated guess for the helmet type based on channels.""" have_helmet = True - for ch in info['chs']: - if ch['kind'] == FIFF.FIFFV_MEG_CH: + for ch in info["chs"]: + if ch["kind"] == FIFF.FIFFV_MEG_CH: # Only take first 16 bits, as higher bits store CTF grad comp order - coil_type = ch['coil_type'] & 0xFFFF - nmag = np.sum( - [c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']]) + coil_type = ch["coil_type"] & 0xFFFF + nmag = np.sum([c["kind"] == FIFF.FIFFV_MEG_CH for c in info["chs"]]) if coil_type == FIFF.FIFFV_COIL_NM_122: - system = '122m' + system = "122m" break elif coil_type // 1000 == 3: # All Vectorview coils are 30xx - system = '306m' + system = "306m" break - elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or - coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD): - system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh' + elif ( + coil_type == FIFF.FIFFV_COIL_MAGNES_MAG + or coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD + ): + system = "Magnes_3600wh" if nmag > 150 else "Magnes_2500wh" break elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD: - system = 'CTF_275' + system = "CTF_275" break elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD: - system = 'KIT' + system = "KIT" # Our helmet does not match very well, so let's just create it have_helmet = False break elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD: - system = 'BabySQUID' + system = "BabySQUID" break elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD: - system = 'ARTEMIS123' + 
system = "ARTEMIS123" have_helmet = False break + elif coil_type == FIFF.FIFFV_COIL_KERNEL_OPM_MAG_GEN1: + system = "Kernel_Flux" + have_helmet = True + break else: - system = 'unknown' + system = "unknown" have_helmet = False return system, have_helmet -def _get_ch_type(inst, ch_type, allow_ref_meg=False): - """Choose a single channel type (usually for plotting). - - Usually used in plotting to plot a single datatype, e.g. look for mags, - then grads, then ... to plot. - """ - if ch_type is None: - allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', - 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', - 'fnirs_fd_phase', 'fnirs_od', 'hbo', 'hbr', - 'ecog', 'seeg', 'dbs'] - allowed_types += ['ref_meg'] if allow_ref_meg else [] - for type_ in allowed_types: - if isinstance(inst, Info): - if _contains_ch_type(inst, type_): - ch_type = type_ - break - elif type_ in inst: - ch_type = type_ - break - else: - raise RuntimeError('No plottable channel types found') - return ch_type - - @verbose def equalize_channels(instances, copy=True, verbose=None): """Equalize channel picks and ordering across multiple MNE-Python objects. @@ -117,8 +120,11 @@ def equalize_channels(instances, copy=True, verbose=None): ---------- instances : list A list of MNE-Python objects to equalize the channels for. Objects can - be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance, + be of type Raw, Epochs, Evoked, Spectrum, AverageTFR, Forward, Covariance, CrossSpectralDensity or Info. + + .. versionchanged:: 1.11 + Added support for :class:`mne.time_frequency.Spectrum` objects. copy : bool When dropping and/or re-ordering channels, an object will be copied when this parameter is set to ``True``. When set to ``False`` (the @@ -133,30 +139,44 @@ def equalize_channels(instances, copy=True, verbose=None): A list of MNE-Python objects that have the same channels defined in the same order. - Notes - ----- - This function operates inplace. + See Also + -------- + mne.channels.unify_bad_channels + mne.channels.rename_channels + mne.channels.combine_channels """ from ..cov import Covariance - from ..io.base import BaseRaw - from ..io.meas_info import Info from ..epochs import BaseEpochs from ..evoked import Evoked from ..forward import Forward - from ..time_frequency import _BaseTFR, CrossSpectralDensity + from ..io import BaseRaw + from ..time_frequency import BaseTFR, CrossSpectralDensity + from ..time_frequency.spectrum import BaseSpectrum # Instances need to have a `ch_names` attribute and a `pick_channels` # method that supports `ordered=True`. 
- allowed_types = (BaseRaw, BaseEpochs, Evoked, _BaseTFR, Forward, - Covariance, CrossSpectralDensity, Info) - allowed_types_str = ("Raw, Epochs, Evoked, TFR, Forward, Covariance, " - "CrossSpectralDensity or Info") + allowed_types = ( + BaseRaw, + BaseEpochs, + Evoked, + BaseSpectrum, + BaseTFR, + Forward, + Covariance, + CrossSpectralDensity, + Info, + ) + allowed_types_str = ( + "Raw, Epochs, Evoked, Spectrum, TFR, Forward, Covariance, CrossSpectralDensity " + "or Info" + ) for inst in instances: - _validate_type(inst, allowed_types, "Instances to be modified", - allowed_types_str) + _validate_type( + inst, allowed_types, "Instances to be modified", allowed_types_str + ) chan_template = instances[0].ch_names - logger.info('Identifying common channels ...') + logger.info("Identifying common channels ...") channels = [set(inst.ch_names) for inst in instances] common_channels = set(chan_template).intersection(*channels) all_channels = set(chan_template).union(*channels) @@ -173,58 +193,118 @@ def equalize_channels(instances, copy=True, verbose=None): # Only perform picking when needed if inst.ch_names != common_channels: if isinstance(inst, Info): - sel = pick_channels(inst.ch_names, common_channels, exclude=[], - ordered=True) + sel = pick_channels( + inst.ch_names, common_channels, exclude=[], ordered=True + ) inst = pick_info(inst, sel, copy=copy, verbose=False) else: if copy: inst = inst.copy() + # TODO change to .pick() once CSD, Cov, and Fwd have `.pick()` methods inst.pick_channels(common_channels, ordered=True) if len(inst.ch_names) == len(common_channels): reordered = True equalized_instances.append(inst) if dropped: - logger.info('Dropped the following channels:\n%s' % dropped) + logger.info(f"Dropped the following channels:\n{dropped}") elif reordered: - logger.info('Channels have been re-ordered.') + logger.info("Channels have been re-ordered.") return equalized_instances -channel_type_constants = get_channel_type_constants(include_defaults=True) -_human2fiff = {k: v.get('kind', FIFF.FIFFV_COIL_NONE) for k, v in - channel_type_constants.items()} -_human2unit = {k: v.get('unit', FIFF.FIFF_UNIT_NONE) for k, v in - channel_type_constants.items()} -_unit2human = {FIFF.FIFF_UNIT_V: 'V', - FIFF.FIFF_UNIT_T: 'T', - FIFF.FIFF_UNIT_T_M: 'T/m', - FIFF.FIFF_UNIT_MOL: 'M', - FIFF.FIFF_UNIT_NONE: 'NA', - FIFF.FIFF_UNIT_CEL: 'C', - FIFF.FIFF_UNIT_S: 'S'} - - -def _check_set(ch, projs, ch_type): - """Ensure type change is compatible with projectors.""" - new_kind = _human2fiff[ch_type] - if ch['kind'] != new_kind: - for proj in projs: - if ch['ch_name'] in proj['data']['col_names']: - raise RuntimeError('Cannot change channel type for channel %s ' - 'in projector "%s"' - % (ch['ch_name'], proj['desc'])) - ch['kind'] = new_kind - - -class SetChannelsMixin(MontageMixin): +def unify_bad_channels(insts): + """Unify bad channels across a list of instances. + + All instances must be of the same type and have matching channel names and channel + order. The ``.info["bads"]`` of each instance will be set to the union of + ``.info["bads"]`` across all instances. + + Parameters + ---------- + insts : list + List of instances (:class:`~mne.io.Raw`, :class:`~mne.Epochs`, + :class:`~mne.Evoked`, :class:`~mne.time_frequency.Spectrum`, + :class:`~mne.time_frequency.EpochsSpectrum`) across which to unify bad channels. + + Returns + ------- + insts : list + List of instances with bad channels unified across instances. 
+ + See Also + -------- + mne.channels.equalize_channels + mne.channels.rename_channels + mne.channels.combine_channels + + Notes + ----- + This function modifies the instances in-place. + + .. versionadded:: 1.6 + """ + from ..epochs import Epochs + from ..evoked import Evoked + from ..io import BaseRaw + from ..time_frequency.spectrum import BaseSpectrum + + # ensure input is list-like + _validate_type(insts, (list, tuple), "insts") + # ensure non-empty + if len(insts) == 0: + raise ValueError("insts must not be empty") + # ensure all insts are MNE objects, and all the same type + inst_type = type(insts[0]) + valid_types = (BaseRaw, Epochs, Evoked, BaseSpectrum) + for inst in insts: + _validate_type(inst, valid_types, "each object in insts") + if type(inst) is not inst_type: + raise ValueError("All insts must be the same type") + + # ensure all insts have the same channels and channel order + ch_names = insts[0].ch_names + for inst in insts[1:]: + dif = set(inst.ch_names) ^ set(ch_names) + if len(dif): + raise ValueError( + "Channels do not match across the objects in insts. Consider calling " + "equalize_channels before calling this function." + ) + elif inst.ch_names != ch_names: + raise ValueError( + "Channel names are sorted differently across instances. Please use " + "mne.channels.equalize_channels." + ) + + # collect bads as dict keys so that insertion order is preserved, then cast to list + all_bads = dict() + for inst in insts: + all_bads.update(dict.fromkeys(inst.info["bads"])) + all_bads = list(all_bads) + + # update bads on all instances + for inst in insts: + inst.info["bads"] = all_bads + + return insts + + +class ReferenceMixin(MontageMixin): """Mixin class for Raw, Evoked, Epochs.""" @verbose - def set_eeg_reference(self, ref_channels='average', projection=False, - ch_type='auto', forward=None, *, joint=False, - verbose=None): + def set_eeg_reference( + self, + ref_channels="average", + projection=False, + ch_type="auto", + forward=None, + *, + joint=False, + verbose=None, + ): """Specify which reference to use for EEG data. Use this function to explicitly specify the desired reference for EEG. @@ -249,354 +329,54 @@ def set_eeg_reference(self, ref_channels='average', projection=False, directly re-referencing the data. %(set_eeg_reference_see_also_notes)s """ - from ..io.reference import set_eeg_reference - return set_eeg_reference(self, ref_channels=ref_channels, copy=False, - projection=projection, ch_type=ch_type, - forward=forward, joint=joint)[0] - - def _get_channel_positions(self, picks=None): - """Get channel locations from info. - - Parameters - ---------- - picks : str | list | slice | None - None gets good data indices. - - Notes - ----- - .. versionadded:: 0.9.0 - """ - picks = _picks_to_idx(self.info, picks) - chs = self.info['chs'] - pos = np.array([chs[k]['loc'][:3] for k in picks]) - n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0) - if n_zero > 1: # XXX some systems have origin (0, 0, 0) - raise ValueError('Could not extract channel positions for ' - '{} channels'.format(n_zero)) - return pos - - def _set_channel_positions(self, pos, names): - """Update channel locations in info. - - Parameters - ---------- - pos : array-like | np.ndarray, shape (n_points, 3) - The channel positions to be set. - names : list of str - The names of the channels to be set. - - Notes - ----- - .. 
versionadded:: 0.9.0 - """ - if len(pos) != len(names): - raise ValueError('Number of channel positions not equal to ' - 'the number of names given.') - pos = np.asarray(pos, dtype=np.float64) - if pos.shape[-1] != 3 or pos.ndim != 2: - msg = ('Channel positions must have the shape (n_points, 3) ' - 'not %s.' % (pos.shape,)) - raise ValueError(msg) - for name, p in zip(names, pos): - if name in self.ch_names: - idx = self.ch_names.index(name) - self.info['chs'][idx]['loc'][:3] = p - else: - msg = ('%s was not found in the info. Cannot be updated.' - % name) - raise ValueError(msg) - - @verbose - def set_channel_types(self, mapping, verbose=None): - """Define the sensor type of channels. - - Parameters - ---------- - mapping : dict - A dictionary mapping a channel to a sensor type (str), e.g., - ``{'EEG061': 'eog'}``. - %(verbose)s - - Returns - ------- - inst : instance of Raw | Epochs | Evoked - The instance (modified in place). - - .. versionchanged:: 0.20 - Return the instance. - - Notes - ----- - The following sensor types are accepted: - - ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst, - ecog, hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude, - fnirs_fd_phase, fnirs_od, temperature, gsr - - .. versionadded:: 0.9.0 - """ - ch_names = self.info['ch_names'] - - # first check and assemble clean mappings of index and name - unit_changes = dict() - for ch_name, ch_type in mapping.items(): - if ch_name not in ch_names: - raise ValueError("This channel name (%s) doesn't exist in " - "info." % ch_name) - - c_ind = ch_names.index(ch_name) - if ch_type not in _human2fiff: - raise ValueError('This function cannot change to this ' - 'channel type: %s. Accepted channel types ' - 'are %s.' - % (ch_type, - ", ".join(sorted(_human2unit.keys())))) - # Set sensor type - _check_set(self.info['chs'][c_ind], self.info['projs'], ch_type) - unit_old = self.info['chs'][c_ind]['unit'] - unit_new = _human2unit[ch_type] - if unit_old not in _unit2human: - raise ValueError("Channel '%s' has unknown unit (%s). Please " - "fix the measurement info of your data." - % (ch_name, unit_old)) - if unit_old != _human2unit[ch_type]: - this_change = (_unit2human[unit_old], _unit2human[unit_new]) - if this_change not in unit_changes: - unit_changes[this_change] = list() - unit_changes[this_change].append(ch_name) - self.info['chs'][c_ind]['unit'] = _human2unit[ch_type] - if ch_type in ['eeg', 'seeg', 'ecog', 'dbs']: - coil_type = FIFF.FIFFV_COIL_EEG - elif ch_type == 'hbo': - coil_type = FIFF.FIFFV_COIL_FNIRS_HBO - elif ch_type == 'hbr': - coil_type = FIFF.FIFFV_COIL_FNIRS_HBR - elif ch_type == 'fnirs_cw_amplitude': - coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE - elif ch_type == 'fnirs_fd_ac_amplitude': - coil_type = FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE - elif ch_type == 'fnirs_fd_phase': - coil_type = FIFF.FIFFV_COIL_FNIRS_FD_PHASE - elif ch_type == 'fnirs_od': - coil_type = FIFF.FIFFV_COIL_FNIRS_OD - else: - coil_type = FIFF.FIFFV_COIL_NONE - self.info['chs'][c_ind]['coil_type'] = coil_type - msg = "The unit for channel(s) {0} has changed from {1} to {2}." - for this_change, names in unit_changes.items(): - warn(msg.format(", ".join(sorted(names)), *this_change)) - return self - - @verbose - def rename_channels(self, mapping, allow_duplicates=False, verbose=None): - """Rename channels. - - Parameters - ---------- - %(mapping_rename_channels_duplicates)s - %(verbose)s - - Returns - ------- - inst : instance of Raw | Epochs | Evoked - The instance (modified in place). - - .. 
versionchanged:: 0.20 - Return the instance. - - Notes - ----- - .. versionadded:: 0.9.0 - """ - from ..io import BaseRaw - - ch_names_orig = list(self.info['ch_names']) - rename_channels(self.info, mapping, allow_duplicates) - - # Update self._orig_units for Raw - if isinstance(self, BaseRaw): - # whatever mapping was provided, now we can just use a dict - mapping = dict(zip(ch_names_orig, self.info['ch_names'])) - for old_name, new_name in mapping.items(): - if old_name in self._orig_units: - self._orig_units[new_name] = self._orig_units.pop(old_name) - ch_names = self.annotations.ch_names - for ci, ch in enumerate(ch_names): - ch_names[ci] = tuple(mapping.get(name, name) for name in ch) - - return self - - @verbose - def plot_sensors(self, kind='topomap', ch_type=None, title=None, - show_names=False, ch_groups=None, to_sphere=True, - axes=None, block=False, show=True, sphere=None, - verbose=None): - """Plot sensor positions. - - Parameters - ---------- - kind : str - Whether to plot the sensors as 3d, topomap or as an interactive - sensor selection dialog. Available options 'topomap', '3d', - 'select'. If 'select', a set of channels can be selected - interactively by using lasso selector or clicking while holding - control key. The selected channels are returned along with the - figure instance. Defaults to 'topomap'. - ch_type : None | str - The channel type to plot. Available options 'mag', 'grad', 'eeg', - 'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag, - grad, eeg, seeg, dbs, and ecog channels are plotted. If - None (default), then channels are chosen in the order given above. - title : str | None - Title for the figure. If None (default), equals to ``'Sensor - positions (%%s)' %% ch_type``. - show_names : bool | array of str - Whether to display all channel names. If an array, only the channel - names in the array are shown. Defaults to False. - ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None - Channel groups for coloring the sensors. If None (default), default - coloring scheme is used. If 'position', the sensors are divided - into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If - array, the channels are divided by picks given in the array. - - .. versionadded:: 0.13.0 - to_sphere : bool - Whether to project the 3d locations to a sphere. When False, the - sensor array appears similar as to looking downwards straight above - the subject's head. Has no effect when kind='3d'. Defaults to True. - - .. versionadded:: 0.14.0 - axes : instance of Axes | instance of Axes3D | None - Axes to draw the sensors to. If ``kind='3d'``, axes must be an - instance of Axes3D. If None (default), a new axes will be created. - - .. versionadded:: 0.13.0 - block : bool - Whether to halt program execution until the figure is closed. - Defaults to False. - - .. versionadded:: 0.13.0 - show : bool - Show figure if True. Defaults to True. - %(sphere_topomap_auto)s - %(verbose)s - - Returns - ------- - fig : instance of Figure - Figure containing the sensor topography. - selection : list - A list of selected channels. Only returned if ``kind=='select'``. - - See Also - -------- - mne.viz.plot_layout - - Notes - ----- - This function plots the sensor locations from the info structure using - matplotlib. For drawing the sensors using PyVista see - :func:`mne.viz.plot_alignment`. - - .. 
versionadded:: 0.12.0 - """ - from ..viz.utils import plot_sensors - return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title, - show_names=show_names, ch_groups=ch_groups, - to_sphere=to_sphere, axes=axes, block=block, - show=show, sphere=sphere, verbose=verbose) - - @verbose - def anonymize(self, daysback=None, keep_his=False, verbose=None): - """Anonymize measurement information in place. - - Parameters - ---------- - %(daysback_anonymize_info)s - %(keep_his_anonymize_info)s - %(verbose)s - - Returns - ------- - inst : instance of Raw | Epochs | Evoked - The modified instance. - - Notes - ----- - %(anonymize_info_notes)s - - .. versionadded:: 0.13.0 - """ - anonymize_info(self.info, daysback=daysback, keep_his=keep_his, - verbose=verbose) - self.set_meas_date(self.info['meas_date']) # unify annot update - return self - - def set_meas_date(self, meas_date): - """Set the measurement start date. - - Parameters - ---------- - meas_date : datetime | float | tuple | None - The new measurement date. - If datetime object, it must be timezone-aware and in UTC. - A tuple of (seconds, microseconds) or float (alias for - ``(meas_date, 0)``) can also be passed and a datetime - object will be automatically created. If None, will remove - the time reference. - - Returns - ------- - inst : instance of Raw | Epochs | Evoked - The modified raw instance. Operates in place. - - See Also - -------- - mne.io.Raw.anonymize - - Notes - ----- - If you want to remove all time references in the file, call - :func:`mne.io.anonymize_info(inst.info) ` - after calling ``inst.set_meas_date(None)``. - - .. versionadded:: 0.20 - """ - from ..annotations import _handle_meas_date - meas_date = _handle_meas_date(meas_date) - with self.info._unlock(): - self.info['meas_date'] = meas_date - - # clear file_id and meas_id if needed - if meas_date is None: - for key in ('file_id', 'meas_id'): - value = self.info.get(key) - if value is not None: - assert 'msecs' not in value - value['secs'] = DATE_NONE[0] - value['usecs'] = DATE_NONE[1] - # The following copy is needed for a test CTF dataset - # otherwise value['machid'][:] = 0 would suffice - _tmp = value['machid'].copy() - _tmp[:] = 0 - value['machid'] = _tmp - - if hasattr(self, 'annotations'): - self.annotations._orig_time = meas_date - return self - - -class UpdateChannelsMixin(object): + return set_eeg_reference( + self, + ref_channels=ref_channels, + copy=False, + projection=projection, + ch_type=ch_type, + forward=forward, + joint=joint, + )[0] + + +class UpdateChannelsMixin: """Mixin class for Raw, Evoked, Epochs, Spectrum, AverageTFR.""" @verbose - def pick_types(self, meg=False, eeg=False, stim=False, eog=False, - ecg=False, emg=False, ref_meg='auto', *, misc=False, - resp=False, chpi=False, exci=False, ias=False, syst=False, - seeg=False, dipole=False, gof=False, bio=False, - ecog=False, fnirs=False, csd=False, dbs=False, - temperature=False, gsr=False, - include=(), exclude='bads', selection=None, verbose=None): + @legacy(alt="inst.pick(...)") + def pick_types( + self, + meg=False, + eeg=False, + stim=False, + eog=False, + ecg=False, + emg=False, + ref_meg="auto", + *, + misc=False, + resp=False, + chpi=False, + exci=False, + ias=False, + syst=False, + seeg=False, + dipole=False, + gof=False, + bio=False, + ecog=False, + fnirs=False, + csd=False, + dbs=False, + temperature=False, + gsr=False, + eyetrack=False, + include=(), + exclude="bads", + selection=None, + verbose=None, + ): """Pick some channels by type and names. 
Parameters @@ -618,24 +398,47 @@ def pick_types(self, meg=False, eeg=False, stim=False, eog=False, .. versionadded:: 0.9.0 """ idx = pick_types( - self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg, - ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci, - ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio, - ecog=ecog, fnirs=fnirs, csd=csd, dbs=dbs, include=include, - exclude=exclude, selection=selection, temperature=temperature, - gsr=gsr) + self.info, + meg=meg, + eeg=eeg, + stim=stim, + eog=eog, + ecg=ecg, + emg=emg, + ref_meg=ref_meg, + misc=misc, + resp=resp, + chpi=chpi, + exci=exci, + ias=ias, + syst=syst, + seeg=seeg, + dipole=dipole, + gof=gof, + bio=bio, + ecog=ecog, + fnirs=fnirs, + csd=csd, + dbs=dbs, + temperature=temperature, + gsr=gsr, + eyetrack=eyetrack, + include=include, + exclude=exclude, + selection=selection, + ) self._pick_drop_channels(idx) # remove dropped channel types from reject and flat - if getattr(self, 'reject', None) is not None: - # use list(self.reject) to avoid RuntimeError for changing - # dictionary size during iteration + if getattr(self, "reject", None) is not None: + # use list(self.reject) to avoid RuntimeError for changing dictionary size + # during iteration for ch_type in list(self.reject): if ch_type not in self: del self.reject[ch_type] - if getattr(self, 'flat', None) is not None: + if getattr(self, "flat", None) is not None: for ch_type in list(self.flat): if ch_type not in self: del self.flat[ch_type] @@ -643,18 +446,15 @@ def pick_types(self, meg=False, eeg=False, stim=False, eog=False, return self @verbose - def pick_channels(self, ch_names, ordered=False, *, verbose=None): + @legacy(alt="inst.pick(...)") + def pick_channels(self, ch_names, ordered=True, *, verbose=None): """Pick some channels. Parameters ---------- ch_names : list The list of channels to select. - ordered : bool - If True (default False), ensure that the order of the channels in - the modified instance matches the order of ``ch_names``. - - .. versionadded:: 0.20.0 + %(ordered)s %(verbose)s .. versionadded:: 1.1 @@ -672,13 +472,15 @@ def pick_channels(self, ch_names, ordered=False, *, verbose=None): Notes ----- - The channel names given are assumed to be a set, i.e. the order - does not matter. The original order of the channels is preserved. - You can use ``reorder_channels`` to set channel order if necessary. + If ``ordered`` is ``False``, the channel names given via ``ch_names`` are + assumed to be a set, that is, their order does not matter. In that case, the + original order of the channels in the data is preserved. Apart from using + ``ordered=True``, you may also use ``reorder_channels`` to set channel order, + if necessary. .. versionadded:: 0.9.0 """ - picks = pick_channels(self.info['ch_names'], ch_names, ordered=ordered) + picks = pick_channels(self.info["ch_names"], ch_names, ordered=ordered) return self._pick_drop_channels(picks) @verbose @@ -700,9 +502,23 @@ def pick(self, picks, exclude=(), *, verbose=None): inst : instance of Raw, Epochs, or Evoked The modified instance. 
""" - picks = _picks_to_idx(self.info, picks, 'all', exclude, - allow_empty=False) - return self._pick_drop_channels(picks) + picks = _picks_to_idx(self.info, picks, "all", exclude, allow_empty=False) + self._pick_drop_channels(picks) + + # remove dropped channel types from reject and flat + if getattr(self, "reject", None) is not None: + # use list(self.reject) to avoid RuntimeError for changing dictionary size + # during iteration + for ch_type in list(self.reject): + if ch_type not in self: + del self.reject[ch_type] + + if getattr(self, "flat", None) is not None: + for ch_type in list(self.flat): + if ch_type not in self: + del self.flat[ch_type] + + return self def reorder_channels(self, ch_names): """Reorder channels. @@ -735,12 +551,12 @@ def reorder_channels(self, ch_names): for ch_name in ch_names: ii = self.ch_names.index(ch_name) if ii in idx: - raise ValueError('Channel name repeated: %s' % (ch_name,)) + raise ValueError(f"Channel name repeated: {ch_name}") idx.append(ii) return self._pick_drop_channels(idx) @fill_doc - def drop_channels(self, ch_names, on_missing='raise'): + def drop_channels(self, ch_names, on_missing="raise"): """Drop channel(s). Parameters @@ -770,69 +586,69 @@ def drop_channels(self, ch_names, on_missing='raise'): try: all_str = all([isinstance(ch, str) for ch in ch_names]) except TypeError: - raise ValueError("'ch_names' must be iterable, got " - "type {} ({}).".format(type(ch_names), ch_names)) + raise ValueError( + f"'ch_names' must be iterable, got type {type(ch_names)} ({ch_names})." + ) if not all_str: - raise ValueError("Each element in 'ch_names' must be str, got " - "{}.".format([type(ch) for ch in ch_names])) + raise ValueError( + "Each element in 'ch_names' must be str, got " + f"{[type(ch) for ch in ch_names]}." + ) missing = [ch for ch in ch_names if ch not in self.ch_names] if len(missing) > 0: msg = "Channel(s) {0} not found, nothing dropped." 
_on_missing(on_missing, msg.format(", ".join(missing))) - bad_idx = [self.ch_names.index(ch) for ch in ch_names - if ch in self.ch_names] + bad_idx = [self.ch_names.index(ch) for ch in ch_names if ch in self.ch_names] idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx) + if len(idx) == 0: + raise ValueError("All channels would be dropped.") return self._pick_drop_channels(idx) @verbose def _pick_drop_channels(self, idx, *, verbose=None): # avoid circular imports from ..io import BaseRaw - from ..time_frequency import AverageTFR, EpochsTFR - from ..time_frequency.spectrum import BaseSpectrum - msg = 'adding, dropping, or reordering channels' + msg = "adding, dropping, or reordering channels" if isinstance(self, BaseRaw): if self._projector is not None: - _check_preload(self, f'{msg} after calling .apply_proj()') + _check_preload(self, f"{msg} after calling .apply_proj()") else: _check_preload(self, msg) - if getattr(self, 'picks', None) is not None: + if getattr(self, "picks", None) is not None: self.picks = self.picks[idx] - if getattr(self, '_read_picks', None) is not None: + if getattr(self, "_read_picks", None) is not None: self._read_picks = [r[idx] for r in self._read_picks] - if hasattr(self, '_cals'): + if hasattr(self, "_cals"): self._cals = self._cals[idx] pick_info(self.info, idx, copy=False) - for key in ('_comp', '_projector'): + for key in ("_comp", "_projector"): mat = getattr(self, key, None) if mat is not None: setattr(self, key, mat[idx][:, idx]) - if isinstance(self, BaseSpectrum): - axis = self._dims.index('channel') - elif isinstance(self, (AverageTFR, EpochsTFR)): - axis = -3 + if hasattr(self, "_dims"): # Spectrum and "new-style" TFRs + axis = self._dims.index("channel") else: # All others (Evoked, Epochs, Raw) have chs axis=-2 axis = -2 - if hasattr(self, '_data'): # skip non-preloaded Raw + if hasattr(self, "_data"): # skip non-preloaded Raw self._data = self._data.take(idx, axis=axis) else: assert isinstance(self, BaseRaw) and not self.preload if isinstance(self, BaseRaw): - self.annotations._prune_ch_names(self.info, on_missing='ignore') + self.annotations._prune_ch_names(self.info, on_missing="ignore") self._orig_units = { - k: v for k, v in self._orig_units.items() - if k in self.ch_names} + k: v for k, v in self._orig_units.items() if k in self.ch_names + } self._pick_projs() return self @@ -840,30 +656,34 @@ def _pick_drop_channels(self, idx, *, verbose=None): def _pick_projs(self): """Keep only projectors which apply to at least 1 data channel.""" drop_idx = [] - for idx, proj in enumerate(self.info['projs']): - if not set(self.info['ch_names']) & set(proj['data']['col_names']): + for idx, proj in enumerate(self.info["projs"]): + if not set(self.info["ch_names"]) & set(proj["data"]["col_names"]): drop_idx.append(idx) for idx in drop_idx: logger.info(f"Removing projector {self.info['projs'][idx]}") - if drop_idx and hasattr(self, 'del_proj'): + if drop_idx and hasattr(self, "del_proj"): self.del_proj(drop_idx) return self def add_channels(self, add_list, force_update_info=False): - """Append new channels to the instance. + """Append new channels from other MNE objects to the instance. Parameters ---------- add_list : list - A list of objects to append to self. Must contain all the same - type as the current object. + A list of MNE objects to append to the current instance. + The channels contained in the other instances are appended to the + channels of the current instance. 
Therefore, all other instances + must be of the same type as the current object. + See notes on how to add data coming from an array. force_update_info : bool If True, force the info for objects to be appended to match the - values in ``self``. This should generally only be used when adding - stim channels for which important metadata won't be overwritten. + values of the current instance. This should generally only be + used when adding stim channels for which important metadata won't + be overwritten. .. versionadded:: 0.12 @@ -880,12 +700,19 @@ def add_channels(self, add_list, force_update_info=False): ----- If ``self`` is a Raw instance that has been preloaded into a :obj:`numpy.memmap` instance, the memmap will be resized. + + This function expects an MNE object to be appended (e.g. :class:`~mne.io.Raw`, + :class:`~mne.Epochs`, :class:`~mne.Evoked`). If you simply want to add a + channel based on values of an np.ndarray, you need to create a + :class:`~mne.io.RawArray`. + See `_ """ # avoid circular imports - from ..io import BaseRaw, _merge_info from ..epochs import BaseEpochs + from ..io import BaseRaw + from ..time_frequency import EpochsTFR - _validate_type(add_list, (list, tuple), 'Input') + _validate_type(add_list, (list, tuple), "Input") # Object-specific checks for inst in add_list + [self]: @@ -896,11 +723,14 @@ def add_channels(self, add_list, force_update_info=False): elif isinstance(self, BaseEpochs): con_axis = 1 comp_class = BaseEpochs + elif isinstance(self, EpochsTFR): + con_axis = 1 + comp_class = EpochsTFR else: con_axis = 0 comp_class = type(self) for inst in add_list: - _validate_type(inst, comp_class, 'All input') + _validate_type(inst, comp_class, "All input") data = [inst._data for inst in [self] + add_list] # Make sure that all dimensions other than channel axis are the same @@ -909,8 +739,9 @@ def add_channels(self, add_list, force_update_info=False): for shape in shapes: if not ((shapes[0] - shape) == 0).all(): raise ValueError( - 'All data dimensions except channels must match, got ' - f'{shapes[0]} != {shape}') + "All data dimensions except channels must match, got " + f"{shapes[0]} != {shape}" + ) del shapes # Create final data / info objects @@ -918,43 +749,50 @@ def add_channels(self, add_list, force_update_info=False): new_info = _merge_info(infos, force_update_to_first=force_update_info) # Now update the attributes - if isinstance(self._data, np.memmap) and con_axis == 0 and \ - sys.platform != 'darwin': # resizing not available--no mremap + if ( + isinstance(self._data, np.memmap) + and con_axis == 0 + and sys.platform != "darwin" + ): # resizing not available--no mremap # Use a resize and fill in other ones out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:] n_bytes = np.prod(out_shape) * self._data.dtype.itemsize self._data.flush() self._data.base.resize(n_bytes) - self._data = np.memmap(self._data.filename, mode='r+', - dtype=self._data.dtype, shape=out_shape) + self._data = np.memmap( + self._data.filename, mode="r+", dtype=self._data.dtype, shape=out_shape + ) assert self._data.shape == out_shape assert self._data.nbytes == n_bytes offset = len(data[0]) for d in data[1:]: this_len = len(d) - self._data[offset:offset + this_len] = d + self._data[offset : offset + this_len] = d offset += this_len else: self._data = np.concatenate(data, axis=con_axis) self.info = new_info if isinstance(self, BaseRaw): - self._cals = np.concatenate([getattr(inst, '_cals') - for inst in [self] + add_list]) + self._cals = np.concatenate( + [getattr(inst, 
"_cals") for inst in [self] + add_list] + ) # We should never use these since data are preloaded, let's just # set it to something large and likely to break (2 ** 31 - 1) - extra_idx = [2147483647] * sum(info['nchan'] for info in infos[1:]) - assert all(len(r) == infos[0]['nchan'] for r in self._read_picks) + extra_idx = [2147483647] * sum(info["nchan"] for info in infos[1:]) + assert all(len(r) == infos[0]["nchan"] for r in self._read_picks) self._read_picks = [ - np.concatenate([r, extra_idx]) for r in self._read_picks] - assert all(len(r) == self.info['nchan'] for r in self._read_picks) + np.concatenate([r, extra_idx]) for r in self._read_picks + ] + assert all(len(r) == self.info["nchan"] for r in self._read_picks) for other in add_list: self._orig_units.update(other._orig_units) elif isinstance(self, BaseEpochs): self.picks = np.arange(self._data.shape[1]) - if hasattr(self, '_projector'): + if hasattr(self, "_projector"): activate = False if self._do_delayed_proj else self.proj - self._projector, self.info = setup_proj(self.info, False, - activate=activate) + self._projector, self.info = setup_proj( + self.info, False, activate=activate + ) return self @@ -975,18 +813,22 @@ def add_reference_channels(self, ref_channels): inst : instance of Raw | Epochs | Evoked The modified instance. """ - from ..io.reference import add_reference_channels - return add_reference_channels(self, ref_channels, copy=False) -class InterpolationMixin(object): +class InterpolationMixin: """Mixin class for Raw, Evoked, Epochs.""" @verbose - def interpolate_bads(self, reset_bads=True, mode='accurate', - origin='auto', method=None, exclude=(), - verbose=None): + def interpolate_bads( + self, + reset_bads=True, + mode="accurate", + origin="auto", + method=None, + exclude=(), + verbose=None, + ): """Interpolate bad MEG and EEG channels. Operates in place. @@ -1005,22 +847,27 @@ def interpolate_bads(self, reset_bads=True, mode='accurate', origin fit. .. versionadded:: 0.17 - method : dict | None + method : dict | str | None Method to use for each channel type. - Currently only the key ``"eeg"`` has multiple options: - - ``"spline"`` (default) - Use spherical spline interpolation. - - ``"MNE"`` - Use minimum-norm projection to a sphere and back. - This is the method used for MEG channels. + - ``"meg"`` channels support ``"MNE"`` (default) and ``"nan"`` + - ``"eeg"`` channels support ``"spline"`` (default), ``"MNE"`` and ``"nan"`` + - ``"fnirs"`` channels support ``"nearest"`` (default) and ``"nan"`` + - ``"ecog"`` channels support ``"spline"`` (default) and ``"nan"`` + - ``"seeg"`` channels support ``"spline"`` (default) and ``"nan"`` - The value for ``"meg"`` is ``"MNE"``, and the value for - ``"fnirs"`` is ``"nearest"``. The default (None) is thus an alias - for:: + None is an alias for:: method=dict(meg="MNE", eeg="spline", fnirs="nearest") + If a :class:`str` is provided, the method will be applied to all channel + types supported and available in the instance. The method ``"nan"`` will + replace the channel data with ``np.nan``. + + .. warning:: + Be careful when using ``method="nan"``; the default value + ``reset_bads=True`` may not be what you want. + .. versionadded:: 0.21 exclude : list | tuple The channels to exclude from interpolation. If excluded a bad @@ -1034,72 +881,311 @@ def interpolate_bads(self, reset_bads=True, mode='accurate', Notes ----- + The ``"MNE"`` method uses minimum-norm projection to a sphere and back. + .. 
versionadded:: 0.9.0 """ - from ..bem import _check_origin - from .interpolation import _interpolate_bads_eeg,\ - _interpolate_bads_meeg, _interpolate_bads_nirs + from .interpolation import ( + _interpolate_bads_ecog, + _interpolate_bads_eeg, + _interpolate_bads_meeg, + _interpolate_bads_nan, + _interpolate_bads_nirs, + _interpolate_bads_seeg, + ) _check_preload(self, "interpolation") - method = _handle_default('interpolation_method', method) + _validate_type(method, (dict, str, None), "method") + method = _handle_default("interpolation_method", method) + ch_types = self.get_channel_types(unique=True) + # figure out if we have "mag" for "meg", "hbo" for "fnirs", ... to filter the + # "method" dictionary and keep only keys that correspond to existing channels. + for ch_type in ("meg", "fnirs"): + for sub_ch_type in _second_rules[ch_type][1].values(): + if sub_ch_type in ch_types: + ch_types.remove(sub_ch_type) + if ch_type not in ch_types: + ch_types.append(ch_type) + keys2delete = set(method) - set(ch_types) + for key in keys2delete: + del method[key] + valids = { + "eeg": ("spline", "MNE", "nan"), + "meg": ("MNE", "nan"), + "fnirs": ("nearest", "nan"), + "ecog": ("spline", "nan"), + "seeg": ("spline", "nan"), + } for key in method: - _check_option('method[key]', key, ('meg', 'eeg', 'fnirs')) - _check_option("method['eeg']", method['eeg'], ('spline', 'MNE')) - _check_option("method['meg']", method['meg'], ('MNE',)) - _check_option("method['fnirs']", method['fnirs'], ('nearest',)) - - if len(self.info['bads']) == 0: - warn('No bad channels to interpolate. Doing nothing...') + _check_option("method[key]", key, tuple(valids)) + _check_option(f"method['{key}']", method[key], valids[key]) + logger.info("Setting channel interpolation method to %s.", method) + idx = _picks_to_idx(self.info, list(method), exclude=(), allow_empty=True) + if idx.size == 0 or len(pick_info(self.info, idx)["bads"]) == 0: + warn("No bad channels to interpolate. 
Doing nothing...") return self - logger.info('Interpolating bad channels') - origin = _check_origin(origin, self.info) - if method['eeg'] == 'spline': + for ch_type in method.copy(): + idx = _picks_to_idx(self.info, ch_type, exclude=(), allow_empty=True) + if len(pick_info(self.info, idx)["bads"]) == 0: + method.pop(ch_type) + logger.info("Interpolating bad channels.") + needs_origin = [key != "seeg" and val != "nan" for key, val in method.items()] + if any(needs_origin): + origin = _check_origin(origin, self.info) + for ch_type, interp in method.items(): + if interp == "nan": + _interpolate_bads_nan(self, ch_type=ch_type, exclude=exclude) + if method.get("eeg", "") == "spline": _interpolate_bads_eeg(self, origin=origin, exclude=exclude) - eeg_mne = False - else: - eeg_mne = True - _interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne, - exclude=exclude) - _interpolate_bads_nirs(self, exclude=exclude) + meg_mne = method.get("meg", "") == "MNE" + eeg_mne = method.get("eeg", "") == "MNE" + if meg_mne or eeg_mne: + _interpolate_bads_meeg( + self, + mode=mode, + meg=meg_mne, + eeg=eeg_mne, + origin=origin, + exclude=exclude, + method=method, + ) + if method.get("fnirs", "") == "nearest": + _interpolate_bads_nirs(self, exclude=exclude) + if method.get("ecog", "") == "spline": + _interpolate_bads_ecog(self, origin=origin, exclude=exclude) + if method.get("seeg", "") == "spline": + _interpolate_bads_seeg(self, exclude=exclude) if reset_bads is True: - self.info['bads'] = \ - [ch for ch in self.info['bads'] if ch in exclude] + if "nan" in method.values(): + warn( + "interpolate_bads was called with method='nan' and " + "reset_bads=True. Consider setting reset_bads=False so that the " + "nan-containing channels can be easily excluded from later " + "computations." + ) + self.info["bads"] = [ch for ch in self.info["bads"] if ch in exclude] return self + def interpolate_to(self, sensors, origin="auto", method="spline", reg=0.0): + """Interpolate EEG data onto a new montage. + + .. warning:: + Be careful, only EEG channels are interpolated. Other channel types are + not interpolated. + + Parameters + ---------- + sensors : DigMontage + The target montage containing channel positions to interpolate onto. + origin : array-like, shape (3,) | str + Origin of the sphere in the head coordinate frame and in meters. + Can be ``'auto'`` (default), which means a head-digitization-based + origin fit. + method : str + Method to use for EEG channels. + Supported methods are 'spline' (default) and 'MNE'. + reg : float + The regularization parameter for the interpolation method + (only used when the method is 'spline'). + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The instance with updated channel locations and data. + + Notes + ----- + This method is useful for standardizing EEG layouts across datasets. + However, some attributes may be lost after interpolation. + + .. versionadded:: 1.10.0 + """ + from ..epochs import BaseEpochs, EpochsArray + from ..evoked import Evoked, EvokedArray + from ..forward._field_interpolation import _map_meg_or_eeg_channels + from ..io import RawArray + from ..io.base import BaseRaw + from .interpolation import _make_interpolation_matrix + from .montage import DigMontage + + # Check that the method option is valid. 
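# Hedged usage sketch, not part of the diff: the per-channel-type "method" argument
# of interpolate_bads() described above, including the "nan" option introduced on
# this branch. Synthetic data and montage; electrode names are made up.
import numpy as np
import mne

ch_names = ["Fz", "Cz", "Pz", "Oz", "F3", "F4", "C3", "C4", "P3", "P4"]
info = mne.create_info(ch_names, 100.0, "eeg")
raw = mne.io.RawArray(np.random.default_rng(0).normal(size=(10, 200)), info)
raw.set_montage(mne.channels.make_standard_montage("standard_1020"))
raw.info["bads"] = ["Cz"]
raw.copy().interpolate_bads(method=dict(eeg="spline"))       # spherical-spline fill-in
raw.copy().interpolate_bads(method="nan", reset_bads=False)  # new: mark bads as NaN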
+ _check_option("method", method, ["spline", "MNE"]) + _validate_type(sensors, DigMontage, "sensors") + + # Get target positions from the montage + ch_pos = sensors.get_positions().get("ch_pos", {}) + target_ch_names = list(ch_pos.keys()) + if not target_ch_names: + raise ValueError( + "The provided sensors configuration has no channel positions." + ) + + # Get original channel order + orig_names = self.info["ch_names"] + + # Identify EEG channel + picks_good_eeg = pick_types(self.info, meg=False, eeg=True, exclude="bads") + if len(picks_good_eeg) == 0: + raise ValueError("No good EEG channels available for interpolation.") + # Also get the full list of EEG channel indices (including bad channels) + picks_remove_eeg = pick_types(self.info, meg=False, eeg=True, exclude=[]) + eeg_names_orig = [orig_names[i] for i in picks_remove_eeg] + + # Identify non-EEG channels in original order + non_eeg_names_ordered = [ch for ch in orig_names if ch not in eeg_names_orig] + + # Create destination info for new EEG channels + sfreq = self.info["sfreq"] + info_interp = create_info( + ch_names=target_ch_names, + sfreq=sfreq, + ch_types=["eeg"] * len(target_ch_names), + ) + info_interp.set_montage(sensors) + info_interp["bads"] = [ch for ch in self.info["bads"] if ch in target_ch_names] + # Do not assign "projs" directly. + + # Compute the interpolation mapping + if method == "spline": + origin_val = _check_origin(origin, self.info) + pos_from = self.info._get_channel_positions(picks_good_eeg) - origin_val + pos_to = np.stack(list(ch_pos.values()), axis=0) + + def _check_pos_sphere(pos): + d = np.linalg.norm(pos, axis=-1) + d_norm = np.mean(d / np.mean(d)) + if np.abs(1.0 - d_norm) > 0.1: + warn("Your spherical fit is poor; interpolation may be inaccurate.") + + _check_pos_sphere(pos_from) + _check_pos_sphere(pos_to) + mapping = _make_interpolation_matrix(pos_from, pos_to, alpha=reg) + + else: + assert method == "MNE" + info_eeg = pick_info(self.info, picks_good_eeg) + # If the original info has an average EEG reference projector but + # the destination info does not, + # update info_interp via a temporary RawArray. + if _has_eeg_average_ref_proj(self.info) and not _has_eeg_average_ref_proj( + info_interp + ): + # Create dummy data: shape (n_channels, 1) + temp_data = np.zeros((len(info_interp["ch_names"]), 1)) + temp_raw = RawArray(temp_data, info_interp, first_samp=0) + # Using the public API, add an average reference projector. + temp_raw.set_eeg_reference( + ref_channels="average", projection=True, verbose=False + ) + # Extract the updated info. + info_interp = temp_raw.info + mapping = _map_meg_or_eeg_channels( + info_eeg, info_interp, mode="accurate", origin=origin + ) + + # Interpolate EEG data + data_good = self.get_data(picks=picks_good_eeg) + data_interp = mapping @ data_good + + # Create a new instance for the interpolated EEG channels + # TODO: Creating a new instance leads to a loss of information. + # We should consider updating the existing instance in the future + # by 1) drop channels, 2) add channels, 3) re-order channels. 
+ if isinstance(self, BaseRaw): + inst_interp = RawArray(data_interp, info_interp, first_samp=self.first_samp) + elif isinstance(self, BaseEpochs): + inst_interp = EpochsArray(data_interp, info_interp) + else: + assert isinstance(self, Evoked) + inst_interp = EvokedArray(data_interp, info_interp) + + # Merge only if non-EEG channels exist + if not non_eeg_names_ordered: + return inst_interp + + inst_non_eeg = self.copy().pick(non_eeg_names_ordered).load_data() + inst_out = inst_non_eeg.add_channels([inst_interp], force_update_info=True) + + # Reorder channels + # Insert the entire new EEG block at the position of the first EEG channel. + orig_names_arr = np.array(orig_names) + mask_eeg = np.isin(orig_names_arr, eeg_names_orig) + if mask_eeg.any(): + first_eeg_index = np.where(mask_eeg)[0][0] + pre = orig_names_arr[:first_eeg_index] + new_eeg = np.array(info_interp["ch_names"]) + post = orig_names_arr[first_eeg_index:] + post = post[~np.isin(orig_names_arr[first_eeg_index:], eeg_names_orig)] + new_order = np.concatenate((pre, new_eeg, post)).tolist() + else: + new_order = orig_names + inst_out.reorder_channels(new_order) + return inst_out + @verbose -def rename_channels(info, mapping, allow_duplicates=False, verbose=None): +def rename_channels( + info, mapping, allow_duplicates=False, *, on_missing="raise", verbose=None +): """Rename channels. Parameters ---------- %(info_not_none)s Note: modified in place. %(mapping_rename_channels_duplicates)s + %(on_missing_ch_names)s + + .. versionadded:: 1.11.0 %(verbose)s + + See Also + -------- + mne.channels.equalize_channels + mne.channels.unify_bad_channels + mne.channels.combine_channels """ - _validate_type(info, Info, 'info') + _validate_type(info, Info, "info") info._check_consistency() - bads = list(info['bads']) # make our own local copies - ch_names = list(info['ch_names']) + bads = list(info["bads"]) # make our own local copies + ch_names = list(info["ch_names"]) # first check and assemble clean mappings of index and name if isinstance(mapping, dict): - _check_dict_keys(mapping, ch_names, key_description="channel name(s)", - valid_key_source="info") - new_names = [(ch_names.index(ch_name), new_name) - for ch_name, new_name in mapping.items()] + if on_missing in ["warn", "ignore"]: + new_mapping = { + ch_old: ch_new + for ch_old, ch_new in mapping.items() + if ch_old in ch_names + } + else: + new_mapping = mapping + + if new_mapping != mapping and on_missing == "warn": + warn( + "Channel rename map contains keys that are not present in the object " + "to be renamed. These will be ignored." 
+ ) + + _check_dict_keys( + new_mapping, + ch_names, + key_description="channel name(s)", + valid_key_source="info", + ) + new_names = [ + (ch_names.index(ch_name), new_name) + for ch_name, new_name in new_mapping.items() + ] elif callable(mapping): - new_names = [(ci, mapping(ch_name)) - for ci, ch_name in enumerate(ch_names)] + new_names = [(ci, mapping(ch_name)) for ci, ch_name in enumerate(ch_names)] else: - raise ValueError('mapping must be callable or dict, not %s' - % (type(mapping),)) + raise ValueError(f"mapping must be callable or dict, not {type(mapping)}") # check we got all strings out of the mapping for new_name in new_names: - _validate_type(new_name[1], 'str', 'New channel mappings') + _validate_type(new_name[1], "str", "New channel mappings") # do the remapping locally for c_ind, new_name in new_names: @@ -1110,21 +1196,23 @@ def rename_channels(info, mapping, allow_duplicates=False, verbose=None): # check that all the channel names are unique if len(ch_names) != len(np.unique(ch_names)) and not allow_duplicates: - raise ValueError('New channel names are not unique, renaming failed') + raise ValueError("New channel names are not unique, renaming failed") # do the remapping in info - info['bads'] = bads + info["bads"] = [] ch_names_mapping = dict() - for ch, ch_name in zip(info['chs'], ch_names): - ch_names_mapping[ch['ch_name']] = ch_name - ch['ch_name'] = ch_name + for ch, ch_name in zip(info["chs"], ch_names): + ch_names_mapping[ch["ch_name"]] = ch_name + ch["ch_name"] = ch_name # .get b/c fwd info omits it - _rename_comps(info.get('comps', []), ch_names_mapping) - if 'projs' in info: # fwd might omit it - for proj in info['projs']: - proj['data']['col_names'][:] = \ - _rename_list(proj['data']['col_names'], ch_names_mapping) + _rename_comps(info.get("comps", []), ch_names_mapping) + if "projs" in info: # fwd might omit it + for proj in info["projs"]: + proj["data"]["col_names"][:] = _rename_list( + proj["data"]["col_names"], ch_names_mapping + ) info._update_redundant() + info["bads"] = bads info._check_consistency() @@ -1141,248 +1229,247 @@ class _BuiltinChannelAdjacency: name: str description: str fname: str - source_url: Union[str, None] + source_url: str | None _ft_neighbor_url_t = string.Template( - 'https://github.com/fieldtrip/fieldtrip/raw/master/' - 'template/neighbours/$fname' + "https://github.com/fieldtrip/fieldtrip/raw/master/template/neighbours/$fname" ) _BUILTIN_CHANNEL_ADJACENCIES = [ _BuiltinChannelAdjacency( - name='biosemi16', - description='Biosemi 16-electrode cap', - fname='biosemi16_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='biosemi16_neighb.mat'), + name="biosemi16", + description="Biosemi 16-electrode cap", + fname="biosemi16_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="biosemi16_neighb.mat"), ), _BuiltinChannelAdjacency( - name='biosemi32', - description='Biosemi 32-electrode cap', - fname='biosemi32_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='biosemi32_neighb.mat'), + name="biosemi32", + description="Biosemi 32-electrode cap", + fname="biosemi32_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="biosemi32_neighb.mat"), ), _BuiltinChannelAdjacency( - name='biosemi64', - description='Biosemi 64-electrode cap', - fname='biosemi64_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='biosemi64_neighb.mat'), + name="biosemi64", + description="Biosemi 64-electrode cap", + fname="biosemi64_neighb.mat", + 
source_url=_ft_neighbor_url_t.substitute(fname="biosemi64_neighb.mat"), ), _BuiltinChannelAdjacency( - name='bti148', - description='BTI 148-channel system', - fname='bti148_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='bti148_neighb.mat'), + name="bti148", + description="BTI 148-channel system", + fname="bti148_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="bti148_neighb.mat"), ), _BuiltinChannelAdjacency( - name='bti248', - description='BTI 248-channel system', - fname='bti248_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='bti248_neighb.mat'), + name="bti248", + description="BTI 248-channel system", + fname="bti248_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="bti248_neighb.mat"), ), _BuiltinChannelAdjacency( - name='bti248grad', - description='BTI 248 gradiometer system', - fname='bti248grad_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='bti248grad_neighb.mat'), # noqa: E501 + name="bti248grad", + description="BTI 248 gradiometer system", + fname="bti248grad_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="bti248grad_neighb.mat"), ), _BuiltinChannelAdjacency( - name='ctf64', - description='CTF 64 axial gradiometer', - fname='ctf64_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='ctf64_neighb.mat'), + name="ctf64", + description="CTF 64 axial gradiometer", + fname="ctf64_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="ctf64_neighb.mat"), ), _BuiltinChannelAdjacency( - name='ctf151', - description='CTF 151 axial gradiometer', - fname='ctf151_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='ctf151_neighb.mat'), + name="ctf151", + description="CTF 151 axial gradiometer", + fname="ctf151_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="ctf151_neighb.mat"), ), _BuiltinChannelAdjacency( - name='ctf275', - description='CTF 275 axial gradiometer', - fname='ctf275_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='ctf275_neighb.mat'), + name="ctf275", + description="CTF 275 axial gradiometer", + fname="ctf275_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="ctf275_neighb.mat"), ), _BuiltinChannelAdjacency( - name='easycap32ch-avg', - description='', - fname='easycap32ch-avg_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='easycap32ch-avg_neighb.mat'), # noqa: E501 + name="easycap32ch-avg", + description="", + fname="easycap32ch-avg_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycap32ch-avg_neighb.mat"), ), _BuiltinChannelAdjacency( - name='easycap64ch-avg', - description='', - fname='easycap64ch-avg_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='easycap64ch-avg_neighb.mat'), # noqa: E501 + name="easycap64ch-avg", + description="", + fname="easycap64ch-avg_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycap64ch-avg_neighb.mat"), ), _BuiltinChannelAdjacency( - name='easycap128ch-avg', - description='', - fname='easycap128ch-avg_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='easycap128ch-avg_neighb.mat'), # noqa: E501 + name="easycap128ch-avg", + description="", + fname="easycap128ch-avg_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycap128ch-avg_neighb.mat"), ), _BuiltinChannelAdjacency( - name='easycapM1', - description='Easycap M1', - fname='easycapM1_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='easycapM1_neighb.mat'), + name="easycapM1", + description="Easycap M1", + fname="easycapM1_neighb.mat", + 
source_url=_ft_neighbor_url_t.substitute(fname="easycapM1_neighb.mat"), ), _BuiltinChannelAdjacency( - name='easycapM11', - description='Easycap M11', - fname='easycapM11_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='easycapM11_neighb.mat'), # noqa: E501 + name="easycapM11", + description="Easycap M11", + fname="easycapM11_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycapM11_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='easycapM14', - description='Easycap M14', - fname='easycapM14_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='easycapM14_neighb.mat'), # noqa: E501 + name="easycapM14", + description="Easycap M14", + fname="easycapM14_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycapM14_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='easycapM15', - description='Easycap M15', - fname='easycapM15_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='easycapM15_neighb.mat'), # noqa: E501 + name="easycapM15", + description="Easycap M15", + fname="easycapM15_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycapM15_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='KIT-157', - description='', - fname='KIT-157_neighb.mat', + name="KIT-157", + description="", + fname="KIT-157_neighb.mat", source_url=None, ), _BuiltinChannelAdjacency( - name='KIT-208', - description='', - fname='KIT-208_neighb.mat', + name="KIT-208", + description="", + fname="KIT-208_neighb.mat", source_url=None, ), _BuiltinChannelAdjacency( - name='KIT-NYU-2019', - description='', - fname='KIT-NYU-2019_neighb.mat', + name="KIT-NYU-2019", + description="", + fname="KIT-NYU-2019_neighb.mat", source_url=None, ), _BuiltinChannelAdjacency( - name='KIT-UMD-1', - description='', - fname='KIT-UMD-1_neighb.mat', + name="KIT-UMD-1", + description="", + fname="KIT-UMD-1_neighb.mat", source_url=None, ), _BuiltinChannelAdjacency( - name='KIT-UMD-2', - description='', - fname='KIT-UMD-2_neighb.mat', + name="KIT-UMD-2", + description="", + fname="KIT-UMD-2_neighb.mat", source_url=None, ), _BuiltinChannelAdjacency( - name='KIT-UMD-3', - description='', - fname='KIT-UMD-3_neighb.mat', + name="KIT-UMD-3", + description="", + fname="KIT-UMD-3_neighb.mat", source_url=None, ), _BuiltinChannelAdjacency( - name='KIT-UMD-4', - description='', - fname='KIT-UMD-4_neighb.mat', + name="KIT-UMD-4", + description="", + fname="KIT-UMD-4_neighb.mat", source_url=None, ), _BuiltinChannelAdjacency( - name='neuromag306mag', - description='Neuromag306, only magnetometers', - fname='neuromag306mag_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='neuromag306mag_neighb.mat'), # noqa: E501 + name="neuromag306mag", + description="Neuromag306, only magnetometers", + fname="neuromag306mag_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="neuromag306mag_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='neuromag306planar', - description='Neuromag306, only planar gradiometers', - fname='neuromag306planar_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='neuromag306planar_neighb.mat'), # noqa: E501 + name="neuromag306planar", + description="Neuromag306, only planar gradiometers", + fname="neuromag306planar_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="neuromag306planar_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='neuromag122cmb', - description='Neuromag122, only combined planar gradiometers', - fname='neuromag122cmb_neighb.mat', - 
source_url=_ft_neighbor_url_t.substitute(fname='neuromag122cmb_neighb.mat'), # noqa: E501 + name="neuromag122cmb", + description="Neuromag122, only combined planar gradiometers", + fname="neuromag122cmb_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="neuromag122cmb_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='neuromag306cmb', - description='Neuromag306, only combined planar gradiometers', - fname='neuromag306cmb_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='neuromag306cmb_neighb.mat'), # noqa: E501 + name="neuromag306cmb", + description="Neuromag306, only combined planar gradiometers", + fname="neuromag306cmb_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="neuromag306cmb_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='ecog256', - description='ECOG 256channels, average referenced', - fname='ecog256_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='ecog256_neighb.mat'), # noqa: E501 + name="ecog256", + description="ECOG 256channels, average referenced", + fname="ecog256_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="ecog256_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='ecog256bipolar', - description='ECOG 256channels, bipolar referenced', - fname='ecog256bipolar_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='ecog256bipolar_neighb.mat'), # noqa: E501 + name="ecog256bipolar", + description="ECOG 256channels, bipolar referenced", + fname="ecog256bipolar_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="ecog256bipolar_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='eeg1010_neighb', - description='', - fname='eeg1010_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='eeg1010_neighb.mat'), + name="eeg1010_neighb", + description="", + fname="eeg1010_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="eeg1010_neighb.mat"), ), _BuiltinChannelAdjacency( - name='elec1005', - description='Standard 10-05 system', - fname='elec1005_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='elec1005_neighb.mat'), + name="elec1005", + description="Standard 10-05 system", + fname="elec1005_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="elec1005_neighb.mat"), ), _BuiltinChannelAdjacency( - name='elec1010', - description='Standard 10-10 system', - fname='elec1010_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='elec1010_neighb.mat'), + name="elec1010", + description="Standard 10-10 system", + fname="elec1010_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="elec1010_neighb.mat"), ), _BuiltinChannelAdjacency( - name='elec1020', - description='Standard 10-20 system', - fname='elec1020_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='elec1020_neighb.mat'), + name="elec1020", + description="Standard 10-20 system", + fname="elec1020_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="elec1020_neighb.mat"), ), _BuiltinChannelAdjacency( - name='itab28', - description='ITAB 28-channel system', - fname='itab28_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='itab28_neighb.mat'), + name="itab28", + description="ITAB 28-channel system", + fname="itab28_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="itab28_neighb.mat"), ), _BuiltinChannelAdjacency( - name='itab153', - description='ITAB 153-channel system', - fname='itab153_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='itab153_neighb.mat'), + name="itab153", + 
description="ITAB 153-channel system", + fname="itab153_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="itab153_neighb.mat"), ), _BuiltinChannelAdjacency( - name='language29ch-avg', - description='MPI for Psycholinguistic: Averaged 29-channel cap', - fname='language29ch-avg_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='language29ch-avg_neighb.mat'), # noqa: E501 + name="language29ch-avg", + description="MPI for Psycholinguistic: Averaged 29-channel cap", + fname="language29ch-avg_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="language29ch-avg_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='mpi_59_channels', - description='MPI for Psycholinguistic: 59-channel cap', - fname='mpi_59_channels_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='mpi_59_channels_neighb.mat'), # noqa: E501 + name="mpi_59_channels", + description="MPI for Psycholinguistic: 59-channel cap", + fname="mpi_59_channels_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="mpi_59_channels_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='yokogawa160', - description='', - fname='yokogawa160_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='yokogawa160_neighb.mat'), # noqa: E501 + name="yokogawa160", + description="", + fname="yokogawa160_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="yokogawa160_neighb.mat"), # noqa: E501 ), _BuiltinChannelAdjacency( - name='yokogawa440', - description='', - fname='yokogawa440_neighb.mat', - source_url=_ft_neighbor_url_t.substitute(fname='yokogawa440_neighb.mat'), # noqa: E501 + name="yokogawa440", + description="", + fname="yokogawa440_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="yokogawa440_neighb.mat"), # noqa: E501 ), ] @@ -1418,13 +1505,10 @@ def get_builtin_ch_adjacencies(*, descriptions=False): if descriptions: return sorted( [(m.name, m.description) for m in _BUILTIN_CHANNEL_ADJACENCIES], - key=lambda x: x[0].casefold() # only sort based on name + key=lambda x: x[0].casefold(), # only sort based on name ) else: - return sorted( - [m.name for m in _BUILTIN_CHANNEL_ADJACENCIES], - key=str.casefold - ) + return sorted([m.name for m in _BUILTIN_CHANNEL_ADJACENCIES], key=str.casefold) @fill_doc @@ -1445,12 +1529,11 @@ def read_ch_adjacency(fname, picks=None): You can retrieve the names of all built-in channel adjacencies via :func:`mne.channels.get_builtin_ch_adjacencies`. - %(picks_all)s - Picks must match the template. + %(picks_all_notypes)s Returns ------- - ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels) + ch_adjacency : scipy.sparse.csr_array, shape (n_channels, n_channels) The adjacency matrix. ch_names : list The list of channel names present in adjacency matrix. @@ -1472,7 +1555,6 @@ def read_ch_adjacency(fname, picks=None): :func:`mne.stats.combine_adjacency` to prepare a final "adjacency" to pass to the eventual function. """ - from scipy.io import loadmat if op.isabs(fname): fname = str( _check_fname( @@ -1484,20 +1566,19 @@ def read_ch_adjacency(fname, picks=None): else: # built-in FieldTrip neighbors ch_adj_name = fname del fname - if ch_adj_name.endswith('_neighb.mat'): # backward-compat - ch_adj_name = ch_adj_name.replace('_neighb.mat', '') + if ch_adj_name.endswith("_neighb.mat"): # backward-compat + ch_adj_name = ch_adj_name.replace("_neighb.mat", "") if ch_adj_name not in get_builtin_ch_adjacencies(): raise ValueError( - f'No built-in channel adjacency matrix found with name: ' - f'{ch_adj_name}. 
Valid names are: ' - f'{", ".join(get_builtin_ch_adjacencies())}' + f"No built-in channel adjacency matrix found with name: " + f"{ch_adj_name}. Valid names are: " + f"{', '.join(get_builtin_ch_adjacencies())}" ) - ch_adj = [a for a in _BUILTIN_CHANNEL_ADJACENCIES - if a.name == ch_adj_name][0] + ch_adj = [a for a in _BUILTIN_CHANNEL_ADJACENCIES if a.name == ch_adj_name][0] fname = ch_adj.fname - templates_dir = Path(__file__).resolve().parent / 'data' / 'neighbors' + templates_dir = Path(__file__).resolve().parent / "data" / "neighbors" fname = str( _check_fname( # only needed to convert to a string fname=templates_dir / fname, @@ -1506,23 +1587,17 @@ def read_ch_adjacency(fname, picks=None): ) ) - nb = loadmat(fname)['neighbours'] - ch_names = _recursive_flatten(nb['label'], str) - picks = _picks_to_idx(len(ch_names), picks) - neighbors = [_recursive_flatten(c, str) for c in - nb['neighblabel'].flatten()] + nb = loadmat(fname)["neighbours"] + ch_names = _recursive_flatten(nb["label"], str) + temp_info = create_info(ch_names, 1.0) + picks = _picks_to_idx(temp_info, picks, none="all") + neighbors = [_recursive_flatten(c, str) for c in nb["neighblabel"].flatten()] assert len(ch_names) == len(neighbors) adjacency = _ch_neighbor_adjacency(ch_names, neighbors) # picking before constructing matrix is buggy adjacency = adjacency[picks][:, picks] ch_names = [ch_names[p] for p in picks] - # make sure MEG channel names contain space after "MEG" - for idx, ch_name in enumerate(ch_names): - if ch_name.startswith('MEG') and not ch_name[3] == ' ': - ch_name = ch_name.replace('MEG', 'MEG ') - ch_names[idx] = ch_name - return adjacency, ch_names @@ -1543,25 +1618,23 @@ def _ch_neighbor_adjacency(ch_names, neighbors): ch_adjacency : scipy.sparse.spmatrix The adjacency matrix. """ - from scipy import sparse if len(ch_names) != len(neighbors): - raise ValueError('`ch_names` and `neighbors` must ' - 'have the same length') + raise ValueError("`ch_names` and `neighbors` must have the same length") set_neighbors = {c for d in neighbors for c in d} rest = set_neighbors - set(ch_names) if len(rest) > 0: - raise ValueError('Some of your neighbors are not present in the ' - 'list of channel names') + raise ValueError( + "Some of your neighbors are not present in the list of channel names" + ) for neigh in neighbors: - if (not isinstance(neigh, list) and - not all(isinstance(c, str) for c in neigh)): - raise ValueError('`neighbors` must be a list of lists of str') + if not isinstance(neigh, list) and not all(isinstance(c, str) for c in neigh): + raise ValueError("`neighbors` must be a list of lists of str") ch_adjacency = np.eye(len(ch_names), dtype=bool) for ii, neigbs in enumerate(neighbors): ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True - ch_adjacency = sparse.csr_matrix(ch_adjacency) + ch_adjacency = csr_array(ch_adjacency) return ch_adjacency @@ -1583,7 +1656,7 @@ def find_ch_adjacency(info, ch_type): Returns ------- - ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels) + ch_adjacency : scipy.sparse.csr_array, shape (n_channels, n_channels) The adjacency matrix. ch_names : list The list of channel names present in adjacency matrix. @@ -1616,52 +1689,70 @@ def find_ch_adjacency(info, ch_type): :func:`mne.stats.combine_adjacency` to prepare a final "adjacency" to pass to the eventual function. 
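# A minimal sketch (not from this diff) of reading one of the built-in FieldTrip
# templates listed above by name; under this patch the matrix is returned as a
# SciPy ``csr_array`` (older releases returned ``csr_matrix``).
import mne

print(mne.channels.get_builtin_ch_adjacencies()[:3])
# ['biosemi16', 'biosemi32', 'biosemi64']  (sorted case-insensitively)
adjacency, ch_names = mne.channels.read_ch_adjacency("biosemi64")
print(adjacency.shape, len(ch_names))  # (64, 64) 64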
""" + from ..io.kit.constants import KIT_NEIGHBORS + if ch_type is None: picks = channel_indices_by_type(info) if sum([len(p) != 0 for p in picks.values()]) != 1: - raise ValueError('info must contain only one channel type if ' - 'ch_type is None.') + raise ValueError( + "info must contain only one channel type if ch_type is None." + ) ch_type = channel_type(info, 0) else: - _check_option('ch_type', ch_type, ['mag', 'grad', 'eeg']) - (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types, - has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils, - has_eeg_coils_and_meg, has_eeg_coils_only, - has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info) + _check_option("ch_type", ch_type, ["mag", "grad", "eeg"]) + ( + has_vv_mag, + has_vv_grad, + is_old_vv, + has_4D_mag, + ctf_other_types, + has_CTF_grad, + n_kit_grads, + has_any_meg, + has_eeg_coils, + has_eeg_coils_and_meg, + has_eeg_coils_only, + has_neuromag_122_grad, + has_csd_coils, + ) = _get_ch_info(info) conn_name = None - if has_vv_mag and ch_type == 'mag': - conn_name = 'neuromag306mag' - elif has_vv_grad and ch_type == 'grad': - conn_name = 'neuromag306planar' + if has_vv_mag and ch_type == "mag": + conn_name = "neuromag306mag" + elif has_vv_grad and ch_type == "grad": + conn_name = "neuromag306planar" elif has_4D_mag: - if 'MEG 248' in info['ch_names']: - idx = info['ch_names'].index('MEG 248') - grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD - mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG - if ch_type == 'grad' and grad: - conn_name = 'bti248grad' - elif ch_type == 'mag' and mag: - conn_name = 'bti248' - elif 'MEG 148' in info['ch_names'] and ch_type == 'mag': - idx = info['ch_names'].index('MEG 148') - if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG: - conn_name = 'bti148' - elif has_CTF_grad and ch_type == 'mag': - if info['nchan'] < 100: - conn_name = 'ctf64' - elif info['nchan'] > 200: - conn_name = 'ctf275' + if "MEG 248" in info["ch_names"]: + idx = info["ch_names"].index("MEG 248") + grad = info["chs"][idx]["coil_type"] == FIFF.FIFFV_COIL_MAGNES_GRAD + mag = info["chs"][idx]["coil_type"] == FIFF.FIFFV_COIL_MAGNES_MAG + if ch_type == "grad" and grad: + conn_name = "bti248grad" + elif ch_type == "mag" and mag: + conn_name = "bti248" + elif "MEG 148" in info["ch_names"] and ch_type == "mag": + idx = info["ch_names"].index("MEG 148") + if info["chs"][idx]["coil_type"] == FIFF.FIFFV_COIL_MAGNES_MAG: + conn_name = "bti148" + elif has_CTF_grad and ch_type == "mag": + if info["nchan"] < 100: + conn_name = "ctf64" + elif info["nchan"] > 200: + conn_name = "ctf275" else: - conn_name = 'ctf151' + conn_name = "ctf151" elif n_kit_grads > 0: - from ..io.kit.constants import KIT_NEIGHBORS - conn_name = KIT_NEIGHBORS.get(info['kit_system_id']) + conn_name = KIT_NEIGHBORS.get(info["kit_system_id"]) if conn_name is not None: - logger.info(f'Reading adjacency matrix for {conn_name}.') - return read_ch_adjacency(conn_name) - logger.info('Could not find a adjacency matrix for the data. ' - 'Computing adjacency based on Delaunay triangulations.') + logger.info(f"Reading adjacency matrix for {conn_name}.") + adjacency, ch_names = read_ch_adjacency(conn_name) + if conn_name.startswith("neuromag") and info["ch_names"][0].startswith("MEG "): + ch_names = [ch_name.replace("MEG", "MEG ") for ch_name in ch_names] + return adjacency, ch_names + logger.info( + "Could not find a adjacency matrix for the data. " + "Computing adjacency based on Delaunay triangulations." 
+ ) return _compute_ch_adjacency(info, ch_type) @@ -1674,34 +1765,35 @@ def _compute_ch_adjacency(info, ch_type): %(info_not_none)s ch_type : str The channel type for computing the adjacency matrix. Currently - supports 'mag', 'grad' and 'eeg'. + supports ``'mag'``, ``'grad'`` and ``'eeg'``. Returns ------- - ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels) + ch_adjacency : scipy.sparse.csr_array, shape (n_channels, n_channels) The adjacency matrix. ch_names : list The list of channel names present in adjacency matrix. """ - from scipy import sparse - from scipy.spatial import Delaunay - from .. import spatial_tris_adjacency from ..channels.layout import _find_topomap_coords, _pair_grad_sensors - combine_grads = (ch_type == 'grad' - and any([coil_type in [ch['coil_type'] - for ch in info['chs']] - for coil_type in - [FIFF.FIFFV_COIL_VV_PLANAR_T1, - FIFF.FIFFV_COIL_NM_122]])) + from ..source_estimate import spatial_tris_adjacency + + combine_grads = ch_type == "grad" and any( + [ + coil_type in [ch["coil_type"] for ch in info["chs"]] + for coil_type in [FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFFV_COIL_NM_122] + ] + ) picks = dict(_picks_by_type(info, exclude=[]))[ch_type] - ch_names = [info['ch_names'][pick] for pick in picks] + ch_names = [info["ch_names"][pick] for pick in picks] if combine_grads: pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[]) if len(pairs) != len(picks): - raise RuntimeError('Cannot find a pair for some of the ' - 'gradiometers. Cannot compute adjacency ' - 'matrix.') + raise RuntimeError( + "Cannot find a pair for some of the " + "gradiometers. Cannot compute adjacency " + "matrix." + ) # only for one of the pair xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT) else: @@ -1716,9 +1808,9 @@ def _compute_ch_adjacency(info, ch_type): for jj in range(2): ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True # pair - ch_adjacency = sparse.csr_matrix(ch_adjacency) + ch_adjacency = csr_array(ch_adjacency) else: - ch_adjacency = sparse.lil_matrix(neighbors) + ch_adjacency = lil_array(neighbors) ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0])) ch_adjacency = ch_adjacency.tocsr() @@ -1757,28 +1849,25 @@ def fix_mag_coil_types(info, use_cal=False): Therefore the use of ``fix_mag_coil_types`` is not mandatory. """ old_mag_inds = _get_T1T2_mag_inds(info, use_cal) - + n_mag = len(pick_types(info, meg="mag", exclude=[])) for ii in old_mag_inds: - info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3 - logger.info('%d of %d magnetometer types replaced with T3.' % - (len(old_mag_inds), - len(pick_types(info, meg='mag', exclude=[])))) + info["chs"][ii]["coil_type"] = FIFF.FIFFV_COIL_VV_MAG_T3 + logger.info(f"{len(old_mag_inds)} of {n_mag} magnetometer types replaced with T3.") info._check_consistency() def _get_T1T2_mag_inds(info, use_cal=False): """Find T1/T2 magnetometer coil types.""" - picks = pick_types(info, meg='mag', exclude=[]) + picks = pick_types(info, meg="mag", exclude=[]) old_mag_inds = [] # From email exchanges, systems with the larger T2 coil only use the cal # value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10 # (Triux). So we can use a simple check for > 3e-11. 
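# A minimal sketch (not from this diff) of the Delaunay fallback taken above when no
# built-in template matches, here for an EEG-only recording with standard 10-20
# positions; the montage name and channel count are arbitrary choices.
import mne

montage = mne.channels.make_standard_montage("standard_1020")
info = mne.create_info(montage.ch_names[:32], sfreq=250.0, ch_types="eeg")
info.set_montage(montage)
adjacency, ch_names = mne.channels.find_ch_adjacency(info, ch_type="eeg")
print(adjacency.shape)  # (32, 32), built from a Delaunay triangulation of the positions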
for ii in picks: - ch = info['chs'][ii] - if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1, - FIFF.FIFFV_COIL_VV_MAG_T2): + ch = info["chs"][ii] + if ch["coil_type"] in (FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2): if use_cal: - if ch['cal'] > 3e-11: + if ch["cal"] > 3e-11: old_mag_inds.append(ii) else: old_mag_inds.append(ii) @@ -1787,80 +1876,114 @@ def _get_T1T2_mag_inds(info, use_cal=False): def _get_ch_info(info): """Get channel info for inferring acquisition device.""" - chs = info['chs'] + chs = info["chs"] # Only take first 16 bits, as higher bits store CTF comp order - coil_types = {ch['coil_type'] & 0xFFFF for ch in chs} - channel_types = {ch['kind'] for ch in chs} - - has_vv_mag = any(k in coil_types for k in - [FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2, - FIFF.FIFFV_COIL_VV_MAG_T3]) - has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1, - FIFF.FIFFV_COIL_VV_PLANAR_T2, - FIFF.FIFFV_COIL_VV_PLANAR_T3]) - has_neuromag_122_grad = any(k in coil_types - for k in [FIFF.FIFFV_COIL_NM_122]) - - is_old_vv = ' ' in chs[0]['ch_name'] + coil_types = {ch["coil_type"] & 0xFFFF for ch in chs} + channel_types = {ch["kind"] for ch in chs} + + has_vv_mag = any( + k in coil_types + for k in [ + FIFF.FIFFV_COIL_VV_MAG_T1, + FIFF.FIFFV_COIL_VV_MAG_T2, + FIFF.FIFFV_COIL_VV_MAG_T3, + ] + ) + has_vv_grad = any( + k in coil_types + for k in [ + FIFF.FIFFV_COIL_VV_PLANAR_T1, + FIFF.FIFFV_COIL_VV_PLANAR_T2, + FIFF.FIFFV_COIL_VV_PLANAR_T3, + ] + ) + has_neuromag_122_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_NM_122]) + + is_old_vv = " " in chs[0]["ch_name"] has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types - ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG, - FIFF.FIFFV_COIL_CTF_REF_GRAD, - FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD) - has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or - (FIFF.FIFFV_MEG_CH in channel_types and - any(k in ctf_other_types for k in coil_types))) + ctf_other_types = ( + FIFF.FIFFV_COIL_CTF_REF_MAG, + FIFF.FIFFV_COIL_CTF_REF_GRAD, + FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD, + ) + has_CTF_grad = FIFF.FIFFV_COIL_CTF_GRAD in coil_types or ( + FIFF.FIFFV_MEG_CH in channel_types + and any(k in ctf_other_types for k in coil_types) + ) # hack due to MNE-C bug in IO of CTF # only take first 16 bits, as higher bits store CTF comp order - n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD - for ch in chs) - - has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad, - n_kit_grads]) - has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and - FIFF.FIFFV_EEG_CH in channel_types) + n_kit_grads = sum( + ch["coil_type"] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD for ch in chs + ) + + has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad, n_kit_grads]) + has_eeg_coils = ( + FIFF.FIFFV_COIL_EEG in coil_types and FIFF.FIFFV_EEG_CH in channel_types + ) has_eeg_coils_and_meg = has_eeg_coils and has_any_meg has_eeg_coils_only = has_eeg_coils and not has_any_meg - has_csd_coils = (FIFF.FIFFV_COIL_EEG_CSD in coil_types and - FIFF.FIFFV_EEG_CH in channel_types) - - return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types, - has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils, - has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad, - has_csd_coils) + has_csd_coils = ( + FIFF.FIFFV_COIL_EEG_CSD in coil_types and FIFF.FIFFV_EEG_CH in channel_types + ) + + return ( + has_vv_mag, + has_vv_grad, + is_old_vv, + has_4D_mag, + ctf_other_types, + has_CTF_grad, + n_kit_grads, + has_any_meg, + 
has_eeg_coils, + has_eeg_coils_and_meg, + has_eeg_coils_only, + has_neuromag_122_grad, + has_csd_coils, + ) @fill_doc -def make_1020_channel_selections(info, midline="z"): - """Return dict mapping from ROI names to lists of picks for 10/20 setups. - - This passes through all channel names, and uses a simple heuristic to - separate channel names into three Region of Interest-based selections: - Left, Midline and Right. The heuristic is that channels ending on any of - the characters in ``midline`` are filed under that heading, otherwise those - ending in odd numbers under "Left", those in even numbers under "Right". - Other channels are ignored. This is appropriate for 10/20 files, but not - for other channel naming conventions. - If an info object is provided, lists are sorted from posterior to anterior. +def make_1020_channel_selections(info, midline="z", *, return_ch_names=False): + """Map hemisphere names to corresponding EEG channel names or indices. + + This function uses a simple heuristic to separate channel names into three + Region of Interest-based selections: ``Left``, ``Midline`` and ``Right``. + + The heuristic is that any of the channel names ending + with odd numbers are filed under ``Left``; those ending with even numbers + are filed under ``Right``; and those ending with the character(s) specified + in ``midline`` are filed under ``Midline``. Other channels are ignored. + + This is appropriate for 10/20, 10/10, 10/05, …, sensor arrangements, but + not for other naming conventions. Parameters ---------- - %(info_not_none)s If possible, the channel lists will be sorted - posterior-to-anterior; otherwise they default to the order specified in - ``info["ch_names"]``. + %(info_not_none)s If channel locations are present, the channel lists will + be sorted from posterior to anterior; otherwise, the order specified in + ``info["ch_names"]`` will be kept. midline : str Names ending in any of these characters are stored under the - ``Midline`` key. Defaults to 'z'. Note that capitalization is ignored. + ``Midline`` key. Defaults to ``'z'``. Capitalization is ignored. + return_ch_names : bool + Whether to return channel names instead of channel indices. + + .. versionadded:: 1.4.0 Returns ------- selections : dict - A dictionary mapping from ROI names to lists of picks (integers). + A dictionary mapping from region of interest name to a list of channel + indices (if ``return_ch_names=False``) or to a list of channel names + (if ``return_ch_names=True``). 
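# A minimal sketch (not from this diff) of the odd/even/midline heuristic documented
# above, on an info without channel positions so the original order is kept; the
# channel names are made up and ``return_ch_names`` is the option noted as new in 1.4.
import mne

info = mne.create_info(["F3", "Fz", "F4", "O1", "Oz", "O2"], 250.0, "eeg")
selections = mne.channels.make_1020_channel_selections(info, return_ch_names=True)
print(selections)
# Left -> ['F3', 'O1'], Midline -> ['Fz', 'Oz'], Right -> ['F4', 'O2']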
""" _validate_type(info, "info") try: from .layout import find_layout + layout = find_layout(info) pos = layout.pos ch_names = layout.names @@ -1882,15 +2005,30 @@ def make_1020_channel_selections(info, midline="z"): if pos is not None: # sort channels from front to center # (y-coordinate of the position info in the layout) - selections = {selection: np.array(picks)[pos[picks, 1].argsort()] - for selection, picks in selections.items()} + selections = { + selection: np.array(picks)[pos[picks, 1].argsort()] + for selection, picks in selections.items() + } + + # convert channel indices to names if requested + if return_ch_names: + for selection, ch_indices in selections.items(): + selections[selection] = [info.ch_names[idx] for idx in ch_indices] return selections @verbose -def combine_channels(inst, groups, method='mean', keep_stim=False, - drop_bad=False, verbose=None): +def combine_channels( + inst, + groups, + method="mean", + keep_stim=False, + drop_bad=False, + *, + on_missing="raise", + verbose=None, +): """Combine channels based on specified channel grouping. Parameters @@ -1928,6 +2066,8 @@ def combine_channels(inst, groups, method='mean', keep_stim=False, drop_bad : bool If ``True``, drop channels marked as bad before combining. Defaults to ``False``. + %(on_missing_epochs)s + .. versionadded:: 1.11.0 %(verbose)s Returns @@ -1936,113 +2076,145 @@ def combine_channels(inst, groups, method='mean', keep_stim=False, An MNE-Python object of the same type as the input ``inst``, containing one virtual channel for each group in ``groups`` (and, if ``keep_stim`` is ``True``, also containing stimulus channels). + + See Also + -------- + mne.channels.equalize_channels + mne.channels.rename_channels + mne.channels.unify_bad_channels """ + from ..epochs import BaseEpochs, EpochsArray + from ..evoked import Evoked, EvokedArray from ..io import BaseRaw, RawArray - from .. import BaseEpochs, EpochsArray, Evoked, EvokedArray ch_axis = 1 if isinstance(inst, BaseEpochs) else 0 - ch_idx = list(range(inst.info['nchan'])) - ch_names = inst.info['ch_names'] + ch_idx = list(range(inst.info["nchan"])) + ch_names = inst.info["ch_names"] ch_types = inst.get_channel_types() - inst_data = inst.data if isinstance(inst, Evoked) else inst.get_data() + kwargs = dict() + if isinstance(inst, BaseEpochs): + kwargs["copy"] = False + inst_data = inst.get_data(**kwargs) groups = OrderedDict(deepcopy(groups)) # Convert string values of ``method`` into callables # XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py if isinstance(method, str): - method_dict = {key: partial(getattr(np, key), axis=ch_axis) - for key in ('mean', 'median', 'std')} + method_dict = { + key: partial(getattr(np, key), axis=ch_axis) + for key in ("mean", "median", "std") + } try: method = method_dict[method] except KeyError: - raise ValueError('"method" must be a callable, or one of "mean", ' - f'"median", or "std"; got "{method}".') + raise ValueError( + '"method" must be a callable, or one of "mean", ' + f'"median", or "std"; got "{method}".' 
+ ) # Instantiate channel info and data new_ch_names, new_ch_types, new_data = [], [], [] if not isinstance(keep_stim, bool): - raise TypeError('"keep_stim" must be of type bool, not ' - f'{type(keep_stim)}.') + raise TypeError(f'"keep_stim" must be of type bool, not {type(keep_stim)}.') if keep_stim: stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True)) if stim_ch_idx: new_ch_names = [ch_names[idx] for idx in stim_ch_idx] new_ch_types = [ch_types[idx] for idx in stim_ch_idx] - new_data = [np.take(inst_data, idx, axis=ch_axis) - for idx in stim_ch_idx] + new_data = [np.take(inst_data, idx, axis=ch_axis) for idx in stim_ch_idx] else: - warn('Could not find stimulus channels.') + warn("Could not find stimulus channels.") # Get indices of bad channels ch_idx_bad = [] if not isinstance(drop_bad, bool): - raise TypeError('"drop_bad" must be of type bool, not ' - f'{type(drop_bad)}.') - if drop_bad and inst.info['bads']: - ch_idx_bad = pick_channels(ch_names, inst.info['bads']) + raise TypeError(f'"drop_bad" must be of type bool, not {type(drop_bad)}.') + if drop_bad and inst.info["bads"]: + ch_idx_bad = pick_channels(ch_names, inst.info["bads"]) # Check correctness of combinations for this_group, this_picks in groups.items(): # Check if channel indices are out of bounds if not all(idx in ch_idx for idx in this_picks): - raise ValueError('Some channel indices are out of bounds.') + raise ValueError("Some channel indices are out of bounds.") # Check if heterogeneous sensor type combinations this_ch_type = np.array(ch_types)[this_picks] if len(set(this_ch_type)) > 1: - types = ', '.join(set(this_ch_type)) - raise ValueError('Cannot combine sensors of different types; ' - f'"{this_group}" contains types {types}.') + types = ", ".join(set(this_ch_type)) + raise ValueError( + "Cannot combine sensors of different types; " + f'"{this_group}" contains types {types}.' + ) # Remove bad channels these_bads = [idx for idx in this_picks if idx in ch_idx_bad] this_picks = [idx for idx in this_picks if idx not in ch_idx_bad] if these_bads: - logger.info('Dropped the following channels in group ' - f'{this_group}: {these_bads}') + logger.info( + f"Dropped the following channels in group {this_group}: {these_bads}" + ) # Check if combining less than 2 channel if len(set(this_picks)) < 2: - warn(f'Less than 2 channels in group "{this_group}" when ' - f'combining by method "{method}".') + warn( + f'Less than 2 channels in group "{this_group}" when ' + f'combining by method "{method}".' 
+ ) # If all good create more detailed dict without bad channels groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0]) # Combine channels and add them to the new instance for this_group, this_group_dict in groups.items(): new_ch_names.append(this_group) - new_ch_types.append(this_group_dict['ch_type']) - this_picks = this_group_dict['picks'] + new_ch_types.append(this_group_dict["ch_type"]) + this_picks = this_group_dict["picks"] this_data = np.take(inst_data, this_picks, axis=ch_axis) new_data.append(method(this_data)) new_data = np.swapaxes(new_data, 0, ch_axis) - info = create_info(sfreq=inst.info['sfreq'], ch_names=new_ch_names, - ch_types=new_ch_types) + info = create_info( + sfreq=inst.info["sfreq"], ch_names=new_ch_names, ch_types=new_ch_types + ) # create new instances and make sure to copy important attributes if isinstance(inst, BaseRaw): combined_inst = RawArray(new_data, info, first_samp=inst.first_samp) elif isinstance(inst, BaseEpochs): - combined_inst = EpochsArray(new_data, info, events=inst.events, - tmin=inst.times[0], baseline=inst.baseline) + combined_inst = EpochsArray( + new_data, + info, + events=inst.events, + event_id=inst.event_id, + tmin=inst.times[0], + baseline=inst.baseline, + on_missing=on_missing, + ) if inst.metadata is not None: combined_inst.metadata = inst.metadata.copy() elif isinstance(inst, Evoked): - combined_inst = EvokedArray(new_data, info, tmin=inst.times[0], - baseline=inst.baseline) + combined_inst = EvokedArray( + new_data, info, tmin=inst.times[0], baseline=inst.baseline + ) return combined_inst # NeuroMag channel groupings -_SELECTIONS = ['Vertex', 'Left-temporal', 'Right-temporal', 'Left-parietal', - 'Right-parietal', 'Left-occipital', 'Right-occipital', - 'Left-frontal', 'Right-frontal'] -_EEG_SELECTIONS = ['EEG 1-32', 'EEG 33-64', 'EEG 65-96', 'EEG 97-128'] +_SELECTIONS = [ + "Vertex", + "Left-temporal", + "Right-temporal", + "Left-parietal", + "Right-parietal", + "Left-occipital", + "Right-occipital", + "Left-frontal", + "Right-frontal", +] +_EEG_SELECTIONS = ["EEG 1-32", "EEG 33-64", "EEG 65-96", "EEG 97-128"] def _divide_to_regions(info, add_stim=True): """Divide channels to regions by positions.""" - from scipy.stats import zscore picks = _pick_data_channels(info, exclude=[]) chs_in_lobe = len(picks) // 4 - pos = np.array([ch['loc'][:3] for ch in info['chs']]) + pos = np.array([ch["loc"][:3] for ch in info["chs"]]) x, y, z = pos.T frontal = picks[np.argsort(y[picks])[-chs_in_lobe:]] @@ -2062,14 +2234,14 @@ def _divide_to_regions(info, add_stim=True): # Because of the way the sides are divided, there may be outliers in the # temporal lobes. Here we switch the sides for these outliers. For other # lobes it is not a big problem because of the vicinity of the lobes. 
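# A minimal sketch (not from this diff) of ROI averaging with ``combine_channels``;
# the indices in ``groups`` refer to positions in ``inst.ch_names``, and all names
# and data below are made up.
import numpy as np
import mne

info = mne.create_info(["EEG 001", "EEG 002", "EEG 003", "EEG 004"], 250.0, "eeg")
raw = mne.io.RawArray(np.random.default_rng(0).normal(size=(4, 500)), info)
roi = mne.channels.combine_channels(
    raw, {"frontal": [0, 1], "occipital": [2, 3]}, method="mean"
)
print(roi.ch_names)  # ['frontal', 'occipital']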
- with np.errstate(invalid='ignore'): # invalid division, greater compare + with np.errstate(invalid="ignore"): # invalid division, greater compare zs = np.abs(zscore(x[rt])) - outliers = np.array(rt)[np.where(zs > 2.)[0]] + outliers = np.array(rt)[np.where(zs > 2.0)[0]] rt = list(np.setdiff1d(rt, outliers)) - with np.errstate(invalid='ignore'): # invalid division, greater compare + with np.errstate(invalid="ignore"): # invalid division, greater compare zs = np.abs(zscore(x[lt])) - outliers = np.append(outliers, (np.array(lt)[np.where(zs > 2.)[0]])) + outliers = np.append(outliers, (np.array(lt)[np.where(zs > 2.0)[0]])) lt = list(np.setdiff1d(lt, outliers)) l_mean = np.mean(x[lt]) @@ -2084,11 +2256,19 @@ def _divide_to_regions(info, add_stim=True): stim_ch = _get_stim_channel(None, info, raise_error=False) if len(stim_ch) > 0: for region in [lf, rf, lo, ro, lp, rp, lt, rt]: - region.append(info['ch_names'].index(stim_ch[0])) - return OrderedDict([('Left-frontal', lf), ('Right-frontal', rf), - ('Left-parietal', lp), ('Right-parietal', rp), - ('Left-occipital', lo), ('Right-occipital', ro), - ('Left-temporal', lt), ('Right-temporal', rt)]) + region.append(info["ch_names"].index(stim_ch[0])) + return OrderedDict( + [ + ("Left-frontal", lf), + ("Right-frontal", rf), + ("Left-parietal", lp), + ("Right-parietal", rp), + ("Left-occipital", lo), + ("Right-occipital", ro), + ("Left-temporal", lt), + ("Right-temporal", rt), + ] + ) def _divide_side(lobe, x): @@ -2133,57 +2313,55 @@ def read_vectorview_selection(name, fname=None, info=None, verbose=None): List with channel names in the selection. """ # convert name to list of string - if not isinstance(name, (list, tuple)): + if not isinstance(name, list | tuple): name = [name] if isinstance(info, Info): picks = pick_types(info, meg=True, exclude=()) - if len(picks) > 0 and ' ' not in info['ch_names'][picks[0]]: - spacing = 'new' + if len(picks) > 0 and " " not in info["ch_names"][picks[0]]: + spacing = "new" else: - spacing = 'old' + spacing = "old" elif info is not None: - raise TypeError('info must be an instance of Info or None, not %s' - % (type(info),)) + raise TypeError(f"info must be an instance of Info or None, not {type(info)}") else: # info is None - spacing = 'old' + spacing = "old" # use built-in selections by default if fname is None: - fname = op.join(op.dirname(__file__), '..', 'data', 'mne_analyze.sel') + fname = op.join(op.dirname(__file__), "..", "data", "mne_analyze.sel") fname = str(_check_fname(fname, must_exist=True, overwrite="read")) # use this to make sure we find at least one match for each name name_found = {n: False for n in name} - with open(fname, 'r') as fid: + with open(fname) as fid: sel = [] for line in fid: line = line.strip() # skip blank lines and comments - if len(line) == 0 or line[0] == '#': + if len(line) == 0 or line[0] == "#": continue # get the name of the selection in the file - pos = line.find(':') + pos = line.find(":") if pos < 0: - logger.info('":" delimiter not found in selections file, ' - 'skipping line') + logger.info('":" delimiter not found in selections file, skipping line') continue sel_name_file = line[:pos] # search for substring match with name provided for n in name: if sel_name_file.find(n) >= 0: - sel.extend(line[pos + 1:].split('|')) + sel.extend(line[pos + 1 :].split("|")) name_found[n] = True break # make sure we found at least one match for each name for n, found in name_found.items(): if not found: - raise ValueError('No match for selection name "%s" found' % n) + raise 
ValueError(f'No match for selection name "{n}" found') # make the selection a sorted list with unique elements sel = list(set(sel)) sel.sort() - if spacing == 'new': # "new" or "old" by now, "old" is default - sel = [s.replace('MEG ', 'MEG') for s in sel] + if spacing == "new": # "new" or "old" by now, "old" is default + sel = [s.replace("MEG ", "MEG") for s in sel] return sel diff --git a/mne/channels/data/montages/easycap-M43.txt b/mne/channels/data/montages/easycap-M43.txt new file mode 100644 index 00000000000..47bbad785ec --- /dev/null +++ b/mne/channels/data/montages/easycap-M43.txt @@ -0,0 +1,65 @@ +Site Theta Phi +1 23 90 +2 23 30 +3 23 -30 +4 23 -90 +5 -23 30 +6 -23 -30 +7 46 74 +8 46 41 +9 46 8 +10 46 -25 +11 46 -57 +12 46 -90 +13 -46 57 +14 -46 25 +15 -46 -8 +16 -46 -41 +17 -46 -74 +18 69 76 +19 69 49 +20 69 21 +21 69 -7 +22 69 -35 +23 69 -62 +24 69 -90 +25 -69 62 +26 -69 35 +27 -69 7 +28 -69 -21 +29 -69 -49 +30 -69 -76 +31 92 90 +32 92 62 +33 92 34 +34 92 6 +35 92 -21 +36 92 -49 +37 92 -76 +38 -92 76 +39 -92 49 +40 -92 21 +41 -92 -6 +42 -92 -34 +43 -92 -62 +44 115 35 +45 115 10 +46 115 -15 +47 115 -40 +48 115 -65 +49 115 -90 +50 -115 65 +51 -115 40 +52 -115 15 +53 -115 -10 +54 -115 -35 +55 138 23 +56 138 -15 +57 138 -40 +58 138 -65 +59 138 -90 +60 -138 65 +61 -138 40 +62 -138 15 +63 -138 -23 +Ref 0 0 diff --git a/mne/channels/data/neighbors/__init__.py b/mne/channels/data/neighbors/__init__.py index b49a56bb334..a07b13441ad 100644 --- a/mne/channels/data/neighbors/__init__.py +++ b/mne/channels/data/neighbors/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Neighbor definitions for clustering permutation analysis.""" # This is a selection of files from http://fieldtrip.fcdonders.nl/template # Additional definitions can be obtained through the FieldTrip software. diff --git a/mne/channels/interpolation.py b/mne/channels/interpolation.py index d8c0a2be78a..c0a70a7d6ed 100644 --- a/mne/channels/interpolation.py +++ b/mne/channels/interpolation.py @@ -1,16 +1,17 @@ -# Authors: Denis Engemann -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np from numpy.polynomial.legendre import legval +from scipy.interpolate import RectBivariateSpline +from scipy.linalg import pinv +from scipy.spatial.distance import pdist, squareform -from ..utils import logger, warn, verbose -from ..io.meas_info import _simplify_info -from ..io.pick import pick_types, pick_channels, pick_info +from .._fiff.meas_info import _simplify_info +from .._fiff.pick import pick_channels, pick_info, pick_types from ..surface import _normalize_vectors -from ..forward import _map_meg_or_eeg_channels -from ..utils import _check_option, _validate_type +from ..utils import _validate_type, logger, verbose, warn def _calc_h(cosang, stiffness=4, n_legendre_terms=50): @@ -26,9 +27,10 @@ def _calc_h(cosang, stiffness=4, n_legendre_terms=50): n_legendre_terms : int number of Legendre terms to evaluate. """ - factors = [(2 * n + 1) / - (n ** (stiffness - 1) * (n + 1) ** (stiffness - 1) * 4 * np.pi) - for n in range(1, n_legendre_terms + 1)] + factors = [ + (2 * n + 1) / (n ** (stiffness - 1) * (n + 1) ** (stiffness - 1) * 4 * np.pi) + for n in range(1, n_legendre_terms + 1) + ] return legval(cosang, [0] + factors) @@ -50,9 +52,10 @@ def _calc_g(cosang, stiffness=4, n_legendre_terms=50): G : np.ndrarray of float, shape(n_channels, n_channels) The G matrix. 
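# A minimal sketch (not from this diff) of the truncated series ``_calc_g`` evaluates:
#   g_m(x) = sum_{n=1..N} (2n + 1) / ((n (n + 1))**m * 4 * pi) * P_n(x)
# with stiffness m and Legendre polynomials P_n (Perrin et al., 1989, the
# spherical-spline reference cited for ``_make_interpolation_matrix`` below).
import numpy as np
from numpy.polynomial.legendre import legval

m, n_terms = 4, 50
factors = [(2 * n + 1) / ((n * (n + 1)) ** m * 4 * np.pi) for n in range(1, n_terms + 1)]
print(legval(np.array([1.0, 0.0, -1.0]), [0] + factors))  # g at cos(angle) = 1, 0, -1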
""" - factors = [(2 * n + 1) / (n ** stiffness * (n + 1) ** stiffness * - 4 * np.pi) - for n in range(1, n_legendre_terms + 1)] + factors = [ + (2 * n + 1) / (n**stiffness * (n + 1) ** stiffness * 4 * np.pi) + for n in range(1, n_legendre_terms + 1) + ] return legval(cosang, [0] + factors) @@ -82,7 +85,6 @@ def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5): Spherical splines for scalp potential and current density mapping. Electroencephalography Clinical Neurophysiology, Feb; 72(2):184-7. """ - from scipy import linalg pos_from = pos_from.copy() pos_to = pos_to.copy() n_from = pos_from.shape[0] @@ -101,11 +103,15 @@ def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5): assert G_to_from.shape == (n_to, n_from) if alpha is not None: - G_from.flat[::len(G_from) + 1] += alpha + G_from.flat[:: len(G_from) + 1] += alpha - C = np.vstack([np.hstack([G_from, np.ones((n_from, 1))]), - np.hstack([np.ones((1, n_from)), [[0]]])]) - C_inv = linalg.pinv(C) + C = np.vstack( + [ + np.hstack([G_from, np.ones((n_from, 1))]), + np.hstack([np.ones((1, n_from)), [[0]]]), + ] + ) + C_inv = pinv(C) interpolation = np.hstack([G_to_from, np.ones((n_to, 1))]) @ C_inv[:, :-1] assert interpolation.shape == (n_to, n_from) @@ -114,24 +120,26 @@ def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5): def _do_interp_dots(inst, interpolation, goods_idx, bads_idx): """Dot product of channel mapping matrix to channel data.""" - from ..io.base import BaseRaw from ..epochs import BaseEpochs from ..evoked import Evoked - _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), 'inst') + from ..io import BaseRaw + + _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "inst") inst._data[..., bads_idx, :] = np.matmul( - interpolation, inst._data[..., goods_idx, :]) + interpolation, inst._data[..., goods_idx, :] + ) @verbose -def _interpolate_bads_eeg(inst, origin, exclude=None, verbose=None): +def _interpolate_bads_eeg(inst, origin, exclude=None, ecog=False, verbose=None): if exclude is None: exclude = list() bads_idx = np.zeros(len(inst.ch_names), dtype=bool) goods_idx = np.zeros(len(inst.ch_names), dtype=bool) - picks = pick_types(inst.info, meg=False, eeg=True, exclude=exclude) + picks = pick_types(inst.info, meg=False, eeg=not ecog, ecog=ecog, exclude=exclude) inst.info._check_consistency() - bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks] + bads_idx[picks] = [inst.ch_names[ch] in inst.info["bads"] for ch in picks] if len(picks) == 0 or bads_idx.sum() == 0: return @@ -148,30 +156,71 @@ def _interpolate_bads_eeg(inst, origin, exclude=None, verbose=None): # test spherical fit distance = np.linalg.norm(pos - origin, axis=-1) distance = np.mean(distance / np.mean(distance)) - if np.abs(1. - distance) > 0.1: - warn('Your spherical fit is poor, interpolation results are ' - 'likely to be inaccurate.') + if np.abs(1.0 - distance) > 0.1: + warn( + "Your spherical fit is poor, interpolation results are " + "likely to be inaccurate." 
+ ) pos_good = pos[goods_idx_pos] - origin pos_bad = pos[bads_idx_pos] - origin - logger.info('Computing interpolation matrix from {} sensor ' - 'positions'.format(len(pos_good))) + logger.info(f"Computing interpolation matrix from {len(pos_good)} sensor positions") interpolation = _make_interpolation_matrix(pos_good, pos_bad) - logger.info('Interpolating {} sensors'.format(len(pos_bad))) + logger.info(f"Interpolating {len(pos_bad)} sensors") _do_interp_dots(inst, interpolation, goods_idx, bads_idx) -def _interpolate_bads_meg(inst, mode='accurate', origin=(0., 0., 0.04), - verbose=None, ref_meg=False): +@verbose +def _interpolate_bads_ecog(inst, *, origin, exclude=None, verbose=None): + _interpolate_bads_eeg(inst, origin, exclude=exclude, ecog=True, verbose=verbose) + + +def _interpolate_bads_meg( + inst, mode="accurate", *, origin, verbose=None, ref_meg=False +): return _interpolate_bads_meeg( - inst, mode, origin, ref_meg=ref_meg, eeg=False, verbose=verbose) + inst, mode, ref_meg=ref_meg, eeg=False, origin=origin, verbose=verbose + ) + + +@verbose +def _interpolate_bads_nan( + inst, + *, + ch_type, + ref_meg=False, + exclude=(), + verbose=None, +): + info = _simplify_info(inst.info) + picks_type = pick_types(info, ref_meg=ref_meg, exclude=exclude, **{ch_type: True}) + use_ch_names = [inst.info["ch_names"][p] for p in picks_type] + bads_type = [ch for ch in inst.info["bads"] if ch in use_ch_names] + if len(bads_type) == 0 or len(picks_type) == 0: + return + # select the bad channels to be interpolated + picks_bad = pick_channels(inst.info["ch_names"], bads_type, exclude=[]) + inst._data[..., picks_bad, :] = np.nan @verbose -def _interpolate_bads_meeg(inst, mode='accurate', origin=(0., 0., 0.04), - meg=True, eeg=True, ref_meg=False, - exclude=(), verbose=None): +def _interpolate_bads_meeg( + inst, + mode="accurate", + *, + meg=True, + eeg=True, + ref_meg=False, + exclude=(), + origin, + method=None, + verbose=None, +): + from ..forward import _map_meg_or_eeg_channels + + if method is None: + method = {"meg": "MNE", "eeg": "MNE"} bools = dict(meg=meg, eeg=eeg) info = _simplify_info(inst.info) for ch_type, do in bools.items(): @@ -180,31 +229,30 @@ def _interpolate_bads_meeg(inst, mode='accurate', origin=(0., 0., 0.04), kw = dict(meg=False, eeg=False) kw[ch_type] = True picks_type = pick_types(info, ref_meg=ref_meg, exclude=exclude, **kw) - picks_good = pick_types(info, ref_meg=ref_meg, exclude='bads', **kw) - use_ch_names = [inst.info['ch_names'][p] for p in picks_type] - bads_type = [ch for ch in inst.info['bads'] if ch in use_ch_names] + picks_good = pick_types(info, ref_meg=ref_meg, exclude="bads", **kw) + use_ch_names = [inst.info["ch_names"][p] for p in picks_type] + bads_type = [ch for ch in inst.info["bads"] if ch in use_ch_names] if len(bads_type) == 0 or len(picks_type) == 0: continue # select the bad channels to be interpolated - picks_bad = pick_channels(inst.info['ch_names'], bads_type, - exclude=[]) - if ch_type == 'eeg': + picks_bad = pick_channels(inst.info["ch_names"], bads_type, exclude=[]) + + # do MNE based interpolation + if ch_type == "eeg": picks_to = picks_type - bad_sel = np.in1d(picks_type, picks_bad) + bad_sel = np.isin(picks_type, picks_bad) else: picks_to = picks_bad bad_sel = slice(None) info_from = pick_info(inst.info, picks_good) info_to = pick_info(inst.info, picks_to) - mapping = _map_meg_or_eeg_channels( - info_from, info_to, mode=mode, origin=origin) + mapping = _map_meg_or_eeg_channels(info_from, info_to, mode=mode, origin=origin) mapping = 
mapping[bad_sel] _do_interp_dots(inst, mapping, picks_good, picks_bad) @verbose -def _interpolate_bads_nirs(inst, method='nearest', exclude=(), verbose=None): - from scipy.spatial.distance import pdist, squareform +def _interpolate_bads_nirs(inst, exclude=(), verbose=None): from mne.preprocessing.nirs import _validate_nirs_info if len(pick_types(inst.info, fnirs=True, exclude=())) == 0: @@ -212,34 +260,158 @@ def _interpolate_bads_nirs(inst, method='nearest', exclude=(), verbose=None): # Returns pick of all nirs and ensures channels are correctly ordered picks_nirs = _validate_nirs_info(inst.info) - nirs_ch_names = [inst.info['ch_names'][p] for p in picks_nirs] + nirs_ch_names = [inst.info["ch_names"][p] for p in picks_nirs] nirs_ch_names = [ch for ch in nirs_ch_names if ch not in exclude] - bads_nirs = [ch for ch in inst.info['bads'] if ch in nirs_ch_names] + bads_nirs = [ch for ch in inst.info["bads"] if ch in nirs_ch_names] if len(bads_nirs) == 0: return - picks_bad = pick_channels(inst.info['ch_names'], bads_nirs, exclude=[]) + picks_bad = pick_channels(inst.info["ch_names"], bads_nirs, exclude=[]) bads_mask = [p in picks_bad for p in picks_nirs] - chs = [inst.info['chs'][i] for i in picks_nirs] - locs3d = np.array([ch['loc'][:3] for ch in chs]) + chs = [inst.info["chs"][i] for i in picks_nirs] + locs3d = np.array([ch["loc"][:3] for ch in chs]) - _check_option('fnirs_method', method, ['nearest']) + dist = pdist(locs3d) + dist = squareform(dist) - if method == 'nearest': + for bad in picks_bad: + dists_to_bad = dist[bad] + # Ignore distances to self + dists_to_bad[dists_to_bad == 0] = np.inf + # Ignore distances to other bad channels + dists_to_bad[bads_mask] = np.inf + # Find closest remaining channels for same frequency + closest_idx = np.argmin(dists_to_bad) + (bad % 2) + inst._data[bad] = inst._data[closest_idx] - dist = pdist(locs3d) - dist = squareform(dist) + # TODO: this seems like a bug because it does not respect reset_bads + inst.info["bads"] = [ch for ch in inst.info["bads"] if ch in exclude] - for bad in picks_bad: - dists_to_bad = dist[bad] - # Ignore distances to self - dists_to_bad[dists_to_bad == 0] = np.inf - # Ignore distances to other bad channels - dists_to_bad[bads_mask] = np.inf - # Find closest remaining channels for same frequency - closest_idx = np.argmin(dists_to_bad) + (bad % 2) - inst._data[bad] = inst._data[closest_idx] + return inst - inst.info['bads'] = [ch for ch in inst.info['bads'] if ch in exclude] - return inst +def _find_seeg_electrode_shaft(pos, tol_shaft=0.002, tol_spacing=1): + # 1) find nearest neighbor to define the electrode shaft line + # 2) find all contacts on the same line + # 3) remove contacts with large distances + + dist = squareform(pdist(pos)) + np.fill_diagonal(dist, np.inf) + + shafts = list() + shaft_ts = list() + for i, n1 in enumerate(pos): + if any([i in shaft for shaft in shafts]): + continue + n2 = pos[np.argmin(dist[i])] # 1 + # https://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html + shaft_dists = np.linalg.norm( + np.cross((pos - n1), (pos - n2)), axis=1 + ) / np.linalg.norm(n2 - n1) + shaft = np.where(shaft_dists < tol_shaft)[0] # 2 + shaft_prev = None + for _ in range(10): # avoid potential cycles + if np.array_equal(shaft, shaft_prev): + break + shaft_prev = shaft + # compute median shaft line + v = np.median( + [ + pos[i] - pos[j] + for idx, i in enumerate(shaft) + for j in shaft[idx + 1 :] + ], + axis=0, + ) + c = np.median(pos[shaft], axis=0) + # recompute distances + shaft_dists = np.linalg.norm( + 
np.cross((pos - c), (pos - c + v)), axis=1 + ) / np.linalg.norm(v) + shaft = np.where(shaft_dists < tol_shaft)[0] + ts = np.array([np.dot(c - n0, v) / np.linalg.norm(v) ** 2 for n0 in pos[shaft]]) + shaft_order = np.argsort(ts) + shaft = shaft[shaft_order] + ts = ts[shaft_order] + + # only include the largest group with spacing with the error tolerance + # avoid interpolating across spans between contacts + t_diffs = np.diff(ts) + t_diff_med = np.median(t_diffs) + spacing_errors = (t_diffs - t_diff_med) / t_diff_med + groups = list() + group = [shaft[0]] + for j in range(len(shaft) - 1): + if spacing_errors[j] > tol_spacing: + groups.append(group) + group = [shaft[j + 1]] + else: + group.append(shaft[j + 1]) + groups.append(group) + group = [group for group in groups if i in group][0] + ts = ts[np.isin(shaft, group)] + shaft = np.array(group, dtype=int) + + shafts.append(shaft) + shaft_ts.append(ts) + return shafts, shaft_ts + + +@verbose +def _interpolate_bads_seeg( + inst, exclude=None, tol_shaft=0.002, tol_spacing=1, verbose=None +): + if exclude is None: + exclude = list() + picks = pick_types(inst.info, meg=False, seeg=True, exclude=exclude) + inst.info._check_consistency() + bads_idx = np.isin(np.array(inst.ch_names)[picks], inst.info["bads"]) + + if len(picks) == 0 or bads_idx.sum() == 0: + return + + pos = inst._get_channel_positions(picks) + + # Make sure only sEEG are used + bads_idx_pos = bads_idx[picks] + + shafts, shaft_ts = _find_seeg_electrode_shaft( + pos, tol_shaft=tol_shaft, tol_spacing=tol_spacing + ) + + # interpolate the bad contacts + picks_bad = list(np.where(bads_idx_pos)[0]) + for shaft, ts in zip(shafts, shaft_ts): + bads_shaft = np.array([idx for idx in picks_bad if idx in shaft]) + if bads_shaft.size == 0: + continue + goods_shaft = shaft[np.isin(shaft, bads_shaft, invert=True)] + if goods_shaft.size < 4: # cubic spline requires 3 channels + msg = "No shaft" if shaft.size < 4 else "Not enough good channels" + no_shaft_chs = " and ".join(np.array(inst.ch_names)[bads_shaft]) + raise RuntimeError( + f"{msg} found in a line with {no_shaft_chs} " + "at least 3 good channels on the same line " + f"are required for interpolation, {goods_shaft.size} found. " + f"Dropping {no_shaft_chs} is recommended." + ) + logger.debug( + f"Interpolating {np.array(inst.ch_names)[bads_shaft]} using " + f"data from {np.array(inst.ch_names)[goods_shaft]}" + ) + bads_shaft_idx = np.where(np.isin(shaft, bads_shaft))[0] + goods_shaft_idx = np.where(~np.isin(shaft, bads_shaft))[0] + + z = inst._data[..., goods_shaft, :] + is_epochs = z.ndim == 3 + if is_epochs: + z = z.swapaxes(0, 1) + z = z.reshape(z.shape[0], -1) + y = np.arange(z.shape[-1]) + out = RectBivariateSpline(x=ts[goods_shaft_idx], y=y, z=z)( + x=ts[bads_shaft_idx], y=y + ) + if is_epochs: + out = out.reshape(bads_shaft.size, inst._data.shape[0], -1) + out = out.swapaxes(0, 1) + inst._data[..., bads_shaft, :] = out diff --git a/mne/channels/layout.py b/mne/channels/layout.py index 87149f458a4..20ef3809207 100644 --- a/mne/channels/layout.py +++ b/mne/channels/layout.py @@ -1,31 +1,37 @@ -# Authors: Alexandre Gramfort -# Denis Engemann -# Martin Luessi -# Eric Larson -# Marijn van Vliet -# Jona Sassenhagen -# Teon Brooks -# Robert Luke -# -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
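For reference, a minimal standalone sketch (not part of the patch; the contact positions are made up and the default tolerance is assumed) of the point-to-line distance that _find_seeg_electrode_shaft above uses to collect contacts lying on one electrode shaft:

import numpy as np
from scipy.spatial.distance import pdist, squareform

# Hypothetical sEEG contact positions in meters; the first three lie on a line.
pos = np.array([[0.000, 0.00, 0.0],
                [0.005, 0.00, 0.0],
                [0.010, 0.00, 0.0],
                [0.000, 0.02, 0.0]])  # off-shaft contact
dist = squareform(pdist(pos))
np.fill_diagonal(dist, np.inf)

n1 = pos[0]
n2 = pos[np.argmin(dist[0])]  # nearest neighbour defines the candidate line
# distance of every contact to the line through n1 and n2:
# |(p - n1) x (p - n2)| / |n2 - n1|
shaft_dists = np.linalg.norm(np.cross(pos - n1, pos - n2), axis=1)
shaft_dists /= np.linalg.norm(n2 - n1)
shaft = np.where(shaft_dists < 0.002)[0]  # tol_shaft=0.002, as in the patch
# shaft -> array([0, 1, 2]); the off-shaft contact at index 3 is rejected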
import logging from collections import defaultdict +from copy import deepcopy from itertools import combinations from pathlib import Path import numpy as np - -from ..transforms import _pol_to_cart, _cart_to_sph -from ..io.pick import pick_types, _picks_to_idx, _FNIRS_CH_TYPES_SPLIT -from ..io.constants import FIFF -from ..io.meas_info import Info -from ..utils import (_clean_names, warn, _check_ch_locs, fill_doc, - _check_fname, _check_option, _check_sphere, logger) +from scipy.spatial.distance import pdist, squareform + +from .._fiff.constants import FIFF +from .._fiff.meas_info import Info +from .._fiff.pick import _FNIRS_CH_TYPES_SPLIT, _picks_to_idx, pick_types +from ..transforms import _cart_to_sph, _pol_to_cart +from ..utils import ( + _check_ch_locs, + _check_fname, + _check_option, + _check_sphere, + _clean_names, + _ensure_int, + fill_doc, + logger, + verbose, + warn, +) +from ..viz.topomap import plot_layout from .channels import _get_ch_info -class Layout(object): +class Layout: """Sensor layouts. Layouts are typically loaded from a file using @@ -39,28 +45,44 @@ class Layout(object): pos : array, shape=(n_channels, 4) The unit-normalized positions of the channels in 2d (x, y, width, height). - names : list + names : list of str The channel names. - ids : list + ids : array-like of int The channel ids. kind : str The type of Layout (e.g. 'Vectorview-all'). """ - def __init__(self, box, pos, names, ids, kind): # noqa: D102 + def __init__(self, box, pos, names, ids, kind): self.box = box self.pos = pos self.names = names - self.ids = ids + self.ids = np.array(ids) + if self.ids.ndim != 1: + raise ValueError("The channel indices should be a 1D array-like.") self.kind = kind + def copy(self): + """Return a copy of the layout. + + Returns + ------- + layout : instance of Layout + A deepcopy of the layout. + + Notes + ----- + .. versionadded:: 1.7 + """ + return deepcopy(self) + def save(self, fname, overwrite=False): """Save Layout to disk. Parameters ---------- fname : path-like - The file name (e.g. ``'my_layout.lout'``). + The file name (must end with either ``.lout`` or ``.lay``). overwrite : bool If True, overwrites the destination file if it exists. @@ -74,26 +96,28 @@ def save(self, fname, overwrite=False): height = self.pos[:, 3] fname = _check_fname(fname, overwrite=overwrite, name=fname) if fname.suffix == ".lout": - out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box + out_str = "{:8.2f} {:8.2f} {:8.2f} {:8.2f}\n".format(*self.box) elif fname.suffix == ".lay": - out_str = '' + out_str = "" else: - raise ValueError('Unknown layout type. Should be of type ' - '.lout or .lay.') + raise ValueError("Unknown layout type. Should be of type .lout or .lay.") for ii in range(x.shape[0]): - out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n' - % (self.ids[ii], x[ii], y[ii], - width[ii], height[ii], self.names[ii])) + out_str += ( + f"{self.ids[ii]:03d} {x[ii]:8.2f} {y[ii]:8.2f} " + f"{width[ii]:8.2f} {height[ii]:8.2f} {self.names[ii]}\n" + ) - f = open(fname, 'w') + f = open(fname, "w") f.write(out_str) f.close() def __repr__(self): """Return the string representation.""" - return '' % (self.kind, - ', '.join(self.names[:3])) + return "".format( + self.kind, + ", ".join(self.names[:3]), + ) @fill_doc def plot(self, picks=None, show_axes=False, show=True): @@ -116,9 +140,121 @@ def plot(self, picks=None, show_axes=False, show=True): ----- .. 
versionadded:: 0.12.0 """ - from ..viz.topomap import plot_layout return plot_layout(self, picks=picks, show_axes=show_axes, show=show) + @verbose + def pick(self, picks=None, exclude=(), *, verbose=None): + """Pick a subset of channels. + + Parameters + ---------- + %(picks_layout)s + exclude : str | int | array-like of str or int + Set of channels to exclude, only used when ``picks`` is set to ``'all'`` or + ``None``. Exclude will not drop channels explicitly provided in ``picks``. + %(verbose)s + + Returns + ------- + layout : instance of Layout + The modified layout. + + Notes + ----- + .. versionadded:: 1.7 + """ + # TODO: all the picking functions operates on an 'info' object which is missing + # for a layout, thus we have to do the extra work here. The logic below can be + # replaced when https://github.com/mne-tools/mne-python/issues/11913 is solved. + if (isinstance(picks, str) and picks == "all") or (picks is None): + picks = deepcopy(self.names) + apply_exclude = True + elif isinstance(picks, str): + picks = [picks] + apply_exclude = False + elif isinstance(picks, slice): + try: + picks = np.arange(len(self.names))[picks] + except TypeError: + raise TypeError( + "If a slice is provided, it must be a slice of integers." + ) + apply_exclude = False + else: + try: + picks = [_ensure_int(picks)] + except TypeError: + picks = ( + list(picks) if isinstance(picks, tuple | set) else deepcopy(picks) + ) + apply_exclude = False + if apply_exclude: + if isinstance(exclude, str): + exclude = [exclude] + else: + try: + exclude = [_ensure_int(exclude)] + except TypeError: + exclude = ( + list(exclude) + if isinstance(exclude, tuple | set) + else deepcopy(exclude) + ) + for var, var_name in ((picks, "picks"), (exclude, "exclude")): + if var_name == "exclude" and not apply_exclude: + continue + if not isinstance(var, list | tuple | set | np.ndarray): + raise TypeError( + f"'{var_name}' must be a list, tuple, set or ndarray. " + f"Got {type(var)} instead." + ) + if isinstance(var, np.ndarray) and var.ndim != 1: + raise ValueError( + f"'{var_name}' must be a 1D array-like. Got {var.ndim}D instead." + ) + for k, elt in enumerate(var): + if isinstance(elt, str) and elt in self.names: + var[k] = self.names.index(elt) + continue + elif isinstance(elt, str): + raise ValueError( + f"The channel name {elt} provided in {var_name} does not match " + "any channels from the layout." + ) + try: + var[k] = _ensure_int(elt) + except TypeError: + raise TypeError( + f"All elements in '{var_name}' must be integers or strings." + ) + if not (0 <= var[k] < len(self.names)): + raise ValueError( + f"The value {elt} provided in {var_name} does not match any " + f"channels from the layout. The layout has {len(self.names)} " + "channels." + ) + if len(var) != len(set(var)): + warn( + f"The provided '{var_name}' has duplicates which will be ignored.", + RuntimeWarning, + ) + picks = picks.astype(int) if isinstance(picks, np.ndarray) else picks + exclude = exclude.astype(int) if isinstance(exclude, np.ndarray) else exclude + if apply_exclude: + picks = np.array(list(set(picks) - set(exclude)), dtype=int) + if len(picks) == 0: + raise RuntimeError( + "The channel selection yielded no remaining channels. Please edit " + "the arguments 'picks' and 'exclude' to include at least one " + "channel." 
+ ) + else: + picks = np.array(list(set(picks)), dtype=int) + self.pos = self.pos[picks] + self.ids = self.ids[picks] + self.names = [self.names[k] for k in picks] + return self + def _read_lout(fname): """Aux function.""" @@ -130,7 +266,7 @@ def _read_lout(fname): splits = line.split() if len(splits) == 7: cid, x, y, dx, dy, chkind, nb = splits - name = chkind + ' ' + nb + name = chkind + " " + nb else: cid, x, y, dx, dy, name = splits pos.append(np.array([x, y, dx, dy], dtype=np.float64)) @@ -151,7 +287,7 @@ def _read_lay(fname): splits = line.split() if len(splits) == 7: cid, x, y, dx, dy, chkind, nb = splits - name = chkind + ' ' + nb + name = chkind + " " + nb else: cid, x, y, dx, dy, name = splits pos.append(np.array([x, y, dx, dy], dtype=np.float64)) @@ -163,33 +299,18 @@ def _read_lay(fname): return box, pos, names, ids -def read_layout(fname=None, path="", scale=True, *, kind=None): +def read_layout(fname=None, *, scale=True): """Read layout from a file. Parameters ---------- fname : path-like | str Either the path to a ``.lout`` or ``.lay`` file or the name of a - built-in layout. c.f. Notes for a list of the available built-in + built-in layout. See Notes for a list of the available built-in layouts. - path : path-like | None - The path of the folder containing the Layout file. Defaults to the - ``mne/channels/data/layouts`` folder inside your mne-python - installation. - - .. deprecated:: v1.4 - The ``kind`` and ``path`` parameters will be removed in version - 1.5. Please use the ``fname`` parameter instead. scale : bool Apply useful scaling for out the box plotting using ``layout.pos``. Defaults to True. - kind : str | None - The name of the ``.lout`` file (e.g. ``kind='Vectorview-all'`` for - ``'Vectorview-all.lout'``). - - .. deprecated:: v1.4 - The ``kind`` and ``path`` parameters will be removed in version - 1.5. Please use the ``fname`` parameter instead. Returns ------- @@ -253,55 +374,19 @@ def read_layout(fname=None, path="", scale=True, *, kind=None): """ readers = {".lout": _read_lout, ".lay": _read_lay} - if fname is None: # deprecated in 1.4 - warn( - "Argument 'kind' and 'path' are deprecated in favor of 'fname'.", - DeprecationWarning, - ) - if path == "" or path is None: - path = Path(__file__).parent / "data" / "layouts" - # kind should be the name as a string, but let's consider the case - # where the path to the file is provided instead. - kind = Path(kind) - if ( - len(kind.suffix) == 0 - and (path / kind.with_suffix(".lout")).exists() - ): - kind = kind.with_suffix(".lout") - elif ( - len(kind.suffix) == 0 - and (path / kind.with_suffix(".lay")).exists() - ): - kind = kind.with_suffix(".lay") - - fname = kind if kind.exists() else path / kind.name - if fname.suffix not in (".lout", ".lay"): - raise ValueError( - "Unknown layout type. Should be of type .lout or .lay." - ) - kind = fname.stem - else: - # to be removed along the deprecated argument - if kind is not None or path != "": - warn( - "Argument 'kind' and 'path' are deprecated in favor of " - "'fname' and should not be provided alongside 'fname'.", - DeprecationWarning, - ) - if isinstance(fname, str): - # is it a built-in layout? 
- directory = Path(__file__).parent / "data" / "layouts" - if (directory / fname).exists(): - fname = directory / fname - elif (directory / fname).with_suffix(".lout").exists(): - fname = (directory / fname).with_suffix(".lout") - elif (directory / fname).with_suffix(".lay").exists(): - fname = (directory / fname).with_suffix(".lay") - # if not, it must be a valid path provided as str or Path - fname = _check_fname(fname, "read", must_exist=True, name="layout") - # and it must have a valid extension - _check_option("fname extension", fname.suffix, readers) - kind = fname.stem + if isinstance(fname, str): + # is it a built-in layout? + directory = Path(__file__).parent / "data" / "layouts" + for suffix in ("", ".lout", ".lay"): + _fname = (directory / fname).with_suffix(suffix) + if _fname.exists(): + fname = _fname + break + # if not, it must be a valid path provided as str or Path + fname = _check_fname(fname, "read", must_exist=True, name="layout") + # and it must have a valid extension + _check_option("fname extension", fname.suffix, readers) + kind = fname.stem box, pos, names, ids = readers[fname.suffix](fname) if scale: @@ -317,9 +402,10 @@ def read_layout(fname=None, path="", scale=True, *, kind=None): @fill_doc -def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads', - csd=False): - """Create .lout file from EEG electrode digitization. +def make_eeg_layout( + info, radius=0.5, width=None, height=None, exclude="bads", csd=False +): + """Make a Layout object based on EEG electrode digitization. Parameters ---------- @@ -348,18 +434,18 @@ def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads', make_grid_layout, generate_2d_layout """ if not (0 <= radius <= 0.5): - raise ValueError('The radius parameter should be between 0 and 0.5.') + raise ValueError("The radius parameter should be between 0 and 0.5.") if width is not None and not (0 <= width <= 1.0): - raise ValueError('The width parameter should be between 0 and 1.') + raise ValueError("The width parameter should be between 0 and 1.") if height is not None and not (0 <= height <= 1.0): - raise ValueError('The height parameter should be between 0 and 1.') + raise ValueError("The height parameter should be between 0 and 1.") pick_kwargs = dict(meg=False, eeg=True, ref_meg=False, exclude=exclude) if csd: pick_kwargs.update(csd=True, eeg=False) picks = pick_types(info, **pick_kwargs) loc2d = _find_topomap_coords(info, picks) - names = [info['chs'][i]['ch_name'] for i in picks] + names = [info["chs"][i]["ch_name"] for i in picks] # Scale [x, y] to be in the range [-0.5, 0.5] # Don't mess with the origin or aspect ratio @@ -376,7 +462,7 @@ def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads', # Some subplot centers will be at the figure edge. Shrink everything so it # fits in the figure. - scaling = min(1 / (1. + width), 1 / (1. 
+ height)) + scaling = min(1 / (1.0 + width), 1 / (1.0 + height)) loc2d *= scaling width *= scaling height *= scaling @@ -385,20 +471,24 @@ def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads', loc2d += 0.5 n_channels = loc2d.shape[0] - pos = np.c_[loc2d[:, 0] - 0.5 * width, - loc2d[:, 1] - 0.5 * height, - width * np.ones(n_channels), - height * np.ones(n_channels)] + pos = np.c_[ + loc2d[:, 0] - 0.5 * width, + loc2d[:, 1] - 0.5 * height, + width * np.ones(n_channels), + height * np.ones(n_channels), + ] box = (0, 1, 0, 1) ids = 1 + np.arange(n_channels) - layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids) + layout = Layout(box=box, pos=pos, names=names, kind="EEG", ids=ids) return layout @fill_doc def make_grid_layout(info, picks=None, n_col=None): - """Generate .lout file for custom data, i.e., ICA sources. + """Make a grid Layout object. + + This can be helpful to plot custom data such as ICA sources. Parameters ---------- @@ -416,12 +506,12 @@ def make_grid_layout(info, picks=None, n_col=None): -------- make_eeg_layout, generate_2d_layout """ - picks = _picks_to_idx(info, picks, 'misc') + picks = _picks_to_idx(info, picks, "misc") - names = [info['chs'][k]['ch_name'] for k in picks] + names = [info["chs"][k]["ch_name"] for k in picks] if not names: - raise ValueError('No misc data channels found.') + raise ValueError("No misc data channels found.") ids = list(range(len(picks))) size = len(picks) @@ -439,16 +529,15 @@ def make_grid_layout(info, picks=None, n_col=None): n_row = int(np.ceil(size / float(n_col))) # setup position grid - x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col), - np.linspace(-0.5, 0.5, n_row)) + x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col), np.linspace(-0.5, 0.5, n_row)) x, y = x.ravel()[:size], y.ravel()[:size] width, height = _box_size(np.c_[x, y], padding=0.1) # Some axes will be at the figure edge. Shrink everything so it fits in the # figure. Add 0.01 border around everything border_x, border_y = (0.01, 0.01) - x_scaling = 1 / (1. + width + border_x) - y_scaling = 1 / (1. + height + border_y) + x_scaling = 1 / (1.0 + width + border_x) + y_scaling = 1 / (1.0 + height + border_y) x = x * x_scaling y = y * y_scaling width *= x_scaling @@ -459,16 +548,17 @@ def make_grid_layout(info, picks=None, n_col=None): y += 0.5 # calculate pos - pos = np.c_[x - 0.5 * width, y - 0.5 * height, - width * np.ones(size), height * np.ones(size)] + pos = np.c_[ + x - 0.5 * width, y - 0.5 * height, width * np.ones(size), height * np.ones(size) + ] box = (0, 1, 0, 1) - layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids) + layout = Layout(box=box, pos=pos, names=names, kind="grid-misc", ids=ids) return layout @fill_doc -def find_layout(info, ch_type=None, exclude='bads'): +def find_layout(info, ch_type=None, exclude="bads"): """Choose a layout based on the channels in the info 'chs' field. Parameters @@ -488,57 +578,70 @@ def find_layout(info, ch_type=None, exclude='bads'): layout : Layout instance | None None if layout not found. 
""" - _check_option('ch_type', ch_type, [None, 'mag', 'grad', 'meg', 'eeg', - 'csd']) - - (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types, - has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils, - has_eeg_coils_and_meg, has_eeg_coils_only, - has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info) + _check_option("ch_type", ch_type, [None, "mag", "grad", "meg", "eeg", "csd"]) + + ( + has_vv_mag, + has_vv_grad, + is_old_vv, + has_4D_mag, + ctf_other_types, + has_CTF_grad, + n_kit_grads, + has_any_meg, + has_eeg_coils, + has_eeg_coils_and_meg, + has_eeg_coils_only, + has_neuromag_122_grad, + has_csd_coils, + ) = _get_ch_info(info) has_vv_meg = has_vv_mag and has_vv_grad has_vv_only_mag = has_vv_mag and not has_vv_grad has_vv_only_grad = has_vv_grad and not has_vv_mag if ch_type == "meg" and not has_any_meg: - raise RuntimeError('No MEG channels present. Cannot find MEG layout.') + raise RuntimeError("No MEG channels present. Cannot find MEG layout.") if ch_type == "eeg" and not has_eeg_coils: - raise RuntimeError('No EEG channels present. Cannot find EEG layout.') + raise RuntimeError("No EEG channels present. Cannot find EEG layout.") layout_name = None - if ((has_vv_meg and ch_type is None) or - (any([has_vv_mag, has_vv_grad]) and ch_type == 'meg')): - layout_name = 'Vectorview-all' - elif has_vv_only_mag or (has_vv_meg and ch_type == 'mag'): - layout_name = 'Vectorview-mag' - elif has_vv_only_grad or (has_vv_meg and ch_type == 'grad'): - if info['ch_names'][0].endswith('X'): - layout_name = 'Vectorview-grad_norm' + if (has_vv_meg and ch_type is None) or ( + any([has_vv_mag, has_vv_grad]) and ch_type == "meg" + ): + layout_name = "Vectorview-all" + elif has_vv_only_mag or (has_vv_meg and ch_type == "mag"): + layout_name = "Vectorview-mag" + elif has_vv_only_grad or (has_vv_meg and ch_type == "grad"): + if info["ch_names"][0].endswith("X"): + layout_name = "Vectorview-grad_norm" else: - layout_name = 'Vectorview-grad' + layout_name = "Vectorview-grad" elif has_neuromag_122_grad: - layout_name = 'Neuromag_122' - elif ((has_eeg_coils_only and ch_type in [None, 'eeg']) or - (has_eeg_coils_and_meg and ch_type == 'eeg')): - if not isinstance(info, (dict, Info)): - raise RuntimeError('Cannot make EEG layout, no measurement info ' - 'was passed to `find_layout`') + layout_name = "Neuromag_122" + elif (has_eeg_coils_only and ch_type in [None, "eeg"]) or ( + has_eeg_coils_and_meg and ch_type == "eeg" + ): + if not isinstance(info, dict | Info): + raise RuntimeError( + "Cannot make EEG layout, no measurement info " + "was passed to `find_layout`" + ) return make_eeg_layout(info, exclude=exclude) - elif has_csd_coils and ch_type in [None, 'csd']: + elif has_csd_coils and ch_type in [None, "csd"]: return make_eeg_layout(info, exclude=exclude, csd=True) elif has_4D_mag: - layout_name = 'magnesWH3600' + layout_name = "magnesWH3600" elif has_CTF_grad: - layout_name = 'CTF-275' + layout_name = "CTF-275" elif n_kit_grads > 0: layout_name = _find_kit_layout(info, n_kit_grads) # If no known layout is found, fall back on automatic layout if layout_name is None: - picks = _picks_to_idx(info, 'data', exclude=(), with_ref_meg=False) - ch_names = [info['ch_names'][pick] for pick in picks] + picks = _picks_to_idx(info, "data", exclude=(), with_ref_meg=False) + ch_names = [info["ch_names"][pick] for pick in picks] xy = _find_topomap_coords(info, picks=picks, ignore_overlap=True) - return generate_2d_layout(xy, ch_names=ch_names, name='custom', - normalize=True) + return generate_2d_layout(xy, 
ch_names=ch_names, name="custom", normalize=True) layout = read_layout(fname=layout_name) if not is_old_vv: @@ -547,12 +650,12 @@ def find_layout(info, ch_type=None, exclude='bads'): layout.names = _clean_names(layout.names, before_dash=True) # Apply mask for excluded channels. - if exclude == 'bads': - exclude = info['bads'] + if exclude == "bads": + exclude = info["bads"] idx = [ii for ii, name in enumerate(layout.names) if name not in exclude] layout.names = [layout.names[ii] for ii in idx] layout.pos = layout.pos[idx] - layout.ids = [layout.ids[ii] for ii in idx] + layout.ids = layout.ids[idx] return layout @@ -572,34 +675,69 @@ def _find_kit_layout(info, n_grads): kit_layout : str | None String naming the detected KIT layout or ``None`` if layout is missing. """ - if info['kit_system_id'] is not None: + from ..io.kit.constants import KIT_LAYOUT + + if info["kit_system_id"] is not None: # avoid circular import - from ..io.kit.constants import KIT_LAYOUT - return KIT_LAYOUT.get(info['kit_system_id']) + return KIT_LAYOUT.get(info["kit_system_id"]) elif n_grads == 160: - return 'KIT-160' + return "KIT-160" elif n_grads == 125: - return 'KIT-125' + return "KIT-125" elif n_grads > 157: - return 'KIT-AD' + return "KIT-AD" # channels which are on the left hemisphere for NY and right for UMD - test_chs = ('MEG 13', 'MEG 14', 'MEG 15', 'MEG 16', 'MEG 25', - 'MEG 26', 'MEG 27', 'MEG 28', 'MEG 29', 'MEG 30', - 'MEG 31', 'MEG 32', 'MEG 57', 'MEG 60', 'MEG 61', - 'MEG 62', 'MEG 63', 'MEG 64', 'MEG 73', 'MEG 90', - 'MEG 93', 'MEG 95', 'MEG 96', 'MEG 105', 'MEG 112', - 'MEG 120', 'MEG 121', 'MEG 122', 'MEG 123', 'MEG 124', - 'MEG 125', 'MEG 126', 'MEG 142', 'MEG 144', 'MEG 153', - 'MEG 154', 'MEG 155', 'MEG 156') - x = [ch['loc'][0] < 0 for ch in info['chs'] if ch['ch_name'] in test_chs] + test_chs = ( + "MEG 13", + "MEG 14", + "MEG 15", + "MEG 16", + "MEG 25", + "MEG 26", + "MEG 27", + "MEG 28", + "MEG 29", + "MEG 30", + "MEG 31", + "MEG 32", + "MEG 57", + "MEG 60", + "MEG 61", + "MEG 62", + "MEG 63", + "MEG 64", + "MEG 73", + "MEG 90", + "MEG 93", + "MEG 95", + "MEG 96", + "MEG 105", + "MEG 112", + "MEG 120", + "MEG 121", + "MEG 122", + "MEG 123", + "MEG 124", + "MEG 125", + "MEG 126", + "MEG 142", + "MEG 144", + "MEG 153", + "MEG 154", + "MEG 155", + "MEG 156", + ) + x = [ch["loc"][0] < 0 for ch in info["chs"] if ch["ch_name"] in test_chs] if np.all(x): - return 'KIT-157' # KIT-NY + return "KIT-157" # KIT-NY elif np.all(np.invert(x)): - raise NotImplementedError("Guessing sensor layout for legacy UMD " - "files is not implemented. Please convert " - "your files using MNE-Python 0.13 or " - "higher.") + raise NotImplementedError( + "Guessing sensor layout for legacy UMD " + "files is not implemented. Please convert " + "your files using MNE-Python 0.13 or " + "higher." + ) else: raise RuntimeError("KIT system could not be determined for data") @@ -629,7 +767,6 @@ def _box_size(points, width=None, height=None, padding=0.0): height : float Height of the box """ - from scipy.spatial.distance import pdist def xdiff(a, b): return np.abs(a[0] - b[0]) @@ -660,8 +797,7 @@ def ydiff(a, b): if height is None: # Find all axes that could potentially overlap horizontally. hdist = pdist(points, xdiff) - candidates = [all_combinations[i] for i, d in enumerate(hdist) - if d < width] + candidates = [all_combinations[i] for i, d in enumerate(hdist) if d < width] if len(candidates) == 0: # No axes overlap, take all the height you want. 
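As a sketch of the _box_size helper reformatted above (not part of the patch; the 2D positions and the candidate width are invented, and index pairs are used for brevity), horizontally overlapping sensor pairs are found by running scipy.spatial.distance.pdist with a custom metric:

from itertools import combinations

import numpy as np
from scipy.spatial.distance import pdist

points = np.array([[0.10, 0.20], [0.12, 0.80], [0.55, 0.50]])  # (x, y) per sensor
width = 0.05  # candidate box width


def xdiff(a, b):
    return np.abs(a[0] - b[0])  # horizontal separation only


pairs = list(combinations(range(len(points)), 2))
hdist = pdist(points, xdiff)  # pairwise horizontal distances, same order as pairs
candidates = [pairs[i] for i, d in enumerate(hdist) if d < width]
# candidates -> [(0, 1)]: only the first two sensors can overlap horizontally,
# so only their vertical spacing constrains the box height.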
@@ -674,8 +810,7 @@ def ydiff(a, b): elif width is None: # Find all axes that could potentially overlap vertically. vdist = pdist(points, ydiff) - candidates = [all_combinations[i] for i, d in enumerate(vdist) - if d < height] + candidates = [all_combinations[i] for i, d in enumerate(vdist) if d < height] if len(candidates) == 0: # No axes overlap, take all the width you want. @@ -693,8 +828,9 @@ def ydiff(a, b): @fill_doc -def _find_topomap_coords(info, picks, layout=None, ignore_overlap=False, - to_sphere=True, sphere=None): +def _find_topomap_coords( + info, picks, layout=None, ignore_overlap=False, to_sphere=True, sphere=None +): """Guess the E/MEG layout and return appropriate topomap coordinates. Parameters @@ -714,16 +850,20 @@ def _find_topomap_coords(info, picks, layout=None, ignore_overlap=False, coords : array, shape = (n_chs, 2) 2 dimensional coordinates for each sensor for a topomap plot. """ - picks = _picks_to_idx(info, picks, 'all', exclude=(), allow_empty=False) + picks = _picks_to_idx(info, picks, "all", exclude=(), allow_empty=False) if layout is not None: - chs = [info['chs'][i] for i in picks] - pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs] + chs = [info["chs"][i] for i in picks] + pos = [layout.pos[layout.names.index(ch["ch_name"])] for ch in chs] pos = np.asarray(pos) else: pos = _auto_topomap_coords( - info, picks, ignore_overlap=ignore_overlap, to_sphere=to_sphere, - sphere=sphere) + info, + picks, + ignore_overlap=ignore_overlap, + to_sphere=to_sphere, + sphere=sphere, + ) return pos @@ -755,51 +895,62 @@ def _auto_topomap_coords(info, picks, ignore_overlap, to_sphere, sphere): locs : array, shape = (n_sensors, 2) An array of positions of the 2 dimensional map. """ - from scipy.spatial.distance import pdist, squareform sphere = _check_sphere(sphere, info) - logger.debug(f'Generating coords using: {sphere}') + logger.debug(f"Generating coords using: {sphere}") - picks = _picks_to_idx(info, picks, 'all', exclude=(), allow_empty=False) - chs = [info['chs'][i] for i in picks] + picks = _picks_to_idx(info, picks, "all", exclude=(), allow_empty=False) + chs = [info["chs"][i] for i in picks] # Use channel locations if available - locs3d = np.array([ch['loc'][:3] for ch in chs]) + locs3d = np.array([ch["loc"][:3] for ch in chs]) - # If electrode locations are not available, use digization points + # If electrode locations are not available, use digitization points if not _check_ch_locs(info=info, picks=picks): - logging.warning('Did not find any electrode locations (in the info ' - 'object), will attempt to use digitization points ' - 'instead. However, if digitization points do not ' - 'correspond to the EEG electrodes, this will lead to ' - 'bad results. Please verify that the sensor locations ' - 'in the plot are accurate.') + logging.warning( + "Did not find any electrode locations (in the info " + "object), will attempt to use digitization points " + "instead. However, if digitization points do not " + "correspond to the EEG electrodes, this will lead to " + "bad results. Please verify that the sensor locations " + "in the plot are accurate." 
+ ) # MEG/EOG/ECG sensors don't have digitization points; all requested # channels must be EEG for ch in chs: - if ch['kind'] != FIFF.FIFFV_EEG_CH: - raise ValueError("Cannot determine location of MEG/EOG/ECG " - "channels using digitization points.") - - eeg_ch_names = [ch['ch_name'] for ch in info['chs'] - if ch['kind'] == FIFF.FIFFV_EEG_CH] + if ch["kind"] != FIFF.FIFFV_EEG_CH: + raise ValueError( + "Cannot determine location of MEG/EOG/ECG " + "channels using digitization points." + ) + + eeg_ch_names = [ + ch["ch_name"] for ch in info["chs"] if ch["kind"] == FIFF.FIFFV_EEG_CH + ] # Get EEG digitization points - if info['dig'] is None or len(info['dig']) == 0: - raise RuntimeError('No digitization points found.') - - locs3d = np.array([point['r'] for point in info['dig'] - if point['kind'] == FIFF.FIFFV_POINT_EEG]) + if info["dig"] is None or len(info["dig"]) == 0: + raise RuntimeError("No digitization points found.") + + locs3d = np.array( + [ + point["r"] + for point in info["dig"] + if point["kind"] == FIFF.FIFFV_POINT_EEG + ] + ) if len(locs3d) == 0: - raise RuntimeError('Did not find any digitization points of ' - 'kind FIFFV_POINT_EEG (%d) in the info.' - % FIFF.FIFFV_POINT_EEG) + raise RuntimeError( + "Did not find any digitization points of " + f"kind {FIFF.FIFFV_POINT_EEG} in the info." + ) if len(locs3d) != len(eeg_ch_names): - raise ValueError("Number of EEG digitization points (%d) " - "doesn't match the number of EEG channels " - "(%d)" % (len(locs3d), len(eeg_ch_names))) + raise ValueError( + f"Number of EEG digitization points ({len(locs3d)}) doesn't match the " + f"number of EEG channels ({len(eeg_ch_names)})" + ) # We no longer center digitization points on head origin, as we work # in head coordinates always @@ -807,22 +958,24 @@ def _auto_topomap_coords(info, picks, ignore_overlap, to_sphere, sphere): # Match the digitization points with the requested # channels. eeg_ch_locs = dict(zip(eeg_ch_names, locs3d)) - locs3d = np.array([eeg_ch_locs[ch['ch_name']] for ch in chs]) + locs3d = np.array([eeg_ch_locs[ch["ch_name"]] for ch in chs]) # Sometimes we can get nans - locs3d[~np.isfinite(locs3d)] = 0. + locs3d[~np.isfinite(locs3d)] = 0.0 # Duplicate points cause all kinds of trouble during visualization dist = pdist(locs3d) if len(locs3d) > 1 and np.min(dist) < 1e-10 and not ignore_overlap: problematic_electrodes = [ - chs[elec_i]['ch_name'] + chs[elec_i]["ch_name"] for elec_i in squareform(dist < 1e-10).any(axis=0).nonzero()[0] ] - raise ValueError('The following electrodes have overlapping positions,' - ' which causes problems during visualization:\n' + - ', '.join(problematic_electrodes)) + raise ValueError( + "The following electrodes have overlapping positions," + " which causes problems during visualization:\n" + + ", ".join(problematic_electrodes) + ) if to_sphere: # translate to sphere origin, transform/flatten Z, translate back @@ -831,7 +984,7 @@ def _auto_topomap_coords(info, picks, ignore_overlap, to_sphere, sphere): cart_coords = _cart_to_sph(locs3d) out = _pol_to_cart(cart_coords[:, 1:][:, ::-1]) # scale from radians to mm - out *= cart_coords[:, [0]] / (np.pi / 2.) + out *= cart_coords[:, [0]] / (np.pi / 2.0) out += sphere[:2] else: out = _pol_to_cart(_cart_to_sph(locs3d)) @@ -862,18 +1015,19 @@ def _topo_to_sphere(pos, eegs): xs += 0.5 - np.mean(xs[eegs]) # Center the points ys += 0.5 - np.mean(ys[eegs]) - xs = xs * 2. - 1. # Values ranging from -1 to 1 - ys = ys * 2. - 1. 
+ xs = xs * 2.0 - 1.0 # Values ranging from -1 to 1 + ys = ys * 2.0 - 1.0 - rs = np.clip(np.sqrt(xs ** 2 + ys ** 2), 0., 1.) + rs = np.clip(np.sqrt(xs**2 + ys**2), 0.0, 1.0) alphas = np.arccos(rs) zs = np.sin(alphas) return np.column_stack([xs, ys, zs]) @fill_doc -def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads', - raise_error=True): +def _pair_grad_sensors( + info, layout=None, topomap_coords=True, exclude="bads", raise_error=True +): """Find the picks for pairing grad channels. Parameters @@ -901,18 +1055,18 @@ def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads', """ # find all complete pairs of grad channels pairs = defaultdict(list) - grad_picks = pick_types(info, meg='grad', ref_meg=False, exclude=exclude) + grad_picks = pick_types(info, meg="grad", ref_meg=False, exclude=exclude) _, has_vv_grad, *_, has_neuromag_122_grad, _ = _get_ch_info(info) for i in grad_picks: - ch = info['chs'][i] - name = ch['ch_name'] - if has_vv_grad and name.startswith('MEG'): - if name.endswith(('2', '3')): + ch = info["chs"][i] + name = ch["ch_name"] + if has_vv_grad and name.startswith("MEG"): + if name.endswith(("2", "3")): key = name[-4:-1] pairs[key].append(ch) - if has_neuromag_122_grad and name.startswith('MEG'): + if has_neuromag_122_grad and name.startswith("MEG"): key = (int(name[-3:]) - 1) // 2 pairs[key].append(ch) @@ -926,74 +1080,18 @@ def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads', # find the picks corresponding to the grad channels grad_chs = sum(pairs, []) - ch_names = info['ch_names'] - picks = [ch_names.index(c['ch_name']) for c in grad_chs] + ch_names = info["ch_names"] + picks = [ch_names.index(c["ch_name"]) for c in grad_chs] if topomap_coords: shape = (len(pairs), 2, -1) - coords = (_find_topomap_coords(info, picks, layout) - .reshape(shape).mean(axis=1)) + coords = _find_topomap_coords(info, picks, layout).reshape(shape).mean(axis=1) return picks, coords else: return picks -# this function is used to pair grad when info is not present -# it is the case of Projection that don't have the info. -def _pair_grad_sensors_ch_names_vectorview(ch_names): - """Find the indices for pairing grad channels in a Vectorview system. - - Parameters - ---------- - ch_names : list of str - A list of channel names. - - Returns - ------- - indexes : list of int - Indices of the grad channels, ordered in pairs. - """ - pairs = defaultdict(list) - for i, name in enumerate(ch_names): - if name.startswith('MEG'): - if name.endswith(('2', '3')): - key = name[-4:-1] - pairs[key].append(i) - - pairs = [p for p in pairs.values() if len(p) == 2] - - grad_chs = sum(pairs, []) - return grad_chs - - -# this function is used to pair grad when info is not present -# it is the case of Projection that don't have the info. -def _pair_grad_sensors_ch_names_neuromag122(ch_names): - """Find the indices for pairing grad channels in a Neuromag 122 system. - - Parameters - ---------- - ch_names : list of str - A list of channel names. - - Returns - ------- - indexes : list of int - Indices of the grad channels, ordered in pairs. 
- """ - pairs = defaultdict(list) - for i, name in enumerate(ch_names): - if name.startswith('MEG'): - key = (int(name[-3:]) - 1) // 2 - pairs[key].append(i) - - pairs = [p for p in pairs.values() if len(p) == 2] - - grad_chs = sum(pairs, []) - return grad_chs - - -def _merge_ch_data(data, ch_type, names, method='rms'): +def _merge_ch_data(data, ch_type, names, method="rms", *, modality="opm"): """Merge data from channel pairs. Parameters @@ -1006,6 +1104,8 @@ def _merge_ch_data(data, ch_type, names, method='rms'): List of channel names. method : str Can be 'rms' or 'mean'. + modality : str + The modality of the data, either 'grad', 'fnirs', or 'opm' Returns ------- @@ -1014,15 +1114,19 @@ def _merge_ch_data(data, ch_type, names, method='rms'): names : list List of channel names. """ - if ch_type == 'grad': + if ch_type == "grad": data = _merge_grad_data(data, method) - else: - assert ch_type in _FNIRS_CH_TYPES_SPLIT + elif modality == "fnirs" or ch_type in _FNIRS_CH_TYPES_SPLIT: data, names = _merge_nirs_data(data, names) + elif modality == "opm" and ch_type == "mag": + data, names = _merge_opm_data(data, names) + else: + raise ValueError(f"Unknown modality {modality} for channel type {ch_type}") + return data, names -def _merge_grad_data(data, method='rms'): +def _merge_grad_data(data, method="rms"): """Merge data from channel pairs using the RMS or mean. Parameters @@ -1038,12 +1142,12 @@ def _merge_grad_data(data, method='rms'): The root mean square or mean for each pair. """ data, orig_shape = data.reshape((len(data) // 2, 2, -1)), data.shape - if method == 'mean': + if method == "mean": data = np.mean(data, axis=1) - elif method == 'rms': - data = np.sqrt(np.sum(data ** 2, axis=1) / 2) + elif method == "rms": + data = np.sqrt(np.sum(data**2, axis=1) / 2) else: - raise ValueError('method must be "rms" or "mean", got %s.' % method) + raise ValueError(f'method must be "rms" or "mean", got {method}.') return data.reshape(data.shape[:1] + orig_shape[1:]) @@ -1070,7 +1174,7 @@ def _merge_nirs_data(data, merged_names): """ to_remove = np.empty(0, dtype=np.int32) for idx, ch in enumerate(merged_names): - if 'x' in ch: + if "x" in ch: indices = np.empty(0, dtype=np.int32) channels = ch.split("x") for sub_ch in channels[1:]: @@ -1084,9 +1188,48 @@ def _merge_nirs_data(data, merged_names): return data, merged_names -def generate_2d_layout(xy, w=.07, h=.05, pad=.02, ch_names=None, - ch_indices=None, name='ecog', bg_image=None, - normalize=True): +def _merge_opm_data(data, merged_names): + """Merge data from multiple opm channel by just using the radial component. + + Channel names that end in "MERGE_REMOVE" (ie non-radial channels) will be + removed. Only the the radial channel is kept. + + Parameters + ---------- + data : array, shape = (n_channels, ..., n_times) + Data for channels. + merged_names : list + List of strings containing the channel names. Channels that are to be + removed end in "MERGE_REMOVE". + + Returns + ------- + data : array + Data for channels with requested channels merged. Channels used in the + merge are removed from the array. 
+ """ + to_remove = np.empty(0, dtype=np.int32) + for idx, ch in enumerate(merged_names): + if ch.endswith("MERGE-REMOVE"): + to_remove = np.append(to_remove, idx) + to_remove = np.unique(to_remove) + for rem in sorted(to_remove, reverse=True): + del merged_names[rem] + data = np.delete(data, to_remove, axis=0) + return data, merged_names + + +def generate_2d_layout( + xy, + w=0.07, + h=0.05, + pad=0.02, + ch_names=None, + ch_indices=None, + name="ecog", + bg_image=None, + normalize=True, +): """Generate a custom 2D layout from xy points. Generates a 2-D layout for plotting with plot_topo methods and @@ -1137,15 +1280,16 @@ def generate_2d_layout(xy, w=.07, h=.05, pad=.02, ch_names=None, .. versionadded:: 0.9.0 """ import matplotlib.pyplot as plt + if ch_indices is None: ch_indices = np.arange(xy.shape[0]) if ch_names is None: - ch_names = ['{}'.format(i) for i in ch_indices] + ch_names = list(map(str, ch_indices)) if len(ch_names) != len(ch_indices): - raise ValueError('# channel names and indices must be equal') + raise ValueError("# channel names and indices must be equal") if len(ch_names) != len(xy): - raise ValueError('# channel names and xy vals must be equal') + raise ValueError("# channel names and xy vals must be equal") x, y = xy.copy().astype(float).T @@ -1159,12 +1303,12 @@ def generate_2d_layout(xy, w=.07, h=.05, pad=.02, ch_names=None, # Normalize x and y by their maxes for i_dim in [x, y]: i_dim -= i_dim.min(0) - i_dim /= (i_dim.max(0) - i_dim.min(0)) + i_dim /= i_dim.max(0) - i_dim.min(0) # Create box and pos variable box = _box_size(np.vstack([x, y]).T, padding=pad) box = (0, 0, box[0], box[1]) - w, h = [np.array([i] * x.shape[0]) for i in [w, h]] + w, h = (np.array([i] * x.shape[0]) for i in [w, h]) loc_params = np.vstack([x, y, w, h]).T layout = Layout(box, loc_params, ch_names, ch_indices, name) diff --git a/mne/channels/montage.py b/mne/channels/montage.py index 3bc153a0d14..35fdbce917c 100644 --- a/mne/channels/montage.py +++ b/mne/channels/montage.py @@ -1,46 +1,65 @@ -# Authors: Alexandre Gramfort -# Denis Engemann -# Martin Luessi -# Eric Larson -# Marijn van Vliet -# Jona Sassenhagen -# Teon Brooks -# Christian Brodbeck -# Stefan Appelhoff -# Joan Massich -# -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-from collections import OrderedDict -from dataclasses import dataclass -from copy import deepcopy import os.path as op import re +from collections import OrderedDict +from copy import deepcopy +from dataclasses import dataclass import numpy as np -from ..defaults import HEAD_SIZE_DEFAULT +from .._fiff._digitization import ( + _coord_frame_const, + _count_points_by_type, + _ensure_fiducials_head, + _format_dig_points, + _get_data_as_dict_from_dig, + _get_dig_eeg, + _get_fid_coords, + _make_dig_points, + _read_dig_fif, + write_dig, +) +from .._fiff.constants import CHANNEL_LOC_ALIASES, FIFF +from .._fiff.meas_info import create_info +from .._fiff.open import fiff_open +from .._fiff.pick import _picks_to_idx, channel_type, pick_types from .._freesurfer import get_mni_fiducials +from ..defaults import HEAD_SIZE_DEFAULT +from ..transforms import ( + Transform, + _ensure_trans, + _fit_matched_points, + _frame_to_str, + _quat_to_affine, + _sph_to_cart, + _topo_to_sph, + _verbose_frames, + apply_trans, + get_ras_to_neuromag_trans, +) +from ..utils import ( + _check_fname, + _check_option, + _on_missing, + _pl, + _validate_type, + check_fname, + copy_function_doc_to_method_doc, + fill_doc, + legacy, + verbose, + warn, +) +from ..utils.docs import docdict from ..viz import plot_montage -from ..transforms import (apply_trans, get_ras_to_neuromag_trans, _sph_to_cart, - _topo_to_sph, _frame_to_str, Transform, - _verbose_frames, _fit_matched_points, - _quat_to_affine, _ensure_trans) -from ..io._digitization import (_count_points_by_type, _ensure_fiducials_head, - _get_dig_eeg, _make_dig_points, write_dig, - _read_dig_fif, _format_dig_points, - _get_fid_coords, _coord_frame_const, - _get_data_as_dict_from_dig) -from ..io.meas_info import create_info -from ..io.open import fiff_open -from ..io.pick import pick_types, _picks_to_idx, channel_type -from ..io.constants import FIFF, CHANNEL_LOC_ALIASES -from ..utils import (warn, copy_function_doc_to_method_doc, _pl, verbose, - _check_option, _validate_type, _check_fname, _on_missing, - fill_doc, _docdict) - -from ._dig_montage_utils import _read_dig_montage_egi -from ._dig_montage_utils import _parse_brainvision_dig_montage +from ._dig_montage_utils import ( + _parse_brainvision_dig_montage, + _read_dig_montage_curry, + _read_dig_montage_egi, +) @dataclass @@ -51,132 +70,137 @@ class _BuiltinStandardMontage: _BUILTIN_STANDARD_MONTAGES = [ _BuiltinStandardMontage( - name='standard_1005', - description='Electrodes are named and positioned according to the ' - 'international 10-05 system (343+3 locations)', + name="standard_1005", + description="Electrodes are named and positioned according to the " + "international 10-05 system (343+3 locations)", ), _BuiltinStandardMontage( - name='standard_1020', - description='Electrodes are named and positioned according to the ' - 'international 10-20 system (94+3 locations)', + name="standard_1020", + description="Electrodes are named and positioned according to the " + "international 10-20 system (94+3 locations)", ), _BuiltinStandardMontage( - name='standard_alphabetic', - description='Electrodes are named with LETTER-NUMBER combinations ' - '(A1, B2, F4, …) (65+3 locations)', + name="standard_alphabetic", + description="Electrodes are named with LETTER-NUMBER combinations " + "(A1, B2, F4, …) (65+3 locations)", ), _BuiltinStandardMontage( - name='standard_postfixed', - description='Electrodes are named according to the international ' - '10-20 system using postfixes for intermediate positions ' - '(100+3 locations)', 
+ name="standard_postfixed", + description="Electrodes are named according to the international " + "10-20 system using postfixes for intermediate positions " + "(100+3 locations)", ), _BuiltinStandardMontage( - name='standard_prefixed', - description='Electrodes are named according to the international ' - '10-20 system using prefixes for intermediate positions ' - '(74+3 locations)', + name="standard_prefixed", + description="Electrodes are named according to the international " + "10-20 system using prefixes for intermediate positions " + "(74+3 locations)", ), _BuiltinStandardMontage( - name='standard_primed', + name="standard_primed", description="Electrodes are named according to the international " - "10-20 system using prime marks (' and '') for " - "intermediate positions (100+3 locations)", + "10-20 system using prime marks (' and '') for " + "intermediate positions (100+3 locations)", ), _BuiltinStandardMontage( - name='biosemi16', - description='BioSemi cap with 16 electrodes (16+3 locations)', + name="biosemi16", + description="BioSemi cap with 16 electrodes (16+3 locations)", ), _BuiltinStandardMontage( - name='biosemi32', - description='BioSemi cap with 32 electrodes (32+3 locations)', + name="biosemi32", + description="BioSemi cap with 32 electrodes (32+3 locations)", ), _BuiltinStandardMontage( - name='biosemi64', - description='BioSemi cap with 64 electrodes (64+3 locations)', + name="biosemi64", + description="BioSemi cap with 64 electrodes (64+3 locations)", ), _BuiltinStandardMontage( - name='biosemi128', - description='BioSemi cap with 128 electrodes (128+3 locations)', + name="biosemi128", + description="BioSemi cap with 128 electrodes (128+3 locations)", ), _BuiltinStandardMontage( - name='biosemi160', - description='BioSemi cap with 160 electrodes (160+3 locations)', + name="biosemi160", + description="BioSemi cap with 160 electrodes (160+3 locations)", ), _BuiltinStandardMontage( - name='biosemi256', - description='BioSemi cap with 256 electrodes (256+3 locations)', + name="biosemi256", + description="BioSemi cap with 256 electrodes (256+3 locations)", ), _BuiltinStandardMontage( - name='easycap-M1', - description='EasyCap with 10-05 electrode names (74 locations)', + name="easycap-M1", + description="EasyCap with 10-05 electrode names (74 locations)", ), _BuiltinStandardMontage( - name='easycap-M10', - description='EasyCap with numbered electrodes (61 locations)', + name="easycap-M10", + description="EasyCap with numbered electrodes (61 locations)", ), _BuiltinStandardMontage( - name='EGI_256', - description='Geodesic Sensor Net (256 locations)', + name="easycap-M43", + description="EasyCap with numbered electrodes (64 locations)", ), _BuiltinStandardMontage( - name='GSN-HydroCel-32', - description='HydroCel Geodesic Sensor Net and Cz (33+3 locations)', + name="EGI_256", + description="Geodesic Sensor Net (256 locations)", ), _BuiltinStandardMontage( - name='GSN-HydroCel-64_1.0', - description='HydroCel Geodesic Sensor Net (64+3 locations)', + name="GSN-HydroCel-32", + description="HydroCel Geodesic Sensor Net and Cz (33+3 locations)", ), _BuiltinStandardMontage( - name='GSN-HydroCel-65_1.0', - description='HydroCel Geodesic Sensor Net and Cz (65+3 locations)', + name="GSN-HydroCel-64_1.0", + description="HydroCel Geodesic Sensor Net (64+3 locations)", ), _BuiltinStandardMontage( - name='GSN-HydroCel-128', - description='HydroCel Geodesic Sensor Net (128+3 locations)', + name="GSN-HydroCel-65_1.0", + description="HydroCel Geodesic Sensor Net and Cz (65+3 
locations)", ), _BuiltinStandardMontage( - name='GSN-HydroCel-129', - description='HydroCel Geodesic Sensor Net and Cz (129+3 locations)', + name="GSN-HydroCel-128", + description="HydroCel Geodesic Sensor Net (128+3 locations)", ), _BuiltinStandardMontage( - name='GSN-HydroCel-256', - description='HydroCel Geodesic Sensor Net (256+3 locations)', + name="GSN-HydroCel-129", + description="HydroCel Geodesic Sensor Net and Cz (129+3 locations)", ), _BuiltinStandardMontage( - name='GSN-HydroCel-257', - description='HydroCel Geodesic Sensor Net and Cz (257+3 locations)', + name="GSN-HydroCel-256", + description="HydroCel Geodesic Sensor Net (256+3 locations)", ), _BuiltinStandardMontage( - name='mgh60', - description='The (older) 60-channel cap used at MGH (60+3 locations)', + name="GSN-HydroCel-257", + description="HydroCel Geodesic Sensor Net and Cz (257+3 locations)", ), _BuiltinStandardMontage( - name='mgh70', - description='The (newer) 70-channel BrainVision cap used at MGH ' - '(70+3 locations)', + name="mgh60", + description="The (older) 60-channel cap used at MGH (60+3 locations)", ), _BuiltinStandardMontage( - name='artinis-octamon', - description='Artinis OctaMon fNIRS (8 sources, 2 detectors)', + name="mgh70", + description="The (newer) 70-channel BrainVision cap used at MGH " + "(70+3 locations)", ), _BuiltinStandardMontage( - name='artinis-brite23', - description='Artinis Brite23 fNIRS (11 sources, 7 detectors)', + name="artinis-octamon", + description="Artinis OctaMon fNIRS (8 sources, 2 detectors)", ), _BuiltinStandardMontage( - name='brainproducts-RNP-BA-128', - description='Brain Products with 10-10 electrode names (128 channels)', - ) + name="artinis-brite23", + description="Artinis Brite23 fNIRS (11 sources, 7 detectors)", + ), + _BuiltinStandardMontage( + name="brainproducts-RNP-BA-128", + description="Brain Products with 10-10 electrode names (128 channels)", + ), ] def _check_get_coord_frame(dig): - dig_coord_frames = sorted(set(d['coord_frame'] for d in dig)) + dig_coord_frames = sorted(set(d["coord_frame"] for d in dig)) if len(dig_coord_frames) != 1: raise RuntimeError( - 'Only a single coordinate frame in dig is supported, got ' - f'{dig_coord_frames}') + "Only a single coordinate frame in dig is supported, got " + f"{dig_coord_frames}" + ) return _frame_to_str[dig_coord_frames.pop()] if dig_coord_frames else None @@ -205,15 +229,20 @@ def get_builtin_montages(*, descriptions=False): If ``descriptions=True``, a list of tuples ``(name, description)``. """ if descriptions: - return [ - (m.name, m.description) for m in _BUILTIN_STANDARD_MONTAGES - ] + return [(m.name, m.description) for m in _BUILTIN_STANDARD_MONTAGES] else: return [m.name for m in _BUILTIN_STANDARD_MONTAGES] -def make_dig_montage(ch_pos=None, nasion=None, lpa=None, rpa=None, - hsp=None, hpi=None, coord_frame='unknown'): +def make_dig_montage( + ch_pos=None, + nasion=None, + lpa=None, + rpa=None, + hsp=None, + hpi=None, + coord_frame="unknown", +): r"""Make montage from arrays. 
Parameters @@ -263,20 +292,25 @@ def make_dig_montage(ch_pos=None, nasion=None, lpa=None, rpa=None, read_dig_localite read_dig_polhemus_isotrak """ - _validate_type(ch_pos, (dict, None), 'ch_pos') + _validate_type(ch_pos, (dict, None), "ch_pos") if ch_pos is None: ch_names = None else: ch_names = list(ch_pos) dig = _make_dig_points( - nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, extra_points=hsp, - dig_ch_pos=ch_pos, coord_frame=coord_frame + nasion=nasion, + lpa=lpa, + rpa=rpa, + hpi=hpi, + extra_points=hsp, + dig_ch_pos=ch_pos, + coord_frame=coord_frame, ) return DigMontage(dig=dig, ch_names=ch_names) -class DigMontage(object): +class DigMontage: """Montage for digitized electrode and headshape position data. .. warning:: Montages are typically created using one of the helper @@ -293,7 +327,6 @@ class DigMontage(object): See Also -------- read_dig_captrak - read_dig_dat read_dig_egi read_dig_fif read_dig_hpts @@ -308,13 +341,13 @@ class DigMontage(object): def __init__(self, *, dig=None, ch_names=None): dig = list() if dig is None else dig - _validate_type(item=dig, types=list, item_name='dig') + _validate_type(item=dig, types=list, item_name="dig") ch_names = list() if ch_names is None else ch_names - n_eeg = sum([1 for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG]) + n_eeg = sum([1 for d in dig if d["kind"] == FIFF.FIFFV_POINT_EEG]) if n_eeg != len(ch_names): raise ValueError( - 'The number of EEG channels (%d) does not match the number' - ' of channel names provided (%d)' % (n_eeg, len(ch_names)) + f"The number of EEG channels ({n_eeg}) does not match the number" + f" of channel names provided ({len(ch_names)})" ) self.dig = dig @@ -323,23 +356,46 @@ def __init__(self, *, dig=None, ch_names=None): def __repr__(self): """Return string representation.""" n_points = _count_points_by_type(self.dig) - return ('').format(**n_points) + return ( + "" + ).format(**n_points) @copy_function_doc_to_method_doc(plot_montage) - def plot(self, scale_factor=20, show_names=True, kind='topomap', show=True, - sphere=None, *, axes=None, verbose=None): - return plot_montage(self, scale_factor=scale_factor, - show_names=show_names, kind=kind, show=show, - sphere=sphere, axes=axes) - - @fill_doc - def rename_channels(self, mapping, allow_duplicates=False): + def plot( + self, + *, + scale=1.0, + show_names=True, + kind="topomap", + show=True, + sphere=None, + axes=None, + verbose=None, + ): + return plot_montage( + self, + scale=scale, + show_names=show_names, + kind=kind, + show=show, + sphere=sphere, + axes=axes, + ) + + @verbose + def rename_channels( + self, mapping, allow_duplicates=False, *, on_missing="raise", verbose=None + ): """Rename the channels. Parameters ---------- %(mapping_rename_channels_duplicates)s + %(on_missing_ch_names)s + + .. versionadded:: 1.11.0 + %(verbose)s Returns ------- @@ -347,9 +403,11 @@ def rename_channels(self, mapping, allow_duplicates=False): The instance. Operates in-place. 
""" from .channels import rename_channels - temp_info = create_info(list(self._get_ch_pos()), 1000., 'eeg') - rename_channels(temp_info, mapping, allow_duplicates) - self.ch_names = temp_info['ch_names'] + + temp_info = create_info(list(self._get_ch_pos()), 1000.0, "eeg") + rename_channels(temp_info, mapping, allow_duplicates, on_missing=on_missing) + self.ch_names = temp_info["ch_names"] + return self @verbose def save(self, fname, *, overwrite=False, verbose=None): @@ -358,12 +416,25 @@ def save(self, fname, *, overwrite=False, verbose=None): Parameters ---------- fname : path-like - The filename to use. Should end in .fif or .fif.gz. + The filename to use. Should end in ``-dig.fif`` or ``-dig.fif.gz``. %(overwrite)s %(verbose)s + + See Also + -------- + mne.channels.read_dig_fif + + Notes + ----- + .. versionchanged:: 1.9 + Added support for saving the associated channel names. """ + fname = _check_fname(fname, overwrite=overwrite) + check_fname(fname, "montage", ("-dig.fif", "-dig.fif.gz")) coord_frame = _check_get_coord_frame(self.dig) - write_dig(fname, self.dig, coord_frame, overwrite=overwrite) + write_dig( + fname, self.dig, coord_frame, overwrite=overwrite, ch_names=self.ch_names + ) def __iadd__(self, other): """Add two DigMontages in place. @@ -374,20 +445,19 @@ def __iadd__(self, other): and if fiducials are present they should share the same coordinate system and location values. """ + def is_fid_defined(fid): - return not ( - fid.nasion is None and fid.lpa is None and fid.rpa is None - ) + return not (fid.nasion is None and fid.lpa is None and fid.rpa is None) # Check for none duplicated ch_names ch_names_intersection = set(self.ch_names).intersection(other.ch_names) if ch_names_intersection: - raise RuntimeError(( - "Cannot add two DigMontage objects if they contain duplicated" - " channel names. Duplicated channel(s) found: {}." - ).format( - ', '.join(['%r' % v for v in sorted(ch_names_intersection)]) - )) + raise RuntimeError( + ( + "Cannot add two DigMontage objects if they contain duplicated" + " channel names. Duplicated channel(s) found: {}." + ).format(", ".join([f"{v!r}" for v in sorted(ch_names_intersection)])) + ) # Check for unique matching fiducials self_fid, self_coord = _get_fid_coords(self.dig) @@ -395,20 +465,24 @@ def is_fid_defined(fid): if is_fid_defined(self_fid) and is_fid_defined(other_fid): if self_coord != other_coord: - raise RuntimeError('Cannot add two DigMontage objects if ' - 'fiducial locations are not in the same ' - 'coordinate system.') + raise RuntimeError( + "Cannot add two DigMontage objects if " + "fiducial locations are not in the same " + "coordinate system." 
+ ) for kk in self_fid: if not np.array_equal(self_fid[kk], other_fid[kk]): - raise RuntimeError('Cannot add two DigMontage objects if ' - 'fiducial locations do not match ' - '(%s)' % kk) + raise RuntimeError( + "Cannot add two DigMontage objects if " + "fiducial locations do not match " + f"({kk})" + ) # keep self self.dig = _format_dig_points( - self.dig + [d for d in other.dig - if d['kind'] != FIFF.FIFFV_POINT_CARDINAL] + self.dig + + [d for d in other.dig if d["kind"] != FIFF.FIFFV_POINT_CARDINAL] ) else: self.dig = _format_dig_points(self.dig + other.dig) @@ -442,13 +516,13 @@ def __eq__(self, other): return self.dig == other.dig and self.ch_names == other.ch_names def _get_ch_pos(self): - pos = [d['r'] for d in _get_dig_eeg(self.dig)] + pos = [d["r"] for d in _get_dig_eeg(self.dig)] assert len(self.ch_names) == len(pos) return OrderedDict(zip(self.ch_names, pos)) def _get_dig_names(self): NAMED_KIND = (FIFF.FIFFV_POINT_EEG,) - is_eeg = np.array([d['kind'] in NAMED_KIND for d in self.dig]) + is_eeg = np.array([d["kind"] in NAMED_KIND for d in self.dig]) assert len(self.ch_names) == is_eeg.sum() dig_names = [None] * len(self.dig) for ch_name_idx, dig_idx in enumerate(np.where(is_eeg)[0]): @@ -509,16 +583,15 @@ def apply_trans(self, trans, verbose=None): The transformation matrix to be applied. %(verbose)s """ - _validate_type(trans, Transform, 'trans') - coord_frame = self.get_positions()['coord_frame'] - trans = _ensure_trans(trans, fro=coord_frame, to=trans['to']) + _validate_type(trans, Transform, "trans") + coord_frame = self.get_positions()["coord_frame"] + trans = _ensure_trans(trans, fro=coord_frame, to=trans["to"]) for d in self.dig: - d['r'] = apply_trans(trans, d['r']) - d['coord_frame'] = trans['to'] + d["r"] = apply_trans(trans, d["r"]) + d["coord_frame"] = trans["to"] @verbose - def add_estimated_fiducials(self, subject, subjects_dir=None, - verbose=None): + def add_estimated_fiducials(self, subject, subjects_dir=None, verbose=None): """Estimate fiducials based on FreeSurfer ``fsaverage`` subject. This takes a montage with the ``mri`` coordinate frame, @@ -558,8 +631,9 @@ def add_estimated_fiducials(self, subject, subjects_dir=None, if montage_bunch.coord_frame != FIFF.FIFFV_COORD_MRI: raise RuntimeError( f'Montage should be in the "mri" coordinate frame ' - f'to use `add_estimated_fiducials`. The current coordinate ' - f'frame is {montage_bunch.coord_frame}') + f"to use `add_estimated_fiducials`. The current coordinate " + f"frame is {montage_bunch.coord_frame}" + ) # estimate LPA, nasion, RPA from FreeSurfer fsaverage fids_mri = list(get_mni_fiducials(subject, subjects_dir)) @@ -598,14 +672,15 @@ def add_mni_fiducials(self, subjects_dir=None, verbose=None): if montage_bunch.coord_frame != FIFF.FIFFV_MNE_COORD_MNI_TAL: raise RuntimeError( f'Montage should be in the "mni_tal" coordinate frame ' - f'to use `add_estimated_fiducials`. The current coordinate ' - f'frame is {montage_bunch.coord_frame}') + f"to use `add_estimated_fiducials`. 
The current coordinate " + f"frame is {montage_bunch.coord_frame}" + ) - fids_mni = get_mni_fiducials('fsaverage', subjects_dir) + fids_mni = get_mni_fiducials("fsaverage", subjects_dir) for fid in fids_mni: # "mri" and "mni_tal" are equivalent for fsaverage - assert fid['coord_frame'] == FIFF.FIFFV_COORD_MRI - fid['coord_frame'] = FIFF.FIFFV_MNE_COORD_MNI_TAL + assert fid["coord_frame"] == FIFF.FIFFV_COORD_MRI + fid["coord_frame"] = FIFF.FIFFV_MNE_COORD_MNI_TAL self.dig = fids_mni + self.dig return self @@ -632,7 +707,7 @@ def remove_fiducials(self, verbose=None): should not be changed by removing fiducials. """ for d in self.dig.copy(): - if d['kind'] == FIFF.FIFFV_POINT_CARDINAL: + if d["kind"] == FIFF.FIFFV_POINT_CARDINAL: self.dig.remove(d) return self @@ -641,7 +716,7 @@ def remove_fiducials(self, verbose=None): def _check_unit_and_get_scaling(unit): - _check_option('unit', unit, sorted(VALID_SCALES.keys())) + _check_option("unit", unit, sorted(VALID_SCALES.keys())) return VALID_SCALES[unit] @@ -656,7 +731,7 @@ def transform_to_head(montage): Returns ------- montage : instance of DigMontage - The montage after transforming the points to head + A copy of the montage after transforming the points to head coordinate system. Notes @@ -677,24 +752,24 @@ def transform_to_head(montage): # Get fiducial points and their coord_frame native_head_t = compute_native_head_t(montage) montage = montage.copy() # to avoid inplace modification - if native_head_t['from'] != FIFF.FIFFV_COORD_HEAD: + if native_head_t["from"] != FIFF.FIFFV_COORD_HEAD: for d in montage.dig: - if d['coord_frame'] == native_head_t['from']: - d['r'] = apply_trans(native_head_t, d['r']) - d['coord_frame'] = FIFF.FIFFV_COORD_HEAD + if d["coord_frame"] == native_head_t["from"]: + d["r"] = apply_trans(native_head_t, d["r"]) + d["coord_frame"] = FIFF.FIFFV_COORD_HEAD _ensure_fiducials_head(montage.dig) return montage +@legacy(alt="read_dig_curry()") def read_dig_dat(fname): r"""Read electrode positions from a ``*.dat`` file. .. Warning:: This function was implemented based on ``*.dat`` files available from - `Compumedics `__ and might not work as expected with novel - files. If it does not read your files correctly please contact the - mne-python developers. + `Compumedics `__ and might not work + as expected with novel files. If it does not read your files correctly + please contact the MNE-Python developers. Parameters ---------- @@ -709,7 +784,7 @@ def read_dig_dat(fname): See Also -------- read_dig_captrak - read_dig_dat + read_dig_curry read_dig_egi read_dig_fif read_dig_hpts @@ -723,9 +798,10 @@ def read_dig_dat(fname): a plain text editor. 
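As a rough illustration of the fiducial handling touched in the hunks above (a sketch using a built-in montage, which ships with its fiducials):

    from mne.channels import make_standard_montage

    montage = make_standard_montage("standard_1020")
    assert montage.get_positions()["nasion"] is not None  # fiducials present
    montage.remove_fiducials()  # drops LPA/nasion/RPA in place, keeps the coord frame
    assert montage.get_positions()["nasion"] is None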
""" from ._standard_montage_utils import _check_dupes_odict - fname = _check_fname(fname, overwrite='read', must_exist=True) - with open(fname, 'r') as fid: + fname = _check_fname(fname, overwrite="read", must_exist=True) + + with open(fname) as fid: lines = fid.readlines() ch_names, poss = list(), list() @@ -736,17 +812,18 @@ def read_dig_dat(fname): continue elif len(items) != 5: raise ValueError( - "Error reading %s, line %s has unexpected number of entries:\n" - "%s" % (fname, i, line.rstrip())) + f"Error reading {fname}, line {i} has unexpected number of entries:\n" + f"{line.rstrip()}" + ) num = items[1] - if num == '67': + if num == "67": continue # centroid pos = np.array([float(item) for item in items[2:]]) - if num == '78': + if num == "78": nasion = pos - elif num == '76': + elif num == "76": lpa = pos - elif num == '82': + elif num == "82": rpa = pos else: ch_names.append(items[0]) @@ -755,17 +832,15 @@ def read_dig_dat(fname): return make_dig_montage(electrodes, nasion, lpa, rpa) -def read_dig_fif(fname): +@verbose +def read_dig_fif(fname, *, verbose=None): r"""Read digitized points from a .fif file. - Note that electrode names are not present in the .fif file so - they are here defined with the convention from VectorView - systems (EEG001, EEG002, etc.) - Parameters ---------- fname : path-like FIF file from which to read digitization locations. + %(verbose)s Returns ------- @@ -775,30 +850,41 @@ def read_dig_fif(fname): See Also -------- DigMontage - read_dig_dat read_dig_egi read_dig_captrak + read_dig_curry read_dig_polhemus_isotrak read_dig_hpts read_dig_localite make_dig_montage + + Notes + ----- + .. versionchanged:: 1.9 + Added support for reading the associated channel names, if present. + + In some files, electrode names are not present (e.g., in older files). + For those files, the channel names are defined with the convention from + VectorView systems (EEG001, EEG002, etc.). """ - _check_fname(fname, overwrite='read', must_exist=True) + check_fname(fname, "montage", ("-dig.fif", "-dig.fif.gz")) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") # Load the dig data f, tree = fiff_open(fname)[:2] with f as fid: - dig = _read_dig_fif(fid, tree) + dig, ch_names = _read_dig_fif(fid, tree, return_ch_names=True) - ch_names = [] - for d in dig: - if d['kind'] == FIFF.FIFFV_POINT_EEG: - ch_names.append('EEG%03d' % d['ident']) + if ch_names is None: # backward compat from when we didn't save the names + ch_names = [] + for d in dig: + if d["kind"] == FIFF.FIFFV_POINT_EEG: + ch_names.append(f"EEG{d['ident']:03d}") montage = DigMontage(dig=dig, ch_names=ch_names) return montage -def read_dig_hpts(fname, unit='mm'): +def read_dig_hpts(fname, unit="mm"): """Read historical ``.hpts`` MNE-C files. Parameters @@ -817,7 +903,7 @@ def read_dig_hpts(fname, unit='mm'): -------- DigMontage read_dig_captrak - read_dig_dat + read_dig_curry read_dig_egi read_dig_fif read_dig_localite @@ -866,28 +952,29 @@ def read_dig_hpts(fname, unit='mm'): eeg F7 -6.1042 -68.2969 45.4939 ... 
""" - from ._standard_montage_utils import _str_names, _str + from ._standard_montage_utils import _str, _str_names - fname = _check_fname(fname, overwrite='read', must_exist=True) + fname = _check_fname(fname, overwrite="read", must_exist=True) _scale = _check_unit_and_get_scaling(unit) - out = np.genfromtxt(fname, comments='#', - dtype=(_str, _str, 'f8', 'f8', 'f8')) - kind, label = _str_names(out['f0']), _str_names(out['f1']) + out = np.genfromtxt(fname, comments="#", dtype=(_str, _str, "f8", "f8", "f8")) + kind, label = _str_names(out["f0"]), _str_names(out["f1"]) kind = [k.lower() for k in kind] - xyz = np.array([out['f%d' % ii] for ii in range(2, 5)]).T + xyz = np.array([out[f"f{ii}"] for ii in range(2, 5)]).T xyz *= _scale del _scale - fid_idx_to_label = {'1': 'lpa', '2': 'nasion', '3': 'rpa'} - fid = {fid_idx_to_label[label[ii]]: this_xyz - for ii, this_xyz in enumerate(xyz) if kind[ii] == 'cardinal'} - ch_pos = {label[ii]: this_xyz - for ii, this_xyz in enumerate(xyz) if kind[ii] == 'eeg'} - hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz) - if kind[ii] == 'hpi']) + fid_idx_to_label = {"1": "lpa", "2": "nasion", "3": "rpa"} + fid = { + fid_idx_to_label[label[ii]]: this_xyz + for ii, this_xyz in enumerate(xyz) + if kind[ii] == "cardinal" + } + ch_pos = { + label[ii]: this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "eeg" + } + hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "hpi"]) hpi.shape = (-1, 3) # in case it's empty - hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz) - if kind[ii] == 'extra']) + hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "extra"]) hsp.shape = (-1, 3) # in case it's empty return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp) @@ -909,19 +996,17 @@ def read_dig_egi(fname): -------- DigMontage read_dig_captrak - read_dig_dat + read_dig_curry read_dig_fif read_dig_hpts read_dig_localite read_dig_polhemus_isotrak make_dig_montage """ - _check_fname(fname, overwrite='read', must_exist=True) + _check_fname(fname, overwrite="read", must_exist=True) data = _read_dig_montage_egi( - fname=fname, - _scaling=1., - _all_data_kwargs_are_none=True + fname=fname, _scaling=1.0, _all_data_kwargs_are_none=True ) return make_dig_montage(**data) @@ -943,7 +1028,7 @@ def read_dig_captrak(fname): See Also -------- DigMontage - read_dig_dat + read_dig_curry read_dig_egi read_dig_fif read_dig_hpts @@ -951,12 +1036,57 @@ def read_dig_captrak(fname): read_dig_polhemus_isotrak make_dig_montage """ - _check_fname(fname, overwrite='read', must_exist=True) + _check_fname(fname, overwrite="read", must_exist=True) data = _parse_brainvision_dig_montage(fname, scale=1e-3) return make_dig_montage(**data) +def read_dig_curry(fname): + """Read electrode locations from Neuroscan Curry files. + + Parameters + ---------- + fname : path-like + A valid Curry file. + + Returns + ------- + montage : instance of DigMontage | None + The montage. + + See Also + -------- + DigMontage + read_dig_captrak + read_dig_egi + read_dig_fif + read_dig_hpts + read_dig_localite + read_dig_polhemus_isotrak + make_dig_montage + + Notes + ----- + .. 
versionadded:: 1.11 + """ + from ..io.curry.curry import ( + _check_curry_filename, + _extract_curry_info, + ) + + # TODO - REVIEW NEEDED + fname = _check_curry_filename(fname) + (_, _, ch_names, ch_types, ch_pos, landmarks, landmarkslabels, _, _, _, _, _, _) = ( + _extract_curry_info(fname) + ) + data = _read_dig_montage_curry( + ch_names, ch_types, ch_pos, landmarks, landmarkslabels + ) + mont = make_dig_montage(**data) if data else None + return mont + + def read_dig_localite(fname, nasion=None, lpa=None, rpa=None): """Read Localite .csv file. @@ -980,7 +1110,7 @@ def read_dig_localite(fname, nasion=None, lpa=None, rpa=None): -------- DigMontage read_dig_captrak - read_dig_dat + read_dig_curry read_dig_egi read_dig_fif read_dig_hpts @@ -1005,7 +1135,7 @@ def read_dig_localite(fname, nasion=None, lpa=None, rpa=None): def _get_montage_in_head(montage): - coords = set([d['coord_frame'] for d in montage.dig]) + coords = set([d["coord_frame"] for d in montage.dig]) montage = montage.copy() if len(coords) == 1 and coords.pop() == FIFF.FIFFV_COORD_HEAD: _ensure_fiducials_head(montage.dig) @@ -1024,33 +1154,33 @@ def _set_montage_fnirs(info, montage): place. """ from ..preprocessing.nirs import _validate_nirs_info + # Validate that the fNIRS info is correctly formatted picks = _validate_nirs_info(info) # Modify info['chs'][#]['loc'] in place num_ficiduals = len(montage.dig) - len(montage.ch_names) for ch_idx in picks: - ch = info['chs'][ch_idx]['ch_name'] - source, detector = ch.split(' ')[0].split('_') - source_pos = montage.dig[montage.ch_names.index(source) - + num_ficiduals]['r'] - detector_pos = montage.dig[montage.ch_names.index(detector) - + num_ficiduals]['r'] - - info['chs'][ch_idx]['loc'][3:6] = source_pos - info['chs'][ch_idx]['loc'][6:9] = detector_pos + ch = info["chs"][ch_idx]["ch_name"] + source, detector = ch.split(" ")[0].split("_") + source_pos = montage.dig[montage.ch_names.index(source) + num_ficiduals]["r"] + detector_pos = montage.dig[montage.ch_names.index(detector) + num_ficiduals][ + "r" + ] + + info["chs"][ch_idx]["loc"][3:6] = source_pos + info["chs"][ch_idx]["loc"][6:9] = detector_pos midpoint = (source_pos + detector_pos) / 2 - info['chs'][ch_idx]['loc'][:3] = midpoint - info['chs'][ch_idx]['coord_frame'] = FIFF.FIFFV_COORD_HEAD + info["chs"][ch_idx]["loc"][:3] = midpoint + info["chs"][ch_idx]["coord_frame"] = FIFF.FIFFV_COORD_HEAD # Modify info['dig'] in place with info._unlock(): - info['dig'] = montage.dig + info["dig"] = montage.dig @fill_doc -def _set_montage(info, montage, match_case=True, match_alias=False, - on_missing='raise'): +def _set_montage(info, montage, match_case=True, match_alias=False, on_missing="raise"): """Apply montage to data. With a DigMontage, this function will replace the digitizer info with @@ -1071,19 +1201,20 @@ def _set_montage(info, montage, match_case=True, match_alias=False, ----- This function will change the info variable in place. 
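``_set_montage`` is reached through the public ``inst.set_montage``; a minimal sketch of that code path with the ``match_case`` and ``on_missing`` options documented above:

    import numpy as np
    import mne

    info = mne.create_info(["Fp1", "Fp2", "Cz"], sfreq=1000.0, ch_types="eeg")
    raw = mne.io.RawArray(np.zeros((3, 1000)), info)
    raw.set_montage("standard_1020", match_case=True, on_missing="raise")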
""" - _validate_type(montage, (DigMontage, None, str), 'montage') + _validate_type(montage, (DigMontage, None, str), "montage") if montage is None: # Next line modifies info['dig'] in place with info._unlock(): - info['dig'] = None - for ch in info['chs']: + info["dig"] = None + for ch in info["chs"]: # Next line modifies info['chs'][#]['loc'] in place - ch['loc'] = np.full(12, np.nan) + ch["loc"] = np.full(12, np.nan) return if isinstance(montage, str): # load builtin montage _check_option( - parameter='montage', value=montage, - allowed_values=[m.name for m in _BUILTIN_STANDARD_MONTAGES] + parameter="montage", + value=montage, + allowed_values=[m.name for m in _BUILTIN_STANDARD_MONTAGES], ) montage = make_standard_montage(montage) @@ -1101,66 +1232,69 @@ def _backcompat_value(pos, ref_pos): # only get the eeg, seeg, dbs, ecog channels picks = pick_types( - info, meg=False, eeg=True, seeg=True, dbs=True, ecog=True, - exclude=()) - non_picks = np.setdiff1d(np.arange(info['nchan']), picks) + info, meg=False, eeg=True, seeg=True, dbs=True, ecog=True, exclude=() + ) + non_picks = np.setdiff1d(np.arange(info["nchan"]), picks) # get the reference position from the loc[3:6] - chs = [info['chs'][ii] for ii in picks] - non_names = [info['chs'][ii]['ch_name'] for ii in non_picks] + chs = [info["chs"][ii] for ii in picks] + non_names = [info["chs"][ii]["ch_name"] for ii in non_picks] del picks - ref_pos = [ch['loc'][3:6] for ch in chs] + ref_pos = np.array([ch["loc"][3:6] for ch in chs]) # keep reference location from EEG-like channels if they # already exist and are all the same. - custom_eeg_ref_dig = False # Note: ref position is an empty list for fieldtrip data - if ref_pos: - if all([np.equal(ref_pos[0], pos).all() for pos in ref_pos]) \ - and not np.equal(ref_pos[0], [0, 0, 0]).all(): - eeg_ref_pos = ref_pos[0] - # since we have an EEG reference position, we have - # to add it into the info['dig'] as EEG000 - custom_eeg_ref_dig = True - if not custom_eeg_ref_dig: - refs = set(ch_pos) & {'EEG000', 'REF'} + if len(ref_pos) and ref_pos[0].any() and (ref_pos[0] == ref_pos).all(): + eeg_ref_pos = ref_pos[0] + # since we have an EEG reference position, we have + # to add it into the info['dig'] as EEG000 + custom_eeg_ref_dig = True + else: + refs = set(ch_pos) & {"EEG000", "REF"} assert len(refs) <= 1 eeg_ref_pos = np.zeros(3) if not refs else ch_pos.pop(refs.pop()) + custom_eeg_ref_dig = False + del ref_pos # This raises based on info being subset/superset of montage - info_names = [ch['ch_name'] for ch in chs] + info_names = [ch["ch_name"] for ch in chs] dig_names = mnt_head._get_dig_names() - ref_names = [None, 'EEG000', 'REF'] + ref_names = [None, "EEG000", "REF"] if match_case: info_names_use = info_names dig_names_use = dig_names non_names_use = non_names else: - ch_pos_use = OrderedDict( - (name.lower(), pos) for name, pos in ch_pos.items()) + ch_pos_use = OrderedDict((name.lower(), pos) for name, pos in ch_pos.items()) info_names_use = [name.lower() for name in info_names] - dig_names_use = [name.lower() if name is not None else name - for name in dig_names] + dig_names_use = [ + name.lower() if name is not None else name for name in dig_names + ] non_names_use = [name.lower() for name in non_names] - ref_names = [name.lower() if name is not None else name - for name in ref_names] + ref_names = [name.lower() if name is not None else name for name in ref_names] n_dup = len(ch_pos) - len(ch_pos_use) if n_dup: - raise ValueError('Cannot use match_case=False as %s montage ' - 'name(s) require 
case sensitivity' % n_dup) + raise ValueError( + f"Cannot use match_case=False as {n_dup} montage " + "name(s) require case sensitivity" + ) n_dup = len(info_names_use) - len(set(info_names_use)) if n_dup: - raise ValueError('Cannot use match_case=False as %s channel ' - 'name(s) require case sensitivity' % n_dup) + raise ValueError( + f"Cannot use match_case=False as {n_dup} channel " + "name(s) require case sensitivity" + ) ch_pos = ch_pos_use del ch_pos_use del dig_names # use lookup table to match unrecognized channel names to known aliases if match_alias: - alias_dict = (match_alias if isinstance(match_alias, dict) else - CHANNEL_LOC_ALIASES) + alias_dict = ( + match_alias if isinstance(match_alias, dict) else CHANNEL_LOC_ALIASES + ) if not match_case: alias_dict = { ch_name.lower(): ch_alias.lower() @@ -1169,16 +1303,11 @@ def _backcompat_value(pos, ref_pos): # excluded ch_alias not in info, to prevent unnecessary mapping and # warning messages based on aliases. - alias_dict = { - ch_name: ch_alias - for ch_name, ch_alias in alias_dict.items() - } + alias_dict = {ch_name: ch_alias for ch_name, ch_alias in alias_dict.items()} info_names_use = [ alias_dict.get(ch_name, ch_name) for ch_name in info_names_use ] - non_names_use = [ - alias_dict.get(ch_name, ch_name) for ch_name in non_names_use - ] + non_names_use = [alias_dict.get(ch_name, ch_name) for ch_name in non_names_use] # warn user if there is not a full overlap of montage with info_chs missing = np.where([use not in ch_pos for use in info_names_use])[0] @@ -1209,42 +1338,47 @@ def _backcompat_value(pos, ref_pos): # will have entries "D1" and "S1". extra = np.where([non in ch_pos for non in non_names_use])[0] if len(extra): - types = '/'.join(sorted(set( - channel_type(info, non_picks[ii]) for ii in extra))) + types = "/".join(sorted(set(channel_type(info, non_picks[ii]) for ii in extra))) names = [non_names[ii] for ii in extra] - warn(f'Not setting position{_pl(extra)} of {len(extra)} {types} ' - f'channel{_pl(extra)} found in montage:\n{names}\n' - 'Consider setting the channel types to be of ' - f'{_docdict["montage_types"]} ' - 'using inst.set_channel_types before calling inst.set_montage, ' - 'or omit these channels when creating your montage.') + warn( + f"Not setting position{_pl(extra)} of {len(extra)} {types} " + f"channel{_pl(extra)} found in montage:\n{names}\n" + "Consider setting the channel types to be of " + f"{docdict['montage_types']} " + "using inst.set_channel_types before calling inst.set_montage, " + "or omit these channels when creating your montage." + ) for ch, use in zip(chs, info_names_use): # Next line modifies info['chs'][#]['loc'] in place if use in ch_pos: - ch['loc'][:6] = _backcompat_value(ch_pos[use], eeg_ref_pos) - ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD + ch["loc"][:6] = _backcompat_value(ch_pos[use], eeg_ref_pos) + ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD del ch_pos # XXX this is probably wrong as it uses the order from the montage # rather than the order of our info['ch_names'] ... 
digpoints = [ - mnt_head.dig[ii] for ii, name in enumerate(dig_names_use) - if name in (info_names_use + ref_names)] + mnt_head.dig[ii] + for ii, name in enumerate(dig_names_use) + if name in (info_names_use + ref_names) + ] # get a copy of the old dig - if info['dig'] is not None: - old_dig = info['dig'].copy() + if info["dig"] is not None: + old_dig = info["dig"].copy() else: old_dig = [] # determine if needed to add an extra EEG REF DigPoint if custom_eeg_ref_dig: # ref_name = 'EEG000' if match_case else 'eeg000' - ref_dig_dict = {'kind': FIFF.FIFFV_POINT_EEG, - 'r': eeg_ref_pos, - 'ident': 0, - 'coord_frame': info['dig'].pop()['coord_frame']} + ref_dig_dict = { + "kind": FIFF.FIFFV_POINT_EEG, + "r": eeg_ref_pos, + "ident": 0, + "coord_frame": info["dig"].pop()["coord_frame"], + } ref_dig_point = _format_dig_points([ref_dig_dict])[0] # only append the reference dig point if it was already # in the old dig @@ -1252,7 +1386,7 @@ def _backcompat_value(pos, ref_pos): digpoints.append(ref_dig_point) # Next line modifies info['dig'] in place with info._unlock(): - info['dig'] = _format_dig_points(digpoints, enforce_order=True) + info["dig"] = _format_dig_points(digpoints, enforce_order=True) del digpoints # TODO: Ideally we would have a check like this, but read_raw_bids for ECoG @@ -1268,7 +1402,7 @@ def _backcompat_value(pos, ref_pos): # 'not happen. Please contact MNE-Python developers.') # Handle fNIRS with source, detector and channel - fnirs_picks = _picks_to_idx(info, 'fnirs', allow_empty=True) + fnirs_picks = _picks_to_idx(info, "fnirs", allow_empty=True) if len(fnirs_picks) > 0: _set_montage_fnirs(info, mnt_head) @@ -1288,18 +1422,21 @@ def _read_isotrak_elp_points(fname): and 'points'. """ value_pattern = r"\-?\d+\.?\d*e?\-?\d*" - coord_pattern = r"({0})\s+({0})\s+({0})\s*$".format(value_pattern) + coord_pattern = rf"({value_pattern})\s+({value_pattern})\s+({value_pattern})\s*$" with open(fname) as fid: file_str = fid.read() - points_str = [m.groups() for m in re.finditer(coord_pattern, file_str, - re.MULTILINE)] + points_str = [ + m.groups() for m in re.finditer(coord_pattern, file_str, re.MULTILINE) + ] points = np.array(points_str, dtype=float) return { - 'nasion': points[0], 'lpa': points[1], 'rpa': points[2], - 'points': points[3:] + "nasion": points[0], + "lpa": points[1], + "rpa": points[2], + "points": points[3:], } @@ -1317,12 +1454,13 @@ def _read_isotrak_hsp_points(fname): The dictionary containing locations for 'nasion', 'lpa', 'rpa' and 'points'. 
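For orientation, a sketch of how the Polhemus readers in this region are typically combined; both file names are hypothetical placeholders:

    from mne.channels import (
        make_dig_montage,
        read_dig_polhemus_isotrak,
        read_polhemus_fastscan,
    )

    isotrak = read_dig_polhemus_isotrak("digitization.elp", unit="m")  # fiducials + HPI
    hsp = read_polhemus_fastscan("headshape.txt", unit="mm")  # (n_points, 3) array
    pos = isotrak.get_positions()
    montage = make_dig_montage(
        nasion=pos["nasion"], lpa=pos["lpa"], rpa=pos["rpa"], hsp=hsp
    )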
""" + def get_hsp_fiducial(line): - return np.fromstring(line.replace('%F', ''), dtype=float, sep='\t') + return np.fromstring(line.replace("%F", ""), dtype=float, sep="\t") with open(fname) as ff: for line in ff: - if 'position of fiducials' in line.lower(): + if "position of fiducials" in line.lower(): break nasion = get_hsp_fiducial(ff.readline()) @@ -1332,20 +1470,20 @@ def get_hsp_fiducial(line): _ = ff.readline() line = ff.readline() if line: - n_points, n_cols = np.fromstring(line, dtype=int, sep='\t') + n_points, n_cols = np.fromstring(line, dtype=int, sep="\t") points = np.fromstring( - string=ff.read(), dtype=float, sep='\t', + string=ff.read(), + dtype=float, + sep="\t", ).reshape(-1, n_cols) assert points.shape[0] == n_points else: points = np.empty((0, 3)) - return { - 'nasion': nasion, 'lpa': lpa, 'rpa': rpa, 'points': points - } + return {"nasion": nasion, "lpa": lpa, "rpa": rpa, "points": points} -def read_dig_polhemus_isotrak(fname, ch_names=None, unit='m'): +def read_dig_polhemus_isotrak(fname, ch_names=None, unit="m"): """Read Polhemus digitizer data from a file. Parameters @@ -1373,19 +1511,19 @@ def read_dig_polhemus_isotrak(fname, ch_names=None, unit='m'): make_dig_montage read_polhemus_fastscan read_dig_captrak - read_dig_dat + read_dig_curry read_dig_egi read_dig_fif read_dig_localite """ - VALID_FILE_EXT = ('.hsp', '.elp', '.eeg') + VALID_FILE_EXT = (".hsp", ".elp", ".eeg") fname = str(_check_fname(fname, overwrite="read", must_exist=True)) _scale = _check_unit_and_get_scaling(unit) _, ext = op.splitext(fname) - _check_option('fname', ext, VALID_FILE_EXT) + _check_option("fname", ext, VALID_FILE_EXT) - if ext == '.elp': + if ext == ".elp": data = _read_isotrak_elp_points(fname) else: # Default case we read points as hsp since is the most likely scenario @@ -1397,39 +1535,38 @@ def read_dig_polhemus_isotrak(fname, ch_names=None, unit='m'): pass # noqa if ch_names is None: - keyword = 'hpi' if ext == '.elp' else 'hsp' - data[keyword] = data.pop('points') + keyword = "hpi" if ext == ".elp" else "hsp" + data[keyword] = data.pop("points") else: - points = data.pop('points') + points = data.pop("points") if points.shape[0] == len(ch_names): - data['ch_pos'] = OrderedDict(zip(ch_names, points)) + data["ch_pos"] = OrderedDict(zip(ch_names, points)) else: - raise ValueError(( - "Length of ``ch_names`` does not match the number of points" - " in {fname}. Expected ``ch_names`` length {n_points:d}," - " given {n_chnames:d}" - ).format( - fname=fname, n_points=points.shape[0], n_chnames=len(ch_names) - )) + raise ValueError( + "Length of ``ch_names`` does not match the number of points in " + f"{fname}. Expected ``ch_names`` length {points.shape[0]}, given " + f"{len(ch_names)}" + ) return make_dig_montage(**data) def _is_polhemus_fastscan(fname): - header = '' - with open(fname, 'r') as fid: + header = "" + with open(fname) as fid: for line in fid: - if not line.startswith('%'): + if not line.startswith("%"): break header += line - return 'FastSCAN' in header + return "FastSCAN" in header @verbose -def read_polhemus_fastscan(fname, unit='mm', on_header_missing='raise', *, - verbose=None): +def read_polhemus_fastscan( + fname, unit="mm", on_header_missing="raise", *, verbose=None +): """Read Polhemus FastSCAN digitizer data from a ``.txt`` file. 
Parameters @@ -1452,18 +1589,18 @@ def read_polhemus_fastscan(fname, unit='mm', on_header_missing='raise', *, read_dig_polhemus_isotrak make_dig_montage """ - VALID_FILE_EXT = ['.txt'] + VALID_FILE_EXT = [".txt"] fname = str(_check_fname(fname, overwrite="read", must_exist=True)) _scale = _check_unit_and_get_scaling(unit) _, ext = op.splitext(fname) - _check_option('fname', ext, VALID_FILE_EXT) + _check_option("fname", ext, VALID_FILE_EXT) if not _is_polhemus_fastscan(fname): - msg = "%s does not contain a valid Polhemus FastSCAN header" % fname + msg = f"{fname} does not contain a valid Polhemus FastSCAN header" _on_missing(on_header_missing, msg) - points = _scale * np.loadtxt(fname, comments='%', ndmin=2) + points = _scale * np.loadtxt(fname, comments="%", ndmin=2) _check_dig_shape(points) return points @@ -1478,7 +1615,10 @@ def _read_eeglab_locations(fname): return ch_names, pos -def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): +@verbose +def read_custom_montage( + fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None, *, verbose=None +): """Read a montage from a file. Parameters @@ -1499,6 +1639,7 @@ def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): for most readers but ``"head"`` for EEGLAB. .. versionadded:: 0.20 + %(verbose)s Returns ------- @@ -1509,6 +1650,7 @@ def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): -------- make_dig_montage make_standard_montage + read_dig_fif Notes ----- @@ -1522,66 +1664,75 @@ def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): :func:`make_dig_montage` that takes arrays as input. """ from ._standard_montage_utils import ( - _read_theta_phi_in_degrees, _read_sfp, _read_csd, _read_elc, - _read_elp_besa, _read_brainvision, _read_xyz + _read_brainvision, + _read_csd, + _read_elc, + _read_elp_besa, + _read_sfp, + _read_theta_phi_in_degrees, + _read_xyz, ) + SUPPORTED_FILE_EXT = { - 'eeglab': ('.loc', '.locs', '.eloc', ), - 'hydrocel': ('.sfp', ), - 'matlab': ('.csd', ), - 'asa electrode': ('.elc', ), - 'generic (Theta-phi in degrees)': ('.txt', ), - 'standard BESA spherical': ('.elp', ), # NB: not same as polhemus elp - 'brainvision': ('.bvef', ), - 'xyz': ('.csv', '.tsv', '.xyz'), + "eeglab": ( + ".loc", + ".locs", + ".eloc", + ), + "hydrocel": (".sfp",), + "matlab": (".csd",), + "asa electrode": (".elc",), + "generic (Theta-phi in degrees)": (".txt",), + "standard BESA spherical": (".elp",), # NB: not same as polhemus elp + "brainvision": (".bvef",), + "xyz": (".csv", ".tsv", ".xyz"), } fname = str(_check_fname(fname, overwrite="read", must_exist=True)) _, ext = op.splitext(fname) - _check_option('fname', ext, list(sum(SUPPORTED_FILE_EXT.values(), ()))) + _check_option("fname", ext, list(sum(SUPPORTED_FILE_EXT.values(), ()))) - if ext in SUPPORTED_FILE_EXT['eeglab']: + if ext in SUPPORTED_FILE_EXT["eeglab"]: if head_size is None: - raise ValueError( - "``head_size`` cannot be None for '{}'".format(ext)) + raise ValueError(f"``head_size`` cannot be None for '{ext}'") ch_names, pos = _read_eeglab_locations(fname) scale = head_size / np.median(np.linalg.norm(pos, axis=-1)) pos *= scale montage = make_dig_montage( ch_pos=OrderedDict(zip(ch_names, pos)), - coord_frame='head', + coord_frame="head", ) - elif ext in SUPPORTED_FILE_EXT['hydrocel']: + elif ext in SUPPORTED_FILE_EXT["hydrocel"]: montage = _read_sfp(fname, head_size=head_size) - elif ext in SUPPORTED_FILE_EXT['matlab']: + elif ext in SUPPORTED_FILE_EXT["matlab"]: montage = 
_read_csd(fname, head_size=head_size) - elif ext in SUPPORTED_FILE_EXT['asa electrode']: + elif ext in SUPPORTED_FILE_EXT["asa electrode"]: montage = _read_elc(fname, head_size=head_size) - elif ext in SUPPORTED_FILE_EXT['generic (Theta-phi in degrees)']: + elif ext in SUPPORTED_FILE_EXT["generic (Theta-phi in degrees)"]: if head_size is None: - raise ValueError( - "``head_size`` cannot be None for '{}'".format(ext)) - montage = _read_theta_phi_in_degrees(fname, head_size=head_size, - fid_names=('Nz', 'LPA', 'RPA')) + raise ValueError(f"``head_size`` cannot be None for '{ext}'") + montage = _read_theta_phi_in_degrees( + fname, head_size=head_size, fid_names=("Nz", "LPA", "RPA") + ) - elif ext in SUPPORTED_FILE_EXT['standard BESA spherical']: + elif ext in SUPPORTED_FILE_EXT["standard BESA spherical"]: montage = _read_elp_besa(fname, head_size) - elif ext in SUPPORTED_FILE_EXT['brainvision']: + elif ext in SUPPORTED_FILE_EXT["brainvision"]: montage = _read_brainvision(fname, head_size) - elif ext in SUPPORTED_FILE_EXT['xyz']: + elif ext in SUPPORTED_FILE_EXT["xyz"]: montage = _read_xyz(fname) if coord_frame is not None: coord_frame = _coord_frame_const(coord_frame) for d in montage.dig: - d['coord_frame'] = coord_frame + d["coord_frame"] = coord_frame return montage @@ -1603,31 +1754,47 @@ def compute_dev_head_t(montage): """ _, coord_frame = _get_fid_coords(montage.dig) if coord_frame != FIFF.FIFFV_COORD_HEAD: - raise ValueError('montage should have been set to head coordinate ' - 'system with transform_to_head function.') + raise ValueError( + "montage should have been set to head coordinate " + "system with transform_to_head function." + ) hpi_head = np.array( - [d['r'] for d in montage.dig - if (d['kind'] == FIFF.FIFFV_POINT_HPI and - d['coord_frame'] == FIFF.FIFFV_COORD_HEAD)], float) + [ + d["r"] + for d in montage.dig + if ( + d["kind"] == FIFF.FIFFV_POINT_HPI + and d["coord_frame"] == FIFF.FIFFV_COORD_HEAD + ) + ], + float, + ) hpi_dev = np.array( - [d['r'] for d in montage.dig - if (d['kind'] == FIFF.FIFFV_POINT_HPI and - d['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)], float) + [ + d["r"] + for d in montage.dig + if ( + d["kind"] == FIFF.FIFFV_POINT_HPI + and d["coord_frame"] == FIFF.FIFFV_COORD_DEVICE + ) + ], + float, + ) if not (len(hpi_head) == len(hpi_dev) and len(hpi_dev) > 0): - raise ValueError(( + raise ValueError( "To compute Device-to-Head transformation, the same number of HPI" - " points in device and head coordinates is required. (Got {dev}" - " points in device and {head} points in head coordinate systems)" - ).format(dev=len(hpi_dev), head=len(hpi_head))) + f" points in device and head coordinates is required. (Got {len(hpi_dev)}" + f" points in device and {len(hpi_head)} points in head coordinate systems)" + ) trans = _quat_to_affine(_fit_matched_points(hpi_dev, hpi_head)[0]) - return Transform(fro='meg', to='head', trans=trans) + return Transform(fro="meg", to="head", trans=trans) @verbose -def compute_native_head_t(montage, *, on_missing='warn', verbose=None): +def compute_native_head_t(montage, *, on_missing="warn", verbose=None): """Compute the native-to-head transformation for a montage. 
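For example, with only the three fiducials defined (no HPI points are needed here, unlike ``compute_dev_head_t`` above), a montage in its native frame can be mapped to head coordinates:

    from mne.channels import compute_native_head_t, make_dig_montage

    montage = make_dig_montage(
        nasion=[0.0, 0.10, 0.0], lpa=[-0.08, 0.0, 0.0], rpa=[0.08, 0.0, 0.0]
    )
    native_head_t = compute_native_head_t(montage)
    print(native_head_t)  # Transform from the montage's native frame to "head"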
This uses the fiducials in the native space to transform to compute the @@ -1654,22 +1821,25 @@ def compute_native_head_t(montage, *, on_missing='warn', verbose=None): if coord_frame == FIFF.FIFFV_COORD_HEAD: native_head_t = np.eye(4) else: - fid_keys = ('nasion', 'lpa', 'rpa') + fid_keys = ("nasion", "lpa", "rpa") for key in fid_keys: - if fid_coords[key] is None: + this_coord = fid_coords[key] + if this_coord is None or np.any(np.isnan(this_coord)): msg = ( - f'Fiducial point {key} not found, assuming identity ' - f'{_verbose_frames[coord_frame]} to head transformation') + f"Fiducial point {key} not found, assuming identity " + f"{_verbose_frames[coord_frame]} to head transformation" + ) _on_missing(on_missing, msg, error_klass=RuntimeError) native_head_t = np.eye(4) break else: native_head_t = get_ras_to_neuromag_trans( - *[fid_coords[key] for key in fid_keys]) - return Transform(coord_frame, 'head', native_head_t) + *[fid_coords[key] for key in fid_keys] + ) + return Transform(coord_frame, "head", native_head_t) -def make_standard_montage(kind, head_size='auto'): +def make_standard_montage(kind, head_size="auto"): """Read a generic (built-in) standard montage that ships with MNE-Python. Parameters @@ -1701,22 +1871,24 @@ def make_standard_montage(kind, head_size='auto'): Notes ----- Individualized (digitized) electrode positions should be read in using - :func:`read_dig_captrak`, :func:`read_dig_dat`, :func:`read_dig_egi`, - :func:`read_dig_fif`, :func:`read_dig_polhemus_isotrak`, + :func:`read_dig_captrak`, :func:`read_dig_curry`, + :func:`read_dig_egi`, :func:`read_dig_fif`, :func:`read_dig_polhemus_isotrak`, :func:`read_dig_hpts`, or manually made with :func:`make_dig_montage`. .. versionadded:: 0.19.0 """ from ._standard_montage_utils import standard_montage_look_up_table - _validate_type(kind, str, 'kind') + + _validate_type(kind, str, "kind") _check_option( - parameter='kind', value=kind, - allowed_values=[m.name for m in _BUILTIN_STANDARD_MONTAGES] + parameter="kind", + value=kind, + allowed_values=[m.name for m in _BUILTIN_STANDARD_MONTAGES], ) - _validate_type(head_size, ('numeric', str, None), 'head_size') + _validate_type(head_size, ("numeric", str, None), "head_size") if isinstance(head_size, str): - _check_option('head_size', head_size, ('auto',), extra='when str') - if kind.startswith(('standard', 'mgh', 'artinis')): + _check_option("head_size", head_size, ("auto",), extra="when str") + if kind.startswith(("standard", "mgh", "artinis")): head_size = None else: head_size = HEAD_SIZE_DEFAULT @@ -1724,7 +1896,6 @@ def make_standard_montage(kind, head_size='auto'): def _check_dig_shape(pts): - _validate_type(pts, np.ndarray, 'points') + _validate_type(pts, np.ndarray, "points") if pts.ndim != 2 or pts.shape[-1] != 3: - raise ValueError( - f'Points must be of shape (n, 3) instead of {pts.shape}') + raise ValueError(f"Points must be of shape (n, 3) instead of {pts.shape}") diff --git a/mne/channels/tests/__init__.py b/mne/channels/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/channels/tests/__init__.py +++ b/mne/channels/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py index 7634100afbe..697b49e7943 100644 --- a/mne/channels/tests/test_channels.py +++ b/mne/channels/tests/test_channels.py @@ -1,36 +1,58 @@ -# Author: Daniel G Wakeman -# Denis A. 
Engemann -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from pathlib import Path +import hashlib +from contextlib import nullcontext from copy import deepcopy from functools import partial -import hashlib +from pathlib import Path -import pytest import numpy as np +import pooch +import pytest +from numpy.testing import assert_allclose, assert_array_equal, assert_equal from scipy.io import savemat -from numpy.testing import assert_array_equal, assert_equal, assert_allclose -from mne.channels import (rename_channels, read_ch_adjacency, combine_channels, - find_ch_adjacency, make_1020_channel_selections, - read_custom_montage, equalize_channels, - get_builtin_ch_adjacencies) +from mne import ( + Epochs, + EpochsArray, + EvokedArray, + create_info, + make_ad_hoc_cov, + pick_channels, + pick_types, + read_events, +) +from mne._fiff.constants import FIFF, _ch_unit_mul_named +from mne.channels import ( + combine_channels, + equalize_channels, + find_ch_adjacency, + get_builtin_ch_adjacencies, + make_1020_channel_selections, + read_ch_adjacency, + read_custom_montage, + rename_channels, +) from mne.channels.channels import ( - _ch_neighbor_adjacency, _compute_ch_adjacency, - _BUILTIN_CHANNEL_ADJACENCIES, _BuiltinChannelAdjacency + _BUILTIN_CHANNEL_ADJACENCIES, + _ch_neighbor_adjacency, + _compute_ch_adjacency, ) -from mne.io import (read_info, read_raw_fif, read_raw_ctf, read_raw_bti, - read_raw_eeglab, read_raw_kit, RawArray) -from mne.io.constants import FIFF -from mne import (pick_types, pick_channels, EpochsArray, EvokedArray, - make_ad_hoc_cov, create_info, read_events, Epochs) from mne.datasets import testing -from mne.utils import requires_pandas, requires_version -from mne.parallel import parallel_func +from mne.io import ( + RawArray, + read_info, + read_raw_bti, + read_raw_ctf, + read_raw_eeglab, + read_raw_fif, + read_raw_kit, +) +from mne.utils import requires_good_network -io_dir = Path(__file__).parent.parent.parent / "io" +io_dir = Path(__file__).parents[2] / "io" base_dir = io_dir / "tests" / "data" raw_fname = base_dir / "test_raw.fif" eve_fname = base_dir / "test-eve.fif" @@ -38,8 +60,8 @@ testing_path = testing.data_path(download=False) -@pytest.mark.parametrize('preload', (True, False)) -@pytest.mark.parametrize('proj', (True, False)) +@pytest.mark.parametrize("preload", (True, False)) +@pytest.mark.parametrize("proj", (True, False)) def test_reorder_channels(preload, proj): """Test reordering of channels.""" raw = read_raw_fif(raw_fname).crop(0, 0.1).del_proj() @@ -49,7 +71,7 @@ def test_reorder_channels(preload, proj): raw.load_data() # with .reorder_channels if proj and not preload: - with pytest.raises(RuntimeError, match='load data'): + with pytest.raises(RuntimeError, match="load data"): raw.copy().reorder_channels(raw.ch_names[::-1]) return raw_new = raw.copy().reorder_channels(raw.ch_names[::-1]) @@ -63,7 +85,7 @@ def test_reorder_channels(preload, proj): raw_new.reorder_channels(raw_new.ch_names[::-1][1:-1]) raw.drop_channels(raw.ch_names[:1] + raw.ch_names[-1:]) assert_array_equal(raw[:][0], raw_new[:][0]) - with pytest.raises(ValueError, match='repeated'): + with pytest.raises(ValueError, match="repeated"): raw.reorder_channels(raw.ch_names[:1] + raw.ch_names[:1]) # and with .pick reord = [1, 0] + list(range(2, len(raw.ch_names))) @@ -77,41 +99,46 @@ def test_rename_channels(): info = read_info(raw_fname) # Error Tests # Test channel name exists in ch_names - mapping = {'EEG 160': 'EEG060'} + mapping = 
{"EEG 160": "EEG060"} + ch_names_orig = info.ch_names[::] pytest.raises(ValueError, rename_channels, info, mapping) + rename_channels(info, mapping, on_missing="ignore") + assert info.ch_names == ch_names_orig + with pytest.warns(RuntimeWarning, match="Channel rename map contains keys that *"): + rename_channels(info, mapping, on_missing="warn") # Test improper mapping configuration - mapping = {'MEG 2641': 1.0} + mapping = {"MEG 2641": 1.0} pytest.raises(TypeError, rename_channels, info, mapping) # Test non-unique mapping configuration - mapping = {'MEG 2641': 'MEG 2642'} + mapping = {"MEG 2641": "MEG 2642"} pytest.raises(ValueError, rename_channels, info, mapping) # Test bad input - pytest.raises(ValueError, rename_channels, info, 1.) - pytest.raises(ValueError, rename_channels, info, 1.) + pytest.raises(ValueError, rename_channels, info, 1.0) + pytest.raises(ValueError, rename_channels, info, 1.0) # Test successful changes # Test ch_name and ch_names are changed info2 = deepcopy(info) # for consistency at the start of each test - info2['bads'] = ['EEG 060', 'EOG 061'] - mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'} + info2["bads"] = ["EEG 060", "EOG 061"] + mapping = {"EEG 060": "EEG060", "EOG 061": "EOG061"} rename_channels(info2, mapping) - assert info2['chs'][374]['ch_name'] == 'EEG060' - assert info2['ch_names'][374] == 'EEG060' - assert info2['chs'][375]['ch_name'] == 'EOG061' - assert info2['ch_names'][375] == 'EOG061' - assert_array_equal(['EEG060', 'EOG061'], info2['bads']) + assert info2["chs"][374]["ch_name"] == "EEG060" + assert info2["ch_names"][374] == "EEG060" + assert info2["chs"][375]["ch_name"] == "EOG061" + assert info2["ch_names"][375] == "EOG061" + assert_array_equal(["EEG060", "EOG061"], info2["bads"]) info2 = deepcopy(info) - rename_channels(info2, lambda x: x.replace(' ', '')) - assert info2['chs'][373]['ch_name'] == 'EEG059' + rename_channels(info2, lambda x: x.replace(" ", "")) + assert info2["chs"][373]["ch_name"] == "EEG059" info2 = deepcopy(info) - info2['bads'] = ['EEG 060', 'EEG 060'] + info2["bads"] = ["EEG 060", "EEG 060"] rename_channels(info2, mapping) - assert_array_equal(['EEG060', 'EEG060'], info2['bads']) + assert_array_equal(["EEG060", "EEG060"], info2["bads"]) # test that keys in Raw._orig_units will be renamed, too raw = read_raw_fif(raw_fname).crop(0, 0.1) - old, new = 'EEG 060', 'New' - raw._orig_units = {old: 'V'} + old, new = "EEG 060", "New" + raw._orig_units = {old: "V"} raw.rename_channels({old: new}) assert old not in raw._orig_units @@ -123,65 +150,90 @@ def test_set_channel_types(): raw = read_raw_fif(raw_fname) # Error Tests # Test channel name exists in ch_names - mapping = {'EEG 160': 'EEG060'} + mapping = {"EEG 160": "EEG060"} with pytest.raises(ValueError, match=r"name \(EEG 160\) doesn't exist"): raw.set_channel_types(mapping) # Test change to illegal channel type - mapping = {'EOG 061': 'xxx'} - with pytest.raises(ValueError, match='cannot change to this channel type'): + mapping = {"EOG 061": "xxx"} + with pytest.raises(ValueError, match="cannot change to this channel type"): raw.set_channel_types(mapping) # Test changing type if in proj - mapping = {'EEG 057': 'dbs', 'EEG 058': 'ecog', 'EEG 059': 'ecg', - 'EEG 060': 'eog', 'EOG 061': 'seeg', 'MEG 2441': 'eeg', - 'MEG 2443': 'eeg', 'MEG 2442': 'hbo', 'EEG 001': 'resp'} + mapping = { + "EEG 057": "dbs", + "EEG 058": "ecog", + "EEG 059": "ecg", + "EEG 060": "eog", + "EOG 061": "seeg", + "MEG 2441": "eeg", + "MEG 2443": "eeg", + "MEG 2442": "hbo", + "EEG 001": "resp", + } 
raw2 = read_raw_fif(raw_fname) - raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061'] + raw2.info["bads"] = ["EEG 059", "EEG 060", "EOG 061"] with pytest.raises(RuntimeError, match='type .* in projector "PCA-v1"'): raw2.set_channel_types(mapping) # has prj raw2.add_proj([], remove_existing=True) - with pytest.warns(RuntimeWarning, match='unit for channel.* has changed'): - raw2 = raw2.set_channel_types(mapping) + + # Should raise + with pytest.raises(ValueError, match="unit for channel.* has changed"): + raw2.copy().set_channel_types(mapping, on_unit_change="raise") + + # Should warn + with pytest.warns(RuntimeWarning, match="unit for channel.* has changed"): + raw2.copy().set_channel_types(mapping) + + # Shouldn't warn + raw2.set_channel_types(mapping, on_unit_change="ignore") + info = raw2.info - assert info['chs'][371]['ch_name'] == 'EEG 057' - assert info['chs'][371]['kind'] == FIFF.FIFFV_DBS_CH - assert info['chs'][371]['unit'] == FIFF.FIFF_UNIT_V - assert info['chs'][371]['coil_type'] == FIFF.FIFFV_COIL_EEG - assert info['chs'][372]['ch_name'] == 'EEG 058' - assert info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH - assert info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V - assert info['chs'][372]['coil_type'] == FIFF.FIFFV_COIL_EEG - assert info['chs'][373]['ch_name'] == 'EEG 059' - assert info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH - assert info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V - assert info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE - assert info['chs'][374]['ch_name'] == 'EEG 060' - assert info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH - assert info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V - assert info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE - assert info['chs'][375]['ch_name'] == 'EOG 061' - assert info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH - assert info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V - assert info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG - for idx in pick_channels(raw.ch_names, ['MEG 2441', 'MEG 2443']): - assert info['chs'][idx]['kind'] == FIFF.FIFFV_EEG_CH - assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V - assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_EEG - idx = pick_channels(raw.ch_names, ['MEG 2442'])[0] - assert info['chs'][idx]['kind'] == FIFF.FIFFV_FNIRS_CH - assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_MOL - assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO + assert info["chs"][371]["ch_name"] == "EEG 057" + assert info["chs"][371]["kind"] == FIFF.FIFFV_DBS_CH + assert info["chs"][371]["unit"] == FIFF.FIFF_UNIT_V + assert info["chs"][371]["coil_type"] == FIFF.FIFFV_COIL_EEG + assert info["chs"][372]["ch_name"] == "EEG 058" + assert info["chs"][372]["kind"] == FIFF.FIFFV_ECOG_CH + assert info["chs"][372]["unit"] == FIFF.FIFF_UNIT_V + assert info["chs"][372]["coil_type"] == FIFF.FIFFV_COIL_EEG + assert info["chs"][373]["ch_name"] == "EEG 059" + assert info["chs"][373]["kind"] == FIFF.FIFFV_ECG_CH + assert info["chs"][373]["unit"] == FIFF.FIFF_UNIT_V + assert info["chs"][373]["coil_type"] == FIFF.FIFFV_COIL_NONE + assert info["chs"][374]["ch_name"] == "EEG 060" + assert info["chs"][374]["kind"] == FIFF.FIFFV_EOG_CH + assert info["chs"][374]["unit"] == FIFF.FIFF_UNIT_V + assert info["chs"][374]["coil_type"] == FIFF.FIFFV_COIL_NONE + assert info["chs"][375]["ch_name"] == "EOG 061" + assert info["chs"][375]["kind"] == FIFF.FIFFV_SEEG_CH + assert info["chs"][375]["unit"] == FIFF.FIFF_UNIT_V + assert info["chs"][375]["coil_type"] == FIFF.FIFFV_COIL_EEG + for idx in pick_channels(raw.ch_names, ["MEG 2441", 
"MEG 2443"], ordered=False): + assert info["chs"][idx]["kind"] == FIFF.FIFFV_EEG_CH + assert info["chs"][idx]["unit"] == FIFF.FIFF_UNIT_V + assert info["chs"][idx]["coil_type"] == FIFF.FIFFV_COIL_EEG + idx = pick_channels(raw.ch_names, ["MEG 2442"])[0] + assert info["chs"][idx]["kind"] == FIFF.FIFFV_FNIRS_CH + assert info["chs"][idx]["unit"] == FIFF.FIFF_UNIT_MOL + assert info["chs"][idx]["coil_type"] == FIFF.FIFFV_COIL_FNIRS_HBO # resp channel type - idx = pick_channels(raw.ch_names, ['EEG 001'])[0] - assert info['chs'][idx]['kind'] == FIFF.FIFFV_RESP_CH - assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V - assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_NONE + idx = pick_channels(raw.ch_names, ["EEG 001"])[0] + assert info["chs"][idx]["kind"] == FIFF.FIFFV_RESP_CH + assert info["chs"][idx]["unit"] == FIFF.FIFF_UNIT_V + assert info["chs"][idx]["coil_type"] == FIFF.FIFFV_COIL_NONE # Test meaningful error when setting channel type with unknown unit - raw.info['chs'][0]['unit'] = 0. - ch_types = {raw.ch_names[0]: 'misc'} + raw.info["chs"][0]["unit"] = 0.0 + ch_types = {raw.ch_names[0]: "misc"} pytest.raises(ValueError, raw.set_channel_types, ch_types) + # test reset of channel units on unit change + idx = raw.ch_names.index("EEG 003") + raw.info["chs"][idx]["unit_mul"] = _ch_unit_mul_named[-6] + assert raw.info["chs"][idx]["unit_mul"] == -6 + raw.set_channel_types({"EEG 003": "misc"}, on_unit_change="ignore") + assert raw.info["chs"][idx]["unit_mul"] == 0 + def test_get_builtin_ch_adjacencies(): """Test retrieving the names of all built-in FieldTrip neighbors.""" @@ -195,58 +247,84 @@ def test_get_builtin_ch_adjacencies(): assert len(name_and_description) == 2 +@pytest.mark.parametrize("name", get_builtin_ch_adjacencies()) +@pytest.mark.parametrize("picks", ["pick-slice", "pick-arange", "pick-names"]) +def test_read_builtin_ch_adjacency_picks(name, picks): + """Test picking channel subsets when reading builtin adjacency matrices.""" + ch_adjacency, ch_names = read_ch_adjacency(name) + assert_equal(ch_adjacency.shape[0], len(ch_names)) + subset_names = ch_names[::2] + if picks == "pick-slice": + subset = slice(None, None, 2) + elif picks == "pick-arange": + subset = np.arange(0, len(ch_names), 2) + else: + assert picks == "pick-names" + subset = subset_names + + ch_subset_adjacency, ch_subset_names = read_ch_adjacency(name, subset) + assert_array_equal(ch_subset_names, subset_names) + + def test_read_ch_adjacency(tmp_path): """Test reading channel adjacency templates.""" - a = partial(np.array, dtype=' ps # are channels in the correct selection? @@ -384,61 +429,73 @@ def test_1020_selection(): for channel, roi in zip(fz_c3_c4, ("Midline", "Left", "Right")): assert channel in sels[roi] + # ensure returning channel names works as expected + sels_names = make_1020_channel_selections(raw.info, return_ch_names=True) + for selection, ch_names in sels_names.items(): + assert ch_names == [raw.ch_names[idx] for idx in sels[selection]] + @testing.requires_testing_data def test_find_ch_adjacency(): """Test computing the adjacency matrix.""" - raw = read_raw_fif(raw_fname, preload=True) - sizes = {'mag': 828, 'grad': 1700, 'eeg': 384} - nchans = {'mag': 102, 'grad': 204, 'eeg': 60} - for ch_type in ['mag', 'grad', 'eeg']: + raw = read_raw_fif(raw_fname) + sizes = {"mag": 828, "grad": 1700, "eeg": 384} + nchans = {"mag": 102, "grad": 204, "eeg": 60} + for ch_type in ["mag", "grad", "eeg"]: conn, ch_names = find_ch_adjacency(raw.info, ch_type) # Silly test for checking the number of neighbors. 
- assert_equal(conn.getnnz(), sizes[ch_type]) + assert_equal(conn.astype(bool).sum(), sizes[ch_type]) assert_equal(len(ch_names), nchans[ch_type]) + kwargs = dict(exclude=()) + if ch_type in ("mag", "grad"): + kwargs["meg"] = ch_type + else: + kwargs[ch_type] = True + want_names = [raw.ch_names[pick] for pick in pick_types(raw.info, **kwargs)] + assert ch_names == want_names pytest.raises(ValueError, find_ch_adjacency, raw.info, None) # Test computing the conn matrix with gradiometers. - conn, ch_names = _compute_ch_adjacency(raw.info, 'grad') - assert_equal(conn.getnnz(), 2680) + conn, ch_names = _compute_ch_adjacency(raw.info, "grad") + assert_equal(conn.astype(bool).sum(), 2680) # Test ch_type=None. - raw.pick_types(meg='mag') + raw.pick(picks="mag") find_ch_adjacency(raw.info, None) bti_fname = testing_path / "BTi" / "erm_HFH" / "c,rfDC" bti_config_name = testing_path / "BTi" / "erm_HFH" / "config" raw = read_raw_bti(bti_fname, bti_config_name, None) - _, ch_names = find_ch_adjacency(raw.info, 'mag') - assert 'A1' in ch_names + _, ch_names = find_ch_adjacency(raw.info, "mag") + assert "A1" in ch_names ctf_fname = testing_path / "CTF" / "testdata_ctf_short.ds" raw = read_raw_ctf(ctf_fname) - _, ch_names = find_ch_adjacency(raw.info, 'mag') - assert 'MLC11' in ch_names + _, ch_names = find_ch_adjacency(raw.info, "mag") + assert "MLC11" in ch_names - pytest.raises(ValueError, find_ch_adjacency, raw.info, 'eog') + pytest.raises(ValueError, find_ch_adjacency, raw.info, "eog") raw_kit = read_raw_kit(fname_kit_157) - neighb, ch_names = find_ch_adjacency(raw_kit.info, 'mag') + neighb, ch_names = find_ch_adjacency(raw_kit.info, "mag") assert neighb.data.size == 1329 - assert ch_names[0] == 'MEG 001' + assert ch_names[0] == "MEG 001" @testing.requires_testing_data def test_neuromag122_adjacency(): """Test computing the adjacency matrix of Neuromag122-Data.""" nm122_fname = testing_path / "misc" / "neuromag122_test_file-raw.fif" - raw = read_raw_fif(nm122_fname, preload=True) - conn, ch_names = find_ch_adjacency(raw.info, 'grad') - assert conn.getnnz() == 1564 + raw = read_raw_fif(nm122_fname) + conn, ch_names = find_ch_adjacency(raw.info, "grad") + assert conn.astype(bool).sum() == 1564 assert len(ch_names) == 122 assert conn.shape == (122, 122) def test_drop_channels(): """Test if dropping channels works with various arguments.""" - raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1) + raw = read_raw_fif(raw_fname).crop(0, 0.1) raw.drop_channels(["MEG 0111"]) # list argument raw.drop_channels("MEG 0112") # str argument raw.drop_channels({"MEG 0132", "MEG 0133"}) # set argument @@ -447,32 +504,33 @@ def test_drop_channels(): # by default, drop channels raises a ValueError if a channel can't be found m_chs = ["MEG 0111", "MEG blahblah"] - with pytest.raises(ValueError, match='not found, nothing dropped'): + with pytest.raises(ValueError, match="not found, nothing dropped"): raw.drop_channels(m_chs) # ...but this can be turned to a warning - with pytest.warns(RuntimeWarning, match='not found, nothing dropped'): - raw.drop_channels(m_chs, on_missing='warn') + with pytest.warns(RuntimeWarning, match="not found, nothing dropped"): + raw.drop_channels(m_chs, on_missing="warn") # ...or ignored altogether - raw.drop_channels(m_chs, on_missing='ignore') + raw.drop_channels(m_chs, on_missing="ignore") + with pytest.raises(ValueError, match="All channels"): + raw.drop_channels(raw.ch_names) def test_pick_channels(): """Test if picking channels works with various arguments.""" - raw = 
read_raw_fif(raw_fname, preload=True).crop(0, 0.1) + raw = read_raw_fif(raw_fname).crop(0, 0.1) # selected correctly 3 channels - raw.pick(['MEG 0113', 'MEG 0112', 'MEG 0111']) + raw.pick(["MEG 0113", "MEG 0112", "MEG 0111"]) assert len(raw.ch_names) == 3 # selected correctly 3 channels and ignored 'meg', and emit warning - with pytest.warns(RuntimeWarning, match='not present in the info'): - raw.pick(['MEG 0113', "meg", 'MEG 0112', 'MEG 0111']) - assert len(raw.ch_names) == 3 + with pytest.raises(ValueError, match="not present in the info"): + raw.pick(["MEG 0113", "meg", "MEG 0112", "MEG 0111"]) names_len = len(raw.ch_names) - raw.pick(['all']) # selected correctly all channels + raw.pick(["all"]) # selected correctly all channels assert len(raw.ch_names) == names_len - raw.pick('all') # selected correctly all channels + raw.pick("all") # selected correctly all channels assert len(raw.ch_names) == names_len @@ -487,16 +545,16 @@ def test_add_reference_channels(): n_evoked_original_channels = len(evoked.ch_names) # Raw object - raw.add_reference_channels(['REF 123']) + raw.add_reference_channels(["REF 123"]) assert len(raw.ch_names) == n_raw_original_channels + 1 assert np.all(raw.get_data()[-1] == 0) # Epochs object - epochs.add_reference_channels(['REF 123']) + epochs.add_reference_channels(["REF 123"]) assert epochs._data.shape[1] == epochs_original_shape + 1 # Evoked object - evoked.add_reference_channels(['REF 123']) + evoked.add_reference_channels(["REF 123"]) assert len(evoked.ch_names) == n_evoked_original_channels + 1 assert np.all(evoked._data[-1] == 0) @@ -506,30 +564,35 @@ def test_equalize_channels(): # This function only tests the generic functionality of equalize_channels. # Additional tests for each instance type are included in the accompanying # test suite for each type. - pytest.raises(TypeError, equalize_channels, ['foo', 'bar'], - match='Instances to be modified must be an instance of') + pytest.raises( + TypeError, + equalize_channels, + ["foo", "bar"], + match="Instances to be modified must be an instance of", + ) - raw = RawArray([[1.], [2.], [3.], [4.]], - create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.)) - epochs = EpochsArray([[[1.], [2.], [3.]]], - create_info(['CH5', 'CH2', 'CH1'], sfreq=1.)) - cov = make_ad_hoc_cov(create_info(['CH2', 'CH1', 'CH8'], sfreq=1., - ch_types='eeg')) - cov['bads'] = ['CH1'] - ave = EvokedArray([[1.], [2.]], create_info(['CH1', 'CH2'], sfreq=1.)) + raw = RawArray( + [[1.0], [2.0], [3.0], [4.0]], + create_info(["CH1", "CH2", "CH3", "CH4"], sfreq=1.0), + ) + epochs = EpochsArray( + [[[1.0], [2.0], [3.0]]], create_info(["CH5", "CH2", "CH1"], sfreq=1.0) + ) + cov = make_ad_hoc_cov(create_info(["CH2", "CH1", "CH8"], sfreq=1.0, ch_types="eeg")) + cov["bads"] = ["CH1"] + ave = EvokedArray([[1.0], [2.0]], create_info(["CH1", "CH2"], sfreq=1.0)) - raw2, epochs2, cov2, ave2 = equalize_channels([raw, epochs, cov, ave], - copy=True) + raw2, epochs2, cov2, ave2 = equalize_channels([raw, epochs, cov, ave], copy=True) # The Raw object was the first in the list, so should have been used as # template for the ordering of the channels. No bad channels should have # been dropped. 
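A self-contained sketch of the behaviour asserted below (channel sets are intersected and the order of the first instance wins):

    from mne import EvokedArray, create_info
    from mne.channels import equalize_channels
    from mne.io import RawArray

    raw = RawArray(
        [[1.0], [2.0], [3.0]], create_info(["CH1", "CH2", "CH3"], sfreq=1.0)
    )
    ave = EvokedArray([[1.0], [2.0]], create_info(["CH2", "CH1"], sfreq=1.0))
    raw_eq, ave_eq = equalize_channels([raw, ave], copy=True)
    assert raw_eq.ch_names == ave_eq.ch_names == ["CH1", "CH2"]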
- assert raw2.ch_names == ['CH1', 'CH2'] - assert_array_equal(raw2.get_data(), [[1.], [2.]]) - assert epochs2.ch_names == ['CH1', 'CH2'] - assert_array_equal(epochs2.get_data(), [[[3.], [2.]]]) - assert cov2.ch_names == ['CH1', 'CH2'] - assert cov2['bads'] == cov['bads'] + assert raw2.ch_names == ["CH1", "CH2"] + assert_array_equal(raw2.get_data(), [[1.0], [2.0]]) + assert epochs2.ch_names == ["CH1", "CH2"] + assert_array_equal(epochs2.get_data(copy=False), [[[3.0], [2.0]]]) + assert cov2.ch_names == ["CH1", "CH2"] + assert cov2["bads"] == cov["bads"] assert ave2.ch_names == ave.ch_names assert_array_equal(ave2.data, ave.data) @@ -550,7 +613,7 @@ def test_combine_channels(): """Test channel combination on Raw, Epochs, and Evoked.""" raw = read_raw_fif(raw_fname, preload=True) raw_ch_bad = read_raw_fif(raw_fname, preload=True) - raw_ch_bad.info['bads'] = ['MEG 0113', 'MEG 0112'] + raw_ch_bad.info["bads"] = ["MEG 0113", "MEG 0112"] epochs = Epochs(raw, read_events(eve_fname)) evoked = epochs.average() good = dict(foo=[0, 1, 3, 4], bar=[5, 2]) # good grad and mag @@ -568,62 +631,66 @@ def test_combine_channels(): # Test with stimulus channels combine_stim = combine_channels(raw, good, keep_stim=True) target_nchan = len(good) + len(pick_types(raw.info, meg=False, stim=True)) - assert combine_stim.info['nchan'] == target_nchan + assert combine_stim.info["nchan"] == target_nchan # Test results with one ROI good_single = dict(foo=[0, 1, 3, 4]) # good grad - combined_mean = combine_channels(raw, good_single, method='mean') - combined_median = combine_channels(raw, good_single, method='median') - combined_std = combine_channels(raw, good_single, method='std') - foo_mean = np.mean(raw.get_data()[good_single['foo']], axis=0) - foo_median = np.median(raw.get_data()[good_single['foo']], axis=0) - foo_std = np.std(raw.get_data()[good_single['foo']], axis=0) - assert_array_equal(combined_mean.get_data(), - np.expand_dims(foo_mean, axis=0)) - assert_array_equal(combined_median.get_data(), - np.expand_dims(foo_median, axis=0)) - assert_array_equal(combined_std.get_data(), - np.expand_dims(foo_std, axis=0)) + combined_mean = combine_channels(raw, good_single, method="mean") + combined_median = combine_channels(raw, good_single, method="median") + combined_std = combine_channels(raw, good_single, method="std") + foo_mean = np.mean(raw.get_data()[good_single["foo"]], axis=0) + foo_median = np.median(raw.get_data()[good_single["foo"]], axis=0) + foo_std = np.std(raw.get_data()[good_single["foo"]], axis=0) + assert_array_equal(combined_mean.get_data(), np.expand_dims(foo_mean, axis=0)) + assert_array_equal(combined_median.get_data(), np.expand_dims(foo_median, axis=0)) + assert_array_equal(combined_std.get_data(), np.expand_dims(foo_std, axis=0)) # Test bad cases bad1 = dict(foo=[0, 376], bar=[5, 2]) # out of bounds bad2 = dict(foo=[0, 2], bar=[5, 2]) # type mix in same group with pytest.raises(ValueError, match='"method" must be a callable, or'): - combine_channels(raw, good, method='bad_method') + combine_channels(raw, good, method="bad_method") with pytest.raises(TypeError, match='"keep_stim" must be of type bool'): - combine_channels(raw, good, keep_stim='bad_type') + combine_channels(raw, good, keep_stim="bad_type") with pytest.raises(TypeError, match='"drop_bad" must be of type bool'): - combine_channels(raw, good, drop_bad='bad_type') - with pytest.raises(ValueError, match='Some channel indices are out of'): + combine_channels(raw, good, drop_bad="bad_type") + with pytest.raises(ValueError, match="Some 
channel indices are out of"): combine_channels(raw, bad1) - with pytest.raises(ValueError, match='Cannot combine sensors of diff'): + with pytest.raises(ValueError, match="Cannot combine sensors of diff"): combine_channels(raw, bad2) # Test warnings raw_no_stim = read_raw_fif(raw_fname, preload=True) - raw_no_stim.pick_types(meg=True, stim=False) + raw_no_stim.pick(picks="meg") warn1 = dict(foo=[375, 375], bar=[5, 2]) # same channel in same group warn2 = dict(foo=[375], bar=[5, 2]) # one channel (last channel) warn3 = dict(foo=[0, 4], bar=[5, 2]) # one good channel left - with pytest.warns(RuntimeWarning, match='Could not find stimulus'): + with pytest.warns(RuntimeWarning, match="Could not find stimulus"): combine_channels(raw_no_stim, good, keep_stim=True) - with pytest.warns(RuntimeWarning, match='Less than 2 channels') as record: + with pytest.warns(RuntimeWarning, match="Less than 2 channels") as record: combine_channels(raw, warn1) combine_channels(raw, warn2) combine_channels(raw_ch_bad, warn3, drop_bad=True) assert len(record) == 3 + # Test on_missing + event_id = [1, 100] # 100 does not exist + epochs1 = Epochs(raw, read_events(eve_fname), event_id, on_missing="ignore") + with pytest.raises(ValueError, match="No matching events found"): + combine_channels(epochs1, groups={"foo": [0, 1]}) + with pytest.warns(RuntimeWarning, match="No matching events found"): + combine_channels(epochs1, groups={"foo": [0, 1]}, on_missing="warn") + with nullcontext(): + combine_channels(epochs1, groups={"foo": [0, 1]}, on_missing="ignore") + -@requires_pandas def test_combine_channels_metadata(): """Test if metadata is correctly retained in combined object.""" - import pandas as pd - + pd = pytest.importorskip("pandas") raw = read_raw_fif(raw_fname, preload=True) epochs = Epochs(raw, read_events(eve_fname), preload=True) - metadata = pd.DataFrame({"A": np.arange(len(epochs)), - "B": np.ones(len(epochs))}) + metadata = pd.DataFrame({"A": np.arange(len(epochs)), "B": np.ones(len(epochs))}) epochs.metadata = metadata good = dict(foo=[0, 1, 3, 4], bar=[5, 2]) # good grad and mag diff --git a/mne/channels/tests/test_interpolation.py b/mne/channels/tests/test_interpolation.py index 092e7ae87c5..de09a97c306 100644 --- a/mne/channels/tests/test_interpolation.py +++ b/mne/channels/tests/test_interpolation.py @@ -1,20 +1,29 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
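# --- Usage sketch (annotation; not part of the diff): the combine_channels tests
# above pass a dict mapping new channel names to lists of existing channel indices,
# and indices within one group must all refer to channels of the same type.
# The file name and indices below are assumptions for illustration only.
import mne

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # hypothetical file
groups = dict(roi_a=[0, 1], roi_b=[2, 3])  # assumed indices of same-type channels
combined = mne.channels.combine_channels(raw, groups, method="mean")
print(combined.ch_names)  # expected: ["roi_a", "roi_b"]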
+ +from itertools import compress from pathlib import Path import numpy as np -from numpy.testing import assert_allclose, assert_array_equal import pytest -from itertools import compress +from numpy.testing import assert_allclose, assert_array_equal -from mne import io, pick_types, pick_channels, read_events, Epochs +from mne import Epochs, pick_channels, pick_types, read_events +from mne._fiff.constants import FIFF +from mne._fiff.proj import _has_eeg_average_ref_proj +from mne.channels import make_dig_montage, make_standard_montage from mne.channels.interpolation import _make_interpolation_matrix from mne.datasets import testing -from mne.preprocessing.nirs import (optical_density, scalp_coupling_index, - beer_lambert_law) -from mne.io import read_raw_nirx -from mne.io.proj import _has_eeg_average_ref_proj -from mne.utils import _record_warnings, requires_version - -base_dir = Path(__file__).parent.parent.parent / "io" / "tests" / "data" +from mne.io import RawArray, read_raw_ctf, read_raw_fif, read_raw_nirx +from mne.preprocessing.nirs import ( + beer_lambert_law, + optical_density, + scalp_coupling_index, +) +from mne.utils import _record_warnings + +base_dir = Path(__file__).parents[2] / "io" / "tests" / "data" raw_fname = base_dir / "test_raw.fif" event_name = base_dir / "test-eve.fif" raw_fname_ctf = base_dir / "test_ctf_raw.fif" @@ -27,98 +36,121 @@ def _load_data(kind): """Load data.""" # It is more memory efficient to load data in a separate # function so it's loaded on-demand - raw = io.read_raw_fif(raw_fname) + raw = read_raw_fif(raw_fname) events = read_events(event_name) # subselect channels for speed - if kind == 'eeg': + if kind == "eeg": picks = pick_types(raw.info, meg=False, eeg=True, exclude=[])[:15] - epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, - preload=True, reject=dict(eeg=80e-6)) + epochs = Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=picks, + preload=True, + reject=dict(eeg=80e-6), + ) else: picks = pick_types(raw.info, meg=True, eeg=False, exclude=[])[1:200:2] - assert kind == 'meg' - with pytest.warns(RuntimeWarning, match='projection'): - epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, - preload=True, - reject=dict(grad=1000e-12, mag=4e-12)) + assert kind == "meg" + with pytest.warns(RuntimeWarning, match="projection"): + epochs = Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=picks, + preload=True, + reject=dict(grad=1000e-12, mag=4e-12), + ) return raw, epochs -@pytest.mark.parametrize('offset', (0., 0.1)) -@pytest.mark.parametrize('avg_proj, ctol', [ - (True, (0.86, 0.93)), - (False, (0.97, 0.99)), -]) -@pytest.mark.parametrize('method, atol', [ - pytest.param(None, 3e-6, marks=pytest.mark.slowtest), # slow on Azure - (dict(eeg='MNE'), 4e-6), -]) -@pytest.mark.filterwarnings('ignore:.*than 20 mm from head frame origin.*') +@pytest.mark.parametrize("offset", (0.0, 0.1)) +@pytest.mark.parametrize( + "avg_proj, ctol", + [ + (True, (0.86, 0.93)), + (False, (0.97, 0.99)), + ], +) +@pytest.mark.parametrize( + "method, atol", + [ + pytest.param(None, 3e-6, marks=pytest.mark.slowtest), # slow on Azure + (dict(eeg="MNE"), 4e-6), + ], +) +@pytest.mark.filterwarnings("ignore:.*than 20 mm from head frame origin.*") def test_interpolation_eeg(offset, avg_proj, ctol, atol, method): """Test interpolation of EEG channels.""" - raw, epochs_eeg = _load_data('eeg') + raw, epochs_eeg = _load_data("eeg") epochs_eeg = epochs_eeg.copy() assert not _has_eeg_average_ref_proj(epochs_eeg.info) # Offsetting the 
coordinate frame should have no effect on the output for inst in (raw, epochs_eeg): - for ch in inst.info['chs']: - if ch['kind'] == io.constants.FIFF.FIFFV_EEG_CH: - ch['loc'][:3] += offset - ch['loc'][3:6] += offset - for d in inst.info['dig']: - d['r'] += offset + for ch in inst.info["chs"]: + if ch["kind"] == FIFF.FIFFV_EEG_CH: + ch["loc"][:3] += offset + ch["loc"][3:6] += offset + for d in inst.info["dig"]: + d["r"] += offset # check that interpolation does nothing if no bads are marked - epochs_eeg.info['bads'] = [] + epochs_eeg.info["bads"] = [] evoked_eeg = epochs_eeg.average() kw = dict(method=method) - with pytest.warns(RuntimeWarning, match='Doing nothing'): + with pytest.warns(RuntimeWarning, match="Doing nothing"): evoked_eeg.interpolate_bads(**kw) # create good and bad channels for EEG - epochs_eeg.info['bads'] = [] + epochs_eeg.info["bads"] = [] goods_idx = np.ones(len(epochs_eeg.ch_names), dtype=bool) - goods_idx[epochs_eeg.ch_names.index('EEG 012')] = False + goods_idx[epochs_eeg.ch_names.index("EEG 012")] = False bads_idx = ~goods_idx pos = epochs_eeg._get_channel_positions() evoked_eeg = epochs_eeg.average() if avg_proj: evoked_eeg.set_eeg_reference(projection=True).apply_proj() - assert_allclose(evoked_eeg.data.mean(0), 0., atol=1e-20) + assert_allclose(evoked_eeg.data.mean(0), 0.0, atol=1e-20) ave_before = evoked_eeg.data[bads_idx] # interpolate bad channels for EEG - epochs_eeg.info['bads'] = ['EEG 012'] + epochs_eeg.info["bads"] = ["EEG 012"] evoked_eeg = epochs_eeg.average() if avg_proj: evoked_eeg.set_eeg_reference(projection=True).apply_proj() good_picks = pick_types(evoked_eeg.info, meg=False, eeg=True) - assert_allclose(evoked_eeg.data[good_picks].mean(0), 0., atol=1e-20) + assert_allclose(evoked_eeg.data[good_picks].mean(0), 0.0, atol=1e-20) evoked_eeg_bad = evoked_eeg.copy() bads_picks = pick_channels( - epochs_eeg.ch_names, include=epochs_eeg.info['bads'], ordered=True + epochs_eeg.ch_names, include=epochs_eeg.info["bads"], ordered=True ) evoked_eeg_bad.data[bads_picks, :] = 1e10 # Test first the exclude parameter evoked_eeg_2_bads = evoked_eeg_bad.copy() - evoked_eeg_2_bads.info['bads'] = ['EEG 004', 'EEG 012'] + evoked_eeg_2_bads.info["bads"] = ["EEG 004", "EEG 012"] evoked_eeg_2_bads.data[ - pick_channels(evoked_eeg_bad.ch_names, ['EEG 004', 'EEG 012']) + pick_channels(evoked_eeg_bad.ch_names, ["EEG 004", "EEG 012"]) ] = 1e10 evoked_eeg_interp = evoked_eeg_2_bads.interpolate_bads( - origin=(0., 0., 0.), exclude=['EEG 004'], **kw) - assert evoked_eeg_interp.info['bads'] == ['EEG 004'] - assert np.all(evoked_eeg_interp.get_data('EEG 004') == 1e10) - assert np.all(evoked_eeg_interp.get_data('EEG 012') != 1e10) + origin=(0.0, 0.0, 0.0), exclude=["EEG 004"], **kw + ) + assert evoked_eeg_interp.info["bads"] == ["EEG 004"] + assert np.all(evoked_eeg_interp.get_data("EEG 004") == 1e10) + assert np.all(evoked_eeg_interp.get_data("EEG 012") != 1e10) # Now test without exclude parameter - evoked_eeg_bad.info['bads'] = ['EEG 012'] + evoked_eeg_bad.info["bads"] = ["EEG 012"] evoked_eeg_interp = evoked_eeg_bad.copy().interpolate_bads( - origin=(0., 0., 0.), **kw) + origin=(0.0, 0.0, 0.0), **kw + ) if avg_proj: - assert_allclose(evoked_eeg_interp.data.mean(0), 0., atol=1e-6) + assert_allclose(evoked_eeg_interp.data.mean(0), 0.0, atol=1e-6) interp_zero = evoked_eeg_interp.data[bads_idx] if method is None: # using pos_good = pos[goods_idx] @@ -136,34 +168,34 @@ def test_interpolation_eeg(offset, avg_proj, ctol, atol, method): # check that interpolation fails when preload 
is False epochs_eeg.preload = False - with pytest.raises(RuntimeError, match='requires epochs data to be loade'): + with pytest.raises(RuntimeError, match="requires epochs data to be load"): epochs_eeg.interpolate_bads(**kw) epochs_eeg.preload = True # check that interpolation changes the data in raw - raw_eeg = io.RawArray(data=epochs_eeg._data[0], info=epochs_eeg.info) + raw_eeg = RawArray(data=epochs_eeg._data[0], info=epochs_eeg.info) raw_before = raw_eeg._data[bads_idx] raw_after = raw_eeg.interpolate_bads(**kw)._data[bads_idx] assert not np.all(raw_before == raw_after) # check that interpolation fails when preload is False for inst in [raw, epochs_eeg]: - assert hasattr(inst, 'preload') + assert hasattr(inst, "preload") inst.preload = False - inst.info['bads'] = [inst.ch_names[1]] - with pytest.raises(RuntimeError, match='requires.*data to be loaded'): + inst.info["bads"] = [inst.ch_names[1]] + with pytest.raises(RuntimeError, match="requires.*data to be loaded"): inst.interpolate_bads(**kw) # check that interpolation works with few channels raw_few = raw.copy().crop(0, 0.1).load_data() - raw_few.pick_channels(raw_few.ch_names[:1] + raw_few.ch_names[3:4]) + raw_few.pick(raw_few.ch_names[:1] + raw_few.ch_names[3:4]) assert len(raw_few.ch_names) == 2 raw_few.del_proj() - raw_few.info['bads'] = [raw_few.ch_names[-1]] + raw_few.info["bads"] = [raw_few.ch_names[-1]] orig_data = raw_few[1][0] with _record_warnings() as w: raw_few.interpolate_bads(reset_bads=False, **kw) - assert len([ww for ww in w if 'more than' not in str(ww.message)]) == 0 + assert len([ww for ww in w if "more than" not in str(ww.message)]) == 0 new_data = raw_few[1][0] assert (new_data == 0).mean() < 0.5 assert np.corrcoef(new_data, orig_data)[0, 1] > 0.2 @@ -176,83 +208,80 @@ def test_interpolation_meg(): # correlation drops thresh = 0.68 - raw, epochs_meg = _load_data('meg') + raw, epochs_meg = _load_data("meg") # check that interpolation works when non M/EEG channels are present # before MEG channels - raw.crop(0, 0.1).load_data().pick_channels(epochs_meg.ch_names) + raw.crop(0, 0.1).load_data().pick(epochs_meg.ch_names) raw.info.normalize_proj() - with pytest.warns(RuntimeWarning, match='unit .* changed from .* to .*'): - raw.set_channel_types({raw.ch_names[0]: 'stim'}) - raw.info['bads'] = [raw.ch_names[1]] + raw.set_channel_types({raw.ch_names[0]: "stim"}, on_unit_change="ignore") + raw.info["bads"] = [raw.ch_names[1]] raw.load_data() - raw.interpolate_bads(mode='fast') + raw.interpolate_bads(mode="fast") del raw # check that interpolation works for MEG - epochs_meg.info['bads'] = ['MEG 0141'] + epochs_meg.info["bads"] = ["MEG 0141"] evoked = epochs_meg.average() - pick = pick_channels(epochs_meg.info['ch_names'], epochs_meg.info['bads']) + pick = pick_channels(epochs_meg.info["ch_names"], epochs_meg.info["bads"]) # MEG -- raw - raw_meg = io.RawArray(data=epochs_meg._data[0], info=epochs_meg.info) - raw_meg.info['bads'] = ['MEG 0141'] + raw_meg = RawArray(data=epochs_meg._data[0], info=epochs_meg.info) + raw_meg.info["bads"] = ["MEG 0141"] data1 = raw_meg[pick, :][0][0] raw_meg.info.normalize_proj() - data2 = raw_meg.interpolate_bads(reset_bads=False, - mode='fast')[pick, :][0][0] + data2 = raw_meg.interpolate_bads(reset_bads=False, mode="fast")[pick, :][0][0] assert np.corrcoef(data1, data2)[0, 1] > thresh # the same number of bads as before - assert len(raw_meg.info['bads']) == len(raw_meg.info['bads']) + assert len(raw_meg.info["bads"]) == len(raw_meg.info["bads"]) # MEG -- epochs - data1 = 
epochs_meg.get_data()[:, pick, :].ravel() + data1 = epochs_meg.get_data(pick).ravel() epochs_meg.info.normalize_proj() - epochs_meg.interpolate_bads(mode='fast') - data2 = epochs_meg.get_data()[:, pick, :].ravel() + epochs_meg.interpolate_bads(mode="fast") + data2 = epochs_meg.get_data(pick).ravel() assert np.corrcoef(data1, data2)[0, 1] > thresh - assert len(epochs_meg.info['bads']) == 0 + assert len(epochs_meg.info["bads"]) == 0 # MEG -- evoked (plus auto origin) data1 = evoked.data[pick] evoked.info.normalize_proj() - data2 = evoked.interpolate_bads(origin='auto').data[pick] + data2 = evoked.interpolate_bads(origin="auto").data[pick] assert np.corrcoef(data1, data2)[0, 1] > thresh # MEG -- with exclude - evoked.info['bads'] = ['MEG 0141', 'MEG 0121'] - pick = pick_channels(evoked.ch_names, evoked.info['bads'], ordered=True) + evoked.info["bads"] = ["MEG 0141", "MEG 0121"] + pick = pick_channels(evoked.ch_names, evoked.info["bads"], ordered=True) evoked.data[pick[-1]] = 1e10 data1 = evoked.data[pick] evoked.info.normalize_proj() - data2 = evoked.interpolate_bads( - origin='auto', exclude=['MEG 0121'] - ).data[pick] + data2 = evoked.interpolate_bads(origin="auto", exclude=["MEG 0121"]).data[pick] assert np.corrcoef(data1[0], data2[0])[0, 1] > thresh assert np.all(data2[1] == 1e10) def _this_interpol(inst, ref_meg=False): from mne.channels.interpolation import _interpolate_bads_meg - _interpolate_bads_meg(inst, ref_meg=ref_meg, mode='fast') + + _interpolate_bads_meg(inst, ref_meg=ref_meg, mode="fast", origin=(0.0, 0.0, 0.04)) return inst @pytest.mark.slowtest def test_interpolate_meg_ctf(): """Test interpolation of MEG channels from CTF system.""" - thresh = .85 - tol = .05 # assert the new interpol correlates at least .05 "better" - bad = 'MLC22-2622' # select a good channel to test the interpolation + thresh = 0.85 + tol = 0.05 # assert the new interpol correlates at least .05 "better" + bad = "MLC22-2622" # select a good channel to test the interpolation - raw = io.read_raw_fif(raw_fname_ctf).crop(0, 1.0).load_data() # 3 secs + raw = read_raw_fif(raw_fname_ctf).crop(0, 1.0).load_data() # 3 secs raw.apply_gradient_compensation(3) # Show that we have to exclude ref_meg for interpolating CTF MEG-channels # (fixed in #5965): - raw.info['bads'] = [bad] - pick_bad = pick_channels(raw.info['ch_names'], raw.info['bads']) + raw.info["bads"] = [bad] + pick_bad = pick_channels(raw.info["ch_names"], raw.info["bads"]) data_orig = raw[pick_bad, :][0] # mimic old behavior (the ref_meg-arg in _interpolate_bads_meg only serves # this purpose): @@ -261,46 +290,234 @@ def test_interpolate_meg_ctf(): data_interp_no_refmeg = _this_interpol(raw, ref_meg=False)[pick_bad, :][0] R = dict() - R['no_refmeg'] = np.corrcoef(data_orig, data_interp_no_refmeg)[0, 1] - R['with_refmeg'] = np.corrcoef(data_orig, data_interp_refmeg)[0, 1] + R["no_refmeg"] = np.corrcoef(data_orig, data_interp_no_refmeg)[0, 1] + R["with_refmeg"] = np.corrcoef(data_orig, data_interp_refmeg)[0, 1] - print('Corrcoef of interpolated with original channel: ', R) - assert R['no_refmeg'] > R['with_refmeg'] + tol - assert R['no_refmeg'] > thresh + print("Corrcoef of interpolated with original channel: ", R) + assert R["no_refmeg"] > R["with_refmeg"] + tol + assert R["no_refmeg"] > thresh @testing.requires_testing_data def test_interpolation_ctf_comp(): """Test interpolation with compensated CTF data.""" raw_fname = testing_path / "CTF" / "somMDYO-18av.ds" - raw = io.read_raw_ctf(raw_fname, preload=True) - raw.info['bads'] = [raw.ch_names[5], 
raw.ch_names[-5]] - raw.interpolate_bads(mode='fast', origin=(0., 0., 0.04)) - assert raw.info['bads'] == [] + raw = read_raw_ctf(raw_fname, preload=True) + raw.info["bads"] = [raw.ch_names[5], raw.ch_names[-5]] + raw.interpolate_bads(mode="fast", origin=(0.0, 0.0, 0.04)) + assert raw.info["bads"] == [] -@requires_version('pymatreader') @testing.requires_testing_data def test_interpolation_nirs(): """Test interpolating bad nirs channels.""" - fname = ( - testing_path / "NIRx" / "nirscout" / "nirx_15_2_recording_w_overlap" - ) + pytest.importorskip("pymatreader") + fname = testing_path / "NIRx" / "nirscout" / "nirx_15_2_recording_w_overlap" raw_intensity = read_raw_nirx(fname, preload=False) raw_od = optical_density(raw_intensity) sci = scalp_coupling_index(raw_od) - raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5)) - bad_0 = np.where([name == raw_od.info['bads'][0] for - name in raw_od.ch_names])[0][0] + raw_od.info["bads"] = list(compress(raw_od.ch_names, sci < 0.5)) + bad_0 = np.where([name == raw_od.info["bads"][0] for name in raw_od.ch_names])[0][0] bad_0_std_pre_interp = np.std(raw_od._data[bad_0]) - bads_init = list(raw_od.info['bads']) + bads_init = list(raw_od.info["bads"]) raw_od.interpolate_bads(exclude=bads_init[:2]) - assert raw_od.info['bads'] == bads_init[:2] + assert raw_od.info["bads"] == bads_init[:2] raw_od.interpolate_bads() - assert raw_od.info['bads'] == [] + assert raw_od.info["bads"] == [] assert bad_0_std_pre_interp > np.std(raw_od._data[bad_0]) raw_haemo = beer_lambert_law(raw_od, ppf=6) - raw_haemo.info['bads'] = raw_haemo.ch_names[2:4] - assert raw_haemo.info['bads'] == ['S1_D2 hbo', 'S1_D2 hbr'] + raw_haemo.info["bads"] = raw_haemo.ch_names[2:4] + assert raw_haemo.info["bads"] == ["S1_D2 hbo", "S1_D2 hbr"] raw_haemo.interpolate_bads() - assert raw_haemo.info['bads'] == [] + assert raw_haemo.info["bads"] == [] + + +@testing.requires_testing_data +def test_interpolation_ecog(): + """Test interpolation for ECoG.""" + raw, epochs_eeg = _load_data("eeg") + bads = ["EEG 012"] + bads_mask = np.isin(epochs_eeg.ch_names, bads) + + epochs_ecog = epochs_eeg.set_channel_types( + {ch: "ecog" for ch in epochs_eeg.ch_names} + ) + epochs_ecog.info["bads"] = bads + + # check that interpolation changes the data in raw + raw_ecog = RawArray(data=epochs_ecog._data[0], info=epochs_ecog.info) + raw_before = raw_ecog.copy() + raw_after = raw_ecog.interpolate_bads(method=dict(ecog="spline")) + assert not np.all(raw_before._data[bads_mask] == raw_after._data[bads_mask]) + assert_array_equal(raw_before._data[~bads_mask], raw_after._data[~bads_mask]) + + +@testing.requires_testing_data +def test_interpolation_seeg(): + """Test interpolation for sEEG.""" + raw, epochs_eeg = _load_data("eeg") + bads = ["EEG 012"] + bads_mask = np.isin(epochs_eeg.ch_names, bads) + epochs_seeg = epochs_eeg.set_channel_types( + {ch: "seeg" for ch in epochs_eeg.ch_names} + ) + epochs_seeg.info["bads"] = bads + + # check that interpolation changes the data in raw + raw_seeg = RawArray(data=epochs_seeg._data[0], info=epochs_seeg.info) + raw_before = raw_seeg.copy() + montage = raw_seeg.get_montage() + pos = montage.get_positions() + ch_pos = pos.pop("ch_pos") + n0 = ch_pos[epochs_seeg.ch_names[0]] + n1 = ch_pos[epochs_seeg.ch_names[1]] + for i, ch in enumerate(epochs_seeg.ch_names[2:]): + ch_pos[ch] = n0 + (n1 - n0) * (i + 2) + raw_seeg.set_montage(make_dig_montage(ch_pos, **pos)) + raw_after = raw_seeg.interpolate_bads(method=dict(seeg="spline")) + assert not np.all(raw_before._data[bads_mask] == 
raw_after._data[bads_mask]) + assert_array_equal(raw_before._data[~bads_mask], raw_after._data[~bads_mask]) + + # check interpolation on epochs + epochs_seeg.set_montage(make_dig_montage(ch_pos, **pos)) + epochs_before = epochs_seeg.copy() + epochs_after = epochs_seeg.interpolate_bads(method=dict(seeg="spline")) + assert not np.all( + epochs_before._data[:, bads_mask] == epochs_after._data[:, bads_mask] + ) + assert_array_equal( + epochs_before._data[:, ~bads_mask], epochs_after._data[:, ~bads_mask] + ) + + # test shaft all bad + epochs_seeg.info["bads"] = epochs_seeg.ch_names + with pytest.raises(RuntimeError, match="Not enough good channels"): + epochs_seeg.interpolate_bads(method=dict(seeg="spline")) + + # test bad not on shaft + ch_pos[bads[0]] = np.array([10, 10, 10]) + epochs_seeg.info["bads"] = bads + epochs_seeg.set_montage(make_dig_montage(ch_pos, **pos)) + with pytest.raises(RuntimeError, match="No shaft found"): + epochs_seeg.interpolate_bads(method=dict(seeg="spline")) + + +def test_nan_interpolation(raw): + """Test 'nan' method for interpolating bads.""" + ch_to_interp = [raw.ch_names[1]] # don't use channel 0 (type is IAS not MEG) + raw.info["bads"] = ch_to_interp + + # test that warning appears for reset_bads = True + with pytest.warns(RuntimeWarning, match="Consider setting reset_bads=False"): + raw.interpolate_bads(method="nan", reset_bads=True) + + # despite warning, interpolation still happened, make sure the channel is NaN + bad_chs = raw.get_data(ch_to_interp) + assert np.isnan(bad_chs).all() + + # make sure reset_bads=False works as expected + raw.info["bads"] = ch_to_interp + raw.interpolate_bads(method="nan", reset_bads=False) + assert raw.info["bads"] == ch_to_interp + + # make sure other channels are untouched + raw.drop_channels(ch_to_interp) + good_chs = raw.get_data() + assert np.isfinite(good_chs).all() + + +@testing.requires_testing_data +def test_method_str(): + """Test method argument types.""" + raw = read_raw_fif( + testing_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif", + preload=False, + ) + raw.crop(0, 1).pick(("meg", "eeg"), exclude=()).load_data() + raw.copy().interpolate_bads(method="MNE") + with pytest.raises(ValueError, match="Invalid value for the"): + raw.interpolate_bads(method="spline") + raw.pick("eeg", exclude=()) + raw.interpolate_bads(method="spline") + + +@pytest.mark.parametrize("montage_name", ["biosemi16", "standard_1020"]) +@pytest.mark.parametrize("method", ["spline", "MNE"]) +@pytest.mark.parametrize("data_type", ["raw", "epochs", "evoked"]) +def test_interpolate_to_eeg(montage_name, method, data_type): + """Test the interpolate_to method for EEG for raw, epochs, and evoked.""" + # Load EEG data + raw, epochs_eeg = _load_data("eeg") + epochs_eeg = epochs_eeg.copy() + + # Load data for raw + raw.load_data() + + # Create a target montage + montage = make_standard_montage(montage_name) + + # Prepare data to interpolate to + if data_type == "raw": + inst = raw.copy() + elif data_type == "epochs": + inst = epochs_eeg.copy() + elif data_type == "evoked": + inst = epochs_eeg.average() + shape = list(inst._data.shape) + orig_total = len(inst.info["ch_names"]) + n_eeg_orig = len(pick_types(inst.info, eeg=True)) + + # Assert first and last channels are not EEG + if data_type == "raw": + ch_types = inst.get_channel_types() + assert ch_types[0] != "eeg" + assert ch_types[-1] != "eeg" + + # Record the names and data of the first and last channels. 
+ if data_type == "raw": + first_name = inst.info["ch_names"][0] + last_name = inst.info["ch_names"][-1] + data_first = inst._data[..., 0, :].copy() + data_last = inst._data[..., -1, :].copy() + + # Interpolate the EEG channels. + inst_interp = inst.copy().interpolate_to(montage, method=method) + + # Check that the new channel names include the montage channels. + assert set(montage.ch_names).issubset(set(inst_interp.info["ch_names"])) + # Check that the overall channel order is changed. + assert inst.info["ch_names"] != inst_interp.info["ch_names"] + + # Check that the data shape is as expected. + new_nchan_expected = orig_total - n_eeg_orig + len(montage.ch_names) + expected_shape = (new_nchan_expected, shape[-1]) + if len(shape) == 3: + expected_shape = (shape[0],) + expected_shape + assert inst_interp._data.shape == expected_shape + + # Verify that the first and last channels retain their positions. + if data_type == "raw": + assert inst_interp.info["ch_names"][0] == first_name + assert inst_interp.info["ch_names"][-1] == last_name + + # Verify that the data for the first and last channels is unchanged. + if data_type == "raw": + np.testing.assert_allclose( + inst_interp._data[..., 0, :], + data_first, + err_msg="Data for the first non-EEG channel has changed.", + ) + np.testing.assert_allclose( + inst_interp._data[..., -1, :], + data_last, + err_msg="Data for the last non-EEG channel has changed.", + ) + + # Validate that bad channels are carried over. + # Mark the first non eeg channel as bad + all_ch = inst_interp.info["ch_names"] + eeg_ch = [all_ch[i] for i in pick_types(inst_interp.info, eeg=True)] + bads = [ch for ch in all_ch if ch not in eeg_ch][:1] + inst.info["bads"] = bads + inst_interp = inst.copy().interpolate_to(montage, method=method) + assert inst_interp.info["bads"] == bads diff --git a/mne/channels/tests/test_layout.py b/mne/channels/tests/test_layout.py index e17f90cafaf..ad44a6873a4 100644 --- a/mne/channels/tests/test_layout.py +++ b/mne/channels/tests/test_layout.py @@ -1,28 +1,35 @@ -# Authors: Alexandre Gramfort -# Denis Engemann -# Martin Luessi -# Eric Larson -# -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
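# --- Usage sketch (annotation; not part of the diff): test_interpolate_to_eeg above
# exercises the interpolate_to() method with a target montage. A minimal example,
# assuming an EEG recording with channel positions set (file name is hypothetical):
import mne

raw = mne.io.read_raw_fif("eeg_raw.fif", preload=True)  # hypothetical file
montage = mne.channels.make_standard_montage("standard_1020")
raw_1020 = raw.copy().interpolate_to(montage, method="spline")  # method="MNE" is also tested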
import copy from pathlib import Path +import matplotlib.pyplot as plt import numpy as np -from numpy.testing import (assert_array_almost_equal, assert_array_equal, - assert_allclose, assert_equal) import pytest -import matplotlib.pyplot as plt - -from mne.channels import (make_eeg_layout, make_grid_layout, read_layout, - find_layout, HEAD_SIZE_DEFAULT) -from mne.channels.layout import (_box_size, _find_topomap_coords, - generate_2d_layout) -from mne import pick_types, pick_info -from mne.io import read_raw_kit, _empty_info, read_info -from mne.io.constants import FIFF - -io_dir = Path(__file__).parent.parent.parent / "io" +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, +) + +from mne import pick_info, pick_types +from mne._fiff.constants import FIFF +from mne._fiff.meas_info import _empty_info +from mne.channels import ( + Layout, + find_layout, + make_eeg_layout, + make_grid_layout, + read_layout, +) +from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout +from mne.defaults import HEAD_SIZE_DEFAULT +from mne.io import read_info, read_raw_kit + +io_dir = Path(__file__).parents[2] / "io" fif_fname = io_dir / "tests" / "data" / "test_raw.fif" lout_path = io_dir / "tests" / "data" bti_dir = io_dir / "bti" / "tests" / "data" @@ -34,57 +41,86 @@ def _get_test_info(): """Make test info.""" test_info = _empty_info(1000) - loc = np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.], - dtype=np.float32) - test_info['chs'] = [ - {'cal': 1, 'ch_name': 'ICA 001', 'coil_type': 0, 'coord_frame': 0, - 'kind': 502, 'loc': loc.copy(), 'logno': 1, 'range': 1.0, 'scanno': 1, - 'unit': -1, 'unit_mul': 0}, - {'cal': 1, 'ch_name': 'ICA 002', 'coil_type': 0, 'coord_frame': 0, - 'kind': 502, 'loc': loc.copy(), 'logno': 2, 'range': 1.0, 'scanno': 2, - 'unit': -1, 'unit_mul': 0}, - {'cal': 0.002142000012099743, 'ch_name': 'EOG 061', 'coil_type': 1, - 'coord_frame': 0, 'kind': 202, 'loc': loc.copy(), 'logno': 61, - 'range': 1.0, 'scanno': 376, 'unit': 107, 'unit_mul': 0}] + loc = np.array( + [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32 + ) + test_info["chs"] = [ + { + "cal": 1, + "ch_name": "ICA 001", + "coil_type": 0, + "coord_frame": 0, + "kind": 502, + "loc": loc.copy(), + "logno": 1, + "range": 1.0, + "scanno": 1, + "unit": -1, + "unit_mul": 0, + }, + { + "cal": 1, + "ch_name": "ICA 002", + "coil_type": 0, + "coord_frame": 0, + "kind": 502, + "loc": loc.copy(), + "logno": 2, + "range": 1.0, + "scanno": 2, + "unit": -1, + "unit_mul": 0, + }, + { + "cal": 0.002142000012099743, + "ch_name": "EOG 061", + "coil_type": 1, + "coord_frame": 0, + "kind": 202, + "loc": loc.copy(), + "logno": 61, + "range": 1.0, + "scanno": 376, + "unit": 107, + "unit_mul": 0, + }, + ] test_info._unlocked = False test_info._update_redundant() test_info._check_consistency() return test_info +@pytest.fixture(scope="module") +def layout(): + """Get a layout.""" + return Layout( + (0.1, 0.2, 0.1, 1.2), + pos=np.array([[0, 0, 0.1, 0.1], [0.2, 0.2, 0.1, 0.1], [0.4, 0.4, 0.1, 0.1]]), + names=["0", "1", "2"], + ids=[0, 1, 2], + kind="test", + ) + + def test_io_layout_lout(tmp_path): """Test IO with .lout files.""" layout = read_layout(fname="Vectorview-all", scale=False) layout.save(tmp_path / "foobar.lout", overwrite=True) layout_read = read_layout( - fname=tmp_path / "foobar.lout", scale=False, + fname=tmp_path / "foobar.lout", + scale=False, ) assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2) 
assert layout.names == layout_read.names assert " -# Stefan Appelhoff -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. +import shutil from contextlib import nullcontext +from functools import partial from itertools import chain from pathlib import Path -import shutil - -import pytest - -import numpy as np -from functools import partial from string import ascii_lowercase -from numpy.testing import (assert_array_equal, assert_array_less, - assert_allclose, assert_equal) import matplotlib.pyplot as plt +import numpy as np +import pytest +from numpy.testing import ( + assert_allclose, + assert_array_equal, + assert_array_less, + assert_equal, +) -from mne import __file__ as _mne_file, create_info, read_evokeds, pick_types -from mne.source_space import get_mni_fiducials -from mne.utils._testing import assert_object_equal -from mne.channels import (get_builtin_montages, DigMontage, read_dig_dat, - read_dig_egi, read_dig_captrak, read_dig_fif, - make_standard_montage, read_custom_montage, - compute_dev_head_t, make_dig_montage, - read_dig_polhemus_isotrak, compute_native_head_t, - read_polhemus_fastscan, read_dig_localite, - read_dig_hpts) +import mne.channels.montage +from mne import ( + __file__ as _mne_file, +) +from mne import ( + create_info, + pick_types, + read_evokeds, +) +from mne._fiff._digitization import ( + _count_points_by_type, + _format_dig_points, + _get_dig_eeg, + _get_fid_coords, +) +from mne._fiff.constants import FIFF +from mne.bem import _fit_sphere +from mne.channels import ( + DigMontage, + compute_dev_head_t, + compute_native_head_t, + get_builtin_montages, + make_dig_montage, + make_standard_montage, + read_custom_montage, + read_dig_captrak, + read_dig_dat, + read_dig_egi, + read_dig_fif, + read_dig_hpts, + read_dig_localite, + read_dig_polhemus_isotrak, + read_polhemus_fastscan, +) from mne.channels.montage import ( - transform_to_head, _check_get_coord_frame, _BUILTIN_STANDARD_MONTAGES + _BUILTIN_STANDARD_MONTAGES, + _check_get_coord_frame, + transform_to_head, + write_dig, +) +from mne.coreg import get_mni_fiducials +from mne.datasets import testing +from mne.io import ( + RawArray, + read_fiducials, + read_raw_brainvision, + read_raw_egi, + read_raw_fif, + read_raw_nirx, ) +from mne.io.kit import read_mrk from mne.preprocessing import compute_current_source_density -from mne.utils import assert_dig_allclose, _record_warnings -from mne.bem import _fit_sphere -from mne.io.constants import FIFF -from mne.io._digitization import (_format_dig_points, - _get_fid_coords, _get_dig_eeg, - _count_points_by_type) -from mne.transforms import (_ensure_trans, apply_trans, invert_transform, - _get_trans) +from mne.transforms import _ensure_trans, _get_trans, apply_trans, invert_transform +from mne.utils import _record_warnings, assert_dig_allclose +from mne.utils._testing import assert_object_equal from mne.viz._3d import _fiducial_coords -from mne.io.kit import read_mrk -from mne.io import (read_raw_brainvision, read_raw_egi, read_raw_fif, - read_fiducials, read_raw_nirx) - -from mne.io import RawArray -from mne.datasets import testing - - data_path = testing.data_path(download=False) fif_dig_montage_fname = data_path / "montage" / "eeganes07.fif" egi_dig_montage_fname = data_path / "montage" / "coordinates.xml" @@ -61,15 +87,11 @@ locs_montage_fname = data_path / "EEGLAB" / "test_chans.locs" evoked_fname = data_path / "montage" / "level2_raw-ave.fif" eeglab_fname = data_path / "EEGLAB" / "test_raw.set" -bdf_fname1 = 
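# --- Usage sketch (annotation; not part of the diff): round-tripping a built-in
# layout as in test_io_layout_lout above; the output path is illustrative.
import mne

layout = mne.channels.read_layout(fname="Vectorview-all", scale=False)
layout.save("foobar.lout", overwrite=True)
layout_read = mne.channels.read_layout(fname="foobar.lout", scale=False)
assert layout_read.names == layout.names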
data_path / "BDF" / "test_generator_2.bdf" -bdf_fname2 = data_path / "BDF" / "test_bdf_stim_channel.bdf" -egi_fname1 = data_path / "EGI" / "test_egi.mff" -cnt_fname = data_path / "CNT" / "scan41_short.cnt" fnirs_dname = data_path / "NIRx" / "nirscout" / "nirx_15_2_recording_w_short" mgh70_fname = data_path / "SSS" / "mgh70_raw.fif" subjects_dir = data_path / "subjects" -io_dir = Path(__file__).parent.parent.parent / "io" +io_dir = Path(__file__).parents[2] / "io" kit_dir = io_dir / "kit" / "tests" / "data" elp = kit_dir / "test_elp.txt" hsp = kit_dir / "test_hsp.txt" @@ -78,359 +100,485 @@ fif_fname = io_dir / "tests" / "data" / "test_raw.fif" edf_path = io_dir / "edf" / "tests" / "data" / "test.edf" bdf_path = io_dir / "edf" / "tests" / "data" / "test_bdf_eeglab.mat" -egi_fname2 = io_dir / "egi" / "tests" / "data" / "test_egi.raw" vhdr_path = io_dir / "brainvision" / "tests" / "data" / "test.vhdr" ctf_fif_fname = io_dir / "tests" / "data" / "test_ctf_comp_raw.fif" -nicolet_fname = io_dir / "nicolet" / "tests" / "data" / "test_nicolet_raw.data" def _make_toy_raw(n_channels): return RawArray( data=np.empty([n_channels, 1]), info=create_info( - ch_names=list(ascii_lowercase[:n_channels]), - sfreq=1, ch_types='eeg' - ) + ch_names=list(ascii_lowercase[:n_channels]), sfreq=1, ch_types="eeg" + ), ) def _make_toy_dig_montage(n_channels, **kwargs): return make_dig_montage( - ch_pos=dict(zip( - list(ascii_lowercase[:n_channels]), - np.arange(n_channels * 3).reshape(n_channels, 3), - )), - **kwargs + ch_pos=dict( + zip( + list(ascii_lowercase[:n_channels]), + np.arange(n_channels * 3).reshape(n_channels, 3), + ) + ), + **kwargs, ) def _get_dig_montage_pos(montage): - return np.array([d['r'] for d in _get_dig_eeg(montage.dig)]) + return np.array([d["r"] for d in _get_dig_eeg(montage.dig)]) def test_dig_montage_trans(tmp_path): """Test getting a trans from and applying a trans to a montage.""" nasion, lpa, rpa, *ch_pos = np.random.RandomState(0).randn(10, 3) - ch_pos = {f'EEG{ii:3d}': pos for ii, pos in enumerate(ch_pos, 1)} - montage = make_dig_montage(ch_pos, nasion=nasion, lpa=lpa, rpa=rpa, - coord_frame='mri') + ch_pos = {f"EEG{ii:3d}": pos for ii, pos in enumerate(ch_pos, 1)} + montage = make_dig_montage( + ch_pos, nasion=nasion, lpa=lpa, rpa=rpa, coord_frame="mri" + ) trans = compute_native_head_t(montage) _ensure_trans(trans) # ensure that we can save and load it, too - fname = tmp_path / 'temp-mon.fif' - _check_roundtrip(montage, fname, 'mri') + fname = tmp_path / "temp-mon.fif" + with pytest.warns(RuntimeWarning, match="MNE naming conventions"): + _check_roundtrip(montage, fname, "mri") # test applying a trans position1 = montage.get_positions() montage.apply_trans(trans) - assert montage.get_positions()['coord_frame'] == 'head' + assert montage.get_positions()["coord_frame"] == "head" montage.apply_trans(invert_transform(trans)) position2 = montage.get_positions() assert str(position1) == str(position2) # exactly equal -def test_fiducials(): +@pytest.mark.parametrize("fname", (fif_fname, ctf_fif_fname)) +def test_fiducials(tmp_path, fname): """Test handling of fiducials.""" # Eventually the code used here should be unified with montage.py, but for # now it uses code in odd places - for fname in (fif_fname, ctf_fif_fname): - fids, coord_frame = read_fiducials(fname) - points = _fiducial_coords(fids, coord_frame) - assert points.shape == (3, 3) - # Fids - assert_allclose(points[:, 2], 0., atol=1e-6) - assert_allclose(points[::2, 1], 0., atol=1e-6) - assert points[2, 0] > 0 # RPA - assert 
points[0, 0] < 0 # LPA - # Nasion - assert_allclose(points[1, 0], 0., atol=1e-6) - assert points[1, 1] > 0 + fids, coord_frame = read_fiducials(fname) + assert coord_frame == FIFF.FIFFV_COORD_HEAD + points = _fiducial_coords(fids, coord_frame) + assert points.shape == (3, 3) + # Fids + assert_allclose(points[:, 2], 0.0, atol=1e-6) + assert_allclose(points[::2, 1], 0.0, atol=1e-6) + assert points[2, 0] > 0 # RPA + assert points[0, 0] < 0 # LPA + # Nasion + assert_allclose(points[1, 0], 0.0, atol=1e-6) + assert points[1, 1] > 0 + fname_out = tmp_path / "test-dig.fif" + make_dig_montage( + lpa=fids[0]["r"], nasion=fids[1]["r"], rpa=fids[2]["r"], coord_frame="mri_voxel" + ).save(fname_out, overwrite=True) + fids_2, coord_frame_2 = read_fiducials(fname_out) + assert coord_frame_2 == FIFF.FIFFV_MNE_COORD_MRI_VOXEL + assert_allclose( + [fid["r"] for fid in fids[:3]], + [fid["r"] for fid in fids_2], + rtol=1e-6, + ) + assert coord_frame_2 is not None def test_documented(): """Test that standard montages are documented.""" - montage_dir = Path(_mne_file).parent / 'channels' / 'data' / 'montages' - montage_files = Path(montage_dir).glob('*') + montage_dir = Path(_mne_file).parent / "channels" / "data" / "montages" + montage_files = Path(montage_dir).glob("*") montage_names = [f.stem for f in montage_files] assert len(montage_names) == len(_BUILTIN_STANDARD_MONTAGES) - assert set(montage_names) == set( - [m.name for m in _BUILTIN_STANDARD_MONTAGES] - ) - - -@pytest.mark.parametrize('reader, file_content, expected_dig, ext, warning', [ - pytest.param( - partial(read_custom_montage, head_size=None), - ('FidNz 0 9.071585155 -2.359754454\n' - 'FidT9 -6.711765 0.040402876 -3.251600355\n' - 'very_very_very_long_name -5.831241498 -4.494821698 4.955347697\n' - 'Cz 0 0 1\n' - 'Cz 0 0 8.899186843'), - make_dig_montage( - ch_pos={ - 'very_very_very_long_name': [-5.8312416, -4.4948215, 4.9553475], # noqa - 'Cz': [0., 0., 8.899187], - }, - nasion=[0., 9.071585, -2.3597546], - lpa=[-6.711765, 0.04040287, -3.2516003], - rpa=None, + assert set(montage_names) == set([m.name for m in _BUILTIN_STANDARD_MONTAGES]) + + +@pytest.mark.parametrize( + "reader, file_content, expected_dig, ext, warning", + [ + pytest.param( + partial(read_custom_montage, head_size=None), + ( + "FidNz 0 9.071585155 -2.359754454\n" + "FidT9 -6.711765 0.040402876 -3.251600355\n" + "very_very_very_long_name -5.831241498 -4.494821698 4.955347697\n" + "Cz 0 0 1\n" + "Cz 0 0 8.899186843" + ), + make_dig_montage( + ch_pos={ + "very_very_very_long_name": [ + -5.8312416, + -4.4948215, + 4.9553475, + ], # noqa + "Cz": [0.0, 0.0, 8.899187], + }, + nasion=[0.0, 9.071585, -2.3597546], + lpa=[-6.711765, 0.04040287, -3.2516003], + rpa=None, + ), + "sfp", + (RuntimeWarning, r"Duplicate.*last will be used for Cz \(2\)"), + id="sfp_duplicate", + ), + pytest.param( + partial(read_custom_montage, head_size=None), + ( + "FidNz 0 9.071585155 -2.359754454\n" + "FidT9 -6.711765 0.040402876 -3.251600355\n" + "headshape 1 2 3\n" + "headshape 4 5 6\n" + "Cz 0 0 8.899186843" + ), + make_dig_montage( + hsp=[ + [1, 2, 3], + [4, 5, 6], + ], + ch_pos={ + "Cz": [0.0, 0.0, 8.899187], + }, + nasion=[0.0, 9.071585, -2.3597546], + lpa=[-6.711765, 0.04040287, -3.2516003], + rpa=None, + ), + "sfp", + None, + id="sfp_headshape", + ), + pytest.param( + partial(read_custom_montage, head_size=1), + ( + "1 0 0.50669 FPz\n" + "2 23 0.71 EOG1\n" + "3 -39.947 0.34459 F3\n" + "4 0 0.25338 Fz\n" + ), + make_dig_montage( + ch_pos={ + "EOG1": [0.30873816, 0.72734152, -0.61290705], + "F3": 
[-0.56705965, 0.67706631, 0.46906776], + "FPz": [0.0, 0.99977915, -0.02101571], + "Fz": [0.0, 0.71457525, 0.69955859], + }, + nasion=None, + lpa=None, + rpa=None, + coord_frame="head", + ), + "loc", + None, + id="EEGLAB", ), - 'sfp', - (RuntimeWarning, r'Duplicate.*last will be used for Cz \(2\)'), - id='sfp_duplicate'), - - pytest.param( - partial(read_custom_montage, head_size=None), - ('FidNz 0 9.071585155 -2.359754454\n' - 'FidT9 -6.711765 0.040402876 -3.251600355\n' - 'headshape 1 2 3\n' - 'headshape 4 5 6\n' - 'Cz 0 0 8.899186843'), - make_dig_montage( - hsp=[ - [1, 2, 3], - [4, 5, 6], - ], - ch_pos={ - 'Cz': [0., 0., 8.899187], - }, - nasion=[0., 9.071585, -2.3597546], - lpa=[-6.711765, 0.04040287, -3.2516003], - rpa=None, + pytest.param( + partial(read_custom_montage, head_size=None, coord_frame="mri"), + "// MatLab Sphere coordinates [degrees] Cartesian coordinates\n" # noqa: E501 + "// Label Theta Phi Radius X Y Z off sphere surface\n" # noqa: E501 + "E1 37.700 -14.000 1.000 0.7677 0.5934 -0.2419 -0.00000000000000011\n" # noqa: E501 + "E3 51.700 11.000 1.000 0.6084 0.7704 0.1908 0.00000000000000000\n" # noqa: E501 + "E31 90.000 -11.000 1.000 0.0000 0.9816 -0.1908 0.00000000000000000\n" # noqa: E501 + "E61 158.000 -17.200 1.000 -0.8857 0.3579 -0.2957 -0.00000000000000022", # noqa: E501 + make_dig_montage( + ch_pos={ + "E1": [0.7677, 0.5934, -0.2419], + "E3": [0.6084, 0.7704, 0.1908], + "E31": [0.0, 0.9816, -0.1908], + "E61": [-0.8857, 0.3579, -0.2957], + }, + nasion=None, + lpa=None, + rpa=None, + coord_frame="mri", + ), + "csd", + None, + id="matlab", ), - 'sfp', - None, - id='sfp_headshape'), - - pytest.param( - partial(read_custom_montage, head_size=1), - ('1 0 0.50669 FPz\n' - '2 23 0.71 EOG1\n' - '3 -39.947 0.34459 F3\n' - '4 0 0.25338 Fz\n'), - make_dig_montage( - ch_pos={ - 'EOG1': [0.30873816, 0.72734152, -0.61290705], - 'F3': [-0.56705965, 0.67706631, 0.46906776], - 'FPz': [0., 0.99977915, -0.02101571], - 'Fz': [0., 0.71457525, 0.69955859], - }, - nasion=None, lpa=None, rpa=None, coord_frame='head', + pytest.param( + partial(read_custom_montage, head_size=None), + ( + "# ASA electrode file\nReferenceLabel avg\nUnitPosition mm\n" + "NumberPositions= 68\n" + "Positions\n" + "-86.0761 -19.9897 -47.9860\n" + "85.7939 -20.0093 -48.0310\n" + "0.0083 86.8110 -39.9830\n" + "-86.0761 -24.9897 -67.9860\n" + "Labels\nLPA\nRPA\nNz\nDummy\n" + ), + make_dig_montage( + ch_pos={ + "Dummy": [-0.0860761, -0.0249897, -0.067986], + }, + nasion=[8.3000e-06, 8.6811e-02, -3.9983e-02], + lpa=[-0.0860761, -0.0199897, -0.047986], + rpa=[0.0857939, -0.0200093, -0.048031], + ), + "elc", + None, + id="old ASA electrode (elc)", ), - 'loc', - None, - id='EEGLAB'), - - pytest.param( - partial(read_custom_montage, head_size=None, coord_frame='mri'), - ('// MatLab Sphere coordinates [degrees] Cartesian coordinates\n' # noqa: E501 - '// Label Theta Phi Radius X Y Z off sphere surface\n' # noqa: E501 - 'E1 37.700 -14.000 1.000 0.7677 0.5934 -0.2419 -0.00000000000000011\n' # noqa: E501 - 'E3 51.700 11.000 1.000 0.6084 0.7704 0.1908 0.00000000000000000\n' # noqa: E501 - 'E31 90.000 -11.000 1.000 0.0000 0.9816 -0.1908 0.00000000000000000\n' # noqa: E501 - 'E61 158.000 -17.200 1.000 -0.8857 0.3579 -0.2957 -0.00000000000000022'), # noqa: E501 - make_dig_montage( - ch_pos={ - 'E1': [0.7677, 0.5934, -0.2419], - 'E3': [0.6084, 0.7704, 0.1908], - 'E31': [0., 0.9816, -0.1908], - 'E61': [-0.8857, 0.3579, -0.2957], - }, - nasion=None, lpa=None, rpa=None, coord_frame='mri', + pytest.param( + 
partial(read_custom_montage, head_size=None), + ( + "NumberPositions= 96\n" + "UnitPosition mm\n" + "Positions\n" + "E01 : 5.288 -3.658 119.693\n" + "E02 : 59.518 -4.031 101.404\n" + "E03 : 29.949 -50.988 98.145\n" + "Labels\n" + "E01 E02 E03\n" + ), + make_dig_montage( + ch_pos={ + "E01": [0.005288, -0.003658, 0.119693], + "E02": [0.059518, -0.004031, 0.101404], + "E03": [0.029949, -0.050988, 0.098145], + }, + ), + "elc", + None, + id="new ASA electrode (elc)", ), - 'csd', - None, - id='matlab'), - - pytest.param( - partial(read_custom_montage, head_size=None), - ('# ASA electrode file\nReferenceLabel avg\nUnitPosition mm\n' - 'NumberPositions= 68\n' - 'Positions\n' - '-86.0761 -19.9897 -47.9860\n' - '85.7939 -20.0093 -48.0310\n' - '0.0083 86.8110 -39.9830\n' - '-86.0761 -24.9897 -67.9860\n' - 'Labels\nLPA\nRPA\nNz\nDummy\n'), - make_dig_montage( - ch_pos={ - 'Dummy': [-0.0860761, -0.0249897, -0.067986], - }, - nasion=[8.3000e-06, 8.6811e-02, -3.9983e-02], - lpa=[-0.0860761, -0.0199897, -0.047986], - rpa=[0.0857939, -0.0200093, -0.048031], + pytest.param( + partial(read_custom_montage, head_size=None), + ( + "ReferenceLabel\n" + "avg\n" + "UnitPosition mm\n" + "NumberPositions= 6\n" + "Positions\n" + "-69.2574 10.5895 -25.0009\n" + "3.3791 94.6594 32.2592\n" + "77.2856 12.0537 -30.2488\n" + "4.6147 121.8858 8.6370\n" + "-31.3669 54.0269 94.9191\n" + "-8.7495 56.5653 99.6655\n" + "Labels\n" + "LPA\n" + "Nz\n" + "RPA\n" + "EEG 000\n" + "EEG 001\n" + "EEG 002\n" + ), + make_dig_montage( + ch_pos={ + "EEG 000": [0.004615, 0.121886, 0.008637], + "EEG 001": [-0.031367, 0.054027, 0.094919], + "EEG 002": [-0.00875, 0.056565, 0.099665], + }, + nasion=[0.003379, 0.094659, 0.032259], + lpa=[-0.069257, 0.010589, -0.025001], + rpa=[0.077286, 0.012054, -0.030249], + ), + "elc", + None, + id="another old ASA electrode (elc)", ), - 'elc', - None, - id='ASA electrode'), - - pytest.param( - partial(read_custom_montage, head_size=1), - ('Site Theta Phi\n' - 'Fp1 -92 -72\n' - 'Fp2 92 72\n' - 'very_very_very_long_name -92 72\n' - 'O2 92 -90\n'), - make_dig_montage( - ch_pos={ - 'Fp1': [-0.30882875, 0.95047716, -0.0348995], - 'Fp2': [0.30882875, 0.95047716, -0.0348995], - 'very_very_very_long_name': [-0.30882875, -0.95047716, -0.0348995], # noqa - 'O2': [6.11950389e-17, -9.99390827e-01, -3.48994967e-02] - }, - nasion=None, lpa=None, rpa=None, + pytest.param( + partial(read_custom_montage, head_size=1), + ( + "Site Theta Phi\n" + "Fp1 -92 -72\n" + "Fp2 92 72\n" + "very_very_very_long_name -92 72\n" + "O2 92 -90\n" + ), + make_dig_montage( + ch_pos={ + "Fp1": [-0.30882875, 0.95047716, -0.0348995], + "Fp2": [0.30882875, 0.95047716, -0.0348995], + "very_very_very_long_name": [ + -0.30882875, + -0.95047716, + -0.0348995, + ], # noqa + "O2": [6.11950389e-17, -9.99390827e-01, -3.48994967e-02], + }, + nasion=None, + lpa=None, + rpa=None, + ), + "txt", + None, + id="generic theta-phi (txt)", ), - 'txt', - None, - id='generic theta-phi (txt)'), - - pytest.param( - partial(read_custom_montage, head_size=None), - ('FID\t LPA\t -120.03\t 0\t 85\n' - 'FID\t RPA\t 120.03\t 0\t 85\n' - 'FID\t Nz\t 114.03\t 90\t 85\n' - 'EEG\t F3\t -62.027\t -50.053\t 85\n' - 'EEG\t Fz\t 45.608\t 90\t 85\n' - 'EEG\t F4\t 62.01\t 50.103\t 85\n' - 'EEG\t FCz\t 68.01\t 58.103\t 85\n'), - make_dig_montage( - ch_pos={ - 'F3': [-0.48200427, 0.57551063, 0.39869712], - 'Fz': [3.71915931e-17, 6.07384809e-01, 5.94629038e-01], - 'F4': [0.48142596, 0.57584026, 0.39891983], - 'FCz': [0.41645989, 0.66914889, 0.31827805], - }, - nasion=[4.75366562e-17, 
7.76332511e-01, -3.46132681e-01], - lpa=[-7.35898963e-01, 9.01216309e-17, -4.25385374e-01], - rpa=[0.73589896, 0., -0.42538537], + pytest.param( + partial(read_custom_montage, head_size=None), + ( + "FID\t LPA\t -120.03\t 0\t 85\n" + "FID\t RPA\t 120.03\t 0\t 85\n" + "FID\t Nz\t 114.03\t 90\t 85\n" + "EEG\t F3\t -62.027\t -50.053\t 85\n" + "EEG\t Fz\t 45.608\t 90\t 85\n" + "EEG\t F4\t 62.01\t 50.103\t 85\n" + "EEG\t FCz\t 68.01\t 58.103\t 85\n" + ), + make_dig_montage( + ch_pos={ + "F3": [-0.48200427, 0.57551063, 0.39869712], + "Fz": [3.71915931e-17, 6.07384809e-01, 5.94629038e-01], + "F4": [0.48142596, 0.57584026, 0.39891983], + "FCz": [0.41645989, 0.66914889, 0.31827805], + }, + nasion=[4.75366562e-17, 7.76332511e-01, -3.46132681e-01], + lpa=[-7.35898963e-01, 9.01216309e-17, -4.25385374e-01], + rpa=[0.73589896, 0.0, -0.42538537], + ), + "elp", + None, + id="BESA spherical model", ), - 'elp', - None, - id='BESA spherical model'), - - pytest.param( - partial(read_dig_hpts, unit='m'), - ('eeg Fp1 -95.0 -3. -3.\n' - 'eeg AF7 -1 -1 -3\n' - 'eeg A3 -2 -2 2\n' - 'eeg A 0 0 0'), - make_dig_montage( - ch_pos={ - 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], - 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], - }, - nasion=None, lpa=None, rpa=None, + pytest.param( + partial(read_dig_hpts, unit="m"), + ("eeg Fp1 -95.0 -3. -3.\neeg AF7 -1 -1 -3\neeg A3 -2 -2 2\neeg A 0 0 0"), + make_dig_montage( + ch_pos={ + "A": [0.0, 0.0, 0.0], + "A3": [-2.0, -2.0, 2.0], + "AF7": [-1.0, -1.0, -3.0], + "Fp1": [-95.0, -3.0, -3.0], + }, + nasion=None, + lpa=None, + rpa=None, + ), + "hpts", + None, + id="legacy mne-c", ), - 'hpts', - None, - id='legacy mne-c'), - - pytest.param( - read_custom_montage, - ('ch_name, x, y, z\n' - 'Fp1, -95.0, -3., -3.\n' - 'AF7, -1, -1, -3\n' - 'A3, -2, -2, 2\n' - 'A, 0, 0, 0'), - make_dig_montage( - ch_pos={ - 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], - 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], - }, - nasion=None, lpa=None, rpa=None, + pytest.param( + read_custom_montage, + ( + "ch_name, x, y, z\n" + "Fp1, -95.0, -3., -3.\n" + "AF7, -1, -1, -3\n" + "A3, -2, -2, 2\n" + "A, 0, 0, 0" + ), + make_dig_montage( + ch_pos={ + "A": [0.0, 0.0, 0.0], + "A3": [-2.0, -2.0, 2.0], + "AF7": [-1.0, -1.0, -3.0], + "Fp1": [-95.0, -3.0, -3.0], + }, + nasion=None, + lpa=None, + rpa=None, + ), + "csv", + None, + id="CSV file", ), - 'csv', - None, - id='CSV file'), - - pytest.param( - read_custom_montage, - ('1\t-95.0\t-3.\t-3.\tFp1\n' - '2\t-1\t-1\t-3\tAF7\n' - '3\t-2\t-2\t2\tA3\n' - '4\t0\t0\t0\tA'), - make_dig_montage( - ch_pos={ - 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], - 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], - }, - nasion=None, lpa=None, rpa=None, + pytest.param( + read_custom_montage, + ( + "1\t-95.0\t-3.\t-3.\tFp1\n" + "2\t-1\t-1\t-3\tAF7\n" + "3\t-2\t-2\t2\tA3\n" + "4\t0\t0\t0\tA" + ), + make_dig_montage( + ch_pos={ + "A": [0.0, 0.0, 0.0], + "A3": [-2.0, -2.0, 2.0], + "AF7": [-1.0, -1.0, -3.0], + "Fp1": [-95.0, -3.0, -3.0], + }, + nasion=None, + lpa=None, + rpa=None, + ), + "xyz", + None, + id="XYZ file", ), - 'xyz', - None, - id='XYZ file'), - - pytest.param( - read_custom_montage, - ('ch_name\tx\ty\tz\n' - 'Fp1\t-95.0\t-3.\t-3.\n' - 'AF7\t-1\t-1\t-3\n' - 'A3\t-2\t-2\t2\n' - 'A\t0\t0\t0'), - make_dig_montage( - ch_pos={ - 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], - 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], - }, - nasion=None, lpa=None, rpa=None, + pytest.param( + read_custom_montage, + ( + "ch_name\tx\ty\tz\n" + "Fp1\t-95.0\t-3.\t-3.\n" + "AF7\t-1\t-1\t-3\n" + "A3\t-2\t-2\t2\n" + 
"A\t0\t0\t0" + ), + make_dig_montage( + ch_pos={ + "A": [0.0, 0.0, 0.0], + "A3": [-2.0, -2.0, 2.0], + "AF7": [-1.0, -1.0, -3.0], + "Fp1": [-95.0, -3.0, -3.0], + }, + nasion=None, + lpa=None, + rpa=None, + ), + "tsv", + None, + id="TSV file", ), - 'tsv', - None, - id='TSV file'), - - pytest.param( - partial(read_custom_montage, head_size=None), - ('\n' - '\n' - '\n' - ' \n' - ' Fp1\n' - ' -90\n' - ' -72\n' - ' 1\n' - ' 1\n' - ' \n' - ' \n' - ' Fz\n' - ' 45\n' - ' 90\n' - ' 1\n' - ' 2\n' - ' \n' - ' \n' - ' F3\n' - ' -60\n' - ' -51\n' - ' 1\n' - ' 3\n' - ' \n' - ' \n' - ' F7\n' - ' -90\n' - ' -36\n' - ' 1\n' - ' 4\n' - ' \n' - ''), - make_dig_montage( - ch_pos={ - 'Fp1': [-3.09016994e-01, 9.51056516e-01, 6.12323400e-17], - 'Fz': [4.32978028e-17, 7.07106781e-01, 7.07106781e-01], - 'F3': [-0.54500745, 0.67302815, 0.5], - 'F7': [-8.09016994e-01, 5.87785252e-01, 6.12323400e-17], - }, - nasion=None, lpa=None, rpa=None, + pytest.param( + partial(read_custom_montage, head_size=None), + ( + '\n' + "\n" + '\n' + " \n" + " Fp1\n" + " -90\n" + " -72\n" + " 1\n" + " 1\n" + " \n" + " \n" + " Fz\n" + " 45\n" + " 90\n" + " 1\n" + " 2\n" + " \n" + " \n" + " F3\n" + " -60\n" + " -51\n" + " 1\n" + " 3\n" + " \n" + " \n" + " F7\n" + " -90\n" + " -36\n" + " 1\n" + " 4\n" + " \n" + "" + ), + make_dig_montage( + ch_pos={ + "Fp1": [-3.09016994e-01, 9.51056516e-01, 6.12323400e-17], + "Fz": [4.32978028e-17, 7.07106781e-01, 7.07106781e-01], + "F3": [-0.54500745, 0.67302815, 0.5], + "F7": [-8.09016994e-01, 5.87785252e-01, 6.12323400e-17], + }, + nasion=None, + lpa=None, + rpa=None, + ), + "bvef", + None, + id="brainvision", ), - 'bvef', - None, - id='brainvision'), -]) -def test_montage_readers( - reader, file_content, expected_dig, ext, warning, tmp_path -): + ], +) +def test_montage_readers(reader, file_content, expected_dig, ext, warning, tmp_path): """Test that we have an equivalent of read_montage for all file formats.""" + if file_content.startswith("' + "" ) - assert set([d['coord_frame'] for d in montage.dig]) == { - FIFF.FIFFV_COORD_UNKNOWN - } + assert set([d["coord_frame"] for d in montage.dig]) == {FIFF.FIFFV_COORD_UNKNOWN} EXPECTED_FID_IN_POLHEMUS = { - 'nasion': [0.001393, 0.0131613, -0.0046967], - 'lpa': [-0.0624997, -0.0737271, 0.07996], - 'rpa': [-0.0748957, 0.0873785, 0.0811943], + "nasion": [0.001393, 0.0131613, -0.0046967], + "lpa": [-0.0624997, -0.0737271, 0.07996], + "rpa": [-0.0748957, 0.0873785, 0.0811943], } fiducials, fid_coordframe = _get_fid_coords(montage.dig) assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN @@ -562,17 +733,17 @@ def test_read_dig_montage_using_polhemus_fastscan(): def test_read_dig_montage_using_polhemus_fastscan_error_handling(tmp_path): """Test reading Polhemus FastSCAN errors.""" with open(kit_dir / "test_elp.txt") as fid: - content = fid.read().replace('FastSCAN', 'XxxxXXXX') + content = fid.read().replace("FastSCAN", "XxxxXXXX") - fname = tmp_path / 'faulty_FastSCAN.txt' - with open(fname, 'w') as fid: + fname = tmp_path / "faulty_FastSCAN.txt" + with open(fname, "w") as fid: fid.write(content) - with pytest.raises(ValueError, match='not contain.*Polhemus FastSCAN'): + with pytest.raises(ValueError, match="not contain.*Polhemus FastSCAN"): _ = read_polhemus_fastscan(fname) - fname = tmp_path / 'faulty_FastSCAN.bar' - with open(fname, 'w') as fid: + fname = tmp_path / "faulty_FastSCAN.bar" + with open(fname, "w") as fid: fid.write(content) EXPECTED_ERR_MSG = "allowed value is '.txt', but got '.bar' instead" with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): @@ 
-582,16 +753,13 @@ def test_read_dig_montage_using_polhemus_fastscan_error_handling(tmp_path): def test_read_dig_polhemus_isotrak_hsp(): """Test reading Polhemus IsoTrak HSP file.""" EXPECTED_FID_IN_POLHEMUS = { - 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]), - 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]), - 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]), + "nasion": np.array([1.1056e-01, -5.4210e-19, 0]), + "lpa": np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]), + "rpa": np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]), } - montage = read_dig_polhemus_isotrak( - fname=kit_dir / "test.hsp", ch_names=None - ) + montage = read_dig_polhemus_isotrak(fname=kit_dir / "test.hsp", ch_names=None) assert repr(montage) == ( - '' + "" ) fiducials, fid_coordframe = _get_fid_coords(montage.dig) @@ -604,16 +772,13 @@ def test_read_dig_polhemus_isotrak_hsp(): def test_read_dig_polhemus_isotrak_elp(): """Test reading Polhemus IsoTrak ELP file.""" EXPECTED_FID_IN_POLHEMUS = { - 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]), - 'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]), - 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]), + "nasion": np.array([1.1056e-01, -5.4210e-19, 0]), + "lpa": np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]), + "rpa": np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]), } - montage = read_dig_polhemus_isotrak( - fname=kit_dir / "test.elp", ch_names=None - ) + montage = read_dig_polhemus_isotrak(fname=kit_dir / "test.elp", ch_names=None) assert repr(montage) == ( - '' + "" ) fiducials, fid_coordframe = _get_fid_coords(montage.dig) @@ -622,35 +787,35 @@ def test_read_dig_polhemus_isotrak_elp(): assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk]) -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def isotrak_eeg(tmp_path_factory): """Mock isotrak file with EEG positions.""" _SEED = 42 N_ROWS, N_COLS = 5, 3 content = np.random.RandomState(_SEED).randn(N_ROWS, N_COLS) - fname = tmp_path_factory.mktemp('data') / 'test.eeg' - with open(str(fname), 'w') as fid: - fid.write(( - '3 200\n' - '//Shape file\n' - '//Minor revision number\n' - '2\n' - '//Subject Name\n' - '%N Name \n' - '////Shape code, number of digitized points\n' - )) - fid.write('0 {rows:d}\n'.format(rows=N_ROWS)) - fid.write(( - '//Position of fiducials X+, Y+, Y- on the subject\n' - '%F 0.11056 -5.421e-19 0 \n' - '%F -0.00021075 0.080793 -7.5894e-19 \n' - '%F 0.00021075 -0.080793 -2.8731e-18 \n' - '//No of rows, no of columns; position of digitized points\n' - )) - fid.write('{rows:d} {cols:d}\n'.format(rows=N_ROWS, cols=N_COLS)) + fname = tmp_path_factory.mktemp("data") / "test.eeg" + with open(str(fname), "w") as fid: + fid.write( + "3 200\n" + "//Shape file\n" + "//Minor revision number\n" + "2\n" + "//Subject Name\n" + "%N Name \n" + "////Shape code, number of digitized points\n" + ) + fid.write(f"0 {N_ROWS:d}\n") + fid.write( + "//Position of fiducials X+, Y+, Y- on the subject\n" + "%F 0.11056 -5.421e-19 0 \n" + "%F -0.00021075 0.080793 -7.5894e-19 \n" + "%F 0.00021075 -0.080793 -2.8731e-18 \n" + "//No of rows, no of columns; position of digitized points\n" + ) + fid.write(f"{N_ROWS} {N_COLS}\n") for row in content: - fid.write('\t'.join('%0.18e' % cell for cell in row) + '\n') + fid.write("\t".join(f"{cell:0.18e}" for cell in row) + "\n") return str(fname) @@ -660,18 +825,18 @@ def test_read_dig_polhemus_isotrak_eeg(isotrak_eeg): N_CHANNELS = 5 _SEED = 42 EXPECTED_FID_IN_POLHEMUS = { - 'nasion': np.array([1.1056e-01, -5.4210e-19, 0]), - 'lpa': 
np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]), - 'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]), + "nasion": np.array([1.1056e-01, -5.4210e-19, 0]), + "lpa": np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]), + "rpa": np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]), } - ch_names = ['eeg {:01d}'.format(ii) for ii in range(N_CHANNELS)] - EXPECTED_CH_POS = dict(zip( - ch_names, np.random.RandomState(_SEED).randn(N_CHANNELS, 3))) + ch_names = [f"eeg {ii:01d}" for ii in range(N_CHANNELS)] + EXPECTED_CH_POS = dict( + zip(ch_names, np.random.RandomState(_SEED).randn(N_CHANNELS, 3)) + ) montage = read_dig_polhemus_isotrak(fname=isotrak_eeg, ch_names=ch_names) assert repr(montage) == ( - '' + "" ) fiducials, fid_coordframe = _get_fid_coords(montage.dig) @@ -681,8 +846,8 @@ def test_read_dig_polhemus_isotrak_eeg(isotrak_eeg): assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk]) for kk, dig_point in zip(montage.ch_names, _get_dig_eeg(montage.dig)): - assert_array_equal(dig_point['r'], EXPECTED_CH_POS[kk]) - assert dig_point['coord_frame'] == FIFF.FIFFV_COORD_UNKNOWN + assert_array_equal(dig_point["r"], EXPECTED_CH_POS[kk]) + assert dig_point["coord_frame"] == FIFF.FIFFV_COORD_UNKNOWN def test_read_dig_polhemus_isotrak_error_handling(isotrak_eeg, tmp_path): @@ -697,7 +862,7 @@ def test_read_dig_polhemus_isotrak_error_handling(isotrak_eeg, tmp_path): with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): _ = read_dig_polhemus_isotrak( fname=isotrak_eeg, - ch_names=['eeg {:01d}'.format(ii) for ii in range(N_CHANNELS + 42)] + ch_names=[f"eeg {ii:01d}" for ii in range(N_CHANNELS + 42)], ) # Check fname extensions @@ -706,7 +871,7 @@ def test_read_dig_polhemus_isotrak_error_handling(isotrak_eeg, tmp_path): with pytest.raises( ValueError, - match="Allowed val.*'.hsp', '.elp', and '.eeg', but got '.bar' instead" + match="Allowed val.*'.hsp', '.elp', and '.eeg', but got '.bar' instead", ): _ = read_dig_polhemus_isotrak(fname=fname, ch_names=None) @@ -714,52 +879,64 @@ def test_read_dig_polhemus_isotrak_error_handling(isotrak_eeg, tmp_path): def test_combining_digmontage_objects(): """Test combining different DigMontage objects.""" rng = np.random.RandomState(0) - fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3))) + fiducials = dict(zip(("nasion", "lpa", "rpa"), rng.rand(3, 3))) # hsp positions are [1X, 1X, 1X] - hsp1 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 11.)) - hsp2 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 12.)) - hsp3 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 13.)) + hsp1 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 11.0)) + hsp2 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 12.0)) + hsp3 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 13.0)) # hpi positions are [2X, 2X, 2X] - hpi1 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 21.)) - hpi2 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 22.)) - hpi3 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 23.)) + hpi1 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 21.0)) + hpi2 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 22.0)) + hpi3 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 23.0)) # channels have positions at 40s, 50s, and 60s. 
ch_pos1 = make_dig_montage( - **fiducials, - ch_pos={'h': [41, 41, 41], 'b': [42, 42, 42], 'g': [43, 43, 43]} + **fiducials, ch_pos={"h": [41, 41, 41], "b": [42, 42, 42], "g": [43, 43, 43]} ) ch_pos2 = make_dig_montage( - **fiducials, - ch_pos={'n': [51, 51, 51], 'y': [52, 52, 52], 'p': [53, 53, 53]} + **fiducials, ch_pos={"n": [51, 51, 51], "y": [52, 52, 52], "p": [53, 53, 53]} ) ch_pos3 = make_dig_montage( - **fiducials, - ch_pos={'v': [61, 61, 61], 'a': [62, 62, 62], 'l': [63, 63, 63]} + **fiducials, ch_pos={"v": [61, 61, 61], "a": [62, 62, 62], "l": [63, 63, 63]} ) montage = ( - DigMontage() + hsp1 + hsp2 + hsp3 + hpi1 + hpi2 + hpi3 + ch_pos1 + - ch_pos2 + ch_pos3 + DigMontage() + + hsp1 + + hsp2 + + hsp3 + + hpi1 + + hpi2 + + hpi3 + + ch_pos1 + + ch_pos2 + + ch_pos3 ) assert repr(montage) == ( - '' + "" ) EXPECTED_MONTAGE = make_dig_montage( **fiducials, - hsp=np.concatenate([np.full((2, 3), 11.), np.full((2, 3), 12.), - np.full((2, 3), 13.)]), - hpi=np.concatenate([np.full((2, 3), 21.), np.full((2, 3), 22.), - np.full((2, 3), 23.)]), + hsp=np.concatenate( + [np.full((2, 3), 11.0), np.full((2, 3), 12.0), np.full((2, 3), 13.0)] + ), + hpi=np.concatenate( + [np.full((2, 3), 21.0), np.full((2, 3), 22.0), np.full((2, 3), 23.0)] + ), ch_pos={ - 'h': [41, 41, 41], 'b': [42, 42, 42], 'g': [43, 43, 43], - 'n': [51, 51, 51], 'y': [52, 52, 52], 'p': [53, 53, 53], - 'v': [61, 61, 61], 'a': [62, 62, 62], 'l': [63, 63, 63], - } + "h": [41, 41, 41], + "b": [42, 42, 42], + "g": [43, 43, 43], + "n": [51, 51, 51], + "y": [52, 52, 52], + "p": [53, 53, 53], + "v": [61, 61, 61], + "a": [62, 62, 62], + "l": [63, 63, 63], + }, ) # Do some checks to ensure they are the same DigMontage @@ -773,33 +950,33 @@ def test_combining_digmontage_objects(): def test_combining_digmontage_forbiden_behaviors(): """Test combining different DigMontage objects with repeated names.""" rng = np.random.RandomState(0) - fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3))) + fiducials = dict(zip(("nasion", "lpa", "rpa"), rng.rand(3, 3))) dig1 = make_dig_montage( **fiducials, - ch_pos=dict(zip(list('abc'), rng.rand(3, 3))), + ch_pos=dict(zip(list("abc"), rng.rand(3, 3))), ) dig2 = make_dig_montage( **fiducials, - ch_pos=dict(zip(list('bcd'), rng.rand(3, 3))), + ch_pos=dict(zip(list("bcd"), rng.rand(3, 3))), ) dig2_wrong_fid = make_dig_montage( - nasion=rng.rand(3), lpa=rng.rand(3), rpa=rng.rand(3), - ch_pos=dict(zip(list('ghi'), rng.rand(3, 3))), + nasion=rng.rand(3), + lpa=rng.rand(3), + rpa=rng.rand(3), + ch_pos=dict(zip(list("ghi"), rng.rand(3, 3))), ) dig2_wrong_coordframe = make_dig_montage( - **fiducials, - ch_pos=dict(zip(list('ghi'), rng.rand(3, 3))), - coord_frame='meg' + **fiducials, ch_pos=dict(zip(list("ghi"), rng.rand(3, 3))), coord_frame="meg" ) - EXPECTED_ERR_MSG = "Cannot.*duplicated channel.*found: \'b\', \'c\'." + EXPECTED_ERR_MSG = "Cannot.*duplicated channel.*found: 'b', 'c'." 
with pytest.raises(RuntimeError, match=EXPECTED_ERR_MSG): _ = dig1 + dig2 - with pytest.raises(RuntimeError, match='fiducial locations do not match'): + with pytest.raises(RuntimeError, match="fiducial locations do not match"): _ = dig1 + dig2_wrong_fid - with pytest.raises(RuntimeError, match='not in the same coordinate '): + with pytest.raises(RuntimeError, match="not in the same coordinate "): _ = dig1 + dig2_wrong_coordframe @@ -807,69 +984,128 @@ def test_set_dig_montage(): """Test setting DigMontage with toy understandable points.""" N_CHANNELS, N_HSP, N_HPI = 3, 2, 1 ch_names = list(ascii_lowercase[:N_CHANNELS]) - ch_pos = dict(zip( - ch_names, - np.arange(N_CHANNELS * 3).reshape(N_CHANNELS, 3), - )) + ch_pos = dict( + zip( + ch_names, + np.arange(N_CHANNELS * 3).reshape(N_CHANNELS, 3), + ) + ) - montage_ch_only = make_dig_montage(ch_pos=ch_pos, coord_frame='head') + montage_ch_only = make_dig_montage(ch_pos=ch_pos, coord_frame="head") assert repr(montage_ch_only) == ( - '' + "" ) - info = create_info(ch_names, sfreq=1, ch_types='eeg') + info = create_info(ch_names, sfreq=1, ch_types="eeg") info.set_montage(montage_ch_only) - assert len(info['dig']) == len(montage_ch_only.dig) + 3 # added fiducials + assert len(info["dig"]) == len(montage_ch_only.dig) + 3 # added fiducials - assert_allclose(actual=np.array([ch['loc'][:6] for ch in info['chs']]), - desired=[[0., 1., 2., 0., 0., 0.], - [3., 4., 5., 0., 0., 0.], - [6., 7., 8., 0., 0., 0.]]) + assert_allclose( + actual=np.array([ch["loc"][:6] for ch in info["chs"]]), + desired=[ + [0.0, 1.0, 2.0, 0.0, 0.0, 0.0], + [3.0, 4.0, 5.0, 0.0, 0.0, 0.0], + [6.0, 7.0, 8.0, 0.0, 0.0, 0.0], + ], + ) montage_full = make_dig_montage( ch_pos=dict(**ch_pos, EEG000=np.full(3, 42)), # 4 = 3 egg + 1 eeg_ref - nasion=[1, 1, 1], lpa=[2, 2, 2], rpa=[3, 3, 3], + nasion=[1, 1, 1], + lpa=[2, 2, 2], + rpa=[3, 3, 3], hsp=np.full((N_HSP, 3), 4), hpi=np.full((N_HPI, 3), 4), - coord_frame='head' + coord_frame="head", ) assert repr(montage_full) == ( - '' + "" ) - info = create_info(ch_names, sfreq=1, ch_types='eeg') + info = create_info(ch_names, sfreq=1, ch_types="eeg") info.set_montage(montage_full) - EXPECTED_LEN = sum({'hsp': 2, 'hpi': 1, 'fid': 3, 'eeg': 4}.values()) - assert len(info['dig']) == EXPECTED_LEN - assert_allclose(actual=np.array([ch['loc'][:6] for ch in info['chs']]), - desired=[[0., 1., 2., 42., 42., 42.], - [3., 4., 5., 42., 42., 42.], - [6., 7., 8., 42., 42., 42.]]) + EXPECTED_LEN = sum({"hsp": 2, "hpi": 1, "fid": 3, "eeg": 4}.values()) + assert len(info["dig"]) == EXPECTED_LEN + assert_allclose( + actual=np.array([ch["loc"][:6] for ch in info["chs"]]), + desired=[ + [0.0, 1.0, 2.0, 42.0, 42.0, 42.0], + [3.0, 4.0, 5.0, 42.0, 42.0, 42.0], + [6.0, 7.0, 8.0, 42.0, 42.0, 42.0], + ], + ) + + +def test_set_dig_montage_with_nan_positions(): + """Test that fiducials are not NaN. + + Test that setting a montage with some NaN positions does not produce + NaN fiducials. 
+ """ + + def _ensure_fid_not_nan(info, ch_pos): + montage_kwargs = dict(ch_pos=dict(), coord_frame="head") + for ch_idx, ch in enumerate(info.ch_names): + montage_kwargs["ch_pos"][ch] = ch_pos[ch_idx] + + new_montage = make_dig_montage(**montage_kwargs) + info = info.copy() + info.set_montage(new_montage) + + recovered_montage = info.get_montage() + fid_coords, coord_frame = _get_fid_coords( + recovered_montage.dig, raise_error=False + ) + + for fid_coord in fid_coords.values(): + if fid_coord is not None: + assert not np.isnan(fid_coord).any() + + return fid_coords, coord_frame + + channels = list("ABCDEF") + info = create_info(channels, 1000, ch_types="seeg") + + # if all positions are NaN, the fiducials should not be NaN, but None + ch_pos = [info["chs"][ch_idx]["loc"][:3] for ch_idx in range(len(channels))] + fid_coords, coord_frame = _ensure_fid_not_nan(info, ch_pos) + for fid_coord in fid_coords.values(): + assert fid_coord is None + assert coord_frame is None + + # if some positions are not NaN, the fiducials should be a non-NaN array + ch_pos[0] = np.array([1.0, 1.5, 1.0]) + ch_pos[1] = np.array([2.0, 1.5, 1.5]) + ch_pos[2] = np.array([1.25, 1.0, 1.25]) + fid_coords, coord_frame = _ensure_fid_not_nan(info, ch_pos) + for fid_coord in fid_coords.values(): + assert isinstance(fid_coord, np.ndarray) + assert coord_frame == FIFF.FIFFV_COORD_HEAD @testing.requires_testing_data -def test_fif_dig_montage(tmp_path): +def test_fif_dig_montage(tmp_path, monkeypatch): """Test FIF dig montage support.""" - dig_montage = read_dig_fif(fif_dig_montage_fname) + dig_montage = read_dig_fif(fif_dig_montage_fname, verbose="error") # test round-trip IO - fname_temp = tmp_path / "test.fif" + fname_temp = tmp_path / "test-dig.fif" _check_roundtrip(dig_montage, fname_temp) # Make a BrainVision file like the one the user would have had raw_bv = read_raw_brainvision(bv_fname, preload=True) raw_bv_2 = raw_bv.copy() mapping = dict() - for ii, ch_name in enumerate(raw_bv.ch_names): - mapping[ch_name] = 'EEG%03d' % (ii + 1,) + for ii, ch_name in enumerate(raw_bv.ch_names, 1): + mapping[ch_name] = f"EEG{ii:03d}" raw_bv.rename_channels(mapping) - for ii, ch_name in enumerate(raw_bv_2.ch_names): - mapping[ch_name] = 'EEG%03d' % (ii + 33,) + for ii, ch_name in enumerate(raw_bv_2.ch_names, 33): + mapping[ch_name] = f"EEG{ii:03d}" raw_bv_2.rename_channels(mapping) raw_bv.add_channels([raw_bv_2]) - for ch in raw_bv.info['chs']: - ch['kind'] = FIFF.FIFFV_EEG_CH + for ch in raw_bv.info["chs"]: + ch["kind"] = FIFF.FIFFV_EEG_CH # Set the montage raw_bv.set_montage(dig_montage) @@ -879,62 +1115,81 @@ def test_fif_dig_montage(tmp_path): # check info[chs] matches assert_equal(len(raw_bv.ch_names), len(evoked.ch_names) - 1) - for ch_py, ch_c in zip(raw_bv.info['chs'], evoked.info['chs'][:-1]): - assert_equal(ch_py['ch_name'], - ch_c['ch_name'].replace('EEG ', 'EEG')) + for ch_py, ch_c in zip(raw_bv.info["chs"], evoked.info["chs"][:-1]): + assert_equal(ch_py["ch_name"], ch_c["ch_name"].replace("EEG ", "EEG")) # C actually says it's unknown, but it's not (?): # assert_equal(ch_py['coord_frame'], ch_c['coord_frame']) - assert_equal(ch_py['coord_frame'], FIFF.FIFFV_COORD_HEAD) - c_loc = ch_c['loc'].copy() + assert_equal(ch_py["coord_frame"], FIFF.FIFFV_COORD_HEAD) + c_loc = ch_c["loc"].copy() c_loc[c_loc == 0] = np.nan - assert_allclose(ch_py['loc'], c_loc, atol=1e-7) + assert_allclose(ch_py["loc"], c_loc, atol=1e-7) # check info[dig] assert_dig_allclose(raw_bv.info, evoked.info) # Roundtrip of non-FIF start - montage = 
make_dig_montage(hsp=read_polhemus_fastscan(hsp), - hpi=read_mrk(hpi)) + montage = make_dig_montage(hsp=read_polhemus_fastscan(hsp), hpi=read_mrk(hpi)) elp_points = read_polhemus_fastscan(elp) - ch_pos = {"EEG%03d" % (k + 1): pos for k, pos in enumerate(elp_points[8:])} - montage += make_dig_montage(nasion=elp_points[0], - lpa=elp_points[1], - rpa=elp_points[2], - ch_pos=ch_pos) - _check_roundtrip(montage, fname_temp, 'unknown') + ch_pos = {f"ECoG{k:03d}": pos for k, pos in enumerate(elp_points[3:], 1)} + assert len(elp_points) == 8 # there are only 8 but pretend the last are ECoG + other = make_dig_montage( + nasion=elp_points[0], lpa=elp_points[1], rpa=elp_points[2], ch_pos=ch_pos + ) + assert other.ch_names[0].startswith("ECoG") + montage += other + assert montage.ch_names[0].startswith("ECoG") + _check_roundtrip(montage, fname_temp, "unknown") montage = transform_to_head(montage) _check_roundtrip(montage, fname_temp) - montage.dig[0]['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN - with pytest.raises(RuntimeError, match='Only a single coordinate'): - montage.save(fname_temp) + montage.dig[0]["coord_frame"] = FIFF.FIFFV_COORD_UNKNOWN + with pytest.raises(RuntimeError, match="Only a single coordinate"): + montage.save(fname_temp, overwrite=True) + montage.dig[0]["coord_frame"] = FIFF.FIFFV_COORD_HEAD + + # Check that old-style files can be read, too, using EEG001 etc. + def write_dig_no_ch_names(*args, **kwargs): + kwargs["ch_names"] = None + return write_dig(*args, **kwargs) + + monkeypatch.setattr(mne.channels.montage, "write_dig", write_dig_no_ch_names) + montage.save(fname_temp, overwrite=True) + montage_read = read_dig_fif(fname_temp) + default_ch_names = [f"EEG{ii:03d}" for ii in range(1, 6)] + assert montage_read.ch_names == default_ch_names @testing.requires_testing_data def test_egi_dig_montage(tmp_path): """Test EGI MFF XML dig montage support.""" + pytest.importorskip("defusedxml") dig_montage = read_dig_egi(egi_dig_montage_fname) fid, coord = _get_fid_coords(dig_montage.dig) assert coord == FIFF.FIFFV_COORD_UNKNOWN assert_allclose( - actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]), - desired=[[ 0. 
, 10.564, -2.051], # noqa - [-8.592, 0.498, -4.128], # noqa - [ 8.592, 0.498, -4.128]], # noqa + actual=np.array([fid[key] for key in ["nasion", "lpa", "rpa"]]), + desired=[ + [0.0, 10.564, -2.051], # noqa + [-8.592, 0.498, -4.128], # noqa + [8.592, 0.498, -4.128], + ], # noqa ) # Test accuracy and embedding within raw object - raw_egi = read_raw_egi(egi_raw_fname, channel_naming='EEG %03d') + raw_egi = read_raw_egi( + egi_raw_fname, + channel_naming="EEG %03d", + events_as_annotations=True, + ) raw_egi.set_montage(dig_montage) test_raw_egi = read_raw_fif(egi_fif_fname) assert_equal(len(raw_egi.ch_names), len(test_raw_egi.ch_names)) - for ch_raw, ch_test_raw in zip(raw_egi.info['chs'], - test_raw_egi.info['chs']): - assert_equal(ch_raw['ch_name'], ch_test_raw['ch_name']) - assert_equal(ch_raw['coord_frame'], FIFF.FIFFV_COORD_HEAD) - assert_allclose(ch_raw['loc'], ch_test_raw['loc'], atol=1e-7) + for ch_raw, ch_test_raw in zip(raw_egi.info["chs"], test_raw_egi.info["chs"]): + assert_equal(ch_raw["ch_name"], ch_test_raw["ch_name"]) + assert_equal(ch_raw["coord_frame"], FIFF.FIFFV_COORD_HEAD) + assert_allclose(ch_raw["loc"], ch_test_raw["loc"], atol=1e-7) assert_dig_allclose(raw_egi.info, test_raw_egi.info) @@ -942,63 +1197,167 @@ def test_egi_dig_montage(tmp_path): fid, coord = _get_fid_coords(dig_montage_in_head.dig) assert coord == FIFF.FIFFV_COORD_HEAD assert_allclose( - actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]), - desired=[[0., 10.278, 0.], [-8.592, 0., 0.], [8.592, 0., 0.]], + actual=np.array([fid[key] for key in ["nasion", "lpa", "rpa"]]), + desired=[[0.0, 10.278, 0.0], [-8.592, 0.0, 0.0], [8.592, 0.0, 0.0]], atol=1e-4, ) - # test round-trip IO - fname_temp = tmp_path / 'egi_test.fif' - _check_roundtrip(dig_montage, fname_temp, 'unknown') + # test round-trip IO (with GZ) + fname_temp = tmp_path / "egi_test-dig.fif.gz" + _check_roundtrip(dig_montage, fname_temp, "unknown") _check_roundtrip(dig_montage_in_head, fname_temp) -def _pop_montage(dig_montage, ch_name): - # remove reference that was not used in old API - name_idx = dig_montage.ch_names.index(ch_name) - dig_idx = dig_montage._get_dig_names().index(ch_name) - - del dig_montage.dig[dig_idx] - del dig_montage.ch_names[name_idx] - for k in range(dig_idx, len(dig_montage.dig)): - dig_montage.dig[k]['ident'] -= 1 - - @testing.requires_testing_data def test_read_dig_captrak(tmp_path): """Test reading a captrak montage file.""" + pytest.importorskip("defusedxml") EXPECTED_CH_NAMES_OLD = [ - 'AF3', 'AF4', 'AF7', 'AF8', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'CP1', - 'CP2', 'CP3', 'CP4', 'CP5', 'CP6', 'CPz', 'Cz', 'F1', 'F2', 'F3', 'F4', - 'F5', 'F6', 'F7', 'F8', 'FC1', 'FC2', 'FC3', 'FC4', 'FC5', 'FC6', - 'FT10', 'FT7', 'FT8', 'FT9', 'Fp1', 'Fp2', 'Fz', 'GND', 'O1', 'O2', - 'Oz', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'PO10', 'PO3', - 'PO4', 'PO7', 'PO8', 'PO9', 'POz', 'Pz', 'REF', 'T7', 'T8', 'TP10', - 'TP7', 'TP8', 'TP9' + "AF3", + "AF4", + "AF7", + "AF8", + "C1", + "C2", + "C3", + "C4", + "C5", + "C6", + "CP1", + "CP2", + "CP3", + "CP4", + "CP5", + "CP6", + "CPz", + "Cz", + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "F8", + "FC1", + "FC2", + "FC3", + "FC4", + "FC5", + "FC6", + "FT10", + "FT7", + "FT8", + "FT9", + "Fp1", + "Fp2", + "Fz", + "GND", + "O1", + "O2", + "Oz", + "P1", + "P2", + "P3", + "P4", + "P5", + "P6", + "P7", + "P8", + "PO10", + "PO3", + "PO4", + "PO7", + "PO8", + "PO9", + "POz", + "Pz", + "REF", + "T7", + "T8", + "TP10", + "TP7", + "TP8", + "TP9", ] EXPECTED_CH_NAMES = [ - 'T7', 
'FC5', 'F7', 'C5', 'FT7', 'FT9', 'TP7', 'TP9', 'P7', 'CP5', - 'PO7', 'C3', 'CP3', 'P5', 'P3', 'PO3', 'PO9', 'O1', 'Oz', 'POz', 'O2', - 'PO4', 'P1', 'Pz', 'P2', 'CP2', 'CP1', 'CPz', 'Cz', 'C1', 'FC1', 'FC3', - 'REF', 'F3', 'F1', 'Fz', 'F5', 'AF7', 'AF3', 'Fp1', 'GND', 'F2', 'AF4', - 'Fp2', 'F4', 'F8', 'F6', 'AF8', 'FC2', 'FC6', 'FC4', 'C2', 'C4', 'P4', - 'CP4', 'PO8', 'P8', 'P6', 'CP6', 'PO10', 'TP10', 'TP8', 'FT10', 'T8', - 'C6', 'FT8' + "T7", + "FC5", + "F7", + "C5", + "FT7", + "FT9", + "TP7", + "TP9", + "P7", + "CP5", + "PO7", + "C3", + "CP3", + "P5", + "P3", + "PO3", + "PO9", + "O1", + "Oz", + "POz", + "O2", + "PO4", + "P1", + "Pz", + "P2", + "CP2", + "CP1", + "CPz", + "Cz", + "C1", + "FC1", + "FC3", + "REF", + "F3", + "F1", + "Fz", + "F5", + "AF7", + "AF3", + "Fp1", + "GND", + "F2", + "AF4", + "Fp2", + "F4", + "F8", + "F6", + "AF8", + "FC2", + "FC6", + "FC4", + "C2", + "C4", + "P4", + "CP4", + "PO8", + "P8", + "P6", + "CP6", + "PO10", + "TP10", + "TP8", + "FT10", + "T8", + "C6", + "FT8", ] assert set(EXPECTED_CH_NAMES) == set(EXPECTED_CH_NAMES_OLD) - montage = read_dig_captrak( - fname=data_path / "montage" / "captrak_coords.bvct" - ) + montage = read_dig_captrak(fname=data_path / "montage" / "captrak_coords.bvct") assert montage.ch_names == EXPECTED_CH_NAMES assert repr(montage) == ( - '' + "" ) montage = transform_to_head(montage) # transform_to_head has to be tested - _check_roundtrip(montage=montage, - fname=str(tmp_path / 'bvct_test.fif')) + _check_roundtrip(montage=montage, fname=tmp_path / "bvct_test-dig.fif") fid, _ = _get_fid_coords(montage.dig) assert_allclose( @@ -1008,115 +1367,125 @@ def test_read_dig_captrak(tmp_path): ) raw_bv = read_raw_brainvision(bv_raw_fname) - raw_bv.set_channel_types({"HEOG": 'eog', "VEOG": 'eog', "ECG": 'ecg'}) + raw_bv.set_channel_types({"HEOG": "eog", "VEOG": "eog", "ECG": "ecg"}) raw_bv.set_montage(montage) test_raw_bv = read_raw_fif(bv_fif_fname) # compare after set_montage using chs loc. 
- for actual, expected in zip(raw_bv.info['chs'], test_raw_bv.info['chs']): - assert_allclose(actual['loc'][:3], expected['loc'][:3]) - if actual['kind'] == FIFF.FIFFV_EEG_CH: - assert_allclose(actual['loc'][3:6], - [-0.005103, 0.05395, 0.144622], rtol=1e-04) + for actual, expected in zip(raw_bv.info["chs"], test_raw_bv.info["chs"]): + assert_allclose(actual["loc"][:3], expected["loc"][:3]) + if actual["kind"] == FIFF.FIFFV_EEG_CH: + assert_allclose( + actual["loc"][3:6], [-0.005103, 0.05395, 0.144622], rtol=1e-04 + ) # https://gist.github.com/larsoner/2264fb5895070d29a8c9aa7c0dc0e8a6 _MGH60 = ( - 'Fp1 Fpz Fp2 ' - 'AF7 AF3 AF4 AF8 ' - 'F7 F5 F3 F1 Fz F2 F4 F6 F8 ' - 'FT9 FT7 FC5 FC1 FC2 FC6 FT8 FT10 ' - 'T9 T7 C5 C3 C1 Cz C2 C4 C6 T8 T10 ' - 'TP9 TP7 CP3 CP1 CP2 CP4 TP8 TP10 ' - 'P7 P5 P3 P1 Pz P2 P4 P6 P8 ' - 'PO7 PO3 PO4 PO8 ' - 'O1 Oz O2 ' - 'Iz' + "Fp1 Fpz Fp2 " + "AF7 AF3 AF4 AF8 " + "F7 F5 F3 F1 Fz F2 F4 F6 F8 " + "FT9 FT7 FC5 FC1 FC2 FC6 FT8 FT10 " + "T9 T7 C5 C3 C1 Cz C2 C4 C6 T8 T10 " + "TP9 TP7 CP3 CP1 CP2 CP4 TP8 TP10 " + "P7 P5 P3 P1 Pz P2 P4 P6 P8 " + "PO7 PO3 PO4 PO8 " + "O1 Oz O2 " + "Iz" ).split() -@pytest.mark.parametrize('rename', ('raw', 'montage', 'custom')) +@pytest.mark.parametrize("rename", ("raw", "montage", "custom")) def test_set_montage_mgh(rename): """Test setting 'mgh60' montage to old fif.""" raw = read_raw_fif(fif_fname) eeg_picks = pick_types(raw.info, meg=False, eeg=True, exclude=()) - assert list(eeg_picks) == [ii for ii, name in enumerate(raw.ch_names) - if name.startswith('EEG')] - orig_pos = np.array([raw.info['chs'][pick]['loc'][:3] - for pick in eeg_picks]) + assert list(eeg_picks) == [ + ii for ii, name in enumerate(raw.ch_names) if name.startswith("EEG") + ] + orig_pos = np.array([raw.info["chs"][pick]["loc"][:3] for pick in eeg_picks]) atol = 1e-6 mon = None - if rename == 'raw': - raw.rename_channels(lambda x: x.replace('EEG ', 'EEG')) - raw.set_montage('mgh60') # test loading with string argument - elif rename == 'montage': - mon = make_standard_montage('mgh60') - mon.rename_channels(lambda x: x.replace('EEG', 'EEG ')) + if rename == "raw": + raw.rename_channels(lambda x: x.replace("EEG ", "EEG")) + raw.set_montage("mgh60") # test loading with string argument + elif rename == "montage": + mon = make_standard_montage("mgh60") + mon.rename_channels(lambda x: x.replace("EEG", "EEG ")) assert [raw.ch_names[pick] for pick in eeg_picks] == mon.ch_names raw.set_montage(mon) else: atol = 3e-3 # different subsets of channel locations - assert rename == 'custom' + assert rename == "custom" assert len(_MGH60) == 60 - mon = make_standard_montage('standard_1020') + mon = make_standard_montage("standard_1020") assert len(mon._get_ch_pos()) == 94 def renamer(x): try: - return 'EEG %03d' % (_MGH60.index(x) + 1,) + return f"EEG {_MGH60.index(x) + 1:03d}" except ValueError: return x - mon.rename_channels(renamer) + mon = mon.rename_channels(renamer) raw.set_montage(mon) if mon is not None: # first two are 'Fp1' and 'Fz', take them from standard_1020.elc -- # they should not be changed on load! 
want_pos = [[-29.4367, 83.9171, -6.9900], [0.1123, 88.2470, -1.7130]] - got_pos = [mon.get_positions()['ch_pos'][f'EEG {x:03d}'] * 1000 - for x in range(1, 3)] + got_pos = [ + mon.get_positions()["ch_pos"][f"EEG {x:03d}"] * 1000 for x in range(1, 3) + ] assert_allclose(want_pos, got_pos) - assert mon.dig[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI + assert mon.dig[0]["coord_frame"] == FIFF.FIFFV_COORD_MRI trans = compute_native_head_t(mon) - trans_2 = _get_trans('fsaverage', 'mri', 'head')[0] - assert trans['to'] == trans_2['to'] - assert trans['from'] == trans_2['from'] - assert_allclose(trans['trans'], trans_2['trans'], atol=1e-6) + trans_2 = _get_trans("fsaverage", "mri", "head")[0] + assert trans["to"] == trans_2["to"] + assert trans["from"] == trans_2["from"] + assert_allclose(trans["trans"], trans_2["trans"], atol=1e-6) - new_pos = np.array([ch['loc'][:3] for ch in raw.info['chs'] - if ch['ch_name'].startswith('EEG')]) - assert ((orig_pos != new_pos).all()) + new_pos = np.array( + [ch["loc"][:3] for ch in raw.info["chs"] if ch["ch_name"].startswith("EEG")] + ) + assert (orig_pos != new_pos).all() r0 = _fit_sphere(new_pos)[1] - assert_allclose(r0, [-0.001021, 0.014554, 0.041404], atol=1e-4) + assert_allclose(r0, [-0.001043, 0.01469, 0.041448], atol=1e-4) # spot check: Fp1 and Fpz - assert_allclose(new_pos[:2], [[-0.030903, 0.114585, 0.027867], - [-0.001337, 0.119102, 0.03289]], atol=atol) + assert_allclose( + new_pos[:2], + [[-0.030903, 0.114585, 0.027867], [-0.001337, 0.119102, 0.03289]], + atol=atol, + ) -@pytest.mark.parametrize('fname, montage, n_eeg, n_good, bads', [ - (fif_fname, 'mgh60', 60, 59, ['EEG 053']), - pytest.param(mgh70_fname, 'mgh70', 70, 64, None, - marks=[testing._pytest_mark()]), -]) +@pytest.mark.parametrize( + "fname, montage, n_eeg, n_good, bads", + [ + (fif_fname, "mgh60", 60, 59, ["EEG 053"]), + pytest.param( + mgh70_fname, "mgh70", 70, 64, None, marks=[testing._pytest_mark()] + ), + ], +) def test_montage_positions_similar(fname, montage, n_eeg, n_good, bads): """Test that montages give spatially similar positions.""" # 1. Prepare data: load, set bads (if missing), and filter - raw = read_raw_fif(fname).pick_types(eeg=True, exclude=()) + raw = read_raw_fif(fname).pick(picks="eeg") if bads is not None: - assert raw.info['bads'] == [] - raw.info['bads'] = bads + assert raw.info["bads"] == [] + raw.info["bads"] = bads assert len(raw.ch_names) == n_eeg - raw.pick_types(eeg=True, exclude='bads').load_data() + raw.pick(picks="eeg", exclude="bads").load_data() raw.apply_function(lambda x: x - x.mean()) # remove DC raw.filter(None, 40) # remove line noise assert len(raw.ch_names) == n_good - if montage == 'mgh60': + if montage == "mgh60": montage = make_standard_montage(montage) - montage.rename_channels(lambda n: f'EEG {n[-3:]}') + montage.rename_channels(lambda n: f"EEG {n[-3:]}") raw_mon = raw.copy().set_montage(montage) # 2. 
First test: CSDs should be similar (CSD uses 3D positions) csd = compute_current_source_density(raw).get_data() @@ -1128,8 +1497,8 @@ def test_montage_positions_similar(fname, montage, n_eeg, n_good, bads): bads = [raw.ch_names[idx] for idx in bad_picks] orig_data = raw.get_data(bad_picks) assert_allclose(orig_data, raw_mon.get_data(bad_picks)) - raw.info['bads'] = bads - raw_mon.info['bads'] = bads + raw.info["bads"] = bads + raw_mon.info["bads"] = bads raw.interpolate_bads() raw_mon.interpolate_bads() orig_data = orig_data.ravel() @@ -1139,94 +1508,83 @@ def test_montage_positions_similar(fname, montage, n_eeg, n_good, bads): assert 0.95 < corr < 0.99, corr # 4. Third test: project each to a sphere, check cosine angles are small poss = dict() - for kind, this_raw in (('orig', raw), ('mon', raw_mon)): + for kind, this_raw in (("orig", raw), ("mon", raw_mon)): pos = np.array( - list(this_raw.get_montage().get_positions()['ch_pos'].values()), - float) + list(this_raw.get_montage().get_positions()["ch_pos"].values()), float + ) pos -= np.mean(pos, axis=0) pos /= np.linalg.norm(pos, axis=1, keepdims=True) poss[kind] = pos ang = np.rad2deg( # arccos is in [0, pi] - np.arccos(np.minimum(np.sum(poss['orig'] * poss['mon'], axis=1), 1))) + np.arccos(np.minimum(np.sum(poss["orig"] * poss["mon"], axis=1), 1)) + ) assert_array_less(ang, 20) # less than 20 deg assert_array_less(0, ang) # but not equal -# XXX: this does not check ch_names + it cannot work because of write_dig -def _check_roundtrip(montage, fname, coord_frame='head'): +def _check_roundtrip(montage, fname, coord_frame="head"): """Check roundtrip writing.""" montage.save(fname, overwrite=True) montage_read = read_dig_fif(fname=fname) - assert_equal(repr(montage), repr(montage_read)) - assert_equal(_check_get_coord_frame(montage_read.dig), coord_frame) + assert repr(montage) == repr(montage_read) + assert _check_get_coord_frame(montage_read.dig) == coord_frame assert_dig_allclose(montage, montage_read) - - -def _fake_montage(ch_names): - pos = np.random.RandomState(42).randn(len(ch_names), 3) - return make_dig_montage(ch_pos=dict(zip(ch_names, pos)), - coord_frame='head') - - -cnt_ignore_warns = [ - pytest.mark.filterwarnings( - 'ignore:.*Could not parse meas date from the header. Setting to None.' - ), - pytest.mark.filterwarnings(( - 'ignore:.*Could not define the number of bytes automatically.' 
- ' Defaulting to 2.') - ), -] + assert montage.ch_names == montage_read.ch_names def test_digmontage_constructor_errors(): """Test proper error messaging.""" - with pytest.raises(ValueError, match='does not match the number'): - _ = DigMontage(ch_names=['foo', 'bar'], dig=list()) + with pytest.raises(ValueError, match="does not match the number"): + _ = DigMontage(ch_names=["foo", "bar"], dig=list()) def test_transform_to_head_and_compute_dev_head_t(): """Test transform_to_head and compute_dev_head_t.""" - EXPECTED_DEV_HEAD_T = \ - [[-3.72201691e-02, -9.98212167e-01, -4.67667497e-02, -7.31583414e-04], - [8.98064989e-01, -5.39382685e-02, 4.36543170e-01, 1.60134431e-02], - [-4.38285221e-01, -2.57513699e-02, 8.98466990e-01, 6.13035748e-02], - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]] + EXPECTED_DEV_HEAD_T = [ + [-3.72201691e-02, -9.98212167e-01, -4.67667497e-02, -7.31583414e-04], + [8.98064989e-01, -5.39382685e-02, 4.36543170e-01, 1.60134431e-02], + [-4.38285221e-01, -2.57513699e-02, 8.98466990e-01, 6.13035748e-02], + [0.00000000e00, 0.00000000e00, 0.00000000e00, 1.00000000e00], + ] EXPECTED_FID_IN_POLHEMUS = { - 'nasion': np.array([0.001393, 0.0131613, -0.0046967]), - 'lpa': np.array([-0.0624997, -0.0737271, 0.07996]), - 'rpa': np.array([-0.0748957, 0.0873785, 0.0811943]), + "nasion": np.array([0.001393, 0.0131613, -0.0046967]), + "lpa": np.array([-0.0624997, -0.0737271, 0.07996]), + "rpa": np.array([-0.0748957, 0.0873785, 0.0811943]), } EXPECTED_FID_IN_HEAD = { - 'nasion': np.array([-8.94466792e-18, 1.10559624e-01, -3.85185989e-34]), - 'lpa': np.array([-8.10816716e-02, 6.56321671e-18, 0]), - 'rpa': np.array([8.05048781e-02, -6.47441364e-18, 0]), + "nasion": np.array([-8.94466792e-18, 1.10559624e-01, -3.85185989e-34]), + "lpa": np.array([-8.10816716e-02, 6.56321671e-18, 0]), + "rpa": np.array([8.05048781e-02, -6.47441364e-18, 0]), } hpi_dev = np.array( - [[ 2.13951493e-02, 8.47444056e-02, -5.65431188e-02], # noqa - [ 2.10299433e-02, -8.03141101e-02, -6.34420259e-02], # noqa - [ 1.05916829e-01, 8.18485672e-05, 1.19928083e-02], # noqa - [ 9.26595105e-02, 4.64804385e-02, 8.45141253e-03], # noqa - [ 9.42554419e-02, -4.35206589e-02, 8.78999363e-03]] # noqa + [ + [2.13951493e-02, 8.47444056e-02, -5.65431188e-02], # noqa + [2.10299433e-02, -8.03141101e-02, -6.34420259e-02], # noqa + [1.05916829e-01, 8.18485672e-05, 1.19928083e-02], # noqa + [9.26595105e-02, 4.64804385e-02, 8.45141253e-03], # noqa + [9.42554419e-02, -4.35206589e-02, 8.78999363e-03], + ] # noqa ) hpi_polhemus = np.array( - [[-0.0595004, -0.0704836, 0.075893 ], # noqa - [-0.0646373, 0.0838228, 0.0762123], # noqa - [-0.0135035, 0.0072522, -0.0268405], # noqa - [-0.0202967, -0.0351498, -0.0129305], # noqa - [-0.0277519, 0.0452628, -0.0222407]] # noqa + [ + [-0.0595004, -0.0704836, 0.075893], # noqa + [-0.0646373, 0.0838228, 0.0762123], # noqa + [-0.0135035, 0.0072522, -0.0268405], # noqa + [-0.0202967, -0.0351498, -0.0129305], # noqa + [-0.0277519, 0.0452628, -0.0222407], + ] # noqa ) montage_polhemus = make_dig_montage( - **EXPECTED_FID_IN_POLHEMUS, hpi=hpi_polhemus, coord_frame='unknown' + **EXPECTED_FID_IN_POLHEMUS, hpi=hpi_polhemus, coord_frame="unknown" ) - montage_meg = make_dig_montage(hpi=hpi_dev, coord_frame='meg') + montage_meg = make_dig_montage(hpi=hpi_dev, coord_frame="meg") # Test regular workflow to get dev_head_t montage = montage_polhemus + montage_meg @@ -1234,7 +1592,7 @@ def test_transform_to_head_and_compute_dev_head_t(): for kk in fids: assert_allclose(fids[kk], 
EXPECTED_FID_IN_POLHEMUS[kk], atol=1e-5) - with pytest.raises(ValueError, match='set to head coordinate system'): + with pytest.raises(ValueError, match="set to head coordinate system"): _ = compute_dev_head_t(montage) montage = transform_to_head(montage) @@ -1244,39 +1602,43 @@ def test_transform_to_head_and_compute_dev_head_t(): assert_allclose(fids[kk], EXPECTED_FID_IN_HEAD[kk], atol=1e-5) dev_head_t = compute_dev_head_t(montage) - assert_allclose(dev_head_t['trans'], EXPECTED_DEV_HEAD_T, atol=5e-7) + assert_allclose(dev_head_t["trans"], EXPECTED_DEV_HEAD_T, atol=5e-7) # Test errors when number of HPI points do not match - EXPECTED_ERR_MSG = 'Device-to-Head .*Got 0 .*device and 5 points in head' + EXPECTED_ERR_MSG = "Device-to-Head .*Got 0 .*device and 5 points in head" with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): _ = compute_dev_head_t(transform_to_head(montage_polhemus)) - EXPECTED_ERR_MSG = 'Device-to-Head .*Got 5 .*device and 0 points in head' + EXPECTED_ERR_MSG = "Device-to-Head .*Got 5 .*device and 0 points in head" with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): - _ = compute_dev_head_t(transform_to_head( - montage_meg + make_dig_montage(**EXPECTED_FID_IN_POLHEMUS) - )) + _ = compute_dev_head_t( + transform_to_head( + montage_meg + make_dig_montage(**EXPECTED_FID_IN_POLHEMUS) + ) + ) - EXPECTED_ERR_MSG = 'Device-to-Head .*Got 3 .*device and 5 points in head' + EXPECTED_ERR_MSG = "Device-to-Head .*Got 3 .*device and 5 points in head" with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): - _ = compute_dev_head_t(transform_to_head( - DigMontage(dig=_format_dig_points(montage_meg.dig[:3])) + - montage_polhemus - )) + _ = compute_dev_head_t( + transform_to_head( + DigMontage(dig=_format_dig_points(montage_meg.dig[:3])) + + montage_polhemus + ) + ) def test_set_montage_with_mismatching_ch_names(): """Test setting a DigMontage with mismatching ch_names.""" raw = read_raw_fif(fif_fname) - montage = make_standard_montage('mgh60') + montage = make_standard_montage("mgh60") # 'EEG 001' and 'EEG001' won't match - missing_err = '60 channel positions not present' + missing_err = "60 channel positions not present" with pytest.raises(ValueError, match=missing_err): raw.set_montage(montage) montage.ch_names = [ # modify the names in place - name.replace('EEG', 'EEG ') for name in montage.ch_names + name.replace("EEG", "EEG ") for name in montage.ch_names ] raw.set_montage(montage) # does not raise @@ -1287,90 +1649,97 @@ def test_set_montage_with_mismatching_ch_names(): # should work raw.set_montage(montage, match_case=False) raw.rename_channels(lambda x: x.upper()) # restore - assert 'EEG 001' in raw.ch_names and 'eeg 001' not in raw.ch_names - raw.rename_channels({'EEG 002': 'eeg 001'}) - assert 'EEG 001' in raw.ch_names and 'eeg 001' in raw.ch_names - with pytest.warns(RuntimeWarning, match='changed from V to NA'): - raw.set_channel_types({'eeg 001': 'misc'}) + assert "EEG 001" in raw.ch_names and "eeg 001" not in raw.ch_names + raw.rename_channels({"EEG 002": "eeg 001"}) + assert "EEG 001" in raw.ch_names and "eeg 001" in raw.ch_names + with pytest.warns(RuntimeWarning, match="changed from V to NA"): + raw.set_channel_types({"eeg 001": "misc"}) raw.set_montage(montage) - with pytest.warns(RuntimeWarning, match='changed from NA to V'): - raw.set_channel_types({'eeg 001': 'eeg'}) - with pytest.raises(ValueError, match='1 channel position not present'): + with pytest.warns(RuntimeWarning, match="changed from NA to V"): + raw.set_channel_types({"eeg 001": "eeg"}) + with 
pytest.raises(ValueError, match="1 channel position not present"): raw.set_montage(montage) - with pytest.raises(ValueError, match='match_case=False as 1 channel name'): + with pytest.raises(ValueError, match="match_case=False as 1 channel name"): raw.set_montage(montage, match_case=False) - info = create_info(['EEG 001'], 1000., 'eeg') - mon = make_dig_montage({'EEG 001': np.zeros(3), 'eeg 001': np.zeros(3)}, - nasion=[0, 1., 0], rpa=[1., 0, 0], lpa=[-1., 0, 0]) + info = create_info(["EEG 001"], 1000.0, "eeg") + mon = make_dig_montage( + {"EEG 001": np.zeros(3), "eeg 001": np.zeros(3)}, + nasion=[0, 1.0, 0], + rpa=[1.0, 0, 0], + lpa=[-1.0, 0, 0], + ) info.set_montage(mon) - with pytest.raises(ValueError, match='match_case=False as 1 montage name'): + with pytest.raises(ValueError, match="match_case=False as 1 montage name"): info.set_montage(mon, match_case=False) def test_set_montage_with_sub_super_set_of_ch_names(): """Test info and montage ch_names matching criteria.""" - N_CHANNELS = len('abcdef') - montage = _make_toy_dig_montage(N_CHANNELS, coord_frame='head') + N_CHANNELS = len("abcdef") + montage = _make_toy_dig_montage(N_CHANNELS, coord_frame="head") # montage and info match - info = create_info(ch_names=list('abcdef'), sfreq=1, ch_types='eeg') + info = create_info(ch_names=list("abcdef"), sfreq=1, ch_types="eeg") info.set_montage(montage) # montage is a SUPERset of info - info = create_info(list('abc'), sfreq=1, ch_types='eeg') + info = create_info(list("abc"), sfreq=1, ch_types="eeg") info.set_montage(montage) - assert len(info['dig']) == len(list('abc')) + 3 # 3 fiducials + assert len(info["dig"]) == len(list("abc")) + 3 # 3 fiducials # montage is a SUBset of info - _MSG = 'subset of info. There are 2 .* not present in the DigMontage' - info = create_info(ch_names=list('abcdfgh'), sfreq=1, ch_types='eeg') + _MSG = "subset of info. 
There are 2 .* not present in the DigMontage" + info = create_info(ch_names=list("abcdfgh"), sfreq=1, ch_types="eeg") with pytest.raises(ValueError, match=_MSG) as exc: info.set_montage(montage) # plus suggestions - assert exc.match('set_channel_types') - assert exc.match('on_missing') + assert exc.match("set_channel_types") + assert exc.match("on_missing") def test_set_montage_with_known_aliases(): """Test matching unrecognized channel locations to known aliases.""" # montage and info match - mock_montage_ch_names = ['POO7', 'POO8'] + mock_montage_ch_names = ["POO7", "POO8"] n_channels = len(mock_montage_ch_names) - montage = make_dig_montage(ch_pos=dict( - zip( - mock_montage_ch_names, - np.arange(n_channels * 3).reshape(n_channels, 3), - )), - coord_frame='head') + montage = make_dig_montage( + ch_pos=dict( + zip( + mock_montage_ch_names, + np.arange(n_channels * 3).reshape(n_channels, 3), + ) + ), + coord_frame="head", + ) - mock_info_ch_names = ['Cb1', 'Cb2'] - info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types='eeg') + mock_info_ch_names = ["Cb1", "Cb2"] + info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types="eeg") info.set_montage(montage, match_alias=True) # work with match_case - mock_info_ch_names = ['cb1', 'cb2'] - info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types='eeg') + mock_info_ch_names = ["cb1", "cb2"] + info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types="eeg") info.set_montage(montage, match_case=False, match_alias=True) # should warn user T1 instead of its alias T9 - mock_info_ch_names = ['Cb1', 'T1'] - info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types='eeg') - with pytest.raises(ValueError, match='T1'): + mock_info_ch_names = ["Cb1", "T1"] + info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types="eeg") + with pytest.raises(ValueError, match="T1"): info.set_montage(montage, match_case=False, match_alias=True) def test_heterogeneous_ch_type(): """Test ch_names matching criteria with heterogeneous ch_type.""" - VALID_MONTAGE_NAMED_CHS = ('eeg', 'ecog', 'seeg', 'dbs') + VALID_MONTAGE_NAMED_CHS = ("eeg", "ecog", "seeg", "dbs") montage = _make_toy_dig_montage( n_channels=len(VALID_MONTAGE_NAMED_CHS), - coord_frame='head', + coord_frame="head", ) # Montage and info match - info = create_info(montage.ch_names, 1., list(VALID_MONTAGE_NAMED_CHS)) + info = create_info(montage.ch_names, 1.0, list(VALID_MONTAGE_NAMED_CHS)) RawArray(np.zeros((4, 1)), info, copy=None).set_montage(montage) @@ -1379,45 +1748,46 @@ def test_set_montage_coord_frame_in_head_vs_unknown(): N_CHANNELS, NaN = 3, np.nan raw = _make_toy_raw(N_CHANNELS) - montage_in_head = _make_toy_dig_montage(N_CHANNELS, coord_frame='head') - montage_in_unknown = _make_toy_dig_montage( - N_CHANNELS, coord_frame='unknown' - ) + montage_in_head = _make_toy_dig_montage(N_CHANNELS, coord_frame="head") + montage_in_unknown = _make_toy_dig_montage(N_CHANNELS, coord_frame="unknown") montage_in_unknown_with_fid = _make_toy_dig_montage( - N_CHANNELS, coord_frame='unknown', - nasion=[0, 1, 0], lpa=[1, 0, 0], rpa=[-1, 0, 0], + N_CHANNELS, + coord_frame="unknown", + nasion=[0, 1, 0], + lpa=[1, 0, 0], + rpa=[-1, 0, 0], ) assert_allclose( - actual=np.array([ch['loc'] for ch in raw.info['chs']]), - desired=np.full((N_CHANNELS, 12), np.nan) + actual=np.array([ch["loc"] for ch in raw.info["chs"]]), + desired=np.full((N_CHANNELS, 12), np.nan), ) raw.set_montage(montage_in_head) assert_allclose( - actual=np.array([ch['loc'] for ch in raw.info['chs']]), + 
actual=np.array([ch["loc"] for ch in raw.info["chs"]]), desired=[ - [0., 1., 2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], - [3., 4., 5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], - [6., 7., 8., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], - ] + [0.0, 1.0, 2.0, 0.0, 0.0, 0.0, NaN, NaN, NaN, NaN, NaN, NaN], + [3.0, 4.0, 5.0, 0.0, 0.0, 0.0, NaN, NaN, NaN, NaN, NaN, NaN], + [6.0, 7.0, 8.0, 0.0, 0.0, 0.0, NaN, NaN, NaN, NaN, NaN, NaN], + ], ) - with pytest.warns(RuntimeWarning, match='assuming identity'): + with pytest.warns(RuntimeWarning, match="assuming identity"): raw.set_montage(montage_in_unknown) raw.set_montage(montage_in_unknown_with_fid) assert_allclose( - actual=np.array([ch['loc'] for ch in raw.info['chs']]), + actual=np.array([ch["loc"] for ch in raw.info["chs"]]), desired=[ - [-0., 1., -2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], - [-3., 4., -5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], - [-6., 7., -8., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], - ] + [-0.0, 1.0, -2.0, 0.0, 0.0, 0.0, NaN, NaN, NaN, NaN, NaN, NaN], + [-3.0, 4.0, -5.0, 0.0, 0.0, 0.0, NaN, NaN, NaN, NaN, NaN, NaN], + [-6.0, 7.0, -8.0, 0.0, 0.0, 0.0, NaN, NaN, NaN, NaN, NaN, NaN], + ], ) # check no collateral effects from transforming montage - assert _check_get_coord_frame(montage_in_unknown_with_fid.dig) == 'unknown' + assert _check_get_coord_frame(montage_in_unknown_with_fid.dig) == "unknown" assert_array_equal( _get_dig_montage_pos(montage_in_unknown_with_fid), [[0, 1, 2], [3, 4, 5], [6, 7, 8]], @@ -1425,41 +1795,42 @@ def test_set_montage_coord_frame_in_head_vs_unknown(): @testing.requires_testing_data -@pytest.mark.parametrize('ch_type', ('eeg', 'ecog', 'seeg', 'dbs')) +@pytest.mark.parametrize("ch_type", ("eeg", "ecog", "seeg", "dbs")) def test_montage_head_frame(ch_type): """Test that head frame is set properly.""" # gh-9446 data = np.random.randn(2, 100) - info = create_info(['a', 'b'], 512, ch_type) - for ch in info['chs']: - assert ch['coord_frame'] == FIFF.FIFFV_COORD_HEAD + info = create_info(["a", "b"], 512, ch_type) + for ch in info["chs"]: + assert ch["coord_frame"] == FIFF.FIFFV_COORD_HEAD raw = RawArray(data, info) - ch_pos = dict(a=[-0.00250136, 0.04913788, 0.05047056], - b=[-0.00528394, 0.05066484, 0.05061559]) - lpa, nasion, rpa = get_mni_fiducials( - 'fsaverage', subjects_dir=subjects_dir) - lpa, nasion, rpa = lpa['r'], nasion['r'], rpa['r'] + ch_pos = dict( + a=[-0.00250136, 0.04913788, 0.05047056], b=[-0.00528394, 0.05066484, 0.05061559] + ) + lpa, nasion, rpa = get_mni_fiducials("fsaverage", subjects_dir=subjects_dir) + lpa, nasion, rpa = lpa["r"], nasion["r"], rpa["r"] montage = make_dig_montage( - ch_pos, coord_frame='mri', nasion=nasion, lpa=lpa, rpa=rpa) + ch_pos, coord_frame="mri", nasion=nasion, lpa=lpa, rpa=rpa + ) mri_head_t = compute_native_head_t(montage) raw.set_montage(montage) pos = apply_trans(mri_head_t, np.array(list(ch_pos.values()))) - for p, ch in zip(pos, raw.info['chs']): - assert ch['coord_frame'] == FIFF.FIFFV_COORD_HEAD - assert_allclose(p, ch['loc'][:3]) + for p, ch in zip(pos, raw.info["chs"]): + assert ch["coord_frame"] == FIFF.FIFFV_COORD_HEAD + assert_allclose(p, ch["loc"][:3]) # Also test that including channels in the montage that will not have their # positions set will emit a warning - with pytest.warns(RuntimeWarning, match='changed from V to NA'): - raw.set_channel_types(dict(a='misc')) - with pytest.warns(RuntimeWarning, match='Not setting .*of 1 misc channel'): + with pytest.warns(RuntimeWarning, match="changed from V to NA"): + 
raw.set_channel_types(dict(a="misc")) + with pytest.warns(RuntimeWarning, match="Not setting .*of 1 misc channel"): raw.set_montage(montage) # and with a bunch of bad types raw = read_raw_fif(fif_fname) ch_pos = {ch_name: np.zeros(3) for ch_name in raw.ch_names} - mon = make_dig_montage(ch_pos, coord_frame='head') - with pytest.warns(RuntimeWarning, match='316 eog/grad/mag/stim channels'): + mon = make_dig_montage(ch_pos, coord_frame="head") + with pytest.warns(RuntimeWarning, match="316 eog/grad/mag/stim channels"): raw.set_montage(mon) @@ -1468,39 +1839,44 @@ def test_set_montage_with_missing_coordinates(): N_CHANNELS, NaN = 3, np.nan raw = _make_toy_raw(N_CHANNELS) - raw.set_channel_types({ch: 'ecog' for ch in raw.ch_names}) + raw.set_channel_types({ch: "ecog" for ch in raw.ch_names}) # don't include all the channels ch_names = raw.ch_names[1:] n_channels = len(ch_names) ch_coords = np.arange(n_channels * 3).reshape(n_channels, 3) montage_in_mri = make_dig_montage( - ch_pos=dict(zip(ch_names, ch_coords,)), - coord_frame='unknown', - nasion=[0, 1, 0], lpa=[1, 0, 0], rpa=[-1, 0, 0], + ch_pos=dict( + zip( + ch_names, + ch_coords, + ) + ), + coord_frame="unknown", + nasion=[0, 1, 0], + lpa=[1, 0, 0], + rpa=[-1, 0, 0], ) - with pytest.raises(ValueError, match='DigMontage is ' - 'only a subset of info'): + with pytest.raises(ValueError, match="DigMontage is only a subset of info"): raw.set_montage(montage_in_mri) - with pytest.raises(ValueError, match='Invalid value'): - raw.set_montage(montage_in_mri, on_missing='foo') + with pytest.raises(ValueError, match="Invalid value"): + raw.set_montage(montage_in_mri, on_missing="foo") - with pytest.raises(TypeError, match='must be an instance'): + with pytest.raises(TypeError, match="must be an instance"): raw.set_montage(montage_in_mri, on_missing=True) - with pytest.warns(RuntimeWarning, match='DigMontage is ' - 'only a subset of info'): - raw.set_montage(montage_in_mri, on_missing='warn') + with pytest.warns(RuntimeWarning, match="DigMontage is only a subset of info"): + raw.set_montage(montage_in_mri, on_missing="warn") - raw.set_montage(montage_in_mri, on_missing='ignore') + raw.set_montage(montage_in_mri, on_missing="ignore") assert_allclose( - actual=np.array([ch['loc'] for ch in raw.info['chs']]), + actual=np.array([ch["loc"] for ch in raw.info["chs"]]), desired=[ [NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], - [0., 1., -2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], - [-3., 4., -5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN], - ] + [0.0, 1.0, -2.0, 0.0, 0.0, 0.0, NaN, NaN, NaN, NaN, NaN, NaN], + [-3.0, 4.0, -5.0, 0.0, 0.0, 0.0, NaN, NaN, NaN, NaN, NaN, NaN], + ], ) @@ -1513,16 +1889,16 @@ def test_get_montage(): # 1. 
read in testing data and assert montage roundtrip # for testing dataset: 'test_raw.fif' raw = read_raw_fif(fif_fname) - raw = raw.rename_channels(lambda name: name.replace('EEG ', 'EEG')) + raw = raw.rename_channels(lambda name: name.replace("EEG ", "EEG")) raw2 = raw.copy() # get montage and then set montage and # it should be the same montage = raw.get_montage() - raw.set_montage(montage, on_missing='raise') + raw.set_montage(montage, on_missing="raise") test_montage = raw.get_montage() - assert_object_equal(raw.info['chs'], raw2.info['chs']) + assert_object_equal(raw.info["chs"], raw2.info["chs"]) assert_dig_allclose(raw2.info, raw.info) - assert_object_equal(raw2.info['dig'], raw.info['dig']) + assert_object_equal(raw2.info["dig"], raw.info["dig"]) # the montage does not change assert_object_equal(montage.dig, test_montage.dig) @@ -1532,7 +1908,7 @@ def test_get_montage(): assert_object_equal(test2_montage.dig, test_montage.dig) # 2. now do a standard montage - montage = make_standard_montage('mgh60') + montage = make_standard_montage("mgh60") # set the montage; note renaming to make standard montage map raw.set_montage(montage) @@ -1540,20 +1916,20 @@ def test_get_montage(): # the channel locations should be the same raw2 = raw.copy() test_montage = raw.get_montage() - raw.set_montage(test_montage, on_missing='ignore') + raw.set_montage(test_montage, on_missing="ignore") # the montage should fulfill a roundtrip with make_dig_montage test2_montage = make_dig_montage(**test_montage.get_positions()) assert_object_equal(test2_montage.dig, test_montage.dig) # chs should not change - assert_object_equal(raw2.info['chs'], raw.info['chs']) + assert_object_equal(raw2.info["chs"], raw.info["chs"]) # dig order might be different after set_montage assert montage.ch_names == test_montage.ch_names # note that test_montage will have different coordinate frame # compared to standard montage assert_dig_allclose(raw2.info, raw.info) - assert_object_equal(raw2.info['dig'], raw.info['dig']) + assert_object_equal(raw2.info["dig"], raw.info["dig"]) # 3. if montage gets set to None raw.set_montage(None) @@ -1561,7 +1937,7 @@ def test_get_montage(): # 4. 
read in BV test dataset and make sure montage # fulfills roundtrip on non-standard montage - dig_montage = read_dig_fif(fif_dig_montage_fname) + dig_montage = read_dig_fif(fif_dig_montage_fname, verbose="error") # Make a BrainVision file like the one the user would have had # with testing dataset 'test.vhdr' @@ -1571,15 +1947,15 @@ def test_get_montage(): # rename channels to make it have the full set # of channels mapping = dict() - for ii, ch_name in enumerate(raw_bv.ch_names): - mapping[ch_name] = 'EEG%03d' % (ii + 1,) + for ii, ch_name in enumerate(raw_bv.ch_names, 1): + mapping[ch_name] = f"EEG{ii:03d}" raw_bv.rename_channels(mapping) - for ii, ch_name in enumerate(raw_bv_2.ch_names): - mapping[ch_name] = 'EEG%03d' % (ii + 33,) + for ii, ch_name in enumerate(raw_bv_2.ch_names, 33): + mapping[ch_name] = f"EEG{ii:03d}" raw_bv_2.rename_channels(mapping) raw_bv.add_channels([raw_bv_2]) - for ch in raw_bv.info['chs']: - ch['kind'] = FIFF.FIFFV_EEG_CH + for ch in raw_bv.info["chs"]: + ch["kind"] = FIFF.FIFFV_EEG_CH # Set the montage and roundtrip raw_bv.set_montage(dig_montage) @@ -1587,14 +1963,14 @@ def test_get_montage(): # reset the montage test_montage = raw_bv.get_montage() - raw_bv.set_montage(test_montage, on_missing='ignore') + raw_bv.set_montage(test_montage, on_missing="ignore") # dig order might be different after set_montage - assert_object_equal(raw_bv2.info['dig'], raw_bv.info['dig']) + assert_object_equal(raw_bv2.info["dig"], raw_bv.info["dig"]) assert_dig_allclose(raw_bv2.info, raw_bv.info) # if dig is not set in the info, then montage returns None with raw.info._unlock(): - raw.info['dig'] = None + raw.info["dig"] = None assert raw.get_montage() is None # the montage should fulfill a roundtrip with make_dig_montage @@ -1607,14 +1983,13 @@ def test_read_dig_hpts(): fname = io_dir / "brainvision" / "tests" / "data" / "test.hpts" montage = read_dig_hpts(fname) assert repr(montage) == ( - '' + "" ) def test_get_builtin_montages(): """Test help function to obtain builtin montages.""" - EXPECTED_COUNT = 27 + EXPECTED_COUNT = 28 montages = get_builtin_montages() assert len(montages) == EXPECTED_COUNT @@ -1629,13 +2004,12 @@ def test_get_builtin_montages(): def test_plot_montage(): """Test plotting montage.""" # gh-8025 + pytest.importorskip("defusedxml") montage = read_dig_captrak(bvct_dig_montage_fname) montage.plot() - plt.close('all') f, ax = plt.subplots(1, 1) montage.plot(axes=ax) - plt.close("all") with pytest.raises(TypeError, match="must be an instance of Axes"): montage.plot(axes=101) @@ -1648,12 +2022,12 @@ def test_plot_montage(): def test_montage_equality(): """Test montage equality.""" rng = np.random.RandomState(0) - fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3))) + fiducials = dict(zip(("nasion", "lpa", "rpa"), rng.rand(3, 3))) # hsp positions are [1X, 1X, 1X] - hsp1 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 11.)) - hsp2 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 12.)) - hsp2_identical = make_dig_montage(**fiducials, hsp=np.full((2, 3), 12.)) + hsp1 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 11.0)) + hsp2 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 12.0)) + hsp2_identical = make_dig_montage(**fiducials, hsp=np.full((2, 3), 12.0)) assert hsp1 != hsp2 assert hsp2 == hsp2_identical @@ -1664,45 +2038,46 @@ def test_montage_add_fiducials(): """Test montage can add estimated fiducials for rpa, lpa, nas.""" # get the fiducials from test file subjects_dir = data_path / "subjects" - subject = 'sample' + subject = "sample" 
fid_fname = subjects_dir / subject / "bem" / "sample-fiducials.fif" - test_fids, test_coord_frame = read_fiducials(fid_fname) - test_fids = np.array([f['r'] for f in test_fids]) + test_fids, _ = read_fiducials(fid_fname) + test_fids = np.array([f["r"] for f in test_fids]) # create test montage and add estimated fiducials - test_ch_pos = {'A1': [0, 0, 0]} - montage = make_dig_montage(ch_pos=test_ch_pos, coord_frame='mri') + test_ch_pos = {"A1": [0, 0, 0]} + montage = make_dig_montage(ch_pos=test_ch_pos, coord_frame="mri") montage.add_estimated_fiducials(subject=subject, subjects_dir=subjects_dir) # check that adding MNI fiducials fails because we're in MRI - with pytest.raises(RuntimeError, match='Montage should be in the ' - '"mni_tal" coordinate frame'): + with pytest.raises( + RuntimeError, match='Montage should be in the "mni_tal" coordinate frame' + ): montage.add_mni_fiducials(subjects_dir=subjects_dir) # check that these fiducials are close to the estimated fiducials ch_pos = montage.get_positions() - fids_est = [ch_pos['lpa'], ch_pos['nasion'], ch_pos['rpa']] + fids_est = [ch_pos["lpa"], ch_pos["nasion"], ch_pos["rpa"]] - dists = np.linalg.norm(test_fids - fids_est, axis=-1) * 1000. # -> mm + dists = np.linalg.norm(test_fids - fids_est, axis=-1) * 1000.0 # -> mm assert (dists < 8).all(), dists # an error should be raised if the montage is not in `mri` coord_frame # which is the FreeSurfer RAS - montage = make_dig_montage(ch_pos=test_ch_pos, coord_frame='mni_tal') - with pytest.raises(RuntimeError, match='Montage should be in the ' - '"mri" coordinate frame'): - montage.add_estimated_fiducials(subject=subject, - subjects_dir=subjects_dir) + montage = make_dig_montage(ch_pos=test_ch_pos, coord_frame="mni_tal") + with pytest.raises( + RuntimeError, match='Montage should be in the "mri" coordinate frame' + ): + montage.add_estimated_fiducials(subject=subject, subjects_dir=subjects_dir) # test that adding MNI fiducials works montage.add_mni_fiducials(subjects_dir=subjects_dir) - test_fids = get_mni_fiducials('fsaverage', subjects_dir=subjects_dir) + test_fids = get_mni_fiducials("fsaverage", subjects_dir=subjects_dir) for fid, test_fid in zip(montage.dig[:3], test_fids): - assert_array_equal(fid['r'], test_fid['r']) + assert_array_equal(fid["r"], test_fid["r"]) # test remove fiducials montage.remove_fiducials() - assert all([d['kind'] != FIFF.FIFFV_POINT_CARDINAL for d in montage.dig]) + assert all([d["kind"] != FIFF.FIFFV_POINT_CARDINAL for d in montage.dig]) def test_read_dig_localite(tmp_path): @@ -1727,23 +2102,23 @@ def test_read_dig_localite(tmp_path): 17,ch14,-61.16539571,-61.86866187,26.23986153 18,ch15,-55.82855386,-34.77319103,25.8083942""" - fname = tmp_path / 'localite.csv' - with open(fname, 'w') as f: - for row in contents.split('\n'): - f.write(f'{row.lstrip()}\n') + fname = tmp_path / "localite.csv" + with open(fname, "w") as f: + for row in contents.split("\n"): + f.write(f"{row.lstrip()}\n") montage = read_dig_localite(fname, nasion="Nasion", lpa="LPA", rpa="RPA") - s = '' + s = "" assert repr(montage) == s - assert montage.ch_names == [f'ch{i:02}' for i in range(1, 16)] + assert montage.ch_names == [f"ch{i:02}" for i in range(1, 16)] def test_make_wrong_dig_montage(): """Test that a montage with non numeric is not possible.""" - make_dig_montage(ch_pos={'A1': ['0', '0', '0']}) # converted to floats + make_dig_montage(ch_pos={"A1": ["0", "0", "0"]}) # converted to floats with pytest.raises(ValueError, match="could not convert string to float"): - 
make_dig_montage(ch_pos={'A1': ['a', 'b', 'c']}) + make_dig_montage(ch_pos={"A1": ["a", "b", "c"]}) with pytest.raises(TypeError, match="instance of ndarray, list, or tuple"): - make_dig_montage(ch_pos={'A1': 5}) + make_dig_montage(ch_pos={"A1": 5}) @testing.requires_testing_data @@ -1759,15 +2134,14 @@ def test_fnirs_montage(): assert num_detectors == 13 # Make a change to the montage before setting - raw.info['chs'][2]['loc'][:3] = [1., 2, 3] + raw.info["chs"][2]["loc"][:3] = [1.0, 2, 3] # Set montage back to original raw.set_montage(mtg) for ch in range(len(raw.ch_names)): - assert_array_equal(info_orig['chs'][ch]['loc'], - raw.info['chs'][ch]['loc']) + assert_array_equal(info_orig["chs"][ch]["loc"], raw.info["chs"][ch]["loc"]) # Mixed channel types not supported yet - raw.set_channel_types({ch_name: 'eeg' for ch_name in raw.ch_names[-2:]}) - with pytest.raises(ValueError, match='mix of fNIRS'): + raw.set_channel_types({ch_name: "eeg" for ch_name in raw.ch_names[-2:]}) + with pytest.raises(ValueError, match="mix of fNIRS"): raw.get_montage() diff --git a/mne/channels/tests/test_standard_montage.py b/mne/channels/tests/test_standard_montage.py index 49fffaa4ab3..b12b416643d 100644 --- a/mne/channels/tests/test_standard_montage.py +++ b/mne/channels/tests/test_standard_montage.py @@ -1,27 +1,23 @@ -# Authors: Joan Massich -# Alexandre Gramfort -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import pytest - import numpy as np - -from numpy.testing import (assert_allclose, assert_array_almost_equal, - assert_raises) +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal, assert_raises from mne import create_info -from mne.channels import make_standard_montage, compute_native_head_t -from mne.channels.montage import get_builtin_montages, HEAD_SIZE_DEFAULT +from mne._fiff._digitization import _get_dig_eeg, _get_fid_coords +from mne._fiff.constants import FIFF +from mne.channels import compute_native_head_t, make_standard_montage +from mne.channels.montage import HEAD_SIZE_DEFAULT, get_builtin_montages from mne.io import RawArray -from mne.io._digitization import _get_dig_eeg, _get_fid_coords -from mne.io.constants import FIFF -from mne.preprocessing.nirs import optical_density, beer_lambert_law -from mne.transforms import _get_trans, _angle_between_quats, rot_to_quat +from mne.preprocessing.nirs import beer_lambert_law, optical_density +from mne.transforms import _angle_between_quats, _get_trans, rot_to_quat -@pytest.mark.parametrize('kind', get_builtin_montages()) +@pytest.mark.parametrize("kind", get_builtin_montages()) def test_standard_montages_have_fids(kind): """Test standard montage are all in unknown coord (have fids).""" montage = make_standard_montage(kind) @@ -29,44 +25,47 @@ def test_standard_montages_have_fids(kind): for k, v in fids.items(): assert v is not None, k for d in montage.dig: - if kind.startswith(('artinis', 'standard', 'mgh')): + if kind.startswith(("artinis", "standard", "mgh")): want = FIFF.FIFFV_COORD_MRI else: want = FIFF.FIFFV_COORD_UNKNOWN - assert d['coord_frame'] == want + assert d["coord_frame"] == want def test_standard_montage_errors(): """Test error handling for wrong keys.""" _msg = "Invalid value for the 'kind' parameter..*but got.*not-here" with pytest.raises(ValueError, match=_msg): - _ = make_standard_montage('not-here') - - -@pytest.mark.parametrize('head_size', (HEAD_SIZE_DEFAULT, 0.05)) -@pytest.mark.parametrize('kind, tol', [ - ['EGI_256', 1e-5], - 
['easycap-M1', 1e-8], - ['easycap-M10', 1e-8], - ['biosemi128', 1e-8], - ['biosemi16', 1e-8], - ['biosemi160', 1e-8], - ['biosemi256', 1e-8], - ['biosemi32', 1e-8], - ['biosemi64', 1e-8], - ['brainproducts-RNP-BA-128', 1e-8] -]) + _ = make_standard_montage("not-here") + + +@pytest.mark.parametrize("head_size", (HEAD_SIZE_DEFAULT, 0.05)) +@pytest.mark.parametrize( + "kind, tol", + [ + ["EGI_256", 1e-5], + ["easycap-M1", 1e-8], + ["easycap-M10", 1e-8], + ["biosemi128", 1e-8], + ["biosemi16", 1e-8], + ["biosemi160", 1e-8], + ["biosemi256", 1e-8], + ["biosemi32", 1e-8], + ["biosemi64", 1e-8], + ["brainproducts-RNP-BA-128", 1e-8], + ], +) def test_standard_montages_on_sphere(kind, tol, head_size): """Test some standard montage are on sphere.""" kwargs = dict() if head_size != HEAD_SIZE_DEFAULT: - kwargs['head_size'] = head_size + kwargs["head_size"] = head_size montage = make_standard_montage(kind, **kwargs) - eeg_loc = np.array([ch['r'] for ch in _get_dig_eeg(montage.dig)]) + eeg_loc = np.array([ch["r"] for ch in _get_dig_eeg(montage.dig)]) assert_allclose( actual=np.linalg.norm(eeg_loc, axis=1), - desired=np.full((eeg_loc.shape[0], ), head_size), + desired=np.full((eeg_loc.shape[0],), head_size), atol=tol, ) @@ -74,14 +73,14 @@ def test_standard_montages_on_sphere(kind, tol, head_size): def test_standard_superset(): """Test some properties that should hold for superset montages.""" # new montages, tweaked to end up at the same size as the others - m_1005 = make_standard_montage('standard_1005', 0.0970) - m_1020 = make_standard_montage('standard_1020', 0.0991) + m_1005 = make_standard_montage("standard_1005", 0.0970) + m_1020 = make_standard_montage("standard_1020", 0.0991) assert len(set(m_1005.ch_names) - set(m_1020.ch_names)) > 0 # XXX weird that this is not a proper superset... - assert set(m_1020.ch_names) - set(m_1005.ch_names) == {'O10', 'O9'} + assert set(m_1020.ch_names) - set(m_1005.ch_names) == {"O10", "O9"} c_1005 = m_1005._get_ch_pos() for key, value in m_1020._get_ch_pos().items(): - if key not in ('O10', 'O9'): + if key not in ("O10", "O9"): assert_allclose(c_1005[key], value, atol=1e-4, err_msg=key) @@ -93,15 +92,29 @@ def _simulate_artinis_octamon(): """ np.random.seed(42) data = np.absolute(np.random.normal(size=(16, 100))) - ch_names = ['S1_D1 760', 'S1_D1 850', 'S2_D1 760', 'S2_D1 850', - 'S3_D1 760', 'S3_D1 850', 'S4_D1 760', 'S4_D1 850', - 'S5_D2 760', 'S5_D2 850', 'S6_D2 760', 'S6_D2 850', - 'S7_D2 760', 'S7_D2 850', 'S8_D2 760', 'S8_D2 850'] - ch_types = ['fnirs_cw_amplitude' for _ in ch_names] - sfreq = 10. 
# Hz + ch_names = [ + "S1_D1 760", + "S1_D1 850", + "S2_D1 760", + "S2_D1 850", + "S3_D1 760", + "S3_D1 850", + "S4_D1 760", + "S4_D1 850", + "S5_D2 760", + "S5_D2 850", + "S6_D2 760", + "S6_D2 850", + "S7_D2 760", + "S7_D2 850", + "S8_D2 760", + "S8_D2 850", + ] + ch_types = ["fnirs_cw_amplitude" for _ in ch_names] + sfreq = 10.0 # Hz info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq) for i, ch_name in enumerate(ch_names): - info['chs'][i]['loc'][9] = int(ch_name.split(' ')[1]) + info["chs"][i]["loc"][9] = int(ch_name.split(" ")[1]) raw = RawArray(data, info) return raw @@ -115,47 +128,71 @@ def _simulate_artinis_brite23(): """ np.random.seed(0) data = np.random.normal(size=(46, 100)) - sd_names = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', 'S3_D2', 'S4_D2', 'S5_D2', - 'S4_D3', 'S5_D3', 'S6_D3', 'S5_D4', 'S6_D4', 'S7_D4', 'S6_D5', - 'S7_D5', 'S8_D5', 'S7_D6', 'S8_D6', 'S9_D6', 'S8_D7', 'S9_D7', - 'S10_D7', 'S11_D7'] + sd_names = [ + "S1_D1", + "S2_D1", + "S3_D1", + "S4_D1", + "S3_D2", + "S4_D2", + "S5_D2", + "S4_D3", + "S5_D3", + "S6_D3", + "S5_D4", + "S6_D4", + "S7_D4", + "S6_D5", + "S7_D5", + "S8_D5", + "S7_D6", + "S8_D6", + "S9_D6", + "S8_D7", + "S9_D7", + "S10_D7", + "S11_D7", + ] ch_names = [] ch_types = [] for name in sd_names: - ch_names.append(name + ' hbo') - ch_types.append('hbo') - ch_names.append(name + ' hbr') - ch_types.append('hbr') - sfreq = 10. # Hz + ch_names.append(name + " hbo") + ch_types.append("hbo") + ch_names.append(name + " hbr") + ch_types.append("hbr") + sfreq = 10.0 # Hz info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq) raw = RawArray(data, info) return raw -@pytest.mark.parametrize('kind', ('octamon', 'brite23')) +@pytest.mark.parametrize("kind", ("octamon", "brite23")) def test_set_montage_artinis_fsaverage(kind): """Test that artinis montages match fsaverage's head<->MRI transform.""" # Compare OctaMon and Brite23 to fsaverage - trans_fs, _ = _get_trans('fsaverage') - montage = make_standard_montage(f'artinis-{kind}') + trans_fs, _ = _get_trans("fsaverage") + montage = make_standard_montage(f"artinis-{kind}") trans = compute_native_head_t(montage) - assert trans['to'] == trans_fs['to'] - assert trans['from'] == trans_fs['from'] - translation = 1000 * np.linalg.norm(trans['trans'][:3, 3] - - trans_fs['trans'][:3, 3]) + assert trans["to"] == trans_fs["to"] + assert trans["from"] == trans_fs["from"] + translation = 1000 * np.linalg.norm( + trans["trans"][:3, 3] - trans_fs["trans"][:3, 3] + ) assert 0 < translation < 1 # mm rotation = np.rad2deg( - _angle_between_quats(rot_to_quat(trans['trans'][:3, :3]), - rot_to_quat(trans_fs['trans'][:3, :3]))) + _angle_between_quats( + rot_to_quat(trans["trans"][:3, :3]), rot_to_quat(trans_fs["trans"][:3, :3]) + ) + ) assert 0 < rotation < 1 # degrees def test_set_montage_artinis_basic(): """Test that OctaMon and Brite23 montages are set properly.""" # Test OctaMon montage - montage_octamon = make_standard_montage('artinis-octamon') - montage_brite23 = make_standard_montage('artinis-brite23') + montage_octamon = make_standard_montage("artinis-octamon") + montage_brite23 = make_standard_montage("artinis-brite23") raw = _simulate_artinis_octamon() raw_od = optical_density(raw) old_info = raw.info.copy() @@ -164,82 +201,106 @@ def test_set_montage_artinis_basic(): raw_od.set_montage(montage_octamon) raw_hb = beer_lambert_law(raw_od, ppf=6) # montage needed for BLL # Check that the montage was actually modified - assert_raises(AssertionError, assert_array_almost_equal, - 
old_info['chs'][0]['loc'][:9], - raw.info['chs'][0]['loc'][:9]) - assert_raises(AssertionError, assert_array_almost_equal, - old_info_od['chs'][0]['loc'][:9], - raw_od.info['chs'][0]['loc'][:9]) + assert_raises( + AssertionError, + assert_array_almost_equal, + old_info["chs"][0]["loc"][:9], + raw.info["chs"][0]["loc"][:9], + ) + assert_raises( + AssertionError, + assert_array_almost_equal, + old_info_od["chs"][0]["loc"][:9], + raw_od.info["chs"][0]["loc"][:9], + ) # Check a known location - assert_array_almost_equal(raw.info['chs'][0]['loc'][:3], - [0.054243, 0.081884, 0.054544]) - assert_array_almost_equal(raw.info['chs'][8]['loc'][:3], - [-0.03013, 0.105097, 0.055894]) - assert_array_almost_equal(raw.info['chs'][12]['loc'][:3], - [-0.055681, 0.086566, 0.055858]) - assert_array_almost_equal(raw_od.info['chs'][12]['loc'][:3], - [-0.055681, 0.086566, 0.055858]) - assert_array_almost_equal(raw_hb.info['chs'][12]['loc'][:3], - [-0.055681, 0.086566, 0.055858]) + assert_array_almost_equal( + raw.info["chs"][0]["loc"][:3], [0.054243, 0.081884, 0.054544] + ) + assert_array_almost_equal( + raw.info["chs"][8]["loc"][:3], [-0.03013, 0.105097, 0.055894] + ) + assert_array_almost_equal( + raw.info["chs"][12]["loc"][:3], [-0.055681, 0.086566, 0.055858] + ) + assert_array_almost_equal( + raw_od.info["chs"][12]["loc"][:3], [-0.055681, 0.086566, 0.055858] + ) + assert_array_almost_equal( + raw_hb.info["chs"][12]["loc"][:3], [-0.055681, 0.086566, 0.055858] + ) # Check that locations are identical for a pair of channels (all elements # except the 10th which is the wavelength if not hbo and hbr type) - assert_array_almost_equal(raw.info['chs'][0]['loc'][:9], - raw.info['chs'][1]['loc'][:9]) - assert_array_almost_equal(raw_od.info['chs'][0]['loc'][:9], - raw_od.info['chs'][1]['loc'][:9]) - assert_array_almost_equal(raw_hb.info['chs'][0]['loc'][:9], - raw_hb.info['chs'][1]['loc'][:9]) + assert_array_almost_equal( + raw.info["chs"][0]["loc"][:9], raw.info["chs"][1]["loc"][:9] + ) + assert_array_almost_equal( + raw_od.info["chs"][0]["loc"][:9], raw_od.info["chs"][1]["loc"][:9] + ) + assert_array_almost_equal( + raw_hb.info["chs"][0]["loc"][:9], raw_hb.info["chs"][1]["loc"][:9] + ) # Test Brite23 montage raw = _simulate_artinis_brite23() old_info = raw.info.copy() raw.set_montage(montage_brite23) # Check that the montage was actually modified - assert_raises(AssertionError, assert_array_almost_equal, - old_info['chs'][0]['loc'][:9], - raw.info['chs'][0]['loc'][:9]) + assert_raises( + AssertionError, + assert_array_almost_equal, + old_info["chs"][0]["loc"][:9], + raw.info["chs"][0]["loc"][:9], + ) # Check a known location - assert_array_almost_equal(raw.info['chs'][0]['loc'][:3], - [0.068931, 0.046201, 0.072055]) - assert_array_almost_equal(raw.info['chs'][8]['loc'][:3], - [0.055196, 0.082757, 0.052165]) - assert_array_almost_equal(raw.info['chs'][12]['loc'][:3], - [0.033592, 0.102607, 0.047423]) + assert_array_almost_equal( + raw.info["chs"][0]["loc"][:3], [0.068931, 0.046201, 0.072055] + ) + assert_array_almost_equal( + raw.info["chs"][8]["loc"][:3], [0.055196, 0.082757, 0.052165] + ) + assert_array_almost_equal( + raw.info["chs"][12]["loc"][:3], [0.033592, 0.102607, 0.047423] + ) # Check that locations are identical for a pair of channels (all elements # except the 10th which is the wavelength if not hbo and hbr type) - assert_array_almost_equal(raw.info['chs'][0]['loc'][:9], - raw.info['chs'][1]['loc'][:9]) + assert_array_almost_equal( + raw.info["chs"][0]["loc"][:9], raw.info["chs"][1]["loc"][:9] + ) # Test 
channel variations raw_old = _simulate_artinis_brite23() # Raw missing some channels that are in the montage: pass raw = raw_old.copy() - raw.pick(['S1_D1 hbo', 'S1_D1 hbr']) - raw.set_montage('artinis-brite23') + raw.pick(["S1_D1 hbo", "S1_D1 hbr"]) + raw.set_montage("artinis-brite23") # Unconventional channel pair: pass raw = raw_old.copy() - info_new = create_info(['S11_D1 hbo', 'S11_D1 hbr'], raw.info['sfreq'], - ['hbo', 'hbr']) + info_new = create_info( + ["S11_D1 hbo", "S11_D1 hbr"], raw.info["sfreq"], ["hbo", "hbr"] + ) new = RawArray(np.random.normal(size=(2, len(raw))), info_new) raw.add_channels([new], force_update_info=True) - raw.set_montage('artinis-brite23') + raw.set_montage("artinis-brite23") # Source not in montage: fail raw = raw_old.copy() - info_new = create_info(['S12_D7 hbo', 'S12_D7 hbr'], raw.info['sfreq'], - ['hbo', 'hbr']) + info_new = create_info( + ["S12_D7 hbo", "S12_D7 hbr"], raw.info["sfreq"], ["hbo", "hbr"] + ) new = RawArray(np.random.normal(size=(2, len(raw))), info_new) raw.add_channels([new], force_update_info=True) - with pytest.raises(ValueError, match='is not in list'): - raw.set_montage('artinis-brite23') + with pytest.raises(ValueError, match="is not in list"): + raw.set_montage("artinis-brite23") # Detector not in montage: fail raw = raw_old.copy() - info_new = create_info(['S11_D8 hbo', 'S11_D8 hbr'], raw.info['sfreq'], - ['hbo', 'hbr']) + info_new = create_info( + ["S11_D8 hbo", "S11_D8 hbr"], raw.info["sfreq"], ["hbo", "hbr"] + ) new = RawArray(np.random.normal(size=(2, len(raw))), info_new) raw.add_channels([new], force_update_info=True) - with pytest.raises(ValueError, match='is not in list'): - raw.set_montage('artinis-brite23') + with pytest.raises(ValueError, match="is not in list"): + raw.set_montage("artinis-brite23") diff --git a/mne/channels/tests/test_unify_bads.py b/mne/channels/tests/test_unify_bads.py new file mode 100644 index 00000000000..5575b5dce5b --- /dev/null +++ b/mne/channels/tests/test_unify_bads.py @@ -0,0 +1,58 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import pytest + +from mne.channels import unify_bad_channels + + +def test_error_raising(raw, epochs): + """Tests input checking.""" + with pytest.raises(TypeError, match=r"must be an instance of list"): + unify_bad_channels("bad input") + with pytest.raises(ValueError, match=r"insts must not be empty"): + unify_bad_channels([]) + with pytest.raises(TypeError, match=r"each object in insts must be an instance of"): + unify_bad_channels(["bad_instance"]) + with pytest.raises(ValueError, match=r"same type"): + unify_bad_channels([raw, epochs]) + with pytest.raises(ValueError, match=r"Channels do not match across"): + raw_alt1 = raw.copy() + raw_alt1.drop_channels(raw.info["ch_names"][-1]) + unify_bad_channels([raw, raw_alt1]) # ch diff preserving order + with pytest.raises(ValueError, match=r"sorted differently"): + raw_alt2 = raw.copy() + new_order = [raw.ch_names[-1]] + raw.ch_names[:-1] + raw_alt2.reorder_channels(new_order) + unify_bad_channels([raw, raw_alt2]) + + +def test_bads_compilation(raw): + """Tests that bads are compiled properly. + + Tests two cases: a) single instance passed to function with an existing + bad, and b) multiple instances passed to function with varying compilation + scenarios including empty bads, unique bads, and partially duplicated bads + listed out-of-order. 
+ + Only the Raw instance type is tested, since bad channel implementation is + controlled across instance types with a MixIn class. + """ + assert raw.info["bads"] == [] + chns = raw.ch_names[:3] + no_bad = raw.copy() + one_bad = raw.copy() + one_bad.info["bads"] = [chns[1]] + three_bad = raw.copy() + three_bad.info["bads"] = chns + # scenario 1: single instance passed with actual bads + s_out = unify_bad_channels([one_bad]) + assert len(s_out) == 1, len(s_out) + assert s_out[0].info["bads"] == [chns[1]], (s_out[0].info["bads"], chns[1]) + # scenario 2: multiple instances passed + m_out = unify_bad_channels([one_bad, no_bad, three_bad]) + assert len(m_out) == 3, len(m_out) + expected_order = [chns[1], chns[0], chns[2]] + for inst in m_out: + assert inst.info["bads"] == expected_order diff --git a/mne/chpi.py b/mne/chpi.py index 096c4e6f2bc..1cc168f3e20 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -1,53 +1,84 @@ -# -*- coding: utf-8 -*- -"""Functions for fitting head positions with (c)HPI coils.""" - -# Next, ``compute_head_pos`` can be used to: -# -# 1. Drop coils whose GOF are below ``gof_limit``. If fewer than 3 coils -# remain, abandon fitting for the chunk. -# 2. Fit dev_head_t quaternion (using ``_fit_chpi_quat_subset``), -# iteratively dropping coils (as long as 3 remain) to find the best GOF -# (using ``_fit_chpi_quat``). -# 3. If fewer than 3 coils meet the ``dist_limit`` criteria following -# projection of the fitted device coil locations into the head frame, -# abandon fitting for the chunk. -# -# The function ``filter_chpi`` uses the same linear model to filter cHPI -# and (optionally) line frequencies from the data. - -# Authors: Eric Larson -# +"""Functions for fitting head positions with (c)HPI coils. + +``compute_head_pos`` can be used to: + +1. Drop coils whose GOF are below ``gof_limit``. If fewer than 3 coils + remain, abandon fitting for the chunk. +2. Fit dev_head_t quaternion (using ``_fit_chpi_quat_subset``), + iteratively dropping coils (as long as 3 remain) to find the best GOF + (using ``_fit_chpi_quat``). +3. If fewer than 3 coils meet the ``dist_limit`` criteria following + projection of the fitted device coil locations into the head frame, + abandon fitting for the chunk. + +The function ``filter_chpi`` uses the same linear model to filter cHPI +and (optionally) line frequencies from the data. +""" + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
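# Illustration only (not part of this patch): a hedged sketch of the typical
# fitting pipeline this module provides -- cHPI amplitudes -> coil locations
# -> head positions.  The file names are hypothetical; any raw recording with
# cHPI information should work, and ``allow_maxshield`` is only needed for
# unprocessed MEGIN/Elekta data.
import mne

raw = mne.io.read_raw_fif("chpi_raw.fif", allow_maxshield="yes")
chpi_amplitudes = mne.chpi.compute_chpi_amplitudes(raw)
chpi_locs = mne.chpi.compute_chpi_locs(raw.info, chpi_amplitudes)
head_pos = mne.chpi.compute_head_pos(raw.info, chpi_locs)
mne.chpi.write_head_pos("chpi_head_pos.pos", head_pos)
# mne.chpi.filter_chpi(raw.load_data()) would then remove the cHPI (and,
# optionally, line) frequencies from the data, as described above.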
+import copy +import itertools from functools import partial import numpy as np -import itertools - -from .event import find_events -from .io.base import BaseRaw +from scipy.linalg import orth +from scipy.optimize import fmin_cobyla +from scipy.spatial.distance import cdist + +from ._fiff.constants import FIFF +from ._fiff.meas_info import Info, _simplify_info +from ._fiff.pick import ( + _picks_to_idx, + pick_channels, + pick_channels_regexp, + pick_info, + pick_types, +) +from ._fiff.proj import Projection, setup_proj +from .bem import ConductorModel from .channels.channels import _get_meg_system -from .io.kit.constants import KIT -from .io.kit.kit import RawKIT as _RawKIT -from .io.meas_info import _simplify_info, Info -from .io.pick import (pick_types, pick_channels, pick_channels_regexp, - pick_info, _picks_to_idx) -from .io.proj import Projection, setup_proj -from .io.constants import FIFF -from .io.ctf.trans import _make_ctf_coord_trans_set -from .forward import (_magnetic_dipole_field_vec, _create_meg_coils, - _concatenate_coils) -from .cov import make_ad_hoc_cov, compute_whitener +from .cov import compute_whitener, make_ad_hoc_cov from .dipole import _make_guesses +from .event import find_events from .fixes import jit -from .preprocessing.maxwell import (_sss_basis, _prep_mf_coils, - _regularize_out, _get_mf_picks_fix_mags) -from .transforms import (apply_trans, invert_transform, _angle_between_quats, - quat_to_rot, rot_to_quat, _fit_matched_points, - _quat_to_affine, als_ras_trans) -from .utils import (verbose, logger, use_log_level, _check_fname, warn, - _validate_type, ProgressBar, _check_option, _pl, - _on_missing, _verbose_safe_false) +from .forward import _concatenate_coils, _create_meg_coils, _magnetic_dipole_field_vec +from .io import BaseRaw +from .io.ctf.trans import _make_ctf_coord_trans_set +from .io.kit.constants import KIT +from .io.kit.kit import RawKIT as _RawKIT +from .preprocessing.maxwell import ( + _get_mf_picks_fix_mags, + _prep_mf_coils, + _regularize_out, + _sss_basis, +) +from .transforms import ( + Transform, + _angle_between_quats, + _fit_matched_points, + _quat_to_affine, + als_ras_trans, + apply_trans, + invert_transform, + quat_to_rot, + rot_to_quat, +) +from .utils import ( + ProgressBar, + _check_fname, + _check_option, + _on_missing, + _pl, + _validate_type, + _verbose_safe_false, + logger, + use_log_level, + verbose, + warn, +) # Eventually we should add: # hpicons @@ -58,6 +89,7 @@ # ############################################################################ # Reading from text or FIF file + def read_head_pos(fname): """Read MaxFilter-formatted head position parameters. @@ -69,8 +101,9 @@ def read_head_pos(fname): Returns ------- - pos : array, shape (N, 10) + quats : array, shape (n_pos, 10) The position and quaternion parameters from cHPI fitting. + See :func:`mne.chpi.compute_head_pos` for details on the columns. See Also -------- @@ -81,12 +114,11 @@ def read_head_pos(fname): ----- .. 
versionadded:: 0.12 """ - _check_fname(fname, must_exist=True, overwrite='read') + _check_fname(fname, must_exist=True, overwrite="read") data = np.loadtxt(fname, skiprows=1) # first line is header, skip it data.shape = (-1, 10) # ensure it's the right size even if empty if np.isnan(data).any(): # make sure we didn't do something dumb - raise RuntimeError('positions could not be read properly from %s' - % fname) + raise RuntimeError(f"positions could not be read properly from {fname}") return data @@ -97,8 +129,9 @@ def write_head_pos(fname, pos): ---------- fname : path-like The filename to write. - pos : array, shape (N, 10) + pos : array, shape (n_pos, 10) The position and quaternion parameters from cHPI fitting. + See :func:`mne.chpi.compute_head_pos` for details on the columns. See Also -------- @@ -112,14 +145,17 @@ def write_head_pos(fname, pos): _check_fname(fname, overwrite=True) pos = np.array(pos, np.float64) if pos.ndim != 2 or pos.shape[1] != 10: - raise ValueError('pos must be a 2D array of shape (N, 10)') - with open(fname, 'wb') as fid: - fid.write(' Time q1 q2 q3 q4 q5 ' - 'q6 g-value error velocity\n'.encode('ASCII')) + raise ValueError( + f"pos must be a 2D array of shape (N, 10), got shape {pos.shape}" + ) + with open(fname, "wb") as fid: + fid.write( + " Time q1 q2 q3 q4 q5 " + "q6 g-value error velocity\n".encode("ASCII") + ) for p in pos: - fmts = ['% 9.3f'] + ['% 8.5f'] * 9 - fid.write(((' ' + ' '.join(fmts) + '\n') - % tuple(p)).encode('ASCII')) + fmts = ["% 9.3f"] + ["% 8.5f"] * 9 + fid.write(((" " + " ".join(fmts) + "\n") % tuple(p)).encode("ASCII")) def head_pos_to_trans_rot_t(quats): @@ -127,16 +163,17 @@ def head_pos_to_trans_rot_t(quats): Parameters ---------- - quats : ndarray, shape (N, 10) + quats : ndarray, shape (n_pos, 10) MaxFilter-formatted position and quaternion parameters. + See :func:`mne.chpi.read_head_pos` for details on the columns. Returns ------- - translation : ndarray, shape (N, 3) + translation : ndarray, shape (n_pos, 3) Translations at each time point. - rotation : ndarray, shape (N, 3, 3) + rotation : ndarray, shape (n_pos, 3, 3) Rotations at each time point. - t : ndarray, shape (N,) + t : ndarray, shape (n_pos,) The time points. See Also @@ -179,15 +216,14 @@ def extract_chpi_locs_ctf(raw, verbose=None): .. versionadded:: 0.20 """ # Pick channels corresponding to the cHPI positions - hpi_picks = pick_channels_regexp(raw.info['ch_names'], 'HLC00[123][123].*') + hpi_picks = pick_channels_regexp(raw.info["ch_names"], "HLC00[123][123].*") # make sure we get 9 channels if len(hpi_picks) != 9: - raise RuntimeError('Could not find all 9 cHPI channels') + raise RuntimeError("Could not find all 9 cHPI channels") # get indices in alphabetical order - sorted_picks = np.array(sorted(hpi_picks, - key=lambda k: raw.info['ch_names'][k])) + sorted_picks = np.array(sorted(hpi_picks, key=lambda k: raw.info["ch_names"][k])) # make picks to match order of dig cardinial ident codes. 
# LPA (HPIC002[123]-*), NAS(HPIC001[123]-*), RPA(HPIC003[123]-*) @@ -200,12 +236,12 @@ def extract_chpi_locs_ctf(raw, verbose=None): # transforms tmp_trans = _make_ctf_coord_trans_set(None, None) - ctf_dev_dev_t = tmp_trans['t_ctf_dev_dev'] + ctf_dev_dev_t = tmp_trans["t_ctf_dev_dev"] del tmp_trans # find indices where chpi locations change indices = [0] - indices.extend(np.where(np.all(np.diff(chpi_data, axis=1), axis=0))[0] + 1) + indices.extend(np.where(np.any(np.diff(chpi_data, axis=1), axis=0))[0] + 1) # data in channels are in ctf device coordinates (cm) rrs = chpi_data[:, indices].T.reshape(len(indices), 3, 3) # m # map to mne device coords @@ -217,7 +253,7 @@ def extract_chpi_locs_ctf(raw, verbose=None): @verbose -def extract_chpi_locs_kit(raw, stim_channel='MISC 064', *, verbose=None): +def extract_chpi_locs_kit(raw, stim_channel="MISC 064", *, verbose=None): """Extract cHPI locations from KIT data. Parameters @@ -236,34 +272,35 @@ def extract_chpi_locs_kit(raw, stim_channel='MISC 064', *, verbose=None): ----- .. versionadded:: 0.23 """ - _validate_type(raw, (_RawKIT,), 'raw') + _validate_type(raw, (_RawKIT,), "raw") stim_chs = [ - raw.info['ch_names'][pick] for pick in pick_types( - raw.info, stim=True, misc=True, ref_meg=False)] - _validate_type(stim_channel, str, 'stim_channel') - _check_option('stim_channel', stim_channel, stim_chs) + raw.info["ch_names"][pick] + for pick in pick_types(raw.info, stim=True, misc=True, ref_meg=False) + ] + _validate_type(stim_channel, str, "stim_channel") + _check_option("stim_channel", stim_channel, stim_chs) idx = raw.ch_names.index(stim_channel) safe_false = _verbose_safe_false() events_on = find_events( - raw, stim_channel=raw.ch_names[idx], output='onset', - verbose=safe_false)[:, 0] + raw, stim_channel=raw.ch_names[idx], output="onset", verbose=safe_false + )[:, 0] events_off = find_events( - raw, stim_channel=raw.ch_names[idx], output='offset', - verbose=safe_false)[:, 0] + raw, stim_channel=raw.ch_names[idx], output="offset", verbose=safe_false + )[:, 0] bad = False if len(events_on) == 0 or len(events_off) == 0: bad = True else: if events_on[-1] > events_off[-1]: events_on = events_on[:-1] - if events_on.size != events_off.size or not \ - (events_on < events_off).all(): + if events_on.size != events_off.size or not (events_on < events_off).all(): bad = True if bad: raise RuntimeError( - f'Could not find appropriate cHPI intervals from {stim_channel}') + f"Could not find appropriate cHPI intervals from {stim_channel}" + ) # use the midpoint for times - times = (events_on + events_off) / (2 * raw.info['sfreq']) + times = (events_on + events_off) / (2 * raw.info["sfreq"]) del events_on, events_off # XXX remove first two rows. It is unknown currently if there is a way to # determine from the con file the number of initial pulses that @@ -272,24 +309,25 @@ def extract_chpi_locs_kit(raw, stim_channel='MISC 064', *, verbose=None): # may just always be 2... 
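# Illustration only (not part of this patch): why the change-detection in
# extract_chpi_locs_ctf above was switched from np.all to np.any -- a sample
# starts a new cHPI location as soon as *any* HLC channel value changes, not
# only when every channel changes.  Values below are made up.
import numpy as np

_demo = np.array(
    [
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 0.0, 0.0],  # this channel happens not to move
        [0.0, 0.0, 2.0, 2.0],
    ]
)
assert list(np.where(np.any(np.diff(_demo, axis=1), axis=0))[0] + 1) == [2]
assert list(np.where(np.all(np.diff(_demo, axis=1), axis=0))[0] + 1) == []  # misses it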
times = times[2:] n_coils = 5 # KIT always has 5 (hard-coded in reader) - header = raw._raw_extras[0]['dirs'][KIT.DIR_INDEX_CHPI_DATA] - dtype = np.dtype([('good', ' 0 else None # grab codes indicating a coil is active - hpi_on = [coil['event_bits'][0] for coil in hpi_sub['hpi_coils']] + hpi_on = [coil["event_bits"][0] for coil in hpi_sub["hpi_coils"]] # not all HPI coils will actually be used - hpi_on = np.array([hpi_on[hc['number'] - 1] for hc in hpi_coils]) + hpi_on = np.array([hpi_on[hc["number"] - 1] for hc in hpi_coils]) # mask for coils that may be active hpi_mask = np.array([event_bit != 0 for event_bit in hpi_on]) hpi_on = hpi_on[hpi_mask] @@ -367,63 +416,74 @@ def get_chpi_info(info, on_missing='raise', verbose=None): @verbose def _get_hpi_initial_fit(info, adjust=False, verbose=None): """Get HPI fit locations from raw.""" - if info['hpi_results'] is None or len(info['hpi_results']) == 0: - raise RuntimeError('no initial cHPI head localization performed') - - hpi_result = info['hpi_results'][-1] - hpi_dig = sorted([d for d in info['dig'] - if d['kind'] == FIFF.FIFFV_POINT_HPI], - key=lambda x: x['ident']) # ascending (dig) order + if info["hpi_results"] is None or len(info["hpi_results"]) == 0: + raise RuntimeError("no initial cHPI head localization performed") + + hpi_result = info["hpi_results"][-1] + hpi_dig = sorted( + [d for d in info["dig"] if d["kind"] == FIFF.FIFFV_POINT_HPI], + key=lambda x: x["ident"], + ) # ascending (dig) order if len(hpi_dig) == 0: # CTF data, probably - hpi_dig = sorted(hpi_result['dig_points'], key=lambda x: x['ident']) - if all(d['coord_frame'] in (FIFF.FIFFV_COORD_DEVICE, - FIFF.FIFFV_COORD_UNKNOWN) - for d in hpi_dig): + msg = "HPIFIT: No HPI dig points, using hpifit result" + hpi_dig = sorted(hpi_result["dig_points"], key=lambda x: x["ident"]) + if all( + d["coord_frame"] in (FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_UNKNOWN) + for d in hpi_dig + ): + # Do not modify in place! 
+ hpi_dig = copy.deepcopy(hpi_dig) + msg += " transformed to head coords" for dig in hpi_dig: - dig.update(r=apply_trans(info['dev_head_t'], dig['r']), - coord_frame=FIFF.FIFFV_COORD_HEAD) + dig.update( + r=apply_trans(info["dev_head_t"], dig["r"]), + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + logger.debug(msg) # zero-based indexing, dig->info # CTF does not populate some entries so we use .get here - pos_order = hpi_result.get('order', np.arange(1, len(hpi_dig) + 1)) - 1 - used = hpi_result.get('used', np.arange(len(hpi_dig))) - dist_limit = hpi_result.get('dist_limit', 0.005) - good_limit = hpi_result.get('good_limit', 0.98) - goodness = hpi_result.get('goodness', np.ones(len(hpi_dig))) + pos_order = hpi_result.get("order", np.arange(1, len(hpi_dig) + 1)) - 1 + used = hpi_result.get("used", np.arange(len(hpi_dig))) + dist_limit = hpi_result.get("dist_limit", 0.005) + good_limit = hpi_result.get("good_limit", 0.98) + goodness = hpi_result.get("goodness", np.ones(len(hpi_dig))) # this shouldn't happen, eventually we could add the transforms # necessary to put it in head coords - if not all(d['coord_frame'] == FIFF.FIFFV_COORD_HEAD for d in hpi_dig): - raise RuntimeError('cHPI coordinate frame incorrect') + if not all(d["coord_frame"] == FIFF.FIFFV_COORD_HEAD for d in hpi_dig): + raise RuntimeError("cHPI coordinate frame incorrect") # Give the user some info - logger.info('HPIFIT: %s coils digitized in order %s' - % (len(pos_order), ' '.join(str(o + 1) for o in pos_order))) - logger.debug('HPIFIT: %s coils accepted: %s' - % (len(used), ' '.join(str(h) for h in used))) - hpi_rrs = np.array([d['r'] for d in hpi_dig])[pos_order] + logger.info( + f"HPIFIT: {len(pos_order)} coils digitized in order " + f"{' '.join(str(o + 1) for o in pos_order)}" + ) + logger.debug( + f"HPIFIT: {len(used)} coils accepted: {' '.join(str(h) for h in used)}" + ) + hpi_rrs = np.array([d["r"] for d in hpi_dig])[pos_order] assert len(hpi_rrs) >= 3 # Fitting errors - hpi_rrs_fit = sorted([d for d in info['hpi_results'][-1]['dig_points']], - key=lambda x: x['ident']) - hpi_rrs_fit = np.array([d['r'] for d in hpi_rrs_fit]) + hpi_rrs_fit = sorted( + [d for d in info["hpi_results"][-1]["dig_points"]], key=lambda x: x["ident"] + ) + hpi_rrs_fit = np.array([d["r"] for d in hpi_rrs_fit]) # hpi_result['dig_points'] are in FIFFV_COORD_UNKNOWN coords, but this # is probably a misnomer because it should be FIFFV_COORD_DEVICE for this # to work - assert hpi_result['coord_trans']['to'] == FIFF.FIFFV_COORD_HEAD - hpi_rrs_fit = apply_trans(hpi_result['coord_trans']['trans'], hpi_rrs_fit) - if 'moments' in hpi_result: - logger.debug('Hpi coil moments (%d %d):' - % hpi_result['moments'].shape[::-1]) - for moment in hpi_result['moments']: - logger.debug("%g %g %g" % tuple(moment)) + assert hpi_result["coord_trans"]["to"] == FIFF.FIFFV_COORD_HEAD + hpi_rrs_fit = apply_trans(hpi_result["coord_trans"]["trans"], hpi_rrs_fit) + if "moments" in hpi_result: + logger.debug(f"Hpi coil moments {hpi_result['moments'].shape[::-1]}:") + for moment in hpi_result["moments"]: + logger.debug(f"{moment[0]:g} {moment[1]:g} {moment[2]:g}") errors = np.linalg.norm(hpi_rrs - hpi_rrs_fit, axis=1) - logger.debug('HPIFIT errors: %s mm.' - % ', '.join('%0.1f' % (1000. 
* e) for e in errors)) + logger.debug(f"HPIFIT errors: {', '.join(f'{1000 * e:0.1f}' for e in errors)} mm.") if errors.sum() < len(errors) * dist_limit: - logger.info('HPI consistency of isotrak and hpifit is OK.') + logger.info("HPI consistency of isotrak and hpifit is OK.") elif not adjust and (len(used) == len(hpi_dig)): - warn('HPI consistency of isotrak and hpifit is poor.') + warn("HPI consistency of isotrak and hpifit is poor.") else: # adjust HPI coil locations using the hpifit transformation for hi, (err, r_fit) in enumerate(zip(errors, hpi_rrs_fit)): @@ -431,24 +491,31 @@ def _get_hpi_initial_fit(info, adjust=False, verbose=None): d = 1000 * err if not adjust: if err >= dist_limit: - warn('Discrepancy of HPI coil %d isotrak and hpifit is ' - '%.1f mm!' % (hi + 1, d)) + warn( + f"Discrepancy of HPI coil {hi + 1} isotrak and hpifit is " + f"{d:.1f} mm!" + ) elif hi + 1 not in used: if goodness[hi] >= good_limit: - logger.info('Note: HPI coil %d isotrak is adjusted by ' - '%.1f mm!' % (hi + 1, d)) + logger.info( + f"Note: HPI coil {hi + 1} isotrak is adjusted by {d:.1f} mm!" + ) hpi_rrs[hi] = r_fit else: - warn('Discrepancy of HPI coil %d isotrak and hpifit of ' - '%.1f mm was not adjusted!' % (hi + 1, d)) - logger.debug('HP fitting limits: err = %.1f mm, gval = %.3f.' - % (1000 * dist_limit, good_limit)) + warn( + f"Discrepancy of HPI coil {hi + 1} isotrak and hpifit of " + f"{d:.1f} mm was not adjusted!" + ) + logger.debug( + f"HP fitting limits: err = {1000 * dist_limit:.1f} mm, gval = {good_limit:.3f}." + ) return hpi_rrs.astype(float) -def _magnetic_dipole_objective(x, B, B2, coils, whitener, too_close, - return_moment=False): +def _magnetic_dipole_objective( + x, B, B2, coils, whitener, too_close, return_moment=False +): """Project data onto right eigenvectors of whitened forward.""" fwd = _magnetic_dipole_field_vec(x[np.newaxis], coils, too_close) out, u, s, one = _magnetic_dipole_delta(fwd, whitener, B, B2) @@ -478,23 +545,26 @@ def _magnetic_dipole_delta_multi(whitened_fwd_svd, B, B2): def _fit_magnetic_dipole(B_orig, x0, too_close, whitener, coils, guesses): """Fit a single bit of data (x0 = pos).""" - from scipy.optimize import fmin_cobyla B = np.dot(whitener, B_orig) B2 = np.dot(B, B) - objective = partial(_magnetic_dipole_objective, B=B, B2=B2, - coils=coils, whitener=whitener, - too_close=too_close) + objective = partial( + _magnetic_dipole_objective, + B=B, + B2=B2, + coils=coils, + whitener=whitener, + too_close=too_close, + ) if guesses is not None: res0 = objective(x0) - res = _magnetic_dipole_delta_multi( - guesses['whitened_fwd_svd'], B, B2) - assert res.shape == (guesses['rr'].shape[0],) + res = _magnetic_dipole_delta_multi(guesses["whitened_fwd_svd"], B, B2) + assert res.shape == (guesses["rr"].shape[0],) idx = np.argmin(res) if res[idx] < res0: - x0 = guesses['rr'][idx] + x0 = guesses["rr"][idx] x = fmin_cobyla(objective, x0, (), rhobeg=1e-3, rhoend=1e-5, disp=False) gof, moment = objective(x, return_moment=True) - gof = 1. - gof / B2 + gof = 1.0 - gof / B2 return x, gof, moment @@ -516,7 +586,7 @@ def _fit_chpi_quat(coil_dev_rrs, coil_head_rrs): # XXX someday we could choose to weight these points by their goodness # of fit somehow. quat = _fit_matched_points(coil_dev_rrs, coil_head_rrs)[0] - gof = 1. - _chpi_objective(quat, coil_dev_rrs, coil_head_rrs) / denom + gof = 1.0 - _chpi_objective(quat, coil_dev_rrs, coil_head_rrs) / denom return quat, gof @@ -535,7 +605,7 @@ def _fit_coil_order_dev_head_trans(dev_pnts, head_pnts, bias=True): # equivalent g values. 
To avoid this, heavily penalize # large rotations. rotation = _angle_between_quats(this_quat[:3], np.zeros(3)) - check_g = g * max(1. - rotation / np.pi, 0) ** 0.25 + check_g = g * max(1.0 - rotation / np.pi, 0) ** 0.25 else: check_g = g if check_g > best_g: @@ -550,61 +620,87 @@ def _fit_coil_order_dev_head_trans(dev_pnts, head_pnts, bias=True): @verbose -def _setup_hpi_amplitude_fitting(info, t_window, remove_aliased=False, - ext_order=1, allow_empty=False, verbose=None): +def _setup_hpi_amplitude_fitting( + info, t_window, remove_aliased=False, ext_order=1, allow_empty=False, verbose=None +): """Generate HPI structure for HPI localization.""" # grab basic info. - on_missing = 'raise' if not allow_empty else 'ignore' + on_missing = "raise" if not allow_empty else "ignore" hpi_freqs, hpi_pick, hpi_ons = get_chpi_info(info, on_missing=on_missing) - _validate_type(t_window, (str, 'numeric'), 't_window') - if info['line_freq'] is not None: - line_freqs = np.arange(info['line_freq'], info['sfreq'] / 3., - info['line_freq']) + # check for maxwell filtering + for ent in info["proc_history"]: + for key in ("sss_info", "max_st"): + if len(ent["max_info"]["sss_info"]) > 0: + warn( + "Fitting cHPI amplitudes after Maxwell filtering may not work, " + "consider fitting on the original data." + ) + break + + _validate_type(t_window, (str, "numeric"), "t_window") + if info["line_freq"] is not None: + line_freqs = np.arange( + info["line_freq"], info["sfreq"] / 3.0, info["line_freq"] + ) else: line_freqs = np.zeros([0]) - logger.info('Line interference frequencies: %s Hz' - % ' '.join(['%d' % lf for lf in line_freqs])) + lfs = " ".join(f"{lf}" for lf in line_freqs) + logger.info(f"Line interference frequencies: {lfs} Hz") # worry about resampled/filtered data. # What to do e.g. if Raw has been resampled and some of our # HPI freqs would now be aliased - highest = info.get('lowpass') - highest = info['sfreq'] / 2. if highest is None else highest + highest = info.get("lowpass") + highest = info["sfreq"] / 2.0 if highest is None else highest keepers = hpi_freqs <= highest if remove_aliased: hpi_freqs = hpi_freqs[keepers] hpi_ons = hpi_ons[keepers] elif not keepers.all(): - raise RuntimeError('Found HPI frequencies %s above the lowpass ' - '(or Nyquist) frequency %0.1f' - % (hpi_freqs[~keepers].tolist(), highest)) + raise RuntimeError( + f"Found HPI frequencies {hpi_freqs[~keepers].tolist()} above the lowpass (" + f"or Nyquist) frequency {highest:0.1f}" + ) # calculate optimal window length. if isinstance(t_window, str): - _check_option('t_window', t_window, ('auto',), extra='if a string') + _check_option("t_window", t_window, ("auto",), extra="if a string") if len(hpi_freqs): all_freqs = np.concatenate((hpi_freqs, line_freqs)) delta_freqs = np.diff(np.unique(all_freqs)) - t_window = max(5. / all_freqs.min(), 1. 
/ delta_freqs.min()) + t_window = max(5.0 / all_freqs.min(), 1.0 / delta_freqs.min()) else: t_window = 0.2 t_window = float(t_window) if t_window <= 0: - raise ValueError('t_window (%s) must be > 0' % (t_window,)) - logger.info('Using time window: %0.1f ms' % (1000 * t_window,)) - window_nsamp = np.rint(t_window * info['sfreq']).astype(int) - model = _setup_hpi_glm(hpi_freqs, line_freqs, info['sfreq'], window_nsamp) + raise ValueError(f"t_window ({t_window}) must be > 0") + logger.info(f"Using time window: {1000 * t_window:0.1f} ms") + window_nsamp = np.rint(t_window * info["sfreq"]).astype(int) + model = _setup_hpi_glm(hpi_freqs, line_freqs, info["sfreq"], window_nsamp) inv_model = np.linalg.pinv(model) inv_model_reord = _reorder_inv_model(inv_model, len(hpi_freqs)) proj, proj_op, meg_picks = _setup_ext_proj(info, ext_order) # include mag and grad picks separately, for SNR computations - mag_picks = _picks_to_idx(info, 'mag', allow_empty=True) - grad_picks = _picks_to_idx(info, 'grad', allow_empty=True) + mag_subpicks = _picks_to_idx(info, "mag", allow_empty=True) + mag_subpicks = np.searchsorted(meg_picks, mag_subpicks) + grad_subpicks = _picks_to_idx(info, "grad", allow_empty=True) + grad_subpicks = np.searchsorted(meg_picks, grad_subpicks) # Set up magnetic dipole fits hpi = dict( - meg_picks=meg_picks, mag_picks=mag_picks, grad_picks=grad_picks, - hpi_pick=hpi_pick, model=model, inv_model=inv_model, t_window=t_window, - inv_model_reord=inv_model_reord, on=hpi_ons, n_window=window_nsamp, - proj=proj, proj_op=proj_op, freqs=hpi_freqs, line_freqs=line_freqs) + meg_picks=meg_picks, + mag_subpicks=mag_subpicks, + grad_subpicks=grad_subpicks, + hpi_pick=hpi_pick, + model=model, + inv_model=inv_model, + t_window=t_window, + inv_model_reord=inv_model_reord, + on=hpi_ons, + n_window=window_nsamp, + proj=proj, + proj_op=proj_op, + freqs=hpi_freqs, + line_freqs=line_freqs, + ) return hpi @@ -614,9 +710,14 @@ def _setup_hpi_glm(hpi_freqs, line_freqs, sfreq, window_nsamp): radians_per_sec = 2 * np.pi * np.arange(window_nsamp, dtype=float) / sfreq f_t = hpi_freqs[np.newaxis, :] * radians_per_sec[:, np.newaxis] l_t = line_freqs[np.newaxis, :] * radians_per_sec[:, np.newaxis] - model = [np.sin(f_t), np.cos(f_t), # hpi freqs - np.sin(l_t), np.cos(l_t), # line freqs - slope, np.ones_like(slope)] # drift, DC + model = [ + np.sin(f_t), + np.cos(f_t), # hpi freqs + np.sin(l_t), + np.cos(l_t), # line freqs + slope, + np.ones_like(slope), + ] # drift, DC return np.hstack(model) @@ -628,35 +729,39 @@ def _reorder_inv_model(inv_model, n_freqs): def _setup_ext_proj(info, ext_order): - from scipy import linalg - meg_picks = pick_types(info, meg=True, eeg=False, exclude='bads') + meg_picks = pick_types(info, meg=True, eeg=False, exclude="bads") info = pick_info(_simplify_info(info), meg_picks) # makes a copy _, _, _, _, mag_or_fine = _get_mf_picks_fix_mags( - info, int_order=0, ext_order=ext_order, ignore_ref=True, - verbose='error') - mf_coils = _prep_mf_coils(info, verbose='error') + info, int_order=0, ext_order=ext_order, ignore_ref=True, verbose="error" + ) + mf_coils = _prep_mf_coils(info, verbose="error") ext = _sss_basis( - dict(origin=(0., 0., 0.), int_order=0, ext_order=ext_order), - mf_coils).T + dict(origin=(0.0, 0.0, 0.0), int_order=0, ext_order=ext_order), mf_coils + ).T out_removes = _regularize_out(0, 1, mag_or_fine, []) - ext = ext[~np.in1d(np.arange(len(ext)), out_removes)] - ext = linalg.orth(ext.T).T + ext = ext[~np.isin(np.arange(len(ext)), out_removes)] + ext = orth(ext.T).T assert 
ext.shape[1] == len(meg_picks) proj = Projection( - kind=FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD, desc='SSS', active=False, - data=dict(data=ext, ncol=info['nchan'], col_names=info['ch_names'], - nrow=len(ext))) + kind=FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD, + desc="SSS", + active=False, + data=dict( + data=ext, ncol=info["nchan"], col_names=info["ch_names"], nrow=len(ext) + ), + ) with info._unlock(): - info['projs'] = [proj] + info["projs"] = [proj] proj_op, _ = setup_proj( - info, add_eeg_ref=False, activate=False, verbose=_verbose_safe_false()) + info, add_eeg_ref=False, activate=False, verbose=_verbose_safe_false() + ) assert proj_op.shape == (len(meg_picks),) * 2 return proj, proj_op, meg_picks def _time_prefix(fit_time): """Format log messages.""" - return (' t=%0.3f:' % fit_time).ljust(17) + return (f" t={fit_time:0.3f}:").ljust(17) def _fit_chpi_amplitudes(raw, time_sl, hpi, snr=False): @@ -673,32 +778,43 @@ def _fit_chpi_amplitudes(raw, time_sl, hpi, snr=False): # No need to detrend the data because our model has a DC term with use_log_level(False): # loads good channels - this_data = raw[hpi['meg_picks'], time_sl][0] + this_data = raw[hpi["meg_picks"], time_sl][0] # which HPI coils to use - if hpi['hpi_pick'] is not None: + if hpi["hpi_pick"] is not None: with use_log_level(False): # loads hpi_stim channel - chpi_data = raw[hpi['hpi_pick'], time_sl][0] + chpi_data = raw[hpi["hpi_pick"], time_sl][0] - ons = (np.round(chpi_data).astype(np.int64) & - hpi['on'][:, np.newaxis]).astype(bool) + ons = (np.round(chpi_data).astype(np.int64) & hpi["on"][:, np.newaxis]).astype( + bool + ) n_on = ons.all(axis=-1).sum(axis=0) if not (n_on >= 3).all(): return None if snr: return _fast_fit_snr( - this_data, len(hpi['freqs']), hpi['model'], hpi['inv_model'], - hpi['mag_picks'], hpi['grad_picks']) - return _fast_fit(this_data, hpi['proj_op'], len(hpi['freqs']), - hpi['model'], hpi['inv_model_reord']) + this_data, + len(hpi["freqs"]), + hpi["model"], + hpi["inv_model"], + hpi["mag_subpicks"], + hpi["grad_subpicks"], + ) + return _fast_fit( + this_data, + hpi["proj_op"], + len(hpi["freqs"]), + hpi["model"], + hpi["inv_model_reord"], + ) @jit() def _fast_fit(this_data, proj, n_freqs, model, inv_model_reord): # first or last window if this_data.shape[1] != model.shape[0]: - model = model[:this_data.shape[1]] + model = model[: this_data.shape[1]] inv_model_reord = _reorder_inv_model(np.linalg.pinv(model), n_freqs) proj_data = proj @ this_data X = inv_model_reord @ proj_data.T @@ -706,7 +822,7 @@ def _fast_fit(this_data, proj, n_freqs, model, inv_model_reord): sin_fit = np.zeros((n_freqs, X.shape[1])) for fi in range(n_freqs): # use SVD across all sensors to estimate the sinusoid phase - u, s, vt = np.linalg.svd(X[2 * fi:2 * fi + 2], full_matrices=False) + u, s, vt = np.linalg.svd(X[2 * fi : 2 * fi + 2], full_matrices=False) # the first component holds the predominant phase direction # (so ignore the second, effectively doing s[1] = 0): sin_fit[fi] = vt[0] * s[0] @@ -717,11 +833,11 @@ def _fast_fit(this_data, proj, n_freqs, model, inv_model_reord): def _fast_fit_snr(this_data, n_freqs, model, inv_model, mag_picks, grad_picks): # first or last window if this_data.shape[1] != model.shape[0]: - model = model[:this_data.shape[1]] + model = model[: this_data.shape[1]] inv_model = np.linalg.pinv(model) coefs = np.ascontiguousarray(inv_model) @ np.ascontiguousarray(this_data.T) # average sin & cos terms (special property of sinusoids: power=A²/2) - hpi_power = (coefs[:n_freqs] ** 2 + coefs[n_freqs:(2 * n_freqs)] ** 2) / 2 
+ hpi_power = (coefs[:n_freqs] ** 2 + coefs[n_freqs : (2 * n_freqs)] ** 2) / 2 resid = this_data - np.ascontiguousarray((model @ coefs).T) # can't use np.var(..., axis=1) with Numba, so do it manually: resid_mean = np.atleast_2d(resid.sum(axis=1) / resid.shape[1]).T @@ -742,59 +858,66 @@ def _fast_fit_snr(this_data, n_freqs, model, inv_model, mag_picks, grad_picks): def _check_chpi_param(chpi_, name): - if name == 'chpi_locs': + if name == "chpi_locs": want_ndims = dict(times=1, rrs=3, moments=3, gofs=2) extra_keys = list() else: - assert name == 'chpi_amplitudes' + assert name == "chpi_amplitudes" want_ndims = dict(times=1, slopes=3) - extra_keys = ['proj'] + extra_keys = ["proj"] _validate_type(chpi_, dict, name) want_keys = list(want_ndims.keys()) + extra_keys if set(want_keys).symmetric_difference(chpi_): - raise ValueError('%s must be a dict with entries %s, got %s' - % (name, want_keys, sorted(chpi_.keys()))) + raise ValueError( + f"{name} must be a dict with entries {want_keys}, got " + f"{sorted(chpi_.keys())}" + ) n_times = None for key, want_ndim in want_ndims.items(): - key_str = '%s[%s]' % (name, key) + key_str = f"{name}[{key}]" val = chpi_[key] _validate_type(val, np.ndarray, key_str) shape = val.shape if val.ndim != want_ndim: - raise ValueError('%s must have ndim=%d, got %d' - % (key_str, want_ndim, val.ndim)) - if n_times is None and key != 'proj': + raise ValueError(f"{key_str} must have ndim={want_ndim}, got {val.ndim}") + if n_times is None and key != "proj": n_times = shape[0] - if n_times != shape[0] and key != 'proj': - raise ValueError('%s have inconsistent number of time ' - 'points in %s' % (name, want_keys)) - if name == 'chpi_locs': - n_coils = chpi_['rrs'].shape[1] - for key in ('gofs', 'moments'): + if n_times != shape[0] and key != "proj": + raise ValueError( + f"{name} have inconsistent number of time points in {want_keys}" + ) + if name == "chpi_locs": + n_coils = chpi_["rrs"].shape[1] + for key in ("gofs", "moments"): val = chpi_[key] if val.shape[1] != n_coils: - raise ValueError('chpi_locs["rrs"] had values for %d coils but' - ' chpi_locs["%s"] had values for %d coils' - % (n_coils, key, val.shape[1])) - for key in ('rrs', 'moments'): + raise ValueError( + f'chpi_locs["rrs"] had values for {n_coils} coils but ' + f'chpi_locs["{key}"] had values for {val.shape[1]} coils' + ) + for key in ("rrs", "moments"): val = chpi_[key] if val.shape[2] != 3: - raise ValueError('chpi_locs["%s"].shape[2] must be 3, got ' - 'shape %s' % (key, shape)) + raise ValueError( + f'chpi_locs["{key}"].shape[2] must be 3, got shape {shape}' + ) else: - assert name == 'chpi_amplitudes' - slopes, proj = chpi_['slopes'], chpi_['proj'] + assert name == "chpi_amplitudes" + slopes, proj = chpi_["slopes"], chpi_["proj"] _validate_type(proj, Projection, 'chpi_amplitudes["proj"]') - n_ch = len(proj['data']['col_names']) + n_ch = len(proj["data"]["col_names"]) if slopes.shape[0] != n_times or slopes.shape[2] != n_ch: - raise ValueError('slopes must have shape[0]==%d and shape[2]==%d,' - ' got shape %s' % (n_times, n_ch, slopes.shape)) + raise ValueError( + f"slopes must have shape[0]=={n_times} and shape[2]=={n_ch}, got shape " + f"{slopes.shape}" + ) @verbose -def compute_head_pos(info, chpi_locs, dist_limit=0.005, gof_limit=0.98, - adjust_dig=False, verbose=None): +def compute_head_pos( + info, chpi_locs, dist_limit=0.005, gof_limit=0.98, adjust_dig=False, verbose=None +): """Compute time-varying head positions. 
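# Illustration only (not part of this patch): a hedged sketch of typical
# downstream uses of the (n_pos, 10) quaternion array compute_head_pos
# returns.  The .pos file name is hypothetical, and the movement-compensation
# line assumes ``raw`` has already been loaded.
import mne

head_pos = mne.chpi.read_head_pos("chpi_head_pos.pos")  # or compute_head_pos(...)
translation, rotation, t = mne.chpi.head_pos_to_trans_rot_t(head_pos)
fig = mne.viz.plot_head_positions(head_pos, mode="traces")
# raw_sss = mne.preprocessing.maxwell_filter(raw, head_pos=head_pos)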
Parameters @@ -813,7 +936,8 @@ def compute_head_pos(info, chpi_locs, dist_limit=0.005, gof_limit=0.98, Returns ------- quats : ndarray, shape (n_pos, 10) - The ``[t, q1, q2, q3, x, y, z, gof, err, v]`` for each fit. + MaxFilter-formatted head position parameters. The columns correspond to + ``[t, q1, q2, q3, x, y, z, gof, err, v]`` for each time point. See Also -------- @@ -826,33 +950,35 @@ def compute_head_pos(info, chpi_locs, dist_limit=0.005, gof_limit=0.98, ----- .. versionadded:: 0.20 """ - _check_chpi_param(chpi_locs, 'chpi_locs') - _validate_type(info, Info, 'info') - hpi_dig_head_rrs = _get_hpi_initial_fit(info, adjust=adjust_dig, - verbose='error') + _check_chpi_param(chpi_locs, "chpi_locs") + _validate_type(info, Info, "info") + hpi_dig_head_rrs = _get_hpi_initial_fit(info, adjust=adjust_dig, verbose="error") n_coils = len(hpi_dig_head_rrs) - coil_dev_rrs = apply_trans(invert_transform(info['dev_head_t']), - hpi_dig_head_rrs) - dev_head_t = info['dev_head_t']['trans'] + coil_dev_rrs = apply_trans(invert_transform(info["dev_head_t"]), hpi_dig_head_rrs) + dev_head_t = info["dev_head_t"]["trans"] pos_0 = dev_head_t[:3, 3] - last = dict(quat_fit_time=-0.1, coil_dev_rrs=coil_dev_rrs, - quat=np.concatenate([rot_to_quat(dev_head_t[:3, :3]), - dev_head_t[:3, 3]])) + last = dict( + quat_fit_time=-0.1, + coil_dev_rrs=coil_dev_rrs, + quat=np.concatenate([rot_to_quat(dev_head_t[:3, :3]), dev_head_t[:3, 3]]), + ) del coil_dev_rrs quats = [] for fit_time, this_coil_dev_rrs, g_coils in zip( - *(chpi_locs[key] for key in ('times', 'rrs', 'gofs'))): + *(chpi_locs[key] for key in ("times", "rrs", "gofs")) + ): use_idx = np.where(g_coils >= gof_limit)[0] # # 1. Check number of good ones # if len(use_idx) < 3: - msg = (_time_prefix(fit_time) + '%s/%s good HPI fits, cannot ' - 'determine the transformation (%s GOF)!' - % (len(use_idx), n_coils, - ', '.join('%0.2f' % g for g in g_coils))) - warn(msg) + gofs = ", ".join(f"{g:0.2f}" for g in g_coils) + warn( + f"{_time_prefix(fit_time)}{len(use_idx)}/{n_coils} " + "good HPI fits, cannot determine the transformation " + f"({gofs} GOF)!" + ) continue # @@ -861,7 +987,8 @@ def compute_head_pos(info, chpi_locs, dist_limit=0.005, gof_limit=0.98, # positions) iteratively using different sets of coils. # this_quat, g, use_idx = _fit_chpi_quat_subset( - this_coil_dev_rrs, hpi_dig_head_rrs, use_idx) + this_coil_dev_rrs, hpi_dig_head_rrs, use_idx + ) # # 3. Stop if < 3 good @@ -873,64 +1000,80 @@ def compute_head_pos(info, chpi_locs, dist_limit=0.005, gof_limit=0.98, errs = np.linalg.norm(hpi_dig_head_rrs - est_coil_head_rrs, axis=1) n_good = ((g_coils >= gof_limit) & (errs < dist_limit)).sum() if n_good < 3: - warn(_time_prefix(fit_time) + '%s/%s good HPI fits, cannot ' - 'determine the transformation (%s mm/GOF)!' - % (n_good, n_coils, - ', '.join(f'{1000 * e:0.1f}::{g:0.2f}' - for e, g in zip(errs, g_coils)))) + warn_str = ", ".join( + f"{1000 * e:0.1f}::{g:0.2f}" for e, g in zip(errs, g_coils) + ) + warn( + f"{_time_prefix(fit_time)}{n_good}/{n_coils} good HPI fits, cannot " + f"determine the transformation ({warn_str} mm/GOF)!" + ) continue # velocities, in device coords, of HPI coils - dt = fit_time - last['quat_fit_time'] - vs = tuple(1000. 
* np.linalg.norm(last['coil_dev_rrs'] - - this_coil_dev_rrs, axis=1) / dt) - logger.info(_time_prefix(fit_time) + - ('%s/%s good HPI fits, movements [mm/s] = ' + - ' / '.join(['% 8.1f'] * n_coils)) - % ((n_good, n_coils) + vs)) + dt = fit_time - last["quat_fit_time"] + vs = tuple( + 1000.0 + * np.linalg.norm(last["coil_dev_rrs"] - this_coil_dev_rrs, axis=1) + / dt + ) + logger.info( + _time_prefix(fit_time) + + ( + "%s/%s good HPI fits, movements [mm/s] = " + + " / ".join(["% 8.1f"] * n_coils) + ) + % ((n_good, n_coils) + vs) + ) # Log results # MaxFilter averages over a 200 ms window for display, but we don't for ii in range(n_coils): if ii in use_idx: - start, end = ' ', '/' + start, end = " ", "/" else: - start, end = '(', ')' - log_str = (' ' + start + - '{0:6.1f} {1:6.1f} {2:6.1f} / ' + - '{3:6.1f} {4:6.1f} {5:6.1f} / ' + - 'g = {6:0.3f} err = {7:4.1f} ' + - end) - vals = np.concatenate((1000 * hpi_dig_head_rrs[ii], - 1000 * est_coil_head_rrs[ii], - [g_coils[ii], 1000 * errs[ii]])) + start, end = "(", ")" + log_str = ( + " " + + start + + "{0:6.1f} {1:6.1f} {2:6.1f} / " + + "{3:6.1f} {4:6.1f} {5:6.1f} / " + + "g = {6:0.3f} err = {7:4.1f} " + + end + ) + vals = np.concatenate( + ( + 1000 * hpi_dig_head_rrs[ii], + 1000 * est_coil_head_rrs[ii], + [g_coils[ii], 1000 * errs[ii]], + ) + ) if len(use_idx) >= 3: if ii <= 2: - log_str += '{8:6.3f} {9:6.3f} {10:6.3f}' - vals = np.concatenate( - (vals, this_dev_head_t[ii, :3])) + log_str += "{8:6.3f} {9:6.3f} {10:6.3f}" + vals = np.concatenate((vals, this_dev_head_t[ii, :3])) elif ii == 3: - log_str += '{8:6.1f} {9:6.1f} {10:6.1f}' - vals = np.concatenate( - (vals, this_dev_head_t[:3, 3] * 1000.)) + log_str += "{8:6.1f} {9:6.1f} {10:6.1f}" + vals = np.concatenate((vals, this_dev_head_t[:3, 3] * 1000.0)) logger.debug(log_str.format(*vals)) # resulting errors in head coil positions - d = np.linalg.norm(last['quat'][3:] - this_quat[3:]) # m - r = _angle_between_quats(last['quat'][:3], this_quat[:3]) / dt + d = np.linalg.norm(last["quat"][3:] - this_quat[3:]) # m + r = _angle_between_quats(last["quat"][:3], this_quat[:3]) / dt v = d / dt # m/s d = 100 * np.linalg.norm(this_quat[3:] - pos_0) # dis from 1st - logger.debug(' #t = %0.3f, #e = %0.2f cm, #g = %0.3f, ' - '#v = %0.2f cm/s, #r = %0.2f rad/s, #d = %0.2f cm' - % (fit_time, 100 * errs.mean(), g, 100 * v, r, d)) - logger.debug(' #t = %0.3f, #q = %s ' - % (fit_time, ' '.join(map('{:8.5f}'.format, this_quat)))) - - quats.append(np.concatenate(([fit_time], this_quat, [g], - [errs[use_idx].mean()], [v]))) - last['quat_fit_time'] = fit_time - last['quat'] = this_quat - last['coil_dev_rrs'] = this_coil_dev_rrs + logger.debug( + f" #t = {fit_time:0.3f}, #e = {100 * errs.mean():0.2f} cm, #g = {g:0.3f}" + f", #v = {100 * v:0.2f} cm/s, #r = {r:0.2f} rad/s, #d = {d:0.2f} cm" + ) + q_rep = " ".join(f"{qq:8.5f}" for qq in this_quat) + logger.debug(f" #t = {fit_time:0.3f}, #q = {q_rep}") + + quats.append( + np.concatenate(([fit_time], this_quat, [g], [errs[use_idx].mean()], [v])) + ) + last["quat_fit_time"] = fit_time + last["quat"] = this_quat + last["coil_dev_rrs"] = this_coil_dev_rrs quats = np.array(quats, np.float64) quats = np.zeros((0, 10)) if quats.size == 0 else quats return quats @@ -941,23 +1084,19 @@ def _fit_chpi_quat_subset(coil_dev_rrs, coil_head_rrs, use_idx): out_idx = use_idx.copy() if len(use_idx) > 3: # try dropping one (recursively) for di in range(len(use_idx)): - this_use_idx = list(use_idx[:di]) + list(use_idx[di + 1:]) + this_use_idx = list(use_idx[:di]) + list(use_idx[di + 1 :]) 
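# Illustration only (not part of this patch): a conceptual, exhaustive sketch
# of what _fit_chpi_quat_subset is after -- among all coil subsets of size
# >= 3, keep the quaternion fit with the best goodness of fit.  The real code
# above does this by recursively dropping one coil at a time; ``fit_quat``
# below stands in for a fitter such as _fit_chpi_quat.
from itertools import combinations

import numpy as np


def best_subset_fit(coil_dev_rrs, coil_head_rrs, fit_quat, min_coils=3):
    best_quat, best_g, best_idx = None, -np.inf, None
    for size in range(len(coil_dev_rrs), min_coils - 1, -1):
        for idx in map(np.array, combinations(range(len(coil_dev_rrs)), size)):
            quat, g = fit_quat(coil_dev_rrs[idx], coil_head_rrs[idx])
            if g > best_g:
                best_quat, best_g, best_idx = quat, g, idx
    return best_quat, best_g, best_idx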
this_quat, this_g, this_use_idx = _fit_chpi_quat_subset( - coil_dev_rrs, coil_head_rrs, this_use_idx) + coil_dev_rrs, coil_head_rrs, this_use_idx + ) if this_g > g: quat, g, out_idx = this_quat, this_g, this_use_idx return quat, g, np.array(out_idx, int) -@jit() -def _unit_quat_constraint(x): - """Constrain our 3 quaternion rot params (ignoring w) to have norm <= 1.""" - return 1 - (x * x).sum() - - @verbose -def compute_chpi_snr(raw, t_step_min=0.01, t_window='auto', ext_order=1, - tmin=0, tmax=None, verbose=None): +def compute_chpi_snr( + raw, t_step_min=0.01, t_window="auto", ext_order=1, tmin=0, tmax=None, verbose=None +): """Compute time-varying estimates of cHPI SNR. Parameters @@ -988,13 +1127,15 @@ def compute_chpi_snr(raw, t_step_min=0.01, t_window='auto', ext_order=1, ----- .. versionadded:: 0.24 """ - return _compute_chpi_amp_or_snr(raw, t_step_min, t_window, ext_order, - tmin, tmax, verbose, snr=True) + return _compute_chpi_amp_or_snr( + raw, t_step_min, t_window, ext_order, tmin, tmax, verbose, snr=True + ) @verbose -def compute_chpi_amplitudes(raw, t_step_min=0.01, t_window='auto', - ext_order=1, tmin=0, tmax=None, verbose=None): +def compute_chpi_amplitudes( + raw, t_step_min=0.01, t_window="auto", ext_order=1, tmin=0, tmax=None, verbose=None +): """Compute time-varying cHPI amplitudes. Parameters @@ -1040,13 +1181,21 @@ def compute_chpi_amplitudes(raw, t_step_min=0.01, t_window='auto', .. versionadded:: 0.20 """ - return _compute_chpi_amp_or_snr(raw, t_step_min, t_window, ext_order, - tmin, tmax, verbose) + return _compute_chpi_amp_or_snr( + raw, t_step_min, t_window, ext_order, tmin, tmax, verbose + ) -def _compute_chpi_amp_or_snr(raw, t_step_min=0.01, t_window='auto', - ext_order=1, tmin=0, tmax=None, verbose=None, - snr=False): +def _compute_chpi_amp_or_snr( + raw, + t_step_min=0.01, + t_window="auto", + ext_order=1, + tmin=0, + tmax=None, + verbose=None, + snr=False, +): """Compute cHPI amplitude or SNR. See compute_chpi_amplitudes for parameter descriptions. One additional @@ -1055,70 +1204,81 @@ def _compute_chpi_amp_or_snr(raw, t_step_min=0.01, t_window='auto', """ hpi = _setup_hpi_amplitude_fitting(raw.info, t_window, ext_order=ext_order) tmin, tmax = raw._tmin_tmax_to_start_stop(tmin, tmax) - tmin = tmin / raw.info['sfreq'] - tmax = tmax / raw.info['sfreq'] - need_win = hpi['t_window'] / 2. - fit_idxs = raw.time_as_index(np.arange( - tmin + need_win, tmax, t_step_min), use_rounding=True) - logger.info('Fitting %d HPI coil locations at up to %s time points ' - '(%0.1f s duration)' - % (len(hpi['freqs']), len(fit_idxs), tmax - tmin)) + tmin = tmin / raw.info["sfreq"] + tmax = tmax / raw.info["sfreq"] + need_win = hpi["t_window"] / 2.0 + fit_idxs = raw.time_as_index( + np.arange(tmin + need_win, tmax, t_step_min), use_rounding=True + ) + logger.info( + f"Fitting {len(hpi['freqs'])} HPI coil locations at up to " + f"{len(fit_idxs)} time points ({tmax - tmin:.1f} s duration)" + ) del tmin, tmax sin_fits = dict() - sin_fits['proj'] = hpi['proj'] - sin_fits['times'] = np.round(fit_idxs + raw.first_samp - - hpi['n_window'] / 2.) 
/ raw.info['sfreq'] - n_times = len(sin_fits['times']) - n_freqs = len(hpi['freqs']) - n_chans = len(sin_fits['proj']['data']['col_names']) + sin_fits["proj"] = hpi["proj"] + sin_fits["times"] = ( + np.round(fit_idxs + raw.first_samp - hpi["n_window"] / 2.0) / raw.info["sfreq"] + ) + n_times = len(sin_fits["times"]) + n_freqs = len(hpi["freqs"]) + n_chans = len(sin_fits["proj"]["data"]["col_names"]) if snr: - del sin_fits['proj'] - sin_fits['freqs'] = hpi['freqs'] + del sin_fits["proj"] + sin_fits["freqs"] = hpi["freqs"] ch_types = raw.get_channel_types() - grad_offset = 3 if 'mag' in ch_types else 0 - for ch_type in ('mag', 'grad'): + grad_offset = 3 if "mag" in ch_types else 0 + for ch_type in ("mag", "grad"): if ch_type in ch_types: - for key in ('snr', 'power', 'resid'): - cols = 1 if key == 'resid' else n_freqs - sin_fits[f'{ch_type}_{key}'] = np.empty((n_times, cols)) + for key in ("snr", "power", "resid"): + cols = 1 if key == "resid" else n_freqs + sin_fits[f"{ch_type}_{key}"] = np.empty((n_times, cols)) else: - sin_fits['slopes'] = np.empty((n_times, n_freqs, n_chans)) + sin_fits["slopes"] = np.empty((n_times, n_freqs, n_chans)) message = f"cHPI {'SNRs' if snr else 'amplitudes'}" for mi, midpt in enumerate(ProgressBar(fit_idxs, mesg=message)): # # 0. determine samples to fit. # - time_sl = midpt - hpi['n_window'] // 2 - time_sl = slice(max(time_sl, 0), - min(time_sl + hpi['n_window'], len(raw.times))) + time_sl = midpt - hpi["n_window"] // 2 + time_sl = slice(max(time_sl, 0), min(time_sl + hpi["n_window"], len(raw.times))) # # 1. Fit amplitudes for each channel from each of the N sinusoids # amps_or_snrs = _fit_chpi_amplitudes(raw, time_sl, hpi, snr) if snr: + if amps_or_snrs is None: + amps_or_snrs = np.full((n_freqs, grad_offset + 3), np.nan) # unpack the SNR estimates. mag & grad are returned in one array # (because of Numba) so take care with which column is which. # note that mean residual is a scalar (same for all HPI freqs) but # is returned as a (tiled) vector (again, because Numba) so that's # why below we take amps_or_snrs[0, 2] instead of [:, 2] ch_types = raw.get_channel_types() - if 'mag' in ch_types: - sin_fits['mag_snr'][mi] = amps_or_snrs[:, 0] # SNR - sin_fits['mag_power'][mi] = amps_or_snrs[:, 1] # mean power - sin_fits['mag_resid'][mi] = amps_or_snrs[0, 2] # mean resid - if 'grad' in ch_types: - sin_fits['grad_snr'][mi] = amps_or_snrs[:, grad_offset] - sin_fits['grad_power'][mi] = amps_or_snrs[:, grad_offset + 1] - sin_fits['grad_resid'][mi] = amps_or_snrs[0, grad_offset + 2] + if "mag" in ch_types: + sin_fits["mag_snr"][mi] = amps_or_snrs[:, 0] # SNR + sin_fits["mag_power"][mi] = amps_or_snrs[:, 1] # mean power + sin_fits["mag_resid"][mi] = amps_or_snrs[0, 2] # mean resid + if "grad" in ch_types: + sin_fits["grad_snr"][mi] = amps_or_snrs[:, grad_offset] + sin_fits["grad_power"][mi] = amps_or_snrs[:, grad_offset + 1] + sin_fits["grad_resid"][mi] = amps_or_snrs[0, grad_offset + 2] else: - sin_fits['slopes'][mi] = amps_or_snrs + sin_fits["slopes"][mi] = amps_or_snrs return sin_fits @verbose -def compute_chpi_locs(info, chpi_amplitudes, t_step_max=1., too_close='raise', - adjust_dig=False, verbose=None): +def compute_chpi_locs( + info, + chpi_amplitudes, + t_step_max=1.0, + too_close="raise", + adjust_dig=False, + *, + verbose=None, +): """Compute locations of each cHPI coils over time. Parameters @@ -1163,19 +1323,19 @@ def compute_chpi_locs(info, chpi_amplitudes, t_step_max=1., too_close='raise', .. 
versionadded:: 0.20 """ # Set up magnetic dipole fits - _check_option('too_close', too_close, ['raise', 'warning', 'info']) - _check_chpi_param(chpi_amplitudes, 'chpi_amplitudes') - _validate_type(info, Info, 'info') + _check_option("too_close", too_close, ["raise", "warning", "info"]) + _check_chpi_param(chpi_amplitudes, "chpi_amplitudes") + _validate_type(info, Info, "info") + _validate_type(info["dev_head_t"], Transform, "info['dev_head_t']") sin_fits = chpi_amplitudes # use the old name below del chpi_amplitudes - proj = sin_fits['proj'] - meg_picks = pick_channels( - info['ch_names'], proj['data']['col_names'], ordered=True) + proj = sin_fits["proj"] + meg_picks = pick_channels(info["ch_names"], proj["data"]["col_names"], ordered=True) info = pick_info(info, meg_picks) # makes a copy with info._unlock(): - info['projs'] = [proj] + info["projs"] = [proj] del meg_picks, proj - meg_coils = _concatenate_coils(_create_meg_coils(info['chs'], 'accurate')) + meg_coils = _concatenate_coils(_create_meg_coils(info["chs"], "accurate")) # Set up external model for interference suppression safe_false = _verbose_safe_false() @@ -1184,10 +1344,12 @@ def compute_chpi_locs(info, chpi_amplitudes, t_step_max=1., too_close='raise', # Make some location guesses (1 cm grid) R = np.linalg.norm(meg_coils[0], axis=1).min() - guesses = _make_guesses(dict(R=R, r0=np.zeros(3)), 0.01, 0., 0.005, - verbose=safe_false)[0]['rr'] - logger.info('Computing %d HPI location guesses (1 cm grid in a %0.1f cm ' - 'sphere)' % (len(guesses), R * 100)) + sphere = ConductorModel(layers=[dict(rad=R)], r0=np.zeros(3), is_sphere=True) + guesses = _make_guesses(sphere, 0.01, 0.0, 0.005, verbose=safe_false)[0]["rr"] + logger.info( + f"Computing {len(guesses)} HPI location guesses " + f"(1 cm grid in a {R * 100:.1f} cm sphere)" + ) fwd = _magnetic_dipole_field_vec(guesses, meg_coils, too_close) fwd = np.dot(fwd, whitener.T) fwd.shape = (guesses.shape[0], 3, -1) @@ -1195,51 +1357,58 @@ def compute_chpi_locs(info, chpi_amplitudes, t_step_max=1., too_close='raise', guesses = dict(rr=guesses, whitened_fwd_svd=fwd) del fwd, R - iter_ = list(zip(sin_fits['times'], sin_fits['slopes'])) + iter_ = list(zip(sin_fits["times"], sin_fits["slopes"])) chpi_locs = dict(times=[], rrs=[], gofs=[], moments=[]) # setup last iteration structure hpi_dig_dev_rrs = apply_trans( - invert_transform(info['dev_head_t'])['trans'], - _get_hpi_initial_fit(info, adjust=adjust_dig)) - last = dict(sin_fit=None, coil_fit_time=sin_fits['times'][0] - 1, - coil_dev_rrs=hpi_dig_dev_rrs) + invert_transform(info["dev_head_t"])["trans"], + _get_hpi_initial_fit(info, adjust=adjust_dig), + ) + last = dict( + sin_fit=None, + coil_fit_time=sin_fits["times"][0] - 1, + coil_dev_rrs=hpi_dig_dev_rrs, + ) n_hpi = len(hpi_dig_dev_rrs) del hpi_dig_dev_rrs - for fit_time, sin_fit in ProgressBar(iter_, mesg='cHPI locations '): + for fit_time, sin_fit in ProgressBar(iter_, mesg="cHPI locations "): # skip this window if bad if not np.isfinite(sin_fit).all(): continue # check if data has sufficiently changed - if last['sin_fit'] is not None: # first iteration + if last["sin_fit"] is not None: # first iteration corrs = np.array( - [np.corrcoef(s, l)[0, 1] - for s, l in zip(sin_fit, last['sin_fit'])]) + [np.corrcoef(s, lst)[0, 1] for s, lst in zip(sin_fit, last["sin_fit"])] + ) corrs *= corrs # check to see if we need to continue - if fit_time - last['coil_fit_time'] <= t_step_max - 1e-7 and \ - (corrs > 0.98).sum() >= 3: + if ( + fit_time - last["coil_fit_time"] <= t_step_max - 1e-7 + and 
(corrs > 0.98).sum() >= 3 + ): # don't need to refit data continue # update 'last' sin_fit *before* inplace sign mult - last['sin_fit'] = sin_fit.copy() + last["sin_fit"] = sin_fit.copy() # # 2. Fit magnetic dipole for each coil to obtain coil positions # in device coordinates # - coil_fits = [_fit_magnetic_dipole(f, x0, too_close, whitener, - meg_coils, guesses) - for f, x0 in zip(sin_fit, last['coil_dev_rrs'])] + coil_fits = [ + _fit_magnetic_dipole(f, x0, too_close, whitener, meg_coils, guesses) + for f, x0 in zip(sin_fit, last["coil_dev_rrs"]) + ] rrs, gofs, moments = zip(*coil_fits) - chpi_locs['times'].append(fit_time) - chpi_locs['rrs'].append(rrs) - chpi_locs['gofs'].append(gofs) - chpi_locs['moments'].append(moments) - last['coil_fit_time'] = fit_time - last['coil_dev_rrs'] = rrs - n_times = len(chpi_locs['times']) + chpi_locs["times"].append(fit_time) + chpi_locs["rrs"].append(rrs) + chpi_locs["gofs"].append(gofs) + chpi_locs["moments"].append(moments) + last["coil_fit_time"] = fit_time + last["coil_dev_rrs"] = rrs + n_times = len(chpi_locs["times"]) shapes = dict( times=(n_times,), rrs=(n_times, n_hpi, 3), @@ -1254,17 +1423,32 @@ def compute_chpi_locs(info, chpi_amplitudes, t_step_max=1., too_close='raise', def _chpi_locs_to_times_dig(chpi_locs): """Reformat chpi_locs as list of dig (dict).""" dig = list() - for rrs, gofs in zip(*(chpi_locs[key] for key in ('rrs', 'gofs'))): - dig.append([{'r': rr, 'ident': idx, 'gof': gof, - 'kind': FIFF.FIFFV_POINT_HPI, - 'coord_frame': FIFF.FIFFV_COORD_DEVICE} - for idx, (rr, gof) in enumerate(zip(rrs, gofs), 1)]) - return chpi_locs['times'], dig + for rrs, gofs in zip(*(chpi_locs[key] for key in ("rrs", "gofs"))): + dig.append( + [ + { + "r": rr, + "ident": idx, + "gof": gof, + "kind": FIFF.FIFFV_POINT_HPI, + "coord_frame": FIFF.FIFFV_COORD_DEVICE, + } + for idx, (rr, gof) in enumerate(zip(rrs, gofs), 1) + ] + ) + return chpi_locs["times"], dig @verbose -def filter_chpi(raw, include_line=True, t_step=0.01, t_window='auto', - ext_order=1, allow_line_only=False, verbose=None): +def filter_chpi( + raw, + include_line=True, + t_step=0.01, + t_window="auto", + ext_order=1, + allow_line_only=False, + verbose=None, +): """Remove cHPI and line noise from data. .. note:: This function will only work properly if cHPI was on @@ -1301,73 +1485,78 @@ def filter_chpi(raw, include_line=True, t_step=0.01, t_window='auto', .. 
versionadded:: 0.12 """ - _validate_type(raw, BaseRaw, 'raw') + _validate_type(raw, BaseRaw, "raw") if not raw.preload: - raise RuntimeError('raw data must be preloaded') + raise RuntimeError("raw data must be preloaded") t_step = float(t_step) if t_step <= 0: - raise ValueError('t_step (%s) must be > 0' % (t_step,)) - n_step = int(np.ceil(t_step * raw.info['sfreq'])) - if include_line and raw.info['line_freq'] is None: - raise RuntimeError('include_line=True but raw.info["line_freq"] is ' - 'None, consider setting it to the line frequency') + raise ValueError(f"t_step ({t_step}) must be > 0") + n_step = int(np.ceil(t_step * raw.info["sfreq"])) + if include_line and raw.info["line_freq"] is None: + raise RuntimeError( + 'include_line=True but raw.info["line_freq"] is ' + "None, consider setting it to the line frequency" + ) hpi = _setup_hpi_amplitude_fitting( - raw.info, t_window, remove_aliased=True, ext_order=ext_order, - allow_empty=allow_line_only, verbose=_verbose_safe_false()) + raw.info, + t_window, + remove_aliased=True, + ext_order=ext_order, + allow_empty=allow_line_only, + verbose=_verbose_safe_false(), + ) - fit_idxs = np.arange(0, len(raw.times) + hpi['n_window'] // 2, n_step) - n_freqs = len(hpi['freqs']) + fit_idxs = np.arange(0, len(raw.times) + hpi["n_window"] // 2, n_step) + n_freqs = len(hpi["freqs"]) n_remove = 2 * n_freqs meg_picks = pick_types(raw.info, meg=True, exclude=()) # filter all chs n_times = len(raw.times) - msg = 'Removing %s cHPI' % n_freqs + msg = f"Removing {n_freqs} cHPI" if include_line: - n_remove += 2 * len(hpi['line_freqs']) - msg += ' and %s line harmonic' % len(hpi['line_freqs']) - msg += ' frequencies from %s MEG channels' % len(meg_picks) + n_remove += 2 * len(hpi["line_freqs"]) + msg += f" and {len(hpi['line_freqs'])} line harmonic" + msg += f" frequencies from {len(meg_picks)} MEG channels" - recon = np.dot(hpi['model'][:, :n_remove], hpi['inv_model'][:n_remove]).T + recon = np.dot(hpi["model"][:, :n_remove], hpi["inv_model"][:n_remove]).T logger.info(msg) chunks = list() # the chunks to subtract last_endpt = 0 - pb = ProgressBar(fit_idxs, mesg='Filtering') + pb = ProgressBar(fit_idxs, mesg="Filtering") for ii, midpt in enumerate(pb): - left_edge = midpt - hpi['n_window'] // 2 - time_sl = slice(max(left_edge, 0), - min(left_edge + hpi['n_window'], len(raw.times))) + left_edge = midpt - hpi["n_window"] // 2 + time_sl = slice( + max(left_edge, 0), min(left_edge + hpi["n_window"], len(raw.times)) + ) this_len = time_sl.stop - time_sl.start - if this_len == hpi['n_window']: + if this_len == hpi["n_window"]: this_recon = recon else: # first or last window - model = hpi['model'][:this_len] + model = hpi["model"][:this_len] inv_model = np.linalg.pinv(model) this_recon = np.dot(model[:, :n_remove], inv_model[:n_remove]).T this_data = raw._data[meg_picks, time_sl] subt_pt = min(midpt + n_step, n_times) if last_endpt != subt_pt: - fit_left_edge = left_edge - time_sl.start + hpi['n_window'] // 2 - fit_sl = slice(fit_left_edge, - fit_left_edge + (subt_pt - last_endpt)) + fit_left_edge = left_edge - time_sl.start + hpi["n_window"] // 2 + fit_sl = slice(fit_left_edge, fit_left_edge + (subt_pt - last_endpt)) chunks.append((subt_pt, np.dot(this_data, this_recon[:, fit_sl]))) last_endpt = subt_pt # Consume (trailing) chunks that are now safe to remove because # our windows will no longer touch them if ii < len(fit_idxs) - 1: - next_left_edge = fit_idxs[ii + 1] - hpi['n_window'] // 2 + next_left_edge = fit_idxs[ii + 1] - hpi["n_window"] // 2 else: 
next_left_edge = np.inf while len(chunks) > 0 and chunks[0][0] <= next_left_edge: right_edge, chunk = chunks.pop(0) - raw._data[meg_picks, - right_edge - chunk.shape[1]:right_edge] -= chunk + raw._data[meg_picks, right_edge - chunk.shape[1] : right_edge] -= chunk return raw def _compute_good_distances(hpi_coil_dists, new_pos, dist_limit=0.005): """Compute good coils based on distances.""" - from scipy.spatial.distance import cdist these_dists = cdist(new_pos, new_pos) these_dists = np.abs(hpi_coil_dists - these_dists) # there is probably a better algorithm for finding the bad ones... @@ -1375,7 +1564,7 @@ def _compute_good_distances(hpi_coil_dists, new_pos, dist_limit=0.005): use_mask = np.ones(len(hpi_coil_dists), bool) while not good: d = these_dists[use_mask][:, use_mask] - d_bad = (d > dist_limit) + d_bad = d > dist_limit good = not d_bad.any() if not good: if use_mask.sum() == 2: @@ -1389,7 +1578,7 @@ def _compute_good_distances(hpi_coil_dists, new_pos, dist_limit=0.005): @verbose -def get_active_chpi(raw, *, on_missing='raise', verbose=None): +def get_active_chpi(raw, *, on_missing="raise", verbose=None): """Determine how many HPI coils were active for a time point. Parameters @@ -1412,13 +1601,14 @@ def get_active_chpi(raw, *, on_missing='raise', verbose=None): system, _ = _get_meg_system(raw.info) # check whether we have a neuromag system - if system not in ['122m', '306m']: - raise NotImplementedError(('Identifying active HPI channels' - ' is not implemented for other systems' - ' than neuromag.')) + if system not in ["122m", "306m"]: + raise NotImplementedError( + "Identifying active HPI channels is not implemented for other systems than " + "neuromag." + ) # extract hpi info chpi_info = get_chpi_info(raw.info, on_missing=on_missing) - if len(chpi_info[2]) == 0: + if (len(chpi_info[2]) == 0) or (chpi_info[1] is None): return np.zeros_like(raw.times) # extract hpi time series and infer which one was on diff --git a/mne/commands/__init__.py b/mne/commands/__init__.py index 62eef819982..1c7134ea78b 100644 --- a/mne/commands/__init__.py +++ b/mne/commands/__init__.py @@ -1,3 +1,8 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Command-line utilities.""" +import lazy_loader as lazy -from . import utils +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/commands/__init__.pyi b/mne/commands/__init__.pyi new file mode 100644 index 00000000000..c0caf04abbb --- /dev/null +++ b/mne/commands/__init__.pyi @@ -0,0 +1,2 @@ +__all__ = ["utils"] +from . import utils diff --git a/mne/commands/mne_anonymize.py b/mne/commands/mne_anonymize.py index 7c858319265..28fae42c82b 100644 --- a/mne/commands/mne_anonymize.py +++ b/mne/commands/mne_anonymize.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python -# Authors : Dominik Krzeminski -# Luke Bloy +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. """Anonymize raw fif file. 
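For context, the `mne anonymize` command modified in the surrounding hunks is a thin wrapper around the public `Raw.anonymize` API: it reads a FIF file, shifts measurement dates backwards by `--daysback` days, optionally keeps the HIS tag, and writes the result, defaulting to an `anon-` prefix on the input name (see the `ANONYMIZE_FILE_PREFIX` logic in the next hunk). The following is a minimal sketch of the equivalent Python calls, not the command itself; the input name `sample_raw.fif` and the 30-day shift are hypothetical values chosen only for illustration.

    import mne

    # Hypothetical input file name, used only for illustration.
    raw = mne.io.read_raw_fif("sample_raw.fif")

    # Shift all dates in the file back by 30 days; keep_his=False mirrors
    # the command's default of not keeping the HIS tag.
    raw.anonymize(daysback=30, keep_his=False)

    # Same naming convention as the command: "anon-" prepended to the input name.
    raw.save("anon-sample_raw.fif", overwrite=True)
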
@@ -15,11 +15,12 @@ """ +import os.path as op import sys + import mne -import os.path as op -ANONYMIZE_FILE_PREFIX = 'anon' +ANONYMIZE_FILE_PREFIX = "anon" def mne_anonymize(fif_fname, out_fname, keep_his, daysback, overwrite): @@ -49,8 +50,7 @@ def mne_anonymize(fif_fname, out_fname, keep_his, daysback, overwrite): dir_name = op.split(fif_fname)[0] if out_fname is None: fif_bname = op.basename(fif_fname) - out_fname = op.join(dir_name, - "{}-{}".format(ANONYMIZE_FILE_PREFIX, fif_bname)) + out_fname = op.join(dir_name, f"{ANONYMIZE_FILE_PREFIX}-{fif_bname}") elif not op.isabs(out_fname): out_fname = op.join(dir_name, out_fname) @@ -63,20 +63,48 @@ def run(): parser = get_optparser(__file__) - parser.add_option("-f", "--file", type="string", dest="file", - help="Name of file to modify.", metavar="FILE", - default=None) - parser.add_option("-o", "--output", type="string", dest="output", - help="Name of anonymized output file." - "`anon-` prefix is added to FILE if not given", - metavar="OUTFILE", default=None) - parser.add_option("--keep_his", dest="keep_his", action="http://github.com/store_true", - help="Keep the HIS tag (not advised)", default=False) - parser.add_option("-d", "--daysback", type="int", dest="daysback", - help="Move dates in file backwards by this many days.", - metavar="N_DAYS", default=None) - parser.add_option("--overwrite", dest="overwrite", action="http://github.com/store_true", - help="Overwrite input file.", default=False) + parser.add_option( + "-f", + "--file", + type="string", + dest="file", + help="Name of file to modify.", + metavar="FILE", + default=None, + ) + parser.add_option( + "-o", + "--output", + type="string", + dest="output", + help="Name of anonymized output file." + "`anon-` prefix is added to FILE if not given", + metavar="OUTFILE", + default=None, + ) + parser.add_option( + "--keep_his", + dest="keep_his", + action="http://github.com/store_true", + help="Keep the HIS tag (not advised)", + default=False, + ) + parser.add_option( + "-d", + "--daysback", + type="int", + dest="daysback", + help="Move dates in file backwards by this many days.", + metavar="N_DAYS", + default=None, + ) + parser.add_option( + "--overwrite", + dest="overwrite", + action="http://github.com/store_true", + help="Overwrite input file.", + default=False, + ) options, args = parser.parse_args() if options.file is None: @@ -88,12 +116,12 @@ def run(): keep_his = options.keep_his daysback = options.daysback overwrite = options.overwrite - if not fname.endswith('.fif'): - raise ValueError('%s does not seem to be a .fif file.' % fname) + if not fname.endswith(".fif"): + raise ValueError(f"{fname} does not seem to be a .fif file.") mne_anonymize(fname, out_fname, keep_his, daysback, overwrite) -is_main = (__name__ == '__main__') +is_main = __name__ == "__main__" if is_main: run() diff --git a/mne/commands/mne_browse_raw.py b/mne/commands/mne_browse_raw.py index 95b4381cc7b..a6db0e2d6ea 100644 --- a/mne/commands/mne_browse_raw.py +++ b/mne/commands/mne_browse_raw.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python r"""Browse raw data. This uses :func:`mne.io.read_raw` so it supports the same formats @@ -13,68 +12,136 @@ --eve sample_audvis_raw-eve.fif """ -# Authors : Eric Larson, PhD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
import sys + import mne def run(): """Run command.""" - from mne.commands.utils import get_optparser, _add_verbose_flag + from mne.commands.utils import _add_verbose_flag, get_optparser from mne.viz import _RAW_CLIP_DEF - parser = get_optparser(__file__, usage='usage: %prog raw [options]') - - parser.add_option("--raw", dest="raw_in", - help="Input raw FIF file (can also be specified " - "directly as an argument without the --raw prefix)", - metavar="FILE") - parser.add_option("--proj", dest="proj_in", - help="Projector file", metavar="FILE", - default='') - parser.add_option("--projoff", dest="proj_off", - help="Disable all projectors", - default=False, action="http://github.com/store_true") - parser.add_option("--eve", dest="eve_in", - help="Events file", metavar="FILE", - default='') - parser.add_option("-d", "--duration", dest="duration", type="float", - help="Time window for plotting (s)", - default=10.0) - parser.add_option("-t", "--start", dest="start", type="float", - help="Initial start time for plotting", - default=0.0) - parser.add_option("-n", "--n_channels", dest="n_channels", type="int", - help="Number of channels to plot at a time", - default=20) - parser.add_option("-o", "--order", dest="group_by", - help="Order to use for grouping during plotting " - "('type' or 'original')", default='type') - parser.add_option("-p", "--preload", dest="preload", - help="Preload raw data (for faster navigaton)", - default=False, action="http://github.com/store_true") - parser.add_option("-s", "--show_options", dest="show_options", - help="Show projection options dialog", - default=False) - parser.add_option("--allowmaxshield", dest="maxshield", - help="Allow loading MaxShield processed data", - action="http://github.com/store_true") - parser.add_option("--highpass", dest="highpass", type="float", - help="Display high-pass filter corner frequency", - default=-1) - parser.add_option("--lowpass", dest="lowpass", type="float", - help="Display low-pass filter corner frequency", - default=-1) - parser.add_option("--filtorder", dest="filtorder", type="int", - help="Display filtering IIR order (or 0 to use FIR)", - default=4) - parser.add_option("--clipping", dest="clipping", - help="Enable trace clipping mode, either 'clamp' or " - "'transparent'", default=_RAW_CLIP_DEF) - parser.add_option("--filterchpi", dest="filterchpi", - help="Enable filtering cHPI signals.", default=None, - action="http://github.com/store_true") + parser = get_optparser(__file__, usage="usage: %prog raw [options]") + + parser.add_option( + "--raw", + dest="raw_in", + help="Input raw FIF file (can also be specified " + "directly as an argument without the --raw prefix)", + metavar="FILE", + ) + parser.add_option( + "--proj", dest="proj_in", help="Projector file", metavar="FILE", default="" + ) + parser.add_option( + "--projoff", + dest="proj_off", + help="Disable all projectors", + default=False, + action="http://github.com/store_true", + ) + parser.add_option( + "--eve", dest="eve_in", help="Events file", metavar="FILE", default="" + ) + parser.add_option( + "-d", + "--duration", + dest="duration", + type="float", + help="Time window for plotting (s)", + default=10.0, + ) + parser.add_option( + "-t", + "--start", + dest="start", + type="float", + help="Initial start time for plotting", + default=0.0, + ) + parser.add_option( + "-n", + "--n_channels", + dest="n_channels", + type="int", + help="Number of channels to plot at a time", + default=20, + ) + parser.add_option( + "-o", + 
"--order", + dest="group_by", + help="Order to use for grouping during plotting ('type' or 'original')", + default="type", + ) + parser.add_option( + "-p", + "--preload", + dest="preload", + help="Preload raw data (for faster navigation)", + default=False, + action="http://github.com/store_true", + ) + parser.add_option( + "-s", + "--show_options", + dest="show_options", + help="Show projection options dialog", + default=False, + ) + parser.add_option( + "--allowmaxshield", + dest="maxshield", + help="Allow loading MaxShield processed data", + action="http://github.com/store_true", + ) + parser.add_option( + "--highpass", + dest="highpass", + type="float", + help="Display high-pass filter corner frequency", + default=-1, + ) + parser.add_option( + "--lowpass", + dest="lowpass", + type="float", + help="Display low-pass filter corner frequency", + default=-1, + ) + parser.add_option( + "--filtorder", + dest="filtorder", + type="int", + help="Display filtering IIR order (or 0 to use FIR)", + default=4, + ) + parser.add_option( + "--clipping", + dest="clipping", + help="Enable trace clipping mode. Can be 'clamp', 'transparent', a float, " + "or 'none'.", + default=_RAW_CLIP_DEF, + ) + parser.add_option( + "--filterchpi", + dest="filterchpi", + help="Enable filtering cHPI signals.", + default=None, + action="http://github.com/store_true", + ) + parser.add_option( + "--butterfly", + dest="butterfly", + help="Plot in butterfly mode", + default=False, + action="http://github.com/store_true", + ) _add_verbose_flag(parser) options, args = parser.parse_args() @@ -97,7 +164,7 @@ def run(): filtorder = options.filtorder clipping = options.clipping if isinstance(clipping, str): - if clipping.lower() == 'none': + if clipping.lower() == "none": clipping = None else: try: @@ -106,6 +173,7 @@ def run(): pass filterchpi = options.filterchpi verbose = options.verbose + butterfly = options.butterfly if raw_in is None: parser.print_help() @@ -113,11 +181,11 @@ def run(): kwargs = dict(preload=preload) if maxshield: - kwargs.update(allow_maxshield='yes') + kwargs.update(allow_maxshield="yes") raw = mne.io.read_raw(raw_in, **kwargs) if len(proj_in) > 0: projs = mne.read_proj(proj_in) - raw.info['projs'] = projs + raw.info["projs"] = projs if len(eve_in) > 0: events = mne.read_events(eve_in) else: @@ -125,17 +193,28 @@ def run(): if filterchpi: if not preload: - raise RuntimeError( - 'Raw data must be preloaded for chpi, use --preload') + raise RuntimeError("Raw data must be preloaded for chpi, use --preload") raw = mne.chpi.filter_chpi(raw) highpass = None if highpass < 0 or filtorder < 0 else highpass lowpass = None if lowpass < 0 or filtorder < 0 else lowpass - raw.plot(duration=duration, start=start, n_channels=n_channels, - group_by=group_by, show_options=show_options, events=events, - highpass=highpass, lowpass=lowpass, filtorder=filtorder, - clipping=clipping, proj=not proj_off, verbose=verbose, - show=True, block=True) + raw.plot( + duration=duration, + start=start, + n_channels=n_channels, + group_by=group_by, + show_options=show_options, + events=events, + highpass=highpass, + lowpass=lowpass, + filtorder=filtorder, + clipping=clipping, + butterfly=butterfly, + proj=not proj_off, + verbose=verbose, + show=True, + block=True, + ) mne.utils.run_command_if_main() diff --git a/mne/commands/mne_bti2fiff.py b/mne/commands/mne_bti2fiff.py index db3c37fcd8c..8a0606444aa 100644 --- a/mne/commands/mne_bti2fiff.py +++ b/mne/commands/mne_bti2fiff.py @@ -1,10 +1,9 @@ 
-#!/usr/bin/env python -r"""Import BTi / 4D MagnesWH3600 data to fif file. +"""Import BTi / 4D MagnesWH3600 data to fif file. Notes ----- 1. Currently direct inclusion of reference channel weights - is not supported. Please use \'mne_create_comp_data\' to include + is not supported. Please use 'mne_create_comp_data' to include the weights or use the low level functions from this module to include them by yourself. 2. The informed guess for the 4D name is E31 for the ECG channel and @@ -20,14 +19,9 @@ """ -# Authors: Denis A. Engemann -# Martin Luessi -# Alexandre Gramfort -# Matti Hämäläinen -# Yuval Harpaz -# -# simplified bsd-3 license - +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import sys @@ -41,29 +35,57 @@ def run(): parser = get_optparser(__file__) - parser.add_option('-p', '--pdf', dest='pdf_fname', - help='Input data file name', metavar='FILE') - parser.add_option('-c', '--config', dest='config_fname', - help='Input config file name', metavar='FILE', - default='config') - parser.add_option('--head_shape', dest='head_shape_fname', - help='Headshape file name', metavar='FILE', - default='hs_file') - parser.add_option('-o', '--out_fname', dest='out_fname', - help='Name of the resulting fiff file', - default='as_data_fname') - parser.add_option('-r', '--rotation_x', dest='rotation_x', type='float', - help='Compensatory rotation about Neuromag x axis, deg', - default=2.0) - parser.add_option('-T', '--translation', dest='translation', type='str', - help='Default translation, meter', - default=(0.00, 0.02, 0.11)) - parser.add_option('--ecg_ch', dest='ecg_ch', type='str', - help='4D ECG channel name', - default='E31') - parser.add_option('--eog_ch', dest='eog_ch', type='str', - help='4D EOG channel names', - default='E63,E64') + parser.add_option( + "-p", "--pdf", dest="pdf_fname", help="Input data file name", metavar="FILE" + ) + parser.add_option( + "-c", + "--config", + dest="config_fname", + help="Input config file name", + metavar="FILE", + default="config", + ) + parser.add_option( + "--head_shape", + dest="head_shape_fname", + help="Headshape file name", + metavar="FILE", + default="hs_file", + ) + parser.add_option( + "-o", + "--out_fname", + dest="out_fname", + help="Name of the resulting fiff file", + default="as_data_fname", + ) + parser.add_option( + "-r", + "--rotation_x", + dest="rotation_x", + type="float", + help="Compensatory rotation about Neuromag x axis, deg", + default=2.0, + ) + parser.add_option( + "-T", + "--translation", + dest="translation", + type="str", + help="Default translation, meter", + default=(0.00, 0.02, 0.11), + ) + parser.add_option( + "--ecg_ch", dest="ecg_ch", type="str", help="4D ECG channel name", default="E31" + ) + parser.add_option( + "--eog_ch", + dest="eog_ch", + type="str", + help="4D EOG channel names", + default="E63,E64", + ) options, args = parser.parse_args() @@ -78,15 +100,20 @@ def run(): rotation_x = options.rotation_x translation = options.translation ecg_ch = options.ecg_ch - eog_ch = options.ecg_ch.split(',') - - if out_fname == 'as_data_fname': - out_fname = pdf_fname + '_raw.fif' - - raw = read_raw_bti(pdf_fname=pdf_fname, config_fname=config_fname, - head_shape_fname=head_shape_fname, - rotation_x=rotation_x, translation=translation, - ecg_ch=ecg_ch, eog_ch=eog_ch) + eog_ch = options.ecg_ch.split(",") + + if out_fname == "as_data_fname": + out_fname = pdf_fname + "_raw.fif" + + raw = read_raw_bti( + pdf_fname=pdf_fname, + config_fname=config_fname, + 
head_shape_fname=head_shape_fname, + rotation_x=rotation_x, + translation=translation, + ecg_ch=ecg_ch, + eog_ch=eog_ch, + ) raw.save(out_fname) raw.close() diff --git a/mne/commands/mne_clean_eog_ecg.py b/mne/commands/mne_clean_eog_ecg.py index f722a9fea52..add6e3830af 100644 --- a/mne/commands/mne_clean_eog_ecg.py +++ b/mne/commands/mne_clean_eog_ecg.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Clean a raw file from EOG and ECG artifacts with PCA (ie SSP). Examples @@ -8,20 +7,28 @@ $ mne clean_eog_ecg -i in_raw.fif -o clean_raw.fif -e -c """ -# Authors : Dr Engr. Sheraz Khan, P.Eng, Ph.D. -# Engr. Nandita Shetty, MS. -# Alexandre Gramfort, Ph.D. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import sys import mne -def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True, - ecg_proj_fname=None, eog_proj_fname=None, - ecg_event_fname=None, eog_event_fname=None, in_path='.', - quiet=False): +def clean_ecg_eog( + in_fif_fname, + out_fif_fname=None, + eog=True, + ecg=True, + ecg_proj_fname=None, + eog_proj_fname=None, + ecg_event_fname=None, + eog_event_fname=None, + in_path=".", + quiet=False, +): """Clean ECG from raw fif file. Parameters @@ -45,65 +52,124 @@ def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True, # Reading fif File raw_in = mne.io.read_raw_fif(in_fif_fname) - if in_fif_fname.endswith('_raw.fif') or in_fif_fname.endswith('-raw.fif'): + if in_fif_fname.endswith("_raw.fif") or in_fif_fname.endswith("-raw.fif"): prefix = in_fif_fname[:-8] else: prefix = in_fif_fname[:-4] if out_fif_fname is None: - out_fif_fname = prefix + '_clean_ecg_eog_raw.fif' + out_fif_fname = prefix + "_clean_ecg_eog_raw.fif" if ecg_proj_fname is None: - ecg_proj_fname = prefix + '_ecg-proj.fif' + ecg_proj_fname = prefix + "_ecg-proj.fif" if eog_proj_fname is None: - eog_proj_fname = prefix + '_eog-proj.fif' + eog_proj_fname = prefix + "_eog-proj.fif" if ecg_event_fname is None: - ecg_event_fname = prefix + '_ecg-eve.fif' + ecg_event_fname = prefix + "_ecg-eve.fif" if eog_event_fname is None: - eog_event_fname = prefix + '_eog-eve.fif' + eog_event_fname = prefix + "_eog-eve.fif" - print('Implementing ECG and EOG artifact rejection on data') + print("Implementing ECG and EOG artifact rejection on data") kwargs = dict() if quiet else dict(stdout=None, stderr=None) if ecg: ecg_events, _, _ = mne.preprocessing.find_ecg_events( - raw_in, reject_by_annotation=True) - print("Writing ECG events in %s" % ecg_event_fname) + raw_in, reject_by_annotation=True + ) + print(f"Writing ECG events in {ecg_event_fname}") mne.write_events(ecg_event_fname, ecg_events) - print('Computing ECG projector') - command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname, - '--events', ecg_event_fname, '--makeproj', - '--projtmin', '-0.08', '--projtmax', '0.08', - '--saveprojtag', '_ecg-proj', '--projnmag', '2', - '--projngrad', '1', '--projevent', '999', '--highpass', '5', - '--lowpass', '35', '--projmagrej', '4000', - '--projgradrej', '3000') + print("Computing ECG projector") + command = ( + "mne_process_raw", + "--cd", + in_path, + "--raw", + in_fif_fname, + "--events", + ecg_event_fname, + "--makeproj", + "--projtmin", + "-0.08", + "--projtmax", + "0.08", + "--saveprojtag", + "_ecg-proj", + "--projnmag", + "2", + "--projngrad", + "1", + "--projevent", + "999", + "--highpass", + "5", + "--lowpass", + "35", + "--projmagrej", + "4000", + "--projgradrej", + "3000", + ) mne.utils.run_subprocess(command, **kwargs) if eog: eog_events = 
mne.preprocessing.find_eog_events(raw_in) - print("Writing EOG events in %s" % eog_event_fname) + print(f"Writing EOG events in {eog_event_fname}") mne.write_events(eog_event_fname, eog_events) - print('Computing EOG projector') - command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname, - '--events', eog_event_fname, '--makeproj', - '--projtmin', '-0.15', '--projtmax', '0.15', - '--saveprojtag', '_eog-proj', '--projnmag', '2', - '--projngrad', '2', '--projevent', '998', '--lowpass', '35', - '--projmagrej', '4000', '--projgradrej', '3000') + print("Computing EOG projector") + command = ( + "mne_process_raw", + "--cd", + in_path, + "--raw", + in_fif_fname, + "--events", + eog_event_fname, + "--makeproj", + "--projtmin", + "-0.15", + "--projtmax", + "0.15", + "--saveprojtag", + "_eog-proj", + "--projnmag", + "2", + "--projngrad", + "2", + "--projevent", + "998", + "--lowpass", + "35", + "--projmagrej", + "4000", + "--projgradrej", + "3000", + ) mne.utils.run_subprocess(command, **kwargs) if out_fif_fname is not None: # Applying the ECG EOG projector - print('Applying ECG EOG projector') - command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname, - '--proj', in_fif_fname, '--projoff', '--save', - out_fif_fname, '--filteroff', - '--proj', ecg_proj_fname, '--proj', eog_proj_fname) + print("Applying ECG EOG projector") + command = ( + "mne_process_raw", + "--cd", + in_path, + "--raw", + in_fif_fname, + "--proj", + in_fif_fname, + "--projoff", + "--save", + out_fif_fname, + "--filteroff", + "--proj", + ecg_proj_fname, + "--proj", + eog_proj_fname, + ) mne.utils.run_subprocess(command, **kwargs) - print('Done removing artifacts.') - print("Cleaned raw data saved in: %s" % out_fif_fname) - print('IMPORTANT : Please eye-ball the data !!') + print("Done removing artifacts.") + print(f"Cleaned raw data saved in: {out_fif_fname}") + print("IMPORTANT : Please eye-ball the data !!") else: - print('Projection not applied to raw data.') + print("Projection not applied to raw data.") def run(): @@ -112,17 +178,41 @@ def run(): parser = get_optparser(__file__) - parser.add_option("-i", "--in", dest="raw_in", - help="Input raw FIF file", metavar="FILE") - parser.add_option("-o", "--out", dest="raw_out", - help="Output raw FIF file", metavar="FILE", - default=None) - parser.add_option("-e", "--no-eog", dest="eog", action="http://github.com/store_false", - help="Remove EOG", default=True) - parser.add_option("-c", "--no-ecg", dest="ecg", action="http://github.com/store_false", - help="Remove ECG", default=True) - parser.add_option("-q", "--quiet", dest="quiet", action="http://github.com/store_true", - help="Suppress mne_process_raw output", default=False) + parser.add_option( + "-i", "--in", dest="raw_in", help="Input raw FIF file", metavar="FILE" + ) + parser.add_option( + "-o", + "--out", + dest="raw_out", + help="Output raw FIF file", + metavar="FILE", + default=None, + ) + parser.add_option( + "-e", + "--no-eog", + dest="eog", + action="http://github.com/store_false", + help="Remove EOG", + default=True, + ) + parser.add_option( + "-c", + "--no-ecg", + dest="ecg", + action="http://github.com/store_false", + help="Remove ECG", + default=True, + ) + parser.add_option( + "-q", + "--quiet", + dest="quiet", + action="http://github.com/store_true", + help="Suppress mne_process_raw output", + default=False, + ) options, args = parser.parse_args() diff --git a/mne/commands/mne_compare_fiff.py b/mne/commands/mne_compare_fiff.py index 
b616a3e4072..c619aa57345 100644 --- a/mne/commands/mne_compare_fiff.py +++ b/mne/commands/mne_compare_fiff.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Compare FIFF files. Examples @@ -9,16 +8,20 @@ """ -# Authors : Eric Larson, PhD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import sys + import mne def run(): """Run command.""" parser = mne.commands.utils.get_optparser( - __file__, usage='mne compare_fiff ') + __file__, usage="mne compare_fiff " + ) options, args = parser.parse_args() if len(args) != 2: parser.print_help() diff --git a/mne/commands/mne_compute_proj_ecg.py b/mne/commands/mne_compute_proj_ecg.py index c42798be3be..42038cf1d60 100644 --- a/mne/commands/mne_compute_proj_ecg.py +++ b/mne/commands/mne_compute_proj_ecg.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python r"""Compute SSP/PCA projections for ECG artifacts. Examples @@ -10,11 +9,14 @@ --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 """ -# Authors : Alexandre Gramfort, Ph.D. -# Martin Luessi, Ph.D. + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import os import sys + import mne @@ -24,97 +26,191 @@ def run(): parser = get_optparser(__file__) - parser.add_option("-i", "--in", dest="raw_in", - help="Input raw FIF file", metavar="FILE") - parser.add_option("--tmin", dest="tmin", type="float", - help="Time before event in seconds", - default=-0.2) - parser.add_option("--tmax", dest="tmax", type="float", - help="Time after event in seconds", - default=0.4) - parser.add_option("-g", "--n-grad", dest="n_grad", type="int", - help="Number of SSP vectors for gradiometers", - default=2) - parser.add_option("-m", "--n-mag", dest="n_mag", type="int", - help="Number of SSP vectors for magnetometers", - default=2) - parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int", - help="Number of SSP vectors for EEG", - default=2) - parser.add_option("--l-freq", dest="l_freq", type="float", - help="Filter low cut-off frequency in Hz", - default=1) - parser.add_option("--h-freq", dest="h_freq", type="float", - help="Filter high cut-off frequency in Hz", - default=100) - parser.add_option("--ecg-l-freq", dest="ecg_l_freq", type="float", - help="Filter low cut-off frequency in Hz used " - "for ECG event detection", - default=5) - parser.add_option("--ecg-h-freq", dest="ecg_h_freq", type="float", - help="Filter high cut-off frequency in Hz used " - "for ECG event detection", - default=35) - parser.add_option("-p", "--preload", dest="preload", - help="Temporary file used during computation " - "(to save memory)", - default=True) - parser.add_option("-a", "--average", dest="average", action="http://github.com/store_true", - help="Compute SSP after averaging", - default=False) - parser.add_option("--proj", dest="proj", - help="Use SSP projections from a fif file.", - default=None) - parser.add_option("--filtersize", dest="filter_length", type="int", - help="Number of taps to use for filtering", - default=2048) - parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int", - help="Number of jobs to run in parallel", - default=1) - parser.add_option("-c", "--channel", dest="ch_name", - help="Channel to use for ECG detection " - "(Required if no ECG found)", - default=None) - parser.add_option("--rej-grad", dest="rej_grad", type="float", - help="Gradiometers rejection parameter " - "in fT/cm (peak to peak amplitude)", - default=2000) - parser.add_option("--rej-mag", dest="rej_mag", type="float", - help="Magnetometers 
rejection parameter " - "in fT (peak to peak amplitude)", - default=3000) - parser.add_option("--rej-eeg", dest="rej_eeg", type="float", - help="EEG rejection parameter in µV " - "(peak to peak amplitude)", - default=50) - parser.add_option("--rej-eog", dest="rej_eog", type="float", - help="EOG rejection parameter in µV " - "(peak to peak amplitude)", - default=250) - parser.add_option("--avg-ref", dest="avg_ref", action="http://github.com/store_true", - help="Add EEG average reference proj", - default=False) - parser.add_option("--no-proj", dest="no_proj", action="http://github.com/store_true", - help="Exclude the SSP projectors currently " - "in the fiff file", - default=False) - parser.add_option("--bad", dest="bad_fname", - help="Text file containing bad channels list " - "(one per line)", - default=None) - parser.add_option("--event-id", dest="event_id", type="int", - help="ID to use for events", - default=999) - parser.add_option("--event-raw", dest="raw_event_fname", - help="raw file to use for event detection", - default=None) - parser.add_option("--tstart", dest="tstart", type="float", - help="Start artifact detection after tstart seconds", - default=0.) - parser.add_option("--qrsthr", dest="qrs_threshold", type="string", - help="QRS detection threshold. Between 0 and 1. Can " - "also be 'auto' for automatic selection", - default='auto') + parser.add_option( + "-i", "--in", dest="raw_in", help="Input raw FIF file", metavar="FILE" + ) + parser.add_option( + "--tmin", + dest="tmin", + type="float", + help="Time before event in seconds", + default=-0.2, + ) + parser.add_option( + "--tmax", + dest="tmax", + type="float", + help="Time after event in seconds", + default=0.4, + ) + parser.add_option( + "-g", + "--n-grad", + dest="n_grad", + type="int", + help="Number of SSP vectors for gradiometers", + default=2, + ) + parser.add_option( + "-m", + "--n-mag", + dest="n_mag", + type="int", + help="Number of SSP vectors for magnetometers", + default=2, + ) + parser.add_option( + "-e", + "--n-eeg", + dest="n_eeg", + type="int", + help="Number of SSP vectors for EEG", + default=2, + ) + parser.add_option( + "--l-freq", + dest="l_freq", + type="float", + help="Filter low cut-off frequency in Hz", + default=1, + ) + parser.add_option( + "--h-freq", + dest="h_freq", + type="float", + help="Filter high cut-off frequency in Hz", + default=100, + ) + parser.add_option( + "--ecg-l-freq", + dest="ecg_l_freq", + type="float", + help="Filter low cut-off frequency in Hz used for ECG event detection", + default=5, + ) + parser.add_option( + "--ecg-h-freq", + dest="ecg_h_freq", + type="float", + help="Filter high cut-off frequency in Hz used for ECG event detection", + default=35, + ) + parser.add_option( + "-p", + "--preload", + dest="preload", + help="Temporary file used during computation (to save memory)", + default=True, + ) + parser.add_option( + "-a", + "--average", + dest="average", + action="http://github.com/store_true", + help="Compute SSP after averaging", + default=False, + ) + parser.add_option( + "--proj", dest="proj", help="Use SSP projections from a fif file.", default=None + ) + parser.add_option( + "--filtersize", + dest="filter_length", + type="int", + help="Number of taps to use for filtering", + default=2048, + ) + parser.add_option( + "-j", + "--n-jobs", + dest="n_jobs", + type="int", + help="Number of jobs to run in parallel", + default=1, + ) + parser.add_option( + "-c", + "--channel", + dest="ch_name", + help="Channel to use for ECG detection (Required 
if no ECG found)", + default=None, + ) + parser.add_option( + "--rej-grad", + dest="rej_grad", + type="float", + help="Gradiometers rejection parameter in fT/cm (peak to peak amplitude)", + default=2000, + ) + parser.add_option( + "--rej-mag", + dest="rej_mag", + type="float", + help="Magnetometers rejection parameter in fT (peak to peak amplitude)", + default=3000, + ) + parser.add_option( + "--rej-eeg", + dest="rej_eeg", + type="float", + help="EEG rejection parameter in µV (peak to peak amplitude)", + default=50, + ) + parser.add_option( + "--rej-eog", + dest="rej_eog", + type="float", + help="EOG rejection parameter in µV (peak to peak amplitude)", + default=250, + ) + parser.add_option( + "--avg-ref", + dest="avg_ref", + action="http://github.com/store_true", + help="Add EEG average reference proj", + default=False, + ) + parser.add_option( + "--no-proj", + dest="no_proj", + action="http://github.com/store_true", + help="Exclude the SSP projectors currently in the fiff file", + default=False, + ) + parser.add_option( + "--bad", + dest="bad_fname", + help="Text file containing bad channels list (one per line)", + default=None, + ) + parser.add_option( + "--event-id", + dest="event_id", + type="int", + help="ID to use for events", + default=999, + ) + parser.add_option( + "--event-raw", + dest="raw_event_fname", + help="raw file to use for event detection", + default=None, + ) + parser.add_option( + "--tstart", + dest="tstart", + type="float", + help="Start artifact detection after tstart seconds", + default=0.0, + ) + parser.add_option( + "--qrsthr", + dest="qrs_threshold", + type="string", + help="QRS detection threshold. Between 0 and 1. Can " + "also be 'auto' for automatic selection", + default="auto", + ) options, args = parser.parse_args() @@ -138,10 +234,12 @@ def run(): filter_length = options.filter_length n_jobs = options.n_jobs ch_name = options.ch_name - reject = dict(grad=1e-13 * float(options.rej_grad), - mag=1e-15 * float(options.rej_mag), - eeg=1e-6 * float(options.rej_eeg), - eog=1e-6 * float(options.rej_eog)) + reject = dict( + grad=1e-13 * float(options.rej_grad), + mag=1e-15 * float(options.rej_mag), + eeg=1e-6 * float(options.rej_eeg), + eog=1e-6 * float(options.rej_eog), + ) avg_ref = options.avg_ref no_proj = options.no_proj bad_fname = options.bad_fname @@ -150,30 +248,30 @@ def run(): raw_event_fname = options.raw_event_fname tstart = options.tstart qrs_threshold = options.qrs_threshold - if qrs_threshold != 'auto': + if qrs_threshold != "auto": try: qrs_threshold = float(qrs_threshold) except ValueError: raise ValueError('qrsthr must be "auto" or a float') if bad_fname is not None: - with open(bad_fname, 'r') as fid: + with open(bad_fname) as fid: bads = [w.rstrip() for w in fid.readlines()] - print('Bad channels read : %s' % bads) + print(f"Bad channels read : {bads}") else: bads = [] - if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'): + if raw_in.endswith("_raw.fif") or raw_in.endswith("-raw.fif"): prefix = raw_in[:-8] else: prefix = raw_in[:-4] - ecg_event_fname = prefix + '_ecg-eve.fif' + ecg_event_fname = prefix + "_ecg-eve.fif" if average: - ecg_proj_fname = prefix + '_ecg_avg-proj.fif' + ecg_proj_fname = prefix + "_ecg_avg-proj.fif" else: - ecg_proj_fname = prefix + '_ecg-proj.fif' + ecg_proj_fname = prefix + "_ecg-proj.fif" raw = mne.io.read_raw_fif(raw_in, preload=preload) @@ -184,10 +282,31 @@ def run(): flat = None projs, events = mne.preprocessing.compute_proj_ecg( - raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg, 
l_freq, h_freq, - average, filter_length, n_jobs, ch_name, reject, flat, bads, avg_ref, - no_proj, event_id, ecg_l_freq, ecg_h_freq, tstart, qrs_threshold, - copy=False) + raw, + raw_event, + tmin, + tmax, + n_grad, + n_mag, + n_eeg, + l_freq, + h_freq, + average, + filter_length, + n_jobs, + ch_name, + reject, + flat, + bads, + avg_ref, + no_proj, + event_id, + ecg_l_freq, + ecg_h_freq, + tstart, + qrs_threshold, + copy=False, + ) raw.close() @@ -195,17 +314,17 @@ def run(): raw_event.close() if proj_fname is not None: - print('Including SSP projections from : %s' % proj_fname) + print(f"Including SSP projections from : {proj_fname}") # append the ecg projs, so they are last in the list projs = mne.read_proj(proj_fname) + projs if isinstance(preload, str) and os.path.exists(preload): os.remove(preload) - print("Writing ECG projections in %s" % ecg_proj_fname) + print(f"Writing ECG projections in {ecg_proj_fname}") mne.write_proj(ecg_proj_fname, projs) - print("Writing ECG events in %s" % ecg_event_fname) + print(f"Writing ECG events in {ecg_event_fname}") mne.write_events(ecg_event_fname, events) diff --git a/mne/commands/mne_compute_proj_eog.py b/mne/commands/mne_compute_proj_eog.py index 3494ffa47af..a1e2679646b 100644 --- a/mne/commands/mne_compute_proj_eog.py +++ b/mne/commands/mne_compute_proj_eog.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python r"""Compute SSP/PCA projections for EOG artifacts. Examples @@ -20,11 +19,14 @@ to exclude ECG artifacts from projection computation. """ -# Authors : Alexandre Gramfort, Ph.D. -# Martin Luessi, Ph.D. + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import os import sys + import mne @@ -34,77 +36,184 @@ def run(): parser = get_optparser(__file__) - parser.add_option("-i", "--in", dest="raw_in", - help="Input raw FIF file", metavar="FILE") - parser.add_option("--tmin", dest="tmin", type="float", - help="Time before event in seconds", default=-0.2) - parser.add_option("--tmax", dest="tmax", type="float", - help="Time after event in seconds", default=0.2) - parser.add_option("-g", "--n-grad", dest="n_grad", type="int", - help="Number of SSP vectors for gradiometers", - default=2) - parser.add_option("-m", "--n-mag", dest="n_mag", type="int", - help="Number of SSP vectors for magnetometers", - default=2) - parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int", - help="Number of SSP vectors for EEG", default=2) - parser.add_option("--l-freq", dest="l_freq", type="float", - help="Filter low cut-off frequency in Hz", - default=1) - parser.add_option("--h-freq", dest="h_freq", type="float", - help="Filter high cut-off frequency in Hz", - default=35) - parser.add_option("--eog-l-freq", dest="eog_l_freq", type="float", - help="Filter low cut-off frequency in Hz used for " - "EOG event detection", default=1) - parser.add_option("--eog-h-freq", dest="eog_h_freq", type="float", - help="Filter high cut-off frequency in Hz used for " - "EOG event detection", default=10) - parser.add_option("-p", "--preload", dest="preload", - help="Temporary file used during computation (to " - "save memory)", default=True) - parser.add_option("-a", "--average", dest="average", action="http://github.com/store_true", - help="Compute SSP after averaging", - default=False) - parser.add_option("--proj", dest="proj", - help="Use SSP projections from a fif file.", - default=None) - parser.add_option("--filtersize", dest="filter_length", type="int", - help="Number of taps to use for filtering", - default=2048) - 
parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int", - help="Number of jobs to run in parallel", default=1) - parser.add_option("--rej-grad", dest="rej_grad", type="float", - help="Gradiometers rejection parameter in fT/cm (peak " - "to peak amplitude)", default=2000) - parser.add_option("--rej-mag", dest="rej_mag", type="float", - help="Magnetometers rejection parameter in fT (peak to " - "peak amplitude)", default=3000) - parser.add_option("--rej-eeg", dest="rej_eeg", type="float", - help="EEG rejection parameter in µV (peak to peak " - "amplitude)", default=50) - parser.add_option("--rej-eog", dest="rej_eog", type="float", - help="EOG rejection parameter in µV (peak to peak " - "amplitude)", default=1e9) - parser.add_option("--avg-ref", dest="avg_ref", action="http://github.com/store_true", - help="Add EEG average reference proj", - default=False) - parser.add_option("--no-proj", dest="no_proj", action="http://github.com/store_true", - help="Exclude the SSP projectors currently in the " - "fiff file", default=False) - parser.add_option("--bad", dest="bad_fname", - help="Text file containing bad channels list " - "(one per line)", default=None) - parser.add_option("--event-id", dest="event_id", type="int", - help="ID to use for events", default=998) - parser.add_option("--event-raw", dest="raw_event_fname", - help="raw file to use for event detection", default=None) - parser.add_option("--tstart", dest="tstart", type="float", - help="Start artifact detection after tstart seconds", - default=0.) - parser.add_option("-c", "--channel", dest="ch_name", type="string", - help="Custom EOG channel(s), comma separated", - default=None) + parser.add_option( + "-i", "--in", dest="raw_in", help="Input raw FIF file", metavar="FILE" + ) + parser.add_option( + "--tmin", + dest="tmin", + type="float", + help="Time before event in seconds", + default=-0.2, + ) + parser.add_option( + "--tmax", + dest="tmax", + type="float", + help="Time after event in seconds", + default=0.2, + ) + parser.add_option( + "-g", + "--n-grad", + dest="n_grad", + type="int", + help="Number of SSP vectors for gradiometers", + default=2, + ) + parser.add_option( + "-m", + "--n-mag", + dest="n_mag", + type="int", + help="Number of SSP vectors for magnetometers", + default=2, + ) + parser.add_option( + "-e", + "--n-eeg", + dest="n_eeg", + type="int", + help="Number of SSP vectors for EEG", + default=2, + ) + parser.add_option( + "--l-freq", + dest="l_freq", + type="float", + help="Filter low cut-off frequency in Hz", + default=1, + ) + parser.add_option( + "--h-freq", + dest="h_freq", + type="float", + help="Filter high cut-off frequency in Hz", + default=35, + ) + parser.add_option( + "--eog-l-freq", + dest="eog_l_freq", + type="float", + help="Filter low cut-off frequency in Hz used for EOG event detection", + default=1, + ) + parser.add_option( + "--eog-h-freq", + dest="eog_h_freq", + type="float", + help="Filter high cut-off frequency in Hz used for EOG event detection", + default=10, + ) + parser.add_option( + "-p", + "--preload", + dest="preload", + help="Temporary file used during computation (to save memory)", + default=True, + ) + parser.add_option( + "-a", + "--average", + dest="average", + action="http://github.com/store_true", + help="Compute SSP after averaging", + default=False, + ) + parser.add_option( + "--proj", dest="proj", help="Use SSP projections from a fif file.", default=None + ) + parser.add_option( + "--filtersize", + dest="filter_length", + type="int", + help="Number of 
taps to use for filtering", + default=2048, + ) + parser.add_option( + "-j", + "--n-jobs", + dest="n_jobs", + type="int", + help="Number of jobs to run in parallel", + default=1, + ) + parser.add_option( + "--rej-grad", + dest="rej_grad", + type="float", + help="Gradiometers rejection parameter in fT/cm (peak to peak amplitude)", + default=2000, + ) + parser.add_option( + "--rej-mag", + dest="rej_mag", + type="float", + help="Magnetometers rejection parameter in fT (peak to peak amplitude)", + default=3000, + ) + parser.add_option( + "--rej-eeg", + dest="rej_eeg", + type="float", + help="EEG rejection parameter in µV (peak to peak amplitude)", + default=50, + ) + parser.add_option( + "--rej-eog", + dest="rej_eog", + type="float", + help="EOG rejection parameter in µV (peak to peak amplitude)", + default=1e9, + ) + parser.add_option( + "--avg-ref", + dest="avg_ref", + action="http://github.com/store_true", + help="Add EEG average reference proj", + default=False, + ) + parser.add_option( + "--no-proj", + dest="no_proj", + action="http://github.com/store_true", + help="Exclude the SSP projectors currently in the fiff file", + default=False, + ) + parser.add_option( + "--bad", + dest="bad_fname", + help="Text file containing bad channels list (one per line)", + default=None, + ) + parser.add_option( + "--event-id", + dest="event_id", + type="int", + help="ID to use for events", + default=998, + ) + parser.add_option( + "--event-raw", + dest="raw_event_fname", + help="raw file to use for event detection", + default=None, + ) + parser.add_option( + "--tstart", + dest="tstart", + type="float", + help="Start artifact detection after tstart seconds", + default=0.0, + ) + parser.add_option( + "-c", + "--channel", + dest="ch_name", + type="string", + help="Custom EOG channel(s), comma separated", + default=None, + ) options, args = parser.parse_args() @@ -127,10 +236,12 @@ def run(): preload = options.preload filter_length = options.filter_length n_jobs = options.n_jobs - reject = dict(grad=1e-13 * float(options.rej_grad), - mag=1e-15 * float(options.rej_mag), - eeg=1e-6 * float(options.rej_eeg), - eog=1e-6 * float(options.rej_eog)) + reject = dict( + grad=1e-13 * float(options.rej_grad), + mag=1e-15 * float(options.rej_mag), + eeg=1e-6 * float(options.rej_eeg), + eog=1e-6 * float(options.rej_eog), + ) avg_ref = options.avg_ref no_proj = options.no_proj bad_fname = options.bad_fname @@ -141,23 +252,23 @@ def run(): ch_name = options.ch_name if bad_fname is not None: - with open(bad_fname, 'r') as fid: + with open(bad_fname) as fid: bads = [w.rstrip() for w in fid.readlines()] - print('Bad channels read : %s' % bads) + print(f"Bad channels read : {bads}") else: bads = [] - if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'): + if raw_in.endswith("_raw.fif") or raw_in.endswith("-raw.fif"): prefix = raw_in[:-8] else: prefix = raw_in[:-4] - eog_event_fname = prefix + '_eog-eve.fif' + eog_event_fname = prefix + "_eog-eve.fif" if average: - eog_proj_fname = prefix + '_eog_avg-proj.fif' + eog_proj_fname = prefix + "_eog_avg-proj.fif" else: - eog_proj_fname = prefix + '_eog-proj.fif' + eog_proj_fname = prefix + "_eog-proj.fif" raw = mne.io.read_raw_fif(raw_in, preload=preload) @@ -168,13 +279,30 @@ def run(): flat = None projs, events = mne.preprocessing.compute_proj_eog( - raw=raw, raw_event=raw_event, tmin=tmin, tmax=tmax, n_grad=n_grad, - n_mag=n_mag, n_eeg=n_eeg, l_freq=l_freq, h_freq=h_freq, - average=average, filter_length=filter_length, - n_jobs=n_jobs, reject=reject, 
flat=flat, bads=bads, - avg_ref=avg_ref, no_proj=no_proj, event_id=event_id, - eog_l_freq=eog_l_freq, eog_h_freq=eog_h_freq, - tstart=tstart, ch_name=ch_name, copy=False) + raw=raw, + raw_event=raw_event, + tmin=tmin, + tmax=tmax, + n_grad=n_grad, + n_mag=n_mag, + n_eeg=n_eeg, + l_freq=l_freq, + h_freq=h_freq, + average=average, + filter_length=filter_length, + n_jobs=n_jobs, + reject=reject, + flat=flat, + bads=bads, + avg_ref=avg_ref, + no_proj=no_proj, + event_id=event_id, + eog_l_freq=eog_l_freq, + eog_h_freq=eog_h_freq, + tstart=tstart, + ch_name=ch_name, + copy=False, + ) raw.close() @@ -182,20 +310,20 @@ def run(): raw_event.close() if proj_fname is not None: - print('Including SSP projections from : %s' % proj_fname) + print(f"Including SSP projections from : {proj_fname}") # append the eog projs, so they are last in the list projs = mne.read_proj(proj_fname) + projs if isinstance(preload, str) and os.path.exists(preload): os.remove(preload) - print("Writing EOG projections in %s" % eog_proj_fname) + print(f"Writing EOG projections in {eog_proj_fname}") mne.write_proj(eog_proj_fname, projs) - print("Writing EOG events in %s" % eog_event_fname) + print(f"Writing EOG events in {eog_event_fname}") mne.write_events(eog_event_fname, events) -is_main = (__name__ == '__main__') +is_main = __name__ == "__main__" if is_main: run() diff --git a/mne/commands/mne_coreg.py b/mne/commands/mne_coreg.py index 0e25c1f44de..d288c7b53db 100644 --- a/mne/commands/mne_coreg.py +++ b/mne/commands/mne_coreg.py @@ -1,5 +1,6 @@ -#!/usr/bin/env python -# Authors: Christian Brodbeck +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. """Open the coregistration GUI. @@ -18,55 +19,70 @@ def run(): """Run command.""" - from mne.commands.utils import get_optparser, _add_verbose_flag + from mne.commands.utils import _add_verbose_flag, get_optparser parser = get_optparser(__file__) - parser.add_option("-d", "--subjects-dir", dest="subjects_dir", - default=None, help="Subjects directory") - parser.add_option("-s", "--subject", dest="subject", default=None, - help="Subject name") - parser.add_option("-f", "--fiff", dest="inst", default=None, - help="FIFF file with digitizer data for coregistration") - parser.add_option("-t", "--tabbed", dest="tabbed", action="http://github.com/store_true", - default=False, help="Option for small screens: Combine " - "the data source panel and the coregistration panel " - "into a single panel with tabs.") - parser.add_option("--no-guess-mri", dest="guess_mri_subject", - action='http://github.com/store_false', default=None, - help="Prevent the GUI from automatically guessing and " - "changing the MRI subject when a new head shape source " - "file is selected.") - parser.add_option("--head-opacity", type=float, default=None, - dest="head_opacity", - help="The opacity of the head surface, in the range " - "[0, 1].") - parser.add_option("--high-res-head", - action='http://github.com/store_true', default=False, dest="high_res_head", - help="Use a high-resolution head surface.") - parser.add_option("--low-res-head", - action='http://github.com/store_true', default=False, dest="low_res_head", - help="Use a low-resolution head surface.") - parser.add_option('--trans', dest='trans', default=None, - help='Head<->MRI transform FIF file ("-trans.fif")') - parser.add_option('--interaction', - type=str, default=None, dest='interaction', - help='Interaction style to use, can be "trackball" or ' - 
'"terrain".') - parser.add_option('--scale', - type=float, default=None, dest='scale', - help='Scale factor for the scene.') - parser.add_option('--simple-rendering', action='http://github.com/store_false', - dest='advanced_rendering', - help='Use simplified OpenGL rendering') + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + default=None, + help="Subjects directory", + ) + parser.add_option( + "-s", "--subject", dest="subject", default=None, help="Subject name" + ) + parser.add_option( + "-f", + "--fiff", + dest="inst", + default=None, + help="FIFF file with digitizer data for coregistration", + ) + parser.add_option( + "--head-opacity", + type=float, + default=None, + dest="head_opacity", + help="The opacity of the head surface, in the range [0, 1].", + ) + parser.add_option( + "--high-res-head", + action="http://github.com/store_true", + default=False, + dest="high_res_head", + help="Use a high-resolution head surface.", + ) + parser.add_option( + "--low-res-head", + action="http://github.com/store_true", + default=False, + dest="low_res_head", + help="Use a low-resolution head surface.", + ) + parser.add_option( + "--trans", + dest="trans", + default=None, + help='Head<->MRI transform FIF file ("-trans.fif")', + ) + parser.add_option( + "--interaction", + type=str, + default=None, + dest="interaction", + help='Interaction style to use, can be "trackball" or "terrain".', + ) _add_verbose_flag(parser) options, args = parser.parse_args() if options.low_res_head: if options.high_res_head: - raise ValueError("Can't specify --high-res-head and " - "--low-res-head at the same time.") + raise ValueError( + "Can't specify --high-res-head and --low-res-head at the same time." + ) head_high_res = False elif options.high_res_head: head_high_res = True @@ -81,18 +97,20 @@ def run(): if trans is not None: trans = op.expanduser(trans) import faulthandler + faulthandler.enable() mne.gui.coregistration( - options.tabbed, inst=options.inst, subject=options.subject, + inst=options.inst, + subject=options.subject, subjects_dir=subjects_dir, - guess_mri_subject=options.guess_mri_subject, - head_opacity=options.head_opacity, head_high_res=head_high_res, - trans=trans, scrollable=True, + head_opacity=options.head_opacity, + head_high_res=head_high_res, + trans=trans, interaction=options.interaction, - scale=options.scale, - advanced_rendering=options.advanced_rendering, - show=True, block=True, - verbose=options.verbose) + show=True, + block=True, + verbose=options.verbose, + ) mne.utils.run_command_if_main() diff --git a/mne/commands/mne_flash_bem.py b/mne/commands/mne_flash_bem.py index 3556b58a78d..63bcb79d9d8 100644 --- a/mne/commands/mne_flash_bem.py +++ b/mne/commands/mne_flash_bem.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Create 3-layer BEM model from Flash MRI images. Examples @@ -22,8 +21,10 @@ should be, as usual, in the subject's mri directory. """ # noqa E501 -# Authors: Lorenzo De Santis -# Alexandre Gramfort + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
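For orientation, the `mne flash_bem` command being reformatted here is a thin wrapper around two helpers whose keyword arguments appear verbatim later in this patch; a minimal, hedged sketch of the equivalent Python calls (subject name, subjects directory, and file names are placeholders, not values from the patch):

    from mne.bem import convert_flash_mris, make_flash_bem

    # Convert the multi-echo FLASH 5 acquisitions first (placeholder inputs),
    # then build the 3-layer BEM surfaces from the resulting image.
    flash5_img = convert_flash_mris(
        subject="sample", subjects_dir=None, flash5=["flash5_001.mgz"],
        flash30=None, unwarp=False, verbose=True,
    )
    make_flash_bem(
        subject="sample", subjects_dir=None, overwrite=False, show=False,
        copy=True, register=True, flash5_img=flash5_img, verbose=True,
    )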
import mne from mne.bem import convert_flash_mris, make_flash_bem @@ -31,6 +32,7 @@ def _vararg_callback(option, opt_str, value, parser): assert value is None + del opt_str # required for input but not used value = [] for arg in parser.rargs: @@ -41,7 +43,7 @@ def _vararg_callback(option, opt_str, value, parser): break value.append(arg) - del parser.rargs[:len(value)] + del parser.rargs[: len(value)] setattr(parser.values, option.dest, value) @@ -51,45 +53,81 @@ def run(): parser = get_optparser(__file__) - parser.add_option("-s", "--subject", dest="subject", - help="Subject name", default=None) - parser.add_option("-d", "--subjects-dir", dest="subjects_dir", - help="Subjects directory", default=None) - parser.add_option("-3", "--flash30", "--noflash30", dest="flash30", - action="http://github.com/callback", callback=_vararg_callback, - help=("The 30-degree flip angle data. If no argument do " - "not use flash30. If arguments are given, them as " - "file names.")) - parser.add_option("-5", "--flash5", dest="flash5", - action="http://github.com/callback", callback=_vararg_callback, - help=("Path to the multiecho flash 5 images. " - "Can be one file or one per echo."),) - parser.add_option("-r", "--registered", dest="registered", - action="http://github.com/store_true", default=False, - help=("Set if the Flash MRI images have already " - "been registered with the T1.mgz file.")) - parser.add_option("-n", "--noconvert", dest="noconvert", - action="http://github.com/store_true", default=False, - help=("[DEPRECATED] Assume that the Flash MRI images " - "have already been converted to mgz files")) - parser.add_option("-u", "--unwarp", dest="unwarp", - action="http://github.com/store_true", default=False, - help=("Run grad_unwarp with -unwarp " - "option on each of the converted data sets")) - parser.add_option("-o", "--overwrite", dest="overwrite", - action="http://github.com/store_true", default=False, - help="Write over existing .surf files in bem folder") - parser.add_option("-v", "--view", dest="show", action="http://github.com/store_true", - help="Show BEM model in 3D for visual inspection", - default=False) - parser.add_option("--copy", dest="copy", - help="Use copies instead of symlinks for surfaces", - action="http://github.com/store_true") - parser.add_option("-p", "--flash-path", dest="flash_path", - default=None, - help="[DEPRECATED] The directory containing flash5.mgz " - "files (defaults to " - "$SUBJECTS_DIR/$SUBJECT/mri/flash/parameter_maps") + parser.add_option( + "-s", "--subject", dest="subject", help="Subject name", default=None + ) + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + help="Subjects directory", + default=None, + ) + parser.add_option( + "-3", + "--flash30", + "--noflash30", + dest="flash30", + action="http://github.com/callback", + callback=_vararg_callback, + help=( + "The 30-degree flip angle data. If no argument do " + "not use flash30. If arguments are given, them as " + "file names." + ), + ) + parser.add_option( + "-5", + "--flash5", + dest="flash5", + action="http://github.com/callback", + callback=_vararg_callback, + help=("Path to the multiecho flash 5 images. Can be one file or one per echo."), + ) + parser.add_option( + "-r", + "--registered", + dest="registered", + action="http://github.com/store_true", + default=False, + help=( + "Set if the Flash MRI images have already " + "been registered with the T1.mgz file." 
+ ), + ) + parser.add_option( + "-u", + "--unwarp", + dest="unwarp", + action="http://github.com/store_true", + default=False, + help=( + "Run grad_unwarp with -unwarp " + "option on each of the converted data sets" + ), + ) + parser.add_option( + "-o", + "--overwrite", + dest="overwrite", + action="http://github.com/store_true", + default=False, + help="Write over existing .surf files in bem folder", + ) + parser.add_option( + "-v", + "--view", + dest="show", + action="http://github.com/store_true", + help="Show BEM model in 3D for visual inspection", + default=False, + ) + parser.add_option( + "--copy", + dest="copy", + help="Use copies instead of symlinks for surfaces", + action="http://github.com/store_true", + ) options, _ = parser.parse_args() @@ -111,15 +149,26 @@ def run(): if options.subject is None: parser.print_help() - raise RuntimeError('The subject argument must be set') + raise RuntimeError("The subject argument must be set") flash5_img = convert_flash_mris( - subject=subject, subjects_dir=subjects_dir, flash5=flash5, - flash30=flash30, unwarp=unwarp, verbose=True + subject=subject, + subjects_dir=subjects_dir, + flash5=flash5, + flash30=flash30, + unwarp=unwarp, + verbose=True, + ) + make_flash_bem( + subject=subject, + subjects_dir=subjects_dir, + overwrite=overwrite, + show=show, + copy=copy, + register=register, + flash5_img=flash5_img, + verbose=True, ) - make_flash_bem(subject=subject, subjects_dir=subjects_dir, - overwrite=overwrite, show=show, copy=copy, - register=register, flash5_img=flash5_img, verbose=True) mne.utils.run_command_if_main() diff --git a/mne/commands/mne_freeview_bem_surfaces.py b/mne/commands/mne_freeview_bem_surfaces.py index f5a65d9fb79..4edf1465a8a 100644 --- a/mne/commands/mne_freeview_bem_surfaces.py +++ b/mne/commands/mne_freeview_bem_surfaces.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """View the 3-Layers BEM model using Freeview. Examples @@ -8,17 +7,20 @@ $ mne freeview_bem_surfaces -s sample """ -# Authors: Alexandre Gramfort -import sys +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + import os import os.path as op +import sys import mne -from mne.utils import run_subprocess, get_subjects_dir +from mne.utils import get_subjects_dir, run_subprocess -def freeview_bem_surfaces(subject, subjects_dir, method): +def freeview_bem_surfaces(subject, subjects_dir, method=None): """View 3-Layers BEM model with Freeview. Parameters @@ -27,8 +29,9 @@ def freeview_bem_surfaces(subject, subjects_dir, method): Subject name subjects_dir : path-like Directory containing subjects data (Freesurfer SUBJECTS_DIR) - method : str - Can be ``'flash'`` or ``'watershed'``. + method : str | None + Can be ``'flash'`` or ``'watershed'``, or None to use the ``bem/`` directory + files. """ subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) @@ -38,39 +41,39 @@ def freeview_bem_surfaces(subject, subjects_dir, method): subject_dir = op.join(subjects_dir, subject) if not op.isdir(subject_dir): - raise ValueError("Wrong path: '{}'. Check subjects-dir or" - "subject argument.".format(subject_dir)) + raise ValueError( + f"Wrong path: '{subject_dir}'. Check subjects-dir or subject argument." 
+ ) env = os.environ.copy() - env['SUBJECT'] = subject - env['SUBJECTS_DIR'] = subjects_dir + env["SUBJECT"] = subject + env["SUBJECTS_DIR"] = subjects_dir - if 'FREESURFER_HOME' not in env: - raise RuntimeError('The FreeSurfer environment needs to be set up.') + if "FREESURFER_HOME" not in env: + raise RuntimeError("The FreeSurfer environment needs to be set up.") - mri_dir = op.join(subject_dir, 'mri') - bem_dir = op.join(subject_dir, 'bem') - mri = op.join(mri_dir, 'T1.mgz') + mri_dir = op.join(subject_dir, "mri") + bem_dir = op.join(subject_dir, "bem") + mri = op.join(mri_dir, "T1.mgz") - if method == 'watershed': - bem_dir = op.join(bem_dir, 'watershed') - outer_skin = op.join(bem_dir, '%s_outer_skin_surface' % subject) - outer_skull = op.join(bem_dir, '%s_outer_skull_surface' % subject) - inner_skull = op.join(bem_dir, '%s_inner_skull_surface' % subject) + if method == "watershed": + bem_dir = op.join(bem_dir, "watershed") + outer_skin = op.join(bem_dir, f"{subject}_outer_skin_surface") + outer_skull = op.join(bem_dir, f"{subject}_outer_skull_surface") + inner_skull = op.join(bem_dir, f"{subject}_inner_skull_surface") else: - if method == 'flash': - bem_dir = op.join(bem_dir, 'flash') - outer_skin = op.join(bem_dir, 'outer_skin.surf') - outer_skull = op.join(bem_dir, 'outer_skull.surf') - inner_skull = op.join(bem_dir, 'inner_skull.surf') + if method == "flash": + bem_dir = op.join(bem_dir, "flash") + outer_skin = op.join(bem_dir, "outer_skin.surf") + outer_skull = op.join(bem_dir, "outer_skull.surf") + inner_skull = op.join(bem_dir, "inner_skull.surf") # put together the command - cmd = ['freeview'] + cmd = ["freeview"] cmd += ["--volume", mri] - cmd += ["--surface", "%s:color=red:edgecolor=red" % inner_skull] - cmd += ["--surface", "%s:color=yellow:edgecolor=yellow" % outer_skull] - cmd += ["--surface", - "%s:color=255,170,127:edgecolor=255,170,127" % outer_skin] + cmd += ["--surface", f"{inner_skull}:color=red:edgecolor=red"] + cmd += ["--surface", f"{outer_skull}:color=yellow:edgecolor=yellow"] + cmd += ["--surface", f"{outer_skin}:color=255,170,127:edgecolor=255,170,127"] run_subprocess(cmd, env=env, stdout=sys.stdout) print("[done]") @@ -82,18 +85,22 @@ def run(): parser = get_optparser(__file__) - subject = os.environ.get('SUBJECT') - subjects_dir = get_subjects_dir() - if subjects_dir is not None: - subjects_dir = str(subjects_dir) - - parser.add_option("-s", "--subject", dest="subject", - help="Subject name", default=subject) - parser.add_option("-d", "--subjects-dir", dest="subjects_dir", - help="Subjects directory", default=subjects_dir) - parser.add_option("-m", "--method", dest="method", - help=("Method used to generate the BEM model. " - "Can be flash or watershed.")) + subject = os.environ.get("SUBJECT") + parser.add_option( + "-s", "--subject", dest="subject", help="Subject name", default=subject + ) + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + help="Subjects directory", + ) + parser.add_option( + "-m", + "--method", + dest="method", + help="Method used to generate the BEM model. Can be flash or watershed.", + ) options, args = parser.parse_args() diff --git a/mne/commands/mne_kit2fiff.py b/mne/commands/mne_kit2fiff.py index 1317a154c8c..a6874fecf05 100644 --- a/mne/commands/mne_kit2fiff.py +++ b/mne/commands/mne_kit2fiff.py @@ -1,5 +1,6 @@ -#!/usr/bin/env python -# Authors: Teon Brooks +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. """Import KIT / NYU data to fif file. 
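Between the header hunk above and the option hunk below, it may help to see the Python API that `mne kit2fiff` ultimately forwards to; a minimal sketch using placeholder file names and trigger channels (only keyword arguments that appear in this patch are used):

    import mne

    # Placeholder KIT recording and coregistration files; a command-line value
    # like '--stim 161:162' becomes a list of ints after the split-on-':' step
    # shown further down in this patch.
    raw = mne.io.read_raw_kit(
        input_fname="data.sqd", mrk="marker.mrk", elp="points.elp",
        hsp="headshape.hsp", stim=[161, 162], slope="-", stimthresh=1,
    )
    raw.save("data_raw.fif")
    raw.close()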
@@ -29,33 +30,50 @@ def run(): parser = get_optparser(__file__) - parser.add_option('--input', dest='input_fname', - help='Input data file name', metavar='filename') - parser.add_option('--mrk', dest='mrk_fname', - help='MEG Marker file name', metavar='filename') - parser.add_option('--elp', dest='elp_fname', - help='Headshape points file name', metavar='filename') - parser.add_option('--hsp', dest='hsp_fname', - help='Headshape file name', metavar='filename') - parser.add_option('--stim', dest='stim', - help='Colon Separated Stimulus Trigger Channels', - metavar='chs') - parser.add_option('--slope', dest='slope', help='Slope direction', - metavar='slope') - parser.add_option('--stimthresh', dest='stimthresh', default=1, - help='Threshold value for trigger channels', - metavar='value') - parser.add_option('--output', dest='out_fname', - help='Name of the resulting fiff file', - metavar='filename') - parser.add_option('--debug', dest='debug', action='http://github.com/store_true', - default=False, - help='Set logging level for terminal output to debug') + parser.add_option( + "--input", dest="input_fname", help="Input data file name", metavar="filename" + ) + parser.add_option( + "--mrk", dest="mrk_fname", help="MEG Marker file name", metavar="filename" + ) + parser.add_option( + "--elp", dest="elp_fname", help="Headshape points file name", metavar="filename" + ) + parser.add_option( + "--hsp", dest="hsp_fname", help="Headshape file name", metavar="filename" + ) + parser.add_option( + "--stim", + dest="stim", + help="Colon Separated Stimulus Trigger Channels", + metavar="chs", + ) + parser.add_option("--slope", dest="slope", help="Slope direction", metavar="slope") + parser.add_option( + "--stimthresh", + dest="stimthresh", + default=1, + help="Threshold value for trigger channels", + metavar="value", + ) + parser.add_option( + "--output", + dest="out_fname", + help="Name of the resulting fiff file", + metavar="filename", + ) + parser.add_option( + "--debug", + dest="debug", + action="http://github.com/store_true", + default=False, + help="Set logging level for terminal output to debug", + ) options, args = parser.parse_args() if options.debug: - mne.set_log_level('debug') + mne.set_log_level("debug") input_fname = options.input_fname if input_fname is None: @@ -63,8 +81,8 @@ def run(): from mne_kit_gui import kit2fiff # noqa except ImportError: raise ImportError( - 'The mne-kit-gui package is required, install it using ' - 'conda or pip') from None + "The mne-kit-gui package is required, install it using conda or pip" + ) from None kit2fiff() sys.exit(0) @@ -77,11 +95,17 @@ def run(): out_fname = options.out_fname if isinstance(stim, str): - stim = map(int, stim.split(':')) - - raw = read_raw_kit(input_fname=input_fname, mrk=mrk_fname, elp=elp_fname, - hsp=hsp_fname, stim=stim, slope=slope, - stimthresh=stimthresh) + stim = map(int, stim.split(":")) + + raw = read_raw_kit( + input_fname=input_fname, + mrk=mrk_fname, + elp=elp_fname, + hsp=hsp_fname, + stim=stim, + slope=slope, + stimthresh=stimthresh, + ) raw.save(out_fname) raw.close() diff --git a/mne/commands/mne_make_scalp_surfaces.py b/mne/commands/mne_make_scalp_surfaces.py index 9da7941384c..894ede7fa1a 100644 --- a/mne/commands/mne_make_scalp_surfaces.py +++ b/mne/commands/mne_make_scalp_surfaces.py @@ -1,10 +1,6 @@ -#!/usr/bin/env python - -# Authors: Denis A. Engemann -# Alexandre Gramfort -# Matti Hämäläinen -# -# simplified bsd-3 license +# Authors: The MNE-Python contributors. 
+# License: BSD-3-Clause +# Copyright the MNE-Python contributors. """Create high-resolution head surfaces for coordinate alignment. @@ -15,6 +11,7 @@ $ mne make_scalp_surfaces --overwrite --subject sample """ + import os import sys @@ -24,32 +21,62 @@ def run(): """Run command.""" - from mne.commands.utils import get_optparser, _add_verbose_flag + from mne.commands.utils import _add_verbose_flag, get_optparser parser = get_optparser(__file__) - subjects_dir = mne.get_config('SUBJECTS_DIR') + subjects_dir = mne.get_config("SUBJECTS_DIR") - parser.add_option('-o', '--overwrite', dest='overwrite', - action='http://github.com/store_true', - help='Overwrite previously computed surface') - parser.add_option('-s', '--subject', dest='subject', - help='The name of the subject', type='str') - parser.add_option('-m', '--mri', dest='mri', type='str', default='T1.mgz', - help='The MRI file to process using mkheadsurf.') - parser.add_option('-f', '--force', dest='force', action='http://github.com/store_true', - help='Force creation of the surface even if it has ' - 'some topological defects.') - parser.add_option('-t', '--threshold', dest='threshold', type='int', - default=20, help='Threshold value to use with the MRI.') - parser.add_option("-d", "--subjects-dir", dest="subjects_dir", - help="Subjects directory", default=subjects_dir) - parser.add_option("-n", "--no-decimate", dest="no_decimate", - help="Disable medium and sparse decimations " - "(dense only)", action='http://github.com/store_true') + parser.add_option( + "-o", + "--overwrite", + dest="overwrite", + action="http://github.com/store_true", + help="Overwrite previously computed surface", + ) + parser.add_option( + "-s", "--subject", dest="subject", help="The name of the subject", type="str" + ) + parser.add_option( + "-m", + "--mri", + dest="mri", + type="str", + default="T1.mgz", + help="The MRI file to process using mkheadsurf.", + ) + parser.add_option( + "-f", + "--force", + dest="force", + action="http://github.com/store_true", + help="Force creation of the surface even if it has some topological defects.", + ) + parser.add_option( + "-t", + "--threshold", + dest="threshold", + type="int", + default=20, + help="Threshold value to use with the MRI.", + ) + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + help="Subjects directory", + default=subjects_dir, + ) + parser.add_option( + "-n", + "--no-decimate", + dest="no_decimate", + help="Disable medium and sparse decimations (dense only)", + action="http://github.com/store_true", + ) _add_verbose_flag(parser) options, args = parser.parse_args() - subject = vars(options).get('subject', os.getenv('SUBJECT')) + subject = vars(options).get("subject", os.getenv("SUBJECT")) subjects_dir = options.subjects_dir if subject is None or subjects_dir is None: parser.print_help() @@ -62,7 +89,8 @@ def run(): no_decimate=options.no_decimate, threshold=options.threshold, mri=options.mri, - verbose=options.verbose) + verbose=options.verbose, + ) mne.utils.run_command_if_main() diff --git a/mne/commands/mne_maxfilter.py b/mne/commands/mne_maxfilter.py deleted file mode 100644 index 4825b4d5553..00000000000 --- a/mne/commands/mne_maxfilter.py +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env python -"""Apply MaxFilter. - -Examples --------- -.. code-block:: console - - $ mne maxfilter -i sample_audvis_raw.fif --st - -This will apply MaxFilter with the MaxSt extension. 
The origin used -by MaxFilter is computed by mne-python by fitting a sphere to the -headshape points. -""" - -# Authors : Martin Luessi - -import sys -import os -import mne - - -def run(): - """Run command.""" - from mne.commands.utils import get_optparser - - parser = get_optparser(__file__) - - parser.add_option("-i", "--in", dest="in_fname", - help="Input raw FIF file", metavar="FILE") - parser.add_option("-o", dest="out_fname", - help="Output FIF file (if not set, suffix '_sss' will " - "be used)", metavar="FILE", default=None) - parser.add_option("--origin", dest="origin", - help="Head origin in mm, or a filename to read the " - "origin from. If not set it will be estimated from " - "headshape points", default=None) - parser.add_option("--origin-out", dest="origin_out", - help="Filename to use for computed origin", default=None) - parser.add_option("--frame", dest="frame", type="string", - help="Coordinate frame for head center ('device' or " - "'head')", default="device") - parser.add_option("--bad", dest="bad", type="string", - help="List of static bad channels", - default=None) - parser.add_option("--autobad", dest="autobad", type="string", - help="Set automated bad channel detection ('on', 'off', " - "'n')", default="off") - parser.add_option("--skip", dest="skip", - help="Skips raw data sequences, time intervals pairs in " - "s, e.g.: 0 30 120 150", default=None) - parser.add_option("--force", dest="force", action="http://github.com/store_true", - help="Ignore program warnings", - default=False) - parser.add_option("--st", dest="st", action="http://github.com/store_true", - help="Apply the time-domain MaxST extension", - default=False) - parser.add_option("--buflen", dest="st_buflen", type="float", - help="MaxSt buffer length in s", - default=16.0) - parser.add_option("--corr", dest="st_corr", type="float", - help="MaxSt subspace correlation", - default=0.96) - parser.add_option("--trans", dest="mv_trans", - help="Transforms the data into the coil definitions of " - "in_fname, or into the default frame", default=None) - parser.add_option("--movecomp", dest="mv_comp", action="http://github.com/store_true", - help="Estimates and compensates head movements in " - "continuous raw data", default=False) - parser.add_option("--headpos", dest="mv_headpos", action="http://github.com/store_true", - help="Estimates and stores head position parameters, " - "but does not compensate movements", default=False) - parser.add_option("--hp", dest="mv_hp", type="string", - help="Stores head position data in an ascii file", - default=None) - parser.add_option("--hpistep", dest="mv_hpistep", type="float", - help="Sets head position update interval in ms", - default=None) - parser.add_option("--hpisubt", dest="mv_hpisubt", type="string", - help="Subtracts hpi signals: sine amplitudes, amp + " - "baseline, or switch off", default=None) - parser.add_option("--nohpicons", dest="mv_hpicons", action="http://github.com/store_false", - help="Do not check initial consistency isotrak vs " - "hpifit", default=True) - parser.add_option("--linefreq", dest="linefreq", type="float", - help="Sets the basic line interference frequency (50 or " - "60 Hz)", default=None) - parser.add_option("--nooverwrite", dest="overwrite", action="http://github.com/store_false", - help="Do not overwrite output file if it already exists", - default=True) - parser.add_option("--args", dest="mx_args", type="string", - help="Additional command line arguments to pass to " - "MaxFilter", 
default="") - - options, args = parser.parse_args() - - in_fname = options.in_fname - - if in_fname is None: - parser.print_help() - sys.exit(1) - - out_fname = options.out_fname - origin = options.origin - origin_out = options.origin_out - frame = options.frame - bad = options.bad - autobad = options.autobad - skip = options.skip - force = options.force - st = options.st - st_buflen = options.st_buflen - st_corr = options.st_corr - mv_trans = options.mv_trans - mv_comp = options.mv_comp - mv_headpos = options.mv_headpos - mv_hp = options.mv_hp - mv_hpistep = options.mv_hpistep - mv_hpisubt = options.mv_hpisubt - mv_hpicons = options.mv_hpicons - linefreq = options.linefreq - overwrite = options.overwrite - mx_args = options.mx_args - - if in_fname.endswith('_raw.fif') or in_fname.endswith('-raw.fif'): - prefix = in_fname[:-8] - else: - prefix = in_fname[:-4] - - if out_fname is None: - if st: - out_fname = prefix + '_tsss.fif' - else: - out_fname = prefix + '_sss.fif' - - if origin is not None and os.path.exists(origin): - with open(origin, 'r') as fid: - origin = fid.readlines()[0].strip() - - origin = mne.preprocessing.apply_maxfilter( - in_fname, out_fname, origin, frame, - bad, autobad, skip, force, st, st_buflen, st_corr, mv_trans, - mv_comp, mv_headpos, mv_hp, mv_hpistep, mv_hpisubt, mv_hpicons, - linefreq, mx_args, overwrite) - - if origin_out is not None: - with open(origin_out, 'w') as fid: - fid.write(origin + '\n') - - -mne.utils.run_command_if_main() diff --git a/mne/commands/mne_prepare_bem_model.py b/mne/commands/mne_prepare_bem_model.py index da308bb737e..3a830e5ec27 100644 --- a/mne/commands/mne_prepare_bem_model.py +++ b/mne/commands/mne_prepare_bem_model.py @@ -1,4 +1,7 @@ -#!/usr/bin/env python +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Create a BEM solution using the linear collocation approach. Examples @@ -9,29 +12,37 @@ """ -import sys import os +import sys + import mne def run(): """Run command.""" - from mne.commands.utils import get_optparser, _add_verbose_flag + from mne.commands.utils import _add_verbose_flag, get_optparser parser = get_optparser(__file__) - parser.add_option('--bem', dest='bem_fname', - help='The name of the file containing the ' - 'triangulations of the BEM surfaces and the ' - 'conductivities of the compartments. The standard ' - 'ending for this file is -bem.fif.', - metavar="FILE") - parser.add_option('--sol', dest='bem_sol_fname', - help='The name of the resulting file containing BEM ' - 'solution (geometry matrix). It uses the linear ' - 'collocation approach. The file should end with ' - '-bem-sof.fif.', - metavar='FILE', default=None) + parser.add_option( + "--bem", + dest="bem_fname", + help="The name of the file containing the " + "triangulations of the BEM surfaces and the " + "conductivities of the compartments. The standard " + "ending for this file is -bem.fif.", + metavar="FILE", + ) + parser.add_option( + "--sol", + dest="bem_sol_fname", + help="The name of the resulting file containing BEM " + "solution (geometry matrix). It uses the linear " + "collocation approach. 
The file should end with " + "-bem-sof.fif.", + metavar="FILE", + default=None, + ) _add_verbose_flag(parser) options, args = parser.parse_args() @@ -45,10 +56,9 @@ def run(): if bem_sol_fname is None: base, _ = os.path.splitext(bem_fname) - bem_sol_fname = base + '-sol.fif' + bem_sol_fname = base + "-sol.fif" - bem_model = mne.read_bem_surfaces(bem_fname, patch_stats=False, - verbose=verbose) + bem_model = mne.read_bem_surfaces(bem_fname, patch_stats=False, verbose=verbose) bem_solution = mne.make_bem_solution(bem_model, verbose=verbose) mne.write_bem_solution(bem_sol_fname, bem_solution) diff --git a/mne/commands/mne_report.py b/mne/commands/mne_report.py index 2d96570f26f..eec37c445c7 100644 --- a/mne/commands/mne_report.py +++ b/mne/commands/mne_report.py @@ -1,4 +1,7 @@ -#!/usr/bin/env python +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + r"""Create mne report for a folder. Examples @@ -72,51 +75,87 @@ import mne from mne.report import Report -from mne.utils import verbose, logger +from mne.utils import logger, verbose @verbose def log_elapsed(t, verbose=None): """Log elapsed time.""" - logger.info('Report complete in %s seconds' % round(t, 1)) + logger.info(f"Report complete in {round(t, 1)} seconds") def run(): """Run command.""" - from mne.commands.utils import get_optparser, _add_verbose_flag + from mne.commands.utils import _add_verbose_flag, get_optparser parser = get_optparser(__file__) - parser.add_option("-p", "--path", dest="path", - help="Path to folder who MNE-Report must be created") - parser.add_option("-i", "--info", dest="info_fname", - help="File from which info dictionary is to be read", - metavar="FILE") - parser.add_option("-c", "--cov", dest="cov_fname", - help="File from which noise covariance is to be read", - metavar="FILE") - parser.add_option("--bmin", dest="bmin", - help="Time at which baseline correction starts for " - "evokeds", default=None) - parser.add_option("--bmax", dest="bmax", - help="Time at which baseline correction stops for " - "evokeds", default=None) - parser.add_option("-d", "--subjects-dir", dest="subjects_dir", - help="The subjects directory") - parser.add_option("-s", "--subject", dest="subject", - help="The subject name") - parser.add_option("--no-browser", dest="no_browser", action='http://github.com/store_false', - help="Do not open MNE-Report in browser") - parser.add_option("--overwrite", dest="overwrite", action='http://github.com/store_false', - help="Overwrite html report if it already exists") - parser.add_option("-j", "--jobs", dest="n_jobs", help="Number of jobs to" - " run in parallel") - parser.add_option("-m", "--mri-decim", type="int", dest="mri_decim", - default=2, help="Integer factor used to decimate " - "BEM plots") - parser.add_option("--image-format", type="str", dest="image_format", - default='png', help="Image format to use " - "(can be 'png' or 'svg')") + parser.add_option( + "-p", + "--path", + dest="path", + help="Path to folder who MNE-Report must be created", + ) + parser.add_option( + "-i", + "--info", + dest="info_fname", + help="File from which info dictionary is to be read", + metavar="FILE", + ) + parser.add_option( + "-c", + "--cov", + dest="cov_fname", + help="File from which noise covariance is to be read", + metavar="FILE", + ) + parser.add_option( + "--bmin", + dest="bmin", + help="Time at which baseline correction starts for evokeds", + default=None, + ) + parser.add_option( + "--bmax", + dest="bmax", + help="Time at 
which baseline correction stops for evokeds", + default=None, + ) + parser.add_option( + "-d", "--subjects-dir", dest="subjects_dir", help="The subjects directory" + ) + parser.add_option("-s", "--subject", dest="subject", help="The subject name") + parser.add_option( + "--no-browser", + dest="no_browser", + action="http://github.com/store_false", + help="Do not open MNE-Report in browser", + ) + parser.add_option( + "--overwrite", + dest="overwrite", + action="http://github.com/store_false", + help="Overwrite html report if it already exists", + ) + parser.add_option( + "-j", "--jobs", dest="n_jobs", help="Number of jobs to run in parallel" + ) + parser.add_option( + "-m", + "--mri-decim", + type="int", + dest="mri_decim", + default=2, + help="Integer factor used to decimate BEM plots", + ) + parser.add_option( + "--image-format", + type="str", + dest="image_format", + default="png", + help="Image format to use (can be 'png' or 'svg')", + ) _add_verbose_flag(parser) options, args = parser.parse_args() @@ -144,12 +183,16 @@ def run(): baseline = (bmin, bmax) t0 = time.time() - report = Report(info_fname, subjects_dir=subjects_dir, - subject=subject, baseline=baseline, - cov_fname=cov_fname, verbose=verbose, - image_format=image_format) - report.parse_folder(path, verbose=verbose, n_jobs=n_jobs, - mri_decim=mri_decim) + report = Report( + info_fname, + subjects_dir=subjects_dir, + subject=subject, + baseline=baseline, + cov_fname=cov_fname, + verbose=verbose, + image_format=image_format, + ) + report.parse_folder(path, verbose=verbose, n_jobs=n_jobs, mri_decim=mri_decim) log_elapsed(time.time() - t0, verbose=verbose) report.save(open_browser=open_browser, overwrite=overwrite) diff --git a/mne/commands/mne_setup_forward_model.py b/mne/commands/mne_setup_forward_model.py index 239decefbfe..29e8616f571 100644 --- a/mne/commands/mne_setup_forward_model.py +++ b/mne/commands/mne_setup_forward_model.py @@ -1,4 +1,7 @@ -#!/usr/bin/env python +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Create a BEM model for a subject. Examples @@ -9,63 +12,79 @@ """ -import sys import os +import sys + import mne from mne.utils import get_subjects_dir, warn def run(): """Run command.""" - from mne.commands.utils import get_optparser, _add_verbose_flag + from mne.commands.utils import _add_verbose_flag, get_optparser parser = get_optparser(__file__) - parser.add_option("-s", "--subject", - dest="subject", - help="Subject name (required)", - default=None) - parser.add_option("--model", - dest="model", - help="Output file name. Use a name /-bem.fif", - default=None, - type='string') - parser.add_option('--ico', - dest='ico', - help='The surface ico downsampling to use, e.g. ' - ' 5=20484, 4=5120, 3=1280. If None, no subsampling' - ' is applied.', - default=None, - type='int') - parser.add_option('--brainc', - dest='brainc', - help='Defines the brain compartment conductivity. ' - 'The default value is 0.3 S/m.', - default=0.3, - type='float') - parser.add_option('--skullc', - dest='skullc', - help='Defines the skull compartment conductivity. ' - 'The default value is 0.006 S/m.', - default=None, - type='float') - parser.add_option('--scalpc', - dest='scalpc', - help='Defines the scalp compartment conductivity. 
' - 'The default value is 0.3 S/m.', - default=None, - type='float') - parser.add_option('--homog', - dest='homog', - help='Use a single compartment model (brain only) ' - 'instead a three layer one (scalp, skull, and ' - ' brain). If this flag is specified, the options ' - '--skullc and --scalpc are irrelevant.', - default=None, action="http://github.com/store_true") - parser.add_option('-d', '--subjects-dir', - dest='subjects_dir', - help='Subjects directory', - default=None) + parser.add_option( + "-s", "--subject", dest="subject", help="Subject name (required)", default=None + ) + parser.add_option( + "--model", + dest="model", + help="Output file name. Use a name /-bem.fif", + default=None, + type="string", + ) + parser.add_option( + "--ico", + dest="ico", + help="The surface ico downsampling to use, e.g. " + " 5=20484, 4=5120, 3=1280. If None, no subsampling" + " is applied.", + default=None, + type="int", + ) + parser.add_option( + "--brainc", + dest="brainc", + help="Defines the brain compartment conductivity. " + "The default value is 0.3 S/m.", + default=0.3, + type="float", + ) + parser.add_option( + "--skullc", + dest="skullc", + help="Defines the skull compartment conductivity. " + "The default value is 0.006 S/m.", + default=None, + type="float", + ) + parser.add_option( + "--scalpc", + dest="scalpc", + help="Defines the scalp compartment conductivity. " + "The default value is 0.3 S/m.", + default=None, + type="float", + ) + parser.add_option( + "--homog", + dest="homog", + help="Use a single compartment model (brain only) " + "instead a three layer one (scalp, skull, and " + " brain). If this flag is specified, the options " + "--skullc and --scalpc are irrelevant.", + default=None, + action="http://github.com/store_true", + ) + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + help="Subjects directory", + default=None, + ) _add_verbose_flag(parser) options, args = parser.parse_args() @@ -85,11 +104,15 @@ def run(): # Parse conductivity option if homog is True: if skullc is not None: - warn('Trying to set the skull conductivity for a single layer ' - 'model. To use a 3 layer model, do not set the --homog flag.') + warn( + "Trying to set the skull conductivity for a single layer " + "model. To use a 3 layer model, do not set the --homog flag." + ) if scalpc is not None: - warn('Trying to set the scalp conductivity for a single layer ' - 'model. To use a 3 layer model, do not set the --homog flag.') + warn( + "Trying to set the scalp conductivity for a single layer " + "model. To use a 3 layer model, do not set the --homog flag." 
+ ) # Single layer conductivity = [brainc] else: @@ -99,17 +122,19 @@ def run(): scalpc = 0.3 conductivity = [brainc, skullc, scalpc] # Create source space - bem_model = mne.make_bem_model(subject, - ico=ico, - conductivity=conductivity, - subjects_dir=subjects_dir, - verbose=verbose) + bem_model = mne.make_bem_model( + subject, + ico=ico, + conductivity=conductivity, + subjects_dir=subjects_dir, + verbose=verbose, + ) # Generate filename if fname is None: - n_faces = list(str(len(surface['tris'])) for surface in bem_model) - fname = subject + '-' + '-'.join(n_faces) + '-bem.fif' + n_faces = list(str(len(surface["tris"])) for surface in bem_model) + fname = subject + "-" + "-".join(n_faces) + "-bem.fif" else: - if not (fname.endswith('-bem.fif') or fname.endswith('_bem.fif')): + if not (fname.endswith("-bem.fif") or fname.endswith("_bem.fif")): fname = fname + "-bem.fif" # Save to subject's directory subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) diff --git a/mne/commands/mne_setup_source_space.py b/mne/commands/mne_setup_source_space.py index e8b14b78db3..273e833b31c 100644 --- a/mne/commands/mne_setup_source_space.py +++ b/mne/commands/mne_setup_source_space.py @@ -1,4 +1,7 @@ -#!/usr/bin/env python +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Set up bilateral hemisphere surface-based source space with subsampling. Examples @@ -21,65 +24,88 @@ def run(): """Run command.""" - from mne.commands.utils import get_optparser, _add_verbose_flag + from mne.commands.utils import _add_verbose_flag, get_optparser + parser = get_optparser(__file__) - parser.add_option('-s', '--subject', - dest='subject', - help='Subject name (required)', - default=None) - parser.add_option('--src', dest='fname', - help='Output file name. Use a name /-src.fif', - metavar='FILE', default=None) - parser.add_option('--morph', - dest='subject_to', - help='morph the source space to this subject', - default=None) - parser.add_option('--surf', - dest='surface', - help='The surface to use. (default to white)', - default='white', - type='string') - parser.add_option('--spacing', - dest='spacing', - help='Specifies the approximate grid spacing of the ' - 'source space in mm. (default to 7mm)', - default=None, - type='int') - parser.add_option('--ico', - dest='ico', - help='use the recursively subdivided icosahedron ' - 'to create the source space.', - default=None, - type='int') - parser.add_option('--oct', - dest='oct', - help='use the recursively subdivided octahedron ' - 'to create the source space.', - default=None, - type='int') - parser.add_option('-d', '--subjects-dir', - dest='subjects_dir', - help='Subjects directory', - default=None) - parser.add_option('-n', '--n-jobs', - dest='n_jobs', - help='The number of jobs to run in parallel ' - '(default 1). Requires the joblib package. ' - 'Will use at most 2 jobs' - ' (one for each hemisphere).', - default=1, - type='int') - parser.add_option('--add-dist', - dest='add_dist', - help='Add distances. 
Can be "True", "False", or "patch" ' - 'to only compute cortical patch statistics (like the ' - '--cps option in MNE-C; requires SciPy >= 1.3)', - default='True') - parser.add_option('-o', '--overwrite', - dest='overwrite', - help='to write over existing files', - default=None, action="http://github.com/store_true") + parser.add_option( + "-s", "--subject", dest="subject", help="Subject name (required)", default=None + ) + parser.add_option( + "--src", + dest="fname", + help="Output file name. Use a name /-src.fif", + metavar="FILE", + default=None, + ) + parser.add_option( + "--morph", + dest="subject_to", + help="morph the source space to this subject", + default=None, + ) + parser.add_option( + "--surf", + dest="surface", + help="The surface to use. (default to white)", + default="white", + type="string", + ) + parser.add_option( + "--spacing", + dest="spacing", + help="Specifies the approximate grid spacing of the " + "source space in mm. (default to 7mm)", + default=None, + type="int", + ) + parser.add_option( + "--ico", + dest="ico", + help="use the recursively subdivided icosahedron to create the source space.", + default=None, + type="int", + ) + parser.add_option( + "--oct", + dest="oct", + help="use the recursively subdivided octahedron to create the source space.", + default=None, + type="int", + ) + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + help="Subjects directory", + default=None, + ) + parser.add_option( + "-n", + "--n-jobs", + dest="n_jobs", + help="The number of jobs to run in parallel " + "(default 1). Requires the joblib package. " + "Will use at most 2 jobs" + " (one for each hemisphere).", + default=1, + type="int", + ) + parser.add_option( + "--add-dist", + dest="add_dist", + help='Add distances. 
Can be "True", "False", or "patch" ' + "to only compute cortical patch statistics (like the --cps option in MNE-C)", + default="True", + ) + parser.add_option( + "-o", + "--overwrite", + dest="overwrite", + help="to write over existing files", + default=None, + action="http://github.com/store_true", + ) _add_verbose_flag(parser) options, args = parser.parse_args() @@ -94,50 +120,60 @@ def run(): subjects_dir = options.subjects_dir spacing = options.spacing ico = options.ico - oct = options.oct + oct_ = options.oct surface = options.surface n_jobs = options.n_jobs add_dist = options.add_dist - _check_option('add_dist', add_dist, ('True', 'False', 'patch')) - add_dist = {'True': True, 'False': False, 'patch': 'patch'}[add_dist] + _check_option("add_dist", add_dist, ("True", "False", "patch")) + add_dist = {"True": True, "False": False, "patch": "patch"}[add_dist] verbose = True if options.verbose is not None else False overwrite = True if options.overwrite is not None else False # Parse source spacing option - spacing_options = [ico, oct, spacing] + spacing_options = [ico, oct_, spacing] n_options = len([x for x in spacing_options if x is not None]) + use_spacing = "oct6" if n_options > 1: - raise ValueError('Only one spacing option can be set at the same time') + raise ValueError("Only one spacing option can be set at the same time") elif n_options == 0: # Default to oct6 - use_spacing = 'oct6' + pass elif n_options == 1: if ico is not None: use_spacing = "ico" + str(ico) - elif oct is not None: - use_spacing = "oct" + str(oct) + elif oct_ is not None: + use_spacing = "oct" + str(oct_) elif spacing is not None: use_spacing = spacing + del ico, oct_, spacing # Generate filename if fname is None: if subject_to is None: - fname = subject + '-' + str(use_spacing) + '-src.fif' + fname = subject + "-" + str(use_spacing) + "-src.fif" else: - fname = (subject_to + '-' + subject + '-' + - str(use_spacing) + '-src.fif') + fname = subject_to + "-" + subject + "-" + str(use_spacing) + "-src.fif" else: - if not (fname.endswith('_src.fif') or fname.endswith('-src.fif')): + if not (fname.endswith("_src.fif") or fname.endswith("-src.fif")): fname = fname + "-src.fif" # Create source space - src = mne.setup_source_space(subject=subject, spacing=use_spacing, - surface=surface, subjects_dir=subjects_dir, - n_jobs=n_jobs, add_dist=add_dist, - verbose=verbose) + src = mne.setup_source_space( + subject=subject, + spacing=use_spacing, + surface=surface, + subjects_dir=subjects_dir, + n_jobs=n_jobs, + add_dist=add_dist, + verbose=verbose, + ) # Morph source space if --morph is set if subject_to is not None: - src = mne.morph_source_spaces(src, subject_to=subject_to, - subjects_dir=subjects_dir, - surf=surface, verbose=verbose) + src = mne.morph_source_spaces( + src, + subject_to=subject_to, + subjects_dir=subjects_dir, + surf=surface, + verbose=verbose, + ) # Save source space to file src.save(fname=fname, overwrite=overwrite) diff --git a/mne/commands/mne_show_fiff.py b/mne/commands/mne_show_fiff.py index be31cde2ad8..2f9e73d2dc1 100644 --- a/mne/commands/mne_show_fiff.py +++ b/mne/commands/mne_show_fiff.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Show the contents of a FIFF file. Examples @@ -16,23 +15,39 @@ """ -# Authors : Eric Larson, PhD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
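The spacing triage just above collapses `--ico`, `--oct`, and `--spacing` into a single `spacing` value, falling back to `oct6`; a hedged sketch of the resulting call (the subject name is a placeholder, and the remaining keywords mirror the command's defaults as shown in this patch):

    import mne

    src = mne.setup_source_space(
        subject="sample",      # placeholder subject
        spacing="oct6",        # default when no --ico/--oct/--spacing is given
        surface="white", subjects_dir=None, n_jobs=1, add_dist=True,
    )
    src.save("sample-oct6-src.fif", overwrite=False)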
import sys + import mne def run(): """Run command.""" - parser = mne.commands.utils.get_optparser( - __file__, usage='mne show_fiff ') - parser.add_option("-t", "--tag", dest="tag", - help="provide information about this tag", metavar="TAG") + parser = mne.commands.utils.get_optparser(__file__, usage="mne show_fiff ") + parser.add_option( + "-t", + "--tag", + dest="tag", + help="provide information about this tag", + metavar="TAG", + ) + parser.add_option( + "-b", + "--bytes", + dest="show_bytes", + help="show the byte offset of each tag", + action="http://github.com/store_true", + ) options, args = parser.parse_args() if len(args) != 1: parser.print_help() sys.exit(1) - msg = mne.io.show_fiff(args[0], tag=options.tag).strip() + msg = mne.io.show_fiff( + args[0], tag=options.tag, show_bytes=options.show_bytes + ).strip() print(msg) diff --git a/mne/commands/mne_show_info.py b/mne/commands/mne_show_info.py index 44e1fa79141..b9448011206 100644 --- a/mne/commands/mne_show_info.py +++ b/mne/commands/mne_show_info.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Show measurement info from .fif file. Examples @@ -9,16 +8,18 @@ """ -# Authors : Alexandre Gramfort, Ph.D. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import sys + import mne def run(): """Run command.""" - parser = mne.commands.utils.get_optparser( - __file__, usage='mne show_info ') + parser = mne.commands.utils.get_optparser(__file__, usage="mne show_info ") options, args = parser.parse_args() if len(args) != 1: parser.print_help() @@ -26,11 +27,11 @@ def run(): fname = args[0] - if not fname.endswith('.fif'): - raise ValueError('%s does not seem to be a .fif file.' % fname) + if not fname.endswith(".fif"): + raise ValueError(f"{fname} does not seem to be a .fif file.") info = mne.io.read_info(fname) - print("File : %s" % fname) + print(f"File : {fname}") print(info) diff --git a/mne/commands/mne_surf2bem.py b/mne/commands/mne_surf2bem.py index 4cb5ade9662..5d6426986e8 100644 --- a/mne/commands/mne_surf2bem.py +++ b/mne/commands/mne_surf2bem.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python r"""Convert surface to BEM FIF file. Examples @@ -10,9 +9,10 @@ --id=4 """ -# Authors: Alexandre Gramfort -# + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import sys @@ -25,12 +25,19 @@ def run(): parser = get_optparser(__file__) - parser.add_option("-s", "--surf", dest="surf", - help="Surface in Freesurfer format", metavar="FILE") - parser.add_option("-f", "--fif", dest="fif", - help="FIF file produced", metavar="FILE") - parser.add_option("-i", "--id", dest="id", default=4, - help=("Surface Id (e.g. 4 for head surface)")) + parser.add_option( + "-s", "--surf", dest="surf", help="Surface in Freesurfer format", metavar="FILE" + ) + parser.add_option( + "-f", "--fif", dest="fif", help="FIF file produced", metavar="FILE" + ) + parser.add_option( + "-i", + "--id", + dest="id", + default=4, + help=("Surface Id (e.g. 4 for head surface)"), + ) options, args = parser.parse_args() @@ -38,9 +45,8 @@ def run(): parser.print_help() sys.exit(1) - print("Converting %s to BEM FIF file." 
% options.surf) - surf = mne.bem._surfaces_to_bem([options.surf], [int(options.id)], - sigmas=[1]) + print(f"Converting {options.surf} to BEM FIF file.") + surf = mne.bem._surfaces_to_bem([options.surf], [int(options.id)], sigmas=[1]) mne.write_bem_surfaces(options.fif, surf) diff --git a/mne/commands/mne_sys_info.py b/mne/commands/mne_sys_info.py index a09994de8f9..70c56e42f3e 100644 --- a/mne/commands/mne_sys_info.py +++ b/mne/commands/mne_sys_info.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Show system information. Examples @@ -9,25 +8,49 @@ """ -# Authors : Eric Larson +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import sys + import mne def run(): """Run command.""" - parser = mne.commands.utils.get_optparser(__file__, usage='mne sys_info') - parser.add_option('-p', '--show-paths', dest='show_paths', - help='Show module paths', action='http://github.com/store_true') - parser.add_option('-d', '--developer', dest='developer', - help='Show additional developer module information', - action='http://github.com/store_true') - parser.add_option('-a', '--ascii', dest='unicode', - help='Use ASCII instead of unicode symbols', - action='http://github.com/store_false', default=True) + parser = mne.commands.utils.get_optparser(__file__, usage="mne sys_info") + parser.add_option( + "-p", + "--show-paths", + dest="show_paths", + help="Show module paths", + action="http://github.com/store_true", + ) + parser.add_option( + "-d", + "--developer", + dest="developer", + help="Show additional developer module information", + action="http://github.com/store_true", + ) + parser.add_option( + "-a", + "--ascii", + dest="unicode", + help="Use ASCII instead of unicode symbols", + action="http://github.com/store_false", + default=True, + ) + parser.add_option( + "--no-check-version", + dest="check_version", + help="Disable MNE-Python remote version checking.", + action="http://github.com/store_false", + default=True, + ) options, args = parser.parse_args() - dependencies = 'developer' if options.developer else 'user' + dependencies = "developer" if options.developer else "user" if len(args) != 0: parser.print_help() sys.exit(1) @@ -35,7 +58,8 @@ def run(): mne.sys_info( show_paths=options.show_paths, dependencies=dependencies, - unicode=options.unicode + unicode=options.unicode, + check_version=options.check_version, ) diff --git a/mne/commands/mne_watershed_bem.py b/mne/commands/mne_watershed_bem.py index b69a2801fd6..4f872b90dac 100644 --- a/mne/commands/mne_watershed_bem.py +++ b/mne/commands/mne_watershed_bem.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python -# Authors: Lorenzo De Santis +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Create BEM surfaces using the watershed algorithm included with FreeSurfer. 
Examples @@ -19,39 +21,77 @@ def run(): """Run command.""" - from mne.commands.utils import get_optparser, _add_verbose_flag + from mne.commands.utils import _add_verbose_flag, get_optparser parser = get_optparser(__file__) - parser.add_option("-s", "--subject", dest="subject", - help="Subject name (required)", default=None) - parser.add_option("-d", "--subjects-dir", dest="subjects_dir", - help="Subjects directory", default=None) - parser.add_option("-o", "--overwrite", dest="overwrite", - help="Write over existing files", action="http://github.com/store_true") - parser.add_option("-v", "--volume", dest="volume", - help="Defaults to T1", default='T1') - parser.add_option("-a", "--atlas", dest="atlas", - help="Specify the --atlas option for mri_watershed", - default=False, action="http://github.com/store_true") - parser.add_option("-g", "--gcaatlas", dest="gcaatlas", - help="Specify the --brain_atlas option for " - "mri_watershed", default=False, action="http://github.com/store_true") - parser.add_option("-p", "--preflood", dest="preflood", - help="Change the preflood height", default=None) - parser.add_option("--copy", dest="copy", - help="Use copies instead of symlinks for surfaces", - action="http://github.com/store_true") - parser.add_option("-t", "--T1", dest="T1", - help="Whether or not to pass the -T1 flag " - "(can be true, false, 0, or 1). " - "By default it takes the same value as gcaatlas.", - default=None) - parser.add_option("-b", "--brainmask", dest="brainmask", - help="The filename for the brainmask output file " - "relative to the " - "$SUBJECTS_DIR/$SUBJECT/bem/watershed/ directory.", - default="ws") + parser.add_option( + "-s", "--subject", dest="subject", help="Subject name (required)", default=None + ) + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + help="Subjects directory", + default=None, + ) + parser.add_option( + "-o", + "--overwrite", + dest="overwrite", + help="Write over existing files", + action="http://github.com/store_true", + ) + parser.add_option( + "-v", "--volume", dest="volume", help="Defaults to T1", default="T1" + ) + parser.add_option( + "-a", + "--atlas", + dest="atlas", + help="Specify the --atlas option for mri_watershed", + default=False, + action="http://github.com/store_true", + ) + parser.add_option( + "-g", + "--gcaatlas", + dest="gcaatlas", + help="Specify the --brain_atlas option for mri_watershed", + default=False, + action="http://github.com/store_true", + ) + parser.add_option( + "-p", + "--preflood", + dest="preflood", + help="Change the preflood height", + default=None, + ) + parser.add_option( + "--copy", + dest="copy", + help="Use copies instead of symlinks for surfaces", + action="http://github.com/store_true", + ) + parser.add_option( + "-t", + "--T1", + dest="T1", + help="Whether or not to pass the -T1 flag " + "(can be true, false, 0, or 1). 
" + "By default it takes the same value as gcaatlas.", + default=None, + ) + parser.add_option( + "-b", + "--brainmask", + dest="brainmask", + help="The filename for the brainmask output file " + "relative to the " + "$SUBJECTS_DIR/$SUBJECT/bem/watershed/ directory.", + default="ws", + ) _add_verbose_flag(parser) options, args = parser.parse_args() @@ -72,14 +112,23 @@ def run(): T1 = options.T1 if T1 is not None: T1 = T1.lower() - _check_option("--T1", T1, ('true', 'false', '0', '1')) - T1 = T1 in ('true', '1') + _check_option("--T1", T1, ("true", "false", "0", "1")) + T1 = T1 in ("true", "1") verbose = options.verbose - make_watershed_bem(subject=subject, subjects_dir=subjects_dir, - overwrite=overwrite, volume=volume, atlas=atlas, - gcaatlas=gcaatlas, preflood=preflood, copy=copy, - T1=T1, brainmask=brainmask, verbose=verbose) + make_watershed_bem( + subject=subject, + subjects_dir=subjects_dir, + overwrite=overwrite, + volume=volume, + atlas=atlas, + gcaatlas=gcaatlas, + preflood=preflood, + copy=copy, + T1=T1, + brainmask=brainmask, + verbose=verbose, + ) mne.utils.run_command_if_main() diff --git a/mne/commands/mne_what.py b/mne/commands/mne_what.py index 5d281facd0c..f6ee1683ba9 100644 --- a/mne/commands/mne_what.py +++ b/mne/commands/mne_what.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python r"""Check type of FIF file. Examples @@ -9,7 +8,9 @@ raw """ -# Authors : Eric Larson, PhD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import mne @@ -17,7 +18,8 @@ def run(): """Run command.""" from mne.commands.utils import get_optparser - parser = get_optparser(__file__, usage='usage: %prog fname [fname2 ...]') + + parser = get_optparser(__file__, usage="usage: %prog fname [fname2 ...]") options, args = parser.parse_args() for arg in args: print(mne.what(arg)) diff --git a/mne/commands/tests/__init__.py b/mne/commands/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/commands/tests/__init__.py +++ b/mne/commands/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/commands/tests/test_commands.py b/mne/commands/tests/test_commands.py index e405778b32b..d7d8fbb69b5 100644 --- a/mne/commands/tests/test_commands.py +++ b/mne/commands/tests/test_commands.py @@ -1,57 +1,86 @@ -# -*- coding: utf-8 -*- +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ import glob import os -from os import path as op +import platform import shutil +from os import path as op from pathlib import Path import numpy as np import pytest -from numpy.testing import assert_equal, assert_allclose +from numpy.testing import assert_allclose, assert_equal import mne -from mne import (concatenate_raws, read_bem_surfaces, read_surface, - read_source_spaces, read_bem_solution) +from mne import ( + concatenate_raws, + read_bem_solution, + read_bem_surfaces, + read_source_spaces, + read_surface, +) from mne.bem import ConductorModel, convert_flash_mris -from mne.commands import (mne_browse_raw, mne_bti2fiff, mne_clean_eog_ecg, - mne_compute_proj_ecg, mne_compute_proj_eog, - mne_coreg, mne_kit2fiff, - mne_make_scalp_surfaces, mne_maxfilter, - mne_report, mne_surf2bem, mne_watershed_bem, - mne_compare_fiff, mne_flash_bem, mne_show_fiff, - mne_show_info, mne_what, mne_setup_source_space, - mne_setup_forward_model, mne_anonymize, - mne_prepare_bem_model, mne_sys_info) +from mne.commands import ( + mne_anonymize, + mne_browse_raw, + mne_bti2fiff, + mne_clean_eog_ecg, + mne_compare_fiff, + mne_compute_proj_ecg, + mne_compute_proj_eog, + mne_coreg, + mne_flash_bem, + mne_kit2fiff, + mne_make_scalp_surfaces, + mne_prepare_bem_model, + mne_report, + mne_setup_forward_model, + mne_setup_source_space, + mne_show_fiff, + mne_show_info, + mne_surf2bem, + mne_sys_info, + mne_watershed_bem, + mne_what, +) from mne.datasets import testing -from mne.io import read_raw_fif, read_info -from mne.utils import (requires_mne, requires_freesurfer, ArgvSetter, - _stamp_to_dt, _record_warnings) - -base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') -raw_fname = op.join(base_dir, 'test_raw.fif') +from mne.io import read_info, read_raw_fif, show_fiff +from mne.utils import ( + ArgvSetter, + _record_warnings, + _stamp_to_dt, + requires_freesurfer, + requires_mne, +) + +base_dir = op.join(op.dirname(__file__), "..", "..", "io", "tests", "data") +raw_fname = op.join(base_dir, "test_raw.fif") testing_path = testing.data_path(download=False) -subjects_dir = op.join(testing_path, 'subjects') -bem_model_fname = op.join(testing_path, 'subjects', - 'sample', 'bem', 'sample-320-320-320-bem.fif') +subjects_dir = op.join(testing_path, "subjects") +bem_model_fname = op.join( + testing_path, "subjects", "sample", "bem", "sample-320-320-320-bem.fif" +) def check_usage(module, force_help=False): """Ensure we print usage.""" - args = ('--help',) if force_help else () + args = ("--help",) if force_help else () with ArgvSetter(args) as out: try: module.run() except SystemExit: pass - assert 'Usage: ' in out.stdout.getvalue() + assert "Usage: " in out.stdout.getvalue() @pytest.mark.slowtest def test_browse_raw(): """Test mne browse_raw.""" check_usage(mne_browse_raw) - with ArgvSetter(('--raw', raw_fname)): + with ArgvSetter(("--raw", raw_fname)): with _record_warnings(): # mpl show warning mne_browse_raw.run() @@ -61,7 +90,7 @@ def test_what(): check_usage(mne_browse_raw) with ArgvSetter((raw_fname,)) as out: mne_what.run() - assert 'raw' == out.stdout.getvalue().strip() + assert "raw" == out.stdout.getvalue().strip() def test_bti2fiff(): @@ -74,13 +103,24 @@ def test_compare_fiff(): check_usage(mne_compare_fiff) -def test_show_fiff(): +# should match ".*valid tag.*" but conda-linux intermittently fails for some reason +@pytest.mark.filterwarnings("ignore:Invalid tag.*:RuntimeWarning") +def test_show_fiff(tmp_path): """Test mne compare_fiff.""" + if os.getenv("MNE_CI_KIND", "") == "conda" and 
platform.system() == "Linux": + pytest.skip("Skipping test on conda-linux due to intermittent failures") check_usage(mne_show_fiff) with ArgvSetter((raw_fname,)): mne_show_fiff.run() - with ArgvSetter((raw_fname, '--tag=102')): + with ArgvSetter((raw_fname, "--tag=102")): mne_show_fiff.run() + bad_fname = tmp_path / "test_bad_raw.fif" + with open(bad_fname, "wb") as fout, open(raw_fname, "rb") as fin: + fout.write(fin.read(100000)) + lines = show_fiff(bad_fname, output=list) + last_line = lines[-1] + assert last_line.endswith(">>>>BAD @9015") + assert "302 = FIFF_EPOCH (734412b >f4)" in last_line @requires_mne @@ -88,42 +128,40 @@ def test_clean_eog_ecg(tmp_path): """Test mne clean_eog_ecg.""" check_usage(mne_clean_eog_ecg) tempdir = str(tmp_path) - raw = concatenate_raws([read_raw_fif(f) - for f in [raw_fname, raw_fname, raw_fname]]) - raw.info['bads'] = ['MEG 2443'] + raw = concatenate_raws([read_raw_fif(f) for f in [raw_fname, raw_fname, raw_fname]]) + raw.info["bads"] = ["MEG 2443"] use_fname = op.join(tempdir, op.basename(raw_fname)) raw.save(use_fname) - with ArgvSetter(('-i', use_fname, '--quiet')): + with ArgvSetter(("-i", use_fname, "--quiet")): mne_clean_eog_ecg.run() - for key, count in (('proj', 2), ('-eve', 3)): - fnames = glob.glob(op.join(tempdir, '*%s.fif' % key)) + for key, count in (("proj", 2), ("-eve", 3)): + fnames = glob.glob(op.join(tempdir, f"*{key}.fif")) assert len(fnames) == count @pytest.mark.slowtest -@pytest.mark.parametrize('fun', (mne_compute_proj_ecg, mne_compute_proj_eog)) +@pytest.mark.parametrize("fun", (mne_compute_proj_ecg, mne_compute_proj_eog)) def test_compute_proj_exg(tmp_path, fun): """Test mne compute_proj_ecg/eog.""" check_usage(fun) tempdir = str(tmp_path) use_fname = op.join(tempdir, op.basename(raw_fname)) - bad_fname = op.join(tempdir, 'bads.txt') - with open(bad_fname, 'w') as fid: - fid.write('MEG 2443\n') + bad_fname = op.join(tempdir, "bads.txt") + with open(bad_fname, "w") as fid: + fid.write("MEG 2443\n") shutil.copyfile(raw_fname, use_fname) - with ArgvSetter(('-i', use_fname, '--bad=' + bad_fname, - '--rej-eeg', '150')): + with ArgvSetter(("-i", use_fname, "--bad=" + bad_fname, "--rej-eeg", "150")): with _record_warnings(): # samples, sometimes fun.run() - fnames = glob.glob(op.join(tempdir, '*proj.fif')) + fnames = glob.glob(op.join(tempdir, "*proj.fif")) assert len(fnames) == 1 - fnames = glob.glob(op.join(tempdir, '*-eve.fif')) + fnames = glob.glob(op.join(tempdir, "*-eve.fif")) assert len(fnames) == 1 def test_coreg(): """Test mne coreg.""" - assert hasattr(mne_coreg, 'run') + assert hasattr(mne_coreg, "run") def test_kit2fiff(): @@ -137,77 +175,77 @@ def test_kit2fiff(): @testing.requires_testing_data def test_make_scalp_surfaces(tmp_path, monkeypatch): """Test mne make_scalp_surfaces.""" - pytest.importorskip('nibabel') - pytest.importorskip('pyvista') + pytest.importorskip("nibabel") + pytest.importorskip("pyvista") check_usage(mne_make_scalp_surfaces) - has = 'SUBJECTS_DIR' in os.environ + has = "SUBJECTS_DIR" in os.environ # Copy necessary files to avoid FreeSurfer call tempdir = str(tmp_path) - surf_path = op.join(subjects_dir, 'sample', 'surf') - surf_path_new = op.join(tempdir, 'sample', 'surf') - os.mkdir(op.join(tempdir, 'sample')) + surf_path = op.join(subjects_dir, "sample", "surf") + surf_path_new = op.join(tempdir, "sample", "surf") + os.mkdir(op.join(tempdir, "sample")) os.mkdir(surf_path_new) - subj_dir = op.join(tempdir, 'sample', 'bem') + subj_dir = op.join(tempdir, "sample", "bem") os.mkdir(subj_dir) - cmd = 
('-s', 'sample', '--subjects-dir', tempdir) + cmd = ("-s", "sample", "--subjects-dir", tempdir) monkeypatch.setattr( - mne.bem, 'decimate_surface', - lambda points, triangles, n_triangles: (points, triangles)) - dense_fname = op.join(subj_dir, 'sample-head-dense.fif') - medium_fname = op.join(subj_dir, 'sample-head-medium.fif') + mne.bem, + "decimate_surface", + lambda points, triangles, n_triangles: (points, triangles), + ) + dense_fname = op.join(subj_dir, "sample-head-dense.fif") + medium_fname = op.join(subj_dir, "sample-head-medium.fif") with ArgvSetter(cmd, disable_stdout=False, disable_stderr=False): - monkeypatch.delenv('FREESURFER_HOME') - with pytest.raises(RuntimeError, match='The FreeSurfer environ'): + monkeypatch.delenv("FREESURFER_HOME", raising=False) + with pytest.raises(RuntimeError, match="The FreeSurfer environ"): mne_make_scalp_surfaces.run() - shutil.copy(op.join(surf_path, 'lh.seghead'), surf_path_new) - monkeypatch.setenv('FREESURFER_HOME', tempdir) + shutil.copy(op.join(surf_path, "lh.seghead"), surf_path_new) + monkeypatch.setenv("FREESURFER_HOME", tempdir) mne_make_scalp_surfaces.run() assert op.isfile(dense_fname) assert op.isfile(medium_fname) - with pytest.raises(IOError, match='overwrite'): + with pytest.raises(OSError, match="overwrite"): mne_make_scalp_surfaces.run() # actually check the outputs head_py = read_bem_surfaces(dense_fname) assert_equal(len(head_py), 1) head_py = head_py[0] - head_c = read_bem_surfaces(op.join(subjects_dir, 'sample', 'bem', - 'sample-head-dense.fif'))[0] - assert_allclose(head_py['rr'], head_c['rr']) + head_c = read_bem_surfaces( + op.join(subjects_dir, "sample", "bem", "sample-head-dense.fif") + )[0] + assert_allclose(head_py["rr"], head_c["rr"]) if not has: - assert 'SUBJECTS_DIR' not in os.environ - - -def test_maxfilter(): - """Test mne maxfilter.""" - check_usage(mne_maxfilter) - with ArgvSetter(('-i', raw_fname, '--st', '--movecomp', '--linefreq', '60', - '--trans', raw_fname)) as out: - with pytest.warns(RuntimeWarning, match="Don't use"): - os.environ['_MNE_MAXFILTER_TEST'] = 'true' - try: - mne_maxfilter.run() - finally: - del os.environ['_MNE_MAXFILTER_TEST'] - out = out.stdout.getvalue() - for check in ('maxfilter', '-trans', '-movecomp'): - assert check in out, check + assert "SUBJECTS_DIR" not in os.environ @pytest.mark.slowtest @testing.requires_testing_data def test_report(tmp_path): """Test mne report.""" - pytest.importorskip('nibabel') + pytest.importorskip("nibabel") check_usage(mne_report) tempdir = str(tmp_path) use_fname = op.join(tempdir, op.basename(raw_fname)) shutil.copyfile(raw_fname, use_fname) - with ArgvSetter(('-p', tempdir, '-i', use_fname, '-d', subjects_dir, - '-s', 'sample', '--no-browser', '-m', '30')): + with ArgvSetter( + ( + "-p", + tempdir, + "-i", + use_fname, + "-d", + subjects_dir, + "-s", + "sample", + "--no-browser", + "-m", + "30", + ) + ): with _record_warnings(): # contour levels mne_report.run() - fnames = glob.glob(op.join(tempdir, '*.html')) + fnames = glob.glob(op.join(tempdir, "*.html")) assert len(fnames) == 1 @@ -219,48 +257,48 @@ def test_surf2bem(): @pytest.mark.timeout(900) # took ~400 s on a local test @pytest.mark.slowtest @pytest.mark.ultraslowtest -@requires_freesurfer('mri_watershed') +@requires_freesurfer("mri_watershed") @testing.requires_testing_data def test_watershed_bem(tmp_path): """Test mne watershed bem.""" - pytest.importorskip('nibabel') + pytest.importorskip("nibabel") check_usage(mne_watershed_bem) # from T1.mgz Mdc = np.array([[-1, 0, 0], [0, 0, -1], 
[0, 1, 0]]) Pxyz_c = np.array([-5.273613, 9.039085, -27.287964]) # Copy necessary files to tempdir tempdir = str(tmp_path) - mridata_path = op.join(subjects_dir, 'sample', 'mri') - subject_path_new = op.join(tempdir, 'sample') - mridata_path_new = op.join(subject_path_new, 'mri') + mridata_path = op.join(subjects_dir, "sample", "mri") + subject_path_new = op.join(tempdir, "sample") + mridata_path_new = op.join(subject_path_new, "mri") os.makedirs(mridata_path_new) - new_fname = op.join(mridata_path_new, 'T1.mgz') - shutil.copyfile(op.join(mridata_path, 'T1.mgz'), new_fname) + new_fname = op.join(mridata_path_new, "T1.mgz") + shutil.copyfile(op.join(mridata_path, "T1.mgz"), new_fname) old_mode = os.stat(new_fname).st_mode os.chmod(new_fname, 0) - args = ('-d', tempdir, '-s', 'sample', '-o') - with pytest.raises(PermissionError, match=r'read permissions.*T1\.mgz'): + args = ("-d", tempdir, "-s", "sample", "-o") + with pytest.raises(PermissionError, match=r"read permissions.*T1\.mgz"): with ArgvSetter(args): mne_watershed_bem.run() os.chmod(new_fname, old_mode) - for s in ('outer_skin', 'outer_skull', 'inner_skull'): - assert not op.isfile(op.join(subject_path_new, 'bem', '%s.surf' % s)) + for s in ("outer_skin", "outer_skull", "inner_skull"): + assert not op.isfile(op.join(subject_path_new, "bem", f"{s}.surf")) with ArgvSetter(args): mne_watershed_bem.run() kwargs = dict(rtol=1e-5, atol=1e-5) - for s in ('outer_skin', 'outer_skull', 'inner_skull'): - rr, tris, vol_info = read_surface(op.join(subject_path_new, 'bem', - '%s.surf' % s), - read_metadata=True) + for s in ("outer_skin", "outer_skull", "inner_skull"): + rr, tris, vol_info = read_surface( + op.join(subject_path_new, "bem", f"{s}.surf"), read_metadata=True + ) assert_equal(len(tris), 20480) assert_equal(tris.min(), 0) assert_equal(rr.shape[0], tris.max() + 1) # compare the volume info to the mgz header - assert_allclose(vol_info['xras'], Mdc[0], **kwargs) - assert_allclose(vol_info['yras'], Mdc[1], **kwargs) - assert_allclose(vol_info['zras'], Mdc[2], **kwargs) - assert_allclose(vol_info['cras'], Pxyz_c, **kwargs) + assert_allclose(vol_info["xras"], Mdc[0], **kwargs) + assert_allclose(vol_info["yras"], Mdc[1], **kwargs) + assert_allclose(vol_info["zras"], Mdc[2], **kwargs) + assert_allclose(vol_info["cras"], Pxyz_c, **kwargs) @pytest.mark.timeout(180) # took ~70 s locally @@ -270,36 +308,42 @@ def test_watershed_bem(tmp_path): @testing.requires_testing_data def test_flash_bem(tmp_path): """Test mne flash_bem.""" + pytest.importorskip("nibabel") check_usage(mne_flash_bem, force_help=True) # Copy necessary files to tempdir tempdir = Path(str(tmp_path)) - mridata_path = Path(subjects_dir) / 'sample' / 'mri' - subject_path_new = tempdir / 'sample' - mridata_path_new = subject_path_new / 'mri' - flash_path = mridata_path_new / 'flash' + mridata_path = Path(subjects_dir) / "sample" / "mri" + subject_path_new = tempdir / "sample" + mridata_path_new = subject_path_new / "mri" + flash_path = mridata_path_new / "flash" flash_path.mkdir(parents=True, exist_ok=True) - bem_path = mridata_path_new / 'bem' + bem_path = mridata_path_new / "bem" bem_path.mkdir(parents=True, exist_ok=True) - shutil.copyfile(op.join(mridata_path, 'T1.mgz'), - op.join(mridata_path_new, 'T1.mgz')) - shutil.copyfile(op.join(mridata_path, 'brain.mgz'), - op.join(mridata_path_new, 'brain.mgz')) + shutil.copyfile( + op.join(mridata_path, "T1.mgz"), op.join(mridata_path_new, "T1.mgz") + ) + shutil.copyfile( + op.join(mridata_path, "brain.mgz"), op.join(mridata_path_new, 
"brain.mgz") + ) # Copy the available mri/flash/mef*.mgz files from the dataset for kind in (5, 30): - in_fname = mridata_path / "flash" / f'mef{kind:02d}.mgz' - in_fname_echo = flash_path / f'mef{kind:02d}_001.mgz' + in_fname = mridata_path / "flash" / f"mef{kind:02d}.mgz" + in_fname_echo = flash_path / f"mef{kind:02d}_001.mgz" shutil.copyfile(in_fname, flash_path / in_fname_echo.name) # Test mne flash_bem with --noconvert option # (since there are no DICOM Flash images in dataset) - for s in ('outer_skin', 'outer_skull', 'inner_skull'): - assert not op.isfile(subject_path_new / 'bem' / f'{s}.surf') + for s in ("outer_skin", "outer_skull", "inner_skull"): + assert not op.isfile(subject_path_new / "bem" / f"{s}.surf") # First test without flash30 - with ArgvSetter(('-d', tempdir, '-s', 'sample', '-n', '-r', '-3'), - disable_stdout=False, disable_stderr=False): + with ArgvSetter( + ("-d", tempdir, "-s", "sample", "-r", "-3"), + disable_stdout=False, + disable_stderr=False, + ): mne_flash_bem.run() - for s in ('outer_skin', 'outer_skull', 'inner_skull'): - surf_path = subject_path_new / 'bem' / f'{s}.surf' + for s in ("outer_skin", "outer_skull", "inner_skull"): + surf_path = subject_path_new / "bem" / f"{s}.surf" assert surf_path.exists() surf_path.unlink() # cleanup shutil.rmtree(flash_path / "parameter_maps") # remove old files @@ -314,22 +358,30 @@ def test_flash_bem(tmp_path): # Test with flash5 and flash30 shutil.rmtree(flash_path) # first remove old files - with ArgvSetter(('-d', tempdir, '-s', 'sample', '-n', - '-3', str(mridata_path / "flash" / 'mef30.mgz'), - '-5', str(mridata_path / "flash" / 'mef05.mgz')), - disable_stdout=False, disable_stderr=False): + with ArgvSetter( + ( + "-d", + tempdir, + "-s", + "sample", + "-3", + str(mridata_path / "flash" / "mef30.mgz"), + "-5", + str(mridata_path / "flash" / "mef05.mgz"), + ), + disable_stdout=False, + disable_stderr=False, + ): mne_flash_bem.run() kwargs = dict(rtol=1e-5, atol=1e-5) - for s in ('outer_skin', 'outer_skull', 'inner_skull'): - rr, tris = read_surface(op.join(subject_path_new, 'bem', - '%s.surf' % s)) + for s in ("outer_skin", "outer_skull", "inner_skull"): + rr, tris = read_surface(op.join(subject_path_new, "bem", f"{s}.surf")) assert_equal(len(tris), 5120) assert_equal(tris.min(), 0) assert_equal(rr.shape[0], tris.max() + 1) # compare to the testing flash surfaces - rr_c, tris_c = read_surface(op.join(subjects_dir, 'sample', 'bem', - '%s.surf' % s)) + rr_c, tris_c = read_surface(op.join(subjects_dir, "sample", "bem", f"{s}.surf")) assert_allclose(rr, rr_c, **kwargs) assert_allclose(tris, tris_c, **kwargs) @@ -337,29 +389,80 @@ def test_flash_bem(tmp_path): @testing.requires_testing_data def test_setup_source_space(tmp_path): """Test mne setup_source_space.""" - pytest.importorskip('nibabel') + pytest.importorskip("nibabel") check_usage(mne_setup_source_space, force_help=True) # Using the sample dataset use_fname = op.join(tmp_path, "sources-src.fif") # Test command - with ArgvSetter(('--src', use_fname, '-d', subjects_dir, - '-s', 'sample', '--morph', 'sample', - '--add-dist', 'False', '--ico', '3', '--verbose')): + with ArgvSetter( + ( + "--src", + use_fname, + "-d", + subjects_dir, + "-s", + "sample", + "--morph", + "sample", + "--add-dist", + "False", + "--ico", + "3", + "--verbose", + ) + ): mne_setup_source_space.run() src = read_source_spaces(use_fname) assert len(src) == 2 with pytest.raises(Exception): - with ArgvSetter(('--src', use_fname, '-d', subjects_dir, - '-s', 'sample', '--ico', '3', '--oct', '3')): + 
with ArgvSetter( + ( + "--src", + use_fname, + "-d", + subjects_dir, + "-s", + "sample", + "--ico", + "3", + "--oct", + "3", + ) + ): assert mne_setup_source_space.run() with pytest.raises(Exception): - with ArgvSetter(('--src', use_fname, '-d', subjects_dir, - '-s', 'sample', '--ico', '3', '--spacing', '10')): + with ArgvSetter( + ( + "--src", + use_fname, + "-d", + subjects_dir, + "-s", + "sample", + "--ico", + "3", + "--spacing", + "10", + ) + ): assert mne_setup_source_space.run() with pytest.raises(Exception): - with ArgvSetter(('--src', use_fname, '-d', subjects_dir, - '-s', 'sample', '--ico', '3', '--spacing', '10', - '--oct', '3')): + with ArgvSetter( + ( + "--src", + use_fname, + "-d", + subjects_dir, + "-s", + "sample", + "--ico", + "3", + "--spacing", + "10", + "--oct", + "3", + ) + ): assert mne_setup_source_space.run() @@ -367,17 +470,29 @@ def test_setup_source_space(tmp_path): @testing.requires_testing_data def test_setup_forward_model(tmp_path): """Test mne setup_forward_model.""" - pytest.importorskip('nibabel') + pytest.importorskip("nibabel") check_usage(mne_setup_forward_model, force_help=True) # Using the sample dataset use_fname = op.join(tmp_path, "model-bem.fif") # Test command - with ArgvSetter(('--model', use_fname, '-d', subjects_dir, '--homog', - '-s', 'sample', '--ico', '3', '--verbose')): + with ArgvSetter( + ( + "--model", + use_fname, + "-d", + subjects_dir, + "--homog", + "-s", + "sample", + "--ico", + "3", + "--verbose", + ) + ): mne_setup_forward_model.run() model = read_bem_surfaces(use_fname) assert len(model) == 1 - sol_fname = op.splitext(use_fname)[0] + '-sol.fif' + sol_fname = op.splitext(use_fname)[0] + "-sol.fif" read_bem_solution(sol_fname) @@ -389,8 +504,9 @@ def test_mne_prepare_bem_model(tmp_path): # Using the sample dataset bem_solution_fname = op.join(tmp_path, "bem_solution-bem-sol.fif") # Test command - with ArgvSetter(('--bem', bem_model_fname, '--sol', bem_solution_fname, - '--verbose')): + with ArgvSetter( + ("--bem", bem_model_fname, "--sol", bem_solution_fname, "--verbose") + ): mne_prepare_bem_model.run() bem_solution = read_bem_solution(bem_solution_fname) assert isinstance(bem_solution, ConductorModel) @@ -407,19 +523,19 @@ def test_sys_info(): """Test mne show_info.""" check_usage(mne_sys_info, force_help=True) with ArgvSetter((raw_fname,)): - with pytest.raises(SystemExit, match='1'): + with pytest.raises(SystemExit, match="1"): mne_sys_info.run() - with ArgvSetter() as out: + with ArgvSetter(("--no-check-version",)) as out: mne_sys_info.run() - assert 'numpy' in out.stdout.getvalue() + assert "numpy" in out.stdout.getvalue() def test_anonymize(tmp_path): """Test mne anonymize.""" check_usage(mne_anonymize) - out_fname = op.join(tmp_path, 'anon_test_raw.fif') - with ArgvSetter(('-f', raw_fname, '-o', out_fname)): + out_fname = op.join(tmp_path, "anon_test_raw.fif") + with ArgvSetter(("-f", raw_fname, "-o", out_fname)): mne_anonymize.run() info = read_info(out_fname) assert op.exists(out_fname) - assert info['meas_date'] == _stamp_to_dt((946684800, 0)) + assert info["meas_date"] == _stamp_to_dt((946684800, 0)) diff --git a/mne/commands/utils.py b/mne/commands/utils.py index 415f513cad1..beb3a4e98c4 100644 --- a/mne/commands/utils.py +++ b/mne/commands/utils.py @@ -1,24 +1,27 @@ """Some utility functions for commands (e.g., for cmdline handling).""" -# Authors: Yaroslav Halchenko -# Stefan Appelhoff -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
import glob import importlib import os import os.path as op -from optparse import OptionParser import sys +from optparse import OptionParser import mne def _add_verbose_flag(parser): - parser.add_option("--verbose", dest='verbose', - help="Enable verbose mode (printing of log messages).", - default=None, action="http://github.com/store_true") + parser.add_option( + "--verbose", + dest="verbose", + help="Enable verbose mode (printing of log messages).", + default=None, + action="http://github.com/store_true", + ) def load_module(name, path): @@ -37,32 +40,33 @@ def load_module(name, path): Imported module. """ - from importlib.util import spec_from_file_location, module_from_spec + from importlib.util import module_from_spec, spec_from_file_location + spec = spec_from_file_location(name, path) mod = module_from_spec(spec) spec.loader.exec_module(mod) return mod -def get_optparser(cmdpath, usage=None, prog_prefix='mne', version=None): +def get_optparser(cmdpath, usage=None, prog_prefix="mne", version=None): """Create OptionParser with cmd specific settings (e.g., prog value).""" # Fetch description - mod = load_module('__temp', cmdpath) + mod = load_module("__temp", cmdpath) if mod.__doc__: doc, description, epilog = mod.__doc__, None, None - doc_lines = doc.split('\n') + doc_lines = doc.split("\n") description = doc_lines[0] if len(doc_lines) > 1: - epilog = '\n'.join(doc_lines[1:]) + epilog = "\n".join(doc_lines[1:]) # Get the name of the command command = os.path.basename(cmdpath) command, _ = os.path.splitext(command) - command = command[len(prog_prefix) + 1:] # +1 is for `_` character + command = command[len(prog_prefix) + 1 :] # +1 is for `_` character # Set prog - prog = prog_prefix + ' {}'.format(command) + prog = prog_prefix + f" {command}" # Set version if version is None: @@ -70,10 +74,9 @@ def get_optparser(cmdpath, usage=None, prog_prefix='mne', version=None): # monkey patch OptionParser to not wrap epilog OptionParser.format_epilog = lambda self, formatter: self.epilog - parser = OptionParser(prog=prog, - version=version, - description=description, - epilog=epilog, usage=usage) + parser = OptionParser( + prog=prog, version=version, description=description, epilog=epilog, usage=usage + ) return parser @@ -81,27 +84,26 @@ def get_optparser(cmdpath, usage=None, prog_prefix='mne', version=None): def main(): """Entrypoint for mne usage.""" mne_bin_dir = op.dirname(op.dirname(__file__)) - valid_commands = sorted(glob.glob(op.join(mne_bin_dir, - 'commands', 'mne_*.py'))) + valid_commands = sorted(glob.glob(op.join(mne_bin_dir, "commands", "mne_*.py"))) valid_commands = [c.split(op.sep)[-1][4:-3] for c in valid_commands] def print_help(): # noqa print("Usage : mne command options\n") print("Accepted commands :\n") for c in valid_commands: - print("\t- %s" % c) + print(f"\t- {c}") print("\nExample : mne browse_raw --raw sample_audvis_raw.fif") print("\nGetting help example : mne compute_proj_eog -h") if len(sys.argv) == 1 or "help" in sys.argv[1] or "-h" in sys.argv[1]: print_help() elif sys.argv[1] == "--version": - print("MNE %s" % mne.__version__) + print(f"MNE {mne.__version__}") elif sys.argv[1] not in valid_commands: - print('Invalid command: "%s"\n' % sys.argv[1]) + print(f'Invalid command: "{sys.argv[1]}"\n') print_help() else: cmd = sys.argv[1] - cmd = importlib.import_module('.mne_%s' % (cmd,), 'mne.commands') + cmd = importlib.import_module(f".mne_{cmd}", "mne.commands") sys.argv = sys.argv[1:] cmd.run() diff --git a/mne/conftest.py b/mne/conftest.py index 
0c08dab9a03..57d14205e17 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -1,108 +1,134 @@ -# -*- coding: utf-8 -*- -# Author: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from contextlib import contextmanager -import inspect -from textwrap import dedent import gc +import inspect import os import os.path as op -from pathlib import Path +import platform +import re import shutil import sys import warnings -import pytest +from collections import defaultdict +from contextlib import contextmanager +from pathlib import Path +from textwrap import dedent from unittest import mock import numpy as np +import pytest +from pytest import StashKey import mne -from mne import read_events, pick_types, Epochs +from mne import Epochs, pick_types, read_events from mne.channels import read_layout from mne.coreg import create_default_subject from mne.datasets import testing -from mne.fixes import has_numba, _compare_version -from mne.io import read_raw_fif, read_raw_ctf, read_raw_nirx, read_raw_snirf +from mne.fixes import _compare_version, has_numba +from mne.io import read_raw_ctf, read_raw_fif, read_raw_nirx, read_raw_snirf from mne.stats import cluster_level -from mne.utils import (_pl, _assert_no_instances, numerics, Bunch, - _check_qt_version, _TempDir, check_version) +from mne.utils import ( + Bunch, + _assert_no_instances, + _check_qt_version, + _pl, + _record_warnings, + _TempDir, + check_version, + numerics, +) # data from sample dataset from mne.viz._figure import use_browser_backend from mne.viz.backends._utils import _init_mne_qtapp test_path = testing.data_path(download=False) -s_path = op.join(test_path, 'MEG', 'sample') -fname_evoked = op.join(s_path, 'sample_audvis_trunc-ave.fif') -fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif') -fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') -fname_fwd_full = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif') -bem_path = op.join(test_path, 'subjects', 'sample', 'bem') -fname_bem = op.join(bem_path, 'sample-1280-bem.fif') -fname_aseg = op.join(test_path, 'subjects', 'sample', 'mri', 'aseg.mgz') -subjects_dir = op.join(test_path, 'subjects') -fname_src = op.join(bem_path, 'sample-oct-4-src.fif') -fname_trans = op.join(s_path, 'sample_audvis_trunc-trans.fif') - -ctf_dir = op.join(test_path, 'CTF') -fname_ctf_continuous = op.join(ctf_dir, 'testdata_ctf.ds') - -nirx_path = test_path / 'NIRx' -snirf_path = test_path / 'SNIRF' -nirsport2 = nirx_path / 'nirsport_v2' / 'aurora_recording _w_short_and_acc' -nirsport2_snirf = ( - snirf_path / 'NIRx' / 'NIRSport2' / '1.0.3' / - '2021-05-05_001.snirf') -nirsport2_2021_9 = nirx_path / 'nirsport_v2' / 'aurora_2021_9' +s_path = op.join(test_path, "MEG", "sample") +fname_evoked = op.join(s_path, "sample_audvis_trunc-ave.fif") +fname_cov = op.join(s_path, "sample_audvis_trunc-cov.fif") +fname_fwd = op.join(s_path, "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif") +fname_fwd_full = op.join(s_path, "sample_audvis_trunc-meg-eeg-oct-6-fwd.fif") +bem_path = op.join(test_path, "subjects", "sample", "bem") +fname_bem = op.join(bem_path, "sample-1280-bem.fif") +fname_aseg = op.join(test_path, "subjects", "sample", "mri", "aseg.mgz") +subjects_dir = op.join(test_path, "subjects") +fname_src = op.join(bem_path, "sample-oct-4-src.fif") +fname_trans = op.join(s_path, "sample_audvis_trunc-trans.fif") + +ctf_dir = op.join(test_path, "CTF") +fname_ctf_continuous = op.join(ctf_dir, "testdata_ctf.ds") + +nirx_path = 
test_path / "NIRx" +snirf_path = test_path / "SNIRF" +nirsport2 = nirx_path / "nirsport_v2" / "aurora_recording _w_short_and_acc" +nirsport2_snirf = snirf_path / "NIRx" / "NIRSport2" / "1.0.3" / "2021-05-05_001.snirf" +nirsport2_2021_9 = nirx_path / "nirsport_v2" / "aurora_2021_9" nirsport2_20219_snirf = ( - snirf_path / 'NIRx' / 'NIRSport2' / '2021.9' / - '2021-10-01_002.snirf') + snirf_path / "NIRx" / "NIRSport2" / "2021.9" / "2021-10-01_002.snirf" +) # data from mne.io.tests.data -base_dir = op.join(op.dirname(__file__), 'io', 'tests', 'data') -fname_raw_io = op.join(base_dir, 'test_raw.fif') -fname_event_io = op.join(base_dir, 'test-eve.fif') -fname_cov_io = op.join(base_dir, 'test-cov.fif') -fname_evoked_io = op.join(base_dir, 'test-ave.fif') +base_dir = op.join(op.dirname(__file__), "io", "tests", "data") +fname_raw_io = op.join(base_dir, "test_raw.fif") +fname_event_io = op.join(base_dir, "test-eve.fif") +fname_cov_io = op.join(base_dir, "test-cov.fif") +fname_evoked_io = op.join(base_dir, "test-ave.fif") event_id, tmin, tmax = 1, -0.1, 1.0 -vv_layout = read_layout('Vectorview-all') +vv_layout = read_layout("Vectorview-all") -collect_ignore = [ - 'export/_brainvision.py', - 'export/_eeglab.py', - 'export/_edf.py'] +collect_ignore = ["export/_brainvision.py", "export/_eeglab.py", "export/_edf_bdf.py"] -def pytest_configure(config): +def pytest_configure(config: pytest.Config): """Configure pytest options.""" # Markers - for marker in ('slowtest', 'ultraslowtest', 'pgtest', 'allow_unclosed', - 'allow_unclosed_pyside2'): - config.addinivalue_line('markers', marker) + # can be queried with `pytest --markers` for example + for marker in ( + "slowtest: mark a test as slow", + "ultraslowtest: mark a test as ultraslow or to be run rarely", + "pgtest: mark a test as relevant for mne-qt-browser", + "pvtest: mark a test as relevant for pyvistaqt", + "allow_unclosed: allow unclosed pyvistaqt instances", + ): + config.addinivalue_line("markers", marker) # Fixtures - for fixture in ('matplotlib_config', 'close_all', 'check_verbose', - 'qt_config', 'protect_config'): - config.addinivalue_line('usefixtures', fixture) + for fixture in ( + "matplotlib_config", + "qt_config", + "protect_config", + ): + config.addinivalue_line("usefixtures", fixture) # pytest-qt uses PYTEST_QT_API, but let's make it respect qtpy's QT_API # if present - if os.getenv('PYTEST_QT_API') is None and os.getenv('QT_API') is not None: - os.environ['PYTEST_QT_API'] = os.environ['QT_API'] + if os.getenv("PYTEST_QT_API") is None and os.getenv("QT_API") is not None: + os.environ["PYTEST_QT_API"] = os.environ["QT_API"] + + # suppress: + # Debugger warning: It seems that frozen modules are being used, which may + # make the debugger miss breakpoints. Please pass -Xfrozen_modules=off + # to python to disable frozen modules. + if os.getenv("PYDEVD_DISABLE_FILE_VALIDATION") is None: + os.environ["PYDEVD_DISABLE_FILE_VALIDATION"] = "1" + + # https://numba.readthedocs.io/en/latest/reference/deprecation.html#deprecation-of-old-style-numba-captured-errors # noqa: E501 + if "NUMBA_CAPTURED_ERRORS" not in os.environ: + os.environ["NUMBA_CAPTURED_ERRORS"] = "new_style" # Warnings # - Once SciPy updates not to have non-integer and non-tuple errors (1.2.0) # we should remove them from here. # - This list should also be considered alongside reset_warnings in # doc/conf.py. 
- if os.getenv('MNE_IGNORE_WARNINGS_IN_TESTS', '') != 'true': - first_kind = 'error' + if os.getenv("MNE_IGNORE_WARNINGS_IN_TESTS", "") not in ("true", "1"): + first_kind = "error" else: - first_kind = 'always' - warning_lines = r""" - {0}:: + first_kind = "always" + warning_lines = f" {first_kind}::" + warning_lines += r""" # matplotlib->traitlets (notebook) ignore:Passing unrecognized arguments to super.*:DeprecationWarning # notebook tests @@ -111,40 +137,85 @@ def pytest_configure(config): ignore:unclosed event loop <:ResourceWarning # ignore if joblib is missing ignore:joblib not installed.*:RuntimeWarning - # TODO: This is indicative of a problem - ignore:.*Matplotlib is currently using agg.*: # qdarkstyle ignore:.*Setting theme=.*:RuntimeWarning - # scikit-learn using this arg - ignore:.*The 'sym_pos' keyword is deprecated.*:DeprecationWarning - # Should be removable by 2022/07/08, SciPy savemat issue - ignore:.*elementwise comparison failed; returning scalar in.*:FutureWarning - # numba with NumPy dev - ignore:`np.MachAr` is deprecated.*:DeprecationWarning - # matplotlib 3.6 and pyvista/nilearn - ignore:.*cmap function will be deprecated.*: - # joblib hasn't updated to avoid distutils - ignore:.*distutils package is deprecated.*:DeprecationWarning - ignore:.*distutils Version classes are deprecated.*:DeprecationWarning # nbclient ignore:Passing a schema to Validator\.iter_errors is deprecated.*: ignore:Unclosed context tz + ignore:datetime.datetime.utcfromtimestamp.*:DeprecationWarning + # joblib + ignore:ast\.Num is deprecated.*:DeprecationWarning + ignore:Attribute n is deprecated and will be removed in Python 3\.14.*:DeprecationWarning + # numpydoc + ignore:ast\.NameConstant is deprecated and will be removed in Python 3\.14.*:DeprecationWarning + # pooch + ignore:Python 3\.14 will, by default, filter extracted tar archives.*:DeprecationWarning + # pandas + ignore:\n*Pyarrow will become a required dependency of pandas.*:DeprecationWarning + ignore:np\.find_common_type is deprecated.*:DeprecationWarning + ignore:Python binding for RankQuantileOptions.*: + # pyvista <-> NumPy 2.0 + ignore:__array_wrap__ must accept context and return_scalar arguments.*:DeprecationWarning + # pyvista <-> VTK dev + ignore:Call to deprecated method Get.*:DeprecationWarning + # nibabel <-> NumPy 2.0 + ignore:__array__ implementation doesn't accept a copy.*:DeprecationWarning + # quantities via neo + ignore:The 'copy' argument in Quantity is deprecated.*: + # debugpy uses deprecated matplotlib API + ignore:The (non_)?interactive_bk attribute was deprecated.*: + # SWIG (via OpenMEEG) + ignore:.*builtin type swigvarlink has no.*:DeprecationWarning + # eeglabio + ignore:numpy\.core\.records is deprecated.*:DeprecationWarning + ignore:Starting field name with a underscore.*: + # joblib + ignore:process .* is multi-threaded, use of fork/exec.*:DeprecationWarning + # sklearn + ignore:Python binding for RankQuantileOptions.*:RuntimeWarning + ignore:.*The `disp` and `iprint` options of the L-BFGS-B solver.*:DeprecationWarning + # matplotlib<->nilearn + ignore:[\S\s]*You are using the 'agg' matplotlib backend[\S\s]*:UserWarning + # matplotlib<->pyparsing + ignore:^'.*' argument is deprecated, use '.*'$:DeprecationWarning + ignore:^'.*' deprecated - use '.*'$:DeprecationWarning + # dipy + ignore:'where' used without 'out', expect .*:UserWarning + """ # noqa: E501 + for warning_line in warning_lines.split("\n"): warning_line = warning_line.strip() - if warning_line and not warning_line.startswith('#'): - 
config.addinivalue_line('filterwarnings', warning_line) + if warning_line and not warning_line.startswith("#"): + config.addinivalue_line("filterwarnings", warning_line) + + +def pytest_collection_modifyitems(items: list[pytest.Item]): + """Add slowtest marker automatically to anything marked ultraslow.""" + for item in items: + if len(list(item.iter_markers("ultraslowtest"))): + item.add_marker(pytest.mark.slowtest) # Have to be careful with autouse=True, but this is just an int comparison @@ -158,9 +229,10 @@ def check_verbose(request): try: assert mne.utils.logger.level == starting_level except AssertionError: - pytest.fail('.'.join([request.module.__name__, - request.function.__name__]) + - ' modifies logger.level') + pytest.fail( + ".".join([request.module.__name__, request.function.__name__]) + + " modifies logger.level" + ) @pytest.fixture(autouse=True) @@ -168,8 +240,9 @@ def close_all(): """Close all matplotlib plots, regardless of test status.""" # This adds < 1 µS in local testing, and we have ~2500 tests, so ~2 ms max import matplotlib.pyplot as plt + yield - plt.close('all') + plt.close("all") @pytest.fixture(autouse=True) @@ -178,86 +251,75 @@ def add_mne(doctest_namespace): doctest_namespace["mne"] = mne -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def verbose_debug(): """Run a test with debug verbosity.""" - with mne.utils.use_log_level('debug'): + with mne.utils.use_log_level("debug"): yield -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def qt_config(): """Configure the Qt backend for viz tests.""" - os.environ['_MNE_BROWSER_NO_BLOCK'] = 'true' + os.environ["_MNE_BROWSER_NO_BLOCK"] = "true" + if "_MNE_BROWSER_BACK" not in os.environ: + os.environ["_MNE_BROWSER_BACK"] = "true" -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def matplotlib_config(): """Configure matplotlib for viz tests.""" import matplotlib from matplotlib import cbook + # Allow for easy interactive debugging with a call like: # # $ MNE_MPL_TESTING_BACKEND=Qt5Agg pytest mne/viz/tests/test_raw.py -k annotation -x --pdb # noqa: E501 # try: - want = os.environ['MNE_MPL_TESTING_BACKEND'] + want = os.environ["MNE_MPL_TESTING_BACKEND"] except KeyError: - want = 'agg' # don't pop up windows + want = "agg" # don't pop up windows with warnings.catch_warnings(record=True): # ignore warning - warnings.filterwarnings('ignore') + warnings.filterwarnings("ignore") matplotlib.use(want, force=True) import matplotlib.pyplot as plt + assert plt.get_backend() == want # overwrite some params that can horribly slow down tests that # users might have changed locally (but should not otherwise affect # functionality) plt.ioff() - plt.rcParams['figure.dpi'] = 100 - try: - plt.rcParams['figure.raise_window'] = False - except KeyError: # MPL < 3.3 - pass + plt.rcParams["figure.dpi"] = 100 + plt.rcParams["figure.raise_window"] = False # Make sure that we always reraise exceptions in handlers orig = cbook.CallbackRegistry class CallbackRegistryReraise(orig): def __init__(self, exception_handler=None, signals=None): - super(CallbackRegistryReraise, self).__init__(exception_handler) + super().__init__(exception_handler) cbook.CallbackRegistry = CallbackRegistryReraise -@pytest.fixture(scope='session') -def ci_macos(): - """Determine if running on MacOS CI.""" - return (os.getenv('CI', 'false').lower() == 'true' and - sys.platform == 'darwin') - - -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def azure_windows(): """Determine if running on Azure 
Windows.""" - return (os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true' and - sys.platform.startswith('win')) - - -@pytest.fixture() -def check_gui_ci(ci_macos, azure_windows): - """Skip tests that are not reliable on CIs.""" - if azure_windows or ci_macos: - pytest.skip('Skipping GUI tests on MacOS CIs and Azure Windows') + return ( + os.getenv("AZURE_CI_WINDOWS", "false").lower() == "true" + and platform.system() == "Windows" + ) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def raw_orig(): """Get raw data without any change to it from mne.io.tests.data.""" raw = read_raw_fif(fname_raw_io, preload=True) return raw -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def raw(): """ Get raw data and pick channels to reduce load for testing. @@ -266,21 +328,27 @@ def raw(): """ raw = read_raw_fif(fname_raw_io, preload=True) # Throws a warning about a changed unit. - with pytest.warns(RuntimeWarning, match='unit'): - raw.set_channel_types({raw.ch_names[0]: 'ias'}) - raw.pick_channels(raw.ch_names[:9]) + with pytest.warns(RuntimeWarning, match="unit"): + raw.set_channel_types({raw.ch_names[0]: "ias"}) + raw.pick(raw.ch_names[:9]) raw.info.normalize_proj() # Fix projectors after subselection return raw -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def raw_ctf(): """Get ctf raw data from mne.io.tests.data.""" raw_ctf = read_raw_ctf(fname_ctf_continuous, preload=True) return raw_ctf -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") +def raw_spectrum(raw): + """Get raw with power spectral density computed from mne.io.tests.data.""" + return raw.compute_psd() + + +@pytest.fixture(scope="function") def events(): """Get events from mne.io.tests.data.""" return read_events(fname_event_io) @@ -290,13 +358,22 @@ def _get_epochs(stop=5, meg=True, eeg=False, n_chan=20): """Get epochs.""" raw = read_raw_fif(fname_raw_io) events = read_events(fname_event_io) - picks = pick_types(raw.info, meg=meg, eeg=eeg, stim=False, - ecg=False, eog=False, exclude='bads') + picks = pick_types( + raw.info, meg=meg, eeg=eeg, stim=False, ecg=False, eog=False, exclude="bads" + ) # Use a subset of channels for plotting speed picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int) - with pytest.warns(RuntimeWarning, match='projection'): - epochs = Epochs(raw, events[:stop], event_id, tmin, tmax, picks=picks, - proj=False, preload=False) + with pytest.warns(RuntimeWarning, match="projection"): + epochs = Epochs( + raw, + events[:stop], + event_id, + tmin, + tmax, + picks=picks, + proj=False, + preload=False, + ) epochs.info.normalize_proj() # avoid warnings return epochs @@ -323,23 +400,77 @@ def epochs_full(): return _get_epochs(None).load_data() -@pytest.fixture(scope='session', params=[testing._pytest_param()]) -def _evoked(): - # This one is session scoped, so be sure not to modify it (use evoked - # instead) - evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory', - baseline=(None, 0)) - evoked.crop(0, 0.2) - return evoked +@pytest.fixture() +def epochs_spectrum(): + """Get epochs with power spectral density computed from mne.io.tests.data.""" + return _get_epochs().load_data().compute_psd() + + +@pytest.fixture() +def epochs_tfr(): + """Get an EpochsTFR computed from mne.io.tests.data.""" + epochs = _get_epochs().load_data() + return epochs.compute_tfr(method="morlet", freqs=np.linspace(20, 40, num=5)) + + +@pytest.fixture() +def average_tfr(epochs_tfr): + """Get an AverageTFR computed by averaging an 
EpochsTFR (this is small & fast).""" + return epochs_tfr.average() + + +@pytest.fixture() +def full_average_tfr(full_evoked): + """Get an AverageTFR computed from Evoked. + + This is slower than the `average_tfr` fixture, but a few TFR.plot_* tests need it. + """ + return full_evoked.compute_tfr(method="morlet", freqs=np.linspace(20, 40, num=5)) + + +@pytest.fixture() +def raw_tfr(raw): + """Get a RawTFR computed from mne.io.tests.data.""" + return raw.compute_tfr(method="morlet", freqs=np.linspace(20, 40, num=5)) + + +@pytest.fixture() +def epochs_empty(): + """Get empty epochs from mne.io.tests.data.""" + epochs = _get_epochs(meg=True, eeg=True).load_data() + with pytest.warns(RuntimeWarning, match="were dropped"): + epochs.drop_bad(reject={"mag": 1e-20}) + + return epochs + + +@pytest.fixture(scope="session", params=[testing._pytest_param()]) +def _full_evoked(): + # This is session scoped, so be sure not to modify its return value (use + # `full_evoked` fixture instead) + return mne.read_evokeds(fname_evoked, condition="Left Auditory", baseline=(None, 0)) + + +@pytest.fixture(scope="session", params=[testing._pytest_param()]) +def _evoked(_full_evoked): + # This is session scoped, so be sure not to modify its return value (use `evoked` + # fixture instead) + return _full_evoked.copy().crop(0, 0.2) @pytest.fixture() def evoked(_evoked): - """Get evoked data.""" + """Get truncated evoked data.""" return _evoked.copy() -@pytest.fixture(scope='function', params=[testing._pytest_param()]) +@pytest.fixture() +def full_evoked(_full_evoked): + """Get full-duration evoked data (needed for, e.g., testing TFR).""" + return _full_evoked.copy() + + +@pytest.fixture(scope="function", params=[testing._pytest_param()]) def noise_cov(): """Get a noise cov from the testing dataset.""" return mne.read_cov(fname_cov) @@ -351,45 +482,44 @@ def noise_cov_io(): return mne.read_cov(fname_cov_io) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def bias_params_free(evoked, noise_cov): """Provide inputs for free bias functions.""" fwd = mne.read_forward_solution(fname_fwd) return _bias_params(evoked, noise_cov, fwd) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def bias_params_fixed(evoked, noise_cov): """Provide inputs for fixed bias functions.""" fwd = mne.read_forward_solution(fname_fwd) - mne.convert_forward_solution( - fwd, force_fixed=True, surf_ori=True, copy=False) + mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True, copy=False) return _bias_params(evoked, noise_cov, fwd) def _bias_params(evoked, noise_cov, fwd): - evoked.pick_types(meg=True, eeg=True, exclude=()) + evoked.pick(picks=["meg", "eeg"]) # restrict to limited set of verts (small src here) and one hemi for speed - vertices = [fwd['src'][0]['vertno'].copy(), []] + vertices = [fwd["src"][0]["vertno"].copy(), []] stc = mne.SourceEstimate( - np.zeros((sum(len(v) for v in vertices), 1)), vertices, 0, 1) + np.zeros((sum(len(v) for v in vertices), 1)), vertices, 0, 1 + ) fwd = mne.forward.restrict_forward_to_stc(fwd, stc) - assert fwd['sol']['row_names'] == noise_cov['names'] - assert noise_cov['names'] == evoked.ch_names - evoked = mne.EvokedArray(fwd['sol']['data'].copy(), evoked.info) + assert fwd["sol"]["row_names"] == noise_cov["names"] + assert noise_cov["names"] == evoked.ch_names + evoked = mne.EvokedArray(fwd["sol"]["data"].copy(), evoked.info) data_cov = noise_cov.copy() - data = fwd['sol']['data'] @ fwd['sol']['data'].T + data = fwd["sol"]["data"] @ fwd["sol"]["data"].T data *= 
1e-14 # 100 nAm at each source, effectively (1e-18 would be 1 nAm) # This is rank-deficient, so let's make it actually positive semidefinite # by regularizing a tiny bit - data.flat[::data.shape[0] + 1] += mne.make_ad_hoc_cov(evoked.info)['data'] + data.flat[:: data.shape[0] + 1] += mne.make_ad_hoc_cov(evoked.info)["data"] # Do our projection - proj, _, _ = mne.io.proj.make_projector( - data_cov['projs'], data_cov['names']) + proj, _, _ = mne._fiff.proj.make_projector(data_cov["projs"], data_cov["names"]) data = proj @ data @ proj.T - data_cov['data'][:] = data - assert data_cov['data'].shape[0] == len(noise_cov['names']) - want = np.arange(fwd['sol']['data'].shape[1]) + data_cov["data"][:] = data + assert data_cov["data"].shape[0] == len(noise_cov["names"]) + want = np.arange(fwd["sol"]["data"].shape[1]) if not mne.forward.is_fixed_orient(fwd): want //= 3 return evoked, fwd, noise_cov, data_cov, want @@ -405,42 +535,42 @@ def garbage_collect(): @pytest.fixture def mpl_backend(garbage_collect): """Use for epochs/ica when not implemented with pyqtgraph yet.""" - with use_browser_backend('matplotlib') as backend: + with use_browser_backend("matplotlib") as backend: yield backend backend._close_all() # Skip functions or modules for mne-qt-browser < 0.2.0 -pre_2_0_skip_modules = ['mne.viz.tests.test_epochs', - 'mne.viz.tests.test_ica'] -pre_2_0_skip_funcs = ['test_plot_raw_white', - 'test_plot_raw_selection'] +pre_2_0_skip_modules = ["mne.viz.tests.test_epochs", "mne.viz.tests.test_ica"] +pre_2_0_skip_funcs = ["test_plot_raw_white", "test_plot_raw_selection"] def _check_pyqtgraph(request): # Check Qt qt_version, api = _check_qt_version(return_api=True) - if (not qt_version) or _compare_version(qt_version, '<', '5.12'): - pytest.skip(f'Qt API {api} has version {qt_version} ' - f'but pyqtgraph needs >= 5.12!') + if (not qt_version) or _compare_version(qt_version, "<", "5.12"): + pytest.skip( + f"Qt API {api} has version {qt_version} but pyqtgraph needs >= 5.12!" 
+ ) try: import mne_qt_browser # noqa: F401 + # Check mne-qt-browser version - lower_2_0 = _compare_version(mne_qt_browser.__version__, '<', '0.2.0') + lower_2_0 = _compare_version(mne_qt_browser.__version__, "<", "0.2.0") m_name = request.function.__module__ f_name = request.function.__name__ if lower_2_0 and m_name in pre_2_0_skip_modules: - pytest.skip(f'Test-Module "{m_name}" was skipped for' - f' mne-qt-browser < 0.2.0') + pytest.skip( + f'Test-Module "{m_name}" was skipped for mne-qt-browser < 0.2.0' + ) elif lower_2_0 and f_name in pre_2_0_skip_funcs: - pytest.skip(f'Test "{f_name}" was skipped for ' - f'mne-qt-browser < 0.2.0') + pytest.skip(f'Test "{f_name}" was skipped for mne-qt-browser < 0.2.0') except Exception: - pytest.skip('Requires mne_qt_browser') + pytest.skip("Requires mne_qt_browser") else: ver = mne_qt_browser.__version__ - if api != 'PyQt5' and _compare_version(ver, '<=', '0.2.6'): - pytest.skip(f'mne_qt_browser {ver} requires PyQt5, API is {api}') + if api != "PyQt5" and _compare_version(ver, "<=", "0.2.6"): + pytest.skip(f"mne_qt_browser {ver} requires PyQt5, API is {api}") @pytest.fixture @@ -448,67 +578,72 @@ def pg_backend(request, garbage_collect): """Use for pyqtgraph-specific test-functions.""" _check_pyqtgraph(request) from mne_qt_browser._pg_figure import MNEQtBrowser - with use_browser_backend('qt') as backend: + + with use_browser_backend("qt") as backend: backend._close_all() yield backend backend._close_all() # This shouldn't be necessary, but let's make sure nothing is stale import mne_qt_browser + mne_qt_browser._browser_instances.clear() - if check_version('mne_qt_browser', min_version='0.4'): - _assert_no_instances( - MNEQtBrowser, f'Closure of {request.node.name}') + if not _test_passed(request): + return + _assert_no_instances(MNEQtBrowser, f"Closure of {request.node.name}") -@pytest.fixture(params=[ - 'matplotlib', - pytest.param('qt', marks=pytest.mark.pgtest), -]) +@pytest.fixture( + params=[ + "matplotlib", + pytest.param("qt", marks=pytest.mark.pgtest), + ] +) def browser_backend(request, garbage_collect, monkeypatch): """Parametrizes the name of the browser backend.""" backend_name = request.param - if backend_name == 'qt': + if backend_name == "qt": _check_pyqtgraph(request) with use_browser_backend(backend_name) as backend: backend._close_all() - monkeypatch.setenv('MNE_BROWSE_RAW_SIZE', '10,10') + monkeypatch.setenv("MNE_BROWSE_RAW_SIZE", "10,10") yield backend backend._close_all() - if backend_name == 'qt': + if backend_name == "qt": # This shouldn't be necessary, but let's make sure nothing is stale import mne_qt_browser + mne_qt_browser._browser_instances.clear() -@pytest.fixture(params=["pyvistaqt"]) +@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) def renderer(request, options_3d, garbage_collect): """Yield the 3D backends.""" with _use_backend(request.param, interactive=False) as renderer: yield renderer -@pytest.fixture(params=["pyvistaqt"]) +@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) def renderer_pyvistaqt(request, options_3d, garbage_collect): """Yield the PyVista backend.""" with _use_backend(request.param, interactive=False) as renderer: yield renderer -@pytest.fixture(params=["notebook"]) +@pytest.fixture(params=[pytest.param("notebook", marks=pytest.mark.pvtest)]) def renderer_notebook(request, options_3d): """Yield the 3D notebook renderer.""" with _use_backend(request.param, interactive=False) as renderer: yield renderer -@pytest.fixture(params=["pyvistaqt"]) 
+@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) def renderer_interactive_pyvistaqt(request, options_3d, qt_windows_closed): """Yield the interactive PyVista backend.""" with _use_backend(request.param, interactive=True) as renderer: yield renderer -@pytest.fixture(params=["pyvistaqt"]) +@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) def renderer_interactive(request, options_3d): """Yield the interactive 3D backends.""" with _use_backend(request.param, interactive=True) as renderer: @@ -518,9 +653,11 @@ def renderer_interactive(request, options_3d): @contextmanager def _use_backend(backend_name, interactive): from mne.viz.backends.renderer import _use_test_3d_backend + _check_skip_backend(backend_name) with _use_test_3d_backend(backend_name, interactive=interactive): from mne.viz.backends import renderer + try: yield renderer finally: @@ -528,34 +665,35 @@ def _use_backend(backend_name, interactive): def _check_skip_backend(name): - from mne.viz.backends.tests._utils import (has_pyvista, - has_imageio_ffmpeg, - has_pyvistaqt) from mne.viz.backends._utils import _notebook_vtk_works - if not has_pyvista(): - pytest.skip("Test skipped, requires pyvista.") - if not has_imageio_ffmpeg(): - pytest.skip("Test skipped, requires imageio-ffmpeg") - if name == 'pyvistaqt': + + pytest.importorskip("pyvista") + pytest.importorskip("imageio_ffmpeg") + if name == "pyvistaqt": + pytest.importorskip("pyvistaqt") if not _check_qt_version(): pytest.skip("Test skipped, requires Qt.") - if not has_pyvistaqt(): - pytest.skip("Test skipped, requires pyvistaqt") else: - assert name == 'notebook', name + assert name == "notebook", name + pytest.importorskip("jupyter") + pytest.importorskip("ipympl") + pytest.importorskip("ipyevents") + pytest.importorskip("trame") + pytest.importorskip("trame_vtk") + pytest.importorskip("trame_vuetify") if not _notebook_vtk_works(): pytest.skip("Test skipped, requires working notebook vtk") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def pixel_ratio(): """Get the pixel ratio.""" - from mne.viz.backends.tests._utils import has_pyvista # _check_qt_version will init an app for us, so no need for us to do it - if not has_pyvista() or not _check_qt_version(): - return 1. 
- from qtpy.QtWidgets import QMainWindow + if not check_version("pyvista", "0.32") or not _check_qt_version(): + return 1.0 from qtpy.QtCore import Qt + from qtpy.QtWidgets import QMainWindow + app = _init_mne_qtapp() app.processEvents() window = QMainWindow() @@ -565,10 +703,10 @@ def pixel_ratio(): return ratio -@pytest.fixture(scope='function', params=[testing._pytest_param()]) +@pytest.fixture(scope="function", params=[testing._pytest_param()]) def subjects_dir_tmp(tmp_path): """Copy MNE-testing-data subjects_dir to a temp dir for manipulation.""" - for key in ('sample', 'fsaverage'): + for key in ("sample", "fsaverage"): shutil.copytree(op.join(subjects_dir, key), str(tmp_path / key)) return str(tmp_path) @@ -576,59 +714,69 @@ def subjects_dir_tmp(tmp_path): @pytest.fixture(params=[testing._pytest_param()]) def subjects_dir_tmp_few(tmp_path): """Copy fewer files to a tmp_path.""" - subjects_path = tmp_path / 'subjects' + subjects_path = tmp_path / "subjects" os.mkdir(subjects_path) # add fsaverage - create_default_subject(subjects_dir=subjects_path, fs_home=test_path, - verbose=True) + create_default_subject(subjects_dir=subjects_path, fs_home=test_path, verbose=True) # add sample (with few files) - sample_path = subjects_path / 'sample' - os.makedirs(sample_path / 'bem') - for dirname in ('mri', 'surf'): + sample_path = subjects_path / "sample" + os.makedirs(sample_path / "bem") + for dirname in ("mri", "surf"): shutil.copytree( - test_path / 'subjects' / 'sample' / dirname, sample_path / dirname) + test_path / "subjects" / "sample" / dirname, sample_path / dirname + ) return subjects_path # Scoping these as session will make things faster, but need to make sure # not to modify them in-place in the tests, so keep them private -@pytest.fixture(scope='session', params=[testing._pytest_param()]) +@pytest.fixture(scope="session", params=[testing._pytest_param()]) def _evoked_cov_sphere(_evoked): """Compute a small evoked/cov/sphere combo for use with forwards.""" - evoked = _evoked.copy().pick_types(meg=True) - evoked.pick_channels(evoked.ch_names[::4]) + evoked = _evoked.copy().pick(picks="meg") + evoked.pick(evoked.ch_names[::4]) assert len(evoked.ch_names) == 77 cov = mne.read_cov(fname_cov) - sphere = mne.make_sphere_model('auto', 'auto', evoked.info) + sphere = mne.make_sphere_model( + (0.0, 0.0, 0.04), 0.1, relative_radii=(0.995, 0.997, 0.998, 1.0) + ) return evoked, cov, sphere -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def _fwd_surf(_evoked_cov_sphere): """Compute a forward for a surface source space.""" - evoked, cov, sphere = _evoked_cov_sphere + evoked, _, sphere = _evoked_cov_sphere src_surf = mne.read_source_spaces(fname_src) return mne.make_forward_solution( - evoked.info, fname_trans, src_surf, sphere, mindist=5.0) + evoked.info, fname_trans, src_surf, sphere, mindist=5.0 + ) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def _fwd_subvolume(_evoked_cov_sphere): """Compute a forward for a surface source space.""" - pytest.importorskip('nibabel') - evoked, cov, sphere = _evoked_cov_sphere - volume_labels = ['Left-Cerebellum-Cortex', 'right-Cerebellum-Cortex'] - with pytest.raises(ValueError, - match=r"Did you mean one of \['Right-Cere"): + pytest.importorskip("nibabel") + evoked, _, sphere = _evoked_cov_sphere + volume_labels = ["Left-Cerebellum-Cortex", "right-Cerebellum-Cortex"] + with pytest.raises(ValueError, match=r"Did you mean one of \['Right-Cere"): mne.setup_volume_source_space( - 'sample', pos=20., 
volume_label=volume_labels, - subjects_dir=subjects_dir) - volume_labels[1] = 'R' + volume_labels[1][1:] + "sample", pos=20.0, volume_label=volume_labels, subjects_dir=subjects_dir + ) + volume_labels[1] = "R" + volume_labels[1][1:] src_vol = mne.setup_volume_source_space( - 'sample', pos=20., volume_label=volume_labels, - subjects_dir=subjects_dir, add_interpolator=False) - return mne.make_forward_solution( - evoked.info, fname_trans, src_vol, sphere, mindist=5.0) + "sample", + pos=20.0, + volume_label=volume_labels, + subjects_dir=subjects_dir, + add_interpolator=False, + ) + fwd = mne.make_forward_solution( + evoked.info, fname_trans, src_vol, sphere, mindist=1.0 + ) + nsrc = sum(s["nuse"] for s in src_vol) + assert fwd["nsource"] == nsrc + return fwd @pytest.fixture @@ -637,52 +785,50 @@ def fwd_volume_small(_fwd_subvolume): return _fwd_subvolume.copy() -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def _all_src_types_fwd(_fwd_surf, _fwd_subvolume): """Create all three forward types (surf, vol, mixed).""" - fwds = dict( - surface=_fwd_surf.copy(), - volume=_fwd_subvolume.copy()) - with pytest.raises(RuntimeError, - match='Invalid source space with kinds'): - fwds['volume']['src'] + fwds['surface']['src'] + fwds = dict(surface=_fwd_surf.copy(), volume=_fwd_subvolume.copy()) + with pytest.raises(RuntimeError, match="Invalid source space with kinds"): + fwds["volume"]["src"] + fwds["surface"]["src"] # mixed (4) - fwd = fwds['surface'].copy() - f2 = fwds['volume'].copy() + fwd = fwds["surface"].copy() + f2 = fwds["volume"].copy() del _fwd_surf, _fwd_subvolume - for keys, axis in [(('source_rr',), 0), - (('source_nn',), 0), - (('sol', 'data'), 1), - (('_orig_sol',), 1)]: + for keys, axis in [ + (("source_rr",), 0), + (("source_nn",), 0), + (("sol", "data"), 1), + (("_orig_sol",), 1), + ]: a, b = fwd, f2 key = keys[0] if len(keys) > 1: a, b = a[key], b[key] key = keys[1] a[key] = np.concatenate([a[key], b[key]], axis=axis) - fwd['sol']['ncol'] = fwd['sol']['data'].shape[1] - fwd['nsource'] = fwd['sol']['ncol'] // 3 - fwd['src'] = fwd['src'] + f2['src'] - fwds['mixed'] = fwd + fwd["sol"]["ncol"] = fwd["sol"]["data"].shape[1] + fwd["nsource"] = fwd["sol"]["ncol"] // 3 + fwd["src"] = fwd["src"] + f2["src"] + fwds["mixed"] = fwd return fwds -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def _all_src_types_inv_evoked(_evoked_cov_sphere, _all_src_types_fwd): """Compute inverses for all source types.""" evoked, cov, _ = _evoked_cov_sphere invs = dict() for kind, fwd in _all_src_types_fwd.items(): - assert fwd['src'].kind == kind - with pytest.warns(RuntimeWarning, match='has been reduced'): - invs[kind] = mne.minimum_norm.make_inverse_operator( - evoked.info, fwd, cov) + assert fwd["src"].kind == kind + with pytest.warns(RuntimeWarning, match="has been reduced"): + invs[kind] = mne.minimum_norm.make_inverse_operator(evoked.info, fwd, cov) return invs, evoked -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def all_src_types_inv_evoked(_all_src_types_inv_evoked): """All source types of inverses, allowing for possible modification.""" invs, evoked = _all_src_types_inv_evoked @@ -691,61 +837,110 @@ def all_src_types_inv_evoked(_all_src_types_inv_evoked): return invs, evoked -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def mixed_fwd_cov_evoked(_evoked_cov_sphere, _all_src_types_fwd): """Compute inverses for all source types.""" evoked, cov, _ = _evoked_cov_sphere - return _all_src_types_fwd['mixed'].copy(), 
cov.copy(), evoked.copy() + return _all_src_types_fwd["mixed"].copy(), cov.copy(), evoked.copy() -@pytest.fixture(scope='session') -@pytest.mark.slowtest -@pytest.mark.parametrize(params=[testing._pytest_param()]) +@pytest.fixture(scope="session") def src_volume_labels(): """Create a 7mm source space with labels.""" - pytest.importorskip('nibabel') + pytest.importorskip("nibabel") volume_labels = mne.get_volume_labels_from_aseg(fname_aseg) - with pytest.warns(RuntimeWarning, match='Found no usable.*Left-vessel.*'): + with ( + _record_warnings(), + pytest.warns(RuntimeWarning, match="Found no usable.*t-vessel.*"), + ): src = mne.setup_volume_source_space( - 'sample', 7., mri='aseg.mgz', volume_label=volume_labels, - add_interpolator=False, bem=fname_bem, - subjects_dir=subjects_dir) + "sample", + 7.0, + mri="aseg.mgz", + volume_label=volume_labels, + add_interpolator=False, + bem=fname_bem, + subjects_dir=subjects_dir, + ) lut, _ = mne.read_freesurfer_lut() assert len(volume_labels) == 46 - assert volume_labels[0] == 'Unknown' - assert lut['Unknown'] == 0 # it will be excluded during label gen + assert volume_labels[0] == "Unknown" + assert lut["Unknown"] == 0 # it will be excluded during label gen return src, tuple(volume_labels), lut def _fail(*args, **kwargs): __tracebackhide__ = True - raise AssertionError('Test should not download') + raise AssertionError("Test should not download") -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def download_is_error(monkeypatch): """Prevent downloading by raising an error when it's attempted.""" import pooch - monkeypatch.setattr(pooch, 'retrieve', _fail) + + monkeypatch.setattr(pooch, "retrieve", _fail) + yield + + +@pytest.fixture() +def fake_retrieve(monkeypatch, download_is_error): + """Monkeypatch pooch.retrieve to avoid downloading (just touch files).""" + import pooch + + my_func = _FakeFetch() + monkeypatch.setattr(pooch, "retrieve", my_func) + monkeypatch.setattr(pooch, "create", my_func) + yield my_func + + +class _FakeFetch: + def __init__(self): + self.call_args_list = list() + + @property + def call_count(self): + return len(self.call_args_list) + + # Wrapper for pooch.retrieve(...) and pooch.create(...) + def __call__(self, *args, **kwargs): + assert "path" in kwargs + if "fname" in kwargs: # pooch.retrieve(...) + self.call_args_list.append((args, kwargs)) + path = Path(kwargs["path"], kwargs["fname"]) + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text("test") + return path + else: # pooch.create(...) 
has been called + self.path = kwargs["path"] + return self + + # Wrappers for Pooch instances (e.g., in eegbci we pooch.create) + def fetch(self, fname): + self(path=self.path, fname=fname) + + def load_registry(self, registry): + assert Path(registry).exists(), registry # We can't use monkeypatch because its scope (function-level) conflicts with # the requests fixture (module-level), so we live with a module-scoped version # that uses mock -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def options_3d(): """Disable advanced 3d rendering.""" with mock.patch.dict( - os.environ, { + os.environ, + { "MNE_3D_OPTION_ANTIALIAS": "false", "MNE_3D_OPTION_DEPTH_PEELING": "false", "MNE_3D_OPTION_SMOOTH_SHADING": "false", - } + }, ): yield -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def protect_config(): """Protect ~/.mne.""" temp = _TempDir() @@ -753,38 +948,40 @@ def protect_config(): yield +def _test_passed(request): + if _phase_report_key not in request.node.stash: + return True + report = request.node.stash[_phase_report_key] + return "call" in report and report["call"].outcome == "passed" + + @pytest.fixture() def brain_gc(request): """Ensure that brain can be properly garbage collected.""" keys = ( - 'renderer_interactive', - 'renderer_interactive_pyvistaqt', - 'renderer', - 'renderer_pyvistaqt', - 'renderer_notebook', + "renderer_interactive", + "renderer_interactive_pyvistaqt", + "renderer", + "renderer_pyvistaqt", + "renderer_notebook", ) assert set(request.fixturenames) & set(keys) != set() for key in keys: if key in request.fixturenames: - is_pv = \ - request.getfixturevalue(key)._get_3d_backend() == 'pyvistaqt' + is_pv = request.getfixturevalue(key)._get_3d_backend() == "pyvistaqt" close_func = request.getfixturevalue(key).backend._close_all break if not is_pv: yield return from mne.viz import Brain + ignore = set(id(o) for o in gc.get_objects()) yield close_func() - # no need to warn if the test itself failed, pytest-harvest helps us here - try: - outcome = request.node.harvest_rep_call - except Exception: - outcome = 'failed' - if outcome != 'passed': + if not _test_passed(request): return - _assert_no_instances(Brain, 'after') + _assert_no_instances(Brain, "after") # Check VTK objs = gc.get_objects() bad = list() @@ -794,11 +991,11 @@ def brain_gc(request): except Exception: # old Python, probably pass else: - if name.startswith('vtk') and id(o) not in ignore: + if name.startswith("vtk") and id(o) not in ignore: bad.append(name) del o del objs, ignore, Brain - assert len(bad) == 0, 'VTK objects linger:\n' + '\n'.join(bad) + assert len(bad) == 0, "VTK objects linger:\n" + "\n".join(bad) _files = list() @@ -809,26 +1006,24 @@ def pytest_sessionfinish(session, exitstatus): n = session.config.option.durations if n is None: return - print('\n') - try: - import pytest_harvest - except ImportError: - print('Module-level timings require pytest-harvest') - return + print("\n") # get the number to print - res = pytest_harvest.get_session_synthesis_dct(session) - files = dict() - for key, val in res.items(): - parts = Path(key.split(':')[0]).parts + files = defaultdict(lambda: 0.0) + for item in session.items: + if _phase_report_key not in item.stash: + continue + report = item.stash[_phase_report_key] + dur = sum(x.duration for x in report.values()) + parts = Path(item.nodeid.split(":")[0]).parts # split mne/tests/test_whatever.py into separate categories since these # are essentially submodule-level tests. 
Keeping just [:3] works, # except for mne/viz where we want level-4 granulatity - split_submodules = (('mne', 'viz'), ('mne', 'preprocessing')) - parts = parts[:4 if parts[:2] in split_submodules else 3] - if not parts[-1].endswith('.py'): - parts = parts + ('',) - file_key = '/'.join(parts) - files[file_key] = files.get(file_key, 0) + val['pytest_duration_s'] + split_submodules = (("mne", "viz"), ("mne", "preprocessing")) + parts = parts[: 4 if parts[:2] in split_submodules else 3] + if not parts[-1].endswith(".py"): + parts = parts + ("",) + file_key = "/".join(parts) + files[file_key] += dur files = sorted(list(files.items()), key=lambda x: x[1])[::-1] # print _files[:] = files[:n] @@ -839,47 +1034,55 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): writer = terminalreporter n = len(_files) if n: - writer.line('') # newline - writer.write_sep('=', f'slowest {n} test module{_pl(n)}') + writer.line("") # newline + writer.write_sep("=", f"slowest {n} test module{_pl(n)}") names, timings = zip(*_files) - timings = [f'{timing:0.2f}s total' for timing in timings] + timings = [f"{timing:0.2f}s total" for timing in timings] rjust = max(len(timing) for timing in timings) timings = [timing.rjust(rjust) for timing in timings] for name, timing in zip(names, timings): - writer.line(f'{timing.ljust(15)}{name}') + writer.line(f"{timing.ljust(15)}{name}") + + +def pytest_report_header(config, startdir=None): + """Add information to the pytest run header.""" + return f"MNE {mne.__version__} -- {Path(mne.__file__).parent}" -@pytest.fixture(scope="function", params=('Numba', 'NumPy')) +@pytest.fixture(scope="function", params=("Numba", "NumPy")) def numba_conditional(monkeypatch, request): """Test both code paths on machines that have Numba.""" - assert request.param in ('Numba', 'NumPy') - if request.param == 'NumPy' and has_numba: + assert request.param in ("Numba", "NumPy") + if request.param == "NumPy" and has_numba: monkeypatch.setattr( - cluster_level, '_get_buddies', cluster_level._get_buddies_fallback) + cluster_level, "_get_buddies", cluster_level._get_buddies_fallback + ) monkeypatch.setattr( - cluster_level, '_get_selves', cluster_level._get_selves_fallback) + cluster_level, "_get_selves", cluster_level._get_selves_fallback + ) monkeypatch.setattr( - cluster_level, '_where_first', cluster_level._where_first_fallback) - monkeypatch.setattr( - numerics, '_arange_div', numerics._arange_div_fallback) - if request.param == 'Numba' and not has_numba: - pytest.skip('Numba not installed') + cluster_level, "_where_first", cluster_level._where_first_fallback + ) + monkeypatch.setattr(numerics, "_arange_div", numerics._arange_div_fallback) + if request.param == "Numba" and not has_numba: + pytest.skip("Numba not installed") yield request.param # Create one nbclient and reuse it -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def _nbclient(): try: import nbformat + import trame # noqa + from ipywidgets import Button # noqa from jupyter_client import AsyncKernelManager from nbclient import NotebookClient - from ipywidgets import Button # noqa - import ipyvtklink # noqa except Exception as exc: - return pytest.skip(f'Skipping Notebook test: {exc}') + return pytest.skip(f"Skipping Notebook test: {exc}") km = AsyncKernelManager(config=None) - nb = nbformat.reads(""" + nb = nbformat.reads( + """ { "cells": [ { @@ -905,7 +1108,9 @@ def _nbclient(): }, "nbformat": 4, "nbformat_minor": 4 -}""", as_version=4) +}""", + as_version=4, + ) client = NotebookClient(nb, km=km) 
yield client try: @@ -914,18 +1119,30 @@ def _nbclient(): pass -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def nbexec(_nbclient): """Execute Python code in a notebook.""" # Adapted/simplified from nbclient/client.py (BSD-3-Clause) + from nbclient.exceptions import CellExecutionError + _nbclient._cleanup_kernel() def execute(code, reset=False): _nbclient.reset_execution_trackers() with _nbclient.setup_kernel(): assert _nbclient.kc is not None - cell = Bunch(cell_type='code', metadata={}, source=dedent(code)) - _nbclient.execute_cell(cell, 0, execution_count=0) + cell = Bunch(cell_type="code", metadata={}, source=dedent(code), outputs=[]) + try: + _nbclient.execute_cell(cell, 0, execution_count=0) + except CellExecutionError: # pragma: no cover + for kind in ("stdout", "stderr"): + print( + "\n".join( + o["text"] for o in cell.outputs if o.get("name", "") == kind + ), + file=getattr(sys, kind), + ) + raise _nbclient.set_widgets_metadata() yield execute @@ -933,15 +1150,15 @@ def execute(code, reset=False): def pytest_runtest_call(item): """Run notebook code written in Python.""" - if 'nbexec' in getattr(item, 'fixturenames', ()): - nbexec = item.funcargs['nbexec'] - code = inspect.getsource(getattr(item.module, item.name.split('[')[0])) + if "nbexec" in getattr(item, "fixturenames", ()): + nbexec = item.funcargs["nbexec"] + code = inspect.getsource(getattr(item.module, item.name.split("[")[0])) code = code.splitlines() ci = 0 for ci, c in enumerate(code): - if c.startswith(' '): # actual content + if c.startswith(" "): # actual content break - code = '\n'.join(code[ci:]) + code = "\n".join(code[ci:]) def run(nbexec=nbexec, code=code): nbexec(code) @@ -950,27 +1167,30 @@ def run(nbexec=nbexec, code=code): return -@pytest.mark.filterwarnings('ignore:.*Extraction of measurement.*:') -@pytest.fixture(params=( - [nirsport2, nirsport2_snirf, testing._pytest_param()], - [nirsport2_2021_9, nirsport2_20219_snirf, testing._pytest_param()], -)) +@pytest.fixture( + params=( + [nirsport2, nirsport2_snirf, testing._pytest_param()], + [nirsport2_2021_9, nirsport2_20219_snirf, testing._pytest_param()], + ) +) def nirx_snirf(request): """Return a (raw_nirx, raw_snirf) matched pair.""" - pytest.importorskip('h5py') + pytest.importorskip("h5py") skipper = request.param[2].marks[0].mark if skipper.args[0]: # will skip - pytest.skip(skipper.kwargs['reason']) - return (read_raw_nirx(request.param[0], preload=True), - read_raw_snirf(request.param[1], preload=True)) + pytest.skip(skipper.kwargs["reason"]) + return ( + read_raw_nirx(request.param[0], preload=True), + read_raw_snirf(request.param[1], preload=True), + ) @pytest.fixture def qt_windows_closed(request): """Ensure that no new Qt windows are open after a test.""" - _check_skip_backend('pyvistaqt') + _check_skip_backend("pyvistaqt") app = _init_mne_qtapp() - from qtpy import API_NAME + app.processEvents() gc.collect() n_before = len(app.topLevelWidgets()) @@ -978,10 +1198,119 @@ def qt_windows_closed(request): yield app.processEvents() gc.collect() - if 'allow_unclosed' in marks: + if "allow_unclosed" in marks: return - if 'allow_unclosed_pyside2' in marks and API_NAME.lower() == 'pyside2': + # Don't check when the test fails + if not _test_passed(request): return widgets = app.topLevelWidgets() n_after = len(widgets) assert n_before == n_after, widgets[-4:] + + +# https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures # noqa: E501 +_phase_report_key = StashKey() + + 
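# A minimal standalone sketch (not part of this diff) of the stash-based
# pass/fail bookkeeping that the hook below and the ``_test_passed`` helper
# rely on, adapted from the pytest docs linked above; the ``only_on_pass``
# fixture name is illustrative only.
import pytest

_report_key = pytest.StashKey[dict]()


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Store one TestReport per phase ("setup", "call", "teardown") on the item.
    rep = (yield).get_result()
    item.stash.setdefault(_report_key, {})[rep.when] = rep


@pytest.fixture
def only_on_pass(request):
    """Run extra checks during teardown only if the test body passed."""
    yield
    report = request.node.stash.get(_report_key, {})
    if "call" in report and report["call"].outcome == "passed":
        print("test passed; safe to assert on leaked resources here")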
+@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_makereport(item, call): + """Stash the status of each item and turn unexpected skips into errors.""" + outcome = yield + rep: pytest.TestReport = outcome.get_result() + item.stash.setdefault(_phase_report_key, {})[rep.when] = rep + if rep.outcome == "passed": # only check for skips etc. if otherwise green + _modify_report_skips(rep) + return rep + + +@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_make_collect_report(collector: pytest.Collector): + """Turn unexpected skips during collection (e.g., module-level) into errors.""" + outcome = yield + rep: pytest.CollectReport = outcome.get_result() + _modify_report_skips(rep) + return rep + + +# Default means "allow all skips". Can use something like "$." to mean +# "never match", i.e., "treat all skips as errors" +_valid_skips_re = re.compile(os.getenv("MNE_TEST_ALLOW_SKIP", ".*")) + + +# To turn unexpected skips into errors, we need to look both at the collection phase +# (for decorated tests) and the call phase (for things like `importorskip` +# within the test body). code adapted from pytest-error-for-skips +def _modify_report_skips(report: pytest.TestReport | pytest.CollectReport): + if not report.skipped: + return + if isinstance(report.longrepr, tuple): + file, lineno, reason = report.longrepr + else: + file, lineno, reason = "", 1, str(report.longrepr) + if _valid_skips_re.match(reason): + return + assert isinstance(report, pytest.TestReport | pytest.CollectReport), type(report) + if file.endswith("doctest.py"): # _python/doctest.py + return + # xfail tests aren't true "skips" but show up as skipped in reports + if getattr(report, "keywords", {}).get("xfail", False): + return + # the above only catches marks, so we need to actually parse the report to catch + # an xfail based on the traceback + if " pytest.xfail( " in reason: + return + if reason.startswith("Skipped: "): + reason = reason[9:] + report.longrepr = f"{file}:{lineno}: UNEXPECTED SKIP: {reason}" + # Make it show up as an error in the report + report.outcome = "error" if isinstance(report, pytest.TestReport) else "failed" + + +@pytest.fixture(scope="function") +def eyetrack_cal(): + """Create a toy calibration instance.""" + screen_size = (0.4, 0.225) # width, height in meters + screen_resolution = (1920, 1080) + screen_distance = 0.7 # meters + onset = 0 + model = "HV9" + eye = "R" + avg_error = 0.5 + max_error = 1.0 + positions = np.zeros((9, 2)) + offsets = np.zeros((9,)) + gaze = np.zeros((9, 2)) + cal = mne.preprocessing.eyetracking.Calibration( + screen_size=screen_size, + screen_distance=screen_distance, + screen_resolution=screen_resolution, + eye=eye, + model=model, + positions=positions, + offsets=offsets, + gaze=gaze, + onset=onset, + avg_error=avg_error, + max_error=max_error, + ) + return cal + + +@pytest.fixture(scope="function") +def eyetrack_raw(): + """Create a toy raw instance with eyetracking channels.""" + # simulate a steady fixation at the center pixel of a 1920x1080 resolution screen + shape = (1, 100) # x or y, time + data = np.vstack([np.full(shape, 960), np.full(shape, 540), np.full(shape, 0)]) + + info = info = mne.create_info( + ch_names=["xpos", "ypos", "pupil"], sfreq=100, ch_types="eyegaze" + ) + more_info = dict( + xpos=("eyegaze", "px", "right", "x"), + ypos=("eyegaze", "px", "right", "y"), + pupil=("pupil", "au", "right"), + ) + raw = mne.io.RawArray(data, info) + raw = mne.preprocessing.eyetracking.set_channel_types_eyetrack(raw, more_info) + return raw 
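The `_valid_skips_re` / `MNE_TEST_ALLOW_SKIP` machinery added above decides which skip
reasons are tolerated and which get rewritten as errors. The short sketch below is not
part of the diff; it only illustrates how that environment variable is matched against
skip reasons, reusing two reason strings that appear in this conftest and assuming only
the standard library.

import os
import re

# Default ".*" allows every skip; an unmatchable pattern such as "$." makes
# every unexpected skip surface as an error, mirroring _valid_skips_re above.
allow_re = re.compile(os.getenv("MNE_TEST_ALLOW_SKIP", ".*"))

for reason in ("Requires mne_qt_browser", "Numba not installed"):
    verdict = "allowed skip" if allow_re.match(reason) else "reported as UNEXPECTED SKIP"
    print(f"{reason!r}: {verdict}")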
diff --git a/mne/coreg.py b/mne/coreg.py index 3419392a013..f9829f3172d 100644 --- a/mne/coreg.py +++ b/mne/coreg.py @@ -1,80 +1,117 @@ -# -*- coding: utf-8 -*- """Coregistration between different coordinate frames.""" -# Authors: Christian Brodbeck -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import configparser import fnmatch -from glob import glob, iglob import os import os.path as op -import stat -import sys import re import shutil +import stat +import sys from functools import reduce +from glob import glob, iglob import numpy as np +from scipy.optimize import leastsq +from scipy.spatial.distance import cdist + +from ._fiff._digitization import _get_data_as_dict_from_dig +from ._fiff.constants import FIFF +from ._fiff.meas_info import Info, read_fiducials, read_info, write_fiducials -from .io import read_fiducials, write_fiducials, read_info -from .io.constants import FIFF -from .io.meas_info import Info -from .io._digitization import _get_data_as_dict_from_dig # keep get_mni_fiducials for backward compat (no burden to keep in this # namespace, too) -from ._freesurfer import (_read_mri_info, get_mni_fiducials, # noqa: F401 - estimate_head_mri_t) # noqa: F401 -from .label import read_label, Label -from .source_space import (add_source_space_distances, read_source_spaces, # noqa: E501,F401 - write_source_spaces) -from .surface import (read_surface, write_surface, _normalize_vectors, - complete_surface_info, decimate_surface, - _DistanceQuery) +from ._freesurfer import ( + _read_mri_info, + estimate_head_mri_t, # noqa: F401 + get_mni_fiducials, +) from .bem import read_bem_surfaces, write_bem_surfaces -from .transforms import (rotation, rotation3d, scaling, translation, Transform, - _read_fs_xfm, _write_fs_xfm, invert_transform, - combine_transforms, _quat_to_euler, - _fit_matched_points, apply_trans, - rot_to_quat, _angle_between_quats) from .channels import make_dig_montage -from .utils import (get_config, get_subjects_dir, logger, pformat, verbose, - warn, fill_doc, _validate_type, - _check_subject, _check_option, _import_nibabel) +from .label import Label, read_label +from .source_space import ( + add_source_space_distances, + read_source_spaces, # noqa: F401 + write_source_spaces, +) +from .surface import ( + _DistanceQuery, + _normalize_vectors, + complete_surface_info, + decimate_surface, + read_surface, + write_surface, +) +from .transforms import ( + Transform, + _angle_between_quats, + _fit_matched_points, + _quat_to_euler, + _read_fs_xfm, + _write_fs_xfm, + apply_trans, + combine_transforms, + invert_transform, + rot_to_quat, + rotation, + rotation3d, + scaling, + translation, +) +from .utils import ( + _check_option, + _check_subject, + _import_nibabel, + _validate_type, + fill_doc, + get_config, + get_subjects_dir, + logger, + pformat, + verbose, + warn, +) from .viz._3d import _fiducial_coords # some path templates -trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif') -subject_dirname = os.path.join('{subjects_dir}', '{subject}') -bem_dirname = os.path.join(subject_dirname, 'bem') -mri_dirname = os.path.join(subject_dirname, 'mri') -mri_transforms_dirname = os.path.join(subject_dirname, 'mri', 'transforms') -surf_dirname = os.path.join(subject_dirname, 'surf') +trans_fname = os.path.join("{raw_dir}", "{subject}-trans.fif") +subject_dirname = os.path.join("{subjects_dir}", "{subject}") +bem_dirname = os.path.join(subject_dirname, "bem") +mri_dirname = os.path.join(subject_dirname, "mri") 
+mri_transforms_dirname = os.path.join(subject_dirname, "mri", "transforms") +surf_dirname = os.path.join(subject_dirname, "surf") bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif") -head_bem_fname = pformat(bem_fname, name='head') -head_sparse_fname = pformat(bem_fname, name='head-sparse') -fid_fname = pformat(bem_fname, name='fiducials') +head_bem_fname = pformat(bem_fname, name="head") +head_sparse_fname = pformat(bem_fname, name="head-sparse") +fid_fname = pformat(bem_fname, name="fiducials") fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif") -src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif') -_head_fnames = (os.path.join(bem_dirname, 'outer_skin.surf'), - head_sparse_fname, - head_bem_fname) -_high_res_head_fnames = (os.path.join(bem_dirname, '{subject}-head-dense.fif'), - os.path.join(surf_dirname, 'lh.seghead'), - os.path.join(surf_dirname, 'lh.smseghead')) +src_fname = os.path.join(bem_dirname, "{subject}-{spacing}-src.fif") +_head_fnames = ( + os.path.join(bem_dirname, "outer_skin.surf"), + head_sparse_fname, + head_bem_fname, +) +_high_res_head_fnames = ( + os.path.join(bem_dirname, "{subject}-head-dense.fif"), + os.path.join(surf_dirname, "lh.seghead"), + os.path.join(surf_dirname, "lh.smseghead"), +) def _map_fid_name_to_idx(name: str) -> int: """Map a fiducial name to its index in the DigMontage.""" name = name.lower() - if name == 'lpa': + if name == "lpa": return 0 - elif name == 'nasion': + elif name == "nasion": return 1 else: - assert name == 'rpa' + assert name == "rpa" return 2 @@ -85,7 +122,7 @@ def _make_writable(fname): def _make_writable_recursive(path): """Recursively set writable.""" - if sys.platform.startswith('win'): + if sys.platform.startswith("win"): return # can't safely set perms for root, dirs, files in os.walk(path, topdown=False): for f in dirs + files: @@ -127,30 +164,28 @@ def coregister_fiducials(info, fiducials, tol=0.01): fiducials, coord_frame_to = read_fiducials(fiducials) else: coord_frame_to = FIFF.FIFFV_COORD_MRI - frames_from = {d['coord_frame'] for d in info['dig']} + frames_from = {d["coord_frame"] for d in info["dig"]} if len(frames_from) > 1: - raise ValueError("info contains fiducials from different coordinate " - "frames") + raise ValueError("info contains fiducials from different coordinate frames") else: coord_frame_from = frames_from.pop() - coords_from = _fiducial_coords(info['dig']) + coords_from = _fiducial_coords(info["dig"]) coords_to = _fiducial_coords(fiducials, coord_frame_to) trans = fit_matched_points(coords_from, coords_to, tol=tol) return Transform(coord_frame_from, coord_frame_to, trans) @verbose -def create_default_subject(fs_home=None, update=False, subjects_dir=None, - verbose=None): +def create_default_subject(fs_home=None, update=False, subjects_dir=None, verbose=None): """Create an average brain subject for subjects without structural MRI. - Create a copy of fsaverage from the Freesurfer directory in subjects_dir + Create a copy of fsaverage from the FreeSurfer directory in subjects_dir and add auxiliary files from the mne package. Parameters ---------- fs_home : None | str - The freesurfer home directory (only needed if ``FREESURFER_HOME`` is + The FreeSurfer home directory (only needed if ``FREESURFER_HOME`` is not specified as environment variable). 
update : bool In cases where a copy of the fsaverage brain already exists in the @@ -164,62 +199,69 @@ def create_default_subject(fs_home=None, update=False, subjects_dir=None, Notes ----- When no structural MRI is available for a subject, an average brain can be - substituted. Freesurfer comes with such an average brain model, and MNE + substituted. FreeSurfer comes with such an average brain model, and MNE comes with some auxiliary files which make coregistration easier. :py:func:`create_default_subject` copies the relevant - files from Freesurfer into the current subjects_dir, and also adds the + files from FreeSurfer into the current subjects_dir, and also adds the auxiliary files provided by MNE. """ subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) if fs_home is None: - fs_home = get_config('FREESURFER_HOME', fs_home) + fs_home = get_config("FREESURFER_HOME", fs_home) if fs_home is None: raise ValueError( "FREESURFER_HOME environment variable not found. Please " "specify the fs_home parameter in your call to " - "create_default_subject().") + "create_default_subject()." + ) - # make sure freesurfer files exist - fs_src = os.path.join(fs_home, 'subjects', 'fsaverage') + # make sure FreeSurfer files exist + fs_src = os.path.join(fs_home, "subjects", "fsaverage") if not os.path.exists(fs_src): - raise IOError('fsaverage not found at %r. Is fs_home specified ' - 'correctly?' % fs_src) - for name in ('label', 'mri', 'surf'): + raise OSError( + f"fsaverage not found at {fs_src!r}. Is fs_home specified correctly?" + ) + for name in ("label", "mri", "surf"): dirname = os.path.join(fs_src, name) if not os.path.isdir(dirname): - raise IOError("Freesurfer fsaverage seems to be incomplete: No " - "directory named %s found in %s" % (name, fs_src)) + raise OSError( + "FreeSurfer fsaverage seems to be incomplete: No directory named " + f"{name} found in {fs_src}" + ) # make sure destination does not already exist - dest = os.path.join(subjects_dir, 'fsaverage') + dest = os.path.join(subjects_dir, "fsaverage") if dest == fs_src: - raise IOError( - "Your subjects_dir points to the freesurfer subjects_dir (%r). " - "The default subject can not be created in the freesurfer " - "installation directory; please specify a different " - "subjects_dir." % subjects_dir) + raise OSError( + "Your subjects_dir points to the FreeSurfer subjects_dir " + f"({repr(subjects_dir)}). The default subject can not be created in the " + "FreeSurfer installation directory; please specify a different " + "subjects_dir." + ) elif (not update) and os.path.exists(dest): - raise IOError( - "Can not create fsaverage because %r already exists in " - "subjects_dir %r. Delete or rename the existing fsaverage " - "subject folder." % ('fsaverage', subjects_dir)) + raise OSError( + 'Can not create fsaverage because "fsaverage" already exists in ' + f"subjects_dir {repr(subjects_dir)}. Delete or rename the existing " + "fsaverage subject folder." 
+ ) - # copy fsaverage from freesurfer - logger.info("Copying fsaverage subject from freesurfer directory...") + # copy fsaverage from FreeSurfer + logger.info("Copying fsaverage subject from FreeSurfer directory...") if (not update) or not os.path.exists(dest): shutil.copytree(fs_src, dest) _make_writable_recursive(dest) # copy files from mne - source_fname = os.path.join(os.path.dirname(__file__), 'data', 'fsaverage', - 'fsaverage-%s.fif') - dest_bem = os.path.join(dest, 'bem') + source_fname = os.path.join( + os.path.dirname(__file__), "data", "fsaverage", "fsaverage-%s.fif" + ) + dest_bem = os.path.join(dest, "bem") if not os.path.exists(dest_bem): os.mkdir(dest_bem) logger.info("Copying auxiliary fsaverage files from mne...") - dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif') + dest_fname = os.path.join(dest_bem, "fsaverage-%s.fif") _make_writable_recursive(dest_bem) - for name in ('fiducials', 'head', 'inner_skull-bem', 'trans'): + for name in ("fiducials", "head", "inner_skull-bem", "trans"): if not os.path.exists(dest_fname % name): shutil.copy(source_fname % name, dest_bem) @@ -243,11 +285,10 @@ def _decimate_points(pts, res=10): pts : array, shape = (n_points, 3) The decimated points. """ - from scipy.spatial.distance import cdist pts = np.asarray(pts) # find the bin edges for the voxel space - xmin, ymin, zmin = pts.min(0) - res / 2. + xmin, ymin, zmin = pts.min(0) - res / 2.0 xmax, ymax, zmax = pts.max(0) + res xax = np.arange(xmin, xmax, res) yax = np.arange(ymin, ymax, res) @@ -259,19 +300,18 @@ def _decimate_points(pts, res=10): x = xax[xbins] y = yax[ybins] z = zax[zbins] - mids = np.c_[x, y, z] + res / 2. + mids = np.c_[x, y, z] + res / 2.0 # each point belongs to at most one voxel center, so figure those out - # (cKDTree faster than BallTree for these small problems) - tree = _DistanceQuery(mids, method='cKDTree') + # (KDTree faster than BallTree for these small problems) + tree = _DistanceQuery(mids, method="KDTree") _, mid_idx = tree.query(pts) # then figure out which to actually use based on proximity # (take advantage of sorting the mid_idx to get our mapping of # pts to nearest voxel midpoint) sort_idx = np.argsort(mid_idx) - bounds = np.cumsum( - np.concatenate([[0], np.bincount(mid_idx, minlength=len(mids))])) + bounds = np.cumsum(np.concatenate([[0], np.bincount(mid_idx, minlength=len(mids))])) assert len(bounds) == len(mids) + 1 out = list() for mi, mid in enumerate(mids): @@ -282,14 +322,13 @@ def _decimate_points(pts, res=10): # But it's faster for many points than making a big boolean indexer # over and over (esp. since each point can only belong to a single # voxel). - use_pts = pts[sort_idx[bounds[mi]:bounds[mi + 1]]] + use_pts = pts[sort_idx[bounds[mi] : bounds[mi + 1]]] if not len(use_pts): out.append([np.inf] * 3) else: - out.append( - use_pts[np.argmin(cdist(use_pts, mid[np.newaxis])[:, 0])]) + out.append(use_pts[np.argmin(cdist(use_pts, mid[np.newaxis])[:, 0])]) out = np.array(out, float).reshape(-1, 3) - out = out[np.abs(out - mids).max(axis=1) < res / 2.] 
+ out = out[np.abs(out - mids).max(axis=1) < res / 2.0] # """ return out @@ -307,7 +346,7 @@ def _trans_from_params(param_info, params): i += 3 if do_translate: - x, y, z = params[i:i + 3] + x, y, z = params[i : i + 3] trans.insert(0, translation(x, y, z)) i += 3 @@ -315,7 +354,7 @@ def _trans_from_params(param_info, params): s = params[i] trans.append(scaling(s, s, s)) elif do_scale == 3: - x, y, z = params[i:i + 3] + x, y, z = params[i : i + 3] trans.append(scaling(x, y, z)) trans = reduce(np.dot, trans) @@ -326,9 +365,17 @@ def _trans_from_params(param_info, params): # XXX this function should be moved out of coreg as used elsewhere -def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True, - scale=False, tol=None, x0=None, out='trans', - weights=None): +def fit_matched_points( + src_pts, + tgt_pts, + rotate=True, + translate=True, + scale=False, + tol=None, + x0=None, + out="trans", + weights=None, +): """Find a transform between matched sets of points. This minimizes the squared distance between two matching sets of points. @@ -373,13 +420,17 @@ def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True, src_pts = np.atleast_2d(src_pts) tgt_pts = np.atleast_2d(tgt_pts) if src_pts.shape != tgt_pts.shape: - raise ValueError("src_pts and tgt_pts must have same shape (got " - "{}, {})".format(src_pts.shape, tgt_pts.shape)) + raise ValueError( + "src_pts and tgt_pts must have same shape " + f"(got {src_pts.shape}, {tgt_pts.shape})" + ) if weights is not None: weights = np.asarray(weights, src_pts.dtype) if weights.ndim != 1 or weights.size not in (src_pts.shape[0], 1): - raise ValueError("weights (shape=%s) must be None or have shape " - "(%s,)" % (weights.shape, src_pts.shape[0],)) + raise ValueError( + f"weights (shape={weights.shape}) must be None or have shape " + f"({src_pts.shape[0]},)" + ) weights = weights[:, np.newaxis] param_info = (bool(rotate), bool(translate), int(scale)) @@ -392,15 +443,14 @@ def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True, tgt_pts = np.asarray(tgt_pts, float) if weights is not None: weights = np.asarray(weights, float) - x, s = _fit_matched_points( - src_pts, tgt_pts, weights, bool(param_info[2])) + x, s = _fit_matched_points(src_pts, tgt_pts, weights, bool(param_info[2])) x[:3] = _quat_to_euler(x[:3]) x = np.concatenate((x, [s])) if param_info[2] else x else: x = _generic_fit(src_pts, tgt_pts, param_info, weights, x0) # re-create the final transformation matrix - if (tol is not None) or (out == 'trans'): + if (tol is not None) or (out == "trans"): trans = _trans_from_params(param_info, x) # assess the error of the solution @@ -409,23 +459,24 @@ def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True, est_pts = np.dot(src_pts, trans.T)[:, :3] err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1)) if np.any(err > tol): - raise RuntimeError("Error exceeds tolerance. Error = %r" % err) + raise RuntimeError(f"Error exceeds tolerance. Error = {err!r}") - if out == 'params': + if out == "params": return x - elif out == 'trans': + elif out == "trans": return trans else: - raise ValueError("Invalid out parameter: %r. Needs to be 'params' or " - "'trans'." % out) + raise ValueError( + f"Invalid out parameter: {out!r}. Needs to be 'params' or 'trans'." 
+ ) def _generic_fit(src_pts, tgt_pts, param_info, weights, x0): - from scipy.optimize import leastsq if param_info[1]: # translate src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1)))) if param_info == (True, False, 0): + def error(x): rx, ry, rz = x trans = rotation3d(rx, ry, rz) @@ -434,9 +485,11 @@ def error(x): if weights is not None: d *= weights return d.ravel() + if x0 is None: x0 = (0, 0, 0) elif param_info == (True, True, 0): + def error(x): rx, ry, rz, tx, ty, tz = x trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz)) @@ -445,44 +498,52 @@ def error(x): if weights is not None: d *= weights return d.ravel() + if x0 is None: x0 = (0, 0, 0, 0, 0, 0) elif param_info == (True, True, 1): + def error(x): rx, ry, rz, tx, ty, tz, s = x - trans = reduce(np.dot, (translation(tx, ty, tz), - rotation(rx, ry, rz), - scaling(s, s, s))) + trans = reduce( + np.dot, + (translation(tx, ty, tz), rotation(rx, ry, rz), scaling(s, s, s)), + ) est = np.dot(src_pts, trans.T)[:, :3] d = tgt_pts - est if weights is not None: d *= weights return d.ravel() + if x0 is None: x0 = (0, 0, 0, 0, 0, 0, 1) elif param_info == (True, True, 3): + def error(x): rx, ry, rz, tx, ty, tz, sx, sy, sz = x - trans = reduce(np.dot, (translation(tx, ty, tz), - rotation(rx, ry, rz), - scaling(sx, sy, sz))) + trans = reduce( + np.dot, + (translation(tx, ty, tz), rotation(rx, ry, rz), scaling(sx, sy, sz)), + ) est = np.dot(src_pts, trans.T)[:, :3] d = tgt_pts - est if weights is not None: d *= weights return d.ravel() + if x0 is None: x0 = (0, 0, 0, 0, 0, 0, 1, 1, 1) else: raise NotImplementedError( "The specified parameter combination is not implemented: " - "rotate=%r, translate=%r, scale=%r" % param_info) + "rotate={!r}, translate={!r}, scale={!r}".format(*param_info) + ) x, _, _, _, _ = leastsq(error, x0, full_output=True) return x -def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None): +def _find_label_paths(subject="fsaverage", pattern=None, subjects_dir=None): """Find paths to label files in a subject's label directory. Parameters @@ -510,7 +571,7 @@ def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None): paths = [] for dirpath, _, filenames in os.walk(lbl_dir): rel_dir = os.path.relpath(dirpath, lbl_dir) - for filename in fnmatch.filter(filenames, '*.label'): + for filename in fnmatch.filter(filenames, "*.label"): path = os.path.join(rel_dir, filename) paths.append(path) else: @@ -527,7 +588,7 @@ def _find_mri_paths(subject, skip_fiducials, subjects_dir): subject : str Name of the mri subject. skip_fiducials : bool - Do not scale the MRI fiducials. If False, an IOError will be raised + Do not scale the MRI fiducials. If False, an OSError will be raised if no fiducials file can be found. 
subjects_dir : None | path-like Override the SUBJECTS_DIR environment variable @@ -543,41 +604,56 @@ def _find_mri_paths(subject, skip_fiducials, subjects_dir): paths = {} # directories to create - paths['dirs'] = [bem_dirname, surf_dirname] + paths["dirs"] = [bem_dirname, surf_dirname] # surf/ files - paths['surf'] = [] - surf_fname = os.path.join(surf_dirname, '{name}') - surf_names = ('inflated', 'white', 'orig', 'orig_avg', 'inflated_avg', - 'inflated_pre', 'pial', 'pial_avg', 'smoothwm', 'white_avg', - 'seghead', 'smseghead') - if os.getenv('_MNE_FEW_SURFACES', '') == 'true': # for testing + paths["surf"] = [] + surf_fname = os.path.join(surf_dirname, "{name}") + surf_names = ( + "inflated", + "white", + "orig", + "orig_avg", + "inflated_avg", + "inflated_pre", + "pial", + "pial_avg", + "smoothwm", + "white_avg", + "seghead", + "smseghead", + ) + if os.getenv("_MNE_FEW_SURFACES", "") == "true": # for testing surf_names = surf_names[:4] for surf_name in surf_names: - for hemi in ('lh.', 'rh.'): + for hemi in ("lh.", "rh."): name = hemi + surf_name - path = surf_fname.format(subjects_dir=subjects_dir, - subject=subject, name=name) + path = surf_fname.format( + subjects_dir=subjects_dir, subject=subject, name=name + ) if os.path.exists(path): - paths['surf'].append(pformat(surf_fname, name=name)) - surf_fname = os.path.join(bem_dirname, '{name}') - surf_names = ('inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf') + paths["surf"].append(pformat(surf_fname, name=name)) + surf_fname = os.path.join(bem_dirname, "{name}") + surf_names = ("inner_skull.surf", "outer_skull.surf", "outer_skin.surf") for surf_name in surf_names: - path = surf_fname.format(subjects_dir=subjects_dir, - subject=subject, name=surf_name) + path = surf_fname.format( + subjects_dir=subjects_dir, subject=subject, name=surf_name + ) if os.path.exists(path): - paths['surf'].append(pformat(surf_fname, name=surf_name)) + paths["surf"].append(pformat(surf_fname, name=surf_name)) del surf_names, surf_name, path, hemi # BEM files - paths['bem'] = bem = [] + paths["bem"] = bem = [] path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject) if os.path.exists(path): - bem.append('head') - bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir, - subject=subject, name='*-bem') - re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject, - name='(.+)').replace('\\', '\\\\') + bem.append("head") + bem_pattern = pformat( + bem_fname, subjects_dir=subjects_dir, subject=subject, name="*-bem" + ) + re_pattern = pformat( + bem_fname, subjects_dir=subjects_dir, subject=subject, name="(.+)" + ).replace("\\", "\\\\") for path in iglob(bem_pattern): match = re.match(re_pattern, path) name = match.group(1) @@ -586,54 +662,57 @@ def _find_mri_paths(subject, skip_fiducials, subjects_dir): # fiducials if skip_fiducials: - paths['fid'] = [] + paths["fid"] = [] else: - paths['fid'] = _find_fiducials_files(subject, subjects_dir) + paths["fid"] = _find_fiducials_files(subject, subjects_dir) # check that we found at least one - if len(paths['fid']) == 0: - raise IOError("No fiducials file found for %s. The fiducials " - "file should be named " - "{subject}/bem/{subject}-fiducials.fif. In " - "order to scale an MRI without fiducials set " - "skip_fiducials=True." % subject) + if len(paths["fid"]) == 0: + raise OSError( + f"No fiducials file found for {subject}. The fiducials " + "file should be named " + "{subject}/bem/{subject}-fiducials.fif. 
In " + "order to scale an MRI without fiducials set " + "skip_fiducials=True." + ) # duplicate files (curvature and some surfaces) - paths['duplicate'] = [] - path = os.path.join(surf_dirname, '{name}') - surf_fname = os.path.join(surf_dirname, '{name}') - surf_dup_names = ('curv', 'sphere', 'sphere.reg', 'sphere.reg.avg') + paths["duplicate"] = [] + path = os.path.join(surf_dirname, "{name}") + surf_fname = os.path.join(surf_dirname, "{name}") + surf_dup_names = ("curv", "sphere", "sphere.reg", "sphere.reg.avg") for surf_dup_name in surf_dup_names: - for hemi in ('lh.', 'rh.'): + for hemi in ("lh.", "rh."): name = hemi + surf_dup_name - path = surf_fname.format(subjects_dir=subjects_dir, - subject=subject, name=name) + path = surf_fname.format( + subjects_dir=subjects_dir, subject=subject, name=name + ) if os.path.exists(path): - paths['duplicate'].append(pformat(surf_fname, name=name)) + paths["duplicate"].append(pformat(surf_fname, name=name)) del surf_dup_name, name, path, hemi # transform files (talairach) - paths['transforms'] = [] - transform_fname = os.path.join(mri_transforms_dirname, 'talairach.xfm') + paths["transforms"] = [] + transform_fname = os.path.join(mri_transforms_dirname, "talairach.xfm") path = transform_fname.format(subjects_dir=subjects_dir, subject=subject) if os.path.exists(path): - paths['transforms'].append(transform_fname) + paths["transforms"].append(transform_fname) del transform_fname, path # find source space files - paths['src'] = src = [] + paths["src"] = src = [] bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject) - fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif') - prefix = subject + '-' + fnames = fnmatch.filter(os.listdir(bem_dir), "*-src.fif") + prefix = subject + "-" for fname in fnames: if fname.startswith(prefix): - fname = "{subject}-%s" % fname[len(prefix):] + fname = f"{{subject}}-{fname[len(prefix) :]}" path = os.path.join(bem_dirname, fname) src.append(path) # find MRIs mri_dir = mri_dirname.format(subjects_dir=subjects_dir, subject=subject) - fnames = fnmatch.filter(os.listdir(mri_dir), '*.mgz') - paths['mri'] = [os.path.join(mri_dir, f) for f in fnames] + fnames = fnmatch.filter(os.listdir(mri_dir), "*.mgz") + paths["mri"] = [os.path.join(mri_dir, f) for f in fnames] return paths @@ -642,17 +721,18 @@ def _find_fiducials_files(subject, subjects_dir): """Find fiducial files.""" fid = [] # standard fiducials - if os.path.exists(fid_fname.format(subjects_dir=subjects_dir, - subject=subject)): + if os.path.exists(fid_fname.format(subjects_dir=subjects_dir, subject=subject)): fid.append(fid_fname) # fiducials with subject name - pattern = pformat(fid_fname_general, subjects_dir=subjects_dir, - subject=subject, head='*') - regex = pformat(fid_fname_general, subjects_dir=subjects_dir, - subject=subject, head='(.+)').replace('\\', '\\\\') + pattern = pformat( + fid_fname_general, subjects_dir=subjects_dir, subject=subject, head="*" + ) + regex = pformat( + fid_fname_general, subjects_dir=subjects_dir, subject=subject, head="(.+)" + ).replace("\\", "\\\\") for path in iglob(pattern): match = re.match(regex, path) - head = match.group(1).replace(subject, '{subject}') + head = match.group(1).replace(subject, "{subject}") fid.append(pformat(fid_fname_general, head=head)) return fid @@ -673,30 +753,10 @@ def _is_mri_subject(subject, subjects_dir=None): Whether ``subject`` is an mri subject. 
""" subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) - return bool(_find_head_bem(subject, subjects_dir) or - _find_head_bem(subject, subjects_dir, high_res=True)) - - -def _is_scaled_mri_subject(subject, subjects_dir=None): - """Check whether a directory in subjects_dir is a scaled mri subject. - - Parameters - ---------- - subject : str - Name of the potential subject/directory. - subjects_dir : None | path-like - Override the SUBJECTS_DIR environment variable. - - Returns - ------- - is_scaled_mri_subject : bool - Whether ``subject`` is a scaled mri subject. - """ - subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) - if not _is_mri_subject(subject, subjects_dir): - return False - fname = subjects_dir / subject / "MRI scaling parameters.cfg" - return fname.exists() + return bool( + _find_head_bem(subject, subjects_dir) + or _find_head_bem(subject, subjects_dir, high_res=True) + ) def _mri_subject_has_bem(subject, subjects_dir=None): @@ -715,8 +775,7 @@ def _mri_subject_has_bem(subject, subjects_dir=None): Whether ``subject`` has a bem file. """ subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) - pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject, - name='*-bem') + pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject, name="*-bem") fnames = glob(pattern) return bool(len(fnames)) @@ -740,23 +799,28 @@ def read_mri_cfg(subject, subjects_dir=None): fname = subjects_dir / subject / "MRI scaling parameters.cfg" if not fname.exists(): - raise IOError("%r does not seem to be a scaled mri subject: %r does " - "not exist." % (subject, fname)) + raise OSError( + f"{subject!r} does not seem to be a scaled mri subject: {fname!r} does not" + "exist." + ) - logger.info("Reading MRI cfg file %s" % fname) + logger.info(f"Reading MRI cfg file {fname}") config = configparser.RawConfigParser() config.read(fname) - n_params = config.getint("MRI Scaling", 'n_params') + n_params = config.getint("MRI Scaling", "n_params") if n_params == 1: - scale = config.getfloat("MRI Scaling", 'scale') + scale = config.getfloat("MRI Scaling", "scale") elif n_params == 3: - scale_str = config.get("MRI Scaling", 'scale') + scale_str = config.get("MRI Scaling", "scale") scale = np.array([float(s) for s in scale_str.split()]) else: - raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params) + raise ValueError(f"Invalid n_params value in MRI cfg: {n_params}") - out = {'subject_from': config.get("MRI Scaling", 'subject_from'), - 'n_params': n_params, 'scale': scale} + out = { + "subject_from": config.get("MRI Scaling", "subject_from"), + "n_params": n_params, + "scale": scale, + } return out @@ -782,15 +846,15 @@ def _write_mri_config(fname, subject_from, subject_to, scale): config = configparser.RawConfigParser() config.add_section("MRI Scaling") - config.set("MRI Scaling", 'subject_from', subject_from) - config.set("MRI Scaling", 'subject_to', subject_to) - config.set("MRI Scaling", 'n_params', str(n_params)) + config.set("MRI Scaling", "subject_from", subject_from) + config.set("MRI Scaling", "subject_to", subject_to) + config.set("MRI Scaling", "n_params", str(n_params)) if n_params == 1: - config.set("MRI Scaling", 'scale', str(scale)) + config.set("MRI Scaling", "scale", str(scale)) else: - config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale])) - config.set("MRI Scaling", 'version', '1') - with open(fname, 'w') as fid: + config.set("MRI Scaling", "scale", " ".join([str(s) for s in scale])) + config.set("MRI 
Scaling", "version", "1") + with open(fname, "w") as fid: config.write(fid) @@ -811,27 +875,37 @@ def _scale_params(subject_to, subject_from, scale, subjects_dir): """ subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) if (subject_from is None) != (scale is None): - raise TypeError("Need to provide either both subject_from and scale " - "parameters, or neither.") + raise TypeError( + "Need to provide either both subject_from and scale parameters, or neither." + ) if subject_from is None: cfg = read_mri_cfg(subject_to, subjects_dir) - subject_from = cfg['subject_from'] - n_params = cfg['n_params'] + subject_from = cfg["subject_from"] + n_params = cfg["n_params"] assert n_params in (1, 3) - scale = cfg['scale'] + scale = cfg["scale"] scale = np.atleast_1d(scale) if scale.ndim != 1 or scale.shape[0] not in (1, 3): - raise ValueError("Invalid shape for scale parameter. Need scalar " - "or array of length 3. Got shape %s." - % (scale.shape,)) + raise ValueError( + "Invalid shape for scale parameter. Need scalar or array of length 3. Got " + f"shape {scale.shape}." + ) n_params = len(scale) return str(subjects_dir), subject_from, scale, n_params == 1 @verbose -def scale_bem(subject_to, bem_name, subject_from=None, scale=None, - subjects_dir=None, *, on_defects='raise', verbose=None): +def scale_bem( + subject_to, + bem_name, + subject_from=None, + scale=None, + subjects_dir=None, + *, + on_defects="raise", + verbose=None, +): """Scale a bem file. Parameters @@ -855,29 +929,36 @@ def scale_bem(subject_to, bem_name, subject_from=None, scale=None, .. versionadded:: 1.0 %(verbose)s """ - subjects_dir, subject_from, scale, uniform = \ - _scale_params(subject_to, subject_from, scale, subjects_dir) + subjects_dir, subject_from, scale, uniform = _scale_params( + subject_to, subject_from, scale, subjects_dir + ) - src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from, - name=bem_name) - dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to, - name=bem_name) + src = bem_fname.format( + subjects_dir=subjects_dir, subject=subject_from, name=bem_name + ) + dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to, name=bem_name) if os.path.exists(dst): - raise IOError("File already exists: %s" % dst) + raise OSError(f"File already exists: {dst}") surfs = read_bem_surfaces(src, on_defects=on_defects) for surf in surfs: - surf['rr'] *= scale + surf["rr"] *= scale if not uniform: - assert len(surf['nn']) > 0 - surf['nn'] /= scale - _normalize_vectors(surf['nn']) + assert len(surf["nn"]) > 0 + surf["nn"] /= scale + _normalize_vectors(surf["nn"]) write_bem_surfaces(dst, surfs) -def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None, - scale=None, subjects_dir=None): +def scale_labels( + subject_to, + pattern=None, + overwrite=False, + subject_from=None, + scale=None, + subjects_dir=None, +): r"""Scale labels to match a brain that was previously created by scaling. Parameters @@ -902,7 +983,8 @@ def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None, Override the ``SUBJECTS_DIR`` environment variable. 
""" subjects_dir, subject_from, scale, _ = _scale_params( - subject_to, subject_from, scale, subjects_dir) + subject_to, subject_from, scale, subjects_dir + ) # find labels paths = _find_label_paths(subject_from, pattern, subjects_dir) @@ -925,15 +1007,31 @@ def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None, src = src_root / fname l_old = read_label(src) pos = l_old.pos * scale - l_new = Label(l_old.vertices, pos, l_old.values, l_old.hemi, - l_old.comment, subject=subject_to) + l_new = Label( + l_old.vertices, + pos, + l_old.values, + l_old.hemi, + l_old.comment, + subject=subject_to, + ) l_new.save(dst) @verbose -def scale_mri(subject_from, subject_to, scale, overwrite=False, - subjects_dir=None, skip_fiducials=False, labels=True, - annot=False, *, on_defects='raise', verbose=None): +def scale_mri( + subject_from, + subject_to, + scale, + overwrite=False, + subjects_dir=None, + skip_fiducials=False, + labels=True, + annot=False, + *, + on_defects="raise", + verbose=None, +): """Create a scaled copy of an MRI subject. Parameters @@ -949,7 +1047,7 @@ def scale_mri(subject_from, subject_to, scale, overwrite=False, subjects_dir : None | path-like Override the ``SUBJECTS_DIR`` environment variable. skip_fiducials : bool - Do not scale the MRI fiducials. If False (default), an IOError will be + Do not scale the MRI fiducials. If False (default), an OSError will be raised if no fiducials file can be found. labels : bool Also scale all labels (default True). @@ -979,99 +1077,119 @@ def scale_mri(subject_from, subject_to, scale, overwrite=False, if np.isclose(scale[1], scale[0]) and np.isclose(scale[2], scale[0]): scale = scale[0] # speed up scaling conditionals using a singleton elif scale.shape != (1,): - raise ValueError('scale must have shape (3,) or (1,), got %s' - % (scale.shape,)) + raise ValueError(f"scale must have shape (3,) or (1,), got {scale.shape}") # make sure we have an empty target directory - dest = subject_dirname.format(subject=subject_to, - subjects_dir=subjects_dir) + dest = subject_dirname.format(subject=subject_to, subjects_dir=subjects_dir) if os.path.exists(dest): if not overwrite: - raise IOError("Subject directory for %s already exists: %r" - % (subject_to, dest)) + raise OSError( + f"Subject directory for {subject_to} already exists: {dest!r}" + ) shutil.rmtree(dest) - logger.debug('create empty directory structure') - for dirname in paths['dirs']: + logger.debug("create empty directory structure") + for dirname in paths["dirs"]: dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir) os.makedirs(dir_) - logger.debug('save MRI scaling parameters') - fname = os.path.join(dest, 'MRI scaling parameters.cfg') + logger.debug("save MRI scaling parameters") + fname = os.path.join(dest, "MRI scaling parameters.cfg") _write_mri_config(fname, subject_from, subject_to, scale) - logger.debug('surf files [in mm]') - for fname in paths['surf']: + logger.debug("surf files [in mm]") + for fname in paths["surf"]: src = fname.format(subject=subject_from, subjects_dir=subjects_dir) src = os.path.realpath(src) dest = fname.format(subject=subject_to, subjects_dir=subjects_dir) pts, tri = read_surface(src) write_surface(dest, pts * scale, tri) - logger.debug('BEM files [in m]') - for bem_name in paths['bem']: - scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir, - on_defects=on_defects, verbose=False) + logger.debug("BEM files [in m]") + for bem_name in paths["bem"]: + scale_bem( + subject_to, + bem_name, + subject_from, + scale, + 
subjects_dir, + on_defects=on_defects, + verbose=False, + ) - logger.debug('fiducials [in m]') - for fname in paths['fid']: + logger.debug("fiducials [in m]") + for fname in paths["fid"]: src = fname.format(subject=subject_from, subjects_dir=subjects_dir) src = os.path.realpath(src) pts, cframe = read_fiducials(src, verbose=False) for pt in pts: - pt['r'] = pt['r'] * scale + pt["r"] = pt["r"] * scale dest = fname.format(subject=subject_to, subjects_dir=subjects_dir) write_fiducials(dest, pts, cframe, overwrite=True, verbose=False) - logger.debug('MRIs [nibabel]') - os.mkdir(mri_dirname.format(subjects_dir=subjects_dir, - subject=subject_to)) - for fname in paths['mri']: + logger.debug("MRIs [nibabel]") + os.mkdir(mri_dirname.format(subjects_dir=subjects_dir, subject=subject_to)) + for fname in paths["mri"]: mri_name = os.path.basename(fname) _scale_mri(subject_to, mri_name, subject_from, scale, subjects_dir) - logger.debug('Transforms') - for mri_name in paths['mri']: - if mri_name.endswith('T1.mgz'): - os.mkdir(mri_transforms_dirname.format(subjects_dir=subjects_dir, - subject=subject_to)) - for fname in paths['transforms']: + logger.debug("Transforms") + for mri_name in paths["mri"]: + if mri_name.endswith("T1.mgz"): + os.mkdir( + mri_transforms_dirname.format( + subjects_dir=subjects_dir, subject=subject_to + ) + ) + for fname in paths["transforms"]: xfm_name = os.path.basename(fname) - _scale_xfm(subject_to, xfm_name, mri_name, - subject_from, scale, subjects_dir) + _scale_xfm( + subject_to, xfm_name, mri_name, subject_from, scale, subjects_dir + ) break - logger.debug('duplicate files') - for fname in paths['duplicate']: + logger.debug("duplicate files") + for fname in paths["duplicate"]: src = fname.format(subject=subject_from, subjects_dir=subjects_dir) dest = fname.format(subject=subject_to, subjects_dir=subjects_dir) shutil.copyfile(src, dest) - logger.debug('source spaces') - for fname in paths['src']: + logger.debug("source spaces") + for fname in paths["src"]: src_name = os.path.basename(fname) - scale_source_space(subject_to, src_name, subject_from, scale, - subjects_dir, verbose=False) + scale_source_space( + subject_to, src_name, subject_from, scale, subjects_dir, verbose=False + ) - logger.debug('labels [in m]') - os.mkdir(os.path.join(subjects_dir, subject_to, 'label')) + logger.debug("labels [in m]") + os.mkdir(os.path.join(subjects_dir, subject_to, "label")) if labels: - scale_labels(subject_to, subject_from=subject_from, scale=scale, - subjects_dir=subjects_dir) + scale_labels( + subject_to, + subject_from=subject_from, + scale=scale, + subjects_dir=subjects_dir, + ) - logger.debug('copy *.annot files') + logger.debug("copy *.annot files") # they don't contain scale-dependent information if annot: - src_pattern = os.path.join(subjects_dir, subject_from, 'label', - '*.annot') - dst_dir = os.path.join(subjects_dir, subject_to, 'label') + src_pattern = os.path.join(subjects_dir, subject_from, "label", "*.annot") + dst_dir = os.path.join(subjects_dir, subject_to, "label") for src_file in iglob(src_pattern): shutil.copy(src_file, dst_dir) @verbose -def scale_source_space(subject_to, src_name, subject_from=None, scale=None, - subjects_dir=None, n_jobs=None, verbose=None): +def scale_source_space( + subject_to, + src_name, + subject_from=None, + scale=None, + subjects_dir=None, + n_jobs=None, + verbose=None, +): """Scale a source space for an mri created with scale_mri(). 
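As a usage illustration (not taken from this diff), the public scaling API reformatted above can be driven roughly as follows; the subject names, scale factor, SUBJECTS_DIR path, and BEM name are assumptions, so treat this as a sketch rather than a recipe.

import mne

subjects_dir = "~/subjects"  # assumed FreeSurfer SUBJECTS_DIR
mne.coreg.scale_mri(
    subject_from="fsaverage",
    subject_to="fsaverage_small",   # hypothetical surrogate subject
    scale=0.95,                     # uniform; a length-3 array scales per axis
    subjects_dir=subjects_dir,
    skip_fiducials=False,           # raises OSError if no fiducials file exists
    labels=True,                    # also scale *.label files
    annot=True,                     # copy *.annot files (not scale dependent)
    overwrite=True,
)
# Individual pieces can be rescaled later; subject_from/scale are read back from
# the "MRI scaling parameters.cfg" written above when they are omitted. The BEM
# name below is an assumed example of the {subject}-{name}.fif pattern:
mne.coreg.scale_bem("fsaverage_small", "inner_skull-bem", subjects_dir=subjects_dir)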
Parameters @@ -1105,8 +1223,9 @@ def scale_source_space(subject_to, src_name, subject_from=None, scale=None, are updated so that source estimates can be plotted on the original MRI volume. """ - subjects_dir, subject_from, scale, uniform = \ - _scale_params(subject_to, subject_from, scale, subjects_dir) + subjects_dir, subject_from, scale, uniform = _scale_params( + subject_to, subject_from, scale, subjects_dir + ) # if n_params==1 scale is a scalar; if n_params==3 scale is a (3,) array # find the source space file names @@ -1116,45 +1235,46 @@ def scale_source_space(subject_to, src_name, subject_from=None, scale=None, else: match = re.match(r"(oct|ico|vol)-?(\d+)$", src_name) if match: - spacing = '-'.join(match.groups()) + spacing = "-".join(match.groups()) src_pattern = src_fname else: spacing = None src_pattern = os.path.join(bem_dirname, src_name) - src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from, - spacing=spacing) - dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to, - spacing=spacing) + src = src_pattern.format( + subjects_dir=subjects_dir, subject=subject_from, spacing=spacing + ) + dst = src_pattern.format( + subjects_dir=subjects_dir, subject=subject_to, spacing=spacing + ) # read and scale the source space [in m] sss = read_source_spaces(src) - logger.info("scaling source space %s: %s -> %s", spacing, subject_from, - subject_to) + logger.info("scaling source space %s: %s -> %s", spacing, subject_from, subject_to) logger.info("Scale factor: %s", scale) add_dist = False for ss in sss: - ss['subject_his_id'] = subject_to - ss['rr'] *= scale + ss["subject_his_id"] = subject_to + ss["rr"] *= scale # additional tags for volume source spaces - for key in ('vox_mri_t', 'src_mri_t'): + for key in ("vox_mri_t", "src_mri_t"): # maintain transform to original MRI volume ss['mri_volume_name'] if key in ss: - ss[key]['trans'][:3] *= scale[:, np.newaxis] + ss[key]["trans"][:3] *= scale[:, np.newaxis] # distances and patch info if uniform: - if ss['dist'] is not None: - ss['dist'] *= scale[0] + if ss["dist"] is not None: + ss["dist"] *= scale[0] # Sometimes this is read-only due to how it's read - ss['nearest_dist'] = ss['nearest_dist'] * scale - ss['dist_limit'] = ss['dist_limit'] * scale + ss["nearest_dist"] = ss["nearest_dist"] * scale + ss["dist_limit"] = ss["dist_limit"] * scale else: # non-uniform scaling - ss['nn'] /= scale - _normalize_vectors(ss['nn']) - if ss['dist'] is not None: + ss["nn"] /= scale + _normalize_vectors(ss["nn"]) + if ss["dist"] is not None: add_dist = True - dist_limit = float(np.abs(sss[0]['dist_limit'])) - elif ss['nearest'] is not None: + dist_limit = float(np.abs(sss[0]["dist_limit"])) + elif ss["nearest"] is not None: add_dist = True dist_limit = 0 @@ -1168,12 +1288,15 @@ def scale_source_space(subject_to, src_name, subject_from=None, scale=None, def _scale_mri(subject_to, mri_fname, subject_from, scale, subjects_dir): """Scale an MRI by setting its affine.""" subjects_dir, subject_from, scale, _ = _scale_params( - subject_to, subject_from, scale, subjects_dir) - nibabel = _import_nibabel('scale an MRI') - fname_from = op.join(mri_dirname.format( - subjects_dir=subjects_dir, subject=subject_from), mri_fname) - fname_to = op.join(mri_dirname.format( - subjects_dir=subjects_dir, subject=subject_to), mri_fname) + subject_to, subject_from, scale, subjects_dir + ) + nibabel = _import_nibabel("scale an MRI") + fname_from = op.join( + mri_dirname.format(subjects_dir=subjects_dir, subject=subject_from), mri_fname + ) + 
fname_to = op.join( + mri_dirname.format(subjects_dir=subjects_dir, subject=subject_to), mri_fname + ) img = nibabel.load(fname_from) zooms = np.array(img.header.get_zooms()) zooms[[0, 2, 1]] *= scale @@ -1184,27 +1307,29 @@ def _scale_mri(subject_to, mri_fname, subject_from, scale, subjects_dir): nibabel.save(img, fname_to) -def _scale_xfm(subject_to, xfm_fname, mri_name, subject_from, scale, - subjects_dir): +def _scale_xfm(subject_to, xfm_fname, mri_name, subject_from, scale, subjects_dir): """Scale a transform.""" subjects_dir, subject_from, scale, _ = _scale_params( - subject_to, subject_from, scale, subjects_dir) + subject_to, subject_from, scale, subjects_dir + ) # The nibabel warning should already be there in MRI step, if applicable, # as we only get here if T1.mgz is present (and thus a scaling was # attempted) so we can silently return here. fname_from = os.path.join( - mri_transforms_dirname.format( - subjects_dir=subjects_dir, subject=subject_from), xfm_fname) + mri_transforms_dirname.format(subjects_dir=subjects_dir, subject=subject_from), + xfm_fname, + ) fname_to = op.join( - mri_transforms_dirname.format( - subjects_dir=subjects_dir, subject=subject_to), xfm_fname) + mri_transforms_dirname.format(subjects_dir=subjects_dir, subject=subject_to), + xfm_fname, + ) assert op.isfile(fname_from), fname_from assert op.isdir(op.dirname(fname_to)), op.dirname(fname_to) # The "talairach.xfm" file stores the ras_mni transform. # # For "from" subj F, "to" subj T, F->T scaling S, some equivalent vertex - # positions F_x and T_x in MRI (Freesurfer RAS) coords, knowing that + # positions F_x and T_x in MRI (FreeSurfer RAS) coords, knowing that # we have T_x = S @ F_x, we want to have the same MNI coords computed # for these vertices: # @@ -1223,23 +1348,25 @@ def _scale_xfm(subject_to, xfm_fname, mri_name, subject_from, scale, # prepare the scale (S) transform scale = np.atleast_1d(scale) scale = np.tile(scale, 3) if len(scale) == 1 else scale - S = Transform('mri', 'mri', scaling(*scale)) # F_mri->T_mri + S = Transform("mri", "mri", scaling(*scale)) # F_mri->T_mri # # Get the necessary transforms of the "from" subject # xfm, kind = _read_fs_xfm(fname_from) - assert kind == 'MNI Transform File', kind - _, _, F_mri_ras, _, _ = _read_mri_info(mri_name, units='mm') - F_ras_mni = Transform('ras', 'mni_tal', xfm) + assert kind == "MNI Transform File", kind + _, _, F_mri_ras, _, _ = _read_mri_info(mri_name, units="mm") + F_ras_mni = Transform("ras", "mni_tal", xfm) del xfm # # Get the necessary transforms of the "to" subject # - mri_name = op.join(mri_dirname.format( - subjects_dir=subjects_dir, subject=subject_to), op.basename(mri_name)) - _, _, T_mri_ras, _, _ = _read_mri_info(mri_name, units='mm') + mri_name = op.join( + mri_dirname.format(subjects_dir=subjects_dir, subject=subject_to), + op.basename(mri_name), + ) + _, _, T_mri_ras, _, _ = _read_mri_info(mri_name, units="mm") T_ras_mri = invert_transform(T_mri_ras) del mri_name, T_mri_ras @@ -1248,37 +1375,39 @@ def _scale_xfm(subject_to, xfm_fname, mri_name, subject_from, scale, # T_ras_mni = F_ras_mni @ F_mri_ras @ S⁻¹ @ T_ras_mri # # By moving right to left through the equation. 
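A standalone NumPy check of the composition derived in the comment above (random, well-conditioned affines stand in for the real FreeSurfer transforms): with T_x = S @ F_x, choosing T_ras_mni = F_ras_mni @ F_mri_ras @ S⁻¹ @ T_ras_mri reproduces the same MNI coordinates.

import numpy as np

rng = np.random.default_rng(0)

def rand_affine():
    out = np.eye(4)
    out[:3, :3] = 3 * np.eye(3) + rng.normal(size=(3, 3))  # keep it invertible
    out[:3, 3] = rng.normal(size=3)
    return out

S = np.diag([0.9, 1.1, 1.05, 1.0])                  # F_mri -> T_mri scaling
F_mri_ras, F_ras_mni, T_mri_ras = (rand_affine() for _ in range(3))
T_ras_mri = np.linalg.inv(T_mri_ras)

T_ras_mni = F_ras_mni @ F_mri_ras @ np.linalg.inv(S) @ T_ras_mri

F_x = np.r_[rng.normal(size=3), 1.0]                # a vertex in F's MRI coords
T_x = S @ F_x                                       # same vertex after scaling
np.testing.assert_allclose(F_ras_mni @ F_mri_ras @ F_x,   # MNI via subject F
                           T_ras_mni @ T_mri_ras @ T_x)   # MNI via subject T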
- T_ras_mni = \ + T_ras_mni = combine_transforms( combine_transforms( - combine_transforms( - combine_transforms( - T_ras_mri, invert_transform(S), 'ras', 'mri'), - F_mri_ras, 'ras', 'ras'), - F_ras_mni, 'ras', 'mni_tal') - _write_fs_xfm(fname_to, T_ras_mni['trans'], kind) + combine_transforms(T_ras_mri, invert_transform(S), "ras", "mri"), + F_mri_ras, + "ras", + "ras", + ), + F_ras_mni, + "ras", + "mni_tal", + ) + _write_fs_xfm(fname_to, T_ras_mni["trans"], kind) def _read_surface(filename, *, on_defects): bem = dict() if filename is not None and op.exists(filename): - if filename.endswith('.fif'): - bem = read_bem_surfaces( - filename, on_defects=on_defects, verbose=False - )[0] + if filename.endswith(".fif"): + bem = read_bem_surfaces(filename, on_defects=on_defects, verbose=False)[0] else: try: bem = read_surface(filename, return_dict=True)[2] - bem['rr'] *= 1e-3 + bem["rr"] *= 1e-3 complete_surface_info(bem, copy=False) except Exception: raise ValueError( - "Error loading surface from %s (see " - "Terminal for details)." % filename) + f"Error loading surface from {filename} (see Terminal for details)." + ) return bem @fill_doc -class Coregistration(object): +class Coregistration: """Class for MRI<->head coregistration. Parameters @@ -1315,36 +1444,34 @@ class Coregistration(object): to create a surrogate MRI subject with the proper scale factors. """ - def __init__(self, info, subject, subjects_dir=None, fiducials='auto', *, - on_defects='raise'): - _validate_type(info, (Info, None), 'info') + def __init__( + self, info, subject, subjects_dir=None, fiducials="auto", *, on_defects="raise" + ): + _validate_type(info, (Info, None), "info") self._info = info self._subject = _check_subject(subject, subject) - self._subjects_dir = str( - get_subjects_dir(subjects_dir, raise_error=True) - ) + self._subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) self._scale_mode = None self._on_defects = on_defects - self._rot_trans = None - self._default_parameters = \ - np.array([0., 0., 0., 0., 0., 0., 1., 1., 1.]) + self._default_parameters = np.array( + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0] + ) self._rotation = self._default_parameters[:3] self._translation = self._default_parameters[3:6] self._scale = self._default_parameters[6:9] - self._icp_iterations = 20 self._icp_angle = 0.2 self._icp_distance = 0.2 self._icp_scale = 0.2 - self._icp_fid_matches = ('nearest', 'matched') + self._icp_fid_matches = ("nearest", "matched") self._icp_fid_match = self._icp_fid_matches[0] - self._lpa_weight = 1. - self._nasion_weight = 10. - self._rpa_weight = 1. - self._hsp_weight = 1. - self._eeg_weight = 1. - self._hpi_weight = 1. 
+ self._lpa_weight = 1.0 + self._nasion_weight = 10.0 + self._rpa_weight = 1.0 + self._hsp_weight = 1.0 + self._eeg_weight = 1.0 + self._hpi_weight = 1.0 self._extra_points_filter = None self._setup_digs() @@ -1366,77 +1493,84 @@ def _setup_digs(self): ) else: self._dig_dict = _get_data_as_dict_from_dig( - dig=self._info['dig'], - exclude_ref_channel=False + dig=self._info["dig"], exclude_ref_channel=False ) # adjustments: # set weights to 0 for None input # convert fids to float arrays - for k, w_atr in zip(['nasion', 'lpa', 'rpa', 'hsp', 'hpi'], - ['_nasion_weight', '_lpa_weight', - '_rpa_weight', '_hsp_weight', '_hpi_weight']): + for k, w_atr in zip( + ["nasion", "lpa", "rpa", "hsp", "hpi"], + [ + "_nasion_weight", + "_lpa_weight", + "_rpa_weight", + "_hsp_weight", + "_hpi_weight", + ], + ): if self._dig_dict[k] is None: self._dig_dict[k] = np.zeros((0, 3)) setattr(self, w_atr, 0) - elif k in ['rpa', 'nasion', 'lpa']: + elif k in ["rpa", "nasion", "lpa"]: self._dig_dict[k] = np.array([self._dig_dict[k]], float) def _setup_bem(self): # find high-res head model (if possible) - high_res_path = _find_head_bem(self._subject, self._subjects_dir, - high_res=True) - low_res_path = _find_head_bem(self._subject, self._subjects_dir, - high_res=False) + high_res_path = _find_head_bem(self._subject, self._subjects_dir, high_res=True) + low_res_path = _find_head_bem(self._subject, self._subjects_dir, high_res=False) if high_res_path is None and low_res_path is None: - raise RuntimeError("No standard head model was " - f"found for subject {self._subject}") + raise RuntimeError( + "No standard head model was " + f"found for subject {self._subject} in " + f"{self._subjects_dir}" + ) if high_res_path is not None: self._bem_high_res = _read_surface( high_res_path, on_defects=self._on_defects ) - logger.info(f'Using high resolution head model in {high_res_path}') + logger.info(f"Using high resolution head model in {high_res_path}") else: self._bem_high_res = _read_surface( low_res_path, on_defects=self._on_defects ) - logger.info(f'Using low resolution head model in {low_res_path}') + logger.info(f"Using low resolution head model in {low_res_path}") if low_res_path is None: # This should be very rare! 
- warn('No low-resolution head found, decimating high resolution ' - 'mesh (%d vertices): %s' % (len(self._bem_high_res['rr']), - high_res_path,)) + warn( + "No low-resolution head found, decimating high resolution " + f"mesh ({len(self._bem_high_res['rr'])} vertices): {high_res_path}" + ) # Create one from the high res one, which we know we have - rr, tris = decimate_surface(self._bem_high_res['rr'], - self._bem_high_res['tris'], - n_triangles=5120) + rr, tris = decimate_surface( + self._bem_high_res["rr"], self._bem_high_res["tris"], n_triangles=5120 + ) # directly set the attributes of bem_low_res self._bem_low_res = complete_surface_info( - dict(rr=rr, tris=tris), copy=False, verbose=False) - else: - self._bem_low_res = _read_surface( - low_res_path, on_defects=self._on_defects + dict(rr=rr, tris=tris), copy=False, verbose=False ) + else: + self._bem_low_res = _read_surface(low_res_path, on_defects=self._on_defects) def _setup_fiducials(self, fids): _validate_type(fids, (str, dict, list)) # find fiducials file fid_accurate = None - if fids == 'auto': - fid_files = _find_fiducials_files(self._subject, - self._subjects_dir) + if fids == "auto": + fid_files = _find_fiducials_files(self._subject, self._subjects_dir) if len(fid_files) > 0: # Read fiducials from disk fid_filename = fid_files[0].format( - subjects_dir=self._subjects_dir, subject=self._subject) - logger.info(f'Using fiducials from: {fid_filename}.') + subjects_dir=self._subjects_dir, subject=self._subject + ) + logger.info(f"Using fiducials from: {fid_filename}.") fids, _ = read_fiducials(fid_filename) fid_accurate = True self._fid_filename = fid_filename else: - fids = 'estimated' + fids = "estimated" - if fids == 'estimated': - logger.info('Estimating fiducials from fsaverage.') + if fids == "estimated": + logger.info("Estimating fiducials from fsaverage.") fid_accurate = False fids = get_mni_fiducials(self._subject, self._subjects_dir) @@ -1445,8 +1579,9 @@ def _setup_fiducials(self, fids): fid_coords = _fiducial_coords(fids) else: assert isinstance(fids, dict) - fid_coords = np.array([fids['lpa'], fids['nasion'], fids['rpa']], - dtype=float) + fid_coords = np.array( + [fids["lpa"], fids["nasion"], fids["rpa"]], dtype=float + ) self._fid_points = fid_coords self._fid_accurate = fid_accurate @@ -1459,12 +1594,11 @@ def _reset_fiducials(self): lpa=self._fid_points[0], nasion=self._fid_points[1], rpa=self._fid_points[2], - coord_frame='mri' + coord_frame="mri", ) self.fiducials = dig_montage - def _update_params(self, rot=None, tra=None, sca=None, - force_update=False): + def _update_params(self, rot=None, tra=None, sca=None, force_update=False): if force_update and tra is None: tra = self._translation rot_changed = False @@ -1480,18 +1614,19 @@ def _update_params(self, rot=None, tra=None, sca=None, self._last_translation = self._translation.copy() self._translation = tra self._head_mri_t = rotation(*self._rotation).T - self._head_mri_t[:3, 3] = \ - -np.dot(self._head_mri_t[:3, :3], tra) - self._transformed_dig_hpi = \ - apply_trans(self._head_mri_t, self._dig_dict['hpi']) - self._transformed_dig_eeg = \ - apply_trans( - self._head_mri_t, self._dig_dict['dig_ch_pos_location']) - self._transformed_dig_extra = \ - apply_trans(self._head_mri_t, - self._filtered_extra_points) - self._transformed_orig_dig_extra = \ - apply_trans(self._head_mri_t, self._dig_dict['hsp']) + self._head_mri_t[:3, 3] = -np.dot(self._head_mri_t[:3, :3], tra) + self._transformed_dig_hpi = apply_trans( + self._head_mri_t, self._dig_dict["hpi"] + ) + 
self._transformed_dig_eeg = apply_trans( + self._head_mri_t, self._dig_dict["dig_ch_pos_location"] + ) + self._transformed_dig_extra = apply_trans( + self._head_mri_t, self._filtered_extra_points + ) + self._transformed_orig_dig_extra = apply_trans( + self._head_mri_t, self._dig_dict["hsp"] + ) self._mri_head_t = rotation(*self._rotation) self._mri_head_t[:3, 3] = np.array(tra) if tra_changed or sca is not None: @@ -1501,27 +1636,32 @@ def _update_params(self, rot=None, tra=None, sca=None, self._scale = sca self._mri_trans = np.eye(4) self._mri_trans[:, :3] *= sca - self._transformed_high_res_mri_points = \ - apply_trans(self._mri_trans, - self._processed_high_res_mri_points) + self._transformed_high_res_mri_points = apply_trans( + self._mri_trans, self._processed_high_res_mri_points + ) self._update_nearest_calc() if tra_changed: - self._nearest_transformed_high_res_mri_idx_orig_hsp = \ + self._nearest_transformed_high_res_mri_idx_orig_hsp = ( self._nearest_calc.query(self._transformed_orig_dig_extra)[1] - self._nearest_transformed_high_res_mri_idx_hpi = \ - self._nearest_calc.query(self._transformed_dig_hpi)[1] - self._nearest_transformed_high_res_mri_idx_eeg = \ - self._nearest_calc.query(self._transformed_dig_eeg)[1] - self._nearest_transformed_high_res_mri_idx_rpa = \ - self._nearest_calc.query( - apply_trans(self._head_mri_t, self._dig_dict['rpa']))[1] - self._nearest_transformed_high_res_mri_idx_nasion = \ - self._nearest_calc.query( - apply_trans(self._head_mri_t, self._dig_dict['nasion']))[1] - self._nearest_transformed_high_res_mri_idx_lpa = \ + ) + self._nearest_transformed_high_res_mri_idx_hpi = self._nearest_calc.query( + self._transformed_dig_hpi + )[1] + self._nearest_transformed_high_res_mri_idx_eeg = self._nearest_calc.query( + self._transformed_dig_eeg + )[1] + self._nearest_transformed_high_res_mri_idx_rpa = self._nearest_calc.query( + apply_trans(self._head_mri_t, self._dig_dict["rpa"]) + )[1] + self._nearest_transformed_high_res_mri_idx_nasion = ( self._nearest_calc.query( - apply_trans(self._head_mri_t, self._dig_dict['lpa']))[1] + apply_trans(self._head_mri_t, self._dig_dict["nasion"]) + )[1] + ) + self._nearest_transformed_high_res_mri_idx_lpa = self._nearest_calc.query( + apply_trans(self._head_mri_t, self._dig_dict["lpa"]) + )[1] def set_scale_mode(self, scale_mode): """Select how to fit the scale parameters. 
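As a quick sanity check on the transform bookkeeping in _update_params above (a standalone sketch, not part of the diff): _head_mri_t is built as the rigid inverse of _mri_head_t, i.e. if x_head = R @ x_mri + t then x_mri = R.T @ x_head - R.T @ t.

import numpy as np
from mne.transforms import rotation

rot = np.array([0.1, -0.2, 0.3])      # arbitrary rotation angles (rad)
tra = np.array([0.01, 0.02, -0.03])   # arbitrary translation (m)

mri_head_t = rotation(*rot)           # same construction as _mri_head_t
mri_head_t[:3, 3] = tra

head_mri_t = rotation(*rot).T         # same construction as _head_mri_t
head_mri_t[:3, 3] = -np.dot(head_mri_t[:3, :3], tra)

np.testing.assert_allclose(head_mri_t @ mri_head_t, np.eye(4), atol=1e-12)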
@@ -1611,14 +1751,15 @@ def set_scale(self, sca): def _update_nearest_calc(self): self._nearest_calc = _DistanceQuery( - self._processed_high_res_mri_points * self._scale) + self._processed_high_res_mri_points * self._scale + ) @property def _filtered_extra_points(self): if self._extra_points_filter is None: - return self._dig_dict['hsp'] + return self._dig_dict["hsp"] else: - return self._dig_dict['hsp'][self._extra_points_filter] + return self._dig_dict["hsp"][self._extra_points_filter] @property def _parameters(self): @@ -1626,79 +1767,85 @@ def _parameters(self): @property def _last_parameters(self): - return np.concatenate((self._last_rotation, - self._last_translation, self._last_scale)) + return np.concatenate( + (self._last_rotation, self._last_translation, self._last_scale) + ) @property def _changes(self): move = np.linalg.norm(self._last_translation - self._translation) * 1e3 - angle = np.rad2deg(_angle_between_quats( - rot_to_quat(rotation(*self._rotation)[:3, :3]), - rot_to_quat(rotation(*self._last_rotation)[:3, :3]))) + angle = np.rad2deg( + _angle_between_quats( + rot_to_quat(rotation(*self._rotation)[:3, :3]), + rot_to_quat(rotation(*self._last_rotation)[:3, :3]), + ) + ) percs = 100 * (self._scale - self._last_scale) / self._last_scale return move, angle, percs @property def _nearest_transformed_high_res_mri_idx_hsp(self): return self._nearest_calc.query( - apply_trans(self._head_mri_t, self._filtered_extra_points))[1] + apply_trans(self._head_mri_t, self._filtered_extra_points) + )[1] @property def _has_hsp_data(self): - return (self._has_mri_data and - len(self._nearest_transformed_high_res_mri_idx_hsp) > 0) + return ( + self._has_mri_data + and len(self._nearest_transformed_high_res_mri_idx_hsp) > 0 + ) @property def _has_hpi_data(self): - return (self._has_mri_data and - len(self._nearest_transformed_high_res_mri_idx_hpi) > 0) + return ( + self._has_mri_data + and len(self._nearest_transformed_high_res_mri_idx_hpi) > 0 + ) @property def _has_eeg_data(self): - return (self._has_mri_data and - len(self._nearest_transformed_high_res_mri_idx_eeg) > 0) + return ( + self._has_mri_data + and len(self._nearest_transformed_high_res_mri_idx_eeg) > 0 + ) @property def _has_lpa_data(self): - mri_point = self.fiducials.dig[_map_fid_name_to_idx('lpa')] - assert mri_point['ident'] == FIFF.FIFFV_POINT_LPA - has_mri_data = np.any(mri_point['r']) - has_head_data = np.any(self._dig_dict['lpa']) + mri_point = self.fiducials.dig[_map_fid_name_to_idx("lpa")] + assert mri_point["ident"] == FIFF.FIFFV_POINT_LPA + has_mri_data = np.any(mri_point["r"]) + has_head_data = np.any(self._dig_dict["lpa"]) return has_mri_data and has_head_data @property def _has_nasion_data(self): - mri_point = self.fiducials.dig[_map_fid_name_to_idx('nasion')] - assert mri_point['ident'] == FIFF.FIFFV_POINT_NASION - has_mri_data = np.any(mri_point['r']) - has_head_data = np.any(self._dig_dict['nasion']) + mri_point = self.fiducials.dig[_map_fid_name_to_idx("nasion")] + assert mri_point["ident"] == FIFF.FIFFV_POINT_NASION + has_mri_data = np.any(mri_point["r"]) + has_head_data = np.any(self._dig_dict["nasion"]) return has_mri_data and has_head_data @property def _has_rpa_data(self): - mri_point = self.fiducials.dig[_map_fid_name_to_idx('rpa')] - assert mri_point['ident'] == FIFF.FIFFV_POINT_RPA - has_mri_data = np.any(mri_point['r']) - has_head_data = np.any(self._dig_dict['rpa']) + mri_point = self.fiducials.dig[_map_fid_name_to_idx("rpa")] + assert mri_point["ident"] == FIFF.FIFFV_POINT_RPA + has_mri_data = 
np.any(mri_point["r"]) + has_head_data = np.any(self._dig_dict["rpa"]) return has_mri_data and has_head_data @property def _processed_high_res_mri_points(self): - return self._get_processed_mri_points('high') - - @property - def _processed_low_res_mri_points(self): - return self._get_processed_mri_points('low') + return self._get_processed_mri_points("high") def _get_processed_mri_points(self, res): - bem = self._bem_low_res if res == 'low' else self._bem_high_res - points = bem['rr'].copy() + bem = self._bem_low_res if res == "low" else self._bem_high_res + points = bem["rr"].copy() if self._grow_hair: - assert len(bem['nn']) # should be guaranteed by _read_surface - scaled_hair_dist = (1e-3 * self._grow_hair / - np.array(self._scale)) + assert len(bem["nn"]) # should be guaranteed by _read_surface + scaled_hair_dist = 1e-3 * self._grow_hair / np.array(self._scale) hair = points[:, 2] > points[:, 1] - points[hair] += bem['nn'][hair] * scaled_hair_dist + points[hair] += bem["nn"][hair] * scaled_hair_dist return points @property @@ -1707,20 +1854,24 @@ def _has_mri_data(self): @property def _has_dig_data(self): - return (self._has_mri_data and - len(self._nearest_transformed_high_res_mri_idx_hsp) > 0) + return ( + self._has_mri_data + and len(self._nearest_transformed_high_res_mri_idx_hsp) > 0 + ) @property def _orig_hsp_point_distance(self): mri_points = self._transformed_high_res_mri_points[ - self._nearest_transformed_high_res_mri_idx_orig_hsp] + self._nearest_transformed_high_res_mri_idx_orig_hsp + ] hsp_points = self._transformed_orig_dig_extra return np.linalg.norm(mri_points - hsp_points, axis=-1) def _log_dig_mri_distance(self, prefix): errs_nearest = self.compute_dig_mri_distances() - logger.info(f'{prefix} median distance: ' - f'{np.median(errs_nearest * 1000):6.2f} mm') + logger.info( + f"{prefix} median distance: {np.median(errs_nearest * 1000):6.2f} mm" + ) @property def scale(self): @@ -1734,8 +1885,9 @@ def scale(self): return self._scale.copy() @verbose - def fit_fiducials(self, lpa_weight=1., nasion_weight=10., rpa_weight=1., - verbose=None): + def fit_fiducials( + self, lpa_weight=1.0, nasion_weight=10.0, rpa_weight=1.0, verbose=None + ): """Find rotation and translation to fit all 3 fiducials. Parameters @@ -1753,34 +1905,41 @@ def fit_fiducials(self, lpa_weight=1., nasion_weight=10., rpa_weight=1., self : Coregistration The modified Coregistration object. 
""" - logger.info('Aligning using fiducials') - self._log_dig_mri_distance('Start') + logger.info("Aligning using fiducials") + self._log_dig_mri_distance("Start") n_scale_params = self._n_scale_params if n_scale_params == 3: # enforce 1 even for 3-axis here (3 points is not enough) - logger.info("Enforcing 1 scaling parameter for fit " - "with fiducials.") + logger.info("Enforcing 1 scaling parameter for fit with fiducials.") n_scale_params = 1 self._lpa_weight = lpa_weight self._nasion_weight = nasion_weight self._rpa_weight = rpa_weight - head_pts = np.vstack((self._dig_dict['lpa'], - self._dig_dict['nasion'], - self._dig_dict['rpa'])) + head_pts = np.vstack( + (self._dig_dict["lpa"], self._dig_dict["nasion"], self._dig_dict["rpa"]) + ) mri_pts = np.vstack( - (self.fiducials.dig[0]['r'], # LPA - self.fiducials.dig[1]['r'], # Nasion - self.fiducials.dig[2]['r']) # RPA + ( + self.fiducials.dig[0]["r"], # LPA + self.fiducials.dig[1]["r"], # Nasion + self.fiducials.dig[2]["r"], + ) # RPA ) weights = [lpa_weight, nasion_weight, rpa_weight] if n_scale_params == 0: mri_pts *= self._scale # not done in fit_matched_points x0 = self._parameters - x0 = x0[:6 + n_scale_params] - est = fit_matched_points(mri_pts, head_pts, x0=x0, out='params', - scale=n_scale_params, weights=weights) + x0 = x0[: 6 + n_scale_params] + est = fit_matched_points( + mri_pts, + head_pts, + x0=x0, + out="params", + scale=n_scale_params, + weights=weights, + ) if n_scale_params == 0: self._update_params(rot=est[:3], tra=est[3:6]) else: @@ -1788,43 +1947,56 @@ def fit_fiducials(self, lpa_weight=1., nasion_weight=10., rpa_weight=1., est = np.concatenate([est, [est[-1]] * 2]) assert est.size == 9 self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9]) - self._log_dig_mri_distance('End ') + self._log_dig_mri_distance("End ") return self def _setup_icp(self, n_scale_params): - head_pts = list() - mri_pts = list() - weights = list() + head_pts = [np.zeros((0, 3))] + mri_pts = [np.zeros((0, 3))] + weights = [np.zeros(0)] if self._has_dig_data and self._hsp_weight > 0: # should be true head_pts.append(self._filtered_extra_points) - mri_pts.append(self._processed_high_res_mri_points[ - self._nearest_transformed_high_res_mri_idx_hsp]) + mri_pts.append( + self._processed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hsp + ] + ) weights.append(np.full(len(head_pts[-1]), self._hsp_weight)) - for key in ('lpa', 'nasion', 'rpa'): - if getattr(self, f'_has_{key}_data'): + for key in ("lpa", "nasion", "rpa"): + if getattr(self, f"_has_{key}_data"): head_pts.append(self._dig_dict[key]) - if self._icp_fid_match == 'matched': + if self._icp_fid_match == "matched": idx = _map_fid_name_to_idx(name=key) - p = self.fiducials.dig[idx]['r'].reshape(1, -1) + p = self.fiducials.dig[idx]["r"].reshape(1, -1) mri_pts.append(p) else: - assert self._icp_fid_match == 'nearest' - mri_pts.append(self._processed_high_res_mri_points[ - getattr( - self, - '_nearest_transformed_high_res_mri_idx_%s' - % (key,))]) - weights.append(np.full(len(mri_pts[-1]), - getattr(self, '_%s_weight' % key))) + assert self._icp_fid_match == "nearest" + mri_pts.append( + self._processed_high_res_mri_points[ + getattr( + self, + f"_nearest_transformed_high_res_mri_idx_{key}", + ) + ] + ) + weights.append( + np.full(len(mri_pts[-1]), getattr(self, f"_{key}_weight")) + ) if self._has_eeg_data and self._eeg_weight > 0: - head_pts.append(self._dig_dict['dig_ch_pos_location']) - mri_pts.append(self._processed_high_res_mri_points[ - 
self._nearest_transformed_high_res_mri_idx_eeg]) + head_pts.append(self._dig_dict["dig_ch_pos_location"]) + mri_pts.append( + self._processed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_eeg + ] + ) weights.append(np.full(len(mri_pts[-1]), self._eeg_weight)) if self._has_hpi_data and self._hpi_weight > 0: - head_pts.append(self._dig_dict['hpi']) - mri_pts.append(self._processed_high_res_mri_points[ - self._nearest_transformed_high_res_mri_idx_hpi]) + head_pts.append(self._dig_dict["hpi"]) + mri_pts.append( + self._processed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hpi + ] + ) weights.append(np.full(len(mri_pts[-1]), self._hpi_weight)) head_pts = np.concatenate(head_pts) mri_pts = np.concatenate(mri_pts) @@ -1848,14 +2020,23 @@ def set_fid_match(self, match): self : Coregistration The modified Coregistration object. """ - _check_option('match', match, self._icp_fid_matches) + _check_option("match", match, self._icp_fid_matches) self._icp_fid_match = match return self @verbose - def fit_icp(self, n_iterations=20, lpa_weight=1., nasion_weight=10., - rpa_weight=1., hsp_weight=1., eeg_weight=1., hpi_weight=1., - callback=None, verbose=None): + def fit_icp( + self, + n_iterations=20, + lpa_weight=1.0, + nasion_weight=10.0, + rpa_weight=1.0, + hsp_weight=1.0, + eeg_weight=1.0, + hpi_weight=1.0, + callback=None, + verbose=None, + ): """Find MRI scaling, translation, and rotation to match HSP. Parameters @@ -1885,25 +2066,31 @@ def fit_icp(self, n_iterations=20, lpa_weight=1., nasion_weight=10., self : Coregistration The modified Coregistration object. """ - logger.info('Aligning using ICP') - self._log_dig_mri_distance('Start ') + logger.info("Aligning using ICP") + self._log_dig_mri_distance("Start ") n_scale_params = self._n_scale_params self._lpa_weight = lpa_weight self._nasion_weight = nasion_weight self._rpa_weight = rpa_weight self._hsp_weight = hsp_weight self._eeg_weight = eeg_weight - self._hsp_weight = hpi_weight + self._hpi_weight = hpi_weight # Initial guess (current state) est = self._parameters - est = est[:[6, 7, None, 9][n_scale_params]] + est = est[: [6, 7, None, 9][n_scale_params]] # Do the fits, assigning and evaluating at each step for iteration in range(n_iterations): head_pts, mri_pts, weights = self._setup_icp(n_scale_params) - est = fit_matched_points(mri_pts, head_pts, scale=n_scale_params, - x0=est, out='params', weights=weights) + est = fit_matched_points( + mri_pts, + head_pts, + scale=n_scale_params, + x0=est, + out="params", + weights=weights, + ) if n_scale_params == 0: self._update_params(rot=est[:3], tra=est[3:6]) elif n_scale_params == 1: @@ -1912,20 +2099,23 @@ def fit_icp(self, n_iterations=20, lpa_weight=1., nasion_weight=10., else: self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9]) angle, move, scale = self._changes - self._log_dig_mri_distance(f' ICP {iteration + 1:2d} ') + self._log_dig_mri_distance(f" ICP {iteration + 1:2d} ") if callback is not None: callback(iteration, n_iterations) - if angle <= self._icp_angle and move <= self._icp_distance and \ - all(scale <= self._icp_scale): + if ( + angle <= self._icp_angle + and move <= self._icp_distance + and all(scale <= self._icp_scale) + ): break - self._log_dig_mri_distance('End ') + self._log_dig_mri_distance("End ") return self @property def _n_scale_params(self): if self._scale_mode is None: n_scale_params = 0 - elif self._scale_mode == 'uniform': + elif self._scale_mode == "uniform": n_scale_params = 1 else: n_scale_params = 3 @@ -1952,8 +2142,11 
@@ def omit_head_shape_points(self, distance): # find the new filter mask = self._orig_hsp_point_distance <= distance n_excluded = np.sum(~mask) - logger.info("Coregistration: Excluding %i head shape points with " - "distance >= %.3f m.", n_excluded, distance) + logger.info( + "Coregistration: Excluding %i head shape points with distance >= %.3f m.", + n_excluded, + distance, + ) # set the filter self._extra_points_filter = mask self._update_params(force_update=True) @@ -1980,7 +2173,7 @@ def compute_dig_mri_distances(self): @property def trans(self): """The head->mri :class:`~mne.transforms.Transform`.""" - return Transform('head', 'mri', self._head_mri_t) + return Transform("head", "mri", self._head_mri_t) def reset(self): """Reset all the parameters affecting the coregistration. @@ -1990,7 +2183,7 @@ def reset(self): self : Coregistration The modified Coregistration object. """ - self._grow_hair = 0. + self._grow_hair = 0.0 self.set_rotation(self._default_parameters[:3]) self.set_translation(self._default_parameters[3:6]) self.set_scale(self._default_parameters[6:9]) @@ -2000,15 +2193,13 @@ def reset(self): def _get_fiducials_distance(self): distance = dict() - for key in ('lpa', 'nasion', 'rpa'): + for key in ("lpa", "nasion", "rpa"): idx = _map_fid_name_to_idx(name=key) - fid = self.fiducials.dig[idx]['r'].reshape(1, -1) + fid = self.fiducials.dig[idx]["r"].reshape(1, -1) transformed_mri = apply_trans(self._mri_trans, fid) - transformed_hsp = apply_trans( - self._head_mri_t, self._dig_dict[key]) - distance[key] = np.linalg.norm( - np.ravel(transformed_mri - transformed_hsp)) + transformed_hsp = apply_trans(self._head_mri_t, self._dig_dict[key]) + distance[key] = np.linalg.norm(np.ravel(transformed_mri - transformed_hsp)) return np.array(list(distance.values())) * 1e3 def _get_fiducials_distance_str(self): @@ -2019,18 +2210,27 @@ def _get_point_distance(self): mri_points = list() hsp_points = list() if self._hsp_weight > 0 and self._has_hsp_data: - mri_points.append(self._transformed_high_res_mri_points[ - self._nearest_transformed_high_res_mri_idx_hsp]) + mri_points.append( + self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hsp + ] + ) hsp_points.append(self._transformed_dig_extra) assert len(mri_points[-1]) == len(hsp_points[-1]) if self._eeg_weight > 0 and self._has_eeg_data: - mri_points.append(self._transformed_high_res_mri_points[ - self._nearest_transformed_high_res_mri_idx_eeg]) + mri_points.append( + self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_eeg + ] + ) hsp_points.append(self._transformed_dig_eeg) assert len(mri_points[-1]) == len(hsp_points[-1]) if self._hpi_weight > 0 and self._has_hpi_data: - mri_points.append(self._transformed_high_res_mri_points[ - self._nearest_transformed_high_res_mri_idx_hpi]) + mri_points.append( + self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hpi + ] + ) hsp_points.append(self._transformed_dig_hpi) assert len(mri_points[-1]) == len(hsp_points[-1]) if all(len(h) == 0 for h in hsp_points): @@ -2046,10 +2246,14 @@ def _get_point_distance_str(self): dists = 1e3 * point_distance av_dist = np.mean(dists) std_dist = np.std(dists) - kinds = [kind for kind, check in - (('HSP', self._hsp_weight > 0 and self._has_hsp_data), - ('EEG', self._eeg_weight > 0 and self._has_eeg_data), - ('HPI', self._hpi_weight > 0 and self._has_hpi_data)) - if check] - kinds = '+'.join(kinds) + kinds = [ + kind + for kind, check in ( + ("HSP", self._hsp_weight > 0 and 
self._has_hsp_data), + ("EEG", self._eeg_weight > 0 and self._has_eeg_data), + ("HPI", self._hpi_weight > 0 and self._has_hpi_data), + ) + if check + ] + kinds = "+".join(kinds) return f"{len(dists)} {kinds}: {av_dist:.1f} ± {std_dist:.1f} mm" diff --git a/mne/cov.py b/mne/cov.py index 59dfb01edaa..07af31476d8 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -1,64 +1,95 @@ -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Denis A. Engemann -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from copy import deepcopy import itertools as itt +from copy import deepcopy from math import log import numpy as np +from scipy.sparse import issparse -from .defaults import (_INTERPOLATION_DEFAULT, _EXTRAPOLATE_DEFAULT, - _BORDER_DEFAULT, DEFAULTS) -from .io.write import start_and_end_file -from .io.proj import (make_projector, _proj_equal, activate_proj, - _check_projs, _needs_eeg_average_ref_proj, - _has_eeg_average_ref_proj, _read_proj, _write_proj) -from .io import fiff_open, RawArray - -from .io.pick import (pick_types, pick_channels_cov, pick_channels, pick_info, - _picks_by_type, _pick_data_channels, _picks_to_idx, - _DATA_CH_TYPES_SPLIT) - -from .io.constants import FIFF -from .io.meas_info import _read_bad_channels, create_info, _write_bad_channels -from .io.tag import find_tag -from .io.tree import dir_tree_find -from .io.write import (start_block, end_block, write_int, write_double, - write_float_matrix, write_string, _safe_name_list, - write_name_list_sanitized) -from .defaults import _handle_default +from . import viz +from ._fiff.constants import FIFF +from ._fiff.meas_info import _read_bad_channels, _write_bad_channels, create_info +from ._fiff.pick import ( + _DATA_CH_TYPES_SPLIT, + _pick_data_channels, + _picks_by_type, + _picks_to_idx, + pick_channels, + pick_channels_cov, + pick_info, + pick_types, +) +from ._fiff.proj import ( + _check_projs, + _has_eeg_average_ref_proj, + _needs_eeg_average_ref_proj, + _proj_equal, + _read_proj, + _write_proj, +) +from ._fiff.proj import activate_proj as _activate_proj +from ._fiff.proj import make_projector as _make_projector +from ._fiff.tag import find_tag +from ._fiff.tree import dir_tree_find +from .defaults import ( + _BORDER_DEFAULT, + _EXTRAPOLATE_DEFAULT, + _INTERPOLATION_DEFAULT, + DEFAULTS, + _handle_default, +) from .epochs import Epochs from .event import make_fixed_length_events from .evoked import EvokedArray -from .rank import compute_rank -from .utils import (check_fname, logger, verbose, check_version, _time_mask, - warn, copy_function_doc_to_method_doc, _pl, - _undo_scaling_cov, _scaled_array, _validate_type, - _check_option, eigh, fill_doc, _on_missing, - _check_on_missing, _check_fname, _verbose_safe_false) -from . 
import viz - -from .fixes import (BaseEstimator, EmpiricalCovariance, _logdet, - empirical_covariance, log_likelihood) +from .fixes import ( + EmpiricalCovariance, + _EstimatorMixin, + _logdet, + _safe_svd, + empirical_covariance, + log_likelihood, +) +from .rank import _compute_rank +from .utils import ( + _array_repr, + _check_fname, + _check_on_missing, + _check_option, + _on_missing, + _pl, + _scaled_array, + _time_mask, + _undo_scaling_cov, + _validate_type, + _verbose_safe_false, + check_fname, + check_version, + copy_function_doc_to_method_doc, + eigh, + fill_doc, + logger, + verbose, + warn, +) def _check_covs_algebra(cov1, cov2): if cov1.ch_names != cov2.ch_names: - raise ValueError('Both Covariance do not have the same list of ' - 'channels.') - projs1 = [str(c) for c in cov1['projs']] - projs2 = [str(c) for c in cov1['projs']] + raise ValueError("Both Covariance do not have the same list of channels.") + projs1 = [str(c) for c in cov1["projs"]] + projs2 = [str(c) for c in cov1["projs"]] if projs1 != projs2: - raise ValueError('Both Covariance do not have the same list of ' - 'SSP projections.') + raise ValueError( + "Both Covariance do not have the same list of SSP projections." + ) def _get_tslice(epochs, tmin, tmax): """Get the slice.""" - mask = _time_mask(epochs.times, tmin, tmax, sfreq=epochs.info['sfreq']) + mask = _time_mask(epochs.times, tmin, tmax, sfreq=epochs.info["sfreq"]) tstart = np.where(mask)[0][0] if tmin is not None else None tend = np.where(mask)[0][-1] + 1 if tmax is not None else None tslice = slice(tstart, tend, None) @@ -116,33 +147,54 @@ class Covariance(dict): """ @verbose - def __init__(self, data, names, bads, projs, nfree, eig=None, eigvec=None, - method=None, loglik=None, *, verbose=None): + def __init__( + self, + data, + names, + bads, + projs, + nfree, + eig=None, + eigvec=None, + method=None, + loglik=None, + *, + verbose=None, + ): """Init of covariance.""" - diag = (data.ndim == 1) + diag = data.ndim == 1 projs = _check_projs(projs) - self.update(data=data, dim=len(data), names=names, bads=bads, - nfree=nfree, eig=eig, eigvec=eigvec, diag=diag, - projs=projs, kind=FIFF.FIFFV_MNE_NOISE_COV) + self.update( + data=data, + dim=len(data), + names=names, + bads=bads, + nfree=nfree, + eig=eig, + eigvec=eigvec, + diag=diag, + projs=projs, + kind=FIFF.FIFFV_MNE_NOISE_COV, + ) if method is not None: - self['method'] = method + self["method"] = method if loglik is not None: - self['loglik'] = loglik + self["loglik"] = loglik @property def data(self): """Numpy array of Noise covariance matrix.""" - return self['data'] + return self["data"] @property def ch_names(self): """Channel names.""" - return self['names'] + return self["names"] @property def nfree(self): """Number of degrees of freedom.""" - return self['nfree'] + return self["nfree"] @verbose def save(self, fname, *, overwrite=False, verbose=None): @@ -157,8 +209,11 @@ def save(self, fname, *, overwrite=False, verbose=None): .. versionadded:: 1.0 %(verbose)s """ - check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz', - '_cov.fif', '_cov.fif.gz')) + from ._fiff.write import start_and_end_file + + check_fname( + fname, "covariance", ("-cov.fif", "-cov.fif.gz", "_cov.fif", "_cov.fif.gz") + ) fname = _check_fname(fname=fname, overwrite=overwrite) with start_and_end_file(fname) as fid: _write_cov(fid, self) @@ -188,80 +243,108 @@ def as_diag(self): This function operates in place. 
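A small, self-contained illustration of the Covariance container reformatted above; the channel names and output file name are placeholders (the file name only needs to follow the *-cov.fif convention enforced by save).

import numpy as np
import mne

rng = np.random.default_rng(0)
names = ["EEG 001", "EEG 002", "EEG 003"]           # placeholder channel names
data = np.cov(rng.normal(size=(3, 500)))            # full 3 x 3 matrix -> diag=False
cov = mne.Covariance(data, names=names, bads=[], projs=[], nfree=499)
print(cov.ch_names, cov.data.shape, cov.nfree)
cov_diag = cov.copy().as_diag()                     # keep only the diagonal, in place on the copy
cov_diag.save("example-cov.fif", overwrite=True)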
""" - if self['diag']: + if self["diag"]: return self - self['diag'] = True - self['data'] = np.diag(self['data']) - self['eig'] = None - self['eigvec'] = None + self["diag"] = True + self["data"] = np.diag(self["data"]) + self["eig"] = None + self["eigvec"] = None return self def _as_square(self): # This is a hack but it works because np.diag() behaves nicely - if self['diag']: - self['diag'] = False + if self["diag"]: + self["diag"] = False self.as_diag() - self['diag'] = False + self["diag"] = False return self def _get_square(self): - if self['diag'] != (self.data.ndim == 1): + if self["diag"] != (self.data.ndim == 1): raise RuntimeError( - 'Covariance attributes inconsistent, got data with ' - 'dimensionality %d but diag=%s' - % (self.data.ndim, self['diag'])) - return np.diag(self.data) if self['diag'] else self.data.copy() + "Covariance attributes inconsistent, got data with " + f"dimensionality {self.data.ndim} but diag={self['diag']}" + ) + return np.diag(self.data) if self["diag"] else self.data.copy() def __repr__(self): # noqa: D105 - if self.data.ndim == 2: - s = 'size : %s x %s' % self.data.shape - else: # ndim == 1 - s = 'diagonal : %s' % self.data.size - s += ", n_samples : %s" % self.nfree - s += ", data : %s" % self.data - return "" % s + s = "" + return s def __add__(self, cov): """Add Covariance taking into account number of degrees of freedom.""" _check_covs_algebra(self, cov) this_cov = cov.copy() - this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) + - (self['data'] * self['nfree'])) / - (self['nfree'] + this_cov['nfree'])) - this_cov['nfree'] += self['nfree'] + this_cov["data"] = ( + (this_cov["data"] * this_cov["nfree"]) + (self["data"] * self["nfree"]) + ) / (self["nfree"] + this_cov["nfree"]) + this_cov["nfree"] += self["nfree"] - this_cov['bads'] = list(set(this_cov['bads']).union(self['bads'])) + this_cov["bads"] = list(set(this_cov["bads"]).union(self["bads"])) return this_cov def __iadd__(self, cov): """Add Covariance taking into account number of degrees of freedom.""" _check_covs_algebra(self, cov) - self['data'][:] = (((self['data'] * self['nfree']) + - (cov['data'] * cov['nfree'])) / - (self['nfree'] + cov['nfree'])) - self['nfree'] += cov['nfree'] + self["data"][:] = ( + (self["data"] * self["nfree"]) + (cov["data"] * cov["nfree"]) + ) / (self["nfree"] + cov["nfree"]) + self["nfree"] += cov["nfree"] - self['bads'] = list(set(self['bads']).union(cov['bads'])) + self["bads"] = list(set(self["bads"]).union(cov["bads"])) return self @verbose - @copy_function_doc_to_method_doc(viz.misc.plot_cov) - def plot(self, info, exclude=[], colorbar=True, proj=False, show_svd=True, - show=True, verbose=None): - return viz.misc.plot_cov(self, info, exclude, colorbar, proj, show_svd, - show, verbose) + @copy_function_doc_to_method_doc(viz.plot_cov) + def plot( + self, + info, + exclude=(), + colorbar=True, + proj=False, + show_svd=True, + show=True, + verbose=None, + ): + return viz.plot_cov( + self, info, exclude, colorbar, proj, show_svd, show, verbose + ) @verbose def plot_topomap( - self, info, ch_type=None, *, scalings=None, proj=False, - noise_cov=None, sensors=True, show_names=False, mask=None, - mask_params=None, contours=6, outlines='head', sphere=None, - image_interp=_INTERPOLATION_DEFAULT, - extrapolate=_EXTRAPOLATE_DEFAULT, border=_BORDER_DEFAULT, res=64, - size=1, cmap=None, vlim=(None, None), cnorm=None, colorbar=True, - cbar_fmt='%3.1f', units=None, axes=None, show=True, verbose=None): + self, + info, + ch_type=None, + *, + scalings=None, + 
proj=False, + noise_cov=None, + sensors=True, + show_names=False, + mask=None, + mask_params=None, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap=None, + vlim=(None, None), + cnorm=None, + colorbar=True, + cbar_fmt="%3.1f", + units=None, + axes=None, + show=True, + verbose=None, + ): """Plot a topomap of the covariance diagonal. Parameters @@ -284,7 +367,15 @@ def plot_topomap( %(sphere_topomap_auto)s %(image_interp_topomap)s %(extrapolate_topomap)s + + .. versionchanged:: 0.21 + + - The default was changed to ``'local'`` for MEG sensors. + - ``'local'`` was changed to use a convex hull mask + - ``'head'`` was changed to extrapolate out to the clipping circle. %(border_topomap)s + + .. versionadded:: 0.20 %(res_topomap)s %(size_topomap)s %(cmap_topomap)s @@ -319,33 +410,51 @@ def plot_topomap( # entries is the same as multiplying twice evoked = whiten_evoked(whiten_evoked(evoked, noise_cov), noise_cov) if units is None: - units = 'AU' + units = "AU" if scalings is None: - scalings = 1. + scalings = 1.0 if units is None: - units = {k: f'({v})²' for k, v in DEFAULTS['units'].items()} + units = {k: f"({v})²" for k, v in DEFAULTS["units"].items()} if scalings is None: - scalings = {k: v * v for k, v in DEFAULTS['scalings'].items()} + scalings = {k: v * v for k, v in DEFAULTS["scalings"].items()} return evoked.plot_topomap( - times=[0], ch_type=ch_type, vlim=vlim, cmap=cmap, - sensors=sensors, cnorm=cnorm, colorbar=colorbar, scalings=scalings, - units=units, res=res, size=size, cbar_fmt=cbar_fmt, - proj=proj, show=show, show_names=show_names, - mask=mask, mask_params=mask_params, outlines=outlines, - contours=contours, image_interp=image_interp, axes=axes, - extrapolate=extrapolate, sphere=sphere, border=border, - time_format='') - - def pick_channels(self, ch_names, ordered=False): + times=[0], + ch_type=ch_type, + vlim=vlim, + cmap=cmap, + sensors=sensors, + cnorm=cnorm, + colorbar=colorbar, + scalings=scalings, + units=units, + res=res, + size=size, + cbar_fmt=cbar_fmt, + proj=proj, + show=show, + show_names=show_names, + mask=mask, + mask_params=mask_params, + outlines=outlines, + contours=contours, + image_interp=image_interp, + axes=axes, + extrapolate=extrapolate, + sphere=sphere, + border=border, + time_format="", + ) + + @verbose + def pick_channels(self, ch_names, ordered=True, *, verbose=None): """Pick channels from this covariance matrix. Parameters ---------- ch_names : list of str List of channels to keep. All other channels are dropped. - ordered : bool - If True (default False), ensure that the order of the channels - matches the order of ``ch_names``. + %(ordered)s + %(verbose)s Returns ------- @@ -358,13 +467,15 @@ def pick_channels(self, ch_names, ordered=False): .. versionadded:: 0.20.0 """ - return pick_channels_cov(self, ch_names, exclude=[], ordered=ordered, - copy=False) + return pick_channels_cov( + self, ch_names, exclude=[], ordered=ordered, copy=False + ) ############################################################################### # IO + @verbose def read_cov(fname, verbose=None): """Read a noise covariance from a FIF file. 
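The arithmetic in Covariance.__add__/__iadd__ above is just an nfree-weighted average of the two matrices; a plain NumPy restatement with synthetic data (not from this diff):

import numpy as np

rng = np.random.default_rng(0)
x1 = rng.normal(size=(100, 4))        # 100 samples, 4 "channels"
x2 = rng.normal(size=(300, 4))        # 300 samples from a second segment

c1, n1 = np.cov(x1.T), x1.shape[0] - 1
c2, n2 = np.cov(x2.T), x2.shape[0] - 1

pooled = (c1 * n1 + c2 * n2) / (n1 + n2)   # what (cov1 + cov2)["data"] stores
nfree = n1 + n2                            # and what it stores in "nfree"
print(pooled.shape, nfree)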
@@ -385,18 +496,23 @@ def read_cov(fname, verbose=None): -------- write_cov, compute_covariance, compute_raw_covariance """ - check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz', - '_cov.fif', '_cov.fif.gz')) - fname = _check_fname(fname=fname, must_exist=True, overwrite='read') + from ._fiff.open import fiff_open + + check_fname( + fname, "covariance", ("-cov.fif", "-cov.fif.gz", "_cov.fif", "_cov.fif.gz") + ) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") f, tree, _ = fiff_open(fname) with f as fid: - return Covariance(**_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV, - limited=True)) + return Covariance( + **_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV, limited=True) + ) ############################################################################### # Estimate from data + @verbose def make_ad_hoc_cov(info, std=None, *, verbose=None): """Create an ad hoc noise covariance. @@ -423,33 +539,54 @@ def make_ad_hoc_cov(info, std=None, *, verbose=None): .. versionadded:: 0.9.0 """ picks = pick_types(info, meg=True, eeg=True, exclude=()) - std = _handle_default('noise_std', std) + std = _handle_default("noise_std", std) data = np.zeros(len(picks)) - for meg, eeg, val in zip(('grad', 'mag', False), (False, False, True), - (std['grad'], std['mag'], std['eeg'])): + for meg, eeg, val in zip( + ("grad", "mag", False), + (False, False, True), + (std["grad"], std["mag"], std["eeg"]), + ): these_picks = pick_types(info, meg=meg, eeg=eeg) data[np.searchsorted(picks, these_picks)] = val * val - ch_names = [info['ch_names'][pick] for pick in picks] - return Covariance(data, ch_names, info['bads'], info['projs'], nfree=0) + ch_names = [info["ch_names"][pick] for pick in picks] + return Covariance(data, ch_names, info["bads"], info["projs"], nfree=0) -def _check_n_samples(n_samples, n_chan): +def _check_n_samples(n_samples, n_chan, on_few_samples="warn"): """Check to see if there are enough samples for reliable cov calc.""" n_samples_min = 10 * (n_chan + 1) // 2 if n_samples <= 0: - raise ValueError('No samples found to compute the covariance matrix') + raise ValueError("No samples found to compute the covariance matrix") if n_samples < n_samples_min: - warn('Too few samples (required : %d got : %d), covariance ' - 'estimate may be unreliable' % (n_samples_min, n_samples)) + msg = ( + f"Too few samples (required {n_samples_min} but got {n_samples} for " + f"{n_chan} channels), covariance estimate may be unreliable" + ) + _on_missing(on_few_samples, msg, "on_few_samples") @verbose -def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None, - flat=None, picks=None, method='empirical', - method_params=None, cv=3, scalings=None, - n_jobs=None, return_estimators=False, - reject_by_annotation=True, rank=None, verbose=None): +def compute_raw_covariance( + raw, + tmin=0, + tmax=None, + tstep=0.2, + reject=None, + flat=None, + picks=None, + *, + on_few_samples="warn", + method="empirical", + method_params=None, + cv=3, + scalings=None, + n_jobs=None, + return_estimators=False, + reject_by_annotation=True, + rank=None, + verbose=None, +): """Estimate noise covariance matrix from a continuous segment of raw data. It is typically useful to estimate a noise covariance from empty room @@ -489,6 +626,12 @@ def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None, are floats that set the minimum acceptable peak-to-peak amplitude. If flat is None then no rejection is done. 
%(picks_good_data_noref)s + on_few_samples : str + Can be 'warn' (default), 'ignore', or 'raise' to control behavior when + there are fewer samples than channels, which can lead to inaccurate + covariance or rank estimates. + + .. versionadded:: 1.11 method : str | list | None (default 'empirical') The method used for covariance estimation. See :func:`mne.compute_covariance`. @@ -557,31 +700,40 @@ def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None, baseline correction) subtract the mean across time *for each epoch* (instead of across epochs) for each channel. """ - tmin = 0. if tmin is None else float(tmin) - dt = 1. / raw.info['sfreq'] + tmin = 0.0 if tmin is None else float(tmin) + dt = 1.0 / raw.info["sfreq"] tmax = raw.times[-1] + dt if tmax is None else float(tmax) tstep = tmax - tmin if tstep is None else float(tstep) tstep_m1 = tstep - dt # inclusive! events = make_fixed_length_events(raw, 1, tmin, tmax, tstep) - logger.info('Using up to %s segment%s' % (len(events), _pl(events))) + logger.info(f"Using up to {len(events)} segment{_pl(events)}") # don't exclude any bad channels, inverses expect all channels present if picks is None: - # Need to include all channels e.g. if eog rejection is to be used - picks = np.arange(raw.info['nchan']) - pick_mask = np.in1d( - picks, _pick_data_channels(raw.info, with_ref_meg=False)) + # Need to include all good channels e.g. if eog rejection is to be used + picks = np.arange(raw.info["nchan"]) + pick_mask = np.isin(picks, _pick_data_channels(raw.info, with_ref_meg=False)) else: pick_mask = slice(None) picks = _picks_to_idx(raw.info, picks) - epochs = Epochs(raw, events, 1, 0, tstep_m1, baseline=None, - picks=picks, reject=reject, flat=flat, - verbose=_verbose_safe_false(), - preload=False, proj=False, - reject_by_annotation=reject_by_annotation) + epochs = Epochs( + raw, + events, + 1, + 0, + tstep_m1, + baseline=None, + picks=picks, + reject=reject, + flat=flat, + verbose=_verbose_safe_false(), + preload=False, + proj=False, + reject_by_annotation=reject_by_annotation, + ) if method is None: - method = 'empirical' - if isinstance(method, str) and method == 'empirical': + method = "empirical" + if isinstance(method, str) and method == "empirical": # potentially *much* more memory efficient to do it the iterative way picks = picks[pick_mask] data = 0 @@ -593,15 +745,14 @@ def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None, mu += raw_segment.sum(axis=1) data += np.dot(raw_segment, raw_segment.T) n_samples += raw_segment.shape[1] - _check_n_samples(n_samples, len(picks)) + _check_n_samples(n_samples, len(picks), on_few_samples) data -= mu[:, None] * (mu[None, :] / n_samples) - data /= (n_samples - 1.0) - logger.info("Number of samples used : %d" % n_samples) - logger.info('[done]') - ch_names = [raw.info['ch_names'][k] for k in picks] - bads = [b for b in raw.info['bads'] if b in ch_names] - return Covariance(data, ch_names, bads, raw.info['projs'], - nfree=n_samples - 1) + data /= n_samples - 1.0 + logger.info("Number of samples used : %d", n_samples) + logger.info("[done]") + ch_names = [raw.info["ch_names"][k] for k in picks] + bads = [b for b in raw.info["bads"] if b in ch_names] + return Covariance(data, ch_names, bads, raw.info["projs"], nfree=n_samples - 1) del picks, pick_mask # This makes it equivalent to what we used to do (and do above for @@ -611,85 +762,129 @@ def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None, epochs._data -= ch_means[np.newaxis, :, np.newaxis] # fake 
this value so there are no complaints from compute_covariance epochs.baseline = (None, None) - return compute_covariance(epochs, keep_sample_mean=True, method=method, - method_params=method_params, cv=cv, - scalings=scalings, n_jobs=n_jobs, - return_estimators=return_estimators, - rank=rank) - - -def _check_method_params(method, method_params, keep_sample_mean=True, - name='method', allow_auto=True, rank=None): + return compute_covariance( + epochs, + keep_sample_mean=True, + method=method, + method_params=method_params, + cv=cv, + scalings=scalings, + n_jobs=n_jobs, + return_estimators=return_estimators, + rank=rank, + ) + + +def _check_method_params( + method, + method_params, + keep_sample_mean=True, + name="method", + allow_auto=True, + rank=None, +): """Check that method and method_params are usable.""" - accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf', - 'oas', 'shrunk', 'pca', 'factor_analysis', 'shrinkage') + accepted_methods = ( + "auto", + "empirical", + "diagonal_fixed", + "ledoit_wolf", + "oas", + "shrunk", + "pca", + "factor_analysis", + "shrinkage", + ) _method_params = { - 'empirical': {'store_precision': False, 'assume_centered': True}, - 'diagonal_fixed': {'store_precision': False, 'assume_centered': True}, - 'ledoit_wolf': {'store_precision': False, 'assume_centered': True}, - 'oas': {'store_precision': False, 'assume_centered': True}, - 'shrinkage': {'shrinkage': 0.1, 'store_precision': False, - 'assume_centered': True}, - 'shrunk': {'shrinkage': np.logspace(-4, 0, 30), - 'store_precision': False, 'assume_centered': True}, - 'pca': {'iter_n_components': None}, - 'factor_analysis': {'iter_n_components': None} + "empirical": {"store_precision": False, "assume_centered": True}, + "diagonal_fixed": {"store_precision": False, "assume_centered": True}, + "ledoit_wolf": {"store_precision": False, "assume_centered": True}, + "oas": {"store_precision": False, "assume_centered": True}, + "shrinkage": { + "shrinkage": 0.1, + "store_precision": False, + "assume_centered": True, + }, + "shrunk": { + "shrinkage": np.logspace(-4, 0, 30), + "store_precision": False, + "assume_centered": True, + }, + "pca": {"iter_n_components": None}, + "factor_analysis": {"iter_n_components": None}, } for ch_type in _DATA_CH_TYPES_SPLIT: - _method_params['diagonal_fixed'][ch_type] = 0.1 + _method_params["diagonal_fixed"][ch_type] = 0.1 if isinstance(method_params, dict): for key, values in method_params.items(): if key not in _method_params: - raise ValueError('key (%s) must be "%s"' % - (key, '" or "'.join(_method_params))) + raise ValueError( + 'key ({}) must be "{}"'.format(key, '" or "'.join(_method_params)) + ) _method_params[key].update(method_params[key]) - shrinkage = method_params.get('shrinkage', {}).get('shrinkage', 0.1) + shrinkage = method_params.get("shrinkage", {}).get("shrinkage", 0.1) if not 0 <= shrinkage <= 1: - raise ValueError('shrinkage must be between 0 and 1, got %s' - % (shrinkage,)) + raise ValueError(f"shrinkage must be between 0 and 1, got {shrinkage}") was_auto = False if method is None: - method = ['empirical'] - elif method == 'auto' and allow_auto: + method = ["empirical"] + elif method == "auto" and allow_auto: was_auto = True - method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis'] + method = ["shrunk", "diagonal_fixed", "empirical", "factor_analysis"] - if not isinstance(method, (list, tuple)): + if not isinstance(method, list | tuple): method = [method] if not all(k in accepted_methods for k in method): raise ValueError( - 
'Invalid {name} ({method}). Accepted values (individually or ' - 'in a list) are any of "{accepted_methods}" or None.'.format( - name=name, method=method, accepted_methods=accepted_methods)) - if not (isinstance(rank, str) and rank == 'full'): + f"Invalid {name} ({method}). Accepted values (individually or " + f"in a list) are any of '{accepted_methods}' or None." + ) + if not (isinstance(rank, str) and rank == "full"): if was_auto: - method.pop(method.index('factor_analysis')) + method.pop(method.index("factor_analysis")) for method_ in method: - if method_ in ('pca', 'factor_analysis'): - raise ValueError('%s can so far only be used with rank="full",' - ' got rank=%r' % (method_, rank)) + if method_ in ("pca", "factor_analysis"): + raise ValueError( + f'{method_} can so far only be used with rank="full", got rank=' + f"{rank!r}" + ) if not keep_sample_mean: - if len(method) != 1 or 'empirical' not in method: - raise ValueError('`keep_sample_mean=False` is only supported' - 'with %s="empirical"' % (name,)) + if len(method) != 1 or "empirical" not in method: + raise ValueError( + f'`keep_sample_mean=False` is only supported with {name}="empirical"' + ) for p, v in _method_params.items(): - if v.get('assume_centered', None) is False: - raise ValueError('`assume_centered` must be True' - ' if `keep_sample_mean` is False') + if v.get("assume_centered", None) is False: + raise ValueError( + "`assume_centered` must be True if `keep_sample_mean` is False" + ) return method, _method_params @verbose -def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None, - projs=None, method='empirical', method_params=None, - cv=3, scalings=None, n_jobs=None, - return_estimators=False, on_mismatch='raise', - rank=None, verbose=None): +def compute_covariance( + epochs, + keep_sample_mean=True, + tmin=None, + tmax=None, + projs=None, + *, + on_few_samples="warn", + method="empirical", + method_params=None, + cv=3, + scalings=None, + n_jobs=None, + return_estimators=False, + on_mismatch="raise", + rank=None, + verbose=None, +): """Estimate noise covariance matrix from epochs. The noise covariance is typically estimated on pre-stimulus periods @@ -725,6 +920,12 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None, List of projectors to use in covariance calculation, or None to indicate that the projectors from the epochs should be inherited. If None, then projectors from all epochs must match. + on_few_samples : str + Can be 'warn' (default), 'ignore', or 'raise' to control behavior when + there are fewer samples than channels, which can lead to inaccurate + covariance or rank estimates. + + .. versionadded:: 1.11 method : str | list | None (default 'empirical') The method used for covariance estimation. If 'empirical' (default), the sample covariance will be computed. A list can be passed to @@ -751,7 +952,7 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None, except ``'factor_analysis'`` and ``'pca'``):: {'diagonal_fixed': {'grad': 0.1, 'mag': 0.1, 'eeg': 0.1, ...}, - 'shrinkage': {'shrikage': 0.1}, + 'shrinkage': {'shrinkage': 0.1}, 'shrunk': {'shrinkage': np.logspace(-4, 0, 30)}, 'pca': {'iter_n_components': None}, 'factor_analysis': {'iter_n_components': None}} @@ -787,8 +988,8 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None, Returns ------- cov : instance of Covariance | list - The computed covariance. 
If method equals 'auto' or is a list of str - and return_estimators equals True, a list of covariance estimators is + The computed covariance. If method equals ``'auto'`` or is a list of str + and ``return_estimators=True``, a list of covariance estimators is returned (sorted by log-likelihood, from high to low, i.e. from best to worst). @@ -816,16 +1017,14 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None, .. versionadded:: 0.16 * ``'ledoit_wolf'`` The Ledoit-Wolf estimator, which uses an - empirical formula for the optimal shrinkage value - :footcite:`LedoitWolf2004`. + empirical formula for the optimal shrinkage value :footcite:`LedoitWolf2004`. * ``'oas'`` The OAS estimator :footcite:`ChenEtAl2010`, which uses a different empirical formula for the optimal shrinkage value. .. versionadded:: 0.16 * ``'shrunk'`` - Like 'ledoit_wolf', but with cross-validation - for optimal alpha. + Like 'ledoit_wolf', but with cross-validation for optimal alpha. * ``'pca'`` Probabilistic PCA with low rank :footcite:`TippingBishop1999`. * ``'factor_analysis'`` @@ -847,7 +1046,7 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None, The ``method`` parameter allows to regularize the covariance in an automated way. It also allows to select between different alternative estimation algorithms which themselves achieve regularization. - Details are described in :footcite:`EngemannGramfort2015`. + Details are described in :footcite:t:`EngemannGramfort2015`. For more information on the advanced estimation methods, see :ref:`the sklearn manual `. @@ -859,7 +1058,8 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None, # scale to natural unit for best stability with MEG/EEG scalings = _check_scalings_user(scalings) method, _method_params = _check_method_params( - method, method_params, keep_sample_mean, rank=rank) + method, method_params, keep_sample_mean, rank=rank + ) del method_params # for multi condition support epochs is required to refer to a list of @@ -878,43 +1078,47 @@ def _unpack_epochs(epochs): epochs = sum([_unpack_epochs(epoch) for epoch in epochs], []) # check for baseline correction - if any(epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5 and - keep_sample_mean for epochs_t in epochs): - warn('Epochs are not baseline corrected, covariance ' - 'matrix may be inaccurate') - - orig = epochs[0].info['dev_head_t'] - _check_on_missing(on_mismatch, 'on_mismatch') + if any( + epochs_t.baseline is None + and epochs_t.info["highpass"] < 0.5 + and keep_sample_mean + for epochs_t in epochs + ): + warn("Epochs are not baseline corrected, covariance matrix may be inaccurate") + + orig = epochs[0].info["dev_head_t"] + _check_on_missing(on_mismatch, "on_mismatch") for ei, epoch in enumerate(epochs): epoch.info._check_consistency() - if (orig is None) != (epoch.info['dev_head_t'] is None) or \ (orig is not None and not - np.allclose(orig['trans'], - epoch.info['dev_head_t']['trans'])): - msg = ('MEG<->Head transform mismatch between epochs[0]:\n%s\n\n' - 'and epochs[%s]:\n%s' - % (orig, ei, epoch.info['dev_head_t'])) - _on_missing(on_mismatch, msg, 'on_mismatch') - - bads = epochs[0].info['bads'] + if (orig is None) != (epoch.info["dev_head_t"] is None) or ( + orig is not None + and not np.allclose(orig["trans"], epoch.info["dev_head_t"]["trans"]) + ): + msg = ( + "MEG<->Head transform mismatch between epochs[0]:\n{}\n\n" + "and epochs[{}]:\n{}".format(orig, ei, epoch.info["dev_head_t"]) + ) + _on_missing(on_mismatch, msg, 
"on_mismatch") + + bads = epochs[0].info["bads"] if projs is None: - projs = epochs[0].info['projs'] + projs = epochs[0].info["projs"] # make sure Epochs are compatible for epochs_t in epochs[1:]: if epochs_t.proj != epochs[0].proj: - raise ValueError('Epochs must agree on the use of projections') - for proj_a, proj_b in zip(epochs_t.info['projs'], projs): + raise ValueError("Epochs must agree on the use of projections") + for proj_a, proj_b in zip(epochs_t.info["projs"], projs): if not _proj_equal(proj_a, proj_b): - raise ValueError('Epochs must have same projectors') + raise ValueError("Epochs must have same projectors") projs = _check_projs(projs) ch_names = epochs[0].ch_names # make sure Epochs are compatible for epochs_t in epochs[1:]: - if epochs_t.info['bads'] != bads: - raise ValueError('Epochs must have same bad channels') + if epochs_t.info["bads"] != bads: + raise ValueError("Epochs must have same bad channels") if epochs_t.ch_names != ch_names: - raise ValueError('Epochs must have same channel names') + raise ValueError("Epochs must have same channel names") picks_list = _picks_by_type(epochs[0].info) picks_meeg = np.concatenate([b for _, b in picks_list]) picks_meeg = np.sort(picks_meeg) @@ -929,7 +1133,6 @@ def _unpack_epochs(epochs): n_epochs = np.zeros(n_epoch_types, dtype=np.int64) for ii, epochs_t in enumerate(epochs): - tslice = _get_tslice(epochs_t, tmin, tmax) for e in epochs_t: e = e[picks_meeg, tslice] @@ -940,8 +1143,10 @@ def _unpack_epochs(epochs): n_samples_epoch = n_samples // n_epochs norm_const = np.sum(n_samples_epoch * (n_epochs - 1)) - data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean - in zip(n_epochs, data_mean)] + data_mean = [ + 1.0 / n_epoch * np.dot(mean, mean.T) + for n_epoch, mean in zip(n_epochs, data_mean) + ] info = pick_info(info, picks_meeg) tslice = _get_tslice(epochs[0], tmin, tmax) @@ -956,18 +1161,27 @@ def _unpack_epochs(epochs): epochs = np.hstack(epochs) n_samples_tot = epochs.shape[-1] - _check_n_samples(n_samples_tot, len(picks_meeg)) + _check_n_samples(n_samples_tot, len(picks_meeg), on_few_samples) epochs = epochs.T # sklearn | C-order cov_data = _compute_covariance_auto( - epochs, method=method, method_params=_method_params, info=info, - cv=cv, n_jobs=n_jobs, stop_early=True, picks_list=picks_list, - scalings=scalings, rank=rank) + epochs, + method=method, + method_params=_method_params, + info=info, + cv=cv, + n_jobs=n_jobs, + stop_early=True, + picks_list=picks_list, + scalings=scalings, + rank=rank, + on_few_samples=on_few_samples, + ) if keep_sample_mean is False: - cov = cov_data['empirical']['data'] + cov = cov_data["empirical"]["data"] # undo scaling - cov *= (n_samples_tot - 1) + cov *= n_samples_tot - 1 # ... 
apply pre-computed class-wise normalization for mean_cov in data_mean: cov -= mean_cov @@ -975,28 +1189,29 @@ def _unpack_epochs(epochs): covs = list() for this_method, data in cov_data.items(): - cov = Covariance(data.pop('data'), ch_names, info['bads'], projs, - nfree=n_samples_tot - 1) + cov = Covariance( + data.pop("data"), ch_names, info["bads"], projs, nfree=n_samples_tot - 1 + ) # add extra info cov.update(method=this_method, **data) covs.append(cov) - logger.info('Number of samples used : %d' % n_samples_tot) - covs.sort(key=lambda c: c['loglik'], reverse=True) + logger.info("Number of samples used : %d", n_samples_tot) + covs.sort(key=lambda c: c["loglik"], reverse=True) if len(covs) > 1: - msg = ['log-likelihood on unseen data (descending order):'] + msg = ["log-likelihood on unseen data (descending order):"] for c in covs: - msg.append('%s: %0.3f' % (c['method'], c['loglik'])) - logger.info('\n '.join(msg)) + msg.append(f"{c['method']}: {c['loglik']:0.3f}") + logger.info("\n ".join(msg)) if return_estimators: out = covs else: out = covs[0] - logger.info('selecting best estimator: {}'.format(out['method'])) + logger.info("selecting best estimator: {}".format(out["method"])) else: out = covs[0] - logger.info('[done]') + logger.info("[done]") return out @@ -1004,11 +1219,12 @@ def _unpack_epochs(epochs): def _check_scalings_user(scalings): if isinstance(scalings, dict): for k, v in scalings.items(): - _check_option('the keys in `scalings`', k, ['mag', 'grad', 'eeg']) + _check_option("the keys in `scalings`", k, ["mag", "grad", "eeg"]) elif scalings is not None and not isinstance(scalings, np.ndarray): - raise TypeError('scalings must be a dict, ndarray, or None, got %s' - % type(scalings)) - scalings = _handle_default('scalings', scalings) + raise TypeError( + f"scalings must be a dict, ndarray, or None, got {type(scalings)}" + ) + scalings = _handle_default("scalings", scalings) return scalings @@ -1021,55 +1237,104 @@ def _eigvec_subspace(eig, eigvec, mask): return eig, eigvec -def _compute_covariance_auto(data, method, info, method_params, cv, - scalings, n_jobs, stop_early, picks_list, rank): +@verbose +def _compute_rank_raw_array( + data, info, rank, scalings, *, log_ch_type=None, on_few_samples="warn", verbose=None +): + from .io import RawArray + + return _compute_rank( + RawArray(data, info, copy="auto", verbose=_verbose_safe_false()), + rank, + scalings, + info, + log_ch_type=log_ch_type, + on_few_samples=on_few_samples, + ) + + +def _compute_covariance_auto( + data, + method, + info, + method_params, + cv, + scalings, + n_jobs, + stop_early, + picks_list, + rank, + *, + cov_kind="", + log_ch_type=None, + log_rank=True, + on_few_samples="warn", +): """Compute covariance auto mode.""" # rescale to improve numerical stability orig_rank = rank - rank = compute_rank( - RawArray(data.T, info, copy=None, verbose=_verbose_safe_false()), - rank, scalings, info) + rank = _compute_rank_raw_array( + data.T, + info, + rank=rank, + scalings=scalings, + on_few_samples=on_few_samples, + verbose=_verbose_safe_false(), + ) with _scaled_array(data.T, picks_list, scalings): C = np.dot(data.T, data) - _, eigvec, mask = _smart_eigh(C, info, rank, proj_subspace=True, - do_compute_rank=False) + _, eigvec, mask = _smart_eigh( + C, + info, + rank, + proj_subspace=True, + do_compute_rank=False, + log_ch_type=log_ch_type, + verbose=None if log_rank else _verbose_safe_false(), + ) eigvec = eigvec[mask] data = np.dot(data, eigvec.T) used = np.where(mask)[0] - sub_picks_list = [(key, 
np.searchsorted(used, picks)) - for key, picks in picks_list] + sub_picks_list = [ + (key, np.searchsorted(used, picks)) for key, picks in picks_list + ] sub_info = pick_info(info, used) if len(used) != len(mask) else info - logger.info('Reducing data rank from %s -> %s' - % (len(mask), eigvec.shape[0])) + if log_rank: + logger.info(f"Reducing data rank from {len(mask)} -> {eigvec.shape[0]}") estimator_cov_info = list() - msg = 'Estimating covariance using %s' - ok_sklearn = check_version('sklearn') - if not ok_sklearn and (len(method) != 1 or method[0] != 'empirical'): - raise ValueError('scikit-learn is not installed, `method` must be ' - '`empirical`, got %s' % (method,)) + ok_sklearn = check_version("sklearn") + if not ok_sklearn and (len(method) != 1 or method[0] != "empirical"): + raise ValueError( + 'scikit-learn is not installed, `method` must be "empirical", got ' + f"{repr(method)}" + ) for method_ in method: data_ = data.copy() name = method_.__name__ if callable(method_) else method_ - logger.info(msg % name.upper()) + logger.info( + f"Estimating {cov_kind + (' ' if cov_kind else '')}" + f"covariance using {name.upper()}" + ) mp = method_params[method_] _info = {} - if method_ == 'empirical': + if method_ == "empirical": est = EmpiricalCovariance(**mp) est.fit(data_) estimator_cov_info.append((est, est.covariance_, _info)) del est - elif method_ == 'diagonal_fixed': + elif method_ == "diagonal_fixed": est = _RegCovariance(info=sub_info, **mp) est.fit(data_) estimator_cov_info.append((est, est.covariance_, _info)) del est - elif method_ == 'ledoit_wolf': + elif method_ == "ledoit_wolf": from sklearn.covariance import LedoitWolf + shrinkages = [] lw = LedoitWolf(**mp) @@ -1081,8 +1346,9 @@ def _compute_covariance_auto(data, method, info, method_params, cv, estimator_cov_info.append((sc, sc.covariance_, _info)) del lw, sc - elif method_ == 'oas': + elif method_ == "oas": from sklearn.covariance import OAS + shrinkages = [] oas = OAS(**mp) @@ -1094,58 +1360,65 @@ def _compute_covariance_auto(data, method, info, method_params, cv, estimator_cov_info.append((sc, sc.covariance_, _info)) del oas, sc - elif method_ == 'shrinkage': + elif method_ == "shrinkage": sc = _ShrunkCovariance(**mp) sc.fit(data_) estimator_cov_info.append((sc, sc.covariance_, _info)) del sc - elif method_ == 'shrunk': - from sklearn.model_selection import GridSearchCV + elif method_ == "shrunk": from sklearn.covariance import ShrunkCovariance - shrinkage = mp.pop('shrinkage') - tuned_parameters = [{'shrinkage': shrinkage}] + from sklearn.model_selection import GridSearchCV + + shrinkage = mp.pop("shrinkage") + tuned_parameters = [{"shrinkage": shrinkage}] shrinkages = [] - gs = GridSearchCV(ShrunkCovariance(**mp), - tuned_parameters, cv=cv) + gs = GridSearchCV(ShrunkCovariance(**mp), tuned_parameters, cv=cv) for ch_type, picks in sub_picks_list: gs.fit(data_[:, picks]) - shrinkages.append((ch_type, gs.best_estimator_.shrinkage, - picks)) + shrinkages.append((ch_type, gs.best_estimator_.shrinkage, picks)) shrinkages = [c[0] for c in zip(shrinkages)] sc = _ShrunkCovariance(shrinkage=shrinkages, **mp) sc.fit(data_) estimator_cov_info.append((sc, sc.covariance_, _info)) del shrinkage, sc - elif method_ == 'pca': - assert orig_rank == 'full' + elif method_ == "pca": + assert orig_rank == "full" pca, _info = _auto_low_rank_model( - data_, method_, n_jobs=n_jobs, method_params=mp, cv=cv, - stop_early=stop_early) + data_, + method_, + n_jobs=n_jobs, + method_params=mp, + cv=cv, + stop_early=stop_early, + ) pca.fit(data_) 
estimator_cov_info.append((pca, pca.get_covariance(), _info)) del pca - elif method_ == 'factor_analysis': - assert orig_rank == 'full' + elif method_ == "factor_analysis": + assert orig_rank == "full" fa, _info = _auto_low_rank_model( - data_, method_, n_jobs=n_jobs, method_params=mp, cv=cv, - stop_early=stop_early) + data_, + method_, + n_jobs=n_jobs, + method_params=mp, + cv=cv, + stop_early=stop_early, + ) fa.fit(data_) estimator_cov_info.append((fa, fa.get_covariance(), _info)) del fa else: - raise ValueError('Oh no! Your estimator does not have' - ' a .fit method') - logger.info('Done.') + raise ValueError("Oh no! Your estimator does not have a .fit method") + logger.info("Done.") if len(method) > 1: - logger.info('Using cross-validation to select the best estimator.') + logger.info("Using cross-validation to select the best estimator.") out = dict() - for ei, (estimator, cov, runtime_info) in \ - enumerate(estimator_cov_info): + for ei, (estimator, cov, runtime_info) in enumerate(estimator_cov_info): if len(method) > 1: loglik = _cross_val(data, estimator, cv, n_jobs) else: @@ -1153,7 +1426,7 @@ def _compute_covariance_auto(data, method, info, method_params, cv, # project back cov = np.dot(eigvec.T, np.dot(cov, eigvec)) # undo bias - cov *= data.shape[0] / (data.shape[0] - 1) + cov *= data.shape[0] / max(data.shape[0] - 1, 1) # undo scaling _undo_scaling_cov(cov, picks_list, scalings) method_ = method[ei] @@ -1169,8 +1442,8 @@ def _gaussian_loglik_scorer(est, X, y=None): # compute empirical covariance of the test set precision = est.get_precision() n_samples, n_features = X.shape - log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1) - log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision)) + log_like = -0.5 * (X * (np.dot(X, precision))).sum(axis=1) + log_like -= 0.5 * (n_features * log(2.0 * np.pi) - _logdet(precision)) out = np.mean(log_like) return out @@ -1178,22 +1451,28 @@ def _gaussian_loglik_scorer(est, X, y=None): def _cross_val(data, est, cv, n_jobs): """Compute cross validation.""" from sklearn.model_selection import cross_val_score - return np.mean(cross_val_score(est, data, cv=cv, n_jobs=n_jobs, - scoring=_gaussian_loglik_scorer)) + + return np.mean( + cross_val_score( + est, data, cv=cv, n_jobs=n_jobs, scoring=_gaussian_loglik_scorer + ) + ) -def _auto_low_rank_model(data, mode, n_jobs, method_params, cv, - stop_early=True, verbose=None): +def _auto_low_rank_model( + data, mode, n_jobs, method_params, cv, stop_early=True, verbose=None +): """Compute latent variable models.""" method_params = deepcopy(method_params) - iter_n_components = method_params.pop('iter_n_components') + iter_n_components = method_params.pop("iter_n_components") if iter_n_components is None: iter_n_components = np.arange(5, data.shape[1], 5) from sklearn.decomposition import PCA, FactorAnalysis - if mode == 'factor_analysis': + + if mode == "factor_analysis": est = FactorAnalysis else: - assert mode == 'pca' + assert mode == "pca" est = PCA est = est(**method_params) est.n_components = 1 @@ -1203,8 +1482,10 @@ def _auto_low_rank_model(data, mode, n_jobs, method_params, cv, # make sure we don't empty the thing if it's a generator max_n = max(list(deepcopy(iter_n_components))) if max_n > data.shape[1]: - warn('You are trying to estimate %i components on matrix ' - 'with %i features.' % (max_n, data.shape[1])) + warn( + f"You are trying to estimate {max_n} components on matrix " + f"with {data.shape[1]} features." 
+ ) for ii, n in enumerate(iter_n_components): est.n_components = n @@ -1213,30 +1494,34 @@ def _auto_low_rank_model(data, mode, n_jobs, method_params, cv, except ValueError: score = np.inf if np.isinf(score) or score > 0: - logger.info('... infinite values encountered. stopping estimation') + logger.info("... infinite values encountered. stopping estimation") break - logger.info('... rank: %i - loglik: %0.3f' % (n, score)) + logger.info("... rank: %i - loglik: %0.3f", n, score) if score != -np.inf: scores[ii] = score - if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0) and stop_early): + if ii >= 3 and np.all(np.diff(scores[ii - 3 : ii]) < 0) and stop_early: # early stop search when loglik has been going down 3 times - logger.info('early stopping parameter search.') + logger.info("early stopping parameter search.") break # happens if rank is too low right from the beginning if np.isnan(scores).all(): - raise RuntimeError('Oh no! Could not estimate covariance because all ' - 'scores were NaN. Please contact the MNE-Python ' - 'developers.') + raise RuntimeError( + "Oh no! Could not estimate covariance because all " + "scores were NaN. Please contact the MNE-Python " + "developers." + ) i_score = np.nanargmax(scores) best = est.n_components = iter_n_components[i_score] - logger.info('... best model at rank = %i' % best) - runtime_info = {'ranks': np.array(iter_n_components), - 'scores': scores, - 'best': best, - 'cv': cv} + logger.info("... best model at rank = %i", best) + runtime_info = { + "ranks": np.array(iter_n_components), + "scores": scores, + "best": best, + "cv": cv, + } return est, runtime_info @@ -1244,14 +1529,28 @@ def _auto_low_rank_model(data, mode, n_jobs, method_params, cv, # Sklearn Estimators -class _RegCovariance(BaseEstimator): +class _RegCovariance(_EstimatorMixin): """Aux class.""" - def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, - ecog=0.1, hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, - fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, fnirs_od=0.1, - csd=0.1, dbs=0.1, store_precision=False, - assume_centered=False): + def __init__( + self, + info, + grad=0.1, + mag=0.1, + eeg=0.1, + seeg=0.1, + ecog=0.1, + hbo=0.1, + hbr=0.1, + fnirs_cw_amplitude=0.1, + fnirs_fd_ac_amplitude=0.1, + fnirs_fd_phase=0.1, + fnirs_od=0.1, + csd=0.1, + dbs=0.1, + store_precision=False, + assume_centered=False, + ): self.info = info # For sklearn compat, these cannot (easily?) 
be combined into # a single dictionary @@ -1274,20 +1573,33 @@ def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, def fit(self, X): """Fit covariance model with classical diagonal regularization.""" self.estimator_ = EmpiricalCovariance( - store_precision=self.store_precision, - assume_centered=self.assume_centered) + store_precision=self.store_precision, assume_centered=self.assume_centered + ) self.covariance_ = self.estimator_.fit(X).covariance_ self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T) cov_ = Covariance( - data=self.covariance_, names=self.info['ch_names'], - bads=self.info['bads'], projs=self.info['projs'], - nfree=len(self.covariance_)) + data=self.covariance_, + names=self.info["ch_names"], + bads=self.info["bads"], + projs=self.info["projs"], + nfree=len(self.covariance_), + ) cov_ = regularize( - cov_, self.info, proj=False, exclude='bads', - grad=self.grad, mag=self.mag, eeg=self.eeg, - ecog=self.ecog, seeg=self.seeg, dbs=self.dbs, - hbo=self.hbo, hbr=self.hbr, rank='full') + cov_, + self.info, + proj=False, + exclude="bads", + grad=self.grad, + mag=self.mag, + eeg=self.eeg, + ecog=self.ecog, + seeg=self.seeg, + dbs=self.dbs, + hbo=self.hbo, + hbr=self.hbr, + rank="full", + ) self.estimator_.covariance_ = self.covariance_ = cov_.data return self @@ -1300,12 +1612,10 @@ def get_precision(self): return self.estimator_.get_precision() -class _ShrunkCovariance(BaseEstimator): +class _ShrunkCovariance(_EstimatorMixin): """Aux class.""" - def __init__(self, store_precision, assume_centered, - shrinkage=0.1): - + def __init__(self, store_precision, assume_centered, shrinkage=0.1): self.store_precision = store_precision self.assume_centered = assume_centered self.shrinkage = shrinkage @@ -1313,14 +1623,15 @@ def __init__(self, store_precision, assume_centered, def fit(self, X): """Fit covariance model with oracle shrinkage regularization.""" from sklearn.covariance import shrunk_covariance + self.estimator_ = EmpiricalCovariance( - store_precision=self.store_precision, - assume_centered=self.assume_centered) + store_precision=self.store_precision, assume_centered=self.assume_centered + ) cov = self.estimator_.fit(X).covariance_ - if not isinstance(self.shrinkage, (list, tuple)): - shrinkage = [('all', self.shrinkage, np.arange(len(cov)))] + if not isinstance(self.shrinkage, list | tuple): + shrinkage = [("all", self.shrinkage, np.arange(len(cov)))] else: shrinkage = self.shrinkage @@ -1328,7 +1639,7 @@ def fit(self, X): for a, b in itt.combinations(shrinkage, 2): picks_i, picks_j = a[2], b[2] ch_ = a[0], b[0] - if 'eeg' in ch_: + if "eeg" in ch_: zero_cross_cov[np.ix_(picks_i, picks_j)] = True zero_cross_cov[np.ix_(picks_j, picks_i)] = True @@ -1337,14 +1648,13 @@ def fit(self, X): # Apply shrinkage to blocks for ch_type, c, picks in shrinkage: sub_cov = cov[np.ix_(picks, picks)] - cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov, - shrinkage=c) + cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov, shrinkage=c) # Apply shrinkage to cross-cov for a, b in itt.combinations(shrinkage, 2): shrinkage_i, shrinkage_j = a[1], b[1] picks_i, picks_j = a[2], b[2] - c_ij = np.sqrt((1. - shrinkage_i) * (1. 
- shrinkage_j)) + c_ij = np.sqrt((1.0 - shrinkage_i) * (1.0 - shrinkage_j)) cov[np.ix_(picks_i, picks_j)] *= c_ij cov[np.ix_(picks_j, picks_i)] *= c_ij @@ -1358,10 +1668,11 @@ def fit(self, X): def score(self, X_test, y=None): """Delegate to modified EmpiricalCovariance instance.""" # compute empirical covariance of the test set - test_cov = empirical_covariance(X_test - self.estimator_.location_, - assume_centered=True) + test_cov = empirical_covariance( + X_test - self.estimator_.location_, assume_centered=True + ) if np.any(self.zero_cross_cov_): - test_cov[self.zero_cross_cov_] = 0. + test_cov[self.zero_cross_cov_] = 0.0 res = log_likelihood(test_cov, self.estimator_.get_precision()) return res @@ -1373,6 +1684,7 @@ def get_precision(self): ############################################################################### # Writing + @verbose def write_cov(fname, cov, *, overwrite=False, verbose=None): """Write a noise covariance matrix. @@ -1399,6 +1711,7 @@ def write_cov(fname, cov, *, overwrite=False, verbose=None): ############################################################################### # Prepare for inverse modeling + def _unpack_epochs(epochs): """Aux Function.""" if len(epochs.event_id) > 1: @@ -1418,8 +1731,10 @@ def _get_ch_whitener(A, pca, ch_type, rank): eig[:-rank] = 0.0 mask[:-rank] = False - logger.info(' Setting small %s eigenvalues to zero (%s)' - % (ch_type, 'using PCA' if pca else 'without PCA')) + logger.info( + f" Setting small {ch_type} eigenvalues to zero " + f"({'using' if pca else 'without'} PCA)" + ) if pca: # No PCA case. # This line will reduce the actual number of variables in data # and leadfield to the true rank. @@ -1428,8 +1743,16 @@ def _get_ch_whitener(A, pca, ch_type, rank): @verbose -def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, - scalings=None, on_rank_mismatch='ignore', verbose=None): +def prepare_noise_cov( + noise_cov, + info, + ch_names=None, + rank=None, + scalings=None, + on_rank_mismatch="ignore", + *, + verbose=None, +): """Prepare noise covariance matrix. 
Parameters @@ -1461,7 +1784,7 @@ def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, # reorder C and info to match ch_names order noise_cov_idx = list() missing = list() - ch_names = info['ch_names'] if ch_names is None else ch_names + ch_names = info["ch_names"] if ch_names is None else ch_names for c in ch_names: # this could be try/except ValueError, but it is not the preferred way if c in noise_cov.ch_names: @@ -1469,92 +1792,154 @@ def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, else: missing.append(c) if len(missing): - raise RuntimeError('Not all channels present in noise covariance:\n%s' - % missing) + raise RuntimeError(f"Not all channels present in noise covariance:\n{missing}") C = noise_cov._get_square()[np.ix_(noise_cov_idx, noise_cov_idx)] - info = pick_info(info, pick_channels(info['ch_names'], ch_names)) - projs = info['projs'] + noise_cov['projs'] + info = pick_info(info, pick_channels(info["ch_names"], ch_names, ordered=False)) + projs = info["projs"] + noise_cov["projs"] noise_cov = Covariance( - data=C, names=ch_names, bads=list(noise_cov['bads']), - projs=deepcopy(noise_cov['projs']), nfree=noise_cov['nfree'], - method=noise_cov.get('method', None), - loglik=noise_cov.get('loglik', None)) - - eig, eigvec, _ = _smart_eigh(noise_cov, info, rank, scalings, projs, - ch_names, on_rank_mismatch=on_rank_mismatch) + data=C, + names=ch_names, + bads=list(noise_cov["bads"]), + projs=deepcopy(noise_cov["projs"]), + nfree=noise_cov["nfree"], + method=noise_cov.get("method", None), + loglik=noise_cov.get("loglik", None), + ) + + eig, eigvec, _ = _smart_eigh( + noise_cov, + info, + rank, + scalings, + projs, + ch_names, + on_rank_mismatch=on_rank_mismatch, + ) noise_cov.update(eig=eig, eigvec=eigvec) return noise_cov @verbose -def _smart_eigh(C, info, rank, scalings=None, projs=None, - ch_names=None, proj_subspace=False, do_compute_rank=True, - on_rank_mismatch='ignore', verbose=None): +def _smart_eigh( + C, + info, + rank, + scalings=None, + projs=None, + ch_names=None, + proj_subspace=False, + do_compute_rank=True, + on_rank_mismatch="ignore", + *, + log_ch_type=None, + verbose=None, +): """Compute eigh of C taking into account rank and ch_type scalings.""" - scalings = _handle_default('scalings_cov_rank', scalings) - projs = info['projs'] if projs is None else projs - ch_names = info['ch_names'] if ch_names is None else ch_names - if info['ch_names'] != ch_names: - info = pick_info(info, [info['ch_names'].index(c) for c in ch_names]) - assert info['ch_names'] == ch_names + scalings = _handle_default("scalings_cov_rank", scalings) + projs = info["projs"] if projs is None else projs + ch_names = info["ch_names"] if ch_names is None else ch_names + if info["ch_names"] != ch_names: + info = pick_info(info, [info["ch_names"].index(c) for c in ch_names]) + assert info["ch_names"] == ch_names n_chan = len(ch_names) # Create the projection operator - proj, ncomp, _ = make_projector(projs, ch_names) + proj, ncomp, _ = _make_projector(projs, ch_names) if isinstance(C, Covariance): - C = C['data'] + C = C["data"] if ncomp > 0: - logger.info(' Created an SSP operator (subspace dimension = %d)' - % ncomp) + logger.info(" Created an SSP operator (subspace dimension = %d)", ncomp) C = np.dot(proj, np.dot(C, proj.T)) noise_cov = Covariance(C, ch_names, [], projs, 0) if do_compute_rank: # if necessary - rank = compute_rank( - noise_cov, rank, scalings, info, on_rank_mismatch=on_rank_mismatch) + rank = _compute_rank( + noise_cov, + rank, + scalings, + info, + 
on_rank_mismatch=on_rank_mismatch, + log_ch_type=log_ch_type, + ) assert C.ndim == 2 and C.shape[0] == C.shape[1] # time saving short-circuit if proj_subspace and sum(rank.values()) == C.shape[0]: return np.ones(n_chan), np.eye(n_chan), np.ones(n_chan, bool) - dtype = complex if C.dtype == np.complex_ else float + dtype = complex if C.dtype == np.complex128 else float eig = np.zeros(n_chan, dtype) eigvec = np.zeros((n_chan, n_chan), dtype) mask = np.zeros(n_chan, bool) - for ch_type, picks in _picks_by_type(info, meg_combined=True, - ref_meg=False, exclude=[]): + for ch_type, picks in _picks_by_type( + info, meg_combined=True, ref_meg=False, exclude=[] + ): if len(picks) == 0: continue this_C = C[np.ix_(picks, picks)] - if ch_type not in rank and ch_type in ('mag', 'grad'): - this_rank = rank['meg'] # if there is only one or the other + if ch_type not in rank and ch_type in ("mag", "grad"): + this_rank = rank["meg"] # if there is only one or the other else: this_rank = rank[ch_type] - e, ev, m = _get_ch_whitener(this_C, False, ch_type.upper(), this_rank) + if log_ch_type is not None: + ch_type_ = log_ch_type + else: + ch_type_ = ch_type.upper() + e, ev, m = _get_ch_whitener(this_C, False, ch_type_, this_rank) if proj_subspace: # Choose the subspace the same way we do for projections e, ev = _eigvec_subspace(e, ev, m) eig[picks], eigvec[np.ix_(picks, picks)], mask[picks] = e, ev, m + largest, smallest = e[-1], e[m][0] + if largest > 1e10 * smallest: + warn( + f"The largest eigenvalue of the {len(picks)}-channel {ch_type} " + f"covariance (rank={this_rank}) is over 10 orders of magnitude " + f"larger than the smallest ({largest:0.3g} > 1e10 * {smallest:0.3g}), " + "the resulting whitener will likely be unstable" + ) + # XXX : also handle ref for sEEG and ECoG - if ch_type == 'eeg' and _needs_eeg_average_ref_proj(info) and not \ _has_eeg_average_ref_proj(info, projs=projs): - warn('No average EEG reference present in info["projs"], ' 'covariance may be adversely affected. Consider recomputing ' 'covariance using with an average eeg reference projector ' 'added.') + if ( + ch_type == "eeg" + and _needs_eeg_average_ref_proj(info) + and not _has_eeg_average_ref_proj(info, projs=projs) + ): + warn( + 'No average EEG reference present in info["projs"], ' + "covariance may be adversely affected. Consider recomputing " + "covariance with an average EEG reference projector " + "added." + ) return eig, eigvec, mask @verbose -def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', - proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1, - fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1, - fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, dbs=0.1, - rank=None, scalings=None, verbose=None): +def regularize( + cov, + info, + mag=0.1, + grad=0.1, + eeg=0.1, + exclude="bads", + proj=True, + seeg=0.1, + ecog=0.1, + hbo=0.1, + hbr=0.1, + fnirs_cw_amplitude=0.1, + fnirs_fd_ac_amplitude=0.1, + fnirs_fd_phase=0.1, + fnirs_od=0.1, + csd=0.1, + dbs=0.1, + rank=None, + scalings=None, + verbose=None, +): """Regularize noise covariance matrix. This method works by adding a constant to the diagonal for each @@ -1564,9 +1949,8 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', .. note:: This function is kept for reasons of backward-compatibility. Please consider explicitly using the ``method`` parameter in :func:`mne.compute_covariance` to directly combine estimation - with regularization in a data-driven fashion. See the `faq - `_ - for more information. 
+ with regularization in a data-driven fashion. See the + :ref:`FAQ ` for more information. Parameters ---------- @@ -1627,45 +2011,61 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', -------- mne.compute_covariance """ # noqa: E501 - from scipy import linalg cov = cov.copy() info._check_consistency() - scalings = _handle_default('scalings_cov_rank', scalings) - regs = dict(eeg=eeg, seeg=seeg, dbs=dbs, ecog=ecog, hbo=hbo, hbr=hbr, - fnirs_cw_amplitude=fnirs_cw_amplitude, - fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude, - fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd) + scalings = _handle_default("scalings_cov_rank", scalings) + regs = dict( + eeg=eeg, + seeg=seeg, + dbs=dbs, + ecog=ecog, + hbo=hbo, + hbr=hbr, + fnirs_cw_amplitude=fnirs_cw_amplitude, + fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude, + fnirs_fd_phase=fnirs_fd_phase, + fnirs_od=fnirs_od, + csd=csd, + ) if exclude is None: raise ValueError('exclude must be a list of strings or "bads"') - if exclude == 'bads': - exclude = info['bads'] + cov['bads'] + if exclude == "bads": + exclude = info["bads"] + cov["bads"] picks_dict = {ch_type: [] for ch_type in _DATA_CH_TYPES_SPLIT} - meg_combined = 'auto' if rank != 'full' else False - picks_dict.update(dict(_picks_by_type( - info, meg_combined=meg_combined, exclude=exclude, ref_meg=False))) - if len(picks_dict.get('meg', [])) > 0 and rank != 'full': # combined + meg_combined = "auto" if rank != "full" else False + picks_dict.update( + dict( + _picks_by_type( + info, meg_combined=meg_combined, exclude=exclude, ref_meg=False + ) + ) + ) + if len(picks_dict.get("meg", [])) > 0 and rank != "full": # combined if mag != grad: - raise ValueError('On data where magnetometers and gradiometers ' - 'are dependent (e.g., SSSed data), mag (%s) must ' - 'equal grad (%s)' % (mag, grad)) - logger.info('Regularizing MEG channels jointly') - regs['meg'] = mag + raise ValueError( + "On data where magnetometers and gradiometers are dependent (e.g., " + f"SSSed data), mag ({mag}) must equal grad ({grad})" + ) + logger.info("Regularizing MEG channels jointly") + regs["meg"] = mag else: regs.update(mag=mag, grad=grad) - if rank != 'full': - rank = compute_rank(cov, rank, scalings, info) + if rank != "full": + rank = _compute_rank(cov, rank, scalings, info) - info_ch_names = info['ch_names'] + info_ch_names = info["ch_names"] ch_names_by_type = dict() for ch_type, picks_type in picks_dict.items(): ch_names_by_type[ch_type] = [info_ch_names[i] for i in picks_type] # This actually removes bad channels from the cov, which is not backward # compatible, so let's leave all channels in - cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude) + cov_good = pick_channels_cov( + cov, include=info_ch_names, exclude=exclude, ordered=False + ) ch_names = cov_good.ch_names # Now get the indices for each channel type in the cov @@ -1676,15 +2076,15 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', idx_cov[ch_type].append(i) break else: - raise Exception('channel %s is unknown type' % ch) + raise Exception(f"channel {ch} is unknown type") - C = cov_good['data'] + C = cov_good["data"] assert len(C) == sum(map(len, idx_cov.values())) if proj: - projs = info['projs'] + cov_good['projs'] - projs = activate_proj(projs) + projs = info["projs"] + cov_good["projs"] + projs = _activate_proj(projs) for ch_type in idx_cov: desc = ch_type.upper() @@ -1693,23 +2093,24 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', continue reg = regs[ch_type] 
if reg == 0.0: - logger.info(" %s regularization : None" % desc) + logger.info(f" {desc} regularization : None") continue - logger.info(" %s regularization : %s" % (desc, reg)) + logger.info(f" {desc} regularization : {reg}") this_C = C[np.ix_(idx, idx)] U = np.eye(this_C.shape[0]) this_ch_names = [ch_names[k] for k in idx] - if rank == 'full': + if rank == "full": if proj: - P, ncomp, _ = make_projector(projs, this_ch_names) + P, ncomp, _ = _make_projector(projs, this_ch_names) if ncomp > 0: # This adjustment ends up being redundant if rank is None: - U = linalg.svd(P)[0][:, :-ncomp] - logger.info(' Created an SSP operator for %s ' - '(dimension = %d)' % (desc, ncomp)) + U = _safe_svd(P)[0][:, :-ncomp] + logger.info( + f" Created an SSP operator for {desc} (dimension = {ncomp})" + ) else: - this_picks = pick_channels(info['ch_names'], this_ch_names) + this_picks = pick_channels(info["ch_names"], this_ch_names) this_info = pick_info(info, this_picks) # Here we could use proj_subspace=True, but this should not matter # since this is already in a loop over channel types @@ -1718,19 +2119,33 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', this_C = np.dot(U.T, np.dot(this_C, U)) sigma = np.mean(np.diag(this_C)) - this_C.flat[::len(this_C) + 1] += reg * sigma # modify diag inplace + this_C.flat[:: len(this_C) + 1] += reg * sigma # modify diag inplace this_C = np.dot(U, np.dot(this_C, U.T)) C[np.ix_(idx, idx)] = this_C # Put data back in correct locations - idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude) - cov['data'][np.ix_(idx, idx)] = C + idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude, ordered=False) + cov["data"][np.ix_(idx, idx)] = C return cov -def _regularized_covariance(data, reg=None, method_params=None, info=None, - rank=None): +@verbose +def _regularized_covariance( + data, + reg=None, + method_params=None, + info=None, + rank=None, + *, + log_ch_type=None, + log_rank=None, + cov_kind="", + # backward-compat default for decoding (maybe someday we want to expose this but + # it's likely too invasive and since it's usually regularized, unnecessary): + on_few_samples="ignore", + verbose=None, +): """Compute a regularized covariance from data using sklearn. This is a convenience wrapper for mne.decoding functions, which @@ -1741,36 +2156,59 @@ def _regularized_covariance(data, reg=None, method_params=None, info=None, cov : ndarray, shape (n_channels, n_channels) The covariance matrix. 
""" - _validate_type(reg, (str, 'numeric', None)) + _validate_type(reg, (str, "numeric", None)) if reg is None: - reg = 'empirical' + reg = "empirical" elif not isinstance(reg, str): reg = float(reg) if method_params is not None: - raise ValueError('If reg is a float, method_params must be None ' - '(got %s)' % (type(method_params),)) - method_params = dict(shrinkage=dict( - shrinkage=reg, assume_centered=True, store_precision=False)) - reg = 'shrinkage' + raise ValueError( + "If reg is a float, method_params must be None (got " + f"{type(method_params)})" + ) + method_params = dict( + shrinkage=dict(shrinkage=reg, assume_centered=True, store_precision=False) + ) + reg = "shrinkage" method, method_params = _check_method_params( - reg, method_params, name='reg', allow_auto=False, rank=rank) + reg, method_params, name="reg", allow_auto=False, rank=rank + ) # use mag instead of eeg here to avoid the cov EEG projection warning - info = create_info(data.shape[-2], 1000., 'mag') if info is None else info + info = create_info(data.shape[-2], 1000.0, "mag") if info is None else info picks_list = _picks_by_type(info) - scalings = _handle_default('scalings_cov_rank', None) + scalings = _handle_default("scalings_cov_rank", None) cov = _compute_covariance_auto( - data.T, method=method, method_params=method_params, - info=info, cv=None, n_jobs=None, stop_early=True, - picks_list=picks_list, scalings=scalings, - rank=rank)[reg]['data'] + data.T, + method=method, + method_params=method_params, + info=info, + cv=None, + n_jobs=None, + stop_early=True, + picks_list=picks_list, + scalings=scalings, + rank=rank, + cov_kind=cov_kind, + log_ch_type=log_ch_type, + log_rank=log_rank, + on_few_samples=on_few_samples, + )[reg]["data"] return cov @verbose -def compute_whitener(noise_cov, info=None, picks=None, rank=None, - scalings=None, return_rank=False, pca=False, - return_colorer=False, on_rank_mismatch='warn', - verbose=None): +def compute_whitener( + noise_cov, + info=None, + picks=None, + rank=None, + scalings=None, + return_rank=False, + pca=False, + return_colorer=False, + on_rank_mismatch="warn", + verbose=None, +): """Compute whitening matrix. Parameters @@ -1821,53 +2259,57 @@ def compute_whitener(noise_cov, info=None, picks=None, rank=None, colorer : ndarray, shape (n_channels, n_channels) or (n_channels, n_nonzero) The coloring matrix. 
""" # noqa: E501 - _validate_type(pca, (str, bool), 'space') - _valid_pcas = (True, 'white', False) + _validate_type(pca, (str, bool), "space") + _valid_pcas = (True, "white", False) if pca not in _valid_pcas: - raise ValueError('space must be one of %s, got %s' - % (_valid_pcas, pca)) + raise ValueError(f"space must be one of {_valid_pcas}, got {pca}") if info is None: - if 'eig' not in noise_cov: - raise ValueError('info can only be None if the noise cov has ' - 'already been prepared with prepare_noise_cov') - ch_names = deepcopy(noise_cov['names']) + if "eig" not in noise_cov: + raise ValueError( + "info can only be None if the noise cov has already been prepared with " + "prepare_noise_cov" + ) + ch_names = deepcopy(noise_cov["names"]) else: picks = _picks_to_idx(info, picks, with_ref_meg=False) - ch_names = [info['ch_names'][k] for k in picks] + ch_names = [info["ch_names"][k] for k in picks] del picks noise_cov = prepare_noise_cov( - noise_cov, info, ch_names, rank, scalings, - on_rank_mismatch=on_rank_mismatch) + noise_cov, info, ch_names, rank, scalings, on_rank_mismatch=on_rank_mismatch + ) n_chan = len(ch_names) - assert n_chan == len(noise_cov['eig']) + assert n_chan == len(noise_cov["eig"]) # Omit the zeroes due to projection - eig = noise_cov['eig'].copy() - nzero = (eig > 0) - eig[~nzero] = 0. # get rid of numerical noise (negative) ones + eig = noise_cov["eig"].copy() + nzero = eig > 0 + eig[~nzero] = 0.0 # get rid of numerical noise (negative) ones - if noise_cov['eigvec'].dtype.kind == 'c': + if noise_cov["eigvec"].dtype.kind == "c": dtype = np.complex128 else: dtype = np.float64 W = np.zeros((n_chan, 1), dtype) W[nzero, 0] = 1.0 / np.sqrt(eig[nzero]) # Rows of eigvec are the eigenvectors - W = W * noise_cov['eigvec'] # C ** -0.5 - C = np.sqrt(eig) * noise_cov['eigvec'].conj().T # C ** 0.5 + W = W * noise_cov["eigvec"] # C ** -0.5 + C = np.sqrt(eig) * noise_cov["eigvec"].conj().T # C ** 0.5 n_nzero = nzero.sum() - logger.info(' Created the whitener using a noise covariance matrix ' - 'with rank %d (%d small eigenvalues omitted)' - % (n_nzero, noise_cov['dim'] - n_nzero)) + logger.info( + " Created the whitener using a noise covariance matrix " + "with rank %d (%d small eigenvalues omitted)", + n_nzero, + noise_cov["dim"] - n_nzero, + ) # Do the requested projection if pca is True: W = W[nzero] C = C[:, nzero] elif pca is False: - W = np.dot(noise_cov['eigvec'].conj().T, W) - C = np.dot(C, noise_cov['eigvec']) + W = np.dot(noise_cov["eigvec"].conj().T, W) + C = np.dot(C, noise_cov["eigvec"]) # Triage return out = W, ch_names @@ -1879,8 +2321,9 @@ def compute_whitener(noise_cov, info=None, picks=None, rank=None, @verbose -def whiten_evoked(evoked, noise_cov, picks=None, diag=None, rank=None, - scalings=None, verbose=None): +def whiten_evoked( + evoked, noise_cov, picks=None, diag=None, rank=None, scalings=None, verbose=None +): """Whiten evoked data using given noise covariance. 
Parameters @@ -1916,8 +2359,9 @@ def whiten_evoked(evoked, noise_cov, picks=None, diag=None, rank=None, if diag: noise_cov = noise_cov.as_diag() - W, _ = compute_whitener(noise_cov, evoked.info, picks=picks, - rank=rank, scalings=scalings) + W, _ = compute_whitener( + noise_cov, evoked.info, picks=picks, rank=rank, scalings=scalings + ) evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks]) return evoked @@ -1927,29 +2371,30 @@ def whiten_evoked(evoked, noise_cov, picks=None, diag=None, rank=None, def _read_cov(fid, node, cov_kind, limited=False, verbose=None): """Read a noise covariance matrix.""" # Find all covariance matrices - from scipy import sparse + from ._fiff.write import _safe_name_list + covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV) if len(covs) == 0: - raise ValueError('No covariance matrices found') + raise ValueError("No covariance matrices found") # Is any of the covariance matrices a noise covariance for p in range(len(covs)): tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND) - if tag is not None and int(tag.data) == cov_kind: + if tag is not None and int(tag.data.item()) == cov_kind: this = covs[p] # Find all the necessary data tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM) if tag is None: - raise ValueError('Covariance matrix dimension not found') - dim = int(tag.data) + raise ValueError("Covariance matrix dimension not found") + dim = int(tag.data.item()) tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE) if tag is None: nfree = -1 else: - nfree = int(tag.data) + nfree = int(tag.data.item()) tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD) if tag is None: @@ -1967,39 +2412,52 @@ def _read_cov(fid, node, cov_kind, limited=False, verbose=None): if tag is None: names = [] else: - names = _safe_name_list(tag.data, 'read', 'names') + names = _safe_name_list(tag.data, "read", "names") if len(names) != dim: - raise ValueError('Number of names does not match ' - 'covariance matrix dimension') + raise ValueError( + "Number of names does not match covariance matrix dimension" + ) tag = find_tag(fid, this, FIFF.FIFF_MNE_COV) if tag is None: tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG) if tag is None: - raise ValueError('No covariance matrix data found') + raise ValueError("No covariance matrix data found") else: # Diagonal is stored data = tag.data diag = True - logger.info(' %d x %d diagonal covariance (kind = ' - '%d) found.' % (dim, dim, cov_kind)) + logger.info( + " %d x %d diagonal covariance (kind = %d) found.", + dim, + dim, + cov_kind, + ) else: - if not sparse.issparse(tag.data): + if not issparse(tag.data): # Lower diagonal is stored vals = tag.data data = np.zeros((dim, dim)) data[np.tril(np.ones((dim, dim))) > 0] = vals data = data + data.T - data.flat[::dim + 1] /= 2.0 + data.flat[:: dim + 1] /= 2.0 diag = False - logger.info(' %d x %d full covariance (kind = %d) ' - 'found.' % (dim, dim, cov_kind)) + logger.info( + " %d x %d full covariance (kind = %d) found.", + dim, + dim, + cov_kind, + ) else: diag = False data = tag.data - logger.info(' %d x %d sparse covariance (kind = %d)' - ' found.' 
% (dim, dim, cov_kind)) + logger.info( + " %d x %d sparse covariance (kind = %d) found.", + dim, + dim, + cov_kind, + ) # Read the possibly precomputed decomposition tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES) @@ -2020,78 +2478,96 @@ def _read_cov(fid, node, cov_kind, limited=False, verbose=None): # Put it together assert dim == len(data) assert data.ndim == (1 if diag else 2) - cov = dict(kind=cov_kind, diag=diag, dim=dim, names=names, - data=data, projs=projs, bads=bads, nfree=nfree, eig=eig, - eigvec=eigvec) + cov = dict( + kind=cov_kind, + diag=diag, + dim=dim, + names=names, + data=data, + projs=projs, + bads=bads, + nfree=nfree, + eig=eig, + eigvec=eigvec, + ) if score is not None: - cov['loglik'] = score + cov["loglik"] = score if method is not None: - cov['method'] = method + cov["method"] = method if limited: - del cov['kind'], cov['dim'], cov['diag'] + del cov["kind"], cov["dim"], cov["diag"] return cov - logger.info(' Did not find the desired covariance matrix (kind = %d)' - % cov_kind) + logger.info(" Did not find the desired covariance matrix (kind = %d)", cov_kind) return None def _write_cov(fid, cov): """Write a noise covariance matrix.""" + from ._fiff.write import ( + end_block, + start_block, + write_double, + write_float_matrix, + write_int, + write_name_list_sanitized, + write_string, + ) + start_block(fid, FIFF.FIFFB_MNE_COV) # Dimensions etc. - write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind']) - write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim']) - if cov['nfree'] > 0: - write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree']) + write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov["kind"]) + write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov["dim"]) + if cov["nfree"] > 0: + write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov["nfree"]) # Channel names - if cov['names'] is not None and len(cov['names']) > 0: + if cov["names"] is not None and len(cov["names"]) > 0: write_name_list_sanitized( - fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'], 'cov["names"]') + fid, FIFF.FIFF_MNE_ROW_NAMES, cov["names"], 'cov["names"]' + ) # Data - if cov['diag']: - write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data']) + if cov["diag"]: + write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov["data"]) else: # Store only lower part of covariance matrix - dim = cov['dim'] + dim = cov["dim"] mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0 - vals = cov['data'][mask].ravel() + vals = cov["data"][mask].ravel() write_double(fid, FIFF.FIFF_MNE_COV, vals) # Eigenvalues and vectors if present - if cov['eig'] is not None and cov['eigvec'] is not None: - write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec']) - write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig']) + if cov["eig"] is not None and cov["eigvec"] is not None: + write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov["eigvec"]) + write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov["eig"]) # Projection operator - if cov['projs'] is not None and len(cov['projs']) > 0: - _write_proj(fid, cov['projs']) + if cov["projs"] is not None and len(cov["projs"]) > 0: + _write_proj(fid, cov["projs"]) # Bad channels - _write_bad_channels(fid, cov['bads'], None) + _write_bad_channels(fid, cov["bads"], None) # estimator method - if 'method' in cov: - write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method']) + if "method" in cov: + write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov["method"]) # negative log-likelihood score - if 'loglik' in cov: - write_double( - fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik'])) + if "loglik" in cov: + 
write_double(fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov["loglik"])) # Done! end_block(fid, FIFF.FIFFB_MNE_COV) @verbose -def _ensure_cov(cov, name='cov', *, verbose=None): - _validate_type(cov, ('path-like', Covariance), name) - logger.info('Noise covariance : %s' % (cov,)) +def _ensure_cov(cov, name="cov", *, verbose=None): + _validate_type(cov, ("path-like", Covariance), name) + logger.info(f"Noise covariance : {cov}") if not isinstance(cov, Covariance): cov = read_cov(cov, verbose=_verbose_safe_false()) return cov diff --git a/mne/cuda.py b/mne/cuda.py index 15a2be2bab7..ce46207563d 100644 --- a/mne/cuda.py +++ b/mne/cuda.py @@ -1,17 +1,25 @@ -# Authors: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np - -from .utils import (sizeof_fmt, logger, get_config, warn, _explain_exception, - verbose, fill_doc, _check_option) - +from scipy.fft import irfft, rfft + +from .utils import ( + _check_option, + _explain_exception, + fill_doc, + get_config, + logger, + sizeof_fmt, + verbose, + warn, +) _cuda_capable = False -def get_cuda_memory(kind='available'): +def get_cuda_memory(kind="available"): """Get the amount of free memory for CUDA operations. Parameters @@ -25,10 +33,11 @@ def get_cuda_memory(kind='available'): The amount of available or total memory as a human-readable string. """ if not _cuda_capable: - warn('CUDA not enabled, returning zero for memory') + warn("CUDA not enabled, returning zero for memory") mem = 0 else: import cupy + mem = cupy.cuda.runtime.memGetInfo()[dict(available=0, total=1)[kind]] return sizeof_fmt(mem) @@ -55,29 +64,30 @@ def init_cuda(ignore_config=False, verbose=None): global _cuda_capable if _cuda_capable: return - if not ignore_config and (get_config('MNE_USE_CUDA', 'false').lower() != - 'true'): - logger.info('CUDA not enabled in config, skipping initialization') + if not ignore_config and (get_config("MNE_USE_CUDA", "false").lower() != "true"): + logger.info("CUDA not enabled in config, skipping initialization") return # Triage possible errors for informative messaging _cuda_capable = False try: import cupy # noqa except ImportError: - warn('module cupy not found, CUDA not enabled') + warn("module cupy not found, CUDA not enabled") return - device_id = int(get_config('MNE_CUDA_DEVICE', '0')) + device_id = int(get_config("MNE_CUDA_DEVICE", "0")) try: # Initialize CUDA _set_cuda_device(device_id, verbose) except Exception: - warn('so CUDA device could be initialized, likely a hardware error, ' - 'CUDA not enabled%s' % _explain_exception()) + warn( + "so CUDA device could be initialized, likely a hardware error, " + f"CUDA not enabled{_explain_exception()}" + ) return _cuda_capable = True # Figure out limit for CUDA FFT calculations - logger.info('Enabling CUDA with %s available memory' % get_cuda_memory()) + logger.info(f"Enabling CUDA with {get_cuda_memory()} available memory") @verbose @@ -92,28 +102,31 @@ def set_cuda_device(device_id, verbose=None): """ if _cuda_capable: _set_cuda_device(device_id, verbose) - elif get_config('MNE_USE_CUDA', 'false').lower() == 'true': + elif get_config("MNE_USE_CUDA", "false").lower() == "true": init_cuda() _set_cuda_device(device_id, verbose) else: - warn('Could not set CUDA device because CUDA is not enabled; either ' - 'run mne.cuda.init_cuda() first, or set the MNE_USE_CUDA config ' - 'variable to "true".') + warn( + "Could not set CUDA device because CUDA is not enabled; either " + "run mne.cuda.init_cuda() first, or set the 
MNE_USE_CUDA config " + 'variable to "true".' + ) @verbose def _set_cuda_device(device_id, verbose=None): """Set the CUDA device.""" import cupy + cupy.cuda.Device(device_id).use() - logger.info('Now using CUDA device {}'.format(device_id)) + logger.info(f"Now using CUDA device {device_id}") ############################################################################### # Repeated FFT multiplication -def _setup_cuda_fft_multiply_repeated(n_jobs, h, n_fft, - kind='FFT FIR filtering'): + +def _setup_cuda_fft_multiply_repeated(n_jobs, h, n_fft, kind="FFT FIR filtering"): """Set up repeated CUDA FFT multiplication with a given filter. Parameters @@ -153,29 +166,29 @@ def _setup_cuda_fft_multiply_repeated(n_jobs, h, n_fft, ----- This function is designed to be used with fft_multiply_repeated(). """ - from scipy.fft import rfft, irfft - cuda_dict = dict(n_fft=n_fft, rfft=rfft, irfft=irfft, - h_fft=rfft(h, n=n_fft)) + cuda_dict = dict(n_fft=n_fft, rfft=rfft, irfft=irfft, h_fft=rfft(h, n=n_fft)) if isinstance(n_jobs, str): - _check_option('n_jobs', n_jobs, ('cuda',)) + _check_option("n_jobs", n_jobs, ("cuda",)) n_jobs = 1 init_cuda() if _cuda_capable: import cupy + try: # do the IFFT normalization now so we don't have to later - h_fft = cupy.array(cuda_dict['h_fft']) - logger.info('Using CUDA for %s' % kind) + h_fft = cupy.array(cuda_dict["h_fft"]) + logger.info(f"Using CUDA for {kind}") except Exception as exp: - logger.info('CUDA not used, could not instantiate memory ' - '(arrays may be too large: "%s"), falling back to ' - 'n_jobs=None' % str(exp)) - cuda_dict.update(h_fft=h_fft, - rfft=_cuda_upload_rfft, - irfft=_cuda_irfft_get) + logger.info( + "CUDA not used, could not instantiate memory (arrays may be too " + f'large: "{exp}"), falling back to n_jobs=None' + ) + cuda_dict.update(h_fft=h_fft, rfft=_cuda_upload_rfft, irfft=_cuda_irfft_get) else: - logger.info('CUDA not used, CUDA could not be initialized, ' - 'falling back to n_jobs=None') + logger.info( + "CUDA not used, CUDA could not be initialized, " + "falling back to n_jobs=None" + ) return n_jobs, cuda_dict @@ -199,15 +212,16 @@ def _fft_multiply_repeated(x, cuda_dict): Filtered version of x. """ # do the fourier-domain operations - x_fft = cuda_dict['rfft'](x, cuda_dict['n_fft']) - x_fft *= cuda_dict['h_fft'] - x = cuda_dict['irfft'](x_fft, cuda_dict['n_fft']) + x_fft = cuda_dict["rfft"](x, cuda_dict["n_fft"]) + x_fft *= cuda_dict["h_fft"] + x = cuda_dict["irfft"](x_fft, cuda_dict["n_fft"]) return x ############################################################################### # FFT Resampling + def _setup_cuda_fft_resample(n_jobs, W, new_len): """Set up CUDA FFT resampling. @@ -247,53 +261,58 @@ def _setup_cuda_fft_resample(n_jobs, W, new_len): ----- This function is designed to be used with fft_resample(). """ - from scipy.fft import rfft, irfft cuda_dict = dict(use_cuda=False, rfft=rfft, irfft=irfft) rfft_len_x = len(W) // 2 + 1 # fold the window onto inself (should be symmetric) and truncate W = W.copy() - W[1:rfft_len_x] = (W[1:rfft_len_x] + W[::-1][:rfft_len_x - 1]) / 2. 
+ W[1:rfft_len_x] = (W[1:rfft_len_x] + W[::-1][: rfft_len_x - 1]) / 2.0 W = W[:rfft_len_x] if isinstance(n_jobs, str): - _check_option('n_jobs', n_jobs, ('cuda',)) + _check_option("n_jobs", n_jobs, ("cuda",)) n_jobs = 1 init_cuda() if _cuda_capable: try: import cupy + # do the IFFT normalization now so we don't have to later W = cupy.array(W) - logger.info('Using CUDA for FFT resampling') + logger.info("Using CUDA for FFT resampling") except Exception: - logger.info('CUDA not used, could not instantiate memory ' - '(arrays may be too large), falling back to ' - 'n_jobs=None') + logger.info( + "CUDA not used, could not instantiate memory " + "(arrays may be too large), falling back to " + "n_jobs=None" + ) else: - cuda_dict.update(use_cuda=True, - rfft=_cuda_upload_rfft, - irfft=_cuda_irfft_get) + cuda_dict.update( + use_cuda=True, rfft=_cuda_upload_rfft, irfft=_cuda_irfft_get + ) else: - logger.info('CUDA not used, CUDA could not be initialized, ' - 'falling back to n_jobs=None') - cuda_dict['W'] = W + logger.info( + "CUDA not used, CUDA could not be initialized, " + "falling back to n_jobs=None" + ) + cuda_dict["W"] = W return n_jobs, cuda_dict def _cuda_upload_rfft(x, n, axis=-1): """Upload and compute rfft.""" import cupy + return cupy.fft.rfft(cupy.array(x), n=n, axis=axis) def _cuda_irfft_get(x, n, axis=-1): """Compute irfft and get.""" import cupy + return cupy.fft.irfft(x, n=n, axis=axis).get() @fill_doc -def _fft_resample(x, new_len, npads, to_removes, cuda_dict=None, - pad='reflect_limited'): +def _fft_resample(x, new_len, npads, to_removes, cuda_dict=None, pad="reflect_limited"): """Do FFT resampling with a filter function (possibly using CUDA). Parameters @@ -309,7 +328,7 @@ def _fft_resample(x, new_len, npads, to_removes, cuda_dict=None, Number of samples to remove after resampling. cuda_dict : dict Dictionary constructed using setup_cuda_multiply_repeated(). - %(pad)s + %(pad_resample)s The default is ``'reflect_limited'``. .. 
versionadded:: 0.15 @@ -327,16 +346,16 @@ def _fft_resample(x, new_len, npads, to_removes, cuda_dict=None, old_len = len(x) shorter = new_len < old_len use_len = new_len if shorter else old_len - x_fft = cuda_dict['rfft'](x, None) + x_fft = cuda_dict["rfft"](x, None) if use_len % 2 == 0: nyq = use_len // 2 - x_fft[nyq:nyq + 1] *= 2 if shorter else 0.5 - x_fft *= cuda_dict['W'] - y = cuda_dict['irfft'](x_fft, new_len) + x_fft[nyq : nyq + 1] *= 2 if shorter else 0.5 + x_fft *= cuda_dict["W"] + y = cuda_dict["irfft"](x_fft, new_len) # now let's trim it back to the correct size (if there was padding) if (to_removes > 0).any(): - y = y[to_removes[0]:y.shape[0] - to_removes[1]] + y = y[to_removes[0] : y.shape[0] - to_removes[1]] return y @@ -344,20 +363,31 @@ def _fft_resample(x, new_len, npads, to_removes, cuda_dict=None, ############################################################################### # Misc + # this has to go in mne.cuda instead of mne.filter to avoid import errors -def _smart_pad(x, n_pad, pad='reflect_limited'): +def _smart_pad(x, n_pad, pad="reflect_limited"): """Pad vector x.""" n_pad = np.asarray(n_pad) assert n_pad.shape == (2,) if (n_pad == 0).all(): return x elif (n_pad < 0).any(): - raise RuntimeError('n_pad must be non-negative') - if pad == 'reflect_limited': - # need to pad with zeros if len(x) <= npad + raise RuntimeError("n_pad must be non-negative") + if pad == "reflect_limited": l_z_pad = np.zeros(max(n_pad[0] - len(x) + 1, 0), dtype=x.dtype) r_z_pad = np.zeros(max(n_pad[1] - len(x) + 1, 0), dtype=x.dtype) - return np.concatenate([l_z_pad, 2 * x[0] - x[n_pad[0]:0:-1], x, - 2 * x[-1] - x[-2:-n_pad[1] - 2:-1], r_z_pad]) + out = np.concatenate( + [ + l_z_pad, + 2 * x[0] - x[n_pad[0] : 0 : -1], + x, + 2 * x[-1] - x[-2 : -n_pad[1] - 2 : -1], + r_z_pad, + ] + ) else: - return np.pad(x, (tuple(n_pad),), pad) + kwargs = dict() + if pad == "reflect": + kwargs["reflect_type"] = "odd" + out = np.pad(x, (tuple(n_pad),), pad, **kwargs) + return out diff --git a/mne/data/__init__.py b/mne/data/__init__.py index 6f92b469cf7..a48c2d63753 100644 --- a/mne/data/__init__.py +++ b/mne/data/__init__.py @@ -1 +1,5 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ """MNE-Python data.""" diff --git a/mne/data/helmets/Kernel_Flux.fif.gz b/mne/data/helmets/Kernel_Flux.fif.gz new file mode 100644 index 00000000000..66504d7b4a9 Binary files /dev/null and b/mne/data/helmets/Kernel_Flux.fif.gz differ diff --git a/mne/data/helmets/Kernel_Flux_ch_pos.txt b/mne/data/helmets/Kernel_Flux_ch_pos.txt new file mode 100644 index 00000000000..8f9807c9e03 --- /dev/null +++ b/mne/data/helmets/Kernel_Flux_ch_pos.txt @@ -0,0 +1,202 @@ +{ + "MA1": [ + -0.040249, + 0.092195, + 0.024061 + ], + "MA2": [ + 0.0, + 0.10462, + 0.016906 + ], + "MA3": [ + 0.040249, + 0.092195, + 0.024061 + ], + "MA4": [ + 0.0, + 0.083089, + 0.053953 + ], + "MB1": [ + -0.040251, + 0.063041, + 0.05933 + ], + "MB2": [ + 0.0, + 0.045194, + 0.077115 + ], + "MB3": [ + 0.040251, + 0.063041, + 0.05933 + ], + "MB4": [ + -0.036861, + 0.026016, + 0.076163 + ], + "MB5": [ + 0.036861, + 0.026016, + 0.076163 + ], + "MC1": [ + -0.035716, + -0.017727, + 0.077709 + ], + "MC2": [ + 0.0, + 0.00196, + 0.085517 + ], + "MC3": [ + 0.035716, + -0.017727, + 0.077709 + ], + "MC4": [ + -0.034502, + -0.056871, + 0.064752 + ], + "MC5": [ + 0.0, + -0.037943, + 0.080668 + ], + "MC6": [ + 0.034502, + -0.056871, + 0.064752 + ], + "MD1": [ + -0.036596, + -0.084035, + 0.027244 + ], + "MD2": [ + 0, + -0.082423, + 0.048749 + ], + "MD3": [ + 0.036596, + -0.084035, + 0.027244 + ], + "MD4": [ + 0, + -0.095211, + 0.008834 + ], + "ME1": [ + -0.038329, + -0.084588, + -0.017313 + ], + "ME2": [ + 0.0, + -0.086368, + -0.035705 + ], + "ME3": [ + 0.038329, + -0.084588, + -0.017313 + ], + "ME4": [ + -0.028545, + -0.071814, + -0.06008 + ], + "ME5": [ + 0.028545, + -0.071814, + -0.06008 + ], + "RA1": [ + 0.06916, + 0.059735, + 0.008274 + ], + "RA2": [ + 0.070053, + 0.032925, + 0.039006 + ], + "RB1": [ + 0.069394, + -0.009905, + 0.045791 + ], + "RB2": [ + 0.06596, + -0.048253, + 0.029541 + ], + "RC1": [ + 0.07757, + 0.023536, + -0.014638 + ], + "RC2": [ + 0.077612, + -0.013307, + 0.002181 + ], + "RC3": [ + 0.068927, + -0.049354, + -0.013998 + ], + "RC4": [ + 0.06444, + -0.040455, + -0.053382 + ], + "LA1": [ + -0.06916, + 0.059735, + 0.008274 + ], + "LA2": [ + -0.070053, + 0.032925, + 0.039006 + ], + "LB1": [ + -0.069394, + -0.009905, + 0.045791 + ], + "LB2": [ + -0.06596, + -0.048253, + 0.029541 + ], + "LC1": [ + -0.07757, + 0.023536, + -0.014638 + ], + "LC2": [ + -0.077612, + -0.013307, + 0.002181 + ], + "LC3": [ + -0.068927, + -0.049354, + -0.013998 + ], + "LC4": [ + -0.06444, + -0.040455, + -0.053382 + ] +} \ No newline at end of file diff --git a/mne/data/image/custom_layout.lout b/mne/data/image/custom_layout.lout deleted file mode 100644 index ab5b81408cb..00000000000 --- a/mne/data/image/custom_layout.lout +++ /dev/null @@ -1,257 +0,0 @@ - 0.00 0.00 0.01 0.02 -000 0.79 0.46 0.07 0.05 0 -001 0.78 0.48 0.07 0.05 1 -002 0.76 0.51 0.07 0.05 2 -003 0.74 0.53 0.07 0.05 3 -004 0.72 0.55 0.07 0.05 4 -005 0.71 0.57 0.07 0.05 5 -006 0.69 0.59 0.07 0.05 6 -007 0.67 0.62 0.07 0.05 7 -008 0.66 0.64 0.07 0.05 8 -009 0.64 0.66 0.07 0.05 9 -010 0.62 0.68 0.07 0.05 10 -011 0.61 0.69 0.07 0.05 11 -012 0.59 0.71 0.07 0.05 12 -013 0.58 0.73 0.07 0.05 13 -014 0.56 0.75 0.07 0.05 14 -015 0.54 0.77 0.07 0.05 15 -016 0.77 0.44 0.07 0.05 16 -017 0.75 0.46 0.07 0.05 17 -018 0.73 0.49 0.07 0.05 18 -019 0.72 0.51 0.07 0.05 19 -020 0.70 0.54 0.07 0.05 20 -021 0.68 0.56 0.07 0.05 21 -022 0.66 0.58 0.07 0.05 22 -023 0.65 0.60 0.07 0.05 23 -024 0.63 0.62 0.07 0.05 24 -025 0.62 0.64 0.07 0.05 25 -026 0.60 0.66 0.07 0.05 26 -027 0.58 0.68 0.07 0.05 27 -028 0.57 0.70 0.07 
0.05 28 -029 0.55 0.71 0.07 0.05 29 -030 0.53 0.73 0.07 0.05 30 -031 0.52 0.75 0.07 0.05 31 -032 0.75 0.42 0.07 0.05 32 -033 0.73 0.45 0.07 0.05 33 -034 0.71 0.47 0.07 0.05 34 -035 0.69 0.50 0.07 0.05 35 -036 0.68 0.52 0.07 0.05 36 -037 0.66 0.54 0.07 0.05 37 -038 0.64 0.57 0.07 0.05 38 -039 0.62 0.58 0.07 0.05 39 -040 0.61 0.61 0.07 0.05 40 -041 0.59 0.62 0.07 0.05 41 -042 0.58 0.64 0.07 0.05 42 -043 0.56 0.66 0.07 0.05 43 -044 0.54 0.68 0.07 0.05 44 -045 0.53 0.70 0.07 0.05 45 -046 0.51 0.72 0.07 0.05 46 -047 0.50 0.74 0.07 0.05 47 -048 0.72 0.41 0.07 0.05 48 -049 0.71 0.43 0.07 0.05 49 -050 0.69 0.46 0.07 0.05 50 -051 0.67 0.48 0.07 0.05 51 -052 0.65 0.50 0.07 0.05 52 -053 0.63 0.52 0.07 0.05 53 -054 0.62 0.55 0.07 0.05 54 -055 0.60 0.57 0.07 0.05 55 -056 0.58 0.59 0.07 0.05 56 -057 0.57 0.61 0.07 0.05 57 -058 0.55 0.63 0.07 0.05 58 -059 0.54 0.65 0.07 0.05 59 -060 0.52 0.67 0.07 0.05 60 -061 0.51 0.69 0.07 0.05 61 -062 0.49 0.71 0.07 0.05 62 -063 0.47 0.73 0.07 0.05 63 -064 0.70 0.39 0.07 0.05 64 -065 0.68 0.41 0.07 0.05 65 -066 0.66 0.44 0.07 0.05 66 -067 0.65 0.46 0.07 0.05 67 -068 0.63 0.49 0.07 0.05 68 -069 0.61 0.51 0.07 0.05 69 -070 0.59 0.53 0.07 0.05 70 -071 0.58 0.55 0.07 0.05 71 -072 0.56 0.57 0.07 0.05 72 -073 0.55 0.59 0.07 0.05 73 -074 0.53 0.61 0.07 0.05 74 -075 0.51 0.64 0.07 0.05 75 -076 0.50 0.66 0.07 0.05 76 -077 0.48 0.68 0.07 0.05 77 -078 0.47 0.69 0.07 0.05 78 -079 0.45 0.72 0.07 0.05 79 -080 0.68 0.38 0.07 0.05 80 -081 0.66 0.40 0.07 0.05 81 -082 0.64 0.42 0.07 0.05 82 -083 0.62 0.44 0.07 0.05 83 -084 0.60 0.47 0.07 0.05 84 -085 0.59 0.49 0.07 0.05 85 -086 0.57 0.51 0.07 0.05 86 -087 0.55 0.54 0.07 0.05 87 -088 0.54 0.56 0.07 0.05 88 -089 0.52 0.58 0.07 0.05 89 -090 0.50 0.60 0.07 0.05 90 -091 0.49 0.62 0.07 0.05 91 -092 0.47 0.64 0.07 0.05 92 -093 0.46 0.66 0.07 0.05 93 -094 0.44 0.68 0.07 0.05 94 -095 0.42 0.70 0.07 0.05 95 -096 0.65 0.36 0.07 0.05 96 -097 0.63 0.38 0.07 0.05 97 -098 0.61 0.41 0.07 0.05 98 -099 0.60 0.43 0.07 0.05 99 -100 0.58 0.45 0.07 0.05 100 -101 0.56 0.47 0.07 0.05 101 -102 0.55 0.50 0.07 0.05 102 -103 0.53 0.52 0.07 0.05 103 -104 0.51 0.54 0.07 0.05 104 -105 0.50 0.56 0.07 0.05 105 -106 0.48 0.58 0.07 0.05 106 -107 0.47 0.61 0.07 0.05 107 -108 0.45 0.63 0.07 0.05 108 -109 0.44 0.65 0.07 0.05 109 -110 0.42 0.67 0.07 0.05 110 -111 0.41 0.69 0.07 0.05 111 -112 0.63 0.34 0.07 0.05 112 -113 0.61 0.36 0.07 0.05 113 -114 0.59 0.39 0.07 0.05 114 -115 0.58 0.41 0.07 0.05 115 -116 0.56 0.43 0.07 0.05 116 -117 0.54 0.46 0.07 0.05 117 -118 0.52 0.48 0.07 0.05 118 -119 0.51 0.51 0.07 0.05 119 -120 0.49 0.52 0.07 0.05 120 -121 0.47 0.55 0.07 0.05 121 -122 0.46 0.57 0.07 0.05 122 -123 0.44 0.59 0.07 0.05 123 -124 0.43 0.61 0.07 0.05 124 -125 0.41 0.63 0.07 0.05 125 -126 0.40 0.65 0.07 0.05 126 -127 0.38 0.67 0.07 0.05 127 -128 0.60 0.32 0.07 0.05 128 -129 0.59 0.35 0.07 0.05 129 -130 0.56 0.37 0.07 0.05 130 -131 0.55 0.39 0.07 0.05 131 -132 0.53 0.42 0.07 0.05 132 -133 0.52 0.44 0.07 0.05 133 -134 0.50 0.46 0.07 0.05 134 -135 0.48 0.49 0.07 0.05 135 -136 0.47 0.51 0.07 0.05 136 -137 0.45 0.53 0.07 0.05 137 -138 0.43 0.56 0.07 0.05 138 -139 0.42 0.57 0.07 0.05 139 -140 0.40 0.60 0.07 0.05 140 -141 0.39 0.61 0.07 0.05 141 -142 0.37 0.63 0.07 0.05 142 -143 0.36 0.66 0.07 0.05 143 -144 0.58 0.31 0.07 0.05 144 -145 0.56 0.33 0.07 0.05 145 -146 0.54 0.35 0.07 0.05 146 -147 0.53 0.38 0.07 0.05 147 -148 0.51 0.40 0.07 0.05 148 -149 0.49 0.42 0.07 0.05 149 -150 0.48 0.45 0.07 0.05 150 -151 0.46 0.47 0.07 0.05 151 -152 0.44 0.49 0.07 0.05 152 -153 0.42 0.51 0.07 
0.05 153 -154 0.41 0.53 0.07 0.05 154 -155 0.39 0.56 0.07 0.05 155 -156 0.38 0.58 0.07 0.05 156 -157 0.36 0.60 0.07 0.05 157 -158 0.35 0.62 0.07 0.05 158 -159 0.33 0.64 0.07 0.05 159 -160 0.55 0.29 0.07 0.05 160 -161 0.54 0.32 0.07 0.05 161 -162 0.52 0.34 0.07 0.05 162 -163 0.50 0.36 0.07 0.05 163 -164 0.49 0.38 0.07 0.05 164 -165 0.47 0.41 0.07 0.05 165 -166 0.45 0.43 0.07 0.05 166 -167 0.43 0.45 0.07 0.05 167 -168 0.42 0.48 0.07 0.05 168 -169 0.40 0.50 0.07 0.05 169 -170 0.39 0.52 0.07 0.05 170 -171 0.37 0.54 0.07 0.05 171 -172 0.36 0.56 0.07 0.05 172 -173 0.34 0.58 0.07 0.05 173 -174 0.33 0.60 0.07 0.05 174 -175 0.31 0.62 0.07 0.05 175 -176 0.53 0.27 0.07 0.05 176 -177 0.52 0.30 0.07 0.05 177 -178 0.50 0.32 0.07 0.05 178 -179 0.48 0.34 0.07 0.05 179 -180 0.46 0.37 0.07 0.05 180 -181 0.45 0.39 0.07 0.05 181 -182 0.43 0.41 0.07 0.05 182 -183 0.41 0.43 0.07 0.05 183 -184 0.40 0.46 0.07 0.05 184 -185 0.38 0.48 0.07 0.05 185 -186 0.36 0.50 0.07 0.05 186 -187 0.35 0.53 0.07 0.05 187 -188 0.33 0.55 0.07 0.05 188 -189 0.32 0.57 0.07 0.05 189 -190 0.30 0.59 0.07 0.05 190 -191 0.29 0.61 0.07 0.05 191 -192 0.51 0.26 0.07 0.05 192 -193 0.49 0.28 0.07 0.05 193 -194 0.47 0.31 0.07 0.05 194 -195 0.46 0.33 0.07 0.05 195 -196 0.44 0.35 0.07 0.05 196 -197 0.42 0.37 0.07 0.05 197 -198 0.41 0.40 0.07 0.05 198 -199 0.39 0.42 0.07 0.05 199 -200 0.37 0.44 0.07 0.05 200 -201 0.36 0.46 0.07 0.05 201 -202 0.34 0.49 0.07 0.05 202 -203 0.32 0.51 0.07 0.05 203 -204 0.31 0.53 0.07 0.05 204 -205 0.29 0.55 0.07 0.05 205 -206 0.28 0.57 0.07 0.05 206 -207 0.27 0.59 0.07 0.05 207 -208 0.48 0.24 0.07 0.05 208 -209 0.47 0.26 0.07 0.05 209 -210 0.45 0.28 0.07 0.05 210 -211 0.43 0.31 0.07 0.05 211 -212 0.41 0.33 0.07 0.05 212 -213 0.40 0.35 0.07 0.05 213 -214 0.38 0.38 0.07 0.05 214 -215 0.37 0.40 0.07 0.05 215 -216 0.35 0.42 0.07 0.05 216 -217 0.33 0.45 0.07 0.05 217 -218 0.32 0.47 0.07 0.05 218 -219 0.30 0.49 0.07 0.05 219 -220 0.28 0.51 0.07 0.05 220 -221 0.27 0.53 0.07 0.05 221 -222 0.25 0.55 0.07 0.05 222 -223 0.24 0.58 0.07 0.05 223 -224 0.46 0.23 0.07 0.05 224 -225 0.45 0.25 0.07 0.05 225 -226 0.43 0.27 0.07 0.05 226 -227 0.41 0.29 0.07 0.05 227 -228 0.39 0.31 0.07 0.05 228 -229 0.38 0.34 0.07 0.05 229 -230 0.36 0.36 0.07 0.05 230 -231 0.34 0.38 0.07 0.05 231 -232 0.33 0.41 0.07 0.05 232 -233 0.31 0.43 0.07 0.05 233 -234 0.29 0.45 0.07 0.05 234 -235 0.28 0.47 0.07 0.05 235 -236 0.26 0.50 0.07 0.05 236 -237 0.25 0.52 0.07 0.05 237 -238 0.24 0.54 0.07 0.05 238 -239 0.22 0.56 0.07 0.05 239 -240 0.44 0.21 0.07 0.05 240 -241 0.42 0.23 0.07 0.05 241 -242 0.41 0.25 0.07 0.05 242 -243 0.39 0.27 0.07 0.05 243 -244 0.37 0.30 0.07 0.05 244 -245 0.35 0.32 0.07 0.05 245 -246 0.33 0.34 0.07 0.05 246 -247 0.32 0.37 0.07 0.05 247 -248 0.30 0.39 0.07 0.05 248 -249 0.28 0.41 0.07 0.05 249 -250 0.27 0.43 0.07 0.05 250 -251 0.25 0.46 0.07 0.05 251 -252 0.24 0.48 0.07 0.05 252 -253 0.23 0.50 0.07 0.05 253 -254 0.21 0.52 0.07 0.05 254 -255 0.20 0.54 0.07 0.05 255 diff --git a/mne/data/image/mni_brain.gif b/mne/data/image/mni_brain.gif deleted file mode 100644 index 3d6cc08edbd..00000000000 Binary files a/mne/data/image/mni_brain.gif and /dev/null differ diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py index 96219aaf621..700dab0608e 100644 --- a/mne/datasets/__init__.py +++ b/mne/datasets/__init__.py @@ -3,44 +3,10 @@ See :ref:`datasets` for more information. """ -from . import fieldtrip_cmc -from . import brainstorm -from . import visual_92_categories -from . import kiloword -from . import eegbci -from . 
import hf_sef -from . import misc -from . import mtrf -from . import sample -from . import somato -from . import multimodal -from . import fnirs_motor -from . import opm -from . import spm_face -from . import testing -from . import _fake -from . import phantom_4dbti -from . import sleep_physionet -from . import limo -from . import refmeg_noise -from . import ssvep -from . import erp_core -from . import epilepsy_ecog -from . import ucl_opm_auditory -from ._fetch import fetch_dataset -from .utils import (_download_all_example_data, fetch_hcp_mmp_parcellation, - fetch_aparc_sub_parcellation, has_dataset) -from ._fsaverage.base import fetch_fsaverage -from ._infant.base import fetch_infant_template -from ._phantom.base import fetch_phantom +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -__all__ = [ - '_download_all_example_data', '_fake', 'brainstorm', 'eegbci', - 'fetch_aparc_sub_parcellation', 'fetch_fsaverage', 'fetch_infant_template', - 'fetch_hcp_mmp_parcellation', 'fieldtrip_cmc', 'hf_sef', 'kiloword', - 'misc', 'mtrf', 'multimodal', 'opm', 'phantom_4dbti', 'sample', - 'sleep_physionet', 'somato', 'spm_face', 'ssvep', 'testing', - 'visual_92_categories', 'limo', 'erp_core', 'epilepsy_ecog', - 'fetch_dataset', 'fetch_phantom', 'has_dataset', 'refmeg_noise', - 'fnirs_motor' -] +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/datasets/__init__.pyi b/mne/datasets/__init__.pyi new file mode 100644 index 00000000000..2f69a1027e5 --- /dev/null +++ b/mne/datasets/__init__.pyi @@ -0,0 +1,78 @@ +__all__ = [ + "_download_all_example_data", + "_fake", + "brainstorm", + "eegbci", + "epilepsy_ecog", + "erp_core", + "eyelink", + "default_path", + "fetch_aparc_sub_parcellation", + "fetch_dataset", + "fetch_fsaverage", + "fetch_hcp_mmp_parcellation", + "fetch_infant_template", + "fetch_phantom", + "fieldtrip_cmc", + "fnirs_motor", + "has_dataset", + "hf_sef", + "kiloword", + "limo", + "misc", + "mtrf", + "multimodal", + "opm", + "phantom_4dbti", + "phantom_kernel", + "phantom_kit", + "refmeg_noise", + "sample", + "sleep_physionet", + "somato", + "spm_face", + "ssvep", + "testing", + "ucl_opm_auditory", + "visual_92_categories", +] +from . import ( + _fake, + brainstorm, + eegbci, + epilepsy_ecog, + erp_core, + eyelink, + fieldtrip_cmc, + fnirs_motor, + hf_sef, + kiloword, + limo, + misc, + mtrf, + multimodal, + opm, + phantom_4dbti, + phantom_kernel, + phantom_kit, + refmeg_noise, + sample, + sleep_physionet, + somato, + spm_face, + ssvep, + testing, + ucl_opm_auditory, + visual_92_categories, +) +from ._fetch import fetch_dataset +from ._fsaverage.base import fetch_fsaverage +from ._infant import fetch_infant_template +from ._phantom.base import fetch_phantom +from .utils import ( + _download_all_example_data, + default_path, + fetch_aparc_sub_parcellation, + fetch_hcp_mmp_parcellation, + has_dataset, +) diff --git a/mne/datasets/_fake/__init__.py b/mne/datasets/_fake/__init__.py index 57b8d214c70..fb116510033 100644 --- a/mne/datasets/_fake/__init__.py +++ b/mne/datasets/_fake/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ """Fake dataset for testing.""" from ._fake import data_path, get_version diff --git a/mne/datasets/_fake/_fake.py b/mne/datasets/_fake/_fake.py index 61ef7678862..e983153ef31 100644 --- a/mne/datasets/_fake/_fake.py +++ b/mne/datasets/_fake/_fake.py @@ -1,28 +1,30 @@ -# Authors: Alexandre Gramfort -# Martin Luessi -# Eric Larson -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. from ...utils import verbose -from ..utils import (_data_path_doc, _download_mne_dataset, - _get_version, _version_doc) +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc @verbose -def data_path(path=None, force_update=False, update_path=False, - download=True, *, verbose=None): # noqa: D103 +def data_path( + path=None, force_update=False, update_path=False, download=True, *, verbose=None +): # noqa: D103 return _download_mne_dataset( - name='fake', processor='untar', path=path, - force_update=force_update, update_path=update_path, - download=download) + name="fake", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) -data_path.__doc__ = _data_path_doc.format(name='fake', - conf='MNE_DATASETS_FAKE_PATH') +data_path.__doc__ = _data_path_doc.format(name="fake", conf="MNE_DATASETS_FAKE_PATH") def get_version(): # noqa: D103 - return _get_version('fake') + return _get_version("fake") -get_version.__doc__ = _version_doc.format(name='fake') +get_version.__doc__ = _version_doc.format(name="fake") diff --git a/mne/datasets/_fetch.py b/mne/datasets/_fetch.py index 87cd1664534..deb72488b86 100644 --- a/mne/datasets/_fetch.py +++ b/mne/datasets/_fetch.py @@ -1,25 +1,32 @@ -# Authors: Adam Li -# -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from __future__ import annotations # only needed for Python ≤ 3.9 -import logging -import sys import os import os.path as op +import sys +import time from pathlib import Path from shutil import rmtree from .. import __version__ as mne_version -from ..utils import logger, warn, _safe_input +from ..fixes import _compare_version +from ..utils import _safe_input, logger, warn from .config import ( - _bst_license_text, + MISC_VERSIONED, RELEASES, TESTING_VERSIONED, - MISC_VERSIONED, + _bst_license_text, +) +from .utils import ( + _dataset_version, + _do_path_update, + _downloader_params, + _get_path, + _log_time_size, ) -from .utils import _dataset_version, _do_path_update, _get_path -from ..fixes import _compare_version - _FAKE_VERSION = None # used for monkeypatching while testing versioning @@ -36,7 +43,7 @@ def fetch_dataset( accept=False, auth=None, token=None, -): +) -> Path | tuple[Path, str]: """Fetch an MNE-compatible dataset using pooch. Parameters @@ -50,7 +57,7 @@ def fetch_dataset( What to do after downloading the file. ``"unzip"`` and ``"untar"`` will decompress the downloaded file in place; for custom extraction (e.g., only extracting certain files from the archive) pass an instance of - :class:`pooch.Unzip` or :class:`pooch.Untar`. If ``None`` (the + ``pooch.Unzip`` or ``pooch.Untar``. If ``None`` (the default), the files are left as-is. path : None | str Directory in which to put the dataset. If ``None``, the dataset @@ -81,10 +88,10 @@ def fetch_dataset( Default is ``False``. 
auth : tuple | None Optional authentication tuple containing the username and - password/token, passed to :class:`pooch.HTTPDownloader` (e.g., + password/token, passed to ``pooch.HTTPDownloader`` (e.g., ``auth=('foo', 012345)``). token : str | None - Optional authentication token passed to :class:`pooch.HTTPDownloader`. + Optional authentication token passed to ``pooch.HTTPDownloader``. Returns ------- @@ -119,8 +126,8 @@ def fetch_dataset( {'dataset_name': 'sample', 'archive_name': 'MNE-sample-data-processed.tar.gz', - 'hash': 'md5:12b75d1cb7df9dfb4ad73ed82f61094f', - 'url': 'https://osf.io/86qa2/download?version=5', + 'hash': 'md5:e8f30c4516abdc12a0c08e6bae57409c', + 'url': 'https://osf.io/download/86qa2?version=6', 'folder_name': 'MNE-sample-data', 'config_key': 'MNE_DATASETS_SAMPLE_PATH'} @@ -131,11 +138,12 @@ def fetch_dataset( """ # noqa E501 import pooch + t0 = time.time() + if auth is not None: if len(auth) != 2: raise RuntimeError( - "auth should be a 2-tuple consisting " - "of a username and password/token." + "auth should be a 2-tuple consisting of a username and password/token." ) # processor to uncompress files @@ -151,7 +159,7 @@ def fetch_dataset( names = [params["dataset_name"] for params in dataset_params] name = names[0] dataset_dict = dataset_params[0] - config_key = dataset_dict.get('config_key', None) + config_key = dataset_dict.get("config_key", None) folder_name = dataset_dict["folder_name"] # get download path for specific dataset @@ -173,8 +181,9 @@ def fetch_dataset( # get the version of the dataset and then check if the version is outdated data_version = _dataset_version(final_path, name) - outdated = (want_version is not None and - _compare_version(want_version, '>', data_version)) + outdated = want_version is not None and _compare_version( + want_version, ">", data_version + ) if outdated: logger.info( @@ -186,16 +195,13 @@ def fetch_dataset( # return empty string if outdated dataset and we don't want to download if (not force_update) and outdated and not download: logger.info( - 'Dataset out of date but force_update=False and download=False, ' - 'returning empty data_path') + "Dataset out of date but force_update=False and download=False, " + "returning empty data_path" + ) return (empty, data_version) if return_version else empty # reasons to bail early (hf_sef has separate code for this): - if ( - (not force_update) - and (not outdated) - and (not name.startswith("hf_sef_")) - ): + if (not force_update) and (not outdated) and (not name.startswith("hf_sef_")): # ...if target folder exists (otherwise pooch downloads every # time because we don't save the archive files after unpacking, so # pooch can't check its checksum) @@ -213,20 +219,13 @@ def fetch_dataset( else: # If they don't have stdin, just accept the license # https://github.com/mne-tools/mne-python/issues/8513#issuecomment-726823724 # noqa: E501 - answer = _safe_input( - "%sAgree (y/[n])? " % _bst_license_text, use="y") + answer = _safe_input(f"{_bst_license_text}Agree (y/[n])? 
", use="y") if answer.lower() != "y": - raise RuntimeError( - "You must agree to the license to use this " "dataset" - ) + raise RuntimeError("You must agree to the license to use this dataset") # downloader & processors - download_params = dict(progressbar=logger.level <= logging.INFO) + download_params = _downloader_params(auth=auth, token=token) if name == "fake": download_params["progressbar"] = False - if auth is not None: - download_params["auth"] = auth - if token is not None: - download_params["headers"] = {"Authorization": f"token {token}"} downloader = pooch.HTTPDownloader(**download_params) # make mappings from archive names to urls and to checksums @@ -241,8 +240,9 @@ def fetch_dataset( registry[archive_name] = dataset_hash # create the download manager + use_path = final_path if processor is None else Path(path) fetcher = pooch.create( - path=str(final_path) if processor is None else path, + path=str(use_path), base_url="", # Full URLs are given in the `urls` dict. version=None, # Data versioning is decoupled from MNE-Python version. urls=urls, @@ -252,6 +252,7 @@ def fetch_dataset( # use our logger level for pooch's logger too pooch.get_logger().setLevel(logger.getEffectiveLevel()) + sz = 0 for idx in range(len(names)): # fetch and unpack the data @@ -262,15 +263,18 @@ def fetch_dataset( ) except ValueError as err: err = str(err) - if 'hash of downloaded file' in str(err): + if "hash of downloaded file" in str(err): raise ValueError( - f'{err} Consider using force_update=True to force ' - 'the dataset to be downloaded again.') from None + f"{err} Consider using force_update=True to force " + "the dataset to be downloaded again." + ) from None else: raise + fname = use_path / archive_name + sz += fname.stat().st_size # after unpacking, remove the archive file if processor is not None: - os.remove(op.join(path, archive_name)) + fname.unlink() # remove version number from "misc" and "testing" datasets folder names if name == "misc": @@ -289,14 +293,14 @@ def fetch_dataset( data_version = _dataset_version(path, name) # 0.7 < 0.7.git should be False, therefore strip if check_version and ( - _compare_version(data_version, '<', mne_version.strip(".git")) + _compare_version(data_version, "<", mne_version.strip(".git")) ): + # OK to `nosec` because it's false positive (misidentified as SQL) warn( - "The {name} dataset (version {current}) is older than " - "mne-python (version {newest}). If the examples fail, " - "you may need to update the {name} dataset by using " - "mne.datasets.{name}.data_path(force_update=True)".format( - name=name, current=data_version, newest=mne_version - ) + f"The {name} dataset (version {data_version}) is older than " + f"mne-python (version {mne_version}). If the examples fail, " + f"you may need to update the {name} dataset by using " + f"mne.datasets.{name}.data_path(force_update=True)" # nosec B608 ) + _log_time_size(t0, sz) return (final_path, data_version) if return_version else final_path diff --git a/mne/datasets/_fsaverage/__init__.py b/mne/datasets/_fsaverage/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/datasets/_fsaverage/__init__.py +++ b/mne/datasets/_fsaverage/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
diff --git a/mne/datasets/_fsaverage/base.py b/mne/datasets/_fsaverage/base.py index b22d50ae7ab..5dce5b8fca4 100644 --- a/mne/datasets/_fsaverage/base.py +++ b/mne/datasets/_fsaverage/base.py @@ -1,19 +1,18 @@ -# -*- coding: utf-8 -*- -# Authors: Eric Larson -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import os -import os.path as op +from pathlib import Path -from ..utils import _manifest_check_download, _get_path -from ...utils import verbose, get_subjects_dir, set_config +from ...utils import get_subjects_dir, set_config, verbose +from ..utils import _get_path, _manifest_check_download -FSAVERAGE_MANIFEST_PATH = op.dirname(__file__) +FSAVERAGE_MANIFEST_PATH = Path(__file__).parent @verbose def fetch_fsaverage(subjects_dir=None, *, verbose=None): - """Fetch and update fsaverage. + """Fetch and update :ref:`fsaverage `. Parameters ---------- @@ -26,9 +25,12 @@ def fetch_fsaverage(subjects_dir=None, *, verbose=None): Returns ------- - fs_dir : str + fs_dir : Path The fsaverage directory. - (essentially ``subjects_dir + '/fsaverage'``). + (essentially ``subjects_dir / 'fsaverage'``). + + .. versionchanged:: 1.8 + A :class:`pathlib.Path` object is returned instead of a string. Notes ----- @@ -65,20 +67,20 @@ def fetch_fsaverage(subjects_dir=None, *, verbose=None): # fid.write('\n'.join(names)) # subjects_dir = _set_montage_coreg_path(subjects_dir) - subjects_dir = op.abspath(op.expanduser(subjects_dir)) - fs_dir = op.join(subjects_dir, 'fsaverage') - os.makedirs(fs_dir, exist_ok=True) + subjects_dir = subjects_dir.expanduser().absolute() + fs_dir = subjects_dir / "fsaverage" + fs_dir.mkdir(parents=True, exist_ok=True) _manifest_check_download( - manifest_path=op.join(FSAVERAGE_MANIFEST_PATH, 'root.txt'), - destination=op.join(subjects_dir), - url='https://osf.io/3bxqt/download?version=2', - hash_='5133fe92b7b8f03ae19219d5f46e4177', + manifest_path=FSAVERAGE_MANIFEST_PATH / "root.txt", + destination=subjects_dir, + url="https://osf.io/download/3bxqt?version=2", + hash_="5133fe92b7b8f03ae19219d5f46e4177", ) _manifest_check_download( - manifest_path=op.join(FSAVERAGE_MANIFEST_PATH, 'bem.txt'), - destination=op.join(subjects_dir, 'fsaverage'), - url='https://osf.io/7ve8g/download?version=4', - hash_='b31509cdcf7908af6a83dc5ee8f49fb1', + manifest_path=FSAVERAGE_MANIFEST_PATH / "bem.txt", + destination=subjects_dir / "fsaverage", + url="https://osf.io/download/7ve8g?version=4", + hash_="b31509cdcf7908af6a83dc5ee8f49fb1", ) return fs_dir @@ -86,48 +88,16 @@ def fetch_fsaverage(subjects_dir=None, *, verbose=None): def _get_create_subjects_dir(subjects_dir): subjects_dir = get_subjects_dir(subjects_dir, raise_error=False) if subjects_dir is None: - subjects_dir = _get_path(None, 'MNE_DATA', 'montage coregistration') - subjects_dir = op.join(subjects_dir, 'MNE-fsaverage-data') - os.makedirs(subjects_dir, exist_ok=True) - else: - subjects_dir = str(subjects_dir) + subjects_dir = _get_path(None, "MNE_DATA", "montage coregistration") + subjects_dir = subjects_dir / "MNE-fsaverage-data" + subjects_dir.mkdir(parents=True, exist_ok=True) return subjects_dir def _set_montage_coreg_path(subjects_dir=None): - """Set a subject directory suitable for montage(-only) coregistration. - - Parameters - ---------- - subjects_dir : path-like | None - The path to use as the subjects directory in the MNE-Python - config file. 
None will use the existing config variable (i.e., - will not change anything), and if it does not exist, will use - ``~/mne_data/MNE-fsaverage-data``. - - Returns - ------- - subjects_dir : str - The subjects directory that was used. - - See Also - -------- - mne.datasets.fetch_fsaverage - mne.get_config - mne.set_config - - Notes - ----- - If you plan to only do EEG-montage based coregistrations with fsaverage - without any MRI warping, this function can facilitate the process. - Essentially it sets the default value for ``subjects_dir`` in MNE - functions to be ``~/mne_data/MNE-fsaverage-data`` (assuming it has - not already been set to some other value). - - .. versionadded:: 0.18 - """ + """Set a subject directory suitable for montage(-only) coregistration.""" subjects_dir = _get_create_subjects_dir(subjects_dir) old_subjects_dir = get_subjects_dir(None, raise_error=False) if old_subjects_dir is None: - set_config('SUBJECTS_DIR', subjects_dir) + set_config("SUBJECTS_DIR", subjects_dir) return subjects_dir diff --git a/mne/datasets/_infant/__init__.py b/mne/datasets/_infant/__init__.py new file mode 100644 index 00000000000..d787cc63511 --- /dev/null +++ b/mne/datasets/_infant/__init__.py @@ -0,0 +1,5 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .base import fetch_infant_template diff --git a/mne/datasets/_infant/base.py b/mne/datasets/_infant/base.py index fe9032dc44f..6c4d038aa72 100644 --- a/mne/datasets/_infant/base.py +++ b/mne/datasets/_infant/base.py @@ -1,16 +1,15 @@ -# -*- coding: utf-8 -*- -# Authors: Eric Larson -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import os -import os.path as op +from pathlib import Path +from ...utils import _check_option, _validate_type, get_subjects_dir, verbose from ..utils import _manifest_check_download -from ...utils import verbose, get_subjects_dir, _check_option, _validate_type -_AGES = '2wk 1mo 2mo 3mo 4.5mo 6mo 7.5mo 9mo 10.5mo 12mo 15mo 18mo 2yr' +_AGES = "2wk 1mo 2mo 3mo 4.5mo 6mo 7.5mo 9mo 10.5mo 12mo 15mo 18mo 2yr" # https://github.com/christian-oreilly/infant_template_paper/releases -_ORIGINAL_URL = 'https://github.com/christian-oreilly/infant_template_paper/releases/download/v0.1-alpha/%7Bsubject%7D.zip' # noqa: E501 +_ORIGINAL_URL = "https://github.com/christian-oreilly/infant_template_paper/releases/download/v0.1-alpha/%7Bsubject%7D.zip" # noqa: E501 # Formatted the same way as md5sum *.zip on Ubuntu: _ORIGINAL_HASHES = """ 851737d5f8f246883f2aef9819c6ec29 ANTS10-5Months3T.zip @@ -27,7 +26,7 @@ 75fe37a1bc80ed6793a8abb47681d5ab ANTS7-5Months3T.zip 790f7dba0a264262e6c1c2dfdf216215 ANTS9-0Months3T.zip """ -_MANIFEST_PATH = op.dirname(__file__) +_MANIFEST_PATH = Path(__file__).parent @verbose @@ -72,23 +71,24 @@ def fetch_infant_template(age, subjects_dir=None, *, verbose=None): # ... names = sorted(name for name in zip.namelist() if not zipfile.Path(zip, name).is_dir()) # noqa: E501 # ... with open(f'{name}.txt', 'w') as fid: # ... 
fid.write('\n'.join(names)) - _validate_type(age, str, 'age') - _check_option('age', age, _AGES.split()) + _validate_type(age, str, "age") + _check_option("age", age, _AGES.split()) subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) - unit = dict(wk='Weeks', mo='Months', yr='Years')[age[-2:]] - first = age[:-2].split('.')[0] - dash = '-5' if '.5' in age else '-0' - subject = f'ANTS{first}{dash}{unit}3T' + unit = dict(wk="Weeks", mo="Months", yr="Years")[age[-2:]] + first = age[:-2].split(".")[0] + dash = "-5" if ".5" in age else "-0" + subject = f"ANTS{first}{dash}{unit}3T" # Actually get and create the files - subj_dir = subjects_dir / subject - os.makedirs(subj_dir, exist_ok=True) + subject_dir = subjects_dir / subject + subject_dir.mkdir(parents=True, exist_ok=True) # .zip -> hash mapping - orig_hashes = dict(line.strip().split()[::-1] - for line in _ORIGINAL_HASHES.strip().splitlines()) + orig_hashes = dict( + line.strip().split()[::-1] for line in _ORIGINAL_HASHES.strip().splitlines() + ) _manifest_check_download( - manifest_path=op.join(_MANIFEST_PATH, f'{subject}.txt'), - destination=subj_dir, + manifest_path=_MANIFEST_PATH / f"{subject}.txt", + destination=subject_dir, url=_ORIGINAL_URL.format(subject=subject), - hash_=orig_hashes[f'{subject}.zip'], + hash_=orig_hashes[f"{subject}.zip"], ) return subject diff --git a/mne/datasets/_phantom/__init__.py b/mne/datasets/_phantom/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/datasets/_phantom/__init__.py +++ b/mne/datasets/_phantom/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/datasets/_phantom/base.py b/mne/datasets/_phantom/base.py index d420ca777e3..562e3f9538f 100644 --- a/mne/datasets/_phantom/base.py +++ b/mne/datasets/_phantom/base.py @@ -1,14 +1,13 @@ -# -*- coding: utf-8 -*- -# Authors: Eric Larson -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import os -import os.path as op +from pathlib import Path +from ...utils import _check_option, _validate_type, get_subjects_dir, verbose from ..utils import _manifest_check_download -from ...utils import verbose, get_subjects_dir, _check_option, _validate_type -PHANTOM_MANIFEST_PATH = op.dirname(__file__) +PHANTOM_MANIFEST_PATH = Path(__file__).parent @verbose @@ -24,7 +23,7 @@ def fetch_phantom(kind, subjects_dir=None, *, verbose=None): Returns ------- - subject_dir : str + subject_dir : pathlib.Path The resulting phantom subject directory. See Also @@ -44,19 +43,21 @@ def fetch_phantom(kind, subjects_dir=None, *, verbose=None): .. 
versionadded:: 0.24 """ phantoms = dict( - otaniemi=dict(url='https://osf.io/j5czy/download?version=1', - hash='42d17db5b1db3e30327ffb4cf2649de8'), + otaniemi=dict( + url="https://osf.io/download/j5czy?version=1", + hash="42d17db5b1db3e30327ffb4cf2649de8", + ), ) - _validate_type(kind, str, 'kind') - _check_option('kind', kind, list(phantoms)) + _validate_type(kind, str, "kind") + _check_option("kind", kind, list(phantoms)) subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) - subject = f'phantom_{kind}' + subject = f"phantom_{kind}" subject_dir = subjects_dir / subject - os.makedirs(subject_dir, exist_ok=True) + subject_dir.mkdir(parents=True, exist_ok=True) _manifest_check_download( - manifest_path=op.join(PHANTOM_MANIFEST_PATH, f'{subject}.txt'), + manifest_path=PHANTOM_MANIFEST_PATH / f"{subject}.txt", destination=subjects_dir, - url=phantoms[kind]['url'], - hash_=phantoms[kind]['hash'], + url=phantoms[kind]["url"], + hash_=phantoms[kind]["hash"], ) return subject_dir diff --git a/mne/datasets/brainstorm/__init__.py b/mne/datasets/brainstorm/__init__.py index 8dcf9b79811..90a0eaa8e3e 100644 --- a/mne/datasets/brainstorm/__init__.py +++ b/mne/datasets/brainstorm/__init__.py @@ -1,4 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Brainstorm datasets.""" -from . import (bst_raw, bst_resting, bst_auditory, bst_phantom_ctf, - bst_phantom_elekta) +from . import bst_raw, bst_resting, bst_auditory, bst_phantom_ctf, bst_phantom_elekta diff --git a/mne/datasets/brainstorm/bst_auditory.py b/mne/datasets/brainstorm/bst_auditory.py index 41c2f078671..230fd679870 100644 --- a/mne/datasets/brainstorm/bst_auditory.py +++ b/mne/datasets/brainstorm/bst_auditory.py @@ -1,9 +1,14 @@ -# Authors: Mainak Jas -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ from ...utils import verbose -from ..utils import (_get_version, _version_doc, - _data_path_doc_accept, _download_mne_dataset) +from ..utils import ( + _data_path_doc_accept, + _download_mne_dataset, + _get_version, + _version_doc, +) _description = """ URL: http://neuroimage.usc.edu/brainstorm/DatasetAuditory @@ -22,26 +27,40 @@ @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, accept=False, *, verbose=None): # noqa: D103 +def data_path( + path=None, + force_update=False, + update_path=True, + download=True, + accept=False, + *, + verbose=None, +): # noqa: D103 return _download_mne_dataset( - name='bst_auditory', processor='nested_untar', path=path, - force_update=force_update, update_path=update_path, - download=download, accept=accept) + name="bst_auditory", + processor="nested_untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ) _data_path_doc = _data_path_doc_accept.format( - name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH') -_data_path_doc = _data_path_doc.replace('brainstorm dataset', - 'brainstorm (bst_auditory) dataset') + name="brainstorm", conf="MNE_DATASETS_BRAINSTORM_DATA_PATH" +) +_data_path_doc = _data_path_doc.replace( + "brainstorm dataset", "brainstorm (bst_auditory) dataset" +) data_path.__doc__ = _data_path_doc def get_version(): # noqa: D103 - return _get_version('bst_auditory') + return _get_version("bst_auditory") -get_version.__doc__ = _version_doc.format(name='brainstorm') +get_version.__doc__ = _version_doc.format(name="brainstorm") def description(): diff --git a/mne/datasets/brainstorm/bst_phantom_ctf.py b/mne/datasets/brainstorm/bst_phantom_ctf.py index 87300a82971..328a7748924 100644 --- a/mne/datasets/brainstorm/bst_phantom_ctf.py +++ b/mne/datasets/brainstorm/bst_phantom_ctf.py @@ -1,9 +1,14 @@ -# Authors: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ from ...utils import verbose -from ..utils import (_get_version, _version_doc, - _data_path_doc_accept, _download_mne_dataset) +from ..utils import ( + _data_path_doc_accept, + _download_mne_dataset, + _get_version, + _version_doc, +) _description = """ URL: http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomCtf @@ -11,26 +16,40 @@ @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, accept=False, *, verbose=None): # noqa: D103 +def data_path( + path=None, + force_update=False, + update_path=True, + download=True, + accept=False, + *, + verbose=None, +): # noqa: D103 return _download_mne_dataset( - name='bst_phantom_ctf', processor='nested_untar', path=path, - force_update=force_update, update_path=update_path, - download=download, accept=accept) + name="bst_phantom_ctf", + processor="nested_untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ) _data_path_doc = _data_path_doc_accept.format( - name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH') -_data_path_doc = _data_path_doc.replace('brainstorm dataset', - 'brainstorm (bst_phantom_ctf) dataset') + name="brainstorm", conf="MNE_DATASETS_BRAINSTORM_DATA_PATH" +) +_data_path_doc = _data_path_doc.replace( + "brainstorm dataset", "brainstorm (bst_phantom_ctf) dataset" +) data_path.__doc__ = _data_path_doc def get_version(): # noqa: D103 - return _get_version('bst_phantom_ctf') + return _get_version("bst_phantom_ctf") -get_version.__doc__ = _version_doc.format(name='brainstorm') +get_version.__doc__ = _version_doc.format(name="brainstorm") def description(): diff --git a/mne/datasets/brainstorm/bst_phantom_elekta.py b/mne/datasets/brainstorm/bst_phantom_elekta.py index 40f92661085..1a7e3ac1f31 100644 --- a/mne/datasets/brainstorm/bst_phantom_elekta.py +++ b/mne/datasets/brainstorm/bst_phantom_elekta.py @@ -1,37 +1,55 @@ -# Authors: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause -from ...utils import verbose -from ..utils import (_get_version, _version_doc, - _data_path_doc_accept, _download_mne_dataset) +# Copyright the MNE-Python contributors. 
-_description = u""" +from ...utils import verbose +from ..utils import ( + _data_path_doc_accept, + _download_mne_dataset, + _get_version, + _version_doc, +) + +_description = """ URL: http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta """ @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, accept=False, *, verbose=None): # noqa: D103 +def data_path( + path=None, + force_update=False, + update_path=True, + download=True, + accept=False, + *, + verbose=None, +): # noqa: D103 return _download_mne_dataset( - name='bst_phantom_elekta', processor='nested_untar', path=path, - force_update=force_update, update_path=update_path, - download=download, accept=accept) + name="bst_phantom_elekta", + processor="nested_untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ) _data_path_doc = _data_path_doc_accept.format( - name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH') -_data_path_doc = _data_path_doc.replace('brainstorm dataset', - 'brainstorm (bst_phantom_elekta) ' - 'dataset') + name="brainstorm", conf="MNE_DATASETS_BRAINSTORM_DATA_PATH" +) +_data_path_doc = _data_path_doc.replace( + "brainstorm dataset", "brainstorm (bst_phantom_elekta) dataset" +) data_path.__doc__ = _data_path_doc def get_version(): # noqa: D103 - return _get_version('bst_phantom_elekta') + return _get_version("bst_phantom_elekta") -get_version.__doc__ = _version_doc.format(name='brainstorm') +get_version.__doc__ = _version_doc.format(name="brainstorm") def description(): diff --git a/mne/datasets/brainstorm/bst_raw.py b/mne/datasets/brainstorm/bst_raw.py index 0616ca176d5..13a4aa3e5cc 100644 --- a/mne/datasets/brainstorm/bst_raw.py +++ b/mne/datasets/brainstorm/bst_raw.py @@ -1,14 +1,19 @@ -# Authors: Mainak Jas -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause -from functools import partial +# Copyright the MNE-Python contributors. 
-from ...utils import verbose, get_config -from ..utils import (has_dataset, _get_version, _version_doc, - _data_path_doc_accept, _download_mne_dataset) +from functools import partial +from ...utils import get_config, verbose +from ..utils import ( + _data_path_doc_accept, + _download_mne_dataset, + _get_version, + _version_doc, + has_dataset, +) -has_brainstorm_data = partial(has_dataset, name='bst_raw') +has_brainstorm_data = partial(has_dataset, name="bst_raw") _description = """ URL: http://neuroimage.usc.edu/brainstorm/DatasetMedianNerveCtf @@ -26,26 +31,40 @@ @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, accept=False, *, verbose=None): # noqa: D103 +def data_path( + path=None, + force_update=False, + update_path=True, + download=True, + accept=False, + *, + verbose=None, +): # noqa: D103 return _download_mne_dataset( - name='bst_raw', processor='nested_untar', path=path, - force_update=force_update, update_path=update_path, - download=download, accept=accept) + name="bst_raw", + processor="nested_untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ) _data_path_doc = _data_path_doc_accept.format( - name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH') -_data_path_doc = _data_path_doc.replace('brainstorm dataset', - 'brainstorm (bst_raw) dataset') + name="brainstorm", conf="MNE_DATASETS_BRAINSTORM_DATA_PATH" +) +_data_path_doc = _data_path_doc.replace( + "brainstorm dataset", "brainstorm (bst_raw) dataset" +) data_path.__doc__ = _data_path_doc def get_version(): # noqa: D103 - return _get_version('bst_raw') + return _get_version("bst_raw") -get_version.__doc__ = _version_doc.format(name='brainstorm') +get_version.__doc__ = _version_doc.format(name="brainstorm") def description(): # noqa: D103 @@ -55,8 +74,7 @@ def description(): # noqa: D103 def _skip_bstraw_data(): - skip_testing = (get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') == - 'true') + skip_testing = get_config("MNE_SKIP_TESTING_DATASET_TESTS", "false") == "true" skip = skip_testing or not has_brainstorm_data() return skip @@ -64,5 +82,7 @@ def _skip_bstraw_data(): def requires_bstraw_data(func): """Skip testing data test.""" import pytest - return pytest.mark.skipif(_skip_bstraw_data(), - reason='Requires brainstorm dataset')(func) + + return pytest.mark.skipif( + _skip_bstraw_data(), reason="Requires brainstorm dataset" + )(func) diff --git a/mne/datasets/brainstorm/bst_resting.py b/mne/datasets/brainstorm/bst_resting.py index e0eb226e863..d740cf1a6bc 100644 --- a/mne/datasets/brainstorm/bst_resting.py +++ b/mne/datasets/brainstorm/bst_resting.py @@ -1,9 +1,14 @@ -# Authors: Mainak Jas -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ from ...utils import verbose -from ..utils import (_get_version, _version_doc, - _data_path_doc_accept, _download_mne_dataset) +from ..utils import ( + _data_path_doc_accept, + _download_mne_dataset, + _get_version, + _version_doc, +) _description = """ URL: http://neuroimage.usc.edu/brainstorm/DatasetResting @@ -14,26 +19,40 @@ @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, accept=False, *, verbose=None): # noqa: D103 +def data_path( + path=None, + force_update=False, + update_path=True, + download=True, + accept=False, + *, + verbose=None, +): # noqa: D103 return _download_mne_dataset( - name='bst_resting', processor='nested_untar', path=path, - force_update=force_update, update_path=update_path, - download=download, accept=accept) + name="bst_resting", + processor="nested_untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ) _data_path_doc = _data_path_doc_accept.format( - name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH') -_data_path_doc = _data_path_doc.replace('brainstorm dataset', - 'brainstorm (bst_resting) dataset') + name="brainstorm", conf="MNE_DATASETS_BRAINSTORM_DATA_PATH" +) +_data_path_doc = _data_path_doc.replace( + "brainstorm dataset", "brainstorm (bst_resting) dataset" +) data_path.__doc__ = _data_path_doc def get_version(): # noqa: D103 - return _get_version('bst_resting') + return _get_version("bst_resting") -get_version.__doc__ = _version_doc.format(name='brainstorm') +get_version.__doc__ = _version_doc.format(name="brainstorm") def description(): diff --git a/mne/datasets/config.py b/mne/datasets/config.py index c9431a9837e..10d3dea7fa4 100644 --- a/mne/datasets/config.py +++ b/mne/datasets/config.py @@ -1,7 +1,6 @@ -# Authors: Adam Li -# Daniel McCloy -# -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. _bst_license_text = """ @@ -86,10 +85,15 @@ # To update the `testing` or `misc` datasets, push or merge commits to their # respective repos, and make a new release of the dataset on GitHub. Then # update the checksum in the MNE_DATASETS dict below, and change version -# here: ↓↓↓↓↓ ↓↓↓ -RELEASES = dict(testing='0.142', misc='0.24') -TESTING_VERSIONED = f'mne-testing-data-{RELEASES["testing"]}' -MISC_VERSIONED = f'mne-misc-data-{RELEASES["misc"]}' +# here: ↓↓↓↓↓↓↓↓ +RELEASES = dict( + testing="0.168", + misc="0.27", + phantom_kit="0.2", + ucl_opm_auditory="0.2", +) +TESTING_VERSIONED = f"mne-testing-data-{RELEASES['testing']}" +MISC_VERSIONED = f"mne-misc-data-{RELEASES['misc']}" # To update any other dataset besides `testing` or `misc`, upload the new # version of the data archive itself (e.g., to https://osf.io or wherever) and @@ -109,229 +113,258 @@ # of the downloaded dataset (ex: "MNE_DATASETS_EEGBCI_PATH"). # Testing and misc are at the top as they're updated most often -MNE_DATASETS['testing'] = dict( - archive_name=f'{TESTING_VERSIONED}.tar.gz', - hash='md5:44b857ddb34aefd752e4f5b19d625dee', - url=('https://codeload.github.com/mne-tools/mne-testing-data/' - f'tar.gz/{RELEASES["testing"]}'), +MNE_DATASETS["testing"] = dict( + archive_name=f"{TESTING_VERSIONED}.tar.gz", + hash="md5:7782a64f170b9435b0fd126862b0cf63", + url=( + "https://codeload.github.com/mne-tools/mne-testing-data/" + f"tar.gz/{RELEASES['testing']}" + ), # In case we ever have to resort to osf.io again... 
# archive_name='mne-testing-data.tar.gz', # hash='md5:c805a5fed8ca46f723e7eec828d90824', - # url='https://osf.io/dqfgy/download?version=1', # 0.136 - folder_name='MNE-testing-data', - config_key='MNE_DATASETS_TESTING_PATH', + # url='https://osf.io/download/dqfgy?version=1', # 0.136 + folder_name="MNE-testing-data", + config_key="MNE_DATASETS_TESTING_PATH", +) +MNE_DATASETS["misc"] = dict( + archive_name=f"{MISC_VERSIONED}.tar.gz", # 'mne-misc-data', + hash="md5:e343d3a00cb49f8a2f719d14f4758afe", + url=( + f"https://codeload.github.com/mne-tools/mne-misc-data/tar.gz/%7BRELEASES['misc']}" + ), + folder_name="MNE-misc-data", + config_key="MNE_DATASETS_MISC_PATH", ) -MNE_DATASETS['misc'] = dict( - archive_name=f'{MISC_VERSIONED}.tar.gz', # 'mne-misc-data', - hash='md5:eb017a919939511932bd683f26f97490', - url=('https://codeload.github.com/mne-tools/mne-misc-data/tar.gz/' - f'{RELEASES["misc"]}'), - folder_name='MNE-misc-data', - config_key='MNE_DATASETS_MISC_PATH' + +MNE_DATASETS["fnirs_motor"] = dict( + archive_name="MNE-fNIRS-motor-data.tgz", + hash="md5:c4935d19ddab35422a69f3326a01fef8", + url="https://osf.io/download/dj3eh?version=1", + folder_name="MNE-fNIRS-motor-data", + config_key="MNE_DATASETS_FNIRS_MOTOR_PATH", +) + +MNE_DATASETS["ucl_opm_auditory"] = dict( + archive_name="auditory_OPM_stationary.zip", + hash="md5:b2d69aa2d656b960bd0c18968dc1a14d", + url="https://osf.io/download/tp324?version=1", # original is mwrt3 + folder_name="auditory_OPM_stationary", + config_key="MNE_DATASETS_UCL_OPM_AUDITORY_PATH", ) -MNE_DATASETS['fnirs_motor'] = dict( - archive_name='MNE-fNIRS-motor-data.tgz', - hash='md5:c4935d19ddab35422a69f3326a01fef8', - url='https://osf.io/dj3eh/download?version=1', - folder_name='MNE-fNIRS-motor-data', - config_key='MNE_DATASETS_FNIRS_MOTOR_PATH', +MNE_DATASETS["kiloword"] = dict( + archive_name="MNE-kiloword-data.tar.gz", + hash="md5:3a124170795abbd2e48aae8727e719a8", + url="https://osf.io/download/qkvf9?version=1", + folder_name="MNE-kiloword-data", + config_key="MNE_DATASETS_KILOWORD_PATH", ) -MNE_DATASETS['ucl_opm_auditory'] = dict( - archive_name='auditory_OPM_stationary.zip', - hash='md5:9ed0d8d554894542b56f8e7c4c0041fe', - url='https://osf.io/download/mwrt3/?version=1', - folder_name='auditory_OPM_stationary', - config_key='MNE_DATASETS_UCL_OPM_AUDITORY_PATH', +MNE_DATASETS["multimodal"] = dict( + archive_name="MNE-multimodal-data.tar.gz", + hash="md5:26ec847ae9ab80f58f204d09e2c08367", + url="https://ndownloader.figshare.com/files/5999598", + folder_name="MNE-multimodal-data", + config_key="MNE_DATASETS_MULTIMODAL_PATH", ) -MNE_DATASETS['kiloword'] = dict( - archive_name='MNE-kiloword-data.tar.gz', - hash='md5:3a124170795abbd2e48aae8727e719a8', - url='https://osf.io/qkvf9/download?version=1', - folder_name='MNE-kiloword-data', - config_key='MNE_DATASETS_KILOWORD_PATH', +MNE_DATASETS["opm"] = dict( + archive_name="MNE-OPM-data.tar.gz", + hash="md5:370ad1dcfd5c47e029e692c85358a374", + url="https://osf.io/download/p6ae7?version=2", + folder_name="MNE-OPM-data", + config_key="MNE_DATASETS_OPM_PATH", ) -MNE_DATASETS['multimodal'] = dict( - archive_name='MNE-multimodal-data.tar.gz', - hash='md5:26ec847ae9ab80f58f204d09e2c08367', - url='https://ndownloader.figshare.com/files/5999598', - folder_name='MNE-multimodal-data', - config_key='MNE_DATASETS_MULTIMODAL_PATH', +MNE_DATASETS["phantom_kit"] = dict( + archive_name="MNE-phantom-KIT-data.tar.gz", + 
hash="md5:7bfdf40bbeaf17a66c99c695640e0740", + url="https://osf.io/download/fb6ya?version=1", + folder_name="MNE-phantom-KIT-data", + config_key="MNE_DATASETS_PHANTOM_KIT_PATH", ) -MNE_DATASETS['opm'] = dict( - archive_name='MNE-OPM-data.tar.gz', - hash='md5:370ad1dcfd5c47e029e692c85358a374', - url='https://osf.io/p6ae7/download?version=2', - folder_name='MNE-OPM-data', - config_key='MNE_DATASETS_OPM_PATH', +MNE_DATASETS["phantom_4dbti"] = dict( + archive_name="MNE-phantom-4DBTi.zip", + hash="md5:938a601440f3ffa780d20a17bae039ff", + url="https://osf.io/download/v2brw?version=2", + folder_name="MNE-phantom-4DBTi", + config_key="MNE_DATASETS_PHANTOM_4DBTI_PATH", ) -MNE_DATASETS['phantom_4dbti'] = dict( - archive_name='MNE-phantom-4DBTi.zip', - hash='md5:938a601440f3ffa780d20a17bae039ff', - url='https://osf.io/v2brw/download?version=2', - folder_name='MNE-phantom-4DBTi', - config_key='MNE_DATASETS_PHANTOM_4DBTI_PATH', +MNE_DATASETS["phantom_kernel"] = dict( + archive_name="MNE-phantom-kernel.tar.gz", + hash="md5:4e2ad987dac1a20f95bae8ffeb2d41d6", + url="https://osf.io/download/dj7wz?version=1", + folder_name="MNE-phantom-kernel-data", + config_key="MNE_DATASETS_PHANTOM_KERNEL_PATH", ) -MNE_DATASETS['sample'] = dict( - archive_name='MNE-sample-data-processed.tar.gz', - hash='md5:e8f30c4516abdc12a0c08e6bae57409c', - url='https://osf.io/86qa2/download?version=6', - folder_name='MNE-sample-data', - config_key='MNE_DATASETS_SAMPLE_PATH', +MNE_DATASETS["sample"] = dict( + archive_name="MNE-sample-data-processed.tar.gz", + hash="md5:e8f30c4516abdc12a0c08e6bae57409c", + url="https://osf.io/download/86qa2?version=6", + folder_name="MNE-sample-data", + config_key="MNE_DATASETS_SAMPLE_PATH", ) -MNE_DATASETS['somato'] = dict( - archive_name='MNE-somato-data.tar.gz', - hash='md5:32fd2f6c8c7eb0784a1de6435273c48b', - url='https://osf.io/tp4sg/download?version=7', - folder_name='MNE-somato-data', - config_key='MNE_DATASETS_SOMATO_PATH' +MNE_DATASETS["somato"] = dict( + archive_name="MNE-somato-data.tar.gz", + hash="md5:32fd2f6c8c7eb0784a1de6435273c48b", + url="https://osf.io/download/tp4sg?version=7", + folder_name="MNE-somato-data", + config_key="MNE_DATASETS_SOMATO_PATH", ) -MNE_DATASETS['spm'] = dict( - archive_name='MNE-spm-face.tar.gz', - hash='md5:9f43f67150e3b694b523a21eb929ea75', - url='https://osf.io/je4s8/download?version=2', - folder_name='MNE-spm-face', - config_key='MNE_DATASETS_SPM_FACE_PATH', +MNE_DATASETS["spm"] = dict( + archive_name="MNE-spm-face.tar.gz", + hash="md5:9f43f67150e3b694b523a21eb929ea75", + url="https://osf.io/download/je4s8?version=2", + folder_name="MNE-spm-face", + config_key="MNE_DATASETS_SPM_FACE_PATH", ) # Visual 92 categories has the dataset split into 2 files. # We define a dictionary holding the items with the same # value across both files: folder name and configuration key. 
-MNE_DATASETS['visual_92_categories'] = dict( - folder_name='MNE-visual_92_categories-data', - config_key='MNE_DATASETS_VISUAL_92_CATEGORIES_PATH', -) -MNE_DATASETS['visual_92_categories_1'] = dict( - archive_name='MNE-visual_92_categories-data-part1.tar.gz', - hash='md5:74f50bbeb65740903eadc229c9fa759f', - url='https://osf.io/8ejrs/download?version=1', - folder_name='MNE-visual_92_categories-data', - config_key='MNE_DATASETS_VISUAL_92_CATEGORIES_PATH', -) -MNE_DATASETS['visual_92_categories_2'] = dict( - archive_name='MNE-visual_92_categories-data-part2.tar.gz', - hash='md5:203410a98afc9df9ae8ba9f933370e20', - url='https://osf.io/t4yjp/download?version=1', - folder_name='MNE-visual_92_categories-data', - config_key='MNE_DATASETS_VISUAL_92_CATEGORIES_PATH', -) - -MNE_DATASETS['mtrf'] = dict( - archive_name='mTRF_1.5.zip', - hash='md5:273a390ebbc48da2c3184b01a82e4636', - url='https://osf.io/h85s2/download?version=1', - folder_name='mTRF_1.5', - config_key='MNE_DATASETS_MTRF_PATH' -) -MNE_DATASETS['refmeg_noise'] = dict( - archive_name='sample_reference_MEG_noise-raw.zip', - hash='md5:779fecd890d98b73a4832e717d7c7c45', - url='https://osf.io/drt6v/download?version=1', - folder_name='MNE-refmeg-noise-data', - config_key='MNE_DATASETS_REFMEG_NOISE_PATH' -) - -MNE_DATASETS['ssvep'] = dict( - archive_name='ssvep_example_data.zip', - hash='md5:af866bbc0f921114ac9d683494fe87d6', - url='https://osf.io/z8h6k/download?version=5', - folder_name='ssvep-example-data', - config_key='MNE_DATASETS_SSVEP_PATH' -) - -MNE_DATASETS['erp_core'] = dict( - archive_name='MNE-ERP-CORE-data.tar.gz', - hash='md5:5866c0d6213bd7ac97f254c776f6c4b1', - url='https://osf.io/rzgba/download?version=1', - folder_name='MNE-ERP-CORE-data', - config_key='MNE_DATASETS_ERP_CORE_PATH', -) - -MNE_DATASETS['epilepsy_ecog'] = dict( - archive_name='MNE-epilepsy-ecog-data.tar.gz', - hash='md5:ffb139174afa0f71ec98adbbb1729dea', - url='https://osf.io/z4epq/download?version=1', - folder_name='MNE-epilepsy-ecog-data', - config_key='MNE_DATASETS_EPILEPSY_ECOG_PATH', +MNE_DATASETS["visual_92_categories"] = dict( + folder_name="MNE-visual_92_categories-data", + config_key="MNE_DATASETS_VISUAL_92_CATEGORIES_PATH", +) +MNE_DATASETS["visual_92_categories_1"] = dict( + archive_name="MNE-visual_92_categories-data-part1.tar.gz", + hash="md5:74f50bbeb65740903eadc229c9fa759f", + url="https://osf.io/download/8ejrs?version=1", + folder_name="MNE-visual_92_categories-data", + config_key="MNE_DATASETS_VISUAL_92_CATEGORIES_PATH", +) +MNE_DATASETS["visual_92_categories_2"] = dict( + archive_name="MNE-visual_92_categories-data-part2.tar.gz", + hash="md5:203410a98afc9df9ae8ba9f933370e20", + url="https://osf.io/download/t4yjp?version=1", + folder_name="MNE-visual_92_categories-data", + config_key="MNE_DATASETS_VISUAL_92_CATEGORIES_PATH", +) + +MNE_DATASETS["mtrf"] = dict( + archive_name="mTRF_1.5.zip", + hash="md5:273a390ebbc48da2c3184b01a82e4636", + url="https://osf.io/download/h85s2?version=1", + folder_name="mTRF_1.5", + config_key="MNE_DATASETS_MTRF_PATH", +) +MNE_DATASETS["refmeg_noise"] = dict( + archive_name="sample_reference_MEG_noise-raw.zip", + hash="md5:779fecd890d98b73a4832e717d7c7c45", + url="https://osf.io/download/drt6v?version=1", + folder_name="MNE-refmeg-noise-data", + config_key="MNE_DATASETS_REFMEG_NOISE_PATH", +) + +MNE_DATASETS["ssvep"] = dict( + archive_name="ssvep_example_data.zip", + hash="md5:af866bbc0f921114ac9d683494fe87d6", + 
url="https://osf.io/download/z8h6k?version=5", + folder_name="ssvep-example-data", + config_key="MNE_DATASETS_SSVEP_PATH", +) + +MNE_DATASETS["erp_core"] = dict( + archive_name="MNE-ERP-CORE-data.tar.gz", + hash="md5:5866c0d6213bd7ac97f254c776f6c4b1", + url="https://osf.io/download/rzgba?version=1", + folder_name="MNE-ERP-CORE-data", + config_key="MNE_DATASETS_ERP_CORE_PATH", +) + +MNE_DATASETS["epilepsy_ecog"] = dict( + archive_name="MNE-epilepsy-ecog-data.tar.gz", + hash="md5:ffb139174afa0f71ec98adbbb1729dea", + url="https://osf.io/download/z4epq?version=1", + folder_name="MNE-epilepsy-ecog-data", + config_key="MNE_DATASETS_EPILEPSY_ECOG_PATH", ) # Fieldtrip CMC dataset -MNE_DATASETS['fieldtrip_cmc'] = dict( - archive_name='SubjectCMC.zip', - hash='md5:6f9fd6520f9a66e20994423808d2528c', - url='https://osf.io/j9b6s/download?version=1', - folder_name='MNE-fieldtrip_cmc-data', - config_key='MNE_DATASETS_FIELDTRIP_CMC_PATH' +MNE_DATASETS["fieldtrip_cmc"] = dict( + archive_name="SubjectCMC.zip", + hash="md5:6f9fd6520f9a66e20994423808d2528c", + url="https://osf.io/download/j9b6s?version=1", + folder_name="MNE-fieldtrip_cmc-data", + config_key="MNE_DATASETS_FIELDTRIP_CMC_PATH", ) # brainstorm datasets: -MNE_DATASETS['bst_auditory'] = dict( - archive_name='bst_auditory.tar.gz', - hash='md5:fa371a889a5688258896bfa29dd1700b', - url='https://osf.io/5t9n8/download?version=1', - folder_name='MNE-brainstorm-data', - config_key='MNE_DATASETS_BRAINSTORM_PATH', -) -MNE_DATASETS['bst_phantom_ctf'] = dict( - archive_name='bst_phantom_ctf.tar.gz', - hash='md5:80819cb7f5b92d1a5289db3fb6acb33c', - url='https://osf.io/sxr8y/download?version=1', - folder_name='MNE-brainstorm-data', - config_key='MNE_DATASETS_BRAINSTORM_PATH', -) -MNE_DATASETS['bst_phantom_elekta'] = dict( - archive_name='bst_phantom_elekta.tar.gz', - hash='md5:1badccbe17998d18cc373526e86a7aaf', - url='https://osf.io/dpcku/download?version=1', - folder_name='MNE-brainstorm-data', - config_key='MNE_DATASETS_BRAINSTORM_PATH', -) -MNE_DATASETS['bst_raw'] = dict( - archive_name='bst_raw.tar.gz', - hash='md5:fa2efaaec3f3d462b319bc24898f440c', - url='https://osf.io/9675n/download?version=2', - folder_name='MNE-brainstorm-data', - config_key='MNE_DATASETS_BRAINSTORM_PATH', -) -MNE_DATASETS['bst_resting'] = dict( - archive_name='bst_resting.tar.gz', - hash='md5:70fc7bf9c3b97c4f2eab6260ee4a0430', - url='https://osf.io/m7bd3/download?version=3', - folder_name='MNE-brainstorm-data', - config_key='MNE_DATASETS_BRAINSTORM_PATH', +MNE_DATASETS["bst_auditory"] = dict( + archive_name="bst_auditory.tar.gz", + hash="md5:fa371a889a5688258896bfa29dd1700b", + url="https://osf.io/download/5t9n8?version=1", + folder_name="MNE-brainstorm-data", + config_key="MNE_DATASETS_BRAINSTORM_PATH", +) +MNE_DATASETS["bst_phantom_ctf"] = dict( + archive_name="bst_phantom_ctf.tar.gz", + hash="md5:80819cb7f5b92d1a5289db3fb6acb33c", + url="https://osf.io/download/sxr8y?version=1", + folder_name="MNE-brainstorm-data", + config_key="MNE_DATASETS_BRAINSTORM_PATH", +) +MNE_DATASETS["bst_phantom_elekta"] = dict( + archive_name="bst_phantom_elekta.tar.gz", + hash="md5:1badccbe17998d18cc373526e86a7aaf", + url="https://osf.io/download/dpcku?version=1", + folder_name="MNE-brainstorm-data", + config_key="MNE_DATASETS_BRAINSTORM_PATH", +) +MNE_DATASETS["bst_raw"] = dict( + archive_name="bst_raw.tar.gz", + hash="md5:fa2efaaec3f3d462b319bc24898f440c", + 
url="https://osf.io/download/9675n?version=2", + folder_name="MNE-brainstorm-data", + config_key="MNE_DATASETS_BRAINSTORM_PATH", +) +MNE_DATASETS["bst_resting"] = dict( + archive_name="bst_resting.tar.gz", + hash="md5:70fc7bf9c3b97c4f2eab6260ee4a0430", + url="https://osf.io/download/m7bd3?version=3", + folder_name="MNE-brainstorm-data", + config_key="MNE_DATASETS_BRAINSTORM_PATH", ) # HF-SEF -MNE_DATASETS['hf_sef_raw'] = dict( - archive_name='hf_sef_raw.tar.gz', - hash='md5:33934351e558542bafa9b262ac071168', - url='https://zenodo.org/record/889296/files/hf_sef_raw.tar.gz', - folder_name='hf_sef', - config_key='MNE_DATASETS_HF_SEF_PATH', -) -MNE_DATASETS['hf_sef_evoked'] = dict( - archive_name='hf_sef_evoked.tar.gz', - hash='md5:13d34cb5db584e00868677d8fb0aab2b', - url=('https://zenodo.org/record/3523071/files/' - 'hf_sef_evoked.tar.gz'), - folder_name='hf_sef', - config_key='MNE_DATASETS_HF_SEF_PATH', +MNE_DATASETS["hf_sef_raw"] = dict( + archive_name="hf_sef_raw.tar.gz", + hash="md5:33934351e558542bafa9b262ac071168", + url="https://zenodo.org/record/889296/files/hf_sef_raw.tar.gz", + folder_name="hf_sef", + config_key="MNE_DATASETS_HF_SEF_PATH", +) +MNE_DATASETS["hf_sef_evoked"] = dict( + archive_name="hf_sef_evoked.tar.gz", + hash="md5:13d34cb5db584e00868677d8fb0aab2b", + # Zenodo can be slow, so we use the OSF mirror + # url=('https://zenodo.org/record/3523071/files/' + # 'hf_sef_evoked.tar.gz'), + url="https://osf.io/download/25f8d?version=2", + folder_name="hf_sef", + config_key="MNE_DATASETS_HF_SEF_PATH", ) # "fake" dataset (for testing) -MNE_DATASETS['fake'] = dict( - archive_name='foo.tgz', - hash='md5:3194e9f7b46039bb050a74f3e1ae9908', - url=('https://github.com/mne-tools/mne-testing-data/raw/master/' - 'datasets/foo.tgz'), - folder_name='foo', - config_key='MNE_DATASETS_FAKE_PATH' +MNE_DATASETS["fake"] = dict( + archive_name="foo.tgz", + hash="md5:3194e9f7b46039bb050a74f3e1ae9908", + url="https://github.com/mne-tools/mne-testing-data/raw/master/datasets/foo.tgz", + folder_name="foo", + config_key="MNE_DATASETS_FAKE_PATH", +) + +# eyelink dataset +MNE_DATASETS["eyelink"] = dict( + archive_name="MNE-eyelink-data.zip", + hash="md5:68a6323ef17d655f1a659c3290ee1c3f", + url=("https://osf.io/download/xsu4g?version=1"), + folder_name="MNE-eyelink-data", + config_key="MNE_DATASETS_EYELINK_PATH", ) diff --git a/mne/datasets/eegbci/__init__.py b/mne/datasets/eegbci/__init__.py index 7be4fbc2858..ac2a7179596 100644 --- a/mne/datasets/eegbci/__init__.py +++ b/mne/datasets/eegbci/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """EEG Motor Movement/Imagery Dataset.""" from .eegbci import data_path, load_data, standardize diff --git a/mne/datasets/eegbci/eegbci.py b/mne/datasets/eegbci/eegbci.py index d976425dd7a..7b6b3e8bdc8 100644 --- a/mne/datasets/eegbci/eegbci.py +++ b/mne/datasets/eegbci/eegbci.py @@ -1,128 +1,132 @@ -# Author: Martin Billinger -# Adam Li -# Daniel McCloy -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
import os import re +import time +from importlib.resources import files from os import path as op from pathlib import Path -from ...utils import _url_to_local_path, verbose -from ..utils import _do_path_update, _get_path +from ...utils import _url_to_local_path, logger, verbose +from ..utils import _do_path_update, _downloader_params, _get_path, _log_time_size -# TODO: remove try/except when our min version is py 3.9 -try: - from importlib.resources import files -except ImportError: - from importlib_resources import files - - -EEGMI_URL = 'https://physionet.org/files/eegmmidb/1.0.0/' +EEGMI_URL = "https://physionet.org/files/eegmmidb/1.0.0/" @verbose -def data_path(url, path=None, force_update=False, update_path=None, *, - verbose=None): +def data_path(url, path=None, force_update=False, update_path=None, *, verbose=None): """Get path to local copy of EEGMMI dataset URL. - This is a low-level function useful for getting a local copy of a - remote EEGBCI dataset :footcite:`SchalkEtAl2004` which is available at PhysioNet :footcite:`GoldbergerEtAl2000`. + This is a low-level function useful for getting a local copy of a remote EEGBCI + dataset :footcite:`SchalkEtAl2004`, which is also available at PhysioNet + :footcite:`GoldbergerEtAl2000`. Metadata, such as the meaning of event markers + may be obtained from the + `PhysioNet documentation page `_. Parameters ---------- url : str The dataset to use. - path : None | str - Location of where to look for the EEGBCI data storing location. - If None, the environment variable or config parameter - ``MNE_DATASETS_EEGBCI_PATH`` is used. If it doesn't exist, the - "~/mne_data" directory is used. If the EEGBCI dataset - is not found under the given path, the data - will be automatically downloaded to the specified folder. + path : None | path-like + Location of where to look for the EEGBCI data. If ``None``, the environment + variable or config parameter ``MNE_DATASETS_EEGBCI_PATH`` is used. If neither + exists, the ``~/mne_data`` directory is used. If the EEGBCI dataset is not found + under the given path, the data will be automatically downloaded to the specified + folder. force_update : bool Force update of the dataset even if a local copy exists. update_path : bool | None - If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python - config to the given path. If None, the user is prompted. + If ``True``, set ``MNE_DATASETS_EEGBCI_PATH`` in the configuration to the given + path. If ``None``, the user is prompted. %(verbose)s Returns ------- path : list of Path - Local path to the given data file. This path is contained inside a list - of length one, for compatibility. + Local path to the given data file. This path is contained inside a list of + length one for compatibility. Notes ----- For example, one could do: >>> from mne.datasets import eegbci - >>> url = 'http://www.physionet.org/physiobank/database/eegmmidb/' - >>> eegbci.data_path(url, os.getenv('HOME') + '/datasets') # doctest:+SKIP + >>> url = "http://www.physionet.org/physiobank/database/eegmmidb/" + >>> eegbci.data_path(url, "~/datasets") # doctest:+SKIP - This would download the given EEGBCI data file to the 'datasets' folder, - and prompt the user to save the 'datasets' path to the mne-python config, - if it isn't there already. + This would download the given EEGBCI data file to the ``~/datasets`` folder and + prompt the user to store this path in the config (if it does not already exist). References ---------- .. 
footbibliography:: - """ # noqa: E501 + """ import pooch - key = 'MNE_DATASETS_EEGBCI_PATH' - name = 'EEGBCI' + key = "MNE_DATASETS_EEGBCI_PATH" + name = "EEGBCI" path = _get_path(path, key, name) - fname = 'MNE-eegbci-data' + fname = "MNE-eegbci-data" destination = _url_to_local_path(url, op.join(path, fname)) destinations = [destination] - # Fetch the file + # fetch the file + downloader = pooch.HTTPDownloader(**_downloader_params()) if not op.isfile(destination) or force_update: if op.isfile(destination): os.remove(destination) if not op.isdir(op.dirname(destination)): os.makedirs(op.dirname(destination)) pooch.retrieve( - # URL to one of Pooch's test files url=url, path=destination, - fname=fname + downloader=downloader, + fname=fname, ) - # Offer to update the path + # offer to update the path _do_path_update(path, update_path, key, name) destinations = [Path(dest) for dest in destinations] return destinations @verbose -def load_data(subject, runs, path=None, force_update=False, update_path=None, - base_url=EEGMI_URL, verbose=None): # noqa: D301 +def load_data( + subjects, + runs, + *, + path=None, + force_update=False, + update_path=None, + base_url=EEGMI_URL, + verbose=None, +): # noqa: D301 """Get paths to local copies of EEGBCI dataset files. - This will fetch data for the EEGBCI dataset :footcite:`SchalkEtAl2004`, which is also - available at PhysioNet :footcite:`GoldbergerEtAl2000`. + This will fetch data for the EEGBCI dataset :footcite:`SchalkEtAl2004`, which is + also available at PhysioNet :footcite:`GoldbergerEtAl2000`. Metadata, such as the + meaning of event markers may be obtained from the + `PhysioNet documentation page `_. Parameters ---------- - subject : int - The subject to use. Can be in the range of 1-109 (inclusive). + subjects : int | list of int + The subjects to use. Can be in the range of 1-109 (inclusive). runs : int | list of int - The runs to use. See Notes for details. - path : None | str - Location of where to look for the EEGBCI data storing location. - If None, the environment variable or config parameter - ``MNE_DATASETS_EEGBCI_PATH`` is used. If it doesn't exist, the - "~/mne_data" directory is used. If the EEGBCI dataset - is not found under the given path, the data - will be automatically downloaded to the specified folder. + The runs to use (see Notes for details). + path : None | path-like + Location of where to look for the EEGBCI data. If ``None``, the environment + variable or config parameter ``MNE_DATASETS_EEGBCI_PATH`` is used. If neither + exists, the ``~/mne_data`` directory is used. If the EEGBCI dataset is not found + under the given path, the data will be automatically downloaded to the specified + folder. force_update : bool Force update of the dataset even if a local copy exists. update_path : bool | None - If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python - config to the given path. If None, the user is prompted. + If ``True``, set ``MNE_DATASETS_EEGBCI_PATH`` in the configuration to the given + path. If ``None``, the user is prompted. base_url : str The URL root for the data. 
%(verbose)s @@ -150,59 +154,77 @@ def load_data(subject, runs, path=None, force_update=False, update_path=None, For example, one could do:: >>> from mne.datasets import eegbci - >>> eegbci.load_data(1, [4, 10, 14], os.getenv('HOME') + '/datasets') # doctest:+SKIP + >>> eegbci.load_data([1, 2], [6, 10, 14], "~/datasets") # doctest:+SKIP - This would download runs 4, 10, and 14 (hand/foot motor imagery) runs from - subject 1 in the EEGBCI dataset to the 'datasets' folder, and prompt the - user to save the 'datasets' path to the mne-python config, if it isn't - there already. + This would download runs 6, 10, and 14 (hand/foot motor imagery) runs from subjects + 1 and 2 in the EEGBCI dataset to "~/datasets" and prompt the user to store this path + in the config (if it does not already exist). References ---------- .. footbibliography:: - """ # noqa: E501 + """ import pooch - if not hasattr(runs, '__iter__'): + t0 = time.time() + + if not hasattr(subjects, "__iter__"): + subjects = [subjects] + + if not hasattr(runs, "__iter__"): runs = [runs] # get local storage path - config_key = 'MNE_DATASETS_EEGBCI_PATH' - folder = 'MNE-eegbci-data' - name = 'EEGBCI' + config_key = "MNE_DATASETS_EEGBCI_PATH" + folder = "MNE-eegbci-data" + name = "EEGBCI" path = _get_path(path, config_key, name) # extract path parts - pattern = r'(?:https?://.*)(files)/(eegmmidb)/(\d+\.\d+\.\d+)/?' + pattern = r"(?:https?://.*)(files)/(eegmmidb)/(\d+\.\d+\.\d+)/?" match = re.compile(pattern).match(base_url) if match is None: - raise ValueError('base_url does not match the expected EEGMI folder ' - 'structure. Please notify MNE-Python developers.') + raise ValueError( + "base_url does not match the expected EEGMI folder " + "structure. Please notify MNE-Python developers." + ) base_path = op.join(path, folder, *match.groups()) # create the download manager fetcher = pooch.create( path=base_path, base_url=base_url, - version=None, # Data versioning is decoupled from MNE-Python version. - registry=None, # Registry is loaded from file, below. 
- retry_if_failed=2 # 2 retries = 3 total attempts + version=None, # data versioning is decoupled from MNE-Python version + registry=None, # registry is loaded from file (below) + retry_if_failed=2, # 2 retries = 3 total attempts ) # load the checksum registry - registry = files('mne').joinpath('data', 'eegbci_checksums.txt') + registry = files("mne").joinpath("data", "eegbci_checksums.txt") fetcher.load_registry(registry) # fetch the file(s) data_paths = [] - for run in runs: - file_part = f'S{subject:03d}/S{subject:03d}R{run:02d}.edf' - destination = op.join(base_path, file_part) - if force_update and op.isfile(destination): - os.remove(destination) - data_paths.append(fetcher.fetch(file_part)) - # update path in config if desired - _do_path_update(path, update_path, config_key, name) + sz = 0 + for subject in subjects: + for run in runs: + file_part = f"S{subject:03d}/S{subject:03d}R{run:02d}.edf" + destination = Path(base_path, file_part) + data_paths.append(destination) + if destination.exists(): + if force_update: + destination.unlink() + else: + continue + if sz == 0: # log once + logger.info("Downloading EEGBCI data") + fetcher.fetch(file_part) + # update path in config if desired + sz += destination.stat().st_size + + _do_path_update(path, update_path, config_key, name) + if sz > 0: + _log_time_size(t0, sz) return data_paths @@ -216,11 +238,11 @@ def standardize(raw): """ rename = dict() for name in raw.ch_names: - std_name = name.strip('.') + std_name = name.strip(".") std_name = std_name.upper() - if std_name.endswith('Z'): - std_name = std_name[:-1] + 'z' - if std_name.startswith('FP'): - std_name = 'Fp' + std_name[2:] + if std_name.endswith("Z"): + std_name = std_name[:-1] + "z" + if std_name.startswith("FP"): + std_name = "Fp" + std_name[2:] rename[name] = std_name raw.rename_channels(rename) diff --git a/mne/datasets/eegbci/tests/test_eegbci.py b/mne/datasets/eegbci/tests/test_eegbci.py new file mode 100644 index 00000000000..e9f63fee288 --- /dev/null +++ b/mne/datasets/eegbci/tests/test_eegbci.py @@ -0,0 +1,15 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + + +from mne.datasets import eegbci + + +def test_eegbci_download(tmp_path, fake_retrieve): + """Test Sleep Physionet URL handling.""" + subjects = range(1, 5) + for subj in subjects: + fnames = eegbci.load_data(subj, runs=[3], path=tmp_path, update_path=False) + assert len(fnames) == 1, subj + assert fake_retrieve.call_count == 4 diff --git a/mne/datasets/epilepsy_ecog/__init__.py b/mne/datasets/epilepsy_ecog/__init__.py index 10982c2f504..6ecb3379302 100644 --- a/mne/datasets/epilepsy_ecog/__init__.py +++ b/mne/datasets/epilepsy_ecog/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Clinical epilepsy datasets.""" from ._data import data_path, get_version diff --git a/mne/datasets/epilepsy_ecog/_data.py b/mne/datasets/epilepsy_ecog/_data.py index 33535c1aff0..20abb9fd8be 100644 --- a/mne/datasets/epilepsy_ecog/_data.py +++ b/mne/datasets/epilepsy_ecog/_data.py @@ -1,27 +1,32 @@ -# Authors: Adam Li -# Alex Rockhill -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
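A quick usage sketch for the updated EEGBCI loader above, which now accepts a list of subjects and logs download time and size once: the subject and run numbers below are illustrative, and the first call downloads a few EDF files from PhysioNet.

from mne.datasets import eegbci
from mne.io import read_raw_edf

# Fetch runs 6, 10 and 14 (hand/foot motor imagery) for subjects 1 and 2.
paths = eegbci.load_data(subjects=[1, 2], runs=[6, 10, 14])
raw = read_raw_edf(paths[0], preload=True)
eegbci.standardize(raw)  # strip trailing dots and normalize channel-name case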
from ...utils import verbose -from ..utils import (_data_path_doc, _get_version, _version_doc, - _download_mne_dataset) +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, *, verbose=None): # noqa: D103 +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 return _download_mne_dataset( - name='epilepsy_ecog', processor='untar', path=path, - force_update=force_update, update_path=update_path, - download=download) + name="epilepsy_ecog", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) data_path.__doc__ = _data_path_doc.format( - name='epilepsy_ecog', conf='MNE_DATASETS_EPILEPSY_ECOG_PATH') + name="epilepsy_ecog", conf="MNE_DATASETS_EPILEPSY_ECOG_PATH" +) def get_version(): # noqa: D103 - return _get_version('epilepsy_ecog') + return _get_version("epilepsy_ecog") -get_version.__doc__ = _version_doc.format(name='epilepsy_ecog') +get_version.__doc__ = _version_doc.format(name="epilepsy_ecog") diff --git a/mne/datasets/erp_core/__init__.py b/mne/datasets/erp_core/__init__.py index 9e2588347da..b16989af413 100644 --- a/mne/datasets/erp_core/__init__.py +++ b/mne/datasets/erp_core/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """ERP-CORE EEG dataset.""" from .erp_core import data_path, get_version diff --git a/mne/datasets/erp_core/erp_core.py b/mne/datasets/erp_core/erp_core.py index 76bd62ca209..2771b17dfe3 100644 --- a/mne/datasets/erp_core/erp_core.py +++ b/mne/datasets/erp_core/erp_core.py @@ -1,23 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + from ...utils import verbose -from ..utils import (_data_path_doc, - _get_version, _version_doc, _download_mne_dataset) +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, *, verbose=None): # noqa: D103 +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 return _download_mne_dataset( - name='erp_core', processor='untar', path=path, - force_update=force_update, update_path=update_path, - download=download) + name="erp_core", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) -data_path.__doc__ = _data_path_doc.format(name='erp_core', - conf='MNE_DATASETS_ERP_CORE_PATH') +data_path.__doc__ = _data_path_doc.format( + name="erp_core", conf="MNE_DATASETS_ERP_CORE_PATH" +) def get_version(): # noqa: D103 - return _get_version('erp_core') + return _get_version("erp_core") -get_version.__doc__ = _version_doc.format(name='erp_core') +get_version.__doc__ = _version_doc.format(name="erp_core") diff --git a/mne/datasets/eyelink/__init__.py b/mne/datasets/eyelink/__init__.py new file mode 100644 index 00000000000..db3ea545b58 --- /dev/null +++ b/mne/datasets/eyelink/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
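The epilepsy_ecog and ERP-CORE wrappers reformatted above share one calling convention: data_path() fetches the archive if needed and returns the local directory, and get_version() reports the downloaded dataset version. A minimal sketch (it will trigger a download on first use):

import mne

ecog_dir = mne.datasets.epilepsy_ecog.data_path()
print(ecog_dir)
print(mne.datasets.erp_core.get_version())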
+ +"""Eyelink test dataset.""" + +from .eyelink import data_path, get_version diff --git a/mne/datasets/eyelink/eyelink.py b/mne/datasets/eyelink/eyelink.py new file mode 100644 index 00000000000..918ac86f1e8 --- /dev/null +++ b/mne/datasets/eyelink/eyelink.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="eyelink", + processor="unzip", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="eyelink", conf="MNE_DATASETS_EYELINK_PATH" +) + + +def get_version(): # noqa: D103 + return _get_version("eyelink") + + +get_version.__doc__ = _version_doc.format(name="eyelink") diff --git a/mne/datasets/fieldtrip_cmc/__init__.py b/mne/datasets/fieldtrip_cmc/__init__.py index 328d81ff5ba..0ed74466be8 100644 --- a/mne/datasets/fieldtrip_cmc/__init__.py +++ b/mne/datasets/fieldtrip_cmc/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """fieldtrip Cortico-Muscular Coherence (CMC) Dataset.""" from .fieldtrip_cmc import data_path, get_version diff --git a/mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py b/mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py index d7abe1c68f0..b61b1d7d188 100644 --- a/mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py +++ b/mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py @@ -1,27 +1,32 @@ -# Authors: Chris Holdgraf -# Alexandre Barachant -# -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + from ...utils import verbose -from ..utils import (_data_path_doc, - _get_version, _version_doc, _download_mne_dataset) +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, *, verbose=None): # noqa: D103 +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 return _download_mne_dataset( - name='fieldtrip_cmc', processor='nested_unzip', path=path, - force_update=force_update, update_path=update_path, - download=download) + name="fieldtrip_cmc", + processor="nested_unzip", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) data_path.__doc__ = _data_path_doc.format( - name='fieldtrip_cmc', conf='MNE_DATASETS_FIELDTRIP_CMC_PATH') + name="fieldtrip_cmc", conf="MNE_DATASETS_FIELDTRIP_CMC_PATH" +) def get_version(): # noqa: D103 - return _get_version('fieldtrip_cmc') + return _get_version("fieldtrip_cmc") -get_version.__doc__ = _version_doc.format(name='fieldtrip_cmc') +get_version.__doc__ = _version_doc.format(name="fieldtrip_cmc") diff --git a/mne/datasets/fnirs_motor/__init__.py b/mne/datasets/fnirs_motor/__init__.py index 66ec175ef1b..61808bb9fdf 100644 --- a/mne/datasets/fnirs_motor/__init__.py +++ b/mne/datasets/fnirs_motor/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
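The new eyelink module added above follows that same pattern, using the "unzip" processor and the MNE_DATASETS_EYELINK_PATH config key. A minimal usage sketch; the printed path is only the default location, shown as an example:

from mne.datasets import eyelink

data_dir = eyelink.data_path()  # fetches MNE-eyelink-data.zip on first use
print(data_dir)                 # typically ~/mne_data/MNE-eyelink-data
print(eyelink.get_version())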
+ """fNIRS motor dataset.""" from .fnirs_motor import data_path, get_version diff --git a/mne/datasets/fnirs_motor/fnirs_motor.py b/mne/datasets/fnirs_motor/fnirs_motor.py index ce0294f9f4e..43a4ffc9b96 100644 --- a/mne/datasets/fnirs_motor/fnirs_motor.py +++ b/mne/datasets/fnirs_motor/fnirs_motor.py @@ -1,26 +1,32 @@ -# Authors: Eric Larson -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. from ...utils import verbose -from ..utils import (_data_path_doc, _get_version, _version_doc, - _download_mne_dataset) +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, *, verbose=None): # noqa: D103 +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 return _download_mne_dataset( - name='fnirs_motor', processor='untar', path=path, - force_update=force_update, update_path=update_path, - download=download) + name="fnirs_motor", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) -data_path.__doc__ = _data_path_doc.format(name='fnirs_motor', - conf='MNE_DATASETS_FNIRS_MOTOR_PATH') +data_path.__doc__ = _data_path_doc.format( + name="fnirs_motor", conf="MNE_DATASETS_FNIRS_MOTOR_PATH" +) def get_version(): # noqa: D103 - return _get_version('fnirs_motor') + return _get_version("fnirs_motor") -get_version.__doc__ = _version_doc.format(name='fnirs_motor') +get_version.__doc__ = _version_doc.format(name="fnirs_motor") diff --git a/mne/datasets/hf_sef/__init__.py b/mne/datasets/hf_sef/__init__.py index 08fe8ca5651..3834a1279b1 100644 --- a/mne/datasets/hf_sef/__init__.py +++ b/mne/datasets/hf_sef/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """HF-SEF dataset.""" from .hf_sef import data_path diff --git a/mne/datasets/hf_sef/hf_sef.py b/mne/datasets/hf_sef/hf_sef.py index 63d97df4fdf..aff18315970 100644 --- a/mne/datasets/hf_sef/hf_sef.py +++ b/mne/datasets/hf_sef/hf_sef.py @@ -1,20 +1,23 @@ #!/usr/bin/env python2 -# -*- coding: utf-8 -*- -# Authors: Jussi Nurminen -# License: BSD Style. + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import os.path as op import os -from ...utils import verbose, _check_option -from ..utils import _get_path, _do_path_update, _download_mne_dataset +import os.path as op + +from ...utils import _check_option, verbose from ..config import MNE_DATASETS +from ..utils import _do_path_update, _download_mne_dataset, _get_path @verbose -def data_path(dataset='evoked', path=None, force_update=False, - update_path=True, *, verbose=None): - u"""Get path to local copy of the high frequency SEF dataset. +def data_path( + dataset="evoked", path=None, force_update=False, update_path=True, *, verbose=None +): + """Get path to local copy of the high frequency SEF dataset. Gets a local copy of the high frequency SEF MEG dataset :footcite:`NurminenEtAl2017`. @@ -47,33 +50,38 @@ def data_path(dataset='evoked', path=None, force_update=False, ---------- .. 
footbibliography:: """ - _check_option('dataset', dataset, ('evoked', 'raw')) - if dataset == 'raw': - data_dict = MNE_DATASETS['hf_sef_raw'] - data_dict['dataset_name'] = 'hf_sef_raw' + _check_option("dataset", dataset, ("evoked", "raw")) + if dataset == "raw": + data_dict = MNE_DATASETS["hf_sef_raw"] + data_dict["dataset_name"] = "hf_sef_raw" else: - data_dict = MNE_DATASETS['hf_sef_evoked'] - data_dict['dataset_name'] = 'hf_sef_evoked' - config_key = data_dict['config_key'] - folder_name = data_dict['folder_name'] + data_dict = MNE_DATASETS["hf_sef_evoked"] + data_dict["dataset_name"] = "hf_sef_evoked" + config_key = data_dict["config_key"] + folder_name = data_dict["folder_name"] # get download path for specific dataset path = _get_path(path=path, key=config_key, name=folder_name) final_path = op.join(path, folder_name) - megdir = op.join(final_path, 'MEG', 'subject_a') - has_raw = (dataset == 'raw' and op.isdir(megdir) and - any('raw' in filename for filename in os.listdir(megdir))) - has_evoked = (dataset == 'evoked' and - op.isdir(op.join(final_path, 'subjects'))) + megdir = op.join(final_path, "MEG", "subject_a") + has_raw = ( + dataset == "raw" + and op.isdir(megdir) + and any("raw" in filename for filename in os.listdir(megdir)) + ) + has_evoked = dataset == "evoked" and op.isdir(op.join(final_path, "subjects")) # data not there, or force_update requested: if has_raw or has_evoked and not force_update: - _do_path_update(path, update_path, config_key, - folder_name) + _do_path_update(path, update_path, config_key, folder_name) return final_path # instantiate processor that unzips file - data_path = _download_mne_dataset(name=data_dict['dataset_name'], - processor='untar', path=path, - force_update=force_update, - update_path=update_path, download=True) + data_path = _download_mne_dataset( + name=data_dict["dataset_name"], + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=True, + ) return data_path diff --git a/mne/datasets/kiloword/__init__.py b/mne/datasets/kiloword/__init__.py index 18a22f9ecd1..17b54adacf3 100644 --- a/mne/datasets/kiloword/__init__.py +++ b/mne/datasets/kiloword/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """MNE visual_92_categories dataset.""" from .kiloword import data_path, get_version diff --git a/mne/datasets/kiloword/kiloword.py b/mne/datasets/kiloword/kiloword.py index c011365bad3..37fd3f1ee6d 100644 --- a/mne/datasets/kiloword/kiloword.py +++ b/mne/datasets/kiloword/kiloword.py @@ -1,12 +1,15 @@ -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. from ...utils import verbose -from ..utils import (_get_version, _version_doc, _download_mne_dataset) +from ..utils import _download_mne_dataset, _get_version, _version_doc @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, *, verbose=None): +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): """Get path to local copy of the kiloword dataset. This is the dataset from :footcite:`DufauEtAl2015`. @@ -44,14 +47,18 @@ def data_path(path=None, force_update=False, update_path=True, .. 
footbibliography:: """ return _download_mne_dataset( - name='kiloword', processor='untar', path=path, - force_update=force_update, update_path=update_path, - download=download) + name="kiloword", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) def get_version(): """Get dataset version.""" - return _get_version('kiloword') + return _get_version("kiloword") -get_version.__doc__ = _version_doc.format(name='kiloword') +get_version.__doc__ = _version_doc.format(name="kiloword") diff --git a/mne/datasets/limo/__init__.py b/mne/datasets/limo/__init__.py index f83eac67505..b5622d90302 100644 --- a/mne/datasets/limo/__init__.py +++ b/mne/datasets/limo/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """LIMO Dataset.""" from .limo import data_path, load_data diff --git a/mne/datasets/limo/limo.py b/mne/datasets/limo/limo.py index 143a9dd1162..f0696a78b1e 100644 --- a/mne/datasets/limo/limo.py +++ b/mne/datasets/limo/limo.py @@ -1,26 +1,28 @@ -# Authors: Jose C. Garcia Alanis -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import os import os.path as op +import time +from pathlib import Path import numpy as np +from scipy.io import loadmat +from ..._fiff.meas_info import create_info from ...channels import make_standard_montage from ...epochs import EpochsArray -from ...io.meas_info import create_info -from ...utils import _check_pandas_installed, verbose -from ..utils import _get_path, _do_path_update, logger - +from ...utils import _check_pandas_installed, logger, verbose +from ..utils import _do_path_update, _downloader_params, _get_path, _log_time_size # root url for LIMO files -root_url = 'https://files.de-1.osf.io/v1/resources/52rea/providers/osfstorage/' +root_url = "https://files.de-1.osf.io/v1/resources/52rea/providers/osfstorage/" @verbose -def data_path(subject, path=None, force_update=False, update_path=None, *, - verbose=None): +def data_path( + subject, path=None, force_update=False, update_path=None, *, verbose=None +): """Get path to local copy of LIMO dataset URL. 
This is a low-level function useful for getting a local copy of the @@ -68,120 +70,202 @@ def data_path(subject, path=None, force_update=False, update_path=None, *, """ # noqa: E501 import pooch - downloader = pooch.HTTPDownloader(progressbar=True) # use tqdm + t0 = time.time() + + downloader = pooch.HTTPDownloader(**_downloader_params()) # local storage patch - config_key = 'MNE_DATASETS_LIMO_PATH' - name = 'LIMO' - subj = f'S{subject}' + config_key = "MNE_DATASETS_LIMO_PATH" + name = "LIMO" + subj = f"S{subject}" path = _get_path(path, config_key, name) - base_path = op.join(path, 'MNE-limo-data') + base_path = op.join(path, "MNE-limo-data") subject_path = op.join(base_path, subj) # the remote URLs are in the form of UUIDs: urls = dict( - S18={'Yr.mat': '5cf839833a4d9500178a6ff8', - 'LIMO.mat': '5cf83907e650a2001ad592e4'}, - S17={'Yr.mat': '5cf838e83a4d9500168aeb76', - 'LIMO.mat': '5cf83867a542b80019c87602'}, - S16={'Yr.mat': '5cf83857e650a20019d5778f', - 'LIMO.mat': '5cf837dc3a4d9500188a64fe'}, - S15={'Yr.mat': '5cf837cce650a2001ad591e8', - 'LIMO.mat': '5cf83758a542b8001ac7d11d'}, - S14={'Yr.mat': '5cf837493a4d9500198a938f', - 'LIMO.mat': '5cf836e4a542b8001bc7cc53'}, - S13={'Yr.mat': '5cf836d23a4d9500178a6df7', - 'LIMO.mat': '5cf836543a4d9500168ae7cb'}, - S12={'Yr.mat': '5cf83643d4c7d700193e5954', - 'LIMO.mat': '5cf835193a4d9500178a6c92'}, - S11={'Yr.mat': '5cf8356ea542b8001cc81517', - 'LIMO.mat': '5cf834f7d4c7d700163daab8'}, - S10={'Yr.mat': '5cf833b0e650a20019d57454', - 'LIMO.mat': '5cf83204e650a20018d59eb2'}, - S9={'Yr.mat': '5cf83201a542b8001cc811cf', - 'LIMO.mat': '5cf8316c3a4d9500168ae13b'}, - S8={'Yr.mat': '5cf8326ce650a20017d60373', - 'LIMO.mat': '5cf8316d3a4d9500198a8dc5'}, - S7={'Yr.mat': '5cf834a03a4d9500168ae59b', - 'LIMO.mat': '5cf83069e650a20017d600d7'}, - S6={'Yr.mat': '5cf830e6a542b80019c86a70', - 'LIMO.mat': '5cf83057a542b80019c869ca'}, - S5={'Yr.mat': '5cf8115be650a20018d58041', - 'LIMO.mat': '5cf80c0bd4c7d700193e213c'}, - S4={'Yr.mat': '5cf810c9a542b80019c8450a', - 'LIMO.mat': '5cf80bf83a4d9500198a6eb4'}, - S3={'Yr.mat': '5cf80c55d4c7d700163d8f52', - 'LIMO.mat': '5cf80bdea542b80019c83cab'}, - S2={'Yr.mat': '5cde827123fec40019e01300', - 'LIMO.mat': '5cde82682a50c4001677c259'}, - S1={'Yr.mat': '5d6d3071536cf5001a8b0c78', - 'LIMO.mat': '5d6d305f6f41fc001a3151d8'}, + S18={ + "Yr.mat": "5cf839833a4d9500178a6ff8", + "LIMO.mat": "5cf83907e650a2001ad592e4", + }, + S17={ + "Yr.mat": "5cf838e83a4d9500168aeb76", + "LIMO.mat": "5cf83867a542b80019c87602", + }, + S16={ + "Yr.mat": "5cf83857e650a20019d5778f", + "LIMO.mat": "5cf837dc3a4d9500188a64fe", + }, + S15={ + "Yr.mat": "5cf837cce650a2001ad591e8", + "LIMO.mat": "5cf83758a542b8001ac7d11d", + }, + S14={ + "Yr.mat": "5cf837493a4d9500198a938f", + "LIMO.mat": "5cf836e4a542b8001bc7cc53", + }, + S13={ + "Yr.mat": "5cf836d23a4d9500178a6df7", + "LIMO.mat": "5cf836543a4d9500168ae7cb", + }, + S12={ + "Yr.mat": "5cf83643d4c7d700193e5954", + "LIMO.mat": "5cf835193a4d9500178a6c92", + }, + S11={ + "Yr.mat": "5cf8356ea542b8001cc81517", + "LIMO.mat": "5cf834f7d4c7d700163daab8", + }, + S10={ + "Yr.mat": "5cf833b0e650a20019d57454", + "LIMO.mat": "5cf83204e650a20018d59eb2", + }, + S9={ + "Yr.mat": "5cf83201a542b8001cc811cf", + "LIMO.mat": "5cf8316c3a4d9500168ae13b", + }, + S8={ + "Yr.mat": "5cf8326ce650a20017d60373", + "LIMO.mat": "5cf8316d3a4d9500198a8dc5", + }, + S7={ + "Yr.mat": "5cf834a03a4d9500168ae59b", + "LIMO.mat": "5cf83069e650a20017d600d7", + }, + S6={ + "Yr.mat": "5cf830e6a542b80019c86a70", + "LIMO.mat": "5cf83057a542b80019c869ca", + }, 
+ S5={ + "Yr.mat": "5cf8115be650a20018d58041", + "LIMO.mat": "5cf80c0bd4c7d700193e213c", + }, + S4={ + "Yr.mat": "5cf810c9a542b80019c8450a", + "LIMO.mat": "5cf80bf83a4d9500198a6eb4", + }, + S3={ + "Yr.mat": "5cf80c55d4c7d700163d8f52", + "LIMO.mat": "5cf80bdea542b80019c83cab", + }, + S2={ + "Yr.mat": "5cde827123fec40019e01300", + "LIMO.mat": "5cde82682a50c4001677c259", + }, + S1={ + "Yr.mat": "5d6d3071536cf5001a8b0c78", + "LIMO.mat": "5d6d305f6f41fc001a3151d8", + }, ) # these can't be in the registry file (mne/data/dataset_checksums.txt) # because of filename duplication hashes = dict( - S18={'Yr.mat': 'md5:87f883d442737971a80fc0a35d057e51', - 'LIMO.mat': 'md5:8b4879646f65d7876fa4adf2e40162c5'}, - S17={'Yr.mat': 'md5:7b667ec9eefd7a9996f61ae270e295ee', - 'LIMO.mat': 'md5:22eaca4e6fad54431fd61b307fc426b8'}, - S16={'Yr.mat': 'md5:c877afdb4897426421577e863a45921a', - 'LIMO.mat': 'md5:86672d7afbea1e8c39305bc3f852c8c2'}, - S15={'Yr.mat': 'md5:eea9e0140af598fefc08c886a6f05de5', - 'LIMO.mat': 'md5:aed5cb71ddbfd27c6a3ac7d3e613d07f'}, - S14={'Yr.mat': 'md5:8bd842cfd8588bd5d32e72fdbe70b66e', - 'LIMO.mat': 'md5:1e07d1f36f2eefad435a77530daf2680'}, - S13={'Yr.mat': 'md5:d7925d2af7288b8a5186dfb5dbb63d34', - 'LIMO.mat': 'md5:ba891015d2f9e447955fffa9833404ca'}, - S12={'Yr.mat': 'md5:0e1d05beaa4bf2726e0d0671b78fe41e', - 'LIMO.mat': 'md5:423fd479d71097995b6614ecb11df9ad'}, - S11={'Yr.mat': 'md5:1b0016fb9832e43b71f79c1992fcbbb1', - 'LIMO.mat': 'md5:1a281348c2a41ee899f42731d30cda70'}, - S10={'Yr.mat': 'md5:13c66f60e241b9a9cc576eaf1b55a417', - 'LIMO.mat': 'md5:3c4b41e221eb352a21bbef1a7e006f06'}, - S9={'Yr.mat': 'md5:3ae1d9c3a1d9325deea2f2dddd1ab507', - 'LIMO.mat': 'md5:5e204e2a4bcfe4f535b4b1af469b37f7'}, - S8={'Yr.mat': 'md5:7e9adbca4e03d8d7ce8ea07ccecdc8fd', - 'LIMO.mat': 'md5:88313c21d34428863590e586b2bc3408'}, - S7={'Yr.mat': 'md5:6b5290a6725ecebf1022d5d2789b186d', - 'LIMO.mat': 'md5:8c769219ebc14ce3f595063e84bfc0a9'}, - S6={'Yr.mat': 'md5:420c858a8340bf7c28910b7b0425dc5d', - 'LIMO.mat': 'md5:9cf4e1a405366d6bd0cc6d996e32fd63'}, - S5={'Yr.mat': 'md5:946436cfb474c8debae56ffb1685ecf3', - 'LIMO.mat': 'md5:241fac95d3a79d2cea081391fb7078bd'}, - S4={'Yr.mat': 'md5:c8216af78ac87b739e86e57b345cafdd', - 'LIMO.mat': 'md5:8e10ef36c2e075edc2f787581ba33459'}, - S3={'Yr.mat': 'md5:ff02e885b65b7b807146f259a30b1b5e', - 'LIMO.mat': 'md5:59b5fb3a9749003133608b5871309e2c'}, - S2={'Yr.mat': 'md5:a4329022e57fd07ceceb7d1735fd2718', - 'LIMO.mat': 'md5:98b284b567f2dd395c936366e404f2c6'}, - S1={'Yr.mat': 'md5:076c0ae78fb71d43409c1877707df30e', - 'LIMO.mat': 'md5:136c8cf89f8f111a11f531bd9fa6ae69'}, + S18={ + "Yr.mat": "md5:87f883d442737971a80fc0a35d057e51", + "LIMO.mat": "md5:8b4879646f65d7876fa4adf2e40162c5", + }, + S17={ + "Yr.mat": "md5:7b667ec9eefd7a9996f61ae270e295ee", + "LIMO.mat": "md5:22eaca4e6fad54431fd61b307fc426b8", + }, + S16={ + "Yr.mat": "md5:c877afdb4897426421577e863a45921a", + "LIMO.mat": "md5:86672d7afbea1e8c39305bc3f852c8c2", + }, + S15={ + "Yr.mat": "md5:eea9e0140af598fefc08c886a6f05de5", + "LIMO.mat": "md5:aed5cb71ddbfd27c6a3ac7d3e613d07f", + }, + S14={ + "Yr.mat": "md5:8bd842cfd8588bd5d32e72fdbe70b66e", + "LIMO.mat": "md5:1e07d1f36f2eefad435a77530daf2680", + }, + S13={ + "Yr.mat": "md5:d7925d2af7288b8a5186dfb5dbb63d34", + "LIMO.mat": "md5:ba891015d2f9e447955fffa9833404ca", + }, + S12={ + "Yr.mat": "md5:0e1d05beaa4bf2726e0d0671b78fe41e", + "LIMO.mat": "md5:423fd479d71097995b6614ecb11df9ad", + }, + S11={ + "Yr.mat": "md5:1b0016fb9832e43b71f79c1992fcbbb1", + "LIMO.mat": "md5:1a281348c2a41ee899f42731d30cda70", + }, + 
S10={ + "Yr.mat": "md5:13c66f60e241b9a9cc576eaf1b55a417", + "LIMO.mat": "md5:3c4b41e221eb352a21bbef1a7e006f06", + }, + S9={ + "Yr.mat": "md5:3ae1d9c3a1d9325deea2f2dddd1ab507", + "LIMO.mat": "md5:5e204e2a4bcfe4f535b4b1af469b37f7", + }, + S8={ + "Yr.mat": "md5:7e9adbca4e03d8d7ce8ea07ccecdc8fd", + "LIMO.mat": "md5:88313c21d34428863590e586b2bc3408", + }, + S7={ + "Yr.mat": "md5:6b5290a6725ecebf1022d5d2789b186d", + "LIMO.mat": "md5:8c769219ebc14ce3f595063e84bfc0a9", + }, + S6={ + "Yr.mat": "md5:420c858a8340bf7c28910b7b0425dc5d", + "LIMO.mat": "md5:9cf4e1a405366d6bd0cc6d996e32fd63", + }, + S5={ + "Yr.mat": "md5:946436cfb474c8debae56ffb1685ecf3", + "LIMO.mat": "md5:241fac95d3a79d2cea081391fb7078bd", + }, + S4={ + "Yr.mat": "md5:c8216af78ac87b739e86e57b345cafdd", + "LIMO.mat": "md5:8e10ef36c2e075edc2f787581ba33459", + }, + S3={ + "Yr.mat": "md5:ff02e885b65b7b807146f259a30b1b5e", + "LIMO.mat": "md5:59b5fb3a9749003133608b5871309e2c", + }, + S2={ + "Yr.mat": "md5:a4329022e57fd07ceceb7d1735fd2718", + "LIMO.mat": "md5:98b284b567f2dd395c936366e404f2c6", + }, + S1={ + "Yr.mat": "md5:076c0ae78fb71d43409c1877707df30e", + "LIMO.mat": "md5:136c8cf89f8f111a11f531bd9fa6ae69", + }, ) # create the download manager fetcher = pooch.create( path=subject_path, - base_url='', - version=None, # Data versioning is decoupled from MNE-Python version. + base_url="", + version=None, # Data versioning is decoupled from MNE-Python version. registry=hashes[subj], - urls={key: f'{root_url}{uuid}' for key, uuid in urls[subj].items()}, - retry_if_failed=2 # 2 retries = 3 total attempts + urls={key: f"{root_url}{uuid}" for key, uuid in urls[subj].items()}, + retry_if_failed=2, # 2 retries = 3 total attempts ) # use our logger level for pooch's logger too pooch.get_logger().setLevel(logger.getEffectiveLevel()) # fetch the data - for fname in ('LIMO.mat', 'Yr.mat'): - destination = op.join(subject_path, fname) - if force_update and op.isfile(destination): - os.remove(destination) + sz = 0 + for fname in ("LIMO.mat", "Yr.mat"): + destination = Path(subject_path, fname) + if destination.exists(): + if force_update: + destination.unlink() + else: + continue + if sz == 0: # log once + logger.info("Downloading LIMO data") # fetch the remote file (if local file missing or has hash mismatch) fetcher.fetch(fname=fname, downloader=downloader) + sz += destination.stat().st_size # update path in config if desired _do_path_update(path, update_path, config_key, name) + if sz > 0: + _log_time_size(t0, sz) return base_path @verbose -def load_data(subject, path=None, force_update=False, update_path=None, - verbose=None): +def load_data(subject, path=None, force_update=False, update_path=None, verbose=None): """Fetch subjects epochs data for the LIMO data set. Parameters @@ -206,49 +290,47 @@ def load_data(subject, path=None, force_update=False, update_path=None, The epochs. 
""" # noqa: E501 pd = _check_pandas_installed() - from scipy.io import loadmat - # subject in question if isinstance(subject, int) and 1 <= subject <= 18: - subj = 'S%i' % subject + subj = f"S{subject}" else: - raise ValueError('subject must be an int in the range from 1 to 18') + raise ValueError("subject must be an int in the range from 1 to 18") # set limo path, download and decompress files if not found limo_path = data_path(subject, path, force_update, update_path) # -- 1) import .mat files # epochs info - fname_info = op.join(limo_path, subj, 'LIMO.mat') + fname_info = op.join(limo_path, subj, "LIMO.mat") data_info = loadmat(fname_info) # number of epochs per condition - design = data_info['LIMO']['design'][0][0]['X'][0][0] - data_info = data_info['LIMO']['data'][0][0][0][0] + design = data_info["LIMO"]["design"][0][0]["X"][0][0] + data_info = data_info["LIMO"]["data"][0][0][0][0] # epochs data - fname_eeg = op.join(limo_path, subj, 'Yr.mat') + fname_eeg = op.join(limo_path, subj, "Yr.mat") data = loadmat(fname_eeg) # -- 2) get epochs information from structure # sampling rate - sfreq = data_info['sampling_rate'][0][0] + sfreq = data_info["sampling_rate"][0][0] # tmin and tmax - tmin = data_info['start'][0][0] + tmin = data_info["start"][0][0] # create events matrix sample = np.arange(len(design)) prev_id = np.zeros(len(design)) ev_id = design[:, 1] events = np.array([sample, prev_id, ev_id]).astype(int).T # event ids, such that Face B == 1 - event_id = {'Face/A': 0, 'Face/B': 1} + event_id = {"Face/A": 0, "Face/B": 1} # -- 3) extract channel labels from LIMO structure # get individual labels - labels = data_info['chanlocs']['labels'] + labels = data_info["chanlocs"]["labels"] labels = [label for label, *_ in labels[0]] # get montage - montage = make_standard_montage('biosemi128') + montage = make_standard_montage("biosemi128") # add external electrodes (e.g., eogs) - ch_names = montage.ch_names + ['EXG1', 'EXG2', 'EXG3', 'EXG4'] + ch_names = montage.ch_names + ["EXG1", "EXG2", "EXG3", "EXG4"] # match individual labels to labels in montage found_inds = [ind for ind, name in enumerate(ch_names) if name in labels] missing_chans = [name for name in ch_names if name not in labels] @@ -258,7 +340,7 @@ def load_data(subject, path=None, force_update=False, update_path=None, # data is stored as channels x time points x epochs # data['Yr'].shape # <-- see here # transpose to epochs x channels time points - data = np.transpose(data['Yr'], (2, 0, 1)) + data = np.transpose(data["Yr"], (2, 0, 1)) # initialize data in expected order temp_data = np.empty((data.shape[0], len(ch_names), data.shape[2])) # copy over the non-missing data @@ -275,14 +357,16 @@ def load_data(subject, path=None, force_update=False, update_path=None, info = create_info(ch_names, sfreq, types).set_montage(montage) # get faces and noise variables from design matrix event_list = list(events[:, 2]) - faces = ['B' if event else 'A' for event in event_list] + faces = ["B" if event else "A" for event in event_list] noise = list(design[:, 2]) # create epochs metadata - metadata = {'face': faces, 'phase-coherence': noise} + metadata = {"face": faces, "phase-coherence": noise} metadata = pd.DataFrame(metadata) # -- 6) Create custom epochs array - epochs = EpochsArray(data, info, events, tmin, event_id, metadata=metadata) - epochs.info['bads'] = missing_chans # missing channels are marked as bad. 
+    epochs = EpochsArray(
+        data, info, events, tmin, event_id, metadata=metadata, verbose=False
+    )
+    epochs.info["bads"] = missing_chans  # missing channels are marked as bad.

     return epochs
diff --git a/mne/datasets/misc/__init__.py b/mne/datasets/misc/__init__.py
index 884848d4817..25377ca9ceb 100644
--- a/mne/datasets/misc/__init__.py
+++ b/mne/datasets/misc/__init__.py
@@ -1,3 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 """MNE misc dataset."""

 from ._misc import data_path, _pytest_mark
diff --git a/mne/datasets/misc/_misc.py b/mne/datasets/misc/_misc.py
index 85f65332ad1..ba7fd2341b1 100644
--- a/mne/datasets/misc/_misc.py
+++ b/mne/datasets/misc/_misc.py
@@ -1,26 +1,31 @@
-# Authors: Alexandre Gramfort
-#          Martin Luessi
-#          Eric Larson
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 from ...utils import verbose
-from ..utils import has_dataset, _data_path_doc, _download_mne_dataset
+from ..utils import _data_path_doc, _download_mne_dataset, has_dataset


 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, *, verbose=None):  # noqa: D103
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
     return _download_mne_dataset(
-        name='misc', processor='untar', path=path,
-        force_update=force_update, update_path=update_path,
-        download=download)
+        name="misc",
+        processor="untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )


 def _pytest_mark():
     import pytest
+
     return pytest.mark.skipif(
-        not has_dataset(name='misc'), reason='Requires misc dataset')
+        not has_dataset(name="misc"), reason="Requires misc dataset"
+    )


-data_path.__doc__ = _data_path_doc.format(name='misc',
-                                          conf='MNE_DATASETS_MISC_PATH')
+data_path.__doc__ = _data_path_doc.format(name="misc", conf="MNE_DATASETS_MISC_PATH")
diff --git a/mne/datasets/mtrf/__init__.py b/mne/datasets/mtrf/__init__.py
index dffa76e0230..13828c026a8 100644
--- a/mne/datasets/mtrf/__init__.py
+++ b/mne/datasets/mtrf/__init__.py
@@ -1,3 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 """mTRF Dataset."""

 from .mtrf import data_path, get_version
diff --git a/mne/datasets/mtrf/mtrf.py b/mne/datasets/mtrf/mtrf.py
index bfc5cd0ba58..78c50027f43 100644
--- a/mne/datasets/mtrf/mtrf.py
+++ b/mne/datasets/mtrf/mtrf.py
@@ -1,26 +1,28 @@
-# Authors: Chris Holdgraf
-#
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 from ...utils import verbose
-from ..utils import (_data_path_doc,
-                     _get_version, _version_doc, _download_mne_dataset)
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc

-
-data_name = 'mtrf'
+data_name = "mtrf"


 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, *, verbose=None):  # noqa: D103
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
     return _download_mne_dataset(
-        name=data_name, processor='unzip', path=path,
-        force_update=force_update, update_path=update_path,
-        download=download)
+        name=data_name,
+        processor="unzip",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )


-data_path.__doc__ = _data_path_doc.format(name=data_name,
-                                          conf='MNE_DATASETS_MTRF_PATH')
+data_path.__doc__ = _data_path_doc.format(name=data_name, conf="MNE_DATASETS_MTRF_PATH")


 def get_version():  # noqa: D103
diff --git a/mne/datasets/multimodal/__init__.py b/mne/datasets/multimodal/__init__.py
index 753f0cf952c..7bc3e5571a5 100644
--- a/mne/datasets/multimodal/__init__.py
+++ b/mne/datasets/multimodal/__init__.py
@@ -1,3 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 """Multimodal dataset."""

 from .multimodal import data_path, get_version
diff --git a/mne/datasets/multimodal/multimodal.py b/mne/datasets/multimodal/multimodal.py
index 4ef0fd38efb..60aa7d21bfc 100644
--- a/mne/datasets/multimodal/multimodal.py
+++ b/mne/datasets/multimodal/multimodal.py
@@ -1,28 +1,32 @@
-# Authors: Alexandre Gramfort
-#          Martin Luessi
-#          Eric Larson
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 from ...utils import verbose
-from ..utils import (_data_path_doc, _get_version, _version_doc,
-                     _download_mne_dataset)
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc


 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, *, verbose=None):  # noqa: D103
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
     return _download_mne_dataset(
-        name='multimodal', processor='untar', path=path,
-        force_update=force_update, update_path=update_path,
-        download=download)
+        name="multimodal",
+        processor="untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )


-data_path.__doc__ = _data_path_doc.format(name='multimodal',
-                                          conf='MNE_DATASETS_MULTIMODAL_PATH')
+data_path.__doc__ = _data_path_doc.format(
+    name="multimodal", conf="MNE_DATASETS_MULTIMODAL_PATH"
+)


 def get_version():  # noqa: D103
-    return _get_version('multimodal')
+    return _get_version("multimodal")


-get_version.__doc__ = _version_doc.format(name='multimodal')
+get_version.__doc__ = _version_doc.format(name="multimodal")
diff --git a/mne/datasets/opm/__init__.py b/mne/datasets/opm/__init__.py
index 6ff15e6868c..a587c5f1006 100644
--- a/mne/datasets/opm/__init__.py
+++ b/mne/datasets/opm/__init__.py
@@ -1,3 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 """OPM dataset."""

 from .opm import data_path, get_version
diff --git a/mne/datasets/opm/opm.py b/mne/datasets/opm/opm.py
index 014e91f2029..a5245ea02d8 100644
--- a/mne/datasets/opm/opm.py
+++ b/mne/datasets/opm/opm.py
@@ -1,28 +1,30 @@
-# Authors: Alexandre Gramfort
-#          Martin Luessi
-#          Eric Larson
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 from ...utils import verbose
-from ..utils import (_data_path_doc, _get_version, _version_doc,
-                     _download_mne_dataset)
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc


 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, *, verbose=None):  # noqa: D103
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
     return _download_mne_dataset(
-        name='opm', processor='untar', path=path,
-        force_update=force_update, update_path=update_path,
-        download=download)
+        name="opm",
+        processor="untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )


-data_path.__doc__ = _data_path_doc.format(name='opm',
-                                          conf='MNE_DATASETS_OPML_PATH')
+data_path.__doc__ = _data_path_doc.format(name="opm", conf="MNE_DATASETS_OPML_PATH")


 def get_version():  # noqa: D103
-    return _get_version('opm')
+    return _get_version("opm")


-get_version.__doc__ = _version_doc.format(name='opm')
+get_version.__doc__ = _version_doc.format(name="opm")
diff --git a/mne/datasets/phantom_4dbti/__init__.py b/mne/datasets/phantom_4dbti/__init__.py
index 0d9323adc06..c5426171998 100644
--- a/mne/datasets/phantom_4dbti/__init__.py
+++ b/mne/datasets/phantom_4dbti/__init__.py
@@ -1,3 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 """Multimodal dataset."""

 from .phantom_4dbti import data_path, get_version
diff --git a/mne/datasets/phantom_4dbti/phantom_4dbti.py b/mne/datasets/phantom_4dbti/phantom_4dbti.py
index 2154dee99ce..59906a9c110 100644
--- a/mne/datasets/phantom_4dbti/phantom_4dbti.py
+++ b/mne/datasets/phantom_4dbti/phantom_4dbti.py
@@ -1,27 +1,32 @@
-# Authors: Alexandre Gramfort
-#
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 from ...utils import verbose
-from ..utils import (_data_path_doc, _get_version, _version_doc,
-                     _download_mne_dataset)
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc


 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, *, verbose=None):  # noqa: D103
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
     return _download_mne_dataset(
-        name='phantom_4dbti', processor='unzip', path=path,
-        force_update=force_update, update_path=update_path,
-        download=download)
+        name="phantom_4dbti",
+        processor="unzip",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )


 data_path.__doc__ = _data_path_doc.format(
-    name='phantom_4dbti', conf='MNE_DATASETS_PHANTOM_4DBTI_PATH')
+    name="phantom_4dbti", conf="MNE_DATASETS_PHANTOM_4DBTI_PATH"
+)


 def get_version():  # noqa: D103
-    return _get_version('phantom_4dbti')
+    return _get_version("phantom_4dbti")


-get_version.__doc__ = _version_doc.format(name='phantom_4dbti')
+get_version.__doc__ = _version_doc.format(name="phantom_4dbti")
diff --git a/mne/datasets/phantom_kernel/__init__.py b/mne/datasets/phantom_kernel/__init__.py
new file mode 100644
index 00000000000..ad4aecf0ff8
--- /dev/null
+++ b/mne/datasets/phantom_kernel/__init__.py
@@ -0,0 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""Multimodal dataset."""
+
+from .phantom_kernel import data_path, get_version
diff --git a/mne/datasets/phantom_kernel/phantom_kernel.py b/mne/datasets/phantom_kernel/phantom_kernel.py
new file mode 100644
index 00000000000..ed44a78977b
--- /dev/null
+++ b/mne/datasets/phantom_kernel/phantom_kernel.py
@@ -0,0 +1,32 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from ...utils import verbose
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc
+
+
+@verbose
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
+    return _download_mne_dataset(
+        name="phantom_kernel",
+        processor="nested_untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )
+
+
+data_path.__doc__ = _data_path_doc.format(
+    name="phantom_kernel", conf="MNE_DATASETS_PHANTOM_KERNEL_PATH"
+)
+
+
+def get_version():  # noqa: D103
+    return _get_version("phantom_kernel")
+
+
+get_version.__doc__ = _version_doc.format(name="phantom_kernel")
diff --git a/mne/datasets/phantom_kit/__init__.py b/mne/datasets/phantom_kit/__init__.py
new file mode 100644
index 00000000000..7fcc361e073
--- /dev/null
+++ b/mne/datasets/phantom_kit/__init__.py
@@ -0,0 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""KIT phantom dataset."""
+
+from .phantom_kit import data_path, get_version
diff --git a/mne/datasets/phantom_kit/phantom_kit.py b/mne/datasets/phantom_kit/phantom_kit.py
new file mode 100644
index 00000000000..150e08dc435
--- /dev/null
+++ b/mne/datasets/phantom_kit/phantom_kit.py
@@ -0,0 +1,32 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from ...utils import verbose
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc
+
+
+@verbose
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
+    return _download_mne_dataset(
+        name="phantom_kit",
+        processor="untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )
+
+
+data_path.__doc__ = _data_path_doc.format(
+    name="phantom_kit", conf="MNE_DATASETS_PHANTOM_KIT_PATH"
+)
+
+
+def get_version():  # noqa: D103
+    return _get_version("phantom_kit")
+
+
+get_version.__doc__ = _version_doc.format(name="phantom_kit")
diff --git a/mne/datasets/refmeg_noise/__init__.py b/mne/datasets/refmeg_noise/__init__.py
index 00460d173bb..7437aaad6a3 100644
--- a/mne/datasets/refmeg_noise/__init__.py
+++ b/mne/datasets/refmeg_noise/__init__.py
@@ -1,3 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 """MEG reference-noise data set."""

 from .refmeg_noise import data_path, get_version
diff --git a/mne/datasets/refmeg_noise/refmeg_noise.py b/mne/datasets/refmeg_noise/refmeg_noise.py
index 2027a31bacc..c6c24ff3942 100644
--- a/mne/datasets/refmeg_noise/refmeg_noise.py
+++ b/mne/datasets/refmeg_noise/refmeg_noise.py
@@ -1,26 +1,32 @@
-# Authors: Jeff Hanna
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 from ...utils import verbose
-from ..utils import (_data_path_doc, _get_version, _version_doc,
-                     _download_mne_dataset)
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc


 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, *, verbose=None):  # noqa: D103
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
     return _download_mne_dataset(
-        name='refmeg_noise', processor='unzip', path=path,
-        force_update=force_update, update_path=update_path,
-        download=download)
+        name="refmeg_noise",
+        processor="unzip",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )


 data_path.__doc__ = _data_path_doc.format(
-    name='refmeg_noise', conf='MNE_DATASETS_REFMEG_NOISE_PATH')
+    name="refmeg_noise", conf="MNE_DATASETS_REFMEG_NOISE_PATH"
+)


 def get_version():  # noqa: D103
-    return _get_version('refmeg_noise')
+    return _get_version("refmeg_noise")


-get_version.__doc__ = _version_doc.format(name='refmeg_noise')
+get_version.__doc__ = _version_doc.format(name="refmeg_noise")
diff --git a/mne/datasets/sample/__init__.py b/mne/datasets/sample/__init__.py
index c94c6d50a7f..2c465adb13c 100644
--- a/mne/datasets/sample/__init__.py
+++ b/mne/datasets/sample/__init__.py
@@ -1,3 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 """MNE sample dataset."""

 from .sample import data_path, get_version
diff --git a/mne/datasets/sample/sample.py b/mne/datasets/sample/sample.py
index 4876b7bc7f7..8dde8c67047 100644
--- a/mne/datasets/sample/sample.py
+++ b/mne/datasets/sample/sample.py
@@ -1,28 +1,32 @@
-# Authors: Alexandre Gramfort
-#          Martin Luessi
-#          Eric Larson
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 from ...utils import verbose
-from ..utils import (_data_path_doc, _get_version, _version_doc,
-                     _download_mne_dataset)
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc


 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, *, verbose=None):  # noqa: D103
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
     return _download_mne_dataset(
-        name='sample', processor='untar', path=path,
-        force_update=force_update, update_path=update_path,
-        download=download)
+        name="sample",
+        processor="untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )


-data_path.__doc__ = _data_path_doc.format(name='sample',
-                                          conf='MNE_DATASETS_SAMPLE_PATH')
+data_path.__doc__ = _data_path_doc.format(
+    name="sample", conf="MNE_DATASETS_SAMPLE_PATH"
+)


 def get_version():  # noqa: D103
-    return _get_version('sample')
+    return _get_version("sample")


-get_version.__doc__ = _version_doc.format(name='sample')
+get_version.__doc__ = _version_doc.format(name="sample")
diff --git a/mne/datasets/sleep_physionet/__init__.py b/mne/datasets/sleep_physionet/__init__.py
index 04536a93134..aff3f92d440 100644
--- a/mne/datasets/sleep_physionet/__init__.py
+++ b/mne/datasets/sleep_physionet/__init__.py
@@ -1 +1,5 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 from . import age, temazepam, _utils
diff --git a/mne/datasets/sleep_physionet/_utils.py b/mne/datasets/sleep_physionet/_utils.py
index 85e6088bfcb..06c9928d1a5 100644
--- a/mne/datasets/sleep_physionet/_utils.py
+++ b/mne/datasets/sleep_physionet/_utils.py
@@ -1,48 +1,50 @@
-# -*- coding: utf-8 -*-
-# Authors: Alexandre Gramfort
-#          Joan Massich
-#
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
import os import os.path as op import numpy as np -from ...utils import (verbose, _TempDir, _check_pandas_installed, - _on_missing) -from ..utils import _get_path +from ...utils import _check_pandas_installed, _on_missing, _TempDir, verbose +from ..utils import _downloader_params, _get_path -AGE_SLEEP_RECORDS = op.join(op.dirname(__file__), 'age_records.csv') -TEMAZEPAM_SLEEP_RECORDS = op.join(op.dirname(__file__), - 'temazepam_records.csv') +AGE_SLEEP_RECORDS = op.join(op.dirname(__file__), "age_records.csv") +TEMAZEPAM_SLEEP_RECORDS = op.join(op.dirname(__file__), "temazepam_records.csv") -TEMAZEPAM_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/ST-subjects.xls' # noqa: E501 -TEMAZEPAM_RECORDS_URL_SHA1 = 'f52fffe5c18826a2bd4c5d5cb375bb4a9008c885' +TEMAZEPAM_RECORDS_URL = ( + "https://physionet.org/physiobank/database/sleep-edfx/ST-subjects.xls" # noqa: E501 +) +TEMAZEPAM_RECORDS_URL_SHA1 = "f52fffe5c18826a2bd4c5d5cb375bb4a9008c885" -AGE_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/SC-subjects.xls' # noqa: E501 -AGE_RECORDS_URL_SHA1 = '0ba6650892c5d33a8e2b3f62ce1cc9f30438c54f' +AGE_RECORDS_URL = "https://physionet.org/physiobank/database/sleep-edfx/SC-subjects.xls" +AGE_RECORDS_URL_SHA1 = "0ba6650892c5d33a8e2b3f62ce1cc9f30438c54f" -sha1sums_fname = op.join(op.dirname(__file__), 'SHA1SUMS') +sha1sums_fname = op.join(op.dirname(__file__), "SHA1SUMS") def _fetch_one(fname, hashsum, path, force_update, base_url): import pooch + # Fetch the file - url = base_url + '/' + fname + url = base_url + "/" + fname destination = op.join(path, fname) - if not op.isfile(destination) or force_update: - if op.isfile(destination): - os.remove(destination) - if not op.isdir(op.dirname(destination)): - os.makedirs(op.dirname(destination)) - pooch.retrieve( - url=url, - known_hash=f"sha1:{hashsum}", - path=path, - fname=fname - ) - return destination + if op.isfile(destination) and not force_update: + return destination, False + if op.isfile(destination): + os.remove(destination) + if not op.isdir(op.dirname(destination)): + os.makedirs(op.dirname(destination)) + downloader = pooch.HTTPDownloader(**_downloader_params()) + pooch.retrieve( + url=url, + known_hash=f"sha1:{hashsum}", + path=path, + downloader=downloader, + fname=fname, + ) + return destination, True @verbose @@ -73,10 +75,10 @@ def _data_path(path=None, verbose=None): ---------- .. footbibliography:: """ # noqa: E501 - key = 'PHYSIONET_SLEEP_PATH' - name = 'PHYSIONET_SLEEP' + key = "PHYSIONET_SLEEP_PATH" + name = "PHYSIONET_SLEEP" path = _get_path(path, key, name) - return op.join(path, 'physionet-sleep-data') + return op.join(path, "physionet-sleep-data") def _update_sleep_temazepam_records(fname=TEMAZEPAM_SLEEP_RECORDS): @@ -87,53 +89,70 @@ def _update_sleep_temazepam_records(fname=TEMAZEPAM_SLEEP_RECORDS): tmp = _TempDir() # Download subjects info. - subjects_fname = op.join(tmp, 'ST-subjects.xls') + subjects_fname = op.join(tmp, "ST-subjects.xls") + downloader = pooch.HTTPDownloader(**_downloader_params()) pooch.retrieve( url=TEMAZEPAM_RECORDS_URL, known_hash=f"sha1:{TEMAZEPAM_RECORDS_URL_SHA1}", path=tmp, - fname=op.basename(subjects_fname) + downloader=downloader, + fname=op.basename(subjects_fname), ) # Load and Massage the checksums. 
- sha1_df = pd.read_csv(sha1sums_fname, sep=' ', header=None, - names=['sha', 'fname'], engine='python') - select_age_records = (sha1_df.fname.str.startswith('ST') & - sha1_df.fname.str.endswith('edf')) + sha1_df = pd.read_csv( + sha1sums_fname, sep=" ", header=None, names=["sha", "fname"], engine="python" + ) + select_age_records = sha1_df.fname.str.startswith( + "ST" + ) & sha1_df.fname.str.endswith("edf") sha1_df = sha1_df[select_age_records] - sha1_df['id'] = [name[:6] for name in sha1_df.fname] + sha1_df["id"] = [name[:6] for name in sha1_df.fname] # Load and massage the data. data = pd.read_excel(subjects_fname, header=[0, 1]) - data = data.set_index(('Subject - age - sex', 'Nr')) - data.index.name = 'subject' + data = data.set_index(("Subject - age - sex", "Nr")) + data.index.name = "subject" data.columns.names = [None, None] - data = (data.set_index([('Subject - age - sex', 'Age'), - ('Subject - age - sex', 'M1/F2')], append=True) - .stack(level=0).reset_index()) + data = ( + data.set_index( + [("Subject - age - sex", "Age"), ("Subject - age - sex", "M1/F2")], + append=True, + ) + # TODO VERSION future_stack goes away w/ Pandas 3.0 + .stack(level=0, future_stack=True) + .reset_index() + ) - data = data.rename(columns={('Subject - age - sex', 'Age'): 'age', - ('Subject - age - sex', 'M1/F2'): 'sex', - 'level_3': 'drug'}) - data['id'] = ['ST7{:02d}{:1d}'.format(s, n) - for s, n in zip(data.subject, data['night nr'])] + data = data.rename( + columns={ + ("Subject - age - sex", "Age"): "age", + ("Subject - age - sex", "M1/F2"): "sex", + "level_3": "drug", + } + ) + data["id"] = [f"ST7{s:02d}{n:1d}" for s, n in zip(data.subject, data["night nr"])] - data = pd.merge(sha1_df, data, how='outer', on='id') - data['record type'] = (data.fname.str.split('-', expand=True)[1] - .str.split('.', expand=True)[0] - .astype('category')) + data = pd.merge(sha1_df, data, how="outer", on="id") + data["record type"] = ( + data.fname.str.split("-", expand=True)[1] + .str.split(".", expand=True)[0] + .astype("category") + ) - data = data.set_index(['id', 'subject', 'age', 'sex', 'drug', - 'lights off', 'night nr', 'record type']).unstack() - data.columns = [l1 + '_' + l2 for l1, l2 in data.columns] - data = data.reset_index().drop(columns=['id']) + data = data.set_index( + ["id", "subject", "age", "sex", "drug", "lights off", "night nr", "record type"] + ).unstack() + data.columns = [l1 + "_" + l2 for l1, l2 in data.columns] + data = data.reset_index().drop(columns=["id"]) - data['sex'] = (data.sex.astype('category') - .cat.rename_categories({1: 'male', 2: 'female'})) + data["sex"] = data.sex.astype("category").cat.rename_categories( + {1: "male", 2: "female"} + ) - data['drug'] = data['drug'].str.split(expand=True)[0] - data['subject_orig'] = data['subject'] - data['subject'] = data.index // 2 # to make sure index is from 0 to 21 + data["drug"] = data["drug"].str.split(expand=True)[0] + data["subject_orig"] = data["subject"] + data["subject"] = data.index // 2 # to make sure index is from 0 to 21 # Save the data. data.to_csv(fname, index=False) @@ -142,51 +161,60 @@ def _update_sleep_temazepam_records(fname=TEMAZEPAM_SLEEP_RECORDS): def _update_sleep_age_records(fname=AGE_SLEEP_RECORDS): """Help function to download Physionet's age dataset records.""" import pooch + pd = _check_pandas_installed() tmp = _TempDir() # Download subjects info. 
- subjects_fname = op.join(tmp, 'SC-subjects.xls') + subjects_fname = op.join(tmp, "SC-subjects.xls") + downloader = pooch.HTTPDownloader(**_downloader_params()) pooch.retrieve( url=AGE_RECORDS_URL, known_hash=f"sha1:{AGE_RECORDS_URL_SHA1}", path=tmp, - fname=op.basename(subjects_fname) + downloader=downloader, + fname=op.basename(subjects_fname), ) # Load and Massage the checksums. - sha1_df = pd.read_csv(sha1sums_fname, sep=' ', header=None, - names=['sha', 'fname'], engine='python') - select_age_records = (sha1_df.fname.str.startswith('SC') & - sha1_df.fname.str.endswith('edf')) + sha1_df = pd.read_csv( + sha1sums_fname, sep=" ", header=None, names=["sha", "fname"], engine="python" + ) + select_age_records = sha1_df.fname.str.startswith( + "SC" + ) & sha1_df.fname.str.endswith("edf") sha1_df = sha1_df[select_age_records] - sha1_df['id'] = [name[:6] for name in sha1_df.fname] + sha1_df["id"] = [name[:6] for name in sha1_df.fname] # Load and massage the data. data = pd.read_excel(subjects_fname) - data = data.rename(index=str, columns={'sex (F=1)': 'sex', - 'LightsOff': 'lights off'}) - data['sex'] = (data.sex.astype('category') - .cat.rename_categories({1: 'female', 2: 'male'})) + data = data.rename( + index=str, columns={"sex (F=1)": "sex", "LightsOff": "lights off"} + ) + data["sex"] = data.sex.astype("category").cat.rename_categories( + {1: "female", 2: "male"} + ) - data['id'] = ['SC4{:02d}{:1d}'.format(s, n) - for s, n in zip(data.subject, data.night)] + data["id"] = [f"SC4{s:02d}{n:1d}" for s, n in zip(data.subject, data.night)] - data = data.set_index('id').join(sha1_df.set_index('id')).dropna() + data = data.set_index("id").join(sha1_df.set_index("id")).dropna() - data['record type'] = (data.fname.str.split('-', expand=True)[1] - .str.split('.', expand=True)[0] - .astype('category')) + data["record type"] = ( + data.fname.str.split("-", expand=True)[1] + .str.split(".", expand=True)[0] + .astype("category") + ) - data = data.reset_index().drop(columns=['id']) - data = data[['subject', 'night', 'record type', 'age', 'sex', 'lights off', - 'sha', 'fname']] + data = data.reset_index().drop(columns=["id"]) + data = data[ + ["subject", "night", "record type", "age", "sex", "lights off", "sha", "fname"] + ] # Save the data. data.to_csv(fname, index=False) -def _check_subjects(subjects, n_subjects, missing=None, on_missing='raise'): +def _check_subjects(subjects, n_subjects, missing=None, on_missing="raise"): """Check whether subjects are available. Parameters @@ -208,8 +236,10 @@ def _check_subjects(subjects, n_subjects, missing=None, on_missing='raise'): valid_subjects = np.setdiff1d(valid_subjects, missing) unknown_subjects = np.setdiff1d(subjects, valid_subjects) if unknown_subjects.size > 0: - subjects_list = ', '.join([str(s) for s in unknown_subjects]) - msg = (f'This dataset contains subjects 0 to {n_subjects - 1} with ' - f'missing subjects {missing}. Unknown subjects: ' - f'{subjects_list}.') + subjects_list = ", ".join([str(s) for s in unknown_subjects]) + msg = ( + f"This dataset contains subjects 0 to {n_subjects - 1} with " + f"missing subjects {missing}. Unknown subjects: " + f"{subjects_list}." + ) _on_missing(on_missing, msg) diff --git a/mne/datasets/sleep_physionet/age.py b/mne/datasets/sleep_physionet/age.py index 2f544035a24..b5ea1764946 100644 --- a/mne/datasets/sleep_physionet/age.py +++ b/mne/datasets/sleep_physionet/age.py @@ -1,23 +1,38 @@ -# -*- coding: utf-8 -*- -# Authors: Alexandre Gramfort -# Joan Massich -# -# License: BSD Style. 
+# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +import time import numpy as np from ...utils import verbose -from ._utils import _fetch_one, _data_path, _on_missing, AGE_SLEEP_RECORDS -from ._utils import _check_subjects +from ..utils import _log_time_size +from ._utils import ( + AGE_SLEEP_RECORDS, + _check_subjects, + _data_path, + _fetch_one, + _on_missing, +) data_path = _data_path # expose _data_path(..) as data_path(..) -BASE_URL = 'https://physionet.org/physiobank/database/sleep-edfx/sleep-cassette/' # noqa: E501 +BASE_URL = "https://physionet.org/physiobank/database/sleep-edfx/sleep-cassette/" @verbose -def fetch_data(subjects, recording=(1, 2), path=None, force_update=False, - base_url=BASE_URL, on_missing='raise', *, verbose=None): # noqa: D301, E501 +def fetch_data( + subjects, + recording=(1, 2), + path=None, + force_update=False, + base_url=BASE_URL, + on_missing="raise", + *, + verbose=None, +): # noqa: D301, E501 """Get paths to local copies of PhysioNet Polysomnography dataset files. This will fetch data from the publicly available subjects from PhysioNet's @@ -80,43 +95,56 @@ def fetch_data(subjects, recording=(1, 2), path=None, force_update=False, ---------- .. footbibliography:: """ # noqa: E501 - records = np.loadtxt(AGE_SLEEP_RECORDS, - skiprows=1, - delimiter=',', - usecols=(0, 1, 2, 6, 7), - dtype={'names': ('subject', 'record', 'type', 'sha', - 'fname'), - 'formats': (' 0: + _log_time_size(t0, sz) return fnames diff --git a/mne/datasets/sleep_physionet/temazepam.py b/mne/datasets/sleep_physionet/temazepam.py index f4981a7cc25..443b4057f55 100644 --- a/mne/datasets/sleep_physionet/temazepam.py +++ b/mne/datasets/sleep_physionet/temazepam.py @@ -1,23 +1,25 @@ -# -*- coding: utf-8 -*- -# Authors: Alexandre Gramfort -# Joan Massich -# -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +import time import numpy as np from ...utils import verbose -from ._utils import _fetch_one, _data_path, TEMAZEPAM_SLEEP_RECORDS -from ._utils import _check_subjects +from ..utils import _log_time_size +from ._utils import TEMAZEPAM_SLEEP_RECORDS, _check_subjects, _data_path, _fetch_one data_path = _data_path # expose _data_path(..) as data_path(..) -BASE_URL = 'https://physionet.org/physiobank/database/sleep-edfx/sleep-telemetry/' # noqa: E501 +BASE_URL = "https://physionet.org/physiobank/database/sleep-edfx/sleep-telemetry/" # noqa: E501 @verbose -def fetch_data(subjects, path=None, force_update=False, base_url=BASE_URL, *, - verbose=None): +def fetch_data( + subjects, path=None, force_update=False, base_url=BASE_URL, *, verbose=None +): """Get paths to local copies of PhysioNet Polysomnography dataset files. This will fetch data from the publicly available subjects from PhysioNet's @@ -68,15 +70,24 @@ def fetch_data(subjects, path=None, force_update=False, base_url=BASE_URL, *, ---------- .. 
footbibliography:: """ - records = np.loadtxt(TEMAZEPAM_SLEEP_RECORDS, - skiprows=1, - delimiter=',', - usecols=(0, 3, 6, 7, 8, 9), - dtype={'names': ('subject', 'record', 'hyp sha', - 'psg sha', 'hyp fname', 'psg fname'), - 'formats': (' 0: + _log_time_size(t0, sz) return fnames diff --git a/mne/datasets/sleep_physionet/tests/test_physionet.py b/mne/datasets/sleep_physionet/tests/test_physionet.py index 3f754b863ac..5cb315f9064 100644 --- a/mne/datasets/sleep_physionet/tests/test_physionet.py +++ b/mne/datasets/sleep_physionet/tests/test_physionet.py @@ -1,59 +1,42 @@ -# Authors: Alexandre Gramfort -# Joan Massich -# -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import os.path as op -import numpy as np -import pytest +from pathlib import Path -from numpy.testing import assert_array_equal -import pooch +import pytest -from mne.utils import requires_good_network -from mne.utils import requires_pandas, requires_version from mne.datasets.sleep_physionet import age, temazepam -from mne.datasets.sleep_physionet._utils import _update_sleep_temazepam_records -from mne.datasets.sleep_physionet._utils import _update_sleep_age_records -from mne.datasets.sleep_physionet._utils import AGE_SLEEP_RECORDS -from mne.datasets.sleep_physionet._utils import TEMAZEPAM_SLEEP_RECORDS +from mne.datasets.sleep_physionet._utils import ( + AGE_SLEEP_RECORDS, + TEMAZEPAM_SLEEP_RECORDS, + _update_sleep_age_records, + _update_sleep_temazepam_records, +) +from mne.utils import requires_good_network -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def physionet_tmpdir(tmp_path_factory): """Fixture exposing a temporary directory for testing.""" - return str(tmp_path_factory.mktemp('physionet_files')) - + return str(tmp_path_factory.mktemp("physionet_files")) -class _FakeFetch: - def __init__(self): - self.call_args_list = list() - - def __call__(self, *args, **kwargs): - self.call_args_list.append((args, kwargs)) - - @property - def call_count(self): - return len(self.call_args_list) - - -def _keep_basename_only(path_structure): - return np.vectorize(op.basename)(np.array(path_structure)) +def _keep_basename_only(paths): + return [Path(p).name for p in paths] def _get_expected_url(http://github.com/name): - base = 'https://physionet.org/physiobank/database/sleep-edfx/' - midle = 'sleep-cassette/' if name.startswith('SC') else 'sleep-telemetry/' - return base + midle + '/' + name + base = "https://physionet.org/physiobank/database/sleep-edfx/" + middle = "sleep-cassette/" if name.startswith("SC") else "sleep-telemetry/" + return base + middle + "/" + name def _get_expected_path(base, name): - return op.join(base, name) + return Path(base, name) -def _check_mocked_function_calls(mocked_func, call_fname_hash_pairs, - base_path): +def _check_mocked_function_calls(mocked_func, call_fname_hash_pairs, base_path): # Check mocked_func has been called the right amount of times. assert mocked_func.call_count == len(call_fname_hash_pairs) @@ -61,148 +44,171 @@ def _check_mocked_function_calls(mocked_func, call_fname_hash_pairs, # order. 
for idx, current in enumerate(call_fname_hash_pairs): _, call_kwargs = mocked_func.call_args_list[idx] - hash_type, hash = call_kwargs['known_hash'].split(':') - assert call_kwargs['url'] == _get_expected_url(http://github.com/current['name']) - assert op.join(call_kwargs['path'], call_kwargs['fname']) == \ - _get_expected_path(base_path, current['name']) - assert hash == current['hash'] - assert hash_type == 'sha1' + hash_type, hash_ = call_kwargs["known_hash"].split(":") + assert call_kwargs["url"] == _get_expected_url(http://github.com/current[%22name%22]), idx + assert Path(call_kwargs["path"], call_kwargs["fname"]) == _get_expected_path( + base_path, current["name"] + ) + assert hash_ == current["hash"] + assert hash_type == "sha1" @pytest.mark.timeout(60) @pytest.mark.xfail(strict=False) @requires_good_network -@requires_pandas -@requires_version('xlrd', '0.9') def test_run_update_age_records(tmp_path): """Test Sleep Physionet URL handling.""" - import pandas as pd + pd = pytest.importorskip("pandas") + pytest.importorskip("xlrd", "0.9") + fname = tmp_path / "records.csv" _update_sleep_age_records(fname) data = pd.read_csv(fname) pd.testing.assert_frame_equal(data, pd.read_csv(AGE_SLEEP_RECORDS)) -@pytest.mark.parametrize('subject', [39, 68, 69, 78, 79, 83]) -def test_sleep_physionet_age_missing_subjects(physionet_tmpdir, subject, - download_is_error): +@pytest.mark.parametrize("subject", [39, 68, 69, 78, 79, 83]) +def test_sleep_physionet_age_missing_subjects( + physionet_tmpdir, subject, download_is_error +): """Test handling of missing subjects in Sleep Physionet age fetcher.""" - with pytest.raises( - ValueError, match='This dataset contains subjects 0 to 82'): + with pytest.raises(ValueError, match="This dataset contains subjects 0 to 82"): age.fetch_data( - subjects=[subject], recording=[1], on_missing='raise', - path=physionet_tmpdir) - with pytest.warns(RuntimeWarning, - match='This dataset contains subjects 0 to 82'): + subjects=[subject], recording=[1], on_missing="raise", path=physionet_tmpdir + ) + with pytest.warns(RuntimeWarning, match="This dataset contains subjects 0 to 82"): age.fetch_data( - subjects=[subject], recording=[1], on_missing='warn', - path=physionet_tmpdir) + subjects=[subject], recording=[1], on_missing="warn", path=physionet_tmpdir + ) paths = age.fetch_data( - subjects=[subject], recording=[1], on_missing='ignore', - path=physionet_tmpdir) + subjects=[subject], recording=[1], on_missing="ignore", path=physionet_tmpdir + ) assert paths == [] -@pytest.mark.parametrize('subject,recording', [(13, 2), (36, 1), (52, 1)]) -def test_sleep_physionet_age_missing_recordings(physionet_tmpdir, subject, - recording, download_is_error): +@pytest.mark.parametrize("subject,recording", [(13, 2), (36, 1), (52, 1)]) +def test_sleep_physionet_age_missing_recordings( + physionet_tmpdir, subject, recording, download_is_error +): """Test handling of missing recordings in Sleep Physionet age fetcher.""" with pytest.raises( - ValueError, match=f'Requested recording {recording} for subject'): - age.fetch_data(subjects=[subject], recording=[recording], - on_missing='raise', path=physionet_tmpdir) - with pytest.warns(RuntimeWarning, - match=f'Requested recording {recording} for subject'): - age.fetch_data(subjects=[subject], recording=[recording], - on_missing='warn', path=physionet_tmpdir) - paths = age.fetch_data(subjects=[subject], recording=[recording], - on_missing='ignore', path=physionet_tmpdir) + ValueError, match=f"Requested recording {recording} for 
subject" + ): + age.fetch_data( + subjects=[subject], + recording=[recording], + on_missing="raise", + path=physionet_tmpdir, + ) + with pytest.warns( + RuntimeWarning, match=f"Requested recording {recording} for subject" + ): + age.fetch_data( + subjects=[subject], + recording=[recording], + on_missing="warn", + path=physionet_tmpdir, + ) + paths = age.fetch_data( + subjects=[subject], + recording=[recording], + on_missing="ignore", + path=physionet_tmpdir, + ) assert paths == [] -def test_sleep_physionet_age(physionet_tmpdir, monkeypatch, download_is_error): +def test_sleep_physionet_age(physionet_tmpdir, fake_retrieve): """Test Sleep Physionet URL handling.""" - # check download_is_error patching - with pytest.raises(AssertionError, match='Test should not download'): - age.fetch_data(subjects=[0], recording=[1], path=physionet_tmpdir) - # then patch - my_func = _FakeFetch() - monkeypatch.setattr(pooch, 'retrieve', my_func) - paths = age.fetch_data(subjects=[0], recording=[1], path=physionet_tmpdir) - assert_array_equal(_keep_basename_only(paths), - [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf']]) - - paths = age.fetch_data(subjects=[0, 1], recording=[1], - path=physionet_tmpdir) - assert_array_equal(_keep_basename_only(paths), - [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf'], - ['SC4011E0-PSG.edf', 'SC4011EH-Hypnogram.edf']]) - - paths = age.fetch_data(subjects=[0], recording=[1, 2], - path=physionet_tmpdir) - assert_array_equal(_keep_basename_only(paths), - [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf'], - ['SC4002E0-PSG.edf', 'SC4002EC-Hypnogram.edf']]) + assert _keep_basename_only(paths[0]) == [ + "SC4001E0-PSG.edf", + "SC4001EC-Hypnogram.edf", + ] + + paths = age.fetch_data(subjects=[0, 1], recording=[1], path=physionet_tmpdir) + assert _keep_basename_only(paths[0]) == [ + "SC4001E0-PSG.edf", + "SC4001EC-Hypnogram.edf", + ] + assert _keep_basename_only(paths[1]) == [ + "SC4011E0-PSG.edf", + "SC4011EH-Hypnogram.edf", + ] + + paths = age.fetch_data(subjects=[0], recording=[1, 2], path=physionet_tmpdir) + assert _keep_basename_only(paths[0]) == [ + "SC4001E0-PSG.edf", + "SC4001EC-Hypnogram.edf", + ] + assert _keep_basename_only(paths[1]) == [ + "SC4002E0-PSG.edf", + "SC4002EC-Hypnogram.edf", + ] EXPECTED_CALLS = ( - {'name': 'SC4001E0-PSG.edf', - 'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'}, - {'name': 'SC4001EC-Hypnogram.edf', - 'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'}, - {'name': 'SC4001E0-PSG.edf', - 'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'}, - {'name': 'SC4001EC-Hypnogram.edf', - 'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'}, - {'name': 'SC4011E0-PSG.edf', - 'hash': '4d17451f7847355bcab17584de05e7e1df58c660'}, - {'name': 'SC4011EH-Hypnogram.edf', - 'hash': 'd582a3cbe2db481a362af890bc5a2f5ca7c878dc'}, - {'name': 'SC4001E0-PSG.edf', - 'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'}, - {'name': 'SC4001EC-Hypnogram.edf', - 'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'}, - {'name': 'SC4002E0-PSG.edf', - 'hash': 'c6b6d7a8605cc7e7602b6028ee77f6fbf5f7581d'}, - {'name': 'SC4002EC-Hypnogram.edf', - 'hash': '386230188a3552b1fc90bba0fb7476ceaca174b6'}) + { + "name": "SC4001E0-PSG.edf", + "hash": "adabd3b01fc7bb75c523a974f38ee3ae4e57b40f", + }, + { + "name": "SC4001EC-Hypnogram.edf", + "hash": "21c998eadc8b1e3ea6727d3585186b8f76e7e70b", + }, + { + "name": "SC4011E0-PSG.edf", + "hash": "4d17451f7847355bcab17584de05e7e1df58c660", + }, + { + "name": "SC4011EH-Hypnogram.edf", + "hash": "d582a3cbe2db481a362af890bc5a2f5ca7c878dc", + }, + { + "name": 
"SC4002E0-PSG.edf", + "hash": "c6b6d7a8605cc7e7602b6028ee77f6fbf5f7581d", + }, + { + "name": "SC4002EC-Hypnogram.edf", + "hash": "386230188a3552b1fc90bba0fb7476ceaca174b6", + }, + ) base_path = age.data_path(path=physionet_tmpdir) - _check_mocked_function_calls(my_func, EXPECTED_CALLS, base_path) + _check_mocked_function_calls(fake_retrieve, EXPECTED_CALLS, base_path) @pytest.mark.xfail(strict=False) @requires_good_network -@requires_pandas -@requires_version('xlrd', '0.9') def test_run_update_temazepam_records(tmp_path): """Test Sleep Physionet URL handling.""" - import pandas as pd - + pd = pytest.importorskip("pandas") + pytest.importorskip("xlrd", "0.9") fname = tmp_path / "records.csv" _update_sleep_temazepam_records(fname) data = pd.read_csv(fname) - pd.testing.assert_frame_equal( - data, pd.read_csv(TEMAZEPAM_SLEEP_RECORDS)) + pd.testing.assert_frame_equal(data, pd.read_csv(TEMAZEPAM_SLEEP_RECORDS)) -def test_sleep_physionet_temazepam(physionet_tmpdir, monkeypatch): +def test_sleep_physionet_temazepam(physionet_tmpdir, fake_retrieve): """Test Sleep Physionet URL handling.""" - my_func = _FakeFetch() - monkeypatch.setattr(pooch, 'retrieve', my_func) - paths = temazepam.fetch_data(subjects=[0], path=physionet_tmpdir) - assert_array_equal(_keep_basename_only(paths), - [['ST7011J0-PSG.edf', 'ST7011JP-Hypnogram.edf']]) + assert _keep_basename_only(paths[0]) == [ + "ST7011J0-PSG.edf", + "ST7011JP-Hypnogram.edf", + ] EXPECTED_CALLS = ( - {'name': 'ST7011J0-PSG.edf', - 'hash': 'b9d11484126ebff1884034396d6a20c62c0ef48d'}, - {'name': 'ST7011JP-Hypnogram.edf', - 'hash': 'ff28e5e01296cefed49ae0c27cfb3ebc42e710bf'}) + { + "name": "ST7011J0-PSG.edf", + "hash": "b9d11484126ebff1884034396d6a20c62c0ef48d", + }, + { + "name": "ST7011JP-Hypnogram.edf", + "hash": "ff28e5e01296cefed49ae0c27cfb3ebc42e710bf", + }, + ) base_path = temazepam.data_path(path=physionet_tmpdir) - _check_mocked_function_calls(my_func, EXPECTED_CALLS, base_path) + _check_mocked_function_calls(fake_retrieve, EXPECTED_CALLS, base_path) - with pytest.raises( - ValueError, match='This dataset contains subjects 0 to 21'): + with pytest.raises(ValueError, match="This dataset contains subjects 0 to 21"): paths = temazepam.fetch_data(subjects=[22], path=physionet_tmpdir) diff --git a/mne/datasets/somato/__init__.py b/mne/datasets/somato/__init__.py index 4777bbe8d5c..24ad4524de0 100644 --- a/mne/datasets/somato/__init__.py +++ b/mne/datasets/somato/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Somatosensory dataset.""" from .somato import data_path, get_version diff --git a/mne/datasets/somato/somato.py b/mne/datasets/somato/somato.py index db69ac46658..177a642fb2e 100644 --- a/mne/datasets/somato/somato.py +++ b/mne/datasets/somato/somato.py @@ -1,28 +1,32 @@ -# Authors: Alexandre Gramfort -# Martin Luessi -# Eric Larson -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 

 from ...utils import verbose
-from ..utils import (_data_path_doc, _get_version, _version_doc,
-                     _download_mne_dataset)
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc


 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, *, verbose=None):  # noqa: D103
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
     return _download_mne_dataset(
-        name='somato', processor='untar', path=path,
-        force_update=force_update, update_path=update_path,
-        download=download)
+        name="somato",
+        processor="untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )


-data_path.__doc__ = _data_path_doc.format(name='somato',
-                                          conf='MNE_DATASETS_SOMATO_PATH')
+data_path.__doc__ = _data_path_doc.format(
+    name="somato", conf="MNE_DATASETS_SOMATO_PATH"
+)


 def get_version():  # noqa: D103
-    return _get_version('somato')
+    return _get_version("somato")


-get_version.__doc__ = _version_doc.format(name='somato')
+get_version.__doc__ = _version_doc.format(name="somato")
diff --git a/mne/datasets/spm_face/__init__.py b/mne/datasets/spm_face/__init__.py
index dfe2edd5486..a49e3432abe 100644
--- a/mne/datasets/spm_face/__init__.py
+++ b/mne/datasets/spm_face/__init__.py
@@ -1,3 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 """SPM face dataset."""

 from .spm_data import data_path, has_spm_data, get_version, requires_spm_data
diff --git a/mne/datasets/spm_face/spm_data.py b/mne/datasets/spm_face/spm_data.py
index 2c71fc144c3..6653796e357 100644
--- a/mne/datasets/spm_face/spm_data.py
+++ b/mne/datasets/spm_face/spm_data.py
@@ -1,40 +1,47 @@
-# Authors: Denis Engemann
-#
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 from functools import partial

-from ...utils import verbose, get_config
-from ..utils import (has_dataset, _data_path_doc, _get_version,
-                     _version_doc, _download_mne_dataset)
+from ...utils import get_config, verbose
+from ..utils import (
+    _data_path_doc,
+    _download_mne_dataset,
+    _get_version,
+    _version_doc,
+    has_dataset,
+)

-
-has_spm_data = partial(has_dataset, name='spm')
+has_spm_data = partial(has_dataset, name="spm")


 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, *, verbose=None):  # noqa: D103
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
     return _download_mne_dataset(
-        name='spm', processor='untar', path=path,
-        force_update=force_update, update_path=update_path,
-        download=download)
+        name="spm",
+        processor="untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )


-data_path.__doc__ = _data_path_doc.format(name='spm',
-                                          conf='MNE_DATASETS_SPM_DATA_PATH')
+data_path.__doc__ = _data_path_doc.format(name="spm", conf="MNE_DATASETS_SPM_DATA_PATH")


 def get_version():  # noqa: D103
-    return _get_version('spm')
+    return _get_version("spm")


-get_version.__doc__ = _version_doc.format(name='spm')
+get_version.__doc__ = _version_doc.format(name="spm")


 def _skip_spm_data():
-    skip_testing = (get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') ==
-                    'true')
+    skip_testing = get_config("MNE_SKIP_TESTING_DATASET_TESTS", "false") == "true"
     skip = skip_testing or not has_spm_data()
     return skip
@@ -42,5 +49,5 @@ def _skip_spm_data():
 def requires_spm_data(func):
     """Skip testing data test."""
     import pytest
-    return pytest.mark.skipif(_skip_spm_data(),
-                              reason='Requires spm dataset')(func)
+
+    return pytest.mark.skipif(_skip_spm_data(), reason="Requires spm dataset")(func)
diff --git a/mne/datasets/ssvep/__init__.py b/mne/datasets/ssvep/__init__.py
index a7a3d1db0a8..18229cf176c 100644
--- a/mne/datasets/ssvep/__init__.py
+++ b/mne/datasets/ssvep/__init__.py
@@ -1,3 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 """SSVEP dataset."""

 from .ssvep import data_path, get_version
diff --git a/mne/datasets/ssvep/ssvep.py b/mne/datasets/ssvep/ssvep.py
index d914ec93819..9f13ace8789 100644
--- a/mne/datasets/ssvep/ssvep.py
+++ b/mne/datasets/ssvep/ssvep.py
@@ -1,26 +1,30 @@
-# Authors: Dominik Welke
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 from ...utils import verbose
-from ..utils import (_data_path_doc, _get_version, _version_doc,
-                     _download_mne_dataset)
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc


 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, *, verbose=None):  # noqa: D103
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
     return _download_mne_dataset(
-        name='ssvep', processor='unzip', path=path,
-        force_update=force_update, update_path=update_path,
-        download=download)
+        name="ssvep",
+        processor="unzip",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )


-data_path.__doc__ = _data_path_doc.format(name='ssvep',
-                                          conf='MNE_DATASETS_SSVEP_PATH')
+data_path.__doc__ = _data_path_doc.format(name="ssvep", conf="MNE_DATASETS_SSVEP_PATH")


 def get_version():  # noqa: D103
-    return _get_version('ssvep')
+    return _get_version("ssvep")


-get_version.__doc__ = _version_doc.format(name='ssvep')
+get_version.__doc__ = _version_doc.format(name="ssvep")
diff --git a/mne/datasets/testing/__init__.py b/mne/datasets/testing/__init__.py
index b56c5f36ed4..07ad8768029 100644
--- a/mne/datasets/testing/__init__.py
+++ b/mne/datasets/testing/__init__.py
@@ -1,4 +1,13 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
 """MNE testing dataset."""

-from ._testing import (data_path, requires_testing_data, get_version,
-                       _pytest_param, _pytest_mark)
+from ._testing import (
+    data_path,
+    requires_testing_data,
+    get_version,
+    _pytest_param,
+    _pytest_mark,
+)
diff --git a/mne/datasets/testing/_testing.py b/mne/datasets/testing/_testing.py
index 7332fb5a8e5..61d454898be 100644
--- a/mne/datasets/testing/_testing.py
+++ b/mne/datasets/testing/_testing.py
@@ -1,47 +1,55 @@
-# Authors: Alexandre Gramfort
-#          Martin Luessi
-#          Eric Larson
-# License: BSD Style.
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
from functools import partial -from ...utils import verbose, get_config -from ..utils import (has_dataset, _data_path_doc, _get_version, - _version_doc, _download_mne_dataset) +from ...utils import get_config, verbose +from ..utils import ( + _data_path_doc, + _download_mne_dataset, + _get_version, + _version_doc, + has_dataset, +) -has_testing_data = partial(has_dataset, name='testing') +has_testing_data = partial(has_dataset, name="testing") @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, *, verbose=None): # noqa: D103 +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 # Make sure we don't do something stupid - if download and \ - get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') == 'true': - raise RuntimeError('Cannot download data if skipping is forced') + if download and get_config("MNE_SKIP_TESTING_DATASET_TESTS", "false") == "true": + raise RuntimeError("Cannot download data if skipping is forced") return _download_mne_dataset( - name='testing', processor='untar', path=path, - force_update=force_update, update_path=update_path, - download=download) + name="testing", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) -data_path.__doc__ = _data_path_doc.format(name='testing', - conf='MNE_DATASETS_TESTING_PATH') +data_path.__doc__ = _data_path_doc.format( + name="testing", conf="MNE_DATASETS_TESTING_PATH" +) def get_version(): # noqa: D103 - return _get_version('testing') + return _get_version("testing") -get_version.__doc__ = _version_doc.format(name='testing') +get_version.__doc__ = _version_doc.format(name="testing") # Allow forcing of testing dataset skip (for Debian tests) using: # `make test-no-testing-data` def _skip_testing_data(): - skip_testing = (get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') == - 'true') + skip_testing = get_config("MNE_SKIP_TESTING_DATASET_TESTS", "false") == "true" skip = skip_testing or not has_testing_data() return skip @@ -53,16 +61,17 @@ def requires_testing_data(func): def _pytest_param(*args, **kwargs): if len(args) == 0: - args = ('testing_data',) + args = ("testing_data",) import pytest + # turn anything that uses testing data into an auto-skipper by # setting params=[testing._pytest_param()], or by parametrizing functions # with testing._pytest_param(whatever) - kwargs['marks'] = kwargs.get('marks', list()) + [_pytest_mark()] + kwargs["marks"] = kwargs.get("marks", list()) + [_pytest_mark()] return pytest.param(*args, **kwargs) def _pytest_mark(): import pytest - return pytest.mark.skipif( - _skip_testing_data(), reason='Requires testing dataset') + + return pytest.mark.skipif(_skip_testing_data(), reason="Requires testing dataset") diff --git a/mne/datasets/tests/__init__.py b/mne/datasets/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/datasets/tests/__init__.py +++ b/mne/datasets/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/datasets/tests/test_datasets.py b/mne/datasets/tests/test_datasets.py index 8709b934326..a7f985392e7 100644 --- a/mne/datasets/tests/test_datasets.py +++ b/mne/datasets/tests/test_datasets.py @@ -1,248 +1,279 @@ -from functools import partial -import os -from os import path as op +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ import re import shutil import zipfile +from functools import partial +from pathlib import Path import pooch import pytest +import mne.datasets._fsaverage.base from mne import datasets, read_labels_from_annot, write_labels_to_annot -from mne.datasets import (testing, fetch_infant_template, fetch_phantom, - fetch_dataset) +from mne.datasets import fetch_dataset, fetch_infant_template, fetch_phantom, testing from mne.datasets._fsaverage.base import _set_montage_coreg_path from mne.datasets._infant import base as infant_base from mne.datasets._phantom import base as phantom_base from mne.datasets.utils import _manifest_check_download +from mne.utils import ( + ArgvSetter, + _pl, + catch_logging, + get_subjects_dir, + hashfunc, + requires_good_network, + use_log_level, +) -from mne.utils import (requires_good_network, - get_subjects_dir, ArgvSetter, _pl, use_log_level, - catch_logging, hashfunc) - - -subjects_dir = testing.data_path(download=False) / 'subjects' +subjects_dir = testing.data_path(download=False) / "subjects" def test_datasets_basic(tmp_path, monkeypatch): """Test simple dataset functions.""" # XXX 'hf_sef' and 'misc' do not conform to these standards - for dname in ('sample', 'somato', 'spm_face', 'testing', 'opm', - 'bst_raw', 'bst_auditory', 'bst_resting', 'multimodal', - 'bst_phantom_ctf', 'bst_phantom_elekta', 'kiloword', - 'mtrf', 'phantom_4dbti', - 'visual_92_categories', 'fieldtrip_cmc'): - if dname.startswith('bst'): + for dname in ( + "sample", + "somato", + "spm_face", + "testing", + "opm", + "bst_raw", + "bst_auditory", + "bst_resting", + "multimodal", + "bst_phantom_ctf", + "bst_phantom_elekta", + "kiloword", + "mtrf", + "phantom_4dbti", + "visual_92_categories", + "fieldtrip_cmc", + ): + if dname.startswith("bst"): dataset = getattr(datasets.brainstorm, dname) else: dataset = getattr(datasets, dname) - if str(dataset.data_path(download=False)) != '.': + if str(dataset.data_path(download=False)) != ".": assert isinstance(dataset.get_version(), str) assert datasets.has_dataset(dname) else: assert dataset.get_version() is None assert not datasets.has_dataset(dname) - print('%s: %s' % (dname, datasets.has_dataset(dname))) - tempdir = str(tmp_path) + print(f"{dname}: {datasets.has_dataset(dname)}") # Explicitly test one that isn't preset (given the config) - monkeypatch.setenv('MNE_DATASETS_SAMPLE_PATH', tempdir) + monkeypatch.setenv("MNE_DATASETS_SAMPLE_PATH", str(tmp_path)) dataset = datasets.sample - assert str(dataset.data_path(download=False)) == '.' - assert dataset.get_version() != '' + assert str(dataset.data_path(download=False)) == "." 
+ assert dataset.get_version() != "" assert dataset.get_version() is None # don't let it read from the config file to get the directory, # force it to look for the default - monkeypatch.setenv('_MNE_FAKE_HOME_DIR', tempdir) - monkeypatch.delenv('SUBJECTS_DIR', raising=False) - assert (str(datasets.utils._get_path(None, 'foo', 'bar')) == - op.join(tempdir, 'mne_data')) + monkeypatch.setenv("_MNE_FAKE_HOME_DIR", str(tmp_path)) + monkeypatch.delenv("SUBJECTS_DIR", raising=False) + assert datasets.utils._get_path(None, "foo", "bar") == tmp_path / "mne_data" assert get_subjects_dir(None) is None _set_montage_coreg_path() sd = get_subjects_dir() - assert sd.name.endswith('MNE-fsaverage-data') - monkeypatch.setenv('MNE_DATA', str(tmp_path / 'foo')) - with pytest.raises(FileNotFoundError, match='as specified by MNE_DAT'): + assert sd.name.endswith("MNE-fsaverage-data") + monkeypatch.setenv("MNE_DATA", str(tmp_path / "foo")) + with pytest.raises(FileNotFoundError, match="as specified by MNE_DAT"): testing.data_path(download=False) + def noop(*args, **kwargs): + return + + monkeypatch.setattr(mne.datasets._fsaverage.base, "_manifest_check_download", noop) + sd_2 = datasets.fetch_fsaverage() + assert sd / "fsaverage" == sd_2 + @requires_good_network def test_downloads(tmp_path, monkeypatch, capsys): """Test dataset URL and version handling.""" # Try actually downloading a dataset - kwargs = dict(path=str(tmp_path), verbose=True) + kwargs = dict(path=tmp_path, verbose=True) # XXX we shouldn't need to disable capsys here, but there's a pytest bug # that we're hitting (https://github.com/pytest-dev/pytest/issues/5997) # now that we use pooch with capsys.disabled(): - with pytest.raises(RuntimeError, match='Do not download .* in tests'): + with pytest.raises(RuntimeError, match="Do not download .* in tests"): path = datasets._fake.data_path(update_path=False, **kwargs) monkeypatch.setattr( - datasets.utils, - '_MODULES_TO_ENSURE_DOWNLOAD_IS_FALSE_IN_TESTS', ()) + datasets.utils, "_MODULES_TO_ENSURE_DOWNLOAD_IS_FALSE_IN_TESTS", () + ) path = datasets._fake.data_path(update_path=False, **kwargs) - assert op.isdir(path) - assert op.isfile(op.join(path, 'bar')) - assert not datasets.has_dataset('fake') # not in the desired path + assert path.is_dir() + assert (path / "bar").is_file() + assert not datasets.has_dataset("fake") # not in the desired path assert datasets._fake.get_version() is None - assert datasets.utils._get_version('fake') is None - monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmp_path)) - with pytest.warns(RuntimeWarning, match='non-standard config'): + assert datasets.utils._get_version("fake") is None + monkeypatch.setenv("_MNE_FAKE_HOME_DIR", str(tmp_path)) + with pytest.warns(RuntimeWarning, match="non-standard config"): new_path = datasets._fake.data_path(update_path=True, **kwargs) assert path == new_path out, _ = capsys.readouterr() - assert 'Downloading' not in out + assert "Downloading" not in out # No version: shown as existing but unknown version - assert datasets.has_dataset('fake') + assert datasets.has_dataset("fake") # XXX logic bug, should be "unknown" - assert datasets._fake.get_version() == '0.0' + assert datasets._fake.get_version() == "0.0" # With a version but no required one: shown as existing and gives version - fname = tmp_path / 'foo' / 'version.txt' - with open(fname, 'w') as fid: - fid.write('0.1') - assert datasets.has_dataset('fake') - assert datasets._fake.get_version() == '0.1' + fname = tmp_path / "foo" / "version.txt" + with open(fname, "w") as fid: + 
fid.write("0.1") + assert datasets.has_dataset("fake") + assert datasets._fake.get_version() == "0.1" datasets._fake.data_path(download=False, **kwargs) out, _ = capsys.readouterr() - assert 'out of date' not in out + assert "out of date" not in out # With the required version: shown as existing with the required version - monkeypatch.setattr(datasets._fetch, '_FAKE_VERSION', '0.1') - assert datasets.has_dataset('fake') - assert datasets._fake.get_version() == '0.1' + monkeypatch.setattr(datasets._fetch, "_FAKE_VERSION", "0.1") + assert datasets.has_dataset("fake") + assert datasets._fake.get_version() == "0.1" datasets._fake.data_path(download=False, **kwargs) out, _ = capsys.readouterr() - assert 'out of date' not in out - monkeypatch.setattr(datasets._fetch, '_FAKE_VERSION', '0.2') + assert "out of date" not in out + monkeypatch.setattr(datasets._fetch, "_FAKE_VERSION", "0.2") # With an older version: # 1. Marked as not actually being present - assert not datasets.has_dataset('fake') + assert not datasets.has_dataset("fake") # 2. Will try to update when `data_path` gets called, with logged message - want_msg = 'Correctly trying to download newer version' + want_msg = "Correctly trying to download newer version" def _error_download(self, fname, downloader, processor): url = self.get_url(http://github.com/fname) full_path = self.abspath / fname - assert 'foo.tgz' in url + assert "foo.tgz" in url assert str(tmp_path) in str(full_path) raise RuntimeError(want_msg) - monkeypatch.setattr(pooch.Pooch, 'fetch', _error_download) + monkeypatch.setattr(pooch.Pooch, "fetch", _error_download) with pytest.raises(RuntimeError, match=want_msg): datasets._fake.data_path(**kwargs) out, _ = capsys.readouterr() - assert re.match(r'.* 0\.1 .*out of date.* 0\.2.*', out, re.MULTILINE), out + assert re.match(r".* 0\.1 .*out of date.* 0\.2.*", out, re.MULTILINE), out # Hash mismatch suggestion # https://mne.discourse.group/t/fsaverage-hash-value-mismatch/4663/3 - want_msg = 'MD5 hash of downloaded file (MNE-sample-data-processed.tar.gz) does not match the known hash: expected md5:e8f30c4516abdc12a0c08e6bae57409c but got a9dfc7e8843fd7f8a928901e12fb3d25. Deleted download for safety. The downloaded file may have been corrupted or the known hash may be outdated.' # noqa: E501 + want_msg = "MD5 hash of downloaded file (MNE-sample-data-processed.tar.gz) does not match the known hash: expected md5:e8f30c4516abdc12a0c08e6bae57409c but got a9dfc7e8843fd7f8a928901e12fb3d25. Deleted download for safety. The downloaded file may have been corrupted or the known hash may be outdated." 
# noqa: E501 def _error_download_2(self, fname, downloader, processor): url = self.get_url(http://github.com/fname) full_path = self.abspath / fname - assert 'foo.tgz' in url + assert "foo.tgz" in url assert str(tmp_path) in str(full_path) raise ValueError(want_msg) - shutil.rmtree(tmp_path / 'foo') - monkeypatch.setattr(pooch.Pooch, 'fetch', _error_download_2) - with pytest.raises(ValueError, match='.*known hash.*force_update=True.*'): + shutil.rmtree(tmp_path / "foo") + monkeypatch.setattr(pooch.Pooch, "fetch", _error_download_2) + with pytest.raises(ValueError, match=".*known hash.*force_update=True.*"): datasets._fake.data_path(download=True, force_update=True, **kwargs) -@pytest.mark.slowtest +# Okay to xfail this one because CircleCI downloads + uses the parcellations, +# so we'll know if they break +@pytest.mark.xfail(reason="Figshare blocks access from CIs sometimes") +@pytest.mark.ultraslowtest # not really ultraslow, but flakes out a lot @testing.requires_testing_data -@requires_good_network def test_fetch_parcellations(tmp_path): """Test fetching parcellations.""" - pytest.importorskip('nibabel') - this_subjects_dir = str(tmp_path) - os.mkdir(op.join(this_subjects_dir, 'fsaverage')) - os.mkdir(op.join(this_subjects_dir, 'fsaverage', 'label')) - os.mkdir(op.join(this_subjects_dir, 'fsaverage', 'surf')) - for hemi in ('lh', 'rh'): + pytest.importorskip("nibabel") + this_subjects_dir = tmp_path + fsaverage_dir = this_subjects_dir / "fsaverage" + (fsaverage_dir / "label").mkdir(parents=True) + (fsaverage_dir / "surf").mkdir() + for hemi in ("lh", "rh"): shutil.copyfile( - op.join(subjects_dir, 'fsaverage', 'surf', '%s.white' % hemi), - op.join(this_subjects_dir, 'fsaverage', 'surf', '%s.white' % hemi)) + subjects_dir / "fsaverage" / "surf" / f"{hemi}.white", + fsaverage_dir / "surf" / f"{hemi}.white", + ) # speed up by prenteding we have one of them - with open(op.join(this_subjects_dir, 'fsaverage', 'label', - 'lh.aparc_sub.annot'), 'wb'): + with open(fsaverage_dir / "label" / "lh.aparc_sub.annot", "wb"): pass datasets.fetch_aparc_sub_parcellation(subjects_dir=this_subjects_dir) - with ArgvSetter(('--accept-hcpmmp-license',)): + with ArgvSetter(("--accept-hcpmmp-license",)): datasets.fetch_hcp_mmp_parcellation(subjects_dir=this_subjects_dir) - for hemi in ('lh', 'rh'): - assert op.isfile(op.join(this_subjects_dir, 'fsaverage', 'label', - '%s.aparc_sub.annot' % hemi)) + for hemi in ("lh", "rh"): + assert (fsaverage_dir / "label" / f"{hemi}.aparc_sub.annot").is_file() # test our annot round-trips here - kwargs = dict(subject='fsaverage', hemi='both', sort=False, - subjects_dir=this_subjects_dir) - labels = read_labels_from_annot(parc='HCPMMP1', **kwargs) + kwargs = dict( + subject="fsaverage", hemi="both", sort=False, subjects_dir=this_subjects_dir + ) + labels = read_labels_from_annot(parc="HCPMMP1", **kwargs) write_labels_to_annot( - labels, parc='HCPMMP1_round', - table_name='./left.fsaverage164.label.gii', **kwargs) - orig = op.join(this_subjects_dir, 'fsaverage', 'label', 'lh.HCPMMP1.annot') + labels, + parc="HCPMMP1_round", + table_name="./left.fsaverage164.label.gii", + **kwargs, + ) + orig = fsaverage_dir / "label" / "lh.HCPMMP1.annot" first = hashfunc(orig) - new = orig[:-6] + '_round.annot' + new = str(orig)[:-6] + "_round.annot" second = hashfunc(new) assert first == second -_zip_fnames = ['foo/foo.txt', 'foo/bar.txt', 'foo/baz.txt'] +_zip_fnames = ["foo/foo.txt", "foo/bar.txt", "foo/baz.txt"] -def _fake_zip_fetch(url, path, fname, known_hash): - fname = 
op.join(path, fname) - with zipfile.ZipFile(fname, 'w') as zipf: - with zipf.open('foo/', 'w'): +def _fake_zip_fetch(url, path, fname, *args, **kwargs): + path = Path(path) + assert isinstance(fname, str) + fname = path / fname + with zipfile.ZipFile(fname, "w") as zipf: + with zipf.open("foo/", "w"): pass for fname in _zip_fnames: - with zipf.open(fname, 'w'): + with zipf.open(fname, "w"): pass -@pytest.mark.parametrize('n_have', range(len(_zip_fnames))) +@pytest.mark.parametrize("n_have", range(len(_zip_fnames))) def test_manifest_check_download(tmp_path, n_have, monkeypatch): """Test our manifest downloader.""" - monkeypatch.setattr(pooch, 'retrieve', _fake_zip_fetch) - destination = op.join(str(tmp_path), 'empty') - manifest_path = op.join(str(tmp_path), 'manifest.txt') - with open(manifest_path, 'w') as fid: + monkeypatch.setattr(pooch, "retrieve", _fake_zip_fetch) + destination = tmp_path / "empty" + manifest_path = tmp_path / "manifest.txt" + with open(manifest_path, "w") as fid: for fname in _zip_fnames: - fid.write('%s\n' % fname) + fid.write(f"{fname}\n") assert n_have in range(len(_zip_fnames) + 1) - assert not op.isdir(destination) + assert not destination.is_file() if n_have > 0: - os.makedirs(op.join(destination, 'foo')) - assert op.isdir(op.join(destination, 'foo')) + (destination / "foo").mkdir(parents=True) + assert (destination / "foo").is_dir() for fname in _zip_fnames: - assert not op.isfile(op.join(destination, fname)) + assert not (destination / fname).is_file() for fname in _zip_fnames[:n_have]: - with open(op.join(destination, fname), 'w'): + with open(destination / fname, "w"): pass with catch_logging() as log: with use_log_level(True): # we mock the pooch.retrieve so these are not used - url = hash_ = '' + url = hash_ = "" _manifest_check_download(manifest_path, destination, url, hash_) log = log.getvalue() n_missing = 3 - n_have - assert ('%d file%s missing from' % (n_missing, _pl(n_missing))) in log - for want in ('Extracting missing', 'Successfully '): + assert (f"{n_missing} file{_pl(n_missing)} missing from") in log + for want in ("Extracting missing", "Successfully "): if n_missing > 0: assert want in log else: assert want not in log - assert op.isdir(destination) + assert (destination).is_dir() for fname in _zip_fnames: - assert op.isfile(op.join(destination, fname)) + assert (destination / fname).is_file() -def _fake_mcd(manifest_path, destination, url, hash_, name=None, - fake_files=False): +def _fake_mcd(manifest_path, destination, url, hash_, name=None, fake_files=False): if name is None: - name = url.split('/')[-1].split('.')[0] + name = url.split("/")[-1].split(".")[0] assert name in url assert name in str(destination) - assert name in manifest_path + assert name in str(manifest_path) assert len(hash_) == 32 if fake_files: with open(manifest_path) as fid: @@ -250,18 +281,18 @@ def _fake_mcd(manifest_path, destination, url, hash_, name=None, path = path.strip() if not path: continue - fname = op.join(destination, path) - os.makedirs(op.dirname(fname), exist_ok=True) - with open(fname, 'wb'): + fname = destination / path + fname.parent.mkdir(exist_ok=True) + with open(fname, "wb"): pass def test_infant(tmp_path, monkeypatch): """Test fetch_infant_template.""" - monkeypatch.setattr(infant_base, '_manifest_check_download', _fake_mcd) - fetch_infant_template('12mo', subjects_dir=tmp_path) - with pytest.raises(ValueError, match='Invalid value for'): - fetch_infant_template('0mo', subjects_dir=tmp_path) + monkeypatch.setattr(infant_base, 
"_manifest_check_download", _fake_mcd) + fetch_infant_template("12mo", subjects_dir=tmp_path) + with pytest.raises(ValueError, match="Invalid value for"): + fetch_infant_template("0mo", subjects_dir=tmp_path) def test_phantom(tmp_path, monkeypatch): @@ -270,21 +301,24 @@ def test_phantom(tmp_path, monkeypatch): # an actual download here. But it doesn't seem worth it given that # CircleCI will at least test the VectorView one, and this file should # not change often. - monkeypatch.setattr(phantom_base, '_manifest_check_download', - partial(_fake_mcd, name='phantom_otaniemi', - fake_files=True)) - fetch_phantom('otaniemi', subjects_dir=tmp_path) - assert op.isfile(tmp_path / 'phantom_otaniemi' / 'mri' / 'T1.mgz') + monkeypatch.setattr( + phantom_base, + "_manifest_check_download", + partial(_fake_mcd, name="phantom_otaniemi", fake_files=True), + ) + fetch_phantom("otaniemi", subjects_dir=tmp_path) + assert (tmp_path / "phantom_otaniemi" / "mri" / "T1.mgz").is_file() +@requires_good_network def test_fetch_uncompressed_file(tmp_path): """Test downloading an uncompressed file with our fetch function.""" dataset_dict = dict( - dataset_name='license', - url=('https://raw.githubusercontent.com/mne-tools/mne-python/main/' - 'LICENSE.txt'), - archive_name='LICENSE.foo', - folder_name=op.join(tmp_path, 'foo'), - hash=None) + dataset_name="license", + url="https://raw.githubusercontent.com/mne-tools/mne-python/main/LICENSE.txt", + archive_name="LICENSE.foo", + folder_name=tmp_path / "foo", + hash=None, + ) fetch_dataset(dataset_dict, path=None, force_update=True) - assert (tmp_path / 'foo' / 'LICENSE.foo').is_file() + assert (tmp_path / "foo" / "LICENSE.foo").is_file() diff --git a/mne/datasets/ucl_opm_auditory/__init__.py b/mne/datasets/ucl_opm_auditory/__init__.py index 4269d12b0c6..94f389a51db 100644 --- a/mne/datasets/ucl_opm_auditory/__init__.py +++ b/mne/datasets/ucl_opm_auditory/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """fNIRS motor dataset.""" from .ucl_opm_auditory import data_path, get_version diff --git a/mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py b/mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py index e43443d1480..a9f6f410af4 100644 --- a/mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py +++ b/mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py @@ -1,27 +1,31 @@ -# Authors: Eric Larson -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
from ...utils import verbose -from ..utils import (_data_path_doc, _get_version, _version_doc, - _download_mne_dataset) +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc - -_NAME = 'ucl_opm_auditory' -_PROCESSOR = 'unzip' +_NAME = "ucl_opm_auditory" +_PROCESSOR = "unzip" @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, *, verbose=None): # noqa: D103 +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 return _download_mne_dataset( - name=_NAME, processor=_PROCESSOR, path=path, - force_update=force_update, update_path=update_path, - download=download) + name=_NAME, + processor=_PROCESSOR, + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) data_path.__doc__ = _data_path_doc.format( name=_NAME, - conf=f'MNE_DATASETS_{_NAME.upper()}_PATH', + conf=f"MNE_DATASETS_{_NAME.upper()}_PATH", ) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index e03d179cfc6..8e8d4fb0db1 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -1,31 +1,36 @@ -# Authors: Alexandre Gramfort -# Martin Luessi -# Eric Larson -# Denis Egnemann -# Stefan Appelhoff -# Adam Li -# Daniel McCloy -# -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from collections import OrderedDict +import glob import importlib import inspect +import logging import os import os.path as op -from pathlib import Path import sys -import zipfile import tempfile +import time +import zipfile +from collections import OrderedDict +from pathlib import Path +from typing import cast import numpy as np -from .config import _hcp_mmp_license_text, MNE_DATASETS -from ..label import read_labels_from_annot, Label, write_labels_to_annot -from ..utils import (get_config, set_config, logger, _validate_type, - verbose, get_subjects_dir, _pl, _safe_input) -from ..utils.docs import docdict, _docformat - +from ..label import Label, read_labels_from_annot, write_labels_to_annot +from ..utils import ( + _pl, + _safe_input, + _validate_type, + get_config, + get_subjects_dir, + logger, + set_config, + verbose, +) +from ..utils.docs import _docformat, docdict +from .config import MNE_DATASETS, _hcp_mmp_license_text _data_path_doc = """Get path to local copy of {name} dataset. @@ -56,10 +61,10 @@ path : instance of Path Path to {name} dataset directory. 
""" -_data_path_doc_accept = _data_path_doc.split('%(verbose)s') -_data_path_doc_accept[-1] = '%(verbose)s' + _data_path_doc_accept[-1] -_data_path_doc_accept.insert(1, ' %(accept)s') -_data_path_doc_accept = ''.join(_data_path_doc_accept) +_data_path_doc_accept = _data_path_doc.split("%(verbose)s") +_data_path_doc_accept[-1] = "%(verbose)s" + _data_path_doc_accept[-1] +_data_path_doc_accept.insert(1, " %(accept)s") +_data_path_doc_accept = "".join(_data_path_doc_accept) _data_path_doc = _docformat(_data_path_doc, docdict) _data_path_doc_accept = _docformat(_data_path_doc_accept, docdict) @@ -75,69 +80,90 @@ def _dataset_version(path, name): """Get the version of the dataset.""" - ver_fname = op.join(path, 'version.txt') + ver_fname = op.join(path, "version.txt") if op.exists(ver_fname): - with open(ver_fname, 'r') as fid: + with open(ver_fname) as fid: version = fid.readline().strip() # version is on first line else: - logger.debug(f'Version file missing: {ver_fname}') + logger.debug(f"Version file missing: {ver_fname}") # Sample dataset versioning was introduced after 0.3 # SPM dataset was introduced with 0.7 - versions = dict(sample='0.7', spm='0.3') - version = versions.get(name, '0.0') + versions = dict(sample="0.7", spm="0.3") + version = versions.get(name, "0.0") return version +@verbose +def default_path(*, verbose=None): + """Get the default MNE_DATA path. + + Parameters + ---------- + %(verbose)s + + Returns + ------- + data_path : instance of Path + Path to the default MNE_DATA directory. + """ + return _get_path(None, None, None) + + def _get_path(path, key, name): """Get a dataset path.""" # 1. Input - _validate_type(path, ('path-like', None), path) + _validate_type(path, ("path-like", None), path) if path is not None: - return path + return Path(path).expanduser() # 2. get_config(key) — unless key is None or "" (special get_config values) # 3. get_config('MNE_DATA') - path = get_config(key or 'MNE_DATA', get_config('MNE_DATA')) + path = get_config(key or "MNE_DATA", get_config("MNE_DATA")) if path is not None: path = Path(path).expanduser() if not path.exists(): - msg = (f"Download location {path} as specified by MNE_DATA does " - f"not exist. Either create this directory manually and try " - f"again, or set MNE_DATA to an existing directory.") + msg = ( + f"Download location {path} as specified by MNE_DATA does " + f"not exist. Either create this directory manually and try " + f"again, or set MNE_DATA to an existing directory." + ) raise FileNotFoundError(msg) return path # 4. ~/mne_data (but use a fake home during testing so we don't # unnecessarily create ~/mne_data) - logger.info('Using default location ~/mne_data for %s...' 
% name) - path = op.join(os.getenv('_MNE_FAKE_HOME_DIR', - op.expanduser("~")), 'mne_data') - if not op.exists(path): - logger.info('Creating ~/mne_data') + extra = f" for {name}" if name else "" + logger.info(f"Using default location ~/mne_data{extra}...") + path = Path(os.getenv("_MNE_FAKE_HOME_DIR", "~")).expanduser() / "mne_data" + if not path.is_dir(): + logger.info(f"Creating {path}") try: - os.mkdir(path) + path.mkdir() except OSError: - raise OSError("User does not have write permissions " - "at '%s', try giving the path as an " - "argument to data_path() where user has " - "write permissions, for ex:data_path" - "('/home/xyz/me2/')" % (path)) - return Path(path) + raise OSError( + "User does not have write permissions " + f"at '{path}', try giving the path as an " + "argument to data_path() where user has " + "write permissions, for ex:data_path" + "('/home/xyz/me2/')" + ) + return path def _do_path_update(path, update_path, key, name): """Update path.""" path = op.abspath(path) - identical = get_config(key, '', use_env=False) == path + identical = get_config(key, "", use_env=False) == path if not identical: if update_path is None: update_path = True - if '--update-dataset-path' in sys.argv: - answer = 'y' + if "--update-dataset-path" in sys.argv: + answer = "y" else: - msg = ('Do you want to set the path:\n %s\nas the default ' - '%s dataset path in the mne-python config [y]/n? ' - % (path, name)) - answer = _safe_input(msg, alt='pass update_path=True') - if answer.lower() == 'n': + msg = ( + f"Do you want to set the path:\n {path}\nas the default {name} " + "dataset path in the mne-python config [y]/n? " + ) + answer = _safe_input(msg, alt="pass update_path=True") + if answer.lower() == "n": update_path = False if update_path: @@ -147,14 +173,15 @@ def _do_path_update(path, update_path, key, name): # This is meant to be semi-public: let packages like mne-bids use it to make # sure they don't accidentally set download=True in their tests, too -_MODULES_TO_ENSURE_DOWNLOAD_IS_FALSE_IN_TESTS = ('mne',) +_MODULES_TO_ENSURE_DOWNLOAD_IS_FALSE_IN_TESTS = ("mne",) def _check_in_testing_and_raise(name, download): """Check if we're in an MNE test and raise an error if download!=False.""" root_dirs = [ importlib.import_module(ns) - for ns in _MODULES_TO_ENSURE_DOWNLOAD_IS_FALSE_IN_TESTS] + for ns in _MODULES_TO_ENSURE_DOWNLOAD_IS_FALSE_IN_TESTS + ] root_dirs = [str(Path(ns.__file__).parent) for ns in root_dirs] check = False func = None @@ -162,7 +189,7 @@ def _check_in_testing_and_raise(name, download): try: # First, traverse out of the data_path() call while frame: - if frame.f_code.co_name in ('data_path', 'load_data'): + if frame.f_code.co_name in ("data_path", "load_data"): func = frame.f_code.co_name frame = frame.f_back.f_back # out of verbose decorator break @@ -175,10 +202,12 @@ def _check_in_testing_and_raise(name, download): # in mne namespace, and # (can't use is_relative_to here until 3.9) if any(str(fname).startswith(rd) for rd in root_dirs) and ( - # in tests/*.py - fname.parent.stem == 'tests' or - # or in a conftest.py - fname.stem == 'conftest.py'): + # in tests/*.py + fname.parent.stem == "tests" + or + # or in a conftest.py + fname.stem == "conftest.py" + ): check = True break frame = frame.f_back @@ -186,47 +215,58 @@ def _check_in_testing_and_raise(name, download): del frame if check and download is not False: raise RuntimeError( - f'Do not download dataset {repr(name)} in tests, pass ' - f'{func}(download=False) to prevent accidental downloads') + f"Do not download 
dataset {repr(name)} in tests, pass " + f"{func}(download=False) to prevent accidental downloads" + ) -def _download_mne_dataset(name, processor, path, force_update, - update_path, download, accept=False): +def _download_mne_dataset( + name, processor, path, force_update, update_path, download, accept=False +) -> Path: """Aux function for downloading internal MNE datasets.""" import pooch + from mne.datasets._fetch import fetch_dataset _check_in_testing_and_raise(name, download) # import pooch library for handling the dataset downloading dataset_params = MNE_DATASETS[name] - dataset_params['dataset_name'] = name - config_key = MNE_DATASETS[name]['config_key'] - folder_name = MNE_DATASETS[name]['folder_name'] + dataset_params["dataset_name"] = name + config_key = MNE_DATASETS[name]["config_key"] + folder_name = MNE_DATASETS[name]["folder_name"] # get download path for specific dataset path = _get_path(path=path, key=config_key, name=name) # instantiate processor that unzips file - if processor == 'nested_untar': + if processor == "nested_untar": processor_ = pooch.Untar(extract_dir=op.join(path, folder_name)) - elif processor == 'nested_unzip': + elif processor == "nested_unzip": processor_ = pooch.Unzip(extract_dir=op.join(path, folder_name)) else: processor_ = processor # handle case of multiple sub-datasets with different urls - if name == 'visual_92_categories': + if name == "visual_92_categories": dataset_params = [] - for name in ['visual_92_categories_1', 'visual_92_categories_2']: + for name in ["visual_92_categories_1", "visual_92_categories_2"]: this_dataset = MNE_DATASETS[name] - this_dataset['dataset_name'] = name + this_dataset["dataset_name"] = name dataset_params.append(this_dataset) - return fetch_dataset(dataset_params=dataset_params, processor=processor_, - path=path, force_update=force_update, - update_path=update_path, download=download, - accept=accept) + return cast( + Path, + fetch_dataset( + dataset_params=dataset_params, + processor=processor_, + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ), + ) def _get_version(name): @@ -236,14 +276,13 @@ def _get_version(name): if not has_dataset(name): return None dataset_params = MNE_DATASETS[name] - dataset_params['dataset_name'] = name - config_key = MNE_DATASETS[name]['config_key'] + dataset_params["dataset_name"] = name + config_key = MNE_DATASETS[name]["config_key"] # get download path for specific dataset path = _get_path(path=None, key=config_key, name=name) - return fetch_dataset(dataset_params, path=path, - return_version=True)[1] + return fetch_dataset(dataset_params, path=path, return_version=True)[1] def has_dataset(name): @@ -266,24 +305,23 @@ def has_dataset(name): from mne.datasets._fetch import fetch_dataset if isinstance(name, dict): - dataset_name = name['dataset_name'] + dataset_name = name["dataset_name"] dataset_params = name else: - dataset_name = 'spm' if name == 'spm_face' else name + dataset_name = "spm" if name == "spm_face" else name dataset_params = MNE_DATASETS[dataset_name] - dataset_params['dataset_name'] = dataset_name + dataset_params["dataset_name"] = dataset_name - config_key = dataset_params['config_key'] + config_key = dataset_params["config_key"] # get download path for specific dataset path = _get_path(path=None, key=config_key, name=dataset_name) - dp = fetch_dataset(dataset_params, path=path, download=False, - check_version=False) - if dataset_name.startswith('bst_'): + dp = fetch_dataset(dataset_params, path=path, 
download=False, check_version=False) + if dataset_name.startswith("bst_"): check = dataset_name else: - check = MNE_DATASETS[dataset_name]['folder_name'] + check = MNE_DATASETS[dataset_name]["folder_name"] return str(dp).endswith(check) @@ -299,48 +337,71 @@ def _download_all_example_data(verbose=True): # # verbose=True by default so we get nice status messages. # Consider adding datasets from here to CircleCI for PR-auto-build - from . import (sample, testing, misc, spm_face, somato, brainstorm, - eegbci, multimodal, opm, hf_sef, mtrf, fieldtrip_cmc, - kiloword, phantom_4dbti, sleep_physionet, limo, - fnirs_motor, refmeg_noise, fetch_infant_template, - fetch_fsaverage, ssvep, erp_core, epilepsy_ecog, - fetch_phantom, ucl_opm_auditory) - sample_path = sample.data_path() - testing.data_path() - misc.data_path() - spm_face.data_path() - somato.data_path() - hf_sef.data_path() - multimodal.data_path() - fnirs_motor.data_path() - opm.data_path() - mtrf.data_path() - fieldtrip_cmc.data_path() - kiloword.data_path() - phantom_4dbti.data_path() - refmeg_noise.data_path() - ssvep.data_path() - epilepsy_ecog.data_path() - ucl_opm_auditory.data_path() - brainstorm.bst_raw.data_path(accept=True) - brainstorm.bst_auditory.data_path(accept=True) - brainstorm.bst_resting.data_path(accept=True) - phantom_path = brainstorm.bst_phantom_elekta.data_path(accept=True) - fetch_phantom('otaniemi', subjects_dir=phantom_path) - brainstorm.bst_phantom_ctf.data_path(accept=True) - eegbci.load_data(1, [6, 10, 14], update_path=True) - for subj in range(4): - eegbci.load_data(subj + 1, runs=[3], update_path=True) + import openneuro + + paths = dict() + for kind in ( + "sample testing misc spm_face somato hf_sef multimodal " + "fnirs_motor opm mtrf fieldtrip_cmc kiloword phantom_kit phantom_4dbti " + "refmeg_noise ssvep epilepsy_ecog ucl_opm_auditory eyelink " + "erp_core brainstorm.bst_raw brainstorm.bst_auditory " + "brainstorm.bst_resting brainstorm.bst_phantom_ctf " + "brainstorm.bst_phantom_elekta phantom_kernel" + ).split(): + mod = importlib.import_module(f"mne.datasets.{kind}") + data_path_func = getattr(mod, "data_path") + kwargs = dict() + if "accept" in inspect.getfullargspec(data_path_func).args: + kwargs["accept"] = True + paths[kind] = data_path_func(**kwargs) + logger.info(f"[done {kind}]") + + # Now for the exceptions: + from . 
import ( + eegbci, + fetch_fsaverage, + fetch_hcp_mmp_parcellation, + fetch_infant_template, + fetch_phantom, + limo, + sleep_physionet, + ) + + eegbci.load_data(subjects=1, runs=[6, 10, 14], update_path=True) + eegbci.load_data(subjects=range(1, 5), runs=[3], update_path=True) + logger.info("[done eegbci]") + sleep_physionet.age.fetch_data(subjects=[0, 1], recording=[1]) + logger.info("[done sleep_physionet]") + # If the user has SUBJECTS_DIR, respect it, if not, set it to the EEG one # (probably on CircleCI, or otherwise advanced user) - fetch_fsaverage(None) - fetch_infant_template('6mo') - fetch_hcp_mmp_parcellation( - subjects_dir=sample_path / 'subjects', accept=True) + fetch_fsaverage(subjects_dir=None) + logger.info("[done fsaverage]") + + # Now also update the sample dataset path, if not already SUBJECTS_DIR + # (some tutorials make use of these files) + fetch_fsaverage(subjects_dir=paths["sample"] / "subjects") + + fetch_infant_template("6mo") + logger.info("[done infant_template]") + + fetch_hcp_mmp_parcellation(subjects_dir=paths["sample"] / "subjects", accept=True) + logger.info("[done hcp_mmp_parcellation]") + + fetch_phantom("otaniemi", subjects_dir=paths["brainstorm.bst_phantom_elekta"]) + logger.info("[done phantom]") + limo.load_data(subject=1, update_path=True) + logger.info("[done limo]") - erp_core.data_path() + # for ESG + ds = "ds004388" + target_dir = default_path() / ds + run_name = "sub-001/eeg/*median_run-03_eeg*.set" + if not glob.glob(str(target_dir / run_name)): + target_dir.mkdir(exist_ok=True) + openneuro.download(dataset=ds, target_dir=target_dir, include=run_name[:-4]) @verbose @@ -366,25 +427,28 @@ def fetch_aparc_sub_parcellation(subjects_dir=None, verbose=None): subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) destination = subjects_dir / "fsaverage" / "label" - urls = dict(lh='https://osf.io/p92yb/download', - rh='https://osf.io/4kxny/download') - hashes = dict(lh='9e4d8d6b90242b7e4b0145353436ef77', - rh='dd6464db8e7762d969fc1d8087cd211b') - for hemi in ('lh', 'rh'): - fname = f'{hemi}.aparc_sub.annot' + urls = dict(lh="https://osf.io/download/p92yb", rh="https://osf.io/download/4kxny") + hashes = dict( + lh="9e4d8d6b90242b7e4b0145353436ef77", rh="dd6464db8e7762d969fc1d8087cd211b" + ) + downloader = pooch.HTTPDownloader(**_downloader_params()) + for hemi in ("lh", "rh"): + fname = f"{hemi}.aparc_sub.annot" fpath = destination / fname if not fpath.is_file(): pooch.retrieve( url=urls[hemi], known_hash=f"md5:{hashes[hemi]}", path=destination, + downloader=downloader, fname=fname, ) @verbose -def fetch_hcp_mmp_parcellation(subjects_dir=None, combine=True, *, - accept=False, verbose=None): +def fetch_hcp_mmp_parcellation( + subjects_dir=None, combine=True, *, accept=False, verbose=None +): """Fetch the HCP-MMP parcellation. 
This will download and install the HCP-MMP parcellation @@ -417,105 +481,282 @@ def fetch_hcp_mmp_parcellation(subjects_dir=None, combine=True, *, subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) destination = subjects_dir / "fsaverage" / "label" fnames = [destination / f"{hemi}.HCPMMP1.annot" for hemi in ("lh", "rh")] - urls = dict(lh='https://ndownloader.figshare.com/files/5528816', - rh='https://ndownloader.figshare.com/files/5528819') - hashes = dict(lh='46a102b59b2fb1bb4bd62d51bf02e975', - rh='75e96b331940227bbcb07c1c791c2463') + urls = dict( + lh="https://ndownloader.figshare.com/files/5528816", + rh="https://ndownloader.figshare.com/files/5528819", + ) + hashes = dict( + lh="46a102b59b2fb1bb4bd62d51bf02e975", rh="75e96b331940227bbcb07c1c791c2463" + ) if not all(fname.exists() for fname in fnames): - if accept or '--accept-hcpmmp-license' in sys.argv: - answer = 'y' + if accept or "--accept-hcpmmp-license" in sys.argv: + answer = "y" else: - answer = _safe_input('%s\nAgree (y/[n])? ' % _hcp_mmp_license_text) - if answer.lower() != 'y': - raise RuntimeError('You must agree to the license to use this ' - 'dataset') - for hemi, fpath in zip(('lh', 'rh'), fnames): + answer = _safe_input(f"{_hcp_mmp_license_text}\nAgree (y/[n])? ") + if answer.lower() != "y": + raise RuntimeError("You must agree to the license to use this dataset") + downloader = pooch.HTTPDownloader(**_downloader_params()) + for hemi, fpath in zip(("lh", "rh"), fnames): if not op.isfile(fpath): fname = fpath.name pooch.retrieve( url=urls[hemi], known_hash=f"md5:{hashes[hemi]}", path=destination, + downloader=downloader, fname=fname, ) if combine: - fnames = [op.join(destination, '%s.HCPMMP1_combined.annot' % hemi) - for hemi in ('lh', 'rh')] + fnames = [ + op.join(destination, f"{hemi}.HCPMMP1_combined.annot") + for hemi in ("lh", "rh") + ] if all(op.isfile(fname) for fname in fnames): return # otherwise, let's make them - logger.info('Creating combined labels') - groups = OrderedDict([ - ('Primary Visual Cortex (V1)', - ('V1',)), - ('Early Visual Cortex', - ('V2', 'V3', 'V4')), - ('Dorsal Stream Visual Cortex', - ('V3A', 'V3B', 'V6', 'V6A', 'V7', 'IPS1')), - ('Ventral Stream Visual Cortex', - ('V8', 'VVC', 'PIT', 'FFC', 'VMV1', 'VMV2', 'VMV3')), - ('MT+ Complex and Neighboring Visual Areas', - ('V3CD', 'LO1', 'LO2', 'LO3', 'V4t', 'FST', 'MT', 'MST', 'PH')), - ('Somatosensory and Motor Cortex', - ('4', '3a', '3b', '1', '2')), - ('Paracentral Lobular and Mid Cingulate Cortex', - ('24dd', '24dv', '6mp', '6ma', 'SCEF', '5m', '5L', '5mv',)), - ('Premotor Cortex', - ('55b', '6d', '6a', 'FEF', '6v', '6r', 'PEF')), - ('Posterior Opercular Cortex', - ('43', 'FOP1', 'OP4', 'OP1', 'OP2-3', 'PFcm')), - ('Early Auditory Cortex', - ('A1', 'LBelt', 'MBelt', 'PBelt', 'RI')), - ('Auditory Association Cortex', - ('A4', 'A5', 'STSdp', 'STSda', 'STSvp', 'STSva', 'STGa', 'TA2',)), - ('Insular and Frontal Opercular Cortex', - ('52', 'PI', 'Ig', 'PoI1', 'PoI2', 'FOP2', 'FOP3', - 'MI', 'AVI', 'AAIC', 'Pir', 'FOP4', 'FOP5')), - ('Medial Temporal Cortex', - ('H', 'PreS', 'EC', 'PeEc', 'PHA1', 'PHA2', 'PHA3',)), - ('Lateral Temporal Cortex', - ('PHT', 'TE1p', 'TE1m', 'TE1a', 'TE2p', 'TE2a', - 'TGv', 'TGd', 'TF',)), - ('Temporo-Parieto-Occipital Junction', - ('TPOJ1', 'TPOJ2', 'TPOJ3', 'STV', 'PSL',)), - ('Superior Parietal Cortex', - ('LIPv', 'LIPd', 'VIP', 'AIP', 'MIP', - '7PC', '7AL', '7Am', '7PL', '7Pm',)), - ('Inferior Parietal Cortex', - ('PGp', 'PGs', 'PGi', 'PFm', 'PF', 'PFt', 'PFop', - 
'IP0', 'IP1', 'IP2',)), - ('Posterior Cingulate Cortex', - ('DVT', 'ProS', 'POS1', 'POS2', 'RSC', 'v23ab', 'd23ab', - '31pv', '31pd', '31a', '23d', '23c', 'PCV', '7m',)), - ('Anterior Cingulate and Medial Prefrontal Cortex', - ('33pr', 'p24pr', 'a24pr', 'p24', 'a24', 'p32pr', 'a32pr', 'd32', - 'p32', 's32', '8BM', '9m', '10v', '10r', '25',)), - ('Orbital and Polar Frontal Cortex', - ('47s', '47m', 'a47r', '11l', '13l', - 'a10p', 'p10p', '10pp', '10d', 'OFC', 'pOFC',)), - ('Inferior Frontal Cortex', - ('44', '45', 'IFJp', 'IFJa', 'IFSp', 'IFSa', '47l', 'p47r',)), - ('DorsoLateral Prefrontal Cortex', - ('8C', '8Av', 'i6-8', 's6-8', 'SFL', '8BL', '9p', '9a', '8Ad', - 'p9-46v', 'a9-46v', '46', '9-46d',)), - ('???', - ('???',))]) + logger.info("Creating combined labels") + groups = OrderedDict( + [ + ("Primary Visual Cortex (V1)", ("V1",)), + ("Early Visual Cortex", ("V2", "V3", "V4")), + ( + "Dorsal Stream Visual Cortex", + ("V3A", "V3B", "V6", "V6A", "V7", "IPS1"), + ), + ( + "Ventral Stream Visual Cortex", + ("V8", "VVC", "PIT", "FFC", "VMV1", "VMV2", "VMV3"), + ), + ( + "MT+ Complex and Neighboring Visual Areas", + ("V3CD", "LO1", "LO2", "LO3", "V4t", "FST", "MT", "MST", "PH"), + ), + ("Somatosensory and Motor Cortex", ("4", "3a", "3b", "1", "2")), + ( + "Paracentral Lobular and Mid Cingulate Cortex", + ( + "24dd", + "24dv", + "6mp", + "6ma", + "SCEF", + "5m", + "5L", + "5mv", + ), + ), + ("Premotor Cortex", ("55b", "6d", "6a", "FEF", "6v", "6r", "PEF")), + ( + "Posterior Opercular Cortex", + ("43", "FOP1", "OP4", "OP1", "OP2-3", "PFcm"), + ), + ("Early Auditory Cortex", ("A1", "LBelt", "MBelt", "PBelt", "RI")), + ( + "Auditory Association Cortex", + ( + "A4", + "A5", + "STSdp", + "STSda", + "STSvp", + "STSva", + "STGa", + "TA2", + ), + ), + ( + "Insular and Frontal Opercular Cortex", + ( + "52", + "PI", + "Ig", + "PoI1", + "PoI2", + "FOP2", + "FOP3", + "MI", + "AVI", + "AAIC", + "Pir", + "FOP4", + "FOP5", + ), + ), + ( + "Medial Temporal Cortex", + ( + "H", + "PreS", + "EC", + "PeEc", + "PHA1", + "PHA2", + "PHA3", + ), + ), + ( + "Lateral Temporal Cortex", + ( + "PHT", + "TE1p", + "TE1m", + "TE1a", + "TE2p", + "TE2a", + "TGv", + "TGd", + "TF", + ), + ), + ( + "Temporo-Parieto-Occipital Junction", + ( + "TPOJ1", + "TPOJ2", + "TPOJ3", + "STV", + "PSL", + ), + ), + ( + "Superior Parietal Cortex", + ( + "LIPv", + "LIPd", + "VIP", + "AIP", + "MIP", + "7PC", + "7AL", + "7Am", + "7PL", + "7Pm", + ), + ), + ( + "Inferior Parietal Cortex", + ( + "PGp", + "PGs", + "PGi", + "PFm", + "PF", + "PFt", + "PFop", + "IP0", + "IP1", + "IP2", + ), + ), + ( + "Posterior Cingulate Cortex", + ( + "DVT", + "ProS", + "POS1", + "POS2", + "RSC", + "v23ab", + "d23ab", + "31pv", + "31pd", + "31a", + "23d", + "23c", + "PCV", + "7m", + ), + ), + ( + "Anterior Cingulate and Medial Prefrontal Cortex", + ( + "33pr", + "p24pr", + "a24pr", + "p24", + "a24", + "p32pr", + "a32pr", + "d32", + "p32", + "s32", + "8BM", + "9m", + "10v", + "10r", + "25", + ), + ), + ( + "Orbital and Polar Frontal Cortex", + ( + "47s", + "47m", + "a47r", + "11l", + "13l", + "a10p", + "p10p", + "10pp", + "10d", + "OFC", + "pOFC", + ), + ), + ( + "Inferior Frontal Cortex", + ( + "44", + "45", + "IFJp", + "IFJa", + "IFSp", + "IFSa", + "47l", + "p47r", + ), + ), + ( + "DorsoLateral Prefrontal Cortex", + ( + "8C", + "8Av", + "i6-8", + "s6-8", + "SFL", + "8BL", + "9p", + "9a", + "8Ad", + "p9-46v", + "a9-46v", + "46", + "9-46d", + ), + ), + ("???", ("???",)), + ] + ) assert len(groups) == 23 labels_out = list() - for hemi in ('lh', 'rh'): - labels = 
read_labels_from_annot('fsaverage', 'HCPMMP1', hemi=hemi, - subjects_dir=subjects_dir, - sort=False) + for hemi in ("lh", "rh"): + labels = read_labels_from_annot( + "fsaverage", "HCPMMP1", hemi=hemi, subjects_dir=subjects_dir, sort=False + ) label_names = [ - '???' if label.name.startswith('???') else - label.name.split('_')[1] for label in labels] + "???" if label.name.startswith("???") else label.name.split("_")[1] + for label in labels + ] used = np.zeros(len(labels), bool) for key, want in groups.items(): - assert '\t' not in key - these_labels = [li for li, label_name in enumerate(label_names) - if label_name in want] + assert "\t" not in key + these_labels = [ + li + for li, label_name in enumerate(label_names) + if label_name in want + ] assert not used[these_labels].any() assert len(these_labels) == len(want) used[these_labels] = True @@ -525,52 +766,90 @@ def fetch_hcp_mmp_parcellation(subjects_dir=None, combine=True, *, w = np.array([len(label.vertices) for label in these_labels]) w = w / float(w.sum()) color = np.dot(w, [label.color for label in these_labels]) - these_labels = sum(these_labels, - Label([], subject='fsaverage', hemi=hemi)) + these_labels = sum( + these_labels, Label([], subject="fsaverage", hemi=hemi) + ) these_labels.name = key these_labels.color = color labels_out.append(these_labels) assert used.all() assert len(labels_out) == 46 - for hemi, side in (('lh', 'left'), ('rh', 'right')): - table_name = './%s.fsaverage164.label.gii' % (side,) - write_labels_to_annot(labels_out, 'fsaverage', 'HCPMMP1_combined', - hemi=hemi, subjects_dir=subjects_dir, - sort=False, table_name=table_name) + for hemi, side in (("lh", "left"), ("rh", "right")): + table_name = f"./{side}.fsaverage164.label.gii" + write_labels_to_annot( + labels_out, + "fsaverage", + "HCPMMP1_combined", + hemi=hemi, + subjects_dir=subjects_dir, + sort=False, + table_name=table_name, + ) def _manifest_check_download(manifest_path, destination, url, hash_): import pooch - with open(manifest_path, 'r') as fid: + with open(manifest_path) as fid: names = [name.strip() for name in fid.readlines()] - manifest_path = op.basename(manifest_path) need = list() for name in names: - if not op.isfile(op.join(destination, name)): + if not (destination / name).is_file(): need.append(name) - logger.info('%d file%s missing from %s in %s' - % (len(need), _pl(need), manifest_path, destination)) + logger.info( + "%d file%s missing from %s in %s", + len(need), + _pl(need), + manifest_path.name, + destination, + ) if len(need) > 0: + downloader = pooch.HTTPDownloader(**_downloader_params()) with tempfile.TemporaryDirectory() as path: - logger.info('Downloading missing files remotely') + logger.info("Downloading missing files remotely") - fname_path = op.join(path, 'temp.zip') + path = Path(path) + fname_path = path / "temp.zip" pooch.retrieve( url=url, known_hash=f"md5:{hash_}", path=path, - fname=op.basename(fname_path) + downloader=downloader, + fname=fname_path.name, ) - logger.info('Extracting missing file%s' % (_pl(need),)) - with zipfile.ZipFile(fname_path, 'r') as ff: - members = set(f for f in ff.namelist() if not f.endswith('/')) + logger.info(f"Extracting missing file{_pl(need)}") + with zipfile.ZipFile(fname_path, "r") as ff: + members = set(f for f in ff.namelist() if not f.endswith("/")) missing = sorted(members.symmetric_difference(set(names))) if len(missing): - raise RuntimeError('Zip file did not have correct names:' - '\n%s' % ('\n'.join(missing))) + raise RuntimeError( + "Zip file did not have correct 
names:\n{'\n'.join(missing)}" + ) for name in need: ff.extract(name, path=destination) - logger.info('Successfully extracted %d file%s' - % (len(need), _pl(need))) + logger.info(f"Successfully extracted {len(need)} file{_pl(need)}") + + +def _log_time_size(t0, sz): + t = time.time() - t0 + fmt = "%Ss" + if t > 60: + fmt = f"%Mm{fmt}" + if t > 3600: + fmt = f"%Hh{fmt}" + sz = sz / 1048576 # 1024 ** 2 + t = time.strftime(fmt, time.gmtime(t)) + logger.info(f"Download complete in {t} ({sz:.1f} MB)") + + +def _downloader_params(*, auth=None, token=None): + params = dict(timeout=15) + params["progressbar"] = ( + logger.level <= logging.INFO and get_config("MNE_TQDM", "tqdm.auto") != "off" + ) + if auth is not None: + params["auth"] = auth + if token is not None: + params["headers"] = {"Authorization": f"token {token}"} + return params diff --git a/mne/datasets/visual_92_categories/__init__.py b/mne/datasets/visual_92_categories/__init__.py index a0b26c17fed..598795bcc9d 100644 --- a/mne/datasets/visual_92_categories/__init__.py +++ b/mne/datasets/visual_92_categories/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """MNE visual_92_categories dataset.""" from .visual_92_categories import data_path, get_version diff --git a/mne/datasets/visual_92_categories/visual_92_categories.py b/mne/datasets/visual_92_categories/visual_92_categories.py index df687aafb6c..b0ec1e67820 100644 --- a/mne/datasets/visual_92_categories/visual_92_categories.py +++ b/mne/datasets/visual_92_categories/visual_92_categories.py @@ -1,13 +1,15 @@ -# License: BSD Style. +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. from ...utils import verbose -from ..utils import (_download_mne_dataset, _data_path_doc, _get_version, - _version_doc) +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc @verbose -def data_path(path=None, force_update=False, update_path=True, - download=True, *, verbose=None): +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): """ Get path to local copy of visual_92_categories dataset. @@ -43,18 +45,23 @@ def data_path(path=None, force_update=False, update_path=True, human object recognition in space and time. doi: 10.1038/NN.3635 """ return _download_mne_dataset( - name='visual_92_categories', processor='untar', path=path, - force_update=force_update, update_path=update_path, - download=download) + name="visual_92_categories", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) data_path.__doc__ = _data_path_doc.format( - name='visual_92_categories', conf='MNE_DATASETS_VISUAL_92_CATEGORIES_PATH') + name="visual_92_categories", conf="MNE_DATASETS_VISUAL_92_CATEGORIES_PATH" +) def get_version(): """Get dataset version.""" - return _get_version('visual_92_categories') + return _get_version("visual_92_categories") -get_version.__doc__ = _version_doc.format(name='visual_92_categories') +get_version.__doc__ = _version_doc.format(name="visual_92_categories") diff --git a/mne/decoding/__init__.py b/mne/decoding/__init__.py index 2b0136256b6..b0dc90e1048 100644 --- a/mne/decoding/__init__.py +++ b/mne/decoding/__init__.py @@ -1,14 +1,8 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
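# Minimal sketch of the lazy_loader pattern adopted just below (assumption:
# "mypkg" and "Thing" are hypothetical names, not MNE-Python objects).
#
# mypkg/__init__.py
import lazy_loader as lazy

__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)

# mypkg/__init__.pyi -- the stub that lists what may be imported lazily:
#     __all__ = ["Thing"]
#     from .things import Thing
#
# Submodules are then imported only on first attribute access, which keeps the
# top-level import cheap while __dir__/__all__ preserve introspection.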
+ """Decoding and encoding, including machine learning and receptive fields.""" +import lazy_loader as lazy -from .transformer import (PSDEstimator, Vectorizer, - UnsupervisedSpatialFilter, TemporalFilter, - Scaler, FilterEstimator) -from .mixin import TransformerMixin -from .base import BaseEstimator, LinearModel, get_coef, cross_val_multiscore -from .csp import CSP, SPoC -from .ssd import SSD -from .ems import compute_ems, EMS -from .time_frequency import TimeFrequency -from .receptive_field import ReceptiveField -from .time_delaying_ridge import TimeDelayingRidge -from .search_light import SlidingEstimator, GeneralizingEstimator +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/decoding/__init__.pyi b/mne/decoding/__init__.pyi new file mode 100644 index 00000000000..1131f1597c5 --- /dev/null +++ b/mne/decoding/__init__.pyi @@ -0,0 +1,50 @@ +__all__ = [ + "BaseEstimator", + "CSP", + "EMS", + "FilterEstimator", + "GeneralizingEstimator", + "LinearModel", + "PSDEstimator", + "ReceptiveField", + "SPoC", + "SSD", + "Scaler", + "SlidingEstimator", + "SpatialFilter", + "TemporalFilter", + "TimeDelayingRidge", + "TimeFrequency", + "TransformerMixin", + "UnsupervisedSpatialFilter", + "Vectorizer", + "XdawnTransformer", + "compute_ems", + "cross_val_multiscore", + "get_coef", + "get_spatial_filter_from_estimator", +] +from .base import ( + BaseEstimator, + LinearModel, + TransformerMixin, + cross_val_multiscore, + get_coef, +) +from .csp import CSP, SPoC +from .ems import EMS, compute_ems +from .receptive_field import ReceptiveField +from .search_light import GeneralizingEstimator, SlidingEstimator +from .spatial_filter import SpatialFilter, get_spatial_filter_from_estimator +from .ssd import SSD +from .time_delaying_ridge import TimeDelayingRidge +from .time_frequency import TimeFrequency +from .transformer import ( + FilterEstimator, + PSDEstimator, + Scaler, + TemporalFilter, + UnsupervisedSpatialFilter, + Vectorizer, +) +from .xdawn import XdawnTransformer diff --git a/mne/decoding/_covs_ged.py b/mne/decoding/_covs_ged.py new file mode 100644 index 00000000000..8061a343f23 --- /dev/null +++ b/mne/decoding/_covs_ged.py @@ -0,0 +1,287 @@ +"""Covariance estimation for GED transformers.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
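# Sketch of how the covariance estimators defined below in this module
# (_concat_cov / _epoch_cov) surface through the public CSP API via its
# cov_est parameter (assumption: X and y are random illustrative data, not a
# real recording, and the mapping is as described by _csp_estimate below).
import numpy as np

from mne.decoding import CSP

rng = np.random.default_rng(0)
X = rng.standard_normal((40, 8, 200))  # n_epochs, n_channels, n_times
y = np.array([0, 1] * 20)

# cov_est="concat" -> one covariance per class from the concatenated epochs;
# cov_est="epoch"  -> average of per-epoch covariances.
CSP(n_components=4, cov_est="epoch").fit(X, y)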
+ +import numpy as np + +from .._fiff.meas_info import Info, create_info +from .._fiff.pick import _picks_to_idx, pick_info +from ..cov import Covariance, _compute_rank_raw_array, _regularized_covariance +from ..defaults import _handle_default +from ..filter import filter_data +from ..rank import compute_rank +from ..utils import _verbose_safe_false, logger + + +def _concat_cov(x_class, *, cov_kind, log_rank, reg, cov_method_params, info, rank): + """Concatenate epochs before computing the covariance.""" + _, n_channels, _ = x_class.shape + + x_class = x_class.transpose(1, 0, 2).reshape(n_channels, -1) + cov = _regularized_covariance( + x_class, + reg=reg, + method_params=cov_method_params, + rank=rank, + info=info, + cov_kind=cov_kind, + log_rank=log_rank, + log_ch_type="data", + ) + + return cov, n_channels # the weight here is just the number of channels + + +def _epoch_cov(x_class, *, cov_kind, log_rank, reg, cov_method_params, info, rank): + """Mean of per-epoch covariances.""" + name = reg if isinstance(reg, str) else "empirical" + name += " with shrinkage" if isinstance(reg, float) else "" + logger.info( + f"Estimating {cov_kind + (' ' if cov_kind else '')}" + f"covariance (average over epochs; {name.upper()})" + ) + cov = sum( + _regularized_covariance( + this_X, + reg=reg, + method_params=cov_method_params, + rank=rank, + info=info, + cov_kind=cov_kind, + log_rank=log_rank and ii == 0, + log_ch_type="data", + verbose=_verbose_safe_false(), + ) + for ii, this_X in enumerate(x_class) + ) + cov /= len(x_class) + weight = len(x_class) + + return cov, weight + + +def _handle_info_rank(X, info, rank): + if info is None: + # use mag instead of eeg to avoid the cov EEG projection warning + info = create_info(X.shape[1], 1000.0, "mag") + if isinstance(rank, dict): + rank = dict(mag=sum(rank.values())) + + return info, rank + + +def _csp_estimate(X, y, reg, cov_method_params, cov_est, info, rank, norm_trace): + _, n_channels, _ = X.shape + classes_ = np.unique(y) + if cov_est == "concat": + cov_estimator = _concat_cov + elif cov_est == "epoch": + cov_estimator = _epoch_cov + + info, rank = _handle_info_rank(X, info, rank) + if not isinstance(rank, dict): + rank = _compute_rank_raw_array( + np.hstack(X), + info, + rank=rank, + scalings=None, + log_ch_type="data", + on_few_samples="ignore", + ) + + covs = [] + sample_weights = [] + for ci, this_class in enumerate(classes_): + cov, weight = cov_estimator( + X[y == this_class], + cov_kind=f"class={this_class}", + log_rank=ci == 0, + reg=reg, + cov_method_params=cov_method_params, + info=info, + rank=rank, + ) + + if norm_trace: + cov /= np.trace(cov) + + covs.append(cov) + sample_weights.append(weight) + + covs = np.stack(covs) + C_ref = covs.mean(0) + + return covs, C_ref, info, rank, dict(sample_weights=np.array(sample_weights)) + + +def _xdawn_estimate( + X, + y, + reg, + cov_method_params, + R=None, + info=None, + rank="full", +): + classes = np.unique(y) + info, rank = _handle_info_rank(X, info, rank) + + # Retrieve or compute whitening covariance + if R is None: + R = _regularized_covariance( + np.hstack(X), reg, cov_method_params, info, rank=rank + ) + elif isinstance(R, Covariance): + R = R.data + + # Get prototype events + evokeds, toeplitzs = list(), list() + for c in classes: + # Prototyped response for each class + evokeds.append(np.mean(X[y == c, :, :], axis=0)) + toeplitzs.append(1.0) + + covs = [] + for evo, toeplitz in zip(evokeds, toeplitzs): + # Estimate covariance matrix of the prototype response + evo = np.dot(evo, toeplitz) 
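        # Each `evo` above is the class-average ("prototype") response passed
        # through the (here trivial, toeplitz = 1.0) Toeplitz weighting; its
        # covariance, computed next, captures the evoked signal subspace that
        # is contrasted against the whitening covariance R appended after this
        # loop (C_ref = R).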
+ evo_cov = _regularized_covariance(evo, reg, cov_method_params, info, rank=rank) + covs.append(evo_cov) + + covs.append(R) + C_ref = R + if not isinstance(rank, dict): + rank = _compute_rank_raw_array( + np.hstack(X), + info, + rank=rank, + scalings=None, + log_ch_type="data", + on_few_samples="ignore", + ) + return covs, C_ref, info, rank, dict() + + +def _ssd_estimate( + X, + y, + reg, + cov_method_params, + info, + picks, + n_fft, + filt_params_signal, + filt_params_noise, + rank, + sort_by_spectral_ratio, +): + if isinstance(info, Info): + sfreq = info["sfreq"] + elif isinstance(info, float): # special case, mostly for testing + sfreq = info + info = create_info(X.shape[-2], sfreq, ch_types="eeg") + picks_ = _picks_to_idx(info, picks, none="data", exclude="bads") + X_aux = X[..., picks_, :] + X_signal = filter_data(X_aux, sfreq, **filt_params_signal) + X_noise = filter_data(X_aux, sfreq, **filt_params_noise) + X_noise -= X_signal + if X.ndim == 3: + X_signal = np.hstack(X_signal) + X_noise = np.hstack(X_noise) + + # prevent rank change when computing cov with rank='full' + picked_info = pick_info(info, picks_) + S = _regularized_covariance( + X_signal, + reg=reg, + method_params=cov_method_params, + rank="full", + info=picked_info, + ) + R = _regularized_covariance( + X_noise, + reg=reg, + method_params=cov_method_params, + rank="full", + info=picked_info, + ) + covs = [S, R] + C_ref = S + + all_ranks = list() + for cov in covs: + r = list( + compute_rank( + Covariance( + cov, + picked_info.ch_names, + list(), + list(), + 0, + verbose=_verbose_safe_false(), + ), + rank, + _handle_default("scalings_cov_rank", None), + info, + ).values() + )[0] + all_ranks.append(r) + rank = np.min(all_ranks) + freqs_signal = (filt_params_signal["l_freq"], filt_params_signal["h_freq"]) + freqs_noise = (filt_params_noise["l_freq"], filt_params_noise["h_freq"]) + n_fft = min( + int(n_fft if n_fft is not None else sfreq), + X.shape[-1], + ) + kwargs = dict( + X=X, + picks=picks_, + sfreq=sfreq, + n_fft=n_fft, + freqs_signal=freqs_signal, + freqs_noise=freqs_noise, + sort_by_spectral_ratio=sort_by_spectral_ratio, + ) + rank = dict(eeg=rank) + info = picked_info + return covs, C_ref, info, rank, kwargs + + +def _spoc_estimate(X, y, reg, cov_method_params, info, rank): + info, rank = _handle_info_rank(X, info, rank) + # Normalize target variable + target = y.astype(np.float64) + target -= target.mean() + target /= target.std() + + n_epochs, n_channels = X.shape[:2] + + # Estimate single trial covariance + covs = np.empty((n_epochs, n_channels, n_channels)) + for ii, epoch in enumerate(X): + covs[ii] = _regularized_covariance( + epoch, + reg=reg, + method_params=cov_method_params, + rank=rank, + log_ch_type="data", + log_rank=ii == 0, + ) + + S = np.mean(covs * target[:, np.newaxis, np.newaxis], axis=0) + R = covs.mean(0) + + covs = [S, R] + C_ref = R + if not isinstance(rank, dict): + rank = _compute_rank_raw_array( + np.hstack(X), + info, + rank=rank, + scalings=None, + log_ch_type="data", + on_few_samples="ignore", + ) + return covs, C_ref, info, rank, dict() diff --git a/mne/decoding/_fixes.py b/mne/decoding/_fixes.py new file mode 100644 index 00000000000..f0f7689bc75 --- /dev/null +++ b/mne/decoding/_fixes.py @@ -0,0 +1,173 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
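# Usage sketch for the validate_data shim defined just below (assumption:
# "MyTransformer" is a hypothetical estimator; mne.decoding._fixes is
# importable once this file exists in the installed package).
from sklearn.base import BaseEstimator, TransformerMixin

from mne.decoding._fixes import validate_data


class MyTransformer(TransformerMixin, BaseEstimator):
    def fit(self, X, y=None):
        # First validation of X happens in fit(): reset=True (see docstring).
        X = validate_data(self, X, reset=True, dtype="numeric")
        return self

    def transform(self, X):
        # Later validations check consistency with fit(): reset=False.
        X = validate_data(self, X, reset=False, dtype="numeric")
        return X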
+ +try: + from sklearn.utils.validation import validate_data +except ImportError: + from sklearn.utils.validation import check_array, check_X_y + + # Use a limited version pulled from sklearn 1.7 + def validate_data( + _estimator, + /, + X="no_validation", + y="no_validation", + reset=True, + validate_separately=False, + skip_check_array=False, + **check_params, + ): + """Validate input data and set or check feature names and counts of the input. + + This helper function should be used in an estimator that requires input + validation. This mutates the estimator and sets the `n_features_in_` and + `feature_names_in_` attributes if `reset=True`. + + .. versionadded:: 1.6 + + Parameters + ---------- + _estimator : estimator instance + The estimator to validate the input for. + + X : {array-like, sparse matrix, dataframe} of shape \ + (n_samples, n_features), default='no validation' + The input samples. + If `'no_validation'`, no validation is performed on `X`. This is + useful for meta-estimator which can delegate input validation to + their underlying estimator(s). In that case `y` must be passed and + the only accepted `check_params` are `multi_output` and + `y_numeric`. + + y : array-like of shape (n_samples,), default='no_validation' + The targets. + + - If `None`, :func:`~sklearn.utils.check_array` is called on `X`. If + the estimator's `requires_y` tag is True, then an error will be raised. + - If `'no_validation'`, :func:`~sklearn.utils.check_array` is called + on `X` and the estimator's `requires_y` tag is ignored. This is a default + placeholder and is never meant to be explicitly set. In that case `X` must + be passed. + - Otherwise, only `y` with `_check_y` or both `X` and `y` are checked with + either :func:`~sklearn.utils.check_array` or + :func:`~sklearn.utils.check_X_y` depending on `validate_separately`. + + reset : bool, default=True + Whether to reset the `n_features_in_` attribute. + If False, the input will be checked for consistency with data + provided when reset was last True. + + .. note:: + + It is recommended to call `reset=True` in `fit` and in the first + call to `partial_fit`. All other methods that validate `X` + should set `reset=False`. + + validate_separately : False or tuple of dicts, default=False + Only used if `y` is not `None`. + If `False`, call :func:`~sklearn.utils.check_X_y`. Else, it must be a tuple + of kwargs to be used for calling :func:`~sklearn.utils.check_array` on `X` + and `y` respectively. + + `estimator=self` is automatically added to these dicts to generate + more informative error message in case of invalid input data. + + skip_check_array : bool, default=False + If `True`, `X` and `y` are unchanged and only `feature_names_in_` and + `n_features_in_` are checked. Otherwise, :func:`~sklearn.utils.check_array` + is called on `X` and `y`. + + **check_params : kwargs + Parameters passed to :func:`~sklearn.utils.check_array` or + :func:`~sklearn.utils.check_X_y`. Ignored if validate_separately + is not False. + + `estimator=self` is automatically added to these params to generate + more informative error message in case of invalid input data. + + Returns + ------- + out : {ndarray, sparse matrix} or tuple of these + The validated input. A tuple is returned if both `X` and `y` are + validated. 
+ """ + no_val_X = isinstance(X, str) and X == "no_validation" + no_val_y = y is None or (isinstance(y, str) and y == "no_validation") + + if no_val_X and no_val_y: + raise ValueError("Validation should be done on X, y or both.") + + default_check_params = {"estimator": _estimator} + check_params = {**default_check_params, **check_params} + + if skip_check_array: + if not no_val_X and no_val_y: + out = X + elif no_val_X and not no_val_y: + out = y + else: + out = X, y + elif not no_val_X and no_val_y: + out = check_array(X, input_name="X", **check_params) + elif no_val_X and not no_val_y: + out = check_array(y, input_name="y", **check_params) + else: + if validate_separately: + # We need this because some estimators validate X and y + # separately, and in general, separately calling check_array() + # on X and y isn't equivalent to just calling check_X_y() + # :( + check_X_params, check_y_params = validate_separately + if "estimator" not in check_X_params: + check_X_params = {**default_check_params, **check_X_params} + X = check_array(X, input_name="X", **check_X_params) + if "estimator" not in check_y_params: + check_y_params = {**default_check_params, **check_y_params} + y = check_array(y, input_name="y", **check_y_params) + else: + X, y = check_X_y(X, y, **check_params) + out = X, y + + return out + + +def _check_n_features_3d(estimator, X, reset): + """Set the `n_features_in_` attribute, or check against it on an estimator. + + Sklearn takes n_features from X.shape[1], but we need X.shape[-1] + + Parameters + ---------- + estimator : estimator instance + The estimator to validate the input for. + + X : {ndarray, sparse matrix} of shape ([n_epochs], n_samples, n_features) + The input samples. + + reset : bool + If True, the `n_features_in_` attribute is set to `X.shape[1]`. + If False and the attribute exists, then check that it is equal to + `X.shape[1]`. If False and the attribute does *not* exist, then + the check is skipped. + .. note:: + It is recommended to call reset=True in `fit` and in the first + call to `partial_fit`. All other methods that validate `X` + should set `reset=False`. + """ + n_features = X.shape[-1] + if reset: + estimator.n_features_in_ = n_features + return + + if not hasattr(estimator, "n_features_in_"): + # Skip this check if the expected number of expected input features + # was not recorded by calling fit first. This is typically the case + # for stateless transformers. + return + + if n_features != estimator.n_features_in_: + raise ValueError( + f"X has {n_features} features, but {estimator.__class__.__name__} " + f"is expecting {estimator.n_features_in_} features as input." + ) diff --git a/mne/decoding/_ged.py b/mne/decoding/_ged.py new file mode 100644 index 00000000000..ed6915b6674 --- /dev/null +++ b/mne/decoding/_ged.py @@ -0,0 +1,148 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np +import scipy.linalg + +from ..cov import Covariance, _smart_eigh, compute_whitener + + +def _handle_restr_mat(C_ref, restr_type, info, rank): + """Get restricting matrix to C_ref rank-dimensional principal subspace. + + Returns matrix of shape (rank, n_chs) used to restrict or + restrict+rescale (whiten) covariances matrices. 
+ """ + if C_ref is None or restr_type is None: + return None + if restr_type == "whitening": + C_ref_cov = Covariance(C_ref, info.ch_names, info["bads"], info["projs"], 0) + restr_mat = compute_whitener( + C_ref_cov, info, rank=rank, pca=True, verbose="error" + )[0] + elif restr_type == "restricting": + restr_mat = _get_restr_mat(C_ref, info, rank) + else: + raise ValueError( + "restr_type should either be callable or one of " + "('whitening', 'restricting')" + ) + return restr_mat + + +def _smart_ged(S, R, restr_mat=None, R_func=None): + """Perform smart generalized eigenvalue decomposition (GED) of S and R. + + If restr_mat is provided S and R will be restricted to the principal subspace + of a reference matrix with rank r (see _handle_restr_mat), then GED is performed + on the restricted S and R and then generalized eigenvectors are transformed back + to the original space. The g-eigenvectors matrix is of shape (n_chs, r). + If callable R_func is provided the GED will be performed on (S, R_func(S,R)) + """ + if restr_mat is None: + evals, evecs = scipy.linalg.eigh(S, R) + return evals, evecs + + S_restr = restr_mat @ S @ restr_mat.T + R_restr = restr_mat @ R @ restr_mat.T + if R_func is not None: + R_restr = R_func([S_restr, R_restr]) + evals, evecs_restr = scipy.linalg.eigh(S_restr, R_restr) + evecs = restr_mat.T @ evecs_restr + + return evals, evecs + + +def _is_cov_symm(cov, rtol=1e-7, atol=None): + if atol is None: + atol = 1e-7 * np.max(np.abs(cov)) + is_symm = scipy.linalg.issymmetric(cov, rtol=rtol, atol=atol) + return is_symm + + +def _get_cov_def(cov, eval_tol=None): + """Get definiteness of symmetric cov matrix. + + All evals in (-eval_tol, eval_tol) will be considered zero, + while all evals smaller than -eval_tol will be considered + negative. + """ + evals = scipy.linalg.eigvalsh(cov) + if eval_tol is None: + eval_tol = 1e-7 * np.max(np.abs(evals)) + if np.all(evals > eval_tol): + return "pos_def" + elif np.all(evals >= -eval_tol): + return "pos_semidef" + else: + return "indef" + + +def _is_cov_pos_semidef(cov, eval_tol=None): + cov_def = _get_cov_def(cov, eval_tol=eval_tol) + return cov_def in ("pos_def", "pos_semidef") + + +def _is_cov_pos_def(cov, eval_tol=None): + cov_def = _get_cov_def(cov, eval_tol=eval_tol) + return cov_def == "pos_def" + + +def _smart_ajd(covs, restr_mat=None, weights=None): + """Perform smart approximate joint diagonalization. + + If restr_mat is provided all the cov matrices will be restricted to the + principal subspace of a reference matrix with rank r (see _handle_restr_mat), + then GED is performed on the restricted S and R and then generalized eigenvectors + are transformed back to the original space. + The matrix of generalized eigenvectors is of shape (n_chs, r). 
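# A minimal sketch of ``_smart_ged`` above on synthetic covariances; the random
# matrices are assumptions for illustration, and no restricting matrix is passed,
# so this reduces to scipy.linalg.eigh(S, R).
import numpy as np

from mne.decoding._ged import _smart_ged  # private helper added in this patch

rng = np.random.default_rng(0)
A = rng.standard_normal((8, 200))
B = rng.standard_normal((8, 200))
S = A @ A.T / 200                      # "signal" covariance (symmetric PSD)
R = B @ B.T / 200 + 1e-6 * np.eye(8)   # "reference" covariance (positive definite)

evals, evecs = _smart_ged(S, R)        # generalized eigenpairs of (S, R)
# The eigenvector with the largest eigenvalue maximizes w.T @ S @ w / (w.T @ R @ w).
w = evecs[:, np.argmax(evals)]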
+ """ + from .csp import _ajd_pham + + if restr_mat is None: + are_all_pos_def = all([_is_cov_pos_def(cov) for cov in covs]) + if not are_all_pos_def: + raise ValueError( + "If C_ref is not provided by covariance estimator, " + "all the covs should be positive definite" + ) + evecs, D = _ajd_pham(covs) + return evecs + + else: + are_all_pos_semidef = all([_is_cov_pos_semidef(cov) for cov in covs]) + if not are_all_pos_semidef: + raise ValueError( + "All the covs should be positive semi-definite for " + "approximate joint diagonalization" + ) + covs = np.array([restr_mat @ cov @ restr_mat.T for cov in covs], float) + evecs_restr, D = _ajd_pham(covs) + evecs = _normalize_eigenvectors(evecs_restr.T, covs, weights) + evecs = restr_mat.T @ evecs + return evecs + + +def _get_restr_mat(C, info, rank): + """Get matrix restricting covariance to rank-dimensional principal subspace of C.""" + _, ref_evecs, mask = _smart_eigh( + C, + info, + rank, + proj_subspace=True, + do_compute_rank=False, + log_ch_type="data", + ) + restr_mat = ref_evecs[mask] + return restr_mat + + +def _normalize_eigenvectors(evecs, covs, sample_weights): + # Here we apply an euclidean mean. See pyRiemann for other metrics + mean_cov = np.average(covs, axis=0, weights=sample_weights) + + for ii in range(evecs.shape[1]): + tmp = np.dot(np.dot(evecs[:, ii].T, mean_cov), evecs[:, ii]) + evecs[:, ii] /= np.sqrt(tmp) + return evecs diff --git a/mne/decoding/_mod_ged.py b/mne/decoding/_mod_ged.py new file mode 100644 index 00000000000..df917a78ae3 --- /dev/null +++ b/mne/decoding/_mod_ged.py @@ -0,0 +1,132 @@ +"""Eigenvalue eigenvector modifiers for GED transformers.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from ..time_frequency import psd_array_welch +from ..utils import _time_mask + + +def _compute_mutual_info(covs, sample_weights, evecs): + class_probas = sample_weights / sample_weights.sum() + + mutual_info = [] + for jj in range(evecs.shape[1]): + aa, bb = 0, 0 + for cov, prob in zip(covs, class_probas): + tmp = np.dot(np.dot(evecs[:, jj].T, cov), evecs[:, jj]) + aa += prob * np.log(np.sqrt(tmp)) + bb += prob * (tmp**2 - 1) + mi = -(aa + (3.0 / 16) * (bb**2)) + mutual_info.append(mi) + + return mutual_info + + +def _csp_mod(evals, evecs, covs, evecs_order, sample_weights): + n_classes = sample_weights.shape[0] + if evecs_order == "mutual_info" and n_classes > 2: + mutual_info = _compute_mutual_info(covs, sample_weights, evecs) + ix = np.argsort(mutual_info)[::-1] + elif evecs_order == "mutual_info" and n_classes == 2: + ix = np.argsort(np.abs(evals - 0.5))[::-1] + elif evecs_order == "alternate" and n_classes == 2: + i = np.argsort(evals) + ix = np.empty_like(i) + ix[1::2] = i[: len(i) // 2] + ix[0::2] = i[len(i) // 2 :][::-1] + if evals is not None: + evals = evals[ix] + evecs = evecs[:, ix] + sorter = ix + return evals, evecs, sorter + + +def _xdawn_mod(evals, evecs, covs=None): + evals, evecs, sorter = _sort_descending(evals, evecs) + evecs /= np.linalg.norm(evecs, axis=0) + return evals, evecs, sorter + + +def _get_spectral_ratio(ssd_sources, sfreq, n_fft, freqs_signal, freqs_noise): + """Get the spectal signal-to-noise ratio for each spatial filter. + + Spectral ratio measure for best n_components selection + See :footcite:`NikulinEtAl2011`, Eq. (24). + + Returns + ------- + spec_ratio : array, shape (n_channels) + Array with the sprectal ratio value for each component. 
+ sorter_spec : array, shape (n_channels) + Array of indices for sorting spec_ratio. + + References + ---------- + .. footbibliography:: + """ + psd, freqs = psd_array_welch(ssd_sources, sfreq=sfreq, n_fft=n_fft) + sig_idx = _time_mask(freqs, *freqs_signal) + noise_idx = _time_mask(freqs, *freqs_noise) + if psd.ndim == 3: + mean_sig = psd[:, :, sig_idx].mean(axis=2).mean(axis=0) + mean_noise = psd[:, :, noise_idx].mean(axis=2).mean(axis=0) + spec_ratio = mean_sig / mean_noise + else: + mean_sig = psd[:, sig_idx].mean(axis=1) + mean_noise = psd[:, noise_idx].mean(axis=1) + spec_ratio = mean_sig / mean_noise + sorter_spec = spec_ratio.argsort()[::-1] + return spec_ratio, sorter_spec + + +def _ssd_mod( + evals, + evecs, + covs, + X, + picks, + sfreq, + n_fft, + freqs_signal, + freqs_noise, + sort_by_spectral_ratio, +): + evals, evecs, sorter = _sort_descending(evals, evecs) + if sort_by_spectral_ratio: + # We assume that ordering by spectral ratio is more important + # than the initial ordering. + filters = evecs.T + ssd_sources = filters @ X[..., picks, :] + _, sorter_spec = _get_spectral_ratio( + ssd_sources, sfreq, n_fft, freqs_signal, freqs_noise + ) + evecs = evecs[:, sorter_spec] + evals = evals[sorter_spec] + sorter = sorter_spec + return evals, evecs, sorter + + +def _spoc_mod(evals, evecs, covs=None): + evals = evals.real + evecs = evecs.real + evals, evecs, sorter = _sort_descending(evals, evecs, by_abs=True) + return evals, evecs, sorter + + +def _sort_descending(evals, evecs, by_abs=False): + if by_abs: + ix = np.argsort(np.abs(evals))[::-1] + else: + ix = np.argsort(evals)[::-1] + evals = evals[ix] + evecs = evecs[:, ix] + sorter = ix + return evals, evecs, sorter + + +def _no_op_mod(evals, evecs, *args, **kwargs): + return evals, evecs, None diff --git a/mne/decoding/base.py b/mne/decoding/base.py index 9d8070b8179..3a51a04bed7 100644 --- a/mne/decoding/base.py +++ b/mne/decoding/base.py @@ -1,20 +1,344 @@ """Base class copy from sklearn.base.""" -# Authors: Gael Varoquaux -# Romain Trachel -# Alexandre Gramfort -# Jean-Remi King -# + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import numpy as np import datetime as dt import numbers +from functools import partial +from inspect import Parameter, signature + +import numpy as np +from sklearn import model_selection as models +from sklearn.base import ( # noqa: F401 + BaseEstimator, + MetaEstimatorMixin, + TransformerMixin, + clone, + is_classifier, + is_regressor, +) +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import check_scoring +from sklearn.model_selection import KFold, StratifiedKFold, check_cv +from sklearn.utils import indexable +from sklearn.utils.validation import check_is_fitted + from ..parallel import parallel_func -from ..fixes import BaseEstimator, is_classifier, _get_check_scoring -from ..utils import warn, verbose +from ..utils import ( + _check_option, + _pl, + _validate_type, + logger, + pinv, + verbose, + warn, +) +from ._fixes import validate_data +from ._ged import ( + _handle_restr_mat, + _is_cov_pos_semidef, + _is_cov_symm, + _smart_ajd, + _smart_ged, +) +from ._mod_ged import _no_op_mod +from .transformer import MNETransformerMixin, Vectorizer + + +class _GEDTransformer(MNETransformerMixin, BaseEstimator): + """M/EEG signal decomposition using the generalized eigenvalue decomposition (GED). 
+ + Given two channel covariance matrices S and R, the goal is to find spatial filters + that maximise contrast between S and R. + + Parameters + ---------- + n_components : int | None + The number of spatial filters to decompose M/EEG signals. + If None, all of the components will be used for transformation. + Defaults to None. + cov_callable : callable + Function used to estimate covariances and reference matrix (C_ref) from the + data. The only required arguments should be 'X' and optionally 'y'. The function + should return covs, C_ref, info, rank and additional kwargs passed further + to mod_ged_callable. C_ref, info, rank can be None and kwargs can be empty dict. + mod_ged_callable : callable | None + Function used to modify (e.g. sort or normalize) generalized + eigenvalues and eigenvectors. It should accept as arguments evals, evecs + and also covs and optional kwargs returned by cov_callable. It should return + sorted and/or modified evals and evecs and the list of indices according + to which the first two were sorted. If None, evals and evecs will be + ordered according to :func:`~scipy.linalg.eigh` default. Defaults to None. + dec_type : "single" | "multi" + When "single" and cov_callable returns > 2 covariances, + approximate joint diagonalization based on Pham's algorithm + will be used instead of GED. + When 'multi', GED is performed separately for each class, i.e. each covariance + (except the last) returned by cov_callable is decomposed with the last + covariance. In this case, number of covariances should be number of classes + 1. + Defaults to "single". + restr_type : "restricting" | "whitening" | None + Restricting transformation for covariance matrices before performing GED. + If "restricting" only restriction to the principal subspace of the C_ref + will be performed. + If "whitening", covariance matrices will be additionally rescaled according + to the whitening for the C_ref. + If None, no restriction will be applied. Defaults to None. + R_func : callable | None + If provided, GED will be performed on (S, R_func([S,R])). When dec_type is + "single", R_func applicable only if two covariances returned by cov_callable. + If None, GED is performed on (S, R). Defaults to None. + + Attributes + ---------- + evals_ : ndarray, shape (n_channels) + If fit, generalized eigenvalues used to decompose S and R, else None. + filters_ : ndarray, shape (n_channels or less, n_channels) + If fit, spatial filters (unmixing matrix) used to decompose the data, + else None. + patterns_ : ndarray, shape (n_channels or less, n_channels) + If fit, spatial patterns (mixing matrix) used to restore M/EEG signals, + else None. + + See Also + -------- + CSP + SPoC + SSD + + Notes + ----- + .. 
versionadded:: 1.11 + """ + + def __init__( + self, + cov_callable=None, + n_components=None, + mod_ged_callable=None, + dec_type="single", + restr_type=None, + R_func=None, + ): + self.n_components = n_components + self.cov_callable = cov_callable + self.mod_ged_callable = mod_ged_callable + self.dec_type = dec_type + self.restr_type = restr_type + self.R_func = R_func + + _is_base_ged = True + + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + cls._is_base_ged = False + + def fit(self, X, y=None): + """...""" + # Let the inheriting transformers check data by themselves + if self._is_base_ged: + X, y = self._check_data( + X, + y=y, + fit=True, + return_y=True, + ) + self._validate_ged_params() + covs, C_ref, info, rank, kwargs = self.cov_callable(X, y) + covs = np.stack(covs) + self._validate_covariances(covs) + if C_ref is not None: + self._validate_covariances([C_ref]) + mod_ged_callable = ( + self.mod_ged_callable if self.mod_ged_callable is not None else _no_op_mod + ) + restr_mat = _handle_restr_mat(C_ref, self.restr_type, info, rank) + + if self.dec_type == "single": + if len(covs) > 2: + weights = kwargs.get("sample_weights", None) + evecs = _smart_ajd(covs, restr_mat, weights=weights) + evals = None + else: + S = covs[0] + R = covs[1] + evals, evecs = _smart_ged(S, R, restr_mat, R_func=self.R_func) + + evals, evecs, self.sorter_ = mod_ged_callable(evals, evecs, covs, **kwargs) + self.evals_ = evals + self.filters_ = evecs.T + self.patterns_ = pinv(evecs) + + elif self.dec_type == "multi": + self.classes_ = np.unique(y) + R = covs[-1] + all_evals, all_evecs = list(), list() + all_patterns, all_sorters = list(), list() + for i in range(len(self.classes_)): + S = covs[i] + + evals, evecs = _smart_ged(S, R, restr_mat, R_func=self.R_func) + + evals, evecs, sorter = mod_ged_callable(evals, evecs, covs, **kwargs) + all_evals.append(evals) + all_evecs.append(evecs.T) + all_patterns.append(pinv(evecs)) + all_sorters.append(sorter) + self.sorter_ = np.array(all_sorters) + self.evals_ = np.array(all_evals) + self.filters_ = np.array(all_evecs) + self.patterns_ = np.array(all_patterns) + return self -class LinearModel(BaseEstimator): + def transform(self, X): + """...""" + check_is_fitted(self, "filters_") + # Let the inheriting transformers check data by themselves + if self._is_base_ged: + X = self._check_data(X) + if self.dec_type == "single": + pick_filters = self.filters_[: self.n_components] + elif self.dec_type == "multi": + pick_filters = self._subset_multi_components() + X = pick_filters @ X + return X + + def _subset_multi_components(self, name="filters"): + # The shape of stored filters and patterns is + # is (n_classes, n_evecs, n_chs) + # Transform and subset into (n_classes*n_components, n_chs) + if name == "filters": + return self.filters_[:, : self.n_components, :].reshape( + -1, self.filters_.shape[2] + ) + elif name == "patterns": + return self.patterns_[:, : self.n_components, :].reshape( + -1, self.patterns_.shape[2] + ) + return None + + def _validate_required_args(self, func, desired_required_args): + sig = signature(func) + actual_required_args = [ + param.name + for param in sig.parameters.values() + if param.default is Parameter.empty + ] + func_name = func.func.__name__ if isinstance(func, partial) else func.__name__ + if not all(arg in desired_required_args for arg in actual_required_args): + raise ValueError( + f"Invalid required arguments for '{func_name}'. 
" + f"The only allowed required arguments are {desired_required_args}, " + f"but got {actual_required_args} instead." + ) + + def _validate_ged_params(self): + # Naming is GED-specific so that the validation is still executed + # when child classes run super().fit() + + _validate_type(self.n_components, (int, None), "n_components") + if self.n_components is not None and self.n_components <= 0: + raise ValueError( + "Invalid value for the 'n_components' parameter. " + "Allowed are positive integers or None, " + "but got a non-positive integer instead." + ) + + self._validate_required_args( + self.cov_callable, desired_required_args=["X", "y"] + ) + + _check_option( + "dec_type", + self.dec_type, + ("single", "multi"), + ) + + _check_option( + "restr_type", + self.restr_type, + ("restricting", "whitening", None), + ) + + def _validate_covariances(self, covs): + error_template = ( + "{matrix} is not {prop}, but required to be for {decomp}. " + "Check your cov_callable" + ) + if len(covs) == 1: + C_ref = covs[0] + is_C_ref_symm = _is_cov_symm(C_ref) + if not is_C_ref_symm: + raise ValueError( + error_template.format( + matrix="C_ref covariance", + prop="symmetric", + decomp="decomposition", + ) + ) + elif self.dec_type == "single" and len(covs) > 2: + # make only lenient symmetric check here. + # positive semidefiniteness/definiteness will be + # checked inside _smart_ajd + for ci, cov in enumerate(covs): + if not _is_cov_symm(cov): + raise ValueError( + error_template.format( + matrix=f"cov[{ci}]", + prop="symmetric", + decomp="approximate joint diagonalization", + ) + ) + else: + if len(covs) == 2: + S_covs = [covs[0]] + R = covs[1] + elif self.dec_type == "multi": + S_covs = covs[:-1] + R = covs[-1] + + are_all_S_symm = all([_is_cov_symm(S) for S in S_covs]) + if not are_all_S_symm: + raise ValueError( + error_template.format( + matrix="S covariance", + prop="symmetric", + decomp="generalized eigendecomposition", + ) + ) + if not _is_cov_symm(R): + raise ValueError( + error_template.format( + matrix="R covariance", + prop="symmetric", + decomp="generalized eigendecomposition", + ) + ) + if not _is_cov_pos_semidef(R): + raise ValueError( + error_template.format( + matrix="R covariance", + prop="positive semi-definite", + decomp="generalized eigendecomposition", + ) + ) + + def __sklearn_tags__(self): + """Tag the transformer.""" + tags = super().__sklearn_tags__() + # Can be a transformer where S and R covs are not based on y classes. + tags.target_tags.required = False + tags.target_tags.one_d_labels = True + tags.input_tags.two_d_array = True + tags.input_tags.three_d_array = True + tags.requires_fit = True + return tags + + +class LinearModel(MetaEstimatorMixin, BaseEstimator): """Compute and store patterns from linear models. The linear model coefficients (filters) are used to extract discriminant @@ -27,7 +351,8 @@ class LinearModel(BaseEstimator): model : object | None A linear model from scikit-learn with a fit method that updates a ``coef_`` attribute. - If None the model will be LogisticRegression. + If None the model will be + :class:`sklearn.linear_model.LogisticRegression`. Attributes ---------- @@ -51,13 +376,65 @@ class LinearModel(BaseEstimator): .. 
footbibliography:: """ - def __init__(self, model=None): # noqa: D102 - if model is None: - from sklearn.linear_model import LogisticRegression - model = LogisticRegression(solver='liblinear') + _model_attr_wrap = ( + "transform", + "fit_transform", + "predict", + "predict_proba", + "predict_log_proba", + "_estimator_type", # remove after sklearn 1.6 + "decision_function", + "score", + "classes_", + ) + def __init__(self, model=None): self.model = model - self._estimator_type = getattr(model, "_estimator_type", None) + + def __sklearn_tags__(self): + """Get sklearn tags.""" + tags = super().__sklearn_tags__() + model = self.model if self.model is not None else LogisticRegression() + model_tags = model.__sklearn_tags__() + tags.estimator_type = model_tags.estimator_type + if tags.estimator_type is not None: + model_type_tags = getattr(model_tags, f"{tags.estimator_type}_tags") + setattr(tags, f"{tags.estimator_type}_tags", model_type_tags) + return tags + + def __getattr__(self, attr): + """Wrap to model for some attributes.""" + if attr in LinearModel._model_attr_wrap: + model = self.model_ if "model_" in self.__dict__ else self.model + if attr == "fit_transform" and hasattr(model, "fit_transform"): + return self._fit_transform + else: + return getattr(model, attr) + else: + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{attr}'" + ) + + def _fit_transform(self, X, y): + return self.fit(X, y).transform(X) + + def _validate_params(self, X): + if self.model is not None: + model = self.model + if isinstance(model, MetaEstimatorMixin): + model = model.estimator + is_predictor = is_regressor(model) or is_classifier(model) + if not is_predictor: + raise ValueError( + "Linear model should be a supervised predictor " + "(classifier or regressor)" + ) + + # For sklearn < 1.6 + try: + self._check_n_features(X, reset=True) + except AttributeError: + pass def fit(self, X, y, **fit_params): """Estimate the coefficients of the linear model. @@ -79,20 +456,19 @@ def fit(self, X, y, **fit_params): self : instance of LinearModel Returns the modified instance. """ - X, y = np.asarray(X), np.asarray(y) - if X.ndim != 2: - raise ValueError('LinearModel only accepts 2-dimensional X, got ' - '%s instead.' % (X.shape,)) - if y.ndim > 2: - raise ValueError('LinearModel only accepts up to 2-dimensional y, ' - 'got %s instead.' % (y.shape,)) + self._validate_params(X) + X, y = validate_data(self, X, y, multi_output=True) # fit the Model - self.model.fit(X, y, **fit_params) + self.model_ = ( + clone(self.model) + if self.model is not None + else LogisticRegression(solver="liblinear") + ) + self.model_.fit(X, y, **fit_params) # Computes patterns using Haufe's trick: A = Cov_X . W . Precision_Y - - inv_Y = 1. 
+ inv_Y = 1.0 X = X - X.mean(0, keepdims=True) if y.ndim == 2 and y.shape[1] != 1: y = y - y.mean(0, keepdims=True) @@ -103,188 +479,139 @@ def fit(self, X, y, **fit_params): @property def filters_(self): - if hasattr(self.model, 'coef_'): + if hasattr(self.model_, "coef_"): # Standard Linear Model - filters = self.model.coef_ - elif hasattr(self.model.best_estimator_, 'coef_'): + filters = self.model_.coef_ + elif hasattr(self.model_, "estimators_"): + # Linear model with OneVsRestClassifier + filters = np.vstack([est.coef_ for est in self.model_.estimators_]) + elif hasattr(self.model_, "best_estimator_") and hasattr( + self.model_.best_estimator_, "coef_" + ): # Linear Model with GridSearchCV - filters = self.model.best_estimator_.coef_ + filters = self.model_.best_estimator_.coef_ else: - raise ValueError('model does not have a `coef_` attribute.') + raise ValueError("model does not have a `coef_` attribute.") if filters.ndim == 2 and filters.shape[0] == 1: filters = filters[0] return filters - def transform(self, X): - """Transform the data using the linear model. - - Parameters - ---------- - X : array, shape (n_samples, n_features) - The data to transform. - - Returns - ------- - y_pred : array, shape (n_samples,) - The predicted targets. - """ - return self.model.transform(X) - - def fit_transform(self, X, y): - """Fit the data and transform it using the linear model. - - Parameters - ---------- - X : array, shape (n_samples, n_features) - The training input samples to estimate the linear coefficients. - y : array, shape (n_samples,) - The target values. - - Returns - ------- - y_pred : array, shape (n_samples,) - The predicted targets. - """ - return self.fit(X, y).transform(X) - - def predict(self, X): - """Compute predictions of y from X. - - Parameters - ---------- - X : array, shape (n_samples, n_features) - The data used to compute the predictions. - - Returns - ------- - y_pred : array, shape (n_samples,) - The predictions. - """ - return self.model.predict(X) - - def predict_proba(self, X): - """Compute probabilistic predictions of y from X. - - Parameters - ---------- - X : array, shape (n_samples, n_features) - The data used to compute the predictions. - - Returns - ------- - y_pred : array, shape (n_samples, n_classes) - The probabilities. - """ - return self.model.predict_proba(X) - - def decision_function(self, X): - """Compute distance from the decision function of y from X. - - Parameters - ---------- - X : array, shape (n_samples, n_features) - The data used to compute the predictions. - - Returns - ------- - y_pred : array, shape (n_samples, n_classes) - The distances. - """ - return self.model.decision_function(X) - - def score(self, X, y): - """Score the linear model computed on the given test data. - - Parameters - ---------- - X : array, shape (n_samples, n_features) - The data to transform. - y : array, shape (n_samples,) - The target values. - - Returns - ------- - score : float - Score of the linear model. 
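# A minimal usage sketch for the refactored ``LinearModel`` above; the random 2-D
# data and the default LogisticRegression model are assumptions for illustration.
import numpy as np

from mne.decoding import LinearModel

rng = np.random.default_rng(42)
X = rng.standard_normal((100, 10))      # (n_samples, n_features)
y = (X[:, 0] > 0).astype(int)           # binary target driven by the first feature

clf = LinearModel()                     # wraps LogisticRegression by default
clf.fit(X, y)
print(clf.filters_.shape)               # model coefficients (filters)
print(clf.patterns_.shape)              # Haufe-style patterns computed in fit()
y_hat = clf.predict(X)                  # prediction is delegated to the wrapped model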
- """ - return self.model.score(X, y) - def _set_cv(cv, estimator=None, X=None, y=None): """Set the default CV depending on whether clf is classifier/regressor.""" # Detect whether classification or regression - if estimator in ['classifier', 'regressor']: - est_is_classifier = estimator == 'classifier' + + if estimator in ["classifier", "regressor"]: + est_is_classifier = estimator == "classifier" else: est_is_classifier = is_classifier(estimator) # Setup CV - from sklearn import model_selection as models - from sklearn.model_selection import (check_cv, StratifiedKFold, KFold) - if isinstance(cv, (int, np.int64)): + if isinstance(cv, int | np.int64): XFold = StratifiedKFold if est_is_classifier else KFold cv = XFold(n_splits=cv) elif isinstance(cv, str): if not hasattr(models, cv): - raise ValueError('Unknown cross-validation') + raise ValueError("Unknown cross-validation") cv = getattr(models, cv) cv = cv() cv = check_cv(cv=cv, y=y, classifier=est_is_classifier) # Extract train and test set to retrieve them at predict time - cv_splits = [(train, test) for train, test in - cv.split(X=np.zeros_like(y), y=y)] + cv_splits = [(train, test) for train, test in cv.split(X=np.zeros_like(y), y=y)] if not np.all([len(train) for train, _ in cv_splits]): - raise ValueError('Some folds do not have any train epochs.') + raise ValueError("Some folds do not have any train epochs.") return cv, cv_splits def _check_estimator(estimator, get_params=True): """Check whether an object has the methods required by sklearn.""" - valid_methods = ('predict', 'transform', 'predict_proba', - 'decision_function') - if ( - (not hasattr(estimator, 'fit')) or - (not any(hasattr(estimator, method) for method in valid_methods)) + valid_methods = ("predict", "transform", "predict_proba", "decision_function") + if (not hasattr(estimator, "fit")) or ( + not any(hasattr(estimator, method) for method in valid_methods) ): - raise ValueError('estimator must be a scikit-learn transformer or ' - 'an estimator with the fit and a predict-like (e.g. ' - 'predict_proba) or a transform method.') + raise ValueError( + "estimator must be a scikit-learn transformer or " + "an estimator with the fit and a predict-like (e.g. " + "predict_proba) or a transform method." + ) - if get_params and not hasattr(estimator, 'get_params'): - raise ValueError('estimator must be a scikit-learn transformer or an ' - 'estimator with the get_params method that allows ' - 'cloning.') + if get_params and not hasattr(estimator, "get_params"): + raise ValueError( + "estimator must be a scikit-learn transformer or an " + "estimator with the get_params method that allows " + "cloning." 
+ ) def _get_inverse_funcs(estimator, terminal=True): """Retrieve the inverse functions of an pipeline or an estimator.""" - inverse_func = [False] - if hasattr(estimator, 'steps'): + inverse_func = list() + estimators = list() + if hasattr(estimator, "steps"): # if pipeline, retrieve all steps by nesting - inverse_func = list() for _, est in estimator.steps: inverse_func.extend(_get_inverse_funcs(est, terminal=False)) - elif hasattr(estimator, 'inverse_transform'): + estimators.append(est.__class__.__name__) + elif hasattr(estimator, "inverse_transform"): # if not pipeline attempt to retrieve inverse function - inverse_func = [estimator.inverse_transform] + inverse_func.append(estimator.inverse_transform) + estimators.append(estimator.__class__.__name__) + else: + inverse_func.append(False) + estimators.append("Unknown") # If terminal node, check that that the last estimator is a classifier, # and remove it from the transformers. if terminal: last_is_estimator = inverse_func[-1] is False - all_invertible = False not in inverse_func[:-1] - if last_is_estimator and all_invertible: + logger.debug(f" Last estimator is an estimator: {last_is_estimator}") + non_invertible = np.where( + [inv_func is False for inv_func in inverse_func[:-1]] + )[0] + if last_is_estimator and len(non_invertible) == 0: # keep all inverse transformation and remove last estimation + logger.debug(" Removing inverse transformation from inverse list.") inverse_func = inverse_func[:-1] else: + if len(non_invertible): + bad = ", ".join(estimators[ni] for ni in non_invertible) + warn( + f"Cannot inverse transform non-invertible " + f"estimator{_pl(non_invertible)}: {bad}." + ) inverse_func = list() return inverse_func -def get_coef(estimator, attr='filters_', inverse_transform=False): +def _get_inverse_funcs_before_step(estimator, step_name): + """Get the inverse_transform methods for all steps before a target step.""" + # in case step_name is nested with __ + parts = step_name.split("__") + inverse_funcs = list() + current_pipeline = estimator + for part_name in parts: + all_names = [name for name, _ in current_pipeline.steps] + part_idx = all_names.index(part_name) + # get all preceding steps for the current step + for prec_name, prec_step in current_pipeline.steps[:part_idx]: + if hasattr(prec_step, "inverse_transform"): + inverse_funcs.append(prec_step.inverse_transform) + else: + warn( + f"Preceding step '{prec_name}' is not invertible " + f"and will be skipped." + ) + current_pipeline = current_pipeline.named_steps[part_name] + return inverse_funcs + + +@verbose +def get_coef( + estimator, attr="filters_", inverse_transform=False, *, step_name=None, verbose=None +): """Retrieve the coefficients of an estimator ending with a Linear Model. This is typically useful to retrieve "spatial filters" or "spatial @@ -300,6 +627,14 @@ def get_coef(estimator, attr='filters_', inverse_transform=False): inverse_transform : bool If True, returns the coefficients after inverse transforming them with the transformer steps of the estimator. + step_name : str | None + Name of the sklearn's pipeline step to get the coef from. + If inverse_transform is True, the inverse transformations + will be applied using transformers before this step. + If None, the last step will be used. Defaults to None. + + .. 
versionadded:: 1.11 + %(verbose)s Returns ------- @@ -312,22 +647,33 @@ def get_coef(estimator, attr='filters_', inverse_transform=False): """ # Get the coefficients of the last estimator in case of nested pipeline est = estimator - while hasattr(est, 'steps'): - est = est.steps[-1][1] + logger.debug(f"Getting coefficients from estimator: {est.__class__.__name__}") + + if step_name is not None: + if not hasattr(estimator, "named_steps"): + raise ValueError("step_name can only be used with a pipeline estimator.") + try: + est = est.get_params(deep=True)[step_name] + except KeyError: + raise ValueError(f"Step '{step_name}' is not part of the pipeline.") + else: + while hasattr(est, "steps"): + est = est.steps[-1][1] squeeze_first_dim = False # If SlidingEstimator, loop across estimators - if hasattr(est, 'estimators_'): + if hasattr(est, "estimators_"): coef = list() - for this_est in est.estimators_: + for ei, this_est in enumerate(est.estimators_): + if ei == 0: + logger.debug(" Extracting coefficients from SlidingEstimator.") coef.append(get_coef(this_est, attr, inverse_transform)) coef = np.transpose(coef) coef = coef[np.newaxis] # fake a sample dimension squeeze_first_dim = True elif not hasattr(est, attr): - raise ValueError('This estimator does not have a %s attribute:\n%s' - % (attr, est)) + raise ValueError(f"This estimator does not have a {attr} attribute:\n{est}") else: coef = getattr(est, attr) @@ -337,24 +683,52 @@ def get_coef(estimator, attr='filters_', inverse_transform=False): # inverse pattern e.g. to get back physical units if inverse_transform: - if not hasattr(estimator, 'steps') and not hasattr(est, 'estimators_'): - raise ValueError('inverse_transform can only be applied onto ' - 'pipeline estimators.') + if not hasattr(estimator, "steps") and not hasattr(est, "estimators_"): + raise ValueError( + "inverse_transform can only be applied onto pipeline estimators." + ) + if step_name is None: + inverse_funcs = _get_inverse_funcs(estimator) + else: + inverse_funcs = _get_inverse_funcs_before_step(estimator, step_name) + # The inverse_transform parameter will call this method on any # estimator contained in the pipeline, in reverse order. - for inverse_func in _get_inverse_funcs(estimator)[::-1]: + for inverse_func in inverse_funcs[::-1]: + logger.debug(f" Applying inverse transformation: {inverse_func}.") coef = inverse_func(coef) if squeeze_first_dim: + logger.debug(" Squeezing first dimension of coefficients.") coef = coef[0] + # inverse_transform with Vectorizer returns shape (n_channels, n_components). + # we should transpose to be consistent with how spatial filters + # store filters and patterns: (n_components, n_channels) + if inverse_transform and hasattr(estimator, "steps"): + is_vectorizer = any( + isinstance(param_value, Vectorizer) + for param_value in estimator.get_params(deep=True).values() + ) + if is_vectorizer and coef.ndim == 2: + coef = coef.T + return coef @verbose -def cross_val_multiscore(estimator, X, y=None, groups=None, scoring=None, - cv=None, n_jobs=None, verbose=None, fit_params=None, - pre_dispatch='2*n_jobs'): +def cross_val_multiscore( + estimator, + X, + y=None, + groups=None, + scoring=None, + cv=None, + n_jobs=None, + verbose=None, + fit_params=None, + pre_dispatch="2*n_jobs", +): """Evaluate a score by cross-validation. Parameters @@ -416,12 +790,6 @@ def cross_val_multiscore(estimator, X, y=None, groups=None, scoring=None, Array of scores of the estimator for each run of the cross validation. 
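# A minimal sketch of the new ``step_name`` option of ``get_coef`` and of
# ``cross_val_multiscore``; the toy 2-D data and the pipeline below are
# assumptions for illustration only.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

from mne.decoding import LinearModel, cross_val_multiscore, get_coef

rng = np.random.default_rng(1)
X = rng.standard_normal((80, 6))
y = np.arange(80) % 2

pipe = Pipeline([("scaler", StandardScaler()), ("clf", LinearModel())])
pipe.fit(X, y)
patterns = get_coef(pipe, "patterns_", step_name="clf")  # coefficients of the "clf" step

scores = cross_val_multiscore(LogisticRegression(), X, y, cv=5)  # one score per fold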
""" # This code is copied from sklearn - - from sklearn.base import clone - from sklearn.utils import indexable - from sklearn.model_selection._split import check_cv - check_scoring = _get_check_scoring() - X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) @@ -430,15 +798,23 @@ def cross_val_multiscore(estimator, X, y=None, groups=None, scoring=None, # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. # Note: this parallelization is implemented using MNE Parallel - parallel, p_func, n_jobs = parallel_func(_fit_and_score, n_jobs, - pre_dispatch=pre_dispatch) - position = hasattr(estimator, 'position') + parallel, p_func, n_jobs = parallel_func( + _fit_and_score, n_jobs, pre_dispatch=pre_dispatch + ) + position = hasattr(estimator, "position") scores = parallel( p_func( - estimator=clone(estimator), X=X, y=y, scorer=scorer, train=train, - test=test, fit_params=fit_params, verbose=verbose, + estimator=clone(estimator), + X=X, + y=y, + scorer=scorer, + train=train, + test=test, + fit_params=fit_params, + verbose=verbose, parameters=dict(position=ii % n_jobs) if position else None, - ) for ii, (train, test) in enumerate(cv_iter) + ) + for ii, (train, test) in enumerate(cv_iter) ) return np.array(scores)[:, 0, ...] # flatten over joblib output. @@ -446,20 +822,36 @@ def cross_val_multiscore(estimator, X, y=None, groups=None, scoring=None, # This verbose is necessary to properly set the verbosity level # during parallelization @verbose -def _fit_and_score(estimator, X, y, scorer, train, test, - parameters, fit_params, return_train_score=False, - return_parameters=False, return_n_test_samples=False, - return_times=False, error_score='raise', *, verbose=None, - position=0): +def _fit_and_score( + estimator, + X, + y, + scorer, + train, + test, + parameters, + fit_params, + return_train_score=False, + return_parameters=False, + return_n_test_samples=False, + return_times=False, + error_score="raise", + *, + verbose=None, + position=0, +): """Fit estimator and compute scores for a given dataset split.""" # This code is adapted from sklearn - from ..fixes import _check_fit_params + from sklearn.model_selection import _validation from sklearn.utils.metaestimators import _safe_split from sklearn.utils.validation import _num_samples # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} - fit_params = _check_fit_params(X, fit_params, train) + fit_params = { + k: _validation._index_param_value(X, v, train) for k, v in fit_params.items() + } if parameters is not None: estimator.set_params(**parameters) @@ -479,19 +871,21 @@ def _fit_and_score(estimator, X, y, scorer, train, test, # Note fit time as time until error fit_duration = dt.datetime.now() - start_time score_duration = dt.timedelta(0) - if error_score == 'raise': + if error_score == "raise": raise elif isinstance(error_score, numbers.Number): test_score = error_score if return_train_score: train_score = error_score - warn("Classifier fit failed. The score on this train-test" - " partition for these parameters will be set to %f. " - "Details: \n%r" % (error_score, e)) + warn( + "Classifier fit failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}. Details: \n{e!r}" + ) else: - raise ValueError("error_score must be the string 'raise' or a" - " numeric value. 
(Hint: if using 'raise', please" - " make sure that it has been spelled correctly.)") + raise ValueError( + "error_score must be the string 'raise' or a numeric value. (Hint: if " + "using 'raise', please make sure that it has been spelled correctly.)" + ) else: fit_duration = dt.datetime.now() - start_time @@ -505,10 +899,7 @@ def _fit_and_score(estimator, X, y, scorer, train, test, if return_n_test_samples: ret.append(_num_samples(X_test)) if return_times: - ret.extend([ - fit_duration.total_seconds(), - score_duration.total_seconds() - ]) + ret.extend([fit_duration.total_seconds(), score_duration.total_seconds()]) if return_parameters: ret.append(parameters) return ret @@ -524,7 +915,7 @@ def _score(estimator, X_test, y_test, scorer): score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) - if hasattr(score, 'item'): + if hasattr(score, "item"): try: # e.g. unwrap memmapped scalars score = score.item() diff --git a/mne/decoding/csp.py b/mne/decoding/csp.py index c7362a0b5bf..adf857d11b9 100644 --- a/mne/decoding/csp.py +++ b/mne/decoding/csp.py @@ -1,27 +1,23 @@ -# -*- coding: utf-8 -*- -# Authors: Romain Trachel -# Alexandre Gramfort -# Alexandre Barachant -# Clemens Brunner -# Jean-Remi King -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import copy as cp +import collections.abc as abc +from functools import partial import numpy as np -from .base import BaseEstimator -from .mixin import TransformerMixin -from ..cov import _regularized_covariance -from ..defaults import (_BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT, - _INTERPOLATION_DEFAULT) -from ..fixes import pinv -from ..utils import fill_doc, _check_option, _validate_type, copy_doc +from .._fiff.meas_info import Info +from ..defaults import _BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT, _INTERPOLATION_DEFAULT +from ..utils import _check_option, _validate_type, fill_doc, legacy +from ._covs_ged import _csp_estimate, _spoc_estimate +from ._mod_ged import _csp_mod, _spoc_mod +from .base import _GEDTransformer +from .spatial_filter import get_spatial_filter_from_estimator @fill_doc -class CSP(TransformerMixin, BaseEstimator): +class CSP(_GEDTransformer): """M/EEG signal decomposition using the Common Spatial Patterns (CSP). This class can be used as a supervised decomposition to estimate spatial @@ -64,6 +60,26 @@ class CSP(TransformerMixin, BaseEstimator): Parameters to pass to :func:`mne.compute_covariance`. .. versionadded:: 0.16 + + restr_type : "restricting" | "whitening" | None + Restricting transformation for covariance matrices before performing + generalized eigendecomposition. + If "restricting" only restriction to the principal subspace of signal_cov + will be performed. + If "whitening", covariance matrices will be additionally rescaled according + to the whitening for the signal_cov. + If None, no restriction will be applied. Defaults to "restricting". + + .. versionadded:: 1.11 + info : mne.Info | None + The mne.Info object with information about the sensors and methods of + measurement used for covariance estimation and generalized + eigendecomposition. + If None, one channel type and no projections will be assumed and if + rank is dict, it will be sum of ranks per channel type. + Defaults to None. + + .. versionadded:: 1.11 %(rank_none)s .. versionadded:: 0.17 @@ -91,61 +107,105 @@ class CSP(TransformerMixin, BaseEstimator): See Also -------- - mne.preprocessing.Xdawn, SPoC + XdawnTransformer, SPoC, SSD References ---------- .. 
footbibliography:: """ - def __init__(self, n_components=4, reg=None, log=None, cov_est='concat', - transform_into='average_power', norm_trace=False, - cov_method_params=None, rank=None, - component_order='mutual_info'): + def __init__( + self, + n_components=4, + reg=None, + log=None, + cov_est="concat", + transform_into="average_power", + norm_trace=False, + cov_method_params=None, + *, + restr_type="restricting", + info=None, + rank=None, + component_order="mutual_info", + ): # Init default CSP - if not isinstance(n_components, int): - raise ValueError('n_components must be an integer.') self.n_components = n_components + self.info = info self.rank = rank self.reg = reg - - # Init default cov_est - if not (cov_est == "concat" or cov_est == "epoch"): - raise ValueError("unknown covariance estimation method") self.cov_est = cov_est - - # Init default transform_into - self.transform_into = _check_option('transform_into', transform_into, - ['average_power', 'csp_space']) - - # Init default log - if transform_into == 'average_power': - if log is not None and not isinstance(log, bool): - raise ValueError('log must be a boolean if transform_into == ' - '"average_power".') - else: - if log is not None: - raise ValueError('log must be a None if transform_into == ' - '"csp_space".') + self.transform_into = transform_into self.log = log - - _validate_type(norm_trace, bool, 'norm_trace') self.norm_trace = norm_trace self.cov_method_params = cov_method_params - self.component_order = _check_option('component_order', - component_order, - ('mutual_info', 'alternate')) - - def _check_Xy(self, X, y=None): - """Check input data.""" - if not isinstance(X, np.ndarray): - raise ValueError("X should be of type ndarray (got %s)." - % type(X)) - if y is not None: - if len(X) != len(y) or len(y) < 1: - raise ValueError('X and y must have the same length.') - if X.ndim < 3: - raise ValueError('X must have at least 3 dimensions.') + self.component_order = component_order + self.restr_type = restr_type + + cov_callable = partial( + _csp_estimate, + reg=reg, + cov_method_params=cov_method_params, + cov_est=cov_est, + info=info, + rank=rank, + norm_trace=norm_trace, + ) + mod_ged_callable = partial(_csp_mod, evecs_order=component_order) + super().__init__( + n_components=n_components, + cov_callable=cov_callable, + mod_ged_callable=mod_ged_callable, + restr_type=restr_type, + R_func=sum, + ) + + def __sklearn_tags__(self): + """Tag the transformer.""" + tags = super().__sklearn_tags__() + tags.target_tags.required = True + tags.target_tags.multi_output = True + return tags + + def _validate_params(self, *, y): + _validate_type(self.n_components, int, "n_components") + if hasattr(self, "cov_est"): + _validate_type(self.cov_est, str, "cov_est") + _check_option("cov_est", self.cov_est, ("concat", "epoch")) + if hasattr(self, "norm_trace"): + _validate_type(self.norm_trace, bool, "norm_trace") + _check_option( + "transform_into", self.transform_into, ["average_power", "csp_space"] + ) + if self.transform_into == "average_power": + _validate_type( + self.log, + (bool, None), + "log", + extra="when transform_into is 'average_power'", + ) + else: + _validate_type( + self.log, None, "log", extra="when transform_into is 'csp_space'" + ) + _check_option( + "component_order", self.component_order, ("mutual_info", "alternate") + ) + self.classes_ = np.unique(y) + n_classes = len(self.classes_) + if n_classes < 2: + raise ValueError( + "y should be a 1d array with more than two classes, " + f"but got {n_classes} class from {y}" 
+ ) + elif n_classes > 2 and self.component_order == "alternate": + raise ValueError( + "component_order='alternate' requires two classes, but data contains " + f"{n_classes} classes; use component_order='mutual_info' instead." + ) + _validate_type(self.rank, (dict, None, str), "rank") + _validate_type(self.info, (Info, None), "info") + _validate_type(self.cov_method_params, (abc.Mapping, None), "cov_method_params") def fit(self, X, y): """Estimate the CSP decomposition on epochs. @@ -162,34 +222,18 @@ def fit(self, X, y): self : instance of CSP Returns the modified instance. """ - self._check_Xy(X, y) + X, y = self._check_data(X, y=y, fit=True, return_y=True) + self._validate_params(y=y) - self._classes = np.unique(y) - n_classes = len(self._classes) - if n_classes < 2: - raise ValueError("n_classes must be >= 2.") - if n_classes > 2 and self.component_order == 'alternate': - raise ValueError("component_order='alternate' requires two " - "classes, but data contains {} classes; use " - "component_order='mutual_info' " - "instead.".format(n_classes)) - - covs, sample_weights = self._compute_covariance_matrices(X, y) - eigen_vectors, eigen_values = self._decompose_covs(covs, - sample_weights) - ix = self._order_components(covs, sample_weights, eigen_vectors, - eigen_values, self.component_order) - - eigen_vectors = eigen_vectors[:, ix] - - self.filters_ = eigen_vectors.T - self.patterns_ = pinv(eigen_vectors) + # Covariance estimation, GED/AJD + # and evecs/evals sorting happen here + super().fit(X, y) - pick_filters = self.filters_[:self.n_components] + pick_filters = self.filters_[: self.n_components] X = np.asarray([np.dot(pick_filters, epoch) for epoch in X]) # compute features (mean power) - X = (X ** 2).mean(axis=2) + X = (X**2).mean(axis=2) # To standardize features self.mean_ = X.mean(axis=0) @@ -209,22 +253,15 @@ def transform(self, X): ------- X : ndarray If self.transform_into == 'average_power' then returns the power of - CSP features averaged over time and shape (n_epochs, n_sources) + CSP features averaged over time and shape (n_epochs, n_components) If self.transform_into == 'csp_space' then returns the data in CSP - space and shape is (n_epochs, n_sources, n_times). + space and shape is (n_epochs, n_components, n_times). """ - if not isinstance(X, np.ndarray): - raise ValueError("X should be of type ndarray (got %s)." % type(X)) - if self.filters_ is None: - raise RuntimeError('No filters available. Please first fit CSP ' - 'decomposition.') - - pick_filters = self.filters_[:self.n_components] - X = np.asarray([np.dot(pick_filters, epoch) for epoch in X]) - + X = self._check_data(X) + X = super().transform(X) # compute features (mean band power) - if self.transform_into == 'average_power': - X = (X ** 2).mean(axis=2) + if self.transform_into == "average_power": + X = (X**2).mean(axis=2) log = True if self.log is None else self.log if log: X = np.log(X) @@ -233,20 +270,90 @@ def transform(self, X): X /= self.std_ return X - @copy_doc(TransformerMixin.fit_transform) - def fit_transform(self, X, y, **fit_params): # noqa: D102 + def inverse_transform(self, X): + """Project CSP features back to sensor space. + + Parameters + ---------- + X : array, shape (n_epochs, n_components) + The data in CSP power space. + + Returns + ------- + X : ndarray + The data in sensor space and shape (n_epochs, n_channels, n_components). 
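# A minimal sketch of the CSP API documented above; the random epochs and shapes
# are assumptions for illustration only.
import numpy as np

from mne.decoding import CSP

rng = np.random.default_rng(0)
n_epochs, n_channels, n_times = 40, 8, 128
X = rng.standard_normal((n_epochs, n_channels, n_times))
y = np.arange(n_epochs) % 2            # two classes

csp = CSP(n_components=4)
power = csp.fit_transform(X, y)        # (n_epochs, n_components), log band power
X_back = csp.inverse_transform(power)  # (n_epochs, n_channels, n_components)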
+ """ + if self.transform_into != "average_power": + raise NotImplementedError( + "Can only inverse transform CSP features when transform_into is " + "'average_power'." + ) + if not (X.ndim == 2 and X.shape[1] == self.n_components): + raise ValueError( + f"X must be 2D with X[1]={self.n_components}, got {X.shape=}" + ) + return X[:, np.newaxis, :] * self.patterns_[: self.n_components].T + + def fit_transform(self, X, y=None, **fit_params): + """Fit CSP to data, then transform it. + + Fits transformer to ``X`` and ``y`` with optional parameters ``fit_params``, and + returns a transformed version of ``X``. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data on which to estimate the CSP. + y : array, shape (n_epochs,) + The class for each epoch. + **fit_params : dict + Additional fitting parameters passed to the :meth:`mne.decoding.CSP.fit` + method. Not used for this class. + + Returns + ------- + X_csp : array, shape (n_epochs, n_components[, n_times]) + If ``self.transform_into == 'average_power'`` then returns the power of CSP + features averaged over time and shape is ``(n_epochs, n_components)``. If + ``self.transform_into == 'csp_space'`` then returns the data in CSP space + and shape is ``(n_epochs, n_components, n_times)``. + """ + # use parent TransformerMixin method but with custom docstring return super().fit_transform(X, y=y, **fit_params) + @legacy(alt="get_spatial_filter_from_estimator(clf, info=info).plot_patterns()") @fill_doc def plot_patterns( - self, info, components=None, *, average=None, ch_type=None, - scalings=None, sensors=True, show_names=False, mask=None, - mask_params=None, contours=6, outlines='head', sphere=None, - image_interp=_INTERPOLATION_DEFAULT, - extrapolate=_EXTRAPOLATE_DEFAULT, border=_BORDER_DEFAULT, res=64, - size=1, cmap='RdBu_r', vlim=(None, None), cnorm=None, - colorbar=True, cbar_fmt='%3.1f', units=None, axes=None, - name_format='CSP%01d', nrows=1, ncols='auto', show=True): + self, + info, + components=None, + *, + ch_type=None, + scalings=None, + sensors=True, + show_names=False, + mask=None, + mask_params=None, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap="RdBu_r", + vlim=(None, None), + cnorm=None, + colorbar=True, + cbar_fmt="%3.1f", + units=None, + axes=None, + name_format="CSP%01d", + nrows=1, + ncols="auto", + show=True, + ): """Plot topographic patterns of components. The patterns explain how the measured data was generated from the @@ -258,7 +365,6 @@ def plot_patterns( :func:`mne.create_info`. components : float | array of float | None The patterns to plot. If ``None``, all components will be shown. - %(average_plot_evoked_topomap)s %(ch_type_topomap)s scalings : dict | float | None The scalings of the channel types to be applied for plotting. @@ -302,41 +408,69 @@ def plot_patterns( fig : instance of matplotlib.figure.Figure The figure. """ - from .. import EvokedArray - - if units is None: - units = 'AU' - if components is None: - components = np.arange(self.n_components) - - # set sampling frequency to have 1 component per time point - info = cp.deepcopy(info) - with info._unlock(): - info['sfreq'] = 1. 
- # create an evoked - patterns = EvokedArray(self.patterns_.T, info, tmin=0) - # the call plot_topomap - fig = patterns.plot_topomap( - times=components, average=average, ch_type=ch_type, - scalings=scalings, sensors=sensors, show_names=show_names, - mask=mask, mask_params=mask_params, contours=contours, - outlines=outlines, sphere=sphere, image_interp=image_interp, - extrapolate=extrapolate, border=border, res=res, size=size, - cmap=cmap, vlim=vlim, cnorm=cnorm, colorbar=colorbar, - cbar_fmt=cbar_fmt, units=units, axes=axes, time_format=name_format, - nrows=nrows, ncols=ncols, show=show) - return fig - + spf = get_spatial_filter_from_estimator(self, info=info) + return spf.plot_patterns( + components, + ch_type=ch_type, + scalings=scalings, + sensors=sensors, + show_names=show_names, + mask=mask, + mask_params=mask_params, + contours=contours, + outlines=outlines, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + border=border, + res=res, + size=size, + cmap=cmap, + vlim=vlim, + cnorm=cnorm, + colorbar=colorbar, + cbar_fmt=cbar_fmt, + units=units, + axes=axes, + name_format=name_format, + nrows=nrows, + ncols=ncols, + show=show, + ) + + @legacy(alt="get_spatial_filter_from_estimator(clf, info=info).plot_filters()") @fill_doc def plot_filters( - self, info, components=None, *, average=None, ch_type=None, - scalings=None, sensors=True, show_names=False, mask=None, - mask_params=None, contours=6, outlines='head', sphere=None, - image_interp=_INTERPOLATION_DEFAULT, - extrapolate=_EXTRAPOLATE_DEFAULT, border=_BORDER_DEFAULT, res=64, - size=1, cmap='RdBu_r', vlim=(None, None), cnorm=None, - colorbar=True, cbar_fmt='%3.1f', units=None, axes=None, - name_format='CSP%01d', nrows=1, ncols='auto', show=True): + self, + info, + components=None, + *, + ch_type=None, + scalings=None, + sensors=True, + show_names=False, + mask=None, + mask_params=None, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap="RdBu_r", + vlim=(None, None), + cnorm=None, + colorbar=True, + cbar_fmt="%3.1f", + units=None, + axes=None, + name_format="CSP%01d", + nrows=1, + ncols="auto", + show=True, + ): """Plot topographic filters of components. The filters are used to extract discriminant neural sources from @@ -348,7 +482,6 @@ def plot_filters( :func:`mne.create_info`. components : float | array of float | None The patterns to plot. If ``None``, all components will be shown. - %(average_plot_evoked_topomap)s %(ch_type_topomap)s scalings : dict | float | None The scalings of the channel types to be applied for plotting. @@ -392,131 +525,35 @@ def plot_filters( fig : instance of matplotlib.figure.Figure The figure. """ - from .. import EvokedArray - - if units is None: - units = 'AU' - if components is None: - components = np.arange(self.n_components) - - # set sampling frequency to have 1 component per time point - info = cp.deepcopy(info) - with info._unlock(): - info['sfreq'] = 1. 
- # create an evoked - filters = EvokedArray(self.filters_.T, info, tmin=0) - # the call plot_topomap - fig = filters.plot_topomap( - times=components, average=average, ch_type=ch_type, - scalings=scalings, sensors=sensors, show_names=show_names, - mask=mask, mask_params=mask_params, contours=contours, - outlines=outlines, sphere=sphere, image_interp=image_interp, - extrapolate=extrapolate, border=border, res=res, size=size, - cmap=cmap, vlim=vlim, cnorm=cnorm, colorbar=colorbar, - cbar_fmt=cbar_fmt, units=units, axes=axes, time_format=name_format, - nrows=nrows, ncols=ncols, show=show) - return fig - - def _compute_covariance_matrices(self, X, y): - _, n_channels, _ = X.shape - - if self.cov_est == "concat": - cov_estimator = self._concat_cov - elif self.cov_est == "epoch": - cov_estimator = self._epoch_cov - - covs = [] - sample_weights = [] - for this_class in self._classes: - cov, weight = cov_estimator(X[y == this_class]) - - if self.norm_trace: - cov /= np.trace(cov) - - covs.append(cov) - sample_weights.append(weight) - - return np.stack(covs), np.array(sample_weights) - - def _concat_cov(self, x_class): - """Concatenate epochs before computing the covariance.""" - _, n_channels, _ = x_class.shape - - x_class = np.transpose(x_class, [1, 0, 2]) - x_class = x_class.reshape(n_channels, -1) - cov = _regularized_covariance( - x_class, reg=self.reg, method_params=self.cov_method_params, - rank=self.rank) - weight = x_class.shape[0] - - return cov, weight - - def _epoch_cov(self, x_class): - """Mean of per-epoch covariances.""" - cov = sum(_regularized_covariance( - this_X, reg=self.reg, - method_params=self.cov_method_params, - rank=self.rank) for this_X in x_class) - cov /= len(x_class) - weight = len(x_class) - - return cov, weight - - def _decompose_covs(self, covs, sample_weights): - from scipy import linalg - n_classes = len(covs) - if n_classes == 2: - eigen_values, eigen_vectors = linalg.eigh(covs[0], covs.sum(0)) - else: - # The multiclass case is adapted from - # http://github.com/alexandrebarachant/pyRiemann - eigen_vectors, D = _ajd_pham(covs) - eigen_vectors = self._normalize_eigenvectors(eigen_vectors.T, covs, - sample_weights) - eigen_values = None - return eigen_vectors, eigen_values - - def _compute_mutual_info(self, covs, sample_weights, eigen_vectors): - class_probas = sample_weights / sample_weights.sum() - - mutual_info = [] - for jj in range(eigen_vectors.shape[1]): - aa, bb = 0, 0 - for (cov, prob) in zip(covs, class_probas): - tmp = np.dot(np.dot(eigen_vectors[:, jj].T, cov), - eigen_vectors[:, jj]) - aa += prob * np.log(np.sqrt(tmp)) - bb += prob * (tmp ** 2 - 1) - mi = - (aa + (3.0 / 16) * (bb ** 2)) - mutual_info.append(mi) - - return mutual_info - - def _normalize_eigenvectors(self, eigen_vectors, covs, sample_weights): - # Here we apply an euclidean mean. 
See pyRiemann for other metrics - mean_cov = np.average(covs, axis=0, weights=sample_weights) - - for ii in range(eigen_vectors.shape[1]): - tmp = np.dot(np.dot(eigen_vectors[:, ii].T, mean_cov), - eigen_vectors[:, ii]) - eigen_vectors[:, ii] /= np.sqrt(tmp) - return eigen_vectors - - def _order_components(self, covs, sample_weights, eigen_vectors, - eigen_values, component_order): - n_classes = len(self._classes) - if component_order == 'mutual_info' and n_classes > 2: - mutual_info = self._compute_mutual_info(covs, sample_weights, - eigen_vectors) - ix = np.argsort(mutual_info)[::-1] - elif component_order == 'mutual_info' and n_classes == 2: - ix = np.argsort(np.abs(eigen_values - 0.5))[::-1] - elif component_order == 'alternate' and n_classes == 2: - i = np.argsort(eigen_values) - ix = np.empty_like(i) - ix[1::2] = i[:len(i) // 2] - ix[0::2] = i[len(i) // 2:][::-1] - return ix + spf = get_spatial_filter_from_estimator(self, info=info) + return spf.plot_filters( + components, + ch_type=ch_type, + scalings=scalings, + sensors=sensors, + show_names=show_names, + mask=mask, + mask_params=mask_params, + contours=contours, + outlines=outlines, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + border=border, + res=res, + size=size, + cmap=cmap, + vlim=vlim, + cnorm=cnorm, + colorbar=colorbar, + cbar_fmt=cbar_fmt, + units=units, + axes=axes, + name_format=name_format, + nrows=nrows, + ncols=ncols, + show=show, + ) def _ajd_pham(X, eps=1e-6, max_iter=15): @@ -584,16 +621,16 @@ def _ajd_pham(X, eps=1e-6, max_iter=15): decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0 - tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21) - tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21)) + tmp = 1 + 1.0j * 0.5 * np.imag(h12 * h21) + tmp = np.real(tmp + np.sqrt(tmp**2 - h12 * h21)) tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]]) A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :]) tmp = np.c_[A[:, Ii], A[:, Ij]] - tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F') + tmp = np.reshape(tmp, (n_times * n_epochs, 2), order="F") tmp = np.dot(tmp, tau.T) - tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F') + tmp = np.reshape(tmp, (n_times, n_epochs * 2), order="F") A[:, Ii] = tmp[:, :n_epochs] A[:, Ij] = tmp[:, n_epochs:] V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :]) @@ -640,6 +677,25 @@ class SPoC(CSP): Parameters to pass to :func:`mne.compute_covariance`. .. versionadded:: 0.16 + restr_type : "restricting" | "whitening" | None + Restricting transformation for covariance matrices before performing + generalized eigendecomposition. + If "restricting" only restriction to the principal subspace of signal_cov + will be performed. + If "whitening", covariance matrices will be additionally rescaled according + to the whitening for the signal_cov. + If None, no restriction will be applied. Defaults to None. + + .. versionadded:: 1.11 + info : mne.Info | None + The mne.Info object with information about the sensors and methods of + measurement used for covariance estimation and generalized + eigendecomposition. + If None, one channel type and no projections will be assumed and if + rank is dict, it will be sum of ranks per channel type. + Defaults to None. + + .. versionadded:: 1.11 %(rank_none)s .. versionadded:: 0.17 @@ -664,19 +720,57 @@ class SPoC(CSP): .. 
footbibliography:: """ - def __init__(self, n_components=4, reg=None, log=None, - transform_into='average_power', cov_method_params=None, - rank=None): + def __init__( + self, + n_components=4, + reg=None, + log=None, + transform_into="average_power", + cov_method_params=None, + *, + restr_type=None, + info=None, + rank=None, + ): """Init of SPoC.""" - super(SPoC, self).__init__(n_components=n_components, reg=reg, log=log, - cov_est="epoch", norm_trace=False, - transform_into=transform_into, rank=rank, - cov_method_params=cov_method_params) + super().__init__( + n_components=n_components, + reg=reg, + log=log, + cov_est="epoch", + norm_trace=False, + transform_into=transform_into, + restr_type=restr_type, + info=info, + rank=rank, + cov_method_params=cov_method_params, + ) + + cov_callable = partial( + _spoc_estimate, + reg=reg, + cov_method_params=cov_method_params, + info=info, + rank=rank, + ) + super(CSP, self).__init__( + n_components=n_components, + cov_callable=cov_callable, + mod_ged_callable=_spoc_mod, + restr_type=restr_type, + ) + # Covariance estimation have to be done on the single epoch level, # unlike CSP where covariance estimation can also be achieved through # concatenation of all epochs from the same class. - delattr(self, 'cov_est') - delattr(self, 'norm_trace') + delattr(self, "cov_est") + delattr(self, "norm_trace") + + def __sklearn_tags__(self): + """Tag the transformer.""" + tags = super().__sklearn_tags__() + tags.target_tags.multi_output = False + return tags def fit(self, X, y): """Estimate the SPoC decomposition on epochs. @@ -693,50 +787,16 @@ def fit(self, X, y): self : instance of SPoC Returns the modified instance. """ - from scipy import linalg - self._check_Xy(X, y) - - if len(np.unique(y)) < 2: - raise ValueError("y must have at least two distinct values.") - - # The following code is directly copied from pyRiemann - - # Normalize target variable - target = y.astype(np.float64) - target -= target.mean() - target /= target.std() - - n_epochs, n_channels = X.shape[:2] - - # Estimate single trial covariance - covs = np.empty((n_epochs, n_channels, n_channels)) - for ii, epoch in enumerate(X): - covs[ii] = _regularized_covariance( - epoch, reg=self.reg, method_params=self.cov_method_params, - rank=self.rank) - - C = covs.mean(0) - Cz = np.mean(covs * target[:, np.newaxis, np.newaxis], axis=0) - - # solve eigenvalue decomposition - evals, evecs = linalg.eigh(Cz, C) - evals = evals.real - evecs = evecs.real - # sort vectors - ix = np.argsort(np.abs(evals))[::-1] + X, y = self._check_data(X, y=y, fit=True, return_y=True) + self._validate_params(y=y) - # sort eigenvectors - evecs = evecs[:, ix].T + super(CSP, self).fit(X, y) - # spatial patterns - self.patterns_ = linalg.pinv(evecs).T # n_channels x n_channels - self.filters_ = evecs # n_channels x n_channels - - pick_filters = self.filters_[:self.n_components] + pick_filters = self.filters_[: self.n_components] X = np.asarray([np.dot(pick_filters, epoch) for epoch in X]) # compute features (mean band power) - X = (X ** 2).mean(axis=-1) + X = (X**2).mean(axis=-1) # To standardize features self.mean_ = X.mean(axis=0) @@ -756,8 +816,35 @@ def transform(self, X): ------- X : ndarray If self.transform_into == 'average_power' then returns the power of - CSP features averaged over time and shape (n_epochs, n_sources) + CSP features averaged over time and shape (n_epochs, n_components) If self.transform_into == 'csp_space' then returns the data in CSP - space and shape is (n_epochs, n_sources, n_times). 
+ space and shape is (n_epochs, n_components, n_times). """ - return super(SPoC, self).transform(X) + return super().transform(X) + + def fit_transform(self, X, y=None, **fit_params): + """Fit SPoC to data, then transform it. + + Fits transformer to ``X`` and ``y`` with optional parameters ``fit_params``, and + returns a transformed version of ``X``. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data on which to estimate the SPoC. + y : array, shape (n_epochs,) + The class for each epoch. + **fit_params : dict + Additional fitting parameters passed to the :meth:`mne.decoding.CSP.fit` + method. Not used for this class. + + Returns + ------- + X : array, shape (n_epochs, n_components[, n_times]) + If ``self.transform_into == 'average_power'`` then returns the power of CSP + features averaged over time and shape is ``(n_epochs, n_components)``. If + ``self.transform_into == 'csp_space'`` then returns the data in CSP space + and shape is ``(n_epochs, n_components, n_times)``. + """ + # use parent TransformerMixin method but with custom docstring + return super().fit_transform(X, y=y, **fit_params) diff --git a/mne/decoding/ems.py b/mne/decoding/ems.py index 3f125bfb74a..5c7557798ef 100644 --- a/mne/decoding/ems.py +++ b/mne/decoding/ems.py @@ -1,22 +1,20 @@ -# Author: Denis Engemann -# Alexandre Gramfort -# Jean-Remi King -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from collections import Counter import numpy as np +from sklearn.base import BaseEstimator -from .mixin import TransformerMixin, EstimatorMixin -from .base import _set_cv -from ..io.pick import _picks_to_idx +from .._fiff.pick import _picks_to_idx, pick_info, pick_types from ..parallel import parallel_func from ..utils import logger, verbose -from .. import pick_types, pick_info +from .base import _set_cv +from .transformer import MNETransformerMixin -class EMS(TransformerMixin, EstimatorMixin): +class EMS(MNETransformerMixin, BaseEstimator): """Transformer to compute event-matched spatial filters. This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire @@ -40,12 +38,24 @@ class EMS(TransformerMixin, EstimatorMixin): .. footbibliography:: """ + def __sklearn_tags__(self): + """Return sklearn tags.""" + from sklearn.utils import ClassifierTags + + tags = super().__sklearn_tags__() + if tags.classifier_tags is None: + tags.classifier_tags = ClassifierTags() + tags.classifier_tags.multi_class = False + return tags + def __repr__(self): # noqa: D105 - if hasattr(self, 'filters_'): - return '' % ( - len(self.filters_), len(self.classes_)) + if hasattr(self, "filters_"): + return ( + f"" + ) else: - return '' + return "" def fit(self, X, y): """Fit the spatial filters. @@ -65,11 +75,12 @@ def fit(self, X, y): self : instance of EMS Returns self. """ - classes = np.unique(y) - if len(classes) != 2: - raise ValueError('EMS only works for binary classification.') + X, y = self._check_data(X, y=y, fit=True, return_y=True) + classes, y = np.unique(y, return_inverse=True) + if len(classes) > 2: + raise ValueError("EMS only works for binary classification.") self.classes_ = classes - filters = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0) + filters = X[y == 0].mean(0) - X[y == 1].mean(0) filters /= np.linalg.norm(filters, axis=0)[None, :] self.filters_ = filters return self @@ -87,13 +98,15 @@ def transform(self, X): X : array, shape (n_epochs, n_times) The input data transformed by the spatial filters. 
""" + X = self._check_data(X) Xt = np.sum(X * self.filters_, axis=1) return Xt @verbose -def compute_ems(epochs, conditions=None, picks=None, n_jobs=None, cv=None, - verbose=None): +def compute_ems( + epochs, conditions=None, picks=None, n_jobs=None, cv=None, *, verbose=None +): """Compute event-matched spatial filter on epochs. This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire @@ -141,16 +154,18 @@ def compute_ems(epochs, conditions=None, picks=None, n_jobs=None, cv=None, ---------- .. footbibliography:: """ - logger.info('...computing surrogate time series. This can take some time') + logger.info("...computing surrogate time series. This can take some time") # Default to leave-one-out cv - cv = 'LeaveOneOut' if cv is None else cv + cv = "LeaveOneOut" if cv is None else cv picks = _picks_to_idx(epochs.info, picks) if not len(set(Counter(epochs.events[:, 2]).values())) == 1: - raise ValueError('The same number of epochs is required by ' - 'this function. Please consider ' - '`epochs.equalize_event_counts`') + raise ValueError( + "The same number of epochs is required by " + "this function. Please consider " + "`epochs.equalize_event_counts`" + ) if conditions is None: conditions = epochs.event_id.keys() @@ -161,9 +176,10 @@ def compute_ems(epochs, conditions=None, picks=None, n_jobs=None, cv=None, epochs.drop_bad() if len(conditions) != 2: - raise ValueError('Currently this function expects exactly 2 ' - 'conditions but you gave me %i' % - len(conditions)) + raise ValueError( + "Currently this function expects exactly 2 " + f"conditions but you gave me {len(conditions)}" + ) ev = epochs.events[:, 2] # Special care to avoid path dependent mappings and orders @@ -175,27 +191,28 @@ def compute_ems(epochs, conditions=None, picks=None, n_jobs=None, cv=None, # Scale (z-score) the data by channel type # XXX the z-scoring is applied outside the CV, which is not standard. - for ch_type in ['mag', 'grad', 'eeg']: + for ch_type in ["mag", "grad", "eeg"]: if ch_type in epochs: # FIXME should be applied to all sort of data channels - if ch_type == 'eeg': + if ch_type == "eeg": this_picks = pick_types(info, meg=False, eeg=True) else: this_picks = pick_types(info, meg=ch_type, eeg=False) data[:, this_picks] /= np.std(data[:, this_picks]) # Setup cross-validation. Need to use _set_cv to deal with sklearn - # deprecation of cv objects. + # changes in cv object handling. y = epochs.events[:, 2] - _, cv_splits = _set_cv(cv, 'classifier', X=y, y=y) + _, cv_splits = _set_cv(cv, "classifier", X=y, y=y) parallel, p_func, n_jobs = parallel_func(_run_ems, n_jobs=n_jobs) # FIXME this parallelization should be removed. # 1) it's numpy computation so it's already efficient, # 2) it duplicates the data in RAM, # 3) the computation is already super fast. 
- out = parallel(p_func(_ems_diff, data, cond_idx, train, test) - for train, test in cv_splits) + out = parallel( + p_func(_ems_diff, data, cond_idx, train, test) for train, test in cv_splits + ) surrogate_trials, spatial_filter = zip(*out) surrogate_trials = np.array(surrogate_trials) @@ -212,6 +229,6 @@ def _ems_diff(data0, data1): def _run_ems(objective_function, data, cond_idx, train, test): """Run EMS.""" d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx)) - d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :] + d /= np.sqrt(np.sum(d**2, axis=0))[None, :] # compute surrogates return np.sum(data[test[0]] * d, axis=0), d diff --git a/mne/decoding/mixin.py b/mne/decoding/mixin.py deleted file mode 100644 index b2c491b9118..00000000000 --- a/mne/decoding/mixin.py +++ /dev/null @@ -1,85 +0,0 @@ - - -class TransformerMixin(object): - """Mixin class for all transformers in scikit-learn.""" - - def fit_transform(self, X, y=None, **fit_params): - """Fit to data, then transform it. - - Fits transformer to ``X`` and ``y`` with optional parameters - ``fit_params``, and returns a transformed version of ``X``. - - Parameters - ---------- - X : array, shape (n_samples, n_features) - Training set. - y : array, shape (n_samples,) - Target values or class labels. - **fit_params : dict - Additional fitting parameters passed to the ``fit`` method.. - - Returns - ------- - X_new : array, shape (n_samples, n_features_new) - Transformed array. - """ - # non-optimized default implementation; override when a better - # method is possible for a given clustering algorithm - if y is None: - # fit method of arity 1 (unsupervised transformation) - return self.fit(X, **fit_params).transform(X) - else: - # fit method of arity 2 (supervised transformation) - return self.fit(X, y, **fit_params).transform(X) - - -class EstimatorMixin(object): - """Mixin class for estimators.""" - - def get_params(self, deep=True): - """Get the estimator params. - - Parameters - ---------- - deep : bool - Deep. - """ - return - - def set_params(self, **params): - """Set parameters (mimics sklearn API). - - Parameters - ---------- - **params : dict - Extra parameters. - - Returns - ------- - inst : object - The instance. - """ - if not params: - return self - valid_params = self.get_params(deep=True) - for key, value in params.items(): - split = key.split('__', 1) - if len(split) > 1: - # nested objects case - name, sub_name = split - if name not in valid_params: - raise ValueError('Invalid parameter %s for estimator %s. ' - 'Check the list of available parameters ' - 'with `estimator.get_params().keys()`.' % - (name, self)) - sub_object = valid_params[name] - sub_object.set_params(**{sub_name: value}) - else: - # simple objects case - if key not in valid_params: - raise ValueError('Invalid parameter %s for estimator %s. ' - 'Check the list of available parameters ' - 'with `estimator.get_params().keys()`.' % - (key, self.__class__.__name__)) - setattr(self, key, value) - return self diff --git a/mne/decoding/receptive_field.py b/mne/decoding/receptive_field.py index d344e93e668..0516adadb9a 100644 --- a/mne/decoding/receptive_field.py +++ b/mne/decoding/receptive_field.py @@ -1,21 +1,28 @@ -# -*- coding: utf-8 -*- -# Authors: Chris Holdgraf -# Eric Larson - +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
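# Usage sketch for the estimator defined in this module (illustrative names;
# assumes ``stim`` of shape (n_times, n_features) and ``eeg`` of shape
# (n_times, n_channels), both sampled at ``sfreq`` Hz):
from mne.decoding import ReceptiveField

rf = ReceptiveField(tmin=-0.2, tmax=0.4, sfreq=sfreq, estimator=1.0, scoring="corrcoef")
rf.fit(stim, eeg)             # encoding model: stimulus -> brain
scores = rf.score(stim, eeg)  # one score per output channel, edge samples excluded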
import numbers import numpy as np - -from .base import get_coef, BaseEstimator, _check_estimator +from scipy.stats import pearsonr +from sklearn.base import ( + BaseEstimator, + MetaEstimatorMixin, + clone, + is_regressor, +) +from sklearn.exceptions import NotFittedError +from sklearn.metrics import r2_score + +from ..utils import _validate_type, fill_doc, pinv +from ._fixes import _check_n_features_3d, validate_data +from .base import _check_estimator, get_coef from .time_delaying_ridge import TimeDelayingRidge -from ..fixes import is_regressor -from ..utils import _validate_type, verbose, fill_doc @fill_doc -class ReceptiveField(BaseEstimator): +class ReceptiveField(MetaEstimatorMixin, BaseEstimator): """Fit a receptive field model. This allows you to fit an encoding model (stimulus to brain) or a decoding @@ -67,7 +74,6 @@ class ReceptiveField(BaseEstimator): duration. Only used if ``estimator`` is float or None. .. versionadded:: 0.18 - %(verbose)s Attributes ---------- @@ -103,15 +109,24 @@ class ReceptiveField(BaseEstimator): .. footbibliography:: """ # noqa E501 - @verbose - def __init__(self, tmin, tmax, sfreq, feature_names=None, estimator=None, - fit_intercept=None, scoring='r2', patterns=False, - n_jobs=None, edge_correction=True, verbose=None): - self.feature_names = feature_names - self.sfreq = float(sfreq) + def __init__( + self, + tmin, + tmax, + sfreq, + feature_names=None, + estimator=None, + fit_intercept=None, + scoring="r2", + patterns=False, + n_jobs=None, + edge_correction=True, + ): self.tmin = tmin self.tmax = tmax - self.estimator = 0. if estimator is None else estimator + self.sfreq = sfreq + self.feature_names = feature_names + self.estimator = estimator self.fit_intercept = fit_intercept self.scoring = scoring self.patterns = patterns @@ -119,37 +134,81 @@ def __init__(self, tmin, tmax, sfreq, feature_names=None, estimator=None, self.edge_correction = edge_correction def __repr__(self): # noqa: D105 - s = "tmin, tmax : (%.3f, %.3f), " % (self.tmin, self.tmax) + s = f"tmin, tmax : ({self.tmin:.3f}, {self.tmax:.3f}), " estimator = self.estimator if not isinstance(estimator, str): estimator = type(self.estimator) - s += "estimator : %s, " % (estimator,) - if hasattr(self, 'coef_'): + s += f"estimator : {estimator}, " + if hasattr(self, "coef_"): if self.feature_names is not None: feats = self.feature_names if len(feats) == 1: - s += "feature: %s, " % feats[0] + s += f"feature: {feats[0]}, " else: - s += "features : [%s, ..., %s], " % (feats[0], feats[-1]) + s += f"features : [{feats[0]}, ..., {feats[-1]}], " s += "fit: True" else: s += "fit: False" - if hasattr(self, 'scores_'): - s += "scored (%s)" % self.scoring - return "" % s + if hasattr(self, "scores_"): + s += f"scored ({self.scoring})" + return f"" + + def __sklearn_tags__(self): + """...""" + from sklearn.utils import RegressorTags + + tags = super().__sklearn_tags__() + tags.estimator_type = "regressor" + tags.regressor_tags = RegressorTags() + tags.input_tags.three_d_array = True + tags.target_tags.one_d_labels = True + tags.target_tags.multi_output = True + tags.target_tags.required = True + return tags def _delay_and_reshape(self, X, y=None): """Delay and reshape the variables.""" if not isinstance(self.estimator_, TimeDelayingRidge): # X is now shape (n_times, n_epochs, n_feats, n_delays) - X = _delay_time_series(X, self.tmin, self.tmax, self.sfreq, - fill_mean=self.fit_intercept) + X = _delay_time_series( + X, + self.tmin, + self.tmax, + self.sfreq_, + fill_mean=self.fit_intercept_, + ) X = 
_reshape_for_est(X) # Concat times + epochs if y is not None: - y = y.reshape(-1, y.shape[-1], order='F') + y = y.reshape(-1, y.shape[-1], order="F") + return X, y + + def _check_data(self, X, y=None, reset=False): + if reset: + X, y = validate_data( + self, + X=X, + y=y, + reset=reset, + validate_separately=( # to take care of 3D y + dict(allow_nd=True, ensure_2d=False), + dict(allow_nd=True, ensure_2d=False), + ), + ) + else: + X = validate_data(self, X=X, allow_nd=True, ensure_2d=False, reset=reset) + _check_n_features_3d(self, X, reset) return X, y + def _validate_params(self, X): + if self.scoring not in _SCORERS.keys(): + raise ValueError( + f"scoring must be one of {sorted(_SCORERS.keys())}, got {self.scoring}" + ) + self.sfreq_ = float(self.sfreq) + if self.tmin > self.tmax: + raise ValueError(f"tmin ({self.tmin}) must be at most tmax ({self.tmax})") + def fit(self, X, y): """Fit a receptive field model. @@ -165,43 +224,49 @@ def fit(self, X, y): self : instance The instance so you can chain operations. """ - from scipy import linalg - if self.scoring not in _SCORERS.keys(): - raise ValueError('scoring must be one of %s, got' - '%s ' % (sorted(_SCORERS.keys()), self.scoring)) - from sklearn.base import clone + X, y = self._check_data(X, y, reset=True) + self._validate_params(X) X, y, _, self._y_dim = self._check_dimensions(X, y) - if self.tmin > self.tmax: - raise ValueError('tmin (%s) must be at most tmax (%s)' - % (self.tmin, self.tmax)) # Initialize delays - self.delays_ = _times_to_delays(self.tmin, self.tmax, self.sfreq) + self.delays_ = _times_to_delays(self.tmin, self.tmax, self.sfreq_) # Define the slice that we should use in the middle self.valid_samples_ = _delays_to_slice(self.delays_) - if isinstance(self.estimator, numbers.Real): + if self.estimator is None or isinstance(self.estimator, numbers.Real): + alpha = self.estimator if self.estimator is not None else 0.0 if self.fit_intercept is None: - self.fit_intercept = True + self.fit_intercept_ = True + else: + self.fit_intercept_ = self.fit_intercept estimator = TimeDelayingRidge( - self.tmin, self.tmax, self.sfreq, alpha=self.estimator, - fit_intercept=self.fit_intercept, n_jobs=self.n_jobs, - edge_correction=self.edge_correction) + self.tmin, + self.tmax, + self.sfreq_, + alpha=alpha, + fit_intercept=self.fit_intercept_, + n_jobs=self.n_jobs, + edge_correction=self.edge_correction, + ) elif is_regressor(self.estimator): estimator = clone(self.estimator) - if self.fit_intercept is not None and \ - estimator.fit_intercept != self.fit_intercept: + if ( + self.fit_intercept is not None + and estimator.fit_intercept != self.fit_intercept + ): raise ValueError( - 'Estimator fit_intercept (%s) != initialization ' - 'fit_intercept (%s), initialize ReceptiveField with the ' - 'same fit_intercept value or use fit_intercept=None' - % (estimator.fit_intercept, self.fit_intercept)) - self.fit_intercept = estimator.fit_intercept + f"Estimator fit_intercept ({estimator.fit_intercept}) != " + f"initialization fit_intercept ({self.fit_intercept}), initialize " + "ReceptiveField with the same fit_intercept value or use " + "fit_intercept=None" + ) + self.fit_intercept_ = estimator.fit_intercept else: - raise ValueError('`estimator` must be a float or an instance' - ' of `BaseEstimator`,' - ' got type %s.' % type(self.estimator)) + raise ValueError( + "`estimator` must be a float or an instance of `BaseEstimator`, got " + f"type {self.estimator}." 
+ ) self.estimator_ = estimator del estimator _check_estimator(self.estimator_) @@ -212,16 +277,17 @@ def fit(self, X, y): n_delays = len(self.delays_) # Update feature names if we have none - if ((self.feature_names is not None) and - (len(self.feature_names) != n_feats)): - raise ValueError('n_features in X does not match feature names ' - '(%s != %s)' % (n_feats, len(self.feature_names))) + if (self.feature_names is not None) and (len(self.feature_names) != n_feats): + raise ValueError( + f"n_features in X does not match feature names ({n_feats} != " + f"{len(self.feature_names)})" + ) # Create input features X, y = self._delay_and_reshape(X, y) self.estimator_.fit(X, y) - coef = get_coef(self.estimator_, 'coef_') # (n_targets, n_features) + coef = get_coef(self.estimator_, "coef_") # (n_targets, n_features) shape = [n_feats, n_delays] if self._y_dim > 1: shape.insert(0, -1) @@ -229,9 +295,15 @@ def fit(self, X, y): # Inverse-transform model weights if self.patterns: + n_total_samples = n_times * n_epochs + if n_total_samples < 2: + raise ValueError( + "Cannot compute patterns with only one sample; " + f"got n_samples = {n_total_samples}." + ) if isinstance(self.estimator_, TimeDelayingRidge): cov_ = self.estimator_.cov_ / float(n_times * n_epochs - 1) - y = y.reshape(-1, y.shape[-1], order='F') + y = y.reshape(-1, y.shape[-1], order="F") else: X = X - X.mean(0, keepdims=True) cov_ = np.cov(X.T) @@ -240,9 +312,9 @@ def fit(self, X, y): # Inverse output covariance if y.ndim == 2 and y.shape[1] != 1: y = y - y.mean(0, keepdims=True) - inv_Y = linalg.pinv(np.cov(y.T)) + inv_Y = pinv(np.cov(y.T)) else: - inv_Y = 1. / float(n_times * n_epochs - 1) + inv_Y = 1.0 / float(n_times * n_epochs - 1) del y # Inverse coef according to Haufe's method @@ -268,9 +340,12 @@ def predict(self, X): unaffected by edge artifacts during the time delaying step) can be obtained using ``y_pred[rf.valid_samples_]``. 
""" - if not hasattr(self, 'delays_'): - raise ValueError('Estimator has not been fit yet.') + if not hasattr(self, "delays_"): + raise NotFittedError("Estimator has not been fit yet.") + + X, _ = self._check_data(X) X, _, X_dim = self._check_dimensions(X, None, predict=True)[:3] + del _ # convert to sklearn and back pred_shape = X.shape[:-1] @@ -278,14 +353,14 @@ def predict(self, X): pred_shape = pred_shape + (self.coef_.shape[0],) X, _ = self._delay_and_reshape(X) y_pred = self.estimator_.predict(X) - y_pred = y_pred.reshape(pred_shape, order='F') + y_pred = y_pred.reshape(pred_shape, order="F") shape = list(y_pred.shape) if X_dim <= 2: shape.pop(1) # epochs extra = 0 else: extra = 1 - shape = shape[:self._y_dim + extra] + shape = shape[: self._y_dim + extra] y_pred.shape = shape return y_pred @@ -320,13 +395,15 @@ def score(self, X, y): y = y[self.valid_samples_] # Re-vectorize and call scorer - y = y.reshape([-1, n_outputs], order='F') - y_pred = y_pred.reshape([-1, n_outputs], order='F') + y = y.reshape([-1, n_outputs], order="F") + y_pred = y_pred.reshape([-1, n_outputs], order="F") assert y.shape == y_pred.shape - scores = scorer_(y, y_pred, multioutput='raw_values') + scores = scorer_(y, y_pred, multioutput="raw_values") return scores def _check_dimensions(self, X, y, predict=False): + _validate_type(X, "array-like", "X") + _validate_type(y, ("array-like", None), "y") X_dim = X.ndim y_dim = y.ndim if y is not None else 0 if X_dim == 2: @@ -338,28 +415,40 @@ def _check_dimensions(self, X, y, predict=False): elif y_dim == 2: y = y[:, np.newaxis, :] # epochs else: - raise ValueError('y must be shape (n_times[, n_epochs]' - '[,n_outputs], got %s' % (y.shape,)) + raise ValueError( + "y must be shape (n_times[, n_epochs][,n_outputs], got " + f"{y.shape}" + ) elif X.ndim == 3: if y is not None: if y.ndim == 2: y = y[:, :, np.newaxis] # Add an outputs dim elif y.ndim != 3: - raise ValueError('If X has 3 dimensions, ' - 'y must have 2 or 3 dimensions') + raise ValueError( + "If X has 3 dimensions, y must have 2 or 3 dimensions" + ) else: - raise ValueError('X must be shape (n_times[, n_epochs],' - ' n_features), got %s' % (X.shape,)) + raise ValueError( + "X must be shape (n_times[, n_epochs], n_features), " + f"got {X.shape}. Reshape your data to 2D or 3D " + "(e.g., array.reshape(-1, 1) for a single feature, " + "or array.reshape(1, -1) for a single sample)." 
+ ) if y is not None: if X.shape[0] != y.shape[0]: - raise ValueError('X and y do not have the same n_times\n' - '%s != %s' % (X.shape[0], y.shape[0])) + raise ValueError( + f"X and y do not have the same n_times\n{X.shape[0]} != " + f"{y.shape[0]}" + ) if X.shape[1] != y.shape[1]: - raise ValueError('X and y do not have the same n_epochs\n' - '%s != %s' % (X.shape[1], y.shape[1])) - if predict and y.shape[-1] != len(self.estimator_.coef_): - raise ValueError('Number of outputs does not match' - ' estimator coefficients dimensions') + raise ValueError( + f"X and y do not have the same n_epochs\n{X.shape[1]} != " + f"{y.shape[1]}" + ) + if predict and y.shape[-1] not in (len(self.estimator_.coef_), 1): + raise ValueError( + "Number of outputs does not match estimator coefficients dimensions" + ) return X, y, X_dim, y_dim @@ -424,15 +513,14 @@ def _delay_time_series(X, tmin, tmax, sfreq, fill_mean=False): use_X = X out[:] = use_X if fill_mean: - out[:] += (mean_value - use_X.mean(axis=0)) + out[:] += mean_value - use_X.mean(axis=0) return delayed def _times_to_delays(tmin, tmax, sfreq): """Convert a tmin/tmax in seconds to delays.""" # Convert seconds to samples - delays = np.arange(int(np.round(tmin * sfreq)), - int(np.round(tmax * sfreq) + 1)) + delays = np.arange(int(np.round(tmin * sfreq)), int(np.round(tmax * sfreq) + 1)) return delays @@ -447,37 +535,35 @@ def _delays_to_slice(delays): def _check_delayer_params(tmin, tmax, sfreq): """Check delayer input parameters. For future custom delay support.""" - _validate_type(sfreq, 'numeric', '`sfreq`') + _validate_type(sfreq, "numeric", "`sfreq`") for tlim in (tmin, tmax): - _validate_type(tlim, 'numeric', 'tmin/tmax') + _validate_type(tlim, "numeric", "tmin/tmax") if not tmin <= tmax: - raise ValueError('tmin must be <= tmax') + raise ValueError("tmin must be <= tmax") def _reshape_for_est(X_del): """Convert X_del to a sklearn-compatible shape.""" n_times, n_epochs, n_feats, n_delays = X_del.shape X_del = X_del.reshape(n_times, n_epochs, -1) # concatenate feats - X_del = X_del.reshape(n_times * n_epochs, -1, order='F') + X_del = X_del.reshape(n_times * n_epochs, -1, order="F") return X_del # Create a correlation scikit-learn-style scorer def _corr_score(y_true, y, multioutput=None): - from scipy.stats import pearsonr - assert multioutput == 'raw_values' + assert multioutput == "raw_values" for this_y in (y_true, y): if this_y.ndim != 2: - raise ValueError('inputs must be shape (samples, outputs), got %s' - % (this_y.shape,)) - return np.array([pearsonr(y_true[:, ii], y[:, ii])[0] - for ii in range(y.shape[-1])]) + raise ValueError( + f"inputs must be shape (samples, outputs), got {this_y.shape}" + ) + return np.array([pearsonr(y_true[:, ii], y[:, ii])[0] for ii in range(y.shape[-1])]) def _r2_score(y_true, y, multioutput=None): - from sklearn.metrics import r2_score return r2_score(y_true, y, multioutput=multioutput) -_SCORERS = {'r2': _r2_score, 'corrcoef': _corr_score} +_SCORERS = {"r2": _r2_score, "corrcoef": _corr_score} diff --git a/mne/decoding/search_light.py b/mne/decoding/search_light.py index 81c83b256a4..81699ecd5ba 100644 --- a/mne/decoding/search_light.py +++ b/mne/decoding/search_light.py @@ -1,19 +1,29 @@ -# Author: Jean-Remi King -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
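# Usage sketch for the estimators defined in this module (assumes ``X`` with
# shape (n_epochs, n_channels, n_times) and binary labels ``y``):
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

from mne.decoding import SlidingEstimator

clf = make_pipeline(StandardScaler(), LogisticRegression())
slide = SlidingEstimator(clf, scoring="roc_auc", n_jobs=1)
slide.fit(X, y)             # one clone of ``clf`` fitted per time point
scores = slide.score(X, y)  # ROC AUC as a function of time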
+ import logging + import numpy as np +from sklearn.base import BaseEstimator, MetaEstimatorMixin, clone +from sklearn.metrics import check_scoring +from sklearn.preprocessing import LabelEncoder +from sklearn.utils.validation import check_is_fitted -from .mixin import TransformerMixin -from .base import BaseEstimator, _check_estimator -from ..fixes import _get_check_scoring from ..parallel import parallel_func -from ..utils import (array_split_idx, ProgressBar, - verbose, fill_doc, _parse_verbose) +from ..utils import ( + ProgressBar, + _parse_verbose, + _verbose_safe_false, + array_split_idx, + fill_doc, +) +from .base import _check_estimator +from .transformer import MNETransformerMixin @fill_doc -class SlidingEstimator(BaseEstimator, TransformerMixin): +class SlidingEstimator(MetaEstimatorMixin, MNETransformerMixin, BaseEstimator): """Search Light. Fit, predict and score a series of models to each subset of the dataset @@ -26,6 +36,7 @@ class SlidingEstimator(BaseEstimator, TransformerMixin): %(scoring)s %(n_jobs)s %(position)s + %(allow_2d)s %(verbose)s Attributes @@ -34,23 +45,47 @@ class SlidingEstimator(BaseEstimator, TransformerMixin): List of fitted scikit-learn estimators (one per task). """ - @verbose - def __init__(self, base_estimator, scoring=None, n_jobs=None, *, - position=0, verbose=None): # noqa: D102 - _check_estimator(base_estimator) - self._estimator_type = getattr(base_estimator, "_estimator_type", None) + def __init__( + self, + base_estimator, + scoring=None, + n_jobs=None, + *, + position=0, + allow_2d=False, + verbose=None, + ): self.base_estimator = base_estimator self.n_jobs = n_jobs self.scoring = scoring self.position = position + self.allow_2d = allow_2d self.verbose = verbose + @property + def _estimator_type(self): + return getattr(self.base_estimator, "_estimator_type", None) + + def __sklearn_tags__(self): + """Get sklearn tags.""" + from sklearn.utils import get_tags + + tags = super().__sklearn_tags__() + sub_tags = get_tags(self.base_estimator) + tags.estimator_type = sub_tags.estimator_type + for kind in ("classifier", "regressor", "transformer"): + if tags.estimator_type == kind: + attr = f"{kind}_tags" + setattr(tags, attr, getattr(sub_tags, attr)) + break + return tags + def __repr__(self): # noqa: D105 - repr_str = '<' + super(SlidingEstimator, self).__repr__() - if hasattr(self, 'estimators_'): + repr_str = "<" + super().__repr__() + if hasattr(self, "estimators_"): repr_str = repr_str[:-1] - repr_str += ', fitted with %i estimators' % len(self.estimators_) - return repr_str + '>' + repr_str += f", fitted with {len(self.estimators_)} estimators" + return repr_str + ">" def fit(self, X, y, **fit_params): """Fit a series of independent estimators to the dataset. @@ -72,18 +107,22 @@ def fit(self, X, y, **fit_params): self : object Return self. """ - self._check_Xy(X, y) + _check_estimator(self.base_estimator) + X, _ = self._check_Xy(X, y, fit=True) parallel, p_func, n_jobs = parallel_func( - _sl_fit, self.n_jobs, max_jobs=X.shape[-1], verbose=False) + _sl_fit, + self.n_jobs, + max_jobs=X.shape[-1], + verbose=_verbose_safe_false(), + ) self.estimators_ = list() - self.fit_params = fit_params + self.fit_params_ = fit_params # For fitting, the parallelization is across estimators. 
- context = _create_progressbar_context(self, X, 'Fitting') + context = _create_progressbar_context(self, X, "Fitting") with context as pb: estimators = parallel( - p_func(self.base_estimator, split, y, - pb.subset(pb_idx), **fit_params) + p_func(self.base_estimator, split, y, pb.subset(pb_idx), **fit_params) for pb_idx, split in array_split_idx(X, n_jobs, axis=-1) ) @@ -123,20 +162,25 @@ def fit_transform(self, X, y, **fit_params): def _transform(self, X, method): """Aux. function to make parallel predictions/transformation.""" - self._check_Xy(X) + X, is_nd = self._check_Xy(X) + orig_method = method + check_is_fitted(self) method = _check_method(self.base_estimator, method) if X.shape[-1] != len(self.estimators_): - raise ValueError('The number of estimators does not match ' - 'X.shape[-1]') + raise ValueError("The number of estimators does not match X.shape[-1]") # For predictions/transforms the parallelization is across the data and # not across the estimators to avoid memory load. parallel, p_func, n_jobs = parallel_func( - _sl_transform, self.n_jobs, max_jobs=X.shape[-1], verbose=False) + _sl_transform, + self.n_jobs, + max_jobs=X.shape[-1], + verbose=_verbose_safe_false(), + ) X_splits = np.array_split(X, n_jobs, axis=-1) idx, est_splits = zip(*array_split_idx(self.estimators_, n_jobs)) - context = _create_progressbar_context(self, X, 'Transforming') + context = _create_progressbar_context(self, X, "Transforming") with context as pb: y_pred = parallel( p_func(est, x, method, pb.subset(pb_idx)) @@ -144,6 +188,13 @@ def _transform(self, X, method): ) y_pred = np.concatenate(y_pred, axis=1) + if orig_method == "transform": + y_pred = y_pred.astype(X.dtype) + elif ( + orig_method in ("predict", "predict_proba", "decision_function") + and not is_nd + ): + y_pred = y_pred.squeeze() return y_pred def transform(self, X): @@ -166,7 +217,7 @@ def transform(self, X): Xt : array, shape (n_samples, n_estimators) The transformed values generated by each estimator. """ # noqa: E501 - return self._transform(X, 'transform') + return self._transform(X, "transform") def predict(self, X): """Predict each data slice/task with a series of independent estimators. @@ -188,7 +239,7 @@ def predict(self, X): y_pred : array, shape (n_samples, n_estimators) | (n_samples, n_tasks, n_targets) Predicted values for each estimator/data slice. """ # noqa: E501 - return self._transform(X, 'predict') + return self._transform(X, "predict") def predict_proba(self, X): """Predict each data slice with a series of independent estimators. @@ -210,7 +261,7 @@ def predict_proba(self, X): y_pred : array, shape (n_samples, n_tasks, n_classes) Predicted probabilities for each estimator/data slice/task. """ # noqa: E501 - return self._transform(X, 'predict_proba') + return self._transform(X, "predict_proba") def decision_function(self, X): """Estimate distances of each data slice to the hyperplanes. @@ -233,15 +284,23 @@ def decision_function(self, X): ----- This requires base_estimator to have a ``decision_function`` method. """ # noqa: E501 - return self._transform(X, 'decision_function') + return self._transform(X, "decision_function") - def _check_Xy(self, X, y=None): + def _check_Xy(self, X, y=None, fit=False): """Aux. 
function to check input data.""" - if y is not None: - if len(X) != len(y) or len(y) < 1: - raise ValueError('X and y must have the same length.') - if X.ndim < 3: - raise ValueError('X must have at least 3 dimensions.') + # Once we require sklearn 1.1+ we should do something like: + X = self._check_data(X, y=y, atleast_3d=False, fit=fit) + is_nd = X.ndim >= 3 + if not is_nd: + err = None + if not self.allow_2d: + err = 3 + elif X.ndim < 2: + err = 2 + if err: + raise ValueError(f"X must have at least {err} dimensions.") + X = X[..., np.newaxis] + return X, is_nd def score(self, X, y): """Score each estimator on each task. @@ -266,12 +325,9 @@ def score(self, X, y): score : array, shape (n_samples, n_estimators) Score for each estimator/task. """ # noqa: E501 - check_scoring = _get_check_scoring() - - self._check_Xy(X) + X, _ = self._check_Xy(X, y) if X.shape[-1] != len(self.estimators_): - raise ValueError('The number of estimators does not match ' - 'X.shape[-1]') + raise ValueError("The number of estimators does not match X.shape[-1]") scoring = check_scoring(self.base_estimator, self.scoring) y = _fix_auc(scoring, y) @@ -279,21 +335,27 @@ def score(self, X, y): # For predictions/transforms the parallelization is across the data and # not across the estimators to avoid memory load. parallel, p_func, n_jobs = parallel_func( - _sl_score, self.n_jobs, max_jobs=X.shape[-1], verbose=False) + _sl_score, + self.n_jobs, + max_jobs=X.shape[-1], + verbose=_verbose_safe_false(), + ) X_splits = np.array_split(X, n_jobs, axis=-1) est_splits = np.array_split(self.estimators_, n_jobs) - score = parallel(p_func(est, scoring, x, y) - for (est, x) in zip(est_splits, X_splits)) + score = parallel( + p_func(est, scoring, x, y) for (est, x) in zip(est_splits, X_splits) + ) score = np.concatenate(score, axis=0) return score @property def classes_(self): - if not hasattr(self.estimators_[0], 'classes_'): - raise AttributeError('classes_ attribute available only if ' - 'base_estimator has it, and estimator %s does' - ' not' % (self.estimators_[0],)) + if not hasattr(self.estimators_[0], "classes_"): + raise AttributeError( + "classes_ attribute available only if base_estimator has it, and " + f"estimator {self.estimators_[0]} does not" + ) return self.estimators_[0].classes_ @@ -321,7 +383,6 @@ def _sl_fit(estimator, X, y, pb, **fit_params): estimators_ : list of estimators The fitted estimators. """ - from sklearn.base import clone estimators_ = list() for ii in range(X.shape[-1]): est = clone(estimator) @@ -410,10 +471,10 @@ def _check_method(estimator, method): If method == 'transform' and estimator does not have 'transform', use 'predict' instead. """ - if method == 'transform' and not hasattr(estimator, 'transform'): - method = 'predict' + if method == "transform" and not hasattr(estimator, "transform"): + method = "predict" if not hasattr(estimator, method): - ValueError('base_estimator does not have `%s` method.' 
% method) + ValueError(f"base_estimator does not have `{method}` method.") return method @@ -430,33 +491,48 @@ class GeneralizingEstimator(SlidingEstimator): %(scoring)s %(n_jobs)s %(position)s + %(allow_2d)s %(verbose)s """ def __repr__(self): # noqa: D105 - repr_str = super(GeneralizingEstimator, self).__repr__() - if hasattr(self, 'estimators_'): + repr_str = super().__repr__() + if hasattr(self, "estimators_"): repr_str = repr_str[:-1] - repr_str += ', fitted with %i estimators>' % len(self.estimators_) + repr_str += f", fitted with {len(self.estimators_)} estimators>" return repr_str def _transform(self, X, method): """Aux. function to make parallel predictions/transformation.""" - self._check_Xy(X) + X, is_nd = self._check_Xy(X) + check_is_fitted(self) + orig_method = method method = _check_method(self.base_estimator, method) parallel, p_func, n_jobs = parallel_func( - _gl_transform, self.n_jobs, max_jobs=X.shape[-1], verbose=False) + _gl_transform, + self.n_jobs, + max_jobs=X.shape[-1], + verbose=_verbose_safe_false(), + ) - context = _create_progressbar_context(self, X, 'Transforming') + context = _create_progressbar_context(self, X, "Transforming") with context as pb: y_pred = parallel( p_func(self.estimators_, x_split, method, pb.subset(pb_idx)) for pb_idx, x_split in array_split_idx( - X, n_jobs, axis=-1, n_per_split=len(self.estimators_)) + X, n_jobs, axis=-1, n_per_split=len(self.estimators_) + ) ) y_pred = np.concatenate(y_pred, axis=2) + if orig_method == "transform": + y_pred = y_pred.astype(X.dtype) + if ( + orig_method in ("predict", "predict_proba", "decision_function") + and not is_nd + ): + y_pred = y_pred.squeeze() return y_pred def transform(self, X): @@ -475,7 +551,8 @@ def transform(self, X): Xt : array, shape (n_samples, n_estimators, n_slices) The transformed values generated by each estimator. """ - return self._transform(X, 'transform') + check_is_fitted(self) + return self._transform(X, "transform") def predict(self, X): """Predict each data slice with all possible estimators. @@ -493,7 +570,7 @@ def predict(self, X): y_pred : array, shape (n_samples, n_estimators, n_slices) | (n_samples, n_estimators, n_slices, n_targets) The predicted values for each estimator. """ # noqa: E501 - return self._transform(X, 'predict') + return self._transform(X, "predict") def predict_proba(self, X): """Estimate probabilistic estimates of each data slice with all possible estimators. @@ -515,7 +592,7 @@ def predict_proba(self, X): ----- This requires ``base_estimator`` to have a ``predict_proba`` method. """ # noqa: E501 - return self._transform(X, 'predict_proba') + return self._transform(X, "predict_proba") def decision_function(self, X): """Estimate distances of each data slice to all hyperplanes. @@ -539,7 +616,7 @@ def decision_function(self, X): This requires ``base_estimator`` to have a ``decision_function`` method. """ # noqa: E501 - return self._transform(X, 'decision_function') + return self._transform(X, "decision_function") def score(self, X, y): """Score each of the estimators on the tested dimensions. @@ -560,21 +637,25 @@ def score(self, X, y): score : array, shape (n_samples, n_estimators, n_slices) Score for each estimator / data slice couple. """ # noqa: E501 - check_scoring = _get_check_scoring() - self._check_Xy(X) + X, _ = self._check_Xy(X, y) # For predictions/transforms the parallelization is across the data and # not across the estimators to avoid memory load. 
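# Temporal-generalization sketch (same ``clf``, ``X`` and ``y`` as in the
# SlidingEstimator example above): train at each time point, then score at
# every other time point.
gen = GeneralizingEstimator(clf, scoring="roc_auc", n_jobs=1).fit(X, y)
gen_scores = gen.score(X, y)  # one score per (train time, test time) pair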
parallel, p_func, n_jobs = parallel_func( - _gl_score, self.n_jobs, max_jobs=X.shape[-1], verbose=False) + _gl_score, + self.n_jobs, + max_jobs=X.shape[-1], + verbose=_verbose_safe_false(), + ) scoring = check_scoring(self.base_estimator, self.scoring) y = _fix_auc(scoring, y) - context = _create_progressbar_context(self, X, 'Scoring') + context = _create_progressbar_context(self, X, "Scoring") with context as pb: score = parallel( p_func(self.estimators_, scoring, x, y, pb.subset(pb_idx)) for pb_idx, x in array_split_idx( - X, n_jobs, axis=-1, n_per_split=len(self.estimators_)) + X, n_jobs, axis=-1, n_per_split=len(self.estimators_) + ) ) score = np.concatenate(score, axis=1) @@ -628,8 +709,7 @@ def _gl_init_pred(y_pred, X, n_train): """Aux. function to GeneralizingEstimator to initialize y_pred.""" n_sample, n_iter = X.shape[0], X.shape[-1] if y_pred.ndim == 3: - y_pred = np.zeros((n_sample, n_train, n_iter, y_pred.shape[-1]), - y_pred.dtype) + y_pred = np.zeros((n_sample, n_train, n_iter, y_pred.shape[-1]), y_pred.dtype) else: y_pred = np.zeros((n_sample, n_train, n_iter), y_pred.dtype) return y_pred @@ -678,31 +758,33 @@ def _gl_score(estimators, scoring, X, y, pb): def _fix_auc(scoring, y): - from sklearn.preprocessing import LabelEncoder # This fixes sklearn's inability to compute roc_auc when y not in [0, 1] # scikit-learn/scikit-learn#6874 if scoring is not None: - score_func = getattr(scoring, '_score_func', None) - kwargs = getattr(scoring, '_kwargs', {}) - if (getattr(score_func, '__name__', '') == 'roc_auc_score' and - kwargs.get('multi_class', 'raise') == 'raise'): + score_func = getattr(scoring, "_score_func", None) + kwargs = getattr(scoring, "_kwargs", {}) + if ( + getattr(score_func, "__name__", "") == "roc_auc_score" + and kwargs.get("multi_class", "raise") == "raise" + ): if np.ndim(y) != 1 or len(set(y)) != 2: - raise ValueError('roc_auc scoring can only be computed for ' - 'two-class problems.') + raise ValueError( + "roc_auc scoring can only be computed for two-class problems." + ) y = LabelEncoder().fit_transform(y) return y def _create_progressbar_context(inst, X, message): """Create a progress bar taking into account ``inst.verbose``.""" - multiply = (len(inst.estimators_) - if isinstance(inst, GeneralizingEstimator) else 1) + multiply = len(inst.estimators_) if isinstance(inst, GeneralizingEstimator) else 1 n_steps = X.shape[-1] * max(1, multiply) - mesg = f'{message} {inst.__class__.__name__}' + mesg = f"{message} {inst.__class__.__name__}" - which_tqdm = 'off' if not _check_verbose(inst.verbose) else None - context = ProgressBar(n_steps, mesg=mesg, position=inst.position, - which_tqdm=which_tqdm) + which_tqdm = "off" if not _check_verbose(inst.verbose) else None + context = ProgressBar( + n_steps, mesg=mesg, position=inst.position, which_tqdm=which_tqdm + ) return context diff --git a/mne/decoding/spatial_filter.py b/mne/decoding/spatial_filter.py new file mode 100644 index 00000000000..169cca7d005 --- /dev/null +++ b/mne/decoding/spatial_filter.py @@ -0,0 +1,639 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
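# Numerical sketch of the filter/pattern relation documented in this module:
# for a square, invertible unmixing matrix, the rows of pinv(filters.T) are the
# columns of the mixing matrix, i.e. the spatial patterns.
import numpy as np

rng = np.random.default_rng(0)
filters = rng.standard_normal((4, 4))  # (n_components, n_channels)
patterns = np.linalg.pinv(filters.T)   # same shape as ``filters``
np.testing.assert_allclose(patterns, np.linalg.inv(filters).T, rtol=1e-6, atol=1e-12)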
+ +import copy as cp + +import matplotlib.pyplot as plt +import numpy as np + +from ..defaults import _BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT, _INTERPOLATION_DEFAULT +from ..evoked import EvokedArray +from ..utils import _check_option, fill_doc, verbose +from ..viz.utils import plt_show +from .base import LinearModel, _GEDTransformer, get_coef + + +def _plot_model( + model_array, + info, + components=None, + *, + evk_tmin=None, + ch_type=None, + scalings=None, + sensors=True, + show_names=False, + mask=None, + mask_params=None, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap="RdBu_r", + vlim=(None, None), + cnorm=None, + colorbar=True, + cbar_fmt="%3.1f", + units=None, + axes=None, + name_format=None, + nrows=1, + ncols="auto", + show=True, +): + if components is None: + n_comps = model_array.shape[-2] + components = np.arange(n_comps) + kwargs = dict( + # args set here + times=components, + average=None, + proj=False, + units="AU" if units is None else units, + time_format=name_format, + # args passed from the upstream + ch_type=ch_type, + scalings=scalings, + sensors=sensors, + show_names=show_names, + mask=mask, + mask_params=mask_params, + contours=contours, + outlines=outlines, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + border=border, + res=res, + size=size, + cmap=cmap, + vlim=vlim, + cnorm=cnorm, + colorbar=colorbar, + cbar_fmt=cbar_fmt, + nrows=nrows, + ncols=ncols, + show=show, + ) + + # set sampling frequency to have 1 component per time point + + if evk_tmin is None: + info = cp.deepcopy(info) + with info._unlock(): + info["sfreq"] = 1.0 + evk_tmin = 0 + + if model_array.ndim == 3: + n_classes = model_array.shape[0] + figs = list() + for class_idx in range(n_classes): + model_evk = EvokedArray(model_array[class_idx].T, info, tmin=evk_tmin) + fig = model_evk.plot_topomap( + axes=axes[class_idx] if axes else None, **kwargs + ) + figs.append(fig) + return figs + else: + model_evk = EvokedArray(model_array.T, info, tmin=evk_tmin) + fig = model_evk.plot_topomap(axes=axes, **kwargs) + return fig + + +def _plot_scree_per_class(evals, add_cumul_evals, axes): + component_numbers = np.arange(len(evals)) + cumul_evals = np.cumsum(evals) if add_cumul_evals else None + # plot individual eigenvalues + color_line = "cornflowerblue" + axes.set_xlabel("Component Index", fontsize=18) + axes.set_ylabel("Eigenvalue", fontsize=18) + axes.plot( + component_numbers, + evals, + color=color_line, + marker="o", + markersize=8, + ) + axes.tick_params(axis="y", labelsize=16) + axes.tick_params(axis="x", labelsize=16) + + if add_cumul_evals: + # plot cumulative eigenvalues + ax2 = axes.twinx() + ax2.grid(False) + color_line = "firebrick" + ax2.set_ylabel("Cumulative Eigenvalues", fontsize=18) + ax2.plot( + component_numbers, + cumul_evals, + color=color_line, + marker="o", + markersize=6, + ) + ax2.tick_params(axis="y", labelcolor=color_line, labelsize=16) + ax2.set_ylim(0) + + +def _plot_scree( + evals, + title="Scree plot", + add_cumul_evals=True, + axes=None, +): + evals_data = evals if evals.ndim == 2 else [evals] + n_classes = len(evals_data) + axes = [axes] if isinstance(axes, plt.Axes) else axes + if axes is not None and n_classes != len(axes): + raise ValueError(f"Received {len(axes)} axes, but expected {n_classes}") + + orig_axes = axes + figs = list() + for class_idx in range(n_classes): + fig = None + if orig_axes is None: + fig, ax = 
plt.subplots(figsize=(7, 4), layout="constrained") + else: + ax = axes[class_idx] + _plot_scree_per_class(evals_data[class_idx], add_cumul_evals, ax) + if fig is not None: + fig.suptitle(title, fontsize=22) + figs.append(fig) + + return figs[0] if len(figs) == 1 else figs + + +@verbose +def get_spatial_filter_from_estimator( + estimator, + info, + *, + inverse_transform=False, + step_name=None, + get_coefs=("filters_", "patterns_", "evals_"), + patterns_method=None, + verbose=None, +): + """Instantiate a :class:`mne.decoding.SpatialFilter` object. + + Creates object from the fitted generalized eigendecomposition + transformers or :class:`mne.decoding.LinearModel`. + This object can be used to visualize spatial filters, + patterns, and eigenvalues. + + Parameters + ---------- + estimator : instance of sklearn.base.BaseEstimator + Sklearn-based estimator or meta-estimator from which to initialize + spatial filter. Use ``step_name`` to select relevant transformer + from the pipeline object (works with nested names using ``__`` syntax). + info : instance of mne.Info + The measurement info object for plotting topomaps. + inverse_transform : bool + If True, returns filters and patterns after inverse transforming them with + the transformer steps of the estimator. Defaults to False. + step_name : str | None + Name of the sklearn's pipeline step to get the coefs from. + If inverse_transform is True, the inverse transformations + will be applied using transformers before this step. + If None, the last step will be used. Defaults to None. + get_coefs : tuple + The names of the coefficient attributes to retrieve, can include + ``'filters_'``, ``'patterns_'`` and ``'evals_'``. + If step is GEDTransformer, will use all. + if step is LinearModel will only use ``'filters_'`` and ``'patterns_'``. + Defaults to (``'filters_'``, ``'patterns_'``, ``'evals_'``). + patterns_method : str + The method used to compute the patterns. Can be None, ``'pinv'`` or ``'haufe'``. + It will be set automatically to ``'pinv'`` if step is GEDTransformer, + or to ``'haufe'`` if step is LinearModel. Defaults to None. + %(verbose)s + + Returns + ------- + sp_filter : instance of mne.decoding.SpatialFilter + The spatial filter object. + + See Also + -------- + SpatialFilter, mne.decoding.LinearModel, mne.decoding.CSP, + mne.decoding.SSD, mne.decoding.XdawnTransformer, mne.decoding.SPoC + + Notes + ----- + .. versionadded:: 1.11 + """ + for coef in get_coefs: + if coef not in ("filters_", "patterns_", "evals_"): + raise ValueError( + f"'get_coefs' can only include 'filters_', " + f"'patterns_' and 'evals_', but got {coef}." + ) + if step_name is not None: + model = estimator.get_params()[step_name] + elif hasattr(estimator, "named_steps"): + model = estimator[-1] + else: + model = estimator + if isinstance(model, LinearModel): + patterns_method = "haufe" + get_coefs = ["filters_", "patterns_"] + elif isinstance(model, _GEDTransformer): + patterns_method = "pinv" + get_coefs = ["filters_", "patterns_", "evals_"] + + coefs = { + coef[:-1]: get_coef( + estimator, + coef, + inverse_transform=False if coef == "evals_" else inverse_transform, + step_name=step_name, + verbose=verbose, + ) + for coef in get_coefs + } + + sp_filter = SpatialFilter(info, patterns_method=patterns_method, **coefs) + return sp_filter + + +class SpatialFilter: + r"""Container for spatial filter weights (evecs) and patterns. + + .. 
warning:: For MNE-Python decoding classes, this container should be + instantiated with `mne.decoding.get_spatial_filter_from_estimator`. + Direct instantiation with external spatial filters is possible + at your own risk. + + This object is obtained either by generalized eigendecomposition (GED) algorithms + such as :class:`mne.decoding.CSP`, :class:`mne.decoding.SPoC`, + :class:`mne.decoding.SSD`, :class:`mne.decoding.XdawnTransformer` or by + :class:`mne.decoding.LinearModel`, wrapping linear models like SVM or Logit. + The object stores the filters that projects sensor data to a reduced component + space, and the corresponding patterns (obtained by pseudoinverse in GED case or + Haufe's trick in case of :class:`mne.decoding.LinearModel`). It can also be directly + initialized using filters from other transformers (e.g. PyRiemann), + but make sure that the dimensions match. + + Parameters + ---------- + info : instance of Info + The measurement info containing channel topography. + filters : ndarray, shape ((n_classes), n_components, n_channels) + The spatial filters (transposed eigenvectors of the decomposition). + evals : ndarray, shape ((n_classes), n_components) | None + The eigenvalues of the decomposition. Defaults to ``None``. + patterns : ndarray, shape ((n_classes), n_components, n_channels) | None + The patterns of the decomposition. If None, they will be computed + from the filters using pseudoinverse. Defaults to ``None``. + patterns_method : str + The method used to compute the patterns. Can be ``'pinv'`` or ``'haufe'``. + If ``patterns`` is None, it will be set to ``'pinv'``. Defaults to ``'pinv'``. + + Attributes + ---------- + info : instance of Info + The measurement info. + filters : ndarray, shape (n_components, n_channels) + The spatial filters (unmixing matrix). Applying these filters to the data + gives the component time series. + patterns : ndarray, shape (n_components, n_channels) + The spatial patterns (mixing matrix/forward model). + These represent the scalp topography of each component. + evals : ndarray, shape (n_components,) + The eigenvalues associated with each component. + patterns_method : str + The method used to compute the patterns from the filters. + + See Also + -------- + get_spatial_filter_from_estimator, mne.decoding.LinearModel, mne.decoding.CSP, + mne.decoding.SSD, mne.decoding.XdawnTransformer, mne.decoding.SPoC + + Notes + ----- + The spatial filters and patterns are stored with shape + ``(n_components, n_channels)``. + + Filters and patterns are related by the following equation: + + .. math:: + \mathbf{A} = \mathbf{W}^{-1} + + where :math:`\mathbf{A}` is the matrix of patterns (the mixing matrix) and + :math:`\mathbf{W}` is the matrix of filters (the unmixing matrix). + + For a detailed discussion on the difference between filters and patterns for GED + see :footcite:`Cohen2022` and for linear models in + general see :footcite:`HaufeEtAl2014`. + + .. versionadded:: 1.11 + + References + ---------- + .. 
+    .. footbibliography::
+    """
+
+    def __init__(
+        self,
+        info,
+        filters,
+        *,
+        evals=None,
+        patterns=None,
+        patterns_method="pinv",
+    ):
+        _check_option(
+            "patterns_method",
+            patterns_method,
+            ("pinv", "haufe"),
+        )
+        self.info = info
+        self.evals = evals
+        self.filters = filters
+        n_comps, n_chs = self.filters.shape[-2:]
+        if patterns is None:
+            # XXX Using numpy's pinv here to handle 3D case seamlessly
+            # Perhaps mne.linalg.pinv can be improved to handle 3D also
+            # Then it could be changed here to be consistent with
+            # GEDTransformer
+            self.patterns = np.linalg.pinv(filters.T)
+            self.patterns_method = "pinv"
+        else:
+            self.patterns = patterns
+            self.patterns_method = patterns_method
+
+        # In case of multi-target classification in LinearModel
+        # number of targets can be greater than number of channels.
+        if patterns_method != "haufe" and n_comps > n_chs:
+            raise ValueError(
+                "Number of components can't be greater "
+                "than number of channels in filters, "
+                "perhaps the provided matrix is transposed?"
+            )
+        if self.filters.shape != self.patterns.shape:
+            raise ValueError(
+                f"Shape mismatch between filters and patterns. "
+                f"Filters are {self.filters.shape}, "
+                f"while patterns are {self.patterns.shape}."
+            )
+
+    @fill_doc
+    def plot_filters(
+        self,
+        components=None,
+        tmin=None,
+        *,
+        ch_type=None,
+        scalings=None,
+        sensors=True,
+        show_names=False,
+        mask=None,
+        mask_params=None,
+        contours=6,
+        outlines="head",
+        sphere=None,
+        image_interp=_INTERPOLATION_DEFAULT,
+        extrapolate=_EXTRAPOLATE_DEFAULT,
+        border=_BORDER_DEFAULT,
+        res=64,
+        size=1,
+        cmap="RdBu_r",
+        vlim=(None, None),
+        cnorm=None,
+        colorbar=True,
+        cbar_fmt="%3.1f",
+        units=None,
+        axes=None,
+        name_format="Filter%01d",
+        nrows=1,
+        ncols="auto",
+        show=True,
+    ):
+        """Plot topographic maps of model filters.
+
+        Parameters
+        ----------
+        components : float | array of float | 'auto' | None
+            Indices of filters to plot. If "auto", the number of
+            ``axes`` determines the number of filters.
+            If None, all filters will be plotted. Defaults to None.
+        tmin : float | None
+            In case filters are distributed temporally,
+            this can be used to align them with the corresponding times
+            or frequencies. Use ``epochs.tmin``, for example.
+            Defaults to None.
+        %(ch_type_topomap)s
+        %(scalings_topomap)s
+        %(sensors_topomap)s
+        %(show_names_topomap)s
+        %(mask_evoked_topomap)s
+        %(mask_params_topomap)s
+        %(contours_topomap)s
+        %(outlines_topomap)s
+        %(sphere_topomap_auto)s
+        %(image_interp_topomap)s
+        %(extrapolate_topomap)s
+        %(border_topomap)s
+        %(res_topomap)s
+        %(size_topomap)s
+        %(cmap_topomap)s
+        %(vlim_plot_topomap_psd)s
+        %(cnorm)s
+        %(colorbar_topomap)s
+        %(cbar_fmt_topomap)s
+        %(units_topomap_evoked)s
+        %(axes_evoked_plot_topomap)s
+        name_format : str
+            String format for topomap values. Defaults to ``'Filter%%01d'``.
+        %(nrows_ncols_topomap)s
+        %(show)s
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+            The figure.
+ """ + fig = _plot_model( + self.filters, + self.info, + components=components, + evk_tmin=tmin, + ch_type=ch_type, + scalings=scalings, + sensors=sensors, + show_names=show_names, + mask=mask, + mask_params=mask_params, + contours=contours, + outlines=outlines, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + border=border, + res=res, + size=size, + cmap=cmap, + vlim=vlim, + cnorm=cnorm, + colorbar=colorbar, + cbar_fmt=cbar_fmt, + units=units, + axes=axes, + name_format=name_format, + nrows=nrows, + ncols=ncols, + show=show, + ) + return fig + + @fill_doc + def plot_patterns( + self, + components=None, + tmin=None, + *, + ch_type=None, + scalings=None, + sensors=True, + show_names=False, + mask=None, + mask_params=None, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap="RdBu_r", + vlim=(None, None), + cnorm=None, + colorbar=True, + cbar_fmt="%3.1f", + units=None, + axes=None, + name_format="Pattern%01d", + nrows=1, + ncols="auto", + show=True, + ): + """Plot topographic maps of model patterns. + + Parameters + ---------- + components : float | array of float | 'auto' | None + Indices of patterns to plot. If "auto", the number of + ``axes`` determines the amount of patterns. + If None, all patterns will be plotted. Defaults to None. + tmin : float | None + In case patterns are distributed temporally, + this can be used to align them with times + and frequency. Use ``epochs.tmin``, for example. + Defaults to None. + %(ch_type_topomap)s + %(scalings_topomap)s + %(sensors_topomap)s + %(show_names_topomap)s + %(mask_evoked_topomap)s + %(mask_params_topomap)s + %(contours_topomap)s + %(outlines_topomap)s + %(sphere_topomap_auto)s + %(image_interp_topomap)s + %(extrapolate_topomap)s + %(border_topomap)s + %(res_topomap)s + %(size_topomap)s + %(cmap_topomap)s + %(vlim_plot_topomap_psd)s + %(cnorm)s + %(colorbar_topomap)s + %(cbar_fmt_topomap)s + %(units_topomap_evoked)s + %(axes_evoked_plot_topomap)s + name_format : str + String format for topomap values. Defaults to ``'Pattern%%01d'``. + %(nrows_ncols_topomap)s + %(show)s + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The figure. + """ + fig = _plot_model( + self.patterns, + self.info, + components=components, + evk_tmin=tmin, + ch_type=ch_type, + scalings=scalings, + sensors=sensors, + show_names=show_names, + mask=mask, + mask_params=mask_params, + contours=contours, + outlines=outlines, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + border=border, + res=res, + size=size, + cmap=cmap, + vlim=vlim, + cnorm=cnorm, + colorbar=colorbar, + cbar_fmt=cbar_fmt, + units=units, + axes=axes, + name_format=name_format, + nrows=nrows, + ncols=ncols, + show=show, + ) + return fig + + @fill_doc + def plot_scree( + self, + title="Scree plot", + add_cumul_evals=False, + axes=None, + show=True, + ): + """Plot scree for GED eigenvalues. + + Parameters + ---------- + title : str + Title for the plot. Defaults to ``'Scree plot'``. + add_cumul_evals : bool + Whether to add second line and y-axis for cumulative eigenvalues. + Defaults to ``True``. + axes : instance of Axes | None + The matplotlib axes to plot to. Defaults to ``None``. + %(show)s + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The figure. 
+ """ + if self.evals is None: + raise AttributeError("Can't plot scree if eigenvalues are not provided.") + + fig = _plot_scree( + self.evals, + title=title, + add_cumul_evals=add_cumul_evals, + axes=axes, + ) + plt_show(show, block=False) + return fig diff --git a/mne/decoding/ssd.py b/mne/decoding/ssd.py index 4739264f544..b746cc7ac05 100644 --- a/mne/decoding/ssd.py +++ b/mne/decoding/ssd.py @@ -1,24 +1,27 @@ -# Author: Denis A. Engemann -# Victoria Peterson -# Thomas S. Binns +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import collections.abc as abc +from functools import partial import numpy as np -from . import TransformerMixin, BaseEstimator -from ..cov import _regularized_covariance, Covariance -from ..defaults import _handle_default +from .._fiff.meas_info import Info, create_info +from .._fiff.pick import _picks_to_idx from ..filter import filter_data -from ..io.pick import _get_channel_types, _picks_to_idx -from ..rank import compute_rank -from ..time_frequency import psd_array_welch from ..utils import ( - fill_doc, logger, _check_option, _time_mask, _validate_type, - _verbose_safe_false) + _validate_type, + fill_doc, + logger, +) +from ._covs_ged import _ssd_estimate +from ._mod_ged import _get_spectral_ratio, _ssd_mod +from .base import _GEDTransformer @fill_doc -class SSD(BaseEstimator, TransformerMixin): +class SSD(_GEDTransformer): """ Signal decomposition using the Spatio-Spectral Decomposition (SSD). @@ -59,11 +62,22 @@ class SSD(BaseEstimator, TransformerMixin): If sort_by_spectral_ratio is set to True, then the SSD sources will be sorted according to their spectral ratio which is calculated based on :func:`mne.time_frequency.psd_array_welch`. The n_fft parameter sets the - length of FFT used. + length of FFT used. The default (None) will use 1 second of data. See :func:`mne.time_frequency.psd_array_welch` for more information. cov_method_params : dict | None (default None) As in :class:`mne.decoding.SPoC` The default is None. + restr_type : "restricting" | "whitening" | "ssd" | None + Restricting transformation for covariance matrices before performing + generalized eigendecomposition. + If "restricting" only restriction to the principal subspace of signal_cov + will be performed. + If "whitening", covariance matrices will be additionally rescaled according + to the whitening for the signal_cov. + If "ssd", simplified version of "whitening" is performed. + If None, no restriction will be applied. Defaults to "ssd". + + .. versionadded:: 1.11 rank : None | dict | ‘info’ | ‘full’ As in :class:`mne.decoding.SPoC` This controls the rank computation that can be read from the @@ -74,9 +88,9 @@ class SSD(BaseEstimator, TransformerMixin): Attributes ---------- - filters_ : array, shape (n_channels, n_components) + filters_ : array, shape (``n_channels or less``, n_channels) The spatial filters to be multiplied with the signal. - patterns_ : array, shape (n_components, n_channels) + patterns_ : array, shape (``n_channels or less``, n_channels) The patterns for reconstructing the signal from the filtered data. References @@ -84,67 +98,115 @@ class SSD(BaseEstimator, TransformerMixin): .. 
footbibliography:: """ - def __init__(self, info, filt_params_signal, filt_params_noise, - reg=None, n_components=None, picks=None, - sort_by_spectral_ratio=True, return_filtered=False, - n_fft=None, cov_method_params=None, rank=None): + def __init__( + self, + info, + filt_params_signal, + filt_params_noise, + reg=None, + n_components=None, + picks=None, + sort_by_spectral_ratio=True, + return_filtered=False, + n_fft=None, + cov_method_params=None, + *, + restr_type="whitening", + rank=None, + ): """Initialize instance.""" - dicts = {"signal": filt_params_signal, "noise": filt_params_noise} - for param, dd in [('l', 0), ('h', 0), ('l', 1), ('h', 1)]: - key = ('signal', 'noise')[dd] - if param + '_freq' not in dicts[key]: - raise ValueError( - '%s must be defined in filter parameters for %s' - % (param + '_freq', key)) - val = dicts[key][param + '_freq'] - if not isinstance(val, (int, float)): - _validate_type(val, ('numeric',), f'{key} {param}_freq') - # check freq bands - if (filt_params_noise['l_freq'] > filt_params_signal['l_freq'] or - filt_params_signal['h_freq'] > filt_params_noise['h_freq']): - raise ValueError('Wrongly specified frequency bands!\n' - 'The signal band-pass must be within the noise ' - 'band-pass!') - self.picks_ = _picks_to_idx(info, picks, none='data', exclude='bads') - del picks - ch_types = _get_channel_types(info, picks=self.picks_, unique=True) - if len(ch_types) > 1: - raise ValueError('At this point SSD only supports fitting ' - 'single channel types. Your info has %i types' % - (len(ch_types))) self.info = info - self.freqs_signal = (filt_params_signal['l_freq'], - filt_params_signal['h_freq']) - self.freqs_noise = (filt_params_noise['l_freq'], - filt_params_noise['h_freq']) self.filt_params_signal = filt_params_signal self.filt_params_noise = filt_params_noise - # check if boolean - if not isinstance(sort_by_spectral_ratio, (bool)): - raise ValueError('sort_by_spectral_ratio must be boolean') - self.sort_by_spectral_ratio = sort_by_spectral_ratio - if n_fft is None: - self.n_fft = int(self.info['sfreq']) - else: - self.n_fft = int(n_fft) - # check if boolean - if not isinstance(return_filtered, (bool)): - raise ValueError('return_filtered must be boolean') - self.return_filtered = return_filtered self.reg = reg self.n_components = n_components - self.rank = rank + self.picks = picks + self.sort_by_spectral_ratio = sort_by_spectral_ratio + self.return_filtered = return_filtered + self.n_fft = n_fft self.cov_method_params = cov_method_params + self.restr_type = restr_type + self.rank = rank - def _check_X(self, X): + cov_callable = partial( + _ssd_estimate, + reg=reg, + cov_method_params=cov_method_params, + info=info, + picks=picks, + n_fft=n_fft, + filt_params_signal=filt_params_signal, + filt_params_noise=filt_params_noise, + rank=rank, + sort_by_spectral_ratio=sort_by_spectral_ratio, + ) + super().__init__( + n_components=n_components, + cov_callable=cov_callable, + mod_ged_callable=_ssd_mod, + restr_type=restr_type, + ) + + def _validate_params(self, X): + if isinstance(self.info, float): # special case, mostly for testing + self.sfreq_ = self.info + else: + _validate_type(self.info, Info, "info") + self.sfreq_ = self.info["sfreq"] + dicts = {"signal": self.filt_params_signal, "noise": self.filt_params_noise} + for param, dd in [("l", 0), ("h", 0), ("l", 1), ("h", 1)]: + key = ("signal", "noise")[dd] + if param + "_freq" not in dicts[key]: + raise ValueError( + f"{param + '_freq'} must be defined in filter parameters for {key}" + ) + val = 
dicts[key][param + "_freq"] + if not isinstance(val, int | float): + _validate_type(val, ("numeric",), f"{key} {param}_freq") + # check freq bands + if ( + self.filt_params_noise["l_freq"] > self.filt_params_signal["l_freq"] + or self.filt_params_signal["h_freq"] > self.filt_params_noise["h_freq"] + ): + raise ValueError( + "Wrongly specified frequency bands!\n" + "The signal band-pass must be within the noise " + "band-pass!" + ) + self.freqs_signal_ = ( + self.filt_params_signal["l_freq"], + self.filt_params_signal["h_freq"], + ) + self.freqs_noise_ = ( + self.filt_params_noise["l_freq"], + self.filt_params_noise["h_freq"], + ) + _validate_type(self.sort_by_spectral_ratio, (bool,), "sort_by_spectral_ratio") + _validate_type(self.n_fft, ("numeric", None), "n_fft") + self.n_fft_ = min( + int(self.n_fft if self.n_fft is not None else self.sfreq_), + X.shape[-1], + ) + _validate_type(self.return_filtered, (bool,), "return_filtered") + if isinstance(self.info, Info): + ch_types = self.info.get_channel_types(picks=self.picks, unique=True) + if len(ch_types) > 1: + raise ValueError( + "At this point SSD only supports fitting " + f"single channel types. Your info has {len(ch_types)} types." + ) + _validate_type(self.cov_method_params, (abc.Mapping, None), "cov_method_params") + + def _check_X(self, X, *, y=None, fit=False): """Check input data.""" - _validate_type(X, np.ndarray, 'X') - _check_option('X.ndim', X.ndim, (2, 3)) + X = self._check_data(X, y=y, fit=fit, atleast_3d=False) n_chan = X.shape[-2] - if n_chan != self.info['nchan']: - raise ValueError('Info must match the input data.' - 'Found %i channels but expected %i.' % - (n_chan, self.info['nchan'])) + if isinstance(self.info, Info) and n_chan != self.info["nchan"]: + raise ValueError( + "Info must match the input data." + f"Found {n_chan} channels but expected {self.info['nchan']}." + ) + return X def fit(self, X, y=None): """Estimate the SSD decomposition on raw or epoched data. @@ -155,56 +217,25 @@ def fit(self, X, y=None): The input data from which to estimate the SSD. Either 2D array obtained from continuous data or 3D array obtained from epoched data. - y : None | array, shape (n_samples,) - Used for scikit-learn compatibility. + y : None + Ignored; exists for compatibility with scikit-learn pipelines. Returns ------- self : instance of SSD Returns the modified instance. 
""" - from scipy import linalg - self._check_X(X) - X_aux = X[..., self.picks_, :] + X = self._check_X(X, y=y, fit=True) + self._validate_params(X) + if isinstance(self.info, Info): + info = self.info + else: + info = create_info(X.shape[-2], self.sfreq_, ch_types="eeg") + self.picks_ = _picks_to_idx(info, self.picks, none="data", exclude="bads") - X_signal = filter_data( - X_aux, self.info['sfreq'], **self.filt_params_signal) - X_noise = filter_data( - X_aux, self.info['sfreq'], **self.filt_params_noise) - X_noise -= X_signal - if X.ndim == 3: - X_signal = np.hstack(X_signal) - X_noise = np.hstack(X_noise) - - # prevent rank change when computing cov with rank='full' - cov_signal = _regularized_covariance( - X_signal, reg=self.reg, method_params=self.cov_method_params, - rank='full', info=self.info) - cov_noise = _regularized_covariance( - X_noise, reg=self.reg, method_params=self.cov_method_params, - rank='full', info=self.info) - - # project cov to rank subspace - cov_signal, cov_noise, rank_proj = (_dimensionality_reduction( - cov_signal, cov_noise, self.info, self.rank)) - - eigvals_, eigvects_ = linalg.eigh(cov_signal, cov_noise) - # sort in descending order - ix = np.argsort(eigvals_)[::-1] - self.eigvals_ = eigvals_[ix] - # project back to sensor space - self.filters_ = np.matmul(rank_proj, eigvects_[:, ix]) - self.patterns_ = np.linalg.pinv(self.filters_) - - # We assume that ordering by spectral ratio is more important - # than the initial ordering. This ordering should be also learned when - # fitting. - X_ssd = self.filters_.T @ X[..., self.picks_, :] - sorter_spec = Ellipsis - if self.sort_by_spectral_ratio: - _, sorter_spec = self.get_spectral_ratio(ssd_sources=X_ssd) - self.sorter_spec = sorter_spec - logger.info('Done.') + super().fit(X, y) + + logger.info("Done.") return self def transform(self, X): @@ -222,20 +253,42 @@ def transform(self, X): X_ssd : array, shape ([n_epochs, ]n_components, n_times) The processed data. """ - self._check_X(X) - if self.filters_ is None: - raise RuntimeError('No filters available. Please first call fit') - if self.return_filtered: - X_aux = X[..., self.picks_, :] - X = filter_data(X_aux, self.info['sfreq'], - **self.filt_params_signal) - X_ssd = self.filters_.T @ X[..., self.picks_, :] + X = self._check_X(X) + # For the case where n_epochs dimension is absent. if X.ndim == 2: - X_ssd = X_ssd[self.sorter_spec][:self.n_components] - else: - X_ssd = X_ssd[:, self.sorter_spec, :][:, :self.n_components, :] + X = np.expand_dims(X, axis=0) + X_aux = X[..., self.picks_, :] + if self.return_filtered: + X_aux = filter_data(X_aux, self.sfreq_, **self.filt_params_signal) + X_ssd = super().transform(X_aux).squeeze() + return X_ssd + def fit_transform(self, X, y=None, **fit_params): + """Fit SSD to data, then transform it. + + Fits transformer to ``X`` and ``y`` with optional parameters ``fit_params``, and + returns a transformed version of ``X``. + + Parameters + ---------- + X : array, shape ([n_epochs, ]n_channels, n_times) + The input data from which to estimate the SSD. Either 2D array obtained from + continuous data or 3D array obtained from epoched data. + y : None + Ignored; exists for compatibility with scikit-learn pipelines. + **fit_params : dict + Additional fitting parameters passed to the :meth:`mne.decoding.SSD.fit` + method. Not used for this class. + + Returns + ------- + X_ssd : array, shape ([n_epochs, ]n_components, n_times) + The processed data. 
+ """ + # use parent TransformerMixin method but with custom docstring + return super().fit_transform(X, y=y, **fit_params) + def get_spectral_ratio(self, ssd_sources): """Get the spectal signal-to-noise ratio for each spatial filter. @@ -258,24 +311,18 @@ def get_spectral_ratio(self, ssd_sources): ---------- .. footbibliography:: """ - psd, freqs = psd_array_welch( - ssd_sources, sfreq=self.info['sfreq'], n_fft=self.n_fft) - sig_idx = _time_mask(freqs, *self.freqs_signal) - noise_idx = _time_mask(freqs, *self.freqs_noise) - if psd.ndim == 3: - mean_sig = psd[:, :, sig_idx].mean(axis=2).mean(axis=0) - mean_noise = psd[:, :, noise_idx].mean(axis=2).mean(axis=0) - spec_ratio = mean_sig / mean_noise - else: - mean_sig = psd[:, sig_idx].mean(axis=1) - mean_noise = psd[:, noise_idx].mean(axis=1) - spec_ratio = mean_sig / mean_noise - sorter_spec = spec_ratio.argsort()[::-1] + spec_ratio, sorter_spec = _get_spectral_ratio( + ssd_sources=ssd_sources, + sfreq=self.sfreq_, + n_fft=self.n_fft_, + freqs_signal=self.freqs_signal_, + freqs_noise=self.freqs_noise_, + ) return spec_ratio, sorter_spec def inverse_transform(self): """Not implemented yet.""" - raise NotImplementedError('inverse_transform is not yet available.') + raise NotImplementedError("inverse_transform is not yet available.") def apply(self, X): """Remove selected components from the signal. @@ -301,44 +348,6 @@ def apply(self, X): The processed data. """ X_ssd = self.transform(X) - pick_patterns = self.patterns_[self.sorter_spec][:self.n_components].T + pick_patterns = self.patterns_[: self.n_components].T X = pick_patterns @ X_ssd return X - - -def _dimensionality_reduction(cov_signal, cov_noise, info, rank): - """Perform dimensionality reduction on the covariance matrices.""" - from scipy import linalg - n_channels = cov_signal.shape[0] - - # find ranks of covariance matrices - rank_signal = list(compute_rank( - Covariance(cov_signal, info.ch_names, list(), list(), 0, - verbose=_verbose_safe_false()), - rank, _handle_default('scalings_cov_rank', None), info).values())[0] - rank_noise = list(compute_rank( - Covariance(cov_noise, info.ch_names, list(), list(), 0, - verbose=_verbose_safe_false()), - rank, _handle_default('scalings_cov_rank', None), info).values())[0] - rank = np.min([rank_signal, rank_noise]) # should be identical - - if rank < n_channels: - eigvals, eigvects = linalg.eigh(cov_signal) - # sort in descending order - ix = np.argsort(eigvals)[::-1] - eigvals = eigvals[ix] - eigvects = eigvects[:, ix] - # compute rank subspace projection matrix - rank_proj = np.matmul( - eigvects[:, :rank], np.eye(rank) * (eigvals[:rank]**-0.5)) - logger.info( - 'Projecting covariance of %i channels to %i rank subspace' - % (n_channels, rank,)) - else: - rank_proj = np.eye(n_channels) - logger.info('Preserving covariance rank (%i)' % (rank,)) - - # project covariance matrices to rank subspace - cov_signal = np.matmul(rank_proj.T, np.matmul(cov_signal, rank_proj)) - cov_noise = np.matmul(rank_proj.T, np.matmul(cov_noise, rank_proj)) - return cov_signal, cov_noise, rank_proj diff --git a/mne/decoding/tests/__init__.py b/mne/decoding/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/decoding/tests/__init__.py +++ b/mne/decoding/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
diff --git a/mne/decoding/tests/test_base.py b/mne/decoding/tests/test_base.py index 43f8b08d097..68623876222 100644 --- a/mne/decoding/tests/test_base.py +++ b/mne/decoding/tests/test_base.py @@ -1,21 +1,58 @@ -# Author: Jean-Remi King, -# Marijn van Vliet, -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import platform +from contextlib import nullcontext import numpy as np -from numpy.testing import (assert_array_equal, assert_array_almost_equal, - assert_equal, assert_allclose, assert_array_less) import pytest - -from mne import create_info, EpochsArray -from mne.fixes import is_regressor, is_classifier -from mne.utils import requires_sklearn -from mne.decoding.base import (_get_inverse_funcs, LinearModel, get_coef, - cross_val_multiscore, BaseEstimator) +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, + assert_equal, +) + +pytest.importorskip("sklearn") + +from sklearn import svm +from sklearn.base import ( + BaseEstimator as sklearn_BaseEstimator, +) +from sklearn.base import ( + TransformerMixin as sklearn_TransformerMixin, +) +from sklearn.base import ( + is_classifier, + is_regressor, +) +from sklearn.decomposition import PCA +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis +from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge +from sklearn.model_selection import ( + GridSearchCV, + KFold, + StratifiedKFold, + cross_val_score, +) +from sklearn.multiclass import OneVsRestClassifier +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils.estimator_checks import parametrize_with_checks + +from mne import EpochsArray, create_info +from mne.decoding import GeneralizingEstimator, Scaler, TransformerMixin, Vectorizer +from mne.decoding.base import ( + BaseEstimator, + LinearModel, + _get_inverse_funcs, + cross_val_multiscore, + get_coef, +) from mne.decoding.search_light import SlidingEstimator -from mne.decoding import (Scaler, TransformerMixin, Vectorizer, - GeneralizingEstimator) +from mne.utils import check_version def _make_data(n_samples=1000, n_features=5, n_targets=3): @@ -43,7 +80,7 @@ def _make_data(n_samples=1000, n_features=5, n_targets=3): # Define Y latent factors np.random.seed(0) cov_Y = np.eye(n_targets) * 10 + np.random.rand(n_targets, n_targets) - cov_Y = (cov_Y + cov_Y.T) / 2. 
+ cov_Y = (cov_Y + cov_Y.T) / 2.0 mean_Y = np.random.rand(n_targets) Y = np.random.multivariate_normal(mean_Y, cov_Y, size=n_samples) @@ -53,36 +90,43 @@ def _make_data(n_samples=1000, n_features=5, n_targets=3): X = Y.dot(A.T) X += np.random.randn(n_samples, n_features) # add noise X += np.random.rand(n_features) # Put an offset + if n_targets == 1: + Y = Y[:, 0] return X, Y, A -@requires_sklearn def test_get_coef(): """Test getting linear coefficients (filters/patterns) from estimators.""" - from sklearn.base import TransformerMixin, BaseEstimator - from sklearn.pipeline import make_pipeline - from sklearn.preprocessing import StandardScaler - from sklearn import svm - from sklearn.linear_model import Ridge - from sklearn.model_selection import GridSearchCV - - lm_classification = LinearModel() - assert (is_classifier(lm_classification)) + lm_classification = LinearModel(LogisticRegression(solver="liblinear")) + assert hasattr(lm_classification, "__sklearn_tags__") + if check_version("sklearn", "1.6"): + print(lm_classification.__sklearn_tags__()) + assert is_classifier(lm_classification.model) + assert is_classifier(lm_classification) + assert not is_regressor(lm_classification.model) + assert not is_regressor(lm_classification) lm_regression = LinearModel(Ridge()) - assert (is_regressor(lm_regression)) + assert is_regressor(lm_regression.model) + assert is_regressor(lm_regression) + assert not is_classifier(lm_regression.model) + assert not is_classifier(lm_regression) - parameters = {'kernel': ['linear'], 'C': [1, 10]} + parameters = {"kernel": ["linear"], "C": [1, 10]} lm_gs_classification = LinearModel( - GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=None)) - assert (is_classifier(lm_gs_classification)) + GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=None) + ) + assert is_classifier(lm_gs_classification) lm_gs_regression = LinearModel( - GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=None)) - assert (is_regressor(lm_gs_regression)) + GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=None) + ) + assert is_regressor(lm_gs_regression) # Define a classifier, an invertible transformer and an non-invertible one. + assert BaseEstimator is sklearn_BaseEstimator + assert TransformerMixin is sklearn_TransformerMixin class Clf(BaseEstimator): def fit(self, X, y): @@ -113,27 +157,36 @@ def inverse_transform(self, X): for expected_n, est in good_estimators: est.fit(X, y) - assert (expected_n == len(_get_inverse_funcs(est))) + assert expected_n == len(_get_inverse_funcs(est)) bad_estimators = [ - Clf(), # no preprocessing - Inv(), # final estimator isn't classifier - make_pipeline(NoInv(), Clf()), # first step isn't invertible - make_pipeline(Inv(), make_pipeline( - Inv(), NoInv()), Clf()), # nested step isn't invertible + Clf(), # 0: no preprocessing + Inv(), # 1: final estimator isn't classifier + make_pipeline(NoInv(), Clf()), # 2: first step isn't invertible + make_pipeline( + Inv(), make_pipeline(Inv(), NoInv()), Clf() + ), # 3: nested step isn't invertible ] - for est in bad_estimators: + # It's the NoInv that triggers the warning, but too hard to context manage just + # the correct part of the bad_estimators loop + for ei, est in enumerate(bad_estimators): est.fit(X, y) - invs = _get_inverse_funcs(est) + if ei in (2, 3): # the NoInv indices + ctx = pytest.warns(RuntimeWarning, match="Cannot inverse transform") + else: + ctx = nullcontext() + with ctx: + invs = _get_inverse_funcs(est) assert_equal(invs, list()) # II. 
Test get coef for classification/regression estimators and pipelines rng = np.random.RandomState(0) - for clf in (lm_regression, - lm_gs_classification, - make_pipeline(StandardScaler(), lm_classification), - make_pipeline(StandardScaler(), lm_gs_regression)): - + for clf in ( + lm_regression, + lm_gs_classification, + make_pipeline(StandardScaler(), lm_classification), + make_pipeline(StandardScaler(), lm_gs_regression), + ): # generate some categorical/continuous data # according to the type of estimator. if is_classifier(clf): @@ -147,38 +200,37 @@ def inverse_transform(self, X): clf.fit(X, y) # Retrieve final linear model - filters = get_coef(clf, 'filters_', False) - if hasattr(clf, 'steps'): - if hasattr(clf.steps[-1][-1].model, 'best_estimator_'): + filters = get_coef(clf, "filters_", False) + if hasattr(clf, "steps"): + if hasattr(clf.steps[-1][-1].model_, "best_estimator_"): # Linear Model with GridSearchCV - coefs = clf.steps[-1][-1].model.best_estimator_.coef_ + coefs = clf.steps[-1][-1].model_.best_estimator_.coef_ else: # Standard Linear Model - coefs = clf.steps[-1][-1].model.coef_ + coefs = clf.steps[-1][-1].model_.coef_ else: - if hasattr(clf.model, 'best_estimator_'): + if hasattr(clf.model_, "best_estimator_"): # Linear Model with GridSearchCV - coefs = clf.model.best_estimator_.coef_ + coefs = clf.model_.best_estimator_.coef_ else: # Standard Linear Model - coefs = clf.model.coef_ + coefs = clf.model_.coef_ if coefs.ndim == 2 and coefs.shape[0] == 1: coefs = coefs[0] assert_array_equal(filters, coefs) - patterns = get_coef(clf, 'patterns_', False) - assert (filters[0] != patterns[0]) + patterns = get_coef(clf, "patterns_", False) + assert filters[0] != patterns[0] n_chans = X.shape[1] assert_array_equal(filters.shape, patterns.shape, [n_chans, n_chans]) # Inverse transform linear model - filters_inv = get_coef(clf, 'filters_', True) - assert (filters[0] != filters_inv[0]) - patterns_inv = get_coef(clf, 'patterns_', True) - assert (patterns[0] != patterns_inv[0]) + filters_inv = get_coef(clf, "filters_", True) + assert filters[0] != filters_inv[0] + patterns_inv = get_coef(clf, "patterns_", True) + assert patterns[0] != patterns_inv[0] class _Noop(BaseEstimator, TransformerMixin): - def fit(self, X, y=None): return self @@ -188,50 +240,147 @@ def transform(self, X): inverse_transform = transform -@requires_sklearn -@pytest.mark.parametrize('inverse', (True, False)) -@pytest.mark.parametrize('Scale, kwargs', [ - (Scaler, dict(info=None, scalings='mean')), - (_Noop, dict()), -]) -def test_get_coef_inverse_transform(inverse, Scale, kwargs): +@pytest.mark.parametrize("inverse", (True, False)) +@pytest.mark.parametrize( + "clf", + [ + pytest.param( + make_pipeline( + Scaler(info=None, scalings="mean"), + SlidingEstimator(make_pipeline(LinearModel(Ridge()))), + ), + id="Scaler+SlidingEstimator", + ), + pytest.param( + make_pipeline( + _Noop(), + SlidingEstimator(make_pipeline(LinearModel(Ridge()))), + ), + id="Noop+SlidingEstimator", + ), + pytest.param( + SlidingEstimator(make_pipeline(StandardScaler(), LinearModel(Ridge()))), + id="SlidingEstimator+nested StandardScaler", + ), + ], +) +def test_get_coef_inverse_transform(inverse, clf): """Test get_coef with and without inverse_transform.""" - from sklearn.linear_model import Ridge - from sklearn.pipeline import make_pipeline - lm_regression = LinearModel(Ridge()) X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1) - # Check with search_light and combination of preprocessing ending with sl: - # slider = 
SlidingEstimator(make_pipeline(StandardScaler(), lm_regression)) - # XXX : line above should work but does not as only last step is - # used in get_coef ... - slider = SlidingEstimator(make_pipeline(lm_regression)) X = np.transpose([X, -X], [1, 2, 0]) # invert X across 2 time samples - clf = make_pipeline(Scale(**kwargs), slider) clf.fit(X, y) - patterns = get_coef(clf, 'patterns_', inverse) - filters = get_coef(clf, 'filters_', inverse) + patterns = get_coef(clf, "patterns_", inverse) + filters = get_coef(clf, "filters_", inverse) assert_array_equal(filters.shape, patterns.shape, X.shape[1:]) # the two time samples get inverted patterns assert_equal(patterns[0, 0], -patterns[0, 1]) + for t in [0, 1]: - filters_t = get_coef( - clf.named_steps['slidingestimator'].estimators_[t], - 'filters_', False) - if Scale is _Noop: - assert_array_equal(filters_t, filters[:, t]) + if hasattr(clf, "named_steps"): + est_t = clf.named_steps["slidingestimator"].estimators_[t] + filters_t = get_coef(est_t, "filters_", inverse) + if inverse: + filters_t = clf[0].inverse_transform(filters_t.reshape(1, -1))[0] + else: + est_t = clf.estimators_[t] + filters_t = get_coef(est_t, "filters_", inverse) + + assert_equal(filters_t, filters[:, t]) + + +def test_get_coef_inverse_step_name(): + """Test get_coef with inverse_transform=True and a specific step_name.""" + X, y, _ = _make_data(n_samples=100, n_features=5, n_targets=1) + + # Test with a simple pipeline + pipe = make_pipeline(StandardScaler(), PCA(n_components=3), LinearModel(Ridge())) + pipe.fit(X, y) + + coef_inv_actual = get_coef( + pipe, attr="patterns_", inverse_transform=True, step_name="linearmodel" + ) + # Reshape your data using array.reshape(1, -1) if it contains a single sample. + coef_raw = pipe.named_steps["linearmodel"].patterns_.reshape(1, -1) + coef_inv_desired = pipe.named_steps["pca"].inverse_transform(coef_raw) + coef_inv_desired = pipe.named_steps["standardscaler"].inverse_transform( + coef_inv_desired + ) + + assert coef_inv_actual.shape == (X.shape[1],) + # Reshape your data using array.reshape(1, -1) if it contains a single sample. 
+ assert_array_almost_equal(coef_inv_actual.reshape(1, -1), coef_inv_desired) + + with pytest.raises(ValueError, match="inverse_transform"): + _ = get_coef( + pipe[-1], # LinearModel + "filters_", + inverse_transform=True, + ) + with pytest.raises(ValueError, match="step_name"): + _ = get_coef( + SlidingEstimator(pipe), + "filters_", + inverse_transform=True, + step_name="slidingestimator__pipeline__linearmodel", + ) + + # Test with a nested pipeline to check __ parsing + inner_pipe = make_pipeline(PCA(n_components=3), LinearModel(Ridge())) + nested_pipe = make_pipeline(StandardScaler(), inner_pipe) + nested_pipe.fit(X, y) + coef_nested_inv_actual = get_coef( + nested_pipe, + attr="patterns_", + inverse_transform=True, + step_name="pipeline__linearmodel", + ) + linearmodel = nested_pipe.named_steps["pipeline"].named_steps["linearmodel"] + pca = nested_pipe.named_steps["pipeline"].named_steps["pca"] + scaler = nested_pipe.named_steps["standardscaler"] + + coef_nested_raw = linearmodel.patterns_.reshape(1, -1) + coef_nested_inv_desired = pca.inverse_transform(coef_nested_raw) + coef_nested_inv_desired = scaler.inverse_transform(coef_nested_inv_desired) + + assert coef_nested_inv_actual.shape == (X.shape[1],) + assert_array_almost_equal( + coef_nested_inv_actual.reshape(1, -1), coef_nested_inv_desired + ) + + with pytest.raises(ValueError, match="i_do_not_exist"): + get_coef( + pipe, attr="patterns_", inverse_transform=True, step_name="i_do_not_exist" + ) + + class NonInvertibleTransformer(BaseEstimator, TransformerMixin): + def fit(self, X, y=None): + return self + + def transform(self, X): + # In a real scenario, this would modify X + return X + + pipe = make_pipeline(NonInvertibleTransformer(), LinearModel(Ridge())) + pipe.fit(X, y) + with pytest.warns(RuntimeWarning, match="not invertible"): + _ = get_coef( + pipe, + "filters_", + inverse_transform=True, + step_name="linearmodel", + ) -@requires_sklearn -@pytest.mark.parametrize('n_features', [1, 5]) -@pytest.mark.parametrize('n_targets', [1, 3]) +@pytest.mark.parametrize("n_features", [1, 5]) +@pytest.mark.parametrize("n_targets", [1, 3]) def test_get_coef_multiclass(n_features, n_targets): """Test get_coef on multiclass problems.""" # Check patterns with more than 1 regressor - from sklearn.linear_model import LinearRegression, Ridge - from sklearn.pipeline import make_pipeline - X, Y, A = _make_data( - n_samples=30000, n_features=n_features, n_targets=n_targets) - lm = LinearModel(LinearRegression()).fit(X, Y) + X, Y, A = _make_data(n_samples=30000, n_features=n_features, n_targets=n_targets) + lm = LinearModel(LinearRegression()) + assert not hasattr(lm, "model_") + lm.fit(X, Y) + assert lm.model is not lm.model_ assert_array_equal(lm.filters_.shape, lm.patterns_.shape) if n_targets == 1: want_shape = (n_features,) @@ -245,57 +394,65 @@ def test_get_coef_multiclass(n_features, n_targets): clf.fit(X, Y) if n_features > 1 and n_targets > 1: assert_allclose(A, lm.patterns_.T, atol=2e-2) - coef = get_coef(clf, 'patterns_', inverse_transform=True) + coef = get_coef(clf, "patterns_", inverse_transform=True) assert_allclose(lm.patterns_, coef, atol=1e-5) # With epochs, scaler, and vectorizer (typical use case) X_epo = X.reshape(X.shape + (1,)) - info = create_info(n_features, 1000., 'eeg') + info = create_info(n_features, 1000.0, "eeg") lm = LinearModel(Ridge(alpha=1)) clf = make_pipeline( - Scaler(info, scalings=dict(eeg=1.)), # XXX adding this step breaks + Scaler(info, scalings=dict(eeg=1.0)), # XXX adding this step breaks Vectorizer(), 
lm, ) clf.fit(X_epo, Y) if n_features > 1 and n_targets > 1: assert_allclose(A, lm.patterns_.T, atol=2e-2) - coef = get_coef(clf, 'patterns_', inverse_transform=True) - lm_patterns_ = lm.patterns_[..., np.newaxis] + coef = get_coef(clf, "patterns_", inverse_transform=True) + + lm_patterns_ = lm.patterns_ + # Expected shape is (n_targets, n_features) + # which is equivalent to (n_components, n_channels) + # in spatial filters + if lm_patterns_.ndim == 1: + lm_patterns_ = lm_patterns_[np.newaxis, :] + else: + lm_patterns_ = lm_patterns_[..., np.newaxis] assert_allclose(lm_patterns_, coef, atol=1e-5) # Check can pass fitting parameters lm.fit(X, Y, sample_weight=np.ones(len(Y))) -@requires_sklearn -@pytest.mark.parametrize('n_classes, n_channels, n_times', [ - (4, 10, 2), - (4, 3, 2), - (3, 2, 1), - (3, 1, 2), -]) +@pytest.mark.parametrize( + "n_classes, n_channels, n_times", + [ + (4, 10, 2), + (4, 3, 2), + (3, 2, 1), + (3, 1, 2), + ], +) def test_get_coef_multiclass_full(n_classes, n_channels, n_times): """Test a full example with pattern extraction.""" - from sklearn.pipeline import make_pipeline - from sklearn.linear_model import LogisticRegression - from sklearn.model_selection import StratifiedKFold data = np.zeros((10 * n_classes, n_channels, n_times)) # Make only the first channel informative for ii in range(n_classes): - data[ii * 10:(ii + 1) * 10, 0] = ii + data[ii * 10 : (ii + 1) * 10, 0] = ii events = np.zeros((len(data), 3), int) events[:, 0] = np.arange(len(events)) events[:, 2] = data[:, 0, 0] - info = create_info(n_channels, 1000., 'eeg') + info = create_info(n_channels, 1000.0, "eeg") epochs = EpochsArray(data, info, events, tmin=0) clf = make_pipeline( - Scaler(epochs.info), Vectorizer(), - LinearModel(LogisticRegression(random_state=0, multi_class='ovr')), + Scaler(epochs.info), + Vectorizer(), + LinearModel(OneVsRestClassifier(LogisticRegression(random_state=0))), ) - scorer = 'roc_auc_ovr_weighted' + scorer = "roc_auc_ovr_weighted" time_gen = GeneralizingEstimator(clf, scorer, verbose=True) - X = epochs.get_data() + X = epochs.get_data(copy=False) y = epochs.events[:, 2] n_splits = 3 cv = StratifiedKFold(n_splits=n_splits) @@ -304,18 +461,18 @@ def test_get_coef_multiclass_full(n_classes, n_channels, n_times): if n_times > 1: want += (n_times, n_times) assert scores.shape == want - assert_array_less(0.8, scores) + # On Windows LBFGS can fail to converge, so we need to be a bit more tol here + limit = 0.7 if platform.system() == "Windows" else 0.8 + assert_array_less(limit, scores) clf.fit(X, y) - patterns = get_coef(clf, 'patterns_', inverse_transform=True) + patterns = get_coef(clf, "patterns_", inverse_transform=True) assert patterns.shape == (n_classes, n_channels, n_times) - assert_allclose(patterns[:, 1:], 0., atol=1e-7) # no other channels useful + assert_allclose(patterns[:, 1:], 0.0, atol=1e-7) # no other channels useful -@requires_sklearn def test_linearmodel(): """Test LinearModel class for computing filters and patterns.""" # check categorical target fit in standard linear model - from sklearn.linear_model import LinearRegression rng = np.random.RandomState(0) clf = LinearModel() n, n_features = 20, 3 @@ -328,12 +485,25 @@ def test_linearmodel(): wrong_X = rng.rand(n, n_features, 99) clf.fit(wrong_X, y) + # check fit_transform call + clf = LinearModel(LinearDiscriminantAnalysis()) + _ = clf.fit_transform(X, y) + + # check that model has to have coef_, RBF-SVM doesn't + clf = LinearModel(svm.SVC(kernel="rbf")) + with pytest.raises(ValueError, match="does not 
have a `coef_`"): + clf.fit(X, y) + + # check that model has to be a predictor + clf = LinearModel(StandardScaler()) + with pytest.raises(ValueError, match="classifier or regressor"): + clf.fit(X, y) + # check categorical target fit in standard linear model with GridSearchCV - from sklearn import svm - from sklearn.model_selection import GridSearchCV - parameters = {'kernel': ['linear'], 'C': [1, 10]} + parameters = {"kernel": ["linear"], "C": [1, 10]} clf = LinearModel( - GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=None)) + GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=None) + ) clf.fit(X, y) assert_equal(clf.filters_.shape, (n_features,)) assert_equal(clf.patterns_.shape, (n_features,)) @@ -345,10 +515,11 @@ def test_linearmodel(): n_targets = 1 Y = rng.rand(n, n_targets) clf = LinearModel( - GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=None)) + GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=None) + ) clf.fit(X, y) - assert_equal(clf.filters_.shape, (n_features, )) - assert_equal(clf.patterns_.shape, (n_features, )) + assert_equal(clf.filters_.shape, (n_features,)) + assert_equal(clf.patterns_.shape, (n_features,)) with pytest.raises(ValueError): wrong_y = rng.rand(n, n_features, 99) clf.fit(X, wrong_y) @@ -365,26 +536,23 @@ def test_linearmodel(): clf.fit(X, wrong_y) -@requires_sklearn def test_cross_val_multiscore(): """Test cross_val_multiscore for computing scores on decoding over time.""" - from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score - from sklearn.linear_model import LogisticRegression, LinearRegression - - logreg = LogisticRegression(solver='liblinear', random_state=0) + logreg = LogisticRegression(solver="liblinear", random_state=0) # compare to cross-val-score X = np.random.rand(20, 3) y = np.arange(20) % 2 cv = KFold(2, random_state=0, shuffle=True) clf = logreg - assert_array_equal(cross_val_score(clf, X, y, cv=cv), - cross_val_multiscore(clf, X, y, cv=cv)) + assert_array_equal( + cross_val_score(clf, X, y, cv=cv), cross_val_multiscore(clf, X, y, cv=cv) + ) # Test with search light X = np.random.rand(20, 4, 3) y = np.arange(20) % 2 - clf = SlidingEstimator(logreg, scoring='accuracy') + clf = SlidingEstimator(logreg, scoring="accuracy") scores_acc = cross_val_multiscore(clf, X, y, cv=cv) assert_array_equal(np.shape(scores_acc), [2, 3]) @@ -399,9 +567,9 @@ def test_cross_val_multiscore(): # raise an error if scoring is defined at cross-val-score level and # search light, because search light does not return a 1-dimensional # prediction. 
- pytest.raises(ValueError, cross_val_multiscore, clf, X, y, cv=cv, - scoring='roc_auc') - clf = SlidingEstimator(logreg, scoring='roc_auc') + with pytest.raises(ValueError, match="multi_class must be"): + cross_val_multiscore(clf, X, y, cv=cv, scoring="roc_auc", n_jobs=1) + clf = SlidingEstimator(logreg, scoring="roc_auc") scores_auc = cross_val_multiscore(clf, X, y, cv=cv, n_jobs=None) scores_auc_manual = list() for train, test in cv.split(X, y): @@ -425,3 +593,9 @@ def test_cross_val_multiscore(): manual = cross_val(reg, X, y, cv=KFold(2)) auto = cross_val(reg, X, y, cv=2) assert_array_equal(manual, auto) + + +@parametrize_with_checks([LinearModel(LogisticRegression())]) +def test_sklearn_compliance(estimator, check): + """Test LinearModel compliance with sklearn.""" + check(estimator) diff --git a/mne/decoding/tests/test_csp.py b/mne/decoding/tests/test_csp.py index 6945a812cf7..4411267b407 100644 --- a/mne/decoding/tests/test_csp.py +++ b/mne/decoding/tests/test_csp.py @@ -1,25 +1,35 @@ -# Author: Alexandre Gramfort -# Romain Trachel -# Alexandre Barachant -# Jean-Remi King -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from pathlib import Path import numpy as np import pytest -from numpy.testing import (assert_array_almost_equal, assert_array_equal, - assert_equal) - -from mne import io, Epochs, read_events, pick_types -from mne.decoding.csp import CSP, _ajd_pham, SPoC -from mne.utils import requires_sklearn - -data_dir = Path(__file__).parent.parent.parent / "io" / "tests" / "data" +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, +) + +pytest.importorskip("sklearn") + +from sklearn.linear_model import LogisticRegression +from sklearn.model_selection import StratifiedKFold, cross_val_score +from sklearn.pipeline import Pipeline, make_pipeline +from sklearn.svm import SVC +from sklearn.utils.estimator_checks import parametrize_with_checks + +from mne import Epochs, compute_proj_raw, io, pick_types, read_events +from mne.decoding import CSP, LinearModel, Scaler, SPoC, get_coef +from mne.decoding.csp import _ajd_pham +from mne.utils import catch_logging + +data_dir = Path(__file__).parents[2] / "io" / "tests" / "data" raw_fname = data_dir / "test_raw.fif" event_name = data_dir / "test-eve.fif" -tmin, tmax = -0.2, 0.5 +tmin, tmax = -0.1, 0.2 event_id = dict(aud_l=1, vis_l=3) # if stop is too small pca may fail in some cases, but we're okay on this file start, stop = 0, 8 @@ -46,23 +56,39 @@ def simulate_data(target, n_trials=100, n_channels=10, random_state=42): return X, mixing_mat -def deterministic_toy_data(classes=('class_a', 'class_b')): +def deterministic_toy_data(classes=("class_a", "class_b")): """Generate a small deterministic toy data set. Four independent sources are modulated by the target class and mixed into signal space. 
""" - sources_a = np.array([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], - [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1], - [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], - [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]], - dtype=float) * 2 - 1 - - sources_b = np.array([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], - [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1], - [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], - [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]], - dtype=float) * 2 - 1 + sources_a = ( + np.array( + [ + [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], + ], + dtype=float, + ) + * 2 + - 1 + ) + + sources_b = ( + np.array( + [ + [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], + ], + dtype=float, + ) + * 2 + - 1 + ) sources_a[0, :] *= 1 sources_a[1, :] *= 2 @@ -70,10 +96,14 @@ def deterministic_toy_data(classes=('class_a', 'class_b')): sources_b[2, :] *= 3 sources_b[3, :] *= 4 - mixing = np.array([[1.0, 0.8, 0.6, 0.4], - [0.8, 1.0, 0.8, 0.6], - [0.6, 0.8, 1.0, 0.8], - [0.4, 0.6, 0.8, 1.0]]) + mixing = np.array( + [ + [1.0, 0.8, 0.6, 0.4], + [0.8, 1.0, 0.8, 0.6], + [0.6, 0.8, 1.0, 0.8], + [0.4, 0.6, 0.8, 1.0], + ] + ) x_class_a = mixing @ sources_a x_class_b = mixing @ sources_b @@ -89,29 +119,43 @@ def test_csp(): """Test Common Spatial Patterns algorithm on epochs.""" raw = io.read_raw_fif(raw_fname, preload=False) events = read_events(event_name) - picks = pick_types(raw.info, meg=True, stim=False, ecg=False, - eog=False, exclude='bads') + picks = pick_types( + raw.info, meg=True, stim=False, ecg=False, eog=False, exclude="bads" + ) picks = picks[2:12:3] # subselect channels -> disable proj! 
raw.add_proj([], remove_existing=True) - epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), preload=True, proj=False) - epochs_data = epochs.get_data() + epochs = Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=picks, + baseline=(None, 0), + preload=True, + proj=False, + ) + epochs_data = epochs.get_data(copy=False) n_channels = epochs_data.shape[1] y = epochs.events[:, -1] # Init - pytest.raises(ValueError, CSP, n_components='foo', norm_trace=False) - for reg in ['foo', -0.1, 1.1]: + csp = CSP(n_components="foo") + with pytest.raises(TypeError, match="must be an instance"): + csp.fit(epochs_data, y) + for reg in ["foo", -0.1, 1.1]: csp = CSP(reg=reg, norm_trace=False) pytest.raises(ValueError, csp.fit, epochs_data, epochs.events[:, -1]) - for reg in ['oas', 'ledoit_wolf', 0, 0.5, 1.]: + for reg in ["oas", "ledoit_wolf", 0, 0.5, 1.0]: CSP(reg=reg, norm_trace=False) - for cov_est in ['foo', None]: - pytest.raises(ValueError, CSP, cov_est=cov_est, norm_trace=False) - with pytest.raises(TypeError, match='instance of bool'): - CSP(norm_trace='foo') - for cov_est in ['concat', 'epoch']: - CSP(cov_est=cov_est, norm_trace=False) + csp = CSP(cov_est="foo", norm_trace=False) + with pytest.raises(ValueError, match="Invalid value"): + csp.fit(epochs_data, y) + csp = CSP(norm_trace="foo") + with pytest.raises(TypeError, match="instance of bool"): + csp.fit(epochs_data, y) + for cov_est in ["concat", "epoch"]: + CSP(cov_est=cov_est, norm_trace=False).fit(epochs_data, y) n_components = 3 # Fit @@ -125,66 +169,75 @@ def test_csp(): # Transform X = csp.fit_transform(epochs_data, y) sources = csp.transform(epochs_data) - assert (sources.shape[1] == n_components) - assert (csp.filters_.shape == (n_channels, n_channels)) - assert (csp.patterns_.shape == (n_channels, n_channels)) + assert sources.shape[1] == n_components + assert csp.filters_.shape == (n_channels, n_channels) + assert csp.patterns_.shape == (n_channels, n_channels) assert_array_almost_equal(sources, X) # Test data exception - pytest.raises(ValueError, csp.fit, epochs_data, - np.zeros_like(epochs.events)) - pytest.raises(ValueError, csp.fit, epochs, y) - pytest.raises(ValueError, csp.transform, epochs) + pytest.raises(ValueError, csp.fit, epochs_data, np.zeros_like(epochs.events)) + pytest.raises(ValueError, csp.fit, "foo", y) + pytest.raises(ValueError, csp.transform, "foo") # Test plots - epochs.pick_types(meg='mag') - cmap = ('RdBu', True) + epochs.pick(picks="mag") + cmap = ("RdBu", True) components = np.arange(n_components) for plot in (csp.plot_patterns, csp.plot_filters): plot(epochs.info, components=components, res=12, show=False, cmap=cmap) # Test with more than 2 classes - epochs = Epochs(raw, events, tmin=tmin, tmax=tmax, picks=picks, - event_id=dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4), - baseline=(None, 0), proj=False, preload=True) - epochs_data = epochs.get_data() + epochs = Epochs( + raw, + events, + tmin=tmin, + tmax=tmax, + picks=picks, + event_id=dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4), + baseline=(None, 0), + proj=False, + preload=True, + ) + epochs_data = epochs.get_data(copy=False) n_channels = epochs_data.shape[1] n_channels = epochs_data.shape[1] - for cov_est in ['concat', 'epoch']: + for cov_est in ["concat", "epoch"]: csp = CSP(n_components=n_components, cov_est=cov_est, norm_trace=False) csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data) - assert_equal(len(csp._classes), 4) + assert_equal(len(csp.classes_), 4) assert_array_equal(csp.filters_.shape, 
[n_channels, n_channels]) assert_array_equal(csp.patterns_.shape, [n_channels, n_channels]) # Test average power transform n_components = 2 - assert (csp.transform_into == 'average_power') + assert csp.transform_into == "average_power" feature_shape = [len(epochs_data), n_components] X_trans = dict() for log in (None, True, False): csp = CSP(n_components=n_components, log=log, norm_trace=False) - assert (csp.log is log) + assert csp.log is log Xt = csp.fit_transform(epochs_data, epochs.events[:, 2]) assert_array_equal(Xt.shape, feature_shape) X_trans[str(log)] = Xt # log=None => log=True - assert_array_almost_equal(X_trans['None'], X_trans['True']) + assert_array_almost_equal(X_trans["None"], X_trans["True"]) # Different normalization return different transform - assert (np.sum((X_trans['True'] - X_trans['False']) ** 2) > 1.) + assert np.sum((X_trans["True"] - X_trans["False"]) ** 2) > 1.0 # Check wrong inputs - pytest.raises(ValueError, CSP, transform_into='average_power', log='foo') + csp = CSP(transform_into="average_power", log="foo") + with pytest.raises(TypeError, match="must be an instance of bool"): + csp.fit(epochs_data, epochs.events[:, 2]) # Test csp space transform - csp = CSP(transform_into='csp_space', norm_trace=False) - assert (csp.transform_into == 'csp_space') - for log in ('foo', True, False): - pytest.raises(ValueError, CSP, transform_into='csp_space', log=log, - norm_trace=False) + csp = CSP(transform_into="csp_space", norm_trace=False) + assert csp.transform_into == "csp_space" + for log in ("foo", True, False): + csp = CSP(transform_into="csp_space", log=log, norm_trace=False) + with pytest.raises(TypeError, match="must be an instance"): + csp.fit(epochs_data, epochs.events[:, 2]) n_components = 2 - csp = CSP(n_components=n_components, transform_into='csp_space', - norm_trace=False) + csp = CSP(n_components=n_components, transform_into="csp_space", norm_trace=False) Xt = csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data) feature_shape = [len(epochs_data), n_components, epochs_data.shape[2]] assert_array_equal(Xt.shape, feature_shape) @@ -193,7 +246,7 @@ def test_csp(): y = np.array([100] * 50 + [1] * 50) X, A = simulate_data(y) - for cov_est in ['concat', 'epoch']: + for cov_est in ["concat", "epoch"]: # fit csp csp = CSP(n_components=1, cov_est=cov_est, norm_trace=False) csp.fit(X, y) @@ -209,53 +262,131 @@ def test_csp(): assert np.abs(corr) > 0.95 -@requires_sklearn -def test_regularized_csp(): +# Even the "reg is None and rank is None" case should pass now thanks to the +# do_compute_rank +@pytest.mark.parametrize("ch_type", ("mag", "eeg", ("mag", "eeg"))) +@pytest.mark.parametrize("rank", (None, "full", "correct")) +@pytest.mark.parametrize("reg", [None, 0.001, "oas"]) +def test_regularized_csp(ch_type, rank, reg): """Test Common Spatial Patterns algorithm using regularized covariance.""" - raw = io.read_raw_fif(raw_fname) + raw = io.read_raw_fif(raw_fname).pick(ch_type, exclude="bads").load_data() + n_orig = len(raw.ch_names) + ch_decim = 2 + raw.pick_channels(raw.ch_names[::ch_decim]) + raw.info.normalize_proj() + if "eeg" in ch_type: + raw.set_eeg_reference(projection=True) + # TODO: for some reason we need to add a second EEG projector in order to get + # the non-semidefinite error for EEG data. Hopefully this won't make much + # difference in practice given our default is rank=None and regularization + # is easy to use. 
+ raw.add_proj(compute_proj_raw(raw, n_eeg=1, n_mag=0, n_grad=0, n_jobs=1)) + n_eig = len(raw.ch_names) - len(raw.info["projs"]) + n_ch = n_orig // ch_decim + if ch_type == "eeg": + assert n_eig == n_ch - 2 + elif ch_type == "mag": + assert n_eig == n_ch - 3 + else: + assert n_eig == n_ch - 5 + if rank == "correct": + if isinstance(ch_type, str): + rank = {ch_type: n_eig} + else: + assert ch_type == ("mag", "eeg") + rank = dict( + mag=102 // ch_decim - 3, + eeg=60 // ch_decim - 2, + ) + else: + assert rank is None or rank == "full", rank + if rank == "full": + n_eig = n_ch + raw.filter(2, 40).apply_proj() events = read_events(event_name) - picks = pick_types(raw.info, meg=True, stim=False, ecg=False, - eog=False, exclude='bads') - picks = picks[1:13:3] - epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), preload=True) - epochs_data = epochs.get_data() + # map make left and right events the same + events[events[:, 2] == 2, 2] = 1 + events[events[:, 2] == 4, 2] = 3 + epochs = Epochs(raw, events, event_id, tmin, tmax, decim=5, preload=True) + epochs.equalize_event_counts() + assert 25 < len(epochs) < 30 + epochs_data = epochs.get_data(copy=False) n_channels = epochs_data.shape[1] - + assert n_channels == n_ch n_components = 3 - reg_cov = [None, 0.05, 'ledoit_wolf', 'oas'] - for reg in reg_cov: - csp = CSP(n_components=n_components, reg=reg, norm_trace=False, - rank=None) - csp.fit(epochs_data, epochs.events[:, -1]) - y = epochs.events[:, -1] - X = csp.fit_transform(epochs_data, y) - assert (csp.filters_.shape == (n_channels, n_channels)) - assert (csp.patterns_.shape == (n_channels, n_channels)) - assert_array_almost_equal(csp.fit(epochs_data, y). - transform(epochs_data), X) - # test init exception - pytest.raises(ValueError, csp.fit, epochs_data, - np.zeros_like(epochs.events)) - pytest.raises(ValueError, csp.fit, epochs, y) - pytest.raises(ValueError, csp.transform, epochs) + sc = Scaler(epochs.info) + epochs_data_orig = epochs_data.copy() + epochs_data = sc.fit_transform(epochs_data) + csp = CSP(n_components=n_components, reg=reg, norm_trace=False, rank=rank) + if rank == "full" and reg is None: + with pytest.raises(np.linalg.LinAlgError, match="leading minor"): + csp.fit(epochs_data, epochs.events[:, -1]) + return + with catch_logging(verbose=True) as log: + X = csp.fit_transform(epochs_data, epochs.events[:, -1]) + log = log.getvalue() + assert "Setting small MAG" not in log + if rank != "full": + assert "Setting small data eigen" in log + else: + assert "Setting small data eigen" not in log + if rank is None: + assert "Computing rank from data" in log + assert " mag: rank" not in log.lower() + assert " data: rank" in log + assert "rank (mag)" not in log.lower() + assert "rank (data)" in log + elif rank != "full": # if rank is passed no computation is done + assert "Computing rank" not in log + assert ": rank" not in log + assert "rank (" not in log + assert "reducing mag" not in log.lower() + assert f"Reducing data rank from {n_channels} " in log + y = epochs.events[:, -1] + assert csp.filters_.shape == (n_eig, n_channels) + assert csp.patterns_.shape == (n_eig, n_channels) + assert_array_almost_equal(csp.fit(epochs_data, y).transform(epochs_data), X) - csp.n_components = n_components - sources = csp.transform(epochs_data) - assert (sources.shape[1] == n_components) + # test init exception + pytest.raises(ValueError, csp.fit, epochs_data, np.zeros_like(epochs.events)) + pytest.raises(ValueError, csp.fit, "foo", y) + pytest.raises(ValueError, 
csp.transform, "foo") + + csp.n_components = n_components + sources = csp.transform(epochs_data) + assert sources.shape[1] == n_components + + cv = StratifiedKFold(5) + clf = make_pipeline( + sc, + csp, + LinearModel(LogisticRegression(solver="liblinear")), + ) + score = cross_val_score(clf, epochs_data_orig, y, cv=cv, scoring="roc_auc").mean() + assert 0.75 <= score <= 1.0 + + # Test get_coef on CSP + clf.fit(epochs_data_orig, y) + coef = csp.patterns_[:n_components] + assert coef.shape == (n_components, n_channels), coef.shape + coef = sc.inverse_transform(coef.T[np.newaxis])[0] + assert coef.shape == (len(epochs.ch_names), n_components), coef.shape + coef_mne = get_coef(clf, "patterns_", inverse_transform=True, verbose="debug") + assert coef.shape == coef_mne.shape + coef_mne /= np.linalg.norm(coef_mne, axis=0) + coef /= np.linalg.norm(coef, axis=0) + coef *= np.sign(np.sum(coef_mne * coef, axis=0)) + assert_allclose(coef_mne, coef) -@requires_sklearn def test_csp_pipeline(): """Test if CSP works in a pipeline.""" - from sklearn.svm import SVC - from sklearn.pipeline import Pipeline csp = CSP(reg=1, norm_trace=False) svc = SVC() pipe = Pipeline([("CSP", csp), ("SVC", svc)]) pipe.set_params(CSP__reg=0.2) - assert (pipe.get_params()["CSP__reg"] == 0.2) + assert pipe.get_params()["CSP__reg"] == 0.2 def test_ajd(): @@ -267,15 +398,17 @@ def test_ajd(): seed = np.random.RandomState(0) diags = 2.0 + 0.1 * seed.randn(n_times, n_channels) A = 2 * seed.rand(n_channels, n_channels) - 1 - A /= np.atleast_2d(np.sqrt(np.sum(A ** 2, 1))).T + A /= np.atleast_2d(np.sqrt(np.sum(A**2, 1))).T covmats = np.empty((n_times, n_channels, n_channels)) for i in range(n_times): covmats[i] = np.dot(np.dot(A, np.diag(diags[i])), A.T) V, D = _ajd_pham(covmats) # Results obtained with original matlab implementation - V_matlab = [[-3.507280775058041, -5.498189967306344, 7.720624541198574], - [0.694689013234610, 0.775690358505945, -1.162043086446043], - [-0.592603135588066, -0.598996925696260, 1.009550086271192]] + V_matlab = [ + [-3.507280775058041, -5.498189967306344, 7.720624541198574], + [0.694689013234610, 0.775690358505945, -1.162043086446043], + [-0.592603135588066, -0.598996925696260, 1.009550086271192], + ] assert_array_almost_equal(V, V_matlab) @@ -288,7 +421,7 @@ def test_spoc(): spoc.fit(X, y) Xt = spoc.transform(X) assert_array_equal(Xt.shape, [10, 4]) - spoc = SPoC(n_components=4, transform_into='csp_space') + spoc = SPoC(n_components=4, transform_into="csp_space") spoc.fit(X, y) Xt = spoc.transform(X) assert_array_equal(Xt.shape, [10, 4, 20]) @@ -298,8 +431,8 @@ def test_spoc(): # check y pytest.raises(ValueError, spoc.fit, X, y * 0) - # Check that doesn't take CSP-spcific input - pytest.raises(TypeError, SPoC, cov_est='epoch') + # Check that doesn't take CSP-specific input + pytest.raises(TypeError, SPoC, cov_est="epoch") # Check mixing matrix on simulated data rs = np.random.RandomState(42) @@ -322,36 +455,45 @@ def test_spoc(): def test_csp_twoclass_symmetry(): """Test that CSP is symmetric when swapping classes.""" - x, y = deterministic_toy_data(['class_a', 'class_b']) - csp = CSP(norm_trace=False, transform_into='average_power', log=True) + x, y = deterministic_toy_data(["class_a", "class_b"]) + csp = CSP(norm_trace=False, transform_into="average_power", log=True) log_power = csp.fit_transform(x, y) log_power_ratio_ab = log_power[0] - log_power[1] - x, y = deterministic_toy_data(['class_b', 'class_a']) - csp = CSP(norm_trace=False, transform_into='average_power', log=True) + x, y = 
deterministic_toy_data(["class_b", "class_a"]) + csp = CSP(norm_trace=False, transform_into="average_power", log=True) log_power = csp.fit_transform(x, y) log_power_ratio_ba = log_power[0] - log_power[1] - assert_array_almost_equal(log_power_ratio_ab, - log_power_ratio_ba) + assert_array_almost_equal(log_power_ratio_ab, log_power_ratio_ba) def test_csp_component_ordering(): """Test that CSP component ordering works as expected.""" - x, y = deterministic_toy_data(['class_a', 'class_b']) + x, y = deterministic_toy_data(["class_a", "class_b"]) - pytest.raises(ValueError, CSP, component_order='invalid') + csp = CSP(component_order="invalid") + with pytest.raises(ValueError, match="Invalid value"): + csp.fit(x, y) # component_order='alternate' only works with two classes - csp = CSP(component_order='alternate') + csp = CSP(component_order="alternate") with pytest.raises(ValueError): - csp.fit(np.zeros((3, 0, 0)), ['a', 'b', 'c']) + csp.fit(np.zeros((3, 0, 0)), ["a", "b", "c"]) - p_alt = CSP(component_order='alternate').fit(x, y).patterns_ - p_mut = CSP(component_order='mutual_info').fit(x, y).patterns_ + p_alt = CSP(component_order="alternate").fit(x, y).patterns_ + p_mut = CSP(component_order="mutual_info").fit(x, y).patterns_ # This permutation of p_alt and p_mut is explained by the particular # eigenvalues of the toy data: [0.06, 0.1, 0.5, 0.8]. # p_alt arranges them to [0.8, 0.06, 0.5, 0.1] # p_mut arranges them to [0.06, 0.1, 0.8, 0.5] assert_array_almost_equal(p_alt, p_mut[[2, 0, 3, 1]]) + + +@pytest.mark.filterwarnings("ignore:.*Only one sample available.*") +@parametrize_with_checks([CSP(), SPoC()]) +def test_sklearn_compliance(estimator, check): + """Test compliance with sklearn.""" + pytest.importorskip("sklearn", minversion="1.4") # TODO VERSION remove on 1.4+ + check(estimator) diff --git a/mne/decoding/tests/test_ems.py b/mne/decoding/tests/test_ems.py index b24ebdd75aa..c713e1bce17 100644 --- a/mne/decoding/tests/test_ems.py +++ b/mne/decoding/tests/test_ems.py @@ -1,82 +1,101 @@ -# Author: Denis A. Engemann -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
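# Editor's note: the test_sklearn_compliance helpers added across these test
# modules all follow the same scikit-learn pattern. A minimal standalone
# sketch of that pattern, with a hypothetical test name:
import pytest

pytest.importorskip("sklearn")
from sklearn.utils.estimator_checks import parametrize_with_checks

from mne.decoding import CSP


@parametrize_with_checks([CSP()])
def test_compliance_sketch(estimator, check):
    """Run one generated sklearn check against one estimator instance."""
    check(estimator)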
from pathlib import Path import numpy as np -from numpy.testing import assert_array_almost_equal, assert_equal import pytest +from numpy.testing import assert_array_almost_equal, assert_equal + +pytest.importorskip("sklearn") -from mne import io, Epochs, read_events, pick_types -from mne.utils import requires_sklearn -from mne.decoding import compute_ems, EMS +from sklearn.model_selection import StratifiedKFold +from sklearn.utils.estimator_checks import parametrize_with_checks -data_dir = Path(__file__).parent.parent.parent / "io" / "tests" / "data" +from mne import Epochs, io, pick_types, read_events +from mne.decoding import EMS, compute_ems + +data_dir = Path(__file__).parents[2] / "io" / "tests" / "data" raw_fname = data_dir / "test_raw.fif" event_name = data_dir / "test-eve.fif" tmin, tmax = -0.2, 0.5 event_id = dict(aud_l=1, vis_l=3) -@requires_sklearn def test_ems(): """Test event-matched spatial filters.""" - from sklearn.model_selection import StratifiedKFold raw = io.read_raw_fif(raw_fname, preload=False) # create unequal number of events events = read_events(event_name) events[-2, 2] = 3 - picks = pick_types(raw.info, meg=True, stim=False, ecg=False, - eog=False, exclude='bads') + picks = pick_types( + raw.info, meg=True, stim=False, ecg=False, eog=False, exclude="bads" + ) picks = picks[1:13:3] - epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, - baseline=(None, 0), preload=True) - pytest.raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l']) + epochs = Epochs( + raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True + ) + pytest.raises(ValueError, compute_ems, epochs, ["aud_l", "vis_l"]) epochs.equalize_event_counts(epochs.event_id) - pytest.raises(KeyError, compute_ems, epochs, ['blah', 'hahah']) + pytest.raises(KeyError, compute_ems, epochs, ["blah", "hahah"]) surrogates, filters, conditions = compute_ems(epochs) assert_equal(list(set(conditions)), [1, 3]) events = read_events(event_name) event_id2 = dict(aud_l=1, aud_r=2, vis_l=3) - epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks, - baseline=(None, 0), preload=True) + epochs = Epochs( + raw, + events, + event_id2, + tmin, + tmax, + picks=picks, + baseline=(None, 0), + preload=True, + ) epochs.equalize_event_counts(epochs.event_id) - n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']]) + n_expected = sum([len(epochs[k]) for k in ["aud_l", "vis_l"]]) pytest.raises(ValueError, compute_ems, epochs) - surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l']) + surrogates, filters, conditions = compute_ems(epochs, ["aud_r", "vis_l"]) assert_equal(n_expected, len(surrogates)) assert_equal(n_expected, len(conditions)) assert_equal(list(set(conditions)), [2, 3]) # test compute_ems cv - epochs = epochs['aud_r', 'vis_l'] + epochs = epochs["aud_r", "vis_l"] epochs.equalize_event_counts(epochs.event_id) cv = StratifiedKFold(n_splits=3) compute_ems(epochs, cv=cv) compute_ems(epochs, cv=2) - pytest.raises(ValueError, compute_ems, epochs, cv='foo') + pytest.raises(ValueError, compute_ems, epochs, cv="foo") pytest.raises(ValueError, compute_ems, epochs, cv=len(epochs) + 1) raw.close() # EMS transformer, check that identical to compute_ems - X = epochs.get_data() + X = epochs.get_data(copy=False) y = epochs.events[:, 2] X = X / np.std(X) # X scaled outside cv in compute_ems Xt, coefs = list(), list() ems = EMS() - assert_equal(ems.__repr__(), '') + assert_equal(ems.__repr__(), "") # manual leave-one-out to avoid sklearn version problem for test in 
range(len(y)): train = np.setdiff1d(range(len(y)), np.atleast_1d(test)) ems.fit(X[train], y[train]) coefs.append(ems.filters_) Xt.append(ems.transform(X[[test]])) - assert_equal(ems.__repr__(), '') + assert_equal(ems.__repr__(), "") assert_array_almost_equal(filters, np.mean(coefs, axis=0)) assert_array_almost_equal(surrogates, np.vstack(Xt)) + + +@parametrize_with_checks([EMS()]) +def test_sklearn_compliance(estimator, check): + """Test compliance with sklearn.""" + pytest.importorskip("sklearn", minversion="1.4") # TODO VERSION remove on 1.4+ + check(estimator) diff --git a/mne/decoding/tests/test_ged.py b/mne/decoding/tests/test_ged.py new file mode 100644 index 00000000000..1d951dbf0c1 --- /dev/null +++ b/mne/decoding/tests/test_ged.py @@ -0,0 +1,376 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from functools import partial +from pathlib import Path + +import numpy as np +import pytest + +pytest.importorskip("sklearn") + + +from sklearn.model_selection import ParameterGrid +from sklearn.utils._testing import assert_allclose +from sklearn.utils.estimator_checks import parametrize_with_checks + +from mne import Epochs, compute_rank, create_info, pick_types, read_events +from mne._fiff.proj import make_eeg_average_ref_proj +from mne.cov import Covariance, _regularized_covariance +from mne.decoding._ged import ( + _get_cov_def, + _get_restr_mat, + _handle_restr_mat, + _is_cov_symm, + _smart_ajd, + _smart_ged, +) +from mne.decoding._mod_ged import _no_op_mod +from mne.decoding.base import _GEDTransformer +from mne.io import read_raw + +data_dir = Path(__file__).parents[2] / "io" / "tests" / "data" +raw_fname = data_dir / "test_raw.fif" +event_name = data_dir / "test-eve.fif" +tmin, tmax = -0.1, 0.2 +# if stop is too small pca may fail in some cases, but we're okay on this file +start, stop = 0, 8 + + +def _mock_info(n_channels): + info = create_info(n_channels, 1000.0, "eeg") + avg_eeg_projector = make_eeg_average_ref_proj(info=info, activate=False) + info["projs"].append(avg_eeg_projector) + return info + + +def _get_min_rank(covs, info): + min_rank = dict( + eeg=min( + list( + compute_rank( + Covariance( + cov, + info.ch_names, + list(), + list(), + 0, + # verbose=_verbose_safe_false(), + ), + rank=None, + # _handle_default("scalings_cov_rank", None), + info=info, + ).values() + )[0] + for cov in covs + ) + ) + return min_rank + + +def _mock_cov_callable(X, y, cov_method_params=None, compute_C_ref=True): + if cov_method_params is None: + cov_method_params = dict() + n_epochs, n_channels, n_times = X.shape + + # To pass sklearn check: + if n_channels == 1: + n_channels = 2 + X = np.tile(X, (1, n_channels, 1)) + + # To make covariance estimation sensible + if n_times == 1: + n_times = n_channels + X = np.tile(X, (1, 1, n_channels)) + + classes = np.unique(y) + covs, sample_weights = list(), list() + for ci, this_class in enumerate(classes): + class_data = X[y == this_class] + class_data = class_data.transpose(1, 0, 2).reshape(n_channels, -1) + cov = _regularized_covariance(class_data, **cov_method_params) + covs.append(cov) + sample_weights.append(class_data.shape[0]) + + ref_data = X.transpose(1, 0, 2).reshape(n_channels, -1) + if compute_C_ref: + C_ref = _regularized_covariance(ref_data, **cov_method_params) + else: + C_ref = None + info = _mock_info(n_channels) + rank = _get_min_rank(covs, info) + kwargs = dict() + + # To pass sklearn check: + if len(covs) == 1: + covs.append(covs[0]) + + elif len(covs) > 2: + 
kwargs["sample_weights"] = sample_weights + return covs, C_ref, info, rank, kwargs + + +def _mock_mod_ged_callable(evals, evecs, covs, **kwargs): + sorter = None + if evals is not None: + ix = np.argsort(evals)[::-1] + evals = evals[ix] + evecs = evecs[:, ix] + sorter = ix + return evals, evecs, sorter + + +param_grid = dict( + n_components=[4], + cov_callable=[partial(_mock_cov_callable, cov_method_params=dict(reg="empirical"))], + mod_ged_callable=[_mock_mod_ged_callable], + dec_type=["single", "multi"], + restr_type=["restricting", "whitening"], + R_func=[None, partial(np.sum, axis=0)], +) + +ged_estimators = [_GEDTransformer(**p) for p in ParameterGrid(param_grid)] + + +@pytest.mark.slowtest +@parametrize_with_checks(ged_estimators) +def test_sklearn_compliance(estimator, check): + """Test GEDTransformer compliance with sklearn.""" + check(estimator) + + +def _get_X_y(event_id): + raw = read_raw(raw_fname, preload=False) + events = read_events(event_name) + picks = pick_types( + raw.info, meg=True, stim=False, ecg=False, eog=False, exclude="bads" + ) + picks = picks[2:12:3] # subselect channels -> disable proj! + raw.add_proj([], remove_existing=True) + epochs = Epochs( + raw, + events, + event_id, + tmin, + tmax, + picks=picks, + baseline=(None, 0), + preload=True, + proj=False, + ) + X = epochs.get_data(copy=False, units=dict(eeg="uV", grad="fT/cm", mag="fT")) + y = epochs.events[:, -1] + return X, y + + +def test_ged_binary_cov(): + """Test GEDTransformer on audvis dataset with two covariances.""" + event_id = dict(aud_l=1, vis_l=3) + X, y = _get_X_y(event_id) + # Test "single" decomposition + covs, C_ref, info, rank, kwargs = _mock_cov_callable(X, y) + S, R = covs[0], covs[1] + restr_mat = _get_restr_mat(C_ref, info, rank) + evals, evecs = _smart_ged(S, R, restr_mat=restr_mat, R_func=None) + actual_evals, actual_evecs, sorter = _mock_mod_ged_callable( + evals, evecs, [S, R], **kwargs + ) + actual_filters = actual_evecs.T + + ged = _GEDTransformer( + n_components=4, + cov_callable=_mock_cov_callable, + mod_ged_callable=_mock_mod_ged_callable, + restr_type="restricting", + ) + ged.fit(X, y) + desired_evals = ged.evals_ + desired_filters = ged.filters_ + + assert_allclose(actual_evals, desired_evals) + assert_allclose(actual_filters, desired_filters) + + # Test "multi" decomposition (loop), restr_mat can be reused + all_evals, all_evecs = list(), list() + for i in range(len(covs)): + S = covs[i] + evals, evecs = _smart_ged(S, R, restr_mat) + evals, evecs, sorter = _mock_mod_ged_callable(evals, evecs, covs) + all_evals.append(evals) + all_evecs.append(evecs.T) + actual_evals = np.array(all_evals) + actual_filters = np.array(all_evecs) + + ged = _GEDTransformer( + n_components=4, + cov_callable=_mock_cov_callable, + mod_ged_callable=_mock_mod_ged_callable, + dec_type="multi", + restr_type="restricting", + ) + ged.fit(X, y) + desired_evals = ged.evals_ + desired_filters = ged.filters_ + + assert_allclose(actual_evals, desired_evals) + assert_allclose(actual_filters, desired_filters) + + assert ged._subset_multi_components(name="foo") is None + + +def test_ged_multicov(): + """Test GEDTransformer on audvis dataset with multiple covariances.""" + event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4) + X, y = _get_X_y(event_id) + # Test "single" decomposition for multicov (AJD) with C_ref + covs, C_ref, info, rank, kwargs = _mock_cov_callable(X, y) + restr_mat = _get_restr_mat(C_ref, info, rank) + evecs = _smart_ajd(covs, restr_mat=restr_mat) + evals = None + _, actual_evecs, _ = 
_mock_mod_ged_callable(evals, evecs, covs, **kwargs) + actual_filters = actual_evecs.T + + ged = _GEDTransformer( + n_components=4, + cov_callable=_mock_cov_callable, + mod_ged_callable=_mock_mod_ged_callable, + restr_type="restricting", + ) + ged.fit(X, y) + desired_filters = ged.filters_ + + assert_allclose(actual_filters, desired_filters) + + # Test "multi" decomposition for multicov (loop) + R = covs[-1] + all_evals, all_evecs = list(), list() + for i in range(len(covs)): + S = covs[i] + evals, evecs = _smart_ged(S, R, restr_mat) + evals, evecs, sorter = _mock_mod_ged_callable(evals, evecs, covs) + all_evals.append(evals) + all_evecs.append(evecs.T) + actual_evals = np.array(all_evals) + actual_filters = np.array(all_evecs) + + ged = _GEDTransformer( + n_components=4, + cov_callable=_mock_cov_callable, + mod_ged_callable=_mock_mod_ged_callable, + dec_type="multi", + restr_type="restricting", + ) + ged.fit(X, y) + desired_evals = ged.evals_ + desired_filters = ged.filters_ + + assert_allclose(actual_evals, desired_evals) + assert_allclose(actual_filters, desired_filters) + + # Test "single" decomposition for multicov (AJD) without C_ref + covs, C_ref, info, rank, kwargs = _mock_cov_callable( + X, y, cov_method_params=dict(reg="oas"), compute_C_ref=False + ) + covs = np.stack(covs) + evecs = _smart_ajd(covs, restr_mat=None) + evals = None + _, actual_evecs, _ = _mock_mod_ged_callable(evals, evecs, covs, **kwargs) + actual_filters = actual_evecs.T + + ged = _GEDTransformer( + n_components=4, + cov_callable=partial( + _mock_cov_callable, cov_method_params=dict(reg="oas"), compute_C_ref=False + ), + mod_ged_callable=_mock_mod_ged_callable, + restr_type="restricting", + ) + ged.fit(X, y) + desired_filters = ged.filters_ + + assert_allclose(actual_filters, desired_filters) + + +def test_ged_validation_raises(): + """Test GEDTransofmer validation raises correct errors.""" + event_id = dict(aud_l=1, vis_l=3) + X, y = _get_X_y(event_id) + + ged = _GEDTransformer( + n_components=-1, + cov_callable=_mock_cov_callable, + mod_ged_callable=_mock_mod_ged_callable, + restr_type="restricting", + ) + with pytest.raises(ValueError): + ged.fit(X, y) + + def _bad_cov_callable(X, y, foo): + return X, y, foo + + ged = _GEDTransformer( + n_components=1, + cov_callable=_bad_cov_callable, + mod_ged_callable=_mock_mod_ged_callable, + restr_type="restricting", + ) + with pytest.raises(ValueError): + ged.fit(X, y) + + +def test_ged_invalid_cov(): + """Test _validate_covariances raises proper errors.""" + ged = _GEDTransformer( + n_components=1, + cov_callable=_mock_cov_callable, + mod_ged_callable=_mock_mod_ged_callable, + ) + asymm_cov = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + with pytest.raises(ValueError, match="not symmetric"): + ged._validate_covariances([asymm_cov, None]) + + +def test__handle_restr_mat_invalid_restr_type(): + """Test _handle_restr_mat raises correct error when wrong restr_type.""" + C_ref = np.eye(3) + with pytest.raises(ValueError, match="restr_type"): + _handle_restr_mat(C_ref, restr_type="blah", info=None, rank=None) + + +def test_cov_validators(): + """Test that covariance validators indeed validate.""" + asymm_indef = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + sing_pos_semidef = np.array([[1, 2, 3], [2, 4, 6], [3, 6, 9]]) + pos_def = np.array([[5, 1, 1], [1, 6, 2], [1, 2, 7]]) + + assert not _is_cov_symm(asymm_indef) + assert _get_cov_def(asymm_indef) == "indef" + assert _get_cov_def(sing_pos_semidef) == "pos_semidef" + assert _get_cov_def(pos_def) == "pos_def" + + +def 
test__smart_ajd_raises(): + """Test _smart_ajd raises proper ValueErrors.""" + asymm_indef = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + sing_pos_semidef = np.array([[1, 2, 3], [2, 4, 6], [3, 6, 9]]) + pos_def1 = np.array([[5, 1, 1], [1, 6, 2], [1, 2, 7]]) + pos_def2 = np.array([[10, 1, 2], [1, 12, 3], [2, 3, 15]]) + + bad_covs = np.stack([sing_pos_semidef, asymm_indef, pos_def1]) + with pytest.raises(ValueError, match="positive semi-definite"): + _smart_ajd(bad_covs, restr_mat=pos_def2, weights=None) + + bad_covs = np.stack([sing_pos_semidef, pos_def1, pos_def2]) + with pytest.raises(ValueError, match="positive definite"): + _smart_ajd(bad_covs, restr_mat=None, weights=None) + + +def test__no_op_mod(): + """Test _no_op_mod returns the same evals/evecs objects.""" + evals = np.array([[1, 2], [3, 4]]) + evecs = np.array([0, 1]) + evals_no_op, evecs_no_op, sorter_no_op = _no_op_mod(evals, evecs) + assert evals is evals_no_op + assert evecs is evecs_no_op + assert sorter_no_op is None diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index c5d62fb4c63..db2209f4695 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -1,24 +1,30 @@ -# Authors: Chris Holdgraf -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from pathlib import Path -import pytest import numpy as np +import pytest from numpy import einsum -from numpy.fft import rfft, irfft -from numpy.testing import assert_array_equal, assert_allclose, assert_equal +from numpy.fft import irfft, rfft +from numpy.testing import assert_allclose, assert_array_equal, assert_equal -from mne.utils import requires_sklearn -from mne.decoding import ReceptiveField, TimeDelayingRidge -from mne.decoding.receptive_field import (_delay_time_series, _SCORERS, - _times_to_delays, _delays_to_slice) -from mne.decoding.time_delaying_ridge import (_compute_reg_neighbors, - _compute_corrs) +pytest.importorskip("sklearn") +from sklearn.linear_model import Ridge +from sklearn.utils.estimator_checks import parametrize_with_checks -data_dir = Path(__file__).parent.parent.parent / "io" / "tests" / "data" +from mne.decoding import ReceptiveField, TimeDelayingRidge +from mne.decoding.receptive_field import ( + _SCORERS, + _delay_time_series, + _delays_to_slice, + _times_to_delays, +) +from mne.decoding.time_delaying_ridge import _compute_corrs, _compute_reg_neighbors + +data_dir = Path(__file__).parents[2] / "io" / "tests" / "data" raw_fname = data_dir / "test_raw.fif" event_name = data_dir / "test-eve.fif" @@ -26,50 +32,71 @@ event_id = dict(aud_l=1, vis_l=3) # Loading raw data -n_jobs_test = (1, 'cuda') +n_jobs_test = (1, "cuda") def test_compute_reg_neighbors(): """Test fast calculation of laplacian regularizer.""" for reg_type in ( - ('ridge', 'ridge'), - ('ridge', 'laplacian'), - ('laplacian', 'ridge'), - ('laplacian', 'laplacian')): + ("ridge", "ridge"), + ("ridge", "laplacian"), + ("laplacian", "ridge"), + ("laplacian", "laplacian"), + ): for n_ch_x, n_delays in ( - (1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (1, 4), (4, 1), - (2, 2), (2, 3), (3, 2), (3, 3), - (2, 4), (4, 2), (3, 4), (4, 3), (4, 4), - (5, 4), (4, 5), (5, 5), - (20, 9), (9, 20)): + (1, 1), + (1, 2), + (2, 1), + (1, 3), + (3, 1), + (1, 4), + (4, 1), + (2, 2), + (2, 3), + (3, 2), + (3, 3), + (2, 4), + (4, 2), + (3, 4), + (4, 3), + (4, 4), + (5, 4), + (4, 5), + (5, 5), + (20, 9), + (9, 20), + ): for normed in (True, False): reg_direct = 
_compute_reg_neighbors( - n_ch_x, n_delays, reg_type, 'direct', normed=normed) + n_ch_x, n_delays, reg_type, "direct", normed=normed + ) reg_csgraph = _compute_reg_neighbors( - n_ch_x, n_delays, reg_type, 'csgraph', normed=normed) + n_ch_x, n_delays, reg_type, "csgraph", normed=normed + ) assert_allclose( - reg_direct, reg_csgraph, atol=1e-7, - err_msg='%s: %s' % (reg_type, (n_ch_x, n_delays))) + reg_direct, + reg_csgraph, + atol=1e-7, + err_msg=f"{reg_type}: {(n_ch_x, n_delays)}", + ) -@requires_sklearn def test_rank_deficiency(): """Test signals that are rank deficient.""" # See GH#4253 - from sklearn.linear_model import Ridge N = 256 - fs = 1. + fs = 1.0 tmin, tmax = -50, 100 reg = 0.1 rng = np.random.RandomState(0) eeg = rng.randn(N, 1) eeg *= 100 eeg = rfft(eeg, axis=0) - eeg[N // 4:] = 0 # rank-deficient lowpass + eeg[N // 4 :] = 0 # rank-deficient lowpass eeg = irfft(eeg, axis=0) win = np.hanning(N // 8) win /= win.mean() - y = np.apply_along_axis(np.convolve, 0, eeg, win, mode='same') + y = np.apply_along_axis(np.convolve, 0, eeg, win, mode="same") y += rng.randn(*y.shape) * 100 for est in (Ridge(reg), reg): @@ -101,14 +128,15 @@ def test_time_delay(): ((-2, 0), 1), ((-2, -1), 1), ((-2, -1), 1), - ((0, .2), 10), - ((-.1, .1), 10)] + ((0, 0.2), 10), + ((-0.1, 0.1), 10), + ] for (tmin, tmax), isfreq in test_tlims: # sfreq must be int/float - with pytest.raises(TypeError, match='`sfreq` must be an instance of'): + with pytest.raises(TypeError, match="`sfreq` must be an instance of"): _delay_time_series(X, tmin, tmax, sfreq=[1]) # Delays must be int/float - with pytest.raises(TypeError, match='.*complex.*'): + with pytest.raises(TypeError, match=".*complex.*"): _delay_time_series(X, np.complex128(tmin), tmax, 1) # Make sure swapaxes works start, stop = int(round(tmin * isfreq)), int(round(tmax * isfreq)) + 1 @@ -128,34 +156,35 @@ def test_time_delay(): del_zero = int(round(-tmin * isfreq)) for ii in range(-2, 3): idx = del_zero + ii - err_msg = '[%s,%s] (%s): %s %s' % (tmin, tmax, isfreq, ii, idx) + err_msg = f"[{tmin},{tmax}] ({isfreq}): {ii} {idx}" if 0 <= idx < X_delayed.shape[-1]: if ii == 0: - assert_array_equal(X_delayed[:, :, idx], X, - err_msg=err_msg) + assert_array_equal(X_delayed[:, :, idx], X, err_msg=err_msg) elif ii < 0: # negative delay - assert_array_equal(X_delayed[:ii, :, idx], X[-ii:, :], - err_msg=err_msg) - assert_array_equal(X_delayed[ii:, :, idx], 0.) + assert_array_equal( + X_delayed[:ii, :, idx], X[-ii:, :], err_msg=err_msg + ) + assert_array_equal(X_delayed[ii:, :, idx], 0.0) else: - assert_array_equal(X_delayed[ii:, :, idx], X[:-ii, :], - err_msg=err_msg) - assert_array_equal(X_delayed[:ii, :, idx], 0.) 
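# Editor's note: a small sketch of the delayed design matrix checked in this
# test, assuming the private helper keeps the (n_times, n_features, n_delays)
# output layout these assertions rely on; the toy array is invented.
import numpy as np
from mne.decoding.receptive_field import _delay_time_series

X_toy = np.arange(10, dtype=float).reshape(5, 2)   # 5 samples, 2 features
X_toy_del = _delay_time_series(X_toy, -1, 1, 1.0)  # delays -1, 0, +1
assert X_toy_del.shape == (5, 2, 3)                # one slice per delay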
+ assert_array_equal( + X_delayed[ii:, :, idx], X[:-ii, :], err_msg=err_msg + ) + assert_array_equal(X_delayed[:ii, :, idx], 0.0) @pytest.mark.slowtest # slow on Azure -@pytest.mark.parametrize('n_jobs', n_jobs_test) -@requires_sklearn +@pytest.mark.parametrize("n_jobs", n_jobs_test) +@pytest.mark.filterwarnings("ignore:Estimator .* has no __sklearn_tags__.*") +@pytest.mark.filterwarnings("ignore:The following error was .*__sklearn_tags__.*") def test_receptive_field_basic(n_jobs): """Test model prep and fitting.""" - from sklearn.linear_model import Ridge # Make sure estimator pulling works mod = Ridge() rng = np.random.RandomState(1337) # Test the receptive field model # Define parameters for the model and simulate inputs + weights - tmin, tmax = -10., 0 + tmin, tmax = -10.0, 0 n_feats = 3 rng = np.random.RandomState(0) X = rng.randn(10000, n_feats) @@ -163,82 +192,84 @@ def test_receptive_field_basic(n_jobs): # Delay inputs and cut off first 4 values since they'll be cut in the fit X_del = np.concatenate( - _delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1), axis=1) + _delay_time_series(X, tmin, tmax, 1.0).transpose(2, 0, 1), axis=1 + ) y = np.dot(X_del, w) # Fit the model and test values - feature_names = ['feature_%i' % ii for ii in [0, 1, 2]] - rf = ReceptiveField(tmin, tmax, 1, feature_names, estimator=mod, - patterns=True) + feature_names = [f"feature_{ii}" for ii in [0, 1, 2]] + rf = ReceptiveField(tmin, tmax, 1, feature_names, estimator=mod, patterns=True) rf.fit(X, y) + assert rf.coef_.shape == (3, 11) assert_array_equal(rf.delays_, np.arange(tmin, tmax + 1)) y_pred = rf.predict(X) assert_allclose(y[rf.valid_samples_], y_pred[rf.valid_samples_], atol=1e-2) scores = rf.score(X, y) - assert scores > .99 + assert scores > 0.99 assert_allclose(rf.coef_.T.ravel(), w, atol=1e-3) # Make sure different input shapes work - rf.fit(X[:, np.newaxis:], y[:, np.newaxis]) + rf.fit(X[:, np.newaxis :], y[:, np.newaxis]) rf.fit(X, y[:, np.newaxis]) - with pytest.raises(ValueError, match='If X has 3 .* y must have 2 or 3'): + with pytest.raises(ValueError, match="If X has 3 .* y must have 2 or 3"): rf.fit(X[..., np.newaxis], y) - with pytest.raises(ValueError, match='X must be shape'): + with pytest.raises(ValueError, match="X must be shape"): rf.fit(X[:, 0], y) - with pytest.raises(ValueError, match='X and y do not have the same n_epo'): - rf.fit(X[:, np.newaxis], np.tile(y[:, np.newaxis, np.newaxis], - [1, 2, 1])) - with pytest.raises(ValueError, match='X and y do not have the same n_tim'): + with pytest.raises(ValueError, match="X and y do not have the same n_epo"): + rf.fit(X[:, np.newaxis], np.tile(y[:, np.newaxis, np.newaxis], [1, 2, 1])) + with pytest.raises(ValueError, match="X and y do not have the same n_tim"): rf.fit(X, y[:-2]) - with pytest.raises(ValueError, match='n_features in X does not match'): + with pytest.raises(ValueError, match="n_features in X does not match"): rf.fit(X[:, :1], y) # auto-naming features - feature_names = ['feature_%s' % ii for ii in [0, 1, 2]] - rf = ReceptiveField(tmin, tmax, 1, estimator=mod, - feature_names=feature_names) + feature_names = [f"feature_{ii}" for ii in [0, 1, 2]] + rf = ReceptiveField(tmin, tmax, 1, estimator=mod, feature_names=feature_names) assert_equal(rf.feature_names, feature_names) rf = ReceptiveField(tmin, tmax, 1, estimator=mod) rf.fit(X, y) assert_equal(rf.feature_names, None) # Float becomes ridge - rf = ReceptiveField(tmin, tmax, 1, ['one', 'two', 'three'], estimator=0) + rf = ReceptiveField(tmin, tmax, 1, ["one", "two", 
"three"], estimator=0) str(rf) # repr works before fit rf.fit(X, y) assert isinstance(rf.estimator_, TimeDelayingRidge) str(rf) # repr works after fit - rf = ReceptiveField(tmin, tmax, 1, ['one'], estimator=0) + rf = ReceptiveField(tmin, tmax, 1, ["one"], estimator=0) rf.fit(X[:, [0]], y) str(rf) # repr with one feature # Should only accept estimators or floats - with pytest.raises(ValueError, match='`estimator` must be a float or'): - ReceptiveField(tmin, tmax, 1, estimator='foo').fit(X, y) - with pytest.raises(ValueError, match='`estimator` must be a float or'): + with pytest.raises((ValueError, AttributeError)): + ReceptiveField(tmin, tmax, 1, estimator="foo").fit(X, y) + with pytest.raises((ValueError, AttributeError)): ReceptiveField(tmin, tmax, 1, estimator=np.array([1, 2, 3])).fit(X, y) - with pytest.raises(ValueError, match='tmin .* must be at most tmax'): + with pytest.raises(ValueError, match="tmin .* must be at most tmax"): ReceptiveField(5, 4, 1).fit(X, y) # scorers for key, val in _SCORERS.items(): - rf = ReceptiveField(tmin, tmax, 1, ['one'], - estimator=0, scoring=key, patterns=True) + rf = ReceptiveField( + tmin, tmax, 1, ["one"], estimator=0, scoring=key, patterns=True + ) rf.fit(X[:, [0]], y) y_pred = rf.predict(X[:, [0]]).T.ravel()[:, np.newaxis] - assert_allclose(val(y[:, np.newaxis], y_pred, - multioutput='raw_values'), - rf.score(X[:, [0]], y), rtol=1e-2) - with pytest.raises(ValueError, match='inputs must be shape'): - _SCORERS['corrcoef'](y.ravel(), y_pred, multioutput='raw_values') + assert_allclose( + val(y[:, np.newaxis], y_pred, multioutput="raw_values"), + rf.score(X[:, [0]], y), + rtol=1e-2, + ) + with pytest.raises(ValueError, match="inputs must be shape"): + _SCORERS["corrcoef"](y.ravel(), y_pred, multioutput="raw_values") # Need correct scorers - with pytest.raises(ValueError, match='scoring must be one of'): - ReceptiveField(tmin, tmax, 1., scoring='foo').fit(X, y) + with pytest.raises(ValueError, match="scoring must be one of"): + ReceptiveField(tmin, tmax, 1.0, scoring="foo").fit(X, y) -@pytest.mark.parametrize('n_jobs', n_jobs_test) +@pytest.mark.parametrize("n_jobs", n_jobs_test) def test_time_delaying_fast_calc(n_jobs): """Test time delaying and fast calculations.""" X = np.array([[1, 2, 3], [5, 7, 11]]).T # all negative smin, smax = 1, 2 - X_del = _delay_time_series(X, smin, smax, 1.) + X_del = _delay_time_series(X, smin, smax, 1.0) # (n_times, n_features, n_delays) -> (n_times, n_features * n_delays) X_del.shape = (X.shape[0], -1) expected = np.array([[0, 1, 2], [0, 0, 1], [0, 5, 7], [0, 0, 5]]).T @@ -250,30 +281,32 @@ def test_time_delaying_fast_calc(n_jobs): assert_allclose(x_xt, expected) # all positive smin, smax = -2, -1 - X_del = _delay_time_series(X, smin, smax, 1.) + X_del = _delay_time_series(X, smin, smax, 1.0) X_del.shape = (X.shape[0], -1) expected = np.array([[3, 0, 0], [2, 3, 0], [11, 0, 0], [7, 11, 0]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) - expected = [[9, 6, 33, 21], [6, 13, 22, 47], - [33, 22, 121, 77], [21, 47, 77, 170]] + expected = [[9, 6, 33, 21], [6, 13, 22, 47], [33, 22, 121, 77], [21, 47, 77, 170]] assert_allclose(Xt_X, expected) x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0] assert_allclose(x_xt, expected) # both sides smin, smax = -1, 1 - X_del = _delay_time_series(X, smin, smax, 1.) 
+ X_del = _delay_time_series(X, smin, smax, 1.0) X_del.shape = (X.shape[0], -1) - expected = np.array([[2, 3, 0], [1, 2, 3], [0, 1, 2], - [7, 11, 0], [5, 7, 11], [0, 5, 7]]).T + expected = np.array( + [[2, 3, 0], [1, 2, 3], [0, 1, 2], [7, 11, 0], [5, 7, 11], [0, 5, 7]] + ).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) - expected = [[13, 8, 3, 47, 31, 15], - [8, 14, 8, 29, 52, 31], - [3, 8, 5, 11, 29, 19], - [47, 29, 11, 170, 112, 55], - [31, 52, 29, 112, 195, 112], - [15, 31, 19, 55, 112, 74]] + expected = [ + [13, 8, 3, 47, 31, 15], + [8, 14, 8, 29, 52, 31], + [3, 8, 5, 11, 29, 19], + [47, 29, 11, 170, 112, 55], + [31, 52, 29, 112, 195, 112], + [15, 31, 19, 55, 112, 74], + ] assert_allclose(Xt_X, expected) x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0] assert_allclose(x_xt, expected) @@ -281,10 +314,9 @@ def test_time_delaying_fast_calc(n_jobs): # slightly harder to get the non-Toeplitz correction correct X = np.array([[1, 2, 3, 5]]).T smin, smax = 0, 3 - X_del = _delay_time_series(X, smin, smax, 1.) + X_del = _delay_time_series(X, smin, smax, 1.0) X_del.shape = (X.shape[0], -1) - expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3], - [0, 0, 1, 2], [0, 0, 0, 1]]).T + expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3], [0, 0, 1, 2], [0, 0, 0, 1]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) expected = [[39, 23, 13, 5], [23, 14, 8, 3], [13, 8, 5, 2], [5, 3, 2, 1]] @@ -295,18 +327,23 @@ def test_time_delaying_fast_calc(n_jobs): # even worse X = np.array([[1, 2, 3], [5, 7, 11]]).T smin, smax = 0, 2 - X_del = _delay_time_series(X, smin, smax, 1.) + X_del = _delay_time_series(X, smin, smax, 1.0) X_del.shape = (X.shape[0], -1) - expected = np.array([[1, 2, 3], [0, 1, 2], [0, 0, 1], - [5, 7, 11], [0, 5, 7], [0, 0, 5]]).T + expected = np.array( + [[1, 2, 3], [0, 1, 2], [0, 0, 1], [5, 7, 11], [0, 5, 7], [0, 0, 5]] + ).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) - expected = np.array([[14, 8, 3, 52, 31, 15], - [8, 5, 2, 29, 19, 10], - [3, 2, 1, 11, 7, 5], - [52, 29, 11, 195, 112, 55], - [31, 19, 7, 112, 74, 35], - [15, 10, 5, 55, 35, 25]]) + expected = np.array( + [ + [14, 8, 3, 52, 31, 15], + [8, 5, 2, 29, 19, 10], + [3, 2, 1, 11, 7, 5], + [52, 29, 11, 195, 112, 55], + [31, 19, 7, 112, 74, 35], + [15, 10, 5, 55, 35, 25], + ] + ) assert_allclose(Xt_X, expected) x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0] assert_allclose(x_xt, expected) @@ -323,10 +360,10 @@ def test_time_delaying_fast_calc(n_jobs): for ii in range(X.shape[1]): kernel = rng.randn(smax - smin + 1) kernel -= np.mean(kernel) - y[:, ii % y.shape[-1]] = np.convolve(X[:, ii], kernel, 'same') + y[:, ii % y.shape[-1]] = np.convolve(X[:, ii], kernel, "same") x_xt, x_yt, n_ch_x, _, _ = _compute_corrs(X, y, smin, smax + 1) - X_del = _delay_time_series(X, smin, smax, 1., fill_mean=False) - x_yt_true = einsum('tfd,to->ofd', X_del, y) + X_del = _delay_time_series(X, smin, smax, 1.0, fill_mean=False) + x_yt_true = einsum("tfd,to->ofd", X_del, y) x_yt_true = np.reshape(x_yt_true, (x_yt_true.shape[0], -1)).T assert_allclose(x_yt, x_yt_true, atol=1e-7, err_msg=(smin, smax)) X_del.shape = (X.shape[0], -1) @@ -334,11 +371,9 @@ def test_time_delaying_fast_calc(n_jobs): assert_allclose(x_xt, x_xt_true, atol=1e-7, err_msg=(smin, smax)) -@pytest.mark.parametrize('n_jobs', n_jobs_test) -@requires_sklearn +@pytest.mark.parametrize("n_jobs", n_jobs_test) def test_receptive_field_1d(n_jobs): """Test that the fast solving works like Ridge.""" - from 
sklearn.linear_model import Ridge rng = np.random.RandomState(0) x = rng.randn(500, 1) for delay in range(-2, 3): @@ -356,22 +391,26 @@ def test_receptive_field_1d(n_jobs): y.shape = (y.shape[0],) + (1,) * (ndim - 1) for slim in slims: smin, smax = slim - lap = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian', - fit_intercept=False, n_jobs=n_jobs) - for estimator in (Ridge(alpha=0.), Ridge(alpha=0.1), 0., 0.1, - lap): + lap = TimeDelayingRidge( + smin, + smax, + 1.0, + 0.1, + "laplacian", + fit_intercept=False, + n_jobs=n_jobs, + ) + for estimator in (Ridge(alpha=0.0), Ridge(alpha=0.1), 0.0, 0.1, lap): for offset in (-100, 0, 100): - model = ReceptiveField(smin, smax, 1., - estimator=estimator, - n_jobs=n_jobs) + model = ReceptiveField( + smin, smax, 1.0, estimator=estimator, n_jobs=n_jobs + ) use_x = x + offset model.fit(use_x, y) if estimator is lap: continue # these checks are too stringent - assert_allclose(model.estimator_.intercept_, -offset, - atol=1e-1) - assert_array_equal(model.delays_, - np.arange(smin, smax + 1)) + assert_allclose(model.estimator_.intercept_, -offset, atol=1e-1) + assert_array_equal(model.delays_, np.arange(smin, smax + 1)) expected = (model.delays_ == delay).astype(float) expected = expected[np.newaxis] # features if y.ndim == 2: @@ -383,16 +422,16 @@ def test_receptive_field_1d(n_jobs): assert stop - start >= 495 assert_allclose( model.predict(use_x)[model.valid_samples_], - y[model.valid_samples_], atol=1e-2) + y[model.valid_samples_], + atol=1e-2, + ) score = np.mean(model.score(use_x, y)) assert score > 0.9999 -@pytest.mark.parametrize('n_jobs', n_jobs_test) -@requires_sklearn +@pytest.mark.parametrize("n_jobs", n_jobs_test) def test_receptive_field_nd(n_jobs): """Test multidimensional support.""" - from sklearn.linear_model import Ridge # multidimensional rng = np.random.RandomState(3) x = rng.randn(1000, 3) @@ -407,55 +446,57 @@ def test_receptive_field_nd(n_jobs): x -= np.mean(x, axis=0) x_off = x + 1e3 expected = [ - [[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 4, 0], - [0, 0, 2, 0, 0, 0]], - [[0, 0, 0, -3, 0, 0], - [0, -1, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]], + [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 4, 0], [0, 0, 2, 0, 0, 0]], + [[0, 0, 0, -3, 0, 0], [0, -1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], ] - tdr_l = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian', n_jobs=n_jobs) - tdr_nc = TimeDelayingRidge(smin, smax, 1., 0.1, n_jobs=n_jobs, - edge_correction=False) - for estimator, atol in zip((Ridge(alpha=0.), 0., 0.01, tdr_l, tdr_nc), - (1e-3, 1e-3, 1e-3, 5e-3, 5e-2)): - model = ReceptiveField(smin, smax, 1., - estimator=estimator) + tdr_l = TimeDelayingRidge(smin, smax, 1.0, 0.1, "laplacian", n_jobs=n_jobs) + tdr_nc = TimeDelayingRidge( + smin, smax, 1.0, 0.1, n_jobs=n_jobs, edge_correction=False + ) + for estimator, atol in zip( + (Ridge(alpha=0.0), 0.0, 0.01, tdr_l, tdr_nc), (1e-3, 1e-3, 1e-3, 5e-3, 5e-2) + ): + model = ReceptiveField(smin, smax, 1.0, estimator=estimator) model.fit(x, y) - assert_array_equal(model.delays_, - np.arange(smin, smax + 1)) + assert_array_equal(model.delays_, np.arange(smin, smax + 1)) assert_allclose(model.coef_, expected, atol=atol) - tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type='foo', - n_jobs=n_jobs) - model = ReceptiveField(smin, smax, 1., estimator=tdr) - with pytest.raises(ValueError, match='reg_type entries must be one of'): + tdr = TimeDelayingRidge(smin, smax, 1.0, 0.01, reg_type="foo", n_jobs=n_jobs) + model = ReceptiveField(smin, smax, 1.0, estimator=tdr) + with pytest.raises(ValueError, match="reg_type entries must 
be one of"): model.fit(x, y) - tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type=['laplacian'], - n_jobs=n_jobs) - model = ReceptiveField(smin, smax, 1., estimator=tdr) - with pytest.raises(ValueError, match='reg_type must have two elements'): + tdr = TimeDelayingRidge( + smin, smax, 1.0, 0.01, reg_type=["laplacian"], n_jobs=n_jobs + ) + model = ReceptiveField(smin, smax, 1.0, estimator=tdr) + with pytest.raises(ValueError, match="reg_type must have two elements"): model.fit(x, y) model = ReceptiveField(smin, smax, 1, estimator=tdr, fit_intercept=False) - with pytest.raises(ValueError, match='fit_intercept'): + with pytest.raises(ValueError, match="fit_intercept"): model.fit(x, y) # Now check the intercept_ - tdr = TimeDelayingRidge(smin, smax, 1., 0., n_jobs=n_jobs) - tdr_no = TimeDelayingRidge(smin, smax, 1., 0., fit_intercept=False, - n_jobs=n_jobs) - for estimator in (Ridge(alpha=0.), tdr, - Ridge(alpha=0., fit_intercept=False), tdr_no): + tdr = TimeDelayingRidge(smin, smax, 1.0, 0.0, n_jobs=n_jobs) + tdr_no = TimeDelayingRidge(smin, smax, 1.0, 0.0, fit_intercept=False, n_jobs=n_jobs) + for estimator in ( + Ridge(alpha=0.0), + tdr, + Ridge(alpha=0.0, fit_intercept=False), + tdr_no, + ): # first with no intercept in the data - model = ReceptiveField(smin, smax, 1., estimator=estimator) + model = ReceptiveField(smin, smax, 1.0, estimator=estimator) model.fit(x, y) - assert_allclose(model.estimator_.intercept_, 0., atol=1e-7, - err_msg=repr(estimator)) - assert_allclose(model.coef_, expected, atol=1e-3, - err_msg=repr(estimator)) + assert_allclose( + model.estimator_.intercept_, 0.0, atol=1e-7, err_msg=repr(estimator) + ) + assert_allclose(model.coef_, expected, atol=1e-3, err_msg=repr(estimator)) y_pred = model.predict(x) - assert_allclose(y_pred[model.valid_samples_], - y[model.valid_samples_], - atol=1e-2, err_msg=repr(estimator)) + assert_allclose( + y_pred[model.valid_samples_], + y[model.valid_samples_], + atol=1e-2, + err_msg=repr(estimator), + ) score = np.mean(model.score(x, y)) assert score > 0.9999 @@ -466,12 +507,14 @@ def test_receptive_field_nd(n_jobs): itol = 0.5 ctol = 5e-4 else: - val = itol = 0. - ctol = 2. 
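# Editor's note: an illustrative sketch of the estimator options exercised in
# these receptive-field tests: a bare float becomes a ridge penalty, and a
# preconfigured TimeDelayingRidge (e.g. with Laplacian regularization) can be
# passed directly, with its lag window kept identical to the ReceptiveField's.
# The alphas, lags, and toy data below are invented.
import numpy as np
from mne.decoding import ReceptiveField, TimeDelayingRidge

rng = np.random.RandomState(0)
x_toy = rng.randn(200, 2)                               # (n_times, n_features)
y_toy = rng.randn(200)
rf_float = ReceptiveField(-2, 2, 1.0, estimator=0.1)    # float -> ridge alpha
rf_float.fit(x_toy, y_toy)
tdr_toy = TimeDelayingRidge(-2, 2, 1.0, 0.1, reg_type="laplacian")
rf_tdr = ReceptiveField(-2, 2, 1.0, estimator=tdr_toy)  # same lag window
rf_tdr.fit(x_toy, y_toy)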
- assert_allclose(model.estimator_.intercept_, val, atol=itol, - err_msg=repr(estimator)) - assert_allclose(model.coef_, expected, atol=ctol, rtol=ctol, - err_msg=repr(estimator)) + val = itol = 0.0 + ctol = 2.0 + assert_allclose( + model.estimator_.intercept_, val, atol=itol, err_msg=repr(estimator) + ) + assert_allclose( + model.coef_, expected, atol=ctol, rtol=ctol, err_msg=repr(estimator) + ) if estimator.fit_intercept: ptol = 1e-2 stol = 0.999999 @@ -479,13 +522,14 @@ def test_receptive_field_nd(n_jobs): ptol = 10 stol = 0.6 y_pred = model.predict(x_off)[model.valid_samples_] - assert_allclose(y_pred, y[model.valid_samples_], - atol=ptol, err_msg=repr(estimator)) + assert_allclose( + y_pred, y[model.valid_samples_], atol=ptol, err_msg=repr(estimator) + ) score = np.mean(model.score(x_off, y)) assert score > stol, estimator - model = ReceptiveField(smin, smax, 1., fit_intercept=False) + model = ReceptiveField(smin, smax, 1.0, fit_intercept=False) model.fit(x_off, y) - assert_allclose(model.estimator_.intercept_, 0., atol=1e-7) + assert_allclose(model.estimator_.intercept_, 0.0, atol=1e-7) score = np.mean(model.score(x_off, y)) assert score > 0.6 @@ -496,35 +540,33 @@ def _make_data(n_feats, n_targets, n_samples, tmin, tmax): w = rng.randn(int((tmax - tmin) + 1) * n_feats, n_targets) # Delay inputs X_del = np.concatenate( - _delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1), axis=1) + _delay_time_series(X, tmin, tmax, 1.0).transpose(2, 0, 1), axis=1 + ) y = np.dot(X_del, w) return X, y -@requires_sklearn def test_inverse_coef(): """Test inverse coefficients computation.""" - from sklearn.linear_model import Ridge - - tmin, tmax = 0., 10. + tmin, tmax = 0.0, 10.0 n_feats, n_targets, n_samples = 3, 2, 1000 n_delays = int((tmax - tmin) + 1) # Check coefficient dims, for all estimator types X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax) - tdr = TimeDelayingRidge(tmin, tmax, 1., 0.1, 'laplacian') - for estimator in (0., 0.01, Ridge(alpha=0.), tdr): - rf = ReceptiveField(tmin, tmax, 1., estimator=estimator, - patterns=True) + tdr = TimeDelayingRidge(tmin, tmax, 1.0, 0.1, "laplacian") + for estimator in (0.0, 0.01, Ridge(alpha=0.0), tdr): + rf = ReceptiveField(tmin, tmax, 1.0, estimator=estimator, patterns=True) rf.fit(X, y) - inv_rf = ReceptiveField(tmin, tmax, 1., estimator=estimator, - patterns=True) + inv_rf = ReceptiveField(tmin, tmax, 1.0, estimator=estimator, patterns=True) inv_rf.fit(y, X) - assert_array_equal(rf.coef_.shape, rf.patterns_.shape, - (n_targets, n_feats, n_delays)) - assert_array_equal(inv_rf.coef_.shape, inv_rf.patterns_.shape, - (n_feats, n_targets, n_delays)) + assert_array_equal( + rf.coef_.shape, rf.patterns_.shape, (n_targets, n_feats, n_delays) + ) + assert_array_equal( + inv_rf.coef_.shape, inv_rf.patterns_.shape, (n_feats, n_targets, n_delays) + ) # we should have np.dot(patterns.T,coef) ~ np.eye(n) c0 = rf.coef_.reshape(n_targets, n_feats * n_delays) @@ -532,14 +574,44 @@ def test_inverse_coef(): assert_allclose(np.dot(c0, c1.T), np.eye(c0.shape[0]), atol=0.2) -@requires_sklearn def test_linalg_warning(): """Test that warnings are issued when no regularization is applied.""" - from sklearn.linear_model import Ridge n_feats, n_targets, n_samples = 5, 60, 50 X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax) - for estimator in (0., Ridge(alpha=0.)): - rf = ReceptiveField(tmin, tmax, 1., estimator=estimator) - with pytest.warns((RuntimeWarning, UserWarning), - match='[Singular|scipy.linalg.solve]'): + for estimator in (0.0, 
Ridge(alpha=0.0)): + rf = ReceptiveField(tmin, tmax, 1.0, estimator=estimator) + with pytest.warns( + (RuntimeWarning, UserWarning), match="[Singular|scipy.linalg.solve]" + ): rf.fit(y, X) + + +@parametrize_with_checks([TimeDelayingRidge(0, 10, 1.0, 0.1, "laplacian", n_jobs=1)]) +def test_tdr_sklearn_compliance(estimator, check): + """Test sklearn estimator compliance.""" + pytest.importorskip("sklearn", minversion="1.4") # TODO VERSION remove on 1.4+ + ignores = ( + # TDR convolves and thus its output cannot be invariant when + # shuffled or subsampled. + "check_methods_sample_order_invariance", + "check_methods_subset_invariance", + ) + if any(ignore in str(check) for ignore in ignores): + return + check(estimator) + + +@pytest.mark.filterwarnings("ignore:.*invalid value encountered in subtract.*:") +@parametrize_with_checks([ReceptiveField(-1, 2, 1.0, estimator=Ridge(), patterns=True)]) +def test_rf_sklearn_compliance(estimator, check): + """Test sklearn RF compliance.""" + pytest.importorskip("sklearn", minversion="1.4") # TODO VERSION remove on 1.4+ + ignores = ( + # RF does time-lagging, so its output cannot be invariant when + # shuffled or subsampled. + "check_methods_sample_order_invariance", + "check_methods_subset_invariance", + ) + if any(ignore in str(check) for ignore in ignores): + return + check(estimator) diff --git a/mne/decoding/tests/test_search_light.py b/mne/decoding/tests/test_search_light.py index 1bc4f1e1e9a..56d239c1bcc 100644 --- a/mne/decoding/tests/test_search_light.py +++ b/mne/decoding/tests/test_search_light.py @@ -1,16 +1,32 @@ -# Author: Jean-Remi King, -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. +import platform from inspect import signature import numpy as np -from numpy.testing import assert_array_equal, assert_equal import pytest +from numpy.testing import assert_array_equal, assert_equal + +sklearn = pytest.importorskip("sklearn") -from mne.utils import requires_sklearn, _record_warnings, use_log_level -from mne.decoding.search_light import SlidingEstimator, GeneralizingEstimator +from sklearn.base import BaseEstimator, clone, is_classifier +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis +from sklearn.ensemble import BaggingClassifier +from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge +from sklearn.metrics import make_scorer, roc_auc_score +from sklearn.model_selection import cross_val_predict +from sklearn.multiclass import OneVsRestClassifier +from sklearn.pipeline import make_pipeline +from sklearn.svm import SVC +from sklearn.utils.estimator_checks import parametrize_with_checks + +from mne.decoding.search_light import GeneralizingEstimator, SlidingEstimator from mne.decoding.transformer import Vectorizer +from mne.utils import check_version, use_log_level + +NEW_MULTICLASS_SAMPLE_WEIGHT = check_version("sklearn", "1.4") def make_data(): @@ -25,70 +41,75 @@ def make_data(): return X, y -@requires_sklearn -def test_search_light(): +def test_search_light_basic(): """Test SlidingEstimator.""" - from sklearn.linear_model import Ridge, LogisticRegression - from sklearn.pipeline import make_pipeline - from sklearn.metrics import roc_auc_score, make_scorer - with _record_warnings(): # NumPy module import - from sklearn.ensemble import BaggingClassifier - from sklearn.base import is_classifier + # https://github.com/scikit-learn/scikit-learn/issues/27711 + if platform.system() == "Windows" and check_version("numpy", "2.0.0.dev0"): + 
pytest.skip("sklearn int_t / long long mismatch") - logreg = LogisticRegression(solver='liblinear', multi_class='ovr', - random_state=0) + logreg = OneVsRestClassifier(LogisticRegression(solver="liblinear", random_state=0)) X, y = make_data() n_epochs, _, n_time = X.shape # init - pytest.raises(ValueError, SlidingEstimator, 'foo') + sl = SlidingEstimator("foo") + with pytest.raises(ValueError, match="must be"): + sl.fit(X, y) sl = SlidingEstimator(Ridge()) - assert (not is_classifier(sl)) - sl = SlidingEstimator(LogisticRegression(solver='liblinear')) - assert (is_classifier(sl)) + assert not is_classifier(sl) + sl = SlidingEstimator(LogisticRegression(solver="liblinear")) + assert is_classifier(sl.base_estimator) + assert is_classifier(sl) # fit - assert_equal(sl.__repr__()[:18], '') + assert_equal(sl.__repr__()[-28:], ", fitted with 10 estimators>") pytest.raises(ValueError, sl.fit, X[1:], y) pytest.raises(ValueError, sl.fit, X[:, :, 0], y) sl.fit(X, y, sample_weight=np.ones_like(y)) # transforms pytest.raises(ValueError, sl.predict, X[:, :, :2]) + y_trans = sl.transform(X) + assert X.dtype == float + assert y_trans.dtype == float y_pred = sl.predict(X) - assert (y_pred.dtype == int) + assert y_pred.dtype == np.dtype(int) assert_array_equal(y_pred.shape, [n_epochs, n_time]) y_proba = sl.predict_proba(X) - assert (y_proba.dtype == float) + assert y_proba.dtype == np.dtype(float) assert_array_equal(y_proba.shape, [n_epochs, n_time, 2]) # score score = sl.score(X, y) assert_array_equal(score.shape, [n_time]) - assert (np.sum(np.abs(score)) != 0) - assert (score.dtype == float) + assert np.sum(np.abs(score)) != 0 + assert score.dtype == np.dtype(float) sl = SlidingEstimator(logreg) assert_equal(sl.scoring, None) # Scoring method - for scoring in ['foo', 999]: + for scoring in ["foo", 999]: sl = SlidingEstimator(logreg, scoring=scoring) sl.fit(X, y) pytest.raises((ValueError, TypeError), sl.score, X, y) # Check sklearn's roc_auc fix: scikit-learn/scikit-learn#6874 # -- 3 class problem - sl = SlidingEstimator(logreg, scoring='roc_auc') + sl = SlidingEstimator(logreg, scoring="roc_auc") y = np.arange(len(X)) % 3 sl.fit(X, y) - with pytest.raises(ValueError, match='for two-class'): + with pytest.raises(ValueError, match="for two-class"): sl.score(X, y) # But check that valid ones should work with new enough sklearn - if 'multi_class' in signature(roc_auc_score).parameters: - scoring = make_scorer( - roc_auc_score, needs_proba=True, multi_class='ovo') + kwargs = dict() + if check_version("sklearn", "1.4"): + kwargs["response_method"] = "predict_proba" + else: + kwargs["needs_proba"] = True + if "multi_class" in signature(roc_auc_score).parameters: + scoring = make_scorer(roc_auc_score, multi_class="ovo", **kwargs) sl = SlidingEstimator(logreg, scoring=scoring) sl.fit(X, y) sl.score(X, y) # smoke test @@ -97,8 +118,10 @@ def test_search_light(): y = np.arange(len(X)) % 2 + 1 sl.fit(X, y) score = sl.score(X, y) - assert_array_equal(score, [roc_auc_score(y - 1, _y_pred - 1) - for _y_pred in sl.decision_function(X).T]) + assert_array_equal( + score, + [roc_auc_score(y - 1, _y_pred - 1) for _y_pred in sl.decision_function(X).T], + ) y = np.arange(len(X)) % 2 # Cannot pass a metric as a scoring parameter @@ -107,22 +130,28 @@ def test_search_light(): pytest.raises(ValueError, sl1.score, X, y) # Now use string as scoring - sl1 = SlidingEstimator(logreg, scoring='roc_auc') + sl1 = SlidingEstimator(logreg, scoring="roc_auc") sl1.fit(X, y) rng = np.random.RandomState(0) X = rng.randn(*X.shape) # randomize 
X to avoid AUCs in [0, 1] score_sl = sl1.score(X, y) assert_array_equal(score_sl.shape, [n_time]) - assert (score_sl.dtype == float) + assert score_sl.dtype == np.dtype(float) # Check that scoring was applied adequately - scoring = make_scorer(roc_auc_score, needs_threshold=True) - score_manual = [scoring(est, x, y) for est, x in zip( - sl1.estimators_, X.transpose(2, 0, 1))] + kwargs = dict() + if check_version("sklearn", "1.4"): + kwargs["response_method"] = ("decision_function", "predict_proba") + else: + kwargs["needs_threshold"] = True + scoring = make_scorer(roc_auc_score, **kwargs) + score_manual = [ + scoring(est, x, y) for est, x in zip(sl1.estimators_, X.transpose(2, 0, 1)) + ] assert_array_equal(score_manual, score_sl) # n_jobs - sl = SlidingEstimator(logreg, n_jobs=None, scoring='roc_auc') + sl = SlidingEstimator(logreg, n_jobs=None, scoring="roc_auc") score_1job = sl.fit(X, y).score(X, y) sl.n_jobs = 2 score_njobs = sl.fit(X, y).score(X, y) @@ -136,13 +165,10 @@ def test_search_light(): # pipeline class _LogRegTransformer(LogisticRegression): def transform(self, X): - return super(_LogRegTransformer, self).predict_proba(X)[..., 1] + return super().predict_proba(X)[..., 1] - logreg_transformer = _LogRegTransformer( - random_state=0, multi_class='ovr', solver='liblinear' - ) - pipe = make_pipeline(SlidingEstimator(logreg_transformer), - logreg) + logreg_transformer = OneVsRestClassifier(_LogRegTransformer(random_state=0)) + pipe = make_pipeline(SlidingEstimator(logreg_transformer), logreg) pipe.fit(X, y) pipe.predict(X) @@ -151,8 +177,7 @@ def transform(self, X): y = np.arange(10) % 2 y_preds = list() for n_jobs in [1, 2]: - pipe = SlidingEstimator( - make_pipeline(Vectorizer(), logreg), n_jobs=n_jobs) + pipe = SlidingEstimator(make_pipeline(Vectorizer(), logreg), n_jobs=n_jobs) y_preds.append(pipe.fit(X, y).predict(X)) features_shape = pipe.estimators_[0].steps[0][1].features_shape_ assert_array_equal(features_shape, [3, 4]) @@ -164,34 +189,47 @@ def transform(self, X): pipe = SlidingEstimator(BaggingClassifier(None, 2), n_jobs=n_jobs) pipe.fit(X, y) pipe.score(X, y) - assert (isinstance(pipe.estimators_[0], BaggingClassifier)) + assert isinstance(pipe.estimators_[0], BaggingClassifier) -@requires_sklearn -def test_generalization_light(): - """Test GeneralizingEstimator.""" - from sklearn.pipeline import make_pipeline - from sklearn.linear_model import LogisticRegression - from sklearn.metrics import roc_auc_score +@pytest.fixture() +def metadata_routing(): + """Temporarily enable metadata routing for new sklearn.""" + if NEW_MULTICLASS_SAMPLE_WEIGHT: + sklearn.set_config(enable_metadata_routing=True) + yield + if NEW_MULTICLASS_SAMPLE_WEIGHT: + sklearn.set_config(enable_metadata_routing=False) - logreg = LogisticRegression(solver='liblinear', multi_class='ovr', - random_state=0) + +def test_generalization_light(metadata_routing): + """Test GeneralizingEstimator.""" + if NEW_MULTICLASS_SAMPLE_WEIGHT: + clf = LogisticRegression(random_state=0) + clf.set_fit_request(sample_weight=True) + logreg = OneVsRestClassifier(clf) + else: + logreg = LogisticRegression( + solver="liblinear", + random_state=0, + multi_class="ovr", + ) X, y = make_data() n_epochs, _, n_time = X.shape # fit gl = GeneralizingEstimator(logreg) - assert_equal(repr(gl)[:23], '') + assert_equal(gl.__repr__()[-28:], ", fitted with 10 estimators>") # transforms y_pred = gl.predict(X) assert_array_equal(y_pred.shape, [n_epochs, n_time, n_time]) - assert (y_pred.dtype == int) + assert y_pred.dtype == np.dtype(int) 
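# Editor's note: a shape-focused sketch of the two searchlight estimators
# exercised in these tests; the random data below are purely illustrative.
import numpy as np
from sklearn.linear_model import LogisticRegression

from mne.decoding import GeneralizingEstimator, SlidingEstimator

rng = np.random.RandomState(0)
X_toy = rng.randn(20, 3, 10)        # (n_epochs, n_channels, n_times)
y_toy = np.arange(20) % 2
sl_toy = SlidingEstimator(LogisticRegression(solver="liblinear"))
assert sl_toy.fit(X_toy, y_toy).predict(X_toy).shape == (20, 10)      # per time
gl_toy = GeneralizingEstimator(LogisticRegression(solver="liblinear"))
assert gl_toy.fit(X_toy, y_toy).predict(X_toy).shape == (20, 10, 10)  # train x test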
     y_proba = gl.predict_proba(X)
-    assert (y_proba.dtype == float)
+    assert y_proba.dtype == np.dtype(float)
     assert_array_equal(y_proba.shape, [n_epochs, n_time, n_time, 2])

     # transform to different datasize
@@ -201,23 +239,23 @@ def test_generalization_light():
     # score
     score = gl.score(X[:, :, :3], y)
     assert_array_equal(score.shape, [n_time, 3])
-    assert (np.sum(np.abs(score)) != 0)
-    assert (score.dtype == float)
+    assert np.sum(np.abs(score)) != 0
+    assert score.dtype == np.dtype(float)

-    gl = GeneralizingEstimator(logreg, scoring='roc_auc')
+    gl = GeneralizingEstimator(logreg, scoring="roc_auc")
     gl.fit(X, y)
     score = gl.score(X, y)
     auc = roc_auc_score(y, gl.estimators_[0].predict_proba(X[..., 0])[..., 1])
     assert_equal(score[0, 0], auc)
-    for scoring in ['foo', 999]:
+    for scoring in ["foo", 999]:
         gl = GeneralizingEstimator(logreg, scoring=scoring)
         gl.fit(X, y)
         pytest.raises((ValueError, TypeError), gl.score, X, y)

     # Check sklearn's roc_auc fix: scikit-learn/scikit-learn#6874
     # -- 3 class problem
-    gl = GeneralizingEstimator(logreg, scoring='roc_auc')
+    gl = GeneralizingEstimator(logreg, scoring="roc_auc")
     y = np.arange(len(X)) % 3
     gl.fit(X, y)
     pytest.raises(ValueError, gl.score, X, y)
@@ -225,8 +263,10 @@ def test_generalization_light():
     y = np.arange(len(X)) % 2 + 1
     gl.fit(X, y)
     score = gl.score(X, y)
-    manual_score = [[roc_auc_score(y - 1, _y_pred) for _y_pred in _y_preds]
-                    for _y_preds in gl.decision_function(X).transpose(1, 2, 0)]
+    manual_score = [
+        [roc_auc_score(y - 1, _y_pred) for _y_pred in _y_preds]
+        for _y_preds in gl.decision_function(X).transpose(1, 2, 0)
+    ]
     assert_array_equal(score, manual_score)

     # n_jobs
@@ -246,47 +286,38 @@ def test_generalization_light():
     y = np.arange(10) % 2
     y_preds = list()
     for n_jobs in [1, 2]:
-        pipe = GeneralizingEstimator(
-            make_pipeline(Vectorizer(), logreg), n_jobs=n_jobs)
+        pipe = GeneralizingEstimator(make_pipeline(Vectorizer(), logreg), n_jobs=n_jobs)
         y_preds.append(pipe.fit(X, y).predict(X))
         features_shape = pipe.estimators_[0].steps[0][1].features_shape_
         assert_array_equal(features_shape, [3, 4])
     assert_array_equal(y_preds[0], y_preds[1])


-@requires_sklearn
-@pytest.mark.parametrize('n_jobs, verbose',
-                         [(1, False), (2, False), (1, True), (2, 'info')])
+@pytest.mark.parametrize(
+    "n_jobs, verbose", [(1, False), (2, False), (1, True), (2, "info")]
+)
 def test_verbose_arg(capsys, n_jobs, verbose):
     """Test controlling output with the ``verbose`` argument."""
-    from sklearn.svm import SVC
-
     X, y = make_data()
     clf = SVC()

     # shows progress bar and prints other messages to the console
     with use_log_level(True):
         for estimator_object in [SlidingEstimator, GeneralizingEstimator]:
-            estimator = estimator_object(
-                clf, n_jobs=n_jobs, verbose=verbose)
+            estimator = estimator_object(clf, n_jobs=n_jobs, verbose=verbose)
             estimator = estimator.fit(X, y)
             estimator.score(X, y)
             estimator.predict(X)

     stdout, stderr = capsys.readouterr()
     if isinstance(verbose, bool) and not verbose:
-        assert all(channel == '' for channel in (stdout, stderr))
+        assert all(channel == "" for channel in (stdout, stderr))
     else:
         assert any(len(channel) > 0 for channel in (stdout, stderr))


-@requires_sklearn
 def test_cross_val_predict():
     """Test cross_val_predict with predict_proba."""
-    from sklearn.linear_model import LinearRegression
-    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
-    from sklearn.base import BaseEstimator, clone
-    from sklearn.model_selection import cross_val_predict
     rng = np.random.RandomState(42)
     X = rng.randn(10, 1, 3)
     y = rng.randint(0, 2, 10)
@@ -309,7 +340,19 @@ def predict_proba(self, X):

     with pytest.raises(AttributeError, match="classes_ attribute"):
         estimator = SlidingEstimator(Classifier())
-        cross_val_predict(estimator, X, y, method='predict_proba', cv=2)
+        cross_val_predict(estimator, X, y, method="predict_proba", cv=2)

     estimator = SlidingEstimator(LinearDiscriminantAnalysis())
-    cross_val_predict(estimator, X, y, method='predict_proba', cv=2)
+    cross_val_predict(estimator, X, y, method="predict_proba", cv=2)
+
+
+@pytest.mark.slowtest
+@parametrize_with_checks(
+    [
+        SlidingEstimator(LogisticRegression(), allow_2d=True),
+        GeneralizingEstimator(LogisticRegression(), allow_2d=True),
+    ]
+)
+def test_sklearn_compliance(estimator, check):
+    """Test searchlights compliance with sklearn."""
+    check(estimator)
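The `test_sklearn_compliance` functions added throughout this patch all follow the same `parametrize_with_checks` pattern: sklearn generates one test case per (estimator instance, check callable) pair, and the test body simply runs the check. A minimal, self-contained sketch of that pattern, using a plain `LogisticRegression` rather than an MNE estimator (so nothing below is part of the patch itself):

```python
# Minimal sketch of the compliance-test pattern used in this patch.
import pytest
from sklearn.linear_model import LogisticRegression
from sklearn.utils.estimator_checks import parametrize_with_checks


@parametrize_with_checks([LogisticRegression()])
def test_sklearn_compliance(estimator, check):
    """Run one sklearn API check against one estimator instance."""
    check(estimator)
```

Skipping individual checks, as done for `SSD` and `FilterEstimator` later in the patch, amounts to matching the check's name against an ignore list before calling it.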
diff --git a/mne/decoding/tests/test_spatial_filter.py b/mne/decoding/tests/test_spatial_filter.py
new file mode 100644
index 00000000000..385b73fc053
--- /dev/null
+++ b/mne/decoding/tests/test_spatial_filter.py
@@ -0,0 +1,190 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pytest
+from numpy.testing import assert_array_equal
+
+pytest.importorskip("sklearn")
+
+from sklearn.linear_model import LinearRegression
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import StandardScaler
+
+from mne import Epochs, create_info, io, pick_types, read_events
+from mne.decoding import (
+    CSP,
+    LinearModel,
+    SpatialFilter,
+    Vectorizer,
+    XdawnTransformer,
+    get_spatial_filter_from_estimator,
+)
+
+data_dir = Path(__file__).parents[2] / "io" / "tests" / "data"
+raw_fname = data_dir / "test_raw.fif"
+event_name = data_dir / "test-eve.fif"
+tmin, tmax = -0.1, 0.2
+event_id = dict(aud_l=1, vis_l=3)
+start, stop = 0, 8
+
+
+def _get_X_y(event_id, return_info=False):
+    raw = io.read_raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(
+        raw.info, meg=True, stim=False, ecg=False, eog=False, exclude="bads"
+    )
+    picks = picks[2:12:3]  # subselect channels -> disable proj!
+    raw.add_proj([], remove_existing=True)
+    epochs = Epochs(
+        raw,
+        events,
+        event_id,
+        tmin,
+        tmax,
+        picks=picks,
+        baseline=(None, 0),
+        preload=True,
+        proj=False,
+    )
+    X = epochs.get_data(copy=False, units=dict(eeg="uV", grad="fT/cm", mag="fT"))
+    y = epochs.events[:, -1]
+    if return_info:
+        return X, y, epochs.info
+    return X, y
+
+
+def test_spatial_filter_init():
+    """Test the initialization of the SpatialFilter class."""
+    # Test initialization and factory function
+    rng = np.random.RandomState(0)
+    n, n_features = 20, 3
+    X = rng.rand(n, n_features)
+    n_targets = 5
+    y = rng.rand(n, n_targets)
+    clf = LinearModel(LinearRegression())
+    clf.fit(X, y)
+
+    # test get_spatial_filter_from_estimator for LinearModel
+    info = create_info(n_features, 1000.0, "eeg")
+    sp_filter = get_spatial_filter_from_estimator(clf, info)
+    assert sp_filter.patterns_method == "haufe"
+    assert_array_equal(sp_filter.filters, clf.filters_)
+    assert_array_equal(sp_filter.patterns, clf.patterns_)
+    assert sp_filter.evals is None
+
+    with pytest.raises(ValueError, match="can only include"):
+        _ = get_spatial_filter_from_estimator(
+            clf, info, get_coefs=("foo", "foo", "foo")
+        )
+
+    event_id = dict(aud_l=1, vis_l=3)
+    X, y, info = _get_X_y(event_id, return_info=True)
+    estimator = make_pipeline(Vectorizer(), StandardScaler(), CSP(n_components=4))
+    estimator.fit(X, y)
+    csp = estimator[-1]
+    # test get_spatial_filter_from_estimator for GED
+    sp_filter = get_spatial_filter_from_estimator(estimator, info, step_name="csp")
+    assert sp_filter.patterns_method == "pinv"
+    assert_array_equal(sp_filter.filters, csp.filters_)
+    assert_array_equal(sp_filter.patterns, csp.patterns_)
+    assert_array_equal(sp_filter.evals, csp.evals_)
+    assert sp_filter.info is info
+
+    # test without step_name
+    sp_filter = get_spatial_filter_from_estimator(estimator, info)
+    assert_array_equal(sp_filter.filters, csp.filters_)
+    assert_array_equal(sp_filter.patterns, csp.patterns_)
+    assert_array_equal(sp_filter.evals, csp.evals_)
+
+    # test basic initialization
+    sp_filter = SpatialFilter(
+        info, filters=csp.filters_, patterns=csp.patterns_, evals=csp.evals_
+    )
+    assert_array_equal(sp_filter.filters, csp.filters_)
+    assert_array_equal(sp_filter.patterns, csp.patterns_)
+    assert_array_equal(sp_filter.evals, csp.evals_)
+    assert sp_filter.info is info
+
+    # test automatic pattern calculation via pinv
+    sp_filter_pinv = SpatialFilter(info, filters=csp.filters_, evals=csp.evals_)
+    patterns_pinv = np.linalg.pinv(csp.filters_.T)
+    assert_array_equal(sp_filter_pinv.patterns, patterns_pinv)
+    assert sp_filter_pinv.patterns_method == "pinv"
+
+    # test shape mismatch error
+    with pytest.raises(ValueError, match="Shape mismatch"):
+        SpatialFilter(info, filters=csp.filters_, patterns=csp.patterns_[:-1])
+
+    # test invalid patterns_method
+    with pytest.raises(ValueError, match="patterns_method"):
+        SpatialFilter(info, filters=csp.filters_, patterns_method="foo")
+
+    # test n_components > n_channels error
+    bad_filters = np.random.randn(31, 30)  # 31 components, 30 channels
+    with pytest.raises(ValueError, match="Number of components can't be greater"):
+        SpatialFilter(info, filters=bad_filters)
+
+
+def test_spatial_filter_plotting():
+    """Test the plotting methods of SpatialFilter."""
+    event_id = dict(aud_l=1, vis_l=3)
+    X, y, info = _get_X_y(event_id, return_info=True)
+    csp = CSP(n_components=4)
+    csp.fit(X, y)
+
+    sp_filter = get_spatial_filter_from_estimator(csp, info)
+
+    # test plot_filters
+    fig_filters = sp_filter.plot_filters(components=[0, 1], show=False)
+    assert isinstance(fig_filters, plt.Figure)
+    plt.close("all")
+
+    # test plot_patterns
+    fig_patterns = sp_filter.plot_patterns(show=False)
+    assert isinstance(fig_patterns, plt.Figure)
+    plt.close("all")
+
+    # test plot_scree
+    fig_scree = sp_filter.plot_scree(show=False, add_cumul_evals=True)
+    assert isinstance(fig_scree, plt.Figure)
+    plt.close("all")
+    _, axes = plt.subplots(figsize=(12, 7), layout="constrained")
+    fig_scree = sp_filter.plot_scree(axes=axes, show=False)
+    assert fig_scree == list()
+    plt.close("all")
+
+    # test plot_scree raises error if evals is None
+    sp_filter_no_evals = SpatialFilter(info, filters=csp.filters_, evals=None)
+    with pytest.raises(AttributeError, match="eigenvalues are not provided"):
+        sp_filter_no_evals.plot_scree()
+
+    # 3D case ('multi' GED decomposition)
+    n_classes = 2
+    event_id = dict(aud_l=1, vis_l=3)
+    X, y, info = _get_X_y(event_id, return_info=True)
+    xdawn = XdawnTransformer(n_components=4)
+    xdawn.fit(X, y)
+    sp_filter = get_spatial_filter_from_estimator(xdawn, info)
+
+    fig_patterns = sp_filter.plot_patterns(show=False)
+    assert len(fig_patterns) == n_classes
+    plt.close("all")
+
+    fig_scree = sp_filter.plot_scree(show=False)
+    assert len(fig_scree) == n_classes
+    plt.close("all")
+
+    with pytest.raises(ValueError, match="but expected"):
+        _, axes = plt.subplots(figsize=(12, 7), layout="constrained")
+        _ = sp_filter.plot_scree(axes=axes, show=False)
+
+    _, axes = plt.subplots(n_classes, figsize=(12, 7), layout="constrained")
+    fig_scree = sp_filter.plot_scree(axes=axes, show=False)
+    assert fig_scree == list()
+    plt.close("all")
diff --git a/mne/decoding/tests/test_ssd.py b/mne/decoding/tests/test_ssd.py
index 8ba7657b660..236e65b82fd 100644
--- a/mne/decoding/tests/test_ssd.py
+++ b/mne/decoding/tests/test_ssd.py
@@ -1,26 +1,48 @@
-# Author: Denis A. Engemann
-#         Victoria Peterson
-#         Thomas S. Binns
+# Authors: The MNE-Python contributors.
 # License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import sys
+from pathlib import Path

 import numpy as np
 import pytest
-from numpy.testing import (assert_array_almost_equal, assert_array_equal)
-from mne import io
-from mne.time_frequency import psd_array_welch
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+
+pytest.importorskip("sklearn")
+
+from sklearn.pipeline import Pipeline
+from sklearn.utils.estimator_checks import parametrize_with_checks
+
+from mne import Epochs, create_info, io, pick_types, read_events
+from mne._fiff.pick import _picks_to_idx
+from mne.decoding import CSP
+from mne.decoding._mod_ged import _get_spectral_ratio
 from mne.decoding.ssd import SSD
-from mne.utils import requires_sklearn
 from mne.filter import filter_data
-from mne import create_info
-from mne.decoding import CSP
+from mne.time_frequency import psd_array_welch

 freqs_sig = 9, 12
 freqs_noise = 8, 13

-
-def simulate_data(freqs_sig=[9, 12], n_trials=100, n_channels=20,
-                  n_samples=500, samples_per_second=250,
-                  n_components=5, SNR=0.05, random_state=42):
+data_dir = Path(__file__).parents[2] / "io" / "tests" / "data"
+raw_fname = data_dir / "test_raw.fif"
+event_name = data_dir / "test-eve.fif"
+tmin, tmax = -0.1, 0.2
+event_id = dict(aud_l=1, vis_l=3)
+start, stop = 0, 8
+
+
+def simulate_data(
+    freqs_sig=(9, 12),
+    n_trials=100,
+    n_channels=20,
+    n_samples=500,
+    samples_per_second=250,
+    n_components=5,
+    SNR=0.05,
+    random_state=42,
+):
     """Simulate data according to an instantaneous mixin model.
     Data are simulated in the statistical source space, where n=n_components
@@ -28,9 +50,13 @@ def simulate_data(freqs_sig=[9, 12], n_trials=100, n_channels=20,
     """
     rng = np.random.RandomState(random_state)

-    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
-                              l_trans_bandwidth=1, h_trans_bandwidth=1,
-                              fir_design='firwin')
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+        fir_design="firwin",
+    )

     # generate an orthogonal mixin matrix
     mixing_mat = np.linalg.svd(rng.randn(n_channels, n_channels))[0]
@@ -44,8 +70,8 @@ def simulate_data(freqs_sig=[9, 12], n_trials=100, n_channels=20,
     X_s = np.dot(mixing_mat[:, :n_components], S_s.T).T
     X_n = np.dot(mixing_mat[:, n_components:], S_n.T).T
     # add noise
-    X_s = X_s / np.linalg.norm(X_s, 'fro')
-    X_n = X_n / np.linalg.norm(X_n, 'fro')
+    X_s = X_s / np.linalg.norm(X_s, "fro")
+    X_n = X_n / np.linalg.norm(X_n, "fro")
     X = SNR * X_s + (1 - SNR) * X_n
     X = X.T
     S = S.T
@@ -58,75 +84,106 @@ def test_ssd():
     X, A, S = simulate_data()
     sf = 250
     n_channels = X.shape[0]
-    info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg')
+    info = create_info(ch_names=n_channels, sfreq=sf, ch_types="eeg")
     n_components_true = 5

     # Init
-    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
-                              l_trans_bandwidth=1, h_trans_bandwidth=1)
-    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
-                             l_trans_bandwidth=1, h_trans_bandwidth=1)
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
     ssd = SSD(info, filt_params_signal, filt_params_noise)
     # freq no int
-    freq = 'foo'
-    filt_params_signal = dict(l_freq=freq, h_freq=freqs_sig[1],
-                              l_trans_bandwidth=1, h_trans_bandwidth=1)
-    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
-                             l_trans_bandwidth=1, h_trans_bandwidth=1)
-    with pytest.raises(TypeError, match='must be an instance '):
-        ssd = SSD(info, filt_params_signal, filt_params_noise)
+    freq = "foo"
+    filt_params_signal = dict(
+        l_freq=freq, h_freq=freqs_sig[1], l_trans_bandwidth=1, h_trans_bandwidth=1
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
+    ssd = SSD(info, filt_params_signal, filt_params_noise)
+    with pytest.raises(TypeError, match="must be an instance "):
+        ssd.fit(X)

     # Wrongly specified noise band
     freq = 2
-    filt_params_signal = dict(l_freq=freq, h_freq=freqs_sig[1],
-                              l_trans_bandwidth=1, h_trans_bandwidth=1)
-    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
-                             l_trans_bandwidth=1, h_trans_bandwidth=1)
-    with pytest.raises(ValueError, match='Wrongly specified '):
-        ssd = SSD(info, filt_params_signal, filt_params_noise)
+    filt_params_signal = dict(
+        l_freq=freq, h_freq=freqs_sig[1], l_trans_bandwidth=1, h_trans_bandwidth=1
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
+    ssd = SSD(info, filt_params_signal, filt_params_noise)
+    with pytest.raises(ValueError, match="Wrongly specified "):
+        ssd.fit(X)

     # filt param no dict
     filt_params_signal = freqs_sig
     filt_params_noise = freqs_noise
-    with pytest.raises(ValueError, match='must be defined'):
-        ssd = SSD(info, filt_params_signal, filt_params_noise)
+    ssd = SSD(info, filt_params_signal, filt_params_noise)
+    with pytest.raises(ValueError, match="must be defined"):
+        ssd.fit(X)

     # Data type
-    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
-                              l_trans_bandwidth=1, h_trans_bandwidth=1)
-    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
-                             l_trans_bandwidth=1, h_trans_bandwidth=1)
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
     ssd = SSD(info, filt_params_signal, filt_params_noise)
     raw = io.RawArray(X, info)

-    pytest.raises(TypeError, ssd.fit, raw)
+    with pytest.raises(ValueError):
+        ssd.fit(raw)

     # check non-boolean return_filtered
-    with pytest.raises(ValueError, match='return_filtered'):
-        ssd = SSD(info, filt_params_signal, filt_params_noise,
-                  return_filtered=0)
+    ssd = SSD(info, filt_params_signal, filt_params_noise, return_filtered=0)
+    with pytest.raises(TypeError, match="return_filtered"):
+        ssd.fit(X)

     # check non-boolean sort_by_spectral_ratio
-    with pytest.raises(ValueError, match='sort_by_spectral_ratio'):
-        ssd = SSD(info, filt_params_signal, filt_params_noise,
-                  sort_by_spectral_ratio=0)
+    ssd = SSD(info, filt_params_signal, filt_params_noise, sort_by_spectral_ratio=0)
+    with pytest.raises(TypeError, match="sort_by_spectral_ratio"):
+        ssd.fit(X)

     # More than 1 channel type
-    ch_types = np.reshape([['mag'] * 10, ['eeg'] * 10], n_channels)
+    ch_types = np.reshape([["mag"] * 10, ["eeg"] * 10], n_channels)
     info_2 = create_info(ch_names=n_channels, sfreq=sf, ch_types=ch_types)
-    with pytest.raises(ValueError, match='At this point SSD'):
-        ssd = SSD(info_2, filt_params_signal, filt_params_noise)
+    ssd = SSD(info_2, filt_params_signal, filt_params_noise)
+    with pytest.raises(ValueError, match="At this point SSD"):
+        ssd.fit(X)

     # Number of channels
-    info_3 = create_info(ch_names=n_channels + 1, sfreq=sf, ch_types='eeg')
+    info_3 = create_info(ch_names=n_channels + 1, sfreq=sf, ch_types="eeg")
     ssd = SSD(info_3, filt_params_signal, filt_params_noise)
-    pytest.raises(ValueError, ssd.fit, X)
+    with pytest.raises(ValueError, match="channels but expected"):
+        ssd.fit(X)

     # Fit
     n_components = 10
-    ssd = SSD(info, filt_params_signal, filt_params_noise,
-              n_components=n_components)
+    ssd = SSD(info, filt_params_signal, filt_params_noise, n_components=n_components)

     # Call transform before fit
     pytest.raises(AttributeError, ssd.transform, X)
@@ -134,30 +191,47 @@ def test_ssd():
     # Check outputs
     ssd.fit(X)
-    assert (ssd.filters_.shape == (n_channels, n_channels))
-    assert (ssd.patterns_.shape == (n_channels, n_channels))
+    assert ssd.filters_.shape == (n_channels, n_channels)
+    assert ssd.patterns_.shape == (n_channels, n_channels)

     # Transform
     X_ssd = ssd.fit_transform(X)
-    assert (X_ssd.shape[0] == n_components)
+    assert X_ssd.shape[0] == n_components
     # back and forward
-    ssd = SSD(info, filt_params_signal, filt_params_noise,
-              n_components=None, sort_by_spectral_ratio=False)
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        n_components=None,
+        sort_by_spectral_ratio=False,
+    )
     ssd.fit(X)
     X_denoised = ssd.apply(X)
     assert_array_almost_equal(X_denoised, X)

     # denoised by low-rank-factorization
-    ssd = SSD(info, filt_params_signal, filt_params_noise,
-              n_components=n_components, sort_by_spectral_ratio=True)
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        n_components=n_components,
+        sort_by_spectral_ratio=True,
+    )
     ssd.fit(X)
     X_denoised = ssd.apply(X)
-    assert (np.linalg.matrix_rank(X_denoised) == n_components)
+    assert np.linalg.matrix_rank(X_denoised) == n_components

     # Power ratio ordering
-    ssd = SSD(info, filt_params_signal, filt_params_noise,
-              n_components=None, sort_by_spectral_ratio=False)
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        n_components=None,
+        sort_by_spectral_ratio=False,
+    )
     ssd.fit(X)
-    spec_ratio, sorter_spec = ssd.get_spectral_ratio(ssd.transform(X))
+    spec_ratio, sorter_spec = _get_spectral_ratio(
+        ssd.transform(X), ssd.sfreq_, ssd.n_fft_, ssd.freqs_signal_, ssd.freqs_noise_
+    )
     # since we now that the number of true components is 5, the relative
     # difference should be low for the first 5 components and then increases
     index_diff = np.argmax(-np.diff(spec_ratio))
@@ -165,12 +239,25 @@ def test_ssd():
     # Check detected peaks
     # fit ssd
     n_components = n_components_true
-    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
-                              l_trans_bandwidth=1, h_trans_bandwidth=1)
-    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
-                             l_trans_bandwidth=1, h_trans_bandwidth=1)
-    ssd = SSD(info, filt_params_signal, filt_params_noise,
-              n_components=n_components, sort_by_spectral_ratio=False)
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        n_components=n_components,
+        sort_by_spectral_ratio=False,
+    )
     ssd.fit(X)

     out = ssd.transform(X)
@@ -197,7 +284,7 @@ def test_ssd_epoched_data():
     X, A, S = simulate_data(n_trials=100, n_channels=20, n_samples=500)
     sf = 250
     n_channels = X.shape[0]
-    info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg')
+    info = create_info(ch_names=n_channels, sfreq=sf, ch_types="eeg")
     n_components_true = 5

     # Build epochs as sliding windows over the continuous raw file
@@ -206,10 +293,18 @@ def test_ssd_epoched_data():
     X_e = np.reshape(X, (100, 20, 500))

     # Fit
-    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
-                              l_trans_bandwidth=4, h_trans_bandwidth=4)
-    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
-                             l_trans_bandwidth=4, h_trans_bandwidth=4)
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=4,
+        h_trans_bandwidth=4,
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=4,
+        h_trans_bandwidth=4,
+    )

     # ssd on epochs
     ssd_e = SSD(info, filt_params_signal, filt_params_noise)
@@ -219,36 +314,51 @@ def test_ssd_epoched_data():
     ssd.fit(X)

     # Check if the 5 first 5 components are the same for both
-    _, sorter_spec_e = ssd_e.get_spectral_ratio(ssd_e.transform(X_e))
-    _, sorter_spec = ssd.get_spectral_ratio(ssd.transform(X))
-    assert_array_equal(sorter_spec_e[:n_components_true],
-                       sorter_spec[:n_components_true])
+    _, sorter_spec_e = _get_spectral_ratio(
+        ssd_e.transform(X_e),
+        ssd_e.sfreq_,
+        ssd_e.n_fft_,
+        ssd_e.freqs_signal_,
+        ssd_e.freqs_noise_,
+    )
+    _, sorter_spec = _get_spectral_ratio(
+        ssd.transform(X), ssd.sfreq_, ssd.n_fft_, ssd.freqs_signal_, ssd.freqs_noise_
+    )
+    assert_array_equal(
+        sorter_spec_e[:n_components_true], sorter_spec[:n_components_true]
+    )


-@requires_sklearn
 def test_ssd_pipeline():
     """Test if SSD works in a pipeline."""
-    from sklearn.pipeline import Pipeline
     sf = 250
     X, A, S = simulate_data(n_trials=100, n_channels=20, n_samples=500)
     X_e = np.reshape(X, (100, 20, 500))
     # define bynary random output
-    y = np.random.randint(2, size=100)
-
-    info = create_info(ch_names=20, sfreq=sf, ch_types='eeg')
-
-    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
-                              l_trans_bandwidth=4, h_trans_bandwidth=4)
-    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
-                             l_trans_bandwidth=4, h_trans_bandwidth=4)
+    y = np.random.RandomState(0).randint(2, size=100)
+
+    info = create_info(ch_names=20, sfreq=sf, ch_types="eeg")
+
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=4,
+        h_trans_bandwidth=4,
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=4,
+        h_trans_bandwidth=4,
+    )
     ssd = SSD(info, filt_params_signal, filt_params_noise)
     csp = CSP()
-    pipe = Pipeline([('SSD', ssd), ('CSP', csp)])
+    pipe = Pipeline([("SSD", ssd), ("CSP", csp)])
     pipe.set_params(SSD__n_components=5)
     pipe.set_params(CSP__n_components=2)
     out = pipe.fit_transform(X_e, y)
-    assert (out.shape == (100, 2))
-    assert (pipe.get_params()['SSD__n_components'] == 5)
+    assert out.shape == (100, 2)
+    assert pipe.get_params()["SSD__n_components"] == 5


 def test_sorting():
@@ -260,32 +370,61 @@ def test_sorting():
     Xtr, Xte = X[:80], X[80:]
     sf = 250
     n_channels = Xtr.shape[1]
-    info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg')
-
-    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
-                              l_trans_bandwidth=4, h_trans_bandwidth=4)
-    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
-                             l_trans_bandwidth=4, h_trans_bandwidth=4)
+    info = create_info(ch_names=n_channels, sfreq=sf, ch_types="eeg")
+
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=4,
+        h_trans_bandwidth=4,
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=4,
+        h_trans_bandwidth=4,
+    )

     # check sort_by_spectral_ratio set to False
-    ssd = SSD(info, filt_params_signal, filt_params_noise,
-              n_components=None, sort_by_spectral_ratio=False)
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        n_components=None,
+        sort_by_spectral_ratio=False,
+    )
     ssd.fit(Xtr)
-    _, sorter_tr = ssd.get_spectral_ratio(ssd.transform(Xtr))
-    _, sorter_te = ssd.get_spectral_ratio(ssd.transform(Xte))
+    _, sorter_tr = _get_spectral_ratio(
+        ssd.transform(Xtr), ssd.sfreq_, ssd.n_fft_, ssd.freqs_signal_, ssd.freqs_noise_
+    )
+    _, sorter_te = _get_spectral_ratio(
+        ssd.transform(Xte), ssd.sfreq_, ssd.n_fft_, ssd.freqs_signal_, ssd.freqs_noise_
+    )
     assert any(sorter_tr != sorter_te)

     # check sort_by_spectral_ratio set to True
-    ssd = SSD(info, filt_params_signal, filt_params_noise,
-              n_components=None, sort_by_spectral_ratio=True)
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        n_components=None,
+        sort_by_spectral_ratio=True,
+    )
     ssd.fit(Xtr)

     # check sorters
-    sorter_in = ssd.sorter_spec
-    ssd = SSD(info, filt_params_signal, filt_params_noise,
-              n_components=None, sort_by_spectral_ratio=False)
+    sorter_in = ssd.sorter_
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        n_components=None,
+        sort_by_spectral_ratio=False,
+    )
     ssd.fit(Xtr)
-    _, sorter_out = ssd.get_spectral_ratio(ssd.transform(Xtr))
+    _, sorter_out = _get_spectral_ratio(
+        ssd.transform(Xtr), ssd.sfreq_, ssd.n_fft_, ssd.freqs_signal_, ssd.freqs_noise_
+    )
     assert all(sorter_in == sorter_out)
@@ -297,44 +436,70 @@ def test_return_filtered():
     X, _, _ = simulate_data(SNR=0.9, freqs_sig=[4, 13])
     sf = 250
     n_channels = X.shape[0]
-    info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg')
-
-    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
-                              l_trans_bandwidth=1, h_trans_bandwidth=1)
-    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
-                             l_trans_bandwidth=1, h_trans_bandwidth=1)
+    info = create_info(ch_names=n_channels, sfreq=sf, ch_types="eeg")
+
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )

     # return filtered to true
-    ssd = SSD(info, filt_params_signal, filt_params_noise,
-              sort_by_spectral_ratio=False, return_filtered=True)
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        sort_by_spectral_ratio=False,
+        return_filtered=True,
+    )
     ssd.fit(X)

     out = ssd.transform(X)
     psd_out, freqs = psd_array_welch(out[0], sfreq=250, n_fft=250)
     freqs_up = int(freqs[psd_out > 0.5][0]), int(freqs[psd_out > 0.5][-1])
-    assert (freqs_up == freqs_sig)
+    assert freqs_up == freqs_sig

     # return filtered to false
-    ssd = SSD(info, filt_params_signal, filt_params_noise,
-              sort_by_spectral_ratio=False, return_filtered=False)
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        sort_by_spectral_ratio=False,
+        return_filtered=False,
+    )
     ssd.fit(X)

     out = ssd.transform(X)
     psd_out, freqs = psd_array_welch(out[0], sfreq=250, n_fft=250)
     freqs_up = int(freqs[psd_out > 0.5][0]), int(freqs[psd_out > 0.5][-1])
-    assert (freqs_up != freqs_sig)
+    assert freqs_up != freqs_sig


 def test_non_full_rank_data():
     """Test that the method works with non-full rank data."""
     n_channels = 10
     X, _, _ = simulate_data(SNR=0.9, freqs_sig=[4, 13], n_channels=n_channels)
-    info = create_info(ch_names=n_channels, sfreq=250, ch_types='eeg')
-
-    filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
-                              l_trans_bandwidth=1, h_trans_bandwidth=1)
-    filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
-                             l_trans_bandwidth=1, h_trans_bandwidth=1)
+    info = create_info(ch_names=n_channels, sfreq=250, ch_types="eeg")
+
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )

     # Make data non-full rank
     rank = 5
@@ -342,4 +507,136 @@ def test_non_full_rank_data():
     assert np.linalg.matrix_rank(X) == rank

     ssd = SSD(info, filt_params_signal, filt_params_noise)
+    if sys.platform == "darwin":
+        pytest.xfail("Unknown linalg bug (Accelerate?)")
+    ssd.fit(X)
+
+
+def test_picks_arg():
+    """Test that picks argument works as expected."""
+    raw = io.read_raw_fif(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(
+        raw.info, meg=True, eeg=True, stim=False, ecg=False, eog=False, exclude="bads"
+    )
+    raw.add_proj([], remove_existing=True)
+    epochs = Epochs(
+        raw,
+        events,
+        event_id,
+        -0.1,
+        1,
+        picks=picks,
+        baseline=(None, 0),
+        preload=True,
+        proj=False,
+    )
+    X = epochs.get_data(copy=False)
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=3,
+        h_trans_bandwidth=3,
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=3,
+        h_trans_bandwidth=3,
+    )
+    picks = ["eeg"]
+    info = epochs.info
+    picks_idx = _picks_to_idx(info, picks)
+
+    # Test when return_filtered is False
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        picks=picks_idx,
+        return_filtered=False,
+    )
+    ssd.fit(X).transform(X)
+
+    # Test when return_filtered is true
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        picks=picks_idx,
+        return_filtered=True,
+        n_fft=64,
+    )
+    ssd.fit(X).transform(X)
+
+
+def test_get_spectral_ratio():
+    """Test that method is the same as function in _mod_ged.py."""
+    X, _, _ = simulate_data()
+    sf = 250
+    n_channels = X.shape[0]
+    info = create_info(ch_names=n_channels, sfreq=sf, ch_types="eeg")
+
+    # Init
+    filt_params_signal = dict(
+        l_freq=freqs_sig[0],
+        h_freq=freqs_sig[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
+    filt_params_noise = dict(
+        l_freq=freqs_noise[0],
+        h_freq=freqs_noise[1],
+        l_trans_bandwidth=1,
+        h_trans_bandwidth=1,
+    )
+
+    ssd = SSD(
+        info,
+        filt_params_signal,
+        filt_params_noise,
+        n_components=None,
+        sort_by_spectral_ratio=False,
+    )
     ssd.fit(X)
+    ssd_sources = ssd.transform(X)
+    spec_ratio_ssd, sorter_spec_ssd = ssd.get_spectral_ratio(ssd_sources)
+    spec_ratio_ged, sorter_spec_ged = _get_spectral_ratio(
+        ssd_sources, ssd.sfreq_, ssd.n_fft_, ssd.freqs_signal_, ssd.freqs_noise_
+    )
+    assert_array_equal(spec_ratio_ssd, spec_ratio_ged)
+    assert_array_equal(sorter_spec_ssd, sorter_spec_ged)
+
+
+@pytest.mark.filterwarnings("ignore:.*invalid value encountered in divide.*")
+@pytest.mark.filterwarnings("ignore:.*is longer than.*")
+@parametrize_with_checks(
+    [
+        SSD(
+            100.0,
+            dict(l_freq=0.0, h_freq=30.0),
+            dict(l_freq=0.0, h_freq=40.0),
+        )
+    ]
+)
+def test_sklearn_compliance(estimator, check):
+    """Test LinearModel compliance with sklearn."""
+    pytest.importorskip("sklearn", minversion="1.4")  # TODO VERSION remove on 1.4+
+    ignores = (
+        # Checks below fail because what sklearn passes as (n_samples, n_features)
+        # is considered (n_channels, n_times) by SSD and creates problems
+        # when n_channels change between fit and transform.
+        # Could potentially be fixed by if X.ndim == 2: X = np.expand_dims(X, axis=2)
+        # in fit and transform instead of axis=0.
+        # But this will require to drop support for 2D inputs and expect
+        # user to provide 3D array even if it's a continuous signal.
+        "check_methods_sample_order_invariance",  # SSD is not time-invariant
+        "check_fit_idempotent",
+        "check_methods_subset_invariance",
+        "check_transformer_general",
+        "check_transformer_data_not_an_array",
+    )
+    if any(ignore in str(check) for ignore in ignores):
+        return
+
+    check(estimator)
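The skipped checks above stem from a shape-convention mismatch: sklearn's common tests pass 2-D arrays of shape (n_samples, n_features), while `SSD` interprets a 2-D input as a single continuous recording of shape (n_channels, n_times). A small illustrative sketch of that convention (standalone NumPy code, not MNE's implementation):

```python
# Illustrative only: SSD-style promotion of 2-D continuous data to one "epoch".
import numpy as np


def _as_epochs(X):
    """Promote (n_channels, n_times) data to (1, n_channels, n_times)."""
    X = np.asarray(X)
    if X.ndim == 2:
        X = np.expand_dims(X, axis=0)  # treat the 2-D array as a single epoch
    return X


rng = np.random.default_rng(0)
continuous = rng.standard_normal((20, 500))   # 20 channels, 500 samples
epoched = rng.standard_normal((10, 20, 500))  # 10 epochs
assert _as_epochs(continuous).shape == (1, 20, 500)
assert _as_epochs(epoched).shape == (10, 20, 500)
```

The alternative floated in the comment above — requiring 3-D input even for continuous data — would remove this ambiguity at the cost of a breaking API change.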
diff --git a/mne/decoding/tests/test_time_frequency.py b/mne/decoding/tests/test_time_frequency.py
index 5fea1402e68..1ac6bba5dcb 100644
--- a/mne/decoding/tests/test_time_frequency.py
+++ b/mne/decoding/tests/test_time_frequency.py
@@ -1,37 +1,43 @@
-# Author: Jean-Remi King,
-#
+# Authors: The MNE-Python contributors.
 # License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 import numpy as np
-from numpy.testing import assert_array_equal
 import pytest
+from numpy.testing import assert_array_equal
+
+pytest.importorskip("sklearn")
+
+from sklearn.base import clone
+from sklearn.utils.estimator_checks import parametrize_with_checks

-from mne.utils import requires_sklearn
 from mne.decoding.time_frequency import TimeFrequency


-@requires_sklearn
-def test_timefrequency():
+def test_timefrequency_basic():
     """Test TimeFrequency."""
-    from sklearn.base import clone
     # Init
     n_freqs = 3
     freqs = [20, 21, 22]
     tf = TimeFrequency(freqs, sfreq=100)
-    for output in ['avg_power', 'foo', None]:
-        pytest.raises(ValueError, TimeFrequency, freqs, output=output)
+    n_epochs, n_chans, n_times = 10, 2, 100
+    X = np.random.rand(n_epochs, n_chans, n_times)
+    for output in ["avg_power", "foo", None]:
+        tf = TimeFrequency(freqs, output=output)
+        with pytest.raises(ValueError, match="Invalid value"):
+            tf.fit(X)
     tf = clone(tf)

     # Clone estimator
     freqs_array = np.array(np.asarray(freqs))
-    tf = TimeFrequency(freqs_array, 100, "morlet", freqs_array / 5.)
+    tf = TimeFrequency(freqs_array, 100, "morlet", freqs_array / 5.0)
     clone(tf)

     # Fit
-    n_epochs, n_chans, n_times = 10, 2, 100
-    X = np.random.rand(n_epochs, n_chans, n_times)
+    assert not hasattr(tf, "fitted_")
     tf.fit(X, None)
+    assert tf.fitted_

     # Transform
     tf = TimeFrequency(freqs, sfreq=100)
@@ -40,9 +46,16 @@ def test_timefrequency():
     Xt = tf.transform(X)
     assert_array_equal(Xt.shape, [n_epochs, n_chans, n_freqs, n_times])
     # 2-D X
-    Xt = tf.transform(X[:, 0, :])
+    Xt = tf.fit_transform(X[:, 0, :])
     assert_array_equal(Xt.shape, [n_epochs, n_freqs, n_times])
     # 3-D with decim
     tf = TimeFrequency(freqs, sfreq=100, decim=2)
-    Xt = tf.transform(X)
+    Xt = tf.fit_transform(X)
     assert_array_equal(Xt.shape, [n_epochs, n_chans, n_freqs, n_times // 2])
+
+
+@parametrize_with_checks([TimeFrequency([300, 400], 1000.0, n_cycles=0.25)])
+def test_sklearn_compliance(estimator, check):
+    """Test LinearModel compliance with sklearn."""
+    pytest.importorskip("sklearn", minversion="1.4")  # TODO VERSION remove on 1.4+
+    check(estimator)
diff --git a/mne/decoding/tests/test_transformer.py b/mne/decoding/tests/test_transformer.py
index 3c53d7e2ca1..1911aa650e5 100644
--- a/mne/decoding/tests/test_transformer.py
+++ b/mne/decoding/tests/test_transformer.py
@@ -1,55 +1,73 @@
-# Author: Mainak Jas
-#         Romain Trachel
-#
+# Authors: The MNE-Python contributors.
 # License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
 from pathlib import Path

 import numpy as np
-
 import pytest
-from numpy.testing import (assert_array_equal, assert_array_almost_equal,
-                           assert_allclose, assert_equal)
-
-from mne import io, read_events, Epochs, pick_types
-from mne.decoding import (Scaler, FilterEstimator, PSDEstimator, Vectorizer,
-                          UnsupervisedSpatialFilter, TemporalFilter)
+from numpy.testing import (
+    assert_allclose,
+    assert_array_almost_equal,
+    assert_array_equal,
+    assert_equal,
+)
+
+pytest.importorskip("sklearn")
+
+from sklearn.decomposition import PCA
+from sklearn.kernel_ridge import KernelRidge
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.utils.estimator_checks import parametrize_with_checks
+
+from mne import Epochs, EpochsArray, create_info, io, pick_types, read_events
+from mne.decoding import (
+    FilterEstimator,
+    LinearModel,
+    PSDEstimator,
+    Scaler,
+    TemporalFilter,
+    UnsupervisedSpatialFilter,
+    Vectorizer,
+)
 from mne.defaults import DEFAULTS
-from mne.utils import requires_sklearn, check_version, use_log_level
+from mne.utils import use_log_level

 tmin, tmax = -0.2, 0.5
 event_id = dict(aud_l=1, vis_l=3)
 start, stop = 0, 8

-data_dir = Path(__file__).parent.parent.parent / "io" / "tests" / "data"
+data_dir = Path(__file__).parents[2] / "io" / "tests" / "data"
 raw_fname = data_dir / "test_raw.fif"
 event_name = data_dir / "test-eve.fif"
-
-
-@pytest.mark.parametrize('info, method', [
-    (True, None),
-    (True, dict(mag=5, grad=10, eeg=20)),
-    (False, 'mean'),
-    (False, 'median'),
-])
+info = create_info(2, 1000.0, "eeg")
+
+
+@pytest.mark.parametrize(
+    "info, method",
+    [
+        (True, None),
+        (True, dict(mag=5, grad=10, eeg=20)),
+        (False, "mean"),
+        (False, "median"),
+    ],
+)
 def test_scaler(info, method):
     """Test methods of Scaler."""
     raw = io.read_raw_fif(raw_fname)
     events = read_events(event_name)
-    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                       eog=False, exclude='bads')
+    picks = pick_types(
+        raw.info, meg=True, stim=False, ecg=False, eog=False, exclude="bads"
+    )
     picks = picks[1:13:3]

-    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), preload=True)
-    epochs_data = epochs.get_data()
+    epochs = Epochs(
+        raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True
+    )
+    epochs_data = epochs.get_data(copy=False)
     y = epochs.events[:, -1]

     epochs_data_t = epochs_data.transpose([1, 0, 2])
-    if method in ('mean', 'median'):
-        if not check_version('sklearn'):
-            with pytest.raises(ImportError, match='No module'):
-                Scaler(info, method)
-            return

     if info:
         info = epochs.info
@@ -57,22 +75,28 @@ def test_scaler(info, method):
     X = scaler.fit_transform(epochs_data, y)
     assert_equal(X.shape, epochs_data.shape)
     if method is None or isinstance(method, dict):
-        sd = DEFAULTS['scalings'] if method is None else method
+        sd = DEFAULTS["scalings"] if method is None else method
         stds = np.zeros(len(picks))
-        for key in ('mag', 'grad'):
-            stds[pick_types(epochs.info, meg=key)] = 1. / sd[key]
-        stds[pick_types(epochs.info, meg=False, eeg=True)] = 1. / sd['eeg']
+        for key in ("mag", "grad"):
+            stds[pick_types(epochs.info, meg=key)] = 1.0 / sd[key]
+        stds[pick_types(epochs.info, meg=False, eeg=True)] = 1.0 / sd["eeg"]
         means = np.zeros(len(epochs.ch_names))
-    elif method == 'mean':
+    elif method == "mean":
         stds = np.array([np.std(ch_data) for ch_data in epochs_data_t])
         means = np.array([np.mean(ch_data) for ch_data in epochs_data_t])
     else:  # median
-        percs = np.array([np.percentile(ch_data, [25, 50, 75])
-                          for ch_data in epochs_data_t])
+        percs = np.array(
+            [np.percentile(ch_data, [25, 50, 75]) for ch_data in epochs_data_t]
+        )
         stds = percs[:, 2] - percs[:, 0]
         means = percs[:, 1]
-    assert_allclose(X * stds[:, np.newaxis] + means[:, np.newaxis],
-                    epochs_data, rtol=1e-12, atol=1e-20, err_msg=method)
+    assert_allclose(
+        X * stds[:, np.newaxis] + means[:, np.newaxis],
+        epochs_data,
+        rtol=1e-12,
+        atol=1e-20,
+        err_msg=method,
+    )

     X2 = scaler.fit(epochs_data, y).transform(epochs_data)
     assert_array_equal(X, X2)
@@ -82,74 +106,101 @@ def test_scaler(info, method):
     assert_array_almost_equal(epochs_data, Xi)

     # Test init exception
-    pytest.raises(ValueError, Scaler, None, None)
-    pytest.raises(TypeError, scaler.fit, epochs, y)
-    pytest.raises(TypeError, scaler.transform, epochs)
-    epochs_bad = Epochs(raw, events, event_id, 0, 0.01, baseline=None,
-                        picks=np.arange(len(raw.ch_names)))  # non-data chs
+    x = Scaler(None, None)
+    with pytest.raises(ValueError):
+        x.fit(epochs_data, y)
+    pytest.raises(ValueError, scaler.fit, "foo", y)
+    pytest.raises(ValueError, scaler.transform, "foo")
+    epochs_bad = Epochs(
+        raw,
+        events,
+        event_id,
+        0,
+        0.01,
+        baseline=None,
+        picks=np.arange(len(raw.ch_names)),
+    )  # non-data chs
     scaler = Scaler(epochs_bad.info, None)
-    pytest.raises(ValueError, scaler.fit, epochs_bad.get_data(), y)
+    pytest.raises(ValueError, scaler.fit, epochs_bad.get_data(copy=False), y)


 def test_filterestimator():
     """Test methods of FilterEstimator."""
     raw = io.read_raw_fif(raw_fname)
     events = read_events(event_name)
-    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                       eog=False, exclude='bads')
+    picks = pick_types(
+        raw.info, meg=True, stim=False, ecg=False, eog=False, exclude="bads"
+    )
     picks = picks[1:13:3]
-    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), preload=True)
-    epochs_data = epochs.get_data()
+    epochs = Epochs(
+        raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True
+    )
+    epochs_data = epochs.get_data(copy=False)

     # Add tests for different combinations of l_freq and h_freq
     filt = FilterEstimator(epochs.info, l_freq=40, h_freq=80)
     y = epochs.events[:, -1]
     X = filt.fit_transform(epochs_data, y)
-    assert (X.shape == epochs_data.shape)
+    assert X.shape == epochs_data.shape
     assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X)

-    filt = FilterEstimator(epochs.info, l_freq=None, h_freq=40,
-                           filter_length='auto',
-                           l_trans_bandwidth='auto', h_trans_bandwidth='auto')
+    filt = FilterEstimator(
+        epochs.info,
+        l_freq=None,
+        h_freq=40,
+        filter_length="auto",
+        l_trans_bandwidth="auto",
+        h_trans_bandwidth="auto",
+    )
     y = epochs.events[:, -1]
     X = filt.fit_transform(epochs_data, y)

     filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1)
     y = epochs.events[:, -1]
-    with pytest.warns(RuntimeWarning, match='longer than the signal'):
+    with pytest.warns(RuntimeWarning, match="longer than the signal"):
         pytest.raises(ValueError, filt.fit_transform, epochs_data, y)

-    filt = FilterEstimator(epochs.info, l_freq=40, h_freq=None,
-                           filter_length='auto',
-                           l_trans_bandwidth='auto', h_trans_bandwidth='auto')
+    filt = FilterEstimator(
+        epochs.info,
+        l_freq=40,
+        h_freq=None,
+        filter_length="auto",
+        l_trans_bandwidth="auto",
+        h_trans_bandwidth="auto",
+    )
     X = filt.fit_transform(epochs_data, y)

     # Test init exception
-    pytest.raises(ValueError, filt.fit, epochs, y)
-    pytest.raises(ValueError, filt.transform, epochs)
+    pytest.raises(ValueError, filt.fit, "foo", y)
+    pytest.raises(ValueError, filt.transform, "foo")


 def test_psdestimator():
     """Test methods of PSDEstimator."""
     raw = io.read_raw_fif(raw_fname)
     events = read_events(event_name)
-    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                       eog=False, exclude='bads')
+    picks = pick_types(
+        raw.info, meg=True, stim=False, ecg=False, eog=False, exclude="bads"
+    )
     picks = picks[1:13:3]
-    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), preload=True)
-    epochs_data = epochs.get_data()
+    epochs = Epochs(
+        raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True
+    )
+    epochs_data = epochs.get_data(copy=False)
     psd = PSDEstimator(2 * np.pi, 0, np.inf)
     y = epochs.events[:, -1]
+    assert not hasattr(psd, "fitted_")
     X = psd.fit_transform(epochs_data, y)
+    assert psd.fitted_

-    assert (X.shape[0] == epochs_data.shape[0])
+    assert X.shape[0] == epochs_data.shape[0]
     assert_array_equal(psd.fit(epochs_data, y).transform(epochs_data), X)

     # Test init exception
-    pytest.raises(ValueError, psd.fit, epochs, y)
-    pytest.raises(ValueError, psd.transform, epochs)
+    with pytest.raises(ValueError):
+        psd.fit("foo", y)
+    with pytest.raises(ValueError):
+        psd.transform("foo")


 def test_vectorizer():
@@ -166,35 +217,49 @@ def test_vectorizer():
     assert_array_equal(vect.inverse_transform(result[1:]), data[1:])

     # check with different shape
-    assert_equal(vect.fit_transform(np.random.rand(150, 18, 6, 3)).shape,
-                 (150, 324))
+    assert_equal(vect.fit_transform(np.random.rand(150, 18, 6, 3)).shape, (150, 324))
     assert_equal(vect.fit_transform(data[1:]).shape, (149, 108))

     # check if raised errors are working correctly
-    vect.fit(np.random.rand(105, 12, 3))
-    pytest.raises(ValueError, vect.transform, np.random.rand(105, 12, 3, 1))
-    pytest.raises(ValueError, vect.inverse_transform,
-                  np.random.rand(102, 12, 12))
+    X = np.random.default_rng(0).standard_normal((105, 12, 3))
+    y = np.arange(X.shape[0]) % 2
+    pytest.raises(ValueError, vect.transform, X[..., np.newaxis])
+    pytest.raises(ValueError, vect.inverse_transform, X[:, :-1])
+
+    # And that pipelines work properly
+    X_arr = EpochsArray(X, create_info(12, 1000.0, "eeg"))
+    vect.fit(X_arr)
+    clf = make_pipeline(Vectorizer(), StandardScaler(), LinearModel())
+    clf.fit(X_arr, y)


-@requires_sklearn
 def test_unsupervised_spatial_filter():
     """Test unsupervised spatial filter."""
-    from sklearn.decomposition import PCA
-    from sklearn.kernel_ridge import KernelRidge
     raw = io.read_raw_fif(raw_fname)
     events = read_events(event_name)
-    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                       eog=False, exclude='bads')
+    picks = pick_types(
+        raw.info, meg=True, stim=False, ecg=False, eog=False, exclude="bads"
+    )
     picks = picks[1:13:3]
-    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    preload=True, baseline=None, verbose=False)
-
-    # Test estimator
-    pytest.raises(ValueError, UnsupervisedSpatialFilter, KernelRidge(2))
+    epochs = Epochs(
+        raw,
+        events,
+        event_id,
+        tmin,
+        tmax,
+        picks=picks,
+        preload=True,
+        baseline=None,
+        verbose=False,
+    )
+
+    # Test estimator (must be a transformer)
+    X = epochs.get_data(copy=False)
+    usf = UnsupervisedSpatialFilter(KernelRidge(2))
+    with pytest.raises(ValueError, match="transform"):
+        usf.fit(X)

     # Test fit
-    X = epochs.get_data()
     n_components = 4
     usf = UnsupervisedSpatialFilter(PCA(n_components))
     usf.fit(X)
@@ -210,7 +275,9 @@ def test_unsupervised_spatial_filter():
     # Test with average param
     usf = UnsupervisedSpatialFilter(PCA(4), average=True)
     usf.fit_transform(X)
-    pytest.raises(ValueError, UnsupervisedSpatialFilter, PCA(4), 2)
+    usf = UnsupervisedSpatialFilter(PCA(4), 2)
+    with pytest.raises(TypeError, match="average must be"):
+        usf.fit(X)


 def test_temporal_filter():
@@ -218,34 +285,67 @@ def test_temporal_filter():
     X = np.random.rand(5, 5, 1200)

     # Test init test
-    values = (('10hz', None, 100., 'auto'), (5., '10hz', 100., 'auto'),
-              (10., 20., 5., 'auto'), (None, None, 100., '5hz'))
+    values = (
+        ("10hz", None, 100.0, "auto"),
+        (5.0, "10hz", 100.0, "auto"),
+        (10.0, 20.0, 5.0, "auto"),
+        (None, None, 100.0, "5hz"),
+    )
     for low, high, sf, ltrans in values:
-        filt = TemporalFilter(low, high, sf, ltrans, fir_design='firwin')
+        filt = TemporalFilter(low, high, sf, ltrans, fir_design="firwin")
         pytest.raises(ValueError, filt.fit_transform, X)

     # Add tests for different combinations of l_freq and h_freq
-    for low, high in ((5., 15.), (None, 15.), (5., None)):
-        filt = TemporalFilter(low, high, sfreq=100., fir_design='firwin')
+    for low, high in ((5.0, 15.0), (None, 15.0), (5.0, None)):
+        filt = TemporalFilter(low, high, sfreq=100.0, fir_design="firwin")
         Xt = filt.fit_transform(X)
         assert_array_equal(filt.fit_transform(X), Xt)
-        assert (X.shape == Xt.shape)
+        assert X.shape == Xt.shape

     # Test fit and transform numpy type check
-    with pytest.raises(ValueError, match='Data to be filtered must be'):
-        filt.transform([1, 2])
+    with pytest.raises(ValueError):
+        filt.transform("foo")

     # Test with 2 dimensional data array
     X = np.random.rand(101, 500)
-    filt = TemporalFilter(l_freq=25., h_freq=50., sfreq=1000.,
-                          filter_length=150, fir_design='firwin2')
-    with use_log_level('error'):  # warning about transition bandwidth
+    filt = TemporalFilter(
+        l_freq=25.0, h_freq=50.0, sfreq=1000.0, filter_length=150, fir_design="firwin2"
+    )
+    with use_log_level("error"):  # warning about transition bandwidth
         assert_equal(filt.fit_transform(X).shape, X.shape)


 def test_bad_triage():
     """Test for gh-10924."""
-    filt = TemporalFilter(l_freq=8, h_freq=60, sfreq=160.)
+    filt = TemporalFilter(l_freq=8, h_freq=60, sfreq=160.0)
     # Used to fail with "ValueError: Effective band-stop frequency (135.0) is
     # too high (maximum based on Nyquist is 80.0)"
+    assert not hasattr(filt, "fitted_")
     filt.fit_transform(np.zeros((1, 1, 481)))
+    assert filt.fitted_
+
+
+@pytest.mark.filterwarnings("ignore:.*filter_length.*")
+@parametrize_with_checks(
+    [
+        FilterEstimator(info, l_freq=1, h_freq=10),
+        PSDEstimator(),
+        Scaler(scalings="mean"),
+        # Not easy to test Scaler(info) b/c number of channels must match
+        TemporalFilter(),
+        UnsupervisedSpatialFilter(PCA()),
+        Vectorizer(),
+    ]
+)
+def test_sklearn_compliance(estimator, check):
+    """Test LinearModel compliance with sklearn."""
+    pytest.importorskip("sklearn", minversion="1.4")  # TODO VERSION remove on 1.4+
+    ignores = []
+    if estimator.__class__.__name__ == "FilterEstimator":
+        ignores += [
+            "check_estimators_overwrite_params",  # we modify self.info
+            "check_methods_sample_order_invariance",  # Filtering is not time invariant
+        ]
+    if any(ignore in str(check) for ignore in ignores):
+        return
+    check(estimator)
diff --git a/mne/decoding/tests/test_xdawn.py b/mne/decoding/tests/test_xdawn.py
new file mode 100644
index 00000000000..a2936686b59
--- /dev/null
+++ b/mne/decoding/tests/test_xdawn.py
@@ -0,0 +1,17 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import pytest
+
+pytest.importorskip("sklearn")
+from sklearn.utils.estimator_checks import parametrize_with_checks
+
+from mne.decoding import XdawnTransformer
+
+
+@pytest.mark.filterwarnings("ignore:.*Only one sample available.*")
+@parametrize_with_checks([XdawnTransformer(reg="oas")])  # oas handles few sample cases
+def test_sklearn_compliance(estimator, check):
+    """Test compliance with sklearn."""
+    check(estimator)
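`FilterEstimator` skips `check_estimators_overwrite_params` above because its `fit` mutates `self.info`. The convention that check enforces — and that `TimeDelayingRidge` adopts below by validating `tmin`/`tmax`/`sfreq`/`alpha` into `tmin_`/`tmax_`/`sfreq_`/`alpha_` — is to leave constructor parameters untouched and keep all fitted state in trailing-underscore attributes. A hypothetical sketch of that pattern (the `GainEstimator` class is invented purely for illustration, it is not part of this patch):

```python
# Sketch of the sklearn "don't overwrite __init__ params in fit" convention.
from sklearn.base import BaseEstimator


class GainEstimator(BaseEstimator):  # hypothetical example estimator
    def __init__(self, gain=1.0):
        self.gain = gain  # stored as-is, never coerced or modified here

    def fit(self, X, y=None):
        self.gain_ = float(self.gain)  # validated copy lives in a fitted attribute
        return self
```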
diff --git a/mne/decoding/time_delaying_ridge.py b/mne/decoding/time_delaying_ridge.py
index a1e2f426b18..6a754ed361d 100644
--- a/mne/decoding/time_delaying_ridge.py
+++ b/mne/decoding/time_delaying_ridge.py
@@ -1,21 +1,26 @@
-# -*- coding: utf-8 -*-
 """TimeDelayingRidge class."""
-# Authors: Eric Larson
-#          Ross Maddox
-#
+
+# Authors: The MNE-Python contributors.
 # License: BSD-3-Clause
+# Copyright the MNE-Python contributors.

 import numpy as np
+from scipy import linalg
+from scipy.signal import fftconvolve
+from scipy.sparse.csgraph import laplacian
+from sklearn.base import BaseEstimator, RegressorMixin
+from sklearn.utils.validation import check_is_fitted

-from .base import BaseEstimator
 from ..cuda import _setup_cuda_fft_multiply_repeated
 from ..filter import next_fast_len
 from ..fixes import jit
-from ..utils import warn, ProgressBar, logger
+from ..utils import ProgressBar, _check_option, logger, warn
+from ._fixes import _check_n_features_3d, validate_data


-def _compute_corrs(X, y, smin, smax, n_jobs=None, fit_intercept=False,
-                   edge_correction=True):
+def _compute_corrs(
+    X, y, smin, smax, n_jobs=None, fit_intercept=False, edge_correction=True
+):
     """Compute auto- and cross-correlations."""
     if fit_intercept:
         # We could do this in the Fourier domain, too, but it should
@@ -28,7 +33,7 @@ def _compute_corrs(X, y, smin, smax, n_jobs=None, fit_intercept=False,
         X = X - X_offset
         y = y - y_offset
     else:
-        X_offset = y_offset = 0.
+        X_offset = y_offset = 0.0
     if X.ndim == 2:
         assert y.ndim == 2
         X = X[:, np.newaxis, :]
@@ -36,13 +41,15 @@ def _compute_corrs(X, y, smin, smax, n_jobs=None, fit_intercept=False,
         assert X.shape[:2] == y.shape[:2]
     len_trf = smax - smin
     len_x, n_epochs, n_ch_x = X.shape
-    len_y, n_epcohs, n_ch_y = y.shape
+    len_y, n_epochs_y, n_ch_y = y.shape
     assert len_x == len_y
+    assert n_epochs == n_epochs_y

     n_fft = next_fast_len(2 * X.shape[0] - 1)

     _, cuda_dict = _setup_cuda_fft_multiply_repeated(
-        n_jobs, [1.], n_fft, 'correlation calculations')
+        n_jobs, [1.0], n_fft, "correlation calculations"
+    )
     del n_jobs  # only used to set as CUDA

     # create our Toeplitz indexer
@@ -50,26 +57,27 @@ def _compute_corrs(X, y, smin, smax, n_jobs=None, fit_intercept=False,
     for ii in range(len_trf):
         ij[ii, ii:] = np.arange(len_trf - ii)
         x = np.arange(n_fft - 1, n_fft - len_trf + ii, -1)
-        ij[ii + 1:, ii] = x
+        ij[ii + 1 :, ii] = x

     x_xt = np.zeros([n_ch_x * len_trf] * 2)
-    x_y = np.zeros((len_trf, n_ch_x, n_ch_y), order='F')
+    x_y = np.zeros((len_trf, n_ch_x, n_ch_y), order="F")
     n = n_epochs * (n_ch_x * (n_ch_x + 1) // 2 + n_ch_x)
-    logger.info('Fitting %d epochs, %d channels' % (n_epochs, n_ch_x))
-    pb = ProgressBar(n, mesg='Sample')
+    logger.info(f"Fitting {n_epochs} epochs, {n_ch_x} channels")
+    pb = ProgressBar(n, mesg="Sample")
     count = 0
     pb.update(count)
     for ei in range(n_epochs):
         this_X = X[:, ei, :]
         # XXX maybe this is what we should parallelize over CPUs at some point
-        X_fft = cuda_dict['rfft'](this_X, n=n_fft, axis=0)
+        X_fft = cuda_dict["rfft"](this_X, n=n_fft, axis=0)
         X_fft_conj = X_fft.conj()
-        y_fft = cuda_dict['rfft'](y[:, ei, :], n=n_fft, axis=0)
+        y_fft = cuda_dict["rfft"](y[:, ei, :], n=n_fft, axis=0)

         for ch0 in range(n_ch_x):
             for oi, ch1 in enumerate(range(ch0, n_ch_x)):
-                this_result = cuda_dict['irfft'](
-                    X_fft[:, ch0] * X_fft_conj[:, ch1], n=n_fft, axis=0)
+                this_result = cuda_dict["irfft"](
+                    X_fft[:, ch0] * X_fft_conj[:, ch1], n=n_fft, axis=0
+                )
                 # Our autocorrelation structure is a Toeplitz matrix, but
                 # it's faster to create the Toeplitz ourselves than use
                 # linalg.toeplitz.
@@ -86,40 +94,43 @@ def _compute_corrs(X, y, smin, smax, n_jobs=None, fit_intercept=False,
                     _edge_correct(this_result, this_X, smax, smin, ch0, ch1)

                 # Store the results in our output matrix
-                x_xt[ch0 * len_trf:(ch0 + 1) * len_trf,
-                     ch1 * len_trf:(ch1 + 1) * len_trf] += this_result
+                x_xt[
+                    ch0 * len_trf : (ch0 + 1) * len_trf,
+                    ch1 * len_trf : (ch1 + 1) * len_trf,
+                ] += this_result
                 if ch0 != ch1:
-                    x_xt[ch1 * len_trf:(ch1 + 1) * len_trf,
-                         ch0 * len_trf:(ch0 + 1) * len_trf] += this_result.T
+                    x_xt[
+                        ch1 * len_trf : (ch1 + 1) * len_trf,
+                        ch0 * len_trf : (ch0 + 1) * len_trf,
+                    ] += this_result.T
                 count += 1
                 pb.update(count)

             # compute the crosscorrelations
-            cc_temp = cuda_dict['irfft'](
-                y_fft * X_fft_conj[:, slice(ch0, ch0 + 1)], n=n_fft, axis=0)
+            cc_temp = cuda_dict["irfft"](
+                y_fft * X_fft_conj[:, slice(ch0, ch0 + 1)], n=n_fft, axis=0
+            )
             if smin < 0 and smax >= 0:
                 x_y[:-smin, ch0] += cc_temp[smin:]
-                x_y[len_trf - smax:, ch0] += cc_temp[:smax]
+                x_y[len_trf - smax :, ch0] += cc_temp[:smax]
             else:
                 x_y[:, ch0] += cc_temp[smin:smax]
             count += 1
             pb.update(count)

-    x_y = np.reshape(x_y, (n_ch_x * len_trf, n_ch_y), order='F')
+    x_y = np.reshape(x_y, (n_ch_x * len_trf, n_ch_y), order="F")
     return x_xt, x_y, n_ch_x, X_offset, y_offset


 @jit()
 def _edge_correct(this_result, this_X, smax, smin, ch0, ch1):
     if smax > 0:
-        tail = _toeplitz_dot(this_X[-1:-smax:-1, ch0],
-                             this_X[-1:-smax:-1, ch1])
+        tail = _toeplitz_dot(this_X[-1:-smax:-1, ch0], this_X[-1:-smax:-1, ch1])
         if smin > 0:
-            tail = tail[smin - 1:, smin - 1:]
-        this_result[max(-smin + 1, 0):, max(-smin + 1, 0):] -= tail
+            tail = tail[smin - 1 :, smin - 1 :]
+        this_result[max(-smin + 1, 0) :, max(-smin + 1, 0) :] -= tail
     if smin < 0:
-        head = _toeplitz_dot(this_X[:-smin, ch0],
-                             this_X[:-smin, ch1])[::-1, ::-1]
+        head = _toeplitz_dot(this_X[:-smin, ch0], this_X[:-smin, ch1])[::-1, ::-1]
         if smax < 0:
             head = head[:smax, :smax]
         this_result[:-smin, :-smin] -= head
@@ -137,28 +148,23 @@ def _toeplitz_dot(a, b):
     assert a.shape == b.shape and a.ndim == 1
     out = np.outer(a, b)
     for ii in range(1, len(a)):
-        out[ii, ii:] += out[ii - 1, ii - 1:-1]
-        out[ii + 1:, ii] += out[ii:-1, ii - 1]
+        out[ii, ii:] += out[ii - 1, ii - 1 : -1]
+        out[ii + 1 :, ii] += out[ii:-1, ii - 1]
     return out


-def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct',
-                           normed=False):
+def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method="direct", normed=False):
     """Compute regularization parameter from neighbors."""
-    from scipy import linalg
-    from scipy.sparse.csgraph import laplacian
-    known_types = ('ridge', 'laplacian')
+    known_types = ("ridge", "laplacian")
     if isinstance(reg_type, str):
         reg_type = (reg_type,) * 2
     if len(reg_type) != 2:
-        raise ValueError('reg_type must have two elements, got %s'
-                         % (len(reg_type),))
+        raise ValueError(f"reg_type must have two elements, got {len(reg_type)}")
     for r in reg_type:
         if r not in known_types:
-            raise ValueError('reg_type entries must be one of %s, got %s'
-                             % (known_types, r))
-    reg_time = (reg_type[0] == 'laplacian' and n_delays > 1)
-    reg_chs = (reg_type[1] == 'laplacian' and n_ch_x > 1)
+            raise ValueError(f"reg_type entries must be one of {known_types}, got {r}")
+    reg_time = reg_type[0] == "laplacian" and n_delays > 1
+    reg_chs = reg_type[1] == "laplacian" and n_ch_x > 1
     if not reg_time and not reg_chs:
         return np.eye(n_ch_x * n_delays)
     # regularize time
@@ -167,7 +173,7 @@ def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct',
         stride = n_delays + 1
         reg.flat[1::stride] += -1
         reg.flat[n_delays::stride] += -1
-        reg.flat[n_delays + 1:-n_delays - 1:stride] += 1
+        reg.flat[n_delays + 1 : -n_delays - 1 : stride] += 1
         args = [reg] * n_ch_x
         reg = linalg.block_diag(*args)
     else:
@@ -179,12 +185,12 @@ def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct',
         row_offset = block * n_ch_x
         stride = n_delays * n_ch_x + 1
         reg.flat[n_delays:-row_offset:stride] += -1
-        reg.flat[n_delays + row_offset::stride] += 1
+        reg.flat[n_delays + row_offset :: stride] += 1
         reg.flat[row_offset:-n_delays:stride] += -1
-        reg.flat[:-(n_delays + row_offset):stride] += 1
+        reg.flat[: -(n_delays + row_offset) : stride] += 1
     assert np.array_equal(reg[::-1, ::-1], reg)

-    if method == 'direct':
+    if method == "direct":
         if normed:
             norm = np.sqrt(np.diag(reg))
             reg /= norm
@@ -201,7 +207,6 @@ def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct',
 def _fit_corrs(x_xt, x_y, n_ch_x, reg_type, alpha, n_ch_in):
     """Fit the model using correlation matrices."""
     # do the regularized solving
-    from scipy import linalg
     n_ch_out = x_y.shape[1]
     assert x_y.shape[0] % n_ch_x == 0
     n_delays = x_y.shape[0] // n_ch_x
@@ -212,16 +217,18 @@ def _fit_corrs(x_xt, x_y, n_ch_x, reg_type, alpha, n_ch_in):
         # Note: we must use overwrite_a=False in order to be able to
         # use the fall-back solution below in case a LinAlgError
         # is raised
-        w = linalg.solve(mat, x_y, overwrite_a=False, assume_a='pos')
+        w = linalg.solve(mat, x_y, overwrite_a=False, assume_a="pos")
     except np.linalg.LinAlgError:
-        warn('Singular matrix in solving dual problem. Using '
-             'least-squares solution instead.')
-        w = linalg.lstsq(mat, x_y, lapack_driver='gelsy')[0]
+        warn(
+            "Singular matrix in solving dual problem. Using "
+            "least-squares solution instead."
+        )
+        w = linalg.lstsq(mat, x_y, lapack_driver="gelsy")[0]
     w = w.T.reshape([n_ch_out, n_ch_in, n_delays])
     return w


-class TimeDelayingRidge(BaseEstimator):
+class TimeDelayingRidge(RegressorMixin, BaseEstimator):
     """Ridge regression of data with time delays.

     Parameters
@@ -269,29 +276,91 @@ class TimeDelayingRidge(BaseEstimator):
         auto- and cross-correlations.
""" - _estimator_type = "regressor" - - def __init__(self, tmin, tmax, sfreq, alpha=0., reg_type='ridge', - fit_intercept=True, n_jobs=None, edge_correction=True): - if tmin > tmax: - raise ValueError('tmin must be <= tmax, got %s and %s' - % (tmin, tmax)) - self.tmin = float(tmin) - self.tmax = float(tmax) - self.sfreq = float(sfreq) - self.alpha = float(alpha) + def __init__( + self, + tmin, + tmax, + sfreq, + alpha=0.0, + reg_type="ridge", + fit_intercept=True, + n_jobs=None, + edge_correction=True, + ): + self.tmin = tmin + self.tmax = tmax + self.sfreq = sfreq + self.alpha = alpha self.reg_type = reg_type self.fit_intercept = fit_intercept self.edge_correction = edge_correction self.n_jobs = n_jobs + def __sklearn_tags__(self): + """...""" + tags = super().__sklearn_tags__() + tags.input_tags.three_d_array = True + tags.target_tags.one_d_labels = True + tags.target_tags.two_d_labels = True + tags.target_tags.multi_output = True + return tags + @property def _smin(self): - return int(round(self.tmin * self.sfreq)) + return int(round(self.tmin_ * self.sfreq_)) @property def _smax(self): - return int(round(self.tmax * self.sfreq)) + 1 + return int(round(self.tmax_ * self.sfreq_)) + 1 + + def _check_data(self, X, y=None, reset=False): + if reset: + X, y = validate_data( + self, + X=X, + y=y, + reset=reset, + validate_separately=( # to take care of 3D y + dict(allow_nd=True), + dict(allow_nd=True, ensure_2d=False), + ), + ) + if X.ndim == 3: + assert y.ndim == 3 + assert X.shape[:2] == y.shape[:2] + else: + if y.ndim == 1: + y = y[:, np.newaxis] + assert y.ndim == 2 + _check_option("y.shape[0]", y.shape[0], (X.shape[0],)) + else: + X = validate_data(self, X=X, allow_nd=True, ensure_2d=False, reset=reset) + # Because when ensure_2d=True, sklearn takes n_features from X.shape[1], + # when we need X.shape[-1]. So we ensure 2D and check features ourselves. + if X.ndim == 1: + raise ValueError( + "Reshape your data either using array.reshape(-1, 1) if " + "your data has a single feature or array.reshape(1, -1) " + "if it contains a single sample." + ) + _check_n_features_3d(self, X, reset) + + return X, y + + def _validate_params(self, X): + self.tmin_ = float(self.tmin) + self.tmax_ = float(self.tmax) + self.sfreq_ = float(self.sfreq) + self.alpha_ = float(self.alpha) + if self.tmin_ > self.tmax_: + raise ValueError(f"tmin must be <= tmax, got {self.tmin_} and {self.tmax_}") + n_delays = self._smax - self._smin + min_samples = (n_delays + 1) // 2 + if X.shape[0] < min_samples: + raise ValueError( + f"Got n_samples = {X.shape[0]}, but at least {min_samples} " + f"are required to estimate {n_delays} delays." + ) def fit(self, X, y): """Estimate the coefficients of the linear model. @@ -308,25 +377,28 @@ def fit(self, X, y): self : instance of TimeDelayingRidge Returns the modified instance. """ - if X.ndim == 3: - assert y.ndim == 3 - assert X.shape[:2] == y.shape[:2] - else: - assert X.ndim == 2 and y.ndim == 2 - assert X.shape[0] == y.shape[0] + X, y = self._check_data(X, y, reset=True) + self._validate_params(X) # These are split into two functions because it's possible that we # might want to allow people to do them separately (e.g., to test # different regularization parameters). 
self.cov_, x_y_, n_ch_x, X_offset, y_offset = _compute_corrs( - X, y, self._smin, self._smax, self.n_jobs, self.fit_intercept, - self.edge_correction) - self.coef_ = _fit_corrs(self.cov_, x_y_, n_ch_x, - self.reg_type, self.alpha, n_ch_x) + X, + y, + self._smin, + self._smax, + self.n_jobs, + self.fit_intercept, + self.edge_correction, + ) + self.coef_ = _fit_corrs( + self.cov_, x_y_, n_ch_x, self.reg_type, self.alpha_, n_ch_x + ) # This is the sklearn formula from LinearModel (will be 0. for no fit) if self.fit_intercept: self.intercept_ = y_offset - np.dot(X_offset, self.coef_.sum(-1).T) else: - self.intercept_ = 0. + self.intercept_ = 0.0 return self def predict(self, X): @@ -342,8 +414,8 @@ def predict(self, X): X : ndarray The predicted response. """ - from scipy.signal import fftconvolve - + check_is_fitted(self) + X, _ = self._check_data(X) if X.ndim == 2: X = X[:, np.newaxis, :] singleton = True @@ -356,9 +428,10 @@ def predict(self, X): for oi in range(self.coef_.shape[0]): for fi in range(self.coef_.shape[1]): temp = fftconvolve(X[:, ei, fi], self.coef_[oi, fi]) - temp = temp[max(-smin, 0):][:len(out) - offset] - out[offset:len(temp) + offset, ei, oi] += temp + temp = temp[max(-smin, 0) :][: len(out) - offset] + out[offset : len(temp) + offset, ei, oi] += temp out += self.intercept_ if singleton: out = out[:, 0, :] + out = out.squeeze() return out diff --git a/mne/decoding/time_frequency.py b/mne/decoding/time_frequency.py index 330cc1ed5c8..29232aaeb9f 100644 --- a/mne/decoding/time_frequency.py +++ b/mne/decoding/time_frequency.py @@ -1,16 +1,18 @@ -# Author: Jean-Remi King -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np -from .mixin import TransformerMixin -from .base import BaseEstimator +from sklearn.base import BaseEstimator +from sklearn.utils.validation import check_is_fitted + from ..time_frequency.tfr import _compute_tfr -from ..utils import fill_doc, _check_option, verbose +from ..utils import _check_option, fill_doc +from .transformer import MNETransformerMixin @fill_doc -class TimeFrequency(TransformerMixin, BaseEstimator): +class TimeFrequency(MNETransformerMixin, BaseEstimator): """Time frequency transformer. Time-frequency transform of times series along the last axis. @@ -59,15 +61,20 @@ class TimeFrequency(TransformerMixin, BaseEstimator): mne.time_frequency.tfr_multitaper """ - @verbose - def __init__(self, freqs, sfreq=1.0, method='morlet', n_cycles=7.0, - time_bandwidth=None, use_fft=True, decim=1, output='complex', - n_jobs=1, verbose=None): # noqa: D102 + def __init__( + self, + freqs, + sfreq=1.0, + method="morlet", + n_cycles=7.0, + time_bandwidth=None, + use_fft=True, + decim=1, + output="complex", + n_jobs=1, + verbose=None, + ): """Init TimeFrequency transformer.""" - # Check non-average output - output = _check_option('output', output, - ['complex', 'power', 'phase']) - self.freqs = freqs self.sfreq = sfreq self.method = method @@ -80,6 +87,16 @@ def __init__(self, freqs, sfreq=1.0, method='morlet', n_cycles=7.0, self.n_jobs = n_jobs self.verbose = verbose + def __sklearn_tags__(self): + """Return sklearn tags.""" + out = super().__sklearn_tags__() + from sklearn.utils import TransformerTags + + if out.transformer_tags is None: + out.transformer_tags = TransformerTags() + out.transformer_tags.preserves_dtype = [] # real->complex + return out + def fit_transform(self, X, y=None): """Time-frequency transform of times series along the last axis. 
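# Hedged usage sketch for the TimeFrequency transformer above (synthetic data;
# the frequency grid is illustrative):
import numpy as np
from mne.decoding import TimeFrequency

rng = np.random.default_rng(0)
X = rng.standard_normal((5, 3, 200))                 # (n_epochs, n_channels, n_times)
tf = TimeFrequency(freqs=np.arange(4, 20, 2), sfreq=100.0, output="power")
Xt = tf.fit_transform(X)
print(Xt.shape)                                      # (5, 3, 8, 200): a freqs axis is added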
@@ -114,6 +131,10 @@ def fit(self, X, y=None): # noqa: D401 self : object Return self. """ + # Check non-average output + _check_option("output", self.output, ["complex", "power", "phase"]) + self._check_data(X, y=y, fit=True) + self.fitted_ = True return self def transform(self, X): @@ -121,26 +142,38 @@ def transform(self, X): Parameters ---------- - X : array, shape (n_samples, n_channels, n_times) + X : array, shape (n_samples, [n_channels, ]n_times) The training data samples. The channel dimension can be zero- or 1-dimensional. Returns ------- - Xt : array, shape (n_samples, n_channels, n_freqs, n_times) + Xt : array, shape (n_samples, [n_channels, ]n_freqs, n_times) The time-frequency transform of the data, where n_channels can be zero- or 1-dimensional. """ + X = self._check_data(X, atleast_3d=False) + check_is_fitted(self, "fitted_") # Ensure 3-dimensional X shape = X.shape[1:-1] if not shape: X = X[:, np.newaxis, :] # Compute time-frequency - Xt = _compute_tfr(X, self.freqs, self.sfreq, self.method, - self.n_cycles, True, self.time_bandwidth, - self.use_fft, self.decim, self.output, self.n_jobs, - self.verbose) + Xt = _compute_tfr( + X, + freqs=self.freqs, + sfreq=self.sfreq, + method=self.method, + n_cycles=self.n_cycles, + zero_mean=True, + time_bandwidth=self.time_bandwidth, + use_fft=self.use_fft, + decim=self.decim, + output=self.output, + n_jobs=self.n_jobs, + verbose=self.verbose, + ) # Back to original shape if not shape: diff --git a/mne/decoding/transformer.py b/mne/decoding/transformer.py index 4bf73cc5f5d..b02ff8d8deb 100644 --- a/mne/decoding/transformer.py +++ b/mne/decoding/transformer.py @@ -1,25 +1,80 @@ -# -*- coding: utf-8 -*- -# Authors: Mainak Jas -# Alexandre Gramfort -# Romain Trachel -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np - -from .mixin import TransformerMixin -from .base import BaseEstimator - -from .. import pick_types +from sklearn.base import BaseEstimator, TransformerMixin, check_array, clone +from sklearn.preprocessing import RobustScaler, StandardScaler +from sklearn.utils import check_X_y +from sklearn.utils.validation import check_is_fitted + +from .._fiff.pick import ( + _pick_data_channels, + _picks_by_type, + _picks_to_idx, + pick_info, +) +from ..cov import _check_scalings_user +from ..epochs import BaseEpochs from ..filter import filter_data from ..time_frequency import psd_array_multitaper -from ..utils import fill_doc, _check_option, _validate_type, verbose -from ..io.pick import (pick_info, _pick_data_channels, _picks_by_type, - _picks_to_idx) -from ..cov import _check_scalings_user +from ..utils import _check_option, _validate_type, check_version, fill_doc +from ._fixes import validate_data # TODO VERSION remove with sklearn 1.4+ + + +class MNETransformerMixin(TransformerMixin): + """TransformerMixin plus some helpers.""" + + def _check_data( + self, + epochs_data, + *, + y=None, + atleast_3d=True, + fit=False, + return_y=False, + multi_output=False, + check_n_features=True, + ): + # Sklearn calls asarray under the hood which works, but elsewhere they check for + # __len__ then look at the size of obj[0]... which is an epoch of shape (1, ...) + # rather than what they expect (shape (...)). So we explicitly get the NumPy + # array to make everyone happy. 
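# Illustrative, standalone sketch of what the validation above amounts to for
# plain arrays (not code from this PR): n-dimensional input passes through
# sklearn's check_array with allow_nd=True, and 2D input later gains a trailing
# singleton axis via np.atleast_3d.
import numpy as np
from sklearn.utils import check_array

X3 = check_array(np.zeros((4, 2, 10)), dtype=np.float64, allow_nd=True, order="C")
X2 = np.atleast_3d(check_array(np.zeros((4, 2)), dtype=np.float64, order="C"))
print(X3.shape, X2.shape)                            # (4, 2, 10) (4, 2, 1)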
+ if isinstance(epochs_data, BaseEpochs): + epochs_data = epochs_data.get_data(copy=False) + kwargs = dict(dtype=np.float64, allow_nd=True, order="C") + if check_version("sklearn", "1.4"): # TODO VERSION sklearn 1.4+ + kwargs["force_writeable"] = True + if hasattr(self, "n_features_in_") and check_n_features: + if y is None: + epochs_data = validate_data( + self, + epochs_data, + **kwargs, + reset=fit, + ) + else: + epochs_data, y = validate_data( + self, + epochs_data, + y, + **kwargs, + reset=fit, + ) + elif y is None: + epochs_data = check_array(epochs_data, **kwargs) + else: + epochs_data, y = check_X_y( + X=epochs_data, y=y, multi_output=multi_output, **kwargs + ) + if fit: + self.n_features_in_ = epochs_data.shape[1] + if atleast_3d: + epochs_data = np.atleast_3d(epochs_data) + return (epochs_data, y) if return_y else epochs_data -class _ConstantScaler(): +class _ConstantScaler: """Scale channel types using constant values.""" def __init__(self, info, scalings, do_scaling=True): @@ -29,15 +84,17 @@ def __init__(self, info, scalings, do_scaling=True): def fit(self, X, y=None): scalings = _check_scalings_user(self._scalings) - picks_by_type = _picks_by_type(pick_info( - self._info, _pick_data_channels(self._info, exclude=()))) + picks_by_type = _picks_by_type( + pick_info(self._info, _pick_data_channels(self._info, exclude=())) + ) std = np.ones(sum(len(p[1]) for p in picks_by_type)) if X.shape[1] != len(std): - raise ValueError('info had %d data channels but X has %d channels' - % (len(std), len(X))) + raise ValueError( + f"info had {len(std)} data channels but X has {len(X)} channels" + ) if self._do_scaling: # this is silly, but necessary for completeness for kind, picks in picks_by_type: - std[picks] = 1. / scalings[kind] + std[picks] = 1.0 / scalings[kind] self.std_ = std self.mean_ = np.zeros_like(std) return self @@ -54,8 +111,9 @@ def fit_transform(self, X, y=None): def _sklearn_reshape_apply(func, return_result, X, *args, **kwargs): """Reshape epochs and apply function.""" - if not isinstance(X, np.ndarray): - raise ValueError("data should be an np.ndarray, got %s." % type(X)) + _validate_type(X, np.ndarray, "X") + if X.size == 0: + return X.copy() if return_result else None orig_shape = X.shape X = np.reshape(X.transpose(0, 2, 1), (-1, orig_shape[1])) X = func(X, *args, **kwargs) @@ -66,8 +124,8 @@ def _sklearn_reshape_apply(func, return_result, X, *args, **kwargs): @fill_doc -class Scaler(TransformerMixin, BaseEstimator): - u"""Standardize channel data. +class Scaler(MNETransformerMixin, BaseEstimator): + """Standardize channel data. This class scales data for each channel. It differs from scikit-learn classes (e.g., :class:`sklearn.preprocessing.StandardScaler`) in that @@ -102,32 +160,12 @@ class Scaler(TransformerMixin, BaseEstimator): if ``scalings`` is a dict or None). 
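# Minimal sketch of the reshape trick _sklearn_reshape_apply uses above
# (illustrative only): channels become scikit-learn "features" by folding
# epochs and times into the sample axis, then the original shape is restored.
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.random.default_rng(0).standard_normal((4, 3, 10))   # (epochs, channels, times)
flat = X.transpose(0, 2, 1).reshape(-1, X.shape[1])        # (epochs * times, channels)
scaled = StandardScaler().fit_transform(flat)
back = scaled.reshape(X.shape[0], X.shape[2], X.shape[1]).transpose(0, 2, 1)
assert back.shape == X.shape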
""" - def __init__(self, info=None, scalings=None, with_mean=True, - with_std=True): # noqa: D102 + def __init__(self, info=None, scalings=None, with_mean=True, with_std=True): self.info = info self.with_mean = with_mean self.with_std = with_std self.scalings = scalings - if not (scalings is None or isinstance(scalings, (dict, str))): - raise ValueError('scalings type should be dict, str, or None, ' - 'got %s' % type(scalings)) - if isinstance(scalings, str): - _check_option('scalings', scalings, ['mean', 'median']) - if scalings is None or isinstance(scalings, dict): - if info is None: - raise ValueError('Need to specify "info" if scalings is' - '%s' % type(scalings)) - self._scaler = _ConstantScaler(info, scalings, self.with_std) - elif scalings == 'mean': - from sklearn.preprocessing import StandardScaler - self._scaler = StandardScaler( - with_mean=self.with_mean, with_std=self.with_std) - else: # scalings == 'median': - from sklearn.preprocessing import RobustScaler - self._scaler = RobustScaler( - with_centering=self.with_mean, with_scaling=self.with_std) - def fit(self, epochs_data, y=None): """Standardize data across channels. @@ -143,11 +181,30 @@ def fit(self, epochs_data, y=None): self : instance of Scaler The modified instance. """ - _validate_type(epochs_data, np.ndarray, 'epochs_data') - if epochs_data.ndim == 2: - epochs_data = epochs_data[..., np.newaxis] + epochs_data = self._check_data(epochs_data, y=y, fit=True, multi_output=True) assert epochs_data.ndim == 3, epochs_data.shape - _sklearn_reshape_apply(self._scaler.fit, False, epochs_data, y=y) + + _validate_type(self.scalings, (dict, str, type(None)), "scalings") + if isinstance(self.scalings, str): + _check_option( + "scalings", self.scalings, ["mean", "median"], extra="when str" + ) + if self.scalings is None or isinstance(self.scalings, dict): + if self.info is None: + raise ValueError( + f'Need to specify "info" if scalings is {type(self.scalings)}' + ) + self.scaler_ = _ConstantScaler(self.info, self.scalings, self.with_std) + elif self.scalings == "mean": + self.scaler_ = StandardScaler( + with_mean=self.with_mean, with_std=self.with_std + ) + else: # scalings == 'median': + self.scaler_ = RobustScaler( + with_centering=self.with_mean, with_scaling=self.with_std + ) + + _sklearn_reshape_apply(self.scaler_.fit, False, epochs_data, y=y) return self def transform(self, epochs_data): @@ -168,14 +225,14 @@ def transform(self, epochs_data): This function makes a copy of the data before the operations and the memory usage may be large with big data. """ - _validate_type(epochs_data, np.ndarray, 'epochs_data') + check_is_fitted(self, "scaler_") + epochs_data = self._check_data(epochs_data, atleast_3d=False) if epochs_data.ndim == 2: # can happen with SlidingEstimator if self.info is not None: - assert len(self.info['ch_names']) == epochs_data.shape[1] + assert len(self.info["ch_names"]) == epochs_data.shape[1] epochs_data = epochs_data[..., np.newaxis] assert epochs_data.ndim == 3, epochs_data.shape - return _sklearn_reshape_apply(self._scaler.transform, True, - epochs_data) + return _sklearn_reshape_apply(self.scaler_.transform, True, epochs_data) def fit_transform(self, epochs_data, y=None): """Fit to data, then transform it. @@ -208,7 +265,7 @@ def inverse_transform(self, epochs_data): Parameters ---------- - epochs_data : array, shape (n_epochs, n_channels, n_times) + epochs_data : array, shape ([n_epochs, ]n_channels, n_times) The data. 
Returns @@ -221,12 +278,20 @@ def inverse_transform(self, epochs_data): This function makes a copy of the data before the operations and the memory usage may be large with big data. """ + epochs_data = self._check_data(epochs_data, atleast_3d=False) + squeeze = False + # Can happen with CSP + if epochs_data.ndim == 2: + squeeze = True + epochs_data = epochs_data[..., np.newaxis] assert epochs_data.ndim == 3, epochs_data.shape - return _sklearn_reshape_apply(self._scaler.inverse_transform, True, - epochs_data) + out = _sklearn_reshape_apply(self.scaler_.inverse_transform, True, epochs_data) + if squeeze: + out = out[..., 0] + return out -class Vectorizer(TransformerMixin): +class Vectorizer(MNETransformerMixin, BaseEstimator): """Transform n-dimensional array into 2D array of n_samples by n_features. This class reshapes an n-dimensional array into an n_samples * n_features @@ -239,8 +304,10 @@ class Vectorizer(TransformerMixin): Examples -------- - clf = make_pipeline(SpatialFilter(), _XdawnTransformer(), Vectorizer(), - LogisticRegression()) + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> clf = make_pipeline(Vectorizer(), StandardScaler(), LogisticRegression()) """ def fit(self, X, y=None): @@ -261,7 +328,7 @@ def fit(self, X, y=None): self : instance of Vectorizer Return the modified instance. """ - X = np.asarray(X) + X = self._check_data(X, y=y, atleast_3d=False, fit=True, check_n_features=False) self.features_shape_ = X.shape[1:] return self @@ -281,10 +348,9 @@ def transform(self, X): X : array, shape (n_samples, n_features) The transformed data. """ - X = np.asarray(X) + X = self._check_data(X, atleast_3d=False) if X.shape[1:] != self.features_shape_: - raise ValueError("Shape of X used in fit and transform must be " - "same") + raise ValueError("Shape of X used in fit and transform must be same") return X.reshape(len(X), -1) def fit_transform(self, X, y=None): @@ -321,15 +387,16 @@ def inverse_transform(self, X): The data transformed into shape as used in fit. The first dimension is of length n_samples. """ - X = np.asarray(X) + X = self._check_data(X, atleast_3d=False, check_n_features=False) if X.ndim not in (2, 3): - raise ValueError("X should be of 2 or 3 dimensions but has shape " - "%s" % (X.shape,)) + raise ValueError( + f"X should be of 2 or 3 dimensions but has shape {X.shape}" + ) return X.reshape(X.shape[:-1] + self.features_shape_) @fill_doc -class PSDEstimator(TransformerMixin): +class PSDEstimator(MNETransformerMixin, BaseEstimator): """Compute power spectral density (PSD) using a multi-taper method. Parameters @@ -351,7 +418,6 @@ class PSDEstimator(TransformerMixin): n_jobs : int Number of parallel jobs to use (only used if adaptive=True). 
%(normalization)s - %(verbose)s See Also -------- @@ -361,10 +427,17 @@ class PSDEstimator(TransformerMixin): mne.Evoked.compute_psd """ - @verbose - def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None, - adaptive=False, low_bias=True, n_jobs=None, - normalization='length', *, verbose=None): # noqa: D102 + def __init__( + self, + sfreq=2 * np.pi, + fmin=0, + fmax=np.inf, + bandwidth=None, + adaptive=False, + low_bias=True, + n_jobs=None, + normalization="length", + ): self.sfreq = sfreq self.fmin = fmin self.fmax = fmax @@ -374,7 +447,14 @@ def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None, self.n_jobs = n_jobs self.normalization = normalization - def fit(self, epochs_data, y): + def __sklearn_tags__(self): + """...""" + tags = super().__sklearn_tags__() + tags.target_tags.required = False + tags.requires_fit = False + return tags + + def fit(self, epochs_data, y=None): """Compute power spectral density (PSD) using a multi-taper method. Parameters @@ -389,10 +469,8 @@ def fit(self, epochs_data, y): self : instance of PSDEstimator The modified instance. """ - if not isinstance(epochs_data, np.ndarray): - raise ValueError("epochs_data should be of type ndarray (got %s)." - % type(epochs_data)) - + self._check_data(epochs_data, y=y, fit=True) + self.fitted_ = True # sklearn compliance return self def transform(self, epochs_data): @@ -408,19 +486,23 @@ def transform(self, epochs_data): psd : array, shape (n_signals, n_freqs) or (n_freqs,) The computed PSD. """ - if not isinstance(epochs_data, np.ndarray): - raise ValueError("epochs_data should be of type ndarray (got %s)." - % type(epochs_data)) + epochs_data = self._check_data(epochs_data) psd, _ = psd_array_multitaper( - epochs_data, sfreq=self.sfreq, fmin=self.fmin, fmax=self.fmax, - bandwidth=self.bandwidth, adaptive=self.adaptive, - low_bias=self.low_bias, normalization=self.normalization, - n_jobs=self.n_jobs) + epochs_data, + sfreq=self.sfreq, + fmin=self.fmin, + fmax=self.fmax, + bandwidth=self.bandwidth, + adaptive=self.adaptive, + low_bias=self.low_bias, + normalization=self.normalization, + n_jobs=self.n_jobs, + ) return psd @fill_doc -class FilterEstimator(TransformerMixin): +class FilterEstimator(MNETransformerMixin, BaseEstimator): """Estimator to filter RtEpochs. Applies a zero-phase low-pass, high-pass, band-pass, or band-stop @@ -450,14 +532,12 @@ class FilterEstimator(TransformerMixin): Number of jobs to run in parallel. Can be 'cuda' if ``cupy`` is installed properly and method='fir'. method : str - 'fir' will use overlap-add FIR filtering, 'iir' will use IIR - forward-backward filtering (via filtfilt). + 'fir' will use overlap-add FIR filtering, 'iir' will use IIR filtering. iir_params : dict | None Dictionary of parameters to use for IIR filtering. See mne.filter.construct_iir_filter for details. If iir_params is None and method="iir", 4th order Butterworth will be used. %(fir_design)s - %(verbose)s See Also -------- @@ -465,20 +545,29 @@ class FilterEstimator(TransformerMixin): Notes ----- - This is primarily meant for use in conjunction with - :class:`mne_realtime.RtEpochs`. In general it is not recommended in a - normal processing pipeline as it may result in edge artifacts. Use with - caution. + This is primarily meant for use in realtime applications. + In general it is not recommended in a normal processing pipeline as it may result + in edge artifacts. Use with caution. 
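# Hedged pipeline sketch (synthetic data, illustrative parameters): these
# transformers are designed to be chained with scikit-learn estimators, e.g.
# multitaper PSD features followed by a linear classifier.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from mne.decoding import PSDEstimator, Vectorizer

rng = np.random.default_rng(0)
X = rng.standard_normal((20, 4, 128))                # (n_epochs, n_channels, n_times)
y = rng.integers(0, 2, 20)
clf = make_pipeline(
    PSDEstimator(sfreq=128.0, fmin=1.0, fmax=40.0), Vectorizer(), LogisticRegression()
)
clf.fit(X, y)
print(clf.predict(X[:2]).shape)                      # (2,)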
""" - def __init__(self, info, l_freq, h_freq, picks=None, filter_length='auto', - l_trans_bandwidth='auto', h_trans_bandwidth='auto', - n_jobs=None, method='fir', iir_params=None, - fir_design='firwin', *, verbose=None): # noqa: D102 + def __init__( + self, + info, + l_freq, + h_freq, + picks=None, + filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + n_jobs=None, + method="fir", + iir_params=None, + fir_design="firwin", + ): self.info = info self.l_freq = l_freq self.h_freq = h_freq - self.picks = _picks_to_idx(info, picks) + self.picks = picks self.filter_length = filter_length self.l_trans_bandwidth = l_trans_bandwidth self.h_trans_bandwidth = h_trans_bandwidth @@ -502,38 +591,27 @@ def fit(self, epochs_data, y): self : instance of FilterEstimator The modified instance. """ - if not isinstance(epochs_data, np.ndarray): - raise ValueError("epochs_data should be of type ndarray (got %s)." - % type(epochs_data)) - - if self.picks is None: - self.picks = pick_types(self.info, meg=True, eeg=True, - ref_meg=False, exclude=[]) + self.picks_ = _picks_to_idx(self.info, self.picks) + self._check_data(epochs_data, y=y, fit=True) if self.l_freq == 0: self.l_freq = None - if self.h_freq is not None and self.h_freq > (self.info['sfreq'] / 2.): - self.h_freq = None - if self.l_freq is not None and not isinstance(self.l_freq, float): - self.l_freq = float(self.l_freq) - if self.h_freq is not None and not isinstance(self.h_freq, float): - self.h_freq = float(self.h_freq) - - if self.info['lowpass'] is None or (self.h_freq is not None and - (self.l_freq is None or - self.l_freq < self.h_freq) and - self.h_freq < - self.info['lowpass']): + + if self.info["lowpass"] is None or ( + self.h_freq is not None + and (self.l_freq is None or self.l_freq < self.h_freq) + and self.h_freq < self.info["lowpass"] + ): with self.info._unlock(): - self.info['lowpass'] = self.h_freq + self.info["lowpass"] = self.h_freq - if self.info['highpass'] is None or (self.l_freq is not None and - (self.h_freq is None or - self.l_freq < self.h_freq) and - self.l_freq > - self.info['highpass']): + if self.info["highpass"] is None or ( + self.l_freq is not None + and (self.h_freq is None or self.l_freq < self.h_freq) + and self.l_freq > self.info["highpass"] + ): with self.info._unlock(): - self.info['highpass'] = self.l_freq + self.info["highpass"] = self.l_freq return self @@ -550,19 +628,25 @@ def transform(self, epochs_data): X : array, shape (n_epochs, n_channels, n_times) The data after filtering. """ - if not isinstance(epochs_data, np.ndarray): - raise ValueError("epochs_data should be of type ndarray (got %s)." - % type(epochs_data)) - epochs_data = np.atleast_3d(epochs_data) return filter_data( - epochs_data, self.info['sfreq'], self.l_freq, self.h_freq, - self.picks, self.filter_length, self.l_trans_bandwidth, - self.h_trans_bandwidth, method=self.method, - iir_params=self.iir_params, n_jobs=self.n_jobs, copy=False, - fir_design=self.fir_design, verbose=False) - - -class UnsupervisedSpatialFilter(TransformerMixin, BaseEstimator): + self._check_data(epochs_data), + self.info["sfreq"], + self.l_freq, + self.h_freq, + self.picks_, + self.filter_length, + self.l_trans_bandwidth, + self.h_trans_bandwidth, + method=self.method, + iir_params=self.iir_params, + n_jobs=self.n_jobs, + copy=False, + fir_design=self.fir_design, + verbose=False, + ) + + +class UnsupervisedSpatialFilter(MNETransformerMixin, BaseEstimator): """Use unsupervised spatial filtering across time and samples. 
Parameters @@ -574,17 +658,7 @@ class UnsupervisedSpatialFilter(TransformerMixin, BaseEstimator): (e.g. epochs). """ - def __init__(self, estimator, average=False): # noqa: D102 - # XXX: Use _check_estimator #3381 - for attr in ('fit', 'transform', 'fit_transform'): - if not hasattr(estimator, attr): - raise ValueError('estimator must be a scikit-learn ' - 'transformer, missing %s method' % attr) - - if not isinstance(average, bool): - raise ValueError("average parameter must be of bool type, got " - "%s instead" % type(bool)) - + def __init__(self, estimator, average=False): self.estimator = estimator self.average = average @@ -603,14 +677,25 @@ def fit(self, X, y=None): self : instance of UnsupervisedSpatialFilter Return the modified instance. """ + # sklearn.utils.estimator_checks.check_estimator(self.estimator) is probably + # too strict for us, given that we don't fully adhere yet, so just check attrs + for attr in ("fit", "transform", "fit_transform"): + if not hasattr(self.estimator, attr): + raise ValueError( + "estimator must be a scikit-learn " + f"transformer, missing {attr} method" + ) + _validate_type(self.average, bool, "average") + X = self._check_data(X, y=y, fit=True) if self.average: X = np.mean(X, axis=0).T else: n_epochs, n_channels, n_times = X.shape # trial as time samples - X = np.transpose(X, (1, 0, 2)).reshape((n_channels, n_epochs * - n_times)).T - self.estimator.fit(X) + X = np.transpose(X, (1, 0, 2)).reshape((n_channels, n_epochs * n_times)).T + + self.estimator_ = clone(self.estimator) + self.estimator_.fit(X) return self def fit_transform(self, X, y=None): @@ -643,7 +728,9 @@ def transform(self, X): X : array, shape (n_epochs, n_channels, n_times) The transformed data. """ - return self._apply_method(X, 'transform') + check_is_fitted(self.estimator_) + X = self._check_data(X) + return self._apply_method(X, "transform") def inverse_transform(self, X): """Inverse transform the data to its original space. @@ -658,7 +745,7 @@ def inverse_transform(self, X): X : array, shape (n_epochs, n_channels, n_times) The transformed data. """ - return self._apply_method(X, 'inverse_transform') + return self._apply_method(X, "inverse_transform") def _apply_method(self, X, method): """Vectorize time samples as trials, apply method and reshape back. @@ -678,7 +765,7 @@ def _apply_method(self, X, method): X = np.transpose(X, [1, 0, 2]) X = np.reshape(X, [n_channels, n_epochs * n_times]).T # apply method - method = getattr(self.estimator, method) + method = getattr(self.estimator_, method) X = method(X) # put it back to n_epochs, n_dimensions X = np.reshape(X.T, [-1, n_epochs, n_times]).transpose([1, 0, 2]) @@ -686,7 +773,7 @@ def _apply_method(self, X, method): @fill_doc -class TemporalFilter(TransformerMixin): +class TemporalFilter(MNETransformerMixin, BaseEstimator): """Estimator to filter data array along the last dimension. Applies a zero-phase low-pass, high-pass, band-pass, or band-stop @@ -760,7 +847,6 @@ class TemporalFilter(TransformerMixin): attenuation using fewer samples than "firwin2". .. 
versionadded:: 0.15 - %(verbose)s See Also -------- @@ -769,12 +855,20 @@ class TemporalFilter(TransformerMixin): mne.filter.filter_data """ - @verbose - def __init__(self, l_freq=None, h_freq=None, sfreq=1.0, - filter_length='auto', l_trans_bandwidth='auto', - h_trans_bandwidth='auto', n_jobs=None, method='fir', - iir_params=None, fir_window='hamming', fir_design='firwin', - *, verbose=None): # noqa: D102 + def __init__( + self, + l_freq=None, + h_freq=None, + sfreq=1.0, + filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + n_jobs=None, + method="fir", + iir_params=None, + fir_window="hamming", + fir_design="firwin", + ): self.l_freq = l_freq self.h_freq = h_freq self.sfreq = sfreq @@ -787,16 +881,19 @@ def __init__(self, l_freq=None, h_freq=None, sfreq=1.0, self.fir_window = fir_window self.fir_design = fir_design - if not isinstance(self.n_jobs, int) and self.n_jobs == 'cuda': - raise ValueError('n_jobs must be int or "cuda", got %s instead.' - % type(self.n_jobs)) + def __sklearn_tags__(self): + """...""" + tags = super().__sklearn_tags__() + tags.target_tags.required = False + tags.requires_fit = False + return tags def fit(self, X, y=None): """Do nothing (for scikit-learn compatibility purposes). Parameters ---------- - X : array, shape (n_epochs, n_channels, n_times) or or shape (n_channels, n_times) + X : array, shape ([n_epochs, ]n_channels, n_times) The data to be filtered over the last dimension. The channels dimension can be zero when passing a 2D array. y : None @@ -806,7 +903,9 @@ def fit(self, X, y=None): ------- self : instance of TemporalFilter The modified instance. - """ # noqa: E501 + """ + self.fitted_ = True # sklearn compliance + self._check_data(X, y=y, atleast_3d=False, fit=True) return self def transform(self, X): @@ -814,7 +913,7 @@ def transform(self, X): Parameters ---------- - X : array, shape (n_epochs, n_channels, n_times) or shape (n_channels, n_times) + X : array, shape ([n_epochs, ]n_channels, n_times) The data to be filtered over the last dimension. The channels dimension can be zero when passing a 2D array. @@ -823,19 +922,30 @@ def transform(self, X): X : array The data after filtering. """ # noqa: E501 + X = self._check_data(X, atleast_3d=False) X = np.atleast_2d(X) if X.ndim > 3: - raise ValueError("Array must be of at max 3 dimensions instead " - "got %s dimensional matrix" % (X.ndim)) + raise ValueError( + "Array must be of at max 3 dimensions instead " + f"got {X.ndim} dimensional matrix" + ) shape = X.shape X = X.reshape(-1, shape[-1]) - X = filter_data(X, self.sfreq, self.l_freq, self.h_freq, - filter_length=self.filter_length, - l_trans_bandwidth=self.l_trans_bandwidth, - h_trans_bandwidth=self.h_trans_bandwidth, - n_jobs=self.n_jobs, method=self.method, - iir_params=self.iir_params, copy=False, - fir_window=self.fir_window, fir_design=self.fir_design) + X = filter_data( + X, + self.sfreq, + self.l_freq, + self.h_freq, + filter_length=self.filter_length, + l_trans_bandwidth=self.l_trans_bandwidth, + h_trans_bandwidth=self.h_trans_bandwidth, + n_jobs=self.n_jobs, + method=self.method, + iir_params=self.iir_params, + copy=False, + fir_window=self.fir_window, + fir_design=self.fir_design, + ) return X.reshape(shape) diff --git a/mne/decoding/xdawn.py b/mne/decoding/xdawn.py new file mode 100644 index 00000000000..a34d042e30a --- /dev/null +++ b/mne/decoding/xdawn.py @@ -0,0 +1,213 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
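# Hedged usage sketch for the TemporalFilter transformer defined above
# (synthetic data; the band edges are illustrative):
import numpy as np
from mne.decoding import TemporalFilter

rng = np.random.default_rng(0)
X = rng.standard_normal((6, 4, 1000))                # (n_epochs, n_channels, n_times)
band = TemporalFilter(l_freq=1.0, h_freq=30.0, sfreq=250.0)
Xf = band.fit_transform(X)
print(Xf.shape)                                      # unchanged shape, band-passed 1-30 Hz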
+ +import collections.abc as abc +from functools import partial + +import numpy as np + +from .._fiff.meas_info import Info +from ..cov import Covariance +from ..decoding._covs_ged import _xdawn_estimate +from ..decoding._mod_ged import _xdawn_mod +from ..decoding.base import _GEDTransformer +from ..utils import _validate_type, fill_doc + + +@fill_doc +class XdawnTransformer(_GEDTransformer): + """Implementation of the Xdawn Algorithm compatible with scikit-learn. + + Xdawn is a spatial filtering method designed to improve the signal + to signal + noise ratio (SSNR) of the event related responses. Xdawn was + originally designed for P300 evoked potential by enhancing the target + response with respect to the non-target response. This implementation is a + generalization to any type of event related response. + + .. note:: XdawnTransformer does not correct for epochs overlap. To correct + overlaps see `mne.preprocessing.Xdawn`. + + Parameters + ---------- + n_components : int (default 2) + The number of components to decompose the signals. + reg : float | str | None (default None) + If not None (same as ``'empirical'``, default), allow + regularization for covariance estimation. + If float, shrinkage is used (0 <= shrinkage <= 1). + For str options, ``reg`` will be passed to ``method`` to + :func:`mne.compute_covariance`. + signal_cov : None | Covariance | array, shape (n_channels, n_channels) + The signal covariance used for whitening of the data. + if None, the covariance is estimated from the epochs signal. + cov_method_params : dict | None + Parameters to pass to :func:`mne.compute_covariance`. + + .. versionadded:: 0.16 + restr_type : "restricting" | "whitening" | None + Restricting transformation for covariance matrices before performing + generalized eigendecomposition. + If "restricting" only restriction to the principal subspace of signal_cov + will be performed. + If "whitening", covariance matrices will be additionally rescaled according + to the whitening for the signal_cov. + If None, no restriction will be applied. Defaults to None. + + .. versionadded:: 1.11 + info : mne.Info | None + The mne.Info object with information about the sensors and methods of + measurement used for covariance estimation and generalized + eigendecomposition. + If None, one channel type and no projections will be assumed and if + rank is dict, it will be sum of ranks per channel type. + Defaults to None. + + .. versionadded:: 1.11 + %(rank_full)s + + .. versionadded:: 1.11 + + Attributes + ---------- + classes_ : array, shape (n_classes) + The event indices of the classes. + filters_ : array, shape (n_channels, n_channels) + The Xdawn components used to decompose the data for each event type. + patterns_ : array, shape (n_channels, n_channels) + The Xdawn patterns used to restore the signals for each event type. 
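# Hedged usage sketch (synthetic two-class data; the import path assumes the
# module added in this diff, mne/decoding/xdawn.py):
import numpy as np
from mne.decoding.xdawn import XdawnTransformer

rng = np.random.default_rng(0)
X = rng.standard_normal((40, 8, 100))                # (n_epochs, n_channels, n_samples)
y = np.repeat([0, 1], 20)
xd = XdawnTransformer(n_components=2).fit(X, y)
Xt = xd.transform(X)
print(Xt.shape)                                      # (40, n_components * n_classes, 100)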
+ + See Also + -------- + CSP, SPoC, SSD + """ + + def __init__( + self, + n_components=2, + reg=None, + signal_cov=None, + cov_method_params=None, + *, + restr_type=None, + info=None, + rank="full", + ): + self.n_components = n_components + self.signal_cov = signal_cov + self.reg = reg + self.cov_method_params = cov_method_params + self.restr_type = restr_type + self.info = info + self.rank = rank + + cov_callable = partial( + _xdawn_estimate, + reg=reg, + cov_method_params=cov_method_params, + R=signal_cov, + info=info, + rank=rank, + ) + super().__init__( + n_components=n_components, + cov_callable=cov_callable, + mod_ged_callable=_xdawn_mod, + dec_type="multi", + restr_type=restr_type, + ) + + def __sklearn_tags__(self): + """Tag the transformer.""" + tags = super().__sklearn_tags__() + tags.target_tags.required = True + return tags + + def _validate_params(self, X): + _validate_type(self.n_components, int, "n_components") + + # reg is validated in _regularized_covariance + + if self.signal_cov is not None: + if isinstance(self.signal_cov, Covariance): + self.signal_cov = self.signal_cov.data + elif not isinstance(self.signal_cov, np.ndarray): + raise ValueError("signal_cov should be mne.Covariance or np.ndarray") + if not np.array_equal(self.signal_cov.shape, np.tile(X.shape[1], 2)): + raise ValueError( + "signal_cov data should be of shape (n_channels, n_channels)" + ) + _validate_type(self.cov_method_params, (abc.Mapping, None), "cov_method_params") + _validate_type(self.info, (Info, None), "info") + + def fit(self, X, y=None): + """Fit Xdawn spatial filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_samples) + The target data. + y : array, shape (n_epochs,) | None + The target labels. If None, Xdawn fit on the average evoked. + + Returns + ------- + self : Xdawn instance + The Xdawn instance. + """ + X, y = self._check_data(X, y=y, fit=True, return_y=True) + # For test purposes + if y is None: + y = np.ones(len(X)) + self._validate_params(X) + + super().fit(X, y) + + return self + + def transform(self, X): + """Transform data with spatial filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_samples) + The target data. + + Returns + ------- + X : array, shape (n_epochs, n_components * n_classes, n_samples) + The transformed data. + """ + X = self._check_data(X) + X = super().transform(X) + return X + + def inverse_transform(self, X): + """Remove selected components from the signal. + + Given the unmixing matrix, transform data, zero out components, + and inverse transform the data. This procedure will reconstruct + the signals from which the dynamics described by the excluded + components is subtracted. + + Parameters + ---------- + X : array, shape (n_epochs, n_components * n_classes, n_times) + The transformed data. + + Returns + ------- + X : array, shape (n_epochs, n_channels * n_classes, n_times) + The inverse transform data. + """ + # Check size + X = self._check_data(X, check_n_features=False) + n_epochs, n_comp, n_times = X.shape + if n_comp != (self.n_components * len(self.classes_)): + raise ValueError( + f"X must have {self.n_components * len(self.classes_)} components, " + f"got {n_comp} instead." 
+ ) + pick_patterns = self._subset_multi_components(name="patterns") + # Transform + return np.dot(pick_patterns.T, X).transpose(1, 0, 2) diff --git a/mne/defaults.py b/mne/defaults.py index 362eba0d67f..ac040f8fceb 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -1,130 +1,323 @@ -# Authors: Alexandre Gramfort -# Denis A. Engemann -# Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from copy import deepcopy DEFAULTS = dict( - color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m', emg='k', - ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', - exci='k', ias='k', syst='k', seeg='saddlebrown', dbs='seagreen', - dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', - fnirs_cw_amplitude='k', fnirs_fd_ac_amplitude='k', - fnirs_fd_phase='k', fnirs_od='k', csd='k', whitened='k', - gsr='#666633', temperature='#663333'), - si_units=dict(mag='T', grad='T/m', eeg='V', eog='V', ecg='V', emg='V', - misc='AU', seeg='V', dbs='V', dipole='Am', gof='GOF', - bio='V', ecog='V', hbo='M', hbr='M', ref_meg='T', - fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V', - fnirs_fd_phase='rad', fnirs_od='V', csd='V/m²', - whitened='Z', gsr='S', temperature='C'), - units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV', - misc='AU', seeg='mV', dbs='µV', dipole='nAm', gof='GOF', - bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', - fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V', - fnirs_fd_phase='rad', fnirs_od='V', csd='mV/m²', - whitened='Z', gsr='S', temperature='C'), + color=dict( + mag="darkblue", + grad="b", + eeg="k", + eog="k", + ecg="m", + emg="k", + ref_meg="steelblue", + misc="k", + stim="k", + resp="k", + chpi="k", + exci="k", + ias="k", + syst="k", + seeg="saddlebrown", + dbs="seagreen", + dipole="k", + gof="k", + bio="k", + ecog="k", + hbo="#AA3377", + hbr="b", + fnirs_cw_amplitude="k", + fnirs_fd_ac_amplitude="k", + fnirs_fd_phase="k", + fnirs_od="k", + csd="k", + whitened="k", + gsr="#666633", + temperature="#663333", + eyegaze="k", + pupil="k", + ), + si_units=dict( + mag="T", + grad="T/m", + eeg="V", + eog="V", + ecg="V", + emg="V", + misc="AU", + seeg="V", + dbs="V", + dipole="Am", + gof="GOF", + bio="V", + ecog="V", + hbo="M", + hbr="M", + ref_meg="T", + fnirs_cw_amplitude="V", + fnirs_fd_ac_amplitude="V", + fnirs_fd_phase="rad", + fnirs_od="V", + csd="V/m²", + whitened="Z", + gsr="S", + temperature="C", + eyegaze="rad", + pupil="m", + ), + units=dict( + mag="fT", + grad="fT/cm", + eeg="µV", + eog="µV", + ecg="µV", + emg="µV", + misc="AU", + seeg="mV", + dbs="µV", + dipole="nAm", + gof="GOF", + bio="µV", + ecog="µV", + hbo="µM", + hbr="µM", + ref_meg="fT", + fnirs_cw_amplitude="V", + fnirs_fd_ac_amplitude="V", + fnirs_fd_phase="rad", + fnirs_od="V", + csd="mV/m²", + whitened="Z", + gsr="S", + temperature="C", + eyegaze="rad", + pupil="mm", + ), # scalings for the units - scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, - misc=1.0, seeg=1e3, dbs=1e6, ecog=1e6, dipole=1e9, gof=1.0, - bio=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, - fnirs_cw_amplitude=1.0, fnirs_fd_ac_amplitude=1.0, - fnirs_fd_phase=1., fnirs_od=1.0, csd=1e3, whitened=1., - gsr=1., temperature=1.), + scalings=dict( + mag=1e15, + grad=1e13, + eeg=1e6, + eog=1e6, + emg=1e6, + ecg=1e6, + misc=1.0, + seeg=1e3, + dbs=1e6, + ecog=1e6, + dipole=1e9, + gof=1.0, + bio=1e6, + hbo=1e6, + hbr=1e6, + ref_meg=1e15, + fnirs_cw_amplitude=1.0, + fnirs_fd_ac_amplitude=1.0, + fnirs_fd_phase=1.0, + 
fnirs_od=1.0, + csd=1e3, + whitened=1.0, + gsr=1.0, + temperature=1.0, + eyegaze=1.0, + pupil=1e3, + ), # rough guess for a good plot - scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, - ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', - stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, - seeg=1e-4, dbs=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, - hbr=10e-6, whitened=10., fnirs_cw_amplitude=2e-2, - fnirs_fd_ac_amplitude=2e-2, fnirs_fd_phase=2e-1, - fnirs_od=2e-2, csd=200e-4, - dipole=1e-7, gof=1e2, - gsr=1., temperature=0.1), - scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings - seeg=1e1, dbs=1e4, ecog=1e4, hbo=1e4, hbr=1e4), - ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.), - misc=(-5., 5.), seeg=(-20., 20.), dbs=(-200., 200.), - dipole=(-100., 100.), gof=(0., 1.), bio=(-500., 500.), - ecog=(-200., 200.), hbo=(0, 20), hbr=(0, 20), csd=(-50., 50.)), - titles=dict(mag='Magnetometers', grad='Gradiometers', eeg='EEG', eog='EOG', - ecg='ECG', emg='EMG', misc='misc', seeg='sEEG', dbs='DBS', - bio='BIO', dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', - ref_meg='Reference Magnetometers', - fnirs_cw_amplitude='fNIRS (CW amplitude)', - fnirs_fd_ac_amplitude='fNIRS (FD AC amplitude)', - fnirs_fd_phase='fNIRS (FD phase)', - fnirs_od='fNIRS (OD)', hbr='Deoxyhemoglobin', - gof='Goodness of fit', csd='Current source density', - stim='Stimulus', gsr='Galvanic skin response', - temperature='Temperature', - ), - mask_params=dict(marker='o', - markerfacecolor='w', - markeredgecolor='k', - linewidth=0, - markeredgewidth=1, - markersize=4), + scalings_plot_raw=dict( + mag=1e-12, + grad=4e-11, + eeg=20e-6, + eog=150e-6, + ecg=5e-4, + emg=1e-3, + ref_meg=1e-12, + misc="auto", + stim=1, + resp=1, + chpi=1e-4, + exci=1, + ias=1, + syst=1, + seeg=1e-4, + dbs=1e-4, + bio=1e-6, + ecog=1e-4, + hbo=10e-6, + hbr=10e-6, + whitened=10.0, + fnirs_cw_amplitude=2e-2, + fnirs_fd_ac_amplitude=2e-2, + fnirs_fd_phase=2e-1, + fnirs_od=2e-2, + csd=200e-4, + dipole=1e-7, + gof=1e2, + gsr=1.0, + temperature=0.1, + eyegaze=2e-1, + pupil=1e-2, + ), + scalings_cov_rank=dict( + mag=1e12, + grad=1e11, + eeg=1e5, # ~100x scalings + seeg=1e1, + dbs=1e4, + ecog=1e4, + hbo=1e4, + hbr=1e4, + ), + ylim=dict( + mag=(-600.0, 600.0), + grad=(-200.0, 200.0), + eeg=(-200.0, 200.0), + misc=(-5.0, 5.0), + seeg=(-20.0, 20.0), + dbs=(-200.0, 200.0), + dipole=(-100.0, 100.0), + gof=(0.0, 1.0), + bio=(-500.0, 500.0), + ecog=(-200.0, 200.0), + hbo=(0, 20), + hbr=(0, 20), + csd=(-50.0, 50.0), + eyegaze=(-1, 1), + pupil=(-1.0, 1.0), + ), + titles=dict( + mag="Magnetometers", + grad="Gradiometers", + eeg="EEG", + eog="EOG", + ecg="ECG", + emg="EMG", + misc="misc", + seeg="sEEG", + dbs="DBS", + bio="BIO", + dipole="Dipole", + ecog="ECoG", + hbo="Oxyhemoglobin", + ref_meg="Reference Magnetometers", + fnirs_cw_amplitude="fNIRS (CW amplitude)", + fnirs_fd_ac_amplitude="fNIRS (FD AC amplitude)", + fnirs_fd_phase="fNIRS (FD phase)", + fnirs_od="fNIRS (OD)", + hbr="Deoxyhemoglobin", + gof="Goodness of fit", + csd="Current source density", + stim="Stimulus", + gsr="Galvanic skin response", + temperature="Temperature", + eyegaze="Eye-tracking (Gaze position)", + pupil="Eye-tracking (Pupil size)", + resp="Respiration monitoring channel", + chpi="Continuous head position indicator (HPI) coil channels", + exci="Flux excitation channel", + ias="Internal Active Shielding data (Triux systems)", + syst="System status channel information (Triux systems)", + whitened="Whitened data", + ), + mask_params=dict( + 
marker="o", + markerfacecolor="w", + markeredgecolor="k", + linewidth=0, + markeredgewidth=1, + markersize=4, + ), coreg=dict( mri_fid_opacity=1.0, dig_fid_opacity=1.0, - + # go from unit scaling (e.g., unit-radius sphere) to meters mri_fid_scale=5e-3, dig_fid_scale=8e-3, extra_scale=4e-3, - eeg_scale=4e-3, eegp_scale=20e-3, eegp_height=0.1, - ecog_scale=5e-3, - seeg_scale=5e-3, + eeg_scale=4e-3, + eegp_scale=20e-3, + eegp_height=0.1, + ecog_scale=2e-3, + seeg_scale=2e-3, + meg_scale=1.0, # sensors are already in SI units + ref_meg_scale=1.0, dbs_scale=5e-3, fnirs_scale=5e-3, source_scale=5e-3, detector_scale=5e-3, hpi_scale=4e-3, - head_color=(0.988, 0.89, 0.74), - hpi_color=(1., 0., 1.), - extra_color=(1., 1., 1.), - meg_color=(0., 0.25, 0.5), ref_meg_color=(0.5, 0.5, 0.5), + hpi_color=(1.0, 0.0, 1.0), + extra_color=(1.0, 1.0, 1.0), + meg_color=(0.0, 0.25, 0.5), + ref_meg_color=(0.5, 0.5, 0.5), helmet_color=(0.0, 0.0, 0.6), - eeg_color=(1., 0.596, 0.588), eegp_color=(0.839, 0.15, 0.16), - ecog_color=(1., 1., 1.), + eeg_color=(1.0, 0.596, 0.588), + eegp_color=(0.839, 0.15, 0.16), + ecog_color=(1.0, 1.0, 1.0), dbs_color=(0.82, 0.455, 0.659), - seeg_color=(1., 1., .3), - fnirs_color=(1., .647, 0.), - source_color=(1., .05, 0.), - detector_color=(.3, .15, .15), - lpa_color=(1., 0., 0.), - nasion_color=(0., 1., 0.), - rpa_color=(0., 0., 1.), + seeg_color=(1.0, 1.0, 0.3), + fnirs_color=(1.0, 0.647, 0.0), + source_color=(1.0, 0.05, 0.0), + detector_color=(0.3, 0.15, 0.15), + lpa_color=(1.0, 0.0, 0.0), + nasion_color=(0.0, 1.0, 0.0), + rpa_color=(0.0, 0.0, 1.0), ), + report_coreg=dict(dig=True, meg=("helmet", "sensors"), show_axes=True), noise_std=dict(grad=5e-13, mag=20e-15, eeg=0.2e-6), eloreta_options=dict(eps=1e-6, max_iter=20, force_equal=False), - depth_mne=dict(exp=0.8, limit=10., limit_depth_chs=True, - combine_xyz='spectral', allow_fixed_depth=False), - depth_sparse=dict(exp=0.8, limit=None, limit_depth_chs='whiten', - combine_xyz='fro', allow_fixed_depth=True), - interpolation_method=dict(eeg='spline', meg='MNE', fnirs='nearest'), + depth_mne=dict( + exp=0.8, + limit=10.0, + limit_depth_chs=True, + combine_xyz="spectral", + allow_fixed_depth=False, + ), + depth_sparse=dict( + exp=0.8, + limit=None, + limit_depth_chs="whiten", + combine_xyz="fro", + allow_fixed_depth=True, + ), + interpolation_method=dict( + eeg="spline", meg="MNE", fnirs="nearest", ecog="spline", seeg="spline" + ), volume_options=dict( - alpha=None, resolution=1., surface_alpha=None, blending='mip', - silhouette_alpha=None, silhouette_linewidth=2.), - prefixes={'k': 1e-3, 'h': 1e-2, '': 1e0, 'd': 1e1, 'c': 1e2, 'm': 1e3, - 'µ': 1e6, 'u': 1e6, 'n': 1e9, 'p': 1e12, 'f': 1e15}, - transform_zooms=dict( - translation=None, rigid=None, affine=None, sdr=None), + alpha=None, + resolution=1.0, + surface_alpha=None, + blending="mip", + silhouette_alpha=None, + silhouette_linewidth=2.0, + ), + prefixes={ + "k": 1e-3, + "h": 1e-2, + "": 1e0, + "d": 1e1, + "c": 1e2, + "m": 1e3, + "µ": 1e6, + "u": 1e6, + "n": 1e9, + "p": 1e12, + "f": 1e15, + }, + transform_zooms=dict(translation=None, rigid=None, affine=None, sdr=None), transform_niter=dict( translation=(10000, 1000, 100), rigid=(10000, 1000, 100), affine=(10000, 1000, 100), - sdr=(10, 10, 5)), + sdr=(10, 10, 5), + ), volume_label_indices=( # Left and middle 4, # Left-Lateral-Ventricle 5, # Left-Inf-Lat-Vent - 8, # Left-Cerebellum-Cortex - 10, # Left-Thalamus-Proper 11, # Left-Caudate 12, # Left-Putamen @@ -134,44 +327,32 @@ 16, # Brain-Stem 17, # Left-Hippocampus 18, # 
Left-Amygdala - 26, # Left-Accumbens-area - 28, # Left-VentralDC - # Right 43, # Right-Lateral-Ventricle 44, # Right-Inf-Lat-Vent - 47, # Right-Cerebellum-Cortex - 49, # Right-Thalamus-Proper 50, # Right-Caudate 51, # Right-Putamen 52, # Right-Pallidum 53, # Right-Hippocampus 54, # Right-Amygdala - 58, # Right-Accumbens-area - 60, # Right-VentralDC ), report_stc_plot_kwargs=dict( - views=('lateral', 'medial'), - hemi='split', - backend='pyvistaqt', + views=("lateral", "medial"), + hemi="split", + backend="pyvistaqt", time_viewer=False, show_traces=False, size=(450, 450), - background='white', + background="white", time_label=None, - add_data_kwargs={ - 'colorbar_kwargs': { - 'label_font_size': 12, - 'n_labels': 5 - } - } - ) + add_data_kwargs={"colorbar_kwargs": {"label_font_size": 12, "n_labels": 5}}, + ), ) @@ -194,6 +375,6 @@ def _handle_default(k, v=None): HEAD_SIZE_DEFAULT = 0.095 # in [m] -_BORDER_DEFAULT = 'mean' -_INTERPOLATION_DEFAULT = 'cubic' -_EXTRAPOLATE_DEFAULT = 'auto' +_BORDER_DEFAULT = "mean" +_INTERPOLATION_DEFAULT = "cubic" +_EXTRAPOLATE_DEFAULT = "auto" diff --git a/mne/dipole.py b/mne/dipole.py index 415261063fd..67ff5cf65c4 100644 --- a/mne/dipole.py +++ b/mne/dipole.py @@ -1,50 +1,64 @@ -# -*- coding: utf-8 -*- """Single-dipole functions and classes.""" -# Authors: Alexandre Gramfort -# Eric Larson -# -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from copy import deepcopy import functools -from functools import partial import re +from copy import deepcopy +from functools import partial import numpy as np - -from .cov import compute_whitener, _ensure_cov -from .io.constants import FIFF -from .io.pick import pick_types -from .io.proj import make_projector, _needs_eeg_average_ref_proj -from .bem import _fit_sphere -from .evoked import _read_evoked, _aspect_rev, _write_evokeds -from .fixes import pinvh -from ._freesurfer import read_freesurfer_lut, _get_aseg -from .transforms import _print_coord_trans, _coord_frame_name, apply_trans -from .viz.evoked import _plot_evoked -from ._freesurfer import head_to_mni, head_to_mri -from .forward._make_forward import (_get_trans, _setup_bem, - _prep_meg_channels, _prep_eeg_channels) -from .forward._compute_forward import (_compute_forwards_meeg, - _prep_field_computation) - -from .surface import (transform_surface_to, _compute_nearest, - _points_outside_surface) -from .bem import _bem_find_surface, _bem_surf_name -from .source_space import _make_volume_source_space, SourceSpaces +from scipy.linalg import eigh +from scipy.optimize import fmin_cobyla + +from ._fiff.constants import FIFF +from ._fiff.pick import pick_types +from ._fiff.proj import _needs_eeg_average_ref_proj, make_projector +from ._freesurfer import _get_aseg, head_to_mni, head_to_mri, read_freesurfer_lut +from .bem import ConductorModel, _bem_find_surface, _bem_surf_name, _fit_sphere +from .cov import _ensure_cov, compute_whitener +from .evoked import _aspect_rev, _read_evoked, _write_evokeds +from .fixes import _safe_svd +from .forward._compute_forward import _compute_forwards_meeg, _prep_field_computation +from .forward._make_forward import ( + _get_trans, + _prep_eeg_channels, + _prep_meg_channels, + _setup_bem, +) from .parallel import parallel_func -from .utils import (logger, verbose, _time_mask, warn, _check_fname, - check_fname, _pl, fill_doc, _check_option, - _svd_lwork, _repeated_svd, _get_blas_funcs, _validate_type, - copy_function_doc_to_method_doc, TimeMixin, - 
_verbose_safe_false) -from .viz import plot_dipole_locations +from .source_space._source_space import SourceSpaces, _make_volume_source_space +from .surface import _compute_nearest, _points_outside_surface, transform_surface_to +from .transforms import _coord_frame_name, _print_coord_trans, apply_trans +from .utils import ( + ExtendedTimeMixin, + TimeMixin, + _check_fname, + _check_option, + _get_blas_funcs, + _pl, + _repeated_svd, + _svd_lwork, + _time_mask, + _validate_type, + _verbose_safe_false, + check_fname, + copy_function_doc_to_method_doc, + fill_doc, + logger, + pinvh, + verbose, + warn, +) +from .viz import plot_dipole_amplitudes, plot_dipole_locations +from .viz.evoked import _plot_evoked @fill_doc class Dipole(TimeMixin): - u"""Dipole class for sequential dipole fits. + """Dipole class for sequential dipole fits. .. note:: This class should usually not be instantiated directly via @@ -102,32 +116,92 @@ class Dipole(TimeMixin): """ @verbose - def __init__(self, times, pos, amplitude, ori, gof, - name=None, conf=None, khi2=None, nfree=None, - *, verbose=None): # noqa: D102 + def __init__( + self, + times, + pos, + amplitude, + ori, + gof, + name=None, + conf=None, + khi2=None, + nfree=None, + *, + verbose=None, + ): self._set_times(np.array(times)) - self.pos = np.array(pos) - self.amplitude = np.array(amplitude) - self.ori = np.array(ori) - self.gof = np.array(gof) - self.name = name - self.conf = dict() + self._pos = np.array(pos) + self._amplitude = np.array(amplitude) + self._ori = np.array(ori) + self._gof = np.array(gof) + self._name = name + self._conf = dict() if conf is not None: for key, value in conf.items(): - self.conf[key] = np.array(value) - self.khi2 = np.array(khi2) if khi2 is not None else None - self.nfree = np.array(nfree) if nfree is not None else None + self._conf[key] = np.array(value) + self._khi2 = np.array(khi2) if khi2 is not None else None + self._nfree = np.array(nfree) if nfree is not None else None def __repr__(self): # noqa: D105 - s = "n_times : %s" % len(self.times) - s += ", tmin : %0.3f" % np.min(self.times) - s += ", tmax : %0.3f" % np.max(self.times) - return "" % s + s = f"n_times : {len(self.times)}" + s += f", tmin : {np.min(self.times):0.3f}" + s += f", tmax : {np.max(self.times):0.3f}" + return f"" + + @property + def pos(self): + """The dipoles positions (m) in head coordinates.""" + return self._pos + + @property + def amplitude(self): + """The amplitude of the dipoles (Am).""" + return self._amplitude + + @property + def ori(self): + """The dipole orientations (normalized to unit length).""" + return self._ori + + @property + def gof(self): + """The goodness of fit.""" + return self._gof + + @property + def name(self): + """Name of the dipole.""" + return self._name + + @name.setter + def name(self, name): + _validate_type(name, str, "name") + self._name = name + + @property + def conf(self): + """Confidence limits in dipole orientation.""" + return self._conf + + @property + def khi2(self): + """The χ^2 values for the fits.""" + return self._khi2 + + @property + def nfree(self): + """The number of free parameters for each fit.""" + return self._nfree @verbose def save(self, fname, overwrite=False, *, verbose=None): """Save dipole in a ``.dip`` or ``.bdip`` file. + The ``.[b]dip`` format is for :class:`mne.Dipole` objects, that is, + fixed-position dipole fits. For these fits, the amplitude, orientation, + and position vary as a function of time. 
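# Hedged round-trip sketch for the text dipole format described above (the
# file name is a placeholder, and the dipole values are made up):
import numpy as np
import mne

dip = mne.Dipole(
    times=np.array([0.1, 0.2]),
    pos=np.array([[0.0, 0.0, 0.07], [0.0, 0.01, 0.07]]),   # meters, head coords
    amplitude=np.array([10e-9, 20e-9]),                    # Am
    ori=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),      # unit-length orientations
    gof=np.array([80.0, 90.0]),                            # goodness of fit in %
)
dip.save("example-dip.dip", overwrite=True)
dip_read = mne.read_dipole("example-dip.dip")
print(len(dip_read), dip_read.pos.shape)                   # 2 (2, 3)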
+ Parameters ---------- fname : path-like @@ -137,6 +211,10 @@ def save(self, fname, overwrite=False, *, verbose=None): .. versionadded:: 0.20 %(verbose)s + See Also + -------- + read_dipole + Notes ----- .. versionchanged:: 0.20 @@ -169,11 +247,12 @@ def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None): """ sfreq = None if len(self.times) > 1: - sfreq = 1. / np.median(np.diff(self.times)) - mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq, - include_tmax=include_tmax) + sfreq = 1.0 / np.median(np.diff(self.times)) + mask = _time_mask( + self.times, tmin, tmax, sfreq=sfreq, include_tmax=include_tmax + ) self._set_times(self.times[mask]) - for attr in ('pos', 'gof', 'amplitude', 'ori', 'khi2', 'nfree'): + for attr in ("_pos", "_gof", "_amplitude", "_ori", "_khi2", "_nfree"): if getattr(self, attr) is not None: setattr(self, attr, getattr(self, attr)[mask]) for key in self.conf.keys(): @@ -192,21 +271,53 @@ def copy(self): @verbose @copy_function_doc_to_method_doc(plot_dipole_locations) - def plot_locations(self, trans, subject, subjects_dir=None, - mode='orthoview', coord_frame='mri', idx='gof', - show_all=True, ax=None, block=False, show=True, - scale=None, color=None, *, highlight_color='r', - fig=None, title=None, head_source='seghead', - surf='pial', width=None, verbose=None): + def plot_locations( + self, + trans, + subject, + subjects_dir=None, + mode="orthoview", + coord_frame="mri", + idx="gof", + show_all=True, + ax=None, + block=False, + show=True, + scale=None, + color=None, + *, + highlight_color="r", + fig=None, + title=None, + head_source="seghead", + surf="pial", + width=None, + verbose=None, + ): return plot_dipole_locations( - self, trans, subject, subjects_dir, mode, coord_frame, idx, - show_all, ax, block, show, scale=scale, color=color, - highlight_color=highlight_color, fig=fig, title=title, - head_source=head_source, surf=surf, width=width) + self, + trans, + subject, + subjects_dir, + mode, + coord_frame, + idx, + show_all, + ax, + block, + show, + scale=scale, + color=color, + highlight_color=highlight_color, + fig=fig, + title=title, + head_source=head_source, + surf=surf, + width=width, + ) @verbose - def to_mni(self, subject, trans, subjects_dir=None, - verbose=None): + def to_mni(self, subject, trans, subjects_dir=None, verbose=None): """Convert dipole location from head to MNI coordinates. Parameters @@ -222,12 +333,12 @@ def to_mni(self, subject, trans, subjects_dir=None, The MNI coordinates (in mm) of pos. """ mri_head_t, trans = _get_trans(trans) - return head_to_mni(self.pos, subject, mri_head_t, - subjects_dir=subjects_dir, verbose=verbose) + return head_to_mni( + self.pos, subject, mri_head_t, subjects_dir=subjects_dir, verbose=verbose + ) @verbose - def to_mri(self, subject, trans, subjects_dir=None, - verbose=None): + def to_mri(self, subject, trans, subjects_dir=None, verbose=None): """Convert dipole location from head to MRI surface RAS coordinates. Parameters @@ -243,13 +354,24 @@ def to_mri(self, subject, trans, subjects_dir=None, The Freesurfer surface RAS coordinates (in mm) of pos. 
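# Hedged sketch of the coordinate conversions documented above (the subject
# name, trans file, and subjects_dir are placeholders for a real FreeSurfer
# setup; to_mni additionally needs the subject's Talairach transform on disk):
import mne

dip = mne.read_dipole("example-dip.dip")                   # from the sketch above
trans = mne.read_trans("example-trans.fif")                # head <-> MRI transform
pos_mri = dip.to_mri("sample", trans, subjects_dir="/path/to/subjects")
pos_mni = dip.to_mni("sample", trans, subjects_dir="/path/to/subjects")
print(pos_mri.shape, pos_mni.shape)                        # both (n_dipoles, 3), in mm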
""" mri_head_t, trans = _get_trans(trans) - return head_to_mri(self.pos, subject, mri_head_t, - subjects_dir=subjects_dir, verbose=verbose, - kind='mri') + return head_to_mri( + self.pos, + subject, + mri_head_t, + subjects_dir=subjects_dir, + verbose=verbose, + kind="mri", + ) @verbose - def to_volume_labels(self, trans, subject='fsaverage', aseg='aparc+aseg', - subjects_dir=None, verbose=None): + def to_volume_labels( + self, + trans, + subject="fsaverage", + aseg="aparc+aseg", + subjects_dir=None, + verbose=None, + ): """Find an ROI in atlas for the dipole positions. Parameters @@ -280,16 +402,15 @@ def to_volume_labels(self, trans, subject='fsaverage', aseg='aparc+aseg', lut = {v: k for k, v in lut_inv.items()} # transform to voxel space from head space - pos = self.to_mri(subject, trans, subjects_dir=subjects_dir, - verbose=verbose) + pos = self.to_mri(subject, trans, subjects_dir=subjects_dir, verbose=verbose) pos = apply_trans(mri_vox_t, pos) pos = np.rint(pos).astype(int) # Get voxel value and label from LUT - labels = [lut.get(aseg_data[tuple(coord)], 'Unknown') for coord in pos] + labels = [lut.get(aseg_data[tuple(coord)], "Unknown") for coord in pos] return labels - def plot_amplitudes(self, color='k', show=True): + def plot_amplitudes(self, color="k", show=True): """Plot the dipole amplitudes as a function of time. Parameters @@ -304,7 +425,6 @@ def plot_amplitudes(self, color='k', show=True): fig : matplotlib.figure.Figure The figure object containing the plot. """ - from .viz import plot_dipole_amplitudes return plot_dipole_amplitudes([self], [color], show) def __getitem__(self, item): @@ -335,9 +455,16 @@ def __getitem__(self, item): selected_khi2 = self.khi2[item] if self.khi2 is not None else None selected_nfree = self.nfree[item] if self.nfree is not None else None return Dipole( - selected_times, selected_pos, selected_amplitude, selected_ori, - selected_gof, selected_name, selected_conf, selected_khi2, - selected_nfree) + selected_times, + selected_pos, + selected_amplitude, + selected_ori, + selected_gof, + selected_name, + selected_conf, + selected_khi2, + selected_nfree, + ) def __len__(self): """Return the number of dipoles. @@ -359,13 +486,13 @@ def __len__(self): def _read_dipole_fixed(fname): """Read a fixed dipole FIF file.""" - logger.info('Reading %s ...' % fname) + logger.info(f"Reading {fname} ...") info, nave, aspect_kind, comment, times, data, _ = _read_evoked(fname) return DipoleFixed(info, data, times, nave, aspect_kind, comment=comment) @fill_doc -class DipoleFixed(TimeMixin): +class DipoleFixed(ExtendedTimeMixin): """Dipole class for fixed-position dipole fits. .. 
note:: @@ -404,12 +531,13 @@ class DipoleFixed(TimeMixin): """ @verbose - def __init__(self, info, data, times, nave, aspect_kind, - comment='', *, verbose=None): # noqa: D102 + def __init__( + self, info, data, times, nave, aspect_kind, comment="", *, verbose=None + ): self.info = info self.nave = nave self._aspect_kind = aspect_kind - self.kind = _aspect_rev.get(aspect_kind, 'unknown') + self.kind = _aspect_rev.get(aspect_kind, "unknown") self.comment = comment self._set_times(np.array(times)) self.data = data @@ -417,10 +545,10 @@ def __init__(self, info, data, times, nave, aspect_kind, self._update_first_last() def __repr__(self): # noqa: D105 - s = "n_times : %s" % len(self.times) - s += ", tmin : %s" % np.min(self.times) - s += ", tmax : %s" % np.max(self.times) - return "" % s + s = f"n_times : {len(self.times)}" + s += f", tmin : {np.min(self.times)}" + s += f", tmax : {np.max(self.times)}" + return f"" def copy(self): """Copy the DipoleFixed object. @@ -439,26 +567,46 @@ def copy(self): @property def ch_names(self): """Channel names.""" - return self.info['ch_names'] + return self.info["ch_names"] @verbose - def save(self, fname, verbose=None): - """Save dipole in a .fif file. + def save(self, fname, *, overwrite=False, verbose=None): + """Save fixed dipole in FIF format. + + The ``.fif[.gz]`` format is for :class:`mne.DipoleFixed` objects, that is, + fixed-position and optionally fixed-orientation dipole fits. For these fits, + the amplitude (and optionally orientation) vary as a function of time, + but not the position. Parameters ---------- fname : path-like - The name of the .fif file. Must end with ``'.fif'`` or - ``'.fif.gz'`` to make it explicit that the file contains + The name of the FIF file. Must end with ``'-dip.fif'`` or + ``'-dip.fif.gz'`` to make it explicit that the file contains dipole information in FIF format. + %(overwrite)s + + .. versionadded:: 1.10.0 %(verbose)s - """ - check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz', - '_dip.fif', '_dip.fif.gz',), - ('.fif', '.fif.gz')) - _write_evokeds(fname, self, check=False) - def plot(self, show=True, time_unit='s'): + See Also + -------- + read_dipole + """ + check_fname( + fname, + "DipoleFixed", + ( + "-dip.fif", + "-dip.fif.gz", + "_dip.fif", + "_dip.fif.gz", + ), + (".fif", ".fif.gz"), + ) + _write_evokeds(fname, self, check=False, overwrite=overwrite) + + def plot(self, show=True, time_unit="s"): """Plot dipole data. Parameters @@ -475,24 +623,43 @@ def plot(self, show=True, time_unit='s'): fig : instance of matplotlib.figure.Figure The figure containing the time courses. """ - return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show, - ylim=None, xlim='tight', proj=False, hline=None, - units=None, scalings=None, titles=None, axes=None, - gfp=False, window_title=None, spatial_colors=False, - plot_type="butterfly", selectable=False, - time_unit=time_unit) + return _plot_evoked( + self, + picks=None, + exclude=(), + unit=True, + show=show, + ylim=None, + xlim="tight", + proj=False, + hline=None, + units=None, + scalings=None, + titles=None, + axes=None, + gfp=False, + window_title=None, + spatial_colors=False, + plot_type="butterfly", + selectable=False, + time_unit=time_unit, + ) # ############################################################################# # IO @verbose def read_dipole(fname, verbose=None): - """Read ``.dip`` file from Neuromag/xfit or MNE. + """Read a dipole object from a file. 
+ + Non-fixed-position :class:`mne.Dipole` objects are usually saved in ``.[b]dip`` + format. Fixed-position :class:`mne.DipoleFixed` objects are usually saved in + FIF format. Parameters ---------- fname : path-like - The name of the ``.dip`` or ``.fif`` file. + The name of the ``.[b]dip`` or ``.fif[.gz]`` file. %(verbose)s Returns @@ -510,7 +677,7 @@ def read_dipole(fname, verbose=None): .. versionchanged:: 0.20 Support for reading bdip (Xfit binary) format. """ - fname = _check_fname(fname, overwrite='read', must_exist=True) + fname = _check_fname(fname, overwrite="read", must_exist=True) if fname.suffix == ".fif" or fname.name.endswith(".fif.gz"): return _read_dipole_fixed(fname) elif fname.suffix == ".bdip": @@ -527,69 +694,98 @@ def _read_dipole_text(fname): # There is a bug in older np.loadtxt regarding skipping fields, # so just read the data ourselves (need to get name and header anyway) data = list() - with open(fname, 'r') as fid: + with open(fname) as fid: for line in fid: - if not (line.startswith('%') or line.startswith('#')): + if not (line.startswith("%") or line.startswith("#")): need_header = False data.append(line.strip().split()) else: if need_header: def_line = line - if line.startswith('##') or line.startswith('%%'): + if line.startswith("##") or line.startswith("%%"): m = re.search('Name "(.*) dipoles"', line) if m: name = m.group(1) del line data = np.atleast_2d(np.array(data, float)) if def_line is None: - raise IOError('Dipole text file is missing field definition ' - 'comment, cannot parse %s' % (fname,)) + raise OSError( + "Dipole text file is missing field definition comment, cannot parse " + f"{fname}" + ) # actually parse the fields - def_line = def_line.lstrip('%').lstrip('#').strip() + def_line = def_line.lstrip("%").lstrip("#").strip() # MNE writes it out differently than Elekta, let's standardize them... - fields = re.sub(r'([X|Y|Z] )\(mm\)', # "X (mm)", etc. - lambda match: match.group(1).strip() + '/mm', def_line) - fields = re.sub(r'\((.*?)\)', # "Q(nAm)", etc. - lambda match: '/' + match.group(1), fields) - fields = re.sub('(begin|end) ', # "begin" and "end" with no units - lambda match: match.group(1) + '/ms', fields) + fields = re.sub( + r"([X|Y|Z] )\(mm\)", # "X (mm)", etc. + lambda match: match.group(1).strip() + "/mm", + def_line, + ) + fields = re.sub( + r"\((.*?)\)", + lambda match: "/" + match.group(1), + fields, # "Q(nAm)", etc. + ) + fields = re.sub( + "(begin|end) ", # "begin" and "end" with no units + lambda match: match.group(1) + "/ms", + fields, + ) fields = fields.lower().split() - required_fields = ('begin/ms', - 'x/mm', 'y/mm', 'z/mm', - 'q/nam', 'qx/nam', 'qy/nam', 'qz/nam', - 'g/%') - optional_fields = ('khi^2', 'free', # standard ones - # now the confidence fields (up to 5!) - 'vol/mm^3', 'depth/mm', 'long/mm', 'trans/mm', - 'qlong/nam', 'qtrans/nam') + required_fields = ( + "begin/ms", + "x/mm", + "y/mm", + "z/mm", + "q/nam", + "qx/nam", + "qy/nam", + "qz/nam", + "g/%", + ) + optional_fields = ( + "khi^2", + "free", # standard ones + # now the confidence fields (up to 5!) 
+ "vol/mm^3", + "depth/mm", + "long/mm", + "trans/mm", + "qlong/nam", + "qtrans/nam", + ) conf_scales = [1e-9, 1e-3, 1e-3, 1e-3, 1e-9, 1e-9] missing_fields = sorted(set(required_fields) - set(fields)) if len(missing_fields) > 0: - raise RuntimeError('Could not find necessary fields in header: %s' - % (missing_fields,)) + raise RuntimeError( + f"Could not find necessary fields in header: {missing_fields}" + ) handled_fields = set(required_fields) | set(optional_fields) assert len(handled_fields) == len(required_fields) + len(optional_fields) - ignored_fields = sorted(set(fields) - - set(handled_fields) - - {'end/ms'}) + ignored_fields = sorted(set(fields) - set(handled_fields) - {"end/ms"}) if len(ignored_fields) > 0: - warn('Ignoring extra fields in dipole file: %s' % (ignored_fields,)) + warn(f"Ignoring extra fields in dipole file: {ignored_fields}") if len(fields) != data.shape[1]: - raise IOError('More data fields (%s) found than data columns (%s): %s' - % (len(fields), data.shape[1], fields)) - - logger.info("%d dipole(s) found" % len(data)) - - if 'end/ms' in fields: - if np.diff(data[:, [fields.index('begin/ms'), - fields.index('end/ms')]], 1, -1).any(): - warn('begin and end fields differed, but only begin will be used ' - 'to store time values') + raise OSError( + f"More data fields ({len(fields)}) found than data columns ({data.shape[1]}" + f"): {fields}" + ) + + logger.info(f"{len(data)} dipole(s) found") + + if "end/ms" in fields: + if np.diff( + data[:, [fields.index("begin/ms"), fields.index("end/ms")]], 1, -1 + ).any(): + warn( + "begin and end fields differed, but only begin will be used " + "to store time values" + ) # Find the correct column in our data array, then scale to proper units idx = [fields.index(field) for field in required_fields] assert len(idx) >= 9 - times = data[:, idx[0]] / 1000. + times = data[:, idx[0]] / 1000.0 pos = 1e-3 * data[:, idx[1:4]] # put data in meters amplitude = data[:, idx[4]] norm = amplitude.copy() @@ -606,36 +802,39 @@ def _read_dipole_text(fname): conf = dict() for field, scale in zip(optional_fields[2:], conf_scales): # confidence if field in fields: - conf[field.split('/')[0]] = scale * data[:, fields.index(field)] + conf[field.split("/")[0]] = scale * data[:, fields.index(field)] return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree) def _write_dipole_text(fname, dip): - fmt = ' %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.2f' - header = ('# begin end X (mm) Y (mm) Z (mm)' - ' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%') - t = dip.times[:, np.newaxis] * 1000. 
+ fmt = " %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.2f" + header = ( + "# begin end X (mm) Y (mm) Z (mm)" + " Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%" + ) + t = dip.times[:, np.newaxis] * 1000.0 gof = dip.gof[:, np.newaxis] amp = 1e9 * dip.amplitude[:, np.newaxis] out = (t, t, dip.pos / 1e-3, amp, dip.ori * amp, gof) # optional fields - fmts = dict(khi2=(' khi^2', ' %8.1f', 1.), - nfree=(' free', ' %5d', 1), - vol=(' vol/mm^3', ' %9.3f', 1e9), - depth=(' depth/mm', ' %9.3f', 1e3), - long=(' long/mm', ' %8.3f', 1e3), - trans=(' trans/mm', ' %9.3f', 1e3), - qlong=(' Qlong/nAm', ' %10.3f', 1e9), - qtrans=(' Qtrans/nAm', ' %11.3f', 1e9), - ) - for key in ('khi2', 'nfree'): + fmts = dict( + khi2=(" khi^2", " %8.1f", 1.0), + nfree=(" free", " %5d", 1), + vol=(" vol/mm^3", " %9.3f", 1e9), + depth=(" depth/mm", " %9.3f", 1e3), + long=(" long/mm", " %8.3f", 1e3), + trans=(" trans/mm", " %9.3f", 1e3), + qlong=(" Qlong/nAm", " %10.3f", 1e9), + qtrans=(" Qtrans/nAm", " %11.3f", 1e9), + ) + for key in ("khi2", "nfree"): data = getattr(dip, key) if data is not None: header += fmts[key][0] fmt += fmts[key][1] out += (data[:, np.newaxis] * fmts[key][2],) - for key in ('vol', 'depth', 'long', 'trans', 'qlong', 'qtrans'): + for key in ("vol", "depth", "long", "trans", "qlong", "qtrans"): data = dip.conf.get(key) if data is not None: header += fmts[key][0] @@ -644,22 +843,21 @@ def _write_dipole_text(fname, dip): out = np.concatenate(out, axis=-1) # NB CoordinateSystem is hard-coded as Head here - with open(fname, 'wb') as fid: - fid.write('# CoordinateSystem "Head"\n'.encode('utf-8')) - fid.write((header + '\n').encode('utf-8')) + with open(fname, "wb") as fid: + fid.write(b'# CoordinateSystem "Head"\n') + fid.write((header + "\n").encode("utf-8")) np.savetxt(fid, out, fmt=fmt) if dip.name is not None: - fid.write(('## Name "%s dipoles" Style "Dipoles"' - % dip.name).encode('utf-8')) + fid.write((f'## Name "{dip.name} dipoles" Style "Dipoles"').encode()) -_BDIP_ERROR_KEYS = ('depth', 'long', 'trans', 'qlong', 'qtrans') +_BDIP_ERROR_KEYS = ("depth", "long", "trans", "qlong", "qtrans") def _read_dipole_bdip(fname): name = None nfree = None - with open(fname, 'rb') as fid: + with open(fname, "rb") as fid: # Which dipole in a multi-dipole set times = list() pos = list() @@ -670,75 +868,77 @@ def _read_dipole_bdip(fname): khi2 = list() has_errors = None while True: - num = np.frombuffer(fid.read(4), '>i4') + num = np.frombuffer(fid.read(4), ">i4") if len(num) == 0: break - times.append(np.frombuffer(fid.read(4), '>f4')[0]) + times.append(np.frombuffer(fid.read(4), ">f4")[0]) fid.read(4) # end fid.read(12) # r0 - pos.append(np.frombuffer(fid.read(12), '>f4')) - Q = np.frombuffer(fid.read(12), '>f4') + pos.append(np.frombuffer(fid.read(12), ">f4")) + Q = np.frombuffer(fid.read(12), ">f4") amplitude.append(np.linalg.norm(Q)) ori.append(Q / amplitude[-1]) - gof.append(100 * np.frombuffer(fid.read(4), '>f4')[0]) - this_has_errors = bool(np.frombuffer(fid.read(4), '>i4')[0]) + gof.append(100 * np.frombuffer(fid.read(4), ">f4")[0]) + this_has_errors = bool(np.frombuffer(fid.read(4), ">i4")[0]) if has_errors is None: has_errors = this_has_errors for key in _BDIP_ERROR_KEYS: conf[key] = list() assert has_errors == this_has_errors fid.read(4) # Noise level used for error computations - limits = np.frombuffer(fid.read(20), '>f4') # error limits + limits = np.frombuffer(fid.read(20), ">f4") # error limits for key, lim in zip(_BDIP_ERROR_KEYS, limits): conf[key].append(lim) fid.read(100) # (5, 5) fully describes 
the conf. ellipsoid - conf['vol'].append(np.frombuffer(fid.read(4), '>f4')[0]) - khi2.append(np.frombuffer(fid.read(4), '>f4')[0]) + conf["vol"].append(np.frombuffer(fid.read(4), ">f4")[0]) + khi2.append(np.frombuffer(fid.read(4), ">f4")[0]) fid.read(4) # prob fid.read(4) # total noise estimate return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree) def _write_dipole_bdip(fname, dip): - with open(fname, 'wb+') as fid: + with open(fname, "wb+") as fid: for ti, t in enumerate(dip.times): - fid.write(np.zeros(1, '>i4').tobytes()) # int dipole - fid.write(np.array([t, 0]).astype('>f4').tobytes()) - fid.write(np.zeros(3, '>f4').tobytes()) # r0 - fid.write(dip.pos[ti].astype('>f4').tobytes()) # pos + fid.write(np.zeros(1, ">i4").tobytes()) # int dipole + fid.write(np.array([t, 0]).astype(">f4").tobytes()) + fid.write(np.zeros(3, ">f4").tobytes()) # r0 + fid.write(dip.pos[ti].astype(">f4").tobytes()) # pos Q = dip.amplitude[ti] * dip.ori[ti] - fid.write(Q.astype('>f4').tobytes()) - fid.write(np.array(dip.gof[ti] / 100., '>f4').tobytes()) + fid.write(Q.astype(">f4").tobytes()) + fid.write(np.array(dip.gof[ti] / 100.0, ">f4").tobytes()) has_errors = int(bool(len(dip.conf))) - fid.write(np.array(has_errors, '>i4').tobytes()) # has_errors - fid.write(np.zeros(1, '>f4').tobytes()) # noise level + fid.write(np.array(has_errors, ">i4").tobytes()) # has_errors + fid.write(np.zeros(1, ">f4").tobytes()) # noise level for key in _BDIP_ERROR_KEYS: - val = dip.conf[key][ti] if key in dip.conf else 0. + val = dip.conf[key][ti] if key in dip.conf else 0.0 assert val.shape == () - fid.write(np.array(val, '>f4').tobytes()) - fid.write(np.zeros(25, '>f4').tobytes()) - conf = dip.conf['vol'][ti] if 'vol' in dip.conf else 0. - fid.write(np.array(conf, '>f4').tobytes()) + fid.write(np.array(val, ">f4").tobytes()) + fid.write(np.zeros(25, ">f4").tobytes()) + conf = dip.conf["vol"][ti] if "vol" in dip.conf else 0.0 + fid.write(np.array(conf, ">f4").tobytes()) khi2 = dip.khi2[ti] if dip.khi2 is not None else 0 - fid.write(np.array(khi2, '>f4').tobytes()) - fid.write(np.zeros(1, '>f4').tobytes()) # prob - fid.write(np.zeros(1, '>f4').tobytes()) # total noise est + fid.write(np.array(khi2, ">f4").tobytes()) + fid.write(np.zeros(1, ">f4").tobytes()) # prob + fid.write(np.zeros(1, ">f4").tobytes()) # total noise est # ############################################################################# # Fitting + def _dipole_forwards(*, sensors, fwd_data, whitener, rr, n_jobs=None): """Compute the forward solution and do other nice stuff.""" B = _compute_forwards_meeg( - rr, sensors=sensors, fwd_data=fwd_data, n_jobs=n_jobs, silent=True) + rr, sensors=sensors, fwd_data=fwd_data, n_jobs=n_jobs, silent=True + ) B = np.concatenate(list(B.values()), axis=1) assert np.isfinite(B).all() B_orig = B.copy() # Apply projection and whiten (cov has projections already) _, _, dgemm = _get_ddot_dgemv_dgemm() - B = dgemm(1., B, whitener.T) + B = dgemm(1.0, B, whitener.T) # column normalization doesn't affect our fitting, so skip for now # S = np.sum(B * B, axis=1) # across channels @@ -752,21 +952,31 @@ def _dipole_forwards(*, sensors, fwd_data, whitener, rr, n_jobs=None): @verbose def _make_guesses(surf, grid, exclude, mindist, n_jobs=None, verbose=None): """Make a guess space inside a sphere or BEM surface.""" - if 'rr' in surf: - logger.info('Guess surface (%s) is in %s coordinates' - % (_bem_surf_name[surf['id']], - _coord_frame_name(surf['coord_frame']))) + if "rr" in surf: + logger.info( + "Guess surface ({}) is in {} 
coordinates".format( + _bem_surf_name[surf["id"]], _coord_frame_name(surf["coord_frame"]) + ) + ) else: - logger.info('Making a spherical guess space with radius %7.1f mm...' - % (1000 * surf['R'])) - logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid)) - src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist, - do_neighbors=False, n_jobs=n_jobs)[0] - assert 'vertno' in src + logger.info( + f"Making a spherical guess space with radius {1000 * surf.radius:7.1f} " + "mm..." + ) + logger.info("Filtering (grid = %6.f mm)..." % (1000 * grid)) + src = _make_volume_source_space( + surf, grid, exclude, 1000 * mindist, do_neighbors=False, n_jobs=n_jobs + )[0] + assert "vertno" in src # simplify the result to make things easier later - src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']], - nuse=src['nuse'], coord_frame=src['coord_frame'], - vertno=np.arange(src['nuse']), type='discrete') + src = dict( + rr=src["rr"][src["vertno"]], + nn=src["nn"][src["vertno"]], + nuse=src["nuse"], + coord_frame=src["coord_frame"], + vertno=np.arange(src["nuse"]), + type="discrete", + ) return SourceSpaces([src]) @@ -775,26 +985,26 @@ def _fit_eval(rd, B, B2, *, sensors, fwd_data, whitener, lwork, fwd_svd): if fwd_svd is None: assert sensors is not None fwd = _dipole_forwards( - sensors=sensors, fwd_data=fwd_data, whitener=whitener, - rr=rd[np.newaxis, :])[0] + sensors=sensors, fwd_data=fwd_data, whitener=whitener, rr=rd[np.newaxis, :] + )[0] uu, sing, vv = _repeated_svd(fwd, lwork, overwrite_a=True) else: uu, sing, vv = fwd_svd gof = _dipole_gof(uu, sing, vv, B, B2)[0] # mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version - return 1. - gof + return 1.0 - gof @functools.lru_cache(None) def _get_ddot_dgemv_dgemm(): - return _get_blas_funcs(np.float64, ('dot', 'gemv', 'gemm')) + return _get_blas_funcs(np.float64, ("dot", "gemv", "gemm")) def _dipole_gof(uu, sing, vv, B, B2): """Calculate the goodness of fit from the forward SVD.""" ddot, dgemv, _ = _get_ddot_dgemv_dgemm() - ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.) 
> 0.2 else 2 - one = dgemv(1., vv[:ncomp], B) # np.dot(vv[:ncomp], B) + ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.0) > 0.2 else 2 + one = dgemv(1.0, vv[:ncomp], B) # np.dot(vv[:ncomp], B) Bm2 = ddot(one, one) # np.sum(one * one) gof = Bm2 / B2 return gof, one @@ -802,25 +1012,24 @@ def _dipole_gof(uu, sing, vv, B, B2): def _fit_Q(*, sensors, fwd_data, whitener, B, B2, B_orig, rd, ori=None): """Fit the dipole moment once the location is known.""" - from scipy import linalg - if 'fwd' in fwd_data: + if "fwd" in fwd_data: # should be a single precomputed "guess" (i.e., fixed position) assert rd is None - fwd = fwd_data['fwd'] + fwd = fwd_data["fwd"] assert fwd.shape[0] == 3 - fwd_orig = fwd_data['fwd_orig'] + fwd_orig = fwd_data["fwd_orig"] assert fwd_orig.shape[0] == 3 - scales = fwd_data['scales'] + scales = fwd_data["scales"] assert scales.shape == (3,) - fwd_svd = fwd_data['fwd_svd'][0] + fwd_svd = fwd_data["fwd_svd"][0] else: fwd, fwd_orig, scales = _dipole_forwards( - sensors=sensors, fwd_data=fwd_data, whitener=whitener, - rr=rd[np.newaxis, :]) + sensors=sensors, fwd_data=fwd_data, whitener=whitener, rr=rd[np.newaxis, :] + ) fwd_svd = None if ori is None: if fwd_svd is None: - fwd_svd = linalg.svd(fwd, full_matrices=False) + fwd_svd = _safe_svd(fwd, full_matrices=False) uu, sing, vv = fwd_svd gof, one = _dipole_gof(uu, sing, vv, B, B2) ncomp = len(one) @@ -839,19 +1048,42 @@ def _fit_Q(*, sensors, fwd_data, whitener, B, B2, B_orig, rd, ori=None): return Q, gof, B_residual_noproj, ncomp -def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs, - guess_data, *, sensors, fwd_data, whitener, ori, n_jobs, - rank, rhoend): +def _fit_dipoles( + fun, + min_dist_to_inner_skull, + data, + times, + guess_rrs, + guess_data, + *, + sensors, + fwd_data, + whitener, + ori, + n_jobs, + rank, + rhoend, +): """Fit a single dipole to the given whitened, projected data.""" - from scipy.optimize import fmin_cobyla parallel, p_fun, n_jobs = parallel_func(fun, n_jobs) # parallel over time points res = parallel( p_fun( - min_dist_to_inner_skull, B, t, guess_rrs, guess_data, - sensors=sensors, fwd_data=fwd_data, whitener=whitener, - fmin_cobyla=fmin_cobyla, ori=ori, rank=rank, rhoend=rhoend) - for B, t in zip(data.T, times)) + min_dist_to_inner_skull, + B, + t, + guess_rrs, + guess_data, + sensors=sensors, + fwd_data=fwd_data, + whitener=whitener, + fmin_cobyla=fmin_cobyla, + ori=ori, + rank=rank, + rhoend=rhoend, + ) + for B, t in zip(data.T, times) + ) pos = np.array([r[0] for r in res]) amp = np.array([r[1] for r in res]) ori = np.array([r[2] for r in res]) @@ -859,7 +1091,7 @@ def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs, conf = None if res[0][4] is not None: conf = np.array([r[4] for r in res]) - keys = ['vol', 'depth', 'long', 'trans', 'qlong', 'qtrans'] + keys = ["vol", "depth", "long", "trans", "qlong", "qtrans"] conf = {key: conf[:, ki] for ki, key in enumerate(keys)} khi2 = np.array([r[5] for r in res]) nfree = np.array([r[6] for r in res]) @@ -971,12 +1203,11 @@ def _fit_confidence(*, rd, Q, ori, whitener, fwd_data, sensors): # # And then the confidence interval is the diagonal of C, scaled by 1.96 # (for 95% confidence). 
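    # A minimal sketch of the recipe in the comment above, using hypothetical
    # names (``J`` = n_channels-by-n_parameters Jacobian of the whitened
    # forward field); the actual implementation below may differ in detail:
    #
    #     import numpy as np
    #     C = np.linalg.pinv(J.T @ J)          # parameter covariance matrix
    #     ci = 1.96 * np.sqrt(np.diag(C))      # ~95% confidence half-widths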
- from scipy import linalg direction = np.empty((3, 3)) # The coordinate system has the x axis aligned with the dipole orientation, direction[0] = ori # the z axis through the origin of the sphere model - rvec = rd - fwd_data['inner_skull']['r0'] + rvec = rd - fwd_data["inner_skull"]["r0"] direction[2] = rvec - ori * np.dot(ori, rvec) # orthogonalize direction[2] /= np.linalg.norm(direction[2]) # and the y axis perpendical with these forming a right-handed system. @@ -990,15 +1221,19 @@ def _fit_confidence(*, rd, Q, ori, whitener, fwd_data, sensors): for delta in deltas: this_r = rd[np.newaxis] + delta * direction[ii] fwds.append( - np.dot(Q, _dipole_forwards( - sensors=sensors, fwd_data=fwd_data, - whitener=whitener, rr=this_r)[0])) + np.dot( + Q, + _dipole_forwards( + sensors=sensors, fwd_data=fwd_data, whitener=whitener, rr=this_r + )[0], + ) + ) J[:, ii] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0] # Get current (Q) deltas in the dipole directions deltas = np.array([-0.01, 0.01]) * np.linalg.norm(Q) this_fwd = _dipole_forwards( - sensors=sensors, fwd_data=fwd_data, whitener=whitener, - rr=rd[np.newaxis])[0] + sensors=sensors, fwd_data=fwd_data, whitener=whitener, rr=rd[np.newaxis] + )[0] for ii in range(3): fwds = [] for delta in deltas: @@ -1019,8 +1254,12 @@ def _fit_confidence(*, rd, Q, ori, whitener, fwd_data, sensors): # The confidence volume of the dipole location is obtained from by # taking the eigenvalues of the upper left submatrix and computing # v = 4π/3 √(c^3 λ1 λ2 λ3) with c = 7.81, or: - vol_conf = 4 * np.pi / 3. * np.sqrt( - 476.379541 * np.prod(linalg.eigh(C[:3, :3], eigvals_only=True))) + vol_conf = ( + 4 + * np.pi + / 3.0 + * np.sqrt(476.379541 * np.prod(eigh(C[:3, :3], eigvals_only=True))) + ) conf = np.concatenate([conf, [vol_conf]]) # Now we reorder and subselect the proper columns: # vol, depth, long, trans, Qlong, Qtrans (discard Qdepth, assumed zero) @@ -1030,10 +1269,9 @@ def _fit_confidence(*, rd, Q, ori, whitener, fwd_data, sensors): def _surface_constraint(rd, surf, min_dist_to_inner_skull): """Surface fitting constraint.""" - dist = _compute_nearest(surf['rr'], rd[np.newaxis, :], - return_dists=True)[1][0] + dist = _compute_nearest(surf["rr"], rd[np.newaxis, :], return_dists=True)[1][0] if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]: - dist *= -1. + dist *= -1.0 # Once we know the dipole is below the inner skull, # let's check if its distance to the inner skull is at least # min_dist_to_inner_skull. 
This can be enforced by adding a @@ -1047,45 +1285,82 @@ def _sphere_constraint(rd, r0, R_adj): return R_adj - np.sqrt(np.sum((rd - r0) ** 2)) -def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs, - guess_data, *, sensors, fwd_data, whitener, fmin_cobyla, - ori, rank, rhoend): +def _fit_dipole( + min_dist_to_inner_skull, + B_orig, + t, + guess_rrs, + guess_data, + *, + sensors, + fwd_data, + whitener, + fmin_cobyla, + ori, + rank, + rhoend, +): """Fit a single bit of data.""" B = np.dot(whitener, B_orig) # make constraint function to keep the solver within the inner skull - if 'rr' in fwd_data['inner_skull']: # bem - surf = fwd_data['inner_skull'] - constraint = partial(_surface_constraint, surf=surf, - min_dist_to_inner_skull=min_dist_to_inner_skull) + if "rr" in fwd_data["inner_skull"]: # bem + surf = fwd_data["inner_skull"] + constraint = partial( + _surface_constraint, + surf=surf, + min_dist_to_inner_skull=min_dist_to_inner_skull, + ) else: # sphere surf = None constraint = partial( - _sphere_constraint, r0=fwd_data['inner_skull']['r0'], - R_adj=fwd_data['inner_skull']['R'] - min_dist_to_inner_skull) + _sphere_constraint, + r0=fwd_data["inner_skull"]["r0"], + R_adj=fwd_data["inner_skull"].radius - min_dist_to_inner_skull, + ) # Find a good starting point (find_best_guess in C) B2 = np.dot(B, B) if B2 == 0: - warn('Zero field found for time %s' % t) + warn(f"Zero field found for time {t}") return np.zeros(3), 0, np.zeros(3), 0, B - idx = np.argmin([ - _fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd=fwd_svd, - fwd_data=None, sensors=None, whitener=None, lwork=None) - for fi, fwd_svd in enumerate(guess_data['fwd_svd'])]) + idx = np.argmin( + [ + _fit_eval( + guess_rrs[[fi], :], + B, + B2, + fwd_svd=fwd_svd, + fwd_data=None, + sensors=None, + whitener=None, + lwork=None, + ) + for fi, fwd_svd in enumerate(guess_data["fwd_svd"]) + ] + ) x0 = guess_rrs[idx] lwork = _svd_lwork((3, B.shape[0])) - fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener, - lwork=lwork, sensors=sensors, fwd_svd=None) + fun = partial( + _fit_eval, + B=B, + B2=B2, + fwd_data=fwd_data, + whitener=whitener, + lwork=lwork, + sensors=sensors, + fwd_svd=None, + ) # Tested minimizers: # Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC # Several were similar, but COBYLA won for having a handy constraint # function we can use to ensure we stay inside the inner skull / # smallest sphere - rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(), - rhobeg=5e-2, rhoend=rhoend, disp=False) + rd_final = fmin_cobyla( + fun, x0, (constraint,), consargs=(), rhobeg=5e-2, rhoend=rhoend, disp=False + ) # simplex = _make_tetra_simplex() + x0 # _simplex_minimize(simplex, 1e-4, 2e-4, fun) @@ -1093,45 +1368,71 @@ def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs, # Compute the dipole moment at the final point Q, gof, residual_noproj, n_comp = _fit_Q( - sensors=sensors, fwd_data=fwd_data, whitener=whitener, B=B, B2=B2, - B_orig=B_orig, rd=rd_final, ori=ori) + sensors=sensors, + fwd_data=fwd_data, + whitener=whitener, + B=B, + B2=B2, + B_orig=B_orig, + rd=rd_final, + ori=ori, + ) khi2 = (1 - gof) * B2 nfree = rank - n_comp amp = np.sqrt(np.dot(Q, Q)) - norm = 1. if amp == 0. else amp + norm = 1.0 if amp == 0.0 else amp ori = Q / norm conf = _fit_confidence( - sensors=sensors, rd=rd_final, Q=Q, ori=ori, whitener=whitener, - fwd_data=fwd_data) + sensors=sensors, rd=rd_final, Q=Q, ori=ori, whitener=whitener, fwd_data=fwd_data + ) - msg = '---- Fitted : %7.1f ms' % (1000. 
* t) + msg = "---- Fitted : %7.1f ms" % (1000.0 * t) if surf is not None: dist_to_inner_skull = _compute_nearest( - surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0] - msg += (", distance to inner skull : %2.4f mm" - % (dist_to_inner_skull * 1000.)) + surf["rr"], rd_final[np.newaxis, :], return_dists=True + )[1][0] + msg += ", distance to inner skull : %2.4f mm" % (dist_to_inner_skull * 1000.0) logger.info(msg) return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj -def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs, - guess_data, *, sensors, fwd_data, whitener, - fmin_cobyla, ori, rank, rhoend): +def _fit_dipole_fixed( + min_dist_to_inner_skull, + B_orig, + t, + guess_rrs, + guess_data, + *, + sensors, + fwd_data, + whitener, + fmin_cobyla, + ori, + rank, + rhoend, +): """Fit a data using a fixed position.""" B = np.dot(whitener, B_orig) B2 = np.dot(B, B) if B2 == 0: - warn('Zero field found for time %s' % t) + warn(f"Zero field found for time {t}") return np.zeros(3), 0, np.zeros(3), 0, np.zeros(6) # Compute the dipole moment Q, gof, residual_noproj = _fit_Q( - fwd_data=guess_data, whitener=whitener, B=B, B2=B2, B_orig=B_orig, - sensors=sensors, rd=None, ori=ori)[:3] + fwd_data=guess_data, + whitener=whitener, + B=B, + B2=B2, + B_orig=B_orig, + sensors=sensors, + rd=None, + ori=ori, + )[:3] if ori is None: amp = np.sqrt(np.dot(Q, Q)) - norm = 1. if amp == 0. else amp + norm = 1.0 if amp == 0.0 else amp ori = Q / norm else: amp = np.dot(Q, ori) @@ -1144,9 +1445,20 @@ def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs, @verbose -def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=None, - pos=None, ori=None, rank=None, accuracy='normal', tol=5e-5, - verbose=None): +def fit_dipole( + evoked, + cov, + bem, + trans=None, + min_dist=5.0, + n_jobs=None, + pos=None, + ori=None, + rank=None, + accuracy="normal", + tol=5e-5, + verbose=None, +): """Fit a dipole. Parameters @@ -1219,78 +1531,82 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=None, ----- .. versionadded:: 0.9.0 """ - from scipy import linalg # This could eventually be adapted to work with other inputs, these # are what is needed: evoked = evoked.copy() - _validate_type(accuracy, str, 'accuracy') - _check_option('accuracy', accuracy, ('accurate', 'normal')) + _validate_type(accuracy, str, "accuracy") + _check_option("accuracy", accuracy, ("accurate", "normal")) # Determine if a list of projectors has an average EEG ref if _needs_eeg_average_ref_proj(evoked.info): - raise ValueError('EEG average reference is mandatory for dipole ' - 'fitting.') + raise ValueError("EEG average reference is mandatory for dipole fitting.") if min_dist < 0: - raise ValueError('min_dist should be positive. Got %s' % min_dist) + raise ValueError(f"min_dist should be positive. Got {min_dist}") if ori is not None and pos is None: - raise ValueError('pos must be provided if ori is not None') + raise ValueError("pos must be provided if ori is not None") data = evoked.data if not np.isfinite(data).all(): - raise ValueError('Evoked data must be finite') + raise ValueError("Evoked data must be finite") info = evoked.info times = evoked.times.copy() comment = evoked.comment # Convert the min_dist to meters - min_dist_to_inner_skull = min_dist / 1000. 
+ min_dist_to_inner_skull = min_dist / 1000.0 del min_dist # Figure out our inputs - neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False, - exclude=[])) + neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False, exclude=[])) if isinstance(bem, str): bem_extra = bem else: bem_extra = repr(bem) - logger.info('BEM : %s' % bem_extra) + logger.info(f"BEM : {bem_extra}") mri_head_t, trans = _get_trans(trans) - logger.info('MRI transform : %s' % trans) + logger.info(f"MRI transform : {trans}") safe_false = _verbose_safe_false() bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=safe_false) - if not bem['is_sphere']: + if not bem["is_sphere"]: # Find the best-fitting sphere - inner_skull = _bem_find_surface(bem, 'inner_skull') + inner_skull = _bem_find_surface(bem, "inner_skull") inner_skull = inner_skull.copy() - R, r0 = _fit_sphere(inner_skull['rr'], disp=False) + R, r0 = _fit_sphere(inner_skull["rr"]) # r0 back to head frame for logging - r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0] - inner_skull['r0'] = r0 - logger.info('Head origin : ' - '%6.1f %6.1f %6.1f mm rad = %6.1f mm.' - % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R)) + r0 = apply_trans(mri_head_t["trans"], r0[np.newaxis, :])[0] + inner_skull["r0"] = r0 + logger.info( + f"Head origin : {1000 * r0[0]:6.1f} {1000 * r0[1]:6.1f} " + f"{1000 * r0[2]:6.1f} mm rad = {1000 * R:6.1f} mm." + ) del R, r0 else: - r0 = bem['r0'] - if len(bem.get('layers', [])) > 0: - R = bem['layers'][0]['rad'] - kind = 'rad' + r0 = bem["r0"] + if len(bem.get("layers", [])) > 0: + R = bem["layers"][0]["rad"] + kind = "rad" else: # MEG-only # Use the minimum distance to the MEG sensors as the radius then - R = np.dot(np.linalg.inv(info['dev_head_t']['trans']), - np.hstack([r0, [1.]]))[:3] # r0 -> device - R = R - [info['chs'][pick]['loc'][:3] - for pick in pick_types(info, meg=True, exclude=[])] + R = np.dot( + np.linalg.inv(info["dev_head_t"]["trans"]), np.hstack([r0, [1.0]]) + )[:3] # r0 -> device + R = R - [ + info["chs"][pick]["loc"][:3] + for pick in pick_types(info, meg=True, exclude=[]) + ] if len(R) == 0: - raise RuntimeError('No MEG channels found, but MEG-only ' - 'sphere model used') - R = np.min(np.sqrt(np.sum(R * R, axis=1))) # use dist to sensors - kind = 'max_rad' - logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm, ' - '%s = %6.1f mm' - % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R)) - inner_skull = dict(R=R, r0=r0) # NB sphere model defined in head frame + raise RuntimeError( + "No MEG channels found, but MEG-only sphere model used" + ) + R = np.min(np.linalg.norm(R, axis=1)) + kind = "min_rad" + logger.info( + f"Sphere model : origin at ({1000 * r0[0]: 7.2f} {1000 * r0[1]: 7.2f} " + f"{1000 * r0[2]: 7.2f}) mm, {kind} = {R:6.1f} mm" + ) + # NB sphere model defined in head frame + inner_skull = ConductorModel(layers=[dict(rad=R)], r0=r0, is_sphere=True) del R, r0 # Deal with DipoleFixed cases here @@ -1298,23 +1614,24 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=None, fixed_position = True pos = np.array(pos, float) if pos.shape != (3,): - raise ValueError('pos must be None or a 3-element array-like,' - ' got %s' % (pos,)) - logger.info('Fixed position : %6.1f %6.1f %6.1f mm' - % tuple(1000 * pos)) + raise ValueError(f"pos must be None or a 3-element array-like, got {pos}") + logger.info( + "Fixed position : {:6.1f} {:6.1f} {:6.1f} mm".format(*tuple(1000 * pos)) + ) if ori is not None: ori = np.array(ori, float) if ori.shape != (3,): - raise ValueError('oris 
must be None or a 3-element array-like,' - ' got %s' % (ori,)) + raise ValueError( + f"oris must be None or a 3-element array-like, got {ori}" + ) norm = np.sqrt(np.sum(ori * ori)) if not np.isclose(norm, 1): - raise ValueError('ori must be a unit vector, got length %s' - % (norm,)) - logger.info('Fixed orientation : %6.4f %6.4f %6.4f mm' - % tuple(ori)) + raise ValueError(f"ori must be a unit vector, got length {norm}") + logger.info( + "Fixed orientation : {:6.4f} {:6.4f} {:6.4f} mm".format(*tuple(ori)) + ) else: - logger.info('Free orientation : ') + logger.info("Free orientation : ") fit_n_jobs = 1 # only use 1 job to do the guess fitting else: fixed_position = False @@ -1324,43 +1641,41 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=None, guess_mindist = max(0.005, min_dist_to_inner_skull) guess_exclude = 0.02 - logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,)) + logger.info(f"Guess grid : {1000 * guess_grid:6.1f} mm") if guess_mindist > 0.0: - logger.info('Guess mindist : %6.1f mm' - % (1000 * guess_mindist,)) + logger.info(f"Guess mindist : {1000 * guess_mindist:6.1f} mm") if guess_exclude > 0: - logger.info('Guess exclude : %6.1f mm' - % (1000 * guess_exclude,)) - logger.info(f'Using {accuracy} MEG coil definitions.') + logger.info(f"Guess exclude : {1000 * guess_exclude:6.1f} mm") + logger.info(f"Using {accuracy} MEG coil definitions.") fit_n_jobs = n_jobs cov = _ensure_cov(cov) - logger.info('') + logger.info("") _print_coord_trans(mri_head_t) - _print_coord_trans(info['dev_head_t']) - logger.info('%d bad channels total' % len(info['bads'])) + _print_coord_trans(info["dev_head_t"]) + logger.info(f"{len(info['bads'])} bad channels total") # Forward model setup (setup_forward_model from setup.c) ch_types = evoked.get_channel_types() sensors = dict() - if 'grad' in ch_types or 'mag' in ch_types: - sensors['meg'] = _prep_meg_channels( - info, exclude='bads', accuracy=accuracy, verbose=verbose) - if 'eeg' in ch_types: - sensors['eeg'] = _prep_eeg_channels( - info, exclude='bads', verbose=verbose) + if "grad" in ch_types or "mag" in ch_types: + sensors["meg"] = _prep_meg_channels( + info, exclude="bads", accuracy=accuracy, verbose=verbose + ) + if "eeg" in ch_types: + sensors["eeg"] = _prep_eeg_channels(info, exclude="bads", verbose=verbose) # Ensure that MEG and/or EEG channels are present if len(sensors) == 0: - raise RuntimeError('No MEG or EEG channels found.') + raise RuntimeError("No MEG or EEG channels found.") # Whitener for the data - logger.info('Decomposing the sensor noise covariance matrix...') + logger.info("Decomposing the sensor noise covariance matrix...") picks = pick_types(info, meg=True, eeg=True, ref_meg=False) # In case we want to more closely match MNE-C for debugging: - # from .io.pick import pick_info + # from ._fiff.pick import pick_info # from .cov import prepare_noise_cov # info_nb = pick_info(info, picks) # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False) @@ -1370,63 +1685,87 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=None, # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero]) # whitener = np.dot(whitener, cov['eigvec']) - whitener, _, rank = compute_whitener(cov, info, picks=picks, - rank=rank, return_rank=True) + whitener, _, rank = compute_whitener( + cov, info, picks=picks, rank=rank, return_rank=True + ) # Proceed to computing the fits (make_guess_data) if fixed_position: guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True])) - logger.info('Compute 
forward for dipole location...') + logger.info("Compute forward for dipole location...") else: - logger.info('\n---- Computing the forward solution for the guesses...') - guess_src = _make_guesses(inner_skull, guess_grid, guess_exclude, - guess_mindist, n_jobs=n_jobs)[0] + logger.info("\n---- Computing the forward solution for the guesses...") + guess_src = _make_guesses( + inner_skull, guess_grid, guess_exclude, guess_mindist, n_jobs=n_jobs + )[0] # grid coordinates go from mri to head frame - transform_surface_to(guess_src, 'head', mri_head_t) - logger.info('Go through all guess source locations...') + transform_surface_to(guess_src, "head", mri_head_t) + logger.info("Go through all guess source locations...") # inner_skull goes from mri to head frame - if 'rr' in inner_skull: - transform_surface_to(inner_skull, 'head', mri_head_t) + if "rr" in inner_skull: + transform_surface_to(inner_skull, "head", mri_head_t) if fixed_position: - if 'rr' in inner_skull: - check = _surface_constraint(pos, inner_skull, - min_dist_to_inner_skull) + if "rr" in inner_skull: + check = _surface_constraint(pos, inner_skull, min_dist_to_inner_skull) else: check = _sphere_constraint( - pos, inner_skull['r0'], - R_adj=inner_skull['R'] - min_dist_to_inner_skull) + pos, + inner_skull["r0"], + R_adj=inner_skull.radius - min_dist_to_inner_skull, + ) if check <= 0: - raise ValueError('fixed position is %0.1fmm outside the inner ' - 'skull boundary' % (-1000 * check,)) + raise ValueError( + f"fixed position is {-1000 * check:0.1f}mm outside the inner skull " + "boundary" + ) # C code computes guesses w/sphere model for speed, don't bother here fwd_data = _prep_field_computation( - guess_src['rr'], sensors=sensors, bem=bem, n_jobs=n_jobs, - verbose=safe_false) - fwd_data['inner_skull'] = inner_skull + sensors=sensors, bem=bem, n_jobs=n_jobs, verbose=safe_false + ) + fwd_data["inner_skull"] = inner_skull guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards( - sensors=sensors, fwd_data=fwd_data, whitener=whitener, - rr=guess_src['rr'], n_jobs=fit_n_jobs) + sensors=sensors, + fwd_data=fwd_data, + whitener=whitener, + rr=guess_src["rr"], + n_jobs=fit_n_jobs, + ) # decompose ahead of time - guess_fwd_svd = [linalg.svd(fwd, full_matrices=False) - for fwd in np.array_split(guess_fwd, - len(guess_src['rr']))] - guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd, - fwd_orig=guess_fwd_orig, scales=guess_fwd_scales) + guess_fwd_svd = [ + _safe_svd(fwd, full_matrices=False) + for fwd in np.array_split(guess_fwd, len(guess_src["rr"])) + ] + guess_data = dict( + fwd=guess_fwd, + fwd_svd=guess_fwd_svd, + fwd_orig=guess_fwd_orig, + scales=guess_fwd_scales, + ) del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales # destroyed - logger.info('[done %d source%s]' % (guess_src['nuse'], - _pl(guess_src['nuse']))) + logger.info("[done %d source%s]", guess_src["nuse"], _pl(guess_src["nuse"])) # Do actual fits data = data[picks] - ch_names = [info['ch_names'][p] for p in picks] - proj_op = make_projector(info['projs'], ch_names, info['bads'])[0] + ch_names = [info["ch_names"][p] for p in picks] + proj_op = make_projector(info["projs"], ch_names, info["bads"])[0] fun = _fit_dipole_fixed if fixed_position else _fit_dipole out = _fit_dipoles( - fun, min_dist_to_inner_skull, data, times, guess_src['rr'], - guess_data, sensors=sensors, fwd_data=fwd_data, whitener=whitener, - ori=ori, n_jobs=n_jobs, rank=rank, rhoend=tol) + fun, + min_dist_to_inner_skull, + data, + times, + guess_src["rr"], + guess_data, + sensors=sensors, + 
fwd_data=fwd_data, + whitener=whitener, + ori=ori, + n_jobs=n_jobs, + rank=rank, + rhoend=tol, + ) assert len(out) == 8 if fixed_position and ori is not None: # DipoleFixed @@ -1434,38 +1773,96 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=None, out_info = deepcopy(info) loc = np.concatenate([pos, ori, np.zeros(6)]) out_info._unlocked = True - out_info['chs'] = [ - dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE, - coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM, - coil_type=FIFF.FIFFV_COIL_DIPOLE, - unit_mul=0, range=1, cal=1., scanno=1, logno=1), - dict(ch_name='goodness', loc=np.full(12, np.nan), - kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM, - coord_frame=FIFF.FIFFV_COORD_UNKNOWN, - coil_type=FIFF.FIFFV_COIL_NONE, - unit_mul=0, range=1., cal=1., scanno=2, logno=100)] - for key in ['hpi_meas', 'hpi_results', 'projs']: + out_info["chs"] = [ + dict( + ch_name="dip 01", + loc=loc, + kind=FIFF.FIFFV_DIPOLE_WAVE, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + unit=FIFF.FIFF_UNIT_AM, + coil_type=FIFF.FIFFV_COIL_DIPOLE, + unit_mul=0, + range=1, + cal=1.0, + scanno=1, + logno=1, + ), + dict( + ch_name="goodness", + loc=np.full(12, np.nan), + kind=FIFF.FIFFV_GOODNESS_FIT, + unit=FIFF.FIFF_UNIT_AM, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + coil_type=FIFF.FIFFV_COIL_NONE, + unit_mul=0, + range=1.0, + cal=1.0, + scanno=2, + logno=100, + ), + ] + for key in ["hpi_meas", "hpi_results", "projs"]: out_info[key] = list() - for key in ['acq_pars', 'acq_stim', 'description', 'dig', - 'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name', - 'subject_info']: + for key in [ + "acq_pars", + "acq_stim", + "description", + "dig", + "experimenter", + "hpi_subsystem", + "proj_id", + "proj_name", + "subject_info", + ]: out_info[key] = None out_info._unlocked = False - out_info['bads'] = [] + out_info["bads"] = [] out_info._update_redundant() out_info._check_consistency() - dipoles = DipoleFixed(out_info, data, times, evoked.nave, - evoked._aspect_kind, comment=comment) + dipoles = DipoleFixed( + out_info, data, times, evoked.nave, evoked._aspect_kind, comment=comment + ) else: - dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment, - out[4], out[5], out[6]) + dipoles = Dipole( + times, out[0], out[1], out[2], out[3], comment, out[4], out[5], out[6] + ) residual = evoked.copy().apply_proj() # set the projs active residual.data[picks] = np.dot(proj_op, out[-1]) - logger.info('%d time points fitted' % len(dipoles.times)) + logger.info("%d time points fitted", len(dipoles.times)) return dipoles, residual -def get_phantom_dipoles(kind='vectorview'): +# Every other row of Table 3 from OyamaEtAl2015 +_OYAMA = """ +0.00 56.29 -27.50 +32.50 56.29 5.00 +0.00 65.00 5.00 +-32.50 56.29 5.00 +0.00 56.29 37.50 +0.00 32.50 61.29 +-56.29 0.00 -27.50 +-56.29 32.50 5.00 +-65.00 0.00 5.00 +-56.29 -32.50 5.00 +-56.29 0.00 37.50 +-32.50 0.00 61.29 +0.00 -56.29 -27.50 +-32.50 -56.29 5.00 +0.00 -65.00 5.00 +32.50 -56.29 5.00 +0.00 -56.29 37.50 +0.00 -32.50 61.29 +56.29 0.00 -27.50 +56.29 -32.50 5.00 +65.00 0.00 5.00 +56.29 32.50 5.00 +56.29 0.00 37.50 +32.50 0.00 61.29 +0.00 0.00 70.00 +""" + + +def get_phantom_dipoles(kind="vectorview"): """Get standard phantom dipole locations and orientations. Parameters @@ -1477,6 +1874,11 @@ def get_phantom_dipoles(kind='vectorview'): The Neuromag VectorView phantom. ``otaniemi`` The older Neuromag phantom used at Otaniemi. + ``oyama`` + The phantom from :footcite:`OyamaEtAl2015`. + + .. 
versionchanged:: 1.6 + Support added for ``'oyama'``. Returns ------- @@ -1493,9 +1895,14 @@ def get_phantom_dipoles(kind='vectorview'): ----- The Elekta phantoms have a radius of 79.5mm, and HPI coil locations in the XY-plane at the axis extrema (e.g., (79.5, 0), (0, -79.5), ...). + + References + ---------- + .. footbibliography:: """ - _check_option('kind', kind, ['vectorview', 'otaniemi']) - if kind == 'vectorview': + _validate_type(kind, str, "kind") + _check_option("kind", kind, ["vectorview", "otaniemi", "oyama"]) + if kind == "vectorview": # these values were pulled from a scanned image provided by # Elekta folks a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9]) @@ -1506,7 +1913,7 @@ def get_phantom_dipoles(kind='vectorview'): d = [44.4, 34.0, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9] z = np.concatenate((c, c, d, d)) signs = ([1, -1] * 4 + [-1, 1] * 4) * 2 - elif kind == 'otaniemi': + elif kind == "otaniemi": # these values were pulled from an Neuromag manual # (NM20456A, 13.7.1999, p.65) a = np.array([56.3, 47.6, 39.0, 30.3]) @@ -1516,19 +1923,43 @@ def get_phantom_dipoles(kind='vectorview'): y = np.concatenate((c, c, -a, -b, c, c, b, a)) z = np.concatenate((b, a, b, a, b, a, a, b)) signs = [-1] * 8 + [1] * 16 + [-1] * 8 - pos = np.vstack((x, y, z)).T / 1000. + else: + assert kind == "oyama" + xyz = np.fromstring(_OYAMA.strip().replace("\n", " "), sep=" ").reshape(25, 3) + xyz = np.repeat(xyz, 2, axis=0) + x, y, z = xyz.T + signs = [1] * 50 + pos = np.vstack((x, y, z)).T / 1000.0 + # For Neuromag-style phantoms, # Locs are always in XZ or YZ, and so are the oris. The oris are # also in the same plane and tangential, so it's easy to determine # the orientation. + # For Oyama, vectors are orthogonal to the position vector and oriented with one + # pointed toward the north pole (except for the topmost points, which are just xy). 
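    # A minimal standalone sketch of the tangential-orientation construction
    # described above (the position is one row of the _OYAMA table; the other
    # names are hypothetical and for illustration only):
    #
    #     import numpy as np
    #     pos = np.array([0.00, 56.29, -27.50]) / 1000.0  # phantom dipole, in m
    #     ori = np.array([0.0, 0.0, 1.0])                 # initial guess, toward +z
    #     pos_unit = pos / np.linalg.norm(pos)
    #     ori = ori - pos_unit * np.dot(ori, pos_unit)    # project out radial part
    #     ori /= np.linalg.norm(ori)                      # unit-length tangential ori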
ori = list() for pi, this_pos in enumerate(pos): this_ori = np.zeros(3) idx = np.where(this_pos == 0)[0] # assert len(idx) == 1 + if len(idx) == 0: # oyama + idx = [np.argmin(this_pos)] idx = np.setdiff1d(np.arange(3), idx[0]) - this_ori[idx] = (this_pos[idx][::-1] / - np.linalg.norm(this_pos[idx])) * [1, -1] - this_ori *= signs[pi] + this_ori[idx] = (this_pos[idx][::-1] / np.linalg.norm(this_pos[idx])) * [1, -1] + if kind == "oyama": + # Ensure it's orthogonal to the position vector + pos_unit = this_pos / np.linalg.norm(this_pos) + this_ori -= pos_unit * np.dot(this_ori, pos_unit) + this_ori /= np.linalg.norm(this_ori) + # This was empirically determined by looking at the dipole fits + if np.abs(this_ori[2]) >= 1e-6: # if it's not in the XY plane + this_ori *= -1 * np.sign(this_ori[2]) # point downward + elif np.abs(this_ori[0]) < 1e-6: # in the XY plane (at the north pole) + this_ori *= -1 * np.sign(this_ori[1]) # point backward + # Odd ones create a RH coordinate system with their ori + if pi % 2: + this_ori = np.cross(pos_unit, this_ori) + else: + this_ori *= signs[pi] # Now we have this quality, which we could uncomment to # double-check: # np.testing.assert_allclose(np.dot(this_ori, this_pos) / @@ -1549,6 +1980,11 @@ def _concatenate_dipoles(dipoles): ori.append(dipole.ori) gof.append(dipole.gof) - return Dipole(np.concatenate(times), np.concatenate(pos), - np.concatenate(amplitude), np.concatenate(ori), - np.concatenate(gof), name=None) + return Dipole( + np.concatenate(times), + np.concatenate(pos), + np.concatenate(amplitude), + np.concatenate(ori), + np.concatenate(gof), + name=None, + ) diff --git a/mne/epochs.py b/mne/epochs.py index b2448bd95ba..6c3935097bf 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -1,126 +1,149 @@ -# -*- coding: utf-8 -*- - """Tools for working with epoched data.""" -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Daniel Strohmeier -# Denis Engemann -# Mainak Jas -# Stefan Appelhoff -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-from functools import partial -from collections import Counter -from copy import deepcopy import json import operator import os.path as op +from collections import Counter +from copy import deepcopy +from functools import partial +from inspect import getfullargspec +from pathlib import Path import numpy as np - -from .io.utils import _construct_bids_filename -from .io.write import (start_and_end_file, start_block, end_block, - write_int, write_float, write_float_matrix, - write_double_matrix, write_complex_float_matrix, - write_complex_double_matrix, write_id, write_string, - _get_split_size, _NEXT_FILE_BUFFER, INT32_MAX) -from .io.meas_info import (read_meas_info, write_meas_info, - _ensure_infos_match, ContainsMixin) -from .io.open import fiff_open, _get_next_fname -from .io.tree import dir_tree_find -from .io.tag import read_tag, read_tag_info -from .io.constants import FIFF -from .io.fiff.raw import _get_fname_rep -from .io.pick import (channel_indices_by_type, channel_type, - pick_channels, pick_info, _pick_data_channels, - _DATA_CH_TYPES_SPLIT, _picks_to_idx) -from .io.proj import setup_proj, ProjMixin -from .io.base import BaseRaw, TimeMixin, _get_ch_factors +from scipy.interpolate import interp1d + +from ._fiff.constants import FIFF +from ._fiff.meas_info import ( + ContainsMixin, + SetChannelsMixin, + _ensure_infos_match, + read_meas_info, + write_meas_info, +) +from ._fiff.open import _get_next_fname, fiff_open +from ._fiff.pick import ( + _DATA_CH_TYPES_SPLIT, + _pick_data_channels, + _picks_to_idx, + channel_indices_by_type, + channel_type, + pick_channels, + pick_info, +) +from ._fiff.proj import ProjMixin, setup_proj +from ._fiff.tag import _read_tag_header, read_tag +from ._fiff.tree import dir_tree_find +from ._fiff.utils import _make_split_fnames +from ._fiff.write import ( + _NEXT_FILE_BUFFER, + INT32_MAX, + _get_split_size, + end_block, + start_and_end_file, + start_block, + write_complex_double_matrix, + write_complex_float_matrix, + write_double_matrix, + write_float, + write_float_matrix, + write_id, + write_int, + write_string, +) +from .annotations import ( + EpochAnnotationsMixin, + _read_annotations_fif, + _write_annotations, + events_from_annotations, +) +from .baseline import _check_baseline, _log_rescale, rescale from .bem import _check_origin +from .channels.channels import InterpolationMixin, ReferenceMixin, UpdateChannelsMixin +from .event import _read_events_fif, make_fixed_length_events, match_event_names from .evoked import EvokedArray -from .baseline import rescale, _log_rescale, _check_baseline -from .channels.channels import (UpdateChannelsMixin, - SetChannelsMixin, InterpolationMixin) -from .filter import detrend, FilterMixin, _check_fun -from .parallel import parallel_func - -from .event import (_read_events_fif, make_fixed_length_events, - match_event_names) +from .filter import FilterMixin, _check_fun, detrend from .fixes import rng_uniform -from .time_frequency.spectrum import (EpochsSpectrum, SpectrumMixin, - _validate_method) -from .viz import (plot_epochs, plot_epochs_image, - plot_topo_image_epochs, plot_drop_log) -from .utils import (_check_fname, check_fname, logger, verbose, repr_html, - check_random_state, warn, _pl, - sizeof_fmt, SizeMixin, copy_function_doc_to_method_doc, - _check_pandas_installed, - _check_preload, GetEpochsMixin, - _prepare_read_metadata, _prepare_write_metadata, - _check_event_id, _gen_events, _check_option, - _check_combine, _build_data_frame, - _check_pandas_index_arguments, _convert_times, - _scale_dataframe_data, 
_check_time_format, object_size, - _on_missing, _validate_type, _ensure_events, - _path_like) +from .html_templates import _get_html_template +from .parallel import parallel_func +from .time_frequency.spectrum import EpochsSpectrum, SpectrumMixin, _validate_method +from .time_frequency.tfr import AverageTFR, EpochsTFR +from .utils import ( + ExtendedTimeMixin, + GetEpochsMixin, + SizeMixin, + _build_data_frame, + _check_combine, + _check_event_id, + _check_fname, + _check_option, + _check_pandas_index_arguments, + _check_pandas_installed, + _check_preload, + _check_time_format, + _convert_times, + _ensure_events, + _gen_events, + _on_missing, + _path_like, + _pl, + _prepare_read_metadata, + _prepare_write_metadata, + _scale_dataframe_data, + _validate_type, + check_fname, + check_random_state, + copy_function_doc_to_method_doc, + logger, + object_size, + repr_html, + sizeof_fmt, + verbose, + warn, +) from .utils.docs import fill_doc -from .annotations import (_write_annotations, _read_annotations_fif, - EpochAnnotationsMixin) +from .viz import plot_drop_log, plot_epochs, plot_epochs_image, plot_topo_image_epochs def _pack_reject_params(epochs): reject_params = dict() - for key in ('reject', 'flat', 'reject_tmin', 'reject_tmax'): + for key in ("reject", "flat", "reject_tmin", "reject_tmax"): val = getattr(epochs, key, None) if val is not None: reject_params[key] = val return reject_params -def _save_split(epochs, fname, part_idx, n_parts, fmt, split_naming, - overwrite): +def _save_split(epochs, split_fnames, part_idx, n_parts, fmt, overwrite): """Split epochs. Anything new added to this function also needs to be added to BaseEpochs.save to account for new file sizes. """ # insert index in filename - base, ext = op.splitext(fname) - if part_idx > 0: - if split_naming == 'neuromag': - fname = '%s-%d%s' % (base, part_idx, ext) - else: - assert split_naming == 'bids' - fname = _construct_bids_filename(base, ext, part_idx, - validate=False) - _check_fname(fname, overwrite=overwrite) + this_fname = split_fnames[part_idx] + _check_fname(this_fname, overwrite=overwrite) - next_fname = None + next_fname, next_idx = None, None if part_idx < n_parts - 1: - if split_naming == 'neuromag': - next_fname = '%s-%d%s' % (base, part_idx + 1, ext) - else: - assert split_naming == 'bids' - next_fname = _construct_bids_filename(base, ext, part_idx + 1, - validate=False) next_idx = part_idx + 1 - else: - next_idx = None + next_fname = split_fnames[next_idx] - with start_and_end_file(fname) as fid: + with start_and_end_file(this_fname) as fid: _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx) def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx): info = epochs.info - meas_id = info['meas_id'] + meas_id = info["meas_id"] start_block(fid, FIFF.FIFFB_MEAS) write_id(fid, FIFF.FIFF_BLOCK_ID) - if info['meas_id'] is not None: - write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id']) + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info["meas_id"]) # Write measurement info write_meas_info(fid, info) @@ -130,23 +153,23 @@ def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx): start_block(fid, FIFF.FIFFB_MNE_EPOCHS) # write events out after getting data to ensure bad events are dropped - data = epochs.get_data() + data = epochs.get_data(copy=False) - _check_option('fmt', fmt, ['single', 'double']) + _check_option("fmt", fmt, ["single", "double"]) if np.iscomplexobj(data): - if fmt == 'single': + if fmt == "single": write_function = write_complex_float_matrix - 
elif fmt == 'double': + elif fmt == "double": write_function = write_complex_double_matrix else: - if fmt == 'single': + if fmt == "single": write_function = write_float_matrix - elif fmt == 'double': + elif fmt == "double": write_function = write_double_matrix # Epoch annotations are written if there are any - annotations = getattr(epochs, 'annotations', []) + annotations = getattr(epochs, "annotations", []) if annotations is not None and len(annotations): _write_annotations(fid, annotations) @@ -164,7 +187,7 @@ def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx): end_block(fid, FIFF.FIFFB_MNE_METADATA) # First and last sample - first = int(round(epochs.tmin * info['sfreq'])) # round just to be safe + first = int(round(epochs.tmin * info["sfreq"])) # round just to be safe last = first + len(epochs.times) - 1 write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first) write_int(fid, FIFF.FIFF_LAST_SAMPLE, last) @@ -179,10 +202,9 @@ def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx): write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax) # The epochs itself - decal = np.empty(info['nchan']) - for k in range(info['nchan']): - decal[k] = 1.0 / (info['chs'][k]['cal'] * - info['chs'][k].get('scale', 1.0)) + decal = np.empty(info["nchan"]) + for k in range(info["nchan"]): + decal[k] = 1.0 / (info["chs"][k]["cal"] * info["chs"][k].get("scale", 1.0)) data *= decal[np.newaxis, :, np.newaxis] @@ -191,16 +213,13 @@ def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx): # undo modifications to data data /= decal[np.newaxis, :, np.newaxis] - write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG, - json.dumps(epochs.drop_log)) + write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG, json.dumps(epochs.drop_log)) reject_params = _pack_reject_params(epochs) if reject_params: - write_string(fid, FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT, - json.dumps(reject_params)) + write_string(fid, FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT, json.dumps(reject_params)) - write_int(fid, FIFF.FIFF_MNE_EPOCHS_SELECTION, - epochs.selection) + write_int(fid, FIFF.FIFF_MNE_EPOCHS_SELECTION, epochs.selection) # And now write the next file info in case epochs are split on disk if next_fname is not None and n_parts > 1: @@ -218,7 +237,7 @@ def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx): def _event_id_string(event_id): - return ';'.join([k + ':' + str(v) for k, v in event_id.items()]) + return ";".join([k + ":" + str(v) for k, v in event_id.items()]) def _merge_events(events, event_id, selection): @@ -228,7 +247,6 @@ def _merge_events(events, event_id, selection): event_idxs_to_delete = list() unique_events, counts = np.unique(events[:, 0], return_counts=True) for ev in unique_events[counts > 1]: - # indices at which the non-unique events happened idxs = (events[:, 0] == ev).nonzero()[0] @@ -244,18 +262,18 @@ def _merge_events(events, event_id, selection): # Else, make a new event_id for the merged event else: - # Find all event_id keys involved in duplicated events. These # keys will be merged to become a new entry in "event_id" event_id_keys = list(event_id.keys()) event_id_vals = list(event_id.values()) - new_key_comps = [event_id_keys[event_id_vals.index(value)] - for value in ev_vals] + new_key_comps = [ + event_id_keys[event_id_vals.index(value)] for value in ev_vals + ] # Check if we already have an entry for merged keys of duplicate # events ... 
if yes, reuse it for key in event_id: - if set(key.split('/')) == set(new_key_comps): + if set(key.split("/")) == set(new_key_comps): new_event_val = event_id[key] break @@ -263,9 +281,10 @@ def _merge_events(events, event_id, selection): # the event_id dict else: ev_vals = np.unique( - np.concatenate((list(event_id.values()), - events[:, 1:].flatten()), - axis=0)) + np.concatenate( + (list(event_id.values()), events[:, 1:].flatten()), axis=0 + ) + ) if ev_vals[0] > 1: new_event_val = 1 else: @@ -274,7 +293,7 @@ def _merge_events(events, event_id, selection): idx = -1 if len(idx) == 0 else idx[0] new_event_val = ev_vals[idx] + 1 - new_event_id_key = '/'.join(sorted(new_key_comps)) + new_event_id_key = "/".join(sorted(new_key_comps)) event_id[new_event_id_key] = int(new_event_val) # Replace duplicate event times with merged event and remember which @@ -290,8 +309,7 @@ def _merge_events(events, event_id, selection): return new_events, event_id, new_selection -def _handle_event_repeated(events, event_id, event_repeated, selection, - drop_log): +def _handle_event_repeated(events, event_id, event_repeated, selection, drop_log): """Handle repeated events. Note that drop_log will be modified inplace @@ -306,29 +324,34 @@ def _handle_event_repeated(events, event_id, event_repeated, selection, return events, event_id, selection, drop_log # Else, we have duplicates. Triage ... - _check_option('event_repeated', event_repeated, ['error', 'drop', 'merge']) + _check_option("event_repeated", event_repeated, ["error", "drop", "merge"]) drop_log = list(drop_log) - if event_repeated == 'error': - raise RuntimeError('Event time samples were not unique. Consider ' - 'setting the `event_repeated` parameter."') + if event_repeated == "error": + raise RuntimeError( + "Event time samples were not unique. Consider " + 'setting the `event_repeated` parameter."' + ) - elif event_repeated == 'drop': - logger.info('Multiple event values for single event times found. ' - 'Keeping the first occurrence and dropping all others.') + elif event_repeated == "drop": + logger.info( + "Multiple event values for single event times found. " + "Keeping the first occurrence and dropping all others." + ) new_events = events[u_ev_idxs] new_selection = selection[u_ev_idxs] drop_ev_idxs = np.setdiff1d(selection, new_selection) for idx in drop_ev_idxs: - drop_log[idx] = drop_log[idx] + ('DROP DUPLICATE',) + drop_log[idx] = drop_log[idx] + ("DROP DUPLICATE",) selection = new_selection - elif event_repeated == 'merge': - logger.info('Multiple event values for single event times found. ' - 'Creating new event value to reflect simultaneous events.') - new_events, event_id, new_selection = \ - _merge_events(events, event_id, selection) + elif event_repeated == "merge": + logger.info( + "Multiple event values for single event times found. " + "Creating new event value to reflect simultaneous events." 
+ ) + new_events, event_id, new_selection = _merge_events(events, event_id, selection) drop_ev_idxs = np.setdiff1d(selection, new_selection) for idx in drop_ev_idxs: - drop_log[idx] = drop_log[idx] + ('MERGE DUPLICATE',) + drop_log[idx] = drop_log[idx] + ("MERGE DUPLICATE",) selection = new_selection drop_log = tuple(drop_log) @@ -340,10 +363,20 @@ def _handle_event_repeated(events, event_id, event_repeated, selection, @fill_doc -class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, - SetChannelsMixin, InterpolationMixin, FilterMixin, - TimeMixin, SizeMixin, GetEpochsMixin, EpochAnnotationsMixin, - SpectrumMixin): +class BaseEpochs( + ProjMixin, + ContainsMixin, + UpdateChannelsMixin, + ReferenceMixin, + SetChannelsMixin, + InterpolationMixin, + FilterMixin, + ExtendedTimeMixin, + SizeMixin, + GetEpochsMixin, + EpochAnnotationsMixin, + SpectrumMixin, +): """Abstract base class for `~mne.Epochs`-type classes. .. note:: @@ -378,9 +411,11 @@ class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, .. versionadded:: 0.16 %(drop_log)s - filename : str | None + filename : Path | None The filename (if the epochs are read from disk). %(metadata_epochs)s + + .. versionadded:: 0.16 %(event_repeated_epochs)s %(raw_sfreq)s annotations : instance of mne.Annotations | None @@ -401,22 +436,50 @@ class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, """ @verbose - def __init__(self, info, data, events, event_id=None, - tmin=-0.2, tmax=0.5, - baseline=(None, 0), raw=None, picks=None, reject=None, - flat=None, decim=1, reject_tmin=None, reject_tmax=None, - detrend=None, proj=True, on_missing='raise', - preload_at_end=False, selection=None, drop_log=None, - filename=None, metadata=None, event_repeated='error', - *, raw_sfreq=None, - annotations=None, verbose=None): # noqa: D102 + def __init__( + self, + info, + data, + events, + event_id=None, + tmin=-0.2, + tmax=0.5, + baseline=(None, 0), + raw=None, + picks=None, + reject=None, + flat=None, + decim=1, + reject_tmin=None, + reject_tmax=None, + detrend=None, + proj=True, + on_missing="raise", + preload_at_end=False, + selection=None, + drop_log=None, + filename=None, + metadata=None, + event_repeated="error", + *, + raw_sfreq=None, + annotations=None, + verbose=None, + ): if events is not None: # RtEpochs can have events=None events = _ensure_events(events) - events_max = events.max() - if events_max > INT32_MAX: - raise ValueError( - f'events array values must not exceed {INT32_MAX}, ' - f'got {events_max}') + # Allow reading empty epochs (ToDo: Maybe not anymore in the future) + if len(events) == 0: + self._allow_empty = True + selection = None + else: + self._allow_empty = False + events_max = events.max() + if events_max > INT32_MAX: + raise ValueError( + f"events array values must not exceed {INT32_MAX}, " + f"got {events_max}" + ) event_id = _check_event_id(event_id, events) self.event_id = event_id del event_id @@ -424,8 +487,7 @@ def __init__(self, info, data, events, event_id=None, if events is not None: # RtEpochs can have events=None for key, val in self.event_id.items(): if val not in events[:, 2]: - msg = ('No matching events found for %s ' - '(event id %i)' % (key, val)) + msg = f"No matching events found for {key} (event id {val})" _on_missing(on_missing, msg) # ensure metadata matches original events size @@ -438,32 +500,42 @@ def __init__(self, info, data, events, event_id=None, del events values = list(self.event_id.values()) - selected = np.where(np.in1d(self.events[:, 2], values))[0] + selected = 
np.where(np.isin(self.events[:, 2], values))[0] if selection is None: selection = selected else: selection = np.array(selection, int) if selection.shape != (len(selected),): - raise ValueError('selection must be shape %s got shape %s' - % (selected.shape, selection.shape)) + raise ValueError( + f"selection must be shape {selected.shape} got shape " + f"{selection.shape}" + ) self.selection = selection if drop_log is None: self.drop_log = tuple( - () if k in self.selection else ('IGNORED',) - for k in range(max(len(self.events), - max(self.selection) + 1))) + () if k in self.selection else ("IGNORED",) + for k in range(max(len(self.events), max(self.selection) + 1)) + ) else: self.drop_log = drop_log self.events = self.events[selected] - self.events, self.event_id, self.selection, self.drop_log = \ - _handle_event_repeated( - self.events, self.event_id, event_repeated, - self.selection, self.drop_log) + ( + self.events, + self.event_id, + self.selection, + self.drop_log, + ) = _handle_event_repeated( + self.events, + self.event_id, + event_repeated, + self.selection, + self.drop_log, + ) # then subselect - sub = np.where(np.in1d(selection, self.selection))[0] + sub = np.where(np.isin(selection, self.selection))[0] if isinstance(metadata, list): metadata = [metadata[s] for s in sub] elif metadata is not None: @@ -479,13 +551,18 @@ def __init__(self, info, data, events, event_id=None, n_events = len(self.events) if n_events > 1: if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0: - warn('The events passed to the Epochs constructor are not ' - 'chronologically ordered.', RuntimeWarning) + warn( + "The events passed to the Epochs constructor are not " + "chronologically ordered.", + RuntimeWarning, + ) if n_events > 0: - logger.info('%d matching events found' % n_events) + logger.info(f"{n_events} matching events found") else: - raise ValueError('No desired events found.') + # Allow reading empty epochs (ToDo: Maybe not anymore in the future) + if not self._allow_empty: + raise ValueError("No desired events found.") else: self.drop_log = tuple() self.selection = np.array([], int) @@ -493,13 +570,14 @@ def __init__(self, info, data, events, event_id=None, # do not set self.events here, let subclass do it if (detrend not in [None, 0, 1]) or isinstance(detrend, bool): - raise ValueError('detrend must be None, 0, or 1') + raise ValueError("detrend must be None, 0, or 1") self.detrend = detrend self._raw = raw info._check_consistency() - self.picks = _picks_to_idx(info, picks, none='all', exclude=(), - allow_empty=False) + self.picks = _picks_to_idx( + info, picks, none="all", exclude=(), allow_empty=False + ) self.info = pick_info(info, self.picks) del info self._current = 0 @@ -510,48 +588,54 @@ def __init__(self, info, data, events, event_id=None, self._do_baseline = True else: assert decim == 1 - if data.ndim != 3 or data.shape[2] != \ - round((tmax - tmin) * self.info['sfreq']) + 1: - raise RuntimeError('bad data shape') + if ( + data.ndim != 3 + or data.shape[2] != round((tmax - tmin) * self.info["sfreq"]) + 1 + ): + raise RuntimeError("bad data shape") if data.shape[0] != len(self.events): raise ValueError( - 'The number of epochs and the number of events must match') + "The number of epochs and the number of events must match" + ) self.preload = True self._data = data self._do_baseline = False self._offset = None if tmin > tmax: - raise ValueError('tmin has to be less than or equal to tmax') + raise ValueError("tmin has to be less than or equal to tmax") # Handle times - sfreq = 
float(self.info['sfreq']) + sfreq = float(self.info["sfreq"]) start_idx = int(round(tmin * sfreq)) - self._raw_times = np.arange(start_idx, - int(round(tmax * sfreq)) + 1) / sfreq + self._raw_times = np.arange(start_idx, int(round(tmax * sfreq)) + 1) / sfreq self._set_times(self._raw_times) # check reject_tmin and reject_tmax if reject_tmin is not None: - if (np.isclose(reject_tmin, tmin)): + if np.isclose(reject_tmin, tmin): # adjust for potential small deviations due to sampling freq reject_tmin = self.tmin elif reject_tmin < tmin: - raise ValueError(f'reject_tmin needs to be None or >= tmin ' - f'(got {reject_tmin})') + raise ValueError( + f"reject_tmin needs to be None or >= tmin (got {reject_tmin})" + ) if reject_tmax is not None: - if (np.isclose(reject_tmax, tmax)): + if np.isclose(reject_tmax, tmax): # adjust for potential small deviations due to sampling freq reject_tmax = self.tmax elif reject_tmax > tmax: - raise ValueError(f'reject_tmax needs to be None or <= tmax ' - f'(got {reject_tmax})') + raise ValueError( + f"reject_tmax needs to be None or <= tmax (got {reject_tmax})" + ) if (reject_tmin is not None) and (reject_tmax is not None): if reject_tmin >= reject_tmax: - raise ValueError(f'reject_tmin ({reject_tmin}) needs to be ' - f' < reject_tmax ({reject_tmax})') + raise ValueError( + f"reject_tmin ({reject_tmin}) needs to be " + f" < reject_tmax ({reject_tmax})" + ) self.reject_tmin = reject_tmin self.reject_tmax = reject_tmax @@ -561,11 +645,14 @@ def __init__(self, info, data, events, event_id=None, self.decimate(decim) # baseline correction: replace `None` tuple elements with actual times - self.baseline = _check_baseline(baseline, times=self.times, - sfreq=self.info['sfreq']) + self.baseline = _check_baseline( + baseline, times=self.times, sfreq=self.info["sfreq"] + ) if self.baseline is not None and self.baseline != baseline: - logger.info(f'Setting baseline interval to ' - f'[{self.baseline[0]}, {self.baseline[1]}] s') + logger.info( + f"Setting baseline interval to " + f"[{self.baseline[0]}, {self.baseline[1]}] s" + ) logger.info(_log_rescale(self.baseline)) @@ -575,18 +662,16 @@ def __init__(self, info, data, events, event_id=None, self._reject_setup(reject, flat) # do the rest - valid_proj = [True, 'delayed', False] + valid_proj = [True, "delayed", False] if proj not in valid_proj: - raise ValueError('"proj" must be one of %s, not %s' - % (valid_proj, proj)) - if proj == 'delayed': + raise ValueError(f'"proj" must be one of {valid_proj}, not {proj}') + if proj == "delayed": self._do_delayed_proj = True - logger.info('Entering delayed SSP mode.') + logger.info("Entering delayed SSP mode.") else: self._do_delayed_proj = False activate = False if self._do_delayed_proj else proj - self._projector, self.info = setup_proj(self.info, False, - activate=activate) + self._projector, self.info = setup_proj(self.info, False, activate=activate) if preload_at_end: assert self._data is None assert self.preload is False @@ -598,22 +683,21 @@ def __init__(self, info, data, events, event_id=None, # more memory safe in most instances for ii, epoch in enumerate(self._data): self._data[ii] = np.dot(self._projector, epoch) - self._filename = str(filename) if filename is not None else filename + self.filename = filename if filename is not None else filename if raw_sfreq is None: - raw_sfreq = self.info['sfreq'] + raw_sfreq = self.info["sfreq"] self._raw_sfreq = raw_sfreq self._check_consistency() - self.set_annotations(annotations) + self.set_annotations(annotations, on_missing="ignore") 
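# Editor's illustrative sketch (not part of the patch): the ``event_repeated``
# handling refactored above can be exercised directly — two events sharing a
# sample are merged into a combined event_id entry when event_repeated="merge".
# Synthetic data; all calls below are standard MNE-Python API.
import numpy as np
import mne

info = mne.create_info(ch_names=["EEG 001"], sfreq=100.0, ch_types="eeg")
rng = np.random.default_rng(0)
raw = mne.io.RawArray(rng.standard_normal((1, 1000)), info)
events = np.array([[100, 0, 1], [100, 0, 2], [300, 0, 1]])  # duplicate sample 100
epochs = mne.Epochs(
    raw,
    events,
    event_id={"a": 1, "b": 2},
    tmin=-0.1,
    tmax=0.3,
    baseline=None,
    event_repeated="merge",
    preload=True,
)
print(epochs.event_id)  # a merged key such as "a/b" should appear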
def _check_consistency(self): """Check invariants of epochs object.""" - if hasattr(self, 'events'): + if hasattr(self, "events"): assert len(self.selection) == len(self.events) assert len(self.drop_log) >= len(self.events) - assert len(self.selection) == sum( - (len(dl) == 0 for dl in self.drop_log)) - assert hasattr(self, '_times_readonly') - assert not self.times.flags['WRITEABLE'] + assert len(self.selection) == sum(len(dl) == 0 for dl in self.drop_log) + assert hasattr(self, "_times_readonly") + assert not self.times.flags["WRITEABLE"] assert isinstance(self.drop_log, tuple) assert all(isinstance(log, tuple) for log in self.drop_log) assert all(isinstance(s, str) for log in self.drop_log for s in log) @@ -680,14 +764,15 @@ def apply_baseline(self, baseline=(None, 0), *, verbose=None): .. versionadded:: 0.10.0 """ - baseline = _check_baseline(baseline, times=self.times, - sfreq=self.info['sfreq']) + baseline = _check_baseline(baseline, times=self.times, sfreq=self.info["sfreq"]) if self.preload: if self.baseline is not None and baseline is None: - raise RuntimeError('You cannot remove baseline correction ' - 'from preloaded data once it has been ' - 'applied.') + raise RuntimeError( + "You cannot remove baseline correction " + "from preloaded data once it has been " + "applied." + ) self._do_baseline = True picks = self._detrend_picks rescale(self._data, self.times, baseline, copy=False, picks=picks) @@ -701,44 +786,55 @@ def apply_baseline(self, baseline=(None, 0), *, verbose=None): self.baseline = baseline return self - def _reject_setup(self, reject, flat): + def _reject_setup(self, reject, flat, *, allow_callable=False): """Set self._reject_time and self._channel_type_idx.""" idx = channel_indices_by_type(self.info) reject = deepcopy(reject) if reject is not None else dict() flat = deepcopy(flat) if flat is not None else dict() - for rej, kind in zip((reject, flat), ('reject', 'flat')): - if not isinstance(rej, dict): - raise TypeError('reject and flat must be dict or None, not %s' - % type(rej)) + for rej, kind in zip((reject, flat), ("reject", "flat")): + _validate_type(rej, dict, kind) bads = set(rej.keys()) - set(idx.keys()) if len(bads) > 0: - raise KeyError('Unknown channel types found in %s: %s' - % (kind, bads)) + raise KeyError(f"Unknown channel types found in {kind}: {bads}") for key in idx.keys(): # don't throw an error if rejection/flat would do nothing - if len(idx[key]) == 0 and (np.isfinite(reject.get(key, np.inf)) or - flat.get(key, -1) >= 0): + if len(idx[key]) == 0 and ( + np.isfinite(reject.get(key, np.inf)) or flat.get(key, -1) >= 0 + ): # This is where we could eventually add e.g. # self.allow_missing_reject_keys check to allow users to # provide keys that don't exist in data - raise ValueError("No %s channel found. Cannot reject based on " - "%s." % (key.upper(), key.upper())) - - # check for invalid values - for rej, kind in zip((reject, flat), ('Rejection', 'Flat')): - for key, val in rej.items(): - if val is None or val < 0: - raise ValueError('%s value must be a number >= 0, not "%s"' - % (kind, val)) + raise ValueError( + f"No {key.upper()} channel found. Cannot reject based on " + f"{key.upper()}." 
+ ) + + # check for invalid values + for rej, kind in zip((reject, flat), ("Rejection", "Flat")): + for key, val in rej.items(): + name = f"{kind} dict value for {key}" + if callable(val) and allow_callable: + continue + extra_str = "" + if allow_callable: + extra_str = "or callable" + _validate_type(val, "numeric", name, extra=extra_str) + if val is None or val < 0: + raise ValueError( + f"If using numerical {name} criteria, the value " + f"must be >= 0, not {repr(val)}" + ) # now check to see if our rejection and flat are getting more # restrictive old_reject = self.reject if self.reject is not None else dict() old_flat = self.flat if self.flat is not None else dict() - bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new ' - '{kind} values must be at least as stringent as ' - 'previous ones') + bad_msg = ( + '{kind}["{key}"] == {new} {op} {old} (old value), new ' + "{kind} values must be at least as stringent as " + "previous ones" + ) # copy thresholds for channel types that were used previously, but not # passed this time @@ -746,19 +842,32 @@ def _reject_setup(self, reject, flat): reject[key] = old_reject[key] # make sure new thresholds are at least as stringent as the old ones for key in reject: + # Skip this check if old_reject and reject are callables + if callable(reject[key]) and allow_callable: + continue if key in old_reject and reject[key] > old_reject[key]: raise ValueError( - bad_msg.format(kind='reject', key=key, new=reject[key], - old=old_reject[key], op='>')) + bad_msg.format( + kind="reject", + key=key, + new=reject[key], + old=old_reject[key], + op=">", + ) + ) # same for flat thresholds for key in set(old_flat) - set(flat): flat[key] = old_flat[key] for key in flat: + if callable(flat[key]) and allow_callable: + continue if key in old_flat and flat[key] < old_flat[key]: raise ValueError( - bad_msg.format(kind='flat', key=key, new=flat[key], - old=old_flat[key], op='<')) + bad_msg.format( + kind="flat", key=key, new=flat[key], old=old_flat[key], op="<" + ) + ) # after validation, set parameters self._bad_dropped = False @@ -787,20 +896,26 @@ def _is_good_epoch(self, data, verbose=None): if isinstance(data, str): return False, (data,) if data is None: - return False, ('NO_DATA',) + return False, ("NO_DATA",) n_times = len(self.times) if data.shape[1] < n_times: # epoch is too short ie at the end of the data - return False, ('TOO_SHORT',) + return False, ("TOO_SHORT",) if self.reject is None and self.flat is None: return True, None else: if self._reject_time is not None: data = data[:, self._reject_time] - return _is_good(data, self.ch_names, self._channel_type_idx, - self.reject, self.flat, full_report=True, - ignore_chs=self.info['bads']) + return _is_good( + data, + self.ch_names, + self._channel_type_idx, + self.reject, + self.flat, + full_report=True, + ignore_chs=self.info["bads"], + ) @verbose def _detrend_offset_decim(self, epoch, picks, verbose=None): @@ -821,8 +936,13 @@ def _detrend_offset_decim(self, epoch, picks, verbose=None): # Baseline correct if self._do_baseline: rescale( - epoch, self._raw_times, self.baseline, picks=picks, copy=False, - verbose=False) + epoch, + self._raw_times, + self.baseline, + picks=picks, + copy=False, + verbose=False, + ) # Decimate if necessary (i.e., epoch not preloaded) epoch = epoch[:, self._decim_slice] @@ -885,13 +1005,13 @@ def subtract_evoked(self, evoked=None): .. [1] David et al. "Mechanisms of evoked and induced responses in MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006. 
""" - logger.info('Subtracting Evoked from Epochs') + logger.info("Subtracting Evoked from Epochs") if evoked is None: picks = _pick_data_channels(self.info, exclude=[]) evoked = self.average(picks) # find the indices of the channels to use - picks = pick_channels(evoked.ch_names, include=self.ch_names) + picks = pick_channels(evoked.ch_names, include=self.ch_names, ordered=False) # make sure the omitted channels are not data channels if len(picks) < len(self.ch_names): @@ -899,24 +1019,32 @@ def subtract_evoked(self, evoked=None): diff_ch = list(set(self.ch_names).difference(sel_ch)) diff_idx = [self.ch_names.index(ch) for ch in diff_ch] diff_types = [channel_type(self.info, idx) for idx in diff_idx] - bad_idx = [diff_types.index(t) for t in diff_types if t in - _DATA_CH_TYPES_SPLIT] + bad_idx = [ + diff_types.index(t) for t in diff_types if t in _DATA_CH_TYPES_SPLIT + ] if len(bad_idx) > 0: - bad_str = ', '.join([diff_ch[ii] for ii in bad_idx]) - raise ValueError('The following data channels are missing ' - 'in the evoked response: %s' % bad_str) - logger.info(' The following channels are not included in the ' - 'subtraction: %s' % ', '.join(diff_ch)) + bad_str = ", ".join([diff_ch[ii] for ii in bad_idx]) + raise ValueError( + "The following data channels are missing " + f"in the evoked response: {bad_str}" + ) + logger.info( + " The following channels are not included in the subtraction: " + + ", ".join(diff_ch) + ) # make sure the times match - if (len(self.times) != len(evoked.times) or - np.max(np.abs(self.times - evoked.times)) >= 1e-7): - raise ValueError('Epochs and Evoked object do not contain ' - 'the same time points.') + if ( + len(self.times) != len(evoked.times) + or np.max(np.abs(self.times - evoked.times)) >= 1e-7 + ): + raise ValueError( + "Epochs and Evoked object do not contain the same time points." + ) # handle SSPs if not self.proj and evoked.proj: - warn('Evoked has SSP applied while Epochs has not.') + warn("Evoked has SSP applied while Epochs has not.") if self.proj and not evoked.proj: evoked = evoked.copy().apply_proj() @@ -928,10 +1056,11 @@ def subtract_evoked(self, evoked=None): self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :] else: if self._offset is None: - self._offset = np.zeros((len(self.ch_names), len(self.times)), - dtype=np.float64) + self._offset = np.zeros( + (len(self.ch_names), len(self.times)), dtype=np.float64 + ) self._offset[ep_picks] -= evoked.data[picks] - logger.info('[done]') + logger.info("[done]") return self @@ -976,11 +1105,11 @@ def average(self, picks=None, method="mean", by_event_type=False): This would compute the trimmed mean. 
""" + self._handle_empty("raise", "average") if by_event_type: evokeds = list() for event_type in self.event_id.keys(): - ev = self[event_type]._compute_aggregate(picks=picks, - mode=method) + ev = self[event_type]._compute_aggregate(picks=picks, mode=method) ev.comment = event_type evokeds.append(ev) else: @@ -1000,39 +1129,43 @@ def standard_error(self, picks=None, by_event_type=False): ------- %(std_err_by_event_type_returns)s """ - return self.average(picks=picks, method="std", - by_event_type=by_event_type) + return self.average(picks=picks, method="std", by_event_type=by_event_type) - def _compute_aggregate(self, picks, mode='mean'): + def _compute_aggregate(self, picks, mode="mean"): """Compute the mean, median, or std over epochs and return Evoked.""" # if instance contains ICA channels they won't be included unless picks # is specified if picks is None: - check_ICA = [x.startswith('ICA') for x in self.ch_names] + check_ICA = [x.startswith("ICA") for x in self.ch_names] if np.all(check_ICA): - raise TypeError('picks must be specified (i.e. not None) for ' - 'ICA channel data') + raise TypeError( + "picks must be specified (i.e. not None) for ICA channel data" + ) elif np.any(check_ICA): - warn('ICA channels will not be included unless explicitly ' - 'selected in picks') + warn( + "ICA channels will not be included unless explicitly " + "selected in picks" + ) n_channels = len(self.ch_names) n_times = len(self.times) if self.preload: n_events = len(self.events) - fun = _check_combine(mode, valid=('mean', 'median', 'std')) + fun = _check_combine(mode, valid=("mean", "median", "std")) data = fun(self._data) assert len(self.events) == len(self._data) if data.shape != self._data.shape[1:]: raise RuntimeError( - 'You passed a function that resulted n data of shape {}, ' - 'but it should be {}.'.format( - data.shape, self._data.shape[1:])) + f"You passed a function that resulted n data of shape " + f"{data.shape}, but it should be {self._data.shape[1:]}." + ) else: if mode not in {"mean", "std"}: - raise ValueError("If data are not preloaded, can only compute " - "mean or standard deviation.") + raise ValueError( + "If data are not preloaded, can only compute " + "mean or standard deviation." + ) data = np.zeros((n_channels, n_times)) n_events = 0 for e in self: @@ -1050,26 +1183,27 @@ def _compute_aggregate(self, picks, mode='mean'): # two (slower) in case there are large numbers if mode == "std": data_mean = data.copy() - data.fill(0.) + data.fill(0.0) for e in self: data += (e - data_mean) ** 2 data = np.sqrt(data / n_events) if mode == "std": - kind = 'standard_error' + kind = "standard_error" data /= np.sqrt(n_events) else: kind = "average" - return self._evoked_from_epoch_data(data, self.info, picks, n_events, - kind, self._name) + return self._evoked_from_epoch_data( + data, self.info, picks, n_events, kind, self._name + ) @property def _name(self): """Give a nice string representation based on event ids.""" return self._get_name() - def _get_name(self, count='frac', ms='×', sep='+'): + def _get_name(self, count="frac", ms="×", sep="+"): """Generate human-readable name for epochs and evokeds from event_id. Parameters @@ -1085,7 +1219,7 @@ def _get_name(self, count='frac', ms='×', sep='+'): How to separate the different events names. Ignored if only one event type is present. 
""" - _check_option('count', value=count, allowed_values=['frac', 'total']) + _check_option("count", value=count, allowed_values=["frac", "total"]) if len(self.event_id) == 1: comment = next(iter(self.event_id.keys())) @@ -1095,28 +1229,34 @@ def _get_name(self, count='frac', ms='×', sep='+'): # Take care of padding if ms is None: - ms = ' ' + ms = " " else: - ms = f' {ms} ' + ms = f" {ms} " for event_name, event_code in self.event_id.items(): - if count == 'frac': + if count == "frac": frac = float(counter[event_code]) / len(self.events) - comment = f'{frac:.2f}{ms}{event_name}' + comment = f"{frac:.2f}{ms}{event_name}" else: # 'total' - comment = f'{counter[event_code]}{ms}{event_name}' + comment = f"{counter[event_code]}{ms}{event_name}" comments.append(comment) - comment = f' {sep} '.join(comments) + comment = f" {sep} ".join(comments) return comment - def _evoked_from_epoch_data(self, data, info, picks, n_events, kind, - comment): + def _evoked_from_epoch_data(self, data, info, picks, n_events, kind, comment): """Create an evoked object from epoch data.""" info = deepcopy(info) # don't apply baseline correction; we'll set evoked.baseline manually - evoked = EvokedArray(data, info, tmin=self.times[0], comment=comment, - nave=n_events, kind=kind, baseline=None) + evoked = EvokedArray( + data, + info, + tmin=self.times[0], + comment=comment, + nave=n_events, + kind=kind, + baseline=None, + ) evoked.baseline = self.baseline # the above constructor doesn't recreate the times object precisely @@ -1124,58 +1264,120 @@ def _evoked_from_epoch_data(self, data, info, picks, n_events, kind, evoked._set_times(self.times.copy()) # pick channels - picks = _picks_to_idx(self.info, picks, 'data_or_ica', ()) + picks = _picks_to_idx(self.info, picks, "data_or_ica", ()) ch_names = [evoked.ch_names[p] for p in picks] - evoked.pick_channels(ch_names) + evoked.pick(ch_names) - if len(evoked.info['ch_names']) == 0: - raise ValueError('No data channel found when averaging.') + if len(evoked.info["ch_names"]) == 0: + raise ValueError("No data channel found when averaging.") if evoked.nave < 1: - warn('evoked object is empty (based on less than 1 epoch)') + warn("evoked object is empty (based on less than 1 epoch)") return evoked @property def ch_names(self): """Channel names.""" - return self.info['ch_names'] + return self.info["ch_names"] @copy_function_doc_to_method_doc(plot_epochs) - def plot(self, picks=None, scalings=None, n_epochs=20, n_channels=20, - title=None, events=None, event_color=None, - order=None, show=True, block=False, decim='auto', noise_cov=None, - butterfly=False, show_scrollbars=True, show_scalebars=True, - epoch_colors=None, event_id=None, group_by='type', - precompute=None, use_opengl=None, *, theme=None, - overview_mode=None): - return plot_epochs(self, picks=picks, scalings=scalings, - n_epochs=n_epochs, n_channels=n_channels, - title=title, events=events, event_color=event_color, - order=order, show=show, block=block, decim=decim, - noise_cov=noise_cov, butterfly=butterfly, - show_scrollbars=show_scrollbars, - show_scalebars=show_scalebars, - epoch_colors=epoch_colors, event_id=event_id, - group_by=group_by, precompute=precompute, - use_opengl=use_opengl, theme=theme, - overview_mode=overview_mode) + def plot( + self, + picks=None, + scalings=None, + n_epochs=20, + n_channels=20, + title=None, + events=False, + event_color=None, + order=None, + show=True, + block=False, + decim="auto", + noise_cov=None, + butterfly=False, + show_scrollbars=True, + show_scalebars=True, + 
epoch_colors=None, + event_id=None, + group_by="type", + precompute=None, + use_opengl=None, + *, + theme=None, + overview_mode=None, + splash=True, + ): + return plot_epochs( + self, + picks=picks, + scalings=scalings, + n_epochs=n_epochs, + n_channels=n_channels, + title=title, + events=events, + event_color=event_color, + order=order, + show=show, + block=block, + decim=decim, + noise_cov=noise_cov, + butterfly=butterfly, + show_scrollbars=show_scrollbars, + show_scalebars=show_scalebars, + epoch_colors=epoch_colors, + event_id=event_id, + group_by=group_by, + precompute=precompute, + use_opengl=use_opengl, + theme=theme, + overview_mode=overview_mode, + splash=splash, + ) @copy_function_doc_to_method_doc(plot_topo_image_epochs) - def plot_topo_image(self, layout=None, sigma=0., vmin=None, vmax=None, - colorbar=None, order=None, cmap='RdBu_r', - layout_scale=.95, title=None, scalings=None, - border='none', fig_facecolor='k', fig_background=None, - font_color='w', show=True): + def plot_topo_image( + self, + layout=None, + sigma=0.0, + vmin=None, + vmax=None, + colorbar=None, + order=None, + cmap="RdBu_r", + layout_scale=0.95, + title=None, + scalings=None, + border="none", + fig_facecolor="k", + fig_background=None, + font_color="w", + select=False, + show=True, + ): return plot_topo_image_epochs( - self, layout=layout, sigma=sigma, vmin=vmin, vmax=vmax, - colorbar=colorbar, order=order, cmap=cmap, - layout_scale=layout_scale, title=title, scalings=scalings, - border=border, fig_facecolor=fig_facecolor, - fig_background=fig_background, font_color=font_color, show=show) + self, + layout=layout, + sigma=sigma, + vmin=vmin, + vmax=vmax, + colorbar=colorbar, + order=order, + cmap=cmap, + layout_scale=layout_scale, + title=title, + scalings=scalings, + border=border, + fig_facecolor=fig_facecolor, + fig_background=fig_background, + font_color=font_color, + select=select, + show=show, + ) @verbose - def drop_bad(self, reject='existing', flat='existing', verbose=None): + def drop_bad(self, reject="existing", flat="existing", verbose=None): """Drop bad epochs without retaining the epochs data. Should be used before slicing operations. @@ -1204,23 +1406,22 @@ def drop_bad(self, reject='existing', flat='existing', verbose=None): Dropping bad epochs can be done multiple times with different ``reject`` and ``flat`` parameters. However, once an epoch is dropped, it is dropped forever, so if more lenient thresholds may - subsequently be applied, `epochs.copy ` should be + subsequently be applied, :meth:`epochs.copy ` should be used. """ - if reject == 'existing': - if flat == 'existing' and self._bad_dropped: + if reject == "existing": + if flat == "existing" and self._bad_dropped: return reject = self.reject - if flat == 'existing': + if flat == "existing": flat = self.flat - if any(isinstance(rej, str) and rej != 'existing' for - rej in (reject, flat)): + if any(isinstance(rej, str) and rej != "existing" for rej in (reject, flat)): raise ValueError('reject and flat, if strings, must be "existing"') - self._reject_setup(reject, flat) + self._reject_setup(reject, flat, allow_callable=True) self._get_data(out=False, verbose=verbose) return self - def drop_log_stats(self, ignore=('IGNORED',)): + def drop_log_stats(self, ignore=("IGNORED",)): """Compute the channel stats based on a drop_log from Epochs. 
Parameters @@ -1240,33 +1441,81 @@ def drop_log_stats(self, ignore=('IGNORED',)): return _drop_log_stats(self.drop_log, ignore) @copy_function_doc_to_method_doc(plot_drop_log) - def plot_drop_log(self, threshold=0, n_max_plot=20, subject=None, - color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',), - show=True): + def plot_drop_log( + self, + threshold=0, + n_max_plot=20, + subject=None, + color=(0.9, 0.9, 0.9), + width=0.8, + ignore=("IGNORED",), + show=True, + ): if not self._bad_dropped: - raise ValueError("You cannot use plot_drop_log since bad " - "epochs have not yet been dropped. " - "Use epochs.drop_bad().") - return plot_drop_log(self.drop_log, threshold, n_max_plot, subject, - color=color, width=width, ignore=ignore, - show=show) + raise ValueError( + "You cannot use plot_drop_log since bad " + "epochs have not yet been dropped. " + "Use epochs.drop_bad()." + ) + return plot_drop_log( + self.drop_log, + threshold, + n_max_plot, + subject, + color=color, + width=width, + ignore=ignore, + show=show, + ) @copy_function_doc_to_method_doc(plot_epochs_image) - def plot_image(self, picks=None, sigma=0., vmin=None, vmax=None, - colorbar=True, order=None, show=True, units=None, - scalings=None, cmap=None, fig=None, axes=None, - overlay_times=None, combine=None, group_by=None, - evoked=True, ts_args=None, title=None, clear=False): - return plot_epochs_image(self, picks=picks, sigma=sigma, vmin=vmin, - vmax=vmax, colorbar=colorbar, order=order, - show=show, units=units, scalings=scalings, - cmap=cmap, fig=fig, axes=axes, - overlay_times=overlay_times, combine=combine, - group_by=group_by, evoked=evoked, - ts_args=ts_args, title=title, clear=clear) + def plot_image( + self, + picks=None, + sigma=0.0, + vmin=None, + vmax=None, + colorbar=True, + order=None, + show=True, + units=None, + scalings=None, + cmap=None, + fig=None, + axes=None, + overlay_times=None, + combine=None, + group_by=None, + evoked=True, + ts_args=None, + title=None, + clear=False, + ): + return plot_epochs_image( + self, + picks=picks, + sigma=sigma, + vmin=vmin, + vmax=vmax, + colorbar=colorbar, + order=order, + show=show, + units=units, + scalings=scalings, + cmap=cmap, + fig=fig, + axes=axes, + overlay_times=overlay_times, + combine=combine, + group_by=group_by, + evoked=evoked, + ts_args=ts_args, + title=title, + clear=clear, + ) @verbose - def drop(self, indices, reason='USER', verbose=None): + def drop(self, indices, reason="USER", verbose=None): """Drop epochs based on indices or boolean mask. .. note:: The indices refer to the current set of undropped epochs @@ -1284,8 +1533,9 @@ def drop(self, indices, reason='USER', verbose=None): Set epochs to remove by specifying indices to remove or a boolean mask to apply (where True values get removed). Events are correspondingly modified. - reason : str - Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc). + reason : list | tuple | str + Reason(s) for dropping the epochs ('ECG', 'timeout', 'blink' etc). + Reason(s) are applied to all indices specified. Default: 'USER'. 
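# Editor's illustrative sketch (not part of the patch): explicit and
# threshold-based dropping as handled by the code above. Assumes ``epochs`` is
# a preloaded mne.Epochs instance containing EEG channels; the reason string
# is arbitrary.
epochs.drop([0], reason="manual inspection")  # drop by index; recorded in drop_log
epochs.drop_bad(reject=dict(eeg=150e-6))      # peak-to-peak amplitude criterion
print(epochs.drop_log_stats())                # percentage of epochs dropped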
%(verbose)s @@ -1297,21 +1547,27 @@ def drop(self, indices, reason='USER', verbose=None): indices = np.atleast_1d(indices) if indices.ndim > 1: - raise ValueError("indices must be a scalar or a 1-d array") + raise TypeError("indices must be a scalar or a 1-d array") + # Check if indices and reasons are of the same length + # if using collection to drop epochs - if indices.dtype == bool: + if indices.dtype == np.dtype(bool): indices = np.where(indices)[0] try_idx = np.where(indices < 0, indices + len(self.events), indices) out_of_bounds = (try_idx < 0) | (try_idx >= len(self.events)) if out_of_bounds.any(): first = indices[out_of_bounds][0] - raise IndexError("Epoch index %d is out of bounds" % first) + raise IndexError(f"Epoch index {first} is out of bounds") keep = np.setdiff1d(np.arange(len(self.events)), try_idx) self._getitem(keep, reason, copy=False, drop_event_id=False) count = len(try_idx) - logger.info('Dropped %d epoch%s: %s' % - (count, _pl(count), ', '.join(map(str, np.sort(try_idx))))) + logger.info( + "Dropped %d epoch%s: %s", + count, + _pl(count), + ", ".join(map(str, np.sort(try_idx))), + ) return self @@ -1330,9 +1586,29 @@ def _project_epoch(self, epoch): epoch = np.dot(self._projector, epoch) return epoch + def _handle_empty(self, on_empty, meth): + if len(self.events) == 0: + msg = ( + f"epochs.{meth}() can't run because this Epochs-object is empty. " + f"You might want to check Epochs.drop_log or Epochs.plot_drop_log()" + f" to see why epochs were dropped." + ) + _on_missing(on_empty, msg, error_klass=RuntimeError) + @verbose - def _get_data(self, out=True, picks=None, item=None, *, units=None, - tmin=None, tmax=None, verbose=None): + def _get_data( + self, + out=True, + picks=None, + item=None, + *, + units=None, + tmin=None, + tmax=None, + copy=False, + on_empty="warn", + verbose=None, + ): """Load all data, dropping bad epochs along the way. Parameters @@ -1350,18 +1626,31 @@ def _get_data(self, out=True, picks=None, item=None, *, units=None, End time of data to get in seconds. %(verbose)s """ + from .io.base import _get_ch_factors + + if copy is not None: + _validate_type(copy, bool, "copy") + + # Handle empty epochs + self._handle_empty(on_empty, "_get_data") # if called with 'out=False', the call came from 'drop_bad()' # if no reasons to drop, just declare epochs as good and return if not out: # make sure first and last epoch not out of bounds of raw in_bounds = self.preload or ( - self._get_epoch_from_raw(idx=0) is not None and - self._get_epoch_from_raw(idx=-1) is not None) + self._get_epoch_from_raw(idx=0) is not None + and self._get_epoch_from_raw(idx=-1) is not None + ) # might be BaseEpochs or Epochs, only the latter has the attribute - reject_by_annotation = getattr(self, 'reject_by_annotation', False) - if (self.reject is None and self.flat is None and in_bounds and - self._reject_time is None and not reject_by_annotation): - logger.debug('_get_data is a noop, returning') + reject_by_annotation = getattr(self, "reject_by_annotation", False) + if ( + self.reject is None + and self.flat is None + and in_bounds + and self._reject_time is None + and not reject_by_annotation + ): + logger.debug("_get_data is a noop, returning") self._bad_dropped = True return None start, stop = self._handle_tmin_tmax(tmin, tmax) @@ -1370,8 +1659,9 @@ def _get_data(self, out=True, picks=None, item=None, *, units=None, item = slice(None) elif not self._bad_dropped: raise ValueError( - 'item must be None in epochs.get_data() unless bads have been ' - 'dropped. 
Consider using epochs.drop_bad().') + "item must be None in epochs.get_data() unless bads have been " + "dropped. Consider using epochs.drop_bad()." + ) select = self._item_to_select(item) # indices or slice use_idx = np.arange(len(self.events))[select] n_events = len(use_idx) @@ -1381,15 +1671,16 @@ def _get_data(self, out=True, picks=None, item=None, *, units=None, data = self._data else: # we start out with an empty array, allocate only if necessary - data = np.empty((0, len(self.info['ch_names']), len(self.times))) - msg = (f'for {n_events} events and {len(self._raw_times)} ' - 'original time points') + data = np.empty((0, len(self.info["ch_names"]), len(self.times))) + msg = ( + f"for {n_events} events and {len(self._raw_times)} original time points" + ) if self._decim > 1: - msg += ' (prior to decimation)' + msg += " (prior to decimation)" if getattr(self._raw, "preload", False): - logger.info(f'Using data from preloaded Raw {msg} ...') + logger.info(f"Using data from preloaded Raw {msg} ...") else: - logger.info(f'Loading data {msg} ...') + logger.info(f"Loading data {msg} ...") orig_picks = picks if orig_picks is None: @@ -1400,34 +1691,39 @@ def _get_data(self, out=True, picks=None, item=None, *, units=None, # handle units param only if we are going to return data (out==True) if (units is not None) and out: ch_factors = _get_ch_factors(self, units, picks) + else: + ch_factors = None if self._bad_dropped: if not out: return if self.preload: - data = data[select] - if orig_picks is not None: - data = data[:, picks] - if units is not None: - data *= ch_factors[:, np.newaxis] - if start != 0 or stop != self.times.size: - data = data[..., start:stop] - return data + return self._data_sel_copy_scale( + data, + select=select, + orig_picks=orig_picks, + picks=picks, + ch_factors=ch_factors, + start=start, + stop=stop, + copy=copy, + ) # we need to load from disk, drop, and return data detrend_picks = self._detrend_picks for ii, idx in enumerate(use_idx): # faster to pre-allocate memory here epoch_noproj = self._get_epoch_from_raw(idx) - epoch_noproj = self._detrend_offset_decim( - epoch_noproj, detrend_picks) + epoch_noproj = self._detrend_offset_decim(epoch_noproj, detrend_picks) if self._do_delayed_proj: epoch_out = epoch_noproj else: epoch_out = self._project_epoch(epoch_noproj) if ii == 0: - data = np.empty((n_events, len(self.ch_names), - len(self.times)), dtype=epoch_out.dtype) + data = np.empty( + (n_events, len(self.ch_names), len(self.times)), + dtype=epoch_out.dtype, + ) data[ii] = epoch_out else: # bads need to be dropped, this might occur after a preload @@ -1449,12 +1745,12 @@ def _get_data(self, out=True, picks=None, item=None, *, units=None, else: # from disk epoch_noproj = self._get_epoch_from_raw(idx) epoch_noproj = self._detrend_offset_decim( - epoch_noproj, detrend_picks) + epoch_noproj, detrend_picks + ) epoch = self._project_epoch(epoch_noproj) epoch_out = epoch_noproj if self._do_delayed_proj else epoch - is_good, bad_tuple = self._is_good_epoch( - epoch, verbose=verbose) + is_good, bad_tuple = self._is_good_epoch(epoch, verbose=verbose) if not is_good: assert isinstance(bad_tuple, tuple) assert all(isinstance(x, str) for x in bad_tuple) @@ -1466,20 +1762,32 @@ def _get_data(self, out=True, picks=None, item=None, *, units=None, if out or self.preload: # faster to pre-allocate, then trim as necessary if n_out == 0 and not self.preload: - data = np.empty((n_events, epoch_out.shape[0], - epoch_out.shape[1]), - dtype=epoch_out.dtype, order='C') + data = np.empty( + 
(n_events, epoch_out.shape[0], epoch_out.shape[1]), + dtype=epoch_out.dtype, + order="C", + ) data[n_out] = epoch_out n_out += 1 self.drop_log = tuple(drop_log) del drop_log self._bad_dropped = True - logger.info("%d bad epochs dropped" % (n_events - len(good_idx))) + n_bads_dropped = n_events - len(good_idx) + logger.info(f"{n_bads_dropped} bad epochs dropped") + + if n_bads_dropped == n_events: + warn( + "All epochs were dropped!\n" + "You might need to alter reject/flat-criteria " + "or drop bad channels to avoid this. " + "You can use Epochs.plot_drop_log() to see which " + "channels are responsible for the dropping of epochs." + ) # adjust the data size if there is a reason to (output or update) if out or self.preload: - if data.flags['OWNDATA'] and data.flags['C_CONTIGUOUS']: + if data.flags["OWNDATA"] and data.flags["C_CONTIGUOUS"]: data.resize((n_out,) + data.shape[1:], refcheck=False) else: data = data[:n_out] @@ -1487,31 +1795,82 @@ def _get_data(self, out=True, picks=None, item=None, *, units=None, self._data = data # Now update our properties (excepd data, which is already fixed) - self._getitem(good_idx, None, copy=False, drop_event_id=False, - select_data=False) - - if out: - if orig_picks is not None: - data = data[:, picks] - if units is not None: - data *= ch_factors[:, np.newaxis] - if start != 0 or stop != self.times.size: - data = data[..., start:stop] - return data + self._getitem( + good_idx, None, copy=False, drop_event_id=False, select_data=False + ) + + if not out: + return + return self._data_sel_copy_scale( + data, + select=slice(None), + orig_picks=orig_picks, + picks=picks, + ch_factors=ch_factors, + start=start, + stop=stop, + copy=copy, + ) + + def _data_sel_copy_scale( + self, data, *, select, orig_picks, picks, ch_factors, start, stop, copy + ): + # data arg starts out as self._data when data is preloaded + data_is_self_data = bool(self.preload) + logger.debug(f"Data is self data: {data_is_self_data}") + # only two types of epoch subselection allowed + assert isinstance(select, slice | np.ndarray), type(select) + if not isinstance(select, slice): + logger.debug(" Copying, fancy indexed epochs") + data_is_self_data = False # copy (fancy indexing) + elif select != slice(None): + logger.debug(" Slicing epochs") + if orig_picks is not None: + logger.debug(" Copying, fancy indexed picks") + assert isinstance(picks, np.ndarray), type(picks) + data_is_self_data = False # copy (fancy indexing) else: - return None + picks = slice(None) + if not all(isinstance(x, slice) and x == slice(None) for x in (select, picks)): + data = data[select][:, picks] + del picks + if start != 0 or stop != self.times.size: + logger.debug(" Slicing time") + data = data[..., start:stop] # view (slice) + if ch_factors is not None: + if data_is_self_data: + logger.debug(" Copying, scale factors applied") + data = data.copy() + data_is_self_data = False + data *= ch_factors[:, np.newaxis] + if not data_is_self_data: + return data + if copy: + logger.debug(" Copying, copy=True") + data = data.copy() + return data @property def _detrend_picks(self): if self._do_baseline: return _pick_data_channels( - self.info, with_ref_meg=True, with_aux=True, exclude=()) + self.info, with_ref_meg=True, with_aux=True, exclude=() + ) else: return [] - @fill_doc - def get_data(self, picks=None, item=None, units=None, tmin=None, - tmax=None): + @verbose + def get_data( + self, + picks=None, + item=None, + units=None, + tmin=None, + tmax=None, + *, + copy=True, + verbose=None, + ): """Get all epochs as a 3D 
array. Parameters @@ -1536,18 +1895,45 @@ def get_data(self, picks=None, item=None, units=None, tmin=None, End time of data to get in seconds. .. versionadded:: 0.24.0 + copy : bool + Whether to return a copy of the object's data, or (if possible) a view. + See :ref:`the NumPy docs ` for an + explanation. Default is ``False`` in 1.6 but will change to ``True`` in 1.7, + set it explicitly to avoid a warning in some cases. A view is only possible + when ``item is None``, ``picks is None``, ``units is None``, and data are + preloaded. + + .. warning:: + Using ``copy=False`` and then modifying the returned ``data`` will in + turn modify the Epochs object. Use with caution! + + .. versionchanged:: 1.7 + The default changed from ``False`` to ``True``. + + .. versionadded:: 1.6 + %(verbose)s Returns ------- data : array of shape (n_epochs, n_channels, n_times) - A view on epochs data. + The epochs data. Will be a copy when ``copy=True`` and will be a view + when possible when ``copy=False``. """ - return self._get_data(picks=picks, item=item, units=units, tmin=tmin, - tmax=tmax) + return self._get_data( + picks=picks, item=item, units=units, tmin=tmin, tmax=tmax, copy=copy + ) @verbose - def apply_function(self, fun, picks=None, dtype=None, n_jobs=None, - channel_wise=True, verbose=None, **kwargs): + def apply_function( + self, + fun, + picks=None, + dtype=None, + n_jobs=None, + channel_wise=True, + verbose=None, + **kwargs, + ): """Apply a function to a subset of channels. %(applyfun_summary_epochs)s @@ -1568,97 +1954,147 @@ def apply_function(self, fun, picks=None, dtype=None, n_jobs=None, self : instance of Epochs The epochs object with transformed data. """ - _check_preload(self, 'epochs.apply_function') + _check_preload(self, "epochs.apply_function") picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) if not callable(fun): - raise ValueError('fun needs to be a function') + raise ValueError("fun needs to be a function") data_in = self._data if dtype is not None and dtype != self._data.dtype: self._data = self._data.astype(dtype) + args = getfullargspec(fun).args + getfullargspec(fun).kwonlyargs + if channel_wise is False: + if ("ch_idx" in args) or ("ch_name" in args): + raise ValueError( + "apply_function cannot access ch_idx or ch_name " + "when channel_wise=False" + ) + if "ch_idx" in args: + logger.info("apply_function requested to access ch_idx") + if "ch_name" in args: + logger.info("apply_function requested to access ch_name") + if channel_wise: parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs) if n_jobs == 1: - _fun = partial(_check_fun, fun, **kwargs) + _fun = partial(_check_fun, fun) # modify data inplace to save memory - for idx in picks: - self._data[:, idx, :] = np.apply_along_axis( - _fun, -1, data_in[:, idx, :]) + for ch_idx in picks: + if "ch_idx" in args: + kwargs.update(ch_idx=ch_idx) + if "ch_name" in args: + kwargs.update(ch_name=self.info["ch_names"][ch_idx]) + self._data[:, ch_idx, :] = np.apply_along_axis( + _fun, -1, data_in[:, ch_idx, :], **kwargs + ) else: # use parallel function - data_picks_new = parallel(p_fun( - fun, data_in[:, p, :], **kwargs) for p in picks) - for pp, p in enumerate(picks): - self._data[:, p, :] = data_picks_new[pp] + _fun = partial(np.apply_along_axis, fun, -1) + data_picks_new = parallel( + p_fun( + _fun, + data_in[:, ch_idx, :], + **kwargs, + **{ + k: v + for k, v in [ + ("ch_name", self.info["ch_names"][ch_idx]), + ("ch_idx", ch_idx), + ] + if k in args + }, + ) + for ch_idx in picks + ) + for run_idx, ch_idx 
in enumerate(picks): + self._data[:, ch_idx, :] = data_picks_new[run_idx] else: self._data = _check_fun(fun, data_in, **kwargs) return self @property - def filename(self): - """The filename.""" + def filename(self) -> Path | None: + """The filename if the epochs are loaded from disk. + + :type: :class:`pathlib.Path` | ``None`` + """ return self._filename + @filename.setter + def filename(self, value): + if value is not None: + value = _check_fname(value, overwrite="read", must_exist=True) + self._filename = value + def __repr__(self): """Build string representation.""" - s = ' %s events ' % len(self.events) - s += '(all good)' if self._bad_dropped else '(good & bad)' - s += ', %g – %g s' % (self.tmin, self.tmax) - s += ', baseline ' + s = f"{len(self.events)} events " + s += "(all good)" if self._bad_dropped else "(good & bad)" + s += f", {self.tmin:.3f}".rstrip("0").rstrip(".") + s += f" – {self.tmax:.3f}".rstrip("0").rstrip(".") + s += " s (baseline " if self.baseline is None: - s += 'off' + s += "off" else: - s += f'{self.baseline[0]:g} – {self.baseline[1]:g} s' + s += f"{self.baseline[0]:.3f}".rstrip("0").rstrip(".") + s += f" – {self.baseline[1]:.3f}".rstrip("0").rstrip(".") + s += " s" if self.baseline != _check_baseline( - self.baseline, times=self.times, sfreq=self.info['sfreq'], - on_baseline_outside_data='adjust'): - s += ' (baseline period was cropped after baseline correction)' - - s += ', ~%s' % (sizeof_fmt(self._size),) - s += ', data%s loaded' % ('' if self.preload else ' not') - s += ', with metadata' if self.metadata is not None else '' + self.baseline, + times=self.times, + sfreq=self.info["sfreq"], + on_baseline_outside_data="adjust", + ): + s += " (baseline period was cropped after baseline correction)" + + s += f"), ~{sizeof_fmt(self._size)}" + s += f", data{'' if self.preload else ' not'} loaded" + s += ", with metadata" if self.metadata is not None else "" max_events = 10 - counts = ['%r: %i' % (k, sum(self.events[:, 2] == v)) - for k, v in list(self.event_id.items())[:max_events]] + counts = [ + f"{k!r}: {sum(self.events[:, 2] == v)}" + for k, v in list(self.event_id.items())[:max_events] + ] if len(self.event_id) > 0: - s += ',' + '\n '.join([''] + counts) + s += "," + "\n ".join([""] + counts) if len(self.event_id) > max_events: not_shown_events = len(self.event_id) - max_events s += f"\n and {not_shown_events} more events ..." 
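# Editor's illustrative sketch (not part of the patch): the ``get_data`` copy
# semantics and per-channel ``apply_function`` implemented above. Assumes
# ``epochs`` is a preloaded mne.Epochs instance with EEG data.
maybe_view = epochs.get_data(copy=False)  # may be a view into the internal buffer
safe_copy = epochs.get_data(copy=True)    # always an independent array
safe_copy *= 2.0                          # leaves the Epochs object untouched

def demean(x, ch_name=None):
    # ch_name is filled in by apply_function because it appears in the signature
    return x - x.mean()

epochs.apply_function(demean, picks="eeg")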
class_name = self.__class__.__name__ - class_name = 'Epochs' if class_name == 'BaseEpochs' else class_name - return '<%s | %s>' % (class_name, s) + class_name = "Epochs" if class_name == "BaseEpochs" else class_name + return f"<{class_name} | {s}>" @repr_html def _repr_html_(self): - from .html_templates import repr_templates_env - if self.baseline is None: - baseline = 'off' - else: - baseline = tuple([f'{b:.3f}' for b in self.baseline]) - baseline = f'{baseline[0]} – {baseline[1]} s' - if isinstance(self.event_id, dict): event_strings = [] for k, v in sorted(self.event_id.items()): n_events = sum(self.events[:, 2] == v) - event_strings.append(f'{k}: {n_events}') + event_strings.append(f"{k}: {n_events}") elif isinstance(self.event_id, list): event_strings = [] for k in self.event_id: n_events = sum(self.events[:, 2] == k) - event_strings.append(f'{k}: {n_events}') + event_strings.append(f"{k}: {n_events}") elif isinstance(self.event_id, int): n_events = len(self.events[:, 2]) - event_strings = [f'{self.event_id}: {n_events}'] + event_strings = [f"{self.event_id}: {n_events}"] else: event_strings = None - t = repr_templates_env.get_template('epochs.html.jinja') - t = t.render(epochs=self, baseline=baseline, events=event_strings) + t = _get_html_template("repr", "epochs.html.jinja") + t = t.render( + inst=self, + filenames=( + [Path(self.filename).name] + if getattr(self, "filename", None) is not None + else None + ), + event_counts=event_strings, + ) return t @verbose @@ -1684,20 +2120,22 @@ def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None): %(notes_tmax_included_by_default)s """ # XXX this could be made to work on non-preloaded data... - _check_preload(self, 'Modifying data of epochs') + _check_preload(self, "Modifying data of epochs") super().crop(tmin=tmin, tmax=tmax, include_tmax=include_tmax) # Adjust rejection period if self.reject_tmin is not None and self.reject_tmin < self.tmin: logger.info( - f'reject_tmin is not in epochs time interval. ' - f'Setting reject_tmin to epochs.tmin ({self.tmin} s)') + f"reject_tmin is not in epochs time interval. " + f"Setting reject_tmin to epochs.tmin ({self.tmin} s)" + ) self.reject_tmin = self.tmin if self.reject_tmax is not None and self.reject_tmax > self.tmax: logger.info( - f'reject_tmax is not in epochs time interval. ' - f'Setting reject_tmax to epochs.tmax ({self.tmax} s)') + f"reject_tmax is not in epochs time interval. " + f"Setting reject_tmax to epochs.tmax ({self.tmax} s)" + ) self.reject_tmax = self.tmax return self @@ -1718,7 +2156,7 @@ def __deepcopy__(self, memodict): for k, v in self.__dict__.items(): # drop_log is immutable and _raw is private (and problematic to # deepcopy) - if k in ('drop_log', '_raw', '_times_readonly'): + if k in ("drop_log", "_raw", "_times_readonly"): memodict[id(v)] = v else: v = deepcopy(v, memodict) @@ -1726,8 +2164,15 @@ def __deepcopy__(self, memodict): return result @verbose - def save(self, fname, split_size='2GB', fmt='single', overwrite=False, - split_naming='neuromag', verbose=None): + def save( + self, + fname, + split_size="2GB", + fmt="single", + overwrite=False, + split_naming="neuromag", + verbose=None, + ): """Save epochs in a fif file. Parameters @@ -1762,19 +2207,34 @@ def save(self, fname, split_size='2GB', fmt='single', overwrite=False, .. versionadded:: 0.24 %(verbose)s + Returns + ------- + fnames : List of path-like + List of path-like objects containing the path to each file split. + + .. 
versionadded:: 1.9 + Notes ----- Bad epochs will be dropped before saving the epochs to disk. """ - check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz', - '_epo.fif', '_epo.fif.gz')) + check_fname( + fname, "epochs", ("-epo.fif", "-epo.fif.gz", "_epo.fif", "_epo.fif.gz") + ) # check for file existence and expand `~` if present - fname = str(_check_fname(fname=fname, overwrite=overwrite)) + fname = str( + _check_fname( + fname=fname, + overwrite=overwrite, + check_bids_split=True, + name="fname", + ) + ) split_size_bytes = _get_split_size(split_size) - _check_option('fmt', fmt, ['single', 'double']) + _check_option("fmt", fmt, ["single", "double"]) # to know the length accurately. The get_data() call would drop # bad epochs anyway @@ -1782,12 +2242,12 @@ def save(self, fname, split_size='2GB', fmt='single', overwrite=False, # total_size tracks sizes that get split # over_size tracks overhead (tags, things that get written to each) if len(self) == 0: - warn('Saving epochs with no data') + warn("Saving epochs with no data") total_size = 0 else: - d = self[0].get_data() + d = self[0].get_data(copy=False) # this should be guaranteed by subclasses - assert d.dtype in ('>f8', 'c16', 'f8", "c16", "= 1, n_parts if n_parts > 1: - logger.info(f'Splitting into {n_parts} parts') + logger.info(f"Splitting into {n_parts} parts") if n_parts > 100: # This must be an error raise ValueError( - f'Split size {split_size} would result in writing ' - f'{n_parts} files') + f"Split size {split_size} would result in writing {n_parts} files" + ) if len(self.drop_log) > 100000: - warn(f'epochs.drop_log contains {len(self.drop_log)} entries ' - f'which will incur up to a {sizeof_fmt(drop_size)} writing ' - f'overhead (per split file), consider using ' - f'epochs.reset_drop_log_selection() prior to writing') + warn( + f"epochs.drop_log contains {len(self.drop_log)} entries " + f"which will incur up to a {sizeof_fmt(drop_size)} writing " + f"overhead (per split file), consider using " + f"epochs.reset_drop_log_selection() prior to writing" + ) epoch_idxs = np.array_split(np.arange(n_epochs), n_parts) + _check_option("split_naming", split_naming, ("neuromag", "bids")) + split_fnames = _make_split_fnames(fname, n_parts, split_naming) for part_idx, epoch_idx in enumerate(epoch_idxs): this_epochs = self[epoch_idx] if n_parts > 1 else self # avoid missing event_ids in splits this_epochs.event_id = self.event_id - _save_split(this_epochs, fname, part_idx, n_parts, fmt, - split_naming, overwrite) + + _save_split(this_epochs, split_fnames, part_idx, n_parts, fmt, overwrite) + return split_fnames @verbose - def export(self, fname, fmt='auto', *, overwrite=False, verbose=None): + def export(self, fname, fmt="auto", *, overwrite=False, verbose=None): """Export Epochs to external formats. %(export_fmt_support_epochs)s @@ -1886,9 +2351,13 @@ def export(self, fname, fmt='auto', *, overwrite=False, verbose=None): %(export_eeglab_note)s """ from .export import export_epochs + export_epochs(fname, self, fmt, overwrite=overwrite, verbose=verbose) - def equalize_event_counts(self, event_ids=None, method='mintime'): + @fill_doc + def equalize_event_counts( + self, event_ids=None, method="mintime", *, random_state=None + ): """Equalize the number of trials in each condition. It tries to make the remaining epochs occurring as close as possible in @@ -1928,10 +2397,8 @@ def equalize_event_counts(self, event_ids=None, method='mintime'): matched by the provided tags had been supplied instead. 
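# Editor's illustrative sketch (not part of the patch): saving (the updated
# ``save`` is documented above as returning the list of written split files)
# and equalizing trial counts. The filename is hypothetical; assumes ``epochs``
# has conditions "a" and "b" in its event_id.
fnames = epochs.save("sub-01_task-demo-epo.fif", overwrite=True)
print(fnames)  # a single-element list when no splitting was necessary

epochs_eq, dropped_idx = epochs.equalize_event_counts(["a", "b"], method="mintime")
# Per the updated signature, a reproducible random subselection would use
# method="random" together with random_state.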
The ``event_ids`` must identify non-overlapping subsets of the epochs. - method : str - If ``'truncate'``, events will be truncated from the end of each - type of events. If ``'mintime'``, timing differences between each - event type will be minimized. + %(equalize_events_method)s + %(random_state)s Used only if ``method='random'``. Returns ------- @@ -1961,16 +2428,23 @@ def equalize_event_counts(self, event_ids=None, method='mintime'): event names were specified explicitly. """ from collections.abc import Iterable - _validate_type(event_ids, types=(Iterable, None), - item_name='event_ids', type_name='list-like or None') + + _validate_type( + event_ids, + types=(Iterable, None), + item_name="event_ids", + type_name="list-like or None", + ) if isinstance(event_ids, str): - raise TypeError(f'event_ids must be list-like or None, but ' - f'received a string: {event_ids}') + raise TypeError( + f"event_ids must be list-like or None, but " + f"received a string: {event_ids}" + ) if event_ids is None: event_ids = list(self.event_id) elif not event_ids: - raise ValueError('event_ids must have at least one element') + raise ValueError("event_ids must have at least one element") if not self._bad_dropped: self.drop_bad() @@ -1983,8 +2457,7 @@ def equalize_event_counts(self, event_ids=None, method='mintime'): tagging = False if "/" in "".join(ids): # make string inputs a list of length 1 - event_ids = [[x] if isinstance(x, str) else x - for x in event_ids] + event_ids = [[x] if isinstance(x, str) else x for x in event_ids] for ids_ in event_ids: # check if tagging is attempted if any([id_ not in ids for id_ in ids_]): tagging = True @@ -1992,19 +2465,26 @@ def equalize_event_counts(self, event_ids=None, method='mintime'): # 2a. for tags, find all the event_ids matched by the tags # 2b. for non-tag ids, just pass them directly # 3. do this for every input - event_ids = [[k for k in ids - if all((tag in k.split("/") - for tag in id_))] # ids matching all tags - if all(id__ not in ids for id__ in id_) - else id_ # straight pass for non-tag inputs - for id_ in event_ids] + event_ids = [ + ( + [ + k for k in ids if all(tag in k.split("/") for tag in id_) + ] # ids matching all tags + if all(id__ not in ids for id__ in id_) + else id_ + ) # straight pass for non-tag inputs + for id_ in event_ids + ] for ii, id_ in enumerate(event_ids): if len(id_) == 0: - raise KeyError(f"{orig_ids[ii]} not found in the epoch " - "object's event_id.") + raise KeyError( + f"{orig_ids[ii]} not found in the epoch object's event_id." + ) elif len({sub_id in ids for sub_id in id_}) != 1: - err = ("Don't mix hierarchical and regular event_ids" - " like in \'%s\'." % ", ".join(id_)) + err = ( + "Don't mix hierarchical and regular event_ids" + f" like in '{', '.join(id_)}'." + ) raise ValueError(err) # raise for non-orthogonal tags @@ -2012,25 +2492,40 @@ def equalize_event_counts(self, event_ids=None, method='mintime'): events_ = [set(self[x].events[:, 0]) for x in event_ids] doubles = events_[0].intersection(events_[1]) if len(doubles): - raise ValueError("The two sets of epochs are " - "overlapping. Provide an " - "orthogonal selection.") + raise ValueError( + "The two sets of epochs are " + "overlapping. Provide an " + "orthogonal selection." 
+ ) for eq in event_ids: eq_inds.append(self._keys_to_idx(eq)) - event_times = [self.events[e, 0] for e in eq_inds] - indices = _get_drop_indices(event_times, method) + sample_nums = [self.events[e, 0] for e in eq_inds] + indices = _get_drop_indices(sample_nums, method, random_state) # need to re-index indices indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)]) - self.drop(indices, reason='EQUALIZED_COUNT') + self.drop(indices, reason="EQUALIZED_COUNT") # actually remove the indices return self, indices @verbose - def compute_psd(self, method='multitaper', fmin=0, fmax=np.inf, tmin=None, - tmax=None, picks=None, proj=False, *, n_jobs=1, - verbose=None, **method_kw): + def compute_psd( + self, + method="multitaper", + fmin=0, + fmax=np.inf, + tmin=None, + tmax=None, + picks=None, + proj=False, + remove_dc=True, + exclude=(), + *, + n_jobs=1, + verbose=None, + **method_kw, + ): """Perform spectral analysis on sensor data. Parameters @@ -2041,6 +2536,8 @@ def compute_psd(self, method='multitaper', fmin=0, fmax=np.inf, tmin=None, %(tmin_tmax_psd)s %(picks_good_data_noref)s %(proj_psd)s + %(remove_dc)s + %(exclude_psd)s %(n_jobs)s %(verbose)s %(method_kw_psd)s @@ -2062,17 +2559,182 @@ def compute_psd(self, method='multitaper', fmin=0, fmax=np.inf, tmin=None, self._set_legacy_nfft_default(tmin, tmax, method, method_kw) return EpochsSpectrum( - self, method=method, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, - picks=picks, proj=proj, n_jobs=n_jobs, verbose=verbose, - **method_kw) + self, + method=method, + fmin=fmin, + fmax=fmax, + tmin=tmin, + tmax=tmax, + picks=picks, + exclude=exclude, + proj=proj, + remove_dc=remove_dc, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) @verbose - def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None, picks=None, - proj=False, *, method='auto', average=False, dB=True, - estimate='auto', xscale='linear', area_mode='std', - area_alpha=0.33, color='black', line_alpha=None, - spatial_colors=True, sphere=None, exclude='bads', ax=None, - show=True, n_jobs=1, verbose=None, **method_kw): + def compute_tfr( + self, + method, + freqs, + *, + tmin=None, + tmax=None, + picks=None, + proj=False, + output="power", + average=False, + return_itc=False, + decim=1, + n_jobs=None, + verbose=None, + **method_kw, + ): + """Compute a time-frequency representation of epoched data. + + Parameters + ---------- + %(method_tfr_epochs)s + %(freqs_tfr_epochs)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(proj_psd)s + %(output_compute_tfr)s + average : bool + Whether to return average power across epochs (instead of single-trial + power). ``average=True`` is not compatible with ``output="complex"`` or + ``output="phase"``. Ignored if ``method="stockwell"`` (Stockwell method + *requires* averaging). Default is ``False``. + return_itc : bool + Whether to return inter-trial coherence (ITC) as well as power estimates. + If ``True`` then must specify ``average=True`` (or ``method="stockwell", + average="auto"``). Default is ``False``. + %(decim_tfr)s + %(n_jobs)s + %(verbose)s + %(method_kw_epochs_tfr)s + + Returns + ------- + tfr : instance of EpochsTFR or AverageTFR + The time-frequency-resolved power estimates. + itc : instance of AverageTFR + The inter-trial coherence (ITC). Only returned if ``return_itc=True``. + + Notes + ----- + If ``average=True`` (or ``method="stockwell", average="auto"``) the result will + be an :class:`~mne.time_frequency.AverageTFR` instead of an + :class:`~mne.time_frequency.EpochsTFR`. + + .. 
versionadded:: 1.7 + + References + ---------- + .. footbibliography:: + """ + if method == "stockwell" and not average: # stockwell method *must* average + logger.info( + 'Requested `method="stockwell"` so ignoring parameter `average=False`.' + ) + average = True + if average: + # augment `output` value for use by tfr_array_* functions + _check_option("output", output, ("power",), extra=" when average=True") + method_kw["output"] = "avg_power_itc" if return_itc else "avg_power" + else: + msg = ( + "compute_tfr() got incompatible parameters `average=False` and `{}` " + "({} requires averaging over epochs)." + ) + if return_itc: + raise ValueError(msg.format("return_itc=True", "computing ITC")) + if method == "stockwell": + raise ValueError(msg.format('method="stockwell"', "Stockwell method")) + # `average` and `return_itc` both False, so "phase" and "complex" are OK + _check_option("output", output, ("power", "phase", "complex")) + method_kw["output"] = output + + if method == "stockwell": + method_kw["return_itc"] = return_itc + method_kw.pop("output") + if isinstance(freqs, str): + _check_option("freqs", freqs, "auto") + else: + _validate_type(freqs, "array-like") + _check_option( + "freqs", np.array(freqs).shape, ((2,),), extra=" (wrong shape)." + ) + if average: + out = AverageTFR( + inst=self, + method=method, + freqs=freqs, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + decim=decim, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + # tfr_array_stockwell always returns ITC (but sometimes it's None) + if hasattr(out, "_itc"): + if out._itc is not None: + state = out.__getstate__() + state["data"] = out._itc + state["data_type"] = "Inter-trial coherence" + itc = AverageTFR(inst=state) + del out._itc + return out, itc + del out._itc + return out + # now handle average=False + return EpochsTFR( + inst=self, + method=method, + freqs=freqs, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + decim=decim, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def plot_psd( + self, + fmin=0, + fmax=np.inf, + tmin=None, + tmax=None, + picks=None, + proj=False, + *, + method="auto", + average=False, + dB=True, + estimate="power", + xscale="linear", + area_mode="std", + area_alpha=0.33, + color="black", + line_alpha=None, + spatial_colors=True, + sphere=None, + exclude="bads", + ax=None, + show=True, + n_jobs=1, + verbose=None, + **method_kw, + ): """%(plot_psd_doc)s. 
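(Editorial aside on the ``compute_tfr`` method added above: a minimal usage sketch.
The frequency grid, the ``n_cycles`` choice, and the pre-existing ``epochs`` object
are illustrative assumptions, not part of this changeset.)

    import numpy as np

    freqs = np.arange(4.0, 40.0, 2.0)
    # single-trial power -> EpochsTFR
    power = epochs.compute_tfr("morlet", freqs=freqs, n_cycles=freqs / 2.0, decim=2)
    # averaged power plus inter-trial coherence -> (AverageTFR, AverageTFR)
    avg_power, itc = epochs.compute_tfr(
        "morlet", freqs=freqs, n_cycles=freqs / 2.0, average=True, return_itc=True
    )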
Parameters @@ -2116,17 +2778,44 @@ def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None, picks=None, %(notes_plot_psd_meth)s """ return super().plot_psd( - fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, picks=picks, proj=proj, - reject_by_annotation=False, method=method, average=average, dB=dB, - estimate=estimate, xscale=xscale, area_mode=area_mode, - area_alpha=area_alpha, color=color, line_alpha=line_alpha, - spatial_colors=spatial_colors, sphere=sphere, exclude=exclude, - ax=ax, show=show, n_jobs=n_jobs, verbose=verbose, **method_kw) + fmin=fmin, + fmax=fmax, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + reject_by_annotation=False, + method=method, + average=average, + dB=dB, + estimate=estimate, + xscale=xscale, + area_mode=area_mode, + area_alpha=area_alpha, + color=color, + line_alpha=line_alpha, + spatial_colors=spatial_colors, + sphere=sphere, + exclude=exclude, + ax=ax, + show=show, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) @verbose - def to_data_frame(self, picks=None, index=None, - scalings=None, copy=True, long_format=False, - time_format=None, *, verbose=None): + def to_data_frame( + self, + picks=None, + index=None, + scalings=None, + copy=True, + long_format=False, + time_format=None, + *, + verbose=None, + ): """Export data in tabular structure as a pandas DataFrame. Channels are converted to columns in the DataFrame. By default, @@ -2156,13 +2845,13 @@ def to_data_frame(self, picks=None, index=None, # check pandas once here, instead of in each private utils function pd = _check_pandas_installed() # noqa # arg checking - valid_index_args = ['time', 'epoch', 'condition'] - valid_time_formats = ['ms', 'timedelta'] + valid_index_args = ["time", "epoch", "condition"] + valid_time_formats = ["ms", "timedelta"] index = _check_pandas_index_arguments(index, valid_index_args) time_format = _check_time_format(time_format, valid_time_formats) # get data - picks = _picks_to_idx(self.info, picks, 'all', exclude=()) - data = self.get_data()[:, picks, :] + picks = _picks_to_idx(self.info, picks, "all", exclude=()) + data = self._get_data(on_empty="raise")[:, picks, :] times = self.times n_epochs, n_picks, n_times = data.shape data = np.hstack(data).T # (time*epochs) x signals @@ -2172,19 +2861,26 @@ def to_data_frame(self, picks=None, index=None, # prepare extra columns / multiindex mindex = list() times = np.tile(times, n_epochs) - times = _convert_times(self, times, time_format) - mindex.append(('time', times)) + times = _convert_times(times, time_format, meas_date=self.info["meas_date"]) + mindex.append(("time", times)) rev_event_id = {v: k for k, v in self.event_id.items()} conditions = [rev_event_id[k] for k in self.events[:, 2]] - mindex.append(('condition', np.repeat(conditions, n_times))) - mindex.append(('epoch', np.repeat(self.selection, n_times))) + mindex.append(("condition", np.repeat(conditions, n_times))) + mindex.append(("epoch", np.repeat(self.selection, n_times))) assert all(len(mdx) == len(mindex[0]) for mdx in mindex) # build DataFrame - df = _build_data_frame(self, data, picks, long_format, mindex, index, - default_index=['condition', 'epoch', 'time']) + df = _build_data_frame( + self, + data, + picks, + long_format, + mindex, + index, + default_index=["condition", "epoch", "time"], + ) return df - def as_type(self, ch_type='grad', mode='fast'): + def as_type(self, ch_type="grad", mode="fast"): """Compute virtual epochs using interpolated fields. .. 
Warning:: Using virtual epochs to compute inverse can yield @@ -2214,10 +2910,12 @@ def as_type(self, ch_type='grad', mode='fast'): .. versionadded:: 0.20.0 """ from .forward import _as_meg_type_inst + + self._handle_empty("raise", "as_type") return _as_meg_type_inst(self, ch_type=ch_type, mode=mode) -def _drop_log_stats(drop_log, ignore=('IGNORED',)): +def _drop_log_stats(drop_log, ignore=("IGNORED",)): """Compute drop log stats. Parameters @@ -2232,28 +2930,40 @@ def _drop_log_stats(drop_log, ignore=('IGNORED',)): perc : float Total percentage of epochs dropped. """ - if not isinstance(drop_log, tuple) or \ - not all(isinstance(d, tuple) for d in drop_log) or \ - not all(isinstance(s, str) for d in drop_log for s in d): - raise TypeError('drop_log must be a tuple of tuple of str') - perc = 100 * np.mean([len(d) > 0 for d in drop_log - if not any(r in ignore for r in d)]) + if ( + not isinstance(drop_log, tuple) + or not all(isinstance(d, tuple) for d in drop_log) + or not all(isinstance(s, str) for d in drop_log for s in d) + ): + raise TypeError("drop_log must be a tuple of tuple of str") + perc = 100 * np.mean( + [len(d) > 0 for d in drop_log if not any(r in ignore for r in d)] + ) return perc -def make_metadata(events, event_id, tmin, tmax, sfreq, - row_events=None, keep_first=None, keep_last=None): - """Generate metadata from events for use with `mne.Epochs`. +def make_metadata( + events, + event_id, + tmin, + tmax, + sfreq, + row_events=None, + keep_first=None, + keep_last=None, +): + """Automatically generate metadata for use with `mne.Epochs` from events. This function mimics the epoching process (it constructs time windows around time-locked "events of interest") and collates information about any other events that occurred within those time windows. The information - is returned as a :class:`pandas.DataFrame` suitable for use as + is returned as a :class:`pandas.DataFrame`, suitable for use as `~mne.Epochs` metadata: one row per time-locked event, and columns - indicating presence/absence and latency of each ancillary event type. + indicating presence or absence and latency of each ancillary event type. The function will also return a new ``events`` array and ``event_id`` - dictionary that correspond to the generated metadata. + dictionary that correspond to the generated metadata, which together can then be + readily fed into `~mne.Epochs`. Parameters ---------- @@ -2266,25 +2976,47 @@ def make_metadata(events, event_id, tmin, tmax, sfreq, A mapping from event names (keys) to event IDs (values). The event names will be incorporated as columns of the returned metadata :class:`~pandas.DataFrame`. - tmin, tmax : float - Start and end of the time interval for metadata generation in seconds, - relative to the time-locked event of the respective time window. + tmin, tmax : float | str | list of str | None + If float, start and end of the time interval for metadata generation in seconds, + relative to the time-locked event of the respective time window (the "row + events"). .. note:: If you are planning to attach the generated metadata to `~mne.Epochs` and intend to include only events that fall inside - your epochs time interval, pass the same ``tmin`` and ``tmax`` + your epoch's time interval, pass the same ``tmin`` and ``tmax`` values here as you use for your epochs. + If ``None``, the time window used for metadata generation is bounded by the + ``row_events``. 
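For instance, a hedged sketch of the ``None`` behaviour just described (the event
scheme, the event names, and the pre-existing ``events`` and ``raw`` objects are
hypothetical):

    import mne

    # Each trial starts with a "cue"; with tmin=None/tmax=None the window for a
    # row runs from that cue up to just before the next event of the same type.
    metadata, new_events, new_event_id = mne.epochs.make_metadata(
        events=events,
        event_id={"cue": 1, "response": 2},
        tmin=None,
        tmax=None,
        sfreq=raw.info["sfreq"],
        row_events="cue",
    )
    epochs = mne.Epochs(raw, new_events, new_event_id, metadata=metadata)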
This can be particularly practical if trial duration varies
+        greatly, but each trial starts with a known event (e.g., a visual cue or
+        fixation).
+
+        .. note::
+           If ``tmin=None``, the first time window for metadata generation starts with
+           the first row event. If ``tmax=None``, the last time window for metadata
+           generation ends with the last event in ``events``.
+
+        If a string or a list of strings, the events bounding the metadata around each
+        "row event". For ``tmin``, the events are assumed to occur **before** the row
+        event, and for ``tmax``, the events are assumed to occur **after** – unless
+        ``tmin`` or ``tmax`` are equal to a row event, in which case the row event
+        serves as the bound.
+
+        .. versionchanged:: 1.6.0
+           Added support for ``None``.
+
+        .. versionadded:: 1.7.0
+           Added support for strings.
     sfreq : float
         The sampling frequency of the data from which the events array was
         extracted.
     row_events : list of str | str | None
-        Event types around which to create the time windows / for which to
-        create **rows** in the returned metadata :class:`pandas.DataFrame`. If
-        provided, the string(s) must be keys of ``event_id``. If ``None``
-        (default), rows are created for **all** event types present in
-        ``event_id``.
+        Event types around which to create the time windows. For each of these
+        time-locked events, we will create a **row** in the returned metadata
+        :class:`pandas.DataFrame`. If provided, the string(s) must be keys of
+        ``event_id``. If ``None`` (default), rows are created for **all** event types
+        present in ``event_id``.
     keep_first : str | list of str | None
         Specify subsets of :term:`hierarchical event descriptors` (HEDs,
         inspired by :footcite:`BigdelyShamloEtAl2013`) matching events of which
@@ -2359,8 +3091,10 @@ def make_metadata(events, event_id, tmin, tmax, sfreq,
     The time window used for metadata generation need not correspond to the
     time window used to create the `~mne.Epochs`, to which the metadata will
     be attached; it may well be much shorter or longer, or not overlap at all,
-    if desired. The can be useful, for example, to include events that occurred
-    before or after an epoch, e.g. during the inter-trial interval.
+    if desired. This can be useful, for example, to include events that
+    occurred before or after an epoch, e.g. during the inter-trial interval.
+    If either ``tmin``, ``tmax``, or both are ``None``, or a string referring e.g. to a
+    response event, the time window will typically vary, too.

    ..
versionadded:: 0.23 @@ -2370,16 +3104,17 @@ def make_metadata(events, event_id, tmin, tmax, sfreq, """ pd = _check_pandas_installed() - _validate_type(event_id, types=(dict,), item_name='event_id') - _validate_type(row_events, types=(None, str, list, tuple), - item_name='row_events') - _validate_type(keep_first, types=(None, str, list, tuple), - item_name='keep_first') - _validate_type(keep_last, types=(None, str, list, tuple), - item_name='keep_last') + _validate_type(events, types=("array-like",), item_name="events") + _validate_type(event_id, types=(dict,), item_name="event_id") + _validate_type(sfreq, types=("numeric",), item_name="sfreq") + _validate_type(tmin, types=("numeric", str, "array-like", None), item_name="tmin") + _validate_type(tmax, types=("numeric", str, "array-like", None), item_name="tmax") + _validate_type(row_events, types=(None, str, "array-like"), item_name="row_events") + _validate_type(keep_first, types=(None, str, "array-like"), item_name="keep_first") + _validate_type(keep_last, types=(None, str, "array-like"), item_name="keep_last") if not event_id: - raise ValueError('event_id dictionary must contain at least one entry') + raise ValueError("event_id dictionary must contain at least one entry") def _ensure_list(x): if x is None: @@ -2393,45 +3128,83 @@ def _ensure_list(x): keep_first = _ensure_list(keep_first) keep_last = _ensure_list(keep_last) + # Turn tmin, tmax into a list if they're strings or arrays of strings + try: + _validate_type(tmin, types=(str, "array-like"), item_name="tmin") + tmin = _ensure_list(tmin) + except TypeError: + pass + + try: + _validate_type(tmax, types=(str, "array-like"), item_name="tmax") + tmax = _ensure_list(tmax) + except TypeError: + pass + keep_first_and_last = set(keep_first) & set(keep_last) if keep_first_and_last: - raise ValueError(f'The event names in keep_first and keep_last must ' - f'be mutually exclusive. Specified in both: ' - f'{", ".join(sorted(keep_first_and_last))}') + raise ValueError( + f"The event names in keep_first and keep_last must " + f"be mutually exclusive. 
Specified in both: " + f"{', '.join(sorted(keep_first_and_last))}" + ) del keep_first_and_last - for param_name, values in dict(keep_first=keep_first, - keep_last=keep_last).items(): + for param_name, values in dict(keep_first=keep_first, keep_last=keep_last).items(): for first_last_event_name in values: try: match_event_names(event_id, [first_last_event_name]) except KeyError: raise ValueError( f'Event "{first_last_event_name}", specified in ' - f'{param_name}, cannot be found in event_id dictionary') + f"{param_name}, cannot be found in event_id dictionary" + ) - event_name_diff = sorted(set(row_events) - set(event_id.keys())) - if event_name_diff: - raise ValueError( - f'Present in row_events, but missing from event_id: ' - f'{", ".join(event_name_diff)}') - del event_name_diff + # If tmin, tmax are strings, ensure these event names are present in event_id + def _diff_input_strings_vs_event_id(input_strings, input_name, event_id): + event_name_diff = sorted(set(input_strings) - set(event_id.keys())) + if event_name_diff: + raise ValueError( + f"Present in {input_name}, but missing from event_id: " + f"{', '.join(event_name_diff)}" + ) + + _diff_input_strings_vs_event_id( + input_strings=row_events, input_name="row_events", event_id=event_id + ) + if isinstance(tmin, list): + _diff_input_strings_vs_event_id( + input_strings=tmin, input_name="tmin", event_id=event_id + ) + if isinstance(tmax, list): + _diff_input_strings_vs_event_id( + input_strings=tmax, input_name="tmax", event_id=event_id + ) # First and last sample of each epoch, relative to the time-locked event # This follows the approach taken in mne.Epochs - start_sample = int(round(tmin * sfreq)) - stop_sample = int(round(tmax * sfreq)) + 1 + # For strings and None, we don't know the start and stop samples in advance as the + # time window can vary. + if isinstance(tmin, type(None) | list): + start_sample = None + else: + start_sample = int(round(tmin * sfreq)) + + if isinstance(tmax, type(None) | list): + stop_sample = None + else: + stop_sample = int(round(tmax * sfreq)) + 1 # Make indexing easier # We create the DataFrame before subsetting the events so we end up with # indices corresponding to the original event indices. 
Not used for now, # but might come in handy sometime later - events_df = pd.DataFrame(events, columns=('sample', 'prev_id', 'id')) + events_df = pd.DataFrame(events, columns=("sample", "prev_id", "id")) id_to_name_map = {v: k for k, v in event_id.items()} # Only keep events that are of interest - events = events[np.in1d(events[:, 2], list(event_id.values()))] - events_df = events_df.loc[events_df['id'].isin(event_id.values()), :] + events = events[np.isin(events[:, 2], list(event_id.values()))] + events_df = events_df.loc[events_df["id"].isin(event_id.values()), :] # Prepare & condition the metadata DataFrame @@ -2439,51 +3212,119 @@ def _ensure_list(x): # event_id.keys() and keep_first / keep_last simultaneously keep_first_cols = [col for col in keep_first if col not in event_id] keep_last_cols = [col for col in keep_last if col not in event_id] - first_cols = [f'first_{col}' for col in keep_first_cols] - last_cols = [f'last_{col}' for col in keep_last_cols] - - columns = ['event_name', - *event_id.keys(), - *keep_first_cols, - *keep_last_cols, - *first_cols, - *last_cols] - - data = np.empty((len(events_df), len(columns))) + first_cols = [f"first_{col}" for col in keep_first_cols] + last_cols = [f"last_{col}" for col in keep_last_cols] + + columns = [ + "event_name", + *event_id.keys(), + *keep_first_cols, + *keep_last_cols, + *first_cols, + *last_cols, + ] + + data = np.empty((len(events_df), len(columns)), float) metadata = pd.DataFrame(data=data, columns=columns, index=events_df.index) # Event names - metadata.iloc[:, 0] = '' + metadata["event_name"] = "" # Event times start_idx = 1 - stop_idx = (start_idx + len(event_id.keys()) + - len(keep_first_cols + keep_last_cols)) + stop_idx = start_idx + len(event_id.keys()) + len(keep_first_cols + keep_last_cols) metadata.iloc[:, start_idx:stop_idx] = np.nan # keep_first and keep_last names start_idx = stop_idx - metadata.iloc[:, start_idx:] = None + metadata[columns[start_idx:]] = None - # We're all set, let's iterate over all eventns and fill in in the + # We're all set, let's iterate over all events and fill in in the # respective cells in the metadata. We will subset this to include only # `row_events` later - for row_event in events_df.itertuples(name='RowEvent'): + for row_event in events_df.itertuples(name="RowEvent"): row_idx = row_event.Index - metadata.loc[row_idx, 'event_name'] = \ - id_to_name_map[row_event.id] + metadata.loc[row_idx, "event_name"] = id_to_name_map[row_event.id] + + # Determine which events fall into the current time window + if start_sample is None and isinstance(tmin, list): + # Lower bound is the the current or the closest previpus event with a name + # in "tmin"; if there is no such event (e.g., beginning of the recording is + # being approached), the upper lower becomes the last event in the + # recording. + prev_matching_events = events_df.loc[ + (events_df["sample"] <= row_event.sample) + & (events_df["id"].isin([event_id[name] for name in tmin])), + :, + ] + if prev_matching_events.size == 0: + # No earlier matching event. Use the current one as the beginning of the + # time window. This may occur at the beginning of a recording. + window_start_sample = row_event.sample + else: + # At least one earlier matching event. Use the closest one. + window_start_sample = prev_matching_events.iloc[-1]["sample"] + elif start_sample is None: + # Lower bound is the current event. + window_start_sample = row_event.sample + else: + # Lower bound is determined by tmin. 
+ window_start_sample = row_event.sample + start_sample + + if stop_sample is None and isinstance(tmax, list): + # Upper bound is the the current or the closest following event with a name + # in "tmax"; if there is no such event (e.g., end of the recording is being + # approached), the upper bound becomes the last event in the recording. + next_matching_events = events_df.loc[ + (events_df["sample"] >= row_event.sample) + & (events_df["id"].isin([event_id[name] for name in tmax])), + :, + ] + if next_matching_events.size == 0: + # No matching event after the current one; use the end of the recording + # as upper bound. This may occur at the end of a recording. + window_stop_sample = events_df["sample"].iloc[-1] + else: + # At least one matching later event. Use the closest one.. + window_stop_sample = next_matching_events.iloc[0]["sample"] + elif stop_sample is None: + # Upper bound: next event of the same type, or the last event (of + # any type) if no later event of the same type can be found. + next_events = events_df.loc[ + (events_df["sample"] > row_event.sample), + :, + ] + if next_events.size == 0: + # We've reached the last event in the recording. + window_stop_sample = row_event.sample + elif next_events.loc[next_events["id"] == row_event.id, :].size > 0: + # There's still an event of the same type appearing after the + # current event. Stop one sample short, we don't want to include that + # last event here, but in the next iteration. + window_stop_sample = ( + next_events.loc[next_events["id"] == row_event.id, :].iloc[0][ + "sample" + ] + - 1 + ) + else: + # There are still events after the current one, but not of the + # same type. + window_stop_sample = next_events.iloc[-1]["sample"] + else: + # Upper bound is determined by tmax. + window_stop_sample = row_event.sample + stop_sample - # Determine which events fall into the current epoch - window_start_sample = row_event.sample + start_sample - window_stop_sample = row_event.sample + stop_sample events_in_window = events_df.loc[ - (events_df['sample'] >= window_start_sample) & - (events_df['sample'] <= window_stop_sample), :] + (events_df["sample"] >= window_start_sample) + & (events_df["sample"] <= window_stop_sample), + :, + ] assert not events_in_window.empty # Store the metadata - for event in events_in_window.itertuples(name='Event'): + for event in events_in_window.itertuples(name="Event"): event_sample = event.sample - row_event.sample event_time = event_sample / sfreq event_time = 0 if np.isclose(event_time, 0) else event_time @@ -2500,31 +3341,29 @@ def _ensure_list(x): # Handle keep_first and keep_last event aggregation for event_group_name in keep_first + keep_last: - if event_name not in match_event_names( - event_id, [event_group_name] - ): + if event_name not in match_event_names(event_id, [event_group_name]): continue if event_group_name in keep_first: - first_last_col = f'first_{event_group_name}' + first_last_col = f"first_{event_group_name}" else: - first_last_col = f'last_{event_group_name}' + first_last_col = f"last_{event_group_name}" old_time = metadata.loc[row_idx, event_group_name] if not np.isnan(old_time): - if ((event_group_name in keep_first and - old_time <= event_time) or - (event_group_name in keep_last and - old_time >= event_time)): + if (event_group_name in keep_first and old_time <= event_time) or ( + event_group_name in keep_last and old_time >= event_time + ): continue if event_group_name not in event_id: # This is an HED. 
Strip redundant information from the # event name - name = (event_name - .replace(event_group_name, '') - .replace('//', '/') - .strip('/')) + name = ( + event_name.replace(event_group_name, "") + .replace("//", "/") + .strip("/") + ) metadata.loc[row_idx, first_last_col] = name del name @@ -2532,18 +3371,51 @@ def _ensure_list(x): # Only keep rows of interest if row_events: - event_id_timelocked = {name: val for name, val in event_id.items() - if name in row_events} - events = events[np.in1d(events[:, 2], - list(event_id_timelocked.values()))] - metadata = metadata.loc[ - metadata['event_name'].isin(event_id_timelocked)] + event_id_timelocked = { + name: val for name, val in event_id.items() if name in row_events + } + events = events[np.isin(events[:, 2], list(event_id_timelocked.values()))] + metadata = metadata.loc[metadata["event_name"].isin(event_id_timelocked)] assert len(events) == len(metadata) event_id = event_id_timelocked return metadata, events, event_id +def _events_from_annotations(raw, events, event_id, annotations, on_missing): + """Generate events and event_ids from annotations.""" + events, event_id_tmp = events_from_annotations(raw) + if events.size == 0: + raise RuntimeError( + "No usable annotations found in the raw object. " + "Either `events` must be provided or the raw " + "object must have annotations to construct epochs" + ) + if any(raw.annotations.duration > 0): + logger.info( + "Ignoring annotation durations and creating fixed-duration epochs " + "around annotation onsets." + ) + if event_id is None: + event_id = event_id_tmp + # if event_id is the names of events, map to events integers + if isinstance(event_id, str): + event_id = [event_id] + if isinstance(event_id, list | tuple | set): + if not set(event_id).issubset(set(event_id_tmp)): + msg = ( + "No matching annotations found for event_id(s) " + f"{set(event_id) - set(event_id_tmp)}" + ) + _on_missing(on_missing, msg) + # remove extras if on_missing not error + event_id = set(event_id) & set(event_id_tmp) + event_id = {my_id: event_id_tmp[my_id] for my_id in event_id} + # remove any non-selected annotations + annotations.delete(~np.isin(raw.annotations.description, list(event_id))) + return events, event_id, annotations + + @fill_doc class Epochs(BaseEpochs): """Epochs extracted from a Raw instance. @@ -2551,7 +3423,16 @@ class Epochs(BaseEpochs): Parameters ---------- %(raw_epochs)s + + .. note:: + If ``raw`` contains annotations, ``Epochs`` can be constructed around + ``raw.annotations.onset``, but note that the durations of the annotations + are ignored in this case. %(events_epochs)s + + .. versionchanged:: 1.7 + Allow ``events=None`` to use ``raw.annotations.onset`` as the source of + epoch times. %(event_id)s %(epochs_tmin_tmax)s %(baseline_epochs)s @@ -2569,20 +3450,18 @@ class Epochs(BaseEpochs): %(on_missing_epochs)s %(reject_by_annotation_epochs)s %(metadata_epochs)s + + .. versionadded:: 0.16 %(event_repeated_epochs)s %(verbose)s Attributes ---------- %(info_not_none)s - event_id : dict - Names of conditions corresponding to event_ids. + %(event_id_attr)s ch_names : list of string List of channel names. - selection : array - List of indices of selected events (not dropped or ignored etc.). For - example, if the original event array had 4 events and the second event - has been dropped, this attribute would be np.array([0, 2, 3]). + %(selection_attr)s preload : bool Indicates whether epochs are in memory. 
drop_log : tuple of tuple @@ -2600,6 +3479,10 @@ class Epochs(BaseEpochs): See :meth:`~mne.Epochs.equalize_event_counts` - 'USER' For user-defined reasons (see :meth:`~mne.Epochs.drop`). + + When dropping based on flat or reject parameters the tuple of + reasons contains a tuple of channels that satisfied the rejection + criteria. filename : str The filename of the object. times : ndarray @@ -2649,16 +3532,37 @@ class Epochs(BaseEpochs): """ @verbose - def __init__(self, raw, events, event_id=None, tmin=-0.2, tmax=0.5, - baseline=(None, 0), picks=None, preload=False, reject=None, - flat=None, proj=True, decim=1, reject_tmin=None, - reject_tmax=None, detrend=None, on_missing='raise', - reject_by_annotation=True, metadata=None, - event_repeated='error', verbose=None): # noqa: D102 + def __init__( + self, + raw, + events=None, + event_id=None, + tmin=-0.2, + tmax=0.5, + baseline=(None, 0), + picks=None, + preload=False, + reject=None, + flat=None, + proj=True, + decim=1, + reject_tmin=None, + reject_tmax=None, + detrend=None, + on_missing="raise", + reject_by_annotation=True, + metadata=None, + event_repeated="error", + verbose=None, + ): + from .io import BaseRaw + if not isinstance(raw, BaseRaw): - raise ValueError('The first argument to `Epochs` must be an ' - 'instance of mne.io.BaseRaw') + raise ValueError( + "The first argument to `Epochs` must be an instance of mne.io.BaseRaw" + ) info = deepcopy(raw.info) + annotations = raw.annotations.copy() # proj is on when applied in Raw proj = proj or raw.proj @@ -2666,17 +3570,52 @@ def __init__(self, raw, events, event_id=None, tmin=-0.2, tmax=0.5, self.reject_by_annotation = reject_by_annotation # keep track of original sfreq (needed for annotations) - raw_sfreq = raw.info['sfreq'] + raw_sfreq = raw.info["sfreq"] + + # get events from annotations if no events given + if events is None: + events, event_id, annotations = _events_from_annotations( + raw, events, event_id, annotations, on_missing + ) + + # add the annotations.extras to the metadata + if not all(len(d) == 0 for d in annotations.extras): + pd = _check_pandas_installed(strict=True) + extras_df = pd.DataFrame(annotations.extras) + if metadata is None: + metadata = extras_df + else: + extras_df.set_index(metadata.index, inplace=True) + metadata = pd.concat( + [metadata, extras_df], axis=1, ignore_index=False + ) # call BaseEpochs constructor - super(Epochs, self).__init__( - info, None, events, event_id, tmin, tmax, - metadata=metadata, baseline=baseline, raw=raw, picks=picks, - reject=reject, flat=flat, decim=decim, reject_tmin=reject_tmin, - reject_tmax=reject_tmax, detrend=detrend, - proj=proj, on_missing=on_missing, preload_at_end=preload, - event_repeated=event_repeated, verbose=verbose, - raw_sfreq=raw_sfreq, annotations=raw.annotations) + super().__init__( + info, + None, + events, + event_id, + tmin, + tmax, + metadata=metadata, + baseline=baseline, + raw=raw, + picks=picks, + reject=reject, + flat=flat, + decim=decim, + reject_tmin=reject_tmin, + reject_tmax=reject_tmax, + detrend=detrend, + proj=proj, + on_missing=on_missing, + preload_at_end=preload, + event_repeated=event_repeated, + verbose=verbose, + raw_sfreq=raw_sfreq, + annotations=annotations, + ) @verbose def _get_epoch_from_raw(self, idx, verbose=None): @@ -2691,10 +3630,12 @@ def _get_epoch_from_raw(self, idx, verbose=None): """ if self._raw is None: # This should never happen, as raw=None only if preload=True - raise ValueError('An error has occurred, no valid raw file found. 
' - 'Please report this to the mne-python ' - 'developers.') - sfreq = self._raw.info['sfreq'] + raise ValueError( + "An error has occurred, no valid raw file found. " + "Please report this to the mne-python " + "developers." + ) + sfreq = self._raw.info["sfreq"] event_samp = self.events[idx, 0] # Read a data segment from "start" to "stop" in samples first_samp = self._raw.first_samp @@ -2716,10 +3657,15 @@ def _get_epoch_from_raw(self, idx, verbose=None): diff = int(round((self._raw_times[-1] - reject_tmax) * sfreq)) reject_stop = stop - diff - logger.debug(' Getting epoch for %d-%d' % (start, stop)) - data = self._raw._check_bad_segment(start, stop, self.picks, - reject_start, reject_stop, - self.reject_by_annotation) + logger.debug(f" Getting epoch for {start}-{stop}") + data = self._raw._check_bad_segment( + start, + stop, + self.picks, + reject_start, + reject_stop, + self.reject_by_annotation, + ) return data @@ -2734,38 +3680,17 @@ class EpochsArray(BaseEpochs): measure. %(info_not_none)s Consider using :func:`mne.create_info` to populate this structure. - events : None | array of int, shape (n_events, 3) - The events typically returned by the read_events function. - If some events don't match the events of interest as specified - by event_id, they will be marked as 'IGNORED' in the drop log. - If None (default), all event values are set to 1 and event time-samples - are set to range(n_epochs). - tmin : float - Start time before event. If nothing provided, defaults to 0. - event_id : int | list of int | dict | None - The id of the event to consider. If dict, - the keys can later be used to access associated events. Example: - dict(auditory=1, visual=3). If int, a dict will be created with - the id as string. If a list, all events with the IDs specified - in the list are used. If None, all events will be used with - and a dict is created with string integer names corresponding - to the event id integers. + %(events_epochs)s + %(tmin_epochs)s + %(event_id)s %(reject_epochs)s %(flat)s - reject_tmin : scalar | None - Start of the time window used to reject epochs (with the default None, - the window will start with tmin). - reject_tmax : scalar | None - End of the time window used to reject epochs (with the default None, - the window will end with tmax). + %(epochs_reject_tmin_tmax)s %(baseline_epochs)s Defaults to ``None``, i.e. no baseline correction. - proj : bool | 'delayed' - Apply SSP projection vectors. See :class:`mne.Epochs` for details. - on_missing : str - See :class:`mne.Epochs` docstring for details. - metadata : instance of pandas.DataFrame | None - See :class:`mne.Epochs` docstring for details. + %(proj_epochs)s + %(on_missing_epochs)s + %(metadata_epochs)s .. 
versionadded:: 0.16 %(selection)s @@ -2801,38 +3726,70 @@ class EpochsArray(BaseEpochs): """ @verbose - def __init__(self, data, info, events=None, tmin=0, event_id=None, - reject=None, flat=None, reject_tmin=None, - reject_tmax=None, baseline=None, proj=True, - on_missing='raise', metadata=None, selection=None, - *, drop_log=None, raw_sfreq=None, verbose=None): # noqa: D102 + def __init__( + self, + data, + info, + events=None, + tmin=0.0, + event_id=None, + reject=None, + flat=None, + reject_tmin=None, + reject_tmax=None, + baseline=None, + proj=True, + on_missing="raise", + metadata=None, + selection=None, + *, + drop_log=None, + raw_sfreq=None, + verbose=None, + ): dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64 data = np.asanyarray(data, dtype=dtype) if data.ndim != 3: - raise ValueError('Data must be a 3D array of shape (n_epochs, ' - 'n_channels, n_samples)') + raise ValueError( + "Data must be a 3D array of shape (n_epochs, n_channels, n_samples)" + ) - if len(info['ch_names']) != data.shape[1]: - raise ValueError('Info and data must have same number of ' - 'channels.') + if len(info["ch_names"]) != data.shape[1]: + raise ValueError("Info and data must have same number of channels.") if events is None: n_epochs = len(data) events = _gen_events(n_epochs) info = info.copy() # do not modify original info - tmax = (data.shape[2] - 1) / info['sfreq'] + tmin - - super(EpochsArray, self).__init__( - info, data, events, event_id, tmin, tmax, baseline, - reject=reject, flat=flat, reject_tmin=reject_tmin, - reject_tmax=reject_tmax, decim=1, metadata=metadata, - selection=selection, proj=proj, on_missing=on_missing, - drop_log=drop_log, raw_sfreq=raw_sfreq, verbose=verbose) + tmax = (data.shape[2] - 1) / info["sfreq"] + tmin + + super().__init__( + info, + data, + events, + event_id, + tmin, + tmax, + baseline, + reject=reject, + flat=flat, + reject_tmin=reject_tmin, + reject_tmax=reject_tmax, + decim=1, + metadata=metadata, + selection=selection, + proj=proj, + on_missing=on_missing, + drop_log=drop_log, + raw_sfreq=raw_sfreq, + verbose=verbose, + ) if self.baseline is not None: self._do_baseline = True - if len(events) != np.in1d(self.events[:, 2], - list(self.event_id.values())).sum(): - raise ValueError('The events must only contain event numbers from ' - 'event_id') + if ( + len(events) + != np.isin(self.events[:, 2], list(self.event_id.values())).sum() + ): + raise ValueError("The events must only contain event numbers from event_id") detrend_picks = self._detrend_picks for e in self._data: # This is safe without assignment b/c there is no decim @@ -2876,19 +3833,20 @@ def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True): new_event_id = {str(new_event_id): new_event_id} else: if not isinstance(new_event_id, dict): - raise ValueError('new_event_id must be a dict or int') + raise ValueError("new_event_id must be a dict or int") if not len(list(new_event_id.keys())) == 1: - raise ValueError('new_event_id dict must have one entry') + raise ValueError("new_event_id dict must have one entry") new_event_num = list(new_event_id.values())[0] new_event_num = operator.index(new_event_num) if new_event_num in epochs.event_id.values(): - raise ValueError('new_event_id value must not already exist') + raise ValueError("new_event_id value must not already exist") # could use .pop() here, but if a latter one doesn't exist, we're # in trouble, so run them all here and pop() later old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids]) # find the 
ones to replace - inds = np.any(epochs.events[:, 2][:, np.newaxis] == - old_event_nums[np.newaxis, :], axis=1) + inds = np.any( + epochs.events[:, 2][:, np.newaxis] == old_event_nums[np.newaxis, :], axis=1 + ) # replace the event numbers in the events list epochs.events[inds, 2] = new_event_num # delete old entries @@ -2899,23 +3857,22 @@ def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True): return epochs -def equalize_epoch_counts(epochs_list, method='mintime'): - """Equalize the number of trials in multiple Epoch instances. +@fill_doc +def equalize_epoch_counts(epochs_list, method="mintime", *, random_state=None): + """Equalize the number of trials in multiple Epochs or EpochsTFR instances. Parameters ---------- epochs_list : list of Epochs instances The Epochs instances to equalize trial counts for. - method : str - If 'truncate', events will be truncated from the end of each event - list. If 'mintime', timing differences between each event list will be - minimized. + %(equalize_events_method)s + %(random_state)s Used only if ``method='random'``. Notes ----- - This tries to make the remaining epochs occurring as close as possible in - time. This method works based on the idea that if there happened to be some - time-varying (like on the scale of minutes) noise characteristics during + The method ``'mintime'`` tries to make the remaining epochs occurring as close as + possible in time. This method is motivated by the possibility that if there happened + to be some time-varying (like on the scale of minutes) noise characteristics during a recording, they could be compensated for (to some extent) in the equalization process. This method thus seeks to reduce any of those effects by minimizing the differences in the times of the events in the two sets of @@ -2927,39 +3884,43 @@ def equalize_epoch_counts(epochs_list, method='mintime'): -------- >>> equalize_epoch_counts([epochs1, epochs2]) # doctest: +SKIP """ - if not all(isinstance(e, BaseEpochs) for e in epochs_list): - raise ValueError('All inputs must be Epochs instances') - + if not all(isinstance(epoch, BaseEpochs | EpochsTFR) for epoch in epochs_list): + raise ValueError("All inputs must be Epochs instances") # make sure bad epochs are dropped - for e in epochs_list: - if not e._bad_dropped: - e.drop_bad() - event_times = [e.events[:, 0] for e in epochs_list] - indices = _get_drop_indices(event_times, method) - for e, inds in zip(epochs_list, indices): - e.drop(inds, reason='EQUALIZED_COUNT') + for epoch in epochs_list: + if not epoch._bad_dropped: + epoch.drop_bad() + sample_nums = [epoch.events[:, 0] for epoch in epochs_list] + indices = _get_drop_indices(sample_nums, method, random_state) + for epoch, inds in zip(epochs_list, indices): + epoch.drop(inds, reason="EQUALIZED_COUNT") -def _get_drop_indices(event_times, method): +def _get_drop_indices(sample_nums, method, random_state): """Get indices to drop from multiple event timing lists.""" - small_idx = np.argmin([e.shape[0] for e in event_times]) - small_e_times = event_times[small_idx] - _check_option('method', method, ['mintime', 'truncate']) + small_idx = np.argmin([e.size for e in sample_nums]) + small_epoch_indices = sample_nums[small_idx] + _check_option("method", method, ["mintime", "truncate", "random"]) indices = list() - for e in event_times: - if method == 'mintime': - mask = _minimize_time_diff(small_e_times, e) - else: - mask = np.ones(e.shape[0], dtype=bool) - mask[small_e_times.shape[0]:] = False + for event in sample_nums: + if method == 
"mintime": + mask = _minimize_time_diff(small_epoch_indices, event) + elif method == "truncate": + mask = np.ones(event.size, dtype=bool) + mask[small_epoch_indices.size :] = False + elif method == "random": + rng = check_random_state(random_state) + mask = np.zeros(event.size, dtype=bool) + idx = rng.choice( + np.arange(event.size), size=small_epoch_indices.size, replace=False + ) + mask[idx] = True indices.append(np.where(np.logical_not(mask))[0]) - return indices def _minimize_time_diff(t_shorter, t_longer): """Find a boolean mask to minimize timing differences.""" - from scipy.interpolate import interp1d keep = np.ones((len(t_longer)), dtype=bool) # special case: length zero or one if len(t_shorter) < 2: # interp1d won't work @@ -2968,12 +3929,11 @@ def _minimize_time_diff(t_shorter, t_longer): idx = np.argmin(np.abs(t_longer - t_shorter)) keep[idx] = True return keep - scores = np.ones((len(t_longer))) + scores = np.ones(len(t_longer)) x1 = np.arange(len(t_shorter)) # The first set of keep masks to test kwargs = dict(copy=False, bounds_error=False, assume_sorted=True) - shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1], - **kwargs) + shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1], **kwargs) for ii in range(len(t_longer) - len(t_shorter)): scores.fill(np.inf) # set up the keep masks to test, eliminating any rows that are already @@ -2983,9 +3943,9 @@ def _minimize_time_diff(t_shorter, t_longer): # Check every possible removal to see if it minimizes x2 = np.arange(len(t_longer) - ii - 1) t_keeps = np.array([t_longer[km] for km in keep_mask]) - longer_interp = interp1d(x2, t_keeps, axis=1, - fill_value=t_keeps[:, -1], - **kwargs) + longer_interp = interp1d( + x2, t_keeps, axis=1, fill_value=t_keeps[:, -1], **kwargs + ) d1 = longer_interp(x1) - t_shorter d2 = shorter_interp(x2) - t_keeps scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1) @@ -2994,40 +3954,81 @@ def _minimize_time_diff(t_shorter, t_longer): @verbose -def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False, - ignore_chs=[], verbose=None): +def _is_good( + e, + ch_names, + channel_type_idx, + reject, + flat, + full_report=False, + ignore_chs=(), + verbose=None, +): """Test if data segment e is good according to reject and flat. + The reject and flat parameters can accept functions as values. + If full_report=True, it will give True/False as well as a list of all offending channels. 
""" bad_tuple = tuple() has_printed = False checkable = np.ones(len(ch_names), dtype=bool) - checkable[np.array([c in ignore_chs - for c in ch_names], dtype=bool)] = False - for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']): + checkable[np.array([c in ignore_chs for c in ch_names], dtype=bool)] = False + + for refl, f, t in zip([reject, flat], [np.greater, np.less], ["", "flat"]): if refl is not None: - for key, thresh in refl.items(): + for key, refl in refl.items(): + criterion = refl idx = channel_type_idx[key] name = key.upper() if len(idx) > 0: e_idx = e[idx] - deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1) checkable_idx = checkable[idx] - idx_deltas = np.where(np.logical_and(f(deltas, thresh), - checkable_idx))[0] + # Check if criterion is a function and apply it + if callable(criterion): + result = criterion(e_idx) + _validate_type(result, tuple, "reject/flat output") + if len(result) != 2: + raise TypeError( + "Function criterion must return a tuple of length 2" + ) + cri_truth, reasons = result + _validate_type(cri_truth, (bool, np.bool_), cri_truth, "bool") + _validate_type( + reasons, (str, list, tuple), reasons, "str, list, or tuple" + ) + idx_deltas = np.where(np.logical_and(cri_truth, checkable_idx))[ + 0 + ] + else: + deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1) + idx_deltas = np.where( + np.logical_and(f(deltas, criterion), checkable_idx) + )[0] if len(idx_deltas) > 0: - bad_names = [ch_names[idx[i]] for i in idx_deltas] - if (not has_printed): - logger.info(' Rejecting %s epoch based on %s : ' - '%s' % (t, name, bad_names)) - has_printed = True - if not full_report: - return False + # Check to verify that refl is a callable that returns + # (bool, reason). Reason must be a str/list/tuple. + # If using tuple + if callable(refl): + if isinstance(reasons, str): + reasons = (reasons,) + for idx, reason in enumerate(reasons): + _validate_type(reason, str, reason) + bad_tuple += tuple(reasons) else: - bad_tuple += tuple(bad_names) + bad_names = [ch_names[idx[i]] for i in idx_deltas] + if not has_printed: + logger.info( + f" Rejecting {t} epoch based on {name} : " + f"{bad_names}" + ) + has_printed = True + if not full_report: + return False + else: + bad_tuple += tuple(bad_names) if not full_report: return True @@ -3046,13 +4047,20 @@ def _read_one_epoch_file(f, tree, preload): # read in the Annotations if they exist annotations = _read_annotations_fif(fid, tree) - events, mappings = _read_events_fif(fid, tree) - + try: + events, mappings = _read_events_fif(fid, tree) + except ValueError as e: + # Allow reading empty epochs (ToDo: Maybe not anymore in the future) + if str(e) == "Could not find any events": + events = np.empty((0, 3), dtype=np.int32) + mappings = dict() + else: + raise # Metadata metadata = None metadata_tree = dir_tree_find(tree, FIFF.FIFFB_MNE_METADATA) if len(metadata_tree) > 0: - for dd in metadata_tree[0]['directory']: + for dd in metadata_tree[0]["directory"]: kind = dd.kind pos = dd.pos if kind == FIFF.FIFF_DESCRIPTION: @@ -3064,7 +4072,7 @@ def _read_one_epoch_file(f, tree, preload): processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA) del meas if len(processed) == 0: - raise ValueError('Could not find processed data') + raise ValueError("Could not find processed data") epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS) if len(epochs_node) == 0: @@ -3074,7 +4082,7 @@ def _read_one_epoch_file(f, tree, preload): if len(epochs_node) == 0: epochs_node = dir_tree_find(tree, 122) # 122 used before v0.11 if 
len(epochs_node) == 0: - raise ValueError('Could not find epochs data') + raise ValueError("Could not find epochs data") my_epochs = epochs_node[0] @@ -3087,29 +4095,28 @@ def _read_one_epoch_file(f, tree, preload): drop_log = None raw_sfreq = None reject_params = {} - for k in range(my_epochs['nent']): - kind = my_epochs['directory'][k].kind - pos = my_epochs['directory'][k].pos + for k in range(my_epochs["nent"]): + kind = my_epochs["directory"][k].kind + pos = my_epochs["directory"][k].pos if kind == FIFF.FIFF_FIRST_SAMPLE: tag = read_tag(fid, pos) - first = int(tag.data) + first = int(tag.data.item()) elif kind == FIFF.FIFF_LAST_SAMPLE: tag = read_tag(fid, pos) - last = int(tag.data) + last = int(tag.data.item()) elif kind == FIFF.FIFF_EPOCH: # delay reading until later fid.seek(pos, 0) - data_tag = read_tag_info(fid) - data_tag.pos = pos + data_tag = _read_tag_header(fid, pos) data_tag.type = data_tag.type ^ (1 << 30) elif kind in [FIFF.FIFF_MNE_BASELINE_MIN, 304]: # Constant 304 was used before v0.11 tag = read_tag(fid, pos) - bmin = float(tag.data) + bmin = float(tag.data.item()) elif kind in [FIFF.FIFF_MNE_BASELINE_MAX, 305]: # Constant 305 was used before v0.11 tag = read_tag(fid, pos) - bmax = float(tag.data) + bmax = float(tag.data.item()) elif kind == FIFF.FIFF_MNE_EPOCHS_SELECTION: tag = read_tag(fid, pos) selection = np.array(tag.data) @@ -3129,44 +4136,52 @@ def _read_one_epoch_file(f, tree, preload): baseline = (bmin, bmax) n_samp = last - first + 1 - logger.info(' Found the data of interest:') - logger.info(' t = %10.2f ... %10.2f ms' - % (1000 * first / info['sfreq'], - 1000 * last / info['sfreq'])) - if info['comps'] is not None: - logger.info(' %d CTF compensation matrices available' - % len(info['comps'])) + logger.info(" Found the data of interest:") + logger.info( + f" t = {1000 * first / info['sfreq']:10.2f} ... " + f"{1000 * last / info['sfreq']:10.2f} ms" + ) + if info["comps"] is not None: + logger.info( + f" {len(info['comps'])} CTF compensation matrices available" + ) # Inspect the data if data_tag is None: - raise ValueError('Epochs data not found') - epoch_shape = (len(info['ch_names']), n_samp) + raise ValueError("Epochs data not found") + epoch_shape = (len(info["ch_names"]), n_samp) size_expected = len(events) * np.prod(epoch_shape) # on read double-precision is always used if data_tag.type == FIFF.FIFFT_FLOAT: datatype = np.float64 - fmt = '>f4' + fmt = ">f4" elif data_tag.type == FIFF.FIFFT_DOUBLE: datatype = np.float64 - fmt = '>f8' + fmt = ">f8" elif data_tag.type == FIFF.FIFFT_COMPLEX_FLOAT: datatype = np.complex128 - fmt = '>c8' + fmt = ">c8" elif data_tag.type == FIFF.FIFFT_COMPLEX_DOUBLE: datatype = np.complex128 - fmt = '>c16' + fmt = ">c16" fmt_itemsize = np.dtype(fmt).itemsize assert fmt_itemsize in (4, 8, 16) size_actual = data_tag.size // fmt_itemsize - 16 // fmt_itemsize if not size_actual == size_expected: - raise ValueError('Incorrect number of samples (%d instead of %d)' - % (size_actual, size_expected)) + raise ValueError( + f"Incorrect number of samples ({size_actual} instead of " + f"{size_expected})." 
+ ) # Calibration factors - cals = np.array([[info['chs'][k]['cal'] * - info['chs'][k].get('scale', 1.0)] - for k in range(info['nchan'])], np.float64) + cals = np.array( + [ + [info["chs"][k]["cal"] * info["chs"][k].get("scale", 1.0)] + for k in range(info["nchan"]) + ], + np.float64, + ) # Read the data if preload: @@ -3174,10 +4189,13 @@ def _read_one_epoch_file(f, tree, preload): data *= cals # Put it all together - tmin = first / info['sfreq'] - tmax = last / info['sfreq'] - event_id = ({str(e): e for e in np.unique(events[:, 2])} - if mappings is None else mappings) + tmin = first / info["sfreq"] + tmax = last / info["sfreq"] + event_id = ( + {str(e): e for e in np.unique(events[:, 2])} + if mappings is None + else mappings + ) # In case epochs didn't have a FIFF.FIFF_MNE_EPOCHS_SELECTION tag # (version < 0.8): if selection is None: @@ -3185,13 +4203,29 @@ def _read_one_epoch_file(f, tree, preload): if drop_log is None: drop_log = ((),) * len(events) - return (info, data, data_tag, events, event_id, metadata, tmin, tmax, - baseline, selection, drop_log, epoch_shape, cals, reject_params, - fmt, annotations, raw_sfreq) + return ( + info, + data, + data_tag, + events, + event_id, + metadata, + tmin, + tmax, + baseline, + selection, + drop_log, + epoch_shape, + cals, + reject_params, + fmt, + annotations, + raw_sfreq, + ) @verbose -def read_epochs(fname, proj=True, preload=True, verbose=None): +def read_epochs(fname, proj=True, preload=True, verbose=None) -> "EpochsFIF": """Read epochs from a fif file. Parameters @@ -3211,11 +4245,10 @@ def read_epochs(fname, proj=True, preload=True, verbose=None): return EpochsFIF(fname, proj, preload, verbose) -class _RawContainer(object): +class _RawContainer: """Helper for a raw data container.""" - def __init__(self, fid, data_tag, event_samps, epoch_shape, - cals, fmt): # noqa: D102 + def __init__(self, fid, data_tag, event_samps, epoch_shape, cals, fmt): self.fid = fid self.data_tag = data_tag self.event_samps = event_samps @@ -3249,36 +4282,53 @@ class EpochsFIF(BaseEpochs): """ @verbose - def __init__(self, fname, proj=True, preload=True, - verbose=None): # noqa: D102 + def __init__(self, fname, proj=True, preload=True, verbose=None): + from .io.base import _get_fname_rep + if _path_like(fname): check_fname( - fname=fname, filetype='epochs', - endings=('-epo.fif', '-epo.fif.gz', '_epo.fif', '_epo.fif.gz') - ) - fname = str( - _check_fname(fname=fname, must_exist=True, overwrite="read") + fname=fname, + filetype="epochs", + endings=("-epo.fif", "-epo.fif.gz", "_epo.fif", "_epo.fif.gz"), ) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") elif not preload: - raise ValueError('preload must be used with file-like objects') + raise ValueError("preload must be used with file-like objects") fnames = [fname] + fname_rep = _get_fname_rep(fname) ep_list = list() raw = list() for fname in fnames: - fname_rep = _get_fname_rep(fname) - logger.info('Reading %s ...' 
% fname_rep) + logger.info(f"Reading {fname_rep} ...") fid, tree, _ = fiff_open(fname, preload=preload) next_fname = _get_next_fname(fid, fname, tree) - (info, data, data_tag, events, event_id, metadata, tmin, tmax, - baseline, selection, drop_log, epoch_shape, cals, - reject_params, fmt, annotations, raw_sfreq) = \ - _read_one_epoch_file(fid, tree, preload) + ( + info, + data, + data_tag, + events, + event_id, + metadata, + tmin, + tmax, + baseline, + selection, + drop_log, + epoch_shape, + cals, + reject_params, + fmt, + annotations, + raw_sfreq, + ) = _read_one_epoch_file(fid, tree, preload) if (events[:, 0] < 0).any(): events = events.copy() - warn('Incorrect events detected on disk, setting event ' - 'numbers to consecutive increasing integers') + warn( + "Incorrect events detected on disk, setting event " + "numbers to consecutive increasing integers" + ) events[:, 0] = np.arange(1, len(events) + 1) # here we ignore missing events, since users should already be # aware of missing events if they have saved data that way @@ -3286,35 +4336,63 @@ def __init__(self, fname, proj=True, preload=True, # correction (data is being baseline-corrected when written to # disk) epoch = BaseEpochs( - info, data, events, event_id, tmin, tmax, + info, + data, + events, + event_id, + tmin, + tmax, baseline=None, - metadata=metadata, on_missing='ignore', - selection=selection, drop_log=drop_log, - proj=False, verbose=False, raw_sfreq=raw_sfreq) + metadata=metadata, + on_missing="ignore", + selection=selection, + drop_log=drop_log, + proj=False, + verbose=False, + raw_sfreq=raw_sfreq, + ) epoch.baseline = baseline epoch._do_baseline = False # might be superfluous but won't hurt ep_list.append(epoch) if not preload: # store everything we need to index back to the original data - raw.append(_RawContainer(fiff_open(fname)[0], data_tag, - events[:, 0].copy(), epoch_shape, - cals, fmt)) + raw.append( + _RawContainer( + fiff_open(fname)[0], + data_tag, + events[:, 0].copy(), + epoch_shape, + cals, + fmt, + ) + ) if next_fname is not None: fnames.append(next_fname) unsafe_annot_add = raw_sfreq is None - (info, data, raw_sfreq, events, event_id, tmin, tmax, metadata, - baseline, selection, drop_log) = _concatenate_epochs( + ( + info, + data, + raw_sfreq, + events, + event_id, + tmin, + tmax, + metadata, + baseline, + selection, + drop_log, + ) = _concatenate_epochs( ep_list, with_data=preload, add_offset=False, - on_mismatch='raise', + on_mismatch="raise", ) # we need this uniqueness for non-preloaded data to work properly if len(np.unique(events[:, 0])) != len(events): - raise RuntimeError('Event time samples were not unique') + raise RuntimeError("Event time samples were not unique") # correct the drop log assert len(drop_log) % len(fnames) == 0 @@ -3324,20 +4402,34 @@ def __init__(self, fname, proj=True, preload=True, for i1, i2 in zip(offsets[:-1], offsets[1:]): other_log = drop_log[i1:i2] for k, (a, b) in enumerate(zip(drop_log, other_log)): - if a == ('IGNORED',) and b != ('IGNORED',): + if a == ("IGNORED",) and b != ("IGNORED",): drop_log[k] = b drop_log = tuple(drop_log[:step]) # call BaseEpochs constructor # again, ensure we're retaining the baseline period originally loaded # from disk without trying to re-apply baseline correction - super(EpochsFIF, self).__init__( - info, data, events, event_id, tmin, tmax, - baseline=None, raw=raw, - proj=proj, preload_at_end=False, on_missing='ignore', - selection=selection, drop_log=drop_log, filename=fname_rep, - metadata=metadata, verbose=verbose, 
raw_sfreq=raw_sfreq, - annotations=annotations, **reject_params) + super().__init__( + info, + data, + events, + event_id, + tmin, + tmax, + baseline=None, + raw=raw, + proj=proj, + preload_at_end=False, + on_missing="ignore", + selection=selection, + drop_log=drop_log, + filename=fname_rep, + metadata=metadata, + verbose=verbose, + raw_sfreq=raw_sfreq, + annotations=annotations, + **reject_params, + ) self.baseline = baseline self._do_baseline = False # use the private property instead of drop_bad so that epochs @@ -3362,8 +4454,9 @@ def _get_epoch_from_raw(self, idx, verbose=None): break else: # read the correct subset of the data - raise RuntimeError('Correct epoch could not be found, please ' - 'contact mne-python developers') + raise RuntimeError( + "Correct epoch could not be found, please contact mne-python developers" + ) # the following is equivalent to this, but faster: # # >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float) @@ -3373,10 +4466,10 @@ def _get_epoch_from_raw(self, idx, verbose=None): # Eventually this could be refactored in io/tag.py if other functions # could make use of it raw.fid.seek(raw.data_tag.pos + offset, 0) - if fmt == '>c8': - read_fmt = '>f4' - elif fmt == '>c16': - read_fmt = '>f8' + if fmt == ">c8": + read_fmt = ">f4" + elif fmt == ">c16": + read_fmt = ">f8" else: read_fmt = fmt data = np.frombuffer(raw.fid.read(size), read_fmt) @@ -3407,9 +4500,11 @@ def bootstrap(epochs, random_state=None): The bootstrap samples """ if not epochs.preload: - raise RuntimeError('Modifying data of epochs is only supported ' - 'when preloading is used. Use preload=True ' - 'in the constructor.') + raise RuntimeError( + "Modifying data of epochs is only supported " + "when preloading is used. Use preload=True " + "in the constructor." 
+ ) rng = check_random_state(random_state) epochs_bootstrap = epochs.copy() @@ -3419,39 +4514,32 @@ def bootstrap(epochs, random_state=None): return epochs_bootstrap -def _check_merge_epochs(epochs_list): - """Aux function.""" - if len({tuple(epochs.event_id.items()) for epochs in epochs_list}) != 1: - raise NotImplementedError("Epochs with unequal values for event_id") - if len({epochs.tmin for epochs in epochs_list}) != 1: - raise NotImplementedError("Epochs with unequal values for tmin") - if len({epochs.tmax for epochs in epochs_list}) != 1: - raise NotImplementedError("Epochs with unequal values for tmax") - if len({epochs.baseline for epochs in epochs_list}) != 1: - raise NotImplementedError("Epochs with unequal values for baseline") - - -def _concatenate_epochs(epochs_list, *, with_data=True, add_offset=True, - on_mismatch='raise'): +def _concatenate_epochs( + epochs_list, *, with_data=True, add_offset=True, on_mismatch="raise" +): """Auxiliary function for concatenating epochs.""" - if not isinstance(epochs_list, (list, tuple)): - raise TypeError('epochs_list must be a list or tuple, got %s' - % (type(epochs_list),)) + if not isinstance(epochs_list, list | tuple): + raise TypeError(f"epochs_list must be a list or tuple, got {type(epochs_list)}") # to make warning messages only occur once during concatenation warned = False for ei, epochs in enumerate(epochs_list): if not isinstance(epochs, BaseEpochs): - raise TypeError('epochs_list[%d] must be an instance of Epochs, ' - 'got %s' % (ei, type(epochs))) + raise TypeError( + f"epochs_list[{ei}] must be an instance of Epochs, got {type(epochs)}" + ) - if (getattr(epochs, 'annotations', None) is not None and - len(epochs.annotations) > 0 and - not warned): + if ( + getattr(epochs, "annotations", None) is not None + and len(epochs.annotations) > 0 + and not warned + ): warned = True - warn('Concatenation of Annotations within Epochs is not supported ' - 'yet. All annotations will be dropped.') + warn( + "Concatenation of Annotations within Epochs is not supported yet. All " + "annotations will be dropped." + ) # create a copy, so that the Annotations are not modified in place # from the original object @@ -3471,40 +4559,47 @@ def _concatenate_epochs(epochs_list, *, with_data=True, add_offset=True, event_id = deepcopy(out.event_id) selection = out.selection # offset is the last epoch + tmax + 10 second - shift = int((10 + tmax) * out.info['sfreq']) - events_offset = int(np.max(events[0][:, 0])) + shift + shift = np.int64((10 + tmax) * out.info["sfreq"]) + # Allow reading empty epochs (ToDo: Maybe not anymore in the future) + if out._allow_empty: + events_offset = 0 + else: + events_offset = int(np.max(events[0][:, 0])) + shift + events_offset = np.int64(events_offset) events_overflow = False warned = False for ii, epochs in enumerate(epochs_list[1:], 1): - _ensure_infos_match(epochs.info, info, f'epochs[{ii}]', - on_mismatch=on_mismatch) + _ensure_infos_match(epochs.info, info, f"epochs[{ii}]", on_mismatch=on_mismatch) if not np.allclose(epochs.times, epochs_list[0].times): - raise ValueError('Epochs must have same times') + raise ValueError("Epochs must have same times") if epochs.baseline != baseline: - raise ValueError('Baseline must be same for all epochs') + raise ValueError("Baseline must be same for all epochs") if epochs._raw_sfreq != raw_sfreq and not warned: warned = True - warn('The original raw sampling rate of the Epochs does not ' - 'match for all Epochs. 
Please proceed cautiously.') + warn( + "The original raw sampling rate of the Epochs does not " + "match for all Epochs. Please proceed cautiously." + ) # compare event_id common_keys = list(set(event_id).intersection(set(epochs.event_id))) for key in common_keys: if not event_id[key] == epochs.event_id[key]: - msg = ('event_id values must be the same for identical keys ' - 'for all concatenated epochs. Key "{}" maps to {} in ' - 'some epochs and to {} in others.') - raise ValueError(msg.format(key, event_id[key], - epochs.event_id[key])) + msg = ( + "event_id values must be the same for identical keys " + 'for all concatenated epochs. Key "{}" maps to {} in ' + "some epochs and to {} in others." + ) + raise ValueError(msg.format(key, event_id[key], epochs.event_id[key])) if with_data: epochs.drop_bad() offsets.append(len(epochs)) evs = epochs.events.copy() if len(epochs.events) == 0: - warn('One of the Epochs objects to concatenate was empty.') + warn("One of the Epochs objects to concatenate was empty.") elif add_offset: # We need to cast to a native Python int here to detect an # overflow of a numpy int32 (which is the default on windows) @@ -3512,9 +4607,11 @@ def _concatenate_epochs(epochs_list, *, with_data=True, add_offset=True, evs[:, 0] += events_offset events_offset += max_timestamp + shift if events_offset > INT32_MAX: - warn(f'Event number greater than {INT32_MAX} created, ' - 'events[:, 0] will be assigned consecutive increasing ' - 'integer values') + warn( + f"Event number greater than {INT32_MAX} created, " + "events[:, 0] will be assigned consecutive increasing " + "integer values" + ) events_overflow = True add_offset = False # we no longer need to add offset events.append(evs) @@ -3532,9 +4629,10 @@ def _concatenate_epochs(epochs_list, *, with_data=True, add_offset=True, if n_have == 0: metadata = None elif n_have != len(metadata): - raise ValueError('%d of %d epochs instances have metadata, either ' - 'all or none must have metadata' - % (n_have, len(metadata))) + raise ValueError( + f"{n_have} of {len(metadata)} epochs instances have metadata, either " + "all or none must have metadata" + ) else: pd = _check_pandas_installed(strict=False) if pd is not False: @@ -3546,19 +4644,32 @@ def _concatenate_epochs(epochs_list, *, with_data=True, add_offset=True, if with_data: offsets = np.cumsum(offsets) for start, stop, epochs in zip(offsets[:-1], offsets[1:], epochs_list): - this_data = epochs.get_data() + this_data = epochs.get_data(copy=False) if data is None: data = np.empty( (offsets[-1], len(out.ch_names), len(out.times)), - dtype=this_data.dtype) + dtype=this_data.dtype, + ) data[start:stop] = this_data - return (info, data, raw_sfreq, events, event_id, tmin, tmax, metadata, - baseline, selection, drop_log) + return ( + info, + data, + raw_sfreq, + events, + event_id, + tmin, + tmax, + metadata, + baseline, + selection, + drop_log, + ) @verbose -def concatenate_epochs(epochs_list, add_offset=True, *, on_mismatch='raise', - verbose=None): +def concatenate_epochs( + epochs_list, add_offset=True, *, on_mismatch="raise", verbose=None +): """Concatenate a list of `~mne.Epochs` into one `~mne.Epochs` object. .. note:: Unlike `~mne.concatenate_raws`, this function does **not** @@ -3587,8 +4698,19 @@ def concatenate_epochs(epochs_list, add_offset=True, *, on_mismatch='raise', ----- .. 
versionadded:: 0.9.0 """ - (info, data, raw_sfreq, events, event_id, tmin, tmax, metadata, - baseline, selection, drop_log) = _concatenate_epochs( + ( + info, + data, + raw_sfreq, + events, + event_id, + tmin, + tmax, + metadata, + baseline, + selection, + drop_log, + ) = _concatenate_epochs( epochs_list, with_data=True, add_offset=add_offset, @@ -3596,19 +4718,39 @@ def concatenate_epochs(epochs_list, add_offset=True, *, on_mismatch='raise', ) selection = np.where([len(d) == 0 for d in drop_log])[0] out = EpochsArray( - data=data, info=info, events=events, event_id=event_id, - tmin=tmin, baseline=baseline, selection=selection, drop_log=drop_log, - proj=False, on_missing='ignore', metadata=metadata, - raw_sfreq=raw_sfreq) + data=data, + info=info, + events=events, + event_id=event_id, + tmin=tmin, + baseline=baseline, + selection=selection, + drop_log=drop_log, + proj=False, + on_missing="ignore", + metadata=metadata, + raw_sfreq=raw_sfreq, + ) out.drop_bad() return out @verbose -def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, - origin='auto', weight_all=True, int_order=8, ext_order=3, - destination=None, ignore_ref=False, return_mapping=False, - mag_scale=100., verbose=None): +def average_movements( + epochs, + head_pos=None, + orig_sfreq=None, + picks=None, + origin="auto", + weight_all=True, + int_order=8, + ext_order=3, + destination=None, + ignore_ref=False, + return_mapping=False, + mag_scale=100.0, + verbose=None, +): """Average data using Maxwell filtering, transforming using head positions. Parameters @@ -3669,36 +4811,47 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, of children in MEG: Quantification, effects on source estimation, and compensation. NeuroImage 40:541–550, 2008. """ # noqa: E501 - from .preprocessing.maxwell import (_trans_sss_basis, _reset_meg_bads, - _check_usable, _col_norm_pinv, - _get_n_moments, _get_mf_picks_fix_mags, - _prep_mf_coils, _check_destination, - _remove_meg_projs, _get_coil_scale) + from .preprocessing.maxwell import ( + _check_destination, + _check_usable, + _col_norm_pinv, + _get_coil_scale, + _get_mf_picks_fix_mags, + _get_n_moments, + _get_sensor_operator, + _prep_mf_coils, + _remove_meg_projs_comps, + _reset_meg_bads, + _trans_sss_basis, + ) + if head_pos is None: - raise TypeError('head_pos must be provided and cannot be None') + raise TypeError("head_pos must be provided and cannot be None") from .chpi import head_pos_to_trans_rot_t + if not isinstance(epochs, BaseEpochs): - raise TypeError('epochs must be an instance of Epochs, not %s' - % (type(epochs),)) - orig_sfreq = epochs.info['sfreq'] if orig_sfreq is None else orig_sfreq + raise TypeError(f"epochs must be an instance of Epochs, not {type(epochs)}") + orig_sfreq = epochs.info["sfreq"] if orig_sfreq is None else orig_sfreq orig_sfreq = float(orig_sfreq) if isinstance(head_pos, np.ndarray): head_pos = head_pos_to_trans_rot_t(head_pos) trn, rot, t = head_pos del head_pos - _check_usable(epochs) - origin = _check_origin(origin, epochs.info, 'head') - recon_trans = _check_destination(destination, epochs.info, True) + _check_usable(epochs, ignore_ref) + origin = _check_origin(origin, epochs.info, "head") + recon_trans = _check_destination(destination, epochs.info, "head") - logger.info('Aligning and averaging up to %s epochs' - % (len(epochs.events))) + logger.info(f"Aligning and averaging up to {len(epochs.events)} epochs") if not np.array_equal(epochs.events[:, 0], np.unique(epochs.events[:, 0])): - raise RuntimeError('Epochs must 
have monotonically increasing events') + raise RuntimeError("Epochs must have monotonically increasing events") info_to = epochs.info.copy() - meg_picks, mag_picks, grad_picks, good_mask, _ = \ - _get_mf_picks_fix_mags(info_to, int_order, ext_order, ignore_ref) + meg_picks, mag_picks, grad_picks, good_mask, _ = _get_mf_picks_fix_mags( + info_to, int_order, ext_order, ignore_ref + ) coil_scale, mag_scale = _get_coil_scale( - meg_picks, mag_picks, grad_picks, mag_scale, info_to) + meg_picks, mag_picks, grad_picks, mag_scale, info_to + ) + mult = _get_sensor_operator(epochs, meg_picks) n_channels, n_times = len(epochs.ch_names), len(epochs.times) other_picks = np.setdiff1d(np.arange(n_channels), meg_picks) data = np.zeros((n_channels, n_times)) @@ -3710,37 +4863,36 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, # remove MEG bads in "to" info _reset_meg_bads(info_to) # set up variables - w_sum = 0. + w_sum = 0.0 n_in, n_out = _get_n_moments([int_order, ext_order]) - S_decomp = 0. # this will end up being a weighted average + S_decomp = 0.0 # this will end up being a weighted average last_trans = None decomp_coil_scale = coil_scale[good_mask] - exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True, - origin=origin) + exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True, origin=origin) n_in = _get_n_moments(int_order) for ei, epoch in enumerate(epochs): event_time = epochs.events[epochs._current - 1, 0] / orig_sfreq use_idx = np.where(t <= event_time)[0] if len(use_idx) == 0: - trans = info_to['dev_head_t']['trans'] + trans = info_to["dev_head_t"]["trans"] else: use_idx = use_idx[-1] - trans = np.vstack([np.hstack([rot[use_idx], trn[[use_idx]].T]), - [[0., 0., 0., 1.]]]) - loc_str = ', '.join('%0.1f' % tr for tr in (trans[:3, 3] * 1000)) + trans = np.vstack( + [np.hstack([rot[use_idx], trn[[use_idx]].T]), [[0.0, 0.0, 0.0, 1.0]]] + ) + loc_str = ", ".join(f"{tr:0.1f}" for tr in (trans[:3, 3] * 1000)) if last_trans is None or not np.allclose(last_trans, trans): - logger.info(' Processing epoch %s (device location: %s mm)' - % (ei + 1, loc_str)) + logger.info( + f" Processing epoch {ei + 1} (device location: {loc_str} mm)" + ) reuse = False last_trans = trans else: - logger.info(' Processing epoch %s (device location: same)' - % (ei + 1,)) + logger.info(f" Processing epoch {ei + 1} (device location: same)") reuse = True epoch = epoch.copy() # because we operate inplace if not reuse: - S = _trans_sss_basis(exp, all_coils, trans, - coil_scale=decomp_coil_scale) + S = _trans_sss_basis(exp, all_coils, trans, coil_scale=decomp_coil_scale) # Get the weight from the un-regularized version (eq. 44) weight = np.linalg.norm(S[:, :n_in]) # XXX Eventually we could do cross-talk and fine-cal here @@ -3761,9 +4913,12 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, S_decomp /= w_sum # Get recon matrix # (We would need to include external here for regularization to work) - exp['ext_order'] = 0 + exp["ext_order"] = 0 S_recon = _trans_sss_basis(exp, all_coils_recon, recon_trans) - exp['ext_order'] = ext_order + if mult is not None: + S_decomp = mult @ S_decomp + S_recon = mult @ S_recon + exp["ext_order"] = ext_order # We could determine regularization on basis of destination basis # matrix, restricted to good channels, as regularizing individual # matrices within the loop above does not seem to work. 
But in @@ -3777,19 +4932,26 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, mapping = np.dot(S_recon, pS_ave) # Apply mapping data[meg_picks] = np.dot(mapping, data[meg_picks[good_mask]]) - info_to['dev_head_t'] = recon_trans # set the reconstruction transform - evoked = epochs._evoked_from_epoch_data(data, info_to, picks, - n_events=count, kind='average', - comment=epochs._name) - _remove_meg_projs(evoked) # remove MEG projectors, they won't apply now - logger.info('Created Evoked dataset from %s epochs' % (count,)) + info_to["dev_head_t"] = recon_trans # set the reconstruction transform + evoked = epochs._evoked_from_epoch_data( + data, info_to, picks, n_events=count, kind="average", comment=epochs._name + ) + _remove_meg_projs_comps(evoked, ignore_ref) + logger.info(f"Created Evoked dataset from {count} epochs") return (evoked, mapping) if return_mapping else evoked @verbose -def make_fixed_length_epochs(raw, duration=1., preload=False, - reject_by_annotation=True, proj=True, overlap=0., - id=1, verbose=None): +def make_fixed_length_epochs( + raw, + duration=1.0, + preload=False, + reject_by_annotation=True, + proj=True, + overlap=0.0, + id=1, # noqa: A002 + verbose=None, +): """Divide continuous raw data into equal-sized consecutive epochs. Parameters @@ -3825,10 +4987,17 @@ def make_fixed_length_epochs(raw, duration=1., preload=False, ----- .. versionadded:: 0.20 """ - events = make_fixed_length_events(raw, id=id, duration=duration, - overlap=overlap) - delta = 1. / raw.info['sfreq'] - return Epochs(raw, events, event_id=[id], tmin=0, tmax=duration - delta, - baseline=None, preload=preload, - reject_by_annotation=reject_by_annotation, proj=proj, - verbose=verbose) + events = make_fixed_length_events(raw, id=id, duration=duration, overlap=overlap) + delta = 1.0 / raw.info["sfreq"] + return Epochs( + raw, + events, + event_id=[id], + tmin=0, + tmax=duration - delta, + baseline=None, + preload=preload, + reject_by_annotation=reject_by_annotation, + proj=proj, + verbose=verbose, + ) diff --git a/mne/event.py b/mne/event.py index 104897014d9..ba2c4f0120c 100644 --- a/mne/event.py +++ b/mne/event.py @@ -1,26 +1,35 @@ """IO with fif files containing events.""" -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Teon Brooks -# Clement Moutard -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from collections.abc import Sequence from pathlib import Path import numpy as np -from .utils import (check_fname, logger, verbose, _get_stim_channel, warn, - _validate_type, _check_option, fill_doc, _check_fname, - _on_missing, _check_on_missing) -from .io.constants import FIFF -from .io.tree import dir_tree_find -from .io.tag import read_tag -from .io.open import fiff_open -from .io.write import write_int, start_block, start_and_end_file, end_block -from .io.pick import pick_channels +from ._fiff.constants import FIFF +from ._fiff.open import fiff_open +from ._fiff.pick import pick_channels +from ._fiff.tag import read_tag +from ._fiff.tree import dir_tree_find +from ._fiff.write import end_block, start_and_end_file, start_block, write_int +from .utils import ( + _check_fname, + _check_integer_or_list, + _check_on_missing, + _check_option, + _get_stim_channel, + _on_missing, + _pl, + _validate_type, + check_fname, + fill_doc, + logger, + verbose, + warn, +) @fill_doc @@ -49,8 +58,7 @@ def pick_events(events, include=None, exclude=None, step=False): The list of events. 
""" if include is not None: - if not isinstance(include, list): - include = [include] + include = _check_integer_or_list(include, "include") mask = np.zeros(len(events), dtype=bool) for e in include: mask = np.logical_or(mask, events[:, 2] == e) @@ -58,8 +66,7 @@ def pick_events(events, include=None, exclude=None, step=False): mask = np.logical_or(mask, events[:, 1] == e) events = events[mask] elif exclude is not None: - if not isinstance(exclude, list): - exclude = [exclude] + exclude = _check_integer_or_list(exclude, "exclude") mask = np.ones(len(events), dtype=bool) for e in exclude: mask = np.logical_and(mask, events[:, 2] != e) @@ -75,8 +82,9 @@ def pick_events(events, include=None, exclude=None, step=False): return events -def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax, - new_id=None, fill_na=None): +def define_target_events( + events, reference_id, target_id, sfreq, tmin, tmax, new_id=None, fill_na=None +): """Define new events by co-occurrence of existing events. This function can be used to evaluate events depending on the @@ -125,8 +133,11 @@ def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax, if event[2] == reference_id: lower = event[0] + imin upper = event[0] + imax - res = events[(events[:, 0] > lower) & - (events[:, 0] < upper) & (events[:, 2] == target_id)] + res = events[ + (events[:, 0] > lower) + & (events[:, 0] < upper) + & (events[:, 2] == target_id) + ] if res.any(): lag += [event[0] - res[0][0]] event[2] = new_id @@ -138,8 +149,8 @@ def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax, new_events = np.array(new_events) - with np.errstate(invalid='ignore'): # casting nans - lag = np.abs(lag, dtype='f8') + with np.errstate(invalid="ignore"): # casting nans + lag = np.abs(lag, dtype="f8") if lag.any(): lag *= tsample else: @@ -155,35 +166,35 @@ def _read_events_fif(fid, tree): if len(events) == 0: fid.close() - raise ValueError('Could not find event data') + raise ValueError("Could not find event data") events = events[0] event_list = None event_id = None - for d in events['directory']: + for d in events["directory"]: kind = d.kind pos = d.pos if kind == FIFF.FIFF_MNE_EVENT_LIST: tag = read_tag(fid, pos) event_list = tag.data - event_list.shape = (-1, 3) break if event_list is None: - raise ValueError('Could not find any events') - for d in events['directory']: + raise ValueError("Could not find any events") + else: + event_list.shape = (-1, 3) + for d in events["directory"]: kind = d.kind pos = d.pos if kind == FIFF.FIFF_DESCRIPTION: tag = read_tag(fid, pos) event_id = tag.data - m_ = [[s[::-1] for s in m[::-1].split(':', 1)] - for m in event_id.split(';')] + m_ = [[s[::-1] for s in m[::-1].split(":", 1)] for m in event_id.split(";")] event_id = {k: int(v) for v, k in m_} break elif kind == FIFF.FIFF_MNE_EVENT_COMMENTS: tag = read_tag(fid, pos) event_id = tag.data - event_id = event_id.tobytes().decode('latin-1').split('\x00')[:-1] + event_id = event_id.tobytes().decode("latin-1").split("\x00")[:-1] assert len(event_id) == len(event_list) event_id = {k: v[2] for k, v in zip(event_id, event_list)} break @@ -191,8 +202,15 @@ def _read_events_fif(fid, tree): @verbose -def read_events(filename, include=None, exclude=None, mask=None, - mask_type='and', return_event_id=False, verbose=None): +def read_events( + filename, + include=None, + exclude=None, + mask=None, + mask_type="and", + return_event_id=False, + verbose=None, +): """Read :term:`events` from fif or text file. 
See :ref:`tut-events-vs-annotations` and :ref:`tut-event-arrays` @@ -247,11 +265,22 @@ def read_events(filename, include=None, exclude=None, mask=None, For more information on ``mask`` and ``mask_type``, see :func:`mne.find_events`. """ - check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz', - '-eve.lst', '-eve.txt', '_eve.fif', - '_eve.fif.gz', '_eve.lst', '_eve.txt', - '-annot.fif', # MNE-C annot - )) + check_fname( + filename, + "events", + ( + ".eve", + "-eve.fif", + "-eve.fif.gz", + "-eve.lst", + "-eve.txt", + "_eve.fif", + "_eve.fif.gz", + "_eve.lst", + "_eve.txt", + "-annot.fif", # MNE-C annot + ), + ) filename = Path(filename) if filename.suffix in (".fif", ".gz"): fid, tree, _ = fiff_open(filename) @@ -264,7 +293,7 @@ def read_events(filename, include=None, exclude=None, mask=None, # eve/lst files had a second float column that will raise errors lines = np.loadtxt(filename, dtype=np.float64).astype(int) if len(lines) == 0: - raise ValueError('No text lines found') + raise ValueError("No text lines found") if lines.ndim == 1: # Special case for only one event lines = lines[np.newaxis, :] @@ -274,13 +303,12 @@ def read_events(filename, include=None, exclude=None, mask=None, elif len(lines[0]) == 3: goods = [0, 1, 2] else: - raise ValueError('Unknown number of columns in event text file') + raise ValueError("Unknown number of columns in event text file") event_list = lines[:, goods] - if (mask is not None and event_list.shape[0] > 0 and - event_list[0, 2] == 0): + if mask is not None and event_list.shape[0] > 0 and event_list[0, 2] == 0: event_list = event_list[1:] - warn('first row of event file discarded (zero-valued)') + warn("first row of event file discarded (zero-valued)") event_id = None event_list = pick_events(event_list, include, exclude) @@ -289,12 +317,11 @@ def read_events(filename, include=None, exclude=None, mask=None, event_list = _mask_trigs(event_list, mask, mask_type) masked_len = event_list.shape[0] if masked_len < unmasked_len: - warn('{} of {} events masked'.format(unmasked_len - masked_len, - unmasked_len)) + warn(f"{unmasked_len - masked_len} of {unmasked_len} events masked") out = event_list if return_event_id: if event_id is None: - raise RuntimeError('No event_id found in the file') + raise RuntimeError("No event_id found in the file") out = (out, event_id) return out @@ -321,26 +348,38 @@ def write_events(filename, events, *, overwrite=False, verbose=None): read_events """ filename = _check_fname(filename, overwrite=overwrite) - check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz', - '-eve.lst', '-eve.txt', '_eve.fif', - '_eve.fif.gz', '_eve.lst', '_eve.txt')) - if filename.suffix in ('.fif', '.gz'): + check_fname( + filename, + "events", + ( + ".eve", + "-eve.fif", + "-eve.fif.gz", + "-eve.lst", + "-eve.txt", + "_eve.fif", + "_eve.fif.gz", + "_eve.lst", + "_eve.txt", + ), + ) + if filename.suffix in (".fif", ".gz"): # Start writing... 
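# Minimal usage sketch, assuming a hypothetical "sample-eve.fif"; it exercises the
# suffix-based FIF/text branches of read_events/write_events shown in this hunk and
# is illustrative only, not part of the change set.
import mne

events = mne.read_events("sample-eve.fif")  # (n_events, 3) integer array
mne.write_events("sample-eve.txt", events, overwrite=True)  # text branch: space-padded rows
mne.write_events("sample_copy-eve.fif", events, overwrite=True)  # FIF branch just below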
with start_and_end_file(filename) as fid: start_block(fid, FIFF.FIFFB_MNE_EVENTS) write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, events.T) end_block(fid, FIFF.FIFFB_MNE_EVENTS) else: - with open(filename, 'w') as f: + with open(filename, "w") as f: for e in events: - f.write('%6d %6d %3d\n' % tuple(e)) + f.write(f"{e[0]:6d} {e[1]:6d} {e[2]:3d}\n") def _find_stim_steps(data, first_samp, pad_start=None, pad_stop=None, merge=0): changed = np.diff(data, axis=1) != 0 idx = np.where(np.all(changed, axis=0))[0] if len(idx) == 0: - return np.empty((0, 3), dtype='int32') + return np.empty((0, 3), dtype="int32") pre_step = data[0, idx] idx += 1 @@ -361,7 +400,7 @@ def _find_stim_steps(data, first_samp, pad_start=None, pad_stop=None, merge=0): if merge != 0: diff = np.diff(steps[:, 0]) - idx = (diff <= abs(merge)) + idx = diff <= abs(merge) if np.any(idx): where = np.where(idx)[0] keep = np.logical_not(idx) @@ -374,15 +413,14 @@ def _find_stim_steps(data, first_samp, pad_start=None, pad_stop=None, merge=0): steps[where, 2] = steps[where + 1, 2] keep = np.insert(keep, 0, True) - is_step = (steps[:, 1] != steps[:, 2]) + is_step = steps[:, 1] != steps[:, 2] keep = np.logical_and(keep, is_step) steps = steps[keep] return steps -def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0, - stim_channel=None): +def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0, stim_channel=None): """Find all steps in data from a stim channel. Parameters @@ -422,23 +460,35 @@ def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0, # pull stim channel from config if necessary stim_channel = _get_stim_channel(stim_channel, raw.info) - picks = pick_channels(raw.info['ch_names'], include=stim_channel) + picks = pick_channels(raw.info["ch_names"], include=stim_channel, ordered=False) if len(picks) == 0: - raise ValueError('No stim channel found to extract event triggers.') + raise ValueError("No stim channel found to extract event triggers.") data, _ = raw[picks, :] if np.any(data < 0): - warn('Trigger channel contains negative values, using absolute value.') + warn("Trigger channel contains negative values, using absolute value.") data = np.abs(data) # make sure trig channel is positive data = data.astype(np.int64) - return _find_stim_steps(data, raw.first_samp, pad_start=pad_start, - pad_stop=pad_stop, merge=merge) + return _find_stim_steps( + data, raw.first_samp, pad_start=pad_start, pad_stop=pad_stop, merge=merge + ) @verbose -def _find_events(data, first_samp, verbose=None, output='onset', - consecutive='increasing', min_samples=0, mask=None, - uint_cast=False, mask_type='and', initial_event=False): +def _find_events( + data, + first_samp, + *, + verbose=None, + output="onset", + consecutive="increasing", + min_samples=0, + mask=None, + uint_cast=False, + mask_type="and", + initial_event=False, + ch_name=None, +): """Help find events.""" assert data.shape[0] == 1 # data should be only a row vector @@ -453,42 +503,46 @@ def _find_events(data, first_samp, verbose=None, output='onset', if uint_cast: data = data.astype(np.uint16).astype(np.int64) if data.min() < 0: - warn('Trigger channel contains negative values, using absolute ' - 'value. If data were acquired on a Neuromag system with ' - 'STI016 active, consider using uint_cast=True to work around ' - 'an acquisition bug') + warn( + "Trigger channel contains negative values, using absolute " + "value. 
If data were acquired on a Neuromag system with " + "STI016 active, consider using uint_cast=True to work around " + "an acquisition bug" + ) data = np.abs(data) # make sure trig channel is positive events = _find_stim_steps(data, first_samp, pad_stop=0, merge=merge) initial_value = data[0, 0] if initial_value != 0: if initial_event: - events = np.insert( - events, 0, [first_samp, 0, initial_value], axis=0) + events = np.insert(events, 0, [first_samp, 0, initial_value], axis=0) else: - logger.info('Trigger channel has a non-zero initial value of {} ' - '(consider using initial_event=True to detect this ' - 'event)'.format(initial_value)) + logger.info( + f"Trigger channel {ch_name} has a non-zero initial value of " + f"{initial_value} (consider using initial_event=True to detect this " + "event)" + ) events = _mask_trigs(events, mask, mask_type) # Determine event onsets and offsets - if consecutive == 'increasing': - onsets = (events[:, 2] > events[:, 1]) - offsets = np.logical_and(np.logical_or(onsets, (events[:, 2] == 0)), - (events[:, 1] > 0)) + if consecutive == "increasing": + onsets = events[:, 2] > events[:, 1] + offsets = np.logical_and( + np.logical_or(onsets, (events[:, 2] == 0)), (events[:, 1] > 0) + ) elif consecutive: - onsets = (events[:, 2] > 0) - offsets = (events[:, 1] > 0) + onsets = events[:, 2] > 0 + offsets = events[:, 1] > 0 else: - onsets = (events[:, 1] == 0) - offsets = (events[:, 2] == 0) + onsets = events[:, 1] == 0 + offsets = events[:, 2] == 0 onset_idx = np.where(onsets)[0] offset_idx = np.where(offsets)[0] if len(onset_idx) == 0 or len(offset_idx) == 0: - return np.empty((0, 3), dtype='int32') + return np.empty((0, 3), dtype="int32") # delete orphaned onsets/offsets if onset_idx[0] > offset_idx[0]: @@ -499,22 +553,22 @@ def _find_events(data, first_samp, verbose=None, output='onset', logger.info("Removing orphaned onset at the end of the file.") onset_idx = np.delete(onset_idx, -1) - if output == 'onset': + _check_option("output", output, ("onset", "step", "offset")) + if output == "onset": events = events[onset_idx] - elif output == 'step': + elif output == "step": idx = np.union1d(onset_idx, offset_idx) events = events[idx] - elif output == 'offset': + else: + assert output == "offset" event_id = events[onset_idx, 2] events = events[offset_idx] events[:, 1] = events[:, 2] events[:, 2] = event_id events[:, 0] -= 1 - else: - raise ValueError("Invalid output parameter %r" % output) - logger.info("%s events found" % len(events)) - logger.info("Event IDs: %s" % np.unique(events[:, 2])) + logger.info(f"{len(events)} event{_pl(events)} found on stim channel {ch_name}") + logger.info(f"Event IDs: {np.unique(events[:, 2])}") return events @@ -522,20 +576,32 @@ def _find_events(data, first_samp, verbose=None, output='onset', def _find_unique_events(events): """Uniquify events (ie remove duplicated rows.""" e = np.ascontiguousarray(events).view( - np.dtype((np.void, events.dtype.itemsize * events.shape[1]))) + np.dtype((np.void, events.dtype.itemsize * events.shape[1])) + ) _, idx = np.unique(e, return_index=True) n_dupes = len(events) - len(idx) if n_dupes > 0: - warn("Some events are duplicated in your different stim channels." - " %d events were ignored during deduplication." % n_dupes) + warn( + "Some events are duplicated in your different stim channels. " + f"{n_dupes} events were ignored during deduplication." 
+ ) return events[idx] @verbose -def find_events(raw, stim_channel=None, output='onset', - consecutive='increasing', min_duration=0, - shortest_event=2, mask=None, uint_cast=False, - mask_type='and', initial_event=False, verbose=None): +def find_events( + raw, + stim_channel=None, + output="onset", + consecutive="increasing", + min_duration=0, + shortest_event=2, + mask=None, + uint_cast=False, + mask_type="and", + initial_event=False, + verbose=None, +): """Find :term:`events` from raw file. See :ref:`tut-events-vs-annotations` and :ref:`tut-event-arrays` @@ -682,42 +748,53 @@ def find_events(raw, stim_channel=None, output='onset', ---------------- 2 '0000010' """ - min_samples = min_duration * raw.info['sfreq'] + min_samples = min_duration * raw.info["sfreq"] # pull stim channel from config if necessary try: stim_channel = _get_stim_channel(stim_channel, raw.info) except ValueError: if len(raw.annotations) > 0: - raise ValueError("No stim channels found, but the raw object has " - "annotations. Consider using " - "mne.events_from_annotations to convert these to " - "events.") + raise ValueError( + "No stim channels found, but the raw object has " + "annotations. Consider using " + "mne.events_from_annotations to convert these to " + "events." + ) else: raise - picks = pick_channels(raw.info['ch_names'], include=stim_channel) + picks = pick_channels(raw.info["ch_names"], include=stim_channel) if len(picks) == 0: - raise ValueError('No stim channel found to extract event triggers.') + raise ValueError("No stim channel found to extract event triggers.") + logger.info(f"Finding events on: {', '.join(raw.ch_names[pick] for pick in picks)}") data, _ = raw[picks, :] events_list = [] - for d in data: - events = _find_events(d[np.newaxis, :], raw.first_samp, - verbose=verbose, output=output, - consecutive=consecutive, min_samples=min_samples, - mask=mask, uint_cast=uint_cast, - mask_type=mask_type, initial_event=initial_event) + for d, ch_name in zip(data, stim_channel): + events = _find_events( + d[np.newaxis, :], + raw.first_samp, + verbose=verbose, + output=output, + consecutive=consecutive, + min_samples=min_samples, + mask=mask, + uint_cast=uint_cast, + mask_type=mask_type, + initial_event=initial_event, + ch_name=ch_name, + ) # add safety check for spurious events (for ex. from neuromag syst.) by # checking the number of low sample events n_short_events = np.sum(np.diff(events[:, 0]) < shortest_event) if n_short_events > 0: - raise ValueError("You have %i events shorter than the " - "shortest_event. These are very unusual and you " - "may want to set min_duration to a larger value " - "e.g. x / raw.info['sfreq']. Where x = 1 sample " - "shorter than the shortest event " - "length." % (n_short_events)) + raise ValueError( + f"You have {n_short_events} events shorter than the shortest_event. " + "These are very unusual and you may want to set min_duration to a " + "larger value e.g. x / raw.info['sfreq']. Where x = 1 sample shorter " + "than the shortest event length." 
+ ) events_list.append(events) @@ -729,7 +806,7 @@ def find_events(raw, stim_channel=None, output='onset', def _mask_trigs(events, mask, mask_type): """Mask digital trigger values.""" - _check_option('mask_type', mask_type, ['not_and', 'and']) + _check_option("mask_type", mask_type, ["not_and", "and"]) if mask is not None: _validate_type(mask, "int", "mask", "int or None") n_events = len(events) @@ -737,11 +814,13 @@ def _mask_trigs(events, mask, mask_type): return events.copy() if mask is not None: - if mask_type == 'not_and': + if mask_type == "not_and": mask = np.bitwise_not(mask) - elif mask_type != 'and': - raise ValueError("'mask_type' should be either 'and'" - " or 'not_and', instead of '%s'" % mask_type) + elif mask_type != "and": + raise ValueError( + "'mask_type' should be either 'and'" + f" or 'not_and', instead of '{mask_type}'" + ) events[:, 1:] = np.bitwise_and(events[:, 1:], mask) events = events[events[:, 1] != events[:, 2]] @@ -833,15 +912,22 @@ def shift_time_events(events, ids, tshift, sfreq): if ids is None: mask = slice(None) else: - mask = np.in1d(events[:, 2], ids) + mask = np.isin(events[:, 2], ids) events[mask, 0] += int(tshift * sfreq) return events @fill_doc -def make_fixed_length_events(raw, id=1, start=0, stop=None, duration=1., - first_samp=True, overlap=0.): +def make_fixed_length_events( + raw, + id=1, # noqa: A002 + start=0, + stop=None, + duration=1.0, + first_samp=True, + overlap=0.0, +): """Make a set of :term:`events` separated by a fixed duration. Parameters @@ -873,15 +959,17 @@ def make_fixed_length_events(raw, id=1, start=0, stop=None, duration=1., ------- %(events)s """ - from .io.base import BaseRaw + from .io import BaseRaw + _validate_type(raw, BaseRaw, "raw") - _validate_type(id, int, "id") + _validate_type(id, "int", "id") _validate_type(duration, "numeric", "duration") _validate_type(overlap, "numeric", "overlap") duration, overlap = float(duration), float(overlap) if not 0 <= overlap < duration: - raise ValueError('overlap must be >=0 but < duration (%s), got %s' - % (duration, overlap)) + raise ValueError( + f"overlap must be >=0 but < duration ({duration}), got {overlap}" + ) start = raw.time_as_index(start, use_rounding=True)[0] if stop is not None: @@ -894,16 +982,17 @@ def make_fixed_length_events(raw, id=1, start=0, stop=None, duration=1., else: stop = min([stop, len(raw.times)]) # Make sure we don't go out the end of the file: - stop -= int(np.round(raw.info['sfreq'] * duration)) + stop -= int(np.round(raw.info["sfreq"] * duration)) # This should be inclusive due to how we generally use start and stop... 
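# Minimal worked sketch, with illustrative numbers only (not taken from this diff),
# of the onset spacing computed just below in make_fixed_length_events:
# successive events advance by sfreq * (duration - overlap) samples.
import numpy as np

sfreq, duration, overlap = 100.0, 1.0, 0.5  # assumed example values
start, stop = 0, 300                        # samples; stop stands in for len(raw.times)
stop -= int(np.round(sfreq * duration))     # keep the final epoch inside the data
ts = np.arange(start, stop + 1, sfreq * (duration - overlap)).astype(int)
events = np.c_[ts, np.zeros(len(ts), dtype=int), np.ones(len(ts), dtype=int)]
print(events[:, 0])  # [  0  50 100 150 200]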
- ts = np.arange(start, stop + 1, - raw.info['sfreq'] * (duration - overlap)).astype(int) + ts = np.arange(start, stop + 1, raw.info["sfreq"] * (duration - overlap)).astype( + int + ) n_events = len(ts) if n_events == 0: - raise ValueError('No events produced, check the values of start, ' - 'stop, and duration') - events = np.c_[ts, np.zeros(n_events, dtype=int), - id * np.ones(n_events, dtype=int)] + raise ValueError( + "No events produced, check the values of start, stop, and duration" + ) + events = np.c_[ts, np.zeros(n_events, dtype=int), id * np.ones(n_events, dtype=int)] return events @@ -934,10 +1023,10 @@ def concatenate_events(events, first_samps, last_samps): mne.concatenate_raws """ _validate_type(events, list, "events") - if not (len(events) == len(last_samps) and - len(events) == len(first_samps)): - raise ValueError('events, first_samps, and last_samps must all have ' - 'the same lengths') + if not (len(events) == len(last_samps) and len(events) == len(first_samps)): + raise ValueError( + "events, first_samps, and last_samps must all have the same lengths" + ) first_samps = np.array(first_samps) last_samps = np.array(last_samps) n_samps = np.cumsum(last_samps - first_samps + 1) @@ -954,7 +1043,7 @@ def concatenate_events(events, first_samps, last_samps): @fill_doc -class AcqParserFIF(object): +class AcqParserFIF: """Parser for Elekta data acquisition settings. This class parses parameters (e.g. events and averaging categories) that @@ -993,85 +1082,125 @@ class AcqParserFIF(object): """ # DACQ variables always start with one of these - _acq_var_magic = ['ERF', 'DEF', 'ACQ', 'TCP'] + _acq_var_magic = ["ERF", "DEF", "ACQ", "TCP"] # averager related DACQ variable names (without preceding 'ERF') # old versions (DACQ < 3.4) - _dacq_vars_compat = ('megMax', 'megMin', 'megNoise', 'megSlope', - 'megSpike', 'eegMax', 'eegMin', 'eegNoise', - 'eegSlope', 'eegSpike', 'eogMax', 'ecgMax', 'ncateg', - 'nevent', 'stimSource', 'triggerMap', 'update', - 'artefIgnore', 'averUpdate') - - _event_vars_compat = ('Comment', 'Delay') - - _cat_vars = ('Comment', 'Display', 'Start', 'State', 'End', 'Event', - 'Nave', 'ReqEvent', 'ReqWhen', 'ReqWithin', 'SubAve') + _dacq_vars_compat = ( + "megMax", + "megMin", + "megNoise", + "megSlope", + "megSpike", + "eegMax", + "eegMin", + "eegNoise", + "eegSlope", + "eegSpike", + "eogMax", + "ecgMax", + "ncateg", + "nevent", + "stimSource", + "triggerMap", + "update", + "artefIgnore", + "averUpdate", + ) + + _event_vars_compat = ("Comment", "Delay") + + _cat_vars = ( + "Comment", + "Display", + "Start", + "State", + "End", + "Event", + "Nave", + "ReqEvent", + "ReqWhen", + "ReqWithin", + "SubAve", + ) # new versions only (DACQ >= 3.4) - _dacq_vars = _dacq_vars_compat + ('magMax', 'magMin', 'magNoise', - 'magSlope', 'magSpike', 'version') - - _event_vars = _event_vars_compat + ('Name', 'Channel', 'NewBits', - 'OldBits', 'NewMask', 'OldMask') - - def __init__(self, info): # noqa: D102 - acq_pars = info['acq_pars'] + _dacq_vars = _dacq_vars_compat + ( + "magMax", + "magMin", + "magNoise", + "magSlope", + "magSpike", + "version", + ) + + _event_vars = _event_vars_compat + ( + "Name", + "Channel", + "NewBits", + "OldBits", + "NewMask", + "OldMask", + ) + + def __init__(self, info): + acq_pars = info["acq_pars"] if not acq_pars: - raise ValueError('No acquisition parameters') + raise ValueError("No acquisition parameters") self.acq_dict = dict(self._acqpars_gen(acq_pars)) - if 'ERFversion' in self.acq_dict: + if "ERFversion" in self.acq_dict: self.compat = False # DACQ 
ver >= 3.4 - elif 'ERFncateg' in self.acq_dict: # probably DACQ < 3.4 + elif "ERFncateg" in self.acq_dict: # probably DACQ < 3.4 self.compat = True else: - raise ValueError('Cannot parse acquisition parameters') + raise ValueError("Cannot parse acquisition parameters") dacq_vars = self._dacq_vars_compat if self.compat else self._dacq_vars # set instance variables for var in dacq_vars: - val = self.acq_dict['ERF' + var] - if var[:3] in ['mag', 'meg', 'eeg', 'eog', 'ecg']: + val = self.acq_dict["ERF" + var] + if var[:3] in ["mag", "meg", "eeg", "eog", "ecg"]: val = float(val) - elif var in ['ncateg', 'nevent']: + elif var in ["ncateg", "nevent"]: val = int(val) setattr(self, var.lower(), val) - self.stimsource = ( - 'Internal' if self.stimsource == '1' else 'External') + self.stimsource = "Internal" if self.stimsource == "1" else "External" # collect all events and categories self._events = self._events_from_acq_pars() self._categories = self._categories_from_acq_pars() # mark events that are used by a category for cat in self._categories.values(): - if cat['event']: - self._events[cat['event']]['in_use'] = True - if cat['reqevent']: - self._events[cat['reqevent']]['in_use'] = True + if cat["event"]: + self._events[cat["event"]]["in_use"] = True + if cat["reqevent"]: + self._events[cat["reqevent"]]["in_use"] = True # make mne rejection dicts based on the averager parameters - self.reject = {'grad': self.megmax, 'eeg': self.eegmax, - 'eog': self.eogmax, 'ecg': self.ecgmax} + self.reject = { + "grad": self.megmax, + "eeg": self.eegmax, + "eog": self.eogmax, + "ecg": self.ecgmax, + } if not self.compat: - self.reject['mag'] = self.magmax - self.reject = {k: float(v) for k, v in self.reject.items() - if float(v) > 0} - self.flat = {'grad': self.megmin, 'eeg': self.eegmin} + self.reject["mag"] = self.magmax + self.reject = {k: float(v) for k, v in self.reject.items() if float(v) > 0} + self.flat = {"grad": self.megmin, "eeg": self.eegmin} if not self.compat: - self.flat['mag'] = self.magmin - self.flat = {k: float(v) for k, v in self.flat.items() - if float(v) > 0} + self.flat["mag"] = self.magmin + self.flat = {k: float(v) for k, v in self.flat.items() if float(v) > 0} def __repr__(self): # noqa: D105 - s = ' bits for old DACQ versions - _compat_event_lookup = {1: 1, 2: 2, 3: 4, 4: 8, 5: 16, 6: 32, 7: 3, - 8: 5, 9: 6, 10: 7, 11: 9, 12: 10, 13: 11, - 14: 12, 15: 13, 16: 14, 17: 15} + _compat_event_lookup = { + 1: 1, + 2: 2, + 3: 4, + 4: 8, + 5: 16, + 6: 32, + 7: 3, + 8: 5, + 9: 6, + 10: 7, + 11: 9, + 12: 10, + 13: 11, + 14: 12, + 15: 13, + 16: 14, + 17: 15, + } events = dict() for evnum in range(1, self.nevent + 1): evnum_s = str(evnum).zfill(2) # '01', '02' etc. evdi = dict() - event_vars = (self._event_vars_compat if self.compat - else self._event_vars) + event_vars = self._event_vars_compat if self.compat else self._event_vars for var in event_vars: # name of DACQ variable, e.g. 'ERFeventNewBits01' - acq_key = 'ERFevent' + var + evnum_s + acq_key = "ERFevent" + var + evnum_s # corresponding dict key, e.g. 
'newbits' dict_key = var.lower() val = self.acq_dict[acq_key] # type convert numeric values - if dict_key in ['newbits', 'oldbits', 'newmask', 'oldmask']: + if dict_key in ["newbits", "oldbits", "newmask", "oldmask"]: val = int(val) - elif dict_key in ['delay']: + elif dict_key in ["delay"]: val = float(val) evdi[dict_key] = val - evdi['in_use'] = False # __init__() will set this - evdi['index'] = evnum + evdi["in_use"] = False # __init__() will set this + evdi["index"] = evnum if self.compat: - evdi['name'] = str(evnum) - evdi['oldmask'] = 63 - evdi['newmask'] = 63 - evdi['oldbits'] = 0 - evdi['newbits'] = _compat_event_lookup[evnum] + evdi["name"] = str(evnum) + evdi["oldmask"] = 63 + evdi["newmask"] = 63 + evdi["oldbits"] = 0 + evdi["newbits"] = _compat_event_lookup[evnum] events[evnum] = evdi return events def _acqpars_gen(self, acq_pars): """Yield key/value pairs from ``info['acq_pars'])``.""" - key, val = '', '' + key, val = "", "" for line in acq_pars.split(): if any([line.startswith(x) for x in self._acq_var_magic]): key = line - val = '' + val = "" else: if not key: - raise ValueError('Cannot parse acquisition parameters') + raise ValueError("Cannot parse acquisition parameters") # DACQ splits items with spaces into multiple lines - val += ' ' + line if val else line + val += " " + line if val else line yield key, val def _categories_from_acq_pars(self): @@ -1209,20 +1353,20 @@ def _categories_from_acq_pars(self): catdi = dict() # read all category variables for var in self._cat_vars: - acq_key = 'ERFcat' + var + catnum + acq_key = "ERFcat" + var + catnum class_key = var.lower() val = self.acq_dict[acq_key] catdi[class_key] = val # some type conversions - catdi['display'] = (catdi['display'] == '1') - catdi['state'] = (catdi['state'] == '1') - for key in ['start', 'end', 'reqwithin']: + catdi["display"] = catdi["display"] == "1" + catdi["state"] = catdi["state"] == "1" + for key in ["start", "end", "reqwithin"]: catdi[key] = float(catdi[key]) - for key in ['nave', 'event', 'reqevent', 'reqwhen', 'subave']: + for key in ["nave", "event", "reqevent", "reqwhen", "subave"]: catdi[key] = int(catdi[key]) # some convenient extra (non-DACQ) vars - catdi['index'] = int(catnum) # index of category in DACQ list - cats[catdi['comment']] = catdi + catdi["index"] = int(catnum) # index of category in DACQ list + cats[catdi["comment"]] = catdi return cats def _events_mne_to_dacq(self, mne_events): @@ -1238,13 +1382,13 @@ def _events_mne_to_dacq(self, mne_events): events_ = mne_events.copy() events_[:, 1:3] = 0 for n, ev in self._events.items(): - if ev['in_use']: + if ev["in_use"]: pre_ok = ( - np.bitwise_and(ev['oldmask'], - mne_events[:, 1]) == ev['oldbits']) + np.bitwise_and(ev["oldmask"], mne_events[:, 1]) == ev["oldbits"] + ) post_ok = ( - np.bitwise_and(ev['newmask'], - mne_events[:, 2]) == ev['newbits']) + np.bitwise_and(ev["newmask"], mne_events[:, 2]) == ev["newbits"] + ) ok_ind = np.where(pre_ok & post_ok) events_[ok_ind, 2] |= 1 << (n - 1) return events_ @@ -1256,8 +1400,8 @@ def _mne_events_to_category_t0(self, cat, mne_events, sfreq): Then the zero times for the epochs are obtained by considering the reference and conditional (required) events and the delay to stimulus. """ - cat_ev = cat['event'] - cat_reqev = cat['reqevent'] + cat_ev = cat["event"] + cat_reqev = cat["reqevent"] # first convert mne events to dacq event list events = self._events_mne_to_dacq(mne_events) # next, take req. 
events and delays into account @@ -1267,25 +1411,25 @@ def _mne_events_to_category_t0(self, cat, mne_events, sfreq): refEvents_t = times[refEvents_inds] if cat_reqev: # indices of times where req. event occurs - reqEvents_inds = np.where(events[:, 2] & ( - 1 << cat_reqev - 1))[0] + reqEvents_inds = np.where(events[:, 2] & (1 << cat_reqev - 1))[0] reqEvents_t = times[reqEvents_inds] # relative (to refevent) time window where req. event # must occur (e.g. [0 .2]) - twin = [0, (-1)**(cat['reqwhen']) * cat['reqwithin']] + twin = [0, (-1) ** (cat["reqwhen"]) * cat["reqwithin"]] win = np.round(np.array(sorted(twin)) * sfreq) # to samples refEvents_wins = refEvents_t[:, None] + win req_acc = np.zeros(refEvents_inds.shape, dtype=bool) for t in reqEvents_t: # mark time windows where req. condition is satisfied reqEvent_in_win = np.logical_and( - t >= refEvents_wins[:, 0], t <= refEvents_wins[:, 1]) + t >= refEvents_wins[:, 0], t <= refEvents_wins[:, 1] + ) req_acc |= reqEvent_in_win # drop ref. events where req. event condition is not satisfied refEvents_inds = refEvents_inds[np.where(req_acc)] refEvents_t = times[refEvents_inds] # adjust for trigger-stimulus delay by delaying the ref. event - refEvents_t += int(np.round(self._events[cat_ev]['delay'] * sfreq)) + refEvents_t += int(np.round(self._events[cat_ev]["delay"] * sfreq)) return refEvents_t @property @@ -1294,8 +1438,7 @@ def categories(self): Only returns categories marked active in DACQ. """ - cats = sorted(self._categories_in_use.values(), - key=lambda cat: cat['index']) + cats = sorted(self._categories_in_use.values(), key=lambda cat: cat["index"]) return cats @property @@ -1304,19 +1447,27 @@ def events(self): Only returns events that are in use (referred to by a category). """ - evs = sorted(self._events_in_use.values(), key=lambda ev: ev['index']) + evs = sorted(self._events_in_use.values(), key=lambda ev: ev["index"]) return evs @property def _categories_in_use(self): - return {k: v for k, v in self._categories.items() if v['state']} + return {k: v for k, v in self._categories.items() if v["state"]} @property def _events_in_use(self): - return {k: v for k, v in self._events.items() if v['in_use']} - - def get_condition(self, raw, condition=None, stim_channel=None, mask=None, - uint_cast=None, mask_type='and', delayed_lookup=True): + return {k: v for k, v in self._events.items() if v["in_use"]} + + def get_condition( + self, + raw, + condition=None, + stim_channel=None, + mask=None, + uint_cast=None, + mask_type="and", + delayed_lookup=True, + ): """Get averaging parameters for a condition (averaging category). Output is designed to be used with the Epochs class to extract the @@ -1388,35 +1539,45 @@ def get_condition(self, raw, condition=None, stim_channel=None, mask=None, for cat in condition: if isinstance(cat, str): cat = self[cat] - mne_events = find_events(raw, stim_channel=stim_channel, mask=mask, - mask_type=mask_type, output='step', - uint_cast=uint_cast, consecutive=True, - verbose=False, shortest_event=1) + mne_events = find_events( + raw, + stim_channel=stim_channel, + mask=mask, + mask_type=mask_type, + output="step", + uint_cast=uint_cast, + consecutive=True, + verbose=False, + shortest_event=1, + ) if delayed_lookup: ind = np.where(np.diff(mne_events[:, 0]) == 1)[0] if 1 in np.diff(ind): - raise ValueError('There are several subsequent ' - 'transitions on the trigger channel. ' - 'This will not work well with ' - 'delayed_lookup=True. 
You may want to ' - 'check your trigger data and ' - 'set delayed_lookup=False.') + raise ValueError( + "There are several subsequent " + "transitions on the trigger channel. " + "This will not work well with " + "delayed_lookup=True. You may want to " + "check your trigger data and " + "set delayed_lookup=False." + ) mne_events[ind, 2] = mne_events[ind + 1, 2] mne_events = np.delete(mne_events, ind + 1, axis=0) - sfreq = raw.info['sfreq'] + sfreq = raw.info["sfreq"] cat_t0_ = self._mne_events_to_category_t0(cat, mne_events, sfreq) # make it compatible with the usual events array - cat_t0 = np.c_[cat_t0_, np.zeros(cat_t0_.shape), - cat['index'] * np.ones(cat_t0_.shape) - ].astype(np.uint32) - cat_id = {cat['comment']: cat['index']} - tmin, tmax = cat['start'], cat['end'] - conds_data.append(dict(events=cat_t0, event_id=cat_id, - tmin=tmin, tmax=tmax)) + cat_t0 = np.c_[ + cat_t0_, np.zeros(cat_t0_.shape), cat["index"] * np.ones(cat_t0_.shape) + ].astype(np.uint32) + cat_id = {cat["comment"]: cat["index"]} + tmin, tmax = cat["start"], cat["end"] + conds_data.append( + dict(events=cat_t0, event_id=cat_id, tmin=tmin, tmax=tmax) + ) return conds_data[0] if len(conds_data) == 1 else conds_data -def match_event_names(event_names, keys, *, on_missing='raise'): +def match_event_names(event_names, keys, *, on_missing="raise"): """Search a collection of event names for matching (sub-)groups of events. This function is particularly helpful when using grouped event names @@ -1467,10 +1628,7 @@ def match_event_names(event_names, keys, *, on_missing='raise'): event_names = list(event_names) # ensure we have a list of `keys` - if ( - isinstance(keys, (Sequence, np.ndarray)) and - not isinstance(keys, str) - ): + if isinstance(keys, Sequence | np.ndarray) and not isinstance(keys, str): keys = list(keys) else: keys = [keys] @@ -1480,19 +1638,20 @@ def match_event_names(event_names, keys, *, on_missing='raise'): # form the hierarchical event name mapping for key in keys: if not isinstance(key, str): - raise ValueError(f'keys must be strings, got {type(key)} ({key})') + raise ValueError(f"keys must be strings, got {type(key)} ({key})") matches.extend( - name for name in event_names - if set(key.split('/')).issubset(name.split('/')) + name + for name in event_names + if set(key.split("/")).issubset(name.split("/")) ) if not matches: _on_missing( on_missing=on_missing, msg=f'Event name "{key}" could not be found. The following events ' - f'are present in the data: {", ".join(event_names)}', - error_klass=KeyError + f"are present in the data: {', '.join(event_names)}", + error_klass=KeyError, ) matches = sorted(set(matches)) # deduplicate if necessary @@ -1527,7 +1686,7 @@ def count_events(events, ids=None): {1: 2, 11: 0} """ counts = np.bincount(events[:, 2]) - counts = {i: count for i, count in enumerate(counts) if count > 0} + counts = {i: int(count) for i, count in enumerate(counts) if count > 0} if ids is not None: - return {id: counts.get(id, 0) for id in ids} + counts = {id_: counts.get(id_, 0) for id_ in ids} return counts diff --git a/mne/evoked.py b/mne/evoked.py index 1db3de6bcd7..9a13f5229f6 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -1,71 +1,111 @@ -# -*- coding: utf-8 -*- -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Denis Engemann -# Andrew Dykstra -# Mads Jensen -# Jona Sassenhagen -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
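# Minimal sketch (illustrative values, not from this diff) of the two event helpers
# finalized above: match_event_names expands slash-separated tags, and count_events
# tallies event codes, returning 0 for any requested id that is absent.
import numpy as np

from mne.event import count_events, match_event_names

names = ["auditory/left", "auditory/right", "visual/left"]
print(match_event_names(names, "auditory"))  # ['auditory/left', 'auditory/right']

events = np.array([[10, 0, 1], [60, 0, 1], [120, 0, 2]])
print(count_events(events, ids=[1, 2, 11]))  # {1: 2, 2: 1, 11: 0}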
+ +from __future__ import annotations # only needed for Python ≤ 3.9 from copy import deepcopy +from inspect import getfullargspec +from pathlib import Path import numpy as np -from .baseline import rescale, _log_rescale, _check_baseline -from .channels.channels import (UpdateChannelsMixin, - SetChannelsMixin, InterpolationMixin) +from ._fiff.constants import FIFF +from ._fiff.meas_info import ( + ContainsMixin, + SetChannelsMixin, + _ensure_infos_match, + _read_extended_ch_info, + _rename_list, + read_meas_info, + write_meas_info, +) +from ._fiff.open import fiff_open +from ._fiff.pick import _FNIRS_CH_TYPES_SPLIT, _picks_to_idx, pick_types +from ._fiff.proj import ProjMixin +from ._fiff.tag import read_tag +from ._fiff.tree import dir_tree_find +from ._fiff.write import ( + end_block, + start_and_end_file, + start_block, + write_complex_float_matrix, + write_float, + write_float_matrix, + write_id, + write_int, + write_string, +) +from .baseline import _check_baseline, _log_rescale, rescale +from .channels.channels import InterpolationMixin, ReferenceMixin, UpdateChannelsMixin from .channels.layout import _merge_ch_data, _pair_grad_sensors -from .defaults import (_INTERPOLATION_DEFAULT, _EXTRAPOLATE_DEFAULT, - _BORDER_DEFAULT) -from .filter import detrend, FilterMixin, _check_fun -from .utils import (check_fname, logger, verbose, warn, sizeof_fmt, repr_html, - SizeMixin, copy_function_doc_to_method_doc, _validate_type, - fill_doc, _check_option, _build_data_frame, - _check_pandas_installed, _check_pandas_index_arguments, - _convert_times, _scale_dataframe_data, _check_time_format, - _check_preload, _check_fname, TimeMixin) -from .viz import (plot_evoked, plot_evoked_topomap, plot_evoked_field, - plot_evoked_image, plot_evoked_topo) -from .viz.evoked import plot_evoked_white, plot_evoked_joint -from .viz.topomap import _topomap_animation - -from .io.constants import FIFF -from .io.open import fiff_open -from .io.tag import read_tag -from .io.tree import dir_tree_find -from .io.pick import pick_types, _picks_to_idx, _FNIRS_CH_TYPES_SPLIT -from .io.meas_info import (ContainsMixin, read_meas_info, write_meas_info, - _read_extended_ch_info, _rename_list, - _ensure_infos_match) -from .io.proj import ProjMixin -from .io.write import (start_and_end_file, start_block, end_block, - write_int, write_string, write_float_matrix, - write_id, write_float, write_complex_float_matrix) -from .io.base import _check_maxshield, _get_ch_factors +from .defaults import _BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT, _INTERPOLATION_DEFAULT +from .filter import FilterMixin, _check_fun, detrend +from .html_templates import _get_html_template from .parallel import parallel_func from .time_frequency.spectrum import Spectrum, SpectrumMixin, _validate_method +from .time_frequency.tfr import AverageTFR +from .utils import ( + ExtendedTimeMixin, + SizeMixin, + _build_data_frame, + _check_fname, + _check_option, + _check_pandas_index_arguments, + _check_pandas_installed, + _check_preload, + _check_time_format, + _convert_times, + _scale_dataframe_data, + _validate_type, + check_fname, + copy_function_doc_to_method_doc, + fill_doc, + logger, + repr_html, + sizeof_fmt, + verbose, + warn, +) +from .viz import ( + plot_evoked, + plot_evoked_field, + plot_evoked_image, + plot_evoked_topo, + plot_evoked_topomap, +) +from .viz.evoked import plot_evoked_joint, plot_evoked_white +from .viz.topomap import _topomap_animation _aspect_dict = { - 'average': FIFF.FIFFV_ASPECT_AVERAGE, - 'standard_error': FIFF.FIFFV_ASPECT_STD_ERR, - 
'single_epoch': FIFF.FIFFV_ASPECT_SINGLE, - 'partial_average': FIFF.FIFFV_ASPECT_SUBAVERAGE, - 'alternating_subaverage': FIFF.FIFFV_ASPECT_ALTAVERAGE, - 'sample_cut_out_by_graph': FIFF.FIFFV_ASPECT_SAMPLE, - 'power_density_spectrum': FIFF.FIFFV_ASPECT_POWER_DENSITY, - 'dipole_amplitude_cuvre': FIFF.FIFFV_ASPECT_DIPOLE_WAVE, - 'squid_modulation_lower_bound': FIFF.FIFFV_ASPECT_IFII_LOW, - 'squid_modulation_upper_bound': FIFF.FIFFV_ASPECT_IFII_HIGH, - 'squid_gate_setting': FIFF.FIFFV_ASPECT_GATE, + "average": FIFF.FIFFV_ASPECT_AVERAGE, + "standard_error": FIFF.FIFFV_ASPECT_STD_ERR, + "single_epoch": FIFF.FIFFV_ASPECT_SINGLE, + "partial_average": FIFF.FIFFV_ASPECT_SUBAVERAGE, + "alternating_subaverage": FIFF.FIFFV_ASPECT_ALTAVERAGE, + "sample_cut_out_by_graph": FIFF.FIFFV_ASPECT_SAMPLE, + "power_density_spectrum": FIFF.FIFFV_ASPECT_POWER_DENSITY, + "dipole_amplitude_cuvre": FIFF.FIFFV_ASPECT_DIPOLE_WAVE, + "squid_modulation_lower_bound": FIFF.FIFFV_ASPECT_IFII_LOW, + "squid_modulation_upper_bound": FIFF.FIFFV_ASPECT_IFII_HIGH, + "squid_gate_setting": FIFF.FIFFV_ASPECT_GATE, } _aspect_rev = {val: key for key, val in _aspect_dict.items()} @fill_doc -class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin, SetChannelsMixin, - InterpolationMixin, FilterMixin, TimeMixin, SizeMixin, - SpectrumMixin): +class Evoked( + ProjMixin, + ContainsMixin, + UpdateChannelsMixin, + ReferenceMixin, + SetChannelsMixin, + InterpolationMixin, + FilterMixin, + ExtendedTimeMixin, + SizeMixin, + SpectrumMixin, +): """Evoked data. Parameters @@ -124,17 +164,28 @@ class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin, SetChannelsMixin, """ @verbose - def __init__(self, fname, condition=None, proj=True, - kind='average', allow_maxshield=False, *, - verbose=None): # noqa: D102 + def __init__( + self, + fname, + condition=None, + proj=True, + kind="average", + allow_maxshield=False, + *, + verbose=None, + ): _validate_type(proj, bool, "'proj'") # Read the requested data - fname = str( - _check_fname(fname=fname, must_exist=True, overwrite="read") - ) - self.info, self.nave, self._aspect_kind, self.comment, times, \ - self.data, self.baseline = _read_evoked(fname, condition, kind, - allow_maxshield) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") + ( + self.info, + self.nave, + self._aspect_kind, + self.comment, + times, + self.data, + self.baseline, + ) = _read_evoked(fname, condition, kind, allow_maxshield) self._set_times(times) self._raw_times = self.times.copy() self._decim = 1 @@ -146,6 +197,18 @@ def __init__(self, fname, condition=None, proj=True, self.apply_proj() self.filename = fname + @property + def filename(self) -> Path | None: + """The filename of the evoked object, if it exists. + + :type: :class:`~pathlib.Path` | None + """ + return self._filename + + @filename.setter + def filename(self, value): + self._filename = Path(value) if value is not None else value + @property def kind(self): """The data kind.""" @@ -153,7 +216,7 @@ def kind(self): @kind.setter def kind(self, kind): - _check_option('kind', kind, list(_aspect_dict.keys())) + _check_option("kind", kind, list(_aspect_dict.keys())) self._aspect_kind = _aspect_dict[kind] @property @@ -188,6 +251,9 @@ def get_data(self, picks=None, units=None, tmin=None, tmax=None): ----- .. 
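One practical effect of the filename property defined above: the stored path is now always a pathlib.Path (or None for objects that never touched disk). A minimal sketch, using a hypothetical file name:

    from pathlib import Path

    import mne

    evoked = mne.read_evokeds("sub-01-ave.fif", condition=0)  # hypothetical file
    assert isinstance(evoked.filename, Path)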
versionadded:: 0.24 """ + # Avoid circular import + from .io.base import _get_ch_factors + picks = _picks_to_idx(self.info, picks, "all", exclude=()) start, stop = self._handle_tmin_tmax(tmin, tmax) @@ -201,8 +267,17 @@ def get_data(self, picks=None, units=None, tmin=None, tmax=None): return data @verbose - def apply_function(self, fun, picks=None, dtype=None, n_jobs=None, - verbose=None, **kwargs): + def apply_function( + self, + fun, + picks=None, + dtype=None, + n_jobs=None, + channel_wise=True, + *, + verbose=None, + **kwargs, + ): """Apply a function to a subset of channels. %(applyfun_summary_evoked)s @@ -214,6 +289,9 @@ def apply_function(self, fun, picks=None, dtype=None, n_jobs=None, %(dtype_applyfun)s %(n_jobs)s Ignored if ``channel_wise=False`` as the workload is split across channels. + %(channel_wise_applyfun)s + + .. versionadded:: 1.6 %(verbose)s %(kwargs_fun)s @@ -222,30 +300,65 @@ def apply_function(self, fun, picks=None, dtype=None, n_jobs=None, self : instance of Evoked The evoked object with transformed data. """ - _check_preload(self, 'evoked.apply_function') + _check_preload(self, "evoked.apply_function") picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) if not callable(fun): - raise ValueError('fun needs to be a function') + raise ValueError("fun needs to be a function") data_in = self._data if dtype is not None and dtype != self._data.dtype: self._data = self._data.astype(dtype) - # check the dimension of the incoming evoked data - _check_option('evoked.ndim', self._data.ndim, [2]) + args = getfullargspec(fun).args + getfullargspec(fun).kwonlyargs + if channel_wise is False: + if ("ch_idx" in args) or ("ch_name" in args): + raise ValueError( + "apply_function cannot access ch_idx or ch_name " + "when channel_wise=False" + ) + if "ch_idx" in args: + logger.info("apply_function requested to access ch_idx") + if "ch_name" in args: + logger.info("apply_function requested to access ch_name") - parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs) - if n_jobs == 1: - # modify data inplace to save memory - for idx in picks: - self._data[idx, :] = _check_fun(fun, data_in[idx, :], **kwargs) + # check the dimension of the incoming evoked data + _check_option("evoked.ndim", self._data.ndim, [2]) + + if channel_wise: + parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs) + if n_jobs == 1: + # modify data inplace to save memory + for ch_idx in picks: + if "ch_idx" in args: + kwargs.update(ch_idx=ch_idx) + if "ch_name" in args: + kwargs.update(ch_name=self.info["ch_names"][ch_idx]) + self._data[ch_idx, :] = _check_fun( + fun, data_in[ch_idx, :], **kwargs + ) + else: + # use parallel function + data_picks_new = parallel( + p_fun( + fun, + data_in[ch_idx, :], + **kwargs, + **{ + k: v + for k, v in [ + ("ch_name", self.info["ch_names"][ch_idx]), + ("ch_idx", ch_idx), + ] + if k in args + }, + ) + for ch_idx in picks + ) + for run_idx, ch_idx in enumerate(picks): + self._data[ch_idx, :] = data_picks_new[run_idx] else: - # use parallel function - data_picks_new = parallel(p_fun( - fun, data_in[p, :], **kwargs) for p in picks) - for pp, p in enumerate(picks): - self._data[p, :] = data_picks_new[pp] + self._data[picks, :] = _check_fun(fun, data_in[picks, :], **kwargs) return self @@ -271,11 +384,12 @@ def apply_baseline(self, baseline=(None, 0), *, verbose=None): .. 
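To make the reworked apply_function above concrete: with the default channel_wise=True the function runs once per picked channel and may optionally receive ch_idx/ch_name, whereas channel_wise=False hands it the full 2-D array in a single call. A sketch that assumes an existing evoked with EEG channels; the transformations themselves are arbitrary:

    def flip_odd_channels(data, ch_idx=None):
        # called once per channel; ch_idx is injected because it appears
        # in the function signature
        return -data if ch_idx % 2 else data

    evoked.apply_function(flip_odd_channels, picks="eeg")

    # channel_wise=False: one call with the (n_picked_channels, n_times) array
    evoked.apply_function(
        lambda d: d - d.mean(axis=-1, keepdims=True),
        picks="eeg",
        channel_wise=False,
    )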
versionadded:: 0.13.0 """ - baseline = _check_baseline(baseline, times=self.times, - sfreq=self.info['sfreq']) + baseline = _check_baseline(baseline, times=self.times, sfreq=self.info["sfreq"]) if self.baseline is not None and baseline is None: - raise ValueError('The data has already been baseline-corrected. ' - 'Cannot remove existing baseline correction.') + raise ValueError( + "The data has already been baseline-corrected. " + "Cannot remove existing baseline correction." + ) elif baseline is None: # Do not rescale logger.info(_log_rescale(None)) @@ -310,7 +424,7 @@ def save(self, fname, *, overwrite=False, verbose=None): write_evokeds(fname, self, overwrite=overwrite) @verbose - def export(self, fname, fmt='auto', *, overwrite=False, verbose=None): + def export(self, fname, fmt="auto", *, overwrite=False, verbose=None): """Export Evoked to external formats. %(export_fmt_support_evoked)s @@ -331,6 +445,7 @@ def export(self, fname, fmt='auto', *, overwrite=False, verbose=None): %(export_warning_note_evoked)s """ from .export import export_evokeds + export_evokeds(fname, self, fmt, overwrite=overwrite, verbose=verbose) def __repr__(self): # noqa: D105 @@ -340,140 +455,362 @@ def __repr__(self): # noqa: D105 comment += "..." else: comment = self.comment - s = "'%s' (%s, N=%s)" % (comment, self.kind, self.nave) - s += ", %0.5g – %0.5g s" % (self.times[0], self.times[-1]) - s += ', baseline ' + s = f"'{comment}' ({self.kind}, N={self.nave})" + s += f", {self.times[0]:0.5g} – {self.times[-1]:0.5g} s" + s += ", baseline " if self.baseline is None: - s += 'off' + s += "off" else: - s += f'{self.baseline[0]:g} – {self.baseline[1]:g} s' + s += f"{self.baseline[0]:g} – {self.baseline[1]:g} s" if self.baseline != _check_baseline( - self.baseline, times=self.times, sfreq=self.info['sfreq'], - on_baseline_outside_data='adjust'): - s += ' (baseline period was cropped after baseline correction)' - s += ", %s ch" % self.data.shape[0] - s += ", ~%s" % (sizeof_fmt(self._size),) - return "" % s + self.baseline, + times=self.times, + sfreq=self.info["sfreq"], + on_baseline_outside_data="adjust", + ): + s += " (baseline period was cropped after baseline correction)" + s += f", {self.data.shape[0]} ch" + s += f", ~{sizeof_fmt(self._size)}" + return f"" @repr_html def _repr_html_(self): - from .html_templates import repr_templates_env - if self.baseline is None: - baseline = 'off' - else: - baseline = tuple([f'{b:.3f}' for b in self.baseline]) - baseline = f'{baseline[0]} – {baseline[1]} s' - - t = repr_templates_env.get_template('evoked.html.jinja') - t = t.render(evoked=self, baseline=baseline) + t = _get_html_template("repr", "evoked.html.jinja") + t = t.render( + inst=self, + filenames=( + [Path(self.filename).name] + if getattr(self, "filename", None) is not None + else None + ), + ) return t @property def ch_names(self): """Channel names.""" - return self.info['ch_names'] + return self.info["ch_names"] @copy_function_doc_to_method_doc(plot_evoked) - def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None, - xlim='tight', proj=False, hline=None, units=None, scalings=None, - titles=None, axes=None, gfp=False, window_title=None, - spatial_colors='auto', zorder='unsorted', selectable=True, - noise_cov=None, time_unit='s', sphere=None, *, highlight=None, - verbose=None): + def plot( + self, + picks=None, + exclude="bads", + unit=True, + show=True, + ylim=None, + xlim="tight", + proj=False, + hline=None, + units=None, + scalings=None, + titles=None, + axes=None, + gfp=False, + 
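Usage of the surrounding methods is unchanged by the reformatting; for orientation, a short sketch with hypothetical file names (note that passing baseline=None to remove an existing correction still raises, per the check above):

    evoked.apply_baseline((-0.2, 0.0))  # subtract the mean of [-200, 0] ms per channel
    evoked.save("sub-01_task-aud-ave.fif", overwrite=True)
    # evoked.export("sub-01_task-aud.mff", fmt="mff")  # needs the optional mffpy backend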
window_title=None, + spatial_colors="auto", + zorder="unsorted", + selectable=True, + noise_cov=None, + time_unit="s", + sphere=None, + *, + highlight=None, + verbose=None, + ): return plot_evoked( - self, picks=picks, exclude=exclude, unit=unit, show=show, - ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units, - scalings=scalings, titles=titles, axes=axes, gfp=gfp, - window_title=window_title, spatial_colors=spatial_colors, - zorder=zorder, selectable=selectable, noise_cov=noise_cov, - time_unit=time_unit, sphere=sphere, highlight=highlight, - verbose=verbose) + self, + picks=picks, + exclude=exclude, + unit=unit, + show=show, + ylim=ylim, + proj=proj, + xlim=xlim, + hline=hline, + units=units, + scalings=scalings, + titles=titles, + axes=axes, + gfp=gfp, + window_title=window_title, + spatial_colors=spatial_colors, + zorder=zorder, + selectable=selectable, + noise_cov=noise_cov, + time_unit=time_unit, + sphere=sphere, + highlight=highlight, + verbose=verbose, + ) @copy_function_doc_to_method_doc(plot_evoked_image) - def plot_image(self, picks=None, exclude='bads', unit=True, show=True, - clim=None, xlim='tight', proj=False, units=None, - scalings=None, titles=None, axes=None, cmap='RdBu_r', - colorbar=True, mask=None, mask_style=None, - mask_cmap='Greys', mask_alpha=.25, time_unit='s', - show_names=None, group_by=None, sphere=None): + def plot_image( + self, + picks=None, + exclude="bads", + unit=True, + show=True, + clim=None, + xlim="tight", + proj=False, + units=None, + scalings=None, + titles=None, + axes=None, + cmap="RdBu_r", + colorbar=True, + mask=None, + mask_style=None, + mask_cmap="Greys", + mask_alpha=0.25, + time_unit="s", + show_names=None, + group_by=None, + sphere=None, + ): return plot_evoked_image( - self, picks=picks, exclude=exclude, unit=unit, show=show, - clim=clim, xlim=xlim, proj=proj, units=units, scalings=scalings, - titles=titles, axes=axes, cmap=cmap, colorbar=colorbar, mask=mask, - mask_style=mask_style, mask_cmap=mask_cmap, mask_alpha=mask_alpha, - time_unit=time_unit, show_names=show_names, group_by=group_by, - sphere=sphere) + self, + picks=picks, + exclude=exclude, + unit=unit, + show=show, + clim=clim, + xlim=xlim, + proj=proj, + units=units, + scalings=scalings, + titles=titles, + axes=axes, + cmap=cmap, + colorbar=colorbar, + mask=mask, + mask_style=mask_style, + mask_cmap=mask_cmap, + mask_alpha=mask_alpha, + time_unit=time_unit, + show_names=show_names, + group_by=group_by, + sphere=sphere, + ) @copy_function_doc_to_method_doc(plot_evoked_topo) - def plot_topo(self, layout=None, layout_scale=0.945, color=None, - border='none', ylim=None, scalings=None, title=None, - proj=False, vline=[0.0], fig_background=None, - merge_grads=False, legend=True, axes=None, - background_color='w', noise_cov=None, exclude='bads', - show=True): - """ + def plot_topo( + self, + layout=None, + layout_scale=0.945, + color=None, + border="none", + ylim=None, + scalings=None, + title=None, + proj=False, + vline=(0.0,), + fig_background=None, + merge_grads=False, + legend=True, + axes=None, + background_color="w", + noise_cov=None, + exclude="bads", + select=False, + show=True, + ): + """. + Notes ----- .. 
versionadded:: 0.10.0 """ return plot_evoked_topo( - self, layout=layout, layout_scale=layout_scale, - color=color, border=border, ylim=ylim, scalings=scalings, - title=title, proj=proj, vline=vline, fig_background=fig_background, - merge_grads=merge_grads, legend=legend, axes=axes, - background_color=background_color, noise_cov=noise_cov, - exclude=exclude, show=show) + self, + layout=layout, + layout_scale=layout_scale, + color=color, + border=border, + ylim=ylim, + scalings=scalings, + title=title, + proj=proj, + vline=vline, + fig_background=fig_background, + merge_grads=merge_grads, + legend=legend, + axes=axes, + background_color=background_color, + noise_cov=noise_cov, + exclude=exclude, + select=select, + show=show, + ) @copy_function_doc_to_method_doc(plot_evoked_topomap) def plot_topomap( - self, times="auto", *, average=None, ch_type=None, scalings=None, - proj=False, sensors=True, show_names=False, mask=None, - mask_params=None, contours=6, outlines='head', sphere=None, - image_interp=_INTERPOLATION_DEFAULT, - extrapolate=_EXTRAPOLATE_DEFAULT, border=_BORDER_DEFAULT, res=64, - size=1, cmap=None, vlim=(None, None), cnorm=None, colorbar=True, - cbar_fmt='%3.1f', units=None, axes=None, time_unit='s', - time_format=None, nrows=1, ncols='auto', show=True): + self, + times="auto", + *, + average=None, + ch_type=None, + scalings=None, + proj=False, + sensors=True, + show_names=False, + mask=None, + mask_params=None, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap=None, + vlim=(None, None), + cnorm=None, + colorbar=True, + cbar_fmt="%3.1f", + units=None, + axes=None, + time_unit="s", + time_format=None, + nrows=1, + ncols="auto", + show=True, + ): return plot_evoked_topomap( - self, times=times, ch_type=ch_type, vlim=vlim, cmap=cmap, - cnorm=cnorm, sensors=sensors, colorbar=colorbar, scalings=scalings, - units=units, res=res, size=size, cbar_fmt=cbar_fmt, - time_unit=time_unit, time_format=time_format, proj=proj, show=show, - show_names=show_names, mask=mask, mask_params=mask_params, - outlines=outlines, contours=contours, image_interp=image_interp, - average=average, axes=axes, extrapolate=extrapolate, sphere=sphere, - border=border, nrows=nrows, ncols=ncols) + self, + times=times, + ch_type=ch_type, + vlim=vlim, + cmap=cmap, + cnorm=cnorm, + sensors=sensors, + colorbar=colorbar, + scalings=scalings, + units=units, + res=res, + size=size, + cbar_fmt=cbar_fmt, + time_unit=time_unit, + time_format=time_format, + proj=proj, + show=show, + show_names=show_names, + mask=mask, + mask_params=mask_params, + outlines=outlines, + contours=contours, + image_interp=image_interp, + average=average, + axes=axes, + extrapolate=extrapolate, + sphere=sphere, + border=border, + nrows=nrows, + ncols=ncols, + ) @copy_function_doc_to_method_doc(plot_evoked_field) - def plot_field(self, surf_maps, time=None, time_label='t = %0.0f ms', - n_jobs=None, fig=None, vmax=None, n_contours=21, - *, interaction='http://github.com/terrain', verbose=None): - return plot_evoked_field(self, surf_maps, time=time, - time_label=time_label, n_jobs=n_jobs, - fig=fig, vmax=vmax, n_contours=n_contours, - interaction=interaction, verbose=verbose) + def plot_field( + self, + surf_maps, + time=None, + time_label="t = %0.0f ms", + n_jobs=None, + fig=None, + vmax=None, + n_contours=21, + *, + show_density=True, + alpha=None, + interpolation="nearest", + 
interaction="http://github.com/terrain", + time_viewer="auto", + verbose=None, + ): + return plot_evoked_field( + self, + surf_maps, + time=time, + time_label=time_label, + n_jobs=n_jobs, + fig=fig, + vmax=vmax, + n_contours=n_contours, + show_density=show_density, + alpha=alpha, + interpolation=interpolation, + interaction=interaction, + time_viewer=time_viewer, + verbose=verbose, + ) @copy_function_doc_to_method_doc(plot_evoked_white) - def plot_white(self, noise_cov, show=True, rank=None, time_unit='s', - sphere=None, axes=None, verbose=None): + def plot_white( + self, + noise_cov, + show=True, + rank=None, + time_unit="s", + sphere=None, + axes=None, + *, + spatial_colors="auto", + verbose=None, + ): return plot_evoked_white( - self, noise_cov=noise_cov, rank=rank, show=show, - time_unit=time_unit, sphere=sphere, axes=axes, verbose=verbose) + self, + noise_cov=noise_cov, + rank=rank, + show=show, + time_unit=time_unit, + sphere=sphere, + axes=axes, + spatial_colors=spatial_colors, + verbose=verbose, + ) @copy_function_doc_to_method_doc(plot_evoked_joint) - def plot_joint(self, times="peaks", title='', picks=None, - exclude='bads', show=True, ts_args=None, - topomap_args=None): - return plot_evoked_joint(self, times=times, title=title, picks=picks, - exclude=exclude, show=show, ts_args=ts_args, - topomap_args=topomap_args) + def plot_joint( + self, + times="peaks", + title="", + picks=None, + exclude="bads", + show=True, + ts_args=None, + topomap_args=None, + ): + return plot_evoked_joint( + self, + times=times, + title=title, + picks=picks, + exclude=exclude, + show=show, + ts_args=ts_args, + topomap_args=topomap_args, + ) @fill_doc - def animate_topomap(self, ch_type=None, times=None, frame_rate=None, - butterfly=False, blit=True, show=True, time_unit='s', - sphere=None, *, image_interp=_INTERPOLATION_DEFAULT, - extrapolate=_EXTRAPOLATE_DEFAULT, vmin=None, vmax=None, - verbose=None): + def animate_topomap( + self, + ch_type=None, + times=None, + frame_rate=None, + butterfly=False, + blit=True, + show=True, + time_unit="s", + sphere=None, + *, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + vmin=None, + vmax=None, + verbose=None, + ): """Make animation of evoked data as topomap timeseries. The animation can be paused/resumed with left mouse button. @@ -531,12 +868,23 @@ def animate_topomap(self, ch_type=None, times=None, frame_rate=None, .. versionadded:: 0.12.0 """ return _topomap_animation( - self, ch_type=ch_type, times=times, frame_rate=frame_rate, - butterfly=butterfly, blit=blit, show=show, time_unit=time_unit, - sphere=sphere, image_interp=image_interp, - extrapolate=extrapolate, vmin=vmin, vmax=vmax, verbose=verbose) + self, + ch_type=ch_type, + times=times, + frame_rate=frame_rate, + butterfly=butterfly, + blit=blit, + show=show, + time_unit=time_unit, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + vmin=vmin, + vmax=vmax, + verbose=verbose, + ) - def as_type(self, ch_type='grad', mode='fast'): + def as_type(self, ch_type="grad", mode="fast"): """Compute virtual evoked using interpolated fields. .. Warning:: Using virtual evoked to compute inverse can yield @@ -566,6 +914,7 @@ def as_type(self, ch_type='grad', mode='fast'): .. 
versionadded:: 0.9.0 """ from .forward import _as_meg_type_inst + return _as_meg_type_inst(self, ch_type=ch_type, mode=mode) @fill_doc @@ -613,14 +962,23 @@ def __neg__(self): out = self.copy() out.data *= -1 - if out.comment is not None and ' + ' in out.comment: - out.comment = f'({out.comment})' # multiple conditions in evoked - out.comment = f'- {out.comment or "unknown"}' + if out.comment is not None and " + " in out.comment: + out.comment = f"({out.comment})" # multiple conditions in evoked + out.comment = f"- {out.comment or 'unknown'}" return out - def get_peak(self, ch_type=None, tmin=None, tmax=None, - mode='abs', time_as_index=False, merge_grads=False, - return_amplitude=False): + def get_peak( + self, + ch_type=None, + tmin=None, + tmax=None, + mode="abs", + time_as_index=False, + merge_grads=False, + return_amplitude=False, + *, + strict=True, + ): """Get location and latency of peak amplitude. Parameters @@ -647,6 +1005,12 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, If True, return also the amplitude at the maximum response. .. versionadded:: 0.16 + strict : bool + If True, raise an error if values are all positive when detecting + a minimum (mode='neg'), or all negative when detecting a maximum + (mode='pos'). Defaults to True. + + .. versionadded:: 1.7 Returns ------- @@ -661,11 +1025,19 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, .. versionadded:: 0.16 """ # noqa: E501 - supported = ('mag', 'grad', 'eeg', 'seeg', 'dbs', 'ecog', 'misc', - 'None') + _FNIRS_CH_TYPES_SPLIT + supported = ( + "mag", + "grad", + "eeg", + "seeg", + "dbs", + "ecog", + "misc", + "None", + ) + _FNIRS_CH_TYPES_SPLIT types_used = self.get_channel_types(unique=True, only_data_chs=True) - _check_option('ch_type', str(ch_type), supported) + _check_option("ch_type", str(ch_type), supported) if ch_type is not None and ch_type not in types_used: raise ValueError( @@ -675,29 +1047,30 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, elif len(types_used) > 1 and ch_type is None: raise RuntimeError( 'Multiple data channel types found. Please pass the "ch_type" ' - 'parameter.' + "parameter." 
) if merge_grads: - if ch_type != 'grad': + if ch_type != "grad": raise ValueError('Channel type must be "grad" for merge_grads') - elif mode == 'neg': - raise ValueError('Negative mode (mode=neg) does not make ' - 'sense with merge_grads=True') + elif mode == "neg": + raise ValueError( + "Negative mode (mode=neg) does not make sense with merge_grads=True" + ) meg = eeg = misc = seeg = dbs = ecog = fnirs = False picks = None - if ch_type in ('mag', 'grad'): + if ch_type in ("mag", "grad"): meg = ch_type - elif ch_type == 'eeg': + elif ch_type == "eeg": eeg = True - elif ch_type == 'misc': + elif ch_type == "misc": misc = True - elif ch_type == 'seeg': + elif ch_type == "seeg": seeg = True - elif ch_type == 'dbs': + elif ch_type == "dbs": dbs = True - elif ch_type == 'ecog': + elif ch_type == "ecog": ecog = True elif ch_type in _FNIRS_CH_TYPES_SPLIT: fnirs = ch_type @@ -706,9 +1079,17 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, if merge_grads: picks = _pair_grad_sensors(self.info, topomap_coords=False) else: - picks = pick_types(self.info, meg=meg, eeg=eeg, misc=misc, - seeg=seeg, ecog=ecog, ref_meg=False, - fnirs=fnirs, dbs=dbs) + picks = pick_types( + self.info, + meg=meg, + eeg=eeg, + misc=misc, + seeg=seeg, + ecog=ecog, + ref_meg=False, + fnirs=fnirs, + dbs=dbs, + ) data = self.data ch_names = self.ch_names @@ -718,13 +1099,18 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, if merge_grads: data, _ = _merge_ch_data(data, ch_type, []) - ch_names = [ch_name[:-1] + 'X' for ch_name in ch_names[::2]] - - ch_idx, time_idx, max_amp = _get_peak(data, self.times, tmin, - tmax, mode) + ch_names = [ch_name[:-1] + "X" for ch_name in ch_names[::2]] + + ch_idx, time_idx, max_amp = _get_peak( + data, + self.times, + tmin, + tmax, + mode, + strict=strict, + ) - out = (ch_names[ch_idx], time_idx if time_as_index else - self.times[time_idx]) + out = (ch_names[ch_idx], time_idx if time_as_index else self.times[time_idx]) if return_amplitude: out += (max_amp,) @@ -732,9 +1118,22 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, return out @verbose - def compute_psd(self, method='multitaper', fmin=0, fmax=np.inf, tmin=None, - tmax=None, picks=None, proj=False, *, n_jobs=1, - verbose=None, **method_kw): + def compute_psd( + self, + method="multitaper", + fmin=0, + fmax=np.inf, + tmin=None, + tmax=None, + picks=None, + proj=False, + remove_dc=True, + exclude=(), + *, + n_jobs=1, + verbose=None, + **method_kw, + ): """Perform spectral analysis on sensor data. 
Parameters @@ -745,6 +1144,8 @@ def compute_psd(self, method='multitaper', fmin=0, fmax=np.inf, tmin=None, %(tmin_tmax_psd)s %(picks_good_data_noref)s %(proj_psd)s + %(remove_dc)s + %(exclude_psd)s %(n_jobs)s %(verbose)s %(method_kw_psd)s @@ -766,17 +1167,110 @@ def compute_psd(self, method='multitaper', fmin=0, fmax=np.inf, tmin=None, self._set_legacy_nfft_default(tmin, tmax, method, method_kw) return Spectrum( - self, method=method, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, - picks=picks, proj=proj, reject_by_annotation=False, n_jobs=n_jobs, - verbose=verbose, **method_kw) + self, + method=method, + fmin=fmin, + fmax=fmax, + tmin=tmin, + tmax=tmax, + picks=picks, + exclude=exclude, + proj=proj, + remove_dc=remove_dc, + reject_by_annotation=False, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) @verbose - def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None, picks=None, - proj=False, *, method='auto', average=False, dB=True, - estimate='auto', xscale='linear', area_mode='std', - area_alpha=0.33, color='black', line_alpha=None, - spatial_colors=True, sphere=None, exclude='bads', ax=None, - show=True, n_jobs=1, verbose=None, **method_kw): + def compute_tfr( + self, + method, + freqs, + *, + tmin=None, + tmax=None, + picks=None, + proj=False, + output="power", + decim=1, + n_jobs=None, + verbose=None, + **method_kw, + ): + """Compute a time-frequency representation of evoked data. + + Parameters + ---------- + %(method_tfr)s + %(freqs_tfr)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(proj_psd)s + %(output_compute_tfr)s + %(decim_tfr)s + %(n_jobs)s + %(verbose)s + %(method_kw_tfr)s + + Returns + ------- + tfr : instance of AverageTFR + The time-frequency-resolved power estimates of the data. + + Notes + ----- + .. versionadded:: 1.7 + + References + ---------- + .. footbibliography:: + """ + _check_option("output", output, ("power", "phase", "complex")) + method_kw["output"] = output + return AverageTFR( + inst=self, + method=method, + freqs=freqs, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + decim=decim, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def plot_psd( + self, + fmin=0, + fmax=np.inf, + tmin=None, + tmax=None, + picks=None, + proj=False, + *, + method="auto", + average=False, + dB=True, + estimate="power", + xscale="linear", + area_mode="std", + area_alpha=0.33, + color="black", + line_alpha=None, + spatial_colors=True, + sphere=None, + exclude="bads", + ax=None, + show=True, + n_jobs=1, + verbose=None, + **method_kw, + ): """%(plot_psd_doc)s. 
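Pulling the last few hunks together (the strict option on get_peak, the added remove_dc/exclude arguments on compute_psd, and the new compute_tfr method), a hedged usage sketch; it assumes an evoked with EEG data, and the frequency grid and method choices are arbitrary:

    import numpy as np

    ch_name, latency, amplitude = evoked.get_peak(
        ch_type="eeg", tmin=0.05, tmax=0.25, mode="pos",
        return_amplitude=True, strict=False,  # don't error if all values are negative
    )

    spectrum = evoked.compute_psd(method="welch", fmin=1.0, fmax=40.0, remove_dc=True)
    tfr = evoked.compute_tfr("morlet", freqs=np.arange(4.0, 40.0, 2.0), output="power")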
Parameters @@ -820,17 +1314,44 @@ def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None, picks=None, %(notes_plot_psd_meth)s """ return super().plot_psd( - fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, picks=picks, proj=proj, - reject_by_annotation=False, method=method, average=average, dB=dB, - estimate=estimate, xscale=xscale, area_mode=area_mode, - area_alpha=area_alpha, color=color, line_alpha=line_alpha, - spatial_colors=spatial_colors, sphere=sphere, exclude=exclude, - ax=ax, show=show, n_jobs=n_jobs, verbose=verbose, **method_kw) + fmin=fmin, + fmax=fmax, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + reject_by_annotation=False, + method=method, + average=average, + dB=dB, + estimate=estimate, + xscale=xscale, + area_mode=area_mode, + area_alpha=area_alpha, + color=color, + line_alpha=line_alpha, + spatial_colors=spatial_colors, + sphere=sphere, + exclude=exclude, + ax=ax, + show=show, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) @verbose - def to_data_frame(self, picks=None, index=None, - scalings=None, copy=True, long_format=False, - time_format=None, *, verbose=None): + def to_data_frame( + self, + picks=None, + index=None, + scalings=None, + copy=True, + long_format=False, + time_format=None, + *, + verbose=None, + ): """Export data in tabular structure as a pandas DataFrame. Channels are converted to columns in the DataFrame. By default, @@ -857,12 +1378,12 @@ def to_data_frame(self, picks=None, index=None, # check pandas once here, instead of in each private utils function pd = _check_pandas_installed() # noqa # arg checking - valid_index_args = ['time'] - valid_time_formats = ['ms', 'timedelta'] + valid_index_args = ["time"] + valid_time_formats = ["ms", "timedelta"] index = _check_pandas_index_arguments(index, valid_index_args) time_format = _check_time_format(time_format, valid_time_formats) # get data - picks = _picks_to_idx(self.info, picks, 'all', exclude=()) + picks = _picks_to_idx(self.info, picks, "all", exclude=()) data = self.data[picks, :] times = self.times data = data.T @@ -871,11 +1392,12 @@ def to_data_frame(self, picks=None, index=None, data = _scale_dataframe_data(self, data, picks, scalings) # prepare extra columns / multiindex mindex = list() - times = _convert_times(self, times, time_format) - mindex.append(('time', times)) + times = _convert_times(times, time_format, meas_date=self.info["meas_date"]) + mindex.append(("time", times)) # build DataFrame - df = _build_data_frame(self, data, picks, long_format, mindex, index, - default_index=['time']) + df = _build_data_frame( + self, data, picks, long_format, mindex, index, default_index=["time"] + ) return df @@ -920,26 +1442,40 @@ class EvokedArray(Evoked): """ @verbose - def __init__(self, data, info, tmin=0., comment='', nave=1, kind='average', - baseline=None, *, verbose=None): # noqa: D102 + def __init__( + self, + data, + info, + tmin=0.0, + comment="", + nave=1, + kind="average", + baseline=None, + *, + verbose=None, + ): dtype = np.complex128 if np.iscomplexobj(data) else np.float64 data = np.asanyarray(data, dtype=dtype) if data.ndim != 2: - raise ValueError('Data must be a 2D array of shape (n_channels, ' - 'n_samples), got shape %s' % (data.shape,)) + raise ValueError( + "Data must be a 2D array of shape (n_channels, n_samples), got shape " + f"{data.shape}" + ) - if len(info['ch_names']) != np.shape(data)[0]: - raise ValueError('Info (%s) and data (%s) must have same number ' - 'of channels.' 
% (len(info['ch_names']), - np.shape(data)[0])) + if len(info["ch_names"]) != np.shape(data)[0]: + raise ValueError( + f"Info ({len(info['ch_names'])}) and data ({np.shape(data)[0]}) must " + "have same number of channels." + ) self.data = data - self.first = int(round(tmin * info['sfreq'])) + self.first = int(round(tmin * info["sfreq"])) self.last = self.first + np.shape(data)[-1] - 1 - self._set_times(np.arange(self.first, self.last + 1, - dtype=np.float64) / info['sfreq']) + self._set_times( + np.arange(self.first, self.last + 1, dtype=np.float64) / info["sfreq"] + ) self._raw_times = self.times.copy() self._decim = 1 self.info = info.copy() # do not modify original info @@ -951,13 +1487,15 @@ def __init__(self, data, info, tmin=0., comment='', nave=1, kind='average', self._projector = None _validate_type(self.kind, "str", "kind") if self.kind not in _aspect_dict: - raise ValueError('unknown kind "%s", should be "average" or ' - '"standard_error"' % (self.kind,)) + raise ValueError( + f'unknown kind "{self.kind}", should be "average" or "standard_error"' + ) self._aspect_kind = _aspect_dict[self.kind] self.baseline = baseline if self.baseline is not None: # omit log msg if not baselining self.apply_baseline(self.baseline) + self._filename = None def _get_entries(fid, evoked_node, allow_maxshield=False): @@ -965,33 +1503,34 @@ def _get_entries(fid, evoked_node, allow_maxshield=False): comments = list() aspect_kinds = list() for ev in evoked_node: - for k in range(ev['nent']): - my_kind = ev['directory'][k].kind - pos = ev['directory'][k].pos + for k in range(ev["nent"]): + my_kind = ev["directory"][k].kind + pos = ev["directory"][k].pos if my_kind == FIFF.FIFF_COMMENT: tag = read_tag(fid, pos) comments.append(tag.data) my_aspect = _get_aspect(ev, allow_maxshield)[0] - for k in range(my_aspect['nent']): - my_kind = my_aspect['directory'][k].kind - pos = my_aspect['directory'][k].pos + for k in range(my_aspect["nent"]): + my_kind = my_aspect["directory"][k].kind + pos = my_aspect["directory"][k].pos if my_kind == FIFF.FIFF_ASPECT_KIND: tag = read_tag(fid, pos) - aspect_kinds.append(int(tag.data)) + aspect_kinds.append(int(tag.data.item())) comments = np.atleast_1d(comments) aspect_kinds = np.atleast_1d(aspect_kinds) if len(comments) != len(aspect_kinds) or len(comments) == 0: fid.close() - raise ValueError('Dataset names in FIF file ' - 'could not be found.') + raise ValueError("Dataset names in FIF file could not be found.") t = [_aspect_rev[a] for a in aspect_kinds] - t = ['"' + c + '" (' + tt + ')' for tt, c in zip(t, comments)] - t = '\n'.join(t) + t = ['"' + c + '" (' + tt + ")" for tt, c in zip(t, comments)] + t = "\n".join(t) return comments, aspect_kinds, t def _get_aspect(evoked, allow_maxshield): """Get Evoked data aspect.""" + from .io.base import _check_maxshield + is_maxshield = False aspect = dir_tree_find(evoked, FIFF.FIFFB_ASPECT) if len(aspect) == 0: @@ -999,7 +1538,7 @@ def _get_aspect(evoked, allow_maxshield): aspect = dir_tree_find(evoked, FIFF.FIFFB_IAS_ASPECT) is_maxshield = True if len(aspect) > 1: - logger.info('Multiple data aspects found. Taking first one.') + logger.info("Multiple data aspects found. Taking first one.") return aspect[0], is_maxshield @@ -1018,17 +1557,14 @@ def _check_evokeds_ch_names_times(all_evoked): for ii, ev in enumerate(all_evoked[1:]): if ev.ch_names != ch_names: if set(ev.ch_names) != set(ch_names): - raise ValueError( - "%s and %s do not contain the same channels." 
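The shape and channel-count checks just rewritten above are easiest to read alongside a concrete construction; a minimal sketch with simulated data:

    import numpy as np

    import mne

    info = mne.create_info(["EEG 001", "EEG 002"], sfreq=1000.0, ch_types="eeg")
    data = 1e-6 * np.random.default_rng(0).standard_normal((2, 500))  # (n_channels, n_samples)
    evoked = mne.EvokedArray(data, info, tmin=-0.1, comment="simulated", nave=20)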
% (evoked, - ev)) + raise ValueError(f"{evoked} and {ev} do not contain the same channels.") else: warn("Order of channels differs, reordering channels ...") ev = ev.copy() ev.reorder_channels(ch_names) all_evoked[ii + 1] = ev if not np.max(np.abs(ev.times - evoked.times)) < 1e-7: - raise ValueError("%s and %s do not contain the same time instants" - % (evoked, ev)) + raise ValueError(f"{evoked} and {ev} do not contain the same time instants") return all_evoked @@ -1041,7 +1577,7 @@ def combine_evoked(all_evoked, weights): .. Warning:: Other than cases like simple subtraction mentioned above (where all - weights are -1 or 1), if you provide numeric weights instead of using + weights are ``-1`` or ``1``), if you provide numeric weights instead of using ``'equal'`` or ``'nave'``, the resulting `~mne.Evoked` object's ``.nave`` attribute (which is used to scale noise covariance when applying the inverse operator) may not be suitable for inverse imaging. @@ -1050,7 +1586,7 @@ def combine_evoked(all_evoked, weights): ---------- all_evoked : list of Evoked The evoked datasets. - weights : list of float | 'equal' | 'nave' + weights : list of float | ``'equal'`` | ``'nave'`` The weights to apply to the data of each evoked instance, or a string describing the weighting strategy to apply: ``'nave'`` computes sum-to-one weights proportional to each object's ``nave`` attribute; @@ -1067,8 +1603,8 @@ def combine_evoked(all_evoked, weights): """ naves = np.array([evk.nave for evk in all_evoked], float) if isinstance(weights, str): - _check_option('weights', weights, ['nave', 'equal']) - if weights == 'nave': + _check_option("weights", weights, ["nave", "equal"]) + if weights == "nave": weights = naves / naves.sum() else: weights = np.ones_like(naves) / len(naves) @@ -1076,7 +1612,7 @@ def combine_evoked(all_evoked, weights): weights = np.array(weights, float) if weights.ndim != 1 or weights.size != len(all_evoked): - raise ValueError('weights must be the same size as all_evoked') + raise ValueError("weights must be the same size as all_evoked") # cf. https://en.wikipedia.org/wiki/Weighted_arithmetic_mean, section on # "weighted sample variance". The variance of a weighted sample mean is: @@ -1088,7 +1624,7 @@ def combine_evoked(all_evoked, weights): # σ² = w₁² / nave₁ + w₂² / nave₂ + ... + wₙ² / naveₙ # # And our resulting nave is the reciprocal of this: - new_nave = 1. / np.sum(weights ** 2 / naves) + new_nave = 1.0 / np.sum(weights**2 / naves) # This general formula is equivalent to formulae in Matti's manual # (pp 128-129), where: # new_nave = sum(naves) when weights='nave' and @@ -1098,37 +1634,44 @@ def combine_evoked(all_evoked, weights): evoked = all_evoked[0].copy() # use union of bad channels - bads = list(set(b for e in all_evoked for b in e.info['bads'])) - evoked.info['bads'] = bads + bads = list(set(b for e in all_evoked for b in e.info["bads"])) + evoked.info["bads"] = bads evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked)) evoked.nave = new_nave - comment = '' + comment = "" for idx, (w, e) in enumerate(zip(weights, all_evoked)): # pick sign - sign = '' if w >= 0 else '-' + sign = "" if w >= 0 else "-" # format weight - weight = '' if np.isclose(abs(w), 1.) 
else f'{abs(w):0.3f}' + weight = "" if np.isclose(abs(w), 1.0) else f"{abs(w):0.3f}" # format multiplier - multiplier = ' × ' if weight else '' + multiplier = " × " if weight else "" # format comment - if e.comment is not None and ' + ' in e.comment: # multiple conditions - this_comment = f'({e.comment})' + if e.comment is not None and " + " in e.comment: # multiple conditions + this_comment = f"({e.comment})" else: - this_comment = f'{e.comment or "unknown"}' + this_comment = f"{e.comment or 'unknown'}" # assemble everything if idx == 0: - comment += f'{sign}{weight}{multiplier}{this_comment}' + comment += f"{sign}{weight}{multiplier}{this_comment}" else: - comment += f' {sign or "+"} {weight}{multiplier}{this_comment}' + comment += f" {sign or '+'} {weight}{multiplier}{this_comment}" # special-case: combine_evoked([e1, -e2], [1, -1]) - evoked.comment = comment.replace(' - - ', ' + ') + evoked.comment = comment.replace(" - - ", " + ") return evoked @verbose -def read_evokeds(fname, condition=None, baseline=None, kind='average', - proj=True, allow_maxshield=False, verbose=None): +def read_evokeds( + fname, + condition=None, + baseline=None, + kind="average", + proj=True, + allow_maxshield=False, + verbose=None, +) -> list[Evoked] | Evoked: """Read evoked dataset(s). Parameters @@ -1153,7 +1696,7 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average', baseline correction, but merely omit the optional, additional baseline correction. kind : str - Either 'average' or 'standard_error', the type of data to read. + Either ``'average'`` or ``'standard_error'``, the type of data to read. proj : bool If False, available projectors won't be applied to the data. allow_maxshield : bool | str (default False) @@ -1161,7 +1704,7 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average', active compensation (MaxShield). Data recorded with MaxShield should generally not be loaded directly, but should first be processed using SSS/tSSS to remove the compensation signals that may also affect brain - activity. Can also be "yes" to load without eliciting a warning. + activity. Can also be ``"yes"`` to load without eliciting a warning. %(verbose)s Returns @@ -1182,10 +1725,9 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average', saving, this will be reflected in their ``baseline`` attribute after reading. """ - fname = str(_check_fname(fname, overwrite="read", must_exist=True)) - check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz', - '_ave.fif', '_ave.fif.gz')) - logger.info('Reading %s ...' 
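To make the nave bookkeeping above tangible: for two evokeds with nave of 10 and 30, weights='nave' gives weights [0.25, 0.75] and a combined nave of 1 / (0.25**2/10 + 0.75**2/30) = 40 (the sum, as the comments derive), while weights='equal' gives 1 / (0.25/10 + 0.25/30) = 30. A short sketch, assuming two existing Evoked objects evk_a and evk_b:

    import mne

    difference = mne.combine_evoked([evk_a, evk_b], weights=[1, -1])  # simple subtraction
    grand_avg = mne.combine_evoked([evk_a, evk_b], weights="nave")    # nave-weighted mean
    print(grand_avg.nave)  # follows 1 / sum(w_i**2 / nave_i)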
% fname) + fname = _check_fname(fname, overwrite="read", must_exist=True) + check_fname(fname, "evoked", ("-ave.fif", "-ave.fif.gz", "_ave.fif", "_ave.fif.gz")) + logger.info(f"Reading {fname} ...") return_list = True if condition is None: evoked_node = _get_evoked_node(fname) @@ -1196,16 +1738,23 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average', out = [] for c in condition: - evoked = Evoked(fname, c, kind=kind, proj=proj, - allow_maxshield=allow_maxshield, - verbose=verbose) + evoked = Evoked( + fname, + c, + kind=kind, + proj=proj, + allow_maxshield=allow_maxshield, + verbose=verbose, + ) if baseline is None and evoked.baseline is None: logger.info(_log_rescale(None)) elif baseline is None and evoked.baseline is not None: # Don't touch an existing baseline bmin, bmax = evoked.baseline - logger.info(f'Loaded Evoked data is baseline-corrected ' - f'(baseline: [{bmin:g}, {bmax:g}] s)') + logger.info( + f"Loaded Evoked data is baseline-corrected " + f"(baseline: [{bmin:g}, {bmax:g}] s)" + ) else: evoked.apply_baseline(baseline) out.append(evoked) @@ -1213,10 +1762,10 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average', return out if return_list else out[0] -def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): +def _read_evoked(fname, condition=None, kind="average", allow_maxshield=False): """Read evoked data from a FIF file.""" if fname is None: - raise ValueError('No evoked filename specified') + raise ValueError("No evoked filename specified") f, tree, _ = fiff_open(fname) with f as fid: @@ -1226,47 +1775,46 @@ def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): # Locate the data of interest processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA) if len(processed) == 0: - raise ValueError('Could not find processed data') + raise ValueError("Could not find processed data") evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED) if len(evoked_node) == 0: - raise ValueError('Could not find evoked data') + raise ValueError("Could not find evoked data") # find string-based entry if isinstance(condition, str): if kind not in _aspect_dict.keys(): - raise ValueError('kind must be "average" or ' - '"standard_error"') + raise ValueError('kind must be "average" or "standard_error"') - comments, aspect_kinds, t = _get_entries(fid, evoked_node, - allow_maxshield) - goods = (np.in1d(comments, [condition]) & - np.in1d(aspect_kinds, [_aspect_dict[kind]])) + comments, aspect_kinds, t = _get_entries(fid, evoked_node, allow_maxshield) + goods = np.isin(comments, [condition]) & np.isin( + aspect_kinds, [_aspect_dict[kind]] + ) found_cond = np.where(goods)[0] if len(found_cond) != 1: - raise ValueError('condition "%s" (%s) not found, out of ' - 'found datasets:\n%s' - % (condition, kind, t)) + raise ValueError( + f'condition "{condition}" ({kind}) not found, out of found ' + f"datasets:\n{t}" + ) condition = found_cond[0] elif condition is None: if len(evoked_node) > 1: - _, _, conditions = _get_entries(fid, evoked_node, - allow_maxshield) - raise TypeError("Evoked file has more than one " - "condition, the condition parameters " - "must be specified from:\n%s" % conditions) + _, _, conditions = _get_entries(fid, evoked_node, allow_maxshield) + raise TypeError( + "Evoked file has more than one condition, the condition parameters " + f"must be specified from:\n{conditions}" + ) else: condition = 0 if condition >= len(evoked_node) or condition < 0: - raise ValueError('Data set selector out of range') + raise 
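For reference, read_evokeds as wrapped up above is used like this; the file and condition names are hypothetical, and the file name follows the enforced -ave.fif / _ave.fif convention (its counterpart write_evokeds appears further down in this diff):

    import mne

    evokeds = mne.read_evokeds("sub-01-ave.fif")  # list of all conditions
    aud = mne.read_evokeds(
        "sub-01-ave.fif", condition="auditory", baseline=(None, 0)
    )  # one condition, re-baselined
    mne.write_evokeds("sub-01_clean-ave.fif", evokeds, overwrite=True)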
ValueError("Data set selector out of range") my_evoked = evoked_node[condition] # Identify the aspects with info._unlock(): - my_aspect, info['maxshield'] = _get_aspect(my_evoked, - allow_maxshield) + my_aspect, info["maxshield"] = _get_aspect(my_evoked, allow_maxshield) # Now find the data in the evoked block nchan = 0 @@ -1274,42 +1822,42 @@ def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): chs = [] baseline = bmin = bmax = None comment = last = first = first_time = nsamp = None - for k in range(my_evoked['nent']): - my_kind = my_evoked['directory'][k].kind - pos = my_evoked['directory'][k].pos + for k in range(my_evoked["nent"]): + my_kind = my_evoked["directory"][k].kind + pos = my_evoked["directory"][k].pos if my_kind == FIFF.FIFF_COMMENT: tag = read_tag(fid, pos) comment = tag.data elif my_kind == FIFF.FIFF_FIRST_SAMPLE: tag = read_tag(fid, pos) - first = int(tag.data) + first = int(tag.data.item()) elif my_kind == FIFF.FIFF_LAST_SAMPLE: tag = read_tag(fid, pos) - last = int(tag.data) + last = int(tag.data.item()) elif my_kind == FIFF.FIFF_NCHAN: tag = read_tag(fid, pos) - nchan = int(tag.data) + nchan = int(tag.data.item()) elif my_kind == FIFF.FIFF_SFREQ: tag = read_tag(fid, pos) - sfreq = float(tag.data) + sfreq = float(tag.data.item()) elif my_kind == FIFF.FIFF_CH_INFO: tag = read_tag(fid, pos) chs.append(tag.data) elif my_kind == FIFF.FIFF_FIRST_TIME: tag = read_tag(fid, pos) - first_time = float(tag.data) + first_time = float(tag.data.item()) elif my_kind == FIFF.FIFF_NO_SAMPLES: tag = read_tag(fid, pos) - nsamp = int(tag.data) + nsamp = int(tag.data.item()) elif my_kind == FIFF.FIFF_MNE_BASELINE_MIN: tag = read_tag(fid, pos) - bmin = float(tag.data) + bmin = float(tag.data.item()) elif my_kind == FIFF.FIFF_MNE_BASELINE_MAX: tag = read_tag(fid, pos) - bmax = float(tag.data) + bmax = float(tag.data.item()) if comment is None: - comment = 'No comment' + comment = "No comment" if bmin is not None or bmax is not None: # None's should've been replaced with floats @@ -1319,51 +1867,55 @@ def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): # Local channel information? if nchan > 0: if chs is None: - raise ValueError('Local channel information was not found ' - 'when it was expected.') + raise ValueError( + "Local channel information was not found when it was expected." + ) if len(chs) != nchan: - raise ValueError('Number of channels and number of ' - 'channel definitions are different') + raise ValueError( + "Number of channels and number of channel definitions are different" + ) ch_names_mapping = _read_extended_ch_info(chs, my_evoked, fid) - info['chs'] = chs - info['bads'][:] = _rename_list(info['bads'], ch_names_mapping) - logger.info(' Found channel information in evoked data. ' - 'nchan = %d' % nchan) + info["chs"] = chs + info["bads"][:] = _rename_list(info["bads"], ch_names_mapping) + logger.info( + f" Found channel information in evoked data. 
nchan = {nchan}" + ) if sfreq > 0: - info['sfreq'] = sfreq + info["sfreq"] = sfreq # Read the data in the aspect block nave = 1 epoch = [] - for k in range(my_aspect['nent']): - kind = my_aspect['directory'][k].kind - pos = my_aspect['directory'][k].pos + for k in range(my_aspect["nent"]): + kind = my_aspect["directory"][k].kind + pos = my_aspect["directory"][k].pos if kind == FIFF.FIFF_COMMENT: tag = read_tag(fid, pos) comment = tag.data elif kind == FIFF.FIFF_ASPECT_KIND: tag = read_tag(fid, pos) - aspect_kind = int(tag.data) + aspect_kind = int(tag.data.item()) elif kind == FIFF.FIFF_NAVE: tag = read_tag(fid, pos) - nave = int(tag.data) + nave = int(tag.data.item()) elif kind == FIFF.FIFF_EPOCH: tag = read_tag(fid, pos) epoch.append(tag) nepoch = len(epoch) - if nepoch != 1 and nepoch != info['nchan']: - raise ValueError('Number of epoch tags is unreasonable ' - '(nepoch = %d nchan = %d)' - % (nepoch, info['nchan'])) + if nepoch != 1 and nepoch != info["nchan"]: + raise ValueError( + "Number of epoch tags is unreasonable " + f"(nepoch = {nepoch} nchan = {info['nchan']})" + ) if nepoch == 1: # Only one epoch data = epoch[0].data # May need a transpose if the number of channels is one - if data.shape[1] == 1 and info['nchan'] == 1: + if data.shape[1] == 1 and info["nchan"] == 1: data = data.T else: # Put the old style epochs together @@ -1374,37 +1926,42 @@ def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): data = data.astype(np.complex128) if first_time is not None and nsamp is not None: - times = first_time + np.arange(nsamp) / info['sfreq'] + times = first_time + np.arange(nsamp) / info["sfreq"] elif first is not None: nsamp = last - first + 1 - times = np.arange(first, last + 1) / info['sfreq'] + times = np.arange(first, last + 1) / info["sfreq"] else: - raise RuntimeError('Could not read time parameters') + raise RuntimeError("Could not read time parameters") del first, last if nsamp is not None and data.shape[1] != nsamp: - raise ValueError('Incorrect number of samples (%d instead of ' - ' %d)' % (data.shape[1], nsamp)) - logger.info(' Found the data of interest:') - logger.info(' t = %10.2f ... %10.2f ms (%s)' - % (1000 * times[0], 1000 * times[-1], comment)) - if info['comps'] is not None: - logger.info(' %d CTF compensation matrices available' - % len(info['comps'])) - logger.info(' nave = %d - aspect type = %d' - % (nave, aspect_kind)) + raise ValueError( + f"Incorrect number of samples ({data.shape[1]} instead of {nsamp})" + ) + logger.info(" Found the data of interest:") + logger.info( + f" t = {1000 * times[0]:10.2f} ... {1000 * times[-1]:10.2f} ms (" + f"{comment})" + ) + if info["comps"] is not None: + logger.info( + f" {len(info['comps'])} CTF compensation matrices available" + ) + logger.info(f" nave = {nave} - aspect type = {aspect_kind}") # Calibrate - cals = np.array([info['chs'][k]['cal'] * - info['chs'][k].get('scale', 1.0) - for k in range(info['nchan'])]) + cals = np.array( + [ + info["chs"][k]["cal"] * info["chs"][k].get("scale", 1.0) + for k in range(info["nchan"]) + ] + ) data *= cals[:, np.newaxis] return info, nave, aspect_kind, comment, times, data, baseline @verbose -def write_evokeds(fname, evoked, *, on_mismatch='raise', overwrite=False, - verbose=None): +def write_evokeds(fname, evoked, *, on_mismatch="raise", overwrite=False, verbose=None): """Write an evoked dataset to a file. 
Parameters @@ -1437,27 +1994,26 @@ def write_evokeds(fname, evoked, *, on_mismatch='raise', overwrite=False, _write_evokeds(fname, evoked, on_mismatch=on_mismatch, overwrite=overwrite) -def _write_evokeds(fname, evoked, check=True, *, on_mismatch='raise', - overwrite=False): +def _write_evokeds(fname, evoked, check=True, *, on_mismatch="raise", overwrite=False): """Write evoked data.""" from .dipole import DipoleFixed # avoid circular import fname = _check_fname(fname=fname, overwrite=overwrite) if check: - check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz', - '_ave.fif', '_ave.fif.gz')) + check_fname( + fname, "evoked", ("-ave.fif", "-ave.fif.gz", "_ave.fif", "_ave.fif.gz") + ) - if not isinstance(evoked, (list, tuple)): + if not isinstance(evoked, list | tuple): evoked = [evoked] warned = False # Create the file and save the essentials with start_and_end_file(fname) as fid: - start_block(fid, FIFF.FIFFB_MEAS) write_id(fid, FIFF.FIFF_BLOCK_ID) - if evoked[0].info['meas_id'] is not None: - write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info['meas_id']) + if evoked[0].info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info["meas_id"]) # Write measurement info write_meas_info(fid, evoked[0].info) @@ -1466,9 +2022,12 @@ def _write_evokeds(fname, evoked, check=True, *, on_mismatch='raise', start_block(fid, FIFF.FIFFB_PROCESSED_DATA) for ei, e in enumerate(evoked): if ei: - _ensure_infos_match(info1=evoked[0].info, info2=e.info, - name=f'evoked[{ei}]', - on_mismatch=on_mismatch) + _ensure_infos_match( + info1=evoked[0].info, + info2=e.info, + name=f"evoked[{ei}]", + on_mismatch=on_mismatch, + ) start_block(fid, FIFF.FIFFB_EVOKED) # Comment is optional @@ -1488,7 +2047,7 @@ def _write_evokeds(fname, evoked, check=True, *, on_mismatch='raise', write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax) # The evoked data itself - if e.info.get('maxshield'): + if e.info.get("maxshield"): aspect = FIFF.FIFFB_IAS_ASPECT else: aspect = FIFF.FIFFB_ASPECT @@ -1498,17 +2057,20 @@ def _write_evokeds(fname, evoked, check=True, *, on_mismatch='raise', # convert nave to integer to comply with FIFF spec nave_int = int(round(e.nave)) if nave_int != e.nave and not warned: - warn('converting "nave" to integer before saving evoked; this ' - 'can have a minor effect on the scale of source ' - 'estimates that are computed using "nave".') + warn( + 'converting "nave" to integer before saving evoked; this ' + "can have a minor effect on the scale of source " + 'estimates that are computed using "nave".' + ) warned = True write_int(fid, FIFF.FIFF_NAVE, nave_int) del nave_int - decal = np.zeros((e.info['nchan'], 1)) - for k in range(e.info['nchan']): - decal[k] = 1.0 / (e.info['chs'][k]['cal'] * - e.info['chs'][k].get('scale', 1.0)) + decal = np.zeros((e.info["nchan"], 1)) + for k in range(e.info["nchan"]): + decal[k] = 1.0 / ( + e.info["chs"][k]["cal"] * e.info["chs"][k].get("scale", 1.0) + ) if np.iscomplexobj(e.data): write_function = write_complex_float_matrix @@ -1523,7 +2085,7 @@ def _write_evokeds(fname, evoked, check=True, *, on_mismatch='raise', end_block(fid, FIFF.FIFFB_MEAS) -def _get_peak(data, times, tmin=None, tmax=None, mode='abs'): +def _get_peak(data, times, tmin=None, tmax=None, mode="abs", *, strict=True): """Get feature-index and time of maximum signal from 2D array. Note. This is a 'getter', not a 'finder'. For non-evoked type @@ -1544,6 +2106,10 @@ def _get_peak(data, times, tmin=None, tmax=None, mode='abs'): values will be considered. 
If 'neg' only negative values will be considered. If 'abs' absolute values will be considered. Defaults to 'abs'. + strict : bool + If True, raise an error if values are all positive when detecting + a minimum (mode='neg'), or all negative when detecting a maximum + (mode='pos'). Defaults to True. Returns ------- @@ -1554,7 +2120,7 @@ def _get_peak(data, times, tmin=None, tmax=None, mode='abs'): max_amp : float Amplitude of the maximum response. """ - _check_option('mode', mode, ['abs', 'neg', 'pos']) + _check_option("mode", mode, ["abs", "neg", "pos"]) if tmin is None: tmin = times[0] @@ -1563,36 +2129,37 @@ def _get_peak(data, times, tmin=None, tmax=None, mode='abs'): if tmin < times.min() or tmax > times.max(): if tmin < times.min(): - param_name = 'tmin' + param_name = "tmin" param_val = tmin else: - param_name = 'tmax' + param_name = "tmax" param_val = tmax raise ValueError( - f'{param_name} ({param_val}) is out of bounds. It must be ' - f'between {times.min()} and {times.max()}' + f"{param_name} ({param_val}) is out of bounds. It must be " + f"between {times.min()} and {times.max()}" ) elif tmin > tmax: - raise ValueError(f'tmin ({tmin}) must be <= tmax ({tmax})') + raise ValueError(f"tmin ({tmin}) must be <= tmax ({tmax})") time_win = (times >= tmin) & (times <= tmax) mask = np.ones_like(data).astype(bool) mask[:, time_win] = False maxfun = np.argmax - if mode == 'pos': - if not np.any(data[~mask] > 0): - raise ValueError('No positive values encountered. Cannot ' - 'operate in pos mode.') - elif mode == 'neg': - if not np.any(data[~mask] < 0): - raise ValueError('No negative values encountered. Cannot ' - 'operate in neg mode.') + if mode == "pos": + if strict and not np.any(data[~mask] > 0): + raise ValueError( + "No positive values encountered. Cannot operate in pos mode." + ) + elif mode == "neg": + if strict and not np.any(data[~mask] < 0): + raise ValueError( + "No negative values encountered. Cannot operate in neg mode." + ) maxfun = np.argmin - masked_index = np.ma.array(np.abs(data) if mode == 'abs' else data, - mask=mask) + masked_index = np.ma.array(np.abs(data) if mode == "abs" else data, mask=mask) max_loc, max_time = np.unravel_index(maxfun(masked_index), data.shape) diff --git a/mne/export/__init__.py b/mne/export/__init__.py index 9d7abae0aff..7a07df656d6 100644 --- a/mne/export/__init__.py +++ b/mne/export/__init__.py @@ -1,2 +1,8 @@ -from ._export import export_raw, export_epochs, export_evokeds -from ._egimff import export_evokeds_mff +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Functions for exporting data to non-FIF formats.""" +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/export/__init__.pyi b/mne/export/__init__.pyi new file mode 100644 index 00000000000..6cd2a374ef3 --- /dev/null +++ b/mne/export/__init__.pyi @@ -0,0 +1,3 @@ +__all__ = ["export_epochs", "export_evokeds", "export_evokeds_mff", "export_raw"] +from ._egimff import export_evokeds_mff +from ._export import export_epochs, export_evokeds, export_raw diff --git a/mne/export/_brainvision.py b/mne/export/_brainvision.py index 319b2314864..6503c540f41 100644 --- a/mne/export/_brainvision.py +++ b/mne/export/_brainvision.py @@ -1,13 +1,159 @@ -# -*- coding: utf-8 -*- -# Authors: MNE Developers -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
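# --- illustrative aside (not part of the patch) -----------------------------
# The mne/export/__init__.py change above switches the package to lazy_loader:
# submodule imports are deferred until an exported name is first accessed, so
# "import mne.export" stays cheap. A minimal sketch of the same pattern for a
# hypothetical package "mypkg" (the package and function names here are
# assumptions, used only for illustration):
#
# mypkg/__init__.py
import lazy_loader as lazy

__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)

# mypkg/__init__.pyi  (read by lazy_loader at runtime and by type checkers)
#     __all__ = ["export_raw"]
#     from ._export import export_raw
#
# Accessing mypkg.export_raw then triggers the real import of mypkg._export on
# first use.
# -----------------------------------------------------------------------------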
import os +from pathlib import Path + +import numpy as np + +from mne.channels.channels import _unit2human +from mne.io.constants import FIFF +from mne.utils import _check_pybv_installed, warn -from ..utils import _check_pybv_installed _check_pybv_installed() -from pybv._export import _export_mne_raw # noqa: E402 +from pybv import write_brainvision # noqa: E402 + + +def _export_mne_raw(*, raw, fname, events=None, overwrite=False): + """Export raw data from MNE-Python. + + Parameters + ---------- + raw : mne.io.Raw + The raw data to export. + fname : str | pathlib.Path + The name of the file where raw data will be exported to. Must end with + ``".vhdr"``, and accompanying *.vmrk* and *.eeg* files will be written inside + the same directory. + events : np.ndarray | None + Events to be written to the marker file (*.vmrk*). If array, must be in + `MNE-Python format `_. If + ``None`` (default), events will be written based on ``raw.annotations``. + overwrite : bool + Whether or not to overwrite existing data. Defaults to ``False``. + + """ + # prepare file location + if not str(fname).endswith(".vhdr"): + raise ValueError("`fname` must have the '.vhdr' extension for BrainVision.") + fname = Path(fname) + folder_out = fname.parents[0] + fname_base = fname.stem + + # prepare data from raw + data = raw.get_data() # gets data starting from raw.first_samp + sfreq = raw.info["sfreq"] # in Hz + meas_date = raw.info["meas_date"] # datetime.datetime + ch_names = raw.ch_names + + # write voltage units as micro-volts and all other units without scaling + # write units that we don't know as n/a + unit = [] + for ch in raw.info["chs"]: + if ch["unit"] == FIFF.FIFF_UNIT_V: + unit.append("µV") + elif ch["unit"] == FIFF.FIFF_UNIT_CEL: + unit.append("°C") + else: + unit.append(_unit2human.get(ch["unit"], "n/a")) + unit = [u if u != "NA" else "n/a" for u in unit] + + # enforce conversion to float32 format + # XXX: Could add a feature that checks data and optimizes `unit`, `resolution`, and + # `format` so that raw.orig_format could be retained if reasonable. + if raw.orig_format != "single": + warn( + f"Encountered data in '{raw.orig_format}' format. Converting to float32.", + RuntimeWarning, + ) + + fmt = "binary_float32" + resolution = 0.1 + + # handle events + # if we got an ndarray, this is in MNE-Python format + msg = "`events` must be None or array in MNE-Python format." 
+ if events is not None: + # subtract raw.first_samp because brainvision marks events starting from the + # first available data point and ignores the raw.first_samp + assert isinstance(events, np.ndarray), msg + assert events.ndim == 2, msg + assert events.shape[-1] == 3, msg + events[:, 0] -= raw.first_samp + events = events[:, [0, 2]] # reorder for pybv required order + else: # else, prepare pybv style events from raw.annotations + events = _mne_annots2pybv_events(raw) + + # no information about reference channels in mne currently + ref_ch_names = None + + # write to BrainVision + write_brainvision( + data=data, + sfreq=sfreq, + ch_names=ch_names, + ref_ch_names=ref_ch_names, + fname_base=fname_base, + folder_out=folder_out, + overwrite=overwrite, + events=events, + resolution=resolution, + unit=unit, + fmt=fmt, + meas_date=meas_date, + ) + + +def _mne_annots2pybv_events(raw): + """Convert mne Annotations to pybv events.""" + # check that raw.annotations.orig_time is the same as raw.info["meas_date"] + # so that onsets are relative to the first sample + # (after further correction for first_time) + if raw.annotations and raw.info["meas_date"] != raw.annotations.orig_time: + raise ValueError( + "Annotations must have the same orig_time as raw.info['meas_date']" + ) + events = [] + for annot in raw.annotations: + # handle onset and duration: seconds to sample, relative to + # raw.first_samp / raw.first_time + onset = annot["onset"] - raw.first_time + onset = raw.time_as_index(onset).astype(int)[0] + duration = int(annot["duration"] * raw.info["sfreq"]) + + # triage type and description + # defaults to type="Comment" and the full description + etype = "Comment" + description = annot["description"] + for start in ["Stimulus/S", "Response/R", "Comment/"]: + if description.startswith(start): + etype = start.split("/")[0] + description = description.replace(start, "") + break + + if etype in ["Stimulus", "Response"] and description.strip().isdigit(): + description = int(description.strip()) + else: + # if cannot convert to int, we must use this as "Comment" + etype = "Comment" + + event_dict = dict( + onset=onset, # in samples + duration=duration, # in samples + description=description, + type=etype, + ) + + if "ch_names" in annot: + # handle channels + channels = list(annot["ch_names"]) + event_dict["channels"] = channels + + # add a "pybv" event + events += [event_dict] + + return events def _export_raw(fname, raw, overwrite): diff --git a/mne/export/_edf.py b/mne/export/_edf.py deleted file mode 100644 index 752e8c81caa..00000000000 --- a/mne/export/_edf.py +++ /dev/null @@ -1,289 +0,0 @@ -# -*- coding: utf-8 -*- -# Authors: MNE Developers -# -# License: BSD-3-Clause - -from contextlib import contextmanager -import numpy as np - -from ..utils import _check_edflib_installed, warn -_check_edflib_installed() -from EDFlib.edfwriter import EDFwriter # noqa: E402 - - -def _try_to_set_value(header, key, value, channel_index=None): - """Set key/value pairs in EDF header.""" - # all EDFLib set functions are set - # for example "setPatientName()" - func_name = f'set{key}' - func = getattr(header, func_name) - - # some setter functions are indexed by channels - if channel_index is None: - return_val = func(value) - else: - return_val = func(channel_index, value) - - # a nonzero return value indicates an error - if return_val != 0: - raise RuntimeError(f"Setting {key} with {value} " - f"returned an error value " - f"{return_val}.") - - -@contextmanager -def _auto_close(fid): - # try to close the handle 
no matter what - try: - yield fid - finally: - try: - fid.close() - except Exception: - pass # we did our best - - -def _export_raw(fname, raw, physical_range, add_ch_type): - """Export Raw objects to EDF files. - - TODO: if in future the Info object supports transducer or - technician information, allow writing those here. - """ - # scale to save data in EDF - phys_dims = 'uV' - - # get EEG-related data in uV - units = dict(eeg='uV', ecog='uV', seeg='uV', eog='uV', ecg='uV', emg='uV', - bio='uV', dbs='uV') - - digital_min = -32767 - digital_max = 32767 - file_type = EDFwriter.EDFLIB_FILETYPE_EDFPLUS - - # load data first - raw.load_data() - - # remove extra STI channels - orig_ch_types = raw.get_channel_types() - drop_chs = [] - if 'stim' in orig_ch_types: - stim_index = np.argwhere(np.array(orig_ch_types) == 'stim') - stim_index = np.atleast_1d(stim_index.squeeze()).tolist() - drop_chs.extend([raw.ch_names[idx] for idx in stim_index]) - - # Add warning if any channel types are not voltage based. - # Users are expected to only export data that is voltage based, - # such as EEG, ECoG, sEEG, etc. - # Non-voltage channels are dropped by the export function. - # Note: we can write these other channels, such as 'misc' - # but these are simply a "catch all" for unknown or undesired - # channels. - voltage_types = list(units) + ['stim', 'misc'] - non_voltage_ch = [ch not in voltage_types for ch in orig_ch_types] - if any(non_voltage_ch): - warn(f"Non-voltage channels detected: {non_voltage_ch}. MNE-Python's " - 'EDF exporter only supports voltage-based channels, because the ' - 'EDF format cannot accommodate much of the accompanying data ' - 'necessary for channel types like MEG and fNIRS (channel ' - 'orientations, coordinate frame transforms, etc). You can ' - 'override this restriction by setting those channel types to ' - '"misc" but no guarantees are made of the fidelity of that ' - 'approach.') - - ch_names = [ch for ch in raw.ch_names if ch not in drop_chs] - ch_types = np.array(raw.get_channel_types(picks=ch_names)) - n_channels = len(ch_names) - n_times = raw.n_times - - # Sampling frequency in EDF only supports integers, so to allow for - # float sampling rates from Raw, we adjust the output sampling rate - # for all channels and the data record duration. - sfreq = raw.info['sfreq'] - if float(sfreq).is_integer(): - out_sfreq = int(sfreq) - data_record_duration = None - else: - out_sfreq = np.floor(sfreq).astype(int) - data_record_duration = int(np.around( - out_sfreq / sfreq, decimals=6) * 1e6) - - warn(f'Data has a non-integer sampling rate of {sfreq}; writing to ' - 'EDF format may cause a small change to sample times.') - - # get any filter information applied to the data - lowpass = raw.info['lowpass'] - highpass = raw.info['highpass'] - linefreq = raw.info['line_freq'] - filter_str_info = f"HP:{highpass}Hz LP:{lowpass}Hz N:{linefreq}Hz" - - # get the entire dataset in uV - data = raw.get_data(units=units, picks=ch_names) - - if physical_range == 'auto': - # get max and min for each channel type data - ch_types_phys_max = dict() - ch_types_phys_min = dict() - - for _type in np.unique(ch_types): - _picks = np.nonzero(ch_types == _type)[0] - _data = raw.get_data(units=units, picks=_picks) - ch_types_phys_max[_type] = _data.max() - ch_types_phys_min[_type] = _data.min() - else: - # get the physical min and max of the data in uV - # Physical ranges of the data in uV is usually set by the manufacturer - # and properties of the electrode. 
In general, physical max and min - # should be the clipping levels of the ADC input and they should be - # the same for all channels. For example, Nihon Kohden uses +3200 uV - # and -3200 uV for all EEG channels (which are the actual clipping - # levels of their input amplifiers & ADC). - # For full discussion, see: https://github.com/sccn/eeglab/issues/246 - pmin, pmax = physical_range[0], physical_range[1] - - # check that physical min and max is not exceeded - if data.max() > pmax: - warn( - f"The maximum μV of the data {data.max()} is " - f"more than the physical max passed in {pmax}.", - ) - if data.min() < pmin: - warn( - f"The minimum μV of the data {data.min()} is " - f"less than the physical min passed in {pmin}.", - ) - - # create instance of EDF Writer - with _auto_close(EDFwriter(fname, file_type, n_channels)) as hdl: - # set channel data - for idx, ch in enumerate(ch_names): - ch_type = ch_types[idx] - signal_label = f'{ch_type.upper()} {ch}' if add_ch_type else ch - if len(signal_label) > 16: - raise RuntimeError(f'Signal label for {ch} ({ch_type}) is ' - f'longer than 16 characters, which is not ' - f'supported in EDF. Please shorten the ' - f'channel name before exporting to EDF.') - - if physical_range == 'auto': - # take the channel type minimum and maximum - pmin = ch_types_phys_min[ch_type] - pmax = ch_types_phys_max[ch_type] - for key, val in [('PhysicalMaximum', pmax), - ('PhysicalMinimum', pmin), - ('DigitalMaximum', digital_max), - ('DigitalMinimum', digital_min), - ('PhysicalDimension', phys_dims), - ('SampleFrequency', out_sfreq), - ('SignalLabel', signal_label), - ('PreFilter', filter_str_info)]: - _try_to_set_value(hdl, key, val, channel_index=idx) - - # set patient info - subj_info = raw.info.get('subject_info') - if subj_info is not None: - birthday = subj_info.get('birthday') - - # get the full name of subject if available - first_name = subj_info.get('first_name') - last_name = subj_info.get('last_name') - first_name = first_name or '' - last_name = last_name or '' - joiner = '' - if len(first_name) and len(last_name): - joiner = ' ' - name = joiner.join([first_name, last_name]) - - hand = subj_info.get('hand') - sex = subj_info.get('sex') - - if birthday is not None: - if hdl.setPatientBirthDate(birthday[0], birthday[1], - birthday[2]) != 0: - raise RuntimeError( - f"Setting patient birth date to {birthday} " - f"returned an error") - for key, val in [('PatientName', name), - ('PatientGender', sex), - ('AdditionalPatientInfo', f'hand={hand}')]: - # EDFwriter compares integer encodings of sex and will - # raise a TypeError if value is None as returned by - # subj_info.get(key) if key is missing. 
- if val is not None: - _try_to_set_value(hdl, key, val) - - # set measurement date - meas_date = raw.info['meas_date'] - if meas_date: - subsecond = int(meas_date.microsecond / 100) - if hdl.setStartDateTime(year=meas_date.year, month=meas_date.month, - day=meas_date.day, hour=meas_date.hour, - minute=meas_date.minute, - second=meas_date.second, - subsecond=subsecond) != 0: - raise RuntimeError(f"Setting start date time {meas_date} " - f"returned an error") - - device_info = raw.info.get('device_info') - if device_info is not None: - device_type = device_info.get('type') - _try_to_set_value(hdl, 'Equipment', device_type) - - # set data record duration - if data_record_duration is not None: - _try_to_set_value(hdl, 'DataRecordDuration', data_record_duration) - - # compute number of data records to loop over - n_blocks = np.ceil(n_times / out_sfreq).astype(int) - - # increase the number of annotation signals if necessary - annots = raw.annotations - if annots is not None: - n_annotations = len(raw.annotations) - n_annot_chans = int(n_annotations / n_blocks) - if np.mod(n_annotations, n_blocks): - n_annot_chans += 1 - if n_annot_chans > 1: - hdl.setNumberOfAnnotationSignals(n_annot_chans) - - # Write each data record sequentially - for idx in range(n_blocks): - end_samp = (idx + 1) * out_sfreq - if end_samp > n_times: - end_samp = n_times - start_samp = idx * out_sfreq - - # then for each datarecord write each channel - for jdx in range(n_channels): - # create a buffer with sampling rate - buf = np.zeros(out_sfreq, np.float64, "C") - - # get channel data for this block - ch_data = data[jdx, start_samp:end_samp] - - # assign channel data to the buffer and write to EDF - buf[:len(ch_data)] = ch_data - err = hdl.writeSamples(buf) - if err != 0: - raise RuntimeError( - f"writeSamples() for channel{ch_names[jdx]} " - f"returned error: {err}") - - # there was an incomplete datarecord - if len(ch_data) != len(buf): - warn(f'EDF format requires equal-length data blocks, ' - f'so {(len(buf) - len(ch_data)) / sfreq} seconds of ' - 'zeros were appended to all channels when writing the ' - 'final block.') - - # write annotations - if annots is not None: - for desc, onset, duration in zip(raw.annotations.description, - raw.annotations.onset, - raw.annotations.duration): - # annotations are written in terms of 100 microseconds - onset = onset * 10000 - duration = duration * 10000 - if hdl.writeAnnotation(onset, duration, desc) != 0: - raise RuntimeError(f'writeAnnotation() returned an error ' - f'trying to write {desc} at {onset} ' - f'for {duration} seconds.') diff --git a/mne/export/_edf_bdf.py b/mne/export/_edf_bdf.py new file mode 100644 index 00000000000..fa4f9ebcf64 --- /dev/null +++ b/mne/export/_edf_bdf.py @@ -0,0 +1,281 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
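# --- illustrative aside (not part of the patch) -----------------------------
# Both the deleted EDFlib-based exporter above and the edfio-based one that
# follows describe each signal by a physical range plus a digital range
# (+/-32767 for 16-bit EDF, +/-8388607 for 24-bit BDF). A rough sketch of the
# quantization those ranges imply; the helper name and example values are
# assumptions, not code from either library:
import numpy as np


def _quantize_edf(x, pmin, pmax, dmin=-32767, dmax=32767):
    """Map physical values (uV) to EDF-style integer samples (approximate)."""
    gain = (pmax - pmin) / (dmax - dmin)  # physical units per digital step
    return np.round((x - pmin) / gain + dmin).astype(np.int16)


# With physical_range=(-3200, 3200) uV the resolution is roughly 0.1 uV per
# step, so a 100 uV sample lands near digital value 1024:
_quantize_edf(np.array([100.0]), -3200.0, 3200.0)
# -----------------------------------------------------------------------------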
+ +import datetime as dt +from collections.abc import Callable + +import numpy as np + +from mne.annotations import _sync_onset +from mne.utils import _check_edfio_installed, warn + +_check_edfio_installed() +from edfio import ( # noqa: E402 + Bdf, + BdfSignal, + Edf, + EdfAnnotation, + EdfSignal, + Patient, + Recording, +) + + +# copied from edfio (Apache license) +def _round_float_to_8_characters( + value: float, + round_func: Callable[[float], int], +) -> float: + if isinstance(value, int) or value.is_integer(): + return value + length = 8 + integer_part_length = str(value).find(".") + if integer_part_length == length: + return round_func(value) + factor = 10 ** (length - 1 - integer_part_length) + return round_func(value * factor) / factor + + +def _export_raw_edf_bdf(fname, raw, physical_range, add_ch_type, file_format): + """Export Raw objects to EDF/BDF files. + + Parameters + ---------- + fname : str + Output file name. + raw : instance of Raw + The raw instance to export. + physical_range : str or tuple + Physical range setting. + add_ch_type : bool + Whether to add channel type to signal label. + file_format : str + File format ("EDF" or "BDF"). + + Notes + ----- + TODO: if in future the Info object supports transducer or technician information, + allow writing those here. + """ + units = dict( + eeg="uV", ecog="uV", seeg="uV", eog="uV", ecg="uV", emg="uV", bio="uV", dbs="uV" + ) + + if file_format == "EDF": + digital_min, digital_max = -32767, 32767 # 16-bit + signal_class = EdfSignal + writer_class = Edf + else: # BDF + digital_min, digital_max = -8388607, 8388607 # 24-bit + signal_class = BdfSignal + writer_class = Bdf + + ch_types = np.array(raw.get_channel_types()) + + # load and prepare data + raw.load_data() + data = raw.get_data(units=units) + sfreq = raw.info["sfreq"] + pad_annotations = [] + + # Sampling frequency in EDF/BDF only supports integers, so to allow for float + # sampling rates from Raw, we adjust the output sampling rate for all channels and + # the data record duration. + if float(sfreq).is_integer(): + out_sfreq = int(sfreq) + data_record_duration = None + # make non-integer second durations work + if (pad_width := int(np.ceil(raw.n_times / sfreq) * sfreq - raw.n_times)) > 0: + warn( + f"{file_format} format requires equal-length data blocks, so " + f"{pad_width / sfreq:.3g} seconds of edge values were appended to all " + "channels when writing the final block." + ) + data = np.pad( + data, + ( + (0, 0), + (0, int(pad_width)), + ), + "edge", + ) + + pad_annotations.append( + EdfAnnotation( + raw.times[-1] + 1 / sfreq, pad_width / sfreq, "BAD_ACQ_SKIP" + ) + ) + else: + data_record_duration = _round_float_to_8_characters( + np.floor(sfreq) / sfreq, round + ) + out_sfreq = np.floor(sfreq) / data_record_duration + warn( + f"Data has a non-integer sampling rate of {sfreq}; writing to " + f"{file_format} format may cause a small change to sample times." 
+ ) + + # extract filter information + lowpass = raw.info["lowpass"] + highpass = raw.info["highpass"] + linefreq = raw.info["line_freq"] + filter_str_info = f"HP:{highpass}Hz LP:{lowpass}Hz" + if linefreq is not None: + filter_str_info += f" N:{linefreq}Hz" + + # compute physical range + if physical_range == "auto": + # get max and min for each channel type data + ch_types_phys_max = dict() + ch_types_phys_min = dict() + + for _type in np.unique(ch_types): + _picks = [n for n, t in zip(raw.ch_names, ch_types) if t == _type] + _data = raw.get_data(units=units, picks=_picks) + ch_types_phys_max[_type] = _data.max() + ch_types_phys_min[_type] = _data.min() + elif physical_range == "channelwise": + prange = None + else: + # get the physical min and max of the data in uV + # Physical ranges of the data in uV are usually set by the manufacturer and + # electrode properties. In general, physical min and max should be the clipping + # levels of the ADC input, and they should be the same for all channels. For + # example, Nihon Kohden uses ±3200 uV for all EEG channels (corresponding to the + # actual clipping levels of their input amplifiers & ADC). For a discussion, + # see https://github.com/sccn/eeglab/issues/246 + pmin, pmax = physical_range[0], physical_range[1] + + # check that physical min and max is not exceeded + if data.max() > pmax: + warn( + f"The maximum μV of the data {data.max()} is more than the physical max" + f" passed in {pmax}." + ) + if data.min() < pmin: + warn( + f"The minimum μV of the data {data.min()} is less than the physical min" + f" passed in {pmin}." + ) + data = np.clip(data, pmin, pmax) + prange = pmin, pmax + + # create signals + signals = [] + for idx, ch in enumerate(raw.ch_names): + ch_type = ch_types[idx] + signal_label = f"{ch_type.upper()} {ch}" if add_ch_type else ch + if len(signal_label) > 16: + raise RuntimeError( + f"Signal label for {ch} ({ch_type}) is longer than 16 characters, which" + f" is not supported by the {file_format} standard. Please shorten the " + f"channel name before exporting to {file_format}." 
+ ) + + if physical_range == "auto": # per channel type + pmin = ch_types_phys_min[ch_type] + pmax = ch_types_phys_max[ch_type] + if pmax == pmin: + pmax = pmin + 1 + prange = pmin, pmax + + signals.append( + signal_class( + data[idx], + out_sfreq, + label=signal_label, + transducer_type="", + physical_dimension="" if ch_type == "stim" else "uV", + physical_range=prange, + digital_range=(digital_min, digital_max), + prefiltering=filter_str_info, + ) + ) + + # create patient info + subj_info = raw.info.get("subject_info") + if subj_info is not None: + # get the full name of subject if available + first_name = subj_info.get("first_name", "") + middle_name = subj_info.get("middle_name", "") + last_name = subj_info.get("last_name", "") + name = "_".join(filter(None, [first_name, middle_name, last_name])) + + birthday = subj_info.get("birthday") + hand = subj_info.get("hand") + weight = subj_info.get("weight") + height = subj_info.get("height") + sex = subj_info.get("sex") + + additional_patient_info = [] + for key, value in [("height", height), ("weight", weight), ("hand", hand)]: + if value: + additional_patient_info.append(f"{key}={value}") + + patient = Patient( + code=subj_info.get("his_id") or "X", + sex={0: "X", 1: "M", 2: "F", None: "X"}[sex], + birthdate=birthday, + name=name or "X", + additional=additional_patient_info, + ) + else: + patient = None + + # create recording info + if (meas_date := raw.info["meas_date"]) is not None: + startdate = dt.date(meas_date.year, meas_date.month, meas_date.day) + starttime = dt.time( + meas_date.hour, meas_date.minute, meas_date.second, meas_date.microsecond + ) + else: + startdate = None + starttime = None + + device_info = raw.info.get("device_info") + if device_info is not None: + device_type = device_info.get("type") or "X" + recording = Recording(startdate=startdate, equipment_code=device_type) + else: + recording = Recording(startdate=startdate) + + # create annotations + annotations = [] + for desc, onset, duration, ch_names in zip( + raw.annotations.description, + # subtract raw.first_time because EDF/BDF marks events starting from the first + # available data point and ignores raw.first_time + _sync_onset(raw, raw.annotations.onset, inverse=False), + raw.annotations.duration, + raw.annotations.ch_names, + ): + if ch_names: + for ch_name in ch_names: + annotations.append( + EdfAnnotation(onset, duration, desc + f"@@{ch_name}") + ) + else: + annotations.append(EdfAnnotation(onset, duration, desc)) + + annotations.extend(pad_annotations) + + # write to file + writer_class( + signals=signals, + patient=patient, + recording=recording, + starttime=starttime, + data_record_duration=data_record_duration, + annotations=annotations, + ).write(fname) + + +def _export_raw_edf(fname, raw, physical_range, add_ch_type): + """Export Raw object to EDF.""" + _export_raw_edf_bdf(fname, raw, physical_range, add_ch_type, file_format="EDF") + + +def _export_raw_bdf(fname, raw, physical_range, add_ch_type): + """Export Raw object to BDF.""" + _export_raw_edf_bdf(fname, raw, physical_range, add_ch_type, file_format="BDF") diff --git a/mne/export/_eeglab.py b/mne/export/_eeglab.py index 93556b056c5..c73243b0311 100644 --- a/mne/export/_eeglab.py +++ b/mne/export/_eeglab.py @@ -1,14 +1,17 @@ -# -*- coding: utf-8 -*- -# Authors: MNE Developers -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
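# --- illustrative aside (not part of the patch) -----------------------------
# With the edfio-based writer in _edf_bdf.py above, EDF and BDF export both go
# through the same public entry points exercised by the tests further down. A
# hedged usage sketch; the channel names, data, and file names are assumptions
# chosen only for illustration:
import numpy as np

import mne

info = mne.create_info(["EEG1", "EEG2"], sfreq=256, ch_types="eeg")
raw = mne.io.RawArray(np.random.RandomState(0).randn(2, 2560) * 1e-5, info)
raw.export("example-raw.edf", physical_range="auto", add_ch_type=True)
raw.export("example-raw.bdf", overwrite=True)  # 24-bit BDF via the same path
# -----------------------------------------------------------------------------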
+ +from inspect import getfullargspec import numpy as np +from ..annotations import _sync_onset from ..utils import _check_eeglabio_installed + _check_eeglabio_installed() -import eeglabio.raw # noqa: E402 import eeglabio.epochs # noqa: E402 +import eeglabio.raw # noqa: E402 def _export_raw(fname, raw): @@ -16,20 +19,32 @@ def _export_raw(fname, raw): raw.load_data() # remove extra epoc and STI channels - drop_chs = ['epoc'] + drop_chs = ["epoc"] # filenames attribute of RawArray is filled with None - if raw.filenames[0] and not (raw.filenames[0].endswith('.fif')): - drop_chs.append('STI 014') + if raw.filenames[0] and raw.filenames[0].suffix != ".fif": + drop_chs.append("STI 014") ch_names = [ch for ch in raw.ch_names if ch not in drop_chs] - cart_coords = _get_als_coords_from_chs(raw.info['chs'], drop_chs) + cart_coords = _get_als_coords_from_chs(raw.info["chs"], drop_chs) - annotations = [raw.annotations.description, - raw.annotations.onset, - raw.annotations.duration] + if raw.annotations: + annotations = [ + raw.annotations.description, + # subtract raw.first_time because EEGLAB marks events starting from + # the first available data point and ignores raw.first_time + _sync_onset(raw, raw.annotations.onset, inverse=False), + raw.annotations.duration, + ] + else: + annotations = None eeglabio.raw.export_set( - fname, data=raw.get_data(picks=ch_names), sfreq=raw.info['sfreq'], - ch_names=ch_names, ch_locs=cart_coords, annotations=annotations) + fname, + data=raw.get_data(picks=ch_names), + sfreq=raw.info["sfreq"], + ch_names=ch_names, + ch_locs=cart_coords, + annotations=annotations, + ) def _export_epochs(fname, epochs): @@ -38,21 +53,37 @@ def _export_epochs(fname, epochs): epochs.load_data() # remove extra epoc and STI channels - drop_chs = ['epoc', 'STI 014'] + drop_chs = ["epoc", "STI 014"] ch_names = [ch for ch in epochs.ch_names if ch not in drop_chs] - cart_coords = _get_als_coords_from_chs(epochs.info['chs'], drop_chs) + cart_coords = _get_als_coords_from_chs(epochs.info["chs"], drop_chs) if epochs.annotations: - annot = [epochs.annotations.description, epochs.annotations.onset, - epochs.annotations.duration] + annot = [ + epochs.annotations.description, + epochs.annotations.onset, + epochs.annotations.duration, + ] else: annot = None + # https://github.com/jackz314/eeglabio/pull/18 + kwargs = dict() + if "epoch_indices" in getfullargspec(eeglabio.epochs.export_set).kwonlyargs: + kwargs["epoch_indices"] = epochs.selection + eeglabio.epochs.export_set( - fname, data=epochs.get_data(picks=ch_names), - sfreq=epochs.info['sfreq'], events=epochs.events, - tmin=epochs.tmin, tmax=epochs.tmax, ch_names=ch_names, - event_id=epochs.event_id, ch_locs=cart_coords, annotations=annot) + fname, + data=epochs.get_data(picks=ch_names), + sfreq=epochs.info["sfreq"], + events=epochs.events, + tmin=epochs.tmin, + tmax=epochs.tmax, + ch_names=ch_names, + event_id=epochs.event_id, + ch_locs=cart_coords, + annotations=annot, + **kwargs, + ) def _get_als_coords_from_chs(chs, drop_chs=None): @@ -64,8 +95,7 @@ def _get_als_coords_from_chs(chs, drop_chs=None): """ if drop_chs is None: drop_chs = [] - cart_coords = np.array([d['loc'][:3] for d in chs - if d['ch_name'] not in drop_chs]) + cart_coords = np.array([d["loc"][:3] for d in chs if d["ch_name"] not in drop_chs]) if cart_coords.any(): # has coordinates # (-y x z) to (x y z) cart_coords[:, 0] = -cart_coords[:, 0] # -y to y diff --git a/mne/export/_egimff.py b/mne/export/_egimff.py index 6ad4ead73e7..185afb5f558 100644 --- a/mne/export/_egimff.py +++ 
b/mne/export/_egimff.py @@ -1,23 +1,21 @@ -# -*- coding: utf-8 -*- -# Authors: MNE Developers -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import os -import shutil import datetime +import os import os.path as op +import shutil import numpy as np +from .._fiff.pick import pick_channels, pick_types from ..io.egi.egimff import _import_mffpy -from ..io.pick import pick_types, pick_channels -from ..utils import verbose, warn, _check_fname +from ..utils import _check_fname, verbose, warn @verbose -def export_evokeds_mff(fname, evoked, history=None, *, overwrite=False, - verbose=None): +def export_evokeds_mff(fname, evoked, history=None, *, overwrite=False, verbose=None): """Export evoked dataset to MFF. %(export_warning)s @@ -50,18 +48,21 @@ def export_evokeds_mff(fname, evoked, history=None, *, overwrite=False, (e.g. 'HydroCel GSN 256 1.0'). This field is automatically populated when using MFF read functions. """ - mffpy = _import_mffpy('Export evokeds to MFF.') - import pytz + mffpy = _import_mffpy("Export evokeds to MFF.") + info = evoked[0].info - if np.round(info['sfreq']) != info['sfreq']: - raise ValueError('Sampling frequency must be a whole number. ' - f'sfreq: {info["sfreq"]}') - sampling_rate = int(info['sfreq']) + if np.round(info["sfreq"]) != info["sfreq"]: + raise ValueError( + f"Sampling frequency must be a whole number. sfreq: {info['sfreq']}" + ) + sampling_rate = int(info["sfreq"]) # check for unapplied projectors - if any(not proj['active'] for proj in evoked[0].info['projs']): - warn('Evoked instance has unapplied projectors. Consider applying ' - 'them before exporting with evoked.apply_proj().') + if any(not proj["active"] for proj in evoked[0].info["projs"]): + warn( + "Evoked instance has unapplied projectors. Consider applying " + "them before exporting with evoked.apply_proj()." + ) # Initialize writer # Future changes: conditions based on version or mffpy requirement if @@ -70,12 +71,12 @@ def export_evokeds_mff(fname, evoked, history=None, *, overwrite=False, if op.exists(fname): os.remove(fname) if op.isfile(fname) else shutil.rmtree(fname) writer = mffpy.Writer(fname) - current_time = pytz.utc.localize(datetime.datetime.utcnow()) - writer.addxml('fileInfo', recordTime=current_time) + current_time = datetime.datetime.now(datetime.timezone.utc) + writer.addxml("fileInfo", recordTime=current_time) try: - device = info['device_info']['type'] + device = info["device_info"]["type"] except (TypeError, KeyError): - raise ValueError('No device type. Cannot determine sensor layout.') + raise ValueError("No device type. 
Cannot determine sensor layout.") writer.add_coordinates_and_sensor_layout(device) # Add EEG data @@ -89,11 +90,11 @@ def export_evokeds_mff(fname, evoked, history=None, *, overwrite=False, # Add categories categories_content = _categories_content_from_evokeds(evoked) - writer.addxml('categories', categories=categories_content) + writer.addxml("categories", categories=categories_content) # Add history if history: - writer.addxml('historyEntries', entries=history) + writer.addxml("historyEntries", entries=history) writer.write() @@ -104,14 +105,20 @@ def _categories_content_from_evokeds(evoked): begin_time = 0 for ave in evoked: # Times are converted to microseconds - sfreq = ave.info['sfreq'] + sfreq = ave.info["sfreq"] duration = np.round(len(ave.times) / sfreq * 1e6).astype(int) end_time = begin_time + duration event_time = begin_time - np.round(ave.tmin * 1e6).astype(int) eeg_bads = _get_bad_eeg_channels(ave.info) content[ave.comment] = [ - _build_segment_content(begin_time, end_time, event_time, eeg_bads, - name='Average', nsegs=ave.nave) + _build_segment_content( + begin_time, + end_time, + event_time, + eeg_bads, + name="Average", + nsegs=ave.nave, + ) ] begin_time += duration return content @@ -123,49 +130,47 @@ def _get_bad_eeg_channels(info): Given a list of only the EEG channels in file, return the indices of this list (starting at 1) that correspond to bad channels. """ - if len(info['bads']) == 0: + if len(info["bads"]) == 0: return [] eeg_channels = pick_types(info, eeg=True, exclude=[]) - bad_channels = pick_channels(info['ch_names'], info['bads']) + bad_channels = pick_channels(info["ch_names"], info["bads"]) bads_elementwise = np.isin(eeg_channels, bad_channels) return list(np.flatnonzero(bads_elementwise) + 1) -def _build_segment_content(begin_time, end_time, event_time, eeg_bads, - status='unedited', name=None, pns_bads=None, - nsegs=None): +def _build_segment_content( + begin_time, + end_time, + event_time, + eeg_bads, + status="unedited", + name=None, + pns_bads=None, + nsegs=None, +): """Build content for a single segment in categories.xml. Segments are sorted into categories in categories.xml. In a segmented MFF each category can contain multiple segments, but in an averaged MFF each category only contains one segment (the average). """ - channel_status = [{ - 'signalBin': 1, - 'exclusion': 'badChannels', - 'channels': eeg_bads - }] + channel_status = [ + {"signalBin": 1, "exclusion": "badChannels", "channels": eeg_bads} + ] if pns_bads: - channel_status.append({ - 'signalBin': 2, - 'exclusion': 'badChannels', - 'channels': pns_bads - }) + channel_status.append( + {"signalBin": 2, "exclusion": "badChannels", "channels": pns_bads} + ) content = { - 'status': status, - 'beginTime': begin_time, - 'endTime': end_time, - 'evtBegin': event_time, - 'evtEnd': event_time, - 'channelStatus': channel_status, + "status": status, + "beginTime": begin_time, + "endTime": end_time, + "evtBegin": event_time, + "evtEnd": event_time, + "channelStatus": channel_status, } if name: - content['name'] = name + content["name"] = name if nsegs: - content['keys'] = { - '#seg': { - 'type': 'long', - 'data': nsegs - } - } + content["keys"] = {"#seg": {"type": "long", "data": nsegs}} return content diff --git a/mne/export/_export.py b/mne/export/_export.py index 1d2cc44a141..2842b747f21 100644 --- a/mne/export/_export.py +++ b/mne/export/_export.py @@ -1,23 +1,38 @@ -# -*- coding: utf-8 -*- -# Authors: MNE Developers -# +# Authors: The MNE-Python contributors. 
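# --- illustrative aside (not part of the patch) -----------------------------
# The MFF writer above is reached either directly via export_evokeds_mff or
# via export_evokeds with an .mff file name (format inferred from the
# extension). A hedged sketch; the file names are assumptions, and MFF export
# additionally requires mffpy, an integer sampling rate, and device
# information in info["device_info"]:
import mne

evokeds = mne.read_evokeds("sample-ave.fif")  # list of Evoked
mne.export.export_evokeds_mff("sample.mff", evokeds, overwrite=True)
mne.export.export_evokeds("sample.mff", evokeds, overwrite=True)  # same path
# -----------------------------------------------------------------------------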
# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import os.path as op +import os -from ._egimff import export_evokeds_mff -from ..utils import logger, verbose, warn, _check_fname, _validate_type +from mne.export._egimff import export_evokeds_mff +from mne.utils import _check_fname, _validate_type, logger, verbose, warn @verbose -def export_raw(fname, raw, fmt='auto', physical_range='auto', - add_ch_type=False, *, overwrite=False, verbose=None): +def export_raw( + fname, + raw, + fmt="auto", + physical_range="auto", + add_ch_type=False, + *, + overwrite=False, + verbose=None, +): """Export Raw to external formats. %(export_fmt_support_raw)s %(export_warning)s + .. warning:: + When exporting ``Raw`` with annotations, ``raw.info["meas_date"]`` must be the + same as ``raw.annotations.orig_time``. This guarantees that the annotations are + in the same reference frame as the samples. When + :attr:`Raw.first_time ` is not zero (e.g., after + cropping), the onsets are automatically corrected so that onsets are always + relative to the first sample. + Parameters ---------- %(fname_export_params)s @@ -41,30 +56,45 @@ def export_raw(fname, raw, fmt='auto', physical_range='auto', """ fname = str(_check_fname(fname, overwrite=overwrite)) supported_export_formats = { # format : (extensions,) - 'eeglab': ('set',), - 'edf': ('edf',), - 'brainvision': ('eeg', 'vmrk', 'vhdr',) + "bdf": ("bdf",), + "brainvision": ( + "eeg", + "vmrk", + "vhdr", + ), + "edf": ("edf",), + "eeglab": ("set",), } fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) # check for unapplied projectors - if any(not proj['active'] for proj in raw.info['projs']): - warn('Raw instance has unapplied projectors. Consider applying ' - 'them before exporting with raw.apply_proj().') + if any(not proj["active"] for proj in raw.info["projs"]): + warn( + "Raw instance has unapplied projectors. Consider applying " + "them before exporting with raw.apply_proj()." + ) + + match fmt: + case "bdf": + from mne.export._edf_bdf import _export_raw_bdf + + _export_raw_bdf(fname, raw, physical_range, add_ch_type) + case "brainvision": + from mne.export._brainvision import _export_raw + + _export_raw(fname, raw, overwrite) + case "edf": + from mne.export._edf_bdf import _export_raw_edf - if fmt == 'eeglab': - from ._eeglab import _export_raw - _export_raw(fname, raw) - elif fmt == 'edf': - from ._edf import _export_raw - _export_raw(fname, raw, physical_range, add_ch_type) - elif fmt == 'brainvision': - from ._brainvision import _export_raw - _export_raw(fname, raw, overwrite) + _export_raw_edf(fname, raw, physical_range, add_ch_type) + case "eeglab": + from mne.export._eeglab import _export_raw + + _export_raw(fname, raw) @verbose -def export_epochs(fname, epochs, fmt='auto', *, overwrite=False, verbose=None): +def export_epochs(fname, epochs, fmt="auto", *, overwrite=False, verbose=None): """Export Epochs to external formats. %(export_fmt_support_epochs)s @@ -91,23 +121,25 @@ def export_epochs(fname, epochs, fmt='auto', *, overwrite=False, verbose=None): """ fname = str(_check_fname(fname, overwrite=overwrite)) supported_export_formats = { - 'eeglab': ('set',), + "eeglab": ("set",), } fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) # check for unapplied projectors - if any(not proj['active'] for proj in epochs.info['projs']): - warn('Epochs instance has unapplied projectors. 
Consider applying ' - 'them before exporting with epochs.apply_proj().') + if any(not proj["active"] for proj in epochs.info["projs"]): + warn( + "Epochs instance has unapplied projectors. Consider applying " + "them before exporting with epochs.apply_proj()." + ) + + if fmt == "eeglab": + from mne.export._eeglab import _export_epochs - if fmt == 'eeglab': - from ._eeglab import _export_epochs _export_epochs(fname, epochs) @verbose -def export_evokeds(fname, evoked, fmt='auto', *, overwrite=False, - verbose=None): +def export_evokeds(fname, evoked, fmt="auto", *, overwrite=False, verbose=None): """Export evoked dataset to external formats. This function is a wrapper for format-specific export functions. The export @@ -144,16 +176,16 @@ def export_evokeds(fname, evoked, fmt='auto', *, overwrite=False, """ fname = str(_check_fname(fname, overwrite=overwrite)) supported_export_formats = { - 'mff': ('mff',), + "mff": ("mff",), } fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) if not isinstance(evoked, list): evoked = [evoked] - logger.info(f'Exporting evoked dataset to {fname}...') + logger.info(f"Exporting evoked dataset to {fname}...") - if fmt == 'mff': + if fmt == "mff": export_evokeds_mff(fname, evoked, overwrite=overwrite) @@ -175,26 +207,29 @@ def _infer_check_export_fmt(fmt, fname, supported_formats): Dictionary containing supported formats (as keys) and each format's corresponding file extensions in a tuple (e.g., {'eeglab': ('set',)}) """ - _validate_type(fmt, str, 'fmt') + _validate_type(fmt, str, "fmt") fmt = fmt.lower() if fmt == "auto": - fmt = op.splitext(fname)[1] + fmt = os.path.splitext(fname)[1] if fmt: fmt = fmt[1:].lower() # find fmt in supported formats dict's tuples - fmt = next((k for k, v in supported_formats.items() if fmt in v), - fmt) # default to original fmt for raising error later + fmt = next( + (k for k, v in supported_formats.items() if fmt in v), fmt + ) # default to original fmt for raising error later else: - raise ValueError(f"Couldn't infer format from filename {fname}" - " (no extension found)") + raise ValueError( + f"Couldn't infer format from filename {fname} (no extension found)" + ) if fmt not in supported_formats: supported = [] - for format, extensions in supported_formats.items(): - ext_str = ', '.join(f'*.{ext}' for ext in extensions) - supported.append(f'{format} ({ext_str})') - - supported_str = ', '.join(supported) - raise ValueError(f"Format '{fmt}' is not supported. " - f"Supported formats are {supported_str}.") + for supp_format, extensions in supported_formats.items(): + ext_str = ", ".join(f"*.{ext}" for ext in extensions) + supported.append(f"{supp_format} ({ext_str})") + + supported_str = ", ".join(supported) + raise ValueError( + f"Format '{fmt}' is not supported. Supported formats are {supported_str}." + ) return fmt diff --git a/mne/export/tests/test_export.py b/mne/export/tests/test_export.py index aaf92c92cb2..f9146227d50 100644 --- a/mne/export/tests/test_export.py +++ b/mne/export/tests/test_export.py @@ -1,51 +1,63 @@ -# -*- coding: utf-8 -*- """Test exporting functions.""" -# Authors: MNE Developers -# + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
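# --- illustrative aside (not part of the patch) -----------------------------
# _infer_check_export_fmt above resolves fmt="auto" from the file extension
# using each function's support table, so the exporter is picked purely by
# suffix. A hedged sketch of the resulting dispatch; the input file name is an
# assumption, and apply_proj()/pick() simply avoid the unapplied-projector
# warning mentioned above:
import mne

raw = mne.io.read_raw_fif("test_raw.fif", preload=True)
raw.apply_proj().pick("eeg")
raw.export("out.vhdr")  # BrainVision (writes .vhdr/.vmrk/.eeg)
raw.export("out.set")   # EEGLAB
raw.export("out.edf")   # EDF
# an unrecognized extension raises ValueError("Format '...' is not supported ...")
# -----------------------------------------------------------------------------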
from contextlib import nullcontext -from datetime import datetime, timezone -from os import remove +from datetime import date, datetime, timezone from pathlib import Path -import pytest import numpy as np -from numpy.testing import (assert_allclose, assert_array_almost_equal, - assert_array_equal) - -from mne import (read_epochs_eeglab, Epochs, read_evokeds, read_evokeds_mff, - Annotations) -from mne.datasets import testing, misc +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal + +from mne import ( + Annotations, + Epochs, + create_info, + read_epochs_eeglab, + read_evokeds, + read_evokeds_mff, +) +from mne.datasets import misc, testing from mne.export import export_evokeds, export_evokeds_mff from mne.fixes import _compare_version -from mne.io import (RawArray, read_raw_fif, read_raw_eeglab, read_raw_edf, - read_raw_brainvision) -from mne.io.meas_info import create_info -from mne.utils import (_check_eeglabio_installed, requires_version, - object_diff, _check_edflib_installed, _resource_path, - _check_pybv_installed, _record_warnings) +from mne.io import ( + RawArray, + read_raw_bdf, + read_raw_brainvision, + read_raw_edf, + read_raw_eeglab, + read_raw_fif, +) from mne.tests.test_epochs import _get_data +from mne.utils import ( + _check_edfio_installed, + _record_warnings, + _resource_path, + check_version, + object_diff, +) -fname_evoked = _resource_path('mne.io.tests.data', 'test-ave.fif') -fname_raw = _resource_path('mne.io.tests.data', 'test_raw.fif') +fname_evoked = _resource_path("mne.io.tests.data", "test-ave.fif") +fname_raw = _resource_path("mne.io.tests.data", "test_raw.fif") data_path = testing.data_path(download=False) egi_evoked_fname = data_path / "EGI" / "test_egi_evoked.mff" misc_path = misc.data_path(download=False) -@pytest.mark.skipif(not _check_pybv_installed(strict=False), - reason='pybv not installed') @pytest.mark.parametrize( - ['meas_date', 'orig_time', 'ext'], [ - [None, None, '.vhdr'], - [datetime(2022, 12, 3, 19, 1, 10, 720100, tzinfo=timezone.utc), - None, - '.eeg'], - ]) + ["meas_date", "orig_time", "ext"], + [ + [None, None, ".vhdr"], + [datetime(2022, 12, 3, 19, 1, 10, 720100, tzinfo=timezone.utc), None, ".eeg"], + ], +) def test_export_raw_pybv(tmp_path, meas_date, orig_time, ext): """Test saving a Raw instance to BrainVision format via pybv.""" + pytest.importorskip("pybv") raw = read_raw_fif(fname_raw, preload=True) raw.apply_proj() @@ -67,39 +79,39 @@ def test_export_raw_pybv(tmp_path, meas_date, orig_time, ext): ) raw.set_annotations(annots) - temp_fname = tmp_path / ('test' + ext) - with pytest.warns(RuntimeWarning, match="'short' format. Converting"): + temp_fname = tmp_path / ("test" + ext) + with ( + _record_warnings(), + pytest.warns(RuntimeWarning, match="'short' format. 
Converting"), + ): raw.export(temp_fname) - raw_read = read_raw_brainvision(str(temp_fname).replace('.eeg', '.vhdr')) + raw_read = read_raw_brainvision(str(temp_fname).replace(".eeg", ".vhdr")) assert raw.ch_names == raw_read.ch_names assert_allclose(raw.times, raw_read.times) assert_allclose(raw.get_data(), raw_read.get_data()) -@requires_version('pymatreader') -@pytest.mark.skipif(not _check_eeglabio_installed(strict=False), - reason='eeglabio not installed') def test_export_raw_eeglab(tmp_path): """Test saving a Raw instance to EEGLAB's set format.""" + pytest.importorskip("eeglabio") raw = read_raw_fif(fname_raw, preload=True) raw.apply_proj() temp_fname = tmp_path / "test.set" raw.export(temp_fname) - raw.drop_channels([ch for ch in ['epoc'] - if ch in raw.ch_names]) + raw.drop_channels([ch for ch in ["epoc"] if ch in raw.ch_names]) - with pytest.warns(RuntimeWarning, match='is above the 99th percentile'): - raw_read = read_raw_eeglab(temp_fname, preload=True, montage_units='m') + with pytest.warns(RuntimeWarning, match="is above the 99th percentile"): + raw_read = read_raw_eeglab(temp_fname, preload=True, montage_units="m") assert raw.ch_names == raw_read.ch_names - cart_coords = np.array([d['loc'][:3] for d in raw.info['chs']]) # just xyz - cart_coords_read = np.array([d['loc'][:3] for d in raw_read.info['chs']]) + cart_coords = np.array([d["loc"][:3] for d in raw.info["chs"]]) # just xyz + cart_coords_read = np.array([d["loc"][:3] for d in raw_read.info["chs"]]) assert_allclose(cart_coords, cart_coords_read) assert_allclose(raw.times, raw_read.times) assert_allclose(raw.get_data(), raw_read.get_data()) # test overwrite - with pytest.raises(FileExistsError, match='Destination file exists'): + with pytest.raises(FileExistsError, match="Destination file exists"): raw.export(temp_fname, overwrite=False) raw.export(temp_fname, overwrite=True) @@ -108,237 +120,380 @@ def test_export_raw_eeglab(tmp_path): # test warning with unapplied projectors raw = read_raw_fif(fname_raw, preload=True) - with pytest.warns(RuntimeWarning, - match='Raw instance has unapplied projectors.'): + with pytest.warns(RuntimeWarning, match="Raw instance has unapplied projectors."): raw.export(temp_fname, overwrite=True) -@pytest.mark.skipif(not _check_edflib_installed(strict=False), - reason='edflib-python not installed') +@pytest.mark.parametrize("tmin", (0, 1, 5, 10)) +def test_export_raw_eeglab_annotations(tmp_path, tmin): + """Test annotations in the exported EEGLAB file. + + All annotations should be preserved and onset corrected. 
+ """ + pytest.importorskip("eeglabio") + raw = read_raw_fif(fname_raw, preload=True) + raw.apply_proj() + annotations = Annotations( + onset=[0.01, 0.05, 0.90, 1.05], + duration=[0, 1, 0, 0], + description=["test1", "test2", "test3", "test4"], + ch_names=[["MEG 0113"], ["MEG 0113", "MEG 0132"], [], ["MEG 0143"]], + ) + raw.set_annotations(annotations) + raw.crop(tmin) + + # export + temp_fname = tmp_path / "test.set" + raw.export(temp_fname) + + # read in the file + with pytest.warns(RuntimeWarning, match="is above the 99th percentile"): + raw_read = read_raw_eeglab(temp_fname, preload=True, montage_units="m") + assert raw_read.first_time == 0 # exportation resets first_time + valid_annot = ( + raw.annotations.onset >= tmin + ) # only annotations in the cropped range gets exported + + # compare annotations before and after export + assert_array_almost_equal( + raw.annotations.onset[valid_annot] - raw.first_time, + raw_read.annotations.onset, + ) + assert_array_equal( + raw.annotations.duration[valid_annot], raw_read.annotations.duration + ) + assert_array_equal( + raw.annotations.description[valid_annot], raw_read.annotations.description + ) + + +def _create_raw_for_edf_tests(stim_channel_index=None): + rng = np.random.RandomState(12345) + ch_types = [ + "eeg", + "eeg", + "ecog", + "ecog", + "seeg", + "eog", + "ecg", + "emg", + "dbs", + "bio", + ] + if stim_channel_index is not None: + ch_types.insert(stim_channel_index, "stim") + ch_names = np.arange(len(ch_types)).astype(str).tolist() + info = create_info(ch_names, sfreq=1000, ch_types=ch_types) + data = rng.random(size=(len(ch_names), 2000)) * 1e-5 + return RawArray(data, info) + + +edfio_mark = pytest.mark.skipif( + not _check_edfio_installed(strict=False), reason="requires edfio" +) + + +@edfio_mark() def test_double_export_edf(tmp_path): """Test exporting an EDF file multiple times.""" - rng = np.random.RandomState(123456) - format = 'edf' - ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'ecog', 'seeg', 'eog', 'ecg', - 'emg', 'dbs', 'bio'] - info = create_info(len(ch_types), sfreq=1000, ch_types=ch_types) - data = rng.random(size=(len(ch_types), 1000)) * 1e-5 + raw = _create_raw_for_edf_tests(stim_channel_index=2) + raw.info.set_meas_date("2023-09-04 14:53:09.000") + raw.set_annotations(Annotations(onset=[1], duration=[0], description=["test"])) # include subject info and measurement date - info['subject_info'] = dict(first_name='mne', last_name='python', - birthday=(1992, 1, 20), sex=1, hand=3) - raw = RawArray(data, info) + raw.info["subject_info"] = dict( + his_id="12345", + first_name="mne", + last_name="python", + birthday=date(1992, 1, 20), + sex=1, + weight=78.3, + height=1.75, + hand=3, + ) # export once - temp_fname = tmp_path / f'test.{format}' + temp_fname = tmp_path / "test.edf" raw.export(temp_fname, add_ch_type=True) raw_read = read_raw_edf(temp_fname, infer_types=True, preload=True) # export again - raw_read.load_data() raw_read.export(temp_fname, add_ch_type=True, overwrite=True) raw_read = read_raw_edf(temp_fname, infer_types=True, preload=True) - # stim channel should be dropped - raw.drop_channels('2') - assert raw.ch_names == raw_read.ch_names - # only compare the original length, since extra zeros are appended - orig_raw_len = len(raw) - assert_array_almost_equal( - raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) - assert_allclose( - raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + assert_array_almost_equal(raw.get_data(), raw_read.get_data(), decimal=10) + 
assert_array_equal(raw.times, raw_read.times) + + # check info + for key in set(raw.info) - {"chs"}: + assert raw.info[key] == raw_read.info[key] - # check channel types except for 'bio', which loses its type orig_ch_types = raw.get_channel_types() read_ch_types = raw_read.get_channel_types() assert_array_equal(orig_ch_types, read_ch_types) - # check handling of missing subject metadata - del info['subject_info']['sex'] - raw_2 = RawArray(data, info) - raw_2.export(temp_fname, add_ch_type=True, overwrite=True) +@edfio_mark() +def test_edf_physical_range(tmp_path): + """Test exporting an EDF file with different physical range settings.""" + ch_types = ["eeg"] * 4 + ch_names = np.arange(len(ch_types)).astype(str).tolist() + fs = 1000 + info = create_info(len(ch_types), sfreq=fs, ch_types=ch_types) + data = np.tile( + np.sin(2 * np.pi * 10 * np.arange(0, 2, 1 / fs)) * 1e-5, (len(ch_names), 1) + ) + data = (data.T + [0.1, 0, 0, -0.1]).T # add offsets + raw = RawArray(data, info) -@pytest.mark.skipif(not _check_edflib_installed(strict=False), - reason='edflib-python not installed') -def test_export_edf_annotations(tmp_path): - """Test that exporting EDF preserves annotations.""" - rng = np.random.RandomState(123456) - format = 'edf' - ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'ecog', 'seeg', - 'eog', 'ecg', 'emg', 'dbs', 'bio'] + # export with physical range per channel type (default) + temp_fname = tmp_path / "test_auto.edf" + raw.export(temp_fname) + raw_read = read_raw_edf(temp_fname, preload=True) + with pytest.raises(AssertionError, match="Arrays are not almost equal"): + assert_array_almost_equal(raw.get_data(), raw_read.get_data(), decimal=10) + + # export with physical range per channel + temp_fname = tmp_path / "test_per_channel.edf" + raw.export(temp_fname, physical_range="channelwise") + raw_read = read_raw_edf(temp_fname, preload=True) + assert_array_almost_equal(raw.get_data(), raw_read.get_data(), decimal=10) + + +@edfio_mark() +@pytest.mark.parametrize("pad_width", (1, 10, 100, 500, 999)) +def test_edf_padding(tmp_path, pad_width): + """Test exporting an EDF file with not-equal-length data blocks.""" + ch_types = ["eeg"] * 4 ch_names = np.arange(len(ch_types)).astype(str).tolist() - info = create_info(ch_names, sfreq=1000, - ch_types=ch_types) - data = rng.random(size=(len(ch_names), 2000)) * 1.e-5 + fs = 1000 + info = create_info(len(ch_types), sfreq=fs, ch_types=ch_types) + data = np.tile( + np.sin(2 * np.pi * 10 * np.arange(0, 2, 1 / fs)) * 1e-5, (len(ch_names), 1) + )[:, 0:-pad_width] # remove last pad_width samples raw = RawArray(data, info) + # export with physical range per channel type (default) + temp_fname = tmp_path / "test.edf" + with pytest.warns( + RuntimeWarning, + match=( + "EDF format requires equal-length data blocks.*" + f"{pad_width / 1000:.3g} seconds of edge values were appended.*" + ), + ): + raw.export(temp_fname) + + # read in the file + raw_read = read_raw_edf(temp_fname, preload=True) + assert raw.n_times == raw_read.n_times - pad_width + edge_data = raw_read.get_data()[:, -pad_width - 1] + pad_data = raw_read.get_data()[:, -pad_width:] + assert_array_almost_equal( + raw.get_data(), raw_read.get_data()[:, :-pad_width], decimal=10 + ) + assert_array_almost_equal( + pad_data, np.tile(edge_data, (pad_width, 1)).T, decimal=10 + ) + + assert "BAD_ACQ_SKIP" in raw_read.annotations.description + assert_array_almost_equal(raw_read.annotations.onset[0], raw.times[-1] + 1 / fs) + assert_array_almost_equal(raw_read.annotations.duration[0], pad_width / fs) + + 
+@edfio_mark() +@pytest.mark.parametrize("tmin", (0, 0.005, 0.03, 1)) +def test_export_edf_annotations(tmp_path, tmin): + """Test annotations in the exported EDF file. + + All annotations should be preserved and onset corrected. + """ + raw = _create_raw_for_edf_tests() annotations = Annotations( - onset=[0.01, 0.05, 0.90, 1.05], duration=[0, 1, 0, 0], - description=['test1', 'test2', 'test3', 'test4']) + onset=[0.01, 0.05, 0.90, 1.05], + duration=[0, 1, 0, 0], + description=["test1", "test2", "test3", "test4"], + ch_names=[["0"], ["0", "1"], [], ["1"]], + ) raw.set_annotations(annotations) + raw.crop(tmin) + assert raw.first_time == tmin + + if raw.n_times % raw.info["sfreq"] == 0: + expectation = nullcontext() + else: + expectation = pytest.warns( + RuntimeWarning, match="EDF format requires equal-length data blocks" + ) # export - temp_fname = tmp_path / f"test.{format}" - raw.export(temp_fname) + temp_fname = tmp_path / "test.edf" + with expectation: + raw.export(temp_fname) # read in the file raw_read = read_raw_edf(temp_fname, preload=True) - assert_array_equal(raw.annotations.onset, raw_read.annotations.onset) - assert_array_equal(raw.annotations.duration, raw_read.annotations.duration) - assert_array_equal(raw.annotations.description, - raw_read.annotations.description) + assert raw_read.first_time == 0 # exportation resets first_time + bad_annot = raw_read.annotations.description == "BAD_ACQ_SKIP" + if bad_annot.any(): + raw_read.annotations.delete(bad_annot) + valid_annot = ( + raw.annotations.onset >= tmin + ) # only annotations in the cropped range gets exported + + # compare annotations before and after export + assert_array_almost_equal( + raw.annotations.onset[valid_annot] - raw.first_time, raw_read.annotations.onset + ) + assert_array_equal( + raw.annotations.duration[valid_annot], raw_read.annotations.duration + ) + assert_array_equal( + raw.annotations.description[valid_annot], raw_read.annotations.description + ) + assert_array_equal( + raw.annotations.ch_names[valid_annot], raw_read.annotations.ch_names + ) -@pytest.mark.skipif(not _check_edflib_installed(strict=False), - reason='edflib-python not installed') +@edfio_mark() def test_rawarray_edf(tmp_path): """Test saving a Raw array with integer sfreq to EDF.""" - rng = np.random.RandomState(12345) - format = 'edf' - ch_types = ['eeg', 'eeg', 'stim', 'ecog', 'seeg', 'eog', 'ecg', 'emg', - 'dbs', 'bio'] - ch_names = np.arange(len(ch_types)).astype(str).tolist() - info = create_info(ch_names, sfreq=1000, - ch_types=ch_types) - data = rng.random(size=(len(ch_names), 1000)) * 1e-5 + raw = _create_raw_for_edf_tests() # include subject info and measurement date - subject_info = dict(first_name='mne', last_name='python', - birthday=(1992, 1, 20), sex=1, hand=3) - info['subject_info'] = subject_info - raw = RawArray(data, info) + raw.info["subject_info"] = dict( + first_name="mne", + last_name="python", + birthday=date(1992, 1, 20), + sex=1, + hand=3, + ) time_now = datetime.now() - meas_date = datetime(year=time_now.year, month=time_now.month, - day=time_now.day, hour=time_now.hour, - minute=time_now.minute, second=time_now.second, - tzinfo=timezone.utc) + meas_date = datetime( + year=time_now.year, + month=time_now.month, + day=time_now.day, + hour=time_now.hour, + minute=time_now.minute, + second=time_now.second, + tzinfo=timezone.utc, + ) raw.set_meas_date(meas_date) - temp_fname = tmp_path / f"test.{format}" + temp_fname = tmp_path / "test.edf" raw.export(temp_fname, add_ch_type=True) raw_read = read_raw_edf(temp_fname, 
infer_types=True, preload=True) - # stim channel should be dropped - raw.drop_channels('2') - assert raw.ch_names == raw_read.ch_names - # only compare the original length, since extra zeros are appended - orig_raw_len = len(raw) - assert_array_almost_equal( - raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) - assert_allclose( - raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + assert_array_almost_equal(raw.get_data(), raw_read.get_data(), decimal=10) + assert_array_equal(raw.times, raw_read.times) - # check channel types except for 'bio', which loses its type orig_ch_types = raw.get_channel_types() read_ch_types = raw_read.get_channel_types() assert_array_equal(orig_ch_types, read_ch_types) - assert raw.info['meas_date'] == raw_read.info['meas_date'] - - # channel name can't be longer than 16 characters with the type added - raw_bad = raw.copy() - raw_bad.rename_channels({'1': 'abcdefghijklmnopqrstuvwxyz'}) - with pytest.raises(RuntimeError, match='Signal label'), \ - pytest.warns(RuntimeWarning, match='Data has a non-integer'): - raw_bad.export(temp_fname, overwrite=True) - - # include bad birthday that is non-EDF compliant - bad_info = info.copy() - bad_info['subject_info']['birthday'] = (1700, 1, 20) - raw = RawArray(data, bad_info) - with pytest.raises(RuntimeError, match='Setting patient birth date'): - raw.export(temp_fname, overwrite=True) + assert raw.info["meas_date"] == raw_read.info["meas_date"] - # include bad measurement date that is non-EDF compliant - raw = RawArray(data, info) - meas_date = datetime(year=1984, month=1, day=1, tzinfo=timezone.utc) - raw.set_meas_date(meas_date) - with pytest.raises(RuntimeError, match='Setting start date time'): - raw.export(temp_fname, overwrite=True) - # test that warning is raised if there are non-voltage based channels - raw = RawArray(data, info) - with pytest.warns(RuntimeWarning, match='The unit'): - raw.set_channel_types({'9': 'hbr'}) - with pytest.warns(RuntimeWarning, match='Non-voltage channels'): - raw.export(temp_fname, overwrite=True) +@edfio_mark() +def test_edf_export_non_voltage_channels(tmp_path): + """Test saving a Raw array containing a non-voltage channel.""" + temp_fname = tmp_path / "test.edf" + + raw = _create_raw_for_edf_tests() + raw.set_channel_types({"9": "hbr"}, on_unit_change="ignore") + raw.export(temp_fname, overwrite=True) # data should match up to the non-accepted channel raw_read = read_raw_edf(temp_fname, preload=True) - orig_raw_len = len(raw) - assert_array_almost_equal( - raw.get_data()[:-1, :], raw_read.get_data()[:, :orig_raw_len], - decimal=4) - assert_allclose( - raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + assert raw.ch_names == raw_read.ch_names + assert_array_almost_equal(raw.get_data()[:-1], raw_read.get_data()[:-1], decimal=10) + assert_array_almost_equal(raw.get_data()[-1], raw_read.get_data()[-1], decimal=5) + assert_array_equal(raw.times, raw_read.times) + + +@edfio_mark() +def test_channel_label_too_long_for_edf_raises_error(tmp_path): + """Test trying to save an EDF where a channel label is longer than 16 characters.""" + raw = _create_raw_for_edf_tests() + raw.rename_channels({"1": "abcdefghijklmnopqrstuvwxyz"}) + with pytest.raises(RuntimeError, match="Signal label"): + raw.export(tmp_path / "test.edf") + + +@edfio_mark() +def test_measurement_date_outside_range_valid_for_edf(tmp_path): + """Test trying to save an EDF with a measurement date before 1985-01-01.""" + raw = _create_raw_for_edf_tests() + raw.set_meas_date(datetime(year=1984, 
month=1, day=1, tzinfo=timezone.utc)) + with pytest.raises(ValueError, match="EDF only allows dates from 1985 to 2084"): + raw.export(tmp_path / "test.edf", overwrite=True) + - # the data should still match though +@pytest.mark.filterwarnings("ignore:Data has a non-integer:RuntimeWarning") +@pytest.mark.parametrize( + ("physical_range", "exceeded_bound"), + [ + ((-1e6, 0), "maximum"), + ((0, 1e6), "minimum"), + ], +) +@edfio_mark() +def test_export_edf_signal_clipping(tmp_path, physical_range, exceeded_bound): + """Test if exporting data exceeding physical min/max clips and emits a warning.""" + raw = read_raw_fif(fname_raw) + raw.pick(picks=["eeg", "ecog", "seeg"]).load_data() + temp_fname = tmp_path / "test.edf" + with ( + _record_warnings(), + pytest.warns(RuntimeWarning, match=f"The {exceeded_bound}"), + ): + raw.export(temp_fname, physical_range=physical_range) raw_read = read_raw_edf(temp_fname, preload=True) - raw.drop_channels('2') - assert raw.ch_names == raw_read.ch_names - orig_raw_len = len(raw) - assert_array_almost_equal( - raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) - assert_allclose( - raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + assert raw_read.get_data().min() >= physical_range[0] + assert raw_read.get_data().max() <= physical_range[1] + + +@edfio_mark() +def test_export_edf_with_constant_channel(tmp_path): + """Test if exporting to edf works if a channel contains only constant values.""" + temp_fname = tmp_path / "test.edf" + raw = RawArray(np.zeros((1, 10)), info=create_info(1, 1)) + raw.export(temp_fname) + raw_read = read_raw_edf(temp_fname, preload=True) + assert_array_equal(raw_read.get_data(), np.zeros((1, 10))) -@pytest.mark.skipif(not _check_edflib_installed(strict=False), - reason='edflib-python not installed') +@edfio_mark() @pytest.mark.parametrize( - ['dataset', 'format'], [ - ['test', 'edf'], - pytest.param('misc', 'edf', marks=[pytest.mark.slowtest, - misc._pytest_mark()]), - ]) -def test_export_raw_edf(tmp_path, dataset, format): + ("input_path", "warning_msg"), + [ + (fname_raw, "Data has a non-integer"), + pytest.param( + misc_path / "ecog" / "sample_ecog_ieeg.fif", + "EDF format requires", + marks=[pytest.mark.slowtest, misc._pytest_mark()], + ), + ], +) +def test_export_raw_edf(tmp_path, input_path, warning_msg): """Test saving a Raw instance to EDF format.""" - if dataset == 'test': - raw = read_raw_fif(fname_raw) - elif dataset == 'misc': - fname = misc_path / "ecog" / "sample_ecog_ieeg.fif" - raw = read_raw_fif(fname) + raw = read_raw_fif(input_path) # only test with EEG channels - raw.pick_types(eeg=True, ecog=True, seeg=True) - raw.load_data() - orig_ch_names = raw.ch_names - temp_fname = tmp_path / f"test.{format}" - - # test runtime errors - with pytest.warns() as record: - raw.export(temp_fname, physical_range=(-1e6, 0)) - if dataset == 'test': - assert any( - "Data has a non-integer" in str(rec.message) for rec in record - ) - assert any("The maximum" in str(rec.message) for rec in record) - remove(temp_fname) - - with pytest.warns() as record: - raw.export(temp_fname, physical_range=(0, 1e6)) - if dataset == 'test': - assert any( - "Data has a non-integer" in str(rec.message) for rec in record - ) - assert any("The minimum" in str(rec.message) for rec in record) - remove(temp_fname) + raw.pick(picks=["eeg", "ecog", "seeg"]).load_data() + temp_fname = tmp_path / "test.edf" - if dataset == 'test': - with pytest.warns(RuntimeWarning, match='Data has a non-integer'): - raw.export(temp_fname) - elif dataset 
== 'misc': - with pytest.warns(RuntimeWarning, match='EDF format requires'): - raw.export(temp_fname) + with pytest.warns(RuntimeWarning, match=warning_msg): + raw.export(temp_fname) - if 'epoc' in raw.ch_names: - raw.drop_channels(['epoc']) + if "epoc" in raw.ch_names: + raw.drop_channels(["epoc"]) raw_read = read_raw_edf(temp_fname, preload=True) - assert orig_ch_names == raw_read.ch_names + assert raw.ch_names == raw_read.ch_names # only compare the original length, since extra zeros are appended orig_raw_len = len(raw) @@ -348,7 +503,8 @@ def test_export_raw_edf(tmp_path, dataset, format): # will result in a resolution of 0.09 uV. This resolution # though is acceptable for most EEG manufacturers. assert_array_almost_equal( - raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4) + raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=8 + ) # Due to the data record duration limitations of EDF files, one # cannot store arbitrary float sampling rate exactly. Usually this @@ -356,46 +512,59 @@ def test_export_raw_edf(tmp_path, dataset, format): # decimal points. This for practical purposes does not matter # but will result in an error when say the number of time points # is very very large. - assert_allclose( - raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + assert_allclose(raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) + +@edfio_mark() +def test_export_raw_edf_does_not_fail_on_empty_header_fields(tmp_path): + """Test writing a Raw instance with empty header fields to EDF.""" + rng = np.random.RandomState(123456) -@pytest.mark.xfail(reason='eeglabio (usage?) bugs that should be fixed') -@requires_version('pymatreader') -@pytest.mark.skipif(not _check_eeglabio_installed(strict=False), - reason='eeglabio not installed') -@pytest.mark.parametrize('preload', (True, False)) + ch_types = ["eeg"] + info = create_info(len(ch_types), sfreq=1000, ch_types=ch_types) + info["subject_info"] = { + "his_id": "", + "first_name": "", + "middle_name": "", + "last_name": "", + } + info["device_info"] = {"type": "123"} + + data = rng.random(size=(len(ch_types), 1000)) * 1e-5 + raw = RawArray(data, info) + + raw.export(tmp_path / "test.edf", add_ch_type=True) + + +@pytest.mark.skipif(not check_version("eeglabio", "0.1.2"), reason="fixed by 0.1.2") +@pytest.mark.parametrize("preload", (True, False)) def test_export_epochs_eeglab(tmp_path, preload): """Test saving an Epochs instance to EEGLAB's set format.""" - import eeglabio + eeglabio = pytest.importorskip("eeglabio") raw, events = _get_data()[:2] raw.load_data() epochs = Epochs(raw, events, preload=preload) temp_fname = tmp_path / "test.set" # TODO: eeglabio 0.2 warns about invalid events - if _compare_version(eeglabio.__version__, '==', '0.0.2-1'): + if _compare_version(eeglabio.__version__, "==", "0.0.2-1"): ctx = _record_warnings else: ctx = nullcontext with ctx(): epochs.export(temp_fname) - epochs.drop_channels([ch for ch in ['epoc', 'STI 014'] - if ch in epochs.ch_names]) - epochs_read = read_epochs_eeglab(temp_fname) + epochs.drop_channels([ch for ch in ["epoc", "STI 014"] if ch in epochs.ch_names]) + epochs_read = read_epochs_eeglab(temp_fname, verbose="error") # head radius assert epochs.ch_names == epochs_read.ch_names - cart_coords = np.array([d['loc'][:3] - for d in epochs.info['chs']]) # just xyz - cart_coords_read = np.array([d['loc'][:3] - for d in epochs_read.info['chs']]) + cart_coords = np.array([d["loc"][:3] for d in epochs.info["chs"]]) # just xyz + cart_coords_read = np.array([d["loc"][:3] 
for d in epochs_read.info["chs"]]) assert_allclose(cart_coords, cart_coords_read) - assert_array_equal(epochs.events[:, 0], - epochs_read.events[:, 0]) # latency + assert_array_equal(epochs.events[:, 0], epochs_read.events[:, 0]) # latency assert epochs.event_id.keys() == epochs_read.event_id.keys() # just keys assert_allclose(epochs.times, epochs_read.times) assert_allclose(epochs.get_data(), epochs_read.get_data()) # test overwrite - with pytest.raises(FileExistsError, match='Destination file exists'): + with pytest.raises(FileExistsError, match="Destination file exists"): epochs.export(temp_fname, overwrite=False) with ctx(): epochs.export(temp_fname, overwrite=True) @@ -406,45 +575,47 @@ def test_export_epochs_eeglab(tmp_path, preload): # test warning with unapplied projectors epochs = Epochs(raw, events, preload=preload, proj=False) - with pytest.warns(RuntimeWarning, - match='Epochs instance has unapplied projectors.'): + with pytest.warns( + RuntimeWarning, match="Epochs instance has unapplied projectors." + ): epochs.export(Path(temp_fname), overwrite=True) -@pytest.mark.filterwarnings('ignore::FutureWarning') -@requires_version('mffpy', '0.5.7') +@pytest.mark.filterwarnings("ignore::FutureWarning") @testing.requires_testing_data -@pytest.mark.parametrize('fmt', ('auto', 'mff')) -@pytest.mark.parametrize('do_history', (True, False)) +@pytest.mark.parametrize("fmt", ("auto", "mff")) +@pytest.mark.parametrize("do_history", (True, False)) def test_export_evokeds_to_mff(tmp_path, fmt, do_history): """Test exporting evoked dataset to MFF.""" + pytest.importorskip("mffpy", "0.5.7") + pytest.importorskip("defusedxml") evoked = read_evokeds_mff(egi_evoked_fname) export_fname = tmp_path / "evoked.mff" history = [ { - 'name': 'Test Segmentation', - 'method': 'Segmentation', - 'settings': ['Setting 1', 'Setting 2'], - 'results': ['Result 1', 'Result 2'] + "name": "Test Segmentation", + "method": "Segmentation", + "settings": ["Setting 1", "Setting 2"], + "results": ["Result 1", "Result 2"], }, { - 'name': 'Test Averaging', - 'method': 'Averaging', - 'settings': ['Setting 1', 'Setting 2'], - 'results': ['Result 1', 'Result 2'] - } + "name": "Test Averaging", + "method": "Averaging", + "settings": ["Setting 1", "Setting 2"], + "results": ["Result 1", "Result 2"], + }, ] if do_history: export_evokeds_mff(export_fname, evoked, history=history) else: export_evokeds(export_fname, evoked, fmt=fmt) # Drop non-EEG channels - evoked = [ave.drop_channels(['ECG', 'EMG']) for ave in evoked] + evoked = [ave.drop_channels(["ECG", "EMG"]) for ave in evoked] evoked_exported = read_evokeds_mff(export_fname) assert len(evoked) == len(evoked_exported) for ave, ave_exported in zip(evoked, evoked_exported): # Compare infos - assert object_diff(ave_exported.info, ave.info) == '' + assert object_diff(ave_exported.info, ave.info) == "" # Compare data assert_allclose(ave_exported.data, ave.data) # Compare properties @@ -454,16 +625,14 @@ def test_export_evokeds_to_mff(tmp_path, fmt, do_history): assert_allclose(ave_exported.times, ave.times) # test overwrite - with pytest.raises(FileExistsError, match='Destination file exists'): + with pytest.raises(FileExistsError, match="Destination file exists"): if do_history: - export_evokeds_mff(export_fname, evoked, history=history, - overwrite=False) + export_evokeds_mff(export_fname, evoked, history=history, overwrite=False) else: export_evokeds(export_fname, evoked, overwrite=False) if do_history: - export_evokeds_mff(export_fname, evoked, history=history, - overwrite=True) 
+ export_evokeds_mff(export_fname, evoked, history=history, overwrite=True) else: export_evokeds(export_fname, evoked, overwrite=True) @@ -471,35 +640,81 @@ def test_export_evokeds_to_mff(tmp_path, fmt, do_history): evoked[0].export(export_fname, overwrite=True) -@pytest.mark.filterwarnings('ignore::FutureWarning') -@requires_version('mffpy', '0.5.7') +@pytest.mark.filterwarnings("ignore::FutureWarning") @testing.requires_testing_data def test_export_to_mff_no_device(): """Test no device type throws ValueError.""" - evoked = read_evokeds_mff(egi_evoked_fname, condition='Category 1') - evoked.info['device_info'] = None - with pytest.raises(ValueError, match='No device type.'): - export_evokeds('output.mff', evoked) + pytest.importorskip("mffpy", "0.5.7") + pytest.importorskip("defusedxml") + evoked = read_evokeds_mff(egi_evoked_fname, condition="Category 1") + evoked.info["device_info"] = None + with pytest.raises(ValueError, match="No device type."): + export_evokeds("output.mff", evoked) -@pytest.mark.filterwarnings('ignore::FutureWarning') -@requires_version('mffpy', '0.5.7') +@pytest.mark.filterwarnings("ignore::FutureWarning") def test_export_to_mff_incompatible_sfreq(): """Test non-whole number sampling frequency throws ValueError.""" + pytest.importorskip("mffpy", "0.5.7") evoked = read_evokeds(fname_evoked) - with pytest.raises(ValueError, match=f'sfreq: {evoked[0].info["sfreq"]}'): - export_evokeds('output.mff', evoked) + with pytest.raises(ValueError, match=f"sfreq: {evoked[0].info['sfreq']}"): + export_evokeds("output.mff", evoked) -@pytest.mark.parametrize('fmt,ext', [ - ('EEGLAB', 'set'), - ('EDF', 'edf'), - ('BrainVision', 'vhdr'), - ('auto', 'vhdr') -]) +@pytest.mark.parametrize( + "fmt,ext", + [("EEGLAB", "set"), ("EDF", "edf"), ("BrainVision", "vhdr"), ("auto", "vhdr")], +) def test_export_evokeds_unsupported_format(fmt, ext): """Test exporting evoked dataset to non-supported formats.""" evoked = read_evokeds(fname_evoked) errstr = fmt.lower() if fmt != "auto" else "vhdr" with pytest.raises(ValueError, match=f"Format '{errstr}' is not .*"): - export_evokeds(f'output.{ext}', evoked, fmt=fmt) + export_evokeds(f"output.{ext}", evoked, fmt=fmt) + + +@edfio_mark() +@pytest.mark.parametrize( + ("input_path", "warning_msg"), + [ + (fname_raw, "Data has a non-integer"), + pytest.param( + misc_path / "ecog" / "sample_ecog_ieeg.fif", + "BDF format requires", + marks=[pytest.mark.slowtest, misc._pytest_mark()], + ), + ], +) +def test_export_raw_bdf(tmp_path, input_path, warning_msg): + """Test saving a Raw instance to BDF format.""" + raw = read_raw_fif(input_path) + + # only test with EEG channels + raw.pick(picks=["eeg", "ecog", "seeg"]).load_data() + temp_fname = tmp_path / "test.bdf" + + with pytest.warns(RuntimeWarning, match=warning_msg): + raw.export(temp_fname) + + if "epoc" in raw.ch_names: + raw.drop_channels(["epoc"]) + + raw_read = read_raw_bdf(temp_fname, preload=True) + assert raw.ch_names == raw_read.ch_names + # only compare the original length, since extra zeros are appended + orig_raw_len = len(raw) + + # assert data and times are not different + # Due to the physical range of the data, reading and writing is not lossless. For + # example, a physical min/max of -/+ 3200 uV will result in a resolution of 0.38 nV. + # This resolution is more than sufficient for EEG. 
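    # Annotation (not from the patch): the 0.38 nV figure above follows from BDF's
    # 24-bit sample depth, just as the 0.09 uV figure quoted in test_export_raw_edf
    # follows from EDF's 16-bit depth, for the same example range of +/-3200 uV:
    #     EDF: 6400 uV / 2**16 ≈ 0.098 uV per quantisation step
    #     BDF: 6400 uV / 2**24 ≈ 0.00038 uV ≈ 0.38 nV per quantisation step
    # which is why the BDF comparison below can use a much tighter decimal tolerance
    # than the EDF comparison above.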
+ assert_array_almost_equal( + raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=11 + ) + + # Due to the data record duration limitations of BDF files, one cannot store + # arbitrary float sampling rate exactly. Usually this results in two sampling rates + # that are off by very low number of decimal points. This for practical purposes + # does not matter but will result in an error when say the number of time points is + # very very large. + assert_allclose(raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5) diff --git a/mne/filter.py b/mne/filter.py index e70af37fdfc..8d5d3e48ea7 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -1,48 +1,45 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """IIR and FIR filtering and resampling functions.""" from collections import Counter from copy import deepcopy from functools import partial +from math import gcd import numpy as np +from scipy import fft, signal +from scipy.stats import f as fstat -from .annotations import _annotations_starts_stops -from .io.pick import _picks_to_idx -from .cuda import (_setup_cuda_fft_multiply_repeated, _fft_multiply_repeated, - _setup_cuda_fft_resample, _fft_resample, _smart_pad) -from .parallel import parallel_func -from .utils import (logger, verbose, sum_squared, warn, _pl, - _check_preload, _validate_type, _check_option, _ensure_int) +from ._fiff.pick import _picks_to_idx from ._ola import _COLA +from .cuda import ( + _fft_multiply_repeated, + _fft_resample, + _setup_cuda_fft_multiply_repeated, + _setup_cuda_fft_resample, + _smart_pad, +) +from .fixes import minimum_phase +from .parallel import parallel_func +from .utils import ( + _check_option, + _check_preload, + _ensure_int, + _pl, + _validate_type, + logger, + sum_squared, + verbose, + warn, +) # These values from Ifeachor and Jervis. _length_factors = dict(hann=3.1, hamming=3.3, blackman=5.0) -def is_power2(num): - """Test if number is a power of 2. - - Parameters - ---------- - num : int - Number. - - Returns - ------- - b : bool - True if is power of 2. - - Examples - -------- - >>> is_power2(2 ** 3) - True - >>> is_power2(5) - False - """ - num = int(num) - return num != 0 and ((num & (num - 1)) == 0) - - def next_fast_len(target): """Find the next fast size of input data to `fft`, for zero-padding, etc. @@ -66,20 +63,178 @@ def next_fast_len(target): Copied from SciPy with minor modifications. 
""" from bisect import bisect_left - hams = (8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48, - 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128, - 135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250, - 256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450, - 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729, - 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125, - 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536, - 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160, - 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916, - 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840, - 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000, - 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400, - 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100, - 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000) + + hams = ( + 8, + 9, + 10, + 12, + 15, + 16, + 18, + 20, + 24, + 25, + 27, + 30, + 32, + 36, + 40, + 45, + 48, + 50, + 54, + 60, + 64, + 72, + 75, + 80, + 81, + 90, + 96, + 100, + 108, + 120, + 125, + 128, + 135, + 144, + 150, + 160, + 162, + 180, + 192, + 200, + 216, + 225, + 240, + 243, + 250, + 256, + 270, + 288, + 300, + 320, + 324, + 360, + 375, + 384, + 400, + 405, + 432, + 450, + 480, + 486, + 500, + 512, + 540, + 576, + 600, + 625, + 640, + 648, + 675, + 720, + 729, + 750, + 768, + 800, + 810, + 864, + 900, + 960, + 972, + 1000, + 1024, + 1080, + 1125, + 1152, + 1200, + 1215, + 1250, + 1280, + 1296, + 1350, + 1440, + 1458, + 1500, + 1536, + 1600, + 1620, + 1728, + 1800, + 1875, + 1920, + 1944, + 2000, + 2025, + 2048, + 2160, + 2187, + 2250, + 2304, + 2400, + 2430, + 2500, + 2560, + 2592, + 2700, + 2880, + 2916, + 3000, + 3072, + 3125, + 3200, + 3240, + 3375, + 3456, + 3600, + 3645, + 3750, + 3840, + 3888, + 4000, + 4050, + 4096, + 4320, + 4374, + 4500, + 4608, + 4800, + 4860, + 5000, + 5120, + 5184, + 5400, + 5625, + 5760, + 5832, + 6000, + 6075, + 6144, + 6250, + 6400, + 6480, + 6561, + 6750, + 6912, + 7200, + 7290, + 7500, + 7680, + 7776, + 8000, + 8100, + 8192, + 8640, + 8748, + 9000, + 9216, + 9375, + 9600, + 9720, + 10000, + ) if target <= 6: return target @@ -92,7 +247,7 @@ def next_fast_len(target): if target <= hams[-1]: return hams[bisect_left(hams, target)] - match = float('inf') # Anything found will be smaller + match = float("inf") # Anything found will be smaller p5 = 1 while p5 < target: p35 = p5 @@ -121,53 +276,29 @@ def next_fast_len(target): return match -def _overlap_add_filter(x, h, n_fft=None, phase='zero', picks=None, - n_jobs=None, copy=True, pad='reflect_limited'): - """Filter the signal x using h with overlap-add FFTs. - - Parameters - ---------- - x : array, shape (n_signals, n_times) - Signals to filter. - h : 1d array - Filter impulse response (FIR filter coefficients). Must be odd length - if ``phase='linear'``. - n_fft : int - Length of the FFT. If None, the best size is determined automatically. - phase : str - If ``'zero'``, the delay for the filter is compensated (and it must be - an odd-length symmetric filter). If ``'linear'``, the response is - uncompensated. If ``'zero-double'``, the filter is applied in the - forward and reverse directions. If 'minimum', a minimum-phase - filter will be used. - picks : list | None - See calling functions. - n_jobs : int | str - Number of jobs to run in parallel. Can be ``'cuda'`` if ``cupy`` - is installed properly. 
- copy : bool - If True, a copy of x, filtered, is returned. Otherwise, it operates - on x in place. - pad : str - Padding type for ``_smart_pad``. - - Returns - ------- - x : array, shape (n_signals, n_times) - x filtered. - """ +def _overlap_add_filter( + x, + h, + n_fft=None, + phase="zero", + picks=None, + n_jobs=None, + copy=True, + pad="reflect_limited", +): + """Filter the signal x using h with overlap-add FFTs.""" # set up array for filtering, reshape to 2D, operate on last axis x, orig_shape, picks = _prep_for_filtering(x, copy, picks) # Extend the signal by mirroring the edges to reduce transient filter # response _check_zero_phase_length(len(h), phase) if len(h) == 1: - return x * h ** 2 if phase == 'zero-double' else x * h + return x * h**2 if phase == "zero-double" else x * h n_edge = max(min(len(h), x.shape[1]) - 1, 0) - logger.debug('Smart-padding with: %s samples on each edge' % n_edge) + logger.debug(f"Smart-padding with: {n_edge} samples on each edge") n_x = x.shape[1] + 2 * n_edge - if phase == 'zero-double': + if phase == "zero-double": h = np.convolve(h, h[::-1]) # Determine FFT length to use @@ -176,10 +307,14 @@ def _overlap_add_filter(x, h, n_fft=None, phase='zero', picks=None, max_fft = n_x if max_fft >= min_fft: # cost function based on number of multiplications - N = 2 ** np.arange(np.ceil(np.log2(min_fft)), - np.ceil(np.log2(max_fft)) + 1, dtype=int) - cost = (np.ceil(n_x / (N - len(h) + 1).astype(np.float64)) * - N * (np.log2(N) + 1)) + N = 2 ** np.arange( + np.ceil(np.log2(min_fft)), np.ceil(np.log2(max_fft)) + 1, dtype=int + ) + cost = ( + np.ceil(n_x / (N - len(h) + 1).astype(np.float64)) + * N + * (np.log2(N) + 1) + ) # add a heuristic term to prevent too-long FFT's which are slow # (not predicted by mult. cost alone, 4e-5 exp. 
determined) @@ -189,10 +324,12 @@ def _overlap_add_filter(x, h, n_fft=None, phase='zero', picks=None, else: # Use only a single block n_fft = next_fast_len(min_fft) - logger.debug('FFT block length: %s' % n_fft) + logger.debug(f"FFT block length: {n_fft}") if n_fft < min_fft: - raise ValueError('n_fft is too short, has to be at least ' - '2 * len(h) - 1 (%s), got %s' % (min_fft, n_fft)) + raise ValueError( + f"n_fft is too short, has to be at least 2 * len(h) - 1 ({min_fft}), got " + f"{n_fft}" + ) # Figure out if we should use CUDA n_jobs, cuda_dict = _setup_cuda_fft_multiply_repeated(n_jobs, h, n_fft) @@ -202,11 +339,13 @@ def _overlap_add_filter(x, h, n_fft=None, phase='zero', picks=None, parallel, p_fun, _ = parallel_func(_1d_overlap_filter, n_jobs) if n_jobs == 1: for p in picks: - x[p] = _1d_overlap_filter(x[p], len(h), n_edge, phase, - cuda_dict, pad, n_fft) + x[p] = _1d_overlap_filter( + x[p], len(h), n_edge, phase, cuda_dict, pad, n_fft + ) else: - data_new = parallel(p_fun(x[p], len(h), n_edge, phase, - cuda_dict, pad, n_fft) for p in picks) + data_new = parallel( + p_fun(x[p], len(h), n_edge, phase, cuda_dict, pad, n_fft) for p in picks + ) for pp, p in enumerate(picks): x[p] = data_new[pp] @@ -223,7 +362,7 @@ def _1d_overlap_filter(x, n_h, n_edge, phase, cuda_dict, pad, n_fft): n_seg = n_fft - n_h + 1 n_segments = int(np.ceil(n_x / float(n_seg))) - shift = ((n_h - 1) // 2 if phase.startswith('zero') else 0) + n_edge + shift = ((n_h - 1) // 2 if phase.startswith("zero") else 0) + n_edge # Now the actual filtering step is identical for zero-phase (filtfilt-like) # or single-pass @@ -242,14 +381,13 @@ def _1d_overlap_filter(x, n_h, n_edge, phase, cuda_dict, pad, n_fft): x_filtered[start_filt:stop_filt] += prod[start_prod:stop_prod] # Remove mirrored edges that we added and cast (n_edge can be zero) - x_filtered = x_filtered[:n_x - 2 * n_edge].astype(x.dtype) + x_filtered = x_filtered[: n_x - 2 * n_edge].astype(x.dtype) return x_filtered def _filter_attenuation(h, freq, gain): """Compute minimum attenuation at stop frequency.""" - from scipy.signal import freqz - _, filt_resp = freqz(h.ravel(), worN=np.pi * freq) + _, filt_resp = signal.freqz(h.ravel(), worN=np.pi * freq) filt_resp = np.abs(filt_resp) # use amplitude response filt_resp[np.where(gain == 1)] = 0 idx = np.argmax(filt_resp) @@ -269,12 +407,12 @@ def _prep_for_filtering(x, copy, picks=None): x.shape = (np.prod(x.shape[:-1]), x.shape[-1]) if len(orig_shape) == 3: n_epochs, n_channels, n_times = orig_shape - offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels), - len(picks)) + offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels), len(picks)) picks = np.tile(picks, n_epochs) + offset elif len(orig_shape) > 3: - raise ValueError('picks argument is not supported for data with more' - ' than three dimensions') + raise ValueError( + "picks argument is not supported for data with more than three dimensions" + ) assert all(0 <= pick < x.shape[0] for pick in picks) # guaranteed by above return x, orig_shape, picks @@ -282,7 +420,6 @@ def _prep_for_filtering(x, copy, picks=None): def _firwin_design(N, freq, gain, window, sfreq): """Construct a FIR filter using firwin.""" - from scipy.signal import firwin assert freq[0] == 0 assert len(freq) > 1 assert len(freq) == len(gain) @@ -296,31 +433,38 @@ def _firwin_design(N, freq, gain, window, sfreq): for this_freq, this_gain in zip(freq[::-1][1:], gain[::-1][1:]): assert this_gain in (0, 1) if this_gain != prev_gain: - # Get the correct N to satistify the 
requested transition bandwidth - transition = (prev_freq - this_freq) / 2. + # Get the correct N to satisfy the requested transition bandwidth + transition = (prev_freq - this_freq) / 2.0 this_N = int(round(_length_factors[window] / transition)) - this_N += (1 - this_N % 2) # make it odd + this_N += 1 - this_N % 2 # make it odd if this_N > N: - raise ValueError('The requested filter length %s is too short ' - 'for the requested %0.2f Hz transition band, ' - 'which requires %s samples' - % (N, transition * sfreq / 2., this_N)) + raise ValueError( + f"The requested filter length {N} is too short for the requested " + f"{transition * sfreq / 2.0:0.2f} Hz transition band, which " + f"requires {this_N} samples" + ) # Construct a lowpass - this_h = firwin(this_N, (prev_freq + this_freq) / 2., - window=window, pass_zero=True, fs=freq[-1] * 2) + this_h = signal.firwin( + this_N, + (prev_freq + this_freq) / 2.0, + window=window, + pass_zero=True, + fs=freq[-1] * 2, + ) assert this_h.shape == (this_N,) offset = (N - this_N) // 2 if this_gain == 0: - h[offset:N - offset] -= this_h + h[offset : N - offset] -= this_h else: - h[offset:N - offset] += this_h + h[offset : N - offset] += this_h prev_gain = this_gain prev_freq = this_freq return h -def _construct_fir_filter(sfreq, freq, gain, filter_length, phase, fir_window, - fir_design): +def _construct_fir_filter( + sfreq, freq, gain, filter_length, phase, fir_window, fir_design +): """Filter signal using gain control points in the frequency domain. The filter impulse response is constructed from a Hann window (window @@ -328,80 +472,53 @@ def _construct_fir_filter(sfreq, freq, gain, filter_length, phase, fir_window, (windowing is a smoothing in frequency domain). If x is multi-dimensional, this operates along the last dimension. - - Parameters - ---------- - sfreq : float - Sampling rate in Hz. - freq : 1d array - Frequency sampling points in Hz. - gain : 1d array - Filter gain at frequency sampling points. - Must be all 0 and 1 for fir_design=="firwin". - filter_length : int - Length of the filter to use. Must be odd length if phase == "zero". - phase : str - If 'zero', the delay for the filter is compensated (and it must be - an odd-length symmetric filter). If 'linear', the response is - uncompensated. If 'zero-double', the filter is applied in the - forward and reverse directions. If 'minimum', a minimum-phase - filter will be used. - fir_window : str - The window to use in FIR design, can be "hamming" (default), - "hann", or "blackman". - fir_design : str - Can be "firwin2" or "firwin". - - Returns - ------- - h : array - Filter coefficients. """ assert freq[0] == 0 - if fir_design == 'firwin2': - from scipy.signal import firwin2 as fir_design + if fir_design == "firwin2": + fir_design = signal.firwin2 else: - assert fir_design == 'firwin' + assert fir_design == "firwin" fir_design = partial(_firwin_design, sfreq=sfreq) - from scipy.signal import minimum_phase - # issue a warning if attenuation is less than this - min_att_db = 12 if phase == 'minimum' else 20 + min_att_db = 12 if phase == "minimum-half" else 20 # normalize frequencies - freq = np.array(freq) / (sfreq / 2.) 
+ freq = np.array(freq) / (sfreq / 2.0) if freq[0] != 0 or freq[-1] != 1: - raise ValueError('freq must start at 0 and end an Nyquist (%s), got %s' - % (sfreq / 2., freq)) + raise ValueError( + f"freq must start at 0 and end an Nyquist ({sfreq / 2.0}), got {freq}" + ) gain = np.array(gain) # Use overlap-add filter with a fixed length N = _check_zero_phase_length(filter_length, phase, gain[-1]) # construct symmetric (linear phase) filter - if phase == 'minimum': + if phase == "minimum-half": h = fir_design(N * 2 - 1, freq, gain, window=fir_window) h = minimum_phase(h) else: h = fir_design(N, freq, gain, window=fir_window) + if phase == "minimum": + h = minimum_phase(h, half=False) assert h.size == N att_db, att_freq = _filter_attenuation(h, freq, gain) - if phase == 'zero-double': + if phase == "zero-double": att_db += 6 if att_db < min_att_db: - att_freq *= sfreq / 2. - warn('Attenuation at stop frequency %0.2f Hz is only %0.2f dB. ' - 'Increase filter_length for higher attenuation.' - % (att_freq, att_db)) + att_freq *= sfreq / 2.0 + warn( + f"Attenuation at stop frequency {att_freq:0.2f} Hz is only {att_db:0.2f} " + "dB. Increase filter_length for higher attenuation." + ) return h def _check_zero_phase_length(N, phase, gain_nyq=0): N = int(N) if N % 2 == 0: - if phase == 'zero': - raise RuntimeError('filter_length must be odd if phase="zero", ' - 'got %s' % N) - elif phase == 'zero-double' and gain_nyq == 1: + if phase == "zero": + raise RuntimeError(f'filter_length must be odd if phase="zero", got {N}') + elif phase == "zero-double" and gain_nyq == 1: N += 1 return N @@ -409,31 +526,49 @@ def _check_zero_phase_length(N, phase, gain_nyq=0): def _check_coefficients(system): """Check for filter stability.""" if isinstance(system, tuple): - from scipy.signal import tf2zpk - z, p, k = tf2zpk(*system) + z, p, k = signal.tf2zpk(*system) else: # sos - from scipy.signal import sos2zpk - z, p, k = sos2zpk(system) + z, p, k = signal.sos2zpk(system) if np.any(np.abs(p) > 1.0): - raise RuntimeError('Filter poles outside unit circle, filter will be ' - 'unstable. Consider using different filter ' - 'coefficients.') + raise RuntimeError( + "Filter poles outside unit circle, filter will be " + "unstable. Consider using different filter " + "coefficients." 
+ ) -def _filtfilt(x, iir_params, picks, n_jobs, copy): - """Call filtfilt.""" +def _iir_filter(x, iir_params, picks, n_jobs, copy, phase="zero"): + """Call filtfilt or lfilter.""" # set up array for filtering, reshape to 2D, operate on last axis - from scipy.signal import filtfilt, sosfiltfilt - padlen = min(iir_params['padlen'], x.shape[-1] - 1) x, orig_shape, picks = _prep_for_filtering(x, copy, picks) - if 'sos' in iir_params: - fun = partial(sosfiltfilt, sos=iir_params['sos'], padlen=padlen, - axis=-1) - _check_coefficients(iir_params['sos']) + if phase in ("zero", "zero-double"): + padlen = min(iir_params["padlen"], x.shape[-1] - 1) + if "sos" in iir_params: + fun = partial( + _iir_pad_apply_unpad, + func=signal.sosfiltfilt, + sos=iir_params["sos"], + padlen=padlen, + padtype="reflect_limited", + ) + _check_coefficients(iir_params["sos"]) + else: + fun = partial( + _iir_pad_apply_unpad, + func=signal.filtfilt, + b=iir_params["b"], + a=iir_params["a"], + padlen=padlen, + padtype="reflect_limited", + ) + _check_coefficients((iir_params["b"], iir_params["a"])) else: - fun = partial(filtfilt, b=iir_params['b'], a=iir_params['a'], - padlen=padlen, axis=-1) - _check_coefficients((iir_params['b'], iir_params['a'])) + if "sos" in iir_params: + fun = partial(signal.sosfilt, sos=iir_params["sos"], axis=-1) + _check_coefficients(iir_params["sos"]) + else: + fun = partial(signal.lfilter, b=iir_params["b"], a=iir_params["a"], axis=-1) + _check_coefficients((iir_params["b"], iir_params["a"])) parallel, p_fun, n_jobs = parallel_func(fun, n_jobs) if n_jobs == 1: for p in picks: @@ -462,15 +597,14 @@ def estimate_ringing_samples(system, max_try=100000): n : int The approximate ringing. """ - from scipy import signal if isinstance(system, tuple): # TF - kind = 'ba' + kind = "ba" b, a = system - zi = [0.] * (len(a) - 1) + zi = [0.0] * (len(a) - 1) else: - kind = 'sos' + kind = "sos" sos = system - zi = [[0.] * 2] * len(sos) + zi = [[0.0] * 2] * len(sos) n_per_chunk = 1000 n_chunks_max = int(np.ceil(max_try / float(n_per_chunk))) x = np.zeros(n_per_chunk) @@ -478,7 +612,7 @@ def estimate_ringing_samples(system, max_try=100000): last_good = n_per_chunk thresh_val = 0 for ii in range(n_chunks_max): - if kind == 'ba': + if kind == "ba": h, zi = signal.lfilter(b, a, x, zi=zi) else: h, zi = signal.sosfilt(sos, x, zi=zi) @@ -492,23 +626,32 @@ def estimate_ringing_samples(system, max_try=100000): idx = (ii - 1) * n_per_chunk + last_good break else: - warn('Could not properly estimate ringing for the filter') + warn("Could not properly estimate ringing for the filter") idx = n_per_chunk * n_chunks_max return idx _ftype_dict = { - 'butter': 'Butterworth', - 'cheby1': 'Chebyshev I', - 'cheby2': 'Chebyshev II', - 'ellip': 'Cauer/elliptic', - 'bessel': 'Bessel/Thomson', + "butter": "Butterworth", + "cheby1": "Chebyshev I", + "cheby2": "Chebyshev II", + "ellip": "Cauer/elliptic", + "bessel": "Bessel/Thomson", } @verbose -def construct_iir_filter(iir_params, f_pass=None, f_stop=None, sfreq=None, - btype=None, return_copy=True, verbose=None): +def construct_iir_filter( + iir_params, + f_pass=None, + f_stop=None, + sfreq=None, + btype=None, + return_copy=True, + *, + phase="zero", + verbose=None, +): """Use IIR parameters to get filtering coefficients. This function works like a wrapper for iirdesign and iirfilter in @@ -563,6 +706,15 @@ def construct_iir_filter(iir_params, f_pass=None, f_stop=None, sfreq=None, ``iir_params`` will be set inplace (if they weren't already). 
Otherwise, a new ``iir_params`` instance will be created and returned with these entries. + phase : str + Phase of the filter. + ``phase='zero'`` (default) or equivalently ``'zero-double'`` constructs and + applies IIR filter twice, once forward, and once backward (making it non-causal) + using :func:`~scipy.signal.filtfilt`; ``phase='forward'`` will apply + the filter once in the forward (causal) direction using + :func:`~scipy.signal.lfilter`. + + .. versionadded:: 0.13 %(verbose)s Returns @@ -624,124 +776,176 @@ def construct_iir_filter(iir_params, f_pass=None, f_stop=None, sfreq=None, For more information, see the tutorials :ref:`disc-filtering` and :ref:`tut-filter-resample`. """ # noqa: E501 - from scipy.signal import iirfilter, iirdesign, freqz, sosfreqz - known_filters = ('bessel', 'butter', 'butterworth', 'cauer', 'cheby1', - 'cheby2', 'chebyshev1', 'chebyshev2', 'chebyshevi', - 'chebyshevii', 'ellip', 'elliptic') + known_filters = ( + "bessel", + "butter", + "butterworth", + "cauer", + "cheby1", + "cheby2", + "chebyshev1", + "chebyshev2", + "chebyshevi", + "chebyshevii", + "ellip", + "elliptic", + ) if not isinstance(iir_params, dict): - raise TypeError('iir_params must be a dict, got %s' % type(iir_params)) + raise TypeError(f"iir_params must be a dict, got {type(iir_params)}") # if the filter has been designed, we're good to go Wp = None - if 'sos' in iir_params: - system = iir_params['sos'] - output = 'sos' - elif 'a' in iir_params and 'b' in iir_params: - system = (iir_params['b'], iir_params['a']) - output = 'ba' + if "sos" in iir_params: + system = iir_params["sos"] + output = "sos" + elif "a" in iir_params and "b" in iir_params: + system = (iir_params["b"], iir_params["a"]) + output = "ba" else: - output = iir_params.get('output', 'sos') - _check_option('output', output, ('ba', 'sos')) + output = iir_params.get("output", "sos") + _check_option("output", output, ("ba", "sos")) # ensure we have a valid ftype - if 'ftype' not in iir_params: - raise RuntimeError('ftype must be an entry in iir_params if ''b'' ' - 'and ''a'' are not specified') - ftype = iir_params['ftype'] + if "ftype" not in iir_params: + raise RuntimeError( + "ftype must be an entry in iir_params if 'b' and 'a' are not specified." + ) + ftype = iir_params["ftype"] if ftype not in known_filters: - raise RuntimeError('ftype must be in filter_dict from ' - 'scipy.signal (e.g., butter, cheby1, etc.) not ' - '%s' % ftype) + raise RuntimeError( + "ftype must be in filter_dict from scipy.signal (e.g., butter, cheby1, " + f"etc.) 
not {ftype}" + ) # use order-based design f_pass = np.atleast_1d(f_pass) if f_pass.ndim > 1: - raise ValueError('frequencies must be 1D, got %dD' % f_pass.ndim) - edge_freqs = ', '.join('%0.2f' % (f,) for f in f_pass) + raise ValueError(f"frequencies must be 1D, got {f_pass.ndim}D") + edge_freqs = ", ".join(f"{f:0.2f}" for f in f_pass) Wp = f_pass / (float(sfreq) / 2) # IT will de designed ftype_nice = _ftype_dict.get(ftype, ftype) - logger.info('') - logger.info('IIR filter parameters') - logger.info('---------------------') - logger.info('%s %s zero-phase (two-pass forward and reverse) ' - 'non-causal filter:' % (ftype_nice, btype)) - # SciPy designs for -3dB but we do forward-backward, so this is -6dB - if 'order' in iir_params: - kwargs = dict(N=iir_params['order'], Wn=Wp, btype=btype, - ftype=ftype, output=output) - for key in ('rp', 'rs'): + _validate_type(phase, str, "phase") + _check_option("phase", phase, ("zero", "zero-double", "forward")) + if phase in ("zero-double", "zero"): + ptype = "zero-phase (two-pass forward and reverse) non-causal" + else: + ptype = "non-linear phase (one-pass forward) causal" + logger.info("") + logger.info("IIR filter parameters") + logger.info("---------------------") + logger.info(f"{ftype_nice} {btype} {ptype} filter:") + # SciPy designs forward for -3dB, so forward-backward is -6dB + if "order" in iir_params: + singleton = btype in ("low", "lowpass", "high", "highpass") + use_Wp = Wp.item() if singleton else Wp + kwargs = dict( + N=iir_params["order"], + Wn=use_Wp, + btype=btype, + ftype=ftype, + output=output, + ) + for key in ("rp", "rs"): if key in iir_params: kwargs[key] = iir_params[key] - system = iirfilter(**kwargs) - logger.info('- Filter order %d (effective, after forward-backward)' - % (2 * iir_params['order'] * len(Wp),)) + system = signal.iirfilter(**kwargs) + if phase in ("zero", "zero-double"): + ptype, pmul = "(effective, after forward-backward)", 2 + else: + ptype, pmul = "(forward)", 1 + logger.info( + "- Filter order %d %s", pmul * iir_params["order"] * len(Wp), ptype + ) else: # use gpass / gstop design Ws = np.asanyarray(f_stop) / (float(sfreq) / 2) - if 'gpass' not in iir_params or 'gstop' not in iir_params: - raise ValueError('iir_params must have at least ''gstop'' and' - ' ''gpass'' (or ''N'') entries') - system = iirdesign(Wp, Ws, iir_params['gpass'], - iir_params['gstop'], ftype=ftype, output=output) + if "gpass" not in iir_params or "gstop" not in iir_params: + raise ValueError( + "iir_params must have at least 'gstop' and 'gpass' (or N) entries." 
+ ) + system = signal.iirdesign( + Wp, + Ws, + iir_params["gpass"], + iir_params["gstop"], + ftype=ftype, + output=output, + ) if system is None: - raise RuntimeError('coefficients could not be created from iir_params') + raise RuntimeError("coefficients could not be created from iir_params") # do some sanity checks _check_coefficients(system) # get the gains at the cutoff frequencies if Wp is not None: - if output == 'sos': - cutoffs = sosfreqz(system, worN=Wp * np.pi)[1] + if output == "sos": + cutoffs = signal.sosfreqz(system, worN=Wp * np.pi)[1] else: - cutoffs = freqz(system[0], system[1], worN=Wp * np.pi)[1] + cutoffs = signal.freqz(system[0], system[1], worN=Wp * np.pi)[1] + cutoffs = 20 * np.log10(np.abs(cutoffs)) # 2 * 20 here because we do forward-backward filtering - cutoffs = 40 * np.log10(np.abs(cutoffs)) - cutoffs = ', '.join(['%0.2f' % (c,) for c in cutoffs]) - logger.info('- Cutoff%s at %s Hz: %s dB' - % (_pl(f_pass), edge_freqs, cutoffs)) + if phase in ("zero", "zero-double"): + cutoffs *= 2 + cutoffs = ", ".join([f"{c:0.2f}" for c in cutoffs]) + logger.info(f"- Cutoff{_pl(f_pass)} at {edge_freqs} Hz: {cutoffs} dB") # now deal with padding - if 'padlen' not in iir_params: + if "padlen" not in iir_params: padlen = estimate_ringing_samples(system) else: - padlen = iir_params['padlen'] + padlen = iir_params["padlen"] if return_copy: iir_params = deepcopy(iir_params) iir_params.update(dict(padlen=padlen)) - if output == 'sos': + if output == "sos": iir_params.update(sos=system) else: iir_params.update(b=system[0], a=system[1]) - logger.info('') + logger.info("") return iir_params def _check_method(method, iir_params, extra_types=()): """Parse method arguments.""" - allowed_types = ['iir', 'fir', 'fft'] + list(extra_types) - _validate_type(method, 'str', 'method') - _check_option('method', method, allowed_types) - if method == 'fft': - method = 'fir' # use the better name - if method == 'iir': + allowed_types = ["iir", "fir", "fft"] + list(extra_types) + _validate_type(method, "str", "method") + _check_option("method", method, allowed_types) + if method == "fft": + method = "fir" # use the better name + if method == "iir": if iir_params is None: iir_params = dict() - if len(iir_params) == 0 or (len(iir_params) == 1 and - 'output' in iir_params): - iir_params = dict(order=4, ftype='butter', - output=iir_params.get('output', 'sos')) + if len(iir_params) == 0 or (len(iir_params) == 1 and "output" in iir_params): + iir_params = dict( + order=4, ftype="butter", output=iir_params.get("output", "sos") + ) elif iir_params is not None: raise ValueError('iir_params must be None if method != "iir"') return iir_params, method @verbose -def filter_data(data, sfreq, l_freq, h_freq, picks=None, filter_length='auto', - l_trans_bandwidth='auto', h_trans_bandwidth='auto', - n_jobs=None, method='fir', iir_params=None, copy=True, - phase='zero', fir_window='hamming', fir_design='firwin', - pad='reflect_limited', *, verbose=None): +def filter_data( + data, + sfreq, + l_freq, + h_freq, + picks=None, + filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + n_jobs=None, + method="fir", + iir_params=None, + copy=True, + phase="zero", + fir_window="hamming", + fir_design="firwin", + pad="reflect_limited", + *, + verbose=None, +): """Filter a subset of channels. 
Parameters @@ -810,21 +1014,42 @@ def filter_data(data, sfreq, l_freq, h_freq, picks=None, filter_length='auto', data = _check_filterable(data) iir_params, method = _check_method(method, iir_params) filt = create_filter( - data, sfreq, l_freq, h_freq, filter_length, l_trans_bandwidth, - h_trans_bandwidth, method, iir_params, phase, fir_window, fir_design) - if method in ('fir', 'fft'): - data = _overlap_add_filter(data, filt, None, phase, picks, n_jobs, - copy, pad) + data, + sfreq, + l_freq, + h_freq, + filter_length, + l_trans_bandwidth, + h_trans_bandwidth, + method, + iir_params, + phase, + fir_window, + fir_design, + ) + if method in ("fir", "fft"): + data = _overlap_add_filter(data, filt, None, phase, picks, n_jobs, copy, pad) else: - data = _filtfilt(data, filt, picks, n_jobs, copy) + data = _iir_filter(data, filt, picks, n_jobs, copy, phase) return data @verbose -def create_filter(data, sfreq, l_freq, h_freq, filter_length='auto', - l_trans_bandwidth='auto', h_trans_bandwidth='auto', - method='fir', iir_params=None, phase='zero', - fir_window='hamming', fir_design='firwin', verbose=None): +def create_filter( + data, + sfreq, + l_freq, + h_freq, + filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + method="fir", + iir_params=None, + phase="zero", + fir_window="hamming", + fir_design="firwin", + verbose=None, +): r"""Create a FIR or IIR filter. ``l_freq`` and ``h_freq`` are the frequencies below which and above @@ -943,79 +1168,174 @@ def create_filter(data, sfreq, l_freq, h_freq, filter_length='auto', """ sfreq = float(sfreq) if sfreq < 0: - raise ValueError('sfreq must be positive') + raise ValueError("sfreq must be positive") # If no data specified, sanity checking will be skipped if data is None: - logger.info('No data specified. Sanity checks related to the length of' - ' the signal relative to the filter order will be' - ' skipped.') + logger.info( + "No data specified. Sanity checks related to the length of the signal " + "relative to the filter order will be skipped." + ) if h_freq is not None: h_freq = np.array(h_freq, float).ravel() - if (h_freq > (sfreq / 2.)).any(): - raise ValueError('h_freq (%s) must be less than the Nyquist ' - 'frequency %s' % (h_freq, sfreq / 2.)) + if (h_freq > (sfreq / 2.0)).any(): + raise ValueError( + f"h_freq ({h_freq}) must be less than the Nyquist frequency " + f"{sfreq / 2.0}" + ) if l_freq is not None: l_freq = np.array(l_freq, float).ravel() if (l_freq == 0).all(): l_freq = None iir_params, method = _check_method(method, iir_params) if l_freq is None and h_freq is None: - data, sfreq, _, _, _, _, filter_length, phase, fir_window, \ - fir_design = _triage_filter_params( - data, sfreq, None, None, None, None, - filter_length, method, phase, fir_window, fir_design) - if method == 'iir': + ( + data, + sfreq, + _, + _, + _, + _, + filter_length, + phase, + fir_window, + fir_design, + ) = _triage_filter_params( + data, + sfreq, + None, + None, + None, + None, + filter_length, + method, + phase, + fir_window, + fir_design, + ) + if method == "iir": out = dict() if iir_params is None else deepcopy(iir_params) - out.update(b=np.array([1.]), a=np.array([1.])) + out.update(b=np.array([1.0]), a=np.array([1.0])) else: - freq = [0, sfreq / 2.] - gain = [1., 1.] 
+ freq = [0, sfreq / 2.0] + gain = [1.0, 1.0] if l_freq is None and h_freq is not None: - logger.info('Setting up low-pass filter at %0.2g Hz' % (h_freq,)) - data, sfreq, _, f_p, _, f_s, filter_length, phase, fir_window, \ - fir_design = _triage_filter_params( - data, sfreq, None, h_freq, None, h_trans_bandwidth, - filter_length, method, phase, fir_window, fir_design) - if method == 'iir': - out = construct_iir_filter(iir_params, f_p, f_s, sfreq, 'lowpass') + h_freq = h_freq.item() + logger.info(f"Setting up low-pass filter at {h_freq:0.2g} Hz") + ( + data, + sfreq, + _, + f_p, + _, + f_s, + filter_length, + phase, + fir_window, + fir_design, + ) = _triage_filter_params( + data, + sfreq, + None, + h_freq, + None, + h_trans_bandwidth, + filter_length, + method, + phase, + fir_window, + fir_design, + ) + if method == "iir": + out = construct_iir_filter( + iir_params, f_p, f_s, sfreq, "lowpass", phase=phase + ) else: # 'fir' freq = [0, f_p, f_s] gain = [1, 1, 0] - if f_s != sfreq / 2.: - freq += [sfreq / 2.] + if f_s != sfreq / 2.0: + freq += [sfreq / 2.0] gain += [0] elif l_freq is not None and h_freq is None: - logger.info('Setting up high-pass filter at %0.2g Hz' % (l_freq,)) - data, sfreq, pass_, _, stop, _, filter_length, phase, fir_window, \ - fir_design = _triage_filter_params( - data, sfreq, l_freq, None, l_trans_bandwidth, None, - filter_length, method, phase, fir_window, fir_design) - if method == 'iir': - out = construct_iir_filter(iir_params, pass_, stop, sfreq, - 'highpass') + l_freq = l_freq.item() + logger.info(f"Setting up high-pass filter at {l_freq:0.2g} Hz") + ( + data, + sfreq, + pass_, + _, + stop, + _, + filter_length, + phase, + fir_window, + fir_design, + ) = _triage_filter_params( + data, + sfreq, + l_freq, + None, + l_trans_bandwidth, + None, + filter_length, + method, + phase, + fir_window, + fir_design, + ) + if method == "iir": + out = construct_iir_filter( + iir_params, pass_, stop, sfreq, "highpass", phase=phase + ) else: # 'fir' - freq = [stop, pass_, sfreq / 2.] + freq = [stop, pass_, sfreq / 2.0] gain = [0, 1, 1] if stop != 0: freq = [0] + freq gain = [0] + gain elif l_freq is not None and h_freq is not None: if (l_freq < h_freq).any(): - logger.info('Setting up band-pass filter from %0.2g - %0.2g Hz' - % (l_freq, h_freq)) - data, sfreq, f_p1, f_p2, f_s1, f_s2, filter_length, phase, \ - fir_window, fir_design = _triage_filter_params( - data, sfreq, l_freq, h_freq, l_trans_bandwidth, - h_trans_bandwidth, filter_length, method, phase, - fir_window, fir_design) - if method == 'iir': - out = construct_iir_filter(iir_params, [f_p1, f_p2], - [f_s1, f_s2], sfreq, 'bandpass') + l_freq, h_freq = l_freq.item(), h_freq.item() + logger.info( + f"Setting up band-pass filter from {l_freq:0.2g} - {h_freq:0.2g} Hz" + ) + ( + data, + sfreq, + f_p1, + f_p2, + f_s1, + f_s2, + filter_length, + phase, + fir_window, + fir_design, + ) = _triage_filter_params( + data, + sfreq, + l_freq, + h_freq, + l_trans_bandwidth, + h_trans_bandwidth, + filter_length, + method, + phase, + fir_window, + fir_design, + ) + if method == "iir": + out = construct_iir_filter( + iir_params, + [f_p1, f_p2], + [f_s1, f_s2], + sfreq, + "bandpass", + phase=phase, + ) else: # 'fir' freq = [f_s1, f_p1, f_p2, f_s2] gain = [0, 1, 1, 0] - if f_s2 != sfreq / 2.: - freq += [sfreq / 2.] 
+ if f_s2 != sfreq / 2.0: + freq += [sfreq / 2.0] gain += [0] if f_s1 != 0: freq = [0] + freq @@ -1024,53 +1344,101 @@ def create_filter(data, sfreq, l_freq, h_freq, filter_length='auto', # This could possibly be removed after 0.14 release, but might # as well leave it in to sanity check notch_filter if len(l_freq) != len(h_freq): - raise ValueError('l_freq and h_freq must be the same length') - msg = 'Setting up band-stop filter' + raise ValueError("l_freq and h_freq must be the same length") + msg = "Setting up band-stop filter" if len(l_freq) == 1: - msg += ' from %0.2g - %0.2g Hz' % (h_freq, l_freq) + l_freq, h_freq = l_freq.item(), h_freq.item() + msg += f" from {h_freq:0.2g} - {l_freq:0.2g} Hz" logger.info(msg) # Note: order of outputs is intentionally switched here! - data, sfreq, f_s1, f_s2, f_p1, f_p2, filter_length, phase, \ - fir_window, fir_design = _triage_filter_params( - data, sfreq, h_freq, l_freq, h_trans_bandwidth, - l_trans_bandwidth, filter_length, method, phase, - fir_window, fir_design, bands='arr', reverse=True) - if method == 'iir': + ( + data, + sfreq, + f_s1, + f_s2, + f_p1, + f_p2, + filter_length, + phase, + fir_window, + fir_design, + ) = _triage_filter_params( + data, + sfreq, + h_freq, + l_freq, + h_trans_bandwidth, + l_trans_bandwidth, + filter_length, + method, + phase, + fir_window, + fir_design, + bands="arr", + reverse=True, + ) + if method == "iir": if len(f_p1) != 1: - raise ValueError('Multiple stop-bands can only be used ' - 'with FIR filtering') - out = construct_iir_filter(iir_params, [f_p1[0], f_p2[0]], - [f_s1[0], f_s2[0]], sfreq, - 'bandstop') + raise ValueError( + "Multiple stop-bands can only be used with method='fir' " + "and method='spectrum_fit'" + ) + out = construct_iir_filter( + iir_params, + [f_p1[0], f_p2[0]], + [f_s1[0], f_s2[0]], + sfreq, + "bandstop", + phase=phase, + ) else: # 'fir' freq = np.r_[f_p1, f_s1, f_s2, f_p2] - gain = np.r_[np.ones_like(f_p1), np.zeros_like(f_s1), - np.zeros_like(f_s2), np.ones_like(f_p2)] + gain = np.r_[ + np.ones_like(f_p1), + np.zeros_like(f_s1), + np.zeros_like(f_s2), + np.ones_like(f_p2), + ] order = np.argsort(freq) freq = freq[order] gain = gain[order] if freq[0] != 0: - freq = np.r_[[0.], freq] - gain = np.r_[[1.], gain] - if freq[-1] != sfreq / 2.: - freq = np.r_[freq, [sfreq / 2.]] - gain = np.r_[gain, [1.]] + freq = np.r_[[0.0], freq] + gain = np.r_[[1.0], gain] + if freq[-1] != sfreq / 2.0: + freq = np.r_[freq, [sfreq / 2.0]] + gain = np.r_[gain, [1.0]] if np.any(np.abs(np.diff(gain, 2)) > 1): - raise ValueError('Stop bands are not sufficiently ' - 'separated.') - if method == 'fir': - out = _construct_fir_filter(sfreq, freq, gain, filter_length, phase, - fir_window, fir_design) + raise ValueError("Stop bands are not sufficiently separated.") + if method == "fir": + out = _construct_fir_filter( + sfreq, freq, gain, filter_length, phase, fir_window, fir_design + ) return out @verbose -def notch_filter(x, Fs, freqs, filter_length='auto', notch_widths=None, - trans_bandwidth=1, method='fir', iir_params=None, - mt_bandwidth=None, p_value=0.05, picks=None, n_jobs=None, - copy=True, phase='zero', fir_window='hamming', - fir_design='firwin', pad='reflect_limited', *, - verbose=None): +def notch_filter( + x, + Fs, + freqs, + filter_length="auto", + notch_widths=None, + trans_bandwidth=1, + method="fir", + iir_params=None, + mt_bandwidth=None, + p_value=0.05, + picks=None, + n_jobs=None, + copy=True, + phase="zero", + fir_window="hamming", + fir_design="firwin", + pad="reflect_limited", + *, + 
verbose=None, +): r"""Notch filter for the signal x. Applies a zero-phase notch filter to the signal x, operating on the last @@ -1084,15 +1452,16 @@ def notch_filter(x, Fs, freqs, filter_length='auto', notch_widths=None, Sampling rate in Hz. freqs : float | array of float | None Frequencies to notch filter in Hz, e.g. np.arange(60, 241, 60). - None can only be used with the mode 'spectrum_fit', where an F - test is used to find sinusoidal components. + Multiple stop-bands can only be used with method='fir' + and method='spectrum_fit'. None can only be used with the mode + 'spectrum_fit', where an F test is used to find sinusoidal components. %(filter_length_notch)s notch_widths : float | array of float | None Width of the stop band (centred at each freq in freqs) in Hz. If None, freqs / 200 is used. trans_bandwidth : float Width of the transition band in Hz. - Only used for ``method='fir'``. + Only used for ``method='fir'`` and ``method='iir'``. %(method_fir)s 'spectrum_fit' will use multi-taper estimation of sinusoidal components. If freqs=None and method='spectrum_fit', significant @@ -1154,72 +1523,105 @@ def notch_filter(x, Fs, freqs, filter_length='auto', notch_widths=None, & Hemant Bokil, Oxford University Press, New York, 2008. Please cite this in publications if method 'spectrum_fit' is used. """ - x = _check_filterable(x, 'notch filtered', 'notch_filter') - iir_params, method = _check_method(method, iir_params, ['spectrum_fit']) + x = _check_filterable(x, "notch filtered", "notch_filter") + iir_params, method = _check_method(method, iir_params, ["spectrum_fit"]) if freqs is not None: freqs = np.atleast_1d(freqs) - elif method != 'spectrum_fit': - raise ValueError('freqs=None can only be used with method ' - 'spectrum_fit') + elif method != "spectrum_fit": + raise ValueError("freqs=None can only be used with method spectrum_fit") # Only have to deal with notch_widths for non-autodetect if freqs is not None: if notch_widths is None: notch_widths = freqs / 200.0 elif np.any(notch_widths < 0): - raise ValueError('notch_widths must be >= 0') + raise ValueError("notch_widths must be >= 0") else: notch_widths = np.atleast_1d(notch_widths) if len(notch_widths) == 1: notch_widths = notch_widths[0] * np.ones_like(freqs) elif len(notch_widths) != len(freqs): - raise ValueError('notch_widths must be None, scalar, or the ' - 'same length as freqs') + raise ValueError( + "notch_widths must be None, scalar, or the same length as freqs" + ) - if method in ('fir', 'iir'): + if method in ("fir", "iir"): # Speed this up by computing the fourier coefficients once tb_2 = trans_bandwidth / 2.0 - lows = [freq - nw / 2.0 - tb_2 - for freq, nw in zip(freqs, notch_widths)] - highs = [freq + nw / 2.0 + tb_2 - for freq, nw in zip(freqs, notch_widths)] - xf = filter_data(x, Fs, highs, lows, picks, filter_length, tb_2, tb_2, - n_jobs, method, iir_params, copy, phase, fir_window, - fir_design, pad=pad) - elif method == 'spectrum_fit': - xf = _mt_spectrum_proc(x, Fs, freqs, notch_widths, mt_bandwidth, - p_value, picks, n_jobs, copy, filter_length) + lows = [freq - nw / 2.0 - tb_2 for freq, nw in zip(freqs, notch_widths)] + highs = [freq + nw / 2.0 + tb_2 for freq, nw in zip(freqs, notch_widths)] + xf = filter_data( + x, + Fs, + highs, + lows, + picks, + filter_length, + tb_2, + tb_2, + n_jobs, + method, + iir_params, + copy, + phase, + fir_window, + fir_design, + pad=pad, + ) + elif method == "spectrum_fit": + xf = _mt_spectrum_proc( + x, + Fs, + freqs, + notch_widths, + mt_bandwidth, + p_value, + picks, + 
n_jobs, + copy, + filter_length, + ) return xf def _get_window_thresh(n_times, sfreq, mt_bandwidth, p_value): - from scipy import stats from .time_frequency.multitaper import _compute_mt_params # figure out what tapers to use window_fun, _, _ = _compute_mt_params( - n_times, sfreq, mt_bandwidth, False, False, verbose=False) + n_times, sfreq, mt_bandwidth, False, False, verbose=False + ) # F-stat of 1-p point - threshold = stats.f.ppf(1 - p_value / n_times, 2, 2 * len(window_fun) - 2) + threshold = fstat.ppf(1 - p_value / n_times, 2, 2 * len(window_fun) - 2) return window_fun, threshold -def _mt_spectrum_proc(x, sfreq, line_freqs, notch_widths, mt_bandwidth, - p_value, picks, n_jobs, copy, filter_length): +def _mt_spectrum_proc( + x, + sfreq, + line_freqs, + notch_widths, + mt_bandwidth, + p_value, + picks, + n_jobs, + copy, + filter_length, +): """Call _mt_spectrum_remove.""" # set up array for filtering, reshape to 2D, operate on last axis x, orig_shape, picks = _prep_for_filtering(x, copy, picks) - if isinstance(filter_length, str) and filter_length == 'auto': - filter_length = '10s' + if isinstance(filter_length, str) and filter_length == "auto": + filter_length = "10s" if filter_length is None: filter_length = x.shape[-1] - filter_length = min(_to_samples(filter_length, sfreq, '', ''), x.shape[-1]) + filter_length = min(_to_samples(filter_length, sfreq, "", ""), x.shape[-1]) get_wt = partial( - _get_window_thresh, sfreq=sfreq, mt_bandwidth=mt_bandwidth, - p_value=p_value) + _get_window_thresh, sfreq=sfreq, mt_bandwidth=mt_bandwidth, p_value=p_value + ) window_fun, threshold = get_wt(filter_length) parallel, p_fun, n_jobs = parallel_func(_mt_spectrum_remove_win, n_jobs) if n_jobs == 1: @@ -1227,69 +1629,69 @@ def _mt_spectrum_proc(x, sfreq, line_freqs, notch_widths, mt_bandwidth, for ii, x_ in enumerate(x): if ii in picks: x[ii], f = _mt_spectrum_remove_win( - x_, sfreq, line_freqs, notch_widths, window_fun, threshold, - get_wt) + x_, sfreq, line_freqs, notch_widths, window_fun, threshold, get_wt + ) freq_list.append(f) else: - data_new = parallel(p_fun(x_, sfreq, line_freqs, notch_widths, - window_fun, threshold, get_wt) - for xi, x_ in enumerate(x) - if xi in picks) + data_new = parallel( + p_fun(x_, sfreq, line_freqs, notch_widths, window_fun, threshold, get_wt) + for xi, x_ in enumerate(x) + if xi in picks + ) freq_list = [d[1] for d in data_new] data_new = np.array([d[0] for d in data_new]) x[picks, :] = data_new # report found frequencies, but do some sanitizing first by binning into # 1 Hz bins - counts = Counter(sum((np.unique(np.round(ff)).tolist() - for f in freq_list for ff in f), list())) - kind = 'Detected' if line_freqs is None else 'Removed' - found_freqs = '\n'.join(f' {freq:6.2f} : ' - f'{counts[freq]:4d} window{_pl(counts[freq])}' - for freq in sorted(counts)) or ' None' - logger.info(f'{kind} notch frequencies (Hz):\n{found_freqs}') + counts = Counter( + sum((np.unique(np.round(ff)).tolist() for f in freq_list for ff in f), list()) + ) + kind = "Detected" if line_freqs is None else "Removed" + found_freqs = ( + "\n".join( + f" {freq:6.2f} : {counts[freq]:4d} window{_pl(counts[freq])}" + for freq in sorted(counts) + ) + or " None" + ) + logger.info(f"{kind} notch frequencies (Hz):\n{found_freqs}") x.shape = orig_shape return x -def _mt_spectrum_remove_win(x, sfreq, line_freqs, notch_widths, - window_fun, threshold, get_thresh): +def _mt_spectrum_remove_win( + x, sfreq, line_freqs, notch_widths, window_fun, threshold, get_thresh +): n_times = x.shape[-1] n_samples = 
window_fun.shape[1] n_overlap = (n_samples + 1) // 2 x_out = np.zeros_like(x) rm_freqs = list() - idx = [0] # Define how to process a chunk of data - def process(x_): + def process(x_, *, start, stop): out = _mt_spectrum_remove( - x_, sfreq, line_freqs, notch_widths, window_fun, threshold, - get_thresh) + x_, sfreq, line_freqs, notch_widths, window_fun, threshold, get_thresh + ) rm_freqs.append(out[1]) return (out[0],) # must return a tuple - # Define how to store a chunk of fully processed data (it's trivial) - def store(x_): - stop = idx[0] + x_.shape[-1] - x_out[..., idx[0]:stop] += x_ - idx[0] = stop - - _COLA(process, store, n_times, n_samples, n_overlap, sfreq, - verbose=False).feed(x) - assert idx[0] == n_times + _COLA(process, x_out, n_times, n_samples, n_overlap, sfreq, verbose=False).feed(x) return x_out, rm_freqs -def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths, - window_fun, threshold, get_thresh): +def _mt_spectrum_remove( + x, sfreq, line_freqs, notch_widths, window_fun, threshold, get_thresh +): """Use MT-spectrum to remove line frequencies. Based on Chronux. If line_freqs is specified, all freqs within notch_width of each line_freq is set to zero. """ from .time_frequency.multitaper import _mt_spectra + assert x.ndim == 1 if x.shape[-1] != window_fun.shape[-1]: window_fun, threshold = get_thresh(x.shape[-1]) @@ -1312,8 +1714,7 @@ def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths, x_p, freqs = _mt_spectra(x[np.newaxis, :], window_fun, sfreq) # sum of the product of x_p and H0 across tapers (1, n_freqs) - x_p_H0 = np.sum(x_p[:, tapers_odd, :] * - H0[np.newaxis, :, np.newaxis], axis=1) + x_p_H0 = np.sum(x_p[:, tapers_odd, :] * H0[np.newaxis, :, np.newaxis], axis=1) # resulting calculated amplitudes for all freqs A = x_p_H0 / H0_sq @@ -1327,8 +1728,9 @@ def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths, # numerator for F-statistic num = (n_tapers - 1) * (A * A.conj()).real * H0_sq # denominator for F-statistic - den = (np.sum(np.abs(x_p[:, tapers_odd, :] - x_hat) ** 2, 1) + - np.sum(np.abs(x_p[:, tapers_even, :]) ** 2, 1)) + den = np.sum(np.abs(x_p[:, tapers_odd, :] - x_hat) ** 2, 1) + np.sum( + np.abs(x_p[:, tapers_even, :]) ** 2, 1 + ) den[den == 0] = np.inf f_stat = num / den @@ -1337,10 +1739,11 @@ def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths, rm_freqs = freqs[indices] else: # specify frequencies - indices_1 = np.unique([np.argmin(np.abs(freqs - lf)) - for lf in line_freqs]) - indices_2 = [np.logical_and(freqs > lf - nw / 2., freqs < lf + nw / 2.) - for lf, nw in zip(line_freqs, notch_widths)] + indices_1 = np.unique([np.argmin(np.abs(freqs - lf)) for lf in line_freqs]) + indices_2 = [ + np.logical_and(freqs > lf - nw / 2.0, freqs < lf + nw / 2.0) + for lf, nw in zip(line_freqs, notch_widths) + ] indices_2 = np.where(np.any(np.array(indices_2), axis=0))[0] indices = np.unique(np.r_[indices_1, indices_2]) rm_freqs = freqs[indices] @@ -1360,31 +1763,31 @@ def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths, return x - datafit, rm_freqs -def _check_filterable(x, kind='filtered', alternative='filter'): +def _check_filterable(x, kind="filtered", alternative="filter"): # Let's be fairly strict about this -- users can easily coerce to ndarray # at their end, and we already should do it internally any time we are # using these low-level functions. 
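As a quick, hedged illustration of the multitaper path exercised above: the public notch_filter entry point accepts a plain float64 array (the low-level filter functions deliberately reject Raw/Epochs/Evoked instances, as the type check enforces). The 60 Hz line frequency and the synthetic signal below are invented for the example and are not part of this changeset.

# Usage sketch for the 'spectrum_fit' notch path; the signal and line
# frequency are made up for illustration.
import numpy as np
from mne.filter import notch_filter

sfreq = 1000.0
t = np.arange(0, 10, 1.0 / sfreq)
rng = np.random.default_rng(0)
x = np.sin(2 * np.pi * 60 * t) + 0.1 * rng.standard_normal(t.size)  # float64

# Remove an explicitly specified 60 Hz component with the multitaper method.
x_clean = notch_filter(x, sfreq, freqs=60.0, method="spectrum_fit")

# freqs=None is only valid with method='spectrum_fit': an F-test is used to
# detect (and report) significant sinusoidal components automatically.
x_auto = notch_filter(x, sfreq, freqs=None, method="spectrum_fit")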
At the same time, let's # help people who might accidentally use low-level functions that they # shouldn't use by pushing them in the right direction - from .io.base import BaseRaw from .epochs import BaseEpochs from .evoked import Evoked - if isinstance(x, (BaseRaw, BaseEpochs, Evoked)): + from .io import BaseRaw + + if isinstance(x, BaseRaw | BaseEpochs | Evoked): try: name = x.__class__.__name__ except Exception: pass else: raise TypeError( - 'This low-level function only operates on np.ndarray ' - f'instances. To get a {kind} {name} instance, use a method ' - f'like `inst_new = inst.copy().{alternative}(...)` ' - 'instead.') - _validate_type(x, (np.ndarray, list, tuple), f'Data to be {kind}') + "This low-level function only operates on np.ndarray instances. To get " + f"a {kind} {name} instance, use a method like `inst_new = inst.copy()." + f"{alternative}(...)` instead." + ) + _validate_type(x, (np.ndarray, list, tuple), f"Data to be {kind}") x = np.asanyarray(x) if x.dtype != np.float64: - raise ValueError('Data to be %s must be real floating, got %s' - % (kind, x.dtype,)) + raise ValueError(f"Data to be {kind} must be real floating, got {x.dtype}") return x @@ -1394,8 +1797,19 @@ def _resamp_ratio_len(up, down, n): @verbose -def resample(x, up=1., down=1., npad=100, axis=-1, window='boxcar', - n_jobs=None, pad='reflect_limited', *, verbose=None): +def resample( + x, + up=1.0, + down=1.0, + *, + axis=-1, + window="auto", + n_jobs=None, + pad="auto", + npad=100, + method="fft", + verbose=None, +): """Resample an array. Operates along the last dimension of the array. @@ -1408,15 +1822,18 @@ def resample(x, up=1., down=1., npad=100, axis=-1, window='boxcar', Factor to upsample by. down : float Factor to downsample by. - %(npad)s axis : int Axis along which to resample (default is the last axis). %(window_resample)s %(n_jobs_cuda)s - %(pad)s - The default is ``'reflect_limited'``. + ``n_jobs='cuda'`` is only supported when ``method="fft"``. + %(pad_resample_auto)s .. versionadded:: 0.15 + %(npad_resample)s + %(method_resample)s + + .. versionadded:: 1.7 %(verbose)s Returns @@ -1426,58 +1843,106 @@ def resample(x, up=1., down=1., npad=100, axis=-1, window='boxcar', Notes ----- - This uses (hopefully) intelligent edge padding and frequency-domain - windowing improve scipy.signal.resample's resampling method, which + When using ``method="fft"`` (default), + this uses (hopefully) intelligent edge padding and frequency-domain + windowing improve :func:`scipy.signal.resample`'s resampling method, which we have adapted for our use here. Choices of npad and window have important consequences, and the default choices should work well for most natural signals. - - Resampling arguments are broken into "up" and "down" components for future - compatibility in case we decide to use an upfirdn implementation. The - current implementation is functionally equivalent to passing - up=up/down and down=1. """ - from scipy.signal import get_window - from scipy.fft import ifftshift, fftfreq - # check explicitly for backwards compatibility - if not isinstance(axis, int): - err = ("The axis parameter needs to be an integer (got %s). " - "The axis parameter was missing from this function for a " - "period of time, you might be intending to specify the " - "subsequent window parameter." 
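To show how the reworked resample signature above is meant to be called, here is a minimal hedged sketch on synthetic data; the array shape and up/down factors are arbitrary and only keywords visible in this hunk are used.

# Usage sketch of mne.filter.resample with the keyword-only API above;
# data and resampling factors are arbitrary.
import numpy as np
from mne.filter import resample

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 1000))  # (n_channels, n_times), float64

x_fft = resample(x, up=2.0, down=3.0)                       # method="fft" default
x_poly = resample(x, up=2.0, down=3.0, method="polyphase")  # scipy.signal.resample_poly
print(x_fft.shape, x_poly.shape)  # both (4, 667): round(1000 * 2 / 3)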
% repr(axis)) - raise TypeError(err) + _validate_type(method, str, "method") + _validate_type(pad, str, "pad") + _check_option("method", method, ("fft", "polyphase")) # make sure our arithmetic will work - x = _check_filterable(x, 'resampled', 'resample') + x = _check_filterable(x, "resampled", "resample") ratio, final_len = _resamp_ratio_len(up, down, x.shape[axis]) del up, down if axis < 0: axis = x.ndim + axis - orig_last_axis = x.ndim - 1 - if axis != orig_last_axis: - x = x.swapaxes(axis, orig_last_axis) - orig_shape = x.shape - x_len = orig_shape[-1] - if x_len == 0: - warn('x has zero length along last axis, returning a copy of x') + if x.shape[axis] == 0: + warn(f"x has zero length along axis={axis}, returning a copy of x") return x.copy() - bad_msg = 'npad must be "auto" or an integer' + + # prep for resampling along the last axis (swap axis with last then reshape) + out_shape = list(x.shape) + out_shape.pop(axis) + out_shape.append(final_len) + x = np.atleast_2d(x.swapaxes(axis, -1).reshape((-1, x.shape[axis]))) + + # do the resampling using FFT or polyphase methods + kwargs = dict(pad=pad, window=window, n_jobs=n_jobs) + if method == "fft": + y = _resample_fft(x, npad=npad, ratio=ratio, final_len=final_len, **kwargs) + else: + up, down, kwargs["window"] = _prep_polyphase( + ratio, x.shape[-1], final_len, window + ) + half_len = len(window) // 2 + logger.info( + f"Polyphase resampling neighborhood: ±{half_len} " + f"input sample{_pl(half_len)}" + ) + y = _resample_polyphase(x, up=up, down=down, **kwargs) + assert y.shape[-1] == final_len + + # restore dimensions (reshape then swap axis with last) + y = y.reshape(out_shape).swapaxes(axis, -1) + + return y + + +def _prep_polyphase(ratio, x_len, final_len, window): + if isinstance(window, str) and window == "auto": + window = ("kaiser", 5.0) # SciPy default + up = final_len + down = x_len + g_ = gcd(up, down) + up = up // g_ + down = down // g_ + # Figure out our signal neighborhood and design window (adapted from SciPy) + if not isinstance(window, list | np.ndarray): + # Design a linear-phase low-pass FIR filter + max_rate = max(up, down) + f_c = 1.0 / max_rate # cutoff of FIR filter (rel. 
to Nyquist) + half_len = 10 * max_rate # reasonable cutoff for sinc-like function + window = signal.firwin(2 * half_len + 1, f_c, window=window) + return up, down, window + + +def _resample_polyphase(x, *, up, down, pad, window, n_jobs): + if pad == "auto": + pad = "reflect" + kwargs = dict(padtype=pad, window=window, up=up, down=down) + _validate_type( + n_jobs, (None, "int-like"), "n_jobs", extra="when method='polyphase'" + ) + parallel, p_fun, n_jobs = parallel_func(signal.resample_poly, n_jobs) + if n_jobs == 1: + y = signal.resample_poly(x, axis=-1, **kwargs) + else: + y = np.array(parallel(p_fun(x_, **kwargs) for x_ in x)) + return y + + +def _resample_fft(x_flat, *, ratio, final_len, pad, window, npad, n_jobs): + x_len = x_flat.shape[-1] + pad = "reflect_limited" if pad == "auto" else pad + if (isinstance(window, str) and window == "auto") or window is None: + window = "boxcar" if isinstance(npad, str): - if npad != 'auto': - raise ValueError(bad_msg) + _check_option("npad", npad, ("auto",), extra="when a string") # Figure out reasonable pad that gets us to a power of 2 min_add = min(x_len // 8, 100) * 2 npad = 2 ** int(np.ceil(np.log2(x_len + min_add))) - x_len npad, extra = divmod(npad, 2) npads = np.array([npad, npad + extra], int) else: - if npad != int(npad): - raise ValueError(bad_msg) + npad = _ensure_int(npad, "npad", extra="or 'auto'") npads = np.array([npad, npad], int) del npad # prep for resampling now - x_flat = x.reshape((-1, x_len)) orig_len = x_len + npads.sum() # length after padding new_len = max(int(round(ratio * orig_len)), 1) # length after resampling to_removes = [int(round(ratio * npads[0]))] @@ -1487,17 +1952,13 @@ def resample(x, up=1., down=1., npad=100, axis=-1, window='boxcar', # assert np.abs(to_removes[1] - to_removes[0]) <= int(np.ceil(ratio)) # figure out windowing function - if window is not None: - if callable(window): - W = window(fftfreq(orig_len)) - elif isinstance(window, np.ndarray) and \ - window.shape == (orig_len,): - W = window - else: - W = ifftshift(get_window(window, orig_len)) + if callable(window): + W = window(fft.fftfreq(orig_len)) + elif isinstance(window, np.ndarray) and window.shape == (orig_len,): + W = window else: - W = np.ones(orig_len) - W *= (float(new_len) / float(orig_len)) + W = fft.ifftshift(signal.get_window(window, orig_len)) + W *= float(new_len) / float(orig_len) # figure out if we should use CUDA n_jobs, cuda_dict = _setup_cuda_fft_resample(n_jobs, W, new_len) @@ -1506,21 +1967,15 @@ def resample(x, up=1., down=1., npad=100, axis=-1, window='boxcar', # use of the 'flat' window is recommended for minimal ringing parallel, p_fun, n_jobs = parallel_func(_fft_resample, n_jobs) if n_jobs == 1: - y = np.zeros((len(x_flat), new_len - to_removes.sum()), dtype=x.dtype) + y = np.zeros((len(x_flat), new_len - to_removes.sum()), dtype=x_flat.dtype) for xi, x_ in enumerate(x_flat): - y[xi] = _fft_resample(x_, new_len, npads, to_removes, - cuda_dict, pad) + y[xi] = _fft_resample(x_, new_len, npads, to_removes, cuda_dict, pad) else: - y = parallel(p_fun(x_, new_len, npads, to_removes, cuda_dict, pad) - for x_ in x_flat) + y = parallel( + p_fun(x_, new_len, npads, to_removes, cuda_dict, pad) for x_ in x_flat + ) y = np.array(y) - # Restore the original array shape (modified for resampling) - y.shape = orig_shape[:-1] + (y.shape[1],) - if axis != orig_last_axis: - y = y.swapaxes(axis, orig_last_axis) - assert y.shape[axis] == final_len - return y @@ -1558,8 +2013,7 @@ def _resample_stim_channels(stim_data, up, down): # 
out-of-bounds, which can happen (having one sample more than # expected) due to padding sample_picks = np.minimum( - (np.arange(resampled_n_samples) / ratio).astype(int), - n_samples - 1 + (np.arange(resampled_n_samples) / ratio).astype(int), n_samples - 1 ) # Create windows starting from sample_picks[i], ending at sample_picks[i+1] @@ -1568,7 +2022,7 @@ def _resample_stim_channels(stim_data, up, down): # Use the first non-zero value in each window for window_i, window in enumerate(windows): for stim_num, stim in enumerate(stim_data): - nonzero = stim[window[0]:window[1]].nonzero()[0] + nonzero = stim[window[0] : window[1]].nonzero()[0] if len(nonzero) > 0: val = stim[window[0] + nonzero[0]] else: @@ -1603,20 +2057,19 @@ def detrend(x, order=1, axis=-1): >>> npoints = int(1e3) >>> noise = randgen.randn(npoints) >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise - >>> (detrend(x) - noise).max() < 0.01 + >>> bool((detrend(x) - noise).max() < 0.01) True """ - from scipy.signal import detrend if axis > len(x.shape): - raise ValueError('x does not have %d axes' % axis) + raise ValueError(f"x does not have {axis} axes") if order == 0: - fit = 'constant' + fit = "constant" elif order == 1: - fit = 'linear' + fit = "linear" else: - raise ValueError('order must be 0 or 1') + raise ValueError("order must be 0 or 1") - y = detrend(x, axis=axis, type=fit) + y = signal.detrend(x, axis=axis, type=fit) return y @@ -1629,30 +2082,33 @@ def detrend(x, order=1, axis=-1): # (Hamming) then δs = 10 ** (53 / -20.), which means that the passband # deviation should be 20 * np.log10(1 + 10 ** (53 / -20.)) == 0.0194. _fir_window_dict = { - 'hann': dict(name='Hann', ripple=0.0546, attenuation=44), - 'hamming': dict(name='Hamming', ripple=0.0194, attenuation=53), - 'blackman': dict(name='Blackman', ripple=0.0017, attenuation=74), + "hann": dict(name="Hann", ripple=0.0546, attenuation=44), + "hamming": dict(name="Hamming", ripple=0.0194, attenuation=53), + "blackman": dict(name="Blackman", ripple=0.0017, attenuation=74), } _known_fir_windows = tuple(sorted(_fir_window_dict.keys())) -_known_phases = ('linear', 'zero', 'zero-double', 'minimum') -_known_fir_designs = ('firwin', 'firwin2') +_known_phases_fir = ("linear", "zero", "zero-double", "minimum", "minimum-half") +_known_phases_iir = ("zero", "zero-double", "forward") +_known_fir_designs = ("firwin", "firwin2") _fir_design_dict = { - 'firwin': 'Windowed time-domain', - 'firwin2': 'Windowed frequency-domain', + "firwin": "Windowed time-domain", + "firwin2": "Windowed frequency-domain", } def _to_samples(filter_length, sfreq, phase, fir_design): - _validate_type(filter_length, (str, 'int-like'), 'filter_length') + _validate_type(filter_length, (str, "int-like"), "filter_length") if isinstance(filter_length, str): filter_length = filter_length.lower() - err_msg = ('filter_length, if a string, must be a ' - 'human-readable time, e.g. "10s", or "auto", not ' - '"%s"' % filter_length) - if filter_length.lower().endswith('ms'): + err_msg = ( + "filter_length, if a string, must be a " + 'human-readable time, e.g. 
"10s", or "auto", not ' + f'"{filter_length}"' + ) + if filter_length.lower().endswith("ms"): mult_fact = 1e-3 filter_length = filter_length[:-2] - elif filter_length[-1].lower() == 's': + elif filter_length[-1].lower() == "s": mult_fact = 1 filter_length = filter_length[:-1] else: @@ -1662,49 +2118,62 @@ def _to_samples(filter_length, sfreq, phase, fir_design): filter_length = float(filter_length) except ValueError: raise ValueError(err_msg) - filter_length = max(int(np.ceil(filter_length * mult_fact * - sfreq)), 1) - if fir_design == 'firwin': + filter_length = max(int(np.ceil(filter_length * mult_fact * sfreq)), 1) + if fir_design == "firwin": filter_length += (filter_length - 1) % 2 - filter_length = _ensure_int(filter_length, 'filter_length') + filter_length = _ensure_int(filter_length, "filter_length") return filter_length -def _triage_filter_params(x, sfreq, l_freq, h_freq, - l_trans_bandwidth, h_trans_bandwidth, - filter_length, method, phase, fir_window, - fir_design, bands='scalar', reverse=False): +def _triage_filter_params( + x, + sfreq, + l_freq, + h_freq, + l_trans_bandwidth, + h_trans_bandwidth, + filter_length, + method, + phase, + fir_window, + fir_design, + bands="scalar", + reverse=False, +): """Validate and automate filter parameter selection.""" - _validate_type(phase, 'str', 'phase') - _check_option('phase', phase, _known_phases) - _validate_type(fir_window, 'str', 'fir_window') - _check_option('fir_window', fir_window, _known_fir_windows) - _validate_type(fir_design, 'str', 'fir_design') - _check_option('fir_design', fir_design, _known_fir_designs) + _validate_type(phase, "str", "phase") + if method == "fir": + _check_option("phase", phase, _known_phases_fir, extra="when FIR filtering") + else: + _check_option("phase", phase, _known_phases_iir, extra="when IIR filtering") + _validate_type(fir_window, "str", "fir_window") + _check_option("fir_window", fir_window, _known_fir_windows) + _validate_type(fir_design, "str", "fir_design") + _check_option("fir_design", fir_design, _known_fir_designs) # Helpers for reporting - report_phase = 'non-linear phase' if phase == 'minimum' else 'zero-phase' - causality = 'causal' if phase == 'minimum' else 'non-causal' - if phase == 'zero-double': - report_pass = 'two-pass forward and reverse' + report_phase = "non-linear phase" if phase == "minimum" else "zero-phase" + causality = "causal" if phase == "minimum" else "non-causal" + if phase == "zero-double": + report_pass = "two-pass forward and reverse" else: - report_pass = 'one-pass' + report_pass = "one-pass" if l_freq is not None: if h_freq is not None: - kind = 'bandstop' if reverse else 'bandpass' + kind = "bandstop" if reverse else "bandpass" else: - kind = 'highpass' + kind = "highpass" assert not reverse elif h_freq is not None: - kind = 'lowpass' + kind = "lowpass" assert not reverse else: - kind = 'allpass' + kind = "allpass" def float_array(c): return np.array(c, float).ravel() - if bands == 'arr': + if bands == "arr": cast = float_array else: cast = float @@ -1712,145 +2181,193 @@ def float_array(c): if l_freq is not None: l_freq = cast(l_freq) if np.any(l_freq <= 0): - raise ValueError('highpass frequency %s must be greater than zero' - % (l_freq,)) + raise ValueError(f"highpass frequency {l_freq} must be greater than zero") if h_freq is not None: h_freq = cast(h_freq) - if np.any(h_freq >= sfreq / 2.): - raise ValueError('lowpass frequency %s must be less than Nyquist ' - '(%s)' % (h_freq, sfreq / 2.)) + if np.any(h_freq >= sfreq / 2.0): + raise ValueError( + f"lowpass 
frequency {h_freq} must be less than Nyquist ({sfreq / 2.0})" + ) dB_cutoff = False # meaning, don't try to compute or report - if bands == 'scalar' or (len(h_freq) == 1 and len(l_freq) == 1): - if phase == 'zero': - dB_cutoff = '-6 dB' - elif phase == 'zero-double': - dB_cutoff = '-12 dB' + if bands == "scalar" or (len(h_freq) == 1 and len(l_freq) == 1): + if phase == "zero": + dB_cutoff = "-6 dB" + elif phase == "zero-double": + dB_cutoff = "-12 dB" # we go to the next power of two when in FIR and zero-double mode - if method == 'iir': + if method == "iir": # Ignore these parameters, effectively l_stop, h_stop = l_freq, h_freq else: # method == 'fir' l_stop = h_stop = None - logger.info('') - logger.info('FIR filter parameters') - logger.info('---------------------') - logger.info('Designing a %s, %s, %s %s filter:' - % (report_pass, report_phase, causality, kind)) - logger.info('- %s design (%s) method' - % (_fir_design_dict[fir_design], fir_design)) + logger.info("") + logger.info("FIR filter parameters") + logger.info("---------------------") + logger.info( + f"Designing a {report_pass}, {report_phase}, {causality} {kind} filter:" + ) + logger.info(f"- {_fir_design_dict[fir_design]} design ({fir_design}) method") this_dict = _fir_window_dict[fir_window] - if fir_design == 'firwin': - logger.info('- {name:s} window with {ripple:0.4f} passband ripple ' - 'and {attenuation:d} dB stopband attenuation' - .format(**this_dict)) + if fir_design == "firwin": + logger.info( + "- {name:s} window with {ripple:0.4f} passband ripple " + "and {attenuation:d} dB stopband attenuation".format(**this_dict) + ) else: - logger.info('- {name:s} window'.format(**this_dict)) + logger.info("- {name:s} window".format(**this_dict)) if l_freq is not None: # high-pass component if isinstance(l_trans_bandwidth, str): - if l_trans_bandwidth != 'auto': - raise ValueError('l_trans_bandwidth must be "auto" if ' - 'string, got "%s"' % l_trans_bandwidth) - l_trans_bandwidth = np.minimum(np.maximum(0.25 * l_freq, 2.), - l_freq) - msg = ('- Lower transition bandwidth: %0.2f Hz' - % (l_trans_bandwidth)) - if dB_cutoff: - logger.info('- Lower passband edge: %0.2f' % (l_freq,)) - msg += ' (%s cutoff frequency: %0.2f Hz)' % ( - dB_cutoff, l_freq - l_trans_bandwidth / 2.) 
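To make the 'auto' rules above concrete, here is a small standalone sketch that re-derives the default transition bandwidths and the reported -6 dB cutoffs for a 1-40 Hz band-pass at 1000 Hz; the numbers are example values, not taken from this diff.

# Standalone re-derivation of the 'auto' transition-bandwidth defaults.
sfreq, l_freq, h_freq = 1000.0, 1.0, 40.0

# Lower edge: 25% of l_freq, at least 2 Hz, but never wider than l_freq.
l_trans = min(max(0.25 * l_freq, 2.0), l_freq)                # -> 1.0 Hz
# Upper edge: 25% of h_freq, at least 2 Hz, capped below Nyquist.
h_trans = min(max(0.25 * h_freq, 2.0), sfreq / 2.0 - h_freq)  # -> 10.0 Hz

# For phase='zero' the reported -6 dB points sit in the middle of each
# transition band.
print(l_freq - l_trans / 2.0)  # 0.5 Hz lower cutoff
print(h_freq + h_trans / 2.0)  # 45.0 Hz upper cutoff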
+ if l_trans_bandwidth != "auto": + raise ValueError( + 'l_trans_bandwidth must be "auto" if string, got "' + f'{l_trans_bandwidth}"' + ) + l_trans_bandwidth = np.minimum(np.maximum(0.25 * l_freq, 2.0), l_freq) + l_trans_rep = np.array(l_trans_bandwidth, float) + if l_trans_rep.size == 1: + l_trans_rep = f"{l_trans_rep.item():0.2f}" + with np.printoptions(precision=2, floatmode="fixed"): + msg = f"- Lower transition bandwidth: {l_trans_rep} Hz" + if dB_cutoff: + l_freq_rep = np.array(l_freq, float) + if l_freq_rep.size == 1: + l_freq_rep = f"{l_freq_rep.item():0.2f}" + cutoff_rep = np.array(l_freq - l_trans_bandwidth / 2.0, float) + if cutoff_rep.size == 1: + cutoff_rep = f"{cutoff_rep.item():0.2f}" + # Could be an array + logger.info(f"- Lower passband edge: {l_freq_rep}") + msg += f" ({dB_cutoff} cutoff frequency: {cutoff_rep} Hz)" logger.info(msg) l_trans_bandwidth = cast(l_trans_bandwidth) if np.any(l_trans_bandwidth <= 0): - raise ValueError('l_trans_bandwidth must be positive, got %s' - % (l_trans_bandwidth,)) + raise ValueError( + f"l_trans_bandwidth must be positive, got {l_trans_bandwidth}" + ) l_stop = l_freq - l_trans_bandwidth if reverse: # band-stop style l_stop += l_trans_bandwidth l_freq += l_trans_bandwidth if np.any(l_stop < 0): - raise ValueError('Filter specification invalid: Lower stop ' - 'frequency negative (%0.2f Hz). Increase pass' - ' frequency or reduce the transition ' - 'bandwidth (l_trans_bandwidth)' % l_stop) + raise ValueError( + "Filter specification invalid: Lower stop frequency negative (" + f"{l_stop:0.2f} Hz). Increase pass frequency or reduce the " + "transition bandwidth (l_trans_bandwidth)" + ) if h_freq is not None: # low-pass component if isinstance(h_trans_bandwidth, str): - if h_trans_bandwidth != 'auto': - raise ValueError('h_trans_bandwidth must be "auto" if ' - 'string, got "%s"' % h_trans_bandwidth) - h_trans_bandwidth = np.minimum(np.maximum(0.25 * h_freq, 2.), - sfreq / 2. - h_freq) - msg = ('- Upper transition bandwidth: %0.2f Hz' - % (h_trans_bandwidth)) - if dB_cutoff: - logger.info('- Upper passband edge: %0.2f Hz' % (h_freq,)) - msg += ' (%s cutoff frequency: %0.2f Hz)' % ( - dB_cutoff, h_freq + h_trans_bandwidth / 2.) 
+ if h_trans_bandwidth != "auto": + raise ValueError( + 'h_trans_bandwidth must be "auto" if ' + f'string, got "{h_trans_bandwidth}"' + ) + h_trans_bandwidth = np.minimum( + np.maximum(0.25 * h_freq, 2.0), sfreq / 2.0 - h_freq + ) + h_trans_rep = np.array(h_trans_bandwidth, float) + if h_trans_rep.size == 1: + h_trans_rep = f"{h_trans_rep.item():0.2f}" + with np.printoptions(precision=2, floatmode="fixed"): + msg = f"- Upper transition bandwidth: {h_trans_rep} Hz" + if dB_cutoff: + h_freq_rep = np.array(h_freq, float) + if h_freq_rep.size == 1: + h_freq_rep = f"{h_freq_rep.item():0.2f}" + cutoff_rep = np.array(h_freq + h_trans_bandwidth / 2.0, float) + if cutoff_rep.size == 1: + cutoff_rep = f"{cutoff_rep.item():0.2f}" + logger.info(f"- Upper passband edge: {h_freq_rep} Hz") + msg += f" ({dB_cutoff} cutoff frequency: {cutoff_rep} Hz)" logger.info(msg) h_trans_bandwidth = cast(h_trans_bandwidth) if np.any(h_trans_bandwidth <= 0): - raise ValueError('h_trans_bandwidth must be positive, got %s' - % (h_trans_bandwidth,)) + raise ValueError( + f"h_trans_bandwidth must be positive, got {h_trans_bandwidth}" + ) h_stop = h_freq + h_trans_bandwidth if reverse: # band-stop style h_stop -= h_trans_bandwidth h_freq -= h_trans_bandwidth if np.any(h_stop > sfreq / 2): - raise ValueError('Effective band-stop frequency (%s) is too ' - 'high (maximum based on Nyquist is %s)' - % (h_stop, sfreq / 2.)) + raise ValueError( + f"Effective band-stop frequency ({h_stop}) is too high (maximum " + f"based on Nyquist is {sfreq / 2.0})" + ) - if isinstance(filter_length, str) and filter_length.lower() == 'auto': + if isinstance(filter_length, str) and filter_length.lower() == "auto": filter_length = filter_length.lower() - h_check = h_trans_bandwidth if h_freq is not None else np.inf - l_check = l_trans_bandwidth if l_freq is not None else np.inf - mult_fact = 2. if fir_design == 'firwin2' else 1. 
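The automatic filter-length rule above can be sanity-checked by hand; the sketch below assumes the conventional Hamming length factor of roughly 3.3 (the _length_factors table is defined earlier in filter.py and is not part of this hunk), so treat the exact tap count as illustrative.

# Rough re-derivation of filter_length='auto' for a Hamming-window FIR.
import numpy as np

sfreq = 1000.0
shortest_transition = 2.0   # Hz, min of the lower/upper transition bandwidths
length_factor = 3.3         # assumed Hamming factor from _length_factors
mult_fact = 1.0             # 1.0 for fir_design='firwin', 2.0 for 'firwin2'

length_s = length_factor * mult_fact / shortest_transition  # 1.65 s
n_taps = max(int(np.ceil(length_s * sfreq)), 1)
n_taps += (n_taps - 1) % 2  # firwin / zero-phase filters must have odd length
print(n_taps)               # 1651 samples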
- filter_length = '%ss' % (_length_factors[fir_window] * mult_fact / - float(min(h_check, l_check)),) + h_check = l_check = np.inf + if h_freq is not None: + h_check = min(np.atleast_1d(h_trans_bandwidth)) + if l_freq is not None: + l_check = min(np.atleast_1d(l_trans_bandwidth)) + mult_fact = 2.0 if fir_design == "firwin2" else 1.0 + filter_length = f"{_length_factors[fir_window] * mult_fact / float(min(h_check, l_check))}s" # noqa: E501 next_pow_2 = False # disable old behavior else: - next_pow_2 = ( - isinstance(filter_length, str) and phase == 'zero-double') + next_pow_2 = isinstance(filter_length, str) and phase == "zero-double" filter_length = _to_samples(filter_length, sfreq, phase, fir_design) # use correct type of filter (must be odd length for firwin and for # zero phase) - if fir_design == 'firwin' or phase == 'zero': + if fir_design == "firwin" or phase == "zero": filter_length += (filter_length - 1) % 2 - logger.info('- Filter length: %s samples (%0.3f s)' - % (filter_length, filter_length / sfreq)) - logger.info('') + logger.info( + f"- Filter length: {filter_length} samples ({filter_length / sfreq:0.3f} s)" + ) + logger.info("") if filter_length <= 0: - raise ValueError('filter_length must be positive, got %s' - % (filter_length,)) + raise ValueError(f"filter_length must be positive, got {filter_length}") if next_pow_2: filter_length = 2 ** int(np.ceil(np.log2(filter_length))) - if fir_design == 'firwin': + if fir_design == "firwin": filter_length += (filter_length - 1) % 2 # If we have data supplied, do a sanity check if x is not None: x = _check_filterable(x) len_x = x.shape[-1] - if method != 'fir': + if method != "fir": filter_length = len_x if filter_length > len_x and not (l_freq is None and h_freq is None): - warn('filter_length (%s) is longer than the signal (%s), ' - 'distortion is likely. Reduce filter length or filter a ' - 'longer signal.' % (filter_length, len_x)) + warn( + f"filter_length ({filter_length}) is longer than the signal ({len_x}), " + "distortion is likely. Reduce filter length or filter a longer signal." + ) + + logger.debug(f"Using filter length: {filter_length}") + return ( + x, + sfreq, + l_freq, + h_freq, + l_stop, + h_stop, + filter_length, + phase, + fir_window, + fir_design, + ) + - logger.debug('Using filter length: %s' % filter_length) - return (x, sfreq, l_freq, h_freq, l_stop, h_stop, filter_length, phase, - fir_window, fir_design) +def _check_resamp_noop(sfreq, o_sfreq, rtol=1e-6): + if np.isclose(sfreq, o_sfreq, atol=0, rtol=rtol): + logger.info( + f"Sampling frequency of the instance is already {sfreq}, returning " + "unmodified." + ) + return True + return False -class FilterMixin(object): +class FilterMixin: """Object for Epoch/Evoked filtering.""" @verbose @@ -1870,7 +2387,7 @@ def savgol_filter(self, h_freq, verbose=None): Returns ------- - inst : instance of Epochs or Evoked + inst : instance of Epochs, Evoked or SourceEstimate The object with the filtering applied. See Also @@ -1883,6 +2400,8 @@ def savgol_filter(self, h_freq, verbose=None): https://gist.github.com/larsoner/bbac101d50176611136b + When working on SourceEstimates the sample rate of the original data is inferred from tstep. + .. versionadded:: 0.9.0 References @@ -1898,28 +2417,46 @@ def savgol_filter(self, h_freq, verbose=None): >>> evoked.savgol_filter(10.) 
# low-pass at around 10 Hz # doctest:+SKIP >>> evoked.plot() # doctest:+SKIP """ # noqa: E501 - from scipy.signal import savgol_filter - _check_preload(self, 'inst.savgol_filter') + from .source_estimate import _BaseSourceEstimate + + _check_preload(self, "inst.savgol_filter") + if not isinstance(self, _BaseSourceEstimate): + s_freq = self.info["sfreq"] + else: + s_freq = 1 / self.tstep h_freq = float(h_freq) - if h_freq >= self.info['sfreq'] / 2.: - raise ValueError('h_freq must be less than half the sample rate') + if h_freq >= s_freq / 2.0: + raise ValueError("h_freq must be less than half the sample rate") # savitzky-golay filtering - window_length = (int(np.round(self.info['sfreq'] / - h_freq)) // 2) * 2 + 1 - logger.info('Using savgol length %d' % window_length) - self._data[:] = savgol_filter(self._data, axis=-1, polyorder=5, - window_length=window_length) + window_length = (int(np.round(s_freq / h_freq)) // 2) * 2 + 1 + logger.info("Using savgol length %d", window_length) + self._data[:] = signal.savgol_filter( + self._data, axis=-1, polyorder=5, window_length=window_length + ) return self @verbose - def filter(self, l_freq, h_freq, picks=None, filter_length='auto', - l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=None, - method='fir', iir_params=None, phase='zero', - fir_window='hamming', fir_design='firwin', - skip_by_annotation=('edge', 'bad_acq_skip'), pad='edge', *, - verbose=None): - """Filter a subset of channels. + def filter( + self, + l_freq, + h_freq, + picks=None, + filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + n_jobs=None, + method="fir", + iir_params=None, + phase="zero", + fir_window="hamming", + fir_design="firwin", + skip_by_annotation=("edge", "bad_acq_skip"), + pad="edge", + *, + verbose=None, + ): + """Filter a subset of channels/vertices. Parameters ---------- @@ -1943,7 +2480,7 @@ def filter(self, l_freq, h_freq, picks=None, filter_length='auto', Returns ------- - inst : instance of Epochs, Evoked, or Raw + inst : instance of Epochs, Evoked, SourceEstimate, or Raw The filtered data. See Also @@ -1980,45 +2517,81 @@ def filter(self, l_freq, h_freq, picks=None, filter_length='auto', ``len(picks) * n_times`` additional time points need to be temporarily stored in memory. + When working on SourceEstimates the sample rate of the original + data is inferred from tstep. + For more information, see the tutorials :ref:`disc-filtering` and :ref:`tut-filter-resample` and :func:`mne.filter.create_filter`. .. 
versionadded:: 0.15 """ - from .io.base import BaseRaw - _check_preload(self, 'inst.filter') - if pad is None and method != 'iir': - pad = 'edge' - update_info, picks = _filt_check_picks(self.info, picks, - l_freq, h_freq) + from .annotations import _annotations_starts_stops + from .io import BaseRaw + from .source_estimate import _BaseSourceEstimate + + _check_preload(self, "inst.filter") + if not isinstance(self, _BaseSourceEstimate): + update_info, picks = _filt_check_picks(self.info, picks, l_freq, h_freq) + s_freq = self.info["sfreq"] + else: + s_freq = 1.0 / self.tstep + if pad is None and method != "iir": + pad = "edge" if isinstance(self, BaseRaw): # Deal with annotations onsets, ends = _annotations_starts_stops( - self, skip_by_annotation, invert=True) - logger.info('Filtering raw data in %d contiguous segment%s' - % (len(onsets), _pl(onsets))) + self, skip_by_annotation, invert=True + ) + logger.info( + "Filtering raw data in %d contiguous segment%s", + len(onsets), + _pl(onsets), + ) else: onsets, ends = np.array([0]), np.array([self._data.shape[1]]) max_idx = (ends - onsets).argmax() for si, (start, stop) in enumerate(zip(onsets, ends)): # Only output filter params once (for info level), and only warn # once about the length criterion (longest segment is too short) - use_verbose = verbose if si == max_idx else 'error' + use_verbose = verbose if si == max_idx else "error" filter_data( - self._data[:, start:stop], self.info['sfreq'], l_freq, h_freq, - picks, filter_length, l_trans_bandwidth, h_trans_bandwidth, - n_jobs, method, iir_params, copy=False, phase=phase, - fir_window=fir_window, fir_design=fir_design, pad=pad, - verbose=use_verbose) - # update info if filter is applied to all data channels, + self._data[:, start:stop], + s_freq, + l_freq, + h_freq, + picks, + filter_length, + l_trans_bandwidth, + h_trans_bandwidth, + n_jobs, + method, + iir_params, + copy=False, + phase=phase, + fir_window=fir_window, + fir_design=fir_design, + pad=pad, + verbose=use_verbose, + ) + # update info if filter is applied to all data channels/vertices, # and it's not a band-stop filter - _filt_update_info(self.info, update_info, l_freq, h_freq) + if not isinstance(self, _BaseSourceEstimate): + _filt_update_info(self.info, update_info, l_freq, h_freq) return self @verbose - def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=None, - pad='edge', *, verbose=None): + def resample( + self, + sfreq, + *, + npad="auto", + window="auto", + n_jobs=None, + pad="edge", + method="fft", + verbose=None, + ): """Resample data. If appropriate, an anti-aliasing filter is applied before resampling. @@ -2033,11 +2606,12 @@ def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=None, %(npad)s %(window_resample)s %(n_jobs_cuda)s - %(pad)s - The default is ``'edge'``, which pads with the edge values of each - vector. + %(pad_resample)s .. versionadded:: 0.15 + %(method_resample)s + + .. 
versionadded:: 1.7 %(verbose)s Returns @@ -2056,23 +2630,35 @@ def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=None, """ from .epochs import BaseEpochs from .evoked import Evoked - # Should be guaranteed by our inheritance, and the fact that - # mne.io.base.BaseRaw overrides this method - assert isinstance(self, (BaseEpochs, Evoked)) - _check_preload(self, 'inst.resample') + # Should be guaranteed by our inheritance, and the fact that + # mne.io.BaseRaw and _BaseSourceEstimate overrides this method + assert isinstance(self, BaseEpochs | Evoked) sfreq = float(sfreq) - o_sfreq = self.info['sfreq'] - self._data = resample(self._data, sfreq, o_sfreq, npad, window=window, - n_jobs=n_jobs, pad=pad) - lowpass = self.info.get('lowpass') + o_sfreq = self.info["sfreq"] + if _check_resamp_noop(sfreq, o_sfreq): + return self + + _check_preload(self, "inst.resample") + self._data = resample( + self._data, + sfreq, + o_sfreq, + npad=npad, + window=window, + n_jobs=n_jobs, + pad=pad, + method=method, + ) + lowpass = self.info.get("lowpass") lowpass = np.inf if lowpass is None else lowpass with self.info._unlock(): - self.info['lowpass'] = min(lowpass, sfreq / 2.) - self.info['sfreq'] = float(sfreq) - new_times = (np.arange(self._data.shape[-1], dtype=np.float64) / - sfreq + self.times[0]) + self.info["lowpass"] = min(lowpass, sfreq / 2.0) + self.info["sfreq"] = float(sfreq) + new_times = ( + np.arange(self._data.shape[-1], dtype=np.float64) / sfreq + self.times[0] + ) # adjust indirectly affected variables self._set_times(new_times) self._raw_times = self.times @@ -2080,15 +2666,16 @@ def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=None, return self @verbose - def apply_hilbert(self, picks=None, envelope=False, n_jobs=None, - n_fft='auto', *, verbose=None): - """Compute analytic signal or envelope for a subset of channels. + def apply_hilbert( + self, picks=None, envelope=False, n_jobs=None, n_fft="auto", *, verbose=None + ): + """Compute analytic signal or envelope for a subset of channels/vertices. Parameters ---------- %(picks_all_data_noref)s envelope : bool - Compute the envelope signal of each channel. Default False. + Compute the envelope signal of each channel/vertex. Default False. See Notes. %(n_jobs)s n_fft : int | None | str @@ -2100,19 +2687,19 @@ def apply_hilbert(self, picks=None, envelope=False, n_jobs=None, Returns ------- - self : instance of Raw, Epochs, or Evoked + self : instance of Raw, Epochs, Evoked or SourceEstimate The raw object with transformed data. Notes ----- **Parameters** - If ``envelope=False``, the analytic signal for the channels defined in + If ``envelope=False``, the analytic signal for the channels/vertices defined in ``picks`` is computed and the data of the Raw object is converted to a complex representation (the analytic signal is complex valued). If ``envelope=True``, the absolute value of the analytic signal for the - channels defined in ``picks`` is computed, resulting in the envelope + channels/vertices defined in ``picks`` is computed, resulting in the envelope signal. .. warning: Do not use ``envelope=True`` if you intend to compute @@ -2145,20 +2732,30 @@ def apply_hilbert(self, picks=None, envelope=False, n_jobs=None, by computing the analytic signal in sensor space, applying the MNE inverse, and computing the envelope in source space. 
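A minimal usage sketch of the envelope path described above, on a synthetic RawArray; the channel names, counts, and random data are invented for the example.

# Hedged example of FilterMixin.apply_hilbert on synthetic, preloaded data.
import numpy as np
import mne

sfreq = 200.0
info = mne.create_info(["ch1", "ch2"], sfreq, ch_types="eeg")
data = np.random.default_rng(0).standard_normal((2, int(10 * sfreq)))
raw = mne.io.RawArray(data, info)

raw_env = raw.copy().apply_hilbert(envelope=True)  # real-valued envelope
raw_analytic = raw.copy().apply_hilbert()          # complex analytic signal
print(raw_env.get_data().dtype)       # float64
print(raw_analytic.get_data().dtype)  # complex128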
""" - _check_preload(self, 'inst.apply_hilbert') + from .source_estimate import _BaseSourceEstimate + + if not isinstance(self, _BaseSourceEstimate): + use_info = self.info + else: + use_info = len(self._data) + _check_preload(self, "inst.apply_hilbert") + picks = _picks_to_idx(use_info, picks, exclude=(), with_ref_meg=False) + if n_fft is None: n_fft = len(self.times) elif isinstance(n_fft, str): - if n_fft != 'auto': - raise ValueError('n_fft must be an integer, string, or None, ' - 'got %s' % (type(n_fft),)) + if n_fft != "auto": + raise ValueError( + f"n_fft must be an integer, string, or None, got {type(n_fft)}" + ) n_fft = next_fast_len(len(self.times)) n_fft = int(n_fft) if n_fft < len(self.times): - raise ValueError("n_fft (%d) must be at least the number of time " - "points (%d)" % (n_fft, len(self.times))) + raise ValueError( + f"n_fft ({n_fft}) must be at least the number of time points (" + f"{len(self.times)})" + ) dtype = None if envelope else np.complex128 - picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) args, kwargs = (), dict(n_fft=n_fft, envelope=envelope) data_in = self._data @@ -2170,12 +2767,13 @@ def apply_hilbert(self, picks=None, envelope=False, n_jobs=None, # modify data inplace to save memory for idx in picks: self._data[..., idx, :] = _check_fun( - _my_hilbert, data_in[..., idx, :], *args, **kwargs) + _my_hilbert, data_in[..., idx, :], *args, **kwargs + ) else: # use parallel function data_picks_new = parallel( - p_fun(_my_hilbert, data_in[..., p, :], *args, **kwargs) - for p in picks) + p_fun(_my_hilbert, data_in[..., p, :], *args, **kwargs) for p in picks + ) for pp, p in enumerate(picks): self._data[..., p, :] = data_picks_new[pp] return self @@ -2186,10 +2784,9 @@ def _check_fun(fun, d, *args, **kwargs): want_shape = d.shape d = fun(d, *args, **kwargs) if not isinstance(d, np.ndarray): - raise TypeError('Return value must be an ndarray') + raise TypeError("Return value must be an ndarray") if d.shape != want_shape: - raise ValueError('Return data must have shape %s not %s' - % (want_shape, d.shape)) + raise ValueError(f"Return data must have shape {want_shape} not {d.shape}") return d @@ -2212,18 +2809,22 @@ def _my_hilbert(x, n_fft=None, envelope=False): out : array, shape (n_times) The hilbert transform of the signal, or the envelope. """ - from scipy.signal import hilbert n_x = x.shape[-1] - out = hilbert(x, N=n_fft, axis=-1)[..., :n_x] + out = signal.hilbert(x, N=n_fft, axis=-1)[..., :n_x] if envelope: out = np.abs(out) return out @verbose -def design_mne_c_filter(sfreq, l_freq=None, h_freq=40., - l_trans_bandwidth=None, h_trans_bandwidth=5., - verbose=None): +def design_mne_c_filter( + sfreq, + l_freq=None, + h_freq=40.0, + l_trans_bandwidth=None, + h_trans_bandwidth=5.0, + verbose=None, +): """Create a FIR filter like that used by MNE-C. Parameters @@ -2256,75 +2857,100 @@ def design_mne_c_filter(sfreq, l_freq=None, h_freq=40., 4197 frequencies are directly constructed, with zeroes in the stop-band and ones in the passband, with squared cosine ramps in between. """ - from scipy.fft import irfft n_freqs = (4096 + 2 * 2048) // 2 + 1 freq_resp = np.ones(n_freqs) l_freq = 0 if l_freq is None else float(l_freq) if l_trans_bandwidth is None: l_width = 3 else: - l_width = (int(((n_freqs - 1) * l_trans_bandwidth) / - (0.5 * sfreq)) + 1) // 2 + l_width = (int(((n_freqs - 1) * l_trans_bandwidth) / (0.5 * sfreq)) + 1) // 2 l_start = int(((n_freqs - 1) * l_freq) / (0.5 * sfreq)) - h_freq = sfreq / 2. 
if h_freq is None else float(h_freq) - h_width = (int(((n_freqs - 1) * h_trans_bandwidth) / - (0.5 * sfreq)) + 1) // 2 + h_freq = sfreq / 2.0 if h_freq is None else float(h_freq) + h_width = (int(((n_freqs - 1) * h_trans_bandwidth) / (0.5 * sfreq)) + 1) // 2 h_start = int(((n_freqs - 1) * h_freq) / (0.5 * sfreq)) - logger.info('filter : %7.3f ... %6.1f Hz bins : %d ... %d of %d ' - 'hpw : %d lpw : %d' % (l_freq, h_freq, l_start, h_start, - n_freqs, l_width, h_width)) + logger.info( + "filter : %7.3f ... %6.1f Hz bins : %d ... %d of %d hpw : %d lpw : %d", + l_freq, + h_freq, + l_start, + h_start, + n_freqs, + l_width, + h_width, + ) if l_freq > 0: start = l_start - l_width + 1 stop = start + 2 * l_width - 1 if start < 0 or stop >= n_freqs: - raise RuntimeError('l_freq too low or l_trans_bandwidth too large') - freq_resp[:start] = 0. - k = np.arange(-l_width + 1, l_width) / float(l_width) + 3. - freq_resp[start:stop] = np.cos(np.pi / 4. * k) ** 2 + raise RuntimeError("l_freq too low or l_trans_bandwidth too large") + freq_resp[:start] = 0.0 + k = np.arange(-l_width + 1, l_width) / float(l_width) + 3.0 + freq_resp[start:stop] = np.cos(np.pi / 4.0 * k) ** 2 - if h_freq < sfreq / 2.: + if h_freq < sfreq / 2.0: start = h_start - h_width + 1 stop = start + 2 * h_width - 1 if start < 0 or stop >= n_freqs: - raise RuntimeError('h_freq too high or h_trans_bandwidth too ' - 'large') - k = np.arange(-h_width + 1, h_width) / float(h_width) + 1. - freq_resp[start:stop] *= np.cos(np.pi / 4. * k) ** 2 + raise RuntimeError("h_freq too high or h_trans_bandwidth too large") + k = np.arange(-h_width + 1, h_width) / float(h_width) + 1.0 + freq_resp[start:stop] *= np.cos(np.pi / 4.0 * k) ** 2 freq_resp[stop:] = 0.0 # Get the time-domain version of this signal - h = irfft(freq_resp, n=2 * len(freq_resp) - 1) + h = fft.irfft(freq_resp, n=2 * len(freq_resp) - 1) h = np.roll(h, n_freqs - 1) # center the impulse like a linear-phase filt return h def _filt_check_picks(info, picks, h_freq, l_freq): - from .io.pick import _picks_to_idx update_info = False # This will pick *all* data channels - picks = _picks_to_idx(info, picks, 'data_or_ica', exclude=()) + picks = _picks_to_idx(info, picks, "data_or_ica", exclude=()) if h_freq is not None or l_freq is not None: - data_picks = _picks_to_idx(info, None, 'data_or_ica', exclude=(), - allow_empty=True) + data_picks = _picks_to_idx( + info, None, "data_or_ica", exclude=(), allow_empty=True + ) if len(data_picks) == 0: - logger.info('No data channels found. The highpass and ' - 'lowpass values in the measurement info will not ' - 'be updated.') - elif np.in1d(data_picks, picks).all(): + logger.info( + "No data channels found. The highpass and " + "lowpass values in the measurement info will not " + "be updated." + ) + elif np.isin(data_picks, picks).all(): update_info = True else: - logger.info('Filtering a subset of channels. The highpass and ' - 'lowpass values in the measurement info will not ' - 'be updated.') + logger.info( + "Filtering a subset of channels. The highpass and " + "lowpass values in the measurement info will not " + "be updated." 
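For context on the MNE-C-style design above, a short hedged sketch that calls the public helper and checks the gain at one passband and one stopband frequency; the 1 Hz / 40 Hz edges are arbitrary example values.

# Usage sketch for design_mne_c_filter; cutoffs are arbitrary examples.
import numpy as np
from scipy.fft import rfft, rfftfreq
from mne.filter import design_mne_c_filter

sfreq = 1000.0
h = design_mne_c_filter(sfreq, l_freq=1.0, h_freq=40.0)  # centered FIR taps
H = np.abs(rfft(h))
freqs = rfftfreq(len(h), 1.0 / sfreq)

idx_10 = np.argmin(np.abs(freqs - 10.0))  # inside the passband
idx_60 = np.argmin(np.abs(freqs - 60.0))  # well into the stopband
print(round(float(H[idx_10]), 2))  # ~1.0
print(round(float(H[idx_60]), 3))  # ~0.0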
+ ) return update_info, picks def _filt_update_info(info, update_info, l_freq, h_freq): if update_info: - if h_freq is not None and (l_freq is None or l_freq < h_freq) and \ - (info["lowpass"] is None or h_freq < info['lowpass']): + if ( + h_freq is not None + and (l_freq is None or l_freq < h_freq) + and (info["lowpass"] is None or h_freq < info["lowpass"]) + ): with info._unlock(): - info['lowpass'] = float(h_freq) - if l_freq is not None and (h_freq is None or l_freq < h_freq) and \ - (info["highpass"] is None or l_freq > info['highpass']): + info["lowpass"] = float(h_freq) + if ( + l_freq is not None + and (h_freq is None or l_freq < h_freq) + and (info["highpass"] is None or l_freq > info["highpass"]) + ): with info._unlock(): - info['highpass'] = float(l_freq) + info["highpass"] = float(l_freq) + + +def _iir_pad_apply_unpad(x, *, func, padlen, padtype, **kwargs): + x_out = np.reshape(x, (-1, x.shape[-1])).copy() + for this_x in x_out: + x_ext = this_x + if padlen: + x_ext = _smart_pad(x_ext, (padlen, padlen), padtype) + x_ext = func(x=x_ext, axis=-1, padlen=0, **kwargs) + this_x[:] = x_ext[padlen : len(x_ext) - padlen] + x_out.shape = x.shape + return x_out diff --git a/mne/fixes.py b/mne/fixes.py index 09bc6a58947..2148330fb34 100644 --- a/mne/fixes.py +++ b/mne/fixes.py @@ -1,4 +1,4 @@ -"""Compatibility fixes for older versions of libraries +"""Compatibility fixes for older versions of libraries. If you add content to this file, please give the version of the package at which the fix is no longer needed. @@ -6,28 +6,29 @@ # originally copied from scikit-learn """ -# Authors: Emmanuelle Gouillart -# Gael Varoquaux -# Fabian Pedregosa -# Lars Buitinck -# License: BSD + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# NOTE: +# Imports for SciPy submodules need to stay nested in this module +# because this module is imported many places (but not always used)! import inspect -from math import log -from pprint import pprint -from io import StringIO +import io +import operator as operator_module import os import warnings +from math import log import numpy as np - +import numpy.typing +from packaging.version import parse ############################################################################### -# distutils +# distutils LooseVersion removed in Python 3.12 -# distutils has been deprecated since Python 3.10 and is scheduled for removal -# from the standard library with the release of Python 3.12. For version -# comparisons, we use setuptools's `parse_version` if available. def _compare_version(version_a, operator, version_b): """Compare two version strings via a user-specified operator. @@ -47,15 +48,18 @@ def _compare_version(version_a, operator, version_b): bool The result of the version comparison. """ - from packaging.version import parse + mapping = {"<": "lt", "<=": "le", "==": "eq", "!=": "ne", ">=": "ge", ">": "gt"} with warnings.catch_warnings(record=True): - warnings.simplefilter('ignore') - return eval(f'parse("{version_a}") {operator} parse("{version_b}")') + warnings.simplefilter("ignore") + ver_a = parse(version_a) + ver_b = parse(version_b) + return getattr(operator_module, mapping[operator])(ver_a, ver_b) ############################################################################### # Misc + def _median_complex(data, axis): """Compute marginal median on complex data safely. 
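The operator-module pattern above is easy to reuse outside this module; below is a standalone re-implementation sketch (not the function from this diff itself), assuming only that packaging is installed.

# Standalone sketch of the LooseVersion-free comparison used above: map the
# textual operator onto the operator module and compare parsed versions.
import operator
from packaging.version import parse

_OPS = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
        "!=": operator.ne, ">=": operator.ge, ">": operator.gt}

def compare_version(version_a, op, version_b):
    """Return ``version_a <op> version_b`` for PEP 440-style strings."""
    return _OPS[op](parse(version_a), parse(version_b))

assert compare_version("1.7.0", ">=", "1.7")
assert not compare_version("0.24.dev0", ">", "1.0")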
@@ -64,34 +68,44 @@ def _median_complex(data, axis): """ # np.median must be passed real arrays for the desired result if np.iscomplexobj(data): - data = (np.median(np.real(data), axis=axis) - + 1j * np.median(np.imag(data), axis=axis)) + data = np.median(np.real(data), axis=axis) + 1j * np.median( + np.imag(data), axis=axis + ) else: data = np.median(data, axis=axis) return data def _safe_svd(A, **kwargs): - """Wrapper to get around the SVD did not converge error of death""" + """Get around the SVD did not converge error of death.""" # Intel has a bug with their GESVD driver: # https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501 # For SciPy 0.18 and up, we can work around it by using # lapack_driver='gesvd' instead. from scipy import linalg - if kwargs.get('overwrite_a', False): - raise ValueError('Cannot set overwrite_a=True with this function') + + if kwargs.get("overwrite_a", False): + raise ValueError("Cannot set overwrite_a=True with this function") try: return linalg.svd(A, **kwargs) except np.linalg.LinAlgError as exp: from .utils import warn - warn('SVD error (%s), attempting to use GESVD instead of GESDD' - % (exp,)) - return linalg.svd(A, lapack_driver='gesvd', **kwargs) + warn(f"SVD error ({exp}), attempting to use GESVD instead of GESDD") + return linalg.svd(A, lapack_driver="gesvd", **kwargs) + + +def _csc_array_cast(x): + from scipy.sparse import csc_array + + return csc_array(x) -def _csc_matrix_cast(x): - from scipy.sparse import csc_matrix - return csc_matrix(x) + +# Can be replaced with sparse.eye_array once we depend on SciPy >= 1.12 +def _eye_array(n, *, format="csr"): # noqa: A002 + from scipy import sparse + + return sparse.dia_array((np.ones(n), 0), shape=(n, n)).asformat(format) ############################################################################### @@ -99,27 +113,15 @@ def _csc_matrix_cast(x): def rng_uniform(rng): - """Get the unform/randint from the rng.""" + """Get the uniform/randint from the rng.""" # prefer Generator.integers, fall back to RandomState.randint - return getattr(rng, 'integers', getattr(rng, 'randint', None)) - - -def _validate_sos(sos): - """Helper to validate a SOS input""" - sos = np.atleast_2d(sos) - if sos.ndim != 2: - raise ValueError('sos array must be 2D') - n_sections, m = sos.shape - if m != 6: - raise ValueError('sos array must be shape (n_sections, 6)') - if not (sos[:, 3] == 1).all(): - raise ValueError('sos[:, 3] should be all ones') - return sos, n_sections + return getattr(rng, "integers", getattr(rng, "randint", None)) ############################################################################### # Misc utilities + # get_fdata() requires knowing the dtype ahead of time, so let's triage on our # own instead def _get_img_fdata(img): @@ -128,135 +130,70 @@ def _get_img_fdata(img): return data.astype(dtype) -def _read_volume_info(fobj): - """An implementation of nibabel.freesurfer.io._read_volume_info, since old - versions of nibabel (<=2.1.0) don't have it. 
- """ - volume_info = dict() - head = np.fromfile(fobj, '>i4', 1) - if not np.array_equal(head, [20]): # Read two bytes more - head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)]) - if not np.array_equal(head, [2, 0, 20]): - warnings.warn("Unknown extension code.") - return volume_info - - volume_info['head'] = head - for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', - 'zras', 'cras']: - pair = fobj.readline().decode('utf-8').split('=') - if pair[0].strip() != key or len(pair) != 2: - raise IOError('Error parsing volume info.') - if key in ('valid', 'filename'): - volume_info[key] = pair[1].strip() - elif key == 'volume': - volume_info[key] = np.array(pair[1].split()).astype(int) - else: - volume_info[key] = np.array(pair[1].split()).astype(float) - # Ignore the rest - return volume_info - - -############################################################################## -# adapted from scikit-learn +############################################################################### +# Copied from sklearn to simplify code paths -def is_classifier(estimator): - """Returns True if the given estimator is (probably) a classifier. +def empirical_covariance(X, assume_centered=False): + """Compute the Maximum likelihood covariance estimator. Parameters ---------- - estimator : object - Estimator object to test. + X : ndarray, shape (n_samples, n_features) + Data from which to compute the covariance estimate + + assume_centered : Boolean + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data are centered before computation. Returns ------- - out : bool - True if estimator is a classifier and False otherwise. + covariance : 2D ndarray, shape (n_features, n_features) + Empirical covariance (Maximum Likelihood Estimator). """ - return getattr(estimator, "_estimator_type", None) == "classifier" + X = np.asarray(X) + if X.ndim == 1: + X = np.reshape(X, (1, -1)) + if X.shape[0] == 1: + warnings.warn( + "Only one sample available. You may want to reshape your data array" + ) -def is_regressor(estimator): - """Returns True if the given estimator is (probably) a regressor. + if assume_centered: + covariance = np.dot(X.T, X) / X.shape[0] + else: + covariance = np.cov(X.T, bias=1) - Parameters - ---------- - estimator : object - Estimator object to test. + if covariance.ndim == 0: + covariance = np.array([[covariance]]) + return covariance - Returns - ------- - out : bool - True if estimator is a regressor and False otherwise. - """ - return getattr(estimator, "_estimator_type", None) == "regressor" - - -_DEFAULT_TAGS = { - 'non_deterministic': False, - 'requires_positive_X': False, - 'requires_positive_y': False, - 'X_types': ['2darray'], - 'poor_score': False, - 'no_validation': False, - 'multioutput': False, - "allow_nan": False, - 'stateless': False, - 'multilabel': False, - '_skip_test': False, - '_xfail_checks': False, - 'multioutput_only': False, - 'binary_only': False, - 'requires_fit': True, - 'preserves_dtype': [np.float64], - 'requires_y': False, - 'pairwise': False, -} - - -class BaseEstimator(object): - """Base class for all estimators in scikit-learn. - - Notes - ----- - All estimators should specify all the parameters that can be set - at the class level in their ``__init__`` as explicit keyword - arguments (no ``*args`` or ``**kwargs``). 
- """ - @classmethod - def _get_param_names(cls): - """Get parameter names for the estimator""" - # fetch the constructor or the original constructor before - # deprecation wrapping if any - init = getattr(cls.__init__, 'deprecated_original', cls.__init__) - if init is object.__init__: - # No explicit constructor to introspect - return [] - - # introspect the constructor arguments to find the model parameters - # to represent - init_signature = inspect.signature(init) - # Consider the constructor parameters excluding 'self' - parameters = [p for p in init_signature.parameters.values() - if p.name != 'self' and p.kind != p.VAR_KEYWORD] - for p in parameters: - if p.kind == p.VAR_POSITIONAL: - raise RuntimeError("scikit-learn estimators should always " - "specify their parameters in the signature" - " of their __init__ (no varargs)." - " %s with constructor %s doesn't " - " follow this convention." - % (cls, init_signature)) - # Extract and sort argument names excluding 'self' - return sorted([p.name for p in parameters]) +class _EstimatorMixin: + def __sklearn_tags__(self): + # If we get here, we should have sklearn installed + from sklearn.utils import Tags, TargetTags + + return Tags( + estimator_type=None, + target_tags=TargetTags(required=False), + transformer_tags=None, + regressor_tags=None, + classifier_tags=None, + ) + + def _param_names(self): + return inspect.getfullargspec(self.__init__).args[1:] def get_params(self, deep=True): """Get parameters for this estimator. Parameters ---------- - deep : bool, optional + deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. @@ -266,26 +203,8 @@ def get_params(self, deep=True): Parameter names mapped to their values. """ out = dict() - for key in self._get_param_names(): - # We need deprecation warnings to always be on in order to - # catch deprecated param values. - # This is set in utils/__init__.py but it gets overwritten - # when running under python3 somehow. - warnings.simplefilter("always", DeprecationWarning) - try: - with warnings.catch_warnings(record=True) as w: - value = getattr(self, key, None) - if len(w) and w[0].category == DeprecationWarning: - # if the parameter is deprecated, don't show it - continue - finally: - warnings.filters.pop(0) - - # XXX: should we rather test if instance of estimator? - if deep and hasattr(value, 'get_params'): - deep_items = value.get_params().items() - out.update((key + '__' + k, val) for k, val in deep_items) - out[key] = value + for key in self._param_names(): + out[key] = getattr(self, key) return out def set_params(self, **params): @@ -299,153 +218,21 @@ def set_params(self, **params): Parameters ---------- **params : dict - Parameters. + Estimator parameters. Returns ------- - inst : instance - The object. + self : object + Estimator instance. """ - if not params: - # Simple optimisation to gain speed (inspect is slow) - return self - valid_params = self.get_params(deep=True) - for key, value in params.items(): - split = key.split('__', 1) - if len(split) > 1: - # nested objects case - name, sub_name = split - if name not in valid_params: - raise ValueError('Invalid parameter %s for estimator %s. ' - 'Check the list of available parameters ' - 'with `estimator.get_params().keys()`.' % - (name, self)) - sub_object = valid_params[name] - sub_object.set_params(**{sub_name: value}) - else: - # simple objects case - if key not in valid_params: - raise ValueError('Invalid parameter %s for estimator %s. 
' - 'Check the list of available parameters ' - 'with `estimator.get_params().keys()`.' % - (key, self.__class__.__name__)) - setattr(self, key, value) - return self - - def __repr__(self): - params = StringIO() - pprint(self.get_params(deep=False), params) - params.seek(0) - class_name = self.__class__.__name__ - return '%s(%s)' % (class_name, params.read().strip()) - - # __getstate__ and __setstate__ are omitted because they only contain - # conditionals that are not satisfied by our objects (e.g., - # ``if type(self).__module__.startswith('sklearn.')``. - - def _more_tags(self): - return _DEFAULT_TAGS - - def _get_tags(self): - collected_tags = {} - for base_class in reversed(inspect.getmro(self.__class__)): - if hasattr(base_class, '_more_tags'): - # need the if because mixins might not have _more_tags - # but might do redundant work in estimators - # (i.e. calling more tags on BaseEstimator multiple times) - more_tags = base_class._more_tags(self) - collected_tags.update(more_tags) - return collected_tags - - -# newer sklearn deprecates importing from sklearn.metrics.scoring, -# but older sklearn does not expose check_scoring in sklearn.metrics. -def _get_check_scoring(): - try: - from sklearn.metrics import check_scoring # noqa - except ImportError: - from sklearn.metrics.scorer import check_scoring # noqa - return check_scoring + param_names = self._param_names() + for key in params: + if key in param_names: + setattr(self, key, params[key]) -def _check_fit_params(X, fit_params, indices=None): - """Check and validate the parameters passed during `fit`. - - Parameters - ---------- - X : array-like of shape (n_samples, n_features) - Data array. - - fit_params : dict - Dictionary containing the parameters passed at fit. - - indices : array-like of shape (n_samples,), default=None - Indices to be selected if the parameter has the same size as - `X`. - - Returns - ------- - fit_params_validated : dict - Validated parameters. We ensure that the values support - indexing. - """ - try: - from sklearn.utils.validation import \ - _check_fit_params as _sklearn_check_fit_params - return _sklearn_check_fit_params(X, fit_params, indices) - except ImportError: - from sklearn.model_selection import _validation - - fit_params_validated = \ - {k: _validation._index_param_value(X, v, indices) - for k, v in fit_params.items()} - return fit_params_validated - - -############################################################################### -# Copied from sklearn to simplify code paths - -def empirical_covariance(X, assume_centered=False): - """Computes the Maximum likelihood covariance estimator - - - Parameters - ---------- - X : ndarray, shape (n_samples, n_features) - Data from which to compute the covariance estimate - - assume_centered : Boolean - If True, data are not centered before computation. - Useful when working with data whose mean is almost, but not exactly - zero. - If False, data are centered before computation. - - Returns - ------- - covariance : 2D ndarray, shape (n_features, n_features) - Empirical covariance (Maximum Likelihood Estimator). - - """ - X = np.asarray(X) - if X.ndim == 1: - X = np.reshape(X, (1, -1)) - - if X.shape[0] == 1: - warnings.warn("Only one sample available. 
" - "You may want to reshape your data array") - - if assume_centered: - covariance = np.dot(X.T, X) / X.shape[0] - else: - covariance = np.cov(X.T, bias=1) - - if covariance.ndim == 0: - covariance = np.array([[covariance]]) - return covariance - - -class EmpiricalCovariance(BaseEstimator): - """Maximum likelihood covariance estimator +class EmpiricalCovariance(_EstimatorMixin): + """Maximum likelihood covariance estimator. Read more in the :ref:`User Guide `. @@ -468,14 +255,14 @@ class EmpiricalCovariance(BaseEstimator): precision_ : 2D ndarray, shape (n_features, n_features) Estimated pseudo-inverse matrix. (stored only if store_precision is True) - """ + def __init__(self, store_precision=True, assume_centered=False): self.store_precision = store_precision self.assume_centered = assume_centered def _set_covariance(self, covariance): - """Saves the covariance and precision estimates + """Save the covariance and precision estimates. Storage is done accordingly to `self.store_precision`. Precision stored only if invertible. @@ -485,9 +272,9 @@ def _set_covariance(self, covariance): covariance : 2D ndarray, shape (n_features, n_features) Estimated covariance matrix to be stored, and from which precision is computed. - """ from scipy import linalg + # covariance = check_array(covariance) # set covariance self.covariance_ = covariance @@ -507,6 +294,7 @@ def get_precision(self): """ from scipy import linalg + if self.store_precision: precision = self.precision_ else: @@ -534,8 +322,7 @@ def fit(self, X, y=None): self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) - covariance = empirical_covariance( - X, assume_centered=self.assume_centered) + covariance = empirical_covariance(X, assume_centered=self.assume_centered) self._set_covariance(covariance) return self @@ -562,16 +349,14 @@ def score(self, X_test, y=None): estimator of its covariance matrix. """ # compute empirical covariance of the test set - test_cov = empirical_covariance( - X_test - self.location_, assume_centered=True) + test_cov = empirical_covariance(X_test - self.location_, assume_centered=True) # compute log likelihood res = log_likelihood(test_cov, self.get_precision()) return res - def error_norm(self, comp_cov, norm='frobenius', scaling=True, - squared=True): - """Computes the Mean Squared Error between two covariance estimators. + def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True): + """Compute the Mean Squared Error between two covariance estimators. Parameters ---------- @@ -596,16 +381,18 @@ def error_norm(self, comp_cov, norm='frobenius', scaling=True, `self` and `comp_cov` covariance estimators. """ from scipy import linalg + # compute the error error = comp_cov - self.covariance_ # compute the error norm if norm == "frobenius": - squared_norm = np.sum(error ** 2) + squared_norm = np.sum(error**2) elif norm == "spectral": squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error))) else: raise NotImplementedError( - "Only spectral and frobenius norms are implemented") + "Only spectral and frobenius norms are implemented" + ) # optionally scale the error norm if scaling: squared_norm = squared_norm / error.shape[0] @@ -618,7 +405,7 @@ def error_norm(self, comp_cov, norm='frobenius', scaling=True, return result def mahalanobis(self, observations): - """Computes the squared Mahalanobis distances of given observations. + """Compute the squared Mahalanobis distances of given observations. 
Parameters ---------- @@ -631,19 +418,17 @@ def mahalanobis(self, observations): ------- mahalanobis_distance : array, shape = [n_observations,] Squared Mahalanobis distances of the observations. - """ precision = self.get_precision() # compute mahalanobis distances centered_obs = observations - self.location_ - mahalanobis_dist = np.sum( - np.dot(centered_obs, precision) * centered_obs, 1) + mahalanobis_dist = np.sum(np.dot(centered_obs, precision) * centered_obs, 1) return mahalanobis_dist def log_likelihood(emp_cov, precision): - """Computes the sample mean of the log_likelihood under a covariance model + """Compute the sample mean of the log_likelihood under a covariance model. computes the empirical expected log-likelihood (accounting for the normalization terms and scaling), allowing for universal comparison (beyond @@ -662,17 +447,19 @@ def log_likelihood(emp_cov, precision): sample mean of the log-likelihood """ p = precision.shape[0] - log_likelihood_ = - np.sum(emp_cov * precision) + _logdet(precision) + log_likelihood_ = -np.sum(emp_cov * precision) + _logdet(precision) log_likelihood_ -= p * np.log(2 * np.pi) - log_likelihood_ /= 2. + log_likelihood_ /= 2.0 return log_likelihood_ # sklearn uses np.linalg for this, but ours is more robust to zero eigenvalues + def _logdet(A): """Compute the log det of a positive semidefinite matrix.""" from scipy import linalg + vals = linalg.eigvalsh(A) # avoid negative (numerical errors) or zero (semi-definite matrix) values tol = vals.max() * vals.size * np.finfo(np.float64).eps @@ -681,7 +468,8 @@ def _logdet(A): def _infer_dimension_(spectrum, n_samples, n_features): - """Infers the dimension of a dataset of shape (n_samples, n_features) + """Infer the dimension of a dataset of shape (n_samples, n_features). + The dataset is described by its spectrum `spectrum`. """ n_spectrum = len(spectrum) @@ -693,42 +481,42 @@ def _infer_dimension_(spectrum, n_samples, n_features): def _assess_dimension_(spectrum, rank, n_samples, n_features): from scipy.special import gammaln + if rank > len(spectrum): - raise ValueError("The tested rank cannot exceed the rank of the" - " dataset") + raise ValueError("The tested rank cannot exceed the rank of the dataset") - pu = -rank * log(2.) + pu = -rank * log(2.0) for i in range(rank): - pu += (gammaln((n_features - i) / 2.) - - log(np.pi) * (n_features - i) / 2.) + pu += gammaln((n_features - i) / 2.0) - log(np.pi) * (n_features - i) / 2.0 pl = np.sum(np.log(spectrum[:rank])) - pl = -pl * n_samples / 2. + pl = -pl * n_samples / 2.0 if rank == n_features: pv = 0 v = 1 else: v = np.sum(spectrum[rank:]) / (n_features - rank) - pv = -np.log(v) * n_samples * (n_features - rank) / 2. + pv = -np.log(v) * n_samples * (n_features - rank) / 2.0 - m = n_features * rank - rank * (rank + 1.) / 2. - pp = log(2. * np.pi) * (m + rank + 1.) / 2. + m = n_features * rank - rank * (rank + 1.0) / 2.0 + pp = log(2.0 * np.pi) * (m + rank + 1.0) / 2.0 - pa = 0. + pa = 0.0 spectrum_ = spectrum.copy() spectrum_[rank:n_features] = v for i in range(rank): for j in range(i + 1, len(spectrum)): - pa += log((spectrum[i] - spectrum[j]) * - (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples) + pa += log( + (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i]) + ) + log(n_samples) - ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2. 
+ ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0 return ll -def svd_flip(u, v, u_based_decision=True): +def svd_flip(u, v, u_based_decision=True): # noqa: D103 if u_based_decision: # columns of u, rows of v max_abs_cols = np.argmax(np.abs(u), axis=0) @@ -745,7 +533,7 @@ def svd_flip(u, v, u_based_decision=True): def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08): - """Use high precision for cumsum and check that final value matches sum + """Use high precision for cumsum and check that final value matches sum. Parameters ---------- @@ -761,23 +549,28 @@ def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08): """ out = np.cumsum(arr, axis=axis, dtype=np.float64) expected = np.sum(arr, axis=axis, dtype=np.float64) - if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol, - atol=atol, equal_nan=True)): - warnings.warn('cumsum was found to be unstable: ' - 'its last element does not correspond to sum', - RuntimeWarning) + if not np.all( + np.isclose( + out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True + ) + ): + warnings.warn( + "cumsum was found to be unstable: " + "its last element does not correspond to sum", + RuntimeWarning, + ) return out ############################################################################### # From nilearn + def _crop_colorbar(cbar, cbar_vmin, cbar_vmax): - """ - crop a colorbar to show from cbar_vmin to cbar_vmax + """Crop a colorbar to show from cbar_vmin to cbar_vmax. + Used when symmetric_cbar=False is used. """ - import matplotlib if (cbar_vmin is None) and (cbar_vmax is None): return cbar_tick_locs = cbar.locator.locs @@ -785,8 +578,7 @@ def _crop_colorbar(cbar, cbar_vmin, cbar_vmax): cbar_vmax = cbar_tick_locs.max() if cbar_vmin is None: cbar_vmin = cbar_tick_locs.min() - new_tick_locs = np.linspace(cbar_vmin, cbar_vmax, - len(cbar_tick_locs)) + new_tick_locs = np.linspace(cbar_vmin, cbar_vmax, len(cbar_tick_locs)) cbar.ax.set_ylim(cbar_vmin, cbar_vmax) X = cbar._mesh()[0] @@ -796,9 +588,11 @@ def _crop_colorbar(cbar, cbar_vmin, cbar_vmax): ii = [0, 1, N - 2, N - 1, 2 * N - 1, 2 * N - 2, N + 1, N, 0] x = X.T.reshape(-1)[ii] y = Y.T.reshape(-1)[ii] - xy = (np.column_stack([y, x]) - if cbar.orientation == 'horizontal' else - np.column_stack([x, y])) + xy = ( + np.column_stack([y, x]) + if cbar.orientation == "horizontal" + else np.column_stack([x, y]) + ) cbar.outline.set_xy(xy) cbar.set_ticks(new_tick_locs) @@ -811,29 +605,35 @@ def _crop_colorbar(cbar, cbar_vmin, cbar_vmax): # Here we choose different defaults to speed things up by default try: import numba - if _compare_version(numba.__version__, '<', '0.53.1'): + + if _compare_version(numba.__version__, "<", "0.56.4"): raise ImportError prange = numba.prange - def jit(nopython=True, nogil=True, fastmath=True, cache=True, - **kwargs): # noqa - return numba.jit(nopython=nopython, nogil=nogil, fastmath=fastmath, - cache=cache, **kwargs) + + def jit(nopython=True, nogil=True, fastmath=True, cache=True, **kwargs): # noqa + return numba.jit( + nopython=nopython, nogil=nogil, fastmath=fastmath, cache=cache, **kwargs + ) + except Exception: # could be ImportError, SystemError, etc. 
    has_numba = False
 else:
-    has_numba = (os.getenv('MNE_USE_NUMBA', 'true').lower() == 'true')
+    has_numba = os.getenv("MNE_USE_NUMBA", "true").lower() == "true"
 
 
 if not has_numba:
+
     def jit(**kwargs):  # noqa
         def _jit(func):
             return func
+
         return _jit
+
     prange = range
     bincount = np.bincount
-    mean = np.mean
 else:
+
     @jit()
     def bincount(x, weights, minlength):  # noqa: D103
         out = np.zeros(minlength)
@@ -841,85 +641,127 @@ def bincount(x, weights, minlength):  # noqa: D103
             out[idx] += w
         return out
 
-    # fix because Numba does not support axis kwarg for mean
-    @jit()
-    def _np_apply_along_axis(func1d, axis, arr):
-        assert arr.ndim == 2
-        assert axis in [0, 1]
-        if axis == 0:
-            result = np.empty(arr.shape[1])
-            for i in range(len(result)):
-                result[i] = func1d(arr[:, i])
-        else:
-            result = np.empty(arr.shape[0])
-            for i in range(len(result)):
-                result[i] = func1d(arr[i, :])
-        return result
-
-    @jit()
-    def mean(array, axis):
-        return _np_apply_along_axis(np.mean, axis, array)
-
 
 ###############################################################################
 # Matplotlib
 
+
 # workaround: plt.close() doesn't spawn close_event on Agg backend
 # https://github.com/matplotlib/matplotlib/issues/18609
-# scheduled to be fixed by MPL 3.6
 def _close_event(fig):
     """Force calling of the MPL figure close event."""
-    from .utils import logger
     from matplotlib import backend_bases
+
+    from .utils import logger
+
     try:
         fig.canvas.callbacks.process(
-            'close_event', backend_bases.CloseEvent(
-                name='close_event', canvas=fig.canvas))
-        logger.debug(f'Called {fig!r}.canvas.close_event()')
+            "close_event",
+            backend_bases.CloseEvent(name="close_event", canvas=fig.canvas),
+        )
+        logger.debug(f"Called {fig!r}.canvas.close_event()")
     except ValueError:  # old mpl with Qt
-        logger.debug(f'Calling {fig!r}.canvas.close_event() failed')
+        logger.debug(f"Calling {fig!r}.canvas.close_event() failed")
         pass  # pragma: no cover
 
 
-def _is_last_row(ax):
-    try:
-        return ax.get_subplotspec().is_last_row()  # 3.4+
-    except AttributeError:
-        return ax.is_last_row()
-    return ax.get_subplotspec().is_last_row()
-
-
-def _sharex(ax1, ax2):
-    if hasattr(ax1.axes, 'sharex'):
-        ax1.axes.sharex(ax2)
+###############################################################################
+# SciPy 1.14+ minimum_phase half=True option
+
+
+def minimum_phase(h, method="homomorphic", n_fft=None, *, half=True):
+    """Wrap scipy.signal.minimum_phase with half option."""
+    # Can be removed once we require SciPy >= 1.14
+    from scipy.fft import fft, ifft
+    from scipy.signal import minimum_phase as sp_minimum_phase
+
+    assert isinstance(method, str) and method == "homomorphic"
+
+    if "half" in inspect.getfullargspec(sp_minimum_phase).kwonlyargs:
+        return sp_minimum_phase(h, method=method, n_fft=n_fft, half=half)
+    h = np.asarray(h)
+    if np.iscomplexobj(h):
+        raise ValueError("Complex filters not supported")
+    if h.ndim != 1 or h.size <= 2:
+        raise ValueError("h must be 1-D and at least 2 samples long")
+    n_half = len(h) // 2
+    if not np.allclose(h[-n_half:][::-1], h[:n_half]):
+        warnings.warn(
+            "h does not appear to be symmetric, conversion may fail",
+            RuntimeWarning,
+            stacklevel=2,
+        )
+    if n_fft is None:
+        n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
+    n_fft = int(n_fft)
+    if n_fft < len(h):
+        raise ValueError(f"n_fft must be at least len(h)=={len(h)}")
+
+    # zero-pad; calculate the DFT
+    h_temp = np.abs(fft(h, n_fft))
+    # take 0.25*log(|H|**2) = 0.5*log(|H|)
+    h_temp += 1e-7 * h_temp[h_temp > 0].min()  # don't let log blow up
+    np.log(h_temp, out=h_temp)
+    if half:  # halving of magnitude spectrum optional
+        h_temp *= 0.5
+    # IDFT
+    h_temp = ifft(h_temp).real
+    # multiply pointwise by the homomorphic filter
+    # lmin[n] = 2u[n] - d[n]
+    # i.e., double the positive frequencies and zero out the negative ones;
+    # Oppenheim+Shafer 3rd ed p991 eq13.42b and p1004 fig13.7
+    win = np.zeros(n_fft)
+    win[0] = 1
+    stop = n_fft // 2
+    win[1:stop] = 2
+    if n_fft % 2:
+        win[stop] = 1
+    h_temp *= win
+    h_temp = ifft(np.exp(fft(h_temp)))
+    h_minimum = h_temp.real
+
+    n_out = (n_half + len(h) % 2) if half else len(h)
+    return h_minimum[:n_out]
+
+
+# SciPy 1.15 deprecates sph_harm for sph_harm_y and using it will trigger a
+# DeprecationWarning. This is a backport of the new function for older SciPy versions.
+def sph_harm_y(n, m, theta, phi, *, diff_n=0):
+    """Wrap scipy.special.sph_harm_y, falling back to sph_harm on older SciPy."""
+    # Can be removed once we no longer support scipy < 1.15.0
+    from scipy import special
+
+    if "sph_harm_y" in special.__dict__:
+        return special.sph_harm_y(n, m, theta, phi, diff_n=diff_n)
     else:
-        ax1.get_shared_x_axes().join(ax1, ax2)
+        return special.sph_harm(m, n, phi, theta)
 
 
 ###############################################################################
-# SciPy deprecation of pinv + pinvh rcond (never worked properly anyway) in 1.7
-
-def pinvh(a, rtol=None):
-    """Compute a pseudo-inverse of a Hermitian matrix."""
-    s, u = np.linalg.eigh(a)
-    del a
-    if rtol is None:
-        rtol = s.size * np.finfo(s.dtype).eps
-    maxS = np.max(np.abs(s))
-    above_cutoff = (abs(s) > maxS * rtol)
-    psigma_diag = 1.0 / s[above_cutoff]
-    u = u[:, above_cutoff]
-    return (u * psigma_diag) @ u.conj().T
-
-
-def pinv(a, rtol=None):
-    """Compute a pseudo-inverse of a matrix."""
-    u, s, vh = np.linalg.svd(a, full_matrices=False)
-    del a
-    maxS = np.max(s)
-    if rtol is None:
-        rtol = max(vh.shape + u.shape) * np.finfo(u.dtype).eps
-    rank = np.sum(s > maxS * rtol)
-    u = u[:, :rank]
-    u /= s[:rank]
-    return (u @ vh[:rank]).conj().T
+# workaround: NumPy cannot read from file-like objects that lack fileno() with
++# numpy.fromfile, so we try numpy.fromfile first and, for in-memory streams such
+# as io.BytesIO, fall back to numpy.frombuffer.
+def read_from_file_or_buffer(
+    file: str | bytes | os.PathLike | io.IOBase,
+    dtype: numpy.typing.DTypeLike = float,
+    count: int = -1,
+):
+    """numpy.fromfile() wrapper, handling io.BytesIO file-like streams.
+
+    Numpy requires open files to be actual files on disk, i.e., must support
+    file.fileno(), so it fails with file-like streams such as io.BytesIO().
+
+    If numpy.fromfile() fails due to no file.fileno() support, this wrapper
+    reads the required bytes from file and redirects the call to
+    numpy.frombuffer().
+
+    See https://github.com/numpy/numpy/issues/2230#issuecomment-949795210
+    """
+    try:
+        return np.fromfile(file, dtype=dtype, count=count)
+    except io.UnsupportedOperation as e:
+        if not (e.args and e.args[0] == "fileno" and isinstance(file, io.IOBase)):
+            raise  # Nothing I can do about it
+        dtype = np.dtype(dtype)
+        buffer = file.read(dtype.itemsize * count)
+        return np.frombuffer(buffer, dtype=dtype, count=count)
diff --git a/mne/forward/__init__.py b/mne/forward/__init__.py
index 83788b8f706..3dcafb625de 100644
--- a/mne/forward/__init__.py
+++ b/mne/forward/__init__.py
@@ -1,22 +1,8 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
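A minimal usage sketch (illustration only, not part of the patch) for the `read_from_file_or_buffer` helper added above: `numpy.fromfile` needs a real file descriptor and raises `io.UnsupportedOperation` for in-memory streams such as `io.BytesIO`, so the helper re-reads the requested bytes and hands them to `numpy.frombuffer`. The `_read_le_int32` name below is hypothetical and mirrors only that fallback logic.

import io

import numpy as np


def _read_le_int32(fid, count):
    # Hypothetical helper mirroring the wrapper's fallback: try the fast
    # np.fromfile path, then fall back to np.frombuffer when the stream has
    # no fileno() (e.g., io.BytesIO).
    try:
        return np.fromfile(fid, dtype="<i4", count=count)
    except io.UnsupportedOperation:
        buf = fid.read(np.dtype("<i4").itemsize * count)
        return np.frombuffer(buf, dtype="<i4", count=count)


data = np.arange(4, dtype="<i4")
print(_read_le_int32(io.BytesIO(data.tobytes()), count=4))  # -> [0 1 2 3]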
+ """Forward modeling code.""" +import lazy_loader as lazy # for testing purposes -from .forward import (Forward, read_forward_solution, write_forward_solution, - is_fixed_orient, _read_forward_meas_info, - _select_orient_forward, - compute_orient_prior, compute_depth_prior, - apply_forward, apply_forward_raw, - restrict_forward_to_stc, restrict_forward_to_label, - average_forward_solutions, _stc_src_sel, - _fill_measurement_info, _apply_forward, - _subject_from_forward, convert_forward_solution, - _merge_fwds, _do_forward_solution) -from ._make_forward import (make_forward_solution, _prepare_for_forward, - _prep_meg_channels, _prep_eeg_channels, - _to_forward_dict, _create_meg_coils, - _read_coil_defs, _transform_orig_meg_coils, - make_forward_dipole, use_coil_def) -from ._compute_forward import (_magnetic_dipole_field_vec, _compute_forwards, - _concatenate_coils) -from ._field_interpolation import (_make_surface_mapping, make_field_map, - _as_meg_type_inst, _map_meg_or_eeg_channels) -from . import _lead_dots # for testing purposes +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/forward/__init__.pyi b/mne/forward/__init__.pyi new file mode 100644 index 00000000000..dcac0d30881 --- /dev/null +++ b/mne/forward/__init__.pyi @@ -0,0 +1,86 @@ +__all__ = [ + "Forward", + "_apply_forward", + "_as_meg_type_inst", + "_compute_forwards", + "_concatenate_coils", + "_create_meg_coils", + "_do_forward_solution", + "_fill_measurement_info", + "_lead_dots", + "_magnetic_dipole_field_vec", + "_make_surface_mapping", + "_map_meg_or_eeg_channels", + "_merge_fwds", + "_prep_eeg_channels", + "_prep_meg_channels", + "_prepare_for_forward", + "_read_coil_defs", + "_read_forward_meas_info", + "_select_orient_forward", + "_stc_src_sel", + "_subject_from_forward", + "_to_forward_dict", + "_transform_orig_meg_coils", + "apply_forward", + "apply_forward_raw", + "average_forward_solutions", + "compute_depth_prior", + "compute_orient_prior", + "convert_forward_solution", + "is_fixed_orient", + "make_field_map", + "make_forward_dipole", + "make_forward_solution", + "read_forward_solution", + "restrict_forward_to_label", + "restrict_forward_to_stc", + "use_coil_def", + "write_forward_solution", +] +from . 
import _lead_dots +from ._compute_forward import ( + _compute_forwards, + _concatenate_coils, + _magnetic_dipole_field_vec, +) +from ._field_interpolation import ( + _as_meg_type_inst, + _make_surface_mapping, + _map_meg_or_eeg_channels, + make_field_map, +) +from ._make_forward import ( + _create_meg_coils, + _prep_eeg_channels, + _prep_meg_channels, + _prepare_for_forward, + _read_coil_defs, + _to_forward_dict, + _transform_orig_meg_coils, + make_forward_dipole, + make_forward_solution, + use_coil_def, +) +from .forward import ( + Forward, + _apply_forward, + _do_forward_solution, + _fill_measurement_info, + _merge_fwds, + _read_forward_meas_info, + _select_orient_forward, + _stc_src_sel, + _subject_from_forward, + apply_forward, + apply_forward_raw, + average_forward_solutions, + compute_depth_prior, + compute_orient_prior, + convert_forward_solution, + is_fixed_orient, + read_forward_solution, + restrict_forward_to_label, + restrict_forward_to_stc, + write_forward_solution, +) diff --git a/mne/forward/_compute_forward.py b/mne/forward/_compute_forward.py index 7ceec0b22ad..8ba15def389 100644 --- a/mne/forward/_compute_forward.py +++ b/mne/forward/_compute_forward.py @@ -1,11 +1,6 @@ -# -*- coding: utf-8 -*- -# Authors: Matti Hämäläinen -# Alexandre Gramfort -# Martin Luessi -# Eric Larson -# Mark Wronkiewicz -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # The computations in this code were primarily derived from Matti Hämäläinen's # C code. @@ -16,36 +11,38 @@ # 2) EEG and MEG: forward solutions for inverse methods. Mosher, Leahy, and # Lewis, 1999. Generalized discussion of forward solutions. -import numpy as np from copy import deepcopy -from ..fixes import jit, bincount -from ..io.constants import FIFF +import numpy as np + +from .._fiff.constants import FIFF +from ..bem import _import_openmeeg, _make_openmeeg_geometry +from ..fixes import bincount, jit from ..parallel import parallel_func -from ..surface import _project_onto_surface, _jit_cross +from ..surface import _jit_cross, _project_onto_surface from ..transforms import apply_trans, invert_transform -from ..utils import logger, verbose, _pl, warn, fill_doc, _check_option -from ..bem import _make_openmeeg_geometry, _import_openmeeg - +from ..utils import _check_option, _pl, fill_doc, logger, verbose, warn # ############################################################################# # COIL SPECIFICATION AND FIELD COMPUTATION MATRIX + def _dup_coil_set(coils, coord_frame, t): """Make a duplicate.""" - if t is not None and coord_frame != t['from']: - raise RuntimeError('transformation frame does not match the coil set') + if t is not None and coord_frame != t["from"]: + raise RuntimeError("transformation frame does not match the coil set") coils = deepcopy(coils) if t is not None: - coord_frame = t['to'] + coord_frame = t["to"] for coil in coils: - for key in ('ex', 'ey', 'ez'): + assert isinstance(coil, dict), f"Coil must be a dict, got {type(coil)}" + for key in ("ex", "ey", "ez"): if key in coil: - coil[key] = apply_trans(t['trans'], coil[key], False) - coil['r0'] = apply_trans(t['trans'], coil['r0']) - coil['rmag'] = apply_trans(t['trans'], coil['rmag']) - coil['cosmag'] = apply_trans(t['trans'], coil['cosmag'], False) - coil['coord_frame'] = t['to'] + coil[key] = apply_trans(t["trans"], coil[key], False) + coil["r0"] = apply_trans(t["trans"], coil["r0"]) + coil["rmag"] = apply_trans(t["trans"], coil["rmag"]) + coil["cosmag"] = apply_trans(t["trans"], 
coil["cosmag"], False) + coil["coord_frame"] = t["to"] return coils, coord_frame @@ -54,10 +51,9 @@ def _check_coil_frame(coils, coord_frame, bem): if coord_frame != FIFF.FIFFV_COORD_MRI: if coord_frame == FIFF.FIFFV_COORD_HEAD: # Make a transformed duplicate - coils, coord_Frame = _dup_coil_set(coils, coord_frame, - bem['head_mri_t']) + coils, coord_frame = _dup_coil_set(coils, coord_frame, bem["head_mri_t"]) else: - raise RuntimeError('Bad coil coordinate frame %s' % coord_frame) + raise RuntimeError(f"Bad coil coordinate frame {coord_frame}") return coils, coord_frame @@ -89,12 +85,17 @@ def _lin_field_coeff(surf, mult, rmags, cosmags, ws, bins, n_jobs): (?) """ parallel, p_fun, n_jobs = parallel_func( - _do_lin_field_coeff, n_jobs, max_jobs=len(surf['tris'])) + _do_lin_field_coeff, n_jobs, max_jobs=len(surf["tris"]) + ) nas = np.array_split - coeffs = parallel(p_fun(surf['rr'], t, tn, ta, rmags, cosmags, ws, bins) - for t, tn, ta in zip(nas(surf['tris'], n_jobs), - nas(surf['tri_nn'], n_jobs), - nas(surf['tri_area'], n_jobs))) + coeffs = parallel( + p_fun(surf["rr"], t, tn, ta, rmags, cosmags, ws, bins) + for t, tn, ta in zip( + nas(surf["tris"], n_jobs), + nas(surf["tri_nn"], n_jobs), + nas(surf["tri_area"], n_jobs), + ) + ) return mult * np.sum(coeffs, axis=0) @@ -155,22 +156,21 @@ def _do_lin_field_coeff(bem_rr, tris, tn, ta, rmags, cosmags, ws, bins): for ti in range(3): x = np.sum(c[:, ti], axis=-1) x /= den[:, tri[ti]] / tri_area - coeff[:, tri[ti]] += \ - bincount(bins, weights=x, minlength=bins[-1] + 1) + coeff[:, tri[ti]] += bincount(bins, weights=x, minlength=bins[-1] + 1) return coeff def _concatenate_coils(coils): """Concatenate MEG coil parameters.""" - rmags = np.concatenate([coil['rmag'] for coil in coils]) - cosmags = np.concatenate([coil['cosmag'] for coil in coils]) - ws = np.concatenate([coil['w'] for coil in coils]) - n_int = np.array([len(coil['rmag']) for coil in coils]) + rmags = np.concatenate([coil["rmag"] for coil in coils]) + cosmags = np.concatenate([coil["cosmag"] for coil in coils]) + ws = np.concatenate([coil["w"] for coil in coils]) + n_int = np.array([len(coil["rmag"]) for coil in coils]) if n_int[-1] == 0: # We assume each sensor has at least one integration point, # which should be a safe assumption. 
But let's check it here, since # our code elsewhere relies on bins[-1] + 1 being the number of sensors - raise RuntimeError('not supported') + raise RuntimeError("not supported") bins = np.repeat(np.arange(len(n_int)), n_int) return rmags, cosmags, ws, bins @@ -209,8 +209,8 @@ def _bem_specify_coils(bem, coils, coord_frame, mults, n_jobs): # Process each of the surfaces rmags, cosmags, ws, bins = _triage_coils(coils) del coils - lens = np.cumsum(np.r_[0, [len(s['rr']) for s in bem['surfs']]]) - sol = np.zeros((bins[-1] + 1, bem['solution'].shape[1])) + lens = np.cumsum(np.r_[0, [len(s["rr"]) for s in bem["surfs"]]]) + sol = np.zeros((bins[-1] + 1, bem["solution"].shape[1])) lims = np.concatenate([np.arange(0, sol.shape[0], 100), [sol.shape[0]]]) # Put through the bem (in channel-based chunks to save memory) @@ -218,10 +218,11 @@ def _bem_specify_coils(bem, coils, coord_frame, mults, n_jobs): mask = np.logical_and(bins >= start, bins < stop) r, c, w, b = rmags[mask], cosmags[mask], ws[mask], bins[mask] - start # Compute coeffs for each surface, one at a time - for o1, o2, surf, mult in zip(lens[:-1], lens[1:], - bem['surfs'], bem['field_mult']): + for o1, o2, surf, mult in zip( + lens[:-1], lens[1:], bem["surfs"], bem["field_mult"] + ): coeff = _lin_field_coeff(surf, mult, r, c, w, b, n_jobs) - sol[start:stop] += np.dot(coeff, bem['solution'][o1:o2]) + sol[start:stop] += np.dot(coeff, bem["solution"][o1:o2]) sol *= mults return sol @@ -243,20 +244,22 @@ def _bem_specify_els(bem, els, mults): sol : ndarray, shape (n_EEG_sensors, n_BEM_vertices) EEG solution """ - sol = np.zeros((len(els), bem['solution'].shape[1])) - scalp = bem['surfs'][0] + sol = np.zeros((len(els), bem["solution"].shape[1])) + scalp = bem["surfs"][0] # Operate on all integration points for all electrodes (in MRI coords) - rrs = np.concatenate([apply_trans(bem['head_mri_t']['trans'], el['rmag']) - for el in els], axis=0) - ws = np.concatenate([el['w'] for el in els]) + rrs = np.concatenate( + [apply_trans(bem["head_mri_t"]["trans"], el["rmag"]) for el in els], axis=0 + ) + ws = np.concatenate([el["w"] for el in els]) tri_weights, tri_idx = _project_onto_surface(rrs, scalp) tri_weights *= ws[:, np.newaxis] - weights = np.matmul(tri_weights[:, np.newaxis], - bem['solution'][scalp['tris'][tri_idx]])[:, 0] + weights = np.matmul( + tri_weights[:, np.newaxis], bem["solution"][scalp["tris"][tri_idx]] + )[:, 0] # there are way more vertices than electrodes generally, so let's iterate # over the electrodes - edges = np.concatenate([[0], np.cumsum([len(el['w']) for el in els])]) + edges = np.concatenate([[0], np.cumsum([len(el["w"]) for el in els])]) for ii, (start, stop) in enumerate(zip(edges[:-1], edges[1:])): sol[ii] = weights[start:stop].sum(0) sol *= mults @@ -303,7 +306,7 @@ def _bem_inf_pots(mri_rr, bem_rr, mri_Q=None): this_diff = bem_rr - rr diff_norm = np.sum(this_diff * this_diff, axis=1) diff_norm *= np.sqrt(diff_norm) - diff_norm[diff_norm == 0] = 1. + diff_norm[diff_norm == 0] = 1.0 if mri_Q is not None: this_diff = np.dot(this_diff, mri_Q.T) this_diff /= diff_norm.reshape(-1, 1) @@ -311,6 +314,7 @@ def _bem_inf_pots(mri_rr, bem_rr, mri_Q=None): return diff + # This function has been refactored to process all points simultaneously # def _bem_inf_field(rd, Q, rp, d): # """Infinite-medium magnetic field. 
See (7) in Mosher, 1999""" @@ -371,8 +375,7 @@ def _bem_inf_fields(rr, rmag, cosmag): @fill_doc -def _bem_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs, - coil_type): +def _bem_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs, coil_type): """Calculate the magnetic field or electric potential forward solution. The code is very similar between EEG and MEG potentials, so combine them. @@ -405,22 +408,25 @@ def _bem_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs, # Both MEG and EEG have the inifinite-medium potentials # This could be just vectorized, but eats too much memory, so instead we # reduce memory by chunking within _do_inf_pots and parallelize, too: - parallel, p_fun, n_jobs = parallel_func( - _do_inf_pots, n_jobs, max_jobs=len(rr)) + parallel, p_fun, n_jobs = parallel_func(_do_inf_pots, n_jobs, max_jobs=len(rr)) nas = np.array_split - B = np.sum(parallel(p_fun(mri_rr, sr.copy(), np.ascontiguousarray(mri_Q), - np.array(sol)) # copy and contig - for sr, sol in zip(nas(bem_rr, n_jobs), - nas(solution.T, n_jobs))), axis=0) + B = np.sum( + parallel( + p_fun( + mri_rr, sr.copy(), np.ascontiguousarray(mri_Q), np.array(sol) + ) # copy and contig + for sr, sol in zip(nas(bem_rr, n_jobs), nas(solution.T, n_jobs)) + ), + axis=0, + ) # The copy()s above should make it so the whole objects don't need to be # pickled... # Only MEG coils are sensitive to the primary current distribution. - if coil_type == 'meg': + if coil_type == "meg": # Primary current contribution (can be calc. in coil/dipole coords) parallel, p_fun, n_jobs = parallel_func(_do_prim_curr, n_jobs) - pcc = np.concatenate(parallel(p_fun(r, coils) - for r in nas(rr, n_jobs)), axis=0) + pcc = np.concatenate(parallel(p_fun(r, coils) for r in nas(rr, n_jobs)), axis=0) B += pcc B *= _MAG_FACTOR return B @@ -452,8 +458,9 @@ def _do_prim_curr(rr, coils): pp = _bem_inf_fields(rr[start:stop], rmags, cosmags) pp *= ws pp.shape = (3 * (stop - start), -1) - pc[3 * start:3 * stop] = [bincount(bins, this_pp, bins[-1] + 1) - for this_pp in pp] + pc[3 * start : 3 * stop] = [ + bincount(bins, this_pp, bins[-1] + 1) for this_pp in pp + ] return pc @@ -494,21 +501,21 @@ def _do_inf_pots(mri_rr, bem_rr, mri_Q, sol): # v0 in Hämäläinen et al., 1989 == v_inf in Mosher, et al., 1999 v0s = _bem_inf_pots(mri_rr[start:stop], bem_rr, mri_Q) v0s = v0s.reshape(-1, v0s.shape[2]) - B[3 * start:3 * stop] = np.dot(v0s, sol) + B[3 * start : 3 * stop] = np.dot(v0s, sol) return B # ############################################################################# # SPHERE COMPUTATION -def _sphere_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, - n_jobs, coil_type): + +def _sphere_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs, coil_type): """Do potential or field for spherical model.""" - fun = _eeg_spherepot_coil if coil_type == 'eeg' else _sphere_field - parallel, p_fun, n_jobs = parallel_func( - fun, n_jobs, max_jobs=len(rr)) - B = np.concatenate(parallel(p_fun(r, coils, sphere=solution) - for r in np.array_split(rr, n_jobs))) + fun = _eeg_spherepot_coil if coil_type == "eeg" else _sphere_field + parallel, p_fun, n_jobs = parallel_func(fun, n_jobs, max_jobs=len(rr)) + B = np.concatenate( + parallel(p_fun(r, coils, sphere=solution) for r in np.array_split(rr, n_jobs)) + ) return B @@ -522,7 +529,7 @@ def _sphere_field(rrs, coils, sphere): by Matti Hämäläinen, February 1990 """ rmags, cosmags, ws, bins = _triage_coils(coils) - return _do_sphere_field(rrs, rmags, cosmags, ws, bins, sphere['r0']) + 
return _do_sphere_field(rrs, rmags, cosmags, ws, bins, sphere["r0"]) @jit() @@ -558,8 +565,9 @@ def _do_sphere_field(rrs, rmags, cosmags, ws, bins, r0): _jit_cross(v1, rr_, cosmags) v2 = np.empty((cosmags.shape[0], 3)) _jit_cross(v2, rr_, this_poss) - xx = ((good * ws).reshape(-1, 1) * - (v1 / F.reshape(-1, 1) + v2 * g.reshape(-1, 1))) + xx = (good * ws).reshape(-1, 1) * ( + v1 / F.reshape(-1, 1) + v2 * g.reshape(-1, 1) + ) for jj in range(3): zz = bincount(bins, xx[:, jj], n_coils) B[3 * ri + jj, :] = zz @@ -574,24 +582,24 @@ def _eeg_spherepot_coil(rrs, coils, sphere): del coils # Shift to the sphere model coordinates - rrs = rrs - sphere['r0'] + rrs = rrs - sphere["r0"] B = np.zeros((3 * len(rrs), n_coils)) for ri, rr in enumerate(rrs): # Only process dipoles inside the innermost sphere - if np.sqrt(np.dot(rr, rr)) >= sphere['layers'][0]['rad']: + if np.sqrt(np.dot(rr, rr)) >= sphere["layers"][0]["rad"]: continue # fwd_eeg_spherepot_vec vval_one = np.zeros((len(rmags), 3)) # Make a weighted sum over the equivalence parameters - for eq in range(sphere['nfit']): + for eq in range(sphere["nfit"]): # Scale the dipole position - rd = sphere['mu'][eq] * rr + rd = sphere["mu"][eq] * rr rd2 = np.sum(rd * rd) rd2_inv = 1.0 / rd2 # Go over all electrodes - this_pos = rmags - sphere['r0'] + this_pos = rmags - sphere["r0"] # Scale location onto the surface of the sphere (not used) # if sphere['scale_pos']: @@ -617,17 +625,19 @@ def _eeg_spherepot_coil(rrs, coils, sphere): c2 = a3 + (a + r) / (r * F) # Mix them together and scale by lambda/(rd*rd) - m1 = (c1 - c2 * rrd) + m1 = c1 - c2 * rrd m2 = c2 * rd2 - vval_one += (sphere['lambda'][eq] * rd2_inv * - (m1[:, np.newaxis] * rd + - m2[:, np.newaxis] * this_pos)) + vval_one += ( + sphere["lambda"][eq] + * rd2_inv + * (m1[:, np.newaxis] * rd + m2[:, np.newaxis] * this_pos) + ) # compute total result xx = vval_one * ws[:, np.newaxis] zz = np.array([bincount(bins, x, bins[-1] + 1) for x in xx.T]) - B[3 * ri:3 * ri + 3, :] = zz + B[3 * ri : 3 * ri + 3, :] = zz # finishing by scaling by 1/(4*M_PI) B *= 0.25 / np.pi return B @@ -643,14 +653,14 @@ def _triage_coils(coils): _MIN_DIST_LIMIT = 1e-5 -def _magnetic_dipole_field_vec(rrs, coils, too_close='raise'): +def _magnetic_dipole_field_vec(rrs, coils, too_close="raise"): rmags, cosmags, ws, bins = _triage_coils(coils) fwd, min_dist = _compute_mdfv(rrs, rmags, cosmags, ws, bins, too_close) if min_dist < _MIN_DIST_LIMIT: - msg = 'Coil too close (dist = %g mm)' % (min_dist * 1000,) - if too_close == 'raise': + msg = f"Coil too close (dist = {min_dist * 1000:g} mm)" + if too_close == "raise": raise RuntimeError(msg) - func = warn if too_close == 'warning' else logger.info + func = warn if too_close == "warning" else logger.info func(msg) return fwd @@ -683,7 +693,7 @@ def _compute_mdfv(rrs, rmags, cosmags, ws, bins, too_close): dist2 = dist2_.reshape(-1, 1) dist = np.sqrt(dist2) min_dist = min(dist.min(), min_dist) - if min_dist < _MIN_DIST_LIMIT and too_close == 'raise': + if min_dist < _MIN_DIST_LIMIT and too_close == "raise": break t_ = np.sum(diff * cosmags, axis=1) t = t_.reshape(-1, 1) @@ -697,8 +707,9 @@ def _compute_mdfv(rrs, rmags, cosmags, ws, bins, too_close): # ############################################################################# # MAIN TRIAGING FUNCTION + @verbose -def _prep_field_computation(rr, *, sensors, bem, n_jobs, verbose=None): +def _prep_field_computation(*, sensors, bem, n_jobs, verbose=None): """Precompute and store some things that are used for both MEG and EEG. 
Calculation includes multiplication factors, coordinate transforms, @@ -718,44 +729,47 @@ def _prep_field_computation(rr, *, sensors, bem, n_jobs, verbose=None): %(verbose)s """ bem_rr = mults = mri_Q = head_mri_t = None - if not bem['is_sphere']: - if bem['bem_method'] != FIFF.FIFFV_BEM_APPROX_LINEAR: - raise RuntimeError('only linear collocation supported') + if not bem["is_sphere"]: + if bem["bem_method"] != FIFF.FIFFV_BEM_APPROX_LINEAR: + raise RuntimeError("only linear collocation supported") # Store (and apply soon) μ_0/(4π) factor before source computations - mults = np.repeat(bem['source_mult'] / (4.0 * np.pi), - [len(s['rr']) for s in bem['surfs']])[np.newaxis, :] + mults = np.repeat( + bem["source_mult"] / (4.0 * np.pi), [len(s["rr"]) for s in bem["surfs"]] + )[np.newaxis, :] # Get positions of BEM points for every surface - bem_rr = np.concatenate([s['rr'] for s in bem['surfs']]) + bem_rr = np.concatenate([s["rr"] for s in bem["surfs"]]) # The dipole location and orientation must be transformed - head_mri_t = bem['head_mri_t'] - mri_Q = bem['head_mri_t']['trans'][:3, :3].T + head_mri_t = bem["head_mri_t"] + mri_Q = bem["head_mri_t"]["trans"][:3, :3].T solutions = dict() for coil_type in sensors: - coils = sensors[coil_type]['defs'] - if not bem['is_sphere']: - if coil_type == 'meg': + coils = sensors[coil_type]["defs"] + if not bem["is_sphere"]: + if coil_type == "meg": # MEG field computation matrices for BEM - start = 'Composing the field computation matrix' - logger.info('\n' + start + '...') + start = "Composing the field computation matrix" + logger.info("\n" + start + "...") cf = FIFF.FIFFV_COORD_HEAD # multiply solution by "mults" here for simplicity solution = _bem_specify_coils(bem, coils, cf, mults, n_jobs) else: # Compute solution for EEG sensor - logger.info('Setting up for EEG...') + logger.info("Setting up for EEG...") solution = _bem_specify_els(bem, coils, mults) else: solution = bem - if coil_type == 'eeg': - logger.info('Using the equivalent source approach in the ' - 'homogeneous sphere for EEG') - sensors[coil_type]['defs'] = _triage_coils(coils) + if coil_type == "eeg": + logger.info( + "Using the equivalent source approach in the " + "homogeneous sphere for EEG" + ) + sensors[coil_type]["defs"] = _triage_coils(coils) solutions[coil_type] = solution # Get appropriate forward physics function depending on sphere or BEM model - fun = _sphere_pot_or_field if bem['is_sphere'] else _bem_pot_or_field + fun = _sphere_pot_or_field if bem["is_sphere"] else _bem_pot_or_field # Update fwd_data with # bem_rr (3D BEM vertex positions) @@ -765,8 +779,8 @@ def _prep_field_computation(rr, *, sensors, bem, n_jobs, verbose=None): # solutions (len 2 list; [ndarray, shape (n_MEG_sens, n BEM vertices), # ndarray, shape (n_EEG_sens, n BEM vertices)] fwd_data = dict( - bem_rr=bem_rr, mri_Q=mri_Q, head_mri_t=head_mri_t, fun=fun, - solutions=solutions) + bem_rr=bem_rr, mri_Q=mri_Q, head_mri_t=head_mri_t, fun=fun, solutions=solutions + ) return fwd_data @@ -776,26 +790,35 @@ def _compute_forwards_meeg(rr, *, sensors, fwd_data, n_jobs, silent=False): Bs = dict() # The dipole location and orientation must be transformed to mri coords mri_rr = None - if fwd_data['head_mri_t'] is not None: - mri_rr = np.ascontiguousarray( - apply_trans(fwd_data['head_mri_t']['trans'], rr)) - mri_Q, bem_rr, fun = fwd_data['mri_Q'], fwd_data['bem_rr'], fwd_data['fun'] - solutions = fwd_data['solutions'] + if fwd_data["head_mri_t"] is not None: + mri_rr = 
np.ascontiguousarray(apply_trans(fwd_data["head_mri_t"]["trans"], rr)) + mri_Q, bem_rr, fun = fwd_data["mri_Q"], fwd_data["bem_rr"], fwd_data["fun"] + solutions = fwd_data["solutions"] del fwd_data + rr = np.ascontiguousarray(rr) # usually true but not guaranteed, e.g. in dipole.py for coil_type, sens in sensors.items(): - coils = sens['defs'] - compensator = sens.get('compensator', None) - post_picks = sens.get('post_picks', None) + coils = sens["defs"] + compensator = sens.get("compensator", None) + post_picks = sens.get("post_picks", None) solution = solutions.get(coil_type, None) # Do the actual forward calculation for a list MEG/EEG sensors if not silent: - logger.info('Computing %s at %d source location%s ' - '(free orientations)...' - % (coil_type.upper(), len(rr), _pl(rr))) + logger.info( + f"Computing {coil_type.upper()} at {len(rr)} source location{_pl(rr)} " + "(free orientations)..." + ) # Calculate forward solution using spherical or BEM model - B = fun(rr, mri_rr, mri_Q, coils=coils, solution=solution, - bem_rr=bem_rr, n_jobs=n_jobs, coil_type=coil_type) + B = fun( + rr, + mri_rr, + mri_Q, + coils=coils, + solution=solution, + bem_rr=bem_rr, + n_jobs=n_jobs, + coil_type=coil_type, + ) # Compensate if needed (only done for MEG systems w/compensation) if compensator is not None: @@ -811,16 +834,19 @@ def _compute_forwards(rr, *, bem, sensors, n_jobs, verbose=None): """Compute the MEG and EEG forward solutions.""" # Split calculation into two steps to save (potentially) a lot of time # when e.g. dipole fitting - solver = bem.get('solver', 'mne') - _check_option('solver', solver, ('mne', 'openmeeg')) - if bem['is_sphere'] or solver == 'mne': - fwd_data = _prep_field_computation( - rr, sensors=sensors, bem=bem, n_jobs=n_jobs) + solver = bem.get("solver", "mne") + _check_option("solver", solver, ("mne", "openmeeg")) + if bem["is_sphere"] or solver == "mne": + # This modifies "sensors" in place, so let's copy it in case the calling + # function needs to reuse it (e.g., in simulate_raw.py) + sensors = deepcopy(sensors) + fwd_data = _prep_field_computation(sensors=sensors, bem=bem, n_jobs=n_jobs) Bs = _compute_forwards_meeg( - rr, sensors=sensors, fwd_data=fwd_data, n_jobs=n_jobs) + rr, sensors=sensors, fwd_data=fwd_data, n_jobs=n_jobs + ) else: Bs = _compute_forwards_openmeeg(rr, bem=bem, sensors=sensors) - n_sensors_want = sum(len(s['ch_names']) for s in sensors.values()) + n_sensors_want = sum(len(s["ch_names"]) for s in sensors.values()) n_sensors = sum(B.shape[1] for B in Bs.values()) n_sources = list(Bs.values())[0].shape[0] assert (n_sources, n_sensors) == (len(rr) * 3, n_sensors_want) @@ -831,30 +857,30 @@ def _compute_forwards_openmeeg(rr, *, bem, sensors): """Compute the MEG and EEG forward solutions for OpenMEEG.""" if len(bem["surfs"]) != 3: raise RuntimeError("Only 3-layer BEM is supported for OpenMEEG.") - om = _import_openmeeg('compute a forward solution using OpenMEEG') + om = _import_openmeeg("compute a forward solution using OpenMEEG") hminv = om.SymMatrix(bem["solution"]) - geom = _make_openmeeg_geometry(bem, invert_transform(bem['head_mri_t'])) + geom = _make_openmeeg_geometry(bem, invert_transform(bem["head_mri_t"])) # Make dipoles for all XYZ orientations dipoles = np.c_[ np.kron(rr.T, np.ones(3)[None, :]).T, - np.kron(np.ones(len(rr))[:, None], - np.eye(3)), + np.kron(np.ones(len(rr))[:, None], np.eye(3)), ] dipoles = np.asfortranarray(dipoles) dipoles = om.Matrix(dipoles) dsm = om.DipSourceMat(geom, dipoles, "Brain") Bs = dict() - if 'eeg' in sensors: - rmags, 
_, ws, bins = _concatenate_coils(sensors['eeg']['defs']) + if "eeg" in sensors: + rmags, _, ws, bins = _concatenate_coils(sensors["eeg"]["defs"]) rmags = np.asfortranarray(rmags.astype(np.float64)) eeg_sensors = om.Sensors(om.Matrix(np.asfortranarray(rmags)), geom) h2em = om.Head2EEGMat(geom, eeg_sensors) eeg_fwd_full = om.GainEEG(hminv, dsm, h2em).array() - Bs['eeg'] = np.array([bincount(bins, ws * x, bins[-1] + 1) - for x in eeg_fwd_full.T], float) - if 'meg' in sensors: - rmags, cosmags, ws, bins = _concatenate_coils(sensors['meg']['defs']) + Bs["eeg"] = np.array( + [bincount(bins, ws * x, bins[-1] + 1) for x in eeg_fwd_full.T], float + ) + if "meg" in sensors: + rmags, cosmags, ws, bins = _concatenate_coils(sensors["meg"]["defs"]) rmags = np.asfortranarray(rmags.astype(np.float64)) cosmags = np.asfortranarray(cosmags.astype(np.float64)) labels = [str(ii) for ii in range(len(rmags))] @@ -863,13 +889,14 @@ def _compute_forwards_openmeeg(rr, *, bem, sensors): h2mm = om.Head2MEGMat(geom, meg_sensors) ds2mm = om.DipSource2MEGMat(dipoles, meg_sensors) meg_fwd_full = om.GainMEG(hminv, dsm, h2mm, ds2mm).array() - B = np.array([bincount(bins, ws * x, bins[-1] + 1) - for x in meg_fwd_full.T], float) - compensator = sensors['meg'].get('compensator', None) - post_picks = sensors['meg'].get('post_picks', None) + B = np.array( + [bincount(bins, ws * x, bins[-1] + 1) for x in meg_fwd_full.T], float + ) + compensator = sensors["meg"].get("compensator", None) + post_picks = sensors["meg"].get("post_picks", None) if compensator is not None: B = B @ compensator.T if post_picks is not None: B = B[:, post_picks] - Bs['meg'] = B + Bs["meg"] = B return Bs diff --git a/mne/forward/_field_interpolation.py b/mne/forward/_field_interpolation.py index 2d2c0a6e615..8695154e1cc 100644 --- a/mne/forward/_field_interpolation.py +++ b/mne/forward/_field_interpolation.py @@ -1,39 +1,42 @@ -# -*- coding: utf-8 -*- -# Authors: Matti Hämäläinen -# Alexandre Gramfort -# Eric Larson +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. # The computations in this code were primarily derived from Matti Hämäläinen's # C code. 
+import inspect from copy import deepcopy import numpy as np +from scipy.interpolate import interp1d +from .._fiff.constants import FIFF +from .._fiff.meas_info import _simplify_info +from .._fiff.pick import pick_info, pick_types +from .._fiff.proj import _has_eeg_average_ref_proj, make_projector from ..bem import _check_origin from ..cov import make_ad_hoc_cov -from ..io.constants import FIFF -from ..io.pick import pick_types, pick_info -from ..io.meas_info import _simplify_info -from ..io.proj import _has_eeg_average_ref_proj, make_projector +from ..epochs import BaseEpochs, EpochsArray +from ..evoked import Evoked, EvokedArray +from ..fixes import _safe_svd from ..surface import get_head_surf, get_meg_helmet_surf -from ..transforms import transform_surface_to, _find_trans, _get_trans -from ._make_forward import _create_meg_coils, _create_eeg_els, _read_coil_defs -from ._lead_dots import (_do_self_dots, _do_surface_dots, _get_legen_table, - _do_cross_dots) -from ..utils import ( - logger, verbose, _check_option, _reg_pinv, _pl, _check_fname +from ..transforms import _find_trans, transform_surface_to +from ..utils import _check_fname, _check_option, _pl, _reg_pinv, logger, verbose, warn +from ._lead_dots import ( + _do_cross_dots, + _do_self_dots, + _do_surface_dots, + _get_legen_table, ) -from ..epochs import EpochsArray, BaseEpochs -from ..evoked import Evoked, EvokedArray +from ._make_forward import _create_eeg_els, _create_meg_coils, _read_coil_defs def _setup_dots(mode, info, coils, ch_type): """Set up dot products.""" - from scipy.interpolate import interp1d int_rad = 0.06 noise = make_ad_hoc_cov(info, dict(mag=20e-15, grad=5e-13, eeg=1e-6)) - n_coeff, interp = (50, 'nearest') if mode == 'fast' else (100, 'linear') + n_coeff, interp = (50, "nearest") if mode == "fast" else (100, "linear") lut, n_fact = _get_legen_table(ch_type, False, n_coeff, verbose=False) lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp, axis=0) return int_rad, noise, lut_fun, n_fact @@ -41,27 +44,27 @@ def _setup_dots(mode, info, coils, ch_type): def _compute_mapping_matrix(fmd, info): """Do the hairy computations.""" - logger.info(' Preparing the mapping matrix...') + logger.info(" Preparing the mapping matrix...") # assemble a projector and apply it to the data - ch_names = fmd['ch_names'] - projs = info.get('projs', list()) + ch_names = fmd["ch_names"] + projs = info.get("projs", list()) proj_op = make_projector(projs, ch_names)[0] - proj_dots = np.dot(proj_op.T, np.dot(fmd['self_dots'], proj_op)) + proj_dots = np.dot(proj_op.T, np.dot(fmd["self_dots"], proj_op)) - noise_cov = fmd['noise'] + noise_cov = fmd["noise"] # Whiten - if not noise_cov['diag']: + if not noise_cov["diag"]: raise NotImplementedError # this shouldn't happen - whitener = np.diag(1.0 / np.sqrt(noise_cov['data'].ravel())) + whitener = np.diag(1.0 / np.sqrt(noise_cov["data"].ravel())) whitened_dots = np.dot(whitener.T, np.dot(proj_dots, whitener)) # SVD is numerically better than the eigenvalue composition even if # mat is supposed to be symmetric and positive definite - if fmd.get('pinv_method', 'tsvd') == 'tsvd': - inv, fmd['nest'] = _pinv_trunc(whitened_dots, fmd['miss']) + if fmd.get("pinv_method", "tsvd") == "tsvd": + inv, fmd["nest"] = _pinv_trunc(whitened_dots, fmd["miss"]) else: - assert fmd['pinv_method'] == 'tikhonov', fmd['pinv_method'] - inv, fmd['nest'] = _pinv_tikhonov(whitened_dots, fmd['miss']) + assert fmd["pinv_method"] == "tikhonov", fmd["pinv_method"] + inv, fmd["nest"] = _pinv_tikhonov(whitened_dots, 
fmd["miss"]) # Sandwich with the whitener inv_whitened = np.dot(whitener.T, np.dot(inv, whitener)) @@ -72,29 +75,34 @@ def _compute_mapping_matrix(fmd, info): # Finally sandwich in the selection matrix # This one picks up the correct lead field projection - mapping_mat = np.dot(fmd['surface_dots'], inv_whitened_proj) + mapping_mat = np.dot(fmd["surface_dots"], inv_whitened_proj) # Optionally apply the average electrode reference to the final field map - if fmd['kind'] == 'eeg' and _has_eeg_average_ref_proj(info): + if fmd["kind"] == "eeg" and _has_eeg_average_ref_proj(info): logger.info( - ' The map has an average electrode reference ' - f'({mapping_mat.shape[0]} channels)') + " The map has an average electrode reference " + f"({mapping_mat.shape[0]} channels)" + ) mapping_mat -= np.mean(mapping_mat, axis=0) return mapping_mat def _pinv_trunc(x, miss): """Compute pseudoinverse, truncating at most "miss" fraction of varexp.""" - from scipy import linalg - u, s, v = linalg.svd(x, full_matrices=False) + u, s, v = _safe_svd(x, full_matrices=False) # Eigenvalue truncation varexp = np.cumsum(s) varexp /= varexp[-1] n = np.where(varexp >= (1.0 - miss))[0][0] + 1 - logger.info(' Truncating at %d/%d components to omit less than %g ' - '(%0.2g)' % (n, len(s), miss, 1. - varexp[n - 1])) - s = 1. / s[:n] + logger.info( + " Truncating at %d/%d components to omit less than %g (%0.2g)", + n, + len(s), + miss, + 1.0 - varexp[n - 1], + ) + s = 1.0 / s[:n] inv = ((u[:, :n] * s) @ v[:n]).T return inv, n @@ -102,12 +110,13 @@ def _pinv_trunc(x, miss): def _pinv_tikhonov(x, reg): # _reg_pinv requires square Hermitian, which we have here inv, _, n = _reg_pinv(x, reg=reg, rank=None) - logger.info(f' Truncating at {n}/{len(x)} components and regularizing ' - f'with α={reg:0.1e}') + logger.info( + f" Truncating at {n}/{len(x)} components and regularizing with α={reg:0.1e}" + ) return inv, n -def _map_meg_or_eeg_channels(info_from, info_to, mode, origin, miss=None): +def _map_meg_or_eeg_channels(info_from, info_to, mode, *, origin, miss=None): """Find mapping from one set of channels to another. Parameters @@ -123,67 +132,87 @@ def _map_meg_or_eeg_channels(info_from, info_to, mode, origin, miss=None): origin : array-like, shape (3,) | str Origin of the sphere in the head coordinate frame and in meters. Can be ``'auto'``, which means a head-digitization-based origin - fit. Default is ``(0., 0., 0.04)``. + fit. Returns ------- mapping : array, shape (n_to, n_from) A mapping matrix. """ + assert origin is not None # should be assured elsewhere + # no need to apply trans because both from and to coils are in device # coordinates - info_kinds = set(ch['kind'] for ch in info_to['chs']) - info_kinds |= set(ch['kind'] for ch in info_from['chs']) + info_kinds = set(ch["kind"] for ch in info_to["chs"]) + info_kinds |= set(ch["kind"] for ch in info_from["chs"]) if FIFF.FIFFV_REF_MEG_CH in info_kinds: # refs same as MEG info_kinds |= set([FIFF.FIFFV_MEG_CH]) info_kinds -= set([FIFF.FIFFV_REF_MEG_CH]) info_kinds = sorted(info_kinds) # This should be guaranteed by the callers - assert (len(info_kinds) == 1 and info_kinds[0] in ( - FIFF.FIFFV_MEG_CH, FIFF.FIFFV_EEG_CH)) - kind = 'eeg' if info_kinds[0] == FIFF.FIFFV_EEG_CH else 'meg' + assert len(info_kinds) == 1 and info_kinds[0] in ( + FIFF.FIFFV_MEG_CH, + FIFF.FIFFV_EEG_CH, + ) + kind = "eeg" if info_kinds[0] == FIFF.FIFFV_EEG_CH else "meg" # # Step 1. 
Prepare the coil definitions # - if kind == 'meg': + if kind == "meg": templates = _read_coil_defs(verbose=False) - coils_from = _create_meg_coils(info_from['chs'], 'normal', - info_from['dev_head_t'], templates) - coils_to = _create_meg_coils(info_to['chs'], 'normal', - info_to['dev_head_t'], templates) - pinv_method = 'tsvd' + coils_from = _create_meg_coils( + info_from["chs"], "normal", info_from["dev_head_t"], templates + ) + coils_to = _create_meg_coils( + info_to["chs"], "normal", info_to["dev_head_t"], templates + ) + pinv_method = "tsvd" miss = 1e-4 else: - coils_from = _create_eeg_els(info_from['chs']) - coils_to = _create_eeg_els(info_to['chs']) - pinv_method = 'tikhonov' + coils_from = _create_eeg_els(info_from["chs"]) + coils_to = _create_eeg_els(info_to["chs"]) + pinv_method = "tikhonov" miss = 1e-1 - if _has_eeg_average_ref_proj(info_from) and \ - not _has_eeg_average_ref_proj(info_to): + if _has_eeg_average_ref_proj(info_from) and not _has_eeg_average_ref_proj( + info_to + ): raise RuntimeError( - 'info_to must have an average EEG reference projector if ' - 'info_from has one') + "info_to must have an average EEG reference projector if " + "info_from has one" + ) origin = _check_origin(origin, info_from) # # Step 2. Calculate the dot products # - int_rad, noise, lut_fun, n_fact = _setup_dots( - mode, info_from, coils_from, kind) - logger.info(f' Computing dot products for {len(coils_from)} ' - f'{kind.upper()} channel{_pl(coils_from)}...') - self_dots = _do_self_dots(int_rad, False, coils_from, origin, kind, - lut_fun, n_fact, n_jobs=None) - logger.info(f' Computing cross products for {len(coils_from)} → ' - f'{len(coils_to)} {kind.upper()} channel{_pl(coils_to)}...') - cross_dots = _do_cross_dots(int_rad, False, coils_from, coils_to, - origin, kind, lut_fun, n_fact).T - - ch_names = [c['ch_name'] for c in info_from['chs']] - fmd = dict(kind=kind, ch_names=ch_names, - origin=origin, noise=noise, self_dots=self_dots, - surface_dots=cross_dots, int_rad=int_rad, miss=miss, - pinv_method=pinv_method) + int_rad, noise, lut_fun, n_fact = _setup_dots(mode, info_from, coils_from, kind) + logger.info( + f" Computing dot products for {len(coils_from)} " + f"{kind.upper()} channel{_pl(coils_from)}..." + ) + self_dots = _do_self_dots( + int_rad, False, coils_from, origin, kind, lut_fun, n_fact, n_jobs=None + ) + logger.info( + f" Computing cross products for {len(coils_from)} → " + f"{len(coils_to)} {kind.upper()} channel{_pl(coils_to)}..." + ) + cross_dots = _do_cross_dots( + int_rad, False, coils_from, coils_to, origin, kind, lut_fun, n_fact + ).T + + ch_names = [c["ch_name"] for c in info_from["chs"]] + fmd = dict( + kind=kind, + ch_names=ch_names, + origin=origin, + noise=noise, + self_dots=self_dots, + surface_dots=cross_dots, + int_rad=int_rad, + miss=miss, + pinv_method=pinv_method, + ) # # Step 3. Compute the mapping matrix @@ -192,7 +221,7 @@ def _map_meg_or_eeg_channels(info_from, info_to, mode, origin, miss=None): return mapping -def _as_meg_type_inst(inst, ch_type='grad', mode='fast'): +def _as_meg_type_inst(inst, ch_type="grad", mode="fast"): """Compute virtual evoked using interpolated fields in mag/grad channels. Parameters @@ -211,31 +240,35 @@ def _as_meg_type_inst(inst, ch_type='grad', mode='fast'): inst : instance of mne.EvokedArray or mne.EpochsArray The transformed evoked object containing only virtual channels. 
""" - _check_option('ch_type', ch_type, ['mag', 'grad']) + _check_option("ch_type", ch_type, ["mag", "grad"]) # pick the original and destination channels - pick_from = pick_types(inst.info, meg=True, eeg=False, - ref_meg=False) - pick_to = pick_types(inst.info, meg=ch_type, eeg=False, - ref_meg=False) + pick_from = pick_types(inst.info, meg=True, eeg=False, ref_meg=False) + pick_to = pick_types(inst.info, meg=ch_type, eeg=False, ref_meg=False) if len(pick_to) == 0: - raise ValueError('No channels matching the destination channel type' - ' found in info. Please pass an evoked containing' - 'both the original and destination channels. Only the' - ' locations of the destination channels will be used' - ' for interpolation.') + raise ValueError( + "No channels matching the destination channel type" + " found in info. Please pass an evoked containing" + "both the original and destination channels. Only the" + " locations of the destination channels will be used" + " for interpolation." + ) info_from = pick_info(inst.info, pick_from) info_to = pick_info(inst.info, pick_to) # XXX someday we should probably expose the origin mapping = _map_meg_or_eeg_channels( - info_from, info_to, origin=(0., 0., 0.04), mode=mode) + info_from, info_to, origin=(0.0, 0.0, 0.04), mode=mode + ) # compute data by multiplying by the 'gain matrix' from # original sensors to virtual sensors - if hasattr(inst, 'get_data'): - data = inst.get_data() + if hasattr(inst, "get_data"): + kwargs = dict() + if "copy" in inspect.getfullargspec(inst.get_data).kwonlyargs: + kwargs["copy"] = False + data = inst.get_data(**kwargs) else: data = inst.data @@ -243,8 +276,7 @@ def _as_meg_type_inst(inst, ch_type='grad', mode='fast'): if ndim == 2: data = data[np.newaxis, :, :] - data_ = np.empty((data.shape[0], len(mapping), data.shape[2]), - dtype=data.dtype) + data_ = np.empty((data.shape[0], len(mapping), data.shape[2]), dtype=data.dtype) for d, d_ in zip(data, data_): d_[:] = np.dot(mapping, d[pick_from]) @@ -252,28 +284,42 @@ def _as_meg_type_inst(inst, ch_type='grad', mode='fast'): info = pick_info(inst.info, sel=pick_to, copy=True) # change channel names to emphasize they contain interpolated data - for ch in info['chs']: - ch['ch_name'] += '_v' + for ch in info["chs"]: + ch["ch_name"] += "_v" info._update_redundant() info._check_consistency() if isinstance(inst, Evoked): assert ndim == 2 data_ = data_[0] # undo new axis - inst_ = EvokedArray(data_, info, tmin=inst.times[0], - comment=inst.comment, nave=inst.nave) + inst_ = EvokedArray( + data_, info, tmin=inst.times[0], comment=inst.comment, nave=inst.nave + ) else: assert isinstance(inst, BaseEpochs) - inst_ = EpochsArray(data_, info, tmin=inst.tmin, - events=inst.events, - event_id=inst.event_id, - metadata=inst.metadata) + inst_ = EpochsArray( + data_, + info, + tmin=inst.tmin, + events=inst.events, + event_id=inst.event_id, + metadata=inst.metadata, + ) return inst_ @verbose -def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast', - n_jobs=None, origin=(0., 0., 0.04), verbose=None): +def _make_surface_mapping( + info, + surf, + ch_type="meg", + trans=None, + mode="fast", + n_jobs=None, + *, + origin, + verbose=None, +): """Re-map M/EEG data to a surface. Parameters @@ -294,8 +340,6 @@ def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast', %(n_jobs)s origin : array-like, shape (3,) | str Origin of the sphere in the head coordinate frame and in meters. - The default is ``'auto'``, which means a head-digitization-based - origin fit. 
%(verbose)s Returns @@ -304,88 +348,111 @@ def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast', A n_vertices x n_sensors array that remaps the MEG or EEG data, as `new_data = np.dot(mapping, data)`. """ - if not all(key in surf for key in ['rr', 'nn']): + assert origin is not None # should be assured elsewhere + + if not all(key in surf for key in ["rr", "nn"]): raise KeyError('surf must have both "rr" and "nn"') - if 'coord_frame' not in surf: - raise KeyError('The surface coordinate frame must be specified ' - 'in surf["coord_frame"]') - _check_option('mode', mode, ['accurate', 'fast']) + if "coord_frame" not in surf: + raise KeyError( + 'The surface coordinate frame must be specified in surf["coord_frame"]' + ) + _check_option("mode", mode, ["accurate", "fast"]) # deal with coordinate frames here -- always go to "head" (easiest) orig_surf = surf - surf = transform_surface_to(deepcopy(surf), 'head', trans) + surf = transform_surface_to(deepcopy(surf), "head", trans) origin = _check_origin(origin, info) # # Step 1. Prepare the coil definitions # Do the dot products, assume surf in head coords # - _check_option('ch_type', ch_type, ['meg', 'eeg']) - if ch_type == 'meg': + _check_option("ch_type", ch_type, ["meg", "eeg"]) + if ch_type == "meg": picks = pick_types(info, meg=True, eeg=False, ref_meg=False) - logger.info('Prepare MEG mapping...') + logger.info("Prepare MEG mapping...") else: picks = pick_types(info, meg=False, eeg=True, ref_meg=False) - logger.info('Prepare EEG mapping...') + logger.info("Prepare EEG mapping...") if len(picks) == 0: - raise RuntimeError('cannot map, no channels found') + raise RuntimeError("cannot map, no channels found") # XXX this code does not do any checking for compensation channels, # but it seems like this must be intentional from the ref_meg=False # (presumably from the C code) - dev_head_t = info['dev_head_t'] + dev_head_t = info["dev_head_t"] info = pick_info(_simplify_info(info), picks) - info['dev_head_t'] = dev_head_t + info["dev_head_t"] = dev_head_t # create coil defs in head coordinates - if ch_type == 'meg': + if ch_type == "meg": # Put them in head coordinates - coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t']) - type_str = 'coils' + coils = _create_meg_coils(info["chs"], "normal", info["dev_head_t"]) + type_str = "coils" miss = 1e-4 # Smoothing criterion for MEG else: # EEG - coils = _create_eeg_els(info['chs']) - type_str = 'electrodes' + coils = _create_eeg_els(info["chs"]) + type_str = "electrodes" miss = 1e-3 # Smoothing criterion for EEG # # Step 2. Calculate the dot products # int_rad, noise, lut_fun, n_fact = _setup_dots(mode, info, coils, ch_type) - logger.info('Computing dot products for %i %s...' % (len(coils), type_str)) - self_dots = _do_self_dots(int_rad, False, coils, origin, ch_type, - lut_fun, n_fact, n_jobs) - sel = np.arange(len(surf['rr'])) # eventually we should do sub-selection - logger.info('Computing dot products for %i surface locations...' 
- % len(sel)) - surface_dots = _do_surface_dots(int_rad, False, coils, surf, sel, - origin, ch_type, lut_fun, n_fact, - n_jobs) + logger.info("Computing dot products for %i %s...", len(coils), type_str) + self_dots = _do_self_dots( + int_rad, False, coils, origin, ch_type, lut_fun, n_fact, n_jobs + ) + sel = np.arange(len(surf["rr"])) # eventually we should do sub-selection + logger.info("Computing dot products for %i surface locations...", len(sel)) + surface_dots = _do_surface_dots( + int_rad, False, coils, surf, sel, origin, ch_type, lut_fun, n_fact, n_jobs + ) # # Step 4. Return the result # - fmd = dict(kind=ch_type, surf=surf, ch_names=info['ch_names'], coils=coils, - origin=origin, noise=noise, self_dots=self_dots, - surface_dots=surface_dots, int_rad=int_rad, miss=miss) - logger.info('Field mapping data ready') - - fmd['data'] = _compute_mapping_matrix(fmd, info) + fmd = dict( + kind=ch_type, + surf=surf, + ch_names=info["ch_names"], + coils=coils, + origin=origin, + noise=noise, + self_dots=self_dots, + surface_dots=surface_dots, + int_rad=int_rad, + miss=miss, + ) + logger.info("Field mapping data ready") + + fmd["data"] = _compute_mapping_matrix(fmd, info) # bring the original back, whatever coord frame it was in - fmd['surf'] = orig_surf + fmd["surf"] = orig_surf # Remove some unnecessary fields - del fmd['self_dots'] - del fmd['surface_dots'] - del fmd['int_rad'] - del fmd['miss'] + del fmd["self_dots"] + del fmd["surface_dots"] + del fmd["int_rad"] + del fmd["miss"] return fmd @verbose -def make_field_map(evoked, trans='auto', subject=None, subjects_dir=None, - ch_type=None, mode='fast', meg_surf='helmet', - origin=(0., 0., 0.04), n_jobs=None, *, - head_source=('bem', 'head'), verbose=None): +def make_field_map( + evoked, + trans="auto", + subject=None, + subjects_dir=None, + ch_type=None, + mode="fast", + meg_surf="helmet", + origin=None, + n_jobs=None, + *, + upsampling=1, + head_source=("bem", "head"), + verbose=None, +): """Compute surface maps used for field display in 3D. Parameters @@ -419,7 +486,13 @@ def make_field_map(evoked, trans='auto', subject=None, subjects_dir=None, fit. Default is ``(0., 0., 0.04)``. .. versionadded:: 0.11 + .. versionchanged:: 1.12 + In 1.12 the default value is "auto". + In 1.11 and prior versions, it is ``(0., 0., 0.04)``. %(n_jobs)s + %(helmet_upsampling)s + + .. versionadded:: 1.10 %(head_source)s .. versionadded:: 1.1 @@ -431,12 +504,21 @@ def make_field_map(evoked, trans='auto', subject=None, subjects_dir=None, The surface maps to be used for field plots. The list contains separate ones for MEG and EEG (if both MEG and EEG are present). """ + if origin is None: + warn_message = ( + 'Default value for origin is "(0.0, 0.0, 0.04)" in version 1.11 ' + 'but will be changed to "auto" in 1.12. Set the origin parameter ' + "explicitly to avoid this warning." 
+ ) + warn(warn_message, FutureWarning) + origin = (0.0, 0.0, 0.04) + info = evoked.info if ch_type is None: - types = [t for t in ['eeg', 'meg'] if t in evoked] + types = [t for t in ["eeg", "meg"] if t in evoked] else: - _check_option('ch_type', ch_type, ['eeg', 'meg']) + _check_option("ch_type", ch_type, ["eeg", "meg"]) types = [ch_type] if subjects_dir is not None: @@ -447,35 +529,42 @@ def make_field_map(evoked, trans='auto', subject=None, subjects_dir=None, name="subjects_dir", need_dir=True, ) - if isinstance(trans, str) and trans == 'auto': - # let's try to do this in MRI coordinates so they're easy to plot - trans = _find_trans(subject, subjects_dir) - trans, trans_type = _get_trans(trans, fro='head', to='mri') - if 'eeg' in types and trans_type == 'identity': - logger.info('No trans file available. EEG data ignored.') - types.remove('eeg') + trans, trans_type = _find_trans( + trans=trans, + subject=subject, + subjects_dir=subjects_dir, + ) + + if "eeg" in types and trans_type == "identity": + logger.info("No trans file available. EEG data ignored.") + types.remove("eeg") if len(types) == 0: - raise RuntimeError('No data available for mapping.') + raise RuntimeError("No data available for mapping.") - _check_option('meg_surf', meg_surf, ['helmet', 'head']) + _check_option("meg_surf", meg_surf, ["helmet", "head"]) surfs = [] for this_type in types: - if this_type == 'meg' and meg_surf == 'helmet': - surf = get_meg_helmet_surf(info, trans) + if this_type == "meg" and meg_surf == "helmet": + surf = get_meg_helmet_surf(info, trans, upsampling=upsampling) else: - surf = get_head_surf( - subject, source=head_source, subjects_dir=subjects_dir) + surf = get_head_surf(subject, source=head_source, subjects_dir=subjects_dir) surfs.append(surf) surf_maps = list() for this_type, this_surf in zip(types, surfs): - this_map = _make_surface_mapping(evoked.info, this_surf, this_type, - trans, n_jobs=n_jobs, origin=origin, - mode=mode) + this_map = _make_surface_mapping( + evoked.info, + this_surf, + this_type, + trans, + n_jobs=n_jobs, + origin=origin, + mode=mode, + ) surf_maps.append(this_map) return surf_maps diff --git a/mne/forward/_lead_dots.py b/mne/forward/_lead_dots.py index a97bac9d660..10183fc6280 100644 --- a/mne/forward/_lead_dots.py +++ b/mne/forward/_lead_dots.py @@ -1,8 +1,6 @@ -# Authors: Matti Hämäläinen -# Eric Larson -# Mainak Jas -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # The computations in this code were primarily derived from Matti Hämäläinen's # C code. @@ -14,12 +12,12 @@ from numpy.polynomial import legendre from ..parallel import parallel_func -from ..utils import logger, verbose, _get_extra_data_path, fill_doc - +from ..utils import _get_extra_data_path, _open_lock, fill_doc, logger, verbose ############################################################################## # FAST LEGENDRE (DERIVATIVE) POLYNOMIALS USING LOOKUP TABLE + def _next_legen_der(n, x, p0, p01, p0d, p0dd): """Compute the next Legendre polynomial and its derivatives.""" # only good for n > 1 ! 
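
The `_setup_dots` / `_get_legen_table` changes above keep the existing lookup-table strategy: Legendre terms are tabulated once on a dense grid and later evaluated by cheap interpolation ('nearest' when mode='fast', 'linear' otherwise). A minimal sketch of that idea using only public NumPy/SciPy calls follows; it is an illustration, not the MNE implementation (the real MEG table also caches derivative terms and is written to disk through _open_lock).

import numpy as np
from numpy.polynomial import legendre
from scipy.interpolate import interp1d

# Illustration only: tabulate P_0..P_{n_coeff-1} on a dense grid once, then
# answer later queries by interpolation instead of re-evaluating the series.
n_coeff = 50                  # the 'fast' mode above uses 50 terms
n_interp = 20000              # grid resolution of the cached table

x_grid = np.linspace(-1, 1, n_interp + 1)
lut = legendre.legvander(x_grid, n_coeff - 1)            # (n_interp + 1, n_coeff)
lut_fun = interp1d(x_grid, lut, kind="nearest", axis=0)  # 'linear' for accurate mode

ctheta = np.cos(np.linspace(0.1, np.pi - 0.1, 5))        # example cos(angle) queries
approx = lut_fun(ctheta)                                 # (5, n_coeff)
exact = legendre.legvander(ctheta, n_coeff - 1)
print(np.abs(approx - exact).max())                      # shrinks as n_interp grows
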
@@ -46,50 +44,56 @@ def _get_legen_der(xx, n_coeff=100): p0dds[:2] = [0.0, 0.0] for n in range(2, n_coeff): p0s[n], p0ds[n], p0dds[n] = _next_legen_der( - n, x, p0s[n - 1], p0s[n - 2], p0ds[n - 1], p0dds[n - 1]) + n, x, p0s[n - 1], p0s[n - 2], p0ds[n - 1], p0dds[n - 1] + ) return coeffs @verbose -def _get_legen_table(ch_type, volume_integral=False, n_coeff=100, - n_interp=20000, force_calc=False, verbose=None): +def _get_legen_table( + ch_type, + volume_integral=False, + n_coeff=100, + n_interp=20000, + force_calc=False, + verbose=None, +): """Return a (generated) LUT of Legendre (derivative) polynomial coeffs.""" if n_interp % 2 != 0: - raise RuntimeError('n_interp must be even') - fname = op.join(_get_extra_data_path(), 'tables') + raise RuntimeError("n_interp must be even") + fname = op.join(_get_extra_data_path(), "tables") if not op.isdir(fname): # Updated due to API change (GH 1167) os.makedirs(fname) - if ch_type == 'meg': - fname = op.join(fname, 'legder_%s_%s.bin' % (n_coeff, n_interp)) + if ch_type == "meg": + fname = op.join(fname, f"legder_{n_coeff}_{n_interp}.bin") leg_fun = _get_legen_der - extra_str = ' derivative' + extra_str = " derivative" lut_shape = (n_interp + 1, n_coeff, 3) else: # 'eeg' - fname = op.join(fname, 'legval_%s_%s.bin' % (n_coeff, n_interp)) + fname = op.join(fname, f"legval_{n_coeff}_{n_interp}.bin") leg_fun = _get_legen - extra_str = '' + extra_str = "" lut_shape = (n_interp + 1, n_coeff) if not op.isfile(fname) or force_calc: - logger.info('Generating Legendre%s table...' % extra_str) + logger.info(f"Generating Legendre{extra_str} table...") x_interp = np.linspace(-1, 1, n_interp + 1) lut = leg_fun(x_interp, n_coeff).astype(np.float32) if not force_calc: - with open(fname, 'wb') as fid: + with _open_lock(fname, "wb") as fid: fid.write(lut.tobytes()) else: - logger.info('Reading Legendre%s table...' % extra_str) - with open(fname, 'rb', buffering=0) as fid: + logger.info(f"Reading Legendre{extra_str} table...") + with _open_lock(fname, "rb", buffering=0) as fid: lut = np.fromfile(fid, np.float32) lut.shape = lut_shape # we need this for the integration step n_fact = np.arange(1, n_coeff, dtype=float) - if ch_type == 'meg': + if ch_type == "meg": n_facts = list() # multn, then mult, then multn * (n + 1) if volume_integral: - n_facts.append(n_fact / ((2.0 * n_fact + 1.0) * - (2.0 * n_fact + 3.0))) + n_facts.append(n_fact / ((2.0 * n_fact + 1.0) * (2.0 * n_fact + 3.0))) else: n_facts.append(n_fact / (2.0 * n_fact + 1.0)) n_facts.append(n_facts[0] / (n_fact + 1.0)) @@ -167,8 +171,13 @@ def _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral): bbeta = np.tile(beta[start:stop][np.newaxis], (n_fact.shape[0], 1)) bbeta[0] *= beta[start:stop] np.cumprod(bbeta, axis=0, out=bbeta) # run inplace - np.einsum('ji,jk,ijk->ki', bbeta, n_fact, lut_fun(ctheta[start:stop]), - out=sums[:, start:stop]) + np.einsum( + "ji,jk,ijk->ki", + bbeta, + n_fact, + lut_fun(ctheta[start:stop]), + out=sums[:, start:stop], + ) return sums @@ -179,8 +188,21 @@ def _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral): _eeg_const = 1.0 / (4.0 * np.pi) -def _fast_sphere_dot_r0(r, rr1_orig, rr2s, lr1, lr2s, cosmags1, cosmags2s, - w1, w2s, volume_integral, lut, n_fact, ch_type): +def _fast_sphere_dot_r0( + r, + rr1_orig, + rr2s, + lr1, + lr2s, + cosmags1, + cosmags2s, + w1, + w2s, + volume_integral, + lut, + n_fact, + ch_type, +): """Lead field dot product computation for M/EEG in the sphere model. 
Parameters @@ -230,7 +252,7 @@ def _fast_sphere_dot_r0(r, rr1_orig, rr2s, lr1, lr2s, cosmags1, cosmags2s, cosmags2 = np.concatenate(cosmags2s) # outer product, sum over coords - ct = np.einsum('ik,jk->ij', rr1_orig, rr2) + ct = np.einsum("ik,jk->ij", rr1_orig, rr2) np.clip(ct, -1, 1, ct) # expand axes @@ -239,9 +261,10 @@ def _fast_sphere_dot_r0(r, rr1_orig, rr2s, lr1, lr2s, cosmags1, cosmags2s, lr1lr2 = lr1[:, np.newaxis] * lr2[np.newaxis, :] beta = (r * r) / lr1lr2 - if ch_type == 'meg': - sums = _comp_sums_meg(beta.flatten(), ct.flatten(), lut, n_fact, - volume_integral) + if ch_type == "meg": + sums = _comp_sums_meg( + beta.flatten(), ct.flatten(), lut, n_fact, volume_integral + ) sums.shape = (4,) + beta.shape # Accumulate the result, a little bit streamlined version @@ -252,21 +275,23 @@ def _fast_sphere_dot_r0(r, rr1_orig, rr2s, lr1, lr2s, cosmags1, cosmags2s, # n2c1 = np.sum(cosmags2 * rr1, axis=2) # n2c2 = np.sum(cosmags2 * rr2, axis=2) # n1n2 = np.sum(cosmags1 * cosmags2, axis=2) - n1c1 = np.einsum('ik,ijk->ij', cosmags1, rr1) - n1c2 = np.einsum('ik,ijk->ij', cosmags1, rr2) - n2c1 = np.einsum('jk,ijk->ij', cosmags2, rr1) - n2c2 = np.einsum('jk,ijk->ij', cosmags2, rr2) - n1n2 = np.einsum('ik,jk->ij', cosmags1, cosmags2) + n1c1 = np.einsum("ik,ijk->ij", cosmags1, rr1) + n1c2 = np.einsum("ik,ijk->ij", cosmags1, rr2) + n2c1 = np.einsum("jk,ijk->ij", cosmags2, rr1) + n2c2 = np.einsum("jk,ijk->ij", cosmags2, rr2) + n1n2 = np.einsum("ik,jk->ij", cosmags1, cosmags2) part1 = ct * n1c1 * n2c2 part2 = n1c1 * n2c1 + n1c2 * n2c2 - result = (n1c1 * n2c2 * sums[0] + - (2.0 * part1 - part2) * sums[1] + - (n1n2 + part1 - part2) * sums[2] + - (n1c2 - ct * n1c1) * (n2c1 - ct * n2c2) * sums[3]) + result = ( + n1c1 * n2c2 * sums[0] + + (2.0 * part1 - part2) * sums[1] + + (n1n2 + part1 - part2) * sums[2] + + (n1c2 - ct * n1c1) * (n2c1 - ct * n2c2) * sums[3] + ) # Give it a finishing touch! - result *= (_meg_const / lr1lr2) + result *= _meg_const / lr1lr2 if volume_integral: result *= r else: # 'eeg' @@ -281,7 +306,7 @@ def _fast_sphere_dot_r0(r, rr1_orig, rr2s, lr1, lr2s, cosmags1, cosmags2s, if w1 is not None: result *= w1[:, np.newaxis] for ii, w2 in enumerate(w2s): - out[ii] = np.sum(result[:, offset:offset + len(w2)], axis=sum_axis) + out[ii] = np.sum(result[:, offset : offset + len(w2)], axis=sum_axis) offset += len(w2) return out @@ -314,40 +339,52 @@ def _do_self_dots(intrad, volume, coils, r0, ch_type, lut, n_fact, n_jobs): products : array, shape (n_coils, n_coils) The integration products. 
""" - if ch_type == 'eeg': + if ch_type == "eeg": intrad = intrad * 0.7 # convert to normalized distances from expansion center - rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils] + rmags = [coil["rmag"] - r0[np.newaxis, :] for coil in coils] rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags] rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)] - cosmags = [coil['cosmag'] for coil in coils] - ws = [coil['w'] for coil in coils] + cosmags = [coil["cosmag"] for coil in coils] + ws = [coil["w"] for coil in coils] parallel, p_fun, n_jobs = parallel_func(_do_self_dots_subset, n_jobs) - prods = parallel(p_fun(intrad, rmags, rlens, cosmags, - ws, volume, lut, n_fact, ch_type, idx) - for idx in np.array_split(np.arange(len(rmags)), n_jobs)) + prods = parallel( + p_fun(intrad, rmags, rlens, cosmags, ws, volume, lut, n_fact, ch_type, idx) + for idx in np.array_split(np.arange(len(rmags)), n_jobs) + ) products = np.sum(prods, axis=0) return products -def _do_self_dots_subset(intrad, rmags, rlens, cosmags, ws, volume, lut, - n_fact, ch_type, idx): +def _do_self_dots_subset( + intrad, rmags, rlens, cosmags, ws, volume, lut, n_fact, ch_type, idx +): """Parallelize.""" # all possible combinations of two magnetometers products = np.zeros((len(rmags), len(rmags))) for ci1 in idx: ci2 = ci1 + 1 res = _fast_sphere_dot_r0( - intrad, rmags[ci1], rmags[:ci2], rlens[ci1], rlens[:ci2], - cosmags[ci1], cosmags[:ci2], ws[ci1], ws[:ci2], volume, lut, - n_fact, ch_type) + intrad, + rmags[ci1], + rmags[:ci2], + rlens[ci1], + rlens[:ci2], + cosmags[ci1], + cosmags[:ci2], + ws[ci1], + ws[:ci2], + volume, + lut, + n_fact, + ch_type, + ) products[ci1, :ci2] = res products[:ci2, ci1] = res return products -def _do_cross_dots(intrad, volume, coils1, coils2, r0, ch_type, - lut, n_fact): +def _do_cross_dots(intrad, volume, coils1, coils2, r0, ch_type, lut, n_fact): """Compute lead field dot product integrations between two coil sets. The code is a direct translation of MNE-C code found in @@ -378,10 +415,10 @@ def _do_cross_dots(intrad, volume, coils1, coils2, r0, ch_type, products : array, shape (n_coils, n_coils) The integration products. 
""" - if ch_type == 'eeg': + if ch_type == "eeg": intrad = intrad * 0.7 - rmags1 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils1] - rmags2 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils2] + rmags1 = [coil["rmag"] - r0[np.newaxis, :] for coil in coils1] + rmags2 = [coil["rmag"] - r0[np.newaxis, :] for coil in coils2] rlens1 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags1] rlens2 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags2] @@ -389,24 +426,37 @@ def _do_cross_dots(intrad, volume, coils1, coils2, r0, ch_type, rmags1 = [r / rl[:, np.newaxis] for r, rl in zip(rmags1, rlens1)] rmags2 = [r / rl[:, np.newaxis] for r, rl in zip(rmags2, rlens2)] - ws1 = [coil['w'] for coil in coils1] - ws2 = [coil['w'] for coil in coils2] + ws1 = [coil["w"] for coil in coils1] + ws2 = [coil["w"] for coil in coils2] - cosmags1 = [coil['cosmag'] for coil in coils1] - cosmags2 = [coil['cosmag'] for coil in coils2] + cosmags1 = [coil["cosmag"] for coil in coils1] + cosmags2 = [coil["cosmag"] for coil in coils2] products = np.zeros((len(rmags1), len(rmags2))) for ci1 in range(len(coils1)): res = _fast_sphere_dot_r0( - intrad, rmags1[ci1], rmags2, rlens1[ci1], rlens2, cosmags1[ci1], - cosmags2, ws1[ci1], ws2, volume, lut, n_fact, ch_type) + intrad, + rmags1[ci1], + rmags2, + rlens1[ci1], + rlens2, + cosmags1[ci1], + cosmags2, + ws1[ci1], + ws2, + volume, + lut, + n_fact, + ch_type, + ) products[ci1, :] = res return products @fill_doc -def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type, - lut, n_fact, n_jobs): +def _do_surface_dots( + intrad, volume, coils, surf, sel, r0, ch_type, lut, n_fact, n_jobs +): """Compute the map construction products. Parameters @@ -438,15 +488,15 @@ def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type, The integration products. 
""" # convert to normalized distances from expansion center - rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils] + rmags = [coil["rmag"] - r0[np.newaxis, :] for coil in coils] rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags] rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)] - cosmags = [coil['cosmag'] for coil in coils] - ws = [coil['w'] for coil in coils] + cosmags = [coil["cosmag"] for coil in coils] + ws = [coil["w"] for coil in coils] rref = None refl = None # virt_ref = False - if ch_type == 'eeg': + if ch_type == "eeg": intrad = intrad * 0.7 # The virtual ref code is untested and unused, so it is # commented out for now @@ -455,24 +505,54 @@ def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type, # refl = np.sqrt(np.sum(rref * rref, axis=1)) # rref /= refl[:, np.newaxis] - rsurf = surf['rr'][sel] - r0[np.newaxis, :] + rsurf = surf["rr"][sel] - r0[np.newaxis, :] lsurf = np.sqrt(np.sum(rsurf * rsurf, axis=1)) rsurf /= lsurf[:, np.newaxis] - this_nn = surf['nn'][sel] + this_nn = surf["nn"][sel] # loop over the coils parallel, p_fun, n_jobs = parallel_func(_do_surface_dots_subset, n_jobs) - prods = parallel(p_fun(intrad, rsurf, rmags, rref, refl, lsurf, rlens, - this_nn, cosmags, ws, volume, lut, n_fact, ch_type, - idx) - for idx in np.array_split(np.arange(len(rmags)), n_jobs)) + prods = parallel( + p_fun( + intrad, + rsurf, + rmags, + rref, + refl, + lsurf, + rlens, + this_nn, + cosmags, + ws, + volume, + lut, + n_fact, + ch_type, + idx, + ) + for idx in np.array_split(np.arange(len(rmags)), n_jobs) + ) products = np.sum(prods, axis=0) return products -def _do_surface_dots_subset(intrad, rsurf, rmags, rref, refl, lsurf, rlens, - this_nn, cosmags, ws, volume, lut, n_fact, ch_type, - idx): +def _do_surface_dots_subset( + intrad, + rsurf, + rmags, + rref, + refl, + lsurf, + rlens, + this_nn, + cosmags, + ws, + volume, + lut, + n_fact, + ch_type, + idx, +): """Parallelize. Parameters @@ -507,8 +587,20 @@ def _do_surface_dots_subset(intrad, rsurf, rmags, rref, refl, lsurf, rlens, The integration products. """ products = _fast_sphere_dot_r0( - intrad, rsurf, rmags, lsurf, rlens, this_nn, cosmags, None, ws, - volume, lut, n_fact, ch_type).T + intrad, + rsurf, + rmags, + lsurf, + rlens, + this_nn, + cosmags, + None, + ws, + volume, + lut, + n_fact, + ch_type, + ).T if rref is not None: raise NotImplementedError # we don't ever use this, isn't tested # vres = _fast_sphere_dot_r0( diff --git a/mne/forward/_make_forward.py b/mne/forward/_make_forward.py index 3bd54fca55c..e1264bd2fe6 100644 --- a/mne/forward/_make_forward.py +++ b/mne/forward/_make_forward.py @@ -1,42 +1,63 @@ -# Authors: Matti Hämäläinen -# Alexandre Gramfort -# Martin Luessi -# Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # The computations in this code were primarily derived from Matti Hämäläinen's # C code. 
-from copy import deepcopy -from contextlib import contextmanager -from pathlib import Path import os import os.path as op +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path import numpy as np -from ._compute_forward import _compute_forwards -from ..io import read_info, _loc_to_coil_trans, _loc_to_eeg_loc, Info -from ..io.compensator import get_current_comp, make_compensator -from ..io.pick import _has_kit_refs, pick_types, pick_info -from ..io.constants import FIFF, FWD -from ..transforms import (_ensure_trans, transform_surface_to, apply_trans, - _get_trans, _print_coord_trans, _coord_frame_name, - Transform, invert_transform) -from ..utils import logger, verbose, warn, _pl, _validate_type, _check_fname -from ..source_space import (_ensure_src, _filter_source_spaces, - _make_discrete_source_space, _complete_vol_src) +from .._fiff.compensator import get_current_comp, make_compensator +from .._fiff.constants import FIFF, FWD +from .._fiff.meas_info import Info, read_info +from .._fiff.pick import _has_kit_refs, pick_info, pick_types +from .._fiff.tag import _loc_to_coil_trans, _loc_to_eeg_loc +from ..bem import ConductorModel, _bem_find_surface, read_bem_solution from ..source_estimate import VolSourceEstimate -from ..surface import _normalize_vectors, _CheckInside -from ..bem import read_bem_solution, _bem_find_surface, ConductorModel - -from .forward import (Forward, _merge_fwds, convert_forward_solution, - _FWD_ORDER) - - -_accuracy_dict = dict(normal=FWD.COIL_ACCURACY_NORMAL, - accurate=FWD.COIL_ACCURACY_ACCURATE) +from ..source_space._source_space import ( + SourceSpaces, + _complete_vol_src, + _ensure_src, + _filter_source_spaces, + _make_discrete_source_space, +) +from ..surface import _CheckInside, _CheckInsideSphere, _normalize_vectors +from ..transforms import ( + Transform, + _coord_frame_name, + _ensure_trans, + _get_trans, + _print_coord_trans, + apply_trans, + invert_transform, +) +from ..utils import ( + _check_fname, + _on_missing, + _pl, + _validate_type, + logger, + verbose, + warn, +) +from ._compute_forward import ( + _compute_forwards, + _compute_forwards_meeg, + _prep_field_computation, +) +from .forward import _FWD_ORDER, Forward, _merge_fwds, convert_forward_solution + +_accuracy_dict = dict( + point=FWD.COIL_ACCURACY_POINT, + normal=FWD.COIL_ACCURACY_NORMAL, + accurate=FWD.COIL_ACCURACY_ACCURATE, +) _extra_coil_def_fname = None @@ -62,11 +83,11 @@ def _read_coil_defs(verbose=None): The global variable "_extra_coil_def_fname" can be used to prepend additional definitions. These are never added to the registry. 
""" - coil_dir = op.join(op.split(__file__)[0], '..', 'data') + coil_dir = op.join(op.split(__file__)[0], "..", "data") coils = list() if _extra_coil_def_fname is not None: coils += _read_coil_def_file(_extra_coil_def_fname, use_registry=False) - coils += _read_coil_def_file(op.join(coil_dir, 'coil_def.dat')) + coils += _read_coil_def_file(op.join(coil_dir, "coil_def.dat")) return coils @@ -80,23 +101,28 @@ def _read_coil_def_file(fname, use_registry=True): if not use_registry or fname not in _coil_registry: big_val = 0.5 coils = list() - with open(fname, 'r') as fid: + with open(fname) as fid: lines = fid.readlines() lines = lines[::-1] while len(lines) > 0: line = lines.pop().strip() - if line[0] == '#' and len(line) > 0: + if line[0] == "#" and len(line) > 0: continue desc_start = line.find('"') desc_end = len(line) - 1 assert line.strip()[desc_end] == '"' desc = line[desc_start:desc_end] - vals = np.fromstring(line[:desc_start].strip(), - dtype=float, sep=' ') + vals = np.fromstring(line[:desc_start].strip(), dtype=float, sep=" ") assert len(vals) == 6 npts = int(vals[3]) - coil = dict(coil_type=vals[1], coil_class=vals[0], desc=desc, - accuracy=vals[2], size=vals[4], base=vals[5]) + coil = dict( + coil_type=vals[1], + coil_class=vals[0], + desc=desc, + accuracy=vals[2], + size=vals[4], + base=vals[5], + ) # get parameters of each component rmag = list() cosmag = list() @@ -104,13 +130,13 @@ def _read_coil_def_file(fname, use_registry=True): for p in range(npts): # get next non-comment line line = lines.pop() - while line[0] == '#': + while line[0] == "#": line = lines.pop() - vals = np.fromstring(line, sep=' ') + vals = np.fromstring(line, sep=" ") if len(vals) != 7: raise RuntimeError( - f'Could not interpret line {p + 1} as 7 points:\n' - f'{line}') + f"Could not interpret line {p + 1} as 7 points:\n{line}" + ) # Read and verify data for each integration point w.append(vals[0]) rmag.append(vals[[1, 2, 3]]) @@ -118,11 +144,11 @@ def _read_coil_def_file(fname, use_registry=True): w = np.array(w) rmag = np.array(rmag) cosmag = np.array(cosmag) - size = np.sqrt(np.sum(cosmag ** 2, axis=1)) - if np.any(np.sqrt(np.sum(rmag ** 2, axis=1)) > big_val): - raise RuntimeError('Unreasonable integration point') + size = np.sqrt(np.sum(cosmag**2, axis=1)) + if np.any(np.sqrt(np.sum(rmag**2, axis=1)) > big_val): + raise RuntimeError("Unreasonable integration point") if np.any(size <= 0): - raise RuntimeError('Unreasonable normal') + raise RuntimeError("Unreasonable normal") cosmag /= size[:, np.newaxis] coil.update(dict(w=w, cosmag=cosmag, rmag=rmag)) coils.append(coil) @@ -130,70 +156,91 @@ def _read_coil_def_file(fname, use_registry=True): _coil_registry[fname] = coils if use_registry: coils = deepcopy(_coil_registry[fname]) - logger.info('%d coil definition%s read', len(coils), _pl(coils)) + logger.info("%d coil definition%s read", len(coils), _pl(coils)) return coils def _create_meg_coil(coilset, ch, acc, do_es): """Create a coil definition using templates, transform if necessary.""" # Also change the coordinate frame if so desired - if ch['kind'] not in [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH]: - raise RuntimeError('%s is not a MEG channel' % ch['ch_name']) + if ch["kind"] not in [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH]: + raise RuntimeError(f"{ch['ch_name']} is not a MEG channel") # Simple linear search from the coil definitions for coil in coilset: - if coil['coil_type'] == (ch['coil_type'] & 0xFFFF) and \ - coil['accuracy'] == acc: + if coil["coil_type"] == (ch["coil_type"] & 0xFFFF) 
and coil["accuracy"] == acc: break else: - raise RuntimeError('Desired coil definition not found ' - '(type = %d acc = %d)' % (ch['coil_type'], acc)) + raise RuntimeError( + f"Desired coil definition not found (type = {ch['coil_type']} acc = {acc})" + ) # Apply a coordinate transformation if so desired - coil_trans = _loc_to_coil_trans(ch['loc']) + coil_trans = _loc_to_coil_trans(ch["loc"]) # Create the result - res = dict(chname=ch['ch_name'], coil_class=coil['coil_class'], - accuracy=coil['accuracy'], base=coil['base'], size=coil['size'], - type=ch['coil_type'], w=coil['w'], desc=coil['desc'], - coord_frame=FIFF.FIFFV_COORD_DEVICE, rmag_orig=coil['rmag'], - cosmag_orig=coil['cosmag'], coil_trans_orig=coil_trans, - r0=coil_trans[:3, 3], - rmag=apply_trans(coil_trans, coil['rmag']), - cosmag=apply_trans(coil_trans, coil['cosmag'], False)) + res = dict( + chname=ch["ch_name"], + coil_class=coil["coil_class"], + accuracy=coil["accuracy"], + base=coil["base"], + size=coil["size"], + type=ch["coil_type"], + w=coil["w"], + desc=coil["desc"], + coord_frame=FIFF.FIFFV_COORD_DEVICE, + rmag_orig=coil["rmag"], + cosmag_orig=coil["cosmag"], + coil_trans_orig=coil_trans, + r0=coil_trans[:3, 3], + rmag=apply_trans(coil_trans, coil["rmag"]), + cosmag=apply_trans(coil_trans, coil["cosmag"], False), + ) if do_es: - r0_exey = (np.dot(coil['rmag'][:, :2], coil_trans[:3, :2].T) + - coil_trans[:3, 3]) - res.update(ex=coil_trans[:3, 0], ey=coil_trans[:3, 1], - ez=coil_trans[:3, 2], r0_exey=r0_exey) + r0_exey = np.dot(coil["rmag"][:, :2], coil_trans[:3, :2].T) + coil_trans[:3, 3] + res.update( + ex=coil_trans[:3, 0], + ey=coil_trans[:3, 1], + ez=coil_trans[:3, 2], + r0_exey=r0_exey, + ) return res def _create_eeg_el(ch, t=None): """Create an electrode definition, transform coords if necessary.""" - if ch['kind'] != FIFF.FIFFV_EEG_CH: - raise RuntimeError('%s is not an EEG channel. Cannot create an ' - 'electrode definition.' % ch['ch_name']) + if ch["kind"] != FIFF.FIFFV_EEG_CH: + raise RuntimeError( + f"{ch['ch_name']} is not an EEG channel. Cannot create an electrode " + "definition." 
+ ) if t is None: - t = Transform('head', 'head') # identity, no change - if t.from_str != 'head': - raise RuntimeError('Inappropriate coordinate transformation') + t = Transform("head", "head") # identity, no change + if t.from_str != "head": + raise RuntimeError("Inappropriate coordinate transformation") - r0ex = _loc_to_eeg_loc(ch['loc']) + r0ex = _loc_to_eeg_loc(ch["loc"]) if r0ex.shape[1] == 1: # no reference - w = np.array([1.]) + w = np.array([1.0]) else: # has reference - w = np.array([1., -1.]) + w = np.array([1.0, -1.0]) # Optional coordinate transformation - r0ex = apply_trans(t['trans'], r0ex.T) + r0ex = apply_trans(t["trans"], r0ex.T) # The electrode location cosmag = r0ex.copy() _normalize_vectors(cosmag) - res = dict(chname=ch['ch_name'], coil_class=FWD.COILC_EEG, w=w, - accuracy=_accuracy_dict['normal'], type=ch['coil_type'], - coord_frame=t['to'], rmag=r0ex, cosmag=cosmag) + res = dict( + chname=ch["ch_name"], + coil_class=FWD.COILC_EEG, + w=w, + accuracy=_accuracy_dict["normal"], + type=ch["coil_type"], + coord_frame=t["to"], + rmag=r0ex, + cosmag=cosmag, + ) return res @@ -211,16 +258,24 @@ def _transform_orig_meg_coils(coils, t, do_es=True): if t is None: return for coil in coils: - coil_trans = np.dot(t['trans'], coil['coil_trans_orig']) + coil_trans = np.dot(t["trans"], coil["coil_trans_orig"]) coil.update( - coord_frame=t['to'], r0=coil_trans[:3, 3], - rmag=apply_trans(coil_trans, coil['rmag_orig']), - cosmag=apply_trans(coil_trans, coil['cosmag_orig'], False)) + coord_frame=t["to"], + r0=coil_trans[:3, 3], + rmag=apply_trans(coil_trans, coil["rmag_orig"]), + cosmag=apply_trans(coil_trans, coil["cosmag_orig"], False), + ) if do_es: - r0_exey = (np.dot(coil['rmag_orig'][:, :2], - coil_trans[:3, :2].T) + coil_trans[:3, 3]) - coil.update(ex=coil_trans[:3, 0], ey=coil_trans[:3, 1], - ez=coil_trans[:3, 2], r0_exey=r0_exey) + r0_exey = ( + np.dot(coil["rmag_orig"][:, :2], coil_trans[:3, :2].T) + + coil_trans[:3, 3] + ) + coil.update( + ex=coil_trans[:3, 0], + ey=coil_trans[:3, 1], + ez=coil_trans[:3, 2], + r0_exey=r0_exey, + ) def _create_eeg_els(chs): @@ -229,47 +284,56 @@ def _create_eeg_els(chs): @verbose -def _setup_bem(bem, bem_extra, neeg, mri_head_t, allow_none=False, - verbose=None): +def _setup_bem(bem, bem_extra, neeg, mri_head_t, allow_none=False, verbose=None): """Set up a BEM for forward computation, making a copy and modifying.""" if allow_none and bem is None: return None - logger.info('') - _validate_type(bem, ('path-like', ConductorModel), bem) + logger.info("") + _validate_type(bem, ("path-like", ConductorModel), bem) if not isinstance(bem, ConductorModel): - logger.info('Setting up the BEM model using %s...\n' % bem_extra) + logger.info(f"Setting up the BEM model using {bem_extra}...\n") bem = read_bem_solution(bem) else: bem = bem.copy() - if bem['is_sphere']: - logger.info('Using the sphere model.\n') - if len(bem['layers']) == 0 and neeg > 0: - raise RuntimeError('Spherical model has zero shells, cannot use ' - 'with EEG data') - if bem['coord_frame'] != FIFF.FIFFV_COORD_HEAD: - raise RuntimeError('Spherical model is not in head coordinates') + if bem["is_sphere"]: + logger.info("Using the sphere model.\n") + if len(bem["layers"]) == 0 and neeg > 0: + raise RuntimeError( + "Spherical model has zero shells, cannot use with EEG data" + ) + if bem["coord_frame"] != FIFF.FIFFV_COORD_HEAD: + raise RuntimeError("Spherical model is not in head coordinates") else: - if bem['surfs'][0]['coord_frame'] != FIFF.FIFFV_COORD_MRI: + if bem["surfs"][0]["coord_frame"] 
!= FIFF.FIFFV_COORD_MRI: + raise RuntimeError( + f"BEM is in {_coord_frame_name(bem['surfs'][0]['coord_frame'])} " + "coordinates, should be in MRI" + ) + if neeg > 0 and len(bem["surfs"]) == 1: raise RuntimeError( - 'BEM is in %s coordinates, should be in MRI' - % (_coord_frame_name(bem['surfs'][0]['coord_frame']),)) - if neeg > 0 and len(bem['surfs']) == 1: - raise RuntimeError('Cannot use a homogeneous (1-layer BEM) model ' - 'for EEG forward calculations, consider ' - 'using a 3-layer BEM instead') - logger.info('Employing the head->MRI coordinate transform with the ' - 'BEM model.') + "Cannot use a homogeneous (1-layer BEM) model " + "for EEG forward calculations, consider " + "using a 3-layer BEM instead" + ) + logger.info("Employing the head->MRI coordinate transform with the BEM model.") # fwd_bem_set_head_mri_t: Set the coordinate transformation - bem['head_mri_t'] = _ensure_trans(mri_head_t, 'head', 'mri') - logger.info('BEM model %s is now set up' % op.split(bem_extra)[1]) - logger.info('') + bem["head_mri_t"] = _ensure_trans(mri_head_t, "head", "mri") + logger.info(f"BEM model {op.split(bem_extra)[1]} is now set up") + logger.info("") return bem @verbose -def _prep_meg_channels(info, accuracy='accurate', exclude=(), *, - ignore_ref=False, head_frame=True, do_es=False, - verbose=None): +def _prep_meg_channels( + info, + accuracy="accurate", + exclude=(), + *, + ignore_ref=False, + head_frame=True, + do_es=False, + verbose=None, +): """Prepare MEG coil definitions for forward calculation.""" # Find MEG channels ref_meg = True if not ignore_ref else False @@ -277,104 +341,121 @@ def _prep_meg_channels(info, accuracy='accurate', exclude=(), *, # Make sure MEG coils exist if len(picks) <= 0: - raise RuntimeError('Could not find any MEG channels') + raise RuntimeError("Could not find any MEG channels") info_meg = pick_info(info, picks) del picks # Get channel info and names for MEG channels - logger.info(f'Read {len(info_meg["chs"])} MEG channels from info') + logger.info(f"Read {len(info_meg['chs'])} MEG channels from info") # Get MEG compensation channels compensator = post_picks = None - ch_names = info_meg['ch_names'] + ch_names = info_meg["ch_names"] if not ignore_ref: ref_picks = pick_types(info, meg=False, ref_meg=True, exclude=exclude) ncomp = len(ref_picks) - if (ncomp > 0): - logger.info(f'Read {ncomp} MEG compensation channels from info') + if ncomp > 0: + logger.info(f"Read {ncomp} MEG compensation channels from info") # We need to check to make sure these are NOT KIT refs if _has_kit_refs(info, ref_picks): raise NotImplementedError( - 'Cannot create forward solution with KIT reference ' + "Cannot create forward solution with KIT reference " 'channels. Consider using "ignore_ref=True" in ' - 'calculation') - logger.info( - f'{len(info["comps"])} compensation data sets in info') + "calculation" + ) + logger.info(f"{len(info['comps'])} compensation data sets in info") # Compose a compensation data set if necessary # adapted from mne_make_ctf_comp() from mne_ctf_comp.c - logger.info('Setting up compensation data...') + logger.info("Setting up compensation data...") comp_num = get_current_comp(info) if comp_num is None or comp_num == 0: - logger.info(' No compensation set. Nothing more to do.') + logger.info(" No compensation set. 
Nothing more to do.") else: compensator = make_compensator( - info_meg, 0, comp_num, exclude_comp_chs=False) - logger.info( - f' Desired compensation data ({comp_num}) found.') - logger.info(' All compensation channels found.') - logger.info(' Preselector created.') - logger.info(' Compensation data matrix created.') - logger.info(' Postselector created.') - post_picks = pick_types( - info_meg, meg=True, ref_meg=False, exclude=exclude) + info_meg, 0, comp_num, exclude_comp_chs=False + ) + logger.info(f" Desired compensation data ({comp_num}) found.") + logger.info(" All compensation channels found.") + logger.info(" Preselector created.") + logger.info(" Compensation data matrix created.") + logger.info(" Postselector created.") + post_picks = pick_types(info_meg, meg=True, ref_meg=False, exclude=exclude) ch_names = [ch_names[pick] for pick in post_picks] # Create coil descriptions with transformation to head or device frame templates = _read_coil_defs() if head_frame: - _print_coord_trans(info['dev_head_t']) - transform = info['dev_head_t'] + _print_coord_trans(info["dev_head_t"]) + transform = info["dev_head_t"] else: transform = None megcoils = _create_meg_coils( - info_meg['chs'], accuracy, transform, templates, do_es=do_es) + info_meg["chs"], accuracy, transform, templates, do_es=do_es + ) # Check that coordinate frame is correct and log it if head_frame: - assert megcoils[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD - logger.info('MEG coil definitions created in head coordinates.') + assert megcoils[0]["coord_frame"] == FIFF.FIFFV_COORD_HEAD + logger.info("MEG coil definitions created in head coordinates.") else: - assert megcoils[0]['coord_frame'] == FIFF.FIFFV_COORD_DEVICE - logger.info('MEG coil definitions created in device coordinate.') + assert megcoils[0]["coord_frame"] == FIFF.FIFFV_COORD_DEVICE + logger.info("MEG coil definitions created in device coordinate.") return dict( - defs=megcoils, ch_names=ch_names, compensator=compensator, - info=info_meg, post_picks=post_picks) + defs=megcoils, + ch_names=ch_names, + compensator=compensator, + info=info_meg, + post_picks=post_picks, + ) @verbose def _prep_eeg_channels(info, exclude=(), verbose=None): """Prepare EEG electrode definitions for forward calculation.""" - info_extra = 'info' + info_extra = "info" # Find EEG electrodes - picks = pick_types(info, meg=False, eeg=True, ref_meg=False, - exclude=exclude) + picks = pick_types(info, meg=False, eeg=True, ref_meg=False, exclude=exclude) # Make sure EEG electrodes exist neeg = len(picks) if neeg <= 0: - raise RuntimeError('Could not find any EEG channels') + raise RuntimeError("Could not find any EEG channels") # Get channel info and names for EEG channels - eegchs = pick_info(info, picks)['chs'] - eegnames = [info['ch_names'][p] for p in picks] - logger.info('Read %3d EEG channels from %s' % (len(picks), info_extra)) + eegchs = pick_info(info, picks)["chs"] + eegnames = [info["ch_names"][p] for p in picks] + logger.info(f"Read {len(picks):3} EEG channels from {info_extra}") # Create EEG electrode descriptions eegels = _create_eeg_els(eegchs) - logger.info('Head coordinate coil definitions created.') + logger.info("Head coordinate coil definitions created.") return dict(defs=eegels, ch_names=eegnames) @verbose -def _prepare_for_forward(src, mri_head_t, info, bem, mindist, n_jobs, - bem_extra='', trans='', info_extra='', - meg=True, eeg=True, ignore_ref=False, - allow_bem_none=False, verbose=None): +def _prepare_for_forward( + src, + mri_head_t, + info, + bem, + mindist, + n_jobs, + 
*, + bem_extra="", + trans="", + info_extra="", + meg=True, + eeg=True, + ignore_ref=False, + allow_bem_none=False, + on_inside="raise", + verbose=None, +): """Prepare for forward computation. The sensors dict contains keys for each sensor type, e.g. 'meg', 'eeg'. @@ -388,116 +469,161 @@ def _prepare_for_forward(src, mri_head_t, info, bem, mindist, n_jobs, compensator """ # Read the source locations - logger.info('') + logger.info("") # let's make a copy in case we modify something src = _ensure_src(src).copy() - nsource = sum(s['nuse'] for s in src) - if nsource == 0: - raise RuntimeError('No sources are active in these source spaces. ' - '"do_all" option should be used.') - logger.info('Read %d source spaces a total of %d active source locations' - % (len(src), nsource)) + nsource = sum(s["nuse"] for s in src) + if len(src) and nsource == 0: + raise RuntimeError( + "No sources are active in these source spaces. " + '"do_all" option should be used.' + ) + logger.info( + "Read %d source spaces a total of %d active source locations", len(src), nsource + ) # Delete some keys to clean up the source space: - for key in ['working_dir', 'command_line']: + for key in ["working_dir", "command_line"]: if key in src.info: del src.info[key] # Read the MRI -> head coordinate transformation - logger.info('') + logger.info("") _print_coord_trans(mri_head_t) # make a new dict with the relevant information - arg_list = [info_extra, trans, src, bem_extra, meg, eeg, mindist, - n_jobs, verbose] - cmd = 'make_forward_solution(%s)' % (', '.join([str(a) for a in arg_list])) + arg_list = [info_extra, trans, src, bem_extra, meg, eeg, mindist, n_jobs, verbose] + cmd = f"make_forward_solution({', '.join(str(a) for a in arg_list)})" mri_id = dict(machid=np.zeros(2, np.int32), version=0, secs=0, usecs=0) info_trans = str(trans) if isinstance(trans, Path) else trans - info = Info(chs=info['chs'], comps=info['comps'], - dev_head_t=info['dev_head_t'], mri_file=info_trans, - mri_id=mri_id, - meas_file=info_extra, meas_id=None, working_dir=os.getcwd(), - command_line=cmd, bads=info['bads'], mri_head_t=mri_head_t) + info = Info( + chs=info["chs"], + comps=info["comps"], + # The forward-writing code always wants a dev_head_t, so give an identity one + dev_head_t=info["dev_head_t"] or Transform("meg", "head"), + mri_file=info_trans, + mri_id=mri_id, + meas_file=info_extra, + meas_id=None, + working_dir=os.getcwd(), + command_line=cmd, + bads=info["bads"], + mri_head_t=mri_head_t, + ) info._update_redundant() info._check_consistency() - logger.info('') + logger.info("") sensors = dict() if meg and len(pick_types(info, meg=True, ref_meg=False, exclude=[])) > 0: - sensors['meg'] = _prep_meg_channels(info, ignore_ref=ignore_ref) + sensors["meg"] = _prep_meg_channels(info, ignore_ref=ignore_ref) if eeg and len(pick_types(info, eeg=True, exclude=[])) > 0: - sensors['eeg'] = _prep_eeg_channels(info) + sensors["eeg"] = _prep_eeg_channels(info) # Check that some channels were found if len(sensors) == 0: - raise RuntimeError('No MEG or EEG channels found.') + raise RuntimeError("No MEG or EEG channels found.") # pick out final info - info = pick_info(info, pick_types(info, meg=meg, eeg=eeg, ref_meg=False, - exclude=[])) + info = pick_info( + info, pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[]) + ) # Transform the source spaces into the appropriate coordinates # (will either be HEAD or MRI) - for s in src: - transform_surface_to(s, 'head', mri_head_t) - logger.info('Source spaces are now in %s coordinates.' 
- % _coord_frame_name(s['coord_frame'])) + src._transform_to("head", mri_head_t) + if len(src): + logger.info( + f"Source spaces are now in {_coord_frame_name(src[0]['coord_frame'])} " + "coordinates." + ) # Prepare the BEM model - eegnames = sensors.get('eeg', dict()).get('ch_names', []) - bem = _setup_bem(bem, bem_extra, len(eegnames), mri_head_t, - allow_none=allow_bem_none) + eegnames = sensors.get("eeg", dict()).get("ch_names", []) + bem = _setup_bem( + bem, bem_extra, len(eegnames), mri_head_t, allow_none=allow_bem_none + ) del eegnames # Circumvent numerical problems by excluding points too close to the skull, # and check that sensors are not inside any BEM surface if bem is not None: - if not bem['is_sphere']: - check_surface = 'inner skull surface' - inner_skull = _bem_find_surface(bem, 'inner_skull') - check_inside = _filter_source_spaces( - inner_skull, mindist, mri_head_t, src, n_jobs) - logger.info('') - if len(bem['surfs']) == 3: - check_surface = 'scalp surface' - check_inside = _CheckInside(_bem_find_surface(bem, 'head')) + kwargs = dict(limit=mindist, mri_head_t=mri_head_t, src=src) + if not bem["is_sphere"]: + check_surface = "inner skull surface" + check_inside_brain = _CheckInside(_bem_find_surface(bem, "inner_skull")) + logger.info("") + if len(bem["surfs"]) == 3: + check_surface = "scalp surface" + check_inside_head = _CheckInside(_bem_find_surface(bem, "head")) + else: + check_inside_head = check_inside_brain else: - check_surface = 'outermost sphere shell' - if len(bem['layers']) == 0: - def check_inside(x): - return np.zeros(len(x), bool) + check_surface = "outermost sphere shell" + check_inside_brain = _CheckInsideSphere(bem) + if bem.radius is not None: + check_inside_head = _CheckInsideSphere(bem, check="outer") else: - def check_inside(x): - return (np.linalg.norm(x - bem['r0'], axis=1) < - bem['layers'][-1]['rad']) - if 'meg' in sensors: - meg_loc = apply_trans( - invert_transform(mri_head_t), - np.array([coil['r0'] for coil in sensors['meg']['defs']])) - n_inside = check_inside(meg_loc).sum() - if n_inside: - raise RuntimeError( - f'Found {n_inside} MEG sensor{_pl(n_inside)} inside the ' - f'{check_surface}, perhaps coordinate frames and/or ' - 'coregistration must be incorrect') - rr = np.concatenate([s['rr'][s['vertno']] for s in src]) - if len(rr) < 1: - raise RuntimeError('No points left in source space after excluding ' - 'points close to inner skull.') + def check_inside_head(x): + return np.zeros(len(x), bool) + + if len(src): + _filter_source_spaces(check_inside_brain, **kwargs) + + if "meg" in sensors: + meg_loc = np.array([coil["r0"] for coil in sensors["meg"]["defs"]]) + if not bem["is_sphere"]: + meg_loc = apply_trans(invert_transform(mri_head_t), meg_loc) + n_inside = check_inside_head(meg_loc).sum() + if n_inside: + msg = ( + f"Found {n_inside} MEG sensor{_pl(n_inside)} inside the " + f"{check_surface}, perhaps coordinate frames and/or " + "coregistration are incorrect" + ) + _on_missing(on_inside, msg, name="on_inside", error_klass=RuntimeError) + + if len(src): + rr = np.concatenate([s["rr"][s["vertno"]] for s in src]) + if len(rr) < 1: + raise RuntimeError( + "No points left in source space after excluding " + "points close to inner skull." 
+ ) + else: + rr = np.zeros((0, 3)) # deal with free orientations: source_nn = np.tile(np.eye(3), (len(rr), 1)) - update_kwargs = dict(nchan=len(info['ch_names']), nsource=len(rr), - info=info, src=src, source_nn=source_nn, - source_rr=rr, surf_ori=False, mri_head_t=mri_head_t) + update_kwargs = dict( + nchan=len(info["ch_names"]), + nsource=len(rr), + info=info, + src=src, + source_nn=source_nn, + source_rr=rr, + surf_ori=False, + mri_head_t=mri_head_t, + ) return sensors, rr, info, update_kwargs, bem @verbose -def make_forward_solution(info, trans, src, bem, meg=True, eeg=True, *, - mindist=0.0, ignore_ref=False, n_jobs=None, - verbose=None): +def make_forward_solution( + info, + trans, + src, + bem, + meg=True, + eeg=True, + *, + mindist=0.0, + ignore_ref=False, + n_jobs=None, + on_inside="raise", + verbose=None, +): """Calculate a forward solution for a subject. Parameters @@ -509,14 +635,16 @@ def make_forward_solution(info, trans, src, bem, meg=True, eeg=True, *, Support for ``'fsaverage'`` argument. src : path-like | instance of SourceSpaces Either a path to a source space file or a loaded or generated - `~mne.source_space.SourceSpaces`. - bem : path-like | dict + :class:`~mne.SourceSpaces`. + bem : path-like | ConductorModel Filename of the BEM (e.g., ``"sample-5120-5120-5120-bem-sol.fif"``) to - use, or a loaded sphere model (dict). + use, or a loaded :class:`~mne.bem.ConductorModel`. See + :func:`~mne.make_bem_model` and :func:`~mne.make_bem_solution` to create a + :class:`mne.bem.ConductorModel`. meg : bool - If True (Default), include MEG computations. + If True (default), include MEG computations. eeg : bool - If True (Default), include EEG computations. + If True (default), include EEG computations. mindist : float Minimum distance of sources from inner skull surface (in mm). ignore_ref : bool @@ -524,6 +652,13 @@ def make_forward_solution(info, trans, src, bem, meg=True, eeg=True, *, option should be True for KIT files, since forward computation with reference channels is not currently supported. %(n_jobs)s + on_inside : 'raise' | 'warn' | 'ignore' + What to do if MEG sensors are inside the outer skin surface. If 'raise' + (default), an error is raised. If 'warn' or 'ignore', the forward + solution is computed anyway and a warning is or isn't emitted, + respectively. + + .. versionadded:: 1.10 %(verbose)s Returns @@ -544,7 +679,7 @@ def make_forward_solution(info, trans, src, bem, meg=True, eeg=True, *, followed by :func:`mne.convert_forward_solution`. .. note:: - If the BEM solution was computed with :doc:`OpenMEEG ` + If the BEM solution was computed with `OpenMEEG `__ in :func:`mne.make_bem_solution`, then OpenMEEG will automatically be used to compute the forward solution. 
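
For reference, a minimal usage sketch of the make_forward_solution signature documented above. The on_inside option comes from the docstring in this diff; the sample-dataset file names are assumptions used only for illustration.

import mne

# Assumed standard "sample" dataset layout; adjust paths for your data.
data_path = mne.datasets.sample.data_path()
raw_fname = data_path / "MEG" / "sample" / "sample_audvis_raw.fif"
trans_fname = data_path / "MEG" / "sample" / "sample_audvis_raw-trans.fif"
src_fname = data_path / "subjects" / "sample" / "bem" / "sample-oct-6-src.fif"
bem_fname = data_path / "subjects" / "sample" / "bem" / "sample-5120-5120-5120-bem-sol.fif"

info = mne.io.read_info(raw_fname)
fwd = mne.make_forward_solution(
    info,
    trans=trans_fname,
    src=src_fname,
    bem=bem_fname,
    meg=True,
    eeg=True,
    mindist=5.0,
    on_inside="warn",  # per the docstring above; "raise" is the default
)
print(fwd)
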
@@ -560,61 +695,75 @@ def make_forward_solution(info, trans, src, bem, meg=True, eeg=True, *, # (could also be HEAD to MRI) mri_head_t, trans = _get_trans(trans) if isinstance(bem, ConductorModel): - bem_extra = 'instance of ConductorModel' + bem_extra = "instance of ConductorModel" else: bem_extra = bem - _validate_type(info, ('path-like', Info), 'info') + _validate_type(info, ("path-like", Info), "info") if not isinstance(info, Info): info_extra = op.split(info)[1] - info = _check_fname(info, must_exist=True, overwrite='read', - name='info') + info = _check_fname(info, must_exist=True, overwrite="read", name="info") info = read_info(info, verbose=False) else: - info_extra = 'instance of Info' + info_extra = "instance of Info" # Report the setup - logger.info('Source space : %s' % src) - logger.info('MRI -> head transform : %s' % trans) - logger.info('Measurement data : %s' % info_extra) - if isinstance(bem, ConductorModel) and bem['is_sphere']: - logger.info('Sphere model : origin at %s mm' - % (bem['r0'],)) - logger.info('Standard field computations') + logger.info(f"Source space : {src}") + logger.info(f"MRI -> head transform : {trans}") + logger.info(f"Measurement data : {info_extra}") + if isinstance(bem, ConductorModel) and bem["is_sphere"]: + logger.info(f"Sphere model : origin at {bem['r0']} mm") + logger.info("Standard field computations") else: - logger.info('Conductor model : %s' % bem_extra) - logger.info('Accurate field computations') - logger.info('Do computations in %s coordinates', - _coord_frame_name(FIFF.FIFFV_COORD_HEAD)) - logger.info('Free source orientations') + logger.info(f"Conductor model : {bem_extra}") + logger.info("Accurate field computations") + logger.info( + "Do computations in %s coordinates", _coord_frame_name(FIFF.FIFFV_COORD_HEAD) + ) + logger.info("Free source orientations") # Create MEG coils and EEG electrodes in the head coordinate frame sensors, rr, info, update_kwargs, bem = _prepare_for_forward( - src, mri_head_t, info, bem, mindist, n_jobs, bem_extra, trans, - info_extra, meg, eeg, ignore_ref) - del (src, mri_head_t, trans, info_extra, bem_extra, mindist, - meg, eeg, ignore_ref) + src, + mri_head_t, + info, + bem, + mindist, + n_jobs, + bem_extra=bem_extra, + trans=trans, + info_extra=info_extra, + meg=meg, + eeg=eeg, + ignore_ref=ignore_ref, + on_inside=on_inside, + ) + del (src, mri_head_t, trans, info_extra, bem_extra, mindist, meg, eeg, ignore_ref) # Time to do the heavy lifting: MEG first, then EEG fwds = _compute_forwards(rr, bem=bem, sensors=sensors, n_jobs=n_jobs) # merge forwards - fwds = {key: _to_forward_dict(fwds[key], sensors[key]['ch_names']) - for key in _FWD_ORDER if key in fwds} + fwds = { + key: _to_forward_dict(fwds[key], sensors[key]["ch_names"]) + for key in _FWD_ORDER + if key in fwds + } fwd = _merge_fwds(fwds, verbose=False) del fwds - logger.info('') + logger.info("") # Don't transform the source spaces back into MRI coordinates (which is # done in the C code) because mne-python assumes forward solution source # spaces are in head coords. fwd.update(**update_kwargs) - logger.info('Finished.') + logger.info("Finished.") return fwd @verbose -def make_forward_dipole(dipole, bem, info, trans=None, n_jobs=None, *, - verbose=None): +def make_forward_dipole( + dipole, bem, info, trans=None, n_jobs=None, *, on_inside="raise", verbose=None +): """Convert dipole object to source estimate and calculate forward operator. 
The instance of Dipole is converted to a discrete source space, @@ -640,6 +789,13 @@ def make_forward_dipole(dipole, bem, info, trans=None, n_jobs=None, *, The head<->MRI transform filename. Must be provided unless BEM is a sphere model. %(n_jobs)s + on_inside : 'raise' | 'warn' | 'ignore' + What to do if MEG sensors are inside the outer skin surface. If 'raise' + (default), an error is raised. If 'warn' or 'ignore', the forward + solution is computed anyway and a warning is or isn't emitted, + respectively. + + .. versionadded:: 1.10 %(verbose)s Returns @@ -661,6 +817,7 @@ def make_forward_dipole(dipole, bem, info, trans=None, n_jobs=None, *, """ if isinstance(dipole, list): from ..dipole import _concatenate_dipoles # To avoid circular import + dipole = _concatenate_dipoles(dipole) # Make copies to avoid mangling original dipole @@ -673,31 +830,32 @@ def make_forward_dipole(dipole, bem, info, trans=None, n_jobs=None, *, # NB information about dipole orientation enters here, then no more sources = dict(rr=pos, nn=ori) # Dipole objects must be in the head frame - src = _complete_vol_src( - [_make_discrete_source_space(sources, coord_frame='head')]) + src = _complete_vol_src([_make_discrete_source_space(sources, coord_frame="head")]) # Forward operator created for channels in info (use pick_info to restrict) # Use defaults for most params, including min_dist - fwd = make_forward_solution(info, trans, src, bem, n_jobs=n_jobs, - verbose=verbose) + fwd = make_forward_solution( + info, trans, src, bem, n_jobs=n_jobs, on_inside=on_inside, verbose=verbose + ) # Convert from free orientations to fixed (in-place) - convert_forward_solution(fwd, surf_ori=False, force_fixed=True, - copy=False, use_cps=False, verbose=None) + convert_forward_solution( + fwd, surf_ori=False, force_fixed=True, copy=False, use_cps=False, verbose=None + ) # Check for omissions due to proximity to inner skull in # make_forward_solution, which will result in an exception - if fwd['src'][0]['nuse'] != len(pos): - inuse = fwd['src'][0]['inuse'].astype(bool) - head = ('The following dipoles are outside the inner skull boundary') - msg = len(head) * '#' + '\n' + head + '\n' - for (t, pos) in zip(times[np.logical_not(inuse)], - pos[np.logical_not(inuse)]): - msg += ' t={:.0f} ms, pos=({:.0f}, {:.0f}, {:.0f}) mm\n'.\ - format(t * 1000., pos[0] * 1000., - pos[1] * 1000., pos[2] * 1000.) - msg += len(head) * '#' + if fwd["src"][0]["nuse"] != len(pos): + inuse = fwd["src"][0]["inuse"].astype(bool) + head = "The following dipoles are outside the inner skull boundary" + msg = len(head) * "#" + "\n" + head + "\n" + for t, pos in zip(times[np.logical_not(inuse)], pos[np.logical_not(inuse)]): + msg += ( + f" t={t * 1000.0:.0f} ms, pos=({pos[0] * 1000.0:.0f}, " + f"{pos[1] * 1000.0:.0f}, {pos[2] * 1000.0:.0f}) mm\n" + ) + msg += len(head) * "#" logger.error(msg) - raise ValueError('One or more dipoles outside the inner skull.') + raise ValueError("One or more dipoles outside the inner skull.") # multiple dipoles (rr and nn) per time instant allowed # uneven sampling in time returns list @@ -705,8 +863,10 @@ def make_forward_dipole(dipole, bem, info, trans=None, n_jobs=None, *, if len(timepoints) > 1: tdiff = np.diff(timepoints) if not np.allclose(tdiff, tdiff[0]): - warn('Unique time points of dipoles unevenly spaced: returned ' - 'stc will be a list, one for each time point.') + warn( + "Unique time points of dipoles unevenly spaced: returned " + "stc will be a list, one for each time point." 
+ ) tstep = -1.0 else: tstep = tdiff[0] @@ -720,40 +880,65 @@ def make_forward_dipole(dipole, bem, info, trans=None, n_jobs=None, *, data = np.zeros((len(amplitude), len(timepoints))) # (n_d, n_t) row = 0 for tpind, tp in enumerate(timepoints): - amp = amplitude[np.in1d(times, tp)] - data[row:row + len(amp), tpind] = amp + amp = amplitude[np.isin(times, tp)] + data[row : row + len(amp), tpind] = amp row += len(amp) if tstep > 0: - stc = VolSourceEstimate(data, vertices=[fwd['src'][0]['vertno']], - tmin=timepoints[0], - tstep=tstep, subject=None) + stc = VolSourceEstimate( + data, + vertices=[fwd["src"][0]["vertno"]], + tmin=timepoints[0], + tstep=tstep, + subject=None, + ) else: # Must return a list of stc, one for each time point stc = [] for col, tp in enumerate(timepoints): - stc += [VolSourceEstimate(data[:, col][:, np.newaxis], - vertices=[fwd['src'][0]['vertno']], - tmin=tp, tstep=0.001, subject=None)] + stc += [ + VolSourceEstimate( + data[:, col][:, np.newaxis], + vertices=[fwd["src"][0]["vertno"]], + tmin=tp, + tstep=0.001, + subject=None, + ) + ] return fwd, stc -def _to_forward_dict(fwd, names, fwd_grad=None, - coord_frame=FIFF.FIFFV_COORD_HEAD, - source_ori=FIFF.FIFFV_MNE_FREE_ORI): +def _to_forward_dict( + fwd, + names, + fwd_grad=None, + coord_frame=FIFF.FIFFV_COORD_HEAD, + source_ori=FIFF.FIFFV_MNE_FREE_ORI, +): """Convert forward solution matrices to dicts.""" assert names is not None - sol = dict(data=fwd.T, nrow=fwd.shape[1], ncol=fwd.shape[0], - row_names=names, col_names=[]) - fwd = Forward(sol=sol, source_ori=source_ori, nsource=sol['ncol'], - coord_frame=coord_frame, sol_grad=None, - nchan=sol['nrow'], _orig_source_ori=source_ori, - _orig_sol=sol['data'].copy(), _orig_sol_grad=None) + sol = dict( + data=fwd.T, nrow=fwd.shape[1], ncol=fwd.shape[0], row_names=names, col_names=[] + ) + fwd = Forward( + sol=sol, + source_ori=source_ori, + nsource=sol["ncol"], + coord_frame=coord_frame, + sol_grad=None, + nchan=sol["nrow"], + _orig_source_ori=source_ori, + _orig_sol=sol["data"].copy(), + _orig_sol_grad=None, + ) if fwd_grad is not None: - sol_grad = dict(data=fwd_grad.T, nrow=fwd_grad.shape[1], - ncol=fwd_grad.shape[0], row_names=names, - col_names=[]) - fwd.update(dict(sol_grad=sol_grad), - _orig_sol_grad=sol_grad['data'].copy()) + sol_grad = dict( + data=fwd_grad.T, + nrow=fwd_grad.shape[1], + ncol=fwd_grad.shape[0], + row_names=names, + col_names=[], + ) + fwd.update(dict(sol_grad=sol_grad), _orig_sol_grad=sol_grad["data"].copy()) return fwd @@ -787,3 +972,91 @@ def use_coil_def(fname): yield finally: _extra_coil_def_fname = None + + +class _ForwardModeler: + """Optimized incremental fitting using the same sensors and BEM.""" + + @verbose + def __init__( + self, + info, + trans, + bem, + *, + mindist=0.0, + n_jobs=1, + verbose=None, + ): + self.mri_head_t, _ = _get_trans(trans) + self.mindist = mindist + self.n_jobs = n_jobs + src = SourceSpaces([]) + self.sensors, _, _, self.update_kwargs, self.bem = _prepare_for_forward( + src, + self.mri_head_t, + info, + bem, + mindist, + n_jobs, + bem_extra="", + trans="", + info_extra="", + meg=True, + eeg=True, + ignore_ref=False, + ) + self.fwd_data = _prep_field_computation( + sensors=self.sensors, + bem=self.bem, + n_jobs=self.n_jobs, + ) + if self.bem["is_sphere"]: + self.check_inside = _CheckInsideSphere(self.bem) + else: + self.check_inside = _CheckInside(_bem_find_surface(self.bem, "inner_skull")) + + def compute(self, src): + src = _ensure_src(src).copy() + src._transform_to("head", self.mri_head_t) + 
_filter_source_spaces( + self.check_inside, + n_jobs=self.n_jobs, + limit=self.mindist, + mri_head_t=self.mri_head_t, + src=src, + ) + rr = np.concatenate([s["rr"][s["vertno"]] for s in src]) + if len(rr) < 1: + raise RuntimeError( + "No points left in source space after excluding " + "points close to inner skull." + ) + + sensors = deepcopy(self.sensors) + fwd_data = deepcopy(self.fwd_data) + fwds = _compute_forwards_meeg( + rr, + sensors=sensors, + fwd_data=fwd_data, + n_jobs=self.n_jobs, + ) + fwds = { + key: _to_forward_dict(fwds[key], sensors[key]["ch_names"]) + for key in _FWD_ORDER + if key in fwds + } + fwd = _merge_fwds(fwds, verbose=False) + del fwds + + fwd.update(**self.update_kwargs) + # Delete some keys to clean up the source space: + for key in ["working_dir", "command_line"]: + if key in src.info: + del src.info[key] + # our `update_kwargs` set these, but they need to be updated for this src + fwd["src"] = src + fwd["nsource"] = sum(s["nuse"] for s in src) + fwd["source_rr"] = np.vstack([s["rr"][s["inuse"] == 1] for s in src]) + fwd["source_nn"] = np.tile(np.eye(3), (fwd["nsource"], 1)) + return fwd diff --git a/mne/forward/forward.py b/mne/forward/forward.py index 866b319c81b..e8a7d62a3ce 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -1,55 +1,87 @@ -# Authors: Matti Hämäläinen -# Alexandre Gramfort -# Martin Luessi -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # The computations in this code were primarily derived from Matti Hämäläinen's # C code. +import os import re +import shutil +import tempfile from copy import deepcopy from os import PathLike +from os import path as op from pathlib import Path from time import time import numpy as np +from scipy import sparse -import shutil -import os -from os import path as op -import tempfile - -from ..io import RawArray, Info -from ..io.constants import FIFF -from ..io.open import fiff_open -from ..io.tree import dir_tree_find -from ..io.tag import find_tag, read_tag -from ..io.matrix import (_read_named_matrix, _transpose_named_matrix, - write_named_matrix) -from ..io.meas_info import (_read_bad_channels, write_info, _write_ch_infos, - _read_extended_ch_info, _make_ch_names_mapping, - _write_bad_channels) -from ..io.pick import (pick_channels_forward, pick_info, pick_channels, - pick_types) -from ..io.write import (write_int, start_block, end_block, write_coord_trans, - write_string, start_and_end_file, write_id) -from ..io.base import BaseRaw -from ..evoked import Evoked, EvokedArray +from .._fiff.constants import FIFF +from .._fiff.matrix import ( + _read_named_matrix, + _transpose_named_matrix, + write_named_matrix, +) +from .._fiff.meas_info import ( + Info, + _make_ch_names_mapping, + _read_bad_channels, + _read_extended_ch_info, + _write_bad_channels, + _write_ch_infos, + write_info, +) +from .._fiff.open import fiff_open +from .._fiff.pick import pick_channels, pick_channels_forward, pick_info, pick_types +from .._fiff.tag import find_tag, read_tag +from .._fiff.tree import dir_tree_find +from .._fiff.write import ( + end_block, + start_and_end_file, + start_block, + write_coord_trans, + write_id, + write_int, + write_string, +) from ..epochs import BaseEpochs -from ..source_space import (_read_source_spaces_from_tree, - find_source_space_hemi, _set_source_space_vertices, - _write_source_spaces_to_fid, _get_src_nn, - _src_kind_dict) -from ..source_estimate import _BaseVectorSourceEstimate, _BaseSourceEstimate -from ..surface import 
_normal_orth -from ..transforms import (transform_surface_to, invert_transform, - write_trans) -from ..utils import (_check_fname, get_subjects_dir, has_mne_c, warn, - run_subprocess, check_fname, logger, verbose, fill_doc, - _validate_type, _check_compensation_grade, _check_option, - _check_stc_units, _stamp_to_dt, _on_missing, repr_html) +from ..evoked import Evoked, EvokedArray +from ..html_templates import _get_html_template +from ..io import BaseRaw, RawArray from ..label import Label +from ..source_estimate import _BaseSourceEstimate, _BaseVectorSourceEstimate +from ..source_space._source_space import ( + SourceSpaces, + _get_src_nn, + _read_source_spaces_from_tree, + _set_source_space_vertices, + _src_kind_dict, + _write_source_spaces_to_fid, + find_source_space_hemi, +) +from ..surface import _normal_orth +from ..transforms import invert_transform, write_trans +from ..utils import ( + _check_compensation_grade, + _check_fname, + _check_option, + _check_stc_units, + _import_h5io_funcs, + _on_missing, + _stamp_to_dt, + _validate_type, + check_fname, + fill_doc, + get_subjects_dir, + has_mne_c, + logger, + repr_html, + run_subprocess, + verbose, + warn, +) class Forward(dict): @@ -132,70 +164,78 @@ def copy(self): """Copy the Forward instance.""" return Forward(deepcopy(self)) + @verbose + def save(self, fname, *, overwrite=False, verbose=None): + """Save the forward solution. + + Parameters + ---------- + %(fname_fwd)s + %(overwrite)s + %(verbose)s + """ + write_forward_solution(fname, self, overwrite=overwrite) + def _get_src_type_and_ori_for_repr(self): - src_types = np.array([src['type'] for src in self['src']]) - - if (src_types == 'surf').all(): - src_type = 'Surface with %d vertices' % self['nsource'] - elif (src_types == 'vol').all(): - src_type = 'Volume with %d grid points' % self['nsource'] - elif (src_types == 'discrete').all(): - src_type = 'Discrete with %d dipoles' % self['nsource'] + src_types = np.array([src["type"] for src in self["src"]]) + + if (src_types == "surf").all(): + src_type = f"Surface with {self['nsource']} vertices" + elif (src_types == "vol").all(): + src_type = f"Volume with {self['nsource']} grid points" + elif (src_types == "discrete").all(): + src_type = f"Discrete with {self['nsource']} dipoles" else: - count_string = '' - if (src_types == 'surf').any(): - count_string += '%d surface, ' % (src_types == 'surf').sum() - if (src_types == 'vol').any(): - count_string += '%d volume, ' % (src_types == 'vol').sum() - if (src_types == 'discrete').any(): - count_string += '%d discrete, ' \ - % (src_types == 'discrete').sum() - count_string = count_string.rstrip(', ') - src_type = ('Mixed (%s) with %d vertices' - % (count_string, self['nsource'])) - - if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI: - src_ori = 'Unknown' - elif self['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI: - src_ori = 'Fixed' - elif self['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI: - src_ori = 'Free' + count_string = "" + if (src_types == "surf").any(): + count_string += f"{(src_types == 'surf').sum()} surface, " + if (src_types == "vol").any(): + count_string += f"{(src_types == 'vol').sum()} volume, " + if (src_types == "discrete").any(): + count_string += f"{(src_types == 'discrete').sum()} discrete, " + count_string = count_string.rstrip(", ") + src_type = f"Mixed ({count_string}) with {self['nsource']} vertices" + + if self["source_ori"] == FIFF.FIFFV_MNE_UNKNOWN_ORI: + src_ori = "Unknown" + elif self["source_ori"] == FIFF.FIFFV_MNE_FIXED_ORI: + src_ori = "Fixed" + elif 
self["source_ori"] == FIFF.FIFFV_MNE_FREE_ORI: + src_ori = "Free" return src_type, src_ori def __repr__(self): """Summarize forward info instead of printing all.""" - entr = ' 0: - raise ValueError('Width of matrix must be a multiple of n') + raise ValueError("Width of matrix must be a multiple of n") tmp = np.arange(ma * bdn, dtype=np.int64).reshape(bdn, ma) tmp = np.tile(tmp, (1, n)) @@ -269,7 +309,7 @@ def _block_diag(A, n): jj = jj * np.ones(ma, dtype=np.int64)[:, None] jj = jj.T.ravel() # column indices foreach sparse bd - bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc() + bd = sparse.coo_array((A.T.ravel(), np.c_[ii, jj].T)).tocsc() return bd @@ -278,9 +318,8 @@ def _get_tag_int(fid, node, name, id_): """Check we have an appropriate tag.""" tag = find_tag(fid, node, id_) if tag is None: - fid.close() - raise ValueError(name + ' tag not found') - return int(tag.data) + raise ValueError(name + " tag not found") + return int(tag.data.item()) def _read_one(fid, node): @@ -290,42 +329,44 @@ def _read_one(fid, node): return None one = Forward() - one['source_ori'] = _get_tag_int(fid, node, 'Source orientation', - FIFF.FIFF_MNE_SOURCE_ORIENTATION) - one['coord_frame'] = _get_tag_int(fid, node, 'Coordinate frame', - FIFF.FIFF_MNE_COORD_FRAME) - one['nsource'] = _get_tag_int(fid, node, 'Number of sources', - FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS) - one['nchan'] = _get_tag_int(fid, node, 'Number of channels', - FIFF.FIFF_NCHAN) + one["source_ori"] = _get_tag_int( + fid, node, "Source orientation", FIFF.FIFF_MNE_SOURCE_ORIENTATION + ) + one["coord_frame"] = _get_tag_int( + fid, node, "Coordinate frame", FIFF.FIFF_MNE_COORD_FRAME + ) + one["nsource"] = _get_tag_int( + fid, node, "Number of sources", FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS + ) + one["nchan"] = _get_tag_int(fid, node, "Number of channels", FIFF.FIFF_NCHAN) try: - one['sol'] = _read_named_matrix(fid, node, - FIFF.FIFF_MNE_FORWARD_SOLUTION, - transpose=True) - one['_orig_sol'] = one['sol']['data'].copy() + one["sol"] = _read_named_matrix( + fid, node, FIFF.FIFF_MNE_FORWARD_SOLUTION, transpose=True + ) + one["_orig_sol"] = one["sol"]["data"].copy() except Exception: - logger.error('Forward solution data not found') + logger.error("Forward solution data not found") raise try: fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD - one['sol_grad'] = _read_named_matrix(fid, node, fwd_type, - transpose=True) - one['_orig_sol_grad'] = one['sol_grad']['data'].copy() + one["sol_grad"] = _read_named_matrix(fid, node, fwd_type, transpose=True) + one["_orig_sol_grad"] = one["sol_grad"]["data"].copy() except Exception: - one['sol_grad'] = None + one["sol_grad"] = None - if one['sol']['data'].shape[0] != one['nchan'] or \ - (one['sol']['data'].shape[1] != one['nsource'] and - one['sol']['data'].shape[1] != 3 * one['nsource']): - raise ValueError('Forward solution matrix has wrong dimensions') + if one["sol"]["data"].shape[0] != one["nchan"] or ( + one["sol"]["data"].shape[1] != one["nsource"] + and one["sol"]["data"].shape[1] != 3 * one["nsource"] + ): + raise ValueError("Forward solution matrix has wrong dimensions") - if one['sol_grad'] is not None: - if one['sol_grad']['data'].shape[0] != one['nchan'] or \ - (one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and - one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']): - raise ValueError('Forward solution gradient matrix has ' - 'wrong dimensions') + if one["sol_grad"] is not None: + if one["sol_grad"]["data"].shape[0] != one["nchan"] or ( + one["sol_grad"]["data"].shape[1] != 3 * 
one["nsource"] + and one["sol_grad"]["data"].shape[1] != 3 * 3 * one["nsource"] + ): + raise ValueError("Forward solution gradient matrix has wrong dimensions") return one @@ -352,30 +393,30 @@ def _read_forward_meas_info(tree, fid): # Information from the MRI file parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE) if len(parent_mri) == 0: - raise ValueError('No parent MEG information found in operator') + raise ValueError("No parent MEG information found in operator") parent_mri = parent_mri[0] tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME) - info['mri_file'] = tag.data if tag is not None else None + info["mri_file"] = tag.data if tag is not None else None tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID) - info['mri_id'] = tag.data if tag is not None else None + info["mri_id"] = tag.data if tag is not None else None # Information from the MEG file parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE) if len(parent_meg) == 0: - raise ValueError('No parent MEG information found in operator') + raise ValueError("No parent MEG information found in operator") parent_meg = parent_meg[0] tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME) - info['meas_file'] = tag.data if tag is not None else None + info["meas_file"] = tag.data if tag is not None else None tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID) - info['meas_id'] = tag.data if tag is not None else None + info["meas_id"] = tag.data if tag is not None else None # Add channel information - info['chs'] = chs = list() - for k in range(parent_meg['nent']): - kind = parent_meg['directory'][k].kind - pos = parent_meg['directory'][k].pos + info["chs"] = chs = list() + for k in range(parent_meg["nent"]): + kind = parent_meg["directory"][k].kind + pos = parent_meg["directory"][k].pos if kind == FIFF.FIFF_CH_INFO: tag = read_tag(fid, pos) chs.append(tag.data) @@ -389,49 +430,48 @@ def _read_forward_meas_info(tree, fid): coord_device = FIFF.FIFFV_COORD_DEVICE coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD if tag is None: - raise ValueError('MRI/head coordinate transformation not found') + raise ValueError("MRI/head coordinate transformation not found") cand = tag.data - if cand['from'] == coord_mri and cand['to'] == coord_head: - info['mri_head_t'] = cand + if cand["from"] == coord_mri and cand["to"] == coord_head: + info["mri_head_t"] = cand else: - raise ValueError('MRI/head coordinate transformation not found') + raise ValueError("MRI/head coordinate transformation not found") # Get the MEG device <-> head coordinate transformation tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS) if tag is None: - raise ValueError('MEG/head coordinate transformation not found') + raise ValueError("MEG/head coordinate transformation not found") cand = tag.data - if cand['from'] == coord_device and cand['to'] == coord_head: - info['dev_head_t'] = cand - elif cand['from'] == coord_ctf_head and cand['to'] == coord_head: - info['ctf_head_t'] = cand + if cand["from"] == coord_device and cand["to"] == coord_head: + info["dev_head_t"] = cand + elif cand["from"] == coord_ctf_head and cand["to"] == coord_head: + info["ctf_head_t"] = cand else: - raise ValueError('MEG/head coordinate transformation not found') + raise ValueError("MEG/head coordinate transformation not found") - info['bads'] = _read_bad_channels( - fid, parent_meg, ch_names_mapping=ch_names_mapping) + bads = _read_bad_channels(fid, parent_meg, ch_names_mapping=ch_names_mapping) # clean up our bad list, old versions could have non-existent bads 
- info['bads'] = [bad for bad in info['bads'] if bad in info['ch_names']] + info["bads"] = [bad for bad in bads if bad in info["ch_names"]] # Check if a custom reference has been applied tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_CUSTOM_REF) if tag is None: tag = find_tag(fid, parent_mri, 236) # Constant 236 used before v0.11 - info['custom_ref_applied'] = int(tag.data) if tag is not None else False + info["custom_ref_applied"] = int(tag.data.item()) if tag is not None else False info._unlocked = False return info def _subject_from_forward(forward): """Get subject id from inverse operator.""" - return forward['src']._subject + return forward["src"]._subject # This sets the forward solution order (and gives human-readable names) _FWD_ORDER = dict( - meg='MEG', - eeg='EEG', + meg="MEG", + eeg="EEG", ) @@ -453,38 +493,43 @@ def _merge_fwds(fwds, *, verbose=None): b = fwds[key] a_kind, b_kind = _FWD_ORDER[first_key], _FWD_ORDER[key] combined.append(b_kind) - if (a['sol']['data'].shape[1] != b['sol']['data'].shape[1] or - a['source_ori'] != b['source_ori'] or - a['nsource'] != b['nsource'] or - a['coord_frame'] != b['coord_frame']): + if ( + a["sol"]["data"].shape[1] != b["sol"]["data"].shape[1] + or a["source_ori"] != b["source_ori"] + or a["nsource"] != b["nsource"] + or a["coord_frame"] != b["coord_frame"] + ): raise ValueError( - f'The {a_kind} and {b_kind} forward solutions do not match') - for k in ('sol', 'sol_grad'): + f"The {a_kind} and {b_kind} forward solutions do not match" + ) + for k in ("sol", "sol_grad"): if a[k] is None: continue - a[k]['data'] = np.r_[a[k]['data'], b[k]['data']] - a[f'_orig_{k}'] = np.r_[a[f'_orig_{k}'], b[f'_orig_{k}']] - a[k]['nrow'] = a[k]['nrow'] + b[k]['nrow'] - a[k]['row_names'] = a[k]['row_names'] + b[k]['row_names'] - a['nchan'] = a['nchan'] + b['nchan'] + a[k]["data"] = np.r_[a[k]["data"], b[k]["data"]] + a[f"_orig_{k}"] = np.r_[a[f"_orig_{k}"], b[f"_orig_{k}"]] + a[k]["nrow"] = a[k]["nrow"] + b[k]["nrow"] + a[k]["row_names"] = a[k]["row_names"] + b[k]["row_names"] + a["nchan"] = a["nchan"] + b["nchan"] if len(fwds) > 1: - logger.info(f' Forward solutions combined: {", ".join(combined)}') + logger.info(f" Forward solutions combined: {', '.join(combined)}") return fwd @verbose -def read_forward_solution(fname, include=(), exclude=(), verbose=None): +def read_forward_solution(fname, include=(), exclude=(), *, ordered=True, verbose=None): """Read a forward solution a.k.a. lead field. Parameters ---------- fname : path-like - The file name, which should end with ``-fwd.fif`` or ``-fwd.fif.gz``. + The file name, which should end with ``-fwd.fif``, ``-fwd.fif.gz``, + ``_fwd.fif``, ``_fwd.fif.gz``, ``-fwd.h5``, or ``_fwd.h5``. include : list, optional List of names of channels to include. If empty all channels are included. exclude : list, optional List of names of channels to exclude. If empty include all channels. + %(ordered)s %(verbose)s Returns @@ -511,27 +556,32 @@ def read_forward_solution(fname, include=(), exclude=(), verbose=None): surface-based, fixed orientation cannot be reverted after loading the forward solution with :func:`read_forward_solution`. 
""" - check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz', - '_fwd.fif', '_fwd.fif.gz')) - fname = _check_fname(fname=fname, must_exist=True, overwrite='read') + check_fname( + fname, + "forward", + ("-fwd.fif", "-fwd.fif.gz", "_fwd.fif", "_fwd.fif.gz", "-fwd.h5", "_fwd.h5"), + ) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") # Open the file, create directory - logger.info('Reading forward solution from %s...' % fname) + logger.info(f"Reading forward solution from {fname}...") + if fname.suffix == ".h5": + return _read_forward_hdf5(fname) f, tree, _ = fiff_open(fname) with f as fid: # Find all forward solutions fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION) if len(fwds) == 0: - raise ValueError('No forward solutions in %s' % fname) + raise ValueError(f"No forward solutions in {fname}") # Parent MRI data parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE) if len(parent_mri) == 0: - raise ValueError('No parent MRI information in %s' % fname) + raise ValueError(f"No parent MRI information in {fname}") parent_mri = parent_mri[0] src = _read_source_spaces_from_tree(fid, tree, patch_stats=False) for s in src: - s['id'] = find_source_space_hemi(s) + s["id"] = find_source_space_hemi(s) fwd = None @@ -541,8 +591,7 @@ def read_forward_solution(fname, include=(), exclude=(), verbose=None): for k in range(len(fwds)): tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS) if tag is None: - raise ValueError('Methods not listed for one of the forward ' - 'solutions') + raise ValueError("Methods not listed for one of the forward solutions") if tag.data == FIFF.FIFFV_MNE_MEG: megnode = fwds[k] @@ -552,26 +601,34 @@ def read_forward_solution(fname, include=(), exclude=(), verbose=None): fwds = dict() megfwd = _read_one(fid, megnode) if megfwd is not None: - fwds['meg'] = megfwd + fwds["meg"] = megfwd if is_fixed_orient(megfwd): - ori = 'fixed' + ori = "fixed" else: - ori = 'free' - logger.info(' Read MEG forward solution (%d sources, ' - '%d channels, %s orientations)' - % (megfwd['nsource'], megfwd['nchan'], ori)) + ori = "free" + logger.info( + " Read MEG forward solution (%d sources, " + "%d channels, %s orientations)", + megfwd["nsource"], + megfwd["nchan"], + ori, + ) del megfwd eegfwd = _read_one(fid, eegnode) if eegfwd is not None: - fwds['eeg'] = eegfwd + fwds["eeg"] = eegfwd if is_fixed_orient(eegfwd): - ori = 'fixed' + ori = "fixed" else: - ori = 'free' - logger.info(' Read EEG forward solution (%d sources, ' - '%d channels, %s orientations)' - % (eegfwd['nsource'], eegfwd['nchan'], ori)) + ori = "free" + logger.info( + " Read EEG forward solution (%d sources, " + "%d channels, %s orientations)", + eegfwd["nsource"], + eegfwd["nchan"], + ori, + ) del eegfwd fwd = _merge_fwds(fwds) @@ -580,22 +637,24 @@ def read_forward_solution(fname, include=(), exclude=(), verbose=None): # Get the MRI <-> head coordinate transformation tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS) if tag is None: - raise ValueError('MRI/head coordinate transformation not found') + raise ValueError("MRI/head coordinate transformation not found") mri_head_t = tag.data - if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or - mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD): + if ( + mri_head_t["from"] != FIFF.FIFFV_COORD_MRI + or mri_head_t["to"] != FIFF.FIFFV_COORD_HEAD + ): mri_head_t = invert_transform(mri_head_t) - if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or - mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD): - fid.close() - raise ValueError('MRI/head coordinate 
transformation not ' - 'found') - fwd['mri_head_t'] = mri_head_t + if ( + mri_head_t["from"] != FIFF.FIFFV_COORD_MRI + or mri_head_t["to"] != FIFF.FIFFV_COORD_HEAD + ): + raise ValueError("MRI/head coordinate transformation not found") + fwd["mri_head_t"] = mri_head_t # # get parent MEG info # - fwd['info'] = _read_forward_meas_info(tree, fid) + fwd["info"] = _read_forward_meas_info(tree, fid) # MNE environment parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV) @@ -603,67 +662,62 @@ def read_forward_solution(fname, include=(), exclude=(), verbose=None): parent_env = parent_env[0] tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR) if tag is not None: - with fwd['info']._unlock(): - fwd['info']['working_dir'] = tag.data + with fwd["info"]._unlock(): + fwd["info"]["working_dir"] = tag.data tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE) if tag is not None: - with fwd['info']._unlock(): - fwd['info']['command_line'] = tag.data + with fwd["info"]._unlock(): + fwd["info"]["command_line"] = tag.data # Transform the source spaces to the correct coordinate frame # if necessary # Make sure forward solution is in either the MRI or HEAD coordinate frame - if fwd['coord_frame'] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD): - raise ValueError('Only forward solutions computed in MRI or head ' - 'coordinates are acceptable') + if fwd["coord_frame"] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD): + raise ValueError( + "Only forward solutions computed in MRI or head coordinates are acceptable" + ) # Transform each source space to the HEAD or MRI coordinate frame, # depending on the coordinate frame of the forward solution - # NOTE: the function transform_surface_to will also work on discrete and - # volume sources - nuse = 0 - for s in src: - try: - s = transform_surface_to(s, fwd['coord_frame'], mri_head_t) - except Exception as inst: - raise ValueError('Could not transform source space (%s)' % inst) - - nuse += s['nuse'] + src._transform_to(fwd["coord_frame"], mri_head_t) + nuse = sum(s["nuse"] for s in src) # Make sure the number of sources match after transformation - if nuse != fwd['nsource']: - raise ValueError('Source spaces do not match the forward solution.') + if nuse != fwd["nsource"]: + raise ValueError("Source spaces do not match the forward solution.") - logger.info(' Source spaces transformed to the forward solution ' - 'coordinate frame') - fwd['src'] = src + logger.info( + " Source spaces transformed to the forward solution coordinate frame" + ) + fwd["src"] = src # Handle the source locations and orientations - fwd['source_rr'] = np.concatenate([ss['rr'][ss['vertno'], :] - for ss in src], axis=0) + fwd["source_rr"] = np.concatenate([ss["rr"][ss["vertno"], :] for ss in src], axis=0) # Store original source orientations - fwd['_orig_source_ori'] = fwd['source_ori'] + fwd["_orig_source_ori"] = fwd["source_ori"] # Deal with include and exclude pick_channels_forward(fwd, include=include, exclude=exclude, copy=False) if is_fixed_orient(fwd, orig=True): - fwd['source_nn'] = np.concatenate([_src['nn'][_src['vertno'], :] - for _src in fwd['src']], axis=0) - fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI - fwd['surf_ori'] = True + fwd["source_nn"] = np.concatenate( + [_src["nn"][_src["vertno"], :] for _src in fwd["src"]], axis=0 + ) + fwd["source_ori"] = FIFF.FIFFV_MNE_FIXED_ORI + fwd["surf_ori"] = True else: - fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3)) - fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI - fwd['surf_ori'] = False + 
fwd["source_nn"] = np.kron(np.ones((fwd["nsource"], 1)), np.eye(3)) + fwd["source_ori"] = FIFF.FIFFV_MNE_FREE_ORI + fwd["surf_ori"] = False return Forward(fwd) @verbose -def convert_forward_solution(fwd, surf_ori=False, force_fixed=False, - copy=True, use_cps=True, verbose=None): +def convert_forward_solution( + fwd, surf_ori=False, force_fixed=False, copy=True, use_cps=True, *, verbose=None +): """Convert forward solution between different source orientations. Parameters @@ -685,29 +739,33 @@ def convert_forward_solution(fwd, surf_ori=False, force_fixed=False, fwd : Forward The modified forward solution. """ - from scipy import sparse fwd = fwd.copy() if copy else fwd if force_fixed is True: surf_ori = True - if any([src['type'] == 'vol' for src in fwd['src']]) and force_fixed: + if any([src["type"] == "vol" for src in fwd["src"]]) and force_fixed: raise ValueError( - 'Forward operator was generated with sources from a ' - 'volume source space. Conversion to fixed orientation is not ' - 'possible. Consider using a discrete source space if you have ' - 'meaningful normal orientations.') + "Forward operator was generated with sources from a " + "volume source space. Conversion to fixed orientation is not " + "possible. Consider using a discrete source space if you have " + "meaningful normal orientations." + ) if surf_ori and use_cps: - if any(s.get('patch_inds') is not None for s in fwd['src']): - logger.info(' Average patch normals will be employed in ' - 'the rotation to the local surface coordinates..' - '..') + if any(s.get("patch_inds") is not None for s in fwd["src"]): + logger.info( + " Average patch normals will be employed in " + "the rotation to the local surface coordinates.." + ".." + ) else: use_cps = False - logger.info(' No patch info available. The standard source ' - 'space normals will be employed in the rotation ' - 'to the local surface coordinates....') + logger.info( + " No patch info available. The standard source " + "space normals will be employed in the rotation " + "to the local surface coordinates...." + ) # We need to change these entries (only): # 1. source_nn @@ -719,78 +777,79 @@ def convert_forward_solution(fwd, surf_ori=False, force_fixed=False, if is_fixed_orient(fwd, orig=True) or (force_fixed and not use_cps): # Fixed - fwd['source_nn'] = np.concatenate([_get_src_nn(s, use_cps) - for s in fwd['src']], axis=0) + fwd["source_nn"] = np.concatenate( + [_get_src_nn(s, use_cps) for s in fwd["src"]], axis=0 + ) if not is_fixed_orient(fwd, orig=True): - logger.info(' Changing to fixed-orientation forward ' - 'solution with surface-based source orientations...') - fix_rot = _block_diag(fwd['source_nn'].T, 1) + logger.info( + " Changing to fixed-orientation forward " + "solution with surface-based source orientations..." 
+ ) + fix_rot = _block_diag(fwd["source_nn"].T, 1) # newer versions of numpy require explicit casting here, so *= no # longer works - fwd['sol']['data'] = (fwd['_orig_sol'] * - fix_rot).astype('float32') - fwd['sol']['ncol'] = fwd['nsource'] - if fwd['sol_grad'] is not None: + fwd["sol"]["data"] = (fwd["_orig_sol"] @ fix_rot).astype("float32") + fwd["sol"]["ncol"] = fwd["nsource"] + if fwd["sol_grad"] is not None: x = sparse.block_diag([fix_rot] * 3) - fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod - fwd['sol_grad']['ncol'] = 3 * fwd['nsource'] - fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI - fwd['surf_ori'] = True + fwd["sol_grad"]["data"] = fwd["_orig_sol_grad"] @ x + fwd["sol_grad"]["ncol"] = 3 * fwd["nsource"] + fwd["source_ori"] = FIFF.FIFFV_MNE_FIXED_ORI + fwd["surf_ori"] = True elif surf_ori: # Free, surf-oriented # Rotate the local source coordinate systems - fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3)) - logger.info(' Converting to surface-based source orientations...') + fwd["source_nn"] = np.kron(np.ones((fwd["nsource"], 1)), np.eye(3)) + logger.info(" Converting to surface-based source orientations...") # Actually determine the source orientations pp = 0 - for s in fwd['src']: - if s['type'] in ['surf', 'discrete']: + for s in fwd["src"]: + if s["type"] in ["surf", "discrete"]: nn = _get_src_nn(s, use_cps) - stop = pp + 3 * s['nuse'] - fwd['source_nn'][pp:stop] = _normal_orth(nn).reshape(-1, 3) + stop = pp + 3 * s["nuse"] + fwd["source_nn"][pp:stop] = _normal_orth(nn).reshape(-1, 3) pp = stop del nn else: - pp += 3 * s['nuse'] + pp += 3 * s["nuse"] # Rotate the solution components as well if force_fixed: - fwd['source_nn'] = fwd['source_nn'][2::3, :] - fix_rot = _block_diag(fwd['source_nn'].T, 1) + fwd["source_nn"] = fwd["source_nn"][2::3, :] + fix_rot = _block_diag(fwd["source_nn"].T, 1) # newer versions of numpy require explicit casting here, so *= no # longer works - fwd['sol']['data'] = (fwd['_orig_sol'] * - fix_rot).astype('float32') - fwd['sol']['ncol'] = fwd['nsource'] - if fwd['sol_grad'] is not None: + fwd["sol"]["data"] = (fwd["_orig_sol"] @ fix_rot).astype("float32") + fwd["sol"]["ncol"] = fwd["nsource"] + if fwd["sol_grad"] is not None: x = sparse.block_diag([fix_rot] * 3) - fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod - fwd['sol_grad']['ncol'] = 3 * fwd['nsource'] - fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI - fwd['surf_ori'] = True + fwd["sol_grad"]["data"] = fwd["_orig_sol_grad"] @ x + fwd["sol_grad"]["ncol"] = 3 * fwd["nsource"] + fwd["source_ori"] = FIFF.FIFFV_MNE_FIXED_ORI + fwd["surf_ori"] = True else: - surf_rot = _block_diag(fwd['source_nn'].T, 3) - fwd['sol']['data'] = fwd['_orig_sol'] * surf_rot - fwd['sol']['ncol'] = 3 * fwd['nsource'] - if fwd['sol_grad'] is not None: + surf_rot = _block_diag(fwd["source_nn"].T, 3) + fwd["sol"]["data"] = fwd["_orig_sol"] @ surf_rot + fwd["sol"]["ncol"] = 3 * fwd["nsource"] + if fwd["sol_grad"] is not None: x = sparse.block_diag([surf_rot] * 3) - fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod - fwd['sol_grad']['ncol'] = 9 * fwd['nsource'] - fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI - fwd['surf_ori'] = True + fwd["sol_grad"]["data"] = fwd["_orig_sol_grad"] @ x + fwd["sol_grad"]["ncol"] = 9 * fwd["nsource"] + fwd["source_ori"] = FIFF.FIFFV_MNE_FREE_ORI + fwd["surf_ori"] = True else: # Free, cartesian - logger.info(' Cartesian source orientations...') - fwd['source_nn'] = np.tile(np.eye(3), (fwd['nsource'], 1)) - fwd['sol']['data'] = 
fwd['_orig_sol'].copy() - fwd['sol']['ncol'] = 3 * fwd['nsource'] - if fwd['sol_grad'] is not None: - fwd['sol_grad']['data'] = fwd['_orig_sol_grad'].copy() - fwd['sol_grad']['ncol'] = 9 * fwd['nsource'] - fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI - fwd['surf_ori'] = False - - logger.info(' [done]') + logger.info(" Cartesian source orientations...") + fwd["source_nn"] = np.tile(np.eye(3), (fwd["nsource"], 1)) + fwd["sol"]["data"] = fwd["_orig_sol"].copy() + fwd["sol"]["ncol"] = 3 * fwd["nsource"] + if fwd["sol_grad"] is not None: + fwd["sol_grad"]["data"] = fwd["_orig_sol_grad"].copy() + fwd["sol_grad"]["ncol"] = 9 * fwd["nsource"] + fwd["source_ori"] = FIFF.FIFFV_MNE_FREE_ORI + fwd["surf_ori"] = False + + logger.info(" [done]") return fwd @@ -801,9 +860,7 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None): Parameters ---------- - fname : path-like - File name to save the forward solution to. It should end with - ``-fwd.fif`` or ``-fwd.fif.gz``. + %(fname_fwd)s fwd : Forward Forward solution. %(overwrite)s @@ -828,13 +885,32 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None): surface-based, fixed orientation cannot be reverted after loading the forward solution with :func:`read_forward_solution`. """ - check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz', - '_fwd.fif', '_fwd.fif.gz')) + check_fname( + fname, + "forward", + ("-fwd.fif", "-fwd.fif.gz", "_fwd.fif", "_fwd.fif.gz", "-fwd.h5", "_fwd.h5"), + ) # check for file existence and expand `~` if present fname = _check_fname(fname, overwrite) - with start_and_end_file(fname) as fid: - _write_forward_solution(fid, fwd) + if fname.suffix == ".h5": + _write_forward_hdf5(fname, fwd) + else: + with start_and_end_file(fname) as fid: + _write_forward_solution(fid, fwd) + + +def _write_forward_hdf5(fname, fwd): + _, write_hdf5 = _import_h5io_funcs() + write_hdf5(fname, dict(fwd=fwd), overwrite=True) + + +def _read_forward_hdf5(fname): + read_hdf5, _ = _import_h5io_funcs() + fwd = Forward(read_hdf5(fname)["fwd"]) + fwd["info"] = Info(fwd["info"]) + fwd["src"] = SourceSpaces(fwd["src"]) + return fwd def _write_forward_solution(fid, fwd): @@ -845,10 +921,10 @@ def _write_forward_solution(fid, fwd): # start_block(fid, FIFF.FIFFB_MNE_ENV) write_id(fid, FIFF.FIFF_BLOCK_ID) - data = fwd['info'].get('working_dir', None) + data = fwd["info"].get("working_dir", None) if data is not None: write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data) - data = fwd['info'].get('command_line', None) + data = fwd["info"].get("command_line", None) if data is not None: write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data) end_block(fid, FIFF.FIFFB_MNE_ENV) @@ -857,118 +933,131 @@ def _write_forward_solution(fid, fwd): # Information from the MRI file # start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE) - write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file']) - if fwd['info']['mri_id'] is not None: - write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id']) + write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd["info"]["mri_file"]) + if fwd["info"]["mri_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd["info"]["mri_id"]) # store the MRI to HEAD transform in MRI file - write_coord_trans(fid, fwd['info']['mri_head_t']) + write_coord_trans(fid, fwd["info"]["mri_head_t"]) end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE) # write measurement info - write_forward_meas_info(fid, fwd['info']) + write_forward_meas_info(fid, fwd["info"]) # invert our original source space transform - src = list() - for s in 
fwd['src']: - s = deepcopy(s) - try: - # returns source space to original coordinate frame - # usually MRI - s = transform_surface_to(s, fwd['mri_head_t']['from'], - fwd['mri_head_t']) - except Exception as inst: - raise ValueError('Could not transform source space (%s)' % inst) - src.append(s) + src = fwd["src"].copy() + # returns source space to original coordinate frame, usually MRI + src._transform_to(fwd["mri_head_t"]["from"], fwd["mri_head_t"]) # # Write the source spaces (again) # _write_source_spaces_to_fid(fid, src) - n_vert = sum([ss['nuse'] for ss in src]) - if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI: + n_vert = sum([ss["nuse"] for ss in src]) + if fwd["_orig_source_ori"] == FIFF.FIFFV_MNE_FIXED_ORI: n_col = n_vert else: n_col = 3 * n_vert # Undo transformations - sol = fwd['_orig_sol'].copy() - if fwd['sol_grad'] is not None: - sol_grad = fwd['_orig_sol_grad'].copy() + sol = fwd["_orig_sol"].copy() + if fwd["sol_grad"] is not None: + sol_grad = fwd["_orig_sol_grad"].copy() else: sol_grad = None - if fwd['surf_ori'] is True: - if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI: - warn('The forward solution, which is stored on disk now, is based ' - 'on a forward solution with fixed orientation. Please note ' - 'that the transformation to surface-based, fixed orientation ' - 'cannot be reverted after loading the forward solution with ' - 'read_forward_solution.', RuntimeWarning) + if fwd["surf_ori"] is True: + if fwd["_orig_source_ori"] == FIFF.FIFFV_MNE_FIXED_ORI: + warn( + "The forward solution, which is stored on disk now, is based " + "on a forward solution with fixed orientation. Please note " + "that the transformation to surface-based, fixed orientation " + "cannot be reverted after loading the forward solution with " + "read_forward_solution.", + RuntimeWarning, + ) else: - warn('This forward solution is based on a forward solution with ' - 'free orientation. The original forward solution is stored ' - 'on disk in X/Y/Z RAS coordinates. Any transformation ' - '(surface orientation or fixed orientation) will be ' - 'reverted. To reapply any transformation to the forward ' - 'operator please apply convert_forward_solution after ' - 'reading the forward solution with read_forward_solution.', - RuntimeWarning) + warn( + "This forward solution is based on a forward solution with " + "free orientation. The original forward solution is stored " + "on disk in X/Y/Z RAS coordinates. Any transformation " + "(surface orientation or fixed orientation) will be " + "reverted. 
To reapply any transformation to the forward " + "operator please apply convert_forward_solution after " + "reading the forward solution with read_forward_solution.", + RuntimeWarning, + ) # # MEG forward solution # - picks_meg = pick_types(fwd['info'], meg=True, eeg=False, ref_meg=False, - exclude=[]) - picks_eeg = pick_types(fwd['info'], meg=False, eeg=True, ref_meg=False, - exclude=[]) + picks_meg = pick_types(fwd["info"], meg=True, eeg=False, ref_meg=False, exclude=[]) + picks_eeg = pick_types(fwd["info"], meg=False, eeg=True, ref_meg=False, exclude=[]) n_meg = len(picks_meg) n_eeg = len(picks_eeg) - row_names_meg = [fwd['sol']['row_names'][p] for p in picks_meg] - row_names_eeg = [fwd['sol']['row_names'][p] for p in picks_eeg] + row_names_meg = [fwd["sol"]["row_names"][p] for p in picks_meg] + row_names_eeg = [fwd["sol"]["row_names"][p] for p in picks_eeg] if n_meg > 0: - meg_solution = dict(data=sol[picks_meg], nrow=n_meg, ncol=n_col, - row_names=row_names_meg, col_names=[]) + meg_solution = dict( + data=sol[picks_meg], + nrow=n_meg, + ncol=n_col, + row_names=row_names_meg, + col_names=[], + ) _transpose_named_matrix(meg_solution) start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG) - write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame']) - write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, - fwd['_orig_source_ori']) + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd["coord_frame"]) + write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, fwd["_orig_source_ori"]) write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert) write_int(fid, FIFF.FIFF_NCHAN, n_meg) write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution) if sol_grad is not None: - meg_solution_grad = dict(data=sol_grad[picks_meg], - nrow=n_meg, ncol=n_col * 3, - row_names=row_names_meg, col_names=[]) + meg_solution_grad = dict( + data=sol_grad[picks_meg], + nrow=n_meg, + ncol=n_col * 3, + row_names=row_names_meg, + col_names=[], + ) _transpose_named_matrix(meg_solution_grad) - write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD, - meg_solution_grad) + write_named_matrix( + fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD, meg_solution_grad + ) end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) # # EEG forward solution # if n_eeg > 0: - eeg_solution = dict(data=sol[picks_eeg], nrow=n_eeg, ncol=n_col, - row_names=row_names_eeg, col_names=[]) + eeg_solution = dict( + data=sol[picks_eeg], + nrow=n_eeg, + ncol=n_col, + row_names=row_names_eeg, + col_names=[], + ) _transpose_named_matrix(eeg_solution) start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG) - write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame']) - write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, - fwd['_orig_source_ori']) + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd["coord_frame"]) + write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, fwd["_orig_source_ori"]) write_int(fid, FIFF.FIFF_NCHAN, n_eeg) write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert) write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution) if sol_grad is not None: - eeg_solution_grad = dict(data=sol_grad[picks_eeg], - nrow=n_eeg, ncol=n_col * 3, - row_names=row_names_eeg, col_names=[]) + eeg_solution_grad = dict( + data=sol_grad[picks_eeg], + nrow=n_eeg, + ncol=n_col * 3, + row_names=row_names_eeg, + col_names=[], + ) _transpose_named_matrix(eeg_solution_grad) - write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD, - eeg_solution_grad) 
+ write_named_matrix( + fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD, eeg_solution_grad + ) end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) end_block(fid, FIFF.FIFFB_MNE) @@ -991,9 +1080,9 @@ def is_fixed_orient(forward, orig=False): Whether or not it is fixed orientation. """ if orig: # if we want to know about the original version - fixed_ori = (forward['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI) + fixed_ori = forward["_orig_source_ori"] == FIFF.FIFFV_MNE_FIXED_ORI else: # most of the time we want to know about the current version - fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI) + fixed_ori = forward["source_ori"] == FIFF.FIFFV_MNE_FIXED_ORI return fixed_ori @@ -1012,25 +1101,24 @@ def write_forward_meas_info(fid, info): # Information from the MEG file # start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE) - write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file']) - if info['meas_id'] is not None: - write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id']) + write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info["meas_file"]) + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info["meas_id"]) # get transformation from CTF and DEVICE to HEAD coordinate frame - meg_head_t = info.get('dev_head_t', info.get('ctf_head_t')) + meg_head_t = info.get("dev_head_t", info.get("ctf_head_t")) if meg_head_t is None: - fid.close() - raise ValueError('Head<-->sensor transform not found') + raise ValueError("Head<-->sensor transform not found") write_coord_trans(fid, meg_head_t) ch_names_mapping = dict() - if 'chs' in info: + if "chs" in info: # Channel information - ch_names_mapping = _make_ch_names_mapping(info['chs']) - write_int(fid, FIFF.FIFF_NCHAN, len(info['chs'])) - _write_ch_infos(fid, info['chs'], False, ch_names_mapping) - if 'bads' in info and len(info['bads']) > 0: + ch_names_mapping = _make_ch_names_mapping(info["chs"]) + write_int(fid, FIFF.FIFF_NCHAN, len(info["chs"])) + _write_ch_infos(fid, info["chs"], False, ch_names_mapping) + if "bads" in info and len(info["bads"]) > 0: # Bad channels - _write_bad_channels(fid, info['bads'], ch_names_mapping) + _write_bad_channels(fid, info["bads"], ch_names_mapping) end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE) @@ -1038,82 +1126,88 @@ def write_forward_meas_info(fid, info): def _select_orient_forward(forward, info, noise_cov=None, copy=True): """Prepare forward solution for inverse solvers.""" # fwd['sol']['row_names'] may be different order from fwd['info']['chs'] - fwd_sol_ch_names = forward['sol']['row_names'] + fwd_sol_ch_names = forward["sol"]["row_names"] all_ch_names = set(fwd_sol_ch_names) - all_bads = set(info['bads']) + all_bads = set(info["bads"]) if noise_cov is not None: - all_ch_names &= set(noise_cov['names']) - all_bads |= set(noise_cov['bads']) + all_ch_names &= set(noise_cov["names"]) + all_bads |= set(noise_cov["bads"]) else: - noise_cov = dict(bads=info['bads']) - ch_names = [c['ch_name'] for c in info['chs'] - if c['ch_name'] not in all_bads and - c['ch_name'] in all_ch_names] - - if not len(info['bads']) == len(noise_cov['bads']) or \ - not all(b in noise_cov['bads'] for b in info['bads']): - logger.info('info["bads"] and noise_cov["bads"] do not match, ' - 'excluding bad channels from both') + noise_cov = dict(bads=info["bads"]) + ch_names = [ + c["ch_name"] + for c in info["chs"] + if c["ch_name"] not in all_bads and c["ch_name"] in all_ch_names + ] + + if not len(info["bads"]) == len(noise_cov["bads"]) or not all( + b in noise_cov["bads"] for b in info["bads"] + ): + logger.info( + 
'info["bads"] and noise_cov["bads"] do not match, ' + "excluding bad channels from both" + ) # check the compensation grade - _check_compensation_grade(forward['info'], info, 'forward') + _check_compensation_grade(forward["info"], info, "forward") n_chan = len(ch_names) - logger.info("Computing inverse operator with %d channels." % n_chan) - forward = pick_channels_forward(forward, ch_names, ordered=True, - copy=copy) - info_idx = [info['ch_names'].index(name) for name in ch_names] + logger.info("Computing inverse operator with %d channels.", n_chan) + forward = pick_channels_forward(forward, ch_names, ordered=True, copy=copy) + info_idx = [info["ch_names"].index(name) for name in ch_names] info_picked = pick_info(info, info_idx) - forward['info']._check_consistency() + forward["info"]._check_consistency() info_picked._check_consistency() return forward, info_picked -def _triage_loose(src, loose, fixed='auto'): - _validate_type(loose, (str, dict, 'numeric'), 'loose') - _validate_type(fixed, (str, bool), 'fixed') +def _triage_loose(src, loose, fixed="auto"): + _validate_type(loose, (str, dict, "numeric"), "loose") + _validate_type(fixed, (str, bool), "fixed") orig_loose = loose if isinstance(loose, str): - _check_option('loose', loose, ('auto',)) + _check_option("loose", loose, ("auto",)) if fixed is True: - loose = 0. + loose = 0.0 else: # False or auto - loose = 0.2 if src.kind == 'surface' else 1. - src_types = set(_src_kind_dict[s['type']] for s in src) + loose = 0.2 if src.kind == "surface" else 1.0 + src_types = set(_src_kind_dict[s["type"]] for s in src) if not isinstance(loose, dict): loose = float(loose) loose = {key: loose for key in src_types} loose_keys = set(loose.keys()) if loose_keys != src_types: raise ValueError( - f'loose, if dict, must have keys {sorted(src_types)} to match the ' - f'source space, got {sorted(loose_keys)}') + f"loose, if dict, must have keys {sorted(src_types)} to match the " + f"source space, got {sorted(loose_keys)}" + ) # if fixed is auto it can be ignored, if it's False it can be ignored, # only really need to care about fixed=True if fixed is True: - if not all(v == 0. for v in loose.values()): + if not all(v == 0.0 for v in loose.values()): raise ValueError( - 'When using fixed=True, loose must be 0. or "auto", ' - f'got {orig_loose}') + f'When using fixed=True, loose must be 0. or "auto", got {orig_loose}' + ) elif fixed is False: - if any(v == 0. for v in loose.values()): + if any(v == 0.0 for v in loose.values()): raise ValueError( - 'If loose==0., then fixed must be True or "auto", got False') + 'If loose==0., then fixed must be True or "auto", got False' + ) del fixed for key, this_loose in loose.items(): - if key not in ('surface', 'discrete') and this_loose != 1: + if key not in ("surface", "discrete") and this_loose != 1: raise ValueError( 'loose parameter has to be 1 or "auto" for non-surface/' - f'discrete source spaces, got loose["{key}"] = {this_loose}') + f'discrete source spaces, got loose["{key}"] = {this_loose}' + ) if not 0 <= this_loose <= 1: - raise ValueError( - f'loose ({key}) must be between 0 and 1, got {this_loose}') + raise ValueError(f"loose ({key}) must be between 0 and 1, got {this_loose}") return loose @verbose -def compute_orient_prior(forward, loose='auto', verbose=None): +def compute_orient_prior(forward, loose="auto", verbose=None): """Compute orientation prior. 
Parameters @@ -1132,40 +1226,46 @@ def compute_orient_prior(forward, loose='auto', verbose=None): -------- compute_depth_prior """ - _validate_type(forward, Forward, 'forward') - n_sources = forward['sol']['data'].shape[1] + _validate_type(forward, Forward, "forward") + n_sources = forward["sol"]["data"].shape[1] - loose = _triage_loose(forward['src'], loose) + loose = _triage_loose(forward["src"], loose) orient_prior = np.ones(n_sources, dtype=np.float64) if is_fixed_orient(forward): - if any(v > 0. for v in loose.values()): - raise ValueError('loose must be 0. with forward operator ' - 'with fixed orientation, got %s' % (loose,)) + if any(v > 0.0 for v in loose.values()): + raise ValueError( + "loose must be 0. with forward operator " + f"with fixed orientation, got {loose}" + ) return orient_prior - if all(v == 1. for v in loose.values()): + if all(v == 1.0 for v in loose.values()): return orient_prior # We actually need non-unity prior, compute it for each source space # separately - if not forward['surf_ori']: - raise ValueError('Forward operator is not oriented in surface ' - 'coordinates. loose parameter should be 1. ' - 'not %s.' % (loose,)) + if not forward["surf_ori"]: + raise ValueError( + "Forward operator is not oriented in surface " + "coordinates. loose parameter should be 1. " + f"not {loose}." + ) start = 0 logged = dict() - for s in forward['src']: - this_type = _src_kind_dict[s['type']] + for s in forward["src"]: + this_type = _src_kind_dict[s["type"]] use_loose = loose[this_type] if not logged.get(this_type): - if use_loose == 1.: - name = 'free' + if use_loose == 1.0: + name = "free" else: - name = 'fixed' if use_loose == 0. else 'loose' - logger.info(f'Applying {name.ljust(5)} dipole orientations to ' - f'{this_type.ljust(7)} source spaces: {use_loose}') + name = "fixed" if use_loose == 0.0 else "loose" + logger.info( + f"Applying {name.ljust(5)} dipole orientations to " + f"{this_type.ljust(7)} source spaces: {use_loose}" + ) logged[this_type] = True - stop = start + 3 * s['nuse'] + stop = start + 3 * s["nuse"] orient_prior[start:stop:3] *= use_loose - orient_prior[start + 1:stop:3] *= use_loose + orient_prior[start + 1 : stop : 3] *= use_loose start = stop return orient_prior @@ -1173,27 +1273,38 @@ def compute_orient_prior(forward, loose='auto', verbose=None): def _restrict_gain_matrix(G, info): """Restrict gain matrix entries for optimal depth weighting.""" # Figure out which ones have been used - if len(info['chs']) != G.shape[0]: - raise ValueError('G.shape[0] (%d) and length of info["chs"] (%d) ' - 'do not match' % (G.shape[0], len(info['chs']))) + if len(info["chs"]) != G.shape[0]: + raise ValueError( + f'G.shape[0] ({G.shape[0]}) and length of info["chs"] ({len(info["chs"])}) ' + "do not match." 
+ ) for meg, eeg, kind in ( - ('grad', False, 'planar'), - ('mag', False, 'magnetometer or axial gradiometer'), - (False, True, 'EEG')): + ("grad", False, "planar"), + ("mag", False, "magnetometer or axial gradiometer"), + (False, True, "EEG"), + ): sel = pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[]) if len(sel) > 0: - logger.info(' %d %s channels' % (len(sel), kind)) + logger.info(" %d %s channels", len(sel), kind) break else: - warn('Could not find MEG or EEG channels to limit depth channels') + warn("Could not find MEG or EEG channels to limit depth channels") sel = slice(None) return G[sel] @verbose -def compute_depth_prior(forward, info, exp=0.8, limit=10.0, - limit_depth_chs=False, combine_xyz='spectral', - noise_cov=None, rank=None, verbose=None): +def compute_depth_prior( + forward, + info, + exp=0.8, + limit=10.0, + limit_depth_chs=False, + combine_xyz="spectral", + noise_cov=None, + rank=None, + verbose=None, +): """Compute depth prior for depth weighting. Parameters @@ -1274,39 +1385,43 @@ def compute_depth_prior(forward, info, exp=0.8, limit=10.0, SI units (such as EEG being orders of magnitude larger than MEG). """ from ..cov import Covariance, compute_whitener - _validate_type(forward, Forward, 'forward') - patch_areas = forward.get('patch_areas', None) + + _validate_type(forward, Forward, "forward") + patch_areas = forward.get("patch_areas", None) is_fixed_ori = is_fixed_orient(forward) - G = forward['sol']['data'] - logger.info('Creating the depth weighting matrix...') - _validate_type(noise_cov, (Covariance, None), 'noise_cov', - 'Covariance or None') - _validate_type(limit_depth_chs, (str, bool), 'limit_depth_chs') + G = forward["sol"]["data"] + logger.info("Creating the depth weighting matrix...") + _validate_type(noise_cov, (Covariance, None), "noise_cov", "Covariance or None") + _validate_type(limit_depth_chs, (str, bool), "limit_depth_chs") if isinstance(limit_depth_chs, str): - if limit_depth_chs != 'whiten': - raise ValueError('limit_depth_chs, if str, must be "whiten", got ' - '%s' % (limit_depth_chs,)) + if limit_depth_chs != "whiten": + raise ValueError( + f'limit_depth_chs, if str, must be "whiten", got {limit_depth_chs}' + ) if not isinstance(noise_cov, Covariance): - raise ValueError('With limit_depth_chs="whiten", noise_cov must be' - ' a Covariance, got %s' % (type(noise_cov),)) + raise ValueError( + 'With limit_depth_chs="whiten", noise_cov must be' + f" a Covariance, got {type(noise_cov)}" + ) if combine_xyz is not False: # private / expert option - _check_option('combine_xyz', combine_xyz, ('fro', 'spectral')) + _check_option("combine_xyz", combine_xyz, ("fro", "spectral")) # If possible, pick best depth-weighting channels if limit_depth_chs is True: G = _restrict_gain_matrix(G, info) - elif limit_depth_chs == 'whiten': - whitener, _ = compute_whitener(noise_cov, info, pca=True, rank=rank, - verbose=False) + elif limit_depth_chs == "whiten": + whitener, _ = compute_whitener( + noise_cov, info, pca=True, rank=rank, verbose=False + ) G = np.dot(whitener, G) # Compute the gain matrix - if is_fixed_ori or combine_xyz in ('fro', False): - d = np.sum(G ** 2, axis=0) + if is_fixed_ori or combine_xyz in ("fro", False): + d = np.sum(G**2, axis=0) if not (is_fixed_ori or combine_xyz is False): d = d.reshape(-1, 3).sum(axis=1) # Spherical leadfield can be zero at the center - d[d == 0.] 
= np.min(d[d != 0.]) + d[d == 0.0] = np.min(d[d != 0.0]) else: # 'spectral' # n_pos = G.shape[1] // 3 # The following is equivalent to this, but 4-10x faster @@ -1316,22 +1431,24 @@ def compute_depth_prior(forward, info, exp=0.8, limit=10.0, # x = np.dot(Gk.T, Gk) # d[k] = linalg.svdvals(x)[0] G.shape = (G.shape[0], -1, 3) - d = np.linalg.norm(np.einsum('svj,svk->vjk', G, G), # vector dot prods - ord=2, axis=(1, 2)) # ord=2 spectral (largest s.v.) + d = np.linalg.norm( + np.einsum("svj,svk->vjk", G, G), # vector dot prods + ord=2, # ord=2 spectral (largest s.v.) + axis=(1, 2), + ) G.shape = (G.shape[0], -1) # XXX Currently the fwd solns never have "patch_areas" defined if patch_areas is not None: if not is_fixed_ori and combine_xyz is False: patch_areas = np.repeat(patch_areas, 3) - d /= patch_areas ** 2 - logger.info(' Patch areas taken into account in the depth ' - 'weighting') + d /= patch_areas**2 + logger.info(" Patch areas taken into account in the depth weighting") w = 1.0 / d if limit is not None: ws = np.sort(w) - weight_limit = limit ** 2 + weight_limit = limit**2 if limit_depth_chs is False: # match old mne-python behavior # we used to do ind = np.argmin(ws), but this is 0 by sort above @@ -1346,13 +1463,13 @@ def compute_depth_prior(forward, info, exp=0.8, limit=10.0, limit = ws[ind] n_limit = ind - logger.info(' limit = %d/%d = %f' - % (n_limit + 1, len(d), - np.sqrt(limit / ws[0]))) + logger.info( + " limit = %d/%d = %f", n_limit + 1, len(d), np.sqrt(limit / ws[0]) + ) scale = 1.0 / limit - logger.info(' scale = %g exp = %g' % (scale, exp)) + logger.info(f" scale = {scale:g} exp = {exp:g}") w = np.minimum(w / limit, 1) - depth_prior = w ** exp + depth_prior = w**exp if not (is_fixed_ori or combine_xyz is False): depth_prior = np.repeat(depth_prior, 3) @@ -1360,8 +1477,9 @@ def compute_depth_prior(forward, info, exp=0.8, limit=10.0, return depth_prior -def _stc_src_sel(src, stc, on_missing='raise', - extra=', likely due to forward calculations'): +def _stc_src_sel( + src, stc, on_missing="raise", extra=", likely due to forward calculations" +): """Select the vertex indices of a source space using a source estimate.""" if isinstance(stc, list): vertices = stc @@ -1370,14 +1488,16 @@ def _stc_src_sel(src, stc, on_missing='raise', vertices = stc.vertices del stc if not len(src) == len(vertices): - raise RuntimeError('Mismatch between number of source spaces (%s) and ' - 'STC vertices (%s)' % (len(src), len(vertices))) + raise RuntimeError( + f"Mismatch between number of source spaces ({len(src)}) and " + f"STC vertices ({len(vertices)})" + ) src_sels, stc_sels, out_vertices = [], [], [] src_offset = stc_offset = 0 for s, v in zip(src, vertices): - joint_sel = np.intersect1d(s['vertno'], v) - src_sels.append(np.searchsorted(s['vertno'], joint_sel) + src_offset) - src_offset += len(s['vertno']) + joint_sel = np.intersect1d(s["vertno"], v) + src_sels.append(np.searchsorted(s["vertno"], joint_sel) + src_offset) + src_offset += len(s["vertno"]) idx = np.searchsorted(v, joint_sel) stc_sels.append(idx + stc_offset) stc_offset += len(v) @@ -1389,19 +1509,19 @@ def _stc_src_sel(src, stc, on_missing='raise', n_stc = sum(len(v) for v in vertices) n_joint = len(src_sel) if n_joint != n_stc: - msg = ('Only %i of %i SourceEstimate %s found in ' - 'source space%s' - % (n_joint, n_stc, 'vertex' if n_stc == 1 else 'vertices', - extra)) + msg = ( + f"Only {n_joint} of {n_stc} SourceEstimate " + f"{'vertex' if n_stc == 1 else 'vertices'} found in source space{extra}" + ) _on_missing(on_missing, 
msg) return src_sel, stc_sel, out_vertices def _fill_measurement_info(info, fwd, sfreq, data): """Fill the measurement info of a Raw or Evoked object.""" - sel = pick_channels(info['ch_names'], fwd['sol']['row_names']) + sel = pick_channels(info["ch_names"], fwd["sol"]["row_names"], ordered=False) info = pick_info(info, sel) - info['bads'] = [] + info["bads"] = [] now = time() sec = np.floor(now) @@ -1409,41 +1529,49 @@ def _fill_measurement_info(info, fwd, sfreq, data): # this is probably correct based on what's done in meas_info.py... with info._unlock(check_after=True): - info.update(meas_id=fwd['info']['meas_id'], file_id=info['meas_id'], - meas_date=_stamp_to_dt((int(sec), int(usec))), - highpass=0., lowpass=sfreq / 2., sfreq=sfreq, projs=[]) + info.update( + meas_id=fwd["info"]["meas_id"], + file_id=info["meas_id"], + meas_date=_stamp_to_dt((int(sec), int(usec))), + highpass=0.0, + lowpass=sfreq / 2.0, + sfreq=sfreq, + projs=[], + ) # reorder data (which is in fwd order) to match that of info - order = [fwd['sol']['row_names'].index(name) for name in info['ch_names']] + order = [fwd["sol"]["row_names"].index(name) for name in info["ch_names"]] data = data[order] return info, data @verbose -def _apply_forward(fwd, stc, start=None, stop=None, on_missing='raise', - use_cps=True, verbose=None): +def _apply_forward( + fwd, stc, start=None, stop=None, on_missing="raise", use_cps=True, verbose=None +): """Apply forward model and return data, times, ch_names.""" - _validate_type(stc, _BaseSourceEstimate, 'stc', 'SourceEstimate') - _validate_type(fwd, Forward, 'fwd') + _validate_type(stc, _BaseSourceEstimate, "stc", "SourceEstimate") + _validate_type(fwd, Forward, "fwd") if isinstance(stc, _BaseVectorSourceEstimate): vector = True fwd = convert_forward_solution(fwd, force_fixed=False, surf_ori=False) else: vector = False if not is_fixed_orient(fwd): - fwd = convert_forward_solution(fwd, force_fixed=True, - use_cps=use_cps) + fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=use_cps) if np.all(stc.data > 0): - warn('Source estimate only contains currents with positive values. ' - 'Use pick_ori="normal" when computing the inverse to compute ' - 'currents not current magnitudes.') + warn( + "Source estimate only contains currents with positive values. " + 'Use pick_ori="normal" when computing the inverse to compute ' + "currents not current magnitudes." + ) _check_stc_units(stc) - src_sel, stc_sel, _ = _stc_src_sel(fwd['src'], stc, on_missing=on_missing) - gain = fwd['sol']['data'] + src_sel, stc_sel, _ = _stc_src_sel(fwd["src"], stc, on_missing=on_missing) + gain = fwd["sol"]["data"] stc_sel = slice(None) if len(stc_sel) == len(stc.data) else stc_sel times = stc.times[start:stop].copy() stc_data = stc.data[stc_sel, ..., start:stop].reshape(-1, len(times)) @@ -1453,15 +1581,23 @@ def _apply_forward(fwd, stc, start=None, stop=None, on_missing='raise', gain = gain[:, src_sel].reshape(len(gain), -1) # save some memory if possible - logger.info('Projecting source estimate to sensor space...') + logger.info("Projecting source estimate to sensor space...") data = np.dot(gain, stc_data) - logger.info('[done]') + logger.info("[done]") return data, times @verbose -def apply_forward(fwd, stc, info, start=None, stop=None, use_cps=True, - on_missing='raise', verbose=None): +def apply_forward( + fwd, + stc, + info, + start=None, + stop=None, + use_cps=True, + on_missing="raise", + verbose=None, +): """Project source space currents to sensor space using a forward operator. 
The sensor space data is computed for all channels present in fwd. Use @@ -1502,19 +1638,21 @@ def apply_forward(fwd, stc, info, start=None, stop=None, use_cps=True, -------- apply_forward_raw: Compute sensor space data and return a Raw object. """ - _validate_type(info, Info, 'info') - _validate_type(fwd, Forward, 'forward') + _validate_type(info, Info, "info") + _validate_type(fwd, Forward, "forward") info._check_consistency() # make sure evoked_template contains all channels in fwd - for ch_name in fwd['sol']['row_names']: - if ch_name not in info['ch_names']: - raise ValueError('Channel %s of forward operator not present in ' - 'evoked_template.' % ch_name) + for ch_name in fwd["sol"]["row_names"]: + if ch_name not in info["ch_names"]: + raise ValueError( + f"Channel {ch_name} of forward operator not present in evoked_template." + ) # project the source estimate to the sensor space - data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing, - use_cps=use_cps) + data, times = _apply_forward( + fwd, stc, start, stop, on_missing=on_missing, use_cps=use_cps + ) # fill the measurement info sfreq = float(1.0 / stc.tstep) @@ -1529,8 +1667,16 @@ def apply_forward(fwd, stc, info, start=None, stop=None, use_cps=True, @verbose -def apply_forward_raw(fwd, stc, info, start=None, stop=None, - on_missing='raise', use_cps=True, verbose=None): +def apply_forward_raw( + fwd, + stc, + info, + start=None, + stop=None, + on_missing="raise", + use_cps=True, + verbose=None, +): """Project source space currents to sensor space using a forward operator. The sensor space data is computed for all channels present in fwd. Use @@ -1572,19 +1718,21 @@ def apply_forward_raw(fwd, stc, info, start=None, stop=None, apply_forward: Compute sensor space data and return an Evoked object. """ # make sure info contains all channels in fwd - for ch_name in fwd['sol']['row_names']: - if ch_name not in info['ch_names']: - raise ValueError('Channel %s of forward operator not present in ' - 'info.' % ch_name) + for ch_name in fwd["sol"]["row_names"]: + if ch_name not in info["ch_names"]: + raise ValueError( + f"Channel {ch_name} of forward operator not present in info." + ) # project the source estimate to the sensor space - data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing, - use_cps=use_cps) + data, times = _apply_forward( + fwd, stc, start, stop, on_missing=on_missing, use_cps=use_cps + ) sfreq = 1.0 / stc.tstep info, data = _fill_measurement_info(info, fwd, sfreq, data) with info._unlock(): - info['projs'] = [] + info["projs"] = [] # store sensor data in Raw object using the info raw = RawArray(data, info, first_samp=int(np.round(times[0] * sfreq))) raw._projector = None @@ -1592,7 +1740,7 @@ def apply_forward_raw(fwd, stc, info, start=None, stop=None, @fill_doc -def restrict_forward_to_stc(fwd, stc, on_missing='ignore'): +def restrict_forward_to_stc(fwd, stc, on_missing="ignore"): """Restrict forward operator to active sources in a source estimate. 
Parameters @@ -1615,9 +1763,9 @@ def restrict_forward_to_stc(fwd, stc, on_missing='ignore'): -------- restrict_forward_to_label """ - _validate_type(on_missing, str, 'on_missing') - _check_option('on_missing', on_missing, ('ignore', 'warn', 'raise')) - src_sel, _, vertices = _stc_src_sel(fwd['src'], stc, on_missing=on_missing) + _validate_type(on_missing, str, "on_missing") + _check_option("on_missing", on_missing, ("ignore", "warn", "raise")) + src_sel, _, _ = _stc_src_sel(fwd["src"], stc, on_missing=on_missing) del stc return _restrict_forward_to_src_sel(fwd, src_sel) @@ -1625,46 +1773,47 @@ def restrict_forward_to_stc(fwd, stc, on_missing='ignore'): def _restrict_forward_to_src_sel(fwd, src_sel): fwd_out = deepcopy(fwd) # figure out the vertno we are keeping - idx_sel = np.concatenate([[[si] * len(s['vertno']), s['vertno']] - for si, s in enumerate(fwd['src'])], axis=-1) + idx_sel = np.concatenate( + [[[si] * len(s["vertno"]), s["vertno"]] for si, s in enumerate(fwd["src"])], + axis=-1, + ) assert idx_sel.ndim == 2 and idx_sel.shape[0] == 2 - assert idx_sel.shape[1] == fwd['nsource'] + assert idx_sel.shape[1] == fwd["nsource"] idx_sel = idx_sel[:, src_sel] - fwd_out['source_rr'] = fwd['source_rr'][src_sel] - fwd_out['nsource'] = len(src_sel) + fwd_out["source_rr"] = fwd["source_rr"][src_sel] + fwd_out["nsource"] = len(src_sel) if is_fixed_orient(fwd): idx = src_sel - if fwd['sol_grad'] is not None: + if fwd["sol_grad"] is not None: idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel() else: idx = (3 * src_sel[:, None] + np.arange(3)).ravel() - if fwd['sol_grad'] is not None: + if fwd["sol_grad"] is not None: idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel() - fwd_out['source_nn'] = fwd['source_nn'][idx] - fwd_out['sol']['data'] = fwd['sol']['data'][:, idx] - if fwd['sol_grad'] is not None: - fwd_out['sol_grad']['data'] = fwd['sol_grad']['data'][:, idx_grad] - fwd_out['sol']['ncol'] = len(idx) + fwd_out["source_nn"] = fwd["source_nn"][idx] + fwd_out["sol"]["data"] = fwd["sol"]["data"][:, idx] + if fwd["sol_grad"] is not None: + fwd_out["sol_grad"]["data"] = fwd["sol_grad"]["data"][:, idx_grad] + fwd_out["sol"]["ncol"] = len(idx) if is_fixed_orient(fwd, orig=True): idx = src_sel - if fwd['sol_grad'] is not None: + if fwd["sol_grad"] is not None: idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel() else: idx = (3 * src_sel[:, None] + np.arange(3)).ravel() - if fwd['sol_grad'] is not None: + if fwd["sol_grad"] is not None: idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel() - fwd_out['_orig_sol'] = fwd['_orig_sol'][:, idx] - if fwd['sol_grad'] is not None: - fwd_out['_orig_sol_grad'] = fwd['_orig_sol_grad'][:, idx_grad] + fwd_out["_orig_sol"] = fwd["_orig_sol"][:, idx] + if fwd["sol_grad"] is not None: + fwd_out["_orig_sol_grad"] = fwd["_orig_sol_grad"][:, idx_grad] - vertices = [idx_sel[1][idx_sel[0] == si] - for si in range(len(fwd_out['src']))] - _set_source_space_vertices(fwd_out['src'], vertices) + vertices = [idx_sel[1][idx_sel[0] == si] for si in range(len(fwd_out["src"]))] + _set_source_space_vertices(fwd_out["src"], vertices) return fwd_out @@ -1696,92 +1845,37 @@ def restrict_forward_to_label(fwd, labels): # Get vertices separately of each hemisphere from all label for label in labels: _validate_type(label, Label, "label", "Label or list") - i = 0 if label.hemi == 'lh' else 1 + i = 0 if label.hemi == "lh" else 1 vertices[i] = np.append(vertices[i], label.vertices) # Remove duplicates and sort vertices = [np.unique(vert_hemi) for vert_hemi in vertices] - - 
fwd_out = deepcopy(fwd) - fwd_out['source_rr'] = np.zeros((0, 3)) - fwd_out['nsource'] = 0 - fwd_out['source_nn'] = np.zeros((0, 3)) - fwd_out['sol']['data'] = np.zeros((fwd['sol']['data'].shape[0], 0)) - fwd_out['_orig_sol'] = np.zeros((fwd['_orig_sol'].shape[0], 0)) - if fwd['sol_grad'] is not None: - fwd_out['sol_grad']['data'] = np.zeros( - (fwd['sol_grad']['data'].shape[0], 0)) - fwd_out['_orig_sol_grad'] = np.zeros( - (fwd['_orig_sol_grad'].shape[0], 0)) - fwd_out['sol']['ncol'] = 0 - nuse_lh = fwd['src'][0]['nuse'] - - for i in range(2): - fwd_out['src'][i]['vertno'] = np.array([], int) - fwd_out['src'][i]['nuse'] = 0 - fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy() - fwd_out['src'][i]['inuse'].fill(0) - fwd_out['src'][i]['use_tris'] = np.array([[]], int) - fwd_out['src'][i]['nuse_tri'] = np.array([0]) - - # src_sel is idx to cols in fwd that are in any label per hemi - src_sel = np.intersect1d(fwd['src'][i]['vertno'], vertices[i]) - src_sel = np.searchsorted(fwd['src'][i]['vertno'], src_sel) - - # Reconstruct each src - vertno = fwd['src'][i]['vertno'][src_sel] - fwd_out['src'][i]['inuse'][vertno] = 1 - fwd_out['src'][i]['nuse'] += len(vertno) - fwd_out['src'][i]['vertno'] = np.where(fwd_out['src'][i]['inuse'])[0] - - # Reconstruct part of fwd that is not sol data - src_sel += i * nuse_lh # Add column shift to right hemi - fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'], - fwd['source_rr'][src_sel]]) - fwd_out['nsource'] += len(src_sel) - - if is_fixed_orient(fwd): - idx = src_sel - if fwd['sol_grad'] is not None: - idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel() - else: - idx = (3 * src_sel[:, None] + np.arange(3)).ravel() - if fwd['sol_grad'] is not None: - idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel() - - fwd_out['source_nn'] = np.vstack( - [fwd_out['source_nn'], fwd['source_nn'][idx]]) - fwd_out['sol']['data'] = np.hstack( - [fwd_out['sol']['data'], fwd['sol']['data'][:, idx]]) - if fwd['sol_grad'] is not None: - fwd_out['sol_grad']['data'] = np.hstack( - [fwd_out['sol_grad']['data'], - fwd['sol_rad']['data'][:, idx_grad]]) - fwd_out['sol']['ncol'] += len(idx) - - if is_fixed_orient(fwd, orig=True): - idx = src_sel - if fwd['sol_grad'] is not None: - idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel() - else: - idx = (3 * src_sel[:, None] + np.arange(3)).ravel() - if fwd['sol_grad'] is not None: - idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel() - - fwd_out['_orig_sol'] = np.hstack( - [fwd_out['_orig_sol'], fwd['_orig_sol'][:, idx]]) - if fwd['sol_grad'] is not None: - fwd_out['_orig_sol_grad'] = np.hstack( - [fwd_out['_orig_sol_grad'], - fwd['_orig_sol_grad'][:, idx_grad]]) - - return fwd_out + vertices = [ + vert_hemi[np.isin(vert_hemi, s["vertno"])] + for vert_hemi, s in zip(vertices, fwd["src"]) + ] + src_sel, _, _ = _stc_src_sel(fwd["src"], vertices, on_missing="raise") + return _restrict_forward_to_src_sel(fwd, src_sel) -def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None, - mindist=None, bem=None, mri=None, trans=None, - eeg=True, meg=True, fixed=False, grad=False, - mricoord=False, overwrite=False, subjects_dir=None, - verbose=None): +def _do_forward_solution( + subject, + meas, + fname=None, + src=None, + spacing=None, + mindist=None, + bem=None, + mri=None, + trans=None, + eeg=True, + meg=True, + fixed=False, + grad=False, + mricoord=False, + overwrite=False, + subjects_dir=None, + verbose=None, +): """Calculate a forward solution for a subject using MNE-C routines. 
This is kept around for testing purposes. @@ -1847,7 +1941,7 @@ def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None, The generated forward solution. """ if not has_mne_c(): - raise RuntimeError('mne command line tools could not be found') + raise RuntimeError("mne command line tools could not be found") # check for file existence temp_dir = Path(tempfile.mkdtemp()) @@ -1857,9 +1951,9 @@ def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None, _validate_type(subject, "str", "subject") # check for meas to exist as string, or try to make evoked - _validate_type(meas, ('path-like', BaseRaw, BaseEpochs, Evoked), 'meas') - if isinstance(meas, (BaseRaw, BaseEpochs, Evoked)): - meas_file = op.join(temp_dir, 'info.fif') + _validate_type(meas, ("path-like", BaseRaw, BaseEpochs, Evoked), "meas") + if isinstance(meas, BaseRaw | BaseEpochs | Evoked): + meas_file = op.join(temp_dir, "info.fif") write_info(meas_file, meas.info) meas = meas_file else: @@ -1867,11 +1961,11 @@ def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None, # deal with trans/mri if mri is not None and trans is not None: - raise ValueError('trans and mri cannot both be specified') + raise ValueError("trans and mri cannot both be specified") if mri is None and trans is None: # MNE allows this to default to a trans/mri in the subject's dir, # but let's be safe here and force the user to pass us a trans/mri - raise ValueError('Either trans or mri must be specified') + raise ValueError("Either trans or mri must be specified") if trans is not None: if isinstance(trans, dict): @@ -1880,9 +1974,11 @@ def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None, try: write_trans(trans, trans_data) except Exception: - raise IOError('trans was a dict, but could not be ' - 'written to disk as a transform file') - elif isinstance(trans, (str, Path, PathLike)): + raise OSError( + "trans was a dict, but could not be " + "written to disk as a transform file" + ) + elif isinstance(trans, str | Path | PathLike): _check_fname(trans, "read", must_exist=True, name="trans") trans = Path(trans) else: @@ -1894,9 +1990,11 @@ def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None, try: write_trans(mri, mri_data) except Exception: - raise IOError('mri was a dict, but could not be ' - 'written to disk as a transform file') - elif isinstance(mri, (str, Path, PathLike)): + raise OSError( + "mri was a dict, but could not be " + "written to disk as a transform file" + ) + elif isinstance(mri, str | Path | PathLike): _check_fname(mri, "read", must_exist=True, name="mri") mri = Path(mri) else: @@ -1904,37 +2002,45 @@ def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None, # deal with meg/eeg if not meg and not eeg: - raise ValueError('meg or eeg (or both) must be True') + raise ValueError("meg or eeg (or both) must be True") if not fname.suffix == ".fif": - raise ValueError('Forward name does not end with .fif') + raise ValueError("Forward name does not end with .fif") path = fname.parent.absolute() fname = fname.name # deal with mindist if mindist is not None: if isinstance(mindist, str): - if not mindist.lower() == 'all': + if not mindist.lower() == "all": raise ValueError('mindist, if string, must be "all"') - mindist = ['--all'] + mindist = ["--all"] else: - mindist = ['--mindist', '%g' % mindist] + mindist = ["--mindist", f"{mindist:g}"] # src, spacing, bem - for element, name, kind in zip((src, spacing, bem), - ("src", "spacing", "bem"), - 
('path-like', 'str', 'path-like')): + for element, name, kind in zip( + (src, spacing, bem), + ("src", "spacing", "bem"), + ("path-like", "str", "path-like"), + ): if element is not None: - _validate_type(element, kind, name, "%s or None" % kind) + _validate_type(element, kind, name, f"{kind} or None") # put together the actual call - cmd = ['mne_do_forward_solution', - '--subject', subject, - '--meas', meas, - '--fwd', fname, - '--destdir', str(path)] + cmd = [ + "mne_do_forward_solution", + "--subject", + subject, + "--meas", + meas, + "--fwd", + fname, + "--destdir", + str(path), + ] if src is not None: - cmd += ['--src', src] + cmd += ["--src", src] if spacing is not None: if spacing.isdigit(): pass # spacing in mm @@ -1942,37 +2048,39 @@ def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None, # allow both "ico4" and "ico-4" style values match = re.match(r"(oct|ico)-?(\d+)$", spacing) if match is None: - raise ValueError("Invalid spacing parameter: %r" % spacing) - spacing = '-'.join(match.groups()) - cmd += ['--spacing', spacing] + raise ValueError(f"Invalid spacing parameter: {spacing!r}") + spacing = "-".join(match.groups()) + cmd += ["--spacing", spacing] if mindist is not None: cmd += mindist if bem is not None: - cmd += ['--bem', bem] + cmd += ["--bem", bem] if mri is not None: - cmd += ['--mri', '%s' % str(mri.absolute())] + cmd += ["--mri", f"{mri.absolute()}"] if trans is not None: - cmd += ['--trans', '%s' % str(trans.absolute())] + cmd += ["--trans", f"{trans.absolute()}"] if not meg: - cmd.append('--eegonly') + cmd.append("--eegonly") if not eeg: - cmd.append('--megonly') + cmd.append("--megonly") if fixed: - cmd.append('--fixed') + cmd.append("--fixed") if grad: - cmd.append('--grad') + cmd.append("--grad") if mricoord: - cmd.append('--mricoord') + cmd.append("--mricoord") if overwrite: - cmd.append('--overwrite') + cmd.append("--overwrite") env = os.environ.copy() subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) - env['SUBJECTS_DIR'] = subjects_dir + env["SUBJECTS_DIR"] = subjects_dir try: - logger.info('Running forward solution generation command with ' - 'subjects_dir %s' % subjects_dir) + logger.info( + "Running forward solution generation command with " + f"subjects_dir {subjects_dir}" + ) run_subprocess(cmd, env=env) except Exception: raise @@ -2006,19 +2114,19 @@ def average_forward_solutions(fwds, weights=None, verbose=None): # check for fwds being a list _validate_type(fwds, list, "fwds") if not len(fwds) > 0: - raise ValueError('fwds must not be empty') + raise ValueError("fwds must not be empty") # check weights if weights is None: weights = np.ones(len(fwds)) weights = np.asanyarray(weights) # in case it's a list, convert it if not np.all(weights >= 0): - raise ValueError('weights must be non-negative') + raise ValueError("weights must be non-negative") if not len(weights) == len(fwds): - raise ValueError('weights must be None or the same length as fwds') + raise ValueError("weights must be None or the same length as fwds") w_sum = np.sum(weights) if not w_sum > 0: - raise ValueError('weights cannot all be zero') + raise ValueError("weights cannot all be zero") weights /= w_sum # check our forward solutions @@ -2026,32 +2134,49 @@ def average_forward_solutions(fwds, weights=None, verbose=None): # check to make sure it's a forward solution _validate_type(fwd, dict, "each entry in fwds", "dict") # check to make sure the dict is actually a fwd - check_keys = ['info', 'sol_grad', 'nchan', 'src', 'source_nn', 'sol', - 
'source_rr', 'source_ori', 'surf_ori', 'coord_frame', - 'mri_head_t', 'nsource'] + check_keys = [ + "info", + "sol_grad", + "nchan", + "src", + "source_nn", + "sol", + "source_rr", + "source_ori", + "surf_ori", + "coord_frame", + "mri_head_t", + "nsource", + ] if not all(key in fwd for key in check_keys): - raise KeyError('forward solution dict does not have all standard ' - 'entries, cannot compute average.') + raise KeyError( + "forward solution dict does not have all standard " + "entries, cannot compute average." + ) # check forward solution compatibility - if any(fwd['sol'][k] != fwds[0]['sol'][k] - for fwd in fwds[1:] for k in ['nrow', 'ncol']): - raise ValueError('Forward solutions have incompatible dimensions') - if any(fwd[k] != fwds[0][k] for fwd in fwds[1:] - for k in ['source_ori', 'surf_ori', 'coord_frame']): - raise ValueError('Forward solutions have incompatible orientations') + if any( + fwd["sol"][k] != fwds[0]["sol"][k] for fwd in fwds[1:] for k in ["nrow", "ncol"] + ): + raise ValueError("Forward solutions have incompatible dimensions") + if any( + fwd[k] != fwds[0][k] + for fwd in fwds[1:] + for k in ["source_ori", "surf_ori", "coord_frame"] + ): + raise ValueError("Forward solutions have incompatible orientations") # actually average them (solutions and gradients) fwd_ave = deepcopy(fwds[0]) - fwd_ave['sol']['data'] *= weights[0] - fwd_ave['_orig_sol'] *= weights[0] + fwd_ave["sol"]["data"] *= weights[0] + fwd_ave["_orig_sol"] *= weights[0] for fwd, w in zip(fwds[1:], weights[1:]): - fwd_ave['sol']['data'] += w * fwd['sol']['data'] - fwd_ave['_orig_sol'] += w * fwd['_orig_sol'] - if fwd_ave['sol_grad'] is not None: - fwd_ave['sol_grad']['data'] *= weights[0] - fwd_ave['_orig_sol_grad'] *= weights[0] + fwd_ave["sol"]["data"] += w * fwd["sol"]["data"] + fwd_ave["_orig_sol"] += w * fwd["_orig_sol"] + if fwd_ave["sol_grad"] is not None: + fwd_ave["sol_grad"]["data"] *= weights[0] + fwd_ave["_orig_sol_grad"] *= weights[0] for fwd, w in zip(fwds[1:], weights[1:]): - fwd_ave['sol_grad']['data'] += w * fwd['sol_grad']['data'] - fwd_ave['_orig_sol_grad'] += w * fwd['_orig_sol_grad'] + fwd_ave["sol_grad"]["data"] += w * fwd["sol_grad"]["data"] + fwd_ave["_orig_sol_grad"] += w * fwd["_orig_sol_grad"] return fwd_ave diff --git a/mne/forward/tests/__init__.py b/mne/forward/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/forward/tests/__init__.py +++ b/mne/forward/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/forward/tests/test_field_interpolation.py b/mne/forward/tests/test_field_interpolation.py index 9adf3915870..57b204d97af 100644 --- a/mne/forward/tests/test_field_interpolation.py +++ b/mne/forward/tests/test_field_interpolation.py @@ -1,35 +1,44 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
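# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): the weight handling in
# ``average_forward_solutions`` above validates non-negative weights,
# normalizes them by their sum, and accumulates a weighted sum of each
# solution's gain matrix (``fwd["sol"]["data"]``) and, when present, its
# gradient. The toy arrays below merely stand in for real gain matrices;
# their shapes and values are invented for demonstration.
import numpy as np

gains = [np.random.RandomState(seed).randn(4, 6) for seed in range(3)]  # fake n_chan x n_src
weights = np.asanyarray([1.0, 2.0, 1.0], dtype=float)

if not np.all(weights >= 0):
    raise ValueError("weights must be non-negative")
if not weights.sum() > 0:
    raise ValueError("weights cannot all be zero")
weights = weights / weights.sum()  # normalize, as the function does with w_sum

avg = weights[0] * gains[0]
for gain, w in zip(gains[1:], weights[1:]):
    avg += w * gain  # accumulate the remaining weighted gain matrices

# Sanity check: the loop is equivalent to a single weighted sum.
np.testing.assert_allclose(avg, sum(w * g for w, g in zip(weights, gains)))
# ---------------------------------------------------------------------------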
+ from os import path as op from pathlib import Path import numpy as np +import pytest from numpy.polynomial import legendre -from numpy.testing import (assert_allclose, assert_array_equal, assert_equal, - assert_array_almost_equal) +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, +) from scipy.interpolate import interp1d -import pytest - import mne +from mne import Epochs, make_fixed_length_events, pick_types, read_evokeds +from mne.datasets import testing from mne.forward import _make_surface_mapping, make_field_map -from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg, - _get_legen_table, _do_cross_dots) -from mne.forward._make_forward import _create_meg_coils from mne.forward._field_interpolation import _setup_dots -from mne.surface import get_meg_helmet_surf, get_head_surf -from mne.datasets import testing -from mne import read_evokeds, pick_types, make_fixed_length_events, Epochs +from mne.forward._lead_dots import ( + _comp_sum_eeg, + _comp_sums_meg, + _do_cross_dots, + _get_legen_table, +) +from mne.forward._make_forward import _create_meg_coils from mne.io import read_raw_fif +from mne.surface import get_head_surf, get_meg_helmet_surf - -base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') -raw_fname = op.join(base_dir, 'test_raw.fif') -evoked_fname = op.join(base_dir, 'test-ave.fif') -raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif') +base_dir = op.join(op.dirname(__file__), "..", "..", "io", "tests", "data") +raw_fname = op.join(base_dir, "test_raw.fif") +evoked_fname = op.join(base_dir, "test-ave.fif") +raw_ctf_fname = op.join(base_dir, "test_ctf_raw.fif") data_path = testing.data_path(download=False) -trans_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc-trans.fif') -subjects_dir = op.join(data_path, 'subjects') +trans_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc-trans.fif") +subjects_dir = op.join(data_path, "subjects") @testing.requires_testing_data @@ -39,31 +48,38 @@ def test_field_map_ctf(): raw.apply_gradient_compensation(3) events = make_fixed_length_events(raw, duration=0.5) evoked = Epochs(raw, events).average() - evoked.pick_channels(evoked.ch_names[:50]) # crappy mapping but faster + evoked.pick(evoked.ch_names[:50]) # crappy mapping but faster # smoke test - passing trans_fname as pathlib.Path as additional check - make_field_map(evoked, trans=Path(trans_fname), subject='sample', - subjects_dir=subjects_dir) + # set origin to "(0.0, 0.0, 0.04)", which was the default until v1.12 + # estimating origin from "auto" impossible due to missing digitization points + make_field_map( + evoked, + trans=Path(trans_fname), + subject="sample", + subjects_dir=subjects_dir, + origin=(0.0, 0.0, 0.04), + ) def test_legendre_val(): """Test Legendre polynomial (derivative) equivalence.""" rng = np.random.RandomState(0) # check table equiv - xs = np.linspace(-1., 1., 1000) + xs = np.linspace(-1.0, 1.0, 1000) n_terms = 100 # True, numpy vals_np = legendre.legvander(xs, n_terms - 1) # Table approximation - for nc, interp in zip([100, 50], ['nearest', 'linear']): - lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True) - lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp, - axis=0) + for nc, interp in zip([100, 50], ["nearest", "linear"]): + lut, n_fact = _get_legen_table("eeg", n_coeff=nc, force_calc=True) + lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp, axis=0) vals_i = lut_fun(xs) # Need a "1:" 
here because we omit the first coefficient in our table! - assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i, - rtol=1e-2, atol=5e-3) + assert_allclose( + vals_np[:, 1 : vals_i.shape[1] + 1], vals_i, rtol=1e-2, atol=5e-3 + ) # Now let's look at our sums ctheta = rng.rand(20, 30) * 2.0 - 1.0 @@ -74,24 +90,27 @@ def test_legendre_val(): # compare to numpy n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis] coeffs = np.zeros((n_terms,) + beta.shape) - coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) * - (2.0 * n + 1.0) * (2.0 * n + 1.0) / n) + coeffs[1:] = ( + np.cumprod([beta] * (n_terms - 1), axis=0) + * (2.0 * n + 1.0) + * (2.0 * n + 1.0) + / n + ) # can't use tensor=False here b/c it isn't in old numpy c2 = np.empty((20, 30)) for ci1 in range(20): for ci2 in range(30): - c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2], - coeffs[:, ci1, ci2]) + c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2], coeffs[:, ci1, ci2]) assert_allclose(c1, c2, 1e-2, 1e-3) # close enough... # compare fast and slow for MEG ctheta = rng.rand(20 * 30) * 2.0 - 1.0 beta = rng.rand(20 * 30) * 0.8 - lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True) - fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'nearest', axis=0) + lut, n_fact = _get_legen_table("meg", n_coeff=10, force_calc=True) + fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, "nearest", axis=0) coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False) - lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True) - fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'linear', axis=0) + lut, n_fact = _get_legen_table("meg", n_coeff=20, force_calc=True) + fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, "linear", axis=0) coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False) @@ -99,10 +118,10 @@ def test_legendre_table(): """Test Legendre table calculation.""" # double-check our table generation n = 10 - for ch_type in ['eeg', 'meg']: + for ch_type in ["eeg", "meg"]: lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True) - lut1 = lut1[:, :n - 1].copy() - n_fact1 = n_fact1[:n - 1].copy() + lut1 = lut1[:, : n - 1].copy() + n_fact1 = n_fact1[: n - 1].copy() lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True) assert_allclose(lut1, lut2) assert_allclose(n_fact1, n_fact2) @@ -111,77 +130,104 @@ def test_legendre_table(): @testing.requires_testing_data def test_make_field_map_eeg(): """Test interpolation of EEG field onto head.""" - evoked = read_evokeds(evoked_fname, condition='Left Auditory') - evoked.info['bads'] = ['MEG 2443', 'EEG 053'] # add some bads - surf = get_head_surf('sample', subjects_dir=subjects_dir) + evoked = read_evokeds(evoked_fname, condition="Left Auditory") + evoked.info["bads"] = ["MEG 2443", "EEG 053"] # add some bads + surf = get_head_surf("sample", subjects_dir=subjects_dir) # we must have trans if surface is in MRI coords - pytest.raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg') + pytest.raises( + ValueError, _make_surface_mapping, evoked.info, surf, "eeg", origin="auto" + ) - evoked.pick_types(meg=False, eeg=True) - fmd = make_field_map(evoked, trans_fname, - subject='sample', subjects_dir=subjects_dir) + evoked.pick(picks="eeg") + fmd = make_field_map( + evoked, trans_fname, subject="sample", subjects_dir=subjects_dir, origin="auto" + ) # trans is necessary for EEG only - pytest.raises(RuntimeError, make_field_map, evoked, None, - subject='sample', subjects_dir=subjects_dir) - - fmd = make_field_map(evoked, 
trans_fname, - subject='sample', subjects_dir=subjects_dir) + pytest.raises( + RuntimeError, + make_field_map, + evoked, + None, + subject="sample", + subjects_dir=subjects_dir, + origin="auto", + ) + + fmd = make_field_map( + evoked, trans_fname, subject="sample", subjects_dir=subjects_dir, origin="auto" + ) assert len(fmd) == 1 - assert_array_equal(fmd[0]['data'].shape, (642, 59)) # maps data onto surf - assert len(fmd[0]['ch_names']) == 59 + assert_array_equal(fmd[0]["data"].shape, (642, 59)) # maps data onto surf + assert len(fmd[0]["ch_names"]) == 59 @testing.requires_testing_data @pytest.mark.slowtest def test_make_field_map_meg(): """Test interpolation of MEG field onto helmet | head.""" - evoked = read_evokeds(evoked_fname, condition='Left Auditory') + evoked = read_evokeds(evoked_fname, condition="Left Auditory") info = evoked.info surf = get_meg_helmet_surf(info) # let's reduce the number of channels by a bunch to speed it up - info['bads'] = info['ch_names'][:200] + info["bads"] = info["ch_names"][:200] # bad ch_type - pytest.raises(ValueError, _make_surface_mapping, info, surf, 'foo') + pytest.raises(ValueError, _make_surface_mapping, info, surf, "foo", origin="auto") # bad mode - pytest.raises(ValueError, _make_surface_mapping, info, surf, 'meg', - mode='foo') + pytest.raises( + ValueError, _make_surface_mapping, info, surf, "meg", mode="foo", origin="auto" + ) # no picks - evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True) - pytest.raises(RuntimeError, _make_surface_mapping, evoked_eeg.info, - surf, 'meg') + evoked_eeg = evoked.copy().pick(picks="eeg") + pytest.raises( + RuntimeError, _make_surface_mapping, evoked_eeg.info, surf, "meg", origin="auto" + ) # bad surface def - nn = surf['nn'] - del surf['nn'] - pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg') - surf['nn'] = nn - cf = surf['coord_frame'] - del surf['coord_frame'] - pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg') - surf['coord_frame'] = cf + nn = surf["nn"] + del surf["nn"] + pytest.raises(KeyError, _make_surface_mapping, info, surf, "meg", origin="auto") + surf["nn"] = nn + cf = surf["coord_frame"] + del surf["coord_frame"] + pytest.raises(KeyError, _make_surface_mapping, info, surf, "meg", origin="auto") + surf["coord_frame"] = cf # now do it with make_field_map - evoked.pick_types(meg=True, eeg=False) + evoked.pick(picks="meg") evoked.info.normalize_proj() # avoid projection warnings - fmd = make_field_map(evoked, None, - subject='sample', subjects_dir=subjects_dir) - assert (len(fmd) == 1) - assert_array_equal(fmd[0]['data'].shape, (304, 106)) # maps data onto surf - assert len(fmd[0]['ch_names']) == 106 + fmd = make_field_map( + evoked, None, subject="sample", subjects_dir=subjects_dir, origin="auto" + ) + assert len(fmd) == 1 + assert_array_equal(fmd[0]["data"].shape, (304, 106)) # maps data onto surf + assert len(fmd[0]["ch_names"]) == 106 - pytest.raises(ValueError, make_field_map, evoked, ch_type='foobar') + pytest.raises(ValueError, make_field_map, evoked, ch_type="foobar", origin="auto") # now test the make_field_map on head surf for MEG - evoked.pick_types(meg=True, eeg=False) + evoked.pick(picks="meg") evoked.info.normalize_proj() - fmd = make_field_map(evoked, trans_fname, meg_surf='head', - subject='sample', subjects_dir=subjects_dir) + fmd = make_field_map( + evoked, + trans_fname, + meg_surf="head", + subject="sample", + subjects_dir=subjects_dir, + origin="auto", + ) assert len(fmd) == 1 - assert_array_equal(fmd[0]['data'].shape, (642, 106)) # maps data 
onto surf - assert len(fmd[0]['ch_names']) == 106 + assert_array_equal(fmd[0]["data"].shape, (642, 106)) # maps data onto surf + assert len(fmd[0]["ch_names"]) == 106 - pytest.raises(ValueError, make_field_map, evoked, meg_surf='foobar', - subjects_dir=subjects_dir, trans=trans_fname) + pytest.raises( + ValueError, + make_field_map, + evoked, + meg_surf="foobar", + subjects_dir=subjects_dir, + trans=trans_fname, + origin="auto", + ) @testing.requires_testing_data @@ -190,33 +236,56 @@ def test_make_field_map_meeg(): evoked = read_evokeds(evoked_fname, baseline=(-0.2, 0.0))[0] picks = pick_types(evoked.info, meg=True, eeg=True) picks = picks[::10] - evoked.pick_channels([evoked.ch_names[p] for p in picks]) + evoked.pick([evoked.ch_names[p] for p in picks]) evoked.info.normalize_proj() - maps = make_field_map(evoked, trans_fname, subject='sample', - subjects_dir=subjects_dir, verbose='debug') - assert_equal(maps[0]['data'].shape, (642, 6)) # EEG->Head - assert_equal(maps[1]['data'].shape, (304, 31)) # MEG->Helmet + # set origin to "(0.0, 0.0, 0.04)", which was the default until v1.12 + # estimated origin from "auto" fails the assertions below + maps = make_field_map( + evoked, + trans_fname, + subject="sample", + subjects_dir=subjects_dir, + verbose="debug", + origin=(0.0, 0.0, 0.04), + ) + assert_equal(maps[0]["data"].shape, (642, 6)) # EEG->Head + assert_equal(maps[1]["data"].shape, (304, 31)) # MEG->Helmet # reasonable ranges maxs = (1.2, 2.0) # before #4418, was (1.1, 2.0) mins = (-0.8, -1.3) # before #4418, was (-0.6, -1.2) assert_equal(len(maxs), len(maps)) for map_, max_, min_ in zip(maps, maxs, mins): - assert_allclose(map_['data'].max(), max_, rtol=5e-2) - assert_allclose(map_['data'].min(), min_, rtol=5e-2) + assert_allclose(map_["data"].max(), max_, rtol=5e-2) + assert_allclose(map_["data"].min(), min_, rtol=5e-2) # calculated from correct looking mapping on 2015/12/26 - assert_allclose(np.sqrt(np.sum(maps[0]['data'] ** 2)), 19.0903, # 16.6088, - atol=1e-3, rtol=1e-3) - assert_allclose(np.sqrt(np.sum(maps[1]['data'] ** 2)), 19.4748, # 20.1245, - atol=1e-3, rtol=1e-3) + assert_allclose( + np.sqrt(np.sum(maps[0]["data"] ** 2)), + 19.0903, + atol=1e-3, + rtol=1e-3, + ) + assert_allclose( + np.sqrt(np.sum(maps[1]["data"] ** 2)), + 19.4748, + atol=1e-3, + rtol=1e-3, + ) def _setup_args(info): """Configure args for test_as_meg_type_evoked.""" - coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t']) - int_rad, _, lut_fun, n_fact = _setup_dots('fast', info, coils, 'meg') - my_origin = np.array([0., 0., 0.04]) - args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin, - ch_type='meg', lut=lut_fun, n_fact=n_fact) + coils = _create_meg_coils(info["chs"], "normal", info["dev_head_t"]) + int_rad, _, lut_fun, n_fact = _setup_dots("fast", info, coils, "meg") + my_origin = np.array([0.0, 0.0, 0.04]) + args_dict = dict( + intrad=int_rad, + volume=False, + coils1=coils, + r0=my_origin, + ch_type="meg", + lut=lut_fun, + n_fact=n_fact, + ) return args_dict @@ -226,34 +295,41 @@ def test_as_meg_type_evoked(): # validation tests raw = read_raw_fif(raw_fname) events = mne.find_events(raw) - picks = pick_types(raw.info, meg=True, eeg=True, stim=True, - ecg=True, eog=True, include=['STI 014'], - exclude='bads') + picks = pick_types( + raw.info, + meg=True, + eeg=True, + stim=True, + ecg=True, + eog=True, + include=["STI 014"], + exclude="bads", + ) epochs = mne.Epochs(raw, events, picks=picks) evoked = epochs.average() with pytest.raises(ValueError, match="Invalid value for 
the 'ch_type'"): - evoked.as_type('meg') + evoked.as_type("meg") with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"): - evoked.copy().pick_types(meg='grad').as_type('meg') + evoked.copy().pick(picks="grad").as_type("meg") # channel names - ch_names = evoked.info['ch_names'] - virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1]) + ch_names = evoked.info["ch_names"] + virt_evoked = evoked.copy().pick(ch_names[:10:1]) virt_evoked.info.normalize_proj() - virt_evoked = virt_evoked.as_type('mag') - assert (all(ch.endswith('_v') for ch in virt_evoked.info['ch_names'])) + virt_evoked = virt_evoked.as_type("mag") + assert all(ch.endswith("_v") for ch in virt_evoked.info["ch_names"]) # pick from and to channels - evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3]) - evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3]) + evoked_from = evoked.copy().pick(ch_names[2:10:3]) + evoked_to = evoked.copy().pick(ch_names[0:10:3]) info_from, info_to = evoked_from.info, evoked_to.info # set up things args1, args2 = _setup_args(info_from), _setup_args(info_to) - args1.update(coils2=args2['coils1']) - args2.update(coils2=args1['coils1']) + args1.update(coils2=args2["coils1"]) + args2.update(coils2=args1["coils1"]) # test cross dots cross_dots1 = _do_cross_dots(**args1) @@ -262,15 +338,14 @@ def test_as_meg_type_evoked(): assert_array_almost_equal(cross_dots1, cross_dots2.T) # correlation test - evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy() - data1 = evoked.pick_types(meg='grad').data.ravel() - data2 = evoked.as_type('grad').data.ravel() - assert (np.corrcoef(data1, data2)[0, 1] > 0.95) + evoked = evoked.pick(ch_names[:10:]).copy() + data1 = evoked.pick("grad").data.ravel() + data2 = evoked.as_type("grad").data.ravel() + assert np.corrcoef(data1, data2)[0, 1] > 0.95 # Do it with epochs - virt_epochs = \ - epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1]) + virt_epochs = epochs.copy().load_data().pick(ch_names[:10:1]) virt_epochs.info.normalize_proj() - virt_epochs = virt_epochs.as_type('mag') - assert (all(ch.endswith('_v') for ch in virt_epochs.info['ch_names'])) - assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data) + virt_epochs = virt_epochs.as_type("mag") + assert all(ch.endswith("_v") for ch in virt_epochs.info["ch_names"]) + assert_allclose(virt_epochs.get_data(copy=False).mean(0), virt_evoked.data) diff --git a/mne/forward/tests/test_forward.py b/mne/forward/tests/test_forward.py index ff244d9e0bf..3918ee37f18 100644 --- a/mne/forward/tests/test_forward.py +++ b/mne/forward/tests/test_forward.py @@ -1,59 +1,71 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
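# ---------------------------------------------------------------------------
# Editor's sketch (not part of the patch): ``test_legendre_val`` and
# ``test_legendre_table`` above check MNE's precomputed Legendre lookup
# tables against exact values from ``numpy.polynomial.legendre``. The
# standalone snippet below reproduces that idea, with a dense grid standing
# in for the cached table; the degree, grid size, and tolerances are chosen
# here purely for demonstration.
import numpy as np
from numpy.polynomial import legendre
from scipy.interpolate import interp1d

xs = np.linspace(-1.0, 1.0, 1000)
n_terms = 10
exact = legendre.legvander(xs, n_terms - 1)  # exact values for degrees 0..9

grid = np.linspace(-1.0, 1.0, 2000)          # stand-in for the precomputed table
table = legendre.legvander(grid, n_terms - 1)
lut_fun = interp1d(grid, table, kind="linear", axis=0)
approx = lut_fun(xs)                         # table + interpolation approximation

np.testing.assert_allclose(exact, approx, rtol=1e-2, atol=5e-3)
# ---------------------------------------------------------------------------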
+ import gc from pathlib import Path -import pytest import numpy as np -from numpy.testing import (assert_array_almost_equal, assert_equal, - assert_array_equal, assert_allclose) +import pytest +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, +) +from mne import ( + SourceEstimate, + VectorSourceEstimate, + apply_forward, + apply_forward_raw, + average_forward_solutions, + convert_forward_solution, + pick_types_forward, + read_evokeds, + read_forward_solution, + write_forward_solution, +) +from mne._fiff.pick import pick_channels_forward +from mne.channels import equalize_channels from mne.datasets import testing -from mne import (read_forward_solution, apply_forward, apply_forward_raw, - average_forward_solutions, write_forward_solution, - convert_forward_solution, SourceEstimate, pick_types_forward, - read_evokeds, VectorSourceEstimate) +from mne.forward import ( + Forward, + compute_depth_prior, + compute_orient_prior, + is_fixed_orient, + restrict_forward_to_label, + restrict_forward_to_stc, +) from mne.io import read_info from mne.label import read_label -from mne.utils import requires_mne, run_subprocess -from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label, - Forward, is_fixed_orient, compute_orient_prior, - compute_depth_prior) -from mne.channels import equalize_channels +from mne.utils import _record_warnings, requires_mne, run_subprocess data_path = testing.data_path(download=False) -fname_meeg = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" -) +fname_meeg = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" fname_meeg_grad = ( - data_path - / "MEG" - / "sample" - / "sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif" -) -fname_evoked = ( - Path(__file__).parent.parent.parent - / "io" - / "tests" - / "data" - / "test-ave.fif" + data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif" ) +fname_evoked = Path(__file__).parents[2] / "io" / "tests" / "data" / "test-ave.fif" +label_path = data_path / "MEG" / "sample" / "labels" def assert_forward_allclose(f1, f2, rtol=1e-7): """Compare two potentially converted forward solutions.""" - assert_allclose(f1['sol']['data'], f2['sol']['data'], rtol=rtol) - assert f1['sol']['ncol'] == f2['sol']['ncol'] - assert f1['sol']['ncol'] == f1['sol']['data'].shape[1] - assert_allclose(f1['source_nn'], f2['source_nn'], rtol=rtol) - if f1['sol_grad'] is not None: - assert (f2['sol_grad'] is not None) - assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data']) - assert f1['sol_grad']['ncol'] == f2['sol_grad']['ncol'] - assert f1['sol_grad']['ncol'] == f1['sol_grad']['data'].shape[1] + assert_allclose(f1["sol"]["data"], f2["sol"]["data"], rtol=rtol) + assert f1["sol"]["ncol"] == f2["sol"]["ncol"] + assert f1["sol"]["ncol"] == f1["sol"]["data"].shape[1] + assert_allclose(f1["source_nn"], f2["source_nn"], rtol=rtol) + if f1["sol_grad"] is not None: + assert f2["sol_grad"] is not None + assert_allclose(f1["sol_grad"]["data"], f2["sol_grad"]["data"]) + assert f1["sol_grad"]["ncol"] == f2["sol_grad"]["ncol"] + assert f1["sol_grad"]["ncol"] == f1["sol_grad"]["data"].shape[1] else: - assert (f2['sol_grad'] is None) - assert f1['source_ori'] == f2['source_ori'] - assert f1['surf_ori'] == f2['surf_ori'] - assert f1['src'][0]['coord_frame'] == f1['src'][0]['coord_frame'] + assert f2["sol_grad"] is None + assert f1["source_ori"] == f2["source_ori"] + assert f1["surf_ori"] == f2["surf_ori"] + assert 
f1["src"][0]["coord_frame"] == f1["src"][0]["coord_frame"] @testing.requires_testing_data @@ -61,33 +73,33 @@ def test_convert_forward(): """Test converting forward solution between different representations.""" fwd = read_forward_solution(fname_meeg_grad) fwd_repr = repr(fwd) - assert ('306' in fwd_repr) - assert ('60' in fwd_repr) - assert (fwd_repr) - assert (isinstance(fwd, Forward)) + assert "306" in fwd_repr + assert "60" in fwd_repr + assert fwd_repr + assert isinstance(fwd, Forward) # look at surface orientation fwd_surf = convert_forward_solution(fwd, surf_ori=True) # go back fwd_new = convert_forward_solution(fwd_surf, surf_ori=False) - assert (repr(fwd_new)) - assert (isinstance(fwd_new, Forward)) + assert repr(fwd_new) + assert isinstance(fwd_new, Forward) assert_forward_allclose(fwd, fwd_new) del fwd_new gc.collect() # now go to fixed - fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=True, - force_fixed=True, use_cps=False) + fwd_fixed = convert_forward_solution( + fwd_surf, surf_ori=True, force_fixed=True, use_cps=False + ) del fwd_surf gc.collect() - assert (repr(fwd_fixed)) - assert (isinstance(fwd_fixed, Forward)) - assert (is_fixed_orient(fwd_fixed)) + assert repr(fwd_fixed) + assert isinstance(fwd_fixed, Forward) + assert is_fixed_orient(fwd_fixed) # now go back to cartesian (original condition) - fwd_new = convert_forward_solution(fwd_fixed, surf_ori=False, - force_fixed=False) - assert (repr(fwd_new)) - assert (isinstance(fwd_new, Forward)) + fwd_new = convert_forward_solution(fwd_fixed, surf_ori=False, force_fixed=False) + assert repr(fwd_new) + assert isinstance(fwd_new, Forward) assert_forward_allclose(fwd, fwd_new) del fwd, fwd_new, fwd_fixed gc.collect() @@ -100,86 +112,86 @@ def test_io_forward(tmp_path): # do extensive tests with MEEG + grad n_channels, n_src = 366, 108 fwd = read_forward_solution(fname_meeg_grad) - assert (isinstance(fwd, Forward)) + assert isinstance(fwd, Forward) fwd = read_forward_solution(fname_meeg_grad) fwd = convert_forward_solution(fwd, surf_ori=True) - leadfield = fwd['sol']['data'] + leadfield = fwd["sol"]["data"] assert_equal(leadfield.shape, (n_channels, n_src)) - assert_equal(len(fwd['sol']['row_names']), n_channels) - fname_temp = tmp_path / 'test-fwd.fif' - with pytest.warns(RuntimeWarning, match='stored on disk'): + assert_equal(len(fwd["sol"]["row_names"]), n_channels) + fname_temp = tmp_path / "test-fwd.fif" + with pytest.warns(RuntimeWarning, match="stored on disk"): write_forward_solution(fname_temp, fwd, overwrite=True) fwd = read_forward_solution(fname_meeg_grad) fwd = convert_forward_solution(fwd, surf_ori=True) fwd_read = read_forward_solution(fname_temp) fwd_read = convert_forward_solution(fwd_read, surf_ori=True) - leadfield = fwd_read['sol']['data'] + leadfield = fwd_read["sol"]["data"] assert_equal(leadfield.shape, (n_channels, n_src)) - assert_equal(len(fwd_read['sol']['row_names']), n_channels) - assert_equal(len(fwd_read['info']['chs']), n_channels) - assert ('dev_head_t' in fwd_read['info']) - assert ('mri_head_t' in fwd_read) - assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data']) + assert_equal(len(fwd_read["sol"]["row_names"]), n_channels) + assert_equal(len(fwd_read["info"]["chs"]), n_channels) + assert "dev_head_t" in fwd_read["info"] + assert "mri_head_t" in fwd_read + assert_array_almost_equal(fwd["sol"]["data"], fwd_read["sol"]["data"]) fwd = read_forward_solution(fname_meeg) - fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, - use_cps=False) - with 
pytest.warns(RuntimeWarning, match='stored on disk'): + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=False) + with pytest.warns(RuntimeWarning, match="stored on disk"): write_forward_solution(fname_temp, fwd, overwrite=True) fwd_read = read_forward_solution(fname_temp) - fwd_read = convert_forward_solution(fwd_read, surf_ori=True, - force_fixed=True, use_cps=False) - assert (repr(fwd_read)) - assert (isinstance(fwd_read, Forward)) - assert (is_fixed_orient(fwd_read)) + fwd_read = convert_forward_solution( + fwd_read, surf_ori=True, force_fixed=True, use_cps=False + ) + assert repr(fwd_read) + assert isinstance(fwd_read, Forward) + assert is_fixed_orient(fwd_read) assert_forward_allclose(fwd, fwd_read) - fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, - use_cps=True) - leadfield = fwd['sol']['data'] + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True) + leadfield = fwd["sol"]["data"] assert_equal(leadfield.shape, (n_channels, 1494 / 3)) - assert_equal(len(fwd['sol']['row_names']), n_channels) - assert_equal(len(fwd['info']['chs']), n_channels) - assert ('dev_head_t' in fwd['info']) - assert ('mri_head_t' in fwd) - assert (fwd['surf_ori']) - with pytest.warns(RuntimeWarning, match='stored on disk'): + assert_equal(len(fwd["sol"]["row_names"]), n_channels) + assert_equal(len(fwd["info"]["chs"]), n_channels) + assert "dev_head_t" in fwd["info"] + assert "mri_head_t" in fwd + assert fwd["surf_ori"] + with pytest.warns(RuntimeWarning, match="stored on disk"): write_forward_solution(fname_temp, fwd, overwrite=True) fwd_read = read_forward_solution(fname_temp) - fwd_read = convert_forward_solution(fwd_read, surf_ori=True, - force_fixed=True, use_cps=True) - assert (repr(fwd_read)) - assert (isinstance(fwd_read, Forward)) - assert (is_fixed_orient(fwd_read)) + fwd_read = convert_forward_solution( + fwd_read, surf_ori=True, force_fixed=True, use_cps=True + ) + assert repr(fwd_read) + assert isinstance(fwd_read, Forward) + assert is_fixed_orient(fwd_read) assert_forward_allclose(fwd, fwd_read) fwd = read_forward_solution(fname_meeg_grad) - fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, - use_cps=True) - leadfield = fwd['sol']['data'] + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True) + leadfield = fwd["sol"]["data"] assert_equal(leadfield.shape, (n_channels, n_src / 3)) - assert_equal(len(fwd['sol']['row_names']), n_channels) - assert_equal(len(fwd['info']['chs']), n_channels) - assert ('dev_head_t' in fwd['info']) - assert ('mri_head_t' in fwd) - assert (fwd['surf_ori']) - with pytest.warns(RuntimeWarning, match='stored on disk'): + assert_equal(len(fwd["sol"]["row_names"]), n_channels) + assert_equal(len(fwd["info"]["chs"]), n_channels) + assert "dev_head_t" in fwd["info"] + assert "mri_head_t" in fwd + assert fwd["surf_ori"] + with pytest.warns(RuntimeWarning, match="stored on disk"): write_forward_solution(fname_temp, fwd, overwrite=True) fwd_read = read_forward_solution(fname_temp) - fwd_read = convert_forward_solution(fwd_read, surf_ori=True, - force_fixed=True, use_cps=True) - assert (repr(fwd_read)) - assert (isinstance(fwd_read, Forward)) - assert (is_fixed_orient(fwd_read)) + fwd_read = convert_forward_solution( + fwd_read, surf_ori=True, force_fixed=True, use_cps=True + ) + assert repr(fwd_read) + assert isinstance(fwd_read, Forward) + assert is_fixed_orient(fwd_read) assert_forward_allclose(fwd, fwd_read) # test warnings on bad filenames fwd = 
read_forward_solution(fname_meeg_grad) - fwd_badname = tmp_path / 'test-bad-name.fif.gz' - with pytest.warns(RuntimeWarning, match='end with'): + fwd_badname = tmp_path / "test-bad-name.fif.gz" + with pytest.warns(RuntimeWarning, match="end with"): write_forward_solution(fwd_badname, fwd) - with pytest.warns(RuntimeWarning, match='end with'): + with pytest.warns(RuntimeWarning, match="end with"): read_forward_solution(fwd_badname) fwd = read_forward_solution(fname_meeg) @@ -187,6 +199,15 @@ def test_io_forward(tmp_path): fwd_read = read_forward_solution(fname_temp) assert_forward_allclose(fwd, fwd_read) + h5py = pytest.importorskip("h5py") + pytest.importorskip("h5io") + fname_h5 = fname_temp.with_suffix(".h5") + fwd.save(fname_h5) + with h5py.File(fname_h5, "r"): + pass # just checks for hdf5-ness + fwd_read = read_forward_solution(fname_h5) + assert_forward_allclose(fwd, fwd_read) + @testing.requires_testing_data def test_apply_forward(): @@ -198,53 +219,59 @@ def test_apply_forward(): t_start = 0.123 fwd = read_forward_solution(fname_meeg) - fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, - use_cps=True) + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True) fwd = pick_types_forward(fwd, meg=True) assert isinstance(fwd, Forward) - vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']] + vertno = [fwd["src"][0]["vertno"], fwd["src"][1]["vertno"]] stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times)) stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq) - gain_sum = np.sum(fwd['sol']['data'], axis=1) + gain_sum = np.sum(fwd["sol"]["data"], axis=1) # Evoked evoked = read_evokeds(fname_evoked, condition=0) - evoked.pick_types(meg=True) - with pytest.warns(RuntimeWarning, match='only .* positive values'): + evoked.pick(picks="meg") + with ( + _record_warnings(), + pytest.warns(RuntimeWarning, match="only .* positive values"), + ): evoked = apply_forward(fwd, stc, evoked.info, start=start, stop=stop) data = evoked.data times = evoked.times # do some tests - assert_array_almost_equal(evoked.info['sfreq'], sfreq) + assert_array_almost_equal(evoked.info["sfreq"], sfreq) assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum) assert_array_almost_equal(times[0], t_start) assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq) # vector stc_vec = VectorSourceEstimate( - fwd['source_nn'][:, :, np.newaxis] * stc.data[:, np.newaxis], - stc.vertices, stc.tmin, stc.tstep) - with pytest.warns(RuntimeWarning, match='very large'): + fwd["source_nn"][:, :, np.newaxis] * stc.data[:, np.newaxis], + stc.vertices, + stc.tmin, + stc.tstep, + ) + large_ctx = pytest.warns(RuntimeWarning, match="very large") + with large_ctx: evoked_2 = apply_forward(fwd, stc_vec, evoked.info) assert np.abs(evoked_2.data).mean() > 1e-5 assert_allclose(evoked.data, evoked_2.data, atol=1e-10) # Raw - with pytest.warns(RuntimeWarning, match='only .* positive values'): - raw_proj = apply_forward_raw(fwd, stc, evoked.info, start=start, - stop=stop) + with large_ctx, pytest.warns(RuntimeWarning, match="only .* positive values"): + raw_proj = apply_forward_raw(fwd, stc, evoked.info, start=start, stop=stop) data, times = raw_proj[:, :] # do some tests - assert_array_almost_equal(raw_proj.info['sfreq'], sfreq) + assert_array_almost_equal(raw_proj.info["sfreq"], sfreq) assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum) - atol = 1. 
/ sfreq + atol = 1.0 / sfreq assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol) - assert_allclose(raw_proj.last_samp / sfreq, - t_start + (n_times - 1) / sfreq, atol=atol) + assert_allclose( + raw_proj.last_samp / sfreq, t_start + (n_times - 1) / sfreq, atol=atol + ) @testing.requires_testing_data @@ -257,47 +284,47 @@ def test_restrict_forward_to_stc(tmp_path): t_start = 0.123 fwd = read_forward_solution(fname_meeg) - fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, - use_cps=True) + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True) fwd = pick_types_forward(fwd, meg=True) - vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]] + vertno = [fwd["src"][0]["vertno"][0:15], fwd["src"][1]["vertno"][0:5]] stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times)) stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq) fwd_out = restrict_forward_to_stc(fwd, stc) - assert (isinstance(fwd_out, Forward)) + assert isinstance(fwd_out, Forward) - assert_equal(fwd_out['sol']['ncol'], 20) - assert_equal(fwd_out['src'][0]['nuse'], 15) - assert_equal(fwd_out['src'][1]['nuse'], 5) - assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15]) - assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5]) + assert_equal(fwd_out["sol"]["ncol"], 20) + assert_equal(fwd_out["src"][0]["nuse"], 15) + assert_equal(fwd_out["src"][1]["nuse"], 5) + assert_equal(fwd_out["src"][0]["vertno"], fwd["src"][0]["vertno"][0:15]) + assert_equal(fwd_out["src"][1]["vertno"], fwd["src"][1]["vertno"][0:5]) fwd = read_forward_solution(fname_meeg) fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False) fwd = pick_types_forward(fwd, meg=True) - vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]] + vertno = [fwd["src"][0]["vertno"][0:15], fwd["src"][1]["vertno"][0:5]] stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times)) stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq) fwd_out = restrict_forward_to_stc(fwd, stc) - assert_equal(fwd_out['sol']['ncol'], 60) - assert_equal(fwd_out['src'][0]['nuse'], 15) - assert_equal(fwd_out['src'][1]['nuse'], 5) - assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15]) - assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5]) + assert_equal(fwd_out["sol"]["ncol"], 60) + assert_equal(fwd_out["src"][0]["nuse"], 15) + assert_equal(fwd_out["src"][1]["nuse"], 5) + assert_equal(fwd_out["src"][0]["vertno"], fwd["src"][0]["vertno"][0:15]) + assert_equal(fwd_out["src"][1]["vertno"], fwd["src"][1]["vertno"][0:5]) # Test saving the restricted forward object. This only works if all fields # are properly accounted for. 
- fname_copy = tmp_path / 'copy-fwd.fif' - with pytest.warns(RuntimeWarning, match='stored on disk'): + fname_copy = tmp_path / "copy-fwd.fif" + with pytest.warns(RuntimeWarning, match="stored on disk"): write_forward_solution(fname_copy, fwd_out, overwrite=True) fwd_out_read = read_forward_solution(fname_copy) - fwd_out_read = convert_forward_solution(fwd_out_read, surf_ori=True, - force_fixed=False) + fwd_out_read = convert_forward_solution( + fwd_out_read, surf_ori=True, force_fixed=False + ) assert_forward_allclose(fwd_out, fwd_out_read) @@ -305,68 +332,106 @@ def test_restrict_forward_to_stc(tmp_path): def test_restrict_forward_to_label(tmp_path): """Test restriction of source space to label.""" fwd = read_forward_solution(fname_meeg) - fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, - use_cps=True) + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True) fwd = pick_types_forward(fwd, meg=True) - label_path = data_path / "MEG" / "sample" / "labels" - labels = ['Aud-lh', 'Vis-rh'] + labels = ["Aud-lh", "Vis-rh"] label_lh = read_label(label_path / (labels[0] + ".label")) label_rh = read_label(label_path / (labels[1] + ".label")) fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh]) - src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices) - src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh) - vertno_lh = fwd['src'][0]['vertno'][src_sel_lh] + src_sel_lh = np.intersect1d(fwd["src"][0]["vertno"], label_lh.vertices) + src_sel_lh = np.searchsorted(fwd["src"][0]["vertno"], src_sel_lh) + vertno_lh = fwd["src"][0]["vertno"][src_sel_lh] - nuse_lh = fwd['src'][0]['nuse'] - src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices) - src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) - vertno_rh = fwd['src'][1]['vertno'][src_sel_rh] + nuse_lh = fwd["src"][0]["nuse"] + src_sel_rh = np.intersect1d(fwd["src"][1]["vertno"], label_rh.vertices) + src_sel_rh = np.searchsorted(fwd["src"][1]["vertno"], src_sel_rh) + vertno_rh = fwd["src"][1]["vertno"][src_sel_rh] src_sel_rh += nuse_lh - assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh)) - assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh)) - assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh)) - assert_equal(fwd_out['src'][0]['vertno'], vertno_lh) - assert_equal(fwd_out['src'][1]['vertno'], vertno_rh) + assert_equal(fwd_out["sol"]["ncol"], len(src_sel_lh) + len(src_sel_rh)) + assert_equal(fwd_out["src"][0]["nuse"], len(src_sel_lh)) + assert_equal(fwd_out["src"][1]["nuse"], len(src_sel_rh)) + assert_equal(fwd_out["src"][0]["vertno"], vertno_lh) + assert_equal(fwd_out["src"][1]["vertno"], vertno_rh) fwd = read_forward_solution(fname_meeg) fwd = pick_types_forward(fwd, meg=True) - label_path = data_path / "MEG" / "sample" / "labels" - labels = ['Aud-lh', 'Vis-rh'] + labels = ["Aud-lh", "Vis-rh"] label_lh = read_label(label_path / (labels[0] + ".label")) label_rh = read_label(label_path / (labels[1] + ".label")) fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh]) - src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices) - src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh) - vertno_lh = fwd['src'][0]['vertno'][src_sel_lh] + src_sel_lh = np.intersect1d(fwd["src"][0]["vertno"], label_lh.vertices) + src_sel_lh = np.searchsorted(fwd["src"][0]["vertno"], src_sel_lh) + vertno_lh = fwd["src"][0]["vertno"][src_sel_lh] - nuse_lh = fwd['src'][0]['nuse'] - src_sel_rh = 
np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices) - src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) - vertno_rh = fwd['src'][1]['vertno'][src_sel_rh] + nuse_lh = fwd["src"][0]["nuse"] + src_sel_rh = np.intersect1d(fwd["src"][1]["vertno"], label_rh.vertices) + src_sel_rh = np.searchsorted(fwd["src"][1]["vertno"], src_sel_rh) + vertno_rh = fwd["src"][1]["vertno"][src_sel_rh] src_sel_rh += nuse_lh - assert_equal(fwd_out['sol']['ncol'], - 3 * (len(src_sel_lh) + len(src_sel_rh))) - assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh)) - assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh)) - assert_equal(fwd_out['src'][0]['vertno'], vertno_lh) - assert_equal(fwd_out['src'][1]['vertno'], vertno_rh) + assert_equal(fwd_out["sol"]["ncol"], 3 * (len(src_sel_lh) + len(src_sel_rh))) + assert_equal(fwd_out["src"][0]["nuse"], len(src_sel_lh)) + assert_equal(fwd_out["src"][1]["nuse"], len(src_sel_rh)) + assert_equal(fwd_out["src"][0]["vertno"], vertno_lh) + assert_equal(fwd_out["src"][1]["vertno"], vertno_rh) # Test saving the restricted forward object. This only works if all fields # are properly accounted for. - fname_copy = tmp_path / 'copy-fwd.fif' + fname_copy = tmp_path / "copy-fwd.fif" write_forward_solution(fname_copy, fwd_out, overwrite=True) fwd_out_read = read_forward_solution(fname_copy) assert_forward_allclose(fwd_out, fwd_out_read) +@pytest.mark.parametrize("use_cps", [True, False]) +@testing.requires_testing_data +def test_restrict_forward_to_label_cps(tmp_path, use_cps): + """Test for gh-11689.""" + label_lh = read_label(label_path / "Aud-lh.label") + fwd = read_forward_solution(fname_meeg) + convert_forward_solution( + fwd, surf_ori=True, force_fixed=False, copy=False, use_cps=use_cps + ) + fwd = pick_types_forward(fwd, meg="mag") + fwd_out = restrict_forward_to_label(fwd, label_lh) + vert = fwd_out["src"][0]["vertno"][0] + + assert fwd["surf_ori"] + assert not is_fixed_orient(fwd) + idx = list(fwd["src"][0]["vertno"]).index(vert) + assert idx == 126 + go1 = fwd["_orig_sol"][:, idx * 3 : idx * 3 + 3].copy() + gs1 = fwd["sol"]["data"][:, idx * 3 : idx * 3 + 3].copy() + + assert fwd_out["surf_ori"] + assert not is_fixed_orient(fwd_out) + idx = list(fwd_out["src"][0]["vertno"]).index(vert) + assert idx == 0 + go2 = fwd_out["_orig_sol"][:, idx * 3 : idx * 3 + 3].copy() + gs2 = fwd_out["sol"]["data"][:, idx * 3 : idx * 3 + 3].copy() + assert_allclose(go2, go1) + assert_allclose(gs2, gs1) + + # should be a no-op + convert_forward_solution( + fwd_out, surf_ori=True, force_fixed=False, copy=False, use_cps=use_cps + ) + assert fwd_out["surf_ori"] + assert not is_fixed_orient(fwd_out) + assert list(fwd_out["src"][0]["vertno"]).index(vert) == 0 + go3 = fwd_out["_orig_sol"][:, idx * 3 : idx * 3 + 3].copy() + gs3 = fwd_out["sol"]["data"][:, idx * 3 : idx * 3 + 3].copy() + assert_allclose(go3, go1) + assert_allclose(gs3, gs1) + + @testing.requires_testing_data @requires_mne def test_average_forward_solution(tmp_path): @@ -387,20 +452,27 @@ def test_average_forward_solution(tmp_path): # try an easy case fwd_copy = average_forward_solutions([fwd]) - assert (isinstance(fwd_copy, Forward)) - assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data']) + assert isinstance(fwd_copy, Forward) + assert_array_equal(fwd["sol"]["data"], fwd_copy["sol"]["data"]) # modify a fwd solution, save it, use MNE to average with old one - fwd_copy['sol']['data'] *= 0.5 - fname_copy = str(tmp_path / 'copy-fwd.fif') + fwd_copy["sol"]["data"] *= 0.5 + fname_copy = str(tmp_path / 
"copy-fwd.fif") write_forward_solution(fname_copy, fwd_copy, overwrite=True) - cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd', - fname_copy, '--out', fname_copy) + cmd = ( + "mne_average_forward_solutions", + "--fwd", + fname_meeg, + "--fwd", + fname_copy, + "--out", + fname_copy, + ) run_subprocess(cmd) # now let's actually do it, with one filename and one fwd fwd_ave = average_forward_solutions([fwd, fwd_copy]) - assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data']) + assert_array_equal(0.75 * fwd["sol"]["data"], fwd_ave["sol"]["data"]) # fwd_ave_mne = read_forward_solution(fname_copy) # assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data']) @@ -416,32 +488,32 @@ def test_priors(): # Depth prior fwd = read_forward_solution(fname_meeg) assert not is_fixed_orient(fwd) - n_sources = fwd['nsource'] + n_sources = fwd["nsource"] info = read_info(fname_evoked) depth_prior = compute_depth_prior(fwd, info, exp=0.8) assert depth_prior.shape == (3 * n_sources,) - depth_prior = compute_depth_prior(fwd, info, exp=0.) - assert_array_equal(depth_prior, 1.) + depth_prior = compute_depth_prior(fwd, info, exp=0.0) + assert_array_equal(depth_prior, 1.0) with pytest.raises(ValueError, match='must be "whiten"'): - compute_depth_prior(fwd, info, limit_depth_chs='foo') - with pytest.raises(ValueError, match='noise_cov must be a Covariance'): - compute_depth_prior(fwd, info, limit_depth_chs='whiten') + compute_depth_prior(fwd, info, limit_depth_chs="foo") + with pytest.raises(ValueError, match="noise_cov must be a Covariance"): + compute_depth_prior(fwd, info, limit_depth_chs="whiten") fwd_fixed = convert_forward_solution(fwd, force_fixed=True) depth_prior = compute_depth_prior(fwd_fixed, info=info) assert depth_prior.shape == (n_sources,) # Orientation prior - orient_prior = compute_orient_prior(fwd, 1.) - assert_array_equal(orient_prior, 1.) - orient_prior = compute_orient_prior(fwd_fixed, 0.) - assert_array_equal(orient_prior, 1.) 
- with pytest.raises(ValueError, match='oriented in surface coordinates'): + orient_prior = compute_orient_prior(fwd, 1.0) + assert_array_equal(orient_prior, 1.0) + orient_prior = compute_orient_prior(fwd_fixed, 0.0) + assert_array_equal(orient_prior, 1.0) + with pytest.raises(ValueError, match="oriented in surface coordinates"): compute_orient_prior(fwd, 0.5) fwd_surf_ori = convert_forward_solution(fwd, surf_ori=True) orient_prior = compute_orient_prior(fwd_surf_ori, 0.5) - assert all(np.in1d(orient_prior, (0.5, 1.))) - with pytest.raises(ValueError, match='between 0 and 1'): + assert all(np.isin(orient_prior, (0.5, 1.0))) + with pytest.raises(ValueError, match="between 0 and 1"): compute_orient_prior(fwd_surf_ori, -0.5) - with pytest.raises(ValueError, match='with fixed orientation'): + with pytest.raises(ValueError, match="with fixed orientation"): compute_orient_prior(fwd_fixed, 0.5) @@ -449,8 +521,8 @@ def test_priors(): def test_equalize_channels(): """Test equalization of channels for instances of Forward.""" fwd1 = read_forward_solution(fname_meeg) - fwd1.pick_channels(['EEG 001', 'EEG 002', 'EEG 003']) - fwd2 = fwd1.copy().pick_channels(['EEG 002', 'EEG 001'], ordered=True) + pick_channels_forward(fwd1, include=["EEG 001", "EEG 002", "EEG 003"], copy=False) + fwd2 = pick_channels_forward(fwd1, include=["EEG 002", "EEG 001"], ordered=True) fwd1, fwd2 = equalize_channels([fwd1, fwd2]) - assert fwd1.ch_names == ['EEG 001', 'EEG 002'] - assert fwd2.ch_names == ['EEG 001', 'EEG 002'] + assert fwd1.ch_names == ["EEG 001", "EEG 002"] + assert fwd2.ch_names == ["EEG 001", "EEG 002"] diff --git a/mne/forward/tests/test_make_forward.py b/mne/forward/tests/test_make_forward.py index b23a1ec2f6e..b584202465c 100644 --- a/mne/forward/tests/test_make_forward.py +++ b/mne/forward/tests/test_make_forward.py @@ -1,61 +1,77 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ from itertools import product from pathlib import Path -import pytest import numpy as np -from numpy.testing import assert_allclose, assert_array_equal -from numpy.testing import assert_array_less - -from mne.bem import read_bem_surfaces, make_bem_solution +import pytest +from numpy.testing import assert_allclose, assert_array_equal, assert_array_less + +from mne import ( + convert_forward_solution, + create_info, + get_volume_labels_from_aseg, + make_forward_solution, + make_sphere_model, + pick_info, + pick_types, + pick_types_forward, + read_cov, + read_dipole, + read_evokeds, + read_forward_solution, + read_source_spaces, + setup_volume_source_space, + write_forward_solution, +) +from mne._fiff.constants import FIFF +from mne.bem import make_bem_solution, read_bem_surfaces from mne.channels import make_standard_montage from mne.datasets import testing -from mne.io import read_raw_fif, read_raw_kit, read_raw_bti, read_info -from mne.io.constants import FIFF -from mne import (read_forward_solution, write_forward_solution, - make_forward_solution, convert_forward_solution, - setup_volume_source_space, read_source_spaces, create_info, - make_sphere_model, pick_types_forward, pick_info, pick_types, - read_evokeds, read_cov, read_dipole, - get_volume_labels_from_aseg) -from mne.surface import _get_ico_surface -from mne.transforms import Transform -from mne.utils import (requires_mne, run_subprocess, catch_logging, - requires_mne_mark, requires_openmeeg_mark) -from mne.forward._make_forward import _create_meg_coils, make_forward_dipole -from mne.forward._compute_forward import _magnetic_dipole_field_vec -from mne.forward import Forward, _do_forward_solution, use_coil_def from mne.dipole import Dipole, fit_dipole +from mne.forward import Forward, _do_forward_solution, use_coil_def +from mne.forward._compute_forward import _magnetic_dipole_field_vec +from mne.forward._make_forward import ( + _create_meg_coils, + _ForwardModeler, + make_forward_dipole, +) +from mne.forward.tests.test_forward import assert_forward_allclose +from mne.io import read_info, read_raw_bti, read_raw_fif, read_raw_kit from mne.simulation import simulate_evoked from mne.source_estimate import VolSourceEstimate -from mne.source_space import (write_source_spaces, _compare_source_spaces, - setup_source_space) - -from mne.forward.tests.test_forward import assert_forward_allclose - -data_path = testing.data_path(download=False) -fname_meeg = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" +from mne.source_space._source_space import ( + _compare_source_spaces, + setup_source_space, + write_source_spaces, ) -fname_raw = ( - Path(__file__).parent.parent.parent - / "io" - / "tests" - / "data" - / "test_raw.fif" +from mne.surface import _get_ico_surface +from mne.transforms import Transform, apply_trans, invert_transform +from mne.utils import ( + _record_warnings, + catch_logging, + requires_mne, + requires_mne_mark, + requires_openmeeg_mark, + run_subprocess, ) + +data_path = testing.data_path(download=False) +fname_meeg = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" +fname_raw = Path(__file__).parents[2] / "io" / "tests" / "data" / "test_raw.fif" fname_evo = data_path / "MEG" / "sample" / "sample_audvis_trunc-ave.fif" fname_cov = data_path / "MEG" / "sample" / "sample_audvis_trunc-cov.fif" fname_dip = data_path / "MEG" / "sample" / "sample_audvis_trunc_set1.dip" fname_trans = data_path / "MEG" / "sample" / "sample_audvis_trunc-trans.fif" subjects_dir = data_path / 
"subjects" fname_src = subjects_dir / "sample" / "bem" / "sample-oct-4-src.fif" -fname_bem = ( - subjects_dir / "sample" / "bem" / "sample-1280-1280-1280-bem-sol.fif" -) +fname_bem = subjects_dir / "sample" / "bem" / "sample-1280-1280-1280-bem-sol.fif" fname_aseg = subjects_dir / "sample" / "mri" / "aseg.mgz" fname_bem_meg = subjects_dir / "sample" / "bem" / "sample-1280-bem-sol.fif" -io_path = Path(__file__).parent.parent.parent / "io" +io_path = Path(__file__).parents[2] / "io" bti_dir = io_path / "bti" / "tests" / "data" kit_dir = io_path / "kit" / "tests" / "data" trans_path = kit_dir / "trans-sample.fif" @@ -70,9 +86,9 @@ def _col_corrs(a, b): a_std = np.sqrt((a * a).mean(0)) b_std = np.sqrt((b * b).mean(0)) all_zero = (a_std == 0) & (b_std == 0) - num[all_zero] = 1. - a_std[all_zero] = 1. - b_std[all_zero] = 1. + num[all_zero] = 1.0 + a_std[all_zero] = 1.0 + b_std[all_zero] = 1.0 return num / (a_std * b_std) @@ -81,67 +97,94 @@ def _rdm(a, b): a_norm = np.linalg.norm(a, axis=0) b_norm = np.linalg.norm(b, axis=0) all_zero = (a_norm == 0) & (b_norm == 0) - a_norm[all_zero] = 1. - b_norm[all_zero] = 1. + a_norm[all_zero] = 1.0 + b_norm[all_zero] = 1.0 return a_norm / b_norm -def _compare_forwards(fwd, fwd_py, n_sensors, n_src, - meg_rtol=1e-4, meg_atol=1e-9, - meg_corr_tol=0.99, meg_rdm_tol=0.01, - eeg_rtol=1e-3, eeg_atol=1e-3, - eeg_corr_tol=0.99, eeg_rdm_tol=0.01): +def _compare_forwards( + fwd, + fwd_py, + n_sensors, + n_src, + meg_rtol=1e-4, + meg_atol=1e-9, + meg_corr_tol=0.99, + meg_rdm_tol=0.01, + eeg_rtol=1e-3, + eeg_atol=1e-3, + eeg_corr_tol=0.99, + eeg_rdm_tol=0.01, +): """Test forwards.""" # check source spaces - assert len(fwd['src']) == len(fwd_py['src']) - _compare_source_spaces(fwd['src'], fwd_py['src'], mode='approx') + assert len(fwd["src"]) == len(fwd_py["src"]) + _compare_source_spaces(fwd["src"], fwd_py["src"], mode="approx") for surf_ori, force_fixed in product([False, True], [False, True]): # use copy here to leave our originals unmodified - fwd = convert_forward_solution(fwd, surf_ori, force_fixed, copy=True, - use_cps=True) - fwd_py = convert_forward_solution(fwd_py, surf_ori, force_fixed, - copy=True, use_cps=True) + fwd = convert_forward_solution( + fwd, surf_ori, force_fixed, copy=True, use_cps=True + ) + fwd_py = convert_forward_solution( + fwd_py, surf_ori, force_fixed, copy=True, use_cps=True + ) check_src = n_src // 3 if force_fixed else n_src - for key in ('nchan', 'source_rr', 'source_ori', - 'surf_ori', 'coord_frame', 'nsource'): - assert_allclose(fwd_py[key], fwd[key], rtol=1e-4, atol=1e-7, - err_msg=key) + for key in ( + "nchan", + "source_rr", + "source_ori", + "surf_ori", + "coord_frame", + "nsource", + ): + assert_allclose(fwd_py[key], fwd[key], rtol=1e-4, atol=1e-7, err_msg=key) # In surf_ori=True only Z matters for source_nn if surf_ori and not force_fixed: ori_sl = slice(2, None, 3) else: ori_sl = slice(None) - assert_allclose(fwd_py['source_nn'][ori_sl], fwd['source_nn'][ori_sl], - rtol=1e-4, atol=1e-6) - assert_allclose(fwd_py['mri_head_t']['trans'], - fwd['mri_head_t']['trans'], rtol=1e-5, atol=1e-8) - - assert fwd_py['sol']['data'].shape == (n_sensors, check_src) - assert len(fwd['sol']['row_names']) == n_sensors - assert len(fwd_py['sol']['row_names']) == n_sensors + assert_allclose( + fwd_py["source_nn"][ori_sl], fwd["source_nn"][ori_sl], rtol=1e-4, atol=1e-6 + ) + assert_allclose( + fwd_py["mri_head_t"]["trans"], + fwd["mri_head_t"]["trans"], + rtol=1e-5, + atol=1e-8, + ) + + assert fwd_py["sol"]["data"].shape == (n_sensors, check_src) + 
assert len(fwd["sol"]["row_names"]) == n_sensors + assert len(fwd_py["sol"]["row_names"]) == n_sensors # check MEG - fwd_meg = fwd['sol']['data'][:306, ori_sl] - fwd_meg_py = fwd_py['sol']['data'][:306, ori_sl] - assert_allclose(fwd_meg, fwd_meg_py, rtol=meg_rtol, atol=meg_atol, - err_msg='MEG mismatch') + fwd_meg = fwd["sol"]["data"][:306, ori_sl] + fwd_meg_py = fwd_py["sol"]["data"][:306, ori_sl] + assert_allclose( + fwd_meg, fwd_meg_py, rtol=meg_rtol, atol=meg_atol, err_msg="MEG mismatch" + ) meg_corrs = _col_corrs(fwd_meg, fwd_meg_py) - assert_array_less(meg_corr_tol, meg_corrs, err_msg='MEG corr/MAG') + assert_array_less(meg_corr_tol, meg_corrs, err_msg="MEG corr/MAG") meg_rdm = _rdm(fwd_meg, fwd_meg_py) - assert_allclose(meg_rdm, 1, atol=meg_rdm_tol, err_msg='MEG RDM') + assert_allclose(meg_rdm, 1, atol=meg_rdm_tol, err_msg="MEG RDM") # check EEG - if fwd['sol']['data'].shape[0] > 306: - fwd_eeg = fwd['sol']['data'][306:, ori_sl] - fwd_eeg_py = fwd['sol']['data'][306:, ori_sl] - assert_allclose(fwd_eeg, fwd_eeg_py, rtol=eeg_rtol, atol=eeg_atol, - err_msg='EEG mismatch') + if fwd["sol"]["data"].shape[0] > 306: + fwd_eeg = fwd["sol"]["data"][306:, ori_sl] + fwd_eeg_py = fwd["sol"]["data"][306:, ori_sl] + assert_allclose( + fwd_eeg, + fwd_eeg_py, + rtol=eeg_rtol, + atol=eeg_atol, + err_msg="EEG mismatch", + ) # To test so-called MAG we use correlation (related to cosine # similarity) and also RDM to test the amplitude mismatch eeg_corrs = _col_corrs(fwd_eeg, fwd_eeg_py) - assert_array_less(eeg_corr_tol, eeg_corrs, err_msg='EEG corr/MAG') + assert_array_less(eeg_corr_tol, eeg_corrs, err_msg="EEG corr/MAG") eeg_rdm = _rdm(fwd_eeg, fwd_eeg_py) - assert_allclose(eeg_rdm, 1, atol=eeg_rdm_tol, err_msg='EEG RDM') + assert_allclose(eeg_rdm, 1, atol=eeg_rdm_tol, err_msg="EEG RDM") def test_magnetic_dipole(): @@ -149,24 +192,24 @@ def test_magnetic_dipole(): info = read_info(fname_raw) picks = pick_types(info, meg=True, eeg=False, exclude=[]) info = pick_info(info, picks[:12]) - coils = _create_meg_coils(info['chs'], 'normal', None) + coils = _create_meg_coils(info["chs"], "normal", None) # magnetic dipole far (meters!) from device origin - r0 = np.array([0., 13., -6.]) - for ch, coil in zip(info['chs'], coils): - rr = (ch['loc'][:3] + r0) / 2. # get halfway closer + r0 = np.array([0.0, 13.0, -6.0]) + for ch, coil in zip(info["chs"], coils): + rr = (ch["loc"][:3] + r0) / 2.0 # get halfway closer far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil]) near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil]) - ratio = 8. if ch['ch_name'][-1] == '1' else 16. 
# grad vs mag + ratio = 8.0 if ch["ch_name"][-1] == "1" else 16.0 # grad vs mag assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1) # degenerate case - r0 = coils[0]['rmag'][[0]] - with pytest.raises(RuntimeError, match='Coil too close'): + r0 = coils[0]["rmag"][[0]] + with pytest.raises(RuntimeError, match="Coil too close"): _magnetic_dipole_field_vec(r0, coils[:1]) - with pytest.warns(RuntimeWarning, match='Coil too close'): - fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='warning') + with _record_warnings(), pytest.warns(RuntimeWarning, match="Coil too close"): + fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close="warning") assert not np.isfinite(fwd).any() - with np.errstate(invalid='ignore'): - fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='info') + with np.errstate(invalid="ignore"): + fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close="info") assert not np.isfinite(fwd).any() @@ -181,147 +224,226 @@ def test_make_forward_solution_kit(tmp_path, fname_src_small): fname_kit_raw = kit_dir / "test_bin_raw.fif" # first use mne-C: convert file, make forward solution - fwd = _do_forward_solution('sample', fname_kit_raw, src=fname_src_small, - bem=fname_bem_meg, mri=trans_path, - eeg=False, meg=True, subjects_dir=subjects_dir) - assert (isinstance(fwd, Forward)) + fwd = _do_forward_solution( + "sample", + fname_kit_raw, + src=fname_src_small, + bem=fname_bem_meg, + mri=trans_path, + eeg=False, + meg=True, + subjects_dir=subjects_dir, + ) + assert isinstance(fwd, Forward) # now let's use python with the same raw file src = read_source_spaces(fname_src_small) - fwd_py = make_forward_solution(fname_kit_raw, trans_path, src, - fname_bem_meg, eeg=False, meg=True) + fwd_py = make_forward_solution( + fname_kit_raw, trans_path, src, fname_bem_meg, eeg=False, meg=True + ) _compare_forwards(fwd, fwd_py, 157, n_src_small) - assert (isinstance(fwd_py, Forward)) + assert isinstance(fwd_py, Forward) # now let's use mne-python all the way raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path) # without ignore_ref=True, this should throw an error: - with pytest.raises(NotImplementedError, match='Cannot.*KIT reference'): - make_forward_solution(raw_py.info, src=src, eeg=False, meg=True, - bem=fname_bem_meg, trans=trans_path) + with pytest.raises(NotImplementedError, match="Cannot.*KIT reference"): + make_forward_solution( + raw_py.info, + src=src, + eeg=False, + meg=True, + bem=fname_bem_meg, + trans=trans_path, + ) # check that asking for eeg channels (even if they don't exist) is handled - meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True, - eeg=False)) - fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True, - bem=fname_bem_meg, trans=trans_path, - ignore_ref=True) - _compare_forwards(fwd, fwd_py, 157, n_src_small, - meg_rtol=1e-3, meg_atol=1e-7) + meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True, eeg=False)) + fwd_py = make_forward_solution( + meg_only_info, + src=src, + meg=True, + eeg=True, + bem=fname_bem_meg, + trans=trans_path, + ignore_ref=True, + ) + _compare_forwards(fwd, fwd_py, 157, n_src_small, meg_rtol=1e-3, meg_atol=1e-7) @requires_mne def test_make_forward_solution_bti(fname_src_small): """Test BTI end-to-end versus C.""" - bti_pdf = bti_dir / 'test_pdf_linux' - bti_config = bti_dir / 'test_config_linux' - bti_hs = bti_dir / 'test_hs_linux' - fname_bti_raw = bti_dir / 'exported4D_linux_raw.fif' + bti_pdf = bti_dir / "test_pdf_linux" + bti_config = bti_dir / 
"test_config_linux" + bti_hs = bti_dir / "test_hs_linux" + fname_bti_raw = bti_dir / "exported4D_linux_raw.fif" raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False) src = read_source_spaces(fname_src_small) - fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True, - bem=fname_bem_meg, trans=trans_path) - fwd = _do_forward_solution('sample', fname_bti_raw, src=fname_src_small, - bem=fname_bem_meg, mri=trans_path, - eeg=False, meg=True, subjects_dir=subjects_dir) + fwd_py = make_forward_solution( + raw_py.info, src=src, eeg=False, meg=True, bem=fname_bem_meg, trans=trans_path + ) + fwd = _do_forward_solution( + "sample", + fname_bti_raw, + src=fname_src_small, + bem=fname_bem_meg, + mri=trans_path, + eeg=False, + meg=True, + subjects_dir=subjects_dir, + ) _compare_forwards(fwd, fwd_py, 248, n_src_small) -@pytest.mark.parametrize('other', [ - pytest.param('MNE-C', marks=requires_mne_mark()), - pytest.param('openmeeg', marks=requires_openmeeg_mark()), -]) +@pytest.mark.parametrize( + "other", + [ + pytest.param("MNE-C", marks=requires_mne_mark()), + pytest.param( + "openmeeg", + marks=[requires_openmeeg_mark(), pytest.mark.slowtest], + ), + ], +) def test_make_forward_solution_ctf(tmp_path, fname_src_small, other): """Test CTF w/compensation against MNE-C or OpenMEEG.""" - pytest.importorskip('nibabel') + pytest.importorskip("nibabel") src = read_source_spaces(fname_src_small) raw = read_raw_fif(fname_ctf_raw) assert raw.compensation_grade == 3 - if other == 'openmeeg': - mindist = 20. + if other == "openmeeg": + mindist = 20.0 n_src_want = 51 else: - assert other == 'MNE-C' - mindist = 0. + assert other == "MNE-C" + mindist = 0.0 n_src_want = n_src_small assert n_src_want == 108 - mindist = 20. if other == 'openmeeg' else 0. + mindist = 20.0 if other == "openmeeg" else 0.0 fwd_py = make_forward_solution( - fname_ctf_raw, fname_trans, src, fname_bem_meg, eeg=False, - mindist=mindist, verbose=True) - - if other == 'openmeeg': + fname_ctf_raw, + fname_trans, + src, + fname_bem_meg, + eeg=False, + mindist=mindist, + verbose=True, + ) + + if other == "openmeeg": # TODO: This should be a 1-layer, but it's broken # (some correlations become negative!)... 
bem_surfaces = read_bem_surfaces(fname_bem) # fname_bem_meg - bem = make_bem_solution(bem_surfaces, solver='openmeeg') + bem = make_bem_solution(bem_surfaces, solver="openmeeg") # TODO: These tolerances are bad tol_kwargs = dict(meg_atol=1, meg_corr_tol=0.65, meg_rdm_tol=0.6) fwd = make_forward_solution( - fname_ctf_raw, fname_trans, src, bem, eeg=False, mindist=mindist, - verbose=True) + fname_ctf_raw, + fname_trans, + src, + bem, + eeg=False, + mindist=mindist, + verbose=True, + ) else: - assert other == 'MNE-C' + assert other == "MNE-C" bem = None tol_kwargs = dict() fwd = _do_forward_solution( - 'sample', fname_ctf_raw, mri=fname_trans, src=fname_src_small, - bem=fname_bem_meg, eeg=False, meg=True, subjects_dir=subjects_dir, - mindist=mindist) + "sample", + fname_ctf_raw, + mri=fname_trans, + src=fname_src_small, + bem=fname_bem_meg, + eeg=False, + meg=True, + subjects_dir=subjects_dir, + mindist=mindist, + ) _compare_forwards(fwd, fwd_py, 274, n_src_want, **tol_kwargs) # CTF with compensation changed in python ctf_raw = read_raw_fif(fname_ctf_raw) - ctf_raw.info['bads'] = ['MRO24-2908'] # test that it works with some bads + ctf_raw.info["bads"] = ["MRO24-2908"] # test that it works with some bads ctf_raw.apply_gradient_compensation(2) fwd_py = make_forward_solution( - ctf_raw.info, fname_trans, src, fname_bem_meg, eeg=False, meg=True, - mindist=mindist) - if other == 'openmeeg': + ctf_raw.info, + fname_trans, + src, + fname_bem_meg, + eeg=False, + meg=True, + mindist=mindist, + ) + if other == "openmeeg": assert bem is not None fwd = make_forward_solution( - ctf_raw.info, fname_trans, src, bem, eeg=False, mindist=mindist, - verbose=True) + ctf_raw.info, + fname_trans, + src, + bem, + eeg=False, + mindist=mindist, + verbose=True, + ) else: fwd = _do_forward_solution( - 'sample', ctf_raw, mri=fname_trans, src=fname_src_small, - bem=fname_bem_meg, eeg=False, meg=True, subjects_dir=subjects_dir, - mindist=mindist) + "sample", + ctf_raw, + mri=fname_trans, + src=fname_src_small, + bem=fname_bem_meg, + eeg=False, + meg=True, + subjects_dir=subjects_dir, + mindist=mindist, + ) _compare_forwards(fwd, fwd_py, 274, n_src_want, **tol_kwargs) - fname_temp = tmp_path / 'test-ctf-fwd.fif' + fname_temp = tmp_path / "test-ctf-fwd.fif" write_forward_solution(fname_temp, fwd_py) fwd_py2 = read_forward_solution(fname_temp) _compare_forwards(fwd_py, fwd_py2, 274, n_src_want, **tol_kwargs) repr(fwd_py) +@pytest.mark.slowtest @testing.requires_testing_data def test_make_forward_solution_basic(): """Test making M-EEG forward solution from python.""" with catch_logging() as log: # make sure everything can be path-like (gh #10872) fwd_py = make_forward_solution( - Path(fname_raw), Path(fname_trans), Path(fname_src), - Path(fname_bem), mindist=5., verbose=True) + Path(fname_raw), + Path(fname_trans), + Path(fname_src), + Path(fname_bem), + mindist=5.0, + verbose=True, + ) log = log.getvalue() - assert 'Total 258/258 points inside the surface' in log - assert (isinstance(fwd_py, Forward)) + assert "Total 258/258 points inside the surface" in log + assert isinstance(fwd_py, Forward) fwd = read_forward_solution(fname_meeg) - assert (isinstance(fwd, Forward)) + assert isinstance(fwd, Forward) _compare_forwards(fwd, fwd_py, 366, 1494, meg_rtol=1e-3) # Homogeneous model - with pytest.raises(RuntimeError, match='homogeneous.*1-layer.*EEG'): - make_forward_solution(fname_raw, fname_trans, fname_src, - fname_bem_meg) + with pytest.raises(RuntimeError, match="homogeneous.*1-layer.*EEG"): + 
make_forward_solution(fname_raw, fname_trans, fname_src, fname_bem_meg) +@pytest.mark.slowtest @requires_openmeeg_mark() -@pytest.mark.parametrize("n_layers", [ - 3, - pytest.param(1, marks=pytest.mark.xfail(raises=RuntimeError)), -]) +@pytest.mark.parametrize( + "n_layers", + [ + 3, + pytest.param(1, marks=pytest.mark.xfail(raises=RuntimeError)), + ], +) @testing.requires_testing_data def test_make_forward_solution_openmeeg(n_layers): """Test making M-EEG forward solution from OpenMEEG.""" @@ -329,33 +451,45 @@ def test_make_forward_solution_openmeeg(n_layers): bem_surfaces = read_bem_surfaces(fname_bem) raw = read_raw_fif(fname_raw) n_sensors = 366 - ch_types = ['eeg', 'meg'] + ch_types = ["eeg", "meg"] if n_layers == 1: - ch_types = ['meg'] + ch_types = ["meg"] bem_surfaces = bem_surfaces[-1:] - assert bem_surfaces[0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN + assert bem_surfaces[0]["id"] == FIFF.FIFFV_BEM_SURF_ID_BRAIN n_sensors = 306 raw.pick(ch_types) n_sources_kept = 501 // 3 fwds = dict() for solver in ["openmeeg", "mne"]: bem = make_bem_solution(bem_surfaces, solver=solver) - assert bem['solver'] == solver + assert bem["solver"] == solver with catch_logging() as log: # make sure everything can be path-like (gh #10872) fwd = make_forward_solution( - raw.info, Path(fname_trans), Path(fname_src), - bem, mindist=20., verbose=True) + raw.info, + Path(fname_trans), + Path(fname_src), + bem, + mindist=20.0, + verbose=True, + ) log = log.getvalue() - assert 'Total 258/258 points inside the surface' in log - assert (isinstance(fwd, Forward)) + assert "Total 258/258 points inside the surface" in log + assert isinstance(fwd, Forward) fwds[solver] = fwd del fwd - _compare_forwards(fwds["openmeeg"], - fwds["mne"], n_sensors, n_sources_kept * 3, - meg_atol=1, eeg_atol=100, - meg_corr_tol=0.98, eeg_corr_tol=0.98, - meg_rdm_tol=0.1, eeg_rdm_tol=0.2) + _compare_forwards( + fwds["openmeeg"], + fwds["mne"], + n_sensors, + n_sources_kept * 3, + meg_atol=1, + eeg_atol=100, + meg_corr_tol=0.98, + eeg_corr_tol=0.98, + meg_rdm_tol=0.11, + eeg_rdm_tol=0.2, + ) def test_make_forward_solution_discrete(tmp_path, small_surf_src): @@ -363,31 +497,36 @@ def test_make_forward_solution_discrete(tmp_path, small_surf_src): # smoke test for depth weighting and discrete source spaces src = small_surf_src src = src + setup_volume_source_space( - pos=dict(rr=src[0]['rr'][src[0]['vertno'][:3]].copy(), - nn=src[0]['nn'][src[0]['vertno'][:3]].copy())) + pos=dict( + rr=src[0]["rr"][src[0]["vertno"][:3]].copy(), + nn=src[0]["nn"][src[0]["vertno"][:3]].copy(), + ) + ) sphere = make_sphere_model() - fwd = make_forward_solution(fname_raw, fname_trans, src, sphere, - meg=True, eeg=False) + fwd = make_forward_solution( + fname_raw, fname_trans, src, sphere, meg=True, eeg=False + ) convert_forward_solution(fwd, surf_ori=True) n_src_small = 108 # this is the resulting # of verts in fwd -@pytest.fixture(scope='module', params=[testing._pytest_param()]) +@pytest.fixture(scope="module", params=[testing._pytest_param()]) def small_surf_src(): """Create a small surface source space.""" - pytest.importorskip('nibabel') - src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir, - add_dist=False) - assert sum(s['nuse'] for s in src) * 3 == n_src_small + pytest.importorskip("nibabel") + src = setup_source_space( + "sample", "oct2", subjects_dir=subjects_dir, add_dist=False + ) + assert sum(s["nuse"] for s in src) * 3 == n_src_small return src @pytest.fixture() def fname_src_small(tmp_path, small_surf_src): """Create a small 
source space.""" - fname_src_small = tmp_path / 'sample-oct-2-src.fif' + fname_src_small = tmp_path / "sample-oct-2-src.fif" write_source_spaces(fname_src_small, small_surf_src) return fname_src_small @@ -396,47 +535,122 @@ def fname_src_small(tmp_path, small_surf_src): @pytest.mark.timeout(90) # can take longer than 60 s on Travis def test_make_forward_solution_sphere(tmp_path, fname_src_small): """Test making a forward solution with a sphere model.""" - out_name = tmp_path / 'tmp-fwd.fif' - run_subprocess(['mne_forward_solution', '--meg', '--eeg', - '--meas', fname_raw, '--src', fname_src_small, - '--mri', fname_trans, '--fwd', out_name]) + out_name = tmp_path / "tmp-fwd.fif" + run_subprocess( + [ + "mne_forward_solution", + "--meg", + "--eeg", + "--meas", + fname_raw, + "--src", + fname_src_small, + "--mri", + fname_trans, + "--fwd", + out_name, + ] + ) fwd = read_forward_solution(out_name) - sphere = make_sphere_model(verbose=True) + sphere = make_sphere_model( + head_radius=0.1, + relative_radii=(0.95, 0.97, 0.98, 1), + verbose=True, + ) src = read_source_spaces(fname_src_small) - fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere, - meg=True, eeg=True, verbose=True) - _compare_forwards(fwd, fwd_py, 366, 108, - meg_rtol=5e-1, meg_atol=1e-6, - eeg_rtol=5e-1, eeg_atol=5e-1) + fwd_py = make_forward_solution( + fname_raw, fname_trans, src, sphere, meg=True, eeg=True, verbose=True + ) + _compare_forwards( + fwd, + fwd_py, + 366, + 108, + meg_rtol=5e-1, + meg_atol=1e-6, + eeg_rtol=5e-1, + eeg_atol=5e-1, + ) # Since the above is pretty lax, let's check a different way for meg, eeg in zip([True, False], [False, True]): fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg) fwd_py_ = pick_types_forward(fwd, meg=meg, eeg=eeg) - assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(), - fwd_py_['sol']['data'].ravel())[0, 1], - 1.0, rtol=1e-3) + assert_allclose( + np.corrcoef(fwd_["sol"]["data"].ravel(), fwd_py_["sol"]["data"].ravel())[ + 0, 1 + ], + 1.0, + rtol=1e-3, + ) + # Number of layers in the sphere model doesn't matter for MEG # (as long as no sources are omitted due to distance) - assert len(sphere['layers']) == 4 - fwd = make_forward_solution(fname_raw, fname_trans, src, sphere, - meg=True, eeg=False) + assert len(sphere["layers"]) == 4 + fwd = make_forward_solution( + fname_raw, fname_trans, src, sphere, meg=True, eeg=False + ) sphere_1 = make_sphere_model(head_radius=None) - assert len(sphere_1['layers']) == 0 - assert_array_equal(sphere['r0'], sphere_1['r0']) - fwd_1 = make_forward_solution(fname_raw, fname_trans, src, sphere, - meg=True, eeg=False) + assert len(sphere_1["layers"]) == 0 + assert_array_equal(sphere["r0"], sphere_1["r0"]) + fwd_1 = make_forward_solution( + fname_raw, fname_trans, src, sphere, meg=True, eeg=False + ) _compare_forwards(fwd, fwd_1, 306, 108, meg_rtol=1e-12, meg_atol=1e-12) # Homogeneous model sphere = make_sphere_model(head_radius=None) - with pytest.raises(RuntimeError, match='zero shells.*EEG'): + with pytest.raises(RuntimeError, match="zero shells.*EEG"): make_forward_solution(fname_raw, fname_trans, src, sphere) + # Since the spherical model is defined in head space, the head->MRI transform should + # not matter for the check that MEG sensors are outside the sphere. 
+ custom_trans = Transform("head", "mri") + custom_trans["trans"][0, 3] = 0.05 # move MEG sensors close to mesh + sphere = make_sphere_model() + fwd = make_forward_solution(fname_raw, custom_trans, src, sphere) + assert fwd["mri_head_t"]["trans"][0, 3] == -0.05 + + +def test_make_forward_sphere_exclude(): + """Test that points are excluded that are outside BEM sphere inner layer.""" + r0 = (0.0, 0.0, 0.04) + inner_radius = 0.08 + head_radius = inner_radius / 0.9 # relative_radii[0] for make_sphere model + bem = make_sphere_model(r0=r0, head_radius=head_radius) + # construct our source space using a sphere the size of the inner (brain) radius, + # with a 1mm exclude zone to avoid numerical issues + src = setup_volume_source_space( + pos=10.0, + sphere=r0 + (inner_radius,), + mindist=1, # use >0 to avoid any numerical issues + exclude=10, + ) + assert src[0]["nuse"] == 2102 # empirically determined + trans = Transform("mri", "head") # identity for simplicity + raw = read_raw_fif(fname_raw) + raw.pick(raw.ch_names[:1]) + fwd = make_forward_solution(raw.info, trans, src, bem, mindist=0) + assert fwd["nsource"] == src[0]["nuse"] + bem_small = make_sphere_model(r0=r0, head_radius=head_radius - 0.01 / 0.9) + fwd_small = make_forward_solution(raw.info, trans, src, bem_small, mindist=0) + assert fwd_small["nsource"] < src[0]["nuse"] + idx = np.searchsorted(fwd["src"][0]["vertno"], fwd_small["src"][0]["vertno"]) + fwd_data = np.reshape(fwd["sol"]["data"], (len(raw.ch_names), -1, 3)) + fwd_small_data = np.reshape(fwd_small["sol"]["data"], (len(raw.ch_names), -1, 3)) + assert_allclose(fwd_data[:, idx], fwd_small_data) + # again to avoid numerical issues, make this close + fwd_small_2 = make_forward_solution(raw.info, trans, src, bem, mindist=9.999) + assert fwd_small_2["nsource"] == fwd_small["nsource"] + fwd_small_2_data = np.reshape( + fwd_small_2["sol"]["data"], (len(raw.ch_names), -1, 3) + ) + assert_allclose(fwd_small_data, fwd_small_2_data) + @pytest.mark.slowtest @testing.requires_testing_data def test_forward_mixed_source_space(tmp_path): """Test making the forward solution for a mixed source space.""" - pytest.importorskip('nibabel') + pytest.importorskip("nibabel") # get the surface source space rng = np.random.RandomState(0) surf = read_source_spaces(fname_src) @@ -444,42 +658,49 @@ def test_forward_mixed_source_space(tmp_path): # setup two volume source spaces label_names = get_volume_labels_from_aseg(fname_aseg) vol_labels = rng.choice(label_names, 2) - with pytest.warns(RuntimeWarning, match='Found no usable.*CC_Mid_Ant.*'): - vol1 = setup_volume_source_space('sample', pos=20., mri=fname_aseg, - volume_label=vol_labels[0], - add_interpolator=False) - vol2 = setup_volume_source_space('sample', pos=20., mri=fname_aseg, - volume_label=vol_labels[1], - add_interpolator=False) + with pytest.warns(RuntimeWarning, match="Found no usable.*CC_Mid_Ant.*"): + vol1 = setup_volume_source_space( + "sample", + pos=20.0, + mri=fname_aseg, + volume_label=vol_labels[0], + add_interpolator=False, + ) + vol2 = setup_volume_source_space( + "sample", + pos=20.0, + mri=fname_aseg, + volume_label=vol_labels[1], + add_interpolator=False, + ) # merge surfaces and volume src = surf + vol1 + vol2 # calculate forward solution fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem) - assert (repr(fwd)) + assert repr(fwd) # extract source spaces - src_from_fwd = fwd['src'] + src_from_fwd = fwd["src"] # get the coordinate frame of each source space - coord_frames = np.array([s['coord_frame'] for s in 
src_from_fwd]) + coord_frames = np.array([s["coord_frame"] for s in src_from_fwd]) # assert that all source spaces are in head coordinates - assert ((coord_frames == FIFF.FIFFV_COORD_HEAD).all()) + assert (coord_frames == FIFF.FIFFV_COORD_HEAD).all() # run tests for SourceSpaces.export_volume - fname_img = tmp_path / 'temp-image.mgz' + fname_img = tmp_path / "temp-image.mgz" # head coordinates and mri_resolution, but trans file - with pytest.raises(ValueError, match='trans containing mri to head'): + with pytest.raises(ValueError, match="trans containing mri to head"): src_from_fwd.export_volume(fname_img, mri_resolution=True, trans=None) # head coordinates and mri_resolution, but wrong trans file - vox_mri_t = vol1[0]['vox_mri_t'] - with pytest.raises(ValueError, match='head<->mri, got mri_voxel->mri'): - src_from_fwd.export_volume(fname_img, mri_resolution=True, - trans=vox_mri_t) + vox_mri_t = vol1[0]["vox_mri_t"] + with pytest.raises(ValueError, match="head<->mri, got mri_voxel->mri"): + src_from_fwd.export_volume(fname_img, mri_resolution=True, trans=vox_mri_t) @pytest.mark.slowtest @@ -490,12 +711,12 @@ def test_make_forward_dipole(tmp_path): evoked = read_evokeds(fname_evo)[0] cov = read_cov(fname_cov) - cov['projs'] = [] # avoid proj warning + cov["projs"] = [] # avoid proj warning dip_c = read_dipole(fname_dip) # Only use magnetometers for speed! - picks = pick_types(evoked.info, meg='mag', eeg=False)[::8] - evoked.pick_channels([evoked.ch_names[p] for p in picks]) + picks = pick_types(evoked.info, meg="mag", eeg=False)[::8] + evoked.pick([evoked.ch_names[p] for p in picks]) evoked.info.normalize_proj() info = evoked.info @@ -503,18 +724,19 @@ def test_make_forward_dipole(tmp_path): # in the test dataset. n_test_dipoles = 3 # minimum 3 needed to get uneven sampling in time dipsel = np.sort(rng.permutation(np.arange(len(dip_c)))[:n_test_dipoles]) - dip_test = Dipole(times=dip_c.times[dipsel], - pos=dip_c.pos[dipsel], - amplitude=dip_c.amplitude[dipsel], - ori=dip_c.ori[dipsel], - gof=dip_c.gof[dipsel]) + dip_test = Dipole( + times=dip_c.times[dipsel], + pos=dip_c.pos[dipsel], + amplitude=dip_c.amplitude[dipsel], + ori=dip_c.ori[dipsel], + gof=dip_c.gof[dipsel], + ) sphere = make_sphere_model(head_radius=0.1) # Warning emitted due to uneven sampling in time - with pytest.warns(RuntimeWarning, match='unevenly spaced'): - fwd, stc = make_forward_dipole(dip_test, sphere, info, - trans=fname_trans) + with pytest.warns(RuntimeWarning, match="unevenly spaced"): + fwd, stc = make_forward_dipole(dip_test, sphere, info, trans=fname_trans) # stc is list of VolSourceEstimate's assert isinstance(stc, list) @@ -526,8 +748,7 @@ def test_make_forward_dipole(tmp_path): times, pos, amplitude, ori, gof = [], [], [], [], [] nave = 400 # add a tiny amount of noise to the simulated evokeds for s in stc: - evo_test = simulate_evoked(fwd, s, info, cov, - nave=nave, random_state=rng) + evo_test = simulate_evoked(fwd, s, info, cov, nave=nave, random_state=rng) # evo_test.add_proj(make_eeg_average_ref_proj(evo_test.info)) dfit, resid = fit_dipole(evo_test, cov, sphere, None) times += dfit.times.tolist() @@ -544,36 +765,40 @@ def test_make_forward_dipole(tmp_path): diff = dip_test.pos - dip_fit.pos corr = np.corrcoef(dip_test.pos.ravel(), dip_fit.pos.ravel())[0, 1] dist = np.sqrt(np.mean(np.sum(diff * diff, axis=1))) - gc_dist = 180 / np.pi * \ - np.mean(np.arccos(np.sum(dip_test.ori * dip_fit.ori, axis=1))) + gc_dist = ( + 180 / np.pi * np.mean(np.arccos(np.sum(dip_test.ori * dip_fit.ori, axis=1))) + ) 
amp_err = np.sqrt(np.mean((dip_test.amplitude - dip_fit.amplitude) ** 2)) # Make sure each coordinate is close to reference # NB tolerance should be set relative to snr of simulated evoked! - assert_allclose(dip_fit.pos, dip_test.pos, rtol=0, atol=1e-2, - err_msg='position mismatch') + assert_allclose( + dip_fit.pos, dip_test.pos, rtol=0, atol=1.5e-2, err_msg="position mismatch" + ) assert dist < 1e-2 # within 1 cm - assert corr > 0.985 + assert corr > 0.98 assert gc_dist < 20 # less than 20 degrees assert amp_err < 10e-9 # within 10 nAm # Make sure rejection works with BEM: one dipole at z=1m # NB _make_forward.py:_prepare_for_forward will raise a RuntimeError # if no points are left after min_dist exclusions, hence 2 dips here! - dip_outside = Dipole(times=[0., 0.001], - pos=[[0., 0., 1.0], [0., 0., 0.040]], - amplitude=[100e-9, 100e-9], - ori=[[1., 0., 0.], [1., 0., 0.]], gof=1) - with pytest.raises(ValueError, match='outside the inner skull'): + dip_outside = Dipole( + times=[0.0, 0.001], + pos=[[0.0, 0.0, 1.0], [0.0, 0.0, 0.040]], + amplitude=[100e-9, 100e-9], + ori=[[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]], + gof=1, + ) + with pytest.raises(ValueError, match="outside the inner skull"): make_forward_dipole(dip_outside, fname_bem, info, fname_trans) # if we get this far, can safely assume the code works with BEMs too # -> use sphere again below for speed # Now make an evenly sampled set of dipoles, some simultaneous, # should return a VolSourceEstimate regardless - times = [0., 0., 0., 0.001, 0.001, 0.002] - pos = np.random.rand(6, 3) * 0.020 + \ - np.array([0., 0., 0.040])[np.newaxis, :] + times = [0.0, 0.0, 0.0, 0.001, 0.001, 0.002] + pos = np.random.rand(6, 3) * 0.020 + np.array([0.0, 0.0, 0.040])[np.newaxis, :] amplitude = np.random.rand(6) * 100e-9 ori = np.eye(6, 3) + np.eye(6, 3, -3) gof = np.arange(len(times)) / len(times) # arbitrary @@ -581,61 +806,64 @@ def test_make_forward_dipole(tmp_path): dip_even_samp = Dipole(times, pos, amplitude, ori, gof) # I/O round-trip - fname = str(tmp_path / 'test-fwd.fif') - with pytest.warns(RuntimeWarning, match='free orientation'): + fname = str(tmp_path / "test-fwd.fif") + with pytest.warns(RuntimeWarning, match="free orientation"): write_forward_solution(fname, fwd) - fwd_read = convert_forward_solution( - read_forward_solution(fname), force_fixed=True) + fwd_read = convert_forward_solution(read_forward_solution(fname), force_fixed=True) assert_forward_allclose(fwd, fwd_read, rtol=1e-6) - fwd, stc = make_forward_dipole(dip_even_samp, sphere, info, - trans=fname_trans) + fwd, stc = make_forward_dipole(dip_even_samp, sphere, info, trans=fname_trans) assert isinstance(stc, VolSourceEstimate) - assert_allclose(stc.times, np.arange(0., 0.003, 0.001)) + assert_allclose(stc.times, np.arange(0.0, 0.003, 0.001)) # Test passing a list of Dipoles instead of a single Dipole object - fwd2, stc2 = make_forward_dipole([dip_even_samp[0], dip_even_samp[1:]], - sphere, info, trans=fname_trans) - assert_array_equal(fwd['sol']['data'], fwd2['sol']['data']) + fwd2, stc2 = make_forward_dipole( + [dip_even_samp[0], dip_even_samp[1:]], sphere, info, trans=fname_trans + ) + assert_array_equal(fwd["sol"]["data"], fwd2["sol"]["data"]) assert_array_equal(stc.data, stc2.data) @testing.requires_testing_data def test_make_forward_no_meg(tmp_path): """Test that we can make and I/O forward solution with no MEG channels.""" - pos = dict(rr=[[0.05, 0, 0]], nn=[[0, 0, 1.]]) + pos = dict(rr=[[0.05, 0, 0]], nn=[[0, 0, 1.0]]) src = setup_volume_source_space(pos=pos) bem = 
make_sphere_model() trans = None - montage = make_standard_montage('standard_1020') - info = create_info(['Cz'], 1000., 'eeg').set_montage(montage) + montage = make_standard_montage("standard_1020") + info = create_info(["Cz"], 1000.0, "eeg").set_montage(montage) fwd = make_forward_solution(info, trans, src, bem) - fname = tmp_path / 'test-fwd.fif' + fname = tmp_path / "test-fwd.fif" write_forward_solution(fname, fwd) fwd_read = read_forward_solution(fname) - assert_allclose(fwd['sol']['data'], fwd_read['sol']['data']) + assert_allclose(fwd["sol"]["data"], fwd_read["sol"]["data"]) def test_use_coil_def(tmp_path): """Test use_coil_def.""" - info = create_info(1, 1000., 'mag') - info['chs'][0]['coil_type'] = 9999 - info['chs'][0]['loc'][:] = [0, 0, 0.02, 1, 0, 0, 0, 1, 0, 0, 0, 1] - sphere = make_sphere_model((0., 0., 0.), 0.01) + info = create_info(1, 1000.0, "mag") + info["chs"][0]["coil_type"] = 9999 + info["chs"][0]["loc"][:] = [0, 0, 0.02, 1, 0, 0, 0, 1, 0, 0, 0, 1] + info["dev_head_t"] = Transform("meg", "head") + sphere = make_sphere_model((0.0, 0.0, 0.0), 0.01) src = setup_volume_source_space(pos=5, sphere=sphere) - trans = Transform('head', 'mri', None) - with pytest.raises(RuntimeError, match='coil definition not found'): + trans = Transform("head", "mri", None) + with pytest.raises(RuntimeError, match="coil definition not found"): make_forward_solution(info, trans, src, sphere) - coil_fname = tmp_path / 'coil_def.dat' - with open(coil_fname, 'w') as fid: - fid.write("""# custom cube coil def + coil_fname = tmp_path / "coil_def.dat" + with open(coil_fname, "w") as fid: + fid.write( + """# custom cube coil def 1 9999 2 8 3e-03 0.000e+00 "Test" - 0.1250 -0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000""") - with pytest.raises(RuntimeError, match='Could not interpret'): + 0.1250 -0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000""" + ) + with pytest.raises(RuntimeError, match="Could not interpret"): with use_coil_def(coil_fname): make_forward_solution(info, trans, src, sphere) - with open(coil_fname, 'w') as fid: - fid.write("""# custom cube coil def + with open(coil_fname, "w") as fid: + fid.write( + """# custom cube coil def 1 9999 2 8 3e-03 0.000e+00 "Test" 0.1250 -0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000 0.1250 -0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000 @@ -644,7 +872,8 @@ def test_use_coil_def(tmp_path): 0.1250 -0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000 0.1250 -0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000 0.1250 0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000 - 0.1250 0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000""") + 0.1250 0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000""" + ) with use_coil_def(coil_fname): make_forward_solution(info, trans, src, sphere) @@ -653,27 +882,62 @@ def test_use_coil_def(tmp_path): @testing.requires_testing_data def test_sensors_inside_bem(): """Test that sensors inside the BEM are problematic.""" - rr = _get_ico_surface(1)['rr'] + rr = _get_ico_surface(1)["rr"] rr /= np.linalg.norm(rr, axis=1, keepdims=True) rr *= 0.1 assert len(rr) == 42 - info = create_info(len(rr), 1000., 'mag') - info['dev_head_t'] = Transform('meg', 'head', np.eye(4)) - for ii, ch in enumerate(info['chs']): - ch['loc'][:] = np.concatenate((rr[ii], np.eye(3).ravel())) - trans = Transform('head', 'mri', np.eye(4)) - trans['trans'][2, 3] = 0.03 - sphere_noshell = make_sphere_model((0., 0., 0.), None) - sphere = make_sphere_model((0., 0., 0.), 1.01) - with pytest.raises(RuntimeError, match='.* 15 MEG.*inside the scalp.*'): - make_forward_solution(info, trans, 
fname_src, fname_bem) + info = create_info(len(rr), 1000.0, "mag") + info["dev_head_t"] = Transform("meg", "head", np.eye(4)) + for ii, ch in enumerate(info["chs"]): + ch["loc"][:] = np.concatenate((rr[ii], np.eye(3).ravel())) + trans = Transform("head", "mri", np.eye(4)) + trans["trans"][2, 3] = 0.03 + sphere_noshell = make_sphere_model((0.0, 0.0, 0.0), None) + sphere = make_sphere_model((0.0, 0.0, 0.0), 1.01) + with pytest.warns(RuntimeWarning, match=".* 15 MEG.*inside the scalp.*"): + fwd = make_forward_solution(info, trans, fname_src, fname_bem, on_inside="warn") + assert fwd["nsource"] == 516 + assert fwd["nchan"] == 42 + assert np.isfinite(fwd["sol"]["data"]).all() make_forward_solution(info, trans, fname_src, fname_bem_meg) # okay make_forward_solution(info, trans, fname_src, sphere_noshell) # okay - with pytest.raises(RuntimeError, match='.* 42 MEG.*outermost sphere sh.*'): + with pytest.raises(RuntimeError, match=".* 42 MEG.*outermost sphere sh.*"): make_forward_solution(info, trans, fname_src, sphere) - sphere = make_sphere_model((0., 0., 2.0), 1.01) # weird, but okay - make_forward_solution(info, trans, fname_src, sphere) - for ch in info['chs']: - ch['loc'][:3] *= 0.1 - with pytest.raises(RuntimeError, match='.* 42 MEG.*the inner skull.*'): + sphere = make_sphere_model((0.0, 0.0, 0.0), 1.01) # weird, but okay + with pytest.raises(RuntimeError, match=".* 42 MEG.*the outermost sphere shell.*"): + make_forward_solution(info, trans, fname_src, sphere) + for ch in info["chs"]: + ch["loc"][:3] *= 0.1 + with pytest.raises(RuntimeError, match=".* 42 MEG.*the inner skull.*"): make_forward_solution(info, trans, fname_src, fname_bem_meg) + + +def test_make_forward_iterative(): + """Test that points are excluded that are outside BEM sphere inner layer.""" + r0 = (0.0, 0.0, 0.04) + head_radius = 0.08 + bem = make_sphere_model(r0=r0, head_radius=head_radius) + src = setup_volume_source_space( + pos=10.0, + sphere=r0 + (head_radius,), + exclude=10, + ) + assert 500 < src[0]["nuse"] < 4000 + trans = Transform("mri", "head") + raw = read_raw_fif(fname_raw) + raw.pick(raw.ch_names[:2]) + fwd = make_forward_solution(raw.info, trans, src, bem, mindist=0, verbose=True) + # check against iterative version + fm = _ForwardModeler(raw.info, trans, bem) + fwd_iterative = fm.compute(src) + _compare_forwards(fwd, fwd_iterative, fwd["nchan"], 3 * fwd["nsource"]) + midpt = fwd["nsource"] // 2 + assert fwd["coord_frame"] == FIFF.FIFFV_COORD_HEAD + fwd_data = list() + rr = apply_trans(invert_transform(fwd["mri_head_t"]), fwd["source_rr"]) + nn = apply_trans(invert_transform(fwd["mri_head_t"]), fwd["source_nn"], move=False) + for sl in (slice(None, midpt), slice(midpt, None)): + ss = setup_volume_source_space(pos=dict(rr=rr[sl], nn=nn[sl])) + fwd_data.append(fm.compute(ss)["sol"]["data"]) + fwd_data = np.concatenate(fwd_data, axis=1) + assert_allclose(fwd_data, fwd["sol"]["data"]) diff --git a/mne/gui/__init__.py b/mne/gui/__init__.py index f92817bf637..c06c8dc9939 100644 --- a/mne/gui/__init__.py +++ b/mne/gui/__init__.py @@ -1,298 +1,8 @@ -"""Convenience functions for opening GUIs.""" - -# Authors: Christian Brodbeck -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
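The new `test_make_forward_iterative` above relies on the fact that a discrete volume source space can be built directly from explicit positions and normals (in meters, MRI coordinates) and then passed to `make_forward_solution`. A minimal sketch of that usage outside the test suite, assuming `raw` is any already-loaded `Raw` object with MEG channels; the random positions and variable names are illustrative only:

import numpy as np
import mne

# assumption: `raw` was loaded beforehand, e.g. raw = mne.io.read_raw_fif(my_raw_fname)
rng = np.random.default_rng(0)
rr = rng.uniform(-0.02, 0.02, (10, 3)) + np.array([0.0, 0.0, 0.04])  # positions (m)
nn = np.tile([0.0, 0.0, 1.0], (10, 1))  # one unit normal per position
src = mne.setup_volume_source_space(pos=dict(rr=rr, nn=nn))  # discrete source space
bem = mne.make_sphere_model(r0=(0.0, 0.0, 0.04), head_radius=0.08)
trans = mne.transforms.Transform("mri", "head")  # identity MRI<->head transform
fwd = mne.make_forward_solution(raw.info, trans, src, bem, meg=True, eeg=False)
print(fwd["sol"]["data"].shape)  # (n_channels, 3 * n_sources) for free orientation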
-from ..utils import verbose, get_config, warn - - -@verbose -def coregistration(tabbed=False, split=True, width=None, inst=None, - subject=None, subjects_dir=None, guess_mri_subject=None, - height=None, head_opacity=None, head_high_res=None, - trans=None, scrollable=True, *, - orient_to_surface=True, scale_by_distance=True, - mark_inside=True, interaction=None, scale=None, - advanced_rendering=None, head_inside=True, - fullscreen=None, show=True, block=False, verbose=None): - """Coregister an MRI with a subject's head shape. - - The GUI can be launched through the command line interface: - - .. code-block:: bash - - $ mne coreg - - or using a python interpreter as shown in :ref:`tut-source-alignment`. - - Parameters - ---------- - tabbed : bool - Combine the data source panel and the coregistration panel into a - single panel with tabs. - split : bool - Split the main panels with a movable splitter (good for QT4 but - unnecessary for wx backend). - width : int | None - Specify the width for window (in logical pixels). - Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value - (which defaults to 800). - inst : None | str - Path to an instance file containing the digitizer data. Compatible for - Raw, Epochs, and Evoked files. - subject : None | str - Name of the mri subject. - %(subjects_dir)s - guess_mri_subject : bool - When selecting a new head shape file, guess the subject's name based - on the filename and change the MRI subject accordingly (default True). - height : int | None - Specify a height for window (in logical pixels). - Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value - (which defaults to 400). - head_opacity : float | None - The opacity of the head surface in the range [0., 1.]. - Default is None, which uses ``MNE_COREG_HEAD_OPACITY`` config value - (which defaults to 1.). - head_high_res : bool | None - Use a high resolution head surface. - Default is None, which uses ``MNE_COREG_HEAD_HIGH_RES`` config value - (which defaults to True). - trans : path-like | None - The transform file to use. - scrollable : bool - Make the coregistration panel vertically scrollable (default True). - orient_to_surface : bool | None - If True (default), orient EEG electrode and head shape points - to the head surface. - - .. versionadded:: 0.16 - scale_by_distance : bool | None - If True (default), scale the digitization points by their - distance from the scalp surface. - - .. versionadded:: 0.16 - mark_inside : bool | None - If True (default), mark points inside the head surface in a - different color. - - .. versionadded:: 0.16 - %(interaction_scene_none)s - Defaults to ``'terrain'``. - - .. versionadded:: 0.16 - .. versionchanged:: 1.0 - Default interaction mode if ``None`` and no config setting found - changed from ``'trackball'`` to ``'terrain'``. - scale : float | None - The scaling for the scene. - - .. versionadded:: 0.16 - advanced_rendering : bool - Use advanced OpenGL rendering techniques (default True). - For some renderers (such as MESA software) this can cause rendering - bugs. - - .. versionadded:: 0.18 - head_inside : bool - If True (default), add opaque inner scalp head surface to help occlude - points behind the head. - - .. versionadded:: 0.23 - %(fullscreen)s - Default is None, which uses ``MNE_COREG_FULLSCREEN`` config value - (which defaults to False). - - .. versionadded:: 1.1 - show : bool - Show the GUI if True. - block : bool - Whether to halt program execution until the figure is closed. 
- %(verbose)s - - Returns - ------- - frame : instance of CoregistrationUI - The coregistration frame. - - Notes - ----- - Many parameters (e.g., ``head_opacity``) take None as a parameter, - which means that the default will be read from the MNE-Python - configuration file (which gets saved when exiting). - - Step by step instructions for the coregistrations are shown below: - - .. youtube:: ALV5qqMHLlQ - """ - unsupported_params = { - 'tabbed': (tabbed, False), - 'split': (split, True), - 'scrollable': (scrollable, True), - 'head_inside': (head_inside, True), - 'guess_mri_subject': guess_mri_subject, - 'scale': scale, - 'advanced_rendering': advanced_rendering, - } - for key, val in unsupported_params.items(): - if isinstance(val, tuple): - to_raise = val[0] != val[1] - else: - to_raise = val is not None - if to_raise: - warn(f"The parameter {key} is not supported with" - " the pyvistaqt 3d backend. It will be ignored.") - config = get_config() - if guess_mri_subject is None: - guess_mri_subject = config.get( - 'MNE_COREG_GUESS_MRI_SUBJECT', 'true') == 'true' - if head_high_res is None: - head_high_res = config.get('MNE_COREG_HEAD_HIGH_RES', 'true') == 'true' - if advanced_rendering is None: - advanced_rendering = \ - config.get('MNE_COREG_ADVANCED_RENDERING', 'true') == 'true' - if head_opacity is None: - head_opacity = config.get('MNE_COREG_HEAD_OPACITY', 0.8) - if head_inside is None: - head_inside = \ - config.get('MNE_COREG_HEAD_INSIDE', 'true').lower() == 'true' - if width is None: - width = config.get('MNE_COREG_WINDOW_WIDTH', 800) - if height is None: - height = config.get('MNE_COREG_WINDOW_HEIGHT', 600) - if subjects_dir is None: - if 'SUBJECTS_DIR' in config: - subjects_dir = config['SUBJECTS_DIR'] - elif 'MNE_COREG_SUBJECTS_DIR' in config: - subjects_dir = config['MNE_COREG_SUBJECTS_DIR'] - if orient_to_surface is None: - orient_to_surface = (config.get('MNE_COREG_ORIENT_TO_SURFACE', '') == - 'true') - if scale_by_distance is None: - scale_by_distance = (config.get('MNE_COREG_SCALE_BY_DISTANCE', '') == - 'true') - if interaction is None: - interaction = config.get('MNE_COREG_INTERACTION', 'terrain') - if mark_inside is None: - mark_inside = config.get('MNE_COREG_MARK_INSIDE', '') == 'true' - if scale is None: - scale = config.get('MNE_COREG_SCENE_SCALE', 0.16) - if fullscreen is None: - fullscreen = config.get('MNE_COREG_FULLSCREEN', '') == 'true' - head_opacity = float(head_opacity) - head_inside = bool(head_inside) - width = int(width) - height = int(height) - scale = float(scale) - - from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING - from ._coreg import CoregistrationUI - if MNE_3D_BACKEND_TESTING: - show = block = False - return CoregistrationUI( - info_file=inst, subject=subject, subjects_dir=subjects_dir, - head_resolution=head_high_res, head_opacity=head_opacity, - orient_glyphs=orient_to_surface, scale_by_distance=scale_by_distance, - mark_inside=mark_inside, trans=trans, size=(width, height), show=show, - block=block, interaction=interaction, fullscreen=fullscreen, - verbose=verbose - ) - - -@verbose -def locate_ieeg(info, trans, base_image, subject=None, subjects_dir=None, - groups=None, show=True, block=False, verbose=None): - """Locate intracranial electrode contacts. - - Parameters - ---------- - %(info_not_none)s - %(trans_not_none)s - base_image : path-like | nibabel.spatialimages.SpatialImage - The CT or MR image on which the electrode contacts can located. It - must be aligned to the Freesurfer T1 if ``subject`` and - ``subjects_dir`` are provided. 
Path-like inputs and nibabel image - objects are supported. - %(subject)s - %(subjects_dir)s - groups : dict | None - A dictionary with channels as keys and their group index as values. - If None, the groups will be inferred by the channel names. Channel - names must have a format like ``LAMY 7`` where a string prefix - like ``LAMY`` precedes a numeric index like ``7``. If the channels - are formatted improperly, group plotting will work incorrectly. - Group assignments can be adjusted in the GUI. - show : bool - Show the GUI if True. - block : bool - Whether to halt program execution until the figure is closed. - %(verbose)s - - Returns - ------- - gui : instance of IntracranialElectrodeLocator - The graphical user interface (GUI) window. - """ - from ..viz.backends._utils import _qt_app_exec - from ._ieeg_locate import IntracranialElectrodeLocator - from qtpy.QtWidgets import QApplication - # get application - app = QApplication.instance() - if app is None: - app = QApplication(["Intracranial Electrode Locator"]) - gui = IntracranialElectrodeLocator( - info, trans, base_image, subject=subject, subjects_dir=subjects_dir, - groups=groups, show=show, verbose=verbose) - if block: - _qt_app_exec(app) - return gui - - -class _GUIScraper(object): - """Scrape GUI outputs.""" - - def __repr__(self): - return '' +"""Convenience functions for opening GUIs.""" +import lazy_loader as lazy - def __call__(self, block, block_vars, gallery_conf): - from ._ieeg_locate import IntracranialElectrodeLocator - from ._coreg import CoregistrationUI - from sphinx_gallery.scrapers import figure_rst - from qtpy import QtGui - for gui in block_vars['example_globals'].values(): - if (isinstance(gui, (IntracranialElectrodeLocator, - CoregistrationUI)) and - not getattr(gui, '_scraped', False) and - gallery_conf['builder_name'] == 'html'): - gui._scraped = True # monkey-patch but it's easy enough - img_fname = next(block_vars['image_path_iterator']) - # TODO fix in window refactor - window = gui if hasattr(gui, 'grab') else gui._renderer._window - # window is QWindow - # https://doc.qt.io/qt-5/qwidget.html#grab - pixmap = window.grab() - if hasattr(gui, '_renderer'): # if no renderer, no need - # Now the tricky part: we need to get the 3D renderer, - # extract the image from it, and put it in the correct - # place in the pixmap. The easiest way to do this is - # actually to save the 3D image first, then load it - # using QPixmap and Qt geometry. 
- plotter = gui._renderer.plotter - plotter.screenshot(img_fname) - sub_pixmap = QtGui.QPixmap(img_fname) - # https://doc.qt.io/qt-5/qwidget.html#mapTo - # https://doc.qt.io/qt-5/qpainter.html#drawPixmap-1 - QtGui.QPainter(pixmap).drawPixmap( - plotter.mapTo(window, plotter.rect().topLeft()), - sub_pixmap) - # https://doc.qt.io/qt-5/qpixmap.html#save - pixmap.save(img_fname) - try: # for compatibility with both GUIs, will be refactored - gui._renderer.close() # TODO should be triggered by close - except Exception: - pass - gui.close() - return figure_rst( - [img_fname], gallery_conf['src_dir'], 'GUI') - return '' +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/gui/__init__.pyi b/mne/gui/__init__.pyi new file mode 100644 index 00000000000..086c51a4904 --- /dev/null +++ b/mne/gui/__init__.pyi @@ -0,0 +1,2 @@ +__all__ = ["_GUIScraper", "coregistration"] +from ._gui import _GUIScraper, coregistration diff --git a/mne/gui/_core.py b/mne/gui/_core.py deleted file mode 100644 index 7929c838737..00000000000 --- a/mne/gui/_core.py +++ /dev/null @@ -1,540 +0,0 @@ -# -*- coding: utf-8 -*- -"""Shared GUI classes and functions.""" - -# Authors: Alex Rockhill -# -# License: BSD (3-clause) - -import os -import os.path as op -import numpy as np -from functools import partial - -from qtpy import QtCore -from qtpy.QtCore import Slot, Qt -from qtpy.QtWidgets import (QMainWindow, QGridLayout, - QVBoxLayout, QHBoxLayout, QLabel, - QMessageBox, QWidget, QLineEdit) - -from matplotlib import patheffects -from matplotlib.backends.backend_qt5agg import FigureCanvas -from matplotlib.figure import Figure -from matplotlib.patches import Rectangle - -from ..viz.backends.renderer import _get_renderer -from ..viz.utils import safe_event -from ..surface import _read_mri_surface, _marching_cubes -from ..transforms import apply_trans, _frame_to_str -from ..utils import (logger, _check_fname, verbose, warn, get_subjects_dir, - _import_nibabel) -from ..viz.backends._utils import _qt_safe_window - -_IMG_LABELS = [['I', 'P'], ['I', 'L'], ['P', 'L']] -_ZOOM_STEP_SIZE = 5 - - -@verbose -def _load_image(img, verbose=None): - """Load data from a 3D image file (e.g. 
CT, MR).""" - nib = _import_nibabel('use GUI') - if not isinstance(img, nib.spatialimages.SpatialImage): - logger.debug(f'Loading {img}') - _check_fname(img, overwrite='read', must_exist=True) - img = nib.load(img) - # get data - orig_data = np.array(img.dataobj).astype(np.float32) - # reorient data to RAS - ornt = nib.orientations.axcodes2ornt( - nib.orientations.aff2axcodes(img.affine)).astype(int) - ras_ornt = nib.orientations.axcodes2ornt('RAS') - ornt_trans = nib.orientations.ornt_transform(ornt, ras_ornt) - img_data = nib.orientations.apply_orientation(orig_data, ornt_trans) - orig_mgh = nib.MGHImage(orig_data, img.affine) - aff_trans = nib.orientations.inv_ornt_aff(ornt_trans, img.shape) - vox_ras_t = np.dot(orig_mgh.header.get_vox2ras_tkr(), aff_trans) - vox_scan_ras_t = np.dot(orig_mgh.header.get_vox2ras(), aff_trans) - return img_data, vox_ras_t, vox_scan_ras_t - - -def _make_mpl_plot(width=4, height=4, dpi=300, tight=True, hide_axes=True, - facecolor='black', invert=True): - fig = Figure(figsize=(width, height), dpi=dpi) - canvas = FigureCanvas(fig) - ax = fig.subplots() - if tight: - fig.subplots_adjust(bottom=0, left=0, right=1, top=1, - wspace=0, hspace=0) - ax.set_facecolor(facecolor) - # clean up excess plot text, invert - if invert: - ax.invert_yaxis() - if hide_axes: - ax.set_xticks([]) - ax.set_yticks([]) - return canvas, fig - - -class SliceBrowser(QMainWindow): - """Navigate between slices of an MRI, CT, etc. image.""" - - _xy_idx = ( - (1, 2), - (0, 2), - (0, 1), - ) - - @_qt_safe_window(splash='_renderer.figure.splash', window='') - def __init__(self, base_image=None, subject=None, subjects_dir=None, - verbose=None): - """GUI for browsing slices of anatomical images.""" - # initialize QMainWindow class - super(SliceBrowser, self).__init__() - self.setAttribute(Qt.WA_DeleteOnClose, True) - - self._verbose = verbose - # if bad/None subject, will raise an informative error when loading MRI - subject = os.environ.get('SUBJECT') if subject is None else subject - subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=False)) - self._subject_dir = op.join(subjects_dir, subject) \ - if subject and subjects_dir else None - self._load_image_data(base_image=base_image) - - # GUI design - - # Main plots: make one plot for each view; sagittal, coronal, axial - self._plt_grid = QGridLayout() - self._figs = list() - for i in range(3): - canvas, fig = _make_mpl_plot() - self._plt_grid.addWidget(canvas, i // 2, i % 2) - self._figs.append(fig) - self._renderer = _get_renderer( - name='Slice Browser', size=(400, 400), bgcolor='w') - self._plt_grid.addWidget(self._renderer.plotter, 1, 1) - - self._set_ras([0., 0., 0.], update_plots=False) - - self._plot_images() - - self._configure_ui() - - def _configure_ui(self): - bottom_hbox = self._configure_status_bar() - - # Put everything together - plot_ch_hbox = QHBoxLayout() - plot_ch_hbox.addLayout(self._plt_grid) - - main_vbox = QVBoxLayout() - main_vbox.addLayout(plot_ch_hbox) - main_vbox.addLayout(bottom_hbox) - - central_widget = QWidget() - central_widget.setLayout(main_vbox) - self.setCentralWidget(central_widget) - - def _load_image_data(self, base_image=None): - """Get image data to display and transforms to/from vox/RAS.""" - if self._subject_dir is None: - # if the recon-all is not finished or the CT is not - # downsampled to the MRI, the MRI can not be used - self._mri_data = None - self._head = None - self._lh = self._rh = None - else: - mri_img = 'brain' if op.isfile(op.join( - self._subject_dir, 'mri', 'brain.mgz')) else 
'T1' - self._mri_data, vox_ras_t, vox_scan_ras_t = _load_image( - op.join(self._subject_dir, 'mri', f'{mri_img}.mgz')) - - # ready alternate base image if provided, otherwise use brain/T1 - if base_image is None: - assert self._mri_data is not None - self._base_data = self._mri_data - self._vox_ras_t = vox_ras_t - self._vox_scan_ras_t = vox_scan_ras_t - else: - self._base_data, self._vox_ras_t, self._vox_scan_ras_t = \ - _load_image(base_image) - if self._mri_data is not None: - if self._mri_data.shape != self._base_data.shape or \ - not np.allclose(self._vox_ras_t, vox_ras_t, rtol=1e-6): - raise ValueError( - 'Base image is not aligned to MRI, got ' - f'Base shape={self._base_data.shape}, ' - f'MRI shape={self._mri_data.shape}, ' - f'Base affine={vox_ras_t} and ' - f'MRI affine={self._vox_ras_t}, ' - 'please provide an aligned image or do not use the ' - '``subject`` and ``subjects_dir`` arguments') - - self._ras_vox_t = np.linalg.inv(self._vox_ras_t) - self._scan_ras_vox_t = np.linalg.inv(self._vox_scan_ras_t) - self._voxel_sizes = np.array(self._base_data.shape) - self._voxel_ratios = self._voxel_sizes / self._voxel_sizes.min() - - # We need our extents to land the centers of each pixel on the voxel - # number. This code assumes 1mm isotropic... - img_delta = 0.5 - self._img_extents = list( - [-img_delta, self._voxel_sizes[idx[0]] - img_delta, - -img_delta, self._voxel_sizes[idx[1]] - img_delta] - for idx in self._xy_idx) - - if self._subject_dir is not None: - if op.exists(op.join(self._subject_dir, 'surf', 'lh.seghead')): - self._head = _read_mri_surface( - op.join(self._subject_dir, 'surf', 'lh.seghead')) - assert _frame_to_str[self._head['coord_frame']] == 'mri' - else: - warn('`seghead` not found, using marching cubes on base image ' - 'for head plot, use :ref:`mne.bem.make_scalp_surfaces` ' - 'to add the scalp surface instead') - self._head = None - - if self._subject_dir is not None: - # allow ?h.pial.T1 if ?h.pial doesn't exist - # end with '' for better file not found error - for img in ('', '.T1', '.T2', ''): - surf_fname = op.join( - self._subject_dir, 'surf', '{hemi}' + f'.pial{img}') - if op.isfile(surf_fname.format(hemi='lh')): - break - if op.exists(surf_fname.format(hemi='lh')): - self._lh = _read_mri_surface(surf_fname.format(hemi='lh')) - assert _frame_to_str[self._lh['coord_frame']] == 'mri' - self._rh = _read_mri_surface(surf_fname.format(hemi='rh')) - assert _frame_to_str[self._rh['coord_frame']] == 'mri' - else: - warn('`pial` surface not found, skipping adding to 3D ' - 'plot. 
This indicates the Freesurfer recon-all ' - 'has not finished or has been modified and ' - 'these files have been deleted.') - self._lh = self._rh = None - - def _plot_images(self): - """Use the MRI or CT to make plots.""" - # Plot sagittal (0), coronal (1) or axial (2) view - self._images = dict(base=list(), cursor_v=list(), cursor_h=list(), - bounds=list()) - img_min = np.nanmin(self._base_data) - img_max = np.nanmax(self._base_data) - text_kwargs = dict(fontsize='medium', weight='bold', color='#66CCEE', - family='monospace', ha='center', va='center', - path_effects=[patheffects.withStroke( - linewidth=4, foreground="k", alpha=0.75)]) - xyz = apply_trans(self._ras_vox_t, self._ras) - for axis in range(3): - plot_x_idx, plot_y_idx = self._xy_idx[axis] - fig = self._figs[axis] - ax = fig.axes[0] - img_data = np.take(self._base_data, self._current_slice[axis], - axis=axis).T - self._images['base'].append(ax.imshow( - img_data, cmap='gray', aspect='auto', zorder=1, - vmin=img_min, vmax=img_max)) - img_extent = self._img_extents[axis] # x0, x1, y0, y1 - w, h = np.diff(np.array(img_extent).reshape(2, 2), axis=1)[:, 0] - self._images['bounds'].append(Rectangle( - img_extent[::2], w, h, edgecolor='w', facecolor='none', - alpha=0.25, lw=0.5, zorder=1.5)) - ax.add_patch(self._images['bounds'][-1]) - v_x = (xyz[plot_x_idx],) * 2 - v_y = img_extent[2:4] - self._images['cursor_v'].append(ax.plot( - v_x, v_y, color='lime', linewidth=0.5, alpha=0.5, zorder=8)[0]) - h_y = (xyz[plot_y_idx],) * 2 - h_x = img_extent[0:2] - self._images['cursor_h'].append(ax.plot( - h_x, h_y, color='lime', linewidth=0.5, alpha=0.5, zorder=8)[0]) - # label axes - self._figs[axis].text(0.5, 0.075, _IMG_LABELS[axis][0], - **text_kwargs) - self._figs[axis].text(0.075, 0.5, _IMG_LABELS[axis][1], - **text_kwargs) - self._figs[axis].axes[0].axis(img_extent) - self._figs[axis].canvas.mpl_connect( - 'scroll_event', self._on_scroll) - self._figs[axis].canvas.mpl_connect( - 'button_release_event', partial(self._on_click, axis=axis)) - # add head and brain in mm (convert from m) - if self._head is None: - logger.debug('Using marching cubes on the base image for the ' - '3D visualization panel') - # in this case, leave in voxel coordinates - rr, tris = _marching_cubes(np.where( - self._base_data < np.quantile(self._base_data, 0.95), 0, 1), - [1])[0] - # marching cubes transposes dimensions so flip - rr = apply_trans(self._vox_ras_t, rr[:, ::-1]) - self._renderer.mesh( - *rr.T, triangles=tris, color='gray', opacity=0.2, - reset_camera=False, render=False) - self._renderer.set_camera(focalpoint=rr.mean(axis=0)) - else: - self._renderer.mesh( - *self._head['rr'].T * 1000, triangles=self._head['tris'], - color='gray', opacity=0.2, reset_camera=False, render=False) - if self._lh is not None and self._rh is not None: - self._renderer.mesh( - *self._lh['rr'].T * 1000, triangles=self._lh['tris'], - color='white', opacity=0.2, reset_camera=False, render=False) - self._renderer.mesh( - *self._rh['rr'].T * 1000, triangles=self._rh['tris'], - color='white', opacity=0.2, reset_camera=False, render=False) - self._renderer.set_camera(azimuth=90, elevation=90, distance=300, - focalpoint=tuple(self._ras)) - # update plots - self._draw() - self._renderer._update() - - def _configure_status_bar(self, hbox=None): - """Make a bar at the bottom with information in it.""" - hbox = QHBoxLayout() if hbox is None else hbox - - self._intensity_label = QLabel('') # update later - hbox.addWidget(self._intensity_label) - - VOX_label = QLabel('VOX =') - 
self._VOX_textbox = QLineEdit('') # update later - self._VOX_textbox.setMaximumHeight(25) - self._VOX_textbox.setMinimumWidth(75) - self._VOX_textbox.focusOutEvent = self._update_VOX - hbox.addWidget(VOX_label) - hbox.addWidget(self._VOX_textbox) - - RAS_label = QLabel('RAS =') - self._RAS_textbox = QLineEdit('') # update later - self._RAS_textbox.setMaximumHeight(25) - self._RAS_textbox.setMinimumWidth(150) - self._RAS_textbox.focusOutEvent = self._update_RAS - hbox.addWidget(RAS_label) - hbox.addWidget(self._RAS_textbox) - self._update_moved() # update text now - return hbox - - def _update_camera(self, render=False): - """Update the camera position.""" - self._renderer.set_camera( - # needs fix, distance moves when focal point updates - distance=self._renderer.plotter.camera.distance * 0.9, - focalpoint=tuple(self._ras), - reset_camera=False) - - def _on_scroll(self, event): - """Process mouse scroll wheel event to zoom.""" - self._zoom(np.sign(event.step), draw=True) - - def _zoom(self, sign=1, draw=False): - """Zoom in on the image.""" - delta = _ZOOM_STEP_SIZE * sign - for axis, fig in enumerate(self._figs): - xcur = self._images['cursor_v'][axis].get_xdata()[0] - ycur = self._images['cursor_h'][axis].get_ydata()[0] - rx, ry = [self._voxel_ratios[idx] for idx in self._xy_idx[axis]] - xmin, xmax = fig.axes[0].get_xlim() - ymin, ymax = fig.axes[0].get_ylim() - xmid = (xmin + xmax) / 2 - ymid = (ymin + ymax) / 2 - if sign == 1: # may need to shift if zooming in - if abs(xmid - xcur) > delta / 2 * rx: - xmid += delta * np.sign(xcur - xmid) * rx - if abs(ymid - ycur) > delta / 2 * ry: - ymid += delta * np.sign(ycur - ymid) * ry - xwidth = (xmax - xmin) / 2 - delta * rx - ywidth = (ymax - ymin) / 2 - delta * ry - if xwidth <= 0 or ywidth <= 0: - return - fig.axes[0].set_xlim(xmid - xwidth, xmid + xwidth) - fig.axes[0].set_ylim(ymid - ywidth, ymid + ywidth) - if draw: - fig.canvas.draw() - - @Slot() - def _update_RAS(self, event): - """Interpret user input to the RAS textbox.""" - ras = self._convert_text(self._RAS_textbox.text(), 'ras') - if ras is not None: - self._set_ras(ras) - - @Slot() - def _update_VOX(self, event): - """Interpret user input to the RAS textbox.""" - ras = self._convert_text(self._VOX_textbox.text(), 'vox') - if ras is not None: - self._set_ras(ras) - - def _convert_text(self, text, text_kind): - text = text.replace('\n', '') - vals = text.split(',') - if len(vals) != 3: - vals = text.split(' ') # spaces also okay as in freesurfer - vals = [var.lstrip().rstrip() for var in vals] - try: - vals = np.array([float(var) for var in vals]).reshape(3) - except Exception: - self._update_moved() # resets RAS label - return - if text_kind == 'vox': - vox = vals - ras = apply_trans(self._vox_ras_t, vox) - else: - assert text_kind == 'ras' - ras = vals - vox = apply_trans(self._ras_vox_t, ras) - wrong_size = any(var < 0 or var > n - 1 for var, n in - zip(vox, self._voxel_sizes)) - if wrong_size: - self._update_moved() # resets RAS label - return - return ras - - @property - def _ras(self): - return self._ras_safe - - def set_RAS(self, ras): - """Set the crosshairs to a given RAS. - - Parameters - ---------- - ras : array-like - The right-anterior-superior scanner RAS coordinate. 
- """ - self._set_ras(ras) - - def _set_ras(self, ras, update_plots=True): - ras = np.asarray(ras, dtype=float) - assert ras.shape == (3,) - msg = ', '.join(f'{x:0.2f}' for x in ras) - logger.debug(f'Trying RAS: ({msg}) mm') - # clip to valid - vox = apply_trans(self._ras_vox_t, ras) - vox = np.array([ - np.clip(d, 0, self._voxel_sizes[ii] - 1) - for ii, d in enumerate(vox)]) - # transform back, make write-only - self._ras_safe = apply_trans(self._vox_ras_t, vox) - self._ras_safe.flags['WRITEABLE'] = False - msg = ', '.join(f'{x:0.2f}' for x in self._ras_safe) - logger.debug(f'Setting RAS: ({msg}) mm') - if update_plots: - self._move_cursors_to_pos() - - def set_vox(self, vox): - """Set the crosshairs to a given voxel coordinate. - - Parameters - ---------- - vox : array-like - The voxel coordinate. - """ - self._set_ras(apply_trans(self._vox_ras_t, vox)) - - @property - def _vox(self): - return apply_trans(self._ras_vox_t, self._ras) - - @property - def _current_slice(self): - return self._vox.round().astype(int) - - def _draw(self, axis=None): - """Update the figures with a draw call.""" - for axis in (range(3) if axis is None else [axis]): - self._figs[axis].canvas.draw() - - def _update_base_images(self, axis=None, draw=False): - """Update the base images.""" - for axis in range(3) if axis is None else [axis]: - img_data = np.take(self._base_data, self._current_slice[axis], - axis=axis).T - self._images['base'][axis].set_data(img_data) - if draw: - self._draw(axis) - - def _update_images(self, axis=None, draw=True): - """Update CT and channel images when general changes happen.""" - self._update_base_images(axis=axis) - if draw: - self._draw(axis) - - def _move_cursors_to_pos(self): - """Move the cursors to a position.""" - for axis in range(3): - x, y = self._vox[list(self._xy_idx[axis])] - self._images['cursor_v'][axis].set_xdata([x, x]) - self._images['cursor_h'][axis].set_ydata([y, y]) - self._update_images(draw=True) - self._update_moved() - - def _show_help(self): - """Show the help menu.""" - QMessageBox.information( - self, 'Help', - "Help:\n" - "'+'/'-': zoom\nleft/right arrow: left/right\n" - "up/down arrow: superior/inferior\n" - "left angle bracket/right angle bracket: anterior/posterior") - - def keyPressEvent(self, event): - """Execute functions when the user presses a key.""" - if event.key() == 'escape': - self.close() - - elif event.key() == QtCore.Qt.Key_Return: - for widget in (self._RAS_textbox, self._VOX_textbox): - if widget.hasFocus(): - widget.clearFocus() - self.setFocus() # removing focus calls focus out event - - elif event.text() == 'h': - self._show_help() - - elif event.text() in ('=', '+', '-'): - self._zoom(sign=-2 * (event.text() == '-') + 1, draw=True) - - # Changing slices - elif event.key() in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down, - QtCore.Qt.Key_Left, QtCore.Qt.Key_Right, - QtCore.Qt.Key_Comma, QtCore.Qt.Key_Period, - QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown): - ras = np.array(self._ras) - if event.key() in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down): - ras[2] += 2 * (event.key() == QtCore.Qt.Key_Up) - 1 - elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Right): - ras[0] += 2 * (event.key() == QtCore.Qt.Key_Right) - 1 - else: - ras[1] += 2 * (event.key() == QtCore.Qt.Key_PageUp or - event.key() == QtCore.Qt.Key_Period) - 1 - self._set_ras(ras) - - def _on_click(self, event, axis): - """Move to view on MRI and CT on click.""" - if event.inaxes is self._figs[axis].axes[0]: - # Data coordinates are voxel coordinates - pos = (event.xdata, 
event.ydata) - logger.debug(f'Clicked {"XYZ"[axis]} ({axis}) axis at pos {pos}') - xyz = self._vox - xyz[list(self._xy_idx[axis])] = pos - logger.debug(f'Using voxel {list(xyz)}') - ras = apply_trans(self._vox_ras_t, xyz) - self._set_ras(ras) - - def _update_moved(self): - """Update when cursor position changes.""" - self._RAS_textbox.setText('{:.2f}, {:.2f}, {:.2f}'.format( - *self._ras)) - self._VOX_textbox.setText('{:3d}, {:3d}, {:3d}'.format( - *self._current_slice)) - self._intensity_label.setText('intensity = {:.2f}'.format( - self._base_data[tuple(self._current_slice)])) - - @safe_event - def closeEvent(self, event): - """Clean up upon closing the window.""" - try: - self._renderer.plotter.close() - except AttributeError: - pass - self.close() diff --git a/mne/gui/_coreg.py b/mne/gui/_coreg.py index 61232f5ed7d..bd2decc4a3f 100644 --- a/mne/gui/_coreg.py +++ b/mne/gui/_coreg.py @@ -1,43 +1,74 @@ -from contextlib import contextmanager -from functools import partial +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + import inspect import os import os.path as op import platform -from pathlib import Path -import time import queue -import threading import re +import threading +import time +import weakref +from contextlib import contextmanager +from functools import partial +from pathlib import Path import numpy as np -from traitlets import observe, HasTraits, Unicode, Bool, Float +from traitlets import Bool, Float, HasTraits, Instance, Unicode, observe -from ..io.constants import FIFF -from ..defaults import DEFAULTS -from ..io import read_info, read_fiducials, write_fiducials, read_raw -from ..io.pick import pick_types -from ..io.open import fiff_open, dir_tree_find -from ..io.meas_info import _empty_info -from ..io._read_raw import supported as raw_supported_types +from .._fiff.constants import FIFF +from .._fiff.meas_info import _empty_info, read_fiducials, read_info, write_fiducials +from .._fiff.open import dir_tree_find, fiff_open +from .._fiff.pick import pick_types from ..bem import make_bem_solution, write_bem_solution -from ..coreg import (Coregistration, _is_mri_subject, scale_mri, bem_fname, - _mri_subject_has_bem, fid_fname, _map_fid_name_to_idx, - _find_head_bem) -from ..viz._3d import (_plot_head_surface, _plot_head_fiducials, - _plot_head_shape_points, _plot_mri_fiducials, - _plot_hpi_coils, _plot_sensors, _plot_helmet) +from ..channels import read_dig_fif +from ..coreg import ( + Coregistration, + _find_head_bem, + _is_mri_subject, + _map_fid_name_to_idx, + _mri_subject_has_bem, + bem_fname, + fid_fname, + scale_mri, +) +from ..defaults import DEFAULTS +from ..io._read_raw import _get_supported, read_raw +from ..surface import _CheckInside, _DistanceQuery +from ..transforms import ( + Transform, + _ensure_trans, + _get_trans, + _get_transforms_to_coord_frame, + read_trans, + rotation_angles, + write_trans, +) +from ..utils import ( + _check_fname, + _validate_type, + check_fname, + fill_doc, + get_subjects_dir, + logger, + verbose, +) +from ..viz._3d import ( + _plot_head_fiducials, + _plot_head_shape_points, + _plot_head_surface, + _plot_helmet, + _plot_hpi_coils, + _plot_mri_fiducials, + _plot_sensors_3d, +) from ..viz.backends._utils import _qt_app_exec, _qt_safe_window from ..viz.utils import safe_event -from ..transforms import (read_trans, write_trans, _ensure_trans, _get_trans, - rotation_angles, _get_transforms_to_coord_frame) -from ..utils import (get_subjects_dir, check_fname, _check_fname, fill_doc, - 
verbose, logger, _validate_type) -from ..surface import _DistanceQuery, _CheckInside -from ..channels import read_dig_fif -class _WorkerData(): +class _WorkerData: def __init__(self, name, params=None): self._name = name self._params = params @@ -50,9 +81,9 @@ def _get_subjects(sdir): dir_content = os.listdir(sdir) subjects = [s for s in dir_content if _is_mri_subject(s, sdir)] if len(subjects) == 0: - subjects.append('') + subjects.append("") else: - subjects = [''] + subjects = [""] return sorted(subjects) @@ -62,47 +93,51 @@ class CoregistrationUI(HasTraits): Parameters ---------- - info_file : None | str + info_file : None | path-like The FIFF file with digitizer data for coregistration. %(subject)s %(subjects_dir)s %(fiducials)s head_resolution : bool - If True, use a high-resolution head surface. Defaults to False. + If ``True``, use a high-resolution head surface. Defaults to ``False``. head_opacity : float - The opacity of the head surface. Defaults to 0.8. + The opacity of the head surface. Defaults to ``0.8``. hpi_coils : bool - If True, display the HPI coils. Defaults to True. + If ``True``, display the HPI coils. Defaults to ``True``. head_shape_points : bool - If True, display the head shape points. Defaults to True. + If ``True``, display the head shape points. Defaults to ``True``. eeg_channels : bool - If True, display the EEG channels. Defaults to True. + If ``True``, display the EEG channels. Defaults to ``True``. + meg_channels : bool + If ``True``, display the MEG channels. Defaults to ``False``. + fnirs_channels : bool + If ``True``, display the fNIRS channels. Defaults to ``True``. orient_glyphs : bool - If True, orient the sensors towards the head surface. Default to False. + If ``True``, orient the sensors towards the head surface. Default to ``False``. scale_by_distance : bool - If True, scale the sensors based on their distance to the head surface. - Defaults to True. + If ``True``, scale the sensors based on their distance to the head surface. + Defaults to ``True``. mark_inside : bool - If True, mark the head shape points that are inside the head surface - with a different color. Defaults to True. + If ``True``, mark the head shape points that are inside the head surface + with a different color. Defaults to ``True``. sensor_opacity : float - The opacity of the sensors between 0 and 1. Defaults to 1.0. - trans : path-like - The path to the Head<->MRI transform FIF file ("-trans.fif"). + The opacity of the sensors between ``0`` and ``1``. Defaults to ``1.``. + trans : path-like | Transform + The Head<->MRI transform or the path to its FIF file (``"-trans.fif"``). size : tuple The dimensions (width, height) of the rendering view. The default is - (800, 600). - bgcolor : tuple | str + ``(800, 600)``. + bgcolor : tuple of float | str The background color as a tuple (red, green, blue) of float - values between 0 and 1 or a valid color name (i.e. 'white' - or 'w'). Defaults to 'grey'. + values between ``0`` and ``1`` or a valid color name (i.e. ``'white'`` + or ``'w'``). Defaults to ``'grey'``. show : bool - Display the window as soon as it is ready. Defaults to True. + Display the window as soon as it is ready. Defaults to ``True``. block : bool Whether to halt program execution until the GUI has been closed (``True``) or not (``False``, default). %(fullscreen)s - The default is False. + The default is ``False``. .. 
versionadded:: 1.1 %(interaction_scene)s @@ -121,13 +156,15 @@ class CoregistrationUI(HasTraits): _subjects_dir = Unicode() _lock_fids = Bool() _current_fiducial = Unicode() - _info_file = Unicode() + _info_file = Instance(Path, default_value=Path(".")) _orient_glyphs = Bool() _scale_by_distance = Bool() _mark_inside = Bool() _hpi_coils = Bool() _head_shape_points = Bool() _eeg_channels = Bool() + _meg_channels = Bool() + _fnirs_channels = Bool() _head_resolution = Bool() _head_opacity = Float() _helmet = Bool() @@ -136,28 +173,49 @@ class CoregistrationUI(HasTraits): _scale_mode = Unicode() _icp_fid_match = Unicode() - @_qt_safe_window(splash='_renderer.figure.splash', - window='_renderer.figure.plotter') + @_qt_safe_window( + splash="_renderer.figure.splash", window="_renderer.figure.plotter" + ) @verbose - def __init__(self, info_file, *, subject=None, subjects_dir=None, - fiducials='auto', head_resolution=None, - head_opacity=None, hpi_coils=None, - head_shape_points=None, eeg_channels=None, orient_glyphs=None, - scale_by_distance=None, mark_inside=None, - sensor_opacity=None, trans=None, size=None, bgcolor=None, - show=True, block=False, fullscreen=False, - interaction='http://github.com/terrain', verbose=None): + def __init__( + self, + info_file, + *, + subject=None, + subjects_dir=None, + fiducials="auto", + head_resolution=None, + head_opacity=None, + hpi_coils=None, + head_shape_points=None, + eeg_channels=None, + meg_channels=None, + fnirs_channels=None, + orient_glyphs=None, + scale_by_distance=None, + mark_inside=None, + sensor_opacity=None, + trans=None, + size=None, + bgcolor=None, + show=True, + block=False, + fullscreen=False, + interaction="http://github.com/terrain", + verbose=None, + ): from ..viz.backends.renderer import _get_renderer def _get_default(var, val): return var if var is not None else val + self._actors = dict() self._surfaces = dict() self._widgets = dict() self._verbose = verbose self._plot_locked = False self._params_locked = False - self._refresh_rate_ms = max(int(round(1000. 
/ 60.)), 1) + self._refresh_rate_ms = max(int(round(1000.0 / 60.0)), 1) self._redraws_pending = set() self._parameter_mutex = threading.Lock() self._redraw_mutex = threading.Lock() @@ -176,8 +234,8 @@ def _get_default(var, val): self._mri_scale_modified = False self._accept_close_event = True self._fid_colors = tuple( - DEFAULTS['coreg'][f'{key}_color'] for key in - ('lpa', 'nasion', 'rpa')) + DEFAULTS["coreg"][f"{key}_color"] for key in ("lpa", "nasion", "rpa") + ) self._defaults = dict( size=_get_default(size, (800, 600)), bgcolor=_get_default(bgcolor, "grey"), @@ -187,6 +245,8 @@ def _get_default(var, val): hpi_coils=_get_default(hpi_coils, True), head_shape_points=_get_default(head_shape_points, True), eeg_channels=_get_default(eeg_channels, True), + meg_channels=_get_default(meg_channels, False), + fnirs_channels=_get_default(fnirs_channels, True), head_resolution=_get_default(head_resolution, True), head_opacity=_get_default(head_opacity, 0.8), helmet=False, @@ -198,8 +258,8 @@ def _get_default(var, val): subject_to="", scale_modes=["None", "uniform", "3-axis"], scale_mode="None", - icp_fid_matches=('nearest', 'matched'), - icp_fid_match='matched', + icp_fid_matches=("nearest", "matched"), + icp_fid_match="matched", icp_n_iterations=20, omit_hsp_distance=10.0, lock_head_opacity=self._head_opacity < 1.0, @@ -221,7 +281,7 @@ def _get_default(var, val): subject = _get_default(subject, _get_subjects(subjects_dir)[0]) # setup the window - splash = 'Initializing coregistration GUI...' if show else False + splash = "Initializing coregistration GUI..." if show else False self._renderer = _get_renderer( size=self._defaults["size"], bgcolor=self._defaults["bgcolor"], @@ -233,13 +293,16 @@ def _get_default(var, val): self._renderer.set_interaction(interaction) # coregistration model setup - self._immediate_redraw = (self._renderer._kind != 'qt') + self._picking_targets = list() + self._immediate_redraw = self._renderer._kind != "qt" self._info = info self._fiducials = fiducials self.coreg = Coregistration( - info=self._info, subject=subject, subjects_dir=subjects_dir, + info=self._info, + subject=subject, + subjects_dir=subjects_dir, fiducials=fiducials, - on_defects='ignore' # safe due to interactive visual inspection + on_defects="ignore", # safe due to interactive visual inspection ) fid_accurate = self.coreg._fid_accurate for fid in self._defaults["weights"].keys(): @@ -257,6 +320,8 @@ def _get_default(var, val): self._set_hpi_coils(self._defaults["hpi_coils"]) self._set_head_shape_points(self._defaults["head_shape_points"]) self._set_eeg_channels(self._defaults["eeg_channels"]) + self._set_meg_channels(self._defaults["meg_channels"]) + self._set_fnirs_channels(self._defaults["fnirs_channels"]) self._set_head_resolution(self._defaults["head_resolution"]) self._set_helmet(self._defaults["helmet"]) self._set_grow_hair(self._defaults["grow_hair"]) @@ -286,8 +351,8 @@ def _get_default(var, val): # internally self._set_fiducials_file(self.coreg._fid_filename) else: - self._set_head_resolution('high') - self._forward_widget_command('high_res_head', "set_value", True) + self._set_head_resolution("high") + self._forward_widget_command("high_res_head", "set_value", True) self._set_lock_fids(True) # hack to make the dig disappear self._update_fiducials_label() self._update_fiducials() @@ -301,20 +366,21 @@ def _get_default(var, val): if show: self._renderer.show() # update the view once shown - views = {True: dict(azimuth=90, elevation=90), # front - False: dict(azimuth=180, elevation=90)} # left - 
self._renderer.set_camera(distance=None, **views[self._lock_fids]) + views = { + True: dict(azimuth=90, elevation=90), # front + False: dict(azimuth=180, elevation=90), + } # left + self._renderer.set_camera(distance="auto", **views[self._lock_fids]) self._redraw() # XXX: internal plotter/renderer should not be exposed if not self._immediate_redraw: - self._renderer.plotter.add_callback( - self._redraw, self._refresh_rate_ms) + self._renderer.plotter.add_callback(self._redraw, self._refresh_rate_ms) self._renderer.plotter.show_axes() # initialization does not count as modification by the user self._trans_modified = False self._mri_fids_modified = False self._mri_scale_modified = False - if block and self._renderer._kind != 'notebook': + if block and self._renderer._kind != "notebook": _qt_app_exec(self._renderer.figure.store["app"]) def _set_subjects_dir(self, subjects_dir): @@ -330,10 +396,8 @@ def _set_subjects_dir(self, subjects_dir): ) ) subjects = _get_subjects(subjects_dir) - low_res_path = _find_head_bem( - subjects[0], subjects_dir, high_res=False) - high_res_path = _find_head_bem( - subjects[0], subjects_dir, high_res=True) + low_res_path = _find_head_bem(subjects[0], subjects_dir, high_res=False) + high_res_path = _find_head_bem(subjects[0], subjects_dir, high_res=True) valid = low_res_path is not None or high_res_path is not None except Exception: valid = False @@ -352,7 +416,7 @@ def _set_lock_fids(self, state): def _set_fiducials_file(self, fname): if fname is None: - fids = 'auto' + fids = "auto" else: fname = str( _check_fname( @@ -373,17 +437,11 @@ def _set_fiducials_file(self, fname): if fname is None: self._set_lock_fids(False) - self._forward_widget_command( - 'reload_mri_fids', 'set_enabled', False - ) + self._forward_widget_command("reload_mri_fids", "set_enabled", False) else: self._set_lock_fids(True) - self._forward_widget_command( - 'reload_mri_fids', 'set_enabled', True - ) - self._display_message( - f"Loading MRI fiducials from {fname}... Done!" - ) + self._forward_widget_command("reload_mri_fids", "set_enabled", True) + self._display_message(f"Loading MRI fiducials from {fname}... 
Done!") def _set_current_fiducial(self, fid): self._current_fiducial = fid.lower() @@ -393,24 +451,30 @@ def _set_info_file(self, fname): return # info file can be anything supported by read_raw + supported = _get_supported() try: - check_fname(fname, 'info', tuple(raw_supported_types.keys()), - endings_err=tuple(raw_supported_types.keys())) - fname = str(_check_fname(fname, overwrite="read")) # cast to str - + check_fname( + fname, + "info", + tuple(supported), + endings_err=tuple(supported), + ) + fname = Path(fname) # ctf ds `files` are actually directories - if fname.endswith(('.ds',)): + if fname.suffix == ".ds": info_file = _check_fname( - fname, overwrite='read', must_exist=True, need_dir=True) + fname, overwrite="read", must_exist=True, need_dir=True + ) else: info_file = _check_fname( - fname, overwrite='read', must_exist=True, need_dir=False) + fname, overwrite="read", must_exist=True, need_dir=False + ) valid = True - except IOError: + except OSError: valid = False if valid: style = dict(border="initial") - self._info_file = str(info_file) + self._info_file = info_file else: style = dict(border="2px solid #ff0000") self._forward_widget_command("info_file_field", "set_style", style) @@ -436,6 +500,12 @@ def _set_head_shape_points(self, state): def _set_eeg_channels(self, state): self._eeg_channels = bool(state) + def _set_meg_channels(self, state): + self._meg_channels = bool(state) + + def _set_fnirs_channels(self, state): + self._fnirs_channels = bool(state) + def _set_head_resolution(self, state): self._head_resolution = bool(state) @@ -450,14 +520,12 @@ def _set_grow_hair(self, value): def _set_subject_to(self, value): self._subject_to = value - self._forward_widget_command( - "save_subject", "set_enabled", len(value) > 0) + self._forward_widget_command("save_subject", "set_enabled", len(value) > 0) if self._check_subject_exists(): style = dict(border="2px solid #ff0000") else: style = dict(border="initial") - self._forward_widget_command( - "subject_to", "set_style", style) + self._forward_widget_command("subject_to", "set_style", style) def _set_scale_mode(self, mode): self._scale_mode = mode @@ -470,7 +538,7 @@ def _set_fiducial(self, value, coord): coords = ["X", "Y", "Z"] coord_idx = coords.index(coord) - self.coreg.fiducials.dig[fid_idx]['r'][coord_idx] = value / 1e3 + self.coreg.fiducials.dig[fid_idx]["r"][coord_idx] = value / 1e3 self._update_plot("mri_fids") def _set_parameter(self, value, mode_name, coord, plot_locked=False): @@ -482,10 +550,9 @@ def _set_parameter(self, value, mode_name, coord, plot_locked=False): return if mode_name == "scale" and self._scale_mode == "uniform": with self._lock(params=True): - self._forward_widget_command( - ["sY", "sZ"], "set_value", value) + self._forward_widget_command(["sY", "sZ"], "set_value", value) with self._parameter_mutex: - self. 
_set_parameter_safe(value, mode_name, coord) + self._set_parameter_safe(value, mode_name, coord) if not plot_locked: self._update_plot("sensors") @@ -521,9 +588,11 @@ def _set_icp_fid_match(self, method): def _set_point_weight(self, weight, point): funcs = { - 'hpi': '_set_hpi_coils', - 'hsp': '_set_head_shape_points', - 'eeg': '_set_eeg_channels', + "hpi": "_set_hpi_coils", + "hsp": "_set_head_shape_points", + "eeg": "_set_eeg_channels", + "meg": "_set_meg_channels", + "fnirs": "_set_fnirs_channels", } if point in funcs.keys(): getattr(self, funcs[point])(weight > 0) @@ -567,70 +636,91 @@ def _lock_fids_changed(self, change=None): # MRI fiducials "save_mri_fids", # View options - "helmet", "head_opacity", "high_res_head", + "helmet", + "meg", + "head_opacity", + "high_res_head", # Digitization source - "info_file", "grow_hair", "omit_distance", "omit", "reset_omit", + "info_file", + "grow_hair", + "omit_distance", + "omit", + "reset_omit", # Scaling - "scaling_mode", "sX", "sY", "sZ", + "scaling_mode", + "sX", + "sY", + "sZ", # Transformation - "tX", "tY", "tZ", - "rX", "rY", "rZ", + "tX", + "tY", + "tZ", + "rX", + "rY", + "rZ", # Fitting buttons - "fit_fiducials", "fit_icp", + "fit_fiducials", + "fit_icp", # Transformation I/O - "save_trans", "load_trans", + "save_trans", + "load_trans", "reset_trans", # ICP - "icp_n_iterations", "icp_fid_match", "reset_fitting_options", + "icp_n_iterations", + "icp_fid_match", + "reset_fitting_options", # Weights - "hsp_weight", "eeg_weight", "hpi_weight", - "lpa_weight", "nasion_weight", "rpa_weight", + "hsp_weight", + "eeg_weight", + "hpi_weight", + "lpa_weight", + "nasion_weight", + "rpa_weight", ] fits_widgets = ["fits_fiducials", "fits_icp"] fid_widgets = ["fid_X", "fid_Y", "fid_Z", "fids_file", "fids"] if self._lock_fids: self._forward_widget_command(locked_widgets, "set_enabled", True) self._forward_widget_command( - 'head_opacity', 'set_value', self._old_head_opacity + "head_opacity", "set_value", self._old_head_opacity ) self._scale_mode_changed() self._display_message() self._update_distance_estimation() else: self._old_head_opacity = self._head_opacity - self._forward_widget_command( - 'head_opacity', 'set_value', 1.0 - ) + self._forward_widget_command("head_opacity", "set_value", 1.0) self._forward_widget_command(locked_widgets, "set_enabled", False) self._forward_widget_command(fits_widgets, "set_enabled", False) - self._display_message("Placing MRI fiducials - " - f"{self._current_fiducial.upper()}") + self._display_message( + f"Placing MRI fiducials - {self._current_fiducial.upper()}" + ) self._set_sensors_visibility(self._lock_fids) self._forward_widget_command("lock_fids", "set_value", self._lock_fids) - self._forward_widget_command(fid_widgets, "set_enabled", - not self._lock_fids) + self._forward_widget_command(fid_widgets, "set_enabled", not self._lock_fids) @observe("_current_fiducial") def _current_fiducial_changed(self, change=None): self._update_fiducials() self._follow_fiducial_view() if not self._lock_fids: - self._display_message("Placing MRI fiducials - " - f"{self._current_fiducial.upper()}") + self._display_message( + f"Placing MRI fiducials - {self._current_fiducial.upper()}" + ) @observe("_info_file") def _info_file_changed(self, change=None): if not self._info_file: return - elif self._info_file.endswith(('.fif', '.fif.gz')): + elif self._info_file.name.endswith((".fif", ".fif.gz")): fid, tree, _ = fiff_open(self._info_file) fid.close() if len(dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)) > 0: self._info = 
read_info(self._info_file, verbose=False) elif len(dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)) > 0: self._info = _empty_info(1) - self._info['dig'] = read_dig_fif(fname=self._info_file).dig + self._info["dig"] = read_dig_fif(fname=self._info_file).dig self._info._unlocked = False else: self._info = read_raw(self._info_file).info @@ -641,11 +731,11 @@ def _info_file_changed(self, change=None): @observe("_orient_glyphs") def _orient_glyphs_changed(self, change=None): - self._update_plot(["hpi", "hsp", "eeg"]) + self._update_plot(["hpi", "hsp", "sensors"]) @observe("_scale_by_distance") def _scale_by_distance_changed(self, change=None): - self._update_plot(["hpi", "hsp", "eeg"]) + self._update_plot(["hpi", "hsp", "sensors"]) @observe("_mark_inside") def _mark_inside_changed(self, change=None): @@ -661,7 +751,15 @@ def _head_shape_point_changed(self, change=None): @observe("_eeg_channels") def _eeg_channels_changed(self, change=None): - self._update_plot("eeg") + self._update_plot("sensors") + + @observe("_meg_channels") + def _meg_channels_changed(self, change=None): + self._update_plot("sensors") + + @observe("_fnirs_channels") + def _fnirs_channels_changed(self, change=None): + self._update_plot("sensors") @observe("_head_resolution") def _head_resolution_changed(self, change=None): @@ -689,10 +787,12 @@ def _scale_mode_changed(self, change=None): mode = None if self._scale_mode == "None" else self._scale_mode self.coreg.set_scale_mode(mode) if self._lock_fids: - self._forward_widget_command(locked_widgets, "set_enabled", - mode is not None) - self._forward_widget_command("fits_fiducials", "set_enabled", - mode not in (None, "3-axis")) + self._forward_widget_command( + locked_widgets, "set_enabled", mode is not None + ) + self._forward_widget_command( + "fits_fiducials", "set_enabled", mode not in (None, "3-axis") + ) if self._scale_mode == "uniform": self._forward_widget_command(["sY", "sZ"], "set_enabled", False) @@ -712,13 +812,15 @@ def _run_worker(self, queue, jobs): def _configure_dialogs(self): from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + for name, buttons in zip( - ["overwrite_subject", "overwrite_subject_exit"], - [["Yes", "No"], ["Yes", "Discard", "Cancel"]]): + ["overwrite_subject", "overwrite_subject_exit"], + [["Yes", "No"], ["Yes", "Discard", "Cancel"]], + ): self._widgets[name] = self._renderer._dialog_create( title="CoregistrationUI", text="The name of the output subject used to " - "save the scaled anatomy already exists.", + "save the scaled anatomy already exists.", info_text="Do you want to overwrite?", callback=self._overwrite_subject_callback, buttons=buttons, @@ -731,11 +833,13 @@ def _configure_worker(self): "_parameter_queue": dict(set_parameter=self._set_parameter), } for queue_name, jobs in work_plan.items(): - t = threading.Thread(target=partial( - self._run_worker, - queue=getattr(self, queue_name), - jobs=jobs, - )) + t = threading.Thread( + target=partial( + self._run_worker, + queue=getattr(self, queue_name), + jobs=jobs, + ) + ) t.daemon = True t.start() @@ -744,17 +848,19 @@ def _configure_picking(self): self._on_mouse_move, self._on_button_press, self._on_button_release, - self._on_pick + self._on_pick, ) def _configure_legend(self): - colors = \ - [np.array(DEFAULTS['coreg'][f"{fid.lower()}_color"]).astype(float) - for fid in self._defaults['fiducials']] - labels = list(zip(self._defaults['fiducials'], colors)) + colors = [ + np.array(DEFAULTS["coreg"][f"{fid.lower()}_color"]).astype(float) + for fid in self._defaults["fiducials"] + ] + 
labels = list(zip(self._defaults["fiducials"], colors)) mri_fids_legend_actor = self._renderer.legend(labels=labels) self._update_actor("mri_fids_legend", mri_fids_legend_actor) + @safe_event @verbose def _redraw(self, *, verbose=None): if not self._redraws_pending: @@ -764,7 +870,7 @@ def _redraw(self, *, verbose=None): mri_fids=self._add_mri_fiducials, hsp=self._add_head_shape_points, hpi=self._add_hpi_coils, - eeg=self._add_eeg_channels, + sensors=self._add_channels, head_fids=self._add_head_fiducials, helmet=self._add_helmet, ) @@ -772,16 +878,16 @@ def _redraw(self, *, verbose=None): # We need at least "head" before "hsp", because the grow_hair param # for head sets the rr that are used for inside/outside hsp redraws_ordered = sorted( - self._redraws_pending, - key=lambda key: list(draw_map).index(key)) - logger.debug(f'Redrawing {redraws_ordered}') + self._redraws_pending, key=lambda key: list(draw_map).index(key) + ) + logger.debug(f"Redrawing {redraws_ordered}") for ki, key in enumerate(redraws_ordered): - logger.debug(f'{ki}. Drawing {repr(key)}') + logger.debug(f"{ki}. Drawing {repr(key)}") draw_map[key]() self._redraws_pending.clear() self._renderer._update() # necessary for MacOS - if platform.system() == 'Darwin': + if platform.system() == "Darwin": self._renderer._process_events() def _on_mouse_move(self, vtk_picker, event): @@ -795,10 +901,10 @@ def _on_button_release(self, vtk_picker, event): if self._mouse_no_mvt > 0: x, y = vtk_picker.GetEventPosition() # XXX: internal plotter/renderer should not be exposed - plotter = self._renderer.figure.plotter + picker = self._renderer._picker picked_renderer = self._renderer.figure.plotter.renderer # trigger the pick - plotter.picker.Pick(x, y, 0, picked_renderer) + picker.Pick(x, y, 0, picked_renderer) self._mouse_no_mvt = 0 def _on_pick(self, vtk_picker, event): @@ -809,33 +915,28 @@ def _on_pick(self, vtk_picker, event): mesh = vtk_picker.GetDataSet() if mesh is None or cell_id == -1 or not self._mouse_no_mvt: return - if not getattr(mesh, "_picking_target", False): + if not any(mesh is target() for target in self._picking_targets): return pos = np.array(vtk_picker.GetPickPosition()) - vtk_cell = mesh.GetCell(cell_id) - cell = [vtk_cell.GetPointId(point_id) for point_id - in range(vtk_cell.GetNumberOfPoints())] - vertices = mesh.points[cell] - idx = np.argmin(abs(vertices - pos), axis=0) - vertex_id = cell[idx[0]] - fiducials = [s.lower() for s in self._defaults["fiducials"]] idx = fiducials.index(self._current_fiducial.lower()) # XXX: add coreg.set_fids - self.coreg._fid_points[idx] = self._surfaces["head"].points[vertex_id] + self.coreg._fid_points[idx] = pos self.coreg._reset_fiducials() self._update_fiducials() self._update_plot("mri_fids") def _reset_fitting_parameters(self): - self._forward_widget_command("icp_n_iterations", "set_value", - self._defaults["icp_n_iterations"]) - self._forward_widget_command("icp_fid_match", "set_value", - self._defaults["icp_fid_match"]) - weights_widgets = [f"{w}_weight" - for w in self._defaults["weights"].keys()] - self._forward_widget_command(weights_widgets, "set_value", - list(self._defaults["weights"].values())) + self._forward_widget_command( + "icp_n_iterations", "set_value", self._defaults["icp_n_iterations"] + ) + self._forward_widget_command( + "icp_fid_match", "set_value", self._defaults["icp_fid_match"] + ) + weights_widgets = [f"{w}_weight" for w in self._defaults["weights"].keys()] + self._forward_widget_command( + weights_widgets, "set_value", 
list(self._defaults["weights"].values()) + ) def _reset_fiducials(self): self._set_current_fiducial(self._defaults["fiducial"]) @@ -843,21 +944,22 @@ def _reset_fiducials(self): def _omit_hsp(self): self.coreg.omit_head_shape_points(self._omit_hsp_distance / 1e3) n_omitted = np.sum(~self.coreg._extra_points_filter) - n_remaining = len(self.coreg._dig_dict['hsp']) - n_omitted + n_remaining = len(self.coreg._dig_dict["hsp"]) - n_omitted self._update_plot("hsp") self._update_distance_estimation() self._display_message( - f"{n_omitted} head shape points omitted, " - f"{n_remaining} remaining.") + f"{n_omitted} head shape points omitted, {n_remaining} remaining." + ) def _reset_omit_hsp_filter(self): self.coreg._extra_points_filter = None self.coreg._update_params(force_update=True) self._update_plot("hsp") self._update_distance_estimation() - n_total = len(self.coreg._dig_dict['hsp']) + n_total = len(self.coreg._dig_dict["hsp"]) self._display_message( - f"No head shape point is omitted, the total is {n_total}.") + f"No head shape point is omitted, the total is {n_total}." + ) @verbose def _update_plot(self, changes="all", verbose=None): @@ -866,9 +968,8 @@ def _update_plot(self, changes="all", verbose=None): try: fun_name = inspect.currentframe().f_back.f_back.f_code.co_name except Exception: # just in case one of these attrs is missing - fun_name = 'unknown' - logger.debug( - f'Updating plots based on {fun_name}: {repr(changes)}') + fun_name = "unknown" + logger.debug(f"Updating plots based on {fun_name}: {repr(changes)}") if self._plot_locked: return if self._info is None: @@ -876,15 +977,20 @@ def _update_plot(self, changes="all", verbose=None): self._to_cf_t = dict(mri=dict(trans=np.eye(4)), head=None) else: self._to_cf_t = _get_transforms_to_coord_frame( - self._info, self.coreg.trans, coord_frame=self._coord_frame) + self._info, self.coreg.trans, coord_frame=self._coord_frame + ) all_keys = ( - 'head', 'mri_fids', # MRI first - 'hsp', 'hpi', 'eeg', 'head_fids', # then dig - 'helmet', - ) - if changes == 'all': + "head", + "mri_fids", # MRI first + "hsp", + "hpi", + "sensors", + "head_fids", # then dig + "helmet", + ) + if changes == "all": changes = list(all_keys) - elif changes == 'sensors': + elif changes == "sensors": changes = all_keys[2:] # omit MRI ones elif isinstance(changes, str): changes = [changes] @@ -894,7 +1000,7 @@ def _update_plot(self, changes="all", verbose=None): # it would reduce "jerkiness" of the updates, but this should at least # work okay bad = changes.difference(set(all_keys)) - assert len(bad) == 0, f'Unknown changes: {bad}' + assert len(bad) == 0, f"Unknown changes: {bad}" self._redraws_pending.update(changes) if self._immediate_redraw: self._redraw() @@ -913,15 +1019,24 @@ def _lock(self, plot=False, params=False, scale_mode=False, fitting=False): self.coreg._scale_mode = None if fitting: widgets = [ - "sX", "sY", "sZ", - "tX", "tY", "tZ", - "rX", "rY", "rZ", - "fit_icp", "fit_fiducials", "fits_icp", "fits_fiducials" + "sX", + "sY", + "sZ", + "tX", + "tY", + "tZ", + "rX", + "rY", + "rZ", + "fit_icp", + "fit_fiducials", + "fits_icp", + "fits_fiducials", ] states = [ self._forward_widget_command( - w, "is_enabled", None, - input_value=False, output_value=True) + w, "is_enabled", None, input_value=False, output_value=True + ) for w in widgets ] self._forward_widget_command(widgets, "set_enabled", False) @@ -939,23 +1054,21 @@ def _lock(self, plot=False, params=False, scale_mode=False, fitting=False): self._forward_widget_command(w, "set_enabled", states[idx]) 
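Aside: the _lock(..., fitting=True) path above snapshots each widget's enabled state, disables the whole set, and restores the snapshot when the context exits. A minimal sketch of that pattern, with a hypothetical Widget class standing in for the renderer's real widget wrappers:

from contextlib import contextmanager


class Widget:  # stand-in for the renderer's widget wrapper (hypothetical)
    def __init__(self, enabled=True):
        self._enabled = enabled

    def is_enabled(self):
        return self._enabled

    def set_enabled(self, state):
        self._enabled = state


@contextmanager
def lock(widgets):
    # remember which widgets were enabled, then grey everything out
    states = [w.is_enabled() for w in widgets]
    for w in widgets:
        w.set_enabled(False)
    try:
        yield
    finally:
        # re-enable only the widgets that were enabled before
        for w, state in zip(widgets, states):
            w.set_enabled(state)


widgets = [Widget(), Widget(enabled=False)]
with lock(widgets):
    assert not any(w.is_enabled() for w in widgets)
assert [w.is_enabled() for w in widgets] == [True, False]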
def _display_message(self, msg=""): - self._forward_widget_command('status_message', 'set_value', msg) - self._forward_widget_command( - 'status_message', 'show', None, input_value=False - ) + self._forward_widget_command("status_message", "set_value", msg) + self._forward_widget_command("status_message", "show", None, input_value=False) self._forward_widget_command( - 'status_message', 'update', None, input_value=False + "status_message", "update", None, input_value=False ) if msg: logger.info(msg) def _follow_fiducial_view(self): fid = self._current_fiducial.lower() - view = dict(lpa='left', rpa='right', nasion='front') - kwargs = dict(front=(90., 90.), left=(180, 90), right=(0., 90)) - kwargs = dict(zip(('azimuth', 'elevation'), kwargs[view[fid]])) + view = dict(lpa="left", rpa="right", nasion="front") + kwargs = dict(front=(90.0, 90.0), left=(180, 90), right=(0.0, 90)) + kwargs = dict(zip(("azimuth", "elevation"), kwargs[view[fid]])) if not self._lock_fids: - self._renderer.set_camera(distance=None, **kwargs) + self._renderer.set_camera(distance="auto", **kwargs) def _update_fiducials(self): fid = self._current_fiducial @@ -963,35 +1076,42 @@ def _update_fiducials(self): return idx = _map_fid_name_to_idx(name=fid) - val = self.coreg.fiducials.dig[idx]['r'] * 1e3 + val = self.coreg.fiducials.dig[idx]["r"] * 1e3 with self._lock(plot=True): - self._forward_widget_command( - ["fid_X", "fid_Y", "fid_Z"], "set_value", val) + self._forward_widget_command(["fid_X", "fid_Y", "fid_Z"], "set_value", val) def _update_distance_estimation(self): - value = self.coreg._get_fiducials_distance_str() + '\n' + \ - self.coreg._get_point_distance_str() + value = ( + self.coreg._get_fiducials_distance_str() + + "\n" + + self.coreg._get_point_distance_str() + ) dists = self.coreg.compute_dig_mri_distances() * 1e3 if self._hsp_weight > 0: - value += "\nHSP <-> MRI (mean/min/max): "\ - f"{np.mean(dists):.2f} "\ - f"/ {np.min(dists):.2f} / {np.max(dists):.2f} mm" + if len(dists) == 0: + value += "\nNo head shape points found." + else: + value += ( + "\nHSP <-> MRI (mean/min/max): " + f"{np.mean(dists):.2f} " + f"/ {np.min(dists):.2f} / {np.max(dists):.2f} mm" + ) self._forward_widget_command("fit_label", "set_value", value) def _update_parameters(self): with self._lock(plot=True, params=True): # rotation deg = np.rad2deg(self.coreg._rotation) - logger.debug(f' Rotation: {deg}') + logger.debug(f" Rotation: {deg}") self._forward_widget_command(["rX", "rY", "rZ"], "set_value", deg) # translation mm = self.coreg._translation * 1e3 - logger.debug(f' Translation: {mm}') + logger.debug(f" Translation: {mm}") self._forward_widget_command(["tX", "tY", "tZ"], "set_value", mm) # scale sc = self.coreg._scale * 1e2 - logger.debug(f' Scale: {sc}') + logger.debug(f" Scale: {sc}") self._forward_widget_command(["sX", "sY", "sZ"], "set_value", sc) def _reset(self, keep_trans=False): @@ -1011,8 +1131,9 @@ def _reset(self, keep_trans=False): self._update_parameters() self._update_distance_estimation() - def _forward_widget_command(self, names, command, value, - input_value=True, output_value=False): + def _forward_widget_command( + self, names, command, value, input_value=True, output_value=False + ): """Invoke a method of one or more widgets if the widgets exist. Parameters @@ -1035,15 +1156,11 @@ def _forward_widget_command(self, names, command, value, ``None`` if ``output_value`` is ``False``, and the return value of ``command`` otherwise. 
""" - _validate_type( - item=names, - types=(str, list), - item_name='names' - ) + _validate_type(item=names, types=(str, list), item_name="names") if isinstance(names, str): names = [names] - if not isinstance(value, (str, float, int, dict, type(None))): + if not isinstance(value, str | float | int | dict | type(None)): value = list(value) assert len(names) == len(value) @@ -1058,26 +1175,44 @@ def _forward_widget_command(self, names, command, value, return ret def _set_sensors_visibility(self, state): - sensors = ["head_fiducials", "hpi_coils", "head_shape_points", - "eeg_channels"] + sensors = [ + "head_fiducials", + "hpi_coils", + "head_shape_points", + "sensors", + "helmet", + ] for sensor in sensors: if sensor in self._actors and self._actors[sensor] is not None: actors = self._actors[sensor] actors = actors if isinstance(actors, list) else [actors] for actor in actors: + if actor is None: + continue actor.SetVisibility(state) + self._renderer._update() def _update_actor(self, actor_name, actor): # XXX: internal plotter/renderer should not be exposed - self._renderer.plotter.remove_actor(self._actors.get(actor_name), - render=False) + # Work around PyVista sequential update bug with iterable until > 0.42.3 is req + # https://github.com/pyvista/pyvista/pull/5046 + actors = self._actors.get(actor_name) or [] # convert None to list + if not isinstance(actors, list): + actors = [actors] + for this_actor in actors: + self._renderer.plotter.remove_actor(this_actor, render=False) self._actors[actor_name] = actor def _add_mri_fiducials(self): mri_fids_actors = _plot_mri_fiducials( - self._renderer, self.coreg._fid_points, self._subjects_dir, - self._subject, self._to_cf_t, self._fid_colors) + self._renderer, + self.coreg._fid_points, + self._subjects_dir, + self._subject, + self._to_cf_t, + self._fid_colors, + ) # disable picking on the markers for actor in mri_fids_actors: actor.SetPickable(False) @@ -1085,19 +1220,24 @@ def _add_mri_fiducials(self): def _add_head_fiducials(self): head_fids_actors = _plot_head_fiducials( - self._renderer, self._info, self._to_cf_t, self._fid_colors) + self._renderer, self._info, self._to_cf_t, self._fid_colors + ) self._update_actor("head_fiducials", head_fids_actors) def _add_hpi_coils(self): if self._hpi_coils: hpi_actors = _plot_hpi_coils( - self._renderer, self._info, self._to_cf_t, + self._renderer, + self._info, + self._to_cf_t, opacity=self._defaults["sensor_opacity"], scale=DEFAULTS["coreg"]["extra_scale"], orient_glyphs=self._orient_glyphs, scale_by_distance=self._scale_by_distance, - surf=self._head_geo, check_inside=self._check_inside, - nearest=self._nearest) + surf=self._head_geo, + check_inside=self._check_inside, + nearest=self._nearest, + ) else: hpi_actors = None self._update_actor("hpi_coils", hpi_actors) @@ -1105,61 +1245,96 @@ def _add_hpi_coils(self): def _add_head_shape_points(self): if self._head_shape_points: hsp_actors = _plot_head_shape_points( - self._renderer, self._info, self._to_cf_t, + self._renderer, + self._info, + self._to_cf_t, opacity=self._defaults["sensor_opacity"], orient_glyphs=self._orient_glyphs, scale_by_distance=self._scale_by_distance, - mark_inside=self._mark_inside, surf=self._head_geo, + mark_inside=self._mark_inside, + surf=self._head_geo, mask=self.coreg._extra_points_filter, - check_inside=self._check_inside, nearest=self._nearest) + check_inside=self._check_inside, + nearest=self._nearest, + ) else: hsp_actors = None self._update_actor("head_shape_points", hsp_actors) - def _add_eeg_channels(self): + def 
_add_channels(self): + plot_types = dict(eeg=False, meg=False, fnirs=False) if self._eeg_channels: - eeg = ["original"] - picks = pick_types(self._info, eeg=(len(eeg) > 0), fnirs=True) - if len(picks) > 0: - actors = _plot_sensors( - self._renderer, self._info, self._to_cf_t, picks, - meg=False, eeg=eeg, fnirs=["sources", "detectors"], - warn_meg=False, head_surf=self._head_geo, units='m', - sensor_opacity=self._defaults["sensor_opacity"], - orient_glyphs=self._orient_glyphs, - scale_by_distance=self._scale_by_distance, - surf=self._head_geo, check_inside=self._check_inside, - nearest=self._nearest) - sens_actors = actors["eeg"] - sens_actors.extend(actors["fnirs"]) - else: - sens_actors = None - else: - sens_actors = None - self._update_actor("eeg_channels", sens_actors) + plot_types["eeg"] = ["original"] + if self._meg_channels: + plot_types["meg"] = ["sensors"] + if self._fnirs_channels: + plot_types["fnirs"] = ["sources", "detectors"] + sensor_alpha = dict( + eeg=self._defaults["sensor_opacity"], + fnirs=self._defaults["sensor_opacity"], + meg=0.25, + ) + picks = pick_types( + self._info, + ref_meg=False, + meg=True, + eeg=True, + fnirs=True, + exclude=(), + ) + these_actors = _plot_sensors_3d( + self._renderer, + self._info, + self._to_cf_t, + picks=picks, + warn_meg=False, + head_surf=self._head_geo, + units="m", + sensor_alpha=sensor_alpha, + orient_glyphs=self._orient_glyphs, + scale_by_distance=self._scale_by_distance, + surf=self._head_geo, + check_inside=self._check_inside, + nearest=self._nearest, + **plot_types, + ) + sens_actors = sum((these_actors or {}).values(), list()) + self._update_actor("sensors", sens_actors) def _add_head_surface(self): bem = None if self._head_resolution: - surface = 'head-dense' - key = 'high' + surface = "head-dense" + key = "high" else: - surface = 'head' - key = 'low' + surface = "head" + key = "low" try: head_actor, head_surf, _ = _plot_head_surface( - self._renderer, surface, self._subject, - self._subjects_dir, bem, self._coord_frame, self._to_cf_t, - alpha=self._head_opacity) - except IOError: + self._renderer, + surface, + self._subject, + self._subjects_dir, + bem, + self._coord_frame, + self._to_cf_t, + alpha=self._head_opacity, + ) + except OSError: head_actor, head_surf, _ = _plot_head_surface( - self._renderer, "head", self._subject, self._subjects_dir, - bem, self._coord_frame, self._to_cf_t, - alpha=self._head_opacity) - key = 'low' + self._renderer, + "head", + self._subject, + self._subjects_dir, + bem, + self._coord_frame, + self._to_cf_t, + alpha=self._head_opacity, + ) + key = "low" self._update_actor("head", head_actor) # mark head surface mesh to restrict picking - head_surf._picking_target = True + self._picking_targets.append(weakref.ref(head_surf)) # We need to use _get_processed_mri_points to incorporate grow_hair rr = self.coreg._get_processed_mri_points(key) * self.coreg._scale.T head_surf.points = rr @@ -1170,16 +1345,16 @@ def _add_head_surface(self): nn = self._surfaces["head"].point_normals assert nn.shape == (len(rr), 3), nn.shape self._head_geo = dict(rr=rr, tris=tris, nn=nn) - self._check_inside = _CheckInside(head_surf, mode='pyvista') + self._check_inside = _CheckInside(head_surf, mode="pyvista") self._nearest = _DistanceQuery(rr) def _add_helmet(self): if self._helmet: - logger.debug('Drawing helmet') - head_mri_t = _get_trans(self.coreg.trans, 'head', 'mri')[0] + logger.debug("Drawing helmet") + head_mri_t = _get_trans(self.coreg.trans, "head", "mri")[0] helmet_actor, _, _ = _plot_helmet( - self._renderer, 
self._info, self._to_cf_t, head_mri_t, - self._coord_frame) + self._renderer, self._info, self._to_cf_t, head_mri_t, self._coord_frame + ) else: helmet_actor = None self._update_actor("helmet", helmet_actor) @@ -1199,7 +1374,8 @@ def _fits_fiducials(self): ) end = time.time() self._display_message( - f"Fitting fiducials finished in {end - start:.2f} seconds.") + f"Fitting fiducials finished in {end - start:.2f} seconds." + ) self._update_plot("sensors") self._update_parameters() self._update_distance_estimation() @@ -1214,13 +1390,12 @@ def _fits_icp(self): def _fit_icp_real(self, *, update_head): with self._lock(params=True, fitting=True): self._current_icp_iterations = 0 - updates = ['hsp', 'hpi', 'eeg', 'head_fids', 'helmet'] + updates = ["hsp", "hpi", "sensors", "head_fids", "helmet"] if update_head: - updates.insert(0, 'head') + updates.insert(0, "head") def callback(iteration, n_iterations): - self._display_message( - f"Fitting ICP - iteration {iteration + 1}") + self._display_message(f"Fitting ICP - iteration {iteration + 1}") self._update_plot(updates) self._current_icp_iterations += 1 self._update_distance_estimation() @@ -1240,11 +1415,13 @@ def callback(iteration, n_iterations): self._display_message() self._display_message( f"Fitting ICP finished in {end - start:.2f} seconds and " - f"{self._current_icp_iterations} iterations.") + f"{self._current_icp_iterations} iterations." + ) del self._current_icp_iterations def _task_save_subject(self): from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + if MNE_3D_BACKEND_TESTING: self._save_subject() else: @@ -1252,12 +1429,21 @@ def _task_save_subject(self): def _task_set_parameter(self, value, mode_name, coord): from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + if MNE_3D_BACKEND_TESTING: self._set_parameter(value, mode_name, coord, self._plot_locked) else: - self._parameter_queue.put(_WorkerData("set_parameter", dict( - value=value, mode_name=mode_name, coord=coord, - plot_locked=self._plot_locked))) + self._parameter_queue.put( + _WorkerData( + "set_parameter", + dict( + value=value, + mode_name=mode_name, + coord=coord, + plot_locked=self._plot_locked, + ), + ) + ) def _overwrite_subject_callback(self, button_name): if button_name == "Yes": @@ -1270,9 +1456,10 @@ def _overwrite_subject_callback(self, button_name): def _check_subject_exists(self): if not self._subject_to: return False - subject_dirname = os.path.join('{subjects_dir}', '{subject}') - dest = subject_dirname.format(subject=self._subject_to, - subjects_dir=self._subjects_dir) + subject_dirname = os.path.join("{subjects_dir}", "{subject}") + dest = subject_dirname.format( + subject=self._subject_to, subjects_dir=self._subjects_dir + ) return os.path.exists(dest) def _save_subject(self, exit_mode=False): @@ -1286,19 +1473,19 @@ def _save_subject_callback(self, overwrite=False): self._display_message(f"Saving {self._subject_to}...") default_cursor = self._renderer._window_get_cursor() self._renderer._window_set_cursor( - self._renderer._window_new_cursor("WaitCursor")) + self._renderer._window_new_cursor("WaitCursor") + ) # prepare bem bem_names = [] if self._scale_mode != "None": - can_prepare_bem = _mri_subject_has_bem( - self._subject, self._subjects_dir) + can_prepare_bem = _mri_subject_has_bem(self._subject, self._subjects_dir) else: can_prepare_bem = False if can_prepare_bem: - pattern = bem_fname.format(subjects_dir=self._subjects_dir, - subject=self._subject, - name='(.+-bem)') + pattern = bem_fname.format( + subjects_dir=self._subjects_dir, 
subject=self._subject, name="(.+-bem)" + ) bem_dir, pattern = os.path.split(pattern) for filename in os.listdir(bem_dir): match = re.match(pattern, filename) @@ -1309,10 +1496,15 @@ def _save_subject_callback(self, overwrite=False): try: self._display_message(f"Scaling {self._subject_to}...") scale_mri( - subject_from=self._subject, subject_to=self._subject_to, - scale=self.coreg._scale, overwrite=overwrite, - subjects_dir=self._subjects_dir, skip_fiducials=True, - labels=True, annot=True, on_defects='ignore' + subject_from=self._subject, + subject_to=self._subject_to, + scale=self.coreg._scale, + overwrite=overwrite, + subjects_dir=self._subjects_dir, + skip_fiducials=True, + labels=True, + annot=True, + on_defects="ignore", ) except Exception: logger.error(f"Error scaling {self._subject_to}") @@ -1324,16 +1516,17 @@ def _save_subject_callback(self, overwrite=False): for bem_name in bem_names: try: self._display_message(f"Computing {bem_name} solution...") - bem_file = bem_fname.format(subjects_dir=self._subjects_dir, - subject=self._subject_to, - name=bem_name) + bem_file = bem_fname.format( + subjects_dir=self._subjects_dir, + subject=self._subject_to, + name=bem_name, + ) bemsol = make_bem_solution(bem_file) - write_bem_solution(bem_file[:-4] + '-sol.fif', bemsol) + write_bem_solution(bem_file[:-4] + "-sol.fif", bemsol) except Exception: logger.error(f"Error computing {bem_name} solution") else: - self._display_message(f"Computing {bem_name} solution..." - " Done!") + self._display_message(f"Computing {bem_name} solution... Done!") self._display_message(f"Saving {self._subject_to}... Done!") self._renderer._window_set_cursor(default_cursor) self._mri_scale_modified = False @@ -1342,7 +1535,7 @@ def _save_mri_fiducials(self, fname): self._display_message(f"Saving {fname}...") dig_montage = self.coreg.fiducials write_fiducials( - fname=fname, pts=dig_montage.dig, coord_frame='mri', overwrite=True + fname=fname, pts=dig_montage.dig, coord_frame="mri", overwrite=True ) self._set_fiducials_file(fname) self._display_message(f"Saving {fname}... Done!") @@ -1350,13 +1543,13 @@ def _save_mri_fiducials(self, fname): def _save_trans(self, fname): write_trans(fname, self.coreg.trans, overwrite=True) - self._display_message( - f"{fname} transform file is saved.") + self._display_message(f"{fname} transform file is saved.") self._trans_modified = False - def _load_trans(self, fname): - mri_head_t = _ensure_trans(read_trans(fname, return_all=True), - 'mri', 'head')['trans'] + def _load_trans(self, trans): + if not isinstance(trans, Transform): + trans = read_trans(trans, return_all=True) + mri_head_t = _ensure_trans(trans, "mri", "head")["trans"] rot_x, rot_y, rot_z = rotation_angles(mri_head_t) x, y, z = mri_head_t[:3, 3] self.coreg._update_params( @@ -1366,17 +1559,16 @@ def _load_trans(self, fname): self._update_parameters() self._update_distance_estimation() self._update_plot() - self._display_message( - f"{fname} transform file is loaded.") + self._display_message(f"{trans} transform file is loaded.") def _update_fiducials_label(self): if self._fiducials_file is None: text = ( - '

<p><strong>No custom MRI fiducials loaded!</strong></p>' - '<p>MRI fiducials could not be found in the standard ' - 'location. The displayed initial MRI fiducial locations ' - '(diamonds) were derived from fsaverage. Place, lock, and ' - 'save fiducials to discard this message.</p>' + "<p><strong>No custom MRI fiducials loaded!</strong></p>" + "<p>MRI fiducials could not be found in the standard " + "location. The displayed initial MRI fiducial locations " + "(diamonds) were derived from fsaverage. Place, lock, and " + "save fiducials to discard this message.</p>
" ) else: assert self._fiducials_file == fid_fname.format( @@ -1384,30 +1576,24 @@ def _update_fiducials_label(self): ) assert self.coreg._fid_accurate is True text = ( - f'

<p><strong>MRI fiducials (diamonds) loaded from ' - f'standard location:</strong></p>' - f'<p>{self._fiducials_file}</p>' + f"<p><strong>MRI fiducials (diamonds) loaded from " + f"standard location:</strong></p>" + f"<p>{self._fiducials_file}</p>
" ) - self._forward_widget_command( - 'mri_fiducials_label', 'set_value', text - ) + self._forward_widget_command("mri_fiducials_label", "set_value", text) def _configure_dock(self): - if self._renderer._kind == 'notebook': + if self._renderer._kind == "notebook": collapse = True # collapsible and collapsed else: collapse = None # not collapsible - self._renderer._dock_initialize( - name="Input", area="left", max_width="350px" - ) + self._renderer._dock_initialize(name="Input", area="left", max_width="375px") mri_subject_layout = self._renderer._dock_add_group_box( name="MRI Subject", collapse=collapse, ) - subjects_dir_layout = self._renderer._dock_add_layout( - vertical=False - ) + subjects_dir_layout = self._renderer._dock_add_layout(vertical=False) self._widgets["subjects_dir_field"] = self._renderer._dock_add_text( name="subjects_dir_field", value=self._subjects_dir, @@ -1421,8 +1607,7 @@ def _configure_dock(self): func=self._set_subjects_dir, is_directory=True, icon=True, - tooltip="Load the path to the directory containing the " - "FreeSurfer subjects", + tooltip="Load the path to the directory containing the FreeSurfer subjects", layout=subjects_dir_layout, ) self._renderer._layout_add_widget( @@ -1444,38 +1629,33 @@ def _configure_dock(self): collapse=collapse, ) # Add MRI fiducials I/O widgets - self._widgets['mri_fiducials_label'] = self._renderer._dock_add_label( - value='', # Will be filled via _update_fiducials_label() + self._widgets["mri_fiducials_label"] = self._renderer._dock_add_label( + value="", # Will be filled via _update_fiducials_label() layout=mri_fiducials_layout, - selectable=True + selectable=True, ) # Reload & Save buttons go into their own layout widget - mri_fiducials_button_layout = self._renderer._dock_add_layout( - vertical=False - ) + mri_fiducials_button_layout = self._renderer._dock_add_layout(vertical=False) self._renderer._layout_add_widget( - layout=mri_fiducials_layout, - widget=mri_fiducials_button_layout + layout=mri_fiducials_layout, widget=mri_fiducials_button_layout ) self._widgets["reload_mri_fids"] = self._renderer._dock_add_button( - name='Reload MRI Fid.', + name="Reload MRI Fid.", callback=lambda: self._set_fiducials_file(self._fiducials_file), tooltip="Reload MRI fiducials from the standard location", layout=mri_fiducials_button_layout, ) # Disable reload button until we've actually loaded a fiducial file # (happens in _set_fiducials_file method) - self._forward_widget_command('reload_mri_fids', 'set_enabled', False) + self._forward_widget_command("reload_mri_fids", "set_enabled", False) self._widgets["save_mri_fids"] = self._renderer._dock_add_button( name="Save MRI Fid.", callback=lambda: self._save_mri_fiducials( - fid_fname.format( - subjects_dir=self._subjects_dir, subject=self._subject - ) + fid_fname.format(subjects_dir=self._subjects_dir, subject=self._subject) ), tooltip="Save MRI fiducials to the standard location. 
Fiducials " - "must be locked first!", + "must be locked first!", layout=mri_fiducials_button_layout, ) self._widgets["lock_fids"] = self._renderer._dock_add_check_box( @@ -1497,7 +1677,7 @@ def _configure_dock(self): name = f"fid_{coord}" self._widgets[name] = self._renderer._dock_add_spin_box( name=coord, - value=0., + value=0.0, rng=[-1e3, 1e3], callback=partial( self._set_fiducial, @@ -1509,16 +1689,13 @@ def _configure_dock(self): tooltip=f"Set the {coord} fiducial coordinate", layout=fiducial_coords_layout, ) - self._renderer._layout_add_widget( - mri_fiducials_layout, fiducial_coords_layout) + self._renderer._layout_add_widget(mri_fiducials_layout, fiducial_coords_layout) dig_source_layout = self._renderer._dock_add_group_box( name="Info source with digitization", collapse=collapse, ) - info_file_layout = self._renderer._dock_add_layout( - vertical=False - ) + info_file_layout = self._renderer._dock_add_layout(vertical=False) self._widgets["info_file_field"] = self._renderer._dock_add_text( name="info_file_field", value=self._info_file, @@ -1531,8 +1708,7 @@ def _configure_dock(self): desc="Load", func=self._set_info_file, icon=True, - tooltip="Load the FIFF file with digitization data for " - "coregistration", + tooltip="Load the FIFF file with digitization data for coregistration", layout=info_file_layout, ) self._renderer._layout_add_widget( @@ -1560,8 +1736,7 @@ def _configure_dock(self): self._widgets["omit"] = self._renderer._dock_add_button( name="Omit", callback=self._omit_hsp, - tooltip="Exclude the head shape points that are far away from " - "the MRI head", + tooltip="Exclude the head shape points that are far away from the MRI head", layout=omit_hsp_layout_2, ) self._widgets["reset_omit"] = self._renderer._dock_add_button( @@ -1584,6 +1759,13 @@ def _configure_dock(self): tooltip="Enable/Disable MEG helmet", layout=view_options_layout, ) + self._widgets["meg"] = self._renderer._dock_add_check_box( + name="Show MEG sensors", + value=self._meg_channels, + callback=self._set_meg_channels, + tooltip="Enable/Disable MEG sensors", + layout=view_options_layout, + ) self._widgets["high_res_head"] = self._renderer._dock_add_check_box( name="Show high-resolution head", value=self._head_resolution, @@ -1603,7 +1785,7 @@ def _configure_dock(self): self._renderer._dock_add_stretch() self._renderer._dock_initialize( - name="Parameters", area="right", max_width="350px" + name="Parameters", area="right", max_width="375px" ) mri_scaling_layout = self._renderer._dock_add_group_box( name="MRI Scaling", @@ -1629,7 +1811,7 @@ def _configure_dock(self): self._widgets[name] = self._renderer._dock_add_spin_box( name=name, value=attr[coords.index(coord)] * 1e2, - rng=[1., 10000.], # percent + rng=[1.0, 10000.0], # percent callback=partial( self._set_parameter, mode_name="scale", @@ -1647,18 +1829,17 @@ def _configure_dock(self): name="Fit fiducials with scaling", callback=self._fits_fiducials, tooltip="Find MRI scaling, rotation, and translation to fit all " - "3 fiducials", + "3 fiducials", layout=fit_scale_layout, ) self._widgets["fits_icp"] = self._renderer._dock_add_button( name="Fit ICP with scaling", callback=self._fits_icp, tooltip="Find MRI scaling, rotation, and translation to match the " - "head shape points", + "head shape points", layout=fit_scale_layout, ) - self._renderer._layout_add_widget( - scale_params_layout, fit_scale_layout) + self._renderer._layout_add_widget(scale_params_layout, fit_scale_layout) subject_to_layout = self._renderer._dock_add_layout(vertical=False) 
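Aside: every scale/translation/rotation spin box built above shares one _set_parameter handler, bound to its mode and axis with functools.partial. A self-contained sketch of that callback wiring, with print standing in for the real handler and the dock widgets:

from functools import partial


def set_parameter(value, mode_name, coord):
    # the real handler forwards the new value to the Coregistration object
    print(f"{mode_name} {coord} -> {value}")


callbacks = {}
for mode_name, prefix in (("scale", "s"), ("translation", "t"), ("rotation", "r")):
    for coord in ("X", "Y", "Z"):
        callbacks[f"{prefix}{coord}"] = partial(
            set_parameter, mode_name=mode_name, coord=coord
        )

callbacks["rZ"](15.0)  # prints: rotation Z -> 15.0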
self._widgets["subject_to"] = self._renderer._dock_add_text( name="subject-to", @@ -1673,8 +1854,7 @@ def _configure_dock(self): tooltip="Save scaled anatomy", layout=subject_to_layout, ) - self._renderer._layout_add_widget( - mri_scaling_layout, subject_to_layout) + self._renderer._layout_add_widget(mri_scaling_layout, subject_to_layout) param_layout = self._renderer._dock_add_group_box( name="Translation (t) and Rotation (r)", collapse=collapse, @@ -1699,8 +1879,8 @@ def _configure_dock(self): double=True, step=1, tooltip=f"Set the {coord} {mode_name.lower()}" - f" parameter (in {unit})", - layout=coord_layout + f" parameter (in {unit})", + layout=coord_layout, ) self._renderer._layout_add_widget(param_layout, coord_layout) @@ -1714,8 +1894,7 @@ def _configure_dock(self): self._widgets["fit_icp"] = self._renderer._dock_add_button( name="Fit ICP", callback=self._fit_icp, - tooltip="Find rotation and translation to match the " - "head shape points", + tooltip="Find rotation and translation to match the head shape points", layout=fit_layout, ) self._renderer._layout_add_widget(param_layout, fit_layout) @@ -1731,8 +1910,8 @@ def _configure_dock(self): func=self._save_trans, tooltip="Save the transform file to disk", layout=save_trans_layout, - filter='Head->MRI transformation (*-trans.fif *_trans.fif)', - initial_directory=str(Path(self._info_file).parent), + filter_="Head->MRI transformation (*-trans.fif *_trans.fif)", + initial_directory=self._info_file.parent, ) self._widgets["load_trans"] = self._renderer._dock_add_file_button( name="load_trans", @@ -1740,8 +1919,8 @@ def _configure_dock(self): func=self._load_trans, tooltip="Load the transform file from disk", layout=save_trans_layout, - filter='Head->MRI transformation (*-trans.fif *_trans.fif)', - initial_directory=str(Path(self._info_file).parent), + filter_="Head->MRI transformation (*-trans.fif *_trans.fif)", + initial_directory=self._info_file.parent, ) self._renderer._layout_add_widget(trans_layout, save_trans_layout) self._widgets["reset_trans"] = self._renderer._dock_add_button( @@ -1782,15 +1961,14 @@ def _configure_dock(self): name="Weights", layout=fitting_options_layout, ) - for point, fid in zip(("HSP", "EEG", "HPI"), - self._defaults["fiducials"]): + for point, fid in zip(("HSP", "EEG", "HPI"), self._defaults["fiducials"]): weight_layout = self._renderer._dock_add_layout(vertical=False) point_lower = point.lower() name = f"{point_lower}_weight" self._widgets[name] = self._renderer._dock_add_spin_box( name=point, value=getattr(self, f"_{point_lower}_weight"), - rng=[0., 100.], + rng=[0.0, 100.0], callback=partial(self._set_point_weight, point=point_lower), compact=True, double=True, @@ -1803,7 +1981,7 @@ def _configure_dock(self): self._widgets[name] = self._renderer._dock_add_spin_box( name=fid, value=getattr(self, f"_{fid_lower}_weight"), - rng=[0., 100.], + rng=[0.0, 100.0], callback=partial(self._set_point_weight, point=fid_lower), compact=True, double=True, @@ -1811,23 +1989,21 @@ def _configure_dock(self): layout=weight_layout, ) self._renderer._layout_add_widget(weights_layout, weight_layout) - self._widgets['reset_fitting_options'] = ( - self._renderer._dock_add_button( - name="Reset Fitting Options", - callback=self._reset_fitting_parameters, - tooltip="Reset all the fitting parameters to default value", - layout=fitting_options_layout, - ) + self._widgets["reset_fitting_options"] = self._renderer._dock_add_button( + name="Reset Fitting Options", + callback=self._reset_fitting_parameters, + tooltip="Reset all the 
fitting parameters to default value", + layout=fitting_options_layout, ) self._renderer._dock_add_stretch() def _configure_status_bar(self): self._renderer._status_bar_initialize() - self._widgets['status_message'] = self._renderer._status_bar_add_label( + self._widgets["status_message"] = self._renderer._status_bar_add_label( "", stretch=1 ) self._forward_widget_command( - 'status_message', 'hide', value=None, input_value=False + "status_message", "hide", value=None, input_value=False ) def _clean(self): @@ -1851,17 +2027,16 @@ def close(self): def _close_dialog_callback(self, button_name): from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + self._accept_close_event = True if button_name == "Save": if self._trans_modified: - self._forward_widget_command( - "save_trans", "set_value", None) + self._forward_widget_command("save_trans", "set_value", None) # cancel means _save_trans is not called if self._trans_modified: self._accept_close_event = False if self._mri_fids_modified: - self._forward_widget_command( - "save_mri_fids", "set_value", None) + self._forward_widget_command("save_mri_fids", "set_value", None) if self._mri_scale_modified: if self._subject_to: self._save_subject(exit_mode=True) @@ -1869,7 +2044,7 @@ def _close_dialog_callback(self, button_name): dialog = self._renderer._dialog_create( title="CoregistrationUI", text="The name of the output subject used to " - "save the scaled anatomy is not set.", + "save the scaled anatomy is not set.", info_text="Please set a subject name", callback=lambda x: None, buttons=["Ok"], @@ -1883,9 +2058,9 @@ def _close_dialog_callback(self, button_name): assert button_name == "Discard" def _close_callback(self): - if self._trans_modified or self._mri_fids_modified or \ - self._mri_scale_modified: + if self._trans_modified or self._mri_fids_modified or self._mri_scale_modified: from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + # prepare the dialog's text text = "The following is/are not saved:" text += "
    " diff --git a/mne/gui/_gui.py b/mne/gui/_gui.py new file mode 100644 index 00000000000..b8898d8b7c2 --- /dev/null +++ b/mne/gui/_gui.py @@ -0,0 +1,222 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ..utils import get_config, verbose + + +@verbose +def coregistration( + *, + width=None, + height=None, + inst=None, + subject=None, + subjects_dir=None, + head_opacity=None, + head_high_res=None, + trans=None, + orient_to_surface=None, + scale_by_distance=None, + mark_inside=None, + interaction=None, + fullscreen=None, + show=True, + block=False, + verbose=None, +): + """Coregister an MRI with a subject's head shape. + + The GUI can be launched through the command line interface: + + .. code-block:: bash + + $ mne coreg + + or using a python interpreter as shown in :ref:`tut-source-alignment`. + + Parameters + ---------- + width : int | None + Specify the width for window (in logical pixels). + Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value + (which defaults to ``800``). + height : int | None + Specify a height for window (in logical pixels). + Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value + (which defaults to ``400``). + inst : None | path-like + Path to an instance file containing the digitizer data. Compatible for + Raw, Epochs, and Evoked files. + subject : None | str + Name of the mri subject. + %(subjects_dir)s + head_opacity : float | None + The opacity of the head surface in the range ``[0., 1.]``. + Default is None, which uses ``MNE_COREG_HEAD_OPACITY`` config value + (which defaults to ``1.``). + head_high_res : bool | None + Use a high resolution head surface. + Default is None, which uses ``MNE_COREG_HEAD_HIGH_RES`` config value + (which defaults to True). + trans : path-like | Transform | None + The Head<->MRI transform or the path to its FIF file (``"-trans.fif"``). + orient_to_surface : bool | None + If True (default), orient EEG electrode and head shape points to the head + surface. + + .. versionadded:: 0.16 + scale_by_distance : bool | None + If True (default), scale the digitization points by their distance from the + scalp surface. + + .. versionadded:: 0.16 + mark_inside : bool | None + If True (default), mark points inside the head surface in a + different color. + + .. versionadded:: 0.16 + %(interaction_scene_none)s + Defaults to ``'terrain'``. + + .. versionadded:: 0.16 + .. versionchanged:: 1.0 + Default interaction mode if ``None`` and no config setting found + changed from ``'trackball'`` to ``'terrain'``. + %(fullscreen)s + Default is ``None``, which uses ``MNE_COREG_FULLSCREEN`` config value + (which defaults to ``False``). + + .. versionadded:: 1.1 + show : bool + Show the GUI if True. + block : bool + Whether to halt program execution until the figure is closed. + %(verbose)s + + Returns + ------- + frame : instance of CoregistrationUI + The coregistration frame. + + Notes + ----- + Many parameters (e.g., ``head_opacity``) take None as a parameter, + which means that the default will be read from the MNE-Python + configuration file (which gets saved when exiting). + + Step by step instructions for the coregistrations are shown below: + + .. 
youtube:: ALV5qqMHLlQ + """ + config = get_config() + if head_high_res is None: + head_high_res = config.get("MNE_COREG_HEAD_HIGH_RES", "true") == "true" + if head_opacity is None: + head_opacity = config.get("MNE_COREG_HEAD_OPACITY", 0.8) + if width is None: + width = config.get("MNE_COREG_WINDOW_WIDTH", 800) + if height is None: + height = config.get("MNE_COREG_WINDOW_HEIGHT", 600) + if subjects_dir is None: + if "SUBJECTS_DIR" in config: + subjects_dir = config["SUBJECTS_DIR"] + elif "MNE_COREG_SUBJECTS_DIR" in config: + subjects_dir = config["MNE_COREG_SUBJECTS_DIR"] + false_like = ("false", "0") + if orient_to_surface is None: + orient_to_surface = config.get("MNE_COREG_ORIENT_TO_SURFACE", "true").lower() + orient_to_surface = orient_to_surface not in false_like + if scale_by_distance is None: + scale_by_distance = config.get("MNE_COREG_SCALE_BY_DISTANCE", "true").lower() + scale_by_distance = scale_by_distance not in false_like + if interaction is None: + interaction = config.get("MNE_COREG_INTERACTION", "terrain") + if mark_inside is None: + mark_inside = config.get("MNE_COREG_MARK_INSIDE", "true").lower() + mark_inside = mark_inside not in false_like + if fullscreen is None: + fullscreen = config.get("MNE_COREG_FULLSCREEN", "") == "true" + head_opacity = float(head_opacity) + width = int(width) + height = int(height) + + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + from ._coreg import CoregistrationUI + + if MNE_3D_BACKEND_TESTING: + show = block = False + return CoregistrationUI( + info_file=inst, + subject=subject, + subjects_dir=subjects_dir, + head_resolution=head_high_res, + head_opacity=head_opacity, + orient_glyphs=orient_to_surface, + scale_by_distance=scale_by_distance, + mark_inside=mark_inside, + trans=trans, + size=(width, height), + show=show, + block=block, + interaction=interaction, + fullscreen=fullscreen, + verbose=verbose, + ) + + +class _GUIScraper: + """Scrape GUI outputs.""" + + def __repr__(self): + return "" + + def __call__(self, block, block_vars, gallery_conf): + from ._coreg import CoregistrationUI + + gui_classes = (CoregistrationUI,) + try: + from mne_gui_addons._ieeg_locate import IntracranialElectrodeLocator + except Exception: + pass + else: + gui_classes = gui_classes + (IntracranialElectrodeLocator,) + from qtpy import QtGui + from sphinx_gallery.scrapers import figure_rst + + for gui in block_vars["example_globals"].values(): + if ( + isinstance(gui, gui_classes) + and not getattr(gui, "_scraped", False) + and gallery_conf["builder_name"] == "html" + ): + gui._scraped = True # monkey-patch but it's easy enough + img_fname = next(block_vars["image_path_iterator"]) + # TODO fix in window refactor + window = gui if hasattr(gui, "grab") else gui._renderer._window + # window is QWindow + # https://doc.qt.io/qt-5/qwidget.html#grab + pixmap = window.grab() + if hasattr(gui, "_renderer"): # if no renderer, no need + # Now the tricky part: we need to get the 3D renderer, + # extract the image from it, and put it in the correct + # place in the pixmap. The easiest way to do this is + # actually to save the 3D image first, then load it + # using QPixmap and Qt geometry. 
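Aside: a standalone sketch of the screenshot compositing described in the comment above: grab the whole window, screenshot the 3D plotter to disk, reload it as a QPixmap, and paint it at the plotter's position inside the window grab. Here window and plotter are placeholders for the real Qt window and PyVista plotter.

from qtpy import QtGui


def composite_gui_screenshot(window, plotter, img_fname):
    pixmap = window.grab()  # QWidget.grab() -> QPixmap of the whole window
    plotter.screenshot(img_fname)  # save the 3D scene to disk
    sub_pixmap = QtGui.QPixmap(img_fname)  # reload the 3D scene as a QPixmap
    painter = QtGui.QPainter(pixmap)
    # place the 3D image where the plotter sits inside the window
    painter.drawPixmap(plotter.mapTo(window, plotter.rect().topLeft()), sub_pixmap)
    painter.end()
    pixmap.save(img_fname)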
+ plotter = gui._renderer.plotter + plotter.screenshot(img_fname) + sub_pixmap = QtGui.QPixmap(img_fname) + # https://doc.qt.io/qt-5/qwidget.html#mapTo + # https://doc.qt.io/qt-5/qpainter.html#drawPixmap-1 + QtGui.QPainter(pixmap).drawPixmap( + plotter.mapTo(window, plotter.rect().topLeft()), sub_pixmap + ) + # https://doc.qt.io/qt-5/qpixmap.html#save + pixmap.save(img_fname) + try: # for compatibility with both GUIs, will be refactored + gui._renderer.close() # TODO should be triggered by close + except Exception: + pass + gui.close() + return figure_rst([img_fname], gallery_conf["src_dir"], "GUI") + return "" diff --git a/mne/gui/_ieeg_locate.py b/mne/gui/_ieeg_locate.py deleted file mode 100644 index 59f3ea715de..00000000000 --- a/mne/gui/_ieeg_locate.py +++ /dev/null @@ -1,769 +0,0 @@ -# -*- coding: utf-8 -*- -"""Intracranial elecrode localization GUI for finding contact locations.""" - -# Authors: Alex Rockhill -# -# License: BSD (3-clause) - -import numpy as np -import platform - -from scipy.ndimage import maximum_filter - -from qtpy import QtCore, QtGui -from qtpy.QtCore import Slot, Signal -from qtpy.QtWidgets import (QVBoxLayout, QHBoxLayout, QLabel, - QMessageBox, QWidget, QAbstractItemView, - QListView, QSlider, QPushButton, - QComboBox) - -from matplotlib.colors import LinearSegmentedColormap - -from ..channels import make_dig_montage -from ._core import SliceBrowser -from ..surface import _voxel_neighbors -from ..transforms import apply_trans, _get_trans, invert_transform -from ..utils import logger, _validate_type, verbose -from .. import pick_types - -_CH_PLOT_SIZE = 1024 -_RADIUS_SCALAR = 0.4 -_TUBE_SCALAR = 0.1 -_BOLT_SCALAR = 30 # mm -_CH_MENU_WIDTH = 30 if platform.system() == 'Windows' else 10 - -# 20 colors generated to be evenly spaced in a cube, worked better than -# matplotlib color cycle -_UNIQUE_COLORS = [(0.1, 0.42, 0.43), (0.9, 0.34, 0.62), (0.47, 0.51, 0.3), - (0.47, 0.55, 0.99), (0.79, 0.68, 0.06), (0.34, 0.74, 0.05), - (0.58, 0.87, 0.13), (0.86, 0.98, 0.4), (0.92, 0.91, 0.66), - (0.77, 0.38, 0.34), (0.9, 0.37, 0.1), (0.2, 0.62, 0.9), - (0.22, 0.65, 0.64), (0.14, 0.94, 0.8), (0.34, 0.31, 0.68), - (0.59, 0.28, 0.74), (0.46, 0.19, 0.94), (0.37, 0.93, 0.7), - (0.56, 0.86, 0.55), (0.67, 0.69, 0.44)] -_N_COLORS = len(_UNIQUE_COLORS) -_CMAP = LinearSegmentedColormap.from_list( - 'ch_colors', _UNIQUE_COLORS, N=_N_COLORS) - - -class ComboBox(QComboBox): - """Dropdown menu that emits a click when popped up.""" - - clicked = Signal() - - def showPopup(self): - """Override show popup method to emit click.""" - self.clicked.emit() - super(ComboBox, self).showPopup() - - -class IntracranialElectrodeLocator(SliceBrowser): - """Locate electrode contacts using a coregistered MRI and CT.""" - - def __init__(self, info, trans, base_image, subject=None, - subjects_dir=None, groups=None, show=True, verbose=None): - """GUI for locating intracranial electrodes. - - .. note:: Images will be displayed using orientation information - obtained from the image header. Images will be resampled to - dimensions [256, 256, 256] for display. 
- """ - if not info.ch_names: - raise ValueError('No channels found in `info` to locate') - - # store info for modification - self._info = info - self._seeg_idx = pick_types(self._info, meg=False, seeg=True) - self._verbose = verbose - - # channel plotting default parameters - self._ch_alpha = 0.5 - self._radius = int(_CH_PLOT_SIZE // 100) # starting 1/100 of image - - # initialize channel data - self._ch_index = 0 - # load data, apply trans - self._head_mri_t = _get_trans(trans, 'head', 'mri')[0] - self._mri_head_t = invert_transform(self._head_mri_t) - - # ensure channel positions in head - montage = info.get_montage() - if montage and montage.get_positions()['coord_frame'] != 'head': - raise RuntimeError('Channel positions in the ``info`` object must ' - 'be in the "head" coordinate frame.') - - # load channels, convert from m to mm - self._chs = {name: apply_trans(self._head_mri_t, ch['loc'][:3]) * 1000 - for name, ch in zip(info.ch_names, info['chs'])} - self._ch_names = list(self._chs.keys()) - self._group_channels(groups) - - # Initialize GUI - super(IntracranialElectrodeLocator, self).__init__( - base_image=base_image, subject=subject, subjects_dir=subjects_dir) - - # set current position as current contact location if exists - if not np.isnan(self._chs[self._ch_names[self._ch_index]]).any(): - self._set_ras(self._chs[self._ch_names[self._ch_index]], - update_plots=False) - - # add plots of contacts on top - self._plot_ch_images() - - # Add lines - self._lines = dict() - self._lines_2D = dict() - for group in set(self._groups.values()): - self._update_lines(group) - - # ready for user - self._move_cursors_to_pos() - self._ch_list.setFocus() # always focus on list - - if show: - self.show() - - def _configure_ui(self): - # data is loaded for an abstract base image, associate with ct - self._ct_data = self._base_data - self._images['ct'] = self._images['base'] - self._ct_maxima = None # don't compute until turned on - - toolbar = self._configure_toolbar() - slider_bar = self._configure_sliders() - status_bar = self._configure_status_bar() - self._ch_list = self._configure_channel_sidebar() # need for updating - - plot_layout = QHBoxLayout() - plot_layout.addLayout(self._plt_grid) - plot_layout.addWidget(self._ch_list) - - main_vbox = QVBoxLayout() - main_vbox.addLayout(toolbar) - main_vbox.addLayout(slider_bar) - main_vbox.addLayout(plot_layout) - main_vbox.addLayout(status_bar) - - central_widget = QWidget() - central_widget.setLayout(main_vbox) - self.setCentralWidget(central_widget) - - def _configure_channel_sidebar(self): - """Configure the sidebar to select channels/contacts.""" - ch_list = QListView() - ch_list.setSelectionMode(QAbstractItemView.SingleSelection) - max_ch_name_len = max([len(name) for name in self._chs]) - ch_list.setMinimumWidth(max_ch_name_len * _CH_MENU_WIDTH) - ch_list.setMaximumWidth(max_ch_name_len * _CH_MENU_WIDTH) - self._ch_list_model = QtGui.QStandardItemModel(ch_list) - for name in self._ch_names: - self._ch_list_model.appendRow(QtGui.QStandardItem(name)) - self._color_list_item(name=name) - ch_list.setModel(self._ch_list_model) - ch_list.clicked.connect(self._go_to_ch) - ch_list.setCurrentIndex( - self._ch_list_model.index(self._ch_index, 0)) - ch_list.keyPressEvent = self.keyPressEvent - return ch_list - - def _make_ch_image(self, axis, proj=False): - """Make a plot to display the channel locations.""" - # Make channel data higher resolution so it looks better. 
- ch_image = np.zeros((_CH_PLOT_SIZE, _CH_PLOT_SIZE)) * np.nan - vxyz = self._voxel_sizes - - def color_ch_radius(ch_image, xf, yf, group, radius): - # Take the fraction across each dimension of the RAS - # coordinates converted to xyz and put a circle in that - # position in this larger resolution image - ex, ey = np.round(np.array([xf, yf]) * _CH_PLOT_SIZE).astype(int) - ii = np.arange(-radius, radius + 1) - ii_sq = ii * ii - idx = np.where(ii_sq + ii_sq[:, np.newaxis] < radius * radius) - # negative y because y axis is inverted - ch_image[-(ey + ii[idx[1]]), ex + ii[idx[0]]] = group - return ch_image - - for name, ras in self._chs.items(): - # move from middle-centered (half coords positive, half negative) - # to bottom-left corner centered (all coords positive). - if np.isnan(ras).any(): - continue - xyz = apply_trans(self._ras_vox_t, ras) - # check if closest to that voxel - dist = np.linalg.norm(xyz - self._current_slice) - if proj or dist < self._radius: - group = self._groups[name] - r = self._radius if proj else \ - self._radius - np.round(abs(dist)).astype(int) - xf, yf = (xyz / vxyz)[list(self._xy_idx[axis])] - ch_image = color_ch_radius(ch_image, xf, yf, group, r) - return ch_image - - @verbose - def _save_ch_coords(self, info=None, verbose=None): - """Save the location of the electrode contacts.""" - logger.info('Saving channel positions to `info`') - if info is None: - info = self._info - montage = info.get_montage() - montage_kwargs = montage.get_positions() if montage else \ - dict(ch_pos=dict(), coord_frame='head') - for ch in info['chs']: - # surface RAS-> head and mm->m - montage_kwargs['ch_pos'][ch['ch_name']] = apply_trans( - self._mri_head_t, self._chs[ch['ch_name']].copy() / 1000) - info.set_montage(make_dig_montage(**montage_kwargs)) - - def _plot_ch_images(self): - img_delta = 0.5 - ch_deltas = list(img_delta * (self._voxel_sizes[ii] / _CH_PLOT_SIZE) - for ii in range(3)) - self._ch_extents = list( - [-ch_delta, self._voxel_sizes[idx[0]] - ch_delta, - -ch_delta, self._voxel_sizes[idx[1]] - ch_delta] - for idx, ch_delta in zip(self._xy_idx, ch_deltas)) - self._images['chs'] = list() - for axis in range(3): - fig = self._figs[axis] - ax = fig.axes[0] - self._images['chs'].append(ax.imshow( - self._make_ch_image(axis), aspect='auto', - extent=self._ch_extents[axis], zorder=3, - cmap=_CMAP, alpha=self._ch_alpha, vmin=0, vmax=_N_COLORS)) - self._3d_chs = dict() - for name in self._chs: - self._plot_3d_ch(name) - - def _plot_3d_ch(self, name, render=False): - """Plot a single 3D channel.""" - if name in self._3d_chs: - self._renderer.plotter.remove_actor( - self._3d_chs.pop(name), render=False) - if not any(np.isnan(self._chs[name])): - self._3d_chs[name] = self._renderer.sphere( - tuple(self._chs[name]), scale=1, - color=_CMAP(self._groups[name])[:3], opacity=self._ch_alpha)[0] - # The actor scale is managed differently than the glyph scale - # in order not to recreate objects, we use the actor scale - self._3d_chs[name].SetOrigin(self._chs[name]) - self._3d_chs[name].SetScale(self._radius * _RADIUS_SCALAR) - if render: - self._renderer._update() - - def _configure_toolbar(self): - """Make a bar with buttons for user interactions.""" - hbox = QHBoxLayout() - - help_button = QPushButton('Help') - help_button.released.connect(self._show_help) - hbox.addWidget(help_button) - - hbox.addStretch(8) - - hbox.addWidget(QLabel('Snap to Center')) - self._snap_button = QPushButton('Off') - self._snap_button.setMaximumWidth(25) # not too big - hbox.addWidget(self._snap_button) - 
self._snap_button.released.connect(self._toggle_snap) - self._toggle_snap() # turn on to start - - hbox.addStretch(1) - - self._toggle_brain_button = QPushButton('Show Brain') - self._toggle_brain_button.released.connect(self._toggle_show_brain) - hbox.addWidget(self._toggle_brain_button) - - hbox.addStretch(1) - - mark_button = QPushButton('Mark') - hbox.addWidget(mark_button) - mark_button.released.connect(self.mark_channel) - - remove_button = QPushButton('Remove') - hbox.addWidget(remove_button) - remove_button.released.connect(self.remove_channel) - - self._group_selector = ComboBox() - group_model = self._group_selector.model() - - for i in range(_N_COLORS): - self._group_selector.addItem(' ') - color = QtGui.QColor() - color.setRgb(*(255 * np.array(_CMAP(i))).round().astype(int)) - brush = QtGui.QBrush(color) - brush.setStyle(QtCore.Qt.SolidPattern) - group_model.setData(group_model.index(i, 0), - brush, QtCore.Qt.BackgroundRole) - self._group_selector.clicked.connect(self._select_group) - self._group_selector.currentIndexChanged.connect( - self._select_group) - hbox.addWidget(self._group_selector) - - # update background color for current selection - self._update_group() - - return hbox - - def _configure_sliders(self): - """Make a bar with sliders on it.""" - - def make_label(name): - label = QLabel(name) - label.setAlignment(QtCore.Qt.AlignCenter) - return label - - def make_slider(smin, smax, sval, sfun=None): - slider = QSlider(QtCore.Qt.Horizontal) - slider.setMinimum(int(round(smin))) - slider.setMaximum(int(round(smax))) - slider.setValue(int(round(sval))) - slider.setTracking(False) # only update on release - if sfun is not None: - slider.valueChanged.connect(sfun) - slider.keyPressEvent = self.keyPressEvent - return slider - - slider_hbox = QHBoxLayout() - - ch_vbox = QVBoxLayout() - ch_vbox.addWidget(make_label('ch alpha')) - ch_vbox.addWidget(make_label('ch radius')) - slider_hbox.addLayout(ch_vbox) - - ch_slider_vbox = QVBoxLayout() - self._alpha_slider = make_slider(0, 100, self._ch_alpha * 100, - self._update_ch_alpha) - ch_plot_max = _CH_PLOT_SIZE // 50 # max 1 / 50 of plot size - ch_slider_vbox.addWidget(self._alpha_slider) - self._radius_slider = make_slider(0, ch_plot_max, self._radius, - self._update_radius) - ch_slider_vbox.addWidget(self._radius_slider) - slider_hbox.addLayout(ch_slider_vbox) - - ct_vbox = QVBoxLayout() - ct_vbox.addWidget(make_label('CT min')) - ct_vbox.addWidget(make_label('CT max')) - slider_hbox.addLayout(ct_vbox) - - ct_slider_vbox = QVBoxLayout() - ct_min = int(round(np.nanmin(self._ct_data))) - ct_max = int(round(np.nanmax(self._ct_data))) - self._ct_min_slider = make_slider( - ct_min, ct_max, ct_min, self._update_ct_scale) - ct_slider_vbox.addWidget(self._ct_min_slider) - self._ct_max_slider = make_slider( - ct_min, ct_max, ct_max, self._update_ct_scale) - ct_slider_vbox.addWidget(self._ct_max_slider) - slider_hbox.addLayout(ct_slider_vbox) - return slider_hbox - - def _configure_status_bar(self, hbox=None): - hbox = QHBoxLayout() if hbox is None else hbox - - hbox.addStretch(3) - - self._toggle_show_mip_button = QPushButton('Show Max Intensity Proj') - self._toggle_show_mip_button.released.connect( - self._toggle_show_mip) - hbox.addWidget(self._toggle_show_mip_button) - - self._toggle_show_max_button = QPushButton('Show Maxima') - self._toggle_show_max_button.released.connect( - self._toggle_show_max) - hbox.addWidget(self._toggle_show_max_button) - - self._intensity_label = QLabel('') # update later - 
hbox.addWidget(self._intensity_label) - - # add SliceBrowser navigation items - super(IntracranialElectrodeLocator, self)._configure_status_bar( - hbox=hbox) - return hbox - - def _move_cursors_to_pos(self): - super(IntracranialElectrodeLocator, self)._move_cursors_to_pos() - self._ch_list.setFocus() # remove focus from text edit - - def _group_channels(self, groups): - """Automatically find a group based on the name of the channel.""" - if groups is not None: - for name in self._ch_names: - if name not in groups: - raise ValueError(f'{name} not found in ``groups``') - _validate_type(groups[name], (float, int), f'groups[{name}]') - self.groups = groups - else: - i = 0 - self._groups = dict() - base_names = dict() - for name in self._ch_names: - # strip all numbers from the name - base_name = ''.join([letter for letter in name if - not letter.isdigit() and letter != ' ']) - if base_name in base_names: - # look up group number by base name - self._groups[name] = base_names[base_name] - else: - self._groups[name] = i - base_names[base_name] = i - i += 1 - - def _update_lines(self, group, only_2D=False): - """Draw lines that connect the points in a group.""" - if group in self._lines_2D: # remove existing 2D lines first - for line in self._lines_2D[group]: - line.remove() - self._lines_2D.pop(group) - if only_2D: # if not in projection, don't add 2D lines - if self._toggle_show_mip_button.text() == \ - 'Show Max Intensity Proj': - return - elif group in self._lines: # if updating 3D, remove first - self._renderer.plotter.remove_actor( - self._lines[group], render=False) - pos = np.array([ - self._chs[ch] for i, ch in enumerate(self._ch_names) - if self._groups[ch] == group and i in self._seeg_idx and - not np.isnan(self._chs[ch]).any()]) - if len(pos) < 2: # not enough points for line - return - # first, the insertion will be the point farthest from the origin - # brains are a longer posterior-anterior, scale for this (80%) - insert_idx = np.argmax(np.linalg.norm(pos * np.array([1, 0.8, 1]), - axis=1)) - # second, find the farthest point from the insertion - target_idx = np.argmax(np.linalg.norm(pos[insert_idx] - pos, axis=1)) - # third, make a unit vector and to add to the insertion for the bolt - elec_v = pos[insert_idx] - pos[target_idx] - elec_v /= np.linalg.norm(elec_v) - if not only_2D: - self._lines[group] = self._renderer.tube( - [pos[target_idx]], [pos[insert_idx] + elec_v * _BOLT_SCALAR], - radius=self._radius * _TUBE_SCALAR, color=_CMAP(group)[:3])[0] - if self._toggle_show_mip_button.text() == 'Hide Max Intensity Proj': - # add 2D lines on each slice plot if in max intensity projection - target_vox = apply_trans(self._ras_vox_t, pos[target_idx]) - insert_vox = apply_trans(self._ras_vox_t, - pos[insert_idx] + elec_v * _BOLT_SCALAR) - lines_2D = list() - for axis in range(3): - x, y = self._xy_idx[axis] - lines_2D.append(self._figs[axis].axes[0].plot( - [target_vox[x], insert_vox[x]], - [target_vox[y], insert_vox[y]], - color=_CMAP(group), linewidth=0.25, zorder=7)[0]) - self._lines_2D[group] = lines_2D - - def _select_group(self): - """Change the group label to the selection.""" - group = self._group_selector.currentIndex() - self._groups[self._ch_names[self._ch_index]] = group - # color differently if found already - self._color_list_item(self._ch_names[self._ch_index]) - self._update_group() - - def _update_group(self): - """Set background for closed group menu.""" - group = self._group_selector.currentIndex() - rgb = (255 * np.array(_CMAP(group))).round().astype(int) - 
self._group_selector.setStyleSheet( - 'background-color: rgb({:d},{:d},{:d})'.format(*rgb)) - self._group_selector.update() - - def _update_ch_selection(self): - """Update which channel is selected.""" - name = self._ch_names[self._ch_index] - self._ch_list.setCurrentIndex( - self._ch_list_model.index(self._ch_index, 0)) - self._group_selector.setCurrentIndex(self._groups[name]) - self._update_group() - if not np.isnan(self._chs[name]).any(): - self._set_ras(self._chs[name]) - self._update_camera(render=True) - self._draw() - - def _go_to_ch(self, index): - """Change current channel to the item selected.""" - self._ch_index = index.row() - self._update_ch_selection() - - @Slot() - def _next_ch(self): - """Increment the current channel selection index.""" - self._ch_index = (self._ch_index + 1) % len(self._ch_names) - self._update_ch_selection() - - def _color_list_item(self, name=None): - """Color the item in the view list for easy id of marked channels.""" - name = self._ch_names[self._ch_index] if name is None else name - color = QtGui.QColor('white') - if not np.isnan(self._chs[name]).any(): - group = self._groups[name] - color.setRgb(*[int(c * 255) for c in _CMAP(group)]) - brush = QtGui.QBrush(color) - brush.setStyle(QtCore.Qt.SolidPattern) - self._ch_list_model.setData( - self._ch_list_model.index(self._ch_names.index(name), 0), - brush, QtCore.Qt.BackgroundRole) - # color text black - color = QtGui.QColor('black') - brush = QtGui.QBrush(color) - brush.setStyle(QtCore.Qt.SolidPattern) - self._ch_list_model.setData( - self._ch_list_model.index(self._ch_names.index(name), 0), - brush, QtCore.Qt.ForegroundRole) - - @Slot() - def _toggle_snap(self): - """Toggle snapping the contact location to the center of mass.""" - if self._snap_button.text() == 'Off': - self._snap_button.setText('On') - self._snap_button.setStyleSheet("background-color: green") - else: # text == 'On', turn off - self._snap_button.setText('Off') - self._snap_button.setStyleSheet("background-color: red") - - @Slot() - def mark_channel(self, ch=None): - """Mark a channel as being located at the crosshair. - - Parameters - ---------- - ch : str - The channel name. If ``None``, the current channel - is marked. - """ - if ch is not None and ch not in self._ch_names: - raise ValueError(f'Channel {ch} not found') - name = self._ch_names[self._ch_index if ch is None else - self._ch_names.index(ch)] - if self._snap_button.text() == 'Off': - self._chs[name][:] = self._ras - else: - shape = np.mean(self._voxel_sizes) # Freesurfer shape (256) - voxels_max = int( - 4 / 3 * np.pi * (shape * self._radius / _CH_PLOT_SIZE)**3) - neighbors = _voxel_neighbors( - self._vox, self._ct_data, thresh=0.5, - voxels_max=voxels_max, use_relative=True) - self._chs[name][:] = apply_trans( # to surface RAS - self._vox_ras_t, np.array(list(neighbors)).mean(axis=0)) - self._color_list_item() - self._update_lines(self._groups[name]) - self._update_ch_images(draw=True) - self._plot_3d_ch(name, render=True) - self._save_ch_coords() - self._next_ch() - self._ch_list.setFocus() - - @Slot() - def remove_channel(self, ch=None): - """Remove the location data for the current channel. - - Parameters - ---------- - ch : str - The channel name. If ``None``, the current channel - is removed. 
- """ - if ch is not None and ch not in self._ch_names: - raise ValueError(f'Channel {ch} not found') - name = self._ch_names[self._ch_index if ch is None else - self._ch_names.index(ch)] - self._chs[name] *= np.nan - self._color_list_item() - self._save_ch_coords() - self._update_lines(self._groups[name]) - self._update_ch_images(draw=True) - self._plot_3d_ch(name, render=True) - self._next_ch() - self._ch_list.setFocus() - - def _update_ch_images(self, axis=None, draw=False): - """Update the channel image(s).""" - for axis in range(3) if axis is None else [axis]: - self._images['chs'][axis].set_data( - self._make_ch_image(axis)) - if self._toggle_show_mip_button.text() == \ - 'Hide Max Intensity Proj': - self._images['mip_chs'][axis].set_data( - self._make_ch_image(axis, proj=True)) - if draw: - self._draw(axis) - - def _update_ct_images(self, axis=None, draw=False): - """Update the CT image(s).""" - for axis in range(3) if axis is None else [axis]: - ct_data = np.take(self._ct_data, self._current_slice[axis], - axis=axis).T - # Threshold the CT so only bright objects (electrodes) are visible - ct_data[ct_data < self._ct_min_slider.value()] = np.nan - ct_data[ct_data > self._ct_max_slider.value()] = np.nan - self._images['ct'][axis].set_data(ct_data) - if 'local_max' in self._images: - ct_max_data = np.take( - self._ct_maxima, self._current_slice[axis], axis=axis).T - self._images['local_max'][axis].set_data(ct_max_data) - if draw: - self._draw(axis) - - def _update_mri_images(self, axis=None, draw=False): - """Update the CT image(s).""" - if 'mri' in self._images: - for axis in range(3) if axis is None else [axis]: - self._images['mri'][axis].set_data( - np.take(self._mri_data, self._current_slice[axis], - axis=axis).T) - if draw: - self._draw(axis) - - def _update_images(self, axis=None, draw=True): - """Update CT and channel images when general changes happen.""" - self._update_ch_images(axis=axis) - self._update_mri_images(axis=axis) - super()._update_images() - - def _update_ct_scale(self): - """Update CT min slider value.""" - new_min = self._ct_min_slider.value() - new_max = self._ct_max_slider.value() - # handle inversions - self._ct_min_slider.setValue(min([new_min, new_max])) - self._ct_max_slider.setValue(max([new_min, new_max])) - self._update_ct_images(draw=True) - - def _update_radius(self): - """Update channel plot radius.""" - self._radius = np.round(self._radius_slider.value()).astype(int) - if self._toggle_show_max_button.text() == 'Hide Maxima': - self._update_ct_maxima() - self._update_ct_images() - else: - self._ct_maxima = None # signals ct max is out-of-date - self._update_ch_images(draw=True) - for name, actor in self._3d_chs.items(): - if not np.isnan(self._chs[name]).any(): - actor.SetOrigin(self._chs[name]) - actor.SetScale(self._radius * _RADIUS_SCALAR) - self._renderer._update() - self._ch_list.setFocus() # remove focus from 3d plotter - - def _update_ch_alpha(self): - """Update channel plot alpha.""" - self._ch_alpha = self._alpha_slider.value() / 100 - for axis in range(3): - self._images['chs'][axis].set_alpha(self._ch_alpha) - self._draw() - for actor in self._3d_chs.values(): - actor.GetProperty().SetOpacity(self._ch_alpha) - self._renderer._update() - self._ch_list.setFocus() # remove focus from 3d plotter - - def _show_help(self): - """Show the help menu.""" - QMessageBox.information( - self, 'Help', - "Help:\n'm': mark channel location\n" - "'r': remove channel location\n" - "'b': toggle viewing of brain in T1\n" - "'+'/'-': zoom\nleft/right arrow: 
left/right\n" - "up/down arrow: superior/inferior\n" - "left angle bracket/right angle bracket: anterior/posterior") - - def _update_ct_maxima(self): - """Compute the maximum voxels based on the current radius.""" - self._ct_maxima = maximum_filter( - self._ct_data, (self._radius,) * 3) == self._ct_data - self._ct_maxima[self._ct_data <= np.median(self._ct_data)] = \ - False - self._ct_maxima = np.where(self._ct_maxima, 1, np.nan) # transparent - - def _toggle_show_mip(self): - """Toggle whether the maximum-intensity projection is shown.""" - if self._toggle_show_mip_button.text() == 'Show Max Intensity Proj': - self._toggle_show_mip_button.setText('Hide Max Intensity Proj') - self._images['mip'] = list() - self._images['mip_chs'] = list() - ct_min, ct_max = np.nanmin(self._ct_data), np.nanmax(self._ct_data) - for axis in range(3): - ct_mip_data = np.max(self._ct_data, axis=axis).T - self._images['mip'].append( - self._figs[axis].axes[0].imshow( - ct_mip_data, cmap='gray', aspect='auto', - vmin=ct_min, vmax=ct_max, zorder=5)) - # add circles for each channel - xs, ys, colors = list(), list(), list() - for name, ras in self._chs.items(): - xyz = self._vox - xs.append(xyz[self._xy_idx[axis][0]]) - ys.append(xyz[self._xy_idx[axis][1]]) - colors.append(_CMAP(self._groups[name])) - self._images['mip_chs'].append( - self._figs[axis].axes[0].imshow( - self._make_ch_image(axis, proj=True), aspect='auto', - extent=self._ch_extents[axis], zorder=6, - cmap=_CMAP, alpha=1, vmin=0, vmax=_N_COLORS)) - for group in set(self._groups.values()): - self._update_lines(group, only_2D=True) - else: - for img in self._images['mip'] + self._images['mip_chs']: - img.remove() - self._images.pop('mip') - self._images.pop('mip_chs') - self._toggle_show_mip_button.setText('Show Max Intensity Proj') - for group in set(self._groups.values()): # remove lines - self._update_lines(group, only_2D=True) - self._draw() - - def _toggle_show_max(self): - """Toggle whether to color local maxima differently.""" - if self._toggle_show_max_button.text() == 'Show Maxima': - self._toggle_show_max_button.setText('Hide Maxima') - # happens on initiation or if the radius is changed with it off - if self._ct_maxima is None: # otherwise don't recompute - self._update_ct_maxima() - self._images['local_max'] = list() - for axis in range(3): - ct_max_data = np.take(self._ct_maxima, - self._current_slice[axis], axis=axis).T - self._images['local_max'].append( - self._figs[axis].axes[0].imshow( - ct_max_data, cmap='autumn', aspect='auto', - vmin=0, vmax=1, zorder=4)) - else: - for img in self._images['local_max']: - img.remove() - self._images.pop('local_max') - self._toggle_show_max_button.setText('Show Maxima') - self._draw() - - def _toggle_show_brain(self): - """Toggle whether the brain/MRI is being shown.""" - if 'mri' in self._images: - for img in self._images['mri']: - img.remove() - self._images.pop('mri') - self._toggle_brain_button.setText('Show Brain') - else: - self._images['mri'] = list() - for axis in range(3): - mri_data = np.take(self._mri_data, - self._current_slice[axis], axis=axis).T - self._images['mri'].append(self._figs[axis].axes[0].imshow( - mri_data, cmap='hot', aspect='auto', alpha=0.25, zorder=2)) - self._toggle_brain_button.setText('Hide Brain') - self._draw() - - def keyPressEvent(self, event): - """Execute functions when the user presses a key.""" - super(IntracranialElectrodeLocator, self).keyPressEvent(event) - - if event.text() == 'm': - self.mark_channel() - - if event.text() == 'r': - self.remove_channel() - 
- if event.text() == 'b': - self._toggle_show_brain() diff --git a/mne/gui/tests/__init__.py b/mne/gui/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/gui/tests/__init__.py +++ b/mne/gui/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/gui/tests/test_core.py b/mne/gui/tests/test_core.py deleted file mode 100644 index 7a5040903bc..00000000000 --- a/mne/gui/tests/test_core.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -# Authors: Alex Rockhill -# -# License: BSD-3-clause - -import numpy as np -from numpy.testing import assert_allclose - -import pytest - -from mne.datasets import testing -from mne.utils import catch_logging, use_log_level -from mne.viz.utils import _fake_click - -data_path = testing.data_path(download=False) -subject = "sample" -subjects_dir = data_path / "subjects" - - -@testing.requires_testing_data -def test_slice_browser_io(renderer_interactive_pyvistaqt): - """Test the input/output of the slice browser GUI.""" - nib = pytest.importorskip('nibabel') - from mne.gui._core import SliceBrowser - with pytest.raises(ValueError, match='Base image is not aligned to MRI'): - SliceBrowser(nib.MGHImage( - np.ones((96, 96, 96), dtype=np.float32), np.eye(4)), - subject=subject, subjects_dir=subjects_dir) - - -# TODO: For some reason this leaves some stuff un-closed, we should fix it -@pytest.mark.allow_unclosed -@testing.requires_testing_data -def test_slice_browser_display(renderer_interactive_pyvistaqt): - """Test that the slice browser GUI displays properly.""" - pytest.importorskip('nibabel') - from mne.gui._core import SliceBrowser - # test no seghead, fsaverage doesn't have seghead - with pytest.warns(RuntimeWarning, match='`seghead` not found'): - with catch_logging() as log: - gui = SliceBrowser( - subject='fsaverage', subjects_dir=subjects_dir, - verbose=True) - log = log.getvalue() - assert 'using marching cubes' in log - gui.close() - - # test functions - with pytest.warns(RuntimeWarning, match='`pial` surface not found'): - gui = SliceBrowser(subject=subject, subjects_dir=subjects_dir) - - # test RAS - gui._RAS_textbox.setText('10 10 10') - gui._RAS_textbox.focusOutEvent(event=None) - assert_allclose(gui._ras, [10, 10, 10]) - - # test vox - gui._VOX_textbox.setText('150, 150, 150') - gui._VOX_textbox.focusOutEvent(event=None) - assert_allclose(gui._ras, [23, 22, 23]) - - # test click - with use_log_level('debug'): - _fake_click(gui._figs[2], gui._figs[2].axes[0], - [137, 140], xform='data', kind='release') - assert_allclose(gui._ras, [10, 12, 23]) - gui.close() diff --git a/mne/gui/tests/test_coreg.py b/mne/gui/tests/test_coreg.py index 44374d3c048..4800bea29e1 100644 --- a/mne/gui/tests/test_coreg.py +++ b/mne/gui/tests/test_coreg.py @@ -1,67 +1,50 @@ -# Author: Christian Brodbeck -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
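For reference, the local-maxima search that the removed _update_ct_maxima performs reduces to one comparison: a voxel is a candidate contact when it equals the maximum over a small neighborhood and is brighter than the background. A minimal sketch of that idea, assuming a random stand-in volume ct and an arbitrary radius rather than the GUI's actual data:

import numpy as np
from scipy.ndimage import maximum_filter

rng = np.random.default_rng(0)
ct = rng.random((64, 64, 64))            # stand-in for the CT volume
radius = 2                               # stand-in for the channel-plot radius

# A voxel is a local maximum if it equals the max over a (radius,)*3 window.
is_max = maximum_filter(ct, size=(radius,) * 3) == ct
is_max &= ct > np.median(ct)             # discard dim background voxels
maxima = np.argwhere(is_max)             # voxel indices of candidate contacts

The removed implementation then mapped the boolean mask to 1/NaN so that non-maxima render as transparent in the slice images.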
import os from contextlib import nullcontext from pathlib import Path +import numpy as np import pytest from numpy.testing import assert_allclose -import numpy as np import mne -from mne.datasets import testing -from mne.io import read_info -from mne.io.kit.tests import data_dir as kit_data_dir -from mne.io.constants import FIFF -from mne.utils import get_config, catch_logging, requires_version +from mne._fiff.constants import FIFF from mne.channels import DigMontage from mne.coreg import Coregistration +from mne.datasets import testing +from mne.io import read_info +from mne.utils import catch_logging, get_config from mne.viz import _3d - data_path = testing.data_path(download=False) raw_path = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" fname_trans = data_path / "MEG" / "sample" / "sample_audvis_trunc-trans.fif" -kit_raw_path = kit_data_dir / "test_bin_raw.fif" subjects_dir = data_path / "subjects" fid_fname = subjects_dir / "sample" / "bem" / "sample-fiducials.fif" ctf_raw_path = data_path / "CTF" / "catch-alp-good-f.ds" nirx_15_0_raw_path = ( - data_path - / "NIRx" - / "nirscout" - / "nirx_15_0_recording" - / "NIRS-2019-10-27_003.hdr" + data_path / "NIRx" / "nirscout" / "nirx_15_0_recording" / "NIRS-2019-10-27_003.hdr" ) nirsport2_raw_path = ( - data_path - / "NIRx" - / "nirsport_v2" - / "aurora_2021_9" - / "2021-10-01_002_config.hdr" + data_path / "NIRx" / "nirsport_v2" / "aurora_2021_9" / "2021-10-01_002_config.hdr" ) snirf_nirsport2_raw_path = ( - data_path - / "SNIRF" - / "NIRx" - / "NIRSport2" - / "1.0.3" - / "2021-05-05_001.snirf" + data_path / "SNIRF" / "NIRx" / "NIRSport2" / "1.0.3" / "2021-05-05_001.snirf" ) -pytest.importorskip('nibabel') +pytest.importorskip("nibabel") -class TstVTKPicker(object): +class TstVTKPicker: """Class to test cell picking.""" def __init__(self, mesh, cell_id, event_pos): self.mesh = mesh self.cell_id = cell_id - self.point_id = None self.event_pos = event_pos def GetCellId(self): @@ -75,10 +58,7 @@ def GetDataSet(self): def GetPickPosition(self): """Return the picked position.""" vtk_cell = self.mesh.GetCell(self.cell_id) - cell = [vtk_cell.GetPointId(point_id) for point_id - in range(vtk_cell.GetNumberOfPoints())] - self.point_id = cell[0] - return self.mesh.points[self.point_id] + return self.mesh.points[vtk_cell.GetPointId(0)] def GetEventPosition(self): """Return event position.""" @@ -88,62 +68,77 @@ def GetEventPosition(self): @pytest.mark.slowtest @testing.requires_testing_data @pytest.mark.parametrize( - 'inst_path', (raw_path, 'gen_montage', ctf_raw_path, nirx_15_0_raw_path, - nirsport2_raw_path, snirf_nirsport2_raw_path)) -def test_coreg_gui_pyvista_file_support(inst_path, tmp_path, - renderer_interactive_pyvistaqt): + "inst_path", + ( + raw_path, + "gen_montage", + ctf_raw_path, + nirx_15_0_raw_path, + nirsport2_raw_path, + snirf_nirsport2_raw_path, + ), +) +def test_coreg_gui_pyvista_file_support( + inst_path, tmp_path, renderer_interactive_pyvistaqt +): """Test reading supported files.""" from mne.gui import coregistration - if inst_path == 'gen_montage': + if inst_path == "gen_montage": # generate a montage fig to use as inst. 
tmp_info = read_info(raw_path) eeg_chans = [] - for pt in tmp_info['dig']: - if pt['kind'] == FIFF.FIFFV_POINT_EEG: + for pt in tmp_info["dig"]: + if pt["kind"] == FIFF.FIFFV_POINT_EEG: eeg_chans.append(f"EEG {pt['ident']:03d}") dig = DigMontage(dig=tmp_info["dig"], ch_names=eeg_chans) - inst_path = tmp_path / 'tmp-dig.fif' + inst_path = tmp_path / "tmp-dig.fif" dig.save(inst_path) if inst_path == ctf_raw_path: - ctx = pytest.warns(RuntimeWarning, match='MEG ref channel RMSP') + ctx = pytest.warns(RuntimeWarning, match="MEG ref channel RMSP") elif inst_path == snirf_nirsport2_raw_path: # TODO: This is maybe a bug? ctx = pytest.warns(RuntimeWarning, match='assuming "head"') else: ctx = nullcontext() with ctx: coreg = coregistration( - inst=inst_path, subject='sample', subjects_dir=subjects_dir) + inst=inst_path, subject="sample", subjects_dir=subjects_dir + ) coreg._accept_close_event = True coreg.close() @pytest.mark.slowtest @testing.requires_testing_data -def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, - renderer_interactive_pyvistaqt): +def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, renderer_interactive_pyvistaqt): """Test that using CoregistrationUI matches mne coreg.""" from mne.gui import coregistration + config = get_config() # the sample subject in testing has MRI fids assert (subjects_dir / "sample" / "bem" / "sample-fiducials.fif").is_file() - coreg = coregistration(subject='sample', subjects_dir=subjects_dir, - trans=fname_trans) + coreg = coregistration( + subject="sample", subjects_dir=subjects_dir, trans=fname_trans + ) assert coreg._lock_fids coreg._reset_fiducials() coreg.close() # make it always log the distances - monkeypatch.setattr(_3d.logger, 'info', _3d.logger.warning) + monkeypatch.setattr(_3d.logger, "info", _3d.logger.warning) with catch_logging() as log: - coreg = coregistration(inst=raw_path, subject='sample', - head_high_res=False, # for speed - subjects_dir=subjects_dir, verbose='debug') + coreg = coregistration( + inst=raw_path, + subject="sample", + head_high_res=False, # for speed + subjects_dir=subjects_dir, + verbose="debug", + ) log = log.getvalue() - assert 'Total 16/78 points inside the surface' in log + assert "Total 16/78 points inside the surface" in log coreg._set_fiducials_file(fid_fname) assert coreg._fiducials_file == str(fid_fname) @@ -153,18 +148,18 @@ def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, coreg._reset_fitting_parameters() coreg._set_scale_mode("uniform") coreg._fits_fiducials() - assert_allclose(coreg.coreg._scale, - np.array([97.46, 97.46, 97.46]) * 1e-2, - atol=1e-3) - shown_scale = [coreg._widgets[f's{x}'].get_value() for x in 'XYZ'] + assert_allclose( + coreg.coreg._scale, np.array([97.46, 97.46, 97.46]) * 1e-2, atol=1e-3 + ) + shown_scale = [coreg._widgets[f"s{x}"].get_value() for x in "XYZ"] assert_allclose(shown_scale, coreg.coreg._scale * 100, atol=1e-2) coreg._set_icp_fid_match("nearest") coreg._set_scale_mode("3-axis") coreg._fits_icp() - assert_allclose(coreg.coreg._scale, - np.array([104.43, 101.47, 125.78]) * 1e-2, - atol=1e-3) - shown_scale = [coreg._widgets[f's{x}'].get_value() for x in 'XYZ'] + assert_allclose( + coreg.coreg._scale, np.array([104.43, 101.47, 125.78]) * 1e-2, atol=1e-3 + ) + shown_scale = [coreg._widgets[f"s{x}"].get_value() for x in "XYZ"] assert_allclose(shown_scale, coreg.coreg._scale * 100, atol=1e-2) coreg._set_scale_mode("None") coreg._set_icp_fid_match("matched") @@ -177,7 +172,7 @@ def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, # picking assert not 
coreg._mri_fids_modified - vtk_picker = TstVTKPicker(coreg._surfaces['head'], 0, (0, 0)) + vtk_picker = TstVTKPicker(coreg._surfaces["head"], 0, (0, 0)) coreg._on_mouse_move(vtk_picker, None) coreg._on_button_press(vtk_picker, None) coreg._on_pick(vtk_picker, None) @@ -190,31 +185,31 @@ def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, assert coreg._lock_fids # fitting (no scaling) - assert coreg._nasion_weight == 10. - coreg._set_point_weight(11., 'nasion') - assert coreg._nasion_weight == 11. + assert coreg._nasion_weight == 10.0 + coreg._set_point_weight(11.0, "nasion") + assert coreg._nasion_weight == 11.0 coreg._fit_fiducials() with catch_logging() as log: coreg._redraw() # actually emit the log log = log.getvalue() - assert 'Total 6/78 points inside the surface' in log + assert "Total 6/78 points inside the surface" in log with catch_logging() as log: coreg._fit_icp() coreg._redraw() log = log.getvalue() - assert 'Total 38/78 points inside the surface' in log + assert "Total 38/78 points inside the surface" in log assert coreg.coreg._extra_points_filter is None coreg._omit_hsp() with catch_logging() as log: coreg._redraw() log = log.getvalue() - assert 'Total 29/53 points inside the surface' in log + assert "Total 29/53 points inside the surface" in log assert coreg.coreg._extra_points_filter is not None coreg._reset_omit_hsp_filter() with catch_logging() as log: coreg._redraw() log = log.getvalue() - assert 'Total 38/78 points inside the surface' in log + assert "Total 38/78 points inside the surface" in log assert coreg.coreg._extra_points_filter is None assert coreg._grow_hair == 0 @@ -222,48 +217,62 @@ def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, with catch_logging() as log: coreg._redraw() log = log.getvalue() - assert 'Total 6/78 points inside the surface' in log - norm = np.linalg.norm(coreg._head_geo['rr']) # what's used for inside + assert "Total 6/78 points inside the surface" in log + norm = np.linalg.norm(coreg._head_geo["rr"]) # what's used for inside assert_allclose(norm, 5.949288, atol=1e-3) coreg._set_grow_hair(20.0) with catch_logging() as log: coreg._redraw() assert coreg._grow_hair == 20.0 - norm = np.linalg.norm(coreg._head_geo['rr']) + norm = np.linalg.norm(coreg._head_geo["rr"]) assert_allclose(norm, 6.555220, atol=1e-3) # outward log = log.getvalue() - assert 'Total 8/78 points inside the surface' in log # more outside now + assert "Total 8/78 points inside the surface" in log # more outside now # visualization assert not coreg._helmet - assert coreg._actors['helmet'] is None + assert coreg._actors["helmet"] is None coreg._set_helmet(True) + assert coreg._eeg_channels + coreg._set_eeg_channels(False) + assert not coreg._eeg_channels assert coreg._helmet with catch_logging() as log: - coreg._redraw(verbose='debug') + coreg._redraw(verbose="debug") log = log.getvalue() - assert 'Drawing helmet' in log - coreg._set_point_weight(1., 'nasion') + assert "Drawing helmet" in log + coreg._set_point_weight(1.0, "nasion") coreg._fit_fiducials() with catch_logging() as log: - coreg._redraw(verbose='debug') + coreg._redraw(verbose="debug") log = log.getvalue() - assert 'Drawing helmet' in log + assert "Drawing helmet" in log + assert not coreg._meg_channels + assert coreg._actors["helmet"] is not None + # TODO: Someday test our file dialogs like: + # coreg._widgets["save_trans"].widget.click() + assert len(coreg._actors["sensors"]) == 0 + coreg._set_meg_channels(True) + assert coreg._meg_channels + with catch_logging() as log: + 
coreg._redraw(verbose="debug") + assert "Drawing meg sensors" in log.getvalue() + assert coreg._actors["helmet"] is not None + assert len(coreg._actors["sensors"]) == 306 assert coreg._orient_glyphs assert coreg._scale_by_distance assert coreg._mark_inside assert_allclose( - coreg._head_opacity, - float(config.get('MNE_COREG_HEAD_OPACITY', '0.8'))) + coreg._head_opacity, float(config.get("MNE_COREG_HEAD_OPACITY", "0.8")) + ) assert coreg._hpi_coils - assert coreg._eeg_channels assert coreg._head_shape_points - assert coreg._scale_mode == 'None' - assert coreg._icp_fid_match == 'matched' + assert coreg._scale_mode == "None" + assert coreg._icp_fid_match == "matched" assert coreg._head_resolution is False assert coreg._trans_modified - tmp_trans = tmp_path / 'tmp-trans.fif' + tmp_trans = tmp_path / "tmp-trans.fif" coreg._save_trans(tmp_trans) assert not coreg._trans_modified assert tmp_trans.is_file() @@ -274,14 +283,14 @@ def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, coreg._renderer._process_events() assert coreg._mri_fids_modified # should prompt assert coreg._renderer.plotter.app_window.children() is not None - assert 'close_dialog' not in coreg._widgets + assert "close_dialog" not in coreg._widgets assert not coreg._renderer.plotter._closed assert coreg._accept_close_event # make sure it's ignored (PySide6 causes problems here and doesn't wait) coreg._accept_close_event = False coreg.close() assert not coreg._renderer.plotter._closed - coreg._widgets['close_dialog'].trigger('Discard') # do not save + coreg._widgets["close_dialog"].trigger("Discard") # do not save coreg.close() assert coreg._renderer.plotter._closed coreg._clean() # finally, cleanup internal structures @@ -296,30 +305,31 @@ def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, def test_fullscreen(renderer_interactive_pyvistaqt): """Test fullscreen mode.""" from mne.gui import coregistration + # Fullscreen mode - coreg = coregistration( - subject='sample', subjects_dir=subjects_dir, fullscreen=True - ) + coreg = coregistration(subject="sample", subjects_dir=subjects_dir, fullscreen=True) coreg._accept_close_event = True coreg.close() @pytest.mark.slowtest -@requires_version('sphinx_gallery') @testing.requires_testing_data def test_coreg_gui_scraper(tmp_path, renderer_interactive_pyvistaqt): """Test the scrapper for the coregistration GUI.""" + pytest.importorskip("sphinx_gallery") from mne.gui import coregistration - coreg = coregistration(subject='sample', subjects_dir=subjects_dir, - trans=fname_trans) - (tmp_path / '_images').mkdir() - image_path = tmp_path / '_images' / 'temp.png' - gallery_conf = dict(builder_name='html', src_dir=tmp_path) + + coreg = coregistration( + subject="sample", subjects_dir=subjects_dir, trans=fname_trans + ) + (tmp_path / "_images").mkdir() + image_path = tmp_path / "_images" / "temp.png" + gallery_conf = dict(builder_name="html", src_dir=tmp_path) block_vars = dict( - example_globals=dict(gui=coreg), - image_path_iterator=iter([str(image_path)])) + example_globals=dict(gui=coreg), image_path_iterator=iter([str(image_path)]) + ) assert not image_path.is_file() - assert not getattr(coreg, '_scraped', False) + assert not getattr(coreg, "_scraped", False) mne.gui._GUIScraper()(None, block_vars, gallery_conf) assert image_path.is_file() assert coreg._scraped @@ -330,43 +340,45 @@ def test_coreg_gui_scraper(tmp_path, renderer_interactive_pyvistaqt): def test_coreg_gui_notebook(renderer_notebook, nbexec): """Test the coregistration UI in a notebook.""" import pytest + import mne from 
mne.datasets import testing from mne.gui import coregistration - mne.viz.set_3d_backend('notebook') # set the 3d backend + mne.viz.set_3d_backend("notebook") # set the 3d backend with pytest.MonkeyPatch().context() as mp: - mp.delenv('_MNE_FAKE_HOME_DIR') + mp.delenv("_MNE_FAKE_HOME_DIR") data_path = testing.data_path(download=False) subjects_dir = data_path / "subjects" - coregistration(subject='sample', subjects_dir=subjects_dir) + coregistration(subject="sample", subjects_dir=subjects_dir) @pytest.mark.slowtest -def test_no_sparse_head(subjects_dir_tmp, renderer_interactive_pyvistaqt, - monkeypatch): +def test_no_sparse_head(subjects_dir_tmp, renderer_interactive_pyvistaqt, monkeypatch): """Test mne.gui.coregistration with no sparse head.""" from mne.gui import coregistration subjects_dir_tmp = Path(subjects_dir_tmp) - subject = 'sample' + subject = "sample" out_rr, out_tris = mne.read_surface( subjects_dir_tmp / subject / "bem" / "outer_skin.surf" ) - for head in ('sample-head.fif', 'outer_skin.surf'): + for head in ("sample-head.fif", "outer_skin.surf"): os.remove(subjects_dir_tmp / subject / "bem" / head) # Avoid actually doing the decimation (it's slow) monkeypatch.setattr( - mne.coreg, 'decimate_surface', - lambda rr, tris, n_triangles: (out_rr, out_tris)) - with pytest.warns(RuntimeWarning, match='No low-resolution head found'): + mne.coreg, "decimate_surface", lambda rr, tris, n_triangles: (out_rr, out_tris) + ) + with pytest.warns(RuntimeWarning, match="No low-resolution head found"): coreg = coregistration( - inst=raw_path, subject=subject, subjects_dir=subjects_dir_tmp) + inst=raw_path, subject=subject, subjects_dir=subjects_dir_tmp + ) coreg.close() def test_splash_closed(tmp_path, renderer_interactive_pyvistaqt): """Test that the splash closes on error.""" from mne.gui import coregistration - with pytest.raises(RuntimeError, match='No standard head model'): - coregistration(subjects_dir=tmp_path, subject='fsaverage') + + with pytest.raises(RuntimeError, match="No standard head model"): + coregistration(subjects_dir=tmp_path, subject="fsaverage") diff --git a/mne/gui/tests/test_gui_api.py b/mne/gui/tests/test_gui_api.py index 8e693cf65ff..5be82d27af6 100644 --- a/mne/gui/tests/test_gui_api.py +++ b/mne/gui/tests/test_gui_api.py @@ -1,67 +1,64 @@ -# Authors: Guillaume Favelier -# -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
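One pattern that recurs in these test updates is swapping the requires_version decorator for a pytest.importorskip call at the top of the test body, so the test is skipped rather than failed when an optional dependency is missing. A minimal sketch of the idiom, with a hypothetical test name:

import pytest


def test_needs_sphinx_gallery():
    # Skips this test (instead of erroring) when sphinx_gallery is absent;
    # the imported module is returned for use in the test body.
    sg = pytest.importorskip("sphinx_gallery")
    # A minimum version can also be required, e.g.:
    # pytest.importorskip("sphinx_gallery", minversion="0.10")
    assert sg is not None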
-import sys import pytest from mne.utils import _check_qt_version # These will skip all tests in this scope -pytestmark = pytest.mark.skipif( - sys.platform.startswith('win'), reason='nbexec does not work on Windows') -pytest.importorskip('nibabel') +pytest.importorskip("nibabel") -def test_gui_api(renderer_notebook, nbexec, *, n_warn=0, backend='qt'): +def test_gui_api(renderer_notebook, nbexec, *, backend="qt"): """Test GUI API.""" import contextlib - import mne import warnings - import sys + + import mne + try: # Function backend # noqa except Exception: # Notebook standalone mode - backend = 'notebook' - n_warn = 0 + backend = "notebook" # nbexec does not expose renderer_notebook so I use a # temporary variable to synchronize the tests - if backend == 'notebook': - mne.viz.set_3d_backend('notebook') + if backend == "notebook": + mne.viz.set_3d_backend("notebook") renderer = mne.viz.backends.renderer._get_renderer(size=(300, 300)) # theme with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - renderer._window_set_theme('/does/not/exist') - if backend == 'qt': + warnings.simplefilter("always") + renderer._window_set_theme("/does/not/exist") + if backend == "qt": assert len(w) == 1 - assert 'not found' in str(w[0].message), str(w[0].message) + assert "not found" in str(w[0].message), str(w[0].message) else: assert len(w) == 0 with mne.utils._record_warnings() as w: - renderer._window_set_theme('dark') - w = [ww for ww in w if 'is not yet supported' in str(ww.message)] - if sys.platform != 'darwin': # sometimes this is fine - assert len(w) == n_warn, [ww.message for ww in w] + renderer._window_set_theme("dark") + w = [ww for ww in w if "is not yet supported" in str(ww.message)] + assert len(w) == 0, [ww.message for ww in w] # window without 3d plotter - if backend == 'qt': + if backend == "qt": window = renderer._window_create() widget = renderer._window_create() - central_layout = renderer._layout_create(orientation='grid') + central_layout = renderer._layout_create(orientation="grid") renderer._layout_add_widget(central_layout, widget, row=0, col=0) - renderer._window_initialize(window=window, - central_layout=central_layout) + renderer._window_initialize(window=window, central_layout=central_layout) from unittest.mock import Mock + mock = Mock() @contextlib.contextmanager - def _check_widget_trigger(widget, mock, before, after, call_count=True, - get_value=True): + def _check_widget_trigger( + widget, mock, before, after, call_count=True, get_value=True + ): if get_value: assert widget.get_value() == before old_call_count = mock.call_count @@ -74,16 +71,16 @@ def _check_widget_trigger(widget, mock, before, after, call_count=True, assert mock.call_count == old_call_count + 1 # --- BEGIN: dock --- - renderer._dock_initialize(name='', area='left') + renderer._dock_initialize(name="", area="left") # label (not interactive) widget = renderer._dock_add_label( - value='', + value="", align=False, selectable=True, ) widget = renderer._dock_add_label( - value='', + value="", align=True, ) widget.update() @@ -95,17 +92,17 @@ def _check_widget_trigger(widget, mock, before, after, call_count=True, # ToolButton widget = renderer._dock_add_button( - name='', + name="", callback=mock, - style='toolbutton', - tooltip='button', + style="toolbutton", + tooltip="button", ) with _check_widget_trigger(widget, mock, None, None, get_value=False): widget.set_value(True) # PushButton widget = renderer._dock_add_button( - name='', + name="", callback=mock, ) with 
_check_widget_trigger(widget, mock, None, None, get_value=False): @@ -113,36 +110,36 @@ def _check_widget_trigger(widget, mock, before, after, call_count=True, # slider widget = renderer._dock_add_slider( - name='', + name="", value=0, rng=[0, 10], callback=mock, - tooltip='slider', + tooltip="slider", ) with _check_widget_trigger(widget, mock, 0, 5): widget.set_value(5) # check box widget = renderer._dock_add_check_box( - name='', + name="", value=False, callback=mock, - tooltip='check box', + tooltip="check box", ) with _check_widget_trigger(widget, mock, False, True): widget.set_value(True) # spin box renderer._dock_add_spin_box( - name='', + name="", value=0, rng=[0, 1], callback=mock, step=0.1, - tooltip='spin box', + tooltip="spin box", ) widget = renderer._dock_add_spin_box( - name='', + name="", value=0, rng=[0, 1], callback=mock, @@ -153,71 +150,66 @@ def _check_widget_trigger(widget, mock, before, after, call_count=True, # combo box widget = renderer._dock_add_combo_box( - name='', - value='foo', - rng=['foo', 'bar'], + name="", + value="foo", + rng=["foo", "bar"], callback=mock, - tooltip='combo box', + tooltip="combo box", ) - with _check_widget_trigger(widget, mock, 'foo', 'bar'): - widget.set_value('bar') + with _check_widget_trigger(widget, mock, "foo", "bar"): + widget.set_value("bar") # radio buttons widget = renderer._dock_add_radio_buttons( - value='foo', - rng=['foo', 'bar'], + value="foo", + rng=["foo", "bar"], callback=mock, ) with _check_widget_trigger(widget, mock, None, None, get_value=False): - widget.set_value(1, 'bar') - assert widget.get_value(0) == 'foo' - assert widget.get_value(1) == 'bar' + widget.set_value(1, "bar") + assert widget.get_value(0) == "foo" + assert widget.get_value(1) == "bar" widget.set_enabled(False) # text field widget = renderer._dock_add_text( - name='', - value='foo', - placeholder='', + name="", + value="foo", + placeholder="", callback=mock, ) - with _check_widget_trigger(widget, mock, 'foo', 'bar'): - widget.set_value('bar') + with _check_widget_trigger(widget, mock, "foo", "bar"): + widget.set_value("bar") widget.set_style(dict(border="2px solid #ff0000")) # file button renderer._dock_add_file_button( - name='', - desc='', + name="", + desc="", func=mock, is_directory=True, - tooltip='file button', + tooltip="file button", ) renderer._dock_add_file_button( - name='', - desc='', + name="", + desc="", func=mock, - initial_directory='', + initial_directory="", ) renderer._dock_add_file_button( - name='', - desc='', + name="", + desc="", func=mock, ) - widget = renderer._dock_add_file_button( - name='', - desc='', - func=mock, - save=True - ) + widget = renderer._dock_add_file_button(name="", desc="", func=mock, save=True) # XXX: the internal file dialogs may hang without signals widget.set_enabled(False) - renderer._dock_initialize(name='', area='right') - renderer._dock_named_layout(name='') + renderer._dock_initialize(name="", area="right") + renderer._dock_named_layout(name="") for collapse in (None, True, False): - renderer._dock_add_group_box(name='', collapse=collapse) + renderer._dock_add_group_box(name="", collapse=collapse) renderer._dock_add_stretch() renderer._dock_add_layout() renderer._dock_finalize() @@ -232,75 +224,75 @@ def _check_widget_trigger(widget, mock, before, after, call_count=True, ) # button - assert 'reset' not in renderer.actions + assert "reset" not in renderer.actions renderer._tool_bar_add_button( - name='reset', - desc='', + name="reset", + desc="", func=mock, - icon_name='help', + icon_name="help", ) - 
assert 'reset' in renderer.actions + assert "reset" in renderer.actions # icon renderer._tool_bar_update_button_icon( - name='reset', - icon_name='reset', + name="reset", + icon_name="reset", ) # text renderer._tool_bar_add_text( - name='', - value='', - placeholder='', + name="", + value="", + placeholder="", ) # spacer renderer._tool_bar_add_spacer() # file button - assert 'help' not in renderer.actions + assert "help" not in renderer.actions renderer._tool_bar_add_file_button( - name='help', - desc='', + name="help", + desc="", func=mock, shortcut=None, ) - renderer.actions['help'].trigger() - if renderer._kind == 'qt': + renderer.actions["help"].trigger() + if renderer._kind == "qt": dialog = renderer._window.children()[-1] - assert 'FileDialog' in repr(dialog) + assert "FileDialog" in repr(dialog) dialog.close() dialog.deleteLater() # play button - assert 'play' not in renderer.actions + assert "play" not in renderer.actions renderer._tool_bar_add_play_button( - name='play', - desc='', + name="play", + desc="", func=mock, shortcut=None, ) - assert 'play' in renderer.actions + assert "play" in renderer.actions # --- END: tool bar --- # --- BEGIN: menu bar --- renderer._menu_initialize() # submenu - renderer._menu_add_submenu(name='foo', desc='foo') - assert 'foo' in renderer._menus - assert 'foo' in renderer._menu_actions + renderer._menu_add_submenu(name="foo", desc="foo") + assert "foo" in renderer._menus + assert "foo" in renderer._menu_actions # button renderer._menu_add_button( - menu_name='foo', - name='bar', - desc='bar', + menu_name="foo", + name="bar", + desc="bar", func=mock, ) - assert 'bar' in renderer._menu_actions['foo'] - with _check_widget_trigger(None, mock, '', '', get_value=False): - renderer._menu_actions['foo']['bar'].trigger() + assert "bar" in renderer._menu_actions["foo"] + with _check_widget_trigger(None, mock, "", "", get_value=False): + renderer._menu_actions["foo"]["bar"].trigger() # --- END: menu bar --- @@ -309,8 +301,8 @@ def _check_widget_trigger(widget, mock, before, after, call_count=True, renderer._status_bar_update() # label - widget = renderer._status_bar_add_label(value='foo', stretch=0) - assert widget.get_value() == 'foo' + widget = renderer._status_bar_add_label(value="foo", stretch=0) + assert widget.get_value() == "foo" # progress bar widget = renderer._status_bar_add_progress_bar(stretch=0) @@ -320,74 +312,70 @@ def _check_widget_trigger(widget, mock, before, after, call_count=True, # --- END: status bar --- # --- BEGIN: tooltips --- - widget = renderer._dock_add_button( - name='', - callback=mock, - tooltip='foo' - ) - assert widget.get_tooltip() == 'foo' + widget = renderer._dock_add_button(name="", callback=mock, tooltip="foo") + assert widget.get_tooltip() == "foo" # Change it … - widget.set_tooltip('bar') - assert widget.get_tooltip() == 'bar' + widget.set_tooltip("bar") + assert widget.get_tooltip() == "bar" # --- END: tooltips --- # --- BEGIN: dialog --- # dialogs are not supported yet on notebook - if renderer._kind == 'qt': + if renderer._kind == "qt": # warning buttons = ["Save", "Cancel"] widget = renderer._dialog_create( - title='', - text='', - info_text='', + title="", + text="", + info_text="", callback=mock, buttons=buttons, modal=False, ) widget.show() for button in buttons: - with _check_widget_trigger(None, mock, '', '', get_value=False): + with _check_widget_trigger(None, mock, "", "", get_value=False): widget.trigger(button=button) assert mock.call_args.args == (button,) assert not widget._widget.isVisible() # buttons 
list empty means OK button (default) - button = 'Ok' + button = "Ok" widget = renderer._dialog_create( - title='', - text='', - info_text='', + title="", + text="", + info_text="", callback=mock, - icon='NoIcon', + icon="NoIcon", modal=False, ) widget.show() - with _check_widget_trigger(None, mock, '', '', get_value=False): + with _check_widget_trigger(None, mock, "", "", get_value=False): widget.trigger(button=button) assert mock.call_args.args == (button,) - widget.trigger(button='Ok') + widget.trigger(button="Ok") # --- END: dialog --- # --- BEGIN: keypress --- renderer._keypress_initialize() - renderer._keypress_add('a', mock) + renderer._keypress_add("a", mock) # keypress is not supported yet on notebook - if renderer._kind == 'qt': - with _check_widget_trigger(None, mock, '', '', get_value=False): - renderer._keypress_trigger('a') + if renderer._kind == "qt": + with _check_widget_trigger(None, mock, "", "", get_value=False): + renderer._keypress_trigger("a") # --- END: keypress --- renderer.show() - renderer._window_close_connect(lambda: mock('first'), after=False) - renderer._window_close_connect(lambda: mock('last')) + renderer._window_close_connect(lambda: mock("first"), after=False) + renderer._window_close_connect(lambda: mock("last")) old_call_count = mock.call_count renderer.close() - if renderer._kind == 'qt': + if renderer._kind == "qt": assert mock.call_count == old_call_count + 2 - assert mock.call_args_list[-1].args == ('last',) - assert mock.call_args_list[-2].args == ('first',) + assert mock.call_args_list[-1].args == ("last",) + assert mock.call_args_list[-2].args == ("first",) assert renderer._window.isVisible() is False del renderer @@ -395,10 +383,9 @@ def _check_widget_trigger(widget, mock, before, after, call_count=True, def test_gui_api_qt(renderer_interactive_pyvistaqt): """Test GUI API with the Qt backend.""" _, api = _check_qt_version(return_api=True) - n_warn = int(api in ('PySide6', 'PyQt6')) # TODO: After merging https://github.com/mne-tools/mne-python/pull/11567 # The Qt CI run started failing about 50% of the time, so let's skip this # for now. 
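The widget assertions above all rely on the same trick: install a unittest.mock.Mock as the widget callback, then check that interacting with the widget incremented its call count. Stripped of the GUI specifics, the pattern looks roughly like this; FakeWidget is an illustrative stand-in, not part of the renderer API:

import contextlib
from unittest.mock import Mock


class FakeWidget:
    """Illustrative stand-in for a renderer widget that fires a callback."""

    def __init__(self, callback):
        self._callback = callback
        self._value = None

    def set_value(self, value):
        self._value = value
        self._callback(value)  # a real widget would emit this via a Qt signal

    def get_value(self):
        return self._value


@contextlib.contextmanager
def check_widget_trigger(mock):
    before = mock.call_count
    yield
    assert mock.call_count == before + 1  # the callback fired exactly once


mock = Mock()
widget = FakeWidget(callback=mock)
with check_widget_trigger(mock):
    widget.set_value(5)
assert widget.get_value() == 5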
- if api == 'PySide6': - pytest.skip('PySide6 causes segfaults on CIs sometimes') - test_gui_api(None, None, n_warn=n_warn, backend='qt') + if api == "PySide6": + pytest.skip("PySide6 causes segfaults on CIs sometimes") + test_gui_api(None, None, backend="qt") diff --git a/mne/gui/tests/test_ieeg_locate.py b/mne/gui/tests/test_ieeg_locate.py deleted file mode 100644 index 2463d171d54..00000000000 --- a/mne/gui/tests/test_ieeg_locate.py +++ /dev/null @@ -1,217 +0,0 @@ -# -*- coding: utf-8 -*- -# Authors: Alex Rockhill -# -# License: BSD-3-clause - -import numpy as np -from numpy.testing import assert_allclose - -import pytest - -import mne -from mne.datasets import testing -from mne.transforms import apply_trans -from mne.utils import requires_version, use_log_level -from mne.viz.utils import _fake_click - -data_path = testing.data_path(download=False) -subject = "sample" -subjects_dir = data_path / "subjects" -sample_dir = data_path / "MEG" / subject -raw_path = sample_dir / "sample_audvis_trunc_raw.fif" -fname_trans = sample_dir / "sample_audvis_trunc-trans.fif" - - -@pytest.fixture -def _fake_CT_coords(skull_size=5, contact_size=2): - """Make somewhat realistic CT data with contacts.""" - nib = pytest.importorskip('nibabel') - brain = nib.load(subjects_dir / subject / "mri" / "brain.mgz") - verts = mne.read_surface( - subjects_dir / subject / "bem" / "outer_skull.surf" - )[0] - verts = apply_trans(np.linalg.inv(brain.header.get_vox2ras_tkr()), verts) - x, y, z = np.array(brain.shape).astype(int) // 2 - coords = [(x, y - 14, z), (x - 10, y - 15, z), - (x - 20, y - 16, z + 1), (x - 30, y - 16, z + 1)] - center = np.array(brain.shape) / 2 - # make image - np.random.seed(99) - ct_data = np.random.random(brain.shape).astype(np.float32) * 100 - # make skull - for vert in verts: - x, y, z = np.round(vert).astype(int) - ct_data[slice(x - skull_size, x + skull_size + 1), - slice(y - skull_size, y + skull_size + 1), - slice(z - skull_size, z + skull_size + 1)] = 1000 - # add electrode with contacts - for (x, y, z) in coords: - # make sure not in skull - assert np.linalg.norm(center - np.array((x, y, z))) < 50 - ct_data[slice(x - contact_size, x + contact_size + 1), - slice(y - contact_size, y + contact_size + 1), - slice(z - contact_size, z + contact_size + 1)] = \ - 1000 - np.linalg.norm(np.array(np.meshgrid( - *[range(-contact_size, contact_size + 1)] * 3)), axis=0) - ct = nib.MGHImage(ct_data, brain.affine) - coords = apply_trans(ct.header.get_vox2ras_tkr(), np.array(coords)) - return ct, coords - - -def test_ieeg_elec_locate_io(renderer_interactive_pyvistaqt): - """Test the input/output of the intracranial location GUI.""" - nib = pytest.importorskip('nibabel') - import mne.gui - info = mne.create_info([], 1000) - - # fake as T1 so that aligned - aligned_ct = nib.load(subjects_dir / subject / "mri" / "brain.mgz") - - trans = mne.transforms.Transform('head', 'mri') - with pytest.raises(ValueError, - match='No channels found in `info` to locate'): - mne.gui.locate_ieeg(info, trans, aligned_ct, subject, subjects_dir) - - info = mne.create_info(['test'], 1000, 'seeg') - montage = mne.channels.make_dig_montage( - {'test': [0, 0, 0]}, coord_frame='mri') - with pytest.warns(RuntimeWarning, match='nasion not found'): - info.set_montage(montage) - with pytest.raises(RuntimeError, - match='must be in the "head" coordinate frame'): - with pytest.warns(RuntimeWarning, match='`pial` surface not found'): - mne.gui.locate_ieeg(info, trans, aligned_ct, subject, subjects_dir) - - 
-@requires_version('sphinx_gallery') -@testing.requires_testing_data -def test_locate_scraper(renderer_interactive_pyvistaqt, _fake_CT_coords, - tmp_path): - """Test sphinx-gallery scraping of the GUI.""" - import mne.gui - raw = mne.io.read_raw_fif(raw_path) - raw.pick_types(eeg=True) - ch_dict = {'EEG 001': 'LAMY 1', 'EEG 002': 'LAMY 2', - 'EEG 003': 'LSTN 1', 'EEG 004': 'LSTN 2'} - raw.pick_channels(list(ch_dict.keys())) - raw.rename_channels(ch_dict) - raw.set_montage(None) - aligned_ct, _ = _fake_CT_coords - trans = mne.read_trans(fname_trans) - with pytest.warns(RuntimeWarning, match='`pial` surface not found'): - gui = mne.gui.locate_ieeg( - raw.info, trans, aligned_ct, - subject=subject, subjects_dir=subjects_dir) - (tmp_path / '_images').mkdir() - image_path = tmp_path / '_images' / 'temp.png' - gallery_conf = dict(builder_name='html', src_dir=tmp_path) - block_vars = dict( - example_globals=dict(gui=gui), - image_path_iterator=iter([str(image_path)])) - assert not image_path.is_file() - assert not getattr(gui, '_scraped', False) - mne.gui._GUIScraper()(None, block_vars, gallery_conf) - assert image_path.is_file() - assert gui._scraped - # no need to call .close - - -@testing.requires_testing_data -def test_ieeg_elec_locate_display(renderer_interactive_pyvistaqt, - _fake_CT_coords): - """Test that the intracranial location GUI displays properly.""" - raw = mne.io.read_raw_fif(raw_path, preload=True) - raw.pick_types(eeg=True) - ch_dict = {'EEG 001': 'LAMY 1', 'EEG 002': 'LAMY 2', - 'EEG 003': 'LSTN 1', 'EEG 004': 'LSTN 2'} - raw.pick_channels(list(ch_dict.keys())) - raw.rename_channels(ch_dict) - raw.set_eeg_reference('average') - raw.set_channel_types({name: 'seeg' for name in raw.ch_names}) - raw.set_montage(None) - aligned_ct, coords = _fake_CT_coords - trans = mne.read_trans(fname_trans) - - with pytest.warns(RuntimeWarning, match='`pial` surface not found'): - gui = mne.gui.locate_ieeg( - raw.info, trans, aligned_ct, - subject=subject, subjects_dir=subjects_dir, - verbose=True) - - with pytest.raises(ValueError, match='read-only'): - gui._ras[:] = coords[0] # start in the right position - gui.set_RAS(coords[0]) - gui.mark_channel() - - with pytest.raises(ValueError, match='not found'): - gui.mark_channel('foo') - - assert not gui._lines and not gui._lines_2D # no lines for one contact - for ci, coord in enumerate(coords[1:], 1): - coord_vox = apply_trans(gui._ras_vox_t, coord) - with use_log_level('debug'): - _fake_click(gui._figs[2], gui._figs[2].axes[0], - coord_vox[:-1], xform='data', kind='release') - assert_allclose(coord[:2], gui._ras[:2], atol=0.1, - err_msg=f'coords[{ci}][:2]') - assert_allclose(coord[2], gui._ras[2], atol=2, - err_msg=f'coords[{ci}][2]') - gui.mark_channel() - - # ensure a 3D line was made for each group - assert len(gui._lines) == 2 - - # test snap to center - gui._ch_index = 0 - gui.set_RAS(coords[0]) # move to first position - gui.mark_channel() - assert_allclose(coords[0], gui._chs['LAMY 1'], atol=0.2) - gui._snap_button.click() - assert gui._snap_button.text() == 'Off' - # now make sure no snap happens - gui._ch_index = 0 - gui.set_RAS(coords[1] + 1) - gui.mark_channel() - assert_allclose(coords[1] + 1, gui._chs['LAMY 1'], atol=0.01) - # check that it turns back on - gui._snap_button.click() - assert gui._snap_button.text() == 'On' - - # test remove - gui.remove_channel('LAMY 2') - assert np.isnan(gui._chs['LAMY 2']).all() - - with pytest.raises(ValueError, match='not found'): - gui.remove_channel('foo') - - # check that raw object saved - 
assert not np.isnan(raw.info['chs'][0]['loc'][:3]).any() # LAMY 1 - assert np.isnan(raw.info['chs'][1]['loc'][:3]).all() # LAMY 2 (removed) - - # move sliders - gui._alpha_slider.setValue(75) - assert gui._ch_alpha == 0.75 - gui._radius_slider.setValue(5) - assert gui._radius == 5 - ct_sum_before = np.nansum(gui._images['ct'][0].get_array().data) - gui._ct_min_slider.setValue(500) - assert np.nansum(gui._images['ct'][0].get_array().data) < ct_sum_before - - # test buttons - gui._toggle_show_brain() - assert 'mri' in gui._images - assert 'local_max' not in gui._images - gui._toggle_show_max() - assert 'local_max' in gui._images - assert 'mip' not in gui._images - gui._toggle_show_mip() - assert 'mip' in gui._images - assert 'mip_chs' in gui._images - assert len(gui._lines_2D) == 1 # LAMY only has one contact - - # check montage - montage = raw.get_montage() - assert montage is not None - assert_allclose(montage.get_positions()['ch_pos']['LAMY 1'], - [0.00726235, 0.01713514, 0.04167233], atol=0.01) - gui.close() diff --git a/mne/html/d3.v3.min.js b/mne/html/d3.v3.min.js deleted file mode 100644 index eed58e6a572..00000000000 --- a/mne/html/d3.v3.min.js +++ /dev/null @@ -1,5 +0,0 @@ -!function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++ue;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return 
[minified D3 v3.4.2 source (5 lines) deleted in this hunk; the minified JavaScript contents are omitted here]
arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.xm&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++ea*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++at;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++fg;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof 
t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i0)for(i=-1;++i=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return 
arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++ut?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++oe&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++ie.dx)&&(l=e.dx);++ie&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[]) -},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return 
arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(zo(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=To,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++oi;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var 
i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var _=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=zs,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":zs,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return 
arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var zs="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return Ts[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),z.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),z=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?z.on("touchmove.brush",v).on("touchend.brush",y):z.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),T=+/^n/.test(k);M=[l[1-q]-L[0],f[1-T]-L[1]],L[0]=l[q],L[1]=f[T]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var 
n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var Ts={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return 
Xo.range(+n,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}(); \ No newline at end of file diff --git a/mne/html/mpld3.v0.2.min.js b/mne/html/mpld3.v0.2.min.js deleted file mode 100644 index adefb15efa7..00000000000 --- a/mne/html/mpld3.v0.2.min.js +++ /dev/null @@ -1,2 +0,0 @@ -!function(t){function s(t){var s={};for(var o in t)s[o]=t[o];return s}function o(t,s){t="undefined"!=typeof t?t:10,s="undefined"!=typeof s?s:"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";for(var o=s.charAt(Math.round(Math.random()*(s.length-11))),e=1;t>e;e++)o+=s.charAt(Math.round(Math.random()*(s.length-1)));return o}function e(s,o){var e=t.interpolate([s[0].valueOf(),s[1].valueOf()],[o[0].valueOf(),o[1].valueOf()]);return function(t){var s=e(t);return[new Date(s[0]),new Date(s[1])]}}function i(t){return"undefined"==typeof t}function r(t){return null==t||i(t)}function n(t,s){return t.length>0?t[s%t.length]:null}function a(){function s(s,n){var a=t.functor(o),p=t.functor(e),h=[],l=[],c=0,d=-1,u=0,f=!1;if(!n){n=["M"];for(var y=1;yc;)i.call(this,s[c],c)?(h.push(a.call(this,s[c],c),p.call(this,s[c],c)),c++):(h=null,c=u);h?f&&h.length>0?(l.push("M",h[0],h[1]),f=!1):(l.push(n[d]),l=l.concat(h)):f=!0}return c!=s.length&&console.warn("Warning: not all vertices used in Path"),l.join(" ")}var o=function(t){return t[0]},e=function(t){return t[1]},i=function(){return!0},r={M:1,m:1,L:1,l:1,Q:2,q:2,T:1,t:1,S:2,s:2,C:3,c:3,Z:0,z:0};return s.x=function(t){return arguments.length?(o=t,s):o},s.y=function(t){return arguments.length?(e=t,s):e},s.defined=function(t){return arguments.length?(i=t,s):i},s.call=s,s}function p(){function t(t){return s.forEach(function(s){t=s(t)}),t}var s=Array.prototype.slice.call(arguments,0),o=s.length;return t.domain=function(o){return arguments.length?(s[0].domain(o),t):s[0].domain()},t.range=function(e){return arguments.length?(s[o-1].range(e),t):s[o-1].range()},t.step=function(t){return s[t]},t}function h(t,s){if(O.call(this,t,s),this.cssclass="mpld3-"+this.props.xy+"grid","x"==this.props.xy)this.transform="translate(0,"+this.ax.height+")",this.position="bottom",this.scale=this.ax.xdom,this.tickSize=-this.ax.height;else{if("y"!=this.props.xy)throw"unrecognized grid xy specifier: should be 'x' or 'y'";this.transform="translate(0,0)",this.position="left",this.scale=this.ax.ydom,this.tickSize=-this.ax.width}}function l(t,s){O.call(this,t,s);var 
o={bottom:[0,this.ax.height],top:[0,0],left:[0,0],right:[this.ax.width,0]},e={bottom:"x",top:"x",left:"y",right:"y"};this.transform="translate("+o[this.props.position]+")",this.props.xy=e[this.props.position],this.cssclass="mpld3-"+this.props.xy+"axis",this.scale=this.ax[this.props.xy+"dom"]}function c(t,s){if("undefined"==typeof s){if(this.ax=null,this.fig=null,"display"!==this.trans)throw"ax must be defined if transform != 'display'"}else this.ax=s,this.fig=s.fig;if(this.zoomable="data"===t,this.x=this["x_"+t],this.y=this["y_"+t],"undefined"==typeof this.x||"undefined"==typeof this.y)throw"unrecognized coordinate code: "+t}function d(t,s){O.call(this,t,s),this.data=t.fig.get_data(this.props.data),this.pathcodes=this.props.pathcodes,this.pathcoords=new c(this.props.coordinates,this.ax),this.offsetcoords=new c(this.props.offsetcoordinates,this.ax),this.datafunc=a()}function u(t,s){O.call(this,t,s),(null==this.props.facecolors||0==this.props.facecolors.length)&&(this.props.facecolors=["none"]),(null==this.props.edgecolors||0==this.props.edgecolors.length)&&(this.props.edgecolors=["none"]);var o=this.ax.fig.get_data(this.props.offsets);(null===o||0===o.length)&&(o=[null]);var e=Math.max(this.props.paths.length,o.length);if(o.length===e)this.offsets=o;else{this.offsets=[];for(var i=0;e>i;i++)this.offsets.push(n(o,i))}this.pathcoords=new c(this.props.pathcoordinates,this.ax),this.offsetcoords=new c(this.props.offsetcoordinates,this.ax)}function f(s,o){O.call(this,s,o);var e=this.props;e.facecolor="none",e.edgecolor=e.color,delete e.color,e.edgewidth=e.linewidth,delete e.linewidth,this.defaultProps=d.prototype.defaultProps,d.call(this,s,e),this.datafunc=t.svg.line().interpolate("linear")}function y(s,o){O.call(this,s,o),this.marker=null!==this.props.markerpath?0==this.props.markerpath[0].length?null:F.path().call(this.props.markerpath[0],this.props.markerpath[1]):null===this.props.markername?null:t.svg.symbol(this.props.markername).size(Math.pow(this.props.markersize,2))();var e={paths:[this.props.markerpath],offsets:s.fig.get_data(this.props.data),xindex:this.props.xindex,yindex:this.props.yindex,offsetcoordinates:this.props.coordinates,edgecolors:[this.props.edgecolor],edgewidths:[this.props.edgewidth],facecolors:[this.props.facecolor],alphas:[this.props.alpha],zorder:this.props.zorder,id:this.props.id};this.requiredProps=u.prototype.requiredProps,this.defaultProps=u.prototype.defaultProps,u.call(this,s,e)}function g(t,s){O.call(this,t,s),this.coords=new c(this.props.coordinates,this.ax)}function m(t,s){O.call(this,t,s),this.text=this.props.text,this.position=this.props.position,this.coords=new c(this.props.coordinates,this.ax)}function x(s,o){function e(t){return new Date(t[0],t[1],t[2],t[3],t[4],t[5])}function i(t,s){return"date"!==t?s:[e(s[0]),e(s[1])]}function r(s,o,e){var i="date"===s?t.time.scale():"log"===s?t.scale.log():t.scale.linear();return i.domain(o).range(e)}O.call(this,s,o),this.axnum=this.fig.axes.length,this.axid=this.fig.figid+"_ax"+(this.axnum+1),this.clipid=this.axid+"_clip",this.props.xdomain=this.props.xdomain||this.props.xlim,this.props.ydomain=this.props.ydomain||this.props.ylim,this.sharex=[],this.sharey=[],this.elements=[];var 
n=this.props.bbox;this.position=[n[0]*this.fig.width,(1-n[1]-n[3])*this.fig.height],this.width=n[2]*this.fig.width,this.height=n[3]*this.fig.height,this.props.xdomain=i(this.props.xscale,this.props.xdomain),this.props.ydomain=i(this.props.yscale,this.props.ydomain),this.x=this.xdom=r(this.props.xscale,this.props.xdomain,[0,this.width]),this.y=this.ydom=r(this.props.yscale,this.props.ydomain,[this.height,0]),"date"===this.props.xscale&&(this.x=F.multiscale(t.scale.linear().domain(this.props.xlim).range(this.props.xdomain.map(Number)),this.xdom)),"date"===this.props.yscale&&(this.x=F.multiscale(t.scale.linear().domain(this.props.ylim).range(this.props.ydomain.map(Number)),this.ydom));for(var a=this.props.axes,p=0;p0&&this.buttons.forEach(function(t){t.actions.filter(s).length>0&&t.deactivate()})},F.Button=v,v.prototype=Object.create(O.prototype),v.prototype.constructor=v,v.prototype.setState=function(t){t?this.activate():this.deactivate()},v.prototype.click=function(){this.active?this.deactivate():this.activate()},v.prototype.activate=function(){this.toolbar.deactivate_by_action(this.actions),this.onActivate(),this.active=!0,this.toolbar.toolbar.select("."+this.cssclass).classed({pressed:!0}),this.sticky||this.deactivate()},v.prototype.deactivate=function(){this.onDeactivate(),this.active=!1,this.toolbar.toolbar.select("."+this.cssclass).classed({pressed:!1})},v.prototype.sticky=!1,v.prototype.actions=[],v.prototype.icon=function(){return""},v.prototype.onActivate=function(){},v.prototype.onDeactivate=function(){},v.prototype.onDraw=function(){},F.ButtonFactory=function(t){function s(t){v.call(this,t,this.buttonID)}if("string"!=typeof t.buttonID)throw"ButtonFactory: buttonID must be present and be a string";s.prototype=Object.create(v.prototype),s.prototype.constructor=s;for(var o in t)s.prototype[o]=t[o];return s},F.Plugin=A,A.prototype=Object.create(O.prototype),A.prototype.constructor=A,A.prototype.requiredProps=[],A.prototype.defaultProps={},A.prototype.draw=function(){},F.ResetPlugin=z,F.register_plugin("reset",z),z.prototype=Object.create(A.prototype),z.prototype.constructor=z,z.prototype.requiredProps=[],z.prototype.defaultProps={},F.ZoomPlugin=w,F.register_plugin("zoom",w),w.prototype=Object.create(A.prototype),w.prototype.constructor=w,w.prototype.requiredProps=[],w.prototype.defaultProps={button:!0,enabled:null},w.prototype.activate=function(){this.fig.enable_zoom()},w.prototype.deactivate=function(){this.fig.disable_zoom()},w.prototype.draw=function(){this.props.enabled?this.fig.enable_zoom():this.fig.disable_zoom()},F.BoxZoomPlugin=_,F.register_plugin("boxzoom",_),_.prototype=Object.create(A.prototype),_.prototype.constructor=_,_.prototype.requiredProps=[],_.prototype.defaultProps={button:!0,enabled:null},_.prototype.activate=function(){this.enable&&this.enable()},_.prototype.deactivate=function(){this.disable&&this.disable()},_.prototype.draw=function(){function t(t){if(this.enabled){var o=s.extent();s.empty()||t.set_axlim([o[0][0],o[1][0]],[o[0][1],o[1][1]])}t.axes.call(s.clear())}F.insert_css("#"+this.fig.figid+" rect.extent."+this.extentClass,{fill:"#fff","fill-opacity":0,stroke:"#999"});var 
s=this.fig.getBrush();this.enable=function(){this.fig.showBrush(this.extentClass),s.on("brushend",t.bind(this)),this.enabled=!0},this.disable=function(){this.fig.hideBrush(this.extentClass),this.enabled=!1},this.toggle=function(){this.enabled?this.disable():this.enable()},this.disable()},F.TooltipPlugin=k,F.register_plugin("tooltip",k),k.prototype=Object.create(A.prototype),k.prototype.constructor=k,k.prototype.requiredProps=["id"],k.prototype.defaultProps={labels:null,hoffset:0,voffset:10,location:"mouse"},k.prototype.draw=function(){function s(t,s){this.tooltip.style("visibility","visible").text(null===r?"("+t+")":n(r,s))}function o(){if("mouse"===a){var s=t.mouse(this.fig.canvas.node());this.x=s[0]+this.props.hoffset,this.y=s[1]-this.props.voffset}this.tooltip.attr("x",this.x).attr("y",this.y)}function e(){this.tooltip.style("visibility","hidden")}var i=F.get_element(this.props.id,this.fig),r=this.props.labels,a=this.props.location;this.tooltip=this.fig.canvas.append("text").attr("class","mpld3-tooltip-text").attr("x",0).attr("y",0).text("").style("visibility","hidden"),"bottom left"==a||"top left"==a?(this.x=i.ax.position[0]+5+this.props.hoffset,this.tooltip.style("text-anchor","beginning")):"bottom right"==a||"top right"==a?(this.x=i.ax.position[0]+i.ax.width-5+this.props.hoffset,this.tooltip.style("text-anchor","end")):this.tooltip.style("text-anchor","middle"),"bottom left"==a||"bottom right"==a?this.y=i.ax.position[1]+i.ax.height-5+this.props.voffset:("top left"==a||"top right"==a)&&(this.y=i.ax.position[1]+5+this.props.voffset),i.elements().on("mouseover",s.bind(this)).on("mousemove",o.bind(this)).on("mouseout",e.bind(this))},F.LinkedBrushPlugin=P,F.register_plugin("linkedbrush",P),P.prototype=Object.create(F.Plugin.prototype),P.prototype.constructor=P,P.prototype.requiredProps=["id"],P.prototype.defaultProps={button:!0,enabled:null},P.prototype.activate=function(){this.enable&&this.enable()},P.prototype.deactivate=function(){this.disable&&this.disable()},P.prototype.draw=function(){function s(s){l!=this&&(t.select(l).call(p.clear()),l=this,p.x(s.xdom).y(s.ydom))}function o(t){var s=h[t.axnum];if(s.length>0){var o=s[0].props.xindex,e=s[0].props.yindex,i=p.extent();p.empty()?c.selectAll("path").classed("mpld3-hidden",!1):c.selectAll("path").classed("mpld3-hidden",function(t){return i[0][0]>t[o]||i[1][0]t[e]||i[1][1]1?s[1]:""},"object"==typeof module&&module.exports?module.exports=F:this.mpld3=F,console.log("Loaded mpld3 version "+F.version)}(d3); \ No newline at end of file diff --git a/mne/html_templates/__init__.py b/mne/html_templates/__init__.py index 24cbd45e31d..b8234d27037 100644 --- a/mne/html_templates/__init__.py +++ b/mne/html_templates/__init__.py @@ -1,3 +1,8 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ """Jinja2 HTML templates.""" +import lazy_loader as lazy -from ._templates import repr_templates_env, report_templates_env +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/html_templates/__init__.pyi b/mne/html_templates/__init__.pyi new file mode 100644 index 00000000000..2312227d990 --- /dev/null +++ b/mne/html_templates/__init__.pyi @@ -0,0 +1,2 @@ +__all__ = ["_get_html_template"] +from ._templates import _get_html_template diff --git a/mne/html_templates/_templates.py b/mne/html_templates/_templates.py index 28fd93617e5..1f68303a51e 100644 --- a/mne/html_templates/_templates.py +++ b/mne/html_templates/_templates.py @@ -1,25 +1,171 @@ -import jinja2 - -autoescape = jinja2.select_autoescape( - default=True, - default_for_string=True -) - -# For _html_repr_() -repr_templates_env = jinja2.Environment( - loader=jinja2.PackageLoader( - package_name='mne.html_templates', - package_path='repr' - ), - autoescape=autoescape -) - -# For mne.Report -report_templates_env = jinja2.Environment( - loader=jinja2.PackageLoader( - package_name='mne.html_templates', - package_path='report' - ), - autoescape=autoescape -) -report_templates_env.filters['zip'] = zip +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from __future__ import annotations # only needed for Python ≤ 3.9 + +import datetime +import functools +import uuid +from dataclasses import dataclass +from typing import Any, Literal + +from .._fiff.pick import channel_type +from ..defaults import _handle_default + +_COLLAPSED = False # will override in doc build + + +def _format_number(value: int | float) -> str: + """Insert thousand separators.""" + return f"{value:,}" + + +def _append_uuid(string: str, sep: str = "-") -> str: + """Append a UUID to a string.""" + return f"{string}{sep}{uuid.uuid4()}" + + +def _data_type(obj) -> str: + """Return the qualified name of a class.""" + return obj.__class__.__qualname__ + + +def _dt_to_str(dt: datetime.datetime) -> str: + """Convert a datetime object to a human-readable string representation.""" + return dt.strftime("%Y-%m-%d at %H:%M:%S %Z") + + +def _format_baseline(inst) -> str: + """Format the baseline time period.""" + if inst.baseline is None: + baseline = "off" + else: + baseline = ( + f"{round(inst.baseline[0], 3):.3f} – {round(inst.baseline[1], 3):.3f} s" + ) + + return baseline + + +def _format_metadata(inst) -> str: + """Format metadata representation.""" + if inst.metadata is None: + metadata = "No metadata set" + else: + metadata = f"{inst.metadata.shape[0]} rows × {inst.metadata.shape[1]} columns" + + return metadata + + +def _format_time_range(inst) -> str: + """Format evoked and epochs time range.""" + tr = f"{round(inst.tmin, 3):.3f} – {round(inst.tmax, 3):.3f} s" + return tr + + +def _format_projs(info) -> list[str]: + """Format projectors.""" + projs = [f"{p['desc']} ({'on' if p['active'] else 'off'})" for p in info["projs"]] + return projs + + +@dataclass +class _Channel: + """A channel in a recording.""" + + index: int + name_html: str + type: str + type_pretty: str + status: Literal["good", "bad"] + + +def _format_channels(info) -> dict[str, dict[Literal["good", "bad"], list[str]]]: + """Format channel names.""" + ch_types_pretty: dict[str, str] = _handle_default("titles") + channels = [] + + if info.ch_names: + for ch_index, ch_name in enumerate(info.ch_names): + ch_type = channel_type(info, ch_index) + ch_type_pretty = ch_types_pretty.get(ch_type, ch_type.upper()) + 
ch_status = "bad" if ch_name in info["bads"] else "good" + channel = _Channel( + index=ch_index, + name_html=ch_name.replace(" ", " "), + type=ch_type, + type_pretty=ch_type_pretty, + status=ch_status, + ) + channels.append(channel) + + # Extract unique channel types and put them in the desired order. + ch_types = list(set([c.type_pretty for c in channels])) + ch_types = [c for c in ch_types_pretty.values() if c in ch_types] + + channels_formatted = {} + for ch_type in ch_types: + goods = [c for c in channels if c.type_pretty == ch_type and c.status == "good"] + bads = [c for c in channels if c.type_pretty == ch_type and c.status == "bad"] + if ch_type not in channels_formatted: + channels_formatted[ch_type] = {"good": [], "bad": []} + channels_formatted[ch_type]["good"] = goods + channels_formatted[ch_type]["bad"] = bads + + return channels_formatted + + +def _has_attr(obj: Any, attr: str) -> bool: + """Check if an object has an attribute `obj.attr`. + + This is needed because on dict-like objects, Jinja2's `obj.attr is defined` would + check for `obj["attr"]`, which may not be what we want. + """ + return hasattr(obj, attr) + + +@functools.lru_cache(maxsize=2) +def _get_html_templates_env(kind): + # For _html_repr_() and mne.Report + assert kind in ("repr", "report"), kind + import jinja2 + + templates_env = jinja2.Environment( + loader=jinja2.PackageLoader( + package_name="mne.html_templates", package_path=kind + ), + autoescape=jinja2.select_autoescape(default=True, default_for_string=True), + ) + if kind == "report": + templates_env.filters["zip"] = zip + + templates_env.filters["format_number"] = _format_number + templates_env.filters["append_uuid"] = _append_uuid + templates_env.filters["data_type"] = _data_type + templates_env.filters["dt_to_str"] = _dt_to_str + templates_env.filters["format_baseline"] = _format_baseline + templates_env.filters["format_metadata"] = _format_metadata + templates_env.filters["format_time_range"] = _format_time_range + templates_env.filters["format_projs"] = _format_projs + templates_env.filters["format_channels"] = _format_channels + templates_env.filters["has_attr"] = _has_attr + return templates_env + + +def _get_html_template(kind, name): + return _RenderWrap( + _get_html_templates_env(kind).get_template(name), + collapsed=_COLLAPSED, + ) + + +class _RenderWrap: + """Class that allows functools.partial-like wrapping of jinja2 Template.render().""" + + def __init__(self, template, **kwargs): + self._template = template + self._kwargs = kwargs + + def render(self, *args, **kwargs): + return self._template.render(*args, **kwargs, **self._kwargs) diff --git a/mne/html_templates/report/forward.html.jinja b/mne/html_templates/report/forward.html.jinja index 1a46f4ccd81..68c8ebecc27 100644 --- a/mne/html_templates/report/forward.html.jinja +++ b/mne/html_templates/report/forward.html.jinja @@ -2,4 +2,5 @@ {% block html_content %} {{repr | safe}} {{sensitivity_maps | safe}} +{{source_space | safe}} {% endblock html_content %} diff --git a/mne/html_templates/report/html.html.jinja b/mne/html_templates/report/html.html.jinja index 9592944f915..a9b4f881f12 100644 --- a/mne/html_templates/report/html.html.jinja +++ b/mne/html_templates/report/html.html.jinja @@ -1,9 +1,8 @@
    -
    - - -
    +
    {{ html | safe }}
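The `_templates.py` hunk above replaces two module-level Jinja2 environments with a cached factory (`_get_html_templates_env`), a set of custom filters, and a `functools.partial`-style render wrapper (`_RenderWrap`). A minimal, self-contained sketch of that pattern, using a throwaway `DictLoader` template and filter rather than MNE's packaged templates:

import functools
import uuid

import jinja2


def append_uuid(string, sep="-"):
    # Illustrative stand-in for the "append_uuid" filter registered above.
    return f"{string}{sep}{uuid.uuid4()}"


@functools.lru_cache(maxsize=2)
def get_env(kind):
    # One cached environment per template kind ("repr" or "report"), mirroring
    # _get_html_templates_env(); DictLoader stands in for the PackageLoader.
    env = jinja2.Environment(
        loader=jinja2.DictLoader(
            {"demo.html.jinja": '<div class="{{ "section" | append_uuid }}">{{ body }}</div>'}
        ),
        autoescape=jinja2.select_autoescape(default=True, default_for_string=True),
    )
    env.filters["append_uuid"] = append_uuid
    return env


class RenderWrap:
    """Pre-bind keyword arguments to Template.render(), like _RenderWrap above."""

    def __init__(self, template, **kwargs):
        self._template = template
        self._kwargs = kwargs

    def render(self, **kwargs):
        return self._template.render(**kwargs, **self._kwargs)


template = RenderWrap(get_env("repr").get_template("demo.html.jinja"), body="hello")
print(template.render())  # the div class carries a fresh UUID suffix

Importing jinja2 inside the cached factory, as the real `_get_html_templates_env` does, keeps the dependency off MNE's import path until an HTML repr or report is actually rendered.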
    diff --git a/mne/html_templates/report/image.html.jinja b/mne/html_templates/report/image.html.jinja index 6f80960ccc3..c3f6e42f06f 100644 --- a/mne/html_templates/report/image.html.jinja +++ b/mne/html_templates/report/image.html.jinja @@ -1,17 +1,19 @@ +{% if not embedded %} {% extends "section.html.jinja" %} +{% endif %} {% block html_content %} -
    - {% if image_format == 'svg' %} -
    - {{ img }} -
    - {% else %} - {{ title }} - {% endif %} +
    + {% if image_format == 'svg' %} +
    + {{ img|safe }} +
    + {% else %} + {{ title }} + {% endif %} - {% if caption is not none %} -
    {{ caption }}
    - {% endif %} -
    + {% if caption is not none %} +
    {{ caption }}
    + {% endif %} +
    {% endblock html_content %} diff --git a/mne/html_templates/report/section.html.jinja b/mne/html_templates/report/section.html.jinja index b7daaa2db6d..baddf7dd8b6 100644 --- a/mne/html_templates/report/section.html.jinja +++ b/mne/html_templates/report/section.html.jinja @@ -1,9 +1,8 @@
    -
    - - -
    +
    {% block html_content %} {% for html in htmls %} diff --git a/mne/html_templates/report/slider.html.jinja b/mne/html_templates/report/slider.html.jinja index 24227bf490e..fab7f56472d 100644 --- a/mne/html_templates/report/slider.html.jinja +++ b/mne/html_templates/report/slider.html.jinja @@ -1,9 +1,9 @@ -
    +
    -
    - -
    +
    - -
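The `image.html.jinja` change above wraps `{% extends %}` in `{% if not embedded %}`, so the same template can be rendered bare (when it is being embedded inside other HTML) or wrapped by `section.html.jinja`. Jinja2 supports this kind of conditional inheritance directly; a sketch with throwaway template strings, not the real MNE templates:

import jinja2

env = jinja2.Environment(
    loader=jinja2.DictLoader(
        {
            "section.html.jinja": "<section>{% block html_content %}{% endblock %}</section>",
            "image.html.jinja": (
                "{% if not embedded %}{% extends 'section.html.jinja' %}{% endif %}"
                "{% block html_content %}<img src='{{ src }}'>{% endblock %}"
            ),
        }
    ),
    autoescape=False,  # kept off for readability; MNE enables select_autoescape
)

tpl = env.get_template("image.html.jinja")
print(tpl.render(embedded=True, src="plot.png"))   # bare <img> fragment
print(tpl.render(embedded=False, src="plot.png"))  # wrapped in <section>...</section>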
    diff --git a/mne/html_templates/repr/_acquisition.html.jinja b/mne/html_templates/repr/_acquisition.html.jinja new file mode 100644 index 00000000000..e1ee4f69dd3 --- /dev/null +++ b/mne/html_templates/repr/_acquisition.html.jinja @@ -0,0 +1,97 @@ +{% set section = "Acquisition" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + +{%include 'static/_section_header_row.html.jinja' %} + +{% if duration %} + + + Duration + {{ duration }} (HH:MM:SS) + +{% endif %} +{% if inst is defined and inst | has_attr("kind") and inst | has_attr("nave") %} + + + Aggregation + {% if inst.kind == "average" %} + average of {{ inst.nave }} epochs + {% elif inst.kind == "standard_error" %} + standard error of {{ inst.nave }} epochs + {% else %} + {{ inst.kind }} ({{ inst.nave }} epochs) + {% endif %} + +{% endif %} +{% if inst is defined and inst | has_attr("comment") %} + + + Condition + {{inst.comment}} + +{% endif %} +{% if inst is defined and inst | has_attr("events") %} + + + Total number of events + {{ inst.events | length }} + +{% endif %} +{% if event_counts is defined %} + + + Events counts + {% if events is not none %} + + {% for e in event_counts %} + {{ e }} + {% if not loop.last %}
    {% endif %} + {% endfor %} + + {% else %} + Not available + {% endif %} + +{% endif %} +{% if inst is defined and inst | has_attr("tmin") and inst | has_attr("tmax") %} + + + Time range + {{ inst | format_time_range }} + +{% endif %} +{% if inst is defined and inst | has_attr("baseline") %} + + + Baseline + {{ inst | format_baseline }} + +{% endif %} +{% if info["sfreq"] is defined and info["sfreq"] is not none %} + + + Sampling frequency + {{ "%0.2f" | format(info["sfreq"]) }} Hz + +{% endif %} +{% if inst is defined and inst | has_attr("times") and inst.times is defined %} + + + Time points + {{ inst.times | length | format_number }} + +{% endif %} +{% if inst is defined and inst | has_attr("metadata") %} + + + Metadata + {{ inst | format_metadata }} + +{% endif %} \ No newline at end of file diff --git a/mne/html_templates/repr/_channels.html.jinja b/mne/html_templates/repr/_channels.html.jinja new file mode 100644 index 00000000000..1d057b9688f --- /dev/null +++ b/mne/html_templates/repr/_channels.html.jinja @@ -0,0 +1,25 @@ +{% set section = "Channels" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + +{%include 'static/_section_header_row.html.jinja' %} + +{% for channel_type, channels in (info | format_channels).items() %} + {% include 'static/_channels.html.jinja' %} +{% endfor %} + + + + Head & sensor digitization + {% if info["dig"] is not none %} + {{ info["dig"] | length }} points + {% else %} + Not available + {% endif %} + \ No newline at end of file diff --git a/mne/html_templates/repr/_filters.html.jinja b/mne/html_templates/repr/_filters.html.jinja new file mode 100644 index 00000000000..97ede5157c1 --- /dev/null +++ b/mne/html_templates/repr/_filters.html.jinja @@ -0,0 +1,38 @@ +{% set section = "Filters" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + +{%include 'static/_section_header_row.html.jinja' %} + +{% if info["highpass"] is defined and info["highpass"] is not none %} + + + Highpass + {{ "%0.2f" | format(info["highpass"]) }} Hz + +{% endif %} +{% if info["lowpass"] is defined and info["lowpass"] is not none %} + + + Lowpass + {{ "%0.2f" | format(info["lowpass"]) }} Hz + +{% endif %} +{% if info.projs is defined and info.projs %} + + + Projections + + {% for p in (info | format_projs) %} + {{ p }} + {% if not loop.last %}
    {% endif %} + {% endfor %} + + +{% endif %} \ No newline at end of file diff --git a/mne/html_templates/repr/_frequencies.html.jinja b/mne/html_templates/repr/_frequencies.html.jinja new file mode 100644 index 00000000000..b55b8ddf883 --- /dev/null +++ b/mne/html_templates/repr/_frequencies.html.jinja @@ -0,0 +1,62 @@ +{% set section = "Frequencies" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + +{%include 'static/_section_header_row.html.jinja' %} + + + + Data type + {{ inst._data_type }} + + + + Computed from + {{ computed_from }} + + + + Estimation method + {{ inst.method }} + + +{% if "taper" in inst._dims %} + + + Number of tapers + {{ inst._mt_weights.size }} + +{% endif %} +{% if inst.freqs is defined %} + + + Frequency range + {{ '%.2f'|format(inst.freqs[0]) }} – {{ '%.2f'|format(inst.freqs[-1]) }} Hz + + + + Number of frequency bins + {{ inst.freqs|length }} + +{%- for unit in units %} + + + {%- if loop.index == 1 %} + Units + {%- endif %} + {{ unit }} + +{%- endfor %} +{% endif %} diff --git a/mne/html_templates/repr/_general.html.jinja b/mne/html_templates/repr/_general.html.jinja new file mode 100644 index 00000000000..a57ae40049d --- /dev/null +++ b/mne/html_templates/repr/_general.html.jinja @@ -0,0 +1,58 @@ +{% set section = "General" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + +{%include 'static/_section_header_row.html.jinja' %} + +{% if filenames %} + + + Filename(s) + + {% for f in filenames %} + {{ f }} + {% if not loop.last %}
    {% endif %} + {% endfor %} + + +{% endif %} + + + MNE object type + {{ inst | data_type }} + + + + Measurement date + {% if info["meas_date"] is defined and info["meas_date"] is not none %} + {{ info["meas_date"] | dt_to_str }} + {% else %} + Unknown + {% endif %} + + + + Participant + {% if info["subject_info"] is defined and info["subject_info"] is not none %} + {% if info["subject_info"]["his_id"] is defined %} + {{ info["subject_info"]["his_id"] }} + {% endif %} + {% else %} + Unknown + {% endif %} + + + + Experimenter + {% if info["experimenter"] is defined and info["experimenter"] is not none %} + {{ info["experimenter"] }} + {% else %} + Unknown + {% endif %} + \ No newline at end of file diff --git a/mne/html_templates/repr/_js_and_css.html.jinja b/mne/html_templates/repr/_js_and_css.html.jinja new file mode 100644 index 00000000000..f185cfbe00a --- /dev/null +++ b/mne/html_templates/repr/_js_and_css.html.jinja @@ -0,0 +1,7 @@ + + + \ No newline at end of file diff --git a/mne/html_templates/repr/epochs.html.jinja b/mne/html_templates/repr/epochs.html.jinja index f2894a599e2..6b33c177e87 100644 --- a/mne/html_templates/repr/epochs.html.jinja +++ b/mne/html_templates/repr/epochs.html.jinja @@ -1,22 +1,10 @@ - - - - - - - - {% if events is not none %} - - {% else %} - - {% endif %} - - - - - - - - - -
    Number of events{{ epochs.events|length }}
Events{{ events|join('<br/>') | safe }}
    Not available
    Time range{{ '%.3f'|format(epochs.tmin) }} – {{ '%.3f'|format(epochs.tmax) }} s
    Baseline{{ baseline }}
    +{%include '_js_and_css.html.jinja' %} + +{% set info = inst.info %} + + + {%include '_general.html.jinja' %} + {%include '_acquisition.html.jinja' %} + {%include '_channels.html.jinja' %} + {%include '_filters.html.jinja' %} +
    \ No newline at end of file diff --git a/mne/html_templates/repr/evoked.html.jinja b/mne/html_templates/repr/evoked.html.jinja index cd3c471b3d0..6b33c177e87 100644 --- a/mne/html_templates/repr/evoked.html.jinja +++ b/mne/html_templates/repr/evoked.html.jinja @@ -1,29 +1,10 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Condition{{evoked.comment}}
    Data kind{{evoked.kind}}
    Timepoints{{ evoked.data.shape[1] }} samples
    Channels{{ evoked.data.shape[0] }} channels
    Number of averaged epochs{{evoked.nave}}
    Time range (secs){{ evoked.times[0] }} – {{ evoked.times[-1] }}
    Baseline (secs){{baseline}}
    +{%include '_js_and_css.html.jinja' %} + +{% set info = inst.info %} + + + {%include '_general.html.jinja' %} + {%include '_acquisition.html.jinja' %} + {%include '_channels.html.jinja' %} + {%include '_filters.html.jinja' %} +
    \ No newline at end of file diff --git a/mne/html_templates/repr/forward.html.jinja b/mne/html_templates/repr/forward.html.jinja index f7294cf2cdd..e8bce9649b1 100644 --- a/mne/html_templates/repr/forward.html.jinja +++ b/mne/html_templates/repr/forward.html.jinja @@ -1,18 +1,29 @@ - - - - - - - - - - - +{%include '_js_and_css.html.jinja' %} + +{% set section = "Forward" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + +
    Good channels{{ good_channels }}
    Bad channels{{ bad_channels }}
    Source space
    + {%include 'static/_section_header_row.html.jinja' %} + {% for channel_type, channels in (info | format_channels).items() %} + {% include 'static/_channels.html.jinja' %} + {% endfor %} + + + + - - + + + -
    Source space {{ source_space_descr }}
    Source orientation
    Source orientation {{ source_orientation }}
    + \ No newline at end of file diff --git a/mne/html_templates/repr/ica.html.jinja b/mne/html_templates/repr/ica.html.jinja index 080c3cd5e95..62762098365 100644 --- a/mne/html_templates/repr/ica.html.jinja +++ b/mne/html_templates/repr/ica.html.jinja @@ -1,8 +1,12 @@ - +
    + + + + diff --git a/mne/html_templates/repr/info.html.jinja b/mne/html_templates/repr/info.html.jinja index ec01af7decd..64463728979 100644 --- a/mne/html_templates/repr/info.html.jinja +++ b/mne/html_templates/repr/info.html.jinja @@ -1,74 +1,10 @@ -
    Method {{ method }}
    Fit parameters{% if fit_params %}{% for key, value in fit_params.items() %}{{ key }}={{ value }}
    {% endfor %}{% else %}—{% endif %}
    Fit {% if fit_on %}{{ n_iter }} iterations on {{ fit_on }} ({{ n_samples }} samples){% else %}no{% endif %}
    - - - {% if meas_date is not none %} - - {% else %} - - {% endif %} - - - - {% if experimenter is not none %} - - {% else %} - - {% endif %} - - - {% if subject_info is not none %} - {% if 'his_id' in subject_info.keys() %} - - {% endif %} - {% else %} - - {% endif %} - - - - {% if dig is not none %} - - {% else %} - - {% endif %} - - - - - - - - - - - - - - - - - {% if sfreq is not none %} - - - - - {% endif %} - {% if highpass is not none %} - - - - - {% endif %} - {% if lowpass is not none %} - - - - - {% endif %} - {% if projs is not none %} - - - - - {% endif %} -
    Measurement date{{ meas_date }}Unknown
    Experimenter{{ experimenter }}Unknown
    Participant{{ subject_info['his_id'] }}Unknown
    Digitized points{{ dig|length }} pointsNot available
    Good channels{{ good_channels }}
    Bad channels{{ bad_channels }}
    EOG channels{{ eog }}
    ECG channels{{ ecg }}
    Sampling frequency{{ '%0.2f'|format(sfreq) }} Hz
    Highpass{{ '%0.2f'|format(highpass) }} Hz
    Lowpass{{ '%0.2f'|format(lowpass) }} Hz
Projections{{ projs|join('<br/>') | safe }}
    +{%include '_js_and_css.html.jinja' %} + +{%set inst = info %} + + + {%include '_general.html.jinja' %} + {%include '_acquisition.html.jinja' %} + {%include '_channels.html.jinja' %} + {%include '_filters.html.jinja' %} +
    \ No newline at end of file diff --git a/mne/html_templates/repr/inverse_operator.html.jinja b/mne/html_templates/repr/inverse_operator.html.jinja index 31376a6e6c8..63d1ee8cf3b 100644 --- a/mne/html_templates/repr/inverse_operator.html.jinja +++ b/mne/html_templates/repr/inverse_operator.html.jinja @@ -1,4 +1,4 @@ - +
    diff --git a/mne/html_templates/repr/raw.html.jinja b/mne/html_templates/repr/raw.html.jinja index 7bc8542eaee..6b33c177e87 100644 --- a/mne/html_templates/repr/raw.html.jinja +++ b/mne/html_templates/repr/raw.html.jinja @@ -1,12 +1,10 @@ -{{ info_repr[:-9] | safe }} - {% if filenames %} - - - - - {% endif %} - - - - -
    Channels {{ channels }}
    Filenames{{ filenames|join('
    ') }}
    Duration{{ duration }} (HH:MM:SS)
    +{%include '_js_and_css.html.jinja' %} + +{% set info = inst.info %} + + + {%include '_general.html.jinja' %} + {%include '_acquisition.html.jinja' %} + {%include '_channels.html.jinja' %} + {%include '_filters.html.jinja' %} +
    \ No newline at end of file diff --git a/mne/html_templates/repr/spectrum.html.jinja b/mne/html_templates/repr/spectrum.html.jinja index ee35f8e2ec1..40cc2222005 100644 --- a/mne/html_templates/repr/spectrum.html.jinja +++ b/mne/html_templates/repr/spectrum.html.jinja @@ -1,50 +1,11 @@ - - - - - - {%- for unit in units %} - - {%- if loop.index == 1 %} - - {%- endif %} - - - {%- endfor %} - - - - - {%- if inst_type == "Epochs" %} - - - - - {% endif -%} - - - - - - - - - {% if "taper" in spectrum._dims %} - - - - - {% endif %} - - - - - - - - - - - - +{%include '_js_and_css.html.jinja' %} + +{% set info = inst.info %} + +
    Data type{{ spectrum._data_type }}
    Units{{ unit }}
    Data source{{ inst_type }}
    Number of epochs{{ spectrum.shape[0] }}
    Dims{{ spectrum._dims | join(", ") }}
    Estimation method{{ spectrum.method }}
    Number of tapers{{ spectrum._mt_weights.size }}
    Number of channels{{ spectrum.ch_names|length }}
    Number of frequency bins{{ spectrum.freqs|length }}
    Frequency range{{ '%.2f'|format(spectrum.freqs[0]) }} – {{ '%.2f'|format(spectrum.freqs[-1]) }} Hz
    + {%include '_general.html.jinja' %} + {%include '_acquisition.html.jinja' %} + {%include '_channels.html.jinja' %} + {%include '_frequencies.html.jinja' %} + {%include '_filters.html.jinja' %}
    diff --git a/mne/html_templates/repr/static/_channels.html.jinja b/mne/html_templates/repr/static/_channels.html.jinja new file mode 100644 index 00000000000..d40f62f5d6b --- /dev/null +++ b/mne/html_templates/repr/static/_channels.html.jinja @@ -0,0 +1,17 @@ +{% set channel_names_good = channels["good"] | map(attribute='name_html') | join(', ') %} + + + {{ channel_type }} + + + + {% if channels["bad"] %} + {% set channel_names_bad = channels["bad"] | map(attribute='name_html') | join(', ') %} + and + {% endif %} + + diff --git a/mne/html_templates/repr/static/_section_header_row.html.jinja b/mne/html_templates/repr/static/_section_header_row.html.jinja new file mode 100644 index 00000000000..d8d3da21d3f --- /dev/null +++ b/mne/html_templates/repr/static/_section_header_row.html.jinja @@ -0,0 +1,12 @@ + + + + + + {{ section }} + + diff --git a/mne/html_templates/repr/static/repr.css b/mne/html_templates/repr/static/repr.css new file mode 100644 index 00000000000..58931878430 --- /dev/null +++ b/mne/html_templates/repr/static/repr.css @@ -0,0 +1,107 @@ +/* +Styles in this section apply both to the sphinx-built website docs and to notebooks +rendered in an IDE or in Jupyter. In our web docs, styles here are complemented by +doc/_static/styles.css and other CSS files (e.g. from the sphinx theme, sphinx-gallery, +or bootstrap). In IDEs/Jupyter, those style files are unavailable, so only the rules in +this file apply (plus whatever default styling the IDE applies). +*/ +.mne-repr-table { + display: inline; /* prevent using full container width */ +} +.mne-repr-table tr.mne-repr-section-header > th { + padding-top: 1rem; + text-align: left; + vertical-align: middle; +} +.mne-repr-section-toggle > button { + all: unset; + display: block; + height: 1rem; + width: 1rem; +} +.mne-repr-section-toggle > button > svg { + height: 60%; +} + +/* transition (rotation) effects on the collapser button */ +.mne-repr-section-toggle > button.collapsed > svg { + transition: 0.1s ease-out; + transform: rotate(-90deg); +} +.mne-repr-section-toggle > button:not(.collapsed) > svg { + transition: 0.1s ease-out; + transform: rotate(0deg); +} + +/* hide collapsed table rows */ +.mne-repr-collapsed { + display: none; +} + + +@layer { + /* + Selectors in a `@layer` will always be lower-precedence than selectors outside the + layer. So even though e.g. `div.output_html` is present in the sphinx-rendered + website docs, the styles here won't take effect there as long as some other rule + somewhere in the page's CSS targets the same element. + + In IDEs or Jupyter notebooks, though, the CSS files from the sphinx theme, + sphinx-gallery, and bootstrap are unavailable, so these styles will apply. + + Notes: + + - the selector `.accordion-body` is for MNE Reports + - the selector `.output_html` is for VSCode's notebook interface + - the selector `.jp-RenderedHTML` is for Jupyter notebook + - variables starting with `--theme-` are VSCode-specific. + - variables starting with `--jp-` are Jupyter styles, *some of which* are also + available in VSCode. Here we try the `--theme-` variable first, then fall back to + the `--jp-` ones. 
+ */ + .mne-repr-table { + --mne-toggle-color: var(--theme-foreground, var(--jp-ui-font-color1)); + --mne-button-bg-color: var(--theme-button-background, var(--jp-info-color0, var(--jp-content-link-color))); + --mne-button-fg-color: var(--theme-button-foreground, var(--jp-ui-inverse-font-color0, var(--jp-editor-background))); + --mne-button-hover-bg-color: var(--theme-button-hover-background, var(--jp-info-color1)); + --mne-button-radius: var(--jp-border-radius, 0.25rem); + } + /* chevron position/alignment; in VSCode it looks ok without adjusting */ + .accordion-body .mne-repr-section-toggle > button, + .jp-RenderedHTML .mne-repr-section-toggle > button { + padding: 0 0 45% 25% !important; + } + /* chevron color; MNE Report doesn't have light/dark mode */ + div.output_html .mne-repr-section-toggle > button > svg > path, + .jp-RenderedHTML .mne-repr-section-toggle > button > svg > path { + fill: var(--mne-toggle-color); + } + .accordion-body .mne-ch-names-btn, + div.output_html .mne-ch-names-btn, + .jp-RenderedHTML .mne-ch-names-btn { + -webkit-border-radius: var(--mne-button-radius); + -moz-border-radius: var(--mne-button-radius); + border-radius: var(--mne-button-radius); + border: none; + background-image: none; + background-color: var(--mne-button-bg-color); + color: var(--mne-button-fg-color); + font-size: inherit; + min-width: 1.5rem; + padding: 0.25rem; + text-align: center; + text-decoration: none; + } + .accordion-body .mne-ch-names-btn:hover, + div.output_html .mne.ch-names-btn:hover, + .jp-RenderedHTML .mne-ch-names-btn:hover { + background-color: var(--mne-button-hover-bg-color); + text-decoration: underline; + } + .accordion-body .mne-ch-names-btn:focus-visible, + div.output_html .mne-ch-names-btn:focus-visible, + .jp-RenderedHTML .mne-ch-names-btn:focus-visible { + outline: 0.1875rem solid var(--mne-button-bg-color) !important; + outline-offset: 0.1875rem !important; + } +} diff --git a/mne/html_templates/repr/static/repr.js b/mne/html_templates/repr/static/repr.js new file mode 100644 index 00000000000..00c9d327d97 --- /dev/null +++ b/mne/html_templates/repr/static/repr.js @@ -0,0 +1,23 @@ +// must be `var` (not `const`) because this can get embedded multiple times on a page +var toggleVisibility = (className) => { + + const elements = document.querySelectorAll(`.${className}`); + + elements.forEach(element => { + if (element.classList.contains("mne-repr-section-header")) { + return // Don't collapse the section header row + } + element.classList.toggle("mne-repr-collapsed"); + }); + + // trigger caret to rotate + var sel = `.mne-repr-section-header.${className} > th.mne-repr-section-toggle > button`; + const button = document.querySelector(sel); + button.classList.toggle("collapsed"); + + // adjust tooltip + sel = `tr.mne-repr-section-header.${className}`; + const secHeadRow = document.querySelector(sel); + secHeadRow.classList.toggle("collapsed"); + secHeadRow.title = secHeadRow.title === "Hide section" ? 
"Show section" : "Hide section"; +} diff --git a/mne/html_templates/repr/tfr.html.jinja b/mne/html_templates/repr/tfr.html.jinja new file mode 100644 index 00000000000..d2881a0db58 --- /dev/null +++ b/mne/html_templates/repr/tfr.html.jinja @@ -0,0 +1,60 @@ + + + + + + {%- for unit in units %} + + {%- if loop.index == 1 %} + + {%- endif %} + + + {%- endfor %} + + + + + {%- if inst_type == "Epochs" %} + + + + + {% endif -%} + {%- if inst_type == "Evoked" %} + + + + + {% endif -%} + + + + + + + + + {% if "taper" in tfr._dims %} + + + + + {% endif %} + + + + + + + + + + + + + + + + +
    Data type{{ tfr._data_type }}
    Units{{ unit }}
    Data source{{ inst_type }}
    Number of epochs{{ tfr.shape[0] }}
    Number of averaged trials{{ nave }}
    Dims{{ tfr._dims | join(", ") }}
    Estimation method{{ tfr.method }}
    Number of tapers{{ tfr._mt_weights.size }}
    Number of channels{{ tfr.ch_names|length }}
    Number of timepoints{{ tfr.times|length }}
    Number of frequency bins{{ tfr.freqs|length }}
    Frequency range{{ '%.2f'|format(tfr.freqs[0]) }} – {{ '%.2f'|format(tfr.freqs[-1]) }} Hz
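The repr templates above back the HTML representations that Jupyter/IPython pick up through the standard `_repr_html_` protocol: an object returns an HTML string and the notebook displays it. A hedged sketch of that wiring, with an inline string template standing in for MNE's `_get_html_template("repr", ...)` machinery:

import jinja2


class InfoLike:
    # Toy object standing in for an MNE object that exposes an .info mapping.

    def __init__(self, info):
        self.info = info

    def _repr_html_(self):
        # MNE fetches a packaged template via _get_html_template("repr", ...);
        # an inline template keeps this sketch self-contained.
        template = jinja2.Template(
            "<table><tr><td>Sampling frequency</td>"
            "<td>{{ '%0.2f' | format(info['sfreq']) }} Hz</td></tr></table>"
        )
        return template.render(info=self.info)


print(InfoLike({"sfreq": 600.615})._repr_html_())  # ... 600.61 Hz ...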
    diff --git a/mne/icons/mne_default_icon.png b/mne/icons/mne_default_icon.png index 83a187ecc93..22a90e2275c 100644 Binary files a/mne/icons/mne_default_icon.png and b/mne/icons/mne_default_icon.png differ diff --git a/mne/icons/mne_splash.png b/mne/icons/mne_splash.png index 6ba7ee49b51..76aa18ea574 100644 Binary files a/mne/icons/mne_splash.png and b/mne/icons/mne_splash.png differ diff --git a/mne/inverse_sparse/__init__.py b/mne/inverse_sparse/__init__.py index 867becd38a5..615c3062bdb 100644 --- a/mne/inverse_sparse/__init__.py +++ b/mne/inverse_sparse/__init__.py @@ -1,9 +1,9 @@ """Non-Linear sparse inverse solvers.""" -# Author: Alexandre Gramfort -# -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from .mxne_inverse import (mixed_norm, tf_mixed_norm, - make_stc_from_dipoles) -from ._gamma_map import gamma_map +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/inverse_sparse/__init__.pyi b/mne/inverse_sparse/__init__.pyi new file mode 100644 index 00000000000..557921114ca --- /dev/null +++ b/mne/inverse_sparse/__init__.pyi @@ -0,0 +1,3 @@ +__all__ = ["gamma_map", "make_stc_from_dipoles", "mixed_norm", "tf_mixed_norm"] +from ._gamma_map import gamma_map +from .mxne_inverse import make_stc_from_dipoles, mixed_norm, tf_mixed_norm diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py index 6f71cbedae7..35fd158f3e0 100644 --- a/mne/inverse_sparse/_gamma_map.py +++ b/mne/inverse_sparse/_gamma_map.py @@ -1,20 +1,35 @@ -# Authors: Alexandre Gramfort -# Martin Luessi -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np +from ..fixes import _safe_svd from ..forward import is_fixed_orient from ..minimum_norm.inverse import _check_reference, _log_exp_var from ..utils import logger, verbose, warn -from .mxne_inverse import (_check_ori, _make_sparse_stc, _prepare_gain, - _reapply_source_weighting, _compute_residual, - _make_dipoles_sparse) +from .mxne_inverse import ( + _check_ori, + _compute_residual, + _make_dipoles_sparse, + _make_sparse_stc, + _prepare_gain, + _reapply_source_weighting, +) @verbose -def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1, - group_size=1, gammas=None, verbose=None): +def _gamma_map_opt( + M, + G, + alpha, + maxit=10000, + tol=1e-6, + update_mode=1, + group_size=1, + gammas=None, + verbose=None, +): """Hierarchical Bayes (Gamma-MAP). Parameters @@ -45,7 +60,6 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1, active_set : array, shape=(n_active,) Indices of active sources. 
""" - from scipy import linalg G = G.copy() M = M.copy() @@ -58,15 +72,16 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1, n_sensors, n_times = M.shape # apply normalization so the numerical values are sane - M_normalize_constant = np.linalg.norm(np.dot(M, M.T), ord='fro') + M_normalize_constant = np.linalg.norm(np.dot(M, M.T), ord="fro") M /= np.sqrt(M_normalize_constant) alpha /= M_normalize_constant G_normalize_constant = np.linalg.norm(G, ord=np.inf) G /= G_normalize_constant if n_sources % group_size != 0: - raise ValueError('Number of sources has to be evenly dividable by the ' - 'group size') + raise ValueError( + "Number of sources has to be evenly dividable by the group size" + ) n_active = n_sources active_set = np.arange(n_sources) @@ -84,7 +99,7 @@ def denom_fun(x): for itno in range(maxit): gammas[np.isnan(gammas)] = 0.0 - gidx = (np.abs(gammas) > eps) + gidx = np.abs(gammas) > eps active_set = active_set[gidx] gammas = gammas[gidx] @@ -94,9 +109,9 @@ def denom_fun(x): G = G[:, gidx] CM = np.dot(G * gammas[np.newaxis, :], G.T) - CM.flat[::n_sensors + 1] += alpha + CM.flat[:: n_sensors + 1] += alpha # Invert CM keeping symmetry - U, S, _ = linalg.svd(CM, full_matrices=False) + U, S, _ = _safe_svd(CM, full_matrices=False) S = S[np.newaxis, :] del CM CMinv = np.dot(U / (S + eps), U.T) @@ -105,21 +120,20 @@ def denom_fun(x): if update_mode == 1: # MacKay fixed point update (10) in [1] - numer = gammas ** 2 * np.mean((A * A.conj()).real, axis=1) + numer = gammas**2 * np.mean((A * A.conj()).real, axis=1) denom = gammas * np.sum(G * CMinvG, axis=0) elif update_mode == 2: # modified MacKay fixed point update (11) in [1] numer = gammas * np.sqrt(np.mean((A * A.conj()).real, axis=1)) denom = np.sum(G * CMinvG, axis=0) # sqrt is applied below else: - raise ValueError('Invalid value for update_mode') + raise ValueError("Invalid value for update_mode") if group_size == 1: if denom is None: gammas = numer else: - gammas = numer / np.maximum(denom_fun(denom), - np.finfo('float').eps) + gammas = numer / np.maximum(denom_fun(denom), np.finfo("float").eps) else: numer_comb = np.sum(numer.reshape(-1, group_size), axis=1) if denom is None: @@ -134,24 +148,27 @@ def denom_fun(x): gammas_full = np.zeros(n_sources, dtype=np.float64) gammas_full[active_set] = gammas - err = (np.sum(np.abs(gammas_full - gammas_full_old)) / - np.sum(np.abs(gammas_full_old))) + err = np.sum(np.abs(gammas_full - gammas_full_old)) / np.sum( + np.abs(gammas_full_old) + ) gammas_full_old = gammas_full - breaking = (err < tol or n_active == 0) + breaking = err < tol or n_active == 0 if len(gammas) != last_size or breaking: - logger.info('Iteration: %d\t active set size: %d\t convergence: ' - '%0.3e' % (itno, len(gammas), err)) + logger.info( + f"Iteration: {itno}\t active set size: {len(gammas)}\t convergence: " + f"{err:.3e}" + ) last_size = len(gammas) if breaking: break if itno < maxit - 1: - logger.info('\nConvergence reached !\n') + logger.info("\nConvergence reached !\n") else: - warn('\nConvergence NOT reached !\n') + warn("\nConvergence NOT reached !\n") # undo normalization and compute final posterior mean n_const = np.sqrt(M_normalize_constant) / G_normalize_constant @@ -161,10 +178,25 @@ def denom_fun(x): @verbose -def gamma_map(evoked, forward, noise_cov, alpha, loose="auto", depth=0.8, - xyz_same_gamma=True, maxit=10000, tol=1e-6, update_mode=1, - gammas=None, pca=True, return_residual=False, - return_as_dipoles=False, rank=None, pick_ori=None, verbose=None): +def gamma_map( + evoked, + 
forward, + noise_cov, + alpha, + loose="auto", + depth=0.8, + xyz_same_gamma=True, + maxit=10000, + tol=1e-6, + update_mode=1, + gammas=None, + pca=True, + return_residual=False, + return_as_dipoles=False, + rank=None, + pick_ori=None, + verbose=None, +): """Hierarchical Bayes (Gamma-MAP) sparse source localization method. Models each source time course using a zero-mean Gaussian prior with an @@ -228,23 +260,32 @@ def gamma_map(evoked, forward, noise_cov, alpha, loose="auto", depth=0.8, _check_reference(evoked) forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain( - forward, evoked.info, noise_cov, pca, depth, loose, rank) + forward, evoked.info, noise_cov, pca, depth, loose, rank + ) _check_ori(pick_ori, forward) group_size = 1 if (is_fixed_orient(forward) or not xyz_same_gamma) else 3 # get the data - sel = [evoked.ch_names.index(name) for name in gain_info['ch_names']] + sel = [evoked.ch_names.index(name) for name in gain_info["ch_names"]] M = evoked.data[sel] # whiten the data - logger.info('Whitening data matrix.') + logger.info("Whitening data matrix.") M = np.dot(whitener, M) # run the optimization - X, active_set = _gamma_map_opt(M, gain, alpha, maxit=maxit, tol=tol, - update_mode=update_mode, gammas=gammas, - group_size=group_size, verbose=verbose) + X, active_set = _gamma_map_opt( + M, + gain, + alpha, + maxit=maxit, + tol=tol, + update_mode=update_mode, + gammas=gammas, + group_size=group_size, + verbose=verbose, + ) if len(active_set) == 0: raise Exception("No active dipoles found. alpha is too big.") @@ -255,8 +296,7 @@ def gamma_map(evoked, forward, noise_cov, alpha, loose="auto", depth=0.8, X = _reapply_source_weighting(X, source_weighting, active_set) if return_residual: - residual = _compute_residual(forward, evoked, X, active_set, - gain_info) + residual = _compute_residual(forward, evoked, X, active_set, gain_info) if group_size == 1 and not is_fixed_orient(forward): # make sure each source has 3 components @@ -274,18 +314,26 @@ def gamma_map(evoked, forward, noise_cov, alpha, loose="auto", depth=0.8, del source_weighting tmin = evoked.times[0] - tstep = 1.0 / evoked.info['sfreq'] + tstep = 1.0 / evoked.info["sfreq"] if return_as_dipoles: - out = _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M, - gain_active, active_is_idx=True) + out = _make_dipoles_sparse( + X, active_set, forward, tmin, tstep, M, gain_active, active_is_idx=True + ) else: - out = _make_sparse_stc(X, active_set, forward, tmin, tstep, - active_is_idx=True, pick_ori=pick_ori, - verbose=verbose) - - _log_exp_var(M, M_estimate, prefix='') - logger.info('[done]') + out = _make_sparse_stc( + X, + active_set, + forward, + tmin, + tstep, + active_is_idx=True, + pick_ori=pick_ori, + verbose=verbose, + ) + + _log_exp_var(M, M_estimate, prefix="") + logger.info("[done]") if return_residual: out = out, residual diff --git a/mne/inverse_sparse/mxne_debiasing.py b/mne/inverse_sparse/mxne_debiasing.py index 1ea3ca6f95d..860c67c5f6c 100644 --- a/mne/inverse_sparse/mxne_debiasing.py +++ b/mne/inverse_sparse/mxne_debiasing.py @@ -1,12 +1,12 @@ -# Authors: Daniel Strohmeier -# Alexandre Gramfort -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
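Tracing `_gamma_map_opt` above: with the model covariance $C_M = \alpha I + G\,\mathrm{diag}(\gamma)\,G^\top$ built from the whitened, rescaled gain matrix $G$, the `update_mode=1` branch is the MacKay fixed-point rule (transcribed from the code, not quoted from the cited reference):

$$\gamma_k \leftarrow \frac{\gamma_k^2 \cdot \tfrac{1}{T}\sum_{t} \bigl|g_k^\top C_M^{-1} m_t\bigr|^2}{\gamma_k \; g_k^\top C_M^{-1} g_k},$$

where $g_k$ is the $k$-th gain column, $m_t$ the $t$-th data column, and $T$ the number of time samples. `update_mode=2` uses the modified variant noted in the code comments, and sources whose $\gamma_k$ becomes numerically zero are pruned from the active set on each iteration.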
from math import sqrt + import numpy as np -from ..utils import check_random_state, logger, verbose, fill_doc +from ..utils import check_random_state, fill_doc, logger, verbose @fill_doc @@ -37,13 +37,13 @@ def power_iteration_kron(A, C, max_iter=1000, tol=1e-3, random_state=0): AS_size = C.shape[0] rng = check_random_state(random_state) B = rng.randn(AS_size, AS_size) - B /= np.linalg.norm(B, 'fro') + B /= np.linalg.norm(B, "fro") ATA = np.dot(A.T, A) CCT = np.dot(C, C.T) L0 = np.inf for _ in range(max_iter): Y = np.dot(np.dot(ATA, B), CCT) - L = np.linalg.norm(Y, 'fro') + L = np.linalg.norm(Y, "fro") if abs(L - L0) < tol: break @@ -115,7 +115,7 @@ def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None): D = np.maximum(D, 1.0) t0 = t - t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2)) + t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t**2)) Y.fill(0.0) dt = (t0 - 1.0) / t Y = D + dt * (D - D0) @@ -123,11 +123,15 @@ def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None): Ddiff = np.linalg.norm(D - D0, np.inf) if Ddiff < tol: - logger.info("Debiasing converged after %d iterations " - "max(|D - D0| = %e < %e)" % (i, Ddiff, tol)) + logger.info( + f"Debiasing converged after {i} iterations " + f"max(|D - D0| = {Ddiff:e} < {tol:e})" + ) break else: Ddiff = np.linalg.norm(D - D0, np.inf) - logger.info("Debiasing did not converge after %d iterations! " - "max(|D - D0| = %e >= %e)" % (max_iter, Ddiff, tol)) + logger.info( + f"Debiasing did not converge after {max_iter} iterations! " + f"max(|D - D0| = {Ddiff:e} >= {tol:e})" + ) return D diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index ef17fc736b0..c3ccddbf7cd 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -1,31 +1,50 @@ -# Author: Alexandre Gramfort -# Daniel Strohmeier -# -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
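`power_iteration_kron`, reformatted above, estimates the spectral norm of a Kronecker-structured operator by power iteration without ever forming the Kronecker product. The core iteration, as a standalone NumPy sketch with arbitrary toy shapes:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 20))  # toy dimensions, chosen arbitrarily
C = rng.standard_normal((15, 15))

B = rng.standard_normal((20, 15))
B /= np.linalg.norm(B, "fro")
ATA, CCT = A.T @ A, C @ C.T

L = np.inf
for _ in range(1000):
    Y = ATA @ B @ CCT              # apply the operator B -> A^T A B C C^T
    L_new = np.linalg.norm(Y, "fro")
    if abs(L_new - L) < 1e-3:
        break
    B = Y / L_new                  # renormalize the iterate
    L = L_new

# L approaches the largest eigenvalue of B -> A^T A B C C^T, i.e. the squared
# spectral norm of the Kronecker-structured map B -> A B C.
print(L)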
import numpy as np -from ..source_estimate import SourceEstimate, _BaseSourceEstimate, _make_stc -from ..minimum_norm.inverse import (combine_xyz, _prepare_forward, - _check_reference, _log_exp_var) -from ..forward import is_fixed_orient -from ..io.pick import pick_channels_evoked -from ..io.proj import deactivate_proj -from ..utils import (logger, verbose, _check_depth, _check_option, sum_squared, - _validate_type, check_random_state, warn) +from .._fiff.proj import deactivate_proj from ..dipole import Dipole - -from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver, _Phi, - tf_mixed_norm_solver, iterative_tf_mixed_norm_solver, - norm_l2inf, norm_epsilon_inf, groups_norm2) +from ..fixes import _safe_svd +from ..forward import is_fixed_orient +from ..minimum_norm.inverse import ( + _check_reference, + _log_exp_var, + _prepare_forward, + combine_xyz, +) +from ..source_estimate import SourceEstimate, _BaseSourceEstimate, _make_stc +from ..utils import ( + _check_depth, + _check_option, + _validate_type, + check_random_state, + logger, + sum_squared, + verbose, + warn, +) +from .mxne_optim import ( + _Phi, + groups_norm2, + iterative_mixed_norm_solver, + iterative_tf_mixed_norm_solver, + mixed_norm_solver, + norm_epsilon_inf, + norm_l2inf, + tf_mixed_norm_solver, +) def _check_ori(pick_ori, forward): """Check pick_ori.""" - _check_option('pick_ori', pick_ori, [None, 'vector']) - if pick_ori == 'vector' and is_fixed_orient(forward): - raise ValueError('pick_ori="vector" cannot be combined with a fixed ' - 'orientation forward solution.') + _check_option("pick_ori", pick_ori, [None, "vector"]) + if pick_ori == "vector" and is_fixed_orient(forward): + raise ValueError( + 'pick_ori="vector" cannot be combined with a fixed ' + "orientation forward solution." 
+ ) def _prepare_weights(forward, gain, source_weighting, weights, weights_min): @@ -34,15 +53,16 @@ def _prepare_weights(forward, gain, source_weighting, weights, weights_min): weights = np.max(np.abs(weights.data), axis=1) weights_max = np.max(weights) if weights_min > weights_max: - raise ValueError('weights_min > weights_max (%s > %s)' % - (weights_min, weights_max)) + raise ValueError(f"weights_min > weights_max ({weights_min} > {weights_max})") weights_min = weights_min / weights_max weights = weights / weights_max n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T) if len(weights) != gain.shape[1]: - raise ValueError('weights do not have the correct dimension ' - ' (%d != %d)' % (len(weights), gain.shape[1])) + raise ValueError( + "weights do not have the correct dimension " + f" ({len(weights)} != {gain.shape[1]})" + ) if len(source_weighting.shape) == 1: source_weighting *= weights else: @@ -50,26 +70,28 @@ def _prepare_weights(forward, gain, source_weighting, weights, weights_min): gain *= weights[None, :] if weights_min is not None: - mask = (weights > weights_min) + mask = weights > weights_min gain = gain[:, mask] n_sources = np.sum(mask) // n_dip_per_pos - logger.info("Reducing source space to %d sources" % n_sources) + logger.info(f"Reducing source space to {n_sources} sources") return gain, source_weighting, mask -def _prepare_gain(forward, info, noise_cov, pca, depth, loose, rank, - weights=None, weights_min=None): - depth = _check_depth(depth, 'depth_sparse') - forward, gain_info, gain, _, _, source_weighting, _, _, whitener = \ - _prepare_forward(forward, info, noise_cov, 'auto', loose, rank, pca, - use_cps=True, **depth) +def _prepare_gain( + forward, info, noise_cov, pca, depth, loose, rank, weights=None, weights_min=None +): + depth = _check_depth(depth, "depth_sparse") + forward, gain_info, gain, _, _, source_weighting, _, _, whitener = _prepare_forward( + forward, info, noise_cov, "auto", loose, rank, pca, use_cps=True, **depth + ) if weights is None: mask = None else: gain, source_weighting, mask = _prepare_weights( - forward, gain, source_weighting, weights, weights_min) + forward, gain, source_weighting, weights, weights_min + ) return forward, gain, gain_info, whitener, source_weighting, mask @@ -81,26 +103,26 @@ def _reapply_source_weighting(X, source_weighting, active_set): def _compute_residual(forward, evoked, X, active_set, info): # OK, picking based on row_names is safe - sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']] - residual = evoked.copy() - residual = pick_channels_evoked(residual, include=info['ch_names']) + sel = [forward["sol"]["row_names"].index(c) for c in info["ch_names"]] + residual = evoked.copy().pick(info["ch_names"]) r_tmp = residual.copy() - r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X) + r_tmp.data = np.dot(forward["sol"]["data"][sel, :][:, active_set], X) # Take care of proj active_projs = list() non_active_projs = list() - for p in evoked.info['projs']: - if p['active']: + for p in evoked.info["projs"]: + if p["active"]: active_projs.append(p) else: non_active_projs.append(p) if len(active_projs) > 0: with r_tmp.info._unlock(): - r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True, - verbose=False) + r_tmp.info["projs"] = deactivate_proj( + active_projs, copy=True, verbose=False + ) r_tmp.apply_proj(verbose=False) r_tmp.add_proj(non_active_projs, remove_existing=False, verbose=False) @@ -110,13 +132,21 @@ def 
_compute_residual(forward, evoked, X, active_set, info): @verbose -def _make_sparse_stc(X, active_set, forward, tmin, tstep, - active_is_idx=False, pick_ori=None, verbose=None): - source_nn = forward['source_nn'] +def _make_sparse_stc( + X, + active_set, + forward, + tmin, + tstep, + active_is_idx=False, + pick_ori=None, + verbose=None, +): + source_nn = forward["source_nn"] vector = False if not is_fixed_orient(forward): - if pick_ori != 'vector': - logger.info('combining the current components...') + if pick_ori != "vector": + logger.info("combining the current components...") X = combine_xyz(X) else: vector = True @@ -131,21 +161,29 @@ def _make_sparse_stc(X, active_set, forward, tmin, tstep, if n_dip_per_pos > 1: active_idx = np.unique(active_idx // n_dip_per_pos) - src = forward['src'] + src = forward["src"] vertices = [] n_points_so_far = 0 for this_src in src: - this_n_points_so_far = n_points_so_far + len(this_src['vertno']) - this_active_idx = active_idx[(n_points_so_far <= active_idx) & - (active_idx < this_n_points_so_far)] + this_n_points_so_far = n_points_so_far + len(this_src["vertno"]) + this_active_idx = active_idx[ + (n_points_so_far <= active_idx) & (active_idx < this_n_points_so_far) + ] this_active_idx -= n_points_so_far - this_vertno = this_src['vertno'][this_active_idx] + this_vertno = this_src["vertno"][this_active_idx] n_points_so_far = this_n_points_so_far vertices.append(this_vertno) source_nn = source_nn[active_idx] return _make_stc( - X, vertices, src.kind, tmin, tstep, src[0]['subject_his_id'], - vector=vector, source_nn=source_nn) + X, + vertices, + src.kind, + tmin, + tstep, + src[0]["subject_his_id"], + vector=vector, + source_nn=source_nn, + ) def _split_gof(M, X, gain): @@ -172,7 +210,7 @@ def _split_gof(M, X, gain): # determine the weights by projecting each one onto this basis w = (U.T @ gain)[:, :, np.newaxis] * X w_norm = np.linalg.norm(w, axis=1, keepdims=True) - w_norm[w_norm == 0] = 1. + w_norm[w_norm == 0] = 1.0 w /= w_norm # our weights are now unit-norm positive (will presrve power) fit_back = np.linalg.norm(fit_orth[:, np.newaxis] * w, axis=0) ** 2 @@ -184,9 +222,17 @@ def _split_gof(M, X, gain): @verbose -def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M, - gain_active, active_is_idx=False, - verbose=None): +def _make_dipoles_sparse( + X, + active_set, + forward, + tmin, + tstep, + M, + gain_active, + active_is_idx=False, + verbose=None, +): times = tmin + tstep * np.arange(X.shape[1]) if not active_is_idx: @@ -214,21 +260,26 @@ def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M, dipoles = [] for k, i_dip in enumerate(active_idx): - i_pos = forward['source_rr'][i_dip][np.newaxis, :] + i_pos = forward["source_rr"][i_dip][np.newaxis, :] i_pos = i_pos.repeat(len(times), axis=0) - X_ = X[k * n_dip_per_pos: (k + 1) * n_dip_per_pos] + X_ = X[k * n_dip_per_pos : (k + 1) * n_dip_per_pos] if n_dip_per_pos == 1: amplitude = X_[0] - i_ori = forward['source_nn'][i_dip][np.newaxis, :] + i_ori = forward["source_nn"][i_dip][np.newaxis, :] i_ori = i_ori.repeat(len(times), axis=0) else: - if forward['surf_ori']: - X_ = np.dot(forward['source_nn'][ - i_dip * n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T, X_) + if forward["surf_ori"]: + X_ = np.dot( + forward["source_nn"][ + i_dip * n_dip_per_pos : (i_dip + 1) * n_dip_per_pos + ].T, + X_, + ) amplitude = np.linalg.norm(X_, axis=0) i_ori = np.zeros((len(times), 3)) - i_ori[amplitude > 0.] = (X_[:, amplitude > 0.] 
/ - amplitude[amplitude > 0.]).T + i_ori[amplitude > 0.0] = ( + X_[:, amplitude > 0.0] / amplitude[amplitude > 0.0] + ).T dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof_split[k])) @@ -252,47 +303,68 @@ def make_stc_from_dipoles(dipoles, src, verbose=None): stc : SourceEstimate The source estimate. """ - logger.info('Converting dipoles into a SourceEstimate.') + logger.info("Converting dipoles into a SourceEstimate.") if isinstance(dipoles, Dipole): dipoles = [dipoles] if not isinstance(dipoles, list): - raise ValueError('Dipoles must be an instance of Dipole or ' - 'a list of instances of Dipole. ' - 'Got %s!' % type(dipoles)) + raise ValueError( + "Dipoles must be an instance of Dipole or " + "a list of instances of Dipole. " + f"Got {type(dipoles)}!" + ) tmin = dipoles[0].times[0] tstep = dipoles[0].times[1] - tmin X = np.zeros((len(dipoles), len(dipoles[0].times))) - source_rr = np.concatenate([_src['rr'][_src['vertno'], :] for _src in src], - axis=0) - n_lh_points = len(src[0]['vertno']) + source_rr = np.concatenate([_src["rr"][_src["vertno"], :] for _src in src], axis=0) + n_lh_points = len(src[0]["vertno"]) lh_vertno = list() rh_vertno = list() for i in range(len(dipoles)): if not np.all(dipoles[i].pos == dipoles[i].pos[0]): - raise ValueError('Only dipoles with fixed position over time ' - 'are supported!') + raise ValueError( + "Only dipoles with fixed position over time are supported!" + ) X[i] = dipoles[i].amplitude idx = np.all(source_rr == dipoles[i].pos[0], axis=1) idx = np.where(idx)[0][0] if idx < n_lh_points: - lh_vertno.append(src[0]['vertno'][idx]) + lh_vertno.append(src[0]["vertno"][idx]) else: - rh_vertno.append(src[1]['vertno'][idx - n_lh_points]) - vertices = [np.array(lh_vertno).astype(int), - np.array(rh_vertno).astype(int)] - stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep, - subject=src._subject) - logger.info('[done]') + rh_vertno.append(src[1]["vertno"][idx - n_lh_points]) + vertices = [np.array(lh_vertno).astype(int), np.array(rh_vertno).astype(int)] + stc = SourceEstimate( + X, vertices=vertices, tmin=tmin, tstep=tstep, subject=src._subject + ) + logger.info("[done]") return stc @verbose -def mixed_norm(evoked, forward, noise_cov, alpha='sure', loose='auto', - depth=0.8, maxit=3000, tol=1e-4, active_set_size=10, - debias=True, time_pca=True, weights=None, weights_min=0., - solver='auto', n_mxne_iter=1, return_residual=False, - return_as_dipoles=False, dgap_freq=10, rank=None, pick_ori=None, - sure_alpha_grid="auto", random_state=None, verbose=None): +def mixed_norm( + evoked, + forward, + noise_cov, + alpha="sure", + loose="auto", + depth=0.8, + maxit=3000, + tol=1e-4, + active_set_size=10, + debias=True, + time_pca=True, + weights=None, + weights_min=0.0, + solver="auto", + n_mxne_iter=1, + return_residual=False, + return_as_dipoles=False, + dgap_freq=10, + rank=None, + pick_ori=None, + sure_alpha_grid="auto", + random_state=None, + verbose=None, +): """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE). Compute L1/L2 mixed-norm solution :footcite:`GramfortEtAl2012` or L0.5/L2 @@ -381,27 +453,36 @@ def mixed_norm(evoked, forward, noise_cov, alpha='sure', loose='auto', ---------- .. footbibliography:: """ - from scipy import linalg - _validate_type(alpha, ('numeric', str), 'alpha') + _validate_type(alpha, ("numeric", str), "alpha") if isinstance(alpha, str): - _check_option('alpha', alpha, ('sure',)) - elif not 0. <= alpha < 100: - raise ValueError('If not equal to "sure" alpha must be in [0, 100). 
' - 'Got alpha = %s' % alpha) + _check_option("alpha", alpha, ("sure",)) + elif not 0.0 <= alpha < 100: + raise ValueError( + f'If not equal to "sure" alpha must be in [0, 100). Got alpha = {alpha}' + ) if n_mxne_iter < 1: - raise ValueError('MxNE has to be computed at least 1 time. ' - 'Requires n_mxne_iter >= 1, got %d' % n_mxne_iter) - if dgap_freq <= 0.: - raise ValueError('dgap_freq must be a positive integer.' - ' Got dgap_freq = %s' % dgap_freq) - if not (isinstance(sure_alpha_grid, (np.ndarray, list)) or - sure_alpha_grid == "auto"): - raise ValueError('If not equal to "auto" sure_alpha_grid must be an ' - 'array. Got %s' % type(sure_alpha_grid)) - if ((isinstance(sure_alpha_grid, str) and sure_alpha_grid != "auto") - and (isinstance(alpha, str) and alpha != "sure")): - raise Exception('If sure_alpha_grid is manually specified, alpha must ' - 'be "sure". Got %s' % alpha) + raise ValueError( + "MxNE has to be computed at least 1 time. " + f"Requires n_mxne_iter >= 1, got {n_mxne_iter}" + ) + if dgap_freq <= 0.0: + raise ValueError( + f"dgap_freq must be a positive integer. Got dgap_freq = {dgap_freq}" + ) + if not ( + isinstance(sure_alpha_grid, np.ndarray | list) or sure_alpha_grid == "auto" + ): + raise ValueError( + 'If not equal to "auto" sure_alpha_grid must be an ' + f"array. Got {type(sure_alpha_grid)}" + ) + if (isinstance(sure_alpha_grid, str) and sure_alpha_grid != "auto") and ( + isinstance(alpha, str) and alpha != "sure" + ): + raise Exception( + "If sure_alpha_grid is manually specified, alpha must " + f'be "sure". Got {alpha}' + ) pca = True if not isinstance(evoked, list): evoked = [evoked] @@ -409,24 +490,31 @@ def mixed_norm(evoked, forward, noise_cov, alpha='sure', loose='auto', _check_reference(evoked[0]) all_ch_names = evoked[0].ch_names - if not all(all_ch_names == evoked[i].ch_names - for i in range(1, len(evoked))): - raise Exception('All the datasets must have the same good channels.') + if not all(all_ch_names == evoked[i].ch_names for i in range(1, len(evoked))): + raise Exception("All the datasets must have the same good channels.") forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain( - forward, evoked[0].info, noise_cov, pca, depth, loose, rank, - weights, weights_min) + forward, + evoked[0].info, + noise_cov, + pca, + depth, + loose, + rank, + weights, + weights_min, + ) _check_ori(pick_ori, forward) - sel = [all_ch_names.index(name) for name in gain_info['ch_names']] + sel = [all_ch_names.index(name) for name in gain_info["ch_names"]] M = np.concatenate([e.data[sel] for e in evoked], axis=1) # Whiten data - logger.info('Whitening data matrix.') + logger.info("Whitening data matrix.") M = np.dot(whitener, M) if time_pca: - U, s, Vh = linalg.svd(M, full_matrices=False) + U, s, Vh = _safe_svd(M, full_matrices=False) if not isinstance(time_pca, bool) and isinstance(time_pca, int): U = U[:, :time_pca] s = s[:time_pca] @@ -447,24 +535,52 @@ def mixed_norm(evoked, forward, noise_cov, alpha='sure', loose='auto', if isinstance(sure_alpha_grid, str) and sure_alpha_grid == "auto": alpha_grid = np.geomspace(100, 10, num=15) X, active_set, best_alpha_ = _compute_mxne_sure( - M, gain, alpha_grid, sigma=1, random_state=random_state, - n_mxne_iter=n_mxne_iter, maxit=maxit, tol=tol, - n_orient=n_dip_per_pos, active_set_size=active_set_size, - debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose) - logger.info('Selected alpha: %s' % best_alpha_) + M, + gain, + alpha_grid, + sigma=1, + random_state=random_state, + 
n_mxne_iter=n_mxne_iter, + maxit=maxit, + tol=tol, + n_orient=n_dip_per_pos, + active_set_size=active_set_size, + debias=debias, + solver=solver, + dgap_freq=dgap_freq, + verbose=verbose, + ) + logger.info(f"Selected alpha: {best_alpha_}") else: if n_mxne_iter == 1: X, active_set, E = mixed_norm_solver( - M, gain, alpha, maxit=maxit, tol=tol, - active_set_size=active_set_size, n_orient=n_dip_per_pos, - debias=debias, solver=solver, dgap_freq=dgap_freq, - verbose=verbose) + M, + gain, + alpha, + maxit=maxit, + tol=tol, + active_set_size=active_set_size, + n_orient=n_dip_per_pos, + debias=debias, + solver=solver, + dgap_freq=dgap_freq, + verbose=verbose, + ) else: X, active_set, E = iterative_mixed_norm_solver( - M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol, - n_orient=n_dip_per_pos, active_set_size=active_set_size, - debias=debias, solver=solver, dgap_freq=dgap_freq, - verbose=verbose) + M, + gain, + alpha, + n_mxne_iter, + maxit=maxit, + tol=tol, + n_orient=n_dip_per_pos, + active_set_size=active_set_size, + debias=debias, + solver=solver, + dgap_freq=dgap_freq, + verbose=verbose, + ) if time_pca: X = np.dot(X, Vh) @@ -493,25 +609,30 @@ def mixed_norm(evoked, forward, noise_cov, alpha='sure', loose='auto', cnt = 0 for e in evoked: tmin = e.times[0] - tstep = 1.0 / e.info['sfreq'] - Xe = X[:, cnt:(cnt + len(e.times))] + tstep = 1.0 / e.info["sfreq"] + Xe = X[:, cnt : (cnt + len(e.times))] if return_as_dipoles: out = _make_dipoles_sparse( - Xe, active_set, forward, tmin, tstep, - M[:, cnt:(cnt + len(e.times))], - gain_active) + Xe, + active_set, + forward, + tmin, + tstep, + M[:, cnt : (cnt + len(e.times))], + gain_active, + ) else: out = _make_sparse_stc( - Xe, active_set, forward, tmin, tstep, pick_ori=pick_ori) + Xe, active_set, forward, tmin, tstep, pick_ori=pick_ori + ) outs.append(out) cnt += len(e.times) if return_residual: - residual.append(_compute_residual(forward, e, Xe, active_set, - gain_info)) + residual.append(_compute_residual(forward, e, Xe, active_set, gain_info)) - _log_exp_var(M, M_estimate, prefix='') - logger.info('[done]') + _log_exp_var(M, M_estimate, prefix="") + logger.info("[done]") if len(outs) == 1: out = outs[0] @@ -528,12 +649,12 @@ def mixed_norm(evoked, forward, noise_cov, alpha='sure', loose='auto', def _window_evoked(evoked, size): """Window evoked (size in seconds).""" - if isinstance(size, (float, int)): + if isinstance(size, float | int): lsize = rsize = float(size) else: lsize, rsize = size evoked = evoked.copy() - sfreq = float(evoked.info['sfreq']) + sfreq = float(evoked.info["sfreq"]) lsize = int(lsize * sfreq) rsize = int(rsize * sfreq) lhann = np.hanning(lsize * 2)[:lsize] @@ -544,13 +665,31 @@ def _window_evoked(evoked, size): @verbose -def tf_mixed_norm(evoked, forward, noise_cov, - loose='auto', depth=0.8, maxit=3000, - tol=1e-4, weights=None, weights_min=0., pca=True, - debias=True, wsize=64, tstep=4, window=0.02, - return_residual=False, return_as_dipoles=False, alpha=None, - l1_ratio=None, dgap_freq=10, rank=None, pick_ori=None, - n_tfmxne_iter=1, verbose=None): +def tf_mixed_norm( + evoked, + forward, + noise_cov, + loose="auto", + depth=0.8, + maxit=3000, + tol=1e-4, + weights=None, + weights_min=0.0, + pca=True, + debias=True, + wsize=64, + tstep=4, + window=0.02, + return_residual=False, + return_as_dipoles=False, + alpha=None, + l1_ratio=None, + dgap_freq=10, + rank=None, + pick_ori=None, + n_tfmxne_iter=1, + verbose=None, +): """Time-Frequency Mixed-norm estimate (TF-MxNE). 
Compute L1/L2 + L1 mixed-norm solution on time-frequency @@ -643,34 +782,36 @@ def tf_mixed_norm(evoked, forward, noise_cov, all_ch_names = evoked.ch_names info = evoked.info - if not (0. <= alpha < 100.): - raise ValueError('alpha must be in [0, 100). ' - 'Got alpha = %s' % alpha) + if not (0.0 <= alpha < 100.0): + raise ValueError(f"alpha must be in [0, 100). Got alpha = {alpha}") - if not (0. <= l1_ratio <= 1.): - raise ValueError('l1_ratio must be in range [0, 1].' - ' Got l1_ratio = %s' % l1_ratio) - alpha_space = alpha * (1. - l1_ratio) + if not (0.0 <= l1_ratio <= 1.0): + raise ValueError(f"l1_ratio must be in range [0, 1]. Got l1_ratio = {l1_ratio}") + alpha_space = alpha * (1.0 - l1_ratio) alpha_time = alpha * l1_ratio if n_tfmxne_iter < 1: - raise ValueError('TF-MxNE has to be computed at least 1 time. ' - 'Requires n_tfmxne_iter >= 1, got %s' % n_tfmxne_iter) + raise ValueError( + "TF-MxNE has to be computed at least 1 time. " + f"Requires n_tfmxne_iter >= 1, got {n_tfmxne_iter}" + ) - if dgap_freq <= 0.: - raise ValueError('dgap_freq must be a positive integer.' - ' Got dgap_freq = %s' % dgap_freq) + if dgap_freq <= 0.0: + raise ValueError( + f"dgap_freq must be a positive integer. Got dgap_freq = {dgap_freq}" + ) tstep = np.atleast_1d(tstep) wsize = np.atleast_1d(wsize) if len(tstep) != len(wsize): - raise ValueError('The same number of window sizes and steps must be ' - 'passed. Got tstep = %s and wsize = %s' % - (tstep, wsize)) + raise ValueError( + "The same number of window sizes and steps must be " + f"passed. Got tstep = {tstep} and wsize = {wsize}" + ) forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain( - forward, evoked.info, noise_cov, pca, depth, loose, rank, - weights, weights_min) + forward, evoked.info, noise_cov, pca, depth, loose, rank, weights, weights_min + ) _check_ori(pick_ori, forward) n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 @@ -682,7 +823,7 @@ def tf_mixed_norm(evoked, forward, noise_cov, M = evoked.data[sel] # Whiten data - logger.info('Whitening data matrix.') + logger.info("Whitening data matrix.") M = np.dot(whitener, M) n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int) @@ -699,18 +840,38 @@ def tf_mixed_norm(evoked, forward, noise_cov, if n_tfmxne_iter == 1: X, active_set, E = tf_mixed_norm_solver( - M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep, - maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos, - dgap_freq=dgap_freq, debias=debias) + M, + gain, + alpha_space, + alpha_time, + wsize=wsize, + tstep=tstep, + maxit=maxit, + tol=tol, + verbose=verbose, + n_orient=n_dip_per_pos, + dgap_freq=dgap_freq, + debias=debias, + ) else: X, active_set, E = iterative_tf_mixed_norm_solver( - M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep, - n_tfmxne_iter=n_tfmxne_iter, maxit=maxit, tol=tol, verbose=verbose, - n_orient=n_dip_per_pos, dgap_freq=dgap_freq, debias=debias) + M, + gain, + alpha_space, + alpha_time, + wsize=wsize, + tstep=tstep, + n_tfmxne_iter=n_tfmxne_iter, + maxit=maxit, + tol=tol, + verbose=verbose, + n_orient=n_dip_per_pos, + dgap_freq=dgap_freq, + debias=debias, + ) if active_set.sum() == 0: - raise Exception("No active dipoles found. " - "alpha_space/alpha_time are too big.") + raise Exception("No active dipoles found. 
alpha_space/alpha_time are too big.") # Compute estimated whitened sensor data for each dipole (dip, ch, time) gain_active = gain[:, active_set] @@ -725,19 +886,23 @@ def tf_mixed_norm(evoked, forward, noise_cov, gain_active /= source_weighting[active_set] if return_residual: - residual = _compute_residual( - forward, evoked, X, active_set, gain_info) + residual = _compute_residual(forward, evoked, X, active_set, gain_info) if return_as_dipoles: out = _make_dipoles_sparse( - X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'], - M, gain_active) + X, active_set, forward, evoked.times[0], 1.0 / info["sfreq"], M, gain_active + ) else: out = _make_sparse_stc( - X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'], - pick_ori=pick_ori) + X, + active_set, + forward, + evoked.times[0], + 1.0 / info["sfreq"], + pick_ori=pick_ori, + ) - logger.info('[done]') + logger.info("[done]") if return_residual: out = out, residual @@ -746,9 +911,22 @@ def tf_mixed_norm(evoked, forward, noise_cov, @verbose -def _compute_mxne_sure(M, gain, alpha_grid, sigma, n_mxne_iter, maxit, tol, - n_orient, active_set_size, debias, solver, dgap_freq, - random_state, verbose): +def _compute_mxne_sure( + M, + gain, + alpha_grid, + sigma, + n_mxne_iter, + maxit, + tol, + n_orient, + active_set_size, + debias, + solver, + dgap_freq, + random_state, + verbose, +): """Stein Unbiased Risk Estimator (SURE). Implements the finite-difference Monte-Carlo approximation @@ -801,26 +979,46 @@ def _compute_mxne_sure(M, gain, alpha_grid, sigma, n_mxne_iter, maxit, tol, ---------- .. footbibliography:: """ + def g(w): return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient))) def gprime(w): - return 2. * np.repeat(g(w), n_orient).ravel() + return 2.0 * np.repeat(g(w), n_orient).ravel() - def _run_solver(alpha, M, n_mxne_iter, as_init=None, X_init=None, - w_init=None): + def _run_solver(alpha, M, n_mxne_iter, as_init=None, X_init=None, w_init=None): if n_mxne_iter == 1: X, active_set, _ = mixed_norm_solver( - M, gain, alpha, maxit=maxit, tol=tol, - active_set_size=active_set_size, n_orient=n_orient, - debias=debias, solver=solver, dgap_freq=dgap_freq, - active_set_init=as_init, X_init=X_init, verbose=False) + M, + gain, + alpha, + maxit=maxit, + tol=tol, + active_set_size=active_set_size, + n_orient=n_orient, + debias=debias, + solver=solver, + dgap_freq=dgap_freq, + active_set_init=as_init, + X_init=X_init, + verbose=False, + ) else: X, active_set, _ = iterative_mixed_norm_solver( - M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol, - n_orient=n_orient, active_set_size=active_set_size, - debias=debias, solver=solver, dgap_freq=dgap_freq, - weight_init=w_init, verbose=False) + M, + gain, + alpha, + n_mxne_iter, + maxit=maxit, + tol=tol, + n_orient=n_orient, + active_set_size=active_set_size, + debias=debias, + solver=solver, + dgap_freq=dgap_freq, + weight_init=w_init, + verbose=False, + ) return X, active_set def _fit_on_grid(gain, M, eps, delta): @@ -829,9 +1027,9 @@ def _fit_on_grid(gain, M, eps, delta): active_sets, active_sets_eps = [], [] M_eps = M + eps * delta # warm start - first iteration (leverages convexity) - logger.info('Warm starting...') + logger.info("Warm starting...") for j, alpha in enumerate(alpha_grid): - logger.info('alpha: %s' % alpha) + logger.info(f"alpha: {alpha}") X, a_set = _run_solver(alpha, M, 1) X_eps, a_set_eps = _run_solver(alpha, M_eps, 1) coefs_grid_1_0[j][a_set, :] = X @@ -844,20 +1042,19 @@ def _fit_on_grid(gain, M, eps, delta): else: coefs_grid_1 = coefs_grid_1_0.copy() coefs_grid_2 = 
coefs_grid_2_0.copy() - logger.info('Fitting SURE on grid.') + logger.info("Fitting SURE on grid.") for j, alpha in enumerate(alpha_grid): - logger.info('alpha: %s' % alpha) + logger.info(f"alpha: {alpha}") if active_sets[j].sum() > 0: w = gprime(coefs_grid_1[j]) - X, a_set = _run_solver(alpha, M, n_mxne_iter - 1, - w_init=w) + X, a_set = _run_solver(alpha, M, n_mxne_iter - 1, w_init=w) coefs_grid_1[j][a_set, :] = X active_sets[j] = a_set if active_sets_eps[j].sum() > 0: w_eps = gprime(coefs_grid_2[j]) - X_eps, a_set_eps = _run_solver(alpha, M_eps, - n_mxne_iter - 1, - w_init=w_eps) + X_eps, a_set_eps = _run_solver( + alpha, M_eps, n_mxne_iter - 1, w_init=w_eps + ) coefs_grid_2[j][a_set_eps, :] = X_eps active_sets_eps[j] = a_set_eps @@ -867,8 +1064,8 @@ def _compute_sure_val(coef1, coef2, gain, M, sigma, delta, eps): n_sensors, n_times = gain.shape[0], M.shape[1] dof = (gain @ (coef2 - coef1) * delta).sum() / eps df_term = np.linalg.norm(M - gain @ coef1) ** 2 - sure = df_term - n_sensors * n_times * sigma ** 2 - sure += 2 * dof * sigma ** 2 + sure = df_term - n_sensors * n_times * sigma**2 + sure += 2 * dof * sigma**2 return sure sure_path = np.empty(len(alpha_grid)) @@ -882,10 +1079,9 @@ def _compute_sure_val(coef1, coef2, gain, M, sigma, delta, eps): logger.info("Computing SURE values on grid.") for i, (coef1, coef2) in enumerate(zip(coefs_grid_1, coefs_grid_2)): - sure_path[i] = _compute_sure_val( - coef1, coef2, gain, M, sigma, delta, eps) + sure_path[i] = _compute_sure_val(coef1, coef2, gain, M, sigma, delta, eps) if verbose: - logger.info("alpha %s :: sure %s" % (alpha_grid[i], sure_path[i])) + logger.info(f"alpha {alpha_grid[i]} :: sure {sure_path[i]}") best_alpha_ = alpha_grid[np.argmin(sure_path)] X = coefs_grid_1[np.argmin(sure_path)] diff --git a/mne/inverse_sparse/mxne_optim.py b/mne/inverse_sparse/mxne_optim.py index 587f1744400..528cc140654 100644 --- a/mne/inverse_sparse/mxne_optim.py +++ b/mne/inverse_sparse/mxne_optim.py @@ -1,22 +1,28 @@ -# Author: Alexandre Gramfort -# Daniel Strohmeier -# Mathurin Massias -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
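# --- Editor's aside (not part of the diff): a self-contained numpy sketch of the SURE
# value assembled in ``_compute_sure_val`` above: residual sum of squares, minus
# n_sensors * n_times * sigma**2, plus 2 * sigma**2 times a finite-difference
# (Monte-Carlo) estimate of the degrees of freedom. The toy shapes, noise level and
# perturbation below are illustrative assumptions only.
import numpy as np

def sure_value(coef1, coef2, gain, M, sigma, delta, eps):
    """SURE criterion for one alpha on the grid (cf. _compute_sure_val)."""
    n_sensors, n_times = gain.shape[0], M.shape[1]
    dof = (gain @ (coef2 - coef1) * delta).sum() / eps   # dof estimated from the perturbed fit
    df_term = np.linalg.norm(M - gain @ coef1) ** 2      # data-fit (residual) term
    return df_term - n_sensors * n_times * sigma**2 + 2 * dof * sigma**2

rng = np.random.default_rng(0)
gain = rng.standard_normal((10, 30))                   # toy gain matrix (n_sensors, n_sources)
coef = rng.standard_normal((30, 5))                    # toy source estimate (n_sources, n_times)
M = gain @ coef + 0.1 * rng.standard_normal((10, 5))   # toy whitened data
delta = rng.standard_normal((10, 5))                   # random perturbation direction
coef_eps = coef + 1e-3 * rng.standard_normal((30, 5))  # stand-in for the fit on M + eps * delta
print(sure_value(coef, coef_eps, gain, M, sigma=0.1, delta=delta, eps=1e-3))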
import functools from math import sqrt import numpy as np +from ..time_frequency._stft import istft, stft, stft_norm1, stft_norm2 +from ..utils import ( + _check_option, + _get_blas_funcs, + _validate_type, + logger, + sum_squared, + verbose, + warn, +) from .mxne_debiasing import compute_bias -from ..utils import (logger, verbose, sum_squared, warn, _get_blas_funcs, - _validate_type, _check_option) -from ..time_frequency._stft import stft_norm1, stft_norm2, stft, istft @functools.lru_cache(None) def _get_dgemm(): - return _get_blas_funcs(np.float64, 'gemm') + return _get_blas_funcs(np.float64, "gemm") def groups_norm2(A, n_orient): @@ -121,23 +127,35 @@ def dgap_l21(M, G, X, active_set, alpha, n_orient): dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False) scaling = alpha / dual_norm scaling = min(scaling, 1.0) - d_obj = (scaling - 0.5 * (scaling ** 2)) * nR2 + scaling * np.sum(R * GX) + d_obj = (scaling - 0.5 * (scaling**2)) * nR2 + scaling * np.sum(R * GX) gap = p_obj - d_obj return gap, p_obj, d_obj, R -@verbose -def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000, - tol=1e-8, verbose=None, init=None, n_orient=1, - dgap_freq=10): +def _mixed_norm_solver_cd( + M, + G, + alpha, + lipschitz_constant, + maxit=10000, + tol=1e-8, + init=None, + n_orient=1, + dgap_freq=10, +): """Solve L21 inverse problem with coordinate descent.""" from sklearn.linear_model import MultiTaskLasso assert M.ndim == G.ndim and M.shape[0] == G.shape[0] - clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol / sum_squared(M), - fit_intercept=False, max_iter=maxit, warm_start=True) + clf = MultiTaskLasso( + alpha=alpha / len(M), + tol=tol / sum_squared(M), + fit_intercept=False, + max_iter=maxit, + warm_start=True, + ) if init is not None: clf.coef_ = init.T else: @@ -151,10 +169,19 @@ def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000, return X, active_set, p_obj -@verbose -def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200, - tol=1e-8, verbose=None, init=None, n_orient=1, - dgap_freq=10, use_accel=True, K=5): +def _mixed_norm_solver_bcd( + M, + G, + alpha, + lipschitz_constant, + maxit=200, + tol=1e-8, + init=None, + n_orient=1, + dgap_freq=10, + use_accel=True, + K=5, +): """Solve L21 inverse problem with block coordinate descent.""" _, n_times = M.shape _, n_sources = G.shape @@ -168,7 +195,7 @@ def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200, R = M - np.dot(G, X) E = [] # track primal objective function - highest_d_obj = - np.inf + highest_d_obj = -np.inf active_set = np.zeros(n_sources, dtype=bool) # start with full AS alpha_lc = alpha / lipschitz_constant @@ -182,7 +209,7 @@ def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200, # Ensure these are correct for dgemm assert R.dtype == np.float64 assert G.dtype == np.float64 - one_ovr_lc = 1. 
/ lipschitz_constant + one_ovr_lc = 1.0 / lipschitz_constant # assert that all the multiplied matrices are fortran contiguous assert X.T.flags.f_contiguous @@ -198,17 +225,22 @@ def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200, _bcd(G, X, R, active_set, one_ovr_lc, n_orient, alpha_lc, list_G_j_c) if (i + 1) % dgap_freq == 0: - _, p_obj, d_obj, _ = dgap_l21(M, G, X[active_set], active_set, - alpha, n_orient) + _, p_obj, d_obj, _ = dgap_l21( + M, G, X[active_set], active_set, alpha, n_orient + ) highest_d_obj = max(d_obj, highest_d_obj) gap = p_obj - highest_d_obj E.append(p_obj) - logger.debug("Iteration %d :: p_obj %f :: dgap %f :: n_active %d" % - (i + 1, p_obj, gap, np.sum(active_set) / n_orient)) + logger.debug( + "Iteration %d :: p_obj %f :: dgap %f :: n_active %d", + i + 1, + p_obj, + gap, + np.sum(active_set) / n_orient, + ) if gap < tol: - logger.debug('Convergence reached ! (gap: %s < %s)' - % (gap, tol)) + logger.debug(f"Convergence reached ! (gap: {gap} < {tol})") break # using Anderson acceleration of the primal variable for faster @@ -226,23 +258,21 @@ def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200, # z = np.linalg.solve(C, np.ones(K)) u, s, _ = np.linalg.svd(C, hermitian=True) if s[-1] <= 1e-6 * s[0] or not np.isfinite(s).all(): - logger.debug("Iteration %d: LinAlg Error" % (i + 1)) + logger.debug("Iteration %d: LinAlg Error", i + 1) continue z = ((u * 1 / s) @ u.T).sum(0) c = z / z.sum() - X_acc = np.sum( - last_K_X[:-1] * c[:, None, None], axis=0 - ) + X_acc = np.sum(last_K_X[:-1] * c[:, None, None], axis=0) _grp_norm2_acc = groups_norm2(X_acc, n_orient) active_set_acc = _grp_norm2_acc != 0 if n_orient > 1: active_set_acc = np.kron( active_set_acc, np.ones(n_orient, dtype=bool) ) - p_obj = _primal_l21(M, G, X[active_set], active_set, alpha, - n_orient)[0] - p_obj_acc = _primal_l21(M, G, X_acc[active_set_acc], - active_set_acc, alpha, n_orient)[0] + p_obj = _primal_l21(M, G, X[active_set], active_set, alpha, n_orient)[0] + p_obj_acc = _primal_l21( + M, G, X_acc[active_set_acc], active_set_acc, alpha, n_orient + )[0] if p_obj_acc < p_obj: X = X_acc active_set = active_set_acc @@ -278,43 +308,54 @@ def _bcd(G, X, R, active_set, one_ovr_lc, n_orient, alpha_lc, list_G_j_c): alpha_lc: array, shape (n_positions, ) alpha * (Lipschitz constants). """ - X_j_new = np.zeros_like(X[:n_orient, :], order='C') + X_j_new = np.zeros_like(X[:n_orient, :], order="C") dgemm = _get_dgemm() for j, G_j_c in enumerate(list_G_j_c): idx = slice(j * n_orient, (j + 1) * n_orient) G_j = G[:, idx] X_j = X[idx] - dgemm(alpha=one_ovr_lc[j], beta=0., a=R.T, b=G_j, c=X_j_new.T, - overwrite_c=True) + dgemm( + alpha=one_ovr_lc[j], beta=0.0, a=R.T, b=G_j, c=X_j_new.T, overwrite_c=True + ) # X_j_new = G_j.T @ R # Mathurin's trick to avoid checking all the entries was_non_zero = X_j[0, 0] != 0 # was_non_zero = np.any(X_j) if was_non_zero: - dgemm(alpha=1., beta=1., a=X_j.T, b=G_j_c.T, c=R.T, - overwrite_c=True) + dgemm(alpha=1.0, beta=1.0, a=X_j.T, b=G_j_c.T, c=R.T, overwrite_c=True) # R += np.dot(G_j, X_j) X_j_new += X_j block_norm = sqrt(sum_squared(X_j_new)) if block_norm <= alpha_lc[j]: - X_j.fill(0.) 
+ X_j.fill(0.0) active_set[idx] = False else: shrink = max(1.0 - alpha_lc[j] / block_norm, 0.0) X_j_new *= shrink - dgemm(alpha=-1., beta=1., a=X_j_new.T, b=G_j_c.T, c=R.T, - overwrite_c=True) + dgemm(alpha=-1.0, beta=1.0, a=X_j_new.T, b=G_j_c.T, c=R.T, overwrite_c=True) # R -= np.dot(G_j, X_j_new) X_j[:] = X_j_new active_set[idx] = True @verbose -def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, - active_set_size=50, debias=True, n_orient=1, - solver='auto', return_gap=False, dgap_freq=10, - active_set_init=None, X_init=None): +def mixed_norm_solver( + M, + G, + alpha, + maxit=3000, + tol=1e-8, + verbose=None, + active_set_size=50, + debias=True, + n_orient=1, + solver="auto", + return_gap=False, + dgap_freq=10, + active_set_init=None, + X_init=None, +): """Solve L1/L2 mixed-norm inverse problem with active set strategy. See references :footcite:`GramfortEtAl2012,StrohmeierEtAl2016, @@ -375,7 +416,7 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, n_positions = n_dipoles // n_orient _, n_times = M.shape alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False) - logger.info("-- ALPHA MAX : %s" % alpha_max) + logger.info(f"-- ALPHA MAX : {alpha_max}") alpha = float(alpha) X = np.zeros((n_dipoles, n_times), dtype=G.dtype) @@ -385,31 +426,35 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, except ImportError: has_sklearn = False - _validate_type(solver, str, 'solver') - _check_option('solver', solver, ('cd', 'bcd', 'auto')) - if solver == 'auto': + _validate_type(solver, str, "solver") + _check_option("solver", solver, ("cd", "bcd", "auto")) + if solver == "auto": if has_sklearn and (n_orient == 1): - solver = 'cd' + solver = "cd" else: - solver = 'bcd' + solver = "bcd" - if solver == 'cd': + if solver == "cd": if n_orient == 1 and not has_sklearn: - warn('Scikit-learn >= 0.12 cannot be found. Using block coordinate' - ' descent instead of coordinate descent.') - solver = 'bcd' + warn( + "Scikit-learn >= 0.12 cannot be found. Using block coordinate" + " descent instead of coordinate descent." + ) + solver = "bcd" if n_orient > 1: - warn('Coordinate descent is only available for fixed orientation. ' - 'Using block coordinate descent instead of coordinate ' - 'descent') - solver = 'bcd' - - if solver == 'cd': + warn( + "Coordinate descent is only available for fixed orientation. 
" + "Using block coordinate descent instead of coordinate " + "descent" + ) + solver = "bcd" + + if solver == "cd": logger.info("Using coordinate descent") l21_solver = _mixed_norm_solver_cd lc = None else: - assert solver == 'bcd' + assert solver == "bcd" logger.info("Using block coordinate descent") l21_solver = _mixed_norm_solver_bcd G = np.asfortranarray(G) @@ -418,59 +463,75 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, else: lc = np.empty(n_positions) for j in range(n_positions): - G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)] + G_tmp = G[:, (j * n_orient) : ((j + 1) * n_orient)] lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) if active_set_size is not None: E = list() - highest_d_obj = - np.inf + highest_d_obj = -np.inf if X_init is not None and X_init.shape != (n_dipoles, n_times): - raise ValueError('Wrong dim for initialized coefficients.') - active_set = (active_set_init if active_set_init is not None else - np.zeros(n_dipoles, dtype=bool)) + raise ValueError("Wrong dim for initialized coefficients.") + active_set = ( + active_set_init + if active_set_init is not None + else np.zeros(n_dipoles, dtype=bool) + ) idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient)) new_active_idx = idx_large_corr[-active_set_size:] if n_orient > 1: - new_active_idx = (n_orient * new_active_idx[:, None] + - np.arange(n_orient)[None, :]).ravel() + new_active_idx = ( + n_orient * new_active_idx[:, None] + np.arange(n_orient)[None, :] + ).ravel() active_set[new_active_idx] = True as_size = np.sum(active_set) gap = np.inf for k in range(maxit): - if solver == 'bcd': + if solver == "bcd": lc_tmp = lc[active_set[::n_orient]] - elif solver == 'cd': + elif solver == "cd": lc_tmp = None else: lc_tmp = 1.01 * np.linalg.norm(G[:, active_set], ord=2) ** 2 - X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp, - maxit=maxit, tol=tol, init=X_init, - n_orient=n_orient, dgap_freq=dgap_freq) + X, as_, _ = l21_solver( + M, + G[:, active_set], + alpha, + lc_tmp, + maxit=maxit, + tol=tol, + init=X_init, + n_orient=n_orient, + dgap_freq=dgap_freq, + ) active_set[active_set] = as_.copy() idx_old_active_set = np.where(active_set)[0] - _, p_obj, d_obj, R = dgap_l21(M, G, X, active_set, alpha, - n_orient) + _, p_obj, d_obj, R = dgap_l21(M, G, X, active_set, alpha, n_orient) highest_d_obj = max(d_obj, highest_d_obj) gap = p_obj - highest_d_obj E.append(p_obj) - logger.info("Iteration %d :: p_obj %f :: dgap %f :: " - "n_active_start %d :: n_active_end %d" % ( - k + 1, p_obj, gap, as_size // n_orient, - np.sum(active_set) // n_orient)) + logger.info( + "Iteration %d :: p_obj %f :: dgap %f :: n_active_start %d :: n_active_" + "end %d", + k + 1, + p_obj, + gap, + as_size // n_orient, + np.sum(active_set) // n_orient, + ) if gap < tol: - logger.info('Convergence reached ! (gap: %s < %s)' - % (gap, tol)) + logger.info(f"Convergence reached ! 
(gap: {gap} < {tol})") break # add sources if not last iteration if k < (maxit - 1): - idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R), - n_orient)) + idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R), n_orient)) new_active_idx = idx_large_corr[-active_set_size:] if n_orient > 1: - new_active_idx = (n_orient * new_active_idx[:, None] + - np.arange(n_orient)[None, :]) + new_active_idx = ( + n_orient * new_active_idx[:, None] + + np.arange(n_orient)[None, :] + ) new_active_idx = new_active_idx.ravel() active_set[new_active_idx] = True idx_active_set = np.where(active_set)[0] @@ -479,10 +540,11 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, idx = np.searchsorted(idx_active_set, idx_old_active_set) X_init[idx] = X else: - warn('Did NOT converge ! (gap: %s > %s)' % (gap, tol)) + warn(f"Did NOT converge ! (gap: {gap} > {tol})") else: - X, active_set, E = l21_solver(M, G, alpha, lc, maxit=maxit, - tol=tol, n_orient=n_orient, init=None) + X, active_set, E = l21_solver( + M, G, alpha, lc, maxit=maxit, tol=tol, n_orient=n_orient, init=None + ) if return_gap: gap = dgap_l21(M, G, X, active_set, alpha, n_orient)[0] @@ -490,7 +552,7 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient) X *= bias[:, np.newaxis] - logger.info('Final active set size: %s' % (np.sum(active_set) // n_orient)) + logger.info("Final active set size: %s" % (np.sum(active_set) // n_orient)) if return_gap: return X, active_set, E, gap @@ -499,10 +561,21 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, @verbose -def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000, - tol=1e-8, verbose=None, active_set_size=50, - debias=True, n_orient=1, dgap_freq=10, - solver='auto', weight_init=None): +def iterative_mixed_norm_solver( + M, + G, + alpha, + n_mxne_iter, + maxit=3000, + tol=1e-8, + verbose=None, + active_set_size=50, + debias=True, + n_orient=1, + dgap_freq=10, + solver="auto", + weight_init=None, +): """Solve L0.5/L2 mixed-norm inverse problem with active set strategy. See reference :footcite:`StrohmeierEtAl2016`. @@ -551,20 +624,23 @@ def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000, ---------- .. footbibliography:: """ + def g(w): return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient))) def gprime(w): - return 2. * np.repeat(g(w), n_orient).ravel() + return 2.0 * np.repeat(g(w), n_orient).ravel() E = list() if weight_init is not None and weight_init.shape != (G.shape[1],): - raise ValueError('Wrong dimension for weight initialization. Got %s. ' - 'Expected %s.' % (weight_init.shape, (G.shape[1],))) + raise ValueError( + f"Wrong dimension for weight initialization. Got {weight_init.shape}. " + f"Expected {(G.shape[1],)}." 
+ ) weights = weight_init if weight_init is not None else np.ones(G.shape[1]) - active_set = (weights != 0) + active_set = weights != 0 weights = weights[active_set] X = np.zeros((G.shape[1], M.shape[1])) @@ -576,39 +652,67 @@ def gprime(w): if active_set_size is not None: if np.sum(active_set) > (active_set_size * n_orient): X, _active_set, _ = mixed_norm_solver( - M, G_tmp, alpha, debias=False, n_orient=n_orient, - maxit=maxit, tol=tol, active_set_size=active_set_size, - dgap_freq=dgap_freq, solver=solver, verbose=verbose) + M, + G_tmp, + alpha, + debias=False, + n_orient=n_orient, + maxit=maxit, + tol=tol, + active_set_size=active_set_size, + dgap_freq=dgap_freq, + solver=solver, + ) else: X, _active_set, _ = mixed_norm_solver( - M, G_tmp, alpha, debias=False, n_orient=n_orient, - maxit=maxit, tol=tol, active_set_size=None, - dgap_freq=dgap_freq, solver=solver, verbose=verbose) + M, + G_tmp, + alpha, + debias=False, + n_orient=n_orient, + maxit=maxit, + tol=tol, + active_set_size=None, + dgap_freq=dgap_freq, + solver=solver, + ) else: X, _active_set, _ = mixed_norm_solver( - M, G_tmp, alpha, debias=False, n_orient=n_orient, - maxit=maxit, tol=tol, active_set_size=None, - dgap_freq=dgap_freq, solver=solver, verbose=verbose) - - logger.info('active set size %d' % (_active_set.sum() / n_orient)) + M, + G_tmp, + alpha, + debias=False, + n_orient=n_orient, + maxit=maxit, + tol=tol, + active_set_size=None, + dgap_freq=dgap_freq, + solver=solver, + ) + + logger.info("active set size %d", _active_set.sum() / n_orient) if _active_set.sum() > 0: active_set[active_set] = _active_set # Reapply weights to have correct unit X *= weights[_active_set][:, np.newaxis] weights = gprime(X) - p_obj = 0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X), - 'fro') ** 2. + alpha * np.sum(g(X)) + p_obj = 0.5 * np.linalg.norm( + M - np.dot(G[:, active_set], X), "fro" + ) ** 2.0 + alpha * np.sum(g(X)) E.append(p_obj) # Check convergence - if ((k >= 1) and np.all(active_set == active_set_0) and - np.all(np.abs(X - X0) < tol)): - print('Convergence reached after %d reweightings!' % k) + if ( + (k >= 1) + and np.all(active_set == active_set_0) + and np.all(np.abs(X - X0) < tol) + ): + logger.info("Convergence reached after %d reweightings!", k) break else: active_set = np.zeros_like(active_set) - p_obj = 0.5 * np.linalg.norm(M) ** 2. + p_obj = 0.5 * np.linalg.norm(M) ** 2.0 E.append(p_obj) break @@ -622,50 +726,11 @@ def gprime(w): ############################################################################### # TF-MxNE -@verbose -def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None): - """Compute lipschitz constant for FISTA. - - It uses a power iteration method. - """ - n_times = M.shape[1] - n_points = G.shape[1] - iv = np.ones((n_points, n_times), dtype=np.float64) - v = phi(iv) - L = 1e100 - for it in range(100): - L_old = L - logger.info('Lipschitz estimation: iteration = %d' % it) - iv = np.real(phiT(v)) - Gv = np.dot(G, iv) - GtGv = np.dot(G.T, Gv) - w = phi(GtGv) - L = np.max(np.abs(w)) # l_inf norm - v = w / L - if abs((L - L_old) / L_old) < tol: - break - return L - -def safe_max_abs(A, ia): - """Compute np.max(np.abs(A[ia])) possible with empty A.""" - if np.sum(ia): # ia is not empty - return np.max(np.abs(A[ia])) - else: - return 0. 
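# --- Editor's aside (not part of the diff): the deleted ``tf_lipschitz_constant`` helper
# estimated a Lipschitz constant by power iteration in the time-frequency domain. Below
# is a plain-gain-matrix version of the same idea as a self-contained numpy sketch; the
# toy matrix, tolerance and iteration cap are illustrative assumptions. The surviving
# solvers instead take np.linalg.norm(G_j.T @ G_j, ord=2) block by block.
import numpy as np

def lipschitz_power_iteration(G, tol=1e-3, max_iter=100):
    """Estimate the largest eigenvalue of G.T @ G by power iteration."""
    rng = np.random.default_rng(0)
    v = rng.standard_normal(G.shape[1])
    L = 1e100
    for _ in range(max_iter):
        L_old = L
        w = G.T @ (G @ v)        # apply G.T @ G without forming it explicitly
        L = np.linalg.norm(w)    # current estimate of the largest eigenvalue
        v = w / L
        if abs(L - L_old) / L_old < tol:
            break
    return L

G = np.random.default_rng(1).standard_normal((20, 50))
print(lipschitz_power_iteration(G), np.linalg.norm(G, ord=2) ** 2)  # should roughly agree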
- - -def safe_max_abs_diff(A, ia, B, ib): - """Compute np.max(np.abs(A)) possible with empty A.""" - A = A[ia] if np.sum(ia) else 0.0 - B = B[ib] if np.sum(ia) else 0.0 - return np.max(np.abs(A - B)) - - -class _Phi(object): +class _Phi: """Have phi stft as callable w/o using a lambda that does not pickle.""" - def __init__(self, wsize, tstep, n_coefs, n_times): # noqa: D102 + def __init__(self, wsize, tstep, n_coefs, n_times): self.wsize = np.atleast_1d(wsize) self.tstep = np.atleast_1d(tstep) self.n_coefs = np.atleast_1d(n_coefs) @@ -677,38 +742,34 @@ def __init__(self, wsize, tstep, n_coefs, n_times): # noqa: D102 self.ops = list() for ws, ts in zip(self.wsize, self.tstep): self.ops.append( - stft(np.eye(n_times), ws, ts, - verbose=False).reshape(n_times, -1)) + stft(np.eye(n_times), ws, ts, verbose=False).reshape(n_times, -1) + ) def __call__(self, x): # noqa: D105 if self.n_dicts == 1: return x @ self.ops[0] else: - return np.hstack( - [x @ op for op in self.ops]) / np.sqrt(self.n_dicts) + return np.hstack([x @ op for op in self.ops]) / np.sqrt(self.n_dicts) - def norm(self, z, ord=2): + def norm(self, z, ord=2): # noqa: A002 """Squared L2 norm if ord == 2 and L1 norm if order == 1.""" if ord not in (1, 2): - raise ValueError('Only supported norm order are 1 and 2. ' - 'Got ord = %s' % ord) + raise ValueError(f"Only supported norm order are 1 and 2. Got ord = {ord}") stft_norm = stft_norm1 if ord == 1 else stft_norm2 - norm = 0. + norm = 0.0 if len(self.n_coefs) > 1: - z_ = np.array_split(np.atleast_2d(z), np.cumsum(self.n_coefs)[:-1], - axis=1) + z_ = np.array_split(np.atleast_2d(z), np.cumsum(self.n_coefs)[:-1], axis=1) else: z_ = [np.atleast_2d(z)] for i in range(len(z_)): - norm += stft_norm( - z_[i].reshape(-1, self.n_freqs[i], self.n_steps[i])) + norm += stft_norm(z_[i].reshape(-1, self.n_freqs[i], self.n_steps[i])) return norm -class _PhiT(object): +class _PhiT: """Have phi.T istft as callable w/o using a lambda that does not pickle.""" - def __init__(self, tstep, n_freqs, n_steps, n_times): # noqa: D102 + def __init__(self, tstep, n_freqs, n_steps, n_times): self.tstep = tstep self.n_freqs = n_freqs self.n_steps = n_steps @@ -721,10 +782,8 @@ def __init__(self, tstep, n_freqs, n_steps, n_times): # noqa: D102 nc = nf * ns self.n_coefs.append(nc) eye = np.eye(nc).reshape(nf, ns, nf, ns) - self.op_re.append(istft( - eye, ts, n_times).reshape(nc, n_times)) - self.op_im.append(istft( - eye * 1j, ts, n_times).reshape(nc, n_times)) + self.op_re.append(istft(eye, ts, n_times).reshape(nc, n_times)) + self.op_im.append(istft(eye * 1j, ts, n_times).reshape(nc, n_times)) def __call__(self, z): # noqa: D105 if self.n_dicts == 1: @@ -740,13 +799,12 @@ def __call__(self, z): # noqa: D105 def norm_l21_tf(Z, phi, n_orient, w_space=None): """L21 norm for TF.""" if Z.shape[0]: - l21_norm = np.sqrt( - phi.norm(Z, ord=2).reshape(-1, n_orient).sum(axis=1)) + l21_norm = np.sqrt(phi.norm(Z, ord=2).reshape(-1, n_orient).sum(axis=1)) if w_space is not None: l21_norm *= w_space l21_norm = l21_norm.sum() else: - l21_norm = 0. 
+ l21_norm = 0.0 return l21_norm @@ -754,18 +812,19 @@ def norm_l1_tf(Z, phi, n_orient, w_time): """L1 norm for TF.""" if Z.shape[0]: n_positions = Z.shape[0] // n_orient - Z_ = np.sqrt(np.sum( - (np.abs(Z) ** 2.).reshape((n_orient, -1), order='F'), axis=0)) - Z_ = Z_.reshape((n_positions, -1), order='F') + Z_ = np.sqrt( + np.sum((np.abs(Z) ** 2.0).reshape((n_orient, -1), order="F"), axis=0) + ) + Z_ = Z_.reshape((n_positions, -1), order="F") if w_time is not None: Z_ *= w_time l1_norm = phi.norm(Z_, ord=1).sum() else: - l1_norm = 0. + l1_norm = 0.0 return l1_norm -def norm_epsilon(Y, l1_ratio, phi, w_space=1., w_time=None): +def norm_epsilon(Y, l1_ratio, phi, w_space=1.0, w_time=None): """Weighted epsilon norm. The weighted epsilon norm is the dual norm of:: @@ -810,35 +869,35 @@ def norm_epsilon(Y, l1_ratio, phi, w_space=1., w_time=None): # Add negative freqs: count all freqs twice except first and last: freqs_count = np.full(len(Y), 2) - for i, fc in enumerate(np.array_split(freqs_count, - np.cumsum(phi.n_coefs)[:-1])): - fc[:phi.n_steps[i]] = 1 - fc[-phi.n_steps[i]:] = 1 + for i, fc in enumerate(np.array_split(freqs_count, np.cumsum(phi.n_coefs)[:-1])): + fc[: phi.n_steps[i]] = 1 + fc[-phi.n_steps[i] :] = 1 # exclude 0 weights: if w_time is not None: - nonzero_weights = (w_time != 0.0) + nonzero_weights = w_time != 0.0 Y = Y[nonzero_weights] freqs_count = freqs_count[nonzero_weights] w_time = w_time[nonzero_weights] norm_inf_Y = np.max(Y / w_time) if w_time is not None else np.max(Y) - if l1_ratio == 1.: + if l1_ratio == 1.0: # dual norm of L1 weighted is Linf with inverse weights return norm_inf_Y - elif l1_ratio == 0.: + elif l1_ratio == 0.0: # dual norm of L2 is L2 return np.sqrt(phi.norm(Y[None, :], ord=2).sum()) - if norm_inf_Y == 0.: - return 0. + if norm_inf_Y == 0.0: + return 0.0 # ignore some values of Y by lower bound on dual norm: if w_time is None: idx = Y > l1_ratio * norm_inf_Y else: - idx = Y > l1_ratio * np.max(Y / (w_space * (1. - l1_ratio) + - l1_ratio * w_time)) + idx = Y > l1_ratio * np.max( + Y / (w_space * (1.0 - l1_ratio) + l1_ratio * w_time) + ) if idx.sum() == 1: return norm_inf_Y @@ -859,18 +918,18 @@ def norm_epsilon(Y, l1_ratio, phi, w_space=1., w_time=None): K = Y.shape[0] if w_time is None: - p_sum_Y2 = np.cumsum(Y ** 2) + p_sum_Y2 = np.cumsum(Y**2) p_sum_w2 = np.arange(1, K + 1) p_sum_Yw = np.cumsum(Y) - upper = p_sum_Y2 / Y ** 2 - 2. * p_sum_Yw / Y + p_sum_w2 + upper = p_sum_Y2 / Y**2 - 2.0 * p_sum_Yw / Y + p_sum_w2 else: - p_sum_Y2 = np.cumsum(Y ** 2) - p_sum_w2 = np.cumsum(w_time ** 2) + p_sum_Y2 = np.cumsum(Y**2) + p_sum_w2 = np.cumsum(w_time**2) p_sum_Yw = np.cumsum(Y * w_time) - upper = (p_sum_Y2 / (Y / w_time) ** 2 - - 2. * p_sum_Yw / (Y / w_time) + p_sum_w2) - upper_greater = np.where(upper > w_space ** 2 * (1. - l1_ratio) ** 2 / - l1_ratio ** 2)[0] + upper = p_sum_Y2 / (Y / w_time) ** 2 - 2.0 * p_sum_Yw / (Y / w_time) + p_sum_w2 + upper_greater = np.where(upper > w_space**2 * (1.0 - l1_ratio) ** 2 / l1_ratio**2)[ + 0 + ] i0 = upper_greater[0] - 1 if upper_greater.size else K - 1 @@ -878,9 +937,9 @@ def norm_epsilon(Y, l1_ratio, phi, w_space=1., w_time=None): p_sum_w2 = p_sum_w2[i0] p_sum_Yw = p_sum_Yw[i0] - denom = l1_ratio ** 2 * p_sum_w2 - w_space ** 2 * (1. - l1_ratio) ** 2 + denom = l1_ratio**2 * p_sum_w2 - w_space**2 * (1.0 - l1_ratio) ** 2 if np.abs(denom) < 1e-10: - return p_sum_Y2 / (2. 
* l1_ratio * p_sum_Yw) + return p_sum_Y2 / (2.0 * l1_ratio * p_sum_Yw) else: delta = (l1_ratio * p_sum_Yw) ** 2 - p_sum_Y2 * denom return (l1_ratio * p_sum_Yw - np.sqrt(delta)) / denom @@ -918,24 +977,35 @@ def norm_epsilon_inf(G, R, phi, l1_ratio, n_orient, w_space=None, w_time=None): n_positions = G.shape[1] // n_orient GTRPhi = np.abs(phi(np.dot(G.T, R))) # norm over orientations: - GTRPhi = GTRPhi.reshape((n_orient, -1), order='F') + GTRPhi = GTRPhi.reshape((n_orient, -1), order="F") GTRPhi = np.linalg.norm(GTRPhi, axis=0) - GTRPhi = GTRPhi.reshape((n_positions, -1), order='F') - nu = 0. + GTRPhi = GTRPhi.reshape((n_positions, -1), order="F") + nu = 0.0 for idx in range(n_positions): GTRPhi_ = GTRPhi[idx] w_t = w_time[idx] if w_time is not None else None - w_s = w_space[idx] if w_space is not None else 1. - norm_eps = norm_epsilon(GTRPhi_, l1_ratio, phi, w_space=w_s, - w_time=w_t) + w_s = w_space[idx] if w_space is not None else 1.0 + norm_eps = norm_epsilon(GTRPhi_, l1_ratio, phi, w_space=w_s, w_time=w_t) if norm_eps > nu: nu = norm_eps return nu -def dgap_l21l1(M, G, Z, active_set, alpha_space, alpha_time, phi, phiT, - n_orient, highest_d_obj, w_space=None, w_time=None): +def dgap_l21l1( + M, + G, + Z, + active_set, + alpha_space, + alpha_time, + phi, + phiT, + n_orient, + highest_d_obj, + w_space=None, + w_time=None, +): """Duality gap for the time-frequency mixed norm inverse problem. See :footcite:`GramfortEtAl2012,NdiayeEtAl2016` @@ -1003,29 +1073,44 @@ def dgap_l21l1(M, G, Z, active_set, alpha_space, alpha_time, phi, phiT, p_obj = 0.5 * nR2 + alpha_space * penaltyl21 + alpha_time * penaltyl1 l1_ratio = alpha_time / (alpha_space + alpha_time) - dual_norm = norm_epsilon_inf(G, R, phi, l1_ratio, n_orient, - w_space=w_space, w_time=w_time) - scaling = min(1., (alpha_space + alpha_time) / dual_norm) + dual_norm = norm_epsilon_inf( + G, R, phi, l1_ratio, n_orient, w_space=w_space, w_time=w_time + ) + scaling = min(1.0, (alpha_space + alpha_time) / dual_norm) - d_obj = (scaling - 0.5 * (scaling ** 2)) * nR2 + scaling * np.sum(R * GX) + d_obj = (scaling - 0.5 * (scaling**2)) * nR2 + scaling * np.sum(R * GX) d_obj = max(d_obj, highest_d_obj) gap = p_obj - d_obj return gap, p_obj, d_obj, R -def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, - alpha_time, lipschitz_constant, phi, phiT, - w_space=None, w_time=None, n_orient=1, - maxit=200, tol=1e-8, dgap_freq=10, perc=None, - timeit=True, verbose=None): +def _tf_mixed_norm_solver_bcd_( + M, + G, + Z, + active_set, + candidates, + alpha_space, + alpha_time, + lipschitz_constant, + phi, + phiT, + *, + w_space=None, + w_time=None, + n_orient=1, + maxit=200, + tol=1e-8, + dgap_freq=10, + perc=None, +): n_sources = G.shape[1] n_positions = n_sources // n_orient # First make G fortran for faster access to blocks of columns Gd = np.asfortranarray(G) - G = np.ascontiguousarray( - Gd.T.reshape(n_positions, n_orient, -1).transpose(0, 2, 1)) + G = np.ascontiguousarray(Gd.T.reshape(n_positions, n_orient, -1).transpose(0, 2, 1)) R = M.copy() # residual active = np.where(active_set[::n_orient])[0] @@ -1044,7 +1129,7 @@ def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, alpha_space_lc = alpha_space * w_space / lipschitz_constant converged = False - d_obj = - np.inf + d_obj = -np.inf for i in range(maxit): for jj in candidates: @@ -1066,7 +1151,7 @@ def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, R += np.dot(G_j, X_j) X_j_new += X_j - rows_norm = np.linalg.norm(X_j_new, 'fro') 
+ rows_norm = np.linalg.norm(X_j_new, "fro") if rows_norm <= alpha_space_lc[jj]: if was_active: Z[jj] = 0.0 @@ -1084,8 +1169,11 @@ def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, active_set_j[:] = False else: # l1 - shrink = np.maximum(1.0 - alpha_time_lc[jj] / np.maximum( - col_norm, alpha_time_lc[jj]), 0.0) + shrink = np.maximum( + 1.0 + - alpha_time_lc[jj] / np.maximum(col_norm, alpha_time_lc[jj]), + 0.0, + ) if w_time is not None: shrink[w_time[jj] == 0.0] = 0.0 Z_j_new *= shrink[np.newaxis, :] @@ -1098,8 +1186,11 @@ def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, active_set_j[:] = False else: shrink = np.maximum( - 1.0 - alpha_space_lc[jj] / - np.maximum(row_norm, alpha_space_lc[jj]), 0.0) + 1.0 + - alpha_space_lc[jj] + / np.maximum(row_norm, alpha_space_lc[jj]), + 0.0, + ) Z_j_new *= shrink Z[jj] = Z_j_new.reshape(-1, *shape_init[1:]).copy() active_set_j[:] = True @@ -1107,17 +1198,29 @@ def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, R -= np.dot(G_j, Z_j_phi_T) if (i + 1) % dgap_freq == 0: - Zd = np.vstack([Z[pos] for pos in range(n_positions) - if np.any(Z[pos])]) + Zd = np.vstack([Z[pos] for pos in range(n_positions) if np.any(Z[pos])]) gap, p_obj, d_obj, _ = dgap_l21l1( - M, Gd, Zd, active_set, alpha_space, alpha_time, phi, phiT, - n_orient, d_obj, w_space=w_space, w_time=w_time) - converged = (gap < tol) + M, + Gd, + Zd, + active_set, + alpha_space, + alpha_time, + phi, + phiT, + n_orient, + d_obj, + w_space=w_space, + w_time=w_time, + ) + converged = gap < tol E.append(p_obj) - logger.info("\n Iteration %d :: n_active %d" % ( - i + 1, np.sum(active_set) / n_orient)) - logger.info(" dgap %.2e :: p_obj %f :: d_obj %f" % ( - gap, p_obj, d_obj)) + logger.info( + "\n Iteration %d :: n_active %d", + i + 1, + np.sum(active_set) / n_orient, + ) + logger.info(f" dgap {gap:.2e} :: p_obj {p_obj} :: d_obj {d_obj}") if converged: break @@ -1129,14 +1232,23 @@ def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, return Z, active_set, E, converged -@verbose -def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, - lipschitz_constant, phi, phiT, - Z_init=None, w_space=None, - w_time=None, n_orient=1, maxit=200, - tol=1e-8, dgap_freq=10, - verbose=None): - +def _tf_mixed_norm_solver_bcd_active_set( + M, + G, + alpha_space, + alpha_time, + lipschitz_constant, + phi, + phiT, + *, + Z_init=None, + w_space=None, + w_time=None, + n_orient=1, + maxit=200, + tol=1e-8, + dgap_freq=10, +): n_sensors, n_times = M.shape n_sources = G.shape[1] n_positions = n_sources // n_orient @@ -1146,15 +1258,15 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, active = [] if Z_init is not None: if Z_init.shape != (n_sources, phi.n_coefs.sum()): - raise Exception('Z_init must be None or an array with shape ' - '(n_sources, n_coefs).') + raise Exception( + "Z_init must be None or an array with shape (n_sources, n_coefs)." 
+ ) for ii in range(n_positions): - if np.any(Z_init[ii * n_orient:(ii + 1) * n_orient]): - active_set[ii * n_orient:(ii + 1) * n_orient] = True + if np.any(Z_init[ii * n_orient : (ii + 1) * n_orient]): + active_set[ii * n_orient : (ii + 1) * n_orient] = True active.append(ii) if len(active): - Z.update(dict(zip(active, - np.vsplit(Z_init[active_set], len(active))))) + Z.update(dict(zip(active, np.vsplit(Z_init[active_set], len(active))))) E = [] candidates = range(n_positions) @@ -1165,9 +1277,23 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, Z_init = dict.fromkeys(np.arange(n_positions), 0.0) Z_init.update(dict(zip(active, Z.values()))) Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_( - M, G, Z_init, active_set, candidates, alpha_space, alpha_time, - lipschitz_constant, phi, phiT, w_space=w_space, w_time=w_time, - n_orient=n_orient, maxit=1, tol=tol, perc=None, verbose=verbose) + M, + G, + Z_init, + active_set, + candidates, + alpha_space, + alpha_time, + lipschitz_constant, + phi, + phiT, + w_space=w_space, + w_time=w_time, + n_orient=n_orient, + maxit=1, + tol=tol, + perc=None, + ) E += E_tmp @@ -1185,14 +1311,24 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, w_time_as = None Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_( - M, G[:, active_set], Z_init, + M, + G[:, active_set], + Z_init, np.ones(len(active) * n_orient, dtype=bool), - candidates_, alpha_space, alpha_time, - lipschitz_constant[active_set[::n_orient]], phi, phiT, - w_space=w_space_as, w_time=w_time_as, - n_orient=n_orient, maxit=maxit, tol=tol, - dgap_freq=dgap_freq, perc=0.5, - verbose=verbose) + candidates_, + alpha_space, + alpha_time, + lipschitz_constant[active_set[::n_orient]], + phi, + phiT, + w_space=w_space_as, + w_time=w_time_as, + n_orient=n_orient, + maxit=maxit, + tol=tol, + dgap_freq=dgap_freq, + perc=0.5, + ) active = np.where(active_set[::n_orient])[0] active_set[active_set] = as_.copy() E += E_tmp @@ -1201,10 +1337,26 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, if converged: Zd = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])]) gap, p_obj, d_obj, _ = dgap_l21l1( - M, G, Zd, active_set, alpha_space, alpha_time, - phi, phiT, n_orient, d_obj, w_space, w_time) - logger.info("\ndgap %.2e :: p_obj %f :: d_obj %f :: n_active %d" - % (gap, p_obj, d_obj, np.sum(active_set) / n_orient)) + M, + G, + Zd, + active_set, + alpha_space, + alpha_time, + phi, + phiT, + n_orient, + d_obj, + w_space, + w_time, + ) + logger.info( + "\ndgap %.2e :: p_obj %f :: d_obj %f :: n_active %d", + gap, + p_obj, + d_obj, + np.sum(active_set) / n_orient, + ) if gap < tol: logger.info("\nConvergence reached!\n") break @@ -1220,10 +1372,22 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, @verbose -def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4, - n_orient=1, maxit=200, tol=1e-8, - active_set_size=None, debias=True, return_gap=False, - dgap_freq=10, verbose=None): +def tf_mixed_norm_solver( + M, + G, + alpha_space, + alpha_time, + wsize=64, + tstep=4, + n_orient=1, + maxit=200, + tol=1e-8, + active_set_size=None, + debias=True, + return_gap=False, + dgap_freq=10, + verbose=None, +): """Solve TF L21+L1 inverse solver with BCD and active set approach. See :footcite:`GramfortEtAl2013b,GramfortEtAl2011,BekhtiEtAl2016`. 
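# --- Editor's aside (not part of the diff): a small numpy sketch of how the TF solvers
# size their (possibly multi-scale) STFT dictionaries from ``wsize``/``tstep``, mirroring
# the hunks nearby; the window lengths, steps and signal length are illustrative
# assumptions only.
import numpy as np

n_times = 100
wsize = np.atleast_1d([64, 16])    # two dictionaries: long and short analysis windows
tstep = np.atleast_1d([4, 2])
if len(tstep) != len(wsize):
    raise ValueError("The same number of window sizes and steps must be passed.")
n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)   # time frames per dictionary
n_freqs = wsize // 2 + 1                                       # positive frequencies per dictionary
n_coefs = n_steps * n_freqs                                    # TF coefficients per dictionary
print(n_steps, n_freqs, n_coefs)   # -> [25 50] [33  9] [825 450]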
@@ -1288,9 +1452,10 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4, tstep = np.atleast_1d(tstep) wsize = np.atleast_1d(wsize) if len(tstep) != len(wsize): - raise ValueError('The same number of window sizes and steps must be ' - 'passed. Got tstep = %s and wsize = %s' % - (tstep, wsize)) + raise ValueError( + "The same number of window sizes and steps must be " + f"passed. Got tstep = {tstep} and wsize = {wsize}" + ) n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int) n_freqs = wsize // 2 + 1 @@ -1303,14 +1468,24 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4, else: lc = np.empty(n_positions) for j in range(n_positions): - G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)] + G_tmp = G[:, (j * n_orient) : ((j + 1) * n_orient)] lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) logger.info("Using block coordinate descent with active set approach") X, Z, active_set, E, gap = _tf_mixed_norm_solver_bcd_active_set( - M, G, alpha_space, alpha_time, lc, phi, phiT, - Z_init=None, n_orient=n_orient, maxit=maxit, tol=tol, - dgap_freq=dgap_freq, verbose=None) + M, + G, + alpha_space, + alpha_time, + lc, + phi, + phiT, + Z_init=None, + n_orient=n_orient, + maxit=maxit, + tol=tol, + dgap_freq=dgap_freq, + ) if np.any(active_set) and debias: bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient) @@ -1322,11 +1497,21 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4, return X, active_set, E -@verbose -def iterative_tf_mixed_norm_solver(M, G, alpha_space, alpha_time, - n_tfmxne_iter, wsize=64, tstep=4, - maxit=3000, tol=1e-8, debias=True, - n_orient=1, dgap_freq=10, verbose=None): +def iterative_tf_mixed_norm_solver( + M, + G, + alpha_space, + alpha_time, + n_tfmxne_iter, + wsize=64, + tstep=4, + maxit=3000, + tol=1e-8, + debias=True, + n_orient=1, + dgap_freq=10, + verbose=None, +): """Solve TF L0.5/L1 + L0.5 inverse problem with BCD + active set approach. Parameters @@ -1385,9 +1570,10 @@ def iterative_tf_mixed_norm_solver(M, G, alpha_space, alpha_time, tstep = np.atleast_1d(tstep) wsize = np.atleast_1d(wsize) if len(tstep) != len(wsize): - raise ValueError('The same number of window sizes and steps must be ' - 'passed. Got tstep = %s and wsize = %s' % - (tstep, wsize)) + raise ValueError( + "The same number of window sizes and steps must be " + f"passed. Got tstep = {tstep} and wsize = {wsize}" + ) n_steps = np.ceil(n_times / tstep.astype(float)).astype(int) n_freqs = wsize // 2 + 1 @@ -1400,24 +1586,25 @@ def iterative_tf_mixed_norm_solver(M, G, alpha_space, alpha_time, else: lc = np.empty(n_positions) for j in range(n_positions): - G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)] + G_tmp = G[:, (j * n_orient) : ((j + 1) * n_orient)] lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) # space and time penalties, and inverse of their derivatives: def g_space(Z): - return np.sqrt(np.sqrt(phi.norm(Z, ord=2).reshape( - -1, n_orient).sum(axis=1))) + return np.sqrt(np.sqrt(phi.norm(Z, ord=2).reshape(-1, n_orient).sum(axis=1))) def g_space_prime_inv(Z): - return 2. * g_space(Z) + return 2.0 * g_space(Z) def g_time(Z): - return np.sqrt(np.sqrt(np.sum((np.abs(Z) ** 2.).reshape( - (n_orient, -1), order='F'), axis=0)).reshape( - (-1, Z.shape[1]), order='F')) + return np.sqrt( + np.sqrt( + np.sum((np.abs(Z) ** 2.0).reshape((n_orient, -1), order="F"), axis=0) + ).reshape((-1, Z.shape[1]), order="F") + ) def g_time_prime_inv(Z): - return 2. 
* g_time(Z) + return 2.0 * g_time(Z) E = list() @@ -1432,17 +1619,28 @@ def g_time_prime_inv(Z): w_space = None w_time = None else: - w_space = 1. / g_space_prime_inv(Z) + w_space = 1.0 / g_space_prime_inv(Z) w_time = g_time_prime_inv(Z) - w_time[w_time == 0.0] = -1. - w_time = 1. / w_time + w_time[w_time == 0.0] = -1.0 + w_time = 1.0 / w_time w_time[w_time < 0.0] = 0.0 - X, Z, active_set_, E_, _ = _tf_mixed_norm_solver_bcd_active_set( - M, G[:, active_set], alpha_space, alpha_time, - lc[active_set[::n_orient]], phi, phiT, - Z_init=Z, w_space=w_space, w_time=w_time, n_orient=n_orient, - maxit=maxit, tol=tol, dgap_freq=dgap_freq, verbose=None) + X, Z, active_set_, _, _ = _tf_mixed_norm_solver_bcd_active_set( + M, + G[:, active_set], + alpha_space, + alpha_time, + lc[active_set[::n_orient]], + phi, + phiT, + Z_init=Z, + w_space=w_space, + w_time=w_time, + n_orient=n_orient, + maxit=maxit, + tol=tol, + dgap_freq=dgap_freq, + ) active_set[active_set] = active_set_ @@ -1450,25 +1648,35 @@ def g_time_prime_inv(Z): l21_penalty = np.sum(g_space(Z.copy())) l1_penalty = phi.norm(g_time(Z.copy()), ord=1).sum() - p_obj = (0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X), - 'fro') ** 2. + alpha_space * l21_penalty + - alpha_time * l1_penalty) + p_obj = ( + 0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X), "fro") ** 2.0 + + alpha_space * l21_penalty + + alpha_time * l1_penalty + ) E.append(p_obj) - logger.info('Iteration %d: active set size=%d, E=%f' % ( - k + 1, active_set.sum() / n_orient, p_obj)) + logger.info( + "Iteration %d: active set size=%d, E=%f", + k + 1, + active_set.sum() / n_orient, + p_obj, + ) # Check convergence if np.array_equal(active_set, active_set_0): max_diff = np.amax(np.abs(Z - Z0)) - if (max_diff < tol): - print('Convergence reached after %d reweightings!' % k) + if max_diff < tol: + logger.info("Convergence reached after %d reweightings!", k) break else: - p_obj = 0.5 * np.linalg.norm(M) ** 2. + p_obj = 0.5 * np.linalg.norm(M) ** 2.0 E.append(p_obj) - logger.info('Iteration %d: as_size=%d, E=%f' % ( - k + 1, active_set.sum() / n_orient, p_obj)) + logger.info( + "Iteration %d: as_size=%d, E=%f", + k + 1, + active_set.sum() / n_orient, + p_obj, + ) break if debias: diff --git a/mne/inverse_sparse/tests/__init__.py b/mne/inverse_sparse/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/inverse_sparse/tests/__init__.py +++ b/mne/inverse_sparse/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/inverse_sparse/tests/test_gamma_map.py b/mne/inverse_sparse/tests/test_gamma_map.py index 6d8aecaf4ad..db4ac5c9260 100644 --- a/mne/inverse_sparse/tests/test_gamma_map.py +++ b/mne/inverse_sparse/tests/test_gamma_map.py @@ -1,50 +1,55 @@ -# Author: Martin Luessi -# -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
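# --- Editor's aside (not part of the diff): a toy numpy sketch of the ell_0.5-style
# reweighting that drives the irMxNE / irTF-MxNE loops above: after each solve, new
# weights are built from the square root of the block norms of the previous estimate
# (w_space = 1 / (2 * g_space(Z))). In the real TF solver the norm is taken over STFT
# coefficients via ``phi``; a plain row-block L2 norm stands in for it here, and the
# shapes below are illustrative assumptions.
import numpy as np

def groups_norm2(A, n_orient):
    """Squared L2 norms of blocks of n_orient consecutive rows (stand-in helper)."""
    return (A ** 2).reshape(A.shape[0] // n_orient, -1).sum(axis=1)

def g_space(Z, n_orient=1):
    # concave (ell_0.5-like) spatial penalty: square root of the block L2 norm
    return np.sqrt(np.sqrt(groups_norm2(Z, n_orient)))

Z = np.random.default_rng(0).standard_normal((6, 4))  # 6 sources x 4 coefficients
w_space = 1.0 / (2.0 * g_space(Z))                    # weights for the next reweighted solve
print(w_space)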
-import pytest import numpy as np -from numpy.testing import assert_array_almost_equal, assert_allclose +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal import mne -from mne.datasets import testing -from mne import (read_cov, read_forward_solution, read_evokeds, - convert_forward_solution, VectorSourceEstimate) +from mne import ( + VectorSourceEstimate, + convert_forward_solution, + pick_types_forward, + read_cov, + read_evokeds, + read_forward_solution, +) from mne.cov import regularize +from mne.datasets import testing +from mne.dipole import Dipole from mne.inverse_sparse import gamma_map from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles -from mne.minimum_norm.tests.test_inverse import (assert_stc_res, - assert_var_exp_log) -from mne import pick_types_forward +from mne.minimum_norm.tests.test_inverse import assert_stc_res, assert_var_exp_log from mne.utils import assert_stcs_equal, catch_logging -from mne.dipole import Dipole data_path = testing.data_path(download=False) fname_evoked = data_path / "MEG" / "sample" / "sample_audvis-ave.fif" fname_cov = data_path / "MEG" / "sample" / "sample_audvis-cov.fif" -fname_fwd = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-6-fwd.fif" -) +fname_fwd = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-6-fwd.fif" subjects_dir = data_path / "subjects" -def _check_stc(stc, evoked, idx, hemi, fwd, dist_limit=0., ratio=50., - res=None, atol=1e-20): +def _check_stc( + stc, evoked, idx, hemi, fwd, dist_limit=0.0, ratio=50.0, res=None, atol=1e-20 +): """Check correctness.""" assert_array_almost_equal(stc.times, evoked.times, 5) stc_orig = stc if isinstance(stc, VectorSourceEstimate): assert stc.data.any(1).any(1).all() # all dipoles should have some stc = stc.magnitude() - amps = np.sum(stc.data ** 2, axis=1) + amps = np.sum(stc.data**2, axis=1) order = np.argsort(amps)[::-1] amps = amps[order] verts = np.concatenate(stc.vertices)[order] hemi_idx = int(order[0] >= len(stc.vertices[1])) - hemis = ['lh', 'rh'] + hemis = ["lh", "rh"] assert hemis[hemi_idx] == hemi - dist = np.linalg.norm(np.diff(fwd['src'][hemi_idx]['rr'][[idx, verts[0]]], - axis=0)[0]) * 1000. 
+ dist = ( + np.linalg.norm(np.diff(fwd["src"][hemi_idx]["rr"][[idx, verts[0]]], axis=0)[0]) + * 1000.0 + ) assert dist <= dist_limit assert amps[0] > ratio * amps[1] if res is not None: @@ -59,79 +64,132 @@ def test_gamma_map_standard(): forward = convert_forward_solution(forward, surf_ori=True) forward = pick_types_forward(forward, meg=False, eeg=True) - evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0), - proj=False) + evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0), proj=False) evoked.resample(50, npad=100) evoked.crop(tmin=0.1, tmax=0.14) # crop to window around peak cov = read_cov(fname_cov) - cov = regularize(cov, evoked.info, rank=None) + cov = regularize(cov, evoked.info) alpha = 0.5 with catch_logging() as log: - stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4, - xyz_same_gamma=True, update_mode=1, verbose=True) - _check_stc(stc, evoked, 68477, 'lh', fwd=forward) + stc = gamma_map( + evoked, + forward, + cov, + alpha, + tol=1e-4, + xyz_same_gamma=True, + update_mode=1, + verbose=True, + ) + _check_stc(stc, evoked, 68477, "lh", fwd=forward) assert_var_exp_log(log.getvalue(), 20, 22) with catch_logging() as log: stc_vec, res = gamma_map( - evoked, forward, cov, alpha, tol=1e-4, xyz_same_gamma=True, - update_mode=1, pick_ori='vector', return_residual=True, - verbose=True) + evoked, + forward, + cov, + alpha, + tol=1e-4, + xyz_same_gamma=True, + update_mode=1, + pick_ori="vector", + return_residual=True, + verbose=True, + ) assert_var_exp_log(log.getvalue(), 20, 22) assert_stcs_equal(stc_vec.magnitude(), stc) - _check_stc(stc_vec, evoked, 68477, 'lh', fwd=forward, res=res) + _check_stc(stc_vec, evoked, 68477, "lh", fwd=forward, res=res) stc, res = gamma_map( - evoked, forward, cov, alpha, tol=1e-4, xyz_same_gamma=False, - update_mode=1, pick_ori='vector', return_residual=True) - _check_stc(stc, evoked, 82010, 'lh', fwd=forward, dist_limit=6., ratio=2., - res=res) + evoked, + forward, + cov, + alpha, + tol=1e-4, + xyz_same_gamma=False, + update_mode=1, + pick_ori="vector", + return_residual=True, + ) + _check_stc( + stc, evoked, 82010, "lh", fwd=forward, dist_limit=6.0, ratio=2.0, res=res + ) with catch_logging() as log: - dips = gamma_map(evoked, forward, cov, alpha, tol=1e-4, - xyz_same_gamma=False, update_mode=1, - return_as_dipoles=True, verbose=True) + dips = gamma_map( + evoked, + forward, + cov, + alpha, + tol=1e-4, + xyz_same_gamma=False, + update_mode=1, + return_as_dipoles=True, + verbose=True, + ) exp_var = assert_var_exp_log(log.getvalue(), 58, 60) dip_exp_var = np.mean(sum(dip.gof for dip in dips)) assert_allclose(exp_var, dip_exp_var, atol=10) # not really equiv, close - assert (isinstance(dips[0], Dipole)) - stc_dip = make_stc_from_dipoles(dips, forward['src']) + assert isinstance(dips[0], Dipole) + stc_dip = make_stc_from_dipoles(dips, forward["src"]) assert_stcs_equal(stc.magnitude(), stc_dip) # force fixed orientation - stc, res = gamma_map(evoked, forward, cov, alpha, tol=1e-4, - xyz_same_gamma=False, update_mode=2, - loose=0, return_residual=True) - _check_stc(stc, evoked, 85739, 'lh', fwd=forward, ratio=20., res=res) + stc, res = gamma_map( + evoked, + forward, + cov, + alpha, + tol=1e-4, + xyz_same_gamma=False, + update_mode=2, + loose=0, + return_residual=True, + ) + _check_stc(stc, evoked, 85739, "lh", fwd=forward, ratio=20.0, res=res) @pytest.mark.slowtest @testing.requires_testing_data def test_gamma_map_vol_sphere(): """Gamma MAP with a sphere forward and volumic source space.""" - evoked = 
read_evokeds(fname_evoked, condition=0, baseline=(None, 0), - proj=False) + evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0), proj=False) evoked.resample(50, npad=100) evoked.crop(tmin=0.1, tmax=0.16) # crop to window around peak cov = read_cov(fname_cov) - cov = regularize(cov, evoked.info, rank=None) + cov = regularize(cov, evoked.info, rank=dict(eeg=58)) info = evoked.info - sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080) - src = mne.setup_volume_source_space(subject=None, pos=30., mri=None, - sphere=(0.0, 0.0, 0.0, 0.08), - bem=None, mindist=5.0, - exclude=2.0, sphere_units='m') - fwd = mne.make_forward_solution(info, trans=None, src=src, bem=sphere, - eeg=False, meg=True) + sphere = mne.make_sphere_model(r0=(0.0, 0.0, 0.0), head_radius=0.080) + src = mne.setup_volume_source_space( + subject=None, + pos=30.0, + mri=None, + sphere=(0.0, 0.0, 0.0, 0.08), + bem=None, + mindist=5.0, + exclude=2.0, + sphere_units="m", + ) + fwd = mne.make_forward_solution( + info, trans=None, src=src, bem=sphere, eeg=False, meg=True + ) alpha = 0.5 - stc = gamma_map(evoked, fwd, cov, alpha, tol=1e-4, - xyz_same_gamma=False, update_mode=2, - return_residual=False) + stc = gamma_map( + evoked, + fwd, + cov, + alpha, + tol=1e-4, + xyz_same_gamma=False, + update_mode=2, + return_residual=False, + ) assert_array_almost_equal(stc.times, evoked.times, 5) # Computing inverse with restricted orientations should also work, since @@ -141,18 +199,21 @@ def test_gamma_map_vol_sphere(): # Compare orientation obtained using fit_dipole and gamma_map # for a simulated evoked containing a single dipole - stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4), - vertices=[stc.vertices[0][:1]], - tmin=stc.tmin, - tstep=stc.tstep) - evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9, - use_cps=True) + stc = mne.VolSourceEstimate( + 50e-9 * np.random.RandomState(42).randn(1, 4), + vertices=[stc.vertices[0][:1]], + tmin=stc.tmin, + tstep=stc.tstep, + ) + evoked_dip = mne.simulation.simulate_evoked( + fwd, stc, info, cov, nave=1e9, use_cps=True + ) dip_gmap = gamma_map(evoked_dip, fwd, cov, 0.1, return_as_dipoles=True) amp_max = [np.max(d.amplitude) for d in dip_gmap] dip_gmap = dip_gmap[np.argmax(amp_max)] - assert (dip_gmap[0].pos[0] in src[0]['rr'][stc.vertices[0]]) + assert dip_gmap[0].pos[0] in src[0]["rr"][stc.vertices[0]] dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0] - assert (np.abs(np.dot(dip_fit.ori[0], dip_gmap.ori[0])) > 0.99) + assert np.abs(np.dot(dip_fit.ori[0], dip_gmap.ori[0])) > 0.99 diff --git a/mne/inverse_sparse/tests/test_mxne_debiasing.py b/mne/inverse_sparse/tests/test_mxne_debiasing.py index a81a65cc994..9c32f136eaf 100644 --- a/mne/inverse_sparse/tests/test_mxne_debiasing.py +++ b/mne/inverse_sparse/tests/test_mxne_debiasing.py @@ -1,7 +1,6 @@ -# Authors: Daniel Strohmeier -# Alexandre Gramfort -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np from numpy.testing import assert_almost_equal diff --git a/mne/inverse_sparse/tests/test_mxne_inverse.py b/mne/inverse_sparse/tests/test_mxne_inverse.py index c5a8064b608..4a3a15df0f4 100644 --- a/mne/inverse_sparse/tests/test_mxne_inverse.py +++ b/mne/inverse_sparse/tests/test_mxne_inverse.py @@ -1,44 +1,45 @@ -# Author: Alexandre Gramfort -# Daniel Strohmeier -# -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
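# --- Editor's aside (not part of the diff): a minimal, self-contained sketch of the
# sphere-model + volume source space setup exercised by ``test_gamma_map_vol_sphere``
# above. It reuses the same radii and grid spacing as the test, needs no sample data
# for these two calls, and assumes only that MNE-Python is installed.
import mne

sphere = mne.make_sphere_model(r0=(0.0, 0.0, 0.0), head_radius=0.080)
src = mne.setup_volume_source_space(
    subject=None,
    pos=30.0,                      # 30 mm grid spacing
    sphere=(0.0, 0.0, 0.0, 0.08),  # sphere center and radius in meters
    mindist=5.0,
    exclude=2.0,
    sphere_units="m",
)
print(src)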
import numpy as np -from numpy.testing import (assert_array_almost_equal, assert_allclose, - assert_array_less, assert_array_equal) import pytest +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, +) import mne +from mne import convert_forward_solution, read_cov, read_evokeds, read_forward_solution from mne.datasets import testing -from mne.label import read_label -from mne import (read_cov, read_forward_solution, read_evokeds, - convert_forward_solution) +from mne.dipole import Dipole from mne.inverse_sparse import mixed_norm, tf_mixed_norm -from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles, _split_gof -from mne.inverse_sparse.mxne_inverse import _compute_mxne_sure +from mne.inverse_sparse.mxne_inverse import ( + _compute_mxne_sure, + _split_gof, + make_stc_from_dipoles, +) from mne.inverse_sparse.mxne_optim import norm_l2inf +from mne.label import read_label from mne.minimum_norm import apply_inverse, make_inverse_operator -from mne.minimum_norm.tests.test_inverse import \ - assert_var_exp_log, assert_stc_res -from mne.utils import assert_stcs_equal, catch_logging, _record_warnings -from mne.dipole import Dipole +from mne.minimum_norm.tests.test_inverse import assert_stc_res, assert_var_exp_log +from mne.simulation import simulate_evoked, simulate_sparse_stc from mne.source_estimate import VolSourceEstimate -from mne.simulation import simulate_sparse_stc, simulate_evoked - +from mne.utils import _record_warnings, assert_stcs_equal, catch_logging data_path = testing.data_path(download=False) # NOTE: These use the ave and cov from sample dataset (no _trunc) fname_data = data_path / "MEG" / "sample" / "sample_audvis-ave.fif" fname_cov = data_path / "MEG" / "sample" / "sample_audvis-cov.fif" fname_raw = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" -fname_fwd = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-6-fwd.fif" -) +fname_fwd = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-6-fwd.fif" label = "Aud-rh" -fname_label = data_path / "MEG" / "sample" / "labels" / ("%s.label" % label) +fname_label = data_path / "MEG" / "sample" / "labels" / f"{label}.label" -@pytest.fixture(scope='module', params=[testing._pytest_param]) +@pytest.fixture(scope="module", params=[testing._pytest_param]) def forward(): """Get a forward solution.""" # module scope it for speed (but don't overwrite in use!) @@ -63,32 +64,59 @@ def test_mxne_inverse_standard(forward): evoked_l21 = evoked.copy() evoked_l21.crop(tmin=0.081, tmax=0.1) label = read_label(fname_label) - assert label.hemi == 'rh' + assert label.hemi == "rh" forward = convert_forward_solution(forward, surf_ori=True) # Reduce source space to make test computation faster - inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov, - loose=loose, depth=depth, - fixed=True, use_cps=True) - stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9., - method='dSPM') + inverse_operator = make_inverse_operator( + evoked_l21.info, + forward, + cov, + loose=loose, + depth=depth, + fixed=True, + use_cps=True, + ) + stc_dspm = apply_inverse( + evoked_l21, inverse_operator, lambda2=1.0 / 9.0, method="dSPM" + ) stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0 - stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1. 
+ stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.0 weights_min = 0.5 # MxNE tests alpha = 70 # spatial regularization parameter with _record_warnings(): # CD - stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose, - depth=depth, maxit=300, tol=1e-8, - active_set_size=10, weights=stc_dspm, - weights_min=weights_min, solver='cd') - stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose, - depth=depth, maxit=300, tol=1e-8, active_set_size=10, - weights=stc_dspm, weights_min=weights_min, - solver='bcd') + stc_cd = mixed_norm( + evoked_l21, + forward, + cov, + alpha, + loose=loose, + depth=depth, + maxit=300, + tol=1e-8, + active_set_size=10, + weights=stc_dspm, + weights_min=weights_min, + solver="cd", + ) + stc_bcd = mixed_norm( + evoked_l21, + forward, + cov, + alpha, + loose=loose, + depth=depth, + maxit=300, + tol=1e-8, + active_set_size=10, + weights=stc_dspm, + weights_min=weights_min, + solver="bcd", + ) assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5) assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5) assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0) @@ -99,20 +127,31 @@ def test_mxne_inverse_standard(forward): with _record_warnings(): # no convergence stc = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2) with _record_warnings(): # no convergence - stc_vec = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2, - pick_ori='vector') + stc_vec = mixed_norm( + evoked_l21, forward, cov, alpha, loose=1, maxit=2, pick_ori="vector" + ) assert_stcs_equal(stc_vec.magnitude(), stc) - with _record_warnings(), \ - pytest.raises(ValueError, match='pick_ori='): - mixed_norm(evoked_l21, forward, cov, alpha, loose=0, maxit=2, - pick_ori='vector') + with _record_warnings(), pytest.raises(ValueError, match="pick_ori="): + mixed_norm(evoked_l21, forward, cov, alpha, loose=0, maxit=2, pick_ori="vector") with _record_warnings(), catch_logging() as log: # CD - dips = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose, - depth=depth, maxit=300, tol=1e-8, active_set_size=10, - weights=stc_dspm, weights_min=weights_min, - solver='cd', return_as_dipoles=True, verbose=True) - stc_dip = make_stc_from_dipoles(dips, forward['src']) + dips = mixed_norm( + evoked_l21, + forward, + cov, + alpha, + loose=loose, + depth=depth, + maxit=300, + tol=1e-8, + active_set_size=10, + weights=stc_dspm, + weights_min=weights_min, + solver="cd", + return_as_dipoles=True, + verbose=True, + ) + stc_dip = make_stc_from_dipoles(dips, forward["src"]) assert isinstance(dips[0], Dipole) assert stc_dip.subject == "sample" assert_stcs_equal(stc_cd, stc_dip) @@ -120,21 +159,42 @@ def test_mxne_inverse_standard(forward): # Single time point things should match with _record_warnings(), catch_logging() as log: - dips = mixed_norm(evoked_l21.copy().crop(0.081, 0.081), - forward, cov, alpha, loose=loose, - depth=depth, maxit=300, tol=1e-8, active_set_size=10, - weights=stc_dspm, weights_min=weights_min, - solver='cd', return_as_dipoles=True, verbose=True) + dips = mixed_norm( + evoked_l21.copy().crop(0.081, 0.081), + forward, + cov, + alpha, + loose=loose, + depth=depth, + maxit=300, + tol=1e-8, + active_set_size=10, + weights=stc_dspm, + weights_min=weights_min, + solver="cd", + return_as_dipoles=True, + verbose=True, + ) assert_var_exp_log(log.getvalue(), 37.8, 38.0) # 37.9 gof = sum(dip.gof[0] for dip in dips) # these are now partial exp vars assert_allclose(gof, 37.9, atol=0.1) with _record_warnings(), catch_logging() as log: - stc, res = 
mixed_norm(evoked_l21, forward, cov, alpha, loose=loose, - depth=depth, maxit=300, tol=1e-8, - weights=stc_dspm, # gh-6382 - active_set_size=10, return_residual=True, - solver='cd', verbose=True) + stc, res = mixed_norm( + evoked_l21, + forward, + cov, + alpha, + loose=loose, + depth=depth, + maxit=300, + tol=1e-8, + weights=stc_dspm, # gh-6382 + active_set_size=10, + return_residual=True, + solver="cd", + verbose=True, + ) assert_array_almost_equal(stc.times, evoked_l21.times, 5) assert stc.vertices[1][0] in label.vertices assert_var_exp_log(log.getvalue(), 51, 53) # 51.8 @@ -144,9 +204,21 @@ def test_mxne_inverse_standard(forward): # irMxNE tests with _record_warnings(), catch_logging() as log: # CD stc, residual = mixed_norm( - evoked_l21, forward, cov, alpha, n_mxne_iter=5, loose=0.0001, - depth=depth, maxit=300, tol=1e-8, active_set_size=10, - solver='cd', return_residual=True, pick_ori='vector', verbose=True) + evoked_l21, + forward, + cov, + alpha, + n_mxne_iter=5, + loose=0.0001, + depth=depth, + maxit=300, + tol=1e-8, + active_set_size=10, + solver="cd", + return_residual=True, + pick_ori="vector", + verbose=True, + ) assert_array_almost_equal(stc.times, evoked_l21.times, 5) assert stc.vertices[1][0] in label.vertices assert stc.vertices == [[63152], [79017]] @@ -154,33 +226,72 @@ def test_mxne_inverse_standard(forward): assert_stc_res(evoked_l21, stc, forward, residual) # Do with TF-MxNE for test memory savings - alpha = 60. # overall regularization parameter + alpha = 60.0 # overall regularization parameter l1_ratio = 0.01 # temporal regularization proportion - stc, _ = tf_mixed_norm(evoked, forward, cov, - loose=loose, depth=depth, maxit=100, tol=1e-4, - tstep=4, wsize=16, window=0.1, weights=stc_dspm, - weights_min=weights_min, return_residual=True, - alpha=alpha, l1_ratio=l1_ratio) + stc, _ = tf_mixed_norm( + evoked, + forward, + cov, + loose=loose, + depth=depth, + maxit=100, + tol=1e-4, + tstep=4, + wsize=16, + window=0.1, + weights=stc_dspm, + weights_min=weights_min, + return_residual=True, + alpha=alpha, + l1_ratio=l1_ratio, + ) assert_array_almost_equal(stc.times, evoked.times, 5) assert stc.vertices[1][0] in label.vertices # vector stc_nrm = tf_mixed_norm( - evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4, - tstep=4, wsize=16, window=0.1, weights=stc_dspm, - weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio) + evoked, + forward, + cov, + loose=1, + depth=depth, + maxit=2, + tol=1e-4, + tstep=4, + wsize=16, + window=0.1, + weights=stc_dspm, + weights_min=weights_min, + alpha=alpha, + l1_ratio=l1_ratio, + ) stc_vec, residual = tf_mixed_norm( - evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4, - tstep=4, wsize=16, window=0.1, weights=stc_dspm, - weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio, - pick_ori='vector', return_residual=True) + evoked, + forward, + cov, + loose=1, + depth=depth, + maxit=2, + tol=1e-4, + tstep=4, + wsize=16, + window=0.1, + weights=stc_dspm, + weights_min=weights_min, + alpha=alpha, + l1_ratio=l1_ratio, + pick_ori="vector", + return_residual=True, + ) assert_stcs_equal(stc_vec.magnitude(), stc_nrm) - pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov, - alpha=101, l1_ratio=0.03) - pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov, - alpha=50., l1_ratio=1.01) + pytest.raises( + ValueError, tf_mixed_norm, evoked, forward, cov, alpha=101, l1_ratio=0.03 + ) + pytest.raises( + ValueError, tf_mixed_norm, evoked, forward, cov, alpha=50.0, l1_ratio=1.01 + ) @pytest.mark.slowtest @@ -195,123 
+306,175 @@ def test_mxne_vol_sphere(): evoked_l21.crop(tmin=0.081, tmax=0.1) info = evoked.info - sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080) - src = mne.setup_volume_source_space(subject=None, pos=15., mri=None, - sphere=(0.0, 0.0, 0.0, 0.08), - bem=None, mindist=5.0, - exclude=2.0, sphere_units='m') - fwd = mne.make_forward_solution(info, trans=None, src=src, - bem=sphere, eeg=False, meg=True) + sphere = mne.make_sphere_model(r0=(0.0, 0.0, 0.0), head_radius=0.09) + src = mne.setup_volume_source_space( + subject=None, + pos=15.0, + mri=None, + sphere=(0.0, 0.0, 0.0, 0.08), + bem=None, + mindist=5.0, + exclude=2.0, + sphere_units="m", + ) + fwd = mne.make_forward_solution( + info, trans=None, src=src, bem=sphere, eeg=False, meg=True + ) - alpha = 80. + alpha = 80.0 # Computing inverse with restricted orientations should also work, since # we have a discrete source space. - stc = mixed_norm(evoked_l21, fwd, cov, alpha, loose=0.2, - return_residual=False, maxit=3, tol=1e-8, - active_set_size=10) + stc = mixed_norm( + evoked_l21, + fwd, + cov, + alpha, + loose=0.2, + return_residual=False, + maxit=3, + tol=1e-8, + active_set_size=10, + ) assert_array_almost_equal(stc.times, evoked_l21.times, 5) # irMxNE tests with catch_logging() as log: - stc = mixed_norm(evoked_l21, fwd, cov, alpha, - n_mxne_iter=1, maxit=30, tol=1e-8, - active_set_size=10, verbose=True) + stc = mixed_norm( + evoked_l21, + fwd, + cov, + alpha, + n_mxne_iter=1, + maxit=30, + tol=1e-8, + active_set_size=10, + verbose=True, + ) assert isinstance(stc, VolSourceEstimate) assert_array_almost_equal(stc.times, evoked_l21.times, 5) assert_var_exp_log(log.getvalue(), 9, 11) # 10.2 # Compare orientation obtained using fit_dipole and gamma_map # for a simulated evoked containing a single dipole - stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4), - vertices=[stc.vertices[0][:1]], - tmin=stc.tmin, - tstep=stc.tstep) - evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9, - use_cps=True) + stc = mne.VolSourceEstimate( + 50e-9 * np.random.RandomState(42).randn(1, 4), + vertices=[stc.vertices[0][:1]], + tmin=stc.tmin, + tstep=stc.tstep, + ) + evoked_dip = mne.simulation.simulate_evoked( + fwd, + stc, + info, + cov, + nave=1e9, + use_cps=True, + random_state=0, + ) - dip_mxne = mixed_norm(evoked_dip, fwd, cov, alpha=80, - n_mxne_iter=1, maxit=30, tol=1e-8, - active_set_size=10, return_as_dipoles=True) + dip_mxne = mixed_norm( + evoked_dip, + fwd, + cov, + alpha=80, + n_mxne_iter=1, + maxit=30, + tol=1e-8, + active_set_size=10, + return_as_dipoles=True, + ) amp_max = [np.max(d.amplitude) for d in dip_mxne] dip_mxne = dip_mxne[np.argmax(amp_max)] - assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices[0]] + assert dip_mxne.pos[0] in src[0]["rr"][stc.vertices[0]] dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0] assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99 dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0]) - assert dist < 4. # within 4 mm + assert dist < 4.0 # within 4 mm # Do with TF-MxNE for test memory savings - alpha = 60. 
# overall regularization parameter + alpha = 60.0 # overall regularization parameter l1_ratio = 0.01 # temporal regularization proportion - stc, _ = tf_mixed_norm(evoked, fwd, cov, maxit=3, tol=1e-4, - tstep=16, wsize=32, window=0.1, alpha=alpha, - l1_ratio=l1_ratio, return_residual=True) + stc, _ = tf_mixed_norm( + evoked, + fwd, + cov, + maxit=3, + tol=1e-4, + tstep=16, + wsize=32, + window=0.1, + alpha=alpha, + l1_ratio=l1_ratio, + return_residual=True, + ) assert isinstance(stc, VolSourceEstimate) assert_array_almost_equal(stc.times, evoked.times, 5) -@pytest.mark.parametrize('mod', ( - None, 'mult', 'augment', 'sign', 'zero', 'less')) +@pytest.mark.parametrize("mod", (None, "mult", "augment", "sign", "zero", "less")) def test_split_gof_basic(mod): """Test splitting the goodness of fit.""" # first a trivial case - gain = np.array([[0., 1., 1.], [1., 1., 0.]]).T + gain = np.array([[0.0, 1.0, 1.0], [1.0, 1.0, 0.0]]).T M = np.ones((3, 1)) X = np.ones((2, 1)) M_est = gain @ X - assert_allclose(M_est, np.array([[1., 2., 1.]]).T) # a reasonable estimate - if mod == 'mult': - gain *= [1., -0.5] + assert_allclose(M_est, np.array([[1.0, 2.0, 1.0]]).T) # a reasonable estimate + if mod == "mult": + gain *= [1.0, -0.5] X[1] *= -2 - elif mod == 'augment': + elif mod == "augment": gain = np.concatenate((gain, np.zeros((3, 1))), axis=1) - X = np.concatenate((X, [[1.]])) - elif mod == 'sign': + X = np.concatenate((X, [[1.0]])) + elif mod == "sign": gain[1] *= -1 M[1] *= -1 M_est[1] *= -1 - elif mod in ('zero', 'less'): - gain = np.array([[1, 1., 1.], [1., 1., 1.]]).T - if mod == 'zero': - X[:, 0] = [1., 0.] + elif mod in ("zero", "less"): + gain = np.array([[1, 1.0, 1.0], [1.0, 1.0, 1.0]]).T + if mod == "zero": + X[:, 0] = [1.0, 0.0] else: - X[:, 0] = [1., 0.5] + X[:, 0] = [1.0, 0.5] M_est = gain @ X else: assert mod is None res = M - M_est - gof = 100 * (1. 
- (res * res).sum() / (M * M).sum()) + gof = 100 * (1.0 - (res * res).sum() / (M * M).sum()) gof_split = _split_gof(M, X, gain) assert_allclose(gof_split.sum(), gof) want = gof_split[[0, 0]] - if mod == 'augment': + if mod == "augment": want = np.concatenate((want, [[0]])) - if mod in ('mult', 'less'): + if mod in ("mult", "less"): assert_array_less(gof_split[1], gof_split[0]) - elif mod == 'zero': + elif mod == "zero": assert_allclose(gof_split[0], gof_split.sum(0)) - assert_allclose(gof_split[1], 0., atol=1e-6) + assert_allclose(gof_split[1], 0.0, atol=1e-6) else: assert_allclose(gof_split, want, atol=1e-12) @testing.requires_testing_data -@pytest.mark.parametrize('idx, weights', [ - # empirically determined approximately orthogonal columns: 0, 15157, 19448 - ([0], [1]), - ([0, 15157], [1, 1]), - ([0, 15157], [1, 3]), - ([0, 15157], [5, -1]), - ([0, 15157, 19448], [1, 1, 1]), - ([0, 15157, 19448], [1e-2, 1, 5]), -]) +@pytest.mark.parametrize( + "idx, weights", + [ + # empirically determined approximately orthogonal columns: 0, 15157, 19448 + ([0], [1]), + ([0, 15157], [1, 1]), + ([0, 15157], [1, 3]), + ([0, 15157], [5, -1]), + ([0, 15157, 19448], [1, 1, 1]), + ([0, 15157, 19448], [1e-2, 1, 5]), + ], +) def test_split_gof_meg(forward, idx, weights): """Test GOF splitting on MEG data.""" - gain = forward['sol']['data'][:, idx] + gain = forward["sol"]["data"][:, idx] # close to orthogonal norms = np.linalg.norm(gain, axis=0) triu = np.triu_indices(len(idx), 1) @@ -320,7 +483,7 @@ def test_split_gof_meg(forward, idx, weights): # first, split across time (one dipole per time point) M = gain * weights gof_split = _split_gof(M, np.diag(weights), gain) - assert_allclose(gof_split.sum(0), 100., atol=1e-5) # all sum to 100 + assert_allclose(gof_split.sum(0), 100.0, atol=1e-5) # all sum to 100 assert_allclose(gof_split, 100 * np.eye(len(weights)), atol=1) # loc # next, summed to a single time point (all dipoles active at one time pt) weights = np.array(weights)[:, np.newaxis] @@ -333,33 +496,36 @@ def test_split_gof_meg(forward, idx, weights): assert_allclose(gof_split.sum(), 100, rtol=1e-5) -@pytest.mark.parametrize('n_sensors, n_dipoles, n_times', [ - (10, 15, 7), - (20, 60, 20), -]) -@pytest.mark.parametrize('nnz', [2, 4]) -@pytest.mark.parametrize('corr', [0.75]) -@pytest.mark.parametrize('n_orient', [1, 3]) -def test_mxne_inverse_sure_synthetic(n_sensors, n_dipoles, n_times, nnz, corr, - n_orient, snr=4): +@pytest.mark.parametrize( + "n_sensors, n_dipoles, n_times", + [ + (10, 15, 7), + (20, 60, 20), + ], +) +@pytest.mark.parametrize("nnz", [2, 4]) +@pytest.mark.parametrize("corr", [0.75]) +@pytest.mark.parametrize("n_orient", [1, 3]) +def test_mxne_inverse_sure_synthetic( + n_sensors, n_dipoles, n_times, nnz, corr, n_orient, snr=4 +): """Tests SURE criterion for automatic alpha selection on synthetic data.""" rng = np.random.RandomState(0) - sigma = np.sqrt(1 - corr ** 2) + sigma = np.sqrt(1 - corr**2) U = rng.randn(n_sensors) # generate gain matrix - G = np.empty([n_sensors, n_dipoles], order='F') + G = np.empty([n_sensors, n_dipoles], order="F") G[:, :n_orient] = np.expand_dims(U, axis=-1) n_dip_per_pos = n_dipoles // n_orient for j in range(1, n_dip_per_pos): U *= corr U += sigma * rng.randn(n_sensors) - G[:, j * n_orient:(j + 1) * n_orient] = np.expand_dims(U, axis=-1) + G[:, j * n_orient : (j + 1) * n_orient] = np.expand_dims(U, axis=-1) # generate coefficient matrix support = rng.choice(n_dip_per_pos, nnz, replace=False) X = np.zeros((n_dipoles, n_times)) for k in support: - X[k * 
n_orient:(k + 1) * n_orient, :] = rng.normal( - size=(n_orient, n_times)) + X[k * n_orient : (k + 1) * n_orient, :] = rng.normal(size=(n_orient, n_times)) # generate measurement matrix M = G @ X noise = rng.randn(n_sensors, n_times) @@ -368,52 +534,81 @@ def test_mxne_inverse_sure_synthetic(n_sensors, n_dipoles, n_times, nnz, corr, # inverse modeling with sure alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False) alpha_grid = np.geomspace(alpha_max, alpha_max / 10, num=15) - _, active_set, _ = _compute_mxne_sure(M, G, alpha_grid, sigma=sigma, - n_mxne_iter=5, maxit=3000, tol=1e-4, - n_orient=n_orient, - active_set_size=10, debias=True, - solver="auto", dgap_freq=10, - random_state=0, verbose=False) + _, active_set, _ = _compute_mxne_sure( + M, + G, + alpha_grid, + sigma=sigma, + n_mxne_iter=5, + maxit=3000, + tol=1e-4, + n_orient=n_orient, + active_set_size=10, + debias=True, + solver="auto", + dgap_freq=10, + random_state=0, + verbose=False, + ) assert np.count_nonzero(active_set, axis=-1) == n_orient * nnz @pytest.mark.slowtest # slow on Azure @testing.requires_testing_data -def test_mxne_inverse_sure(): +def test_mxne_inverse_sure_meg(): """Tests SURE criterion for automatic alpha selection on MEG data.""" + def data_fun(times): data = np.zeros(times.shape) data[times >= 0] = 50e-9 return data + n_dipoles = 2 - raw = mne.io.read_raw_fif(fname_raw) - info = mne.io.read_info(fname_data) - with info._unlock(): - info['projs'] = [] + raw = mne.io.read_raw_fif(fname_raw).pick_types("grad", exclude="bads") + raw.del_proj() + info = raw.info + del raw noise_cov = mne.make_ad_hoc_cov(info) - label_names = ['Aud-lh', 'Aud-rh'] + label_names = ["Aud-lh", "Aud-rh"] labels = [ - mne.read_label(data_path / 'MEG' / 'sample' / 'labels' / f'{ln}.label') - for ln in label_names] + mne.read_label(data_path / "MEG" / "sample" / "labels" / f"{ln}.label") + for ln in label_names + ] fname_fwd = ( - data_path - / "MEG" - / "sample" - / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" + data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" ) forward = mne.read_forward_solution(fname_fwd) - forward = mne.pick_types_forward(forward, meg="grad", eeg=False, - exclude=raw.info['bads']) - times = np.arange(100, dtype=np.float64) / raw.info['sfreq'] - 0.1 - stc = simulate_sparse_stc(forward['src'], n_dipoles=n_dipoles, times=times, - random_state=1, labels=labels, data_fun=data_fun) + forward = mne.pick_channels_forward(forward, info["ch_names"]) + times = np.arange(100, dtype=np.float64) / info["sfreq"] - 0.1 + stc = simulate_sparse_stc( + forward["src"], + n_dipoles=n_dipoles, + times=times, + random_state=1, + labels=labels, + data_fun=data_fun, + ) + assert len(stc.vertices) == 2 + assert_array_equal(stc.vertices[0], [89259]) + assert_array_equal(stc.vertices[1], [70279]) nave = 30 - evoked = simulate_evoked(forward, stc, info, noise_cov, nave=nave, - use_cps=False, iir_filter=None) + evoked = simulate_evoked( + forward, + stc, + info, + noise_cov, + nave=nave, + use_cps=False, + iir_filter=None, + random_state=0, + ) evoked = evoked.crop(tmin=0, tmax=10e-3) - stc_ = mixed_norm(evoked, forward, noise_cov, loose=0.9, n_mxne_iter=5, - depth=0.9) - assert_array_equal(stc_.vertices, stc.vertices) + stc_ = mixed_norm( + evoked, forward, noise_cov, loose=0.9, n_mxne_iter=5, depth=0.9, random_state=1 + ) + assert len(stc_.vertices) == len(stc.vertices) == 2 + for si in range(len(stc_.vertices)): + assert_array_equal(stc_.vertices[si], stc.vertices[si], err_msg=f"{si=}") @pytest.mark.slowtest # 
slow on Azure @@ -423,19 +618,23 @@ def test_mxne_inverse_empty(): evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0)) evoked.pick("grad", exclude="bads") fname_fwd = ( - data_path - / "MEG" - / "sample" - / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" + data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" ) forward = mne.read_forward_solution(fname_fwd) - forward = mne.pick_types_forward(forward, meg="grad", eeg=False, - exclude=evoked.info['bads']) + forward = mne.pick_types_forward( + forward, meg="grad", eeg=False, exclude=evoked.info["bads"] + ) cov = read_cov(fname_cov) - with pytest.warns(RuntimeWarning, match='too big'): + with pytest.warns(RuntimeWarning, match="too big"): stc, residual = mixed_norm( - evoked, forward, cov, n_mxne_iter=3, alpha=99, - return_residual=True) + evoked, + forward, + cov, + n_mxne_iter=3, + alpha=99, + return_residual=True, + random_state=0, + ) assert stc.data.size == 0 assert stc.vertices[0].size == 0 assert stc.vertices[1].size == 0 diff --git a/mne/inverse_sparse/tests/test_mxne_optim.py b/mne/inverse_sparse/tests/test_mxne_optim.py index 4081ee0e5f4..464fe274def 100644 --- a/mne/inverse_sparse/tests/test_mxne_optim.py +++ b/mne/inverse_sparse/tests/test_mxne_optim.py @@ -1,21 +1,29 @@ -# Author: Alexandre Gramfort -# Daniel Strohmeier -# -# License: Simplified BSD +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import pytest import numpy as np -from numpy.testing import (assert_array_equal, assert_array_almost_equal, - assert_allclose, assert_array_less) - -from mne.inverse_sparse.mxne_optim import (mixed_norm_solver, - tf_mixed_norm_solver, - iterative_mixed_norm_solver, - iterative_tf_mixed_norm_solver, - norm_epsilon_inf, norm_epsilon, - _Phi, _PhiT, dgap_l21l1) +import pytest +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, +) + +from mne.inverse_sparse.mxne_optim import ( + _Phi, + _PhiT, + dgap_l21l1, + iterative_mixed_norm_solver, + iterative_tf_mixed_norm_solver, + mixed_norm_solver, + norm_epsilon, + norm_epsilon_inf, + tf_mixed_norm_solver, +) from mne.time_frequency._stft import stft_norm2 -from mne.utils import catch_logging, _record_warnings +from mne.utils import _record_warnings, catch_logging def _generate_tf_data(): @@ -37,7 +45,7 @@ def _generate_tf_data(): def test_l21_mxne(): """Test convergence of MxNE solver.""" - n, p, t, alpha = 30, 40, 20, 1. 
+ n, p, t, alpha = 30, 40, 20, 1.0 rng = np.random.RandomState(0) G = rng.randn(n, p) G /= np.std(G, axis=0)[None, :] @@ -49,47 +57,61 @@ def test_l21_mxne(): args = (M, G, alpha, 1000, 1e-8) with _record_warnings(): # CD X_hat_cd, active_set, _, gap_cd = mixed_norm_solver( - *args, active_set_size=None, - debias=True, solver='cd', return_gap=True) + *args, active_set_size=None, debias=True, solver="cd", return_gap=True + ) assert_array_less(gap_cd, 1e-8) assert_array_equal(np.where(active_set)[0], [0, 4]) with _record_warnings(): # CD X_hat_bcd, active_set, E, gap_bcd = mixed_norm_solver( - M, G, alpha, maxit=1000, tol=1e-8, active_set_size=None, - debias=True, solver='bcd', return_gap=True) + M, + G, + alpha, + maxit=1000, + tol=1e-8, + active_set_size=None, + debias=True, + solver="bcd", + return_gap=True, + ) assert_array_less(gap_bcd, 9.6e-9) assert_array_equal(np.where(active_set)[0], [0, 4]) assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2) with _record_warnings(): # CD X_hat_cd, active_set, _ = mixed_norm_solver( - *args, active_set_size=2, debias=True, solver='cd') + *args, active_set_size=2, debias=True, solver="cd" + ) assert_array_equal(np.where(active_set)[0], [0, 4]) with _record_warnings(): # CD X_hat_bcd, active_set, _ = mixed_norm_solver( - *args, active_set_size=2, debias=True, solver='bcd') + *args, active_set_size=2, debias=True, solver="bcd" + ) assert_array_equal(np.where(active_set)[0], [0, 4]) assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2) with _record_warnings(): # CD X_hat_bcd, active_set, _ = mixed_norm_solver( - *args, active_set_size=2, debias=True, n_orient=2, solver='bcd') + *args, active_set_size=2, debias=True, n_orient=2, solver="bcd" + ) assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5]) # suppress a coordinate-descent warning here - with pytest.warns(RuntimeWarning, match='descent'): + with pytest.warns(RuntimeWarning, match="descent"): X_hat_cd, active_set, _ = mixed_norm_solver( - *args, active_set_size=2, debias=True, n_orient=2, solver='cd') + *args, active_set_size=2, debias=True, n_orient=2, solver="cd" + ) assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5]) assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2) with _record_warnings(): # CD X_hat_bcd, active_set, _ = mixed_norm_solver( - *args, active_set_size=2, debias=True, n_orient=5, solver='bcd') + *args, active_set_size=2, debias=True, n_orient=5, solver="bcd" + ) assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4]) - with pytest.warns(RuntimeWarning, match='descent'): + with pytest.warns(RuntimeWarning, match="descent"): X_hat_cd, active_set, _ = mixed_norm_solver( - *args, active_set_size=2, debias=True, n_orient=5, solver='cd') + *args, active_set_size=2, debias=True, n_orient=5, solver="cd" + ) assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4]) assert_allclose(X_hat_bcd, X_hat_cd) @@ -98,7 +120,7 @@ def test_l21_mxne(): @pytest.mark.slowtest def test_non_convergence(): """Test non-convergence of MxNE solver to catch unexpected bugs.""" - n, p, t, alpha = 30, 40, 20, 1. + n, p, t, alpha = 30, 40, 20, 1.0 rng = np.random.RandomState(0) G = rng.randn(n, p) G /= np.std(G, axis=0)[None, :] @@ -111,23 +133,34 @@ def test_non_convergence(): # In case of non-convegence, we test that no error is returned. 
args = (M, G, alpha, 1, 1e-12) with catch_logging() as log: - mixed_norm_solver(*args, active_set_size=None, debias=True, - solver='bcd', verbose=True) + mixed_norm_solver( + *args, active_set_size=None, debias=True, solver="bcd", verbose=True + ) log = log.getvalue() - assert 'Convergence reached' not in log + assert "Convergence reached" not in log def test_tf_mxne(): """Test convergence of TF-MxNE solver.""" - alpha_space = 10. - alpha_time = 5. + alpha_space = 10.0 + alpha_time = 5.0 M, G, active_set = _generate_tf_data() with _record_warnings(): # CD X_hat_tf, active_set_hat_tf, E, gap_tfmxne = tf_mixed_norm_solver( - M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, verbose=True, - n_orient=1, tstep=4, wsize=32, return_gap=True) + M, + G, + alpha_space, + alpha_time, + maxit=200, + tol=1e-8, + verbose=True, + n_orient=1, + tstep=4, + wsize=32, + return_gap=True, + ) assert_array_less(gap_tfmxne, 1e-8) assert_array_equal(np.where(active_set_hat_tf)[0], active_set) @@ -142,37 +175,42 @@ def test_norm_epsilon(): n_coefs = n_steps * n_freqs phi = _Phi(wsize, tstep, n_coefs, n_times) - Y = np.zeros(n_steps * n_freqs) + Y = np.zeros((n_steps * n_freqs).item()) l1_ratio = 0.03 - assert_allclose(norm_epsilon(Y, l1_ratio, phi), 0.) + assert_allclose(norm_epsilon(Y, l1_ratio, phi), 0.0) - Y[0] = 2. + Y[0] = 2.0 assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y)) - l1_ratio = 1. + l1_ratio = 1.0 assert_allclose(norm_epsilon(Y, l1_ratio, phi), np.max(Y)) # dummy value without random: - Y = np.arange(n_steps * n_freqs).reshape(-1, ) + Y = np.arange((n_steps * n_freqs).item()) l1_ratio = 0.0 - assert_allclose(norm_epsilon(Y, l1_ratio, phi) ** 2, - stft_norm2(Y.reshape(-1, n_freqs[0], n_steps[0]))) + assert_allclose( + norm_epsilon(Y, l1_ratio, phi) ** 2, + stft_norm2(Y.reshape(-1, n_freqs[0], n_steps[0])), + ) l1_ratio = 0.03 # test that vanilla epsilon norm = weights equal to 1 w_time = np.ones(n_coefs[0]) Y = np.abs(np.random.randn(n_coefs[0])) - assert_allclose(norm_epsilon(Y, l1_ratio, phi), - norm_epsilon(Y, l1_ratio, phi, w_time=w_time)) + assert_allclose( + norm_epsilon(Y, l1_ratio, phi), norm_epsilon(Y, l1_ratio, phi, w_time=w_time) + ) # scaling w_time and w_space by the same amount should divide # epsilon norm by the same amount - Y = np.arange(n_coefs) + 1 - mult = 2. + Y = np.arange(n_coefs.item()) + 1 + mult = 2.0 assert_allclose( - norm_epsilon(Y, l1_ratio, phi, w_space=1, - w_time=np.ones(n_coefs)) / mult, - norm_epsilon(Y, l1_ratio, phi, w_space=mult, - w_time=mult * np.ones(n_coefs))) + norm_epsilon(Y, l1_ratio, phi, w_space=1, w_time=np.ones(n_coefs.item())) + / mult, + norm_epsilon( + Y, l1_ratio, phi, w_space=mult, w_time=mult * np.ones(n_coefs.item()) + ), + ) @pytest.mark.slowtest # slow-ish on Travis OSX @@ -192,30 +230,59 @@ def test_dgapl21l1(): for l1_ratio in [0.05, 0.1]: alpha_max = norm_epsilon_inf(G, M, phi, l1_ratio, n_orient) - alpha_space = (1. 
- l1_ratio) * alpha_max + alpha_space = (1.0 - l1_ratio) * alpha_max alpha_time = l1_ratio * alpha_max Z = np.zeros([n_sources, phi.n_coefs.sum()]) # for alpha = alpha_max, Z = 0 is the solution so the dgap is 0 - gap = dgap_l21l1(M, G, Z, np.ones(n_sources, dtype=bool), - alpha_space, alpha_time, phi, phiT, - n_orient, -np.inf)[0] - - assert_allclose(0., gap) + gap = dgap_l21l1( + M, + G, + Z, + np.ones(n_sources, dtype=bool), + alpha_space, + alpha_time, + phi, + phiT, + n_orient, + -np.inf, + )[0] + + assert_allclose(0.0, gap) # check that solution for alpha smaller than alpha_max is non 0: X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver( - M, G, alpha_space / 1.01, alpha_time / 1.01, maxit=200, tol=1e-8, - verbose=True, debias=False, n_orient=n_orient, tstep=tstep, - wsize=wsize, return_gap=True) + M, + G, + alpha_space / 1.01, + alpha_time / 1.01, + maxit=200, + tol=1e-8, + verbose=True, + debias=False, + n_orient=n_orient, + tstep=tstep, + wsize=wsize, + return_gap=True, + ) # allow possible small numerical errors (negative gap) assert_array_less(-1e-10, gap) assert_array_less(gap, 1e-8) assert_array_less(1, len(active_set_hat_tf)) X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver( - M, G, alpha_space / 5., alpha_time / 5., maxit=200, tol=1e-8, - verbose=True, debias=False, n_orient=n_orient, tstep=tstep, - wsize=wsize, return_gap=True) + M, + G, + alpha_space / 5.0, + alpha_time / 5.0, + maxit=200, + tol=1e-8, + verbose=True, + debias=False, + n_orient=n_orient, + tstep=tstep, + wsize=wsize, + return_gap=True, + ) assert_array_less(-1e-10, gap) assert_array_less(gap, 1e-8) assert_array_less(1, len(active_set_hat_tf)) @@ -223,19 +290,37 @@ def test_dgapl21l1(): def test_tf_mxne_vs_mxne(): """Test equivalence of TF-MxNE (with alpha_time=0) and MxNE.""" - alpha_space = 60. - alpha_time = 0. 
+ alpha_space = 60.0 + alpha_time = 0.0 M, G, active_set = _generate_tf_data() X_hat_tf, active_set_hat_tf, E = tf_mixed_norm_solver( - M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, - verbose=True, debias=False, n_orient=1, tstep=4, wsize=32) + M, + G, + alpha_space, + alpha_time, + maxit=200, + tol=1e-8, + verbose=True, + debias=False, + n_orient=1, + tstep=4, + wsize=32, + ) # Also run L21 and check that we get the same X_hat_l21, _, _ = mixed_norm_solver( - M, G, alpha_space, maxit=200, tol=1e-8, verbose=False, n_orient=1, - active_set_size=None, debias=False) + M, + G, + alpha_space, + maxit=200, + tol=1e-8, + verbose=False, + n_orient=1, + active_set_size=None, + debias=False, + ) assert_allclose(X_hat_tf, X_hat_l21, rtol=1e-1) @@ -254,47 +339,107 @@ def test_iterative_reweighted_mxne(): with _record_warnings(): # CD X_hat_l21, _, _ = mixed_norm_solver( - M, G, alpha, maxit=1000, tol=1e-8, verbose=False, n_orient=1, - active_set_size=None, debias=False, solver='bcd') + M, + G, + alpha, + maxit=1000, + tol=1e-8, + verbose=False, + n_orient=1, + active_set_size=None, + debias=False, + solver="bcd", + ) with _record_warnings(): # CD X_hat_bcd, active_set, _ = iterative_mixed_norm_solver( - M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None, - debias=False, solver='bcd') + M, + G, + alpha, + 1, + maxit=1000, + tol=1e-8, + active_set_size=None, + debias=False, + solver="bcd", + ) assert_allclose(X_hat_bcd, X_hat_l21, rtol=1e-3) with _record_warnings(): # CD X_hat_bcd, active_set, _ = iterative_mixed_norm_solver( - M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, - debias=True, solver='bcd') + M, + G, + alpha, + 5, + maxit=1000, + tol=1e-8, + active_set_size=2, + debias=True, + solver="bcd", + ) assert_array_equal(np.where(active_set)[0], [0, 4]) with _record_warnings(): # CD X_hat_cd, active_set, _ = iterative_mixed_norm_solver( - M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None, - debias=True, solver='cd') + M, + G, + alpha, + 5, + maxit=1000, + tol=1e-8, + active_set_size=None, + debias=True, + solver="cd", + ) assert_array_equal(np.where(active_set)[0], [0, 4]) assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5) with _record_warnings(): # CD X_hat_bcd, active_set, _ = iterative_mixed_norm_solver( - M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, - debias=True, n_orient=2, solver='bcd') + M, + G, + alpha, + 5, + maxit=1000, + tol=1e-8, + active_set_size=2, + debias=True, + n_orient=2, + solver="bcd", + ) assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5]) # suppress a coordinate-descent warning here - with pytest.warns(RuntimeWarning, match='descent'): + with pytest.warns(RuntimeWarning, match="descent"): X_hat_cd, active_set, _ = iterative_mixed_norm_solver( - M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, - debias=True, n_orient=2, solver='cd') + M, + G, + alpha, + 5, + maxit=1000, + tol=1e-8, + active_set_size=2, + debias=True, + n_orient=2, + solver="cd", + ) assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5]) assert_allclose(X_hat_bcd, X_hat_cd) X_hat_bcd, active_set, _ = iterative_mixed_norm_solver( - M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, debias=True, - n_orient=5) + M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, debias=True, n_orient=5 + ) assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4]) - with pytest.warns(RuntimeWarning, match='descent'): + with pytest.warns(RuntimeWarning, match="descent"): X_hat_cd, active_set, _ = iterative_mixed_norm_solver( - M, G, alpha, 5, maxit=1000, 
tol=1e-8, active_set_size=2, - debias=True, n_orient=5, solver='cd') + M, + G, + alpha, + 5, + maxit=1000, + tol=1e-8, + active_set_size=2, + debias=True, + n_orient=5, + solver="cd", + ) assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4]) assert_allclose(X_hat_bcd, X_hat_cd) @@ -303,27 +448,69 @@ def test_iterative_reweighted_mxne(): def test_iterative_reweighted_tfmxne(): """Test convergence of irTF-MxNE solver.""" M, G, true_active_set = _generate_tf_data() - alpha_space = 38. + alpha_space = 38.0 alpha_time = 0.5 tstep, wsize = [4, 2], [64, 16] X_hat_tf, _, _ = tf_mixed_norm_solver( - M, G, alpha_space, alpha_time, maxit=1000, tol=1e-4, wsize=wsize, - tstep=tstep, verbose=False, n_orient=1, debias=False) + M, + G, + alpha_space, + alpha_time, + maxit=1000, + tol=1e-4, + wsize=wsize, + tstep=tstep, + verbose=False, + n_orient=1, + debias=False, + ) X_hat_bcd, active_set, _ = iterative_tf_mixed_norm_solver( - M, G, alpha_space, alpha_time, 1, wsize=wsize, tstep=tstep, - maxit=1000, tol=1e-4, debias=False, verbose=False) + M, + G, + alpha_space, + alpha_time, + 1, + wsize=wsize, + tstep=tstep, + maxit=1000, + tol=1e-4, + debias=False, + verbose=False, + ) assert_allclose(X_hat_tf, X_hat_bcd, rtol=1e-3) assert_array_equal(np.where(active_set)[0], true_active_set) - alpha_space = 50. + alpha_space = 50.0 X_hat_bcd, active_set, _ = iterative_tf_mixed_norm_solver( - M, G, alpha_space, alpha_time, 3, wsize=wsize, tstep=tstep, - n_orient=5, maxit=1000, tol=1e-4, debias=False, verbose=False) + M, + G, + alpha_space, + alpha_time, + 3, + wsize=wsize, + tstep=tstep, + n_orient=5, + maxit=1000, + tol=1e-4, + debias=False, + verbose=False, + ) assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4]) - alpha_space = 40. + alpha_space = 40.0 X_hat_bcd, active_set, _ = iterative_tf_mixed_norm_solver( - M, G, alpha_space, alpha_time, 2, wsize=wsize, tstep=tstep, - n_orient=2, maxit=1000, tol=1e-4, debias=False, verbose=False) + M, + G, + alpha_space, + alpha_time, + 2, + wsize=wsize, + tstep=tstep, + n_orient=2, + maxit=1000, + tol=1e-4, + debias=False, + verbose=False, + ) assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5]) diff --git a/mne/io/__init__.py b/mne/io/__init__.py index 6ed6b898566..40f385391cd 100644 --- a/mne/io/__init__.py +++ b/mne/io/__init__.py @@ -1,70 +1,9 @@ """IO module for reading raw data.""" -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from .open import fiff_open, show_fiff, _fiff_get_fid -from .meas_info import (read_fiducials, write_fiducials, read_info, write_info, - _empty_info, _merge_info, _force_update_info, Info, - anonymize_info, _writing_info_hdf5) +import lazy_loader as lazy -from .proj import make_eeg_average_ref_proj, Projection -from .tag import _loc_to_coil_trans, _coil_trans_to_loc, _loc_to_eeg_loc -from .base import BaseRaw - -from . import array -from . import base -from . import brainvision -from . import bti -from . import cnt -from . import ctf -from . import constants -from . import edf -from . import egi -from . import fiff -from . import fil -from . import kit -from . import nicolet -from . import nirx -from . import boxy -from . import persyst -from . import eeglab -from . import pick -from . 
import nihon - -from .array import RawArray -from .besa import read_evoked_besa -from .brainvision import read_raw_brainvision -from .bti import read_raw_bti -from .cnt import read_raw_cnt -from .ctf import read_raw_ctf -from .curry import read_raw_curry -from .edf import read_raw_edf, read_raw_bdf, read_raw_gdf -from .egi import read_raw_egi, read_evokeds_mff -from .kit import read_raw_kit, read_epochs_kit -from .fiff import read_raw_fif -from .fil import read_raw_fil -from .nedf import read_raw_nedf -from .nicolet import read_raw_nicolet -from .artemis123 import read_raw_artemis123 -from .eeglab import read_raw_eeglab, read_epochs_eeglab -from .eximia import read_raw_eximia -from .hitachi import read_raw_hitachi -from .nirx import read_raw_nirx -from .boxy import read_raw_boxy -from .snirf import read_raw_snirf -from .persyst import read_raw_persyst -from .fieldtrip import (read_raw_fieldtrip, read_epochs_fieldtrip, - read_evoked_fieldtrip) -from .nihon import read_raw_nihon -from ._read_raw import read_raw - - -# for backward compatibility -from .fiff import Raw -from .fiff import Raw as RawFIF -from .base import concatenate_raws -from .reference import (set_eeg_reference, set_bipolar_reference, - add_reference_channels) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/io/__init__.pyi b/mne/io/__init__.pyi new file mode 100644 index 00000000000..a9c11415f15 --- /dev/null +++ b/mne/io/__init__.pyi @@ -0,0 +1,91 @@ +__all__ = [ + "BaseRaw", + "Raw", + "RawArray", + "anonymize_info", + "concatenate_raws", + "constants", + "get_channel_type_constants", + "match_channel_orders", + "pick", + "read_epochs_eeglab", + "read_epochs_fieldtrip", + "read_epochs_kit", + "read_evoked_besa", + "read_evoked_fieldtrip", + "read_evokeds_mff", + "read_fiducials", + "read_info", + "read_raw", + "read_raw_ant", + "read_raw_artemis123", + "read_raw_bdf", + "read_raw_boxy", + "read_raw_brainvision", + "read_raw_bti", + "read_raw_cnt", + "read_raw_ctf", + "read_raw_curry", + "read_raw_edf", + "read_raw_eeglab", + "read_raw_egi", + "read_raw_eximia", + "read_raw_eyelink", + "read_raw_fieldtrip", + "read_raw_fif", + "read_raw_fil", + "read_raw_gdf", + "read_raw_hitachi", + "read_raw_kit", + "read_raw_nedf", + "read_raw_neuralynx", + "read_raw_nicolet", + "read_raw_nihon", + "read_raw_nirx", + "read_raw_nsx", + "read_raw_persyst", + "read_raw_snirf", + "show_fiff", + "write_fiducials", + "write_info", +] +from . 
import constants, pick +from ._fiff_wrap import ( + anonymize_info, + get_channel_type_constants, + read_fiducials, + read_info, + show_fiff, + write_fiducials, + write_info, +) +from ._read_raw import read_raw +from .ant import read_raw_ant +from .array import RawArray +from .artemis123 import read_raw_artemis123 +from .base import BaseRaw, concatenate_raws, match_channel_orders +from .besa import read_evoked_besa +from .boxy import read_raw_boxy +from .brainvision import read_raw_brainvision +from .bti import read_raw_bti +from .cnt import read_raw_cnt +from .ctf import read_raw_ctf +from .curry import read_raw_curry +from .edf import read_raw_bdf, read_raw_edf, read_raw_gdf +from .eeglab import read_epochs_eeglab, read_raw_eeglab +from .egi import read_evokeds_mff, read_raw_egi +from .eximia import read_raw_eximia +from .eyelink import read_raw_eyelink +from .fieldtrip import read_epochs_fieldtrip, read_evoked_fieldtrip, read_raw_fieldtrip +from .fiff import Raw, read_raw_fif +from .fil import read_raw_fil +from .hitachi import read_raw_hitachi +from .kit import read_epochs_kit, read_raw_kit +from .nedf import read_raw_nedf +from .neuralynx import read_raw_neuralynx +from .nicolet import read_raw_nicolet +from .nihon import read_raw_nihon +from .nirx import read_raw_nirx +from .nsx import read_raw_nsx +from .persyst import read_raw_persyst +from .snirf import read_raw_snirf diff --git a/mne/io/_digitization.py b/mne/io/_digitization.py deleted file mode 100644 index 37fb471ac68..00000000000 --- a/mne/io/_digitization.py +++ /dev/null @@ -1,545 +0,0 @@ -# -*- coding: utf-8 -*- -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Teon Brooks -# Stefan Appelhoff -# Joan Massich -# -# License: BSD-3-Clause - -import heapq -from collections import Counter - -import datetime -import os.path as op - -import numpy as np - -from ..utils import logger, warn, Bunch, _validate_type, _check_fname, verbose - -from .constants import FIFF, _coord_frame_named -from .tree import dir_tree_find -from .tag import read_tag -from .write import (start_and_end_file, write_dig_points) - -from ..transforms import (apply_trans, Transform, - get_ras_to_neuromag_trans, combine_transforms, - invert_transform, _to_const, _str_to_frame, - _coord_frame_name) -from .. 
import __version__ - -_dig_kind_dict = { - 'cardinal': FIFF.FIFFV_POINT_CARDINAL, - 'hpi': FIFF.FIFFV_POINT_HPI, - 'eeg': FIFF.FIFFV_POINT_EEG, - 'extra': FIFF.FIFFV_POINT_EXTRA, -} -_dig_kind_ints = tuple(sorted(_dig_kind_dict.values())) -_dig_kind_proper = {'cardinal': 'Cardinal', - 'hpi': 'HPI', - 'eeg': 'EEG', - 'extra': 'Extra', - 'unknown': 'Unknown'} -_dig_kind_rev = {val: key for key, val in _dig_kind_dict.items()} -_cardinal_kind_rev = {1: 'LPA', 2: 'Nasion', 3: 'RPA', 4: 'Inion'} - - -def _format_dig_points(dig, enforce_order=False): - """Format the dig points nicely.""" - if enforce_order and dig is not None: - # reorder points based on type: - # Fiducials/HPI, EEG, extra (headshape) - fids_digpoints = [] - hpi_digpoints = [] - eeg_digpoints = [] - extra_digpoints = [] - head_digpoints = [] - - # use a heap to enforce order on FIDS, EEG, Extra - for idx, digpoint in enumerate(dig): - ident = digpoint['ident'] - kind = digpoint['kind'] - - # push onto heap based on 'ident' (for the order) for - # each of the possible DigPoint 'kind's - # keep track of 'idx' in case of any clashes in - # the 'ident' variable, which can occur when - # user passes in DigMontage + DigMontage - if kind == FIFF.FIFFV_POINT_CARDINAL: - heapq.heappush(fids_digpoints, (ident, idx, digpoint)) - elif kind == FIFF.FIFFV_POINT_HPI: - heapq.heappush(hpi_digpoints, (ident, idx, digpoint)) - elif kind == FIFF.FIFFV_POINT_EEG: - heapq.heappush(eeg_digpoints, (ident, idx, digpoint)) - elif kind == FIFF.FIFFV_POINT_EXTRA: - heapq.heappush(extra_digpoints, (ident, idx, digpoint)) - elif kind == FIFF.FIFFV_POINT_HEAD: - heapq.heappush(head_digpoints, (ident, idx, digpoint)) - - # now recreate dig based on sorted order - fids_digpoints.sort(), hpi_digpoints.sort() - eeg_digpoints.sort() - extra_digpoints.sort(), head_digpoints.sort() - new_dig = [] - for idx, d in enumerate(fids_digpoints + hpi_digpoints + - extra_digpoints + eeg_digpoints + - head_digpoints): - new_dig.append(d[-1]) - dig = new_dig - - return [DigPoint(d) for d in dig] if dig is not None else dig - - -def _get_dig_eeg(dig): - return [d for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG] - - -def _count_points_by_type(dig): - """Get the number of points of each type.""" - occurrences = Counter([d['kind'] for d in dig]) - return dict( - fid=occurrences[FIFF.FIFFV_POINT_CARDINAL], - hpi=occurrences[FIFF.FIFFV_POINT_HPI], - eeg=occurrences[FIFF.FIFFV_POINT_EEG], - extra=occurrences[FIFF.FIFFV_POINT_EXTRA], - ) - - -_dig_keys = {'kind', 'ident', 'r', 'coord_frame'} - - -class DigPoint(dict): - """Container for a digitization point. - - This is a simple subclass of the standard dict type designed to provide - a readable string representation. - - Parameters - ---------- - kind : int - The kind of channel, - e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``. - r : array, shape (3,) - 3D position in m. and coord_frame. - ident : int - Number specifying the identity of the point. - e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``, - or 42 if kind is ``FIFFV_POINT_EEG``. - coord_frame : int - The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``. 
- """ - - def __repr__(self): # noqa: D105 - if self['kind'] == FIFF.FIFFV_POINT_CARDINAL: - id_ = _cardinal_kind_rev.get(self['ident'], 'Unknown cardinal') - else: - id_ = _dig_kind_proper[ - _dig_kind_rev.get(self['kind'], 'unknown')] - id_ = ('%s #%s' % (id_, self['ident'])) - id_ = id_.rjust(10) - cf = _coord_frame_name(self['coord_frame']) - if 'voxel' in cf: - pos = ('(%0.1f, %0.1f, %0.1f)' % tuple(self['r'])).ljust(25) - else: - pos = ('(%0.1f, %0.1f, %0.1f) mm' % - tuple(1000 * self['r'])).ljust(25) - return ('' % (id_, pos, cf)) - - # speed up info copy by only deep copying the mutable item - def __deepcopy__(self, memodict): - """Make a deepcopy.""" - return DigPoint( - kind=self['kind'], r=self['r'].copy(), - ident=self['ident'], coord_frame=self['coord_frame']) - - def __eq__(self, other): # noqa: D105 - """Compare two DigPoints. - - Two digpoints are equal if they are the same kind, share the same - coordinate frame and position. - """ - my_keys = ['kind', 'ident', 'coord_frame'] - if set(self.keys()) != set(other.keys()): - return False - elif any(self[_] != other[_] for _ in my_keys): - return False - else: - return np.allclose(self['r'], other['r']) - - -def _read_dig_fif(fid, meas_info): - """Read digitizer data from a FIFF file.""" - isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK) - dig = None - if len(isotrak) == 0: - logger.info('Isotrak not found') - elif len(isotrak) > 1: - warn('Multiple Isotrak found') - else: - isotrak = isotrak[0] - coord_frame = FIFF.FIFFV_COORD_HEAD - dig = [] - for k in range(isotrak['nent']): - kind = isotrak['directory'][k].kind - pos = isotrak['directory'][k].pos - if kind == FIFF.FIFF_DIG_POINT: - tag = read_tag(fid, pos) - dig.append(tag.data) - elif kind == FIFF.FIFF_MNE_COORD_FRAME: - tag = read_tag(fid, pos) - coord_frame = _coord_frame_named.get(int(tag.data)) - for d in dig: - d['coord_frame'] = coord_frame - return _format_dig_points(dig) - - -@verbose -def write_dig(fname, pts, coord_frame=None, *, overwrite=False, verbose=None): - """Write digitization data to a FIF file. - - Parameters - ---------- - fname : path-like - Destination file name. - pts : iterator of dict - Iterator through digitizer points. Each point is a dictionary with - the keys 'kind', 'ident' and 'r'. - coord_frame : int | str | None - If all the points have the same coordinate frame, specify the type - here. Can be None (default) if the points could have varying - coordinate frames. - %(overwrite)s - - .. versionadded:: 1.0 - %(verbose)s - - .. versionadded:: 1.0 - """ - fname = _check_fname(fname, overwrite=overwrite) - if coord_frame is not None: - coord_frame = _to_const(coord_frame) - pts_frames = {pt.get('coord_frame', coord_frame) for pt in pts} - bad_frames = pts_frames - {coord_frame} - if len(bad_frames) > 0: - raise ValueError( - 'Points have coord_frame entries that are incompatible with ' - 'coord_frame=%i: %s.' 
% (coord_frame, str(tuple(bad_frames)))) - - with start_and_end_file(fname) as fid: - write_dig_points(fid, pts, block=True, coord_frame=coord_frame) - - -_cardinal_ident_mapping = { - FIFF.FIFFV_POINT_NASION: 'nasion', - FIFF.FIFFV_POINT_LPA: 'lpa', - FIFF.FIFFV_POINT_RPA: 'rpa', -} - - -def _ensure_fiducials_head(dig): - # Ensure that there are all three fiducials in the head coord frame - fids = dict() - for d in dig: - if d['kind'] == FIFF.FIFFV_POINT_CARDINAL: - name = _cardinal_ident_mapping.get(d['ident'], None) - if name is not None: - fids[name] = d - radius = None - mults = dict( - lpa=[-1, 0, 0], - rpa=[1, 0, 0], - nasion=[0, 1, 0], - ) - for ident, name in _cardinal_ident_mapping.items(): - if name not in fids: - if radius is None: - radius = [ - np.linalg.norm(d['r']) for d in dig - if d['coord_frame'] == FIFF.FIFFV_COORD_HEAD] - if not radius: - return # can't complete, no head points - radius = np.mean(radius) - dig.append(DigPoint( - kind=FIFF.FIFFV_POINT_CARDINAL, ident=ident, - r=np.array(mults[name], float) * radius, - coord_frame=FIFF.FIFFV_COORD_HEAD, - )) - - -# XXXX: -# This does something really similar to _read_dig_montage_fif but: -# - does not check coord_frame -# - does not do any operation that implies assumptions with the names -def _get_data_as_dict_from_dig(dig, exclude_ref_channel=True): - """Obtain coordinate data from a Dig. - - Parameters - ---------- - dig : list of dicts - A container of DigPoints to be added to the info['dig']. - - Returns - ------- - ch_pos : dict - The container of all relevant channel positions inside dig. - """ - # Split up the dig points by category - hsp, hpi, elp = list(), list(), list() - fids, dig_ch_pos_location = dict(), list() - - for d in dig: - if d['kind'] == FIFF.FIFFV_POINT_CARDINAL: - fids[_cardinal_ident_mapping[d['ident']]] = d['r'] - elif d['kind'] == FIFF.FIFFV_POINT_HPI: - hpi.append(d['r']) - elp.append(d['r']) - elif d['kind'] == FIFF.FIFFV_POINT_EXTRA: - hsp.append(d['r']) - elif d['kind'] == FIFF.FIFFV_POINT_EEG: - if d['ident'] != 0 or not exclude_ref_channel: - dig_ch_pos_location.append(d['r']) - - dig_coord_frames = set([d['coord_frame'] for d in dig]) - if len(dig_coord_frames) != 1: - raise RuntimeError('Only single coordinate frame in dig is supported, ' - f'got {dig_coord_frames}') - dig_ch_pos_location = np.array(dig_ch_pos_location) - dig_ch_pos_location.shape = (-1, 3) # empty will be (0, 3) - return Bunch( - nasion=fids.get('nasion', None), - lpa=fids.get('lpa', None), - rpa=fids.get('rpa', None), - hsp=np.array(hsp) if len(hsp) else None, - hpi=np.array(hpi) if len(hpi) else None, - elp=np.array(elp) if len(elp) else None, - dig_ch_pos_location=dig_ch_pos_location, - coord_frame=dig_coord_frames.pop(), - ) - - -def _get_fid_coords(dig, raise_error=True): - fid_coords = Bunch(nasion=None, lpa=None, rpa=None) - fid_coord_frames = dict() - - for d in dig: - if d['kind'] == FIFF.FIFFV_POINT_CARDINAL: - key = _cardinal_ident_mapping[d['ident']] - fid_coords[key] = d['r'] - fid_coord_frames[key] = d['coord_frame'] - - if len(fid_coord_frames) > 0 and raise_error: - if set(fid_coord_frames.keys()) != set(['nasion', 'lpa', 'rpa']): - raise ValueError("Some fiducial points are missing (got %s)." 
% - fid_coord_frames.keys()) - - if len(set(fid_coord_frames.values())) > 1: - raise ValueError( - 'All fiducial points must be in the same coordinate system ' - '(got %s)' % len(fid_coord_frames) - ) - - coord_frame = fid_coord_frames.popitem()[1] if fid_coord_frames else None - - return fid_coords, coord_frame - - -def _write_dig_points(fname, dig_points): - """Write points to text file. - - Parameters - ---------- - fname : path-like - Path to the file to write. The kind of file to write is determined - based on the extension: '.txt' for tab separated text file. - dig_points : numpy.ndarray, shape (n_points, 3) - Points. - """ - _, ext = op.splitext(fname) - dig_points = np.asarray(dig_points) - if (dig_points.ndim != 2) or (dig_points.shape[1] != 3): - err = ("Points must be of shape (n_points, 3), " - "not %s" % (dig_points.shape,)) - raise ValueError(err) - - if ext == '.txt': - with open(fname, 'wb') as fid: - version = __version__ - now = datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y") - fid.write(b'%% Ascii 3D points file created by mne-python version' - b' %s at %s\n' % (version.encode(), now.encode())) - fid.write(b'%% %d 3D points, x y z per line\n' % len(dig_points)) - np.savetxt(fid, dig_points, delimiter='\t', newline='\n') - else: - msg = "Unrecognized extension: %r. Need '.txt'." % ext - raise ValueError(msg) - - -def _coord_frame_const(coord_frame): - if not isinstance(coord_frame, str) or coord_frame not in _str_to_frame: - raise ValueError('coord_frame must be one of %s, got %s' - % (sorted(_str_to_frame.keys()), coord_frame)) - return _str_to_frame[coord_frame] - - -def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None, - extra_points=None, dig_ch_pos=None, *, - coord_frame='head', add_missing_fiducials=False): - """Construct digitizer info for the info. - - Parameters - ---------- - nasion : array-like | numpy.ndarray, shape (3,) | None - Point designated as the nasion point. - lpa : array-like | numpy.ndarray, shape (3,) | None - Point designated as the left auricular point. - rpa : array-like | numpy.ndarray, shape (3,) | None - Point designated as the right auricular point. - hpi : array-like | numpy.ndarray, shape (n_points, 3) | None - Points designated as head position indicator points. - extra_points : array-like | numpy.ndarray, shape (n_points, 3) - Points designed as the headshape points. - dig_ch_pos : dict - Dict of EEG channel positions. - coord_frame : str - The coordinate frame of the points. Usually this is "unknown" - for native digitizer space. Defaults to "head". - add_missing_fiducials : bool - If True, add fiducials to the dig points if they are not present. - Requires that coord_frame='head' and that lpa, nasion, and rpa are all - None. - - Returns - ------- - dig : list of dicts - A container of DigPoints to be added to the info['dig']. 
- """ - coord_frame = _coord_frame_const(coord_frame) - - dig = [] - if lpa is not None: - lpa = np.asarray(lpa) - if lpa.shape != (3,): - raise ValueError('LPA should have the shape (3,) instead of %s' - % (lpa.shape,)) - dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA, - 'kind': FIFF.FIFFV_POINT_CARDINAL, - 'coord_frame': coord_frame}) - if nasion is not None: - nasion = np.asarray(nasion) - if nasion.shape != (3,): - raise ValueError('Nasion should have the shape (3,) instead of %s' - % (nasion.shape,)) - dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION, - 'kind': FIFF.FIFFV_POINT_CARDINAL, - 'coord_frame': coord_frame}) - if rpa is not None: - rpa = np.asarray(rpa) - if rpa.shape != (3,): - raise ValueError('RPA should have the shape (3,) instead of %s' - % (rpa.shape,)) - dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA, - 'kind': FIFF.FIFFV_POINT_CARDINAL, - 'coord_frame': coord_frame}) - if hpi is not None: - hpi = np.asarray(hpi) - if hpi.ndim != 2 or hpi.shape[1] != 3: - raise ValueError('HPI should have the shape (n_points, 3) instead ' - 'of %s' % (hpi.shape,)) - for idx, point in enumerate(hpi): - dig.append({'r': point, 'ident': idx + 1, - 'kind': FIFF.FIFFV_POINT_HPI, - 'coord_frame': coord_frame}) - if extra_points is not None: - extra_points = np.asarray(extra_points) - if len(extra_points) and extra_points.shape[1] != 3: - raise ValueError('Points should have the shape (n_points, 3) ' - 'instead of %s' % (extra_points.shape,)) - for idx, point in enumerate(extra_points): - dig.append({'r': point, 'ident': idx + 1, - 'kind': FIFF.FIFFV_POINT_EXTRA, - 'coord_frame': coord_frame}) - if dig_ch_pos is not None: - idents = [] - use_arange = False - for key, value in dig_ch_pos.items(): - _validate_type(key, str, 'dig_ch_pos') - try: - idents.append(int(key[-3:])) - except ValueError: - use_arange = True - _validate_type(value, (np.ndarray, list, tuple), 'dig_ch_pos') - value = np.array(value, dtype=float) - dig_ch_pos[key] = value - if value.shape != (3, ): - raise RuntimeError( - "The position should be a 1D array of 3 floats. " - f"Provided shape {value.shape}.") - if use_arange: - idents = np.arange(1, len(dig_ch_pos) + 1) - for key, ident in zip(dig_ch_pos, idents): - dig.append({'r': dig_ch_pos[key], 'ident': int(ident), - 'kind': FIFF.FIFFV_POINT_EEG, - 'coord_frame': coord_frame}) - if add_missing_fiducials: - assert coord_frame == FIFF.FIFFV_COORD_HEAD - # These being none is really an assumption that if you have one you - # should have all three. But we can relax this later if necessary. 
- assert lpa is None - assert rpa is None - assert nasion is None - _ensure_fiducials_head(dig) - - return _format_dig_points(dig) - - -def _call_make_dig_points(nasion, lpa, rpa, hpi, extra, convert=True): - if convert: - neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa) - nasion = apply_trans(neuromag_trans, nasion) - lpa = apply_trans(neuromag_trans, lpa) - rpa = apply_trans(neuromag_trans, rpa) - - if hpi is not None: - hpi = apply_trans(neuromag_trans, hpi) - - extra = apply_trans(neuromag_trans, extra).astype(np.float32) - else: - neuromag_trans = None - - ctf_head_t = Transform(fro='ctf_head', to='head', trans=neuromag_trans) - - info_dig = _make_dig_points(nasion=nasion, - lpa=lpa, - rpa=rpa, - hpi=hpi, - extra_points=extra) - - return info_dig, ctf_head_t - - -############################################################################## -# From artemis123 (we have modified the function a bit) -def _artemis123_read_pos(nas, lpa, rpa, hpi, extra): - # move into MNE head coords - dig_points, _ = _call_make_dig_points(nas, lpa, rpa, hpi, extra) - return dig_points - - -############################################################################## -# From bti -def _make_bti_dig_points(nasion, lpa, rpa, hpi, extra, - convert=False, use_hpi=False, - bti_dev_t=False, dev_ctf_t=False): - - _hpi = hpi if use_hpi else None - info_dig, ctf_head_t = _call_make_dig_points(nasion, lpa, rpa, _hpi, extra, - convert) - - if convert: - t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t, - 'meg', 'ctf_head') - dev_head_t = combine_transforms(t, ctf_head_t, 'meg', 'head') - else: - dev_head_t = Transform('meg', 'head', trans=None) - - return info_dig, dev_head_t, ctf_head_t # ctf_head_t should not be needed diff --git a/mne/io/_fiff_wrap.py b/mne/io/_fiff_wrap.py new file mode 100644 index 00000000000..e4566d61a40 --- /dev/null +++ b/mne/io/_fiff_wrap.py @@ -0,0 +1,20 @@ +# ruff: noqa: F401 +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# Backward compat since these were in the public API before switching to _fiff +# (and _empty_info is convenient to keep here for tests and is private) +from .._fiff.meas_info import ( + Info as _info, +) +from .._fiff.meas_info import ( + _empty_info, + anonymize_info, + read_fiducials, + read_info, + write_fiducials, + write_info, +) +from .._fiff.open import show_fiff +from .._fiff.pick import get_channel_type_constants # moved up a level diff --git a/mne/io/_read_raw.py b/mne/io/_read_raw.py index 5227b33ae86..28c86c00ee6 100644 --- a/mne/io/_read_raw.py +++ b/mne/io/_read_raw.py @@ -1,20 +1,14 @@ """Generic wrapper function read_raw for specific read_raw_xxx readers.""" -# Authors: Clemens Brunner -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. - -from pathlib import Path from functools import partial +from pathlib import Path -from . 
import (read_raw_edf, read_raw_bdf, read_raw_gdf, read_raw_brainvision, - read_raw_fif, read_raw_eeglab, read_raw_cnt, read_raw_egi, - read_raw_eximia, read_raw_nirx, read_raw_fieldtrip, - read_raw_artemis123, read_raw_nicolet, read_raw_kit, - read_raw_ctf, read_raw_boxy, read_raw_snirf, read_raw_fil, - read_raw_nihon) from ..utils import fill_doc +from .base import BaseRaw def _read_unsupported(fname, **kwargs): @@ -28,56 +22,102 @@ def _read_unsupported(fname, **kwargs): # supported read file formats -supported = { - ".edf": dict(EDF=read_raw_edf), - ".eeg": dict(NihonKoden=read_raw_nihon), - ".bdf": dict(BDF=read_raw_bdf), - ".gdf": dict(GDF=read_raw_gdf), - ".vhdr": dict(brainvision=read_raw_brainvision), - ".ahdr": dict(brainvision=read_raw_brainvision), - ".fif": dict(FIF=read_raw_fif), - ".fif.gz": dict(FIF=read_raw_fif), - ".set": dict(EEGLAB=read_raw_eeglab), - ".cnt": dict(CNT=read_raw_cnt), - ".mff": dict(EGI=read_raw_egi), - ".nxe": dict(eximia=read_raw_eximia), - ".hdr": dict(NIRx=read_raw_nirx), - ".snirf": dict(SNIRF=read_raw_snirf), - ".mat": dict(fieldtrip=read_raw_fieldtrip), - ".bin": { - 'ARTEMIS': read_raw_artemis123, - 'UCL FIL OPM': read_raw_fil, - }, - ".data": dict(Nicolet=read_raw_nicolet), - ".sqd": dict(KIT=read_raw_kit), - ".con": dict(KIT=read_raw_kit), - ".ds": dict(CTF=read_raw_ctf), - ".txt": dict(BOXY=read_raw_boxy), -} +def _get_supported(): + from . import ( + read_raw_ant, + read_raw_artemis123, + read_raw_bdf, + read_raw_boxy, + read_raw_brainvision, + read_raw_cnt, + read_raw_ctf, + read_raw_curry, + read_raw_edf, + read_raw_eeglab, + read_raw_egi, + read_raw_eximia, + read_raw_eyelink, + read_raw_fieldtrip, + read_raw_fif, + read_raw_fil, + read_raw_gdf, + read_raw_kit, + read_raw_nedf, + read_raw_nicolet, + read_raw_nihon, + read_raw_nirx, + read_raw_nsx, + read_raw_persyst, + read_raw_snirf, + ) + + return { + ".edf": dict(EDF=read_raw_edf), + ".eeg": dict(NihonKoden=read_raw_nihon), + ".bdf": dict(BDF=read_raw_bdf), + ".gdf": dict(GDF=read_raw_gdf), + ".vhdr": dict(brainvision=read_raw_brainvision), + ".ahdr": dict(brainvision=read_raw_brainvision), + ".fif": dict(FIF=read_raw_fif), + ".fif.gz": dict(FIF=read_raw_fif), + ".set": dict(EEGLAB=read_raw_eeglab), + ".cnt": dict(CNT=read_raw_cnt, ANT=read_raw_ant), + ".mff": dict(EGI=read_raw_egi), + ".nxe": dict(eximia=read_raw_eximia), + ".hdr": dict(NIRx=read_raw_nirx), + ".snirf": dict(SNIRF=read_raw_snirf), + ".mat": dict(fieldtrip=read_raw_fieldtrip), + ".bin": { + "ARTEMIS": read_raw_artemis123, + "UCL FIL OPM": read_raw_fil, + }, + ".data": dict(Nicolet=read_raw_nicolet), + ".sqd": dict(KIT=read_raw_kit), + ".con": dict(KIT=read_raw_kit), + ".ds": dict(CTF=read_raw_ctf), + ".txt": dict(BOXY=read_raw_boxy), + # Curry + ".dat": dict(CURRY=read_raw_curry), + ".dap": dict(CURRY=read_raw_curry), + ".rs3": dict(CURRY=read_raw_curry), + ".cdt": dict(CURRY=read_raw_curry), + ".cdt.dpa": dict(CURRY=read_raw_curry), + ".cdt.cef": dict(CURRY=read_raw_curry), + ".cef": dict(CURRY=read_raw_curry), + # NEDF + ".nedf": dict(NEDF=read_raw_nedf), + # EyeLink + ".asc": dict(EyeLink=read_raw_eyelink), + ".ns3": dict(NSx=read_raw_nsx), + ".lay": dict(Persyst=read_raw_persyst), + } + # known but unsupported file formats -suggested = { +_suggested = { ".vmrk": dict(brainvision=partial(_read_unsupported, suggest=".vhdr")), ".amrk": dict(brainvision=partial(_read_unsupported, suggest=".ahdr")), } + # all known file formats -readers = {**supported, **suggested} +def _get_readers(): + return {**_get_supported(), 
**_suggested} def split_name_ext(fname): """Return name and supported file extension.""" - maxsuffixes = max(ext.count(".") for ext in supported) + maxsuffixes = max(ext.count(".") for ext in _get_supported()) suffixes = Path(fname).suffixes for si in range(-maxsuffixes, 0): ext = "".join(suffixes[si:]).lower() - if ext in readers: - return Path(fname).name[:-len(ext)], ext + if ext in _get_readers(): + return Path(fname).name[: -len(ext)], ext return fname, None # unknown file extension @fill_doc -def read_raw(fname, *, preload=False, verbose=None, **kwargs): +def read_raw(fname, *, preload=False, verbose=None, **kwargs) -> BaseRaw: """Read raw file. This function is a convenient wrapper for readers defined in `mne.io`. The @@ -86,14 +126,31 @@ def read_raw(fname, *, preload=False, verbose=None, **kwargs): The following readers are currently supported: - `~mne.io.read_raw_artemis123`, `~mne.io.read_raw_bdf`, - `~mne.io.read_raw_boxy`, `~mne.io.read_raw_brainvision`, - `~mne.io.read_raw_cnt`, `~mne.io.read_raw_ctf`, `~mne.io.read_raw_edf`, - `~mne.io.read_raw_eeglab`, `~mne.io.read_raw_egi`, - `~mne.io.read_raw_eximia`, `~mne.io.read_raw_fieldtrip`, - `~mne.io.read_raw_fif`, `~mne.io.read_raw_gdf`, `~mne.io.read_raw_kit`, - `~mne.io.read_raw_fil`, - `~mne.io.read_raw_nicolet`, and `~mne.io.read_raw_nirx`. + * `~mne.io.read_raw_ant` + * `~mne.io.read_raw_artemis123` + * `~mne.io.read_raw_bdf` + * `~mne.io.read_raw_boxy` + * `~mne.io.read_raw_brainvision` + * `~mne.io.read_raw_cnt` + * `~mne.io.read_raw_ctf` + * `~mne.io.read_raw_curry` + * `~mne.io.read_raw_edf` + * `~mne.io.read_raw_eeglab` + * `~mne.io.read_raw_egi` + * `~mne.io.read_raw_eximia` + * `~mne.io.read_raw_eyelink` + * `~mne.io.read_raw_fieldtrip` + * `~mne.io.read_raw_fif` + * `~mne.io.read_raw_fil` + * `~mne.io.read_raw_gdf` + * `~mne.io.read_raw_kit` + * `~mne.io.read_raw_nedf` + * `~mne.io.read_raw_nicolet` + * `~mne.io.read_raw_nihon` + * `~mne.io.read_raw_nirx` + * `~mne.io.read_raw_nsx` + * `~mne.io.read_raw_persyst` + * `~mne.io.read_raw_snirf` Parameters ---------- @@ -112,8 +169,9 @@ def read_raw(fname, *, preload=False, verbose=None, **kwargs): Raw object. """ _, ext = split_name_ext(fname) - kwargs['verbose'] = verbose - kwargs['preload'] = preload + kwargs["verbose"] = verbose + kwargs["preload"] = preload + readers = _get_readers() if ext not in readers: _read_unsupported(fname) these_readers = list(readers[ext].values()) @@ -124,10 +182,12 @@ def read_raw(fname, *, preload=False, verbose=None, **kwargs): if len(these_readers) == 1: raise else: - choices = '\n'.join( - f'mne.io.{func.__name__.ljust(20)} ({kind})' - for kind, func in readers[ext].items()) + choices = "\n".join( + f"mne.io.{func.__name__.ljust(20)} ({kind})" + for kind, func in readers[ext].items() + ) raise RuntimeError( - 'Could not read file using any of the possible readers for ' - f'extension {ext}. Consider trying to read the file directly with ' - f'one of:\n{choices}') + "Could not read file using any of the possible readers for " + f"extension {ext}. Consider trying to read the file directly with " + f"one of:\n{choices}" + ) diff --git a/mne/io/ant/__init__.py b/mne/io/ant/__init__.py new file mode 100644 index 00000000000..86ae7b38e0d --- /dev/null +++ b/mne/io/ant/__init__.py @@ -0,0 +1,5 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
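To make the extension dispatch added in _read_raw.py above concrete: read_raw picks a reader by the longest known suffix (so ".fif.gz" wins over ".gz"), and for ambiguous extensions such as ".cnt" it simply tries each registered reader in turn. The sketch below mirrors that suffix matching with a local stand-in; the helper name guess_reader_ext and the small extension set are illustrative only and are not part of the MNE-Python API.

from pathlib import Path

# Illustrative subset of the extension -> reader mapping built by _get_supported().
_KNOWN_EXTS = {".fif", ".fif.gz", ".cnt", ".vhdr", ".snirf"}


def guess_reader_ext(fname):
    """Return the longest known extension of fname, or None (hypothetical helper)."""
    max_parts = max(ext.count(".") for ext in _KNOWN_EXTS)  # 2, because of ".fif.gz"
    suffixes = Path(fname).suffixes
    for si in range(-max_parts, 0):  # try the longest candidate suffix first
        ext = "".join(suffixes[si:]).lower()
        if ext in _KNOWN_EXTS:
            return ext
    return None  # read_raw falls back to _read_unsupported in this case


print(guess_reader_ext("sub-01_raw.fif.gz"))  # '.fif.gz', not just '.gz'
print(guess_reader_ext("recording.CNT"))      # '.cnt' -> CNT reader, then ANT reader
print(guess_reader_ext("notes.xyz"))          # None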
+ +from .ant import read_raw_ant diff --git a/mne/io/ant/ant.py b/mne/io/ant/ant.py new file mode 100644 index 00000000000..854406267f4 --- /dev/null +++ b/mne/io/ant/ant.py @@ -0,0 +1,338 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from __future__ import annotations + +import re +from collections import defaultdict +from typing import TYPE_CHECKING + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ...annotations import Annotations +from ...utils import ( + _check_fname, + _soft_import, + _validate_type, + copy_doc, + fill_doc, + logger, + verbose, + warn, +) +from ..base import BaseRaw + +if TYPE_CHECKING: + from pathlib import Path + + from numpy.typing import NDArray + +_UNITS: dict[str, float] = {"uv": 1e-6, "µv": 1e-6} + + +@fill_doc +class RawANT(BaseRaw): + r"""Reader for Raw ANT files in .cnt format. + + Parameters + ---------- + fname : file-like + Path to the ANT raw file to load. The file should have the extension ``.cnt``. + eog : str | None + Regex pattern to find EOG channel labels. If None, no EOG channels are + automatically detected. + misc : str | None + Regex pattern to find miscellaneous channels. If None, no miscellaneous channels + are automatically detected. The default pattern ``"BIP\d+"`` will mark all + bipolar channels as ``misc``. + + .. note:: + + A bipolar channel might actually contain ECG, EOG or other signal types + which might have a dedicated channel type in MNE-Python. In this case, use + :meth:`mne.io.Raw.set_channel_types` to change the channel type of the + channel. + bipolars : list of str | tuple of str | None + The list of channels to treat as bipolar EEG channels. Each element should be + a string of the form ``'anode-cathode'`` or in ANT terminology as ``'label- + reference'``. If None, all channels are interpreted as ``'eeg'`` channels + referenced to the same reference electrode. Bipolar channels are treated + as EEG channels with a special coil type in MNE-Python, see also + :func:`mne.set_bipolar_reference` + + .. warning:: + + Do not provide auxiliary channels in this argument, provide them in the + ``eog`` and ``misc`` arguments. + impedance_annotation : str + The string to use for impedance annotations. Defaults to ``"impedance"``, + however, the impedance measurement might mark the end of a segment and the + beginning of a new segment, in which case a discontinuity similar to what + :func:`mne.concatenate_raws` produces is present. In this case, it's better to + include a ``BAD_xxx`` annotation to mark the discontinuity. + + .. note:: + + Note that the impedance annotation will likely have a duration of ``0``. + If the measurement marks a discontinuity, the duration should be modified to + cover the discontinuity in its entirety. + encoding : str + Encoding to use for :class:`str` in the CNT file. Defaults to ``'latin-1'``. + %(preload)s + %(verbose)s + """ + + @verbose + def __init__( + self, + fname: str | Path, + eog: str | None, + misc: str | None, + bipolars: list[str] | tuple[str, ...] 
| None, + impedance_annotation: str, + *, + encoding: str = "latin-1", + preload: bool | NDArray, + verbose=None, + ) -> None: + logger.info("Reading ANT file %s", fname) + _soft_import("antio", "reading ANT files", min_version="0.5.0") + + from antio import read_cnt + from antio.parser import ( + read_device_info, + read_info, + read_meas_date, + read_subject_info, + read_triggers, + ) + + fname = _check_fname(fname, overwrite="read", must_exist=True, name="fname") + _validate_type(eog, (str, None), "eog") + _validate_type(misc, (str, None), "misc") + _validate_type(bipolars, (list, tuple, None), "bipolar") + _validate_type(impedance_annotation, (str,), "impedance_annotation") + if len(impedance_annotation) == 0: + raise ValueError("The impedance annotation cannot be an empty string.") + cnt = read_cnt(fname) + # parse channels, sampling frequency, and create info + ch_names, ch_units, ch_refs, _, _ = read_info(cnt, encoding=encoding) + ch_types = _parse_ch_types(ch_names, eog, misc, ch_refs) + if bipolars is not None: # handle bipolar channels + bipolars_idx = _handle_bipolar_channels(ch_names, ch_refs, bipolars) + for idx, ch in zip(bipolars_idx, bipolars): + if ch_types[idx] != "eeg": + warn( + f"Channel {ch} was not parsed as an EEG channel, changing to " + "EEG channel type since bipolar EEG was requested." + ) + ch_names[idx] = ch + ch_types[idx] = "eeg" + info = create_info( + ch_names, sfreq=cnt.get_sample_frequency(), ch_types=ch_types + ) + info.set_meas_date(read_meas_date(cnt)) + make, model, serial, site = read_device_info(cnt, encoding=encoding) + info["device_info"] = dict(type=make, model=model, serial=serial, site=site) + his_id, name, sex, birthday = read_subject_info(cnt, encoding=encoding) + info["subject_info"] = dict( + his_id=his_id, + first_name=name, + sex=sex, + ) + if birthday is not None: + info["subject_info"]["birthday"] = birthday + if bipolars is not None: + with info._unlock(): + for idx in bipolars_idx: + info["chs"][idx]["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR + first_samps = np.array((0,)) + last_samps = (cnt.get_sample_count() - 1,) + raw_extras = { + "orig_nchan": cnt.get_channel_count(), + "orig_ch_units": ch_units, + "first_samples": np.array(first_samps), + "last_samples": np.array(last_samps), + } + super().__init__( + info, + preload=preload, + first_samps=first_samps, + last_samps=last_samps, + filenames=[fname], + verbose=verbose, + raw_extras=[raw_extras], + ) + # look for annotations (called trigger by ant) + onsets, durations, descriptions, _, disconnect = read_triggers(cnt) + onsets, durations, descriptions = _prepare_annotations( + onsets, durations, descriptions, disconnect, impedance_annotation + ) + onsets = np.array(onsets) / self.info["sfreq"] + durations = np.array(durations) / self.info["sfreq"] + annotations = Annotations(onsets, duration=durations, description=descriptions) + self.set_annotations(annotations) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + from antio import read_cnt + from antio.parser import read_data + + ch_units = self._raw_extras[0]["orig_ch_units"] + first_samples = self._raw_extras[0]["first_samples"] + n_times = self._raw_extras[0]["last_samples"] + 1 + for first_samp, this_n_times in zip(first_samples, n_times): + i_start = max(start, first_samp) + i_stop = min(stop, this_n_times + first_samp) + # read and scale data array + cnt = read_cnt(self.filenames[fi]) + one = read_data(cnt, i_start, i_stop) + _scale_data(one, ch_units) + data_view = data[:, i_start - start : i_stop - 
start] + if isinstance(idx, slice): + data_view[:] = one[idx] + else: + # faster than doing one = one[idx] + np.take(one, idx, axis=0, out=data_view) + + +def _handle_bipolar_channels( + ch_names: list[str], ch_refs: list[str], bipolars: list[str] | tuple[str, ...] +) -> list[int]: + """Handle bipolar channels.""" + bipolars_idx = [] + for ch in bipolars: + _validate_type(ch, (str,), "bipolar_channel") + if "-" not in ch: + raise ValueError( + "Bipolar channels should be provided as 'anode-cathode' or " + f"'label-reference'. '{ch}' is not valid." + ) + anode, cathode = ch.split("-") + if anode not in ch_names: + raise ValueError(f"Anode channel {anode} not found in the channels.") + idx = ch_names.index(anode) + if cathode != ch_refs[idx]: + raise ValueError( + f"Reference electrode for {anode} is {ch_refs[idx]}, not {cathode}." + ) + # store idx for later FIFF coil type change + bipolars_idx.append(idx) + return bipolars_idx + + +def _parse_ch_types( + ch_names: list[str], eog: str | None, misc: str | None, ch_refs: list[str] +) -> list[str]: + """Parse the channel types.""" + eog = re.compile(eog) if eog is not None else None + misc = re.compile(misc) if misc is not None else None + ch_types = [] + for ch in ch_names: + if eog is not None and re.fullmatch(eog, ch): + ch_types.append("eog") + elif misc is not None and re.fullmatch(misc, ch): + ch_types.append("misc") + else: + ch_types.append("eeg") + eeg_refs = [ch_refs[k] for k, elt in enumerate(ch_types) if elt == "eeg"] + if len(set(eeg_refs)) == 1: + logger.info( + "All %i EEG channels are referenced to %s.", len(eeg_refs), eeg_refs[0] + ) + else: + warn("All EEG channels are not referenced to the same electrode.") + return ch_types + + +def _prepare_annotations( + onsets: list[int], + durations: list[int], + descriptions: list[str], + disconnect: dict[str, list[int]], + impedance_annotation: str, +) -> tuple[list[int], list[int], list[str]]: + """Parse the ANT triggers into better Annotations.""" + # first, let's replace the description 'impedance' with impedance_annotation + for k, desc in enumerate(descriptions): + if desc.lower() == "impedance": + descriptions[k] = impedance_annotation + # next, let's look for amplifier connection/disconnection and let's try to create + # BAD_disconnection annotations from them. + if ( + len(disconnect["start"]) == len(disconnect["stop"]) + and len(disconnect["start"]) != 0 + and all( + 0 <= stop - start + for start, stop in zip(disconnect["start"], disconnect["stop"]) + ) + ): + for start, stop in zip(disconnect["start"], disconnect["stop"]): + onsets.append(start) + durations.append(stop - start) + descriptions.append("BAD_disconnection") + else: + for elt in disconnect["start"]: + onsets.append(elt) + durations.append(0) + descriptions.append("Amplifier disconnected") + for elt in disconnect["stop"]: + onsets.append(elt) + durations.append(0) + descriptions.append("Amplifier reconnected") + return onsets, durations, descriptions + + +def _scale_data(data: NDArray[np.float64], ch_units: list[str]) -> None: + """Scale the data array based on the human-readable units reported by ANT. + + Operates in-place. + """ + units_index = defaultdict(list) + for idx, unit in enumerate(ch_units): + units_index[unit].append(idx) + for unit, value in units_index.items(): + if unit in _UNITS: + data[np.array(value, dtype=np.int16), :] *= _UNITS[unit] + else: + warn( + f"Unit {unit} not recognized, not scaling. Please report the unit on " + "a github issue on https://github.com/mne-tools/mne-python." 
+ ) + + +@copy_doc(RawANT) +def read_raw_ant( + fname, + eog=None, + misc=r"BIP\d+", + bipolars=None, + impedance_annotation="impedance", + *, + encoding: str = "latin-1", + preload=False, + verbose=None, +) -> RawANT: + """ + Returns + ------- + raw : instance of RawANT + A Raw object containing ANT data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + Notes + ----- + .. versionadded:: 1.9 + """ + return RawANT( + fname, + eog=eog, + misc=misc, + bipolars=bipolars, + impedance_annotation=impedance_annotation, + encoding=encoding, + preload=preload, + verbose=verbose, + ) diff --git a/mne/io/ant/tests/__init__.py b/mne/io/ant/tests/__init__.py new file mode 100644 index 00000000000..04c673b4e4d --- /dev/null +++ b/mne/io/ant/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/ant/tests/test_ant.py b/mne/io/ant/tests/test_ant.py new file mode 100644 index 00000000000..8c8530d400d --- /dev/null +++ b/mne/io/ant/tests/test_ant.py @@ -0,0 +1,490 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from __future__ import annotations + +import warnings +from pathlib import Path + +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from mne import Annotations +from mne._fiff.constants import FIFF +from mne.datasets import testing +from mne.io import BaseRaw, read_raw, read_raw_ant, read_raw_brainvision +from mne.io.ant.ant import RawANT + +pytest.importorskip("antio", minversion="0.5.0") +data_path = testing.data_path(download=False) / "antio" + + +TypeDataset = dict[ + str, dict[str, Path] | str | int | tuple[str, str, str] | dict[str, str] | None +] + + +def read_raw_bv(fname: Path) -> BaseRaw: + """Read a brainvision file exported from eego. + + For some reason, the first impedance measurement is annotated at sample 0. But since + BrainVision files are 1-indexed, the reader removes '1' to create 0-indexed + annotations. Thus, the first impedance measurement annotation ends up with an onset + 1 sample before the start of the recording. + This is not really an issue as the annotation duration is sufficient to make sure + that MNE does not drop it entirely as 'outside of the data range'. + """ + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message="Limited .* annotation.*outside the data range.", + category=RuntimeWarning, + ) + raw_bv = read_raw_brainvision(fname) + return raw_bv + + +@pytest.fixture(scope="module") +def ca_208() -> TypeDataset: + """Return the paths to the CA_208 dataset containing 64 channel gel recordings.""" + cnt = { + "short": data_path / "CA_208" / "test_CA_208.cnt", + "amp-dc": data_path / "CA_208" / "test_CA_208_amp_disconnection.cnt", + "start-stop": data_path / "CA_208" / "test_CA_208_start_stop.cnt", + } + bv = {key: value.with_suffix(".vhdr") for key, value in cnt.items()} + return { + "cnt": cnt, + "bv": bv, + "n_eeg": 64, + "n_misc": 24, + "meas_date": "2024-08-14-10-44-47+0000", + "patient_info": { + "name": "antio test", + "his_id": "", + "birthday": "2024-08-14", + "sex": 0, + }, + "machine_info": ("eego", "EE_225", ""), + "hospital": "", + } + + +@pytest.fixture(scope="module") +def ca_208_refs() -> TypeDataset: + """Return the paths and info to the CA_208_refs dataset. 
+ + The following montage was applied on export: + - highpass: 0.3 Hz - lowpass: 30 Hz + - Fp1, Fpz, Fp2 referenced to Fz + - CP3, CP4 referenced to Cz + - others to CPz + """ + cnt = { + "short": data_path / "CA_208_refs" / "test-ref.cnt", + "legacy": data_path / "CA_208_refs" / "test-ref-legacy.cnt", + } + bv = { + "short": cnt["short"].with_suffix(".vhdr"), + } + return { + "cnt": cnt, + "bv": bv, + "n_eeg": 64, + "n_misc": 0, + "meas_date": "2024-09-09-10-57-44+0000", + "patient_info": { + "name": "antio test", + "his_id": "", + "birthday": "2024-08-14", + "sex": 0, + }, + "machine_info": ("eego", "EE_225", ""), + "hospital": "", + } + + +@pytest.fixture(scope="module") +def andy_101() -> TypeDataset: + """Return the path and info to the andy_101 dataset.""" + cnt = { + "short": data_path / "andy_101" / "Andy_101-raw.cnt", + } + bv = {key: value.with_suffix(".vhdr") for key, value in cnt.items()} + return { + "cnt": cnt, + "bv": bv, + "n_eeg": 128, + "n_misc": 0, + "meas_date": "2024-08-19-16-17-07+0000", + "patient_info": { + "name": "Andy test_middle_name EEG_Exam", + "his_id": "test_subject_code", + "birthday": "2024-08-19", + "sex": 2, + }, + "machine_info": ("eego", "EE_226", ""), + "hospital": "", + } + + +@pytest.fixture(scope="module") +def na_271() -> TypeDataset: + """Return the path to a dataset containing a 128 channel recording. + + The recording was done with an NA_271 net dipped in saline solution. + """ + cnt = { + "short": data_path / "NA_271" / "test-na-271.cnt", + "legacy": data_path / "NA_271" / "test-na-271-legacy.cnt", + } + bv = { + "short": cnt["short"].with_suffix(".vhdr"), + } + return { + "cnt": cnt, + "bv": bv, + "n_eeg": 128, + "n_misc": 0, + "meas_date": "2024-09-06-10-45-07+0000", + "patient_info": { + "name": "antio test", + "his_id": "", + "birthday": "2024-08-14", + "sex": 0, + }, + "machine_info": ("eego", "EE_226", ""), + "hospital": "", + } + + +@pytest.fixture(scope="module") +def na_271_bips() -> TypeDataset: + """Return the path to a dataset containing a 128 channel recording. + + The recording was done with an NA_271 net dipped in saline solution and includes + bipolar channels.
+ """ + cnt = { + "short": data_path / "NA_271_bips" / "test-na-271.cnt", + "legacy": data_path / "NA_271_bips" / "test-na-271-legacy.cnt", + } + bv = { + "short": cnt["short"].with_suffix(".vhdr"), + } + return { + "cnt": cnt, + "bv": bv, + "n_eeg": 128, + "n_misc": 6, + "meas_date": "2024-09-06-10-37-23+0000", + "patient_info": { + "name": "antio test", + "his_id": "", + "birthday": "2024-08-14", + "sex": 0, + }, + "machine_info": ("eego", "EE_226", ""), + "hospital": "", + } + + +@pytest.fixture(scope="module") +def user_annotations() -> TypeDataset: + """Return the path to a dataset containing user annotations with floating pins.""" + cnt = { + "short": data_path / "user_annotations" / "test-user-annotation.cnt", + "legacy": data_path / "user_annotations" / "test-user-annotation-legacy.cnt", + } + bv = { + "short": cnt["short"].with_suffix(".vhdr"), + } + return { + "cnt": cnt, + "bv": bv, + "n_eeg": 64, + "n_misc": 0, + "meas_date": "2024-08-29-16-15-44+0000", + "patient_info": { + "name": "test test", + "his_id": "", + "birthday": "2024-02-06", + "sex": 0, + }, + "machine_info": ("eego", "EE_225", ""), + "hospital": "", + } + + +@testing.requires_testing_data +@pytest.mark.parametrize("dataset", ["ca_208", "andy_101", "na_271"]) +def test_io_data(dataset, request): + """Test loading of .cnt file.""" + dataset = request.getfixturevalue(dataset) + raw_cnt = read_raw_ant(dataset["cnt"]["short"]) # preload=False + raw_bv = read_raw_bv(dataset["bv"]["short"]) + cnt = raw_cnt.get_data() + bv = raw_bv.get_data() + assert cnt.shape == bv.shape + assert_allclose(cnt, bv, atol=1e-8) + + # check preload=False and preload=False with raw.load_data() + raw_cnt.crop(0.05, 1.05) + raw_cnt2 = read_raw_ant(dataset["cnt"]["short"], preload=False) + raw_cnt2.crop(0.05, 1.05).load_data() + assert_allclose(raw_cnt.get_data(), raw_cnt2.get_data()) + + # check preload=False vs Brainvision file + raw_bv.crop(0.05, 1.05) + assert_allclose(raw_cnt.get_data(), raw_bv.get_data(), atol=1e-8) + + # check preload=False vs BrainVision file after dropping channels + raw_cnt.pick(raw_cnt.ch_names[::2]) + raw_bv.pick(raw_bv.ch_names[::2]) + assert_allclose(raw_cnt.get_data(), raw_bv.get_data(), atol=1e-8) + + # check after raw_cnt.load_data() + raw_cnt.load_data() + assert_allclose(raw_cnt.get_data(), raw_bv.get_data(), atol=1e-8) + + # check preload True vs False + raw_cnt = read_raw_ant(dataset["cnt"]["short"], preload=False) + raw_cnt2 = read_raw_ant(dataset["cnt"]["short"], preload=True) + bads = [raw_cnt.ch_names[idx] for idx in (1, 5, 10)] + assert_allclose( + raw_cnt.drop_channels(bads).get_data(), raw_cnt2.drop_channels(bads).get_data() + ) + raw_bv = read_raw_bv(dataset["bv"]["short"]).drop_channels(bads) + assert_allclose(raw_cnt.get_data(), raw_bv.get_data(), atol=1e-8) + assert_allclose(raw_cnt2.get_data(), raw_bv.get_data(), atol=1e-8) + + +@testing.requires_testing_data +@pytest.mark.parametrize("dataset", ["ca_208", "andy_101", "na_271"]) +def test_io_info(dataset, request): + """Test the ifo loaded from a .cnt file.""" + dataset = request.getfixturevalue(dataset) + raw_cnt = read_raw_ant(dataset["cnt"]["short"]) # preload=False + raw_bv = read_raw_bv(dataset["bv"]["short"]) + assert raw_cnt.ch_names == raw_bv.ch_names + assert raw_cnt.info["sfreq"] == raw_bv.info["sfreq"] + assert ( + raw_cnt.get_channel_types() + == ["eeg"] * dataset["n_eeg"] + ["misc"] * dataset["n_misc"] + ) + assert_allclose( + (raw_bv.info["meas_date"] - raw_cnt.info["meas_date"]).total_seconds(), + 0, + atol=1e-3, + ) + + 
+@testing.requires_testing_data +def test_io_info_parse_misc(ca_208: TypeDataset): + """Test parsing misc channels from a .cnt file.""" + raw_cnt = read_raw_ant(ca_208["cnt"]["short"]) + with pytest.warns( + RuntimeWarning, + match="All EEG channels are not referenced to the same electrode.", + ): + raw_cnt = read_raw_ant(ca_208["cnt"]["short"], misc=None) + assert len(raw_cnt.ch_names) == ca_208["n_eeg"] + ca_208["n_misc"] + assert raw_cnt.get_channel_types() == ["eeg"] * len(raw_cnt.ch_names) + + +def test_io_info_parse_non_standard_misc(na_271_bips: TypeDataset): + """Test parsing misc channels with modified names from a .cnt file.""" + with pytest.warns( + RuntimeWarning, match="EEG channels are not referenced to the same electrode" + ): + raw = read_raw_ant(na_271_bips["cnt"]["short"], misc=None) + assert raw.get_channel_types() == ["eeg"] * ( + na_271_bips["n_eeg"] + na_271_bips["n_misc"] + ) + raw = read_raw_ant( + na_271_bips["cnt"]["short"], preload=False, misc=r".{0,1}E.{1}G|Aux|Audio" + ) + assert ( + raw.get_channel_types() + == ["eeg"] * na_271_bips["n_eeg"] + ["misc"] * na_271_bips["n_misc"] + ) + + +@testing.requires_testing_data +def test_io_info_parse_eog(ca_208: TypeDataset): + """Test parsing EOG channels from a .cnt file.""" + raw_cnt = read_raw_ant(ca_208["cnt"]["short"], eog="EOG") + assert len(raw_cnt.ch_names) == ca_208["n_eeg"] + ca_208["n_misc"] + idx = raw_cnt.ch_names.index("EOG") + ch_types = ["eeg"] * ca_208["n_eeg"] + ["misc"] * ca_208["n_misc"] + ch_types[idx] = "eog" + assert raw_cnt.get_channel_types() == ch_types + + +@testing.requires_testing_data +@pytest.mark.parametrize( + "dataset", ["andy_101", "ca_208", "na_271", "user_annotations"] +) +def test_subject_info(dataset, request): + """Test reading the subject info.""" + dataset = request.getfixturevalue(dataset) + raw_cnt = read_raw_ant(dataset["cnt"]["short"]) + subject_info = raw_cnt.info["subject_info"] + assert subject_info["his_id"] == dataset["patient_info"]["his_id"] + assert subject_info["first_name"] == dataset["patient_info"]["name"] + assert subject_info["sex"] == dataset["patient_info"]["sex"] + assert ( + subject_info["birthday"].strftime("%Y-%m-%d") + == dataset["patient_info"]["birthday"] + ) + + +@testing.requires_testing_data +@pytest.mark.parametrize( + "dataset", ["andy_101", "ca_208", "na_271", "user_annotations"] +) +def test_machine_info(dataset, request): + """Test reading the machine info.""" + dataset = request.getfixturevalue(dataset) + raw_cnt = read_raw_ant(dataset["cnt"]["short"]) + device_info = raw_cnt.info["device_info"] + make, model, serial = dataset["machine_info"] + assert device_info["type"] == make + assert device_info["model"] == model + assert device_info["serial"] == serial + + +@testing.requires_testing_data +def test_io_amp_disconnection(ca_208: TypeDataset): + """Test loading of .cnt file with amplifier disconnection.""" + raw_cnt = read_raw_ant(ca_208["cnt"]["amp-dc"]) + raw_bv = read_raw_bv(ca_208["bv"]["amp-dc"]) + assert_allclose(raw_cnt.get_data(), raw_bv.get_data(), atol=1e-8) + assert ( + raw_cnt.get_data(reject_by_annotation="omit").shape != raw_bv.get_data().shape + ) + # create annotation on the BV file + idx = [ + k + for k, elt in enumerate(raw_bv.annotations.description) + if any(code in elt for code in ("9001", "9002")) + ] + assert len(idx) == 2 + start = raw_bv.annotations.onset[idx[0]] + stop = raw_bv.annotations.onset[idx[1]] + annotations = Annotations( + onset=start, + duration=stop - start + 1 / raw_bv.info["sfreq"], # estimate is 1 
sample short + description="BAD_segment", + ) + raw_bv.set_annotations(annotations) + assert_allclose( + raw_cnt.get_data(reject_by_annotation="omit"), + raw_bv.get_data(reject_by_annotation="omit"), + atol=1e-8, + ) + + +@testing.requires_testing_data +@pytest.mark.parametrize("description", ["impedance", "test"]) +def test_io_impedance(ca_208: TypeDataset, description: str): + """Test loading of impedances from a .cnt file.""" + raw_cnt = read_raw_ant(ca_208["cnt"]["amp-dc"], impedance_annotation=description) + annotations = [ + annot for annot in raw_cnt.annotations if annot["description"] == description + ] + assert len(annotations) != 0 + + +@testing.requires_testing_data +def test_io_segments(ca_208: TypeDataset): + """Test reading a .cnt file with segments (start/stop).""" + raw_cnt = read_raw_ant(ca_208["cnt"]["start-stop"]) + raw_bv = read_raw_bv(ca_208["bv"]["start-stop"]) + assert_allclose(raw_cnt.get_data(), raw_bv.get_data(), atol=1e-8) + + +@testing.requires_testing_data +def test_annotations_and_preload(ca_208: TypeDataset): + """Test annotation loading with preload True/False.""" + raw_cnt_preloaded = read_raw_ant(ca_208["cnt"]["short"], preload=True) + assert len(raw_cnt_preloaded.annotations) == 2 # impedance measurements, start/end + raw_cnt = read_raw_ant(ca_208["cnt"]["short"], preload=False) + assert len(raw_cnt.annotations) == 2 + raw_cnt.crop(2, 3) + assert len(raw_cnt.annotations) == 0 + raw_cnt.load_data() + assert len(raw_cnt.annotations) == 0 + + raw_cnt_preloaded = read_raw_ant(ca_208["cnt"]["amp-dc"], preload=True) + assert len(raw_cnt_preloaded.annotations) == 5 # 4 impedances, 1 disconnection + raw_cnt = read_raw_ant(ca_208["cnt"]["amp-dc"], preload=False) + assert len(raw_cnt.annotations) == 5 + idx = np.where(raw_cnt.annotations.description == "BAD_disconnection")[0] + onset = raw_cnt.annotations.onset[idx][0] + raw_cnt.crop(0, onset - 1) + assert len(raw_cnt.annotations) == 1 # initial impedance measurement + assert raw_cnt.annotations.description[0] == "impedance" + + +@testing.requires_testing_data +def test_read_raw(ca_208: TypeDataset): + """Test loading through read_raw.""" + raw = read_raw(ca_208["cnt"]["short"]) + assert isinstance(raw, RawANT) + + +@testing.requires_testing_data +@pytest.mark.parametrize("preload", [True, False]) +def test_read_raw_with_user_annotations(user_annotations: TypeDataset, preload: bool): + """Test reading raw objects which have user annotations.""" + raw = read_raw_ant(user_annotations["cnt"]["short"], preload=preload) + assert raw.annotations + assert "1000/user-annot" in raw.annotations.description + assert "1000/user-annot-2" in raw.annotations.description + + +@testing.requires_testing_data +@pytest.mark.parametrize("dataset", ["na_271", "user_annotations"]) +def test_read_raw_legacy_format(dataset, request): + """Test reading the legacy CNT format.""" + dataset = request.getfixturevalue(dataset) + raw_cnt = read_raw_ant(dataset["cnt"]["short"]) # preload=False + raw_bv = read_raw_bv(dataset["bv"]["short"]) + assert raw_cnt.ch_names == raw_bv.ch_names + assert raw_cnt.info["sfreq"] == raw_bv.info["sfreq"] + assert ( + raw_cnt.get_channel_types() + == ["eeg"] * dataset["n_eeg"] + ["misc"] * dataset["n_misc"] + ) + assert_allclose( + (raw_bv.info["meas_date"] - raw_cnt.info["meas_date"]).total_seconds(), + 0, + atol=1e-3, + ) + + +@testing.requires_testing_data +def test_read_raw_custom_reference(ca_208_refs: TypeDataset): + """Test reading a CNT file with custom EEG references.""" + with pytest.warns(
RuntimeWarning, match="EEG channels are not referenced to the same electrode" + ): + raw = read_raw_ant(ca_208_refs["cnt"]["short"], preload=False) + for ch in raw.info["chs"]: + assert ch["coil_type"] == FIFF.FIFFV_COIL_EEG + bipolars = ("Fp1-Fz", "Fpz-Fz", "Fp2-Fz", "CP3-Cz", "CP4-Cz") + with pytest.warns( + RuntimeWarning, match="EEG channels are not referenced to the same electrode" + ): + raw = read_raw_ant( + ca_208_refs["cnt"]["short"], preload=False, bipolars=bipolars + ) + assert all(elt in raw.ch_names for elt in bipolars) + for ch in raw.info["chs"]: + if ch["ch_name"] in bipolars: + assert ch["coil_type"] == FIFF.FIFFV_COIL_EEG_BIPOLAR + else: + assert ch["coil_type"] == FIFF.FIFFV_COIL_EEG diff --git a/mne/io/array/__init__.py b/mne/io/array/__init__.py index 35778e47443..ad53f7c817f 100644 --- a/mne/io/array/__init__.py +++ b/mne/io/array/__init__.py @@ -1,5 +1,7 @@ """Module to convert user data to FIF.""" -# Author: Eric Larson +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from .array import RawArray +from ._array import RawArray diff --git a/mne/io/array/_array.py b/mne/io/array/_array.py new file mode 100644 index 00000000000..f1c987cb006 --- /dev/null +++ b/mne/io/array/_array.py @@ -0,0 +1,96 @@ +"""Tools for creating Raw objects from numpy arrays.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from ...utils import _check_option, _validate_type, fill_doc, logger, verbose +from ..base import BaseRaw + + +@fill_doc +class RawArray(BaseRaw): + """Raw object from numpy array. + + Parameters + ---------- + data : array, shape (n_channels, n_times) + The channels' time series. See notes for proper units of measure. + %(info_not_none)s Consider using :func:`mne.create_info` to populate + this structure. This may be modified in place by the class. + first_samp : int + First sample offset used during recording (default 0). + + .. versionadded:: 0.12 + copy : {'data', 'info', 'both', 'auto', None} + Determines what gets copied on instantiation. "auto" (default) + will copy info, and copy "data" only if necessary to get to + double floating point precision. + + .. 
versionadded:: 0.18 + %(verbose)s + + See Also + -------- + mne.EpochsArray + mne.EvokedArray + mne.create_info + + Notes + ----- + Proper units of measure: + + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog + * T: mag + * T/m: grad + * M: hbo, hbr + * Am: dipole + * AU: misc + """ + + @verbose + def __init__(self, data, info, first_samp=0, copy="auto", verbose=None): + _validate_type(info, "info", "info") + _check_option("copy", copy, ("data", "info", "both", "auto", None)) + dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64 + orig_data = data + data = np.asanyarray(orig_data, dtype=dtype) + if data.ndim != 2: + raise ValueError( + "Data must be a 2D array of shape (n_channels, n_samples), got shape " + f"{data.shape}" + ) + if len(data) != len(info["ch_names"]): + raise ValueError( + 'len(data) ({}) does not match len(info["ch_names"]) ({})'.format( + len(data), len(info["ch_names"]) + ) + ) + assert len(info["ch_names"]) == info["nchan"] + if copy in ("auto", "info", "both"): + info = info.copy() + if copy in ("data", "both"): + if data is orig_data: + data = data.copy() + elif copy != "auto" and data is not orig_data: + raise ValueError( + f"data copying was not requested by copy={copy!r} but it was required " + "to get to double floating point precision" + ) + logger.info( + f"Creating RawArray with {dtype.__name__} data, " + f"n_channels={data.shape[0]}, n_times={data.shape[1]}" + ) + super().__init__( + info, data, first_samps=(int(first_samp),), dtype=dtype, verbose=verbose + ) + logger.info( + " Range : %d ... %d = %9.3f ... %9.3f secs", + self.first_samp, + self.last_samp, + float(self.first_samp) / info["sfreq"], + float(self.last_samp) / info["sfreq"], + ) + logger.info("Ready.") diff --git a/mne/io/array/array.py b/mne/io/array/array.py deleted file mode 100644 index b4cf25d9f65..00000000000 --- a/mne/io/array/array.py +++ /dev/null @@ -1,87 +0,0 @@ -"""Tools for creating Raw objects from numpy arrays.""" - -# Authors: Eric Larson -# -# License: BSD-3-Clause - -import numpy as np - -from ..base import BaseRaw -from ...utils import verbose, logger, _validate_type, fill_doc, _check_option - - -@fill_doc -class RawArray(BaseRaw): - """Raw object from numpy array. - - Parameters - ---------- - data : array, shape (n_channels, n_times) - The channels' time series. See notes for proper units of measure. - %(info_not_none)s Consider using :func:`mne.create_info` to populate - this structure. This may be modified in place by the class. - first_samp : int - First sample offset used during recording (default 0). - - .. versionadded:: 0.12 - copy : {'data', 'info', 'both', 'auto', None} - Determines what gets copied on instantiation. "auto" (default) - will copy info, and copy "data" only if necessary to get to - double floating point precision. - - .. 
versionadded:: 0.18 - %(verbose)s - - See Also - -------- - mne.EpochsArray - mne.EvokedArray - mne.create_info - - Notes - ----- - Proper units of measure: - - * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog - * T: mag - * T/m: grad - * M: hbo, hbr - * Am: dipole - * AU: misc - """ - - @verbose - def __init__(self, data, info, first_samp=0, copy='auto', - verbose=None): # noqa: D102 - _validate_type(info, 'info', 'info') - _check_option('copy', copy, ('data', 'info', 'both', 'auto', None)) - dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64 - orig_data = data - data = np.asanyarray(orig_data, dtype=dtype) - if data.ndim != 2: - raise ValueError('Data must be a 2D array of shape (n_channels, ' - 'n_samples), got shape %s' % (data.shape,)) - if len(data) != len(info['ch_names']): - raise ValueError('len(data) (%s) does not match ' - 'len(info["ch_names"]) (%s)' - % (len(data), len(info['ch_names']))) - assert len(info['ch_names']) == info['nchan'] - if copy in ('auto', 'info', 'both'): - info = info.copy() - if copy in ('data', 'both'): - if data is orig_data: - data = data.copy() - elif copy != 'auto' and data is not orig_data: - raise ValueError('data copying was not requested by copy=%r but ' - 'it was required to get to double floating point ' - 'precision' % (copy,)) - logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s' - % (dtype.__name__, data.shape[0], data.shape[1])) - super(RawArray, self).__init__(info, data, - first_samps=(int(first_samp),), - dtype=dtype, verbose=verbose) - logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % ( - self.first_samp, self.last_samp, - float(self.first_samp) / info['sfreq'], - float(self.last_samp) / info['sfreq'])) - logger.info('Ready.') diff --git a/mne/io/array/tests/__init__.py b/mne/io/array/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/array/tests/__init__.py +++ b/mne/io/array/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/array/tests/test_array.py b/mne/io/array/tests/test_array.py index 4ab3587b8f6..edac2923c2d 100644 --- a/mne/io/array/tests/test_array.py +++ b/mne/io/array/tests/test_array.py @@ -1,46 +1,46 @@ -# Author: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
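As a short, self-contained illustration of the RawArray constructor and the copy semantics described in its docstring above (the channel names and values are arbitrary; the private _data attribute is inspected only the way test_array_copy below does):

import numpy as np

import mne

sfreq = 100.0  # Hz
# Two fake EEG channels, 1 s of data; 'eeg' data are expected in volts.
data = np.random.default_rng(0).standard_normal((2, 100)) * 1e-6
info = mne.create_info(["EEG 001", "EEG 002"], sfreq, ch_types="eeg")

raw = mne.io.RawArray(data, info)  # copy='auto': info is copied, float64 data is not
raw_shared = mne.io.RawArray(data, info, copy=None)  # no copies at all
assert raw_shared._data is data  # the array is shared, as test_array_copy asserts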
from pathlib import Path +import matplotlib.pyplot as plt import numpy as np -from numpy.testing import (assert_array_almost_equal, assert_allclose, - assert_equal) import pytest -import matplotlib.pyplot as plt +from numpy.testing import assert_allclose, assert_array_almost_equal, assert_equal -from mne import find_events, Epochs, pick_types +from mne import Epochs, find_events, pick_types +from mne._fiff.meas_info import create_info +from mne._fiff.pick import get_channel_type_constants +from mne.channels import make_dig_montage from mne.io import read_raw_fif from mne.io.array import RawArray from mne.io.tests.test_raw import _test_raw_reader -from mne.io.meas_info import create_info -from mne.io.pick import get_channel_type_constants -from mne.channels import make_dig_montage -base_dir = Path(__file__).parent.parent.parent / "tests" / "data" +base_dir = Path(__file__).parents[2] / "tests" / "data" fif_fname = base_dir / "test_raw.fif" def test_long_names(): """Test long name support.""" - info = create_info(['a' * 15 + 'b', 'a' * 16], 1000., verbose='error') + info = create_info(["a" * 15 + "b", "a" * 16], 1000.0, verbose="error") data = np.zeros((2, 1000)) raw = RawArray(data, info) - assert raw.ch_names == ['a' * 15 + 'b', 'a' * 16] + assert raw.ch_names == ["a" * 15 + "b", "a" * 16] # and a way to get the old behavior - raw.rename_channels({k: k[:13] for k in raw.ch_names}, - allow_duplicates=True, verbose='error') - assert raw.ch_names == ['a' * 13 + '-0', 'a' * 13 + '-1'] - info = create_info(['a' * 16] * 11, 1000., verbose='error') + raw.rename_channels( + {k: k[:13] for k in raw.ch_names}, allow_duplicates=True, verbose="error" + ) + assert raw.ch_names == ["a" * 13 + "-0", "a" * 13 + "-1"] + info = create_info(["a" * 16] * 11, 1000.0, verbose="error") data = np.zeros((11, 1000)) raw = RawArray(data, info) - assert raw.ch_names == ['a' * 16 + '-%s' % ii for ii in range(11)] + assert raw.ch_names == ["a" * 16 + f"-{ii}" for ii in range(11)] def test_array_copy(): """Test copying during construction.""" - info = create_info(1, 1000.) 
+ info = create_info(1, 1000.0) data = np.zeros((1, 1000)) # 'auto' (default) raw = RawArray(data, info) @@ -50,27 +50,27 @@ def test_array_copy(): assert raw._data is not data assert raw.info is not info # 'info' (more restrictive) - raw = RawArray(data, info, copy='info') + raw = RawArray(data, info, copy="info") assert raw._data is data assert raw.info is not info with pytest.raises(ValueError, match="data copying was not .* copy='info"): - RawArray(data.astype(np.float32), info, copy='info') + RawArray(data.astype(np.float32), info, copy="info") # 'data' - raw = RawArray(data, info, copy='data') + raw = RawArray(data, info, copy="data") assert raw._data is not data assert raw.info is info # 'both' - raw = RawArray(data, info, copy='both') + raw = RawArray(data, info, copy="both") assert raw._data is not data assert raw.info is not info - raw = RawArray(data.astype(np.float32), info, copy='both') + raw = RawArray(data.astype(np.float32), info, copy="both") assert raw._data is not data assert raw.info is not info # None raw = RawArray(data, info, copy=None) assert raw._data is data assert raw.info is info - with pytest.raises(ValueError, match='data copying was not .* copy=None'): + with pytest.raises(ValueError, match="data copying was not .* copy=None"): RawArray(data.astype(np.float32), info, copy=None) @@ -80,19 +80,24 @@ def test_array_raw(): # creating raw = read_raw_fif(fif_fname).crop(2, 5) data, times = raw[:, :] - sfreq = raw.info['sfreq'] - ch_names = [(ch[4:] if 'STI' not in ch else ch) - for ch in raw.info['ch_names']] # change them, why not + sfreq = raw.info["sfreq"] + ch_names = [ + (ch[4:] if "STI" not in ch else ch) for ch in raw.info["ch_names"] + ] # change them, why not types = list() for ci in range(101): - types.extend(('grad', 'grad', 'mag')) - types.extend(['ecog', 'seeg', 'hbo']) # really 4 meg channels - types.extend(['stim'] * 9) - types.extend(['dbs']) # really eeg channel - types.extend(['eeg'] * 60) - picks = np.concatenate([pick_types(raw.info, meg=True)[::20], - pick_types(raw.info, meg=False, stim=True), - pick_types(raw.info, meg=False, eeg=True)[::20]]) + types.extend(("grad", "grad", "mag")) + types.extend(["ecog", "seeg", "hbo"]) # really 4 meg channels + types.extend(["stim"] * 9) + types.extend(["dbs"]) # really eeg channel + types.extend(["eeg"] * 60) + picks = np.concatenate( + [ + pick_types(raw.info, meg=True)[::20], + pick_types(raw.info, meg=False, stim=True), + pick_types(raw.info, meg=False, eeg=True)[::20], + ] + ) del raw data = data[picks] ch_names = np.array(ch_names)[picks].tolist() @@ -101,37 +106,39 @@ def test_array_raw(): # wrong length pytest.raises(ValueError, create_info, ch_names, sfreq, types) # bad entry - types.append('foo') + types.append("foo") pytest.raises(KeyError, create_info, ch_names, sfreq, types) - types[-1] = 'eog' + types[-1] = "eog" # default type info = create_info(ch_names, sfreq) - assert_equal(info['chs'][0]['kind'], - get_channel_type_constants()['misc']['kind']) + assert_equal(info["chs"][0]["kind"], get_channel_type_constants()["misc"]["kind"]) # use real types info = create_info(ch_names, sfreq, types) - raw2 = _test_raw_reader(RawArray, test_preloading=False, - data=data, info=info, first_samp=2 * data.shape[1]) + raw2 = _test_raw_reader( + RawArray, + test_preloading=False, + data=data, + info=info, + first_samp=2 * data.shape[1], + ) data2, times2 = raw2[:, :] assert_allclose(data, data2) assert_allclose(times, times2) - assert ('RawArray' in repr(raw2)) + assert "RawArray" in repr(raw2) 
pytest.raises(TypeError, RawArray, info, data) # filtering - picks = pick_types(raw2.info, meg=True, misc=True, exclude='bads')[:4] + picks = pick_types(raw2.info, meg=True, misc=True, exclude="bads")[:4] assert_equal(len(picks), 4) raw_lp = raw2.copy() - kwargs = dict(fir_design='firwin', picks=picks) - raw_lp.filter(None, 4.0, h_trans_bandwidth=4., **kwargs) + kwargs = dict(fir_design="firwin", picks=picks) + raw_lp.filter(None, 4.0, h_trans_bandwidth=4.0, **kwargs) raw_hp = raw2.copy() - raw_hp.filter(16.0, None, l_trans_bandwidth=4., **kwargs) + raw_hp.filter(16.0, None, l_trans_bandwidth=4.0, **kwargs) raw_bp = raw2.copy() - raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4., - **kwargs) + raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4.0, h_trans_bandwidth=4.0, **kwargs) raw_bs = raw2.copy() - raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4., - **kwargs) + raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4.0, h_trans_bandwidth=4.0, **kwargs) data, _ = raw2[picks, :] lp_data, _ = raw_lp[picks, :] hp_data, _ = raw_hp[picks, :] @@ -143,12 +150,13 @@ def test_array_raw(): # plotting raw2.plot() - (raw2.compute_psd(tmax=2., n_fft=1024) - .plot(average=True, spatial_colors=False)) - plt.close('all') + raw2.compute_psd(tmax=2.0, n_fft=1024).plot( + average=True, amplitude=False, spatial_colors=False + ) + plt.close("all") # epoching - events = find_events(raw2, stim_channel='STI 014') + events = find_events(raw2, stim_channel="STI 014") events[:, 2] = 1 assert len(events) > 2 epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True) @@ -158,25 +166,24 @@ def test_array_raw(): # complex data rng = np.random.RandomState(0) data = rng.randn(1, 100) + 1j * rng.randn(1, 100) - raw = RawArray(data, create_info(1, 1000., 'eeg')) + raw = RawArray(data, create_info(1, 1000.0, "eeg")) assert_allclose(raw._data, data) # Using digital montage to give MNI electrode coordinates n_elec = 10 ts_size = 10000 - Fs = 512. + Fs = 512.0 ch_names = [str(i) for i in range(n_elec)] ch_pos_loc = np.random.randint(60, size=(n_elec, 3)).tolist() data = np.random.rand(n_elec, ts_size) montage = make_dig_montage( - ch_pos=dict(zip(ch_names, ch_pos_loc)), - coord_frame='head' + ch_pos=dict(zip(ch_names, ch_pos_loc)), coord_frame="head" ) - info = create_info(ch_names, Fs, 'ecog') + info = create_info(ch_names, Fs, "ecog") raw = RawArray(data, info) raw.set_montage(montage) spectrum = raw.compute_psd() - spectrum.plot(average=False) # looking for nonexistent layout + spectrum.plot(average=False, amplitude=False) # looking for nonexistent layout spectrum.plot_topo() diff --git a/mne/io/artemis123/__init__.py b/mne/io/artemis123/__init__.py index d9becf44eb1..7a51a739098 100644 --- a/mne/io/artemis123/__init__.py +++ b/mne/io/artemis123/__init__.py @@ -1,7 +1,7 @@ """artemis123 module for conversion to FIF.""" -# Author: Luke Bloy -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .artemis123 import read_raw_artemis123 diff --git a/mne/io/artemis123/artemis123.py b/mne/io/artemis123/artemis123.py index 2a84588772e..56534c57ca0 100644 --- a/mne/io/artemis123/artemis123.py +++ b/mne/io/artemis123/artemis123.py @@ -1,25 +1,28 @@ -# Author: Luke Bloy -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-import numpy as np -import os.path as op -import datetime import calendar +import datetime +import os.path as op -from .utils import _load_mne_locs, _read_pos -from ...utils import logger, warn, verbose, _check_fname -from ..utils import _read_segments_file +import numpy as np +from scipy.spatial.distance import cdist + +from ..._fiff._digitization import DigPoint, _make_dig_points +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _read_segments_file +from ...transforms import Transform, apply_trans, get_ras_to_neuromag_trans +from ...utils import _check_fname, logger, verbose, warn from ..base import BaseRaw -from ..meas_info import _empty_info -from .._digitization import _make_dig_points, DigPoint -from ..constants import FIFF -from ...transforms import get_ras_to_neuromag_trans, apply_trans, Transform +from .utils import _load_mne_locs, _read_pos @verbose -def read_raw_artemis123(input_fname, preload=False, verbose=None, - pos_fname=None, add_head_trans=True): +def read_raw_artemis123( + input_fname, preload=False, verbose=None, pos_fname=None, add_head_trans=True +) -> "RawArtemis123": """Read Artemis123 data as raw object. Parameters @@ -47,27 +50,39 @@ def read_raw_artemis123(input_fname, preload=False, verbose=None, -------- mne.io.Raw : Documentation of attributes and methods. """ - return RawArtemis123(input_fname, preload=preload, verbose=verbose, - pos_fname=pos_fname, add_head_trans=add_head_trans) + return RawArtemis123( + input_fname, + preload=preload, + verbose=verbose, + pos_fname=pos_fname, + add_head_trans=add_head_trans, + ) def _get_artemis123_info(fname, pos_fname=None): """Generate info struct from artemis123 header file.""" fname = op.splitext(fname)[0] - header = fname + '.txt' + header = fname + ".txt" - logger.info('Reading header...') + logger.info("Reading header...") # key names for artemis channel info... 
- chan_keys = ['name', 'scaling', 'FLL_Gain', 'FLL_Mode', 'FLL_HighPass', - 'FLL_AutoReset', 'FLL_ResetLock'] + chan_keys = [ + "name", + "scaling", + "FLL_Gain", + "FLL_Mode", + "FLL_HighPass", + "FLL_AutoReset", + "FLL_ResetLock", + ] header_info = dict() - header_info['filter_hist'] = [] - header_info['comments'] = '' - header_info['channels'] = [] + header_info["filter_hist"] = [] + header_info["comments"] = "" + header_info["channels"] = [] - with open(header, 'r') as fid: + with open(header) as fid: # section flag # 0 - None # 1 - main header @@ -78,12 +93,11 @@ def _get_artemis123_info(fname, pos_fname=None): sectionFlag = 0 for line in fid: # skip emptylines or header line for channel info - if ((not line.strip()) or - (sectionFlag == 2 and line.startswith('DAQ Map'))): + if (not line.strip()) or (sectionFlag == 2 and line.startswith("DAQ Map")): continue # set sectionFlag - if line.startswith('"): sectionFlag = 1 @@ -99,149 +113,158 @@ def _get_artemis123_info(fname, pos_fname=None): # parse header info lines # part of main header - lines are name value pairs if sectionFlag == 1: - values = line.strip().split('\t') + values = line.strip().split("\t") if len(values) == 1: - values.append('') + values.append("") header_info[values[0]] = values[1] # part of channel header - lines are Channel Info elif sectionFlag == 2: - values = line.strip().split('\t') + values = line.strip().split("\t") if len(values) != 7: - raise IOError('Error parsing line \n\t:%s\n' % line + - 'from file %s' % header) + raise OSError( + f"Error parsing line \n\t:{line}\nfrom file {header}" + ) tmp = dict() for k, v in zip(chan_keys, values): tmp[k] = v - header_info['channels'].append(tmp) + header_info["channels"].append(tmp) elif sectionFlag == 3: - header_info['comments'] = '%s%s' \ - % (header_info['comments'], line.strip()) + header_info["comments"] = f"{header_info['comments']}{line.strip()}" elif sectionFlag == 4: - header_info['num_samples'] = int(line.strip()) + header_info["num_samples"] = int(line.strip()) elif sectionFlag == 5: - header_info['filter_hist'].append(line.strip()) - - for k in ['Temporal Filter Active?', 'Decimation Active?', - 'Spatial Filter Active?']: - if header_info[k] != 'FALSE': - warn('%s - set to but is not supported' % k) - if header_info['filter_hist']: - warn('Non-Empty Filter history found, BUT is not supported' % k) + header_info["filter_hist"].append(line.strip()) + + for k in [ + "Temporal Filter Active?", + "Decimation Active?", + "Spatial Filter Active?", + ]: + if header_info[k] != "FALSE": + warn(f"{k} - set to but is not supported") + if header_info["filter_hist"]: + warn("Non-Empty Filter history found, BUT is not supported") # build mne info struct - info = _empty_info(float(header_info['DAQ Sample Rate'])) + info = _empty_info(float(header_info["DAQ Sample Rate"])) # Attempt to get time/date from fname # Artemis123 files saved from the scanner observe the following # naming convention 'Artemis_Data_YYYY-MM-DD-HHh-MMm_[chosen by user].bin' try: date = datetime.datetime.strptime( - op.basename(fname).split('_')[2], '%Y-%m-%d-%Hh-%Mm') + op.basename(fname).split("_")[2], "%Y-%m-%d-%Hh-%Mm" + ) meas_date = (calendar.timegm(date.utctimetuple()), 0) except Exception: meas_date = None # build subject info must be an integer (as per FIFF) try: - subject_info = {'id': int(header_info['Subject ID'])} + subject_info = {"id": int(header_info["Subject ID"])} except ValueError: - subject_info = {'id': 0} + subject_info = {"id": 0} # build description - desc = '' - for k in 
['Purpose', 'Notes']: - desc += '{} : {}\n'.format(k, header_info[k]) - desc += 'Comments : {}'.format(header_info['comments']) - - info.update({'meas_date': meas_date, - 'description': desc, - 'subject_info': subject_info, - 'proj_name': header_info['Project Name']}) + desc = "" + for k in ["Purpose", "Notes"]: + desc += f"{k} : {header_info[k]}\n" + desc += f"Comments : {header_info['comments']}" + + info.update( + { + "meas_date": meas_date, + "description": desc, + "subject_info": subject_info, + "proj_name": header_info["Project Name"], + } + ) # Channel Names by type - ref_mag_names = ['REF_001', 'REF_002', 'REF_003', - 'REF_004', 'REF_005', 'REF_006'] + ref_mag_names = ["REF_001", "REF_002", "REF_003", "REF_004", "REF_005", "REF_006"] - ref_grad_names = ['REF_007', 'REF_008', 'REF_009', - 'REF_010', 'REF_011', 'REF_012'] + ref_grad_names = ["REF_007", "REF_008", "REF_009", "REF_010", "REF_011", "REF_012"] # load mne loc dictionary loc_dict = _load_mne_locs() - info['chs'] = [] - info['bads'] = [] + info["chs"] = [] + bads = [] - for i, chan in enumerate(header_info['channels']): + for i, chan in enumerate(header_info["channels"]): # build chs struct - t = {'cal': float(chan['scaling']), 'ch_name': chan['name'], - 'logno': i + 1, 'scanno': i + 1, 'range': 1.0, - 'unit_mul': FIFF.FIFF_UNITM_NONE, - 'coord_frame': FIFF.FIFFV_COORD_DEVICE} + t = { + "cal": float(chan["scaling"]), + "ch_name": chan["name"], + "logno": i + 1, + "scanno": i + 1, + "range": 1.0, + "unit_mul": FIFF.FIFF_UNITM_NONE, + "coord_frame": FIFF.FIFFV_COORD_DEVICE, + } # REF_018 has a zero cal which can cause problems. Let's set it to # a value of another ref channel to make writers/readers happy. - if t['cal'] == 0: - t['cal'] = 4.716e-10 - info['bads'].append(t['ch_name']) - t['loc'] = loc_dict.get(chan['name'], np.zeros(12)) - - if (chan['name'].startswith('MEG')): - t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_GRAD - t['kind'] = FIFF.FIFFV_MEG_CH + if t["cal"] == 0: + t["cal"] = 4.716e-10 + bads.append(t["ch_name"]) + t["loc"] = loc_dict.get(chan["name"], np.zeros(12)) + + if chan["name"].startswith("MEG"): + t["coil_type"] = FIFF.FIFFV_COIL_ARTEMIS123_GRAD + t["kind"] = FIFF.FIFFV_MEG_CH # While gradiometer units are T/m, the meg sensors referred to as # gradiometers report the field difference between 2 pick-up coils. # Therefore the units of the measurements should be T # *AND* the baseline (difference between pickup coils) # should not be used in leadfield / forwardfield computations. - t['unit'] = FIFF.FIFF_UNIT_T - t['unit_mul'] = FIFF.FIFF_UNITM_F + t["unit"] = FIFF.FIFF_UNIT_T + t["unit_mul"] = FIFF.FIFF_UNITM_F # 3 axis reference magnetometers - elif (chan['name'] in ref_mag_names): - t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG - t['kind'] = FIFF.FIFFV_REF_MEG_CH - t['unit'] = FIFF.FIFF_UNIT_T - t['unit_mul'] = FIFF.FIFF_UNITM_F + elif chan["name"] in ref_mag_names: + t["coil_type"] = FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG + t["kind"] = FIFF.FIFFV_REF_MEG_CH + t["unit"] = FIFF.FIFF_UNIT_T + t["unit_mul"] = FIFF.FIFF_UNITM_F # reference gradiometers - elif (chan['name'] in ref_grad_names): - t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD - t['kind'] = FIFF.FIFFV_REF_MEG_CH + elif chan["name"] in ref_grad_names: + t["coil_type"] = FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD + t["kind"] = FIFF.FIFFV_REF_MEG_CH # While gradiometer units are T/m, the meg sensors referred to as # gradiometers report the field difference between 2 pick-up coils. 
# Therefore the units of the measurements should be T # *AND* the baseline (difference between pickup coils) # should not be used in leadfield / forwardfield computations. - t['unit'] = FIFF.FIFF_UNIT_T - t['unit_mul'] = FIFF.FIFF_UNITM_F + t["unit"] = FIFF.FIFF_UNIT_T + t["unit_mul"] = FIFF.FIFF_UNITM_F # other reference channels are unplugged and should be ignored. - elif (chan['name'].startswith('REF')): - t['coil_type'] = FIFF.FIFFV_COIL_NONE - t['kind'] = FIFF.FIFFV_MISC_CH - t['unit'] = FIFF.FIFF_UNIT_V - info['bads'].append(t['ch_name']) - - elif (chan['name'].startswith(('AUX', 'TRG', 'MIO'))): - t['coil_type'] = FIFF.FIFFV_COIL_NONE - t['unit'] = FIFF.FIFF_UNIT_V - if (chan['name'].startswith('TRG')): - t['kind'] = FIFF.FIFFV_STIM_CH + elif chan["name"].startswith("REF"): + t["coil_type"] = FIFF.FIFFV_COIL_NONE + t["kind"] = FIFF.FIFFV_MISC_CH + t["unit"] = FIFF.FIFF_UNIT_V + bads.append(t["ch_name"]) + + elif chan["name"].startswith(("AUX", "TRG", "MIO")): + t["coil_type"] = FIFF.FIFFV_COIL_NONE + t["unit"] = FIFF.FIFF_UNIT_V + if chan["name"].startswith("TRG"): + t["kind"] = FIFF.FIFFV_STIM_CH else: - t['kind'] = FIFF.FIFFV_MISC_CH + t["kind"] = FIFF.FIFFV_MISC_CH else: - raise ValueError('Channel does not match expected' + - ' channel Types:"%s"' % chan['name']) + raise ValueError( + f'Channel does not match expected channel Types:"{chan["name"]}"' + ) # incorporate multiplier (unit_mul) into calibration - t['cal'] *= 10 ** t['unit_mul'] - t['unit_mul'] = FIFF.FIFF_UNITM_NONE + t["cal"] *= 10 ** t["unit_mul"] + t["unit_mul"] = FIFF.FIFF_UNITM_NONE # append this channel to the info - info['chs'].append(t) - if chan['FLL_ResetLock'] == 'TRUE': - info['bads'].append(t['ch_name']) - - # reduce info['bads'] to unique set - info['bads'] = list(set(info['bads'])) + info["chs"].append(t) + if chan["FLL_ResetLock"] == "TRUE": + bads.append(t["ch_name"]) # HPI information # print header_info.keys() @@ -249,41 +272,43 @@ def _get_artemis123_info(fname, pos_fname=None): # Don't know what event_channel is don't think we have it HPIs are either # always on or always off. # hpi_sub['event_channel'] = ??? 
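# A minimal sketch of the "incorporate multiplier (unit_mul) into calibration"
# step shown above; the numbers are illustrative assumptions, not values from
# the patch. FIFF.FIFF_UNITM_F is the femto multiplier (-15) and
# FIFF.FIFF_UNITM_NONE is 0.
cal = 4.716            # hypothetical header scaling for a MEG channel
unit_mul = -15         # FIFF.FIFF_UNITM_F: data reported in femtotesla
cal *= 10 ** unit_mul  # cal becomes 4.716e-15, i.e. expressed directly in tesla
unit_mul = 0           # FIFF.FIFF_UNITM_NONE: no residual multiplier remains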
-    hpi_sub['hpi_coils'] = [dict(), dict(), dict(), dict()]
+    hpi_sub["hpi_coils"] = [dict(), dict(), dict(), dict()]

     hpi_coils = [dict(), dict(), dict(), dict()]
-    drive_channels = ['MIO_001', 'MIO_003', 'MIO_009', 'MIO_011']
-    key_base = 'Head Tracking %s %d'
+    drive_channels = ["MIO_001", "MIO_003", "MIO_009", "MIO_011"]
+    key_base = "Head Tracking %s %d"

     # set default HPI frequencies
-    if info['sfreq'] == 1000:
+    if info["sfreq"] == 1000:
         default_freqs = [140, 150, 160, 40]
     else:
         default_freqs = [700, 750, 800, 40]

     for i in range(4):
         # build coil structure
-        hpi_coils[i]['number'] = i + 1
-        hpi_coils[i]['drive_chan'] = drive_channels[i]
-        this_freq = header_info.pop(key_base % ('Frequency', i + 1),
-                                    default_freqs[i])
-        hpi_coils[i]['coil_freq'] = this_freq
+        hpi_coils[i]["number"] = i + 1
+        hpi_coils[i]["drive_chan"] = drive_channels[i]
+        this_freq = header_info.pop(key_base % ("Frequency", i + 1), default_freqs[i])
+        hpi_coils[i]["coil_freq"] = this_freq

         # check if coil is on
-        if header_info[key_base % ('Channel', i + 1)] == 'OFF':
-            hpi_sub['hpi_coils'][i]['event_bits'] = [0]
+        if header_info[key_base % ("Channel", i + 1)] == "OFF":
+            hpi_sub["hpi_coils"][i]["event_bits"] = [0]
         else:
-            hpi_sub['hpi_coils'][i]['event_bits'] = [256]
+            hpi_sub["hpi_coils"][i]["event_bits"] = [256]

-    info['hpi_subsystem'] = hpi_sub
-    info['hpi_meas'] = [{'hpi_coils': hpi_coils}]
+    info["hpi_subsystem"] = hpi_sub
+    info["hpi_meas"] = [{"hpi_coils": hpi_coils}]

     # read in digitized points if supplied
     if pos_fname is not None:
-        info['dig'] = _read_pos(pos_fname)
+        info["dig"] = _read_pos(pos_fname)
     else:
-        info['dig'] = []
+        info["dig"] = []

     info._unlocked = False
     info._update_redundant()
+    # reduce info['bads'] to unique set
+    info["bads"] = list(set(bads))
+    del bads
     return info, header_info
@@ -303,133 +328,163 @@ class RawArtemis123(BaseRaw):
     """

     @verbose
-    def __init__(self, input_fname, preload=False, verbose=None,
-                 pos_fname=None, add_head_trans=True):  # noqa: D102
-        from scipy.spatial.distance import cdist
-        from ...chpi import (compute_chpi_amplitudes, compute_chpi_locs,
-                             _fit_coil_order_dev_head_trans)
-        input_fname = str(
-            _check_fname(input_fname, "read", True, "input_fname")
+    def __init__(
+        self,
+        input_fname,
+        preload=False,
+        verbose=None,
+        pos_fname=None,
+        add_head_trans=True,
+    ):
+        from ...chpi import (
+            _fit_coil_order_dev_head_trans,
+            compute_chpi_amplitudes,
+            compute_chpi_locs,
         )
+
+        input_fname = str(_check_fname(input_fname, "read", True, "input_fname"))
         fname, ext = op.splitext(input_fname)
-        if ext == '.txt':
-            input_fname = fname + '.bin'
-        elif ext != '.bin':
-            raise RuntimeError('Valid artemis123 files must end in "txt"' +
-                               ' or ".bin".')
+        if ext == ".txt":
+            input_fname = fname + ".bin"
+        elif ext != ".bin":
+            raise RuntimeError(
+                'Valid artemis123 files must end in ".txt" or ".bin".'
+            )
         if not op.exists(input_fname):
-            raise RuntimeError('%s - Not Found' % input_fname)
+            raise RuntimeError(f"{input_fname} - Not Found")

-        info, header_info = _get_artemis123_info(input_fname,
-                                                 pos_fname=pos_fname)
+        info, header_info = _get_artemis123_info(input_fname, pos_fname=pos_fname)

-        last_samps = [header_info.get('num_samples', 1) - 1]
+        last_samps = [header_info.get("num_samples", 1) - 1]

-        super(RawArtemis123, self).__init__(
-            info, preload, filenames=[input_fname], raw_extras=[header_info],
-            last_samps=last_samps, orig_format="single",
-            verbose=verbose)
+        super().__init__(
+            info,
+            preload,
+            filenames=[input_fname],
+            raw_extras=[header_info],
+            last_samps=last_samps,
+            orig_format="single",
+            verbose=verbose,
+        )

         if add_head_trans:
             n_hpis = 0
-            for d in info['hpi_subsystem']['hpi_coils']:
-                if d['event_bits'] == [256]:
+            for d in info["hpi_subsystem"]["hpi_coils"]:
+                if d["event_bits"] == [256]:
                     n_hpis += 1
             if n_hpis < 3:
-                warn('%d HPIs active. At least 3 needed to perform' % n_hpis +
-                     'head localization\n *NO* head localization performed')
+                warn(
+                    f"{n_hpis:d} HPIs active. At least 3 needed to perform "
+                    "head localization\n *NO* head localization performed"
+                )
             else:
                 # Localized HPIs using the 1st 250 milliseconds of data.
                 with info._unlock():
-                    info['hpi_results'] = [dict(
-                        dig_points=[dict(
-                            r=np.zeros(3),
-                            coord_frame=FIFF.FIFFV_COORD_DEVICE,
-                            ident=ii + 1) for ii in range(n_hpis)],
-                        coord_trans=Transform('meg', 'head'))]
+                    info["hpi_results"] = [
+                        dict(
+                            dig_points=[
+                                dict(
+                                    r=np.zeros(3),
+                                    coord_frame=FIFF.FIFFV_COORD_DEVICE,
+                                    ident=ii + 1,
+                                )
+                                for ii in range(n_hpis)
+                            ],
+                            coord_trans=Transform("meg", "head"),
+                        )
+                    ]
                 coil_amplitudes = compute_chpi_amplitudes(
-                    self, tmin=0, tmax=0.25, t_window=0.25, t_step_min=0.25)
-                assert len(coil_amplitudes['times']) == 1
+                    self, tmin=0, tmax=0.25, t_window=0.25, t_step_min=0.25
+                )
+                assert len(coil_amplitudes["times"]) == 1
+                # Need an initial dev_head_t to compute coil locations
+                self.info["dev_head_t"] = Transform("meg", "head")
                 coil_locs = compute_chpi_locs(self.info, coil_amplitudes)
                 with info._unlock():
-                    info['hpi_results'] = None
-                hpi_g = coil_locs['gofs'][0]
-                hpi_dev = coil_locs['rrs'][0]
+                    info["hpi_results"] = None
+                hpi_g = coil_locs["gofs"][0]
+                hpi_dev = coil_locs["rrs"][0]

                 # only use HPI coils with localizaton goodness_of_fit > 0.98
                 bad_idx = []
                 for i, g in enumerate(hpi_g):
-                    msg = 'HPI coil %d - location goodness of fit (%0.3f)'
+                    msg = f"HPI coil {i + 1} - location goodness of fit ({g:0.3f})"
                     if g < 0.98:
                         bad_idx.append(i)
-                        msg += ' *Removed from coregistration*'
-                    logger.info(msg % (i + 1, g))
+                        msg += " *Removed from coregistration*"
+                    logger.info(msg)
                 hpi_dev = np.delete(hpi_dev, bad_idx, axis=0)
                 hpi_g = np.delete(hpi_g, bad_idx, axis=0)

                 if pos_fname is not None:
                     # Digitized HPI points are needed.
-                    hpi_head = np.array([d['r']
-                                         for d in self.info.get('dig', [])
-                                         if d['kind'] == FIFF.FIFFV_POINT_HPI])
-
-                    if (len(hpi_head) != len(hpi_dev)):
-                        mesg = ("number of digitized (%d) and " +
-                                "active (%d) HPI coils are " +
-                                "not the same.")
-                        raise RuntimeError(mesg % (len(hpi_head),
-                                                   len(hpi_dev)))
+                    hpi_head = np.array(
+                        [
+                            d["r"]
+                            for d in self.info.get("dig", [])
+                            if d["kind"] == FIFF.FIFFV_POINT_HPI
+                        ]
+                    )
+
+                    if len(hpi_head) != len(hpi_dev):
+                        raise RuntimeError(
+                            f"number of digitized ({len(hpi_head)}) and active "
+                            f"({len(hpi_dev)}) HPI coils are not the same."
+ ) # compute initial head to dev transform and hpi ordering - head_to_dev_t, order, trans_g = \ - _fit_coil_order_dev_head_trans(hpi_dev, hpi_head) + head_to_dev_t, order, trans_g = _fit_coil_order_dev_head_trans( + hpi_dev, hpi_head + ) # set the device to head transform - self.info['dev_head_t'] = \ - Transform(FIFF.FIFFV_COORD_DEVICE, - FIFF.FIFFV_COORD_HEAD, head_to_dev_t) + self.info["dev_head_t"] = Transform( + FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD, head_to_dev_t + ) # add hpi_meg_dev to dig... for idx, point in enumerate(hpi_dev): - d = {'r': point, 'ident': idx + 1, - 'kind': FIFF.FIFFV_POINT_HPI, - 'coord_frame': FIFF.FIFFV_COORD_DEVICE} - self.info['dig'].append(DigPoint(d)) + d = { + "r": point, + "ident": idx + 1, + "kind": FIFF.FIFFV_POINT_HPI, + "coord_frame": FIFF.FIFFV_COORD_DEVICE, + } + self.info["dig"].append(DigPoint(d)) dig_dists = cdist(hpi_head[order], hpi_head[order]) dev_dists = cdist(hpi_dev, hpi_dev) tmp_dists = np.abs(dig_dists - dev_dists) dist_limit = tmp_dists.max() * 1.1 - msg = 'HPI-Dig corrregsitration\n' - msg += '\tGOF : %0.3f\n' % trans_g - msg += '\tMax Coil Error : %0.3f cm\n' % (100 * - tmp_dists.max()) - logger.info(msg) + logger.info( + "HPI-Dig corrregsitration\n" + f"\tGOF : {trans_g:0.3f}\n" + f"\tMax Coil Error : {100 * tmp_dists.max():0.3f} cm\n" + ) else: - logger.info('Assuming Cardinal HPIs') + logger.info("Assuming Cardinal HPIs") nas = hpi_dev[0] lpa = hpi_dev[2] rpa = hpi_dev[1] t = get_ras_to_neuromag_trans(nas, lpa, rpa) with self.info._unlock(): - self.info['dev_head_t'] = \ - Transform(FIFF.FIFFV_COORD_DEVICE, - FIFF.FIFFV_COORD_HEAD, t) + self.info["dev_head_t"] = Transform( + FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD, t + ) # transform fiducial points nas = apply_trans(t, nas) lpa = apply_trans(t, lpa) rpa = apply_trans(t, rpa) - hpi = apply_trans(self.info['dev_head_t'], hpi_dev) + hpi = apply_trans(self.info["dev_head_t"], hpi_dev) with self.info._unlock(): - self.info['dig'] = _make_dig_points(nasion=nas, - lpa=lpa, - rpa=rpa, - hpi=hpi) + self.info["dig"] = _make_dig_points( + nasion=nas, lpa=lpa, rpa=rpa, hpi=hpi + ) order = np.array([0, 1, 2]) dist_limit = 0.005 @@ -439,33 +494,39 @@ def __init__(self, input_fname, preload=False, verbose=None, # add HPI points in device coords... dig = [] for idx, point in enumerate(hpi_dev): - dig.append({'r': point, 'ident': idx + 1, - 'kind': FIFF.FIFFV_POINT_HPI, - 'coord_frame': FIFF.FIFFV_COORD_DEVICE}) - hpi_result['dig_points'] = dig + dig.append( + { + "r": point, + "ident": idx + 1, + "kind": FIFF.FIFFV_POINT_HPI, + "coord_frame": FIFF.FIFFV_COORD_DEVICE, + } + ) + hpi_result["dig_points"] = dig # attach Transform - hpi_result['coord_trans'] = self.info['dev_head_t'] + hpi_result["coord_trans"] = self.info["dev_head_t"] # 1 based indexing - hpi_result['order'] = order + 1 - hpi_result['used'] = np.arange(3) + 1 - hpi_result['dist_limit'] = dist_limit - hpi_result['good_limit'] = 0.98 + hpi_result["order"] = order + 1 + hpi_result["used"] = np.arange(3) + 1 + hpi_result["dist_limit"] = dist_limit + hpi_result["good_limit"] = 0.98 # Warn for large discrepancies between digitized and fit # cHPI locations - if hpi_result['dist_limit'] > 0.005: - warn('Large difference between digitized geometry' + - ' and HPI geometry. Max coil to coil difference' + - ' is %0.2f cm\n' % (100. * tmp_dists.max()) + - 'beware of *POOR* head localization') + if hpi_result["dist_limit"] > 0.005: + warn( + "Large difference between digitized geometry" + " and HPI geometry. 
Max coil to coil difference" + f" is {100.0 * tmp_dists.max():0.2f} cm\n" + "beware of *POOR* head localization" + ) # store it with self.info._unlock(): - self.info['hpi_results'] = [hpi_result] + self.info["hpi_results"] = [hpi_result] def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" - _read_segments_file( - self, data, idx, fi, start, stop, cals, mult, dtype='>f4') + _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype=">f4") diff --git a/mne/io/artemis123/tests/__init__.py b/mne/io/artemis123/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/artemis123/tests/__init__.py +++ b/mne/io/artemis123/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/artemis123/tests/test_artemis123.py b/mne/io/artemis123/tests/test_artemis123.py index aa38fac5b53..b39f35febb9 100644 --- a/mne/io/artemis123/tests/test_artemis123.py +++ b/mne/io/artemis123/tests/test_artemis123.py @@ -1,35 +1,32 @@ - -# Author: Luke Bloy -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np -from numpy.testing import assert_allclose, assert_equal import pytest +from numpy.testing import assert_allclose, assert_equal -from mne.io import read_raw_artemis123 -from mne.io.tests.test_raw import _test_raw_reader +from mne import pick_types +from mne._fiff.constants import FIFF from mne.datasets import testing +from mne.io import read_raw_artemis123 from mne.io.artemis123.utils import _generate_mne_locs_file, _load_mne_locs -from mne import pick_types -from mne.transforms import rot_to_quat, _angle_between_quats -from mne.io.constants import FIFF +from mne.io.tests.test_raw import _test_raw_reader +from mne.transforms import _angle_between_quats, rot_to_quat artemis123_dir = testing.data_path(download=False) / "ARTEMIS123" short_HPI_dip_fname = ( - artemis123_dir - / "Artemis_Data_2017-04-04-15h-44m-22s_Motion_Translation-z.bin" + artemis123_dir / "Artemis_Data_2017-04-04-15h-44m-22s_Motion_Translation-z.bin" ) dig_fname = artemis123_dir / "Phantom_040417_dig.pos" short_hpi_1kz_fname = ( - artemis123_dir - / "Artemis_Data_2017-04-14-10h-38m-59s_Phantom_1k_HPI_1s.bin" + artemis123_dir / "Artemis_Data_2017-04-14-10h-38m-59s_Phantom_1k_HPI_1s.bin" ) # XXX this tol is way too high, but it's not clear which is correct # (old or new) -def _assert_trans(actual, desired, dist_tol=0.017, angle_tol=5.): +def _assert_trans(actual, desired, dist_tol=0.017, angle_tol=5.0): __tracebackhide__ = True trans_est = actual[0:3, 3] quat_est = rot_to_quat(actual[0:3, 0:3]) @@ -38,18 +35,22 @@ def _assert_trans(actual, desired, dist_tol=0.017, angle_tol=5.): angle = np.rad2deg(_angle_between_quats(quat_est, quat)) dist = np.linalg.norm(trans - trans_est) - assert dist <= dist_tol, \ - '%0.3f > %0.3f mm translation' % (1000 * dist, 1000 * dist_tol) - assert angle <= angle_tol, \ - '%0.3f > %0.3f° rotation' % (angle, angle_tol) + assert dist <= dist_tol, ( + f"{1000 * dist:0.3f} > {1000 * dist_tol:0.3f} mm translation" + ) + assert angle <= angle_tol, f"{angle:0.3f} > {angle_tol:0.3f}° rotation" -@pytest.mark.timeout(60) # ~25 s on Travis Linux OpenBLAS @testing.requires_testing_data def test_artemis_reader(): """Test reading raw Artemis123 files.""" - _test_raw_reader(read_raw_artemis123, input_fname=short_hpi_1kz_fname, - pos_fname=dig_fname, verbose='error') + _test_raw_reader( + 
read_raw_artemis123, + input_fname=short_hpi_1kz_fname, + pos_fname=dig_fname, + add_head_trans=False, + verbose="error", + ) @pytest.mark.timeout(60) @@ -57,47 +58,64 @@ def test_artemis_reader(): def test_dev_head_t(): """Test dev_head_t computation for Artemis123.""" # test a random selected point - raw = read_raw_artemis123(short_hpi_1kz_fname, preload=True, - add_head_trans=False) + raw = read_raw_artemis123(short_hpi_1kz_fname, preload=True, add_head_trans=False) + assert raw.info["dev_head_t"] is None meg_picks = pick_types(raw.info, meg=True, eeg=False) # checked against matlab reader. assert_allclose(raw[meg_picks[12]][0][0][123], 1.08239606023e-11) - dev_head_t_1 = np.array([[9.713e-01, 2.340e-01, -4.164e-02, 1.302e-04], - [-2.371e-01, 9.664e-01, -9.890e-02, 1.977e-03], - [1.710e-02, 1.059e-01, 9.942e-01, -8.159e-03], - [0.0, 0.0, 0.0, 1.0]]) - - dev_head_t_2 = np.array([[9.890e-01, 1.475e-01, -8.090e-03, 4.997e-04], - [-1.476e-01, 9.846e-01, -9.389e-02, 1.962e-03], - [-5.888e-03, 9.406e-02, 9.955e-01, -1.610e-02], - [0.0, 0.0, 0.0, 1.0]]) - - expected_dev_hpi_rr = np.array([[-0.01579644, 0.06527367, 0.00152648], - [0.06666813, 0.0148956, 0.00545488], - [-0.06699212, -0.01732376, 0.0112027]]) + dev_head_t_1 = np.array( + [ + [9.713e-01, 2.340e-01, -4.164e-02, 1.302e-04], + [-2.371e-01, 9.664e-01, -9.890e-02, 1.977e-03], + [1.710e-02, 1.059e-01, 9.942e-01, -8.159e-03], + [0.0, 0.0, 0.0, 1.0], + ] + ) + + dev_head_t_2 = np.array( + [ + [9.890e-01, 1.475e-01, -8.090e-03, 4.997e-04], + [-1.476e-01, 9.846e-01, -9.389e-02, 1.962e-03], + [-5.888e-03, 9.406e-02, 9.955e-01, -1.610e-02], + [0.0, 0.0, 0.0, 1.0], + ] + ) + + expected_dev_hpi_rr = np.array( + [ + [-0.01579644, 0.06527367, 0.00152648], + [0.06666813, 0.0148956, 0.00545488], + [-0.06699212, -0.01732376, 0.0112027], + ] + ) # test with head loc no digitization raw = read_raw_artemis123(short_HPI_dip_fname, add_head_trans=True) - _assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_1) - assert_equal(raw.info['sfreq'], 5000.0) + _assert_trans(raw.info["dev_head_t"]["trans"], dev_head_t_1) + assert_equal(raw.info["sfreq"], 5000.0) # test with head loc and digitization - with pytest.warns(RuntimeWarning, match='Large difference'): - raw = read_raw_artemis123(short_HPI_dip_fname, add_head_trans=True, - pos_fname=dig_fname) - _assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_1) + with ( + pytest.warns(RuntimeWarning, match="consistency"), + pytest.warns(RuntimeWarning, match="Large difference"), + ): + raw = read_raw_artemis123( + short_HPI_dip_fname, add_head_trans=True, pos_fname=dig_fname + ) + _assert_trans(raw.info["dev_head_t"]["trans"], dev_head_t_1) # test cHPI localization.. 
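# A plain-NumPy sketch of the comparison that _assert_trans above performs: two
# rigid transforms are treated as equivalent when their translations agree
# within dist_tol metres and their rotations differ by less than angle_tol
# degrees. The test itself uses mne.transforms helpers; this is only an
# illustrative stand-in.
import numpy as np


def _trans_close(actual, desired, dist_tol=0.017, angle_tol=5.0):
    dist = np.linalg.norm(actual[:3, 3] - desired[:3, 3])
    # angle of the relative rotation, via the trace identity for rotation matrices
    rel = actual[:3, :3] @ desired[:3, :3].T
    cos_angle = np.clip((np.trace(rel) - 1.0) / 2.0, -1.0, 1.0)
    angle = np.rad2deg(np.arccos(cos_angle))
    return dist <= dist_tol and angle <= angle_tol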
- dev_hpi_rr = np.array([p['r'] for p in raw.info['dig'] - if p['coord_frame'] == FIFF.FIFFV_COORD_DEVICE]) + dev_hpi_rr = np.array( + [p["r"] for p in raw.info["dig"] if p["coord_frame"] == FIFF.FIFFV_COORD_DEVICE] + ) # points should be within 0.1 mm (1e-4m) and within 1% assert_allclose(dev_hpi_rr, expected_dev_hpi_rr, atol=1e-4, rtol=0.01) # test 1kz hpi head loc (different freq) raw = read_raw_artemis123(short_hpi_1kz_fname, add_head_trans=True) - _assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_2) - assert_equal(raw.info['sfreq'], 1000.0) + _assert_trans(raw.info["dev_head_t"]["trans"], dev_head_t_2) + assert_equal(raw.info["sfreq"], 1000.0) def test_utils(tmp_path): diff --git a/mne/io/artemis123/utils.py b/mne/io/artemis123/utils.py index de7e98c9113..90df53ea1aa 100644 --- a/mne/io/artemis123/utils.py +++ b/mne/io/artemis123/utils.py @@ -1,25 +1,31 @@ -import numpy as np +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + import os.path as op -from .._digitization import _artemis123_read_pos -from ...utils import logger + +import numpy as np + +from ..._fiff._digitization import _artemis123_read_pos from ...transforms import rotation3d_align_z_axis +from ...utils import logger def _load_mne_locs(fname=None): """Load MNE locs structure from file (if exists) or recreate it.""" - if (not fname): + if not fname: # find input file - resource_dir = op.join(op.dirname(op.abspath(__file__)), 'resources') - fname = op.join(resource_dir, 'Artemis123_mneLoc.csv') + resource_dir = op.join(op.dirname(op.abspath(__file__)), "resources") + fname = op.join(resource_dir, "Artemis123_mneLoc.csv") if not op.exists(fname): - raise IOError('MNE locs file "%s" does not exist' % (fname)) + raise OSError(f'MNE locs file "{fname}" does not exist') - logger.info('Loading mne loc file {}'.format(fname)) + logger.info(f"Loading mne loc file {fname}") locs = dict() - with open(fname, 'r') as fid: + with open(fname) as fid: for line in fid: - vals = line.strip().split(',') + vals = line.strip().split(",") locs[vals[0]] = np.array(vals[1::], np.float64) return locs @@ -27,41 +33,39 @@ def _load_mne_locs(fname=None): def _generate_mne_locs_file(output_fname): """Generate mne coil locs and save to supplied file.""" - logger.info('Converting Tristan coil file to mne loc file...') - resource_dir = op.join(op.dirname(op.abspath(__file__)), 'resources') - chan_fname = op.join(resource_dir, 'Artemis123_ChannelMap.csv') + logger.info("Converting Tristan coil file to mne loc file...") + resource_dir = op.join(op.dirname(op.abspath(__file__)), "resources") + chan_fname = op.join(resource_dir, "Artemis123_ChannelMap.csv") chans = _load_tristan_coil_locs(chan_fname) # compute a dict of loc structs locs = {n: _compute_mne_loc(cinfo) for n, cinfo in chans.items()} # write it out to output_fname - with open(output_fname, 'w') as fid: + with open(output_fname, "w") as fid: for n in sorted(locs.keys()): - fid.write('%s,' % n) - fid.write(','.join(locs[n].astype(str))) - fid.write('\n') + fid.write(f"{n},") + fid.write(",".join(locs[n].astype(str))) + fid.write("\n") def _load_tristan_coil_locs(coil_loc_path): """Load the Coil locations from Tristan CAD drawings.""" channel_info = dict() - with open(coil_loc_path, 'r') as fid: + with open(coil_loc_path) as fid: # skip 2 Header lines fid.readline() fid.readline() for line in fid: line = line.strip() - vals = line.split(',') + vals = line.split(",") channel_info[vals[0]] = dict() if vals[6]: - 
channel_info[vals[0]]['inner_coil'] = \ - np.array(vals[2:5], np.float64) - channel_info[vals[0]]['outer_coil'] = \ - np.array(vals[5:8], np.float64) + channel_info[vals[0]]["inner_coil"] = np.array(vals[2:5], np.float64) + channel_info[vals[0]]["outer_coil"] = np.array(vals[5:8], np.float64) else: # nothing supplied - channel_info[vals[0]]['inner_coil'] = np.zeros(3) - channel_info[vals[0]]['outer_coil'] = np.zeros(3) + channel_info[vals[0]]["inner_coil"] = np.zeros(3) + channel_info[vals[0]]["outer_coil"] = np.zeros(3) return channel_info @@ -70,16 +74,17 @@ def _compute_mne_loc(coil_loc): Note input coil locations are in inches. """ - loc = np.zeros((12)) - if (np.linalg.norm(coil_loc['inner_coil']) == 0) and \ - (np.linalg.norm(coil_loc['outer_coil']) == 0): + loc = np.zeros(12) + if (np.linalg.norm(coil_loc["inner_coil"]) == 0) and ( + np.linalg.norm(coil_loc["outer_coil"]) == 0 + ): return loc # channel location is inner coil location converted to meters From inches - loc[0:3] = coil_loc['inner_coil'] / 39.370078 + loc[0:3] = coil_loc["inner_coil"] / 39.370078 # figure out rotation - z_axis = coil_loc['outer_coil'] - coil_loc['inner_coil'] + z_axis = coil_loc["outer_coil"] - coil_loc["inner_coil"] R = rotation3d_align_z_axis(z_axis) loc[3:13] = R.T.reshape(9) return loc @@ -88,7 +93,7 @@ def _compute_mne_loc(coil_loc): def _read_pos(fname): """Read the .pos file and return positions as dig points.""" nas, lpa, rpa, hpi, extra = None, None, None, None, None - with open(fname, 'r') as fid: + with open(fname) as fid: for line in fid: line = line.strip() if len(line) > 0: @@ -100,20 +105,19 @@ def _read_pos(fname): if len(parts) not in [4, 5]: continue - if parts[0].lower() == 'nasion': - nas = np.array([float(p) for p in parts[-3:]]) / 100. - elif parts[0].lower() == 'left': - lpa = np.array([float(p) for p in parts[-3:]]) / 100. - elif parts[0].lower() == 'right': - rpa = np.array([float(p) for p in parts[-3:]]) / 100. - elif 'hpi' in parts[0].lower(): + if parts[0].lower() == "nasion": + nas = np.array([float(p) for p in parts[-3:]]) / 100.0 + elif parts[0].lower() == "left": + lpa = np.array([float(p) for p in parts[-3:]]) / 100.0 + elif parts[0].lower() == "right": + rpa = np.array([float(p) for p in parts[-3:]]) / 100.0 + elif "hpi" in parts[0].lower(): if hpi is None: hpi = list() - hpi.append(np.array([float(p) for p in parts[-3:]]) / 100.) + hpi.append(np.array([float(p) for p in parts[-3:]]) / 100.0) else: if extra is None: extra = list() - extra.append(np.array([float(p) - for p in parts[-3:]]) / 100.) + extra.append(np.array([float(p) for p in parts[-3:]]) / 100.0) return _artemis123_read_pos(nas, lpa, rpa, hpi, extra) diff --git a/mne/io/base.py b/mne/io/base.py index b97290a1a7e..314f981dcaf 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -1,64 +1,122 @@ -# -*- coding: utf-8 -*- -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Martin Luessi -# Denis Engemann -# Teon Brooks -# Marijn van Vliet -# Stefan Appelhoff -# Clemens Brunner -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-from contextlib import nullcontext -from copy import deepcopy -from datetime import timedelta import os -import os.path as op import shutil from collections import defaultdict +from contextlib import nullcontext +from copy import deepcopy +from dataclasses import dataclass, field +from datetime import timedelta +from inspect import getfullargspec +from pathlib import Path import numpy as np -from .constants import FIFF -from .utils import _construct_bids_filename, _check_orig_units -from .pick import (pick_types, pick_channels, pick_info, _picks_to_idx, - channel_type) -from .meas_info import write_meas_info, _ensure_infos_match, ContainsMixin -from .proj import setup_proj, activate_proj, _proj_equal, ProjMixin -from ..channels.channels import (UpdateChannelsMixin, SetChannelsMixin, - InterpolationMixin, _unit2human) -from .compensator import set_current_comp, make_compensator -from .write import (start_and_end_file, start_block, end_block, - write_dau_pack16, write_float, write_double, - write_complex64, write_complex128, write_int, - write_id, write_string, _get_split_size, _NEXT_FILE_BUFFER) - -from ..annotations import (Annotations, _annotations_starts_stops, - _combine_annotations, _handle_meas_date, - _sync_onset, _write_annotations) -from ..filter import (FilterMixin, notch_filter, resample, _resamp_ratio_len, - _resample_stim_channels, _check_fun) -from ..parallel import parallel_func -from ..utils import (_check_fname, _check_pandas_installed, sizeof_fmt, - _check_pandas_index_arguments, fill_doc, copy_doc, - check_fname, _get_stim_channel, _stamp_to_dt, - logger, verbose, _time_mask, warn, SizeMixin, - copy_function_doc_to_method_doc, _validate_type, - _check_preload, _get_argvalues, _check_option, - _build_data_frame, _convert_times, _scale_dataframe_data, - _check_time_format, _arange_div, TimeMixin, repr_html, - _pl) +from .._fiff.compensator import make_compensator, set_current_comp +from .._fiff.constants import FIFF +from .._fiff.meas_info import ( + ContainsMixin, + SetChannelsMixin, + _ensure_infos_match, + _unit2human, + write_meas_info, +) +from .._fiff.pick import ( + _picks_to_idx, + channel_type, + pick_channels, + pick_info, + pick_types, +) +from .._fiff.proj import ProjMixin, _proj_equal, activate_proj, setup_proj +from .._fiff.utils import _check_orig_units, _make_split_fnames +from .._fiff.write import ( + _NEXT_FILE_BUFFER, + _get_split_size, + end_block, + start_and_end_file, + start_block, + write_complex64, + write_complex128, + write_dau_pack16, + write_double, + write_float, + write_id, + write_int, + write_string, +) +from ..annotations import ( + Annotations, + _annotations_starts_stops, + _combine_annotations, + _handle_meas_date, + _sync_onset, + _write_annotations, +) +from ..channels.channels import InterpolationMixin, ReferenceMixin, UpdateChannelsMixin from ..defaults import _handle_default -from ..viz import plot_raw, _RAW_CLIP_DEF -from ..event import find_events, concatenate_events +from ..event import concatenate_events, find_events +from ..filter import ( + FilterMixin, + _check_fun, + _check_resamp_noop, + _resamp_ratio_len, + _resample_stim_channels, + notch_filter, + resample, +) +from ..html_templates import _get_html_template +from ..parallel import parallel_func from ..time_frequency.spectrum import Spectrum, SpectrumMixin, _validate_method +from ..time_frequency.tfr import RawTFR +from ..utils import ( + SizeMixin, + TimeMixin, + _arange_div, + _build_data_frame, + _check_fname, + _check_option, + _check_pandas_index_arguments, + 
_check_pandas_installed, + _check_preload, + _check_time_format, + _convert_times, + _file_like, + _get_argvalues, + _get_stim_channel, + _pl, + _scale_dataframe_data, + _stamp_to_dt, + _time_mask, + _validate_type, + check_fname, + copy_doc, + copy_function_doc_to_method_doc, + fill_doc, + logger, + repr_html, + sizeof_fmt, + verbose, + warn, +) +from ..viz import _RAW_CLIP_DEF, plot_raw @fill_doc -class BaseRaw(ProjMixin, ContainsMixin, UpdateChannelsMixin, SetChannelsMixin, - InterpolationMixin, TimeMixin, SizeMixin, FilterMixin, - SpectrumMixin): +class BaseRaw( + ProjMixin, + ContainsMixin, + UpdateChannelsMixin, + ReferenceMixin, + SetChannelsMixin, + InterpolationMixin, + TimeMixin, + SizeMixin, + FilterMixin, + SpectrumMixin, +): """Base class for Raw data. Parameters @@ -79,7 +137,7 @@ class BaseRaw(ProjMixin, ContainsMixin, UpdateChannelsMixin, SetChannelsMixin, Iterable of the last sample number from each raw file. For unsplit raw files this should be a length-one list or tuple. If None, then preload must be an ndarray. - filenames : tuple + filenames : tuple | None Tuple of length one (for unsplit raw files) or length > 1 (for split raw files). raw_extras : list of dict @@ -118,21 +176,39 @@ class BaseRaw(ProjMixin, ContainsMixin, UpdateChannelsMixin, SetChannelsMixin, (only needed for types that support on-demand disk reads) """ + # NOTE: If you add a new attribute to this class and get a Sphinx warning like: + # docstring of mne.io.base.BaseRaw:71: + # WARNING: py:obj reference target not found: duration [ref.obj] + # You need to add the attribute to doc/conf.py nitpick_ignore_regex. You should also + # consider adding it to the Attributes list for Raw in mne/io/fiff/raw.py. + + _extra_attributes = () + @verbose - def __init__(self, info, preload=False, - first_samps=(0,), last_samps=None, - filenames=(None,), raw_extras=(None,), - orig_format='double', dtype=np.float64, - buffer_size_sec=1., orig_units=None, - *, verbose=None): # noqa: D102 + def __init__( + self, + info, + preload=False, + first_samps=(0,), + last_samps=None, + filenames=None, + raw_extras=(None,), + orig_format="double", + dtype=np.float64, + buffer_size_sec=1.0, + orig_units=None, + *, + verbose=None, + ): # wait until the end to preload data, but triage here if isinstance(preload, np.ndarray): # some functions (e.g., filtering) only work w/64-bit data if preload.dtype not in (np.float64, np.complex128): - raise RuntimeError('datatype must be float64 or complex128, ' - 'not %s' % preload.dtype) + raise RuntimeError( + f"datatype must be float64 or complex128, not {preload.dtype}" + ) if preload.dtype != dtype: - raise ValueError('preload and dtype must match') + raise ValueError("preload and dtype must match") self._data = preload self.preload = True assert len(first_samps) == 1 @@ -140,8 +216,9 @@ def __init__(self, info, preload=False, load_from_disk = False else: if last_samps is None: - raise ValueError('last_samps must be given unless preload is ' - 'an ndarray') + raise ValueError( + "last_samps must be given unless preload is an ndarray" + ) if not preload: self.preload = False load_from_disk = False @@ -149,51 +226,53 @@ def __init__(self, info, preload=False, load_from_disk = True self._last_samps = np.array(last_samps) self._first_samps = np.array(first_samps) - orig_ch_names = info['ch_names'] + orig_ch_names = info["ch_names"] with info._unlock(check_after=True): # be permissive of old code - if isinstance(info['meas_date'], tuple): - info['meas_date'] = _stamp_to_dt(info['meas_date']) + if 
isinstance(info["meas_date"], tuple): + info["meas_date"] = _stamp_to_dt(info["meas_date"]) self.info = info self.buffer_size_sec = float(buffer_size_sec) - cals = np.empty(info['nchan']) - for k in range(info['nchan']): - cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal'] + cals = np.empty(info["nchan"]) + for k in range(info["nchan"]): + cals[k] = info["chs"][k]["range"] * info["chs"][k]["cal"] bad = np.where(cals == 0)[0] if len(bad) > 0: - raise ValueError('Bad cals for channels %s' - % {ii: self.ch_names[ii] for ii in bad}) + raise ValueError( + f"Bad cals for channels {dict((ii, self.ch_names[ii]) for ii in bad)}" + ) self._cals = cals + if raw_extras is None: + raw_extras = [None] * len(first_samps) self._raw_extras = list(dict() if r is None else r for r in raw_extras) for r in self._raw_extras: - r['orig_nchan'] = info['nchan'] - self._read_picks = [np.arange(info['nchan']) - for _ in range(len(raw_extras))] + r["orig_nchan"] = info["nchan"] + self._read_picks = [np.arange(info["nchan"]) for _ in range(len(raw_extras))] # deal with compensation (only relevant for CTF data, either CTF # reader or MNE-C converted CTF->FIF files) self._read_comp_grade = self.compensation_grade # read property - if self._read_comp_grade is not None and len(info['comps']): - logger.info('Current compensation grade : %d' - % self._read_comp_grade) + if self._read_comp_grade is not None and len(info["comps"]): + logger.info("Current compensation grade : %d", self._read_comp_grade) self._comp = None - self._filenames = list(filenames) + if filenames is None: + filenames = [None] * len(first_samps) + self.filenames = list(filenames) _validate_type(orig_format, str, "orig_format") - _check_option( - "orig_format", orig_format, ("double", "single", "int", "short") - ) + _check_option("orig_format", orig_format, ("double", "single", "int", "short")) self.orig_format = orig_format # Sanity check and set original units, if provided by the reader: if orig_units: if not isinstance(orig_units, dict): - raise ValueError('orig_units must be of type dict, but got ' - ' {}'.format(type(orig_units))) + raise ValueError( + f"orig_units must be of type dict, but got {type(orig_units)}" + ) # original units need to be truncated to 15 chars or renamed # to match MNE conventions (channel name unique and less than # 15 characters). orig_units = deepcopy(orig_units) - for old_ch, new_ch in zip(orig_ch_names, info['ch_names']): + for old_ch, new_ch in zip(orig_ch_names, info["ch_names"]): if old_ch in orig_units: this_unit = orig_units[old_ch] del orig_units[old_ch] @@ -201,24 +280,23 @@ def __init__(self, info, preload=False, # STI 014 channel is native only to fif ... for all other formats # this was artificially added by the IO procedure, so remove it - ch_names = list(info['ch_names']) - if ('STI 014' in ch_names) and not \ - (self.filenames[0].endswith('.fif')): - ch_names.remove('STI 014') + ch_names = list(info["ch_names"]) + if "STI 014" in ch_names and self.filenames[0].suffix != ".fif": + ch_names.remove("STI 014") # Each channel in the data must have a corresponding channel in # the original units. ch_correspond = [ch in orig_units for ch in ch_names] if not all(ch_correspond): ch_without_orig_unit = ch_names[ch_correspond.index(False)] - raise ValueError('Channel {} has no associated original ' - 'unit.'.format(ch_without_orig_unit)) + raise ValueError( + f"Channel {ch_without_orig_unit} has no associated original unit." 
+ ) # Final check of orig_units, editing a unit if it is not a valid # unit orig_units = _check_orig_units(orig_units) self._orig_units = orig_units or dict() # always a dict - self._projectors = list() self._projector = None self._dtype_ = dtype self.set_annotations(None) @@ -255,22 +333,25 @@ def apply_gradient_compensation(self, grade, verbose=None): current_comp = self.compensation_grade if current_comp != grade: if self.proj: - raise RuntimeError('Cannot change compensation on data where ' - 'projectors have been applied') + raise RuntimeError( + "Cannot change compensation on data where projectors have been " + "applied." + ) # Figure out what operator to use (varies depending on preload) from_comp = current_comp if self.preload else self._read_comp_grade comp = make_compensator(self.info, from_comp, grade) - logger.info('Compensator constructed to change %d -> %d' - % (current_comp, grade)) + logger.info( + "Compensator constructed to change %d -> %d", current_comp, grade + ) set_current_comp(self.info, grade) # We might need to apply it to our data now if self.preload: - logger.info('Applying compensator to loaded data') - lims = np.concatenate([np.arange(0, len(self.times), 10000), - [len(self.times)]]) + logger.info("Applying compensator to loaded data") + lims = np.concatenate( + [np.arange(0, len(self.times), 10000), [len(self.times)]] + ) for start, stop in zip(lims[:-1], lims[1:]): - self._data[:, start:stop] = np.dot( - comp, self._data[:, start:stop]) + self._data[:, start:stop] = np.dot(comp, self._data[:, start:stop]) else: self._comp = comp # store it for later use return self @@ -282,8 +363,9 @@ def _dtype(self): return self._dtype_ @verbose - def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None, - projector=None, verbose=None): + def _read_segment( + self, start=0, stop=None, sel=None, data_buffer=None, *, verbose=None + ): """Read a chunk of raw data. 
Parameters @@ -314,65 +396,63 @@ def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None, stop = self.n_times if stop is None else min([int(stop), self.n_times]) if start >= stop: - raise ValueError('No data in this range') + raise ValueError("No data in this range") # Initialize the data and calibration vector if sel is None: - n_out = self.info['nchan'] + n_out = self.info["nchan"] idx = slice(None) else: n_out = len(sel) idx = _convert_slice(sel) del sel - assert n_out <= self.info['nchan'] + assert n_out <= self.info["nchan"] data_shape = (n_out, stop - start) dtype = self._dtype if isinstance(data_buffer, np.ndarray): if data_buffer.shape != data_shape: - raise ValueError('data_buffer has incorrect shape: %s != %s' - % (data_buffer.shape, data_shape)) + raise ValueError( + f"data_buffer has incorrect shape: " + f"{data_buffer.shape} != {data_shape}" + ) data = data_buffer else: data = _allocate_data(data_buffer, data_shape, dtype) # deal with having multiple files accessed by the raw object - cumul_lens = np.concatenate(([0], np.array(self._raw_lengths, - dtype='int'))) + cumul_lens = np.concatenate(([0], np.array(self._raw_lengths, dtype="int"))) cumul_lens = np.cumsum(cumul_lens) - files_used = np.logical_and(np.less(start, cumul_lens[1:]), - np.greater_equal(stop - 1, - cumul_lens[:-1])) + files_used = np.logical_and( + np.less(start, cumul_lens[1:]), np.greater_equal(stop - 1, cumul_lens[:-1]) + ) # set up cals and mult (cals, compensation, and projector) n_out = len(np.arange(len(self.ch_names))[idx]) - cals = self._cals.ravel()[np.newaxis, :] - if projector is not None: - assert projector.shape[0] == projector.shape[1] == cals.shape[1] - if self._comp is not None: + cals = self._cals.ravel() + projector, comp = self._projector, self._comp + if comp is not None: + mult = comp if projector is not None: - mult = self._comp * cals - mult = np.dot(projector[idx], mult) - else: - mult = self._comp[idx] * cals - elif projector is not None: - mult = projector[idx] * cals + mult = projector @ mult else: - mult = None - del projector + mult = projector + del projector, comp if mult is None: - cals = cals.T[idx] + cals = cals[idx, np.newaxis] assert cals.shape == (n_out, 1) need_idx = idx # sufficient just to read the given channels else: + mult = mult[idx] * cals cals = None # shouldn't be used assert mult.shape == (n_out, len(self.ch_names)) # read all necessary for proj need_idx = np.where(np.any(mult, axis=0))[0] mult = mult[:, need_idx] logger.debug( - f'Reading {len(need_idx)}/{len(self.ch_names)} channels ' - f'due to projection') + f"Reading {len(need_idx)}/{len(self.ch_names)} channels " + f"due to projection" + ) assert (mult is None) ^ (cals is None) # xor # read from necessary files @@ -382,17 +462,27 @@ def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None, # first iteration (only) could start in the middle somewhere if offset == 0: start_file += start - cumul_lens[fi] - stop_file = np.min([stop - cumul_lens[fi] + self._first_samps[fi], - self._last_samps[fi] + 1]) + stop_file = np.min( + [ + stop - cumul_lens[fi] + self._first_samps[fi], + self._last_samps[fi] + 1, + ] + ) if start_file < self._first_samps[fi] or stop_file < start_file: - raise ValueError('Bad array indexing, could be a bug') + raise ValueError("Bad array indexing, could be a bug") n_read = stop_file - start_file this_sl = slice(offset, offset + n_read) # reindex back to original file orig_idx = _convert_slice(self._read_picks[fi][need_idx]) 
_ReadSegmentFileProtector(self)._read_segment_file( - data[:, this_sl], orig_idx, fi, - int(start_file), int(stop_file), cals, mult) + data[:, this_sl], + orig_idx, + fi, + int(start_file), + int(stop_file), + cals, + mult, + ) offset += n_read return data @@ -403,7 +493,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): ``preload=False``. Any implementation should only make use of: - self._raw_extras[fi] - - self._filenames[fi] + - self.filenames[fi] So be sure to store any information necessary for reading raw data in self._raw_extras[fi]. Things like ``info`` can be decoupled @@ -429,9 +519,9 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """ raise NotImplementedError - def _check_bad_segment(self, start, stop, picks, - reject_start, reject_stop, - reject_by_annotation=False): + def _check_bad_segment( + self, start, stop, picks, reject_start, reject_stop, reject_by_annotation=False + ): """Check if data segment is bad. If the slice is good, returns the data in desired range. @@ -464,13 +554,14 @@ def _check_bad_segment(self, start, stop, picks, return None if reject_by_annotation and len(self.annotations) > 0: annot = self.annotations - sfreq = self.info['sfreq'] + sfreq = self.info["sfreq"] onset = _sync_onset(self, annot.onset) overlaps = np.where(onset < reject_stop / sfreq) - overlaps = np.where(onset[overlaps] + annot.duration[overlaps] > - reject_start / sfreq) + overlaps = np.where( + onset[overlaps] + annot.duration[overlaps] > reject_start / sfreq + ) for descr in annot.description[overlaps]: - if descr.lower().startswith('bad'): + if descr.lower().startswith("bad"): return descr return self._getitem((picks, slice(start, stop)), return_times=False) @@ -501,20 +592,21 @@ def load_data(self, verbose=None): def _preload_data(self, preload): """Actually preload the data.""" data_buffer = preload - if isinstance(preload, (bool, np.bool_)) and not preload: + if isinstance(preload, bool | np.bool_) and not preload: data_buffer = None - logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' % - (0, len(self.times) - 1, 0., self.times[-1])) - self._data = self._read_segment( - data_buffer=data_buffer, projector=self._projector) - assert len(self._data) == self.info['nchan'] + t = self.times + logger.info( + f"Reading 0 ... {len(t) - 1} = {0.0:9.3f} ... {t[-1]:9.3f} secs..." + ) + self._data = self._read_segment(data_buffer=data_buffer) + assert len(self._data) == self.info["nchan"] self.preload = True self._comp = None # no longer needed self.close() @property def _first_time(self): - return self.first_samp / float(self.info['sfreq']) + return self.first_samp / float(self.info["sfreq"]) @property def first_samp(self): @@ -536,7 +628,7 @@ def last_samp(self): @property def _last_time(self): - return self.last_samp / float(self.info['sfreq']) + return self.last_samp / float(self.info["sfreq"]) def time_as_index(self, times, use_rounding=False, origin=None): """Convert time to indices. 
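# A minimal NumPy sketch (not the MNE API) of how _read_segment above folds the
# compensator, the SSP projector and the per-channel calibrations into a single
# matrix and then reads only the channels that actually contribute; all names
# and values below are illustrative assumptions.
import numpy as np

n_ch = 5
cals = np.full(n_ch, 1e-13)                   # per-channel range * cal factors
comp = np.eye(n_ch)                           # stand-in compensation matrix
v = np.ones((n_ch, 1)) / np.sqrt(n_ch)
projector = np.eye(n_ch) - v @ v.T            # stand-in SSP projection operator

mult = projector @ comp                       # combined linear operator
idx = np.array([0, 3])                        # channels requested by the caller
mult = mult[idx] * cals                       # fold calibration into the operator
need_idx = np.where(np.any(mult, axis=0))[0]  # channels that must be read from disk
mult = mult[:, need_idx]
block = np.random.default_rng(0).normal(size=(len(need_idx), 1000))  # fake disk read
data = mult @ block                           # calibrated, projected output block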
@@ -563,20 +655,24 @@ def time_as_index(self, times, use_rounding=False, origin=None): origin = _handle_meas_date(origin) if origin is None: delta = 0 - elif self.info['meas_date'] is None: - raise ValueError('origin must be None when info["meas_date"] ' - 'is None, got %s' % (origin,)) + elif self.info["meas_date"] is None: + raise ValueError( + f'origin must be None when info["meas_date"] is None, got {origin}' + ) else: - first_samp_in_abs_time = (self.info['meas_date'] + - timedelta(0, self._first_time)) + first_samp_in_abs_time = self.info["meas_date"] + timedelta( + 0, self._first_time + ) delta = (origin - first_samp_in_abs_time).total_seconds() times = np.atleast_1d(times) + delta - return super(BaseRaw, self).time_as_index(times, use_rounding) + return super().time_as_index(times, use_rounding) @property def _raw_lengths(self): - return [l - f + 1 for f, l in zip(self._first_samps, self._last_samps)] + return [ + last - first + 1 for first, last in zip(self._first_samps, self._last_samps) + ] @property def annotations(self): # noqa: D401 @@ -584,13 +680,33 @@ def annotations(self): # noqa: D401 return self._annotations @property - def filenames(self): - """The filenames used.""" + def filenames(self) -> tuple[Path | None, ...]: + """The filenames used. + + :type: :class:`tuple` of :class:`pathlib.Path` | ``None`` + """ return tuple(self._filenames) + @filenames.setter + def filenames(self, value): + """The filenames used, cast to list of paths.""" # noqa: D401 + _validate_type(value, (list, tuple), "filenames") + if isinstance(value, tuple): + value = list(value) + for k, elt in enumerate(value): + if elt is not None: + value[k] = _check_fname(elt, overwrite="read", must_exist=False) + if not value[k].exists(): + # check existence separately from _check_fname since some + # fileformats use directories instead of files and '_check_fname' + # does not handle it correctly. + raise FileNotFoundError(f"File {value[k]} not found.") + self._filenames = list(value) + @verbose - def set_annotations(self, annotations, emit_warning=True, - on_missing='raise', *, verbose=None): + def set_annotations( + self, annotations, emit_warning=True, on_missing="raise", *, verbose=None + ): """Setter for annotations. This setter checks if they are inside the data range. @@ -610,37 +726,37 @@ def set_annotations(self, annotations, emit_warning=True, self : instance of Raw The raw object with annotations. """ - meas_date = _handle_meas_date(self.info['meas_date']) + meas_date = _handle_meas_date(self.info["meas_date"]) if annotations is None: self._annotations = Annotations([], [], [], meas_date) else: - _validate_type(annotations, Annotations, 'annotations') + _validate_type(annotations, Annotations, "annotations") if meas_date is None and annotations.orig_time is not None: - raise RuntimeError('Ambiguous operation. Setting an Annotation' - ' object with known ``orig_time`` to a raw' - ' object which has ``meas_date`` set to' - ' None is ambiguous. Please, either set a' - ' meaningful ``meas_date`` to the raw' - ' object; or set ``orig_time`` to None in' - ' which case the annotation onsets would be' - ' taken in reference to the first sample of' - ' the raw object.') - - delta = 1. / self.info['sfreq'] + raise RuntimeError( + "Ambiguous operation. Setting an Annotation object with known " + "``orig_time`` to a raw object which has ``meas_date`` set to None " + "is ambiguous. 
Please, either set a meaningful ``meas_date`` to " + "the raw object; or set ``orig_time`` to None in which case the " + "annotation onsets would be taken in reference to the first sample " + "of the raw object." + ) + + delta = 1.0 / self.info["sfreq"] new_annotations = annotations.copy() new_annotations._prune_ch_names(self.info, on_missing) if annotations.orig_time is None: - new_annotations.crop(0, self.times[-1] + delta, - emit_warning=emit_warning) + new_annotations.crop( + 0, self.times[-1] + delta, emit_warning=emit_warning + ) new_annotations.onset += self._first_time else: tmin = meas_date + timedelta(0, self._first_time) tmax = tmin + timedelta(seconds=self.times[-1] + delta) - new_annotations.crop(tmin=tmin, tmax=tmax, - emit_warning=emit_warning) + new_annotations.crop(tmin=tmin, tmax=tmax, emit_warning=emit_warning) new_annotations.onset -= ( - meas_date - new_annotations.orig_time).total_seconds() + meas_date - new_annotations.orig_time + ).total_seconds() new_annotations._orig_time = meas_date self._annotations = new_annotations @@ -649,8 +765,7 @@ def set_annotations(self, annotations, emit_warning=True, def __del__(self): # noqa: D105 # remove file for memmap - if hasattr(self, '_data') and \ - getattr(self._data, 'filename', None) is not None: + if hasattr(self, "_data") and getattr(self._data, "filename", None) is not None: # First, close the file out; happens automatically on del filename = self._data.filename del self._data @@ -678,31 +793,34 @@ def _parse_get_set_params(self, item): item = (item, slice(None, None, None)) if len(item) != 2: # should be channels and time instants - raise RuntimeError("Unable to access raw data (need both channels " - "and time)") + raise RuntimeError( + "Unable to access raw data (need both channels and time)" + ) sel = _picks_to_idx(self.info, item[0]) if isinstance(item[1], slice): time_slice = item[1] - start, stop, step = (time_slice.start, time_slice.stop, - time_slice.step) + start, stop, step = (time_slice.start, time_slice.stop, time_slice.step) else: item1 = item[1] # Let's do automated type conversion to integer here - if np.array(item[1]).dtype.kind == 'i': + if np.array(item[1]).dtype.kind == "i": item1 = int(item1) - if isinstance(item1, (int, np.integer)): + if isinstance(item1, int | np.integer): start, stop, step = item1, item1 + 1, 1 + # Need to special case -1, because -1:0 will be empty + if start == -1: + stop = None else: - raise ValueError('Must pass int or slice to __getitem__') + raise ValueError("Must pass int or slice to __getitem__") if start is None: start = 0 if step is not None and step != 1: - raise ValueError('step needs to be 1 : %d given' % step) + raise ValueError(f"step needs to be 1 : {step} given") - if isinstance(sel, (int, np.integer)): + if isinstance(sel, int | np.integer): sel = np.array([sel]) if sel is not None and len(sel) == 0: @@ -753,30 +871,39 @@ def _getitem(self, item, return_times=True): if self.preload: data = self._data[sel, start:stop] else: - data = self._read_segment(start=start, stop=stop, sel=sel, - projector=self._projector) + data = self._read_segment(start=start, stop=stop, sel=sel) if return_times: # Rather than compute the entire thing just compute the subset # times = self.times[start:stop] # stop can be None here so don't use it directly times = np.arange(start, start + data.shape[1], dtype=float) - times /= self.info['sfreq'] + times /= self.info["sfreq"] return data, times else: return data def __setitem__(self, item, value): """Set raw data content.""" - 
_check_preload(self, 'Modifying data of Raw') + _check_preload(self, "Modifying data of Raw") sel, start, stop = self._parse_get_set_params(item) # set the data self._data[sel, start:stop] = value @verbose - def get_data(self, picks=None, start=0, stop=None, - reject_by_annotation=None, return_times=False, units=None, - *, tmin=None, tmax=None, verbose=None): + def get_data( + self, + picks=None, + start=0, + stop=None, + reject_by_annotation=None, + return_times=False, + units=None, + *, + tmin=None, + tmax=None, + verbose=None, + ): """Get data in the given range. Parameters @@ -819,12 +946,12 @@ def get_data(self, picks=None, start=0, stop=None, .. versionadded:: 0.14.0 """ # validate types - _validate_type(start, types=('int-like'), item_name='start', - type_name='int') - _validate_type(stop, types=('int-like', None), item_name='stop', - type_name='int, None') + _validate_type(start, types=("int-like"), item_name="start", type_name="int") + _validate_type( + stop, types=("int-like", None), item_name="stop", type_name="int, None" + ) - picks = _picks_to_idx(self.info, picks, 'all', exclude=()) + picks = _picks_to_idx(self.info, picks, "all", exclude=()) # Get channel factors for conversion into specified unit # (vector of ones if no conversion needed) @@ -832,7 +959,7 @@ def get_data(self, picks=None, start=0, stop=None, ch_factors = _get_ch_factors(self, units, picks) # convert to ints - picks = np.atleast_1d(np.arange(self.info['nchan'])[picks]) + picks = np.atleast_1d(np.arange(self.info["nchan"])[picks]) # handle start/tmin stop/tmax tmin_start, tmax_stop = self._handle_tmin_tmax(tmin, tmax) @@ -848,7 +975,8 @@ def get_data(self, picks=None, start=0, stop=None, if len(self.annotations) == 0 or reject_by_annotation is None: getitem = self._getitem( - (picks, slice(start, stop)), return_times=return_times) + (picks, slice(start, stop)), return_times=return_times + ) if return_times: data, times = getitem if units is not None: @@ -857,9 +985,10 @@ def get_data(self, picks=None, start=0, stop=None, if units is not None: getitem *= ch_factors[:, np.newaxis] return getitem - _check_option('reject_by_annotation', reject_by_annotation.lower(), - ['omit', 'nan']) - onsets, ends = _annotations_starts_stops(self, ['BAD']) + _check_option( + "reject_by_annotation", reject_by_annotation.lower(), ["omit", "nan"] + ) + onsets, ends = _annotations_starts_stops(self, ["BAD"]) keep = (onsets < stop) & (ends > start) onsets = np.maximum(onsets[keep], start) ends = np.minimum(ends[keep], stop) @@ -875,19 +1004,26 @@ def get_data(self, picks=None, start=0, stop=None, for onset, end in zip(onsets, ends): if onset >= end: continue - used[onset - start: end - start] = False + used[onset - start : end - start] = False used = np.concatenate([[False], used, [False]]) starts = np.where(~used[:-1] & used[1:])[0] + start stops = np.where(used[:-1] & ~used[1:])[0] + start n_kept = (stops - starts).sum() # kept samples n_rejected = n_samples - n_kept # rejected samples if n_rejected > 0: - if reject_by_annotation == 'omit': - msg = ("Omitting {} of {} ({:.2%}) samples, retaining {}" - " ({:.2%}) samples.") - logger.info(msg.format(n_rejected, n_samples, - n_rejected / n_samples, - n_kept, n_kept / n_samples)) + if reject_by_annotation == "omit": + msg = ( + "Omitting {} of {} ({:.2%}) samples, retaining {} ({:.2%}) samples." 
+ ) + logger.info( + msg.format( + n_rejected, + n_samples, + n_rejected / n_samples, + n_kept, + n_kept / n_samples, + ) + ) data = np.zeros((len(picks), n_kept)) times = np.zeros(data.shape[1]) idx = 0 @@ -898,11 +1034,19 @@ def get_data(self, picks=None, start=0, stop=None, data[:, idx:end], times[idx:end] = self[picks, start:stop] idx = end else: - msg = ("Setting {} of {} ({:.2%}) samples to NaN, retaining {}" - " ({:.2%}) samples.") - logger.info(msg.format(n_rejected, n_samples, - n_rejected / n_samples, - n_kept, n_kept / n_samples)) + msg = ( + "Setting {} of {} ({:.2%}) samples to NaN, retaining {}" + " ({:.2%}) samples." + ) + logger.info( + msg.format( + n_rejected, + n_samples, + n_rejected / n_samples, + n_kept, + n_kept / n_samples, + ) + ) data, times = self[picks, start:stop] data[:, ~used[1:-1]] = np.nan else: @@ -915,8 +1059,16 @@ def get_data(self, picks=None, start=0, stop=None, return data @verbose - def apply_function(self, fun, picks=None, dtype=None, n_jobs=None, - channel_wise=True, verbose=None, **kwargs): + def apply_function( + self, + fun, + picks=None, + dtype=None, + n_jobs=None, + channel_wise=True, + verbose=None, + **kwargs, + ): """Apply a function to a subset of channels. %(applyfun_summary_raw)s @@ -939,58 +1091,123 @@ def apply_function(self, fun, picks=None, dtype=None, n_jobs=None, self : instance of Raw The raw object with transformed data. """ - _check_preload(self, 'raw.apply_function') + _check_preload(self, "raw.apply_function") picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) if not callable(fun): - raise ValueError('fun needs to be a function') + raise ValueError("fun needs to be a function") data_in = self._data if dtype is not None and dtype != self._data.dtype: self._data = self._data.astype(dtype) + args = getfullargspec(fun).args + getfullargspec(fun).kwonlyargs + if channel_wise is False: + if ("ch_idx" in args) or ("ch_name" in args): + raise ValueError( + "apply_function cannot access ch_idx or ch_name " + "when channel_wise=False" + ) + if "ch_idx" in args: + logger.info("apply_function requested to access ch_idx") + if "ch_name" in args: + logger.info("apply_function requested to access ch_name") + if channel_wise: parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs) if n_jobs == 1: # modify data inplace to save memory - for idx in picks: - self._data[idx, :] = _check_fun(fun, data_in[idx, :], - **kwargs) + for ch_idx in picks: + if "ch_idx" in args: + kwargs.update(ch_idx=ch_idx) + if "ch_name" in args: + kwargs.update(ch_name=self.info["ch_names"][ch_idx]) + self._data[ch_idx, :] = _check_fun( + fun, data_in[ch_idx, :], **kwargs + ) else: # use parallel function data_picks_new = parallel( - p_fun(fun, data_in[p], **kwargs) for p in picks) - for pp, p in enumerate(picks): - self._data[p, :] = data_picks_new[pp] + p_fun( + fun, + data_in[ch_idx], + **kwargs, + **{ + k: v + for k, v in [ + ("ch_name", self.info["ch_names"][ch_idx]), + ("ch_idx", ch_idx), + ] + if k in args + }, + ) + for ch_idx in picks + ) + for run_idx, ch_idx in enumerate(picks): + self._data[ch_idx, :] = data_picks_new[run_idx] else: - self._data[picks, :] = _check_fun( - fun, data_in[picks, :], **kwargs) + self._data[picks, :] = _check_fun(fun, data_in[picks, :], **kwargs) return self # Need a separate method because the default pad is different for raw @copy_doc(FilterMixin.filter) - def filter(self, l_freq, h_freq, picks=None, filter_length='auto', - l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=None, - method='fir', 
iir_params=None, phase='zero', - fir_window='hamming', fir_design='firwin', - skip_by_annotation=('edge', 'bad_acq_skip'), - pad='reflect_limited', verbose=None): # noqa: D102 + def filter( + self, + l_freq, + h_freq, + picks=None, + filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + n_jobs=None, + method="fir", + iir_params=None, + phase="zero", + fir_window="hamming", + fir_design="firwin", + skip_by_annotation=("edge", "bad_acq_skip"), + pad="reflect_limited", + verbose=None, + ): return super().filter( - l_freq, h_freq, picks, filter_length, l_trans_bandwidth, - h_trans_bandwidth, n_jobs=n_jobs, method=method, - iir_params=iir_params, phase=phase, fir_window=fir_window, - fir_design=fir_design, skip_by_annotation=skip_by_annotation, - pad=pad, verbose=verbose) + l_freq, + h_freq, + picks, + filter_length, + l_trans_bandwidth, + h_trans_bandwidth, + n_jobs=n_jobs, + method=method, + iir_params=iir_params, + phase=phase, + fir_window=fir_window, + fir_design=fir_design, + skip_by_annotation=skip_by_annotation, + pad=pad, + verbose=verbose, + ) @verbose - def notch_filter(self, freqs, picks=None, filter_length='auto', - notch_widths=None, trans_bandwidth=1.0, n_jobs=None, - method='fir', iir_params=None, mt_bandwidth=None, - p_value=0.05, phase='zero', fir_window='hamming', - fir_design='firwin', pad='reflect_limited', - skip_by_annotation=('edge', 'bad_acq_skip'), - verbose=None): + def notch_filter( + self, + freqs, + picks=None, + filter_length="auto", + notch_widths=None, + trans_bandwidth=1.0, + n_jobs=None, + method="fir", + iir_params=None, + mt_bandwidth=None, + p_value=0.05, + phase="zero", + fir_window="hamming", + fir_design="firwin", + pad="reflect_limited", + skip_by_annotation=("edge", "bad_acq_skip"), + verbose=None, + ): """Notch filter a subset of channels. Parameters @@ -1008,7 +1225,7 @@ def notch_filter(self, freqs, picks=None, filter_length='auto', If None, ``freqs / 200`` is used. trans_bandwidth : float Width of the transition band in Hz. - Only used for ``method='fir'``. + Only used for ``method='fir'`` and ``method='iir'``. %(n_jobs_fir)s %(method_fir)s %(iir_params)s @@ -1054,28 +1271,49 @@ def notch_filter(self, freqs, picks=None, filter_length='auto', For details, see :func:`mne.filter.notch_filter`. 
""" - fs = float(self.info['sfreq']) - picks = _picks_to_idx(self.info, picks, exclude=(), none='data_or_ica') - _check_preload(self, 'raw.notch_filter') - onsets, ends = _annotations_starts_stops( - self, skip_by_annotation, invert=True) - logger.info('Filtering raw data in %d contiguous segment%s' - % (len(onsets), _pl(onsets))) + fs = float(self.info["sfreq"]) + picks = _picks_to_idx(self.info, picks, exclude=(), none="data_or_ica") + _check_preload(self, "raw.notch_filter") + onsets, ends = _annotations_starts_stops(self, skip_by_annotation, invert=True) + logger.info( + "Filtering raw data in %d contiguous segment%s", len(onsets), _pl(onsets) + ) for si, (start, stop) in enumerate(zip(onsets, ends)): notch_filter( - self._data[:, start:stop], fs, freqs, - filter_length=filter_length, notch_widths=notch_widths, - trans_bandwidth=trans_bandwidth, method=method, - iir_params=iir_params, mt_bandwidth=mt_bandwidth, - p_value=p_value, picks=picks, n_jobs=n_jobs, copy=False, - phase=phase, fir_window=fir_window, fir_design=fir_design, - pad=pad) + self._data[:, start:stop], + fs, + freqs, + filter_length=filter_length, + notch_widths=notch_widths, + trans_bandwidth=trans_bandwidth, + method=method, + iir_params=iir_params, + mt_bandwidth=mt_bandwidth, + p_value=p_value, + picks=picks, + n_jobs=n_jobs, + copy=False, + phase=phase, + fir_window=fir_window, + fir_design=fir_design, + pad=pad, + ) return self @verbose - def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, - n_jobs=None, events=None, pad='reflect_limited', - verbose=None): + def resample( + self, + sfreq, + *, + npad="auto", + window="auto", + stim_picks=None, + n_jobs=None, + events=None, + pad="auto", + method="fft", + verbose=None, + ): """Resample all channels. If appropriate, an anti-aliasing filter is applied before resampling. @@ -1102,7 +1340,7 @@ def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, ---------- sfreq : float New sample rate to use. - %(npad)s + %(npad_resample)s %(window_resample)s stim_picks : list of int | None Stim channels. These channels are simply subsampled or @@ -1115,10 +1353,12 @@ def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, An optional event matrix. When specified, the onsets of the events are resampled jointly with the data. NB: The input events are not modified, but a new array is returned with the raw instead. - %(pad)s - The default is ``'reflect_limited'``. + %(pad_resample_auto)s .. versionadded:: 0.15 + %(method_resample)s + + .. versionadded:: 1.7 %(verbose)s Returns @@ -1143,6 +1383,14 @@ def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, ``self.load_data()``, but this increases memory requirements. The resulting raw object will have the data loaded into memory. """ + sfreq = float(sfreq) + o_sfreq = float(self.info["sfreq"]) + if _check_resamp_noop(sfreq, o_sfreq): + if events is not None: + return self, events.copy() + else: + return self + # When no event object is supplied, some basic detection of dropped # events is performed to generate a warning. Finding events can fail # for a variety of reasons, e.g. 
if no stim channel is present or it is @@ -1154,32 +1402,41 @@ def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, except Exception: pass - sfreq = float(sfreq) - o_sfreq = float(self.info['sfreq']) - offsets = np.concatenate(([0], np.cumsum(self._raw_lengths))) # set up stim channel processing if stim_picks is None: - stim_picks = pick_types(self.info, meg=False, ref_meg=False, - stim=True, exclude=[]) + stim_picks = pick_types( + self.info, meg=False, ref_meg=False, stim=True, exclude=[] + ) else: - stim_picks = _picks_to_idx(self.info, stim_picks, exclude=(), - with_ref_meg=False) + stim_picks = _picks_to_idx( + self.info, stim_picks, exclude=(), with_ref_meg=False + ) - kwargs = dict(up=sfreq, down=o_sfreq, npad=npad, window=window, - n_jobs=n_jobs, pad=pad) - ratio, n_news = zip(*(_resamp_ratio_len(sfreq, o_sfreq, old_len) - for old_len in self._raw_lengths)) + kwargs = dict( + up=sfreq, + down=o_sfreq, + npad=npad, + window=window, + n_jobs=n_jobs, + pad=pad, + method=method, + ) + ratio, n_news = zip( + *( + _resamp_ratio_len(sfreq, o_sfreq, old_len) + for old_len in self._raw_lengths + ) + ) ratio, n_news = ratio[0], np.array(n_news, int) new_offsets = np.cumsum([0] + list(n_news)) if self.preload: - new_data = np.empty( - (len(self.ch_names), new_offsets[-1]), self._data.dtype) + new_data = np.empty((len(self.ch_names), new_offsets[-1]), self._data.dtype) for ri, (n_orig, n_new) in enumerate(zip(self._raw_lengths, n_news)): this_sl = slice(new_offsets[ri], new_offsets[ri + 1]) if self.preload: - data_chunk = self._data[:, offsets[ri]:offsets[ri + 1]] + data_chunk = self._data[:, offsets[ri] : offsets[ri + 1]] new_data[:, this_sl] = resample(data_chunk, **kwargs) # In empirical testing, it was faster to resample all channels # (above) and then replace the stim channels than it was to @@ -1187,34 +1444,37 @@ def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, # np.insert() to restore the stims. if len(stim_picks) > 0: new_data[stim_picks, this_sl] = _resample_stim_channels( - data_chunk[stim_picks], n_new, data_chunk.shape[1]) + data_chunk[stim_picks], n_new, data_chunk.shape[1] + ) else: # this will not be I/O efficient, but will be mem efficient for ci in range(len(self.ch_names)): data_chunk = self.get_data( - ci, offsets[ri], offsets[ri + 1], verbose='error')[0] + ci, offsets[ri], offsets[ri + 1], verbose="error" + )[0] if ci == 0 and ri == 0: new_data = np.empty( - (len(self.ch_names), new_offsets[-1]), - data_chunk.dtype) + (len(self.ch_names), new_offsets[-1]), data_chunk.dtype + ) if ci in stim_picks: resamp = _resample_stim_channels( - data_chunk, n_new, data_chunk.shape[-1])[0] + data_chunk, n_new, data_chunk.shape[-1] + )[0] else: resamp = resample(data_chunk, **kwargs) new_data[ci, this_sl] = resamp self._cropped_samp = int(np.round(self._cropped_samp * ratio)) self._first_samps = np.round(self._first_samps * ratio).astype(int) - self._last_samps = (np.array(self._first_samps) + n_news - 1) + self._last_samps = np.array(self._first_samps) + n_news - 1 self._raw_lengths[ri] = list(n_news) assert np.array_equal(n_news, self._last_samps - self._first_samps + 1) self._data = new_data self.preload = True - lowpass = self.info.get('lowpass') + lowpass = self.info.get("lowpass") lowpass = np.inf if lowpass is None else lowpass with self.info._unlock(): - self.info['lowpass'] = min(lowpass, sfreq / 2.) 
- self.info['sfreq'] = sfreq + self.info["lowpass"] = min(lowpass, sfreq / 2.0) + self.info["sfreq"] = sfreq # See the comment above why we ignore all errors here. if events is None: @@ -1222,10 +1482,12 @@ def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, # Did we loose events? resampled_events = find_events(self) if len(resampled_events) != len(original_events): - warn('Resampling of the stim channels caused event ' - 'information to become unreliable. Consider finding ' - 'events on the original data and passing the event ' - 'matrix as a parameter.') + warn( + "Resampling of the stim channels caused event " + "information to become unreliable. Consider finding " + "events on the original data and passing the event " + "matrix as a parameter." + ) except Exception: pass @@ -1236,10 +1498,75 @@ def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, events[:, 0] = np.minimum( np.round(events[:, 0] * ratio).astype(int), - self._data.shape[1] + self.first_samp - 1 + self._data.shape[1] + self.first_samp - 1, ) return self, events + @verbose + def rescale(self, scalings, *, verbose=None): + """Rescale channels. + + .. warning:: + MNE-Python assumes data are stored in SI base units. This function should + typically only be used to fix an incorrect scaling factor in the data to get + it to be in SI base units, otherwise unintended problems (e.g., incorrect + source imaging results) and analysis errors can occur. + + Parameters + ---------- + scalings : int | float | dict + The scaling factor(s) by which to multiply the data. If a float, the same + scaling factor is applied to all channels (this works only if all channels + are of the same type). If a dict, the keys must be valid channel types and + the values the scaling factors to apply to the corresponding channels. + %(verbose)s + + Returns + ------- + raw : Raw + The raw object with rescaled data (modified in-place). + + Examples + -------- + A common use case for EEG data is to convert from µV to V, since many EEG + systems store data in µV, but MNE-Python expects the data to be in V. Therefore, + the data needs to be rescaled by a factor of 1e-6. To rescale all channels from + µV to V, you can do:: + + >>> raw.rescale(1e-6) # doctest: +SKIP + + Note that the previous example only works if all channels are of the same type. + If there are multiple channel types, you can pass a dict with the individual + scaling factors. For example, to rescale only EEG channels, you can do:: + + >>> raw.rescale({"eeg": 1e-6}) # doctest: +SKIP + """ + _validate_type(scalings, (int, float, dict), "scalings") + _check_preload(self, "raw.rescale") + + channel_types = self.get_channel_types(unique=True) + + if isinstance(scalings, int | float): + if len(channel_types) == 1: + self.apply_function(lambda x: x * scalings, channel_wise=False) + else: + raise ValueError( + "If scalings is a scalar, all channels must be of the same type. " + "Consider passing a dict instead." + ) + else: + for ch_type in scalings.keys(): + if ch_type not in channel_types: + raise ValueError( + f'Channel type "{ch_type}" is not present in the Raw file.' + ) + for ch_type, ch_scale in scalings.items(): + self.apply_function( + lambda x: x * ch_scale, picks=ch_type, channel_wise=False + ) + + return self + @verbose def crop(self, tmin=0.0, tmax=None, include_tmax=True, *, verbose=None): """Crop raw data file. 
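
The ``rescale`` method added above multiplies preloaded data in place and is meant for fixing recordings stored in the wrong unit (e.g. µV instead of V). A minimal usage sketch, assuming an MNE-Python version that already contains this change; the synthetic ``RawArray`` and the NumPy/MNE helpers below are illustrative and not part of the diff:

    import numpy as np
    import mne

    # Synthetic EEG recording whose values are (incorrectly) stored in microvolts.
    rng = np.random.default_rng(0)
    info = mne.create_info(ch_names=["EEG 001", "EEG 002"], sfreq=250.0, ch_types="eeg")
    raw = mne.io.RawArray(rng.standard_normal((2, 2500)) * 50.0, info)  # ~50 µV amplitudes

    # Bring the EEG channels into volts (SI base units) before further processing;
    # a plain float would also work here because all channels share one type.
    raw.rescale({"eeg": 1e-6})
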
@@ -1265,27 +1592,34 @@ def crop(self, tmin=0.0, tmax=None, include_tmax=True, *, verbose=None): raw : instance of Raw The cropped raw object, modified in-place. """ - max_time = (self.n_times - 1) / self.info['sfreq'] + max_time = (self.n_times - 1) / self.info["sfreq"] if tmax is None: tmax = max_time if tmin > tmax: - raise ValueError('tmin (%s) must be less than tmax (%s)' - % (tmin, tmax)) + raise ValueError(f"tmin ({tmin}) must be less than tmax ({tmax})") if tmin < 0.0: - raise ValueError('tmin (%s) must be >= 0' % (tmin,)) - elif tmax - int(not include_tmax) / self.info['sfreq'] > max_time: - raise ValueError('tmax (%s) must be less than or equal to the max ' - 'time (%0.4f s)' % (tmax, max_time)) - - smin, smax = np.where(_time_mask( - self.times, tmin, tmax, sfreq=self.info['sfreq'], - include_tmax=include_tmax))[0][[0, -1]] - cumul_lens = np.concatenate(([0], np.array(self._raw_lengths, - dtype='int'))) + raise ValueError(f"tmin ({tmin}) must be >= 0") + elif tmax - int(not include_tmax) / self.info["sfreq"] > max_time: + raise ValueError( + f"tmax ({tmax}) must be less than or equal to the max " + f"time ({max_time:0.4f} s)" + ) + + smin, smax = np.where( + _time_mask( + self.times, + tmin, + tmax, + sfreq=self.info["sfreq"], + include_tmax=include_tmax, + ) + )[0][[0, -1]] + cumul_lens = np.concatenate(([0], np.array(self._raw_lengths, dtype="int"))) cumul_lens = np.cumsum(cumul_lens) - keepers = np.logical_and(np.less(smin, cumul_lens[1:]), - np.greater_equal(smax, cumul_lens[:-1])) + keepers = np.logical_and( + np.less(smin, cumul_lens[1:]), np.greater_equal(smax, cumul_lens[:-1]) + ) keepers = np.where(keepers)[0] # if we drop file(s) from the beginning, we need to keep track of # how many samples we dropped relative to that one @@ -1296,18 +1630,17 @@ def crop(self, tmin=0.0, tmax=None, include_tmax=True, *, verbose=None): self._last_samps = np.atleast_1d(self._last_samps[keepers]) self._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax self._read_picks = [self._read_picks[ri] for ri in keepers] - assert all(len(r) == len(self._read_picks[0]) - for r in self._read_picks) + assert all(len(r) == len(self._read_picks[0]) for r in self._read_picks) self._raw_extras = [self._raw_extras[ri] for ri in keepers] - self._filenames = [self._filenames[ri] for ri in keepers] + self.filenames = [self.filenames[ri] for ri in keepers] if self.preload: # slice and copy to avoid the reference to large array - self._data = self._data[:, smin:smax + 1].copy() + self._data = self._data[:, smin : smax + 1].copy() annotations = self.annotations # now call setter to filter out annotations outside of interval if annotations.orig_time is None: - assert self.info['meas_date'] is None + assert self.info["meas_date"] is None # When self.info['meas_date'] is None (which is guaranteed if # self.annotations.orig_time is None), when we do the # self.set_annotations, it's assumed that the annotations onset @@ -1342,7 +1675,7 @@ def crop_by_annotations(self, annotations=None, *, verbose=None): onset = annot["onset"] - self.first_time # be careful about near-zero errors (crop is very picky about this, # e.g., -1e-8 is an error) - if -self.info['sfreq'] / 2 < onset < 0: + if -self.info["sfreq"] / 2 < onset < 0: onset = 0 raw_crop = self.copy().crop(onset, onset + annot["duration"]) raws.append(raw_crop) @@ -1350,10 +1683,21 @@ def crop_by_annotations(self, annotations=None, *, verbose=None): return raws @verbose - def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, - 
drop_small_buffer=False, proj=False, fmt='single', - overwrite=False, split_size='2GB', split_naming='neuromag', - verbose=None): + def save( + self, + fname, + picks=None, + tmin=0, + tmax=None, + buffer_size_sec=None, + drop_small_buffer=False, + proj=False, + fmt="single", + overwrite=False, + split_size="2GB", + split_naming="neuromag", + verbose=None, + ): """Save raw data to file. Parameters @@ -1407,6 +1751,13 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, .. versionadded:: 0.17 %(verbose)s + Returns + ------- + fnames : List of path-like + List of path-like objects containing the path to each file split. + + .. versionadded:: 1.9 + Notes ----- If Raw is a concatenation of several raw files, **be warned** that @@ -1419,48 +1770,55 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, Samples annotated ``BAD_ACQ_SKIP`` are not stored in order to optimize memory. Whatever values, they will be loaded as 0s when reading file. """ - endings = ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif', - '_meg.fif', '_eeg.fif', '_ieeg.fif') - endings += tuple([f'{e}.gz' for e in endings]) - endings_err = ('.fif', '.fif.gz') + endings = ( + "raw.fif", + "raw_sss.fif", + "raw_tsss.fif", + "_meg.fif", + "_eeg.fif", + "_ieeg.fif", + ) + endings += tuple([f"{e}.gz" for e in endings]) + endings_err = (".fif", ".fif.gz") # convert to str, check for overwrite a few lines later - fname = str(_check_fname(fname, overwrite=True, verbose="error")) - check_fname(fname, 'raw', endings, endings_err=endings_err) + fname = _check_fname( + fname, + overwrite=True, + verbose="error", + check_bids_split=True, + name="fname", + ) + check_fname(fname, "raw", endings, endings_err=endings_err) split_size = _get_split_size(split_size) - if not self.preload and fname in self._filenames: - raise ValueError('You cannot save data to the same file.' - ' Please use a different filename.') + if not self.preload and fname in self.filenames: + extra = " and overwrite must be True" if not overwrite else "" + raise ValueError( + "In order to save data to the same file, data need to be preloaded" + + extra + ) if self.preload: if np.iscomplexobj(self._data): - warn('Saving raw file with complex data. Loading with ' - 'command-line MNE tools will not work.') - - type_dict = dict(short=FIFF.FIFFT_DAU_PACK16, - int=FIFF.FIFFT_INT, - single=FIFF.FIFFT_FLOAT, - double=FIFF.FIFFT_DOUBLE) - _check_option('fmt', fmt, type_dict.keys()) - reset_dict = dict(short=False, int=False, single=True, double=True) - reset_range = reset_dict[fmt] - data_type = type_dict[fmt] + warn( + "Saving raw file with complex data. Loading with command-line MNE " + "tools will not work." 
+ ) data_test = self[0, 0][0] - if fmt == 'short' and np.iscomplexobj(data_test): - raise ValueError('Complex data must be saved as "single" or ' - '"double", not "short"') + if fmt == "short" and np.iscomplexobj(data_test): + raise ValueError( + 'Complex data must be saved as "single" or "double", not "short"' + ) # check for file existence and expand `~` if present - fname = str( - _check_fname(fname=fname, overwrite=overwrite, verbose="error") - ) + fname = _check_fname(fname=fname, overwrite=overwrite, verbose="error") if proj: info = deepcopy(self.info) projector, info = setup_proj(info) - activate_proj(info['projs'], copy=False) + activate_proj(info["projs"], copy=False) else: info = self.info projector = None @@ -1474,15 +1832,25 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, buffer_size = self._get_buffer_size(buffer_size_sec) # write the raw file - _validate_type(split_naming, str, 'split_naming') - _check_option('split_naming', split_naming, ('neuromag', 'bids')) - _write_raw(fname, self, info, picks, fmt, data_type, reset_range, - start, stop, buffer_size, projector, drop_small_buffer, - split_size, split_naming, 0, None, overwrite) + _validate_type(split_naming, str, "split_naming") + _check_option("split_naming", split_naming, ("neuromag", "bids")) + + cfg = _RawFidWriterCfg(buffer_size, split_size, drop_small_buffer, fmt) + raw_fid_writer = _RawFidWriter(self, info, picks, projector, start, stop, cfg) + filenames = _write_raw(raw_fid_writer, fname, split_naming, overwrite) + return filenames @verbose - def export(self, fname, fmt='auto', physical_range='auto', - add_ch_type=False, *, overwrite=False, verbose=None): + def export( + self, + fname, + fmt="auto", + physical_range="auto", + add_ch_type=False, + *, + overwrite=False, + verbose=None, + ): """Export Raw to external formats. 
%(export_fmt_support_raw)s @@ -1509,12 +1877,19 @@ def export(self, fname, fmt='auto', physical_range='auto', %(export_edf_note)s """ from ..export import export_raw - export_raw(fname, self, fmt, physical_range=physical_range, - add_ch_type=add_ch_type, overwrite=overwrite, - verbose=verbose) + + export_raw( + fname, + self, + fmt, + physical_range=physical_range, + add_ch_type=add_ch_type, + overwrite=overwrite, + verbose=verbose, + ) def _tmin_tmax_to_start_stop(self, tmin, tmax): - start = int(np.floor(tmin * self.info['sfreq'])) + start = int(np.floor(tmin * self.info["sfreq"])) # "stop" is the first sample *not* to save, so we need +1's here if tmax is None: @@ -1523,42 +1898,102 @@ def _tmin_tmax_to_start_stop(self, tmin, tmax): stop = self.time_as_index(float(tmax), use_rounding=True)[0] + 1 stop = min(stop, self.last_samp - self.first_samp + 1) if stop <= start or stop <= 0: - raise ValueError('tmin (%s) and tmax (%s) yielded no samples' - % (tmin, tmax)) + raise ValueError(f"tmin ({tmin}) and tmax ({tmax}) yielded no samples") return start, stop @copy_function_doc_to_method_doc(plot_raw) - def plot(self, events=None, duration=10.0, start=0.0, n_channels=20, - bgcolor='w', color=None, bad_color='lightgray', - event_color='cyan', scalings=None, remove_dc=True, order=None, - show_options=False, title=None, show=True, block=False, - highpass=None, lowpass=None, filtorder=4, clipping=_RAW_CLIP_DEF, - show_first_samp=False, proj=True, group_by='type', - butterfly=False, decim='auto', noise_cov=None, event_id=None, - show_scrollbars=True, show_scalebars=True, time_format='float', - precompute=None, use_opengl=None, *, theme=None, - overview_mode=None, verbose=None): - return plot_raw(self, events, duration, start, n_channels, bgcolor, - color, bad_color, event_color, scalings, remove_dc, - order, show_options, title, show, block, highpass, - lowpass, filtorder, clipping, show_first_samp, - proj, group_by, butterfly, decim, noise_cov=noise_cov, - event_id=event_id, show_scrollbars=show_scrollbars, - show_scalebars=show_scalebars, time_format=time_format, - precompute=precompute, use_opengl=use_opengl, - theme=theme, overview_mode=overview_mode, - verbose=verbose) + def plot( + self, + events=None, + duration=10.0, + start=0.0, + n_channels=20, + bgcolor="w", + color=None, + bad_color="lightgray", + event_color="cyan", + *, + annotation_regex=".*", + scalings=None, + remove_dc=True, + order=None, + show_options=False, + title=None, + show=True, + block=False, + highpass=None, + lowpass=None, + filtorder=4, + clipping=_RAW_CLIP_DEF, + show_first_samp=False, + proj=True, + group_by="type", + butterfly=False, + decim="auto", + noise_cov=None, + event_id=None, + show_scrollbars=True, + show_scalebars=True, + time_format="float", + precompute=None, + use_opengl=None, + picks=None, + theme=None, + overview_mode=None, + splash=True, + verbose=None, + ): + return plot_raw( + self, + events, + duration, + start, + n_channels, + bgcolor, + color, + bad_color, + event_color, + annotation_regex=annotation_regex, + scalings=scalings, + remove_dc=remove_dc, + order=order, + show_options=show_options, + title=title, + show=show, + block=block, + highpass=highpass, + lowpass=lowpass, + filtorder=filtorder, + clipping=clipping, + show_first_samp=show_first_samp, + proj=proj, + group_by=group_by, + butterfly=butterfly, + decim=decim, + noise_cov=noise_cov, + event_id=event_id, + show_scrollbars=show_scrollbars, + show_scalebars=show_scalebars, + time_format=time_format, + precompute=precompute, + 
use_opengl=use_opengl, + picks=picks, + theme=theme, + overview_mode=overview_mode, + splash=splash, + verbose=verbose, + ) @property def ch_names(self): """Channel names.""" - return self.info['ch_names'] + return self.info["ch_names"] @property def times(self): """Time points.""" - out = _arange_div(self.n_times, float(self.info['sfreq'])) - out.flags['WRITEABLE'] = False + out = _arange_div(self.n_times, float(self.info["sfreq"])) + out.flags["WRITEABLE"] = False return out @property @@ -1566,6 +2001,14 @@ def n_times(self): """Number of time points.""" return self.last_samp - self.first_samp + 1 + @property + def duration(self): + """Duration of the data in seconds. + + .. versionadded:: 1.9 + """ + return self.n_times / self.info["sfreq"] + def __len__(self): """Return the number of time points. @@ -1603,29 +2046,31 @@ def load_bad_channels(self, bad_file=None, force=False, verbose=None): raising an error. Defaults to ``False``. %(verbose)s """ - prev_bads = self.info['bads'] + prev_bads = self.info["bads"] new_bads = [] if bad_file is not None: # Check to make sure bad channels are there - names = frozenset(self.info['ch_names']) + names = frozenset(self.info["ch_names"]) with open(bad_file) as fid: bad_names = [line for line in fid.read().splitlines() if line] new_bads = [ci for ci in bad_names if ci in names] count_diff = len(bad_names) - len(new_bads) if count_diff > 0: - msg = (f'{count_diff} bad channel(s) from:' - f'\n{bad_file}\nnot found in:\n{self.filenames[0]}') + msg = ( + f"{count_diff} bad channel(s) from:" + f"\n{bad_file}\nnot found in:\n{self.filenames[0]}" + ) if not force: raise ValueError(msg) else: warn(msg) if prev_bads != new_bads: - logger.info(f'Updating bad channels: {prev_bads} -> {new_bads}') - self.info['bads'] = new_bads + logger.info(f"Updating bad channels: {prev_bads} -> {new_bads}") + self.info["bads"] = new_bads else: - logger.info(f'No channels updated. Bads are: {prev_bads}') + logger.info(f"No channels updated. 
Bads are: {prev_bads}") @fill_doc def append(self, raws, preload=None): @@ -1665,69 +2110,77 @@ def append(self, raws, preload=None): self.preload = False else: # do the concatenation ourselves since preload might be a string - nchan = self.info['nchan'] + nchan = self.info["nchan"] c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)]) nsamp = c_ns[-1] if not self.preload: - this_data = self._read_segment(projector=self._projector) + this_data = self._read_segment() else: this_data = self._data # allocate the buffer _data = _allocate_data(preload, (nchan, nsamp), this_data.dtype) - _data[:, 0:c_ns[0]] = this_data + _data[:, 0 : c_ns[0]] = this_data for ri in range(len(raws)): if not raws[ri].preload: # read the data directly into the buffer - data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]] - raws[ri]._read_segment(data_buffer=data_buffer, - projector=self._projector) + data_buffer = _data[:, c_ns[ri] : c_ns[ri + 1]] + raws[ri]._read_segment(data_buffer=data_buffer) else: - _data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data + _data[:, c_ns[ri] : c_ns[ri + 1]] = raws[ri]._data self._data = _data self.preload = True # now combine information from each raw file to construct new self annotations = self.annotations - assert annotations.orig_time == self.info['meas_date'] + assert annotations.orig_time == self.info["meas_date"] edge_samps = list() for ri, r in enumerate(raws): - n_samples = self.last_samp - self.first_samp + 1 + edge_samps.append(self.last_samp - self.first_samp + 1) annotations = _combine_annotations( - annotations, r.annotations, n_samples, - self.first_samp, r.first_samp, - self.info['sfreq']) - edge_samps.append(sum(self._last_samps) - - sum(self._first_samps) + (ri + 1)) + annotations, + r.annotations, + edge_samps[-1], + self.first_samp, + r.first_samp, + self.info["sfreq"], + ) self._first_samps = np.r_[self._first_samps, r._first_samps] self._last_samps = np.r_[self._last_samps, r._last_samps] self._read_picks += r._read_picks self._raw_extras += r._raw_extras - self._filenames += r._filenames - assert annotations.orig_time == self.info['meas_date'] + self._filenames += r._filenames # use the private attribute to use the list + assert annotations.orig_time == self.info["meas_date"] # The above _combine_annotations gets everything synchronized to # first_samp. set_annotations (with no absolute time reference) assumes # that the annotations being set are relative to first_samp, and will # add it back on. 
So here we have to remove it: if annotations.orig_time is None: - annotations.onset -= self.first_samp / self.info['sfreq'] + annotations.onset -= self.first_samp / self.info["sfreq"] self.set_annotations(annotations) for edge_samp in edge_samps: - onset = _sync_onset(self, (edge_samp) / self.info['sfreq'], True) - self.annotations.append(onset, 0., 'BAD boundary') - self.annotations.append(onset, 0., 'EDGE boundary') - if not (len(self._first_samps) == len(self._last_samps) == - len(self._raw_extras) == len(self._filenames) == - len(self._read_picks)): - raise RuntimeError('Append error') # should never happen + onset = _sync_onset(self, edge_samp / self.info["sfreq"], True) + logger.debug( + f"Marking edge at {edge_samp} samples (maps to {onset:0.3f} sec)" + ) + self.annotations.append(onset, 0.0, "BAD boundary") + self.annotations.append(onset, 0.0, "EDGE boundary") + if not ( + len(self._first_samps) + == len(self._last_samps) + == len(self._raw_extras) + == len(self.filenames) + == len(self._read_picks) + ): + raise RuntimeError("Append error") # should never happen def close(self): """Clean up the object. Does nothing for objects that close their file descriptors. - Things like RawFIF will override this method. + Things like Raw will override this method. """ pass # noqa @@ -1743,35 +2196,35 @@ def copy(self): def __repr__(self): # noqa: D105 name = self.filenames[0] - name = '' if name is None else op.basename(name) + ', ' + name = "" if name is None else Path(name).name + ", " size_str = str(sizeof_fmt(self._size)) # str in case it fails -> None - size_str += ', data%s loaded' % ('' if self.preload else ' not') - s = ('%s%s x %s (%0.1f s), ~%s' - % (name, len(self.ch_names), self.n_times, self.times[-1], - size_str)) - return "<%s | %s>" % (self.__class__.__name__, s) + size_str += f", data{'' if self.preload else ' not'} loaded" + s = ( + f"{name}{len(self.ch_names)} x {self.n_times} " + f"({self.duration:0.1f} s), ~{size_str}" + ) + return f"<{self.__class__.__name__} | {s}>" @repr_html - def _repr_html_(self, caption=None): - from ..html_templates import repr_templates_env - basenames = [ - os.path.basename(f) for f in self._filenames if f is not None - ] + def _repr_html_(self): + basenames = [f.name for f in self.filenames if f is not None] - # https://stackoverflow.com/a/10981895 - duration = timedelta(seconds=self.times[-1]) - hours, remainder = divmod(duration.seconds, 3600) - minutes, seconds = divmod(remainder, 60) - seconds += duration.microseconds / 1e6 - seconds = np.ceil(seconds) # always take full seconds + duration = self._get_duration_string() - duration = f'{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d}' - raw_template = repr_templates_env.get_template('raw.html.jinja') + raw_template = _get_html_template("repr", "raw.html.jinja") return raw_template.render( - info_repr=self.info._repr_html_(caption=caption), - filenames=basenames, duration=duration + inst=self, + filenames=basenames, + duration=duration, ) + def _get_duration_string(self): + # https://stackoverflow.com/a/10981895 + duration = np.ceil(self.duration) # always take full seconds + hours, remainder = divmod(duration, 3600) + minutes, seconds = divmod(remainder, 60) + return f"{hours:02.0f}:{minutes:02.0f}:{seconds:02.0f}" + def add_events(self, events, stim_channel=None, replace=False): """Add events to stim channel. @@ -1795,23 +2248,25 @@ def add_events(self, events, stim_channel=None, replace=False): ----- Data must be preloaded in order to add events. 
""" - _check_preload(self, 'Adding events') + _check_preload(self, "Adding events") events = np.asarray(events) if events.ndim != 2 or events.shape[1] != 3: - raise ValueError('events must be shape (n_events, 3)') + raise ValueError("events must be shape (n_events, 3)") stim_channel = _get_stim_channel(stim_channel, self.info) - pick = pick_channels(self.ch_names, stim_channel) + pick = pick_channels(self.ch_names, stim_channel, ordered=False) if len(pick) == 0: - raise ValueError('Channel %s not found' % stim_channel) + raise ValueError(f"Channel {stim_channel} not found") pick = pick[0] idx = events[:, 0].astype(int) if np.any(idx < self.first_samp) or np.any(idx > self.last_samp): - raise ValueError('event sample numbers must be between %s and %s' - % (self.first_samp, self.last_samp)) + raise ValueError( + f"event sample numbers must be between {self.first_samp} " + f"and {self.last_samp}" + ) if not all(idx == events[:, 0]): - raise ValueError('event sample numbers must be integers') + raise ValueError("event sample numbers must be integers") if replace: - self._data[pick, :] = 0. + self._data[pick, :] = 0.0 self._data[pick, idx - self.first_samp] += events[:, 2] def _get_buffer_size(self, buffer_size_sec=None): @@ -1819,23 +2274,40 @@ def _get_buffer_size(self, buffer_size_sec=None): if buffer_size_sec is None: buffer_size_sec = self.buffer_size_sec buffer_size_sec = float(buffer_size_sec) - return int(np.ceil(buffer_size_sec * self.info['sfreq'])) + return int(np.ceil(buffer_size_sec * self.info["sfreq"])) @verbose - def compute_psd(self, method='welch', fmin=0, fmax=np.inf, tmin=None, - tmax=None, picks=None, proj=False, - reject_by_annotation=True, *, n_jobs=1, verbose=None, - **method_kw): + def compute_psd( + self, + method="welch", + fmin=0, + fmax=np.inf, + tmin=None, + tmax=None, + picks=None, + exclude=(), + proj=False, + remove_dc=True, + reject_by_annotation=True, + *, + n_jobs=1, + verbose=None, + **method_kw, + ): """Perform spectral analysis on sensor data. Parameters ---------- %(method_psd)s - Default is ``'welch'``. + Note that ``"multitaper"`` cannot be used if ``reject_by_annotation=True`` + and there are ``"bad_*"`` annotations in the :class:`~mne.io.Raw` data; + in such cases use ``"welch"``. Default is ``'welch'``. %(fmin_fmax_psd)s %(tmin_tmax_psd)s %(picks_good_data_noref)s + %(exclude_psd)s %(proj_psd)s + %(remove_dc)s %(reject_by_annotation_psd)s %(n_jobs)s %(verbose)s @@ -1858,15 +2330,99 @@ def compute_psd(self, method='welch', fmin=0, fmax=np.inf, tmin=None, self._set_legacy_nfft_default(tmin, tmax, method, method_kw) return Spectrum( - self, method=method, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, - picks=picks, proj=proj, reject_by_annotation=reject_by_annotation, - n_jobs=n_jobs, verbose=verbose, **method_kw) + self, + method=method, + fmin=fmin, + fmax=fmax, + tmin=tmin, + tmax=tmax, + picks=picks, + exclude=exclude, + proj=proj, + remove_dc=remove_dc, + reject_by_annotation=reject_by_annotation, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) @verbose - def to_data_frame(self, picks=None, index=None, - scalings=None, copy=True, start=None, stop=None, - long_format=False, time_format=None, *, - verbose=None): + def compute_tfr( + self, + method, + freqs, + *, + tmin=None, + tmax=None, + picks=None, + proj=False, + output="power", + reject_by_annotation=True, + decim=1, + n_jobs=None, + verbose=None, + **method_kw, + ): + """Compute a time-frequency representation of sensor data. 
+ + Parameters + ---------- + %(method_tfr)s + %(freqs_tfr)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(proj_psd)s + %(output_compute_tfr)s + %(reject_by_annotation_tfr)s + %(decim_tfr)s + %(n_jobs)s + %(verbose)s + %(method_kw_tfr)s + + Returns + ------- + tfr : instance of RawTFR + The time-frequency-resolved power estimates of the data. + + Notes + ----- + .. versionadded:: 1.7 + + References + ---------- + .. footbibliography:: + """ + _check_option("output", output, ("power", "phase", "complex")) + method_kw["output"] = output + return RawTFR( + self, + method=method, + freqs=freqs, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + reject_by_annotation=reject_by_annotation, + decim=decim, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def to_data_frame( + self, + picks=None, + index=None, + scalings=None, + copy=True, + start=None, + stop=None, + long_format=False, + time_format=None, + *, + verbose=None, + ): """Export data in tabular structure as a pandas DataFrame. Channels are converted to columns in the DataFrame. By default, an @@ -1900,13 +2456,14 @@ def to_data_frame(self, picks=None, index=None, # check pandas once here, instead of in each private utils function pd = _check_pandas_installed() # noqa # arg checking - valid_index_args = ['time'] - valid_time_formats = ['ms', 'timedelta', 'datetime'] + valid_index_args = ["time"] + valid_time_formats = ["ms", "timedelta", "datetime"] index = _check_pandas_index_arguments(index, valid_index_args) - time_format = _check_time_format(time_format, valid_time_formats, - self.info['meas_date']) + time_format = _check_time_format( + time_format, valid_time_formats, self.info["meas_date"] + ) # get data - picks = _picks_to_idx(self.info, picks, 'all', exclude=()) + picks = _picks_to_idx(self.info, picks, "all", exclude=()) data, times = self[picks, start:stop] data = data.T if copy: @@ -1914,11 +2471,17 @@ def to_data_frame(self, picks=None, index=None, data = _scale_dataframe_data(self, data, picks, scalings) # prepare extra columns / multiindex mindex = list() - times = _convert_times(self, times, time_format) - mindex.append(('time', times)) + times = _convert_times( + times, + time_format, + meas_date=self.info["meas_date"], + first_time=self.first_time, + ) + mindex.append(("time", times)) # build DataFrame - df = _build_data_frame(self, data, picks, long_format, mindex, index, - default_index=['time']) + df = _build_data_frame( + self, data, picks, long_format, mindex, index, default_index=["time"] + ) return df def describe(self, data_frame=False): @@ -1940,7 +2503,6 @@ def describe(self, data_frame=False): If data_frame=False, returns None. If data_frame=True, returns results in a pandas.DataFrame (requires pandas). 
""" - from scipy.stats import scoreatpercentile as q nchan = self.info["nchan"] # describe each channel @@ -1952,13 +2514,14 @@ def describe(self, data_frame=False): cols["type"].append(channel_type(self.info, i)) cols["unit"].append(_unit2human[ch["unit"]]) cols["min"].append(np.min(data)) - cols["Q1"].append(q(data, 25)) + cols["Q1"].append(np.percentile(data, 25)) cols["median"].append(np.median(data)) - cols["Q3"].append(q(data, 75)) + cols["Q3"].append(np.percentile(data, 75)) cols["max"].append(np.max(data)) if data_frame: # return data frame import pandas as pd + df = pd.DataFrame(cols) df.index.name = "ch" return df @@ -1967,35 +2530,41 @@ def describe(self, data_frame=False): scalings = _handle_default("scalings") units = _handle_default("units") for i in range(nchan): - unit = units.get(cols['type'][i]) - scaling = scalings.get(cols['type'][i], 1) + unit = units.get(cols["type"][i]) + scaling = scalings.get(cols["type"][i], 1) if scaling != 1: - cols['unit'][i] = unit + cols["unit"][i] = unit for col in ["min", "Q1", "median", "Q3", "max"]: cols[col][i] *= scaling - lens = {"ch": max(2, len(str(nchan))), - "name": max(4, max([len(n) for n in cols["name"]])), - "type": max(4, max([len(t) for t in cols["type"]])), - "unit": max(4, max([len(u) for u in cols["unit"]]))} + lens = { + "ch": max(2, len(str(nchan))), + "name": max(4, max([len(n) for n in cols["name"]])), + "type": max(4, max([len(t) for t in cols["type"]])), + "unit": max(4, max([len(u) for u in cols["unit"]])), + } # print description, start with header print(self) - print(f"{'ch':>{lens['ch']}} " - f"{'name':<{lens['name']}} " - f"{'type':<{lens['type']}} " - f"{'unit':<{lens['unit']}} " - f"{'min':>9} " - f"{'Q1':>9} " - f"{'median':>9} " - f"{'Q3':>9} " - f"{'max':>9}") + print( + f"{'ch':>{lens['ch']}} " + f"{'name':<{lens['name']}} " + f"{'type':<{lens['type']}} " + f"{'unit':<{lens['unit']}} " + f"{'min':>9} " + f"{'Q1':>9} " + f"{'median':>9} " + f"{'Q3':>9} " + f"{'max':>9}" + ) # print description for each channel for i in range(nchan): - msg = (f"{i:>{lens['ch']}} " - f"{cols['name'][i]:<{lens['name']}} " - f"{cols['type'][i].upper():<{lens['type']}} " - f"{cols['unit'][i]:<{lens['unit']}} ") + msg = ( + f"{i:>{lens['ch']}} " + f"{cols['name'][i]:<{lens['name']}} " + f"{cols['type'][i].upper():<{lens['type']}} " + f"{cols['unit'][i]:<{lens['unit']}} " + ) for col in ["min", "Q1", "median", "Q3"]: msg += f"{cols[col][i]:>9.2f} " msg += f"{cols['max'][i]:>9.2f}" @@ -2007,31 +2576,11 @@ def _allocate_data(preload, shape, dtype): if preload in (None, True): # None comes from _read_segment data = np.zeros(shape, dtype) else: - _validate_type(preload, 'path-like', 'preload') - data = np.memmap(str(preload), mode='w+', dtype=dtype, shape=shape) + _validate_type(preload, "path-like", "preload") + data = np.memmap(str(preload), mode="w+", dtype=dtype, shape=shape) return data -def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False): - """Convert indices to time. - - Parameters - ---------- - index : list-like | int - List of ints or int representing points in time. - use_first_samp : boolean - If True, the time returned is relative to the session onset, else - relative to the recording onset. - - Returns - ------- - times : ndarray - Times corresponding to the index supplied. 
- """ - times = np.atleast_1d(index) + (first_samp if use_first_samp else 0) - return times / sfreq - - def _convert_slice(sel): if len(sel) and (np.diff(sel) == 1).all(): return slice(sel[0], sel[-1] + 1) @@ -2053,22 +2602,24 @@ def _get_ch_factors(inst, units, picks_idxs): Returns ------- ch_factors : ndarray of floats, shape(len(picks),) - The sacling factors for each channel, ordered according + The scaling factors for each channel, ordered according to picks. """ _validate_type(units, types=(None, str, dict), item_name="units") ch_factors = np.ones(len(picks_idxs)) - si_units = _handle_default('si_units') + si_units = _handle_default("si_units") ch_types = inst.get_channel_types(picks=picks_idxs) # Convert to dict if str units if isinstance(units, str): # Check that there is only one channel type unit_ch_type = list(set(ch_types) & set(si_units.keys())) if len(unit_ch_type) > 1: - raise ValueError('"units" cannot be str if there is more than ' - 'one channel type with a unit ' - f'{unit_ch_type}.') + raise ValueError( + '"units" cannot be str if there is more than ' + "one channel type with a unit " + f"{unit_ch_type}." + ) units = {unit_ch_type[0]: units} # make the str argument a dict # Loop over the dict to get channel factors if isinstance(units, dict): @@ -2076,8 +2627,7 @@ def _get_ch_factors(inst, units, picks_idxs): # Get the scaling factors scaling = _get_scaling(ch_type, ch_unit) if scaling != 1: - indices = [i_ch for i_ch, ch in enumerate(ch_types) - if ch == ch_type] + indices = [i_ch for i_ch, ch in enumerate(ch_types) if ch == ch_type] ch_factors[indices] *= scaling return ch_factors @@ -2099,43 +2649,44 @@ def _get_scaling(ch_type, target_unit): The scaling factor to convert from the si_unit (used by default for MNE objects) to the target unit. """ - scaling = 1. - si_units = _handle_default('si_units') - si_units_splitted = {key: si_units[key].split('/') for key in si_units} - prefixes = _handle_default('prefixes') + scaling = 1.0 + si_units = _handle_default("si_units") + si_units_splitted = {key: si_units[key].split("/") for key in si_units} + prefixes = _handle_default("prefixes") prefix_list = list(prefixes.keys()) # Check that the provided unit exists for the ch_type - unit_list = target_unit.split('/') + unit_list = target_unit.split("/") if ch_type not in si_units.keys(): raise KeyError( - f'{ch_type} is not a channel type that can be scaled ' - 'from units.') + f"{ch_type} is not a channel type that can be scaled from units." + ) si_unit_list = si_units_splitted[ch_type] if len(unit_list) != len(si_unit_list): raise ValueError( - f'{target_unit} is not a valid unit for {ch_type}, use a ' - f'sub-multiple of {si_units[ch_type]} instead.') + f"{target_unit} is not a valid unit for {ch_type}, use a " + f"sub-multiple of {si_units[ch_type]} instead." + ) for i, unit in enumerate(unit_list): - valid = [prefix + si_unit_list[i] - for prefix in prefix_list] + valid = [prefix + si_unit_list[i] for prefix in prefix_list] if unit not in valid: raise ValueError( - f'{target_unit} is not a valid unit for {ch_type}, use a ' - f'sub-multiple of {si_units[ch_type]} instead.') + f"{target_unit} is not a valid unit for {ch_type}, use a " + f"sub-multiple of {si_units[ch_type]} instead." + ) # Get the scaling factors for i, unit in enumerate(unit_list): has_square = False # XXX power normally not used as csd cannot get_data() - if unit[-1] == '²': + if unit[-1] == "²": has_square = True - if unit == 'm' or unit == 'm²': - factor = 1. 
+ if unit == "m" or unit == "m²": + factor = 1.0 elif unit[0] in prefixes.keys(): factor = prefixes[unit[0]] else: - factor = 1. + factor = 1.0 if factor != 1: if has_square: factor *= factor @@ -2146,30 +2697,34 @@ def _get_scaling(ch_type, target_unit): return scaling -class _ReadSegmentFileProtector(object): +class _ReadSegmentFileProtector: """Ensure only _filenames, _raw_extras, and _read_segment_file are used.""" def __init__(self, raw): self.__raw = raw - assert hasattr(raw, '_projector') + assert hasattr(raw, "_projector") self._filenames = raw._filenames self._raw_extras = raw._raw_extras def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): return self.__raw.__class__._read_segment_file( - self, data, idx, fi, start, stop, cals, mult) + self, data, idx, fi, start, stop, cals, mult + ) + + @property + def filenames(self) -> tuple[Path, ...]: + return tuple(self._filenames) -class _RawShell(object): +class _RawShell: """Create a temporary raw object.""" - def __init__(self): # noqa: D102 + def __init__(self): self.first_samp = None self.last_samp = None self._first_time = None self._last_time = None self._cals = None - self._rawdir = None self._projector = None @property @@ -2188,72 +2743,68 @@ def set_annotations(self, annotations): ############################################################################### # Writing -def _write_raw(fname, raw, info, picks, fmt, data_type, reset_range, start, - stop, buffer_size, projector, drop_small_buffer, - split_size, split_naming, part_idx, prev_fname, overwrite): + +# Assume we never hit more than 100 splits, like for epochs +MAX_N_SPLITS = 100 + + +def _write_raw(raw_fid_writer, fpath, split_naming, overwrite): """Write raw file with splitting.""" - # we've done something wrong if we hit this - n_times_max = len(raw.times) - if start >= stop or stop > n_times_max: - raise RuntimeError('Cannot write raw file with no data: %s -> %s ' - '(max: %s) requested' % (start, stop, n_times_max)) - - # Expand `~` if present - fname = str(_check_fname(fname=fname, overwrite=overwrite)) - - base, ext = op.splitext(fname) - if part_idx > 0: - if split_naming == 'neuromag': - # insert index in filename - use_fname = '%s-%d%s' % (base, part_idx, ext) + dir_path = fpath.parent + _check_fname( + dir_path, + overwrite="read", + must_exist=True, + name="parent directory", + need_dir=True, + ) + # We have to create one extra filename here to make the for loop below happy, + # but it will raise an error if it actually gets used + split_fnames = _make_split_fnames( + fpath.name, n_splits=MAX_N_SPLITS + 1, split_naming=split_naming + ) + is_next_split, prev_fname = True, None + output_fnames = [] + for part_idx in range(0, MAX_N_SPLITS): + if not is_next_split: + break + bids_special_behavior = part_idx == 0 and split_naming == "bids" + if bids_special_behavior: + reserved_fname = dir_path / split_fnames[0] + logger.info(f"Reserving possible split file {reserved_fname.name}") + _check_fname(reserved_fname, overwrite) + reserved_ctx = _ReservedFilename(reserved_fname) + use_fpath = fpath else: - assert split_naming == 'bids' - use_fname = _construct_bids_filename(base, ext, part_idx + 1) - # check for file existence - _check_fname(use_fname, overwrite) - else: - use_fname = fname - # reserve our BIDS split fname in case we need to split - if split_naming == 'bids' and part_idx == 0: - # reserve our possible split name - reserved_fname = _construct_bids_filename(base, ext, part_idx + 1) - logger.info( - f'Reserving possible split file 
{op.basename(reserved_fname)}') - _check_fname(reserved_fname, overwrite) - ctx = _ReservedFilename(reserved_fname) + reserved_ctx = nullcontext() + use_fpath = dir_path / split_fnames[part_idx] + next_fname = split_fnames[part_idx + 1] + _check_fname(use_fpath, overwrite) + + logger.info(f"Writing {use_fpath}") + with start_and_end_file(use_fpath) as fid, reserved_ctx: + is_next_split = raw_fid_writer.write(fid, part_idx, prev_fname, next_fname) + logger.info(f"Closing {use_fpath}") + if bids_special_behavior and is_next_split: + logger.info(f"Renaming BIDS split file {fpath.name}") + prev_fname = dir_path / split_fnames[0] + shutil.move(use_fpath, prev_fname) + output_fnames.append(prev_fname) + else: + output_fnames.append(use_fpath) + prev_fname = use_fpath else: - reserved_fname = use_fname - ctx = nullcontext() - logger.info('Writing %s' % use_fname) - - picks = _picks_to_idx(info, picks, 'all', ()) - with start_and_end_file(use_fname) as fid: - cals = _start_writing_raw(fid, info, picks, data_type, - reset_range, raw.annotations) - with ctx: - final_fname = _write_raw_fid( - raw, info, picks, fid, cals, part_idx, start, stop, - buffer_size, prev_fname, split_size, use_fname, - projector, drop_small_buffer, fmt, fname, reserved_fname, - data_type, reset_range, split_naming, - overwrite=True # we've started writing already above - ) - if final_fname != use_fname: - assert split_naming == 'bids' - logger.info(f'Renaming BIDS split file {op.basename(final_fname)}') - ctx.remove = False - shutil.move(use_fname, final_fname) - if part_idx == 0: - logger.info('[done]') - return final_fname, part_idx + raise RuntimeError(f"Exceeded maximum number of splits ({MAX_N_SPLITS}).") + logger.info("[done]") + return output_fnames -class _ReservedFilename: - def __init__(self, fname): +class _ReservedFilename: + def __init__(self, fname: Path): self.fname = fname - assert op.isdir(op.dirname(fname)), fname - with open(fname, 'w'): + assert fname.parent.exists(), fname + with open(fname, "w"): pass self.remove = True @@ -2262,13 +2813,108 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, traceback): if self.remove: - os.remove(self.fname) - + self.fname.unlink() + + +@dataclass(frozen=True) +class _RawFidWriterCfg: + buffer_size: int + split_size: int + drop_small_buffer: bool + fmt: str + reset_range: bool = field(init=False) + data_type: int = field(init=False) + + def __post_init__(self): + type_dict = dict( + short=FIFF.FIFFT_DAU_PACK16, + int=FIFF.FIFFT_INT, + single=FIFF.FIFFT_FLOAT, + double=FIFF.FIFFT_DOUBLE, + ) + _check_option("fmt", self.fmt, type_dict.keys()) + reset_dict = dict(short=False, int=False, single=True, double=True) + object.__setattr__(self, "reset_range", reset_dict[self.fmt]) + object.__setattr__(self, "data_type", type_dict[self.fmt]) + + +class _RawFidWriter: + def __init__(self, raw, info, picks, projector, start, stop, cfg): + self.raw = raw + self.picks = _picks_to_idx(info, picks, "all", ()) + self.info = pick_info(info, sel=self.picks, copy=True) + for k in range(self.info["nchan"]): + # Scan numbers may have been messed up + self.info["chs"][k]["scanno"] = k + 1 # scanno starts at 1 in FIF format + if cfg.reset_range: + self.info["chs"][k]["range"] = 1.0 + self.projector = projector + # self.start is the only mutable attribute in this design! 
+ self.start, self.stop = start, stop + self.cfg = cfg + + def write(self, fid, part_idx, prev_fname, next_fname): + self._check_start_stop_within_bounds() + start_block(fid, FIFF.FIFFB_MEAS) + _write_raw_metadata( + fid, + self.info, + self.cfg.data_type, + self.cfg.reset_range, + self.raw.annotations, + ) + self.start = _write_raw_data( + self.raw, + self.info, + self.picks, + fid, + part_idx, + self.start, + self.stop, + self.cfg.buffer_size, + prev_fname, + self.cfg.split_size, + next_fname, + self.projector, + self.cfg.drop_small_buffer, + self.cfg.fmt, + ) + end_block(fid, FIFF.FIFFB_MEAS) + is_next_split = self.start < self.stop + return is_next_split + + def _check_start_stop_within_bounds(self): + # we've done something wrong if we hit this + n_times_max = len(self.raw.times) + error_msg = ( + f"Can't write raw file with no data: {self.start} -> {self.stop} " + f"(max: {n_times_max}) requested" + ) + if self.start >= self.stop or self.stop > n_times_max: + raise RuntimeError(error_msg) + + +def _write_raw_data( + raw, + info, + picks, + fid, + part_idx, + start, + stop, + buffer_size, + prev_fname, + split_size, + next_fname, + projector, + drop_small_buffer, + fmt, +): + # Start the raw data + data_kind = "IAS_" if info.get("maxshield", False) else "" + data_kind = getattr(FIFF, f"FIFFB_{data_kind}RAW_DATA") + start_block(fid, data_kind) -def _write_raw_fid(raw, info, picks, fid, cals, part_idx, start, stop, - buffer_size, prev_fname, split_size, use_fname, - projector, drop_small_buffer, fmt, fname, reserved_fname, - data_type, reset_range, split_naming, overwrite): first_samp = raw.first_samp + start if first_samp != 0: write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp) @@ -2278,17 +2924,19 @@ def _write_raw_fid(raw, info, picks, fid, cals, part_idx, start, stop, start_block(fid, FIFF.FIFFB_REF) write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE) write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname) - if info['meas_id'] is not None: - write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id']) + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_REF_FILE_ID, info["meas_id"]) write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1) end_block(fid, FIFF.FIFFB_REF) pos_prev = fid.tell() if pos_prev > split_size: - raise ValueError('file is larger than "split_size" after writing ' - 'measurement information, you must use a larger ' - 'value for split size: %s plus enough bytes for ' - 'the chosen buffer_size' % pos_prev) + raise ValueError( + 'file is larger than "split_size" after writing ' + "measurement information, you must use a larger " + f"value for split size: {pos_prev} plus enough bytes for " + "the chosen buffer_size" + ) # Check to see if this has acquisition skips and, if so, if we can # write out empty buffers instead of zeroes @@ -2296,18 +2944,22 @@ def _write_raw_fid(raw, info, picks, fid, cals, part_idx, start, stop, lasts = np.array(firsts) + buffer_size if lasts[-1] > stop: lasts[-1] = stop - sk_onsets, sk_ends = _annotations_starts_stops(raw, 'bad_acq_skip') + sk_onsets, sk_ends = _annotations_starts_stops(raw, "bad_acq_skip") do_skips = False if len(sk_onsets) > 0: - if np.in1d(sk_onsets, firsts).all() and np.in1d(sk_ends, lasts).all(): + if np.isin(sk_onsets, firsts).all() and np.isin(sk_ends, lasts).all(): do_skips = True else: if part_idx == 0: - warn('Acquisition skips detected but did not fit evenly into ' - 'output buffer_size, will be written as zeroes.') + warn( + "Acquisition skips detected but did not fit evenly into " + "output buffer_size, 
will be written as zeroes." + ) + cals = [ch["cal"] * ch["range"] for ch in info["chs"]] + # Write the blocks n_current_skip = 0 - final_fname = use_fname + new_start = start for first, last in zip(firsts, lasts): if do_skips: if ((first >= sk_onsets) & (last <= sk_ends)).any(): @@ -2328,12 +2980,10 @@ def _write_raw_fid(raw, info, picks, fid, cals, part_idx, start, stop, if projector is not None: data = np.dot(projector, data) - if ((drop_small_buffer and (first > start) and - (len(times) < buffer_size))): - logger.info('Skipping data chunk due to small buffer ... ' - '[done]') + if drop_small_buffer and (first > start) and (len(times) < buffer_size): + logger.info("Skipping data chunk due to small buffer ... [done]") break - logger.debug(f'Writing FIF {first:6d} ... {last:6d} ...') + logger.debug(f"Writing FIF {first:6d} ... {last:6d} ...") _write_raw_buffer(fid, data, cals, fmt) pos = fid.tell() @@ -2343,46 +2993,39 @@ def _write_raw_fid(raw, info, picks, fid, cals, part_idx, start, stop, # This should occur on the first buffer write of the file, so # we should mention the space required for the meas info raise ValueError( - 'buffer size (%s) is too large for the given split size (%s) ' - 'by %s bytes after writing info (%s) and leaving enough space ' - 'for end tags (%s): decrease "buffer_size_sec" or increase ' - '"split_size".' % (this_buff_size_bytes, split_size, overage, - pos_prev, _NEXT_FILE_BUFFER)) + f"buffer size ({this_buff_size_bytes}) is too large for the " + f"given split size ({split_size}) " + f"by {overage} bytes after writing info ({pos_prev}) and " + "leaving enough space " + f'for end tags ({_NEXT_FILE_BUFFER}): decrease "buffer_size_sec" ' + 'or increase "split_size".' + ) + new_start = last # Split files if necessary, leave some space for next file info # make sure we check to make sure we actually *need* another buffer # with the "and" check - if pos >= split_size - this_buff_size_bytes - _NEXT_FILE_BUFFER and \ - first + buffer_size < stop: - final_fname = reserved_fname - next_fname, next_idx = _write_raw( - fname, raw, info, picks, fmt, - data_type, reset_range, first + buffer_size, stop, buffer_size, - projector, drop_small_buffer, split_size, split_naming, - part_idx + 1, final_fname, overwrite) - + if ( + pos >= split_size - this_buff_size_bytes - _NEXT_FILE_BUFFER + and first + buffer_size < stop + ): start_block(fid, FIFF.FIFFB_REF) write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE) - write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname)) - if info['meas_id'] is not None: - write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id']) - write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx) + write_string(fid, FIFF.FIFF_REF_FILE_NAME, next_fname.name) + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_REF_FILE_ID, info["meas_id"]) + write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx + 1) end_block(fid, FIFF.FIFFB_REF) + break pos_prev = pos - logger.info('Closing %s' % use_fname) - if info.get('maxshield', False): - end_block(fid, FIFF.FIFFB_IAS_RAW_DATA) - else: - end_block(fid, FIFF.FIFFB_RAW_DATA) - end_block(fid, FIFF.FIFFB_MEAS) - return final_fname + end_block(fid, data_kind) + return new_start @fill_doc -def _start_writing_raw(fid, info, sel, data_type, - reset_range, annotations): +def _write_raw_metadata(fid, info, data_type, reset_range, annotations): """Start write raw data in file. Parameters @@ -2390,9 +3033,6 @@ def _start_writing_raw(fid, info, sel, data_type, fid : file The created file. 
%(info_not_none)s - sel : array of int | None - Indices of channels to include. If None, all channels - are included. data_type : int The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), 5 (FIFFT_DOUBLE), 16 (FIFFT_DAU_PACK16), or 3 (FIFFT_INT) for raw data. @@ -2401,35 +3041,13 @@ def _start_writing_raw(fid, info, sel, data_type, annotations : instance of Annotations The annotations to write. - Returns - ------- - fid : file - The file descriptor. - cals : list - calibration factors. """ - # - # Measurement info - # - info = pick_info(info, sel) - # # Create the file and save the essentials # - start_block(fid, FIFF.FIFFB_MEAS) write_id(fid, FIFF.FIFF_BLOCK_ID) - if info['meas_id'] is not None: - write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id']) - - cals = [] - for k in range(info['nchan']): - # - # Scan numbers may have been messed up - # - info['chs'][k]['scanno'] = k + 1 # scanno starts at 1 in FIF format - if reset_range is True: - info['chs'][k]['range'] = 1.0 - cals.append(info['chs'][k]['cal'] * info['chs'][k]['range']) + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info["meas_id"]) write_meas_info(fid, info, data_type=data_type, reset_range=reset_range) @@ -2439,16 +3057,6 @@ def _start_writing_raw(fid, info, sel, data_type, if len(annotations) > 0: # don't save empty annot _write_annotations(fid, annotations) - # - # Start the raw data - # - if info.get('maxshield', False): - start_block(fid, FIFF.FIFFB_IAS_RAW_DATA) - else: - start_block(fid, FIFF.FIFFB_RAW_DATA) - - return cals - def _write_raw_buffer(fid, buf, cals, fmt): """Write raw buffer. @@ -2467,30 +3075,31 @@ def _write_raw_buffer(fid, buf, cals, fmt): that short and int formats cannot be used for complex data. """ if buf.shape[0] != len(cals): - raise ValueError('buffer and calibration sizes do not match') + raise ValueError("buffer and calibration sizes do not match") - _check_option('fmt', fmt, ['short', 'int', 'single', 'double']) + _check_option("fmt", fmt, ["short", "int", "single", "double"]) cast_int = False # allow unsafe cast if np.isrealobj(buf): - if fmt == 'short': + if fmt == "short": write_function = write_dau_pack16 cast_int = True - elif fmt == 'int': + elif fmt == "int": write_function = write_int cast_int = True - elif fmt == 'single': + elif fmt == "single": write_function = write_float else: write_function = write_double else: - if fmt == 'single': + if fmt == "single": write_function = write_complex64 - elif fmt == 'double': + elif fmt == "double": write_function = write_complex128 else: - raise ValueError('only "single" and "double" supported for ' - 'writing complex data') + raise ValueError( + 'only "single" and "double" supported for writing complex data' + ) buf = buf / np.ravel(cals)[:, None] if cast_int: @@ -2502,36 +3111,42 @@ def _check_raw_compatibility(raw): """Ensure all instances of Raw have compatible parameters.""" for ri in range(1, len(raw)): if not isinstance(raw[ri], type(raw[0])): - raise ValueError(f'raw[{ri}] type must match') - for key in ('nchan', 'sfreq'): + raise ValueError(f"raw[{ri}] type must match") + for key in ("nchan", "sfreq"): a, b = raw[ri].info[key], raw[0].info[key] if a != b: raise ValueError( - f'raw[{ri}].info[{key}] must match:\n' - f'{repr(a)} != {repr(b)}') - for kind in ('bads', 'ch_names'): + f"raw[{ri}].info[{key}] must match:\n{repr(a)} != {repr(b)}" + ) + for kind in ("bads", "ch_names"): set1 = set(raw[0].info[kind]) set2 = set(raw[ri].info[kind]) mismatch = set1.symmetric_difference(set2) if 
mismatch: - raise ValueError(f'raw[{ri}][\'info\'][{kind}] do not match: ' - f'{sorted(mismatch)}') + raise ValueError( + f"raw[{ri}]['info'][{kind}] do not match: {sorted(mismatch)}" + ) if any(raw[ri]._cals != raw[0]._cals): - raise ValueError('raw[%d]._cals must match' % ri) - if len(raw[0].info['projs']) != len(raw[ri].info['projs']): - raise ValueError('SSP projectors in raw files must be the same') - if not all(_proj_equal(p1, p2) for p1, p2 in - zip(raw[0].info['projs'], raw[ri].info['projs'])): - raise ValueError('SSP projectors in raw files must be the same') + raise ValueError(f"raw[{ri}]._cals must match") + if len(raw[0].info["projs"]) != len(raw[ri].info["projs"]): + raise ValueError("SSP projectors in raw files must be the same") + if not all( + _proj_equal(p1, p2) + for p1, p2 in zip(raw[0].info["projs"], raw[ri].info["projs"]) + ): + raise ValueError("SSP projectors in raw files must be the same") if any(r.orig_format != raw[0].orig_format for r in raw): - warn('raw files do not all have the same data format, could result in ' - 'precision mismatch. Setting raw.orig_format="unknown"') - raw[0].orig_format = 'unknown' + warn( + "raw files do not all have the same data format, could result in " + 'precision mismatch. Setting raw.orig_format="unknown"' + ) + raw[0].orig_format = "unknown" @verbose -def concatenate_raws(raws, preload=None, events_list=None, *, - on_mismatch='raise', verbose=None): +def concatenate_raws( + raws, preload=None, events_list=None, *, on_mismatch="raise", verbose=None +): """Concatenate `~mne.io.Raw` instances as if they were continuous. .. note:: ``raws[0]`` is modified in-place to achieve the concatenation. @@ -2558,13 +3173,18 @@ def concatenate_raws(raws, preload=None, events_list=None, *, The events. Only returned if ``event_list`` is not None. """ for idx, raw in enumerate(raws[1:], start=1): - _ensure_infos_match(info1=raws[0].info, info2=raw.info, - name=f'raws[{idx}]', on_mismatch=on_mismatch) + _ensure_infos_match( + info1=raws[0].info, + info2=raw.info, + name=f"raws[{idx}]", + on_mismatch=on_mismatch, + ) if events_list is not None: if len(events_list) != len(raws): - raise ValueError('`raws` and `event_list` are required ' - 'to be of the same length') + raise ValueError( + "`raws` and `event_list` are required to be of the same length" + ) first, last = zip(*[(r.first_samp, r.last_samp) for r in raws]) events = concatenate_events(events_list, first, last) raws[0].append(raws[1:], preload) @@ -2575,18 +3195,57 @@ def concatenate_raws(raws, preload=None, events_list=None, *, return raws[0], events +@fill_doc +def match_channel_orders(insts, copy=True): + """Ensure consistent channel order across instances (Raw, Epochs, or Evoked). + + Parameters + ---------- + insts : list + List of :class:`~mne.io.Raw`, :class:`~mne.Epochs`, + or :class:`~mne.Evoked` instances to order. + %(copy_df)s + + Returns + ------- + list of Raw | list of Epochs | list of Evoked + List of instances (Raw, Epochs, or Evoked) with channel orders matched + according to the order they had in the first item in the ``insts`` list. + """ + insts = deepcopy(insts) if copy else insts + ch_order = insts[0].ch_names + for inst in insts[1:]: + inst.reorder_channels(ch_order) + return insts + + def _check_maxshield(allow_maxshield): """Warn or error about MaxShield.""" - msg = ('This file contains raw Internal Active ' - 'Shielding data. It may be distorted. Elekta ' - 'recommends it be run through MaxFilter to ' - 'produce reliable results. 
Consider closing ' - 'the file and running MaxFilter on the data.') + msg = ( + "This file contains raw Internal Active " + "Shielding data. It may be distorted. Elekta " + "recommends it be run through MaxFilter to " + "produce reliable results. Consider closing " + "the file and running MaxFilter on the data." + ) if allow_maxshield: - if not (isinstance(allow_maxshield, str) and - allow_maxshield == 'yes'): + if not (isinstance(allow_maxshield, str) and allow_maxshield == "yes"): warn(msg) else: - msg += (' Use allow_maxshield=True if you are sure you' - ' want to load the data despite this warning.') + msg += ( + " Use allow_maxshield=True if you are sure you" + " want to load the data despite this warning." + ) raise ValueError(msg) + + +def _get_fname_rep(fname): + if not _file_like(fname): + out = str(fname) + else: + out = "file-like" + try: + out += f' "{fname.name}"' + except Exception: + pass + return out diff --git a/mne/io/besa/__init__.py b/mne/io/besa/__init__.py index 37d2bb63e02..a91614c31a4 100644 --- a/mne/io/besa/__init__.py +++ b/mne/io/besa/__init__.py @@ -1,7 +1,7 @@ """Support for various BESA file formats.""" -# Author: Marijn van Vliet -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .besa import read_evoked_besa diff --git a/mne/io/besa/besa.py b/mne/io/besa/besa.py index 368506b6506..d6d4ee9657c 100644 --- a/mne/io/besa/besa.py +++ b/mne/io/besa/besa.py @@ -1,10 +1,15 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + from collections import OrderedDict from pathlib import Path + import numpy as np -from ...utils import logger, fill_doc, verbose -from ..meas_info import create_info +from ..._fiff.meas_info import create_info from ...evoked import EvokedArray +from ...utils import fill_doc, logger, verbose @fill_doc @@ -27,12 +32,12 @@ def read_evoked_besa(fname, verbose=None): The evoked data in the .avr or .mul file. """ fname = Path(fname) - if fname.suffix == '.avr': + if fname.suffix == ".avr": return _read_evoked_besa_avr(fname, verbose) - elif fname.suffix == '.mul': + elif fname.suffix == ".mul": return _read_evoked_besa_mul(fname, verbose) else: - raise ValueError('Filename must end in either .avr or .mul') + raise ValueError("Filename must end in either .avr or .mul") @verbose @@ -44,7 +49,7 @@ def _read_evoked_besa_avr(fname, verbose): # There are two versions of .avr files. The old style, generated by # BESA 1, 2 and 3 does not define Nchan and does not have channel names # in the file. - new_style = 'Nchan=' in header + new_style = "Nchan=" in header if new_style: ch_names = f.readline().strip().split() else: @@ -58,62 +63,76 @@ def _read_evoked_besa_avr(fname, verbose): if new_style: if len(ch_names) != len(data): raise RuntimeError( - 'Mismatch between the number of channel names defined in ' - f'the .avr file ({len(ch_names)}) and the number of rows ' - f'in the data matrix ({len(data)}).') + "Mismatch between the number of channel names defined in " + f"the .avr file ({len(ch_names)}) and the number of rows " + f"in the data matrix ({len(data)})." 
+ ) else: # Determine channel names from the .elp sidecar file if ch_types is not None: ch_names = list(ch_types.keys()) if len(ch_names) != len(data): - raise RuntimeError('Mismatch between the number of channels ' - f'defined in the .avr file ({len(data)}) ' - f'and .elp file ({len(ch_names)}).') + raise RuntimeError( + "Mismatch between the number of channels " + f"defined in the .avr file ({len(data)}) " + f"and .elp file ({len(ch_names)})." + ) else: - logger.info('No .elp file found and no channel names present in ' - 'the .avr file. Falling back to generic names. ') - ch_names = [f'CH{i + 1:02d}' for i in range(len(data))] + logger.info( + "No .elp file found and no channel names present in " + "the .avr file. Falling back to generic names. " + ) + ch_names = [f"CH{i + 1:02d}" for i in range(len(data))] # Consolidate channel types if ch_types is None: - logger.info('Marking all channels as EEG.') - ch_types = ['eeg'] * len(ch_names) + logger.info("Marking all channels as EEG.") + ch_types = ["eeg"] * len(ch_names) else: ch_types = [ch_types[ch] for ch in ch_names] # Go over all the header fields and make sure they are all defined to # something sensible. - if 'Npts' in fields: - fields['Npts'] = int(fields['Npts']) - if fields['Npts'] != data.shape[1]: - logger.warn(f'The size of the data matrix ({data.shape}) does not ' - f'match the "Npts" field ({fields["Npts"]}).') - if 'Nchan' in fields: - fields['Nchan'] = int(fields['Nchan']) - if fields['Nchan'] != data.shape[0]: - logger.warn(f'The size of the data matrix ({data.shape}) does not ' - f'match the "Nchan" field ({fields["Nchan"]}).') - if 'DI' in fields: - fields['DI'] = float(fields['DI']) + if "Npts" in fields: + fields["Npts"] = int(fields["Npts"]) + if fields["Npts"] != data.shape[1]: + logger.warn( + f"The size of the data matrix ({data.shape}) does not " + f'match the "Npts" field ({fields["Npts"]}).' + ) + if "Nchan" in fields: + fields["Nchan"] = int(fields["Nchan"]) + if fields["Nchan"] != data.shape[0]: + logger.warn( + f"The size of the data matrix ({data.shape}) does not " + f'match the "Nchan" field ({fields["Nchan"]}).' + ) + if "DI" in fields: + fields["DI"] = float(fields["DI"]) else: - raise RuntimeError('No "DI" field present. Could not determine ' - 'sampling frequency.') - if 'TSB' in fields: - fields['TSB'] = float(fields['TSB']) + raise RuntimeError( + 'No "DI" field present. Could not determine sampling frequency.' + ) + if "TSB" in fields: + fields["TSB"] = float(fields["TSB"]) else: - fields['TSB'] = 0 - if 'SB' in fields: - fields['SB'] = float(fields['SB']) + fields["TSB"] = 0 + if "SB" in fields: + fields["SB"] = float(fields["SB"]) else: - fields['SB'] = 1.0 - if 'SegmentName' not in fields: - fields['SegmentName'] = '' + fields["SB"] = 1.0 + if "SegmentName" not in fields: + fields["SegmentName"] = "" # Build the Evoked object based on the header fields. 
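For orientation, a short worked sketch of how the consolidated .avr header fields feed the Evoked construction that follows; the numbers are illustrative only (chosen to agree with the sfreq == 200 and tmin == -0.1 assertions in the tests later in this diff) and are not taken from any particular file.

# "DI" is the sampling interval in ms, "TSB" the sweep start in ms,
# and "SB" an amplitude scaling factor applied to the data.
fields = {"DI": 5.0, "TSB": -100.0, "SB": 1.0}
sfreq = 1000 / fields["DI"]       # 200.0 Hz
tmin = fields["TSB"] / 1000       # -0.1 s
scale = 1.0 / fields["SB"] / 1e6  # data are divided by SB, then converted from uV to V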
- info = create_info(ch_names, sfreq=1000 / fields['DI'], ch_types='eeg') - return EvokedArray(data / fields['SB'] / 1E6, info, - tmin=fields['TSB'] / 1000, - comment=fields['SegmentName'], verbose=verbose) + info = create_info(ch_names, sfreq=1000 / fields["DI"], ch_types="eeg") + return EvokedArray( + data / fields["SB"] / 1e6, + info, + tmin=fields["TSB"] / 1000, + comment=fields["SegmentName"], + verbose=verbose, + ) @verbose @@ -127,54 +146,66 @@ def _read_evoked_besa_mul(fname, verbose): data = np.loadtxt(fname, skiprows=2, ndmin=2) if len(ch_names) != data.shape[1]: - raise RuntimeError('Mismatch between the number of channel names ' - f'defined in the .mul file ({len(ch_names)}) ' - 'and the number of columns in the data matrix ' - f'({data.shape[1]}).') + raise RuntimeError( + "Mismatch between the number of channel names " + f"defined in the .mul file ({len(ch_names)}) " + "and the number of columns in the data matrix " + f"({data.shape[1]})." + ) # Consolidate channel types ch_types = _read_elp_sidecar(fname) if ch_types is None: - logger.info('Marking all channels as EEG.') - ch_types = ['eeg'] * len(ch_names) + logger.info("Marking all channels as EEG.") + ch_types = ["eeg"] * len(ch_names) else: ch_types = [ch_types[ch] for ch in ch_names] # Go over all the header fields and make sure they are all defined to # something sensible. - if 'TimePoints' in fields: - fields['TimePoints'] = int(fields['TimePoints']) - if fields['TimePoints'] != data.shape[0]: + if "TimePoints" in fields: + fields["TimePoints"] = int(fields["TimePoints"]) + if fields["TimePoints"] != data.shape[0]: + logger.warn( + f"The size of the data matrix ({data.shape}) does not " + f'match the "TimePoints" field ({fields["TimePoints"]}).' + ) + if "Channels" in fields: + fields["Channels"] = int(fields["Channels"]) + if fields["Channels"] != data.shape[1]: logger.warn( - f'The size of the data matrix ({data.shape}) does not ' - f'match the "TimePoints" field ({fields["TimePoints"]}).') - if 'Channels' in fields: - fields['Channels'] = int(fields['Channels']) - if fields['Channels'] != data.shape[1]: - logger.warn(f'The size of the data matrix ({data.shape}) does not ' - f'match the "Channels" field ({fields["Channels"]}).') - if 'SamplingInterval[ms]' in fields: - fields['SamplingInterval[ms]'] = float(fields['SamplingInterval[ms]']) + f"The size of the data matrix ({data.shape}) does not " + f'match the "Channels" field ({fields["Channels"]}).' + ) + if "SamplingInterval[ms]" in fields: + fields["SamplingInterval[ms]"] = float(fields["SamplingInterval[ms]"]) else: - raise RuntimeError('No "SamplingInterval[ms]" field present. Could ' - 'not determine sampling frequency.') - if 'BeginSweep[ms]' in fields: - fields['BeginSweep[ms]'] = float(fields['BeginSweep[ms]']) + raise RuntimeError( + 'No "SamplingInterval[ms]" field present. Could ' + "not determine sampling frequency." + ) + if "BeginSweep[ms]" in fields: + fields["BeginSweep[ms]"] = float(fields["BeginSweep[ms]"]) else: - fields['BeginSweep[ms]'] = 0.0 - if 'Bins/uV' in fields: - fields['Bins/uV'] = float(fields['Bins/uV']) + fields["BeginSweep[ms]"] = 0.0 + if "Bins/uV" in fields: + fields["Bins/uV"] = float(fields["Bins/uV"]) else: - fields['Bins/uV'] = 1 - if 'SegmentName' not in fields: - fields['SegmentName'] = '' + fields["Bins/uV"] = 1 + if "SegmentName" not in fields: + fields["SegmentName"] = "" # Build the Evoked object based on the header fields. 
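The .mul branch is analogous, with one detail worth making explicit: np.loadtxt returns the data as samples by channels, which is why the header checks above compare channel counts against data.shape[1] and why the array is transposed before being handed to EvokedArray. A minimal sketch with made-up sizes:

import numpy as np

data = np.zeros((200, 33))        # .mul layout: rows are samples, columns are channels
assert data.shape[1] == 33        # the "Channels" header field is checked against axis 1
evoked_data = data.T / 1.0 / 1e6  # channels x samples; divide by Bins/uV, then uV -> V
assert evoked_data.shape == (33, 200)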
- info = create_info(ch_names, sfreq=1000 / fields['SamplingInterval[ms]'], - ch_types=ch_types) - return EvokedArray(data.T / fields['Bins/uV'] / 1E6, info, - tmin=fields['BeginSweep[ms]'] / 1000, - comment=fields['SegmentName'], verbose=verbose) + info = create_info( + ch_names, sfreq=1000 / fields["SamplingInterval[ms]"], ch_types=ch_types + ) + return EvokedArray( + data.T / fields["Bins/uV"] / 1e6, + info, + tmin=fields["BeginSweep[ms]"] / 1000, + comment=fields["SegmentName"], + verbose=verbose, + ) def _parse_header(header): @@ -196,7 +227,7 @@ def _parse_header(header): """ parts = header.split() # Splits on one or more spaces name_val_pairs = zip(parts[::2], parts[1::2]) - return dict((name.replace('=', ''), val) for name, val in name_val_pairs) + return dict((name.replace("=", ""), val) for name, val in name_val_pairs) def _read_elp_sidecar(fname): @@ -218,13 +249,12 @@ def _read_elp_sidecar(fname): If the sidecar file exists, return a dictionary mapping channel names to channel types. Otherwise returns ``None``. """ - fname_elp = fname.parent / (fname.stem + '.elp') + fname_elp = fname.parent / (fname.stem + ".elp") if not fname_elp.exists(): - logger.info(f'No {fname_elp} file present containing electrode ' - 'information.') + logger.info(f"No {fname_elp} file present containing electrode information.") return None - logger.info(f'Reading electrode names and types from {fname_elp}') + logger.info(f"Reading electrode names and types from {fname_elp}") ch_types = OrderedDict() with open(fname_elp) as f: lines = f.readlines() @@ -235,9 +265,10 @@ def _read_elp_sidecar(fname): ch_types[ch_name] = ch_type.lower() else: # No channel types present - logger.info('No channel types present in .elp file. Marking all ' - 'channels as EEG.') + logger.info( + "No channel types present in .elp file. Marking all channels as EEG." + ) for line in lines: ch_name = line.split()[:1] - ch_types[ch_name] = 'eeg' + ch_types[ch_name] = "eeg" return ch_types diff --git a/mne/io/besa/tests/test_besa.py b/mne/io/besa/tests/test_besa.py index 23097dd497f..60deb1b4820 100644 --- a/mne/io/besa/tests/test_besa.py +++ b/mne/io/besa/tests/test_besa.py @@ -1,74 +1,79 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
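Before the tests, a hedged usage sketch of the public reader they exercise; the file name is hypothetical, and an .elp sidecar with the same stem is picked up automatically when present:

from mne.io import read_evoked_besa

ev = read_evoked_besa("simulation.avr")  # a .mul file works the same way
print(ev.info["sfreq"], ev.tmin, len(ev.ch_names), ev.comment)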
+ """Test reading BESA fileformats.""" + import inspect -import pytest from pathlib import Path -from mne.io import read_evoked_besa -from mne.channels import read_custom_montage +import pytest +from mne.channels import read_custom_montage +from mne.io import read_evoked_besa FILE = Path(inspect.getfile(inspect.currentframe())) -data_dir = FILE.parent / 'data' -avr_file = data_dir / 'simulation.avr' -avr_file_oldstyle = data_dir / 'simulation_oldstyle.avr' -mul_file = data_dir / 'simulation.mul' -montage = read_custom_montage(data_dir / 'simulation.elp') +data_dir = FILE.parent / "data" +avr_file = data_dir / "simulation.avr" +avr_file_oldstyle = data_dir / "simulation_oldstyle.avr" +mul_file = data_dir / "simulation.mul" +montage = read_custom_montage(data_dir / "simulation.elp") @pytest.mark.filterwarnings("ignore:Fiducial point nasion not found") -@pytest.mark.parametrize('fname', (avr_file, avr_file_oldstyle, mul_file)) +@pytest.mark.parametrize("fname", (avr_file, avr_file_oldstyle, mul_file)) def test_read_evoked_besa(fname): """Test reading MESA .avr and .mul files.""" ev = read_evoked_besa(fname) assert len(ev.ch_names) == len(ev.data) == 33 - assert ev.info['sfreq'] == 200 + assert ev.info["sfreq"] == 200 assert ev.tmin == -0.1 assert len(ev.times) == 200 assert ev.ch_names == montage.ch_names - assert ev.comment == 'simulation' + assert ev.comment == "simulation" def test_read_evoked_besa_avr_incomplete(tmp_path): """Test reading incomplete BESA .avr files.""" # Check old style .avr file without an .elp sidecar - with open(f'{tmp_path}/missing.avr', 'w') as f: - f.write('Npts= 1 TSB= 0 SB= 1.00 SC= 500.0 DI= 5\n0\n1\n2\n') - ev = read_evoked_besa(f'{tmp_path}/missing.avr') - assert ev.ch_names == ['CH01', 'CH02', 'CH03'] + with open(f"{tmp_path}/missing.avr", "w") as f: + f.write("Npts= 1 TSB= 0 SB= 1.00 SC= 500.0 DI= 5\n0\n1\n2\n") + ev = read_evoked_besa(f"{tmp_path}/missing.avr") + assert ev.ch_names == ["CH01", "CH02", "CH03"] # Create BESA file with missing header fields and verify things don't break - with open(f'{tmp_path}/missing.avr', 'w') as f: - f.write('DI= 5\n0\n') - ev = read_evoked_besa(f'{tmp_path}/missing.avr') + with open(f"{tmp_path}/missing.avr", "w") as f: + f.write("DI= 5\n0\n") + ev = read_evoked_besa(f"{tmp_path}/missing.avr") assert len(ev.ch_names) == len(ev.data) == 1 - assert ev.info['sfreq'] == 200 + assert ev.info["sfreq"] == 200 assert ev.tmin == 0 assert len(ev.times) == 1 - assert ev.ch_names == ['CH01'] - assert ev.comment == '' + assert ev.ch_names == ["CH01"] + assert ev.comment == "" # The DI field (sample frequency) must exist - with open(f'{tmp_path}/missing.avr', 'w') as f: - f.write('Npts= 1 TSB= 0 SB= 1.00 SC= 500.0\n0\n') + with open(f"{tmp_path}/missing.avr", "w") as f: + f.write("Npts= 1 TSB= 0 SB= 1.00 SC= 500.0\n0\n") with pytest.raises(RuntimeError, match='No "DI" field present'): - ev = read_evoked_besa(f'{tmp_path}/missing.avr') + ev = read_evoked_besa(f"{tmp_path}/missing.avr") def test_read_evoked_besa_mul_incomplete(tmp_path): """Test reading incomplete BESA .mul files.""" # Create BESA file with missing header fields and verify things don't break - with open(f'{tmp_path}/missing.mul', 'w') as f: - f.write('SamplingInterval[ms]= 5\nCH1\n0\n') - ev = read_evoked_besa(f'{tmp_path}/missing.mul') + with open(f"{tmp_path}/missing.mul", "w") as f: + f.write("SamplingInterval[ms]= 5\nCH1\n0\n") + ev = read_evoked_besa(f"{tmp_path}/missing.mul") assert len(ev.ch_names) == len(ev.data) == 1 - assert ev.info['sfreq'] == 200 + assert 
ev.info["sfreq"] == 200 assert ev.tmin == 0 assert len(ev.times) == 1 - assert ev.ch_names == ['CH1'] - assert ev.comment == '' + assert ev.ch_names == ["CH1"] + assert ev.comment == "" # The SamplingInterval[ms] field (sample frequency) must exist - with open(f'{tmp_path}/missing.mul', 'w') as f: - f.write('TimePoints= 1 Channels= 1\nCH1\n0\n') + with open(f"{tmp_path}/missing.mul", "w") as f: + f.write("TimePoints= 1 Channels= 1\nCH1\n0\n") with pytest.raises(RuntimeError, match=r'No "SamplingInterval\[ms\]"'): - ev = read_evoked_besa(f'{tmp_path}/missing.mul') + ev = read_evoked_besa(f"{tmp_path}/missing.mul") diff --git a/mne/io/boxy/__init__.py b/mne/io/boxy/__init__.py index 701f5fd8d20..ac47cccffbc 100644 --- a/mne/io/boxy/__init__.py +++ b/mne/io/boxy/__init__.py @@ -1,7 +1,7 @@ """fNIRS module for conversion to FIF.""" -# Authors: Kyle Mathewson, Jonathan Kuziek -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .boxy import read_raw_boxy diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 3f3cdcdfbcf..9a131730a51 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -1,20 +1,20 @@ -# Authors: Kyle Mathewson, Jonathan Kuziek -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import re as re import numpy as np -from ..base import BaseRaw -from ..meas_info import create_info -from ..utils import _mult_cal_one -from ...utils import logger, verbose, fill_doc, _check_fname +from ..._fiff.meas_info import create_info +from ..._fiff.utils import _mult_cal_one from ...annotations import Annotations +from ...utils import _check_fname, fill_doc, logger, verbose +from ..base import BaseRaw @fill_doc -def read_raw_boxy(fname, preload=False, verbose=None): +def read_raw_boxy(fname, preload=False, verbose=None) -> "RawBOXY": """Reader for an optical imaging recording. This function has been tested using the ISS Imagent I and II systems @@ -58,16 +58,16 @@ class RawBOXY(BaseRaw): @verbose def __init__(self, fname, preload=False, verbose=None): - logger.info('Loading %s' % fname) + logger.info(f"Loading {fname}") # Read header file and grab some info. start_line = np.inf col_names = mrk_col = filetype = mrk_data = end_line = None raw_extras = dict() - raw_extras['offsets'] = list() # keep track of our offsets + raw_extras["offsets"] = list() # keep track of our offsets sfreq = None fname = str(_check_fname(fname, "read", True, "fname")) - with open(fname, 'r') as fid: + with open(fname) as fid: line_num = 0 i_line = fid.readline() while i_line: @@ -75,67 +75,70 @@ def __init__(self, fname, preload=False, verbose=None): if line_num >= start_line: assert col_names is not None assert filetype is not None - if '#DATA ENDS' in i_line: + if "#DATA ENDS" in i_line: # Data ends just before this. end_line = line_num break if mrk_col is not None: - if filetype == 'non-parsed': + if filetype == "non-parsed": # Non-parsed files have different lines lengths. 
- crnt_line = i_line.rsplit(' ')[0] - temp_data = re.findall( - r'[-+]?\d*\.?\d+', crnt_line) + crnt_line = i_line.rsplit(" ")[0] + temp_data = re.findall(r"[-+]?\d*\.?\d+", crnt_line) if len(temp_data) == len(col_names): - mrk_data.append(float( - re.findall(r'[-+]?\d*\.?\d+', crnt_line) - [mrk_col])) + mrk_data.append( + float( + re.findall(r"[-+]?\d*\.?\d+", crnt_line)[ + mrk_col + ] + ) + ) else: - crnt_line = i_line.rsplit(' ')[0] - mrk_data.append(float(re.findall( - r'[-+]?\d*\.?\d+', crnt_line)[mrk_col])) - raw_extras['offsets'].append(fid.tell()) + crnt_line = i_line.rsplit(" ")[0] + mrk_data.append( + float(re.findall(r"[-+]?\d*\.?\d+", crnt_line)[mrk_col]) + ) + raw_extras["offsets"].append(fid.tell()) # now proceed with more standard header parsing - elif 'BOXY.EXE:' in i_line: - boxy_ver = re.findall(r'\d*\.\d+', - i_line.rsplit(' ')[-1])[0] + elif "BOXY.EXE:" in i_line: + boxy_ver = re.findall(r"\d*\.\d+", i_line.rsplit(" ")[-1])[0] # Check that the BOXY version is supported - if boxy_ver not in ['0.40', '0.84']: - raise RuntimeError('MNE has not been tested with BOXY ' - 'version (%s)' % boxy_ver) - elif 'Detector Channels' in i_line: - raw_extras['detect_num'] = int(i_line.rsplit(' ')[0]) - elif 'External MUX Channels' in i_line: - raw_extras['source_num'] = int(i_line.rsplit(' ')[0]) - elif 'Update Rate (Hz)' in i_line or \ - 'Updata Rate (Hz)' in i_line: + if boxy_ver not in ["0.40", "0.84"]: + raise RuntimeError( + f"MNE has not been tested with BOXY version ({boxy_ver})" + ) + elif "Detector Channels" in i_line: + raw_extras["detect_num"] = int(i_line.rsplit(" ")[0]) + elif "External MUX Channels" in i_line: + raw_extras["source_num"] = int(i_line.rsplit(" ")[0]) + elif "Update Rate (Hz)" in i_line or "Updata Rate (Hz)" in i_line: # Version 0.40 of the BOXY recording software # (and possibly other versions lower than 0.84) contains a # typo in the raw data file where 'Update Rate' is spelled # "Updata Rate. This will account for this typo. - sfreq = float(i_line.rsplit(' ')[0]) - elif '#DATA BEGINS' in i_line: + sfreq = float(i_line.rsplit(" ")[0]) + elif "#DATA BEGINS" in i_line: # Data should start a couple lines later. start_line = line_num + 3 elif line_num == start_line - 2: # Grab names for each column of data. - raw_extras['col_names'] = col_names = re.findall( - r'\w+\-\w+|\w+\-\d+|\w+', i_line.rsplit(' ')[0]) - if 'exmux' in col_names: + raw_extras["col_names"] = col_names = re.findall( + r"\w+\-\w+|\w+\-\d+|\w+", i_line.rsplit(" ")[0] + ) + if "exmux" in col_names: # Change filetype based on data organisation. - filetype = 'non-parsed' + filetype = "non-parsed" else: - filetype = 'parsed' - if 'digaux' in col_names: - mrk_col = col_names.index('digaux') + filetype = "parsed" + if "digaux" in col_names: + mrk_col = col_names.index("digaux") mrk_data = list() # raw_extras['offsets'].append(fid.tell()) elif line_num == start_line - 1: - raw_extras['offsets'].append(fid.tell()) + raw_extras["offsets"].append(fid.tell()) line_num += 1 i_line = fid.readline() assert sfreq is not None - raw_extras.update( - filetype=filetype, start_line=start_line, end_line=end_line) + raw_extras.update(filetype=filetype, start_line=start_line, end_line=end_line) # Label each channel in our data, for each data type (DC, AC, Ph). 
# Data is organised by channels x timepoint, where the first @@ -144,30 +147,36 @@ def __init__(self, fname, preload=False, verbose=None): ch_names = list() ch_types = list() cals = list() - for det_num in range(raw_extras['detect_num']): - for src_num in range(raw_extras['source_num']): + for det_num in range(raw_extras["detect_num"]): + for src_num in range(raw_extras["source_num"]): for i_type, ch_type in [ - ('DC', 'fnirs_cw_amplitude'), - ('AC', 'fnirs_fd_ac_amplitude'), - ('Ph', 'fnirs_fd_phase')]: - ch_names.append( - f'S{src_num + 1}_D{det_num + 1} {i_type}') + ("DC", "fnirs_cw_amplitude"), + ("AC", "fnirs_fd_ac_amplitude"), + ("Ph", "fnirs_fd_phase"), + ]: + ch_names.append(f"S{src_num + 1}_D{det_num + 1} {i_type}") ch_types.append(ch_type) - cals.append(np.pi / 180. if i_type == 'Ph' else 1.) + cals.append(np.pi / 180.0 if i_type == "Ph" else 1.0) # Create info structure. info = create_info(ch_names, sfreq, ch_types) - for ch, cal in zip(info['chs'], cals): - ch['cal'] = cal + for ch, cal in zip(info["chs"], cals): + ch["cal"] = cal # Determine how long our data is. delta = end_line - start_line - assert len(raw_extras['offsets']) == delta + 1 - if filetype == 'non-parsed': - delta //= (raw_extras['source_num']) - super(RawBOXY, self).__init__( - info, preload, filenames=[fname], first_samps=[0], - last_samps=[delta - 1], raw_extras=[raw_extras], verbose=verbose) + assert len(raw_extras["offsets"]) == delta + 1 + if filetype == "non-parsed": + delta //= raw_extras["source_num"] + super().__init__( + info, + preload, + filenames=[fname], + first_samps=[0], + last_samps=[delta - 1], + raw_extras=[raw_extras], + verbose=verbose, + ) # Now let's grab our markers, if they are present. if mrk_data is not None: @@ -198,33 +207,33 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): Regardless of type, output has (n_montages x n_sources x n_detectors + n_marker_channels) rows, and (n_timepoints x n_blocks) columns. """ - source_num = self._raw_extras[fi]['source_num'] - detect_num = self._raw_extras[fi]['detect_num'] - start_line = self._raw_extras[fi]['start_line'] - end_line = self._raw_extras[fi]['end_line'] - filetype = self._raw_extras[fi]['filetype'] - col_names = self._raw_extras[fi]['col_names'] - offsets = self._raw_extras[fi]['offsets'] - boxy_file = self._filenames[fi] + source_num = self._raw_extras[fi]["source_num"] + detect_num = self._raw_extras[fi]["detect_num"] + start_line = self._raw_extras[fi]["start_line"] + end_line = self._raw_extras[fi]["end_line"] + filetype = self._raw_extras[fi]["filetype"] + col_names = self._raw_extras[fi]["col_names"] + offsets = self._raw_extras[fi]["offsets"] + boxy_file = self.filenames[fi] # Non-parsed multiplexes sources, so we need source_num times as many # lines in that case - if filetype == 'parsed': + if filetype == "parsed": start_read = start_line + start stop_read = start_read + (stop - start) else: - assert filetype == 'non-parsed' + assert filetype == "non-parsed" start_read = start_line + start * source_num stop_read = start_read + (stop - start) * source_num assert start_read >= start_line assert stop_read <= end_line # Possible detector names. - detectors = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[:detect_num] + detectors = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[:detect_num] # Loop through our data. 
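For the non-parsed case, the reshape/transpose chain applied after the read loop below can be sanity-checked with a toy array; the sizes here are made up, but the axis bookkeeping mirrors the reshaping in this diff:

import numpy as np

n_det, n_src, n_t = 2, 4, 5                 # illustrative counts
mux = np.zeros((n_det * 3, n_t * n_src))    # rows are detector x (DC, AC, Ph)
demux = (
    mux.reshape(n_det, 3, -1, n_src)        # each "time point" multiplexes sources
    .transpose(0, 3, 1, 2)                  # reorder to (det, src, DC/AC/Ph, time)
    .reshape(n_det * n_src * 3, -1)         # storage order: det x src x type, by time
)
assert demux.shape == (n_det * n_src * 3, n_t)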
one = np.zeros((len(col_names), stop_read - start_read)) - with open(boxy_file, 'r') as fid: + with open(boxy_file) as fid: # Just a more efficient version of this: # ii = 0 # for line_num, i_line in enumerate(fid): @@ -238,26 +247,36 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): fid.seek(offsets[start_read - start_line], 0) for oo in one.T: i_data = fid.readline().strip().split() - oo[:len(i_data)] = i_data + oo[: len(i_data)] = i_data # in theory we could index in the loop above, but it's painfully slow, # so let's just take a hopefully minor memory hit - if filetype == 'non-parsed': - ch_idxs = [col_names.index(f'{det}-{i_type}') - for det in detectors - for i_type in ['DC', 'AC', 'Ph']] - one = one[ch_idxs].reshape( # each "time point" multiplexes srcs - len(detectors), 3, -1, source_num - ).transpose( # reorganize into (det, source, DC/AC/Ph, t) order - 0, 3, 1, 2 - ).reshape( # reshape the way we store it (det x source x DAP, t) - len(detectors) * source_num * 3, -1) + if filetype == "non-parsed": + ch_idxs = [ + col_names.index(f"{det}-{i_type}") + for det in detectors + for i_type in ["DC", "AC", "Ph"] + ] + one = ( + one[ch_idxs] + .reshape( # each "time point" multiplexes srcs + len(detectors), 3, -1, source_num + ) + .transpose( # reorganize into (det, source, DC/AC/Ph, t) order + 0, 3, 1, 2 + ) + .reshape( # reshape the way we store it (det x source x DAP, t) + len(detectors) * source_num * 3, -1 + ) + ) else: - assert filetype == 'parsed' - ch_idxs = [col_names.index(f'{det}-{i_type}{si + 1}') - for det in detectors - for si in range(source_num) - for i_type in ['DC', 'AC', 'Ph']] + assert filetype == "parsed" + ch_idxs = [ + col_names.index(f"{det}-{i_type}{si + 1}") + for det in detectors + for si in range(source_num) + for i_type in ["DC", "AC", "Ph"] + ] one = one[ch_idxs] # Place our data into the data object in place. diff --git a/mne/io/boxy/tests/__init__.py b/mne/io/boxy/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/boxy/tests/__init__.py +++ b/mne/io/boxy/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py index e9cf09ee4cf..58f4671edd0 100644 --- a/mne/io/boxy/tests/test_boxy.py +++ b/mne/io/boxy/tests/test_boxy.py @@ -1,12 +1,11 @@ -# Authors: Kyle Mathewson, Jonathan Kuziek -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
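Before the BOXY tests, a hedged usage sketch of the reader they exercise; the path is hypothetical, and the three fNIRS quantities recorded per source-detector pair can be separated by channel type:

from mne.io import read_raw_boxy

raw = read_raw_boxy("boxy_recording.txt", preload=True)  # hypothetical file
dc = raw.copy().pick("fnirs_cw_amplitude")      # DC
ac = raw.copy().pick("fnirs_fd_ac_amplitude")   # AC
ph = raw.copy().pick("fnirs_fd_phase")          # phase, converted to radians by the reader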
-import pytest import numpy as np -from numpy.testing import (assert_allclose, assert_array_equal, - assert_array_less) +import pytest import scipy.io as spio +from numpy.testing import assert_allclose, assert_array_equal, assert_array_less from mne import pick_types from mne.datasets import testing @@ -15,10 +14,7 @@ data_path = testing.data_path(download=False) boxy_0_40 = ( - data_path - / "BOXY" - / "boxy_0_40_recording" - / "boxy_0_40_notriggers_unparsed.txt" + data_path / "BOXY" / "boxy_0_40_recording" / "boxy_0_40_notriggers_unparsed.txt" ) p_pod_0_40 = ( data_path @@ -34,10 +30,7 @@ / "boxy_0_84_triggers_unparsed.txt" ) boxy_0_84_parsed = ( - data_path - / "BOXY" - / "boxy_0_84_digaux_recording" - / "boxy_0_84_triggers_parsed.txt" + data_path / "BOXY" / "boxy_0_84_digaux_recording" / "boxy_0_84_triggers_parsed.txt" ) p_pod_0_84 = ( data_path @@ -50,21 +43,22 @@ def _assert_ppod(raw, p_pod_file): have_types = raw.get_channel_types(unique=True) - assert 'fnirs_fd_phase' in raw, have_types - assert 'fnirs_cw_amplitude' in raw, have_types - assert 'fnirs_fd_ac_amplitude' in raw, have_types + assert "fnirs_fd_phase" in raw, have_types + assert "fnirs_cw_amplitude" in raw, have_types + assert "fnirs_fd_ac_amplitude" in raw, have_types ppod_data = spio.loadmat(p_pod_file) # Compare MNE loaded data to p_pod loaded data. - map_ = dict(dc='fnirs_cw_amplitude', ac='fnirs_fd_ac_amplitude', - ph='fnirs_fd_phase') + map_ = dict( + dc="fnirs_cw_amplitude", ac="fnirs_fd_ac_amplitude", ph="fnirs_fd_phase" + ) for key, value in map_.items(): ppod = ppod_data[key].T m = np.median(np.abs(ppod)) assert 1e-1 < m < 1e5, key # our atol is meaningful atol = m * 1e-10 py = raw.get_data(value) - if key == 'ph': # radians + if key == "ph": # radians assert_array_less(-np.pi, py) assert_array_less(py, 3 * np.pi) py = np.rad2deg(py) @@ -75,33 +69,59 @@ def _assert_ppod(raw, p_pod_file): def test_boxy_load(): """Test reading BOXY files.""" raw = read_raw_boxy(boxy_0_40, verbose=True) - assert raw.info['sfreq'] == 62.5 + assert raw.info["sfreq"] == 62.5 _assert_ppod(raw, p_pod_0_40) # Grab our different data types. - mne_ph = raw.copy().pick(picks='fnirs_fd_phase') - mne_dc = raw.copy().pick(picks='fnirs_cw_amplitude') - mne_ac = raw.copy().pick(picks='fnirs_fd_ac_amplitude') + mne_ph = raw.copy().pick(picks="fnirs_fd_phase") + mne_dc = raw.copy().pick(picks="fnirs_cw_amplitude") + mne_ac = raw.copy().pick(picks="fnirs_fd_ac_amplitude") # Check channel names. 
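The expected names checked here follow from the naming loop in the reader above (detectors outermost, then sources, then the DC/AC/Ph triplet). A small reconstruction, using the 8-detector, 10-source counts implied by the assertions below:

ch_names = [
    f"S{src + 1}_D{det + 1} {kind}"
    for det in range(8)                  # "Detector Channels" from the header
    for src in range(10)                 # "External MUX Channels" from the header
    for kind in ("DC", "AC", "Ph")
]
assert ch_names[0] == "S1_D1 DC"
assert ch_names[-1] == "S10_D8 Ph"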
- first_chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', 'S5_D1', - 'S6_D1', 'S7_D1', 'S8_D1', 'S9_D1', 'S10_D1'] - last_chans = ['S1_D8', 'S2_D8', 'S3_D8', 'S4_D8', 'S5_D8', - 'S6_D8', 'S7_D8', 'S8_D8', 'S9_D8', 'S10_D8'] - - assert mne_dc.info['ch_names'][:10] == [i_chan + ' ' + 'DC' - for i_chan in first_chans] - assert mne_ac.info['ch_names'][:10] == [i_chan + ' ' + 'AC' - for i_chan in first_chans] - assert mne_ph.info['ch_names'][:10] == [i_chan + ' ' + 'Ph' - for i_chan in first_chans] - - assert mne_dc.info['ch_names'][70::] == [i_chan + ' ' + 'DC' - for i_chan in last_chans] - assert mne_ac.info['ch_names'][70::] == [i_chan + ' ' + 'AC' - for i_chan in last_chans] - assert mne_ph.info['ch_names'][70::] == [i_chan + ' ' + 'Ph' - for i_chan in last_chans] + first_chans = [ + "S1_D1", + "S2_D1", + "S3_D1", + "S4_D1", + "S5_D1", + "S6_D1", + "S7_D1", + "S8_D1", + "S9_D1", + "S10_D1", + ] + last_chans = [ + "S1_D8", + "S2_D8", + "S3_D8", + "S4_D8", + "S5_D8", + "S6_D8", + "S7_D8", + "S8_D8", + "S9_D8", + "S10_D8", + ] + + assert mne_dc.info["ch_names"][:10] == [ + i_chan + " " + "DC" for i_chan in first_chans + ] + assert mne_ac.info["ch_names"][:10] == [ + i_chan + " " + "AC" for i_chan in first_chans + ] + assert mne_ph.info["ch_names"][:10] == [ + i_chan + " " + "Ph" for i_chan in first_chans + ] + + assert mne_dc.info["ch_names"][70::] == [ + i_chan + " " + "DC" for i_chan in last_chans + ] + assert mne_ac.info["ch_names"][70::] == [ + i_chan + " " + "AC" for i_chan in last_chans + ] + assert mne_ph.info["ch_names"][70::] == [ + i_chan + " " + "Ph" for i_chan in last_chans + ] # Since this data set has no 'digaux' for creating trigger annotations, # let's make sure our Raw object has no annotations. @@ -109,7 +129,7 @@ def test_boxy_load(): @testing.requires_testing_data -@pytest.mark.parametrize('fname', (boxy_0_84, boxy_0_84_parsed)) +@pytest.mark.parametrize("fname", (boxy_0_84, boxy_0_84_parsed)) def test_boxy_filetypes(fname): """Test reading parsed and unparsed BOXY data files.""" # BOXY data files can be saved in two formats (parsed and unparsed) which @@ -127,54 +147,49 @@ def test_boxy_filetypes(fname): # files are comparable, then we will compare the MNE loaded data between # parsed and unparsed files. raw = read_raw_boxy(fname, verbose=True) - assert raw.info['sfreq'] == 79.4722 + assert raw.info["sfreq"] == 79.4722 _assert_ppod(raw, p_pod_0_84) # Grab our different data types. - unp_dc = raw.copy().pick('fnirs_cw_amplitude') - unp_ac = raw.copy().pick('fnirs_fd_ac_amplitude') - unp_ph = raw.copy().pick('fnirs_fd_phase') + unp_dc = raw.copy().pick("fnirs_cw_amplitude") + unp_ac = raw.copy().pick("fnirs_fd_ac_amplitude") + unp_ph = raw.copy().pick("fnirs_fd_phase") # Check channel names. 
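As the comment at the top of this test explains, the parsed and unparsed exports describe the same recording, so the loaded data should agree as well. A sketch of a direct comparison, reusing the module-level paths and reader import; this is not asserted by the patch itself, and tolerances may need adjusting for the ASCII precision of the exports:

import numpy as np
from mne.io import read_raw_boxy

raw_unparsed = read_raw_boxy(boxy_0_84)
raw_parsed = read_raw_boxy(boxy_0_84_parsed)
np.testing.assert_allclose(raw_unparsed.get_data(), raw_parsed.get_data())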
- chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', - 'S5_D1', 'S6_D1', 'S7_D1', 'S8_D1'] + chans = ["S1_D1", "S2_D1", "S3_D1", "S4_D1", "S5_D1", "S6_D1", "S7_D1", "S8_D1"] - assert unp_dc.info['ch_names'] == [i_chan + ' ' + 'DC' - for i_chan in chans] - assert unp_ac.info['ch_names'] == [i_chan + ' ' + 'AC' - for i_chan in chans] - assert unp_ph.info['ch_names'] == [i_chan + ' ' + 'Ph' - for i_chan in chans] + assert unp_dc.info["ch_names"] == [i_chan + " " + "DC" for i_chan in chans] + assert unp_ac.info["ch_names"] == [i_chan + " " + "AC" for i_chan in chans] + assert unp_ph.info["ch_names"] == [i_chan + " " + "Ph" for i_chan in chans] @testing.requires_testing_data -@pytest.mark.parametrize('fname', (boxy_0_84, boxy_0_84_parsed)) +@pytest.mark.parametrize("fname", (boxy_0_84, boxy_0_84_parsed)) def test_boxy_digaux(fname): """Test reading BOXY files and generating annotations from digaux.""" srate = 79.4722 raw = read_raw_boxy(fname, verbose=True) # Grab our different data types. - picks_dc = pick_types(raw.info, fnirs='fnirs_cw_amplitude') - picks_ac = pick_types(raw.info, fnirs='fnirs_fd_ac_amplitude') - picks_ph = pick_types(raw.info, fnirs='fnirs_fd_phase') + picks_dc = pick_types(raw.info, fnirs="fnirs_cw_amplitude") + picks_ac = pick_types(raw.info, fnirs="fnirs_fd_ac_amplitude") + picks_ph = pick_types(raw.info, fnirs="fnirs_fd_phase") assert_array_equal(picks_dc, np.arange(0, 8) * 3 + 0) assert_array_equal(picks_ac, np.arange(0, 8) * 3 + 1) assert_array_equal(picks_ph, np.arange(0, 8) * 3 + 2) # Check that our event order matches what we expect. - event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] + event_list = ["1.0", "2.0", "3.0", "4.0", "5.0"] assert_array_equal(raw.annotations.description, event_list) # Check that our event timings are what we expect. - event_onset = [i_time * (1.0 / srate) for i_time in - [105, 185, 265, 344, 424]] + event_onset = [i_time * (1.0 / srate) for i_time in [105, 185, 265, 344, 424]] assert_allclose(raw.annotations.onset, event_onset, atol=1e-6) # Now let's compare parsed and unparsed events to p_pod loaded digaux. # Load our p_pod data. ppod_data = spio.loadmat(p_pod_0_84) - ppod_digaux = np.transpose(ppod_data['digaux'])[0] + ppod_digaux = np.transpose(ppod_data["digaux"])[0] # Now let's get our triggers from the p_pod digaux. # We only want the first instance of each trigger. @@ -192,14 +207,13 @@ def test_boxy_digaux(fname): tmp_dur = 0 prev_mrk = i_mrk onset = np.asarray([i_mrk * (1.0 / srate) for i_mrk in mrk_idx]) - description = np.asarray([str(float(i_mrk))for i_mrk in - ppod_digaux[mrk_idx]]) + description = np.asarray([str(float(i_mrk)) for i_mrk in ppod_digaux[mrk_idx]]) assert_array_equal(raw.annotations.description, description) assert_allclose(raw.annotations.onset, onset, atol=1e-6) @testing.requires_testing_data -@pytest.mark.parametrize('fname', (boxy_0_40, boxy_0_84, boxy_0_84_parsed)) +@pytest.mark.parametrize("fname", (boxy_0_40, boxy_0_84, boxy_0_84_parsed)) def test_raw_properties(fname): """Test raw reader properties.""" _test_raw_reader(read_raw_boxy, fname=fname, boundary_decimal=1) diff --git a/mne/io/brainvision/__init__.py b/mne/io/brainvision/__init__.py index f51241e8673..860e157c014 100644 --- a/mne/io/brainvision/__init__.py +++ b/mne/io/brainvision/__init__.py @@ -1,8 +1,7 @@ """BrainVision module for conversion to FIF.""" -# Author: Teon Brooks -# Stefan Appelhoff -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
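The BrainVision reader whose diff follows adds an ignore_marker_types option, documented in the RawBrainVision notes further down. A hedged usage sketch, assuming the top-level read_raw_brainvision wrapper exposes the same flag (the file name and marker contents are hypothetical):

from mne.io import read_raw_brainvision

raw = read_raw_brainvision("recording.vhdr")
print(set(raw.annotations.description))   # e.g. {"Stimulus/S  1", "Response/R  1"}

raw = read_raw_brainvision("recording.vhdr", ignore_marker_types=True)
print(set(raw.annotations.description))   # e.g. {"S  1", "R  1"}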
from .brainvision import read_raw_brainvision diff --git a/mne/io/brainvision/brainvision.py b/mne/io/brainvision/brainvision.py index 99a3cbcce29..f9dfffccd55 100644 --- a/mne/io/brainvision/brainvision.py +++ b/mne/io/brainvision/brainvision.py @@ -1,14 +1,8 @@ -# -*- coding: utf-8 -*- """Conversion tool from BrainVision EEG to FIF.""" -# Authors: Teon Brooks -# Christian Brodbeck -# Eric Larson -# Jona Sassenhagen -# Phillip Alday -# Okba Bekhelifi -# Stefan Appelhoff -# + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import configparser import os @@ -19,14 +13,15 @@ import numpy as np -from ...utils import verbose, logger, warn, fill_doc, _DefaultEventParser -from ..constants import FIFF -from ..meas_info import _empty_info -from ..base import BaseRaw -from ..utils import _read_segments_file, _mult_cal_one +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _mult_cal_one, _read_segments_file from ...annotations import Annotations, read_annotations from ...channels import make_dig_montage from ...defaults import HEAD_SIZE_DEFAULT +from ...transforms import _sph_to_cart +from ...utils import _DefaultEventParser, fill_doc, logger, verbose, warn +from ..base import BaseRaw @fill_doc @@ -37,18 +32,23 @@ class RawBrainVision(BaseRaw): ---------- vhdr_fname : path-like Path to the EEG header file. - eog : list or tuple - Names of channels or list of indices that should be designated - EOG channels. Values should correspond to the header file. - Default is ``('HEOGL', 'HEOGR', 'VEOGb')``. - misc : list or tuple of str | ``'auto'`` - Names of channels or list of indices that should be designated - MISC channels. Values should correspond to the electrodes - in the header file. If ``'auto'``, units in header file are used for - inferring misc channels. Default is ``'auto'``. + eog : list of (int | str) | tuple of (int | str) + Names of channels or list of indices that should be designated EOG channels. + Values should correspond to the header file. Default is ``('HEOGL', 'HEOGR', + 'VEOGb')``. + misc : list of (int | str) | tuple of (int | str) | ``'auto'`` + Names of channels or list of indices that should be designated MISC channels. + Values should correspond to the electrodes in the header file. If ``'auto'``, + units in header file are used for inferring misc channels. Default is + ``'auto'``. scale : float - The scaling factor for EEG data. Unless specified otherwise by - header file, units are in microvolts. Default scale factor is 1. + The scaling factor for EEG data. Unless specified otherwise by header file, + units are in microvolts. Default scale factor is 1. + ignore_marker_types : bool + If ``True``, ignore marker types and only use marker descriptions. Default is + ``False``. + + .. versionadded:: 1.8 %(preload)s %(verbose)s @@ -60,32 +60,67 @@ class RawBrainVision(BaseRaw): See Also -------- mne.io.Raw : Documentation of attributes and methods. + + Notes + ----- + If the BrainVision header file contains impedance measurements, these may be + accessed using ``raw.impedances`` after reading using this function. However, this + attribute will NOT be available after a save and re-load of the data. That is, it is + only available when reading data directly from the BrainVision header file. + + BrainVision markers consist of a type and a description (in addition to other fields + like onset and duration). In contrast, annotations in MNE only have a description. 
+ Therefore, a BrainVision marker of type "Stimulus" and description "S 1" will be + converted to an annotation "Stimulus/S 1" by default. If you want to ignore the + type and instead only use the description, set ``ignore_marker_types=True``, which + will convert the same marker to an annotation "S 1". + + The first marker in a BrainVision file is usually a "New Segment" marker, which + contains the recording time. This time is stored in the ``info['meas_date']`` + attribute of the returned object and is not converted to an annotation. """ + _extra_attributes = ("impedances",) + @verbose - def __init__(self, vhdr_fname, - eog=('HEOGL', 'HEOGR', 'VEOGb'), misc='auto', - scale=1., preload=False, verbose=None): # noqa: D107 + def __init__( + self, + vhdr_fname, + eog=("HEOGL", "HEOGR", "VEOGb"), + misc="auto", + scale=1.0, + ignore_marker_types=False, + preload=False, + verbose=None, + ): # noqa: D107 # Channel info and events - logger.info('Extracting parameters from %s...' % vhdr_fname) + logger.info(f"Extracting parameters from {vhdr_fname}...") hdr_fname = op.abspath(vhdr_fname) ext = op.splitext(hdr_fname)[-1] - ahdr_format = True if ext == '.ahdr' else False - (info, data_fname, fmt, order, n_samples, mrk_fname, montage, - orig_units) = _get_hdr_info(hdr_fname, eog, misc, scale) - - with open(data_fname, 'rb') as f: + ahdr_format = True if ext == ".ahdr" else False + ( + info, + data_fname, + fmt, + order, + n_samples, + mrk_fname, + montage, + orig_units, + ) = _get_hdr_info(hdr_fname, eog, misc, scale) + + with open(data_fname, "rb") as f: if isinstance(fmt, dict): # ASCII, this will be slow :( - if order == 'F': # multiplexed, channels in columns + if order == "F": # multiplexed, channels in columns n_skip = 0 - for ii in range(int(fmt['skiplines'])): + for _ in range(int(fmt["skiplines"])): n_skip += len(f.readline()) offsets = np.cumsum([n_skip] + [len(line) for line in f]) n_samples = len(offsets) - 1 - elif order == 'C': # vectorized, channels, in rows + elif order == "C": # vectorized, channels, in rows raise NotImplementedError() else: - n_data_ch = int(info['nchan']) + n_data_ch = int(info["nchan"]) f.seek(0, os.SEEK_END) n_samples = f.tell() dtype_bytes = _fmt_byte_dict[fmt] @@ -93,22 +128,28 @@ def __init__(self, vhdr_fname, n_samples = n_samples // (dtype_bytes * n_data_ch) orig_format = "single" if isinstance(fmt, dict) else fmt - raw_extras = dict( - offsets=offsets, fmt=fmt, order=order, n_samples=n_samples) - super(RawBrainVision, self).__init__( - info, last_samps=[n_samples - 1], filenames=[data_fname], - orig_format=orig_format, preload=preload, verbose=verbose, - raw_extras=[raw_extras], orig_units=orig_units) + raw_extras = dict(offsets=offsets, fmt=fmt, order=order, n_samples=n_samples) + super().__init__( + info, + last_samps=[n_samples - 1], + filenames=[data_fname], + orig_format=orig_format, + preload=preload, + verbose=verbose, + raw_extras=[raw_extras], + orig_units=orig_units, + ) self.set_montage(montage) - settings, cfg, cinfo, _ = _aux_hdr_info(hdr_fname) + settings, _, _, _ = _aux_hdr_info(hdr_fname) split_settings = settings.splitlines() - self.impedances = _parse_impedance(split_settings, - self.info['meas_date']) + self.impedances = _parse_impedance(split_settings, self.info["meas_date"]) # Get annotations from marker file - annots = read_annotations(mrk_fname, info['sfreq']) + annots = read_annotations( + mrk_fname, info["sfreq"], ignore_marker_types=ignore_marker_types + ) self.set_annotations(annots) # Drop the fake ahdr channel if needed @@ 
-118,40 +159,51 @@ def __init__(self, vhdr_fname, def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" # read data - n_data_ch = self._raw_extras[fi]['orig_nchan'] - fmt = self._raw_extras[fi]['fmt'] - if self._raw_extras[fi]['order'] == 'C': + n_data_ch = self._raw_extras[fi]["orig_nchan"] + fmt = self._raw_extras[fi]["fmt"] + if self._raw_extras[fi]["order"] == "C": _read_segments_c(self, data, idx, fi, start, stop, cals, mult) elif isinstance(fmt, str): dtype = _fmt_dtype_dict[fmt] - _read_segments_file(self, data, idx, fi, start, stop, cals, mult, - dtype=dtype, n_channels=n_data_ch) + _read_segments_file( + self, + data, + idx, + fi, + start, + stop, + cals, + mult, + dtype=dtype, + n_channels=n_data_ch, + ) else: - offsets = self._raw_extras[fi]['offsets'] - with open(self._filenames[fi], 'rb') as fid: + offsets = self._raw_extras[fi]["offsets"] + with open(self.filenames[fi], "rb") as fid: fid.seek(offsets[start]) block = np.empty((n_data_ch, stop - start)) for ii in range(stop - start): - line = fid.readline().decode('ASCII') + line = fid.readline().decode("ASCII") line = line.strip() - # Not sure why we special-handle the "," character here, - # but let's just keep this for historical and backward- - # compat reasons - if (isinstance(fmt, dict) and - 'decimalsymbol' in fmt and - fmt['decimalsymbol'] != '.'): - line = line.replace(',', '.') + # Not sure why we special-handle the "," character here, but let's + # just keep this for historical and backward- compat reasons + if ( + isinstance(fmt, dict) + and "decimalsymbol" in fmt + and fmt["decimalsymbol"] != "." + ): + line = line.replace(",", ".") - if ' ' in line: + if " " in line: line_data = line.split() - elif ',' in line: + elif "," in line: # likely exported from BrainVision Analyzer? - line_data = line.split(',') + line_data = line.split(",") else: raise RuntimeError( - 'Unknown BrainVision data format encountered. ' - 'Please contact the MNE-Python developers.' + "Unknown BrainVision data format encountered. " + "Please contact the MNE-Python developers." ) block[:n_data_ch, ii] = [float(part) for part in line_data] @@ -160,13 +212,13 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): def _read_segments_c(raw, data, idx, fi, start, stop, cals, mult): """Read chunk of vectorized raw data.""" - n_samples = raw._raw_extras[fi]['n_samples'] - fmt = raw._raw_extras[fi]['fmt'] + n_samples = raw._raw_extras[fi]["n_samples"] + fmt = raw._raw_extras[fi]["fmt"] dtype = _fmt_dtype_dict[fmt] n_bytes = _fmt_byte_dict[fmt] - n_channels = raw._raw_extras[fi]['orig_nchan'] + n_channels = raw._raw_extras[fi]["orig_nchan"] block = np.zeros((n_channels, stop - start)) - with open(raw._filenames[fi], 'rb', buffering=0) as fid: + with open(raw.filenames[fi], "rb", buffering=0) as fid: ids = np.arange(idx.start, idx.stop) if isinstance(idx, slice) else idx for ch_id in ids: fid.seek(start * n_bytes + ch_id * n_bytes * n_samples) @@ -184,185 +236,194 @@ def _read_mrk(fname): Returns ------- - onset : array, shape (n_annots,) + onset : list of float The onsets in seconds. - duration : array, shape (n_annots,) + duration : list of float The onsets in seconds. - description : array, shape (n_annots,) - The description of each annotation. + type_ : list of str + The marker types. + description : list of str + The marker descriptions. date_str : str - The recording time as a string. Defaults to empty string if no - recording time is found. + The recording time. 
Defaults to empty string if no recording time is found. """ # read marker file - with open(fname, 'rb') as fid: + with open(fname, "rb") as fid: txt = fid.read() - # we don't actually need to know the coding for the header line. - # the characters in it all belong to ASCII and are thus the - # same in Latin-1 and UTF-8 - header = txt.decode('ascii', 'ignore').split('\n')[0].strip() - _check_bv_version(header, 'marker') + # we don't actually need to know the encoding for the header line. the characters in + # it all belong to ASCII and are thus the same in Latin-1 and UTF-8 + header = txt.decode("ascii", "ignore").split("\n")[0].strip() + _check_bv_version(header, "marker") - # although the markers themselves are guaranteed to be ASCII (they - # consist of numbers and a few reserved words), we should still - # decode the file properly here because other (currently unused) - # blocks, such as that the filename are specifying are not - # guaranteed to be ASCII. + # although the markers themselves are guaranteed to be ASCII (they consist of + # numbers and a few reserved words), we should still decode the file properly here + # because other (currently unused) blocks are not guaranteed to be ASCII try: - # if there is an explicit codepage set, use it - # we pretend like it's ascii when searching for the codepage - cp_setting = re.search('Codepage=(.+)', - txt.decode('ascii', 'ignore'), - re.IGNORECASE & re.MULTILINE) - codepage = 'utf-8' + # if there is an explicit codepage set, use it; we pretend like it's ASCII when + # searching for the codepage + cp_setting = re.search( + "Codepage=(.+)", txt.decode("ascii", "ignore"), re.IGNORECASE & re.MULTILINE + ) + codepage = "utf-8" if cp_setting: codepage = cp_setting.group(1).strip() - # BrainAmp Recorder also uses ANSI codepage - # an ANSI codepage raises a LookupError exception - # python recognize ANSI decoding as cp1252 - if codepage == 'ANSI': - codepage = 'cp1252' + # BrainAmp Recorder also uses ANSI codepage; an ANSI codepage raises a + # LookupError exception; Python recognize ANSI decoding as cp1252 + if codepage == "ANSI": + codepage = "cp1252" txt = txt.decode(codepage) except UnicodeDecodeError: - # if UTF-8 (new standard) or explicit codepage setting fails, - # fallback to Latin-1, which is Windows default and implicit - # standard in older recordings - txt = txt.decode('latin-1') + # if UTF-8 (new standard) or explicit codepage setting fails, fallback to + # Latin-1, which is Windows default and implicit standard in older recordings + txt = txt.decode("latin-1") # extract Marker Infos block + onset, duration, type_, description = [], [], [], [] + date_str = "" + m = re.search(r"\[Marker Infos\]", txt, re.IGNORECASE) if not m: - return np.array(list()), np.array(list()), np.array(list()), '' + return onset, duration, type_, description, date_str - mk_txt = txt[m.end():] + mk_txt = txt[m.end() :] m = re.search(r"^\[.*\]$", mk_txt) if m: - mk_txt = mk_txt[:m.start()] + mk_txt = mk_txt[: m.start()] # extract event information items = re.findall(r"^Mk\d+=(.*)", mk_txt, re.MULTILINE) - onset, duration, description = list(), list(), list() - date_str = '' for info in items: - info_data = info.split(',') + info_data = info.split(",") mtype, mdesc, this_onset, this_duration = info_data[:4] - # commas in mtype and mdesc are handled as "\1". 
convert back to comma - mtype = mtype.replace(r'\1', ',') - mdesc = mdesc.replace(r'\1', ',') - if date_str == '' and len(info_data) == 5 and mtype == 'New Segment': - # to handle the origin of time and handle the presence of multiple - # New Segment annotations. We only keep the first one that is - # different from an empty string for date_str. + # commas in mtype and mdesc are handled as "\1", convert back to comma + mtype = mtype.replace(r"\1", ",") + mdesc = mdesc.replace(r"\1", ",") + if date_str == "" and len(info_data) == 5 and mtype == "New Segment": + # to handle the origin of time and handle the presence of multiple New + # Segment annotations, we only keep the first one that is different from an + # empty string for date_str date_str = info_data[-1] - this_duration = (int(this_duration) - if this_duration.isdigit() else 0) + this_duration = int(this_duration) if this_duration.isdigit() else 0 duration.append(this_duration) onset.append(int(this_onset) - 1) # BV is 1-indexed, not 0-indexed - description.append(mtype + '/' + mdesc) + type_.append(mtype) + description.append(mdesc) - return np.array(onset), np.array(duration), np.array(description), date_str + return onset, duration, type_, description, date_str -def _read_annotations_brainvision(fname, sfreq='auto'): +def _read_annotations_brainvision(fname, sfreq="auto", ignore_marker_types=False): """Create Annotations from BrainVision vmrk/amrk. - This function reads a .vmrk or .amrk file and makes an - :class:`mne.Annotations` object. + This function reads a .vmrk or .amrk file and creates an :class:`mne.Annotations` + object. Parameters ---------- fname : str | object The path to the .vmrk/.amrk file. sfreq : float | 'auto' - The sampling frequency in the file. It's necessary - as Annotations are expressed in seconds and vmrk/amrk - files are in samples. If set to 'auto' then - the sfreq is taken from the .vhdr/.ahdr file that - has the same name (without file extension). So - data.vmrk/amrk looks for sfreq in data.vhdr or, - if it does not exist, in data.ahdr. + The sampling frequency in the file. This is necessary because Annotations are + expressed in seconds and vmrk/amrk files are in samples. If set to 'auto' then + the sfreq is taken from the .vhdr/.ahdr file with the same name (without file + extension). So data.vmrk/amrk looks for sfreq in data.vhdr or, if it does not + exist, in data.ahdr. + ignore_marker_types : bool + If True, ignore marker types and only use marker descriptions. Default is False. Returns ------- annotations : instance of Annotations The annotations present in the file. 
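# A minimal sketch, assuming a hypothetical marker entry "Mk2=Stimulus,S  1,486,1,0"
# and sfreq=1000.0, of how the fields parsed above become an annotation: escaped commas
# are restored, the 1-indexed sample onset is converted to seconds, and the description
# is either "type/description" (default) or just the description (ignore_marker_types).
mtype, mdesc, this_onset, this_duration = "Stimulus", "S  1", "486", "1"
mtype, mdesc = mtype.replace(r"\1", ","), mdesc.replace(r"\1", ",")
sfreq = 1000.0
onset_sec = (int(this_onset) - 1) / sfreq      # BrainVision markers are 1-indexed
duration_sec = int(this_duration) / sfreq
full_desc = f"{mtype}/{mdesc}"                 # default -> "Stimulus/S  1"
short_desc = mdesc                             # ignore_marker_types=True -> "S  1"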
""" - onset, duration, description, date_str = _read_mrk(fname) + onset, duration, type_, description, date_str = _read_mrk(fname) orig_time = _str_to_meas_date(date_str) - if sfreq == 'auto': - hdr_fname = op.splitext(fname)[0] + '.vhdr' + if sfreq == "auto": + hdr_fname = op.splitext(fname)[0] + ".vhdr" # if vhdr file does not exist assume that the format is ahdr if not op.exists(hdr_fname): - hdr_fname = op.splitext(fname)[0] + '.ahdr' - logger.info("Finding 'sfreq' from header file: %s" % hdr_fname) + hdr_fname = op.splitext(fname)[0] + ".ahdr" + logger.info(f"Finding 'sfreq' from header file: {hdr_fname}") _, _, _, info = _aux_hdr_info(hdr_fname) - sfreq = info['sfreq'] + sfreq = info["sfreq"] + + # skip the first "New Segment" marker (as it only contains the recording time) + if len(type_) > 0 and type_[0] == "New Segment": + onset = onset[1:] + duration = duration[1:] + type_ = type_[1:] + description = description[1:] onset = np.array(onset, dtype=float) / sfreq duration = np.array(duration, dtype=float) / sfreq - annotations = Annotations(onset=onset, duration=duration, - description=description, - orig_time=orig_time) + if not ignore_marker_types: + description = [f"{t}/{d}" for t, d in zip(type_, description)] + annotations = Annotations( + onset=onset, duration=duration, description=description, orig_time=orig_time + ) return annotations def _check_bv_version(header, kind): """Check the header version.""" - _data_err = """\ - MNE-Python currently only supports %s versions 1.0 and 2.0, got unparsable\ - %r. Contact MNE-Python developers for support.""" - # optional space, optional Core or V-Amp, optional Exchange, - # Version/Header, optional comma, 1/2 - _data_re = (r"Brain ?Vision( Core| V-Amp)? Data( Exchange)? " - r"%s File,? Version %s\.0") - - assert kind in ('header', 'marker') - - if header == '': - warn(f'Missing header in {kind} file.') + _data_err = ( + "MNE-Python currently only supports %s versions 1.0 and 2.0, got unparsable " + "%r. Contact MNE-Python developers for support." + ) + # optional space, optional Core or V-Amp, optional Exchange, Version/Header, + # optional comma, 1/2 + _data_re = r"Brain ?Vision( Core| V-Amp)? Data( Exchange)? %s File,? Version %s\.0" + + assert kind in ("header", "marker") + for version in range(1, 3): this_re = _data_re % (kind.capitalize(), version) if re.search(this_re, header) is not None: return version else: - warn(_data_err % (kind, header)) + if header == "": + warn(f"Missing header in {kind} file.") + else: + warn(_data_err % (kind, header)) -_orientation_dict = dict(MULTIPLEXED='F', VECTORIZED='C') -_fmt_dict = dict(INT_16='short', INT_32='int', IEEE_FLOAT_32='single') +_orientation_dict = dict(MULTIPLEXED="F", VECTORIZED="C") +_fmt_dict = dict(INT_16="short", INT_32="int", IEEE_FLOAT_32="single") _fmt_byte_dict = dict(short=2, int=4, single=4) -_fmt_dtype_dict = dict(short=' 0: misc += to_misc - warn('No coordinate information found for channels {}. ' - 'Setting channel types to misc. To avoid this warning, set ' - 'channel types explicitly.'.format(to_misc)) + warn( + f"No coordinate information found for channels {to_misc}. Setting " + "channel types to misc. To avoid this warning, set channel types " + "explicitly." + ) if np.isnan(cals).any(): - raise RuntimeError('Missing channel units') + raise RuntimeError("Missing channel units") - # Attempts to extract filtering info from header. If not found, both are - # set to zero. + # Attempts to extract filtering info from header. If not found, both are set to + # zero. 
settings = settings.splitlines() idx = None - if 'Channels' in settings: - idx = settings.index('Channels') - settings = settings[idx + 1:] + if "Channels" in settings: + idx = settings.index("Channels") + settings = settings[idx + 1 :] hp_col, lp_col = 4, 5 for idx, setting in enumerate(settings): - if re.match(r'#\s+Name', setting): + if re.match(r"#\s+Name", setting): break else: idx = None - # If software filters are active, then they override the hardware setup - # But we still want to be able to double check the channel names - # for alignment purposes, we keep track of the hardware setting idx + # If software filters are active, then they override the hardware setup; we still + # want to be able to double check the channel names for alignment purposes, we keep + # track of the hardware setting idx idx_amp = idx filter_list_has_ch_name = True - if 'S o f t w a r e F i l t e r s' in settings: - idx = settings.index('S o f t w a r e F i l t e r s') - for idx, setting in enumerate(settings[idx + 1:], idx + 1): - if re.match(r'#\s+Low Cutoff', setting): + if "S o f t w a r e F i l t e r s" in settings: + idx = settings.index("S o f t w a r e F i l t e r s") + for idx, setting in enumerate(settings[idx + 1 :], idx + 1): + if re.match(r"#\s+Low Cutoff", setting): hp_col, lp_col = 1, 2 filter_list_has_ch_name = False - warn('Online software filter detected. Using software ' - 'filter settings and ignoring hardware values') + warn( + "Online software filter detected. Using software filter settings " + "and ignoring hardware values" + ) break else: idx = idx_amp @@ -661,27 +724,26 @@ def _get_hdr_info(hdr_fname, eog, misc, scale): lowpass = [] highpass = [] - # for newer BV files, the unit is specified for every channel - # separated by a single space, while for older files, the unit is - # specified in the column headers - divider = r'\s+' - if 'Resolution / Unit' in settings[idx]: + # for newer BV files, the unit is specified for every channel separated by a + # single space, while for older files, the unit is specified in the column + # headers + divider = r"\s+" + if "Resolution / Unit" in settings[idx]: shift = 1 # shift for unit else: shift = 0 - # Extract filter units and convert from seconds to Hz if necessary. - # this cannot be done as post-processing as the inverse t-f - # relationship means that the min/max comparisons don't make sense - # unless we know the units. + # Extract filter units and convert from seconds to Hz if necessary. this cannot + # be done as post-processing as the inverse t-f relationship means that the + # min/max comparisons don't make sense unless we know the units. # # For reasoning about the s to Hz conversion, see this reference: # `Ebersole, J. S., & Pedley, T. A. (Eds.). (2003). # Current practice of clinical electroencephalography. # Lippincott Williams & Wilkins.`, page 40-41 - header = re.split(r'\s\s+', settings[idx]) - hp_s = '[s]' in header[hp_col] - lp_s = '[s]' in header[lp_col] + header = re.split(r"\s\s+", settings[idx]) + hp_s = "[s]" in header[hp_col] + lp_s = "[s]" in header[lp_col] for i, ch in enumerate(ch_names, 1): if ahdr_format and i == len(ch_names) and ch == _AHDR_CHANNEL_NAME: @@ -708,116 +770,127 @@ def _get_hdr_info(hdr_fname, eog, misc, scale): if len(highpass) == 0: pass elif len(set(highpass)) == 1: - if highpass[0] in ('NaN', 'Off'): + if highpass[0] in ("NaN", "Off"): pass # Placeholder for future use. Highpass set in _empty_info - elif highpass[0] == 'DC': - info['highpass'] = 0. 
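# A minimal sketch, assuming a hypothetical time constant of 10 s read from a "[s]"
# filter column in the header, of the seconds-to-Hz conversion applied in the highpass
# and lowpass branches here (cutoff = 1 / (2 * pi * t)); this is the value the filter
# tests later in this diff assert against.
import numpy as np

t_sec = 10.0                             # hypothetical "[s]" filter entry
cutoff_hz = 1.0 / (2 * np.pi * t_sec)    # ~0.0159 Hz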
+ elif highpass[0] == "DC": + info["highpass"] = 0.0 else: - info['highpass'] = float(highpass[0]) + info["highpass"] = float(highpass[0]) if hp_s: # filter time constant t [secs] to Hz conversion: 1/2*pi*t - info['highpass'] = 1. / (2 * np.pi * info['highpass']) + info["highpass"] = 1.0 / (2 * np.pi * info["highpass"]) else: heterogeneous_hp_filter = True if hp_s: - # We convert channels with disabled filters to having - # highpass relaxed / no filters - highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC') - else np.Inf for filt in highpass] - info['highpass'] = np.max(np.array(highpass, dtype=np.float64)) - # Coveniently enough 1 / np.Inf = 0.0, so this works for - # DC / no highpass filter + # We convert channels with disabled filters to having highpass relaxed / + # no filters + highpass = [ + float(filt) if filt not in ("NaN", "Off", "DC") else np.inf + for filt in highpass + ] + info["highpass"] = np.max(np.array(highpass, dtype=np.float64)) + # Conveniently enough 1 / np.inf = 0.0, so this works for DC / no + # highpass filter # filter time constant t [secs] to Hz conversion: 1/2*pi*t - info['highpass'] = 1. / (2 * np.pi * info['highpass']) + info["highpass"] = 1.0 / (2 * np.pi * info["highpass"]) - # not exactly the cleanest use of FP, but this makes us - # more conservative in *not* warning. - if info['highpass'] == 0.0 and len(set(highpass)) == 1: + # not exactly the cleanest use of FP, but this makes us more + # conservative in *not* warning. + if info["highpass"] == 0.0 and len(set(highpass)) == 1: # not actually heterogeneous in effect # ... just heterogeneously disabled heterogeneous_hp_filter = False else: - highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC') - else 0.0 for filt in highpass] - info['highpass'] = np.min(np.array(highpass, dtype=np.float64)) - if info['highpass'] == 0.0 and len(set(highpass)) == 1: + highpass = [ + float(filt) if filt not in ("NaN", "Off", "DC") else 0.0 + for filt in highpass + ] + info["highpass"] = np.min(np.array(highpass, dtype=np.float64)) + if info["highpass"] == 0.0 and len(set(highpass)) == 1: # not actually heterogeneous in effect # ... just heterogeneously disabled heterogeneous_hp_filter = False if heterogeneous_hp_filter: - warn('Channels contain different highpass filters. ' - 'Lowest (weakest) filter setting (%0.2f Hz) ' - 'will be stored.' % info['highpass']) + warn( + "Channels contain different highpass filters. Lowest (weakest) " + f"filter setting ({info['highpass']:0.2f} Hz) will be stored." + ) if len(lowpass) == 0: pass elif len(set(lowpass)) == 1: - if lowpass[0] in ('NaN', 'Off', '0'): + if lowpass[0] in ("NaN", "Off", "0"): pass # Placeholder for future use. Lowpass set in _empty_info else: - info['lowpass'] = float(lowpass[0]) + info["lowpass"] = float(lowpass[0]) if lp_s: # filter time constant t [secs] to Hz conversion: 1/2*pi*t - info['lowpass'] = 1. 
/ (2 * np.pi * info['lowpass']) + info["lowpass"] = 1.0 / (2 * np.pi * info["lowpass"]) else: heterogeneous_lp_filter = True if lp_s: - # We convert channels with disabled filters to having - # infinitely relaxed / no filters - lowpass = [float(filt) if filt not in ('NaN', 'Off', '0') - else 0.0 for filt in lowpass] - info['lowpass'] = np.min(np.array(lowpass, dtype=np.float64)) + # We convert channels with disabled filters to having infinitely relaxed + # / no filters + lowpass = [ + float(filt) if filt not in ("NaN", "Off", "0") else 0.0 + for filt in lowpass + ] + info["lowpass"] = np.min(np.array(lowpass, dtype=np.float64)) try: # filter time constant t [secs] to Hz conversion: 1/2*pi*t - info['lowpass'] = 1. / (2 * np.pi * info['lowpass']) + info["lowpass"] = 1.0 / (2 * np.pi * info["lowpass"]) except ZeroDivisionError: if len(set(lowpass)) == 1: - # No lowpass actually set for the weakest setting - # so we set lowpass to the Nyquist frequency - info['lowpass'] = info['sfreq'] / 2. + # No lowpass actually set for the weakest setting so we set + # lowpass to the Nyquist frequency + info["lowpass"] = info["sfreq"] / 2.0 # not actually heterogeneous in effect # ... just heterogeneously disabled heterogeneous_lp_filter = False else: - # no lowpass filter is the weakest filter, - # but it wasn't the only filter + # no lowpass filter is the weakest filter, but it wasn't the + # only filter pass else: - # We convert channels with disabled filters to having - # infinitely relaxed / no filters - lowpass = [float(filt) if filt not in ('NaN', 'Off', '0') - else np.Inf for filt in lowpass] - info['lowpass'] = np.max(np.array(lowpass, dtype=np.float64)) - - if np.isinf(info['lowpass']): - # No lowpass actually set for the weakest setting - # so we set lowpass to the Nyquist frequency - info['lowpass'] = info['sfreq'] / 2. + # We convert channels with disabled filters to having infinitely relaxed + # / no filters + lowpass = [ + float(filt) if filt not in ("NaN", "Off", "0") else np.inf + for filt in lowpass + ] + info["lowpass"] = np.max(np.array(lowpass, dtype=np.float64)) + + if np.isinf(info["lowpass"]): + # No lowpass actually set for the weakest setting so we set lowpass + # to the Nyquist frequency + info["lowpass"] = info["sfreq"] / 2.0 if len(set(lowpass)) == 1: # not actually heterogeneous in effect # ... just heterogeneously disabled heterogeneous_lp_filter = False if heterogeneous_lp_filter: - # this isn't clean FP, but then again, we only want to provide - # the Nyquist hint when the lowpass filter was actually - # calculated from dividing the sampling frequency by 2, so the - # exact/direct comparison (instead of tolerance) makes sense - if info['lowpass'] == info['sfreq'] / 2.0: - nyquist = ', Nyquist limit' + # this isn't clean FP, but then again, we only want to provide the + # Nyquist hint when the lowpass filter was actually calculated from + # dividing the sampling frequency by 2, so the exact/direct comparison + # (instead of tolerance) makes sense + if info["lowpass"] == info["sfreq"] / 2.0: + nyquist = ", Nyquist limit" else: nyquist = "" - warn('Channels contain different lowpass filters. ' - 'Highest (weakest) filter setting (%0.2f Hz%s) ' - 'will be stored.' % (info['lowpass'], nyquist)) + warn( + "Channels contain different lowpass filters. " + f"Highest (weakest) filter setting ({info['lowpass']:0.2f} " + f"Hz{nyquist}) will be stored." 
+ ) # Creates a list of dicts of eeg channels for raw.info - logger.info('Setting channel info structure...') - info['chs'] = [] + logger.info("Setting channel info structure...") + info["chs"] = [] for idx, ch_name in enumerate(ch_names): if ch_name in eog or idx in eog or idx - nchan in eog: kind = FIFF.FIFFV_EOG_CH @@ -830,7 +903,7 @@ def _get_hdr_info(hdr_fname, eog, misc, scale): unit = misc_chs[ch_name] else: unit = FIFF.FIFF_UNIT_NONE - elif ch_name == 'STI 014': + elif ch_name == "STI 014": kind = FIFF.FIFFV_STIM_CH coil_type = FIFF.FIFFV_COIL_NONE unit = FIFF.FIFF_UNIT_NONE @@ -838,64 +911,109 @@ def _get_hdr_info(hdr_fname, eog, misc, scale): kind = FIFF.FIFFV_EEG_CH coil_type = FIFF.FIFFV_COIL_EEG unit = FIFF.FIFF_UNIT_V - info['chs'].append(dict( - ch_name=ch_name, coil_type=coil_type, kind=kind, logno=idx + 1, - scanno=idx + 1, cal=cals[idx], range=ranges[idx], - loc=np.full(12, np.nan), - unit=unit, unit_mul=FIFF.FIFF_UNITM_NONE, - coord_frame=FIFF.FIFFV_COORD_HEAD)) + info["chs"].append( + dict( + ch_name=ch_name, + coil_type=coil_type, + kind=kind, + logno=idx + 1, + scanno=idx + 1, + cal=cals[idx], + range=ranges[idx], + loc=np.full(12, np.nan), + unit=unit, + unit_mul=FIFF.FIFF_UNITM_NONE, + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + ) info._unlocked = False info._update_redundant() - return (info, data_fname, fmt, order, n_samples, mrk_fname, montage, - orig_units) + return (info, data_fname, fmt, order, n_samples, mrk_fname, montage, orig_units) @fill_doc -def read_raw_brainvision(vhdr_fname, - eog=('HEOGL', 'HEOGR', 'VEOGb'), misc='auto', - scale=1., preload=False, verbose=None): +def read_raw_brainvision( + vhdr_fname, + eog=("HEOGL", "HEOGR", "VEOGb"), + misc="auto", + scale=1.0, + ignore_marker_types=False, + preload=False, + verbose=None, +) -> RawBrainVision: """Reader for Brain Vision EEG file. Parameters ---------- vhdr_fname : path-like Path to the EEG header file. - eog : list or tuple of str - Names of channels or list of indices that should be designated - EOG channels. Values should correspond to the header file - Default is ``('HEOGL', 'HEOGR', 'VEOGb')``. - misc : list or tuple of str | ``'auto'`` - Names of channels or list of indices that should be designated - MISC channels. Values should correspond to the electrodes in the - header file. If ``'auto'``, units in header file are used for inferring - misc channels. Default is ``'auto'``. + eog : list of (int | str) | tuple of (int | str) + Names of channels or list of indices that should be designated EOG channels. + Values should correspond to the header file Default is ``('HEOGL', 'HEOGR', + 'VEOGb')``. + misc : list of (int | str) | tuple of (int | str) | ``'auto'`` + Names of channels or list of indices that should be designated MISC channels. + Values should correspond to the electrodes in the header file. If ``'auto'``, + units in header file are used for inferring misc channels. Default is + ``'auto'``. scale : float - The scaling factor for EEG data. Unless specified otherwise by - header file, units are in microvolts. Default scale factor is 1. + The scaling factor for EEG data. Unless specified otherwise by header file, + units are in microvolts. Default scale factor is 1. + ignore_marker_types : bool + If ``True``, ignore marker types and only use marker descriptions. Default is + ``False``. + + .. versionadded:: 1.8 %(preload)s %(verbose)s Returns ------- raw : instance of RawBrainVision - A Raw object containing BrainVision data. 
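# A minimal usage sketch, assuming a hypothetical recording "sub-01.vhdr" that uses the
# default EOG channel names and whose header contains an impedance block;
# ignore_marker_types is the argument documented above, and raw.impedances is the
# attribute described in the notes that follow.
from mne.io import read_raw_brainvision

raw = read_raw_brainvision("sub-01.vhdr", preload=True)
print(raw.annotations.description[:3])  # e.g. "Stimulus/S253", "Stimulus/S255", ...
print(raw.impedances)                   # per-channel impedances parsed from the header
raw = read_raw_brainvision("sub-01.vhdr", ignore_marker_types=True)
print(raw.annotations.description[:3])  # e.g. "S253", "S255", ...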
- See :class:`mne.io.Raw` for documentation of attributes and methods. + A Raw object containing BrainVision data. See :class:`mne.io.Raw` for + documentation of attributes and methods. See Also -------- mne.io.Raw : Documentation of attributes and methods of RawBrainVision. - """ - return RawBrainVision(vhdr_fname=vhdr_fname, eog=eog, - misc=misc, scale=scale, preload=preload, - verbose=verbose) - -_BV_EVENT_IO_OFFSETS = {'Event/': 0, 'Stimulus/S': 0, 'Response/R': 1000, - 'Optic/O': 2000} -_OTHER_ACCEPTED_MARKERS = { - 'New Segment/': 99999, 'SyncStatus/Sync On': 99998 + Notes + ----- + If the BrainVision header file contains impedance measurements, these may be + accessed using ``raw.impedances`` after reading using this function. However, this + attribute will NOT be available after a save and re-load of the data. That is, it is + only available when reading data directly from the BrainVision header file. + + BrainVision markers consist of a type and a description (in addition to other fields + like onset and duration). In contrast, annotations in MNE only have a description. + Therefore, a BrainVision marker of type "Stimulus" and description "S 1" will be + converted to an annotation "Stimulus/S 1" by default. If you want to ignore the + type and instead only use the description, set ``ignore_marker_types=True``, which + will convert the same marker to an annotation "S 1". + + The first marker in a BrainVision file is usually a "New Segment" marker, which + contains the recording time. This time is stored in the ``info['meas_date']`` + attribute of the returned object and is not converted to an annotation. + """ + return RawBrainVision( + vhdr_fname=vhdr_fname, + eog=eog, + misc=misc, + scale=scale, + ignore_marker_types=ignore_marker_types, + preload=preload, + verbose=verbose, + ) + + +_BV_EVENT_IO_OFFSETS = { + "Event/": 0, + "Stimulus/S": 0, + "Response/R": 1000, + "Optic/O": 2000, } +_OTHER_ACCEPTED_MARKERS = {"New Segment/": 99999, "SyncStatus/Sync On": 99998} _OTHER_OFFSET = 10001 # where to start "unknown" event_ids _AHDR_CHANNEL_NAME = "AHDR_CHANNEL" @@ -914,15 +1032,15 @@ def __call__(self, description): elif description in _OTHER_ACCEPTED_MARKERS: code = _OTHER_ACCEPTED_MARKERS[description] else: - code = (super(_BVEventParser, self) - .__call__(description, offset=_OTHER_OFFSET)) + code = super().__call__(description, offset=_OTHER_OFFSET) return code def _check_bv_annot(descriptions): - markers_basename = set([dd.rstrip('0123456789 ') for dd in descriptions]) - bv_markers = (set(_BV_EVENT_IO_OFFSETS.keys()) - .union(set(_OTHER_ACCEPTED_MARKERS.keys()))) + markers_basename = set([dd.rstrip("0123456789 ") for dd in descriptions]) + bv_markers = set(_BV_EVENT_IO_OFFSETS.keys()).union( + set(_OTHER_ACCEPTED_MARKERS.keys()) + ) return len(markers_basename - bv_markers) == 0 @@ -942,46 +1060,47 @@ def _parse_impedance(settings, recording_date=None): A dictionary of all electrodes and their impedances. 
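# A minimal sketch, assuming a few hypothetical descriptions, of how the offsets above
# turn "type/description" annotations into the integer event codes that
# mne.events_from_annotations yields for BrainVision data (a simplified stand-in for
# the _BVEventParser logic above, not the parser itself):
offsets = {"Event/": 0, "Stimulus/S": 0, "Response/R": 1000, "Optic/O": 2000}
for desc in ("Stimulus/S253", "Response/R255", "Optic/O  1"):
    prefix = next(p for p in offsets if desc.startswith(p))
    print(desc, "->", offsets[prefix] + int(desc[len(prefix):]))  # 253, 1255, 2001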
""" ranges = _parse_impedance_ranges(settings) - impedance_setting_lines = [i for i in settings if - i.startswith('Impedance [') and - i.endswith(' :')] + impedance_setting_lines = [ + i for i in settings if i.startswith("Impedance [") and i.endswith(" :") + ] impedances = dict() if len(impedance_setting_lines) > 0: idx = settings.index(impedance_setting_lines[0]) impedance_setting = impedance_setting_lines[0].split() - impedance_unit = impedance_setting[1].lstrip('[').rstrip(']') + impedance_unit = impedance_setting[1].lstrip("[").rstrip("]") impedance_time = None - # If we have a recording date, we can update it with the time of - # impedance measurement + # If we have a recording date, we can update it with the time of impedance + # measurement if recording_date is not None: - meas_time = [int(i) for i in impedance_setting[3].split(':')] - impedance_time = recording_date.replace(hour=meas_time[0], - minute=meas_time[1], - second=meas_time[2], - microsecond=0) - for setting in settings[idx + 1:]: - # Parse channel impedances until we find a line that doesn't start - # with a channel name and optional +/- polarity for passive elecs - match = re.match(r'[ a-zA-Z0-9_+-]+:', setting) + meas_time = [int(i) for i in impedance_setting[3].split(":")] + impedance_time = recording_date.replace( + hour=meas_time[0], + minute=meas_time[1], + second=meas_time[2], + microsecond=0, + ) + for setting in settings[idx + 1 :]: + # Parse channel impedances until we find a line that doesn't start with a + # channel name and optional +/- polarity for passive elecs + match = re.match(r"[ a-zA-Z0-9_+-]+:", setting) if match: - channel_name = match.group().rstrip(':') + channel_name = match.group().rstrip(":") channel_imp_line = setting.split() - imp_as_number = re.findall(r"[-+]?\d*\.\d+|\d+", - channel_imp_line[-1]) + imp_as_number = re.findall(r"[-+]?\d*\.\d+|\d+", channel_imp_line[-1]) channel_impedance = dict( imp=float(imp_as_number[0]) if imp_as_number else np.nan, imp_unit=impedance_unit, ) if impedance_time is not None: - channel_impedance.update({'imp_meas_time': impedance_time}) - - if channel_name == 'Ref' and 'Reference' in ranges: - channel_impedance.update(ranges['Reference']) - elif channel_name == 'Gnd' and 'Ground' in ranges: - channel_impedance.update(ranges['Ground']) - elif 'Data' in ranges: - channel_impedance.update(ranges['Data']) + channel_impedance.update({"imp_meas_time": impedance_time}) + + if channel_name == "Ref" and "Reference" in ranges: + channel_impedance.update(ranges["Reference"]) + elif channel_name == "Gnd" and "Ground" in ranges: + channel_impedance.update(ranges["Ground"]) + elif "Data" in ranges: + channel_impedance.update(ranges["Data"]) impedances[channel_name] = channel_impedance else: break @@ -1001,17 +1120,18 @@ def _parse_impedance_ranges(settings): electrode_imp_ranges : dict A dictionary of impedance ranges for each type of electrode. 
""" - impedance_ranges = [item for item in settings if - "Selected Impedance Measurement Range" in item] + impedance_ranges = [ + item for item in settings if "Selected Impedance Measurement Range" in item + ] electrode_imp_ranges = dict() if impedance_ranges: if len(impedance_ranges) == 1: img_range = impedance_ranges[0].split() - for electrode_type in ['Data', 'Reference', 'Ground']: + for electrode_type in ["Data", "Reference", "Ground"]: electrode_imp_ranges[electrode_type] = { "imp_lower_bound": float(img_range[-4]), "imp_upper_bound": float(img_range[-2]), - "imp_range_unit": img_range[-1] + "imp_range_unit": img_range[-1], } else: for electrode_range in impedance_ranges: @@ -1019,6 +1139,6 @@ def _parse_impedance_ranges(settings): electrode_imp_ranges[electrode_range[0]] = { "imp_lower_bound": float(electrode_range[6]), "imp_upper_bound": float(electrode_range[8]), - "imp_range_unit": electrode_range[9] + "imp_range_unit": electrode_range[9], } return electrode_imp_ranges diff --git a/mne/io/brainvision/tests/__init__.py b/mne/io/brainvision/tests/__init__.py index 8b137891791..04c673b4e4d 100644 --- a/mne/io/brainvision/tests/__init__.py +++ b/mne/io/brainvision/tests/__init__.py @@ -1 +1,3 @@ - +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/brainvision/tests/test_brainvision.py b/mne/io/brainvision/tests/test_brainvision.py index 64477bbea89..8366f15dc3a 100644 --- a/mne/io/brainvision/tests/test_brainvision.py +++ b/mne/io/brainvision/tests/test_brainvision.py @@ -1,27 +1,25 @@ -# -*- coding: utf-8 -*- """Test reading of BrainVision format.""" -# Author: Teon Brooks -# Stefan Appelhoff -# + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. +import datetime import re import shutil from pathlib import Path import numpy as np -from numpy.testing import (assert_array_almost_equal, assert_array_equal, - assert_allclose, assert_equal) import pytest +from numpy.testing import assert_allclose, assert_array_equal -import datetime -from mne.utils import _stamp_to_dt, object_diff -from mne import pick_types, read_annotations, concatenate_raws -from mne.io.constants import FIFF -from mne.io import read_raw_fif, read_raw_brainvision -from mne.io.tests.test_raw import _test_raw_reader -from mne.datasets import testing +from mne import concatenate_raws, pick_types, read_annotations +from mne._fiff.constants import FIFF from mne.annotations import events_from_annotations +from mne.datasets import testing +from mne.io import read_raw_brainvision, read_raw_fif +from mne.io.tests.test_raw import _test_raw_reader +from mne.utils import _record_warnings, _stamp_to_dt, object_diff data_dir = Path(__file__).parent / "data" vhdr_path = data_dir / "test.vhdr" @@ -60,9 +58,6 @@ # Test for nanovolts as unit vhdr_units_path = data_dir / "test_units.vhdr" -# Test bad date -vhdr_bad_date = data_dir / "test_bad_date.vhdr" - eeg_bin = data_dir / "test_bin_raw.fif" eog = ["HL", "HR", "Vb"] @@ -73,86 +68,96 @@ # This should be amend in its own PR. 
montage = data_dir / "test.hpts" +_no_dig = pytest.warns(RuntimeWarning, match="No info on DataPoints") + def test_orig_units(recwarn): """Test exposure of original channel units.""" raw = read_raw_brainvision(vhdr_path) orig_units = raw._orig_units assert len(orig_units) == 32 - assert orig_units['FP1'] == 'µV' + assert orig_units["FP1"] == "µV" # no unit specified in the vhdr, ensure we default to µV here - assert orig_units['FP2'] == 'µV' - assert orig_units['F3'] == 'µV' - - sum([v == 'µV' for v in orig_units.values()]) == 26 - - assert orig_units['CP5'] == 'n/a' # originally BS, not a valid unit - assert orig_units['CP6'] == 'µS' - assert orig_units['HL'] == 'n/a' # originally ARU, not a valid unit - assert orig_units['HR'] == 'n/a' # originally uS ... - assert orig_units['Vb'] == 'S' - assert orig_units['ReRef'] == 'C' - - -DATE_TEST_CASES = np.array([ - ('Mk1=New Segment,,1,1,0,20131113161403794232\n', # content - [1384359243, 794232], # meas_date internal representation - '2013-11-13 16:14:03 UTC'), # meas_date representation - - (('Mk1=New Segment,,1,1,0,20070716122240937454\n' - 'Mk2=New Segment,,2,1,0,20070716122240937455\n'), - [1184588560, 937454], - '2007-07-16 12:22:40 UTC'), - - ('Mk1=New Segment,,1,1,0,\nMk2=New Segment,,2,1,0,20070716122240937454\n', - [1184588560, 937454], - '2007-07-16 12:22:40 UTC'), - - ('Mk1=STATUS,,1,1,0\n', None, 'unspecified'), - ('Mk1=New Segment,,1,1,0,\n', None, 'unspecified'), - ('Mk1=New Segment,,1,1,0\n', None, 'unspecified'), - ('Mk1=New Segment,,1,1,0,00000000000304125000', None, 'unspecified'), - -], dtype=np.dtype({ - 'names': ['content', 'meas_date', 'meas_date_repr'], - 'formats': [object, object, 'U22'] -})) + assert orig_units["FP2"] == "µV" + assert orig_units["F3"] == "µV" + + sum([v == "µV" for v in orig_units.values()]) == 26 + + assert orig_units["CP5"] == "n/a" # originally BS, not a valid unit + assert orig_units["CP6"] == "µS" + assert orig_units["HL"] == "n/a" # originally ARU, not a valid unit + assert orig_units["HR"] == "n/a" # originally uS ... 
+ assert orig_units["Vb"] == "S" + assert orig_units["ReRef"] == "C" + + +DATE_TEST_CASES = np.array( + [ + ( + "Mk1=New Segment,,1,1,0,20131113161403794232\n", # content + [1384359243, 794232], # meas_date internal representation + "2013-11-13 16:14:03 UTC", + ), # meas_date representation + ( + ( + "Mk1=New Segment,,1,1,0,20070716122240937454\n" + "Mk2=New Segment,,2,1,0,20070716122240937455\n" + ), + [1184588560, 937454], + "2007-07-16 12:22:40 UTC", + ), + ( + "Mk1=New Segment,,1,1,0,\nMk2=New Segment,,2,1,0,20070716122240937454\n", + [1184588560, 937454], + "2007-07-16 12:22:40 UTC", + ), + ("Mk1=STATUS,,1,1,0\n", None, "unspecified"), + ("Mk1=New Segment,,1,1,0,\n", None, "unspecified"), + ("Mk1=New Segment,,1,1,0\n", None, "unspecified"), + ("Mk1=New Segment,,1,1,0,00000000000304125000", None, "unspecified"), + ], + dtype=np.dtype( + { + "names": ["content", "meas_date", "meas_date_repr"], + "formats": [object, object, "U22"], + } + ), +) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def _mocked_meas_date_data(tmp_path_factory): """Prepare files for mocked_meas_date_file fixture.""" # Prepare the files tmp_path = tmp_path_factory.mktemp("brainvision_mocked_meas_date") - vhdr_fname, vmrk_fname, eeg_fname = [ - tmp_path / ff.name - for ff in [vhdr_path, vmrk_path, eeg_path] - ] + vhdr_fname, vmrk_fname, eeg_fname = ( + tmp_path / ff.name for ff in [vhdr_path, vmrk_path, eeg_path] + ) for orig, dest in zip([vhdr_path, eeg_path], [vhdr_fname, eeg_fname]): shutil.copyfile(orig, dest) # Get the marker information - with open(vmrk_path, 'r') as fin: + with open(vmrk_path) as fin: lines = fin.readlines() return vhdr_fname, vmrk_fname, lines -@pytest.fixture(scope='session', params=[tt for tt in DATE_TEST_CASES]) +@pytest.fixture(scope="session", params=[tt for tt in DATE_TEST_CASES]) def mocked_meas_date_file(_mocked_meas_date_data, request): """Prepare a generator for use in test_meas_date.""" MEAS_DATE_LINE = 11 # see test.vmrk file vhdr_fname, vmrk_fname, lines = _mocked_meas_date_data - lines[MEAS_DATE_LINE] = request.param['content'] - with open(vmrk_fname, 'w') as fout: + lines[MEAS_DATE_LINE] = request.param["content"] + with open(vmrk_fname, "w") as fout: fout.writelines(lines) - meas_date = request.param['meas_date'] + meas_date = request.param["meas_date"] if meas_date is not None: meas_date = _stamp_to_dt(meas_date) - yield vhdr_fname, meas_date, request.param['meas_date_repr'] + yield vhdr_fname, meas_date, request.param["meas_date_repr"] def test_meas_date(mocked_meas_date_file): @@ -161,9 +166,9 @@ def test_meas_date(mocked_meas_date_file): raw = read_raw_brainvision(vhdr_f) assert expected_meas_repr in repr(raw.info) if expected_meas is None: - assert raw.info['meas_date'] is None + assert raw.info["meas_date"] is None else: - assert raw.info['meas_date'] == expected_meas + assert raw.info["meas_date"] == expected_meas def test_vhdr_codepage_ansi(tmp_path): @@ -176,39 +181,42 @@ def test_vhdr_codepage_ansi(tmp_path): # copy data file shutil.copy(eeg_path, ansi_eeg_path) # modify header file - with open(ansi_vhdr_path, 'wb') as fout: - with open(vhdr_path, 'rb') as fin: + with open(ansi_vhdr_path, "wb") as fout: + with open(vhdr_path, "rb") as fin: for line in fin: # Common Infos section - if line.startswith(b'Codepage'): - line = b'Codepage=ANSI\n' + if line.startswith(b"Codepage"): + line = b"Codepage=ANSI\n" fout.write(line) # modify marker file - with open(ansi_vmrk_path, 'wb') as fout: - with open(vmrk_path, 'rb') as fin: + with open(ansi_vmrk_path, 
"wb") as fout: + with open(vmrk_path, "rb") as fin: for line in fin: # Common Infos section - if line.startswith(b'Codepage'): - line = b'Codepage=ANSI\n' + if line.startswith(b"Codepage"): + line = b"Codepage=ANSI\n" fout.write(line) raw = read_raw_brainvision(ansi_vhdr_path) data_new, times_new = raw[:] - assert_equal(raw_init.ch_names, raw.ch_names) + assert raw_init.ch_names == raw.ch_names assert_allclose(data_new, data_expected, atol=1e-15) assert_allclose(times_new, times_expected, atol=1e-15) -@pytest.mark.parametrize('header', [ - b'BrainVision Data Exchange %s File Version 1.0\n', - # 2.0, space, core, comma - b'Brain Vision Core Data Exchange %s File, Version 2.0\n', - # unsupported version - b'Brain Vision Core Data Exchange %s File, Version 3.0\n', - # missing header - b'\n', -]) +@pytest.mark.parametrize( + "header", + [ + b"BrainVision Data Exchange %s File Version 1.0\n", + # 2.0, space, core, comma + b"Brain Vision Core Data Exchange %s File, Version 2.0\n", + # unsupported version + b"Brain Vision Core Data Exchange %s File, Version 3.0\n", + # missing header + b"\n", + ], +) def test_vhdr_versions(tmp_path, header): """Test BV reading with different header variants.""" raw_init = read_raw_brainvision(vhdr_path) @@ -217,33 +225,33 @@ def test_vhdr_versions(tmp_path, header): use_vmrk_path = tmp_path / vmrk_path.name use_eeg_path = tmp_path / eeg_path.name shutil.copy(eeg_path, use_eeg_path) - with open(use_vhdr_path, 'wb') as fout: - with open(vhdr_path, 'rb') as fin: + with open(use_vhdr_path, "wb") as fout: + with open(vhdr_path, "rb") as fin: for line in fin: # Common Infos section - if line.startswith(b'Brain'): - if header != b'\n': - line = header % b'Header' + if line.startswith(b"Brain"): + if header != b"\n": + line = header % b"Header" else: line = header fout.write(line) - with open(use_vmrk_path, 'wb') as fout: - with open(vmrk_path, 'rb') as fin: + with open(use_vmrk_path, "wb") as fout: + with open(vmrk_path, "rb") as fin: for line in fin: # Common Infos section - if line.startswith(b'Brain'): - if header != b'\n': - line = header % b'Marker' + if line.startswith(b"Brain"): + if header != b"\n": + line = header % b"Marker" else: line = header fout.write(line) - if (b'3.0' in header): # unsupported version - with pytest.warns(RuntimeWarning, match=r'3\.0.*Contact MNE-Python'): + if b"3.0" in header: # unsupported version + with pytest.warns(RuntimeWarning, match=r"3\.0.*Contact MNE-Python"): read_raw_brainvision(use_vhdr_path) return - elif header == b'\n': # no version header - with pytest.warns(RuntimeWarning, match='Missing header'): + elif header == b"\n": # no version header + with pytest.warns(RuntimeWarning, match="Missing header"): read_raw_brainvision(use_vhdr_path) return else: @@ -252,7 +260,7 @@ def test_vhdr_versions(tmp_path, header): assert_allclose(data_new, data_expected, atol=1e-15) -@pytest.mark.parametrize('data_sep', (b' ', b',', b'+')) +@pytest.mark.parametrize("data_sep", (b" ", b",", b"+")) def test_ascii(tmp_path, data_sep): """Test ASCII BV reading.""" raw = read_raw_brainvision(vhdr_path) @@ -264,33 +272,37 @@ def test_ascii(tmp_path, data_sep): ) # modify header file skipping = False - with open(ascii_vhdr_path, 'wb') as fout: - with open(vhdr_path, 'rb') as fin: + with open(ascii_vhdr_path, "wb") as fout: + with open(vhdr_path, "rb") as fin: for line in fin: # Common Infos section - if line.startswith(b'DataFormat'): - line = b'DataFormat=ASCII\n' - elif line.startswith(b'DataFile='): - line = b'DataFile=test.dat\n' + if 
line.startswith(b"DataFormat"): + line = b"DataFormat=ASCII\n" + elif line.startswith(b"DataFile="): + line = b"DataFile=test.dat\n" # Replace the "'Binary Infos'" section - elif line.startswith(b'[Binary Infos]'): + elif line.startswith(b"[Binary Infos]"): skipping = True - fout.write(b'[ASCII Infos]\nDecimalSymbol=.\nSkipLines=1\n' - b'SkipColumns=0\n\n') - elif skipping and line.startswith(b'['): + fout.write( + b"[ASCII Infos]\nDecimalSymbol=.\nSkipLines=1\n" + b"SkipColumns=0\n\n" + ) + elif skipping and line.startswith(b"["): skipping = False if not skipping: fout.write(line) # create the .dat file data, times = raw[:] with open(ascii_vhdr_path.with_suffix(".dat"), "wb") as fid: - fid.write(data_sep.join(ch_name.encode('ASCII') - for ch_name in raw.ch_names) + b'\n') - fid.write(b'\n'.join(b' '.join(b'%.3f' % dd for dd in d) - for d in data.T / raw._cals)) + fid.write( + data_sep.join(ch_name.encode("ASCII") for ch_name in raw.ch_names) + b"\n" + ) + fid.write( + b"\n".join(b" ".join(b"%.3f" % dd for dd in d) for d in data.T / raw._cals) + ) - if data_sep == b';': - with pytest.raises(RuntimeError, match='Unknown.*data format'): + if data_sep == b";": + with pytest.raises(RuntimeError, match="Unknown.*data format"): read_raw_brainvision(ascii_vhdr_path) return @@ -311,12 +323,13 @@ def test_ch_names_comma(tmp_path): # Copy existing vhdr file to tmp_path and manipulate to contain # a channel with comma - for src, dest in zip((vhdr_path, vmrk_path, eeg_path), - ('test.vhdr', 'test.vmrk', 'test.eeg')): + for src, dest in zip( + (vhdr_path, vmrk_path, eeg_path), ("test.vhdr", "test.vmrk", "test.eeg") + ): shutil.copyfile(src, tmp_path / dest) - comma_vhdr = tmp_path / 'test.vhdr' - with open(comma_vhdr, 'r') as fin: + comma_vhdr = tmp_path / "test.vhdr" + with open(comma_vhdr) as fin: lines = fin.readlines() new_lines = [] @@ -333,7 +346,7 @@ def test_ch_names_comma(tmp_path): new_lines.append(line) assert nperformed_replacements == len(replace_dict) - with open(comma_vhdr, 'w') as fout: + with open(comma_vhdr, "w") as fout: fout.writelines(new_lines) # Read the line containing a "comma channel name" @@ -341,72 +354,58 @@ def test_ch_names_comma(tmp_path): assert "F4,foo" in raw.ch_names +@pytest.mark.filterwarnings("ignore:.*different.*:RuntimeWarning") def test_brainvision_data_highpass_filters(): """Test reading raw Brain Vision files with amplifier filter settings.""" # Homogeneous highpass in seconds (default measurement unit) - raw = _test_raw_reader( - read_raw_brainvision, vhdr_fname=vhdr_highpass_path, eog=eog - ) + raw = _test_raw_reader(read_raw_brainvision, vhdr_fname=vhdr_highpass_path, eog=eog) - assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) - assert_equal(raw.info['lowpass'], 250.) 
+ assert raw.info["highpass"] == 1.0 / (2 * np.pi * 10) + assert raw.info["lowpass"] == 250.0 # Heterogeneous highpass in seconds (default measurement unit) - with pytest.warns(RuntimeWarning, match='different .*pass filters') as w: + with pytest.warns(RuntimeWarning, match="different .*pass filters") as w: raw = _test_raw_reader( - read_raw_brainvision, vhdr_fname=vhdr_mixed_highpass_path, - eog=eog) - - lowpass_warning = ['different lowpass filters' in str(ww.message) - for ww in w] - highpass_warning = ['different highpass filters' in str(ww.message) - for ww in w] - - expected_warnings = zip(lowpass_warning, highpass_warning) + read_raw_brainvision, vhdr_fname=vhdr_mixed_highpass_path, eog=eog + ) - assert (all(any([lp, hp]) for lp, hp in expected_warnings)) + w = [str(ww.message) for ww in w] + assert not any("different lowpass filters" in ww for ww in w), w + assert any("different highpass filters" in ww for ww in w), w - assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) - assert_equal(raw.info['lowpass'], 250.) + assert raw.info["highpass"] == 1.0 / (2 * np.pi * 10) + assert raw.info["lowpass"] == 250.0 # Homogeneous highpass in Hertz raw = _test_raw_reader( - read_raw_brainvision, vhdr_fname=vhdr_highpass_hz_path, - eog=eog) + read_raw_brainvision, vhdr_fname=vhdr_highpass_hz_path, eog=eog + ) - assert_equal(raw.info['highpass'], 10.) - assert_equal(raw.info['lowpass'], 250.) + assert raw.info["highpass"] == 10.0 + assert raw.info["lowpass"] == 250.0 # Heterogeneous highpass in Hertz - with pytest.warns(RuntimeWarning, match='different .*pass filters') as w: + with pytest.warns(RuntimeWarning, match="different .*pass filters") as w: raw = _test_raw_reader( - read_raw_brainvision, vhdr_fname=vhdr_mixed_highpass_hz_path, - eog=eog) - - trigger_warning = ['will be dropped' in str(ww.message) - for ww in w] - lowpass_warning = ['different lowpass filters' in str(ww.message) - for ww in w] - highpass_warning = ['different highpass filters' in str(ww.message) - for ww in w] - - expected_warnings = zip(trigger_warning, lowpass_warning, highpass_warning) + read_raw_brainvision, vhdr_fname=vhdr_mixed_highpass_hz_path, eog=eog + ) - assert (all(any([trg, lp, hp]) for trg, lp, hp in expected_warnings)) + w = [str(ww.message) for ww in w] + assert not any("will be dropped" in ww for ww in w), w + assert not any("different lowpass filters" in ww for ww in w), w + assert any("different highpass filters" in ww for ww in w), w - assert_equal(raw.info['highpass'], 5.) - assert_equal(raw.info['lowpass'], 250.) + assert raw.info["highpass"] == 5.0 + assert raw.info["lowpass"] == 250.0 def test_brainvision_data_lowpass_filters(): """Test files with amplifier LP filter settings.""" # Homogeneous lowpass in Hertz (default measurement unit) - raw = _test_raw_reader( - read_raw_brainvision, vhdr_fname=vhdr_lowpass_path, eog=eog - ) + raw = _test_raw_reader(read_raw_brainvision, vhdr_fname=vhdr_lowpass_path, eog=eog) - assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) - assert_equal(raw.info['lowpass'], 250.) 
+ assert raw.info["highpass"] == 1.0 / (2 * np.pi * 10) + assert raw.info["lowpass"] == 250.0 # Heterogeneous lowpass in Hertz (default measurement unit) with pytest.warns(RuntimeWarning) as w: # event parsing @@ -414,25 +413,23 @@ def test_brainvision_data_lowpass_filters(): read_raw_brainvision, vhdr_fname=vhdr_mixed_lowpass_path, eog=eog ) - lowpass_warning = ['different lowpass filters' in str(ww.message) - for ww in w] - highpass_warning = ['different highpass filters' in str(ww.message) - for ww in w] + lowpass_warning = ["different lowpass filters" in str(ww.message) for ww in w] + highpass_warning = ["different highpass filters" in str(ww.message) for ww in w] expected_warnings = zip(lowpass_warning, highpass_warning) - assert (all(any([lp, hp]) for lp, hp in expected_warnings)) + assert any(any([lp, hp]) for lp, hp in expected_warnings) - assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) - assert_equal(raw.info['lowpass'], 250.) + assert raw.info["highpass"] == 1.0 / (2 * np.pi * 10) + assert raw.info["lowpass"] == 250.0 # Homogeneous lowpass in seconds raw = _test_raw_reader( read_raw_brainvision, vhdr_fname=vhdr_lowpass_s_path, eog=eog ) - assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) - assert_equal(raw.info['lowpass'], 1. / (2 * np.pi * 0.004)) + assert raw.info["highpass"] == 1.0 / (2 * np.pi * 10) + assert raw.info["lowpass"] == 1.0 / (2 * np.pi * 0.004) # Heterogeneous lowpass in seconds with pytest.warns(RuntimeWarning) as w: # filter settings @@ -440,17 +437,15 @@ def test_brainvision_data_lowpass_filters(): read_raw_brainvision, vhdr_fname=vhdr_mixed_lowpass_s_path, eog=eog ) - lowpass_warning = ['different lowpass filters' in str(ww.message) - for ww in w] - highpass_warning = ['different highpass filters' in str(ww.message) - for ww in w] + lowpass_warning = ["different lowpass filters" in str(ww.message) for ww in w] + highpass_warning = ["different highpass filters" in str(ww.message) for ww in w] expected_warnings = zip(lowpass_warning, highpass_warning) - assert (all(any([lp, hp]) for lp, hp in expected_warnings)) + assert any(any([lp, hp]) for lp, hp in expected_warnings) - assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 10)) - assert_equal(raw.info['lowpass'], 1. / (2 * np.pi * 0.004)) + assert raw.info["highpass"] == 1.0 / (2 * np.pi * 10) + assert raw.info["lowpass"] == 1.0 / (2 * np.pi * 0.004) def test_brainvision_data_partially_disabled_hw_filters(): @@ -458,162 +453,171 @@ def test_brainvision_data_partially_disabled_hw_filters(): with pytest.warns(RuntimeWarning) as w: # event parsing raw = _test_raw_reader( read_raw_brainvision, - vhdr_fname=vhdr_partially_disabled_hw_filter_path, eog=eog + vhdr_fname=vhdr_partially_disabled_hw_filter_path, + eog=eog, ) - trigger_warning = ['will be dropped' in str(ww.message) - for ww in w] - lowpass_warning = ['different lowpass filters' in str(ww.message) - for ww in w] - highpass_warning = ['different highpass filters' in str(ww.message) - for ww in w] + trigger_warning = ["will be dropped" in str(ww.message) for ww in w] + lowpass_warning = ["different lowpass filters" in str(ww.message) for ww in w] + highpass_warning = ["different highpass filters" in str(ww.message) for ww in w] expected_warnings = zip(trigger_warning, lowpass_warning, highpass_warning) - assert (all(any([trg, lp, hp]) for trg, lp, hp in expected_warnings)) + assert any(any([trg, lp, hp]) for trg, lp, hp in expected_warnings) - assert_equal(raw.info['highpass'], 0.) - assert_equal(raw.info['lowpass'], 500.) 
+ assert raw.info["highpass"] == 0.0 + assert raw.info["lowpass"] == 500.0 def test_brainvision_data_software_filters_latin1_global_units(): """Test reading raw Brain Vision files.""" - with pytest.warns(RuntimeWarning, match='software filter'): + with _no_dig, pytest.warns(RuntimeWarning, match="software filter"): raw = _test_raw_reader( - read_raw_brainvision, vhdr_fname=vhdr_old_path, - eog=("VEOGo", "VEOGu", "HEOGli", "HEOGre"), misc=("A2",)) + read_raw_brainvision, + vhdr_fname=vhdr_old_path, + eog=("VEOGo", "VEOGu", "HEOGli", "HEOGre"), + misc=("A2",), + ) - assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 0.9)) - assert_equal(raw.info['lowpass'], 50.) + assert raw.info["highpass"] == 1.0 / (2 * np.pi * 0.9) + assert raw.info["lowpass"] == 50.0 # test sensor name with spaces (#9299) - with pytest.warns(RuntimeWarning, match='software filter'): + with _no_dig, pytest.warns(RuntimeWarning, match="software filter"): raw = _test_raw_reader( - read_raw_brainvision, vhdr_fname=vhdr_old_longname_path, - eog=("VEOGo", "VEOGu", "HEOGli", "HEOGre"), misc=("A2",)) + read_raw_brainvision, + vhdr_fname=vhdr_old_longname_path, + eog=("VEOGo", "VEOGu", "HEOGli", "HEOGre"), + misc=("A2",), + ) - assert_equal(raw.info['highpass'], 1. / (2 * np.pi * 0.9)) - assert_equal(raw.info['lowpass'], 50.) + assert raw.info["highpass"] == 1.0 / (2 * np.pi * 0.9) + assert raw.info["lowpass"] == 50.0 def test_brainvision_data(): """Test reading raw Brain Vision files.""" - pytest.raises(IOError, read_raw_brainvision, vmrk_path) - pytest.raises(ValueError, read_raw_brainvision, vhdr_path, - preload=True, scale="foo") + pytest.raises(OSError, read_raw_brainvision, vmrk_path) + pytest.raises( + ValueError, read_raw_brainvision, vhdr_path, preload=True, scale="foo" + ) raw_py = _test_raw_reader( - read_raw_brainvision, vhdr_fname=vhdr_path, eog=eog, misc='auto' + read_raw_brainvision, vhdr_fname=vhdr_path, eog=eog, misc="auto" ) - assert ('RawBrainVision' in repr(raw_py)) + assert "RawBrainVision" in repr(raw_py) - assert_equal(raw_py.info['highpass'], 0.) - assert_equal(raw_py.info['lowpass'], 250.) 
+ assert raw_py.info["highpass"] == 0.0 + assert raw_py.info["lowpass"] == 250.0 - picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads') + picks = pick_types(raw_py.info, meg=False, eeg=True, exclude="bads") data_py, times_py = raw_py[picks] # compare with a file that was generated using MNE-C raw_bin = read_raw_fif(eeg_bin, preload=True) - picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads') + picks = pick_types(raw_py.info, meg=False, eeg=True, exclude="bads") data_bin, times_bin = raw_bin[picks] - assert_array_almost_equal(data_py, data_bin) - assert_array_almost_equal(times_py, times_bin) + assert_allclose(data_py, data_bin) + assert_allclose(times_py, times_bin) # Make sure EOG channels are marked correctly - for ch in raw_py.info['chs']: - if ch['ch_name'] in eog: - assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH) - elif ch['ch_name'] == 'STI 014': - assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH) - elif ch['ch_name'] in ('CP5', 'CP6'): - assert_equal(ch['kind'], FIFF.FIFFV_MISC_CH) - assert_equal(ch['unit'], FIFF.FIFF_UNIT_NONE) - elif ch['ch_name'] == 'ReRef': - assert_equal(ch['kind'], FIFF.FIFFV_MISC_CH) - assert_equal(ch['unit'], FIFF.FIFF_UNIT_CEL) - elif ch['ch_name'] in raw_py.info['ch_names']: - assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH) - assert_equal(ch['unit'], FIFF.FIFF_UNIT_V) + for ch in raw_py.info["chs"]: + if ch["ch_name"] in eog: + assert ch["kind"] == FIFF.FIFFV_EOG_CH + elif ch["ch_name"] == "STI 014": + assert ch["kind"] == FIFF.FIFFV_STIM_CH + elif ch["ch_name"] in ("CP5", "CP6"): + assert ch["kind"] == FIFF.FIFFV_MISC_CH + assert ch["unit"] == FIFF.FIFF_UNIT_NONE + elif ch["ch_name"] == "ReRef": + assert ch["kind"] == FIFF.FIFFV_MISC_CH + assert ch["unit"] == FIFF.FIFF_UNIT_CEL else: - raise RuntimeError("Unknown Channel: %s" % ch['ch_name']) + assert ch["ch_name"] in raw_py.info["ch_names"], f"Unknown: {ch['ch_name']}" + assert ch["kind"] == FIFF.FIFFV_EEG_CH + assert ch["unit"] == FIFF.FIFF_UNIT_V # test loading v2 - read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True, - verbose='error') + read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True, verbose="error") # test different units with alternative header file raw_units = _test_raw_reader( - read_raw_brainvision, vhdr_fname=vhdr_units_path, eog=eog, misc='auto' + read_raw_brainvision, vhdr_fname=vhdr_units_path, eog=eog, misc="auto" ) - assert_equal(raw_units.info['chs'][0]['ch_name'], 'FP1') - assert_equal(raw_units.info['chs'][0]['kind'], FIFF.FIFFV_EEG_CH) + assert raw_units.info["chs"][0]["ch_name"] == "FP1" + assert raw_units.info["chs"][0]["kind"] == FIFF.FIFFV_EEG_CH data_units, _ = raw_units[0] - assert_array_almost_equal(data_py[0, :], data_units.squeeze()) + assert_allclose(data_py[0, :], data_units.squeeze()) - assert_equal(raw_units.info['chs'][1]['ch_name'], 'FP2') - assert_equal(raw_units.info['chs'][1]['kind'], FIFF.FIFFV_EEG_CH) + assert raw_units.info["chs"][1]["ch_name"] == "FP2" + assert raw_units.info["chs"][1]["kind"] == FIFF.FIFFV_EEG_CH data_units, _ = raw_units[1] - assert_array_almost_equal(data_py[1, :], data_units.squeeze()) + assert_allclose(data_py[1, :], data_units.squeeze()) - assert_equal(raw_units.info['chs'][2]['ch_name'], 'F3') - assert_equal(raw_units.info['chs'][2]['kind'], FIFF.FIFFV_EEG_CH) + assert raw_units.info["chs"][2]["ch_name"] == "F3" + assert raw_units.info["chs"][2]["kind"] == FIFF.FIFFV_EEG_CH data_units, _ = raw_units[2] - assert_array_almost_equal(data_py[2, :], data_units.squeeze()) + assert_allclose(data_py[2, :], 
data_units.squeeze()) def test_brainvision_vectorized_data(): """Test reading BrainVision data files with vectorized data.""" - with pytest.warns(RuntimeWarning, match='software filter'): + with _no_dig, pytest.warns(RuntimeWarning, match="software filter"): raw = read_raw_brainvision(vhdr_old_path, preload=True) assert_array_equal(raw._data.shape, (29, 251)) - first_two_samples_all_chs = np.array([[+5.22000008e-06, +5.10000000e-06], - [+2.10000000e-06, +2.27000008e-06], - [+1.15000000e-06, +1.33000002e-06], - [+4.00000000e-07, +4.00000000e-07], - [-3.02999992e-06, -2.82000008e-06], - [+2.71000004e-06, +2.45000000e-06], - [+2.41000004e-06, +2.36000004e-06], - [+1.01999998e-06, +1.18000002e-06], - [-1.33999996e-06, -1.25000000e-06], - [-2.60000000e-06, -2.46000004e-06], - [+6.80000019e-07, +8.00000000e-07], - [+1.48000002e-06, +1.48999996e-06], - [+1.61000004e-06, +1.51000004e-06], - [+7.19999981e-07, +8.60000038e-07], - [-3.00000000e-07, -4.00000006e-08], - [-1.20000005e-07, +6.00000024e-08], - [+8.19999981e-07, +9.89999962e-07], - [+1.13000002e-06, +1.28000002e-06], - [+1.08000002e-06, +1.33999996e-06], - [+2.20000005e-07, +5.69999981e-07], - [-4.09999990e-07, +4.00000006e-08], - [+5.19999981e-07, +9.39999962e-07], - [+1.01000004e-06, +1.51999998e-06], - [+1.01000004e-06, +1.55000000e-06], - [-1.43000002e-06, -1.13999996e-06], - [+3.65000000e-06, +3.65999985e-06], - [+4.15999985e-06, +3.79000015e-06], - [+9.26999969e-06, +8.95999985e-06], - [-7.35999985e-06, -7.18000031e-06], - ]) - - assert_array_almost_equal(raw._data[:, :2], first_two_samples_all_chs) + first_two_samples_all_chs = np.array( + [ + [+5.22000008e-06, +5.10000000e-06], + [+2.10000000e-06, +2.27000008e-06], + [+1.15000000e-06, +1.33000002e-06], + [+4.00000000e-07, +4.00000000e-07], + [-3.02999992e-06, -2.82000008e-06], + [+2.71000004e-06, +2.45000000e-06], + [+2.41000004e-06, +2.36000004e-06], + [+1.01999998e-06, +1.18000002e-06], + [-1.33999996e-06, -1.25000000e-06], + [-2.60000000e-06, -2.46000004e-06], + [+6.80000019e-07, +8.00000000e-07], + [+1.48000002e-06, +1.48999996e-06], + [+1.61000004e-06, +1.51000004e-06], + [+7.19999981e-07, +8.60000038e-07], + [-3.00000000e-07, -4.00000006e-08], + [-1.20000005e-07, +6.00000024e-08], + [+8.19999981e-07, +9.89999962e-07], + [+1.13000002e-06, +1.28000002e-06], + [+1.08000002e-06, +1.33999996e-06], + [+2.20000005e-07, +5.69999981e-07], + [-4.09999990e-07, +4.00000006e-08], + [+5.19999981e-07, +9.39999962e-07], + [+1.01000004e-06, +1.51999998e-06], + [+1.01000004e-06, +1.55000000e-06], + [-1.43000002e-06, -1.13999996e-06], + [+3.65000000e-06, +3.65999985e-06], + [+4.15999985e-06, +3.79000015e-06], + [+9.26999969e-06, +8.95999985e-06], + [-7.35999985e-06, -7.18000031e-06], + ] + ) + + assert_allclose(raw._data[:, :2], first_two_samples_all_chs) def test_coodinates_extraction(): """Test reading of [Coordinates] section if present.""" # vhdr 2 has a Coordinates section - with pytest.warns(RuntimeWarning, match='coordinate information'): + with ( + _record_warnings(), + pytest.warns(RuntimeWarning, match="coordinate information"), + ): raw = read_raw_brainvision(vhdr_v2_path) # Basic check of extracted coordinates - assert raw.info['dig'] is not None - diglist = raw.info['dig'] - coords = np.array([dig['r'] for dig in diglist]) + assert raw.info["dig"] is not None + diglist = raw.info["dig"] + coords = np.array([dig["r"] for dig in diglist]) EXPECTED_SHAPE = ( # HL, HR, Vb, ReRef are not set in dig # but LPA, Nasion, RPA are estimated @@ -628,16 +632,16 @@ def 
test_coodinates_extraction(): # vhdr 1 does not have a Coordinates section raw2 = read_raw_brainvision(vhdr_path) - assert raw2.info['dig'] is None + assert raw2.info["dig"] is None @testing.requires_testing_data def test_brainvision_neuroone_export(): """Test Brainvision file exported with neuroone system.""" - raw = read_raw_brainvision(neuroone_vhdr, verbose='error') - assert raw.info['meas_date'] is None - assert len(raw.info['chs']) == 65 - assert raw.info['sfreq'] == 5000. + raw = read_raw_brainvision(neuroone_vhdr, verbose="error") + assert raw.info["meas_date"] is None + assert len(raw.info["chs"]) == 65 + assert raw.info["sfreq"] == 5000.0 @testing.requires_testing_data @@ -649,49 +653,140 @@ def test_read_vmrk_annotations(tmp_path): # delete=False is for Windows compatibility with open(vmrk_path) as myfile: head = [next(myfile) for x in range(6)] - fname = tmp_path / 'temp.vmrk' - with open(str(fname), 'w') as temp: + fname = tmp_path / "temp.vmrk" + with open(str(fname), "w") as temp: for item in head: temp.write(item) read_annotations(fname, sfreq=sfreq) +def test_ignore_marker_types(): + """Test ignore marker types.""" + # default behavior (do not ignore marker types) + raw = read_raw_brainvision(vhdr_path) + expected_descriptions = [ + "Stimulus/S253", + "Stimulus/S255", + "Event/254", + "Stimulus/S255", + "Event/254", + "Stimulus/S255", + "Stimulus/S253", + "Stimulus/S255", + "Response/R255", + "Event/254", + "Stimulus/S255", + "SyncStatus/Sync On", + "Optic/O 1", + ] + assert_array_equal(raw.annotations.description, expected_descriptions) + + # ignore marker types + raw = read_raw_brainvision(vhdr_path, ignore_marker_types=True) + expected_descriptions = [ + "S253", + "S255", + "254", + "S255", + "254", + "S255", + "S253", + "S255", + "R255", + "254", + "S255", + "Sync On", + "O 1", + ] + assert_array_equal(raw.annotations.description, expected_descriptions) + + @testing.requires_testing_data def test_read_vhdr_annotations_and_events(tmp_path): """Test load brainvision annotations and parse them to events.""" # First we add a custom event that contains a comma in its description - for src, dest in zip((vhdr_path, vmrk_path, eeg_path), - ('test.vhdr', 'test.vmrk', 'test.eeg')): + for src, dest in zip( + (vhdr_path, vmrk_path, eeg_path), ("test.vhdr", "test.vmrk", "test.eeg") + ): shutil.copyfile(src, tmp_path / dest) # Commas are encoded as "\1" - with open(tmp_path / 'test.vmrk', 'a') as fout: + with open(tmp_path / "test.vmrk", "a") as fout: fout.write(r"Mk15=Comma\1Type,CommaValue\11,7800,1,0\n") sfreq = 1000.0 expected_orig_time = _stamp_to_dt((1384359243, 794232)) expected_onset_latency = np.array( - [0, 486., 496., 1769., 1779., 3252., 3262., 4935., 4945., 5999., 6619., - 6629., 7629., 7699., 7799.] 
+ [ + 486.0, + 496.0, + 1769.0, + 1779.0, + 3252.0, + 3262.0, + 4935.0, + 4945.0, + 5999.0, + 6619.0, + 6629.0, + 7629.0, + 7699.0, + 7799.0, + ] ) expected_annot_description = [ - 'New Segment/', 'Stimulus/S253', 'Stimulus/S255', 'Event/254', - 'Stimulus/S255', 'Event/254', 'Stimulus/S255', 'Stimulus/S253', - 'Stimulus/S255', 'Response/R255', 'Event/254', 'Stimulus/S255', - 'SyncStatus/Sync On', 'Optic/O 1', 'Comma,Type/CommaValue,1' + "Stimulus/S253", + "Stimulus/S255", + "Event/254", + "Stimulus/S255", + "Event/254", + "Stimulus/S255", + "Stimulus/S253", + "Stimulus/S255", + "Response/R255", + "Event/254", + "Stimulus/S255", + "SyncStatus/Sync On", + "Optic/O 1", + "Comma,Type/CommaValue,1", ] - expected_events = np.stack([ - expected_onset_latency, - np.zeros_like(expected_onset_latency), - [99999, 253, 255, 254, 255, 254, 255, 253, 255, 1255, 254, 255, 99998, - 2001, 10001], - ]).astype('int64').T - expected_event_id = {'New Segment/': 99999, 'Stimulus/S253': 253, - 'Stimulus/S255': 255, 'Event/254': 254, - 'Response/R255': 1255, 'SyncStatus/Sync On': 99998, - 'Optic/O 1': 2001, 'Comma,Type/CommaValue,1': 10001} - - raw = read_raw_brainvision(tmp_path / 'test.vhdr', eog=eog) + expected_events = ( + np.stack( + [ + expected_onset_latency, + np.zeros_like(expected_onset_latency), + [ + 253, + 255, + 254, + 255, + 254, + 255, + 253, + 255, + 1255, + 254, + 255, + 99998, + 2001, + 10001, + ], + ] + ) + .astype("int64") + .T + ) + expected_event_id = { + "Stimulus/S253": 253, + "Stimulus/S255": 255, + "Event/254": 254, + "Response/R255": 1255, + "SyncStatus/Sync On": 99998, + "Optic/O 1": 2001, + "Comma,Type/CommaValue,1": 10001, + } + + raw = read_raw_brainvision(tmp_path / "test.vhdr", eog=eog) # validate annotations assert raw.annotations.orig_time == expected_orig_time @@ -704,14 +799,15 @@ def test_read_vhdr_annotations_and_events(tmp_path): assert event_id == expected_event_id # validate that None gives us a sorted list - expected_none_event_id = {desc: idx + 1 for idx, desc in enumerate(sorted( - event_id.keys()))} + expected_none_event_id = { + desc: idx + 1 for idx, desc in enumerate(sorted(event_id.keys())) + } events, event_id = events_from_annotations(raw, event_id=None) assert event_id == expected_none_event_id # Add some custom ones, plus a 2-digit one - s_10 = 'Stimulus/S 10' - raw.annotations.append([1, 2, 3], 10, ['ZZZ', s_10, 'YYY']) + s_10 = "Stimulus/S 10" + raw.annotations.append([1, 2, 3], 10, ["ZZZ", s_10, "YYY"]) # others starting at 10001 ... 
# we already have "Comma,Type/CommaValue,1" as 10001 expected_event_id.update(YYY=10002, ZZZ=10003) @@ -721,7 +817,7 @@ def test_read_vhdr_annotations_and_events(tmp_path): # Concatenating two shouldn't change the resulting event_id # (BAD and EDGE should be ignored) - with pytest.warns(RuntimeWarning, match='expanding outside'): + with pytest.warns(RuntimeWarning, match="expanding outside"): raw_concat = concatenate_raws([raw.copy(), raw.copy()]) _, event_id = events_from_annotations(raw_concat) assert event_id == expected_event_id @@ -730,14 +826,16 @@ def test_read_vhdr_annotations_and_events(tmp_path): @testing.requires_testing_data def test_automatic_vmrk_sfreq_recovery(): """Test proper sfreq inference by checking the onsets.""" - assert_array_equal(read_annotations(vmrk_path, sfreq='auto'), - read_annotations(vmrk_path, sfreq=1000.0)) + assert_array_equal( + read_annotations(vmrk_path, sfreq="auto"), + read_annotations(vmrk_path, sfreq=1000.0), + ) @testing.requires_testing_data def test_event_id_stability_when_save_and_fif_reload(tmp_path): """Test load events from brainvision annotations when read_raw_fif.""" - fname = tmp_path / 'bv-raw.fif' + fname = tmp_path / "bv-raw.fif" raw = read_raw_brainvision(vhdr_path, eog=eog) original_events, original_event_id = events_from_annotations(raw) @@ -751,55 +849,109 @@ def test_event_id_stability_when_save_and_fif_reload(tmp_path): def test_parse_impedance(): """Test case for parsing the impedances from header.""" - expected_imp_meas_time = datetime.datetime(2013, 11, 13, 16, 12, 27, - tzinfo=datetime.timezone.utc) - expected_imp_unit = 'kOhm' + expected_imp_meas_time = datetime.datetime( + 2013, 11, 13, 16, 12, 27, tzinfo=datetime.timezone.utc + ) + expected_imp_unit = "kOhm" expected_electrodes = [ - 'FP1', 'FP2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'F7', - 'F8', 'P7', 'P8', 'Fz', 'FCz', 'Cz', 'CPz', 'Pz', 'POz', 'FC1', 'FC2', - 'CP1', 'CP2', 'FC5', 'FC6', 'CP5', 'CP6', 'HL', 'HR', 'Vb', 'ReRef', - 'Ref', 'Gnd' + "FP1", + "FP2", + "F3", + "F4", + "C3", + "C4", + "P3", + "P4", + "O1", + "O2", + "F7", + "F8", + "P7", + "P8", + "Fz", + "FCz", + "Cz", + "CPz", + "Pz", + "POz", + "FC1", + "FC2", + "CP1", + "CP2", + "FC5", + "FC6", + "CP5", + "CP6", + "HL", + "HR", + "Vb", + "ReRef", + "Ref", + "Gnd", ] n_electrodes = len(expected_electrodes) - expected_imps = [np.nan] * (n_electrodes - 2) + [0., 4.] - expected_imp_lower_bound = 0. - expected_imp_upper_bound = [100.] * (n_electrodes - 2) + [10., 10.] 
- - expected_impedances = {elec: { - 'imp': expected_imps[i], - 'imp_unit': expected_imp_unit, - 'imp_meas_time': expected_imp_meas_time, - 'imp_lower_bound': expected_imp_lower_bound, - 'imp_upper_bound': expected_imp_upper_bound[i], - 'imp_range_unit': expected_imp_unit, - } for i, elec in enumerate(expected_electrodes)} + expected_imps = [np.nan] * (n_electrodes - 2) + [0.0, 4.0] + expected_imp_lower_bound = 0.0 + expected_imp_upper_bound = [100.0] * (n_electrodes - 2) + [10.0, 10.0] + + expected_impedances = { + elec: { + "imp": expected_imps[i], + "imp_unit": expected_imp_unit, + "imp_meas_time": expected_imp_meas_time, + "imp_lower_bound": expected_imp_lower_bound, + "imp_upper_bound": expected_imp_upper_bound[i], + "imp_range_unit": expected_imp_unit, + } + for i, elec in enumerate(expected_electrodes) + } raw = read_raw_brainvision(vhdr_path, eog=eog) - assert object_diff(expected_impedances, raw.impedances) == '' + assert object_diff(expected_impedances, raw.impedances) == "" # Test "Impedances Imported from actiCAP Control Software" - expected_imp_meas_time = expected_imp_meas_time.replace(hour=10, - minute=17, - second=2) - tmpidx = expected_electrodes.index('CP6') + expected_imp_meas_time = expected_imp_meas_time.replace( + hour=10, minute=17, second=2 + ) + tmpidx = expected_electrodes.index("CP6") expected_electrodes = expected_electrodes[:tmpidx] + [ - 'CP 6', 'ECG+', 'ECG-', 'HEOG+', 'HEOG-', 'VEOG+', 'VEOG-', 'ReRef', - 'Ref', 'Gnd' + "CP 6", + "ECG+", + "ECG-", + "HEOG+", + "HEOG-", + "VEOG+", + "VEOG-", + "ReRef", + "Ref", + "Gnd", ] n_electrodes = len(expected_electrodes) expected_imps = [np.nan] * (n_electrodes - 9) + [ - 35., 46., 6., 8., 3., 4., 0., 8., 2.5 + 35.0, + 46.0, + 6.0, + 8.0, + 3.0, + 4.0, + 0.0, + 8.0, + 2.5, ] - expected_impedances = {elec: { - 'imp': expected_imps[i], - 'imp_unit': expected_imp_unit, - 'imp_meas_time': expected_imp_meas_time, - } for i, elec in enumerate(expected_electrodes)} + expected_impedances = { + elec: { + "imp": expected_imps[i], + "imp_unit": expected_imp_unit, + "imp_meas_time": expected_imp_meas_time, + } + for i, elec in enumerate(expected_electrodes) + } - with pytest.warns(RuntimeWarning, match='different .*pass filters'): - raw = read_raw_brainvision(vhdr_mixed_lowpass_path, - eog=['HEOG', 'VEOG'], misc=['ECG']) - assert object_diff(expected_impedances, raw.impedances) == '' + with pytest.warns(RuntimeWarning, match="different .*pass filters"): + raw = read_raw_brainvision( + vhdr_mixed_lowpass_path, eog=["HEOG", "VEOG"], misc=["ECG"] + ) + assert object_diff(expected_impedances, raw.impedances) == "" @testing.requires_testing_data @@ -810,6 +962,6 @@ def test_ahdr_format(): expected_lp = 250.0 raw = read_raw_brainvision(vamp_ahdr) - assert raw.info['nchan'] == expected_num_channels - assert raw.info['highpass'] == expected_hp - assert raw.info['lowpass'] == expected_lp + assert raw.info["nchan"] == expected_num_channels + assert raw.info["highpass"] == expected_hp + assert raw.info["lowpass"] == expected_lp diff --git a/mne/io/bti/__init__.py b/mne/io/bti/__init__.py index aeb4d184baf..3a9d58fe44c 100644 --- a/mne/io/bti/__init__.py +++ b/mne/io/bti/__init__.py @@ -1,5 +1,7 @@ """BTi module for conversion to FIF.""" -# Author: Denis A. Engemann +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
 from .bti import read_raw_bti
diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py
index 9f83d63b7c5..a992d3c7694 100644
--- a/mne/io/bti/bti.py
+++ b/mne/io/bti/bti.py
@@ -1,12 +1,6 @@
-# Authors: Denis A. Engemann
-#          Martin Luessi
-#          Alexandre Gramfort
-#          Matti Hämäläinen
-#          Yuval Harpaz
-#          Joan Massich
-#          Teon Brooks
-#
-# simplified BSD-3 license
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
 
 import functools
 import os.path as op
@@ -15,54 +9,65 @@
 import numpy as np
 
-from ...utils import logger, verbose, _stamp_to_dt, path_like
-from ...transforms import (combine_transforms, invert_transform,
-                           Transform)
-from .._digitization import _make_bti_dig_points
-from ..constants import FIFF
-from .. import BaseRaw, _coil_trans_to_loc, _loc_to_coil_trans, _empty_info
-from ..utils import _mult_cal_one, read_str
+from ..._fiff._digitization import _make_bti_dig_points
+from ..._fiff.constants import FIFF
+from ..._fiff.meas_info import _empty_info
+from ..._fiff.tag import _coil_trans_to_loc, _loc_to_coil_trans
+from ..._fiff.utils import _mult_cal_one, read_str
+from ...transforms import Transform, combine_transforms, invert_transform
+from ...utils import _stamp_to_dt, _validate_type, logger, path_like, verbose
+from ..base import BaseRaw
 from .constants import BTI
-from .read import (read_int32, read_int16, read_float, read_double,
-                   read_transform, read_char, read_int64, read_uint16,
-                   read_uint32, read_double_matrix, read_float_matrix,
-                   read_int16_matrix, read_dev_header)
-
-FIFF_INFO_DIG_FIELDS = ('kind', 'ident', 'r', 'coord_frame')
-FIFF_INFO_DIG_DEFAULTS = (None, None, None, FIFF.FIFFV_COORD_HEAD)
-
-BTI_WH2500_REF_MAG = ('MxA', 'MyA', 'MzA', 'MxaA', 'MyaA', 'MzaA')
-BTI_WH2500_REF_GRAD = ('GxxA', 'GyyA', 'GyxA', 'GzaA', 'GzyA')
-
-dtypes = zip(list(range(1, 5)), ('>i2', '>i4', '>f4', '>f8'))
+from .read import (
+    read_char,
+    read_dev_header,
+    read_double,
+    read_double_matrix,
+    read_float,
+    read_float_matrix,
+    read_int16,
+    read_int16_matrix,
+    read_int32,
+    read_int64,
+    read_transform,
+    read_uint16,
+    read_uint32,
+)
+
+BTI_WH2500_REF_MAG = ("MxA", "MyA", "MzA", "MxaA", "MyaA", "MzaA")
+BTI_WH2500_REF_GRAD = ("GxxA", "GyyA", "GyxA", "GzaA", "GzyA")
+
+dtypes = zip(list(range(1, 5)), (">i2", ">i4", ">f4", ">f8"))
 DTYPES = {i: np.dtype(t) for i, t in dtypes}
 
 
 def _instantiate_default_info_chs():
     """Populate entries in info['chs'] with default values."""
-    return dict(loc=np.array([0, 0, 0, 1] * 3, dtype='f4'),
-                ch_name=None,
-                unit_mul=FIFF.FIFF_UNITM_NONE,
-                coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
-                coil_type=FIFF.FIFFV_COIL_NONE,
-                range=1.0,
-                unit=FIFF.FIFF_UNIT_V,
-                cal=1.0,
-                scanno=None,
-                kind=FIFF.FIFFV_MISC_CH,
-                logno=None)
-
-
-class _bytes_io_mock_context():
+    return dict(
+        loc=np.array([0, 0, 0, 1] * 3, dtype="f4"),
+        ch_name=None,
+        unit_mul=FIFF.FIFF_UNITM_NONE,
+        coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
+        coil_type=FIFF.FIFFV_COIL_NONE,
+        range=1.0,
+        unit=FIFF.FIFF_UNIT_V,
+        cal=1.0,
+        scanno=None,
+        kind=FIFF.FIFFV_MISC_CH,
+        logno=None,
+    )
+
+
+class _bytes_io_mock_context:
     """Make a context for BytesIO."""
 
-    def __init__(self, target):  # noqa: D102
+    def __init__(self, target):
         self.target = target
 
     def __enter__(self):  # noqa: D105
         return self.target
 
-    def __exit__(self, type, value, tb):  # noqa: D105
+    def __exit__(self, exception_type, value, tb):  # noqa: D105
         pass
 
@@ -73,10 +78,10 @@ def _bti_open(fname, *args, **kwargs):
     elif isinstance(fname, BytesIO):
         return _bytes_io_mock_context(fname)
     else:
- raise RuntimeError('Cannot mock this.') + raise RuntimeError("Cannot mock this.") -def _get_bti_dev_t(adjust=0., translation=(0.0, 0.02, 0.11)): +def _get_bti_dev_t(adjust=0.0, translation=(0.0, 0.02, 0.11)): """Get the general Magnes3600WH to Neuromag coordinate transform. Parameters @@ -93,20 +98,22 @@ def _get_bti_dev_t(adjust=0., translation=(0.0, 0.02, 0.11)): m_nm_t : ndarray 4 x 4 rotation, translation, scaling matrix. """ - flip_t = np.array([[0., -1., 0.], - [1., 0., 0.], - [0., 0., 1.]]) + flip_t = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) rad = np.deg2rad(adjust) - adjust_t = np.array([[1., 0., 0.], - [0., np.cos(rad), -np.sin(rad)], - [0., np.sin(rad), np.cos(rad)]]) + adjust_t = np.array( + [ + [1.0, 0.0, 0.0], + [0.0, np.cos(rad), -np.sin(rad)], + [0.0, np.sin(rad), np.cos(rad)], + ] + ) m_nm_t = np.eye(4) m_nm_t[:3, :3] = np.dot(flip_t, adjust_t) m_nm_t[:3, 3] = translation return m_nm_t -def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')): +def _rename_channels(names, ecg_ch="E31", eog_ch=("E63", "E64")): """Rename appropriately ordered list of channel names. Parameters @@ -120,28 +127,28 @@ def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')): List of names, channel names in Neuromag style """ new = list() - ref_mag, ref_grad, eog, eeg, ext = [count(1) for _ in range(5)] + ref_mag, ref_grad, eog, eeg, ext = (count(1) for _ in range(5)) for i, name in enumerate(names, 1): - if name.startswith('A'): - name = 'MEG %3.3d' % i - elif name == 'RESPONSE': - name = 'STI 013' - elif name == 'TRIGGER': - name = 'STI 014' + if name.startswith("A"): + name = f"MEG {i:03d}" + elif name == "RESPONSE": + name = "STI 013" + elif name == "TRIGGER": + name = "STI 014" elif any(name == k for k in eog_ch): - name = 'EOG %3.3d' % next(eog) + name = f"EOG {next(eog):03d}" elif name == ecg_ch: - name = 'ECG 001' - elif name.startswith('E'): - name = 'EEG %3.3d' % next(eeg) - elif name == 'UACurrent': - name = 'UTL 001' - elif name.startswith('M'): - name = 'RFM %3.3d' % next(ref_mag) - elif name.startswith('G'): - name = 'RFG %3.3d' % next(ref_grad) - elif name.startswith('X'): - name = 'EXT %3.3d' % next(ext) + name = "ECG 001" + elif name.startswith("E"): + name = f"EEG {next(eeg):03d}" + elif name == "UACurrent": + name = "UTL 001" + elif name.startswith("M"): + name = f"RFM {next(ref_mag):03d}" + elif name.startswith("G"): + name = f"RFG {next(ref_grad):03d}" + elif name.startswith("X"): + name = f"EXT {next(ext):03d}" new += [name] @@ -151,40 +158,40 @@ def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')): # read the points def _read_head_shape(fname): """Read the head shape.""" - with _bti_open(fname, 'rb') as fid: + with _bti_open(fname, "rb") as fid: fid.seek(BTI.FILE_HS_N_DIGPOINTS) _n_dig_points = read_int32(fid) idx_points = read_double_matrix(fid, BTI.DATA_N_IDX_POINTS, 3) dig_points = read_double_matrix(fid, _n_dig_points, 3) # reorder to lpa, rpa, nasion so = is direct. - nasion, lpa, rpa = [idx_points[_, :] for _ in [2, 0, 1]] - hpi = idx_points[3:len(idx_points), :] + nasion, lpa, rpa = (idx_points[_, :] for _ in [2, 0, 1]) + hpi = idx_points[3 : len(idx_points), :] return nasion, lpa, rpa, hpi, dig_points def _check_nan_dev_head_t(dev_ctf_t): """Make sure we deal with nans.""" - has_nan = np.isnan(dev_ctf_t['trans']) + has_nan = np.isnan(dev_ctf_t["trans"]) if np.any(has_nan): - logger.info('Missing values BTI dev->head transform. 
' - 'Replacing with identity matrix.') - dev_ctf_t['trans'] = np.identity(4) + logger.info( + "Missing values BTI dev->head transform. Replacing with identity matrix." + ) + dev_ctf_t["trans"] = np.identity(4) def _convert_coil_trans(coil_trans, dev_ctf_t, bti_dev_t): """Convert the coil trans.""" - t = combine_transforms(invert_transform(dev_ctf_t), bti_dev_t, - 'ctf_head', 'meg') - t = np.dot(t['trans'], coil_trans) + t = combine_transforms(invert_transform(dev_ctf_t), bti_dev_t, "ctf_head", "meg") + t = np.dot(t["trans"], coil_trans) return t def _correct_offset(fid): """Align fid pointer.""" current = fid.tell() - if ((current % BTI.FILE_CURPOS) != 0): + if (current % BTI.FILE_CURPOS) != 0: offset = current % BTI.FILE_CURPOS fid.seek(BTI.FILE_CURPOS - (offset), 1) @@ -202,346 +209,407 @@ def _read_config(fname): cfg : dict The config blocks found. """ - with _bti_open(fname, 'rb') as fid: + with _bti_open(fname, "rb") as fid: cfg = dict() - cfg['hdr'] = {'version': read_int16(fid), - 'site_name': read_str(fid, 32), - 'dap_hostname': read_str(fid, 16), - 'sys_type': read_int16(fid), - 'sys_options': read_int32(fid), - 'supply_freq': read_int16(fid), - 'total_chans': read_int16(fid), - 'system_fixed_gain': read_float(fid), - 'volts_per_bit': read_float(fid), - 'total_sensors': read_int16(fid), - 'total_user_blocks': read_int16(fid), - 'next_der_chan_no': read_int16(fid)} + cfg["hdr"] = { + "version": read_int16(fid), + "site_name": read_str(fid, 32), + "dap_hostname": read_str(fid, 16), + "sys_type": read_int16(fid), + "sys_options": read_int32(fid), + "supply_freq": read_int16(fid), + "total_chans": read_int16(fid), + "system_fixed_gain": read_float(fid), + "volts_per_bit": read_float(fid), + "total_sensors": read_int16(fid), + "total_user_blocks": read_int16(fid), + "next_der_chan_no": read_int16(fid), + } fid.seek(2, 1) - cfg['checksum'] = read_uint32(fid) - cfg['reserved'] = read_char(fid, 32) - cfg['transforms'] = [read_transform(fid) for t in - range(cfg['hdr']['total_sensors'])] + cfg["checksum"] = read_uint32(fid) + cfg["reserved"] = read_char(fid, 32) + cfg["transforms"] = [ + read_transform(fid) for t in range(cfg["hdr"]["total_sensors"]) + ] - cfg['user_blocks'] = dict() - for block in range(cfg['hdr']['total_user_blocks']): + cfg["user_blocks"] = dict() + for block in range(cfg["hdr"]["total_user_blocks"]): ub = dict() - ub['hdr'] = {'nbytes': read_uint32(fid), - 'kind': read_str(fid, 20), - 'checksum': read_int32(fid), - 'username': read_str(fid, 32), - 'timestamp': read_uint32(fid), - 'user_space_size': read_uint32(fid), - 'reserved': read_char(fid, 32)} + ub["hdr"] = { + "nbytes": read_uint32(fid), + "kind": read_str(fid, 20), + "checksum": read_int32(fid), + "username": read_str(fid, 32), + "timestamp": read_uint32(fid), + "user_space_size": read_uint32(fid), + "reserved": read_char(fid, 32), + } _correct_offset(fid) start_bytes = fid.tell() - kind = ub['hdr'].pop('kind') + kind = ub["hdr"].pop("kind") if not kind: # make sure reading goes right. Should never be empty - raise RuntimeError('Could not read user block. Probably you ' - 'acquired data using a BTi version ' - 'currently not supported. Please contact ' - 'the mne-python developers.') - dta, cfg['user_blocks'][kind] = dict(), ub - if kind in [v for k, v in BTI.items() if k[:5] == 'UB_B_']: + raise RuntimeError( + "Could not read user block. Probably you " + "acquired data using a BTi version " + "currently not supported. Please contact " + "the mne-python developers." 
+ ) + dta, cfg["user_blocks"][kind] = dict(), ub + if kind in [v for k, v in BTI.items() if k[:5] == "UB_B_"]: if kind == BTI.UB_B_MAG_INFO: - dta['version'] = read_int32(fid) + dta["version"] = read_int32(fid) fid.seek(20, 1) - dta['headers'] = list() + dta["headers"] = list() for hdr in range(6): - d = {'name': read_str(fid, 16), - 'transform': read_transform(fid), - 'units_per_bit': read_float(fid)} - dta['headers'] += [d] + d = { + "name": read_str(fid, 16), + "transform": read_transform(fid), + "units_per_bit": read_float(fid), + } + dta["headers"] += [d] fid.seek(20, 1) elif kind == BTI.UB_B_COH_POINTS: - dta['n_points'] = read_int32(fid) - dta['status'] = read_int32(fid) - dta['points'] = [] - for pnt in range(16): - d = {'pos': read_double_matrix(fid, 1, 3), - 'direction': read_double_matrix(fid, 1, 3), - 'error': read_double(fid)} - dta['points'] += [d] + dta["n_points"] = read_int32(fid) + dta["status"] = read_int32(fid) + dta["points"] = [ + { + "pos": read_double_matrix(fid, 1, 3), + "direction": read_double_matrix(fid, 1, 3), + "error": read_double(fid), + } + for _ in range(16) + ] elif kind == BTI.UB_B_CCP_XFM_BLOCK: - dta['method'] = read_int32(fid) + dta["method"] = read_int32(fid) # handle difference btw/ linux (0) and solaris (4) - size = 0 if ub['hdr']['user_space_size'] == 132 else 4 + size = 0 if ub["hdr"]["user_space_size"] == 132 else 4 fid.seek(size, 1) - dta['transform'] = read_transform(fid) + dta["transform"] = read_transform(fid) elif kind == BTI.UB_B_EEG_LOCS: - dta['electrodes'] = [] + dta["electrodes"] = [] while True: - d = {'label': read_str(fid, 16), - 'location': read_double_matrix(fid, 1, 3)} - if not d['label']: + d = { + "label": read_str(fid, 16), + "location": read_double_matrix(fid, 1, 3), + } + if not d["label"]: break - dta['electrodes'] += [d] + dta["electrodes"] += [d] - elif kind in [BTI.UB_B_WHC_CHAN_MAP_VER, - BTI.UB_B_WHS_SUBSYS_VER]: - dta['version'] = read_int16(fid) - dta['struct_size'] = read_int16(fid) - dta['entries'] = read_int16(fid) + elif kind in [BTI.UB_B_WHC_CHAN_MAP_VER, BTI.UB_B_WHS_SUBSYS_VER]: + dta["version"] = read_int16(fid) + dta["struct_size"] = read_int16(fid) + dta["entries"] = read_int16(fid) fid.seek(8, 1) elif kind == BTI.UB_B_WHC_CHAN_MAP: num_channels = None - for name, data in cfg['user_blocks'].items(): + for name, data in cfg["user_blocks"].items(): if name == BTI.UB_B_WHC_CHAN_MAP_VER: - num_channels = data['entries'] + num_channels = data["entries"] break if num_channels is None: - raise ValueError('Cannot find block %s to determine ' - 'number of channels' - % BTI.UB_B_WHC_CHAN_MAP_VER) + raise ValueError( + f"Cannot find block {BTI.UB_B_WHC_CHAN_MAP_VER} to " + "determine number of channels" + ) - dta['channels'] = list() + dta["channels"] = list() for i in range(num_channels): - d = {'subsys_type': read_int16(fid), - 'subsys_num': read_int16(fid), - 'card_num': read_int16(fid), - 'chan_num': read_int16(fid), - 'recdspnum': read_int16(fid)} - dta['channels'] += [d] + d = { + "subsys_type": read_int16(fid), + "subsys_num": read_int16(fid), + "card_num": read_int16(fid), + "chan_num": read_int16(fid), + "recdspnum": read_int16(fid), + } + dta["channels"] += [d] fid.seek(8, 1) elif kind == BTI.UB_B_WHS_SUBSYS: num_subsys = None - for name, data in cfg['user_blocks'].items(): + for name, data in cfg["user_blocks"].items(): if name == BTI.UB_B_WHS_SUBSYS_VER: - num_subsys = data['entries'] + num_subsys = data["entries"] break if num_subsys is None: - raise ValueError('Cannot find block %s to determine' - ' 
number of subsystems' - % BTI.UB_B_WHS_SUBSYS_VER) - - dta['subsys'] = list() - for sub_key in range(num_subsys): - d = {'subsys_type': read_int16(fid), - 'subsys_num': read_int16(fid), - 'cards_per_sys': read_int16(fid), - 'channels_per_card': read_int16(fid), - 'card_version': read_int16(fid)} + raise ValueError( + f"Cannot find block {BTI.UB_B_WHS_SUBSYS_VER} to determine" + " number of subsystems" + ) + + dta["subsys"] = list() + for _ in range(num_subsys): + d = { + "subsys_type": read_int16(fid), + "subsys_num": read_int16(fid), + "cards_per_sys": read_int16(fid), + "channels_per_card": read_int16(fid), + "card_version": read_int16(fid), + } fid.seek(2, 1) - d.update({'offsetdacgain': read_float(fid), - 'squid_type': read_int32(fid), - 'timesliceoffset': read_int16(fid), - 'padding': read_int16(fid), - 'volts_per_bit': read_float(fid)}) + d.update( + { + "offsetdacgain": read_float(fid), + "squid_type": read_int32(fid), + "timesliceoffset": read_int16(fid), + "padding": read_int16(fid), + "volts_per_bit": read_float(fid), + } + ) - dta['subsys'] += [d] + dta["subsys"] += [d] elif kind == BTI.UB_B_CH_LABELS: - dta['version'] = read_int32(fid) - dta['entries'] = read_int32(fid) + dta["version"] = read_int32(fid) + dta["entries"] = read_int32(fid) fid.seek(16, 1) - dta['labels'] = list() - for label in range(dta['entries']): - dta['labels'] += [read_str(fid, 16)] + dta["labels"] = list() + for label in range(dta["entries"]): + dta["labels"] += [read_str(fid, 16)] elif kind == BTI.UB_B_CALIBRATION: - dta['sensor_no'] = read_int16(fid) + dta["sensor_no"] = read_int16(fid) fid.seek(2, 1) - dta['timestamp'] = read_int32(fid) - dta['logdir'] = read_str(fid, 256) + dta["timestamp"] = read_int32(fid) + dta["logdir"] = read_str(fid, 256) elif kind == BTI.UB_B_SYS_CONFIG_TIME: # handle difference btw/ linux (256) and solaris (512) - size = 256 if ub['hdr']['user_space_size'] == 260 else 512 - dta['sysconfig_name'] = read_str(fid, size) - dta['timestamp'] = read_int32(fid) + size = 256 if ub["hdr"]["user_space_size"] == 260 else 512 + dta["sysconfig_name"] = read_str(fid, size) + dta["timestamp"] = read_int32(fid) elif kind == BTI.UB_B_DELTA_ENABLED: - dta['delta_enabled'] = read_int16(fid) + dta["delta_enabled"] = read_int16(fid) elif kind in [BTI.UB_B_E_TABLE_USED, BTI.UB_B_E_TABLE]: - dta['hdr'] = {'version': read_int32(fid), - 'entry_size': read_int32(fid), - 'n_entries': read_int32(fid), - 'filtername': read_str(fid, 16), - 'n_e_values': read_int32(fid), - 'reserved': read_str(fid, 28)} - - if dta['hdr']['version'] == 2: + dta["hdr"] = { + "version": read_int32(fid), + "entry_size": read_int32(fid), + "n_entries": read_int32(fid), + "filtername": read_str(fid, 16), + "n_e_values": read_int32(fid), + "reserved": read_str(fid, 28), + } + + if dta["hdr"]["version"] == 2: size = 16 - dta['ch_names'] = [read_str(fid, size) for ch in - range(dta['hdr']['n_entries'])] - dta['e_ch_names'] = [read_str(fid, size) for ch in - range(dta['hdr']['n_e_values'])] - - rows = dta['hdr']['n_entries'] - cols = dta['hdr']['n_e_values'] - dta['etable'] = read_float_matrix(fid, rows, cols) + dta["ch_names"] = [ + read_str(fid, size) for ch in range(dta["hdr"]["n_entries"]) + ] + dta["e_ch_names"] = [ + read_str(fid, size) + for ch in range(dta["hdr"]["n_e_values"]) + ] + + rows = dta["hdr"]["n_entries"] + cols = dta["hdr"]["n_e_values"] + dta["etable"] = read_float_matrix(fid, rows, cols) else: # handle MAGNES2500 naming scheme - dta['ch_names'] = ['WH2500'] * dta['hdr']['n_e_values'] - dta['hdr']['n_e_values'] = 
6 - dta['e_ch_names'] = BTI_WH2500_REF_MAG - rows = dta['hdr']['n_entries'] - cols = dta['hdr']['n_e_values'] - dta['etable'] = read_float_matrix(fid, rows, cols) - - elif any([kind == BTI.UB_B_WEIGHTS_USED, - kind[:4] == BTI.UB_B_WEIGHT_TABLE]): - dta['hdr'] = dict( + dta["ch_names"] = ["WH2500"] * dta["hdr"]["n_e_values"] + dta["hdr"]["n_e_values"] = 6 + dta["e_ch_names"] = BTI_WH2500_REF_MAG + rows = dta["hdr"]["n_entries"] + cols = dta["hdr"]["n_e_values"] + dta["etable"] = read_float_matrix(fid, rows, cols) + + elif any( + [kind == BTI.UB_B_WEIGHTS_USED, kind[:4] == BTI.UB_B_WEIGHT_TABLE] + ): + dta["hdr"] = dict( version=read_int32(fid), n_bytes=read_uint32(fid), n_entries=read_uint32(fid), - name=read_str(fid, 32)) - if dta['hdr']['version'] == 2: - dta['hdr'].update( + name=read_str(fid, 32), + ) + if dta["hdr"]["version"] == 2: + dta["hdr"].update( description=read_str(fid, 80), n_anlg=read_uint32(fid), n_dsp=read_uint32(fid), - reserved=read_str(fid, 72)) - dta['ch_names'] = [read_str(fid, 16) for ch in - range(dta['hdr']['n_entries'])] - dta['anlg_ch_names'] = [read_str(fid, 16) for ch in - range(dta['hdr']['n_anlg'])] - - dta['dsp_ch_names'] = [read_str(fid, 16) for ch in - range(dta['hdr']['n_dsp'])] - dta['dsp_wts'] = read_float_matrix( - fid, dta['hdr']['n_entries'], dta['hdr']['n_dsp']) - dta['anlg_wts'] = read_int16_matrix( - fid, dta['hdr']['n_entries'], dta['hdr']['n_anlg']) + reserved=read_str(fid, 72), + ) + dta["ch_names"] = [ + read_str(fid, 16) for ch in range(dta["hdr"]["n_entries"]) + ] + dta["anlg_ch_names"] = [ + read_str(fid, 16) for ch in range(dta["hdr"]["n_anlg"]) + ] + + dta["dsp_ch_names"] = [ + read_str(fid, 16) for ch in range(dta["hdr"]["n_dsp"]) + ] + dta["dsp_wts"] = read_float_matrix( + fid, dta["hdr"]["n_entries"], dta["hdr"]["n_dsp"] + ) + dta["anlg_wts"] = read_int16_matrix( + fid, dta["hdr"]["n_entries"], dta["hdr"]["n_anlg"] + ) else: # handle MAGNES2500 naming scheme - fid.seek(start_bytes + ub['hdr']['user_space_size'] - - dta['hdr']['n_bytes'] * - dta['hdr']['n_entries'], 0) - - dta['hdr']['n_dsp'] = dta['hdr']['n_bytes'] // 4 - 2 - assert (dta['hdr']['n_dsp'] == - len(BTI_WH2500_REF_MAG) + - len(BTI_WH2500_REF_GRAD)) - dta['ch_names'] = ['WH2500'] * dta['hdr']['n_entries'] - dta['hdr']['n_anlg'] = 3 + fid.seek( + start_bytes + + ub["hdr"]["user_space_size"] + - dta["hdr"]["n_bytes"] * dta["hdr"]["n_entries"], + 0, + ) + + dta["hdr"]["n_dsp"] = dta["hdr"]["n_bytes"] // 4 - 2 + assert dta["hdr"]["n_dsp"] == len(BTI_WH2500_REF_MAG) + len( + BTI_WH2500_REF_GRAD + ) + dta["ch_names"] = ["WH2500"] * dta["hdr"]["n_entries"] + dta["hdr"]["n_anlg"] = 3 # These orders could be wrong, so don't set them # for now # dta['anlg_ch_names'] = BTI_WH2500_REF_MAG[:3] # dta['dsp_ch_names'] = (BTI_WH2500_REF_GRAD + # BTI_WH2500_REF_MAG) - dta['anlg_wts'] = np.zeros( - (dta['hdr']['n_entries'], dta['hdr']['n_anlg']), - dtype='i2') - dta['dsp_wts'] = np.zeros( - (dta['hdr']['n_entries'], dta['hdr']['n_dsp']), - dtype='f4') - for n in range(dta['hdr']['n_entries']): - dta['anlg_wts'][n] = read_int16_matrix( - fid, 1, dta['hdr']['n_anlg']) + dta["anlg_wts"] = np.zeros( + (dta["hdr"]["n_entries"], dta["hdr"]["n_anlg"]), dtype="i2" + ) + dta["dsp_wts"] = np.zeros( + (dta["hdr"]["n_entries"], dta["hdr"]["n_dsp"]), dtype="f4" + ) + for n in range(dta["hdr"]["n_entries"]): + dta["anlg_wts"][n] = read_int16_matrix( + fid, 1, dta["hdr"]["n_anlg"] + ) read_int16(fid) - dta['dsp_wts'][n] = read_float_matrix( - fid, 1, dta['hdr']['n_dsp']) + dta["dsp_wts"][n] = 
read_float_matrix( + fid, 1, dta["hdr"]["n_dsp"] + ) elif kind == BTI.UB_B_TRIG_MASK: - dta['version'] = read_int32(fid) - dta['entries'] = read_int32(fid) + dta["version"] = read_int32(fid) + dta["entries"] = read_int32(fid) fid.seek(16, 1) - dta['masks'] = [] - for entry in range(dta['entries']): - d = {'name': read_str(fid, 20), - 'nbits': read_uint16(fid), - 'shift': read_uint16(fid), - 'mask': read_uint32(fid)} - dta['masks'] += [d] + dta["masks"] = [] + for entry in range(dta["entries"]): + d = { + "name": read_str(fid, 20), + "nbits": read_uint16(fid), + "shift": read_uint16(fid), + "mask": read_uint32(fid), + } + dta["masks"] += [d] fid.seek(8, 1) else: - dta['unknown'] = {'hdr': read_char(fid, - ub['hdr']['user_space_size'])} + dta["unknown"] = {"hdr": read_char(fid, ub["hdr"]["user_space_size"])} n_read = fid.tell() - start_bytes - if n_read != ub['hdr']['user_space_size']: - raise RuntimeError('Internal MNE reading error, read size %d ' - '!= %d expected size for kind %s' - % (n_read, ub['hdr']['user_space_size'], - kind)) + if n_read != ub["hdr"]["user_space_size"]: + raise RuntimeError( + f"Internal MNE reading error, read size {n_read} " + f"!= {ub['hdr']['user_space_size']} expected size for kind {kind}." + ) ub.update(dta) # finally update the userblock data _correct_offset(fid) # after reading. - cfg['chs'] = list() + cfg["chs"] = list() # prepare reading channels - for channel in range(cfg['hdr']['total_chans']): - ch = {'name': read_str(fid, 16), - 'chan_no': read_int16(fid), - 'ch_type': read_uint16(fid), - 'sensor_no': read_int16(fid), - 'data': dict()} + for channel in range(cfg["hdr"]["total_chans"]): + ch = { + "name": read_str(fid, 16), + "chan_no": read_int16(fid), + "ch_type": read_uint16(fid), + "sensor_no": read_int16(fid), + "data": dict(), + } fid.seek(2, 1) - ch.update({'gain': read_float(fid), - 'units_per_bit': read_float(fid), - 'yaxis_label': read_str(fid, 16), - 'aar_val': read_double(fid), - 'checksum': read_int32(fid), - 'reserved': read_str(fid, 32)}) - - cfg['chs'] += [ch] + ch.update( + { + "gain": read_float(fid), + "units_per_bit": read_float(fid), + "yaxis_label": read_str(fid, 16), + "aar_val": read_double(fid), + "checksum": read_int32(fid), + "reserved": read_str(fid, 32), + } + ) + + cfg["chs"] += [ch] _correct_offset(fid) # before and after dta = dict() - if ch['ch_type'] in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]: - dev = {'device_info': read_dev_header(fid), - 'inductance': read_float(fid), - 'padding': read_str(fid, 4), - 'transform': _correct_trans(read_transform(fid), False), - 'xform_flag': read_int16(fid), - 'total_loops': read_int16(fid)} + if ch["ch_type"] in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]: + dev = { + "device_info": read_dev_header(fid), + "inductance": read_float(fid), + "padding": read_str(fid, 4), + "transform": _correct_trans(read_transform(fid), False), + "xform_flag": read_int16(fid), + "total_loops": read_int16(fid), + } fid.seek(4, 1) - dev['reserved'] = read_str(fid, 32) - dta.update({'dev': dev, 'loops': []}) - for loop in range(dev['total_loops']): - d = {'position': read_double_matrix(fid, 1, 3), - 'orientation': read_double_matrix(fid, 1, 3), - 'radius': read_double(fid), - 'wire_radius': read_double(fid), - 'turns': read_int16(fid)} + dev["reserved"] = read_str(fid, 32) + dta.update({"dev": dev, "loops": []}) + for _ in range(dev["total_loops"]): + d = { + "position": read_double_matrix(fid, 1, 3), + "orientation": read_double_matrix(fid, 1, 3), + "radius": read_double(fid), + "wire_radius": read_double(fid), 
+ "turns": read_int16(fid), + } fid.seek(2, 1) - d['checksum'] = read_int32(fid) - d['reserved'] = read_str(fid, 32) - dta['loops'] += [d] - - elif ch['ch_type'] == BTI.CHTYPE_EEG: - dta = {'device_info': read_dev_header(fid), - 'impedance': read_float(fid), - 'padding': read_str(fid, 4), - 'transform': read_transform(fid), - 'reserved': read_char(fid, 32)} - - elif ch['ch_type'] == BTI.CHTYPE_EXTERNAL: - dta = {'device_info': read_dev_header(fid), - 'user_space_size': read_int32(fid), - 'reserved': read_str(fid, 32)} - - elif ch['ch_type'] == BTI.CHTYPE_TRIGGER: - dta = {'device_info': read_dev_header(fid), - 'user_space_size': read_int32(fid)} + d["checksum"] = read_int32(fid) + d["reserved"] = read_str(fid, 32) + dta["loops"] += [d] + + elif ch["ch_type"] == BTI.CHTYPE_EEG: + dta = { + "device_info": read_dev_header(fid), + "impedance": read_float(fid), + "padding": read_str(fid, 4), + "transform": read_transform(fid), + "reserved": read_char(fid, 32), + } + + elif ch["ch_type"] == BTI.CHTYPE_EXTERNAL: + dta = { + "device_info": read_dev_header(fid), + "user_space_size": read_int32(fid), + "reserved": read_str(fid, 32), + } + + elif ch["ch_type"] == BTI.CHTYPE_TRIGGER: + dta = { + "device_info": read_dev_header(fid), + "user_space_size": read_int32(fid), + } fid.seek(2, 1) - dta['reserved'] = read_str(fid, 32) + dta["reserved"] = read_str(fid, 32) - elif ch['ch_type'] in [BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]: - dta = {'device_info': read_dev_header(fid), - 'user_space_size': read_int32(fid), - 'reserved': read_str(fid, 32)} + elif ch["ch_type"] in [BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]: + dta = { + "device_info": read_dev_header(fid), + "user_space_size": read_int32(fid), + "reserved": read_str(fid, 32), + } - elif ch['ch_type'] == BTI.CHTYPE_SHORTED: - dta = {'device_info': read_dev_header(fid), - 'reserved': read_str(fid, 32)} + elif ch["ch_type"] == BTI.CHTYPE_SHORTED: + dta = { + "device_info": read_dev_header(fid), + "reserved": read_str(fid, 32), + } ch.update(dta) # add data collected _correct_offset(fid) # after each reading @@ -551,13 +619,15 @@ def _read_config(fname): def _read_epoch(fid): """Read BTi PDF epoch.""" - out = {'pts_in_epoch': read_int32(fid), - 'epoch_duration': read_float(fid), - 'expected_iti': read_float(fid), - 'actual_iti': read_float(fid), - 'total_var_events': read_int32(fid), - 'checksum': read_int32(fid), - 'epoch_timestamp': read_int32(fid)} + out = { + "pts_in_epoch": read_int32(fid), + "epoch_duration": read_float(fid), + "expected_iti": read_float(fid), + "actual_iti": read_float(fid), + "total_var_events": read_int32(fid), + "checksum": read_int32(fid), + "epoch_timestamp": read_int32(fid), + } fid.seek(28, 1) @@ -566,20 +636,26 @@ def _read_epoch(fid): def _read_channel(fid): """Read BTi PDF channel.""" - out = {'chan_label': read_str(fid, 16), - 'chan_no': read_int16(fid), - 'attributes': read_int16(fid), - 'scale': read_float(fid), - 'yaxis_label': read_str(fid, 16), - 'valid_min_max': read_int16(fid)} + out = { + "chan_label": read_str(fid, 16), + "chan_no": read_int16(fid), + "attributes": read_int16(fid), + "scale": read_float(fid), + "yaxis_label": read_str(fid, 16), + "valid_min_max": read_int16(fid), + } fid.seek(6, 1) - out.update({'ymin': read_double(fid), - 'ymax': read_double(fid), - 'index': read_int32(fid), - 'checksum': read_int32(fid), - 'off_flag': read_str(fid, 4), - 'offset': read_float(fid)}) + out.update( + { + "ymin": read_double(fid), + "ymax": read_double(fid), + "index": read_int32(fid), + "checksum": read_int32(fid), 
+ "off_flag": read_str(fid, 4), + "offset": read_float(fid), + } + ) fid.seek(24, 1) @@ -588,12 +664,14 @@ def _read_channel(fid): def _read_event(fid): """Read BTi PDF event.""" - out = {'event_name': read_str(fid, 16), - 'start_lat': read_float(fid), - 'end_lat': read_float(fid), - 'step_size': read_float(fid), - 'fixed_event': read_int16(fid), - 'checksum': read_int32(fid)} + out = { + "event_name": read_str(fid, 16), + "start_lat": read_float(fid), + "end_lat": read_float(fid), + "step_size": read_float(fid), + "fixed_event": read_int16(fid), + "checksum": read_int32(fid), + } fid.seek(32, 1) _correct_offset(fid) @@ -603,44 +681,48 @@ def _read_event(fid): def _read_process(fid): """Read BTi PDF process.""" - out = {'nbytes': read_int32(fid), - 'process_type': read_str(fid, 20), - 'checksum': read_int32(fid), - 'user': read_str(fid, 32), - 'timestamp': read_int32(fid), - 'filename': read_str(fid, 256), - 'total_steps': read_int32(fid)} + out = { + "nbytes": read_int32(fid), + "process_type": read_str(fid, 20), + "checksum": read_int32(fid), + "user": read_str(fid, 32), + "timestamp": read_int32(fid), + "filename": read_str(fid, 256), + "total_steps": read_int32(fid), + } fid.seek(32, 1) _correct_offset(fid) - out['processing_steps'] = list() - for step in range(out['total_steps']): - this_step = {'nbytes': read_int32(fid), - 'process_type': read_str(fid, 20), - 'checksum': read_int32(fid)} - ptype = this_step['process_type'] + out["processing_steps"] = list() + for step in range(out["total_steps"]): + this_step = { + "nbytes": read_int32(fid), + "process_type": read_str(fid, 20), + "checksum": read_int32(fid), + } + ptype = this_step["process_type"] if ptype == BTI.PROC_DEFAULTS: - this_step['scale_option'] = read_int32(fid) + this_step["scale_option"] = read_int32(fid) fid.seek(4, 1) - this_step['scale'] = read_double(fid) - this_step['dtype'] = read_int32(fid) - this_step['selected'] = read_int16(fid) - this_step['color_display'] = read_int16(fid) + this_step["scale"] = read_double(fid) + this_step["dtype"] = read_int32(fid) + this_step["selected"] = read_int16(fid) + this_step["color_display"] = read_int16(fid) fid.seek(32, 1) elif ptype in BTI.PROC_FILTER: - this_step['freq'] = read_float(fid) + this_step["freq"] = read_float(fid) fid.seek(32, 1) elif ptype in BTI.PROC_BPFILTER: - this_step['high_freq'] = read_float(fid) - this_step['low_freq'] = read_float(fid) + this_step["high_freq"] = read_float(fid) + this_step["low_freq"] = read_float(fid) else: - jump = this_step['user_space_size'] = read_int32(fid) + jump = this_step["user_space_size"] = read_int32(fid) fid.seek(32, 1) fid.seek(jump, 1) - out['processing_steps'] += [this_step] + out["processing_steps"] += [this_step] _correct_offset(fid) return out @@ -648,174 +730,120 @@ def _read_process(fid): def _read_assoc_file(fid): """Read BTi PDF assocfile.""" - out = {'file_id': read_int16(fid), - 'length': read_int16(fid)} + out = {"file_id": read_int16(fid), "length": read_int16(fid)} fid.seek(32, 1) - out['checksum'] = read_int32(fid) + out["checksum"] = read_int32(fid) return out def _read_pfid_ed(fid): """Read PDF ed file.""" - out = {'comment_size': read_int32(fid), - 'name': read_str(fid, 17)} + out = {"comment_size": read_int32(fid), "name": read_str(fid, 17)} fid.seek(9, 1) - out.update({'pdf_number': read_int16(fid), - 'total_events': read_int32(fid), - 'timestamp': read_int32(fid), - 'flags': read_int32(fid), - 'de_process': read_int32(fid), - 'checksum': read_int32(fid), - 'ed_id': read_int32(fid), - 'win_width': 
read_float(fid), - 'win_offset': read_float(fid)}) + out.update( + { + "pdf_number": read_int16(fid), + "total_events": read_int32(fid), + "timestamp": read_int32(fid), + "flags": read_int32(fid), + "de_process": read_int32(fid), + "checksum": read_int32(fid), + "ed_id": read_int32(fid), + "win_width": read_float(fid), + "win_offset": read_float(fid), + } + ) fid.seek(8, 1) return out -def _read_coil_def(fid): - """Read coil definition.""" - coildef = {'position': read_double_matrix(fid, 1, 3), - 'orientation': read_double_matrix(fid, 1, 3), - 'radius': read_double(fid), - 'wire_radius': read_double(fid), - 'turns': read_int16(fid)} - - fid.seek(fid, 2, 1) - coildef['checksum'] = read_int32(fid) - coildef['reserved'] = read_str(fid, 32) - - -def _read_ch_config(fid): - """Read BTi channel config.""" - cfg = {'name': read_str(fid, BTI.FILE_CONF_CH_NAME), - 'chan_no': read_int16(fid), - 'ch_type': read_uint16(fid), - 'sensor_no': read_int16(fid)} - - fid.seek(fid, BTI.FILE_CONF_CH_NEXT, 1) - - cfg.update({'gain': read_float(fid), - 'units_per_bit': read_float(fid), - 'yaxis_label': read_str(fid, BTI.FILE_CONF_CH_YLABEL), - 'aar_val': read_double(fid), - 'checksum': read_int32(fid), - 'reserved': read_str(fid, BTI.FILE_CONF_CH_RESERVED)}) - - _correct_offset(fid) - - # Then the channel info - ch_type, chan = cfg['ch_type'], dict() - chan['dev'] = {'size': read_int32(fid), - 'checksum': read_int32(fid), - 'reserved': read_str(fid, 32)} - if ch_type in [BTI.CHTYPE_MEG, BTI.CHTYPE_REF]: - chan['loops'] = [_read_coil_def(fid) for d in - range(chan['dev']['total_loops'])] - - elif ch_type == BTI.CHTYPE_EEG: - chan['impedance'] = read_float(fid) - chan['padding'] = read_str(fid, BTI.FILE_CONF_CH_PADDING) - chan['transform'] = read_transform(fid) - chan['reserved'] = read_char(fid, BTI.FILE_CONF_CH_RESERVED) - - elif ch_type in [BTI.CHTYPE_TRIGGER, BTI.CHTYPE_EXTERNAL, - BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]: - chan['user_space_size'] = read_int32(fid) - if ch_type == BTI.CHTYPE_TRIGGER: - fid.seek(2, 1) - chan['reserved'] = read_str(fid, BTI.FILE_CONF_CH_RESERVED) - - elif ch_type == BTI.CHTYPE_SHORTED: - chan['reserved'] = read_str(fid, BTI.FILE_CONF_CH_RESERVED) - - cfg['chan'] = chan - - _correct_offset(fid) - - return cfg - - def _read_bti_header_pdf(pdf_fname): """Read header from pdf file.""" - with _bti_open(pdf_fname, 'rb') as fid: + with _bti_open(pdf_fname, "rb") as fid: fid.seek(-8, 2) start = fid.tell() header_position = read_int64(fid) check_value = header_position & BTI.FILE_MASK - if ((start + BTI.FILE_CURPOS - check_value) <= BTI.FILE_MASK): + if (start + BTI.FILE_CURPOS - check_value) <= BTI.FILE_MASK: header_position = check_value # Check header position for alignment issues - if ((header_position % 8) != 0): - header_position += (8 - (header_position % 8)) + if (header_position % 8) != 0: + header_position += 8 - (header_position % 8) fid.seek(header_position, 0) # actual header starts here - info = {'version': read_int16(fid), - 'file_type': read_str(fid, 5), - 'hdr_size': start - header_position, # add for convenience - 'start': start} + info = { + "version": read_int16(fid), + "file_type": read_str(fid, 5), + "hdr_size": start - header_position, # add for convenience + "start": start, + } fid.seek(1, 1) - info.update({'data_format': read_int16(fid), - 'acq_mode': read_int16(fid), - 'total_epochs': read_int32(fid), - 'input_epochs': read_int32(fid), - 'total_events': read_int32(fid), - 'total_fixed_events': read_int32(fid), - 'sample_period': read_float(fid), - 'xaxis_label': 
read_str(fid, 16), - 'total_processes': read_int32(fid), - 'total_chans': read_int16(fid)}) + info.update( + { + "data_format": read_int16(fid), + "acq_mode": read_int16(fid), + "total_epochs": read_int32(fid), + "input_epochs": read_int32(fid), + "total_events": read_int32(fid), + "total_fixed_events": read_int32(fid), + "sample_period": read_float(fid), + "xaxis_label": read_str(fid, 16), + "total_processes": read_int32(fid), + "total_chans": read_int16(fid), + } + ) fid.seek(2, 1) - info.update({'checksum': read_int32(fid), - 'total_ed_classes': read_int32(fid), - 'total_associated_files': read_int16(fid), - 'last_file_index': read_int16(fid), - 'timestamp': read_int32(fid)}) + info.update( + { + "checksum": read_int32(fid), + "total_ed_classes": read_int32(fid), + "total_associated_files": read_int16(fid), + "last_file_index": read_int16(fid), + "timestamp": read_int32(fid), + } + ) fid.seek(20, 1) _correct_offset(fid) # actual header ends here, so dar seems ok. - info['epochs'] = [_read_epoch(fid) for epoch in - range(info['total_epochs'])] + info["epochs"] = [_read_epoch(fid) for _ in range(info["total_epochs"])] - info['chs'] = [_read_channel(fid) for ch in - range(info['total_chans'])] + info["chs"] = [_read_channel(fid) for _ in range(info["total_chans"])] - info['events'] = [_read_event(fid) for event in - range(info['total_events'])] + info["events"] = [_read_event(fid) for _ in range(info["total_events"])] - info['processes'] = [_read_process(fid) for process in - range(info['total_processes'])] + info["processes"] = [_read_process(fid) for _ in range(info["total_processes"])] - info['assocfiles'] = [_read_assoc_file(fid) for af in - range(info['total_associated_files'])] + info["assocfiles"] = [ + _read_assoc_file(fid) for _ in range(info["total_associated_files"]) + ] - info['edclasses'] = [_read_pfid_ed(fid) for ed_class in - range(info['total_ed_classes'])] + info["edclasses"] = [ + _read_pfid_ed(fid) for _ in range(info["total_ed_classes"]) + ] - info['extra_data'] = fid.read(start - fid.tell()) - info['pdf_fname'] = pdf_fname + info["extra_data"] = fid.read(start - fid.tell()) + info["pdf"] = pdf_fname - info['total_slices'] = sum(e['pts_in_epoch'] for e in - info['epochs']) + info["total_slices"] = sum(e["pts_in_epoch"] for e in info["epochs"]) - info['dtype'] = DTYPES[info['data_format']] - bps = info['dtype'].itemsize * info['total_chans'] - info['bytes_per_slice'] = bps + info["dtype"] = DTYPES[info["data_format"]] + bps = info["dtype"].itemsize * info["total_chans"] + info["bytes_per_slice"] = bps return info @@ -823,71 +851,74 @@ def _read_bti_header(pdf_fname, config_fname, sort_by_ch_name=True): """Read bti PDF header.""" info = _read_bti_header_pdf(pdf_fname) if pdf_fname is not None else dict() cfg = _read_config(config_fname) - info['bti_transform'] = cfg['transforms'] + info["bti_transform"] = cfg["transforms"] # augment channel list by according info from config. 
# get channels from config present in PDF - chans = info.get('chs', None) + chans = info.get("chs", None) if chans is not None: - chans_cfg = [c for c in cfg['chs'] if c['chan_no'] - in [c_['chan_no'] for c_ in chans]] + chans_cfg = [ + c for c in cfg["chs"] if c["chan_no"] in [c_["chan_no"] for c_ in chans] + ] # sort chans_cfg and chans - chans = sorted(chans, key=lambda k: k['chan_no']) - chans_cfg = sorted(chans_cfg, key=lambda k: k['chan_no']) + chans = sorted(chans, key=lambda k: k["chan_no"]) + chans_cfg = sorted(chans_cfg, key=lambda k: k["chan_no"]) # check all pdf channels are present in config - match = [c['chan_no'] for c in chans_cfg] == \ - [c['chan_no'] for c in chans] + match = [c["chan_no"] for c in chans_cfg] == [c["chan_no"] for c in chans] if not match: - raise RuntimeError('Could not match raw data channels with' - ' config channels. Some of the channels' - ' found are not described in config.') + raise RuntimeError( + "Could not match raw data channels with" + " config channels. Some of the channels" + " found are not described in config." + ) else: - chans_cfg = cfg['chs'] + chans_cfg = cfg["chs"] chans = [dict() for _ in chans_cfg] # transfer channel info from config to channel info for ch, ch_cfg in zip(chans, chans_cfg): - ch['upb'] = ch_cfg['units_per_bit'] - ch['gain'] = ch_cfg['gain'] - ch['name'] = ch_cfg['name'] - if ch_cfg.get('dev', dict()).get('transform', None) is not None: - ch['loc'] = _coil_trans_to_loc(ch_cfg['dev']['transform']) + ch["upb"] = ch_cfg["units_per_bit"] + ch["gain"] = ch_cfg["gain"] + ch["name"] = ch_cfg["name"] + if ch_cfg.get("dev", dict()).get("transform", None) is not None: + ch["loc"] = _coil_trans_to_loc(ch_cfg["dev"]["transform"]) else: - ch['loc'] = np.full(12, np.nan) + ch["loc"] = np.full(12, np.nan) if pdf_fname is not None: - if info['data_format'] <= 2: # see DTYPES, implies integer - ch['cal'] = ch['scale'] * ch['upb'] / float(ch['gain']) + if info["data_format"] <= 2: # see DTYPES, implies integer + ch["cal"] = ch["scale"] * ch["upb"] / float(ch["gain"]) else: # float - ch['cal'] = ch['scale'] * ch['gain'] + ch["cal"] = ch["scale"] * ch["gain"] else: # if we are in this mode we don't read data, only channel info. 
-            ch['cal'] = ch['scale'] = 1.0  # so we put a trivial default value
+            ch["cal"] = ch["scale"] = 1.0  # so we put a trivial default value
 
     if sort_by_ch_name:
-        by_index = [(i, d['index']) for i, d in enumerate(chans)]
+        by_index = [(i, d["index"]) for i, d in enumerate(chans)]
         by_index.sort(key=lambda c: c[1])
         by_index = [idx[0] for idx in by_index]
         chs = [chans[pos] for pos in by_index]
 
-        sort_by_name_idx = [(i, d['name']) for i, d in enumerate(chs)]
-        a_chs = [c for c in sort_by_name_idx if c[1].startswith('A')]
-        other_chs = [c for c in sort_by_name_idx if not c[1].startswith('A')]
-        sort_by_name_idx = sorted(
-            a_chs, key=lambda c: int(c[1][1:])) + sorted(other_chs)
+        sort_by_name_idx = [(i, d["name"]) for i, d in enumerate(chs)]
+        a_chs = [c for c in sort_by_name_idx if c[1].startswith("A")]
+        other_chs = [c for c in sort_by_name_idx if not c[1].startswith("A")]
+        sort_by_name_idx = sorted(a_chs, key=lambda c: int(c[1][1:])) + sorted(
+            other_chs
+        )
 
         sort_by_name_idx = [idx[0] for idx in sort_by_name_idx]
 
-        info['chs'] = [chans[pos] for pos in sort_by_name_idx]
-        info['order'] = sort_by_name_idx
+        info["chs"] = [chans[pos] for pos in sort_by_name_idx]
+        info["order"] = sort_by_name_idx
     else:
-        info['chs'] = chans
-        info['order'] = np.arange(len(chans))
+        info["chs"] = chans
+        info["order"] = np.arange(len(chans))
 
     # finally add some important fields from the config
-    info['e_table'] = cfg['user_blocks'][BTI.UB_B_E_TABLE_USED]
-    info['weights'] = cfg['user_blocks'][BTI.UB_B_WEIGHTS_USED]
+    info["e_table"] = cfg["user_blocks"][BTI.UB_B_E_TABLE_USED]
+    info["weights"] = cfg["user_blocks"][BTI.UB_B_WEIGHTS_USED]
 
     return info
 
@@ -896,11 +927,11 @@ def _correct_trans(t, check=True):
     """Convert to a transformation matrix."""
     t = np.array(t, np.float64)
     t[:3, :3] *= t[3, :3][:, np.newaxis]  # apply scalings
-    t[3, :3] = 0.  # remove them
+    t[3, :3] = 0.0  # remove them
     if check:
-        assert t[3, 3] == 1.
+        assert t[3, 3] == 1.0
     else:
-        t[3, 3] = 1.
+        t[3, 3] = 1.0
     return t
 
@@ -942,58 +973,79 @@ class RawBTi(BaseRaw):
     """
 
     @verbose
-    def __init__(self, pdf_fname, config_fname='config',
-                 head_shape_fname='hs_file', rotation_x=0.,
-                 translation=(0.0, 0.02, 0.11), convert=True,
-                 rename_channels=True, sort_by_ch_name=True,
-                 ecg_ch='E31', eog_ch=('E63', 'E64'),
-                 preload=False, verbose=None):  # noqa: D102
+    def __init__(
+        self,
+        pdf_fname,
+        config_fname="config",
+        head_shape_fname="hs_file",
+        rotation_x=0.0,
+        translation=(0.0, 0.02, 0.11),
+        convert=True,
+        rename_channels=True,
+        sort_by_ch_name=True,
+        ecg_ch="E31",
+        eog_ch=("E63", "E64"),
+        preload=False,
+        verbose=None,
+    ):
+        _validate_type(pdf_fname, ("path-like", BytesIO), "pdf_fname")
         info, bti_info = _get_bti_info(
-            pdf_fname=pdf_fname, config_fname=config_fname,
-            head_shape_fname=head_shape_fname, rotation_x=rotation_x,
-            translation=translation, convert=convert, ecg_ch=ecg_ch,
+            pdf_fname=pdf_fname,
+            config_fname=config_fname,
+            head_shape_fname=head_shape_fname,
+            rotation_x=rotation_x,
+            translation=translation,
+            convert=convert,
+            ecg_ch=ecg_ch,
             rename_channels=rename_channels,
-            sort_by_ch_name=sort_by_ch_name, eog_ch=eog_ch)
-        self.bti_ch_labels = [c['chan_label'] for c in bti_info['chs']]
+            sort_by_ch_name=sort_by_ch_name,
+            eog_ch=eog_ch,
+        )
+        bti_info["bti_ch_labels"] = [c["chan_label"] for c in bti_info["chs"]]
         # make Raw repr work if we have a BytesIO as input
-        if isinstance(pdf_fname, BytesIO):
-            pdf_fname = repr(pdf_fname)
-        super(RawBTi, self).__init__(
-            info, preload, filenames=[pdf_fname], raw_extras=[bti_info],
-            last_samps=[bti_info['total_slices'] - 1], verbose=verbose)
+        filename = bti_info["pdf"]
+        if isinstance(filename, BytesIO):
+            filename = None
+        super().__init__(
+            info,
+            preload,
+            filenames=[filename],
+            raw_extras=[bti_info],
+            last_samps=[bti_info["total_slices"] - 1],
+            verbose=verbose,
+        )
 
     def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
         """Read a segment of data from a file."""
         bti_info = self._raw_extras[fi]
-        fname = bti_info['pdf_fname']
-        dtype = bti_info['dtype']
-        assert len(bti_info['chs']) == self._raw_extras[fi]['orig_nchan']
-        n_channels = len(bti_info['chs'])
+        fname_or_bytes = bti_info["pdf"]
+        dtype = bti_info["dtype"]
+        assert len(bti_info["chs"]) == self._raw_extras[fi]["orig_nchan"]
+        n_channels = len(bti_info["chs"])
         n_bytes = np.dtype(dtype).itemsize
         data_left = (stop - start) * n_channels
-        read_cals = np.empty((bti_info['total_chans'],))
-        for ch in bti_info['chs']:
-            read_cals[ch['index']] = ch['cal']
+        read_cals = np.empty((bti_info["total_chans"],))
+        for ch in bti_info["chs"]:
+            read_cals[ch["index"]] = ch["cal"]
         block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels
         block_size = min(data_left, block_size)
         # extract data in chunks
-        with _bti_open(fname, 'rb') as fid:
-            fid.seek(bti_info['bytes_per_slice'] * start, 0)
-            for sample_start in np.arange(0, data_left,
-                                          block_size) // n_channels:
+        with _bti_open(fname_or_bytes, "rb") as fid:
+            fid.seek(bti_info["bytes_per_slice"] * start, 0)
+            for sample_start in np.arange(0, data_left, block_size) // n_channels:
                 count = min(block_size, data_left - sample_start * n_channels)
                 if isinstance(fid, BytesIO):
                     block = np.frombuffer(fid.getvalue(), dtype, count)
                 else:
                     block = np.fromfile(fid, dtype, count)
                 sample_stop = sample_start + count // n_channels
-                shape = (sample_stop - sample_start, bti_info['total_chans'])
+                shape = (sample_stop - sample_start, bti_info["total_chans"])
                 block.shape = shape
                 data_view = data[:, sample_start:sample_stop]
                 one = np.empty(block.shape[::-1])
-                for ii, b_i_o in enumerate(bti_info['order']):
+                for ii, b_i_o in enumerate(bti_info["order"]):
                     one[ii] = block[:, b_i_o] * read_cals[b_i_o]
                 _mult_cal_one(data_view, one, idx, cals, mult)
 
@@ -1001,47 +1053,64 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
 @functools.lru_cache(1)
 def _1020_names():
     from mne.channels import make_standard_montage
-    return set(ch_name.lower()
-               for ch_name in make_standard_montage('standard_1005').ch_names)
+
+    return set(
+        ch_name.lower() for ch_name in make_standard_montage("standard_1005").ch_names
+    )
 
 
 def _eeg_like(ch_name):
     # Some bti recordigs look like "F4-POz", so let's at least mark them
     # as EEG
-    if ch_name.count('-') != 1:
+    if ch_name.count("-") != 1:
         return
-    ch, ref = ch_name.split('-')
+    ch, ref = ch_name.split("-")
     eeg_names = _1020_names()
     return ch.lower() in eeg_names and ref.lower() in eeg_names
 
 
 def _make_bti_digitization(
-        info, head_shape_fname, convert, use_hpi, bti_dev_t, dev_ctf_t):
+    info, head_shape_fname, convert, use_hpi, bti_dev_t, dev_ctf_t
+):
     with info._unlock():
         if head_shape_fname:
-            logger.info('... Reading digitization points from %s' %
-                        head_shape_fname)
-
-            nasion, lpa, rpa, hpi, dig_points = _read_head_shape(
-                head_shape_fname)
-            info['dig'], dev_head_t, ctf_head_t = _make_bti_dig_points(
-                nasion, lpa, rpa, hpi, dig_points,
-                convert, use_hpi, bti_dev_t, dev_ctf_t)
+            logger.info(f"... Reading digitization points from {head_shape_fname}")
+
+            nasion, lpa, rpa, hpi, dig_points = _read_head_shape(head_shape_fname)
+            info["dig"], dev_head_t, ctf_head_t = _make_bti_dig_points(
+                nasion,
+                lpa,
+                rpa,
+                hpi,
+                dig_points,
+                convert,
+                use_hpi,
+                bti_dev_t,
+                dev_ctf_t,
+            )
         else:
-            logger.info('... no headshape file supplied, doing nothing.')
-            info['dig'] = None
-            dev_head_t = Transform('meg', 'head', trans=None)
-            ctf_head_t = Transform('ctf_head', 'head', trans=None)
+            logger.info("... no headshape file supplied, doing nothing.")
+            info["dig"] = None
+            dev_head_t = Transform("meg", "head", trans=None)
+            ctf_head_t = Transform("ctf_head", "head", trans=None)
 
-        info.update(dev_head_t=dev_head_t, dev_ctf_t=dev_ctf_t,
-                    ctf_head_t=ctf_head_t)
+        info.update(dev_head_t=dev_head_t, dev_ctf_t=dev_ctf_t, ctf_head_t=ctf_head_t)
 
     return info
 
 
-def _get_bti_info(pdf_fname, config_fname, head_shape_fname, rotation_x,
-                  translation, convert, ecg_ch, eog_ch, rename_channels=True,
-                  sort_by_ch_name=True):
+def _get_bti_info(
+    pdf_fname,
+    config_fname,
+    head_shape_fname,
+    rotation_x,
+    translation,
+    convert,
+    ecg_ch,
+    eog_ch,
+    rename_channels=True,
+    sort_by_ch_name=True,
+):
     """Read BTI info.
 
     Note.
This helper supports partial construction of infos when `pdf_fname` @@ -1058,170 +1127,180 @@ def _get_bti_info(pdf_fname, config_fname, head_shape_fname, rotation_x, """ if pdf_fname is None: - logger.info('No pdf_fname passed, trying to construct partial info ' - 'from config') + logger.info("No pdf_fname passed, trying to construct partial info from config") if pdf_fname is not None and not isinstance(pdf_fname, BytesIO): if not op.isabs(pdf_fname): pdf_fname = op.abspath(pdf_fname) if not isinstance(config_fname, BytesIO): if not op.isabs(config_fname): - config_tries = [op.abspath(config_fname), - op.abspath(op.join(op.dirname(pdf_fname), - config_fname))] + config_tries = [ + op.abspath(config_fname), + op.abspath(op.join(op.dirname(pdf_fname), config_fname)), + ] for config_try in config_tries: if op.isfile(config_try): config_fname = config_try break if not op.isfile(config_fname): - raise ValueError('Could not find the config file %s. Please check' - ' whether you are in the right directory ' - 'or pass the full name' % config_fname) + raise ValueError( + f"Could not find the config file {config_fname}. Please check" + " whether you are in the right directory " + "or pass the full name" + ) - if head_shape_fname is not None and not isinstance( - head_shape_fname, BytesIO): + if head_shape_fname is not None and not isinstance(head_shape_fname, BytesIO): orig_name = head_shape_fname if not op.isfile(head_shape_fname): - head_shape_fname = op.join(op.dirname(pdf_fname), - head_shape_fname) + head_shape_fname = op.join(op.dirname(pdf_fname), head_shape_fname) if not op.isfile(head_shape_fname): - raise ValueError('Could not find the head_shape file "%s". ' - 'You should check whether you are in the ' - 'right directory, pass the full file name, ' - 'or pass head_shape_fname=None.' - % orig_name) - - logger.info('Reading 4D PDF file %s...' % pdf_fname) + raise ValueError( + f'Could not find the head_shape file "{orig_name}". ' + "You should check whether you are in the " + "right directory, pass the full file name, " + "or pass head_shape_fname=None." + ) + + logger.info(f"Reading 4D PDF file {pdf_fname}...") bti_info = _read_bti_header( - pdf_fname, config_fname, sort_by_ch_name=sort_by_ch_name) - - dev_ctf_t = Transform('ctf_meg', 'ctf_head', - _correct_trans(bti_info['bti_transform'][0])) + pdf_fname, config_fname, sort_by_ch_name=sort_by_ch_name + ) + extras = dict( + pdf_fname=pdf_fname, + head_shape_fname=head_shape_fname, + config_fname=config_fname, + ) + for key, val in extras.items(): + bti_info[key] = None if isinstance(val, BytesIO) else val + + dev_ctf_t = Transform( + "ctf_meg", "ctf_head", _correct_trans(bti_info["bti_transform"][0]) + ) _check_nan_dev_head_t(dev_ctf_t) # for old backward compatibility and external processing - rotation_x = 0. if rotation_x is None else rotation_x + rotation_x = 0.0 if rotation_x is None else rotation_x bti_dev_t = _get_bti_dev_t(rotation_x, translation) if convert else None - bti_dev_t = Transform('ctf_meg', 'meg', bti_dev_t) + bti_dev_t = Transform("ctf_meg", "meg", bti_dev_t) use_hpi = False # hard coded, but marked as later option. - logger.info('Creating Neuromag info structure ...') - if 'sample_period' in bti_info.keys(): - sfreq = 1. 
/ bti_info['sample_period'] + logger.info("Creating Neuromag info structure ...") + if "sample_period" in bti_info.keys(): + sfreq = 1.0 / bti_info["sample_period"] else: sfreq = None if pdf_fname is not None: info = _empty_info(sfreq) - date = bti_info['processes'][0]['timestamp'] - info['meas_date'] = _stamp_to_dt((date, 0)) + date = bti_info["processes"][0]["timestamp"] + info["meas_date"] = _stamp_to_dt((date, 0)) else: # these cannot be guessed from config, see docstring info = _empty_info(1.0) - info['sfreq'] = None - info['lowpass'] = None - info['highpass'] = None - info['meas_date'] = None - bti_info['processes'] = list() + info["sfreq"] = None + info["lowpass"] = None + info["highpass"] = None + info["meas_date"] = None + bti_info["processes"] = list() # browse processing info for filter specs. - hp, lp = info['highpass'], info['lowpass'] - for proc in bti_info['processes']: - if 'filt' in proc['process_type']: - for step in proc['processing_steps']: - if 'high_freq' in step: - hp, lp = step['high_freq'], step['low_freq'] - elif 'hp' in step['process_type']: - hp = step['freq'] - elif 'lp' in step['process_type']: - lp = step['freq'] - - info['highpass'] = hp - info['lowpass'] = lp + hp, lp = info["highpass"], info["lowpass"] + for proc in bti_info["processes"]: + if "filt" in proc["process_type"]: + for step in proc["processing_steps"]: + if "high_freq" in step: + hp, lp = step["high_freq"], step["low_freq"] + elif "hp" in step["process_type"]: + hp = step["freq"] + elif "lp" in step["process_type"]: + lp = step["freq"] + + info["highpass"] = hp + info["lowpass"] = lp chs = [] # Note that 'name' and 'chan_label' are not the same. # We want the configured label if out IO parsed it # except for the MEG channels for which we keep the config name bti_ch_names = list() - for ch in bti_info['chs']: + for ch in bti_info["chs"]: # we have always relied on 'A' as indicator of MEG data channels. - ch_name = ch['name'] - if not ch_name.startswith('A'): - ch_name = ch.get('chan_label', ch_name) + ch_name = ch["name"] + if not ch_name.startswith("A"): + ch_name = ch.get("chan_label", ch_name) bti_ch_names.append(ch_name) - neuromag_ch_names = _rename_channels( - bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch) + neuromag_ch_names = _rename_channels(bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch) ch_mapping = zip(bti_ch_names, neuromag_ch_names) - logger.info('... Setting channel info structure.') + logger.info("... Setting channel info structure.") for idx, (chan_4d, chan_neuromag) in enumerate(ch_mapping): chan_info = _instantiate_default_info_chs() - chan_info['ch_name'] = chan_neuromag if rename_channels else chan_4d - chan_info['logno'] = idx + BTI.FIFF_LOGNO - chan_info['scanno'] = idx + 1 - chan_info['cal'] = float(bti_info['chs'][idx]['scale']) + chan_info["ch_name"] = chan_neuromag if rename_channels else chan_4d + chan_info["logno"] = idx + BTI.FIFF_LOGNO + chan_info["scanno"] = idx + 1 + chan_info["cal"] = float(bti_info["chs"][idx]["scale"]) - if any(chan_4d.startswith(k) for k in ('A', 'M', 'G')): - loc = bti_info['chs'][idx]['loc'] + if any(chan_4d.startswith(k) for k in ("A", "M", "G")): + loc = bti_info["chs"][idx]["loc"] if loc is not None: if convert: if idx == 0: - logger.info('... putting coil transforms in Neuromag ' - 'coordinates') - t = _loc_to_coil_trans(bti_info['chs'][idx]['loc']) + logger.info( + "... 
putting coil transforms in Neuromag coordinates" + ) + t = _loc_to_coil_trans(bti_info["chs"][idx]["loc"]) t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t) loc = _coil_trans_to_loc(t) - chan_info['loc'] = loc + chan_info["loc"] = loc # BTI sensors are natively stored in 4D head coords we believe - meg_frame = (FIFF.FIFFV_COORD_DEVICE if convert else - FIFF.FIFFV_MNE_COORD_4D_HEAD) - eeg_frame = (FIFF.FIFFV_COORD_HEAD if convert else - FIFF.FIFFV_MNE_COORD_4D_HEAD) - if chan_4d.startswith('A'): - chan_info['kind'] = FIFF.FIFFV_MEG_CH - chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_MAG - chan_info['coord_frame'] = meg_frame - chan_info['unit'] = FIFF.FIFF_UNIT_T - - elif chan_4d.startswith('M'): - chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH - chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_MAG - chan_info['coord_frame'] = meg_frame - chan_info['unit'] = FIFF.FIFF_UNIT_T - - elif chan_4d.startswith('G'): - chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH - chan_info['coord_frame'] = meg_frame - chan_info['unit'] = FIFF.FIFF_UNIT_T_M - if chan_4d in ('GxxA', 'GyyA'): - chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD - elif chan_4d in ('GyxA', 'GzxA', 'GzyA'): - chan_info['coil_type'] = \ - FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD - - elif chan_4d.startswith('EEG') or _eeg_like(chan_4d): - chan_info['kind'] = FIFF.FIFFV_EEG_CH - chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG - chan_info['coord_frame'] = eeg_frame - chan_info['unit'] = FIFF.FIFF_UNIT_V + meg_frame = FIFF.FIFFV_COORD_DEVICE if convert else FIFF.FIFFV_MNE_COORD_4D_HEAD + eeg_frame = FIFF.FIFFV_COORD_HEAD if convert else FIFF.FIFFV_MNE_COORD_4D_HEAD + if chan_4d.startswith("A"): + chan_info["kind"] = FIFF.FIFFV_MEG_CH + chan_info["coil_type"] = FIFF.FIFFV_COIL_MAGNES_MAG + chan_info["coord_frame"] = meg_frame + chan_info["unit"] = FIFF.FIFF_UNIT_T + + elif chan_4d.startswith("M"): + chan_info["kind"] = FIFF.FIFFV_REF_MEG_CH + chan_info["coil_type"] = FIFF.FIFFV_COIL_MAGNES_REF_MAG + chan_info["coord_frame"] = meg_frame + chan_info["unit"] = FIFF.FIFF_UNIT_T + + elif chan_4d.startswith("G"): + chan_info["kind"] = FIFF.FIFFV_REF_MEG_CH + chan_info["coord_frame"] = meg_frame + chan_info["unit"] = FIFF.FIFF_UNIT_T_M + if chan_4d in ("GxxA", "GyyA"): + chan_info["coil_type"] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD + elif chan_4d in ("GyxA", "GzxA", "GzyA"): + chan_info["coil_type"] = FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD + + elif chan_4d.startswith("EEG") or _eeg_like(chan_4d): + chan_info["kind"] = FIFF.FIFFV_EEG_CH + chan_info["coil_type"] = FIFF.FIFFV_COIL_EEG + chan_info["coord_frame"] = eeg_frame + chan_info["unit"] = FIFF.FIFF_UNIT_V # TODO: We should use 'electrodes' to fill this in, and make sure # we turn them into dig as well - chan_info['loc'][:3] = np.nan - - elif chan_4d == 'RESPONSE': - chan_info['kind'] = FIFF.FIFFV_STIM_CH - elif chan_4d == 'TRIGGER': - chan_info['kind'] = FIFF.FIFFV_STIM_CH - elif chan_4d.startswith('EOG') or \ - chan_4d[:4] in ('HEOG', 'VEOG') or chan_4d in eog_ch: - chan_info['kind'] = FIFF.FIFFV_EOG_CH - elif chan_4d.startswith('EMG'): - chan_info['kind'] = FIFF.FIFFV_EMG_CH - elif chan_4d == ecg_ch or chan_4d.startswith('ECG'): - chan_info['kind'] = FIFF.FIFFV_ECG_CH + chan_info["loc"][:3] = np.nan + + elif chan_4d == "RESPONSE": + chan_info["kind"] = FIFF.FIFFV_STIM_CH + elif chan_4d == "TRIGGER": + chan_info["kind"] = FIFF.FIFFV_STIM_CH + elif ( + chan_4d.startswith("EOG") + or chan_4d[:4] in ("HEOG", "VEOG") + or chan_4d in eog_ch + ): + chan_info["kind"] = FIFF.FIFFV_EOG_CH + elif 
chan_4d.startswith("EMG"): + chan_info["kind"] = FIFF.FIFFV_EMG_CH + elif chan_4d == ecg_ch or chan_4d.startswith("ECG"): + chan_info["kind"] = FIFF.FIFFV_ECG_CH # Our default is now misc, but if we ever change that, # we'll need this: # elif chan_4d.startswith('X') or chan_4d == 'UACurrent': @@ -1229,17 +1308,19 @@ def _get_bti_info(pdf_fname, config_fname, head_shape_fname, rotation_x, chs.append(chan_info) - info['chs'] = chs + info["chs"] = chs # ### Dig stuff info = _make_bti_digitization( - info, head_shape_fname, convert, use_hpi, bti_dev_t, dev_ctf_t) + info, head_shape_fname, convert, use_hpi, bti_dev_t, dev_ctf_t + ) logger.info( - 'Currently direct inclusion of 4D weight tables is not supported.' - ' For critical use cases please take into account the MNE command' + "Currently direct inclusion of 4D weight tables is not supported." + " For critical use cases please take into account the MNE command" ' "mne_create_comp_data" to include weights as printed out by ' - 'the 4D "print_table" routine.') + 'the 4D "print_table" routine.' + ) # check that the info is complete info._unlocked = False @@ -1249,12 +1330,20 @@ def _get_bti_info(pdf_fname, config_fname, head_shape_fname, rotation_x, @verbose -def read_raw_bti(pdf_fname, config_fname='config', - head_shape_fname='hs_file', rotation_x=0., - translation=(0.0, 0.02, 0.11), convert=True, - rename_channels=True, sort_by_ch_name=True, - ecg_ch='E31', eog_ch=('E63', 'E64'), preload=False, - verbose=None): +def read_raw_bti( + pdf_fname, + config_fname="config", + head_shape_fname="hs_file", + rotation_x=0.0, + translation=(0.0, 0.02, 0.11), + convert=True, + rename_channels=True, + sort_by_ch_name=True, + ecg_ch="E31", + eog_ch=("E63", "E64"), + preload=False, + verbose=None, +) -> RawBTi: """Raw object from 4D Neuroimaging MagnesWH3600 data. .. note:: @@ -1309,9 +1398,17 @@ def read_raw_bti(pdf_fname, config_fname='config', -------- mne.io.Raw : Documentation of attributes and methods of RawBTi. """ - return RawBTi(pdf_fname, config_fname=config_fname, - head_shape_fname=head_shape_fname, - rotation_x=rotation_x, translation=translation, - convert=convert, rename_channels=rename_channels, - sort_by_ch_name=sort_by_ch_name, ecg_ch=ecg_ch, - eog_ch=eog_ch, preload=preload, verbose=verbose) + return RawBTi( + pdf_fname, + config_fname=config_fname, + head_shape_fname=head_shape_fname, + rotation_x=rotation_x, + translation=translation, + convert=convert, + rename_channels=rename_channels, + sort_by_ch_name=sort_by_ch_name, + ecg_ch=ecg_ch, + eog_ch=eog_ch, + preload=preload, + verbose=verbose, + ) diff --git a/mne/io/bti/constants.py b/mne/io/bti/constants.py index ca09e449af4..d135dae6e60 100644 --- a/mne/io/bti/constants.py +++ b/mne/io/bti/constants.py @@ -1,99 +1,99 @@ -# Authors: Denis Engemann -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
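
For orientation, a minimal usage sketch of the ``read_raw_bti`` wrapper reformatted above; the dataset path is a hypothetical placeholder, and the config/head-shape names are simply the 4D defaults this module already uses.

import mne

# Read a 4D/BTi run; when only their default names are given, the config and
# head-shape files are resolved next to the pdf file (see _get_bti_info above).
raw = mne.io.read_raw_bti(
    pdf_fname="c,rfDC",          # hypothetical processed-data file
    config_fname="config",       # default acquisition-config name
    head_shape_fname="hs_file",  # default digitized head-shape name
    rename_channels=True,        # 4D labels ("A1", ...) become "MEG 001", ...
    preload=False,
)
print(raw.info["dev_head_t"])    # device-to-head transform assembled by the reader
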
from ...utils import BunchConst BTI = BunchConst() -BTI.ELEC_STATE_NOT_COLLECTED = 0 -BTI.ELEC_STATE_COLLECTED = 1 -BTI.ELEC_STATE_SKIPPED = 2 -BTI.ELEC_STATE_NOT_APPLICABLE = 3 +BTI.ELEC_STATE_NOT_COLLECTED = 0 +BTI.ELEC_STATE_COLLECTED = 1 +BTI.ELEC_STATE_SKIPPED = 2 +BTI.ELEC_STATE_NOT_APPLICABLE = 3 # ## Byte offesets and data sizes for different files # -BTI.FILE_MASK = 2147483647 -BTI.FILE_CURPOS = 8 -BTI.FILE_END = -8 +BTI.FILE_MASK = 2147483647 +BTI.FILE_CURPOS = 8 +BTI.FILE_END = -8 -BTI.FILE_HS_VERSION = 0 -BTI.FILE_HS_TIMESTAMP = 4 -BTI.FILE_HS_CHECKSUM = 8 -BTI.FILE_HS_N_DIGPOINTS = 12 -BTI.FILE_HS_N_INDEXPOINTS = 16 +BTI.FILE_HS_VERSION = 0 +BTI.FILE_HS_TIMESTAMP = 4 +BTI.FILE_HS_CHECKSUM = 8 +BTI.FILE_HS_N_DIGPOINTS = 12 +BTI.FILE_HS_N_INDEXPOINTS = 16 -BTI.FILE_PDF_H_ENTER = 1 -BTI.FILE_PDF_H_FTYPE = 5 -BTI.FILE_PDF_H_XLABEL = 16 -BTI.FILE_PDF_H_NEXT = 2 -BTI.FILE_PDF_H_EXIT = 20 +BTI.FILE_PDF_H_ENTER = 1 +BTI.FILE_PDF_H_FTYPE = 5 +BTI.FILE_PDF_H_XLABEL = 16 +BTI.FILE_PDF_H_NEXT = 2 +BTI.FILE_PDF_H_EXIT = 20 -BTI.FILE_PDF_EPOCH_EXIT = 28 +BTI.FILE_PDF_EPOCH_EXIT = 28 -BTI.FILE_PDF_CH_NEXT = 6 -BTI.FILE_PDF_CH_LABELSIZE = 16 -BTI.FILE_PDF_CH_YLABEL = 16 -BTI.FILE_PDF_CH_OFF_FLAG = 16 -BTI.FILE_PDF_CH_EXIT = 12 +BTI.FILE_PDF_CH_NEXT = 6 +BTI.FILE_PDF_CH_LABELSIZE = 16 +BTI.FILE_PDF_CH_YLABEL = 16 +BTI.FILE_PDF_CH_OFF_FLAG = 16 +BTI.FILE_PDF_CH_EXIT = 12 -BTI.FILE_PDF_EVENT_NAME = 16 -BTI.FILE_PDF_EVENT_EXIT = 32 +BTI.FILE_PDF_EVENT_NAME = 16 +BTI.FILE_PDF_EVENT_EXIT = 32 -BTI.FILE_PDF_PROCESS_BLOCKTYPE = 20 -BTI.FILE_PDF_PROCESS_USER = 32 -BTI.FILE_PDF_PROCESS_FNAME = 256 -BTI.FILE_PDF_PROCESS_EXIT = 32 +BTI.FILE_PDF_PROCESS_BLOCKTYPE = 20 +BTI.FILE_PDF_PROCESS_USER = 32 +BTI.FILE_PDF_PROCESS_FNAME = 256 +BTI.FILE_PDF_PROCESS_EXIT = 32 -BTI.FILE_PDF_ASSOC_NEXT = 32 +BTI.FILE_PDF_ASSOC_NEXT = 32 -BTI.FILE_PDFED_NAME = 17 -BTI.FILE_PDFED_NEXT = 9 -BTI.FILE_PDFED_EXIT = 8 +BTI.FILE_PDFED_NAME = 17 +BTI.FILE_PDFED_NEXT = 9 +BTI.FILE_PDFED_EXIT = 8 # ## General data constants # -BTI.DATA_N_IDX_POINTS = 5 -BTI.DATA_ROT_N_ROW = 3 -BTI.DATA_ROT_N_COL = 3 -BTI.DATA_XFM_N_COL = 4 -BTI.DATA_XFM_N_ROW = 4 -BTI.FIFF_LOGNO = 111 +BTI.DATA_N_IDX_POINTS = 5 +BTI.DATA_ROT_N_ROW = 3 +BTI.DATA_ROT_N_COL = 3 +BTI.DATA_XFM_N_COL = 4 +BTI.DATA_XFM_N_ROW = 4 +BTI.FIFF_LOGNO = 111 # ## Channel Types # -BTI.CHTYPE_MEG = 1 -BTI.CHTYPE_EEG = 2 -BTI.CHTYPE_REFERENCE = 3 -BTI.CHTYPE_EXTERNAL = 4 -BTI.CHTYPE_TRIGGER = 5 -BTI.CHTYPE_UTILITY = 6 -BTI.CHTYPE_DERIVED = 7 -BTI.CHTYPE_SHORTED = 8 +BTI.CHTYPE_MEG = 1 +BTI.CHTYPE_EEG = 2 +BTI.CHTYPE_REFERENCE = 3 +BTI.CHTYPE_EXTERNAL = 4 +BTI.CHTYPE_TRIGGER = 5 +BTI.CHTYPE_UTILITY = 6 +BTI.CHTYPE_DERIVED = 7 +BTI.CHTYPE_SHORTED = 8 # ## Processes # -BTI.PROC_DEFAULTS = 'BTi_defaults' -BTI.PROC_FILTER = 'b_filt_hp,b_filt_lp,b_filt_notch' -BTI.PROC_BPFILTER = 'b_filt_b_pass,b_filt_b_reject' +BTI.PROC_DEFAULTS = "BTi_defaults" +BTI.PROC_FILTER = "b_filt_hp,b_filt_lp,b_filt_notch" +BTI.PROC_BPFILTER = "b_filt_b_pass,b_filt_b_reject" # ## User blocks # -BTI.UB_B_MAG_INFO = 'B_Mag_Info' -BTI.UB_B_COH_POINTS = 'B_COH_Points' -BTI.UB_B_CCP_XFM_BLOCK = 'b_ccp_xfm_block' -BTI.UB_B_EEG_LOCS = 'b_eeg_elec_locs' -BTI.UB_B_WHC_CHAN_MAP_VER = 'B_WHChanMapVer' -BTI.UB_B_WHC_CHAN_MAP = 'B_WHChanMap' -BTI.UB_B_WHS_SUBSYS_VER = 'B_WHSubsysVer' # B_WHSubsysVer -BTI.UB_B_WHS_SUBSYS = 'B_WHSubsys' -BTI.UB_B_CH_LABELS = 'B_ch_labels' -BTI.UB_B_CALIBRATION = 'B_Calibration' -BTI.UB_B_SYS_CONFIG_TIME = 'B_SysConfigTime' -BTI.UB_B_DELTA_ENABLED = 'B_DELTA_ENABLED' -BTI.UB_B_E_TABLE_USED = 
'B_E_table_used' -BTI.UB_B_E_TABLE = 'B_E_TABLE' -BTI.UB_B_WEIGHTS_USED = 'B_weights_used' -BTI.UB_B_TRIG_MASK = 'B_trig_mask' -BTI.UB_B_WEIGHT_TABLE = 'BWT_' +BTI.UB_B_MAG_INFO = "B_Mag_Info" +BTI.UB_B_COH_POINTS = "B_COH_Points" +BTI.UB_B_CCP_XFM_BLOCK = "b_ccp_xfm_block" +BTI.UB_B_EEG_LOCS = "b_eeg_elec_locs" +BTI.UB_B_WHC_CHAN_MAP_VER = "B_WHChanMapVer" +BTI.UB_B_WHC_CHAN_MAP = "B_WHChanMap" +BTI.UB_B_WHS_SUBSYS_VER = "B_WHSubsysVer" # B_WHSubsysVer +BTI.UB_B_WHS_SUBSYS = "B_WHSubsys" +BTI.UB_B_CH_LABELS = "B_ch_labels" +BTI.UB_B_CALIBRATION = "B_Calibration" +BTI.UB_B_SYS_CONFIG_TIME = "B_SysConfigTime" +BTI.UB_B_DELTA_ENABLED = "B_DELTA_ENABLED" +BTI.UB_B_E_TABLE_USED = "B_E_table_used" +BTI.UB_B_E_TABLE = "B_E_TABLE" +BTI.UB_B_WEIGHTS_USED = "B_weights_used" +BTI.UB_B_TRIG_MASK = "B_trig_mask" +BTI.UB_B_WEIGHT_TABLE = "BWT_" diff --git a/mne/io/bti/read.py b/mne/io/bti/read.py index 210ff827992..abf4b72cd9e 100644 --- a/mne/io/bti/read.py +++ b/mne/io/bti/read.py @@ -1,9 +1,10 @@ -# Authors: Denis A. Engemann -# simplified BSD-3 license +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np -from ..utils import read_str +from ..._fiff.utils import read_str def _unpack_matrix(fid, rows, cols, dtype, out_dtype): @@ -11,8 +12,7 @@ def _unpack_matrix(fid, rows, cols, dtype, out_dtype): dtype = np.dtype(dtype) string = fid.read(int(dtype.itemsize * rows * cols)) - out = np.frombuffer(string, dtype=dtype).reshape( - rows, cols).astype(out_dtype) + out = np.frombuffer(string, dtype=dtype).reshape(rows, cols).astype(out_dtype) return out @@ -29,80 +29,63 @@ def _unpack_simple(fid, dtype, out_dtype): def read_char(fid, count=1): """Read character from bti file.""" - return _unpack_simple(fid, '>S%s' % count, 'S') - - -def read_bool(fid): - """Read bool value from bti file.""" - return _unpack_simple(fid, '>?', bool) - - -def read_uint8(fid): - """Read unsigned 8bit integer from bti file.""" - return _unpack_simple(fid, '>u1', np.uint8) - - -def read_int8(fid): - """Read 8bit integer from bti file.""" - return _unpack_simple(fid, '>i1', np.int8) + return _unpack_simple(fid, f">S{count}", "S") def read_uint16(fid): """Read unsigned 16bit integer from bti file.""" - return _unpack_simple(fid, '>u2', np.uint16) + return _unpack_simple(fid, ">u2", np.uint32) def read_int16(fid): """Read 16bit integer from bti file.""" - return _unpack_simple(fid, '>i2', np.int16) + return _unpack_simple(fid, ">i2", np.int32) def read_uint32(fid): """Read unsigned 32bit integer from bti file.""" - return _unpack_simple(fid, '>u4', np.uint32) + return _unpack_simple(fid, ">u4", np.uint32) def read_int32(fid): """Read 32bit integer from bti file.""" - return _unpack_simple(fid, '>i4', np.int32) - - -def read_uint64(fid): - """Read unsigned 64bit integer from bti file.""" - return _unpack_simple(fid, '>u8', np.uint64) + return _unpack_simple(fid, ">i4", np.int32) def read_int64(fid): """Read 64bit integer from bti file.""" - return _unpack_simple(fid, '>u8', np.int64) + return _unpack_simple(fid, ">u8", np.int64) def read_float(fid): """Read 32bit float from bti file.""" - return _unpack_simple(fid, '>f4', np.float32) + return _unpack_simple(fid, ">f4", np.float32) def read_double(fid): """Read 64bit float from bti file.""" - return _unpack_simple(fid, '>f8', np.float64) + return _unpack_simple(fid, ">f8", np.float64) def read_int16_matrix(fid, rows, cols): """Read 16bit integer matrix from bti file.""" - return _unpack_matrix(fid, rows, cols, 
dtype='>i2', - out_dtype=np.int16) + return _unpack_matrix( + fid, + rows, + cols, + dtype=">i2", + out_dtype=np.int32, + ) def read_float_matrix(fid, rows, cols): """Read 32bit float matrix from bti file.""" - return _unpack_matrix(fid, rows, cols, dtype='>f4', - out_dtype=np.float32) + return _unpack_matrix(fid, rows, cols, dtype=">f4", out_dtype=np.float32) def read_double_matrix(fid, rows, cols): """Read 64bit float matrix from bti file.""" - return _unpack_matrix(fid, rows, cols, dtype='>f8', - out_dtype=np.float64) + return _unpack_matrix(fid, rows, cols, dtype=">f8", out_dtype=np.float64) def read_transform(fid): @@ -112,5 +95,4 @@ def read_transform(fid): def read_dev_header(x): """Create a dev header.""" - return dict(size=read_int32(x), checksum=read_int32(x), - reserved=read_str(x, 32)) + return dict(size=read_int32(x), checksum=read_int32(x), reserved=read_str(x, 32)) diff --git a/mne/io/bti/tests/__init__.py b/mne/io/bti/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/bti/tests/__init__.py +++ b/mne/io/bti/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/bti/tests/test_bti.py b/mne/io/bti/tests/test_bti.py index 0df08917bc6..cc595833275 100644 --- a/mne/io/bti/tests/test_bti.py +++ b/mne/io/bti/tests/test_bti.py @@ -1,33 +1,43 @@ -# Authors: Denis Engemann -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import os from collections import Counter +from functools import partial, reduce from io import BytesIO -from functools import reduce, partial from pathlib import Path import numpy as np -from numpy.testing import (assert_array_almost_equal, assert_array_equal, - assert_allclose, assert_equal) import pytest +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, +) import mne +from mne import pick_info, pick_types +from mne._fiff._digitization import _make_bti_dig_points +from mne._fiff.constants import FIFF from mne.datasets import testing -from mne.io import read_raw_fif, read_raw_bti -from mne.io._digitization import _make_bti_dig_points -from mne.io.bti.bti import (_read_config, _read_head_shape, - _read_bti_header, _get_bti_dev_t, - _correct_trans, _get_bti_info, - _loc_to_coil_trans, _convert_coil_trans, - _check_nan_dev_head_t, _rename_channels) +from mne.io import read_raw_bti, read_raw_fif +from mne.io.bti.bti import ( + _check_nan_dev_head_t, + _convert_coil_trans, + _correct_trans, + _get_bti_dev_t, + _get_bti_info, + _loc_to_coil_trans, + _read_bti_header, + _read_config, + _read_head_shape, + _rename_channels, +) from mne.io.tests.test_raw import _test_raw_reader -from mne.io.pick import pick_info -from mne.io.constants import FIFF -from mne import pick_types -from mne.utils import assert_dig_allclose from mne.transforms import Transform, combine_transforms, invert_transform +from mne.utils import assert_dig_allclose base_dir = Path(__file__).parent / "data" @@ -36,7 +46,9 @@ config_fnames = [base_dir / f"test_config_{a}" for a in archs] hs_fnames = [base_dir / f"test_hs_{a}" for a in archs] exported_fnames = [base_dir / f"exported4D_{a}_raw.fif" for a in archs] -tmp_raw_fname = base_dir / "tmp_raw.fif" +pdf_config_hs_exporteds = list( + zip(pdf_fnames, config_fnames, hs_fnames, exported_fnames) +) testing_path_bti = testing.data_path(download=False) / "BTi" fname_2500 = testing_path_bti / "erm_HFH" / "c,rfDC" @@ 
-55,21 +67,25 @@ def test_read_2500(): def test_no_loc_none(monkeypatch): """Test that we don't set loc to None when no trans is found.""" - ch_name = 'MLzA' + ch_name = "MLzA" def _read_config_bad(*args, **kwargs): cfg = _read_config(*args, **kwargs) - idx = [ch['name'] for ch in cfg['chs']].index(ch_name) - del cfg['chs'][idx]['dev']['transform'] + idx = [ch["name"] for ch in cfg["chs"]].index(ch_name) + del cfg["chs"][idx]["dev"]["transform"] return cfg - monkeypatch.setattr(mne.io.bti.bti, '_read_config', _read_config_bad) - kwargs = dict(pdf_fname=pdf_fnames[0], config_fname=config_fnames[0], - head_shape_fname=hs_fnames[0], rename_channels=False, - sort_by_ch_name=False) + monkeypatch.setattr(mne.io.bti.bti, "_read_config", _read_config_bad) + kwargs = dict( + pdf_fname=pdf_fnames[0], + config_fname=config_fnames[0], + head_shape_fname=hs_fnames[0], + rename_channels=False, + sort_by_ch_name=False, + ) raw = read_raw_bti(**kwargs) idx = raw.ch_names.index(ch_name) - assert_allclose(raw.info['chs'][idx]['loc'], np.full(12, np.nan)) + assert_allclose(raw.info["chs"][idx]["loc"], np.full(12, np.nan)) def test_read_config(): @@ -77,298 +93,340 @@ def test_read_config(): # for config in config_fname, config_solaris_fname: for config in config_fnames: cfg = _read_config(config) - assert all('unknown' not in block.lower() and block != '' - for block in cfg['user_blocks']) + assert all( + "unknown" not in block.lower() and block != "" + for block in cfg["user_blocks"] + ) def test_crop_append(): """Test crop and append raw.""" raw = _test_raw_reader( - read_raw_bti, pdf_fname=pdf_fnames[0], - config_fname=config_fnames[0], head_shape_fname=hs_fnames[0]) + read_raw_bti, + pdf_fname=pdf_fnames[0], + config_fname=config_fnames[0], + head_shape_fname=hs_fnames[0], + ) y, t = raw[:] t0, t1 = 0.25 * t[-1], 0.75 * t[-1] mask = (t0 <= t) * (t <= t1) raw_ = raw.copy().crop(t0, t1) y_, _ = raw_[:] - assert (y_.shape[1] == mask.sum()) - assert (y_.shape[0] == y.shape[0]) + assert y_.shape[1] == mask.sum() + assert y_.shape[0] == y.shape[0] -def test_transforms(): +@pytest.mark.parametrize("pdf, config, hs, exported", pdf_config_hs_exporteds) +def test_transforms(pdf, config, hs, exported): """Test transformations.""" bti_trans = (0.0, 0.02, 0.11) - bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans)) - for pdf, config, hs, in zip(pdf_fnames, config_fnames, hs_fnames): - raw = read_raw_bti(pdf, config, hs, preload=False) - dev_ctf_t = raw.info['dev_ctf_t'] - dev_head_t_old = raw.info['dev_head_t'] - ctf_head_t = raw.info['ctf_head_t'] + bti_dev_t = Transform("ctf_meg", "meg", _get_bti_dev_t(0.0, bti_trans)) + raw = read_raw_bti(pdf, config, hs, preload=False) + dev_ctf_t = raw.info["dev_ctf_t"] + dev_head_t_old = raw.info["dev_head_t"] + ctf_head_t = raw.info["ctf_head_t"] - # 1) get BTI->Neuromag - bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans)) + # 1) get BTI->Neuromag + bti_dev_t = Transform("ctf_meg", "meg", _get_bti_dev_t(0.0, bti_trans)) - # 2) get Neuromag->BTI head - t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t, - 'meg', 'ctf_head') - # 3) get Neuromag->head - dev_head_t_new = combine_transforms(t, ctf_head_t, 'meg', 'head') + # 2) get Neuromag->BTI head + t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t, "meg", "ctf_head") + # 3) get Neuromag->head + dev_head_t_new = combine_transforms(t, ctf_head_t, "meg", "head") - assert_array_equal(dev_head_t_new['trans'], dev_head_t_old['trans']) + 
assert_array_equal(dev_head_t_new["trans"], dev_head_t_old["trans"]) @pytest.mark.slowtest -def test_raw(): +@pytest.mark.parametrize("pdf, config, hs, exported", pdf_config_hs_exporteds) +def test_raw(pdf, config, hs, exported, tmp_path): """Test bti conversion to Raw object.""" - for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames, - exported_fnames): - # rx = 2 if 'linux' in pdf else 0 - pytest.raises(ValueError, read_raw_bti, pdf, 'eggs', preload=False) - pytest.raises(ValueError, read_raw_bti, pdf, config, 'spam', - preload=False) - if tmp_raw_fname.exists(): - os.remove(tmp_raw_fname) - ex = read_raw_fif(exported, preload=True) - ra = read_raw_bti(pdf, config, hs, preload=False) - assert ('RawBTi' in repr(ra)) - assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH]) - assert_array_almost_equal(ex.info['dev_head_t']['trans'], - ra.info['dev_head_t']['trans'], 7) - assert len(ex.info['dig']) in (3563, 5154) - assert_dig_allclose(ex.info, ra.info, limit=100) - coil1, coil2 = [np.concatenate([d['loc'].flatten() - for d in r_.info['chs'][:NCH]]) - for r_ in (ra, ex)] - assert_array_almost_equal(coil1, coil2, 7) - - loc1, loc2 = [np.concatenate([d['loc'].flatten() - for d in r_.info['chs'][:NCH]]) - for r_ in (ra, ex)] - assert_allclose(loc1, loc2) - - assert_allclose(ra[:NCH][0], ex[:NCH][0]) - assert_array_equal([c['range'] for c in ra.info['chs'][:NCH]], - [c['range'] for c in ex.info['chs'][:NCH]]) - assert_array_equal([c['cal'] for c in ra.info['chs'][:NCH]], - [c['cal'] for c in ex.info['chs'][:NCH]]) - assert_array_equal(ra._cals[:NCH], ex._cals[:NCH]) - - # check our transforms - for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'): - if ex.info[key] is None: - pass - else: - assert (ra.info[key] is not None) - for ent in ('to', 'from', 'trans'): - assert_allclose(ex.info[key][ent], - ra.info[key][ent]) - - ra.save(tmp_raw_fname) - re = read_raw_fif(tmp_raw_fname) - print(re) - for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'): - assert (isinstance(re.info[key], dict)) - this_t = re.info[key]['trans'] - assert_equal(this_t.shape, (4, 4)) - # check that matrix by is not identity - assert (not np.allclose(this_t, np.eye(4))) - os.remove(tmp_raw_fname) - - -def test_info_no_rename_no_reorder_no_pdf(): + # rx = 2 if 'linux' in pdf else 0 + pytest.raises(ValueError, read_raw_bti, pdf, "eggs", preload=False) + pytest.raises(ValueError, read_raw_bti, pdf, config, "spam", preload=False) + tmp_raw_fname = tmp_path / "tmp_raw.fif" + ex = read_raw_fif(exported, preload=True) + ra = read_raw_bti(pdf, config, hs, preload=False) + assert "RawBTi" in repr(ra) + assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH]) + assert_array_almost_equal( + ex.info["dev_head_t"]["trans"], ra.info["dev_head_t"]["trans"], 7 + ) + assert len(ex.info["dig"]) in (3563, 5154) + assert_dig_allclose(ex.info, ra.info, limit=100) + coil1, coil2 = ( + np.concatenate([d["loc"].flatten() for d in r_.info["chs"][:NCH]]) + for r_ in (ra, ex) + ) + assert_array_almost_equal(coil1, coil2, 7) + + loc1, loc2 = ( + np.concatenate([d["loc"].flatten() for d in r_.info["chs"][:NCH]]) + for r_ in (ra, ex) + ) + assert_allclose(loc1, loc2) + + assert_allclose(ra[:NCH][0], ex[:NCH][0]) + assert_array_equal( + [c["range"] for c in ra.info["chs"][:NCH]], + [c["range"] for c in ex.info["chs"][:NCH]], + ) + assert_array_equal( + [c["cal"] for c in ra.info["chs"][:NCH]], + [c["cal"] for c in ex.info["chs"][:NCH]], + ) + assert_array_equal(ra._cals[:NCH], ex._cals[:NCH]) + + # check our transforms + for key in 
("dev_head_t", "dev_ctf_t", "ctf_head_t"): + if ex.info[key] is None: + pass + else: + assert ra.info[key] is not None + for ent in ("to", "from", "trans"): + assert_allclose(ex.info[key][ent], ra.info[key][ent]) + + # MNE-BIDS needs these + for key in ("pdf_fname", "config_fname", "head_shape_fname"): + assert os.path.isfile(ra._raw_extras[0][key]) + + ra.save(tmp_raw_fname) + re = read_raw_fif(tmp_raw_fname) + print(re) + for key in ("dev_head_t", "dev_ctf_t", "ctf_head_t"): + assert isinstance(re.info[key], dict) + this_t = re.info[key]["trans"] + assert_equal(this_t.shape, (4, 4)) + # check that matrix by is not identity + assert not np.allclose(this_t, np.eye(4)) + + +@pytest.mark.parametrize("pdf, config, hs, exported", pdf_config_hs_exporteds) +def test_info_no_rename_no_reorder_no_pdf(pdf, config, hs, exported): """Test private renaming, reordering and partial construction option.""" - for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames): - info, bti_info = _get_bti_info( - pdf_fname=pdf, config_fname=config, head_shape_fname=hs, - rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False, - ecg_ch='E31', eog_ch=('E63', 'E64'), - rename_channels=False, sort_by_ch_name=False) - info2, bti_info = _get_bti_info( - pdf_fname=None, config_fname=config, head_shape_fname=hs, - rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False, - ecg_ch='E31', eog_ch=('E63', 'E64'), - rename_channels=False, sort_by_ch_name=False) - - assert_equal(info['ch_names'], - [ch['ch_name'] for ch in info['chs']]) - assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5], - ['A22', 'A2', 'A104', 'A241', 'A138']) - assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:], - ['A133', 'A158', 'A44', 'A134', 'A216']) - - info = pick_info(info, pick_types(info, meg=True, stim=True, - resp=True)) - info2 = pick_info(info2, pick_types(info2, meg=True, stim=True, - resp=True)) - - assert (info['sfreq'] is not None) - assert (info['lowpass'] is not None) - assert (info['highpass'] is not None) - assert (info['meas_date'] is not None) - - assert_equal(info2['sfreq'], None) - assert_equal(info2['lowpass'], None) - assert_equal(info2['highpass'], None) - assert_equal(info2['meas_date'], None) - - assert_equal(info['ch_names'], info2['ch_names']) - assert_equal(info['ch_names'], info2['ch_names']) - for key in ['dev_ctf_t', 'dev_head_t', 'ctf_head_t']: - assert_array_equal(info[key]['trans'], info2[key]['trans']) - - assert_array_equal( - np.array([ch['loc'] for ch in info['chs']]), - np.array([ch['loc'] for ch in info2['chs']])) + info, bti_info = _get_bti_info( + pdf_fname=pdf, + config_fname=config, + head_shape_fname=hs, + rotation_x=0.0, + translation=(0.0, 0.02, 0.11), + convert=False, + ecg_ch="E31", + eog_ch=("E63", "E64"), + rename_channels=False, + sort_by_ch_name=False, + ) + info2, bti_info = _get_bti_info( + pdf_fname=None, + config_fname=config, + head_shape_fname=hs, + rotation_x=0.0, + translation=(0.0, 0.02, 0.11), + convert=False, + ecg_ch="E31", + eog_ch=("E63", "E64"), + rename_channels=False, + sort_by_ch_name=False, + ) + + assert_equal(info["ch_names"], [ch["ch_name"] for ch in info["chs"]]) + assert_equal( + [n for n in info["ch_names"] if n.startswith("A")][:5], + ["A22", "A2", "A104", "A241", "A138"], + ) + assert_equal( + [n for n in info["ch_names"] if n.startswith("A")][-5:], + ["A133", "A158", "A44", "A134", "A216"], + ) + + info = pick_info(info, pick_types(info, meg=True, stim=True, resp=True)) + info2 = pick_info(info2, pick_types(info2, meg=True, 
stim=True, resp=True)) + + assert info["sfreq"] is not None + assert info["lowpass"] is not None + assert info["highpass"] is not None + assert info["meas_date"] is not None + + assert_equal(info2["sfreq"], None) + assert_equal(info2["lowpass"], None) + assert_equal(info2["highpass"], None) + assert_equal(info2["meas_date"], None) + + assert_equal(info["ch_names"], info2["ch_names"]) + assert_equal(info["ch_names"], info2["ch_names"]) + for key in ["dev_ctf_t", "dev_head_t", "ctf_head_t"]: + assert_array_equal(info[key]["trans"], info2[key]["trans"]) + + assert_array_equal( + np.array([ch["loc"] for ch in info["chs"]]), + np.array([ch["loc"] for ch in info2["chs"]]), + ) # just check reading data | corner case raw1 = read_raw_bti( - pdf_fname=pdf, config_fname=config, head_shape_fname=None, - sort_by_ch_name=False, preload=True) + pdf_fname=pdf, + config_fname=config, + head_shape_fname=None, + sort_by_ch_name=False, + preload=True, + ) # just check reading data | corner case raw2 = read_raw_bti( - pdf_fname=pdf, config_fname=config, head_shape_fname=None, + pdf_fname=pdf, + config_fname=config, + head_shape_fname=None, rename_channels=False, - sort_by_ch_name=True, preload=True) + sort_by_ch_name=True, + preload=True, + ) - sort_idx = [raw1.bti_ch_labels.index(ch) for ch in raw2.bti_ch_labels] + bti_ch_labels_1 = raw1._raw_extras[0]["bti_ch_labels"] + bti_ch_labels_2 = raw2._raw_extras[0]["bti_ch_labels"] + sort_idx = [bti_ch_labels_1.index(ch) for ch in bti_ch_labels_2] raw1._data = raw1._data[sort_idx] assert_array_equal(raw1._data, raw2._data) - assert_array_equal(raw2.bti_ch_labels, raw2.ch_names) + assert_array_equal(bti_ch_labels_2, raw2.ch_names) -def test_no_conversion(): +@pytest.mark.parametrize("pdf, config, hs, exported", pdf_config_hs_exporteds) +def test_no_conversion(pdf, config, hs, exported): """Test bti no-conversion option.""" get_info = partial( _get_bti_info, - rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False, - ecg_ch='E31', eog_ch=('E63', 'E64'), - rename_channels=False, sort_by_ch_name=False) - - for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames): - raw_info, _ = get_info(pdf, config, hs, convert=False) - raw_info_con = read_raw_bti( - pdf_fname=pdf, config_fname=config, head_shape_fname=hs, - convert=True, preload=False).info - - pick_info(raw_info_con, - pick_types(raw_info_con, meg=True, ref_meg=True), - copy=False) - pick_info(raw_info, - pick_types(raw_info, meg=True, ref_meg=True), copy=False) - bti_info = _read_bti_header(pdf, config) - dev_ctf_t = _correct_trans(bti_info['bti_transform'][0]) - assert_array_equal(dev_ctf_t, raw_info['dev_ctf_t']['trans']) - assert_array_equal(raw_info['dev_head_t']['trans'], np.eye(4)) - assert_array_equal(raw_info['ctf_head_t']['trans'], np.eye(4)) - - nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs) - dig, t, _ = _make_bti_dig_points(nasion, lpa, rpa, hpi, dig_points, - convert=False, use_hpi=False) - - assert_array_equal(t['trans'], np.eye(4)) - - for ii, (old, new, con) in enumerate(zip( - dig, raw_info['dig'], raw_info_con['dig'])): - assert_equal(old['ident'], new['ident']) - assert_array_equal(old['r'], new['r']) - assert (not np.allclose(old['r'], con['r'])) - - if ii > 10: - break - - ch_map = {ch['chan_label']: ch['loc'] for ch in bti_info['chs']} - - for ii, ch_label in enumerate(raw_info['ch_names']): - if not ch_label.startswith('A'): - continue - t1 = ch_map[ch_label] # correction already performed in bti_info - t2 = raw_info['chs'][ii]['loc'] - t3 = 
raw_info_con['chs'][ii]['loc'] - assert_allclose(t1, t2, atol=1e-15) - assert (not np.allclose(t1, t3)) - idx_a = raw_info_con['ch_names'].index('MEG 001') - idx_b = raw_info['ch_names'].index('A22') - assert_equal( - raw_info_con['chs'][idx_a]['coord_frame'], - FIFF.FIFFV_COORD_DEVICE) - assert_equal( - raw_info['chs'][idx_b]['coord_frame'], - FIFF.FIFFV_MNE_COORD_4D_HEAD) - - -def test_bytes_io(): + rotation_x=0.0, + translation=(0.0, 0.02, 0.11), + convert=False, + ecg_ch="E31", + eog_ch=("E63", "E64"), + rename_channels=False, + sort_by_ch_name=False, + ) + + raw_info, _ = get_info(pdf, config, hs, convert=False) + raw_info_con = read_raw_bti( + pdf_fname=pdf, + config_fname=config, + head_shape_fname=hs, + convert=True, + preload=False, + ).info + + pick_info( + raw_info_con, pick_types(raw_info_con, meg=True, ref_meg=True), copy=False + ) + pick_info(raw_info, pick_types(raw_info, meg=True, ref_meg=True), copy=False) + bti_info = _read_bti_header(pdf, config) + dev_ctf_t = _correct_trans(bti_info["bti_transform"][0]) + assert_array_equal(dev_ctf_t, raw_info["dev_ctf_t"]["trans"]) + assert_array_equal(raw_info["dev_head_t"]["trans"], np.eye(4)) + assert_array_equal(raw_info["ctf_head_t"]["trans"], np.eye(4)) + + nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs) + dig, t, _ = _make_bti_dig_points( + nasion, lpa, rpa, hpi, dig_points, convert=False, use_hpi=False + ) + + assert_array_equal(t["trans"], np.eye(4)) + + for ii, (old, new, con) in enumerate( + zip(dig, raw_info["dig"], raw_info_con["dig"]) + ): + assert_equal(old["ident"], new["ident"]) + assert_array_equal(old["r"], new["r"]) + assert not np.allclose(old["r"], con["r"]) + + if ii > 10: + break + + ch_map = {ch["chan_label"]: ch["loc"] for ch in bti_info["chs"]} + + for ii, ch_label in enumerate(raw_info["ch_names"]): + if not ch_label.startswith("A"): + continue + t1 = ch_map[ch_label] # correction already performed in bti_info + t2 = raw_info["chs"][ii]["loc"] + t3 = raw_info_con["chs"][ii]["loc"] + assert_allclose(t1, t2, atol=1e-15) + assert not np.allclose(t1, t3) + idx_a = raw_info_con["ch_names"].index("MEG 001") + idx_b = raw_info["ch_names"].index("A22") + assert_equal(raw_info_con["chs"][idx_a]["coord_frame"], FIFF.FIFFV_COORD_DEVICE) + assert_equal( + raw_info["chs"][idx_b]["coord_frame"], FIFF.FIFFV_MNE_COORD_4D_HEAD + ) + + +@pytest.mark.parametrize("pdf, config, hs, exported", pdf_config_hs_exporteds) +def test_bytes_io(pdf, config, hs, exported): """Test bti bytes-io API.""" - for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames): - raw = read_raw_bti(pdf, config, hs, convert=True, preload=False) + raw = read_raw_bti(pdf, config, hs, convert=True, preload=False) - with open(pdf, 'rb') as fid: - pdf = BytesIO(fid.read()) - with open(config, 'rb') as fid: - config = BytesIO(fid.read()) - with open(hs, 'rb') as fid: - hs = BytesIO(fid.read()) + with open(pdf, "rb") as fid: + pdf = BytesIO(fid.read()) + with open(config, "rb") as fid: + config = BytesIO(fid.read()) + with open(hs, "rb") as fid: + hs = BytesIO(fid.read()) - raw2 = read_raw_bti(pdf, config, hs, convert=True, preload=False) - repr(raw2) - assert_array_equal(raw[:][0], raw2[:][0]) + raw2 = read_raw_bti(pdf, config, hs, convert=True, preload=False) + repr(raw2) + assert_array_equal(raw[:][0], raw2[:][0]) -def test_setup_headshape(): +@pytest.mark.parametrize("hs", hs_fnames) +def test_setup_headshape(hs): """Test reading bti headshape.""" - for hs in hs_fnames: - nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs) - dig, t, _ = 
_make_bti_dig_points(nasion, lpa, rpa, hpi, dig_points) + nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs) + dig, t, _ = _make_bti_dig_points(nasion, lpa, rpa, hpi, dig_points) - expected = {'kind', 'ident', 'r'} - found = set(reduce(lambda x, y: list(x) + list(y), - [d.keys() for d in dig])) - assert (not expected - found) + expected = {"kind", "ident", "r"} + found = set(reduce(lambda x, y: list(x) + list(y), [d.keys() for d in dig])) + assert not expected - found -def test_nan_trans(): +@pytest.mark.parametrize("pdf, config, hs, exported", pdf_config_hs_exporteds) +def test_nan_trans(pdf, config, hs, exported): """Test unlikely case that the device to head transform is empty.""" - for ii, pdf_fname in enumerate(pdf_fnames): - bti_info = _read_bti_header( - pdf_fname, config_fnames[ii], sort_by_ch_name=True) - - dev_ctf_t = Transform('ctf_meg', 'ctf_head', - _correct_trans(bti_info['bti_transform'][0])) - - # reading params - convert = True - rotation_x = 0. - translation = (0.0, 0.02, 0.11) - bti_dev_t = _get_bti_dev_t(rotation_x, translation) - bti_dev_t = Transform('ctf_meg', 'meg', bti_dev_t) - ecg_ch = 'E31' - eog_ch = ('E63', 'E64') - - # read parts of info to get trans - bti_ch_names = list() - for ch in bti_info['chs']: - ch_name = ch['name'] - if not ch_name.startswith('A'): - ch_name = ch.get('chan_label', ch_name) - bti_ch_names.append(ch_name) - - neuromag_ch_names = _rename_channels( - bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch) - ch_mapping = zip(bti_ch_names, neuromag_ch_names) - - # add some nan in some locations! - dev_ctf_t['trans'][:, 3] = np.nan - _check_nan_dev_head_t(dev_ctf_t) - for idx, (chan_4d, chan_neuromag) in enumerate(ch_mapping): - loc = bti_info['chs'][idx]['loc'] - if loc is not None: - if convert: - t = _loc_to_coil_trans(bti_info['chs'][idx]['loc']) - t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t) + bti_info = _read_bti_header(pdf, config, sort_by_ch_name=True) + + dev_ctf_t = Transform( + "ctf_meg", "ctf_head", _correct_trans(bti_info["bti_transform"][0]) + ) + + # reading params + convert = True + rotation_x = 0.0 + translation = (0.0, 0.02, 0.11) + bti_dev_t = _get_bti_dev_t(rotation_x, translation) + bti_dev_t = Transform("ctf_meg", "meg", bti_dev_t) + ecg_ch = "E31" + eog_ch = ("E63", "E64") + + # read parts of info to get trans + bti_ch_names = list() + for ch in bti_info["chs"]: + ch_name = ch["name"] + if not ch_name.startswith("A"): + ch_name = ch.get("chan_label", ch_name) + bti_ch_names.append(ch_name) + + neuromag_ch_names = _rename_channels(bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch) + ch_mapping = zip(bti_ch_names, neuromag_ch_names) + + # add some nan in some locations! 
+ dev_ctf_t["trans"][:, 3] = np.nan + _check_nan_dev_head_t(dev_ctf_t) + for idx, (chan_4d, chan_neuromag) in enumerate(ch_mapping): + loc = bti_info["chs"][idx]["loc"] + if loc is not None: + if convert: + t = _loc_to_coil_trans(bti_info["chs"][idx]["loc"]) + t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t) @testing.requires_testing_data -@pytest.mark.parametrize('fname', (fname_sim, fname_sim_filt)) -@pytest.mark.parametrize('preload', (True, False)) +@pytest.mark.parametrize("fname", (fname_sim, fname_sim_filt)) +@pytest.mark.parametrize("preload", (True, False)) def test_bti_ch_data(fname, preload): """Test for gh-6048.""" read_raw_bti(fname, preload=preload) # used to fail with ascii decode err @@ -377,9 +435,9 @@ def test_bti_ch_data(fname, preload): @testing.requires_testing_data def test_bti_set_eog(): """Check that EOG channels can be set (gh-10092).""" - raw = read_raw_bti(fname_sim, - preload=False, - eog_ch=('X65', 'X67', 'X69', 'X66', 'X68')) + raw = read_raw_bti( + fname_sim, preload=False, eog_ch=("X65", "X67", "X69", "X66", "X68") + ) assert_equal(len(pick_types(raw.info, eog=True)), 5) @@ -402,17 +460,17 @@ def test_bti_ecg_eog_emg(monkeypatch): # already exist got_map = dict(zip(raw.ch_names, ch_types)) kind_map = dict( - stim=['TRIGGER', 'RESPONSE'], - misc=['UACurrent'], + stim=["TRIGGER", "RESPONSE"], + misc=["UACurrent"], ) for kind, ch_names in kind_map.items(): for ch_name in ch_names: assert got_map[ch_name] == kind kind_map = dict( - misc=['SA1', 'SA2', 'SA3'], - ecg=['ECG+', 'ECG-'], - eog=['VEOG+', 'HEOG+', 'VEOG-', 'HEOG-'], - emg=['EMG_LF', 'EMG_LH', 'EMG_RF', 'EMG_RH'], + misc=["SA1", "SA2", "SA3"], + ecg=["ECG+", "ECG-"], + eog=["VEOG+", "HEOG+", "VEOG-", "HEOG-"], + emg=["EMG_LF", "EMG_LH", "EMG_RF", "EMG_RH"], ) new_names = sum(kind_map.values(), list()) assert len(new_names) == 13 @@ -420,11 +478,11 @@ def test_bti_ecg_eog_emg(monkeypatch): def _read_bti_header_2(*args, **kwargs): bti_info = _read_bti_header(*args, **kwargs) - for ch_name, ch in zip(new_names, bti_info['chs'][::-1]): - ch['chan_label'] = ch_name + for ch_name, ch in zip(new_names, bti_info["chs"][::-1]): + ch["chan_label"] = ch_name return bti_info - monkeypatch.setattr(mne.io.bti.bti, '_read_bti_header', _read_bti_header_2) + monkeypatch.setattr(mne.io.bti.bti, "_read_bti_header", _read_bti_header_2) raw = read_raw_bti(fname_2500, **kwargs) got_map = dict(zip(raw.ch_names, raw.get_channel_types())) got = Counter(got_map.values()) @@ -437,5 +495,5 @@ def _read_bti_header_2(*args, **kwargs): for kind, ch_names in kind_map.items(): for ch_name in ch_names: assert ch_name in raw.ch_names - err_msg = f'{ch_name} type {got_map[ch_name]} !+ {kind}' + err_msg = f"{ch_name} type {got_map[ch_name]} !+ {kind}" assert got_map[ch_name] == kind, err_msg diff --git a/mne/io/cnt/__init__.py b/mne/io/cnt/__init__.py index 5021fd75a4c..10aac79f2f1 100644 --- a/mne/io/cnt/__init__.py +++ b/mne/io/cnt/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """CNT data reader.""" from .cnt import read_raw_cnt diff --git a/mne/io/cnt/_utils.py b/mne/io/cnt/_utils.py index 19e459ac506..cf2d45cb1ef 100644 --- a/mne/io/cnt/_utils.py +++ b/mne/io/cnt/_utils.py @@ -1,14 +1,14 @@ -# Author: Joan Massich -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
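
As exercised by ``test_bti_set_eog`` and ``test_bti_ecg_eog_emg`` above, recording-specific analog channels can be re-typed at read time; a hedged sketch follows (the labels mirror the testing data and will differ between sites).

import mne

raw = mne.io.read_raw_bti(
    "c,rfDC",                                    # hypothetical 4D run
    ecg_ch="E31",                                # the reader's default ECG label
    eog_ch=("X65", "X67", "X69", "X66", "X68"),  # site-specific EOG labels
    preload=False,
)
print(len(mne.pick_types(raw.info, eog=True)))   # number of channels typed as EOG
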
-from struct import Struct from collections import namedtuple -from math import modf from datetime import datetime +from math import modf from os import SEEK_END -import numpy as np +from struct import Struct +import numpy as np from ...utils import warn @@ -25,15 +25,14 @@ def _read_teeg(f, teeg_offset): } TEEG; """ # we use a more descriptive names based on TEEG doc comments - Teeg = namedtuple('Teeg', 'event_type total_length offset') - teeg_parser = Struct('3 range 0-15 bit coded response pad */ @@ -60,23 +63,27 @@ def _read_teeg(f, teeg_offset): # needed for backward compat: EVENT type 3 has the same structure as type 2 -CNTEventType3 = namedtuple('CNTEventType3', - ('StimType KeyBoard KeyPad_Accept Offset Type ' - 'Code Latency EpochEvent Accept2 Accuracy')) +CNTEventType3 = namedtuple( + "CNTEventType3", + ( + "StimType KeyBoard KeyPad_Accept Offset Type " + "Code Latency EpochEvent Accept2 Accuracy" + ), +) def _get_event_parser(event_type): if event_type == 1: event_maker = CNTEventType1 - struct_pattern = ' -# Joan Massich -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. + from os import path import numpy as np -from ...utils import warn, fill_doc, _check_option +from ..._fiff._digitization import _make_dig_points +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _create_chs, _find_channels, _mult_cal_one, read_str +from ...annotations import Annotations from ...channels.layout import _topo_to_sphere -from ..constants import FIFF -from .._digitization import _make_dig_points -from ..utils import (_mult_cal_one, _find_channels, _create_chs, read_str) -from ..meas_info import _empty_info +from ...utils import _check_option, _explain_exception, _validate_type, fill_doc, warn from ..base import BaseRaw -from ...annotations import Annotations - - -from ._utils import (_read_teeg, _get_event_parser, _session_date_2_meas_date, - _compute_robust_event_table_position, CNTEventType3) +from ._utils import ( + CNTEventType3, + _compute_robust_event_table_position, + _get_event_parser, + _read_teeg, + _session_date_2_meas_date, +) -def _read_annotations_cnt(fname, data_format='int16'): +def _read_annotations_cnt(fname, data_format="int16"): """CNT Annotation File Reader. 
This method opens the .cnt files, searches all the metadata to construct @@ -46,31 +49,86 @@ def _read_annotations_cnt(fname, data_format='int16'): SETUP_NCHANNELS_OFFSET = 370 SETUP_RATE_OFFSET = 376 - def _translating_function(offset, n_channels, event_type, - data_format=data_format): - n_bytes = 2 if data_format == 'int16' else 4 + def _accept_reject_function(keypad_accept): + accept_list = [] + for code in keypad_accept: + if "xd0" in str(code): + accept_list.append("good") + elif "xc0" in str(code): + accept_list.append("bad") + else: + accept_list.append("NA") + return np.array(accept_list) + + def _translating_function(offset, n_channels, event_type, data_format=data_format): + n_bytes = 2 if data_format == "int16" else 4 if event_type == CNTEventType3: offset *= n_bytes * n_channels event_time = offset - 900 - (75 * n_channels) event_time //= n_channels * n_bytes - return event_time - 1 + event_time = event_time - 1 + # Prevent negative event times + np.clip(event_time, 0, None, out=event_time) + return event_time + + def _update_bad_span_onset(accept_reject, onset, duration, description): + accept_reject = accept_reject.tolist() + onset = onset.tolist() + duration = duration.tolist() + description = description.tolist() + # If there are no bad spans, return original parameters + if "bad" not in accept_reject: + return np.array(onset), np.array(duration), np.array(description) + # Create lists of bad and good span markers and onset + bad_good_span_markers = [i for i in accept_reject if i in ["bad", "good"]] + bad_good_onset = [ + onset[i] + for i, value in enumerate(accept_reject) + if value in ["bad", "good"] + ] + # Calculate duration of bad span + first_bad_index = bad_good_span_markers.index("bad") + duration_list = [ + bad_good_onset[i + 1] - bad_good_onset[i] + for i in range(first_bad_index, len(bad_good_span_markers), 2) + ] + # Add bad event marker duration and description + duration_list_index = 0 + for i in range(len(onset)): + if accept_reject[i] == "bad": + duration[i] = duration_list[duration_list_index] + description[i] = "BAD_" + description[i] + duration_list_index += 1 + # Remove good span markers + final_onset, final_duration, final_description = [], [], [] + for i in range(len(accept_reject)): + if accept_reject[i] != "good": + final_onset.append(onset[i]) + final_duration.append(duration[i]) + final_description.append(description[i]) + return ( + np.array(final_onset), + np.array(final_duration), + np.array(final_description), + ) - with open(fname, 'rb') as fid: + with open(fname, "rb") as fid: fid.seek(SETUP_NCHANNELS_OFFSET) - (n_channels,) = np.frombuffer(fid.read(2), dtype='> 4 + # Lower nibble (4 bits) keypad button press + keypad = event.KeyPad_Accept[0] & 0x0F + if str(keypad) != "0": + description.append(f"KeyPad Response {keypad}") + elif event.KeyBoard != 0: + description.append(f"Keyboard Response {event.KeyBoard}") + else: + description.append(str(event.StimType)) + + description = np.array(description) + + onset, duration, description = _update_bad_span_onset( + accept_reject, onset / sfreq, duration, description + ) + return Annotations( + onset=onset, duration=duration, description=description, orig_time=None + ) @fill_doc -def read_raw_cnt(input_fname, eog=(), misc=(), ecg=(), - emg=(), data_format='auto', date_format='mm/dd/yy', - preload=False, verbose=None): +def read_raw_cnt( + input_fname, + eog=(), + misc=(), + ecg=(), + emg=(), + data_format="auto", + date_format="mm/dd/yy", + *, + header="auto", + preload=False, + verbose=None, +) -> 
"RawCNT": """Read CNT data as raw object. .. Note:: @@ -113,9 +204,9 @@ def read_raw_cnt(input_fname, eog=(), misc=(), ecg=(), Montages can be created/imported with: - Standard montages with :func:`mne.channels.make_standard_montage` - - Montages for `Compumedics systems `_ with - :func:`mne.channels.read_dig_dat` + - Montages for `Compumedics systems + `__ with + :func:`mne.channels.read_dig_curry` - Other reader functions are listed under *See Also* at :class:`mne.channels.DigMontage` @@ -145,6 +236,13 @@ def read_raw_cnt(input_fname, eog=(), misc=(), ecg=(), Defaults to ``'auto'``. date_format : ``'mm/dd/yy'`` | ``'dd/mm/yy'`` Format of date in the header. Defaults to ``'mm/dd/yy'``. + header : ``'auto'`` | ``'new'`` | ``'old'`` + Defines the header format. Used to describe how bad channels + are formatted. If auto, reads using old and new header and + if either contain a bad channel make channel bad. + Defaults to ``'auto'``. + + .. versionadded:: 1.6 %(preload)s %(verbose)s @@ -162,163 +260,204 @@ def read_raw_cnt(input_fname, eog=(), misc=(), ecg=(), ----- .. versionadded:: 0.12 """ - return RawCNT(input_fname, eog=eog, misc=misc, ecg=ecg, - emg=emg, data_format=data_format, date_format=date_format, - preload=preload, verbose=verbose) + return RawCNT( + input_fname, + eog=eog, + misc=misc, + ecg=ecg, + emg=emg, + data_format=data_format, + date_format=date_format, + header=header, + preload=preload, + verbose=verbose, + ) -def _get_cnt_info(input_fname, eog, ecg, emg, misc, data_format, date_format): +def _get_cnt_info(input_fname, eog, ecg, emg, misc, data_format, date_format, header): """Read the cnt header.""" data_offset = 900 # Size of the 'SETUP' header. cnt_info = dict() # Reading only the fields of interest. Structure of the whole header at # http://paulbourke.net/dataformats/eeg/ - with open(input_fname, 'rb', buffering=0) as fid: + with open(input_fname, "rb", buffering=0) as fid: fid.seek(21) patient_id = read_str(fid, 20) patient_id = int(patient_id) if patient_id.isdigit() else 0 fid.seek(121) patient_name = read_str(fid, 20).split() - last_name = patient_name[0] if len(patient_name) > 0 else '' - first_name = patient_name[-1] if len(patient_name) > 0 else '' + last_name = patient_name[0] if len(patient_name) > 0 else "" + first_name = patient_name[-1] if len(patient_name) > 0 else "" fid.seek(2, 1) sex = read_str(fid, 1) - if sex == 'M': + if sex == "M": sex = FIFF.FIFFV_SUBJ_SEX_MALE - elif sex == 'F': + elif sex == "F": sex = FIFF.FIFFV_SUBJ_SEX_FEMALE else: # can be 'U' sex = FIFF.FIFFV_SUBJ_SEX_UNKNOWN hand = read_str(fid, 1) - if hand == 'R': + if hand == "R": hand = FIFF.FIFFV_SUBJ_HAND_RIGHT - elif hand == 'L': + elif hand == "L": hand = FIFF.FIFFV_SUBJ_HAND_LEFT else: # can be 'M' for mixed or 'U' hand = None fid.seek(205) session_label = read_str(fid, 20) - session_date = ('%s %s' % (read_str(fid, 10), read_str(fid, 12))) + session_date = f"{read_str(fid, 10)} {read_str(fid, 12)}" meas_date = _session_date_2_meas_date(session_date, date_format) fid.seek(370) - n_channels = np.fromfile(fid, dtype='= 0] + eog = [idx for idx in np.fromfile(fid, dtype="i2", count=2) if idx >= 0] fid.seek(438) - lowpass_toggle = np.fromfile(fid, 'i1', count=1)[0] - highpass_toggle = np.fromfile(fid, 'i1', count=1)[0] + lowpass_toggle = np.fromfile(fid, "i1", count=1).item() + highpass_toggle = np.fromfile(fid, "i1", count=1).item() # Header has a field for number of samples, but it does not seem to be # too reliable. That's why we have option for setting n_bytes manually. 
fid.seek(864) - n_samples = np.fromfile(fid, dtype=' n_samples: + n_bytes = 4 + n_samples = n_samples_header + warn( + "Annotations are outside data range. " + "Changing data format to 'int32'." + ) else: n_bytes = data_size // (n_samples * n_channels) else: - n_bytes = 2 if data_format == 'int16' else 4 + n_bytes = 2 if data_format == "int16" else 4 n_samples = data_size // (n_bytes * n_channels) + # See PR #12393 + if n_samples_header != 0: + n_samples = n_samples_header # Channel offset refers to the size of blocks per channel in the file. - cnt_info['channel_offset'] = np.fromfile(fid, dtype=' 1: - cnt_info['channel_offset'] //= n_bytes + cnt_info["channel_offset"] = np.fromfile(fid, dtype=" 1: + cnt_info["channel_offset"] //= n_bytes else: - cnt_info['channel_offset'] = 1 + cnt_info["channel_offset"] = 1 - ch_names, cals, baselines, chs, pos = ( - list(), list(), list(), list(), list() - ) + ch_names, cals, baselines, chs, pos = (list(), list(), list(), list(), list()) bads = list() + _validate_type(header, str, "header") + _check_option("header", header, ("auto", "new", "old")) for ch_idx in range(n_channels): # ELECTLOC fields fid.seek(data_offset + 75 * ch_idx) ch_name = read_str(fid, 10) ch_names.append(ch_name) - fid.seek(data_offset + 75 * ch_idx + 4) - if np.fromfile(fid, dtype='u1', count=1)[0]: - bads.append(ch_name) + + # Some files have bad channels marked differently in the header. + if header in ("new", "auto"): + fid.seek(data_offset + 75 * ch_idx + 14) + if np.fromfile(fid, dtype="u1", count=1).item(): + bads.append(ch_name) + if header in ("old", "auto"): + fid.seek(data_offset + 75 * ch_idx + 4) + if np.fromfile(fid, dtype="u1", count=1).item(): + bads.append(ch_name) + fid.seek(data_offset + 75 * ch_idx + 19) - xy = np.fromfile(fid, dtype='f4', count=2) + xy = np.fromfile(fid, dtype="f4", count=2) xy[1] *= -1 # invert y-axis pos.append(xy) fid.seek(data_offset + 75 * ch_idx + 47) # Baselines are subtracted before scaling the data. 
- baselines.append(np.fromfile(fid, dtype='i2', count=1)[0]) + baselines.append(np.fromfile(fid, dtype="i2", count=1).item()) fid.seek(data_offset + 75 * ch_idx + 59) - sensitivity = np.fromfile(fid, dtype='f4', count=1)[0] + sensitivity = np.fromfile(fid, dtype="f4", count=1).item() fid.seek(data_offset + 75 * ch_idx + 71) - cal = np.fromfile(fid, dtype='f4', count=1)[0] + cal = np.fromfile(fid, dtype="f4", count=1).item() cals.append(cal * sensitivity * 1e-6 / 204.8) info = _empty_info(sfreq) if lowpass_toggle == 1: - info['lowpass'] = highcutoff + info["lowpass"] = highcutoff if highpass_toggle == 1: - info['highpass'] = lowcutoff - subject_info = {'hand': hand, 'id': patient_id, 'sex': sex, - 'first_name': first_name, 'last_name': last_name} - - if eog == 'auto': - eog = _find_channels(ch_names, 'EOG') - if ecg == 'auto': - ecg = _find_channels(ch_names, 'ECG') - if emg == 'auto': - emg = _find_channels(ch_names, 'EMG') - - chs = _create_chs(ch_names, cals, FIFF.FIFFV_COIL_EEG, - FIFF.FIFFV_EEG_CH, eog, ecg, emg, misc) - eegs = [idx for idx, ch in enumerate(chs) if - ch['coil_type'] == FIFF.FIFFV_COIL_EEG] + info["highpass"] = lowcutoff + subject_info = { + "hand": hand, + "id": patient_id, + "sex": sex, + "first_name": first_name, + "last_name": last_name, + } + subject_info = {key: val for key, val in subject_info.items() if val is not None} + + if eog == "auto": + eog = _find_channels(ch_names, "EOG") + if ecg == "auto": + ecg = _find_channels(ch_names, "ECG") + if emg == "auto": + emg = _find_channels(ch_names, "EMG") + + chs = _create_chs( + ch_names, cals, FIFF.FIFFV_COIL_EEG, FIFF.FIFFV_EEG_CH, eog, ecg, emg, misc + ) + eegs = [idx for idx, ch in enumerate(chs) if ch["coil_type"] == FIFF.FIFFV_COIL_EEG] coords = _topo_to_sphere(pos, eegs) locs = np.full((len(chs), 12), np.nan) locs[:, :3] = coords dig = _make_dig_points( dig_ch_pos=dict(zip(ch_names, coords)), - coord_frame="head", add_missing_fiducials=True, + coord_frame="head", + add_missing_fiducials=True, ) for ch, loc in zip(chs, locs): ch.update(loc=loc) - cnt_info.update(baselines=np.array(baselines), n_samples=n_samples, - n_bytes=n_bytes) + cnt_info.update(baselines=np.array(baselines), n_samples=n_samples, n_bytes=n_bytes) - session_label = None if str(session_label) == '' else str(session_label) - info.update(meas_date=meas_date, dig=dig, - description=session_label, bads=bads, - subject_info=subject_info, chs=chs) + session_label = None if str(session_label) == "" else str(session_label) + info.update( + meas_date=meas_date, + dig=dig, + description=session_label, + subject_info=subject_info, + chs=chs, + ) info._unlocked = False info._update_redundant() + info["bads"] = bads return info, cnt_info @@ -326,7 +465,8 @@ def _get_cnt_info(input_fname, eog, ecg, emg, misc, data_format, date_format): class RawCNT(BaseRaw): """Raw object from Neuroscan CNT file. - .. Note:: + .. note:: + The channel positions are read from the file header. Channels that are not assigned with keywords ``eog``, ``ecg``, ``emg`` and ``misc`` are assigned as eeg channels. All the eeg channel locations are fit to a @@ -337,10 +477,15 @@ class RawCNT(BaseRaw): the header are correct, it is probably safer to use a (standard) montage. See :func:`mne.channels.make_standard_montage` + .. note:: + + A CNT file can also come from the EEG manufacturer ANT Neuro, in which case the + function :func:`mne.io.read_raw_ant` should be used. + Parameters ---------- input_fname : path-like - Path to the CNT file. + Path to the Neuroscan CNT file. 
eog : list | tuple Names of channels or list of indices that should be designated EOG channels. If ``'auto'``, the channel names beginning with @@ -362,17 +507,12 @@ class RawCNT(BaseRaw): Defaults to ``'auto'``. date_format : ``'mm/dd/yy'`` | ``'dd/mm/yy'`` Format of date in the header. Defaults to ``'mm/dd/yy'``. + header : ``'auto'`` | ``'new'`` | ``'old'`` + Defines the header format. Used to describe how bad channels + are formatted. If auto, reads using old and new header and + if either contain a bad channel make channel bad. + Defaults to ``'auto'``. %(preload)s - stim_channel : bool | None - Add a stim channel from the events. Defaults to None to trigger a - future warning. - - .. warning:: This defaults to True in 0.18 but will change to False in - 0.19 (when no stim channel synthesis will be allowed) - and be removed in 0.20; migrate code to use - :func:`mne.events_from_annotations` instead. - - .. versionadded:: 0.18 %(verbose)s See Also @@ -380,42 +520,69 @@ class RawCNT(BaseRaw): mne.io.Raw : Documentation of attributes and methods. """ - def __init__(self, input_fname, eog=(), misc=(), - ecg=(), emg=(), data_format='auto', date_format='mm/dd/yy', - preload=False, verbose=None): # noqa: D102 - - _check_option('date_format', date_format, ['mm/dd/yy', 'dd/mm/yy']) - if date_format == 'dd/mm/yy': - _date_format = '%d/%m/%y %H:%M:%S' + def __init__( + self, + input_fname, + eog=(), + misc=(), + ecg=(), + emg=(), + data_format="auto", + date_format="mm/dd/yy", + *, + header="auto", + preload=False, + verbose=None, + ): + _check_option("date_format", date_format, ["mm/dd/yy", "dd/mm/yy"]) + if date_format == "dd/mm/yy": + _date_format = "%d/%m/%y %H:%M:%S" else: - _date_format = '%m/%d/%y %H:%M:%S' + _date_format = "%m/%d/%y %H:%M:%S" input_fname = path.abspath(input_fname) - info, cnt_info = _get_cnt_info(input_fname, eog, ecg, emg, misc, - data_format, _date_format) - last_samps = [cnt_info['n_samples'] - 1] - super(RawCNT, self).__init__( - info, preload, filenames=[input_fname], raw_extras=[cnt_info], - last_samps=last_samps, orig_format='int', verbose=verbose) - - data_format = 'int32' if cnt_info['n_bytes'] == 4 else 'int16' + try: + info, cnt_info = _get_cnt_info( + input_fname, eog, ecg, emg, misc, data_format, _date_format, header + ) + except Exception: + raise RuntimeError( + f"{_explain_exception()}\n" + "WARNING: mne.io.read_raw_cnt " + "supports Neuroscan CNT files only. If this file is an ANT Neuro CNT, " + "please use mne.io.read_raw_ant instead." + ) + last_samps = [cnt_info["n_samples"] - 1] + super().__init__( + info, + preload, + filenames=[input_fname], + raw_extras=[cnt_info], + last_samps=last_samps, + orig_format="int", + verbose=verbose, + ) + + data_format = "int32" if cnt_info["n_bytes"] == 4 else "int16" self.set_annotations( - _read_annotations_cnt(input_fname, data_format=data_format)) + _read_annotations_cnt(input_fname, data_format=data_format) + ) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Take a chunk of raw data, multiply by mult or cals, and store.""" - n_channels = self._raw_extras[fi]['orig_nchan'] - if 'stim_channel' in self._raw_extras[fi]: + n_channels = self._raw_extras[fi]["orig_nchan"] + if "stim_channel" in self._raw_extras[fi]: f_channels = n_channels - 1 # Stim channel already read. 
- stim_ch = self._raw_extras[fi]['stim_channel'] + stim_ch = self._raw_extras[fi]["stim_channel"] else: f_channels = n_channels stim_ch = None - channel_offset = self._raw_extras[fi]['channel_offset'] - baselines = self._raw_extras[fi]['baselines'] - n_bytes = self._raw_extras[fi]['n_bytes'] - dtype = '= (channel_offset / 2): # Extend at the end. extra_samps += chunk_size count = n_samps // channel_offset * chunk_size + extra_samps n_chunks = count // chunk_size samps = np.fromfile(fid, dtype=dtype, count=count) - samps = samps.reshape((n_chunks, f_channels, channel_offset), - order='C') + samps = samps.reshape((n_chunks, f_channels, channel_offset), order="C") # Intermediate shaping to chunk sizes. block = np.zeros((n_channels, channel_offset * n_chunks)) for set_idx, row in enumerate(samps): # Final shape. - block_slice = slice(set_idx * channel_offset, - (set_idx + 1) * channel_offset) + block_slice = slice( + set_idx * channel_offset, (set_idx + 1) * channel_offset + ) block[:f_channels, block_slice] = row - if 'stim_channel' in self._raw_extras[fi]: + if "stim_channel" in self._raw_extras[fi]: _data_start = start + sample_start _data_stop = start + sample_stop block[-1] = stim_ch[_data_start:_data_stop] - one[idx] = block[idx, s_offset:n_samps + s_offset] + one[idx] = block[idx, s_offset : n_samps + s_offset] one[idx] -= baselines[idx][:, None] - _mult_cal_one(data[:, sample_start:sample_stop], one, idx, - cals, mult) + _mult_cal_one(data[:, sample_start:sample_stop], one, idx, cals, mult) diff --git a/mne/io/cnt/tests/__init__.py b/mne/io/cnt/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/cnt/tests/__init__.py +++ b/mne/io/cnt/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/cnt/tests/test_cnt.py b/mne/io/cnt/tests/test_cnt.py index db0d1d695aa..f98253b1317 100644 --- a/mne/io/cnt/tests/test_cnt.py +++ b/mne/io/cnt/tests/test_cnt.py @@ -1,53 +1,112 @@ - -# Author: Jaakko Leppakangas -# Joan Massich -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
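# --- Illustrative sketch (not part of the patch) ---------------------------
# A minimal example of the new keyword-only ``header`` option introduced for
# the Neuroscan CNT reader above. The file name is hypothetical; the expected
# contents of raw.info["bads"] and raw.annotations (e.g. "KeyPad Response 1",
# "BAD_..." spans) follow what the tests below assert, not a guarantee for
# arbitrary files.
import mne

raw = mne.io.read_raw_cnt(
    "subject01.cnt",      # hypothetical Neuroscan CNT file
    data_format="auto",
    header="auto",        # check both the old and the new bad-channel flag layout
    preload=False,
)
print(raw.info["bads"])                  # channels flagged bad in either header layout
print(set(raw.annotations.description))  # keypad responses and BAD_* spans, if present
# ---------------------------------------------------------------------------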
import numpy as np -from numpy.testing import assert_array_equal import pytest +from numpy.testing import assert_array_equal from mne import pick_types +from mne.annotations import read_annotations from mne.datasets import testing -from mne.io.tests.test_raw import _test_raw_reader from mne.io.cnt import read_raw_cnt -from mne.annotations import read_annotations +from mne.io.tests.test_raw import _test_raw_reader data_path = testing.data_path(download=False) fname = data_path / "CNT" / "scan41_short.cnt" +# Contains bad spans and could not be read properly before PR #12393 +fname_bad_spans = data_path / "CNT" / "test_CNT_events_mne_JWoess_clipped.cnt" + + +_no_parse = pytest.warns(RuntimeWarning, match="Could not parse") @testing.requires_testing_data -def test_data(): +def test_old_data(): """Test reading raw cnt files.""" - with pytest.warns(RuntimeWarning, match='number of bytes'): - raw = _test_raw_reader(read_raw_cnt, input_fname=fname, - eog='auto', misc=['NA1', 'LEFT_EAR']) + with _no_parse, pytest.warns(RuntimeWarning, match="number of bytes"): + raw = _test_raw_reader( + read_raw_cnt, input_fname=fname, eog="auto", misc=["NA1", "LEFT_EAR"] + ) + + # make sure we use annotations event if we synthesized stim + assert len(raw.annotations) == 6 + + eog_chs = pick_types(raw.info, eog=True, exclude=[]) + assert len(eog_chs) == 2 # test eog='auto' + assert raw.info["bads"] == ["LEFT_EAR", "VEOGR"] # test bads + + # the data has "05/10/200 17:35:31" so it is set to None + assert raw.info["meas_date"] is None + + +@testing.requires_testing_data +def test_new_data(): + """Test reading raw cnt files with different header.""" + with pytest.warns(RuntimeWarning): + raw = read_raw_cnt(input_fname=fname_bad_spans, header="new") + + assert raw.info["bads"] == ["F8"] # test bads + + +@testing.requires_testing_data +def test_auto_data(): + """Test reading raw cnt files with automatic header.""" + first = pytest.warns(RuntimeWarning, match="Could not define the number of bytes.*") + second = pytest.warns(RuntimeWarning, match="Annotations are outside") + third = pytest.warns(RuntimeWarning, match="Omitted 6 annot") + with first, second, third: + raw = read_raw_cnt(input_fname=fname_bad_spans) + # Test that responses are read properly + assert "KeyPad Response 1" in raw.annotations.description + assert raw.info["bads"] == ["F8"] + + with _no_parse, pytest.warns(RuntimeWarning, match="number of bytes"): + raw = _test_raw_reader( + read_raw_cnt, input_fname=fname, eog="auto", misc=["NA1", "LEFT_EAR"] + ) # make sure we use annotations event if we synthesized stim assert len(raw.annotations) == 6 eog_chs = pick_types(raw.info, eog=True, exclude=[]) assert len(eog_chs) == 2 # test eog='auto' - assert raw.info['bads'] == ['LEFT_EAR', 'VEOGR'] # test bads + assert raw.info["bads"] == ["LEFT_EAR", "VEOGR"] # test bads # the data has "05/10/200 17:35:31" so it is set to None - assert raw.info['meas_date'] is None + assert raw.info["meas_date"] is None @testing.requires_testing_data def test_compare_events_and_annotations(): """Test comparing annotations and events.""" - with pytest.warns(RuntimeWarning, match='Could not parse meas date'): + with _no_parse, pytest.warns(RuntimeWarning, match="Could not define the num"): raw = read_raw_cnt(fname) - events = np.array([[333, 0, 7], - [1010, 0, 7], - [1664, 0, 109], - [2324, 0, 7], - [2984, 0, 109]]) + events = np.array( + [[333, 0, 7], [1010, 0, 7], [1664, 0, 109], [2324, 0, 7], [2984, 0, 109]] + ) annot = read_annotations(fname) assert len(annot) == 6 - 
assert_array_equal(annot.onset[:-1], events[:, 0] / raw.info['sfreq']) - assert 'STI 014' not in raw.info['ch_names'] + assert_array_equal(annot.onset[:-1], events[:, 0] / raw.info["sfreq"]) + assert "STI 014" not in raw.info["ch_names"] + + +@testing.requires_testing_data +@pytest.mark.filterwarnings("ignore::RuntimeWarning") +def test_reading_bytes(): + """Test reading raw cnt files with different header.""" + raw_16 = read_raw_cnt(fname, preload=True) + raw_32 = read_raw_cnt(fname_bad_spans, preload=True) + + # Verify that the number of bytes read is correct + assert len(raw_16) == 3070 + assert len(raw_32) == 90000 + + +@testing.requires_testing_data +def test_bad_spans(): + """Test reading raw cnt files with bad spans.""" + annot = read_annotations(fname_bad_spans) + temp = "\t".join(annot.description) + assert "BAD" in temp diff --git a/mne/io/compensator.py b/mne/io/compensator.py deleted file mode 100644 index 220de1f8259..00000000000 --- a/mne/io/compensator.py +++ /dev/null @@ -1,165 +0,0 @@ -import numpy as np - -from .constants import FIFF -from ..utils import fill_doc - - -def get_current_comp(info): - """Get the current compensation in effect in the data.""" - comp = None - first_comp = -1 - for k, chan in enumerate(info['chs']): - if chan['kind'] == FIFF.FIFFV_MEG_CH: - comp = int(chan['coil_type']) >> 16 - if first_comp < 0: - first_comp = comp - elif comp != first_comp: - raise ValueError('Compensation is not set equally on ' - 'all MEG channels') - return comp - - -def set_current_comp(info, comp): - """Set the current compensation in effect in the data.""" - comp_now = get_current_comp(info) - for k, chan in enumerate(info['chs']): - if chan['kind'] == FIFF.FIFFV_MEG_CH: - rem = chan['coil_type'] - (comp_now << 16) - chan['coil_type'] = int(rem + (comp << 16)) - - -def _make_compensator(info, grade): - """Auxiliary function for make_compensator.""" - for k in range(len(info['comps'])): - if info['comps'][k]['kind'] == grade: - this_data = info['comps'][k]['data'] - - # Create the preselector - presel = np.zeros((this_data['ncol'], info['nchan'])) - for col, col_name in enumerate(this_data['col_names']): - ind = [k for k, ch in enumerate(info['ch_names']) - if ch == col_name] - if len(ind) == 0: - raise ValueError('Channel %s is not available in ' - 'data' % col_name) - elif len(ind) > 1: - raise ValueError('Ambiguous channel %s' % col_name) - presel[col, ind[0]] = 1.0 - - # Create the postselector (zero entries for channels not found) - postsel = np.zeros((info['nchan'], this_data['nrow'])) - for c, ch_name in enumerate(info['ch_names']): - ind = [k for k, ch in enumerate(this_data['row_names']) - if ch == ch_name] - if len(ind) > 1: - raise ValueError('Ambiguous channel %s' % ch_name) - elif len(ind) == 1: - postsel[c, ind[0]] = 1.0 - # else, don't use it at all (postsel[c, ?] = 0.0) by allocation - this_comp = np.dot(postsel, np.dot(this_data['data'], presel)) - return this_comp - - raise ValueError('Desired compensation matrix (grade = %d) not' - ' found' % grade) - - -@fill_doc -def make_compensator(info, from_, to, exclude_comp_chs=False): - """Return compensation matrix eg. for CTF system. - - Create a compensation matrix to bring the data from one compensation - state to another. - - Parameters - ---------- - %(info_not_none)s - from_ : int - Compensation in the input data. - to : int - Desired compensation in the output. - exclude_comp_chs : bool - Exclude compensation channels from the output. - - Returns - ------- - comp : array | None. 
- The compensation matrix. Might be None if no compensation - is needed (from == to). - """ - if from_ == to: - return None - - # s_orig = s_from + C1*s_from = (I + C1)*s_from - # s_to = s_orig - C2*s_orig = (I - C2)*s_orig - # s_to = (I - C2)*(I + C1)*s_from = (I + C1 - C2 - C2*C1)*s_from - if from_ != 0: - C1 = _make_compensator(info, from_) - comp_from_0 = np.linalg.inv(np.eye(info['nchan']) - C1) - if to != 0: - C2 = _make_compensator(info, to) - comp_0_to = np.eye(info['nchan']) - C2 - if from_ != 0: - if to != 0: - # This is mathematically equivalent, but has higher numerical - # error than using the inverse to always go to zero and back - # comp = np.eye(info['nchan']) + C1 - C2 - np.dot(C2, C1) - comp = np.dot(comp_0_to, comp_from_0) - else: - comp = comp_from_0 - else: - # from == 0, to != 0 guaranteed here - comp = comp_0_to - - if exclude_comp_chs: - pick = [k for k, c in enumerate(info['chs']) - if c['kind'] != FIFF.FIFFV_REF_MEG_CH] - - if len(pick) == 0: - raise ValueError('Nothing remains after excluding the ' - 'compensation channels') - - comp = comp[pick, :] - - return comp - - -# @verbose -# def compensate_to(data, to, verbose=None): -# """ -# % -# % [newdata] = mne_compensate_to(data,to) -# % -# % Apply compensation to the data as desired -# % -# """ -# -# newdata = data.copy() -# now = get_current_comp(newdata['info']) -# -# # Are we there already? -# if now == to: -# logger.info('Data are already compensated as desired') -# -# # Make the compensator and apply it to all data sets -# comp = make_compensator(newdata['info'], now, to) -# for k in range(len(newdata['evoked'])): -# newdata['evoked'][k]['epochs'] = np.dot(comp, -# newdata['evoked'][k]['epochs']) -# -# # Update the compensation info in the channel descriptors -# newdata['info']['chs'] = set_current_comp(newdata['info']['chs'], to) -# return newdata - - -# def set_current_comp(chs, value): -# """Set the current compensation value in the channel info structures -# """ -# new_chs = chs -# -# lower_half = int('FFFF', 16) # hex2dec('FFFF') -# for k in range(len(chs)): -# if chs[k]['kind'] == FIFF.FIFFV_MEG_CH: -# coil_type = float(chs[k]['coil_type']) & lower_half -# new_chs[k]['coil_type'] = int(coil_type | (value << 16)) -# -# return new_chs diff --git a/mne/io/constants.py b/mne/io/constants.py index 1159c85283d..50cf5677731 100644 --- a/mne/io/constants.py +++ b/mne/io/constants.py @@ -1,1066 +1,7 @@ -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
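# --- Illustrative sketch (not part of the patch) ---------------------------
# The removed make_compensator above derives the grade-`from_` -> grade-`to`
# mapping by composing "undo from_" with "apply to". This NumPy sketch restates
# only that algebra; C1 and C2 stand for the grade-specific compensation
# matrices, and it is not a drop-in replacement for the MNE function.
import numpy as np


def compose_compensation(C1, C2):
    """Map data compensated at grade ``from_`` to grade ``to``."""
    eye = np.eye(C1.shape[0])
    comp_from_0 = np.linalg.inv(eye - C1)  # back to uncompensated (grade 0)
    comp_0_to = eye - C2                   # then apply the desired grade
    return comp_0_to @ comp_from_0
# ---------------------------------------------------------------------------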
-from ..utils._bunch import BunchConstNamed +from .._fiff.constants import FIFF -FIFF = BunchConstNamed() - -# -# FIFF version number in use -# -FIFF.FIFFC_MAJOR_VERSION = 1 -FIFF.FIFFC_MINOR_VERSION = 4 -FIFF.FIFFC_VERSION = FIFF.FIFFC_MAJOR_VERSION << 16 | FIFF.FIFFC_MINOR_VERSION - -# -# Blocks -# -FIFF.FIFFB_ROOT = 999 -FIFF.FIFFB_MEAS = 100 -FIFF.FIFFB_MEAS_INFO = 101 -FIFF.FIFFB_RAW_DATA = 102 -FIFF.FIFFB_PROCESSED_DATA = 103 -FIFF.FIFFB_EVOKED = 104 -FIFF.FIFFB_ASPECT = 105 -FIFF.FIFFB_SUBJECT = 106 -FIFF.FIFFB_ISOTRAK = 107 -FIFF.FIFFB_HPI_MEAS = 108 # HPI measurement -FIFF.FIFFB_HPI_RESULT = 109 # Result of a HPI fitting procedure -FIFF.FIFFB_HPI_COIL = 110 # Data acquired from one HPI coil -FIFF.FIFFB_PROJECT = 111 -FIFF.FIFFB_CONTINUOUS_DATA = 112 -FIFF.FIFFB_CH_INFO = 113 # Extra channel information -FIFF.FIFFB_VOID = 114 -FIFF.FIFFB_EVENTS = 115 -FIFF.FIFFB_INDEX = 116 -FIFF.FIFFB_DACQ_PARS = 117 -FIFF.FIFFB_REF = 118 -FIFF.FIFFB_IAS_RAW_DATA = 119 -FIFF.FIFFB_IAS_ASPECT = 120 -FIFF.FIFFB_HPI_SUBSYSTEM = 121 -# FIFF.FIFFB_PHANTOM_SUBSYSTEM = 122 -# FIFF.FIFFB_STATUS_SUBSYSTEM = 123 -FIFF.FIFFB_DEVICE = 124 -FIFF.FIFFB_HELIUM = 125 -FIFF.FIFFB_CHANNEL_INFO = 126 - -FIFF.FIFFB_SPHERE = 300 # Concentric sphere model related -FIFF.FIFFB_BEM = 310 # Boundary-element method -FIFF.FIFFB_BEM_SURF = 311 # Boundary-element method surfaces -FIFF.FIFFB_CONDUCTOR_MODEL = 312 # One conductor model definition -FIFF.FIFFB_PROJ = 313 -FIFF.FIFFB_PROJ_ITEM = 314 -FIFF.FIFFB_MRI = 200 -FIFF.FIFFB_MRI_SET = 201 -FIFF.FIFFB_MRI_SLICE = 202 -FIFF.FIFFB_MRI_SCENERY = 203 # These are for writing unrelated 'slices' -FIFF.FIFFB_MRI_SCENE = 204 # Which are actually 3D scenes... -FIFF.FIFFB_MRI_SEG = 205 # MRI segmentation data -FIFF.FIFFB_MRI_SEG_REGION = 206 # One MRI segmentation region -FIFF.FIFFB_PROCESSING_HISTORY = 900 -FIFF.FIFFB_PROCESSING_RECORD = 901 - -FIFF.FIFFB_DATA_CORRECTION = 500 -FIFF.FIFFB_CHANNEL_DECOUPLER = 501 -FIFF.FIFFB_SSS_INFO = 502 -FIFF.FIFFB_SSS_CAL = 503 -FIFF.FIFFB_SSS_ST_INFO = 504 -FIFF.FIFFB_SSS_BASES = 505 -FIFF.FIFFB_IAS = 510 -# -# Of general interest -# -FIFF.FIFF_FILE_ID = 100 -FIFF.FIFF_DIR_POINTER = 101 -FIFF.FIFF_BLOCK_ID = 103 -FIFF.FIFF_BLOCK_START = 104 -FIFF.FIFF_BLOCK_END = 105 -FIFF.FIFF_FREE_LIST = 106 -FIFF.FIFF_FREE_BLOCK = 107 -FIFF.FIFF_NOP = 108 -FIFF.FIFF_PARENT_FILE_ID = 109 -FIFF.FIFF_PARENT_BLOCK_ID = 110 -FIFF.FIFF_BLOCK_NAME = 111 -FIFF.FIFF_BLOCK_VERSION = 112 -FIFF.FIFF_CREATOR = 113 # Program that created the file (string) -FIFF.FIFF_MODIFIER = 114 # Program that modified the file (string) -FIFF.FIFF_REF_ROLE = 115 -FIFF.FIFF_REF_FILE_ID = 116 -FIFF.FIFF_REF_FILE_NUM = 117 -FIFF.FIFF_REF_FILE_NAME = 118 -# -# Megacq saves the parameters in these tags -# -FIFF.FIFF_DACQ_PARS = 150 -FIFF.FIFF_DACQ_STIM = 151 - -FIFF.FIFF_DEVICE_TYPE = 152 -FIFF.FIFF_DEVICE_MODEL = 153 -FIFF.FIFF_DEVICE_SERIAL = 154 -FIFF.FIFF_DEVICE_SITE = 155 - -FIFF.FIFF_HE_LEVEL_RAW = 156 -FIFF.FIFF_HELIUM_LEVEL = 157 -FIFF.FIFF_ORIG_FILE_GUID = 158 -FIFF.FIFF_UTC_OFFSET = 159 - -FIFF.FIFF_NCHAN = 200 -FIFF.FIFF_SFREQ = 201 -FIFF.FIFF_DATA_PACK = 202 -FIFF.FIFF_CH_INFO = 203 -FIFF.FIFF_MEAS_DATE = 204 -FIFF.FIFF_SUBJECT = 205 -FIFF.FIFF_COMMENT = 206 -FIFF.FIFF_NAVE = 207 -FIFF.FIFF_FIRST_SAMPLE = 208 # The first sample of an epoch -FIFF.FIFF_LAST_SAMPLE = 209 # The last sample of an epoch -FIFF.FIFF_ASPECT_KIND = 210 -FIFF.FIFF_REF_EVENT = 211 -FIFF.FIFF_EXPERIMENTER = 212 -FIFF.FIFF_DIG_POINT = 213 -FIFF.FIFF_CH_POS = 214 -FIFF.FIFF_HPI_SLOPES = 215 # HPI data 
-FIFF.FIFF_HPI_NCOIL = 216 -FIFF.FIFF_REQ_EVENT = 217 -FIFF.FIFF_REQ_LIMIT = 218 -FIFF.FIFF_LOWPASS = 219 -FIFF.FIFF_BAD_CHS = 220 -FIFF.FIFF_ARTEF_REMOVAL = 221 -FIFF.FIFF_COORD_TRANS = 222 -FIFF.FIFF_HIGHPASS = 223 -FIFF.FIFF_CH_CALS = 224 # This will not occur in new files -FIFF.FIFF_HPI_BAD_CHS = 225 # List of channels considered to be bad in hpi -FIFF.FIFF_HPI_CORR_COEFF = 226 # HPI curve fit correlations -FIFF.FIFF_EVENT_COMMENT = 227 # Comment about the events used in averaging -FIFF.FIFF_NO_SAMPLES = 228 # Number of samples in an epoch -FIFF.FIFF_FIRST_TIME = 229 # Time scale minimum - -FIFF.FIFF_SUBAVE_SIZE = 230 # Size of a subaverage -FIFF.FIFF_SUBAVE_FIRST = 231 # The first epoch # contained in the subaverage -FIFF.FIFF_NAME = 233 # Intended to be a short name. -FIFF.FIFF_DESCRIPTION = FIFF.FIFF_COMMENT # (Textual) Description of an object -FIFF.FIFF_DIG_STRING = 234 # String of digitized points -FIFF.FIFF_LINE_FREQ = 235 # Line frequency -FIFF.FIFF_GANTRY_ANGLE = 282 # Tilt angle of the gantry in degrees. - -# -# HPI fitting program tags -# -FIFF.FIFF_HPI_COIL_FREQ = 236 # HPI coil excitation frequency -FIFF.FIFF_HPI_COIL_MOMENTS = 240 # Estimated moment vectors for the HPI coil magnetic dipoles -FIFF.FIFF_HPI_FIT_GOODNESS = 241 # Three floats indicating the goodness of fit -FIFF.FIFF_HPI_FIT_ACCEPT = 242 # Bitmask indicating acceptance (see below) -FIFF.FIFF_HPI_FIT_GOOD_LIMIT = 243 # Limit for the goodness-of-fit -FIFF.FIFF_HPI_FIT_DIST_LIMIT = 244 # Limit for the coil distance difference -FIFF.FIFF_HPI_COIL_NO = 245 # Coil number listed by HPI measurement -FIFF.FIFF_HPI_COILS_USED = 246 # List of coils finally used when the transformation was computed -FIFF.FIFF_HPI_DIGITIZATION_ORDER = 247 # Which Isotrak digitization point corresponds to each of the coils energized - - -# -# Tags used for storing channel info -# -FIFF.FIFF_CH_SCAN_NO = 250 # Channel scan number. Corresponds to fiffChInfoRec.scanNo field -FIFF.FIFF_CH_LOGICAL_NO = 251 # Channel logical number. Corresponds to fiffChInfoRec.logNo field -FIFF.FIFF_CH_KIND = 252 # Channel type. Corresponds to fiffChInfoRec.kind field" -FIFF.FIFF_CH_RANGE = 253 # Conversion from recorded number to (possibly virtual) voltage at the output" -FIFF.FIFF_CH_CAL = 254 # Calibration coefficient from output voltage to some real units -FIFF.FIFF_CH_LOC = 255 # Channel loc -FIFF.FIFF_CH_UNIT = 256 # Unit of the data -FIFF.FIFF_CH_UNIT_MUL = 257 # Unit multiplier exponent -FIFF.FIFF_CH_DACQ_NAME = 258 # Name of the channel in the data acquisition system. Corresponds to fiffChInfoRec.name. 
-FIFF.FIFF_CH_COIL_TYPE = 350 # Coil type in coil_def.dat -FIFF.FIFF_CH_COORD_FRAME = 351 # Coordinate frame (integer) - -# -# Pointers -# -FIFF.FIFFV_NEXT_SEQ = 0 -FIFF.FIFFV_NEXT_NONE = -1 -# -# Channel types -# -FIFF.FIFFV_BIO_CH = 102 -FIFF.FIFFV_MEG_CH = 1 -FIFF.FIFFV_REF_MEG_CH = 301 -FIFF.FIFFV_EEG_CH = 2 -FIFF.FIFFV_MCG_CH = 201 -FIFF.FIFFV_STIM_CH = 3 -FIFF.FIFFV_EOG_CH = 202 -FIFF.FIFFV_EMG_CH = 302 -FIFF.FIFFV_ECG_CH = 402 -FIFF.FIFFV_MISC_CH = 502 -FIFF.FIFFV_RESP_CH = 602 # Respiration monitoring -FIFF.FIFFV_SEEG_CH = 802 # stereotactic EEG -FIFF.FIFFV_DBS_CH = 803 # deep brain stimulation -FIFF.FIFFV_SYST_CH = 900 # some system status information (on Triux systems only) -FIFF.FIFFV_ECOG_CH = 902 -FIFF.FIFFV_IAS_CH = 910 # Internal Active Shielding data (maybe on Triux only) -FIFF.FIFFV_EXCI_CH = 920 # flux excitation channel used to be a stimulus channel -FIFF.FIFFV_DIPOLE_WAVE = 1000 # Dipole time curve (xplotter/xfit) -FIFF.FIFFV_GOODNESS_FIT = 1001 # Goodness of fit (xplotter/xfit) -FIFF.FIFFV_FNIRS_CH = 1100 # Functional near-infrared spectroscopy -FIFF.FIFFV_TEMPERATURE_CH = 1200 # Functional near-infrared spectroscopy -FIFF.FIFFV_GALVANIC_CH = 1300 # Galvanic skin response -_ch_kind_named = {key: key for key in ( - FIFF.FIFFV_BIO_CH, - FIFF.FIFFV_MEG_CH, - FIFF.FIFFV_REF_MEG_CH, - FIFF.FIFFV_EEG_CH, - FIFF.FIFFV_MCG_CH, - FIFF.FIFFV_STIM_CH, - FIFF.FIFFV_EOG_CH, - FIFF.FIFFV_EMG_CH, - FIFF.FIFFV_ECG_CH, - FIFF.FIFFV_MISC_CH, - FIFF.FIFFV_RESP_CH, - FIFF.FIFFV_SEEG_CH, - FIFF.FIFFV_DBS_CH, - FIFF.FIFFV_SYST_CH, - FIFF.FIFFV_ECOG_CH, - FIFF.FIFFV_IAS_CH, - FIFF.FIFFV_EXCI_CH, - FIFF.FIFFV_DIPOLE_WAVE, - FIFF.FIFFV_GOODNESS_FIT, - FIFF.FIFFV_FNIRS_CH, - FIFF.FIFFV_GALVANIC_CH, - FIFF.FIFFV_TEMPERATURE_CH, -)} - -# -# Quaternion channels for head position monitoring -# -FIFF.FIFFV_QUAT_0 = 700 # Quaternion param q0 obsolete for unit quaternion -FIFF.FIFFV_QUAT_1 = 701 # Quaternion param q1 rotation -FIFF.FIFFV_QUAT_2 = 702 # Quaternion param q2 rotation -FIFF.FIFFV_QUAT_3 = 703 # Quaternion param q3 rotation -FIFF.FIFFV_QUAT_4 = 704 # Quaternion param q4 translation -FIFF.FIFFV_QUAT_5 = 705 # Quaternion param q5 translation -FIFF.FIFFV_QUAT_6 = 706 # Quaternion param q6 translation -FIFF.FIFFV_HPI_G = 707 # Goodness-of-fit in continuous hpi -FIFF.FIFFV_HPI_ERR = 708 # Estimation error in continuous hpi -FIFF.FIFFV_HPI_MOV = 709 # Estimated head movement speed in continuous hpi -# -# Coordinate frames -# -FIFF.FIFFV_COORD_UNKNOWN = 0 -FIFF.FIFFV_COORD_DEVICE = 1 -FIFF.FIFFV_COORD_ISOTRAK = 2 -FIFF.FIFFV_COORD_HPI = 3 -FIFF.FIFFV_COORD_HEAD = 4 -FIFF.FIFFV_COORD_MRI = 5 -FIFF.FIFFV_COORD_MRI_SLICE = 6 -FIFF.FIFFV_COORD_MRI_DISPLAY = 7 -FIFF.FIFFV_COORD_DICOM_DEVICE = 8 -FIFF.FIFFV_COORD_IMAGING_DEVICE = 9 -_coord_frame_named = {key: key for key in ( - FIFF.FIFFV_COORD_UNKNOWN, - FIFF.FIFFV_COORD_DEVICE, - FIFF.FIFFV_COORD_ISOTRAK, - FIFF.FIFFV_COORD_HPI, - FIFF.FIFFV_COORD_HEAD, - FIFF.FIFFV_COORD_MRI, - FIFF.FIFFV_COORD_MRI_SLICE, - FIFF.FIFFV_COORD_MRI_DISPLAY, - FIFF.FIFFV_COORD_DICOM_DEVICE, - FIFF.FIFFV_COORD_IMAGING_DEVICE, -)} -# -# Needed for raw and evoked-response data -# -FIFF.FIFF_DATA_BUFFER = 300 # Buffer containing measurement data -FIFF.FIFF_DATA_SKIP = 301 # Data skip in buffers -FIFF.FIFF_EPOCH = 302 # Buffer containing one epoch and channel -FIFF.FIFF_DATA_SKIP_SAMP = 303 # Data skip in samples - -# -# Info on subject -# -FIFF.FIFF_SUBJ_ID = 400 # Subject ID -FIFF.FIFF_SUBJ_FIRST_NAME = 401 # First name of the subject -FIFF.FIFF_SUBJ_MIDDLE_NAME = 402 # 
Middle name of the subject -FIFF.FIFF_SUBJ_LAST_NAME = 403 # Last name of the subject -FIFF.FIFF_SUBJ_BIRTH_DAY = 404 # Birthday of the subject -FIFF.FIFF_SUBJ_SEX = 405 # Sex of the subject -FIFF.FIFF_SUBJ_HAND = 406 # Handedness of the subject -FIFF.FIFF_SUBJ_WEIGHT = 407 # Weight of the subject in kg -FIFF.FIFF_SUBJ_HEIGHT = 408 # Height of the subject in m -FIFF.FIFF_SUBJ_COMMENT = 409 # Comment about the subject -FIFF.FIFF_SUBJ_HIS_ID = 410 # ID used in the Hospital Information System - -FIFF.FIFFV_SUBJ_HAND_RIGHT = 1 # Righthanded -FIFF.FIFFV_SUBJ_HAND_LEFT = 2 # Lefthanded -FIFF.FIFFV_SUBJ_HAND_AMBI = 3 # Ambidextrous - -FIFF.FIFFV_SUBJ_SEX_UNKNOWN = 0 # Unknown gender -FIFF.FIFFV_SUBJ_SEX_MALE = 1 # Male -FIFF.FIFFV_SUBJ_SEX_FEMALE = 2 # Female - -FIFF.FIFF_PROJ_ID = 500 -FIFF.FIFF_PROJ_NAME = 501 -FIFF.FIFF_PROJ_AIM = 502 -FIFF.FIFF_PROJ_PERSONS = 503 -FIFF.FIFF_PROJ_COMMENT = 504 - -FIFF.FIFF_EVENT_CHANNELS = 600 # Event channel numbers -FIFF.FIFF_EVENT_LIST = 601 # List of events (integers: -FIFF.FIFF_EVENT_CHANNEL = 602 # Event channel -FIFF.FIFF_EVENT_BITS = 603 # Event bits array - -# -# Tags used in saving SQUID characteristics etc. -# -FIFF.FIFF_SQUID_BIAS = 701 -FIFF.FIFF_SQUID_OFFSET = 702 -FIFF.FIFF_SQUID_GATE = 703 -# -# Aspect values used to save characteristic curves of SQUIDs. (mjk) -# -FIFF.FIFFV_ASPECT_IFII_LOW = 1100 -FIFF.FIFFV_ASPECT_IFII_HIGH = 1101 -FIFF.FIFFV_ASPECT_GATE = 1102 - -# -# Values for file references -# -FIFF.FIFFV_ROLE_PREV_FILE = 1 -FIFF.FIFFV_ROLE_NEXT_FILE = 2 - -# -# References -# -FIFF.FIFF_REF_PATH = 1101 - -# -# Different aspects of data -# -FIFF.FIFFV_ASPECT_AVERAGE = 100 # Normal average of epochs -FIFF.FIFFV_ASPECT_STD_ERR = 101 # Std. error of mean -FIFF.FIFFV_ASPECT_SINGLE = 102 # Single epoch cut out from the continuous data -FIFF.FIFFV_ASPECT_SUBAVERAGE = 103 # Partial average (subaverage) -FIFF.FIFFV_ASPECT_ALTAVERAGE = 104 # Alternating subaverage -FIFF.FIFFV_ASPECT_SAMPLE = 105 # A sample cut out by graph -FIFF.FIFFV_ASPECT_POWER_DENSITY = 106 # Power density spectrum -FIFF.FIFFV_ASPECT_DIPOLE_WAVE = 200 # Dipole amplitude curve - -# -# BEM surface IDs -# -FIFF.FIFFV_BEM_SURF_ID_UNKNOWN = -1 -FIFF.FIFFV_BEM_SURF_ID_NOT_KNOWN = 0 -FIFF.FIFFV_BEM_SURF_ID_BRAIN = 1 -FIFF.FIFFV_BEM_SURF_ID_CSF = 2 -FIFF.FIFFV_BEM_SURF_ID_SKULL = 3 -FIFF.FIFFV_BEM_SURF_ID_HEAD = 4 - -FIFF.FIFF_SPHERE_ORIGIN = 3001 -FIFF.FIFF_SPHERE_RADIUS = 3002 - -FIFF.FIFF_BEM_SURF_ID = 3101 # int surface number -FIFF.FIFF_BEM_SURF_NAME = 3102 # string surface name -FIFF.FIFF_BEM_SURF_NNODE = 3103 # int number of nodes on a surface -FIFF.FIFF_BEM_SURF_NTRI = 3104 # int number of triangles on a surface -FIFF.FIFF_BEM_SURF_NODES = 3105 # float surface nodes (nnode,3) -FIFF.FIFF_BEM_SURF_TRIANGLES = 3106 # int surface triangles (ntri,3) -FIFF.FIFF_BEM_SURF_NORMALS = 3107 # float surface node normal unit vectors - -FIFF.FIFF_BEM_POT_SOLUTION = 3110 # float ** The solution matrix -FIFF.FIFF_BEM_APPROX = 3111 # int approximation method, see below -FIFF.FIFF_BEM_COORD_FRAME = 3112 # The coordinate frame of the model -FIFF.FIFF_BEM_SIGMA = 3113 # Conductivity of a compartment -FIFF.FIFFV_BEM_APPROX_CONST = 1 # The constant potential approach -FIFF.FIFFV_BEM_APPROX_LINEAR = 2 # The linear potential approach - -# -# More of those defined in MNE -# -FIFF.FIFFV_MNE_SURF_UNKNOWN = -1 -FIFF.FIFFV_MNE_SURF_LEFT_HEMI = 101 -FIFF.FIFFV_MNE_SURF_RIGHT_HEMI = 102 -FIFF.FIFFV_MNE_SURF_MEG_HELMET = 201 # Use this irrespective of the system -# -# These relate to the Isotrak data 
(enum(point)) -# -FIFF.FIFFV_POINT_CARDINAL = 1 -FIFF.FIFFV_POINT_HPI = 2 -FIFF.FIFFV_POINT_EEG = 3 -FIFF.FIFFV_POINT_ECG = FIFF.FIFFV_POINT_EEG -FIFF.FIFFV_POINT_EXTRA = 4 -FIFF.FIFFV_POINT_HEAD = 5 # Point on the surface of the head -_dig_kind_named = {key: key for key in( - FIFF.FIFFV_POINT_CARDINAL, - FIFF.FIFFV_POINT_HPI, - FIFF.FIFFV_POINT_EEG, - FIFF.FIFFV_POINT_EXTRA, - FIFF.FIFFV_POINT_HEAD, -)} -# -# Cardinal point types (enum(cardinal_point)) -# -FIFF.FIFFV_POINT_LPA = 1 -FIFF.FIFFV_POINT_NASION = 2 -FIFF.FIFFV_POINT_RPA = 3 -FIFF.FIFFV_POINT_INION = 4 -_dig_cardinal_named = {key: key for key in ( - FIFF.FIFFV_POINT_LPA, - FIFF.FIFFV_POINT_NASION, - FIFF.FIFFV_POINT_RPA, - FIFF.FIFFV_POINT_INION, -)} -# -# SSP -# -FIFF.FIFF_PROJ_ITEM_KIND = 3411 -FIFF.FIFF_PROJ_ITEM_TIME = 3412 -FIFF.FIFF_PROJ_ITEM_NVEC = 3414 -FIFF.FIFF_PROJ_ITEM_VECTORS = 3415 -FIFF.FIFF_PROJ_ITEM_DEFINITION = 3416 -FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST = 3417 -# XPlotter -FIFF.FIFF_XPLOTTER_LAYOUT = 3501 # string - "Xplotter layout tag" -# -# MRIs -# -FIFF.FIFF_MRI_SOURCE_PATH = FIFF.FIFF_REF_PATH -FIFF.FIFF_MRI_SOURCE_FORMAT = 2002 -FIFF.FIFF_MRI_PIXEL_ENCODING = 2003 -FIFF.FIFF_MRI_PIXEL_DATA_OFFSET = 2004 -FIFF.FIFF_MRI_PIXEL_SCALE = 2005 -FIFF.FIFF_MRI_PIXEL_DATA = 2006 -FIFF.FIFF_MRI_PIXEL_OVERLAY_ENCODING = 2007 -FIFF.FIFF_MRI_PIXEL_OVERLAY_DATA = 2008 -FIFF.FIFF_MRI_BOUNDING_BOX = 2009 -FIFF.FIFF_MRI_WIDTH = 2010 -FIFF.FIFF_MRI_WIDTH_M = 2011 -FIFF.FIFF_MRI_HEIGHT = 2012 -FIFF.FIFF_MRI_HEIGHT_M = 2013 -FIFF.FIFF_MRI_DEPTH = 2014 -FIFF.FIFF_MRI_DEPTH_M = 2015 -FIFF.FIFF_MRI_THICKNESS = 2016 -FIFF.FIFF_MRI_SCENE_AIM = 2017 -FIFF.FIFF_MRI_ORIG_SOURCE_PATH = 2020 -FIFF.FIFF_MRI_ORIG_SOURCE_FORMAT = 2021 -FIFF.FIFF_MRI_ORIG_PIXEL_ENCODING = 2022 -FIFF.FIFF_MRI_ORIG_PIXEL_DATA_OFFSET = 2023 -FIFF.FIFF_MRI_VOXEL_DATA = 2030 -FIFF.FIFF_MRI_VOXEL_ENCODING = 2031 -FIFF.FIFF_MRI_MRILAB_SETUP = 2100 -FIFF.FIFF_MRI_SEG_REGION_ID = 2200 -# -FIFF.FIFFV_MRI_PIXEL_UNKNOWN = 0 -FIFF.FIFFV_MRI_PIXEL_BYTE = 1 -FIFF.FIFFV_MRI_PIXEL_WORD = 2 -FIFF.FIFFV_MRI_PIXEL_SWAP_WORD = 3 -FIFF.FIFFV_MRI_PIXEL_FLOAT = 4 -FIFF.FIFFV_MRI_PIXEL_BYTE_INDEXED_COLOR = 5 -FIFF.FIFFV_MRI_PIXEL_BYTE_RGB_COLOR = 6 -FIFF.FIFFV_MRI_PIXEL_BYTE_RLE_RGB_COLOR = 7 -FIFF.FIFFV_MRI_PIXEL_BIT_RLE = 8 -# -# These are the MNE fiff definitions (range 350-390 reserved for MNE) -# -FIFF.FIFFB_MNE = 350 -FIFF.FIFFB_MNE_SOURCE_SPACE = 351 -FIFF.FIFFB_MNE_FORWARD_SOLUTION = 352 -FIFF.FIFFB_MNE_PARENT_MRI_FILE = 353 -FIFF.FIFFB_MNE_PARENT_MEAS_FILE = 354 -FIFF.FIFFB_MNE_COV = 355 -FIFF.FIFFB_MNE_INVERSE_SOLUTION = 356 -FIFF.FIFFB_MNE_NAMED_MATRIX = 357 -FIFF.FIFFB_MNE_ENV = 358 -FIFF.FIFFB_MNE_BAD_CHANNELS = 359 -FIFF.FIFFB_MNE_VERTEX_MAP = 360 -FIFF.FIFFB_MNE_EVENTS = 361 -FIFF.FIFFB_MNE_MORPH_MAP = 362 -FIFF.FIFFB_MNE_SURFACE_MAP = 363 -FIFF.FIFFB_MNE_SURFACE_MAP_GROUP = 364 - -# -# CTF compensation data -# -FIFF.FIFFB_MNE_CTF_COMP = 370 -FIFF.FIFFB_MNE_CTF_COMP_DATA = 371 -FIFF.FIFFB_MNE_DERIVATIONS = 372 - -FIFF.FIFFB_MNE_EPOCHS = 373 -FIFF.FIFFB_MNE_ICA = 374 -# -# Fiff tags associated with MNE computations (3500...) -# -# -# 3500... Bookkeeping -# -FIFF.FIFF_MNE_ROW_NAMES = 3502 -FIFF.FIFF_MNE_COL_NAMES = 3503 -FIFF.FIFF_MNE_NROW = 3504 -FIFF.FIFF_MNE_NCOL = 3505 -FIFF.FIFF_MNE_COORD_FRAME = 3506 # Coordinate frame employed. 
Defaults: - # FIFFB_MNE_SOURCE_SPACE FIFFV_COORD_MRI - # FIFFB_MNE_FORWARD_SOLUTION FIFFV_COORD_HEAD - # FIFFB_MNE_INVERSE_SOLUTION FIFFV_COORD_HEAD -FIFF.FIFF_MNE_CH_NAME_LIST = 3507 -FIFF.FIFF_MNE_FILE_NAME = 3508 # This removes the collision with fiff_file.h (used to be 3501) -# -# 3510... 3590... Source space or surface -# -FIFF.FIFF_MNE_SOURCE_SPACE_POINTS = 3510 # The vertices -FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS = 3511 # The vertex normals -FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS = 3512 # How many vertices -FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION = 3513 # Which are selected to the source space -FIFF.FIFF_MNE_SOURCE_SPACE_NUSE = 3514 # How many are in use -FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST = 3515 # Nearest source space vertex for all vertices -FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST = 3516 # Distance to the Nearest source space vertex for all vertices -FIFF.FIFF_MNE_SOURCE_SPACE_ID = 3517 # Identifier -FIFF.FIFF_MNE_SOURCE_SPACE_TYPE = 3518 # Surface or volume -FIFF.FIFF_MNE_SOURCE_SPACE_VERTICES = 3519 # List of vertices (zero based) - -FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS = 3596 # Voxel space dimensions in a volume source space -FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR = 3597 # Matrix to interpolate a volume source space into a mri volume -FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE = 3598 # MRI file used in the interpolation - -FIFF.FIFF_MNE_SOURCE_SPACE_NTRI = 3590 # Number of triangles -FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES = 3591 # The triangulation -FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI = 3592 # Number of triangles corresponding to the number of vertices in use -FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = 3593 # The triangulation of the used vertices in the source space -FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS = 3594 # Number of neighbors for each source space point (used for volume source spaces) -FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS = 3595 # Neighbors for each source space point (used for volume source spaces) - -FIFF.FIFF_MNE_SOURCE_SPACE_DIST = 3599 # Distances between vertices in use (along the surface) -FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT = 3600 # If distance is above this limit (in the volume) it has not been calculated - -FIFF.FIFF_MNE_SURFACE_MAP_DATA = 3610 # Surface map data -FIFF.FIFF_MNE_SURFACE_MAP_KIND = 3611 # Type of map - -# -# 3520... Forward solution -# -FIFF.FIFF_MNE_FORWARD_SOLUTION = 3520 -FIFF.FIFF_MNE_SOURCE_ORIENTATION = 3521 # Fixed or free -FIFF.FIFF_MNE_INCLUDED_METHODS = 3522 -FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD = 3523 -# -# 3530... Covariance matrix -# -FIFF.FIFF_MNE_COV_KIND = 3530 # What kind of a covariance matrix -FIFF.FIFF_MNE_COV_DIM = 3531 # Matrix dimension -FIFF.FIFF_MNE_COV = 3532 # Full matrix in packed representation (lower triangle) -FIFF.FIFF_MNE_COV_DIAG = 3533 # Diagonal matrix -FIFF.FIFF_MNE_COV_EIGENVALUES = 3534 # Eigenvalues and eigenvectors of the above -FIFF.FIFF_MNE_COV_EIGENVECTORS = 3535 -FIFF.FIFF_MNE_COV_NFREE = 3536 # Number of degrees of freedom -FIFF.FIFF_MNE_COV_METHOD = 3537 # The estimator used -FIFF.FIFF_MNE_COV_SCORE = 3538 # Negative log-likelihood - -# -# 3540... 
Inverse operator -# -# We store the inverse operator as the eigenleads, eigenfields, -# and weights -# -FIFF.FIFF_MNE_INVERSE_LEADS = 3540 # The eigenleads -FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED = 3546 # The eigenleads (already weighted with R^0.5) -FIFF.FIFF_MNE_INVERSE_FIELDS = 3541 # The eigenfields -FIFF.FIFF_MNE_INVERSE_SING = 3542 # The singular values -FIFF.FIFF_MNE_PRIORS_USED = 3543 # Which kind of priors have been used for the source covariance matrix -FIFF.FIFF_MNE_INVERSE_FULL = 3544 # Inverse operator as one matrix - # This matrix includes the whitening operator as well - # The regularization is applied -FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS = 3545 # Contains the orientation of one source per row - # The source orientations must be expressed in the coordinate system - # given by FIFF_MNE_COORD_FRAME -FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT = 3547 # Are the sources given in Am or Am/m^2 ? -# -# 3550... Saved environment info -# -FIFF.FIFF_MNE_ENV_WORKING_DIR = 3550 # Working directory where the file was created -FIFF.FIFF_MNE_ENV_COMMAND_LINE = 3551 # The command used to create the file -FIFF.FIFF_MNE_EXTERNAL_BIG_ENDIAN = 3552 # Reference to an external binary file (big-endian) */ -FIFF.FIFF_MNE_EXTERNAL_LITTLE_ENDIAN = 3553 # Reference to an external binary file (little-endian) */ -# -# 3560... Miscellaneous -# -FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE = 3560 # Is this projection item active? -FIFF.FIFF_MNE_EVENT_LIST = 3561 # An event list (for STI101 / STI 014) -FIFF.FIFF_MNE_HEMI = 3562 # Hemisphere association for general purposes -FIFF.FIFF_MNE_DATA_SKIP_NOP = 3563 # A data skip turned off in the raw data -FIFF.FIFF_MNE_ORIG_CH_INFO = 3564 # Channel information before any changes -FIFF.FIFF_MNE_EVENT_TRIGGER_MASK = 3565 # Mask applied to the trigger channel values -FIFF.FIFF_MNE_EVENT_COMMENTS = 3566 # Event comments merged into one long string -FIFF.FIFF_MNE_CUSTOM_REF = 3567 # Whether a custom reference was applied to the data -FIFF.FIFF_MNE_BASELINE_MIN = 3568 # Time of baseline beginning -FIFF.FIFF_MNE_BASELINE_MAX = 3569 # Time of baseline end -# -# 3570... Morphing maps -# -FIFF.FIFF_MNE_MORPH_MAP = 3570 # Mapping of closest vertices on the sphere -FIFF.FIFF_MNE_MORPH_MAP_FROM = 3571 # Which subject is this map from -FIFF.FIFF_MNE_MORPH_MAP_TO = 3572 # Which subject is this map to -# -# 3580... CTF compensation data -# -FIFF.FIFF_MNE_CTF_COMP_KIND = 3580 # What kind of compensation -FIFF.FIFF_MNE_CTF_COMP_DATA = 3581 # The compensation data itself -FIFF.FIFF_MNE_CTF_COMP_CALIBRATED = 3582 # Are the coefficients calibrated? - -FIFF.FIFF_MNE_DERIVATION_DATA = 3585 # Used to store information about EEG and other derivations -# -# 3601... 
values associated with ICA decomposition -# -FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS = 3601 # ICA interface parameters -FIFF.FIFF_MNE_ICA_CHANNEL_NAMES = 3602 # ICA channel names -FIFF.FIFF_MNE_ICA_WHITENER = 3603 # ICA whitener -FIFF.FIFF_MNE_ICA_PCA_COMPONENTS = 3604 # PCA components -FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR = 3605 # PCA explained variance -FIFF.FIFF_MNE_ICA_PCA_MEAN = 3606 # PCA mean -FIFF.FIFF_MNE_ICA_MATRIX = 3607 # ICA unmixing matrix -FIFF.FIFF_MNE_ICA_BADS = 3608 # ICA bad sources -FIFF.FIFF_MNE_ICA_MISC_PARAMS = 3609 # ICA misc params -# -# Miscellaneous -# -FIFF.FIFF_MNE_KIT_SYSTEM_ID = 3612 # Unique ID assigned to KIT systems -# -# Maxfilter tags -# -FIFF.FIFF_SSS_FRAME = 263 -FIFF.FIFF_SSS_JOB = 264 -FIFF.FIFF_SSS_ORIGIN = 265 -FIFF.FIFF_SSS_ORD_IN = 266 -FIFF.FIFF_SSS_ORD_OUT = 267 -FIFF.FIFF_SSS_NMAG = 268 -FIFF.FIFF_SSS_COMPONENTS = 269 -FIFF.FIFF_SSS_CAL_CHANS = 270 -FIFF.FIFF_SSS_CAL_CORRS = 271 -FIFF.FIFF_SSS_ST_CORR = 272 -FIFF.FIFF_SSS_NFREE = 278 -FIFF.FIFF_SSS_ST_LENGTH = 279 -FIFF.FIFF_DECOUPLER_MATRIX = 800 -# -# Fiff values associated with MNE computations -# -FIFF.FIFFV_MNE_UNKNOWN_ORI = 0 -FIFF.FIFFV_MNE_FIXED_ORI = 1 -FIFF.FIFFV_MNE_FREE_ORI = 2 - -FIFF.FIFFV_MNE_MEG = 1 -FIFF.FIFFV_MNE_EEG = 2 -FIFF.FIFFV_MNE_MEG_EEG = 3 - -FIFF.FIFFV_MNE_PRIORS_NONE = 0 -FIFF.FIFFV_MNE_PRIORS_DEPTH = 1 -FIFF.FIFFV_MNE_PRIORS_LORETA = 2 -FIFF.FIFFV_MNE_PRIORS_SULCI = 3 - -FIFF.FIFFV_MNE_UNKNOWN_COV = 0 -FIFF.FIFFV_MNE_SENSOR_COV = 1 -FIFF.FIFFV_MNE_NOISE_COV = 1 # This is what it should have been called -FIFF.FIFFV_MNE_SOURCE_COV = 2 -FIFF.FIFFV_MNE_FMRI_PRIOR_COV = 3 -FIFF.FIFFV_MNE_SIGNAL_COV = 4 # This will be potentially employed in beamformers -FIFF.FIFFV_MNE_DEPTH_PRIOR_COV = 5 # The depth weighting prior -FIFF.FIFFV_MNE_ORIENT_PRIOR_COV = 6 # The orientation prior - -# -# Output map types -# -FIFF.FIFFV_MNE_MAP_UNKNOWN = -1 # Unspecified -FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT = 1 # Scalar current value -FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_SIZE = 2 # Absolute value of the above -FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT = 3 # Current vector components -FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_SIZE = 4 # Vector current size -FIFF.FIFFV_MNE_MAP_T_STAT = 5 # Student's t statistic -FIFF.FIFFV_MNE_MAP_F_STAT = 6 # F statistic -FIFF.FIFFV_MNE_MAP_F_STAT_SQRT = 7 # Square root of the F statistic -FIFF.FIFFV_MNE_MAP_CHI2_STAT = 8 # (Approximate) chi^2 statistic -FIFF.FIFFV_MNE_MAP_CHI2_STAT_SQRT = 9 # Square root of the (approximate) chi^2 statistic -FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_NOISE = 10 # Current noise approximation (scalar) -FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_NOISE = 11 # Current noise approximation (vector) -# -# Source space types (values of FIFF_MNE_SOURCE_SPACE_TYPE) -# -FIFF.FIFFV_MNE_SPACE_UNKNOWN = -1 -FIFF.FIFFV_MNE_SPACE_SURFACE = 1 -FIFF.FIFFV_MNE_SPACE_VOLUME = 2 -FIFF.FIFFV_MNE_SPACE_DISCRETE = 3 -# -# Covariance matrix channel classification -# -FIFF.FIFFV_MNE_COV_CH_UNKNOWN = -1 # No idea -FIFF.FIFFV_MNE_COV_CH_MEG_MAG = 0 # Axial gradiometer or magnetometer [T] -FIFF.FIFFV_MNE_COV_CH_MEG_GRAD = 1 # Planar gradiometer [T/m] -FIFF.FIFFV_MNE_COV_CH_EEG = 2 # EEG [V] -# -# Projection item kinds -# -FIFF.FIFFV_PROJ_ITEM_NONE = 0 -FIFF.FIFFV_PROJ_ITEM_FIELD = 1 -FIFF.FIFFV_PROJ_ITEM_DIP_FIX = 2 -FIFF.FIFFV_PROJ_ITEM_DIP_ROT = 3 -FIFF.FIFFV_PROJ_ITEM_HOMOG_GRAD = 4 -FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD = 5 -FIFF.FIFFV_PROJ_ITEM_EEG_AVREF = 10 # Linear projection related to EEG average reference -FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF = FIFF.FIFFV_PROJ_ITEM_EEG_AVREF # backward compat 
alias -# -# Custom EEG references -# -FIFF.FIFFV_MNE_CUSTOM_REF_OFF = 0 -FIFF.FIFFV_MNE_CUSTOM_REF_ON = 1 -FIFF.FIFFV_MNE_CUSTOM_REF_CSD = 2 -# -# SSS job options -# -FIFF.FIFFV_SSS_JOB_NOTHING = 0 # No SSS, just copy input to output -FIFF.FIFFV_SSS_JOB_CTC = 1 # No SSS, only cross-talk correction -FIFF.FIFFV_SSS_JOB_FILTER = 2 # Spatial maxwell filtering -FIFF.FIFFV_SSS_JOB_VIRT = 3 # Transform data to another sensor array -FIFF.FIFFV_SSS_JOB_HEAD_POS = 4 # Estimate head positions, no SSS -FIFF.FIFFV_SSS_JOB_MOVEC_FIT = 5 # Estimate and compensate head movement -FIFF.FIFFV_SSS_JOB_MOVEC_QUA = 6 # Compensate head movement from previously estimated head positions -FIFF.FIFFV_SSS_JOB_REC_ALL = 7 # Reconstruct inside and outside signals -FIFF.FIFFV_SSS_JOB_REC_IN = 8 # Reconstruct inside signals -FIFF.FIFFV_SSS_JOB_REC_OUT = 9 # Reconstruct outside signals -FIFF.FIFFV_SSS_JOB_ST = 10 # Spatio-temporal maxwell filtering -FIFF.FIFFV_SSS_JOB_TPROJ = 11 # Temporal projection, no SSS -FIFF.FIFFV_SSS_JOB_XSSS = 12 # Cross-validation SSS -FIFF.FIFFV_SSS_JOB_XSUB = 13 # Cross-validation subtraction, no SSS -FIFF.FIFFV_SSS_JOB_XWAV = 14 # Cross-validation noise waveforms -FIFF.FIFFV_SSS_JOB_NCOV = 15 # Noise covariance estimation -FIFF.FIFFV_SSS_JOB_SCOV = 16 # SSS sample covariance estimation -#} - -# -# Additional coordinate frames -# -FIFF.FIFFV_MNE_COORD_TUFTS_EEG = 300 # For Tufts EEG data -FIFF.FIFFV_MNE_COORD_CTF_DEVICE = 1001 # CTF device coordinates -FIFF.FIFFV_MNE_COORD_CTF_HEAD = 1004 # CTF head coordinates -FIFF.FIFFV_MNE_COORD_DIGITIZER = FIFF.FIFFV_COORD_ISOTRAK # Original (Polhemus) digitizer coordinates -FIFF.FIFFV_MNE_COORD_SURFACE_RAS = FIFF.FIFFV_COORD_MRI # The surface RAS coordinates -FIFF.FIFFV_MNE_COORD_MRI_VOXEL = 2001 # The MRI voxel coordinates -FIFF.FIFFV_MNE_COORD_RAS = 2002 # Surface RAS coordinates with non-zero origin -FIFF.FIFFV_MNE_COORD_MNI_TAL = 2003 # MNI Talairach coordinates -FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ = 2004 # FreeSurfer Talairach coordinates (MNI z > 0) -FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ = 2005 # FreeSurfer Talairach coordinates (MNI z < 0) -FIFF.FIFFV_MNE_COORD_FS_TAL = 2006 # FreeSurfer Talairach coordinates -# -# 4D and KIT use the same head coordinate system definition as CTF -# -FIFF.FIFFV_MNE_COORD_4D_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD -FIFF.FIFFV_MNE_COORD_KIT_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD - -# -# FWD Types -# - -FWD = BunchConstNamed() - -FWD.COIL_UNKNOWN = 0 -FWD.COILC_UNKNOWN = 0 -FWD.COILC_EEG = 1000 -FWD.COILC_MAG = 1 -FWD.COILC_AXIAL_GRAD = 2 -FWD.COILC_PLANAR_GRAD = 3 -FWD.COILC_AXIAL_GRAD2 = 4 - -FWD.COIL_ACCURACY_POINT = 0 -FWD.COIL_ACCURACY_NORMAL = 1 -FWD.COIL_ACCURACY_ACCURATE = 2 - -FWD.BEM_IP_APPROACH_LIMIT = 0.1 - -FWD.BEM_LIN_FIELD_SIMPLE = 1 -FWD.BEM_LIN_FIELD_FERGUSON = 2 -FWD.BEM_LIN_FIELD_URANKAR = 3 - -# -# Data types -# -FIFF.FIFFT_VOID = 0 -FIFF.FIFFT_BYTE = 1 -FIFF.FIFFT_SHORT = 2 -FIFF.FIFFT_INT = 3 -FIFF.FIFFT_FLOAT = 4 -FIFF.FIFFT_DOUBLE = 5 -FIFF.FIFFT_JULIAN = 6 -FIFF.FIFFT_USHORT = 7 -FIFF.FIFFT_UINT = 8 -FIFF.FIFFT_ULONG = 9 -FIFF.FIFFT_STRING = 10 -FIFF.FIFFT_LONG = 11 -FIFF.FIFFT_DAU_PACK13 = 13 -FIFF.FIFFT_DAU_PACK14 = 14 -FIFF.FIFFT_DAU_PACK16 = 16 -FIFF.FIFFT_COMPLEX_FLOAT = 20 -FIFF.FIFFT_COMPLEX_DOUBLE = 21 -FIFF.FIFFT_OLD_PACK = 23 -FIFF.FIFFT_CH_INFO_STRUCT = 30 -FIFF.FIFFT_ID_STRUCT = 31 -FIFF.FIFFT_DIR_ENTRY_STRUCT = 32 -FIFF.FIFFT_DIG_POINT_STRUCT = 33 -FIFF.FIFFT_CH_POS_STRUCT = 34 -FIFF.FIFFT_COORD_TRANS_STRUCT = 35 -FIFF.FIFFT_DIG_STRING_STRUCT = 36 -FIFF.FIFFT_STREAM_SEGMENT_STRUCT = 37 -# -# 
Units of measurement -# -FIFF.FIFF_UNIT_NONE = -1 -# -# SI base units -# -FIFF.FIFF_UNIT_UNITLESS = 0 -FIFF.FIFF_UNIT_M = 1 # meter -FIFF.FIFF_UNIT_KG = 2 # kilogram -FIFF.FIFF_UNIT_SEC = 3 # second -FIFF.FIFF_UNIT_A = 4 # ampere -FIFF.FIFF_UNIT_K = 5 # Kelvin -FIFF.FIFF_UNIT_MOL = 6 # mole -# -# SI Supplementary units -# -FIFF.FIFF_UNIT_RAD = 7 # radian -FIFF.FIFF_UNIT_SR = 8 # steradian -# -# SI base candela -# -FIFF.FIFF_UNIT_CD = 9 # candela -# -# SI derived units -# -FIFF.FIFF_UNIT_MOL_M3 = 10 # mol/m^3 -FIFF.FIFF_UNIT_HZ = 101 # hertz -FIFF.FIFF_UNIT_N = 102 # Newton -FIFF.FIFF_UNIT_PA = 103 # pascal -FIFF.FIFF_UNIT_J = 104 # joule -FIFF.FIFF_UNIT_W = 105 # watt -FIFF.FIFF_UNIT_C = 106 # coulomb -FIFF.FIFF_UNIT_V = 107 # volt -FIFF.FIFF_UNIT_F = 108 # farad -FIFF.FIFF_UNIT_OHM = 109 # ohm -FIFF.FIFF_UNIT_S = 110 # Siemens (same as Moh, what fiff-constants calls it) -FIFF.FIFF_UNIT_WB = 111 # weber -FIFF.FIFF_UNIT_T = 112 # tesla -FIFF.FIFF_UNIT_H = 113 # Henry -FIFF.FIFF_UNIT_CEL = 114 # celsius -FIFF.FIFF_UNIT_LM = 115 # lumen -FIFF.FIFF_UNIT_LX = 116 # lux -FIFF.FIFF_UNIT_V_M2 = 117 # V/m^2 -# -# Others we need -# -FIFF.FIFF_UNIT_T_M = 201 # T/m -FIFF.FIFF_UNIT_AM = 202 # Am -FIFF.FIFF_UNIT_AM_M2 = 203 # Am/m^2 -FIFF.FIFF_UNIT_AM_M3 = 204 # Am/m^3 -_ch_unit_named = {key: key for key in( - FIFF.FIFF_UNIT_NONE, FIFF.FIFF_UNIT_UNITLESS, FIFF.FIFF_UNIT_M, - FIFF.FIFF_UNIT_KG, FIFF.FIFF_UNIT_SEC, FIFF.FIFF_UNIT_A, FIFF.FIFF_UNIT_K, - FIFF.FIFF_UNIT_MOL, FIFF.FIFF_UNIT_RAD, FIFF.FIFF_UNIT_SR, - FIFF.FIFF_UNIT_CD, FIFF.FIFF_UNIT_MOL_M3, FIFF.FIFF_UNIT_HZ, - FIFF.FIFF_UNIT_N, FIFF.FIFF_UNIT_PA, FIFF.FIFF_UNIT_J, FIFF.FIFF_UNIT_W, - FIFF.FIFF_UNIT_C, FIFF.FIFF_UNIT_V, FIFF.FIFF_UNIT_F, FIFF.FIFF_UNIT_OHM, - FIFF.FIFF_UNIT_S, FIFF.FIFF_UNIT_WB, FIFF.FIFF_UNIT_T, FIFF.FIFF_UNIT_H, - FIFF.FIFF_UNIT_CEL, FIFF.FIFF_UNIT_LM, FIFF.FIFF_UNIT_LX, - FIFF.FIFF_UNIT_V_M2, FIFF.FIFF_UNIT_T_M, FIFF.FIFF_UNIT_AM, - FIFF.FIFF_UNIT_AM_M2, FIFF.FIFF_UNIT_AM_M3, -)} -# -# Multipliers -# -FIFF.FIFF_UNITM_E = 18 -FIFF.FIFF_UNITM_PET = 15 -FIFF.FIFF_UNITM_T = 12 -FIFF.FIFF_UNITM_GIG = 9 -FIFF.FIFF_UNITM_MEG = 6 -FIFF.FIFF_UNITM_K = 3 -FIFF.FIFF_UNITM_H = 2 -FIFF.FIFF_UNITM_DA = 1 -FIFF.FIFF_UNITM_NONE = 0 -FIFF.FIFF_UNITM_D = -1 -FIFF.FIFF_UNITM_C = -2 -FIFF.FIFF_UNITM_M = -3 -FIFF.FIFF_UNITM_MU = -6 -FIFF.FIFF_UNITM_N = -9 -FIFF.FIFF_UNITM_P = -12 -FIFF.FIFF_UNITM_F = -15 -FIFF.FIFF_UNITM_A = -18 -_ch_unit_mul_named = {key: key for key in ( - FIFF.FIFF_UNITM_E, FIFF.FIFF_UNITM_PET, FIFF.FIFF_UNITM_T, - FIFF.FIFF_UNITM_GIG, FIFF.FIFF_UNITM_MEG, FIFF.FIFF_UNITM_K, - FIFF.FIFF_UNITM_H, FIFF.FIFF_UNITM_DA, FIFF.FIFF_UNITM_NONE, - FIFF.FIFF_UNITM_D, FIFF.FIFF_UNITM_C, FIFF.FIFF_UNITM_M, - FIFF.FIFF_UNITM_MU, FIFF.FIFF_UNITM_N, FIFF.FIFF_UNITM_P, - FIFF.FIFF_UNITM_F, FIFF.FIFF_UNITM_A, -)} - -# -# Coil types -# -FIFF.FIFFV_COIL_NONE = 0 # The location info contains no data -FIFF.FIFFV_COIL_EEG = 1 # EEG electrode position in r0 -FIFF.FIFFV_COIL_NM_122 = 2 # Neuromag 122 coils -FIFF.FIFFV_COIL_NM_24 = 3 # Old 24 channel system in HUT -FIFF.FIFFV_COIL_NM_MCG_AXIAL = 4 # The axial devices in the HUCS MCG system -FIFF.FIFFV_COIL_EEG_BIPOLAR = 5 # Bipolar EEG lead -FIFF.FIFFV_COIL_EEG_CSD = 6 # CSD-transformed EEG lead - -FIFF.FIFFV_COIL_DIPOLE = 200 # Time-varying dipole definition -# The coil info contains dipole location (r0) and -# direction (ex) -FIFF.FIFFV_COIL_FNIRS_HBO = 300 # fNIRS oxyhemoglobin -FIFF.FIFFV_COIL_FNIRS_HBR = 301 # fNIRS deoxyhemoglobin -FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE = 302 # fNIRS continuous 
wave amplitude -FIFF.FIFFV_COIL_FNIRS_OD = 303 # fNIRS optical density -FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE = 304 # fNIRS frequency domain AC amplitude -FIFF.FIFFV_COIL_FNIRS_FD_PHASE = 305 # fNIRS frequency domain phase -FIFF.FIFFV_COIL_FNIRS_RAW = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE # old alias - -FIFF.FIFFV_COIL_MCG_42 = 1000 # For testing the MCG software - -FIFF.FIFFV_COIL_POINT_MAGNETOMETER = 2000 # Simple point magnetometer -FIFF.FIFFV_COIL_AXIAL_GRAD_5CM = 2001 # Generic axial gradiometer - -FIFF.FIFFV_COIL_VV_PLANAR_W = 3011 # VV prototype wirewound planar sensor -FIFF.FIFFV_COIL_VV_PLANAR_T1 = 3012 # Vectorview SQ20483N planar gradiometer -FIFF.FIFFV_COIL_VV_PLANAR_T2 = 3013 # Vectorview SQ20483N-A planar gradiometer -FIFF.FIFFV_COIL_VV_PLANAR_T3 = 3014 # Vectorview SQ20950N planar gradiometer -FIFF.FIFFV_COIL_VV_PLANAR_T4 = 3015 # Vectorview planar gradiometer (MEG-MRI) -FIFF.FIFFV_COIL_VV_MAG_W = 3021 # VV prototype wirewound magnetometer -FIFF.FIFFV_COIL_VV_MAG_T1 = 3022 # Vectorview SQ20483N magnetometer -FIFF.FIFFV_COIL_VV_MAG_T2 = 3023 # Vectorview SQ20483-A magnetometer -FIFF.FIFFV_COIL_VV_MAG_T3 = 3024 # Vectorview SQ20950N magnetometer -FIFF.FIFFV_COIL_VV_MAG_T4 = 3025 # Vectorview magnetometer (MEG-MRI) - -FIFF.FIFFV_COIL_MAGNES_MAG = 4001 # Magnes WH magnetometer -FIFF.FIFFV_COIL_MAGNES_GRAD = 4002 # Magnes WH gradiometer -# -# Magnes reference sensors -# -FIFF.FIFFV_COIL_MAGNES_REF_MAG = 4003 -FIFF.FIFFV_COIL_MAGNES_REF_GRAD = 4004 -FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD = 4005 -FIFF.FIFFV_COIL_MAGNES_R_MAG = FIFF.FIFFV_COIL_MAGNES_REF_MAG -FIFF.FIFFV_COIL_MAGNES_R_GRAD = FIFF.FIFFV_COIL_MAGNES_REF_GRAD -FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF = FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD - -# -# CTF coil and channel types -# -FIFF.FIFFV_COIL_CTF_GRAD = 5001 -FIFF.FIFFV_COIL_CTF_REF_MAG = 5002 -FIFF.FIFFV_COIL_CTF_REF_GRAD = 5003 -FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD = 5004 -# -# KIT system coil types -# -FIFF.FIFFV_COIL_KIT_GRAD = 6001 -FIFF.FIFFV_COIL_KIT_REF_MAG = 6002 -# -# BabySQUID sensors -# -FIFF.FIFFV_COIL_BABY_GRAD = 7001 -# -# BabyMEG sensors -# -FIFF.FIFFV_COIL_BABY_MAG = 7002 -FIFF.FIFFV_COIL_BABY_REF_MAG = 7003 -FIFF.FIFFV_COIL_BABY_REF_MAG2 = 7004 -# -# Artemis123 sensors -# -FIFF.FIFFV_COIL_ARTEMIS123_GRAD = 7501 -FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG = 7502 -FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD = 7503 -# -# QuSpin sensors -# -FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG = 8001 -FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2 = 8002 -# -# FieldLine sensors -# -FIFF.FIFFV_COIL_FIELDLINE_OPM_MAG_GEN1 = 8101 -# -# Kernel sensors -# -FIFF.FIFFV_COIL_KERNEL_OPM_MAG_GEN1 = 8201 -# -# KRISS sensors -# -FIFF.FIFFV_COIL_KRISS_GRAD = 9001 -# -# Compumedics adult/pediatric gradiometer -# -FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD = 9101 -FIFF.FIFFV_COIL_COMPUMEDICS_PEDIATRIC_GRAD = 9102 -_ch_coil_type_named = {key: key for key in ( - FIFF.FIFFV_COIL_NONE, FIFF.FIFFV_COIL_EEG, FIFF.FIFFV_COIL_NM_122, - FIFF.FIFFV_COIL_NM_24, FIFF.FIFFV_COIL_NM_MCG_AXIAL, - FIFF.FIFFV_COIL_EEG_BIPOLAR, FIFF.FIFFV_COIL_EEG_CSD, - FIFF.FIFFV_COIL_DIPOLE, FIFF.FIFFV_COIL_FNIRS_HBO, - FIFF.FIFFV_COIL_FNIRS_HBR, FIFF.FIFFV_COIL_FNIRS_RAW, - FIFF.FIFFV_COIL_FNIRS_OD, FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE, - FIFF.FIFFV_COIL_FNIRS_FD_PHASE, FIFF.FIFFV_COIL_MCG_42, - FIFF.FIFFV_COIL_POINT_MAGNETOMETER, FIFF.FIFFV_COIL_AXIAL_GRAD_5CM, - FIFF.FIFFV_COIL_VV_PLANAR_W, FIFF.FIFFV_COIL_VV_PLANAR_T1, - FIFF.FIFFV_COIL_VV_PLANAR_T2, FIFF.FIFFV_COIL_VV_PLANAR_T3, - FIFF.FIFFV_COIL_VV_PLANAR_T4, FIFF.FIFFV_COIL_VV_MAG_W, - 
FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2, - FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFFV_COIL_VV_MAG_T4, - FIFF.FIFFV_COIL_MAGNES_MAG, FIFF.FIFFV_COIL_MAGNES_GRAD, - FIFF.FIFFV_COIL_MAGNES_REF_MAG, FIFF.FIFFV_COIL_MAGNES_REF_GRAD, - FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD, FIFF.FIFFV_COIL_CTF_GRAD, - FIFF.FIFFV_COIL_CTF_REF_MAG, FIFF.FIFFV_COIL_CTF_REF_GRAD, - FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD, FIFF.FIFFV_COIL_KIT_GRAD, - FIFF.FIFFV_COIL_KIT_REF_MAG, FIFF.FIFFV_COIL_BABY_GRAD, - FIFF.FIFFV_COIL_BABY_MAG, FIFF.FIFFV_COIL_BABY_REF_MAG, - FIFF.FIFFV_COIL_BABY_REF_MAG2, FIFF.FIFFV_COIL_ARTEMIS123_GRAD, - FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG, FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD, - FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG, FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2, - FIFF.FIFFV_COIL_FIELDLINE_OPM_MAG_GEN1, - FIFF.FIFFV_COIL_KERNEL_OPM_MAG_GEN1, - FIFF.FIFFV_COIL_KRISS_GRAD, FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD, - FIFF.FIFFV_COIL_COMPUMEDICS_PEDIATRIC_GRAD, -)} - -# MNE RealTime -FIFF.FIFF_MNE_RT_COMMAND = 3700 # realtime command -FIFF.FIFF_MNE_RT_CLIENT_ID = 3701 # realtime client - -# MNE epochs bookkeeping -FIFF.FIFF_MNE_EPOCHS_SELECTION = 3800 # the epochs selection -FIFF.FIFF_MNE_EPOCHS_DROP_LOG = 3801 # the drop log -FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT = 3802 # rejection and flat params -FIFF.FIFF_MNE_EPOCHS_RAW_SFREQ = 3803 # original raw sfreq - -# MNE annotations -FIFF.FIFFB_MNE_ANNOTATIONS = 3810 # annotations block - -# MNE Metadata Dataframes -FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block - -# Table to match unrecognized channel location names to their known aliases -CHANNEL_LOC_ALIASES = { - # this set of aliases are published in doi:10.1097/WNP.0000000000000316 and - # doi:10.1016/S1388-2457(00)00527-7. - 'Cb1': 'POO7', - 'Cb2': 'POO8', - 'CB1': 'POO7', - 'CB2': 'POO8', - 'T1': 'T9', - 'T2': 'T10', - 'T3': 'T7', - 'T4': 'T8', - 'T5': 'T9', - 'T6': 'T10', - 'M1': 'TP9', - 'M2': 'TP10', - # EGI ref chan is named VREF/Vertex Ref. - # In the standard montages for EGI, the ref is named Cz - 'VREF': 'Cz', - 'Vertex Reference': 'Cz' - # add a comment here (with doi of a published source) above any new - # aliases, as they are added -} +__all__ = ["FIFF"] diff --git a/mne/io/ctf/__init__.py b/mne/io/ctf/__init__.py index 61481f2efff..538d63f0ac3 100644 --- a/mne/io/ctf/__init__.py +++ b/mne/io/ctf/__init__.py @@ -1,7 +1,7 @@ """CTF module for conversion to FIF.""" -# Author: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .ctf import read_raw_ctf, RawCTF diff --git a/mne/io/ctf/constants.py b/mne/io/ctf/constants.py index c8dc99880d6..a99b627eeee 100644 --- a/mne/io/ctf/constants.py +++ b/mne/io/ctf/constants.py @@ -1,9 +1,8 @@ """CTF constants.""" -# Authors: Matti Hämäläinen -# Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from ...utils import BunchConst @@ -36,4 +35,4 @@ # read_write_data.c CTF.HEADER_SIZE = 8 CTF.BLOCK_SIZE = 2000 -CTF.SYSTEM_CLOCK_CH = 'SCLK01-177' +CTF.SYSTEM_CLOCK_CH = "SCLK01-177" diff --git a/mne/io/ctf/ctf.py b/mne/io/ctf/ctf.py index 4802af31c69..971ac51c2f6 100644 --- a/mne/io/ctf/ctf.py +++ b/mne/io/ctf/ctf.py @@ -1,33 +1,37 @@ """Conversion tool from CTF to FIF.""" -# Authors: Matti Hämäläinen -# Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
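[Editorial aside, not part of the patch.] The unit and multiplier constants deleted above (the constants module now re-exports only `FIFF` via `__all__ = ["FIFF"]`) encode a channel's physical unit as an SI unit code plus a power-of-ten multiplier: a stored value is interpreted as value × 10**unit_mul in the unit named by `unit`. A minimal sketch of that interpretation, assuming `FIFF` remains importable from `mne.io.constants` as in released versions; the `describe_unit` helper below is hypothetical and only illustrates the pairing of the two codes.

```python
from mne.io.constants import FIFF

# Hypothetical helper: turn a channel dict's (unit, unit_mul) pair into a
# readable string, e.g. FIFF_UNIT_V with FIFF_UNITM_MU -> "uV".
_UNIT_NAMES = {FIFF.FIFF_UNIT_V: "V", FIFF.FIFF_UNIT_T: "T",
               FIFF.FIFF_UNIT_T_M: "T/m", FIFF.FIFF_UNIT_NONE: "n/a"}
_PREFIXES = {FIFF.FIFF_UNITM_NONE: "", FIFF.FIFF_UNITM_M: "m",
             FIFF.FIFF_UNITM_MU: "u", FIFF.FIFF_UNITM_N: "n",
             FIFF.FIFF_UNITM_P: "p", FIFF.FIFF_UNITM_F: "f"}


def describe_unit(ch):
    """Describe one entry of info['chs'] (uses its 'unit' and 'unit_mul' keys)."""
    unit = _UNIT_NAMES.get(int(ch["unit"]), f"unit code {int(ch['unit'])}")
    prefix = _PREFIXES.get(int(ch["unit_mul"]), f"1e{int(ch['unit_mul'])} ")
    return prefix + unit


print(describe_unit(dict(unit=FIFF.FIFF_UNIT_V, unit_mul=FIFF.FIFF_UNITM_MU)))
# -> "uV"
```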
import os import numpy as np -from .._digitization import _format_dig_points -from ...utils import (verbose, logger, _clean_names, fill_doc, _check_option, - _check_fname) - +from ..._fiff._digitization import _format_dig_points +from ..._fiff.utils import _blk_read_lims, _mult_cal_one +from ...utils import ( + _check_fname, + _check_option, + _clean_names, + fill_doc, + logger, + verbose, +) from ..base import BaseRaw -from ..utils import _mult_cal_one, _blk_read_lims - -from .res4 import _read_res4, _make_ctf_name -from .hc import _read_hc -from .eeg import _read_eeg, _read_pos -from .trans import _make_ctf_coord_trans_set -from .info import _compose_meas_info, _read_bad_chans, _annotate_bad_segments from .constants import CTF +from .eeg import _read_eeg, _read_pos +from .hc import _read_hc +from .info import _annotate_bad_segments, _compose_meas_info, _read_bad_chans from .markers import _read_annotations_ctf_call +from .res4 import _make_ctf_name, _read_res4 +from .trans import _make_ctf_coord_trans_set @fill_doc -def read_raw_ctf(directory, system_clock='truncate', preload=False, - clean_names=False, verbose=None): +def read_raw_ctf( + directory, system_clock="truncate", preload=False, clean_names=False, verbose=None +) -> "RawCTF": """Raw object from CTF directory. Parameters @@ -49,11 +53,6 @@ def read_raw_ctf(directory, system_clock='truncate', preload=False, ------- raw : instance of RawCTF The raw data. - See :class:`mne.io.Raw` for documentation of attributes and methods. - - See Also - -------- - mne.io.Raw : Documentation of attributes and methods of RawCTF. Notes ----- @@ -64,8 +63,13 @@ def read_raw_ctf(directory, system_clock='truncate', preload=False, points will then automatically be read into the `mne.io.Raw` instance via `mne.io.read_raw_ctf`. 
""" - return RawCTF(directory, system_clock, preload=preload, - clean_names=clean_names, verbose=verbose) + return RawCTF( + directory, + system_clock, + preload=preload, + clean_names=clean_names, + verbose=verbose, + ) @fill_doc @@ -93,17 +97,24 @@ class RawCTF(BaseRaw): """ @verbose - def __init__(self, directory, system_clock='truncate', preload=False, - verbose=None, clean_names=False): # noqa: D102 + def __init__( + self, + directory, + system_clock="truncate", + preload=False, + verbose=None, + clean_names=False, + ): # adapted from mne_ctf2fiff.c directory = str( _check_fname(directory, "read", True, "directory", need_dir=True) ) - if not directory.endswith('.ds'): - raise TypeError('directory must be a directory ending with ".ds", ' - f'got {directory}') - _check_option('system_clock', system_clock, ['ignore', 'truncate']) - logger.info('ds directory : %s' % directory) + if not directory.endswith(".ds"): + raise TypeError( + f'directory must be a directory ending with ".ds", got {directory}' + ) + _check_option("system_clock", system_clock, ["ignore", "truncate"]) + logger.info(f"ds directory : {directory}") res4 = _read_res4(directory) # Read the magical res4 file coils = _read_hc(directory) # Read the coil locations eeg = _read_eeg(directory) # Read the EEG electrode loc info @@ -116,9 +127,9 @@ def __init__(self, directory, system_clock='truncate', preload=False, # Compose a structure which makes fiff writing a piece of cake info = _compose_meas_info(res4, coils, coord_trans, eeg) with info._unlock(): - info['dig'] += digs - info['dig'] = _format_dig_points(info['dig']) - info['bads'] += _read_bad_chans(directory, info) + info["dig"] += digs + info["dig"] = _format_dig_points(info["dig"]) + info["bads"] += _read_bad_chans(directory, info) # Determine how our data is distributed across files fnames = list() @@ -127,143 +138,166 @@ def __init__(self, directory, system_clock='truncate', preload=False, missing_names = list() no_samps = list() while True: - suffix = 'meg4' if len(fnames) == 0 else ('%d_meg4' % len(fnames)) - meg4_name, found = _make_ctf_name( - directory, suffix, raise_error=False) + suffix = "meg4" if len(fnames) == 0 else f"{len(fnames)}_meg4" + meg4_name, found = _make_ctf_name(directory, suffix, raise_error=False) if not found: missing_names.append(os.path.relpath(meg4_name, directory)) break # check how much data is in the file sample_info = _get_sample_info(meg4_name, res4, system_clock) - if sample_info['n_samp'] == 0: + if sample_info["n_samp"] == 0: no_samps.append(os.path.relpath(meg4_name, directory)) break if len(fnames) == 0: - buffer_size_sec = sample_info['block_size'] / info['sfreq'] + buffer_size_sec = sample_info["block_size"] / info["sfreq"] else: - buffer_size_sec = 1. 
+ buffer_size_sec = 1.0 fnames.append(meg4_name) - last_samps.append(sample_info['n_samp'] - 1) + last_samps.append(sample_info["n_samp"] - 1) raw_extras.append(sample_info) first_samps = [0] * len(last_samps) if len(fnames) == 0: - raise IOError( - f'Could not find any data, could not find the following ' - f'file(s): {missing_names}, and the following file(s) had no ' - f'valid samples: {no_samps}') - super(RawCTF, self).__init__( - info, preload, first_samps=first_samps, - last_samps=last_samps, filenames=fnames, - raw_extras=raw_extras, orig_format='int', - buffer_size_sec=buffer_size_sec, verbose=verbose) + raise OSError( + f"Could not find any data, could not find the following " + f"file(s): {missing_names}, and the following file(s) had no " + f"valid samples: {no_samps}" + ) + super().__init__( + info, + preload, + first_samps=first_samps, + last_samps=last_samps, + filenames=fnames, + raw_extras=raw_extras, + orig_format="int", + buffer_size_sec=buffer_size_sec, + verbose=verbose, + ) # Add bad segments as Annotations (correct for start time) - start_time = -res4['pre_trig_pts'] / float(info['sfreq']) - annot = _annotate_bad_segments(directory, start_time, - info['meas_date']) + start_time = -res4["pre_trig_pts"] / float(info["sfreq"]) + annot = _annotate_bad_segments(directory, start_time, info["meas_date"]) marker_annot = _read_annotations_ctf_call( directory=directory, - total_offset=(res4['pre_trig_pts'] / res4['sfreq']), - trial_duration=(res4['nsamp'] / res4['sfreq']), - meas_date=info['meas_date'] + total_offset=(res4["pre_trig_pts"] / res4["sfreq"]), + trial_duration=(res4["nsamp"] / res4["sfreq"]), + meas_date=info["meas_date"], ) annot = marker_annot if annot is None else annot + marker_annot self.set_annotations(annot) - if clean_names: - self._clean_names() + _clean_names_inst(self) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" si = self._raw_extras[fi] offset = 0 - trial_start_idx, r_lims, d_lims = _blk_read_lims(start, stop, - int(si['block_size'])) - with open(self._filenames[fi], 'rb') as fid: + trial_start_idx, r_lims, d_lims = _blk_read_lims( + start, stop, int(si["block_size"]) + ) + with open(self.filenames[fi], "rb") as fid: for bi in range(len(r_lims)): - samp_offset = (bi + trial_start_idx) * si['res4_nsamp'] - n_read = min(si['n_samp_tot'] - samp_offset, si['block_size']) + samp_offset = (bi + trial_start_idx) * si["res4_nsamp"] + n_read = min(si["n_samp_tot"] - samp_offset, si["block_size"]) # read the chunk of data # have to be careful on Windows and make sure we are using # 64-bit integers here - with np.errstate(over='raise'): + with np.errstate(over="raise"): pos = np.int64(CTF.HEADER_SIZE) - pos += np.int64(samp_offset) * si['n_chan'] * 4 + pos += np.int64(samp_offset) * si["n_chan"] * 4 fid.seek(pos, 0) - this_data = np.fromfile(fid, '>i4', - count=si['n_chan'] * n_read) - this_data.shape = (si['n_chan'], n_read) - this_data = this_data[:, r_lims[bi, 0]:r_lims[bi, 1]] - data_view = data[:, d_lims[bi, 0]:d_lims[bi, 1]] + this_data = np.fromfile(fid, ">i4", count=si["n_chan"] * n_read) + this_data.shape = (si["n_chan"], n_read) + this_data = this_data[:, r_lims[bi, 0] : r_lims[bi, 1]] + data_view = data[:, d_lims[bi, 0] : d_lims[bi, 1]] _mult_cal_one(data_view, this_data, idx, cals, mult) offset += n_read - def _clean_names(self): - """Clean up CTF suffixes from channel names.""" - mapping = dict(zip(self.ch_names, _clean_names(self.ch_names))) - - self.rename_channels(mapping) - for comp in 
self.info['comps']: - for key in ('row_names', 'col_names'): - comp['data'][key] = _clean_names(comp['data'][key]) +def _clean_names_inst(inst): + """Clean up CTF suffixes from channel names.""" + mapping = dict(zip(inst.ch_names, _clean_names(inst.ch_names))) + inst.rename_channels(mapping) + for comp in inst.info["comps"]: + for key in ("row_names", "col_names"): + comp["data"][key] = _clean_names(comp["data"][key]) def _get_sample_info(fname, res4, system_clock): """Determine the number of valid samples.""" - logger.info('Finding samples for %s: ' % (fname,)) - if CTF.SYSTEM_CLOCK_CH in res4['ch_names']: - clock_ch = res4['ch_names'].index(CTF.SYSTEM_CLOCK_CH) + logger.info(f"Finding samples for {fname}: ") + if CTF.SYSTEM_CLOCK_CH in res4["ch_names"]: + clock_ch = res4["ch_names"].index(CTF.SYSTEM_CLOCK_CH) else: clock_ch = None - for k, ch in enumerate(res4['chs']): - if ch['ch_name'] == CTF.SYSTEM_CLOCK_CH: + for k, ch in enumerate(res4["chs"]): + if ch["ch_name"] == CTF.SYSTEM_CLOCK_CH: clock_ch = k break - with open(fname, 'rb') as fid: + with open(fname, "rb") as fid: fid.seek(0, os.SEEK_END) st_size = fid.tell() fid.seek(0, 0) - if (st_size - CTF.HEADER_SIZE) % (4 * res4['nsamp'] * - res4['nchan']) != 0: - raise RuntimeError('The number of samples is not an even multiple ' - 'of the trial size') - n_samp_tot = (st_size - CTF.HEADER_SIZE) // (4 * res4['nchan']) - n_trial = n_samp_tot // res4['nsamp'] + if (st_size - CTF.HEADER_SIZE) % (4 * res4["nsamp"] * res4["nchan"]) != 0: + raise RuntimeError( + "The number of samples is not an even multiple of the trial size" + ) + n_samp_tot = (st_size - CTF.HEADER_SIZE) // (4 * res4["nchan"]) + n_trial = n_samp_tot // res4["nsamp"] n_samp = n_samp_tot if clock_ch is None: - logger.info(' System clock channel is not available, assuming ' - 'all samples to be valid.') - elif system_clock == 'ignore': - logger.info(' System clock channel is available, but ignored.') + logger.info( + " System clock channel is not available, assuming " + "all samples to be valid." + ) + elif system_clock == "ignore": + logger.info(" System clock channel is available, but ignored.") else: # use it - logger.info(' System clock channel is available, checking ' - 'which samples are valid.') + logger.info( + " System clock channel is available, checking " + "which samples are valid." 
+ ) for t in range(n_trial): # Skip to the correct trial - samp_offset = t * res4['nsamp'] - offset = CTF.HEADER_SIZE + (samp_offset * res4['nchan'] + - (clock_ch * res4['nsamp'])) * 4 + samp_offset = t * res4["nsamp"] + offset = ( + CTF.HEADER_SIZE + + (samp_offset * res4["nchan"] + (clock_ch * res4["nsamp"])) * 4 + ) fid.seek(offset, 0) - this_data = np.fromfile(fid, '>i4', res4['nsamp']) - if len(this_data) != res4['nsamp']: - raise RuntimeError('Cannot read data for trial %d' - % (t + 1)) + this_data = np.fromfile(fid, ">i4", res4["nsamp"]) + if len(this_data) != res4["nsamp"]: + raise RuntimeError(f"Cannot read data for trial {t + 1}.") end = np.where(this_data == 0)[0] if len(end) > 0: n_samp = samp_offset + end[0] break - if n_samp < res4['nsamp']: + if n_samp < res4["nsamp"]: n_trial = 1 - logger.info(' %d x %d = %d samples from %d chs' - % (n_trial, n_samp, n_samp, res4['nchan'])) + logger.info( + " %d x %d = %d samples from %d chs", + n_trial, + n_samp, + n_samp, + res4["nchan"], + ) else: - n_trial = n_samp // res4['nsamp'] + n_trial = n_samp // res4["nsamp"] n_omit = n_samp_tot - n_samp - logger.info(' %d x %d = %d samples from %d chs' - % (n_trial, res4['nsamp'], n_samp, res4['nchan'])) + logger.info( + " %d x %d = %d samples from %d chs", + n_trial, + res4["nsamp"], + n_samp, + res4["nchan"], + ) if n_omit != 0: - logger.info(' %d samples omitted at the end' % n_omit) + logger.info(" %d samples omitted at the end", n_omit) - return dict(n_samp=n_samp, n_samp_tot=n_samp_tot, block_size=res4['nsamp'], - res4_nsamp=res4['nsamp'], n_chan=res4['nchan']) + return dict( + n_samp=n_samp, + n_samp_tot=n_samp_tot, + block_size=res4["nsamp"], + res4_nsamp=res4["nsamp"], + n_chan=res4["nchan"], + ) diff --git a/mne/io/ctf/eeg.py b/mne/io/ctf/eeg.py index 4e6091abefd..cd39bc980e0 100644 --- a/mne/io/ctf/eeg.py +++ b/mne/io/ctf/eeg.py @@ -1,75 +1,84 @@ """Read .eeg files.""" -# Author: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
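[Editorial aside, not part of the patch.] The `read_raw_ctf()` reformatting above does not change behaviour, so a brief usage sketch may help reviewers connect the public options to the private helpers touched in this hunk: `system_clock` is honoured by `_get_sample_info()` and `clean_names` now calls the module-level `_clean_names_inst()`. The `.ds` directory name below is hypothetical.

```python
import mne

# Hypothetical dataset path; any CTF recording directory ending in ".ds".
raw = mne.io.read_raw_ctf(
    "subject01_task.ds",
    system_clock="truncate",  # stop at the first zero of the system clock channel
    clean_names=True,         # strip CTF suffixes (e.g. "-2805") from channel names
    preload=False,
)
print(raw.info["bads"])       # bad channels parsed from the BadChannels file
print(raw.annotations)        # bad.segments and MarkerFile.mrk become Annotations
```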
-import numpy as np -from os.path import join from os import listdir +from os.path import join + +import numpy as np +from ..._fiff.constants import FIFF +from ...transforms import apply_trans from ...utils import logger, warn -from ..constants import FIFF from .res4 import _make_ctf_name -from ...transforms import apply_trans - -_cardinal_dict = dict(nasion=FIFF.FIFFV_POINT_NASION, - lpa=FIFF.FIFFV_POINT_LPA, left=FIFF.FIFFV_POINT_LPA, - rpa=FIFF.FIFFV_POINT_RPA, right=FIFF.FIFFV_POINT_RPA) +_cardinal_dict = dict( + nasion=FIFF.FIFFV_POINT_NASION, + lpa=FIFF.FIFFV_POINT_LPA, + left=FIFF.FIFFV_POINT_LPA, + rpa=FIFF.FIFFV_POINT_RPA, + right=FIFF.FIFFV_POINT_RPA, +) def _read_eeg(directory): """Read the .eeg file.""" # Missing file is ok - fname, found = _make_ctf_name(directory, 'eeg', raise_error=False) + fname, found = _make_ctf_name(directory, "eeg", raise_error=False) if not found: - logger.info(' Separate EEG position data file not present.') + logger.info(" Separate EEG position data file not present.") return - eeg = dict(labels=list(), kinds=list(), ids=list(), rr=list(), np=0, - assign_to_chs=True, coord_frame=FIFF.FIFFV_MNE_COORD_CTF_HEAD) - with open(fname, 'rb') as fid: + eeg = dict( + labels=list(), + kinds=list(), + ids=list(), + rr=list(), + np=0, + assign_to_chs=True, + coord_frame=FIFF.FIFFV_MNE_COORD_CTF_HEAD, + ) + with open(fname, "rb") as fid: for line in fid: line = line.strip() if len(line) > 0: - parts = line.decode('utf-8').split() + parts = line.decode("utf-8").split() if len(parts) != 5: - raise RuntimeError('Illegal data in EEG position file: %s' - % line) - r = np.array([float(p) for p in parts[2:]]) / 100. + raise RuntimeError(f"Illegal data in EEG position file: {line}") + r = np.array([float(p) for p in parts[2:]]) / 100.0 if (r * r).sum() > 1e-4: label = parts[1] - eeg['labels'].append(label) - eeg['rr'].append(r) + eeg["labels"].append(label) + eeg["rr"].append(r) id_ = _cardinal_dict.get(label.lower(), int(parts[0])) if label.lower() in _cardinal_dict: kind = FIFF.FIFFV_POINT_CARDINAL else: kind = FIFF.FIFFV_POINT_EXTRA - eeg['ids'].append(id_) - eeg['kinds'].append(kind) - eeg['np'] += 1 - logger.info(' Separate EEG position data file read.') + eeg["ids"].append(id_) + eeg["kinds"].append(kind) + eeg["np"] += 1 + logger.info(" Separate EEG position data file read.") return eeg def _read_pos(directory, transformations): """Read the .pos file and return eeg positions as dig extra points.""" - fname = [join(directory, f) for f in listdir(directory) if - f.endswith('.pos')] + fname = [join(directory, f) for f in listdir(directory) if f.endswith(".pos")] if len(fname) < 1: return list() elif len(fname) > 1: - warn(' Found multiple pos files. Extra digitizer points not added.') + warn(" Found multiple pos files. Extra digitizer points not added.") return list() - logger.info(' Reading digitizer points from %s...' % fname) - if transformations['t_ctf_head_head'] is None: - warn(' No transformation found. Extra digitizer points not added.') + logger.info(f" Reading digitizer points from {fname}...") + if transformations["t_ctf_head_head"] is None: + warn(" No transformation found. 
Extra digitizer points not added.") return list() fname = fname[0] digs = list() i = 2000 - with open(fname, 'r') as fid: + with open(fname) as fid: for line in fid: line = line.strip() if len(line) > 0: @@ -85,11 +94,15 @@ def _read_pos(directory, transformations): except ValueError: # if id is not an int ident = i i += 1 - dig = dict(kind=FIFF.FIFFV_POINT_EXTRA, ident=ident, r=list(), - coord_frame=FIFF.FIFFV_COORD_HEAD) - r = np.array([float(p) for p in parts[-3:]]) / 100. # cm to m + dig = dict( + kind=FIFF.FIFFV_POINT_EXTRA, + ident=ident, + r=list(), + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + r = np.array([float(p) for p in parts[-3:]]) / 100.0 # cm to m if (r * r).sum() > 1e-4: - r = apply_trans(transformations['t_ctf_head_head'], r) - dig['r'] = r + r = apply_trans(transformations["t_ctf_head_head"], r) + dig["r"] = r digs.append(dig) return digs diff --git a/mne/io/ctf/hc.py b/mne/io/ctf/hc.py index 1911fc84055..22acceda0ca 100644 --- a/mne/io/ctf/hc.py +++ b/mne/io/ctf/hc.py @@ -1,84 +1,89 @@ """Read .hc files.""" -# Author: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np +from ..._fiff.constants import FIFF from ...utils import logger -from .res4 import _make_ctf_name from .constants import CTF -from ..constants import FIFF - +from .res4 import _make_ctf_name -_kind_dict = {'nasion': CTF.CTFV_COIL_NAS, 'left ear': CTF.CTFV_COIL_LPA, - 'right ear': CTF.CTFV_COIL_RPA, 'spare': CTF.CTFV_COIL_SPARE} +_kind_dict = { + "nasion": CTF.CTFV_COIL_NAS, + "left ear": CTF.CTFV_COIL_LPA, + "right ear": CTF.CTFV_COIL_RPA, + "spare": CTF.CTFV_COIL_SPARE, +} -_coord_dict = {'relative to dewar': FIFF.FIFFV_MNE_COORD_CTF_DEVICE, - 'relative to head': FIFF.FIFFV_MNE_COORD_CTF_HEAD} +_coord_dict = { + "relative to dewar": FIFF.FIFFV_MNE_COORD_CTF_DEVICE, + "relative to head": FIFF.FIFFV_MNE_COORD_CTF_HEAD, +} def _read_one_coil_point(fid): """Read coil coordinate information from the hc file.""" # Descriptor - one = '#' - while len(one) > 0 and one[0] == '#': + one = "#" + while len(one) > 0 and one[0] == "#": one = fid.readline() if len(one) == 0: return None - one = one.strip().decode('utf-8') - if 'Unable' in one: + one = one.strip().decode("utf-8") + if "Unable" in one: raise RuntimeError("HPI information not available") # Hopefully this is an unambiguous interpretation p = dict() - p['valid'] = ('measured' in one) + p["valid"] = "measured" in one for key, val in _coord_dict.items(): if key in one: - p['coord_frame'] = val + p["coord_frame"] = val break else: - p['coord_frame'] = -1 + p["coord_frame"] = -1 for key, val in _kind_dict.items(): if key in one: - p['kind'] = val + p["kind"] = val break else: - p['kind'] = -1 + p["kind"] = -1 # Three coordinates - p['r'] = np.empty(3) - for ii, coord in enumerate('xyz'): - sp = fid.readline().decode('utf-8').strip() + p["r"] = np.empty(3) + for ii, coord in enumerate("xyz"): + sp = fid.readline().decode("utf-8").strip() if len(sp) == 0: # blank line continue - sp = sp.split(' ') - if len(sp) != 3 or sp[0] != coord or sp[1] != '=': - raise RuntimeError('Bad line: %s' % one) + sp = sp.split(" ") + if len(sp) != 3 or sp[0] != coord or sp[1] != "=": + raise RuntimeError(f"Bad line: {one}") # We do not deal with centimeters - p['r'][ii] = float(sp[2]) / 100.0 + p["r"][ii] = float(sp[2]) / 100.0 return p def _read_hc(directory): """Read the hc file to get the HPI info and to prepare for coord trans.""" - fname, found = _make_ctf_name(directory, 'hc', 
raise_error=False) + fname, found = _make_ctf_name(directory, "hc", raise_error=False) if not found: - logger.info(' hc data not present') + logger.info(" hc data not present") return None s = list() - with open(fname, 'rb') as fid: + with open(fname, "rb") as fid: while True: p = _read_one_coil_point(fid) if p is None: # First point bad indicates that the file is empty if len(s) == 0: - logger.info('hc file empty, no data present') + logger.info("hc file empty, no data present") return None # Returns None if at EOF - logger.info(' hc data read.') + logger.info(" hc data read.") return s - if p['valid']: + if p["valid"]: s.append(p) diff --git a/mne/io/ctf/info.py b/mne/io/ctf/info.py index 587ca8bd85f..685a20792d3 100644 --- a/mne/io/ctf/info.py +++ b/mne/io/ctf/info.py @@ -1,31 +1,34 @@ """Populate measurement info.""" -# Author: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from time import strptime -from calendar import timegm import os.path as op +from calendar import timegm +from time import strptime import numpy as np -from ...utils import logger, warn, _clean_names -from ...transforms import (apply_trans, _coord_frame_name, invert_transform, - combine_transforms) +from ..._fiff.constants import FIFF +from ..._fiff.ctf_comp import _add_kind, _calibrate_comp +from ..._fiff.meas_info import _empty_info +from ..._fiff.write import get_new_file_id from ...annotations import Annotations - -from ..meas_info import _empty_info -from ..write import get_new_file_id -from ..ctf_comp import _add_kind, _calibrate_comp -from ..constants import FIFF - +from ...transforms import ( + _coord_frame_name, + apply_trans, + combine_transforms, + invert_transform, +) +from ...utils import _clean_names, logger, warn from .constants import CTF - -_ctf_to_fiff = {CTF.CTFV_COIL_LPA: FIFF.FIFFV_POINT_LPA, - CTF.CTFV_COIL_RPA: FIFF.FIFFV_POINT_RPA, - CTF.CTFV_COIL_NAS: FIFF.FIFFV_POINT_NASION} +_ctf_to_fiff = { + CTF.CTFV_COIL_LPA: FIFF.FIFFV_POINT_LPA, + CTF.CTFV_COIL_RPA: FIFF.FIFFV_POINT_RPA, + CTF.CTFV_COIL_NAS: FIFF.FIFFV_POINT_NASION, +} def _pick_isotrak_and_hpi_coils(res4, coils, t): @@ -37,47 +40,58 @@ def _pick_isotrak_and_hpi_coils(res4, coils, t): n_coil_dev = 0 n_coil_head = 0 for p in coils: - if p['valid']: - if p['kind'] in [CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA, - CTF.CTFV_COIL_NAS]: + if p["valid"]: + if p["kind"] in [CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA, CTF.CTFV_COIL_NAS]: kind = FIFF.FIFFV_POINT_CARDINAL - ident = _ctf_to_fiff[p['kind']] + ident = _ctf_to_fiff[p["kind"]] else: # CTF.CTFV_COIL_SPARE kind = FIFF.FIFFV_POINT_HPI - ident = p['kind'] - if p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE: - if t is None or t['t_ctf_dev_dev'] is None: - raise RuntimeError('No coordinate transformation ' - 'available for HPI coil locations') - d = dict(kind=kind, ident=ident, - r=apply_trans(t['t_ctf_dev_dev'], p['r']), - coord_frame=FIFF.FIFFV_COORD_UNKNOWN) - hpi_result['dig_points'].append(d) + ident = p["kind"] + if p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE: + if t is None or t["t_ctf_dev_dev"] is None: + raise RuntimeError( + "No coordinate transformation available for HPI coil locations" + ) + d = dict( + kind=kind, + ident=ident, + r=apply_trans(t["t_ctf_dev_dev"], p["r"]), + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + ) + hpi_result["dig_points"].append(d) n_coil_dev += 1 - elif p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: - if t is None or t['t_ctf_head_head'] is None: - raise RuntimeError('No 
coordinate transformation ' - 'available for (virtual) Polhemus data') - d = dict(kind=kind, ident=ident, - r=apply_trans(t['t_ctf_head_head'], p['r']), - coord_frame=FIFF.FIFFV_COORD_HEAD) + elif p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + if t is None or t["t_ctf_head_head"] is None: + raise RuntimeError( + "No coordinate transformation " + "available for (virtual) Polhemus data" + ) + d = dict( + kind=kind, + ident=ident, + r=apply_trans(t["t_ctf_head_head"], p["r"]), + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) dig.append(d) n_coil_head += 1 if n_coil_head > 0: - logger.info(' Polhemus data for %d HPI coils added' % n_coil_head) + logger.info(" Polhemus data for %d HPI coils added", n_coil_head) if n_coil_dev > 0: - logger.info(' Device coordinate locations for %d HPI coils added' - % n_coil_dev) + logger.info( + " Device coordinate locations for %d HPI coils added", n_coil_dev + ) return dig, [hpi_result] def _convert_time(date_str, time_str): """Convert date and time strings to float time.""" - if date_str == time_str == '': - date_str = '01/01/1970' - time_str = '00:00:00' - logger.info('No date or time found, setting to the start of the ' - 'POSIX epoch (1970/01/01 midnight)') + if date_str == time_str == "": + date_str = "01/01/1970" + time_str = "00:00:00" + logger.info( + "No date or time found, setting to the start of the " + "POSIX epoch (1970/01/01 midnight)" + ) for fmt in ("%d/%m/%Y", "%d-%b-%Y", "%a, %b %d, %Y", "%Y/%m/%d"): try: @@ -88,12 +102,13 @@ def _convert_time(date_str, time_str): break else: raise RuntimeError( - 'Illegal date: %s.\nIf the language of the date does not ' - 'correspond to your local machine\'s language try to set the ' - 'locale to the language of the date string:\n' - 'locale.setlocale(locale.LC_ALL, "en_US")' % date_str) + f"Illegal date: {date_str}.\nIf the language of the date does not " + "correspond to your local machine's language try to set the " + "locale to the language of the date string:\n" + 'locale.setlocale(locale.LC_ALL, "en_US")' + ) - for fmt in ('%H:%M:%S', '%H:%M'): + for fmt in ("%H:%M:%S", "%H:%M"): try: time = strptime(time_str, fmt) except ValueError: @@ -101,15 +116,25 @@ def _convert_time(date_str, time_str): else: break else: - raise RuntimeError('Illegal time: %s' % time_str) + raise RuntimeError(f"Illegal time: {time_str}") # MNE-C uses mktime which uses local time, but here we instead decouple # conversion location from the process, and instead assume that the # acquisition was in GMT. This will be wrong for most sites, but at least # the value we obtain here won't depend on the geographical location # that the file was converted. - res = timegm((date.tm_year, date.tm_mon, date.tm_mday, - time.tm_hour, time.tm_min, time.tm_sec, - date.tm_wday, date.tm_yday, date.tm_isdst)) + res = timegm( + ( + date.tm_year, + date.tm_mon, + date.tm_mday, + time.tm_hour, + time.tm_min, + time.tm_sec, + date.tm_wday, + date.tm_yday, + date.tm_isdst, + ) + ) return res @@ -118,15 +143,15 @@ def _get_plane_vectors(ez): assert ez.shape == (3,) ez_len = np.sqrt(np.sum(ez * ez)) if ez_len == 0: - raise RuntimeError('Zero length normal. Cannot proceed.') + raise RuntimeError("Zero length normal. Cannot proceed.") if np.abs(ez_len - np.abs(ez[2])) < 1e-5: # ez already in z-direction - ex = np.array([1., 0., 0.]) + ex = np.array([1.0, 0.0, 0.0]) else: ex = np.zeros(3) if ez[1] < ez[2]: - ex[0 if ez[0] < ez[1] else 1] = 1. + ex[0 if ez[0] < ez[1] else 1] = 1.0 else: - ex[0 if ez[0] < ez[2] else 2] = 1. 
+ ex[0 if ez[0] < ez[2] else 2] = 1.0 ez /= ez_len ex -= np.dot(ez, ex) * ez ex /= np.sqrt(np.sum(ex * ex)) @@ -136,16 +161,17 @@ def _get_plane_vectors(ez): def _at_origin(x): """Determine if a vector is at the origin.""" - return (np.sum(x * x) < 1e-8) + return np.sum(x * x) < 1e-8 def _check_comp_ch(cch, kind, desired=None): if desired is None: - desired = cch['grad_order_no'] - if cch['grad_order_no'] != desired: - raise RuntimeError('%s channel with inconsistent compensation ' - 'grade %s, should be %s' - % (kind, cch['grad_order_no'], desired)) + desired = cch["grad_order_no"] + if cch["grad_order_no"] != desired: + raise RuntimeError( + f"{kind} channel with inconsistent compensation " + f"grade {cch['grad_order_no']}, should be {desired}" + ) return desired @@ -154,49 +180,64 @@ def _convert_channel_info(res4, t, use_eeg_pos): nmeg = neeg = nstim = nmisc = nref = 0 chs = list() this_comp = None - for k, cch in enumerate(res4['chs']): - cal = float(1. / (cch['proper_gain'] * cch['qgain'])) - ch = dict(scanno=k + 1, range=1., cal=cal, loc=np.full(12, np.nan), - unit_mul=FIFF.FIFF_UNITM_NONE, ch_name=cch['ch_name'][:15], - coil_type=FIFF.FIFFV_COIL_NONE) + for k, cch in enumerate(res4["chs"]): + cal = float(1.0 / (cch["proper_gain"] * cch["qgain"])) + ch = dict( + scanno=k + 1, + range=1.0, + cal=cal, + loc=np.full(12, np.nan), + unit_mul=FIFF.FIFF_UNITM_NONE, + ch_name=cch["ch_name"][:15], + coil_type=FIFF.FIFFV_COIL_NONE, + ) del k chs.append(ch) # Create the channel position information - if cch['sensor_type_index'] in (CTF.CTFV_REF_MAG_CH, - CTF.CTFV_REF_GRAD_CH, - CTF.CTFV_MEG_CH): + if cch["sensor_type_index"] in ( + CTF.CTFV_REF_MAG_CH, + CTF.CTFV_REF_GRAD_CH, + CTF.CTFV_MEG_CH, + ): # Extra check for a valid MEG channel - if np.sum(cch['coil']['pos'][0] ** 2) < 1e-6 or \ - np.sum(cch['coil']['norm'][0] ** 2) < 1e-6: + if ( + np.sum(cch["coil"]["pos"][0] ** 2) < 1e-6 + or np.sum(cch["coil"]["norm"][0] ** 2) < 1e-6 + ): nmisc += 1 - ch.update(logno=nmisc, coord_frame=FIFF.FIFFV_COORD_UNKNOWN, - kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V) - text = 'MEG' - if cch['sensor_type_index'] != CTF.CTFV_MEG_CH: - text += ' ref' - warn('%s channel %s did not have position assigned, so ' - 'it was changed to a MISC channel' - % (text, ch['ch_name'])) + ch.update( + logno=nmisc, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_MISC_CH, + unit=FIFF.FIFF_UNIT_V, + ) + text = "MEG" + if cch["sensor_type_index"] != CTF.CTFV_MEG_CH: + text += " ref" + warn( + f"{text} channel {ch['ch_name']} did not have position " + "assigned, so it was changed to a MISC channel" + ) continue - ch['unit'] = FIFF.FIFF_UNIT_T + ch["unit"] = FIFF.FIFF_UNIT_T # Set up the local coordinate frame - r0 = cch['coil']['pos'][0].copy() - ez = cch['coil']['norm'][0].copy() + r0 = cch["coil"]["pos"][0].copy() + ez = cch["coil"]["norm"][0].copy() # It turns out that positive proper_gain requires swapping # of the normal direction - if cch['proper_gain'] > 0.0: + if cch["proper_gain"] > 0.0: ez *= -1 # Check how the other vectors should be defined off_diag = False # Default: ex and ey are arbitrary in the plane normal to ez - if cch['sensor_type_index'] == CTF.CTFV_REF_GRAD_CH: + if cch["sensor_type_index"] == CTF.CTFV_REF_GRAD_CH: # The off-diagonal gradiometers are an exception: # # We use the same convention for ex as for Neuromag planar # gradiometers: ex pointing in the positive gradient direction - diff = cch['coil']['pos'][0] - cch['coil']['pos'][1] + diff = cch["coil"]["pos"][0] - cch["coil"]["pos"][1] 
size = np.sqrt(np.sum(diff * diff)) - if size > 0.: + if size > 0.0: diff /= size # Is ez normal to the line joining the coils? if np.abs(np.dot(diff, ez)) < 1e-3: @@ -210,69 +251,84 @@ def _convert_channel_info(res4, t, use_eeg_pos): else: ex, ey = _get_plane_vectors(ez) # Transform into a Neuromag-like device coordinate system - ch['loc'] = np.concatenate([ - apply_trans(t['t_ctf_dev_dev'], r0), - apply_trans(t['t_ctf_dev_dev'], ex, move=False), - apply_trans(t['t_ctf_dev_dev'], ey, move=False), - apply_trans(t['t_ctf_dev_dev'], ez, move=False)]) + ch["loc"] = np.concatenate( + [ + apply_trans(t["t_ctf_dev_dev"], r0), + apply_trans(t["t_ctf_dev_dev"], ex, move=False), + apply_trans(t["t_ctf_dev_dev"], ey, move=False), + apply_trans(t["t_ctf_dev_dev"], ez, move=False), + ] + ) del r0, ex, ey, ez # Set the coil type - if cch['sensor_type_index'] == CTF.CTFV_REF_MAG_CH: - ch['kind'] = FIFF.FIFFV_REF_MEG_CH - ch['coil_type'] = FIFF.FIFFV_COIL_CTF_REF_MAG + if cch["sensor_type_index"] == CTF.CTFV_REF_MAG_CH: + ch["kind"] = FIFF.FIFFV_REF_MEG_CH + ch["coil_type"] = FIFF.FIFFV_COIL_CTF_REF_MAG nref += 1 - ch['logno'] = nref - elif cch['sensor_type_index'] == CTF.CTFV_REF_GRAD_CH: - ch['kind'] = FIFF.FIFFV_REF_MEG_CH + ch["logno"] = nref + elif cch["sensor_type_index"] == CTF.CTFV_REF_GRAD_CH: + ch["kind"] = FIFF.FIFFV_REF_MEG_CH if off_diag: - ch['coil_type'] = FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD + ch["coil_type"] = FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD else: - ch['coil_type'] = FIFF.FIFFV_COIL_CTF_REF_GRAD + ch["coil_type"] = FIFF.FIFFV_COIL_CTF_REF_GRAD nref += 1 - ch['logno'] = nref + ch["logno"] = nref else: - this_comp = _check_comp_ch(cch, 'Gradiometer', this_comp) - ch['kind'] = FIFF.FIFFV_MEG_CH - ch['coil_type'] = FIFF.FIFFV_COIL_CTF_GRAD + this_comp = _check_comp_ch(cch, "Gradiometer", this_comp) + ch["kind"] = FIFF.FIFFV_MEG_CH + ch["coil_type"] = FIFF.FIFFV_COIL_CTF_GRAD nmeg += 1 - ch['logno'] = nmeg + ch["logno"] = nmeg # Encode the software gradiometer order - ch['coil_type'] = int( - ch['coil_type'] | (cch['grad_order_no'] << 16)) - ch['coord_frame'] = FIFF.FIFFV_COORD_DEVICE - elif cch['sensor_type_index'] == CTF.CTFV_EEG_CH: + ch["coil_type"] = int(ch["coil_type"] | (cch["grad_order_no"] << 16)) + ch["coord_frame"] = FIFF.FIFFV_COORD_DEVICE + elif cch["sensor_type_index"] == CTF.CTFV_EEG_CH: coord_frame = FIFF.FIFFV_COORD_HEAD if use_eeg_pos: # EEG electrode coordinates may be present but in the # CTF head frame - ch['loc'][:3] = cch['coil']['pos'][0] - if not _at_origin(ch['loc'][:3]): - if t['t_ctf_head_head'] is None: - warn('EEG electrode (%s) location omitted because of ' - 'missing HPI information' % ch['ch_name']) - ch['loc'].fill(np.nan) + ch["loc"][:3] = cch["coil"]["pos"][0] + if not _at_origin(ch["loc"][:3]): + if t["t_ctf_head_head"] is None: + warn( + f"EEG electrode ({ch['ch_name']}) location omitted because " + "of missing HPI information" + ) + ch["loc"].fill(np.nan) coord_frame = FIFF.FIFFV_MNE_COORD_CTF_HEAD else: - ch['loc'][:3] = apply_trans( - t['t_ctf_head_head'], ch['loc'][:3]) + ch["loc"][:3] = apply_trans(t["t_ctf_head_head"], ch["loc"][:3]) neeg += 1 - ch.update(logno=neeg, kind=FIFF.FIFFV_EEG_CH, - unit=FIFF.FIFF_UNIT_V, coord_frame=coord_frame, - coil_type=FIFF.FIFFV_COIL_EEG) - elif cch['sensor_type_index'] == CTF.CTFV_STIM_CH: + ch.update( + logno=neeg, + kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V, + coord_frame=coord_frame, + coil_type=FIFF.FIFFV_COIL_EEG, + ) + elif cch["sensor_type_index"] == CTF.CTFV_STIM_CH: nstim += 1 - 
ch.update(logno=nstim, coord_frame=FIFF.FIFFV_COORD_UNKNOWN, - kind=FIFF.FIFFV_STIM_CH, unit=FIFF.FIFF_UNIT_V) + ch.update( + logno=nstim, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_STIM_CH, + unit=FIFF.FIFF_UNIT_V, + ) else: nmisc += 1 - ch.update(logno=nmisc, coord_frame=FIFF.FIFFV_COORD_UNKNOWN, - kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V) + ch.update( + logno=nmisc, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_MISC_CH, + unit=FIFF.FIFF_UNIT_V, + ) return chs def _comp_sort_keys(c): """Sort the compensation data.""" - return (int(c['coeff_type']), int(c['scanno'])) + return (int(c["coeff_type"]), int(c["scanno"])) def _check_comp(comp): @@ -280,78 +336,87 @@ def _check_comp(comp): ref_sens = None kind = -1 for k, c_k in enumerate(comp): - if c_k['coeff_type'] != kind: + if c_k["coeff_type"] != kind: c_ref = c_k - ref_sens = c_ref['sensors'] - kind = c_k['coeff_type'] - elif not c_k['sensors'] == ref_sens: - raise RuntimeError('Cannot use an uneven compensation matrix') + ref_sens = c_ref["sensors"] + kind = c_k["coeff_type"] + elif not c_k["sensors"] == ref_sens: + raise RuntimeError("Cannot use an uneven compensation matrix") def _conv_comp(comp, first, last, chs): """Add a new converted compensation data item.""" - ch_names = [c['ch_name'] for c in chs] - n_col = comp[first]['ncoeff'] - col_names = comp[first]['sensors'][:n_col] - row_names = [comp[p]['sensor_name'] for p in range(first, last + 1)] - mask = np.in1d(col_names, ch_names) # missing channels excluded + ch_names = [c["ch_name"] for c in chs] + n_col = comp[first]["ncoeff"] + col_names = comp[first]["sensors"][:n_col] + row_names = [comp[p]["sensor_name"] for p in range(first, last + 1)] + mask = np.isin(col_names, ch_names) # missing channels excluded col_names = np.array(col_names)[mask].tolist() n_col = len(col_names) n_row = len(row_names) - ccomp = dict(ctfkind=np.array([comp[first]['coeff_type']]), - save_calibrated=False) + ccomp = dict(ctfkind=comp[first]["coeff_type"], save_calibrated=False) _add_kind(ccomp) data = np.empty((n_row, n_col)) - for ii, coeffs in enumerate(comp[first:last + 1]): + for ii, coeffs in enumerate(comp[first : last + 1]): # Pick the elements to the matrix - data[ii, :] = coeffs['coeffs'][mask] - ccomp['data'] = dict(row_names=row_names, col_names=col_names, - data=data, nrow=len(row_names), ncol=len(col_names)) - mk = ('proper_gain', 'qgain') + data[ii, :] = coeffs["coeffs"][mask] + ccomp["data"] = dict( + row_names=row_names, + col_names=col_names, + data=data, + nrow=len(row_names), + ncol=len(col_names), + ) + mk = ("proper_gain", "qgain") _calibrate_comp(ccomp, chs, row_names, col_names, mult_keys=mk, flip=True) return ccomp def _convert_comp_data(res4): """Convert the compensation data into named matrices.""" - if res4['ncomp'] == 0: + if res4["ncomp"] == 0: return # Sort the coefficients in our favorite order - res4['comp'] = sorted(res4['comp'], key=_comp_sort_keys) + res4["comp"] = sorted(res4["comp"], key=_comp_sort_keys) # Check that all items for a given compensation type have the correct # number of channels - _check_comp(res4['comp']) + _check_comp(res4["comp"]) # Create named matrices first = 0 kind = -1 comps = list() - for k in range(len(res4['comp'])): - if res4['comp'][k]['coeff_type'] != kind: + for k in range(len(res4["comp"])): + if res4["comp"][k]["coeff_type"] != kind: if k > 0: - comps.append(_conv_comp(res4['comp'], first, k - 1, - res4['chs'])) - kind = res4['comp'][k]['coeff_type'] + comps.append(_conv_comp(res4["comp"], first, k - 
1, res4["chs"])) + kind = res4["comp"][k]["coeff_type"] first = k - comps.append(_conv_comp(res4['comp'], first, k, res4['chs'])) + comps.append(_conv_comp(res4["comp"], first, k, res4["chs"])) return comps def _pick_eeg_pos(c): """Pick EEG positions.""" - eeg = dict(coord_frame=FIFF.FIFFV_COORD_HEAD, assign_to_chs=False, - labels=list(), ids=list(), rr=list(), kinds=list(), np=0) - for ch in c['chs']: - if ch['kind'] == FIFF.FIFFV_EEG_CH and not _at_origin(ch['loc'][:3]): - eeg['labels'].append(ch['ch_name']) - eeg['ids'].append(ch['logno']) - eeg['rr'].append(ch['loc'][:3]) - eeg['kinds'].append(FIFF.FIFFV_POINT_EEG) - eeg['np'] += 1 - if eeg['np'] == 0: + eeg = dict( + coord_frame=FIFF.FIFFV_COORD_HEAD, + assign_to_chs=False, + labels=list(), + ids=list(), + rr=list(), + kinds=list(), + np=0, + ) + for ch in c["chs"]: + if ch["kind"] == FIFF.FIFFV_EEG_CH and not _at_origin(ch["loc"][:3]): + eeg["labels"].append(ch["ch_name"]) + eeg["ids"].append(ch["logno"]) + eeg["rr"].append(ch["loc"][:3]) + eeg["kinds"].append(FIFF.FIFFV_POINT_EEG) + eeg["np"] += 1 + if eeg["np"] == 0: return None - logger.info('Picked positions of %d EEG channels from channel info' - % eeg['np']) + logger.info("Picked positions of %d EEG channels from channel info", eeg["np"]) return eeg @@ -359,96 +424,104 @@ def _add_eeg_pos(eeg, t, c): """Pick the (virtual) EEG position data.""" if eeg is None: return - if t is None or t['t_ctf_head_head'] is None: - raise RuntimeError('No coordinate transformation available for EEG ' - 'position data') + if t is None or t["t_ctf_head_head"] is None: + raise RuntimeError( + "No coordinate transformation available for EEG position data" + ) eeg_assigned = 0 - if eeg['assign_to_chs']: - for k in range(eeg['np']): + if eeg["assign_to_chs"]: + for k in range(eeg["np"]): # Look for a channel name match - for ch in c['chs']: - if ch['ch_name'].lower() == eeg['labels'][k].lower(): - r0 = ch['loc'][:3] - r0[:] = eeg['rr'][k] - if eeg['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: - r0[:] = apply_trans(t['t_ctf_head_head'], r0) - elif eeg['coord_frame'] != FIFF.FIFFV_COORD_HEAD: + for ch in c["chs"]: + if ch["ch_name"].lower() == eeg["labels"][k].lower(): + r0 = ch["loc"][:3] + r0[:] = eeg["rr"][k] + if eeg["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + r0[:] = apply_trans(t["t_ctf_head_head"], r0) + elif eeg["coord_frame"] != FIFF.FIFFV_COORD_HEAD: raise RuntimeError( - 'Illegal coordinate frame for EEG electrode ' - 'positions : %s' - % _coord_frame_name(eeg['coord_frame'])) + "Illegal coordinate frame for EEG electrode " + f"positions : {_coord_frame_name(eeg['coord_frame'])}" + ) # Use the logical channel number as an identifier - eeg['ids'][k] = ch['logno'] - eeg['kinds'][k] = FIFF.FIFFV_POINT_EEG + eeg["ids"][k] = ch["logno"] + eeg["kinds"][k] = FIFF.FIFFV_POINT_EEG eeg_assigned += 1 break # Add these to the Polhemus data fid_count = eeg_count = extra_count = 0 - for k in range(eeg['np']): - d = dict(r=eeg['rr'][k].copy(), kind=eeg['kinds'][k], - ident=eeg['ids'][k], coord_frame=FIFF.FIFFV_COORD_HEAD) - c['dig'].append(d) - if eeg['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: - d['r'] = apply_trans(t['t_ctf_head_head'], d['r']) - elif eeg['coord_frame'] != FIFF.FIFFV_COORD_HEAD: - raise RuntimeError('Illegal coordinate frame for EEG electrode ' - 'positions: %s' - % _coord_frame_name(eeg['coord_frame'])) - if eeg['kinds'][k] == FIFF.FIFFV_POINT_CARDINAL: + for k in range(eeg["np"]): + d = dict( + r=eeg["rr"][k].copy(), + kind=eeg["kinds"][k], + ident=eeg["ids"][k], + 
coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + c["dig"].append(d) + if eeg["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + d["r"] = apply_trans(t["t_ctf_head_head"], d["r"]) + elif eeg["coord_frame"] != FIFF.FIFFV_COORD_HEAD: + raise RuntimeError( + "Illegal coordinate frame for EEG electrode positions: " + + _coord_frame_name(eeg["coord_frame"]) + ) + if eeg["kinds"][k] == FIFF.FIFFV_POINT_CARDINAL: fid_count += 1 - elif eeg['kinds'][k] == FIFF.FIFFV_POINT_EEG: + elif eeg["kinds"][k] == FIFF.FIFFV_POINT_EEG: eeg_count += 1 else: extra_count += 1 if eeg_assigned > 0: - logger.info(' %d EEG electrode locations assigned to channel info.' - % eeg_assigned) - for count, kind in zip((fid_count, eeg_count, extra_count), - ('fiducials', 'EEG locations', 'extra points')): + logger.info( + " %d EEG electrode locations assigned to channel info.", eeg_assigned + ) + for count, kind in zip( + (fid_count, eeg_count, extra_count), + ("fiducials", "EEG locations", "extra points"), + ): if count > 0: - logger.info(' %d %s added to Polhemus data.' % (count, kind)) + logger.info(" %d %s added to Polhemus data.", count, kind) -_filt_map = {CTF.CTFV_FILTER_LOWPASS: 'lowpass', - CTF.CTFV_FILTER_HIGHPASS: 'highpass'} +_filt_map = {CTF.CTFV_FILTER_LOWPASS: "lowpass", CTF.CTFV_FILTER_HIGHPASS: "highpass"} def _compose_meas_info(res4, coils, trans, eeg): """Create meas info from CTF data.""" - info = _empty_info(res4['sfreq']) + info = _empty_info(res4["sfreq"]) # Collect all the necessary data from the structures read - info['meas_id'] = get_new_file_id() - info['meas_id']['usecs'] = 0 - info['meas_id']['secs'] = _convert_time(res4['data_date'], - res4['data_time']) - info['meas_date'] = (info['meas_id']['secs'], info['meas_id']['usecs']) - info['experimenter'] = res4['nf_operator'] - info['subject_info'] = dict(his_id=res4['nf_subject_id']) - for filt in res4['filters']: - if filt['type'] in _filt_map: - info[_filt_map[filt['type']]] = filt['freq'] - info['dig'], info['hpi_results'] = _pick_isotrak_and_hpi_coils( - res4, coils, trans) + info["meas_id"] = get_new_file_id() + info["meas_id"]["usecs"] = 0 + info["meas_id"]["secs"] = _convert_time(res4["data_date"], res4["data_time"]) + info["meas_date"] = (info["meas_id"]["secs"], info["meas_id"]["usecs"]) + info["experimenter"] = res4["nf_operator"] + info["subject_info"] = dict(his_id=res4["nf_subject_id"]) + for filt in res4["filters"]: + if filt["type"] in _filt_map: + info[_filt_map[filt["type"]]] = filt["freq"] + info["dig"], info["hpi_results"] = _pick_isotrak_and_hpi_coils(res4, coils, trans) if trans is not None: - if len(info['hpi_results']) > 0: - info['hpi_results'][0]['coord_trans'] = trans['t_ctf_head_head'] - if trans['t_dev_head'] is not None: - info['dev_head_t'] = trans['t_dev_head'] - info['dev_ctf_t'] = combine_transforms( - trans['t_dev_head'], - invert_transform(trans['t_ctf_head_head']), - FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_MNE_COORD_CTF_HEAD) - if trans['t_ctf_head_head'] is not None: - info['ctf_head_t'] = trans['t_ctf_head_head'] - info['chs'] = _convert_channel_info(res4, trans, eeg is None) - info['comps'] = _convert_comp_data(res4) + if len(info["hpi_results"]) > 0: + info["hpi_results"][0]["coord_trans"] = trans["t_ctf_head_head"] + if trans["t_dev_head"] is not None: + info["dev_head_t"] = trans["t_dev_head"] + info["dev_ctf_t"] = combine_transforms( + trans["t_dev_head"], + invert_transform(trans["t_ctf_head_head"]), + FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_MNE_COORD_CTF_HEAD, + ) + if trans["t_ctf_head_head"] is not None: + 
info["ctf_head_t"] = trans["t_ctf_head_head"] + info["chs"] = _convert_channel_info(res4, trans, eeg is None) + info["comps"] = _convert_comp_data(res4) if eeg is None: # Pick EEG locations from chan info if not read from a separate file eeg = _pick_eeg_pos(info) _add_eeg_pos(eeg, trans, info) - logger.info(' Measurement info composed.') + logger.info(" Measurement info composed.") info._unlocked = False info._update_redundant() return info @@ -456,17 +529,17 @@ def _compose_meas_info(res4, coils, trans, eeg): def _read_bad_chans(directory, info): """Read Bad channel list and match to internal names.""" - fname = op.join(directory, 'BadChannels') + fname = op.join(directory, "BadChannels") if not op.exists(fname): return [] - mapping = dict(zip(_clean_names(info['ch_names']), info['ch_names'])) - with open(fname, 'r') as fid: + mapping = dict(zip(_clean_names(info["ch_names"]), info["ch_names"])) + with open(fname) as fid: bad_chans = [mapping[f.strip()] for f in fid.readlines()] return bad_chans def _annotate_bad_segments(directory, start_time, meas_date): - fname = op.join(directory, 'bad.segments') + fname = op.join(directory, "bad.segments") if not op.exists(fname): return None @@ -474,10 +547,10 @@ def _annotate_bad_segments(directory, start_time, meas_date): onsets = [] durations = [] desc = [] - with open(fname, 'r') as fid: + with open(fname) as fid: for f in fid.readlines(): tmp = f.strip().split() - desc.append('bad_%s' % tmp[0]) + desc.append(f"bad_{tmp[0]}") onsets.append(np.float64(tmp[1]) - start_time) durations.append(np.float64(tmp[2]) - np.float64(tmp[1])) # return None if there are no bad segments diff --git a/mne/io/ctf/markers.py b/mne/io/ctf/markers.py index e129e8db505..64360fbc9c3 100644 --- a/mne/io/ctf/markers.py +++ b/mne/io/ctf/markers.py @@ -1,14 +1,15 @@ -# Author: Joan Massich -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-import numpy as np import os.path as op from io import BytesIO +import numpy as np + from ...annotations import Annotations -from .res4 import _read_res4 from .info import _convert_time +from .res4 import _read_res4 def _get_markers(fname): @@ -18,27 +19,25 @@ def consume(fid, predicate): # just a consumer to move around conveniently def parse_marker(string): # XXX: there should be a nicer way to do that data = np.genfromtxt( - BytesIO(string.encode()), dtype=[('trial', int), ('sync', float)]) - return int(data['trial']), float(data['sync']) + BytesIO(string.encode()), dtype=[("trial", int), ("sync", float)] + ) + return int(data["trial"]), float(data["sync"]) markers = dict() with open(fname) as fid: - consume(fid, lambda line: not line.startswith('NUMBER OF MARKERS:')) + consume(fid, lambda line: not line.startswith("NUMBER OF MARKERS:")) num_of_markers = int(fid.readline()) for _ in range(num_of_markers): - consume(fid, lambda line: not line.startswith('NAME:')) - label = fid.readline().strip('\n') + consume(fid, lambda line: not line.startswith("NAME:")) + label = fid.readline().strip("\n") - consume( - fid, lambda line: not line.startswith('NUMBER OF SAMPLES:')) + consume(fid, lambda line: not line.startswith("NUMBER OF SAMPLES:")) n_markers = int(fid.readline()) - consume(fid, lambda line: not line.startswith('LIST OF SAMPLES:')) + consume(fid, lambda line: not line.startswith("LIST OF SAMPLES:")) next(fid) # skip the samples header - markers[label] = [ - parse_marker(next(fid)) for _ in range(n_markers) - ] + markers[label] = [parse_marker(next(fid)) for _ in range(n_markers)] return markers @@ -49,35 +48,42 @@ def _get_res4_info_needed_by_markers(directory): # instead of parsing the entire res4 file. res4 = _read_res4(directory) - total_offset_duration = res4['pre_trig_pts'] / res4['sfreq'] - trial_duration = res4['nsamp'] / res4['sfreq'] + total_offset_duration = res4["pre_trig_pts"] / res4["sfreq"] + trial_duration = res4["nsamp"] / res4["sfreq"] - meas_date = (_convert_time(res4['data_date'], - res4['data_time']), 0) + meas_date = (_convert_time(res4["data_date"], res4["data_time"]), 0) return total_offset_duration, trial_duration, meas_date def _read_annotations_ctf(directory): - total_offset, trial_duration, meas_date \ - = _get_res4_info_needed_by_markers(directory) - return _read_annotations_ctf_call(directory, total_offset, trial_duration, - meas_date) + total_offset, trial_duration, meas_date = _get_res4_info_needed_by_markers( + directory + ) + return _read_annotations_ctf_call( + directory, total_offset, trial_duration, meas_date + ) -def _read_annotations_ctf_call(directory, total_offset, trial_duration, - meas_date): - fname = op.join(directory, 'MarkerFile.mrk') +def _read_annotations_ctf_call(directory, total_offset, trial_duration, meas_date): + fname = op.join(directory, "MarkerFile.mrk") if not op.exists(fname): return Annotations(list(), list(), list(), orig_time=meas_date) else: markers = _get_markers(fname) - onset = [synctime + (trialnum * trial_duration) + total_offset - for _, m in markers.items() for (trialnum, synctime) in m] - - description = np.concatenate([ - np.repeat(label, len(m)) for label, m in markers.items() - ]) - - return Annotations(onset=onset, duration=np.zeros_like(onset), - description=description, orig_time=meas_date) + onset = [ + synctime + (trialnum * trial_duration) + total_offset + for _, m in markers.items() + for (trialnum, synctime) in m + ] + + description = np.concatenate( + [np.repeat(label, len(m)) for label, m in 
markers.items()] + ) + + return Annotations( + onset=onset, + duration=np.zeros_like(onset), + description=description, + orig_time=meas_date, + ) diff --git a/mne/io/ctf/res4.py b/mne/io/ctf/res4.py index be70a54b86d..b2ecb9dc304 100644 --- a/mne/io/ctf/res4.py +++ b/mne/io/ctf/res4.py @@ -1,9 +1,8 @@ """Read .res4 files.""" -# Authors: Matti Hämäläinen -# Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import os.path as op @@ -15,40 +14,40 @@ def _make_ctf_name(directory, extra, raise_error=True): """Make a CTF name.""" - fname = op.join(directory, op.basename(directory)[:-3] + '.' + extra) + fname = op.join(directory, op.basename(directory)[:-3] + "." + extra) found = True if not op.isfile(fname): if raise_error: - raise IOError('Standard file %s not found' % fname) + raise OSError(f"Standard file {fname} not found") found = False return fname, found def _read_double(fid, n=1): """Read a double.""" - return np.fromfile(fid, '>f8', n) + return np.fromfile(fid, ">f8", n) def _read_string(fid, n_bytes, decode=True): """Read string.""" s0 = fid.read(n_bytes) - s = s0.split(b'\x00')[0] - return s.decode('utf-8') if decode else s + s = s0.split(b"\x00")[0] + return s.decode("utf-8") if decode else s def _read_ustring(fid, n_bytes): """Read unsigned character string.""" - return np.fromfile(fid, '>B', n_bytes) + return np.fromfile(fid, ">B", n_bytes) def _read_int2(fid): """Read int from short.""" - return np.fromfile(fid, '>i2', 1)[0] + return _auto_cast(np.fromfile(fid, ">i2", 1)[0]) def _read_int(fid): """Read a 32-bit integer.""" - return np.fromfile(fid, '>i4', 1)[0] + return np.fromfile(fid, ">i4", 1)[0] def _move_to_next(fid, byte=8): @@ -62,140 +61,172 @@ def _move_to_next(fid, byte=8): def _read_filter(fid): """Read filter information.""" f = dict() - f['freq'] = _read_double(fid)[0] - f['class'] = _read_int(fid) - f['type'] = _read_int(fid) - f['npar'] = _read_int2(fid) - f['pars'] = _read_double(fid, f['npar']) + f["freq"] = _read_double(fid)[0] + f["class"] = _read_int(fid) + f["type"] = _read_int(fid) + f["npar"] = _read_int2(fid) + f["pars"] = _read_double(fid, f["npar"]) return f def _read_comp_coeff(fid, d): """Read compensation coefficients.""" # Read the coefficients and initialize - d['ncomp'] = _read_int2(fid) - d['comp'] = list() + d["ncomp"] = _read_int2(fid) + d["comp"] = list() # Read each record - dt = np.dtype([ - ('sensor_name', 'S32'), - ('coeff_type', '>i4'), ('d0', '>i4'), - ('ncoeff', '>i2'), - ('sensors', 'S%s' % CTF.CTFV_SENSOR_LABEL, CTF.CTFV_MAX_BALANCING), - ('coeffs', '>f8', CTF.CTFV_MAX_BALANCING)]) - comps = np.fromfile(fid, dt, d['ncomp']) - for k in range(d['ncomp']): + dt = np.dtype( + [ + ("sensor_name", "S32"), + ("coeff_type", ">i4"), + ("d0", ">i4"), + ("ncoeff", ">i2"), + ("sensors", f"S{CTF.CTFV_SENSOR_LABEL}", CTF.CTFV_MAX_BALANCING), + ("coeffs", ">f8", CTF.CTFV_MAX_BALANCING), + ] + ) + comps = np.fromfile(fid, dt, d["ncomp"]) + for k in range(d["ncomp"]): comp = dict() - d['comp'].append(comp) - comp['sensor_name'] = \ - comps['sensor_name'][k].split(b'\x00')[0].decode('utf-8') - comp['coeff_type'] = comps['coeff_type'][k] - comp['ncoeff'] = comps['ncoeff'][k] - comp['sensors'] = [s.split(b'\x00')[0].decode('utf-8') - for s in comps['sensors'][k][:comp['ncoeff']]] - comp['coeffs'] = comps['coeffs'][k][:comp['ncoeff']] - comp['scanno'] = d['ch_names'].index(comp['sensor_name']) + d["comp"].append(comp) + comp["sensor_name"] = 
comps["sensor_name"][k].split(b"\x00")[0].decode("utf-8") + comp["coeff_type"] = comps["coeff_type"][k].item() + comp["ncoeff"] = comps["ncoeff"][k].item() + comp["sensors"] = [ + s.split(b"\x00")[0].decode("utf-8") + for s in comps["sensors"][k][: comp["ncoeff"]] + ] + comp["coeffs"] = comps["coeffs"][k][: comp["ncoeff"]] + comp["scanno"] = d["ch_names"].index(comp["sensor_name"]) def _read_res4(dsdir): """Read the magical res4 file.""" # adapted from read_res4.c - name, _ = _make_ctf_name(dsdir, 'res4') + name, _ = _make_ctf_name(dsdir, "res4") res = dict() - with open(name, 'rb') as fid: + with open(name, "rb") as fid: # Read the fields - res['head'] = _read_string(fid, 8) - res['appname'] = _read_string(fid, 256) - res['origin'] = _read_string(fid, 256) - res['desc'] = _read_string(fid, 256) - res['nave'] = _read_int2(fid) - res['data_time'] = _read_string(fid, 255) - res['data_date'] = _read_string(fid, 255) + res["head"] = _read_string(fid, 8) + res["appname"] = _read_string(fid, 256) + res["origin"] = _read_string(fid, 256) + res["desc"] = _read_string(fid, 256) + res["nave"] = _read_int2(fid) + res["data_time"] = _read_string(fid, 255) + res["data_date"] = _read_string(fid, 255) # Seems that date and time can be swapped # (are they entered manually?!) - if '/' in res['data_time'] and ':' in res['data_date']: - data_date = res['data_date'] - res['data_date'] = res['data_time'] - res['data_time'] = data_date - res['nsamp'] = _read_int(fid) - res['nchan'] = _read_int2(fid) + if "/" in res["data_time"] and ":" in res["data_date"]: + data_date = res["data_date"] + res["data_date"] = res["data_time"] + res["data_time"] = data_date + res["nsamp"] = _read_int(fid) + res["nchan"] = _read_int2(fid) _move_to_next(fid, 8) - res['sfreq'] = _read_double(fid)[0] - res['epoch_time'] = _read_double(fid)[0] - res['no_trials'] = _read_int2(fid) + res["sfreq"] = _read_double(fid)[0] + res["epoch_time"] = _read_double(fid)[0] + res["no_trials"] = _read_int2(fid) _move_to_next(fid, 4) - res['pre_trig_pts'] = _read_int(fid) - res['no_trials_done'] = _read_int2(fid) - res['no_trials_bst_message_windowlay'] = _read_int2(fid) + res["pre_trig_pts"] = _read_int(fid) + res["no_trials_done"] = _read_int2(fid) + res["no_trials_bst_message_windowlay"] = _read_int2(fid) _move_to_next(fid, 4) - res['save_trials'] = _read_int(fid) - res['primary_trigger'] = fid.read(1) - res['secondary_trigger'] = [fid.read(1) - for k in range(CTF.CTFV_MAX_AVERAGE_BINS)] - res['trigger_polarity_mask'] = fid.read(1) - res['trigger_mode'] = _read_int2(fid) + res["save_trials"] = _read_int(fid) + res["primary_trigger"] = fid.read(1) + res["secondary_trigger"] = [ + fid.read(1) for k in range(CTF.CTFV_MAX_AVERAGE_BINS) + ] + res["trigger_polarity_mask"] = fid.read(1) + res["trigger_mode"] = _read_int2(fid) _move_to_next(fid, 4) - res['accept_reject'] = _read_int(fid) - res['run_time_bst_message_windowlay'] = _read_int2(fid) + res["accept_reject"] = _read_int(fid) + res["run_time_bst_message_windowlay"] = _read_int2(fid) _move_to_next(fid, 4) - res['zero_head'] = _read_int(fid) + res["zero_head"] = _read_int(fid) _move_to_next(fid, 4) - res['artifact_mode'] = _read_int(fid) + res["artifact_mode"] = _read_int(fid) _read_int(fid) # padding - res['nf_run_name'] = _read_string(fid, 32) - res['nf_run_title'] = _read_string(fid, 256) - res['nf_instruments'] = _read_string(fid, 32) - res['nf_collect_descriptor'] = _read_string(fid, 32) - res['nf_subject_id'] = _read_string(fid, 32) - res['nf_operator'] = _read_string(fid, 32) - if 
len(res['nf_operator']) == 0: - res['nf_operator'] = None - res['nf_sensor_file_name'] = _read_ustring(fid, 60) + res["nf_run_name"] = _read_string(fid, 32) + res["nf_run_title"] = _read_string(fid, 256) + res["nf_instruments"] = _read_string(fid, 32) + res["nf_collect_descriptor"] = _read_string(fid, 32) + res["nf_subject_id"] = _read_string(fid, 32) + res["nf_operator"] = _read_string(fid, 32) + if len(res["nf_operator"]) == 0: + res["nf_operator"] = None + res["nf_sensor_file_name"] = _read_ustring(fid, 60) _move_to_next(fid, 4) - res['rdlen'] = _read_int(fid) + res["rdlen"] = _read_int(fid) fid.seek(CTF.FUNNY_POS, 0) - if res['rdlen'] > 0: - res['run_desc'] = _read_string(fid, res['rdlen']) + if res["rdlen"] > 0: + res["run_desc"] = _read_string(fid, res["rdlen"]) # Filters - res['nfilt'] = _read_int2(fid) - res['filters'] = list() - for k in range(res['nfilt']): - res['filters'].append(_read_filter(fid)) + res["nfilt"] = _read_int2(fid) + res["filters"] = list() + for k in range(res["nfilt"]): + res["filters"].append(_read_filter(fid)) # Channel information (names, then data) - res['ch_names'] = list() - for k in range(res['nchan']): + res["ch_names"] = list() + for k in range(res["nchan"]): ch_name = _read_string(fid, 32) - res['ch_names'].append(ch_name) - _coil_dt = np.dtype([ - ('pos', '>f8', 3), ('d0', '>f8'), - ('norm', '>f8', 3), ('d1', '>f8'), - ('turns', '>i2'), ('d2', '>i4'), ('d3', '>i2'), - ('area', '>f8')]) - _ch_dt = np.dtype([ - ('sensor_type_index', '>i2'), - ('original_run_no', '>i2'), - ('coil_type', '>i4'), - ('proper_gain', '>f8'), - ('qgain', '>f8'), - ('io_gain', '>f8'), - ('io_offset', '>f8'), - ('num_coils', '>i2'), - ('grad_order_no', '>i2'), ('d0', '>i4'), - ('coil', _coil_dt, CTF.CTFV_MAX_COILS), - ('head_coil', _coil_dt, CTF.CTFV_MAX_COILS)]) - chs = np.fromfile(fid, _ch_dt, res['nchan']) - for coil in (chs['coil'], chs['head_coil']): - coil['pos'] /= 100. 
- coil['area'] *= 1e-4 + res["ch_names"].append(ch_name) + _coil_dt = np.dtype( + [ + ("pos", ">f8", 3), + ("d0", ">f8"), + ("norm", ">f8", 3), + ("d1", ">f8"), + ("turns", ">i2"), + ("d2", ">i4"), + ("d3", ">i2"), + ("area", ">f8"), + ] + ) + _ch_dt = np.dtype( + [ + ("sensor_type_index", ">i2"), + ("original_run_no", ">i2"), + ("coil_type", ">i4"), + ("proper_gain", ">f8"), + ("qgain", ">f8"), + ("io_gain", ">f8"), + ("io_offset", ">f8"), + ("num_coils", ">i2"), + ("grad_order_no", ">i2"), + ("d0", ">i4"), + ("coil", _coil_dt, CTF.CTFV_MAX_COILS), + ("head_coil", _coil_dt, CTF.CTFV_MAX_COILS), + ] + ) + chs = np.fromfile(fid, _ch_dt, res["nchan"]) + for coil in (chs["coil"], chs["head_coil"]): + coil["pos"] /= 100.0 + coil["area"] *= 1e-4 # convert to dict chs = [dict(zip(chs.dtype.names, x)) for x in chs] - res['chs'] = chs - for k in range(res['nchan']): - res['chs'][k]['ch_name'] = res['ch_names'][k] + for ch in chs: + for key, val in ch.items(): + ch[key] = _auto_cast(val) + res["chs"] = chs + for k in range(res["nchan"]): + res["chs"][k]["ch_name"] = res["ch_names"][k] # The compensation coefficients _read_comp_coeff(fid, res) - logger.info(' res4 data read.') + logger.info(" res4 data read.") return res + + +def _auto_cast(x): + # Upcast scalars + if isinstance(x, np.ScalarType): + if x.dtype.kind == "i": + if x.dtype != np.int64: + x = x.astype(np.int64) + elif x.dtype.kind == "f": + if x.dtype != np.float64: + x = x.astype(np.float64) + return x diff --git a/mne/io/ctf/tests/__init__.py b/mne/io/ctf/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/ctf/tests/__init__.py +++ b/mne/io/ctf/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/ctf/tests/test_ctf.py b/mne/io/ctf/tests/test_ctf.py index 42de95134d4..448ea90baba 100644 --- a/mne/io/ctf/tests/test_ctf.py +++ b/mne/io/ctf/tests/test_ctf.py @@ -1,44 +1,51 @@ -# Authors: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
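[Reviewer note, not part of the patch] The new `_auto_cast` helper added at the end of res4.py above promotes NumPy scalars read from the big-endian res4 fields (">i2", ">i4", ">f4", ...) to native 64-bit types, so downstream comparisons and FIF writing see plain int64/float64 values; arrays and other non-scalar objects pass through untouched. A minimal sketch of the intended behaviour, assuming only NumPy and the private helper introduced in this diff:

import numpy as np
from mne.io.ctf.res4 import _auto_cast  # private helper added above

x = np.int16(7)  # e.g. a value read with dtype ">i2"
assert _auto_cast(x).dtype == np.int64
assert _auto_cast(np.float32(1.5)).dtype == np.float64
arr = np.zeros(3, ">f8")
assert _auto_cast(arr) is arr  # non-scalars are returned unchanged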
import copy -from datetime import datetime, timezone import os -from os import path as op import shutil +from datetime import datetime, timezone +from os import path as op import numpy as np +import pytest from numpy import array_equal from numpy.testing import assert_allclose, assert_array_equal -import pytest import mne import mne.io.ctf.info -from mne import (pick_types, read_annotations, create_info, - events_from_annotations, make_forward_solution) -from mne.transforms import apply_trans -from mne.io import read_raw_fif, read_raw_ctf, RawArray -from mne.io.compensator import get_current_comp +from mne import ( + create_info, + events_from_annotations, + make_forward_solution, + pick_types, + read_annotations, +) +from mne._fiff.compensator import get_current_comp +from mne._fiff.constants import FIFF +from mne._fiff.pick import _picks_to_idx +from mne.datasets import brainstorm, spm_face, testing +from mne.io import RawArray, read_raw_ctf, read_raw_fif from mne.io.ctf.constants import CTF from mne.io.ctf.info import _convert_time from mne.io.tests.test_raw import _test_raw_reader from mne.tests.test_annotations import _assert_annotations_equal -from mne.utils import (_clean_names, catch_logging, _stamp_to_dt, - _record_warnings) -from mne.datasets import testing, spm_face, brainstorm -from mne.io.constants import FIFF - -ctf_dir = testing.data_path(download=False) / 'CTF' -ctf_fname_continuous = 'testdata_ctf.ds' -ctf_fname_1_trial = 'testdata_ctf_short.ds' -ctf_fname_2_trials = 'testdata_ctf_pseudocontinuous.ds' -ctf_fname_discont = 'testdata_ctf_short_discontinuous.ds' -ctf_fname_somato = 'somMDYO-18av.ds' -ctf_fname_catch = 'catch-alp-good-f.ds' +from mne.transforms import apply_trans +from mne.utils import _clean_names, _record_warnings, _stamp_to_dt, catch_logging + +ctf_dir = testing.data_path(download=False) / "CTF" +ctf_fname_continuous = "testdata_ctf.ds" +ctf_fname_1_trial = "testdata_ctf_short.ds" +ctf_fname_2_trials = "testdata_ctf_pseudocontinuous.ds" +ctf_fname_discont = "testdata_ctf_short_discontinuous.ds" +ctf_fname_somato = "somMDYO-18av.ds" +ctf_fname_catch = "catch-alp-good-f.ds" somato_fname = op.join( - brainstorm.bst_raw.data_path(download=False), 'MEG', 'bst_raw', - 'subj001_somatosensory_20111109_01_AUX-f.ds' + brainstorm.bst_raw.data_path(download=False), + "MEG", + "bst_raw", + "subj001_somatosensory_20111109_01_AUX-f.ds", ) spm_path = spm_face.data_path(download=False) @@ -63,53 +70,60 @@ def test_read_ctf(tmp_path): """Test CTF reader.""" temp_dir = str(tmp_path) - out_fname = op.join(temp_dir, 'test_py_raw.fif') + out_fname = op.join(temp_dir, "test_py_raw.fif") # Create a dummy .eeg file so we can test our reading/application of it - os.mkdir(op.join(temp_dir, 'randpos')) - ctf_eeg_fname = op.join(temp_dir, 'randpos', ctf_fname_catch) + os.mkdir(op.join(temp_dir, "randpos")) + ctf_eeg_fname = op.join(temp_dir, "randpos", ctf_fname_catch) shutil.copytree(op.join(ctf_dir, ctf_fname_catch), ctf_eeg_fname) - with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'): + with pytest.warns(RuntimeWarning, match="RMSP .* changed to a MISC ch"): raw = _test_raw_reader(read_raw_ctf, directory=ctf_eeg_fname) picks = pick_types(raw.info, meg=False, eeg=True) pos = np.random.RandomState(42).randn(len(picks), 3) - fake_eeg_fname = op.join(ctf_eeg_fname, 'catch-alp-good-f.eeg') + fake_eeg_fname = op.join(ctf_eeg_fname, "catch-alp-good-f.eeg") # Create a bad file - with open(fake_eeg_fname, 'wb') as fid: - fid.write('foo\n'.encode('ascii')) + with 
open(fake_eeg_fname, "wb") as fid: + fid.write("foo\n".encode("ascii")) pytest.raises(RuntimeError, read_raw_ctf, ctf_eeg_fname) # Create a good file - with open(fake_eeg_fname, 'wb') as fid: + with open(fake_eeg_fname, "wb") as fid: for ii, ch_num in enumerate(picks): - args = (str(ch_num + 1), raw.ch_names[ch_num],) + tuple( - '%0.5f' % x for x in 100 * pos[ii]) # convert to cm - fid.write(('\t'.join(args) + '\n').encode('ascii')) - pos_read_old = np.array([raw.info['chs'][p]['loc'][:3] for p in picks]) - with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'): + args = ( + str(ch_num + 1), + raw.ch_names[ch_num], + ) + tuple(f"{x:0.5f}" for x in 100 * pos[ii]) # convert to cm + fid.write(("\t".join(args) + "\n").encode("ascii")) + pos_read_old = np.array([raw.info["chs"][p]["loc"][:3] for p in picks]) + with pytest.warns(RuntimeWarning, match="RMSP .* changed to a MISC ch"): raw = read_raw_ctf(ctf_eeg_fname) # read modified data - pos_read = np.array([raw.info['chs'][p]['loc'][:3] for p in picks]) - assert_allclose(apply_trans(raw.info['ctf_head_t'], pos), pos_read, - rtol=1e-5, atol=1e-5) + pos_read = np.array([raw.info["chs"][p]["loc"][:3] for p in picks]) + assert_allclose( + apply_trans(raw.info["ctf_head_t"], pos), pos_read, rtol=1e-5, atol=1e-5 + ) assert (pos_read == pos_read_old).mean() < 0.1 - shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_randpos_raw.fif'), - op.join(temp_dir, 'randpos', 'catch-alp-good-f.ds_raw.fif')) + shutil.copy( + op.join(ctf_dir, "catch-alp-good-f.ds_randpos_raw.fif"), + op.join(temp_dir, "randpos", "catch-alp-good-f.ds_raw.fif"), + ) # Create a version with no hc, starting out *with* EEG pos (error) - os.mkdir(op.join(temp_dir, 'nohc')) - ctf_no_hc_fname = op.join(temp_dir, 'no_hc', ctf_fname_catch) + os.mkdir(op.join(temp_dir, "nohc")) + ctf_no_hc_fname = op.join(temp_dir, "no_hc", ctf_fname_catch) shutil.copytree(ctf_eeg_fname, ctf_no_hc_fname) remove_base = op.join(ctf_no_hc_fname, op.basename(ctf_fname_catch[:-3])) - os.remove(remove_base + '.hc') - with pytest.warns(RuntimeWarning, match='MISC channel'): + os.remove(remove_base + ".hc") + with _record_warnings(), pytest.warns(RuntimeWarning, match="MISC channel"): pytest.raises(RuntimeError, read_raw_ctf, ctf_no_hc_fname) - os.remove(remove_base + '.eeg') - shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_nohc_raw.fif'), - op.join(temp_dir, 'no_hc', 'catch-alp-good-f.ds_raw.fif')) + os.remove(remove_base + ".eeg") + shutil.copy( + op.join(ctf_dir, "catch-alp-good-f.ds_nohc_raw.fif"), + op.join(temp_dir, "no_hc", "catch-alp-good-f.ds_raw.fif"), + ) # All our files use_fnames = [op.join(ctf_dir, c) for c in ctf_fnames] for fname in use_fnames: - raw_c = read_raw_fif(fname + '_raw.fif', preload=True) + raw_c = read_raw_fif(fname + "_raw.fif", preload=True) # sometimes matches "MISC channel" with _record_warnings(): raw = read_raw_ctf(fname) @@ -118,172 +132,221 @@ def test_read_ctf(tmp_path): assert_array_equal(raw.ch_names, raw_c.ch_names) assert_allclose(raw.times, raw_c.times) assert_allclose(raw._cals, raw_c._cals) - assert (raw.info['meas_id']['version'] == - raw_c.info['meas_id']['version'] + 1) - for t in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'): - assert_allclose(raw.info[t]['trans'], raw_c.info[t]['trans'], - rtol=1e-4, atol=1e-7) + assert raw.info["meas_id"]["version"] == raw_c.info["meas_id"]["version"] + 1 + for t in ("dev_head_t", "dev_ctf_t", "ctf_head_t"): + assert_allclose( + raw.info[t]["trans"], raw_c.info[t]["trans"], rtol=1e-4, atol=1e-7 + ) # XXX 2019/11/29 : 
MNC-C FIF conversion files don't have meas_date set. # Consider adding meas_date to below checks once this is addressed in # MNE-C - for key in ('acq_pars', 'acq_stim', 'bads', - 'ch_names', 'custom_ref_applied', 'description', - 'events', 'experimenter', 'highpass', 'line_freq', - 'lowpass', 'nchan', 'proj_id', 'proj_name', - 'projs', 'sfreq', 'subject_info'): + for key in ( + "acq_pars", + "acq_stim", + "bads", + "ch_names", + "custom_ref_applied", + "description", + "events", + "experimenter", + "highpass", + "line_freq", + "lowpass", + "nchan", + "proj_id", + "proj_name", + "projs", + "sfreq", + "subject_info", + ): assert raw.info[key] == raw_c.info[key], key if op.basename(fname) not in single_trials: # We don't force buffer size to be smaller like MNE-C assert raw.buffer_size_sec == raw_c.buffer_size_sec - assert len(raw.info['comps']) == len(raw_c.info['comps']) - for c1, c2 in zip(raw.info['comps'], raw_c.info['comps']): - for key in ('colcals', 'rowcals'): + assert len(raw.info["comps"]) == len(raw_c.info["comps"]) + for c1, c2 in zip(raw.info["comps"], raw_c.info["comps"]): + for key in ("colcals", "rowcals"): assert_allclose(c1[key], c2[key]) - assert c1['save_calibrated'] == c2['save_calibrated'] - for key in ('row_names', 'col_names', 'nrow', 'ncol'): - assert_array_equal(c1['data'][key], c2['data'][key]) - assert_allclose(c1['data']['data'], c2['data']['data'], atol=1e-7, - rtol=1e-5) - assert_allclose(raw.info['hpi_results'][0]['coord_trans']['trans'], - raw_c.info['hpi_results'][0]['coord_trans']['trans'], - rtol=1e-5, atol=1e-7) - assert len(raw.info['chs']) == len(raw_c.info['chs']) - for ii, (c1, c2) in enumerate(zip(raw.info['chs'], raw_c.info['chs'])): - for key in ('kind', 'scanno', 'unit', 'ch_name', 'unit_mul', - 'range', 'coord_frame', 'coil_type', 'logno'): - if c1['ch_name'] == 'RMSP' and \ - 'catch-alp-good-f' in fname and \ - key in ('kind', 'unit', 'coord_frame', 'coil_type', - 'logno'): + assert c1["save_calibrated"] == c2["save_calibrated"] + for key in ("row_names", "col_names", "nrow", "ncol"): + assert_array_equal(c1["data"][key], c2["data"][key]) + assert_allclose( + c1["data"]["data"], c2["data"]["data"], atol=1e-7, rtol=1e-5 + ) + assert_allclose( + raw.info["hpi_results"][0]["coord_trans"]["trans"], + raw_c.info["hpi_results"][0]["coord_trans"]["trans"], + rtol=1e-5, + atol=1e-7, + ) + assert len(raw.info["chs"]) == len(raw_c.info["chs"]) + for ii, (c1, c2) in enumerate(zip(raw.info["chs"], raw_c.info["chs"])): + for key in ( + "kind", + "scanno", + "unit", + "ch_name", + "unit_mul", + "range", + "coord_frame", + "coil_type", + "logno", + ): + if ( + c1["ch_name"] == "RMSP" + and "catch-alp-good-f" in fname + and key in ("kind", "unit", "coord_frame", "coil_type", "logno") + ): continue # XXX see below... 
- if key == 'coil_type' and c1[key] == FIFF.FIFFV_COIL_EEG: + if key == "coil_type" and c1[key] == FIFF.FIFFV_COIL_EEG: # XXX MNE-C bug that this is not set assert c2[key] == FIFF.FIFFV_COIL_NONE continue assert c1[key] == c2[key], key - for key in ('cal',): - assert_allclose(c1[key], c2[key], atol=1e-6, rtol=1e-4, - err_msg='raw.info["chs"][%d][%s]' % (ii, key)) + for key in ("cal",): + assert_allclose( + c1[key], + c2[key], + atol=1e-6, + rtol=1e-4, + err_msg=f'raw.info["chs"][{ii}][{key}]', + ) # XXX 2016/02/24: fixed bug with normal computation that used # to exist, once mne-C tools are updated we should update our FIF # conversion files, then the slices can go away (and the check # can be combined with that for "cal") - for key in ('loc',): - if c1['ch_name'] == 'RMSP' and 'catch-alp-good-f' in fname: + for key in ("loc",): + if c1["ch_name"] == "RMSP" and "catch-alp-good-f" in fname: continue - if (c2[key][:3] == 0.).all(): + if (c2[key][:3] == 0.0).all(): check = [np.nan] * 3 else: check = c2[key][:3] - assert_allclose(c1[key][:3], check, atol=1e-6, rtol=1e-4, - err_msg='raw.info["chs"][%d][%s]' % (ii, key)) - if (c2[key][3:] == 0.).all(): + assert_allclose( + c1[key][:3], + check, + atol=1e-6, + rtol=1e-4, + err_msg=f'raw.info["chs"][{ii}][{key}]', + ) + if (c2[key][3:] == 0.0).all(): check = [np.nan] * 3 else: check = c2[key][9:12] - assert_allclose(c1[key][9:12], check, atol=1e-6, rtol=1e-4, - err_msg='raw.info["chs"][%d][%s]' % (ii, key)) + assert_allclose( + c1[key][9:12], + check, + atol=1e-6, + rtol=1e-4, + err_msg=f'raw.info["chs"][{ii}][{key}]', + ) # Make sure all digitization points are in the MNE head coord frame - for p in raw.info['dig']: - assert p['coord_frame'] == FIFF.FIFFV_COORD_HEAD, \ - 'dig points must be in FIFF.FIFFV_COORD_HEAD' + for p in raw.info["dig"]: + assert p["coord_frame"] == FIFF.FIFFV_COORD_HEAD, ( + "dig points must be in FIFF.FIFFV_COORD_HEAD" + ) - if fname.endswith('catch-alp-good-f.ds'): # omit points from .pos file + if fname.endswith("catch-alp-good-f.ds"): # omit points from .pos file with raw.info._unlock(): - raw.info['dig'] = raw.info['dig'][:-10] + raw.info["dig"] = raw.info["dig"][:-10] # XXX: Next test would fail because c-tools assign the fiducials from # CTF data as HPI. Should eventually clarify/unify with Matti. # assert_dig_allclose(raw.info, raw_c.info) # check data match - raw_c.save(out_fname, overwrite=True, buffer_size_sec=1.) 
+ raw_c.save(out_fname, overwrite=True, buffer_size_sec=1.0) raw_read = read_raw_fif(out_fname) # so let's check tricky cases based on sample boundaries rng = np.random.RandomState(0) pick_ch = rng.permutation(np.arange(len(raw.ch_names)))[:10] - bnd = int(round(raw.info['sfreq'] * raw.buffer_size_sec)) - assert bnd == raw._raw_extras[0]['block_size'] + bnd = int(round(raw.info["sfreq"] * raw.buffer_size_sec)) + assert bnd == raw._raw_extras[0]["block_size"] assert bnd == block_sizes[op.basename(fname)] - slices = (slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd), - slice(3, 300), slice(None)) + slices = ( + slice(0, bnd), + slice(bnd - 1, bnd), + slice(3, bnd), + slice(3, 300), + slice(None), + ) if len(raw.times) >= 2 * bnd: # at least two complete blocks - slices = slices + (slice(bnd, 2 * bnd), slice(bnd, bnd + 1), - slice(0, bnd + 100)) + slices = slices + ( + slice(bnd, 2 * bnd), + slice(bnd, bnd + 1), + slice(0, bnd + 100), + ) for sl_time in slices: - assert_allclose(raw[pick_ch, sl_time][0], - raw_c[pick_ch, sl_time][0]) - assert_allclose(raw_read[pick_ch, sl_time][0], - raw_c[pick_ch, sl_time][0]) + assert_allclose(raw[pick_ch, sl_time][0], raw_c[pick_ch, sl_time][0]) + assert_allclose(raw_read[pick_ch, sl_time][0], raw_c[pick_ch, sl_time][0]) # all data / preload raw.load_data() assert_allclose(raw[:][0], raw_c[:][0], atol=1e-15) # test bad segment annotations - if 'testdata_ctf_short.ds' in fname: - assert 'bad' in raw.annotations.description[0] + if "testdata_ctf_short.ds" in fname: + assert "bad" in raw.annotations.description[0] assert_allclose(raw.annotations.onset, [2.15]) assert_allclose(raw.annotations.duration, [0.0225]) - with pytest.raises(TypeError, match='path-like'): + with pytest.raises(TypeError, match="path-like"): read_raw_ctf(1) - with pytest.raises(FileNotFoundError, match='does not exist'): - read_raw_ctf(ctf_fname_continuous + 'foo.ds') + with pytest.raises(FileNotFoundError, match="does not exist"): + read_raw_ctf(ctf_fname_continuous + "foo.ds") # test ignoring of system clock - read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'ignore') - with pytest.raises(ValueError, match='system_clock'): - read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'foo') + read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), "ignore") + with pytest.raises(ValueError, match="system_clock"): + read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), "foo") @testing.requires_testing_data def test_rawctf_clean_names(): """Test RawCTF _clean_names method.""" # read test data - with pytest.warns(RuntimeWarning, match='ref channel RMSP did not'): + with pytest.warns(RuntimeWarning, match="ref channel RMSP did not"): raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch)) - raw_cleaned = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch), - clean_names=True) + raw_cleaned = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch), clean_names=True) test_channel_names = _clean_names(raw.ch_names) - test_info_comps = copy.deepcopy(raw.info['comps']) + test_info_comps = copy.deepcopy(raw.info["comps"]) # channel names should not be cleaned by default assert raw.ch_names != test_channel_names - chs_ch_names = [ch['ch_name'] for ch in raw.info['chs']] + chs_ch_names = [ch["ch_name"] for ch in raw.info["chs"]] assert chs_ch_names != test_channel_names - for test_comp, comp in zip(test_info_comps, raw.info['comps']): - for key in ('row_names', 'col_names'): - assert not array_equal(_clean_names(test_comp['data'][key]), - comp['data'][key]) + for test_comp, comp in zip(test_info_comps, 
raw.info["comps"]): + for key in ("row_names", "col_names"): + assert not array_equal( + _clean_names(test_comp["data"][key]), comp["data"][key] + ) # channel names should be cleaned if clean_names=True assert raw_cleaned.ch_names == test_channel_names - for ch, test_ch_name in zip(raw_cleaned.info['chs'], test_channel_names): - assert ch['ch_name'] == test_ch_name + for ch, test_ch_name in zip(raw_cleaned.info["chs"], test_channel_names): + assert ch["ch_name"] == test_ch_name - for test_comp, comp in zip(test_info_comps, raw_cleaned.info['comps']): - for key in ('row_names', 'col_names'): - assert _clean_names(test_comp['data'][key]) == comp['data'][key] + for test_comp, comp in zip(test_info_comps, raw_cleaned.info["comps"]): + for key in ("row_names", "col_names"): + assert _clean_names(test_comp["data"][key]) == comp["data"][key] @spm_face.requires_spm_data def test_read_spm_ctf(): """Test CTF reader with omitted samples.""" - raw_fname = op.join(spm_path, 'MEG', 'spm', - 'SPM_CTF_MEG_example_faces1_3D.ds') + raw_fname = op.join(spm_path, "MEG", "spm", "SPM_CTF_MEG_example_faces1_3D.ds") raw = read_raw_ctf(raw_fname) extras = raw._raw_extras[0] - assert extras['n_samp'] == raw.n_times - assert extras['n_samp'] != extras['n_samp_tot'] + assert extras["n_samp"] == raw.n_times + assert extras["n_samp"] != extras["n_samp_tot"] # Test that LPA, nasion and RPA are correct. - coord_frames = np.array([d['coord_frame'] for d in raw.info['dig']]) + coord_frames = np.array([d["coord_frame"] for d in raw.info["dig"]]) assert np.all(coord_frames == FIFF.FIFFV_COORD_HEAD) - cardinals = {d['ident']: d['r'] for d in raw.info['dig']} + cardinals = {d["ident"]: d["r"] for d in raw.info["dig"]} assert cardinals[1][0] < cardinals[2][0] < cardinals[3][0] # x coord assert cardinals[1][1] < cardinals[2][1] # y coord assert cardinals[3][1] < cardinals[2][1] # y coord @@ -292,75 +355,259 @@ def test_read_spm_ctf(): @testing.requires_testing_data -@pytest.mark.parametrize('comp_grade', [0, 1]) +@pytest.mark.parametrize("comp_grade", [0, 1]) def test_saving_picked(tmp_path, comp_grade): """Test saving picked CTF instances.""" temp_dir = str(tmp_path) - out_fname = op.join(temp_dir, 'test_py_raw.fif') + out_fname = op.join(temp_dir, "test_py_raw.fif") raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_1_trial)) - assert raw.info['meas_date'] == _stamp_to_dt((1367228160, 0)) + assert raw.info["meas_date"] == _stamp_to_dt((1367228160, 0)) raw.crop(0, 1).load_data() assert raw.compensation_grade == get_current_comp(raw.info) == 0 - assert len(raw.info['comps']) == 5 - pick_kwargs = dict(meg=True, ref_meg=False, verbose=True) + assert len(raw.info["comps"]) == 5 + picks = _picks_to_idx(raw.info, "meg", with_ref_meg=False) raw.apply_gradient_compensation(comp_grade) with catch_logging() as log: - raw_pick = raw.copy().pick_types(**pick_kwargs) - assert len(raw.info['comps']) == 5 - assert len(raw_pick.info['comps']) == 0 + raw_pick = raw.copy().pick(picks, verbose=True) + assert len(raw.info["comps"]) == 5 + assert len(raw_pick.info["comps"]) == 0 log = log.getvalue() - assert 'Removing 5 compensators' in log + assert "Removing 5 compensators" in log raw_pick.save(out_fname, overwrite=True) # should work raw2 = read_raw_fif(out_fname) - assert (raw_pick.ch_names == raw2.ch_names) + assert raw_pick.ch_names == raw2.ch_names assert_array_equal(raw_pick.times, raw2.times) - assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, - atol=1e-20) # atol is very small but > 0 + assert_allclose( + raw2[0:20][0], 
raw_pick[0:20][0], rtol=1e-6, atol=1e-20 + ) # atol is very small but > 0 raw2 = read_raw_fif(out_fname, preload=True) - assert (raw_pick.ch_names == raw2.ch_names) + assert raw_pick.ch_names == raw2.ch_names assert_array_equal(raw_pick.times, raw2.times) - assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, - atol=1e-20) # atol is very small but > 0 + assert_allclose( + raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6, atol=1e-20 + ) # atol is very small but > 0 @brainstorm.bst_raw.requires_bstraw_data def test_read_ctf_annotations(): """Test reading CTF marker file.""" - EXPECTED_LATENCIES = np.array([ - 5640, 7950, 9990, 12253, 14171, 16557, 18896, 20846, # noqa - 22702, 24990, 26830, 28974, 30906, 33077, 34985, 36907, # noqa - 38922, 40760, 42881, 45222, 47457, 49618, 51802, 54227, # noqa - 56171, 58274, 60394, 62375, 64444, 66767, 68827, 71109, # noqa - 73499, 75807, 78146, 80415, 82554, 84508, 86403, 88426, # noqa - 90746, 92893, 94779, 96822, 98996, 99001, 100949, 103325, # noqa - 105322, 107678, 109667, 111844, 113682, 115817, 117691, 119663, # noqa - 121966, 123831, 126110, 128490, 130521, 132808, 135204, 137210, # noqa - 139130, 141390, 143660, 145748, 147889, 150205, 152528, 154646, # noqa - 156897, 159191, 161446, 163722, 166077, 168467, 170624, 172519, # noqa - 174719, 176886, 179062, 181405, 183709, 186034, 188454, 190330, # noqa - 192660, 194682, 196834, 199161, 201035, 203008, 204999, 207409, # noqa - 209661, 211895, 213957, 216005, 218040, 220178, 222137, 224305, # noqa - 226297, 228654, 230755, 232909, 235205, 237373, 239723, 241762, # noqa - 243748, 245762, 247801, 250055, 251886, 254252, 256441, 258354, # noqa - 260680, 263026, 265048, 267073, 269235, 271556, 273927, 276197, # noqa - 278436, 280536, 282691, 284933, 287061, 288936, 290941, 293183, # noqa - 295369, 297729, 299626, 301546, 303449, 305548, 307882, 310124, # noqa - 312374, 314509, 316815, 318789, 320981, 322879, 324878, 326959, # noqa - 329341, 331200, 331201, 333469, 335584, 337984, 340143, 342034, # noqa - 344360, 346309, 348544, 350970, 353052, 355227, 357449, 359603, # noqa - 361725, 363676, 365735, 367799, 369777, 371904, 373856, 376204, # noqa - 378391, 380800, 382859, 385161, 387093, 389434, 391624, 393785, # noqa - 396093, 398214, 400198, 402166, 404104, 406047, 408372, 410686, # noqa - 413029, 414975, 416850, 418797, 420824, 422959, 425026, 427215, # noqa - 429278, 431668 # noqa - ]) - 1 # Fieldtrip has 1 sample difference with MNE + EXPECTED_LATENCIES = ( + np.array( + [ + 5640, + 7950, + 9990, + 12253, + 14171, + 16557, + 18896, + 20846, # noqa + 22702, + 24990, + 26830, + 28974, + 30906, + 33077, + 34985, + 36907, # noqa + 38922, + 40760, + 42881, + 45222, + 47457, + 49618, + 51802, + 54227, # noqa + 56171, + 58274, + 60394, + 62375, + 64444, + 66767, + 68827, + 71109, # noqa + 73499, + 75807, + 78146, + 80415, + 82554, + 84508, + 86403, + 88426, # noqa + 90746, + 92893, + 94779, + 96822, + 98996, + 99001, + 100949, + 103325, # noqa + 105322, + 107678, + 109667, + 111844, + 113682, + 115817, + 117691, + 119663, # noqa + 121966, + 123831, + 126110, + 128490, + 130521, + 132808, + 135204, + 137210, # noqa + 139130, + 141390, + 143660, + 145748, + 147889, + 150205, + 152528, + 154646, # noqa + 156897, + 159191, + 161446, + 163722, + 166077, + 168467, + 170624, + 172519, # noqa + 174719, + 176886, + 179062, + 181405, + 183709, + 186034, + 188454, + 190330, # noqa + 192660, + 194682, + 196834, + 199161, + 201035, + 203008, + 204999, + 207409, # noqa + 209661, + 211895, + 213957, + 216005, + 
218040, + 220178, + 222137, + 224305, # noqa + 226297, + 228654, + 230755, + 232909, + 235205, + 237373, + 239723, + 241762, # noqa + 243748, + 245762, + 247801, + 250055, + 251886, + 254252, + 256441, + 258354, # noqa + 260680, + 263026, + 265048, + 267073, + 269235, + 271556, + 273927, + 276197, # noqa + 278436, + 280536, + 282691, + 284933, + 287061, + 288936, + 290941, + 293183, # noqa + 295369, + 297729, + 299626, + 301546, + 303449, + 305548, + 307882, + 310124, # noqa + 312374, + 314509, + 316815, + 318789, + 320981, + 322879, + 324878, + 326959, # noqa + 329341, + 331200, + 331201, + 333469, + 335584, + 337984, + 340143, + 342034, # noqa + 344360, + 346309, + 348544, + 350970, + 353052, + 355227, + 357449, + 359603, # noqa + 361725, + 363676, + 365735, + 367799, + 369777, + 371904, + 373856, + 376204, # noqa + 378391, + 380800, + 382859, + 385161, + 387093, + 389434, + 391624, + 393785, # noqa + 396093, + 398214, + 400198, + 402166, + 404104, + 406047, + 408372, + 410686, # noqa + 413029, + 414975, + 416850, + 418797, + 420824, + 422959, + 425026, + 427215, # noqa + 429278, + 431668, # noqa + ] + ) + - 1 + ) # Fieldtrip has 1 sample difference with MNE raw = RawArray( data=np.empty((1, 432000), dtype=np.float64), - info=create_info(ch_names=1, sfreq=1200.0)) - raw.set_meas_date(read_raw_ctf(somato_fname).info['meas_date']) + info=create_info(ch_names=1, sfreq=1200.0), + ) + raw.set_meas_date(read_raw_ctf(somato_fname).info["meas_date"]) raw.set_annotations(read_annotations(somato_fname)) events, _ = events_from_annotations(raw) @@ -376,14 +623,44 @@ def test_read_ctf_annotations_smoke_test(): of whatever is in the MarkerFile.mrk. """ EXPECTED_ONSET = [ - 0., 0.1425, 0.285, 0.42833333, 0.57083333, 0.71416667, 0.85666667, - 0.99916667, 1.1425, 1.285, 1.4275, 1.57083333, 1.71333333, 1.85666667, - 1.99916667, 2.14166667, 2.285, 2.4275, 2.57083333, 2.71333333, - 2.85583333, 2.99916667, 3.14166667, 3.28416667, 3.4275, 3.57, - 3.71333333, 3.85583333, 3.99833333, 4.14166667, 4.28416667, 4.42666667, - 4.57, 4.7125, 4.85583333, 4.99833333 + 0.0, + 0.1425, + 0.285, + 0.42833333, + 0.57083333, + 0.71416667, + 0.85666667, + 0.99916667, + 1.1425, + 1.285, + 1.4275, + 1.57083333, + 1.71333333, + 1.85666667, + 1.99916667, + 2.14166667, + 2.285, + 2.4275, + 2.57083333, + 2.71333333, + 2.85583333, + 2.99916667, + 3.14166667, + 3.28416667, + 3.4275, + 3.57, + 3.71333333, + 3.85583333, + 3.99833333, + 4.14166667, + 4.28416667, + 4.42666667, + 4.57, + 4.7125, + 4.85583333, + 4.99833333, ] - fname = op.join(ctf_dir, 'testdata_ctf_mc.ds') + fname = op.join(ctf_dir, "testdata_ctf_mc.ds") annot = read_annotations(fname) assert_allclose(annot.onset, EXPECTED_ONSET) @@ -393,17 +670,17 @@ def test_read_ctf_annotations_smoke_test(): def _read_res4_mag_comp(dsdir): res = mne.io.ctf.res4._read_res4(dsdir) - for ch in res['chs']: - if ch['sensor_type_index'] == CTF.CTFV_REF_MAG_CH: - ch['grad_order_no'] = 1 + for ch in res["chs"]: + if ch["sensor_type_index"] == CTF.CTFV_REF_MAG_CH: + ch["grad_order_no"] = 1 return res def _bad_res4_grad_comp(dsdir): res = mne.io.ctf.res4._read_res4(dsdir) - for ch in res['chs']: - if ch['sensor_type_index'] == CTF.CTFV_MEG_CH: - ch['grad_order_no'] = 1 + for ch in res["chs"]: + if ch["sensor_type_index"] == CTF.CTFV_MEG_CH: + ch["grad_order_no"] = 1 break return res @@ -412,11 +689,10 @@ def _bad_res4_grad_comp(dsdir): def test_missing_res4(tmp_path): """Test that res4 missing is handled gracefully.""" use_ds = tmp_path / ctf_fname_continuous - shutil.copytree(ctf_dir / 
ctf_fname_continuous, - tmp_path / ctf_fname_continuous) + shutil.copytree(ctf_dir / ctf_fname_continuous, tmp_path / ctf_fname_continuous) read_raw_ctf(use_ds) - os.remove(use_ds / (ctf_fname_continuous[:-2] + 'meg4')) - with pytest.raises(IOError, match='could not find the following'): + os.remove(use_ds / (ctf_fname_continuous[:-2] + "meg4")) + with pytest.raises(OSError, match="could not find the following"): read_raw_ctf(use_ds) @@ -426,33 +702,35 @@ def test_read_ctf_mag_bad_comp(tmp_path, monkeypatch): path = op.join(ctf_dir, ctf_fname_continuous) raw_orig = read_raw_ctf(path) assert raw_orig.compensation_grade == 0 - monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _read_res4_mag_comp) + monkeypatch.setattr(mne.io.ctf.ctf, "_read_res4", _read_res4_mag_comp) raw_mag_comp = read_raw_ctf(path) assert raw_mag_comp.compensation_grade == 0 sphere = mne.make_sphere_model() - src = mne.setup_volume_source_space(pos=50., exclude=5., bem=sphere) - assert src[0]['nuse'] == 26 + src = mne.setup_volume_source_space(pos=50.0, exclude=5.0, bem=sphere) + assert src[0]["nuse"] == 26 for grade in (0, 1): raw_orig.apply_gradient_compensation(grade) raw_mag_comp.apply_gradient_compensation(grade) args = (None, src, sphere, True, False) fwd_orig = make_forward_solution(raw_orig.info, *args) fwd_mag_comp = make_forward_solution(raw_mag_comp.info, *args) - assert_allclose(fwd_orig['sol']['data'], fwd_mag_comp['sol']['data']) - monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _bad_res4_grad_comp) - with pytest.raises(RuntimeError, match='inconsistent compensation grade'): + assert_allclose(fwd_orig["sol"]["data"], fwd_mag_comp["sol"]["data"]) + monkeypatch.setattr(mne.io.ctf.ctf, "_read_res4", _bad_res4_grad_comp) + with pytest.raises(RuntimeError, match="inconsistent compensation grade"): read_raw_ctf(path) @testing.requires_testing_data def test_invalid_meas_date(monkeypatch): """Test handling of invalid meas_date.""" + def _convert_time_bad(date_str, time_str): - return _convert_time('', '') - monkeypatch.setattr(mne.io.ctf.info, '_convert_time', _convert_time_bad) + return _convert_time("", "") + + monkeypatch.setattr(mne.io.ctf.info, "_convert_time", _convert_time_bad) with catch_logging() as log: raw = read_raw_ctf(ctf_dir / ctf_fname_continuous, verbose=True) log = log.getvalue() - assert 'No date or time found' in log - assert raw.info['meas_date'] == datetime.fromtimestamp(0, tz=timezone.utc) + assert "No date or time found" in log + assert raw.info["meas_date"] == datetime.fromtimestamp(0, tz=timezone.utc) diff --git a/mne/io/ctf/trans.py b/mne/io/ctf/trans.py index 0497518a314..3e74063fa40 100644 --- a/mne/io/ctf/trans.py +++ b/mne/io/ctf/trans.py @@ -1,23 +1,30 @@ """Create coordinate transforms.""" -# Author: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
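[Reviewer note, not part of the patch] On the IOError-to-OSError swaps above (in `_make_ctf_name` and `test_missing_res4`): since Python 3.3, `IOError` is merely an alias of `OSError`, so this is a naming cleanup with no behaviour change, and `pytest.raises(OSError, ...)` catches exactly the same exception as before. One-line check:

assert IOError is OSError  # true on any supported Python, so the rename is cosmetic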
import numpy as np -from ...transforms import (combine_transforms, invert_transform, Transform, - _quat_to_affine, _fit_matched_points, apply_trans, - get_ras_to_neuromag_trans) +from ..._fiff.constants import FIFF +from ...transforms import ( + Transform, + _fit_matched_points, + _quat_to_affine, + apply_trans, + combine_transforms, + get_ras_to_neuromag_trans, + invert_transform, +) from ...utils import logger -from ..constants import FIFF from .constants import CTF def _make_transform_card(fro, to, r_lpa, r_nasion, r_rpa): """Make a transform from cardinal landmarks.""" - return invert_transform(Transform( - to, fro, get_ras_to_neuromag_trans(r_nasion, r_lpa, r_rpa))) + return invert_transform( + Transform(to, fro, get_ras_to_neuromag_trans(r_nasion, r_lpa, r_rpa)) + ) def _quaternion_align(from_frame, to_frame, from_pts, to_pts, diff_tol=1e-4): @@ -26,17 +33,19 @@ def _quaternion_align(from_frame, to_frame, from_pts, to_pts, diff_tol=1e-4): trans = _quat_to_affine(_fit_matched_points(from_pts, to_pts)[0]) # Test the transformation and print the results - logger.info(' Quaternion matching (desired vs. transformed):') + logger.info(" Quaternion matching (desired vs. transformed):") for fro, to in zip(from_pts, to_pts): rr = apply_trans(trans, fro) diff = np.linalg.norm(to - rr) - logger.info(' %7.2f %7.2f %7.2f mm <-> %7.2f %7.2f %7.2f mm ' - '(orig : %7.2f %7.2f %7.2f mm) diff = %8.3f mm' - % (tuple(1000 * to) + tuple(1000 * rr) + - tuple(1000 * fro) + (1000 * diff,))) + logger.info( + " %7.2f %7.2f %7.2f mm <-> %7.2f %7.2f %7.2f mm " + "(orig : %7.2f %7.2f %7.2f mm) diff = %8.3f mm" + % (tuple(1000 * to) + tuple(1000 * rr) + tuple(1000 * fro) + (1000 * diff,)) + ) if diff > diff_tol: - raise RuntimeError('Something is wrong: quaternion matching did ' - 'not work (see above)') + raise RuntimeError( + "Something is wrong: quaternion matching did not work (see above)" + ) return Transform(from_frame, to_frame, trans) @@ -46,19 +55,18 @@ def _make_ctf_coord_trans_set(res4, coils): lpa = rpa = nas = T1 = T2 = T3 = T5 = None if coils is not None: for p in coils: - if p['valid'] and (p['coord_frame'] == - FIFF.FIFFV_MNE_COORD_CTF_HEAD): - if lpa is None and p['kind'] == CTF.CTFV_COIL_LPA: + if p["valid"] and (p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD): + if lpa is None and p["kind"] == CTF.CTFV_COIL_LPA: lpa = p - elif rpa is None and p['kind'] == CTF.CTFV_COIL_RPA: + elif rpa is None and p["kind"] == CTF.CTFV_COIL_RPA: rpa = p - elif nas is None and p['kind'] == CTF.CTFV_COIL_NAS: + elif nas is None and p["kind"] == CTF.CTFV_COIL_NAS: nas = p if lpa is None or rpa is None or nas is None: - raise RuntimeError('Some of the mandatory HPI device-coordinate ' - 'info was not there.') - t = _make_transform_card('head', 'ctf_head', - lpa['r'], nas['r'], rpa['r']) + raise RuntimeError( + "Some of the mandatory HPI device-coordinate info was not there." + ) + t = _make_transform_card("head", "ctf_head", lpa["r"], nas["r"], rpa["r"]) T3 = invert_transform(t) # CTF device -> Neuromag device @@ -67,48 +75,58 @@ def _make_ctf_coord_trans_set(res4, coils): # in z direction to get a coordinate system comparable to the Neuromag one # R = np.eye(4) - R[:3, 3] = [0., 0., 0.19] - val = 0.5 * np.sqrt(2.) + R[:3, 3] = [0.0, 0.0, 0.19] + val = 0.5 * np.sqrt(2.0) R[0, 0] = val R[0, 1] = -val R[1, 0] = val R[1, 1] = val - T4 = Transform('ctf_meg', 'meg', R) + T4 = Transform("ctf_meg", "meg", R) # CTF device -> CTF head # We need to make the implicit transform explicit! 
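    # Illustrative aside, not part of the patch: with val = 0.5 * sqrt(2), the
    # upper-left 2x2 block of R built just above is [[val, -val], [val, val]],
    # i.e. [[cos 45, -sin 45], [sin 45, cos 45]], so T4 is a 45 degree rotation
    # about +z combined with a 0.19 m shift along z. Quick sanity check
    # (assumes only NumPy):
    #
    #     >>> val = 0.5 * np.sqrt(2.0)
    #     >>> bool(np.isclose(np.rad2deg(np.arctan2(val, val)), 45.0))
    #     True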
h_pts = dict() d_pts = dict() - kinds = (CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA, CTF.CTFV_COIL_NAS, - CTF.CTFV_COIL_SPARE) + kinds = ( + CTF.CTFV_COIL_LPA, + CTF.CTFV_COIL_RPA, + CTF.CTFV_COIL_NAS, + CTF.CTFV_COIL_SPARE, + ) if coils is not None: for p in coils: - if p['valid']: - if p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + if p["valid"]: + if p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: for kind in kinds: - if kind not in h_pts and p['kind'] == kind: - h_pts[kind] = p['r'] - elif p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE: + if kind not in h_pts and p["kind"] == kind: + h_pts[kind] = p["r"] + elif p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE: for kind in kinds: - if kind not in d_pts and p['kind'] == kind: - d_pts[kind] = p['r'] + if kind not in d_pts and p["kind"] == kind: + d_pts[kind] = p["r"] if any(kind not in h_pts for kind in kinds[:-1]): - raise RuntimeError('Some of the mandatory HPI device-coordinate ' - 'info was not there.') + raise RuntimeError( + "Some of the mandatory HPI device-coordinate info was not there." + ) if any(kind not in d_pts for kind in kinds[:-1]): - raise RuntimeError('Some of the mandatory HPI head-coordinate ' - 'info was not there.') - use_kinds = [kind for kind in kinds - if (kind in h_pts and kind in d_pts)] + raise RuntimeError( + "Some of the mandatory HPI head-coordinate info was not there." + ) + use_kinds = [kind for kind in kinds if (kind in h_pts and kind in d_pts)] r_head = np.array([h_pts[kind] for kind in use_kinds]) r_dev = np.array([d_pts[kind] for kind in use_kinds]) - T2 = _quaternion_align('ctf_meg', 'ctf_head', r_dev, r_head) + T2 = _quaternion_align("ctf_meg", "ctf_head", r_dev, r_head) # The final missing transform if T3 is not None and T2 is not None: - T5 = combine_transforms(T2, T3, 'ctf_meg', 'head') - T1 = combine_transforms(invert_transform(T4), T5, 'meg', 'head') - s = dict(t_dev_head=T1, t_ctf_dev_ctf_head=T2, t_ctf_head_head=T3, - t_ctf_dev_dev=T4, t_ctf_dev_head=T5) - logger.info(' Coordinate transformations established.') + T5 = combine_transforms(T2, T3, "ctf_meg", "head") + T1 = combine_transforms(invert_transform(T4), T5, "meg", "head") + s = dict( + t_dev_head=T1, + t_ctf_dev_ctf_head=T2, + t_ctf_head_head=T3, + t_ctf_dev_dev=T4, + t_ctf_dev_head=T5, + ) + logger.info(" Coordinate transformations established.") return s diff --git a/mne/io/ctf_comp.py b/mne/io/ctf_comp.py deleted file mode 100644 index 61fca9117f7..00000000000 --- a/mne/io/ctf_comp.py +++ /dev/null @@ -1,186 +0,0 @@ -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Denis Engemann -# -# License: BSD-3-Clause - -from copy import deepcopy - -import numpy as np - -from .constants import FIFF -from .tag import read_tag -from .tree import dir_tree_find -from .write import start_block, end_block, write_int -from .matrix import write_named_matrix, _read_named_matrix - -from ..utils import logger, verbose, _pl - - -def _add_kind(one): - """Convert CTF kind to MNE kind.""" - if one['ctfkind'] == int('47314252', 16): - one['kind'] = 1 - elif one['ctfkind'] == int('47324252', 16): - one['kind'] = 2 - elif one['ctfkind'] == int('47334252', 16): - one['kind'] = 3 - else: - one['kind'] = int(one['ctfkind']) - - -def _calibrate_comp(comp, chs, row_names, col_names, - mult_keys=('range', 'cal'), flip=False): - """Get row and column cals.""" - ch_names = [c['ch_name'] for c in chs] - row_cals = np.zeros(len(row_names)) - col_cals = np.zeros(len(col_names)) - for names, cals, inv in zip((row_names, col_names), (row_cals, col_cals), - 
(False, True)): - for ii in range(len(cals)): - p = ch_names.count(names[ii]) - if p != 1: - raise RuntimeError('Channel %s does not appear exactly once ' - 'in data, found %d instance%s' - % (names[ii], p, _pl(p))) - idx = ch_names.index(names[ii]) - val = chs[idx][mult_keys[0]] * chs[idx][mult_keys[1]] - val = float(1. / val) if inv else float(val) - val = 1. / val if flip else val - cals[ii] = val - comp['rowcals'] = row_cals - comp['colcals'] = col_cals - comp['data']['data'] = (row_cals[:, None] * - comp['data']['data'] * col_cals[None, :]) - - -@verbose -def read_ctf_comp(fid, node, chs, verbose=None): - """Read the CTF software compensation data from the given node. - - Parameters - ---------- - fid : file - The file descriptor. - node : dict - The node in the FIF tree. - chs : list - The list of channels from info['chs'] to match with - compensators that are read. - %(verbose)s - - Returns - ------- - compdata : list - The compensation data - """ - return _read_ctf_comp(fid, node, chs, None) - - -def _read_ctf_comp(fid, node, chs, ch_names_mapping): - """Read the CTF software compensation data from the given node. - - Parameters - ---------- - fid : file - The file descriptor. - node : dict - The node in the FIF tree. - chs : list - The list of channels from info['chs'] to match with - compensators that are read. - ch_names_mapping : dict | None - The channel renaming to use. - %(verbose)s - - Returns - ------- - compdata : list - The compensation data - """ - from .meas_info import _rename_comps - ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping - compdata = [] - comps = dir_tree_find(node, FIFF.FIFFB_MNE_CTF_COMP_DATA) - - for node in comps: - # Read the data we need - mat = _read_named_matrix(fid, node, FIFF.FIFF_MNE_CTF_COMP_DATA) - for p in range(node['nent']): - kind = node['directory'][p].kind - pos = node['directory'][p].pos - if kind == FIFF.FIFF_MNE_CTF_COMP_KIND: - tag = read_tag(fid, pos) - break - else: - raise Exception('Compensation type not found') - - # Get the compensation kind and map it to a simple number - one = dict(ctfkind=tag.data) - del tag - _add_kind(one) - for p in range(node['nent']): - kind = node['directory'][p].kind - pos = node['directory'][p].pos - if kind == FIFF.FIFF_MNE_CTF_COMP_CALIBRATED: - tag = read_tag(fid, pos) - calibrated = tag.data - break - else: - calibrated = False - - one['save_calibrated'] = bool(calibrated) - one['data'] = mat - _rename_comps([one], ch_names_mapping) - if not calibrated: - # Calibrate... - _calibrate_comp(one, chs, mat['row_names'], mat['col_names']) - else: - one['rowcals'] = np.ones(mat['data'].shape[0], dtype=np.float64) - one['colcals'] = np.ones(mat['data'].shape[1], dtype=np.float64) - - compdata.append(one) - - if len(compdata) > 0: - logger.info(' Read %d compensation matrices' % len(compdata)) - - return compdata - - -############################################################################### -# Writing - -def write_ctf_comp(fid, comps): - """Write the CTF compensation data into a fif file. 
- - Parameters - ---------- - fid : file - The open FIF file descriptor - - comps : list - The compensation data to write - """ - if len(comps) <= 0: - return - - # This is very simple in fact - start_block(fid, FIFF.FIFFB_MNE_CTF_COMP) - for comp in comps: - start_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA) - # Write the compensation kind - write_int(fid, FIFF.FIFF_MNE_CTF_COMP_KIND, comp['ctfkind']) - if comp.get('save_calibrated', False): - write_int(fid, FIFF.FIFF_MNE_CTF_COMP_CALIBRATED, - comp['save_calibrated']) - - if not comp.get('save_calibrated', True): - # Undo calibration - comp = deepcopy(comp) - data = ((1. / comp['rowcals'][:, None]) * comp['data']['data'] * - (1. / comp['colcals'][None, :])) - comp['data']['data'] = data - write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp['data']) - end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA) - - end_block(fid, FIFF.FIFFB_MNE_CTF_COMP) diff --git a/mne/io/curry/__init__.py b/mne/io/curry/__init__.py index ae514366e2b..5b2e89b6798 100644 --- a/mne/io/curry/__init__.py +++ b/mne/io/curry/__init__.py @@ -1,7 +1,8 @@ """Reader for CURRY data.""" -# Author: Dirk Gütlin -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .curry import read_raw_curry +from .curry import read_impedances_curry diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index 6c1e5d79821..c1ffcad96fb 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -1,486 +1,728 @@ -# -*- coding: UTF-8 -*- -# -# Authors: Dirk Gütlin -# # +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from collections import namedtuple +import re from datetime import datetime, timezone -import os.path as op from pathlib import Path -import re import numpy as np -from .._digitization import _make_dig_points +from ..._fiff._digitization import _make_dig_points +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ..._fiff.tag import _coil_trans_to_loc +from ..._fiff.utils import _mult_cal_one, _read_segments_file +from ...annotations import annotations_from_events +from ...epochs import Epochs +from ...surface import _normal_orth +from ...transforms import ( + Transform, + _angle_between_quats, + apply_trans, + combine_transforms, + get_ras_to_neuromag_trans, + invert_transform, + rot_to_quat, +) +from ...utils import ( + _on_missing, + _soft_import, + catch_logging, + logger, + verbose, + warn, +) from ..base import BaseRaw -from ..meas_info import create_info -from ..tag import _coil_trans_to_loc -from ..utils import _read_segments_file, _mult_cal_one -from ..constants import FIFF from ..ctf.trans import _quaternion_align -from ...surface import _normal_orth -from ...transforms import (apply_trans, Transform, get_ras_to_neuromag_trans, - combine_transforms, invert_transform, - _angle_between_quats, rot_to_quat) -from ...utils import check_fname, logger, verbose, _check_fname -from ...annotations import Annotations - -FILE_EXTENSIONS = { - "Curry 7": { - "info": ".dap", - "data": ".dat", - "labels": ".rs3", - "events_cef": ".cef", - "events_ceo": ".ceo", - "hpi": ".hpi", - }, - "Curry 8": { - "info": ".cdt.dpa", - "data": ".cdt", - "labels": ".cdt.dpa", - "events_cef": ".cdt.cef", - "events_ceo": ".cdt.ceo", - "hpi": ".cdt.hpi", - } -} -CHANTYPES = {"meg": "_MAG1", "eeg": "", "misc": "_OTHERS"} -FIFFV_CHANTYPES = {"meg": FIFF.FIFFV_MEG_CH, "eeg": FIFF.FIFFV_EEG_CH, - "misc": FIFF.FIFFV_MISC_CH} -FIFFV_COILTYPES = {"meg": 
FIFF.FIFFV_COIL_CTF_GRAD, "eeg": FIFF.FIFFV_COIL_EEG, - "misc": FIFF.FIFFV_COIL_NONE} -SI_UNITS = dict(V=FIFF.FIFF_UNIT_V, T=FIFF.FIFF_UNIT_T) -SI_UNIT_SCALE = dict(c=1e-2, m=1e-3, u=1e-6, µ=1e-6, n=1e-9, p=1e-12, f=1e-15) -CurryParameters = namedtuple('CurryParameters', - 'n_samples, sfreq, is_ascii, unit_dict, ' - 'n_chans, dt_start, chanidx_in_file') +CURRY_SUFFIX_DATA = [".cdt", ".dat"] +CURRY_SUFFIX_HDR = [".cdt.dpa", ".cdt.dpo", ".dap"] +CURRY_SUFFIX_LABELS = [".cdt.dpa", ".cdt.dpo", ".rs3"] -def _get_curry_version(file_extension): +def _get_curry_version(fname): """Check out the curry file version.""" - return "Curry 8" if "cdt" in file_extension else "Curry 7" - - -def _get_curry_file_structure(fname, required=()): - """Store paths to a dict and check for required files.""" - _msg = "The following required files cannot be found: {0}.\nPlease make " \ - "sure all required files are located in the same directory as {1}." - fname = Path(_check_fname(fname, 'read', True, 'fname')) - - # we don't use os.path.splitext to also handle extensions like .cdt.dpa - # this won't handle a dot in the filename, but it should handle it in - # the parent directories - fname_base = fname.name.split('.', maxsplit=1)[0] - ext = fname.name[len(fname_base):] - fname_base = str(fname) - fname_base = fname_base[:len(fname_base) - len(ext)] - del fname - version = _get_curry_version(ext) - my_curry = dict() - for key in ('info', 'data', 'labels', 'events_cef', 'events_ceo', 'hpi'): - fname = fname_base + FILE_EXTENSIONS[version][key] - if op.isfile(fname): - _key = 'events' if key.startswith('events') else key - my_curry[_key] = fname - - missing = [field for field in required if field not in my_curry] - if missing: - raise FileNotFoundError(_msg.format(np.unique(missing), fname)) - - return my_curry - - -def _read_curry_lines(fname, regex_list): - """Read through the lines of a curry parameter files and save data. 
+ fname_hdr = _check_curry_header_filename(_check_curry_filename(fname)) + content_hdr = fname_hdr.read_text() + return ( + "Curry 7" + if ".dap" in str(fname_hdr) + else "Curry 8" + if re.compile(r"FileVersion\s*=\s*[0-9]+") + .search(content_hdr) + .group(0) + .split()[-1][0] + == "8" + else "Curry 9" + if re.compile(r"FileVersion\s*=\s*[0-9]+") + .search(content_hdr) + .group(0) + .split()[-1][0] + == "9" + else None + ) + + +def _check_curry_filename(fname): + fname_in = Path(fname).expanduser() + fname_out = None + # try suffixes + if fname_in.suffix in CURRY_SUFFIX_DATA: + fname_out = fname_in + elif ( + fname_in.with_suffix("").exists() + and fname_in.with_suffix("").suffix in CURRY_SUFFIX_DATA + ): + fname_out = fname_in.with_suffix("") + else: + for data_suff in CURRY_SUFFIX_DATA: + if fname_in.with_suffix(data_suff).exists(): + fname_out = fname_in.with_suffix(data_suff) + break + # final check + if not fname_out or not fname_out.exists(): + raise FileNotFoundError( + f"no curry data file found (.dat or .cdt), checked {fname_out or fname_in}" + ) + return fname_out + + +def _check_curry_header_filename(fname): + fname_in = Path(fname) + fname_hdr = None + # try suffixes + for hdr_suff in CURRY_SUFFIX_HDR: + if fname_in.with_suffix(hdr_suff).exists(): + fname_hdr = fname_in.with_suffix(hdr_suff) + break + # final check + if not fname_hdr or not fname_in.exists(): + raise FileNotFoundError( + f"no corresponding header file found {CURRY_SUFFIX_HDR}" + ) + return fname_hdr + + +def _check_curry_labels_filename(fname): + fname_in = Path(fname) + fname_labels = None + # try suffixes + for hdr_suff in CURRY_SUFFIX_LABELS: + if fname_in.with_suffix(hdr_suff).exists(): + fname_labels = fname_in.with_suffix(hdr_suff) + break + # final check + if not fname_labels or not fname_in.exists(): + raise FileNotFoundError( + f"no corresponding labels file found {CURRY_SUFFIX_HDR}" + ) + return fname_labels + + +def _check_curry_sfreq_consistency(fname_hdr): + content_hdr = fname_hdr.read_text() + stime = float( + re.compile(r"SampleTimeUsec\s*=\s*.+").search(content_hdr).group(0).split()[-1] + ) + sfreq = float( + re.compile(r"SampleFreqHz\s*=\s*.+").search(content_hdr).group(0).split()[-1] + ) + if stime == 0: + raise ValueError("Header file indicates a sampling interval of 0µs.") + if not np.isclose(1e6 / stime, sfreq): + warn( + f"Sample distance ({stime}µs) and sample frequency ({sfreq}Hz) in header " + "file do not match! sfreq will be derived from sample distance." + ) + + +def _get_curry_meas_info(fname): + # Note that the time zone information is not stored in the Curry info + # file, and it seems the start time info is in the local timezone + # of the acquisition system (which is unknown); therefore, just set + # the timezone to be UTC. If the user knows otherwise, they can + # change it later. (Some Curry files might include StartOffsetUTCMin, + # but its presence is unpredictable, so we won't rely on it.) 
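    # Illustrative aside, not part of the patch: because the header carries no
    # time zone, the reader stores the acquisition clock time as if it were
    # UTC. A user who knows the real offset can correct it afterwards, e.g.
    # for an acquisition system running at UTC+2 (hypothetical file name):
    #
    #     >>> from datetime import timedelta
    #     >>> raw = read_raw_curry("subject01.cdt")  # doctest: +SKIP
    #     >>> raw.set_meas_date(raw.info["meas_date"] - timedelta(hours=2))  # doctest: +SKIP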
+ fname_hdr = _check_curry_header_filename(fname) + content_hdr = fname_hdr.read_text() + + # read meas_date + meas_date = [ + int(re.compile(rf"{v}\s*=\s*-?\d+").search(content_hdr).group(0).split()[-1]) + for v in [ + "StartYear", + "StartMonth", + "StartDay", + "StartHour", + "StartMin", + "StartSec", + "StartMillisec", + ] + ] + try: + meas_date = datetime( + *meas_date[:-1], + meas_date[-1] * 1000, # -> microseconds + timezone.utc, + ) + except Exception: + meas_date = None + + # read datatype + byteorder = ( + re.compile(r"DataByteOrder\s*=\s*[A-Z]+") + .search(content_hdr) + .group() + .split()[-1] + ) + is_ascii = byteorder == "ASCII" + + # amplifier info + # TODO - PRIVACY + # seems like there can be identifiable information (serial numbers, dates). + # MNE anonymization functions only overwrite "serial" and "site", though + # TODO - FUTURE ENHANCEMENT + # # there can be filter details in AmplifierInfo, too + amp_info = ( + re.compile(r"AmplifierInfo\s*=.*\n") + .search(content_hdr) + .group() + .strip("\n") + .split("= ")[-1] + .strip() + ) + + device_info = ( + dict(serial=amp_info) + if amp_info != "" + else None # model="", serial="", site="" + ) + + return meas_date, is_ascii, device_info + + +def _get_curry_recording_type(fname): + _soft_import("curryreader", "read recording modality") + + import curryreader + + epochinfo = curryreader.read(str(fname), plotdata=0, verbosity=1)["epochinfo"] + if epochinfo.size == 0: + return "raw" + else: + n_average = epochinfo[:, 0] + if (n_average == 1).all(): + return "epochs" + else: + return "evoked" + + +def _get_curry_epoch_info(fname): + _soft_import("curryreader", "read epoch info") + _soft_import("pandas", "dataframe integration") + + import curryreader + import pandas as pd + + # use curry-python-reader + currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) + + # get epoch info + sfreq = currydata["info"]["samplingfreq"] + n_samples = currydata["info"]["samples"] + n_epochs = len(currydata["epochlabels"]) + epochinfo = currydata["epochinfo"] + epochtypes = epochinfo[:, 2].astype(int).tolist() + epochlabels = currydata["epochlabels"] + epochmetainfo = pd.DataFrame( + epochinfo[:, -4:], columns=["accept", "correct", "response", "response time"] + ) + # create mne events + events = np.array( + [[i * n_samples for i in range(n_epochs)], [0] * n_epochs, epochtypes] + ).T + event_id = dict(zip(epochlabels, epochtypes)) + return dict( + events=events, + event_id=event_id, + tmin=0.0, + tmax=(n_samples - 1) / sfreq, + baseline=None, + detrend=None, + verbose=False, + metadata=epochmetainfo, + reject_by_annotation=False, + reject=None, + ) + + +def _get_curry_meg_normals(fname): + fname_lbl = _check_curry_labels_filename(fname) + normals_str = fname_lbl.read_text().split("\n") + # i_start, i_stop = [ + # i + # for i, ll in enumerate(normals_str) + # if ("NORMALS" in ll and "START_LIST" in ll) + # or ("NORMALS" in ll and "END_LIST" in ll) + # ] + # normals_str = [nn.split("\t") for nn in normals_str[i_start + 1 : i_stop]] + i_list = [ + i + for i, ll in enumerate(normals_str) + if ("NORMALS" in ll and "START_LIST" in ll) + or ("NORMALS" in ll and "END_LIST" in ll) + ] + assert len(i_list) % 2 == 0 + i_start_list = i_list[::2] + i_stop_list = i_list[1::2] + normals_str = [ + nn.split("\t") + for i_start, i_stop in zip(i_start_list, i_stop_list) + for nn in normals_str[i_start + 1 : i_stop] + ] + return np.array([[float(nnn.strip()) for nnn in nn] for nn in normals_str]) + + +def _extract_curry_info(fname): + 
_soft_import("curryreader", "read file header") + + import curryreader + + # check if sfreq values make sense + fname_hdr = _check_curry_header_filename(fname) + _check_curry_sfreq_consistency(fname_hdr) + + # use curry-python-reader + currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) + + # basic info + sfreq = currydata["info"]["samplingfreq"] + n_samples = currydata["info"]["samples"] + if n_samples != currydata["data"].shape[0]: # normal in epoched data + n_samples = currydata["data"].shape[0] + if _get_curry_recording_type(fname) == "raw": + warn( + "sample count from header doesn't match actual data! " + "file corrupted? will use data shape" + ) + + # channel information + n_ch = currydata["info"]["channels"] + ch_names = currydata["labels"] + ch_pos = currydata["sensorpos"] + landmarks = currydata["landmarks"] + if not isinstance(landmarks, np.ndarray): + landmarks = np.array(landmarks) + landmarkslabels = currydata["landmarkslabels"] + hpimatrix = currydata["hpimatrix"] + if isinstance(currydata["hpimatrix"], np.ndarray) and hpimatrix.ndim == 1: + hpimatrix = hpimatrix[np.newaxis, :] + + # data + orig_format = "int" + # curryreader.py always reads float32, but this is probably just numpy. + # legacy MNE code states int. + + # events + events = currydata["events"] + annotations = currydata["annotations"] + assert len(annotations) == len(events) + if len(events) > 0: + event_desc = dict() + for k, v in zip(events[:, 1], annotations): + if int(k) not in event_desc.keys(): + event_desc[int(k)] = v.strip() if (v.strip() != "") else str(int(k)) + else: + event_desc = None + + # impedance measurements + # moved to standalone def; see read_impedances_curry + # impedances = currydata["impedances"] + + # get other essential info not provided by curryreader + # channel types and units + ch_types, units = [], [] + ch_groups = fname_hdr.read_text().split("DEVICE_PARAMETERS")[1::2] + for ch_group in ch_groups: + ch_group = re.compile(r"\s+").sub(" ", ch_group).strip() + groupid = ch_group.split()[0] + unit = ch_group.split("DataUnit = ")[1].split()[0] + n_ch_group = int(ch_group.split("NumChanThisGroup = ")[1].split()[0]) + ch_type = ( + "mag" if ("MAG" in groupid) else "misc" if ("OTHER" in groupid) else "eeg" + ) + # combine info + ch_types += [ch_type] * n_ch_group + units += [unit] * n_ch_group + + # This for Git issue #8391. In some cases, the 'labels' (.rs3 file will + # list channels that are not actually saved in the datafile (such as the + # 'Ref' channel). These channels are denoted in the 'info' (.dap) file + # in the CHAN_IN_FILE section with a '0' as their index. + # + # current curryreader cannot cope with this - loads the list of channels solely + # based on their order, so can be false. fix it here! 
+ if not len(ch_types) == len(units) == len(ch_names) == n_ch: + # read relevant info + fname_lbl = _check_curry_labels_filename(fname) + lbl = fname_lbl.read_text().split("START_LIST") + ch_names_full = [] + for i in range(1, len(lbl)): + if "LABELS" in lbl[i - 1].split()[-1]: + for ll in lbl[i].split("\n")[1:]: + if "LABELS" not in ll: + ch_names_full.append(ll.strip()) + else: + break + hdr = fname_hdr.read_text().split("START_LIST") + chaninfile_full = [] + for i in range(1, len(hdr)): + if "CHAN_IN_FILE" in hdr[i - 1].split()[-1]: + for ll in hdr[i].split("\n")[1:]: + if "CHAN_IN_FILE" not in ll: + chaninfile_full.append(int(ll.strip())) + else: + break + # drop channels with chan_in_file==0, account for order + i_drop = [i for i, ich in enumerate(chaninfile_full) if ich == 0] + ch_names = [ + ch_names_full[i] for i in np.argsort(chaninfile_full) if i not in i_drop + ] + ch_pos = np.array( + [ + ch_pos[i] + for i in np.argsort(chaninfile_full) + if (i not in i_drop) and (i < len(ch_pos)) + ] + ) + ch_types = [ch_types[i] for i in np.argsort(chaninfile_full) if i not in i_drop] + units = [units[i] for i in np.argsort(chaninfile_full) if i not in i_drop] + + assert len(ch_types) == len(units) == len(ch_names) == n_ch + assert len(ch_pos) == ch_types.count("eeg") + ch_types.count("mag") + + # finetune channel types (e.g. stim, eog etc might be identified by name) + # TODO - FUTURE ENHANCEMENT + + # scale data to SI units + orig_units = dict(zip(ch_names, units)) + cals = [ + 1.0 / 1e15 if (u == "fT") else 1.0 / 1e6 if (u == "uV") else 1.0 for u in units + ] + + return ( + sfreq, + n_samples, + ch_names, + ch_types, + ch_pos, + landmarks, + landmarkslabels, + hpimatrix, + events, + event_desc, + orig_format, + orig_units, + cals, + ) + + +def _read_annotations_curry(fname, sfreq="auto"): + r"""Read events from Curry event files. Parameters ---------- fname : path-like - Path to a curry file. - regex_list : list of str - A list of strings or regular expressions to search within the file. - Each element `regex` in `regex_list` must be formulated so that - `regex + " START_LIST"` initiates the start and `regex + " END_LIST"` - initiates the end of the elements that should be saved. + The filename. + sfreq : float | 'auto' + The sampling frequency in the file. If set to 'auto' then the + ``sfreq`` is taken from the fileheader. Returns ------- - data_dict : dict - A dictionary containing the extracted data. For each element `regex` - in `regex_list` a dictionary key `data_dict[regex]` is created, which - contains a list of the according data. - + annot : instance of Annotations | None + The annotations. """ - save_lines = {} - data_dict = {} - - for regex in regex_list: - save_lines[regex] = False - data_dict[regex] = [] - - with open(fname) as fid: - for line in fid: - for regex in regex_list: - if re.match(regex + " END_LIST", line): - save_lines[regex] = False - - if save_lines[regex] and line != "\n": - result = line.replace("\n", "") - if "\t" in result: - result = result.split("\t") - data_dict[regex].append(result) - - if re.match(regex + " START_LIST", line): - save_lines[regex] = True - - return data_dict - - -def _read_curry_parameters(fname): - """Extract Curry params from a Curry info file.""" - _msg_match = "The sampling frequency and the time steps extracted from " \ - "the parameter file do not match." - _msg_invalid = "sfreq must be greater than 0. 
Got sfreq = {0}" - - var_names = ['NumSamples', 'SampleFreqHz', - 'DataFormat', 'SampleTimeUsec', - 'NumChannels', - 'StartYear', 'StartMonth', 'StartDay', 'StartHour', - 'StartMin', 'StartSec', 'StartMillisec', - 'NUM_SAMPLES', 'SAMPLE_FREQ_HZ', - 'DATA_FORMAT', 'SAMPLE_TIME_USEC', - 'NUM_CHANNELS', - 'START_YEAR', 'START_MONTH', 'START_DAY', 'START_HOUR', - 'START_MIN', 'START_SEC', 'START_MILLISEC'] - - param_dict = dict() - unit_dict = dict() - - with open(fname) as fid: - for line in iter(fid): - if any(var_name in line for var_name in var_names): - key, val = line.replace(" ", "").replace("\n", "").split("=") - param_dict[key.lower().replace("_", "")] = val - for type in CHANTYPES: - if "DEVICE_PARAMETERS" + CHANTYPES[type] + " START" in line: - data_unit = next(fid) - unit_dict[type] = data_unit.replace(" ", "") \ - .replace("\n", "").split("=")[-1] - - # look for CHAN_IN_FILE sections, which may or may not exist; issue #8391 - types = ["meg", "eeg", "misc"] - chanidx_in_file = _read_curry_lines(fname, - ["CHAN_IN_FILE" + - CHANTYPES[key] for key in types]) - - n_samples = int(param_dict["numsamples"]) - sfreq = float(param_dict["samplefreqhz"]) - time_step = float(param_dict["sampletimeusec"]) * 1e-6 - is_ascii = param_dict["dataformat"] == "ASCII" - n_channels = int(param_dict["numchannels"]) - try: - dt_start = datetime(int(param_dict["startyear"]), - int(param_dict["startmonth"]), - int(param_dict["startday"]), - int(param_dict["starthour"]), - int(param_dict["startmin"]), - int(param_dict["startsec"]), - int(param_dict["startmillisec"]) * 1000, - timezone.utc) - # Note that the time zone information is not stored in the Curry info - # file, and it seems the start time info is in the local timezone - # of the acquisition system (which is unknown); therefore, just set - # the timezone to be UTC. If the user knows otherwise, they can - # change it later. (Some Curry files might include StartOffsetUTCMin, - # but its presence is unpredictable, so we won't rely on it.) 
- except (ValueError, KeyError): - dt_start = None # if missing keywords or illegal values, don't set - - if time_step == 0: - true_sfreq = sfreq - elif sfreq == 0: - true_sfreq = 1 / time_step - elif not np.isclose(sfreq, 1 / time_step): - raise ValueError(_msg_match) - else: # they're equal and != 0 - true_sfreq = sfreq - if true_sfreq <= 0: - raise ValueError(_msg_invalid.format(true_sfreq)) - - return CurryParameters(n_samples, true_sfreq, is_ascii, unit_dict, - n_channels, dt_start, chanidx_in_file) - - -def _read_curry_info(curry_paths): - """Extract info from curry parameter files.""" - curry_params = _read_curry_parameters(curry_paths['info']) - R = np.eye(4) - R[[0, 1], [0, 1]] = -1 # rotate 180 deg - # shift down and back - # (chosen by eyeballing to make the CTF helmet look roughly correct) - R[:3, 3] = [0., -0.015, -0.12] - curry_dev_dev_t = Transform('ctf_meg', 'meg', R) - - # read labels from label files - label_fname = curry_paths['labels'] - types = ["meg", "eeg", "misc"] - labels = _read_curry_lines(label_fname, - ["LABELS" + CHANTYPES[key] for key in types]) - sensors = _read_curry_lines(label_fname, - ["SENSORS" + CHANTYPES[key] for key in types]) - normals = _read_curry_lines(label_fname, - ['NORMALS' + CHANTYPES[key] for key in types]) - assert len(labels) == len(sensors) == len(normals) - - all_chans = list() - dig_ch_pos = dict() - for key in ["meg", "eeg", "misc"]: - chanidx_is_explicit = (len(curry_params.chanidx_in_file["CHAN_IN_FILE" - + CHANTYPES[key]]) > 0) # channel index - # position in the datafile may or may not be explicitly declared, - # based on the CHAN_IN_FILE section in info file - for ind, chan in enumerate(labels["LABELS" + CHANTYPES[key]]): - chanidx = len(all_chans) + 1 # by default, just assume the - # channel index in the datafile is in order of the channel - # names as we found them in the labels file - if chanidx_is_explicit: # but, if explicitly declared, use - # that index number - chanidx = int(curry_params.chanidx_in_file["CHAN_IN_FILE" - + CHANTYPES[key]][ind]) - if chanidx <= 0: # if chanidx was explicitly declared to be ' 0', - # it means the channel is not actually saved in the data file - # (e.g. the "Ref" channel), so don't add it to our list. - # Git issue #8391 - continue - ch = {"ch_name": chan, - "unit": curry_params.unit_dict[key], - "kind": FIFFV_CHANTYPES[key], - "coil_type": FIFFV_COILTYPES[key], - "ch_idx": chanidx - } - if key == "eeg": - loc = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float) - # XXX just the sensor, where is ref (next 3)? - assert loc.shape == (3,) - loc /= 1000. # to meters - loc = np.concatenate([loc, np.zeros(9)]) - ch['loc'] = loc - # XXX need to check/ensure this - ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD - dig_ch_pos[chan] = loc[:3] - elif key == 'meg': - pos = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float) - pos /= 1000. 
# to meters - pos = pos[:3] # just the inner coil - pos = apply_trans(curry_dev_dev_t, pos) - nn = np.array(normals["NORMALS" + CHANTYPES[key]][ind], float) - assert np.isclose(np.linalg.norm(nn), 1., atol=1e-4) - nn /= np.linalg.norm(nn) - nn = apply_trans(curry_dev_dev_t, nn, move=False) - trans = np.eye(4) - trans[:3, 3] = pos - trans[:3, :3] = _normal_orth(nn).T - ch['loc'] = _coil_trans_to_loc(trans) - ch['coord_frame'] = FIFF.FIFFV_COORD_DEVICE - all_chans.append(ch) - dig = _make_dig_points( - dig_ch_pos=dig_ch_pos, coord_frame='head', add_missing_fiducials=True) - del dig_ch_pos - - ch_count = len(all_chans) - assert (ch_count == curry_params.n_chans) # ensure that we have assembled - # the same number of channels as declared in the info (.DAP) file in the - # DATA_PARAMETERS section. Git issue #8391 - - # sort the channels to assure they are in the order that matches how - # recorded in the datafile. In general they most likely are already in - # the correct order, but if the channel index in the data file was - # explicitly declared we might as well use it. - all_chans = sorted(all_chans, key=lambda ch: ch['ch_idx']) - - ch_names = [chan["ch_name"] for chan in all_chans] - info = create_info(ch_names, curry_params.sfreq) - with info._unlock(): - info['meas_date'] = curry_params.dt_start # for Git issue #8398 - info['dig'] = dig - _make_trans_dig(curry_paths, info, curry_dev_dev_t) - - for ind, ch_dict in enumerate(info["chs"]): - all_chans[ind].pop('ch_idx') - ch_dict.update(all_chans[ind]) - assert ch_dict['loc'].shape == (12,) - ch_dict['unit'] = SI_UNITS[all_chans[ind]['unit'][1]] - ch_dict['cal'] = SI_UNIT_SCALE[all_chans[ind]['unit'][0]] - - return info, curry_params.n_samples, curry_params.is_ascii - + fname = _check_curry_filename(fname) + + (sfreq_fromfile, _, _, _, _, _, _, _, events, event_desc, _, _, _) = ( + _extract_curry_info(fname) + ) + if sfreq == "auto": + sfreq = sfreq_fromfile + elif np.isreal(sfreq): + if float(sfreq) != float(sfreq_fromfile): + warn( + f"provided sfreq ({sfreq} Hz) does not match freq from fileheader " + "({sfreq_fromfile} Hz)!" + ) + else: + raise ValueError("'sfreq' must be numeric or 'auto'") -_card_dict = {'Left ear': FIFF.FIFFV_POINT_LPA, - 'Nasion': FIFF.FIFFV_POINT_NASION, - 'Right ear': FIFF.FIFFV_POINT_RPA} + if isinstance(events, np.ndarray): # if there are events + events = events.astype("int") + events = np.insert(events, 1, np.diff(events[:, 2:]).flatten(), axis=1)[:, :3] + return annotations_from_events(events, sfreq, event_desc=event_desc) + else: + warn("no event annotations found") + return None + + +def _set_chanloc_curry( + inst, ch_types, ch_pos, landmarks, landmarkslabels, hpimatrix, on_bad_hpi_match +): + ch_names = inst.info["ch_names"] + + # scale ch_pos to m?! + ch_pos /= 1000.0 + landmarks /= 1000.0 + # channel locations + # what about misc without pos? can they mess things up if unordered? 
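+    # (Curry stores sensor and landmark coordinates in mm while MNE expects
+    # metres, hence the division by 1000 above; the legacy reader this replaces
+    # applied the same scaling.)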
+ assert len(ch_pos) >= (ch_types.count("mag") + ch_types.count("eeg")) + assert len(ch_pos) == (ch_types.count("mag") + ch_types.count("eeg")) + ch_pos_meg = { + ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "mag" + } + ch_pos_eeg = { + ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "eeg" + } + # landmarks and headshape + # FIX: one of the test files (c,rfDC*.cdt) names landmarks differently: + NAS_NAMES = ["nasion", "nas"] + LPA_NAMES = ["left ear", "lpa"] + RPA_NAMES = ["right ear", "rpa"] + landmarkslabels = [ + "Nas" + if (ll.lower() in NAS_NAMES) + else "LPA" + if (ll.lower() in LPA_NAMES) + else "RPA" + if (ll.lower() in RPA_NAMES) + else ll + for ll in landmarkslabels + ] + landmark_dict = dict(zip(landmarkslabels, landmarks)) + for k in ["Nas", "RPA", "LPA"]: + if k not in landmark_dict.keys(): + landmark_dict[k] = None + if len(landmarkslabels) > 0: + hpi_pos = landmarks[ + [i for i, n in enumerate(landmarkslabels) if re.match("HPI.?[1-99]", n)], + :, + ] + else: + hpi_pos = None + if len(landmarkslabels) > 0: + hsp_pos = landmarks[ + [i for i, n in enumerate(landmarkslabels) if re.match("H.?[1-99]", n)], : + ] + else: + hsp_pos = None + + has_cards = ( + False + if ( + isinstance(landmark_dict["Nas"], type(None)) + and isinstance(landmark_dict["LPA"], type(None)) + and isinstance(landmark_dict["RPA"], type(None)) + ) + else True + ) + has_hpi = True if isinstance(hpi_pos, np.ndarray) else False + + add_missing_fiducials = not has_cards # raises otherwise + dig = _make_dig_points( + nasion=landmark_dict["Nas"], + lpa=landmark_dict["LPA"], + rpa=landmark_dict["RPA"], + hpi=hpi_pos, + extra_points=hsp_pos, + dig_ch_pos=ch_pos_eeg, + coord_frame="head", + add_missing_fiducials=add_missing_fiducials, + ) + with inst.info._unlock(): + inst.info["dig"] = dig + + # loc transformation for meg sensors (taken from previous version) + if len(ch_pos_meg) > 0: + R = np.eye(4) + R[[0, 1], [0, 1]] = -1 # rotate 180 deg + # shift down and back + # (chosen by eyeballing to make the helmet look roughly correct) + R[:3, 3] = [0.0, -0.015, -0.12] + curry_dev_dev_t = Transform("ctf_meg", "meg", R) + + ch_normals_meg = _get_curry_meg_normals(inst.filenames[0]) + assert len(ch_normals_meg) == len(ch_pos_meg) + else: + curry_dev_dev_t, ch_normals_meg = None, None + # fill up chanlocs + assert len(ch_names) == len(ch_types) >= len(ch_pos) + for i, (ch_name, ch_type, ch_loc) in enumerate(zip(ch_names, ch_types, ch_pos)): + assert inst.info["ch_names"][i] == ch_name + ch = inst.info["chs"][i] + if ch_type == "eeg": + with inst.info._unlock(): + ch["loc"][:3] = ch_loc[:3] + ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD + elif ch_type == "mag": + # transform mode + pos = ch_loc[:3] # just the inner coil for MEG + pos = apply_trans(curry_dev_dev_t, pos) + nn = ch_normals_meg[i] + assert np.isclose(np.linalg.norm(nn), 1.0, atol=1e-4) + nn /= np.linalg.norm(nn) + nn = apply_trans(curry_dev_dev_t, nn, move=False) + trans = np.eye(4) + trans[:3, 3] = pos + trans[:3, :3] = _normal_orth(nn).T + with inst.info._unlock(): + ch["loc"] = _coil_trans_to_loc(trans) + # TODO: We should figure out if all files are Compumedics, + # and even then figure out if it's adult or child + ch["coil_type"] = FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD + ch["coord_frame"] = FIFF.FIFFV_COORD_DEVICE + elif ch_type == "misc": + pass + else: + raise NotImplementedError + + # TODO - REVIEW NEEDED + # do we need further transformations for MEG channel positions? + # the testfiles i got look good to me.. 
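+    # Note on the 12-element ``loc`` filled in above (standard MNE convention):
+    # it stores [r0, ex, ey, ez], i.e. the sensor position followed by the
+    # three unit vectors of the sensor coordinate frame, which is why the 4x4
+    # coil transform built from the position and normal is flattened into
+    # ``loc`` via ``_coil_trans_to_loc``.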
+ _make_trans_dig( + inst.info, + curry_dev_dev_t, + landmark_dict, + has_cards, + has_hpi, + hpimatrix, + on_bad_hpi_match, + ) + + +def _make_trans_dig( + info, + curry_dev_dev_t, + landmark_dict, + has_cards, + has_hpi, + chpidata, + on_bad_hpi_match, +): + cards = { + FIFF.FIFFV_POINT_LPA: landmark_dict["LPA"], + FIFF.FIFFV_POINT_NASION: landmark_dict["Nas"], + FIFF.FIFFV_POINT_RPA: landmark_dict["RPA"], + } -def _make_trans_dig(curry_paths, info, curry_dev_dev_t): # Coordinate frame transformations and definitions - no_msg = 'Leaving device<->head transform as None' - info['dev_head_t'] = None - label_fname = curry_paths['labels'] - key = 'LANDMARKS' + CHANTYPES['meg'] - lm = _read_curry_lines(label_fname, [key])[key] - lm = np.array(lm, float) - lm.shape = (-1, 3) + no_msg = "Leaving device<->head transform as None" + info["dev_head_t"] = None + lm = [v for v in landmark_dict.values() if isinstance(v, np.ndarray)] if len(lm) == 0: # no dig - logger.info(no_msg + ' (no landmarks found)') + logger.info(no_msg + " (no landmarks found)") return - lm /= 1000. - key = 'LM_REMARKS' + CHANTYPES['meg'] - remarks = _read_curry_lines(label_fname, [key])[key] - assert len(remarks) == len(lm) - with info._unlock(): - info['dig'] = list() - cards = dict() - for remark, r in zip(remarks, lm): - kind = ident = None - if remark in _card_dict: - kind = FIFF.FIFFV_POINT_CARDINAL - ident = _card_dict[remark] - cards[ident] = r - elif remark.startswith('HPI'): - kind = FIFF.FIFFV_POINT_HPI - ident = int(remark[3:]) - 1 - if kind is not None: - info['dig'].append(dict( - kind=kind, ident=ident, r=r, - coord_frame=FIFF.FIFFV_COORD_UNKNOWN)) - with info._unlock(): - info['dig'].sort(key=lambda x: (x['kind'], x['ident'])) - has_cards = len(cards) == 3 - has_hpi = 'hpi' in curry_paths + if has_cards and has_hpi: # have all three - logger.info('Composing device<->head transformation from dig points') - hpi_u = np.array([d['r'] for d in info['dig'] - if d['kind'] == FIFF.FIFFV_POINT_HPI], float) - hpi_c = np.ascontiguousarray( - _first_hpi(curry_paths['hpi'])[:len(hpi_u), 1:4]) - unknown_curry_t = _quaternion_align( - 'unknown', 'ctf_meg', hpi_u, hpi_c, 1e-2) - angle = np.rad2deg(_angle_between_quats( - np.zeros(3), rot_to_quat(unknown_curry_t['trans'][:3, :3]))) - dist = 1000 * np.linalg.norm(unknown_curry_t['trans'][:3, 3]) - logger.info(' Fit a %0.1f° rotation, %0.1f mm translation' - % (angle, dist)) + logger.info("Composing device<->head transformation from dig points") + hpi_u = np.array( + [d["r"] for d in info["dig"] if d["kind"] == FIFF.FIFFV_POINT_HPI], float + ) + hpi_c = np.ascontiguousarray(chpidata[0][: len(hpi_u), 1:4]) + bad_hpi_match = False + try: + with catch_logging() as log: + unknown_curry_t = _quaternion_align( + "unknown", + "ctf_meg", + hpi_u.astype("float64"), + hpi_c.astype("float64"), + 1e-2, + ) + except RuntimeError: + bad_hpi_match = True + with catch_logging() as log: + unknown_curry_t = _quaternion_align( + "unknown", + "ctf_meg", + hpi_u.astype("float64"), + hpi_c.astype("float64"), + 1e-1, + ) + logger.info(log.getvalue()) + + angle = np.rad2deg( + _angle_between_quats( + np.zeros(3), rot_to_quat(unknown_curry_t["trans"][:3, :3]) + ) + ) + dist = 1000 * np.linalg.norm(unknown_curry_t["trans"][:3, 3]) + logger.info(f" Fit a {angle:0.1f}° rotation, {dist:0.1f} mm translation") + + if bad_hpi_match: + _on_missing( + on_bad_hpi_match, + "Poor HPI matching (see log above)!", + name="on_bad_hpi_match", + ) + unknown_dev_t = combine_transforms( - unknown_curry_t, curry_dev_dev_t, 
'unknown', 'meg') + unknown_curry_t, curry_dev_dev_t, "unknown", "meg" + ) unknown_head_t = Transform( - 'unknown', 'head', + "unknown", + "head", get_ras_to_neuromag_trans( - *(cards[key] for key in (FIFF.FIFFV_POINT_NASION, - FIFF.FIFFV_POINT_LPA, - FIFF.FIFFV_POINT_RPA)))) + *( + cards[key] + for key in ( + FIFF.FIFFV_POINT_NASION, + FIFF.FIFFV_POINT_LPA, + FIFF.FIFFV_POINT_RPA, + ) + ) + ), + ) with info._unlock(): - info['dev_head_t'] = combine_transforms( - invert_transform(unknown_dev_t), unknown_head_t, 'meg', 'head') - for d in info['dig']: - d.update(coord_frame=FIFF.FIFFV_COORD_HEAD, - r=apply_trans(unknown_head_t, d['r'])) + info["dev_head_t"] = combine_transforms( + invert_transform(unknown_dev_t), unknown_head_t, "meg", "head" + ) + for d in info["dig"]: + d.update( + coord_frame=FIFF.FIFFV_COORD_HEAD, + r=apply_trans(unknown_head_t, d["r"]), + ) else: if has_cards: - no_msg += ' (no .hpi file found)' + no_msg += " (no .hpi file found)" elif has_hpi: - no_msg += ' (not all cardinal points found)' + no_msg += " (not all cardinal points found)" else: - no_msg += ' (neither cardinal points nor .hpi file found)' + no_msg += " (neither cardinal points nor .hpi file found)" logger.info(no_msg) -def _first_hpi(fname): - # Get the first HPI result - with open(fname, 'r') as fid: - for line in fid: - line = line.strip() - if any(x in line for x in ('FileVersion', 'NumCoils')) or not line: - continue - hpi = np.array(line.split(), float) - break - else: - raise RuntimeError('Could not find valid HPI in %s' % (fname,)) - # t is the first entry - assert hpi.ndim == 1 - hpi = hpi[1:] - hpi.shape = (-1, 5) - hpi /= 1000. - return hpi - - -def _read_events_curry(fname): - """Read events from Curry event files. - - Parameters - ---------- - fname : path-like - Path to a curry event file with extensions .cef, .ceo, - .cdt.cef, or .cdt.ceo - - Returns - ------- - events : ndarray, shape (n_events, 3) - The array of events. - """ - check_fname(fname, 'curry event', ('.cef', '.ceo', '.cdt.cef', '.cdt.ceo'), - endings_err=('.cef', '.ceo', '.cdt.cef', '.cdt.ceo')) - - events_dict = _read_curry_lines(fname, ["NUMBER_LIST"]) - # The first 3 column seem to contain the event information - curry_events = np.array(events_dict["NUMBER_LIST"], dtype=int)[:, 0:3] - - return curry_events - - -def _read_annotations_curry(fname, sfreq='auto'): - r"""Read events from Curry event files. - - Parameters - ---------- - fname : str - The filename. - sfreq : float | 'auto' - The sampling frequency in the file. If set to 'auto' then the - ``sfreq`` is taken from the respective info file of the same name with - according file extension (\*.dap for Curry 7; \*.cdt.dpa for Curry8). - So data.cef looks in data.dap and data.cdt.cef looks in data.cdt.dpa. - - Returns - ------- - annot : instance of Annotations | None - The annotations. - """ - required = ["events", "info"] if sfreq == 'auto' else ["events"] - curry_paths = _get_curry_file_structure(fname, required) - events = _read_events_curry(curry_paths['events']) - - if sfreq == 'auto': - sfreq = _read_curry_parameters(curry_paths['info']).sfreq - - onset = events[:, 0] / sfreq - duration = np.zeros(events.shape[0]) - description = events[:, 2] - - return Annotations(onset, duration, description) - - @verbose -def read_raw_curry(fname, preload=False, verbose=None): +def read_raw_curry( + fname, preload=False, on_bad_hpi_match="warn", verbose=None +) -> "RawCurry": """Read raw data from Curry files. + .. 
versionchanged:: 1.11 + This function now requires ``curryreader`` to be installed. + Parameters ---------- fname : path-like - Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``, - ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``. + Path to a valid curry file. %(preload)s + %(on_bad_hpi_match)s %(verbose)s Returns @@ -493,7 +735,20 @@ def read_raw_curry(fname, preload=False, verbose=None): -------- mne.io.Raw : Documentation of attributes and methods of RawCurry. """ - return RawCurry(fname, preload, verbose) + fname = _check_curry_filename(fname) + fname_hdr = _check_curry_header_filename(fname) + + _check_curry_sfreq_consistency(fname_hdr) + + rectype = _get_curry_recording_type(fname) + + inst = RawCurry(fname, preload, on_bad_hpi_match, verbose) + if rectype in ["epochs", "evoked"]: + curry_epoch_info = _get_curry_epoch_info(fname) + inst = Epochs(inst, **curry_epoch_info) + if rectype == "evoked": + raise NotImplementedError # not sure this is even supported format + return inst class RawCurry(BaseRaw): @@ -502,9 +757,9 @@ class RawCurry(BaseRaw): Parameters ---------- fname : path-like - Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``, - ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``. + Path to a valid curry file. %(preload)s + %(on_bad_hpi_match)s %(verbose)s See Also @@ -514,41 +769,152 @@ class RawCurry(BaseRaw): """ @verbose - def __init__(self, fname, preload=False, verbose=None): - - curry_paths = _get_curry_file_structure( - fname, required=["info", "data", "labels"]) - - data_fname = op.abspath(curry_paths['data']) - - info, n_samples, is_ascii = _read_curry_info(curry_paths) - + def __init__(self, fname, preload=False, on_bad_hpi_match="warn", verbose=None): + fname = _check_curry_filename(fname) + + ( + sfreq, + n_samples, + ch_names, + ch_types, + ch_pos, + landmarks, + landmarkslabels, + hpimatrix, + events, + event_desc, + orig_format, + orig_units, + cals, + ) = _extract_curry_info(fname) + + meas_date, is_ascii, device_info = _get_curry_meas_info(fname) + + # construct info + info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) + info["device_info"] = device_info + + # create raw object last_samps = [n_samples - 1] raw_extras = dict(is_ascii=is_ascii) - - super(RawCurry, self).__init__( - info, preload, filenames=[data_fname], last_samps=last_samps, - orig_format='int', raw_extras=[raw_extras], verbose=verbose) - - if 'events' in curry_paths: - logger.info('Event file found. Extracting Annotations from' - ' %s...' % curry_paths['events']) - annots = _read_annotations_curry(curry_paths['events'], - sfreq=self.info["sfreq"]) - self.set_annotations(annots) - else: - logger.info('Event file not found. 
No Annotations set.') + super().__init__( + info, + preload=False, + filenames=[fname], + last_samps=last_samps, + orig_format=orig_format, + raw_extras=[raw_extras], + orig_units=orig_units, + verbose=verbose, + ) + + # set meas_date + self.set_meas_date(meas_date) + + # scale data to SI units + self._cals = np.array(cals) + if isinstance(preload, bool | np.bool_) and preload: + self.load_data() + + # set events / annotations + # format from curryreader: sample, etype, startsample, endsample + if isinstance(events, np.ndarray): # if there are events + events = events.astype("int") + events = np.insert(events, 1, np.diff(events[:, 2:]).flatten(), axis=1)[ + :, :3 + ] + annot = annotations_from_events(events, sfreq, event_desc=event_desc) + self.set_annotations(annot) + + # add HPI data (if present) + # TODO - FUTURE ENHANCEMENT + # from curryreader docstring: + # "HPI-coil measurements matrix (Orion-MEG only) where every row is: + # [measurementsample, dipolefitflag, x, y, z, deviation]" + # + # that's incorrect, though. it ratehr seems to be: + # [sample, dipole_1, x_1,y_1, z_1, dev_1, ..., dipole_n, x_n, ...] + # for all n coils. + # + # Do not implement cHPI reader for now. + # Can be used for dev-head transform, though! + if not isinstance(hpimatrix, list): + # warn("cHPI data found, but reader not implemented.") + hpisamples = hpimatrix[:, 0] + n_coil = int((hpimatrix.shape[1] - 1) / 5) + hpimatrix = hpimatrix[:, 1:].reshape(hpimatrix.shape[0], n_coil, 5) / 1000 + logger.info(f"found {len(hpisamples)} cHPI samples for {n_coil} coils") + + # add sensor locations + # TODO - REVIEW NEEDED + assert len(self.info["ch_names"]) == len(ch_types) >= len(ch_pos) + _set_chanloc_curry( + inst=self, + ch_types=ch_types, + ch_pos=ch_pos, + landmarks=landmarks, + landmarkslabels=landmarkslabels, + hpimatrix=hpimatrix, + on_bad_hpi_match=on_bad_hpi_match, + ) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" - if self._raw_extras[fi]['is_ascii']: + if self._raw_extras[fi]["is_ascii"]: if isinstance(idx, slice): idx = np.arange(idx.start, idx.stop) block = np.loadtxt( - self._filenames[0], skiprows=start, max_rows=stop - start, - ndmin=2).T + self.filenames[0], skiprows=start, max_rows=stop - start, ndmin=2 + ).T _mult_cal_one(data, block, idx, cals, mult) else: _read_segments_file( - self, data, idx, fi, start, stop, cals, mult, dtype=" -# # +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
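Before the tests that follow, a minimal usage sketch of the reworked reader API. The file name here is hypothetical; the functions and the ``on_bad_hpi_match`` option come from the patch above, and epoched recordings are returned as ``Epochs`` rather than ``Raw``:

    import mne
    from mne.io.curry import read_raw_curry, read_impedances_curry
    from mne.channels import read_dig_curry

    # hypothetical Curry 8 recording
    raw = read_raw_curry("recording.cdt", preload=True, on_bad_hpi_match="warn")
    events, event_id = mne.events_from_annotations(raw)
    montage = read_dig_curry("recording.cdt")       # DigMontage with EEG positions
    _, impedances = read_impedances_curry("recording.cdt")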
from datetime import datetime, timezone from pathlib import Path from shutil import copyfile -import pytest import numpy as np +import pytest from numpy.testing import assert_allclose, assert_array_equal -from mne.annotations import events_from_annotations +from mne._fiff.constants import FIFF +from mne._fiff.tag import _loc_to_coil_trans +from mne.annotations import events_from_annotations, read_annotations from mne.bem import _fit_sphere +from mne.channels import DigMontage, read_dig_curry from mne.datasets import testing +from mne.epochs import Epochs from mne.event import find_events -from mne.io import _loc_to_coil_trans -from mne.io.constants import FIFF -from mne.io.edf import read_raw_bdf from mne.io.bti import read_raw_bti -from mne.io.curry import read_raw_curry +from mne.io.curry import read_impedances_curry, read_raw_curry +from mne.io.curry.curry import ( + _check_curry_filename, + _check_curry_header_filename, + _get_curry_version, +) +from mne.io.edf import read_raw_bdf from mne.io.tests.test_raw import _test_raw_reader from mne.utils import catch_logging -from mne.annotations import read_annotations -from mne.io.curry.curry import (_get_curry_version, _get_curry_file_structure, - _read_events_curry, FILE_EXTENSIONS) + +pytest.importorskip("curryreader") data_dir = testing.data_path(download=False) curry_dir = data_dir / "curry" bdf_file = data_dir / "BDF" / "test_bdf_stim_channel.bdf" -bti_rfDC_file = data_dir / "BTi" / "erm_HFH" / "c,rfDC" -curry7_rfDC_file = curry_dir / "c,rfDC Curry 7.dat" -curry8_rfDC_file = curry_dir / "c,rfDC Curry 8.cdt" curry7_bdf_file = curry_dir / "test_bdf_stim_channel Curry 7.dat" curry7_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 7 ASCII.dat" curry8_bdf_file = curry_dir / "test_bdf_stim_channel Curry 8.cdt" curry8_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 8 ASCII.cdt" -missing_event_file = curry_dir / "test_sfreq_0.dat" Ref_chan_omitted_file = curry_dir / "Ref_channel_omitted Curry7.dat" -Ref_chan_omitted_reordered_file = ( - curry_dir / "Ref_channel_omitted reordered Curry7.dat" -) +Ref_chan_omitted_reordered_file = curry_dir / "Ref_channel_omitted reordered Curry7.dat" +curry_epoched_file = curry_dir / "Epoched.cdt" +curry_hpi_file = curry_dir / "HPI.cdt" +bti_rfDC_file = data_dir / "BTi" / "erm_HFH" / "c,rfDC" +curry7_rfDC_file = curry_dir / "c,rfDC Curry 7.dat" +curry8_rfDC_file = curry_dir / "c,rfDC Curry 8.cdt" -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def bdf_curry_ref(): """Return a view of the reference bdf used to create the curry files.""" - raw = read_raw_bdf(bdf_file, preload=True).drop_channels(['Status']) + raw = read_raw_bdf(bdf_file, preload=True).drop_channels(["Status"]) return raw @testing.requires_testing_data -@pytest.mark.parametrize('fname,tol', [ - pytest.param(curry7_bdf_file, 1e-7, id='curry 7'), - pytest.param(curry8_bdf_file, 1e-7, id='curry 8'), - pytest.param(curry7_bdf_ascii_file, 1e-4, id='curry 7 ascii'), - pytest.param(curry8_bdf_ascii_file, 1e-4, id='curry 8 ascii'), -]) -@pytest.mark.parametrize('preload', [True, False]) +@pytest.mark.parametrize( + "fname,tol", + [ + pytest.param(curry7_bdf_file, 1e-7, id="curry 7"), + pytest.param(curry8_bdf_file, 1e-7, id="curry 8"), + pytest.param(curry7_bdf_ascii_file, 1e-4, id="curry 7 ascii"), + pytest.param(curry8_bdf_ascii_file, 1e-4, id="curry 8 ascii"), + ], +) +@pytest.mark.parametrize("preload", [True, False]) def test_read_raw_curry(fname, tol, preload, bdf_curry_ref): """Test reading CURRY files.""" raw = 
read_raw_curry(fname, preload=preload) - assert hasattr(raw, '_data') == preload + assert hasattr(raw, "_data") == preload assert raw.n_times == bdf_curry_ref.n_times - assert raw.info['sfreq'] == bdf_curry_ref.info['sfreq'] + assert raw.info["sfreq"] == bdf_curry_ref.info["sfreq"] - for field in ['kind', 'ch_name']: - assert_array_equal([ch[field] for ch in raw.info['chs']], - [ch[field] for ch in bdf_curry_ref.info['chs']]) + for field in ["kind", "ch_name"]: + assert_array_equal( + [ch[field] for ch in raw.info["chs"]], + [ch[field] for ch in bdf_curry_ref.info["chs"]], + ) - assert_allclose(raw.get_data(verbose='error'), - bdf_curry_ref.get_data(), atol=tol) + assert_allclose(raw.get_data(verbose="error"), bdf_curry_ref.get_data(), atol=tol) picks, start, stop = ["C3", "C4"], 200, 800 assert_allclose( - raw.get_data(picks=picks, start=start, stop=stop, verbose='error'), + raw.get_data(picks=picks, start=start, stop=stop, verbose="error"), bdf_curry_ref.get_data(picks=picks, start=start, stop=stop), - rtol=tol) - assert raw.info['dev_head_t'] is None + rtol=tol, + ) + assert not raw.info["dev_head_t"] @testing.requires_testing_data -@pytest.mark.parametrize('fname', [ - pytest.param(curry7_bdf_file, id='curry 7'), - pytest.param(curry8_bdf_file, id='curry 8'), - pytest.param(curry7_bdf_ascii_file, id='curry 7 ascii'), - pytest.param(curry8_bdf_ascii_file, id='curry 8 ascii'), -]) +def test_read_raw_curry_epoched(): + """Test reading epoched file.""" + ep = read_raw_curry(curry_epoched_file) + assert isinstance(ep, Epochs) + assert len(ep.events) == 26 + assert len(ep.annotations) == 0 + + +GOOD_HPI_MATCH = """ +FileVersion: 804 +NumCoils: 10 + +0 1 52.73 -74.87 111.56 0.002538 1 57.87 23.21 126.11 0.002692 1 -3.68 -18.54 130.38 0.008380 1 -19.40 49.74 90.95 0.008395 1 -56.17 -6.00 62.95 0.003832 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 +4100 1 50.54 -62.26 151.17 0.002511 1 28.10 56.94 148.74 0.002720 1 -21.68 -43.61 175.47 0.008313 1 -57.26 23.61 147.11 0.008390 1 -80.77 -32.52 125.38 0.003828 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 + +""" # noqa: E501 + + +@pytest.mark.parametrize("good_match", [True, False]) +@testing.requires_testing_data +def test_read_raw_curry_hpi(good_match, tmp_path): + """Test reading hpi file.""" + fname = curry_hpi_file + if not good_match: + # real data does not have a good fit + read_raw_curry(fname, on_bad_hpi_match="ignore") + with pytest.warns(match="Poor HPI matching"): + read_raw_curry(fname, on_bad_hpi_match="warn") + with pytest.raises(ValueError, match="Poor HPI matching"): + read_raw_curry(fname, on_bad_hpi_match="raise") + else: + # tweak HPI point to have a good fit + for ext in (".cdt", ".cdt.dpa"): + src = fname.with_suffix(ext) + dst = tmp_path / fname.with_suffix(ext).name + copyfile(src, dst) + fname = tmp_path / fname.name + with open(fname.with_suffix(fname.suffix + ".hpi"), "w") as fid: + fid.write(GOOD_HPI_MATCH) + read_raw_curry(fname, on_bad_hpi_match="ignore") + read_raw_curry(fname, on_bad_hpi_match="warn") + read_raw_curry(fname, on_bad_hpi_match="raise") + + +@testing.requires_testing_data +@pytest.mark.parametrize( + "fname", + [ + pytest.param(curry7_bdf_file, id="curry 7"), + pytest.param(curry8_bdf_file, id="curry 8"), + pytest.param(curry7_bdf_ascii_file, id="curry 7 ascii"), + pytest.param(curry8_bdf_ascii_file, id="curry 8 ascii"), + ], +) def 
test_read_raw_curry_test_raw(fname): """Test read_raw_curry with _test_raw_reader.""" _test_raw_reader(read_raw_curry, fname=fname) @@ -190,30 +246,36 @@ def test_read_raw_curry_test_raw(fname): """ WANT_TRANS = np.array( - [[0.99729224, -0.07353067, -0.00119791, 0.00126953], - [0.07319243, 0.99085848, 0.11332405, 0.02670814], - [-0.00714583, -0.11310488, 0.99355736, 0.04721836], - [0., 0., 0., 1.]]) + [ + [0.99729224, -0.07353067, -0.00119791, 0.00126953], + [0.07319243, 0.99085848, 0.11332405, 0.02670814], + [-0.00714583, -0.11310488, 0.99355736, 0.04721836], + [0.0, 0.0, 0.0, 1.0], + ] +) @testing.requires_testing_data -@pytest.mark.parametrize('fname,tol', [ - pytest.param(curry7_rfDC_file, 1e-6, id='curry 7'), - pytest.param(curry8_rfDC_file, 1e-3, id='curry 8'), -]) -@pytest.mark.parametrize('mock_dev_head_t', [True, False]) +@pytest.mark.parametrize( + "fname,tol", + [ + pytest.param(curry7_rfDC_file, 1e-6, id="curry 7"), + pytest.param(curry8_rfDC_file, 1e-3, id="curry 8"), + ], +) +@pytest.mark.parametrize("mock_dev_head_t", [True, False]) def test_read_raw_curry_rfDC(fname, tol, mock_dev_head_t, tmp_path): """Test reading CURRY files.""" if mock_dev_head_t: - if 'Curry 7' in fname.name: # not supported yet + if "Curry 7" in fname.name: # not supported yet return # copy files to tmp_path - for ext in ('.cdt', '.cdt.dpa'): + for ext in (".cdt", ".cdt.dpa"): src = fname.with_suffix(ext) dst = tmp_path / fname.with_suffix(ext).name copyfile(src, dst) - if ext == '.cdt.dpa': - with open(dst, 'a') as fid: + if ext == ".cdt.dpa": + with open(dst, "a") as fid: fid.write(LM_CONTENT) fname = tmp_path / fname.name with open(fname.with_suffix(fname.suffix + ".hpi"), "w") as fid: @@ -225,49 +287,49 @@ def test_read_raw_curry_rfDC(fname, tol, mock_dev_head_t, tmp_path): raw = read_raw_curry(fname, verbose=True) log = log.getvalue() if mock_dev_head_t: - assert 'Composing device' in log + assert "Composing device" in log else: - assert 'Leaving device' in log - assert 'no landmark' in log + assert "Leaving device" in log + assert "no landmark" in log # test on the eeg chans, since these were not renamed by curry - eeg_names = [ch["ch_name"] for ch in raw.info["chs"] - if ch["kind"] == FIFF.FIFFV_EEG_CH] + eeg_names = [ + ch["ch_name"] for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_EEG_CH + ] - assert_allclose(raw.get_data(eeg_names), - bti_rfDC.get_data(eeg_names), rtol=tol) - assert bti_rfDC.info['dev_head_t'] is not None # XXX probably a BTI bug + assert_allclose(raw.get_data(eeg_names), bti_rfDC.get_data(eeg_names), rtol=tol) + assert bti_rfDC.info["dev_head_t"] is not None # XXX probably a BTI bug if mock_dev_head_t: - assert raw.info['dev_head_t'] is not None - assert_allclose(raw.info['dev_head_t']['trans'], WANT_TRANS, atol=1e-5) + assert raw.info["dev_head_t"] is not None + assert_allclose(raw.info["dev_head_t"]["trans"], WANT_TRANS, atol=1e-5) else: - assert raw.info['dev_head_t'] is None + assert not raw.info["dev_head_t"] # check that most MEG sensors are approximately oriented outward from # the device origin n_meg = n_eeg = n_other = 0 pos = list() nn = list() - for ch in raw.info['chs']: - if ch['kind'] == FIFF.FIFFV_MEG_CH: - assert ch['coil_type'] == FIFF.FIFFV_COIL_CTF_GRAD - t = _loc_to_coil_trans(ch['loc']) + for ch in raw.info["chs"]: + if ch["kind"] == FIFF.FIFFV_MEG_CH: + assert ch["coil_type"] == FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD + t = _loc_to_coil_trans(ch["loc"]) pos.append(t[:3, 3]) nn.append(t[:3, 2]) - assert_allclose(np.linalg.norm(nn[-1]), 1.) 
+ assert_allclose(np.linalg.norm(nn[-1]), 1.0) n_meg += 1 - elif ch['kind'] == FIFF.FIFFV_EEG_CH: - assert ch['coil_type'] == FIFF.FIFFV_COIL_EEG + elif ch["kind"] == FIFF.FIFFV_EEG_CH: + assert ch["coil_type"] == FIFF.FIFFV_COIL_EEG n_eeg += 1 else: - assert ch['coil_type'] == FIFF.FIFFV_COIL_NONE + assert ch["coil_type"] == FIFF.FIFFV_COIL_NONE n_other += 1 assert n_meg == 148 assert n_eeg == 31 assert n_other == 15 pos = np.array(pos) nn = np.array(nn) - rad, origin = _fit_sphere(pos, disp=False) + rad, origin = _fit_sphere(pos) assert 0.11 < rad < 0.13 pos -= origin pos /= np.linalg.norm(pos, axis=1, keepdims=True) @@ -276,19 +338,21 @@ def test_read_raw_curry_rfDC(fname, tol, mock_dev_head_t, tmp_path): @testing.requires_testing_data -@pytest.mark.parametrize('fname', [ - pytest.param(curry7_bdf_file, id='curry 7'), - pytest.param(curry8_bdf_file, id='curry 8'), -]) +@pytest.mark.parametrize( + "fname", + [ + pytest.param(curry7_bdf_file, id="curry 7"), + pytest.param(curry8_bdf_file, id="curry 8"), + ], +) def test_read_events_curry_are_same_as_bdf(fname): """Test events from curry annotations recovers the right events.""" EVENT_ID = {str(ii): ii for ii in range(5)} REF_EVENTS = find_events(read_raw_bdf(bdf_file, preload=True)) - raw = read_raw_curry(fname) events, _ = events_from_annotations(raw, event_id=EVENT_ID) assert_allclose(events, REF_EVENTS) - assert raw.info['dev_head_t'] is None + assert not raw.info["dev_head_t"] @testing.requires_testing_data @@ -296,44 +360,57 @@ def test_check_missing_files(): """Test checking for missing curry files (smoke test).""" invalid_fname = "/invalid/path/name.xy" - with pytest.raises(IOError, match="file type .*? must end with"): - _read_events_curry(invalid_fname) + with pytest.raises(FileNotFoundError, match="no curry data file"): + _check_curry_filename(invalid_fname) + + with pytest.raises(FileNotFoundError, match="no corresponding header"): + _check_curry_header_filename(invalid_fname) - with pytest.raises(FileNotFoundError, match='does not exist'): - _get_curry_file_structure(invalid_fname) + with pytest.raises(FileNotFoundError, match="no curry data file"): + read_raw_curry(invalid_fname) - with pytest.raises(FileNotFoundError, match="files cannot be found"): - _get_curry_file_structure(missing_event_file, - required=["info", "events"]) + with pytest.raises(FileNotFoundError, match="no curry data file"): + read_impedances_curry(invalid_fname) def _mock_info_file(src, dst, sfreq, time_step): - with open(src, 'r') as in_file, open(dst, 'w') as out_file: + with open(src) as in_file, open(dst, "w") as out_file: for line in in_file: - if 'SampleFreqHz' in line: - out_file.write(line.replace('500', str(sfreq))) - elif 'SampleTimeUsec' in line: - out_file.write(line.replace('2000', str(time_step))) + if "SampleFreqHz" in line: + out_file.write(line.replace("500", str(sfreq))) + elif "SampleTimeUsec" in line: + out_file.write(line.replace("2000", str(time_step))) else: out_file.write(line) -@pytest.fixture(params=[ - pytest.param(dict(sfreq=500, time_step=0), id='correct sfreq'), - pytest.param(dict(sfreq=0, time_step=2000), id='correct time_step'), - pytest.param(dict(sfreq=500, time_step=2000), id='both correct'), - pytest.param(dict(sfreq=0, time_step=0), id='both 0', - marks=pytest.mark.xfail(raises=ValueError)), - pytest.param(dict(sfreq=500, time_step=42), id='mismatch', - marks=pytest.mark.xfail(raises=ValueError)), -]) +# In the new version based on curryreader package, time_step is always prioritized, i.e. 
+# sfreq in the header file will be ignored and overridden by sampling interval +@pytest.fixture( + params=[ + pytest.param( + dict(sfreq=500, time_step=1), + id="correct sfreq", + ), + pytest.param(dict(sfreq=0, time_step=2000), id="correct time_step"), + pytest.param(dict(sfreq=500, time_step=2000), id="both correct"), + pytest.param( + dict(sfreq=0, time_step=0), + id="both 0", + ), + pytest.param( + dict(sfreq=500, time_step=42), + id="mismatch", + ), + ] +) def sfreq_testing_data(tmp_path, request): """Generate different sfreq, time_step scenarios to be tested.""" - sfreq, time_step = request.param['sfreq'], request.param['time_step'] + sfreq, time_step = request.param["sfreq"], request.param["time_step"] # create dummy empty files for 'dat' and 'rs3' for fname in ["curry.dat", "curry.rs3"]: - open(tmp_path / fname, 'a').close() + open(tmp_path / fname, "a").close() _mock_info_file( src=curry7_bdf_file.with_suffix(".dap"), @@ -347,111 +424,194 @@ def sfreq_testing_data(tmp_path, request): sfreq=sfreq, time_step=time_step, ) - return tmp_path / "curry.dat" + copyfile(curry7_bdf_file, tmp_path / "curry.dat") + + return tmp_path / "curry.dat", sfreq, time_step @testing.requires_testing_data def test_sfreq(sfreq_testing_data): """Test sfreq and time_step.""" - raw = read_raw_curry(sfreq_testing_data, preload=False) - assert raw.info['sfreq'] == 500 + fname, sfreq, time_step = sfreq_testing_data + if time_step == 0: + with pytest.raises(ValueError, match="sampling interval of 0µs."): + read_raw_curry(fname, preload=False) + else: + if sfreq != 1e6 / time_step: + with pytest.warns( + RuntimeWarning, match="sfreq will be derived from sample distance." + ): + raw = read_raw_curry(fname, preload=False) + else: + raw = read_raw_curry(fname, preload=False) + assert raw.info["sfreq"] == 1e6 / time_step @testing.requires_testing_data -@pytest.mark.parametrize('fname', [ - pytest.param(curry_dir / "test_bdf_stim_channel Curry 7.cef", id="7"), - pytest.param(curry_dir / "test_bdf_stim_channel Curry 8.cdt.cef", id="8"), - pytest.param(curry_dir / "test_bdf_stim_channel Curry 7 ASCII.cef", - id="7 ascii"), - pytest.param(curry_dir / "test_bdf_stim_channel Curry 8 ASCII.cdt.cef", - id="8 ascii"), -]) +@pytest.mark.parametrize( + "fname", + [ + pytest.param(curry_dir / "test_bdf_stim_channel Curry 7.cef", id="7"), + pytest.param( + curry_dir / "test_bdf_stim_channel Curry 7 ASCII.cef", id="7 ascii" + ), + pytest.param(curry_dir / "test_bdf_stim_channel Curry 8.cdt.cef", id="8"), + pytest.param( + curry_dir / "test_bdf_stim_channel Curry 8 ASCII.cdt.cef", id="8 ascii" + ), + ], +) def test_read_curry_annotations(fname): """Test reading for Curry events file.""" - EXPECTED_ONSET = [0.484, 0.486, 0.62, 0.622, 1.904, 1.906, 3.212, 3.214, - 4.498, 4.5, 5.8, 5.802, 7.074, 7.076, 8.324, 8.326, 9.58, - 9.582] + EXPECTED_ONSET = [ + 0.484, + 0.486, + 0.62, + 0.622, + 1.904, + 1.906, + 3.212, + 3.214, + 4.498, + 4.5, + 5.8, + 5.802, + 7.074, + 7.076, + 8.324, + 8.326, + 9.58, + 9.582, + ] EXPECTED_DURATION = np.zeros_like(EXPECTED_ONSET) - EXPECTED_DESCRIPTION = ['4', '50000', '2', '50000', '1', '50000', '1', - '50000', '1', '50000', '1', '50000', '1', '50000', - '1', '50000', '1', '50000'] - - annot = read_annotations(fname, sfreq='auto') + EXPECTED_DESCRIPTION = [ + "4", + "50000", + "2", + "50000", + "1", + "50000", + "1", + "50000", + "1", + "50000", + "1", + "50000", + "1", + "50000", + "1", + "50000", + "1", + "50000", + ] + + annot = read_annotations(fname, sfreq="auto") assert annot.orig_time is None 
assert_array_equal(annot.onset, EXPECTED_ONSET) assert_array_equal(annot.duration, EXPECTED_DURATION) assert_array_equal(annot.description, EXPECTED_DESCRIPTION) + with pytest.raises(ValueError, match="must be numeric or 'auto'"): + _ = read_annotations(fname, sfreq="nonsense") + with pytest.warns(RuntimeWarning, match="does not match freq from fileheader"): + _ = read_annotations(fname, sfreq=12.0) + + +FILE_EXTENSIONS = { + "Curry 7": { + "info": ".dap", + "data": ".dat", + "labels": ".rs3", + "events_cef": ".cef", + "events_ceo": ".ceo", + "hpi": ".hpi", + }, + "Curry 8": { + "info": ".cdt.dpa", + "data": ".cdt", + "labels": ".cdt.dpa", + "events_cef": ".cdt.cef", + "events_ceo": ".cdt.ceo", + "hpi": ".cdt.hpi", + }, + "Curry 9": { + "info": ".cdt.dpo", + "data": ".cdt", + "labels": ".cdt.dpo", + "events_cef": ".cdt.cef", + "events_ceo": ".cdt.ceo", + "hpi": ".cdt.hpi", + }, +} + -def _get_read_annotations_mock_info(name_part, mock_dir): +@testing.requires_testing_data +@pytest.mark.parametrize( + "fname", + [ + pytest.param(curry7_bdf_file, id="7"), + pytest.param(curry8_bdf_file, id="8"), + pytest.param(curry7_bdf_ascii_file, id="7 (ascii)"), + pytest.param(curry8_bdf_ascii_file, id="8 (ascii)"), + ], +) +def test_incomplete_file_suite(tmp_path, fname): + """Test reading incomplete Curry filesets.""" original, modified = dict(), dict() - original["event"] = curry_dir / ("test_bdf_stim_channel " + name_part) - original["base"], ext = str(original["event"]).split(".", maxsplit=1) - original["base"] = Path(original["base"]) - version = _get_curry_version(ext) - original["info"] = original["base"].with_suffix( - FILE_EXTENSIONS[version]["info"] - ) + version = _get_curry_version(fname) + + original["base"] = fname.with_suffix("") + original["event"] = fname.with_suffix(FILE_EXTENSIONS[version]["events_cef"]) + original["info"] = fname.with_suffix(FILE_EXTENSIONS[version]["info"]) + original["data"] = fname.with_suffix(FILE_EXTENSIONS[version]["data"]) + original["labels"] = fname.with_suffix(FILE_EXTENSIONS[version]["labels"]) - modified["base"] = mock_dir / "curry" + modified["base"] = tmp_path / "curry" modified["event"] = modified["base"].with_suffix( FILE_EXTENSIONS[version]["events_cef"] ) - modified["info"] = modified["base"].with_suffix( - FILE_EXTENSIONS[version]["info"] + modified["info"] = modified["base"].with_suffix(FILE_EXTENSIONS[version]["info"]) + modified["data"] = modified["base"].with_suffix(FILE_EXTENSIONS[version]["data"]) + modified["labels"] = modified["base"].with_suffix( + FILE_EXTENSIONS[version]["labels"] ) - return original, modified - - -@testing.requires_testing_data -@pytest.mark.parametrize('name_part', [ - pytest.param('7.cef', id='7'), - pytest.param('8.cdt.cef', id='8'), - pytest.param('7 ASCII.cef', id='7 (ascii)'), - pytest.param('8 ASCII.cdt.cef', id='8 (ascii)'), -]) -def test_read_curry_annotations_using_mocked_info(tmp_path, name_part): - """Test reading for Curry events file.""" - EXPECTED_ONSET = [0.484, 0.486, 0.62, 0.622, 1.904, 1.906, 3.212, 3.214, - 4.498, 4.5, 5.8, 5.802, 7.074, 7.076, 8.324, 8.326, 9.58, - 9.582] - EXPECTED_DURATION = np.zeros_like(EXPECTED_ONSET) - EXPECTED_DESCRIPTION = ['4', '50000', '2', '50000', '1', '50000', '1', - '50000', '1', '50000', '1', '50000', '1', '50000', - '1', '50000', '1', '50000'] - - original, fname = _get_read_annotations_mock_info( - "Curry " + name_part, tmp_path - ) - copyfile(src=original['event'], dst=fname['event']) + # only data + copyfile(src=original["data"], dst=modified["data"]) + _msg = 
rf"does not exist: .*{modified['event'].name}.*" + with pytest.raises(FileNotFoundError, match=_msg): + read_annotations(modified["event"], sfreq="auto") - _msg = 'required files cannot be found' + # events missing + copyfile(src=original["info"], dst=modified["info"]) with pytest.raises(FileNotFoundError, match=_msg): - read_annotations(fname['event'], sfreq='auto') + read_annotations(modified["event"], sfreq="auto") - _mock_info_file(src=original['info'], dst=fname['info'], - sfreq=0, time_step=2000) - - annot = read_annotations(fname['event'], sfreq='auto') - - assert annot.orig_time is None - assert_array_equal(annot.onset, EXPECTED_ONSET) - assert_array_equal(annot.duration, EXPECTED_DURATION) - assert_array_equal(annot.description, EXPECTED_DESCRIPTION) + # all there + copyfile(src=original["event"], dst=modified["event"]) + if not modified["labels"].exists(): + copyfile(src=original["labels"], dst=modified["labels"]) + read_raw_curry(modified["data"]) @testing.requires_testing_data -@pytest.mark.parametrize('fname,expected_channel_list', [ - pytest.param(Ref_chan_omitted_file, - ['FP1', 'FPZ', 'FP2', 'VEO', 'EKG', 'Trigger'], - id='Ref omitted, normal order'), - pytest.param(Ref_chan_omitted_reordered_file, - ['FP2', 'FPZ', 'FP1', 'VEO', 'EKG', 'Trigger'], - id='Ref omitted, reordered') -]) +@pytest.mark.parametrize( + "fname,expected_channel_list", + [ + pytest.param( + Ref_chan_omitted_file, + ["FP1", "FPZ", "FP2", "VEO", "EKG", "Trigger"], + id="Ref omitted, normal order", + ), + pytest.param( + Ref_chan_omitted_reordered_file, + ["FP2", "FPZ", "FP1", "VEO", "EKG", "Trigger"], + id="Ref omitted, reordered", + ), + ], +) def test_read_files_missing_channel(fname, expected_channel_list): """Test reading data files that has an omitted channel.""" # This for Git issue #8391. In some cases, the 'labels' (.rs3 file will @@ -467,35 +627,42 @@ def test_read_files_missing_channel(fname, expected_channel_list): @testing.requires_testing_data -@pytest.mark.parametrize('fname,expected_meas_date', [ - pytest.param(Ref_chan_omitted_file, - datetime(2018, 11, 21, 12, 53, 48, - 525000, tzinfo=timezone.utc), - id='valid start date'), - pytest.param(curry7_rfDC_file, - None, - id='start date year is 0'), - pytest.param(curry7_bdf_file, - None, - id='start date seconds invalid') -]) +@pytest.mark.parametrize( + "fname,expected_meas_date", + [ + pytest.param( + Ref_chan_omitted_file, + datetime(2018, 11, 21, 12, 53, 48, 525000, tzinfo=timezone.utc), + id="valid start date", + ), + pytest.param(curry7_rfDC_file, None, id="start date year is 0"), + pytest.param( + curry7_bdf_file, + None, + id="start date seconds invalid", + ), + ], +) def test_meas_date(fname, expected_meas_date): """Test reading acquisition start datetime info info['meas_date'].""" # This for Git issue #8398. The 'info' (.dap) file includes acquisition # start date & time. Test that this goes into raw.info['meas_date']. 
# If the information is not valid, raw.info['meas_date'] should be None raw = read_raw_curry(fname, preload=False) - assert raw.info['meas_date'] == expected_meas_date + assert raw.info["meas_date"] == expected_meas_date @testing.requires_testing_data -@pytest.mark.parametrize('fname, others', [ - pytest.param(curry7_rfDC_file, ('.dap', '.rs3'), id='curry7'), - pytest.param(curry8_rfDC_file, ('.cdt.dpa',), id='curry8'), -]) +@pytest.mark.parametrize( + "fname, others", + [ + pytest.param(curry7_bdf_file, (".dap", ".rs3"), id="curry7"), + pytest.param(curry8_bdf_file, (".cdt.cef", ".cdt.dpa"), id="curry8"), + ], +) def test_dot_names(fname, others, tmp_path): """Test that dots are parsed properly (e.g., in paths).""" - my_path = tmp_path / 'dot.dot.dot' + my_path = tmp_path / "dot.dot.dot" my_path.mkdir() my_path = my_path / Path(fname).parts[-1] fname = Path(fname) @@ -505,3 +672,95 @@ def test_dot_names(fname, others, tmp_path): to_fname = my_path.with_suffix(ext) copyfile(this_fname, to_fname) read_raw_curry(my_path) + + +@testing.requires_testing_data +def test_read_device_info(): + """Test extraction of device_info.""" + raw = read_raw_curry(curry7_bdf_file) + assert not raw.info["device_info"] + raw2 = read_raw_curry(Ref_chan_omitted_file) + assert isinstance(raw2.info["device_info"], dict) + + +@testing.requires_testing_data +@pytest.mark.parametrize( + "fname", + [ + pytest.param(curry7_bdf_file, id="curry 7"), + pytest.param(curry8_bdf_file, id="curry 8"), + pytest.param(curry7_bdf_ascii_file, id="curry 7 ascii"), + pytest.param(curry8_bdf_ascii_file, id="curry 8 ascii"), + ], +) +def test_read_impedances_curry(fname): + """Test reading impedances from CURRY files.""" + _, imp = read_impedances_curry(fname) + actual_imp = np.empty(shape=(0, 3)) # TODO - need better testing data + assert_allclose( + imp, + actual_imp, + ) + + +def _mock_info_noeeg(src, dst): + # artificially remove eeg channels + content_hdr = src.read_text() + if ".dap" in src.name: + # curry 7 + content_hdr_ = content_hdr + elif ".rs3" in src.name: + # curry 7 + content_hdr_ = ( + content_hdr.split("NUMBERS START")[0] + + content_hdr.split("TRANSFORM END_LIST")[-1] + ) + else: + # curry 8 + content_hdr_ = ( + content_hdr.split("LABELS START")[0] + + content_hdr.split("SENSORS END_LIST")[-1] + ) + # both + content_hdr_ = content_hdr_.replace( + "NumChannels = 194", "NumChannels = 163" + ) + content_hdr_ = content_hdr_.replace( + "NumChanThisGroup = 31", "NumChanThisGroup = 0" + ) + content_hdr_ = content_hdr_.replace( + "NumSensorsThisGroup = 31", "NumSensorsThisGroup = 0" + ) + with dst.open("w+") as f: + f.write(content_hdr_) + + +@testing.requires_testing_data +@pytest.mark.parametrize( + "fname,mont_present", + [ + pytest.param(curry7_bdf_file, True, id="curry 7"), + pytest.param(curry8_bdf_file, True, id="curry 8"), + pytest.param(curry7_bdf_ascii_file, True, id="curry 7 ascii"), + pytest.param(curry8_bdf_ascii_file, True, id="curry 8 ascii"), + pytest.param(curry7_rfDC_file, False, id="no eeg, curry 7"), + pytest.param(curry8_rfDC_file, False, id="no eeg, curry 8"), + pytest.param(curry_hpi_file, True, id="curry 8, w/ HPI data"), + ], +) +def test_read_montage_curry(tmp_path, fname, mont_present): + """Test reading montage from CURRY files.""" + if mont_present: + assert isinstance(read_dig_curry(fname), DigMontage) + else: + # copy files to tmp_path + for ext in (".cdt", ".cdt.hpi", ".cdt.dpa", ".dat", ".dap", ".rs3"): + src = fname.with_suffix(ext) + dst = tmp_path / fname.with_suffix(ext).name + if 
src.exists(): + if ext in [".cdt.dpa", ".dap", ".rs3"]: + _mock_info_noeeg(src, dst) + else: + copyfile(src, dst) + with pytest.raises(ValueError, match="No eeg sensor locations found"): + read_dig_curry(tmp_path / fname.name) diff --git a/mne/io/diff.py b/mne/io/diff.py deleted file mode 100644 index 46ded94848b..00000000000 --- a/mne/io/diff.py +++ /dev/null @@ -1,40 +0,0 @@ -# Author: Alexandre Gramfort -# -# License: BSD Style. - -import numpy as np - -from ..utils import logger, verbose - - -@verbose -def is_equal(first, second, verbose=None): - """Check if 2 python structures are the same. - - Designed to handle dict, list, np.ndarray etc. - """ - all_equal = True - # Check all keys in first dict - if type(first) != type(second): - all_equal = False - if isinstance(first, dict): - for key in first.keys(): - if (key not in second): - logger.info("Missing key %s in %s" % (key, second)) - all_equal = False - else: - if not is_equal(first[key], second[key]): - all_equal = False - elif isinstance(first, np.ndarray): - if not np.allclose(first, second): - all_equal = False - elif isinstance(first, list): - for a, b in zip(first, second): - if not is_equal(a, b): - logger.info('%s and\n%s are different' % (a, b)) - all_equal = False - else: - if first != second: - logger.info('%s and\n%s are different' % (first, second)) - all_equal = False - return all_equal diff --git a/mne/io/edf/__init__.py b/mne/io/edf/__init__.py index 221f6c7f698..49e41a7ea5b 100644 --- a/mne/io/edf/__init__.py +++ b/mne/io/edf/__init__.py @@ -1,7 +1,7 @@ """EDF+,BDF module for conversion to FIF.""" -# Author: Teon Brooks -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .edf import read_raw_edf, read_raw_bdf, read_raw_gdf diff --git a/mne/io/edf/edf.py b/mne/io/edf/edf.py index b3a7b38da36..7d73c287d39 100644 --- a/mne/io/edf/edf.py +++ b/mne/io/edf/edf.py @@ -1,57 +1,75 @@ -# -*- coding: utf-8 -*- """Reading tools from EDF, EDF+, BDF, and GDF.""" -# Authors: Teon Brooks -# Martin Billinger -# Nicolas Barascud -# Stefan Appelhoff -# Joan Massich -# Clemens Brunner -# Jeroen Van Der Donckt (IDlab - imec) -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-from datetime import datetime, timezone, timedelta import os import re +from datetime import date, datetime, timedelta, timezone +from enum import Enum +from pathlib import Path import numpy as np +from scipy.interpolate import interp1d -from ...utils import verbose, logger, warn, _validate_type -from ..utils import _blk_read_lims, _mult_cal_one -from ..base import BaseRaw, _get_scaling -from ..meas_info import _empty_info, _unique_channel_names -from ..constants import FIFF -from ...filter import resample -from ...utils import fill_doc +from ..._edf.open import _gdf_edf_get_fid +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info, _unique_channel_names +from ..._fiff.utils import _blk_read_lims, _mult_cal_one from ...annotations import Annotations +from ...filter import resample +from ...fixes import read_from_file_or_buffer +from ...utils import ( + _check_fname, + _file_like, + _validate_type, + fill_doc, + logger, + verbose, + warn, +) +from ..base import BaseRaw, _get_scaling + + +class FileType(Enum): + """Enumeration to differentiate files when the extension is not known.""" + + GDF = 1 + EDF = 2 + BDF = 3 # common channel type names mapped to internal ch types CH_TYPE_MAPPING = { - 'EEG': FIFF.FIFFV_EEG_CH, - 'SEEG': FIFF.FIFFV_SEEG_CH, - 'ECOG': FIFF.FIFFV_ECOG_CH, - 'DBS': FIFF.FIFFV_DBS_CH, - 'EOG': FIFF.FIFFV_EOG_CH, - 'ECG': FIFF.FIFFV_ECG_CH, - 'EMG': FIFF.FIFFV_EMG_CH, - 'BIO': FIFF.FIFFV_BIO_CH, - 'RESP': FIFF.FIFFV_RESP_CH, - 'TEMP': FIFF.FIFFV_TEMPERATURE_CH, - 'MISC': FIFF.FIFFV_MISC_CH, - 'SAO2': FIFF.FIFFV_BIO_CH, + "EEG": FIFF.FIFFV_EEG_CH, + "SEEG": FIFF.FIFFV_SEEG_CH, + "ECOG": FIFF.FIFFV_ECOG_CH, + "DBS": FIFF.FIFFV_DBS_CH, + "EOG": FIFF.FIFFV_EOG_CH, + "ECG": FIFF.FIFFV_ECG_CH, + "EMG": FIFF.FIFFV_EMG_CH, + "BIO": FIFF.FIFFV_BIO_CH, + "RESP": FIFF.FIFFV_RESP_CH, + "TEMP": FIFF.FIFFV_TEMPERATURE_CH, + "MISC": FIFF.FIFFV_MISC_CH, + "SAO2": FIFF.FIFFV_BIO_CH, + "STIM": FIFF.FIFFV_STIM_CH, } @fill_doc class RawEDF(BaseRaw): - """Raw object from EDF, EDF+ or BDF file. + """Raw object from EDF, EDF+ file. Parameters ---------- - input_fname : path-like - Path to the EDF, EDF+ or BDF file. + input_fname : path-like | file-like + Path to the EDF, EDF+ file. If a file-like object is provided, + preloading must be used. + + .. versionchanged:: 1.10 + Added support for file-like objects eog : list or tuple Names of channels or list of indices that should be designated EOG channels. Values should correspond to the electrodes in the file. @@ -87,16 +105,18 @@ class RawEDF(BaseRaw): %(preload)s %(units_edf_bdf_io)s %(encoding_edf)s + %(exclude_after_unique)s %(verbose)s See Also -------- mne.io.Raw : Documentation of attributes and methods. mne.io.read_raw_edf : Recommended way to read EDF/EDF+ files. - mne.io.read_raw_bdf : Recommended way to read BDF files. Notes ----- + %(edf_resamp_note)s + Biosemi devices trigger codes are encoded in 16-bit format, whereas system codes (CMS in/out-of range, battery low, etc.) are coded in bits 16-23 of the status channel (see http://www.biosemi.com/faq/trigger_signals.htm). @@ -123,7 +143,7 @@ class RawEDF(BaseRaw): >>> events[:, 2] >>= 8 # doctest:+SKIP - TAL channels called 'EDF Annotations' or 'BDF Annotations' are parsed and + TAL channels called 'EDF Annotations' are parsed and extracted annotations are stored in raw.annotations. Use :func:`mne.events_from_annotations` to obtain events from these annotations. 
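A minimal usage sketch of the two behaviors documented above, with a hypothetical file name: file-like input (added in 1.10) requires ``preload=True``, and the parsed 'EDF Annotations' end up in ``raw.annotations``, from which events can be derived. ``read_raw_bdf`` and ``read_raw_gdf`` accept file-like objects in the same way.

    >>> with open("recording.edf", "rb") as f:  # doctest:+SKIP
    ...     raw = mne.io.read_raw_edf(f, preload=True)  # doctest:+SKIP
    >>> events, event_id = mne.events_from_annotations(raw)  # doctest:+SKIP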
@@ -134,30 +154,55 @@ class RawEDF(BaseRaw): """ @verbose - def __init__(self, input_fname, eog=None, misc=None, stim_channel='auto', - exclude=(), infer_types=False, preload=False, include=None, - units=None, encoding='utf8', *, verbose=None): - logger.info('Extracting EDF parameters from {}...'.format(input_fname)) - input_fname = os.path.abspath(input_fname) - info, edf_info, orig_units = _get_info(input_fname, stim_channel, eog, - misc, exclude, infer_types, - preload, include) - logger.info('Creating raw.info structure...') - - _validate_type(units, (str, None, dict), 'units') + def __init__( + self, + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + infer_types=False, + preload=False, + include=None, + units=None, + encoding="utf8", + exclude_after_unique=False, + *, + verbose=None, + ): + if not _file_like(input_fname): + logger.info(f"Extracting EDF parameters from {input_fname}...") + input_fname = os.path.abspath(input_fname) + + info, edf_info, orig_units = _get_info( + input_fname, + stim_channel, + eog, + misc, + exclude, + infer_types, + FileType.EDF, + include, + exclude_after_unique, + ) + logger.info("Creating raw.info structure...") + edf_info["blob"] = input_fname if _file_like(input_fname) else None + + _validate_type(units, (str, None, dict), "units") if units is None: units = dict() elif isinstance(units, str): - units = {ch_name: units for ch_name in info['ch_names']} + units = {ch_name: units for ch_name in info["ch_names"]} for k, (this_ch, this_unit) in enumerate(orig_units.items()): if this_ch not in units: continue if this_unit not in ("", units[this_ch]): raise ValueError( - f'Unit for channel {this_ch} is present in the file as ' - f'{repr(this_unit)}, cannot overwrite it with the units ' - f'argument {repr(units[this_ch])}.') + f"Unit for channel {this_ch} is present in the file as " + f"{repr(this_unit)}, cannot overwrite it with the units " + f"argument {repr(units[this_ch])}." 
+ ) if this_unit == "": orig_units[this_ch] = units[this_ch] ch_type = edf_info["ch_types"][k] @@ -165,33 +210,265 @@ def __init__(self, input_fname, eog=None, misc=None, stim_channel='auto', edf_info["units"][k] /= scaling # Raw attributes - last_samps = [edf_info['nsamples'] - 1] - super().__init__(info, preload, filenames=[input_fname], - raw_extras=[edf_info], last_samps=last_samps, - orig_format='int', orig_units=orig_units, - verbose=verbose) + last_samps = [edf_info["nsamples"] - 1] + super().__init__( + info, + preload, + filenames=[_path_from_fname(input_fname)], + raw_extras=[edf_info], + last_samps=last_samps, + orig_format="int", + orig_units=orig_units, + verbose=verbose, + ) # Read annotations from file and set it - onset, duration, desc = list(), list(), list() - if len(edf_info['tal_idx']) > 0: + if len(edf_info["tal_idx"]) > 0: # Read TAL data exploiting the header info (no regexp) idx = np.empty(0, int) tal_data = self._read_segment_file( - np.empty((0, self.n_times)), idx, 0, 0, int(self.n_times), - np.ones((len(idx), 1)), None) - onset, duration, desc = _read_annotations_edf( + np.empty((0, self.n_times)), + idx, + 0, + 0, + int(self.n_times), + np.ones((len(idx), 1)), + None, + ) + annotations = _read_annotations_edf( tal_data[0], + ch_names=info["ch_names"], encoding=encoding, ) + self.set_annotations(annotations, on_missing="warn") + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + return _read_segment_file( + data, + idx, + fi, + start, + stop, + self._raw_extras[fi], + self.filenames[fi] + if self._raw_extras[fi]["blob"] is None + else self._raw_extras[fi]["blob"], + cals, + mult, + ) + + +def _path_from_fname(fname) -> Path | None: + if isinstance(fname, str | Path): + return Path(fname) + + # Try to get a filename from the file-like object + try: + return Path(fname.name) + except Exception: + return None + - self.set_annotations(Annotations(onset=onset, duration=duration, - description=desc, orig_time=None)) +@fill_doc +class RawBDF(BaseRaw): + """Raw object from BDF file. + + Parameters + ---------- + input_fname : path-like | file-like + Path to the BDF file. If a file-like object is provided, + preloading must be used. + + .. versionchanged:: 1.10 + Added support for file-like objects + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + stim_channel : ``'auto'`` | str | list of str | int | list of int + Defaults to ``'auto'``, which means that channels named ``'status'`` or + ``'trigger'`` (case insensitive) are set to STIM. If str (or list of + str), all channels matching the name(s) are set to STIM. If int (or + list of ints), the channels corresponding to the indices are set to + STIM. + exclude : list of str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. + infer_types : bool + If True, try to infer channel types from channel labels. If a channel + label starts with a known type (such as 'EEG') followed by a space and + a name (such as 'Fp1'), the channel type will be set accordingly, and + the channel will be renamed to the original label without the prefix. 
+ For unknown prefixes, the type will be 'EEG' and the name will not be + modified. If False, do not infer types and assume all channels are of + type 'EEG'. + + .. versionadded:: 0.24.1 + include : list of str | str + Channel names to be included. A str is interpreted as a regular + expression. 'exclude' must be empty if include is assigned. + + .. versionadded:: 1.1 + %(preload)s + %(units_edf_bdf_io)s + %(encoding_edf)s + %(exclude_after_unique)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + mne.io.read_raw_bdf : Recommended way to read BDF files. + + Notes + ----- + %(edf_resamp_note)s + + Biosemi devices trigger codes are encoded in 16-bit format, whereas system + codes (CMS in/out-of range, battery low, etc.) are coded in bits 16-23 of + the status channel (see http://www.biosemi.com/faq/trigger_signals.htm). + To retrieve correct event values (bits 1-16), one could do: + + >>> events = mne.find_events(...) # doctest:+SKIP + >>> events[:, 2] &= (2**16 - 1) # doctest:+SKIP + + The above operation can be carried out directly in :func:`mne.find_events` + using the ``mask`` and ``mask_type`` parameters (see + :func:`mne.find_events` for more details). + + It is also possible to retrieve system codes, but no particular effort has + been made to decode these in MNE. In case it is necessary, for instance to + check the CMS bit, the following operation can be carried out: + + >>> cms_bit = 20 # doctest:+SKIP + >>> cms_high = (events[:, 2] & (1 << cms_bit)) != 0 # doctest:+SKIP + + It is worth noting that in some special cases, it may be necessary to shift + event values in order to retrieve correct event triggers. This depends on + the triggering device used to perform the synchronization. For instance, in + some files events need to be shifted by 8 bits: + + >>> events[:, 2] >>= 8 # doctest:+SKIP + + TAL channels called 'BDF Annotations' are parsed and + extracted annotations are stored in raw.annotations. Use + :func:`mne.events_from_annotations` to obtain events from these + annotations. + + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. + """ + + @verbose + def __init__( + self, + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + infer_types=False, + preload=False, + include=None, + units=None, + encoding="utf8", + exclude_after_unique=False, + *, + verbose=None, + ): + if not _file_like(input_fname): + logger.info(f"Extracting BDF parameters from {input_fname}...") + input_fname = os.path.abspath(input_fname) + + info, edf_info, orig_units = _get_info( + input_fname, + stim_channel, + eog, + misc, + exclude, + infer_types, + FileType.BDF, + include, + exclude_after_unique, + ) + logger.info("Creating raw.info structure...") + edf_info["blob"] = input_fname if _file_like(input_fname) else None + + _validate_type(units, (str, None, dict), "units") + if units is None: + units = dict() + elif isinstance(units, str): + units = {ch_name: units for ch_name in info["ch_names"]} + + for k, (this_ch, this_unit) in enumerate(orig_units.items()): + if this_ch not in units: + continue + if this_unit not in ("", units[this_ch]): + raise ValueError( + f"Unit for channel {this_ch} is present in the file as " + f"{repr(this_unit)}, cannot overwrite it with the units " + f"argument {repr(units[this_ch])}." 
+ ) + if this_unit == "": + orig_units[this_ch] = units[this_ch] + ch_type = edf_info["ch_types"][k] + scaling = _get_scaling(ch_type.lower(), orig_units[this_ch]) + edf_info["units"][k] /= scaling + + # Raw attributes + last_samps = [edf_info["nsamples"] - 1] + super().__init__( + info, + preload, + filenames=[_path_from_fname(input_fname)], + raw_extras=[edf_info], + last_samps=last_samps, + orig_format="int", + orig_units=orig_units, + verbose=verbose, + ) + + # Read annotations from file and set it + if len(edf_info["tal_idx"]) > 0: + # Read TAL data exploiting the header info (no regexp) + idx = np.empty(0, int) + tal_data = self._read_segment_file( + np.empty((0, self.n_times)), + idx, + 0, + 0, + int(self.n_times), + np.ones((len(idx), 1)), + None, + ) + annotations = _read_annotations_edf( + tal_data[0], + ch_names=info["ch_names"], + encoding=encoding, + ) + self.set_annotations(annotations, on_missing="warn") def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" - return _read_segment_file(data, idx, fi, start, stop, - self._raw_extras[fi], self._filenames[fi], - cals, mult) + return _read_segment_file( + data, + idx, + fi, + start, + stop, + self._raw_extras[fi], + self.filenames[fi] + if self._raw_extras[fi]["blob"] is None + else self._raw_extras[fi]["blob"], + cals, + mult, + ) @fill_doc @@ -200,8 +477,12 @@ class RawGDF(BaseRaw): Parameters ---------- - input_fname : path-like - Path to the GDF file. + input_fname : path-like | file-like + Path to the GDF file. If a file-like object is provided, + preloading must be used. + + .. versionchanged:: 1.10 + Added support for file-like objects eog : list or tuple Names of channels or list of indices that should be designated EOG channels. Values should correspond to the electrodes in the file. 
@@ -241,73 +522,104 @@ class RawGDF(BaseRaw): """ @verbose - def __init__(self, input_fname, eog=None, misc=None, - stim_channel='auto', exclude=(), preload=False, include=None, - verbose=None): - logger.info('Extracting EDF parameters from {}...'.format(input_fname)) - input_fname = os.path.abspath(input_fname) - info, edf_info, orig_units = _get_info(input_fname, stim_channel, eog, - misc, exclude, True, preload, - include) - logger.info('Creating raw.info structure...') + def __init__( + self, + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + preload=False, + include=None, + verbose=None, + ): + if not _file_like(input_fname): + logger.info(f"Extracting GDF parameters from {input_fname}...") + input_fname = os.path.abspath(input_fname) + + info, edf_info, orig_units = _get_info( + input_fname, + stim_channel, + eog, + misc, + exclude, + True, + FileType.GDF, + include, + ) + logger.info("Creating raw.info structure...") + edf_info["blob"] = input_fname if _file_like(input_fname) else None # Raw attributes - last_samps = [edf_info['nsamples'] - 1] - super().__init__(info, preload, filenames=[input_fname], - raw_extras=[edf_info], last_samps=last_samps, - orig_format='int', orig_units=orig_units, - verbose=verbose) + last_samps = [edf_info["nsamples"] - 1] + super().__init__( + info, + preload, + filenames=[_path_from_fname(input_fname)], + raw_extras=[edf_info], + last_samps=last_samps, + orig_format="int", + orig_units=orig_units, + verbose=verbose, + ) # Read annotations from file and set it - onset, duration, desc = _get_annotations_gdf(edf_info, - self.info['sfreq']) + onset, duration, desc = _get_annotations_gdf(edf_info, self.info["sfreq"]) - self.set_annotations(Annotations(onset=onset, duration=duration, - description=desc, orig_time=None)) + self.set_annotations( + Annotations( + onset=onset, duration=duration, description=desc, orig_time=None + ) + ) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" - return _read_segment_file(data, idx, fi, start, stop, - self._raw_extras[fi], self._filenames[fi], - cals, mult) + return _read_segment_file( + data, + idx, + fi, + start, + stop, + self._raw_extras[fi], + self.filenames[fi] + if self._raw_extras[fi]["blob"] is None + else self._raw_extras[fi]["blob"], + cals, + mult, + ) def _read_ch(fid, subtype, samp, dtype_byte, dtype=None): """Read a number of samples for a single channel.""" # BDF - if subtype == 'bdf': - ch_data = np.fromfile(fid, dtype=dtype, count=samp * dtype_byte) + if subtype == "bdf": + ch_data = read_from_file_or_buffer(fid, dtype=dtype, count=samp * dtype_byte) ch_data = ch_data.reshape(-1, 3).astype(INT32) - ch_data = ((ch_data[:, 0]) + - (ch_data[:, 1] << 8) + - (ch_data[:, 2] << 16)) + ch_data = (ch_data[:, 0]) + (ch_data[:, 1] << 8) + (ch_data[:, 2] << 16) # 24th bit determines the sign - ch_data[ch_data >= (1 << 23)] -= (1 << 24) + ch_data[ch_data >= (1 << 23)] -= 1 << 24 # GDF data and EDF data else: - ch_data = np.fromfile(fid, dtype=dtype, count=samp) + ch_data = read_from_file_or_buffer(fid, dtype=dtype, count=samp) return ch_data -def _read_segment_file(data, idx, fi, start, stop, raw_extras, filenames, - cals, mult): +def _read_segment_file(data, idx, fi, start, stop, raw_extras, filenames, cals, mult): """Read a chunk of raw data.""" - from scipy.interpolate import interp1d - - n_samps = raw_extras['n_samps'] - buf_len = int(raw_extras['max_samp']) - dtype = raw_extras['dtype_np'] - dtype_byte = raw_extras['dtype_byte'] - 
data_offset = raw_extras['data_offset'] - stim_channel_idxs = raw_extras['stim_channel_idxs'] - orig_sel = raw_extras['sel'] - tal_idx = raw_extras.get('tal_idx', np.empty(0, int)) - subtype = raw_extras['subtype'] - cal = raw_extras['cal'] - offsets = raw_extras['offsets'] - gains = raw_extras['units'] + n_samps = raw_extras["n_samps"] + buf_len = int(raw_extras["max_samp"]) + dtype = raw_extras["dtype_np"] + dtype_byte = raw_extras["dtype_byte"] + data_offset = raw_extras["data_offset"] + stim_channel_idxs = raw_extras["stim_channel_idxs"] + orig_sel = raw_extras["sel"] + tal_idx = raw_extras.get("tal_idx", np.empty(0, int)) + subtype = raw_extras["subtype"] + cal = raw_extras["cal"] + offsets = raw_extras["offsets"] + gains = raw_extras["units"] read_sel = np.concatenate([orig_sel[idx], tal_idx]) tal_data = [] @@ -318,33 +630,41 @@ def _read_segment_file(data, idx, fi, start, stop, raw_extras, filenames, # We could read this one EDF block at a time, which would be this: ch_offsets = np.cumsum(np.concatenate([[0], n_samps]), dtype=np.int64) - block_start_idx, r_lims, d_lims = _blk_read_lims(start, stop, buf_len) + block_start_idx, r_lims, _ = _blk_read_lims(start, stop, buf_len) # But to speed it up, we really need to read multiple blocks at once, # Otherwise we can end up with e.g. 18,181 chunks for a 20 MB file! # Let's do ~10 MB chunks: n_per = max(10 * 1024 * 1024 // (ch_offsets[-1] * dtype_byte), 1) - with open(filenames, 'rb', buffering=0) as fid: + with _gdf_edf_get_fid(filenames, buffering=0) as fid: # Extract data - start_offset = (data_offset + - block_start_idx * ch_offsets[-1] * dtype_byte) + start_offset = data_offset + block_start_idx * ch_offsets[-1] * dtype_byte + + # first read everything into the `ones` array. For channels with + # lower sampling frequency, there will be zeros left at the end of the + # row. Ignore TAL/annotations channel and only store `orig_sel` + ones = np.zeros((len(orig_sel), data.shape[-1]), dtype=data.dtype) + # save how many samples have already been read per channel + n_smp_read = [0 for _ in range(len(orig_sel))] + + # read data in chunks for ai in range(0, len(r_lims), n_per): block_offset = ai * ch_offsets[-1] * dtype_byte n_read = min(len(r_lims) - ai, n_per) fid.seek(start_offset + block_offset, 0) # Read and reshape to (n_chunks_read, ch0_ch1_ch2_ch3...) 
- many_chunk = _read_ch(fid, subtype, ch_offsets[-1] * n_read, - dtype_byte, dtype).reshape(n_read, -1) + many_chunk = _read_ch( + fid, subtype, ch_offsets[-1] * n_read, dtype_byte, dtype + ).reshape(n_read, -1) r_sidx = r_lims[ai][0] - r_eidx = (buf_len * (n_read - 1) + r_lims[ai + n_read - 1][1]) - d_sidx = d_lims[ai][0] - d_eidx = d_lims[ai + n_read - 1][1] - one = np.zeros((len(orig_sel), d_eidx - d_sidx), dtype=data.dtype) + r_eidx = buf_len * (n_read - 1) + r_lims[ai + n_read - 1][1] + + # loop over selected channels, ci=channel selection for ii, ci in enumerate(read_sel): # This now has size (n_chunks_read, n_samp[ci]) - ch_data = many_chunk[:, - ch_offsets[ci]:ch_offsets[ci + 1]].copy() + ch_data = many_chunk[:, ch_offsets[ci] : ch_offsets[ci + 1]].copy() + # annotation channel has to be treated separately if ci in tal_idx: tal_data.append(ch_data) continue @@ -361,22 +681,55 @@ def _read_segment_file(data, idx, fi, start, stop, raw_extras, filenames, # Stim channel will be interpolated old = np.linspace(0, 1, n_samps[ci] + 1, True) new = np.linspace(0, 1, buf_len, False) - ch_data = np.append( - ch_data, np.zeros((len(ch_data), 1)), -1) - ch_data = interp1d(old, ch_data, - kind='zero', axis=-1)(new) - else: - # XXX resampling each chunk isn't great, - # it forces edge artifacts to appear at - # each buffer boundary :( - # it can also be very slow... - ch_data = resample( - ch_data.astype(np.float64), buf_len, n_samps[ci], - npad=0, axis=-1) + ch_data = np.append(ch_data, np.zeros((len(ch_data), 1)), -1) + ch_data = interp1d(old, ch_data, kind="zero", axis=-1)(new) elif orig_idx in stim_channel_idxs: ch_data = np.bitwise_and(ch_data.astype(int), 2**17 - 1) - one[orig_idx] = ch_data.ravel()[r_sidx:r_eidx] - _mult_cal_one(data[:, d_sidx:d_eidx], one, idx, cals, mult) + + one_i = ch_data.ravel()[r_sidx:r_eidx] + + # note how many samples have been read + smp_read = n_smp_read[orig_idx] + ones[orig_idx, smp_read : smp_read + len(one_i)] = one_i + n_smp_read[orig_idx] += len(one_i) + + # resample channels with lower sample frequency + # skip if no data was requested, ie. only annotations were read + if any(n_smp_read) > 0: + # expected number of samples, equals maximum sfreq + smp_exp = data.shape[-1] + + # resample data after loading all chunks to prevent edge artifacts + resampled = False + + for i, smp_read in enumerate(n_smp_read): + # nothing read, nothing to resample + if smp_read == 0: + continue + # upsample if n_samples is lower than from highest sfreq + if smp_read != smp_exp: + # sanity check that we read exactly how much we expected + assert (ones[i, smp_read:] == 0).all() + + ones[i, :] = resample( + ones[i, :smp_read].astype(np.float64), + smp_exp, + smp_read, + npad=0, + axis=-1, + ) + resampled = True + + # give warning if we resampled a subselection + if resampled and raw_extras["nsamples"] != (stop - start): + warn( + "Loading an EDF with mixed sampling frequencies and " + "preload=False will result in edge artifacts. " + "It is recommended to use preload=True." 
+ "See also https://github.com/mne-tools/mne-python/issues/10635" + ) + + _mult_cal_one(data[:, :], ones, idx, cals, mult) if len(tal_data) > 1: tal_data = np.concatenate([tal.ravel() for tal in tal_data]) @@ -384,13 +737,21 @@ def _read_segment_file(data, idx, fi, start, stop, raw_extras, filenames, return tal_data -def _read_header(fname, exclude, infer_types, include=None): +@fill_doc +def _read_header( + fname, + exclude, + infer_types, + file_type, + include=None, + exclude_after_unique=False, +): """Unify EDF, BDF and GDF _read_header call. Parameters ---------- fname : str - Path to the EDF+, BDF, or GDF file. + Path to the EDF+, BDF, or GDF file or file-like object. exclude : list of str | str Channel names to exclude. This can help when reading data with different sampling rates to avoid unnecessary resampling. A str is @@ -406,62 +767,81 @@ def _read_header(fname, exclude, infer_types, include=None): include : list of str | str Channel names to be included. A str is interpreted as a regular expression. 'exclude' must be empty if include is assigned. + %(exclude_after_unique)s Returns ------- (edf_info, orig_units) : tuple """ - ext = os.path.splitext(fname)[1][1:].lower() - logger.info('%s file detected' % ext.upper()) - if ext in ('bdf', 'edf'): - return _read_edf_header(fname, exclude, infer_types, include) - elif ext == 'gdf': + if file_type in (FileType.BDF, FileType.EDF): + return _read_edf_header( + fname, + exclude, + infer_types, + file_type, + include, + exclude_after_unique, + ) + elif file_type == FileType.GDF: return _read_gdf_header(fname, exclude, include), None else: - raise NotImplementedError( - f'Only GDF, EDF, and BDF files are supported, got {ext}.') - - -def _get_info(fname, stim_channel, eog, misc, exclude, infer_types, preload, - include=None): + raise NotImplementedError("Only GDF, EDF, and BDF files are supported.") + + +def _get_info( + fname, + stim_channel, + eog, + misc, + exclude, + infer_types, + file_type, + include=None, + exclude_after_unique=False, +): """Extract information from EDF+, BDF or GDF file.""" eog = eog if eog is not None else [] misc = misc if misc is not None else [] - edf_info, orig_units = _read_header(fname, exclude, infer_types, include) + edf_info, orig_units = _read_header( + fname, exclude, infer_types, file_type, include, exclude_after_unique + ) # XXX: `tal_ch_names` to pass to `_check_stim_channel` should be computed # from `edf_info['ch_names']` and `edf_info['tal_idx']` but 'tal_idx' # contains stim channels that are not TAL. 
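# A short sketch of the recommendation in the warning above (the path is
# hypothetical): for recordings whose channels use different sampling rates,
# load with preload=True so slower channels are resampled once over the full
# record instead of chunk by chunk, e.g.
#     raw = mne.io.read_raw_edf("mixed_rates.edf", preload=True)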
- stim_channel_idxs, _ = _check_stim_channel( - stim_channel, edf_info['ch_names']) + stim_channel_idxs, _ = _check_stim_channel(stim_channel, edf_info["ch_names"]) - sel = edf_info['sel'] # selection of channels not excluded - ch_names = edf_info['ch_names'] # of length len(sel) - if 'ch_types' in edf_info: - ch_types = edf_info['ch_types'] # of length len(sel) + sel = edf_info["sel"] # selection of channels not excluded + ch_names = edf_info["ch_names"] # of length len(sel) + if "ch_types" in edf_info: + ch_types = edf_info["ch_types"] # of length len(sel) else: ch_types = [None] * len(sel) if len(sel) == 0: # only want stim channels - n_samps = edf_info['n_samps'][[0]] + n_samps = edf_info["n_samps"][[0]] else: - n_samps = edf_info['n_samps'][sel] - nchan = edf_info['nchan'] - physical_ranges = edf_info['physical_max'] - edf_info['physical_min'] - cals = edf_info['digital_max'] - edf_info['digital_min'] + n_samps = edf_info["n_samps"][sel] + nchan = edf_info["nchan"] + physical_ranges = edf_info["physical_max"] - edf_info["physical_min"] + cals = edf_info["digital_max"] - edf_info["digital_min"] bad_idx = np.where((~np.isfinite(cals)) | (cals == 0))[0] if len(bad_idx) > 0: - warn('Scaling factor is not defined in following channels:\n' + - ', '.join(ch_names[i] for i in bad_idx)) + warn( + "Scaling factor is not defined in following channels:\n" + + ", ".join(ch_names[i] for i in bad_idx) + ) cals[bad_idx] = 1 bad_idx = np.where(physical_ranges == 0)[0] if len(bad_idx) > 0: - warn('Physical range is not defined in following channels:\n' + - ', '.join(ch_names[i] for i in bad_idx)) + warn( + "Physical range is not defined in following channels:\n" + + ", ".join(ch_names[i] for i in bad_idx) + ) physical_ranges[bad_idx] = 1 # Creates a list of dicts of eeg channels for raw.info - logger.info('Setting channel info structure...') + logger.info("Setting channel info structure...") chs = list() pick_mask = np.ones(len(ch_names)) @@ -469,255 +849,336 @@ def _get_info(fname, stim_channel, eog, misc, exclude, infer_types, preload, for idx, ch_name in enumerate(ch_names): chan_info = {} - chan_info['cal'] = 1. - chan_info['logno'] = idx + 1 - chan_info['scanno'] = idx + 1 - chan_info['range'] = 1. 
- chan_info['unit_mul'] = FIFF.FIFF_UNITM_NONE - chan_info['ch_name'] = ch_name - chan_info['unit'] = FIFF.FIFF_UNIT_V - chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD - chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG - chan_info['kind'] = FIFF.FIFFV_EEG_CH + chan_info["cal"] = 1.0 + chan_info["logno"] = idx + 1 + chan_info["scanno"] = idx + 1 + chan_info["range"] = 1.0 + chan_info["unit_mul"] = FIFF.FIFF_UNITM_NONE + chan_info["ch_name"] = ch_name + chan_info["unit"] = FIFF.FIFF_UNIT_V + chan_info["coord_frame"] = FIFF.FIFFV_COORD_HEAD + chan_info["coil_type"] = FIFF.FIFFV_COIL_EEG + chan_info["kind"] = FIFF.FIFFV_EEG_CH # montage can't be stored in EDF so channel locs are unknown: - chan_info['loc'] = np.full(12, np.nan) + chan_info["loc"] = np.full(12, np.nan) # if the edf info contained channel type information # set it now ch_type = ch_types[idx] if ch_type is not None and ch_type in CH_TYPE_MAPPING: - chan_info['kind'] = CH_TYPE_MAPPING.get(ch_type) - if ch_type not in ['EEG', 'ECOG', 'SEEG', 'DBS']: - chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE + chan_info["kind"] = CH_TYPE_MAPPING.get(ch_type) + if ch_type not in ["EEG", "ECOG", "SEEG", "DBS"]: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE pick_mask[idx] = False # if user passes in explicit mapping for eog, misc and stim # channels set them here if ch_name in eog or idx in eog or idx - nchan in eog: - chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE - chan_info['kind'] = FIFF.FIFFV_EOG_CH + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["kind"] = FIFF.FIFFV_EOG_CH pick_mask[idx] = False elif ch_name in misc or idx in misc or idx - nchan in misc: - chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE - chan_info['kind'] = FIFF.FIFFV_MISC_CH + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["kind"] = FIFF.FIFFV_MISC_CH pick_mask[idx] = False elif idx in stim_channel_idxs: - chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE - chan_info['unit'] = FIFF.FIFF_UNIT_NONE - chan_info['kind'] = FIFF.FIFFV_STIM_CH + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["unit"] = FIFF.FIFF_UNIT_NONE + chan_info["kind"] = FIFF.FIFFV_STIM_CH pick_mask[idx] = False - chan_info['ch_name'] = ch_name - ch_names[idx] = chan_info['ch_name'] - edf_info['units'][idx] = 1 + chan_info["ch_name"] = ch_name + ch_names[idx] = chan_info["ch_name"] + edf_info["units"][idx] = 1 elif ch_type not in CH_TYPE_MAPPING: chs_without_types.append(ch_name) chs.append(chan_info) # warn if channel type was not inferable if len(chs_without_types): - msg = ('Could not determine channel type of the following channels, ' - f'they will be set as EEG:\n{", ".join(chs_without_types)}') + msg = ( + "Could not determine channel type of the following channels, " + f"they will be set as EEG:\n{', '.join(chs_without_types)}" + ) logger.info(msg) - edf_info['stim_channel_idxs'] = stim_channel_idxs + edf_info["stim_channel_idxs"] = stim_channel_idxs if any(pick_mask): picks = [item for item, mask in zip(range(nchan), pick_mask) if mask] - edf_info['max_samp'] = max_samp = n_samps[picks].max() + edf_info["max_samp"] = max_samp = n_samps[picks].max() else: - edf_info['max_samp'] = max_samp = n_samps.max() + edf_info["max_samp"] = max_samp = n_samps.max() # Info structure # ------------------------------------------------------------------------- - not_stim_ch = [x for x in range(n_samps.shape[0]) - if x not in stim_channel_idxs] + not_stim_ch = [x for x in range(n_samps.shape[0]) if x not in stim_channel_idxs] if len(not_stim_ch) == 0: # only loading stim channels 
not_stim_ch = list(range(len(n_samps))) - sfreq = np.take(n_samps, not_stim_ch).max() * \ - edf_info['record_length'][1] / edf_info['record_length'][0] + sfreq = ( + np.take(n_samps, not_stim_ch).max() + * edf_info["record_length"][1] + / edf_info["record_length"][0] + ) del n_samps info = _empty_info(sfreq) - info['meas_date'] = edf_info['meas_date'] - info['chs'] = chs - info['ch_names'] = ch_names - - # Filter settings - highpass = edf_info['highpass'] - lowpass = edf_info['lowpass'] - if highpass.size == 0: - pass - elif all(highpass): - if highpass[0] == 'NaN': - # Placeholder for future use. Highpass set in _empty_info. - pass - elif highpass[0] == 'DC': - info['highpass'] = 0. + info["meas_date"] = edf_info["meas_date"] + info["chs"] = chs + info["ch_names"] = ch_names + + # Subject information + info["subject_info"] = {} + + # String subject identifier + if edf_info["subject_info"].get("id") is not None: + info["subject_info"]["his_id"] = edf_info["subject_info"]["id"] + # Subject sex (0=unknown, 1=male, 2=female) + if edf_info["subject_info"].get("sex") is not None: + if edf_info["subject_info"]["sex"] == "M": + info["subject_info"]["sex"] = 1 + elif edf_info["subject_info"]["sex"] == "F": + info["subject_info"]["sex"] = 2 else: - hp = highpass[0] - try: - hp = float(hp) - except Exception: - hp = 0. - info['highpass'] = hp - else: - info['highpass'] = float(np.max(highpass)) - warn('Channels contain different highpass filters. Highest filter ' - 'setting will be stored.') - if np.isnan(info['highpass']): - info['highpass'] = 0. - if lowpass.size == 0: - # Placeholder for future use. Lowpass set in _empty_info. - pass - elif all(lowpass): - if lowpass[0] in ('NaN', '0', '0.0'): - # Placeholder for future use. Lowpass set in _empty_info. - pass + info["subject_info"]["sex"] = 0 + # Subject names (first, middle, last). + if edf_info["subject_info"].get("name") is not None: + sub_names = edf_info["subject_info"]["name"].split("_") + if len(sub_names) < 2 or len(sub_names) > 3: + info["subject_info"]["last_name"] = edf_info["subject_info"]["name"] + elif len(sub_names) == 2: + info["subject_info"]["first_name"] = sub_names[0] + info["subject_info"]["last_name"] = sub_names[1] else: - info['lowpass'] = float(lowpass[0]) - else: - info['lowpass'] = float(np.min(lowpass)) - warn('Channels contain different lowpass filters. Lowest filter ' - 'setting will be stored.') - if np.isnan(info['lowpass']): - info['lowpass'] = info['sfreq'] / 2. - - if info['highpass'] > info['lowpass']: - warn(f'Highpass cutoff frequency {info["highpass"]} is greater ' - f'than lowpass cutoff frequency {info["lowpass"]}, ' - 'setting values to 0 and Nyquist.') - info['highpass'] = 0. - info['lowpass'] = info['sfreq'] / 2. + info["subject_info"]["first_name"] = sub_names[0] + info["subject_info"]["middle_name"] = sub_names[1] + info["subject_info"]["last_name"] = sub_names[2] + # Birthday in (year, month, day) format. + if isinstance(edf_info["subject_info"].get("birthday"), datetime): + info["subject_info"]["birthday"] = date( + edf_info["subject_info"]["birthday"].year, + edf_info["subject_info"]["birthday"].month, + edf_info["subject_info"]["birthday"].day, + ) + # Handedness (1=right, 2=left, 3=ambidextrous). + if edf_info["subject_info"].get("hand") is not None: + info["subject_info"]["hand"] = int(edf_info["subject_info"]["hand"]) + # Height in meters. 
+ if edf_info["subject_info"].get("height") is not None: + info["subject_info"]["height"] = float(edf_info["subject_info"]["height"]) + # Weight in kilograms. + if edf_info["subject_info"].get("weight") is not None: + info["subject_info"]["weight"] = float(edf_info["subject_info"]["weight"]) + # Remove values after conversion to help with in-memory anonymization + for key in ("subject_info", "meas_date"): + del edf_info[key] + + # Filter settings + if filt_ch_idxs := [x for x in range(len(sel)) if x not in stim_channel_idxs]: + _set_prefilter(info, edf_info, filt_ch_idxs, "highpass") + _set_prefilter(info, edf_info, filt_ch_idxs, "lowpass") + + if np.isnan(info["lowpass"]): + info["lowpass"] = info["sfreq"] / 2.0 + + if info["highpass"] > info["lowpass"]: + warn( + f"Highpass cutoff frequency {info['highpass']} is greater " + f"than lowpass cutoff frequency {info['lowpass']}, " + "setting values to 0 and Nyquist." + ) + info["highpass"] = 0.0 + info["lowpass"] = info["sfreq"] / 2.0 # Some keys to be consistent with FIF measurement info - info['description'] = None - edf_info['nsamples'] = int(edf_info['n_records'] * max_samp) + info["description"] = None + edf_info["nsamples"] = int(edf_info["n_records"] * max_samp) info._unlocked = False info._update_redundant() # Later used for reading - edf_info['cal'] = physical_ranges / cals + edf_info["cal"] = physical_ranges / cals # physical dimension in µV - edf_info['offsets'] = ( - edf_info['physical_min'] - edf_info['digital_min'] * edf_info['cal']) - del edf_info['physical_min'] - del edf_info['digital_min'] + edf_info["offsets"] = ( + edf_info["physical_min"] - edf_info["digital_min"] * edf_info["cal"] + ) + del edf_info["physical_min"] + del edf_info["digital_min"] - if edf_info['subtype'] == 'bdf': - edf_info['cal'][stim_channel_idxs] = 1 - edf_info['offsets'][stim_channel_idxs] = 0 - edf_info['units'][stim_channel_idxs] = 1 + if edf_info["subtype"] == "bdf": + edf_info["cal"][stim_channel_idxs] = 1 + edf_info["offsets"][stim_channel_idxs] = 0 + edf_info["units"][stim_channel_idxs] = 1 return info, edf_info, orig_units def _parse_prefilter_string(prefiltering): """Parse prefilter string from EDF+ and BDF headers.""" - highpass = np.array( - [v for hp in [re.findall(r'HP:\s*([0-9]+[.]*[0-9]*)', filt) - for filt in prefiltering] for v in hp] - ) - lowpass = np.array( - [v for hp in [re.findall(r'LP:\s*([0-9]+[.]*[0-9]*)', filt) - for filt in prefiltering] for v in hp] - ) - return highpass, lowpass + filter_types = ["HP", "LP"] + filter_strings = {t: [] for t in filter_types} + for filt in prefiltering: + for t in filter_types: + matches = re.findall(rf"{t}:\s*([a-zA-Z0-9,.]+)(Hz)?", filt) + value = "" + for match in matches: + if match[0]: + value = match[0].replace("Hz", "").replace(",", ".") + filter_strings[t].append(value) + return np.array(filter_strings["HP"]), np.array(filter_strings["LP"]) + + +def _prefilter_float(filt): + if isinstance(filt, int | float | np.number): + return filt + if filt == "DC": + return 0.0 + if filt.replace(".", "", 1).isdigit(): + return float(filt) + return np.nan + + +def _set_prefilter(info, edf_info, ch_idxs, key): + value = 0 + if len(values := edf_info.get(key, [])): + values = [x for i, x in enumerate(values) if i in ch_idxs] + if len(np.unique(values)) > 1: + warn( + f"Channels contain different {key} filters. " + f"{'Highest' if key == 'highpass' else 'Lowest'} filter " + "setting will be stored." 
+ ) + if key == "highpass": + value = np.nanmax([_prefilter_float(x) for x in values]) + else: + value = np.nanmin([_prefilter_float(x) for x in values]) + else: + value = _prefilter_float(values[0]) + if not np.isnan(value) and value != 0: + info[key] = value def _edf_str(x): - return x.decode('latin-1').split('\x00')[0] + return x.decode("latin-1").split("\x00")[0] def _edf_str_num(x): return _edf_str(x).replace(",", ".") -def _read_edf_header(fname, exclude, infer_types, include=None): +def _read_edf_header( + fname, + exclude, + infer_types, + file_type, + include=None, + exclude_after_unique=False, +): """Read header information from EDF+ or BDF file.""" - edf_info = {'events': []} - - with open(fname, 'rb') as fid: + edf_info = {"events": []} + with _gdf_edf_get_fid(fname) as fid: fid.read(8) # version (unused here) # patient ID patient = {} - id_info = fid.read(80).decode('latin-1').rstrip() - id_info = id_info.split(' ') + id_info = fid.read(80).decode("latin-1").rstrip() + id_info = id_info.split(" ") if len(id_info): - patient['id'] = id_info[0] - if len(id_info) == 4: + patient["id"] = id_info[0] + if len(id_info) >= 4: try: birthdate = datetime.strptime(id_info[2], "%d-%b-%Y") except ValueError: birthdate = "X" - patient['sex'] = id_info[1] - patient['birthday'] = birthdate - patient['name'] = id_info[3] + patient["sex"] = id_info[1] + patient["birthday"] = birthdate + patient["name"] = id_info[3] + if len(id_info) > 4: + for info in id_info[4:]: + if "=" in info: + key, value = info.split("=") + err = f"patient {key} info cannot be {value}, skipping." + if key in ["weight", "height"]: + try: + patient[key] = float(value) + except ValueError: + logger.debug(err) + continue + elif key in ["hand"]: + try: + patient[key] = int(value) + except ValueError: + logger.debug(err) + continue + else: + warn(f"Invalid patient information {key}") # Recording ID - meas_id = {} - rec_info = fid.read(80).decode('latin-1').rstrip().split(' ') - valid_startdate = False + rec_info = fid.read(80).decode("latin-1").rstrip().split(" ") + # if the measurement date is available in the recording info, it's used instead + # of the file's meas_date since it contains all 4 digits of the year. 
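# An illustrative pass through the prefilter helpers defined above, using
# hypothetical header strings rather than values read from a real file:
#     hp, lp = _parse_prefilter_string(["HP:0.1Hz LP:75Hz", "HP:DC LP:134Hz"])
#     # hp -> array(['0.1', 'DC']), lp -> array(['75', '134'])
#     _prefilter_float("DC")   # -> 0.0
#     _prefilter_float("0.1")  # -> 0.1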
+ meas_date = None if len(rec_info) == 5: try: - startdate = datetime.strptime(rec_info[1], "%d-%b-%Y") - except ValueError: - startdate = "X" + meas_date = datetime.strptime(rec_info[1], "%d-%b-%Y") + except Exception: + meas_date = None else: - valid_startdate = True - meas_id['startdate'] = startdate - meas_id['study_id'] = rec_info[2] - meas_id['technician'] = rec_info[3] - meas_id['equipment'] = rec_info[4] - - # If startdate available in recording info, use it instead of the - # file's meas_date since it contains all 4 digits of the year - if valid_startdate: - day = meas_id['startdate'].day - month = meas_id['startdate'].month - year = meas_id['startdate'].year - fid.read(8) # skip file's meas_date + fid.read(8) # skip the file's meas_date + if meas_date is None: + try: + meas_date = fid.read(8).decode("latin-1") + day, month, year = (int(x) for x in meas_date.split(".")) + year = year + 2000 if year < 85 else year + 1900 + meas_date = datetime(year, month, day) + except Exception: + meas_date = None + if meas_date is not None: + # try to get the hour/minute/sec from the recording info + try: + meas_time = fid.read(8).decode("latin-1") + hour, minute, second = (int(x) for x in meas_time.split(".")) + except Exception: + hour, minute, second = 0, 0, 0 + meas_date = meas_date.replace( + hour=hour, minute=minute, second=second, tzinfo=timezone.utc + ) else: - meas_date = fid.read(8).decode('latin-1') - day, month, year = [int(x) for x in meas_date.split('.')] - year = year + 2000 if year < 85 else year + 1900 + fid.read(8) # skip the file's measurement time + warn("Invalid measurement date encountered in the header.") - meas_time = fid.read(8).decode('latin-1') - hour, minute, sec = [int(x) for x in meas_time.split('.')] try: - meas_date = datetime(year, month, day, hour, minute, sec, - tzinfo=timezone.utc) + header_nbytes = int(_edf_str(fid.read(8))) except ValueError: - warn(f'Invalid date encountered ({year:04d}-{month:02d}-' - f'{day:02d} {hour:02d}:{minute:02d}:{sec:02d}).') - meas_date = None - - header_nbytes = int(_edf_str(fid.read(8))) + raise ValueError( + f"Bad {'EDF' if file_type == FileType.EDF else 'BDF'} file provided." + ) # The following 44 bytes sometimes identify the file type, but this is - # not guaranteed. Therefore, we skip this field and use the file - # extension to determine the subtype (EDF or BDF, which differ in the + # not guaranteed. Therefore, we skip this field and use the file_type + # to determine the subtype (EDF or BDF, which differ in the # number of bytes they use for the data records; EDF uses 2 bytes # whereas BDF uses 3 bytes). fid.read(44) - subtype = os.path.splitext(fname)[1][1:].lower() + subtype = file_type n_records = int(_edf_str(fid.read(8))) record_length = float(_edf_str(fid.read(8))) - record_length = np.array([record_length, 1.]) # in seconds + record_length = np.array([record_length, 1.0]) # in seconds if record_length[0] == 0: - record_length[0] = 1. - warn('Header information is incorrect for record length. Default ' - 'record length set to 1.\nIt is possible that this file only' - ' contains annotations and no signals. In that case, please ' - 'use mne.read_annotations() to load these annotations.') + record_length[0] = 1.0 + warn( + "Header information is incorrect for record length. Default " + "record length set to 1.\nIt is possible that this file only" + " contains annotations and no signals. In that case, please " + "use mne.read_annotations() to load these annotations." 
+ ) nchan = int(_edf_str(fid.read(4))) channels = list(range(nchan)) # read in 16 byte labels and strip any extra spaces at the end - ch_labels = [fid.read(16).strip().decode('latin-1') for _ in channels] + ch_labels = [fid.read(16).strip().decode("latin-1") for _ in channels] # get channel names and optionally channel type # EDF specification contains 16 bytes that encode channel names, @@ -726,55 +1187,68 @@ def _read_edf_header(fname, exclude, infer_types, include=None): if infer_types: ch_types, ch_names = [], [] for ch_label in ch_labels: - ch_type, ch_name = 'EEG', ch_label # default to EEG - parts = ch_label.split(' ') + ch_type, ch_name = "EEG", ch_label # default to EEG + parts = ch_label.split(" ") if len(parts) > 1: if parts[0].upper() in CH_TYPE_MAPPING: ch_type = parts[0].upper() - ch_name = ' '.join(parts[1:]) - logger.info(f"Channel '{ch_label}' recognized as type " - f"{ch_type} (renamed to '{ch_name}').") + ch_name = " ".join(parts[1:]) + logger.info( + f"Channel '{ch_label}' recognized as type " + f"{ch_type} (renamed to '{ch_name}')." + ) ch_types.append(ch_type) ch_names.append(ch_name) else: - ch_types, ch_names = ['EEG'] * nchan, ch_labels + ch_types, ch_names = ["EEG"] * nchan, ch_labels - exclude = _find_exclude_idx(ch_names, exclude, include) tal_idx = _find_tal_idx(ch_names) + if exclude_after_unique: + # make sure channel names are unique + ch_names = _unique_channel_names(ch_names) + + exclude = _find_exclude_idx(ch_names, exclude, include) exclude = np.concatenate([exclude, tal_idx]) sel = np.setdiff1d(np.arange(len(ch_names)), exclude) + for ch in channels: fid.read(80) # transducer - units = [fid.read(8).strip().decode('latin-1') for ch in channels] - edf_info['units'] = list() + units = [fid.read(8).strip().decode("latin-1") for ch in channels] + edf_info["units"] = list() for i, unit in enumerate(units): if i in exclude: continue # allow μ (greek mu), µ (micro symbol) and μ (sjis mu) codepoints - if unit in ('\u03BCV', '\u00B5V', '\x83\xCAV', 'uV'): - edf_info['units'].append(1e-6) - elif unit == 'mV': - edf_info['units'].append(1e-3) + if unit in ("\u03bcV", "\u00b5V", "\x83\xcaV", "uV"): + edf_info["units"].append(1e-6) + elif unit == "mV": + edf_info["units"].append(1e-3) else: - edf_info['units'].append(1) - edf_info['units'] = np.array(edf_info['units'], float) + edf_info["units"].append(1) + edf_info["units"] = np.array(edf_info["units"], float) ch_names = [ch_names[idx] for idx in sel] + ch_types = [ch_types[idx] for idx in sel] units = [units[idx] for idx in sel] - # make sure channel names are unique - ch_names = _unique_channel_names(ch_names) + if not exclude_after_unique: + # make sure channel names are unique + ch_names = _unique_channel_names(ch_names) orig_units = dict(zip(ch_names, units)) - physical_min = np.array( - [float(_edf_str_num(fid.read(8))) for ch in channels])[sel] - physical_max = np.array( - [float(_edf_str_num(fid.read(8))) for ch in channels])[sel] - digital_min = np.array( - [float(_edf_str_num(fid.read(8))) for ch in channels])[sel] - digital_max = np.array( - [float(_edf_str_num(fid.read(8))) for ch in channels])[sel] - prefiltering = [_edf_str(fid.read(80)).strip() for ch in channels][:-1] + physical_min = np.array([float(_edf_str_num(fid.read(8))) for ch in channels])[ + sel + ] + physical_max = np.array([float(_edf_str_num(fid.read(8))) for ch in channels])[ + sel + ] + digital_min = np.array([float(_edf_str_num(fid.read(8))) for ch in channels])[ + sel + ] + digital_max = np.array([float(_edf_str_num(fid.read(8))) for 
ch in channels])[ + sel + ] + prefiltering = np.array([_edf_str(fid.read(80)).strip() for ch in channels]) highpass, lowpass = _parse_prefilter_string(prefiltering) # number of samples per record @@ -782,13 +1256,25 @@ def _read_edf_header(fname, exclude, infer_types, include=None): # Populate edf_info edf_info.update( - ch_names=ch_names, ch_types=ch_types, data_offset=header_nbytes, - digital_max=digital_max, digital_min=digital_min, - highpass=highpass, sel=sel, lowpass=lowpass, meas_date=meas_date, - n_records=n_records, n_samps=n_samps, nchan=nchan, - subject_info=patient, physical_max=physical_max, - physical_min=physical_min, record_length=record_length, - subtype=subtype, tal_idx=tal_idx) + ch_names=ch_names, + ch_types=ch_types, + data_offset=header_nbytes, + digital_max=digital_max, + digital_min=digital_min, + highpass=highpass, + sel=sel, + lowpass=lowpass, + meas_date=meas_date, + n_records=n_records, + n_samps=n_samps, + nchan=nchan, + subject_info=patient, + physical_max=physical_max, + physical_min=physical_min, + record_length=record_length, + subtype="bdf" if subtype == FileType.BDF else "edf", + tal_idx=tal_idx, + ) fid.read(32 * nchan).decode() # reserved assert fid.tell() == header_nbytes @@ -796,41 +1282,60 @@ def _read_edf_header(fname, exclude, infer_types, include=None): fid.seek(0, 2) n_bytes = fid.tell() n_data_bytes = n_bytes - header_nbytes - total_samps = (n_data_bytes // 3 if subtype == 'bdf' - else n_data_bytes // 2) + total_samps = ( + n_data_bytes // 3 if subtype == FileType.BDF else n_data_bytes // 2 + ) read_records = total_samps // np.sum(n_samps) if n_records != read_records: - warn('Number of records from the header does not match the file ' - 'size (perhaps the recording was not stopped before exiting).' - ' Inferring from the file size.') - edf_info['n_records'] = read_records + warn( + "Number of records from the header does not match the file " + "size (perhaps the recording was not stopped before exiting)." + " Inferring from the file size." 
+ ) + edf_info["n_records"] = read_records del n_records - if subtype == 'bdf': - edf_info['dtype_byte'] = 3 # 24-bit (3 byte) integers - edf_info['dtype_np'] = UINT8 + if subtype == FileType.BDF: + edf_info["dtype_byte"] = 3 # 24-bit (3 byte) integers + edf_info["dtype_np"] = UINT8 else: - edf_info['dtype_byte'] = 2 # 16-bit (2 byte) integers - edf_info['dtype_np'] = INT16 + edf_info["dtype_byte"] = 2 # 16-bit (2 byte) integers + edf_info["dtype_np"] = INT16 return edf_info, orig_units -INT8 = '= 2: - patient['id'] = pid[0] - patient['name'] = pid[1] + patient["id"] = pid[0] + patient["name"] = pid[1] # Recording ID meas_id = {} - meas_id['recording_id'] = _edf_str(fid.read(80)).strip() + meas_id["recording_id"] = _edf_str(fid.read(80)).strip() # date tm = _edf_str(fid.read(16)).strip() try: - if tm[14:16] == ' ': - tm = tm[:14] + '00' + tm[16:] + if tm[14:16] == " ": + tm = tm[:14] + "00" + tm[16:] meas_date = datetime( - int(tm[0:4]), int(tm[4:6]), - int(tm[6:8]), int(tm[8:10]), - int(tm[10:12]), int(tm[12:14]), + int(tm[0:4]), + int(tm[4:6]), + int(tm[6:8]), + int(tm[8:10]), + int(tm[10:12]), + int(tm[12:14]), int(tm[14:16]) * pow(10, 4), - tzinfo=timezone.utc) + tzinfo=timezone.utc, + ) except Exception: pass - header_nbytes = np.fromfile(fid, INT64, 1)[0] - meas_id['equipment'] = np.fromfile(fid, UINT8, 8)[0] - meas_id['hospital'] = np.fromfile(fid, UINT8, 8)[0] - meas_id['technician'] = np.fromfile(fid, UINT8, 8)[0] - fid.seek(20, 1) # 20bytes reserved + header_nbytes = read_from_file_or_buffer(fid, INT64, 1)[0] + meas_id["equipment"] = read_from_file_or_buffer(fid, UINT8, 8)[0] + meas_id["hospital"] = read_from_file_or_buffer(fid, UINT8, 8)[0] + meas_id["technician"] = read_from_file_or_buffer(fid, UINT8, 8)[0] + fid.seek(20, 1) # 20bytes reserved - n_records = np.fromfile(fid, INT64, 1)[0] + n_records = read_from_file_or_buffer(fid, INT64, 1)[0] # record length in seconds - record_length = np.fromfile(fid, UINT32, 2) + record_length = read_from_file_or_buffer(fid, UINT32, 2) if record_length[0] == 0: - record_length[0] = 1. - warn('Header information is incorrect for record length. ' - 'Default record length set to 1.') - nchan = np.fromfile(fid, UINT32, 1)[0] + record_length[0] = 1.0 + warn( + "Header information is incorrect for record length. " + "Default record length set to 1." 
+ ) + nchan = int(read_from_file_or_buffer(fid, UINT32, 1)[0]) channels = list(range(nchan)) ch_names = [_edf_str(fid.read(16)).strip() for ch in channels] exclude = _find_exclude_idx(ch_names, exclude, include) sel = np.setdiff1d(np.arange(len(ch_names)), exclude) fid.seek(80 * len(channels), 1) # transducer units = [_edf_str(fid.read(8)).strip() for ch in channels] - edf_info['units'] = list() + edf_info["units"] = list() for i, unit in enumerate(units): if i in exclude: continue - if unit[:2] == 'uV': - edf_info['units'].append(1e-6) + if unit[:2] == "uV": + edf_info["units"].append(1e-6) else: - edf_info['units'].append(1) - edf_info['units'] = np.array(edf_info['units'], float) + edf_info["units"].append(1) + edf_info["units"] = np.array(edf_info["units"], float) ch_names = [ch_names[idx] for idx in sel] - physical_min = np.fromfile(fid, FLOAT64, len(channels)) - physical_max = np.fromfile(fid, FLOAT64, len(channels)) - digital_min = np.fromfile(fid, INT64, len(channels)) - digital_max = np.fromfile(fid, INT64, len(channels)) - prefiltering = [_edf_str(fid.read(80)) for ch in channels][:-1] + physical_min = read_from_file_or_buffer(fid, FLOAT64, len(channels)) + physical_max = read_from_file_or_buffer(fid, FLOAT64, len(channels)) + digital_min = read_from_file_or_buffer(fid, INT64, len(channels)) + digital_max = read_from_file_or_buffer(fid, INT64, len(channels)) + prefiltering = [_edf_str(fid.read(80)) for ch in channels] highpass, lowpass = _parse_prefilter_string(prefiltering) # n samples per record - n_samps = np.fromfile(fid, INT32, len(channels)) + n_samps = read_from_file_or_buffer(fid, INT32, len(channels)) # channel data type - dtype = np.fromfile(fid, INT32, len(channels)) + dtype = read_from_file_or_buffer(fid, INT32, len(channels)) # total number of bytes for data - bytes_tot = np.sum([GDFTYPE_BYTE[t] * n_samps[i] - for i, t in enumerate(dtype)]) + bytes_tot = np.sum( + [GDFTYPE_BYTE[t] * n_samps[i] for i, t in enumerate(dtype)] + ) # Populate edf_info dtype_np, dtype_byte = _check_dtype_byte(dtype) edf_info.update( - bytes_tot=bytes_tot, ch_names=ch_names, - data_offset=header_nbytes, digital_min=digital_min, + bytes_tot=bytes_tot, + ch_names=ch_names, + data_offset=header_nbytes, + digital_min=digital_min, digital_max=digital_max, - dtype_byte=dtype_byte, dtype_np=dtype_np, exclude=exclude, - highpass=highpass, sel=sel, lowpass=lowpass, + dtype_byte=dtype_byte, + dtype_np=dtype_np, + exclude=exclude, + highpass=highpass, + sel=sel, + lowpass=lowpass, meas_date=meas_date, - meas_id=meas_id, n_records=n_records, n_samps=n_samps, - nchan=nchan, subject_info=patient, physical_max=physical_max, - physical_min=physical_min, record_length=record_length) + meas_id=meas_id, + n_records=n_records, + n_samps=n_samps, + nchan=nchan, + subject_info=patient, + physical_max=physical_max, + physical_min=physical_min, + record_length=record_length, + ) - fid.seek(32 * edf_info['nchan'], 1) # reserved + fid.seek(32 * edf_info["nchan"], 1) # reserved assert fid.tell() == header_nbytes # Event table # ----------------------------------------------------------------- - etp = header_nbytes + n_records * edf_info['bytes_tot'] + etp = header_nbytes + n_records * edf_info["bytes_tot"] # skip data to go to event table fid.seek(etp) - etmode = np.fromfile(fid, UINT8, 1)[0] + etmode = read_from_file_or_buffer(fid, UINT8, 1)[0] if etmode in (1, 3): - sr = np.fromfile(fid, UINT8, 3) + sr = read_from_file_or_buffer(fid, UINT8, 3).astype(np.uint32) event_sr = sr[0] for i in range(1, len(sr)): event_sr 
= event_sr + sr[i] * 2 ** (i * 8) - n_events = np.fromfile(fid, UINT32, 1)[0] - pos = np.fromfile(fid, UINT32, n_events) - 1 # 1-based inds - typ = np.fromfile(fid, UINT16, n_events) + n_events = read_from_file_or_buffer(fid, UINT32, 1)[0] + pos = ( + read_from_file_or_buffer(fid, UINT32, n_events) - 1 + ) # 1-based inds + typ = read_from_file_or_buffer(fid, UINT16, n_events) if etmode == 3: - chn = np.fromfile(fid, UINT16, n_events) - dur = np.fromfile(fid, UINT32, n_events) + chn = read_from_file_or_buffer(fid, UINT16, n_events) + dur = read_from_file_or_buffer(fid, UINT32, n_events) else: chn = np.zeros(n_events, dtype=np.int32) dur = np.ones(n_events, dtype=UINT32) @@ -976,105 +1505,112 @@ def _read_gdf_header(fname, exclude, include=None): # --------------------------------------------------------------------- else: # FIXED HEADER - handedness = ('Unknown', 'Right', 'Left', 'Equal') - gender = ('Unknown', 'Male', 'Female') - scale = ('Unknown', 'No', 'Yes', 'Corrected') + handedness = ("Unknown", "Right", "Left", "Equal") + gender = ("Unknown", "Male", "Female") + scale = ("Unknown", "No", "Yes", "Corrected") # date pid = fid.read(66).decode() - pid = pid.split(' ', 2) + pid = pid.split(" ", 2) patient = {} if len(pid) >= 2: - patient['id'] = pid[0] - patient['name'] = pid[1] + patient["id"] = pid[0] + patient["name"] = pid[1] fid.seek(10, 1) # 10bytes reserved # Smoking / Alcohol abuse / drug abuse / medication - sadm = np.fromfile(fid, UINT8, 1)[0] - patient['smoking'] = scale[sadm % 4] - patient['alcohol_abuse'] = scale[(sadm >> 2) % 4] - patient['drug_abuse'] = scale[(sadm >> 4) % 4] - patient['medication'] = scale[(sadm >> 6) % 4] - patient['weight'] = np.fromfile(fid, UINT8, 1)[0] - if patient['weight'] == 0 or patient['weight'] == 255: - patient['weight'] = None - patient['height'] = np.fromfile(fid, UINT8, 1)[0] - if patient['height'] == 0 or patient['height'] == 255: - patient['height'] = None + sadm = read_from_file_or_buffer(fid, UINT8, 1)[0] + patient["smoking"] = scale[sadm % 4] + patient["alcohol_abuse"] = scale[(sadm >> 2) % 4] + patient["drug_abuse"] = scale[(sadm >> 4) % 4] + patient["medication"] = scale[(sadm >> 6) % 4] + patient["weight"] = read_from_file_or_buffer(fid, UINT8, 1)[0] + if patient["weight"] == 0 or patient["weight"] == 255: + patient["weight"] = None + patient["height"] = read_from_file_or_buffer(fid, UINT8, 1)[0] + if patient["height"] == 0 or patient["height"] == 255: + patient["height"] = None # Gender / Handedness / Visual Impairment - ghi = np.fromfile(fid, UINT8, 1)[0] - patient['sex'] = gender[ghi % 4] - patient['handedness'] = handedness[(ghi >> 2) % 4] - patient['visual'] = scale[(ghi >> 4) % 4] + ghi = read_from_file_or_buffer(fid, UINT8, 1)[0] + patient["sex"] = gender[ghi % 4] + patient["handedness"] = handedness[(ghi >> 2) % 4] + patient["visual"] = scale[(ghi >> 4) % 4] # Recording identification meas_id = {} - meas_id['recording_id'] = _edf_str(fid.read(64)).strip() - vhsv = np.fromfile(fid, UINT8, 4) + meas_id["recording_id"] = _edf_str(fid.read(64)).strip() + vhsv = read_from_file_or_buffer(fid, UINT8, 4) loc = {} if vhsv[3] == 0: - loc['vertpre'] = 10 * int(vhsv[0] >> 4) + int(vhsv[0] % 16) - loc['horzpre'] = 10 * int(vhsv[1] >> 4) + int(vhsv[1] % 16) - loc['size'] = 10 * int(vhsv[2] >> 4) + int(vhsv[2] % 16) + loc["vertpre"] = 10 * int(vhsv[0] >> 4) + int(vhsv[0] % 16) + loc["horzpre"] = 10 * int(vhsv[1] >> 4) + int(vhsv[1] % 16) + loc["size"] = 10 * int(vhsv[2] >> 4) + int(vhsv[2] % 16) else: - loc['vertpre'] = 29 - loc['horzpre'] 
= 29 - loc['size'] = 29 - loc['version'] = 0 - loc['latitude'] = \ - float(np.fromfile(fid, UINT32, 1)[0]) / 3600000 - loc['longitude'] = \ - float(np.fromfile(fid, UINT32, 1)[0]) / 3600000 - loc['altitude'] = float(np.fromfile(fid, INT32, 1)[0]) / 100 - meas_id['loc'] = loc - - meas_date = np.fromfile(fid, UINT64, 1)[0] + loc["vertpre"] = 29 + loc["horzpre"] = 29 + loc["size"] = 29 + loc["version"] = 0 + loc["latitude"] = ( + float(read_from_file_or_buffer(fid, UINT32, 1)[0]) / 3600000 + ) + loc["longitude"] = ( + float(read_from_file_or_buffer(fid, UINT32, 1)[0]) / 3600000 + ) + loc["altitude"] = float(read_from_file_or_buffer(fid, INT32, 1)[0]) / 100 + meas_id["loc"] = loc + + meas_date = read_from_file_or_buffer(fid, UINT64, 1)[0] if meas_date != 0: - meas_date = (datetime(1, 1, 1, tzinfo=timezone.utc) + - timedelta(meas_date * pow(2, -32) - 367)) + meas_date = datetime(1, 1, 1, tzinfo=timezone.utc) + timedelta( + meas_date * pow(2, -32) - 367 + ) else: meas_date = None - birthday = np.fromfile(fid, UINT64, 1).tolist()[0] + birthday = read_from_file_or_buffer(fid, UINT64, 1).tolist()[0] if birthday == 0: birthday = datetime(1, 1, 1, tzinfo=timezone.utc) else: - birthday = (datetime(1, 1, 1, tzinfo=timezone.utc) + - timedelta(birthday * pow(2, -32) - 367)) - patient['birthday'] = birthday - if patient['birthday'] != datetime(1, 1, 1, 0, 0, - tzinfo=timezone.utc): + birthday = datetime(1, 1, 1, tzinfo=timezone.utc) + timedelta( + birthday * pow(2, -32) - 367 + ) + patient["birthday"] = birthday + if patient["birthday"] != datetime(1, 1, 1, 0, 0, tzinfo=timezone.utc): today = datetime.now(tz=timezone.utc) - patient['age'] = today.year - patient['birthday'].year - today = today.replace(year=patient['birthday'].year) - if today < patient['birthday']: - patient['age'] -= 1 + patient["age"] = today.year - patient["birthday"].year + # fudge the day by -1 if today happens to be a leap day + day = 28 if today.month == 2 and today.day == 29 else today.day + today = today.replace(year=patient["birthday"].year, day=day) + if today < patient["birthday"]: + patient["age"] -= 1 else: - patient['age'] = None + patient["age"] = None - header_nbytes = np.fromfile(fid, UINT16, 1)[0] * 256 + header_nbytes = read_from_file_or_buffer(fid, UINT16, 1)[0] * 256 fid.seek(6, 1) # 6 bytes reserved - meas_id['equipment'] = np.fromfile(fid, UINT8, 8) - meas_id['ip'] = np.fromfile(fid, UINT8, 6) - patient['headsize'] = np.fromfile(fid, UINT16, 3) - patient['headsize'] = np.asarray(patient['headsize'], np.float32) - patient['headsize'] = np.ma.masked_array( - patient['headsize'], - np.equal(patient['headsize'], 0), None).filled() - ref = np.fromfile(fid, FLOAT32, 3) - gnd = np.fromfile(fid, FLOAT32, 3) - n_records = np.fromfile(fid, INT64, 1)[0] + meas_id["equipment"] = read_from_file_or_buffer(fid, UINT8, 8) + meas_id["ip"] = read_from_file_or_buffer(fid, UINT8, 6) + patient["headsize"] = read_from_file_or_buffer(fid, UINT16, 3) + patient["headsize"] = np.asarray(patient["headsize"], np.float32) + patient["headsize"] = np.ma.masked_array( + patient["headsize"], np.equal(patient["headsize"], 0), None + ).filled() + ref = read_from_file_or_buffer(fid, FLOAT32, 3) + gnd = read_from_file_or_buffer(fid, FLOAT32, 3) + n_records = read_from_file_or_buffer(fid, INT64, 1)[0] # record length in seconds - record_length = np.fromfile(fid, UINT32, 2) + record_length = read_from_file_or_buffer(fid, UINT32, 2) if record_length[0] == 0: - record_length[0] = 1. - warn('Header information is incorrect for record length. 
' - 'Default record length set to 1.') + record_length[0] = 1.0 + warn( + "Header information is incorrect for record length. " + "Default record length set to 1." + ) - nchan = np.fromfile(fid, UINT16, 1)[0] + nchan = int(read_from_file_or_buffer(fid, UINT16, 1)[0]) fid.seek(2, 1) # 2bytes reserved # Channels (variable header) @@ -1092,56 +1628,60 @@ def _read_gdf_header(fname, exclude, include=None): - Decimal factors codes: https://sourceforge.net/p/biosig/svn/HEAD/tree/trunk/biosig/doc/DecimalFactors.txt """ # noqa - units = np.fromfile(fid, UINT16, len(channels)).tolist() + units = read_from_file_or_buffer(fid, UINT16, len(channels)).tolist() unitcodes = np.array(units[:]) - edf_info['units'] = list() + edf_info["units"] = list() for i, unit in enumerate(units): if i in exclude: continue if unit == 4275: # microvolts - edf_info['units'].append(1e-6) + edf_info["units"].append(1e-6) elif unit == 4274: # millivolts - edf_info['units'].append(1e-3) + edf_info["units"].append(1e-3) elif unit == 512: # dimensionless - edf_info['units'].append(1) + edf_info["units"].append(1) elif unit == 0: - edf_info['units'].append(1) # unrecognized + edf_info["units"].append(1) # unrecognized else: - warn('Unsupported physical dimension for channel %d ' - '(assuming dimensionless). Please contact the ' - 'MNE-Python developers for support.' % i) - edf_info['units'].append(1) - edf_info['units'] = np.array(edf_info['units'], float) + warn( + f"Unsupported physical dimension for channel {i} " + "(assuming dimensionless). Please contact the " + "MNE-Python developers for support." + ) + edf_info["units"].append(1) + edf_info["units"] = np.array(edf_info["units"], float) ch_names = [ch_names[idx] for idx in sel] - physical_min = np.fromfile(fid, FLOAT64, len(channels)) - physical_max = np.fromfile(fid, FLOAT64, len(channels)) - digital_min = np.fromfile(fid, FLOAT64, len(channels)) - digital_max = np.fromfile(fid, FLOAT64, len(channels)) + physical_min = read_from_file_or_buffer(fid, FLOAT64, len(channels)) + physical_max = read_from_file_or_buffer(fid, FLOAT64, len(channels)) + digital_min = read_from_file_or_buffer(fid, FLOAT64, len(channels)) + digital_max = read_from_file_or_buffer(fid, FLOAT64, len(channels)) fid.seek(68 * len(channels), 1) # obsolete - lowpass = np.fromfile(fid, FLOAT32, len(channels)) - highpass = np.fromfile(fid, FLOAT32, len(channels)) - notch = np.fromfile(fid, FLOAT32, len(channels)) + lowpass = read_from_file_or_buffer(fid, FLOAT32, len(channels)) + highpass = read_from_file_or_buffer(fid, FLOAT32, len(channels)) + notch = read_from_file_or_buffer(fid, FLOAT32, len(channels)) # number of samples per record - n_samps = np.fromfile(fid, INT32, len(channels)) + n_samps = read_from_file_or_buffer(fid, INT32, len(channels)) # data type - dtype = np.fromfile(fid, INT32, len(channels)) + dtype = read_from_file_or_buffer(fid, INT32, len(channels)) channel = {} - channel['xyz'] = [np.fromfile(fid, FLOAT32, 3)[0] - for ch in channels] - - if edf_info['number'] < 2.19: - impedance = np.fromfile(fid, UINT8, - len(channels)).astype(float) + channel["xyz"] = [ + read_from_file_or_buffer(fid, FLOAT32, 3)[0] for ch in channels + ] + + if edf_info["number"] < 2.19: + impedance = read_from_file_or_buffer(fid, UINT8, len(channels)).astype( + float + ) impedance[impedance == 255] = np.nan - channel['impedance'] = pow(2, impedance / 8) + channel["impedance"] = pow(2, impedance / 8) fid.seek(19 * len(channels), 1) # reserved else: - tmp = np.fromfile(fid, FLOAT32, 5 * len(channels)) + tmp = 
read_from_file_or_buffer(fid, FLOAT32, 5 * len(channels)) tmp = tmp[::5] fZ = tmp[:] impedance = tmp[:] @@ -1155,80 +1695,103 @@ def _read_gdf_header(fname, exclude, include=None): assert fid.tell() == header_nbytes # total number of bytes for data - bytes_tot = np.sum([GDFTYPE_BYTE[t] * n_samps[i] - for i, t in enumerate(dtype)]) + bytes_tot = np.sum( + [GDFTYPE_BYTE[t] * n_samps[i] for i, t in enumerate(dtype)] + ) # Populate edf_info dtype_np, dtype_byte = _check_dtype_byte(dtype) edf_info.update( - bytes_tot=bytes_tot, ch_names=ch_names, + bytes_tot=bytes_tot, + ch_names=ch_names, data_offset=header_nbytes, - dtype_byte=dtype_byte, dtype_np=dtype_np, - digital_min=digital_min, digital_max=digital_max, - exclude=exclude, gnd=gnd, highpass=highpass, sel=sel, - impedance=impedance, lowpass=lowpass, meas_date=meas_date, - meas_id=meas_id, n_records=n_records, n_samps=n_samps, - nchan=nchan, notch=notch, subject_info=patient, - physical_max=physical_max, physical_min=physical_min, - record_length=record_length, ref=ref) + dtype_byte=dtype_byte, + dtype_np=dtype_np, + digital_min=digital_min, + digital_max=digital_max, + exclude=exclude, + gnd=gnd, + highpass=highpass, + sel=sel, + impedance=impedance, + lowpass=lowpass, + meas_date=meas_date, + meas_id=meas_id, + n_records=n_records, + n_samps=n_samps, + nchan=nchan, + notch=notch, + subject_info=patient, + physical_max=physical_max, + physical_min=physical_min, + record_length=record_length, + ref=ref, + ) # EVENT TABLE # ----------------------------------------------------------------- - etp = edf_info['data_offset'] + edf_info['n_records'] * \ - edf_info['bytes_tot'] + etp = ( + edf_info["data_offset"] + edf_info["n_records"] * edf_info["bytes_tot"] + ) fid.seek(etp) # skip data to go to event table etmode = fid.read(1).decode() - if etmode != '': + if etmode != "": etmode = np.fromstring(etmode, UINT8).tolist()[0] - if edf_info['number'] < 1.94: - sr = np.fromfile(fid, UINT8, 3) + if edf_info["number"] < 1.94: + sr = read_from_file_or_buffer(fid, UINT8, 3) event_sr = sr[0] for i in range(1, len(sr)): - event_sr = event_sr + sr[i] * 2**(i * 8) - n_events = np.fromfile(fid, UINT32, 1)[0] + event_sr = event_sr + sr[i] * 2 ** (i * 8) + n_events = read_from_file_or_buffer(fid, UINT32, 1)[0] else: - ne = np.fromfile(fid, UINT8, 3) - n_events = ne[0] - for i in range(1, len(ne)): - n_events = n_events + ne[i] * 2**(i * 8) - event_sr = np.fromfile(fid, FLOAT32, 1)[0] + ne = read_from_file_or_buffer(fid, UINT8, 3) + n_events = sum(int(ne[i]) << (i * 8) for i in range(len(ne))) + event_sr = read_from_file_or_buffer(fid, FLOAT32, 1)[0] - pos = np.fromfile(fid, UINT32, n_events) - 1 # 1-based inds - typ = np.fromfile(fid, UINT16, n_events) + pos = ( + read_from_file_or_buffer(fid, UINT32, n_events) - 1 + ) # 1-based inds + typ = read_from_file_or_buffer(fid, UINT16, n_events) if etmode == 3: - chn = np.fromfile(fid, UINT16, n_events) - dur = np.fromfile(fid, UINT32, n_events) + chn = read_from_file_or_buffer(fid, UINT16, n_events) + dur = read_from_file_or_buffer(fid, UINT32, n_events) else: chn = np.zeros(n_events, dtype=np.uint32) dur = np.ones(n_events, dtype=np.uint32) - np.clip(dur, 1, np.inf, out=dur) + np.maximum(dur, 1, out=dur) events = [n_events, pos, typ, chn, dur] - edf_info['event_sfreq'] = event_sr + edf_info["event_sfreq"] = event_sr - edf_info.update(events=events, sel=np.arange(len(edf_info['ch_names']))) + edf_info.update(events=events, sel=np.arange(len(edf_info["ch_names"]))) return edf_info -def 
_check_stim_channel(stim_channel, ch_names, - tal_ch_names=['EDF Annotations', 'BDF Annotations']): +def _check_stim_channel( + stim_channel, + ch_names, + tal_ch_names=("EDF Annotations", "BDF Annotations"), +): """Check that the stimulus channel exists in the current datafile.""" - DEFAULT_STIM_CH_NAMES = ['status', 'trigger'] + DEFAULT_STIM_CH_NAMES = ["status", "trigger"] if stim_channel is None or stim_channel is False: return [], [] if stim_channel is True: # convenient aliases - stim_channel = 'auto' + stim_channel = "auto" elif isinstance(stim_channel, str): - if stim_channel == 'auto': - if 'auto' in ch_names: - warn(RuntimeWarning, "Using `stim_channel='auto'` when auto" - " also corresponds to a channel name is ambiguous." - " Please use `stim_channel=['auto']`.") + if stim_channel == "auto": + if "auto" in ch_names: + warn( + RuntimeWarning, + "Using `stim_channel='auto'` when auto" + " also corresponds to a channel name is ambiguous." + " Please use `stim_channel=['auto']`.", + ) else: valid_stim_ch_names = DEFAULT_STIM_CH_NAMES else: @@ -1243,18 +1806,20 @@ def _check_stim_channel(stim_channel, ch_names, elif all([isinstance(s, int) for s in stim_channel]): valid_stim_ch_names = [ch_names[s].lower() for s in stim_channel] else: - raise ValueError('Invalid stim_channel') + raise ValueError("Invalid stim_channel") else: - raise ValueError('Invalid stim_channel') + raise ValueError("Invalid stim_channel") # Forbid the synthesis of stim channels from TAL Annotations - tal_ch_names_found = [ch for ch in valid_stim_ch_names - if ch in [t.lower() for t in tal_ch_names]] + tal_ch_names_found = [ + ch for ch in valid_stim_ch_names if ch in [t.lower() for t in tal_ch_names] + ] if len(tal_ch_names_found): - _msg = ('The synthesis of the stim channel is not supported' - ' since 0.18. Please remove {} from `stim_channel`' - ' and use `mne.events_from_annotations` instead' - ).format(tal_ch_names_found) + _msg = ( + "The synthesis of the stim channel is not supported since 0.18. Please " + f"remove {tal_ch_names_found} from `stim_channel` and use " + "`mne.events_from_annotations` instead." + ) raise ValueError(_msg) ch_names_low = [ch.lower() for ch in ch_names] @@ -1277,8 +1842,8 @@ def _find_exclude_idx(ch_names, exclude, include=None): if include: # find other than include channels if exclude: raise ValueError( - "'exclude' must be empty if 'include' is assigned. " - f"Got {exclude}.") + f"'exclude' must be empty if 'include' is assigned. Got {exclude}." + ) if isinstance(include, str): # regex for channel names indices_include = [] for idx, ch in enumerate(ch_names): @@ -1301,21 +1866,51 @@ def _find_exclude_idx(ch_names, exclude, include=None): def _find_tal_idx(ch_names): # Annotations / TAL Channels - accepted_tal_ch_names = ['EDF Annotations', 'BDF Annotations'] - tal_channel_idx = np.where(np.in1d(ch_names, accepted_tal_ch_names))[0] + accepted_tal_ch_names = ["EDF Annotations", "BDF Annotations"] + tal_channel_idx = np.where(np.isin(ch_names, accepted_tal_ch_names))[0] return tal_channel_idx +def _check_args(input_fname, preload, target_ext): + if not _file_like(input_fname): + input_fname = _check_fname(fname=input_fname, overwrite="read", must_exist=True) + ext = input_fname.suffix[1:].lower() + + if ext != target_ext: + raise NotImplementedError( + f"Only {target_ext.upper()} files are supported, got {ext}." 
+ ) + else: + if not preload: + raise ValueError("preload must be used with file-like objects") + + @fill_doc -def read_raw_edf(input_fname, eog=None, misc=None, stim_channel='auto', - exclude=(), infer_types=False, include=None, preload=False, - units=None, encoding='utf8', *, verbose=None): +def read_raw_edf( + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + infer_types=False, + include=None, + preload=False, + units=None, + encoding="utf8", + exclude_after_unique=False, + *, + verbose=None, +) -> RawEDF: """Reader function for EDF and EDF+ files. Parameters ---------- input_fname : path-like - Path to the EDF or EDF+ file. + Path to the EDF or EDF+ file or EDF/EDF+ file itself. If a file-like + object is provided, preload must be used. + + .. versionchanged:: 1.10 + Added support for file-like objects eog : list or tuple Names of channels or list of indices that should be designated EOG channels. Values should correspond to the electrodes in the file. @@ -1351,6 +1946,7 @@ def read_raw_edf(input_fname, eog=None, misc=None, stim_channel='auto', %(preload)s %(units_edf_bdf_io)s %(encoding_edf)s + %(exclude_after_unique)s %(verbose)s Returns @@ -1368,6 +1964,8 @@ def read_raw_edf(input_fname, eog=None, misc=None, stim_channel='auto', Notes ----- + %(edf_resamp_note)s + It is worth noting that in some special cases, it may be necessary to shift event values in order to retrieve correct event triggers. This depends on the triggering device used to perform the synchronization. For instance, in @@ -1408,26 +2006,50 @@ def read_raw_edf(input_fname, eog=None, misc=None, stim_channel='auto', The EDF specification allows storage of subseconds in measurement date. However, this reader currently sets subseconds to 0 by default. """ - input_fname = os.path.abspath(input_fname) - ext = os.path.splitext(input_fname)[1][1:].lower() - if ext != 'edf': - raise NotImplementedError(f'Only EDF files are supported, got {ext}.') - return RawEDF(input_fname=input_fname, eog=eog, misc=misc, - stim_channel=stim_channel, exclude=exclude, - infer_types=infer_types, preload=preload, include=include, - units=units, encoding=encoding, verbose=verbose) + _check_args(input_fname, preload, "edf") + + return RawEDF( + input_fname=input_fname, + eog=eog, + misc=misc, + stim_channel=stim_channel, + exclude=exclude, + infer_types=infer_types, + preload=preload, + include=include, + units=units, + encoding=encoding, + exclude_after_unique=exclude_after_unique, + verbose=verbose, + ) @fill_doc -def read_raw_bdf(input_fname, eog=None, misc=None, stim_channel='auto', - exclude=(), infer_types=False, include=None, preload=False, - units=None, encoding='utf8', *, verbose=None): +def read_raw_bdf( + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + infer_types=False, + include=None, + preload=False, + units=None, + encoding="utf8", + exclude_after_unique=False, + *, + verbose=None, +) -> RawBDF: """Reader function for BDF files. Parameters ---------- - input_fname : path-like - Path to the BDF file. + input_fname : path-like | file-like + Path to the BDF file of BDF file itself. If a file-like object is + provided, preload must be used. + + .. versionchanged:: 1.10 + Added support for file-like objects eog : list or tuple Names of channels or list of indices that should be designated EOG channels. Values should correspond to the electrodes in the file. 
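# --- Example (not part of the patch): reading an EDF from a file-like object ---
# A minimal usage sketch of the file-like support introduced above. The file
# name is hypothetical; any open binary handle or io.BytesIO should work, and
# preload=True is required for file-like inputs (enforced by _check_args above).
from io import BytesIO

import mne

with open("recording.edf", "rb") as fid:  # hypothetical file name
    raw = mne.io.read_raw_edf(fid, preload=True)

# The same works from an in-memory buffer, e.g. data received over a network:
blob = BytesIO(open("recording.edf", "rb").read())  # hypothetical file name
raw = mne.io.read_raw_edf(blob, preload=True)
print(raw.ch_names[:5], raw.info["sfreq"])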
@@ -1463,6 +2085,7 @@ def read_raw_bdf(input_fname, eog=None, misc=None, stim_channel='auto', %(preload)s %(units_edf_bdf_io)s %(encoding_edf)s + %(exclude_after_unique)s %(verbose)s Returns @@ -1479,6 +2102,12 @@ def read_raw_bdf(input_fname, eog=None, misc=None, stim_channel='auto', Notes ----- + :class:`mne.io.Raw` only stores signals with matching sampling frequencies. + Therefore, if mixed sampling frequency signals are requested, all signals + are upsampled to the highest loaded sampling frequency. In this case, using + preload=True is recommended, as otherwise, edge artifacts appear when + slices of the signal are requested. + Biosemi devices trigger codes are encoded in 16-bit format, whereas system codes (CMS in/out-of range, battery low, etc.) are coded in bits 16-23 of the status channel (see http://www.biosemi.com/faq/trigger_signals.htm). @@ -1513,25 +2142,45 @@ def read_raw_bdf(input_fname, eog=None, misc=None, stim_channel='auto', STIM channels by default. Use func:`mne.find_events` to parse events encoded in such analog stim channels. """ - input_fname = os.path.abspath(input_fname) - ext = os.path.splitext(input_fname)[1][1:].lower() - if ext != 'bdf': - raise NotImplementedError(f'Only BDF files are supported, got {ext}.') - return RawEDF(input_fname=input_fname, eog=eog, misc=misc, - stim_channel=stim_channel, exclude=exclude, - infer_types=infer_types, preload=preload, include=include, - units=units, encoding=encoding, verbose=verbose) + _check_args(input_fname, preload, "bdf") + + return RawBDF( + input_fname=input_fname, + eog=eog, + misc=misc, + stim_channel=stim_channel, + exclude=exclude, + infer_types=infer_types, + preload=preload, + include=include, + units=units, + encoding=encoding, + exclude_after_unique=exclude_after_unique, + verbose=verbose, + ) @fill_doc -def read_raw_gdf(input_fname, eog=None, misc=None, stim_channel='auto', - exclude=(), include=None, preload=False, verbose=None): +def read_raw_gdf( + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + include=None, + preload=False, + verbose=None, +) -> RawGDF: """Reader function for GDF files. Parameters ---------- - input_fname : path-like - Path to the GDF file. + input_fname : path-like | file-like + Path to the GDF file or GDF file itself. If a file-like object is + provided, preload must be used. + + .. versionchanged:: 1.10 + Added support for file-like objects eog : list or tuple Names of channels or list of indices that should be designated EOG channels. Values should correspond to the electrodes in the file. @@ -1573,41 +2222,42 @@ def read_raw_gdf(input_fname, eog=None, misc=None, stim_channel='auto', STIM channels by default. Use func:`mne.find_events` to parse events encoded in such analog stim channels. """ - input_fname = os.path.abspath(input_fname) - ext = os.path.splitext(input_fname)[1][1:].lower() - if ext != 'gdf': - raise NotImplementedError(f'Only BDF files are supported, got {ext}.') - return RawGDF(input_fname=input_fname, eog=eog, misc=misc, - stim_channel=stim_channel, exclude=exclude, preload=preload, - include=include, verbose=verbose) + _check_args(input_fname, preload, "gdf") + + return RawGDF( + input_fname=input_fname, + eog=eog, + misc=misc, + stim_channel=stim_channel, + exclude=exclude, + preload=preload, + include=include, + verbose=verbose, + ) @fill_doc -def _read_annotations_edf(annotations, encoding='utf8'): +def _read_annotations_edf(annotations, ch_names=None, encoding="utf8"): """Annotation File Reader. 
Parameters ---------- annotations : ndarray (n_chans, n_samples) | str Channel data in EDF+ TAL format or path to annotation file. + ch_names : list of string + List of channels' names. %(encoding_edf)s Returns ------- - onset : array of float, shape (n_annotations,) - The starting time of annotations in seconds after ``orig_time``. - duration : array of float, shape (n_annotations,) - Durations of the annotations in seconds. - description : array of str, shape (n_annotations,) - Array of strings containing description for each annotation. If a - string, all the annotations are given the same description. To reject - epochs, use description starting with keyword 'bad'. See example above. + annot : instance of Annotations + The annotations. """ - pat = '([+-]\\d+\\.?\\d*)(\x15(\\d+\\.?\\d*))?(\x14.*?)\x14\x00' - if isinstance(annotations, str): + pat = "([+-]\\d+\\.?\\d*)(\x15(\\d+\\.?\\d*))?(\x14.*?)\x14\x00" + if isinstance(annotations, str | Path): with open(annotations, "rb") as annot_file: triggers = re.findall(pat.encode(), annot_file.read()) - triggers = [tuple(map(lambda x: x.decode(), t)) for t in triggers] + triggers = [tuple(map(lambda x: x.decode(encoding), t)) for t in triggers] else: tals = bytearray() annotations = np.atleast_2d(annotations) @@ -1624,8 +2274,7 @@ def _read_annotations_edf(annotations, encoding='utf8'): else: this_chan = chan.astype(np.int64) # Exploit np vectorized processing - tals.extend(np.uint8([this_chan % 256, this_chan // 256]) - .flatten('F')) + tals.extend(np.uint8([this_chan % 256, this_chan // 256]).flatten("F")) try: triggers = re.findall(pat, tals.decode(encoding)) except UnicodeDecodeError as e: @@ -1634,14 +2283,35 @@ def _read_annotations_edf(annotations, encoding='utf8'): " You might want to try setting \"encoding='latin1'\"." ) from e - events = [] - offset = 0. + events = {} + offset = 0.0 for k, ev in enumerate(triggers): onset = float(ev[0]) + offset duration = float(ev[2]) if ev[2] else 0 - for description in ev[3].split('\x14')[1:]: + for description in ev[3].split("\x14")[1:]: if description: - events.append([onset, duration, description]) + if ( + "@@" in description + and ch_names is not None + and description.split("@@")[1] in ch_names + ): + description, ch_name = description.split("@@") + key = f"{onset}_{duration}_{description}" + else: + ch_name = None + key = f"{onset}_{duration}_{description}" + if key in events: + key += f"_{k}" # make key unique + if key in events and ch_name: + events[key][3] += (ch_name,) + else: + events[key] = [ + onset, + duration, + description, + (ch_name,) if ch_name else (), + ] + elif k == 0: # The startdate/time of a file is specified in the EDF+ header # fields 'startdate of recording' and 'starttime of recording'. @@ -1653,12 +2323,25 @@ def _read_annotations_edf(annotations, encoding='utf8'): # header. If X=0, then the .X may be omitted. 
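# --- Illustrative sketch (not the library code): merging per-channel annotations ---
# The dict keying above collapses EDF+ annotations that share onset, duration and
# description into one entry whose ch_names tuple lists every channel that carried
# them (the "description@@channel" convention). A simplified standalone version of
# that idea, with hypothetical channel names; the real reader above additionally
# keeps exact duplicates by uniquifying the key:
def merge_channel_annotations(triggers, ch_names):
    events = {}
    for onset, duration, description in triggers:
        ch_name = None
        if "@@" in description:
            desc_part, ch_part = description.split("@@", maxsplit=1)
            if ch_part in ch_names:
                description, ch_name = desc_part, ch_part
        key = (onset, duration, description)
        if key in events and ch_name:
            events[key] += (ch_name,)
        elif key not in events:
            events[key] = (ch_name,) if ch_name else ()
    return [(o, d, desc, chs) for (o, d, desc), chs in events.items()]

# Two TAL channels reporting the same artifact on different electrodes merge:
merge_channel_annotations(
    [(1.0, 0.5, "artifact@@EEG Fpz"), (1.0, 0.5, "artifact@@EEG Cz")],
    ch_names=["EEG Fpz", "EEG Cz"],
)  # -> [(1.0, 0.5, "artifact", ("EEG Fpz", "EEG Cz"))]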
offset = -onset - return zip(*events) if events else (list(), list(), list()) + if events: + onset, duration, description, annot_ch_names = zip(*events.values()) + else: + onset, duration, description, annot_ch_names = list(), list(), list(), list() + + assert len(onset) == len(duration) == len(description) == len(annot_ch_names) + + return Annotations( + onset=onset, + duration=duration, + description=description, + orig_time=None, + ch_names=annot_ch_names, + ) def _get_annotations_gdf(edf_info, sfreq): onset, duration, desc = list(), list(), list() - events = edf_info.get('events', None) + events = edf_info.get("events", None) # Annotations in GDF: events are stored as the following # list: `events = [n_events, pos, typ, chn, dur]` where pos is the # latency, dur is the duration in samples. They both are diff --git a/mne/io/edf/tests/__init__.py b/mne/io/edf/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/edf/tests/__init__.py +++ b/mne/io/edf/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index bae740ba7ef..1760081bac4 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -1,35 +1,41 @@ -# -*- coding: utf-8 -*- -# Authors: Teon Brooks -# Martin Billinger -# Alan Leggitt -# Alexandre Barachant -# Stefan Appelhoff -# Joan Massich -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. +import datetime from contextlib import nullcontext from functools import partial +from io import BytesIO from pathlib import Path import numpy as np -from numpy.testing import (assert_array_almost_equal, assert_array_equal, - assert_equal, assert_allclose) -from scipy.io import loadmat - import pytest +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, +) +from scipy.io import loadmat -from mne import pick_types, Annotations -from mne.annotations import events_from_annotations, read_annotations +from mne import Annotations, pick_types +from mne._fiff.pick import channel_indices_by_type, get_channel_type_constants +from mne.annotations import _ndarray_ch_names, events_from_annotations, read_annotations from mne.datasets import testing -from mne.utils import requires_pandas, _record_warnings -from mne.io import read_raw_edf, read_raw_bdf, read_raw_fif, edf, read_raw_gdf +from mne.io import edf, read_raw_bdf, read_raw_edf, read_raw_fif, read_raw_gdf +from mne.io.edf.edf import ( + _edf_str, + _parse_prefilter_string, + _prefilter_float, + _read_annotations_edf, + _read_ch, + _read_edf_header, + _read_header, + _set_prefilter, +) from mne.io.tests.test_raw import _test_raw_reader -from mne.io.edf.edf import (_read_annotations_edf, - _read_ch, _parse_prefilter_string, _edf_str, - _read_edf_header, _read_header) -from mne.io.pick import channel_indices_by_type, get_channel_type_constants from mne.tests.test_annotations import _assert_annotations_equal +from mne.utils import _record_warnings td_mark = testing._pytest_mark() @@ -40,30 +46,24 @@ duplicate_channel_labels_path = data_dir / "duplicate_channel_labels.edf" edf_uneven_path = data_dir / "test_uneven_samp.edf" bdf_eeglab_path = data_dir / "test_bdf_eeglab.mat" -edf_eeglab_path = data_dir / "test_edf_eeglab.mat" -edf_uneven_eeglab_path = data_dir / "test_uneven_samp.mat" edf_stim_channel_path = data_dir / 
"test_edf_stim_channel.edf" edf_txt_stim_channel_path = data_dir / "test_edf_stim_channel.txt" data_path = testing.data_path(download=False) edf_stim_resamp_path = data_path / "EDF" / "test_edf_stim_resamp.edf" -edf_overlap_annot_path = ( - data_path / "EDF" / "test_edf_overlapping_annotations.edf" -) +edf_overlap_annot_path = data_path / "EDF" / "test_edf_overlapping_annotations.edf" edf_reduced = data_path / "EDF" / "test_reduced.edf" edf_annot_only = data_path / "EDF" / "SC4001EC-Hypnogram.edf" bdf_stim_channel_path = data_path / "BDF" / "test_bdf_stim_channel.bdf" -bdf_multiple_annotations_path = ( - data_path / "BDF" / "multiple_annotation_chans.bdf" -) +bdf_multiple_annotations_path = data_path / "BDF" / "multiple_annotation_chans.bdf" test_generator_bdf = data_path / "BDF" / "test_generator_2.bdf" test_generator_edf = data_path / "EDF" / "test_generator_2.edf" edf_annot_sub_s_path = data_path / "EDF" / "subsecond_starttime.edf" edf_chtypes_path = data_path / "EDF" / "chtypes_edf.edf" edf_utf8_annotations = data_path / "EDF" / "test_utf8_annotations.edf" -eog = ['REOG', 'LEOG', 'IEOG'] -misc = ['EXG1', 'EXG5', 'EXG8', 'M1', 'M2'] +eog = ["REOG", "LEOG", "IEOG"] +misc = ["EXG1", "EXG5", "EXG8", "M1", "M2"] def test_orig_units(): @@ -73,16 +73,16 @@ def test_orig_units(): # Test original units orig_units = raw._orig_units assert len(orig_units) == len(raw.ch_names) - assert orig_units['A1'] == 'µV' # formerly 'uV' edit by _check_orig_units + assert orig_units["A1"] == "µV" # formerly 'uV' edit by _check_orig_units del orig_units - raw.rename_channels(dict(A1='AA')) - assert raw._orig_units['AA'] == 'µV' - raw.rename_channels(dict(AA='A1')) + raw.rename_channels(dict(A1="AA")) + assert raw._orig_units["AA"] == "µV" + raw.rename_channels(dict(AA="A1")) raw_back = raw.copy().pick(raw.ch_names[:1]) # _pick_drop_channels - assert raw_back.ch_names == ['A1'] - assert set(raw_back._orig_units) == {'A1'} + assert raw_back.ch_names == ["A1"] + assert set(raw_back._orig_units) == {"A1"} raw_back.add_channels([raw.copy().pick(raw.ch_names[1:])]) assert raw_back.ch_names == raw.ch_names assert set(raw_back._orig_units) == set(raw.ch_names) @@ -92,41 +92,63 @@ def test_orig_units(): def test_units_params(): """Test enforcing original channel units.""" - with pytest.raises(ValueError, - match=r"Unit for channel .* is present .* cannot " - "overwrite it"): - _ = read_raw_edf(edf_path, units='V', preload=True) + with pytest.raises( + ValueError, match=r"Unit for channel .* is present .* cannot overwrite it" + ): + _ = read_raw_edf(edf_path, units="V", preload=True) def test_edf_temperature(monkeypatch): """Test that we can parse temperature channel type.""" raw = read_raw_edf(edf_path) - assert raw.get_channel_types()[0] == 'eeg' + assert raw.get_channel_types()[0] == "eeg" def _first_chan_temp(*args, **kwargs): out, orig_units = _read_edf_header(*args, **kwargs) - out['ch_types'][0] = 'TEMP' + out["ch_types"][0] = "TEMP" return out, orig_units - monkeypatch.setattr(edf.edf, '_read_edf_header', _first_chan_temp) + monkeypatch.setattr(edf.edf, "_read_edf_header", _first_chan_temp) raw = read_raw_edf(edf_path) - assert 'temperature' in raw - assert raw.get_channel_types()[0] == 'temperature' + assert "temperature" in raw + assert raw.get_channel_types()[0] == "temperature" +@testing.requires_testing_data def test_subject_info(tmp_path): """Test exposure of original channel units.""" - raw = read_raw_edf(edf_path) - assert raw.info['subject_info'] is None # XXX this is arguably a bug - edf_info = 
raw._raw_extras[0] - assert edf_info['subject_info'] is not None - want = {'id': 'X', 'sex': 'X', 'birthday': 'X', 'name': 'X'} + raw = read_raw_edf(edf_stim_resamp_path, preload=True) + + # check subject_info from `info` + assert raw.info["subject_info"] is not None + want = { + "his_id": "X", + "sex": 1, + "birthday": datetime.date(1967, 10, 9), + "last_name": "X", + } for key, val in want.items(): - assert edf_info['subject_info'][key] == val, key - fname = tmp_path / 'test_raw.fif' + assert raw.info["subject_info"][key] == val, key + + # add information + raw.info["subject_info"]["hand"] = 0 + + # save raw to FIF and load it back + fname = tmp_path / "test_raw.fif" raw.save(fname) raw = read_raw_fif(fname) - assert raw.info['subject_info'] is None # XXX should eventually round-trip + + # check subject_info from `info` + assert raw.info["subject_info"] is not None + want = { + "his_id": "X", + "sex": 1, + "birthday": datetime.date(1967, 10, 9), + "last_name": "X", + "hand": 0, + } + for key, val in want.items(): + assert raw.info["subject_info"][key] == val def test_bdf_data(): @@ -134,83 +156,160 @@ def test_bdf_data(): # XXX BDF data for these is around 0.01 when it should be in the uV range, # probably some bug test_scaling = False - raw_py = _test_raw_reader(read_raw_bdf, input_fname=bdf_path, - eog=eog, misc=misc, - exclude=['M2', 'IEOG'], - test_scaling=test_scaling, - ) + raw_py = _test_raw_reader( + read_raw_bdf, + input_fname=bdf_path, + eog=eog, + misc=misc, + exclude=["M2", "IEOG"], + test_scaling=test_scaling, + ) assert len(raw_py.ch_names) == 71 - raw_py = _test_raw_reader(read_raw_bdf, input_fname=bdf_path, - montage='biosemi64', eog=eog, misc=misc, - exclude=['M2', 'IEOG'], - test_scaling=test_scaling) + raw_py = _test_raw_reader( + read_raw_bdf, + input_fname=bdf_path, + montage="biosemi64", + eog=eog, + misc=misc, + exclude=["M2", "IEOG"], + test_scaling=test_scaling, + ) assert len(raw_py.ch_names) == 71 - assert 'RawEDF' in repr(raw_py) - picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads') + assert "RawBDF" in repr(raw_py) + picks = pick_types(raw_py.info, meg=False, eeg=True, exclude="bads") data_py, _ = raw_py[picks] # this .mat was generated using the EEG Lab Biosemi Reader raw_eeglab = loadmat(bdf_eeglab_path) - raw_eeglab = raw_eeglab['data'] * 1e-6 # data are stored in microvolts + raw_eeglab = raw_eeglab["data"] * 1e-6 # data are stored in microvolts data_eeglab = raw_eeglab[picks] # bdf saved as a single, resolution to seven decimal points in matlab assert_array_almost_equal(data_py, data_eeglab, 8) # Manually checking that float coordinates are imported - assert (raw_py.info['chs'][0]['loc']).any() - assert (raw_py.info['chs'][25]['loc']).any() - assert (raw_py.info['chs'][63]['loc']).any() + assert (raw_py.info["chs"][0]["loc"]).any() + assert (raw_py.info["chs"][25]["loc"]).any() + assert (raw_py.info["chs"][63]["loc"]).any() @testing.requires_testing_data def test_bdf_crop_save_stim_channel(tmp_path): """Test EDF with various sampling rates.""" raw = read_raw_bdf(bdf_stim_channel_path) - raw.save(tmp_path / 'test-raw.fif', tmin=1.2, tmax=4.0, overwrite=True) + raw.save(tmp_path / "test-raw.fif", tmin=1.2, tmax=4.0, overwrite=True) @testing.requires_testing_data -@pytest.mark.parametrize('fname', [ - edf_reduced, - edf_overlap_annot_path, -]) -@pytest.mark.parametrize('stim_channel', (None, False, 'auto')) +@pytest.mark.parametrize( + "fname", + [ + edf_reduced, + edf_overlap_annot_path, + ], +) +@pytest.mark.parametrize("stim_channel", 
(None, False, "auto")) def test_edf_others(fname, stim_channel): """Test EDF with various sampling rates and overlapping annotations.""" _test_raw_reader( - read_raw_edf, input_fname=fname, stim_channel=stim_channel, - verbose='error') + read_raw_edf, + input_fname=fname, + stim_channel=stim_channel, + verbose="error", + test_preloading=False, + preload=True, # no preload=False for mixed sfreqs + ) + + +@testing.requires_testing_data +@pytest.mark.parametrize("stim_channel", (None, False, "auto")) +def test_edf_different_sfreqs(stim_channel): + """Test EDF with various sampling rates.""" + rng = np.random.RandomState(0) + # load with and without preloading, should produce the same results + raw1 = read_raw_edf( + input_fname=edf_reduced, + stim_channel=stim_channel, + verbose="error", + preload=False, + ) + raw2 = read_raw_edf( + input_fname=edf_reduced, + stim_channel=stim_channel, + verbose="error", + preload=True, + ) + + picks = rng.permutation(np.arange(len(raw1.ch_names) - 1))[:10] + data1, times1 = raw1[picks, :] + data2, times2 = raw2[picks, :] + assert_allclose(data1, data2, err_msg="Data mismatch with preload") + assert_allclose(times1, times2) + + # loading slices should throw a warning as they have different + # edge artifacts than when loading the entire file at once + with pytest.warns(RuntimeWarning, match="mixed sampling frequencies"): + data1, times1 = raw1[picks, :512] + data2, times2 = raw2[picks, :512] + + # should NOT throw a warning when loading channels that have all the same + # sampling frequency - here, no edge artifacts can appear + picks = np.arange(15, 20) # these channels all have 512 Hz + data1, times1 = raw1[picks, :512] + data2, times2 = raw2[picks, :512] + assert_allclose(data1, data2, err_msg="Data mismatch with preload") + assert_allclose(times1, times2) + + +@testing.requires_testing_data +@pytest.mark.parametrize("stim_channel", (None, False, "auto")) +def test_edf_different_sfreqs_nopreload(stim_channel): + """Test loading smaller sfreq channels without preloading.""" + # load without preloading, then load a channel that has smaller sfreq + # as other channels, produced an error, see mne-python/issues/12897 + + for i in range(1, 13): + raw = read_raw_edf(input_fname=edf_reduced, verbose="error", preload=False) + + # this should work for channels of all sfreq, even if larger sfreqs + # are present in the file + x1 = raw.get_data(picks=[f"A{i}"], return_times=False) + # load next ch, this is sometimes with a higher sometimes a lower sfreq + x2 = raw.get_data([f"A{i + 1}"], return_times=False) + assert x1.shape == x2.shape def test_edf_data_broken(tmp_path): """Test edf files.""" - raw = _test_raw_reader(read_raw_edf, input_fname=edf_path, - exclude=['Ergo-Left', 'H10'], verbose='error') + raw = _test_raw_reader( + read_raw_edf, + input_fname=edf_path, + exclude=["Ergo-Left", "H10"], + verbose="error", + ) raw_py = read_raw_edf(edf_path) data = raw_py.get_data() assert_equal(len(raw.ch_names) + 2, len(raw_py.ch_names)) # Test with number of records not in header (-1). 
broken_fname = tmp_path / "broken.edf" - with open(edf_path, 'rb') as fid_in: + with open(edf_path, "rb") as fid_in: fid_in.seek(0, 2) n_bytes = fid_in.tell() fid_in.seek(0, 0) rbytes = fid_in.read() - with open(broken_fname, 'wb') as fid_out: + with open(broken_fname, "wb") as fid_out: fid_out.write(rbytes[:236]) - fid_out.write(b'-1 ') - fid_out.write(rbytes[244:244 + int(n_bytes * 0.4)]) - with pytest.warns(RuntimeWarning, - match='records .* not match the file size'): + fid_out.write(b"-1 ") + fid_out.write(rbytes[244 : 244 + int(n_bytes * 0.4)]) + with pytest.warns(RuntimeWarning, match="records .* not match the file size"): raw = read_raw_edf(broken_fname, preload=True) read_raw_edf(broken_fname, exclude=raw.ch_names[:132], preload=True) # Test with \x00's in the data - with open(broken_fname, 'wb') as fid_out: + with open(broken_fname, "wb") as fid_out: fid_out.write(rbytes[:184]) - assert rbytes[184:192] == b'36096 ' - fid_out.write(rbytes[184:192].replace(b' ', b'\x00')) + assert rbytes[184:192] == b"36096 " + fid_out.write(rbytes[184:192].replace(b" ", b"\x00")) fid_out.write(rbytes[192:]) raw_py = read_raw_edf(broken_fname) data_new = raw_py.get_data() @@ -219,8 +318,8 @@ def test_edf_data_broken(tmp_path): def test_duplicate_channel_labels_edf(): """Test reading edf file with duplicate channel names.""" - EXPECTED_CHANNEL_NAMES = ['EEG F1-Ref-0', 'EEG F2-Ref', 'EEG F1-Ref-1'] - with pytest.warns(RuntimeWarning, match='Channel names are not unique'): + EXPECTED_CHANNEL_NAMES = ["EEG F1-Ref-0", "EEG F2-Ref", "EEG F1-Ref-1"] + with pytest.warns(RuntimeWarning, match="Channel names are not unique"): raw = read_raw_edf(duplicate_channel_labels_path, preload=False) assert raw.ch_names == EXPECTED_CHANNEL_NAMES @@ -229,55 +328,63 @@ def test_duplicate_channel_labels_edf(): def test_parse_annotation(tmp_path): """Test parsing the tal channel.""" # test the parser - annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00' - b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00' - b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00' - b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00' - b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00' - b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00') - annot_file = tmp_path / 'annotations.txt' + annot = ( + b"+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00" + b"+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00" + b"+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00" + b"+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00" + b"+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00" + b"+123\x14\x14\x00\x00\x00\x00\x00\x00\x00" + ) + annot_file = tmp_path / "annotations.txt" with open(annot_file, "wb") as f: f.write(annot) annot = [a for a in bytes(annot)] annot[1::2] = [a * 256 for a in annot[1::2]] - tal_channel_A = np.array(list(map(sum, zip(annot[0::2], annot[1::2]))), - dtype=np.int64) + tal_channel_A = np.array( + list(map(sum, zip(annot[0::2], annot[1::2]))), dtype=np.int64 + ) - with open(annot_file, 'rb') as fid: + with open(annot_file, "rb") as fid: # ch_data = np.fromfile(fid, dtype=' -# Nicolas Barascud -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
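# --- Example (not part of the patch): how the TAL pattern splits records ---
# The regex used by _read_annotations_edf (and exercised by test_parse_annotation
# above) pulls the onset, an optional duration and one or more descriptions out of
# each EDF+ TAL record. A small self-contained illustration on a decoded TAL string:
import re

pat = "([+-]\\d+\\.?\\d*)(\x15(\\d+\\.?\\d*))?(\x14.*?)\x14\x00"
tal = "+180\x14Lights off\x14Close door\x14\x00" "+3.14\x1504.20\x14nothing\x14\x00"
for onset, _, duration, desc in re.findall(pat, tal):
    print(float(onset), float(duration) if duration else 0.0, desc.split("\x14")[1:])
# 180.0 0.0 ['Lights off', 'Close door']
# 3.14 4.2 ['nothing']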
-from datetime import datetime, timezone, timedelta import shutil +from datetime import date, datetime, timedelta, timezone +from io import BytesIO -import pytest -from numpy.testing import (assert_array_almost_equal, assert_array_equal, - assert_equal) import numpy as np +import pytest import scipy.io as sio +from numpy.testing import assert_allclose, assert_array_equal, assert_equal +from mne import events_from_annotations, find_events, pick_types from mne.datasets import testing from mne.io import read_raw_gdf from mne.io.tests.test_raw import _test_raw_reader -from mne import pick_types, find_events, events_from_annotations data_path = testing.data_path(download=False) gdf1_path = data_path / "GDF" / "test_gdf_1.25" @@ -32,15 +31,34 @@ def test_gdf_data(): misc=None, preload=True, ) - picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads') + picks = pick_types(raw.info, meg=False, eeg=True, exclude="bads") data, _ = raw[picks] # Test Status is added as event - EXPECTED_EVS_ONSETS = raw._raw_extras[0]['events'][1] + EXPECTED_EVS_ONSETS = raw._raw_extras[0]["events"][1] EXPECTED_EVS_ID = { - '{}'.format(evs): i for i, evs in enumerate( - [32769, 32770, 33024, 33025, 33026, 33027, 33028, 33029, 33040, - 33041, 33042, 33043, 33044, 33045, 33285, 33286], 1) + f"{evs}": i + for i, evs in enumerate( + [ + 32769, + 32770, + 33024, + 33025, + 33026, + 33027, + 33028, + 33029, + 33040, + 33041, + 33042, + 33043, + 33044, + 33045, + 33285, + 33286, + ], + 1, + ) } evs, evs_id = events_from_annotations(raw) assert_array_equal(evs[:, 0], EXPECTED_EVS_ONSETS) @@ -52,29 +70,28 @@ def test_gdf_data(): data_biosig = raw_biosig[picks] # Assert data are almost equal - assert_array_almost_equal(data, data_biosig, 8) + assert_allclose(data, data_biosig, rtol=1e-8) # Test for events assert len(raw.annotations.duration == 963) # gh-5604 - assert raw.info['meas_date'] is None + assert raw.info["meas_date"] is None @testing.requires_testing_data def test_gdf2_birthday(tmp_path): """Test reading raw GDF 2.x files.""" - new_fname = tmp_path / 'temp.gdf' + new_fname = tmp_path / "temp.gdf" shutil.copyfile(gdf2_path.with_name(gdf2_path.name + ".gdf"), new_fname) # go back 44.5 years so the subject should show up as 44 - offset_edf = ( # to their ref - datetime.now(tz=timezone.utc) - - datetime(1, 1, 1, tzinfo=timezone.utc) + offset_edf = datetime.now(tz=timezone.utc) - datetime( # to their ref + 1, 1, 1, tzinfo=timezone.utc ) offset_44_yr = offset_edf - timedelta(days=int(365 * 44.5)) # 44.5 yr ago offset_44_yr_days = offset_44_yr.total_seconds() / (24 * 60 * 60) # days - d = (int(offset_44_yr_days) + 367) * 2 ** 32 # with their conversion - with open(new_fname, 'r+b') as fid: + d = (int(offset_44_yr_days) + 367) * 2**32 # with their conversion + with open(new_fname, "r+b") as fid: fid.seek(176, 0) assert np.fromfile(fid, np.uint64, 1)[0] == 0 fid.seek(176, 0) @@ -82,9 +99,14 @@ def test_gdf2_birthday(tmp_path): fid.seek(176, 0) assert np.fromfile(fid, np.uint64, 1)[0] == d raw = read_raw_gdf(new_fname, eog=None, misc=None, preload=True) - assert raw._raw_extras[0]['subject_info']['age'] == 44 - # XXX this is a bug, it should be populated... 
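# --- Worked example (not part of the patch): the GDF 2.x date encoding ---
# test_gdf2_birthday above writes the birthday with the same conversion the header
# reader uses: the stored uint64, scaled by 2**-32, gives a day count referenced
# 367 days before 0001-01-01, and 0 means "not set". A simple round trip:
from datetime import datetime, timedelta, timezone

def decode_gdf_date(value):
    if value == 0:
        return None  # header field left empty
    return datetime(1, 1, 1, tzinfo=timezone.utc) + timedelta(value * pow(2, -32) - 367)

def encode_gdf_date(dt):
    days = (dt - datetime(1, 1, 1, tzinfo=timezone.utc)).total_seconds() / 86400
    return int((days + 367) * 2**32)

stamp = encode_gdf_date(datetime(1980, 10, 9, tzinfo=timezone.utc))
assert decode_gdf_date(stamp).date() == datetime(1980, 10, 9).date()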
- assert raw.info['subject_info'] is None + assert "subject_info" not in raw._raw_extras[0] + assert raw.info["subject_info"] is not None + birthdate = datetime(1, 1, 1, tzinfo=timezone.utc) + offset_44_yr + assert raw.info["subject_info"]["birthday"] == date( + birthdate.year, + birthdate.month, + birthdate.day, + ) @testing.requires_testing_data @@ -96,18 +118,18 @@ def test_gdf2_data(): misc=None, preload=True, ) - assert raw._raw_extras[0]['subject_info']['age'] is None + assert raw.info["subject_info"]["birthday"] == date(1, 1, 1) - picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads') + picks = pick_types(raw.info, meg=False, eeg=True, exclude="bads") data, _ = raw[picks] # This .mat was generated using the official biosig matlab package mat = sio.loadmat(gdf2_path.with_name(gdf2_path.name + "_biosig.mat")) - data_biosig = mat['dat'] * 1e-6 # data are stored in microvolts + data_biosig = mat["dat"] * 1e-6 # data are stored in microvolts data_biosig = data_biosig[picks] # Assert data are almost equal - assert_array_almost_equal(data, data_biosig, 8) + assert_allclose(data, data_biosig, rtol=1e-8) # Find events events = find_events(raw, verbose=1) @@ -116,7 +138,7 @@ def test_gdf2_data(): assert_array_equal(events[:, 2], [20, 28]) # gh-5604 - assert raw.info['meas_date'] is None + assert raw.info["meas_date"] is None _test_raw_reader( read_raw_gdf, input_fname=gdf2_path.with_name(gdf2_path.name + ".gdf"), @@ -129,38 +151,51 @@ def test_gdf2_data(): @testing.requires_testing_data def test_one_channel_gdf(): """Test a one-channel GDF file.""" - with pytest.warns(RuntimeWarning, match='different highpass'): - ecg = read_raw_gdf(gdf_1ch_path, preload=True) - assert ecg['ECG'][0].shape == (1, 4500) - assert 150.0 == ecg.info['sfreq'] + ecg = read_raw_gdf(gdf_1ch_path, preload=True) + assert ecg["ECG"][0].shape == (1, 4500) + assert 150.0 == ecg.info["sfreq"] @testing.requires_testing_data def test_gdf_exclude_channels(): """Test reading GDF data with excluded channels.""" raw = read_raw_gdf( - gdf1_path.with_name(gdf1_path.name + ".gdf"), exclude=('FP1', 'O1') - ) - assert 'FP1' not in raw.ch_names - assert 'O1' not in raw.ch_names - raw = read_raw_gdf( - gdf2_path.with_name(gdf2_path.name + ".gdf"), exclude=('Fp1', 'O1') + gdf1_path.with_name(gdf1_path.name + ".gdf"), exclude=("FP1", "O1") ) - assert 'Fp1' not in raw.ch_names - assert 'O1' not in raw.ch_names + assert "FP1" not in raw.ch_names + assert "O1" not in raw.ch_names raw = read_raw_gdf( - gdf2_path.with_name(gdf2_path.name + ".gdf"), exclude=".+z$" + gdf2_path.with_name(gdf2_path.name + ".gdf"), exclude=("Fp1", "O1") ) - assert 'AFz' not in raw.ch_names - assert 'Cz' not in raw.ch_names - assert 'Pz' not in raw.ch_names - assert 'Oz' not in raw.ch_names + assert "Fp1" not in raw.ch_names + assert "O1" not in raw.ch_names + raw = read_raw_gdf(gdf2_path.with_name(gdf2_path.name + ".gdf"), exclude=".+z$") + assert "AFz" not in raw.ch_names + assert "Cz" not in raw.ch_names + assert "Pz" not in raw.ch_names + assert "Oz" not in raw.ch_names @testing.requires_testing_data def test_gdf_include(): """Test reading GDF data with include.""" raw = read_raw_gdf( - gdf1_path.with_name(gdf1_path.name + ".gdf"), include=('FP1', 'O1') + gdf1_path.with_name(gdf1_path.name + ".gdf"), include=("FP1", "O1") ) - assert sorted(raw.ch_names) == ['FP1', 'O1'] + assert sorted(raw.ch_names) == ["FP1", "O1"] + + +@testing.requires_testing_data +def test_gdf_read_from_file_like(): + """Test that RawGDF is able to read from file-like objects for 
GDF files.""" + channels = "FP1 FP2 F5 AFz F6 T7 Cz T8 P7 P3 Pz P4 P8 O1 Oz O2".split() + fname = gdf1_path.with_name(gdf1_path.name + ".gdf") + with open(fname, "rb") as blob: + raw = read_raw_gdf(blob, preload=True) + assert raw.ch_names == channels + data = raw.get_data() + data_2 = read_raw_gdf(fname, preload=True).get_data() + assert_allclose(data, data_2) + + with pytest.raises(Exception, match="Bad GDF file provided."): + read_raw_gdf(BytesIO(), preload=True) diff --git a/mne/io/eeglab/__init__.py b/mne/io/eeglab/__init__.py index 1573360162d..6ffc4e416cc 100644 --- a/mne/io/eeglab/__init__.py +++ b/mne/io/eeglab/__init__.py @@ -1,5 +1,7 @@ """EEGLAB module for conversion to FIF.""" -# Author: Mainak Jas +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .eeglab import read_raw_eeglab, read_epochs_eeglab diff --git a/mne/io/eeglab/_eeglab.py b/mne/io/eeglab/_eeglab.py index 71b0ce54388..78df0be366b 100644 --- a/mne/io/eeglab/_eeglab.py +++ b/mne/io/eeglab/_eeglab.py @@ -1,6 +1,17 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + import numpy as np -from ...utils import _import_pymatreader_funcs +try: + from scipy.io.matlab import MatlabFunction, MatlabOpaque +except ImportError: # scipy < 1.8 + from scipy.io.matlab.mio5 import MatlabFunction + from scipy.io.matlab.mio5_params import MatlabOpaque +from scipy.io import loadmat, whosmat + +from ...utils import _import_pymatreader_funcs, _soft_import, warn def _todict_from_np_struct(data): # taken from pymatreader.utils @@ -26,13 +37,7 @@ def _todict_from_np_struct(data): # taken from pymatreader.utils def _handle_scipy_ndarray(data): # taken from pymatreader.utils - try: - from scipy.io.matlab import MatlabFunction - except ImportError: # scipy < 1.8 - from scipy.io.matlab.mio5 import MatlabFunction - - if data.dtype == np.dtype('object') and not \ - isinstance(data, MatlabFunction): + if data.dtype == np.dtype("object") and not isinstance(data, MatlabFunction): as_list = [] for element in data: as_list.append(_check_for_scipy_mat_struct(element)) @@ -49,18 +54,13 @@ def _handle_scipy_ndarray(data): # taken from pymatreader.utils def _check_for_scipy_mat_struct(data): # taken from pymatreader.utils """Convert all scipy.io.matlab.mio5_params.mat_struct elements.""" - try: - from scipy.io.matlab import MatlabOpaque - except ImportError: # scipy < 1.8 - from scipy.io.matlab.mio5_params import MatlabOpaque - if isinstance(data, dict): for key in data: data[key] = _check_for_scipy_mat_struct(data[key]) if isinstance(data, MatlabOpaque): try: - if data[0][2] == b'string': + if data[0][2] == b"string": return None except IndexError: pass @@ -71,12 +71,201 @@ def _check_for_scipy_mat_struct(data): # taken from pymatreader.utils return data -def _readmat(fname, uint16_codec=None): +def _scipy_reader(file_name, variable_names=None, uint16_codec=None): + """Load with scipy and then run the check function.""" + mat_data = loadmat( + file_name, + squeeze_me=True, + mat_dtype=False, + variable_names=variable_names, + uint16_codec=uint16_codec, + ) + return _check_for_scipy_mat_struct(mat_data) + + +def _whosmat_hdf5(fname: str): + """List variables in a MATLAB v7.3 (HDF5) .mat file without loading data. + + This function provides similar functionality to :func:`scipy.io.whosmat` but + for MATLAB v7.3 files stored in HDF5 format, which are not supported by SciPy. 
+ + Parameters + ---------- + fname : str | PathLike + Path to the MATLAB v7.3 (.mat) file. + + Returns + ------- + variables : list of tuple + A list of (name, shape, class) tuples for each variable in the file. + The name is a string, shape is a tuple of ints, and class is a string + indicating the MATLAB data type (e.g., 'double', 'int32', 'struct'). + + Notes + ----- + This function only works with MATLAB v7.3 (HDF5) files. For earlier versions, + use :func:`scipy.io.whosmat` instead. + + See Also + -------- + scipy.io.whosmat : List variables in classic MATLAB files. + """ + h5py = _soft_import("h5py", purpose="MATLAB v7.3 I/O", strict=False) + if h5py is False: + raise ModuleNotFoundError( + "h5py is required to inspect MATLAB v7.3 files preload=`False` " + "Please install h5py to use this functionality." + ) + + variables = [] + + with h5py.File(str(fname), "r") as f: + for name in f.keys(): + node = f[name] + + # Extract shape from HDF5 object + if isinstance(node, h5py.Dataset): + shape = tuple(int(x) for x in node.shape) + else: + shape = () + for attr_key in ( + "MATLAB_shape", + "MATLAB_Size", + "MATLAB_size", + "dims", + "MATLAB_dims", + ): + shp = node.attrs.get(attr_key) + if shp is not None: + try: + shape = tuple(int(x) for x in shp) + break + except Exception: + pass + if not shape and "size" in node: + try: + shape = tuple(int(x) for x in node["size"][()]) + except Exception: + pass + + # Infer MATLAB class from HDF5 object + mcls = node.attrs.get("MATLAB_class", "").lower() + if mcls: + matlab_class = "char" if mcls == "string" else mcls + elif isinstance(node, h5py.Dataset): + dt = node.dtype + # Handle complex numbers stored as {real, imag} struct + if getattr(dt, "names", None) and {"real", "imag"} <= set(dt.names): + matlab_class = ( + "double" if dt["real"].base.itemsize == 8 else "single" + ) + # Map NumPy dtype to MATLAB class + elif (kind := dt.kind) == "f": + matlab_class = "double" if dt.itemsize == 8 else "single" + elif kind == "i": + matlab_class = f"int{8 * dt.itemsize}" + elif kind == "u": + matlab_class = f"uint{8 * dt.itemsize}" + elif kind == "b": + matlab_class = "logical" + elif kind in ("S", "U", "O"): + matlab_class = "char" + else: + matlab_class = "unknown" + # Check for sparse matrix structure + elif {"ir", "jc", "data"}.issubset(set(node.keys())): + matlab_class = "sparse" + else: + matlab_class = "unknown" + + variables.append((name, shape, matlab_class)) + + return variables + + +def _readmat(fname, uint16_codec=None, *, preload=False): try: - read_mat = _import_pymatreader_funcs('EEGLAB I/O') + read_mat = _import_pymatreader_funcs("EEGLAB I/O") except RuntimeError: # pymatreader not installed - from scipy.io import loadmat - eeg = loadmat(fname, squeeze_me=True, mat_dtype=False) - return _check_for_scipy_mat_struct(eeg) - else: + read_mat = _scipy_reader + + # First handle the preload=False case + if not preload: + # when preload is `False`, we need to be selective about what we load + # and handle the 'data' field specially + + # the files in eeglab are always the same field names + # the the fields were taken from the eeglab sample reference + # available at the eeglab github: + # https://github.com/sccn/eeglab/blob/develop/sample_data/eeglab_data.set + # The sample reference is the big reference for the field names + # in eeglab files, and what is used in the eeglab tests. 
+ info_fields = """ + setname filename filepath subject group condition session comments + nbchan trials pnts srate xmin xmax times icaact icawinv icasphere + icaweights icachansind chanlocs urchanlocs chaninfo ref event + urevent eventdescription epoch epochdescription reject stats + specdata specicaact splinefile icasplinefile dipfit history saved + etc + """.split() + + # We first load only the info fields that are not data + # Then we check if 'data' is present and load it separately if needed + mat_data = read_mat( + fname, + variable_names=info_fields, + uint16_codec=uint16_codec, + ) + + # checking the variables in the .set file + # to decide how to handle 'data' variable + try: + variables = whosmat(str(fname)) + except NotImplementedError: + try: + variables = _whosmat_hdf5(str(fname)) + except ModuleNotFoundError: + warn( + "pymatreader is required to preload=`False` for " + "Matlab files v7.3 files with HDF5 support. " + "Setting preload=True." + ) + preload = True + return read_mat(fname, uint16_codec=uint16_codec) + + is_possible_not_loaded = False + + numeric_types = """ + int8 int16 int32 + int64 uint8 uint16 + uint32 uint64 single double + """.split() + + for var in variables: + # looking for 'data' variable + if var[0] != "data": + continue + + # checking if 'data' variable is numeric + is_numeric = var[2] in numeric_types + + # if any 'data' variable is numeric, mark as possibly not loaded + if is_numeric: + # set the 'data' field to the filename + mat_data["data"] = str(fname) + + is_possible_not_loaded = is_possible_not_loaded or is_numeric + + if is_possible_not_loaded: + return mat_data + else: + # "The 'data' variable in the .set file appears to be numeric. " + # "In preload=False mode, the data is not loaded into memory. " + # "Instead, the filename is provided in mat_data['data']. " + # "To load the actual data, set preload=True." + # this is case of single file .set with data inside + preload = True + + # here is intended to be if and not else if + if preload: return read_mat(fname, uint16_codec=uint16_codec) diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index f3cc8606c8b..497a7eeaf8f 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -1,8 +1,6 @@ -# Authors: Mainak Jas -# Jona Sassenhagen -# Stefan Appelhoff -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
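# --- Sketch (not part of the patch): the inspection step behind preload=False ---
# _readmat above decides whether the EEGLAB 'data' variable lives inside the .set
# file by listing variables without loading them: scipy.io.whosmat for classic .mat
# files, and the _whosmat_hdf5 helper for MATLAB v7.3/HDF5 files. A minimal
# standalone version of that check, with a hypothetical file name:
from scipy.io import whosmat

numeric_classes = {
    "int8", "int16", "int32", "int64",
    "uint8", "uint16", "uint32", "uint64", "single", "double",
}
for name, shape, matlab_class in whosmat("eeglab_data.set"):  # hypothetical file
    if name == "data":
        # numeric -> samples are embedded in the .set file and loading can be
        # deferred; otherwise 'data' is typically a char path to a companion .fdt
        print(name, shape, matlab_class, matlab_class in numeric_classes)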
import os.path as op from os import PathLike @@ -10,20 +8,29 @@ import numpy as np -from ._eeglab import _readmat -from .._digitization import _ensure_fiducials_head -from ..constants import FIFF -from ..meas_info import create_info -from ..pick import _PICK_TYPES_KEYS -from ..utils import _read_segments_file, _find_channels -from ..base import BaseRaw -from ...defaults import DEFAULTS -from ...utils import (logger, verbose, warn, fill_doc, Bunch, _check_fname, - _check_head_radius) +from mne.utils.check import _check_option + +from ..._fiff._digitization import _ensure_fiducials_head +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ..._fiff.pick import _PICK_TYPES_KEYS +from ..._fiff.utils import _find_channels, _mult_cal_one, _read_segments_file +from ...annotations import Annotations, read_annotations from ...channels import make_dig_montage +from ...defaults import DEFAULTS from ...epochs import BaseEpochs from ...event import read_events -from ...annotations import Annotations, read_annotations +from ...utils import ( + Bunch, + _check_fname, + _check_head_radius, + fill_doc, + logger, + verbose, + warn, +) +from ..base import BaseRaw +from ._eeglab import _readmat # just fix the scaling for now, EEGLAB doesn't seem to provide this info CAL = 1e-6 @@ -32,55 +39,61 @@ def _check_eeglab_fname(fname, dataname): """Check whether the filename is valid. - Check if the file extension is ``.fdt`` (older ``.dat`` being invalid) or - whether the ``EEG.data`` filename exists. If ``EEG.data`` file is absent - the set file name with .set changed to .fdt is checked. + Check if the file extension is ``.fdt`` (older ``.dat`` being invalid) + or ``.set`` (new EEGLAB format) or whether the ``EEG.data`` filename exists. + If ``EEG.data`` file is absent the set file name with + .set changed to .fdt is checked. """ fmt = str(op.splitext(dataname)[-1]) - if fmt == '.dat': + if fmt == ".dat": raise NotImplementedError( - 'Old data format .dat detected. Please update your EEGLAB ' - 'version and resave the data in .fdt format') - elif fmt != '.fdt': - raise IOError('Expected .fdt file format. Found %s format' % fmt) + "Old data format .dat detected. Please update your EEGLAB " + "version and resave the data in .fdt format" + ) + + _check_option("EEGLAB file extension", fmt, (".set", ".fdt")) basedir = op.dirname(fname) data_fname = op.join(basedir, dataname) if not op.exists(data_fname): - fdt_from_set_fname = op.splitext(fname)[0] + '.fdt' + fdt_from_set_fname = op.splitext(fname)[0] + ".fdt" if op.exists(fdt_from_set_fname): data_fname = fdt_from_set_fname - msg = ('Data file name in EEG.data ({}) is incorrect, the file ' - 'name must have changed on disk, using the correct file ' - 'name ({}).') + msg = ( + "Data file name in EEG.data ({}) is incorrect, the file " + "name must have changed on disk, using the correct file " + "name ({})." + ) warn(msg.format(dataname, op.basename(fdt_from_set_fname))) elif not data_fname == fdt_from_set_fname: - msg = 'Could not find the .fdt data file, tried {} and {}.' + msg = "Could not find the .fdt data file, tried {} and {}." 
raise FileNotFoundError(msg.format(data_fname, fdt_from_set_fname)) return data_fname -def _check_load_mat(fname, uint16_codec): +def _check_load_mat(fname, uint16_codec, *, preload=False): """Check if the mat struct contains 'EEG'.""" - eeg = _readmat(fname, uint16_codec=uint16_codec) - if 'ALLEEG' in eeg: + fname = _check_fname(fname, "read", True) + eeg = _readmat(fname, uint16_codec=uint16_codec, preload=preload) + if "ALLEEG" in eeg: raise NotImplementedError( - 'Loading an ALLEEG array is not supported. Please contact' - 'mne-python developers for more information.') - if 'EEG' in eeg: # fields are contained in EEG structure - eeg = eeg['EEG'] - eeg = eeg.get('EEG', eeg) # handle nested EEG structure + "Loading an ALLEEG array is not supported. Please contact" + "mne-python developers for more information." + ) + if "EEG" in eeg: # fields are contained in EEG structure + eeg = eeg["EEG"] + eeg = eeg.get("EEG", eeg) # handle nested EEG structure eeg = Bunch(**eeg) - eeg.trials = int(eeg.trials) - eeg.nbchan = int(eeg.nbchan) - eeg.pnts = int(eeg.pnts) + eeg.trials = int(eeg.get("trials", 1)) + eeg.nbchan = int(eeg.get("nbchan", 1)) + eeg.pnts = int(eeg.get("pnts", 1)) return eeg -def _to_loc(ll, scale_units=1.): +def _to_loc(ll): """Check if location exists.""" - if isinstance(ll, (int, float)) or len(ll) > 0: - return ll * scale_units + if isinstance(ll, int | float) or len(ll) > 0: + return ll else: return np.nan @@ -93,14 +106,12 @@ def _eeg_has_montage_information(eeg): if not len(eeg.chanlocs): has_pos = False else: - pos_fields = ['X', 'Y', 'Z'] + pos_fields = ["X", "Y", "Z"] if isinstance(eeg.chanlocs[0], mat_struct): - has_pos = all(hasattr(eeg.chanlocs[0], fld) - for fld in pos_fields) + has_pos = all(hasattr(eeg.chanlocs[0], fld) for fld in pos_fields) elif isinstance(eeg.chanlocs[0], np.ndarray): # Old files - has_pos = all(fld in eeg.chanlocs[0].dtype.names - for fld in pos_fields) + has_pos = all(fld in eeg.chanlocs[0].dtype.names for fld in pos_fields) elif isinstance(eeg.chanlocs[0], dict): # new files has_pos = all(fld in eeg.chanlocs[0] for fld in pos_fields) @@ -110,73 +121,95 @@ def _eeg_has_montage_information(eeg): return has_pos -def _get_montage_information(eeg, get_pos, scale_units=1.): +def _get_montage_information(eeg, get_pos, *, montage_units): """Get channel name, type and montage information from ['chanlocs'].""" ch_names, ch_types, pos_ch_names, pos = list(), list(), list(), list() unknown_types = dict() for chanloc in eeg.chanlocs: # channel name - ch_names.append(chanloc['labels']) + ch_names.append(chanloc["labels"]) # channel type - ch_type = 'eeg' - try_type = chanloc.get('type', None) + ch_type = "eeg" + try_type = chanloc.get("type", None) if isinstance(try_type, str): try_type = try_type.strip().lower() if try_type in _PICK_TYPES_KEYS: ch_type = try_type else: if try_type in unknown_types: - unknown_types[try_type].append(chanloc['labels']) + unknown_types[try_type].append(chanloc["labels"]) else: - unknown_types[try_type] = [chanloc['labels']] + unknown_types[try_type] = [chanloc["labels"]] ch_types.append(ch_type) # channel loc if get_pos: - loc_x = _to_loc(chanloc['X'], scale_units=scale_units) - loc_y = _to_loc(chanloc['Y'], scale_units=scale_units) - loc_z = _to_loc(chanloc['Z'], scale_units=scale_units) + loc_x = _to_loc(chanloc["X"]) + loc_y = _to_loc(chanloc["Y"]) + loc_z = _to_loc(chanloc["Z"]) locs = np.r_[-loc_y, loc_x, loc_z] - pos_ch_names.append(chanloc['labels']) + pos_ch_names.append(chanloc["labels"]) pos.append(locs) # warn if 
unknown types were provided if len(unknown_types): - warn('Unknown types found, setting as type EEG:\n' + - '\n'.join([f'{key}: {sorted(unknown_types[key])}' - for key in sorted(unknown_types)])) + warn( + "Unknown types found, setting as type EEG:\n" + + "\n".join( + [ + f"{key}: {sorted(unknown_types[key])}" + for key in sorted(unknown_types) + ] + ) + ) lpa, rpa, nasion = None, None, None - if hasattr(eeg, "chaninfo") and len(eeg.chaninfo.get('nodatchans', [])): - for item in list(zip(*eeg.chaninfo['nodatchans'].values())): - d = dict(zip(eeg.chaninfo['nodatchans'].keys(), item)) - if d.get("type", None) != 'FID': + if hasattr(eeg, "chaninfo") and isinstance(eeg.chaninfo.get("nodatchans"), dict): + nodatchans = eeg.chaninfo["nodatchans"] + types = nodatchans.get("type", []) + descriptions = nodatchans.get("description", []) + xs = np.atleast_1d(nodatchans.get("X", [])) + ys = np.atleast_1d(nodatchans.get("Y", [])) + zs = np.atleast_1d(nodatchans.get("Z", [])) + + for type_, description, x, y, z in zip(types, descriptions, xs, ys, zs): + if type_ != "FID": continue - elif d.get('description', None) == 'Nasion': - nasion = np.array([d["X"], d["Y"], d["Z"]]) - elif d.get('description', None) == 'Right periauricular point': - rpa = np.array([d["X"], d["Y"], d["Z"]]) - elif d.get('description', None) == 'Left periauricular point': - lpa = np.array([d["X"], d["Y"], d["Z"]]) - + if description == "Nasion": + nasion = np.array([x, y, z]) + elif description == "Right periauricular point": + rpa = np.array([x, y, z]) + elif description == "Left periauricular point": + lpa = np.array([x, y, z]) + + # Always check this even if it's not used + _check_option("montage_units", montage_units, ("m", "dm", "cm", "mm", "auto")) if pos_ch_names: - pos_array = np.array(pos) + pos_array = np.array(pos, float) + pos_array.shape = (-1, 3) # roughly estimate head radius and check if its reasonable - is_nan_pos = np.isnan(pos).all(axis=1) + is_nan_pos = np.isnan(pos).any(axis=1) if not is_nan_pos.all(): - mean_radius = np.mean(np.linalg.norm( - pos_array[~is_nan_pos], axis=1)) + mean_radius = np.mean(np.linalg.norm(pos_array[~is_nan_pos], axis=1)) + scale_units = _handle_montage_units(montage_units, mean_radius) + mean_radius *= scale_units + pos_array *= scale_units additional_info = ( - ' Check if the montage_units argument is correct (the default ' + " Check if the montage_units argument is correct (the default " 'is "mm", but your channel positions may be in different units' - ').') + ")." 
+ ) _check_head_radius(mean_radius, add_info=additional_info) montage = make_dig_montage( ch_pos=dict(zip(ch_names, pos_array)), - coord_frame='head', lpa=lpa, rpa=rpa, nasion=nasion) + coord_frame="head", + lpa=lpa, + rpa=rpa, + nasion=nasion, + ) _ensure_fiducials_head(montage.dig) else: montage = None @@ -184,7 +217,7 @@ def _get_montage_information(eeg, get_pos, scale_units=1.): return ch_names, ch_types, montage -def _get_info(eeg, eog=(), scale_units=1.): +def _get_info(eeg, *, eog, montage_units): """Get measurement info.""" # add the ch_names and info['chs'][idx]['loc'] if not isinstance(eeg.chanlocs, np.ndarray) and eeg.nbchan == 1: @@ -197,23 +230,24 @@ def _get_info(eeg, eog=(), scale_units=1.): if eeg_has_ch_names_info: has_pos = _eeg_has_montage_information(eeg) - ch_names, ch_types, eeg_montage = \ - _get_montage_information(eeg, has_pos, scale_units=scale_units) + ch_names, ch_types, eeg_montage = _get_montage_information( + eeg, has_pos, montage_units=montage_units + ) update_ch_names = False else: # if eeg.chanlocs is empty, we still need default chan names - ch_names = ["EEG %03d" % ii for ii in range(eeg.nbchan)] - ch_types = 'eeg' + ch_names = [f"EEG {ii:03d}" for ii in range(eeg.nbchan)] + ch_types = "eeg" eeg_montage = None update_ch_names = True info = create_info(ch_names, sfreq=eeg.srate, ch_types=ch_types) - eog = _find_channels(ch_names, ch_type='EOG') if eog == 'auto' else eog - for idx, ch in enumerate(info['chs']): - ch['cal'] = CAL - if ch['ch_name'] in eog or idx in eog: - ch['coil_type'] = FIFF.FIFFV_COIL_NONE - ch['kind'] = FIFF.FIFFV_EOG_CH + eog = _find_channels(ch_names, ch_type="EOG") if eog == "auto" else eog + for idx, ch in enumerate(info["chs"]): + ch["cal"] = CAL + if ch["ch_name"] in eog or idx in eog: + ch["coil_type"] = FIFF.FIFFV_COIL_NONE + ch["kind"] = FIFF.FIFFV_EOG_CH return info, eeg_montage, update_ch_names @@ -228,29 +262,37 @@ def _set_dig_montage_in_init(self, montage): self.set_montage(None) else: missing_channels = set(self.ch_names) - set(montage.ch_names) - ch_pos = dict(zip( - list(missing_channels), - np.full((len(missing_channels), 3), np.nan) - )) - self.set_montage( - montage + make_dig_montage(ch_pos=ch_pos, coord_frame='head') + ch_pos = dict( + zip(list(missing_channels), np.full((len(missing_channels), 3), np.nan)) ) - - -def _handle_montage_units(montage_units): - n_char_unit = len(montage_units) - if montage_units[-1:] != 'm' or n_char_unit > 2: - raise ValueError('``montage_units`` has to be in prefix + "m" format' - f', got "{montage_units}"') - + self.set_montage(montage + make_dig_montage(ch_pos=ch_pos, coord_frame="head")) + + +def _handle_montage_units(montage_units, mean_radius): + if montage_units == "auto": + # radius should be between 0.05 and 0.11 meters + if mean_radius < 0.25: + montage_units = "m" + elif mean_radius < 2.5: + montage_units = "dm" + elif mean_radius < 25: + montage_units = "cm" + else: # mean_radius >= 25 + montage_units = "mm" prefix = montage_units[:-1] - scale_units = 1 / DEFAULTS['prefixes'][prefix] + scale_units = 1 / DEFAULTS["prefixes"][prefix] return scale_units @fill_doc -def read_raw_eeglab(input_fname, eog=(), preload=False, - uint16_codec=None, montage_units='mm', verbose=None): +def read_raw_eeglab( + input_fname, + eog=(), + preload=False, + uint16_codec=None, + montage_units="auto", + verbose=None, +) -> "RawEEGLAB": r"""Read an EEGLAB .set file. 
Parameters @@ -263,10 +305,11 @@ def read_raw_eeglab(input_fname, eog=(), preload=False, If 'auto', the channel names containing ``EOG`` or ``EYE`` are used. Defaults to empty tuple. %(preload)s - Note that ``preload=False`` will be effective only if the data is - stored in a separate binary file. %(uint16_codec)s %(montage_units)s + + .. versionchanged:: 1.6 + Support for ``'auto'`` was added and is the new default. %(verbose)s Returns @@ -283,15 +326,27 @@ def read_raw_eeglab(input_fname, eog=(), preload=False, ----- .. versionadded:: 0.11.0 """ - return RawEEGLAB(input_fname=input_fname, preload=preload, - eog=eog, uint16_codec=uint16_codec, - montage_units=montage_units, verbose=verbose) + return RawEEGLAB( + input_fname=input_fname, + preload=preload, + eog=eog, + uint16_codec=uint16_codec, + montage_units=montage_units, + verbose=verbose, + ) @fill_doc -def read_epochs_eeglab(input_fname, events=None, event_id=None, - eog=(), *, uint16_codec=None, montage_units='mm', - verbose=None): +def read_epochs_eeglab( + input_fname, + events=None, + event_id=None, + eog=(), + *, + uint16_codec=None, + montage_units="auto", + verbose=None, +) -> "EpochsEEGLAB": r"""Reader function for EEGLAB epochs files. Parameters @@ -322,11 +377,14 @@ def read_epochs_eeglab(input_fname, events=None, event_id=None, Defaults to empty tuple. %(uint16_codec)s %(montage_units)s + + .. versionchanged:: 1.6 + Support for ``'auto'`` was added and is the new default. %(verbose)s Returns ------- - epochs : instance of Epochs + EpochsEEGLAB : instance of BaseEpochs The epochs. See Also @@ -337,9 +395,15 @@ def read_epochs_eeglab(input_fname, events=None, event_id=None, ----- .. versionadded:: 0.11.0 """ - epochs = EpochsEEGLAB(input_fname=input_fname, events=events, eog=eog, - event_id=event_id, uint16_codec=uint16_codec, - montage_units=montage_units, verbose=verbose) + epochs = EpochsEEGLAB( + input_fname=input_fname, + events=events, + eog=eog, + event_id=event_id, + uint16_codec=uint16_codec, + montage_units=montage_units, + verbose=verbose, + ) return epochs @@ -357,8 +421,6 @@ class RawEEGLAB(BaseRaw): If 'auto', the channel names containing ``EOG`` or ``EYE`` are used. Defaults to empty tuple. %(preload)s - Note that preload=False will be effective only if the data is stored - in a separate binary file. %(uint16_codec)s %(montage_units)s %(verbose)s @@ -373,37 +435,51 @@ class RawEEGLAB(BaseRaw): """ @verbose - def __init__(self, input_fname, eog=(), - preload=False, *, uint16_codec=None, montage_units='mm', - verbose=None): # noqa: D102 - input_fname = str( - _check_fname(input_fname, "read", True, "input_fname") - ) - eeg = _check_load_mat(input_fname, uint16_codec) + def __init__( + self, + input_fname, + eog=(), + preload=False, + *, + uint16_codec=None, + montage_units="auto", + verbose=None, + ): + input_fname = str(_check_fname(input_fname, "read", True, "input_fname")) + eeg = _check_load_mat(input_fname, uint16_codec, preload=preload) if eeg.trials != 1: - raise TypeError('The number of trials is %d. It must be 1 for raw' - ' files. Please use `mne.io.read_epochs_eeglab` if' - ' the .set file contains epochs.' % eeg.trials) + raise TypeError( + f"The number of trials is {eeg.trials:d}. It must be 1 for raw" + " files. Please use `mne.io.read_epochs_eeglab` if" + " the .set file contains epochs." 
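# The trials check above doubles as a simple dispatch rule for callers who do not
# know ahead of time whether a .set file is continuous or epoched. A usage-style
# sketch (not part of the diff; read_any_eeglab is a hypothetical helper):
from mne.io import read_epochs_eeglab, read_raw_eeglab


def read_any_eeglab(set_fname):
    """Read continuous .set files as Raw and epoched ones as Epochs."""
    try:
        return read_raw_eeglab(set_fname, preload=False)
    except TypeError:
        # raised above when EEG.trials != 1, i.e. the file contains epochs
        return read_epochs_eeglab(set_fname)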
+ ) last_samps = [eeg.pnts - 1] - scale_units = _handle_montage_units(montage_units) - info, eeg_montage, _ = _get_info(eeg, eog=eog, scale_units=scale_units) + info, eeg_montage, _ = _get_info(eeg, eog=eog, montage_units=montage_units) # read the data if isinstance(eeg.data, str): data_fname = _check_eeglab_fname(input_fname, eeg.data) - logger.info('Reading %s' % data_fname) - - super(RawEEGLAB, self).__init__( - info, preload, filenames=[data_fname], last_samps=last_samps, - orig_format='double', verbose=verbose) + logger.info(f"Reading {data_fname}") + # Check if data is embedded in the same .set file + is_embedded = op.realpath(data_fname) == op.realpath(input_fname) + + super().__init__( + info, + preload, + filenames=[data_fname], + last_samps=last_samps, + orig_format="double", + verbose=verbose, + raw_extras=[ + { + "is_embedded": is_embedded, + "input_fname": input_fname, + "uint16_codec": uint16_codec, + } + ], + ) else: - if preload is False or isinstance(preload, str): - warn('Data will be preloaded. preload=False or a string ' - 'preload is not supported when the data is stored in ' - 'the .set file') - # can't be done in standard way with preload=True because of - # different reading path (.set file) if eeg.nbchan == 1 and len(eeg.data.shape) == 1: n_chan, n_times = [1, eeg.data.shape[0]] else: @@ -411,24 +487,67 @@ def __init__(self, input_fname, eog=(), data = np.empty((n_chan, n_times), dtype=float) data[:n_chan] = eeg.data data *= CAL - super(RawEEGLAB, self).__init__( - info, data, filenames=[input_fname], last_samps=last_samps, - orig_format='double', verbose=verbose) + super().__init__( + info, + data, + filenames=[input_fname], + last_samps=last_samps, + orig_format="double", + verbose=verbose, + ) # create event_ch from annotations - annot = read_annotations(input_fname) + annot = read_annotations(input_fname, uint16_codec=uint16_codec) self.set_annotations(annot) _check_boundary(annot, None) _set_dig_montage_in_init(self, eeg_montage) - latencies = np.round(annot.onset * self.info['sfreq']) + latencies = np.round(annot.onset * self.info["sfreq"]) _check_latencies(latencies) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" - _read_segments_file( - self, data, idx, fi, start, stop, cals, mult, dtype=' 1: # first extract the events and construct an event_id dict @@ -530,19 +665,20 @@ def __init__(self, input_fname, events=None, event_id=None, tmin=0, epochs = _bunchify(eeg.epoch) events = _bunchify(eeg.event) for ep in epochs: - if isinstance(ep.eventtype, (int, float)): + if isinstance(ep.eventtype, int | float): ep.eventtype = str(ep.eventtype) if not isinstance(ep.eventtype, str): - event_type = '/'.join([str(et) for et in ep.eventtype]) + event_type = "/".join([str(et) for et in ep.eventtype]) event_name.append(event_type) # store latency of only first event - event_latencies.append(events[ev_idx].latency) + # -1 to account for Matlab 1-based indexing of samples + event_latencies.append(events[ev_idx].latency - 1) ev_idx += len(ep.eventtype) warn_multiple_events = True else: event_type = ep.eventtype event_name.append(ep.eventtype) - event_latencies.append(events[ev_idx].latency) + event_latencies.append(events[ev_idx].latency - 1) ev_idx += 1 if event_type not in unique_ev: @@ -553,81 +689,96 @@ def __init__(self, input_fname, events=None, event_id=None, tmin=0, # warn about multiple events in epoch if necessary if warn_multiple_events: - warn('At least one epoch has multiple events. 
Only the latency' - ' of the first event will be retained.') + warn( + "At least one epoch has multiple events. Only the latency" + " of the first event will be retained." + ) # now fill up the event array events = np.zeros((eeg.trials, 3), dtype=int) for idx in range(0, eeg.trials): if idx == 0: prev_stim = 0 - elif (idx > 0 and - event_latencies[idx] - event_latencies[idx - 1] == 1): + elif idx > 0 and event_latencies[idx] - event_latencies[idx - 1] == 1: prev_stim = event_id[event_name[idx - 1]] events[idx, 0] = event_latencies[idx] events[idx, 1] = prev_stim events[idx, 2] = event_id[event_name[idx]] - elif isinstance(events, (str, Path, PathLike)): + elif isinstance(events, str | Path | PathLike): events = read_events(events) - logger.info('Extracting parameters from %s...' % input_fname) - scale_units = _handle_montage_units(montage_units) - info, eeg_montage, _ = _get_info(eeg, eog=eog, scale_units=scale_units) + logger.info(f"Extracting parameters from {input_fname}...") + info, eeg_montage, _ = _get_info(eeg, eog=eog, montage_units=montage_units) for key, val in event_id.items(): if val not in events[:, 2]: - raise ValueError('No matching events found for %s ' - '(event id %i)' % (key, val)) + raise ValueError(f"No matching events found for {key} (event id {val})") if isinstance(eeg.data, str): data_fname = _check_eeglab_fname(input_fname, eeg.data) - with open(data_fname, 'rb') as data_fid: + with open(data_fname, "rb") as data_fid: data = np.fromfile(data_fid, dtype=np.float32) - data = data.reshape((eeg.nbchan, eeg.pnts, eeg.trials), - order="F") + data = data.reshape((eeg.nbchan, eeg.pnts, eeg.trials), order="F") else: data = eeg.data if eeg.nbchan == 1 and len(data.shape) == 2: data = data[np.newaxis, :] - data = data.transpose((2, 0, 1)).astype('double') + data = data.transpose((2, 0, 1)).astype("double") data *= CAL assert data.shape == (eeg.trials, eeg.nbchan, eeg.pnts) tmin, tmax = eeg.xmin, eeg.xmax - super(EpochsEEGLAB, self).__init__( - info, data, events, event_id, tmin, tmax, baseline, - reject=reject, flat=flat, reject_tmin=reject_tmin, - reject_tmax=reject_tmax, filename=input_fname, verbose=verbose) + super().__init__( + info, + data, + events, + event_id, + tmin, + tmax, + baseline, + reject=reject, + flat=flat, + reject_tmin=reject_tmin, + reject_tmax=reject_tmax, + filename=input_fname, + verbose=verbose, + ) # data are preloaded but _bad_dropped is not set so we do it here: self._bad_dropped = True _set_dig_montage_in_init(self, eeg_montage) - logger.info('Ready.') + logger.info("Ready.") def _check_boundary(annot, event_id): if event_id is None: event_id = dict() if "boundary" in annot.description and "boundary" not in event_id: - warn("The data contains 'boundary' events, indicating data " - "discontinuities. Be cautious of filtering and epoching around " - "these events.") + warn( + "The data contains 'boundary' events, indicating data " + "discontinuities. Be cautious of filtering and epoching around " + "these events." + ) def _check_latencies(latencies): if (latencies < -1).any(): - raise ValueError('At least one event sample index is negative. Please' - ' check if EEG.event.sample values are correct.') + raise ValueError( + "At least one event sample index is negative. Please" + " check if EEG.event.sample values are correct." + ) if (latencies == -1).any(): - warn("At least one event has a sample index of -1. 
This usually is " - "a consequence of how eeglab handles event latency after " - "resampling - especially when you had a boundary event at the " - "beginning of the file. Please make sure that the events at " - "the very beginning of your EEGLAB file can be safely dropped " - "(e.g., because they are boundary events).") + warn( + "At least one event has a sample index of -1. This usually is " + "a consequence of how eeglab handles event latency after " + "resampling - especially when you had a boundary event at the " + "beginning of the file. Please make sure that the events at " + "the very beginning of your EEGLAB file can be safely dropped " + "(e.g., because they are boundary events)." + ) def _bunchify(items): @@ -646,7 +797,7 @@ def _read_annotations_eeglab(eeg, uint16_codec=None): Parameters ---------- - eeg : object | str + eeg : object | str | Path 'EEG' struct or the path to the (EEGLAB) .set file. uint16_codec : str | None If your \*.set file contains non-ascii characters, sometimes reading @@ -660,15 +811,14 @@ def _read_annotations_eeglab(eeg, uint16_codec=None): annotations : instance of Annotations The annotations present in the file. """ - if isinstance(eeg, str): + if isinstance(eeg, (str | Path | PathLike)): eeg = _check_load_mat(eeg, uint16_codec=uint16_codec) - if not hasattr(eeg, 'event'): + if not hasattr(eeg, "event"): events = [] - elif isinstance(eeg.event, dict) and \ - np.array(eeg.event['latency']).ndim > 0: + elif isinstance(eeg.event, dict) and np.array(eeg.event["latency"]).ndim > 0: events = _dol_to_lod(eeg.event) - elif not isinstance(eeg.event, (np.ndarray, list)): + elif not isinstance(eeg.event, np.ndarray | list): events = [eeg.event] else: events = eeg.event @@ -676,20 +826,41 @@ def _read_annotations_eeglab(eeg, uint16_codec=None): description = [str(event.type) for event in events] onset = [event.latency - 1 for event in events] duration = np.zeros(len(onset)) - if len(events) > 0 and hasattr(events[0], 'duration'): + if len(events) > 0 and hasattr(events[0], "duration"): for idx, event in enumerate(events): # empty duration fields are read as empty arrays - is_empty_array = (isinstance(event.duration, np.ndarray) - and len(event.duration) == 0) + is_empty_array = ( + isinstance(event.duration, np.ndarray) and len(event.duration) == 0 + ) duration[idx] = np.nan if is_empty_array else event.duration - return Annotations(onset=np.array(onset) / eeg.srate, - duration=duration / eeg.srate, - description=description, - orig_time=None) + # Drop events with NaN onset see PR #12484 + valid_indices = [ + idx for idx, onset_idx in enumerate(onset) if not np.isnan(onset_idx) + ] + n_dropped = len(onset) - len(valid_indices) + if len(valid_indices) != len(onset): + warn( + f"{n_dropped} events have an onset that is NaN. These values are " + "usually ignored by EEGLAB and will be dropped from the " + "annotations." 
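# A compact sketch (hypothetical helper, mirroring _read_annotations_eeglab) of
# how EEGLAB events become MNE annotations: latencies are 1-based sample indices,
# so subtract 1 and divide by the sampling rate, and NaN onsets are dropped just
# as EEGLAB itself ignores them.
import numpy as np

from mne import Annotations


def _annotations_from_eeglab_events(latencies, types, durations, sfreq):
    """Convert EEGLAB event fields into an Annotations object."""
    onset = (np.asarray(latencies, dtype=float) - 1) / sfreq
    duration = np.asarray(durations, dtype=float) / sfreq
    keep = ~np.isnan(onset)  # NaN latencies carry no usable timing information
    return Annotations(
        onset=onset[keep],
        duration=duration[keep],
        description=[str(t) for t, k in zip(types, keep) if k],
        orig_time=None,
    )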
+ ) + + onset = np.array([onset[idx] for idx in valid_indices]) + duration = np.array([duration[idx] for idx in valid_indices]) + description = [description[idx] for idx in valid_indices] + + return Annotations( + onset=np.array(onset) / eeg.srate, + duration=duration / eeg.srate, + description=description, + orig_time=None, + ) def _dol_to_lod(dol): """Convert a dict of lists to a list of dicts.""" - return [{key: dol[key][ii] for key in dol.keys()} - for ii in range(len(dol[list(dol.keys())[0]]))] + return [ + {key: dol[key][ii] for key in dol.keys()} + for ii in range(len(dol[list(dol.keys())[0]])) + ] diff --git a/mne/io/eeglab/tests/__init__.py b/mne/io/eeglab/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/eeglab/tests/__init__.py +++ b/mne/io/eeglab/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/eeglab/tests/test_eeglab.py b/mne/io/eeglab/tests/test_eeglab.py index 8c2a966253e..a042566fbc9 100644 --- a/mne/io/eeglab/tests/test_eeglab.py +++ b/mne/io/eeglab/tests/test_eeglab.py @@ -1,29 +1,32 @@ -# Author: Mainak Jas -# Mikolaj Magnuski -# Stefan Appelhoff -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import os import shutil +import time from copy import deepcopy import numpy as np -from numpy.testing import (assert_array_equal, assert_array_almost_equal, - assert_equal, assert_allclose) import pytest +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, +) from scipy import io import mne -from mne import write_events, read_epochs_eeglab +from mne import read_epochs_eeglab, write_events +from mne.annotations import events_from_annotations, read_annotations from mne.channels import read_custom_montage +from mne.datasets import testing from mne.io import read_raw_eeglab -from mne.io.eeglab.eeglab import _get_montage_information, _dol_to_lod from mne.io.eeglab._eeglab import _readmat +from mne.io.eeglab.eeglab import _dol_to_lod, _get_montage_information from mne.io.tests.test_raw import _test_raw_reader -from mne.datasets import testing -from mne.utils import Bunch, _check_pymatreader_installed -from mne.annotations import events_from_annotations, read_annotations +from mne.utils import Bunch, _check_pymatreader_installed, _record_warnings base_dir = testing.data_path(download=False) / "EEGLAB" raw_fname_mat = base_dir / "test_raw.set" @@ -31,69 +34,67 @@ raw_fname_event_duration = base_dir / "test_raw_event_duration.set" epochs_fname_mat = base_dir / "test_epochs.set" epochs_fname_onefile_mat = base_dir / "test_epochs_onefile.set" -raw_mat_fnames = [raw_fname_mat, raw_fname_onefile_mat] epochs_mat_fnames = [epochs_fname_mat, epochs_fname_onefile_mat] raw_fname_chanloc = base_dir / "test_raw_chanloc.set" raw_fname_chanloc_fids = base_dir / "test_raw_chanloc_fids.set" raw_fname_2021 = base_dir / "test_raw_2021.set" raw_fname_h5 = base_dir / "test_raw_h5.set" -raw_fname_onefile_h5 = base_dir / "test_raw_onefile_h5.set" epochs_fname_h5 = base_dir / "test_epochs_h5.set" epochs_fname_onefile_h5 = base_dir / "test_epochs_onefile_h5.set" -raw_h5_fnames = [raw_fname_h5, raw_fname_onefile_h5] epochs_h5_fnames = [epochs_fname_h5, epochs_fname_onefile_h5] montage_path = base_dir / "test_chans.locs" @testing.requires_testing_data -@pytest.mark.parametrize('fname', [ - raw_fname_mat, - pytest.param( - raw_fname_h5, - marks=[ - 
pytest.mark.skipif( - not _check_pymatreader_installed(strict=False), - reason='pymatreader not installed' - ) - ] - ), - raw_fname_chanloc, -], ids=os.path.basename) +@pytest.mark.parametrize( + "fname", + [ + raw_fname_mat, + pytest.param( + raw_fname_h5, + marks=[ + pytest.mark.skipif( + not _check_pymatreader_installed(strict=False), + reason="pymatreader not installed", + ) + ], + ), + raw_fname_chanloc, + ], + ids=os.path.basename, +) def test_io_set_raw(fname): """Test importing EEGLAB .set files.""" montage = read_custom_montage(montage_path) - montage.ch_names = [ - 'EEG {0:03d}'.format(ii) for ii in range(len(montage.ch_names)) - ] + montage.ch_names = [f"EEG {ii:03d}" for ii in range(len(montage.ch_names))] kws = dict(reader=read_raw_eeglab, input_fname=fname) if fname.name == "test_raw_chanloc.set": - with pytest.warns(RuntimeWarning, - match="The data contains 'boundary' events"): + with pytest.warns(RuntimeWarning, match="The data contains 'boundary' events"): raw0 = _test_raw_reader(**kws) - elif '_h5' in fname.name: # should be safe enough, and much faster + elif "_h5" in fname.name: # should be safe enough, and much faster raw0 = read_raw_eeglab(fname, preload=True) else: raw0 = _test_raw_reader(**kws) # test that preloading works if fname.name == "test_raw_chanloc.set": - raw0.set_montage(montage, on_missing='ignore') + raw0.set_montage(montage, on_missing="ignore") # crop to check if the data has been properly preloaded; we cannot # filter as the snippet of raw data is very short raw0.crop(0, 1) else: raw0.set_montage(montage) - raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto', - phase='zero') + raw0.filter( + 1, None, l_trans_bandwidth="auto", filter_length="auto", phase="zero" + ) # test that using uint16_codec does not break stuff - read_raw_kws = dict(input_fname=fname, preload=False, uint16_codec='ascii') + read_raw_kws = dict(input_fname=fname, preload=False, uint16_codec="ascii") if fname.name == "test_raw_chanloc.set": - with pytest.warns(RuntimeWarning, - match="The data contains 'boundary' events"): + with pytest.warns(RuntimeWarning, match="The data contains 'boundary' events"): raw0 = read_raw_eeglab(**read_raw_kws) - raw0.set_montage(montage, on_missing='ignore') + raw0.set_montage(montage, on_missing="ignore") else: raw0 = read_raw_eeglab(**read_raw_kws) raw0.set_montage(montage) @@ -101,121 +102,209 @@ def test_io_set_raw(fname): # Annotations if fname != raw_fname_chanloc: assert len(raw0.annotations) == 154 - assert set(raw0.annotations.description) == {'rt', 'square'} - assert_array_equal(raw0.annotations.duration, 0.) 
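# Usage sketch of the lazy-loading behaviour exercised by the test added right
# after this hunk: with preload=False no samples are read at construction time,
# and indexing pulls only the requested window from disk. "sample.set" is a
# hypothetical file with a companion .fdt (or a numeric 'data' variable).
from mne.io import read_raw_eeglab

raw = read_raw_eeglab("sample.set", preload=False)  # metadata only, no samples yet
segment, times = raw[:2, :1000]  # reads just 2 channels x 1000 samples on demand
print(segment.shape, raw.preload)  # (2, 1000) False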
+ assert set(raw0.annotations.description) == {"rt", "square"} + assert_array_equal(raw0.annotations.duration, 0.0) + + +@testing.requires_testing_data +def test_io_set_preload_false_uses_lazy_loading(): + """Ensure reading .set files without preload keeps data out of memory.""" + raw_preloaded = read_raw_eeglab(raw_fname_mat, preload=True) + raw_not_preloaded = read_raw_eeglab(raw_fname_mat, preload=False) + + assert not raw_not_preloaded.preload + assert getattr(raw_not_preloaded, "_data", None) is None + assert raw_not_preloaded.n_times == raw_preloaded.n_times + assert raw_not_preloaded.info["nchan"] == raw_preloaded.info["nchan"] + + lazy_slice = raw_not_preloaded[:2, :10][0] + assert lazy_slice.shape == (2, 10) + + # on-demand reads must not flip the preload flag or populate _data + assert not raw_not_preloaded.preload + assert getattr(raw_not_preloaded, "_data", None) is None + + assert raw_preloaded._size > raw_not_preloaded._size @testing.requires_testing_data def test_io_set_raw_more(tmp_path): """Test importing EEGLAB .set files.""" - eeg = io.loadmat(raw_fname_mat, struct_as_record=False, - squeeze_me=True)['EEG'] + eeg = io.loadmat(raw_fname_mat, struct_as_record=False, squeeze_me=True)["EEG"] # test reading file with one event (read old version) negative_latency_fname = tmp_path / "test_negative_latency.set" events = deepcopy(eeg.event[0]) events.latency = 0 - io.savemat(negative_latency_fname, - {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, - 'nbchan': eeg.nbchan, - 'data': 'test_negative_latency.fdt', - 'epoch': eeg.epoch, 'event': events, - 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, - appendmat=False, oned_as='row') + io.savemat( + negative_latency_fname, + { + "EEG": { + "trials": eeg.trials, + "srate": eeg.srate, + "nbchan": eeg.nbchan, + "data": "test_negative_latency.fdt", + "epoch": eeg.epoch, + "event": events, + "chanlocs": eeg.chanlocs, + "pnts": eeg.pnts, + } + }, + appendmat=False, + oned_as="row", + ) shutil.copyfile( base_dir / "test_raw.fdt", negative_latency_fname.with_suffix(".fdt") ) - with pytest.warns(RuntimeWarning, match="has a sample index of -1."): + with ( + _record_warnings(), + pytest.warns(RuntimeWarning, match="has a sample index of -1."), + ): read_raw_eeglab(input_fname=negative_latency_fname, preload=True) # test negative event latencies events.latency = -1 - io.savemat(negative_latency_fname, - {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, - 'nbchan': eeg.nbchan, - 'data': 'test_negative_latency.fdt', - 'epoch': eeg.epoch, 'event': events, - 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, - appendmat=False, oned_as='row') - with pytest.raises(ValueError, match='event sample index is negative'): - with pytest.warns(RuntimeWarning, match="has a sample index of -1."): + io.savemat( + negative_latency_fname, + { + "EEG": { + "trials": eeg.trials, + "srate": eeg.srate, + "nbchan": eeg.nbchan, + "data": "test_negative_latency.fdt", + "epoch": eeg.epoch, + "event": events, + "chanlocs": eeg.chanlocs, + "pnts": eeg.pnts, + } + }, + appendmat=False, + oned_as="row", + ) + with pytest.raises(ValueError, match="event sample index is negative"): + with _record_warnings(): read_raw_eeglab(input_fname=negative_latency_fname, preload=True) # test overlapping events overlap_fname = tmp_path / "test_overlap_event.set" - io.savemat(overlap_fname, - {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, - 'nbchan': eeg.nbchan, 'data': 'test_overlap_event.fdt', - 'epoch': eeg.epoch, - 'event': [eeg.event[0], eeg.event[0]], - 'chanlocs': eeg.chanlocs, 
'pnts': eeg.pnts}}, - appendmat=False, oned_as='row') - shutil.copyfile( - base_dir / "test_raw.fdt", overlap_fname.with_suffix(".fdt") + io.savemat( + overlap_fname, + { + "EEG": { + "trials": eeg.trials, + "srate": eeg.srate, + "nbchan": eeg.nbchan, + "data": "test_overlap_event.fdt", + "epoch": eeg.epoch, + "event": [eeg.event[0], eeg.event[0]], + "chanlocs": eeg.chanlocs, + "pnts": eeg.pnts, + } + }, + appendmat=False, + oned_as="row", ) + shutil.copyfile(base_dir / "test_raw.fdt", overlap_fname.with_suffix(".fdt")) read_raw_eeglab(input_fname=overlap_fname, preload=True) # test reading file with empty event durations empty_dur_fname = tmp_path / "test_empty_durations.set" events = deepcopy(eeg.event) for ev in events: - ev.duration = np.array([], dtype='float') - - io.savemat(empty_dur_fname, - {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, - 'nbchan': eeg.nbchan, - 'data': 'test_negative_latency.fdt', - 'epoch': eeg.epoch, 'event': events, - 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, - appendmat=False, oned_as='row') - shutil.copyfile( - base_dir / "test_raw.fdt", empty_dur_fname.with_suffix(".fdt") + ev.duration = np.array([], dtype="float") + + io.savemat( + empty_dur_fname, + { + "EEG": { + "trials": eeg.trials, + "srate": eeg.srate, + "nbchan": eeg.nbchan, + "data": "test_negative_latency.fdt", + "epoch": eeg.epoch, + "event": events, + "chanlocs": eeg.chanlocs, + "pnts": eeg.pnts, + } + }, + appendmat=False, + oned_as="row", ) + shutil.copyfile(base_dir / "test_raw.fdt", empty_dur_fname.with_suffix(".fdt")) raw = read_raw_eeglab(input_fname=empty_dur_fname, preload=True) assert (raw.annotations.duration == 0).all() # test reading file when the EEG.data name is wrong - io.savemat(overlap_fname, - {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, - 'nbchan': eeg.nbchan, 'data': 'test_overla_event.fdt', - 'epoch': eeg.epoch, - 'event': [eeg.event[0], eeg.event[0]], - 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, - appendmat=False, oned_as='row') + io.savemat( + overlap_fname, + { + "EEG": { + "trials": eeg.trials, + "srate": eeg.srate, + "nbchan": eeg.nbchan, + "data": "test_overla_event.fdt", + "epoch": eeg.epoch, + "event": [eeg.event[0], eeg.event[0]], + "chanlocs": eeg.chanlocs, + "pnts": eeg.pnts, + } + }, + appendmat=False, + oned_as="row", + ) with pytest.warns(RuntimeWarning, match="must have changed on disk"): read_raw_eeglab(input_fname=overlap_fname, preload=True) # raise error when both EEG.data and fdt name from set are wrong overlap_fname = tmp_path / "test_ovrlap_event.set" - io.savemat(overlap_fname, - {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, - 'nbchan': eeg.nbchan, 'data': 'test_overla_event.fdt', - 'epoch': eeg.epoch, - 'event': [eeg.event[0], eeg.event[0]], - 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, - appendmat=False, oned_as='row') + io.savemat( + overlap_fname, + { + "EEG": { + "trials": eeg.trials, + "srate": eeg.srate, + "nbchan": eeg.nbchan, + "data": "test_overla_event.fdt", + "epoch": eeg.epoch, + "event": [eeg.event[0], eeg.event[0]], + "chanlocs": eeg.chanlocs, + "pnts": eeg.pnts, + } + }, + appendmat=False, + oned_as="row", + ) with pytest.raises(FileNotFoundError, match="not find the .fdt data file"): read_raw_eeglab(input_fname=overlap_fname, preload=True) # test reading file with one channel one_chan_fname = tmp_path / "test_one_channel.set" - io.savemat(one_chan_fname, - {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, - 'nbchan': 1, 'data': np.random.random((1, 3)), - 'epoch': eeg.epoch, 'event': eeg.epoch, - 'chanlocs': 
{'labels': 'E1', 'Y': -6.6069, - 'X': 6.3023, 'Z': -2.9423}, - 'times': eeg.times[:3], 'pnts': 3}}, - appendmat=False, oned_as='row') - read_raw_eeglab(input_fname=one_chan_fname, preload=True, - montage_units='cm') + io.savemat( + one_chan_fname, + { + "EEG": { + "trials": eeg.trials, + "srate": eeg.srate, + "nbchan": 1, + "data": np.random.random((1, 3)), + "epoch": eeg.epoch, + "event": eeg.epoch, + "chanlocs": {"labels": "E1", "Y": -6.6069, "X": 6.3023, "Z": -2.9423}, + "times": eeg.times[:3], + "pnts": 3, + } + }, + appendmat=False, + oned_as="row", + ) + read_raw_eeglab(input_fname=one_chan_fname, preload=True, montage_units="cm") # test reading file with 3 channels - one without position information # first, create chanlocs structured array - ch_names = ['F3', 'unknown', 'FPz'] - x, y, z = [1., 2., np.nan], [4., 5., np.nan], [7., 8., np.nan] - dt = [('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')] - nopos_dt = [('labels', 'S10'), ('Z', 'f8')] + ch_names = ["F3", "unknown", "FPz"] + x, y, z = [1.0, 2.0, np.nan], [4.0, 5.0, np.nan], [7.0, 8.0, np.nan] + dt = [("labels", "S10"), ("X", "f8"), ("Y", "f8"), ("Z", "f8")] + nopos_dt = [("labels", "S10"), ("Z", "f8")] chanlocs = np.zeros((3,), dtype=dt) nopos_chanlocs = np.zeros((3,), dtype=nopos_dt) for ind, vals in enumerate(zip(ch_names, x, y, z)): @@ -230,48 +319,63 @@ def test_io_set_raw_more(tmp_path): # test reading channel names but not positions when there is no X (only Z) # field in the EEG.chanlocs structure nopos_fname = tmp_path / "test_no_chanpos.set" - io.savemat(nopos_fname, - {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, 'nbchan': 3, - 'data': np.random.random((3, 2)), 'epoch': eeg.epoch, - 'event': eeg.epoch, 'chanlocs': nopos_chanlocs, - 'times': eeg.times[:2], 'pnts': 2}}, - appendmat=False, oned_as='row') + io.savemat( + nopos_fname, + { + "EEG": { + "trials": eeg.trials, + "srate": eeg.srate, + "nbchan": 3, + "data": np.random.random((3, 2)), + "epoch": eeg.epoch, + "event": eeg.epoch, + "chanlocs": nopos_chanlocs, + "times": eeg.times[:2], + "pnts": 2, + } + }, + appendmat=False, + oned_as="row", + ) # load the file - raw = read_raw_eeglab(input_fname=nopos_fname, preload=True, - montage_units='cm') + raw = read_raw_eeglab(input_fname=nopos_fname, preload=True, montage_units="cm") # test that channel names have been loaded but not channel positions for i in range(3): - assert_equal(raw.info['chs'][i]['ch_name'], ch_names[i]) - assert_array_equal(raw.info['chs'][i]['loc'][:3], - np.array([np.nan, np.nan, np.nan])) + assert_equal(raw.info["chs"][i]["ch_name"], ch_names[i]) + assert_array_equal( + raw.info["chs"][i]["loc"][:3], np.array([np.nan, np.nan, np.nan]) + ) @pytest.mark.timeout(60) # ~60 s on Travis OSX @testing.requires_testing_data -@pytest.mark.parametrize('fnames', [ - epochs_mat_fnames, - pytest.param( - epochs_h5_fnames, - marks=[ - pytest.mark.slowtest, - pytest.mark.skipif( - not _check_pymatreader_installed(strict=False), - reason='pymatreader not installed' - ) - ] - ) -]) +@pytest.mark.parametrize( + "fnames", + [ + epochs_mat_fnames, + pytest.param( + epochs_h5_fnames, + marks=[ + pytest.mark.slowtest, + pytest.mark.skipif( + not _check_pymatreader_installed(strict=False), + reason="pymatreader not installed", + ), + ], + ), + ], +) def test_io_set_epochs(fnames): """Test importing EEGLAB .set epochs files.""" epochs_fname, epochs_fname_onefile = fnames - with pytest.warns(RuntimeWarning, match='multiple events'): + with _record_warnings(), pytest.warns(RuntimeWarning, match="multiple 
events"): epochs = read_epochs_eeglab(epochs_fname) - with pytest.warns(RuntimeWarning, match='multiple events'): + with _record_warnings(), pytest.warns(RuntimeWarning, match="multiple events"): epochs2 = read_epochs_eeglab(epochs_fname_onefile) # one warning for each read_epochs_eeglab because both files have epochs # associated with multiple events - assert_array_equal(epochs.get_data(), epochs2.get_data()) + assert_array_equal(epochs.get_data(copy=False), epochs2.get_data(copy=False)) @testing.requires_testing_data @@ -280,67 +384,77 @@ def test_io_set_epochs_events(tmp_path): out_fname = tmp_path / "test-eve.fif" events = np.array([[4, 0, 1], [12, 0, 2], [20, 0, 3], [26, 0, 3]]) write_events(out_fname, events) - event_id = {'S255/S8': 1, 'S8': 2, 'S255/S9': 3} + event_id = {"S255/S8": 1, "S8": 2, "S255/S9": 3} epochs = read_epochs_eeglab(epochs_fname_mat, events, event_id) assert_equal(len(epochs.events), 4) assert epochs.preload assert epochs._bad_dropped epochs = read_epochs_eeglab(epochs_fname_mat, out_fname, event_id) - pytest.raises(ValueError, read_epochs_eeglab, epochs_fname_mat, - None, event_id) - pytest.raises(ValueError, read_epochs_eeglab, epochs_fname_mat, - epochs.events, None) + pytest.raises(ValueError, read_epochs_eeglab, epochs_fname_mat, None, event_id) + pytest.raises(ValueError, read_epochs_eeglab, epochs_fname_mat, epochs.events, None) @testing.requires_testing_data -@pytest.mark.filterwarnings('ignore:At least one epoch has multiple events') +@pytest.mark.filterwarnings("ignore:At least one epoch has multiple events") @pytest.mark.filterwarnings("ignore:The data contains 'boundary' events") def test_degenerate(tmp_path): """Test some degenerate conditions.""" # test if .dat file raises an error - eeg = io.loadmat(epochs_fname_mat, struct_as_record=False, - squeeze_me=True)['EEG'] - eeg.data = 'epochs_fname.dat' + eeg = io.loadmat(epochs_fname_mat, struct_as_record=False, squeeze_me=True)["EEG"] + eeg.data = "epochs_fname.dat" bad_epochs_fname = tmp_path / "test_epochs.set" - io.savemat(bad_epochs_fname, - {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, - 'nbchan': eeg.nbchan, 'data': eeg.data, - 'epoch': eeg.epoch, 'event': eeg.event, - 'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}}, - appendmat=False, oned_as='row') - shutil.copyfile( - base_dir / "test_epochs.fdt", tmp_path / "test_epochs.dat" + io.savemat( + bad_epochs_fname, + { + "EEG": { + "trials": eeg.trials, + "srate": eeg.srate, + "nbchan": eeg.nbchan, + "data": eeg.data, + "epoch": eeg.epoch, + "event": eeg.event, + "chanlocs": eeg.chanlocs, + "pnts": eeg.pnts, + } + }, + appendmat=False, + oned_as="row", ) - pytest.raises(NotImplementedError, read_epochs_eeglab, - bad_epochs_fname) + shutil.copyfile(base_dir / "test_epochs.fdt", tmp_path / "test_epochs.dat") + pytest.raises(NotImplementedError, read_epochs_eeglab, bad_epochs_fname) # error when montage units incorrect - with pytest.raises(ValueError, match=r'prefix \+ "m" format'): - read_epochs_eeglab(epochs_fname_mat, montage_units='mV') - - # warning when head radius too small - with pytest.warns(RuntimeWarning, match='is above'): - read_raw_eeglab(raw_fname_chanloc, montage_units='km') + with pytest.raises(ValueError, match=r"Invalid value"): + read_epochs_eeglab(epochs_fname_mat, montage_units="mV") # warning when head radius too large - with pytest.warns(RuntimeWarning, match='is below'): - read_raw_eeglab(raw_fname_chanloc, montage_units='µm') + with pytest.warns(RuntimeWarning, match="is above"): + read_raw_eeglab(raw_fname_chanloc, 
montage_units="m") - -@pytest.mark.parametrize("fname", [ - raw_fname_mat, - raw_fname_onefile_mat, - # We don't test the h5 variants here because they are implicitly tested - # in test_io_set_raw -]) -@pytest.mark.filterwarnings('ignore: Complex objects') + # warning when head radius too small + m_fname = tmp_path / "test_montage_m.set" + _create_eeg_with_scaled_montage_units(raw_fname_chanloc, m_fname, 1e-3) + with pytest.warns(RuntimeWarning, match="is below"): + read_raw_eeglab(m_fname, montage_units="mm") + + +@pytest.mark.parametrize( + "fname", + [ + raw_fname_mat, + raw_fname_onefile_mat, + # We don't test the h5 variants here because they are implicitly tested + # in test_io_set_raw + ], +) +@pytest.mark.filterwarnings("ignore: Complex objects") @testing.requires_testing_data def test_eeglab_annotations(fname): """Test reading annotations in EEGLAB files.""" annotations = read_annotations(fname) assert len(annotations) == 154 - assert set(annotations.description) == {'rt', 'square'} - assert np.all(annotations.duration == 0.) + assert set(annotations.description) == {"rt", "square"} + assert np.all(annotations.duration == 0.0) @testing.requires_testing_data @@ -348,15 +462,30 @@ def test_eeglab_read_annotations(): """Test annotations onsets are timestamps (+ validate some).""" annotations = read_annotations(raw_fname_mat) validation_samples = [0, 1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31] - expected_onset = np.array([1.00, 1.69, 2.08, 4.70, 7.71, 11.30, 17.18, - 20.20, 26.12, 29.14, 35.25, 44.30, 47.15]) + expected_onset = np.array( + [ + 1.00, + 1.69, + 2.08, + 4.70, + 7.71, + 11.30, + 17.18, + 20.20, + 26.12, + 29.14, + 35.25, + 44.30, + 47.15, + ] + ) assert annotations.orig_time is None - assert_array_almost_equal(annotations.onset[validation_samples], - expected_onset, decimal=2) + assert_array_almost_equal( + annotations.onset[validation_samples], expected_onset, decimal=2 + ) # test if event durations are imported correctly - raw = read_raw_eeglab(raw_fname_event_duration, preload=True, - montage_units='dm') + raw = read_raw_eeglab(raw_fname_event_duration, preload=True, montage_units="dm") # file contains 3 annotations with 0.5 s (64 samples) duration each assert_allclose(raw.annotations.duration, np.ones(3) * 0.5) @@ -366,7 +495,7 @@ def test_eeglab_event_from_annot(): """Test all forms of obtaining annotations.""" raw_fname_mat = base_dir / "test_raw.set" raw_fname = raw_fname_mat - event_id = {'rt': 1, 'square': 2} + event_id = {"rt": 1, "square": 2} raw1 = read_raw_eeglab(input_fname=raw_fname, preload=False) annotations = read_annotations(raw_fname) @@ -381,7 +510,7 @@ def _assert_array_allclose_nan(left, right): assert_allclose(left[~np.isnan(left)], right[~np.isnan(left)], atol=1e-8) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def three_chanpos_fname(tmp_path_factory): """Test file with 3 channels to exercise EEGLAB reader. 
@@ -393,21 +522,29 @@ def three_chanpos_fname(tmp_path_factory): Notes from when this code was factorized: # test reading file with one event (read old version) """ - fname = str(tmp_path_factory.mktemp('data') / 'test_chanpos.set') - file_conent = dict(EEG={ - 'trials': 1, 'nbchan': 3, 'pnts': 3, 'epoch': [], 'event': [], - 'srate': 128, 'times': np.array([0., 0.1, 0.2]), - 'data': np.empty([3, 3]), - 'chanlocs': np.array( - [(b'F3', 1., 4., 7.), - (b'unknown', np.nan, np.nan, np.nan), - (b'FPz', 2., 5., 8.)], - dtype=[('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')] - ) - }) + fname = str(tmp_path_factory.mktemp("data") / "test_chanpos.set") + file_conent = dict( + EEG={ + "trials": 1, + "nbchan": 3, + "pnts": 3, + "epoch": [], + "event": [], + "srate": 128, + "times": np.array([0.0, 0.1, 0.2]), + "data": np.empty([3, 3]), + "chanlocs": np.array( + [ + (b"F3", 1.0, 4.0, 7.0), + (b"unknown", np.nan, np.nan, np.nan), + (b"FPz", 2.0, 5.0, 8.0), + ], + dtype=[("labels", "S10"), ("X", "f8"), ("Y", "f8"), ("Z", "f8")], + ), + } + ) - io.savemat(file_name=fname, mdict=file_conent, appendmat=False, - oned_as='row') + io.savemat(file_name=fname, mdict=file_conent, appendmat=False, oned_as="row") return fname @@ -416,22 +553,31 @@ def three_chanpos_fname(tmp_path_factory): def test_position_information(three_chanpos_fname): """Test reading file with 3 channels - one without position information.""" nan = np.nan - EXPECTED_LOCATIONS_FROM_FILE = np.array([ - [-4., 1., 7., 0., 0., 0., nan, nan, nan, nan, nan, nan], - [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan], - [-5., 2., 8., 0., 0., 0., nan, nan, nan, nan, nan, nan], - ]) * 0.01 # 0.01 is to scale cm to meters - - EXPECTED_LOCATIONS_FROM_MONTAGE = np.array([ - [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan], - [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan], - [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan], - ]) - - raw = read_raw_eeglab(input_fname=three_chanpos_fname, preload=True, - montage_units='cm') - assert_array_equal(np.array([ch['loc'] for ch in raw.info['chs']]), - EXPECTED_LOCATIONS_FROM_FILE) + EXPECTED_LOCATIONS_FROM_FILE = ( + np.array( + [ + [-4.0, 1.0, 7.0, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan], + [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan], + [-5.0, 2.0, 8.0, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan], + ] + ) + * 0.01 + ) # 0.01 is to scale cm to meters + + EXPECTED_LOCATIONS_FROM_MONTAGE = np.array( + [ + [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan], + [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan], + [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan], + ] + ) + + raw = read_raw_eeglab( + input_fname=three_chanpos_fname, preload=True, montage_units="cm" + ) + assert_array_equal( + np.array([ch["loc"] for ch in raw.info["chs"]]), EXPECTED_LOCATIONS_FROM_FILE + ) # To accommodate the new behavior so that: # read_raw_eeglab(.. 
montage=montage) and raw.set_montage(montage) @@ -439,25 +585,91 @@ def test_position_information(three_chanpos_fname): # a mix of what is in montage and in the file raw = read_raw_eeglab( input_fname=three_chanpos_fname, - preload=True, montage_units='cm', + preload=True, + montage_units="cm", ).set_montage(None) # Flush the montage builtin within input_fname - _assert_array_allclose_nan(np.array([ch['loc'] for ch in raw.info['chs']]), - EXPECTED_LOCATIONS_FROM_MONTAGE) + _assert_array_allclose_nan( + np.array([ch["loc"] for ch in raw.info["chs"]]), EXPECTED_LOCATIONS_FROM_MONTAGE + ) + + +def _create_eeg_with_scaled_montage_units(in_fname, out_fname, scale): + eeg = io.loadmat(in_fname, struct_as_record=False, squeeze_me=True)["EEG"] + + # test reading file with one event (read old version) + # chanlocs = deepcopy(eeg.chanlocs) + chanlocs = eeg.chanlocs + xyz = np.empty((len(chanlocs), 3)) + labels = [] + for ch_i, loc in enumerate(chanlocs): + xyz[ch_i] = [loc.X, loc.Y, loc.Z] + labels.append(loc.labels) + xyz *= scale + chanlocs = np.rec.fromarrays( + [labels, *xyz.T], + names=["labels", "X", "Y", "Z"], + ) + + fdt = isinstance(eeg.data, str) + if fdt: + shutil.copyfile(in_fname.with_suffix(".fdt"), out_fname.with_suffix(".fdt")) + io.savemat( + out_fname, + { + "EEG": { + "trials": eeg.trials, + "srate": eeg.srate, + "nbchan": eeg.nbchan, + "data": out_fname.with_suffix(".fdt").name if fdt else eeg.data, + "epoch": eeg.epoch, + "event": eeg.event, + "chanlocs": chanlocs, + "pnts": eeg.pnts, + } + }, + appendmat=False, + oned_as="row", + ) + + +@testing.requires_testing_data +def test_estimate_montage_units(tmp_path): + """Test automatic estimation of montage units.""" + m_fname = tmp_path / "test_montage_m.set" + _create_eeg_with_scaled_montage_units(raw_fname_chanloc, m_fname, 1e-3) + cm_fname = tmp_path / "test_montage_cm.set" + _create_eeg_with_scaled_montage_units(raw_fname_chanloc, cm_fname, 1e-1) + with pytest.warns(RuntimeWarning, match="The data contains 'boundary' events"): + # read 3 versions of the same file, with different montage units + raw_mm = read_raw_eeglab(raw_fname_chanloc, montage_units="auto") + raw_m = read_raw_eeglab(m_fname, montage_units="auto") + raw_cm = read_raw_eeglab(cm_fname, montage_units="auto") + # All locations should be the same if the units are correctly estimated + assert_allclose( + np.array([ch["loc"] for ch in raw_mm.info["chs"]]), + np.array([ch["loc"] for ch in raw_m.info["chs"]]), + ) + assert_allclose( + np.array([ch["loc"] for ch in raw_mm.info["chs"]]), + np.array([ch["loc"] for ch in raw_cm.info["chs"]]), + ) @testing.requires_testing_data def test_io_set_raw_2021(): """Test reading new default file format (no EEG struct).""" assert "EEG" not in io.loadmat(raw_fname_2021) - _test_raw_reader(reader=read_raw_eeglab, input_fname=raw_fname_2021, - test_preloading=False, preload=True) + _test_raw_reader( + reader=read_raw_eeglab, + input_fname=raw_fname_2021, + ) @testing.requires_testing_data def test_read_single_epoch(): """Test reading raw set file as an Epochs instance.""" - with pytest.raises(ValueError, match='trials less than 2'): + with pytest.raises(ValueError, match="trials less than 2"): read_epochs_eeglab(raw_fname_mat) @@ -465,51 +677,207 @@ def test_read_single_epoch(): def test_get_montage_info_with_ch_type(): """Test that the channel types are properly returned.""" mat = _readmat(raw_fname_onefile_mat) - n = len(mat['EEG']['chanlocs']['labels']) - mat['EEG']['chanlocs']['type'] = ['eeg'] * (n - 2) + ['eog'] + ['stim'] - 
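# A minimal sketch of the montage_units="auto" heuristic that the
# test_estimate_montage_units test above exercises: a human head radius is
# roughly 0.05-0.11 m, so the order of magnitude of the mean electrode distance
# from the origin reveals whether positions were stored in m, dm, cm or mm. The
# helper name is hypothetical; the thresholds mirror _handle_montage_units
# earlier in this diff.
import numpy as np


def _guess_montage_unit_scale(ch_pos):
    """Return the metres-per-unit factor implied by the channel positions."""
    radii = np.linalg.norm(np.asarray(list(ch_pos.values()), dtype=float), axis=1)
    mean_radius = np.nanmean(radii)
    for scale, upper in ((1.0, 0.25), (1e-1, 2.5), (1e-2, 25.0)):
        if mean_radius < upper:
            return scale
    return 1e-3  # anything larger is assumed to be millimetres


# positions stored in centimetres give a mean radius around 9 units, i.e. the cm bracket:
pos_cm = {"Cz": (0.0, 0.0, 9.0), "Fpz": (0.0, 9.0, 1.0)}
print(_guess_montage_unit_scale(pos_cm))  # 0.01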
mat['EEG']['chanlocs'] = _dol_to_lod(mat['EEG']['chanlocs']) - mat['EEG'] = Bunch(**mat['EEG']) - ch_names, ch_types, montage = _get_montage_information(mat['EEG'], False) + n = len(mat["EEG"]["chanlocs"]["labels"]) + mat["EEG"]["chanlocs"]["type"] = ["eeg"] * (n - 2) + ["eog"] + ["stim"] + mat["EEG"]["chanlocs"] = _dol_to_lod(mat["EEG"]["chanlocs"]) + mat["EEG"] = Bunch(**mat["EEG"]) + ch_names, ch_types, montage = _get_montage_information( + mat["EEG"], + get_pos=False, + montage_units="mm", + ) assert len(ch_names) == len(ch_types) == n - assert ch_types == ['eeg'] * (n - 2) + ['eog'] + ['stim'] + assert ch_types == ["eeg"] * (n - 2) + ["eog"] + ["stim"] assert montage is None # test unknown type warning mat = _readmat(raw_fname_onefile_mat) - n = len(mat['EEG']['chanlocs']['labels']) - mat['EEG']['chanlocs']['type'] = ['eeg'] * (n - 2) + ['eog'] + ['unknown'] - mat['EEG']['chanlocs'] = _dol_to_lod(mat['EEG']['chanlocs']) - mat['EEG'] = Bunch(**mat['EEG']) - with pytest.warns(RuntimeWarning, match='Unknown types found'): - ch_names, ch_types, montage = \ - _get_montage_information(mat['EEG'], False) + n = len(mat["EEG"]["chanlocs"]["labels"]) + mat["EEG"]["chanlocs"]["type"] = ["eeg"] * (n - 2) + ["eog"] + ["unknown"] + mat["EEG"]["chanlocs"] = _dol_to_lod(mat["EEG"]["chanlocs"]) + mat["EEG"] = Bunch(**mat["EEG"]) + with pytest.warns(RuntimeWarning, match="Unknown types found"): + ch_names, ch_types, montage = _get_montage_information( + mat["EEG"], + get_pos=False, + montage_units="mm", + ) @testing.requires_testing_data -@pytest.mark.parametrize('has_type', (True, False)) +@pytest.mark.parametrize("has_type", (True, False)) def test_fidsposition_information(monkeypatch, has_type): """Test reading file with 3 fiducial locations.""" if not has_type: - def get_bad_information(eeg, get_pos, scale_units=1.): - del eeg.chaninfo['nodatchans']['type'] - return _get_montage_information(eeg, get_pos, - scale_units=scale_units) - - monkeypatch.setattr(mne.io.eeglab.eeglab, '_get_montage_information', - get_bad_information) - raw = read_raw_eeglab(raw_fname_chanloc_fids, montage_units='cm') + + def get_bad_information(eeg, get_pos, *, montage_units): + del eeg.chaninfo["nodatchans"]["type"] + return _get_montage_information(eeg, get_pos, montage_units=montage_units) + + monkeypatch.setattr( + mne.io.eeglab.eeglab, "_get_montage_information", get_bad_information + ) + raw = read_raw_eeglab(raw_fname_chanloc_fids, montage_units="cm") montage = raw.get_montage() pos = montage.get_positions() n_eeg = 129 if not has_type: # These should now be estimated from the data - assert_allclose(pos['nasion'], [0, 0.0997, 0], atol=1e-4) - assert_allclose(pos['lpa'], -pos['nasion'][[1, 0, 0]]) - assert_allclose(pos['rpa'], pos['nasion'][[1, 0, 0]]) - assert pos['nasion'] is not None - assert pos['lpa'] is not None - assert pos['rpa'] is not None - assert len(pos['nasion']) == 3 - assert len(pos['lpa']) == 3 - assert len(pos['rpa']) == 3 - assert len(raw.info['dig']) == n_eeg + 3 + assert_allclose(pos["nasion"], [0, 0.0997, 0], atol=1e-4) + assert_allclose(pos["lpa"], -pos["nasion"][[1, 0, 0]]) + assert_allclose(pos["rpa"], pos["nasion"][[1, 0, 0]]) + assert pos["nasion"] is not None + assert pos["lpa"] is not None + assert pos["rpa"] is not None + assert len(pos["nasion"]) == 3 + assert len(pos["lpa"]) == 3 + assert len(pos["rpa"]) == 3 + assert len(raw.info["dig"]) == n_eeg + 3 + + +@testing.requires_testing_data +def test_eeglab_drop_nan_annotations(tmp_path): + """Test reading file with NaN annotations.""" + 
pytest.importorskip("eeglabio") + from eeglabio.raw import export_set + + file_path = tmp_path / "test_nan_anno.set" + raw = read_raw_eeglab(raw_fname_mat, preload=True) + data = raw.get_data() + sfreq = raw.info["sfreq"] + ch_names = raw.ch_names + anno = [ + raw.annotations.description, + raw.annotations.onset, + raw.annotations.duration, + ] + anno[1][0] = np.nan + + export_set( + str(file_path), + data, + sfreq, + ch_names, + ch_locs=None, + annotations=anno, + ref_channels="common", + ch_types=np.repeat("EEG", len(ch_names)), + ) + + with pytest.warns(RuntimeWarning, match="1 .* have an onset that is NaN.*"): + raw = read_raw_eeglab(file_path, preload=True) + + +@pytest.mark.flaky +@testing.requires_testing_data +@pytest.mark.timeout(10) +@pytest.mark.slowtest # has the advantage of not running on macOS where it errs a lot +def test_io_set_preload_false_is_faster(): + """Using preload=False should skip the expensive data read branch.""" + # warm start + read_raw_eeglab(raw_fname_mat, preload=False) + + durations = {} + for preload in (True, False): + start = time.perf_counter() + _ = read_raw_eeglab(raw_fname_mat, preload=preload) + durations[preload] = time.perf_counter() - start + + # preload=True should not be faster than preload=False (timings may vary + # across systems, so avoid strict thresholds) + assert durations[True] > durations[False] + + +@testing.requires_testing_data +def test_lazy_vs_preload_integrity(): + """Test that lazy loading produces identical data to preload.""" + raw_lazy = read_raw_eeglab(raw_fname_onefile_mat, preload=False) + raw_preload = read_raw_eeglab(raw_fname_onefile_mat, preload=True) + + # Get data from both modes + data_lazy = raw_lazy.get_data() + data_preload = raw_preload.get_data() + + # Data should be identical + assert_array_almost_equal(data_lazy, data_preload, decimal=5) + + # Verify shape consistency + assert data_lazy.shape == data_preload.shape + assert raw_lazy.n_times == raw_preload.n_times + assert len(raw_lazy.ch_names) == len(raw_preload.ch_names) + + # Verify no NaN/Inf and data is not all zeros + assert np.isfinite(data_lazy).all() + assert not np.all(data_lazy == 0) + + +@testing.requires_testing_data +def test_lazy_loading_segment_reads(): + """Test that lazy loading correctly reads data segments.""" + raw_lazy = read_raw_eeglab(raw_fname_onefile_mat, preload=False) + raw_preload = read_raw_eeglab(raw_fname_onefile_mat, preload=True) + + # Test beginning, middle, and end segments + segments = [ + (0, 100), + (100, 200), + (raw_lazy.n_times - 100, raw_lazy.n_times), + ] + + for start, stop in segments: + data_lazy = raw_lazy[:, start:stop][0] + data_preload = raw_preload[:, start:stop][0] + + # Segments should be identical + assert_array_almost_equal(data_lazy, data_preload, decimal=5) + + # Data should not be all zeros + assert not np.all(data_lazy == 0) + + +@testing.requires_testing_data +def test_lazy_loading_data_consistency(): + """Test that lazy loading maintains consistency across multiple reads.""" + raw_lazy = read_raw_eeglab(raw_fname_onefile_mat, preload=False) + raw_preload = read_raw_eeglab(raw_fname_onefile_mat, preload=True) + + # Get data multiple times from lazy-loaded raw + reads = [raw_lazy.get_data().copy() for _ in range(3)] + + # All reads should be identical + for i in range(1, len(reads)): + assert_array_equal(reads[0], reads[i]) + + # Should match preloaded data + data_preload = raw_preload.get_data() + assert_array_almost_equal(reads[0], data_preload, decimal=5) + + # Check numerical stability + 
lazy_mean = np.mean(reads[0]) + lazy_std = np.std(reads[0]) + preload_mean = np.mean(data_preload) + preload_std = np.std(data_preload) + + assert_allclose(lazy_mean, preload_mean, rtol=1e-10) + assert_allclose(lazy_std, preload_std, rtol=1e-10) + + +@testing.requires_testing_data +@pytest.mark.parametrize("fname", [raw_fname_onefile_mat, raw_fname_mat]) +def test_lazy_vs_preload_all_formats(fname): + """Test lazy loading vs preload for both embedded and separate formats.""" + raw_lazy = read_raw_eeglab(fname, preload=False) + raw_preload = read_raw_eeglab(fname, preload=True) + + # Verify identical data + data_lazy = raw_lazy.get_data() + data_preload = raw_preload.get_data() + assert_array_almost_equal(data_lazy, data_preload, decimal=5) + + # Verify metadata is identical + assert raw_lazy.n_times == raw_preload.n_times + assert raw_lazy.info["sfreq"] == raw_preload.info["sfreq"] + assert len(raw_lazy.ch_names) == len(raw_preload.ch_names) + + # Verify annotations are present + assert len(raw_lazy.annotations) == len(raw_preload.annotations) diff --git a/mne/io/egi/__init__.py b/mne/io/egi/__init__.py index dccf8e6e6bf..72f638d49e9 100644 --- a/mne/io/egi/__init__.py +++ b/mne/io/egi/__init__.py @@ -1,6 +1,8 @@ """EGI module for conversion to FIF.""" -# Author: Denis A. Engemann +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .egi import read_raw_egi from .egimff import read_evokeds_mff diff --git a/mne/io/egi/egi.py b/mne/io/egi/egi.py index 4e5321fb95f..73e93d39460 100644 --- a/mne/io/egi/egi.py +++ b/mne/io/egi/egi.py @@ -1,100 +1,110 @@ -# Authors: Denis A. Engemann -# Teon Brooks -# -# simplified BSD-3 license +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. import datetime import time import numpy as np -from .egimff import _read_raw_egi_mff -from .events import _combine_triggers +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _create_chs, _read_segments_file +from ...annotations import Annotations +from ...utils import _check_fname, _validate_type, logger, verbose from ..base import BaseRaw -from ..utils import _read_segments_file, _create_chs -from ..meas_info import _empty_info -from ..constants import FIFF -from ...utils import verbose, logger, warn, _validate_type, _check_fname +from .egimff import _read_raw_egi_mff +from .events import _combine_triggers, _triage_include_exclude def _read_header(fid): """Read EGI binary header.""" - version = np.fromfile(fid, ' 6 & ~np.bitwise_and(version, 6): version = version.byteswap().astype(np.uint32) else: - raise ValueError('Watchout. This does not seem to be a simple ' - 'binary EGI file.') + raise ValueError("Watchout. 
This does not seem to be a simple binary EGI file.") def my_fread(*x, **y): - return np.fromfile(*x, **y)[0] + return int(np.fromfile(*x, **y)[0]) info = dict( version=version, - year=my_fread(fid, '>i2', 1), - month=my_fread(fid, '>i2', 1), - day=my_fread(fid, '>i2', 1), - hour=my_fread(fid, '>i2', 1), - minute=my_fread(fid, '>i2', 1), - second=my_fread(fid, '>i2', 1), - millisecond=my_fread(fid, '>i4', 1), - samp_rate=my_fread(fid, '>i2', 1), - n_channels=my_fread(fid, '>i2', 1), - gain=my_fread(fid, '>i2', 1), - bits=my_fread(fid, '>i2', 1), - value_range=my_fread(fid, '>i2', 1) + year=my_fread(fid, ">i2", 1), + month=my_fread(fid, ">i2", 1), + day=my_fread(fid, ">i2", 1), + hour=my_fread(fid, ">i2", 1), + minute=my_fread(fid, ">i2", 1), + second=my_fread(fid, ">i2", 1), + millisecond=my_fread(fid, ">i4", 1), + samp_rate=my_fread(fid, ">i2", 1), + n_channels=my_fread(fid, ">i2", 1), + gain=my_fread(fid, ">i2", 1), + bits=my_fread(fid, ">i2", 1), + value_range=my_fread(fid, ">i2", 1), ) unsegmented = 1 if np.bitwise_and(version, 1) == 0 else 0 precision = np.bitwise_and(version, 6) if precision == 0: - raise RuntimeError('Floating point precision is undefined.') + raise RuntimeError("Floating point precision is undefined.") if unsegmented: - info.update(dict(n_categories=0, - n_segments=1, - n_samples=np.fromfile(fid, '>i4', 1)[0], - n_events=np.fromfile(fid, '>i2', 1)[0], - event_codes=[], - category_names=[], - category_lengths=[], - pre_baseline=0)) - for event in range(info['n_events']): - event_codes = ''.join(np.fromfile(fid, 'S1', 4).astype('U1')) - info['event_codes'].append(event_codes) + info.update( + dict( + n_categories=0, + n_segments=1, + n_samples=int(np.fromfile(fid, ">i4", 1)[0]), + n_events=int(np.fromfile(fid, ">i2", 1)[0]), + event_codes=[], + category_names=[], + category_lengths=[], + pre_baseline=0, + ) + ) + for event in range(info["n_events"]): + event_codes = "".join(np.fromfile(fid, "S1", 4).astype("U1")) + info["event_codes"].append(event_codes) else: - raise NotImplementedError('Only continuous files are supported') - info['unsegmented'] = unsegmented - info['dtype'], info['orig_format'] = {2: ('>i2', 'short'), - 4: ('>f4', 'float'), - 6: ('>f8', 'double')}[precision] - info['dtype'] = np.dtype(info['dtype']) + raise NotImplementedError("Only continuous files are supported") + info["unsegmented"] = unsegmented + info["dtype"], info["orig_format"] = { + 2: (">i2", "short"), + 4: (">f4", "float"), + 6: (">f8", "double"), + }[precision] + info["dtype"] = np.dtype(info["dtype"]) return info def _read_events(fid, info): """Read events.""" - events = np.zeros([info['n_events'], - info['n_segments'] * info['n_samples']]) - fid.seek(36 + info['n_events'] * 4, 0) # skip header - for si in range(info['n_samples']): + events = np.zeros([info["n_events"], info["n_segments"] * info["n_samples"]]) + fid.seek(36 + info["n_events"] * 4, 0) # skip header + for si in range(info["n_samples"]): # skip data channels - fid.seek(info['n_channels'] * info['dtype'].itemsize, 1) + fid.seek(info["n_channels"] * info["dtype"].itemsize, 1) # read event channels - events[:, si] = np.fromfile(fid, info['dtype'], info['n_events']) + events[:, si] = np.fromfile(fid, info["dtype"], info["n_events"]) return events @verbose -def read_raw_egi(input_fname, eog=None, misc=None, - include=None, exclude=None, preload=False, - channel_naming='E%d', verbose=None): +def read_raw_egi( + input_fname, + eog=None, + misc=None, + include=None, + exclude=None, + preload=False, + channel_naming="E%d", + *, + 
events_as_annotations=True,
+    verbose=None,
+) -> "RawEGI":
     """Read EGI simple binary as raw object.
 
-    .. note:: This function attempts to create a synthetic trigger channel.
-              See the Notes section below.
-
     Parameters
     ----------
     input_fname : path-like
@@ -107,14 +117,13 @@ def read_raw_egi(input_fname, eog=None, misc=None,
         Names of channels or list of indices that should be designated
         MISC channels. Default is None.
     include : None | list
-        The event channels to be ignored when creating the synthetic
-        trigger. Defaults to None.
+        The event channels to be included when creating the synthetic
+        trigger or annotations. Defaults to None.
         Note. Overrides ``exclude`` parameter.
     exclude : None | list
         The event channels to be ignored when creating the synthetic
-        trigger. Defaults to None. If None, channels that have more than
-        one event and the ``sync`` and ``TREV`` channels will be
-        ignored.
+        trigger or annotations. Defaults to None. If None, the ``sync`` and ``TREV``
+        channels will be ignored. This is ignored when ``include`` is not None.
     %(preload)s
 
         .. versionadded:: 0.11
@@ -123,7 +132,15 @@ def read_raw_egi(input_fname, eog=None, misc=None,
        (resulting in channel names ``'E1'``, ``'E2'``, ``'E3'``...). The
        effective default prior to 0.14.0 was ``'EEG %%03d'``.
 
-        .. versionadded:: 0.14.0
+        .. versionadded:: 0.14.0
+
+    events_as_annotations : bool
+        If True (default), annotations are created from experiment events. If False,
+        a synthetic trigger channel ``STI 014`` is created from experiment events.
+        See the Notes section for details.
+        The default changed from False to True in version 1.9.
+
+        .. versionadded:: 1.8.0
     %(verbose)s
 
     Returns
     -------
@@ -138,144 +155,179 @@ def read_raw_egi(input_fname, eog=None, misc=None,
 
     Notes
    -----
-    The trigger channel names are based on the arbitrary user dependent event
-    codes used. However this function will attempt to generate a **synthetic
-    trigger channel** named ``STI 014`` in accordance with the general
-    Neuromag / MNE naming pattern.
-
-    The event_id assignment equals ``np.arange(n_events) + 1``. The resulting
-    ``event_id`` mapping is stored as attribute to the resulting raw object but
-    will be ignored when saving to a fiff. Note. The trigger channel is
-    artificially constructed based on timestamps received by the Netstation.
-    As a consequence, triggers have only short durations.
-
-    This step will fail if events are not mutually exclusive.
+    When ``events_as_annotations=True``, event codes on stimulus channels like
+    ``DIN1`` are stored as annotations with the ``description`` set to the stimulus
+    channel name.
+
+    When ``events_as_annotations=False`` and events are present on the included
+    stimulus channels, a new stim channel ``STI 014`` will be synthesized from the
+    events. It will contain 1-sample pulses where the Netstation file had event
+    timestamps. A ``raw.event_id`` dictionary is added to the raw object that will
+    have arbitrary sequential integer IDs for the events. This will fail if any
+    timestamps are duplicated. The ``event_id`` will also not survive a save/load
+    roundtrip.
+
+    For these reasons, it is recommended to use ``events_as_annotations=True``.
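+
+    For example, reading a recording with ``events_as_annotations=True`` and then
+    converting the resulting annotations into an events array could look like this
+    (``sample.raw`` is a hypothetical path used only for illustration)::
+
+        import mne
+
+        raw = mne.io.read_raw_egi("sample.raw", events_as_annotations=True)
+        events, event_id = mne.events_from_annotations(raw)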
""" - _validate_type(input_fname, 'path-like', 'input_fname') + _validate_type(input_fname, "path-like", "input_fname") input_fname = str(input_fname) - if input_fname.rstrip('/\\').endswith('.mff'): # allows .mff or .mff/ - return _read_raw_egi_mff(input_fname, eog, misc, include, - exclude, preload, channel_naming, verbose) - return RawEGI(input_fname, eog, misc, include, exclude, preload, - channel_naming, verbose) + _validate_type(events_as_annotations, bool, "events_as_annotations") + + if input_fname.rstrip("/\\").endswith(".mff"): # allows .mff or .mff/ + return _read_raw_egi_mff( + input_fname, + eog, + misc, + include, + exclude, + preload, + channel_naming, + events_as_annotations=events_as_annotations, + verbose=verbose, + ) + return RawEGI( + input_fname, + eog, + misc, + include, + exclude, + preload, + channel_naming, + events_as_annotations=events_as_annotations, + verbose=verbose, + ) class RawEGI(BaseRaw): """Raw object from EGI simple binary file.""" + _extra_attributes = ("event_id",) + @verbose - def __init__(self, input_fname, eog=None, misc=None, - include=None, exclude=None, preload=False, - channel_naming='E%d', verbose=None): # noqa: D102 - input_fname = str( - _check_fname(input_fname, "read", True, "input_fname") - ) + def __init__( + self, + input_fname, + eog=None, + misc=None, + include=None, + exclude=None, + preload=False, + channel_naming="E%d", + *, + events_as_annotations=True, + verbose=None, + ): + input_fname = str(_check_fname(input_fname, "read", True, "input_fname")) if eog is None: eog = [] if misc is None: misc = [] - with open(input_fname, 'rb') as fid: # 'rb' important for py3k - logger.info('Reading EGI header from %s...' % input_fname) + with open(input_fname, "rb") as fid: # 'rb' important for py3k + logger.info(f"Reading EGI header from {input_fname}...") egi_info = _read_header(fid) - logger.info(' Reading events ...') + logger.info(" Reading events ...") egi_events = _read_events(fid, egi_info) # update info + jump - if egi_info['value_range'] != 0 and egi_info['bits'] != 0: - cal = egi_info['value_range'] / 2. 
** egi_info['bits'] + if egi_info["value_range"] != 0 and egi_info["bits"] != 0: + cal = egi_info["value_range"] / 2.0 ** egi_info["bits"] else: cal = 1e-6 - logger.info(' Assembling measurement info ...') - - event_codes = [] - if egi_info['n_events'] > 0: - event_codes = list(egi_info['event_codes']) - if include is None: - exclude_list = ['sync', 'TREV'] if exclude is None else exclude - exclude_inds = [i for i, k in enumerate(event_codes) if k in - exclude_list] - more_excludes = [] - if exclude is None: - for ii, event in enumerate(egi_events): - if event.sum() <= 1 and event_codes[ii]: - more_excludes.append(ii) - if len(exclude_inds) + len(more_excludes) == len(event_codes): - warn('Did not find any event code with more than one ' - 'event.', RuntimeWarning) - else: - exclude_inds.extend(more_excludes) - - exclude_inds.sort() - include_ = [i for i in np.arange(egi_info['n_events']) if - i not in exclude_inds] - include_names = [k for i, k in enumerate(event_codes) - if i in include_] - else: - include_ = [i for i, k in enumerate(event_codes) - if k in include] - include_names = include - - for kk, v in [('include', include_names), ('exclude', exclude)]: - if isinstance(v, list): - for k in v: - if k not in event_codes: - raise ValueError('Could find event named "%s"' % k) - elif v is not None: - raise ValueError('`%s` must be None or of type list' % kk) - - event_ids = np.arange(len(include_)) + 1 + logger.info(" Assembling measurement info ...") + + event_codes = egi_info["event_codes"] + include = _triage_include_exclude(include, exclude, egi_events, egi_info) + if egi_info["n_events"] > 0 and not events_as_annotations: + event_ids = np.arange(len(include)) + 1 logger.info(' Synthesizing trigger channel "STI 014" ...') - logger.info(' Excluding events {%s} ...' 
% - ", ".join([k for i, k in enumerate(event_codes) - if i not in include_])) - egi_info['new_trigger'] = _combine_triggers( - egi_events[include_], remapping=event_ids) - self.event_id = dict(zip([e for e in event_codes if e in - include_names], event_ids)) + egi_info["new_trigger"] = _combine_triggers( + egi_events[[e in include for e in event_codes]], remapping=event_ids + ) + self.event_id = dict( + zip([e for e in event_codes if e in include], event_ids) + ) else: - # No events self.event_id = None - egi_info['new_trigger'] = None - info = _empty_info(egi_info['samp_rate']) + egi_info["new_trigger"] = None + info = _empty_info(egi_info["samp_rate"]) my_time = datetime.datetime( - egi_info['year'], egi_info['month'], egi_info['day'], - egi_info['hour'], egi_info['minute'], egi_info['second']) + egi_info["year"], + egi_info["month"], + egi_info["day"], + egi_info["hour"], + egi_info["minute"], + egi_info["second"], + ) my_timestamp = time.mktime(my_time.timetuple()) - info['meas_date'] = (my_timestamp, 0) - ch_names = [channel_naming % (i + 1) for i in - range(egi_info['n_channels'])] - ch_names.extend(list(egi_info['event_codes'])) - if egi_info['new_trigger'] is not None: - ch_names.append('STI 014') # our new_trigger - nchan = len(ch_names) - cals = np.repeat(cal, nchan) + info["meas_date"] = (my_timestamp, 0) + ch_names = [channel_naming % (i + 1) for i in range(egi_info["n_channels"])] + cals = np.repeat(cal, len(ch_names)) + ch_names.extend(list(event_codes)) + cals = np.concatenate([cals, np.ones(egi_info["n_events"])]) + if egi_info["new_trigger"] is not None: + ch_names.append("STI 014") # our new_trigger + cals = np.concatenate([cals, [1.0]]) ch_coil = FIFF.FIFFV_COIL_EEG ch_kind = FIFF.FIFFV_EEG_CH chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc) - sti_ch_idx = [i for i, name in enumerate(ch_names) if - name.startswith('STI') or name in event_codes] + sti_ch_idx = [ + i + for i, name in enumerate(ch_names) + if name.startswith("STI") or name in event_codes + ] for idx in sti_ch_idx: - chs[idx].update({'unit_mul': FIFF.FIFF_UNITM_NONE, 'cal': 1., - 'kind': FIFF.FIFFV_STIM_CH, - 'coil_type': FIFF.FIFFV_COIL_NONE, - 'unit': FIFF.FIFF_UNIT_NONE, - 'loc': np.zeros(12)}) - info['chs'] = chs + chs[idx].update( + { + "unit_mul": FIFF.FIFF_UNITM_NONE, + "kind": FIFF.FIFFV_STIM_CH, + "coil_type": FIFF.FIFFV_COIL_NONE, + "unit": FIFF.FIFF_UNIT_NONE, + "loc": np.zeros(12), + } + ) + info["chs"] = chs info._unlocked = False info._update_redundant() - orig_format = egi_info["orig_format"] \ - if egi_info["orig_format"] != "float" else "single" - super(RawEGI, self).__init__( - info, preload, orig_format=orig_format, - filenames=[input_fname], last_samps=[egi_info['n_samples'] - 1], - raw_extras=[egi_info], verbose=verbose) + orig_format = ( + egi_info["orig_format"] if egi_info["orig_format"] != "float" else "single" + ) + super().__init__( + info, + preload, + orig_format=orig_format, + filenames=[input_fname], + last_samps=[egi_info["n_samples"] - 1], + raw_extras=[egi_info], + verbose=verbose, + ) + if events_as_annotations: + annot = dict(onset=list(), duration=list(), description=list()) + for code, row in zip(egi_info["event_codes"], egi_events): + if code not in include: + continue + onset = np.where(row)[0] / self.info["sfreq"] + annot["onset"].extend(onset) + annot["duration"].extend([0.0] * len(onset)) + annot["description"].extend([code] * len(onset)) + if annot: + self.set_annotations(Annotations(**annot)) def _read_segment_file(self, data, idx, fi, start, 
stop, cals, mult): """Read a segment of data from a file.""" egi_info = self._raw_extras[fi] - dtype = egi_info['dtype'] - n_chan_read = egi_info['n_channels'] + egi_info['n_events'] - offset = 36 + egi_info['n_events'] * 4 - trigger_ch = egi_info['new_trigger'] - _read_segments_file(self, data, idx, fi, start, stop, cals, mult, - dtype=dtype, n_channels=n_chan_read, offset=offset, - trigger_ch=trigger_ch) + dtype = egi_info["dtype"] + n_chan_read = egi_info["n_channels"] + egi_info["n_events"] + offset = 36 + egi_info["n_events"] * 4 + trigger_ch = egi_info["new_trigger"] + _read_segments_file( + self, + data, + idx, + fi, + start, + stop, + cals, + mult, + dtype=dtype, + n_channels=n_chan_read, + offset=offset, + trigger_ch=trigger_ch, + ) diff --git a/mne/io/egi/egimff.py b/mne/io/egi/egimff.py index aee9310e86b..870f58890a2 100644 --- a/mne/io/egi/egimff.py +++ b/mne/io/egi/egimff.py @@ -1,67 +1,80 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """EGI NetStation Load Function.""" -from collections import OrderedDict import datetime import math import os.path as op import re -from xml.dom.minidom import parse +from collections import OrderedDict from pathlib import Path import numpy as np -from .events import _read_events, _combine_triggers -from .general import (_get_signalfname, _get_ep_info, _extract, _get_blocks, - _get_gains, _block_r) -from ..base import BaseRaw -from ..constants import FIFF -from ..meas_info import _empty_info, create_info, _ensure_meas_date_none_or_dt -from ..proj import setup_proj -from ..utils import _create_chs, _mult_cal_one +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info, _ensure_meas_date_none_or_dt, create_info +from ..._fiff.proj import setup_proj +from ..._fiff.utils import _create_chs, _mult_cal_one from ...annotations import Annotations -from ...utils import verbose, logger, warn, _check_option, _check_fname +from ...channels.montage import make_dig_montage from ...evoked import EvokedArray +from ...utils import _check_fname, _check_option, _soft_import, logger, verbose, warn +from ..base import BaseRaw +from .events import _combine_triggers, _read_events, _triage_include_exclude +from .general import ( + _block_r, + _extract, + _get_blocks, + _get_ep_info, + _get_gains, + _get_signalfname, +) -REFERENCE_NAMES = ('VREF', 'Vertex Reference') +REFERENCE_NAMES = ("VREF", "Vertex Reference") def _read_mff_header(filepath): """Read mff header.""" + _soft_import("defusedxml", "reading EGI MFF data") + from defusedxml.minidom import parse + all_files = _get_signalfname(filepath) - eeg_file = all_files['EEG']['signal'] - eeg_info_file = all_files['EEG']['info'] + eeg_file = all_files["EEG"]["signal"] + eeg_info_file = all_files["EEG"]["info"] - info_filepath = op.join(filepath, 'info.xml') # add with filepath - tags = ['mffVersion', 'recordTime'] + info_filepath = op.join(filepath, "info.xml") # add with filepath + tags = ["mffVersion", "recordTime"] version_and_date = _extract(tags, filepath=info_filepath) version = "" - if len(version_and_date['mffVersion']): - version = version_and_date['mffVersion'][0] + if len(version_and_date["mffVersion"]): + version = version_and_date["mffVersion"][0] fname = op.join(filepath, eeg_file) signal_blocks = _get_blocks(fname) epochs = _get_ep_info(filepath) - summaryinfo = dict(eeg_fname=eeg_file, - info_fname=eeg_info_file) + summaryinfo = dict(eeg_fname=eeg_file, info_fname=eeg_info_file) summaryinfo.update(signal_blocks) # 
sanity check and update relevant values - record_time = version_and_date['recordTime'][0] + record_time = version_and_date["recordTime"][0] # e.g., # 2018-07-30T10:47:01.021673-04:00 # 2017-09-20T09:55:44.072000000+01:00 g = re.match( - r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.(\d{6}(?:\d{3})?)[+-]\d{2}:\d{2}', # noqa: E501 - record_time) + r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.(\d{6}(?:\d{3})?)[+-]\d{2}:\d{2}", # noqa: E501 + record_time, + ) if g is None: - raise RuntimeError('Could not parse recordTime %r' % (record_time,)) + raise RuntimeError(f"Could not parse recordTime {repr(record_time)}") frac = g.groups()[0] assert len(frac) in (6, 9) and all(f.isnumeric() for f in frac) # regex div = 1000 if len(frac) == 6 else 1000000 - for key in ('last_samps', 'first_samps'): + for key in ("last_samps", "first_samps"): # convert from times in µS to samples for ei, e in enumerate(epochs[key]): if e % div != 0: - raise RuntimeError('Could not parse epoch time %s' % (e,)) + raise RuntimeError(f"Could not parse epoch time {e}") epochs[key][ei] = e // div epochs[key] = np.array(epochs[key], np.uint64) # I guess they refer to times in milliseconds? @@ -72,131 +85,125 @@ def _read_mff_header(filepath): # by what we need to (e.g., a sample rate of 500 means we can multiply # by 1 and divide by 2 rather than multiplying by 500 and dividing by # 1000) - numerator = signal_blocks['sfreq'] + numerator = int(signal_blocks["sfreq"]) denominator = 1000 this_gcd = math.gcd(numerator, denominator) numerator = numerator // this_gcd denominator = denominator // this_gcd - with np.errstate(over='raise'): + with np.errstate(over="raise"): epochs[key] *= numerator epochs[key] //= denominator # Should be safe to cast to int now, which makes things later not # upbroadcast to float epochs[key] = epochs[key].astype(np.int64) - n_samps_block = signal_blocks['samples_block'].sum() - n_samps_epochs = (epochs['last_samps'] - epochs['first_samps']).sum() - bad = (n_samps_epochs != n_samps_block or - not (epochs['first_samps'] < epochs['last_samps']).all() or - not (epochs['first_samps'][1:] >= epochs['last_samps'][:-1]).all()) + n_samps_block = signal_blocks["samples_block"].sum() + n_samps_epochs = (epochs["last_samps"] - epochs["first_samps"]).sum() + bad = ( + n_samps_epochs != n_samps_block + or not (epochs["first_samps"] < epochs["last_samps"]).all() + or not (epochs["first_samps"][1:] >= epochs["last_samps"][:-1]).all() + ) if bad: - raise RuntimeError('EGI epoch first/last samps could not be parsed:\n' - '%s\n%s' % (list(epochs['first_samps']), - list(epochs['last_samps']))) + raise RuntimeError( + "EGI epoch first/last samps could not be parsed:\n" + f"{list(epochs['first_samps'])}\n{list(epochs['last_samps'])}" + ) summaryinfo.update(epochs) # index which samples in raw are actually readable from disk (i.e., not # in a skip) - disk_samps = np.full(epochs['last_samps'][-1], -1) + disk_samps = np.full(epochs["last_samps"][-1], -1) offset = 0 - for first, last in zip(epochs['first_samps'], epochs['last_samps']): + for first, last in zip(epochs["first_samps"], epochs["last_samps"]): n_this = last - first disk_samps[first:last] = np.arange(offset, offset + n_this) offset += n_this - summaryinfo['disk_samps'] = disk_samps + summaryinfo["disk_samps"] = disk_samps # Add the sensor info. 
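+    # sensorLayout.xml holds the device name and one <sensor> entry per channel;
+    # sensors of type 0 or 1 are treated as EEG channels (unit uV), and their
+    # count must match the number of channels found in the signal blocks.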
- sensor_layout_file = op.join(filepath, 'sensorLayout.xml') + sensor_layout_file = op.join(filepath, "sensorLayout.xml") sensor_layout_obj = parse(sensor_layout_file) - summaryinfo['device'] = (sensor_layout_obj.getElementsByTagName('name') - [0].firstChild.data) - sensors = sensor_layout_obj.getElementsByTagName('sensor') + + summaryinfo["device"] = sensor_layout_obj.getElementsByTagName("name")[ + 0 + ].firstChild.data + sensors = sensor_layout_obj.getElementsByTagName("sensor") chan_type = list() chan_unit = list() n_chans = 0 numbers = list() # used for identification for sensor in sensors: - sensortype = int(sensor.getElementsByTagName('type')[0] - .firstChild.data) + sensortype = int(sensor.getElementsByTagName("type")[0].firstChild.data) if sensortype in [0, 1]: - sn = sensor.getElementsByTagName('number')[0].firstChild.data + sn = sensor.getElementsByTagName("number")[0].firstChild.data sn = sn.encode() numbers.append(sn) - chan_type.append('eeg') - chan_unit.append('uV') + chan_type.append("eeg") + chan_unit.append("uV") n_chans = n_chans + 1 - if n_chans != summaryinfo['n_channels']: - raise RuntimeError('Number of defined channels (%d) did not match the ' - 'expected channels (%d)' - % (n_chans, summaryinfo['n_channels'])) + if n_chans != summaryinfo["n_channels"]: + raise RuntimeError( + f"Number of defined channels ({n_chans}) did not match the " + f"expected channels ({summaryinfo['n_channels']})." + ) # Check presence of PNS data pns_names = [] - if 'PNS' in all_files: - pns_fpath = op.join(filepath, all_files['PNS']['signal']) + if "PNS" in all_files: + pns_fpath = op.join(filepath, all_files["PNS"]["signal"]) pns_blocks = _get_blocks(pns_fpath) - pns_samples = pns_blocks['samples_block'] - signal_samples = signal_blocks['samples_block'] - same_blocks = (np.array_equal(pns_samples[:-1], - signal_samples[:-1]) and - pns_samples[-1] in (signal_samples[-1] - np.arange(2))) + pns_samples = pns_blocks["samples_block"] + signal_samples = signal_blocks["samples_block"] + same_blocks = np.array_equal( + pns_samples[:-1], signal_samples[:-1] + ) and pns_samples[-1] in (signal_samples[-1] - np.arange(2)) if not same_blocks: - raise RuntimeError('PNS and signals samples did not match:\n' - '%s\nvs\n%s' - % (list(pns_samples), list(signal_samples))) + raise RuntimeError( + "PNS and signals samples did not match:\n" + f"{list(pns_samples)}\nvs\n{list(signal_samples)}" + ) - pns_file = op.join(filepath, 'pnsSet.xml') + pns_file = op.join(filepath, "pnsSet.xml") pns_obj = parse(pns_file) - sensors = pns_obj.getElementsByTagName('sensor') + sensors = pns_obj.getElementsByTagName("sensor") pns_types = [] pns_units = [] for sensor in sensors: # sensor number: # sensor.getElementsByTagName('number')[0].firstChild.data - name = sensor.getElementsByTagName('name')[0].firstChild.data - unit_elem = sensor.getElementsByTagName('unit')[0].firstChild - unit = '' + name = sensor.getElementsByTagName("name")[0].firstChild.data + unit_elem = sensor.getElementsByTagName("unit")[0].firstChild + unit = "" if unit_elem is not None: unit = unit_elem.data - if name == 'ECG': - ch_type = 'ecg' - elif 'EMG' in name: - ch_type = 'emg' + if name == "ECG": + ch_type = "ecg" + elif "EMG" in name: + ch_type = "emg" else: - ch_type = 'bio' + ch_type = "bio" pns_types.append(ch_type) pns_units.append(unit) pns_names.append(name) - summaryinfo.update(pns_types=pns_types, pns_units=pns_units, - pns_fname=all_files['PNS']['signal'], - pns_sample_blocks=pns_blocks) - summaryinfo.update(pns_names=pns_names, 
version=version, - date=version_and_date['recordTime'][0], - chan_type=chan_type, chan_unit=chan_unit, - numbers=numbers) + summaryinfo.update( + pns_types=pns_types, + pns_units=pns_units, + pns_fname=all_files["PNS"]["signal"], + pns_sample_blocks=pns_blocks, + ) + summaryinfo.update( + pns_names=pns_names, + version=version, + date=version_and_date["recordTime"][0], + chan_type=chan_type, + chan_unit=chan_unit, + numbers=numbers, + ) return summaryinfo -class _FixedOffset(datetime.tzinfo): - """Fixed offset in minutes east from UTC. - - Adapted from the official Python documentation. - """ - - def __init__(self, offset): - self._offset = datetime.timedelta(minutes=offset) - - def utcoffset(self, dt): - return self._offset - - def tzname(self, dt): - return 'MFF' - - def dst(self, dt): - return datetime.timedelta(0) - - def _read_header(input_fname): """Obtain the headers from the file package mff. @@ -214,7 +221,7 @@ def _read_header(input_fname): mff_hdr = _read_mff_header(input_fname) with open(input_fname + "/signal1.bin", "rb") as fid: version = np.fromfile(fid, np.int32, 1)[0] - ''' + """ the datetime.strptime .f directive (milleseconds) will only accept up to 6 digits. if there are more than six millesecond digits in the provided timestamp string @@ -223,72 +230,85 @@ def _read_header(input_fname): elements of the timestamp string to truncate the milleseconds to 6 digits and extract the timezone, and then piece these together and assign back to mff_hdr['date'] - ''' - if len(mff_hdr['date']) > 32: - dt, tz = [mff_hdr['date'][:26], mff_hdr['date'][-6:]] - mff_hdr['date'] = dt + tz + """ + if len(mff_hdr["date"]) > 32: + dt, tz = [mff_hdr["date"][:26], mff_hdr["date"][-6:]] + mff_hdr["date"] = dt + tz - time_n = (datetime.datetime.strptime( - mff_hdr['date'], '%Y-%m-%dT%H:%M:%S.%f%z')) + time_n = datetime.datetime.strptime(mff_hdr["date"], "%Y-%m-%dT%H:%M:%S.%f%z") info = dict( version=version, meas_dt_local=time_n, - utc_offset=time_n.strftime('%z'), + utc_offset=time_n.strftime("%z"), gain=0, bits=0, - value_range=0) - info.update(n_categories=0, n_segments=1, n_events=0, event_codes=[], - category_names=[], category_lengths=[], pre_baseline=0) + value_range=0, + ) + info.update( + n_categories=0, + n_segments=1, + n_events=0, + event_codes=[], + category_names=[], + category_lengths=[], + pre_baseline=0, + ) info.update(mff_hdr) return info def _get_eeg_calibration_info(filepath, egi_info): """Calculate calibration info for EEG channels.""" - gains = _get_gains(op.join(filepath, egi_info['info_fname'])) - if egi_info['value_range'] != 0 and egi_info['bits'] != 0: - cals = [egi_info['value_range'] / 2 ** egi_info['bits']] * \ - len(egi_info['chan_type']) + gains = _get_gains(op.join(filepath, egi_info["info_fname"])) + if egi_info["value_range"] != 0 and egi_info["bits"] != 0: + cals = [egi_info["value_range"] / 2 ** egi_info["bits"]] * len( + egi_info["chan_type"] + ) else: - cal_scales = {'uV': 1e-6, 'V': 1} - cals = [cal_scales[t] for t in egi_info['chan_unit']] - if 'gcal' in gains: - cals *= gains['gcal'] + cal_scales = {"uV": 1e-6, "V": 1} + cals = [cal_scales[t] for t in egi_info["chan_unit"]] + if "gcal" in gains: + cals *= gains["gcal"] return cals def _read_locs(filepath, egi_info, channel_naming): """Read channel locations.""" - from ...channels.montage import make_dig_montage - fname = op.join(filepath, 'coordinates.xml') + _soft_import("defusedxml", "reading EGI MFF data") + from defusedxml.minidom import parse + + fname = op.join(filepath, "coordinates.xml") if not 
op.exists(fname): - logger.warn( - 'File coordinates.xml not found, not setting channel locations') - ch_names = [channel_naming % (i + 1) for i in - range(egi_info['n_channels'])] + warn("File coordinates.xml not found, not setting channel locations") + ch_names = [channel_naming % (i + 1) for i in range(egi_info["n_channels"])] return ch_names, None dig_ident_map = { - 'Left periauricular point': 'lpa', - 'Right periauricular point': 'rpa', - 'Nasion': 'nasion', + "Left periauricular point": "lpa", + "Right periauricular point": "rpa", + "Nasion": "nasion", } - numbers = np.array(egi_info['numbers']) + numbers = np.array(egi_info["numbers"]) coordinates = parse(fname) - sensors = coordinates.getElementsByTagName('sensor') + sensors = coordinates.getElementsByTagName("sensor") ch_pos = OrderedDict() hsp = list() nlr = dict() ch_names = list() for sensor in sensors: - name_element = sensor.getElementsByTagName('name')[0].firstChild - num_element = sensor.getElementsByTagName('number')[0].firstChild - name = (channel_naming % int(num_element.data) if name_element is None - else name_element.data) + name_element = sensor.getElementsByTagName("name")[0].firstChild + num_element = sensor.getElementsByTagName("number")[0].firstChild + name = ( + channel_naming % int(num_element.data) + if name_element is None + else name_element.data + ) nr = num_element.data.encode() - coords = [float(sensor.getElementsByTagName(coord)[0].firstChild.data) - for coord in 'xyz'] + coords = [ + float(sensor.getElementsByTagName(coord)[0].firstChild.data) + for coord in "xyz" + ] loc = np.array(coords) / 100 # cm -> m # create dig entry if name in dig_ident_map: @@ -309,97 +329,68 @@ def _read_locs(filepath, egi_info, channel_naming): def _add_pns_channel_info(chs, egi_info, ch_names): """Add info for PNS channels to channel info dict.""" - for i_ch, ch_name in enumerate(egi_info['pns_names']): + for i_ch, ch_name in enumerate(egi_info["pns_names"]): idx = ch_names.index(ch_name) - ch_type = egi_info['pns_types'][i_ch] - type_to_kind_map = {'ecg': FIFF.FIFFV_ECG_CH, - 'emg': FIFF.FIFFV_EMG_CH - } + ch_type = egi_info["pns_types"][i_ch] + type_to_kind_map = {"ecg": FIFF.FIFFV_ECG_CH, "emg": FIFF.FIFFV_EMG_CH} ch_kind = type_to_kind_map.get(ch_type, FIFF.FIFFV_BIO_CH) ch_unit = FIFF.FIFF_UNIT_V ch_cal = 1e-6 - if egi_info['pns_units'][i_ch] != 'uV': + if egi_info["pns_units"][i_ch] != "uV": ch_unit = FIFF.FIFF_UNIT_NONE ch_cal = 1.0 chs[idx].update( - cal=ch_cal, kind=ch_kind, coil_type=FIFF.FIFFV_COIL_NONE, - unit=ch_unit) + cal=ch_cal, kind=ch_kind, coil_type=FIFF.FIFFV_COIL_NONE, unit=ch_unit + ) return chs @verbose -def _read_raw_egi_mff(input_fname, eog=None, misc=None, - include=None, exclude=None, preload=False, - channel_naming='E%d', verbose=None): - """Read EGI mff binary as raw object. - - .. note:: This function attempts to create a synthetic trigger channel. - See notes below. - - Parameters - ---------- - input_fname : path-like - Path to the raw file. - eog : list or tuple - Names of channels or list of indices that should be designated - EOG channels. Default is None. - misc : list or tuple - Names of channels or list of indices that should be designated - MISC channels. Default is None. - include : None | list - The event channels to be ignored when creating the synthetic - trigger. Defaults to None. - Note. Overrides `exclude` parameter. - exclude : None | list - The event channels to be ignored when creating the synthetic - trigger. Defaults to None. 
If None, channels that have more than - one event and the ``sync`` and ``TREV`` channels will be - ignored. - %(preload)s - channel_naming : str - Channel naming convention for the data channels. Defaults to 'E%%d' - (resulting in channel names 'E1', 'E2', 'E3'...). The effective default - prior to 0.14.0 was 'EEG %%03d'. - %(verbose)s - - Returns - ------- - raw : instance of RawMff - A Raw object containing EGI mff data. - See :class:`mne.io.Raw` for documentation of attributes and methods. - - Notes - ----- - The trigger channel names are based on the arbitrary user dependent event - codes used. However this function will attempt to generate a synthetic - trigger channel named ``STI 014`` in accordance with the general - Neuromag / MNE naming pattern. - - The event_id assignment equals ``np.arange(n_events) + 1``. The resulting - ``event_id`` mapping is stored as attribute to the resulting raw object but - will be ignored when saving to a fiff. Note. The trigger channel is - artificially constructed based on timestamps received by the Netstation. - As a consequence, triggers have only short durations. - - This step will fail if events are not mutually exclusive. - - See Also - -------- - mne.io.Raw : Documentation of attributes and methods of RawMff. - - .. versionadded:: 0.15.0 - """ - return RawMff(input_fname, eog, misc, include, exclude, - preload, channel_naming, verbose) +def _read_raw_egi_mff( + input_fname, + eog=None, + misc=None, + include=None, + exclude=None, + preload=False, + channel_naming="E%d", + *, + events_as_annotations=True, + verbose=None, +): + """Read EGI mff binary as raw object.""" + return RawMff( + input_fname, + eog, + misc, + include, + exclude, + preload, + channel_naming, + events_as_annotations=events_as_annotations, + verbose=verbose, + ) class RawMff(BaseRaw): """RawMff class.""" + _extra_attributes = ("event_id",) + @verbose - def __init__(self, input_fname, eog=None, misc=None, - include=None, exclude=None, preload=False, - channel_naming='E%d', verbose=None): + def __init__( + self, + input_fname, + eog=None, + misc=None, + include=None, + exclude=None, + preload=False, + channel_naming="E%d", + *, + events_as_annotations=True, + verbose=None, + ): """Init the RawMff class.""" input_fname = str( _check_fname( @@ -410,207 +401,198 @@ def __init__(self, input_fname, eog=None, misc=None, need_dir=True, ) ) - logger.info('Reading EGI MFF Header from %s...' 
% input_fname) + logger.info(f"Reading EGI MFF Header from {input_fname}...") egi_info = _read_header(input_fname) if eog is None: eog = [] if misc is None: - misc = np.where(np.array( - egi_info['chan_type']) != 'eeg')[0].tolist() + misc = np.where(np.array(egi_info["chan_type"]) != "eeg")[0].tolist() - logger.info(' Reading events ...') - egi_events, egi_info = _read_events(input_fname, egi_info) + logger.info(" Reading events ...") + egi_events, egi_info, mff_events = _read_events(input_fname, egi_info) cals = _get_eeg_calibration_info(input_fname, egi_info) - logger.info(' Assembling measurement info ...') - if egi_info['n_events'] > 0: - event_codes = list(egi_info['event_codes']) - if include is None: - exclude_list = ['sync', 'TREV'] if exclude is None else exclude - exclude_inds = [i for i, k in enumerate(event_codes) if k in - exclude_list] - more_excludes = [] - if exclude is None: - for ii, event in enumerate(egi_events): - if event.sum() <= 1 and event_codes[ii]: - more_excludes.append(ii) - if len(exclude_inds) + len(more_excludes) == len(event_codes): - warn('Did not find any event code with more than one ' - 'event.', RuntimeWarning) - else: - exclude_inds.extend(more_excludes) - - exclude_inds.sort() - include_ = [i for i in np.arange(egi_info['n_events']) if - i not in exclude_inds] - include_names = [k for i, k in enumerate(event_codes) - if i in include_] - else: - include_ = [i for i, k in enumerate(event_codes) - if k in include] - include_names = include - - for kk, v in [('include', include_names), ('exclude', exclude)]: - if isinstance(v, list): - for k in v: - if k not in event_codes: - raise ValueError( - f'Could not find event named {repr(k)}') - elif v is not None: - raise ValueError('`%s` must be None or of type list' % kk) + logger.info(" Assembling measurement info ...") + event_codes = egi_info["event_codes"] + include = _triage_include_exclude(include, exclude, egi_events, egi_info) + if egi_info["n_events"] > 0 and not events_as_annotations: logger.info(' Synthesizing trigger channel "STI 014" ...') - logger.info(' Excluding events {%s} ...' % - ", ".join([k for i, k in enumerate(event_codes) - if i not in include_])) - events_ids = np.arange(len(include_)) + 1 - egi_info['new_trigger'] = _combine_triggers( - egi_events[include_], remapping=events_ids) - self.event_id = dict(zip([e for e in event_codes if e in - include_names], events_ids)) - if egi_info['new_trigger'] is not None: - egi_events = np.vstack([egi_events, egi_info['new_trigger']]) - assert egi_events.shape[1] == egi_info['last_samps'][-1] + if all(ch.startswith("D") for ch in include): + # support the DIN format DIN1, DIN2, ..., DIN9, DI10, DI11, ... DI99, + # D100, D101, ..., D255 that we get when sending 0-255 triggers on a + # parallel port. 
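+                # For example, "DIN7" maps to event ID 7, "DI42" to 42 and
+                # "D255" to 255: strip the leading letters and keep the numeric
+                # suffix as the trigger value.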
+ events_ids = list() + for ch in include: + while not ch[0].isnumeric(): + ch = ch[1:] + events_ids.append(int(ch)) + else: + events_ids = np.arange(len(include)) + 1 + egi_info["new_trigger"] = _combine_triggers( + egi_events[[c in include for c in event_codes]], remapping=events_ids + ) + self.event_id = dict( + zip([e for e in event_codes if e in include], events_ids) + ) + if egi_info["new_trigger"] is not None: + egi_events = np.vstack([egi_events, egi_info["new_trigger"]]) else: - # No events self.event_id = None - egi_info['new_trigger'] = None - event_codes = [] + egi_info["new_trigger"] = None + assert egi_events.shape[1] == egi_info["last_samps"][-1] - meas_dt_utc = (egi_info['meas_dt_local'] - .astimezone(datetime.timezone.utc)) - info = _empty_info(egi_info['sfreq']) - info['meas_date'] = _ensure_meas_date_none_or_dt(meas_dt_utc) - info['utc_offset'] = egi_info['utc_offset'] - info['device_info'] = dict(type=egi_info['device']) + meas_dt_utc = egi_info["meas_dt_local"].astimezone(datetime.timezone.utc) + info = _empty_info(egi_info["sfreq"]) + info["meas_date"] = _ensure_meas_date_none_or_dt(meas_dt_utc) + info["utc_offset"] = egi_info["utc_offset"] + info["device_info"] = dict(type=egi_info["device"]) # read in the montage, if it exists ch_names, mon = _read_locs(input_fname, egi_info, channel_naming) # Second: Stim - ch_names.extend(list(egi_info['event_codes'])) - if egi_info['new_trigger'] is not None: - ch_names.append('STI 014') # channel for combined events - cals = np.concatenate( - [cals, np.repeat(1, len(event_codes) + 1 + len(misc) + len(eog))]) + ch_names.extend(list(egi_info["event_codes"])) + n_extra = len(event_codes) + len(misc) + len(eog) + len(egi_info["pns_names"]) + if egi_info["new_trigger"] is not None: + ch_names.append("STI 014") # channel for combined events + n_extra += 1 # Third: PNS - ch_names.extend(egi_info['pns_names']) - cals = np.concatenate( - [cals, np.repeat(1, len(egi_info['pns_names']))]) + ch_names.extend(egi_info["pns_names"]) + + cals = np.concatenate([cals, np.ones(n_extra)]) + assert len(cals) == len(ch_names), (len(cals), len(ch_names)) # Actually create channels as EEG, then update stim and PNS ch_coil = FIFF.FIFFV_COIL_EEG ch_kind = FIFF.FIFFV_EEG_CH chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc) - sti_ch_idx = [i for i, name in enumerate(ch_names) if - name.startswith('STI') or name in event_codes] + sti_ch_idx = [ + i + for i, name in enumerate(ch_names) + if name.startswith("STI") or name in event_codes + ] for idx in sti_ch_idx: - chs[idx].update({'unit_mul': FIFF.FIFF_UNITM_NONE, - 'cal': cals[idx], - 'kind': FIFF.FIFFV_STIM_CH, - 'coil_type': FIFF.FIFFV_COIL_NONE, - 'unit': FIFF.FIFF_UNIT_NONE}) + chs[idx].update( + { + "unit_mul": FIFF.FIFF_UNITM_NONE, + "cal": cals[idx], + "kind": FIFF.FIFFV_STIM_CH, + "coil_type": FIFF.FIFFV_COIL_NONE, + "unit": FIFF.FIFF_UNIT_NONE, + } + ) chs = _add_pns_channel_info(chs, egi_info, ch_names) - info['chs'] = chs + info["chs"] = chs info._unlocked = False info._update_redundant() if mon is not None: - info.set_montage(mon, on_missing='ignore') - - ref_idx = np.flatnonzero(np.in1d(mon.ch_names, REFERENCE_NAMES)) - if len(ref_idx): - ref_coords = info['chs'][int(ref_idx)]['loc'][:3] - for chan in info['chs']: - is_eeg = chan['kind'] == FIFF.FIFFV_EEG_CH - is_not_ref = chan['ch_name'] not in REFERENCE_NAMES - if is_eeg and is_not_ref: - chan['loc'][3:6] = ref_coords - - # Cz ref was applied during acquisition, so mark as already set. 
- with info._unlock(): - info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON - file_bin = op.join(input_fname, egi_info['eeg_fname']) - egi_info['egi_events'] = egi_events + info.set_montage(mon, on_missing="ignore") + ref_idx = np.flatnonzero(np.isin(mon.ch_names, REFERENCE_NAMES)) + if len(ref_idx): + ref_idx = ref_idx.item() + ref_coords = info["chs"][int(ref_idx)]["loc"][:3] + for chan in info["chs"]: + if chan["kind"] == FIFF.FIFFV_EEG_CH: + chan["loc"][3:6] = ref_coords + + file_bin = op.join(input_fname, egi_info["eeg_fname"]) + egi_info["egi_events"] = egi_events # Check how many channels to read are from EEG - keys = ('eeg', 'sti', 'pns') + keys = ("eeg", "sti", "pns") idx = dict() - idx['eeg'] = np.where( - [ch['kind'] == FIFF.FIFFV_EEG_CH for ch in chs])[0] - idx['sti'] = np.where( - [ch['kind'] == FIFF.FIFFV_STIM_CH for ch in chs])[0] - idx['pns'] = np.where( - [ch['kind'] in (FIFF.FIFFV_ECG_CH, FIFF.FIFFV_EMG_CH, - FIFF.FIFFV_BIO_CH) for ch in chs])[0] + idx["eeg"] = np.where([ch["kind"] == FIFF.FIFFV_EEG_CH for ch in chs])[0] + idx["sti"] = np.where([ch["kind"] == FIFF.FIFFV_STIM_CH for ch in chs])[0] + idx["pns"] = np.where( + [ + ch["kind"] in (FIFF.FIFFV_ECG_CH, FIFF.FIFFV_EMG_CH, FIFF.FIFFV_BIO_CH) + for ch in chs + ] + )[0] # By construction this should always be true, but check anyway if not np.array_equal( - np.concatenate([idx[key] for key in keys]), - np.arange(len(chs))): - raise ValueError('Currently interlacing EEG and PNS channels' - 'is not supported') - egi_info['kind_bounds'] = [0] + np.concatenate([idx[key] for key in keys]), np.arange(len(chs)) + ): + raise ValueError( + "Currently interlacing EEG and PNS channels is not supported" + ) + egi_info["kind_bounds"] = [0] for key in keys: - egi_info['kind_bounds'].append(len(idx[key])) - egi_info['kind_bounds'] = np.cumsum(egi_info['kind_bounds']) - assert egi_info['kind_bounds'][0] == 0 - assert egi_info['kind_bounds'][-1] == info['nchan'] + egi_info["kind_bounds"].append(len(idx[key])) + egi_info["kind_bounds"] = np.cumsum(egi_info["kind_bounds"]) + assert egi_info["kind_bounds"][0] == 0 + assert egi_info["kind_bounds"][-1] == info["nchan"] first_samps = [0] - last_samps = [egi_info['last_samps'][-1] - 1] + last_samps = [egi_info["last_samps"][-1] - 1] annot = dict(onset=list(), duration=list(), description=list()) - if len(idx['pns']): + + if len(idx["pns"]): # PNS Data is present and should be read: - egi_info['pns_filepath'] = op.join( - input_fname, egi_info['pns_fname']) + egi_info["pns_filepath"] = op.join(input_fname, egi_info["pns_fname"]) # Check for PNS bug immediately - pns_samples = np.sum( - egi_info['pns_sample_blocks']['samples_block']) - eeg_samples = np.sum(egi_info['samples_block']) + pns_samples = np.sum(egi_info["pns_sample_blocks"]["samples_block"]) + eeg_samples = np.sum(egi_info["samples_block"]) if pns_samples == eeg_samples - 1: - warn('This file has the EGI PSG sample bug') - annot['onset'].append(last_samps[-1] / egi_info['sfreq']) - annot['duration'].append(1 / egi_info['sfreq']) - annot['description'].append('BAD_EGI_PSG') + warn("This file has the EGI PSG sample bug") + annot["onset"].append(last_samps[-1] / egi_info["sfreq"]) + annot["duration"].append(1 / egi_info["sfreq"]) + annot["description"].append("BAD_EGI_PSG") elif pns_samples != eeg_samples: raise RuntimeError( - 'PNS samples (%d) did not match EEG samples (%d)' - % (pns_samples, eeg_samples)) - - self._filenames = [file_bin] - self._raw_extras = [egi_info] - - super(RawMff, self).__init__( - info, preload=preload, 
orig_format="single", filenames=[file_bin], - first_samps=first_samps, last_samps=last_samps, - raw_extras=[egi_info], verbose=verbose) + f"PNS samples ({pns_samples}) did not match EEG samples " + f"({eeg_samples})." + ) + + super().__init__( + info, + preload=preload, + orig_format="single", + filenames=[file_bin], + first_samps=first_samps, + last_samps=last_samps, + raw_extras=[egi_info], + verbose=verbose, + ) # Annotate acquisition skips - for first, prev_last in zip(egi_info['first_samps'][1:], - egi_info['last_samps'][:-1]): + for first, prev_last in zip( + egi_info["first_samps"][1:], egi_info["last_samps"][:-1] + ): gap = first - prev_last assert gap >= 0 if gap: - annot['onset'].append((prev_last - 0.5) / egi_info['sfreq']) - annot['duration'].append(gap / egi_info['sfreq']) - annot['description'].append('BAD_ACQ_SKIP') - - if len(annot['onset']): - self.set_annotations(Annotations(**annot, orig_time=None)) + annot["onset"].append((prev_last - 0.5) / egi_info["sfreq"]) + annot["duration"].append(gap / egi_info["sfreq"]) + annot["description"].append("BAD_ACQ_SKIP") + + # create events from annotations + if events_as_annotations: + for code, samples in mff_events.items(): + if code not in include: + continue + annot["onset"].extend(np.array(samples) / egi_info["sfreq"]) + annot["duration"].extend([0.0] * len(samples)) + annot["description"].extend([code] * len(samples)) + + if len(annot["onset"]): + self.set_annotations(Annotations(**annot)) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of data.""" - logger.debug(f'Reading MFF {start:6d} ... {stop:6d} ...') - dtype = ' -1)[0] # short circuit in case we don't need any samples if not len(disk_use_idx): @@ -644,13 +626,14 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Get starting/stopping block/samples block_samples_offset = np.cumsum(samples_block) offset_blocks = np.sum(block_samples_offset <= start) - offset_samples = start - (block_samples_offset[offset_blocks - 1] - if offset_blocks > 0 else 0) + offset_samples = start - ( + block_samples_offset[offset_blocks - 1] if offset_blocks > 0 else 0 + ) # TODO: Refactor this reading with the PNS reading in a single function # (DRY) samples_to_read = stop - start - with open(self._filenames[fi], 'rb', buffering=0) as fid: + with open(self.filenames[fi], "rb", buffering=0) as fid: # Go to starting block current_block = 0 current_block_info = None @@ -659,26 +642,25 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): this_block_info = _block_r(fid) if this_block_info is not None: current_block_info = this_block_info - fid.seek(current_block_info['block_size'], 1) + fid.seek(current_block_info["block_size"], 1) current_block += 1 # Start reading samples while samples_to_read > 0: - logger.debug(f' Reading from block {current_block}') + logger.debug(f" Reading from block {current_block}") this_block_info = _block_r(fid) current_block += 1 if this_block_info is not None: current_block_info = this_block_info - to_read = (current_block_info['nsamples'] * - current_block_info['nc']) + to_read = current_block_info["nsamples"] * current_block_info["nc"] block_data = np.fromfile(fid, dtype, to_read) - block_data = block_data.reshape(n_channels, -1, order='C') + block_data = block_data.reshape(n_channels, -1, order="C") # Compute indexes samples_read = block_data.shape[1] - logger.debug(f' Read {samples_read} samples') - logger.debug(f' Offset {offset_samples} samples') + logger.debug(f" Read {samples_read} 
samples") + logger.debug(f" Offset {offset_samples} samples") if offset_samples > 0: # First block read, skip to the offset: block_data = block_data[:, offset_samples:] @@ -688,7 +670,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Last block to read, skip the last samples block_data = block_data[:, :samples_to_read] samples_read = samples_to_read - logger.debug(f' Keep {samples_read} samples') + logger.debug(f" Keep {samples_read} samples") s_start = current_data_sample s_end = s_start + samples_read @@ -699,19 +681,20 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): if len(pns_one) > 0: # PNS Data is present and should be read: - pns_filepath = egi_info['pns_filepath'] - pns_info = egi_info['pns_sample_blocks'] - n_channels = pns_info['n_channels'] - samples_block = pns_info['samples_block'] + pns_filepath = egi_info["pns_filepath"] + pns_info = egi_info["pns_sample_blocks"] + n_channels = pns_info["n_channels"] + samples_block = pns_info["samples_block"] # Get starting/stopping block/samples block_samples_offset = np.cumsum(samples_block) offset_blocks = np.sum(block_samples_offset < start) - offset_samples = start - (block_samples_offset[offset_blocks - 1] - if offset_blocks > 0 else 0) + offset_samples = start - ( + block_samples_offset[offset_blocks - 1] if offset_blocks > 0 else 0 + ) samples_to_read = stop - start - with open(pns_filepath, 'rb', buffering=0) as fid: + with open(pns_filepath, "rb", buffering=0) as fid: # Check file size fid.seek(0, 2) file_size = fid.tell() @@ -724,7 +707,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): this_block_info = _block_r(fid) if this_block_info is not None: current_block_info = this_block_info - fid.seek(current_block_info['block_size'], 1) + fid.seek(current_block_info["block_size"], 1) current_block += 1 # Start reading samples @@ -739,10 +722,9 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): if this_block_info is not None: current_block_info = this_block_info - to_read = (current_block_info['nsamples'] * - current_block_info['nc']) + to_read = current_block_info["nsamples"] * current_block_info["nc"] block_data = np.fromfile(fid, dtype, to_read) - block_data = block_data.reshape(n_channels, -1, order='C') + block_data = block_data.reshape(n_channels, -1, order="C") # Compute indexes samples_read = block_data.shape[1] @@ -760,8 +742,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): s_start = current_data_sample s_end = s_start + samples_read - one[pns_one, disk_use_idx[s_start:s_end]] = \ - block_data[pns_in] + one[pns_one, disk_use_idx[s_start:s_end]] = block_data[pns_in] samples_to_read = samples_to_read - samples_read current_data_sample = current_data_sample + samples_read @@ -770,8 +751,9 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): @verbose -def read_evokeds_mff(fname, condition=None, channel_naming='E%d', - baseline=None, verbose=None): +def read_evokeds_mff( + fname, condition=None, channel_naming="E%d", baseline=None, verbose=None +): """Read averaged MFF file as EvokedArray or list of EvokedArray. Parameters @@ -832,13 +814,17 @@ def read_evokeds_mff(fname, condition=None, channel_naming='E%d', flavor = mff.mff_flavor except AttributeError: # < 6.3 flavor = mff.flavor - if flavor not in ('averaged', 'segmented'): # old, new names - raise ValueError(f'{fname} is a {flavor} MFF file. 
' - 'fname must be the path to an averaged MFF file.') + if flavor not in ("averaged", "segmented"): # old, new names + raise ValueError( + f"{fname} is a {flavor} MFF file. " + "fname must be the path to an averaged MFF file." + ) # Check for categories.xml file - if 'categories.xml' not in mff.directory.listdir(): - raise ValueError('categories.xml not found in MFF directory. ' - f'{fname} may not be an averaged MFF file.') + if "categories.xml" not in mff.directory.listdir(): + raise ValueError( + "categories.xml not found in MFF directory. " + f"{fname} may not be an averaged MFF file." + ) return_list = True if condition is None: categories = mff.categories.categories @@ -846,135 +832,143 @@ def read_evokeds_mff(fname, condition=None, channel_naming='E%d', elif not isinstance(condition, list): condition = [condition] return_list = False - logger.info(f'Reading {len(condition)} evoked datasets from {fname} ...') - output = [_read_evoked_mff(fname, c, channel_naming=channel_naming, - verbose=verbose).apply_baseline(baseline) - for c in condition] + logger.info(f"Reading {len(condition)} evoked datasets from {fname} ...") + output = [ + _read_evoked_mff( + fname, c, channel_naming=channel_naming, verbose=verbose + ).apply_baseline(baseline) + for c in condition + ] return output if return_list else output[0] -def _read_evoked_mff(fname, condition, channel_naming='E%d', verbose=None): +def _read_evoked_mff(fname, condition, channel_naming="E%d", verbose=None): """Read evoked data from MFF file.""" import mffpy + egi_info = _read_header(fname) mff = mffpy.Reader(fname) categories = mff.categories.categories if isinstance(condition, str): # Condition is interpreted as category name - category = _check_option('condition', condition, categories, - extra='provided as category name') + category = _check_option( + "condition", condition, categories, extra="provided as category name" + ) epoch = mff.epochs[category] elif isinstance(condition, int): # Condition is interpreted as epoch index try: epoch = mff.epochs[condition] except IndexError: - raise ValueError(f'"condition" parameter ({condition}), provided ' - 'as epoch index, is out of range for available ' - f'epochs ({len(mff.epochs)}).') + raise ValueError( + f'"condition" parameter ({condition}), provided ' + "as epoch index, is out of range for available " + f"epochs ({len(mff.epochs)})." + ) category = epoch.name else: raise TypeError('"condition" parameter must be either int or str.') # Read in signals from the target epoch data = mff.get_physical_samples_from_epoch(epoch) - eeg_data, t0 = data['EEG'] - if 'PNSData' in data: - pns_data, t0 = data['PNSData'] + eeg_data, t0 = data["EEG"] + if "PNSData" in data: + pns_data, t0 = data["PNSData"] all_data = np.vstack((eeg_data, pns_data)) - ch_types = egi_info['chan_type'] + egi_info['pns_types'] + ch_types = egi_info["chan_type"] + egi_info["pns_types"] else: all_data = eeg_data - ch_types = egi_info['chan_type'] + ch_types = egi_info["chan_type"] all_data *= 1e-6 # convert to volts # Load metadata into info object # Exclude info['meas_date'] because record time info in # averaged MFF is the time of the averaging, not true record time. 
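+    # Channel names and the montage come from coordinates.xml via _read_locs;
+    # PNS channel names are appended afterwards and the sampling rate is taken
+    # from ``mff.sampling_rates["EEG"]``.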
ch_names, mon = _read_locs(fname, egi_info, channel_naming) - ch_names.extend(egi_info['pns_names']) - info = create_info(ch_names, mff.sampling_rates['EEG'], ch_types) + ch_names.extend(egi_info["pns_names"]) + info = create_info(ch_names, mff.sampling_rates["EEG"], ch_types) with info._unlock(): - info['device_info'] = dict(type=egi_info['device']) - info['nchan'] = sum(mff.num_channels.values()) + info["device_info"] = dict(type=egi_info["device"]) + info["nchan"] = sum(mff.num_channels.values()) # Add individual channel info # Get calibration info for EEG channels cals = _get_eeg_calibration_info(fname, egi_info) # Initialize calibration for PNS channels, will be updated later - cals = np.concatenate([cals, np.repeat(1, len(egi_info['pns_names']))]) + cals = np.concatenate([cals, np.repeat(1, len(egi_info["pns_names"]))]) ch_coil = FIFF.FIFFV_COIL_EEG ch_kind = FIFF.FIFFV_EEG_CH chs = _create_chs(ch_names, cals, ch_coil, ch_kind, (), (), (), ()) # Update PNS channel info chs = _add_pns_channel_info(chs, egi_info, ch_names) with info._unlock(): - info['chs'] = chs + info["chs"] = chs if mon is not None: - info.set_montage(mon, on_missing='ignore') + info.set_montage(mon, on_missing="ignore") # Add bad channels to info - info['description'] = category + info["description"] = category try: - channel_status = categories[category][0]['channelStatus'] + channel_status = categories[category][0]["channelStatus"] except KeyError: - warn(f'Channel status data not found for condition {category}. ' - 'No channels will be marked as bad.', category=UserWarning) + warn( + f"Channel status data not found for condition {category}. " + "No channels will be marked as bad.", + category=UserWarning, + ) channel_status = None bads = [] if channel_status: for entry in channel_status: - if entry['exclusion'] == 'badChannels': - if entry['signalBin'] == 1: + if entry["exclusion"] == "badChannels": + if entry["signalBin"] == 1: # Add bad EEG channels - for ch in entry['channels']: + for ch in entry["channels"]: bads.append(ch_names[ch - 1]) - elif entry['signalBin'] == 2: + elif entry["signalBin"] == 2: # Add bad PNS channels - for ch in entry['channels']: - bads.append(egi_info['pns_names'][ch - 1]) - info['bads'] = bads + for ch in entry["channels"]: + bads.append(egi_info["pns_names"][ch - 1]) + info["bads"] = bads # Add EEG reference to info - # Initialize 'custom_ref_applied' to False - with info._unlock(): - info['custom_ref_applied'] = False try: - fp = mff.directory.filepointer('history') + fp = mff.directory.filepointer("history") except (ValueError, FileNotFoundError): # old (<=0.6.3) vs new mffpy pass else: with fp: history = mffpy.XML.from_file(fp) for entry in history.entries: - if entry['method'] == 'Montage Operations Tool': - if 'Average Reference' in entry['settings']: + if entry["method"] == "Montage Operations Tool": + if "Average Reference" in entry["settings"]: # Average reference has been applied - projector, info = setup_proj(info) - else: - # Custom reference has been applied that is not an average - info['custom_ref_applied'] = True + _, info = setup_proj(info) # Get nave from categories.xml try: - nave = categories[category][0]['keys']['#seg']['data'] + nave = categories[category][0]["keys"]["#seg"]["data"] except KeyError: - warn(f'Number of averaged epochs not found for condition {category}. ' - 'nave will default to 1.', category=UserWarning) + warn( + f"Number of averaged epochs not found for condition {category}. 
" + "nave will default to 1.", + category=UserWarning, + ) nave = 1 # Let tmin default to 0 - return EvokedArray(all_data, info, tmin=0., comment=category, - nave=nave, verbose=verbose) + return EvokedArray( + all_data, info, tmin=0.0, comment=category, nave=nave, verbose=verbose + ) -def _import_mffpy(why='read averaged .mff files'): +def _import_mffpy(why="read averaged .mff files"): """Import and return module mffpy.""" try: import mffpy except ImportError as exp: - msg = f'mffpy is required to {why}, got:\n{exp}' + msg = f"mffpy is required to {why}, got:\n{exp}" raise ImportError(msg) return mffpy diff --git a/mne/io/egi/events.py b/mne/io/egi/events.py index 9e4967d115a..9c853dd73eb 100644 --- a/mne/io/egi/events.py +++ b/mne/io/egi/events.py @@ -1,15 +1,15 @@ -# -*- coding: utf-8 -*- # +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from datetime import datetime from glob import glob from os.path import basename, join, splitext -from xml.etree.ElementTree import parse import numpy as np -from ...utils import logger +from ...utils import _soft_import, _validate_type, logger, warn def _read_events(input_fname, info): @@ -22,17 +22,17 @@ def _read_events(input_fname, info): info : dict Header info array. """ - n_samples = info['last_samps'][-1] - mff_events, event_codes = _read_mff_events(input_fname, info['sfreq']) - info['n_events'] = len(event_codes) - info['event_codes'] = event_codes - events = np.zeros([info['n_events'], info['n_segments'] * n_samples]) + n_samples = info["last_samps"][-1] + mff_events, event_codes = _read_mff_events(input_fname, info["sfreq"]) + info["n_events"] = len(event_codes) + info["event_codes"] = event_codes + events = np.zeros([info["n_events"], info["n_segments"] * n_samples]) for n, event in enumerate(event_codes): for i in mff_events[event]: if (i < 0) or (i >= events.shape[1]): continue events[n][i] = n + 1 - return events, info + return events, info, mff_events def _read_mff_events(filename, sfreq): @@ -46,41 +46,50 @@ def _read_mff_events(filename, sfreq): The sampling frequency """ orig = {} - for xml_file in glob(join(filename, '*.xml')): + for xml_file in glob(join(filename, "*.xml")): xml_type = splitext(basename(xml_file))[0] - orig[xml_type] = _parse_xml(xml_file) + et = _parse_xml(xml_file) + if et is not None: + orig[xml_type] = et xml_files = orig.keys() - xml_events = [x for x in xml_files if x[:7] == 'Events_'] - for item in orig['info']: - if 'recordTime' in item: - start_time = _ns2py_time(item['recordTime']) + xml_events = [x for x in xml_files if x[:7] == "Events_"] + for item in orig["info"]: + if "recordTime" in item: + start_time = _ns2py_time(item["recordTime"]) break markers = [] code = [] for xml in xml_events: for event in orig[xml][2:]: - event_start = _ns2py_time(event['beginTime']) + event_start = _ns2py_time(event["beginTime"]) start = (event_start - start_time).total_seconds() - if event['code'] not in code: - code.append(event['code']) - marker = {'name': event['code'], - 'start': start, - 'start_sample': int(np.fix(start * sfreq)), - 'end': start + float(event['duration']) / 1e9, - 'chan': None, - } + if event["code"] not in code: + code.append(event["code"]) + marker = { + "name": event["code"], + "start": start, + "start_sample": int(np.fix(start * sfreq)), + "end": start + float(event["duration"]) / 1e9, + "chan": None, + } markers.append(marker) events_tims = dict() for ev in code: - trig_samp = list(c['start_sample'] for n, - c in enumerate(markers) if 
c['name'] == ev) + trig_samp = list( + c["start_sample"] for n, c in enumerate(markers) if c["name"] == ev + ) events_tims.update({ev: trig_samp}) return events_tims, code -def _parse_xml(xml_file): +def _parse_xml(xml_file: str) -> list[dict[str, str]] | None: """Parse XML file.""" - xml = parse(xml_file) + defusedxml = _soft_import("defusedxml", "reading EGI MFF data") + try: + xml = defusedxml.ElementTree.parse(xml_file) + except defusedxml.ElementTree.ParseError as e: + warn(f"Could not parse the XML file {xml_file}: {e}") + return root = xml.getroot() return _xml2list(root) @@ -89,7 +98,6 @@ def _xml2list(root): """Parse XML item.""" output = [] for element in root: - if len(element) > 0: if element[0].tag != element[-1].tag: output.append(_xml2dict(element)) @@ -107,8 +115,8 @@ def _xml2list(root): def _ns(s): """Remove namespace, but only if there is a namespace to begin with.""" - if '}' in s: - return '}'.join(s.split('}')[1:]) + if "}" in s: + return "}".join(s.split("}")[1:]) else: return s @@ -147,7 +155,7 @@ def _ns2py_time(nstime): nsdate = nstime[0:10] nstime0 = nstime[11:26] nstime00 = nsdate + " " + nstime0 - pytime = datetime.strptime(nstime00, '%Y-%m-%d %H:%M:%S.%f') + pytime = datetime.strptime(nstime00, "%Y-%m-%d %H:%M:%S.%f") return pytime @@ -155,8 +163,10 @@ def _combine_triggers(data, remapping=None): """Combine binary triggers.""" new_trigger = np.zeros(data.shape[1]) if data.astype(bool).sum(axis=0).max() > 1: # ensure no overlaps - logger.info(' Found multiple events at the same time ' - 'sample. Cannot create trigger channel.') + logger.info( + " Found multiple events at the same time " + "sample. Cannot create trigger channel." + ) return if remapping is None: remapping = np.arange(data) + 1 @@ -165,3 +175,39 @@ def _combine_triggers(data, remapping=None): if np.any(idx): new_trigger[idx] += event_id return new_trigger + + +def _triage_include_exclude(include, exclude, egi_events, egi_info): + """Triage include and exclude.""" + _validate_type(exclude, (list, None), "exclude") + _validate_type(include, (list, None), "include") + event_codes = list(egi_info["event_codes"]) + for name, lst in dict(exclude=exclude, include=include).items(): + for ii, item in enumerate(lst or []): + what = f"{name}[{ii}]" + _validate_type(item, str, what) + if item not in event_codes: + raise ValueError( + f"Could not find event channel named {what}={repr(item)}" + ) + if include is None: + if exclude is None: + default_exclude = ["sync", "TREV"] + exclude = [code for code in default_exclude if code in event_codes] + for code, event in zip(event_codes, egi_events): + if event.sum() < 1 and code: + exclude.append(code) + if ( + len(exclude) == len(event_codes) + and egi_info["n_events"] + and set(exclude) - set(default_exclude) + ): + warn( + "Did not find any event code with at least one event.", + RuntimeWarning, + ) + include = [k for k in event_codes if k not in exclude] + del exclude + excl_events = ", ".join(k for k in event_codes if k not in include) + logger.info(f" Excluding events {{{excl_events}}} ...") + return include diff --git a/mne/io/egi/general.py b/mne/io/egi/general.py index b8212ed3d56..ed028e3e5ed 100644 --- a/mne/io/egi/general.py +++ b/mne/io/egi/general.py @@ -1,24 +1,27 @@ -# -*- coding: utf-8 -*- # +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
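A worked example of the timestamp slicing done by `_ns2py_time` above; the value has the same shape as the recordTime strings used in the tests further down (illustrative only):

    from datetime import datetime

    nstime = "2017-02-23T11:35:13.220824+01:00"
    # keep the date (chars 0-10) and sub-second time (chars 11-26), drop the UTC offset
    pytime = datetime.strptime(nstime[0:10] + " " + nstime[11:26], "%Y-%m-%d %H:%M:%S.%f")
    # -> datetime(2017, 2, 23, 11, 35, 13, 220824), timezone-naive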
import os -from xml.dom.minidom import parse import re import numpy as np -from ...utils import _pl +from ...utils import _pl, _soft_import def _extract(tags, filepath=None, obj=None): """Extract info from XML.""" + _soft_import("defusedxml", "reading EGI MFF data") + from defusedxml.minidom import parse + if obj is not None: fileobj = obj elif filepath is not None: fileobj = parse(filepath) else: - raise ValueError('There is not object or file to extract data') + raise ValueError("There is not object or file to extract data") infoxml = dict() for tag in tags: value = fileobj.getElementsByTagName(tag) @@ -30,39 +33,42 @@ def _extract(tags, filepath=None, obj=None): def _get_gains(filepath): """Parse gains.""" + _soft_import("defusedxml", "reading EGI MFF data") + from defusedxml.minidom import parse + file_obj = parse(filepath) - objects = file_obj.getElementsByTagName('calibration') + objects = file_obj.getElementsByTagName("calibration") gains = dict() for ob in objects: - value = ob.getElementsByTagName('type') - if value[0].firstChild.data == 'GCAL': - data_g = _extract(['ch'], obj=ob)['ch'] + value = ob.getElementsByTagName("type") + if value[0].firstChild.data == "GCAL": + data_g = _extract(["ch"], obj=ob)["ch"] gains.update(gcal=np.asarray(data_g, dtype=np.float64)) - elif value[0].firstChild.data == 'ICAL': - data_g = _extract(['ch'], obj=ob)['ch'] + elif value[0].firstChild.data == "ICAL": + data_g = _extract(["ch"], obj=ob)["ch"] gains.update(ical=np.asarray(data_g, dtype=np.float64)) return gains def _get_ep_info(filepath): """Get epoch info.""" - epochfile = filepath + '/epochs.xml' + _soft_import("defusedxml", "reading EGI MFF data") + from defusedxml.minidom import parse + + epochfile = filepath + "/epochs.xml" epochlist = parse(epochfile) - epochs = epochlist.getElementsByTagName('epoch') - keys = ('first_samps', 'last_samps', 'first_blocks', 'last_blocks') + epochs = epochlist.getElementsByTagName("epoch") + keys = ("first_samps", "last_samps", "first_blocks", "last_blocks") epoch_info = {key: list() for key in keys} for epoch in epochs: - ep_begin = int(epoch.getElementsByTagName('beginTime')[0] - .firstChild.data) - ep_end = int(epoch.getElementsByTagName('endTime')[0].firstChild.data) - first_block = int(epoch.getElementsByTagName('firstBlock')[0] - .firstChild.data) - last_block = int(epoch.getElementsByTagName('lastBlock')[0] - .firstChild.data) - epoch_info['first_samps'].append(ep_begin) - epoch_info['last_samps'].append(ep_end) - epoch_info['first_blocks'].append(first_block) - epoch_info['last_blocks'].append(last_block) + ep_begin = int(epoch.getElementsByTagName("beginTime")[0].firstChild.data) + ep_end = int(epoch.getElementsByTagName("endTime")[0].firstChild.data) + first_block = int(epoch.getElementsByTagName("firstBlock")[0].firstChild.data) + last_block = int(epoch.getElementsByTagName("lastBlock")[0].firstChild.data) + epoch_info["first_samps"].append(ep_begin) + epoch_info["last_samps"].append(ep_end) + epoch_info["first_blocks"].append(first_block) + epoch_info["last_blocks"].append(last_block) # Don't turn into ndarray here, keep native int because it can deal with # huge numbers (could use np.uint64 but it's more work) return epoch_info @@ -83,7 +89,7 @@ def _get_blocks(filepath): # * 1 byte of n_channels # * n_channels bytes of offsets # * n_channels bytes of sigfreqs? 
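The per-channel "sigfreq" word read by `_block_r` further below packs the sample depth and the sampling rate into a single 32-bit integer; a small worked example of that unpacking (values illustrative):

    packed = (1000 << 8) | 32  # e.g. 1000 Hz at 32-bit depth
    depth = packed & 0xFF      # -> 32, the only depth the reader accepts
    sfreq = packed >> 8        # -> 1000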
- with open(binfile, 'rb') as fid: + with open(binfile, "rb") as fid: fid.seek(0, 2) # go to end of file file_length = fid.tell() block_size = file_length @@ -97,79 +103,90 @@ def _get_blocks(filepath): fid.seek(block_size, 1) position = fid.tell() continue - block_size = block['block_size'] - header_size = block['header_size'] + block_size = block["block_size"] + header_size = block["header_size"] header_sizes.append(header_size) - samples_block.append(block['nsamples']) + samples_block.append(block["nsamples"]) n_blocks += 1 fid.seek(block_size, 1) - sfreq.append(block['sfreq']) - n_channels.append(block['nc']) + sfreq.append(block["sfreq"]) + n_channels.append(block["nc"]) position = fid.tell() if any([n != n_channels[0] for n in n_channels]): - raise RuntimeError("All the blocks don't have the same amount of " - "channels.") + raise RuntimeError("All the blocks don't have the same amount of channels.") if any([f != sfreq[0] for f in sfreq]): - raise RuntimeError("All the blocks don't have the same sampling " - "frequency.") + raise RuntimeError("All the blocks don't have the same sampling frequency.") if len(samples_block) < 1: raise RuntimeError("There seems to be no data") samples_block = np.array(samples_block) - signal_blocks = dict(n_channels=n_channels[0], sfreq=sfreq[0], - n_blocks=n_blocks, samples_block=samples_block, - header_sizes=header_sizes) + signal_blocks = dict( + n_channels=n_channels[0], + sfreq=sfreq[0], + n_blocks=n_blocks, + samples_block=samples_block, + header_sizes=header_sizes, + ) return signal_blocks def _get_signalfname(filepath): """Get filenames.""" + _soft_import("defusedxml", "reading EGI MFF data") + from defusedxml.minidom import parse + listfiles = os.listdir(filepath) - binfiles = list(f for f in listfiles if 'signal' in f and - f[-4:] == '.bin' and f[0] != '.') + binfiles = list( + f for f in listfiles if "signal" in f and f[-4:] == ".bin" and f[0] != "." 
+ ) all_files = {} infofiles = list() for binfile in binfiles: - bin_num_str = re.search(r'\d+', binfile).group() - infofile = 'info' + bin_num_str + '.xml' + bin_num_str = re.search(r"\d+", binfile).group() + infofile = "info" + bin_num_str + ".xml" infofiles.append(infofile) infobjfile = os.path.join(filepath, infofile) infobj = parse(infobjfile) - if len(infobj.getElementsByTagName('EEG')): - signal_type = 'EEG' - elif len(infobj.getElementsByTagName('PNSData')): - signal_type = 'PNS' + if len(infobj.getElementsByTagName("EEG")): + signal_type = "EEG" + elif len(infobj.getElementsByTagName("PNSData")): + signal_type = "PNS" all_files[signal_type] = { - 'signal': 'signal{}.bin'.format(bin_num_str), - 'info': infofile} - if 'EEG' not in all_files: + "signal": f"signal{bin_num_str}.bin", + "info": infofile, + } + if "EEG" not in all_files: + infofiles_str = "\n".join(infofiles) raise FileNotFoundError( - 'Could not find any EEG data in the %d file%s found in %s:\n%s' - % (len(infofiles), _pl(infofiles), filepath, '\n'.join(infofiles))) + f"Could not find any EEG data in the {len(infofiles)} file{_pl(infofiles)} " + f"found in {filepath}:\n{infofiles_str}" + ) return all_files def _block_r(fid): """Read meta data.""" - if np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0] != 1: # not metadata + if np.fromfile(fid, dtype=np.dtype("i4"), count=1).item() != 1: # not meta return None - header_size = np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0] - block_size = np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0] + header_size = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item() + block_size = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item() hl = int(block_size / 4) - nc = np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0] + nc = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item() nsamples = int(hl / nc) - np.fromfile(fid, dtype=np.dtype('i4'), count=nc) # sigoffset - sigfreq = np.fromfile(fid, dtype=np.dtype('i4'), count=nc) + np.fromfile(fid, dtype=np.dtype("i4"), count=nc) # sigoffset + sigfreq = np.fromfile(fid, dtype=np.dtype("i4"), count=nc) depth = sigfreq[0] & 0xFF if depth != 32: - raise ValueError('I do not know how to read this MFF (depth != 32)') + raise ValueError("I do not know how to read this MFF (depth != 32)") sfreq = sigfreq[0] >> 8 count = int(header_size / 4 - (4 + 2 * nc)) - np.fromfile(fid, dtype=np.dtype('i4'), count=count) # sigoffset - block = dict(nc=nc, - hl=hl, - nsamples=nsamples, - block_size=block_size, - header_size=header_size, - sfreq=sfreq) + np.fromfile(fid, dtype=np.dtype("i4"), count=count) # sigoffset + block = dict( + nc=nc, + hl=hl, + nsamples=nsamples, + block_size=block_size, + header_size=header_size, + sfreq=sfreq, + ) return block diff --git a/mne/io/egi/tests/__init__.py b/mne/io/egi/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/egi/tests/__init__.py +++ b/mne/io/egi/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/egi/tests/test_egi.py b/mne/io/egi/tests/test_egi.py index 2f543321ce5..261a9c80da3 100644 --- a/mne/io/egi/tests/test_egi.py +++ b/mne/io/egi/tests/test_egi.py @@ -1,25 +1,26 @@ -# -*- coding: utf-8 -*- -# Authors: Denis A. Engemann -# simplified BSD-3 license +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
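The tests below exercise both event paths of the EGI reader; a condensed sketch of the two call patterns, assuming a hypothetical recording.mff and illustrative DIN event codes:

    import mne

    # legacy behaviour: events are synthesized into a "STI 014" stim channel
    raw = mne.io.read_raw_egi("recording.mff", events_as_annotations=False)
    events = mne.find_events(raw, stim_channel="STI 014")

    # annotation-based behaviour: events become annotations, converted on demand
    raw = mne.io.read_raw_egi("recording.mff", events_as_annotations=True)
    events, event_id = mne.events_from_annotations(raw, event_id={"DIN1": 1, "DIN2": 2})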
-from pathlib import Path import os import shutil +from copy import deepcopy from datetime import datetime, timezone +from pathlib import Path import numpy as np -from numpy.testing import assert_array_equal, assert_allclose import pytest +from numpy.testing import assert_allclose, assert_array_equal from scipy import io as sio -from mne import find_events, pick_types, pick_channels -from mne.io import read_raw_egi, read_evokeds_mff, read_raw_fif -from mne.io.constants import FIFF +from mne import events_from_annotations, find_events, pick_types +from mne._fiff.constants import FIFF +from mne.datasets.testing import data_path, requires_testing_data +from mne.io import read_evokeds_mff, read_raw_egi, read_raw_fif from mne.io.egi.egi import _combine_triggers from mne.io.tests.test_raw import _test_raw_reader -from mne.utils import requires_version, object_diff -from mne.datasets.testing import data_path, requires_testing_data +from mne.utils import object_diff base_dir = Path(__file__).parent / "data" egi_fname = base_dir / "test_egi.raw" @@ -36,17 +37,21 @@ egi_txt_evoked_cat2_fname = egi_path / "test_egi_evoked_cat2.txt" # absolute event times from NetStation -egi_pause_events = {'AM40': [7.224, 11.928, 14.413, 16.848], - 'bgin': [6.121, 8.434, 13.369, 15.815, 18.094], - 'FIX+': [6.225, 10.929, 13.414, 15.849], - 'ITI+': [8.293, 12.997, 15.482, 17.918]} +egi_pause_events = { + "AM40": [7.224, 11.928, 14.413, 16.848], + "bgin": [6.121, 8.434, 13.369, 15.815, 18.094], + "FIX+": [6.225, 10.929, 13.414, 15.849], + "ITI+": [8.293, 12.997, 15.482, 17.918], +} # absolute epoch times egi_pause_skips = [(1304000.0, 1772000.0), (8660000.0, 12296000.0)] -egi_eprime_pause_events = {'AM40': [6.049, 8.434, 10.936, 13.321], - 'bgin': [4.902, 7.381, 9.901, 12.268, 14.619], - 'FIX+': [5.050, 7.435, 9.937, 12.322], - 'ITI+': [7.185, 9.503, 12.005, 14.391]} +egi_eprime_pause_events = { + "AM40": [6.049, 8.434, 10.936, 13.321], + "bgin": [4.902, 7.381, 9.901, 12.268, 14.619], + "FIX+": [5.050, 7.435, 9.937, 12.322], + "ITI+": [7.185, 9.503, 12.005, 14.391], +} egi_eprime_pause_skips = [(1344000.0, 1804000.0)] egi_pause_w1337_events = None @@ -54,69 +59,83 @@ @requires_testing_data -@pytest.mark.parametrize('fname, skip_times, event_times', [ - (egi_pause_fname, egi_pause_skips, egi_pause_events), - (egi_eprime_pause_fname, egi_eprime_pause_skips, egi_eprime_pause_events), - (egi_pause_w1337_fname, egi_pause_w1337_skips, egi_pause_w1337_events), -]) +@pytest.mark.parametrize( + "fname, skip_times, event_times", + [ + (egi_pause_fname, egi_pause_skips, egi_pause_events), + (egi_eprime_pause_fname, egi_eprime_pause_skips, egi_eprime_pause_events), + (egi_pause_w1337_fname, egi_pause_w1337_skips, egi_pause_w1337_events), + ], +) def test_egi_mff_pause(fname, skip_times, event_times): """Test EGI MFF with pauses.""" + pytest.importorskip("defusedxml") if fname == egi_pause_w1337_fname: # too slow to _test_raw_reader - raw = read_raw_egi(fname).load_data() + raw = read_raw_egi(fname, events_as_annotations=False).load_data() else: - with pytest.warns(RuntimeWarning, match='Acquisition skips detected'): - raw = _test_raw_reader(read_raw_egi, input_fname=fname, - test_scaling=False, # XXX probably some bug - test_rank='less', - ) - assert raw.info['sfreq'] == 250. 
# true for all of these files + with pytest.warns(RuntimeWarning, match="Acquisition skips detected"): + raw = _test_raw_reader( + read_raw_egi, + input_fname=fname, + test_scaling=False, # XXX probably some bug + test_rank="less", + events_as_annotations=False, + ) + assert raw.info["sfreq"] == 250.0 # true for all of these files + assert raw.info["dev_head_t"] is None # no MEG data assert len(raw.annotations) == len(skip_times) # assert event onsets match expected times if event_times is None: - with pytest.raises(ValueError, match='Consider using .*events_from'): + with pytest.raises(ValueError, match="Consider using .*events_from"): find_events(raw) else: events = find_events(raw) for event_type in event_times.keys(): - ns_samples = np.floor(np.array(event_times[event_type]) * - raw.info['sfreq']) + ns_samples = np.floor(np.array(event_times[event_type]) * raw.info["sfreq"]) assert_array_equal( - events[events[:, 2] == raw.event_id[event_type], 0], - ns_samples) + events[events[:, 2] == raw.event_id[event_type], 0], ns_samples + ) # read some data from the middle of the skip, assert it's all zeros stim_picks = pick_types(raw.info, meg=False, stim=True, exclude=()) other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), stim_picks) for ii, annot in enumerate(raw.annotations): - assert annot['description'] == 'BAD_ACQ_SKIP' + assert annot["description"] == "BAD_ACQ_SKIP" start, stop = raw.time_as_index( - [annot['onset'], annot['onset'] + annot['duration']]) + [annot["onset"], annot["onset"] + annot["duration"]] + ) data, _ = raw[:, start:stop] - assert_array_equal(data[other_picks], 0.) + assert_array_equal(data[other_picks], 0.0) if event_times is not None: - assert raw.ch_names[-1] == 'STI 014' - assert not np.array_equal(data[stim_picks], 0.) 
+ assert raw.ch_names[-1] == "STI 014" + assert not np.array_equal(data[stim_picks], 0.0) # assert skips match expected onset and duration - skip = ((start + 1) / raw.info['sfreq'] * 1e6, - (stop + 1) / raw.info['sfreq'] * 1e6) + skip = ( + (start + 1) / raw.info["sfreq"] * 1e6, + (stop + 1) / raw.info["sfreq"] * 1e6, + ) assert skip == skip_times[ii] @requires_testing_data -@pytest.mark.parametrize('fname', [ - egi_pause_fname, - egi_eprime_pause_fname, - egi_pause_w1337_fname, -]) +@pytest.mark.parametrize( + "fname", + [ + egi_pause_fname, + egi_eprime_pause_fname, + egi_pause_w1337_fname, + ], +) def test_egi_mff_pause_chunks(fname, tmp_path): """Test that on-demand of all short segments works (via I/O).""" - fname_temp = tmp_path / 'test_raw.fif' + pytest.importorskip("defusedxml") + fname_temp = tmp_path / "test_raw.fif" raw_data = read_raw_egi(fname, preload=True).get_data() raw = read_raw_egi(fname) - with pytest.warns(RuntimeWarning, match='Acquisition skips detected'): + with pytest.warns(RuntimeWarning, match="Acquisition skips detected"): raw.save(fname_temp) del raw raw_data_2 = read_raw_fif(fname_temp).get_data() @@ -124,60 +143,86 @@ def test_egi_mff_pause_chunks(fname, tmp_path): @requires_testing_data -def test_io_egi_mff(): +@pytest.mark.parametrize("events_as_annotations", (True, False)) +def test_io_egi_mff(events_as_annotations): """Test importing EGI MFF simple binary files.""" + pytest.importorskip("defusedxml") # want vars for n chans n_ref = 1 n_eeg = 128 n_card = 3 - raw = read_raw_egi(egi_mff_fname, include=None) - assert ('RawMff' in repr(raw)) + raw = read_raw_egi(egi_mff_fname, events_as_annotations=events_as_annotations) + assert "RawMff" in repr(raw) assert raw.orig_format == "single" - include = ['DIN1', 'DIN2', 'DIN3', 'DIN4', 'DIN5', 'DIN7'] - raw = _test_raw_reader(read_raw_egi, input_fname=egi_mff_fname, - include=include, channel_naming='EEG %03d', - test_scaling=False, # XXX probably some bug - ) - assert raw.info['sfreq'] == 1000. 
- assert len(raw.info['dig']) == n_card + n_eeg + n_ref - assert raw.info['dig'][0]['ident'] == FIFF.FIFFV_POINT_LPA - assert raw.info['dig'][0]['kind'] == FIFF.FIFFV_POINT_CARDINAL - assert raw.info['dig'][3]['kind'] == FIFF.FIFFV_POINT_EEG - assert raw.info['dig'][-1]['ident'] == 129 - assert raw.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_ON - ref_loc = raw.info['dig'][-1]['r'] + include = ["DIN1", "DIN2", "DIN3", "DIN4", "DIN5", "DIN7"] + raw = _test_raw_reader( + read_raw_egi, + input_fname=egi_mff_fname, + include=include, + channel_naming="EEG %03d", + test_scaling=False, # XXX probably some bug + events_as_annotations=events_as_annotations, + ) + assert raw.info["sfreq"] == 1000.0 + assert len(raw.info["dig"]) == n_card + n_eeg + n_ref + assert raw.info["dig"][0]["ident"] == FIFF.FIFFV_POINT_LPA + assert raw.info["dig"][0]["kind"] == FIFF.FIFFV_POINT_CARDINAL + assert raw.info["dig"][3]["kind"] == FIFF.FIFFV_POINT_EEG + assert raw.info["dig"][-1]["ident"] == 129 + # This is not a custom reference, it's consistent across all channels + assert raw.info["custom_ref_applied"] == FIFF.FIFFV_MNE_CUSTOM_REF_OFF + ref_loc = raw.info["dig"][-1]["r"] eeg_picks = pick_types(raw.info, eeg=True) assert len(eeg_picks) == n_eeg + n_ref # 129 - # ref channel doesn't store its own loc as ref location - # so don't test it - ref_pick = pick_channels(raw.info['ch_names'], ['VREF']) - eeg_picks = np.setdiff1d(eeg_picks, ref_pick) + # ref channel should store its own loc as ref location, so't test it for i in eeg_picks: - loc = raw.info['chs'][i]['loc'] + loc = raw.info["chs"][i]["loc"] assert loc[:3].any(), loc[:3] - assert_array_equal(loc[3:6], ref_loc, err_msg=f'{i}') - assert raw.info['device_info']['type'] == 'HydroCel GSN 128 1.0' + assert_array_equal(loc[3:6], ref_loc, err_msg=f"{i}") + assert raw.info["device_info"]["type"] == "HydroCel GSN 128 1.0" - assert 'eeg' in raw + assert "eeg" in raw # test our custom channel naming logic functionality - eeg_chan = [c for c in raw.ch_names if 'EEG' in c] + eeg_chan = [c for c in raw.ch_names if "EEG" in c] assert len(eeg_chan) == n_eeg # 128: VREF will not match in comprehension - assert 'STI 014' in raw.ch_names + if events_as_annotations: + assert "STI 014" not in raw.ch_names + assert raw.event_id is None + event_id = {"DIN1": 1, "DIN2": 2, "DIN3": 3, "DIN4": 4, "DIN5": 5, "DIN7": 7} + events, _ = events_from_annotations(raw, event_id=event_id) + else: + assert "STI 014" in raw.ch_names + events = find_events(raw, stim_channel="STI 014") + event_id = raw.event_id - events = find_events(raw, stim_channel='STI 014') assert len(events) == 8 assert np.unique(events[:, 1])[0] == 0 assert np.unique(events[:, 0])[0] != 0 assert np.unique(events[:, 2])[0] != 0 - - with pytest.raises(ValueError, match='Could not find event'): - read_raw_egi(egi_mff_fname, include=['Foo']) - with pytest.raises(ValueError, match='Could not find event'): - read_raw_egi(egi_mff_fname, exclude=['Bar']) - for ii, k in enumerate(include, 1): - assert k in raw.event_id - assert raw.event_id[k] == ii + assert "DIN1" in event_id + + with pytest.raises(ValueError, match="Could not find event"): + read_raw_egi(egi_mff_fname, include=["Foo"]) + with pytest.raises(ValueError, match="Could not find event"): + read_raw_egi(egi_mff_fname, exclude=["Bar"]) + for ch in include: + assert ch in event_id + assert event_id[ch] == int(ch[-1]) + # test converting stim triggers to annotations + if events_as_annotations: + # Grab the first annotation. Should be the first "DIN1" event. 
+ assert len(raw.annotations) + onset, dur, desc, _, _ = raw.annotations[0].values() + assert_allclose(onset, 2.438) + assert np.isclose(dur, 0) + assert desc == "DIN1" + # grab the DIN1 channel + din1 = raw.get_data(picks="DIN1") + # Check that the time in sec of first event is the same as the first annotation + pin_hi_idx = np.where(din1 == 1)[1] + pin_hi_sec = pin_hi_idx / raw.info["sfreq"] + assert np.isclose(pin_hi_sec[0], onset) def test_io_egi(): @@ -189,33 +234,35 @@ def test_io_egi(): data = data[1:] data *= 1e-6 # µV - with pytest.warns(RuntimeWarning, match='Did not find any event code'): - raw = read_raw_egi(egi_fname, include=None) + raw = read_raw_egi(egi_fname, events_as_annotations=False) # The reader should accept a Path, too. - with pytest.warns(RuntimeWarning, match='Did not find any event code'): - raw = read_raw_egi(Path(egi_fname), include=None) + raw_annot = read_raw_egi(Path(egi_fname), events_as_annotations=True) - assert 'RawEGI' in repr(raw) + assert "RawEGI" in repr(raw) data_read, t_read = raw[:256] assert_allclose(t_read, t) assert_allclose(data_read, data, atol=1e-10) - include = ['TRSP', 'XXX1'] - raw = _test_raw_reader(read_raw_egi, input_fname=egi_fname, - include=include, test_rank='less', - test_scaling=False, # XXX probably some bug - ) + include = ["TRSP", "XXX1"] + raw = _test_raw_reader( + read_raw_egi, + input_fname=egi_fname, + include=include, + test_rank="less", + test_scaling=False, # XXX probably some bug + events_as_annotations=False, + ) - assert 'eeg' in raw + assert "eeg" in raw assert raw.orig_format == "single" - eeg_chan = [c for c in raw.ch_names if c.startswith('E')] + eeg_chan = [c for c in raw.ch_names if c.startswith("E")] assert len(eeg_chan) == 256 picks = pick_types(raw.info, eeg=True) assert len(picks) == 256 - assert 'STI 014' in raw.ch_names + assert "STI 014" in raw.ch_names - events = find_events(raw, stim_channel='STI 014') + events = find_events(raw, stim_channel="STI 014") assert len(events) == 2 # ground truth assert np.unique(events[:, 1])[0] == 0 assert np.unique(events[:, 0])[0] != 0 @@ -228,52 +275,62 @@ def test_io_egi(): new_trigger = _combine_triggers(triggers, events_ids) assert_array_equal(np.unique(new_trigger), np.unique([0, 12, 24])) - pytest.raises(ValueError, read_raw_egi, egi_fname, include=['Foo'], - preload=False) - pytest.raises(ValueError, read_raw_egi, egi_fname, exclude=['Bar'], - preload=False) + with pytest.raises(ValueError, match="Could not find.*include.*"): + read_raw_egi(egi_fname, include=["Foo"]) + with pytest.raises(ValueError, match="Could not find.*exclude.*"): + read_raw_egi(egi_fname, exclude=["Bar"]) for ii, k in enumerate(include, 1): - assert (k in raw.event_id) - assert (raw.event_id[k] == ii) + assert k in raw.event_id + assert raw.event_id[k] == ii + assert raw_annot.event_id is None + events, event_id = events_from_annotations(raw_annot, event_id=raw.event_id) + assert event_id == raw.event_id + events_2 = find_events(raw) + assert_array_equal(events, events_2) @requires_testing_data def test_io_egi_pns_mff(tmp_path): """Test importing EGI MFF with PNS data.""" - raw = read_raw_egi(egi_mff_pns_fname, include=None, preload=True, - verbose='error') - assert ('RawMff' in repr(raw)) + pytest.importorskip("defusedxml") + raw = read_raw_egi(egi_mff_pns_fname, include=None, preload=True, verbose="error") + assert "RawMff" in repr(raw) pns_chans = pick_types(raw.info, ecg=True, bio=True, emg=True) assert len(pns_chans) == 7 names = [raw.ch_names[x] for x in pns_chans] - pns_names = 
['Resp. Temperature', - 'Resp. Pressure', - 'ECG', - 'Body Position', - 'Resp. Effort Chest', - 'Resp. Effort Abdomen', - 'EMG-Leg'] - _test_raw_reader(read_raw_egi, input_fname=egi_mff_pns_fname, - channel_naming='EEG %03d', verbose='error', - test_rank='less', - test_scaling=False, # XXX probably some bug - ) + pns_names = [ + "Resp. Temperature", + "Resp. Pressure", + "ECG", + "Body Position", + "Resp. Effort Chest", + "Resp. Effort Abdomen", + "EMG-Leg", + ] + _test_raw_reader( + read_raw_egi, + input_fname=egi_mff_pns_fname, + channel_naming="EEG %03d", + verbose="error", + test_rank="less", + test_scaling=False, # XXX probably some bug + ) assert names == pns_names mat_names = [ - 'Resp_Temperature', - 'Resp_Pressure', - 'ECG', - 'Body_Position', - 'Resp_Effort_Chest', - 'Resp_Effort_Abdomen', - 'EMGLeg' + "Resp_Temperature", + "Resp_Pressure", + "ECG", + "Body_Position", + "Resp_Effort_Chest", + "Resp_Effort_Abdomen", + "EMGLeg", ] egi_fname_mat = testing_path / "EGI" / "test_egi_pns.mat" mc = sio.loadmat(egi_fname_mat) for ch_name, ch_idx, mat_name in zip(pns_names, pns_chans, mat_names): - print('Testing {}'.format(ch_name)) + print(f"Testing {ch_name}") mc_key = [x for x in mc.keys() if mat_name in x][0] - cal = raw.info['chs'][ch_idx]['cal'] + cal = raw.info["chs"][ch_idx]["cal"] mat_data = mc[mc_key] * cal raw_data = raw[ch_idx][0] assert_array_equal(mat_data, raw_data) @@ -281,48 +338,51 @@ def test_io_egi_pns_mff(tmp_path): # EEG missing new_mff = tmp_path / "temp.mff" shutil.copytree(egi_mff_pns_fname, new_mff) - read_raw_egi(new_mff, verbose='error') + read_raw_egi(new_mff, verbose="error") os.remove(new_mff / "info1.xml") os.remove(new_mff / "signal1.bin") - with pytest.raises(FileNotFoundError, match='Could not find any EEG'): - read_raw_egi(new_mff, verbose='error') + with pytest.raises(FileNotFoundError, match="Could not find any EEG"): + read_raw_egi(new_mff, verbose="error") @requires_testing_data -@pytest.mark.parametrize('preload', (True, False)) +@pytest.mark.parametrize("preload", (True, False)) def test_io_egi_pns_mff_bug(preload): """Test importing EGI MFF with PNS data (BUG).""" + pytest.importorskip("defusedxml") egi_fname_mff = testing_path / "EGI" / "test_egi_pns_bug.mff" - with pytest.warns(RuntimeWarning, match='EGI PSG sample bug'): - raw = read_raw_egi(egi_fname_mff, include=None, preload=preload, - verbose='warning') + with pytest.warns(RuntimeWarning, match="EGI PSG sample bug"): + raw = read_raw_egi( + egi_fname_mff, include=None, preload=preload, verbose="warning" + ) assert len(raw.annotations) == 1 assert_allclose(raw.annotations.duration, [0.004]) assert_allclose(raw.annotations.onset, [13.948]) egi_fname_mat = testing_path / "EGI" / "test_egi_pns.mat" mc = sio.loadmat(egi_fname_mat) pns_chans = pick_types(raw.info, ecg=True, bio=True, emg=True) - pns_names = ['Resp. Temperature'[:15], - 'Resp. Pressure', - 'ECG', - 'Body Position', - 'Resp. Effort Chest'[:15], - 'Resp. Effort Abdomen'[:15], - 'EMG-Leg'] + pns_names = [ + "Resp. Temperature"[:15], + "Resp. Pressure", + "ECG", + "Body Position", + "Resp. Effort Chest"[:15], + "Resp. 
Effort Abdomen"[:15], + "EMG-Leg", + ] mat_names = [ - 'Resp_Temperature'[:15], - 'Resp_Pressure', - 'ECG', - 'Body_Position', - 'Resp_Effort_Chest'[:15], - 'Resp_Effort_Abdomen'[:15], - 'EMGLeg' - + "Resp_Temperature"[:15], + "Resp_Pressure", + "ECG", + "Body_Position", + "Resp_Effort_Chest"[:15], + "Resp_Effort_Abdomen"[:15], + "EMGLeg", ] for ch_name, ch_idx, mat_name in zip(pns_names, pns_chans, mat_names): - print('Testing {}'.format(ch_name)) + print(f"Testing {ch_name}") mc_key = [x for x in mc.keys() if mat_name in x][0] - cal = raw.info['chs'][ch_idx]['cal'] + cal = raw.info["chs"][ch_idx]["cal"] mat_data = mc[mc_key] * cal mat_data[:, -1] = 0 # The MFF has one less sample, the last one raw_data = raw[ch_idx][0] @@ -332,6 +392,7 @@ def test_io_egi_pns_mff_bug(preload): @requires_testing_data def test_io_egi_crop_no_preload(): """Test crop non-preloaded EGI MFF data (BUG).""" + pytest.importorskip("defusedxml") raw = read_raw_egi(egi_mff_fname, preload=False) raw.crop(17.5, 20.5) raw.load_data() @@ -341,17 +402,25 @@ def test_io_egi_crop_no_preload(): assert_allclose(raw._data, raw_preload._data) -@pytest.mark.filterwarnings('ignore::FutureWarning') -@requires_version('mffpy', '0.5.7') @requires_testing_data -@pytest.mark.parametrize('idx, cond, tmax, signals, bads', [ - (0, 'Category 1', 0.016, egi_txt_evoked_cat1_fname, - ['E8', 'E11', 'E17', 'E28', 'ECG']), - (1, 'Category 2', 0.0, egi_txt_evoked_cat2_fname, - ['VREF', 'EMG']) -]) +@pytest.mark.parametrize( + "idx, cond, tmax, signals, bads", + [ + ( + 0, + "Category 1", + 0.016, + egi_txt_evoked_cat1_fname, + ["E8", "E11", "E17", "E28", "ECG"], + ), + (1, "Category 2", 0.0, egi_txt_evoked_cat2_fname, ["VREF", "EMG"]), + ], +) def test_io_egi_evokeds_mff(idx, cond, tmax, signals, bads): """Test reading evoked MFF file.""" + pytest.importorskip("mffpy", "0.5.7") + + pytest.importorskip("defusedxml") # expected n channels n_eeg = 256 n_ref = 1 @@ -366,15 +435,19 @@ def test_io_egi_evokeds_mff(idx, cond, tmax, signals, bads): assert len(evokeds) == 2 # Test invalid condition with pytest.raises(ValueError) as exc_info: - read_evokeds_mff(egi_mff_evoked_fname, condition='Invalid Condition') - message = "Invalid value for the 'condition' parameter provided as " \ - "category name. Allowed values are 'Category 1' and " \ - "'Category 2', but got 'Invalid Condition' instead." + read_evokeds_mff(egi_mff_evoked_fname, condition="Invalid Condition") + message = ( + "Invalid value for the 'condition' parameter provided as " + "category name. Allowed values are 'Category 1' and " + "'Category 2', but got 'Invalid Condition' instead." + ) assert str(exc_info.value) == message with pytest.raises(ValueError) as exc_info: read_evokeds_mff(egi_mff_evoked_fname, condition=2) - message = '"condition" parameter (2), provided as epoch index, ' \ - 'is out of range for available epochs (2).' + message = ( + '"condition" parameter (2), provided as epoch index, ' + "is out of range for available epochs (2)." 
+ ) assert str(exc_info.value) == message with pytest.raises(TypeError) as exc_info: read_evokeds_mff(egi_mff_evoked_fname, condition=1.2) @@ -393,28 +466,27 @@ def test_io_egi_evokeds_mff(idx, cond, tmax, signals, bads): assert_allclose(evoked_cond.data, data, atol=1e-12) assert_allclose(evoked_idx.data, data, atol=1e-12) # Check info - assert object_diff(evoked_cond.info, evoked_idx.info) == '' - assert evoked_cond.info['description'] == cond - assert evoked_cond.info['bads'] == bads - assert len(evoked_cond.info['ch_names']) == n_eeg + n_ref + n_pns # 259 - assert 'ECG' in evoked_cond.info['ch_names'] - assert 'EMG' in evoked_cond.info['ch_names'] - assert 'ecg' in evoked_cond - assert 'emg' in evoked_cond + assert object_diff(evoked_cond.info, evoked_idx.info) == "" + assert evoked_cond.info["description"] == cond + assert evoked_cond.info["bads"] == bads + assert len(evoked_cond.info["ch_names"]) == n_eeg + n_ref + n_pns # 259 + assert "ECG" in evoked_cond.info["ch_names"] + assert "EMG" in evoked_cond.info["ch_names"] + assert "ecg" in evoked_cond + assert "emg" in evoked_cond pick_eeg = pick_types(evoked_cond.info, eeg=True, exclude=[]) assert len(pick_eeg) == n_eeg + n_ref # 257 - assert evoked_cond.info['nchan'] == n_eeg + n_ref + n_pns # 259 - assert evoked_cond.info['sfreq'] == 250.0 - assert not evoked_cond.info['custom_ref_applied'] - assert len(evoked_cond.info['dig']) == n_card + n_eeg + n_ref - assert evoked_cond.info['device_info']['type'] == 'HydroCel GSN 256 1.0' + assert evoked_cond.info["nchan"] == n_eeg + n_ref + n_pns # 259 + assert evoked_cond.info["sfreq"] == 250.0 + assert not evoked_cond.info["custom_ref_applied"] + assert len(evoked_cond.info["dig"]) == n_card + n_eeg + n_ref + assert evoked_cond.info["device_info"]["type"] == "HydroCel GSN 256 1.0" -@pytest.mark.filterwarnings('ignore::FutureWarning') -@requires_version('mffpy', '0.5.7') @requires_testing_data def test_read_evokeds_mff_bad_input(): """Test errors are thrown when reading invalid input file.""" + pytest.importorskip("mffpy", "0.5.7") # Test file that is not an MFF with pytest.raises(ValueError) as exc_info: read_evokeds_mff(egi_fname) @@ -423,14 +495,17 @@ def test_read_evokeds_mff_bad_input(): # Test continuous MFF with pytest.raises(ValueError) as exc_info: read_evokeds_mff(egi_mff_fname) - message = f'{egi_mff_fname} is a continuous MFF file. ' \ - 'fname must be the path to an averaged MFF file.' + message = ( + f"{egi_mff_fname} is a continuous MFF file. " + "fname must be the path to an averaged MFF file." 
+ ) assert str(exc_info.value) == message @requires_testing_data def test_egi_coord_frame(): """Test that EGI coordinate frame is changed to head.""" + pytest.importorskip("defusedxml") info = read_raw_egi(egi_mff_fname).info want_idents = ( FIFF.FIFFV_POINT_LPA, @@ -438,56 +513,93 @@ def test_egi_coord_frame(): FIFF.FIFFV_POINT_RPA, ) for ii, want in enumerate(want_idents): - d = info['dig'][ii] - assert d['kind'] == FIFF.FIFFV_POINT_CARDINAL - assert d['ident'] == want - loc = d['r'] + d = info["dig"][ii] + assert d["kind"] == FIFF.FIFFV_POINT_CARDINAL + assert d["ident"] == want + loc = d["r"] if ii == 0: - assert 0.05 < -loc[0] < 0.1, 'LPA' - assert_allclose(loc[1:], 0, atol=1e-7, err_msg='LPA') + assert 0.05 < -loc[0] < 0.1, "LPA" + assert_allclose(loc[1:], 0, atol=1e-7, err_msg="LPA") elif ii == 1: - assert 0.05 < loc[1] < 0.11, 'Nasion' - assert_allclose(loc[::2], 0, atol=1e-7, err_msg='Nasion') + assert 0.05 < loc[1] < 0.11, "Nasion" + assert_allclose(loc[::2], 0, atol=1e-7, err_msg="Nasion") else: assert ii == 2 - assert 0.05 < loc[0] < 0.1, 'RPA' - assert_allclose(loc[1:], 0, atol=1e-7, err_msg='RPA') - for d in info['dig'][3:]: - assert d['kind'] == FIFF.FIFFV_POINT_EEG + assert 0.05 < loc[0] < 0.1, "RPA" + assert_allclose(loc[1:], 0, atol=1e-7, err_msg="RPA") + for d in info["dig"][3:]: + assert d["kind"] == FIFF.FIFFV_POINT_EEG @requires_testing_data -@pytest.mark.parametrize('fname, timestamp, utc_offset', [ - (egi_mff_fname, '2017-02-23T11:35:13.220824+01:00', '+0100'), - (egi_mff_pns_fname, '2017-09-20T09:55:44.072000+01:00', '+0100'), - (egi_eprime_pause_fname, '2018-07-30T10:46:09.621673-04:00', '-0400'), - (egi_pause_w1337_fname, '2019-10-14T10:54:27.395210-07:00', '-0700'), -]) +@pytest.mark.parametrize( + "fname, timestamp, utc_offset", + [ + (egi_mff_fname, "2017-02-23T11:35:13.220824+01:00", "+0100"), + (egi_mff_pns_fname, "2017-09-20T09:55:44.072000+01:00", "+0100"), + (egi_eprime_pause_fname, "2018-07-30T10:46:09.621673-04:00", "-0400"), + (egi_pause_w1337_fname, "2019-10-14T10:54:27.395210-07:00", "-0700"), + ], +) def test_meas_date(fname, timestamp, utc_offset): """Test meas date conversion.""" - raw = read_raw_egi(fname, verbose='warning') - dt = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f%z') + pytest.importorskip("defusedxml") + raw = read_raw_egi(fname, verbose="warning") + dt = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f%z") measdate = dt.astimezone(timezone.utc) - hour_local = int(dt.strftime('%H')) - hour_utc = int(raw.info['meas_date'].strftime('%H')) + hour_local = int(dt.strftime("%H")) + hour_utc = int(raw.info["meas_date"].strftime("%H")) local_utc_diff = hour_local - hour_utc - assert raw.info['meas_date'] == measdate - assert raw.info['utc_offset'] == utc_offset + assert raw.info["meas_date"] == measdate + assert raw.info["utc_offset"] == utc_offset assert local_utc_diff == int(utc_offset[:-2]) @requires_testing_data -@pytest.mark.parametrize('fname, standard_montage', [ - (egi_mff_fname, 'GSN-HydroCel-129'), # 129 chan EGI file - (egi_mff_pns_fname, 'GSN-HydroCel-257') # 257 chan EGI file -]) -def test_set_standard_montage(fname, standard_montage): +@pytest.mark.parametrize( + "fname, standard_montage", + [ + (egi_mff_fname, "GSN-HydroCel-129"), # 129 chan EGI file + (egi_mff_pns_fname, "GSN-HydroCel-257"), # 257 chan EGI file + ], +) +def test_set_standard_montage_mff(fname, standard_montage): """Test setting a standard montage.""" - raw = read_raw_egi(fname, verbose='warning') - dig_before_mon = raw.info['dig'] + 
pytest.importorskip("defusedxml") + raw = read_raw_egi(fname, verbose="warning") + n_eeg = int(standard_montage.split("-")[-1]) + n_dig = n_eeg + 3 + dig_before_mon = deepcopy(raw.info["dig"]) + assert len(dig_before_mon) == n_dig + ref_loc = dig_before_mon[-1]["r"] + picks = pick_types(raw.info, eeg=True) + assert len(picks) == n_eeg + for pick in picks: + assert_allclose(raw.info["chs"][pick]["loc"][3:6], ref_loc) - raw.set_montage(standard_montage, match_alias=True, on_missing='ignore') - dig_after_mon = raw.info['dig'] + raw.set_montage(standard_montage, match_alias=True, on_missing="ignore") + dig_after_mon = raw.info["dig"] # No dig entries should have been dropped while setting montage - assert len(dig_before_mon) == len(dig_after_mon) + assert len(dig_before_mon) == n_dig + assert len(dig_after_mon) == n_dig + + # Check that the reference remained + for pick in picks: + assert_allclose(raw.info["chs"][pick]["loc"][3:6], ref_loc) + + +@requires_testing_data +def test_egi_mff_bad_xml(tmp_path): + """Test that corrupt XML files are gracefully handled.""" + pytest.importorskip("defusedxml") + mff_fname = shutil.copytree(egi_mff_fname, tmp_path / "test_egi_bad_xml.mff") + bad_xml = mff_fname / "bad.xml" + bad_xml.write_text("", encoding="utf-8") + # Missing coordinate file + (mff_fname / "coordinates.xml").unlink() + with pytest.warns(RuntimeWarning, match="Could not parse the XML"): + with pytest.warns(RuntimeWarning, match="File coordinates.xml not found"): + raw = read_raw_egi(mff_fname) + # little check that the bad XML doesn't affect the parsing of other xml files + assert "DIN1" in raw.annotations.description diff --git a/mne/io/eximia/__init__.py b/mne/io/eximia/__init__.py index b6cd9403301..2530990e13e 100644 --- a/mne/io/eximia/__init__.py +++ b/mne/io/eximia/__init__.py @@ -1,7 +1,7 @@ """Eximia module for conversion to FIF.""" -# Author: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .eximia import read_raw_eximia diff --git a/mne/io/eximia/eximia.py b/mne/io/eximia/eximia.py index af6060f7709..5d21879de32 100644 --- a/mne/io/eximia/eximia.py +++ b/mne/io/eximia/eximia.py @@ -1,18 +1,17 @@ -# Authors: Eric Larson -# Federico Raimondo -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import os.path as op +from ..._fiff.meas_info import create_info +from ..._fiff.utils import _file_size, _read_segments_file +from ...utils import _check_fname, fill_doc, logger, verbose, warn from ..base import BaseRaw -from ..utils import _read_segments_file, _file_size -from ..meas_info import create_info -from ...utils import logger, verbose, warn, fill_doc, _check_fname @fill_doc -def read_raw_eximia(fname, preload=False, verbose=None): +def read_raw_eximia(fname, preload=False, verbose=None) -> "RawEximia": """Reader for an eXimia EEG file. Parameters @@ -53,41 +52,52 @@ class RawEximia(BaseRaw): @verbose def __init__(self, fname, preload=False, verbose=None): - fname = str(_check_fname(fname, 'read', True, 'fname')) + fname = str(_check_fname(fname, "read", True, "fname")) data_name = op.basename(fname) - logger.info('Loading %s' % data_name) + logger.info(f"Loading {data_name}") # Create vhdr and vmrk files so that we can use mne_brain_vision2fiff n_chan = 64 - sfreq = 1450. 
+ sfreq = 1450.0 # data are multiplexed int16 - ch_names = ['GateIn', 'Trig1', 'Trig2', 'EOG'] - ch_types = ['stim', 'stim', 'stim', 'eog'] - cals = [0.0015259021896696422, 0.0015259021896696422, - 0.0015259021896696422, 0.3814755474174106] - ch_names += ('Fp1 Fpz Fp2 AF1 AFz AF2 ' - 'F7 F3 F1 Fz F2 F4 F8 ' - 'FT9 FT7 FC5 FC3 FC1 FCz FC2 FC4 FC6 FT8 FT10 ' - 'T7 C5 C3 C1 Cz C2 C4 C6 T8 ' - 'TP9 TP7 CP5 CP3 CP1 CPz CP2 CP4 CP6 TP8 TP10 ' - 'P9 P7 P3 P1 Pz P2 P4 P8 ' - 'P10 PO3 POz PO4 O1 Oz O2 Iz'.split()) + ch_names = ["GateIn", "Trig1", "Trig2", "EOG"] + ch_types = ["stim", "stim", "stim", "eog"] + cals = [ + 0.0015259021896696422, + 0.0015259021896696422, + 0.0015259021896696422, + 0.3814755474174106, + ] + ch_names += ( + "Fp1 Fpz Fp2 AF1 AFz AF2 " + "F7 F3 F1 Fz F2 F4 F8 " + "FT9 FT7 FC5 FC3 FC1 FCz FC2 FC4 FC6 FT8 FT10 " + "T7 C5 C3 C1 Cz C2 C4 C6 T8 " + "TP9 TP7 CP5 CP3 CP1 CPz CP2 CP4 CP6 TP8 TP10 " + "P9 P7 P3 P1 Pz P2 P4 P8 " + "P10 PO3 POz PO4 O1 Oz O2 Iz".split() + ) n_eeg = len(ch_names) - len(cals) cals += [0.07629510948348212] * n_eeg - ch_types += ['eeg'] * n_eeg + ch_types += ["eeg"] * n_eeg assert len(ch_names) == n_chan info = create_info(ch_names, sfreq, ch_types) n_bytes = _file_size(fname) n_samples, extra = divmod(n_bytes, (n_chan * 2)) if extra != 0: - warn('Incorrect number of samples in file (%s), the file is ' - 'likely truncated' % (n_samples,)) - for ch, cal in zip(info['chs'], cals): - ch['cal'] = cal - super(RawEximia, self).__init__( - info, preload=preload, last_samps=(n_samples - 1,), - filenames=[fname], orig_format='short') + warn( + f"Incorrect number of samples in file ({n_samples}), the file is likely" + " truncated" + ) + for ch, cal in zip(info["chs"], cals): + ch["cal"] = cal + super().__init__( + info, + preload=preload, + last_samps=(n_samples - 1,), + filenames=[fname], + orig_format="short", + ) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" - _read_segments_file( - self, data, idx, fi, start, stop, cals, mult, dtype=' -# simplified BSD-3 license +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
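A minimal sketch of calling the eXimia reader reformatted above, assuming a hypothetical recording.nxe file:

    from mne.io import read_raw_eximia

    # the format is fixed: 64 multiplexed int16 channels sampled at 1450 Hz
    raw = read_raw_eximia("recording.nxe", preload=True)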
from numpy.testing import assert_array_equal from scipy import io as sio +from mne.datasets.testing import data_path, requires_testing_data from mne.io import read_raw_eximia from mne.io.tests.test_raw import _test_raw_reader -from mne.datasets.testing import data_path, requires_testing_data testing_path = data_path(download=False) @@ -17,28 +17,34 @@ def test_eximia_nxe(): """Test reading Eximia NXE files.""" fname = testing_path / "eximia" / "test_eximia.nxe" raw = read_raw_eximia(fname, preload=True) - assert 'RawEximia' in repr(raw) - _test_raw_reader(read_raw_eximia, fname=fname, - test_scaling=False, # XXX probably a scaling problem - ) + assert "RawEximia" in repr(raw) + _test_raw_reader( + read_raw_eximia, + fname=fname, + test_scaling=False, # XXX probably a scaling problem + ) fname_mat = testing_path / "eximia" / "test_eximia.mat" mc = sio.loadmat(fname_mat) - m_data = mc['data'] - m_header = mc['header'] + m_data = mc["data"] + m_header = mc["header"] assert raw._data.shape == m_data.shape - assert m_header['Fs'][0, 0][0, 0] == raw.info['sfreq'] - m_names = [x[0][0] for x in m_header['label'][0, 0]] + assert m_header["Fs"][0, 0][0, 0] == raw.info["sfreq"] + m_names = [x[0][0] for x in m_header["label"][0, 0]] m_names = list( - map(lambda x: x.replace('GATE', 'GateIn').replace('TRIG', 'Trig'), - m_names)) + map(lambda x: x.replace("GATE", "GateIn").replace("TRIG", "Trig"), m_names) + ) assert raw.ch_names == m_names - m_ch_types = [x[0][0] for x in m_header['chantype'][0, 0]] + m_ch_types = [x[0][0] for x in m_header["chantype"][0, 0]] m_ch_types = list( - map(lambda x: x.replace('unknown', 'stim').replace('trigger', 'stim'), - m_ch_types)) - types_dict = {2: 'eeg', 3: 'stim', 202: 'eog'} - ch_types = [types_dict[raw.info['chs'][x]['kind']] - for x in range(len(raw.ch_names))] + map( + lambda x: x.replace("unknown", "stim").replace("trigger", "stim"), + m_ch_types, + ) + ) + types_dict = {2: "eeg", 3: "stim", 202: "eog"} + ch_types = [ + types_dict[raw.info["chs"][x]["kind"]] for x in range(len(raw.ch_names)) + ] assert ch_types == m_ch_types assert_array_equal(m_data, raw._data) diff --git a/mne/io/eyelink/__init__.py b/mne/io/eyelink/__init__.py new file mode 100644 index 00000000000..2d5e5290b22 --- /dev/null +++ b/mne/io/eyelink/__init__.py @@ -0,0 +1,7 @@ +"""Module for loading Eye-Tracker data.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .eyelink import read_raw_eyelink diff --git a/mne/io/eyelink/_utils.py b/mne/io/eyelink/_utils.py new file mode 100644 index 00000000000..2539baef038 --- /dev/null +++ b/mne/io/eyelink/_utils.py @@ -0,0 +1,1032 @@ +"""Helper functions for reading eyelink ASCII files.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
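A brief sketch of how the helpers in this new module are reached from the public reader exposed in __init__.py above; the .asc path is illustrative and keyword support is assumed to mirror the parser below:

    from mne.io.eyelink import read_raw_eyelink

    # expects an ASCII (.asc) export of the Eyelink recording
    raw = read_raw_eyelink("recording.asc")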
+ +import re +from datetime import datetime, timedelta, timezone + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ...annotations import Annotations +from ...utils import _check_pandas_installed, logger, warn + +EYELINK_COLS = { + "timestamp": ("time",), + "pos": { + "left": ("xpos_left", "ypos_left", "pupil_left"), + "right": ("xpos_right", "ypos_right", "pupil_right"), + }, + "velocity": { + "left": ("xvel_left", "yvel_left"), + "right": ("xvel_right", "yvel_right"), + }, + "resolution": ("xres", "yres"), + "input": ("DIN",), + "remote": ("x_head", "y_head", "distance"), + "block_num": ("block",), + "eye_event": ("eye", "time", "end_time", "duration"), + "fixation": ("fix_avg_x", "fix_avg_y", "fix_avg_pupil_size"), + "saccade": ( + "sacc_start_x", + "sacc_start_y", + "sacc_end_x", + "sacc_end_y", + "sacc_visual_angle", + "peak_velocity", + ), + "messages": ("time", "offset", "event_msg"), +} + + +def _parse_eyelink_ascii( + fname, find_overlaps=True, overlap_threshold=0.05, apply_offsets=False +): + # ======================== Parse ASCII File ========================= + raw_extras = dict() + raw_extras["dt"] = _get_recording_datetime(fname) + data_blocks: list[dict] = _parse_recording_blocks(fname) + _validate_data(data_blocks) + + # ======================== Create DataFrames ======================== + # Process each block individually, then combine + processed_blocks = _create_dataframes(data_blocks, apply_offsets) + raw_extras["dfs"], ch_names = _combine_block_dataframes(processed_blocks) + del processed_blocks # free memory + for block in data_blocks: + del block["samples"] # remove samples from block to save memory + + first_block = data_blocks[0] + raw_extras["pos_unit"] = first_block["info"]["unit"] + raw_extras["sfreq"] = first_block["info"]["sfreq"] + raw_extras["first_timestamp"] = first_block["info"]["first_timestamp"] + raw_extras["n_blocks"] = len(data_blocks) + # if HREF data, convert to radians + if raw_extras["pos_unit"] == "HREF": + raw_extras["dfs"]["samples"] = _convert_href_samples( + raw_extras["dfs"]["samples"] + ) + # fill in times between recording blocks with BAD_ACQ_SKIP + if raw_extras["n_blocks"] > 1: + logger.info( + f"There are {raw_extras['n_blocks']} recording blocks in this file." + f" Times between blocks will be annotated with BAD_ACQ_SKIP." + ) + raw_extras["dfs"]["samples"] = _adjust_times( + raw_extras["dfs"]["samples"], raw_extras["sfreq"] + ) + # Convert timestamps to seconds + for df in raw_extras["dfs"].values(): + df = _convert_times(df, raw_extras["first_timestamp"]) + # Find overlaps between left and right eye events + if find_overlaps: + for key in raw_extras["dfs"]: + if key not in ["blinks", "fixations", "saccades"]: + continue + raw_extras["dfs"][key] = _find_overlaps( + raw_extras["dfs"][key], max_time=overlap_threshold + ) + # ======================== Info for BaseRaw ======================== + eye_ch_data = raw_extras["dfs"]["samples"][ch_names].to_numpy().T + info = _create_info(ch_names, raw_extras) + + return eye_ch_data, info, raw_extras + + +def _parse_recording_blocks(fname): + """Parse Eyelink ASCII file. + + Eyelink samples occur within START and END blocks. + samples lines start with a posix-like string, + and contain eyetracking sample info. Event Lines + start with an upper case string and contain info + about occular events (i.e. blink/saccade), or experiment + messages sent by the stimulus presentation software. 
+ """ + with fname.open() as file: + data_dict = dict() + data_dict["sample_lines"] = [] + data_dict["event_lines"] = { + "START": [], + "END": [], + "SAMPLES": [], + "EVENTS": [], + "ESACC": [], + "EBLINK": [], + "EFIX": [], + "MSG": [], + "INPUT": [], + "BUTTON": [], + "PUPIL": [], + } + data_blocks = [] + + is_recording_block = False + for line in file: + if line.startswith("START"): # start of recording block + is_recording_block = True + # Initialize container for new block data + current_block = { + "samples": [], + "events": { + "START": [], + "END": [], + "SAMPLES": [], + "EVENTS": [], + "ESACC": [], + "EBLINK": [], + "EFIX": [], + "MSG": [], + "INPUT": [], + "BUTTON": [], + "PUPIL": [], + }, + "info": None, + } + if is_recording_block: + tokens = line.split() + if not tokens: + continue # skip empty lines + if tokens[0][0].isnumeric(): # Samples + current_block["samples"].append(tokens) + elif tokens[0] in current_block["events"].keys(): + if _is_sys_msg(line): + continue # system messages don't need to be parsed. + event_key, event_info = tokens[0], tokens[1:] + current_block["events"][event_key].append(event_info) + if tokens[0] == "END": # end of recording block + current_block["info"] = _get_metadata(current_block) + data_blocks.append(current_block) + is_recording_block = False + if not data_blocks: # no samples parsed + raise ValueError(f"Couldn't find any samples in {fname}") + return data_blocks + + +def _validate_data(data_blocks: list): + """Check the incoming data for some known problems that can occur.""" + # Detect the datatypes that are in file. + units = [] + pupil_units = [] + modes = [] + eyes = [] + sfreqs = [] + for block in data_blocks: + units.append(block["info"]["unit"]) + modes.append(block["info"]["tracking_mode"]) + eyes.append(block["info"]["eye"]) + sfreqs.append(block["info"]["sfreq"]) + pupil_units.append(block["info"]["pupil_unit"]) + if "GAZE" in units: + logger.info( + "Pixel coordinate data detected." + "Pass `scalings=dict(eyegaze=1e3)` when using plot" + " method to make traces more legible." + ) + if "HREF" in units: + logger.info("Head-referenced eye-angle (HREF) data detected.") + elif "PUPIL" in units: + warn("Raw eyegaze coordinates detected. Analyze with caution.") + if "AREA" in pupil_units: + logger.info("Pupil-size area detected.") + elif "DIAMETER" in pupil_units: + logger.info("Pupil-size diameter detected.") + + if len(set(modes)) > 1: + warn( + "This recording switched between monocular and binocular tracking. " + f"In order of acquisition blocks, tracking modes were {modes}. Data " + "for the missing eye during monocular tracking will be filled with NaN." + ) + # Monocular tracking but switched between left/right eye + elif len(set(eyes)) > 1: + warn( + "The eye being tracked changed during the recording. " + f"In order of acquisition blocks, they were {eyes}. " + "Missing data for each eye will be filled with NaN." + ) + if len(set(sfreqs)) > 1: + raise RuntimeError( + "The sampling frequency changed during the recording. " + f"In order of acquisition blocks, they were {sfreqs}. " + "please notify MNE-Python developers" + ) # pragma: no cover + if len(set(units)) > 1: + raise RuntimeError( + "The unit of measurement for x/y coordinates changed during the recording. " + f"In order of acquisition blocks, they were {units}. 
" + "please notify MNE-Python developers" + ) # pragma: no cover + + +def _get_recording_datetime(fname): + """Create a datetime object from the datetime in ASCII file.""" + # create a timezone object for UTC + tz = timezone(timedelta(hours=0)) + in_header = False + with fname.open() as file: + for line in file: + # header lines are at top of file and start with ** + if line.startswith("**"): + in_header = True + if in_header: + if line.startswith("** DATE:"): + dt_str = line.replace("** DATE:", "").strip() + fmt = "%a %b %d %H:%M:%S %Y" + # Eyelink measdate timestamps are timezone naive. + # Force datetime to be in UTC. + # Even though dt is probably in local time zone. + try: + dt_naive = datetime.strptime(dt_str, fmt) + except ValueError: + # date string is missing or in an unexpected format + logger.info( + "Could not detect date from file with date entry: " + f"{repr(dt_str)}" + ) + return + else: + return dt_naive.replace(tzinfo=tz) # make it dt aware + return + + +def _get_metadata(data_block: dict): + """Get tracking mode, sfreq, eye tracked, pupil metric, etc. for one data block.""" + meta_data = dict() + rec_info = data_block["events"]["SAMPLES"][0] + meta_data["unit"] = rec_info[0] + + # If the file doesn't have pupil data, i'm not sure if there will be any PUPIL info? + if not data_block["events"]["PUPIL"]: + ps_unit = None + else: + ps_unit = data_block["events"]["PUPIL"][0][0] + meta_data["pupil_unit"] = ps_unit + if ("LEFT" in rec_info) and ("RIGHT" in rec_info): + meta_data["tracking_mode"] = "binocular" + meta_data["eye"] = "both" + else: + meta_data["tracking_mode"] = "monocular" + meta_data["eye"] = rec_info[1].lower() + meta_data["first_timestamp"] = float(data_block["events"]["START"][0][0]) + meta_data["last_timestamp"] = float(data_block["events"]["END"][0][0]) + meta_data["sfreq"] = _get_sfreq_from_ascii(rec_info) + meta_data["rec_info"] = data_block["events"]["SAMPLES"][0] + return meta_data + + +def _is_sys_msg(line): + """Flag lines from eyelink ASCII file that contain a known system message. + + Some lines in eyelink files are system outputs usually + only meant for Eyelinks DataViewer application to read. + These shouldn't need to be parsed. + + Parameters + ---------- + line : string + single line from Eyelink asc file + + Returns + ------- + bool : + True if any of the following strings that are + known to indicate a system message are in the line + + Notes + ----- + Examples of eyelink system messages: + - ;Sess:22Aug22;Tria:1;Tri2:False;ESNT:182BFE4C2F4; + - ;NTPT:182BFE55C96;SMSG:__NTP_CLOCK_SYNC__;DIFF:-1; + - !V APLAYSTART 0 1 library/audio + - !MODE RECORD CR 500 2 1 R + """ + return "!V" in line or "!MODE" in line or ";" in line + + +def _get_sfreq_from_ascii(rec_info): + """Get sampling frequency from Eyelink ASCII file. + + Parameters + ---------- + rec_info : list + the first list in raw_extras["event_lines"]['SAMPLES']. + The sfreq occurs after RATE: i.e. [..., RATE, 1000, ...]. + + Returns + ------- + sfreq : float + """ + return float(rec_info[rec_info.index("RATE") + 1]) + + +def _create_dataframes(data_blocks, apply_offsets): + """Create and process pandas DataFrames for each recording block. + + Processes each block individually with its own column structure, + then returns a list of processed block dataframes. 
+ """ + processed_blocks = [] + + for block_idx, block in enumerate(data_blocks): + # Create dataframes for this block + block_dfs = _create_dataframes_for_block(block, apply_offsets) + + # Infer column names for this specific block + col_names, ch_names = _infer_col_names_for_block(block) + + # Assign column names and set dtypes for this block + block_dfs = _assign_col_names(col_names, block_dfs) + block_dfs = _set_df_dtypes(block_dfs) + + processed_blocks.append( + { + "block_idx": block_idx, + "dfs": block_dfs, + "ch_names": ch_names, + "info": block["info"], + } + ) + return processed_blocks + + +def _create_dataframes_for_block(block, apply_offsets): + """Create pandas.DataFrame for one recording block's samples and events. + + Creates a pandas DataFrame for sample_lines and for each + non-empty key in event_lines for a single recording block. + No column names are assigned at this point. + This also returns the MNE channel names needed to represent this block of data. + """ + pd = _check_pandas_installed() + df_dict = dict() + + # dataframe for samples in this block + if block["samples"]: + df_dict["samples"] = pd.DataFrame(block["samples"]) + df_dict["samples"] = _drop_status_col(df_dict["samples"]) # drop STATUS col + + # dataframe for each type of occular event in this block + for event, label in zip( + ["EFIX", "ESACC", "EBLINK"], ["fixations", "saccades", "blinks"] + ): + if block["events"][event]: # an empty list returns False + df_dict[label] = pd.DataFrame(block["events"][event]) + else: + # Changed this from info to debug level to avoid spamming the log + logger.debug(f"No {label} events found in block") + + # make dataframe for experiment messages in this block + if block["events"]["MSG"]: + msgs = [] + for token in block["events"]["MSG"]: + if apply_offsets and len(token) == 2: + ts, msg = token + offset = np.nan + elif apply_offsets: + ts = token[0] + try: + offset = float(token[1]) + msg = " ".join(str(x) for x in token[2:]) + except ValueError: + offset = np.nan + msg = " ".join(str(x) for x in token[1:]) + else: + ts, offset = token[0], np.nan + msg = " ".join(str(x) for x in token[1:]) + msgs.append([ts, offset, msg]) + df_dict["messages"] = pd.DataFrame(msgs) + + # TODO: Make dataframes for other eyelink events (Buttons) + return df_dict + + +def _infer_col_names_for_block(block: dict) -> tuple[dict[str, list], list]: + """Build column and channel names for data from one Eyelink recording block. + + Returns the expected column names for the sample lines and event + lines for a single recording block. The columns present can vary + between blocks if tracking mode changes. 
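+
+    For example, a monocular left-eye block with no velocity, resolution,
+    input, or head-target data would yield (a sketch)::
+
+        ch_names == ["xpos_left", "ypos_left", "pupil_left"]
+        col_names["samples"] == ["time", "xpos_left", "ypos_left", "pupil_left"]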
+ """ + col_names = {} + block_info = block["info"] + + # initiate the column names for the sample lines + col_names["samples"] = list(EYELINK_COLS["timestamp"]) + col_names["messages"] = list(EYELINK_COLS["messages"]) + + # and for the eye message lines + col_names["blinks"] = list(EYELINK_COLS["eye_event"]) + col_names["fixations"] = list(EYELINK_COLS["eye_event"] + EYELINK_COLS["fixation"]) + col_names["saccades"] = list(EYELINK_COLS["eye_event"] + EYELINK_COLS["saccade"]) + + # Get block-specific tracking info + tracking_mode = block_info["tracking_mode"] + eye = block_info["eye"] + rec_info = block["events"]["SAMPLES"][0] # SAMPLES line for this block + + # Recording was either binocular or monocular for this block + if tracking_mode == "monocular": + ch_names = list(EYELINK_COLS["pos"][eye]) + elif tracking_mode == "binocular": + ch_names = list(EYELINK_COLS["pos"]["left"] + EYELINK_COLS["pos"]["right"]) + col_names["samples"].extend(ch_names) + + # The order of these if statements should not be changed. + if "VEL" in rec_info: # If velocity data are reported + if tracking_mode == "monocular": + ch_names.extend(EYELINK_COLS["velocity"][eye]) + col_names["samples"].extend(EYELINK_COLS["velocity"][eye]) + elif tracking_mode == "binocular": + ch_names.extend( + EYELINK_COLS["velocity"]["left"] + EYELINK_COLS["velocity"]["right"] + ) + col_names["samples"].extend( + EYELINK_COLS["velocity"]["left"] + EYELINK_COLS["velocity"]["right"] + ) + # if resolution data are reported + if "RES" in rec_info: + ch_names.extend(EYELINK_COLS["resolution"]) + col_names["samples"].extend(EYELINK_COLS["resolution"]) + col_names["fixations"].extend(EYELINK_COLS["resolution"]) + col_names["saccades"].extend(EYELINK_COLS["resolution"]) + # if digital input port values are reported + if "INPUT" in rec_info: + ch_names.extend(EYELINK_COLS["input"]) + col_names["samples"].extend(EYELINK_COLS["input"]) + + # if head target info was reported, add its cols + if "HTARGET" in rec_info: + ch_names.extend(EYELINK_COLS["remote"]) + col_names["samples"].extend(EYELINK_COLS["remote"]) + + return col_names, ch_names + + +def _combine_block_dataframes(processed_blocks: list[dict]): + """Combine dataframes across acquisition blocks. + + Handles cases where blocks have different columns/data in them + (e.g. binocular vs monocular tracking, or switching between the left and right eye). + """ + pd = _check_pandas_installed() + + # Determine unified column structure by collecting all unique column names + # across all acquisition blocks + all_ch_names = [] + all_samples_cols = set() + all_df_types = set() + + for block in processed_blocks: + # The tests assume a certain order of channel names. + # so we can't use a set like we do for the columns. + # bc it randomly orders the channel names. + for ch_name in block["ch_names"]: + if ch_name not in all_ch_names: + all_ch_names.append(ch_name) + if "samples" in block["dfs"]: + all_samples_cols.update(block["dfs"]["samples"].columns) + all_df_types.update(block["dfs"].keys()) + + # The sets randomly ordered the column names. 
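+    # e.g. a binocular block and a later left-eye-only block contribute
+    # different column sets; sorting the union keeps the column order
+    # deterministic across runs.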
+ all_samples_cols = sorted(all_samples_cols) + + # Combine dataframes by type + combined_dfs = {} + + for df_type in all_df_types: + block_dfs = [] + + for block in processed_blocks: + if df_type in block["dfs"]: + # We will update the dfs in-place to conserve memory + block_df = block["dfs"][df_type] + + # For samples dataframes, ensure all have the same columns + if df_type == "samples": + for col in all_samples_cols: + if col not in block_df.columns: + block_df[col] = np.nan + + # Reorder columns + block_df = block_df[all_samples_cols] + + block_dfs.append(block_df) + + if block_dfs: + # Concatenate all blocks for this dataframe type + combined_dfs[df_type] = pd.concat(block_dfs, ignore_index=True) + + # Create recording blocks dataframe from block info + blocks_data = [] + for i, block in enumerate(processed_blocks): + start_time = block["info"]["first_timestamp"] + end_time = block["info"]["last_timestamp"] + blocks_data.append((start_time, end_time, i + 1)) + combined_dfs["recording_blocks"] = pd.DataFrame( + blocks_data, columns=["time", "end_time", "block"] + ) + + return combined_dfs, all_ch_names + + +def _drop_status_col(samples_df): + """Drop STATUS column from samples dataframe. + + see https://github.com/mne-tools/mne-python/issues/11809, and section 4.9.2.1 of + the Eyelink 1000 Plus User Manual, version 1.0.19. We know that the STATUS + column is either 3, 5, 13, or 17 characters long, i.e. "...", ".....", ".C." + """ + status_cols = [] + # we know the first 3 columns will be the time, xpos, ypos + for col in samples_df.columns[3:]: + if samples_df[col][0][0].isnumeric(): + # if the value is numeric, it's not a status column + continue + if len(samples_df[col][0]) in [3, 5, 13, 17]: + status_cols.append(col) + return samples_df.drop(columns=status_cols) + + +def _assign_col_names(col_names, df_dict): + """Assign column names to dataframes. + + Parameters + ---------- + col_names : dict of str to list + Dictionary of column names for each dataframe. + df_dict : dict of str to pandas.DataFrame + Dictionary of dataframes to assign column names to. + """ + skipped_types = [] + for key, df in df_dict.items(): + if key in ("samples", "blinks", "fixations", "saccades", "messages"): + cols = col_names[key] + else: + skipped_types.append(key) + continue + max_cols = len(cols) + if len(df.columns) != len(cols): + if key in ("saccades", "fixations") and len(df.columns) >= 4: + # see https://github.com/mne-tools/mne-python/pull/13357 + logger.debug( + f"{key} events have more columns ({len(df.columns)}) than " + f"expected ({len(cols)}). Using first 4 (eye, time, end_time, " + "duration)." + ) + max_cols = 4 + else: + raise ValueError( + f"Expected the {key} data in this file to have {len(cols)} columns " + f"of data, but got {len(df.columns)}. Expected columns: {cols}." 
+ ) + new_col_names = { + old: new for old, new in zip(df.columns[:max_cols], cols[:max_cols]) + } + df.rename(columns=new_col_names, inplace=True) + logger.debug(f"Skipped assigning column names to {skipped_types} dataframes.") + return df_dict + + +def _set_df_dtypes(df_dict): + from mne.utils import _set_pandas_dtype + + for key, df in df_dict.items(): + if key in ["samples"]: + # convert missing position values to NaN + _set_missing_values(df, df.columns[1:]) + _set_pandas_dtype(df, df.columns, float, verbose="warning") + elif key in ["blinks", "fixations", "saccades"]: + _set_missing_values(df, df.columns[1:]) + _set_pandas_dtype(df, df.columns[1:], float, verbose="warning") + elif key == "messages": + _set_pandas_dtype(df, ["time"], float, verbose="warning") # timestamp + return df_dict + + +def _set_missing_values(df, columns): + """Set missing values to NaN. operates in-place.""" + missing_vals = (".", "MISSING_DATA") + for col in columns: + # we explicitly use numpy instead of pd.replace because it is faster + # if a stim channel (DIN) we should use zero so it can cast to int properly + # in find_events + replacement = 0 if col == "DIN" else np.nan + df[col] = np.where(df[col].isin(missing_vals), replacement, df[col]) + + +def _sort_by_time(df, col="time"): + df.sort_values(col, ascending=True, inplace=True) + df.reset_index(drop=True, inplace=True) + + +def _convert_times(df, first_samp, col="time"): + """Set initial time to 0, converts from ms to seconds in place. + + Parameters + ---------- + df pandas.DataFrame: + One of the dataframes in raw_extras["dfs"] dict. + + first_samp int: + timestamp of the first sample of the recording. This should + be the first sample of the first recording block. + col str (default 'time'): + column name to sort pandas.DataFrame by + + Notes + ----- + Each sample in an Eyelink file has a posix timestamp string. + Subtracts the "first" sample's timestamp from each timestamp. + The "first" sample is inferred to be the first sample of + the first recording block, i.e. the first "START" line. + """ + _sort_by_time(df, col) + for col in df.columns: + if str(col).endswith("time"): # 'time' and 'end_time' cols + df[col] -= first_samp + df[col] /= 1000 + if str(col) in ["duration", "offset"]: + df[col] /= 1000 + return df + + +def _adjust_times( + df, + sfreq, + time_col="time", +): + """Fill missing timestamps if there are multiple recording blocks. + + Parameters + ---------- + df : pandas.DataFrame: + dataframe of the eyetracking data samples, BEFORE + _convert_times() is applied to the dataframe + + sfreq : int | float: + sampling frequency of the data + + time_col : str (default 'time'): + name of column with the timestamps (e.g. 9511881, 9511882, ...) + + Returns + ------- + %(df_return)s + + Notes + ----- + After _parse_recording_blocks, Files with multiple recording blocks will + have missing timestamps for the duration of the period between the blocks. + This would cause the occular annotations (i.e. blinks) to not line up with + the signal. 
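+
+    A minimal sketch with ``sfreq=1000`` (hypothetical values; real input
+    frames have many more columns)::
+
+        df = pd.DataFrame({"time": [1000.0, 1003.0], "xpos_left": [1.0, 2.0]})
+        out = _adjust_times(df, sfreq=1000)
+        # out["time"]      -> 1000.0, 1001.0, 1002.0, 1003.0
+        # out["xpos_left"] -> 1.0, NaN, NaN, 2.0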
+ """ + pd = _check_pandas_installed() + + first, last = df[time_col].iloc[[0, -1]] + step = 1000 / sfreq + df[time_col] = df[time_col].astype(float) + new_times = pd.DataFrame( + np.arange(first, last + step / 2, step), columns=[time_col] + ) + df = pd.merge_asof( + new_times, df, on=time_col, direction="nearest", tolerance=step / 2 + ) + # fix DIN NaN values + if "DIN" in df.columns: + df["DIN"] = df["DIN"].fillna(0) + return df + + +def _find_overlaps(df, max_time=0.05): + """Merge left/right eye events with onset/offset diffs less than max_time. + + Parameters + ---------- + df : pandas.DataFrame + Pandas DataFrame with occular events (fixations, saccades, blinks) + max_time : float (default 0.05) + Time in seconds. Defaults to .05 (50 ms) + + Returns + ------- + DataFrame: %(df_return)s + :class:`pandas.DataFrame` specifying overlapped eye events, if any + + Notes + ----- + The idea is to cumulative sum the boolean values for rows with onset and + offset differences (against the previous row) that are greater than the + max_time. If onset and offset diffs are less than max_time then no_overlap + will become False. Alternatively, if either the onset or offset diff is + greater than max_time, no_overlap becomes True. Cumulatively summing over + these boolean values will leave rows with no_overlap == False unchanged + and hence with the same group number. + """ + pd = _check_pandas_installed() + + if not len(df): + return + df["overlap_start"] = df.sort_values("time")["time"].diff().lt(max_time) + + df["overlap_end"] = df["end_time"].diff().abs().lt(max_time) + + df["no_overlap"] = ~(df["overlap_end"] & df["overlap_start"]) + df["group"] = df["no_overlap"].cumsum() + + # now use groupby on 'group'. If one left and one right eye in group + # the new start/end times are the mean of the two eyes + ovrlp = pd.concat( + [ + pd.DataFrame(g[1].drop(columns="eye").mean()).T + if (len(g[1]) == 2) and (len(g[1].eye.unique()) == 2) + else g[1] # not an overlap, return group unchanged + for g in df.groupby("group") + ] + ) + # overlapped events get a "both" value in the "eye" col + if "eye" in ovrlp.columns: + ovrlp["eye"] = ovrlp["eye"].fillna("both") + else: + ovrlp["eye"] = "both" + tmp_cols = ["overlap_start", "overlap_end", "no_overlap", "group"] + return ovrlp.drop(columns=tmp_cols).reset_index(drop=True) + + +def _convert_href_samples(samples_df): + """Convert HREF eyegaze samples to radians.""" + # grab the xpos and ypos channel names + pos_names = EYELINK_COLS["pos"]["left"][:-1] + EYELINK_COLS["pos"]["right"][:-1] + for col in samples_df.columns: + if col not in pos_names: # 'xpos_left' ... 'ypos_right' + continue + series = _href_to_radian(samples_df[col]) + samples_df[col] = series + return samples_df + + +def _href_to_radian(opposite, f=15_000): + """Convert HREF eyegaze samples to radians. + + Parameters + ---------- + opposite : int + The x or y coordinate in an HREF gaze sample. + f : int (default 15_000) + distance of plane from the eye. Defaults to 15,000 units, which was taken + from the Eyelink 1000 plus user manual. + + Returns + ------- + x or y coordinate in radians + + Notes + ----- + See section 4.4.2.2 in the Eyelink 1000 Plus User Manual + (version 1.0.19) for a detailed description of HREF data. 
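+
+    For example, an HREF coordinate of 7500 with the default ``f=15_000``
+    gives ``np.arcsin(7500 / 15_000)``, i.e. ``np.pi / 6`` (about 0.524 rad,
+    or 30 degrees).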
+ """ + return np.arcsin(opposite / f) + + +def _create_info(ch_names, raw_extras): + """Create info object for RawEyelink.""" + # assign channel type from ch_name + pos_names = EYELINK_COLS["pos"]["left"][:-1] + EYELINK_COLS["pos"]["right"][:-1] + pupil_names = EYELINK_COLS["pos"]["left"][-1] + EYELINK_COLS["pos"]["right"][-1] + ch_types = [ + "eyegaze" + if ch in pos_names + else "pupil" + if ch in pupil_names + else "stim" + if ch == "DIN" + else "misc" + for ch in ch_names + ] + info = create_info(ch_names, raw_extras["sfreq"], ch_types) + # set correct loc for eyepos and pupil channels + for ch_dict in info["chs"]: + # loc index 3 can indicate left or right eye + if ch_dict["ch_name"].endswith("left"): # [x,y,pupil]_left + ch_dict["loc"][3] = -1 # left eye + elif ch_dict["ch_name"].endswith("right"): # [x,y,pupil]_right + ch_dict["loc"][3] = 1 # right eye + else: + logger.debug( + f"leaving index 3 of loc array as" + f" {ch_dict['loc'][3]} for {ch_dict['ch_name']}" + ) + # loc index 4 can indicate x/y coord + if ch_dict["ch_name"].startswith("x"): + ch_dict["loc"][4] = -1 # x-coord + elif ch_dict["ch_name"].startswith("y"): + ch_dict["loc"][4] = 1 # y-coord + else: + logger.debug( + f"leaving index 4 of loc array as" + f" {ch_dict['loc'][4]} for {ch_dict['ch_name']}" + ) + if raw_extras["pos_unit"] == "HREF": + if ch_dict["ch_name"].startswith(("xpos", "ypos")): + ch_dict["unit"] = FIFF.FIFF_UNIT_RAD + return info + + +def _make_eyelink_annots(df_dict, create_annots, apply_offsets): + """Create Annotations for each df in raw_extras.""" + eye_ch_map = { + "L": ("xpos_left", "ypos_left", "pupil_left"), + "R": ("xpos_right", "ypos_right", "pupil_right"), + "both": ( + "xpos_left", + "ypos_left", + "pupil_left", + "xpos_right", + "ypos_right", + "pupil_right", + ), + } + valid_descs = ["blinks", "saccades", "fixations", "messages"] + msg = ( + "create_annotations must be True or a list containing one or" + f" more of {valid_descs}." + ) + wrong_type = msg + f" Got a {type(create_annots)} instead." 
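+    # For example, create_annots=True annotates every supported event type,
+    # while create_annots=["blinks", "messages"] would only add blink
+    # annotations (described as "BAD_blink") and the experiment messages.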
+ if create_annots is True: + descs = valid_descs + else: + if not isinstance(create_annots, list): + raise TypeError(wrong_type) + for desc in create_annots: + if desc not in valid_descs: + raise ValueError(msg + f" Got '{desc}' instead") + descs = create_annots + + annots = None + for key, df in df_dict.items(): + eye_annot_cond = (key in ["blinks", "fixations", "saccades"]) and (key in descs) + if eye_annot_cond: + onsets = df["time"] + durations = df["duration"] + # Create annotations for both eyes + descriptions = key[:-1] # i.e "blink", "fixation", "saccade" + if key == "blinks": + descriptions = "BAD_" + descriptions + ch_names = df["eye"].map(eye_ch_map).tolist() + this_annot = Annotations( + onset=onsets, + duration=durations, + description=descriptions, + ch_names=ch_names, + ) + elif (key in ["messages"]) and (key in descs): + if apply_offsets: + # If df['offset] is all NaNs, time is not changed + onsets = df["time"] + df["offset"].fillna(0) + else: + onsets = df["time"] + durations = [0] * onsets + descriptions = df["event_msg"] + this_annot = Annotations( + onset=onsets, duration=durations, description=descriptions + ) + else: + continue # TODO make df and annotations for Buttons + if not annots: + annots = this_annot + elif annots: + annots += this_annot + if not annots: + warn(f"Annotations for {descs} were requested but none could be made.") + return + return annots + + +def _make_gap_annots(raw_extras, key="recording_blocks"): + """Create Annotations for gap periods between recording blocks.""" + df = raw_extras["dfs"][key] + onsets = df["end_time"].iloc[:-1] + diffs = df["time"].shift(-1) - df["end_time"] + durations = diffs.iloc[:-1] + descriptions = ["BAD_ACQ_SKIP"] * len(onsets) + return Annotations(onset=onsets, duration=durations, description=descriptions) + + +# ======================== Used by read_eyelink-calibration =========================== + + +def _find_recording_start(lines): + """Return the first START line in an SR Research EyeLink ASCII file. + + Parameters + ---------- + lines: A list of strings, which are The lines in an eyelink ASCII file. + + Returns + ------- + The line that contains the info on the start of the recording. + """ + for line in lines: + if line.startswith("START"): + return line + raise ValueError("Could not find the start of the recording.") + + +def _parse_validation_line(line): + """Parse a single line of eyelink validation data. + + Parameters + ---------- + line: A string containing a line of validation data from an eyelink + ASCII file. + + Returns + ------- + A list of tuples containing the validation data. + """ + tokens = line.split() + xy = tokens[-6].strip("[]").split(",") # e.g. '960, 540' + xy_diff = tokens[-2].strip("[]").split(",") # e.g. '-1.5, -2.8' + vals = [float(v) for v in [*xy, tokens[-4], *xy_diff]] + vals[3] += vals[0] # pos_x + eye_x i.e. 960 + -1.5 + vals[4] += vals[1] # pos_y + eye_y + + return tuple(vals) + + +def _parse_calibration( + lines, screen_size=None, screen_distance=None, screen_resolution=None +): + """Parse the lines in the given list and returns a list of Calibration instances. + + Parameters + ---------- + lines: A list of strings, which are The lines in an eyelink ASCII file. + + Returns + ------- + A list containing one or more Calibration instances, + one for each calibration that was recorded in the eyelink ASCII file + data. 
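+
+    For a hypothetical summary line such as
+    ``MSG 7453401 !CAL VALIDATION HV13 LR LEFT GOOD ERROR 0.27 avg. 0.83 max ...``
+    the parser would extract ``model="HV13"``, ``eye="left"``,
+    ``avg_error=0.27`` and ``max_error=0.83``, then read the per-point
+    validation lines that follow.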
+ """ + from ...preprocessing.eyetracking.calibration import Calibration + + regex = re.compile(r"\d+") # for finding numeric characters + calibrations = list() + rec_start = float(_find_recording_start(lines).split()[1]) + + for line_number, line in enumerate(lines): + if ( + "!CAL VALIDATION " in line and "ABORTED" not in line + ): # Start of a calibration + tokens = line.split() + model = tokens[4] # e.g. 'HV13' + this_eye = tokens[6].lower() # e.g. 'left' + timestamp = float(tokens[1]) + onset = (timestamp - rec_start) / 1000.0 # in seconds + avg_error = float(line.split("avg.")[0].split()[-1]) # e.g. 0.3 + max_error = float(line.split("max")[0].split()[-1]) # e.g. 0.9 + + n_points = int(regex.search(model).group()) # e.g. 13 + n_points *= 2 if "LR" in line else 1 # one point per eye if "LR" + + # The next n_point lines contain the validation data + points = [] + line_idx = line_number + 1 + read_points = 0 + while read_points < n_points and line_idx < len(lines): + subline = lines[line_idx].strip() + line_idx += 1 + + if not subline or "!CAL VALIDATION" in subline: + continue # for bino mode, skip the second eye's validation summary + + subline_eye = subline.split("at")[0].split()[-1].lower() # e.g. 'left' + if subline_eye != this_eye: + continue # skip the validation lines for the other eye + point_info = _parse_validation_line(subline) + points.append(point_info) + read_points += 1 + # Convert the list of validation data into a numpy array + positions = np.array([point[:2] for point in points]) + offsets = np.array([point[2] for point in points]) + gaze = np.array([point[3:] for point in points]) + # create the Calibration instance + calibration = Calibration( + onset=onset, + model=model, + eye=this_eye, + avg_error=avg_error, + max_error=max_error, + positions=positions, + offsets=offsets, + gaze=gaze, + screen_size=screen_size, + screen_distance=screen_distance, + screen_resolution=screen_resolution, + ) + calibrations.append(calibration) + return calibrations diff --git a/mne/io/eyelink/eyelink.py b/mne/io/eyelink/eyelink.py new file mode 100644 index 00000000000..192a5555465 --- /dev/null +++ b/mne/io/eyelink/eyelink.py @@ -0,0 +1,133 @@ +"""SR Research Eyelink Load Function.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from pathlib import Path + +from ...utils import ( + _check_fname, + fill_doc, + logger, + verbose, +) +from ..base import BaseRaw +from ._utils import _make_eyelink_annots, _make_gap_annots, _parse_eyelink_ascii + + +@fill_doc +def read_raw_eyelink( + fname, + *, + create_annotations=True, + apply_offsets=False, + find_overlaps=False, + overlap_threshold=0.05, + verbose=None, +) -> "RawEyelink": + """Reader for an Eyelink ``.asc`` file. + + Parameters + ---------- + %(eyelink_fname)s + %(eyelink_create_annotations)s + %(eyelink_apply_offsets)s + %(eyelink_find_overlaps)s + %(eyelink_overlap_threshold)s + %(verbose)s + + Returns + ------- + raw : instance of RawEyelink + A Raw object containing eyetracker data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + It is common for SR Research Eyelink eye trackers to only record data during trials. + To avoid frequent data discontinuities and to ensure that the data is continuous + so that it can be aligned with EEG and MEG data (if applicable), this reader will + preserve the times between recording trials and annotate them with + ``'BAD_ACQ_SKIP'``. 
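+
+    A minimal usage sketch (the file name is hypothetical)::
+
+        raw = read_raw_eyelink(
+            "sub-01_task-vis_eyetrack.asc",
+            create_annotations=["blinks", "saccades", "fixations", "messages"],
+            find_overlaps=True,
+        )
+        raw.plot()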
+ """ + fname = _check_fname(fname, overwrite="read", must_exist=True, name="fname") + + raw_eyelink = RawEyelink( + fname, + create_annotations=create_annotations, + apply_offsets=apply_offsets, + find_overlaps=find_overlaps, + overlap_threshold=overlap_threshold, + verbose=verbose, + ) + return raw_eyelink + + +@fill_doc +class RawEyelink(BaseRaw): + """Raw object from an XXX file. + + Parameters + ---------- + %(eyelink_fname)s + %(eyelink_create_annotations)s + %(eyelink_apply_offsets)s + %(eyelink_find_overlaps)s + %(eyelink_overlap_threshold)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + @verbose + def __init__( + self, + fname, + *, + create_annotations=True, + apply_offsets=False, + find_overlaps=False, + overlap_threshold=0.05, + verbose=None, + ): + logger.info(f"Loading {fname}") + + fname = Path(fname) + + # ======================== Parse ASCII file ========================== + eye_ch_data, info, raw_extras = _parse_eyelink_ascii( + fname, find_overlaps, overlap_threshold, apply_offsets + ) + # ======================== Create Raw Object ========================= + super().__init__( + info, + preload=eye_ch_data, + filenames=[fname], + verbose=verbose, + raw_extras=[raw_extras], + ) + self.set_meas_date(self._raw_extras[0]["dt"]) + + # ======================== Make Annotations ========================= + gap_annots = None + if self._raw_extras[0]["n_blocks"] > 1: + gap_annots = _make_gap_annots(self._raw_extras[0]) + eye_annots = None + if create_annotations: + eye_annots = _make_eyelink_annots( + self._raw_extras[0]["dfs"], create_annotations, apply_offsets + ) + if gap_annots and eye_annots: # set both + self.set_annotations(gap_annots + eye_annots) + elif gap_annots: + self.set_annotations(gap_annots) + elif eye_annots: + self.set_annotations(eye_annots) + else: + logger.info("Not creating any annotations") diff --git a/mne/io/eyelink/tests/__init__.py b/mne/io/eyelink/tests/__init__.py new file mode 100644 index 00000000000..04c673b4e4d --- /dev/null +++ b/mne/io/eyelink/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/eyelink/tests/test_eyelink.py b/mne/io/eyelink/tests/test_eyelink.py new file mode 100644 index 00000000000..22191a4d6a4 --- /dev/null +++ b/mne/io/eyelink/tests/test_eyelink.py @@ -0,0 +1,485 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from pathlib import Path + +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from mne import find_events +from mne._fiff.constants import FIFF +from mne._fiff.pick import _DATA_CH_TYPES_SPLIT +from mne.datasets.testing import data_path, requires_testing_data +from mne.io import read_raw_eyelink +from mne.io.eyelink._utils import _adjust_times, _find_overlaps +from mne.io.tests.test_raw import _test_raw_reader +from mne.utils import _record_warnings + +pd = pytest.importorskip("pandas") + +MAPPING = { + "left": ["xpos_left", "ypos_left", "pupil_left"], + "right": ["xpos_right", "ypos_right", "pupil_right"], + "both": [ + "xpos_left", + "ypos_left", + "pupil_left", + "xpos_right", + "ypos_right", + "pupil_right", + ], +} + +testing_path = data_path(download=False) +fname = testing_path / "eyetrack" / "test_eyelink.asc" +fname_href = testing_path / "eyetrack" / "test_eyelink_HREF.asc" + + +def test_eyetrack_not_data_ch(): + """Eyetrack channels are not data channels.""" + msg = ( + "eyetrack channels are not data channels. Refer to MNE definition" + " of data channels in the glossary section of the documentation." + ) + assert "eyegaze" not in _DATA_CH_TYPES_SPLIT, msg + assert "pupil" not in _DATA_CH_TYPES_SPLIT, msg + + +@requires_testing_data +@pytest.mark.parametrize( + "fname, create_annotations, find_overlaps, apply_offsets", + [ + (fname, False, False, False), + ( + fname, + False, + False, + False, + ), + ( + fname, + True, + False, + False, + ), + ( + fname, + True, + True, + True, + ), + ( + fname, + ["fixations", "saccades", "blinks"], + True, + False, + ), + ], +) +def test_eyelink(fname, create_annotations, find_overlaps, apply_offsets): + """Test reading eyelink asc files.""" + raw = read_raw_eyelink( + fname, + create_annotations=create_annotations, + find_overlaps=find_overlaps, + apply_offsets=apply_offsets, + ) + + # First, tests that shouldn't change based on function arguments + assert raw.info["sfreq"] == 500 # True for this file + assert raw.info["meas_date"].month == 3 + assert raw.info["meas_date"].day == 10 + assert raw.info["meas_date"].year == 2022 + + assert len(raw.info["ch_names"]) == 6 + assert raw.info["chs"][0]["kind"] == FIFF.FIFFV_EYETRACK_CH + assert raw.info["chs"][0]["coil_type"] == FIFF.FIFFV_COIL_EYETRACK_POS + raw.info["chs"][2]["coil_type"] == FIFF.FIFFV_COIL_EYETRACK_PUPIL + + # x_left + assert all(raw.info["chs"][0]["loc"][3:5] == [-1, -1]) + # pupil_left + assert raw.info["chs"][2]["loc"][3] == -1 + assert np.isnan(raw.info["chs"][2]["loc"][4]) + # y_right + assert all(raw.info["chs"][4]["loc"][3:5] == [1, 1]) + assert "RawEyelink" in repr(raw) + + # Test some annotation values for accuracy. 
+ if create_annotations is True and find_overlaps: + orig = raw.info["meas_date"] + df = raw.annotations.to_data_frame() + # Convert annot onset datetimes to seconds, relative to orig_time + df["time_in_sec"] = df["onset"].apply( + lambda x: x.timestamp() - orig.timestamp() + ) + # There is a blink in this data at 8.9 seconds + cond = (df["time_in_sec"] > 8.899) & (df["time_in_sec"] < 8.95) + assert df[cond]["description"].values[0].startswith("BAD_blink") + + # Check that the annotation ch_names are set correctly + assert np.array_equal(raw.annotations[0]["ch_names"], MAPPING["both"]) + + if isinstance(create_annotations, list) and find_overlaps: + # the last pytest parametrize condition should hit this + assert np.array_equal(raw.annotations[0]["ch_names"], MAPPING["both"]) + + +@requires_testing_data +@pytest.mark.parametrize("fname_href", [(fname_href)]) +def test_radian(fname_href): + """Test converting HREF position data to radians.""" + with pytest.warns(RuntimeWarning, match="Annotations for"): + raw = read_raw_eyelink(fname_href, create_annotations=["blinks"]) + # Test channel types + assert raw.get_channel_types() == ["eyegaze", "eyegaze", "pupil"] + + # Test that eyegaze channels have a radian unit + assert raw.info["chs"][0]["unit"] == FIFF.FIFF_UNIT_RAD + assert raw.info["chs"][1]["unit"] == FIFF.FIFF_UNIT_RAD + + # Data in radians should range between -1 and 1 + # Test first channel (xpos_right) + assert raw.get_data()[0].min() > -1 + assert raw.get_data()[0].max() < 1 + + +@requires_testing_data +@pytest.mark.parametrize("fname", [(fname)]) +def test_fill_times(fname): + """Test use of pd.merge_asof in _fill_times. + + We are merging on floating + point values. pd.merge_asof is used so that any differences in floating + point precision between df['samples']['times'] and the times generated + with np.arange don't result in the time columns not merging + correctly - i.e. 1560687.0 and 1560687.000001 should merge. + """ + raw = read_raw_eyelink(fname, create_annotations=False) + sfreq = raw.info["sfreq"] + # just take first 1000 points for testing + df = raw.to_data_frame()[:1000] + # even during blinks, pupil val is 0, so there should be no nans + # in this column + assert not df["pupil_left"].isna().sum() + nan_count = df["pupil_left"].isna().sum() # i.e 0 + df_merged = _adjust_times(df, sfreq) + # If times dont merge correctly, there will be additional rows in + # in df_merged with all nan values + assert df_merged["pupil_left"].isna().sum() == nan_count # i.e. 0 + + +def test_find_overlaps(): + """Test finding overlapping occular events between the left and right eyes. + + In the simulated blink df below, the first two rows + will be considered an overlap because the diff() of both the 'time' and + 'end_time' values is <.05 (50ms). the 3rd and 4th rows will not be + considered an overlap because the diff() of the 'time' values is > .05 + (4.20 - 4.14 = .06). The 5th and 6th rows will not be considered an + overlap because they are both left eye events. 
+ """ + blink_df = pd.DataFrame( + { + "eye": ["L", "R", "L", "R", "L", "L"], + "time": [0.01, 0.04, 4.14, 4.20, 6.50, 6.504], + "end_time": [0.05, 0.08, 4.18, 4.22, 6.60, 6.604], + } + ) + overlap_df = _find_overlaps(blink_df) + assert len(overlap_df["eye"].unique()) == 3 # ['both', 'left', 'right'] + assert len(overlap_df) == 5 # ['both', 'L', 'R', 'L', 'L'] + assert overlap_df["eye"].iloc[0] == "both" + + +@requires_testing_data +@pytest.mark.parametrize("fname", [fname]) +def test_bino_to_mono(tmp_path, fname): + """Test a file that switched from binocular to monocular mid-recording.""" + out_file = tmp_path / "tmp_eyelink.asc" + in_file = Path(fname) + + lines = in_file.read_text("utf-8").splitlines() + # We'll also add some binocular velocity data to increase our testing coverage. + start_idx = [li for li, line in enumerate(lines) if line.startswith("START")][0] + for li, line in enumerate(lines[start_idx:-2], start=start_idx): + tokens = line.split("\t") + event_type = tokens[0] + if event_type == "SAMPLES": + tokens.insert(3, "VEL") + lines[li] = "\t".join(tokens) + elif event_type.isnumeric(): + # fake velocity values for x/y left/right + tokens[4:4] = ["999.1", "999.2", "999.3", "999.4"] + lines[li] = "\t".join(tokens) + end_line = lines[-2] + end_ts = int(end_line.split("\t")[1]) + # Now only left eye data + second_block = [] + new_ts = end_ts + 1 + info = [ + "GAZE", + "LEFT", + "VEL", + "RATE", + "500.00", + "TRACKING", + "CR", + "FILTER", + "2", + ] + start = ["START", f"{new_ts}", "LEFT", "SAMPLES", "EVENTS"] + pupil = ["PUPIL", "DIAMETER"] + samples = ["SAMPLES"] + info + events = ["EVENTS"] + info + second_block.append("\t".join(start) + "\n") + second_block.append("\t".join(pupil) + "\n") + second_block.append("\t".join(samples) + "\n") + second_block.append("\t".join(events) + "\n") + # Some fake data.. 
# x, y, pupil, velicty x/y status + left = ["960", "540", "0.0", "999.1", "999.2", "..."] + NUM_FAKE_SAMPLES = 4000 + for ii in range(NUM_FAKE_SAMPLES): + ts = new_ts + ii + tokens = [f"{ts}"] + left + second_block.append("\t".join(tokens) + "\n") + # interleave some events into the second block + duration = 500 + blink_ts = new_ts + 500 + end_blink = ["EBLINK", "L", f"{blink_ts}", f"{blink_ts + 50}", "106"] + fix_ts = new_ts + 1500 + end_fix = [ + "EFIX", + "L", + f"{fix_ts}", + f"{fix_ts + duration}", + "1616", + "1025.1", + "580.9", + "1289", + ] + sacc_ts = new_ts + 2500 + end_sacc = [ + "ESACC", + "L", + f"{sacc_ts}", + f"{sacc_ts + duration}", + "52", + "1029.6", + "582.3", + "581.7", + "292.5", + "10.30", + "387", + ] + second_block.append("\t".join(end_blink) + "\n") + second_block.append("\t".join(end_fix) + "\n") + second_block.append("\t".join(end_sacc) + "\n") + end_ts = ts + 1 + end_block = ["END", f"{end_ts}", "SAMPLES", "EVENTS", "RES", "45", "45"] + second_block.append("\t".join(end_block)) + lines += second_block + out_file.write_text("\n".join(lines), encoding="utf-8") + + with pytest.warns( + RuntimeWarning, match="This recording switched between monocular and binocular" + ): + raw = read_raw_eyelink(out_file) + want_channels = [ + "xpos_left", + "ypos_left", + "pupil_left", + "xpos_right", + "ypos_right", + "pupil_right", + "xvel_left", + "yvel_left", + "xvel_right", + "yvel_right", + ] + assert len(set(raw.info["ch_names"]).difference(set(want_channels))) == 0 + + +def _simulate_eye_tracking_data(in_file, out_file): + out_file = Path(out_file) + + new_samples_line = ( + "SAMPLES\tPUPIL\tLEFT\tVEL\tRES\tHTARGET\tRATE\t1000.00" + "\tTRACKING\tCR\tFILTER\t2\tINPUT" + ) + with out_file.open("w") as fp: + in_recording_block = False + events = [] + + for line in Path(in_file).read_text().splitlines(): + if line.startswith("START"): + in_recording_block = True + if in_recording_block: + tokens = line.split() + event_type = tokens[0] + if event_type.isnumeric(): # samples + tokens[4:4] = ["100", "20", "45", "45", "127.0"] # vel, res, DIN + tokens.extend(["1497.0", "5189.0", "512.5", "............."]) + elif event_type in ("EFIX", "ESACC"): + if event_type == "ESACC": + tokens[5:7] = [".", "."] # pretend start pos is unknown + tokens.extend(["45", "45"]) # resolution + elif event_type == "SAMPLES": + tokens[1] = "PUPIL" # simulate raw coordinate data + tokens[3:3] = ["VEL", "RES", "HTARGET"] + tokens.append("INPUT") + elif event_type == "EBLINK": + continue # simulate no blink events + elif event_type == "END": + pass + else: + fp.write(f"{line}\n") + continue + events.append("\t".join(tokens)) + if event_type == "END": + fp.write("\n".join(events) + "\n") + events.clear() + in_recording_block = False + else: + fp.write(f"{line}\n") + + fp.write("START\t7452389\tRIGHT\tSAMPLES\tEVENTS\n") + fp.write(f"{new_samples_line}\n") + + for timestamp in np.arange(7452389, 7453390): # simulate a second block + fp.write( + f"{timestamp}\t-2434.0\t-1760.0\t840.0\t100\t20\t45\t45\t127.0\t" + "...\t1497\t5189\t512.5\t.............\n" + ) + + fp.write("END\t7453390\tRIGHT\tSAMPLES\tEVENTS\n") + + +@requires_testing_data +@pytest.mark.parametrize("fname", [fname_href]) +def test_multi_block_misc_channels(fname, tmp_path): + """Test a file with many edge casses. + + This file has multiple acquisition blocks, each tracking a different eye. + The coordinates are in raw units (not pixels or radians). + It has some misc channels (head position, saccade velocity, etc.) 
+ """ + out_file = tmp_path / "tmp_eyelink.asc" + _simulate_eye_tracking_data(fname, out_file) + + with ( + _record_warnings(), + pytest.warns(RuntimeWarning, match="Raw eyegaze coordinates"), + pytest.warns(RuntimeWarning, match="The eye being tracked changed"), + ): + raw = read_raw_eyelink(out_file, apply_offsets=True) + + chs_in_file = [ + "xpos_right", + "ypos_right", + "pupil_right", + "xvel_right", + "yvel_right", + "xres", + "yres", + "DIN", + "x_head", + "y_head", + "distance", + "xpos_left", + "ypos_left", + "pupil_left", + "xvel_left", + "yvel_left", + ] + + assert raw.ch_names == chs_in_file + assert raw.annotations.description[1] == "SYNCTIME" + assert raw.annotations.description[-1] == "BAD_ACQ_SKIP" + assert np.isclose(raw.annotations.onset[-1], 1.001) + assert np.isclose(raw.annotations.duration[-1], 0.1) + + data, times = raw.get_data(return_times=True) + assert not np.isnan(data[0, np.where(times < 1)[0]]).any() + assert np.isnan(data[0, np.logical_and(times > 1, times <= 1.1)]).all() + + # smoke test for reading events with missing samples (should not emit a warning) + find_events(raw, verbose=True) + + +@requires_testing_data +@pytest.mark.parametrize("this_fname", (fname, fname_href)) +def test_basics(this_fname): + """Test basics of reading.""" + _test_raw_reader(read_raw_eyelink, fname=this_fname, test_preloading=False) + + +@requires_testing_data +def test_annotations_without_offset(tmp_path): + """Test read of annotations without offset.""" + out_file = tmp_path / "tmp_eyelink.asc" + + # create fake dataset + with open(fname_href) as file: + lines = file.readlines() + ts = lines[-3].split("\t")[0] + line = f"MSG\t{ts} test string\n" + lines = lines[:-3] + [line] + lines[-3:] + with open(out_file, "w") as file: + file.writelines(lines) + + raw = read_raw_eyelink(out_file, apply_offsets=False) + assert raw.annotations[-1]["description"] == "test string" + onset1 = raw.annotations[-1]["onset"] + assert raw.annotations[1]["description"] == "-2 SYNCTIME" + onset2 = raw.annotations[1]["onset"] + + raw = read_raw_eyelink(out_file, apply_offsets=True) + assert raw.annotations[-1]["description"] == "test string" + assert raw.annotations[1]["description"] == "SYNCTIME" + assert_allclose(raw.annotations[-1]["onset"], onset1) + assert_allclose(raw.annotations[1]["onset"], onset2 - 2 / raw.info["sfreq"]) + + +@requires_testing_data +def test_no_datetime(tmp_path): + """Test reading a file with no datetime.""" + out_file = tmp_path / "tmp_eyelink.asc" + with open(fname) as file: + lines = file.readlines() + # remove the timestamp from the datetime line + lines[1] = lines[1].split(":")[0] + ":" + with open(out_file, "w") as file: + file.writelines(lines) + raw = read_raw_eyelink(out_file) + assert raw.info["meas_date"] is None + # Sanity check that a None meas_date doesn't change annotation times + # First annotation in this file is a fixation at 0.004 seconds + np.testing.assert_allclose(raw.annotations.onset[0], 0.004) + + +@requires_testing_data +def test_href_eye_events(tmp_path): + """Test Parsing file where Eye Event Data option was set to 'HREF'.""" + out_file = tmp_path / "tmp_eyelink.asc" + lines = fname_href.read_text("utf-8").splitlines() + for li, line in enumerate(lines): + if not line.startswith(("ESACC", "EFIX")): + continue + tokens = line.split() + if line.startswith("ESACC"): + href_sacc_vals = ["9999", "9999", "9999", "9999", "99.99", "999"] + tokens[5:5] = href_sacc_vals # add href saccade values + elif line.startswith("EFIX"): + tokens = line.split() + 
href_fix_vals = ["9999.9", "9999.9", "999"] + tokens[5:3] = href_fix_vals + new_line = "\t".join(tokens) + "\n" + lines[li] = new_line + out_file.write_text("\n".join(lines), encoding="utf-8") + raw = read_raw_eyelink(out_file) + # Just check that we actually parsed the Saccade and Fixation events + assert "saccade" in raw.annotations.description + assert "fixation" in raw.annotations.description diff --git a/mne/io/fieldtrip/__init__.py b/mne/io/fieldtrip/__init__.py index 2085c931925..efdd3d2eda6 100644 --- a/mne/io/fieldtrip/__init__.py +++ b/mne/io/fieldtrip/__init__.py @@ -1,8 +1,5 @@ -# -*- coding: UTF-8 -*- -# Authors: Thomas Hartmann -# Dirk Gütlin -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from .fieldtrip import (read_evoked_fieldtrip, read_epochs_fieldtrip, - read_raw_fieldtrip) +from .fieldtrip import read_evoked_fieldtrip, read_epochs_fieldtrip, read_raw_fieldtrip diff --git a/mne/io/fieldtrip/fieldtrip.py b/mne/io/fieldtrip/fieldtrip.py index 3c7cfb3394c..c8521722003 100644 --- a/mne/io/fieldtrip/fieldtrip.py +++ b/mne/io/fieldtrip/fieldtrip.py @@ -1,20 +1,23 @@ -# -*- coding: UTF-8 -*- -# Authors: Thomas Hartmann -# Dirk Gütlin -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np -from .utils import _create_info, _set_tmin, _create_events, \ - _create_event_metadata, _validate_ft_struct -from ...utils import _check_fname, _import_pymatreader_funcs -from ..array.array import RawArray from ...epochs import EpochsArray from ...evoked import EvokedArray +from ...utils import _check_fname, _import_pymatreader_funcs +from ..array._array import RawArray +from .utils import ( + _create_event_metadata, + _create_events, + _create_info, + _set_tmin, + _validate_ft_struct, +) -def read_raw_fieldtrip(fname, info, data_name='data'): +def read_raw_fieldtrip(fname, info, data_name="data") -> RawArray: """Load continuous (raw) data from a FieldTrip preprocessing structure. This function expects to find single trial raw data (FT_DATATYPE_RAW) in @@ -49,12 +52,10 @@ def read_raw_fieldtrip(fname, info, data_name='data'): -------- mne.io.Raw : Documentation of attributes and methods of RawArray. """ - read_mat = _import_pymatreader_funcs('FieldTrip I/O') - fname = _check_fname(fname, overwrite='read', must_exist=True) + read_mat = _import_pymatreader_funcs("FieldTrip I/O") + fname = _check_fname(fname, overwrite="read", must_exist=True) - ft_struct = read_mat(fname, - ignore_fields=['previous'], - variable_names=[data_name]) + ft_struct = read_mat(fname, ignore_fields=["previous"], variable_names=[data_name]) # load data and set ft_struct to the heading dictionary ft_struct = ft_struct[data_name] @@ -62,7 +63,7 @@ def read_raw_fieldtrip(fname, info, data_name='data'): _validate_ft_struct(ft_struct) info = _create_info(ft_struct, info) # create info structure - data = np.array(ft_struct['trial']) # create the main data array + data = np.array(ft_struct["trial"]) # create the main data array if data.ndim > 2: data = np.squeeze(data) @@ -71,15 +72,17 @@ def read_raw_fieldtrip(fname, info, data_name='data'): data = data[np.newaxis, ...] 
if data.ndim != 2: - raise RuntimeError('The data you are trying to load does not seem to ' - 'be raw data') + raise RuntimeError( + "The data you are trying to load does not seem to be raw data" + ) raw = RawArray(data, info) # create an MNE RawArray return raw -def read_epochs_fieldtrip(fname, info, data_name='data', - trialinfo_column=0): +def read_epochs_fieldtrip( + fname, info, data_name="data", trialinfo_column=0 +) -> EpochsArray: """Load epoched data from a FieldTrip preprocessing structure. This function expects to find epoched data in the structure data_name is @@ -114,10 +117,8 @@ def read_epochs_fieldtrip(fname, info, data_name='data', epochs : instance of EpochsArray An EpochsArray containing the loaded data. """ - read_mat = _import_pymatreader_funcs('FieldTrip I/O') - ft_struct = read_mat(fname, - ignore_fields=['previous'], - variable_names=[data_name]) + read_mat = _import_pymatreader_funcs("FieldTrip I/O") + ft_struct = read_mat(fname, ignore_fields=["previous"], variable_names=[data_name]) # load data and set ft_struct to the heading dictionary ft_struct = ft_struct[data_name] @@ -125,7 +126,7 @@ def read_epochs_fieldtrip(fname, info, data_name='data', _validate_ft_struct(ft_struct) info = _create_info(ft_struct, info) # create info structure - data = np.array(ft_struct['trial']) # create the epochs data array + data = np.array(ft_struct["trial"]) # create the epochs data array events = _create_events(ft_struct, trialinfo_column) if events is not None: metadata = _create_event_metadata(ft_struct) @@ -133,13 +134,13 @@ def read_epochs_fieldtrip(fname, info, data_name='data', metadata = None tmin = _set_tmin(ft_struct) # create start time - epochs = EpochsArray(data=data, info=info, tmin=tmin, - events=events, metadata=metadata, proj=False) + epochs = EpochsArray( + data=data, info=info, tmin=tmin, events=events, metadata=metadata, proj=False + ) return epochs -def read_evoked_fieldtrip(fname, info, comment=None, - data_name='data'): +def read_evoked_fieldtrip(fname, info, comment=None, data_name="data"): """Load evoked data from a FieldTrip timelocked structure. This function expects to find timelocked data in the structure data_name is @@ -171,16 +172,14 @@ def read_evoked_fieldtrip(fname, info, comment=None, evoked : instance of EvokedArray An EvokedArray containing the loaded data. """ - read_mat = _import_pymatreader_funcs('FieldTrip I/O') - ft_struct = read_mat(fname, - ignore_fields=['previous'], - variable_names=[data_name]) + read_mat = _import_pymatreader_funcs("FieldTrip I/O") + ft_struct = read_mat(fname, ignore_fields=["previous"], variable_names=[data_name]) ft_struct = ft_struct[data_name] _validate_ft_struct(ft_struct) info = _create_info(ft_struct, info) # create info structure - data_evoked = ft_struct['avg'] # create evoked data + data_evoked = ft_struct["avg"] # create evoked data evoked = EvokedArray(data_evoked, info, comment=comment) return evoked diff --git a/mne/io/fieldtrip/tests/__init__.py b/mne/io/fieldtrip/tests/__init__.py index 047bd9a9574..04c673b4e4d 100644 --- a/mne/io/fieldtrip/tests/__init__.py +++ b/mne/io/fieldtrip/tests/__init__.py @@ -1,5 +1,3 @@ -# -*- coding: UTF-8 -*- -# Authors: Thomas Hartmann -# Dirk Gütlin -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
diff --git a/mne/io/fieldtrip/tests/helpers.py b/mne/io/fieldtrip/tests/helpers.py index 076c7f9053a..0a0f6671d6b 100644 --- a/mne/io/fieldtrip/tests/helpers.py +++ b/mne/io/fieldtrip/tests/helpers.py @@ -1,58 +1,87 @@ -# -*- coding: UTF-8 -*- -# Authors: Thomas Hartmann -# Dirk Gütlin -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause -from functools import partial +# Copyright the MNE-Python contributors. + import os +from functools import partial import numpy as np import mne from mne.utils import object_diff - -info_ignored_fields = ('file_id', 'hpi_results', 'hpi_meas', 'meas_id', - 'meas_date', 'highpass', 'lowpass', 'subject_info', - 'hpi_subsystem', 'experimenter', 'description', - 'proj_id', 'proj_name', 'line_freq', 'gantry_angle', - 'dev_head_t', 'bads', 'ctf_head_t', 'dev_ctf_t', - 'dig') - -ch_ignore_fields = ('logno', 'cal', 'range', 'scanno', 'coil_type', 'kind', - 'loc', 'coord_frame', 'unit') - -info_long_fields = ('hpi_meas', 'projs') - -system_to_reader_fn_dict = {'neuromag306': mne.io.read_raw_fif, - 'CNT': partial(mne.io.read_raw_cnt), - 'CTF': partial(mne.io.read_raw_ctf, - clean_names=True), - 'BTI': partial(mne.io.read_raw_bti, - head_shape_fname=None, - rename_channels=False, - sort_by_ch_name=False), - 'EGI': mne.io.read_raw_egi, - 'eximia': mne.io.read_raw_eximia} - -ignore_channels_dict = {'BTI': ['MUz', 'MLx', 'MLy', 'MUx', 'MUy', 'MLz']} - -drop_extra_chans_dict = {'EGI': ['STI 014', 'DIN1', 'DIN3', - 'DIN7', 'DIN4', 'DIN5', 'DIN2'], - 'eximia': ['GateIn', 'Trig1', 'Trig2']} - -system_decimal_accuracy_dict = {'CNT': 2} - -pandas_not_found_warning_msg = 'The Pandas library is not installed. Not ' \ - 'returning the original trialinfo matrix as ' \ - 'metadata.' +info_ignored_fields = ( + "file_id", + "hpi_results", + "hpi_meas", + "meas_id", + "meas_date", + "highpass", + "lowpass", + "subject_info", + "hpi_subsystem", + "experimenter", + "description", + "proj_id", + "proj_name", + "line_freq", + "gantry_angle", + "dev_head_t", + "bads", + "ctf_head_t", + "dev_ctf_t", + "dig", +) + +ch_ignore_fields = ( + "logno", + "cal", + "range", + "scanno", + "coil_type", + "kind", + "loc", + "coord_frame", + "unit", +) + +info_long_fields = ("hpi_meas", "projs") + +system_to_reader_fn_dict = { + "neuromag306": mne.io.read_raw_fif, + "CNT": partial(mne.io.read_raw_cnt), + "CTF": partial(mne.io.read_raw_ctf, clean_names=True), + "BTI": partial( + mne.io.read_raw_bti, + head_shape_fname=None, + rename_channels=False, + sort_by_ch_name=False, + ), + "EGI": mne.io.read_raw_egi, + "eximia": mne.io.read_raw_eximia, +} + +ignore_channels_dict = {"BTI": ["MUz", "MLx", "MLy", "MUx", "MUy", "MLz"]} + +drop_extra_chans_dict = { + "EGI": ["STI 014", "DIN1", "DIN3", "DIN7", "DIN4", "DIN5", "DIN2"], + "eximia": ["GateIn", "Trig1", "Trig2"], +} + +system_decimal_accuracy_dict = {"CNT": 2} + +pandas_not_found_warning_msg = ( + "The Pandas library is not installed. Not " + "returning the original trialinfo matrix as " + "metadata." 
+) testing_path = mne.datasets.testing.data_path(download=False) def _remove_ignored_ch_fields(info): - if 'chs' in info: - for cur_ch in info['chs']: + if "chs" in info: + for cur_ch in info["chs"]: for cur_field in ch_ignore_fields: if cur_field in cur_ch: del cur_ch[cur_field] @@ -80,8 +109,10 @@ def get_data_paths(system): def get_cfg_local(system): """Return cfg_local field for the system.""" from pymatreader import read_mat - cfg_local = read_mat(os.path.join(get_data_paths(system), 'raw_v7.mat'), - ['cfg_local'])['cfg_local'] + + cfg_local = read_mat( + os.path.join(get_data_paths(system), "raw_v7.mat"), ["cfg_local"] + )["cfg_local"] return cfg_local @@ -90,12 +121,12 @@ def get_raw_info(system): """Return the info dict of the raw data.""" cfg_local = get_cfg_local(system) - raw_data_file = os.path.join(testing_path, cfg_local['file_name']) + raw_data_file = os.path.join(testing_path, cfg_local["file_name"]) reader_function = system_to_reader_fn_dict[system] info = reader_function(raw_data_file, preload=False).info with info._unlock(): - info['comps'] = [] + info["comps"] = [] return info @@ -103,23 +134,23 @@ def get_raw_data(system, drop_extra_chs=False): """Find, load and process the raw data.""" cfg_local = get_cfg_local(system) - raw_data_file = os.path.join(testing_path, cfg_local['file_name']) + raw_data_file = os.path.join(testing_path, cfg_local["file_name"]) reader_function = system_to_reader_fn_dict[system] raw_data = reader_function(raw_data_file, preload=True) - crop = min(cfg_local['crop'], np.max(raw_data.times)) - if system == 'eximia': - crop -= 0.5 * (1.0 / raw_data.info['sfreq']) + crop = min(cfg_local["crop"], np.max(raw_data.times)) + if system == "eximia": + crop -= 0.5 * (1.0 / raw_data.info["sfreq"]) raw_data.crop(0, crop) - raw_data.del_proj('all') + raw_data.del_proj("all") with raw_data.info._unlock(): - raw_data.info['comps'] = [] - raw_data.drop_channels(cfg_local['removed_chan_names']) + raw_data.info["comps"] = [] + raw_data.drop_channels(cfg_local["removed_chan_names"]) - if system in ['EGI']: + if system in ["EGI"]: raw_data._data[0:-1, :] = raw_data._data[0:-1, :] * 1e6 - if system in ['CNT']: + if system in ["CNT"]: raw_data._data = raw_data._data * 1e6 if system in ignore_channels_dict: @@ -136,29 +167,32 @@ def get_epochs(system): cfg_local = get_cfg_local(system) raw_data = get_raw_data(system) - if cfg_local['eventtype'] in raw_data.ch_names: - stim_channel = cfg_local['eventtype'] + if cfg_local["eventtype"] in raw_data.ch_names: + stim_channel = cfg_local["eventtype"] else: - stim_channel = 'STI 014' + stim_channel = "STI 014" - if system == 'CNT': + if system == "CNT": events, event_id = mne.events_from_annotations(raw_data) events[:, 0] = events[:, 0] + 1 else: - events = mne.find_events(raw_data, stim_channel=stim_channel, - shortest_event=1) + events = mne.find_events(raw_data, stim_channel=stim_channel, shortest_event=1) - if isinstance(cfg_local['eventvalue'], np.ndarray): - event_id = list(cfg_local['eventvalue'].astype('int')) + if isinstance(cfg_local["eventvalue"], np.ndarray): + event_id = list(cfg_local["eventvalue"].astype("int")) else: - event_id = [int(cfg_local['eventvalue'])] + event_id = [int(cfg_local["eventvalue"])] - event_id = [id for id in event_id if id in events[:, 2]] + event_id = [id_ for id_ in event_id if id_ in events[:, 2]] - epochs = mne.Epochs(raw_data, events=events, - event_id=event_id, - tmin=-cfg_local['prestim'], - tmax=cfg_local['poststim'], baseline=None) + epochs = mne.Epochs( + raw_data, + 
events=events, + event_id=event_id, + tmin=-cfg_local["prestim"], + tmax=cfg_local["poststim"], + baseline=None, + ) return epochs @@ -169,7 +203,7 @@ def get_evoked(system): return epochs.average(picks=np.arange(len(epochs.ch_names))) -def check_info_fields(expected, actual, has_raw_info, ignore_long=True): +def check_info_fields(expected, actual, has_raw_info): """ Check if info fields are equal. @@ -188,12 +222,12 @@ def check_info_fields(expected, actual, has_raw_info, ignore_long=True): # we annoyingly have two ways of representing this, so just always use # an empty list here for obj in (expected, actual): - if obj.get('dig', None) is None: + if obj.get("dig", None) is None: with obj._unlock(): - obj['dig'] = [] + obj["dig"] = [] d = object_diff(actual, expected, allclose=True) - assert d == '', d + assert d == "", d def check_data(expected, actual, system): diff --git a/mne/io/fieldtrip/tests/test_fieldtrip.py b/mne/io/fieldtrip/tests/test_fieldtrip.py index 080ee0a7eda..341a67df7c4 100644 --- a/mne/io/fieldtrip/tests/test_fieldtrip.py +++ b/mne/io/fieldtrip/tests/test_fieldtrip.py @@ -1,25 +1,29 @@ -# -*- coding: UTF-8 -*- -# Authors: Thomas Hartmann -# Dirk Gütlin -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import copy import itertools -import os from contextlib import nullcontext -import pytest import numpy as np +import pytest import mne from mne.datasets import testing from mne.io import read_raw_fieldtrip -from mne.io.fieldtrip.utils import NOINFO_WARNING, _create_events from mne.io.fieldtrip.tests.helpers import ( - check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, - pandas_not_found_warning_msg, get_raw_info, check_data, - assert_warning_in_record) + assert_warning_in_record, + check_data, + check_info_fields, + get_data_paths, + get_epochs, + get_evoked, + get_raw_data, + get_raw_info, + pandas_not_found_warning_msg, +) +from mne.io.fieldtrip.utils import NOINFO_WARNING, _create_events from mne.io.tests.test_raw import _test_raw_reader from mne.utils import _check_pandas_installed, _record_warnings @@ -27,28 +31,26 @@ # names. # EGI: no calibration done in FT. 
so data is VERY different -all_systems_raw = ['neuromag306', 'CTF', 'CNT', 'BTI', 'eximia'] -all_systems_epochs = ['neuromag306', 'CTF', 'CNT'] -all_versions = ['v7', 'v73'] +all_systems_raw = ["neuromag306", "CTF", "CNT", "BTI", "eximia"] +all_systems_epochs = ["neuromag306", "CTF", "CNT"] +all_versions = ["v7", "v73"] use_info = [True, False] -all_test_params_raw = list(itertools.product(all_systems_raw, all_versions, - use_info)) -all_test_params_epochs = list(itertools.product(all_systems_epochs, - all_versions, - use_info)) +all_test_params_raw = list(itertools.product(all_systems_raw, all_versions, use_info)) +all_test_params_epochs = list( + itertools.product(all_systems_epochs, all_versions, use_info) +) # just for speed we skip some slowest ones -- the coverage should still # be sufficient for obj in (all_test_params_epochs, all_test_params_raw): - for key in [('CTF', 'v73', True), ('neuromag306', 'v73', False)]: + for key in [("CTF", "v73", True), ("neuromag306", "v73", False)]: obj.pop(obj.index(key)) for ki, key in enumerate(obj): - if key[1] == 'v73': + if key[1] == "v73": obj[ki] = pytest.param(*obj[ki], marks=pytest.mark.slowtest) -no_info_warning = {'expected_warning': RuntimeWarning, - 'match': NOINFO_WARNING} +no_info_warning = {"expected_warning": RuntimeWarning, "match": NOINFO_WARNING} -pymatreader = pytest.importorskip('pymatreader') # module-level +pymatreader = pytest.importorskip("pymatreader") # module-level testing_path = mne.datasets.testing.data_path(download=False) @@ -56,24 +58,21 @@ @testing.requires_testing_data # Reading the sample CNT data results in a RuntimeWarning because it cannot # parse the measurement date. We need to ignore that warning. -@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') -@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') -@pytest.mark.parametrize('cur_system, version, use_info', - all_test_params_epochs) +@pytest.mark.filterwarnings("ignore:.*parse meas date.*:RuntimeWarning") +@pytest.mark.filterwarnings("ignore:.*number of bytes.*:RuntimeWarning") +@pytest.mark.parametrize("cur_system, version, use_info", all_test_params_epochs) def test_read_evoked(cur_system, version, use_info): """Test comparing reading an Evoked object and the FieldTrip version.""" test_data_folder_ft = get_data_paths(cur_system) mne_avg = get_evoked(cur_system) + cur_fname = test_data_folder_ft / f"averaged_{version}.mat" if use_info: info = get_raw_info(cur_system) - ctx = nullcontext() + avg_ft = mne.io.read_evoked_fieldtrip(cur_fname, info) else: info = None - ctx = pytest.warns(**no_info_warning) - - cur_fname = test_data_folder_ft / f"averaged_{version}.mat" - with ctx: - avg_ft = mne.io.read_evoked_fieldtrip(cur_fname, info) + with _record_warnings(), pytest.warns(**no_info_warning): + avg_ft = mne.io.read_evoked_fieldtrip(cur_fname, info) mne_data = mne_avg.data[:, :-1] ft_data = avg_ft.data @@ -85,21 +84,16 @@ def test_read_evoked(cur_system, version, use_info): @testing.requires_testing_data # Reading the sample CNT data results in a RuntimeWarning because it cannot # parse the measurement date. We need to ignore that warning. 
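
# Editor's note (illustration, not part of the patch): the conditional-warning
# pattern used by the tests above, reduced to a self-contained sketch. The
# function "maybe_warn" and the warning text are hypothetical.
import warnings
from contextlib import nullcontext

import pytest


def maybe_warn(noisy):
    if noisy:
        warnings.warn("no info provided", RuntimeWarning)


@pytest.mark.parametrize("noisy", [True, False])
def test_maybe_warn(noisy):
    # Expect the warning only when it should actually be emitted; otherwise use
    # a no-op context so the same "with" block covers both cases.
    ctx = pytest.warns(RuntimeWarning, match="no info") if noisy else nullcontext()
    with ctx:
        maybe_warn(noisy)
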
-@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') -@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') -@pytest.mark.parametrize('cur_system, version, use_info', - all_test_params_epochs) -# Strange, non-deterministic Pandas errors: -# "ValueError: cannot expose native-only dtype 'g' in non-native -# byte order '<' via buffer interface" -@pytest.mark.skipif(os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true', - reason='Pandas problem on Azure CI') +@pytest.mark.filterwarnings("ignore:.*parse meas date.*:RuntimeWarning") +@pytest.mark.filterwarnings("ignore:.*number of bytes.*:RuntimeWarning") +@pytest.mark.parametrize("cur_system, version, use_info", all_test_params_epochs) def test_read_epochs(cur_system, version, use_info, monkeypatch): """Test comparing reading an Epochs object and the FieldTrip version.""" pandas = _check_pandas_installed(strict=False) has_pandas = pandas is not False test_data_folder_ft = get_data_paths(cur_system) mne_epoched = get_epochs(cur_system) + cur_fname = test_data_folder_ft / f"epoched_{version}.mat" if use_info: info = get_raw_info(cur_system) ctx = nullcontext() @@ -107,9 +101,8 @@ def test_read_epochs(cur_system, version, use_info, monkeypatch): info = None ctx = pytest.warns(**no_info_warning) - cur_fname = test_data_folder_ft / f"epoched_{version}.mat" if has_pandas: - with ctx: + with _record_warnings(), ctx: epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info) assert isinstance(epoched_ft.metadata, pandas.DataFrame) else: @@ -120,8 +113,8 @@ def test_read_epochs(cur_system, version, use_info, monkeypatch): if info is None: assert_warning_in_record(NOINFO_WARNING, warn_record) - mne_data = mne_epoched.get_data()[:, :, :-1] - ft_data = epoched_ft.get_data() + mne_data = mne_epoched.get_data(copy=False)[:, :, :-1] + ft_data = epoched_ft.get_data(copy=False) check_data(mne_data, ft_data, cur_system) check_info_fields(mne_epoched, epoched_ft, use_info) @@ -130,21 +123,21 @@ def test_read_epochs(cur_system, version, use_info, monkeypatch): # weird sfreq def modify_mat(fname, variable_names=None, ignore_fields=None): out = read_mat(fname, variable_names, ignore_fields) - if 'fsample' in out['data']: - out['data']['fsample'] = np.repeat(out['data']['fsample'], 2) + if "fsample" in out["data"]: + out["data"]["fsample"] = np.repeat(out["data"]["fsample"], 2) return out - monkeypatch.setattr(pymatreader, 'read_mat', modify_mat) - with pytest.warns(RuntimeWarning, match='multiple'): + monkeypatch.setattr(pymatreader, "read_mat", modify_mat) + with _record_warnings(), pytest.warns(RuntimeWarning, match="multiple"): mne.io.read_epochs_fieldtrip(cur_fname, info) @testing.requires_testing_data # Reading the sample CNT data results in a RuntimeWarning because it cannot # parse the measurement date. We need to ignore that warning. 
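
# Editor's note (illustration, not part of the patch): the monkeypatch pattern used
# above to feed a duplicated 'fsample' through the loader, sketched here with a
# hypothetical stand-in for pymatreader.read_mat.
import sys

import numpy as np


def load_struct(fname):
    # Hypothetical loader returning a FieldTrip-like dict.
    return {"data": {"fsample": np.array(1000.0)}}


def test_duplicate_sfreq(monkeypatch):
    original_load = load_struct

    def modified_load(fname):
        out = original_load(fname)
        out["data"]["fsample"] = np.repeat(out["data"]["fsample"], 2)  # two sample rates
        return out

    # Replace the module-level name for the duration of this test only.
    monkeypatch.setattr(sys.modules[__name__], "load_struct", modified_load)
    assert load_struct("dummy.mat")["data"]["fsample"].shape == (2,)
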
-@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') -@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') -@pytest.mark.parametrize('cur_system, version, use_info', all_test_params_raw) +@pytest.mark.filterwarnings("ignore:.*parse meas date.*:RuntimeWarning") +@pytest.mark.filterwarnings("ignore:.*number of bytes.*:RuntimeWarning") +@pytest.mark.parametrize("cur_system, version, use_info", all_test_params_raw) def test_read_raw_fieldtrip(cur_system, version, use_info): """Test comparing reading a raw fiff file and the FieldTrip version.""" # Load the raw fiff file with mne @@ -152,8 +145,8 @@ def test_read_raw_fieldtrip(cur_system, version, use_info): raw_fiff_mne = get_raw_data(cur_system, drop_extra_chs=True) if use_info: info = get_raw_info(cur_system) - if cur_system in ('BTI', 'eximia'): - ctx = pytest.warns(RuntimeWarning, match='cannot be found in') + if cur_system in ("BTI", "eximia"): + ctx = pytest.warns(RuntimeWarning, match="cannot be found in") else: ctx = nullcontext() else: @@ -162,27 +155,27 @@ def test_read_raw_fieldtrip(cur_system, version, use_info): cur_fname = test_data_folder_ft / f"raw_{version}.mat" - with ctx: + with _record_warnings(), ctx: raw_fiff_ft = mne.io.read_raw_fieldtrip(cur_fname, info) - if cur_system == 'BTI' and not use_info: - raw_fiff_ft.drop_channels(['MzA', 'MxA', 'MyaA', - 'MyA', 'MxaA', 'MzaA']) + if cur_system == "BTI" and not use_info: + raw_fiff_ft.drop_channels(["MzA", "MxA", "MyaA", "MyA", "MxaA", "MzaA"]) - if cur_system == 'eximia' and not use_info: - raw_fiff_ft.drop_channels(['TRIG2', 'TRIG1', 'GATE']) + if cur_system == "eximia" and not use_info: + raw_fiff_ft.drop_channels(["TRIG2", "TRIG1", "GATE"]) # Check that the data was loaded correctly - check_data(raw_fiff_mne.get_data(), - raw_fiff_ft.get_data(), - cur_system) + check_data(raw_fiff_mne.get_data(), raw_fiff_ft.get_data(), cur_system) # standard tests with _record_warnings(): _test_raw_reader( - read_raw_fieldtrip, fname=cur_fname, info=info, + read_raw_fieldtrip, + fname=cur_fname, + info=info, test_preloading=False, - test_kwargs=False) # TODO: This should probably work + test_kwargs=False, + ) # TODO: This should probably work # Check info field check_info_fields(raw_fiff_mne, raw_fiff_ft, use_info) @@ -191,8 +184,8 @@ def test_read_raw_fieldtrip(cur_system, version, use_info): @testing.requires_testing_data def test_load_epoched_as_raw(): """Test whether exception is thrown when loading epochs as raw.""" - test_data_folder_ft = get_data_paths('neuromag306') - info = get_raw_info('neuromag306') + test_data_folder_ft = get_data_paths("neuromag306") + info = get_raw_info("neuromag306") cur_fname = test_data_folder_ft / "epoched_v7.mat" with pytest.raises(RuntimeError): @@ -202,8 +195,8 @@ def test_load_epoched_as_raw(): @testing.requires_testing_data def test_invalid_trialinfocolumn(): """Test for exceptions when using wrong values for trialinfo parameter.""" - test_data_folder_ft = get_data_paths('neuromag306') - info = get_raw_info('neuromag306') + test_data_folder_ft = get_data_paths("neuromag306") + info = get_raw_info("neuromag306") cur_fname = test_data_folder_ft / "epoched_v7.mat" with pytest.raises(ValueError): @@ -216,14 +209,17 @@ def test_invalid_trialinfocolumn(): @testing.requires_testing_data def test_create_events(): """Test 2dim trialinfo fields.""" - test_data_folder_ft = get_data_paths('neuromag306') + test_data_folder_ft = get_data_paths("neuromag306") cur_fname = test_data_folder_ft / "epoched_v7.mat" - 
original_data = pymatreader.read_mat(cur_fname, ['data', ]) + original_data = pymatreader.read_mat( + cur_fname, + [ + "data", + ], + ) new_data = copy.deepcopy(original_data) - new_data['trialinfo'] = np.array([[1, 2, 3, 4], - [1, 2, 3, 4], - [1, 2, 3, 4]]) + new_data["trialinfo"] = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) with pytest.raises(ValueError): _create_events(new_data, -1) @@ -237,14 +233,10 @@ def test_create_events(): @testing.requires_testing_data -@pytest.mark.parametrize('version', all_versions) +@pytest.mark.parametrize("version", all_versions) def test_one_channel_elec_bug(version): """Test if loading data having only one elec in the elec field works.""" - fname = ( - testing_path - / "fieldtrip" - / f"one_channel_elec_bug_data_{version}.mat" - ) + fname = testing_path / "fieldtrip" / f"one_channel_elec_bug_data_{version}.mat" with pytest.warns(**no_info_warning): mne.io.read_raw_fieldtrip(fname, info=None) @@ -253,65 +245,70 @@ def test_one_channel_elec_bug(version): @testing.requires_testing_data # Reading the sample CNT data results in a RuntimeWarning because it cannot # parse the measurement date. We need to ignore that warning. -@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') -@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') -@pytest.mark.parametrize('version', all_versions) -@pytest.mark.parametrize('type', ['averaged', 'epoched', 'raw']) -def test_throw_exception_on_cellarray(version, type): +@pytest.mark.filterwarnings("ignore:.*parse meas date.*:RuntimeWarning") +@pytest.mark.filterwarnings("ignore:.*number of bytes.*:RuntimeWarning") +@pytest.mark.parametrize("version", all_versions) +@pytest.mark.parametrize("type_", ["averaged", "epoched", "raw"]) +def test_throw_exception_on_cellarray(version, type_): """Test for a meaningful exception when the data is a cell array.""" - fname = get_data_paths("cellarray") / f"{type}_{version}.mat" - info = get_raw_info('CNT') - with pytest.raises(RuntimeError, match='Loading of data in cell arrays ' - 'is not supported'): - if type == 'averaged': + fname = get_data_paths("cellarray") / f"{type_}_{version}.mat" + info = get_raw_info("CNT") + with pytest.raises( + RuntimeError, match="Loading of data in cell arrays is not supported" + ): + if type_ == "averaged": mne.read_evoked_fieldtrip(fname, info) - elif type == 'epoched': + elif type_ == "epoched": mne.read_epochs_fieldtrip(fname, info) - elif type == 'raw': + elif type_ == "raw": mne.io.read_raw_fieldtrip(fname, info) @testing.requires_testing_data def test_with_missing_channels(): """Test _create_info when channels are missing from info.""" - cur_system = 'neuromag306' + cur_system = "neuromag306" test_data_folder_ft = get_data_paths(cur_system) info = get_raw_info(cur_system) - del info['chs'][1:20] + del info["chs"][1:20] info._update_redundant() with pytest.warns(RuntimeWarning): mne.io.read_raw_fieldtrip(test_data_folder_ft / "raw_v7.mat", info) - mne.read_evoked_fieldtrip( - test_data_folder_ft / "averaged_v7.mat", info) + mne.read_evoked_fieldtrip(test_data_folder_ft / "averaged_v7.mat", info) mne.read_epochs_fieldtrip(test_data_folder_ft / "epoched_v7.mat", info) @testing.requires_testing_data -@pytest.mark.filterwarnings('ignore: Importing FieldTrip data without an info') -@pytest.mark.filterwarnings('ignore: Cannot guess the correct type') +@pytest.mark.filterwarnings("ignore: Importing FieldTrip data without an info") +@pytest.mark.filterwarnings("ignore: Cannot guess the correct type") def 
test_throw_error_on_non_uniform_time_field(): """Test if an error is thrown when time fields are not uniform.""" fname = testing_path / "fieldtrip" / "not_uniform_time.mat" - with pytest.raises(RuntimeError, match='Loading data with non-uniform ' - 'times per epoch is not supported'): + with pytest.raises( + RuntimeError, + match="Loading data with non-uniform times per epoch is not supported", + ): mne.io.read_epochs_fieldtrip(fname, info=None) @testing.requires_testing_data -@pytest.mark.filterwarnings('ignore: Importing FieldTrip data without an info') +@pytest.mark.filterwarnings("ignore: Importing FieldTrip data without an info") def test_throw_error_when_importing_old_ft_version_data(): """Test if an error is thrown if the data was saved with an old version.""" fname = testing_path / "fieldtrip" / "old_version.mat" - with pytest.raises(RuntimeError, match='This file was created with ' - 'an old version of FieldTrip. You ' - 'can convert the data to the new ' - 'version by loading it into ' - 'FieldTrip and applying ' - 'ft_selectdata with an ' - 'empty cfg structure on it. ' - 'Otherwise you can supply ' - 'the Info field.'): + with pytest.raises( + RuntimeError, + match="This file was created with " + "an old version of FieldTrip. You " + "can convert the data to the new " + "version by loading it into " + "FieldTrip and applying " + "ft_selectdata with an " + "empty cfg structure on it. " + "Otherwise you can supply " + "the Info field.", + ): mne.io.read_epochs_fieldtrip(fname, info=None) diff --git a/mne/io/fieldtrip/utils.py b/mne/io/fieldtrip/utils.py index 7127f63ab54..f855b7cf0cf 100644 --- a/mne/io/fieldtrip/utils.py +++ b/mne/io/fieldtrip/utils.py @@ -1,39 +1,42 @@ -# -*- coding: UTF-8 -*- -# Authors: Thomas Hartmann -# Dirk Gütlin -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. + import numpy as np -from .._digitization import DigPoint, _ensure_fiducials_head -from ..constants import FIFF -from ..meas_info import create_info -from ..pick import pick_info +from ..._fiff._digitization import DigPoint, _ensure_fiducials_head +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ..._fiff.pick import pick_info from ...transforms import rotation3d_align_z_axis -from ...utils import warn, _check_pandas_installed - -_supported_megs = ['neuromag306'] - -_unit_dict = {'m': 1, - 'cm': 1e-2, - 'mm': 1e-3, - 'V': 1, - 'mV': 1e-3, - 'uV': 1e-6, - 'T': 1, - 'T/m': 1, - 'T/cm': 1e2} - -NOINFO_WARNING = 'Importing FieldTrip data without an info dict from the ' \ - 'original file. Channel locations, orientations and types ' \ - 'will be incorrect. The imported data cannot be used for ' \ - 'source analysis, channel interpolation etc.' +from ...utils import _check_pandas_installed, warn + +_supported_megs = ["neuromag306"] + +_unit_dict = { + "m": 1, + "cm": 1e-2, + "mm": 1e-3, + "V": 1, + "mV": 1e-3, + "uV": 1e-6, + "T": 1, + "T/m": 1, + "T/cm": 1e2, +} + +NOINFO_WARNING = ( + "Importing FieldTrip data without an info dict from the " + "original file. Channel locations, orientations and types " + "will be incorrect. The imported data cannot be used for " + "source analysis, channel interpolation etc." 
+) def _validate_ft_struct(ft_struct): """Run validation checks on the ft_structure.""" if isinstance(ft_struct, list): - raise RuntimeError('Loading of data in cell arrays is not supported') + raise RuntimeError("Loading of data in cell arrays is not supported") def _create_info(ft_struct, raw_info): @@ -42,36 +45,36 @@ def _create_info(ft_struct, raw_info): warn(NOINFO_WARNING) sfreq = _set_sfreq(ft_struct) - ch_names = ft_struct['label'] + ch_names = ft_struct["label"] if raw_info: info = raw_info.copy() - missing_channels = set(ch_names) - set(info['ch_names']) + missing_channels = set(ch_names) - set(info["ch_names"]) if missing_channels: - warn('The following channels are present in the FieldTrip data ' - 'but cannot be found in the provided info: %s.\n' - 'These channels will be removed from the resulting data!' - % (str(missing_channels), )) + warn( + "The following channels are present in the FieldTrip data " + f"but cannot be found in the provided info: {missing_channels}.\n" + "These channels will be removed from the resulting data!" + ) missing_chan_idx = [ch_names.index(ch) for ch in missing_channels] new_chs = [ch for ch in ch_names if ch not in missing_channels] ch_names = new_chs - ft_struct['label'] = ch_names + ft_struct["label"] = ch_names - if 'trial' in ft_struct: - ft_struct['trial'] = _remove_missing_channels_from_trial( - ft_struct['trial'], - missing_chan_idx + if "trial" in ft_struct: + ft_struct["trial"] = _remove_missing_channels_from_trial( + ft_struct["trial"], missing_chan_idx ) - if 'avg' in ft_struct: - if ft_struct['avg'].ndim == 2: - ft_struct['avg'] = np.delete(ft_struct['avg'], - missing_chan_idx, - axis=0) + if "avg" in ft_struct: + if ft_struct["avg"].ndim == 2: + ft_struct["avg"] = np.delete( + ft_struct["avg"], missing_chan_idx, axis=0 + ) with info._unlock(): - info['sfreq'] = sfreq - ch_idx = [info['ch_names'].index(ch) for ch in ch_names] + info["sfreq"] = sfreq + ch_idx = [info["ch_names"].index(ch) for ch in ch_names] pick_info(info, ch_idx, copy=False) else: info = create_info(ch_names, sfreq) @@ -90,80 +93,89 @@ def _remove_missing_channels_from_trial(trial, missing_chan_idx): ) elif isinstance(trial, np.ndarray): if trial.ndim == 2: - trial = np.delete(trial, - missing_chan_idx, - axis=0) + trial = np.delete(trial, missing_chan_idx, axis=0) else: - raise ValueError('"trial" field of the FieldTrip structure ' - 'has an unknown format.') + raise ValueError( + '"trial" field of the FieldTrip structure has an unknown format.' + ) return trial def _create_info_chs_dig(ft_struct): """Create the chs info field from the FieldTrip structure.""" - all_channels = ft_struct['label'] - ch_defaults = dict(coord_frame=FIFF.FIFFV_COORD_UNKNOWN, - cal=1.0, - range=1.0, - unit_mul=FIFF.FIFF_UNITM_NONE, - loc=np.array([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]), - unit=FIFF.FIFF_UNIT_V) + all_channels = ft_struct["label"] + ch_defaults = dict( + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + cal=1.0, + range=1.0, + unit_mul=FIFF.FIFF_UNITM_NONE, + loc=np.array([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]), + unit=FIFF.FIFF_UNIT_V, + ) try: - elec = ft_struct['elec'] + elec = ft_struct["elec"] except KeyError: elec = None try: - grad = ft_struct['grad'] + grad = ft_struct["grad"] except KeyError: grad = None if elec is None and grad is None: - warn('The supplied FieldTrip structure does not have an elec or grad ' - 'field. 
No channel locations will extracted and the kind of ' - 'channel might be inaccurate.') - if 'chanpos' not in (elec or grad or {'chanpos': None}): + warn( + "The supplied FieldTrip structure does not have an elec or grad " + "field. No channel locations will extracted and the kind of " + "channel might be inaccurate." + ) + if "chanpos" not in (elec or grad or {"chanpos": None}): raise RuntimeError( - 'This file was created with an old version of FieldTrip. You can ' - 'convert the data to the new version by loading it into FieldTrip ' - 'and applying ft_selectdata with an empty cfg structure on it. ' - 'Otherwise you can supply the Info field.') + "This file was created with an old version of FieldTrip. You can " + "convert the data to the new version by loading it into FieldTrip " + "and applying ft_selectdata with an empty cfg structure on it. " + "Otherwise you can supply the Info field." + ) chs = list() dig = list() counter = 0 for idx_chan, cur_channel_label in enumerate(all_channels): cur_ch = ch_defaults.copy() - cur_ch['ch_name'] = cur_channel_label - cur_ch['logno'] = idx_chan + 1 - cur_ch['scanno'] = idx_chan + 1 - if elec and cur_channel_label in elec['label']: + cur_ch["ch_name"] = cur_channel_label + cur_ch["logno"] = idx_chan + 1 + cur_ch["scanno"] = idx_chan + 1 + if elec and cur_channel_label in elec["label"]: cur_ch = _process_channel_eeg(cur_ch, elec) - assert cur_ch['coord_frame'] == FIFF.FIFFV_COORD_HEAD + assert cur_ch["coord_frame"] == FIFF.FIFFV_COORD_HEAD # Ref gets ident=0 and we don't have it, so start at 1 counter += 1 d = DigPoint( - r=cur_ch['loc'][:3], coord_frame=FIFF.FIFFV_COORD_HEAD, - kind=FIFF.FIFFV_POINT_EEG, ident=counter) + r=cur_ch["loc"][:3], + coord_frame=FIFF.FIFFV_COORD_HEAD, + kind=FIFF.FIFFV_POINT_EEG, + ident=counter, + ) dig.append(d) - elif grad and cur_channel_label in grad['label']: + elif grad and cur_channel_label in grad["label"]: cur_ch = _process_channel_meg(cur_ch, grad) else: - if cur_channel_label.startswith('EOG'): - cur_ch['kind'] = FIFF.FIFFV_EOG_CH - cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG - elif cur_channel_label.startswith('ECG'): - cur_ch['kind'] = FIFF.FIFFV_ECG_CH - cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR - elif cur_channel_label.startswith('STI'): - cur_ch['kind'] = FIFF.FIFFV_STIM_CH - cur_ch['coil_type'] = FIFF.FIFFV_COIL_NONE + if cur_channel_label.startswith("EOG"): + cur_ch["kind"] = FIFF.FIFFV_EOG_CH + cur_ch["coil_type"] = FIFF.FIFFV_COIL_EEG + elif cur_channel_label.startswith("ECG"): + cur_ch["kind"] = FIFF.FIFFV_ECG_CH + cur_ch["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR + elif cur_channel_label.startswith("STI"): + cur_ch["kind"] = FIFF.FIFFV_STIM_CH + cur_ch["coil_type"] = FIFF.FIFFV_COIL_NONE else: - warn('Cannot guess the correct type of channel %s. Making ' - 'it a MISC channel.' % (cur_channel_label,)) - cur_ch['kind'] = FIFF.FIFFV_MISC_CH - cur_ch['coil_type'] = FIFF.FIFFV_COIL_NONE + warn( + f"Cannot guess the correct type of channel {cur_channel_label}. " + "Making it a MISC channel." 
+ ) + cur_ch["kind"] = FIFF.FIFFV_MISC_CH + cur_ch["coil_type"] = FIFF.FIFFV_COIL_NONE chs.append(cur_ch) _ensure_fiducials_head(dig) @@ -174,63 +186,67 @@ def _create_info_chs_dig(ft_struct): def _set_sfreq(ft_struct): """Set the sample frequency.""" try: - sfreq = ft_struct['fsample'] + sfreq = ft_struct["fsample"] except KeyError: try: - time = ft_struct['time'] + time = ft_struct["time"] except KeyError: - raise ValueError('No Source for sfreq found') + raise ValueError("No Source for sfreq found") else: t1, t2 = float(time[0]), float(time[1]) sfreq = 1 / (t2 - t1) try: sfreq = float(sfreq) except TypeError: - warn('FieldTrip structure contained multiple sample rates, trying the ' - f'first of:\n{sfreq} Hz') + warn( + "FieldTrip structure contained multiple sample rates, trying the " + f"first of:\n{sfreq} Hz" + ) sfreq = float(sfreq.ravel()[0]) return sfreq def _set_tmin(ft_struct): """Set the start time before the event in evoked data if possible.""" - times = ft_struct['time'] - time_check = all(times[i][0] == times[i - 1][0] - for i, x in enumerate(times)) + times = ft_struct["time"] + time_check = all(times[i][0] == times[i - 1][0] for i, x in enumerate(times)) if time_check: tmin = times[0][0] else: - raise RuntimeError('Loading data with non-uniform ' - 'times per epoch is not supported') + raise RuntimeError( + "Loading data with non-uniform times per epoch is not supported" + ) return tmin def _create_events(ft_struct, trialinfo_column): """Create an event matrix from the FieldTrip structure.""" - if 'trialinfo' not in ft_struct: + if "trialinfo" not in ft_struct: return None - event_type = ft_struct['trialinfo'] + event_type = ft_struct["trialinfo"] event_number = range(len(event_type)) if trialinfo_column < 0: - raise ValueError('trialinfo_column must be positive') + raise ValueError("trialinfo_column must be positive") available_ti_cols = 1 if event_type.ndim == 2: available_ti_cols = event_type.shape[1] if trialinfo_column > (available_ti_cols - 1): - raise ValueError('trialinfo_column is higher than the amount of' - 'columns in trialinfo.') + raise ValueError( + "trialinfo_column is higher than the amount of columns in trialinfo." + ) event_trans_val = np.zeros(len(event_type)) if event_type.ndim == 2: event_type = event_type[:, trialinfo_column] - events = np.vstack([np.array(event_number), event_trans_val, - event_type]).astype('int').T + events = ( + np.vstack([np.array(event_number), event_trans_val, event_type]).astype("int").T + ) return events @@ -239,11 +255,13 @@ def _create_event_metadata(ft_struct): """Create event metadata from trialinfo.""" pandas = _check_pandas_installed(strict=False) if not pandas: - warn('The Pandas library is not installed. Not returning the original ' - 'trialinfo matrix as metadata.') + warn( + "The Pandas library is not installed. Not returning the original " + "trialinfo matrix as metadata." 
+ ) return None - metadata = pandas.DataFrame(ft_struct['trialinfo']) + metadata = pandas.DataFrame(ft_struct["trialinfo"]) return metadata @@ -264,18 +282,18 @@ def _process_channel_eeg(cur_ch, elec): cur_ch: dict The original dict (cur_ch) with the added information """ - all_labels = np.asanyarray(elec['label']) - chan_idx_in_elec = np.where(all_labels == cur_ch['ch_name'])[0][0] - position = np.squeeze(elec['chanpos'][chan_idx_in_elec, :]) + all_labels = np.asanyarray(elec["label"]) + chan_idx_in_elec = np.where(all_labels == cur_ch["ch_name"])[0][0] + position = np.squeeze(elec["chanpos"][chan_idx_in_elec, :]) # chanunit = elec['chanunit'][chan_idx_in_elec] # not used/needed yet - position_unit = elec['unit'] + position_unit = elec["unit"] position = position * _unit_dict[position_unit] - cur_ch['loc'] = np.hstack((position, np.zeros((9,)))) - cur_ch['unit'] = FIFF.FIFF_UNIT_V - cur_ch['kind'] = FIFF.FIFFV_EEG_CH - cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG - cur_ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD + cur_ch["loc"] = np.hstack((position, np.zeros((9,)))) + cur_ch["unit"] = FIFF.FIFF_UNIT_V + cur_ch["kind"] = FIFF.FIFFV_EEG_CH + cur_ch["coil_type"] = FIFF.FIFFV_COIL_EEG + cur_ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD return cur_ch @@ -295,27 +313,27 @@ def _process_channel_meg(cur_ch, grad): ------- dict: The original dict (cur_ch) with the added information """ - all_labels = np.asanyarray(grad['label']) - chan_idx_in_grad = np.where(all_labels == cur_ch['ch_name'])[0][0] - gradtype = grad['type'] - chantype = grad['chantype'][chan_idx_in_grad] - position_unit = grad['unit'] - position = np.squeeze(grad['chanpos'][chan_idx_in_grad, :]) + all_labels = np.asanyarray(grad["label"]) + chan_idx_in_grad = np.where(all_labels == cur_ch["ch_name"])[0][0] + gradtype = grad["type"] + chantype = grad["chantype"][chan_idx_in_grad] + position_unit = grad["unit"] + position = np.squeeze(grad["chanpos"][chan_idx_in_grad, :]) position = position * _unit_dict[position_unit] - if gradtype == 'neuromag306' and 'tra' in grad and 'coilpos' in grad: + if gradtype == "neuromag306" and "tra" in grad and "coilpos" in grad: # Try to regenerate original channel pos. 
- idx_in_coilpos = np.where(grad['tra'][chan_idx_in_grad, :] != 0)[0] - cur_coilpos = grad['coilpos'][idx_in_coilpos, :] + idx_in_coilpos = np.where(grad["tra"][chan_idx_in_grad, :] != 0)[0] + cur_coilpos = grad["coilpos"][idx_in_coilpos, :] cur_coilpos = cur_coilpos * _unit_dict[position_unit] - cur_coilori = grad['coilori'][idx_in_coilpos, :] - if chantype == 'megmag': + cur_coilori = grad["coilori"][idx_in_coilpos, :] + if chantype == "megmag": position = cur_coilpos[0] - 0.0003 * cur_coilori[0] - if chantype == 'megplanar': + if chantype == "megplanar": tmp_pos = cur_coilpos - 0.0003 * cur_coilori position = np.average(tmp_pos, axis=0) - original_orientation = np.squeeze(grad['chanori'][chan_idx_in_grad, :]) + original_orientation = np.squeeze(grad["chanori"][chan_idx_in_grad, :]) try: orientation = rotation3d_align_z_axis(original_orientation).T except AssertionError: @@ -324,27 +342,26 @@ def _process_channel_meg(cur_ch, grad): orientation = orientation.flatten() # chanunit = grad['chanunit'][chan_idx_in_grad] # not used/needed yet - cur_ch['loc'] = np.hstack((position, orientation)) - cur_ch['kind'] = FIFF.FIFFV_MEG_CH - if chantype == 'megmag': - cur_ch['coil_type'] = FIFF.FIFFV_COIL_POINT_MAGNETOMETER - cur_ch['unit'] = FIFF.FIFF_UNIT_T - elif chantype == 'megplanar': - cur_ch['coil_type'] = FIFF.FIFFV_COIL_VV_PLANAR_T1 - cur_ch['unit'] = FIFF.FIFF_UNIT_T_M - elif chantype == 'refmag': - cur_ch['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_MAG - cur_ch['unit'] = FIFF.FIFF_UNIT_T - elif chantype == 'refgrad': - cur_ch['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD - cur_ch['unit'] = FIFF.FIFF_UNIT_T - elif chantype == 'meggrad': - cur_ch['coil_type'] = FIFF.FIFFV_COIL_AXIAL_GRAD_5CM - cur_ch['unit'] = FIFF.FIFF_UNIT_T + cur_ch["loc"] = np.hstack((position, orientation)) + cur_ch["kind"] = FIFF.FIFFV_MEG_CH + if chantype == "megmag": + cur_ch["coil_type"] = FIFF.FIFFV_COIL_POINT_MAGNETOMETER + cur_ch["unit"] = FIFF.FIFF_UNIT_T + elif chantype == "megplanar": + cur_ch["coil_type"] = FIFF.FIFFV_COIL_VV_PLANAR_T1 + cur_ch["unit"] = FIFF.FIFF_UNIT_T_M + elif chantype == "refmag": + cur_ch["coil_type"] = FIFF.FIFFV_COIL_MAGNES_REF_MAG + cur_ch["unit"] = FIFF.FIFF_UNIT_T + elif chantype == "refgrad": + cur_ch["coil_type"] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD + cur_ch["unit"] = FIFF.FIFF_UNIT_T + elif chantype == "meggrad": + cur_ch["coil_type"] = FIFF.FIFFV_COIL_AXIAL_GRAD_5CM + cur_ch["unit"] = FIFF.FIFF_UNIT_T else: - raise RuntimeError('Unexpected coil type: %s.' % ( - chantype,)) + raise RuntimeError(f"Unexpected coil type: {chantype}.") - cur_ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD + cur_ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD return cur_ch diff --git a/mne/io/fiff/__init__.py b/mne/io/fiff/__init__.py index 0df2dc2458a..3d83b2c7a7b 100644 --- a/mne/io/fiff/__init__.py +++ b/mne/io/fiff/__init__.py @@ -1,4 +1,10 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """FIF raw data reader.""" from .raw import Raw from .raw import read_raw_fif + +RawFIF = Raw diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py index b7e1a02b631..f9e10a0039d 100644 --- a/mne/io/fiff/raw.py +++ b/mne/io/fiff/raw.py @@ -1,31 +1,39 @@ -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Martin Luessi -# Denis Engemann -# Teon Brooks -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
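
# Editor's note (illustration, not part of the patch): what _create_events in
# utils.py above builds from a 2-D trialinfo matrix; the trialinfo values used
# here are hypothetical.
import numpy as np

trialinfo = np.array([[3, 101], [3, 102], [7, 103]])  # hypothetical FieldTrip trialinfo
codes = trialinfo[:, 0]  # trialinfo_column = 0
events = np.vstack([np.arange(len(codes)), np.zeros(len(codes)), codes]).astype("int").T
# events == [[0, 0, 3], [1, 0, 3], [2, 0, 7]]
# columns: trial index (used in place of a sample index), previous value, event id
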
import copy -import os import os.path as op +from pathlib import Path import numpy as np -from ..constants import FIFF -from ..open import fiff_open, _fiff_get_fid, _get_next_fname -from ..meas_info import read_meas_info -from ..tree import dir_tree_find -from ..tag import read_tag, read_tag_info -from ..base import (BaseRaw, _RawShell, _check_raw_compatibility, - _check_maxshield) -from ..utils import _mult_cal_one - +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import read_meas_info +from ..._fiff.open import _fiff_get_fid, _get_next_fname, fiff_open +from ..._fiff.tag import _call_dict, read_tag +from ..._fiff.tree import dir_tree_find +from ..._fiff.utils import _mult_cal_one from ...annotations import Annotations, _read_annotations_fif - +from ...channels import fix_mag_coil_types from ...event import AcqParserFIF -from ...utils import (check_fname, logger, verbose, warn, fill_doc, _file_like, - _on_missing, _check_fname) +from ...utils import ( + _check_fname, + _file_like, + _on_missing, + check_fname, + fill_doc, + logger, + verbose, + warn, +) +from ..base import ( + BaseRaw, + _check_maxshield, + _check_raw_compatibility, + _get_fname_rep, + _RawShell, +) @fill_doc @@ -65,59 +73,95 @@ class Raw(BaseRaw): Time vector in seconds. Starts from 0, independently of `first_samp` value. Time interval between consecutive time samples is equal to the inverse of the sampling frequency. + duration : float + The duration of the raw file in seconds. + + .. versionadded:: 1.9 preload : bool Indicates whether raw data are in memory. - %(verbose)s """ + _extra_attributes = ( + "fix_mag_coil_types", + "acqparser", + "_read_raw_file", # this would be ugly to move, but maybe we should + ) + @verbose - def __init__(self, fname, allow_maxshield=False, preload=False, - on_split_missing='raise', verbose=None): # noqa: D102 + def __init__( + self, + fname, + allow_maxshield=False, + preload=False, + on_split_missing="raise", + verbose=None, + ): raws = [] do_check_ext = not _file_like(fname) next_fname = fname while next_fname is not None: - raw, next_fname, buffer_size_sec = \ - self._read_raw_file(next_fname, allow_maxshield, - preload, do_check_ext) + raw, next_fname, buffer_size_sec = self._read_raw_file( + next_fname, allow_maxshield, preload, do_check_ext + ) do_check_ext = False raws.append(raw) if next_fname is not None: if not op.exists(next_fname): msg = ( - f'Split raw file detected but next file {next_fname} ' - 'does not exist. Ensure all files were transferred ' - 'properly and that split and original files were not ' - 'manually renamed on disk (split files should be ' - 'renamed by loading and re-saving with MNE-Python to ' - 'preserve proper filename linkage).') - _on_missing(on_split_missing, msg, name='on_split_missing') + f"Split raw file detected but next file {next_fname} " + "does not exist. Ensure all files were transferred " + "properly and that split and original files were not " + "manually renamed on disk (split files should be " + "renamed by loading and re-saving with MNE-Python to " + "preserve proper filename linkage)." + ) + _on_missing(on_split_missing, msg, name="on_split_missing") break - if _file_like(fname): - # avoid serialization error when copying file-like - fname = None # noqa + # If using a file-like object, we need to be careful about serialization and + # types. + # + # 1. We must change both the variable named "fname" here so that _get_argvalues + # (magic) does not store the file-like object. + # 2. 
We need to ensure "filenames" passed to the constructor below gets a list + # of Path or None. + # 3. We need to remove the file-like objects from _raw_extras. This must + # be done *after* the super().__init__ call, because the constructor + # needs the file-like objects to read the data (which it will do because we + # force preloading for file-like objects). + + # Avoid file-like in _get_argvalues (1) + fname = _path_from_fname(fname) _check_raw_compatibility(raws) - super(Raw, self).__init__( - copy.deepcopy(raws[0].info), False, - [r.first_samp for r in raws], [r.last_samp for r in raws], - [r.filename for r in raws], [r._raw_extras for r in raws], - raws[0].orig_format, None, buffer_size_sec=buffer_size_sec, - verbose=verbose) + super().__init__( + copy.deepcopy(raws[0].info), + preload=False, + first_samps=[r.first_samp for r in raws], + last_samps=[r.last_samp for r in raws], + # Avoid file-like objects in raw.filenames (2) + filenames=[_path_from_fname(r._raw_extras["filename"]) for r in raws], + raw_extras=[r._raw_extras for r in raws], + orig_format=raws[0].orig_format, + dtype=None, + buffer_size_sec=buffer_size_sec, + verbose=verbose, + ) # combine annotations self.set_annotations(raws[0].annotations, emit_warning=False) # Add annotations for in-data skips for extra in self._raw_extras: - mask = [ent is None for ent in extra['ent']] - start = extra['bounds'][:-1][mask] - stop = extra['bounds'][1:][mask] - 1 - duration = (stop - start + 1.) / self.info['sfreq'] - annot = Annotations(onset=(start / self.info['sfreq']), - duration=duration, - description='BAD_ACQ_SKIP', - orig_time=self.info['meas_date']) + mask = [ent is None for ent in extra["ent"]] + start = extra["bounds"][:-1][mask] + stop = extra["bounds"][1:][mask] - 1 + duration = (stop - start + 1.0) / self.info["sfreq"] + annot = Annotations( + onset=(start / self.info["sfreq"]), + duration=duration, + description="BAD_ACQ_SKIP", + orig_time=self.info["meas_date"], + ) self._annotations += annot @@ -125,34 +169,39 @@ def __init__(self, fname, allow_maxshield=False, preload=False, self._preload_data(preload) else: self.preload = False - # If using a file-like object, fix the filenames to be representative - # strings now instead of the file-like objects - self._filenames = [_get_fname_rep(fname) for fname in self._filenames] + # Avoid file-like objects in _raw_extras (3) + for extra in self._raw_extras: + if not isinstance(extra["filename"], Path): + extra["filename"] = None @verbose - def _read_raw_file(self, fname, allow_maxshield, preload, - do_check_ext=True, verbose=None): + def _read_raw_file( + self, fname, allow_maxshield, preload, do_check_ext=True, verbose=None + ): """Read in header information from a raw file.""" - logger.info('Opening raw data file %s...' 
% fname) + logger.info(f"Opening raw data file {fname}...") # Read in the whole file if preload is on and .fif.gz (saves time) if not _file_like(fname): if do_check_ext: - endings = ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif', - '_meg.fif', '_eeg.fif', '_ieeg.fif') - endings += tuple([f'{e}.gz' for e in endings]) - check_fname(fname, 'raw', endings) + endings = ( + "raw.fif", + "raw_sss.fif", + "raw_tsss.fif", + "_meg.fif", + "_eeg.fif", + "_ieeg.fif", + ) + endings += tuple([f"{e}.gz" for e in endings]) + check_fname(fname, "raw", endings) # filename - fname = str(_check_fname(fname, "read", True, "fname")) - ext = os.path.splitext(fname)[1].lower() - whole_file = preload if '.gz' in ext else False - del ext + fname = _check_fname(fname, "read", True, "fname") + whole_file = preload if fname.suffix == ".gz" else False else: # file-like if not preload: - raise ValueError('preload must be used with file-like objects') + raise ValueError("preload must be used with file-like objects") whole_file = True - fname_rep = _get_fname_rep(fname) ff, tree, _ = fiff_open(fname, preload=whole_file) with ff as fid: # Read the measurement info @@ -164,22 +213,22 @@ def _read_raw_file(self, fname, allow_maxshield, preload, raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA) if len(raw_node) == 0: raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA) - if (len(raw_node) == 0): + if len(raw_node) == 0: raw_node = dir_tree_find(meas, FIFF.FIFFB_IAS_RAW_DATA) - if (len(raw_node) == 0): - raise ValueError('No raw data in %s' % fname_rep) + if len(raw_node) == 0: + raise ValueError(f"No raw data in {_get_fname_rep(fname)}") _check_maxshield(allow_maxshield) with info._unlock(): - info['maxshield'] = True + info["maxshield"] = True del meas if len(raw_node) == 1: raw_node = raw_node[0] # Process the directory - directory = raw_node['directory'] - nent = raw_node['nent'] - nchan = int(info['nchan']) + directory = raw_node["directory"] + nent = raw_node["nent"] + nchan = int(info["nchan"]) first = 0 first_samp = 0 first_skip = 0 @@ -187,7 +236,7 @@ def _read_raw_file(self, fname, allow_maxshield, preload, # Get first sample tag if it is there if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE: tag = read_tag(fid, directory[first].pos) - first_samp = int(tag.data) + first_samp = int(tag.data.item()) first += 1 _check_entry(first, nent) @@ -195,18 +244,17 @@ def _read_raw_file(self, fname, allow_maxshield, preload, if directory[first].kind == FIFF.FIFF_DATA_SKIP: # This first skip can be applied only after we know the bufsize tag = read_tag(fid, directory[first].pos) - first_skip = int(tag.data) + first_skip = int(tag.data.item()) first += 1 _check_entry(first, nent) raw = _RawShell() - raw.filename = fname raw.first_samp = first_samp - if info['meas_date'] is None and annotations is not None: + if info["meas_date"] is None and annotations is not None: # we need to adjust annotations.onset as when there is no meas # date set_annotations considers that the origin of time is the # first available sample (ignores first_samp) - annotations.onset -= first_samp / info['sfreq'] + annotations.onset -= first_samp / info["sfreq"] raw.set_annotations(annotations) # Go through the remaining tags in the directory @@ -214,47 +262,40 @@ def _read_raw_file(self, fname, allow_maxshield, preload, nskip = 0 orig_format = None + _byte_dict = { + FIFF.FIFFT_DAU_PACK16: 2, + FIFF.FIFFT_SHORT: 2, + FIFF.FIFFT_FLOAT: 4, + FIFF.FIFFT_DOUBLE: 8, + FIFF.FIFFT_INT: 4, + FIFF.FIFFT_COMPLEX_FLOAT: 8, + FIFF.FIFFT_COMPLEX_DOUBLE: 16, + } + 
_orig_format_dict = { + FIFF.FIFFT_DAU_PACK16: "short", + FIFF.FIFFT_SHORT: "short", + FIFF.FIFFT_FLOAT: "single", + FIFF.FIFFT_DOUBLE: "double", + FIFF.FIFFT_INT: "int", + FIFF.FIFFT_COMPLEX_FLOAT: "single", + FIFF.FIFFT_COMPLEX_DOUBLE: "double", + } + for k in range(first, nent): ent = directory[k] # There can be skips in the data (e.g., if the user unclicked) # an re-clicked the button - if ent.kind == FIFF.FIFF_DATA_SKIP: - tag = read_tag(fid, ent.pos) - nskip = int(tag.data) - elif ent.kind == FIFF.FIFF_DATA_BUFFER: + if ent.kind == FIFF.FIFF_DATA_BUFFER: # Figure out the number of samples in this buffer - if ent.type == FIFF.FIFFT_DAU_PACK16: - nsamp = ent.size // (2 * nchan) - elif ent.type == FIFF.FIFFT_SHORT: - nsamp = ent.size // (2 * nchan) - elif ent.type == FIFF.FIFFT_FLOAT: - nsamp = ent.size // (4 * nchan) - elif ent.type == FIFF.FIFFT_DOUBLE: - nsamp = ent.size // (8 * nchan) - elif ent.type == FIFF.FIFFT_INT: - nsamp = ent.size // (4 * nchan) - elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT: - nsamp = ent.size // (8 * nchan) - elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE: - nsamp = ent.size // (16 * nchan) - else: - raise ValueError('Cannot handle data buffers of type ' - '%d' % ent.type) + try: + div = _byte_dict[ent.type] + except KeyError: + raise RuntimeError( + f"Cannot handle data buffers of type {ent.type}" + ) from None + nsamp = ent.size // (div * nchan) if orig_format is None: - if ent.type == FIFF.FIFFT_DAU_PACK16: - orig_format = 'short' - elif ent.type == FIFF.FIFFT_SHORT: - orig_format = 'short' - elif ent.type == FIFF.FIFFT_FLOAT: - orig_format = 'single' - elif ent.type == FIFF.FIFFT_DOUBLE: - orig_format = 'double' - elif ent.type == FIFF.FIFFT_INT: - orig_format = 'int' - elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT: - orig_format = 'single' - elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE: - orig_format = 'double' + orig_format = _orig_format_dict[ent.type] # Do we have an initial skip pending? if first_skip > 0: @@ -264,58 +305,74 @@ def _read_raw_file(self, fname, allow_maxshield, preload, # Do we have a skip pending? 
if nskip > 0: - raw_extras.append(dict( - ent=None, first=first_samp, nsamp=nskip * nsamp, - last=first_samp + nskip * nsamp - 1)) + raw_extras.append( + dict( + ent=None, + first=first_samp, + nsamp=nskip * nsamp, + last=first_samp + nskip * nsamp - 1, + ) + ) first_samp += nskip * nsamp nskip = 0 # Add a data buffer - raw_extras.append(dict(ent=ent, first=first_samp, - last=first_samp + nsamp - 1, - nsamp=nsamp)) + raw_extras.append( + dict( + ent=ent, + first=first_samp, + last=first_samp + nsamp - 1, + nsamp=nsamp, + ) + ) first_samp += nsamp + elif ent.kind == FIFF.FIFF_DATA_SKIP: + tag = read_tag(fid, ent.pos) + nskip = int(tag.data.item()) - next_fname = _get_next_fname(fid, fname_rep, tree) + next_fname = _get_next_fname(fid, _path_from_fname(fname), tree) # reformat raw_extras to be a dict of list/ndarray rather than # list of dict (faster access) - raw_extras = {key: [r[key] for r in raw_extras] - for key in raw_extras[0]} + raw_extras = {key: [r[key] for r in raw_extras] for key in raw_extras[0]} for key in raw_extras: - if key != 'ent': # dict or None + if key != "ent": # dict or None raw_extras[key] = np.array(raw_extras[key], int) - if not np.array_equal(raw_extras['last'][:-1], - raw_extras['first'][1:] - 1): - raise RuntimeError('FIF file appears to be broken') - bounds = np.cumsum(np.concatenate( - [raw_extras['first'][:1], raw_extras['nsamp']])) - raw_extras['bounds'] = bounds - assert len(raw_extras['bounds']) == len(raw_extras['ent']) + 1 + if not np.array_equal(raw_extras["last"][:-1], raw_extras["first"][1:] - 1): + raise RuntimeError("FIF file appears to be broken") + bounds = np.cumsum( + np.concatenate([raw_extras["first"][:1], raw_extras["nsamp"]]) + ) + raw_extras["bounds"] = bounds + assert len(raw_extras["bounds"]) == len(raw_extras["ent"]) + 1 # store the original buffer size - buffer_size_sec = np.median(raw_extras['nsamp']) / info['sfreq'] - del raw_extras['first'] - del raw_extras['last'] - del raw_extras['nsamp'] + buffer_size_sec = np.median(raw_extras["nsamp"]) / info["sfreq"] + del raw_extras["first"] + del raw_extras["last"] + del raw_extras["nsamp"] + raw_extras["filename"] = fname raw.last_samp = first_samp - 1 raw.orig_format = orig_format # Add the calibration factors - cals = np.zeros(info['nchan']) - for k in range(info['nchan']): - cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal'] + cals = np.zeros(info["nchan"]) + for k in range(info["nchan"]): + cals[k] = info["chs"][k]["range"] * info["chs"][k]["cal"] raw._cals = cals raw._raw_extras = raw_extras - logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % ( - raw.first_samp, raw.last_samp, - float(raw.first_samp) / info['sfreq'], - float(raw.last_samp) / info['sfreq'])) + logger.info( + " Range : %d ... %d = %9.3f ... 
%9.3f secs", + raw.first_samp, + raw.last_samp, + float(raw.first_samp) / info["sfreq"], + float(raw.last_samp) / info["sfreq"], + ) raw.info = info - logger.info('Ready.') + logger.info("Ready.") return raw, next_fname, buffer_size_sec @@ -325,34 +382,31 @@ def _dtype(self): if self._dtype_ is not None: return self._dtype_ dtype = None - for raw_extra, filename in zip(self._raw_extras, self._filenames): - for ent in raw_extra['ent']: + for raw_extra in self._raw_extras: + for ent in raw_extra["ent"]: if ent is not None: - with _fiff_get_fid(filename) as fid: - fid.seek(ent.pos, 0) - tag = read_tag_info(fid) - if tag is not None: - if tag.type in (FIFF.FIFFT_COMPLEX_FLOAT, - FIFF.FIFFT_COMPLEX_DOUBLE): - dtype = np.complex128 - else: - dtype = np.float64 - if dtype is not None: - break + if ent.type in ( + FIFF.FIFFT_COMPLEX_FLOAT, + FIFF.FIFFT_COMPLEX_DOUBLE, + ): + dtype = np.complex128 + else: + dtype = np.float64 + break if dtype is not None: break if dtype is None: - raise RuntimeError('bug in reading') + raise RuntimeError("bug in reading") self._dtype_ = dtype return dtype def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file.""" n_bad = 0 - with _fiff_get_fid(self._filenames[fi]) as fid: - bounds = self._raw_extras[fi]['bounds'] - ents = self._raw_extras[fi]['ent'] - nchan = self._raw_extras[fi]['orig_nchan'] + with _fiff_get_fid(self._raw_extras[fi]["filename"]) as fid: + bounds = self._raw_extras[fi]["bounds"] + ents = self._raw_extras[fi]["ent"] + nchan = self._raw_extras[fi]["orig_nchan"] use = (stop > bounds[:-1]) & (start < bounds[1:]) offset = 0 for ei in np.where(use)[0]: @@ -363,22 +417,36 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): first_pick = max(start - first, 0) last_pick = min(nsamp, stop - first) picksamp = last_pick - first_pick - # only read data if it exists - if ent is not None: - one = read_tag(fid, ent.pos, - shape=(nsamp, nchan), - rlims=(first_pick, last_pick)).data - try: - one.shape = (picksamp, nchan) - except AttributeError: # one is None - n_bad += picksamp - else: - _mult_cal_one(data[:, offset:(offset + picksamp)], - one.T, idx, cals, mult) + this_start = offset offset += picksamp + this_stop = offset + # only read data if it exists + if ent is None: + continue # just use zeros for gaps + # faster to always read full tag, taking advantage of knowing the header + # already (cutting out some of read_tag) ... + fid.seek(ent.pos + 16, 0) + one = _call_dict[ent.type](fid, ent, shape=None, rlims=None) + try: + one.shape = (nsamp, nchan) + except AttributeError: # one is None + n_bad += picksamp + else: + # ... then pick samples we want + if first_pick != 0 or last_pick != nsamp: + one = one[first_pick:last_pick] + _mult_cal_one( + data[:, this_start:this_stop], + one.T, + idx, + cals, + mult, + ) if n_bad: - warn(f'FIF raw buffer could not be read, acquisition error ' - f'likely: {n_bad} samples set to zero') + warn( + f"FIF raw buffer could not be read, acquisition error " + f"likely: {n_bad} samples set to zero" + ) assert offset == stop - start def fix_mag_coil_types(self): @@ -409,7 +477,6 @@ def fix_mag_coil_types(self): current estimates computed by the MNE software is very small. Therefore the use of mne_fix_mag_coil_types is not mandatory. 
""" - from ...channels import fix_mag_coil_types fix_mag_coil_types(self.info) return self @@ -421,27 +488,21 @@ def acqparser(self): -------- mne.AcqParserFIF """ - if getattr(self, '_acqparser', None) is None: + if getattr(self, "_acqparser", None) is None: self._acqparser = AcqParserFIF(self.info) return self._acqparser -def _get_fname_rep(fname): - if not _file_like(fname): - return fname - else: - return 'File-like' - - def _check_entry(first, nent): """Sanity check entries.""" if first >= nent: - raise IOError('Could not read data, perhaps this is a corrupt file') + raise OSError("Could not read data, perhaps this is a corrupt file") @fill_doc -def read_raw_fif(fname, allow_maxshield=False, preload=False, - on_split_missing='raise', verbose=None): +def read_raw_fif( + fname, allow_maxshield=False, preload=False, on_split_missing="raise", verbose=None +) -> Raw: """Reader function for Raw FIF data. Parameters @@ -479,6 +540,23 @@ def read_raw_fif(fname, allow_maxshield=False, preload=False, ``raw.n_times`` parameters but ``raw.first_samp`` and ``raw.first_time`` are updated accordingly. """ - return Raw(fname=fname, allow_maxshield=allow_maxshield, - preload=preload, verbose=verbose, - on_split_missing=on_split_missing) + return Raw( + fname=fname, + allow_maxshield=allow_maxshield, + preload=preload, + verbose=verbose, + on_split_missing=on_split_missing, + ) + + +def _path_from_fname(fname) -> Path | None: + if not isinstance(fname, Path): + if isinstance(fname, str): + fname = Path(fname) + else: + # Try to get a filename from the file-like object + try: + fname = Path(fname.name) + except Exception: + fname = None + return fname diff --git a/mne/io/fiff/tests/__init__.py b/mne/io/fiff/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/fiff/tests/__init__.py +++ b/mne/io/fiff/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index 39b8cc61c58..29aa160ec19 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -1,46 +1,61 @@ -# -*- coding: utf-8 -*- -# Author: Alexandre Gramfort -# Denis Engemann -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-from copy import deepcopy -from pathlib import Path -from functools import partial -from io import BytesIO +import datetime import os import pathlib import pickle +import platform import shutil -import sys +from contextlib import nullcontext +from copy import deepcopy +from functools import partial +from io import BytesIO +from pathlib import Path import numpy as np -from numpy.testing import (assert_array_almost_equal, assert_array_equal, - assert_allclose) import pytest - +from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal + +from mne import ( + compute_proj_raw, + concatenate_events, + create_info, + equalize_channels, + events_from_annotations, + find_events, + make_fixed_length_epochs, + pick_channels, + pick_info, + pick_types, +) +from mne._fiff.constants import FIFF +from mne._fiff.tag import _read_tag_header, read_tag +from mne.annotations import Annotations from mne.datasets import testing from mne.filter import filter_data -from mne.io.constants import FIFF -from mne.io import RawArray, concatenate_raws, read_raw_fif, base -from mne.io.open import read_tag, read_tag_info -from mne.io.tag import _read_tag_header +from mne.io import RawArray, base, concatenate_raws, match_channel_orders, read_raw_fif from mne.io.tests.test_raw import _test_concat, _test_raw_reader -from mne import (concatenate_events, find_events, equalize_channels, - compute_proj_raw, pick_types, pick_channels, create_info, - pick_info, make_fixed_length_epochs) -from mne.utils import (requires_pandas, assert_object_equal, _dt_to_stamp, - requires_mne, run_subprocess, _record_warnings, - assert_and_remove_boundary_annot) -from mne.annotations import Annotations +from mne.transforms import Transform +from mne.utils import ( + _dt_to_stamp, + _record_warnings, + assert_and_remove_boundary_annot, + assert_object_equal, + catch_logging, + requires_mne, + run_subprocess, +) testing_path = testing.data_path(download=False) data_dir = testing_path / "MEG" / "sample" fif_fname = data_dir / "sample_audvis_trunc_raw.fif" ms_fname = testing_path / "SSS" / "test_move_anon_raw.fif" skip_fname = testing_path / "misc" / "intervalrecording_raw.fif" +tri_fname = testing_path / "SSS" / "TRIUX" / "triux_bmlhus_erm_raw.fif" -base_dir = Path(__file__).parent.parent.parent / "tests" / "data" +base_dir = Path(__file__).parents[2] / "tests" / "data" test_fif_fname = base_dir / "test_raw.fif" test_fif_gz_fname = base_dir / "test_raw.fif.gz" ctf_fname = base_dir / "test_ctf_raw.fif" @@ -48,7 +63,6 @@ fif_bad_marked_fname = base_dir / "test_withbads_raw.fif" bad_file_works = base_dir / "test_bads.txt" bad_file_wrong = base_dir / "test_wrong_bads.txt" -hp_fname = base_dir / "test_chpi_raw_hp.txt" hp_fif_fname = base_dir / "test_chpi_raw_sss.fif" @@ -61,28 +75,29 @@ def test_acq_skip(tmp_path): annotations = raw.annotations assert len(annotations) == 3 # there are 3 skips assert_allclose(annotations.onset, [14, 19, 23]) - assert_allclose(annotations.duration, [2., 2., 3.]) # inclusive! - data, times = raw.get_data( - picks, reject_by_annotation='omit', return_times=True) - expected_data, expected_times = zip(raw[picks, :2000], - raw[picks, 4000:7000], - raw[picks, 9000:11000], - raw[picks, 14000:17000]) + assert_allclose(annotations.duration, [2.0, 2.0, 3.0]) # inclusive! 
+ data, times = raw.get_data(picks, reject_by_annotation="omit", return_times=True) + expected_data, expected_times = zip( + raw[picks, :2000], + raw[picks, 4000:7000], + raw[picks, 9000:11000], + raw[picks, 14000:17000], + ) expected_times = np.concatenate(list(expected_times), axis=-1) assert_allclose(times, expected_times) expected_data = list(expected_data) assert_allclose(data, np.concatenate(expected_data, axis=-1), atol=1e-22) # Check that acquisition skips are handled properly in filtering - kwargs = dict(l_freq=None, h_freq=50., fir_design='firwin') + kwargs = dict(l_freq=None, h_freq=50.0, fir_design="firwin") raw_filt = raw.copy().filter(picks=picks, **kwargs) for data in expected_data: - filter_data(data, raw.info['sfreq'], copy=False, **kwargs) - data = raw_filt.get_data(picks, reject_by_annotation='omit') + filter_data(data, raw.info["sfreq"], copy=False, **kwargs) + data = raw_filt.get_data(picks, reject_by_annotation="omit") assert_allclose(data, np.concatenate(expected_data, axis=-1), atol=1e-22) # Check that acquisition skips are handled properly during I/O - fname = tmp_path / 'test_raw.fif' + fname = tmp_path / "test_raw.fif" raw.save(fname, fmt=raw.orig_format) # first: file size should not increase much (orig data is missing # 7 of 17 buffers, so if we write them out it should increase the file @@ -96,12 +111,12 @@ def test_acq_skip(tmp_path): assert_allclose(raw.times, raw_read.times) assert_allclose(raw_read[:][0], raw[:][0], atol=1e-17) # Saving with a bad buffer length emits warning - raw.pick_channels(raw.ch_names[:2]) + raw.pick(raw.ch_names[:2]) with _record_warnings() as w: raw.save(fname, buffer_size_sec=0.5, overwrite=True) assert len(w) == 0 - with pytest.warns(RuntimeWarning, match='did not fit evenly'): - raw.save(fname, buffer_size_sec=2., overwrite=True) + with pytest.warns(RuntimeWarning, match="did not fit evenly"): + raw.save(fname, buffer_size_sec=2.0, overwrite=True) def test_fix_types(): @@ -109,35 +124,34 @@ def test_fix_types(): for fname, change, bads in ( (hp_fif_fname, True, ["MEG0111"]), (test_fif_fname, False, []), - (ctf_fname, False, []) + (ctf_fname, False, []), ): raw = read_raw_fif(fname) raw.info["bads"] = bads - mag_picks = pick_types(raw.info, meg='mag', exclude=[]) + mag_picks = pick_types(raw.info, meg="mag", exclude=[]) other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks) # we don't actually have any files suffering from this problem, so # fake it if change: for ii in mag_picks: - raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2 - orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']]) + raw.info["chs"][ii]["coil_type"] = FIFF.FIFFV_COIL_VV_MAG_T2 + orig_types = np.array([ch["coil_type"] for ch in raw.info["chs"]]) raw.fix_mag_coil_types() - new_types = np.array([ch['coil_type'] for ch in raw.info['chs']]) + new_types = np.array([ch["coil_type"] for ch in raw.info["chs"]]) if not change: assert_array_equal(orig_types, new_types) else: assert_array_equal(orig_types[other_picks], new_types[other_picks]) - assert ((orig_types[mag_picks] != new_types[mag_picks]).all()) - assert ((new_types[mag_picks] == - FIFF.FIFFV_COIL_VV_MAG_T3).all()) + assert (orig_types[mag_picks] != new_types[mag_picks]).all() + assert (new_types[mag_picks] == FIFF.FIFFV_COIL_VV_MAG_T3).all() def test_concat(tmp_path): """Test RawFIF concatenation.""" # we trim the file to save lots of memory and some time raw = read_raw_fif(test_fif_fname) - raw.crop(0, 2.) 
- test_name = tmp_path / 'test_raw.fif' + raw.crop(0, 2.0) + test_name = tmp_path / "test_raw.fif" raw.save(test_name) # now run the standard test _test_concat(partial(read_raw_fif), test_name) @@ -152,7 +166,7 @@ def test_hash_raw(): raw_size = raw._size raw.load_data() raw_load_size = raw._size - assert (raw_size < raw_load_size) + assert raw_size < raw_load_size raw_2 = read_raw_fif(fif_fname).crop(0, 0.5) raw_2.load_data() assert hash(raw) == hash(raw_2) @@ -166,35 +180,35 @@ def test_hash_raw(): @testing.requires_testing_data def test_maxshield(): """Test maxshield warning.""" - with pytest.warns(RuntimeWarning, match='Internal Active Shielding') as w: + with pytest.warns(RuntimeWarning, match="Internal Active Shielding") as w: read_raw_fif(ms_fname, allow_maxshield=True) - assert ('test_raw_fiff.py' in w[0].filename) + assert "test_raw_fiff.py" in w[0].filename @testing.requires_testing_data def test_subject_info(tmp_path): """Test reading subject information.""" raw = read_raw_fif(fif_fname).crop(0, 1) - assert (raw.info['subject_info'] is None) + assert raw.info["subject_info"] is None # fake some subject data - keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex', - 'hand'] - vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1] + keys = ["id", "his_id", "last_name", "first_name", "birthday", "sex", "hand"] + vals = [1, "foobar", "bar", "foo", datetime.date(1901, 2, 3), 0, 1] subject_info = dict() for key, val in zip(keys, vals): subject_info[key] = val - raw.info['subject_info'] = subject_info - out_fname = tmp_path / 'test_subj_info_raw.fif' + raw.info["subject_info"] = subject_info + out_fname = tmp_path / "test_subj_info_raw.fif" raw.save(out_fname, overwrite=True) raw_read = read_raw_fif(out_fname) for key in keys: - assert subject_info[key] == raw_read.info['subject_info'][key] - assert raw.info['meas_date'] == raw_read.info['meas_date'] + assert subject_info[key] == raw_read.info["subject_info"][key] + assert raw.info["meas_date"] == raw_read.info["meas_date"] - for key in ['secs', 'usecs', 'version']: - assert raw.info['meas_id'][key] == raw_read.info['meas_id'][key] - assert_array_equal(raw.info['meas_id']['machid'], - raw_read.info['meas_id']['machid']) + for key in ["secs", "usecs", "version"]: + assert raw.info["meas_id"][key] == raw_read.info["meas_id"][key] + assert_array_equal( + raw.info["meas_id"]["machid"], raw_read.info["meas_id"]["machid"] + ) @testing.requires_testing_data @@ -210,17 +224,17 @@ def test_copy_append(): @testing.requires_testing_data def test_output_formats(tmp_path): """Test saving and loading raw data using multiple formats.""" - formats = ['short', 'int', 'single', 'double'] + formats = ["short", "int", "single", "double"] tols = [1e-4, 1e-7, 1e-7, 1e-15] # let's fake a raw file with different formats raw = read_raw_fif(test_fif_fname).crop(0, 1) - temp_file = tmp_path / 'raw.fif' + temp_file = tmp_path / "raw.fif" for ii, (fmt, tol) in enumerate(zip(formats, tols)): # Let's test the overwriting error throwing while we're at it if ii > 0: - pytest.raises(IOError, raw.save, temp_file, fmt=fmt) + pytest.raises(OSError, raw.save, temp_file, fmt=fmt) raw.save(temp_file, fmt=fmt, overwrite=True) raw2 = read_raw_fif(temp_file) raw2_data = raw2[:, :][0] @@ -244,10 +258,10 @@ def test_multiple_files(tmp_path): raw = read_raw_fif(fif_fname).crop(0, 10) raw.load_data() raw.load_data() # test no operation - split_size = 3. 
# in seconds - sfreq = raw.info['sfreq'] - nsamp = (raw.last_samp - raw.first_samp) - tmins = np.round(np.arange(0., nsamp, split_size * sfreq)) + split_size = 3.0 # in seconds + sfreq = raw.info["sfreq"] + nsamp = raw.last_samp - raw.first_samp + tmins = np.round(np.arange(0.0, nsamp, split_size * sfreq)) tmaxs = np.concatenate((tmins[1:] - 1, [nsamp])) tmaxs /= sfreq tmins /= sfreq @@ -256,20 +270,20 @@ def test_multiple_files(tmp_path): # going in reverse order so the last fname is the first file (need later) raws = [None] * len(tmins) for ri in range(len(tmins) - 1, -1, -1): - fname = tmp_path / ('test_raw_split-%d_raw.fif' % ri) + fname = tmp_path / (f"test_raw_split-{ri}_raw.fif") raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri]) raws[ri] = read_raw_fif(fname) - assert (len(raws[ri].times) == - int(round((tmaxs[ri] - tmins[ri]) * - raw.info['sfreq'])) + 1) # + 1 b/c inclusive - events = [find_events(r, stim_channel='STI 014') for r in raws] + assert ( + len(raws[ri].times) + == int(round((tmaxs[ri] - tmins[ri]) * raw.info["sfreq"])) + 1 + ) # + 1 b/c inclusive + events = [find_events(r, stim_channel="STI 014") for r in raws] last_samps = [r.last_samp for r in raws] first_samps = [r.first_samp for r in raws] # test concatenation of split file pytest.raises(ValueError, concatenate_raws, raws, True, events[1:]) - all_raw_1, events1 = concatenate_raws(raws, preload=False, - events_list=events) + all_raw_1, events1 = concatenate_raws(raws, preload=False, events_list=events) assert_allclose(all_raw_1.times, raw.times) assert raw.first_samp == all_raw_1.first_samp assert raw.last_samp == all_raw_1.last_samp @@ -280,7 +294,7 @@ def test_multiple_files(tmp_path): # test proper event treatment for split files events2 = concatenate_events(events, first_samps, last_samps) - events3 = find_events(all_raw_2, stim_channel='STI 014') + events3 = find_events(all_raw_2, stim_channel="STI 014") assert_array_equal(events1, events2) assert_array_equal(events1, events3) @@ -292,17 +306,17 @@ def test_multiple_files(tmp_path): # add potentially problematic points times.extend([n_times - 1, n_times, 2 * n_times - 1]) - raw_combo0 = concatenate_raws([read_raw_fif(f) - for f in [fif_fname, fif_fname]], - preload=True) + raw_combo0 = concatenate_raws( + [read_raw_fif(f) for f in [fif_fname, fif_fname]], preload=True + ) _compare_combo(raw, raw_combo0, times, n_times) - raw_combo = concatenate_raws([read_raw_fif(f) - for f in [fif_fname, fif_fname]], - preload=False) + raw_combo = concatenate_raws( + [read_raw_fif(f) for f in [fif_fname, fif_fname]], preload=False + ) _compare_combo(raw, raw_combo, times, n_times) - raw_combo = concatenate_raws([read_raw_fif(f) - for f in [fif_fname, fif_fname]], - preload='memmap8.dat') + raw_combo = concatenate_raws( + [read_raw_fif(f) for f in [fif_fname, fif_fname]], preload="memmap8.dat" + ) _compare_combo(raw, raw_combo, times, n_times) assert raw[:, :][0].shape[1] * 2 == raw_combo0[:, :][0].shape[1] assert raw_combo0[:, :][0].shape[1] == raw_combo0.n_times @@ -310,52 +324,63 @@ def test_multiple_files(tmp_path): # with all data preloaded, result should be preloaded raw_combo = read_raw_fif(fif_fname, preload=True) raw_combo.append(read_raw_fif(fif_fname, preload=True)) - assert (raw_combo.preload is True) + assert raw_combo.preload is True assert raw_combo.n_times == raw_combo._data.shape[1] _compare_combo(raw, raw_combo, times, n_times) # with any data not preloaded, don't set result as preloaded - raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=True), - 
read_raw_fif(fif_fname, preload=False)]) - assert (raw_combo.preload is False) - assert_array_equal(find_events(raw_combo, stim_channel='STI 014'), - find_events(raw_combo0, stim_channel='STI 014')) + raw_combo = concatenate_raws( + [read_raw_fif(fif_fname, preload=True), read_raw_fif(fif_fname, preload=False)] + ) + assert raw_combo.preload is False + assert_array_equal( + find_events(raw_combo, stim_channel="STI 014"), + find_events(raw_combo0, stim_channel="STI 014"), + ) _compare_combo(raw, raw_combo, times, n_times) # user should be able to force data to be preloaded upon concat - raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False), - read_raw_fif(fif_fname, preload=True)], - preload=True) - assert (raw_combo.preload is True) + raw_combo = concatenate_raws( + [read_raw_fif(fif_fname, preload=False), read_raw_fif(fif_fname, preload=True)], + preload=True, + ) + assert raw_combo.preload is True _compare_combo(raw, raw_combo, times, n_times) - raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False), - read_raw_fif(fif_fname, preload=True)], - preload='memmap3.dat') + raw_combo = concatenate_raws( + [read_raw_fif(fif_fname, preload=False), read_raw_fif(fif_fname, preload=True)], + preload="memmap3.dat", + ) _compare_combo(raw, raw_combo, times, n_times) - raw_combo = concatenate_raws([ - read_raw_fif(fif_fname, preload=True), - read_raw_fif(fif_fname, preload=True)], preload='memmap4.dat') + raw_combo = concatenate_raws( + [read_raw_fif(fif_fname, preload=True), read_raw_fif(fif_fname, preload=True)], + preload="memmap4.dat", + ) _compare_combo(raw, raw_combo, times, n_times) - raw_combo = concatenate_raws([ - read_raw_fif(fif_fname, preload=False), - read_raw_fif(fif_fname, preload=False)], preload='memmap5.dat') + raw_combo = concatenate_raws( + [ + read_raw_fif(fif_fname, preload=False), + read_raw_fif(fif_fname, preload=False), + ], + preload="memmap5.dat", + ) _compare_combo(raw, raw_combo, times, n_times) # verify that combining raws with different projectors throws an exception raw.add_proj([], remove_existing=True) - pytest.raises(ValueError, raw.append, - read_raw_fif(fif_fname, preload=True)) + pytest.raises(ValueError, raw.append, read_raw_fif(fif_fname, preload=True)) # now test event treatment for concatenated raw files - events = [find_events(raw, stim_channel='STI 014'), - find_events(raw, stim_channel='STI 014')] + events = [ + find_events(raw, stim_channel="STI 014"), + find_events(raw, stim_channel="STI 014"), + ] last_samps = [raw.last_samp, raw.last_samp] first_samps = [raw.first_samp, raw.first_samp] events = concatenate_events(events, first_samps, last_samps) - events2 = find_events(raw_combo0, stim_channel='STI 014') + events2 = find_events(raw_combo0, stim_channel="STI 014") assert_array_equal(events, events2) # check out the len method @@ -364,21 +389,21 @@ def test_multiple_files(tmp_path): @testing.requires_testing_data -@pytest.mark.parametrize('on_mismatch', ('ignore', 'warn', 'raise')) +@pytest.mark.parametrize("on_mismatch", ("ignore", "warn", "raise")) def test_concatenate_raws(on_mismatch): """Test error handling during raw concatenation.""" raw = read_raw_fif(fif_fname).crop(0, 10) raws = [raw, raw.copy()] - raws[1].info['dev_head_t']['trans'] += 0.1 + raws[1].info["dev_head_t"]["trans"] += 0.1 kws = dict(raws=raws, on_mismatch=on_mismatch) - if on_mismatch == 'ignore': + if on_mismatch == "ignore": concatenate_raws(**kws) - elif on_mismatch == 'warn': - with pytest.warns(RuntimeWarning, match='different head positions'): + 
elif on_mismatch == "warn": + with pytest.warns(RuntimeWarning, match="different head positions"): concatenate_raws(**kws) - elif on_mismatch == 'raise': - with pytest.raises(ValueError, match='different head positions'): + elif on_mismatch == "raise": + with pytest.raises(ValueError, match="different head positions"): concatenate_raws(**kws) @@ -390,7 +415,7 @@ def _create_toy_data(n_channels=3, sfreq=250, seed=None): def test_concatenate_raws_bads_order(): - """Test concatenation of raw instances.""" + """Test concatenation of raws when the order of *bad* channels varies.""" raw0 = _create_toy_data() raw1 = _create_toy_data() @@ -411,87 +436,142 @@ def test_concatenate_raws_bads_order(): # Bad channel mismatch raises raw2 = raw1.copy() raw2.info["bads"] = ["0", "2"] - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="bads.*must match"): concatenate_raws([raw0, raw2]) # Type mismatch raises epochs1 = make_fixed_length_epochs(raw1) - with pytest.raises(ValueError): - concatenate_raws([raw0, epochs1]) + with pytest.raises(ValueError, match="type.*must match"): + concatenate_raws([raw0, epochs1.load_data()]) # Sample rate mismatch raw3 = _create_toy_data(sfreq=500) - with pytest.raises(ValueError): + raw3.info["bads"] = ["0", "1"] + with pytest.raises(ValueError, match="info.*must match"): concatenate_raws([raw0, raw3]) # Number of channels mismatch raw4 = _create_toy_data(n_channels=4) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="nchan.*must match"): concatenate_raws([raw0, raw4]) +def test_concatenate_raws_order(): + """Test concatenation of raws when the order of *good* channels varies.""" + raw0 = _create_toy_data(n_channels=2) + raw0._data[0] = np.zeros_like(raw0._data[0]) # set one channel zero + + # Create copy and concatenate raws + raw1 = raw0.copy() + raw_concat = concatenate_raws([raw0.copy(), raw1]) + assert raw0.ch_names == raw1.ch_names == raw_concat.ch_names == ["0", "1"] + ch0 = raw_concat.get_data(picks=["0"]) + assert np.all(ch0 == 0) + + # Change the order of the channels and concatenate again + raw1.reorder_channels(["1", "0"]) + assert raw1.ch_names == ["1", "0"] + raws = [raw0.copy(), raw1] + with pytest.raises(ValueError, match="Channel order must match."): + # Fails now due to wrong order of channels + raw_concat = concatenate_raws(raws) + + with pytest.raises(ValueError, match="Channel order must match."): + # still fails, because raws is copied and not changed in place + match_channel_orders(insts=raws, copy=True) + raw_concat = concatenate_raws(raws) + + # Now passes because all raws have the same order + match_channel_orders(insts=raws, copy=False) + raw_concat = concatenate_raws(raws) + ch0 = raw_concat.get_data(picks=["0"]) + assert np.all(ch0 == 0) + + @testing.requires_testing_data -@pytest.mark.parametrize('mod', ( - 'meg', - pytest.param('raw', marks=[ - pytest.mark.filterwarnings( - 'ignore:.*naming conventions.*:RuntimeWarning'), - pytest.mark.slowtest]), -)) +@pytest.mark.parametrize( + "mod", + ( + "meg", + pytest.param( + "raw", + marks=[ + pytest.mark.filterwarnings( + "ignore:.*naming conventions.*:RuntimeWarning" + ), + pytest.mark.slowtest, + ], + ), + ), +) def test_split_files(tmp_path, mod, monkeypatch): """Test writing and reading of split raw files.""" raw_1 = read_raw_fif(fif_fname, preload=True) # Test a very close corner case - assert_allclose(raw_1.buffer_size_sec, 10., atol=1e-2) # samp rate - split_fname = tmp_path / f'split_raw_{mod}.fif' + assert_allclose(raw_1.buffer_size_sec, 10.0, 
atol=1e-2) # samp rate + split_fname = tmp_path / f"split_raw_{mod}.fif" # intended filenames - split_fname_elekta_part2 = tmp_path / f'split_raw_{mod}-1.fif' - split_fname_bids_part1 = tmp_path / f'split_raw_split-01_{mod}.fif' - split_fname_bids_part2 = tmp_path / f'split_raw_split-02_{mod}.fif' - raw_1.set_annotations(Annotations([2.], [5.5], 'test')) + split_fname_elekta_part2 = tmp_path / f"split_raw_{mod}-1.fif" + split_fname_bids_part1 = tmp_path / f"split_raw_split-01_{mod}.fif" + split_fname_bids_part2 = tmp_path / f"split_raw_split-02_{mod}.fif" + raw_1.set_annotations(Annotations([2.0], [5.5], "test")) # Check that if BIDS is used and no split is needed it defaults to # simple writing without _split- entity. - raw_1.save(split_fname, split_naming='bids', verbose=True) + split_fnames = raw_1.save(split_fname, split_naming="bids", verbose=True) assert split_fname.is_file() assert not split_fname_bids_part1.is_file() - for split_naming in ('neuromag', 'bids'): - with pytest.raises(FileExistsError, match='Destination file'): + assert split_fnames == [split_fname] + + for split_naming in ("neuromag", "bids"): + with pytest.raises(FileExistsError, match="Destination file"): raw_1.save(split_fname, split_naming=split_naming, verbose=True) os.remove(split_fname) - with open(split_fname_bids_part1, 'w'): + with open(split_fname_bids_part1, "w"): pass - with pytest.raises(FileExistsError, match='Destination file'): - raw_1.save(split_fname, split_naming='bids', verbose=True) + with pytest.raises(FileExistsError, match="Destination file"): + raw_1.save(split_fname, split_naming="bids", verbose=True) assert not split_fname.is_file() - raw_1.save(split_fname, split_naming='neuromag', verbose=True) # okay + split_fnames = raw_1.save( + split_fname, split_naming="neuromag", verbose=True + ) # okay os.remove(split_fname) os.remove(split_fname_bids_part1) - - raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB', - verbose=True) - + # Multiple splits + split_filenames = raw_1.save( + split_fname, buffer_size_sec=1.0, split_size="10MB", verbose=True + ) # check that the filenames match the intended pattern assert split_fname.is_file() assert split_fname_elekta_part2.is_file() + assert split_filenames == [split_fname, split_fname_elekta_part2] # check that filenames are being formatted correctly for BIDS - raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB', - split_naming='bids', overwrite=True, verbose=True) + split_filenames = raw_1.save( + split_fname, + buffer_size_sec=1.0, + split_size="10MB", + split_naming="bids", + overwrite=True, + verbose=True, + ) assert split_fname_bids_part1.is_file() assert split_fname_bids_part2.is_file() + assert split_filenames == [split_fname_bids_part1, split_fname_bids_part2] - annot = Annotations(np.arange(20), np.ones((20,)), 'test') + annot = Annotations(np.arange(20), np.ones((20,)), "test") raw_1.set_annotations(annot) - split_fname = tmp_path / "split_raw.fif" - raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB') + split_fname = tmp_path / f"split_{mod}.fif" + raw_1.save(split_fname, buffer_size_sec=1.0, split_size="10MB") raw_2 = read_raw_fif(split_fname) - assert_allclose(raw_2.buffer_size_sec, 1., atol=1e-2) # samp rate + assert_allclose(raw_2.buffer_size_sec, 1.0, atol=1e-2) # samp rate assert_allclose(raw_1.annotations.onset, raw_2.annotations.onset) - assert_allclose(raw_1.annotations.duration, raw_2.annotations.duration, - rtol=0.001 / raw_2.info['sfreq']) - assert_array_equal(raw_1.annotations.description, - 
raw_2.annotations.description) + assert_allclose( + raw_1.annotations.duration, + raw_2.annotations.duration, + rtol=0.001 / raw_2.info["sfreq"], + ) + assert_array_equal(raw_1.annotations.description, raw_2.annotations.description) data_1, times_1 = raw_1[:, :] data_2, times_2 = raw_2[:, :] @@ -505,11 +585,11 @@ def test_split_files(tmp_path, mod, monkeypatch): del raw_bids # split missing behaviors os.remove(split_fname_bids_part2) - with pytest.raises(ValueError, match='manually renamed'): - read_raw_fif(split_fname_bids_part1, on_split_missing='raise') - with pytest.warns(RuntimeWarning, match='Split raw file detected'): - read_raw_fif(split_fname_bids_part1, on_split_missing='warn') - read_raw_fif(split_fname_bids_part1, on_split_missing='ignore') + with pytest.raises(ValueError, match="manually renamed"): + read_raw_fif(split_fname_bids_part1, on_split_missing="raise") + with pytest.warns(RuntimeWarning, match="Split raw file detected"): + read_raw_fif(split_fname_bids_part1, on_split_missing="warn") + read_raw_fif(split_fname_bids_part1, on_split_missing="ignore") # test the case where we only end up with one buffer to write # (GH#3210). These tests rely on writing meas info and annotations @@ -517,21 +597,24 @@ def test_split_files(tmp_path, mod, monkeypatch): # somehow, the numbers below for e.g. split_size might need to be # adjusted. raw_crop = raw_1.copy().crop(0, 5) - raw_crop.set_annotations(Annotations([2.], [5.5], 'test'), - emit_warning=False) - with pytest.raises(ValueError, - match='after writing measurement information'): - raw_crop.save(split_fname, split_size='1MB', # too small a size - buffer_size_sec=1., overwrite=True) - with pytest.raises(ValueError, - match='too large for the given split size'): - raw_crop.save(split_fname, - split_size=3003000, # still too small, now after Info - buffer_size_sec=1., overwrite=True) + raw_crop.set_annotations(Annotations([2.0], [5.5], "test"), emit_warning=False) + with pytest.raises(ValueError, match="after writing measurement information"): + raw_crop.save( + split_fname, + split_size="1MB", # too small a size + buffer_size_sec=1.0, + overwrite=True, + ) + with pytest.raises(ValueError, match="too large for the given split size"): + raw_crop.save( + split_fname, + split_size=3003000, # still too small, now after Info + buffer_size_sec=1.0, + overwrite=True, + ) # just barely big enough here; the right size to write exactly one buffer # at a time so we hit GH#3210 if we aren't careful - raw_crop.save(split_fname, split_size='4.5MB', - buffer_size_sec=1., overwrite=True) + raw_crop.save(split_fname, split_size="4.5MB", buffer_size_sec=1.0, overwrite=True) raw_read = read_raw_fif(split_fname) assert_allclose(raw_crop[:][0], raw_read[:][0], atol=1e-20) @@ -539,46 +622,100 @@ def test_split_files(tmp_path, mod, monkeypatch): # 1 buffer required raw_crop = raw_1.copy().crop(0, 1) - raw_crop.save(split_fname, buffer_size_sec=1., overwrite=True) + raw_crop.save(split_fname, buffer_size_sec=1.0, overwrite=True) raw_read = read_raw_fif(split_fname) - assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (301,)) + assert_array_equal(np.diff(raw_read._raw_extras[0]["bounds"]), (301,)) assert_allclose(raw_crop[:][0], raw_read[:][0]) # 2 buffers required raw_crop.save(split_fname, buffer_size_sec=0.5, overwrite=True) raw_read = read_raw_fif(split_fname) - assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (151, 150)) + assert_array_equal(np.diff(raw_read._raw_extras[0]["bounds"]), (151, 150)) 
assert_allclose(raw_crop[:][0], raw_read[:][0]) # 2 buffers required - raw_crop.save(split_fname, - buffer_size_sec=1. - 1.01 / raw_crop.info['sfreq'], - overwrite=True) + raw_crop.save( + split_fname, buffer_size_sec=1.0 - 1.01 / raw_crop.info["sfreq"], overwrite=True + ) raw_read = read_raw_fif(split_fname) - assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (300, 1)) + assert_array_equal(np.diff(raw_read._raw_extras[0]["bounds"]), (300, 1)) assert_allclose(raw_crop[:][0], raw_read[:][0]) - raw_crop.save(split_fname, - buffer_size_sec=1. - 2.01 / raw_crop.info['sfreq'], - overwrite=True) + raw_crop.save( + split_fname, buffer_size_sec=1.0 - 2.01 / raw_crop.info["sfreq"], overwrite=True + ) raw_read = read_raw_fif(split_fname) - assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (299, 2)) + assert_array_equal(np.diff(raw_read._raw_extras[0]["bounds"]), (299, 2)) assert_allclose(raw_crop[:][0], raw_read[:][0]) # proper ending assert tmp_path.is_dir() - with pytest.raises(ValueError, match='must end with an underscore'): - raw_crop.save( - tmp_path / 'test.fif', split_naming='bids', verbose='error') + with pytest.raises(ValueError, match="must end with an underscore"): + raw_crop.save(tmp_path / "test.fif", split_naming="bids", verbose="error") # reserved file is deleted - fname = tmp_path / 'test_raw.fif' - monkeypatch.setattr(base, '_write_raw_fid', _err) - with pytest.raises(RuntimeError, match='Killed mid-write'): - raw_1.save(fname, split_size='10MB', split_naming='bids') + fname = tmp_path / f"test_{mod}.fif" + with monkeypatch.context() as m: + m.setattr(base, "_write_raw_data", _err) + with pytest.raises(RuntimeError, match="Killed mid-write"): + raw_1.save(fname, split_size="10MB", split_naming="bids") + assert fname.is_file() + assert not (tmp_path / "test_split-01_{mod}.fif").is_file() + + # MAX_N_SPLITS exceeded + raw = RawArray(np.zeros((1, 2000000)), create_info(1, 1000.0, "eeg")) + fname.unlink() + kwargs = dict(split_size="2MB", overwrite=True, verbose=True) + with monkeypatch.context() as m: + m.setattr(base, "MAX_N_SPLITS", 2) + with pytest.raises(RuntimeError, match="Exceeded maximum number of splits"): + raw.save(fname, split_naming="bids", **kwargs) + fname_1, fname_2, fname_3 = ( + (tmp_path / f"test_split-{ii:02d}_{mod}.fif") for ii in range(1, 4) + ) + assert not fname.is_file() + assert fname_1.is_file() + assert fname_2.is_file() + assert not fname_3.is_file() + with monkeypatch.context() as m: + m.setattr(base, "MAX_N_SPLITS", 2) + with pytest.raises(RuntimeError, match="Exceeded maximum number of splits"): + raw.save(fname, split_naming="neuromag", **kwargs) + fname_2, fname_3 = ((tmp_path / f"test_{mod}-{ii}.fif") for ii in range(1, 3)) assert fname.is_file() - assert not (tmp_path / "test_split-01_raw.fif").is_file() + assert fname_2.is_file() + assert not fname_3.is_file() + + +def test_bids_split_files(tmp_path): + """Test that BIDS split files are written safely.""" + mne_bids = pytest.importorskip("mne_bids") + bids_path = mne_bids.BIDSPath( + root=tmp_path, + subject="01", + datatype="meg", + split="01", + suffix="raw", + extension=".fif", + check=False, + ) + (tmp_path / "sub-01" / "meg").mkdir(parents=True) + raw = read_raw_fif(test_fif_fname) + save_kwargs = dict( + buffer_size_sec=1.0, split_size="10MB", split_naming="bids", verbose=True + ) + with pytest.raises(ValueError, match="Passing a BIDSPath"): + raw.save(bids_path, **save_kwargs) + bids_path.split = None + want_paths = [ + 
Path(bids_path.copy().update(split=f"{ii:02d}").fpath) for ii in range(1, 3) + ] + for want_path in want_paths: + assert not want_path.is_file() + raw.save(bids_path, **save_kwargs) + for want_path in want_paths: + assert want_path.is_file(), want_path def _err(*args, **kwargs): - raise RuntimeError('Killed mid-write') + raise RuntimeError("Killed mid-write") def _no_write_file_name(fid, kind, data): @@ -588,12 +725,11 @@ def _no_write_file_name(fid, kind, data): def test_split_numbers(tmp_path, monkeypatch): """Test handling of split files using numbers instead of names.""" - monkeypatch.setattr(base, 'write_string', _no_write_file_name) - raw = read_raw_fif(test_fif_fname).pick('eeg') + monkeypatch.setattr(base, "write_string", _no_write_file_name) + raw = read_raw_fif(test_fif_fname).pick("eeg") # gh-8339 - dashes_fname = tmp_path / 'sub-1_ses-2_task-3_raw.fif' - raw.save(dashes_fname, split_size='5MB', - buffer_size_sec=1.) + dashes_fname = tmp_path / "sub-1_ses-2_task-3_raw.fif" + raw.save(dashes_fname, split_size="5MB", buffer_size_sec=1.0) assert dashes_fname.is_file() next_fname = Path(str(dashes_fname)[:-4] + "-1.fif") assert next_fname.is_file() @@ -606,37 +742,37 @@ def test_load_bad_channels(tmp_path): """Test reading/writing of bad channels.""" # Load correctly marked file (manually done in mne_process_raw) raw_marked = read_raw_fif(fif_bad_marked_fname) - correct_bads = raw_marked.info['bads'] + correct_bads = raw_marked.info["bads"] raw = read_raw_fif(test_fif_fname) # Make sure it starts clean - assert_array_equal(raw.info['bads'], []) + assert_array_equal(raw.info["bads"], []) # Test normal case raw.load_bad_channels(bad_file_works) # Write it out, read it in, and check - raw.save(tmp_path / 'foo_raw.fif') - raw_new = read_raw_fif(tmp_path / 'foo_raw.fif') - assert correct_bads == raw_new.info['bads'] + raw.save(tmp_path / "foo_raw.fif") + raw_new = read_raw_fif(tmp_path / "foo_raw.fif") + assert correct_bads == raw_new.info["bads"] # Reset it - raw.info['bads'] = [] + raw.info["bads"] = [] # Test bad case pytest.raises(ValueError, raw.load_bad_channels, bad_file_wrong) # Test forcing the bad case - with pytest.warns(RuntimeWarning, match='1 bad channel'): + with pytest.warns(RuntimeWarning, match="1 bad channel"): raw.load_bad_channels(bad_file_wrong, force=True) # write it out, read it in, and check - raw.save(tmp_path / 'foo_raw.fif', overwrite=True) - raw_new = read_raw_fif(tmp_path / 'foo_raw.fif') - assert correct_bads == raw_new.info['bads'] + raw.save(tmp_path / "foo_raw.fif", overwrite=True) + raw_new = read_raw_fif(tmp_path / "foo_raw.fif") + assert correct_bads == raw_new.info["bads"] # Check that bad channels are cleared raw.load_bad_channels(None) - raw.save(tmp_path / 'foo_raw.fif', overwrite=True) - raw_new = read_raw_fif(tmp_path / 'foo_raw.fif') - assert raw_new.info['bads'] == [] + raw.save(tmp_path / "foo_raw.fif", overwrite=True) + raw_new = read_raw_fif(tmp_path / "foo_raw.fif") + assert raw_new.info["bads"] == [] @pytest.mark.slowtest @@ -645,15 +781,15 @@ def test_io_raw(tmp_path): """Test IO for raw data (Neuromag).""" rng = np.random.RandomState(0) # test unicode io - for chars in [u'äöé', 'a']: + for chars in ["äöé", "a"]: with read_raw_fif(fif_fname) as r: - assert ('Raw' in repr(r)) - assert (fif_fname.name in repr(r)) - r.info['description'] = chars - temp_file = tmp_path / 'raw.fif' + assert "Raw" in repr(r) + assert fif_fname.name in repr(r) + r.info["description"] = chars + temp_file = tmp_path / "raw.fif" r.save(temp_file, overwrite=True) 
with read_raw_fif(temp_file) as r2: - desc2 = r2.info['description'] + desc2 = r2.info["description"] assert desc2 == chars # Let's construct a simple test for IO first @@ -663,7 +799,7 @@ def test_io_raw(tmp_path): data = rng.randn(raw._data.shape[0], raw._data.shape[1]) raw._data[:, :] = data # save it somewhere - fname = tmp_path / 'test_copy_raw.fif' + fname = tmp_path / "test_copy_raw.fif" raw.save(fname, buffer_size_sec=1.0) # read it in, make sure the whole thing matches raw = read_raw_fif(fname) @@ -673,49 +809,68 @@ def test_io_raw(tmp_path): sl = slice(inds[0], inds[1]) assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20) + # missing dir raises informative error + with pytest.raises(FileNotFoundError, match="parent directory does not exist"): + raw.save(tmp_path / "foo" / "test_raw.fif", split_size="1MB") + -@pytest.mark.parametrize('fname_in, fname_out', [ - (test_fif_fname, 'raw.fif'), - pytest.param(test_fif_gz_fname, 'raw.fif.gz', marks=pytest.mark.slowtest), - (ctf_fname, 'raw.fif')]) +@pytest.mark.parametrize( + "fname_in, fname_out", + [ + (test_fif_fname, "raw.fif"), + pytest.param(test_fif_gz_fname, "raw.fif.gz", marks=pytest.mark.slowtest), + (ctf_fname, "raw.fif"), + ], +) def test_io_raw_additional(fname_in, fname_out, tmp_path): """Test IO for raw data (Neuromag + CTF + gz).""" fname_out = tmp_path / fname_out raw = read_raw_fif(fname_in).crop(0, 2) - nchan = raw.info['nchan'] - ch_names = raw.info['ch_names'] - meg_channels_idx = [k for k in range(nchan) - if ch_names[k][0] == 'M'] + nchan = raw.info["nchan"] + ch_names = raw.info["ch_names"] + meg_channels_idx = [k for k in range(nchan) if ch_names[k][0] == "M"] n_channels = 100 meg_channels_idx = meg_channels_idx[:n_channels] start, stop = raw.time_as_index([0, 5], use_rounding=True) - data, times = raw[meg_channels_idx, start:(stop + 1)] + data, times = raw[meg_channels_idx, start : (stop + 1)] meg_ch_names = [ch_names[k] for k in meg_channels_idx] # Set up pick list: MEG + STI 014 - bad channels - include = ['STI 014'] + include = ["STI 014"] include += meg_ch_names - picks = pick_types(raw.info, meg=True, eeg=False, stim=True, - misc=True, ref_meg=True, include=include, - exclude='bads') + picks = pick_types( + raw.info, + meg=True, + eeg=False, + stim=True, + misc=True, + ref_meg=True, + include=include, + exclude="bads", + ) # Writing with drop_small_buffer True - raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3, - drop_small_buffer=True, overwrite=True) + raw.save( + fname_out, + picks, + tmin=0, + tmax=4, + buffer_size_sec=3, + drop_small_buffer=True, + overwrite=True, + ) raw2 = read_raw_fif(fname_out) sel = pick_channels(raw2.ch_names, meg_ch_names) data2, times2 = raw2[sel, :] - assert (times2.max() <= 3) + assert times2.max() <= 3 # Writing raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True) - if fname_in in ( - fif_fname, fif_fname.with_suffix(fif_fname.suffix + ".gz") - ): - assert len(raw.info['dig']) == 146 + if fname_in in (fif_fname, fif_fname.with_suffix(fif_fname.suffix + ".gz")): + assert len(raw.info["dig"]) == 146 raw2 = read_raw_fif(fname_out) @@ -724,44 +879,41 @@ def test_io_raw_additional(fname_in, fname_out, tmp_path): assert_allclose(data, data2, rtol=1e-6, atol=1e-20) assert_allclose(times, times2) - assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5) + assert_allclose(raw.info["sfreq"], raw2.info["sfreq"], rtol=1e-5) # check transformations - for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']: + for trans in ["dev_head_t", 
"dev_ctf_t", "ctf_head_t"]: if raw.info[trans] is None: - assert (raw2.info[trans] is None) + assert raw2.info[trans] is None else: - assert_array_equal(raw.info[trans]['trans'], - raw2.info[trans]['trans']) + assert_array_equal(raw.info[trans]["trans"], raw2.info[trans]["trans"]) # check transformation 'from' and 'to' - if trans.startswith('dev'): + if trans.startswith("dev"): from_id = FIFF.FIFFV_COORD_DEVICE else: from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD - if trans[4:8] == 'head': + if trans[4:8] == "head": to_id = FIFF.FIFFV_COORD_HEAD else: to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD for raw_ in [raw, raw2]: - assert raw_.info[trans]['from'] == from_id - assert raw_.info[trans]['to'] == to_id + assert raw_.info[trans]["from"] == from_id + assert raw_.info[trans]["to"] == to_id - if fname_in in ( - fif_fname, fif_fname.with_suffix(fif_fname.suffix + ".gz") - ): - assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r']) + if fname_in in (fif_fname, fif_fname.with_suffix(fif_fname.suffix + ".gz")): + assert_allclose(raw.info["dig"][0]["r"], raw2.info["dig"][0]["r"]) # test warnings on bad filenames - raw_badname = tmp_path / 'test-bad-name.fif.gz' - with pytest.warns(RuntimeWarning, match='raw.fif'): + raw_badname = tmp_path / "test-bad-name.fif.gz" + with pytest.warns(RuntimeWarning, match="raw.fif"): raw.save(raw_badname) - with pytest.warns(RuntimeWarning, match='raw.fif'): + with pytest.warns(RuntimeWarning, match="raw.fif"): read_raw_fif(raw_badname) @testing.requires_testing_data -@pytest.mark.parametrize('dtype', ('complex128', 'complex64')) +@pytest.mark.parametrize("dtype", ("complex128", "complex64")) def test_io_complex(tmp_path, dtype): """Test IO with complex data types.""" rng = np.random.RandomState(0) @@ -772,14 +924,14 @@ def test_io_complex(tmp_path, dtype): raw_cp = raw.copy() raw_cp._data = np.array(raw_cp._data, dtype) raw_cp._data += imag_rand - with pytest.warns(RuntimeWarning, match='Saving .* complex data.'): - raw_cp.save(tmp_path / 'raw.fif', overwrite=True) + with pytest.warns(RuntimeWarning, match="Saving .* complex data."): + raw_cp.save(tmp_path / "raw.fif", overwrite=True) - raw2 = read_raw_fif(tmp_path / 'raw.fif') + raw2 = read_raw_fif(tmp_path / "raw.fif") raw2_data, _ = raw2[:] assert_allclose(raw2_data, raw_cp._data) # with preloading - raw2 = read_raw_fif(tmp_path / 'raw.fif', preload=True) + raw2 = read_raw_fif(tmp_path / "raw.fif", preload=True) raw2_data, _ = raw2[:] assert_allclose(raw2_data, raw_cp._data) assert_allclose(data_orig, raw_cp._data.real) @@ -788,7 +940,7 @@ def test_io_complex(tmp_path, dtype): @testing.requires_testing_data def test_getitem(): """Test getitem/indexing of Raw.""" - for preload in [False, True, 'memmap.dat']: + for preload in [False, True, "memmap1.dat"]: raw = read_raw_fif(fif_fname, preload=preload) data, times = raw[0, :] data1, times1 = raw[0] @@ -803,14 +955,23 @@ def test_getitem(): assert_array_equal(times, times1) assert_array_equal(raw[raw.ch_names[0]][0][0], raw[0][0][0]) assert_array_equal( - raw[-10:-1, :][0], - raw[len(raw.ch_names) - 10:len(raw.ch_names) - 1, :][0]) - with pytest.raises(ValueError, match='No appropriate channels'): + raw[-10:-1, :][0], raw[len(raw.ch_names) - 10 : len(raw.ch_names) - 1, :][0] + ) + with pytest.raises(ValueError, match="No appropriate channels"): raw[slice(-len(raw.ch_names) - 1), slice(None)] - with pytest.raises(ValueError, match='must be'): + with pytest.raises(IndexError, match="must be"): raw[-1000] +@testing.requires_testing_data +def test_iter(): + """Test 
iterating over Raw via __getitem__().""" + raw = read_raw_fif(fif_fname).pick("eeg") # 60 EEG channels + for i, _ in enumerate(raw): # iterate over channels + pass + assert i == 59 # 60 channels means iterating from 0 to 59 + + @testing.requires_testing_data def test_proj(tmp_path): """Test SSP proj operations.""" @@ -818,7 +979,7 @@ def test_proj(tmp_path): raw = read_raw_fif(fif_fname, preload=False) if proj: raw.apply_proj() - assert (all(p['active'] == proj for p in raw.info['projs'])) + assert all(p["active"] == proj for p in raw.info["projs"]) data, times = raw[0:2, :] data1, times1 = raw[0:2] @@ -827,19 +988,18 @@ def test_proj(tmp_path): # test adding / deleting proj if proj: - pytest.raises(ValueError, raw.add_proj, [], - {'remove_existing': True}) + pytest.raises(ValueError, raw.add_proj, [], {"remove_existing": True}) pytest.raises(ValueError, raw.del_proj, 0) else: - projs = deepcopy(raw.info['projs']) - n_proj = len(raw.info['projs']) + projs = deepcopy(raw.info["projs"]) + n_proj = len(raw.info["projs"]) raw.del_proj(0) - assert len(raw.info['projs']) == n_proj - 1 + assert len(raw.info["projs"]) == n_proj - 1 raw.add_proj(projs, remove_existing=False) # Test that already existing projections are not added. - assert len(raw.info['projs']) == n_proj + assert len(raw.info["projs"]) == n_proj raw.add_proj(projs[:-1], remove_existing=True) - assert len(raw.info['projs']) == n_proj - 1 + assert len(raw.info["projs"]) == n_proj - 1 # test apply_proj() with and without preload for preload in [True, False]: @@ -852,18 +1012,18 @@ def test_proj(tmp_path): raw = read_raw_fif(fif_fname, preload=preload) # write the file with proj. activated, make sure proj has been applied - raw.save(tmp_path / 'raw.fif', proj=True, overwrite=True) - raw2 = read_raw_fif(tmp_path / 'raw.fif') + raw.save(tmp_path / "raw.fif", proj=True, overwrite=True) + raw2 = read_raw_fif(tmp_path / "raw.fif") data_proj_2, _ = raw2[:, 0:2] assert_allclose(data_proj_1, data_proj_2) - assert (all(p['active'] for p in raw2.info['projs'])) + assert all(p["active"] for p in raw2.info["projs"]) # read orig file with proj. active raw2 = read_raw_fif(fif_fname, preload=preload) raw2.apply_proj() data_proj_2, _ = raw2[:, 0:2] assert_allclose(data_proj_1, data_proj_2) - assert (all(p['active'] for p in raw2.info['projs'])) + assert all(p["active"] for p in raw2.info["projs"]) # test that apply_proj works raw.apply_proj() @@ -873,25 +1033,25 @@ def test_proj(tmp_path): # Test that picking removes projectors ... raw = read_raw_fif(fif_fname) - n_projs = len(raw.info['projs']) - raw.pick_types(meg=False, eeg=True) - assert len(raw.info['projs']) == n_projs - 3 + n_projs = len(raw.info["projs"]) + raw.pick(picks="eeg") + assert len(raw.info["projs"]) == n_projs - 3 # ... but only if it doesn't apply to any channels in the dataset anymore. raw = read_raw_fif(fif_fname) - n_projs = len(raw.info['projs']) - raw.pick_types(meg='mag', eeg=True) - assert len(raw.info['projs']) == n_projs + n_projs = len(raw.info["projs"]) + raw.pick(picks=["mag", "eeg"]) + assert len(raw.info["projs"]) == n_projs # I/O roundtrip of an MEG projector with a Raw that only contains EEG # data. - out_fname = tmp_path / 'test_raw.fif' + out_fname = tmp_path / "test_raw.fif" raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002) - proj = raw.info['projs'][-1] - raw.pick_types(meg=False, eeg=True) + proj = raw.info["projs"][-1] + raw.pick(picks="eeg") raw.add_proj(proj) # Restore, because picking removed it! raw._data.fill(0) - raw._data[-1] = 1. 
+ raw._data[-1] = 1.0 raw.save(out_fname) raw = read_raw_fif(out_fname, preload=False) raw.apply_proj() @@ -899,30 +1059,30 @@ def test_proj(tmp_path): @testing.requires_testing_data -@pytest.mark.parametrize('preload', [False, True, 'memmap.dat']) +@pytest.mark.parametrize("preload", [False, True, "memmap2.dat"]) def test_preload_modify(preload, tmp_path): """Test preloading and modifying data.""" rng = np.random.RandomState(0) raw = read_raw_fif(fif_fname, preload=preload) nsamp = raw.last_samp - raw.first_samp + 1 - picks = pick_types(raw.info, meg='grad', exclude='bads') + picks = pick_types(raw.info, meg="grad", exclude="bads") data = rng.randn(len(picks), nsamp // 2) try: - raw[picks, :nsamp // 2] = data + raw[picks, : nsamp // 2] = data except RuntimeError: if not preload: return else: raise - tmp_fname = tmp_path / 'raw.fif' + tmp_fname = tmp_path / "raw.fif" raw.save(tmp_fname, overwrite=True) raw_new = read_raw_fif(tmp_fname) - data_new, _ = raw_new[picks, :nsamp // 2] + data_new, _ = raw_new[picks, : nsamp // 2] assert_allclose(data, data_new) @@ -935,13 +1095,17 @@ def test_filter(): raw.load_data() sig_dec_notch = 12 sig_dec_notch_fit = 12 - picks_meg = pick_types(raw.info, meg=True, exclude='bads') + picks_meg = pick_types(raw.info, meg=True, exclude="bads") picks = picks_meg[:4] trans = 2.0 - filter_params = dict(picks=picks, filter_length='auto', - h_trans_bandwidth=trans, l_trans_bandwidth=trans, - fir_design='firwin') + filter_params = dict( + picks=picks, + filter_length="auto", + h_trans_bandwidth=trans, + l_trans_bandwidth=trans, + fir_design="firwin", + ) raw_lp = raw.copy().filter(None, 8.0, **filter_params) raw_hp = raw.copy().filter(16.0, None, **filter_params) raw_bp = raw.copy().filter(8.0 + trans, 16.0 - trans, **filter_params) @@ -959,8 +1123,9 @@ def test_filter(): assert_allclose(data, lp_data + bp_data + hp_data, **tols) assert_allclose(data, bp_data + bs_data, **tols) - filter_params_iir = dict(picks=picks, n_jobs=2, method='iir', - iir_params=dict(output='ba')) + filter_params_iir = dict( + picks=picks, n_jobs=2, method="iir", iir_params=dict(output="ba") + ) raw_lp_iir = raw.copy().filter(None, 4.0, **filter_params_iir) raw_hp_iir = raw.copy().filter(8.0, None, **filter_params_iir) raw_bp_iir = raw.copy().filter(4.0, 8.0, **filter_params_iir) @@ -984,83 +1149,98 @@ def test_filter(): assert not np.may_share_memory(raw_copy._data, raw._data) # this could be assert_array_equal but we do this to mirror the call below assert (raw._data[0] == raw_copy._data[0]).all() - raw_copy.filter(None, 20., n_jobs=2, **filter_params) + raw_copy.filter(None, 20.0, n_jobs=2, **filter_params) assert not (raw._data[0] == raw_copy._data[0]).all() - assert_array_equal(raw.copy().filter(None, 20., **filter_params)._data, - raw_copy._data) + assert_array_equal( + raw.copy().filter(None, 20.0, **filter_params)._data, raw_copy._data + ) # do a very simple check on line filtering raw_bs = raw.copy().filter(60.0 + trans, 60.0 - trans, **filter_params) data_bs, _ = raw_bs[picks, :] raw_notch = raw.copy().notch_filter( - 60.0, picks=picks, n_jobs=2, method='fir', - trans_bandwidth=2 * trans) + 60.0, picks=picks, n_jobs=2, method="fir", trans_bandwidth=2 * trans + ) data_notch, _ = raw_notch[picks, :] assert_array_almost_equal(data_bs, data_notch, sig_dec_notch) # now use the sinusoidal fitting assert raw.times[-1] < 10 # catch error with filter_length > n_times raw_notch = raw.copy().notch_filter( - None, picks=picks, n_jobs=2, method='spectrum_fit', - filter_length='10s') + None, 
picks=picks, n_jobs=2, method="spectrum_fit", filter_length="10s" + ) data_notch, _ = raw_notch[picks, :] data, _ = raw[picks, :] assert_array_almost_equal(data, data_notch, sig_dec_notch_fit) # filter should set the "lowpass" and "highpass" parameters - raw = RawArray(np.random.randn(3, 1000), - create_info(3, 1000., ['eeg'] * 2 + ['stim'])) + raw = RawArray( + np.random.randn(3, 1000), create_info(3, 1000.0, ["eeg"] * 2 + ["stim"]) + ) with raw.info._unlock(): - raw.info['lowpass'] = raw.info['highpass'] = None - for kind in ('none', 'lowpass', 'highpass', 'bandpass', 'bandstop'): + raw.info["lowpass"] = raw.info["highpass"] = None + for kind in ("none", "lowpass", "highpass", "bandpass", "bandstop"): print(kind) h_freq = l_freq = None - if kind in ('lowpass', 'bandpass'): + if kind in ("lowpass", "bandpass"): h_freq = 70 - if kind in ('highpass', 'bandpass'): + if kind in ("highpass", "bandpass"): l_freq = 30 - if kind == 'bandstop': + if kind == "bandstop": l_freq, h_freq = 70, 30 - assert (raw.info['lowpass'] is None) - assert (raw.info['highpass'] is None) - kwargs = dict(l_trans_bandwidth=20, h_trans_bandwidth=20, - filter_length='auto', phase='zero', fir_design='firwin') - raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(1), - **kwargs) - assert (raw.info['lowpass'] is None) - assert (raw.info['highpass'] is None) + assert raw.info["lowpass"] is None + assert raw.info["highpass"] is None + kwargs = dict( + l_trans_bandwidth=20, + h_trans_bandwidth=20, + filter_length="auto", + phase="zero", + fir_design="firwin", + ) + raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(1), **kwargs) + assert raw.info["lowpass"] is None + assert raw.info["highpass"] is None raw_filt = raw.copy().filter(l_freq, h_freq, **kwargs) - wanted_h = h_freq if kind != 'bandstop' else None - wanted_l = l_freq if kind != 'bandstop' else None - assert raw_filt.info['lowpass'] == wanted_h - assert raw_filt.info['highpass'] == wanted_l + wanted_h = h_freq if kind != "bandstop" else None + wanted_l = l_freq if kind != "bandstop" else None + assert raw_filt.info["lowpass"] == wanted_h + assert raw_filt.info["highpass"] == wanted_l # Using all data channels should still set the params (GH#3259) - raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(2), - **kwargs) - assert raw_filt.info['lowpass'] == wanted_h - assert raw_filt.info['highpass'] == wanted_l + raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(2), **kwargs) + assert raw_filt.info["lowpass"] == wanted_h + assert raw_filt.info["highpass"] == wanted_l def test_filter_picks(): """Test filtering default channel picks.""" - ch_types = ['mag', 'grad', 'eeg', 'seeg', 'dbs', 'misc', 'stim', 'ecog', - 'hbo', 'hbr'] + ch_types = [ + "mag", + "grad", + "eeg", + "seeg", + "dbs", + "misc", + "stim", + "ecog", + "hbo", + "hbr", + ] info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256) raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info) # -- Deal with meg mag grad and fnirs exceptions - ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'dbs', 'ecog') + ch_types = ("misc", "stim", "meg", "eeg", "seeg", "dbs", "ecog") # -- Filter data channels - for ch_type in ('mag', 'grad', 'eeg', 'seeg', 'dbs', 'ecog', 'hbo', 'hbr'): + for ch_type in ("mag", "grad", "eeg", "seeg", "dbs", "ecog", "hbo", "hbr"): picks = {ch: ch == ch_type for ch in ch_types} - picks['meg'] = ch_type if ch_type in ('mag', 'grad') else False - picks['fnirs'] = ch_type if ch_type in ('hbo', 'hbr') else False + picks["meg"] = ch_type if 
ch_type in ("mag", "grad") else False + picks["fnirs"] = ch_type if ch_type in ("hbo", "hbr") else False raw_ = raw.copy().pick_types(**picks) - raw_.filter(10, 30, fir_design='firwin') + raw_.filter(10, 30, fir_design="firwin") # -- Error if no data channel - for ch_type in ('misc', 'stim'): + for ch_type in ("misc", "stim"): picks = {ch: ch == ch_type for ch in ch_types} raw_ = raw.copy().pick_types(**picks) pytest.raises(ValueError, raw_.filter, 10, 30) @@ -1070,14 +1250,13 @@ def test_filter_picks(): def test_crop(): """Test cropping raw files.""" # split a concatenated file to test a difficult case - raw = concatenate_raws([read_raw_fif(f) - for f in [fif_fname, fif_fname]]) - split_size = 10. # in seconds - sfreq = raw.info['sfreq'] - nsamp = (raw.last_samp - raw.first_samp + 1) + raw = concatenate_raws([read_raw_fif(f) for f in [fif_fname, fif_fname]]) + split_size = 10.0 # in seconds + sfreq = raw.info["sfreq"] + nsamp = raw.last_samp - raw.first_samp + 1 # do an annoying case (off-by-one splitting) - tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))] + tmins = np.r_[1.0, np.round(np.arange(0.0, nsamp - 1, split_size * sfreq))] tmins = np.sort(tmins) tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1])) tmaxs /= sfreq @@ -1088,14 +1267,15 @@ def test_crop(): if ri < len(tmins) - 1: assert_allclose( raws[ri].times, - raw.copy().crop(tmin, tmins[ri + 1], include_tmax=False).times) + raw.copy().crop(tmin, tmins[ri + 1], include_tmax=False).times, + ) assert raws[ri] all_raw_2 = concatenate_raws(raws, preload=False) assert raw.first_samp == all_raw_2.first_samp assert raw.last_samp == all_raw_2.last_samp assert_array_equal(raw[:, :][0], all_raw_2[:, :][0]) - tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq)) + tmins = np.round(np.arange(0.0, nsamp - 1, split_size * sfreq)) tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1])) tmaxs /= sfreq tmins /= sfreq @@ -1123,60 +1303,83 @@ def test_crop(): assert raw1[:][0].shape == (1, 2001) # degenerate - with pytest.raises(ValueError, match='No samples.*when include_tmax=Fals'): + with pytest.raises(ValueError, match="No samples.*when include_tmax=False"): raw.crop(0, 0, include_tmax=False) # edge cases cropping to exact duration +/- 1 sample data = np.zeros((1, 100)) info = create_info(1, 100) raw = RawArray(data, info) - with pytest.raises(ValueError, match='tmax \\(1\\) must be less than or '): + with pytest.raises(ValueError, match="tmax \\(1\\) must be less than or "): raw.copy().crop(tmax=1, include_tmax=True) - raw1 = raw.copy().crop(tmax=1 - 1 / raw.info['sfreq'], include_tmax=True) + raw1 = raw.copy().crop(tmax=1 - 1 / raw.info["sfreq"], include_tmax=True) assert raw.n_times == raw1.n_times raw2 = raw.copy().crop(tmax=1, include_tmax=False) assert raw.n_times == raw2.n_times - raw3 = raw.copy().crop(tmax=1 - 1 / raw.info['sfreq'], include_tmax=False) + raw3 = raw.copy().crop(tmax=1 - 1 / raw.info["sfreq"], include_tmax=False) assert raw.n_times - 1 == raw3.n_times +@testing.requires_testing_data +def test_resample_with_events(): + """Test resampling raws with events.""" + raw = read_raw_fif(fif_fname) + raw.resample(250) # pretend raw is recorded at 250 Hz + events, _ = events_from_annotations(raw) + raw, events = raw.resample(250, events=events) + + @testing.requires_testing_data def test_resample_equiv(): """Test resample (with I/O and multiple files).""" raw = read_raw_fif(fif_fname).crop(0, 1) raw_preload = raw.copy().load_data() for r in (raw, raw_preload): - r.resample(r.info['sfreq'] / 4.) 
+ r.resample(r.info["sfreq"] / 4.0) assert_allclose(raw._data, raw_preload._data) @pytest.mark.slowtest @testing.requires_testing_data -@pytest.mark.parametrize('preload, n, npad', [ - (True, 512, 'auto'), - (False, 512, 0), -]) -def test_resample(tmp_path, preload, n, npad): +@pytest.mark.parametrize( + "preload, n, npad, method", + [ + (True, 512, "auto", "fft"), + (True, 512, "auto", "polyphase"), + (False, 512, 0, "fft"), # only test one with non-preload because it's slow + ], +) +def test_resample(tmp_path, preload, n, npad, method): """Test resample (with I/O and multiple files).""" + kwargs = dict(npad=npad, method=method) raw = read_raw_fif(fif_fname) raw.crop(0, raw.times[n - 1]) + # Reduce to a few MEG channels and a few stim channels to speed up + n_meg = 5 + raw.pick(raw.ch_names[:n_meg] + raw.ch_names[312:320]) # 10 MEG + 3 STIM + 5 EEG assert len(raw.times) == n if preload: raw.load_data() raw_resamp = raw.copy() - sfreq = raw.info['sfreq'] + sfreq = raw.info["sfreq"] # test parallel on upsample - raw_resamp.resample(sfreq * 2, n_jobs=2, npad=npad) + raw_resamp.resample(sfreq * 2, n_jobs=2, **kwargs) assert raw_resamp.n_times == len(raw_resamp.times) - raw_resamp.save(tmp_path / 'raw_resamp-raw.fif') - raw_resamp = read_raw_fif(tmp_path / 'raw_resamp-raw.fif', preload=True) - assert sfreq == raw_resamp.info['sfreq'] / 2 + raw_resamp.save(tmp_path / "raw_resamp-raw.fif") + raw_resamp = read_raw_fif(tmp_path / "raw_resamp-raw.fif", preload=True) + assert sfreq == raw_resamp.info["sfreq"] / 2 assert raw.n_times == raw_resamp.n_times // 2 assert raw_resamp.get_data().shape[1] == raw_resamp.n_times assert raw.get_data().shape[0] == raw_resamp._data.shape[0] # test non-parallel on downsample - raw_resamp.resample(sfreq, n_jobs=None, npad=npad) - assert raw_resamp.info['sfreq'] == sfreq + with catch_logging() as log: + raw_resamp.resample(sfreq, n_jobs=None, verbose=True, **kwargs) + log = log.getvalue() + if method == "fft": + assert "neighborhood" not in log + else: + assert "neighborhood" in log + assert raw_resamp.info["sfreq"] == sfreq assert raw.get_data().shape == raw_resamp._data.shape assert raw.first_samp == raw_resamp.first_samp assert raw.last_samp == raw.last_samp @@ -1184,12 +1387,12 @@ def test_resample(tmp_path, preload, n, npad): # works (hooray). 
Note that the stim channels had to be sub-sampled # without filtering to be accurately preserved # note we have to treat MEG and EEG+STIM channels differently (tols) - assert_allclose(raw.get_data()[:306, 200:-200], - raw_resamp._data[:306, 200:-200], - rtol=1e-2, atol=1e-12) - assert_allclose(raw.get_data()[306:, 200:-200], - raw_resamp._data[306:, 200:-200], - rtol=1e-2, atol=1e-7) + want_meg = raw.get_data()[:n_meg, 200:-200] + got_meg = raw_resamp._data[:n_meg, 200:-200] + want_non_meg = raw.get_data()[n_meg:, 200:-200] + got_non_meg = raw_resamp._data[n_meg:, 200:-200] + assert_allclose(got_meg, want_meg, rtol=1e-2, atol=1e-12) + assert_allclose(want_non_meg, got_non_meg, rtol=1e-2, atol=1e-7) # now check multiple file support w/resampling, as order of operations # (concat, resample) should not affect our data @@ -1198,9 +1401,9 @@ def test_resample(tmp_path, preload, n, npad): raw3 = raw.copy() raw4 = raw.copy() raw1 = concatenate_raws([raw1, raw2]) - raw1.resample(10., npad=npad) - raw3.resample(10., npad=npad) - raw4.resample(10., npad=npad) + raw1.resample(10.0, **kwargs) + raw3.resample(10.0, **kwargs) + raw4.resample(10.0, **kwargs) raw3 = concatenate_raws([raw3, raw4]) assert_array_equal(raw1._data, raw3._data) assert_array_equal(raw1._first_samps, raw3._first_samps) @@ -1208,7 +1411,7 @@ def test_resample(tmp_path, preload, n, npad): assert_array_equal(raw1._raw_lengths, raw3._raw_lengths) assert raw1.first_samp == raw3.first_samp assert raw1.last_samp == raw3.last_samp - assert raw1.info['sfreq'] == raw3.info['sfreq'] + assert raw1.info["sfreq"] == raw3.info["sfreq"] # smoke test crop after resample raw4.crop(tmin=raw4.times[1], tmax=raw4.times[-1]) @@ -1217,34 +1420,33 @@ def test_resample(tmp_path, preload, n, npad): # basic decimation stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0] - raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - assert_allclose(raw.resample(8., npad=npad)._data, - [[1, 1, 0, 0, 1, 1, 0, 0]]) + raw = RawArray([stim], create_info(1, len(stim), ["stim"])) + assert_allclose(raw.resample(8.0, **kwargs)._data, [[1, 1, 0, 0, 1, 1, 0, 0]]) # decimation of multiple stim channels - raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim'])) - assert_allclose(raw.resample(8., npad=npad, verbose='error')._data, - [[1, 1, 0, 0, 1, 1, 0, 0], - [1, 1, 0, 0, 1, 1, 0, 0]]) + raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ["stim"])) + assert_allclose( + raw.resample(8.0, **kwargs, verbose="error")._data, + [[1, 1, 0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1, 0, 0]], + ) # decimation that could potentially drop events if the decimation is # done naively stim = [0, 0, 0, 1, 1, 0, 0, 0] - raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - assert_allclose(raw.resample(4., npad=npad)._data, - [[0, 1, 1, 0]]) + raw = RawArray([stim], create_info(1, len(stim), ["stim"])) + assert_allclose(raw.resample(4.0, **kwargs)._data, [[0, 1, 1, 0]]) # two events are merged in this case (warning) stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0] - raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - with pytest.warns(RuntimeWarning, match='become unreliable'): - raw.resample(8., npad=npad) + raw = RawArray([stim], create_info(1, len(stim), ["stim"])) + with pytest.warns(RuntimeWarning, match="become unreliable"): + raw.resample(8.0, **kwargs) # events are dropped in this case (warning) stim = [0, 1, 1, 0, 0, 1, 1, 0] - raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - with pytest.warns(RuntimeWarning, match='become 
unreliable'): - raw.resample(4., npad=npad) + raw = RawArray([stim], create_info(1, len(stim), ["stim"])) + with pytest.warns(RuntimeWarning, match="become unreliable"): + raw.resample(4.0, **kwargs) # test resampling events: this should no longer give a warning # we often have first_samp != 0, include it here too @@ -1253,10 +1455,9 @@ def test_resample(tmp_path, preload, n, npad): o_sfreq, sfreq_ratio = len(stim), 0.5 n_sfreq = o_sfreq * sfreq_ratio first_samp = len(stim) // 2 - raw = RawArray([stim], create_info(1, o_sfreq, ['stim']), - first_samp=first_samp) + raw = RawArray([stim], create_info(1, o_sfreq, ["stim"]), first_samp=first_samp) events = find_events(raw) - raw, events = raw.resample(n_sfreq, events=events, npad=npad) + raw, events = raw.resample(n_sfreq, events=events, **kwargs) # Try index into raw.times with resampled events: raw.times[events[:, 0] - raw.first_samp] n_fsamp = int(first_samp * sfreq_ratio) # how it's calc'd in base.py @@ -1264,62 +1465,80 @@ def test_resample(tmp_path, preload, n, npad): # https://docs.scipy.org/doc/numpy/reference/generated/numpy.around.html assert_array_equal( events, - np.array([[np.round(1 * sfreq_ratio) + n_fsamp, 0, 1], - [np.round(10 * sfreq_ratio) + n_fsamp, 0, 1], - [np.minimum(np.round(15 * sfreq_ratio), - raw._data.shape[1] - 1) + n_fsamp, 0, 1]])) + np.array( + [ + [np.round(1 * sfreq_ratio) + n_fsamp, 0, 1], + [np.round(10 * sfreq_ratio) + n_fsamp, 0, 1], + [ + np.minimum(np.round(15 * sfreq_ratio), raw._data.shape[1] - 1) + + n_fsamp, + 0, + 1, + ], + ] + ), + ) # test copy flag stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0] - raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - raw_resampled = raw.copy().resample(4., npad=npad) - assert (raw_resampled is not raw) - raw_resampled = raw.resample(4., npad=npad) - assert (raw_resampled is raw) + raw = RawArray([stim], create_info(1, len(stim), ["stim"])) + raw_resampled = raw.copy().resample(4.0, **kwargs) + assert raw_resampled is not raw + raw_resampled = raw.resample(4.0, **kwargs) + assert raw_resampled is raw # resample should still work even when no stim channel is present - raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg'])) + raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ["eeg"])) with raw.info._unlock(): - raw.info['lowpass'] = 50. - raw.resample(10, npad=npad) - assert raw.info['lowpass'] == 5. 
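# ---- Usage sketch (illustrative, not part of the patch) ----
# The parametrization above exercises the ``method`` option of ``Raw.resample``
# ("fft" vs. "polyphase") and resampling together with an events array, as in
# ``test_resample_with_events``. The file path below is a placeholder, and
# ``method="polyphase"`` assumes an MNE version that supports it.
import mne

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # placeholder path
events, _ = mne.events_from_annotations(raw)
# Resampling the data and the events together keeps event samples aligned.
raw, events = raw.resample(100.0, method="polyphase", events=events)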
+ raw.info["lowpass"] = 50.0 + raw.resample(10, **kwargs) + assert raw.info["lowpass"] == 5.0 assert len(raw) == 10 def test_resample_stim(): """Test stim_picks argument.""" data = np.ones((2, 1000)) - info = create_info(2, 1000., ('eeg', 'misc')) + info = create_info(2, 1000.0, ("eeg", "misc")) raw = RawArray(data, info) - raw.resample(500., stim_picks='misc') + raw.resample(500.0, stim_picks="misc") @testing.requires_testing_data def test_hilbert(): """Test computation of analytic signal using hilbert.""" raw = read_raw_fif(fif_fname, preload=True) - picks_meg = pick_types(raw.info, meg=True, exclude='bads') + picks_meg = pick_types(raw.info, meg=True, exclude="bads") picks = picks_meg[:4] raw_filt = raw.copy() - raw_filt.filter(10, 20, picks=picks, l_trans_bandwidth='auto', - h_trans_bandwidth='auto', filter_length='auto', - phase='zero', fir_window='blackman', fir_design='firwin') + raw_filt.filter( + 10, + 20, + picks=picks, + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + filter_length="auto", + phase="zero", + fir_window="blackman", + fir_design="firwin", + ) raw_filt_2 = raw_filt.copy() raw2 = raw.copy() raw3 = raw.copy() - raw.apply_hilbert(picks, n_fft='auto') - raw2.apply_hilbert(picks, n_fft='auto', envelope=True) + raw.apply_hilbert(picks, n_fft="auto") + raw2.apply_hilbert(picks, n_fft="auto", envelope=True) # Test custom n_fft - raw_filt.apply_hilbert(picks, n_fft='auto') + raw_filt.apply_hilbert(picks, n_fft="auto") n_fft = 2 ** int(np.ceil(np.log2(raw_filt_2.n_times + 1000))) raw_filt_2.apply_hilbert(picks, n_fft=n_fft) assert raw_filt._data.shape == raw_filt_2._data.shape - assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50], - atol=1e-13, rtol=1e-2) - with pytest.raises(ValueError, match='n_fft.*must be at least the number'): + assert_allclose( + raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50], atol=1e-13, rtol=1e-2 + ) + with pytest.raises(ValueError, match="n_fft.*must be at least the number"): raw3.apply_hilbert(picks, n_fft=raw3.n_times - 100) env = np.abs(raw._data[picks, :]) @@ -1344,57 +1563,60 @@ def test_raw_copy(): assert sorted(raw.__dict__.keys()) == sorted(copied.__dict__.keys()) -@requires_pandas def test_to_data_frame(): """Test raw Pandas exporter.""" - from pandas import Timedelta + pd = pytest.importorskip("pandas") raw = read_raw_fif(test_fif_fname).crop(0, 1).load_data() - df = raw.to_data_frame(index='time') - assert ((df.columns == raw.ch_names).all()) + df = raw.to_data_frame(index="time") + assert (df.columns == raw.ch_names).all() df = raw.to_data_frame(index=None) - assert ('time' in df.columns) + assert "time" in df.columns assert_array_equal(df.values[:, 1], raw._data[0] * 1e13) assert_array_equal(df.values[:, 3], raw._data[2] * 1e15) # test long format df_long = raw.to_data_frame(long_format=True) assert len(df_long) == raw.get_data().size - expected = ('time', 'channel', 'ch_type', 'value') + expected = ("time", "channel", "ch_type", "value") assert set(expected) == set(df_long.columns) # test bad time format - with pytest.raises(ValueError, match='not a valid time format. Valid'): - raw.to_data_frame(time_format='foo') + with pytest.raises(ValueError, match="not a valid time format. 
Valid"): + raw.to_data_frame(time_format="foo") # test time format error handling raw.set_meas_date(None) - with pytest.warns(RuntimeWarning, match='Cannot convert to Datetime when'): - df = raw.to_data_frame(time_format='datetime') - assert isinstance(df['time'].iloc[0], Timedelta) + with pytest.warns(RuntimeWarning, match="Cannot convert to Datetime when"): + df = raw.to_data_frame(time_format="datetime") + assert isinstance(df["time"].iloc[0], pd.Timedelta) -@requires_pandas -@pytest.mark.parametrize('time_format', (None, 'ms', 'timedelta', 'datetime')) +@pytest.mark.parametrize("time_format", (None, "ms", "timedelta", "datetime")) def test_to_data_frame_time_format(time_format): """Test time conversion in epochs Pandas exporter.""" - from pandas import Timedelta, Timestamp, to_timedelta + pd = pytest.importorskip("pandas") raw = read_raw_fif(test_fif_fname, preload=True) # test time_format df = raw.to_data_frame(time_format=time_format) - dtypes = {None: np.float64, 'ms': np.int64, 'timedelta': Timedelta, - 'datetime': Timestamp} - assert isinstance(df['time'].iloc[0], dtypes[time_format]) + dtypes = { + None: np.float64, + "ms": np.int64, + "timedelta": pd.Timedelta, + "datetime": pd.Timestamp, + } + assert isinstance(df["time"].iloc[0], dtypes[time_format]) # test values _, times = raw[0, :10] - offset = 0. - if time_format == 'datetime': + offset = 0.0 + if time_format == "datetime": times += raw.first_time - offset = raw.info['meas_date'] - elif time_format == 'timedelta': - offset = Timedelta(0.) - funcs = {None: lambda x: x, - 'ms': lambda x: np.rint(x * 1e3).astype(int), # s → ms - 'timedelta': partial(to_timedelta, unit='s'), - 'datetime': partial(to_timedelta, unit='s') - } - assert_array_equal(funcs[time_format](times) + offset, df['time'][:10]) + offset = raw.info["meas_date"] + elif time_format == "timedelta": + offset = pd.Timedelta(0.0) + funcs = { + None: lambda x: x, + "ms": lambda x: np.rint(x * 1e3).astype(int), # s → ms + "timedelta": partial(pd.to_timedelta, unit="s"), + "datetime": partial(pd.to_timedelta, unit="s"), + } + assert_array_equal(funcs[time_format](times) + offset, df["time"][:10]) def test_add_channels(): @@ -1403,34 +1625,35 @@ def test_add_channels(): raw = read_raw_fif(test_fif_fname).crop(0, 1).load_data() assert raw._orig_units == {} raw_nopre = read_raw_fif(test_fif_fname, preload=False) - raw_eeg_meg = raw.copy().pick_types(meg=True, eeg=True) - raw_eeg = raw.copy().pick_types(eeg=True) - raw_meg = raw.copy().pick_types(meg=True) - raw_stim = raw.copy().pick_types(stim=True) + raw_eeg_meg = raw.copy().pick(picks=["meg", "eeg"]) + raw_eeg = raw.copy().pick(picks="eeg") + raw_meg = raw.copy().pick(picks="meg") + raw_stim = raw.copy().pick(picks="stim") raw_new = raw_meg.copy().add_channels([raw_eeg, raw_stim]) - assert ( - all(ch in raw_new.ch_names - for ch in list(raw_stim.ch_names) + list(raw_meg.ch_names)) + assert all( + ch in raw_new.ch_names + for ch in list(raw_stim.ch_names) + list(raw_meg.ch_names) ) raw_new = raw_meg.copy().add_channels([raw_eeg]) assert (ch in raw_new.ch_names for ch in raw.ch_names) assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0]) assert_array_equal(raw_new[:, :][1], raw[:, :][1]) - assert (all(ch not in raw_new.ch_names for ch in raw_stim.ch_names)) + assert all(ch not in raw_new.ch_names for ch in raw_stim.ch_names) # Testing force updates - raw_arr_info = create_info(['1', '2'], raw_meg.info['sfreq'], 'eeg') - orig_head_t = raw_arr_info['dev_head_t'] + raw_arr_info = create_info(["1", "2"], 
raw_meg.info["sfreq"], "eeg") + assert raw_arr_info["dev_head_t"] is None + orig_head_t = Transform("meg", "head") raw_arr = rng.randn(2, raw_eeg.n_times) raw_arr = RawArray(raw_arr, raw_arr_info) # This should error because of conflicts in Info - raw_arr.info['dev_head_t'] = orig_head_t - with pytest.raises(ValueError, match='mutually inconsistent dev_head_t'): + raw_arr.info["dev_head_t"] = orig_head_t + with pytest.raises(ValueError, match="mutually inconsistent dev_head_t"): raw_meg.copy().add_channels([raw_arr]) raw_meg.copy().add_channels([raw_arr], force_update_info=True) # Make sure that values didn't get overwritten - assert_object_equal(raw_arr.info['dev_head_t'], orig_head_t) + assert_object_equal(raw_arr.info["dev_head_t"], orig_head_t) # Make sure all variants work for simult in (False, True): # simultaneous adding or not raw_new = raw_meg.copy() @@ -1441,14 +1664,15 @@ def test_add_channels(): raw_new.add_channels([raw_stim]) for other in (raw_meg, raw_stim, raw_eeg): assert_allclose( - raw_new.copy().pick_channels(other.ch_names).get_data(), - other.get_data()) + raw_new.copy().pick(other.ch_names).get_data(), + other.get_data(), + ) # Now test errors raw_badsf = raw_eeg.copy() with raw_badsf.info._unlock(): - raw_badsf.info['sfreq'] = 3.1415927 - raw_eeg.crop(.5) + raw_badsf.info["sfreq"] = 3.1415927 + raw_eeg.crop(0.5) pytest.raises(RuntimeError, raw_meg.add_channels, [raw_nopre]) pytest.raises(RuntimeError, raw_meg.add_channels, [raw_badsf]) @@ -1460,23 +1684,23 @@ def test_add_channels(): @testing.requires_testing_data def test_save(tmp_path): """Test saving raw.""" - temp_fname = tmp_path / 'test_raw.fif' + temp_fname = tmp_path / "test_raw.fif" shutil.copyfile(fif_fname, temp_fname) raw = read_raw_fif(temp_fname, preload=False) # can't write over file being read - with pytest.raises(ValueError, match='to the same file'): + with pytest.raises(ValueError, match="to the same file"): raw.save(temp_fname) raw.load_data() # can't overwrite file without overwrite=True - with pytest.raises(IOError, match='file exists'): + with pytest.raises(OSError, match="file exists"): raw.save(fif_fname) # test abspath support and annotations - orig_time = _dt_to_stamp(raw.info['meas_date'])[0] + raw._first_time - annot = Annotations([10], [5], ['test'], orig_time=orig_time) + orig_time = _dt_to_stamp(raw.info["meas_date"])[0] + raw._first_time + annot = Annotations([10], [5], ["test"], orig_time=orig_time) raw.set_annotations(annot) annot = raw.annotations - new_fname = tmp_path / 'break_raw.fif' + new_fname = tmp_path / "break_raw.fif" raw.save(new_fname, overwrite=True) new_raw = read_raw_fif(new_fname, preload=False) pytest.raises(ValueError, new_raw.save, new_fname) @@ -1489,37 +1713,38 @@ def test_save(tmp_path): raw.set_meas_date(None) raw.save(new_fname, overwrite=True) new_raw = read_raw_fif(new_fname, preload=False) - assert new_raw.info['meas_date'] is None + assert new_raw.info["meas_date"] is None @testing.requires_testing_data def test_annotation_crop(tmp_path): """Test annotation sync after cropping and concatenating.""" - annot = Annotations([5., 11., 15.], [2., 1., 3.], ['test', 'test', 'test']) + annot = Annotations([5.0, 11.0, 15.0], [2.0, 1.0, 3.0], ["test", "test", "test"]) raw = read_raw_fif(fif_fname, preload=False) raw.set_annotations(annot) r1 = raw.copy().crop(2.5, 7.5) r2 = raw.copy().crop(12.5, 17.5) - r3 = raw.copy().crop(10., 12.) 
+ r3 = raw.copy().crop(10.0, 12.0) raw = concatenate_raws([r1, r2, r3]) # segments reordered assert_and_remove_boundary_annot(raw, 2) onsets = raw.annotations.onset durations = raw.annotations.duration # 2*5s clips combined with annotations at 2.5s + 2s clip, annotation at 1s assert_array_almost_equal(onsets[:3], [47.95, 52.95, 56.46], decimal=2) - assert_array_almost_equal([2., 2.5, 1.], durations[:3], decimal=2) + assert_array_almost_equal([2.0, 2.5, 1.0], durations[:3], decimal=2) # test annotation clipping - orig_time = _dt_to_stamp(raw.info['meas_date']) - orig_time = orig_time[0] + orig_time[1] * 1e-6 + raw._first_time - 1. - annot = Annotations([0., raw.times[-1]], [2., 2.], 'test', orig_time) - with pytest.warns(RuntimeWarning, match='Limited .* expanding outside'): + orig_time = _dt_to_stamp(raw.info["meas_date"]) + orig_time = orig_time[0] + orig_time[1] * 1e-6 + raw._first_time - 1.0 + annot = Annotations([0.0, raw.times[-1]], [2.0, 2.0], "test", orig_time) + with pytest.warns(RuntimeWarning, match="Limited .* expanding outside"): raw.set_annotations(annot) - assert_allclose(raw.annotations.duration, - [1., 1. + 1. / raw.info['sfreq']], atol=1e-3) + assert_allclose( + raw.annotations.duration, [1.0, 1.0 + 1.0 / raw.info["sfreq"]], atol=1e-3 + ) # make sure we can overwrite the file we loaded when preload=True - new_fname = tmp_path / 'break_raw.fif' + new_fname = tmp_path / "break_raw.fif" raw.save(new_fname) new_raw = read_raw_fif(new_fname, preload=True) new_raw.save(new_fname, overwrite=True) @@ -1555,16 +1780,16 @@ def test_compensation_raw(tmp_path): assert raw_0.compensation_grade == 0 data_0, times_new = raw_0[:, :] assert_array_equal(times, times_new) - assert (np.mean(np.abs(data_0 - data_3)) > 1e-12) + assert np.mean(np.abs(data_0 - data_3)) > 1e-12 # change to grade 1 raw_1 = raw_0.copy().apply_gradient_compensation(1) assert raw_1.compensation_grade == 1 data_1, times_new = raw_1[:, :] assert_array_equal(times, times_new) - assert (np.mean(np.abs(data_1 - data_3)) > 1e-12) + assert np.mean(np.abs(data_1 - data_3)) > 1e-12 pytest.raises(ValueError, raw_1.apply_gradient_compensation, 33) raw_bad = raw_0.copy() - raw_bad.add_proj(compute_proj_raw(raw_0, duration=0.5, verbose='error')) + raw_bad.add_proj(compute_proj_raw(raw_0, duration=0.5, verbose="error")) raw_bad.apply_proj() pytest.raises(RuntimeError, raw_bad.apply_gradient_compensation, 1) # with preload @@ -1573,7 +1798,7 @@ def test_compensation_raw(tmp_path): assert raw_1_new.compensation_grade == 1 data_1_new, times_new = raw_1_new[:, :] assert_array_equal(times, times_new) - assert (np.mean(np.abs(data_1_new - data_3)) > 1e-12) + assert np.mean(np.abs(data_1_new - data_3)) > 1e-12 assert_allclose(data_1, data_1_new, **tols) # change back raw_3_new = raw_1.copy().apply_gradient_compensation(3) @@ -1592,11 +1817,11 @@ def test_compensation_raw(tmp_path): assert raw_3_new.compensation_grade == 3 data_3_new, times_new = raw_3_new[:, :] assert_array_equal(times, times_new) - assert (np.mean(np.abs(data_3_new - data_1)) > 1e-12) + assert np.mean(np.abs(data_3_new - data_1)) > 1e-12 assert_allclose(data_3, data_3_new, **tols) # Try IO with compensation - temp_file = tmp_path / 'raw.fif' + temp_file = tmp_path / "raw.fif" raw_3.save(temp_file, overwrite=True) for preload in (True, False): raw_read = read_raw_fif(temp_file, preload=preload) @@ -1634,10 +1859,20 @@ def test_compensation_raw(tmp_path): @requires_mne def test_compensation_raw_mne(tmp_path): """Test Raw compensation by comparing with MNE-C.""" + def 
compensate_mne(fname, grad): - tmp_fname = tmp_path / 'mne_ctf_test_raw.fif' - cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname, - '--grad', str(grad), '--projoff', '--filteroff'] + tmp_fname = tmp_path / "mne_ctf_test_raw.fif" + cmd = [ + "mne_process_raw", + "--raw", + fname, + "--save", + tmp_fname, + "--grad", + str(grad), + "--projoff", + "--filteroff", + ] run_subprocess(cmd) return read_raw_fif(tmp_fname, preload=True) @@ -1646,12 +1881,19 @@ def compensate_mne(fname, grad): raw_py.apply_gradient_compensation(grad) raw_c = compensate_mne(ctf_comp_fname, grad) assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17) - assert raw_py.info['nchan'] == raw_c.info['nchan'] - for ch_py, ch_c in zip(raw_py.info['chs'], raw_c.info['chs']): - for key in ('ch_name', 'coil_type', 'scanno', 'logno', 'unit', - 'coord_frame', 'kind'): + assert raw_py.info["nchan"] == raw_c.info["nchan"] + for ch_py, ch_c in zip(raw_py.info["chs"], raw_c.info["chs"]): + for key in ( + "ch_name", + "coil_type", + "scanno", + "logno", + "unit", + "coord_frame", + "kind", + ): assert ch_py[key] == ch_c[key] - for key in ('loc', 'unit_mul', 'range', 'cal'): + for key in ("loc", "unit_mul", "range", "cal"): assert_allclose(ch_py[key], ch_c[key]) @@ -1676,23 +1918,23 @@ def test_drop_channels_mixin(): # Test that dropping all channels a projector applies to will lead to the # removal of said projector. raw = read_raw_fif(fif_fname).crop(0, 1) - n_projs = len(raw.info['projs']) - eeg_names = raw.info['projs'][-1]['data']['col_names'] - with pytest.raises(RuntimeError, match='loaded'): + n_projs = len(raw.info["projs"]) + eeg_names = raw.info["projs"][-1]["data"]["col_names"] + with pytest.raises(RuntimeError, match="loaded"): raw.copy().apply_proj().drop_channels(eeg_names) raw.load_data().drop_channels(eeg_names) # EEG proj - assert len(raw.info['projs']) == n_projs - 1 + assert len(raw.info["projs"]) == n_projs - 1 # Dropping EEG channels with custom ref removes info['custom_ref_applied'] raw = read_raw_fif(fif_fname).crop(0, 1).load_data() raw.set_eeg_reference() - assert raw.info['custom_ref_applied'] + assert raw.info["custom_ref_applied"] raw.drop_channels(eeg_names) - assert not raw.info['custom_ref_applied'] + assert not raw.info["custom_ref_applied"] @testing.requires_testing_data -@pytest.mark.parametrize('preload', (True, False)) +@pytest.mark.parametrize("preload", (True, False)) def test_pick_channels_mixin(preload): """Test channel-picking functionality.""" raw = read_raw_fif(fif_fname, preload=preload) @@ -1700,17 +1942,17 @@ def test_pick_channels_mixin(preload): ch_names = raw.ch_names[:3] ch_names_orig = raw.ch_names - dummy = raw.copy().pick_channels(ch_names) + dummy = raw.copy().pick(ch_names) assert ch_names == dummy.ch_names assert ch_names_orig == raw.ch_names assert len(ch_names_orig) == raw.get_data().shape[0] - raw.pick_channels(ch_names) # copy is False + raw.pick(ch_names) # copy is False assert ch_names == raw.ch_names assert len(ch_names) == len(raw._cals) assert len(ch_names) == raw.get_data().shape[0] - with pytest.raises(ValueError, match='must be'): - raw.pick_channels(ch_names[0]) + with pytest.raises(ValueError, match='must be list, tuple, ndarray, or "bads"'): + raw.pick_channels(ch_names[0]) # legacy method OK here; testing its warning assert_allclose(raw[:][0], raw_orig[:3][0]) @@ -1733,7 +1975,7 @@ def test_equalize_channels(): def test_memmap(tmp_path): """Test some interesting memmapping cases.""" # concatenate_raw - memmaps = [str(tmp_path / str(ii)) for ii 
in range(3)] + memmaps = [str(tmp_path / str(ii)) for ii in range(4)] raw_0 = read_raw_fif(test_fif_fname, preload=memmaps[0]) assert raw_0._data.filename == memmaps[0] raw_1 = read_raw_fif(test_fif_fname, preload=memmaps[1]) @@ -1743,13 +1985,13 @@ def test_memmap(tmp_path): # add_channels orig_data = raw_0[:][0] new_ch_info = pick_info(raw_0.info, [0]) - new_ch_info['chs'][0]['ch_name'] = 'foo' + new_ch_info["chs"][0]["ch_name"] = "foo" new_ch_info._update_redundant() new_data = np.linspace(0, 1, len(raw_0.times))[np.newaxis] ch = RawArray(new_data, new_ch_info) raw_0.add_channels([ch]) - if sys.platform == 'darwin': - assert not hasattr(raw_0._data, 'filename') + if platform.system() == "Darwin": + assert not hasattr(raw_0._data, "filename") else: assert raw_0._data.filename == memmaps[2] assert_allclose(orig_data, raw_0[:-1][0], atol=1e-7) @@ -1758,13 +2000,13 @@ def test_memmap(tmp_path): # now let's see if .copy() actually works; it does, but eventually # we should make it optionally memmap to a new filename rather than # create an in-memory version (filename=None) - raw_0 = read_raw_fif(test_fif_fname, preload=memmaps[0]) - assert raw_0._data.filename == memmaps[0] + raw_0 = read_raw_fif(test_fif_fname, preload=memmaps[3]) + assert raw_0._data.filename == memmaps[3] assert raw_0._data[:1, 3:5].all() raw_1 = raw_0.copy() assert isinstance(raw_1._data, np.memmap) assert raw_1._data.filename is None - raw_0._data[:] = 0. + raw_0._data[:] = 0.0 assert not raw_0._data.any() assert raw_1._data[:1, 3:5].all() # other things like drop_channels and crop work but do not use memmapping, @@ -1773,44 +2015,80 @@ def test_memmap(tmp_path): # These are slow on Azure Windows so let's do a subset -@pytest.mark.parametrize('kind', [ - 'file', - pytest.param('bytes', marks=pytest.mark.slowtest), -]) -@pytest.mark.parametrize('preload', [ - True, - pytest.param(str, marks=pytest.mark.slowtest), -]) -@pytest.mark.parametrize('split', [ - False, - pytest.param(True, marks=pytest.mark.slowtest), -]) +@pytest.mark.parametrize( + "kind", + ["path", pytest.param("file", id="kindFile"), "bytes"], +) +@pytest.mark.parametrize( + "preload", + [pytest.param(True, id="preloadTrue"), str], +) +@pytest.mark.parametrize( + "split", + [False, pytest.param(True, marks=pytest.mark.slowtest, id="splitTrue")], +) def test_file_like(kind, preload, split, tmp_path): """Test handling with file-like objects.""" + fname = tmp_path / "test_file_like_raw.fif" + fnames = (fname,) + this_raw = read_raw_fif(test_fif_fname).crop(0, 4).pick("mag") if split: - fname = tmp_path / 'test_raw.fif' - read_raw_fif(test_fif_fname).save(fname, split_size='5MB') - assert fname.is_file() - assert Path(str(fname)[:-4] + '-1.fif').is_file() + this_raw.save(fname, split_size="5MB") + fnames += (Path(str(fname)[:-4] + "-1.fif"),) + bad_fname = Path(str(fname)[:-4] + "-2.fif") + assert not bad_fname.is_file() else: - fname = test_fif_fname + this_raw.save(fname) + for f in fnames: + assert f.is_file() if preload is str: - preload = str(tmp_path / 'memmap') - with open(str(fname), 'rb') as file_fid: - fid = BytesIO(file_fid.read()) if kind == 'bytes' else file_fid - assert not fid.closed + if platform.system() == "Windows": + pytest.skip("Cannot test preload=str on Windows") + preload = str(tmp_path / "memmap") + with open(fname, "rb") as file_fid: + if kind == "bytes": + fid = BytesIO(file_fid.read()) + elif kind == "path": + fid = fname + else: + assert kind == "file" + fid = file_fid + if kind != "path": + assert not fid.closed + with 
pytest.raises(ValueError, match="preload must be used with file"): + read_raw_fif(fid) assert not file_fid.closed - with pytest.raises(ValueError, match='preload must be used with file'): - read_raw_fif(fid) - assert not fid.closed + if kind != "path": + assert not fid.closed assert not file_fid.closed # Use test_preloading=False but explicitly pass the preload type # so that we don't bother testing preload=False - kwargs = dict(fname=fid, preload=preload, on_split_missing='ignore', - test_preloading=False, test_kwargs=False) - _test_raw_reader(read_raw_fif, **kwargs) - assert not fid.closed + kwargs = dict( + fname=fid, + preload=preload, + on_split_missing="warn", + test_preloading=False, + test_kwargs=False, + ) + want_filenames = list(fnames) + if kind == "bytes": + # the split file will not be correctly resolved for BytesIO + want_filenames = [None] + if split and kind == "bytes": + ctx = pytest.warns(RuntimeWarning, match="Split raw file detected") + else: + ctx = nullcontext() + with ctx: + raw = _test_raw_reader(read_raw_fif, **kwargs) + if kind != "path": + assert not fid.closed assert not file_fid.closed + want_filenames = tuple(want_filenames) + assert raw.filenames == want_filenames + if kind == "bytes": + assert fname.name not in raw._repr_html_() + else: + assert fname.name in raw._repr_html_() assert file_fid.closed @@ -1822,41 +2100,44 @@ def test_str_like(): assert_allclose(raw_path._data, raw_str._data) -@pytest.mark.parametrize('fname', [ - test_fif_fname, - testing._pytest_param(fif_fname), - testing._pytest_param(ms_fname), -]) +@pytest.mark.parametrize( + "fname", + [ + test_fif_fname, + testing._pytest_param(fif_fname), + testing._pytest_param(ms_fname), + ], +) def test_bad_acq(fname): """Test handling of acquisition errors.""" # see gh-7844 - raw = read_raw_fif(fname, allow_maxshield='yes').load_data() - with open(fname, 'rb') as fid: - for ent in raw._raw_extras[0]['ent']: - fid.seek(ent.pos, 0) - tag = _read_tag_header(fid) + raw = read_raw_fif(fname, allow_maxshield="yes").load_data() + with open(fname, "rb") as fid: + for ent in raw._raw_extras[0]["ent"]: + tag = _read_tag_header(fid, ent.pos) # hack these, others (kind, type) should be correct tag.pos, tag.next = ent.pos, ent.next assert tag == ent @testing.requires_testing_data -@pytest.mark.skipif(sys.platform not in ('darwin', 'linux'), - reason='Needs proper symlinking') +@pytest.mark.skipif( + platform.system() not in ("Linux", "Darwin"), reason="Needs proper symlinking" +) def test_split_symlink(tmp_path): """Test split files with symlinks.""" # regression test for gh-9221 - (tmp_path / 'first').mkdir() - first = tmp_path / 'first' / 'test_raw.fif' - raw = read_raw_fif(fif_fname).pick('meg').load_data() - raw.save(first, buffer_size_sec=1, split_size='10MB', verbose=True) - second = Path(str(first)[:-4] + '-1.fif') + (tmp_path / "first").mkdir() + first = tmp_path / "first" / "test_raw.fif" + raw = read_raw_fif(fif_fname).pick("meg").load_data() + raw.save(first, buffer_size_sec=1, split_size="10MB", verbose=True) + second = Path(str(first)[:-4] + "-1.fif") assert second.is_file() - assert not Path(str(first)[:-4] + '-2.fif').is_file() - (tmp_path / 'a').mkdir() - (tmp_path / 'b').mkdir() - new_first = tmp_path / 'a' / 'test_raw.fif' - new_second = tmp_path / 'b' / 'test_raw-1.fif' + assert not Path(str(first)[:-4] + "-2.fif").is_file() + (tmp_path / "a").mkdir() + (tmp_path / "b").mkdir() + new_first = tmp_path / "a" / "test_raw.fif" + new_second = tmp_path / "b" / "test_raw-1.fif" shutil.move(first, 
new_first) shutil.move(second, new_second) os.symlink(new_first, first) @@ -1866,22 +2147,26 @@ def test_split_symlink(tmp_path): @testing.requires_testing_data -def test_corrupted(tmp_path): +@pytest.mark.parametrize("offset", (0, 1)) +def test_corrupted(tmp_path, offset): """Test that a corrupted file can still be read.""" # Must be a file written by Neuromag, not us, since we don't write the dir # at the end, so use the skip one (straight from acq). raw = read_raw_fif(skip_fname) - with open(skip_fname, 'rb') as fid: - tag = read_tag_info(fid) - tag = read_tag(fid) - dirpos = int(tag.data) + with open(skip_fname, "rb") as fid: + file_id_tag = read_tag(fid, 0) + dir_pos_tag = read_tag(fid, file_id_tag.next_pos) + dirpos = int(dir_pos_tag.data.item()) assert dirpos == 12641532 fid.seek(0) - data = fid.read(dirpos) - bad_fname = tmp_path / 'test_raw.fif' - with open(bad_fname, 'wb') as fid: + data = fid.read(dirpos + offset) + bad_fname = tmp_path / "test_raw.fif" + with open(bad_fname, "wb") as fid: fid.write(data) - with pytest.warns(RuntimeWarning, match='.*tag directory.*corrupt.*'): + with ( + _record_warnings(), + pytest.warns(RuntimeWarning, match=".*tag directory.*corrupt.*"), + ): raw_bad = read_raw_fif(bad_fname) assert_allclose(raw.get_data(), raw_bad.get_data()) @@ -1889,17 +2174,38 @@ def test_corrupted(tmp_path): @testing.requires_testing_data def test_expand_user(tmp_path, monkeypatch): """Test that we're expanding `~` before reading and writing.""" - monkeypatch.setenv('HOME', str(tmp_path)) - monkeypatch.setenv('USERPROFILE', str(tmp_path)) # Windows + monkeypatch.setenv("HOME", str(tmp_path)) + monkeypatch.setenv("USERPROFILE", str(tmp_path)) # Windows path_in = Path(fif_fname) path_out = tmp_path / path_in.name - path_home = Path('~') / path_in.name + path_home = Path("~") / path_in.name - shutil.copyfile( - src=path_in, - dst=path_out - ) + shutil.copyfile(src=path_in, dst=path_out) raw = read_raw_fif(fname=path_home, preload=True) raw.save(fname=path_home, overwrite=True) + + +@pytest.mark.parametrize("cast", [pathlib.Path, str]) +def test_init_kwargs(cast): + """Test for pull/12843#issuecomment-2380491528.""" + raw = read_raw_fif(cast(test_fif_fname)) + raw2 = read_raw_fif(**raw._init_kwargs) + for r in (raw, raw2): + assert isinstance(r._init_kwargs["fname"], pathlib.Path) + + +@pytest.mark.slowtest +@testing.requires_testing_data +@pytest.mark.parametrize("fname", [ms_fname, tri_fname]) +def test_fif_files(fname): + """Test reading of various FIF files.""" + _test_raw_reader( + read_raw_fif, + fname=fname, + allow_maxshield="yes", + verbose="error", + test_kwargs=False, + test_preloading=False, + ) diff --git a/mne/io/fil/__init__.py b/mne/io/fil/__init__.py index e07e1fc0bab..ad780452c77 100644 --- a/mne/io/fil/__init__.py +++ b/mne/io/fil/__init__.py @@ -1,5 +1,5 @@ -# Authors: George O'Neill -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from .fil import read_raw_fil \ No newline at end of file +from .fil import read_raw_fil diff --git a/mne/io/fil/fil.py b/mne/io/fil/fil.py index 29ed794d7ac..a7dd157049a 100644 --- a/mne/io/fil/fil.py +++ b/mne/io/fil/fil.py @@ -1,27 +1,32 @@ -# Authors: George O'Neill -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
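# ---- Usage sketch (illustrative, not part of the patch) ----
# The FIL-OPM reader below expects a ``*_meg.bin`` file accompanied by
# ``*_meg.json`` and ``*_channels.tsv`` siblings; with this change,
# ``*_positions.tsv`` and ``*_coordsystem.json`` become optional.
# The path below is a placeholder.
import mne

raw = mne.io.read_raw_fil("sub-01_task-rest_run-001_meg.bin", preload=True)
print(raw.info["sfreq"], len(raw.ch_names))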
-import pathlib import json +import pathlib import numpy as np -from ..constants import FIFF -from ..meas_info import _empty_info -from ..write import get_new_file_id +from ..._fiff._digitization import _make_dig_points +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _read_segments_file +from ..._fiff.write import get_new_file_id +from ...transforms import Transform, apply_trans, get_ras_to_neuromag_trans +from ...utils import _check_fname, fill_doc, verbose, warn from ..base import BaseRaw -from ..utils import _read_segments_file -from .._digitization import _make_dig_points -from ...transforms import get_ras_to_neuromag_trans, apply_trans, Transform -from ...utils import warn, fill_doc, verbose, _check_fname - -from .sensors import (_refine_sensor_orientation, _get_pos_units, - _size2units, _get_plane_vectors) +from .sensors import ( + _get_plane_vectors, + _get_pos_units, + _refine_sensor_orientation, + _size2units, +) @verbose -def read_raw_fil(binfile, precision='single', preload=False, *, verbose=None): +def read_raw_fil( + binfile, precision="single", preload=False, *, verbose=None +) -> "RawFIL": """Raw object from FIL-OPMEG formatted data. Parameters @@ -71,67 +76,73 @@ class RawFIL(BaseRaw): mne.io.Raw : Documentation of attributes and methods of RawFIL. """ - def __init__(self, binfile, precision='single', preload=False): - - if precision == 'single': - dt = np.dtype('>f') + def __init__(self, binfile, precision="single", preload=False): + if precision == "single": + dt = np.dtype(">f") bps = 4 else: - dt = np.dtype('>d') + dt = np.dtype(">d") bps = 8 sample_info = dict() - sample_info['dt'] = dt - sample_info['bps'] = bps + sample_info["dt"] = dt + sample_info["bps"] = bps files = _get_file_names(binfile) - chans = _from_tsv(files['chans']) - chanpos = _from_tsv(files['positions']) - nchans = len(chans['name']) - nlocs = len(chanpos['name']) - nsamples = _determine_nsamples(files['bin'], nchans, precision) - 1 - sample_info['nsamples'] = nsamples + chans = _from_tsv(files["chans"]) + nchans = len(chans["name"]) + nsamples = _determine_nsamples(files["bin"], nchans, precision) - 1 + sample_info["nsamples"] = nsamples raw_extras = list() raw_extras.append(sample_info) - chans['pos'] = [None] * nchans - chans['ori'] = [None] * nchans - - for ii in range(0, nlocs): - idx = chans['name'].index(chanpos['name'][ii]) - tmp = np.array([chanpos['Px'][ii], - chanpos['Py'][ii], - chanpos['Pz'][ii]]) - chans['pos'][idx] = tmp.astype(np.float64) - tmp = np.array([chanpos['Ox'][ii], - chanpos['Oy'][ii], - chanpos['Oz'][ii]]) - chans['ori'][idx] = tmp.astype(np.float64) - - with open(files['meg'], 'r') as fid: + chans["pos"] = [None] * nchans + chans["ori"] = [None] * nchans + if files["positions"].is_file(): + chanpos = _from_tsv(files["positions"]) + nlocs = len(chanpos["name"]) + for ii in range(0, nlocs): + idx = chans["name"].index(chanpos["name"][ii]) + tmp = np.array( + [chanpos["Px"][ii], chanpos["Py"][ii], chanpos["Pz"][ii]] + ) + chans["pos"][idx] = tmp.astype(np.float64) + tmp = np.array( + [chanpos["Ox"][ii], chanpos["Oy"][ii], chanpos["Oz"][ii]] + ) + chans["ori"][idx] = tmp.astype(np.float64) + else: + warn("No sensor position information found.") + + with open(files["meg"]) as fid: meg = json.load(fid) info = _compose_meas_info(meg, chans) - super(RawFIL, self).__init__( - info, preload, filenames=[files['bin']], raw_extras=raw_extras, - last_samps=[nsamples], orig_format=precision) - - if 
files['coordsystem'].is_file(): - with open(files['coordsystem'], 'r') as fid: + super().__init__( + info, + preload, + filenames=[files["bin"]], + raw_extras=raw_extras, + last_samps=[nsamples], + orig_format=precision, + ) + + if files["coordsystem"].is_file(): + with open(files["coordsystem"]) as fid: csys = json.load(fid) - hc = csys['HeadCoilCoordinates'] + hc = csys["HeadCoilCoordinates"] for key in hc: - if key.lower() == 'lpa': + if key.lower() == "lpa": lpa = np.asarray(hc[key]) - elif key.lower() == 'rpa': + elif key.lower() == "rpa": rpa = np.asarray(hc[key]) - elif key.lower().startswith('nas'): + elif key.lower().startswith("nas"): nas = np.asarray(hc[key]) else: - warn(f'{key} is not a valid fiducial name!') + warn(f"{key} is not a valid fiducial name!") size = np.linalg.norm(nas - rpa) unit, sf = _size2units(size) @@ -149,79 +160,103 @@ def __init__(self, binfile, precision='single', preload=False): rpa = apply_trans(t, rpa) with self.info._unlock(): - self.info['dig'] = _make_dig_points(nasion=nas, - lpa=lpa, - rpa=rpa, - coord_frame='meg') + self.info["dig"] = _make_dig_points( + nasion=nas, lpa=lpa, rpa=rpa, coord_frame="meg" + ) else: warn( - 'No fiducials found in files, defaulting sensor array to ' - 'FIFFV_COORD_DEVICE, this may cause problems later!') + "No fiducials found in files, defaulting sensor array to " + "FIFFV_COORD_DEVICE, this may cause problems later!" + ) t = np.eye(4) with self.info._unlock(): - self.info['dev_head_t'] = \ - Transform(FIFF.FIFFV_COORD_DEVICE, - FIFF.FIFFV_COORD_HEAD, t) + self.info["dev_head_t"] = Transform( + FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD, t + ) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" si = self._raw_extras[fi] _read_segments_file( - self, data, idx, fi, start, stop, cals, mult, dtype=si['dt']) + self, data, idx, fi, start, stop, cals, mult, dtype=si["dt"] + ) def _convert_channel_info(chans): """Convert the imported _channels.tsv into the chs element of raw.info.""" nmeg = nstim = nmisc = nref = 0 - units, sf = _get_pos_units(chans['pos']) + if not all(p is None for p in chans["pos"]): + _, sf = _get_pos_units(chans["pos"]) chs = list() - for ii in range(len(chans['name'])): - ch = dict(scanno=ii + 1, range=1., cal=1., loc=np.full(12, np.nan), - unit_mul=FIFF.FIFF_UNITM_NONE, ch_name=chans['name'][ii], - coil_type=FIFF.FIFFV_COIL_NONE) + for ii in range(len(chans["name"])): + ch = dict( + scanno=ii + 1, + range=1.0, + cal=1.0, + loc=np.full(12, np.nan), + unit_mul=FIFF.FIFF_UNITM_NONE, + ch_name=chans["name"][ii], + coil_type=FIFF.FIFFV_COIL_NONE, + ) chs.append(ch) # create the channel information - if chans['pos'][ii] is not None: - r0 = chans['pos'][ii].copy() / sf # mm to m - ez = chans['ori'][ii].copy() + if chans["pos"][ii] is not None: + r0 = chans["pos"][ii].copy() / sf # mm to m + ez = chans["ori"][ii].copy() ez = ez / np.linalg.norm(ez) ex, ey = _get_plane_vectors(ez) - ch['loc'] = np.concatenate([r0, ex, ey, ez]) + ch["loc"] = np.concatenate([r0, ex, ey, ez]) - if chans['type'][ii] == 'MEGMAG': + if chans["type"][ii] == "MEGMAG": nmeg += 1 - ch.update(logno=nmeg, coord_frame=FIFF.FIFFV_COORD_DEVICE, - kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T, - coil_type=FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2) - elif chans['type'][ii] == 'MEGREFMAG': + ch.update( + logno=nmeg, + coord_frame=FIFF.FIFFV_COORD_DEVICE, + kind=FIFF.FIFFV_MEG_CH, + unit=FIFF.FIFF_UNIT_T, + coil_type=FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2, + ) + elif chans["type"][ii] == "MEGREFMAG": nref += 
1 - ch.update(logno=nref, coord_frame=FIFF.FIFFV_COORD_UNKNOWN, - kind=FIFF.FIFFV_REF_MEG_CH, unit=FIFF.FIFF_UNIT_T, - coil_type=FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2) - elif chans['type'][ii] == 'TRIG': + ch.update( + logno=nref, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_REF_MEG_CH, + unit=FIFF.FIFF_UNIT_T, + coil_type=FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2, + ) + elif chans["type"][ii] == "TRIG": nstim += 1 - ch.update(logno=nstim, coord_frame=FIFF.FIFFV_COORD_UNKNOWN, - kind=FIFF.FIFFV_STIM_CH, unit=FIFF.FIFF_UNIT_V) + ch.update( + logno=nstim, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_STIM_CH, + unit=FIFF.FIFF_UNIT_V, + ) else: nmisc += 1 - ch.update(logno=nmisc, coord_frame=FIFF.FIFFV_COORD_UNKNOWN, - kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_NONE) + ch.update( + logno=nmisc, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_MISC_CH, + unit=FIFF.FIFF_UNIT_NONE, + ) # set the calibration based on the units - MNE expects T units for meg # and V for eeg - if chans['units'][ii] == 'fT': + if chans["units"][ii] == "fT": ch.update(cal=1e-15) - elif chans['units'][ii] == 'pT': + elif chans["units"][ii] == "pT": ch.update(cal=1e-12) - elif chans['units'][ii] == 'nT': + elif chans["units"][ii] == "nT": ch.update(cal=1e-9) - elif chans['units'][ii] == 'mV': + elif chans["units"][ii] == "mV": ch.update(cal=1e3) - elif chans['units'][ii] == 'uV': + elif chans["units"][ii] == "uV": ch.update(cal=1e6) return chs @@ -229,24 +264,22 @@ def _convert_channel_info(chans): def _compose_meas_info(meg, chans): """Create info structure.""" - info = _empty_info(meg['SamplingFrequency']) - + info = _empty_info(meg["SamplingFrequency"]) # Collect all the necessary data from the structures read - info['meas_id'] = get_new_file_id() + info["meas_id"] = get_new_file_id() tmp = _convert_channel_info(chans) - info['chs'] = _refine_sensor_orientation(tmp) - # info['chs'] = _convert_channel_info(chans) - info['line_freq'] = meg['PowerLineFrequency'] - info['bads'] = _read_bad_channels(chans) - info._unlocked = False + info["chs"] = _refine_sensor_orientation(tmp) + info["line_freq"] = meg["PowerLineFrequency"] info._update_redundant() + info["bads"] = _read_bad_channels(chans) + info._unlocked = False return info def _determine_nsamples(bin_fname, nchans, precision): """Identify how many temporal samples in a dataset.""" bsize = bin_fname.stat().st_size - if precision == 'single': + if precision == "single": bps = 4 else: bps = 8 @@ -257,26 +290,29 @@ def _determine_nsamples(bin_fname, nchans, precision): def _read_bad_channels(chans): """Check _channels.tsv file to look for premarked bad channels.""" bads = list() - for ii in range(0, len(chans['status'])): - if chans['status'][ii] == 'bad': - bads.append(chans['name'][ii]) + for ii in range(0, len(chans["status"])): + if chans["status"][ii] == "bad": + bads.append(chans["name"][ii]) return bads def _from_tsv(fname, dtypes=None): """Read a tsv file into a dict (which we know is ordered).""" - data = np.loadtxt(fname, dtype=str, delimiter='\t', ndmin=2, - comments=None, encoding='utf-8-sig') + data = np.loadtxt( + fname, dtype=str, delimiter="\t", ndmin=2, comments=None, encoding="utf-8-sig" + ) column_names = data[0, :] info = data[1:, :] data_dict = dict() if dtypes is None: dtypes = [str] * info.shape[1] - if not isinstance(dtypes, (list, tuple)): + if not isinstance(dtypes, list | tuple): dtypes = [dtypes] * info.shape[1] if not len(dtypes) == info.shape[1]: - raise ValueError('dtypes length mismatch. 
Provided: {0}, ' - 'Expected: {1}'.format(len(dtypes), info.shape[1])) + raise ValueError( + f"dtypes length mismatch. Provided: {len(dtypes)}, " + f"Expected: {info.shape[1]}" + ) for i, name in enumerate(column_names): data_dict[name] = info[:, i].astype(dtypes[i]).tolist() return data_dict @@ -285,16 +321,16 @@ def _from_tsv(fname, dtypes=None): def _get_file_names(binfile): """Guess the filenames based on predicted suffixes.""" binfile = pathlib.Path( - _check_fname(binfile, overwrite='read', must_exist=True, name='fname')) - if not (binfile.suffix == '.bin' and binfile.stem.endswith('_meg')): - raise ValueError( - f'File must be a filename ending in _meg.bin, got {binfile}') + _check_fname(binfile, overwrite="read", must_exist=True, name="fname") + ) + if not (binfile.suffix == ".bin" and binfile.stem.endswith("_meg")): + raise ValueError(f"File must be a filename ending in _meg.bin, got {binfile}") files = dict() dir_ = binfile.parent root = binfile.stem[:-4] # no _meg - files['bin'] = dir_ / (root + '_meg.bin') - files['meg'] = dir_ / (root + '_meg.json') - files['chans'] = dir_ / (root + '_channels.tsv') - files['positions'] = dir_ / (root + '_positions.tsv') - files['coordsystem'] = dir_ / (root + '_coordsystem.json') + files["bin"] = dir_ / (root + "_meg.bin") + files["meg"] = dir_ / (root + "_meg.json") + files["chans"] = dir_ / (root + "_channels.tsv") + files["positions"] = dir_ / (root + "_positions.tsv") + files["coordsystem"] = dir_ / (root + "_coordsystem.json") return files diff --git a/mne/io/fil/sensors.py b/mne/io/fil/sensors.py index 942057787d0..6431a16bb4d 100644 --- a/mne/io/fil/sensors.py +++ b/mne/io/fil/sensors.py @@ -1,8 +1,9 @@ -# Authors: George O'Neill -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from copy import deepcopy + import numpy as np from ...utils import logger @@ -32,10 +33,9 @@ def _refine_sensor_orientation(chanin): if np.isnan(targetloc.sum()) is False: targetloc = targetloc.reshape(3, 4, order="F") tmploc[:, 2] = targetloc[:, 3] - tmploc[:, 1] = flipFlag * np.cross(tmploc[:, 2], - tmploc[:, 3]) + tmploc[:, 1] = flipFlag * np.cross(tmploc[:, 2], tmploc[:, 3]) chanout[ii]["loc"] = tmploc.reshape(12, order="F") - logger.info('[done]') + logger.info("[done]") return chanout diff --git a/mne/io/fil/tests/test_fil.py b/mne/io/fil/tests/test_fil.py index 87017e04567..df15dd13353 100644 --- a/mne/io/fil/tests/test_fil.py +++ b/mne/io/fil/tests/test_fil.py @@ -1,45 +1,62 @@ -# Authors: George O'Neill -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
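# ---- Behavior sketch (illustrative, not part of the patch) ----
# The new tests below cover two cases: a channel marked "bad" in
# ``*_channels.tsv`` ends up in ``raw.info["bads"]``, and a missing
# ``*_positions.tsv`` leaves all channel locations as NaN (with a
# RuntimeWarning). The path below is a placeholder.
import numpy as np
import mne

raw = mne.io.read_raw_fil("sub-01_task-rest_run-001_meg.bin")
print(raw.info["bads"])  # channels flagged as "bad" in channels.tsv
locs = np.array([ch["loc"] for ch in raw.info["chs"]])
print(np.isnan(locs).all())  # True only when no position file was found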
-from numpy import isnan, empty -from numpy.testing import assert_array_equal, assert_array_almost_equal +import shutil +from os import remove import pytest +import scipy.io +from numpy import array, empty, isnan +from numpy.testing import assert_array_almost_equal, assert_array_equal +from mne import pick_types from mne.datasets import testing from mne.io import read_raw_fil from mne.io.fil.sensors import _get_pos_units -from mne.io.pick import pick_types - -import scipy.io - -fil_path = testing.data_path(download=False) / 'FIL' +fil_path = testing.data_path(download=False) / "FIL" # TODO: Ignore this warning in all these tests until we deal with this properly pytestmark = pytest.mark.filterwarnings( - 'ignore:.*problems later!:RuntimeWarning', + "ignore:.*problems later!:RuntimeWarning", ) +def _set_bads_tsv(chanfile, badchan): + """Update channels.tsv by setting target channel to bad.""" + data = [] + with open(chanfile, encoding="utf-8") as f: + for line in f: + columns = line.strip().split("\t") + data.append(columns) + + with open(chanfile, "w", encoding="utf-8") as f: + for row in data: + if badchan in row: + row[-1] = "bad" + f.write("\t".join(row) + "\n") + + def unpack_mat(matin): """Extract relevant entries from unstructred readmat.""" - data = matin['data'] - grad = data[0][0]['grad'] + data = matin["data"] + grad = data[0][0]["grad"] label = list() coil_label = list() - for ii in range(len(data[0][0]['label'])): - label.append(str(data[0][0]['label'][ii][0][0])) - for ii in range(len(grad[0][0]['label'])): - coil_label.append(str(grad[0][0]['label'][ii][0][0])) - - matout = {'label': label, - 'trial': data['trial'][0][0][0][0], - 'coil_label': coil_label, - 'coil_pos': grad[0][0]['coilpos'], - 'coil_ori': grad[0][0]['coilori']} + for ii in range(len(data[0][0]["label"])): + label.append(str(data[0][0]["label"][ii][0][0])) + for ii in range(len(grad[0][0]["label"])): + coil_label.append(str(grad[0][0]["label"][ii][0][0])) + + matout = { + "label": label, + "trial": data["trial"][0][0][0][0], + "coil_label": coil_label, + "coil_pos": grad[0][0]["coilpos"], + "coil_ori": grad[0][0]["coilori"], + } return matout @@ -65,15 +82,14 @@ def _get_channels_with_positions(info): def _fil_megmag(raw_test, raw_mat): """Test the magnetometer channels.""" - test_inds = pick_types(raw_test.info, meg="mag", - ref_meg=False, exclude="bads") + test_inds = pick_types(raw_test.info, meg="mag", ref_meg=False, exclude="bads") test_list = list(raw_test.info["ch_names"][i] for i in test_inds) mat_list = raw_mat["label"] mat_inds = _match_str(test_list, mat_list) - assert len(mat_inds) == len( - test_inds - ), "Number of magnetometer channels in RAW does not match .mat file!" + assert len(mat_inds) == len(test_inds), ( + "Number of magnetometer channels in RAW does not match .mat file!" + ) a = raw_test._data[test_inds, :] b = raw_mat["trial"][mat_inds, :] * 1e-15 # fT to T @@ -90,9 +106,9 @@ def _fil_stim(raw_test, raw_mat): mat_list = raw_mat["label"] mat_inds = _match_str(test_list, mat_list) - assert len(mat_inds) == len( - test_inds - ), "Number of stim channels in RAW does not match .mat file!" + assert len(mat_inds) == len(test_inds), ( + "Number of stim channels in RAW does not match .mat file!" 
+ ) a = raw_test._data[test_inds, :] b = raw_mat["trial"][mat_inds, :] # fT to T @@ -106,9 +122,9 @@ def _fil_sensorpos(raw_test, raw_mat): grad_list = raw_mat["coil_label"] grad_inds = _match_str(test_list, grad_list) - assert len(grad_inds) == len( - test_inds - ), "Number of channels with position data in RAW does not match .mat file!" + assert len(grad_inds) == len(test_inds), ( + "Number of channels with position data in RAW does not match .mat file!" + ) mat_pos = raw_mat["coil_pos"][grad_inds, :] mat_ori = raw_mat["coil_ori"][grad_inds, :] @@ -126,12 +142,10 @@ def _fil_sensorpos(raw_test, raw_mat): @testing.requires_testing_data -def test_fil_all(): +def test_fil_complete(): """Test FIL reader, match to known answers from .mat file.""" binname = fil_path / "sub-noise_ses-001_task-noise220622_run-001_meg.bin" - matname = ( - fil_path / "sub-noise_ses-001_task-noise220622_run-001_fieldtrip.mat" - ) + matname = fil_path / "sub-noise_ses-001_task-noise220622_run-001_fieldtrip.mat" raw = read_raw_fil(binname) raw.load_data(verbose=False) @@ -141,3 +155,38 @@ def test_fil_all(): _fil_megmag(raw, mat) _fil_stim(raw, mat) _fil_sensorpos(raw, mat) + + +@testing.requires_testing_data +def test_fil_no_positions(tmp_path): + """Test FIL reader in cases where a position file is missing.""" + test_path = tmp_path / "FIL" + shutil.copytree(fil_path, test_path) + + posname = test_path / "sub-noise_ses-001_task-noise220622_run-001_positions.tsv" + binname = test_path / "sub-noise_ses-001_task-noise220622_run-001_meg.bin" + + remove(posname) + + with pytest.warns(RuntimeWarning, match="No sensor position.*"): + raw = read_raw_fil(binname) + chs = raw.info["chs"] + locs = array([ch["loc"][:] for ch in chs]) + assert isnan(locs).all() + + +@testing.requires_testing_data +def test_fil_bad_channel_spec(tmp_path): + """Test FIL reader when a bad channel is specified in channels.tsv.""" + test_path = tmp_path / "FIL" + shutil.copytree(fil_path, test_path) + + channame = test_path / "sub-noise_ses-001_task-noise220622_run-001_channels.tsv" + binname = test_path / "sub-noise_ses-001_task-noise220622_run-001_meg.bin" + bad_chan = "G2-OG-Y" + + _set_bads_tsv(channame, bad_chan) + + raw = read_raw_fil(binname) + bads = raw.info["bads"] + assert bad_chan in bads diff --git a/mne/io/hitachi/__init__.py b/mne/io/hitachi/__init__.py index cdd39bad2fe..fc2cdec78ee 100644 --- a/mne/io/hitachi/__init__.py +++ b/mne/io/hitachi/__init__.py @@ -1,7 +1,7 @@ """fNIRS module for conversion to FIF.""" -# Author: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .hitachi import read_raw_hitachi diff --git a/mne/io/hitachi/hitachi.py b/mne/io/hitachi/hitachi.py index 892e4d33c72..9d8e27ba20d 100644 --- a/mne/io/hitachi/hitachi.py +++ b/mne/io/hitachi/hitachi.py @@ -1,23 +1,22 @@ -# Authors: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
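# ---- Usage sketch (illustrative, not part of the patch) ----
# ``read_raw_hitachi`` accepts a single file or, as the refactored reader below
# shows, a list of files from probes recorded simultaneously: their channels
# are merged and source/detector numbers are offset per file. The file names
# below are placeholders.
import mne

raw = mne.io.read_raw_hitachi(["probe1_MES.csv", "probe2_MES.csv"], preload=True)
print(raw.get_channel_types()[:4])  # fnirs_cw_amplitude channels (plus stim)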
import datetime as dt import re import numpy as np +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _merge_info, create_info +from ..._fiff.utils import _mult_cal_one +from ...utils import _check_fname, _check_option, fill_doc, logger, verbose, warn from ..base import BaseRaw -from ..constants import FIFF -from ..meas_info import create_info, _merge_info from ..nirx.nirx import _read_csv_rows_cols -from ..utils import _mult_cal_one -from ...utils import (logger, verbose, fill_doc, warn, _check_fname, - _check_option) @fill_doc -def read_raw_hitachi(fname, preload=False, verbose=None): +def read_raw_hitachi(fname, preload=False, verbose=None) -> "RawHitachi": """Reader for a Hitachi fNIRS recording. Parameters @@ -45,7 +44,7 @@ def read_raw_hitachi(fname, preload=False, verbose=None): def _check_bad(cond, msg): if cond: - raise RuntimeError(f'Could not parse file: {msg}') + raise RuntimeError(f"Could not parse file: {msg}") @fill_doc @@ -69,22 +68,21 @@ class RawHitachi(BaseRaw): @verbose def __init__(self, fname, preload=False, *, verbose=None): - if not isinstance(fname, (list, tuple)): + if not isinstance(fname, list | tuple): fname = [fname] fname = list(fname) # our own list that we can modify for fi, this_fname in enumerate(fname): - fname[fi] = str( - _check_fname(this_fname, "read", True, f"fname[{fi}]") - ) + fname[fi] = _check_fname(this_fname, "read", True, f"fname[{fi}]") infos = list() probes = list() last_samps = list() S_offset = D_offset = 0 - ignore_names = ['Time'] + ignore_names = ["Time"] for this_fname in fname: - info, extra, last_samp, offsets = self._get_hitachi_info( - this_fname, S_offset, D_offset, ignore_names) - ignore_names = list(set(ignore_names + info['ch_names'])) + info, extra, last_samp, offsets = _get_hitachi_info( + this_fname, S_offset, D_offset, ignore_names + ) + ignore_names = list(set(ignore_names + info["ch_names"])) S_offset += offsets[0] D_offset += offsets[1] infos.append(info) @@ -96,212 +94,223 @@ def __init__(self, fname, preload=False, *, verbose=None): else: info = infos[0] if len(set(last_samps)) != 1: - raise RuntimeError('All files must have the same number of ' - 'samples, got: {last_samps}') + raise RuntimeError( + "All files must have the same number of samples, got: {last_samps}" + ) last_samps = [last_samps[0]] raw_extras = [dict(probes=probes)] # One representative filename is good enough here # (additional filenames indicate temporal concat, not ch concat) - filenames = [fname[0]] super().__init__( - info, preload, filenames=filenames, last_samps=last_samps, - raw_extras=raw_extras, verbose=verbose) - - # This could be a function, but for the sake of indentation, let's make it - # a method instead - def _get_hitachi_info(self, fname, S_offset, D_offset, ignore_names): - logger.info('Loading %s' % fname) - raw_extra = dict(fname=fname) - info_extra = dict() - subject_info = dict() - ch_wavelengths = dict() - fnirs_wavelengths = [None, None] - meas_date = age = ch_names = sfreq = None - with open(fname, 'rb') as fid: - lines = fid.read() - lines = lines.decode('latin-1').rstrip('\r\n') - oldlen = len(lines) - assert len(lines) == oldlen - bounds = [0] - end = '\n' if '\n' in lines else '\r' - bounds.extend(a.end() for a in re.finditer(end, lines)) - bounds.append(len(lines)) - lines = lines.split(end) - assert len(bounds) == len(lines) + 1 - line = lines[0].rstrip(',\r\n') - _check_bad(line != 'Header', 'no header found') - li = 0 - mode = None - for li, line in enumerate(lines[1:], 1): - # Newer format has some 
blank lines - if len(line) == 0: - continue - parts = line.rstrip(',\r\n').split(',') - if len(parts) == 0: # some header lines are blank - continue - kind, parts = parts[0], parts[1:] - if len(parts) == 0: - parts = [''] # some fields (e.g., Comment) meaningfully blank - if kind == 'File Version': - logger.info(f'Reading Hitachi fNIRS file version {parts[0]}') - elif kind == 'AnalyzeMode': - _check_bad( - parts != ['Continuous'], f'not continuous data ({parts})') - elif kind == 'Sampling Period[s]': - sfreq = 1 / float(parts[0]) - elif kind == 'Exception': - raise NotImplementedError(kind) - elif kind == 'Comment': - info_extra['description'] = parts[0] - elif kind == 'ID': - subject_info['his_id'] = parts[0] - elif kind == 'Name': - if len(parts): - name = parts[0].split(' ') - if len(name): - subject_info['first_name'] = name[0] - subject_info['last_name'] = ' '.join(name[1:]) - elif kind == 'Age': - age = int(parts[0].rstrip('y')) - elif kind == 'Mode': - mode = parts[0] - elif kind in ('HPF[Hz]', 'LPF[Hz]'): - try: - freq = float(parts[0]) - except ValueError: - pass - else: - info_extra[{'HPF[Hz]': 'highpass', - 'LPF[Hz]': 'lowpass'}[kind]] = freq - elif kind == 'Date': - # 5/17/04 5:14 - try: - mdy, HM = parts[0].split(' ') - H, M = HM.split(':') - if len(H) == 1: - H = f'0{H}' - mdyHM = ' '.join([mdy, ':'.join([H, M])]) - for fmt in ('%m/%d/%y %H:%M', '%Y/%m/%d %H:%M'): - try: - meas_date = dt.datetime.strptime(mdyHM, fmt) - except Exception: - pass - else: - break - else: - raise RuntimeError # unknown format - except Exception: - warn('Extraction of measurement date failed. ' - 'Please report this as a github issue. ' - 'The date is being set to January 1st, 2000, ' - f'instead of {repr(parts[0])}') - elif kind == 'Sex': - try: - subject_info['sex'] = dict( - female=FIFF.FIFFV_SUBJ_SEX_FEMALE, - male=FIFF.FIFFV_SUBJ_SEX_MALE)[parts[0].lower()] - except KeyError: - pass - elif kind == 'Wave[nm]': - fnirs_wavelengths[:] = [int(part) for part in parts] - elif kind == 'Wave Length': - ch_regex = re.compile(r'^(.*)\(([0-9\.]+)\)$') - for ent in parts: - _, v = ch_regex.match(ent).groups() - ch_wavelengths[ent] = float(v) - elif kind == 'Data': - break - fnirs_wavelengths = np.array(fnirs_wavelengths, int) - assert len(fnirs_wavelengths) == 2 - ch_names = lines[li + 1].rstrip(',\r\n').split(',') - # cull to correct ones - raw_extra['keep_mask'] = ~np.in1d(ch_names, list(ignore_names)) - for ci, ch_name in enumerate(ch_names): - if re.match('Probe[0-9]+', ch_name): - raw_extra['keep_mask'][ci] = False - # set types - ch_names = [ch_name for ci, ch_name in enumerate(ch_names) - if raw_extra['keep_mask'][ci]] - ch_types = ['fnirs_cw_amplitude' if ch_name.startswith('CH') - else 'stim' - for ch_name in ch_names] - # get locations - nirs_names = [ch_name for ch_name, ch_type in zip(ch_names, ch_types) - if ch_type == 'fnirs_cw_amplitude'] - n_nirs = len(nirs_names) - assert n_nirs % 2 == 0 - names = { - '3x3': 'ETG-100', - '3x5': 'ETG-7000', - '4x4': 'ETG-7000', - '3x11': 'ETG-4000', - } - _check_option('Hitachi mode', mode, sorted(names)) - n_row, n_col = [int(x) for x in mode.split('x')] - logger.info(f'Constructing pairing matrix for {names[mode]} ({mode})') - pairs = _compute_pairs(n_row, n_col, n=1 + (mode == '3x3')) - assert n_nirs == len(pairs) * 2 - locs = np.zeros((len(ch_names), 12)) - locs[:, :9] = np.nan - idxs = np.where(np.array(ch_types, 'U') == 'fnirs_cw_amplitude')[0] - for ii, idx in enumerate(idxs): - ch_name = ch_names[idx] - # Use the actual/accurate wavelength in loc - 
acc_freq = ch_wavelengths[ch_name] - locs[idx][9] = acc_freq - # Rename channel based on standard naming scheme, using the - # nominal wavelength - sidx, didx = pairs[ii // 2] - nom_freq = fnirs_wavelengths[np.argmin(np.abs( - acc_freq - fnirs_wavelengths))] - ch_names[idx] = ( - f'S{S_offset + sidx + 1}_' - f'D{D_offset + didx + 1} ' - f'{nom_freq}' - ) - offsets = np.array(pairs, int).max(axis=0) + 1 - - # figure out bounds - bounds = raw_extra['bounds'] = bounds[li + 2:] - last_samp = len(bounds) - 2 - - if age is not None and meas_date is not None: - subject_info['birthday'] = (meas_date.year - age, - meas_date.month, - meas_date.day) - if meas_date is None: - meas_date = dt.datetime(2000, 1, 1, 0, 0, 0) - meas_date = meas_date.replace(tzinfo=dt.timezone.utc) - if subject_info: - info_extra['subject_info'] = subject_info - - # Create mne structure - info = create_info(ch_names, sfreq, ch_types=ch_types) - with info._unlock(): - info.update(info_extra) - info['meas_date'] = meas_date - for li, loc in enumerate(locs): - info['chs'][li]['loc'][:] = loc - return info, raw_extra, last_samp, offsets + info, + preload, + filenames=[fname[0]], + last_samps=last_samps, + raw_extras=raw_extras, + verbose=verbose, + ) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file.""" this_data = list() - for this_probe in self._raw_extras[fi]['probes']: - this_data.append(_read_csv_rows_cols( - this_probe['fname'], - start, stop, this_probe['keep_mask'], - this_probe['bounds'], sep=',', - replace=lambda x: - x.replace('\r', '\n') - .replace('\n\n', '\n') - .replace('\n', ',') - .replace(':', '')).T) + for this_probe in self._raw_extras[fi]["probes"]: + this_data.append( + _read_csv_rows_cols( + this_probe["fname"], + start, + stop, + this_probe["keep_mask"], + this_probe["bounds"], + sep=",", + replace=lambda x: x.replace("\r", "\n") + .replace("\n\n", "\n") + .replace("\n", ",") + .replace(":", ""), + ).T + ) this_data = np.concatenate(this_data, axis=0) _mult_cal_one(data, this_data, idx, cals, mult) return data +def _get_hitachi_info(fname, S_offset, D_offset, ignore_names): + logger.info(f"Loading {fname}") + raw_extra = dict(fname=fname) + info_extra = dict() + subject_info = dict() + ch_wavelengths = dict() + fnirs_wavelengths = [None, None] + meas_date = age = ch_names = sfreq = None + with open(fname, "rb") as fid: + lines = fid.read() + lines = lines.decode("latin-1").rstrip("\r\n") + oldlen = len(lines) + assert len(lines) == oldlen + bounds = [0] + end = "\n" if "\n" in lines else "\r" + bounds.extend(a.end() for a in re.finditer(end, lines)) + bounds.append(len(lines)) + lines = lines.split(end) + assert len(bounds) == len(lines) + 1 + line = lines[0].rstrip(",\r\n") + _check_bad(line != "Header", "no header found") + li = 0 + mode = None + for li, line in enumerate(lines[1:], 1): + # Newer format has some blank lines + if len(line) == 0: + continue + parts = line.rstrip(",\r\n").split(",") + if len(parts) == 0: # some header lines are blank + continue + kind, parts = parts[0], parts[1:] + if len(parts) == 0: + parts = [""] # some fields (e.g., Comment) meaningfully blank + if kind == "File Version": + logger.info(f"Reading Hitachi fNIRS file version {parts[0]}") + elif kind == "AnalyzeMode": + _check_bad(parts != ["Continuous"], f"not continuous data ({parts})") + elif kind == "Sampling Period[s]": + sfreq = 1 / float(parts[0]) + elif kind == "Exception": + raise NotImplementedError(kind) + elif kind == "Comment": + 
info_extra["description"] = parts[0] + elif kind == "ID": + subject_info["his_id"] = parts[0] + elif kind == "Name": + if len(parts): + name = parts[0].split(" ") + if len(name): + subject_info["first_name"] = name[0] + subject_info["last_name"] = " ".join(name[1:]) + elif kind == "Age": + age = int(parts[0].rstrip("y")) + elif kind == "Mode": + mode = parts[0] + elif kind in ("HPF[Hz]", "LPF[Hz]"): + try: + freq = float(parts[0]) + except ValueError: + pass + else: + info_extra[{"HPF[Hz]": "highpass", "LPF[Hz]": "lowpass"}[kind]] = freq + elif kind == "Date": + # 5/17/04 5:14 + try: + mdy, HM = parts[0].split(" ") + H, M = HM.split(":") + if len(H) == 1: + H = f"0{H}" + mdyHM = " ".join([mdy, ":".join([H, M])]) + for fmt in ("%m/%d/%y %H:%M", "%Y/%m/%d %H:%M"): + try: + meas_date = dt.datetime.strptime(mdyHM, fmt) + except Exception: + pass + else: + break + else: + raise RuntimeError # unknown format + except Exception: + warn( + "Extraction of measurement date failed. " + "Please report this as a github issue. " + "The date is being set to January 1st, 2000, " + f"instead of {repr(parts[0])}" + ) + elif kind == "Sex": + try: + subject_info["sex"] = dict( + female=FIFF.FIFFV_SUBJ_SEX_FEMALE, male=FIFF.FIFFV_SUBJ_SEX_MALE + )[parts[0].lower()] + except KeyError: + pass + elif kind == "Wave[nm]": + fnirs_wavelengths[:] = [int(part) for part in parts] + elif kind == "Wave Length": + ch_regex = re.compile(r"^(.*)\(([0-9\.]+)\)$") + for ent in parts: + _, v = ch_regex.match(ent).groups() + ch_wavelengths[ent] = float(v) + elif kind == "Data": + break + fnirs_wavelengths = np.array(fnirs_wavelengths, int) + assert len(fnirs_wavelengths) == 2 + ch_names = lines[li + 1].rstrip(",\r\n").split(",") + # cull to correct ones + raw_extra["keep_mask"] = ~np.isin(ch_names, list(ignore_names)) + for ci, ch_name in enumerate(ch_names): + if re.match("Probe[0-9]+", ch_name): + raw_extra["keep_mask"][ci] = False + # set types + ch_names = [ + ch_name for ci, ch_name in enumerate(ch_names) if raw_extra["keep_mask"][ci] + ] + ch_types = [ + "fnirs_cw_amplitude" if ch_name.startswith("CH") else "stim" + for ch_name in ch_names + ] + # get locations + nirs_names = [ + ch_name + for ch_name, ch_type in zip(ch_names, ch_types) + if ch_type == "fnirs_cw_amplitude" + ] + n_nirs = len(nirs_names) + assert n_nirs % 2 == 0 + names = { + "3x3": "ETG-100", + "3x5": "ETG-7000", + "4x4": "ETG-7000", + "3x11": "ETG-4000", + } + _check_option("Hitachi mode", mode, sorted(names)) + n_row, n_col = (int(x) for x in mode.split("x")) + logger.info(f"Constructing pairing matrix for {names[mode]} ({mode})") + pairs = _compute_pairs(n_row, n_col, n=1 + (mode == "3x3")) + assert n_nirs == len(pairs) * 2 + locs = np.zeros((len(ch_names), 12)) + locs[:, :9] = np.nan + idxs = np.where(np.array(ch_types, "U") == "fnirs_cw_amplitude")[0] + for ii, idx in enumerate(idxs): + ch_name = ch_names[idx] + # Use the actual/accurate wavelength in loc + acc_freq = ch_wavelengths[ch_name] + locs[idx][9] = acc_freq + # Rename channel based on standard naming scheme, using the + # nominal wavelength + sidx, didx = pairs[ii // 2] + nom_freq = fnirs_wavelengths[np.argmin(np.abs(acc_freq - fnirs_wavelengths))] + ch_names[idx] = f"S{S_offset + sidx + 1}_D{D_offset + didx + 1} {nom_freq}" + offsets = np.array(pairs, int).max(axis=0) + 1 + + # figure out bounds + bounds = raw_extra["bounds"] = bounds[li + 2 :] + last_samp = len(bounds) - 2 + + if age is not None and meas_date is not None: + subject_info["birthday"] = dt.date( + meas_date.year - age, + 
meas_date.month, + meas_date.day, + ) + if meas_date is None: + meas_date = dt.datetime(2000, 1, 1, 0, 0, 0) + meas_date = meas_date.replace(tzinfo=dt.timezone.utc) + if subject_info: + info_extra["subject_info"] = subject_info + + # Create mne structure + info = create_info(ch_names, sfreq, ch_types=ch_types) + with info._unlock(): + info.update(info_extra) + info["meas_date"] = meas_date + for li, loc in enumerate(locs): + info["chs"][li]["loc"][:] = loc + return info, raw_extra, last_samp, offsets + + def _compute_pairs(n_rows, n_cols, n=1): n_tot = n_rows * n_cols sd_idx = (np.arange(n_tot) // 2).reshape(n_rows, n_cols) diff --git a/mne/io/hitachi/tests/test_hitachi.py b/mne/io/hitachi/tests/test_hitachi.py index 802b46063bc..09b01b535e3 100644 --- a/mne/io/hitachi/tests/test_hitachi.py +++ b/mne/io/hitachi/tests/test_hitachi.py @@ -1,25 +1,27 @@ -# -*- coding: utf-8 -*- -# Authors: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import datetime as dt -import pytest import numpy as np +import pytest from numpy.testing import assert_allclose, assert_array_less from mne.channels import make_standard_montage from mne.io import read_raw_hitachi from mne.io.hitachi.hitachi import _compute_pairs from mne.io.tests.test_raw import _test_raw_reader -from mne.preprocessing.nirs import (source_detector_distances, - optical_density, tddr, beer_lambert_law, - scalp_coupling_index) - +from mne.preprocessing.nirs import ( + beer_lambert_law, + optical_density, + scalp_coupling_index, + source_detector_distances, + tddr, +) CONTENTS = dict() -CONTENTS['1.18'] = b"""\ +CONTENTS["1.18"] = b"""\ Header,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, File Version,1.18,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, Patient Information,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, @@ -124,7 +126,7 @@ """ # noqa: E501 -CONTENTS['1.25'] = b"""\ +CONTENTS["1.25"] = b"""\ Header File Version,1.25 Patient Information @@ -179,17 +181,21 @@ """ # noqa: E501 -@pytest.mark.parametrize('preload', (True, False)) -@pytest.mark.parametrize('version, n_ch, n_times, lowpass, sex, date, end', [ - ('1.18', 48, 60, 0.1, 2, (2004, 5, 17, 5, 14, 0, 0), None), - ('1.25', 108, 10, 5., 1, (2020, 2, 2, 11, 20, 0, 0), b'\r'), - ('1.25', 108, 10, 5., 1, (2020, 2, 2, 11, 20, 0, 0), b'\n'), - ('1.25', 108, 10, 5., 1, (2020, 2, 2, 11, 20, 0, 0), b'\r\n'), - # Fake a dual-probe file - (['1.18', '1.18'], 92, 60, 0.1, 2, (2004, 5, 17, 5, 14, 0, 0), None), -]) -def test_hitachi_basic(preload, version, n_ch, n_times, lowpass, sex, date, - end, tmp_path): +@pytest.mark.parametrize("preload", (True, False)) +@pytest.mark.parametrize( + "version, n_ch, n_times, lowpass, sex, date, end", + [ + ("1.18", 48, 60, 0.1, 2, (2004, 5, 17, 5, 14, 0, 0), None), + ("1.25", 108, 10, 5.0, 1, (2020, 2, 2, 11, 20, 0, 0), b"\r"), + ("1.25", 108, 10, 5.0, 1, (2020, 2, 2, 11, 20, 0, 0), b"\n"), + ("1.25", 108, 10, 5.0, 1, (2020, 2, 2, 11, 20, 0, 0), b"\r\n"), + # Fake a dual-probe file + (["1.18", "1.18"], 92, 60, 0.1, 2, (2004, 5, 17, 5, 14, 0, 0), None), + ], +) +def test_hitachi_basic( + preload, version, n_ch, n_times, lowpass, sex, date, end, tmp_path +): """Test NIRSport1 file with no saturation.""" if not isinstance(version, list): versions = [version] @@ -198,63 +204,68 @@ def test_hitachi_basic(preload, version, n_ch, n_times, lowpass, sex, date, del version fnames = list() for vi, v in enumerate(versions, 1): - fname = tmp_path / f'test{vi}.csv' - contents = 
CONTENTS[v].replace(
-            f'Probe{vi - 1}'.encode(),
-            f'Probe{vi}'.encode())
+        fname = tmp_path / f"test{vi}.csv"
+        contents = CONTENTS[v].replace(f"Probe{vi - 1}".encode(), f"Probe{vi}".encode())
         if end is not None:
-            contents = contents.replace(b'\r', b'\n').replace(b'\n\n', b'\n')
-            contents = contents.replace(b'\n', end)
-        with open(fname, 'wb') as fid:
+            contents = contents.replace(b"\r", b"\n").replace(b"\n\n", b"\n")
+            contents = contents.replace(b"\n", end)
+        with open(fname, "wb") as fid:
             fid.write(contents)
         fnames.append(fname)
         del fname

     raw = read_raw_hitachi(fnames, preload=preload, verbose=True)
     data = raw.get_data()
     assert data.shape == (n_ch, n_times)
-    assert raw.info['sfreq'] == 10
-    assert raw.info['lowpass'] == lowpass
-    assert raw.info['subject_info']['sex'] == sex
+    assert raw.info["sfreq"] == 10
+    assert raw.info["lowpass"] == lowpass
+    assert raw.info["subject_info"]["sex"] == sex
     assert np.isfinite(raw.get_data()).all()
-    assert raw.info['meas_date'] == dt.datetime(*date, tzinfo=dt.timezone.utc)
+    assert raw.info["meas_date"] == dt.datetime(*date, tzinfo=dt.timezone.utc)
     # bad distances (zero)
     distances = source_detector_distances(raw.info)
     want = [np.nan] * (n_ch - 4)
-    assert_allclose(distances, want, atol=0.)
+    assert_allclose(distances, want, atol=0.0)
     raw_od_bad = optical_density(raw)
-    with pytest.warns(RuntimeWarning, match='will be zero'):
+    with pytest.warns(RuntimeWarning, match="will be zero"):
         beer_lambert_law(raw_od_bad, ppf=6)
     # bad distances (too big)
-    if versions[0] == '1.18' and len(fnames) == 1:
-        need = sum(([f'S{ii}', f'D{ii}'] for ii in range(1, 9)), [])[:-1]
-        have = 'P7 FC3 C3 CP3 P3 F5 FC5 C5 CP5 P5 F7 FT7 T7 TP7 F3'.split()
+    if versions[0] == "1.18" and len(fnames) == 1:
+        need = sum(([f"S{ii}", f"D{ii}"] for ii in range(1, 9)), [])[:-1]
+        have = "P7 FC3 C3 CP3 P3 F5 FC5 C5 CP5 P5 F7 FT7 T7 TP7 F3".split()
         assert len(need) == len(have)
-        mon = make_standard_montage('standard_1020')
+        mon = make_standard_montage("standard_1020")
         mon.rename_channels(dict(zip(have, need)))
         raw.set_montage(mon)
         raw_od_bad = optical_density(raw)
-        with pytest.warns(RuntimeWarning, match='greater than 10 cm'):
+        with pytest.warns(RuntimeWarning, match="greater than 10 cm"):
             beer_lambert_law(raw_od_bad, ppf=6)
     # good distances
-    mon = make_standard_montage('standard_1020')
-    if versions[0] == '1.18':
+    mon = make_standard_montage("standard_1020")
+    if versions[0] == "1.18":
         assert len(fnames) in (1, 2)
-        need = sum(([f'S{ii}', f'D{ii}'] for ii in range(1, 9)), [])[:-1]
-        have = 'F3 FC3 C3 CP3 P3 F5 FC5 C5 CP5 P5 F7 FT7 T7 TP7 P7'.split()
+        need = sum(([f"S{ii}", f"D{ii}"] for ii in range(1, 9)), [])[:-1]
+        have = "F3 FC3 C3 CP3 P3 F5 FC5 C5 CP5 P5 F7 FT7 T7 TP7 P7".split()
         assert len(need) == 15
         if len(fnames) == 2:
-            need.extend(sum((
-                [f'S{ii}', f'D{jj}']
-                for ii, jj in zip(range(9, 17), range(8, 16))), [])[:-1])
-            have.extend(
-                'F4 FC4 C4 CP4 P4 F6 FC6 C6 CP6 P6 F8 FT8 T8 TP8 P8'.split())
+            need.extend(
+                sum(
+                    (
+                        [f"S{ii}", f"D{jj}"]
+                        for ii, jj in zip(range(9, 17), range(8, 16))
+                    ),
+                    [],
+                )[:-1]
+            )
+            have.extend("F4 FC4 C4 CP4 P4 F6 FC6 C6 CP6 P6 F8 FT8 T8 TP8 P8".split())
         assert len(need) == 30
     else:
         assert len(fnames) == 1
-        need = sum(([f'S{ii}', f'D{ii}'] for ii in range(1, 18)), [])[:-1]
-        have = ('FT9 FT7 FC5 FC3 FC1 FCz FC2 FC4 FC6 FT8 FT10 '
-                'T9 T7 C5 C3 C1 Cz C2 C4 C6 T8 T10 '
-                'TP9 TP7 CP5 CP3 CP1 CPz CP2 CP4 CP6 TP8 TP10').split()
+        need = sum(([f"S{ii}", f"D{ii}"] for ii in range(1, 18)), [])[:-1]
+        have = (
+            "FT9 FT7 FC5 FC3 FC1 
FCz FC2 FC4 FC6 FT8 FT10 " + "T9 T7 C5 C3 C1 Cz C2 C4 C6 T8 T10 " + "TP9 TP7 CP5 CP3 CP1 CPz CP2 CP4 CP6 TP8 TP10" + ).split() assert len(need) == 33 assert len(need) == len(have) for h in have: @@ -266,9 +277,10 @@ def test_hitachi_basic(preload, version, n_ch, n_times, lowpass, sex, date, distances = source_detector_distances(raw.info) want = [0.03] * (n_ch - 4) assert_allclose(distances, want, atol=0.01) - test_rank = 'less' if n_times < n_ch else True - _test_raw_reader(read_raw_hitachi, fname=fnames, - boundary_decimal=1, test_rank=test_rank) # low fs + test_rank = "less" if n_times < n_ch else True + _test_raw_reader( + read_raw_hitachi, fname=fnames, boundary_decimal=1, test_rank=test_rank + ) # low fs # TODO: eventually we should refactor these to be in # mne/io/tests/test_raw.py and run them for all fNIRS readers @@ -276,9 +288,9 @@ def test_hitachi_basic(preload, version, n_ch, n_times, lowpass, sex, date, # OD raw_od = optical_density(raw) assert np.isfinite(raw_od.get_data()).all() - sci = scalp_coupling_index(raw_od, verbose='error') + sci = scalp_coupling_index(raw_od, verbose="error") lo, mi, hi = np.percentile(sci, [5, 50, 95]) - if versions[0] == '1.18': + if versions[0] == "1.18": assert -0.1 < lo < 0.1 # not great assert 0.4 < mi < 0.5 assert 0.8 < hi < 0.9 @@ -286,58 +298,158 @@ def test_hitachi_basic(preload, version, n_ch, n_times, lowpass, sex, date, assert 0.99 <= lo <= hi <= 1 # TDDR raw_tddr = tddr(raw_od) - data = raw_tddr.get_data('fnirs') + data = raw_tddr.get_data("fnirs") assert np.isfinite(data.all()) peaks = np.ptp(data, axis=-1) - assert_array_less(1e-4, peaks, err_msg='TDDR too small') - assert_array_less(peaks, 1, err_msg='TDDR too big') + assert_array_less(1e-4, peaks, err_msg="TDDR too small") + assert_array_less(peaks, 1, err_msg="TDDR too big") # HbO/HbR raw_tddr.set_montage(mon) raw_h = beer_lambert_law(raw_tddr, ppf=6) - data = raw_h.get_data('fnirs') + data = raw_h.get_data("fnirs") assert np.isfinite(data).all() assert data.shape == (n_ch - 4, n_times) peaks = np.ptp(data, axis=-1) - assert_array_less(1e-10, peaks, err_msg='Beer-Lambert too small') - assert_array_less(peaks, 1e-5, err_msg='Beer-Lambert too big') + assert_array_less(1e-10, peaks, err_msg="Beer-Lambert too small") + assert_array_less(peaks, 1e-5, err_msg="Beer-Lambert too big") # From Hitachi 2 Homer KNOWN_PAIRS = { (3, 3, 2): ( - (0, 0), (1, 0), (0, 1), (2, 0), (1, 2), - (2, 1), (2, 2), (3, 1), (2, 3), (4, 2), - (3, 3), (4, 3), (5, 4), (6, 4), (5, 5), - (7, 4), (6, 6), (7, 5), (7, 6), (8, 5), - (7, 7), (9, 6), (8, 7), (9, 7)), + (0, 0), + (1, 0), + (0, 1), + (2, 0), + (1, 2), + (2, 1), + (2, 2), + (3, 1), + (2, 3), + (4, 2), + (3, 3), + (4, 3), + (5, 4), + (6, 4), + (5, 5), + (7, 4), + (6, 6), + (7, 5), + (7, 6), + (8, 5), + (7, 7), + (9, 6), + (8, 7), + (9, 7), + ), (3, 5, 1): ( - (0, 0), (1, 0), (1, 1), (2, 1), (0, 2), - (3, 0), (1, 3), (4, 1), (2, 4), (3, 2), - (3, 3), (4, 3), (4, 4), (5, 2), (3, 5), - (6, 3), (4, 6), (7, 4), (5, 5), (6, 5), - (6, 6), (7, 6)), + (0, 0), + (1, 0), + (1, 1), + (2, 1), + (0, 2), + (3, 0), + (1, 3), + (4, 1), + (2, 4), + (3, 2), + (3, 3), + (4, 3), + (4, 4), + (5, 2), + (3, 5), + (6, 3), + (4, 6), + (7, 4), + (5, 5), + (6, 5), + (6, 6), + (7, 6), + ), (4, 4, 1): ( - (0, 0), (1, 0), (1, 1), (0, 2), (2, 0), - (1, 3), (3, 1), (2, 2), (2, 3), (3, 3), - (4, 2), (2, 4), (5, 3), (3, 5), (4, 4), - (5, 4), (5, 5), (4, 6), (6, 4), (5, 7), - (7, 5), (6, 6), (6, 7), (7, 7)), + (0, 0), + (1, 0), + (1, 1), + (0, 2), + (2, 0), + (1, 3), + (3, 1), + (2, 2), + (2, 
3), + (3, 3), + (4, 2), + (2, 4), + (5, 3), + (3, 5), + (4, 4), + (5, 4), + (5, 5), + (4, 6), + (6, 4), + (5, 7), + (7, 5), + (6, 6), + (6, 7), + (7, 7), + ), (3, 11, 1): ( - (0, 0), (1, 0), (1, 1), (2, 1), (2, 2), - (3, 2), (3, 3), (4, 3), (4, 4), (5, 4), - (0, 5), (6, 0), (1, 6), (7, 1), (2, 7), - (8, 2), (3, 8), (9, 3), (4, 9), (10, 4), - (5, 10), (6, 5), (6, 6), (7, 6), (7, 7), - (8, 7), (8, 8), (9, 8), (9, 9), (10, 9), - (10, 10), (11, 5), (6, 11), (12, 6), (7, 12), - (13, 7), (8, 13), (14, 8), (9, 14), (15, 9), - (10, 15), (16, 10), (11, 11), (12, 11), (12, 12), - (13, 12), (13, 13), (14, 13), (14, 14), (15, 14), - (15, 15), (16, 15)), + (0, 0), + (1, 0), + (1, 1), + (2, 1), + (2, 2), + (3, 2), + (3, 3), + (4, 3), + (4, 4), + (5, 4), + (0, 5), + (6, 0), + (1, 6), + (7, 1), + (2, 7), + (8, 2), + (3, 8), + (9, 3), + (4, 9), + (10, 4), + (5, 10), + (6, 5), + (6, 6), + (7, 6), + (7, 7), + (8, 7), + (8, 8), + (9, 8), + (9, 9), + (10, 9), + (10, 10), + (11, 5), + (6, 11), + (12, 6), + (7, 12), + (13, 7), + (8, 13), + (14, 8), + (9, 14), + (15, 9), + (10, 15), + (16, 10), + (11, 11), + (12, 11), + (12, 12), + (13, 12), + (13, 13), + (14, 13), + (14, 14), + (15, 14), + (15, 15), + (16, 15), + ), } -@pytest.mark.parametrize('n_rows, n_cols, n', list(KNOWN_PAIRS)) +@pytest.mark.parametrize("n_rows, n_cols, n", list(KNOWN_PAIRS)) def test_compute_pairs(n_rows, n_cols, n): """Test computation of S-D pairings.""" want = KNOWN_PAIRS[(n_rows, n_cols, n)] diff --git a/mne/io/kit/__init__.py b/mne/io/kit/__init__.py index a520b91da62..c522113f64e 100644 --- a/mne/io/kit/__init__.py +++ b/mne/io/kit/__init__.py @@ -1,8 +1,8 @@ """KIT module for reading raw data.""" -# Author: Teon Brooks -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .kit import read_raw_kit, read_epochs_kit from .coreg import read_mrk diff --git a/mne/io/kit/constants.py b/mne/io/kit/constants.py index 144dc584816..7831e81c6fa 100644 --- a/mne/io/kit/constants.py +++ b/mne/io/kit/constants.py @@ -1,11 +1,10 @@ """KIT constants.""" -# Authors: Teon Brooks -# Christian Brodbeck -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from ..constants import FIFF +from ..._fiff.constants import FIFF from ...utils import BunchConst @@ -18,7 +17,7 @@ # channel parameters KIT.CALIB_FACTOR = 1.0 # mne_manual p.272 -KIT.RANGE = 1. 
# mne_manual p.272 +KIT.RANGE = 1.0 # mne_manual p.272 KIT.UNIT_MUL = FIFF.FIFF_UNITM_NONE # default is 0 mne_manual p.273 KIT.GAINS = [1, 2, 5, 10, 20, 50, 100, 200] @@ -129,11 +128,11 @@ KIT.CHANNEL_NULL: FIFF.FIFFV_MISC_CH, } KIT.CH_LABEL = { - KIT.CHANNEL_TRIGGER: 'TRIGGER', - KIT.CHANNEL_EEG: 'EEG', - KIT.CHANNEL_ECG: 'ECG', - KIT.CHANNEL_ETC: 'MISC', - KIT.CHANNEL_NULL: 'MISC', + KIT.CHANNEL_TRIGGER: "TRIGGER", + KIT.CHANNEL_EEG: "EEG", + KIT.CHANNEL_ECG: "ECG", + KIT.CHANNEL_ETC: "MISC", + KIT.CHANNEL_NULL: "MISC", } # Acquisition modes @@ -170,19 +169,19 @@ # Sensor layouts for plotting KIT_LAYOUT = { KIT.SYSTEM_AS: None, - KIT.SYSTEM_AS_2008: 'KIT-AS-2008', - KIT.SYSTEM_MQ_ADULT: 'KIT-160', - KIT.SYSTEM_MQ_CHILD: 'KIT-125', - KIT.SYSTEM_NYU_2008: 'KIT-157', - KIT.SYSTEM_NYU_2009: 'KIT-157', - KIT.SYSTEM_NYU_2010: 'KIT-157', + KIT.SYSTEM_AS_2008: "KIT-AS-2008", + KIT.SYSTEM_MQ_ADULT: "KIT-160", + KIT.SYSTEM_MQ_CHILD: "KIT-125", + KIT.SYSTEM_NYU_2008: "KIT-157", + KIT.SYSTEM_NYU_2009: "KIT-157", + KIT.SYSTEM_NYU_2010: "KIT-157", KIT.SYSTEM_NYU_2019: None, - KIT.SYSTEM_NYUAD_2011: 'KIT-AD', - KIT.SYSTEM_NYUAD_2012: 'KIT-AD', - KIT.SYSTEM_NYUAD_2014: 'KIT-AD', + KIT.SYSTEM_NYUAD_2011: "KIT-AD", + KIT.SYSTEM_NYUAD_2012: "KIT-AD", + KIT.SYSTEM_NYUAD_2014: "KIT-AD", KIT.SYSTEM_UMD_2004: None, KIT.SYSTEM_UMD_2014_07: None, - KIT.SYSTEM_UMD_2014_12: 'KIT-UMD-3', + KIT.SYSTEM_UMD_2014_12: "KIT-UMD-3", KIT.SYSTEM_UMD_2019_09: None, KIT.SYSTEM_YOKOGAWA_2017_01: None, KIT.SYSTEM_YOKOGAWA_2018_01: None, @@ -195,17 +194,17 @@ KIT.SYSTEM_AS_2008: None, KIT.SYSTEM_MQ_ADULT: None, KIT.SYSTEM_MQ_CHILD: None, - KIT.SYSTEM_NYU_2008: 'KIT-157', - KIT.SYSTEM_NYU_2009: 'KIT-157', - KIT.SYSTEM_NYU_2010: 'KIT-157', - KIT.SYSTEM_NYU_2019: 'KIT-NYU-2019', - KIT.SYSTEM_NYUAD_2011: 'KIT-208', - KIT.SYSTEM_NYUAD_2012: 'KIT-208', - KIT.SYSTEM_NYUAD_2014: 'KIT-208', - KIT.SYSTEM_UMD_2004: 'KIT-UMD-1', - KIT.SYSTEM_UMD_2014_07: 'KIT-UMD-2', - KIT.SYSTEM_UMD_2014_12: 'KIT-UMD-3', - KIT.SYSTEM_UMD_2019_09: 'KIT-UMD-4', + KIT.SYSTEM_NYU_2008: "KIT-157", + KIT.SYSTEM_NYU_2009: "KIT-157", + KIT.SYSTEM_NYU_2010: "KIT-157", + KIT.SYSTEM_NYU_2019: "KIT-NYU-2019", + KIT.SYSTEM_NYUAD_2011: "KIT-208", + KIT.SYSTEM_NYUAD_2012: "KIT-208", + KIT.SYSTEM_NYUAD_2014: "KIT-208", + KIT.SYSTEM_UMD_2004: "KIT-UMD-1", + KIT.SYSTEM_UMD_2014_07: "KIT-UMD-2", + KIT.SYSTEM_UMD_2014_12: "KIT-UMD-3", + KIT.SYSTEM_UMD_2019_09: "KIT-UMD-4", KIT.SYSTEM_YOKOGAWA_2017_01: None, KIT.SYSTEM_YOKOGAWA_2018_01: None, KIT.SYSTEM_YOKOGAWA_2020_08: None, @@ -213,31 +212,31 @@ } # Names displayed in the info dict description KIT_SYSNAMES = { - KIT.SYSTEM_MQ_ADULT: 'Macquarie Dept of Cognitive Science (Adult), 2006-', - KIT.SYSTEM_MQ_CHILD: 'Macquarie Dept of Cognitive Science (Child), 2006-', - KIT.SYSTEM_AS: 'Academia Sinica, -2008', - KIT.SYSTEM_AS_2008: 'Academia Sinica, 2008-', - KIT.SYSTEM_NYU_2008: 'NYU New York, 2008-9', - KIT.SYSTEM_NYU_2009: 'NYU New York, 2009-10', - KIT.SYSTEM_NYU_2010: 'NYU New York, 2010-', - KIT.SYSTEM_NYUAD_2011: 'New York University Abu Dhabi, 2011-12', - KIT.SYSTEM_NYUAD_2012: 'New York University Abu Dhabi, 2012-14', - KIT.SYSTEM_NYUAD_2014: 'New York University Abu Dhabi, 2014-', - KIT.SYSTEM_UMD_2004: 'University of Maryland, 2004-14', - KIT.SYSTEM_UMD_2014_07: 'University of Maryland, 2014', - KIT.SYSTEM_UMD_2014_12: 'University of Maryland, 2014-', - KIT.SYSTEM_UMD_2019_09: 'University of Maryland, 2019-', - KIT.SYSTEM_YOKOGAWA_2017_01: 'Yokogawa of Kanazawa (until 2017)', - KIT.SYSTEM_YOKOGAWA_2018_01: 'Yokogawa of 
Kanazawa (since 2018)', - KIT.SYSTEM_YOKOGAWA_2020_08: 'Yokogawa of Kanazawa (since August 2020)', - KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: 'Eagle Technology MEG (KIT/Yokogawa style) at PTB (since 2008, software upgrade in 2018)', # noqa: E501 + KIT.SYSTEM_MQ_ADULT: "Macquarie Dept of Cognitive Science (Adult), 2006-", + KIT.SYSTEM_MQ_CHILD: "Macquarie Dept of Cognitive Science (Child), 2006-", + KIT.SYSTEM_AS: "Academia Sinica, -2008", + KIT.SYSTEM_AS_2008: "Academia Sinica, 2008-", + KIT.SYSTEM_NYU_2008: "NYU New York, 2008-9", + KIT.SYSTEM_NYU_2009: "NYU New York, 2009-10", + KIT.SYSTEM_NYU_2010: "NYU New York, 2010-", + KIT.SYSTEM_NYUAD_2011: "New York University Abu Dhabi, 2011-12", + KIT.SYSTEM_NYUAD_2012: "New York University Abu Dhabi, 2012-14", + KIT.SYSTEM_NYUAD_2014: "New York University Abu Dhabi, 2014-", + KIT.SYSTEM_UMD_2004: "University of Maryland, 2004-14", + KIT.SYSTEM_UMD_2014_07: "University of Maryland, 2014", + KIT.SYSTEM_UMD_2014_12: "University of Maryland, 2014-", + KIT.SYSTEM_UMD_2019_09: "University of Maryland, 2019-", + KIT.SYSTEM_YOKOGAWA_2017_01: "Yokogawa of Kanazawa (until 2017)", + KIT.SYSTEM_YOKOGAWA_2018_01: "Yokogawa of Kanazawa (since 2018)", + KIT.SYSTEM_YOKOGAWA_2020_08: "Yokogawa of Kanazawa (since August 2020)", + KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: "Eagle Technology MEG (KIT/Yokogawa style) at PTB (since 2008, software upgrade in 2018)", # noqa: E501 } LEGACY_AMP_PARAMS = { - KIT.SYSTEM_NYU_2008: (5., 11.), - KIT.SYSTEM_NYU_2009: (5., 11.), - KIT.SYSTEM_NYU_2010: (5., 11.), - KIT.SYSTEM_UMD_2004: (5., 11.), + KIT.SYSTEM_NYU_2008: (5.0, 11.0), + KIT.SYSTEM_NYU_2009: (5.0, 11.0), + KIT.SYSTEM_NYU_2010: (5.0, 11.0), + KIT.SYSTEM_UMD_2004: (5.0, 11.0), } # Ones that we don't use are commented out diff --git a/mne/io/kit/coreg.py b/mne/io/kit/coreg.py index c3dda423a97..8e6698d6f78 100644 --- a/mne/io/kit/coreg.py +++ b/mne/io/kit/coreg.py @@ -1,10 +1,9 @@ """Coordinate Point Extractor for KIT system.""" -# Author: Teon Brooks -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import pickle import re from collections import OrderedDict from os import SEEK_CUR, PathLike @@ -12,15 +11,24 @@ import numpy as np -from .constants import KIT, FIFF -from .._digitization import _make_dig_points -from ...transforms import (Transform, apply_trans, get_ras_to_neuromag_trans, - als_ras_trans) -from ...utils import warn, _check_option, _check_fname - - -INT32 = ' KIT.DIG_POINTS: hsp = _decimate_points(hsp, res=0.005) n_new = len(hsp) - warn("The selected head shape contained {n_in} points, which is " - "more than recommended ({n_rec}), and was automatically " - "downsampled to {n_new} points. The preferred way to " - "downsample is using FastScan.".format( - n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new)) + warn( + f"The selected head shape contained {n_pts} points, which is more than " + f"recommended ({KIT.DIG_POINTS}), and was automatically downsampled to " + f"{n_new} points. The preferred way to downsample is using FastScan." + ) - if isinstance(elp, (str, Path, PathLike)): + if isinstance(elp, str | Path | PathLike): elp_points = _read_dig_kit(elp) if len(elp_points) != 8: - raise ValueError("File %r should contain 8 points; got shape " - "%s." % (elp, elp_points.shape)) + raise ValueError( + f"File {repr(elp)} should contain 8 points; got shape " + f"{elp_points.shape}." + ) elp = elp_points - elif len(elp) not in (6, 7, 8): - raise ValueError("ELP should contain 6 ~ 8 points; got shape " - "%s." 
% (elp.shape,)) - if isinstance(mrk, (str, Path, PathLike)): + if len(bad_coils) > 0: + elp = np.delete(elp, np.array(bad_coils) + 3, 0) + # check we have at least 3 marker coils (whether read from file or + # passed in directly) + if len(elp) not in (6, 7, 8): + raise ValueError(f"ELP should contain 6 ~ 8 points; got shape {elp.shape}.") + if isinstance(mrk, str | Path | PathLike): mrk = read_mrk(mrk) + if len(bad_coils) > 0: + mrk = np.delete(mrk, bad_coils, 0) + if len(mrk) not in (3, 4, 5): + raise ValueError(f"MRK should contain 3 ~ 5 points; got shape {mrk.shape}.") mrk = apply_trans(als_ras_trans, mrk) @@ -166,55 +177,57 @@ def _set_dig_kit(mrk, elp, hsp, eeg): eeg = OrderedDict((k, apply_trans(nmtrans, p)) for k, p in eeg.items()) # device head transform - trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out='trans') + trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out="trans") nasion, lpa, rpa = elp[:3] elp = elp[3:] dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp, dig_ch_pos=eeg) - dev_head_t = Transform('meg', 'head', trans) - - hpi_results = [dict(dig_points=[ - dict(ident=ci, r=r, kind=FIFF.FIFFV_POINT_HPI, - coord_frame=FIFF.FIFFV_COORD_UNKNOWN) - for ci, r in enumerate(mrk)], coord_trans=dev_head_t)] + dev_head_t = Transform("meg", "head", trans) + + hpi_results = [ + dict( + dig_points=[ + dict( + ident=ci, + r=r, + kind=FIFF.FIFFV_POINT_HPI, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + ) + for ci, r in enumerate(mrk) + ], + coord_trans=dev_head_t, + ) + ] return dig_points, dev_head_t, hpi_results -def _read_dig_kit(fname, unit='auto'): +def _read_dig_kit(fname, unit="auto"): # Read dig points from a file and return ndarray, using FastSCAN for .txt - from ...channels.montage import ( - read_polhemus_fastscan, read_dig_polhemus_isotrak, read_custom_montage, - _check_dig_shape) - - fname = _check_fname( - fname, "read", must_exist=True, name="hsp or elp file" - ) - assert unit in ('auto', 'm', 'mm') - _check_option( - "file extension", fname.suffix, (".hsp", ".elp", ".mat", ".txt") - ) + fname = _check_fname(fname, "read", must_exist=True, name="hsp or elp file") + assert unit in ("auto", "m", "mm") + _check_option("file extension", fname.suffix, (".hsp", ".elp", ".mat", ".txt")) if fname.suffix == ".txt": - unit = 'mm' if unit == 'auto' else unit - out = read_polhemus_fastscan(fname, unit=unit, - on_header_missing='ignore') + unit = "mm" if unit == "auto" else unit + out = read_polhemus_fastscan(fname, unit=unit, on_header_missing="ignore") elif fname.suffix in (".hsp", ".elp"): - unit = 'm' if unit == 'auto' else unit + unit = "m" if unit == "auto" else unit mon = read_dig_polhemus_isotrak(fname, unit=unit) if fname.suffix == ".hsp": - dig = [d['r'] for d in mon.dig - if d['kind'] != FIFF.FIFFV_POINT_CARDINAL] + dig = [d["r"] for d in mon.dig if d["kind"] != FIFF.FIFFV_POINT_CARDINAL] else: - dig = [d['r'] for d in mon.dig] - if dig and \ - mon.dig[0]['kind'] == FIFF.FIFFV_POINT_CARDINAL and \ - mon.dig[0]['ident'] == FIFF.FIFFV_POINT_LPA: + dig = [d["r"] for d in mon.dig] + if ( + dig + and mon.dig[0]["kind"] == FIFF.FIFFV_POINT_CARDINAL + and mon.dig[0]["ident"] == FIFF.FIFFV_POINT_LPA + ): # LPA, Nasion, RPA -> NLR dig[:3] = [dig[1], dig[0], dig[2]] out = np.array(dig, float) else: assert fname.suffix == ".mat" - out = np.array([d['r'] for d in read_custom_montage(fname).dig]) + out = np.array([d["r"] for d in read_custom_montage(fname).dig]) _check_dig_shape(out) return out diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index 
dd978b38b85..4a783518344 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -3,59 +3,72 @@ RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py. """ -# Authors: Teon Brooks -# Joan Massich -# Christian Brodbeck -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from collections import defaultdict, OrderedDict -from math import sin, cos -from os import SEEK_CUR, path as op, PathLike +from collections import OrderedDict, defaultdict +from math import cos, sin +from os import SEEK_CUR, PathLike +from os import path as op from pathlib import Path import numpy as np -from ..pick import pick_types -from ...utils import (verbose, logger, warn, fill_doc, _check_option, - _stamp_to_dt, _check_fname) -from ...transforms import apply_trans, als_ras_trans -from ..base import BaseRaw -from ..utils import _mult_cal_one +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.pick import pick_types +from ..._fiff.utils import _mult_cal_one from ...epochs import BaseEpochs -from ..constants import FIFF -from ..meas_info import _empty_info -from .constants import KIT, LEGACY_AMP_PARAMS -from .coreg import read_mrk, _set_dig_kit from ...event import read_events +from ...transforms import Transform, als_ras_trans, apply_trans +from ...utils import ( + _check_fname, + _check_option, + _stamp_to_dt, + fill_doc, + logger, + verbose, + warn, +) +from ..base import BaseRaw +from .constants import KIT, LEGACY_AMP_PARAMS +from .coreg import _set_dig_kit, read_mrk - -FLOAT64 = '' - Can be submitted as list of trigger channels. - If a list is not specified, the default triggers extracted from - misc channels will be used with specified directionality. - '<' means that largest values assigned to the first channel - in sequence. - '>' means the largest trigger assigned to the last channel - in sequence. - stim_code : 'binary' | 'channel' - How to decode trigger values from stim channels. 'binary' read stim - channel events as binary code, 'channel' encodes channel number. 
- """ - if self.preload: - raise NotImplementedError("Can't change stim channel after " - "loading data") - _check_option('stim_code', stim_code, ['binary', 'channel']) - - if stim is not None: - if isinstance(stim, str): - picks = _default_stim_chs(info) - if stim == '<': - stim = picks[::-1] - elif stim == '>': - stim = picks - else: - raise ValueError("stim needs to be list of int, '>' or " - "'<', not %r" % str(stim)) - else: - stim = np.asarray(stim, int) - if stim.max() >= self._raw_extras[0]['nchan']: - raise ValueError( - 'Got stim=%s, but sqd file only has %i channels' % - (stim, self._raw_extras[0]['nchan'])) - - # modify info - nchan = self._raw_extras[0]['nchan'] + 1 - info['chs'].append(dict( - cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0, - unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE, - ch_name='STI 014', - coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan), - kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN)) - info._update_redundant() - - self._raw_extras[0]['stim'] = stim - self._raw_extras[0]['stim_code'] = stim_code - def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" sqd = self._raw_extras[fi] - nchan = sqd['nchan'] + nchan = sqd["nchan"] data_left = (stop - start) * nchan - conv_factor = sqd['conv_factor'] + conv_factor = sqd["conv_factor"] - n_bytes = sqd['dtype'].itemsize + n_bytes = sqd["dtype"].itemsize assert n_bytes in (2, 4) # Read up to 100 MB of data at a time. blk_size = min(data_left, (100000000 // n_bytes // nchan) * nchan) - with open(self._filenames[fi], 'rb', buffering=0) as fid: + with open(self.filenames[fi], "rb", buffering=0) as fid: # extract data pointer = start * nchan * n_bytes - fid.seek(sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer) - stim = sqd['stim'] + fid.seek(sqd["dirs"][KIT.DIR_INDEX_RAW_DATA]["offset"] + pointer) + stim = sqd["stim"] for blk_start in np.arange(0, data_left, blk_size) // nchan: blk_size = min(blk_size, data_left - blk_start * nchan) - block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size) - block = block.reshape(nchan, -1, order='F').astype(float) + block = np.fromfile(fid, dtype=sqd["dtype"], count=blk_size) + block = block.reshape(nchan, -1, order="F").astype(float) blk_stop = blk_start + block.shape[1] data_view = data[:, blk_start:blk_stop] block *= conv_factor @@ -245,35 +226,106 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Create a synthetic stim channel if stim is not None: stim_ch = _make_stim_channel( - block[stim, :], sqd['slope'], sqd['stimthresh'], - sqd['stim_code'], stim) + block[stim, :], + sqd["slope"], + sqd["stimthresh"], + sqd["stim_code"], + stim, + ) block = np.vstack((block, stim_ch)) _mult_cal_one(data_view, block, idx, cals, mult) # cals are all unity, so can be ignored +def _set_stimchannels(inst, info, stim, stim_code): + """Specify how the trigger channel is synthesized from analog channels. + + Has to be done before loading data. For a RawKIT instance that has been + created with preload=True, this method will raise a + NotImplementedError. + + Parameters + ---------- + %(info_not_none)s + stim : list of int | '<' | '>' + Can be submitted as list of trigger channels. + If a list is not specified, the default triggers extracted from + misc channels will be used with specified directionality. + '<' means that largest values assigned to the first channel + in sequence. + '>' means the largest trigger assigned to the last channel + in sequence. 
+ stim_code : 'binary' | 'channel' + How to decode trigger values from stim channels. 'binary' read stim + channel events as binary code, 'channel' encodes channel number. + """ + if inst.preload: + raise NotImplementedError("Can't change stim channel after loading data") + _check_option("stim_code", stim_code, ["binary", "channel"]) + + if stim is not None: + if isinstance(stim, str): + picks = _default_stim_chs(info) + if stim == "<": + stim = picks[::-1] + elif stim == ">": + stim = picks + else: + raise ValueError( + f"stim needs to be list of int, '>' or '<', not {str(stim)!r}" + ) + else: + stim = np.asarray(stim, int) + if stim.max() >= inst._raw_extras[0]["nchan"]: + raise ValueError( + f"Got stim={stim}, but sqd file only has " + f"{inst._raw_extras[0]['nchan']} channels." + ) + + # modify info + nchan = inst._raw_extras[0]["nchan"] + 1 + info["chs"].append( + dict( + cal=KIT.CALIB_FACTOR, + logno=nchan, + scanno=nchan, + range=1.0, + unit=FIFF.FIFF_UNIT_NONE, + unit_mul=FIFF.FIFF_UNITM_NONE, + ch_name="STI 014", + coil_type=FIFF.FIFFV_COIL_NONE, + loc=np.full(12, np.nan), + kind=FIFF.FIFFV_STIM_CH, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + ) + ) + info._update_redundant() + + inst._raw_extras[0]["stim"] = stim + inst._raw_extras[0]["stim_code"] = stim_code + + def _default_stim_chs(info): """Return default stim channels for SQD files.""" - return pick_types(info, meg=False, ref_meg=False, misc=True, - exclude=[])[:8] + return pick_types(info, meg=False, ref_meg=False, misc=True, exclude=[])[:8] -def _make_stim_channel(trigger_chs, slope, threshold, stim_code, - trigger_values): +def _make_stim_channel(trigger_chs, slope, threshold, stim_code, trigger_values): """Create synthetic stim channel from multiple trigger channels.""" - if slope == '+': + if slope == "+": trig_chs_bin = trigger_chs > threshold - elif slope == '-': + elif slope == "-": trig_chs_bin = trigger_chs < threshold else: raise ValueError("slope needs to be '+' or '-'") # trigger value - if stim_code == 'binary': + if stim_code == "binary": trigger_values = 2 ** np.arange(len(trigger_chs)) - elif stim_code != 'channel': - raise ValueError("stim_code must be 'binary' or 'channel', got %s" % - repr(stim_code)) + elif stim_code != "channel": + raise ValueError( + f"stim_code must be 'binary' or 'channel', got {repr(stim_code)}" + ) trig_chs = trig_chs_bin * trigger_values[:, np.newaxis] return np.array(trig_chs.sum(axis=0), ndmin=2) @@ -322,53 +374,80 @@ class EpochsKIT(BaseEpochs): """ @verbose - def __init__(self, input_fname, events, event_id=None, tmin=0, - baseline=None, reject=None, flat=None, reject_tmin=None, - reject_tmax=None, mrk=None, elp=None, hsp=None, - allow_unknown_format=False, standardize_names=None, - verbose=None): # noqa: D102 - - if isinstance(events, (str, PathLike, Path)): + def __init__( + self, + input_fname, + events, + event_id=None, + tmin=0, + baseline=None, + reject=None, + flat=None, + reject_tmin=None, + reject_tmax=None, + mrk=None, + elp=None, + hsp=None, + allow_unknown_format=False, + standardize_names=None, + verbose=None, + ): + if isinstance(events, str | PathLike | Path): events = read_events(events) input_fname = str( _check_fname(fname=input_fname, must_exist=True, overwrite="read") ) - logger.info('Extracting KIT Parameters from %s...' 
% input_fname) + logger.info(f"Extracting KIT Parameters from {input_fname}...") self.info, kit_info = get_kit_info( - input_fname, allow_unknown_format, standardize_names) + input_fname, allow_unknown_format, standardize_names + ) kit_info.update(input_fname=input_fname) self._raw_extras = [kit_info] - self._filenames = [] - if len(events) != self._raw_extras[0]['n_epochs']: - raise ValueError('Event list does not match number of epochs.') + self.filenames = [] + if len(events) != self._raw_extras[0]["n_epochs"]: + raise ValueError("Event list does not match number of epochs.") - if self._raw_extras[0]['acq_type'] == KIT.EPOCHS: - self._raw_extras[0]['data_length'] = KIT.INT + if self._raw_extras[0]["acq_type"] == KIT.EPOCHS: + self._raw_extras[0]["data_length"] = KIT.INT else: - raise TypeError('SQD file contains raw data, not epochs or ' - 'average. Wrong reader.') + raise TypeError( + "SQD file contains raw data, not epochs or average. Wrong reader." + ) if event_id is None: # convert to int to make typing-checks happy event_id = {str(e): int(e) for e in np.unique(events[:, 2])} for key, val in event_id.items(): if val not in events[:, 2]: - raise ValueError('No matching events found for %s ' - '(event id %i)' % (key, val)) + raise ValueError(f"No matching events found for {key} (event id {val})") data = self._read_kit_data() - assert data.shape == (self._raw_extras[0]['n_epochs'], - self.info['nchan'], - self._raw_extras[0]['frame_length']) - tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin - super(EpochsKIT, self).__init__( - self.info, data, events, event_id, tmin, tmax, baseline, - reject=reject, flat=flat, reject_tmin=reject_tmin, - reject_tmax=reject_tmax, filename=input_fname, verbose=verbose) + assert data.shape == ( + self._raw_extras[0]["n_epochs"], + self.info["nchan"], + self._raw_extras[0]["frame_length"], + ) + tmax = ((data.shape[2] - 1) / self.info["sfreq"]) + tmin + super().__init__( + self.info, + data, + events, + event_id, + tmin, + tmax, + baseline, + reject=reject, + flat=flat, + reject_tmin=reject_tmin, + reject_tmax=reject_tmax, + filename=input_fname, + verbose=verbose, + ) self.info = _call_digitization( - info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info) - logger.info('Ready.') + info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info + ) + logger.info("Ready.") def _read_kit_data(self): """Read epochs data. @@ -381,19 +460,19 @@ def _read_kit_data(self): returns the time values corresponding to the samples. 
""" info = self._raw_extras[0] - epoch_length = info['frame_length'] - n_epochs = info['n_epochs'] - n_samples = info['n_samples'] - input_fname = info['input_fname'] - dtype = info['dtype'] - nchan = info['nchan'] - - with open(input_fname, 'rb', buffering=0) as fid: - fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset']) + epoch_length = info["frame_length"] + n_epochs = info["n_epochs"] + n_samples = info["n_samples"] + input_fname = info["input_fname"] + dtype = info["dtype"] + nchan = info["nchan"] + + with open(input_fname, "rb", buffering=0) as fid: + fid.seek(info["dirs"][KIT.DIR_INDEX_RAW_DATA]["offset"]) count = n_samples * nchan data = np.fromfile(fid, dtype=dtype, count=count) data = data.reshape((n_samples, nchan)).T - data = data * info['conv_factor'] + data = data * info["conv_factor"] data = data.reshape((nchan, n_epochs, epoch_length)) data = data.transpose((1, 0, 2)) @@ -401,26 +480,27 @@ def _read_kit_data(self): def _read_dir(fid): - return dict(offset=np.fromfile(fid, UINT32, 1)[0], - size=np.fromfile(fid, INT32, 1)[0], - max_count=np.fromfile(fid, INT32, 1)[0], - count=np.fromfile(fid, INT32, 1)[0]) + return dict( + offset=np.fromfile(fid, UINT32, 1)[0], + size=np.fromfile(fid, INT32, 1)[0], + max_count=np.fromfile(fid, INT32, 1)[0], + count=np.fromfile(fid, INT32, 1)[0], + ) @verbose def _read_dirs(fid, verbose=None): dirs = list() dirs.append(_read_dir(fid)) - for ii in range(dirs[0]['count'] - 1): - logger.debug(f' KIT dir entry {ii} @ {fid.tell()}') + for ii in range(dirs[0]["count"] - 1): + logger.debug(f" KIT dir entry {ii} @ {fid.tell()}") dirs.append(_read_dir(fid)) - assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count'] + assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]["count"] return dirs @verbose -def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, - verbose=None): +def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose=None): """Extract all the information from the sqd/con file. Parameters @@ -440,31 +520,31 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, A dict containing all the sqd parameter settings. """ sqd = dict() - sqd['rawfile'] = rawfile + sqd["rawfile"] = rawfile unsupported_format = False - with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug + with open(rawfile, "rb", buffering=0) as fid: # buffering=0 for np bug # # directories (0) # - sqd['dirs'] = dirs = _read_dirs(fid) + sqd["dirs"] = dirs = _read_dirs(fid) # # system (1) # - fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset']) + fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]["offset"]) # check file format version version, revision = np.fromfile(fid, INT32, 2) if version < 2 or (version == 2 and revision < 3): - version_string = "V%iR%03i" % (version, revision) + version_string = f"V{version}R{revision:03d}" if allow_unknown_format: unsupported_format = True - warn("Force loading KIT format %s" % version_string) + warn(f"Force loading KIT format {version_string}") else: raise UnsupportedKITFormat( version_string, - "SQD file format %s is not officially supported. " - "Set allow_unknown_format=True to load it anyways." % - (version_string,)) + f"SQD file format {version_string} is not officially supported. 
" + "Set allow_unknown_format=True to load it anyways.", + ) sysid = np.fromfile(fid, INT32, 1)[0] # basic info @@ -472,9 +552,10 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, # model name model_name = _read_name(fid, n=128) # channels - sqd['nchan'] = channel_count = int(np.fromfile(fid, INT32, 1)[0]) + sqd["nchan"] = channel_count = int(np.fromfile(fid, INT32, 1)[0]) comment = _read_name(fid, n=256) create_time, last_modified_time = np.fromfile(fid, INT32, 2) + del last_modified_time fid.seek(KIT.INT * 3, SEEK_CUR) # reserved dewar_style = np.fromfile(fid, INT32, 1)[0] fid.seek(KIT.INT * 3, SEEK_CUR) # spare @@ -490,12 +571,13 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, else: adc_range = np.fromfile(fid, FLOAT64, 1)[0] adc_polarity, adc_allocated, adc_stored = np.fromfile(fid, INT32, 3) - system_name = system_name.replace('\x00', '') - system_name = system_name.strip().replace('\n', '/') - model_name = model_name.replace('\x00', '') - model_name = model_name.strip().replace('\n', '/') + del adc_polarity + system_name = system_name.replace("\x00", "") + system_name = system_name.strip().replace("\n", "/") + model_name = model_name.replace("\x00", "") + model_name = model_name.strip().replace("\n", "/") - full_version = f'V{version:d}R{revision:03d}' + full_version = f"V{version:d}R{revision:03d}" logger.debug("SQD file basic information:") logger.debug("Meg160 version = %s", full_version) logger.debug("System ID = %i", sysid) @@ -507,37 +589,36 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, logger.debug("FLL type = %i", fll_type) logger.debug("Trigger type = %i", trigger_type) logger.debug("A/D board type = %i", adboard_type) - logger.debug("ADC range = +/-%s[V]", adc_range / 2.) + logger.debug("ADC range = +/-%s[V]", adc_range / 2.0) logger.debug("ADC allocate = %i[bit]", adc_allocated) logger.debug("ADC bit = %i[bit]", adc_stored) # MGH description: 'acquisition (megacq) VectorView system at NMR-MGH' - description = \ - f'{system_name} ({sysid}) {full_version} {model_name}' + description = f"{system_name} ({sysid}) {full_version} {model_name}" assert adc_allocated % 8 == 0 - sqd['dtype'] = np.dtype(f'%d, check ' - 'your data for correctness, including channel scales and ' - 'filter settings!' - % (system_name, model_name, sysid, fll_type, use_fll_type)) + use_fll_type = fll_types[np.searchsorted(fll_types, fll_type) - 1] + warn( + "Unknown site filter settings (FLL) for system " + f'"{system_name}" model "{model_name}" (ID {sysid}), will assume FLL ' + f"{fll_type}->{use_fll_type}, check your data for correctness, " + "including channel scales and filter settings!" + ) fll_type = use_fll_type # # channel information (4) # chan_dir = dirs[KIT.DIR_INDEX_CHANNELS] - chan_offset, chan_size = chan_dir['offset'], chan_dir['size'] - sqd['channels'] = channels = [] + chan_offset, chan_size = chan_dir["offset"], chan_dir["size"] + sqd["channels"] = channels = [] exg_gains = list() for i in range(channel_count): fid.seek(chan_offset + chan_size * i) - channel_type, = np.fromfile(fid, INT32, 1) + (channel_type,) = np.fromfile(fid, INT32, 1) # System 52 mislabeled reference channels as NULL. This was fixed # in system 53; not sure about 51... 
if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL:
@@ -546,34 +627,39 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None,
             if channel_type in KIT.CHANNELS_MEG:
                 if channel_type not in KIT.CH_TO_FIFF_COIL:
                     raise NotImplementedError(
-                        "KIT channel type %i can not be read. Please contact "
-                        "the mne-python developers." % channel_type)
-                channels.append({
-                    'type': channel_type,
-                    # (x, y, z, theta, phi) for all MEG channels. Some channel
-                    # types have additional information which we're not using.
-                    'loc': np.fromfile(fid, dtype=FLOAT64, count=5),
-                })
+                        f"KIT channel type {channel_type} can not be read. Please "
+                        "contact the mne-python developers."
+                    )
+                channels.append(
+                    {
+                        "type": channel_type,
+                        # (x, y, z, theta, phi) for all MEG channels. Some channel
+                        # types have additional information which we're not using.
+                        "loc": np.fromfile(fid, dtype=FLOAT64, count=5),
+                    }
+                )
                 if channel_type in KIT.CHANNEL_NAME_NCHAR:
                     fid.seek(16, SEEK_CUR)  # misc fields
-                    channels[-1]['name'] = _read_name(fid, channel_type)
+                    channels[-1]["name"] = _read_name(fid, channel_type)
             elif channel_type in KIT.CHANNELS_MISC:
-                channel_no, = np.fromfile(fid, INT32, 1)
+                (channel_no,) = np.fromfile(fid, INT32, 1)
                 fid.seek(4, SEEK_CUR)
                 name = _read_name(fid, channel_type)
-                channels.append({
-                    'type': channel_type,
-                    'no': channel_no,
-                    'name': name,
-                })
+                channels.append(
+                    {
+                        "type": channel_type,
+                        "no": channel_no,
+                        "name": name,
+                    }
+                )
                 if channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG):
                     offset = 6 if channel_type == KIT.CHANNEL_EEG else 8
                     fid.seek(offset, SEEK_CUR)
                     exg_gains.append(np.fromfile(fid, FLOAT64, 1)[0])
             elif channel_type == KIT.CHANNEL_NULL:
-                channels.append({'type': channel_type})
+                channels.append({"type": channel_type})
             else:
-                raise IOError("Unknown KIT channel type: %i" % channel_type)
+                raise OSError(f"Unknown KIT channel type: {channel_type}")
         exg_gains = np.array(exg_gains)

         #
@@ -583,7 +669,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None,
         # only sensor channels requires gain.
the additional misc channels # (trigger channels, audio and voice channels) are passed # through unaffected - fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset']) + fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]["offset"]) # (offset [Volt], gain [Tesla/Volt]) for each channel sensitivity = np.fromfile(fid, dtype=FLOAT64, count=channel_count * 2) sensitivity.shape = (channel_count, 2) @@ -593,14 +679,14 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, # # amplifier gain (7) # - fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset']) + fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]["offset"]) amp_data = np.fromfile(fid, INT32, 1)[0] if fll_type >= 100: # Kapper Type # gain: mask bit gain1 = (amp_data & 0x00007000) >> 12 gain2 = (amp_data & 0x70000000) >> 28 gain3 = (amp_data & 0x07000000) >> 24 - amp_gain = (KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3]) + amp_gain = KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3] # filter settings hpf = (amp_data & 0x00000700) >> 8 lpf = (amp_data & 0x00070000) >> 16 @@ -613,34 +699,36 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, # filter settings hpf = (amp_data & 0x007) >> 4 lpf = (amp_data & 0x0700) >> 8 - bef = (amp_data & 0xc000) >> 14 + bef = (amp_data & 0xC000) >> 14 hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type] - sqd['highpass'] = KIT.HPFS[hpf_options][hpf] - sqd['lowpass'] = KIT.LPFS[lpf_options][lpf] - sqd['notch'] = KIT.BEFS[bef_options][bef] + sqd["highpass"] = KIT.HPFS[hpf_options][hpf] + sqd["lowpass"] = KIT.LPFS[lpf_options][lpf] + sqd["notch"] = KIT.BEFS[bef_options][bef] # # Acquisition Parameters (8) # - fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset']) - sqd['acq_type'], = acq_type, = np.fromfile(fid, INT32, 1) - sqd['sfreq'], = np.fromfile(fid, FLOAT64, 1) + fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]["offset"]) + (sqd["acq_type"],) = (acq_type,) = np.fromfile(fid, INT32, 1) + (sqd["sfreq"],) = np.fromfile(fid, FLOAT64, 1) if acq_type == KIT.CONTINUOUS: # samples_count, = np.fromfile(fid, INT32, 1) fid.seek(KIT.INT, SEEK_CUR) - sqd['n_samples'], = np.fromfile(fid, INT32, 1) + (sqd["n_samples"],) = np.fromfile(fid, INT32, 1) elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS: - sqd['frame_length'], = np.fromfile(fid, INT32, 1) - sqd['pretrigger_length'], = np.fromfile(fid, INT32, 1) - sqd['average_count'], = np.fromfile(fid, INT32, 1) - sqd['n_epochs'], = np.fromfile(fid, INT32, 1) + (sqd["frame_length"],) = np.fromfile(fid, INT32, 1) + (sqd["pretrigger_length"],) = np.fromfile(fid, INT32, 1) + (sqd["average_count"],) = np.fromfile(fid, INT32, 1) + (sqd["n_epochs"],) = np.fromfile(fid, INT32, 1) if acq_type == KIT.EVOKED: - sqd['n_samples'] = sqd['frame_length'] + sqd["n_samples"] = sqd["frame_length"] else: - sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs'] + sqd["n_samples"] = sqd["frame_length"] * sqd["n_epochs"] else: - raise IOError("Invalid acquisition type: %i. Your file is neither " - "continuous nor epoched data." % (acq_type,)) + raise OSError( + f"Invalid acquisition type: {acq_type}. Your file is neither " + "continuous nor epoched data." 
+ ) # # digitization information (12 and 26) @@ -649,10 +737,10 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, cor_dir = dirs[KIT.DIR_INDEX_COREG] dig = dict() hsp = list() - if dig_dir['count'] > 0 and cor_dir['count'] > 0: + if dig_dir["count"] > 0 and cor_dir["count"] > 0: # directories (0) - fid.seek(dig_dir['offset']) - for _ in range(dig_dir['count']): + fid.seek(dig_dir["offset"]) + for _ in range(dig_dir["count"]): name = _read_name(fid, n=8).strip() # Sometimes there are mismatches (e.g., AFz vs AFZ) between # the channel name and its digitized, name, so let's be case @@ -668,38 +756,49 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, # nasion, lpa, rpa, HPI in native space elp = [] for key in ( - 'fidnz', 'fidt9', 'fidt10', - 'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4', 'hpi_5'): + "fidnz", + "fidt9", + "fidt10", + "hpi_1", + "hpi_2", + "hpi_3", + "hpi_4", + "hpi_5", + ): if key in dig and np.isfinite(dig[key]).all(): elp.append(dig.pop(key)) elp = np.array(elp) hsp = np.array(hsp, float).reshape(-1, 3) if elp.shape not in ((6, 3), (7, 3), (8, 3)): - raise RuntimeError( - f'Fewer than 3 HPI coils found, got {len(elp) - 3}') + raise RuntimeError(f"Fewer than 3 HPI coils found, got {len(elp) - 3}") # coregistration - fid.seek(cor_dir['offset']) + fid.seek(cor_dir["offset"]) mrk = np.zeros((elp.shape[0] - 3, 3)) meg_done = [True] * 5 - for _ in range(cor_dir['count']): + for _ in range(cor_dir["count"]): done = np.fromfile(fid, INT32, 1)[0] - fid.seek(16 * KIT.DOUBLE + # meg_to_mri - 16 * KIT.DOUBLE, # mri_to_meg - SEEK_CUR) + fid.seek( + 16 * KIT.DOUBLE + 16 * KIT.DOUBLE, # meg_to_mri # mri_to_meg + SEEK_CUR, + ) marker_count = np.fromfile(fid, INT32, 1)[0] if not done: continue assert marker_count >= len(mrk) for mi in range(len(mrk)): - mri_type, meg_type, mri_done, this_meg_done = \ - np.fromfile(fid, INT32, 4) + mri_type, meg_type, mri_done, this_meg_done = np.fromfile( + fid, INT32, 4 + ) + del mri_type, meg_type, mri_done meg_done[mi] = bool(this_meg_done) fid.seek(3 * KIT.DOUBLE, SEEK_CUR) # mri_pos mrk[mi] = np.fromfile(fid, FLOAT64, 3) fid.seek(256, SEEK_CUR) # marker_file (char) if not all(meg_done): - logger.info(f'Keeping {sum(meg_done)}/{len(meg_done)} HPI ' - 'coils that were digitized') + logger.info( + f"Keeping {sum(meg_done)}/{len(meg_done)} HPI " + "coils that were digitized" + ) elp = elp[[True] * 3 + meg_done] mrk = mrk[meg_done] sqd.update(hsp=hsp, elp=elp, mrk=mrk) @@ -707,11 +806,10 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, # precompute conversion factor for reading data if unsupported_format: if sysid not in LEGACY_AMP_PARAMS: - raise IOError("Legacy parameters for system ID %i unavailable" % - (sysid,)) + raise OSError(f"Legacy parameters for system ID {sysid} unavailable.") adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid] - is_meg = np.array([ch['type'] in KIT.CHANNELS_MEG for ch in channels]) - ad_to_volt = adc_range / (2. ** adc_stored) + is_meg = np.array([ch["type"] in KIT.CHANNELS_MEG for ch in channels]) + ad_to_volt = adc_range / (2.0**adc_stored) ad_to_tesla = ad_to_volt / amp_gain * channel_gain conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt) # XXX this is a bit of a hack. Should probably do this more cleanly at @@ -719,33 +817,36 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, # the test files with known amplitudes. The conv_factors need to be # replaced by these values otherwise we're off by a factor off 5000.0 # for the EEG data. 
- is_exg = [ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG) - for ch in channels] - exg_gains /= 2. ** (adc_stored - 14) + is_exg = [ch["type"] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG) for ch in channels] + exg_gains /= 2.0 ** (adc_stored - 14) exg_gains[exg_gains == 0] = ad_to_volt conv_factor[is_exg] = exg_gains - sqd['conv_factor'] = conv_factor[:, np.newaxis] + sqd["conv_factor"] = conv_factor[:, np.newaxis] # Create raw.info dict for raw fif object with SQD data - info = _empty_info(float(sqd['sfreq'])) - info.update(meas_date=_stamp_to_dt((create_time, 0)), - lowpass=sqd['lowpass'], - highpass=sqd['highpass'], kit_system_id=sysid, - description=description) + info = _empty_info(float(sqd["sfreq"])) + info.update( + meas_date=_stamp_to_dt((create_time, 0)), + lowpass=sqd["lowpass"], + highpass=sqd["highpass"], + kit_system_id=sysid, + description=description, + dev_head_t=Transform("meg", "head"), + ) # Creates a list of dicts of meg channels for raw.info - logger.info('Setting channel info structure...') - info['chs'] = fiff_channels = [] + logger.info("Setting channel info structure...") + info["chs"] = fiff_channels = [] channel_index = defaultdict(lambda: 0) - sqd['eeg_dig'] = OrderedDict() + sqd["eeg_dig"] = OrderedDict() for idx, ch in enumerate(channels, 1): - if ch['type'] in KIT.CHANNELS_MEG: - ch_name = ch.get('name', '') - if ch_name == '' or standardize_names: - ch_name = 'MEG %03d' % idx + if ch["type"] in KIT.CHANNELS_MEG: + ch_name = ch.get("name", "") + if ch_name == "" or standardize_names: + ch_name = f"MEG {idx:03d}" # create three orthogonal vector # ch_angles[0]: theta, ch_angles[1]: phi - theta, phi = np.radians(ch['loc'][3:]) + theta, phi = np.radians(ch["loc"][3:]) x = sin(theta) * cos(phi) y = sin(theta) * sin(phi) z = cos(theta) @@ -765,29 +866,38 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, vec_x /= np.linalg.norm(vec_x) vec_y = np.cross(vec_z, vec_x) # transform to Neuromag like coordinate space - vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z)) + vecs = np.vstack((ch["loc"][:3], vec_x, vec_y, vec_z)) vecs = apply_trans(als_ras_trans, vecs) unit = FIFF.FIFF_UNIT_T loc = vecs.ravel() else: - ch_type_label = KIT.CH_LABEL[ch['type']] + ch_type_label = KIT.CH_LABEL[ch["type"]] channel_index[ch_type_label] += 1 ch_type_index = channel_index[ch_type_label] - ch_name = ch.get('name', '') + ch_name = ch.get("name", "") eeg_name = ch_name.lower() # some files have all EEG labeled as EEG - if ch_name in ('', 'EEG') or standardize_names: - ch_name = '%s %03i' % (ch_type_label, ch_type_index) + if ch_name in ("", "EEG") or standardize_names: + ch_name = f"{ch_type_label} {ch_type_index:03d}" unit = FIFF.FIFF_UNIT_V loc = np.zeros(12) if eeg_name and eeg_name in dig: - loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name] - fiff_channels.append(dict( - cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE, - unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name, - coord_frame=FIFF.FIFFV_COORD_DEVICE, - coil_type=KIT.CH_TO_FIFF_COIL[ch['type']], - kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc)) + loc[:3] = sqd["eeg_dig"][eeg_name] = dig[eeg_name] + fiff_channels.append( + dict( + cal=KIT.CALIB_FACTOR, + logno=idx, + scanno=idx, + range=KIT.RANGE, + unit=unit, + unit_mul=KIT.UNIT_MUL, + ch_name=ch_name, + coord_frame=FIFF.FIFFV_COORD_DEVICE, + coil_type=KIT.CH_TO_FIFF_COIL[ch["type"]], + kind=KIT.CH_TO_FIFF_KIND[ch["type"]], + loc=loc, + ) + ) info._unlocked = False info._update_redundant() return info, sqd @@ -795,14 +905,26 @@ def 
get_kit_info(rawfile, allow_unknown_format, standardize_names=None, def _read_name(fid, ch_type=None, n=None): n = n if ch_type is None else KIT.CHANNEL_NAME_NCHAR[ch_type] - return fid.read(n).split(b'\x00')[0].decode('utf-8') + return fid.read(n).split(b"\x00")[0].decode("utf-8") @fill_doc -def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>', - slope='-', stimthresh=1, preload=False, stim_code='binary', - allow_unknown_format=False, standardize_names=False, - verbose=None): +def read_raw_kit( + input_fname, + mrk=None, + elp=None, + hsp=None, + stim=">", + slope="-", + stimthresh=1, + preload=False, + stim_code="binary", + allow_unknown_format=False, + standardize_names=False, + *, + bad_coils=(), + verbose=None, +) -> RawKIT: r"""Reader function for Ricoh/KIT conversion to FIF. Parameters @@ -821,6 +943,7 @@ def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>', Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s + %(kit_badcoils)s %(verbose)s Returns @@ -843,17 +966,35 @@ def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>', If ``mrk``\, ``hsp`` or ``elp`` are :term:`array_like` inputs, then the numbers in xyz coordinates should be in units of meters. """ - return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp, - stim=stim, slope=slope, stimthresh=stimthresh, - preload=preload, stim_code=stim_code, - allow_unknown_format=allow_unknown_format, - standardize_names=standardize_names, verbose=verbose) + return RawKIT( + input_fname=input_fname, + mrk=mrk, + elp=elp, + hsp=hsp, + stim=stim, + slope=slope, + stimthresh=stimthresh, + preload=preload, + stim_code=stim_code, + allow_unknown_format=allow_unknown_format, + standardize_names=standardize_names, + bad_coils=bad_coils, + verbose=verbose, + ) @fill_doc -def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None, - hsp=None, allow_unknown_format=False, - standardize_names=False, verbose=None): +def read_epochs_kit( + input_fname, + events, + event_id=None, + mrk=None, + elp=None, + hsp=None, + allow_unknown_format=False, + standardize_names=False, + verbose=None, +) -> EpochsKIT: """Reader function for Ricoh/KIT epochs files. Parameters @@ -879,16 +1020,26 @@ def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None, Returns ------- - epochs : instance of Epochs + EpochsKIT : instance of BaseEpochs The epochs. + See Also + -------- + mne.Epochs : Documentation of attributes and methods. + Notes ----- .. versionadded:: 0.9.0 """ - epochs = EpochsKIT(input_fname=input_fname, events=events, - event_id=event_id, mrk=mrk, elp=elp, hsp=hsp, - allow_unknown_format=allow_unknown_format, - standardize_names=standardize_names, - verbose=verbose) + epochs = EpochsKIT( + input_fname=input_fname, + events=events, + event_id=event_id, + mrk=mrk, + elp=elp, + hsp=hsp, + allow_unknown_format=allow_unknown_format, + standardize_names=standardize_names, + verbose=verbose, + ) return epochs diff --git a/mne/io/kit/tests/__init__.py b/mne/io/kit/tests/__init__.py index 4f7fb08a01d..25a040855f1 100644 --- a/mne/io/kit/tests/__init__.py +++ b/mne/io/kit/tests/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
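For the read_raw_kit signature change shown above (the new keyword-only bad_coils argument), a typical call could look like the sketch below; all file names are placeholders rather than shipped test data, and the coil index is purely illustrative.

import mne

raw = mne.io.read_raw_kit(
    "recording.sqd",      # placeholder KIT/Ricoh recording
    mrk="markers.sqd",    # marker-coil measurement
    elp="points.elp",     # digitized fiducials and HPI coils
    hsp="headshape.hsp",  # digitized head shape
    bad_coils=(3,),       # exclude a marker coil from the coregistration
    preload=True,
)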
+ from pathlib import Path data_dir = Path(__file__).parent / "data" diff --git a/mne/io/kit/tests/test_coreg.py b/mne/io/kit/tests/test_coreg.py index 7c30a401507..2ffa7a1178a 100644 --- a/mne/io/kit/tests/test_coreg.py +++ b/mne/io/kit/tests/test_coreg.py @@ -1,15 +1,14 @@ -# Authors: Christian Brodbeck -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import pickle from pathlib import Path +import numpy as np import pytest from numpy.testing import assert_array_equal from mne.io.kit import read_mrk -from mne.io._digitization import _write_dig_points mrk_fname = Path(__file__).parent / "data" / "test_mrk.sqd" @@ -20,19 +19,17 @@ def test_io_mrk(tmp_path): # txt path = tmp_path / "mrk.txt" - _write_dig_points(path, pts) + with open(path, "wb") as fid: + fid.write(b"%% %d 3D points, x y z per line\n" % len(pts)) + np.savetxt(fid, pts, delimiter="\t", newline="\n") + pts_2 = read_mrk(path) assert_array_equal(pts, pts_2, "read/write mrk to text") - # pickle - fname = tmp_path / "mrk.pickled" - with open(fname, 'wb') as fid: - pickle.dump(dict(mrk=pts), fid) - pts_2 = read_mrk(fname) - assert_array_equal(pts_2, pts, "pickle mrk") - with open(fname, 'wb') as fid: - pickle.dump(dict(), fid) - pytest.raises(ValueError, read_mrk, fname) - # unsupported extension - pytest.raises(ValueError, read_mrk, "file.ext") + fname = tmp_path / "file.ext" + with pytest.raises(FileNotFoundError, match="does not exist"): + read_mrk(fname) + fname.write_text("") + with pytest.raises(ValueError, match="file extension"): + read_mrk(fname) diff --git a/mne/io/kit/tests/test_kit.py b/mne/io/kit/tests/test_kit.py index d3746012328..6bd8cb83e84 100644 --- a/mne/io/kit/tests/test_kit.py +++ b/mne/io/kit/tests/test_kit.py @@ -1,28 +1,32 @@ -# Author: Teon Brooks -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
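As a standalone illustration of the plain-text marker round-trip used in the rewritten test_io_mrk above, the sketch below writes and re-reads five random points; they stand in for the shipped test_mrk.sqd contents and are not real marker data.

import numpy as np
from mne.io.kit import read_mrk

# Five fake 3D marker points in meters (KIT marker files describe 5 coils).
pts = np.random.RandomState(0).uniform(-0.1, 0.1, (5, 3))

# Same plain-text format the test writes: a '%%' comment header, then x y z per line.
with open("mrk.txt", "wb") as fid:
    fid.write(b"%% %d 3D points, x y z per line\n" % len(pts))
    np.savetxt(fid, pts, delimiter="\t", newline="\n")

pts_2 = read_mrk("mrk.txt")
np.testing.assert_array_equal(pts, pts_2)  # float64 values round-trip exactly through savetxt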
from pathlib import Path import numpy as np -from numpy.testing import (assert_array_almost_equal, assert_array_equal, - assert_equal, assert_allclose) import pytest -from scipy import linalg import scipy.io +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, +) +from scipy import linalg import mne -from mne import pick_types, Epochs, find_events, read_events +from mne import Epochs, find_events, pick_types, read_events +from mne._fiff.constants import FIFF from mne.datasets.testing import requires_testing_data -from mne.transforms import apply_trans -from mne.utils import assert_dig_allclose -from mne.io import read_raw_fif, read_raw_kit, read_epochs_kit -from mne.io.constants import FIFF -from mne.io.kit.kit import get_kit_info -from mne.io.kit.coreg import read_sns +from mne.io import read_epochs_kit, read_raw_fif, read_raw_kit from mne.io.kit.constants import KIT +from mne.io.kit.coreg import read_sns +from mne.io.kit.kit import get_kit_info from mne.io.tests.test_raw import _test_raw_reader from mne.surface import _get_ico_surface +from mne.transforms import apply_trans +from mne.utils import assert_dig_allclose data_dir = Path(__file__).parent / "data" sqd_path = data_dir / "test.sqd" @@ -41,9 +45,7 @@ sqd_as_path = data_path / "KIT" / "test_as-raw.con" yokogawa_path = data_path / "KIT" / "ArtificalSignalData_Yokogawa_1khz.con" ricoh_path = data_path / "KIT" / "ArtificalSignalData_RICOH_1khz.con" -ricoh_systems_paths = [ - data_path / "KIT" / "Example_PQA160C_1001-export_anonymyze.con" -] +ricoh_systems_paths = [data_path / "KIT" / "Example_PQA160C_1001-export_anonymyze.con"] ricoh_systems_paths += [ data_path / "KIT" / "Example_RICOH160-1_10020-export_anonymyze.con" ] @@ -62,92 +64,134 @@ def test_data(tmp_path): pytest.raises(TypeError, read_raw_kit, epochs_path) pytest.raises(TypeError, read_epochs_kit, sqd_path) pytest.raises(ValueError, read_raw_kit, sqd_path, mrk_path, elp_txt_path) - pytest.raises(ValueError, read_raw_kit, sqd_path, None, None, None, - list(range(200, 190, -1))) - pytest.raises(ValueError, read_raw_kit, sqd_path, None, None, None, - list(range(167, 159, -1)), '*', 1, True) + pytest.raises( + ValueError, read_raw_kit, sqd_path, None, None, None, list(range(200, 190, -1)) + ) + pytest.raises( + ValueError, + read_raw_kit, + sqd_path, + None, + None, + None, + list(range(167, 159, -1)), + "*", + 1, + True, + ) # check functionality - raw_mrk = read_raw_kit(sqd_path, [mrk2_path, mrk3_path], elp_txt_path, - hsp_txt_path) - assert raw_mrk.info['description'] == \ - 'NYU 160ch System since Jan24 2009 (34) V2R004 EQ1160C' - raw_py = _test_raw_reader(read_raw_kit, input_fname=sqd_path, mrk=mrk_path, - elp=elp_txt_path, hsp=hsp_txt_path, - stim=list(range(167, 159, -1)), slope='+', - stimthresh=1) - assert 'RawKIT' in repr(raw_py) - assert_equal(raw_mrk.info['kit_system_id'], KIT.SYSTEM_NYU_2010) + raw_mrk = read_raw_kit(sqd_path, [mrk2_path, mrk3_path], elp_txt_path, hsp_txt_path) + assert ( + raw_mrk.info["description"] + == "NYU 160ch System since Jan24 2009 (34) V2R004 EQ1160C" # noqa: E501 + ) + raw_py = _test_raw_reader( + read_raw_kit, + input_fname=sqd_path, + mrk=mrk_path, + elp=elp_txt_path, + hsp=hsp_txt_path, + stim=list(range(167, 159, -1)), + slope="+", + stimthresh=1, + ) + assert "RawKIT" in repr(raw_py) + assert_equal(raw_mrk.info["kit_system_id"], KIT.SYSTEM_NYU_2010) # check number/kind of channels - assert_equal(len(raw_py.info['chs']), 193) - kit_channels = (('kind', {FIFF.FIFFV_MEG_CH: 
157, FIFF.FIFFV_REF_MEG_CH: 3, - FIFF.FIFFV_MISC_CH: 32, FIFF.FIFFV_STIM_CH: 1}), - ('coil_type', {FIFF.FIFFV_COIL_KIT_GRAD: 157, - FIFF.FIFFV_COIL_KIT_REF_MAG: 3, - FIFF.FIFFV_COIL_NONE: 33})) + assert_equal(len(raw_py.info["chs"]), 193) + kit_channels = ( + ( + "kind", + { + FIFF.FIFFV_MEG_CH: 157, + FIFF.FIFFV_REF_MEG_CH: 3, + FIFF.FIFFV_MISC_CH: 32, + FIFF.FIFFV_STIM_CH: 1, + }, + ), + ( + "coil_type", + { + FIFF.FIFFV_COIL_KIT_GRAD: 157, + FIFF.FIFFV_COIL_KIT_REF_MAG: 3, + FIFF.FIFFV_COIL_NONE: 33, + }, + ), + ) for label, target in kit_channels: - actual = {id_: sum(ch[label] == id_ for ch in raw_py.info['chs']) for - id_ in target.keys()} + actual = { + id_: sum(ch[label] == id_ for ch in raw_py.info["chs"]) + for id_ in target.keys() + } assert_equal(actual, target) # Test stim channel - raw_stim = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_txt_path, - stim='<', preload=False) + raw_stim = read_raw_kit( + sqd_path, mrk_path, elp_txt_path, hsp_txt_path, stim="<", preload=False + ) for raw in [raw_py, raw_stim, raw_mrk]: - stim_pick = pick_types(raw.info, meg=False, ref_meg=False, - stim=True, exclude='bads') + stim_pick = pick_types( + raw.info, meg=False, ref_meg=False, stim=True, exclude="bads" + ) stim1, _ = raw[stim_pick] stim2 = np.array(raw.read_stim_ch(), ndmin=2) assert_array_equal(stim1, stim2) # Binary file only stores the sensor channels - py_picks = pick_types(raw_py.info, meg=True, exclude='bads') + py_picks = pick_types(raw_py.info, meg=True, exclude="bads") raw_bin = data_dir / "test_bin_raw.fif" raw_bin = read_raw_fif(raw_bin, preload=True) - bin_picks = pick_types(raw_bin.info, meg=True, stim=True, exclude='bads') + bin_picks = pick_types(raw_bin.info, meg=True, stim=True, exclude="bads") data_bin, _ = raw_bin[bin_picks] data_py, _ = raw_py[py_picks] # this .mat was generated using the Yokogawa MEG Reader data_Ykgw = data_dir / "test_Ykgw.mat" - data_Ykgw = scipy.io.loadmat(data_Ykgw)['data'] + data_Ykgw = scipy.io.loadmat(data_Ykgw)["data"] data_Ykgw = data_Ykgw[py_picks] assert_array_almost_equal(data_py, data_Ykgw) - py_picks = pick_types(raw_py.info, meg=True, stim=True, ref_meg=False, - exclude='bads') + py_picks = pick_types( + raw_py.info, meg=True, stim=True, ref_meg=False, exclude="bads" + ) data_py, _ = raw_py[py_picks] assert_array_almost_equal(data_py, data_bin) # KIT-UMD data - _test_raw_reader(read_raw_kit, input_fname=sqd_umd_path, test_rank='less') + _test_raw_reader(read_raw_kit, input_fname=sqd_umd_path, test_rank="less") raw = read_raw_kit(sqd_umd_path) - assert raw.info['description'] == \ - 'University of Maryland/Kanazawa Institute of Technology/160-channel MEG System (53) V2R004 PQ1160R' # noqa: E501 - assert_equal(raw.info['kit_system_id'], KIT.SYSTEM_UMD_2014_12) + assert ( + raw.info["description"] + == "University of Maryland/Kanazawa Institute of Technology/160-channel MEG System (53) V2R004 PQ1160R" # noqa: E501 + ) + assert_equal(raw.info["kit_system_id"], KIT.SYSTEM_UMD_2014_12) # check number/kind of channels - assert_equal(len(raw.info['chs']), 193) + assert_equal(len(raw.info["chs"]), 193) for label, target in kit_channels: - actual = {id_: sum(ch[label] == id_ for ch in raw.info['chs']) for - id_ in target.keys()} + actual = { + id_: sum(ch[label] == id_ for ch in raw.info["chs"]) + for id_ in target.keys() + } assert_equal(actual, target) # KIT Academia Sinica - raw = read_raw_kit(sqd_as_path, slope='+') - assert raw.info['description'] == \ - 'Academia Sinica/Institute of Linguistics//Magnetoencephalograph System (261) 
V2R004 PQ1160R-N2' # noqa: E501 - assert_equal(raw.info['kit_system_id'], KIT.SYSTEM_AS_2008) - assert_equal(raw.info['chs'][100]['ch_name'], 'MEG 101') - assert_equal(raw.info['chs'][100]['kind'], FIFF.FIFFV_MEG_CH) - assert_equal(raw.info['chs'][100]['coil_type'], FIFF.FIFFV_COIL_KIT_GRAD) - assert_equal(raw.info['chs'][157]['ch_name'], 'MEG 158') - assert_equal(raw.info['chs'][157]['kind'], FIFF.FIFFV_REF_MEG_CH) - assert_equal(raw.info['chs'][157]['coil_type'], - FIFF.FIFFV_COIL_KIT_REF_MAG) - assert_equal(raw.info['chs'][160]['ch_name'], 'EEG 001') - assert_equal(raw.info['chs'][160]['kind'], FIFF.FIFFV_EEG_CH) - assert_equal(raw.info['chs'][160]['coil_type'], FIFF.FIFFV_COIL_EEG) + raw = read_raw_kit(sqd_as_path, slope="+") + assert ( + raw.info["description"] + == "Academia Sinica/Institute of Linguistics//Magnetoencephalograph System (261) V2R004 PQ1160R-N2" # noqa: E501 + ) + assert_equal(raw.info["kit_system_id"], KIT.SYSTEM_AS_2008) + assert_equal(raw.info["chs"][100]["ch_name"], "MEG 101") + assert_equal(raw.info["chs"][100]["kind"], FIFF.FIFFV_MEG_CH) + assert_equal(raw.info["chs"][100]["coil_type"], FIFF.FIFFV_COIL_KIT_GRAD) + assert_equal(raw.info["chs"][157]["ch_name"], "MEG 158") + assert_equal(raw.info["chs"][157]["kind"], FIFF.FIFFV_REF_MEG_CH) + assert_equal(raw.info["chs"][157]["coil_type"], FIFF.FIFFV_COIL_KIT_REF_MAG) + assert_equal(raw.info["chs"][160]["ch_name"], "EEG 001") + assert_equal(raw.info["chs"][160]["kind"], FIFF.FIFFV_EEG_CH) + assert_equal(raw.info["chs"][160]["coil_type"], FIFF.FIFFV_COIL_EEG) assert_array_equal(find_events(raw), [[91, 0, 2]]) @@ -156,21 +200,21 @@ def test_unknown_format(tmp_path): """Test our warning about an unknown format.""" fname = tmp_path / ricoh_path.name _, kit_info = get_kit_info(ricoh_path, allow_unknown_format=False) - n_before = kit_info['dirs'][KIT.DIR_INDEX_SYSTEM]['offset'] - with open(fname, 'wb') as fout: - with open(ricoh_path, 'rb') as fin: + n_before = kit_info["dirs"][KIT.DIR_INDEX_SYSTEM]["offset"] + with open(fname, "wb") as fout: + with open(ricoh_path, "rb") as fin: fout.write(fin.read(n_before)) - version, revision = np.fromfile(fin, ' 2 # good version = 1 # bad - np.array([version, revision], ' 5000 # should have similar size, distance from center - dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0))**2, axis=1)) - dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0))**2, axis=1)) + dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0)) ** 2, axis=1)) + dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0)) ** 2, axis=1)) hsp_rad = np.mean(dist) hsp_dec_rad = np.mean(dist_dec) assert_array_almost_equal(hsp_rad, hsp_dec_rad, decimal=3) @requires_testing_data -@pytest.mark.parametrize('fname, desc, system_id', [ - (ricoh_systems_paths[0], - 'Meg160/Analysis (1001) V2R004 PQA160C', 1001), - (ricoh_systems_paths[1], - 'RICOH MEG System (10020) V3R000 RICOH160-1', 10020), - (ricoh_systems_paths[2], - 'RICOH MEG System (10021) V3R000 RICOH160-1', 10021), - (ricoh_systems_paths[3], - 'Yokogawa Electric Corporation/MEG device for infants/151-channel MEG ' - 'System (903) V2R004 PQ1151R', 903), -]) +@pytest.mark.parametrize( + "fname, desc, system_id", + [ + (ricoh_systems_paths[0], "Meg160/Analysis (1001) V2R004 PQA160C", 1001), + (ricoh_systems_paths[1], "RICOH MEG System (10020) V3R000 RICOH160-1", 10020), + (ricoh_systems_paths[2], "RICOH MEG System (10021) V3R000 RICOH160-1", 10021), + ( + ricoh_systems_paths[3], + "Yokogawa Electric Corporation/MEG device for infants/151-channel MEG " + 
"System (903) V2R004 PQ1151R", + 903, + ), + ], +) def test_raw_system_id(fname, desc, system_id): """Test reading basics and system IDs.""" raw = _test_raw_reader(read_raw_kit, input_fname=fname) - assert raw.info['description'] == desc - assert raw.info['kit_system_id'] == system_id + assert raw.info["description"] == desc + assert raw.info["kit_system_id"] == system_id @requires_testing_data @@ -374,13 +437,16 @@ def test_berlin(): """Test data from Berlin.""" # gh-8535 raw = read_raw_kit(berlin_path) - assert raw.info['description'] == 'Physikalisch Technische Bundesanstalt, Berlin/128-channel MEG System (124) V2R004 PQ1128R-N2' # noqa: E501 - assert raw.info['kit_system_id'] == 124 - assert raw.info['highpass'] == 0. - assert raw.info['lowpass'] == 200. - assert raw.info['sfreq'] == 500. - n = int(round(28.77 * raw.info['sfreq'])) - meg = raw.get_data('MEG 003', n, n + 1)[0, 0] + assert ( + raw.info["description"] + == "Physikalisch Technische Bundesanstalt, Berlin/128-channel MEG System (124) V2R004 PQ1128R-N2" # noqa: E501 + ) + assert raw.info["kit_system_id"] == 124 + assert raw.info["highpass"] == 0.0 + assert raw.info["lowpass"] == 200.0 + assert raw.info["sfreq"] == 500.0 + n = int(round(28.77 * raw.info["sfreq"])) + meg = raw.get_data("MEG 003", n, n + 1)[0, 0] assert_allclose(meg, -8.89e-12, rtol=1e-3) - eeg = raw.get_data('E14', n, n + 1)[0, 0] + eeg = raw.get_data("E14", n, n + 1)[0, 0] assert_allclose(eeg, -2.55, rtol=1e-3) diff --git a/mne/io/matrix.py b/mne/io/matrix.py deleted file mode 100644 index 4da12b8506f..00000000000 --- a/mne/io/matrix.py +++ /dev/null @@ -1,128 +0,0 @@ -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# -# License: BSD-3-Clause - -from .constants import FIFF -from .tag import find_tag, has_tag -from .write import (write_int, start_block, end_block, write_float_matrix, - write_name_list) -from ..utils import logger - - -def _transpose_named_matrix(mat): - """Transpose mat inplace (no copy).""" - mat['nrow'], mat['ncol'] = mat['ncol'], mat['nrow'] - mat['row_names'], mat['col_names'] = mat['col_names'], mat['row_names'] - mat['data'] = mat['data'].T - - -def _read_named_matrix(fid, node, matkind, indent=' ', transpose=False): - """Read named matrix from the given node. - - Parameters - ---------- - fid : file - The opened file descriptor. - node : dict - The node in the tree. - matkind : int - The type of matrix. - transpose : bool - If True, transpose the matrix. Default is False. 
- %(verbose)s - - Returns - ------- - mat: dict - The matrix data - """ - # Descend one level if necessary - if node['block'] != FIFF.FIFFB_MNE_NAMED_MATRIX: - for k in range(node['nchild']): - if node['children'][k]['block'] == FIFF.FIFFB_MNE_NAMED_MATRIX: - if has_tag(node['children'][k], matkind): - node = node['children'][k] - break - else: - logger.info(indent + 'Desired named matrix (kind = %d) not ' - 'available' % matkind) - return None - else: - if not has_tag(node, matkind): - logger.info(indent + 'Desired named matrix (kind = %d) not ' - 'available' % matkind) - return None - - # Read everything we need - tag = find_tag(fid, node, matkind) - if tag is None: - raise ValueError('Matrix data missing') - else: - data = tag.data - - nrow, ncol = data.shape - tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW) - if tag is not None and tag.data != nrow: - raise ValueError('Number of rows in matrix data and FIFF_MNE_NROW ' - 'tag do not match') - - tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL) - if tag is not None and tag.data != ncol: - raise ValueError('Number of columns in matrix data and ' - 'FIFF_MNE_NCOL tag do not match') - - tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES) - row_names = tag.data.split(':') if tag is not None else [] - - tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES) - col_names = tag.data.split(':') if tag is not None else [] - - mat = dict(nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names, - data=data) - if transpose: - _transpose_named_matrix(mat) - return mat - - -def write_named_matrix(fid, kind, mat): - """Write named matrix from the given node. - - Parameters - ---------- - fid : file - The opened file descriptor. - kind : int - The kind of the matrix. - matkind : int - The type of matrix. - """ - # let's save ourselves from disaster - n_tot = mat['nrow'] * mat['ncol'] - if mat['data'].size != n_tot: - ratio = n_tot / float(mat['data'].size) - if n_tot < mat['data'].size and ratio > 0: - ratio = 1 / ratio - raise ValueError('Cannot write matrix: row (%i) and column (%i) ' - 'total element (%i) mismatch with data size (%i), ' - 'appears to be off by a factor of %gx' - % (mat['nrow'], mat['ncol'], n_tot, - mat['data'].size, ratio)) - start_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX) - write_int(fid, FIFF.FIFF_MNE_NROW, mat['nrow']) - write_int(fid, FIFF.FIFF_MNE_NCOL, mat['ncol']) - - if len(mat['row_names']) > 0: - # let's prevent unintentional stupidity - if len(mat['row_names']) != mat['nrow']: - raise ValueError('len(mat["row_names"]) != mat["nrow"]') - write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, mat['row_names']) - - if len(mat['col_names']) > 0: - # let's prevent unintentional stupidity - if len(mat['col_names']) != mat['ncol']: - raise ValueError('len(mat["col_names"]) != mat["ncol"]') - write_name_list(fid, FIFF.FIFF_MNE_COL_NAMES, mat['col_names']) - - write_float_matrix(fid, kind, mat['data']) - end_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX) diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py deleted file mode 100644 index 20b795ab478..00000000000 --- a/mne/io/meas_info.py +++ /dev/null @@ -1,2943 +0,0 @@ -# -*- coding: utf-8 -*- -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Teon Brooks -# Stefan Appelhoff -# -# License: BSD-3-Clause - -from collections import Counter, OrderedDict -from collections.abc import Mapping -import contextlib -from copy import deepcopy -import datetime -from io import BytesIO -import operator -from textwrap import shorten -import string - -import numpy as np - -from .pick import (channel_type, 
_get_channel_types, - get_channel_type_constants, pick_types, _contains_ch_type) -from .constants import FIFF, _coord_frame_named -from .open import fiff_open -from .tree import dir_tree_find -from .tag import (read_tag, find_tag, _ch_coord_dict, _update_ch_info_named, - _rename_list) -from .proj import (_read_proj, _write_proj, _uniquify_projs, _normalize_proj, - _proj_equal, Projection) -from .ctf_comp import _read_ctf_comp, write_ctf_comp -from .write import (start_and_end_file, start_block, end_block, - write_string, write_dig_points, write_float, write_int, - write_coord_trans, write_ch_info, - write_julian, write_float_matrix, write_id, DATE_NONE, - _safe_name_list, write_name_list_sanitized) -from .proc_history import _read_proc_history, _write_proc_history -from ..transforms import (invert_transform, Transform, _coord_frame_name, - _ensure_trans, _frame_to_str) -from ..utils import (logger, verbose, warn, object_diff, _validate_type, - _stamp_to_dt, _dt_to_stamp, _pl, _is_numeric, - _check_option, _on_missing, _check_on_missing, fill_doc, - _check_fname, repr_html) -from ._digitization import (_format_dig_points, _dig_kind_proper, DigPoint, - _dig_kind_rev, _dig_kind_ints, _read_dig_fif) -from ._digitization import write_dig, _get_data_as_dict_from_dig -from .compensator import get_current_comp -from ..defaults import _handle_default - - -b = bytes # alias - -_SCALAR_CH_KEYS = ('scanno', 'logno', 'kind', 'range', 'cal', 'coil_type', - 'unit', 'unit_mul', 'coord_frame') -_ALL_CH_KEYS_SET = set(_SCALAR_CH_KEYS + ('loc', 'ch_name')) -# XXX we need to require these except when doing simplify_info -_MIN_CH_KEYS_SET = set(('kind', 'cal', 'unit', 'loc', 'ch_name')) - - -def _get_valid_units(): - """Get valid units according to the International System of Units (SI). - - The International System of Units (SI, :footcite:`WikipediaSI`) is the - default system for describing units in the Brain Imaging Data Structure - (BIDS). For more information, see the BIDS specification - :footcite:`BIDSdocs` and the appendix "Units" therein. - - References - ---------- - .. footbibliography:: - """ - valid_prefix_names = ['yocto', 'zepto', 'atto', 'femto', 'pico', 'nano', - 'micro', 'milli', 'centi', 'deci', 'deca', 'hecto', - 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa', - 'zetta', 'yotta'] - valid_prefix_symbols = ['y', 'z', 'a', 'f', 'p', 'n', u'µ', 'm', 'c', 'd', - 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'] - valid_unit_names = ['metre', 'kilogram', 'second', 'ampere', 'kelvin', - 'mole', 'candela', 'radian', 'steradian', 'hertz', - 'newton', 'pascal', 'joule', 'watt', 'coulomb', 'volt', - 'farad', 'ohm', 'siemens', 'weber', 'tesla', 'henry', - 'degree Celsius', 'lumen', 'lux', 'becquerel', 'gray', - 'sievert', 'katal'] - valid_unit_symbols = ['m', 'kg', 's', 'A', 'K', 'mol', 'cd', 'rad', 'sr', - 'Hz', 'N', 'Pa', 'J', 'W', 'C', 'V', 'F', u'Ω', 'S', - 'Wb', 'T', 'H', u'°C', 'lm', 'lx', 'Bq', 'Gy', 'Sv', - 'kat'] - - # Valid units are all possible combinations of either prefix name or prefix - # symbol together with either unit name or unit symbol. 
E.g., nV for - # nanovolt - valid_units = [] - valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_names - for unit in valid_unit_names]) - valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_names - for unit in valid_unit_symbols]) - valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_symbols - for unit in valid_unit_names]) - valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_symbols - for unit in valid_unit_symbols]) - - # units are also valid without a prefix - valid_units += valid_unit_names - valid_units += valid_unit_symbols - - # we also accept "n/a" as a unit, which is the default missing value in - # BIDS - valid_units += ["n/a"] - - return tuple(valid_units) - - -@verbose -def _unique_channel_names(ch_names, max_length=None, verbose=None): - """Ensure unique channel names.""" - suffixes = tuple(string.ascii_lowercase) - if max_length is not None: - ch_names[:] = [name[:max_length] for name in ch_names] - unique_ids = np.unique(ch_names, return_index=True)[1] - if len(unique_ids) != len(ch_names): - dups = {ch_names[x] - for x in np.setdiff1d(range(len(ch_names)), unique_ids)} - warn('Channel names are not unique, found duplicates for: ' - '%s. Applying running numbers for duplicates.' % dups) - for ch_stem in dups: - overlaps = np.where(np.array(ch_names) == ch_stem)[0] - # We need an extra character since we append '-'. - # np.ceil(...) is the maximum number of appended digits. - if max_length is not None: - n_keep = ( - max_length - 1 - int(np.ceil(np.log10(len(overlaps))))) - else: - n_keep = np.inf - n_keep = min(len(ch_stem), n_keep) - ch_stem = ch_stem[:n_keep] - for idx, ch_idx in enumerate(overlaps): - # try idx first, then loop through lower case chars - for suffix in (idx,) + suffixes: - ch_name = ch_stem + '-%s' % suffix - if ch_name not in ch_names: - break - if ch_name not in ch_names: - ch_names[ch_idx] = ch_name - else: - raise ValueError('Adding a single alphanumeric for a ' - 'duplicate resulted in another ' - 'duplicate name %s' % ch_name) - return ch_names - - -class MontageMixin(object): - """Mixin for Montage getting and setting.""" - - @fill_doc - def get_montage(self): - """Get a DigMontage from instance. - - Returns - ------- - %(montage)s - """ - from ..channels.montage import make_dig_montage - info = self if isinstance(self, Info) else self.info - if info['dig'] is None: - return None - # obtain coord_frame, and landmark coords - # (nasion, lpa, rpa, hsp, hpi) from DigPoints - montage_bunch = _get_data_as_dict_from_dig(info['dig']) - coord_frame = _frame_to_str.get(montage_bunch.coord_frame) - - # get the channel names and chs data structure - ch_names, chs = info['ch_names'], info['chs'] - picks = pick_types(info, meg=False, eeg=True, seeg=True, - ecog=True, dbs=True, fnirs=True, exclude=[]) - - # channel positions from dig do not match ch_names one to one, - # so use loc[:3] instead - ch_pos = {ch_names[ii]: chs[ii]['loc'][:3] for ii in picks} - - # fNIRS uses multiple channels for the same sensors, we use - # a private function to format these for dig montage. - fnirs_picks = pick_types(info, fnirs=True, exclude=[]) - if len(ch_pos) == len(fnirs_picks): - ch_pos = _get_fnirs_ch_pos(info) - elif len(fnirs_picks) > 0: - raise ValueError("MNE does not support getting the montage " - "for a mix of fNIRS and other data types. 
" - "Please raise a GitHub issue if you " - "require this feature.") - - # create montage - montage = make_dig_montage( - ch_pos=ch_pos, - coord_frame=coord_frame, - nasion=montage_bunch.nasion, - lpa=montage_bunch.lpa, - rpa=montage_bunch.rpa, - hsp=montage_bunch.hsp, - hpi=montage_bunch.hpi, - ) - return montage - - @verbose - def set_montage(self, montage, match_case=True, match_alias=False, - on_missing='raise', verbose=None): - """Set %(montage_types)s channel positions and digitization points. - - Parameters - ---------- - %(montage)s - %(match_case)s - %(match_alias)s - %(on_missing_montage)s - %(verbose)s - - Returns - ------- - inst : instance of Raw | Epochs | Evoked - The instance, modified in-place. - - See Also - -------- - mne.channels.make_standard_montage - mne.channels.make_dig_montage - mne.channels.read_custom_montage - - Notes - ----- - .. warning:: - Only %(montage_types)s channels can have their positions set using - a montage. Other channel types (e.g., MEG channels) should have - their positions defined properly using their data reading - functions. - """ - # How to set up a montage to old named fif file (walk through example) - # https://gist.github.com/massich/f6a9f4799f1fbeb8f5e8f8bc7b07d3df - - from ..channels.montage import _set_montage - info = self if isinstance(self, Info) else self.info - _set_montage(info, montage, match_case, match_alias, on_missing) - return self - - -class ContainsMixin(object): - """Mixin class for Raw, Evoked, Epochs and Info.""" - - def __contains__(self, ch_type): - """Check channel type membership. - - Parameters - ---------- - ch_type : str - Channel type to check for. Can be e.g. ``'meg'``, ``'eeg'``, - ``'stim'``, etc. - - Returns - ------- - in : bool - Whether or not the instance contains the given channel type. - - Examples - -------- - Channel type membership can be tested as:: - - >>> 'meg' in inst # doctest: +SKIP - True - >>> 'seeg' in inst # doctest: +SKIP - False - - """ - info = self if isinstance(self, Info) else self.info - if ch_type == 'meg': - has_ch_type = (_contains_ch_type(info, 'mag') or - _contains_ch_type(info, 'grad')) - else: - has_ch_type = _contains_ch_type(info, ch_type) - return has_ch_type - - @property - def compensation_grade(self): - """The current gradient compensation grade.""" - info = self if isinstance(self, Info) else self.info - return get_current_comp(info) - - @fill_doc - def get_channel_types(self, picks=None, unique=False, only_data_chs=False): - """Get a list of channel type for each channel. - - Parameters - ---------- - %(picks_all)s - unique : bool - Whether to return only unique channel types. Default is ``False``. - only_data_chs : bool - Whether to ignore non-data channels. Default is ``False``. - - Returns - ------- - channel_types : list - The channel types. 
- """ - info = self if isinstance(self, Info) else self.info - return _get_channel_types(info, picks=picks, unique=unique, - only_data_chs=only_data_chs) - - -def _format_trans(obj, key): - try: - t = obj[key] - except KeyError: - pass - else: - if t is not None: - obj[key] = Transform(t['from'], t['to'], t['trans']) - - -def _check_ch_keys(ch, ci, name='info["chs"]', check_min=True): - ch_keys = set(ch) - bad = sorted(ch_keys.difference(_ALL_CH_KEYS_SET)) - if bad: - raise KeyError( - f'key{_pl(bad)} errantly present for {name}[{ci}]: {bad}') - if check_min: - bad = sorted(_MIN_CH_KEYS_SET.difference(ch_keys)) - if bad: - raise KeyError( - f'key{_pl(bad)} missing for {name}[{ci}]: {bad}',) - - -# As options are added here, test_meas_info.py:test_info_bad should be updated -def _check_bads(bads): - _validate_type(bads, list, 'bads') - return bads - - -def _check_description(description): - _validate_type(description, (None, str), "info['description']") - return description - - -def _check_dev_head_t(dev_head_t): - _validate_type(dev_head_t, (Transform, None), "info['dev_head_t']") - if dev_head_t is not None: - dev_head_t = _ensure_trans(dev_head_t, 'meg', 'head') - return dev_head_t - - -def _check_experimenter(experimenter): - _validate_type(experimenter, (None, str), 'experimenter') - return experimenter - - -def _check_line_freq(line_freq): - _validate_type(line_freq, (None, 'numeric'), 'line_freq') - line_freq = float(line_freq) if line_freq is not None else line_freq - return line_freq - - -def _check_subject_info(subject_info): - _validate_type(subject_info, (None, dict), 'subject_info') - return subject_info - - -def _check_device_info(device_info): - _validate_type(device_info, (None, dict, ), 'device_info') - return device_info - - -def _check_helium_info(helium_info): - _validate_type(helium_info, (None, dict, ), 'helium_info') - return helium_info - - -class Info(dict, MontageMixin, ContainsMixin): - """Measurement information. - - This data structure behaves like a dictionary. It contains all metadata - that is available for a recording. However, its keys are restricted to - those provided by the - `FIF format specification `__, - so new entries should not be manually added. - - .. note:: - This class should not be instantiated directly via - ``mne.Info(...)``. Instead, use :func:`mne.create_info` to create - measurement information from scratch. - - .. warning:: - The only entries that should be manually changed by the user are: - ``info['bads']``, ``info['description']``, ``info['device_info']`` - ``info['dev_head_t']``, ``info['experimenter']``, - ``info['helium_info']``, ``info['line_freq']``, ``info['temp']``, - and ``info['subject_info']``. - - All other entries should be considered read-only, though they can be - modified by various MNE-Python functions or methods (which have - safeguards to ensure all fields remain in sync). - - Parameters - ---------- - *args : list - Arguments. - **kwargs : dict - Keyword arguments. - - Attributes - ---------- - acq_pars : str | None - MEG system acquisition parameters. - See :class:`mne.AcqParserFIF` for details. - acq_stim : str | None - MEG system stimulus parameters. - bads : list of str - List of bad (noisy/broken) channels, by name. These channels will by - default be ignored by many processing steps. - ch_names : list of str - The names of the channels. - chs : list of dict - A list of channel information dictionaries, one per channel. - See Notes for more information. 
- command_line : str - Contains the command and arguments used to create the source space - (used for source estimation). - comps : list of dict - CTF software gradient compensation data. - See Notes for more information. - ctf_head_t : Transform | None - The transformation from 4D/CTF head coordinates to Neuromag head - coordinates. This is only present in 4D/CTF data. - custom_ref_applied : int - Whether a custom (=other than average) reference has been applied to - the EEG data. This flag is checked by some algorithms that require an - average reference to be set. - description : str | None - String description of the recording. - dev_ctf_t : Transform | None - The transformation from device coordinates to 4D/CTF head coordinates. - This is only present in 4D/CTF data. - dev_head_t : Transform | None - The device to head transformation. - device_info : dict | None - Information about the acquisition device. See Notes for details. - - .. versionadded:: 0.19 - dig : list of dict | None - The Polhemus digitization data in head coordinates. - See Notes for more information. - events : list of dict - Event list, sometimes extracted from the stim channels by Neuromag - systems. In general this should not be used and - :func:`mne.find_events` should be used for event processing. - See Notes for more information. - experimenter : str | None - Name of the person that ran the experiment. - file_id : dict | None - The FIF globally unique ID. See Notes for more information. - gantry_angle : float | None - Tilt angle of the gantry in degrees. - helium_info : dict | None - Information about the device helium. See Notes for details. - - .. versionadded:: 0.19 - highpass : float - Highpass corner frequency in Hertz. Zero indicates a DC recording. - hpi_meas : list of dict - HPI measurements that were taken at the start of the recording - (e.g. coil frequencies). - See Notes for details. - hpi_results : list of dict - Head position indicator (HPI) digitization points and fit information - (e.g., the resulting transform). - See Notes for details. - hpi_subsystem : dict | None - Information about the HPI subsystem that was used (e.g., event - channel used for cHPI measurements). - See Notes for details. - kit_system_id : int - Identifies the KIT system. - line_freq : float | None - Frequency of the power line in Hertz. - lowpass : float - Lowpass corner frequency in Hertz. - It is automatically set to half the sampling rate if there is - otherwise no low-pass applied to the data. - maxshield : bool - True if active shielding (IAS) was active during recording. - meas_date : datetime - The time (UTC) of the recording. - - .. versionchanged:: 0.20 - This is stored as a :class:`~python:datetime.datetime` object - instead of a tuple of seconds/microseconds. - meas_file : str | None - Raw measurement file (used for source estimation). - meas_id : dict | None - The ID assigned to this measurement by the acquisition system or - during file conversion. Follows the same format as ``file_id``. - mri_file : str | None - File containing the MRI to head transformation (used for source - estimation). - mri_head_t : dict | None - Transformation from MRI to head coordinates (used for source - estimation). - mri_id : dict | None - MRI unique ID (used for source estimation). - nchan : int - Number of channels. - proc_history : list of dict - The MaxFilter processing history. - See Notes for details. - proj_id : int | None - ID number of the project the experiment belongs to. 
- proj_name : str | None - Name of the project the experiment belongs to. - projs : list of Projection - List of SSP operators that operate on the data. - See :class:`mne.Projection` for details. - sfreq : float - Sampling frequency in Hertz. - subject_info : dict | None - Information about the subject. - See Notes for details. - temp : object | None - Can be used to store temporary objects in an Info instance. It will not - survive an I/O roundtrip. - - .. versionadded:: 0.24 - utc_offset : str - "UTC offset of related meas_date (sHH:MM). - - .. versionadded:: 0.19 - working_dir : str - Working directory used when the source space was created (used for - source estimation). - xplotter_layout : str - Layout of the Xplotter (Neuromag system only). - - See Also - -------- - mne.create_info - - Notes - ----- - The following parameters have a nested structure. - - * ``chs`` list of dict: - - cal : float - The calibration factor to bring the channels to physical - units. Used in product with ``range`` to scale the data read - from disk. - ch_name : str - The channel name. - coil_type : int - Coil type, e.g. ``FIFFV_COIL_MEG``. - coord_frame : int - The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``. - kind : int - The kind of channel, e.g. ``FIFFV_EEG_CH``. - loc : array, shape (12,) - Channel location. For MEG this is the position plus the - normal given by a 3x3 rotation matrix. For EEG this is the - position followed by reference position (with 6 unused). - The values are specified in device coordinates for MEG and in - head coordinates for EEG channels, respectively. - logno : int - Logical channel number, conventions in the usage of this - number vary. - range : float - The hardware-oriented part of the calibration factor. - This should be only applied to the continuous raw data. - Used in product with ``cal`` to scale data read from disk. - scanno : int - Scanning order number, starting from 1. - unit : int - The unit to use, e.g. ``FIFF_UNIT_T_M``. - unit_mul : int - Unit multipliers, most commonly ``FIFF_UNITM_NONE``. - - * ``comps`` list of dict: - - ctfkind : int - CTF compensation grade. - colcals : ndarray - Column calibrations. - mat : dict - A named matrix dictionary (with entries "data", "col_names", etc.) - containing the compensation matrix. - rowcals : ndarray - Row calibrations. - save_calibrated : bool - Were the compensation data saved in calibrated form. - - * ``device_info`` dict: - - type : str - Device type. - model : str - Device model. - serial : str - Device serial. - site : str - Device site. - - * ``dig`` list of dict: - - kind : int - The kind of channel, - e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``. - r : array, shape (3,) - 3D position in m. and coord_frame. - ident : int - Number specifying the identity of the point. - e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``, or - 42 if kind is ``FIFFV_POINT_EEG``. - coord_frame : int - The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``. - - * ``events`` list of dict: - - channels : list of int - Channel indices for the events. - list : ndarray, shape (n_events * 3,) - Events in triplets as number of samples, before, after. - - * ``file_id`` dict: - - version : int - FIF format version, i.e. ``FIFFC_VERSION``. - machid : ndarray, shape (2,) - Unique machine ID, usually derived from the MAC address. - secs : int - Time in seconds. - usecs : int - Time in microseconds. - - * ``helium_info`` dict: - - he_level_raw : float - Helium level (%) before position correction. 
- helium_level : float - Helium level (%) after position correction. - orig_file_guid : str - Original file GUID. - meas_date : tuple of int - The helium level meas date. - - * ``hpi_meas`` list of dict: - - creator : str - Program that did the measurement. - sfreq : float - Sample rate. - nchan : int - Number of channels used. - nave : int - Number of averages used. - ncoil : int - Number of coils used. - first_samp : int - First sample used. - last_samp : int - Last sample used. - hpi_coils : list of dict - Coils, containing: - - number: int - Coil number - epoch : ndarray - Buffer containing one epoch and channel. - slopes : ndarray, shape (n_channels,) - HPI data. - corr_coeff : ndarray, shape (n_channels,) - HPI curve fit correlations. - coil_freq : float - HPI coil excitation frequency - - * ``hpi_results`` list of dict: - - dig_points : list - Digitization points (see ``dig`` definition) for the HPI coils. - order : ndarray, shape (ncoil,) - The determined digitization order. - used : ndarray, shape (nused,) - The indices of the used coils. - moments : ndarray, shape (ncoil, 3) - The coil moments. - goodness : ndarray, shape (ncoil,) - The goodness of fits. - good_limit : float - The goodness of fit limit. - dist_limit : float - The distance limit. - accept : int - Whether or not the fit was accepted. - coord_trans : instance of Transformation - The resulting MEG<->head transformation. - - * ``hpi_subsystem`` dict: - - ncoil : int - The number of coils. - event_channel : str - The event channel used to encode cHPI status (e.g., STI201). - hpi_coils : list of ndarray - List of length ``ncoil``, each 4-element ndarray contains the - event bits used on the event channel to indicate cHPI status - (using the first element of these arrays is typically - sufficient). - - * ``mri_id`` dict: - - version : int - FIF format version, i.e. ``FIFFC_VERSION``. - machid : ndarray, shape (2,) - Unique machine ID, usually derived from the MAC address. - secs : int - Time in seconds. - usecs : int - Time in microseconds. - - * ``proc_history`` list of dict: - - block_id : dict - See ``id`` above. - date : ndarray, shape (2,) - 2-element tuple of seconds and microseconds. - experimenter : str - Name of the person who ran the program. - creator : str - Program that did the processing. - max_info : dict - Maxwel filtering info, can contain: - - sss_info : dict - SSS processing information. - max_st - tSSS processing information. - sss_ctc : dict - Cross-talk processing information. - sss_cal : dict - Fine-calibration information. - smartshield : dict - MaxShield information. This dictionary is (always?) empty, - but its presence implies that MaxShield was used during - acquisition. - - * ``subject_info`` dict: - - id : int - Integer subject identifier. - his_id : str - String subject identifier. - last_name : str - Last name. - first_name : str - First name. - middle_name : str - Middle name. - birthday : tuple of int - Birthday in (year, month, day) format. - sex : int - Subject sex (0=unknown, 1=male, 2=female). - hand : int - Handedness (1=right, 2=left, 3=ambidextrous). - weight : float - Weight in kilograms. - height : float - Height in meters. - """ - - _attributes = { - 'acq_pars': 'acq_pars cannot be set directly. ' - 'See mne.AcqParserFIF() for details.', - 'acq_stim': 'acq_stim cannot be set directly.', - 'bads': _check_bads, - 'ch_names': 'ch_names cannot be set directly. 
' - 'Please use methods inst.add_channels(), ' - 'inst.drop_channels(), inst.pick_channels(), ' - 'inst.rename_channels(), inst.reorder_channels() ' - 'and inst.set_channel_types() instead.', - 'chs': 'chs cannot be set directly. ' - 'Please use methods inst.add_channels(), ' - 'inst.drop_channels(), inst.pick_channels(), ' - 'inst.rename_channels(), inst.reorder_channels() ' - 'and inst.set_channel_types() instead.', - 'command_line': 'command_line cannot be set directly.', - 'comps': 'comps cannot be set directly. ' - 'Please use method Raw.apply_gradient_compensation() ' - 'instead.', - 'ctf_head_t': 'ctf_head_t cannot be set directly.', - 'custom_ref_applied': 'custom_ref_applied cannot be set directly. ' - 'Please use method inst.set_eeg_reference() ' - 'instead.', - 'description': _check_description, - 'dev_ctf_t': 'dev_ctf_t cannot be set directly.', - 'dev_head_t': _check_dev_head_t, - 'device_info': _check_device_info, - 'dig': 'dig cannot be set directly. ' - 'Please use method inst.set_montage() instead.', - 'events': 'events cannot be set directly.', - 'experimenter': _check_experimenter, - 'file_id': 'file_id cannot be set directly.', - 'gantry_angle': 'gantry_angle cannot be set directly.', - 'helium_info': _check_helium_info, - 'highpass': 'highpass cannot be set directly. ' - 'Please use method inst.filter() instead.', - 'hpi_meas': 'hpi_meas can not be set directly.', - 'hpi_results': 'hpi_results cannot be set directly.', - 'hpi_subsystem': 'hpi_subsystem cannot be set directly.', - 'kit_system_id': 'kit_system_id cannot be set directly.', - 'line_freq': _check_line_freq, - 'lowpass': 'lowpass cannot be set directly. ' - 'Please use method inst.filter() instead.', - 'maxshield': 'maxshield cannot be set directly.', - 'meas_date': 'meas_date cannot be set directly. ' - 'Please use method inst.set_meas_date() instead.', - 'meas_file': 'meas_file cannot be set directly.', - 'meas_id': 'meas_id cannot be set directly.', - 'mri_file': 'mri_file cannot be set directly.', - 'mri_head_t': 'mri_head_t cannot be set directly.', - 'mri_id': 'mri_id cannot be set directly.', - 'nchan': 'nchan cannot be set directly. ' - 'Please use methods inst.add_channels(), ' - 'inst.drop_channels(), and inst.pick_channels() instead.', - 'proc_history': 'proc_history cannot be set directly.', - 'proj_id': 'proj_id cannot be set directly.', - 'proj_name': 'proj_name cannot be set directly.', - 'projs': 'projs cannot be set directly. ' - 'Please use methods inst.add_proj() and inst.del_proj() ' - 'instead.', - 'sfreq': 'sfreq cannot be set directly. ' - 'Please use method inst.resample() instead.', - 'subject_info': _check_subject_info, - 'temp': lambda x: x, - 'utc_offset': 'utc_offset cannot be set directly.', - 'working_dir': 'working_dir cannot be set directly.', - 'xplotter_layout': 'xplotter_layout cannot be set directly.' 
- } - - def __init__(self, *args, **kwargs): - self._unlocked = True - super().__init__(*args, **kwargs) - # Deal with h5io writing things as dict - for key in ('dev_head_t', 'ctf_head_t', 'dev_ctf_t'): - _format_trans(self, key) - for res in self.get('hpi_results', []): - _format_trans(res, 'coord_trans') - if self.get('dig', None) is not None and len(self['dig']): - if isinstance(self['dig'], dict): # needs to be unpacked - self['dig'] = _dict_unpack(self['dig'], _DIG_CAST) - if not isinstance(self['dig'][0], DigPoint): - self['dig'] = _format_dig_points(self['dig']) - if isinstance(self.get('chs', None), dict): - self['chs']['ch_name'] = [str(x) for x in np.char.decode( - self['chs']['ch_name'], encoding='utf8')] - self['chs'] = _dict_unpack(self['chs'], _CH_CAST) - for pi, proj in enumerate(self.get('projs', [])): - if not isinstance(proj, Projection): - self['projs'][pi] = Projection(**proj) - # Old files could have meas_date as tuple instead of datetime - try: - meas_date = self['meas_date'] - except KeyError: - pass - else: - self['meas_date'] = _ensure_meas_date_none_or_dt(meas_date) - self._unlocked = False - - def __getstate__(self): - """Get state (for pickling).""" - return {'_unlocked': self._unlocked} - - def __setstate__(self, state): - """Set state (for pickling).""" - self._unlocked = state['_unlocked'] - - def __setitem__(self, key, val): - """Attribute setter.""" - # During unpickling, the _unlocked attribute has not been set, so - # let __setstate__ do it later and act unlocked now - unlocked = getattr(self, '_unlocked', True) - if key in self._attributes: - if isinstance(self._attributes[key], str): - if not unlocked: - raise RuntimeError(self._attributes[key]) - else: - val = self._attributes[key](val) # attribute checker function - else: - raise RuntimeError( - f"Info does not support directly setting the key {repr(key)}. " - "You can set info['temp'] to store temporary objects in an " - "Info instance, but these will not survive an I/O round-trip.") - super().__setitem__(key, val) - - def update(self, other=None, **kwargs): - """Update method using __setitem__().""" - iterable = other.items() if isinstance(other, Mapping) else other - if other is not None: - for key, val in iterable: - self[key] = val - for key, val in kwargs.items(): - self[key] = val - - @contextlib.contextmanager - def _unlock(self, *, update_redundant=False, check_after=False): - """Context manager unlocking access to attributes.""" - # needed for nested _unlock() - state = self._unlocked if hasattr(self, '_unlocked') else False - - self._unlocked = True - try: - yield - except Exception: - raise - else: - if update_redundant: - self._update_redundant() - if check_after: - self._check_consistency() - finally: - self._unlocked = state - - def copy(self): - """Copy the instance. - - Returns - ------- - info : instance of Info - The copied info. - """ - return deepcopy(self) - - def normalize_proj(self): - """(Re-)Normalize projection vectors after subselection. - - Applying projection after sub-selecting a set of channels that - were originally used to compute the original projection vectors - can be dangerous (e.g., if few channels remain, most power was - in channels that are no longer picked, etc.). By default, mne - will emit a warning when this is done. - - This function will re-normalize projectors to use only the - remaining channels, thus avoiding that warning. 
Only use this - function if you're confident that the projection vectors still - adequately capture the original signal of interest. - """ - _normalize_proj(self) - - def __repr__(self): - """Summarize info instead of printing all.""" - MAX_WIDTH = 68 - strs = [' 0: - entr = ('%d item%s (%s)' % (this_len, _pl(this_len), - type(v).__name__)) - else: - entr = '' - if entr != '': - non_empty += 1 - strs.append('%s: %s' % (k, entr)) - st = '\n '.join(sorted(strs)) - st += '\n>' - st %= non_empty - return st - - def __deepcopy__(self, memodict): - """Make a deepcopy.""" - result = Info.__new__(Info) - result._unlocked = True - for k, v in self.items(): - # chs is roughly half the time but most are immutable - if k == 'chs': - # dict shallow copy is fast, so use it then overwrite - result[k] = list() - for ch in v: - ch = ch.copy() # shallow - ch['loc'] = ch['loc'].copy() - result[k].append(ch) - elif k == 'ch_names': - # we know it's list of str, shallow okay and saves ~100 µs - result[k] = v.copy() - elif k == 'hpi_meas': - hms = list() - for hm in v: - hm = hm.copy() - # the only mutable thing here is some entries in coils - hm['hpi_coils'] = [coil.copy() for coil in hm['hpi_coils']] - # There is a *tiny* risk here that someone could write - # raw.info['hpi_meas'][0]['hpi_coils'][1]['epoch'] = ... - # and assume that info.copy() will make an actual copy, - # but copying these entries has a 2x slowdown penalty so - # probably not worth it for such a deep corner case: - # for coil in hpi_coils: - # for key in ('epoch', 'slopes', 'corr_coeff'): - # coil[key] = coil[key].copy() - hms.append(hm) - result[k] = hms - else: - result[k] = deepcopy(v, memodict) - result._unlocked = False - return result - - def _check_consistency(self, prepend_error=''): - """Do some self-consistency checks and datatype tweaks.""" - missing = [bad for bad in self['bads'] if bad not in self['ch_names']] - if len(missing) > 0: - msg = '%sbad channel(s) %s marked do not exist in info' - raise RuntimeError(msg % (prepend_error, missing,)) - meas_date = self.get('meas_date') - if meas_date is not None: - if (not isinstance(self['meas_date'], datetime.datetime) or - self['meas_date'].tzinfo is None or - self['meas_date'].tzinfo is not datetime.timezone.utc): - raise RuntimeError('%sinfo["meas_date"] must be a datetime ' - 'object in UTC or None, got %r' - % (prepend_error, repr(self['meas_date']),)) - - chs = [ch['ch_name'] for ch in self['chs']] - if len(self['ch_names']) != len(chs) or any( - ch_1 != ch_2 for ch_1, ch_2 in zip(self['ch_names'], chs)) or \ - self['nchan'] != len(chs): - raise RuntimeError('%sinfo channel name inconsistency detected, ' - 'please notify mne-python developers' - % (prepend_error,)) - - # make sure we have the proper datatypes - with self._unlock(): - for key in ('sfreq', 'highpass', 'lowpass'): - if self.get(key) is not None: - self[key] = float(self[key]) - - for pi, proj in enumerate(self.get('projs', [])): - _validate_type(proj, Projection, f'info["projs"][{pi}]') - for key in ('kind', 'active', 'desc', 'data', 'explained_var'): - if key not in proj: - raise RuntimeError(f'Projection incomplete, missing {key}') - - # Ensure info['chs'] has immutable entries (copies much faster) - for ci, ch in enumerate(self['chs']): - _check_ch_keys(ch, ci) - ch_name = ch['ch_name'] - if not isinstance(ch_name, str): - raise TypeError( - 'Bad info: info["chs"][%d]["ch_name"] is not a string, ' - 'got type %s' % (ci, type(ch_name))) - for key in _SCALAR_CH_KEYS: - val = ch.get(key, 1) - if not 
_is_numeric(val): - raise TypeError( - 'Bad info: info["chs"][%d][%r] = %s is type %s, must ' - 'be float or int' % (ci, key, val, type(val))) - loc = ch['loc'] - if not (isinstance(loc, np.ndarray) and loc.shape == (12,)): - raise TypeError( - 'Bad info: info["chs"][%d]["loc"] must be ndarray with ' - '12 elements, got %r' % (ci, loc)) - - # make sure channel names are unique - with self._unlock(): - self['ch_names'] = _unique_channel_names(self['ch_names']) - for idx, ch_name in enumerate(self['ch_names']): - self['chs'][idx]['ch_name'] = ch_name - - def _update_redundant(self): - """Update the redundant entries.""" - with self._unlock(): - self['ch_names'] = [ch['ch_name'] for ch in self['chs']] - self['nchan'] = len(self['chs']) - - @property - def ch_names(self): - return self['ch_names'] - - def _get_chs_for_repr(self): - titles = _handle_default('titles') - - # good channels - channels = {} - ch_types = [channel_type(self, idx) for idx in range(len(self['chs']))] - ch_counts = Counter(ch_types) - for ch_type, count in ch_counts.items(): - if ch_type == 'meg': - channels['mag'] = len(pick_types(self, meg='mag')) - channels['grad'] = len(pick_types(self, meg='grad')) - elif ch_type == 'eog': - pick_eog = pick_types(self, eog=True) - eog = ', '.join( - np.array(self['ch_names'])[pick_eog]) - elif ch_type == 'ecg': - pick_ecg = pick_types(self, ecg=True) - ecg = ', '.join( - np.array(self['ch_names'])[pick_ecg]) - channels[ch_type] = count - - good_channels = ', '.join( - [f'{v} {titles.get(k, k.upper())}' for k, v in channels.items()]) - - if 'ecg' not in channels.keys(): - ecg = 'Not available' - if 'eog' not in channels.keys(): - eog = 'Not available' - - # bad channels - if len(self['bads']) > 0: - bad_channels = ', '.join(self['bads']) - else: - bad_channels = 'None' - - return good_channels, bad_channels, ecg, eog - - @repr_html - def _repr_html_(self, caption=None): - """Summarize info for HTML representation.""" - from ..html_templates import repr_templates_env - if isinstance(caption, str): - html = f'
    <h4>{caption}</h4>
    ' - else: - html = '' - - good_channels, bad_channels, ecg, eog = self._get_chs_for_repr() - - # TODO - # Most of the following checks are to ensure that we get a proper repr - # for Forward['info'] (and probably others like - # InverseOperator['info']??), which doesn't seem to follow our standard - # Info structure used elsewhere. - # Proposed solution for a future refactoring: - # Forward['info'] should get its own Info subclass (with respective - # repr). - - # meas date - meas_date = self.get('meas_date') - if meas_date is not None: - meas_date = meas_date.strftime("%B %d, %Y %H:%M:%S") + ' GMT' - - projs = self.get('projs') - if projs: - projs = [ - f'{p["desc"]} : {"on" if p["active"] else "off"}' - for p in self['projs'] - ] - else: - projs = None - - info_template = repr_templates_env.get_template('info.html.jinja') - return html + info_template.render( - caption=caption, - meas_date=meas_date, - projs=projs, - ecg=ecg, - eog=eog, - good_channels=good_channels, - bad_channels=bad_channels, - dig=self.get('dig'), - subject_info=self.get('subject_info'), - lowpass=self.get('lowpass'), - highpass=self.get('highpass'), - sfreq=self.get('sfreq'), - experimenter=self.get('experimenter'), - ) - - def save(self, fname): - """Write measurement info in fif file. - - Parameters - ---------- - fname : path-like - The name of the file. Should end by ``'-info.fif'``. - """ - write_info(fname, self) - - -def _simplify_info(info): - """Return a simplified info structure to speed up picking.""" - chs = [{key: ch[key] - for key in ('ch_name', 'kind', 'unit', 'coil_type', 'loc', 'cal')} - for ch in info['chs']] - sub_info = Info(chs=chs, bads=info['bads'], comps=info['comps'], - projs=info['projs'], - custom_ref_applied=info['custom_ref_applied']) - sub_info._update_redundant() - return sub_info - - -@verbose -def read_fiducials(fname, verbose=None): - """Read fiducials from a fiff file. - - Parameters - ---------- - fname : path-like - The filename to read. - %(verbose)s - - Returns - ------- - pts : list of dict - List of digitizer points (each point in a dict). - coord_frame : int - The coordinate frame of the points (one of - ``mne.io.constants.FIFF.FIFFV_COORD_...``). - """ - fname = _check_fname( - fname=fname, - overwrite='read', - must_exist=True - ) - fid, tree, _ = fiff_open(fname) - with fid: - isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK) - isotrak = isotrak[0] - pts = [] - coord_frame = FIFF.FIFFV_COORD_HEAD - for k in range(isotrak['nent']): - kind = isotrak['directory'][k].kind - pos = isotrak['directory'][k].pos - if kind == FIFF.FIFF_DIG_POINT: - tag = read_tag(fid, pos) - pts.append(DigPoint(tag.data)) - elif kind == FIFF.FIFF_MNE_COORD_FRAME: - tag = read_tag(fid, pos) - coord_frame = tag.data[0] - coord_frame = _coord_frame_named.get(coord_frame, coord_frame) - - # coord_frame is not stored in the tag - for pt in pts: - pt['coord_frame'] = coord_frame - - return pts, coord_frame - - -@verbose -def write_fiducials(fname, pts, coord_frame='unknown', *, overwrite=False, - verbose=None): - """Write fiducials to a fiff file. - - Parameters - ---------- - fname : path-like - Destination file name. - pts : iterator of dict - Iterator through digitizer points. Each point is a dictionary with - the keys 'kind', 'ident' and 'r'. - coord_frame : str | int - The coordinate frame of the points. 
If a string, must be one of - ``'meg'``, ``'mri'``, ``'mri_voxel'``, ``'head'``, - ``'mri_tal'``, ``'ras'``, ``'fs_tal'``, ``'ctf_head'``, - ``'ctf_meg'``, and ``'unknown'`` - If an integer, must be one of the constants defined as - ``mne.io.constants.FIFF.FIFFV_COORD_...``. - %(overwrite)s - - .. versionadded:: 1.0 - %(verbose)s - """ - write_dig(fname, pts, coord_frame, overwrite=overwrite) - - -@verbose -def read_info(fname, verbose=None): - """Read measurement info from a file. - - Parameters - ---------- - fname : path-like - File name. - %(verbose)s - - Returns - ------- - %(info_not_none)s - """ - f, tree, _ = fiff_open(fname) - with f as fid: - info = read_meas_info(fid, tree)[0] - return info - - -def read_bad_channels(fid, node): - """Read bad channels. - - Parameters - ---------- - fid : file - The file descriptor. - node : dict - The node of the FIF tree that contains info on the bad channels. - - Returns - ------- - bads : list - A list of bad channel's names. - """ - return _read_bad_channels(fid, node) - - -def _read_bad_channels(fid, node, ch_names_mapping): - ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping - nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS) - - bads = [] - if len(nodes) > 0: - for node in nodes: - tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST) - if tag is not None and tag.data is not None: - bads = _safe_name_list(tag.data, 'read', 'bads') - bads[:] = _rename_list(bads, ch_names_mapping) - return bads - - -def _write_bad_channels(fid, bads, ch_names_mapping): - if bads is not None and len(bads) > 0: - ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping - bads = _rename_list(bads, ch_names_mapping) - start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) - write_name_list_sanitized( - fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads, 'bads') - end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) - - -@verbose -def read_meas_info(fid, tree, clean_bads=False, verbose=None): - """Read the measurement info. - - Parameters - ---------- - fid : file - Open file descriptor. - tree : tree - FIF tree structure. - clean_bads : bool - If True, clean info['bads'] before running consistency check. - Should only be needed for old files where we did not check bads - before saving. - %(verbose)s - - Returns - ------- - %(info_not_none)s - meas : dict - Node in tree that contains the info. 
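# Usage sketch for the readers/writers above (a minimal aside: assumes the
# public mne / mne.io namespaces re-export create_info and read_info; the
# temporary file name is made up for the example):
import os
import tempfile

import mne

info = mne.create_info(["EEG 001", "EEG 002"], sfreq=1000.0, ch_types="eeg")
fname = os.path.join(tempfile.mkdtemp(), "sample-info.fif")  # should end in -info.fif
info.save(fname)                 # Info.save() wraps write_info()/write_meas_info()
info2 = mne.io.read_info(fname)  # wraps fiff_open() + read_meas_info()
print(info2["sfreq"], info2["nchan"], info2["bads"])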
- """ - # Find the desired blocks - meas = dir_tree_find(tree, FIFF.FIFFB_MEAS) - if len(meas) == 0: - raise ValueError('Could not find measurement data') - if len(meas) > 1: - raise ValueError('Cannot read more that 1 measurement data') - meas = meas[0] - - meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO) - if len(meas_info) == 0: - raise ValueError('Could not find measurement info') - if len(meas_info) > 1: - raise ValueError('Cannot read more that 1 measurement info') - meas_info = meas_info[0] - - # Read measurement info - dev_head_t = None - ctf_head_t = None - dev_ctf_t = None - meas_date = None - utc_offset = None - highpass = None - lowpass = None - nchan = None - sfreq = None - chs = [] - experimenter = None - description = None - proj_id = None - proj_name = None - line_freq = None - gantry_angle = None - custom_ref_applied = FIFF.FIFFV_MNE_CUSTOM_REF_OFF - xplotter_layout = None - kit_system_id = None - for k in range(meas_info['nent']): - kind = meas_info['directory'][k].kind - pos = meas_info['directory'][k].pos - if kind == FIFF.FIFF_NCHAN: - tag = read_tag(fid, pos) - nchan = int(tag.data) - elif kind == FIFF.FIFF_SFREQ: - tag = read_tag(fid, pos) - sfreq = float(tag.data) - elif kind == FIFF.FIFF_CH_INFO: - tag = read_tag(fid, pos) - chs.append(tag.data) - elif kind == FIFF.FIFF_LOWPASS: - tag = read_tag(fid, pos) - if not np.isnan(tag.data): - lowpass = float(tag.data) - elif kind == FIFF.FIFF_HIGHPASS: - tag = read_tag(fid, pos) - if not np.isnan(tag.data): - highpass = float(tag.data) - elif kind == FIFF.FIFF_MEAS_DATE: - tag = read_tag(fid, pos) - meas_date = tuple(tag.data) - if len(meas_date) == 1: # can happen from old C conversions - meas_date = (meas_date[0], 0) - elif kind == FIFF.FIFF_UTC_OFFSET: - tag = read_tag(fid, pos) - utc_offset = str(tag.data) - elif kind == FIFF.FIFF_COORD_TRANS: - tag = read_tag(fid, pos) - cand = tag.data - - if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \ - cand['to'] == FIFF.FIFFV_COORD_HEAD: - dev_head_t = cand - elif cand['from'] == FIFF.FIFFV_COORD_HEAD and \ - cand['to'] == FIFF.FIFFV_COORD_DEVICE: - # this reversal can happen with BabyMEG data - dev_head_t = invert_transform(cand) - elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \ - cand['to'] == FIFF.FIFFV_COORD_HEAD: - ctf_head_t = cand - elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE and \ - cand['to'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: - dev_ctf_t = cand - elif kind == FIFF.FIFF_EXPERIMENTER: - tag = read_tag(fid, pos) - experimenter = tag.data - elif kind == FIFF.FIFF_DESCRIPTION: - tag = read_tag(fid, pos) - description = tag.data - elif kind == FIFF.FIFF_PROJ_ID: - tag = read_tag(fid, pos) - proj_id = tag.data - elif kind == FIFF.FIFF_PROJ_NAME: - tag = read_tag(fid, pos) - proj_name = tag.data - elif kind == FIFF.FIFF_LINE_FREQ: - tag = read_tag(fid, pos) - line_freq = float(tag.data) - elif kind == FIFF.FIFF_GANTRY_ANGLE: - tag = read_tag(fid, pos) - gantry_angle = float(tag.data) - elif kind in [FIFF.FIFF_MNE_CUSTOM_REF, 236]: # 236 used before v0.11 - tag = read_tag(fid, pos) - custom_ref_applied = int(tag.data) - elif kind == FIFF.FIFF_XPLOTTER_LAYOUT: - tag = read_tag(fid, pos) - xplotter_layout = str(tag.data) - elif kind == FIFF.FIFF_MNE_KIT_SYSTEM_ID: - tag = read_tag(fid, pos) - kit_system_id = int(tag.data) - ch_names_mapping = _read_extended_ch_info(chs, meas_info, fid) - - # Check that we have everything we need - if nchan is None: - raise ValueError('Number of channels is not defined') - - if sfreq is None: - raise ValueError('Sampling 
frequency is not defined') - - if len(chs) == 0: - raise ValueError('Channel information not defined') - - if len(chs) != nchan: - raise ValueError('Incorrect number of channel definitions found') - - if dev_head_t is None or ctf_head_t is None: - hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT) - if len(hpi_result) == 1: - hpi_result = hpi_result[0] - for k in range(hpi_result['nent']): - kind = hpi_result['directory'][k].kind - pos = hpi_result['directory'][k].pos - if kind == FIFF.FIFF_COORD_TRANS: - tag = read_tag(fid, pos) - cand = tag.data - if (cand['from'] == FIFF.FIFFV_COORD_DEVICE and - cand['to'] == FIFF.FIFFV_COORD_HEAD and - dev_head_t is None): - dev_head_t = cand - elif (cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and - cand['to'] == FIFF.FIFFV_COORD_HEAD and - ctf_head_t is None): - ctf_head_t = cand - - # Locate the Polhemus data - dig = _read_dig_fif(fid, meas_info) - - # Locate the acquisition information - acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS) - acq_pars = None - acq_stim = None - if len(acqpars) == 1: - acqpars = acqpars[0] - for k in range(acqpars['nent']): - kind = acqpars['directory'][k].kind - pos = acqpars['directory'][k].pos - if kind == FIFF.FIFF_DACQ_PARS: - tag = read_tag(fid, pos) - acq_pars = tag.data - elif kind == FIFF.FIFF_DACQ_STIM: - tag = read_tag(fid, pos) - acq_stim = tag.data - - # Load the SSP data - projs = _read_proj( - fid, meas_info, ch_names_mapping=ch_names_mapping) - - # Load the CTF compensation data - comps = _read_ctf_comp( - fid, meas_info, chs, ch_names_mapping=ch_names_mapping) - - # Load the bad channel list - bads = _read_bad_channels( - fid, meas_info, ch_names_mapping=ch_names_mapping) - - # - # Put the data together - # - info = Info(file_id=tree['id']) - info._unlocked = True - - # Locate events list - events = dir_tree_find(meas_info, FIFF.FIFFB_EVENTS) - evs = list() - for event in events: - ev = dict() - for k in range(event['nent']): - kind = event['directory'][k].kind - pos = event['directory'][k].pos - if kind == FIFF.FIFF_EVENT_CHANNELS: - ev['channels'] = read_tag(fid, pos).data - elif kind == FIFF.FIFF_EVENT_LIST: - ev['list'] = read_tag(fid, pos).data - evs.append(ev) - info['events'] = evs - - # Locate HPI result - hpi_results = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT) - hrs = list() - for hpi_result in hpi_results: - hr = dict() - hr['dig_points'] = [] - for k in range(hpi_result['nent']): - kind = hpi_result['directory'][k].kind - pos = hpi_result['directory'][k].pos - if kind == FIFF.FIFF_DIG_POINT: - hr['dig_points'].append(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_HPI_DIGITIZATION_ORDER: - hr['order'] = read_tag(fid, pos).data - elif kind == FIFF.FIFF_HPI_COILS_USED: - hr['used'] = read_tag(fid, pos).data - elif kind == FIFF.FIFF_HPI_COIL_MOMENTS: - hr['moments'] = read_tag(fid, pos).data - elif kind == FIFF.FIFF_HPI_FIT_GOODNESS: - hr['goodness'] = read_tag(fid, pos).data - elif kind == FIFF.FIFF_HPI_FIT_GOOD_LIMIT: - hr['good_limit'] = float(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_HPI_FIT_DIST_LIMIT: - hr['dist_limit'] = float(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_HPI_FIT_ACCEPT: - hr['accept'] = int(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_COORD_TRANS: - hr['coord_trans'] = read_tag(fid, pos).data - hrs.append(hr) - info['hpi_results'] = hrs - - # Locate HPI Measurement - hpi_meass = dir_tree_find(meas_info, FIFF.FIFFB_HPI_MEAS) - hms = list() - for hpi_meas in hpi_meass: - hm = dict() - for k in range(hpi_meas['nent']): - kind = 
hpi_meas['directory'][k].kind - pos = hpi_meas['directory'][k].pos - if kind == FIFF.FIFF_CREATOR: - hm['creator'] = str(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_SFREQ: - hm['sfreq'] = float(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_NCHAN: - hm['nchan'] = int(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_NAVE: - hm['nave'] = int(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_HPI_NCOIL: - hm['ncoil'] = int(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_FIRST_SAMPLE: - hm['first_samp'] = int(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_LAST_SAMPLE: - hm['last_samp'] = int(read_tag(fid, pos).data) - hpi_coils = dir_tree_find(hpi_meas, FIFF.FIFFB_HPI_COIL) - hcs = [] - for hpi_coil in hpi_coils: - hc = dict() - for k in range(hpi_coil['nent']): - kind = hpi_coil['directory'][k].kind - pos = hpi_coil['directory'][k].pos - if kind == FIFF.FIFF_HPI_COIL_NO: - hc['number'] = int(read_tag(fid, pos).data) - elif kind == FIFF.FIFF_EPOCH: - hc['epoch'] = read_tag(fid, pos).data - hc['epoch'].flags.writeable = False - elif kind == FIFF.FIFF_HPI_SLOPES: - hc['slopes'] = read_tag(fid, pos).data - hc['slopes'].flags.writeable = False - elif kind == FIFF.FIFF_HPI_CORR_COEFF: - hc['corr_coeff'] = read_tag(fid, pos).data - hc['corr_coeff'].flags.writeable = False - elif kind == FIFF.FIFF_HPI_COIL_FREQ: - hc['coil_freq'] = float(read_tag(fid, pos).data) - hcs.append(hc) - hm['hpi_coils'] = hcs - hms.append(hm) - info['hpi_meas'] = hms - del hms - - subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT) - si = None - if len(subject_info) == 1: - subject_info = subject_info[0] - si = dict() - for k in range(subject_info['nent']): - kind = subject_info['directory'][k].kind - pos = subject_info['directory'][k].pos - if kind == FIFF.FIFF_SUBJ_ID: - tag = read_tag(fid, pos) - si['id'] = int(tag.data) - elif kind == FIFF.FIFF_SUBJ_HIS_ID: - tag = read_tag(fid, pos) - si['his_id'] = str(tag.data) - elif kind == FIFF.FIFF_SUBJ_LAST_NAME: - tag = read_tag(fid, pos) - si['last_name'] = str(tag.data) - elif kind == FIFF.FIFF_SUBJ_FIRST_NAME: - tag = read_tag(fid, pos) - si['first_name'] = str(tag.data) - elif kind == FIFF.FIFF_SUBJ_MIDDLE_NAME: - tag = read_tag(fid, pos) - si['middle_name'] = str(tag.data) - elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY: - try: - tag = read_tag(fid, pos) - except OverflowError: - warn('Encountered an error while trying to read the ' - 'birthday from the input data. No birthday will be ' - 'set. 
Please check the integrity of the birthday ' - 'information in the input data.') - continue - si['birthday'] = tag.data - elif kind == FIFF.FIFF_SUBJ_SEX: - tag = read_tag(fid, pos) - si['sex'] = int(tag.data) - elif kind == FIFF.FIFF_SUBJ_HAND: - tag = read_tag(fid, pos) - si['hand'] = int(tag.data) - elif kind == FIFF.FIFF_SUBJ_WEIGHT: - tag = read_tag(fid, pos) - si['weight'] = tag.data - elif kind == FIFF.FIFF_SUBJ_HEIGHT: - tag = read_tag(fid, pos) - si['height'] = tag.data - info['subject_info'] = si - del si - - device_info = dir_tree_find(meas_info, FIFF.FIFFB_DEVICE) - di = None - if len(device_info) == 1: - device_info = device_info[0] - di = dict() - for k in range(device_info['nent']): - kind = device_info['directory'][k].kind - pos = device_info['directory'][k].pos - if kind == FIFF.FIFF_DEVICE_TYPE: - tag = read_tag(fid, pos) - di['type'] = str(tag.data) - elif kind == FIFF.FIFF_DEVICE_MODEL: - tag = read_tag(fid, pos) - di['model'] = str(tag.data) - elif kind == FIFF.FIFF_DEVICE_SERIAL: - tag = read_tag(fid, pos) - di['serial'] = str(tag.data) - elif kind == FIFF.FIFF_DEVICE_SITE: - tag = read_tag(fid, pos) - di['site'] = str(tag.data) - info['device_info'] = di - del di - - helium_info = dir_tree_find(meas_info, FIFF.FIFFB_HELIUM) - hi = None - if len(helium_info) == 1: - helium_info = helium_info[0] - hi = dict() - for k in range(helium_info['nent']): - kind = helium_info['directory'][k].kind - pos = helium_info['directory'][k].pos - if kind == FIFF.FIFF_HE_LEVEL_RAW: - tag = read_tag(fid, pos) - hi['he_level_raw'] = float(tag.data) - elif kind == FIFF.FIFF_HELIUM_LEVEL: - tag = read_tag(fid, pos) - hi['helium_level'] = float(tag.data) - elif kind == FIFF.FIFF_ORIG_FILE_GUID: - tag = read_tag(fid, pos) - hi['orig_file_guid'] = str(tag.data) - elif kind == FIFF.FIFF_MEAS_DATE: - tag = read_tag(fid, pos) - hi['meas_date'] = tuple(int(t) for t in tag.data) - info['helium_info'] = hi - del hi - - hpi_subsystem = dir_tree_find(meas_info, FIFF.FIFFB_HPI_SUBSYSTEM) - hs = None - if len(hpi_subsystem) == 1: - hpi_subsystem = hpi_subsystem[0] - hs = dict() - for k in range(hpi_subsystem['nent']): - kind = hpi_subsystem['directory'][k].kind - pos = hpi_subsystem['directory'][k].pos - if kind == FIFF.FIFF_HPI_NCOIL: - tag = read_tag(fid, pos) - hs['ncoil'] = int(tag.data) - elif kind == FIFF.FIFF_EVENT_CHANNEL: - tag = read_tag(fid, pos) - hs['event_channel'] = str(tag.data) - hpi_coils = dir_tree_find(hpi_subsystem, FIFF.FIFFB_HPI_COIL) - hc = [] - for coil in hpi_coils: - this_coil = dict() - for j in range(coil['nent']): - kind = coil['directory'][j].kind - pos = coil['directory'][j].pos - if kind == FIFF.FIFF_EVENT_BITS: - tag = read_tag(fid, pos) - this_coil['event_bits'] = np.array(tag.data) - hc.append(this_coil) - hs['hpi_coils'] = hc - info['hpi_subsystem'] = hs - - # Read processing history - info['proc_history'] = _read_proc_history(fid, tree) - - # Make the most appropriate selection for the measurement id - if meas_info['parent_id'] is None: - if meas_info['id'] is None: - if meas['id'] is None: - if meas['parent_id'] is None: - info['meas_id'] = info['file_id'] - else: - info['meas_id'] = meas['parent_id'] - else: - info['meas_id'] = meas['id'] - else: - info['meas_id'] = meas_info['id'] - else: - info['meas_id'] = meas_info['parent_id'] - info['experimenter'] = experimenter - info['description'] = description - info['proj_id'] = proj_id - info['proj_name'] = proj_name - if meas_date is None: - meas_date = (info['meas_id']['secs'], info['meas_id']['usecs']) - 
info['meas_date'] = _ensure_meas_date_none_or_dt(meas_date) - info['utc_offset'] = utc_offset - - info['sfreq'] = sfreq - info['highpass'] = highpass if highpass is not None else 0. - info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0 - info['line_freq'] = line_freq - info['gantry_angle'] = gantry_angle - - # Add the channel information and make a list of channel names - # for convenience - info['chs'] = chs - - # - # Add the coordinate transformations - # - info['dev_head_t'] = dev_head_t - info['ctf_head_t'] = ctf_head_t - info['dev_ctf_t'] = dev_ctf_t - if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None: - from ..transforms import Transform - head_ctf_trans = np.linalg.inv(ctf_head_t['trans']) - dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans']) - info['dev_ctf_t'] = Transform('meg', 'ctf_head', dev_ctf_trans) - - # All kinds of auxliary stuff - info['dig'] = _format_dig_points(dig) - info['bads'] = bads - info._update_redundant() - if clean_bads: - info['bads'] = [b for b in bads if b in info['ch_names']] - info['projs'] = projs - info['comps'] = comps - info['acq_pars'] = acq_pars - info['acq_stim'] = acq_stim - info['custom_ref_applied'] = custom_ref_applied - info['xplotter_layout'] = xplotter_layout - info['kit_system_id'] = kit_system_id - info._check_consistency() - info._unlocked = False - return info, meas - - -def _read_extended_ch_info(chs, parent, fid): - ch_infos = dir_tree_find(parent, FIFF.FIFFB_CH_INFO) - if len(ch_infos) == 0: - return - _check_option('length of channel infos', len(ch_infos), [len(chs)]) - logger.info(' Reading extended channel information') - - # Here we assume that ``remap`` is in the same order as the channels - # themselves, which is hopefully safe enough. - ch_names_mapping = dict() - for new, ch in zip(ch_infos, chs): - for k in range(new['nent']): - kind = new['directory'][k].kind - try: - key, cast = _CH_READ_MAP[kind] - except KeyError: - # This shouldn't happen if we're up to date with the FIFF - # spec - warn(f'Discarding extra channel information kind {kind}') - continue - assert key in ch - data = read_tag(fid, new['directory'][k].pos).data - if data is not None: - data = cast(data) - if key == 'ch_name': - ch_names_mapping[ch[key]] = data - ch[key] = data - _update_ch_info_named(ch) - # we need to return ch_names_mapping so that we can also rename the - # bad channels - return ch_names_mapping - - -def _rename_comps(comps, ch_names_mapping): - if not (comps and ch_names_mapping): - return - for comp in comps: - data = comp['data'] - for key in ('row_names', 'col_names'): - data[key][:] = _rename_list(data[key], ch_names_mapping) - - -def _ensure_meas_date_none_or_dt(meas_date): - if meas_date is None or np.array_equal(meas_date, DATE_NONE): - meas_date = None - elif not isinstance(meas_date, datetime.datetime): - meas_date = _stamp_to_dt(meas_date) - return meas_date - - -def _check_dates(info, prepend_error=''): - """Check dates before writing as fif files. - - It's needed because of the limited integer precision - of the fix standard. 
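# Standalone sketch of the bound check performed by _check_dates(): FIF stores
# seconds/microseconds as big-endian int32, so timestamps outside the
# np.iinfo('>i4') range cannot be written (nothing here touches an Info object).
import datetime

import numpy as np

i4 = np.iinfo('>i4')
stamp = int(datetime.datetime(2038, 1, 20, tzinfo=datetime.timezone.utc).timestamp())
print(i4.min, i4.max)             # -2147483648 2147483647
print(i4.min <= stamp <= i4.max)  # False: past the 2038 rollover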
- """ - for key in ('file_id', 'meas_id'): - value = info.get(key) - if value is not None: - assert 'msecs' not in value - for key_2 in ('secs', 'usecs'): - if (value[key_2] < np.iinfo('>i4').min or - value[key_2] > np.iinfo('>i4').max): - raise RuntimeError('%sinfo[%s][%s] must be between ' - '"%r" and "%r", got "%r"' - % (prepend_error, key, key_2, - np.iinfo('>i4').min, - np.iinfo('>i4').max, - value[key_2]),) - - meas_date = info.get('meas_date') - if meas_date is None: - return - - meas_date_stamp = _dt_to_stamp(meas_date) - if (meas_date_stamp[0] < np.iinfo('>i4').min or - meas_date_stamp[0] > np.iinfo('>i4').max): - raise RuntimeError( - '%sinfo["meas_date"] seconds must be between "%r" ' - 'and "%r", got "%r"' - % (prepend_error, (np.iinfo('>i4').min, 0), - (np.iinfo('>i4').max, 0), meas_date_stamp[0],)) - - -@fill_doc -def write_meas_info(fid, info, data_type=None, reset_range=True): - """Write measurement info into a file id (from a fif file). - - Parameters - ---------- - fid : file - Open file descriptor. - %(info_not_none)s - data_type : int - The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), - 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for - raw data. - reset_range : bool - If True, info['chs'][k]['range'] will be set to unity. - - Notes - ----- - Tags are written in a particular order for compatibility with maxfilter. - """ - info._check_consistency() - _check_dates(info) - - # Measurement info - start_block(fid, FIFF.FIFFB_MEAS_INFO) - - # Add measurement id - if info['meas_id'] is not None: - write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id']) - - for event in info['events']: - start_block(fid, FIFF.FIFFB_EVENTS) - if event.get('channels') is not None: - write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event['channels']) - if event.get('list') is not None: - write_int(fid, FIFF.FIFF_EVENT_LIST, event['list']) - end_block(fid, FIFF.FIFFB_EVENTS) - - # HPI Result - for hpi_result in info['hpi_results']: - start_block(fid, FIFF.FIFFB_HPI_RESULT) - write_dig_points(fid, hpi_result['dig_points']) - if 'order' in hpi_result: - write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER, - hpi_result['order']) - if 'used' in hpi_result: - write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result['used']) - if 'moments' in hpi_result: - write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS, - hpi_result['moments']) - if 'goodness' in hpi_result: - write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS, - hpi_result['goodness']) - if 'good_limit' in hpi_result: - write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT, - hpi_result['good_limit']) - if 'dist_limit' in hpi_result: - write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT, - hpi_result['dist_limit']) - if 'accept' in hpi_result: - write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result['accept']) - if 'coord_trans' in hpi_result: - write_coord_trans(fid, hpi_result['coord_trans']) - end_block(fid, FIFF.FIFFB_HPI_RESULT) - - # HPI Measurement - for hpi_meas in info['hpi_meas']: - start_block(fid, FIFF.FIFFB_HPI_MEAS) - if hpi_meas.get('creator') is not None: - write_string(fid, FIFF.FIFF_CREATOR, hpi_meas['creator']) - if hpi_meas.get('sfreq') is not None: - write_float(fid, FIFF.FIFF_SFREQ, hpi_meas['sfreq']) - if hpi_meas.get('nchan') is not None: - write_int(fid, FIFF.FIFF_NCHAN, hpi_meas['nchan']) - if hpi_meas.get('nave') is not None: - write_int(fid, FIFF.FIFF_NAVE, hpi_meas['nave']) - if hpi_meas.get('ncoil') is not None: - write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas['ncoil']) - if hpi_meas.get('first_samp') is not None: - write_int(fid, 
FIFF.FIFF_FIRST_SAMPLE, hpi_meas['first_samp']) - if hpi_meas.get('last_samp') is not None: - write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas['last_samp']) - for hpi_coil in hpi_meas['hpi_coils']: - start_block(fid, FIFF.FIFFB_HPI_COIL) - if hpi_coil.get('number') is not None: - write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil['number']) - if hpi_coil.get('epoch') is not None: - write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil['epoch']) - if hpi_coil.get('slopes') is not None: - write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil['slopes']) - if hpi_coil.get('corr_coeff') is not None: - write_float(fid, FIFF.FIFF_HPI_CORR_COEFF, - hpi_coil['corr_coeff']) - if hpi_coil.get('coil_freq') is not None: - write_float(fid, FIFF.FIFF_HPI_COIL_FREQ, - hpi_coil['coil_freq']) - end_block(fid, FIFF.FIFFB_HPI_COIL) - end_block(fid, FIFF.FIFFB_HPI_MEAS) - - # Polhemus data - write_dig_points(fid, info['dig'], block=True) - - # megacq parameters - if info['acq_pars'] is not None or info['acq_stim'] is not None: - start_block(fid, FIFF.FIFFB_DACQ_PARS) - if info['acq_pars'] is not None: - write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars']) - - if info['acq_stim'] is not None: - write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim']) - - end_block(fid, FIFF.FIFFB_DACQ_PARS) - - # Coordinate transformations if the HPI result block was not there - if info['dev_head_t'] is not None: - write_coord_trans(fid, info['dev_head_t']) - - if info['ctf_head_t'] is not None: - write_coord_trans(fid, info['ctf_head_t']) - - if info['dev_ctf_t'] is not None: - write_coord_trans(fid, info['dev_ctf_t']) - - # Projectors - ch_names_mapping = _make_ch_names_mapping(info['chs']) - _write_proj(fid, info['projs'], ch_names_mapping=ch_names_mapping) - - # Bad channels - _write_bad_channels(fid, info['bads'], ch_names_mapping=ch_names_mapping) - - # General - if info.get('experimenter') is not None: - write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter']) - if info.get('description') is not None: - write_string(fid, FIFF.FIFF_DESCRIPTION, info['description']) - if info.get('proj_id') is not None: - write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id']) - if info.get('proj_name') is not None: - write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name']) - if info.get('meas_date') is not None: - write_int(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(info['meas_date'])) - if info.get('utc_offset') is not None: - write_string(fid, FIFF.FIFF_UTC_OFFSET, info['utc_offset']) - write_int(fid, FIFF.FIFF_NCHAN, info['nchan']) - write_float(fid, FIFF.FIFF_SFREQ, info['sfreq']) - if info['lowpass'] is not None: - write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass']) - if info['highpass'] is not None: - write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass']) - if info.get('line_freq') is not None: - write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq']) - if info.get('gantry_angle') is not None: - write_float(fid, FIFF.FIFF_GANTRY_ANGLE, info['gantry_angle']) - if data_type is not None: - write_int(fid, FIFF.FIFF_DATA_PACK, data_type) - if info.get('custom_ref_applied'): - write_int(fid, FIFF.FIFF_MNE_CUSTOM_REF, info['custom_ref_applied']) - if info.get('xplotter_layout'): - write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info['xplotter_layout']) - - # Channel information - _write_ch_infos(fid, info['chs'], reset_range, ch_names_mapping) - - # Subject information - if info.get('subject_info') is not None: - start_block(fid, FIFF.FIFFB_SUBJECT) - si = info['subject_info'] - if si.get('id') is not None: - write_int(fid, FIFF.FIFF_SUBJ_ID, si['id']) 
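# Note: each optional subject-info field below is written only when present,
# and the birthday is stored via write_julian() (Julian-date encoding) rather
# than as a plain integer tag.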
- if si.get('his_id') is not None: - write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id']) - if si.get('last_name') is not None: - write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name']) - if si.get('first_name') is not None: - write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name']) - if si.get('middle_name') is not None: - write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si['middle_name']) - if si.get('birthday') is not None: - write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday']) - if si.get('sex') is not None: - write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex']) - if si.get('hand') is not None: - write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand']) - if si.get('weight') is not None: - write_float(fid, FIFF.FIFF_SUBJ_WEIGHT, si['weight']) - if si.get('height') is not None: - write_float(fid, FIFF.FIFF_SUBJ_HEIGHT, si['height']) - end_block(fid, FIFF.FIFFB_SUBJECT) - del si - - if info.get('device_info') is not None: - start_block(fid, FIFF.FIFFB_DEVICE) - di = info['device_info'] - write_string(fid, FIFF.FIFF_DEVICE_TYPE, di['type']) - for key in ('model', 'serial', 'site'): - if di.get(key) is not None: - write_string(fid, getattr(FIFF, 'FIFF_DEVICE_' + key.upper()), - di[key]) - end_block(fid, FIFF.FIFFB_DEVICE) - del di - - if info.get('helium_info') is not None: - start_block(fid, FIFF.FIFFB_HELIUM) - hi = info['helium_info'] - if hi.get('he_level_raw') is not None: - write_float(fid, FIFF.FIFF_HE_LEVEL_RAW, hi['he_level_raw']) - if hi.get('helium_level') is not None: - write_float(fid, FIFF.FIFF_HELIUM_LEVEL, hi['helium_level']) - if hi.get('orig_file_guid') is not None: - write_string(fid, FIFF.FIFF_ORIG_FILE_GUID, hi['orig_file_guid']) - write_int(fid, FIFF.FIFF_MEAS_DATE, hi['meas_date']) - end_block(fid, FIFF.FIFFB_HELIUM) - del hi - - if info.get('hpi_subsystem') is not None: - hs = info['hpi_subsystem'] - start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM) - if hs.get('ncoil') is not None: - write_int(fid, FIFF.FIFF_HPI_NCOIL, hs['ncoil']) - if hs.get('event_channel') is not None: - write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs['event_channel']) - if hs.get('hpi_coils') is not None: - for coil in hs['hpi_coils']: - start_block(fid, FIFF.FIFFB_HPI_COIL) - if coil.get('event_bits') is not None: - write_int(fid, FIFF.FIFF_EVENT_BITS, - coil['event_bits']) - end_block(fid, FIFF.FIFFB_HPI_COIL) - end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM) - del hs - - # CTF compensation info - comps = info['comps'] - if ch_names_mapping: - comps = deepcopy(comps) - _rename_comps(comps, ch_names_mapping) - write_ctf_comp(fid, comps) - - # KIT system ID - if info.get('kit_system_id') is not None: - write_int(fid, FIFF.FIFF_MNE_KIT_SYSTEM_ID, info['kit_system_id']) - - end_block(fid, FIFF.FIFFB_MEAS_INFO) - - # Processing history - _write_proc_history(fid, info) - - -@fill_doc -def write_info(fname, info, data_type=None, reset_range=True): - """Write measurement info in fif file. - - Parameters - ---------- - fname : path-like - The name of the file. Should end by ``-info.fif``. - %(info_not_none)s - data_type : int - The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), - 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for - raw data. - reset_range : bool - If True, info['chs'][k]['range'] will be set to unity. - """ - with start_and_end_file(fname) as fid: - start_block(fid, FIFF.FIFFB_MEAS) - write_meas_info(fid, info, data_type, reset_range) - end_block(fid, FIFF.FIFFB_MEAS) - - -@verbose -def _merge_info_values(infos, key, verbose=None): - """Merge things together. 
- - Fork for {'dict', 'list', 'array', 'other'} - and consider cases where one or all are of the same type. - - Does special things for "projs", "bads", and "meas_date". - """ - values = [d[key] for d in infos] - msg = ("Don't know how to merge '%s'. Make sure values are " - "compatible, got types:\n %s" - % (key, [type(v) for v in values])) - - def _flatten(lists): - return [item for sublist in lists for item in sublist] - - def _check_isinstance(values, kind, func): - return func([isinstance(v, kind) for v in values]) - - def _where_isinstance(values, kind): - """Get indices of instances.""" - return np.where([isinstance(v, type) for v in values])[0] - - # list - if _check_isinstance(values, list, all): - lists = (d[key] for d in infos) - if key == 'projs': - return _uniquify_projs(_flatten(lists)) - elif key == 'bads': - return sorted(set(_flatten(lists))) - else: - return _flatten(lists) - elif _check_isinstance(values, list, any): - idx = _where_isinstance(values, list) - if len(idx) == 1: - return values[int(idx)] - elif len(idx) > 1: - lists = (d[key] for d in infos if isinstance(d[key], list)) - return _flatten(lists) - # dict - elif _check_isinstance(values, dict, all): - is_qual = all(object_diff(values[0], v) == '' for v in values[1:]) - if is_qual: - return values[0] - else: - RuntimeError(msg) - elif _check_isinstance(values, dict, any): - idx = _where_isinstance(values, dict) - if len(idx) == 1: - return values[int(idx)] - elif len(idx) > 1: - raise RuntimeError(msg) - # ndarray - elif _check_isinstance(values, np.ndarray, all) or \ - _check_isinstance(values, tuple, all): - is_qual = all(np.array_equal(values[0], x) for x in values[1:]) - if is_qual: - return values[0] - elif key == 'meas_date': - logger.info('Found multiple entries for %s. ' - 'Setting value to `None`' % key) - return None - else: - raise RuntimeError(msg) - elif _check_isinstance(values, (np.ndarray, tuple), any): - idx = _where_isinstance(values, np.ndarray) - if len(idx) == 1: - return values[int(idx)] - elif len(idx) > 1: - raise RuntimeError(msg) - # other - else: - unique_values = set(values) - if len(unique_values) == 1: - return list(values)[0] - elif isinstance(list(unique_values)[0], BytesIO): - logger.info('Found multiple StringIO instances. ' - 'Setting value to `None`') - return None - elif isinstance(list(unique_values)[0], str): - logger.info('Found multiple filenames. ' - 'Setting value to `None`') - return None - else: - raise RuntimeError(msg) - - -@verbose -def _merge_info(infos, force_update_to_first=False, verbose=None): - """Merge multiple measurement info dictionaries. - - - Fields that are present in only one info object will be used in the - merged info. - - Fields that are present in multiple info objects and are the same - will be used in the merged info. - - Fields that are present in multiple info objects and are different - will result in a None value in the merged info. - - Channels will be concatenated. If multiple info objects contain - channels with the same name, an exception is raised. - - Parameters - ---------- - infos | list of instance of Info - Info objects to merge into one info object. - force_update_to_first : bool - If True, force the fields for objects in `info` will be updated - to match those in the first item. Use at your own risk, as this - may overwrite important metadata. - %(verbose)s - - Returns - ------- - info : instance of Info - The merged info object. 
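# Hedged illustration of the merge rules documented above, calling the private
# helper directly (the import path assumes the pre-refactor mne.io.meas_info
# layout shown in this diff; channel names are made up):
import mne
from mne.io.meas_info import _merge_info  # private API, shown for illustration only

a = mne.create_info(["EEG 001", "EEG 002"], sfreq=1000.0, ch_types="eeg")
b = mne.create_info(["MISC 001"], sfreq=1000.0, ch_types="misc")
merged = _merge_info([a, b])
print(merged["nchan"], merged["ch_names"])  # 3, channels concatenated in order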
- """ - for info in infos: - info._check_consistency() - if force_update_to_first is True: - infos = deepcopy(infos) - _force_update_info(infos[0], infos[1:]) - info = Info() - info._unlocked = True - info['chs'] = [] - for this_info in infos: - info['chs'].extend(this_info['chs']) - info._update_redundant() - duplicates = {ch for ch in info['ch_names'] - if info['ch_names'].count(ch) > 1} - if len(duplicates) > 0: - msg = ("The following channels are present in more than one input " - "measurement info objects: %s" % list(duplicates)) - raise ValueError(msg) - - transforms = ['ctf_head_t', 'dev_head_t', 'dev_ctf_t'] - for trans_name in transforms: - trans = [i[trans_name] for i in infos if i[trans_name]] - if len(trans) == 0: - info[trans_name] = None - elif len(trans) == 1: - info[trans_name] = trans[0] - elif all(np.all(trans[0]['trans'] == x['trans']) and - trans[0]['from'] == x['from'] and - trans[0]['to'] == x['to'] - for x in trans[1:]): - info[trans_name] = trans[0] - else: - msg = ("Measurement infos provide mutually inconsistent %s" % - trans_name) - raise ValueError(msg) - - # KIT system-IDs - kit_sys_ids = [i['kit_system_id'] for i in infos if i['kit_system_id']] - if len(kit_sys_ids) == 0: - info['kit_system_id'] = None - elif len(set(kit_sys_ids)) == 1: - info['kit_system_id'] = kit_sys_ids[0] - else: - raise ValueError("Trying to merge channels from different KIT systems") - - # hpi infos and digitization data: - fields = ['hpi_results', 'hpi_meas', 'dig'] - for k in fields: - values = [i[k] for i in infos if i[k]] - if len(values) == 0: - info[k] = [] - elif len(values) == 1: - info[k] = values[0] - elif all(object_diff(values[0], v) == '' for v in values[1:]): - info[k] = values[0] - else: - msg = ("Measurement infos are inconsistent for %s" % k) - raise ValueError(msg) - - # other fields - other_fields = ['acq_pars', 'acq_stim', 'bads', - 'comps', 'custom_ref_applied', 'description', - 'experimenter', 'file_id', 'highpass', 'utc_offset', - 'hpi_subsystem', 'events', 'device_info', 'helium_info', - 'line_freq', 'lowpass', 'meas_id', - 'proj_id', 'proj_name', 'projs', 'sfreq', 'gantry_angle', - 'subject_info', 'sfreq', 'xplotter_layout', 'proc_history'] - - for k in other_fields: - info[k] = _merge_info_values(infos, k) - - info['meas_date'] = infos[0]['meas_date'] - info._unlocked = False - - return info - - -@verbose -def create_info(ch_names, sfreq, ch_types='misc', verbose=None): - """Create a basic Info instance suitable for use with create_raw. - - Parameters - ---------- - ch_names : list of str | int - Channel names. If an int, a list of channel names will be created - from ``range(ch_names)``. - sfreq : float - Sample rate of the data. - ch_types : list of str | str - Channel types, default is ``'misc'`` which is not a - :term:`data channel `. - Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', - 'seeg', 'dbs', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' - or 'hbo'. If str, then all channels are assumed to be of the same type. - %(verbose)s - - Returns - ------- - %(info_not_none)s - - Notes - ----- - The info dictionary will be sparsely populated to enable functionality - within the rest of the package. Advanced functionality such as source - localization can only be obtained through substantial, proper - modifications of the info structure (not recommended). - - Note that the MEG device-to-head transform ``info['dev_head_t']`` will - be initialized to the identity transform. 
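# Minimal create_info sketch (channel names and sampling rate are made up); the
# result pairs naturally with mne.io.RawArray:
import numpy as np

import mne

info = mne.create_info(
    ch_names=["EEG 001", "EEG 002", "STI 014"],
    sfreq=500.0,
    ch_types=["eeg", "eeg", "stim"],
)
raw = mne.io.RawArray(np.zeros((3, 500)), info)  # 3 channels x 500 samples of zeros
print(info["nchan"], info["lowpass"])            # 3, 250.0 (defaults to sfreq / 2)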
- - Proper units of measure: - - * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog - * T: mag - * T/m: grad - * M: hbo, hbr - * Am: dipole - * AU: misc - """ - try: - ch_names = operator.index(ch_names) # int-like - except TypeError: - pass - else: - ch_names = list(np.arange(ch_names).astype(str)) - _validate_type(ch_names, (list, tuple), "ch_names", - ("list, tuple, or int")) - sfreq = float(sfreq) - if sfreq <= 0: - raise ValueError('sfreq must be positive') - nchan = len(ch_names) - if isinstance(ch_types, str): - ch_types = [ch_types] * nchan - ch_types = np.atleast_1d(np.array(ch_types, np.str_)) - if ch_types.ndim != 1 or len(ch_types) != nchan: - raise ValueError('ch_types and ch_names must be the same length ' - '(%s != %s) for ch_types=%s' - % (len(ch_types), nchan, ch_types)) - info = _empty_info(sfreq) - ch_types_dict = get_channel_type_constants(include_defaults=True) - for ci, (ch_name, ch_type) in enumerate(zip(ch_names, ch_types)): - _validate_type(ch_name, 'str', "each entry in ch_names") - _validate_type(ch_type, 'str', "each entry in ch_types") - if ch_type not in ch_types_dict: - raise KeyError(f'kind must be one of {list(ch_types_dict)}, ' - f'not {ch_type}') - this_ch_dict = ch_types_dict[ch_type] - kind = this_ch_dict['kind'] - # handle chpi, where kind is a *list* of FIFF constants: - kind = kind[0] if isinstance(kind, (list, tuple)) else kind - # mirror what tag.py does here - coord_frame = _ch_coord_dict.get(kind, FIFF.FIFFV_COORD_UNKNOWN) - coil_type = this_ch_dict.get('coil_type', FIFF.FIFFV_COIL_NONE) - unit = this_ch_dict.get('unit', FIFF.FIFF_UNIT_NONE) - chan_info = dict(loc=np.full(12, np.nan), - unit_mul=FIFF.FIFF_UNITM_NONE, range=1., cal=1., - kind=kind, coil_type=coil_type, unit=unit, - coord_frame=coord_frame, ch_name=str(ch_name), - scanno=ci + 1, logno=ci + 1) - info['chs'].append(chan_info) - - info._update_redundant() - info._check_consistency() - info._unlocked = False - return info - - -RAW_INFO_FIELDS = ( - 'acq_pars', 'acq_stim', 'bads', 'ch_names', 'chs', - 'comps', 'ctf_head_t', 'custom_ref_applied', 'description', 'dev_ctf_t', - 'dev_head_t', 'dig', 'experimenter', 'events', 'utc_offset', 'device_info', - 'file_id', 'highpass', 'hpi_meas', 'hpi_results', 'helium_info', - 'hpi_subsystem', 'kit_system_id', 'line_freq', 'lowpass', 'meas_date', - 'meas_id', 'nchan', 'proj_id', 'proj_name', 'projs', 'sfreq', - 'subject_info', 'xplotter_layout', 'proc_history', 'gantry_angle', -) - - -def _empty_info(sfreq): - """Create an empty info dictionary.""" - _none_keys = ( - 'acq_pars', 'acq_stim', 'ctf_head_t', 'description', - 'dev_ctf_t', 'dig', 'experimenter', 'utc_offset', 'device_info', - 'file_id', 'highpass', 'hpi_subsystem', 'kit_system_id', 'helium_info', - 'line_freq', 'lowpass', 'meas_date', 'meas_id', 'proj_id', 'proj_name', - 'subject_info', 'xplotter_layout', 'gantry_angle', - ) - _list_keys = ('bads', 'chs', 'comps', 'events', 'hpi_meas', 'hpi_results', - 'projs', 'proc_history') - info = Info() - info._unlocked = True - for k in _none_keys: - info[k] = None - for k in _list_keys: - info[k] = list() - info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF - info['highpass'] = 0. - info['sfreq'] = float(sfreq) - info['lowpass'] = info['sfreq'] / 2. - info['dev_head_t'] = Transform('meg', 'head') - info._update_redundant() - info._check_consistency() - return info - - -def _force_update_info(info_base, info_target): - """Update target info objects with values from info base. 
- - Note that values in info_target will be overwritten by those in info_base. - This will overwrite all fields except for: 'chs', 'ch_names', 'nchan'. - - Parameters - ---------- - info_base : mne.Info - The Info object you want to use for overwriting values - in target Info objects. - info_target : mne.Info | list of mne.Info - The Info object(s) you wish to overwrite using info_base. These objects - will be modified in-place. - """ - exclude_keys = ['chs', 'ch_names', 'nchan'] - info_target = np.atleast_1d(info_target).ravel() - all_infos = np.hstack([info_base, info_target]) - for ii in all_infos: - if not isinstance(ii, Info): - raise ValueError('Inputs must be of type Info. ' - 'Found type %s' % type(ii)) - for key, val in info_base.items(): - if key in exclude_keys: - continue - for i_targ in info_target: - with i_targ._unlock(): - i_targ[key] = val - - -def _add_timedelta_to_stamp(meas_date_stamp, delta_t): - """Add a timedelta to a meas_date tuple.""" - if meas_date_stamp is not None: - meas_date_stamp = _dt_to_stamp(_stamp_to_dt(meas_date_stamp) + delta_t) - return meas_date_stamp - - -@verbose -def anonymize_info(info, daysback=None, keep_his=False, verbose=None): - """Anonymize measurement information in place. - - .. warning:: If ``info`` is part of an object like - :class:`raw.info `, you should directly use - the method :meth:`raw.anonymize() ` - to ensure that all parts of the data are anonymized and - stay synchronized (e.g., - :class:`raw.annotations `). - - Parameters - ---------- - %(info_not_none)s - %(daysback_anonymize_info)s - %(keep_his_anonymize_info)s - %(verbose)s - - Returns - ------- - info : instance of Info - The anonymized measurement information. - - Notes - ----- - %(anonymize_info_notes)s - """ - _validate_type(info, 'info', "self") - - default_anon_dos = datetime.datetime(2000, 1, 1, 0, 0, 0, - tzinfo=datetime.timezone.utc) - default_str = "mne_anonymize" - default_subject_id = 0 - default_sex = 0 - default_desc = ("Anonymized using a time shift" - " to preserve age at acquisition") - - none_meas_date = info['meas_date'] is None - - if none_meas_date: - if daysback is not None: - warn('Input info has "meas_date" set to None. 
' - 'Removing all information from time/date structures, ' - '*NOT* performing any time shifts!') - else: - # compute timeshift delta - if daysback is None: - delta_t = info['meas_date'] - default_anon_dos - else: - delta_t = datetime.timedelta(days=daysback) - with info._unlock(): - info['meas_date'] = info['meas_date'] - delta_t - - # file_id and meas_id - for key in ('file_id', 'meas_id'): - value = info.get(key) - if value is not None: - assert 'msecs' not in value - if (none_meas_date or - ((value['secs'], value['usecs']) == DATE_NONE)): - # Don't try to shift backwards in time when no measurement - # date is available or when file_id is already a place holder - tmp = DATE_NONE - else: - tmp = _add_timedelta_to_stamp( - (value['secs'], value['usecs']), -delta_t) - value['secs'] = tmp[0] - value['usecs'] = tmp[1] - # The following copy is needed for a test CTF dataset - # otherwise value['machid'][:] = 0 would suffice - _tmp = value['machid'].copy() - _tmp[:] = 0 - value['machid'] = _tmp - - # subject info - subject_info = info.get('subject_info') - if subject_info is not None: - if subject_info.get('id') is not None: - subject_info['id'] = default_subject_id - if keep_his: - logger.info('Not fully anonymizing info - keeping ' - 'his_id, sex, and hand info') - else: - if subject_info.get('his_id') is not None: - subject_info['his_id'] = str(default_subject_id) - if subject_info.get('sex') is not None: - subject_info['sex'] = default_sex - if subject_info.get('hand') is not None: - del subject_info['hand'] # there's no "unknown" setting - - for key in ('last_name', 'first_name', 'middle_name'): - if subject_info.get(key) is not None: - subject_info[key] = default_str - - # anonymize the subject birthday - if none_meas_date: - subject_info.pop('birthday', None) - elif subject_info.get('birthday') is not None: - dob = datetime.datetime(subject_info['birthday'][0], - subject_info['birthday'][1], - subject_info['birthday'][2]) - dob -= delta_t - subject_info['birthday'] = dob.year, dob.month, dob.day - - for key in ('weight', 'height'): - if subject_info.get(key) is not None: - subject_info[key] = 0 - - info['experimenter'] = default_str - info['description'] = default_desc - with info._unlock(): - if info['proj_id'] is not None: - info['proj_id'] = np.zeros_like(info['proj_id']) - if info['proj_name'] is not None: - info['proj_name'] = default_str - if info['utc_offset'] is not None: - info['utc_offset'] = None - - proc_hist = info.get('proc_history') - if proc_hist is not None: - for record in proc_hist: - record['block_id']['machid'][:] = 0 - record['experimenter'] = default_str - if none_meas_date: - record['block_id']['secs'] = DATE_NONE[0] - record['block_id']['usecs'] = DATE_NONE[1] - record['date'] = DATE_NONE - else: - this_t0 = (record['block_id']['secs'], - record['block_id']['usecs']) - this_t1 = _add_timedelta_to_stamp( - this_t0, -delta_t) - record['block_id']['secs'] = this_t1[0] - record['block_id']['usecs'] = this_t1[1] - record['date'] = _add_timedelta_to_stamp( - record['date'], -delta_t) - - hi = info.get('helium_info') - if hi is not None: - if hi.get('orig_file_guid') is not None: - hi['orig_file_guid'] = default_str - if none_meas_date and hi.get('meas_date') is not None: - hi['meas_date'] = DATE_NONE - elif hi.get('meas_date') is not None: - hi['meas_date'] = _add_timedelta_to_stamp( - hi['meas_date'], -delta_t) - - di = info.get('device_info') - if di is not None: - for k in ('serial', 'site'): - if di.get(k) is not None: - di[k] = default_str - - err_mesg = 
('anonymize_info generated an inconsistent info object. ' - 'Underlying Error:\n') - info._check_consistency(prepend_error=err_mesg) - err_mesg = ('anonymize_info generated an inconsistent info object. ' - 'daysback parameter was too large. ' - 'Underlying Error:\n') - _check_dates(info, prepend_error=err_mesg) - - return info - - -@fill_doc -def _bad_chans_comp(info, ch_names): - """Check if channel names are consistent with current compensation status. - - Parameters - ---------- - %(info_not_none)s - - ch_names : list of str - The channel names to check. - - Returns - ------- - status : bool - True if compensation is *currently* in use but some compensation - channels are not included in picks - - False if compensation is *currently* not being used - or if compensation is being used and all compensation channels - in info and included in picks. - - missing_ch_names: array-like of str, shape (n_missing,) - The names of compensation channels not included in picks. - Returns [] if no channels are missing. - - """ - if 'comps' not in info: - # should this be thought of as a bug? - return False, [] - - # only include compensation channels that would affect selected channels - ch_names_s = set(ch_names) - comp_names = [] - for comp in info['comps']: - if len(ch_names_s.intersection(comp['data']['row_names'])) > 0: - comp_names.extend(comp['data']['col_names']) - comp_names = sorted(set(comp_names)) - - missing_ch_names = sorted(set(comp_names).difference(ch_names)) - - if get_current_comp(info) != 0 and len(missing_ch_names) > 0: - return True, missing_ch_names - - return False, missing_ch_names - - -_DIG_CAST = dict( - kind=int, ident=int, r=lambda x: x, coord_frame=int) -# key -> const, cast, write -_CH_INFO_MAP = OrderedDict( - scanno=(FIFF.FIFF_CH_SCAN_NO, int, write_int), - logno=(FIFF.FIFF_CH_LOGICAL_NO, int, write_int), - kind=(FIFF.FIFF_CH_KIND, int, write_int), - range=(FIFF.FIFF_CH_RANGE, float, write_float), - cal=(FIFF.FIFF_CH_CAL, float, write_float), - coil_type=(FIFF.FIFF_CH_COIL_TYPE, int, write_int), - loc=(FIFF.FIFF_CH_LOC, lambda x: x, write_float), - unit=(FIFF.FIFF_CH_UNIT, int, write_int), - unit_mul=(FIFF.FIFF_CH_UNIT_MUL, int, write_int), - ch_name=(FIFF.FIFF_CH_DACQ_NAME, str, write_string), - coord_frame=(FIFF.FIFF_CH_COORD_FRAME, int, write_int), -) -# key -> cast -_CH_CAST = OrderedDict((key, val[1]) for key, val in _CH_INFO_MAP.items()) -# const -> key, cast -_CH_READ_MAP = OrderedDict((val[0], (key, val[1])) - for key, val in _CH_INFO_MAP.items()) - - -@contextlib.contextmanager -def _writing_info_hdf5(info): - # Make info writing faster by packing chs and dig into numpy arrays - orig_dig = info.get('dig', None) - orig_chs = info['chs'] - with info._unlock(): - try: - if orig_dig is not None and len(orig_dig) > 0: - info['dig'] = _dict_pack(info['dig'], _DIG_CAST) - info['chs'] = _dict_pack(info['chs'], _CH_CAST) - info['chs']['ch_name'] = np.char.encode( - info['chs']['ch_name'], encoding='utf8') - yield - finally: - if orig_dig is not None: - info['dig'] = orig_dig - info['chs'] = orig_chs - - -def _dict_pack(obj, casts): - # pack a list of dict into dict of array - return {key: np.array([o[key] for o in obj]) for key in casts} - - -def _dict_unpack(obj, casts): - # unpack a dict of array into a list of dict - n = len(obj[list(casts)[0]]) - return [{key: cast(obj[key][ii]) for key, cast in casts.items()} - for ii in range(n)] - - -def _make_ch_names_mapping(chs): - orig_ch_names = [c['ch_name'] for c in chs] - ch_names = orig_ch_names.copy() - 
_unique_channel_names(ch_names, max_length=15, verbose='error') - ch_names_mapping = dict() - if orig_ch_names != ch_names: - ch_names_mapping.update(zip(orig_ch_names, ch_names)) - return ch_names_mapping - - -def _write_ch_infos(fid, chs, reset_range, ch_names_mapping): - ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping - for k, c in enumerate(chs): - # Scan numbers may have been messed up - c = c.copy() - c['ch_name'] = ch_names_mapping.get(c['ch_name'], c['ch_name']) - assert len(c['ch_name']) <= 15 - c['scanno'] = k + 1 - # for float/double, the "range" param is unnecessary - if reset_range: - c['range'] = 1.0 - write_ch_info(fid, c) - # only write new-style channel information if necessary - if len(ch_names_mapping): - logger.info( - ' Writing channel names to FIF truncated to 15 characters ' - 'with remapping') - for ch in chs: - start_block(fid, FIFF.FIFFB_CH_INFO) - assert set(ch) == set(_CH_INFO_MAP) - for (key, (const, _, write)) in _CH_INFO_MAP.items(): - write(fid, const, ch[key]) - end_block(fid, FIFF.FIFFB_CH_INFO) - - -def _ensure_infos_match(info1, info2, name, *, on_mismatch='raise'): - """Check if infos match. - - Parameters - ---------- - info1, info2 : instance of Info - The infos to compare. - name : str - The name of the object appearing in the error message of the comparison - fails. - on_mismatch : 'raise' | 'warn' | 'ignore' - What to do in case of a mismatch of ``dev_head_t`` between ``info1`` - and ``info2``. - """ - _check_on_missing(on_missing=on_mismatch, name='on_mismatch') - - info1._check_consistency() - info2._check_consistency() - - if info1['nchan'] != info2['nchan']: - raise ValueError(f'{name}.info[\'nchan\'] must match') - if set(info1['bads']) != set(info2['bads']): - raise ValueError(f'{name}.info[\'bads\'] must match') - if info1['sfreq'] != info2['sfreq']: - raise ValueError(f'{name}.info[\'sfreq\'] must match') - if set(info1['ch_names']) != set(info2['ch_names']): - raise ValueError(f'{name}.info[\'ch_names\'] must match') - if len(info2['projs']) != len(info1['projs']): - raise ValueError(f'SSP projectors in {name} must be the same') - if any(not _proj_equal(p1, p2) for p1, p2 in - zip(info2['projs'], info1['projs'])): - raise ValueError(f'SSP projectors in {name} must be the same') - if (info1['dev_head_t'] is None) != (info2['dev_head_t'] is None) or \ - (info1['dev_head_t'] is not None and not - np.allclose(info1['dev_head_t']['trans'], - info2['dev_head_t']['trans'], rtol=1e-6)): - msg = (f"{name}.info['dev_head_t'] differs. The " - f"instances probably come from different runs, and " - f"are therefore associated with different head " - f"positions. Manually change info['dev_head_t'] to " - f"avoid this message but beware that this means the " - f"MEG sensors will not be properly spatially aligned. " - f"See mne.preprocessing.maxwell_filter to realign the " - f"runs to a common head position.") - _on_missing(on_missing=on_mismatch, msg=msg, - name='on_mismatch') - - -def _get_fnirs_ch_pos(info): - """Return positions of each fNIRS optode. - - fNIRS uses two types of optodes, sources and detectors. - There can be multiple connections between each source - and detector at different wavelengths. This function - returns the location of each source and detector. 
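# For context: fNIRS channels in MNE are conventionally named after a
# source-detector pair plus a wavelength or chromophore (e.g. "S1_D1 760",
# "S1_D1 hbo"), which is how _get_fnirs_ch_pos and the private nirs helpers it
# calls can recover individual optode names and positions from the info.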
- """ - from ..preprocessing.nirs import _fnirs_optode_names, _optode_position - srcs, dets = _fnirs_optode_names(info) - ch_pos = {} - for optode in [*srcs, *dets]: - ch_pos[optode] = _optode_position(info, optode) - return ch_pos diff --git a/mne/io/nedf/__init__.py b/mne/io/nedf/__init__.py index 717669425d1..fe67ee79fb5 100644 --- a/mne/io/nedf/__init__.py +++ b/mne/io/nedf/__init__.py @@ -1,7 +1,7 @@ """NEDF file import module.""" -# Author: Tristan Stenner -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .nedf import read_raw_nedf, _parse_nedf_header diff --git a/mne/io/nedf/nedf.py b/mne/io/nedf/nedf.py index 55c11c2c244..bd4054f1c16 100644 --- a/mne/io/nedf/nedf.py +++ b/mne/io/nedf/nedf.py @@ -1,16 +1,18 @@ -# -*- coding: utf-8 -*- +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + """Import NeuroElectrics DataFormat (NEDF) files.""" from copy import deepcopy from datetime import datetime, timezone -from xml.etree import ElementTree import numpy as np +from ..._fiff.meas_info import create_info +from ..._fiff.utils import _mult_cal_one +from ...utils import _check_fname, _soft_import, verbose, warn from ..base import BaseRaw -from ..meas_info import create_info -from ..utils import _mult_cal_one -from ...utils import warn, verbose, _check_fname def _getsubnodetext(node, name): @@ -30,7 +32,7 @@ def _getsubnodetext(node, name): """ subnode = node.findtext(name) if not subnode: - raise RuntimeError('NEDF header ' + name + ' not found') + raise RuntimeError("NEDF header " + name + " not found") return subnode @@ -51,6 +53,7 @@ def _parse_nedf_header(header): n_samples : int The number of data samples. """ + defusedxml = _soft_import("defusedxml", "reading NEDF data") info = {} # nedf files have three accelerometer channels sampled at 100Hz followed # by five EEG samples + TTL trigger sampled at 500Hz @@ -65,55 +68,56 @@ def _parse_nedf_header(header): dt = [] # dtype for the binary data block datadt = [] # dtype for a single EEG sample - headerend = header.find(b'\0') + headerend = header.find(b"\0") if headerend == -1: - raise RuntimeError('End of header null not found') - headerxml = ElementTree.fromstring(header[:headerend]) - nedfversion = headerxml.findtext('NEDFversion', '') - if nedfversion not in ['1.3', '1.4']: - warn('NEDFversion unsupported, use with caution') + raise RuntimeError("End of header null not found") + headerxml = defusedxml.ElementTree.fromstring(header[:headerend]) + nedfversion = headerxml.findtext("NEDFversion", "") + if nedfversion not in ["1.3", "1.4"]: + warn("NEDFversion unsupported, use with caution") - if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM': - warn('Found Starstim, this hasn\'t been tested extensively!') + if headerxml.findtext("stepDetails/DeviceClass", "") == "STARSTIM": + warn("Found Starstim, this hasn't been tested extensively!") - if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF': - raise RuntimeError('Unknown additional channel, aborting.') + if headerxml.findtext("AdditionalChannelStatus", "OFF") != "OFF": + raise RuntimeError("Unknown additional channel, aborting.") - n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0)) + n_acc = int(headerxml.findtext("NumberOfChannelsOfAccelerometer", 0)) if n_acc: # expect one sample of u16 accelerometer data per block - dt.append(('acc', '>u2', (n_acc,))) + dt.append(("acc", ">u2", (n_acc,))) - eegset = 
headerxml.find('EEGSettings') + eegset = headerxml.find("EEGSettings") if eegset is None: - raise RuntimeError('No EEG channels found') - nchantotal = int(_getsubnodetext(eegset, 'TotalNumberOfChannels')) - info['nchan'] = nchantotal + raise RuntimeError("No EEG channels found") + nchantotal = int(_getsubnodetext(eegset, "TotalNumberOfChannels")) + info["nchan"] = nchantotal - info['sfreq'] = int(_getsubnodetext(eegset, 'EEGSamplingRate')) - info['ch_names'] = [e.text for e in eegset.find('EEGMontage')] - if nchantotal != len(info['ch_names']): + info["sfreq"] = int(_getsubnodetext(eegset, "EEGSamplingRate")) + info["ch_names"] = [e.text for e in eegset.find("EEGMontage")] + if nchantotal != len(info["ch_names"]): raise RuntimeError( f"TotalNumberOfChannels ({nchantotal}) != " - f"channel count ({len(info['ch_names'])})") + f"channel count ({len(info['ch_names'])})" + ) # expect nchantotal uint24s - datadt.append(('eeg', 'B', (nchantotal, 3))) + datadt.append(("eeg", "B", (nchantotal, 3))) - if headerxml.find('STIMSettings') is not None: + if headerxml.find("STIMSettings") is not None: # 2* -> two stim samples per eeg sample - datadt.append(('stim', 'B', (2, nchantotal, 3))) - warn('stim channels are currently ignored') + datadt.append(("stim", "B", (2, nchantotal, 3))) + warn("stim channels are currently ignored") # Trigger data: 4 bytes in newer versions, 1 byte in older versions - trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B' - datadt.append(('trig', trigger_type)) + trigger_type = ">i4" if headerxml.findtext("NEDFversion") else "B" + datadt.append(("trig", trigger_type)) # 5 data samples per block - dt.append(('data', np.dtype(datadt), (5,))) + dt.append(("data", np.dtype(datadt), (5,))) - date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', 0) - info['meas_date'] = datetime.fromtimestamp(int(date) / 1000, timezone.utc) + date = headerxml.findtext("StepDetails/StartDate_firstEEGTimestamp", 0) + info["meas_date"] = datetime.fromtimestamp(int(date) / 1000, timezone.utc) - n_samples = int(_getsubnodetext(eegset, 'NumberOfRecordsOfEEG')) + n_samples = int(_getsubnodetext(eegset, "NumberOfRecordsOfEEG")) n_full, n_last = divmod(n_samples, 5) dt_last = deepcopy(dt) assert dt_last[-1][-1] == (5,) @@ -132,29 +136,34 @@ class RawNedf(BaseRaw): def __init__(self, filename, preload=False, verbose=None): filename = str(_check_fname(filename, "read", True, "filename")) - with open(filename, mode='rb') as fid: + with open(filename, mode="rb") as fid: header = fid.read(_HDRLEN) header, dt, dt_last, n_samp, n_full = _parse_nedf_header(header) - ch_names = header['ch_names'] + ['STI 014'] - ch_types = ['eeg'] * len(ch_names) - ch_types[-1] = 'stim' - info = create_info(ch_names, header['sfreq'], ch_types) + ch_names = header["ch_names"] + ["STI 014"] + ch_types = ["eeg"] * len(ch_names) + ch_types[-1] = "stim" + info = create_info(ch_names, header["sfreq"], ch_types) # scaling factor ADC-values -> volts # taken from the NEDF EEGLAB plugin # (https://www.neuroelectrics.com/resources/software/): - for ch in info['chs'][:-1]: - ch['cal'] = 2.4 / (6.0 * 8388607) + for ch in info["chs"][:-1]: + ch["cal"] = 2.4 / (6.0 * 8388607) with info._unlock(): - info['meas_date'] = header['meas_date'] + info["meas_date"] = header["meas_date"] raw_extra = dict(dt=dt, dt_last=dt_last, n_full=n_full) super().__init__( - info, preload=preload, filenames=[filename], verbose=verbose, - raw_extras=[raw_extra], last_samps=[n_samp - 1]) + info, + preload=preload, + filenames=[filename], + 
verbose=verbose, + raw_extras=[raw_extra], + last_samps=[n_samp - 1], + ) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): - dt = self._raw_extras[fi]['dt'] - dt_last = self._raw_extras[fi]['dt_last'] - n_full = self._raw_extras[fi]['n_full'] + dt = self._raw_extras[fi]["dt"] + dt_last = self._raw_extras[fi]["dt_last"] + n_full = self._raw_extras[fi]["n_full"] n_eeg = dt[1].subdtype[0][0].shape[0] # data is stored in 5-sample chunks (except maybe the last one!) # so we have to do some gymnastics to pick the correct parts to @@ -166,28 +175,28 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): last = None n_chunks = (n_samples_full - 1) // 5 + 1 n_tot = n_chunks * 5 - with open(self._filenames[fi], 'rb') as fid: + with open(self.filenames[fi], "rb") as fid: fid.seek(offset, 0) chunks = np.fromfile(fid, dtype=dt, count=n_chunks) assert len(chunks) == n_chunks if n_samples != n_samples_full: last = np.fromfile(fid, dtype=dt_last, count=1) eeg = _convert_eeg(chunks, n_eeg, n_tot) - trig = chunks['data']['trig'].reshape(1, n_tot) + trig = chunks["data"]["trig"].reshape(1, n_tot) if last is not None: - n_last = dt_last['data'].shape[0] - eeg = np.concatenate( - (eeg, _convert_eeg(last, n_eeg, n_last)), axis=-1) + n_last = dt_last["data"].shape[0] + eeg = np.concatenate((eeg, _convert_eeg(last, n_eeg, n_last)), axis=-1) trig = np.concatenate( - (trig, last['data']['trig'].reshape(1, n_last)), axis=-1) + (trig, last["data"]["trig"].reshape(1, n_last)), axis=-1 + ) one_ = np.concatenate((eeg, trig)) - one = one_[:, start_sl:n_samples + start_sl] + one = one_[:, start_sl : n_samples + start_sl] _mult_cal_one(data, one, idx, cals, mult) def _convert_eeg(chunks, n_eeg, n_tot): # convert uint8-triplet -> int32 - eeg = chunks['data']['eeg'] @ np.array([1 << 16, 1 << 8, 1]) + eeg = chunks["data"]["eeg"] @ np.array([1 << 16, 1 << 8, 1]) # convert sign if necessary eeg[eeg > (1 << 23)] -= 1 << 24 eeg = eeg.reshape((n_tot, n_eeg)).T @@ -195,7 +204,7 @@ def _convert_eeg(chunks, n_eeg, n_tot): @verbose -def read_raw_nedf(filename, preload=False, verbose=None): +def read_raw_nedf(filename, preload=False, verbose=None) -> RawNedf: """Read NeuroElectrics .nedf files. NEDF file versions starting from 1.3 are supported. diff --git a/mne/io/nedf/tests/__init__.py b/mne/io/nedf/tests/__init__.py index 8b137891791..04c673b4e4d 100644 --- a/mne/io/nedf/tests/__init__.py +++ b/mne/io/nedf/tests/__init__.py @@ -1 +1,3 @@ - +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/nedf/tests/test_nedf.py b/mne/io/nedf/tests/test_nedf.py index 9f00cda7a04..1382417cd10 100644 --- a/mne/io/nedf/tests/test_nedf.py +++ b/mne/io/nedf/tests/test_nedf.py @@ -1,16 +1,16 @@ -# -*- coding: utf-8 -*- """Test reading of NEDF format.""" -# Author: Tristan Stenner -# + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
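For illustration, a minimal standalone sketch (not part of the patch) of the uint24 conversion performed by _convert_eeg above; the byte values here are made up:

import numpy as np

# two illustrative samples: 0x000001 -> +1, 0xFFFFFF -> -1 after the sign fix-up
triplets = np.array([[0x00, 0x00, 0x01], [0xFF, 0xFF, 0xFF]], dtype=np.uint8)
vals = triplets @ np.array([1 << 16, 1 << 8, 1])  # combine bytes, MSB first
vals[vals > (1 << 23)] -= 1 << 24  # two's-complement sign correction
print(vals)  # [ 1 -1]

The weights [1 << 16, 1 << 8, 1] and the 1 << 23 sign threshold mirror the reader code above; only the sample bytes are invented.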
import pytest from numpy.testing import assert_allclose, assert_array_equal from mne import find_events -from mne.io.constants import FIFF -from mne.io.nedf import read_raw_nedf, _parse_nedf_header +from mne._fiff.constants import FIFF from mne.datasets import testing +from mne.io.nedf import _parse_nedf_header, read_raw_nedf from mne.io.tests.test_raw import _test_raw_reader eeg_path = testing.data_path(download=False, verbose=True) @@ -29,29 +29,30 @@ \x00""" +pytest.importorskip("defusedxml") + -@pytest.mark.parametrize('nacc', (0, 3)) +@pytest.mark.parametrize("nacc", (0, 3)) def test_nedf_header_parser(nacc): """Test NEDF header parsing and dtype extraction.""" - with pytest.warns(RuntimeWarning, match='stim channels.*ignored'): - info, dt, dt_last, n_samples, n_full = _parse_nedf_header( - stimhdr % nacc) + with pytest.warns(RuntimeWarning, match="stim channels.*ignored"): + info, dt, dt_last, n_samples, n_full = _parse_nedf_header(stimhdr % nacc) assert n_samples == 11 assert n_full == 2 nchan = 4 - assert info['nchan'] == nchan + assert info["nchan"] == nchan assert dt.itemsize == 200 + nacc * 2 if nacc: - assert dt.names[0] == 'acc' - assert dt['acc'].shape == (nacc,) + assert dt.names[0] == "acc" + assert dt["acc"].shape == (nacc,) - assert dt['data'].shape == (5,) # blocks of 5 EEG samples each - assert dt_last['data'].shape == (1,) # plus one last extra one + assert dt["data"].shape == (5,) # blocks of 5 EEG samples each + assert dt_last["data"].shape == (1,) # plus one last extra one - eegsampledt = dt['data'].subdtype[0] - assert eegsampledt.names == ('eeg', 'stim', 'trig') - assert eegsampledt['eeg'].shape == (nchan, 3) - assert eegsampledt['stim'].shape == (2, nchan, 3) + eegsampledt = dt["data"].subdtype[0] + assert eegsampledt.names == ("eeg", "stim", "trig") + assert eegsampledt["eeg"].shape == (nchan, 3) + assert eegsampledt["stim"].shape == (2, nchan, 3) def test_invalid_headers(): @@ -63,38 +64,35 @@ def test_invalid_headers(): ABCD \x00""" - nchan = b'4' - sr = b'500' + nchan = b"4" + sr = b"500" hdr = { - 'null': - b'No null terminator', - 'Unknown additional': - (b'1.3' + - b'???\x00'), # noqa: E501 - 'No EEG channels found': - b'1.3\x00', - 'TotalNumberOfChannels not found': - tpl % b'No nchan.', - '!= channel count': - tpl % (sr + b'52'), - 'EEGSamplingRate not found': - tpl % nchan, - 'NumberOfRecordsOfEEG not found': - tpl % (sr + nchan), + "null": b"No null terminator", + "Unknown additional": ( + b"1.3" + + b"???\x00" + ), # noqa: E501 + "No EEG channels found": b"1.3\x00", + "TotalNumberOfChannels not found": tpl % b"No nchan.", + "!= channel count": tpl + % (sr + b"52"), + "EEGSamplingRate not found": tpl % nchan, + "NumberOfRecordsOfEEG not found": tpl % (sr + nchan), } for match, invalid_hdr in hdr.items(): with pytest.raises(RuntimeError, match=match): _parse_nedf_header(invalid_hdr) sus_hdrs = { - 'unsupported': b'25\x00', - 'tested': ( - b'1.3' + - b'STARSTIM\x00'), + "unsupported": b"25\x00", + "tested": ( + b"1.3" + + b"STARSTIM\x00" + ), } for match, sus_hdr in sus_hdrs.items(): with pytest.warns(RuntimeWarning, match=match): - with pytest.raises(RuntimeError, match='No EEG channels found'): + with pytest.raises(RuntimeError, match="No EEG channels found"): _parse_nedf_header(sus_hdr) @@ -108,22 +106,22 @@ def test_nedf_data(): events = find_events(raw, shortest_event=1) assert len(events) == 4 assert_array_equal(events[:, 2], [1, 1, 1, 1]) - onsets = events[:, 0] / raw.info['sfreq'] - assert raw.info['sfreq'] == 500 + onsets = events[:, 0] / 
raw.info["sfreq"] + assert raw.info["sfreq"] == 500 - data_end = raw.get_data('Fp1', nsamples - 100, nsamples).mean() - assert_allclose(data_end, .0176, atol=.01) - assert_allclose(raw.get_data('Fpz', 0, 100).mean(), .0185, atol=.01) + data_end = raw.get_data("Fp1", nsamples - 100, nsamples).mean() + assert_allclose(data_end, 0.0176, atol=0.01) + assert_allclose(raw.get_data("Fpz", 0, 100).mean(), 0.0185, atol=0.01) assert_allclose(onsets, [22.384, 38.238, 49.496, 63.15]) - assert raw.info['meas_date'].year == 2019 - assert raw.ch_names[2] == 'AF7' - - for ch in raw.info['chs'][:-1]: - assert ch['kind'] == FIFF.FIFFV_EEG_CH - assert ch['unit'] == FIFF.FIFF_UNIT_V - assert raw.info['chs'][-1]['kind'] == FIFF.FIFFV_STIM_CH - assert raw.info['chs'][-1]['unit'] == FIFF.FIFF_UNIT_V + assert raw.info["meas_date"].year == 2019 + assert raw.ch_names[2] == "AF7" + + for ch in raw.info["chs"][:-1]: + assert ch["kind"] == FIFF.FIFFV_EEG_CH + assert ch["unit"] == FIFF.FIFF_UNIT_V + assert raw.info["chs"][-1]["kind"] == FIFF.FIFFV_STIM_CH + assert raw.info["chs"][-1]["unit"] == FIFF.FIFF_UNIT_V # full tests _test_raw_reader(read_raw_nedf, filename=eegfile) diff --git a/mne/io/neuralynx/__init__.py b/mne/io/neuralynx/__init__.py new file mode 100644 index 00000000000..f302a22a893 --- /dev/null +++ b/mne/io/neuralynx/__init__.py @@ -0,0 +1,5 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .neuralynx import read_raw_neuralynx diff --git a/mne/io/neuralynx/neuralynx.py b/mne/io/neuralynx/neuralynx.py new file mode 100644 index 00000000000..56ff9fa4adb --- /dev/null +++ b/mne/io/neuralynx/neuralynx.py @@ -0,0 +1,426 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import datetime +import glob +import inspect +import os + +import numpy as np + +from ..._fiff.meas_info import create_info +from ..._fiff.utils import _mult_cal_one +from ...annotations import Annotations +from ...utils import _check_fname, _soft_import, fill_doc, logger, verbose +from ..base import BaseRaw + + +@fill_doc +def read_raw_neuralynx( + fname, *, preload=False, exclude_fname_patterns=None, verbose=None +) -> "RawNeuralynx": + """Reader for Neuralynx files. + + Parameters + ---------- + fname : path-like + Path to a folder with Neuralynx .ncs files. + %(preload)s + exclude_fname_patterns : list of str + List of glob-like string patterns to exclude from channel list. + Useful when not all channels have the same number of samples + so you can read separate instances. + %(verbose)s + + Returns + ------- + raw : instance of RawNeuralynx + A Raw object containing Neuralynx data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawNeuralynx. + + Notes + ----- + Neuralynx files are read from disk using the `Neo package + `__. + Currently, only reading of the ``.ncs files`` is supported. + + ``raw.info["meas_date"]`` is read from the ``recording_opened`` property + of the first ``.ncs`` file (i.e. channel) in the dataset (a warning is issued + if files have different dates of acquisition). + + Channel-specific high and lowpass frequencies of online filters are determined + based on the ``DspLowCutFrequency`` and ``DspHighCutFrequency`` header fields, + respectively. If no filters were used for a channel, the default lowpass is set + to the Nyquist frequency and the default highpass is set to 0. 
+ If channels have different high/low cutoffs, ``raw.info["highpass"]`` and
+ ``raw.info["lowpass"]`` are then set to the maximum highpass and minimum lowpass
+ values across channels, respectively.
+
+ Other header variables can be inspected using Neo directly. For example::
+
+ from neo.io import NeuralynxIO # doctest: +SKIP
+ fname = 'path/to/your/data' # doctest: +SKIP
+ nlx_reader = NeuralynxIO(dirname=fname) # doctest: +SKIP
+ print(nlx_reader.header) # doctest: +SKIP
+ print(nlx_reader.file_headers.items()) # doctest: +SKIP
+ """
+ return RawNeuralynx(
+ fname,
+ preload=preload,
+ exclude_fname_patterns=exclude_fname_patterns,
+ verbose=verbose,
+ )
+
+
+# Helper for neo change of exclude_filename -> exclude_filenames in 0.13.2
+def _exclude_kwarg(exclude_fnames):
+ from neo.io import NeuralynxIO
+
+ key = "exclude_filename"
+ if "exclude_filenames" in inspect.getfullargspec(NeuralynxIO).args:
+ key += "s"
+ return {key: exclude_fnames}
+
+
+@fill_doc
+class RawNeuralynx(BaseRaw):
+ """RawNeuralynx class."""
+
+ @verbose
+ def __init__(
+ self,
+ fname,
+ *,
+ preload=False,
+ exclude_fname_patterns=None,
+ verbose=None,
+ ):
+ fname = _check_fname(fname, "read", True, "fname", need_dir=True)
+
+ _soft_import("neo", "Reading NeuralynxIO files", strict=True)
+ from neo.io import NeuralynxIO
+
+ logger.info(f"Checking files in {fname}")
+
+ # construct a list of filenames to ignore
+ exclude_fnames = None
+ if exclude_fname_patterns:
+ exclude_fnames = []
+ for pattern in exclude_fname_patterns:
+ fnames = glob.glob(os.path.join(fname, pattern))
+ fnames = [os.path.basename(fname) for fname in fnames]
+ exclude_fnames.extend(fnames)
+
+ logger.info("Ignoring .ncs files:\n" + "\n".join(exclude_fnames))
+
+ # get basic file info from header, throw Error if NeuralynxIO can't parse
+ try:
+ nlx_reader = NeuralynxIO(dirname=fname, **_exclude_kwarg(exclude_fnames))
+ except ValueError as e:
+ # give a more informative error message and what the user can do about it
+ if "Incompatible section structures across streams" in str(e):
+ raise ValueError(
+ "It seems .ncs channels have different numbers of samples. " +
+ "This is likely due to different sampling rates. " +
+ "Try reading in only channels with uniform sampling rate " +
+ "by excluding other channels with `exclude_fname_patterns` " +
+ "input argument." +
+ f"\nOriginal neo.NeuralynxRawIO ValueError:\n{e}"
+ ) from None
+ else:
+ raise
+
+ info = create_info(
+ ch_types="seeg",
+ ch_names=nlx_reader.header["signal_channels"]["name"].tolist(),
+ sfreq=nlx_reader.get_signal_sampling_rate(),
+ )
+
+ ncs_fnames = nlx_reader.ncs_filenames.values()
+ ncs_hdrs = [
+ hdr
+ for hdr_key, hdr in nlx_reader.file_headers.items()
+ if hdr_key in ncs_fnames
+ ]
+
+ # if all files have the same recording_opened date, write it to info
+ meas_dates = np.array([hdr["recording_opened"] for hdr in ncs_hdrs])
+ # to be sure, only write if all dates are the same
+ meas_diff = []
+ for md in meas_dates:
+ meas_diff.append((md - meas_dates[0]).total_seconds())
+
+ # tolerate a +/-1 second meas_date difference (arbitrary threshold)
+ # else issue a warning
+ warn_meas = (np.abs(meas_diff) > 1.0).any()
+ if warn_meas:
+ logger.warning(
+ "Not all .ncs files have the same recording_opened date. " +
+ "Writing meas_date based on the first .ncs file."
+ )
+
+ # Neuralynx allows channel specific low/highpass filters
+ # if not enabled, assume default lowpass = nyquist, highpass = 0
+ default_lowpass = info["sfreq"] / 2 # nyquist
+ default_highpass = 0
+
+ has_hp = [hdr["DSPLowCutFilterEnabled"] for hdr in ncs_hdrs]
+ has_lp = [hdr["DSPHighCutFilterEnabled"] for hdr in ncs_hdrs]
+ if not all(has_hp) or not all(has_lp):
+ logger.warning(
+ "Not all .ncs files have the same high/lowpass filter settings. " +
+ "Assuming default highpass = 0, lowpass = nyquist."
+ )
+
+ highpass_freqs = [
+ float(hdr["DspLowCutFrequency"])
+ if hdr["DSPLowCutFilterEnabled"]
+ else default_highpass
+ for hdr in ncs_hdrs
+ ]
+
+ lowpass_freqs = [
+ float(hdr["DspHighCutFrequency"])
+ if hdr["DSPHighCutFilterEnabled"]
+ else default_lowpass
+ for hdr in ncs_hdrs
+ ]
+
+ with info._unlock():
+ info["meas_date"] = meas_dates[0].astimezone(datetime.timezone.utc)
+ info["highpass"] = np.max(highpass_freqs)
+ info["lowpass"] = np.min(lowpass_freqs)
+
+ # Neo reads only valid contiguous .ncs samples grouped as segments
+ n_segments = nlx_reader.header["nb_segment"][0]
+ block_id = 0 # assumes there's only one block of recording
+
+ # get segment start/stop times
+ start_times = np.array(
+ [nlx_reader.segment_t_start(block_id, i) for i in range(n_segments)]
+ )
+ stop_times = np.array(
+ [nlx_reader.segment_t_stop(block_id, i) for i in range(n_segments)]
+ )
+
+ # find discontinuous boundaries (of length n-1)
+ next_start_times = start_times[1::]
+ previous_stop_times = stop_times[:-1]
+ seg_diffs = next_start_times - previous_stop_times
+
+ # mark as discontinuous any two segments that have
+ # start/stop delta larger than sampling period (1.5/sampling_rate)
+ logger.info("Checking for temporal discontinuities in Neo data segments.")
+ delta = 1.5 / info["sfreq"]
+ gaps = seg_diffs > delta
+
+ seg_gap_dict = {}
+
+ logger.info(
+ f"N = {gaps.sum()} discontinuous Neo segments detected " +
+ f"with delta > {delta} sec. " +
+ "Annotating gaps as BAD_ACQ_SKIP."
+ if gaps.any()
+ else "No discontinuities detected."
+ ) + + gap_starts = stop_times[:-1][gaps] # gap starts at segment offset + gap_stops = start_times[1::][gaps] # gap stops at segment onset + + # (n_gaps,) array of ints giving number of samples per inferred gap + gap_n_samps = np.array( + [ + int(round(stop * info["sfreq"])) - int(round(start * info["sfreq"])) + for start, stop in zip(gap_starts, gap_stops) + ] + ).astype(int) # force an int array (if no gaps, empty array is a float) + + # get sort indices for all segments (valid and gap) in ascending order + all_starts_ids = np.argsort(np.concatenate([start_times, gap_starts])) + + # variable indicating whether each segment is a gap or not + gap_indicator = np.concatenate( + [ + np.full(len(start_times), fill_value=0), + np.full(len(gap_starts), fill_value=1), + ] + ) + gap_indicator = gap_indicator[all_starts_ids].astype(bool) + + # store this in a dict to be passed to _raw_extras + seg_gap_dict = { + "gap_n_samps": gap_n_samps, + "isgap": gap_indicator, # False (data segment) or True (gap segment) + } + + valid_segment_sizes = [ + nlx_reader.get_signal_size(block_id, i) for i in range(n_segments) + ] + + sizes_sorted = np.concatenate([valid_segment_sizes, gap_n_samps])[ + all_starts_ids + ] + + # now construct an (n_samples,) indicator variable + sample2segment = np.concatenate( + [np.full(shape=(n,), fill_value=i) for i, n in enumerate(sizes_sorted)] + ) + + # get the start sample index for each gap segment () + gap_start_ids = np.cumsum(np.hstack([[0], sizes_sorted[:-1]]))[gap_indicator] + + # recreate time axis for gap annotations + mne_times = np.arange(0, len(sample2segment)) / info["sfreq"] + + assert len(gap_start_ids) == len(gap_n_samps) + annotations = Annotations( + onset=[mne_times[onset_id] for onset_id in gap_start_ids], + duration=[ + mne_times[onset_id + (n - 1)] - mne_times[onset_id] + for onset_id, n in zip(gap_start_ids, gap_n_samps) + ], + description=["BAD_ACQ_SKIP"] * len(gap_start_ids), + ) + + super().__init__( + info=info, + last_samps=[sizes_sorted.sum() - 1], + filenames=[fname], + preload=preload, + raw_extras=[ + dict( + smp2seg=sample2segment, + exclude_fnames=exclude_fnames, + segment_sizes=sizes_sorted, + seg_gap_dict=seg_gap_dict, + ) + ], + ) + + self.set_annotations(annotations) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + from neo import AnalogSignal, Segment + from neo.io import NeuralynxIO + from neo.io.proxyobjects import AnalogSignalProxy + + # quantities is a dependency of neo so we are guaranteed it exists + from quantities import Hz + + nlx_reader = NeuralynxIO( + dirname=self.filenames[fi], + **_exclude_kwarg(self._raw_extras[0]["exclude_fnames"]), + ) + neo_block = nlx_reader.read(lazy=True) + + # check that every segment has 1 associated neo.AnalogSignal() object + # (not sure what multiple analogsignals per neo.Segment would mean) + assert sum( + [len(segment.analogsignals) for segment in neo_block[0].segments] + ) == len(neo_block[0].segments) + + segment_sizes = self._raw_extras[fi]["segment_sizes"] + + # construct a (n_segments, 2) array of the first and last + # sample index for each segment relative to the start of the recording + seg_starts = [0] # first chunk starts at sample 0 + seg_stops = [segment_sizes[0] - 1] + for i in range(1, len(segment_sizes)): + ons_new = ( + seg_stops[i - 1] + 1 + ) # current chunk starts one sample after the previous one + seg_starts.append(ons_new) + off_new = ( + seg_stops[i - 1] + segment_sizes[i] + ) # the last sample is len(chunk) 
samples after the previous ended + seg_stops.append(off_new) + + start_stop_samples = np.stack([np.array(seg_starts), np.array(seg_stops)]).T + + first_seg = self._raw_extras[0]["smp2seg"][ + start + ] # segment containing start sample + last_seg = self._raw_extras[0]["smp2seg"][ + stop - 1 + ] # segment containing stop sample + + # select all segments between the one that contains the start sample + # and the one that contains the stop sample + sel_samples_global = start_stop_samples[first_seg : last_seg + 1, :] + + # express end samples relative to segment onsets + # to be used for slicing the arrays below + sel_samples_local = sel_samples_global.copy() + sel_samples_local[0:-1, 1] = ( + sel_samples_global[0:-1, 1] - sel_samples_global[0:-1, 0] + ) + sel_samples_local[1::, 0] = ( + 0 # now set the start sample for all segments after the first to 0 + ) + + sel_samples_local[0, 0] = ( + start - sel_samples_global[0, 0] + ) # express start sample relative to segment onset + sel_samples_local[-1, -1] = (stop - 1) - sel_samples_global[ + -1, 0 + ] # express stop sample relative to segment onset + + # array containing Segments + segments_arr = np.array(neo_block[0].segments, dtype=object) + + # if gaps were detected, correctly insert gap Segments in between valid Segments + gap_samples = self._raw_extras[fi]["seg_gap_dict"]["gap_n_samps"] + gap_segments = [Segment(f"gap-{i}") for i in range(len(gap_samples))] + + # create AnalogSignal objects representing gap data filled with 0's + sfreq = nlx_reader.get_signal_sampling_rate() + n_chans = ( + np.arange(idx.start, idx.stop, idx.step).size + if type(idx) is slice + else len(idx) # idx can be a slice or an np.array so check both + ) + + for seg, n in zip(gap_segments, gap_samples): + asig = AnalogSignal( + signal=np.zeros((n, n_chans)), units="uV", sampling_rate=sfreq * Hz + ) + seg.analogsignals.append(asig) + + n_total_segments = len(neo_block[0].segments + gap_segments) + segments_arr = np.zeros((n_total_segments,), dtype=object) + + # insert inferred gap segments at the right place in between valid segments + isgap = self._raw_extras[0]["seg_gap_dict"]["isgap"] + segments_arr[~isgap] = neo_block[0].segments + segments_arr[isgap] = gap_segments + + # now load data for selected segments/channels via + # neo.Segment.AnalogSignalProxy.load() or + # pad directly as AnalogSignal.magnitude for any gap data + all_data = np.concatenate( + [ + signal.load(channel_indexes=idx).magnitude[ + samples[0] : samples[-1] + 1, : + ] + if isinstance(signal, AnalogSignalProxy) + else signal.magnitude[samples[0] : samples[-1] + 1, :] + for seg, samples in zip( + segments_arr[first_seg : last_seg + 1], sel_samples_local + ) + for signal in seg.analogsignals + ] + ).T + + all_data *= 1e-6 # Convert uV to V + n_channels = len(nlx_reader.header["signal_channels"]["name"]) + block = np.zeros((n_channels, stop - start), dtype=data.dtype) + block[idx] = all_data # shape = (n_channels, n_samples) + + # Then store the result where it needs to go + _mult_cal_one(data, block, idx, cals, mult) diff --git a/mne/io/neuralynx/tests/__init__.py b/mne/io/neuralynx/tests/__init__.py new file mode 100644 index 00000000000..04c673b4e4d --- /dev/null +++ b/mne/io/neuralynx/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
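For context, a minimal usage sketch of the new reader added above (not part of the patch): the folder path and the exclusion pattern are hypothetical, while the keyword arguments follow the read_raw_neuralynx docstring.

import mne

raw = mne.io.read_raw_neuralynx(
    "path/to/neuralynx_session",         # hypothetical folder of .ncs files
    exclude_fname_patterns=["*u*.ncs"],  # e.g. drop channels with a different sfreq
    preload=True,
)
print(raw.info["highpass"], raw.info["lowpass"])  # filter settings read from headers
print(raw.annotations)  # detected gaps are annotated as BAD_ACQ_SKIP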
diff --git a/mne/io/neuralynx/tests/test_neuralynx.py b/mne/io/neuralynx/tests/test_neuralynx.py new file mode 100644 index 00000000000..18578ef4ab7 --- /dev/null +++ b/mne/io/neuralynx/tests/test_neuralynx.py @@ -0,0 +1,249 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +from ast import literal_eval +from datetime import datetime, timezone + +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy.io import loadmat + +from mne.datasets.testing import data_path, requires_testing_data +from mne.io import read_raw_neuralynx +from mne.io.neuralynx.neuralynx import _exclude_kwarg +from mne.io.tests.test_raw import _test_raw_reader + +testing_path = data_path(download=False) / "neuralynx" + +pytest.importorskip("neo") + + +def _nlxheader_to_dict(matdict: dict) -> dict: + """Convert the read-in "Header" field into a dict. + + All the key-value pairs of Header entries are formatted as strings + (e.g. np.array("-AdbitVolts 0.000323513")) so we reformat that + into dict by splitting at blank spaces. + """ + entries = matdict["Header"][ + 1::, : + ] # skip the first row which is just the "Header" string + + return { + arr.item().item().split(" ")[0].strip("-"): arr.item().item().split(" ")[-1] + for arr in entries + if arr[0].size > 0 + } + + +def _read_nlx_mat_chan(matfile: str) -> np.ndarray: + """Read a single channel from a Neuralynx .mat file.""" + mat = loadmat(matfile) + + hdr_dict = _nlxheader_to_dict(mat) + + # Nlx2MatCSC.m reads the data in N equal-sized (512-item) chunks + # this array (1, n_chunks) stores the number of valid samples + # per chunk (the last chunk is usually shorter) + n_valid_samples = mat["NumberOfValidSamples"].ravel() + + # concatenate chunks, respecting the number of valid samples + x = np.concatenate( + [mat["Samples"][0:n, i] for i, n in enumerate(n_valid_samples)] + ) # in ADBits + + # this value is the same for all channels and + # converts data from ADBits to Volts + conversionf = literal_eval(hdr_dict["ADBitVolts"]) + x = x * conversionf + + # if header says input was inverted at acquisition + # (possibly for spike detection or so?), flip it back + # NeuralynxIO does this under the hood in NeuralynxIO.parse_header() + # see this discussion: https://github.com/NeuralEnsemble/python-neo/issues/819 + if hdr_dict["InputInverted"] == "True": + x *= -1 + + return x + + +def _read_nlx_mat_chan_keep_gaps(matfile: str) -> np.ndarray: + """Read a single channel from a Neuralynx .mat file and keep invalid samples.""" + mat = loadmat(matfile) + + hdr_dict = _nlxheader_to_dict(mat) + + # Nlx2MatCSC.m reads the data in N equal-sized (512-item) chunks + # this array (1, n_chunks) stores the number of valid samples + # per chunk (the last chunk is usually shorter) + n_valid_samples = mat["NumberOfValidSamples"].ravel() + + # read in the artificial zeros so that + # we can compare with the mne padded arrays + ncs_records_with_gaps = [9, 15, 20] + for i in ncs_records_with_gaps: + n_valid_samples[i] = 512 + + # concatenate chunks, respecting the number of valid samples + x = np.concatenate( + [mat["Samples"][0:n, i] for i, n in enumerate(n_valid_samples)] + ) # in ADBits + + # this value is the same for all channels and + # converts data from ADBits to Volts + conversionf = literal_eval(hdr_dict["ADBitVolts"]) + x = x * conversionf + + # if header says input was inverted at acquisition + # (possibly for spike detection or so?), flip it back + # NeuralynxIO does this under 
the hood in NeuralynxIO.parse_header() + # see this discussion: https://github.com/NeuralEnsemble/python-neo/issues/819 + if hdr_dict["InputInverted"] == "True": + x *= -1 + + return x + + +# set known values for the Neuralynx data for testing +expected_chan_names = ["LAHC1", "LAHC2", "LAHC3", "xAIR1", "xEKG1"] +expected_hp_freq = 0.1 +expected_lp_freq = 500.0 +expected_sfreq = 2000.0 +expected_meas_date = datetime.strptime("2023/11/02 13:39:27", "%Y/%m/%d %H:%M:%S") + + +@requires_testing_data +def test_neuralynx(): + """Test basic reading.""" + from neo.io import NeuralynxIO + + excluded_ncs_files = [ + "LAHCu1.ncs", + "LAHC1_3_gaps.ncs", + "LAHC2_3_gaps.ncs", + ] + + # ==== MNE-Python ==== # + fname_patterns = ["*u*.ncs", "*3_gaps.ncs"] + raw = read_raw_neuralynx( + fname=testing_path, + preload=True, + exclude_fname_patterns=fname_patterns, + ) + + # test that we picked the right info from headers + assert raw.info["highpass"] == expected_hp_freq, "highpass freq not set correctly" + assert raw.info["lowpass"] == expected_lp_freq, "lowpass freq not set correctly" + assert raw.info["sfreq"] == expected_sfreq, "sampling freq not set correctly" + + meas_date_utc = expected_meas_date.astimezone(timezone.utc) + assert raw.info["meas_date"] == meas_date_utc, "meas_date not set correctly" + + # test that channel selection worked + assert raw.ch_names == expected_chan_names, ( + "labels in raw.ch_names don't match expected channel names" + ) + + mne_y = raw.get_data() # in V + + # ==== NeuralynxIO ==== # + nlx_reader = NeuralynxIO(dirname=testing_path, **_exclude_kwarg(excluded_ncs_files)) + bl = nlx_reader.read( + lazy=False + ) # read a single block which contains the data split in segments + + # concatenate all signals and times from all segments (== total recording) + nlx_y = np.concatenate( + [sig.magnitude for seg in bl[0].segments for sig in seg.analogsignals] + ).T + nlx_y *= 1e-6 # convert from uV to V + + nlx_t = np.concatenate( + [sig.times.magnitude for seg in bl[0].segments for sig in seg.analogsignals] + ).T + nlx_t = np.round(nlx_t, 3) # round to millisecond precision + + nlx_ch_names = [ch[0] for ch in nlx_reader.header["signal_channels"]] + + # ===== Nlx2MatCSC.m ===== # + matchans = ["LAHC1.mat", "LAHC2.mat", "LAHC3.mat", "xAIR1.mat", "xEKG1.mat"] + + # (n_chan, n_samples) array, in V + mat_y = np.stack( + [_read_nlx_mat_chan(os.path.join(testing_path, ch)) for ch in matchans] + ) + + # ===== Check sample values across MNE-Python, NeuralynxIO and MATLAB ===== # + assert nlx_ch_names == raw.ch_names # check channel names + + assert_allclose( + mne_y, nlx_y, rtol=1e-6, err_msg="MNE and NeuralynxIO not all close" + ) # data + assert_allclose( + mne_y, mat_y, rtol=1e-6, err_msg="MNE and Nlx2MatCSC.m not all close" + ) # data + + _test_raw_reader( + read_raw_neuralynx, + fname=testing_path, + exclude_fname_patterns=fname_patterns, + ) + + +@requires_testing_data +def test_neuralynx_gaps(): + """Test gap detection.""" + # ignore files with no gaps + ignored_ncs_files = [ + "LAHC1.ncs", + "LAHC2.ncs", + "LAHC3.ncs", + "xAIR1.ncs", + "xEKG1.ncs", + "LAHCu1.ncs", + ] + raw = read_raw_neuralynx( + fname=testing_path, + preload=True, + exclude_fname_patterns=ignored_ncs_files, + ) + mne_y, _ = raw.get_data(return_times=True) # in V + + # there should be 2 channels with 3 gaps (of 130 samples in total) + n_expected_gaps = 3 + n_expected_missing_samples = 130 + assert len(raw.annotations) == n_expected_gaps, "Wrong number of gaps detected" + assert (mne_y[0, :] == 0).sum() == 
n_expected_missing_samples, ( + "Number of true and inferred missing samples differ" + ) + + # read in .mat files containing original gaps + matchans = ["LAHC1_3_gaps.mat", "LAHC2_3_gaps.mat"] + + # (n_chan, n_samples) array, in V + mat_y = np.stack( + [ + _read_nlx_mat_chan_keep_gaps(os.path.join(testing_path, ch)) + for ch in matchans + ] + ) + + # compare originally modified .ncs arrays with MNE-padded arrays + # and test that we back-inserted 0's at the right places + assert_allclose( + mne_y, mat_y, rtol=1e-6, err_msg="MNE and Nlx2MatCSC.m not all close" + ) + + # test that channel selection works + raw = read_raw_neuralynx( + fname=testing_path, + preload=False, + exclude_fname_patterns=ignored_ncs_files, + ) + + raw.pick("LAHC2") + assert raw.ch_names == ["LAHC2"] + raw.load_data() # before gh-12357 this would fail diff --git a/mne/io/nicolet/__init__.py b/mne/io/nicolet/__init__.py index c3253d3cbdf..6e7ad05b63d 100644 --- a/mne/io/nicolet/__init__.py +++ b/mne/io/nicolet/__init__.py @@ -1,7 +1,7 @@ """Nicolet module for conversion to FIF.""" -# Author: Jaakko Leppakangas -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .nicolet import read_raw_nicolet diff --git a/mne/io/nicolet/nicolet.py b/mne/io/nicolet/nicolet.py index 6681dcff523..05b8035bddd 100644 --- a/mne/io/nicolet/nicolet.py +++ b/mne/io/nicolet/nicolet.py @@ -1,25 +1,27 @@ -# Author: Jaakko Leppakangas -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import numpy as np -from os import path -import datetime import calendar +import datetime +from os import path -from ...utils import logger, fill_doc -from ..utils import _read_segments_file, _find_channels, _create_chs +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _create_chs, _find_channels, _read_segments_file +from ...utils import fill_doc, logger from ..base import BaseRaw -from ..meas_info import _empty_info -from ..constants import FIFF @fill_doc -def read_raw_nicolet(input_fname, ch_type, eog=(), - ecg=(), emg=(), misc=(), preload=False, verbose=None): +def read_raw_nicolet( + input_fname, ch_type, eog=(), ecg=(), emg=(), misc=(), preload=False, verbose=None +) -> "RawNicolet": """Read Nicolet data as raw object. - ..note:: This reader takes data files with the extension ``.data`` as an + .. note:: This reader takes data files with the extension ``.data`` as an input. The header file with the same file name stem and an extension ``.head`` is expected to be found in the same directory. @@ -58,68 +60,80 @@ def read_raw_nicolet(input_fname, ch_type, eog=(), -------- mne.io.Raw : Documentation of attributes and methods. """ - return RawNicolet(input_fname, ch_type, eog=eog, ecg=ecg, - emg=emg, misc=misc, preload=preload, verbose=verbose) + return RawNicolet( + input_fname, + ch_type, + eog=eog, + ecg=ecg, + emg=emg, + misc=misc, + preload=preload, + verbose=verbose, + ) def _get_nicolet_info(fname, ch_type, eog, ecg, emg, misc): """Extract info from Nicolet header files.""" fname, extension = path.splitext(fname) - if extension != '.data': - raise ValueError( - f'File name should end with .data not "{extension}".' 
- ) + if extension != ".data": + raise ValueError(f'File name should end with .data not "{extension}".') - header = fname + '.head' + header = fname + ".head" - logger.info('Reading header...') + logger.info("Reading header...") header_info = dict() - with open(header, 'r') as fid: + with open(header) as fid: for line in fid: - var, value = line.split('=') - if var == 'elec_names': - value = value[1:-2].split(',') # strip brackets - elif var == 'conversion_factor': + var, value = line.split("=") + if var == "elec_names": + value = value[1:-2].split(",") # strip brackets + elif var == "conversion_factor": value = float(value) - elif var in ['num_channels', 'rec_id', 'adm_id', 'pat_id', - 'num_samples']: + elif var in ["num_channels", "rec_id", "adm_id", "pat_id", "num_samples"]: value = int(value) - elif var != 'start_ts': + elif var != "start_ts": value = float(value) header_info[var] = value - ch_names = header_info['elec_names'] - if eog == 'auto': - eog = _find_channels(ch_names, 'EOG') - if ecg == 'auto': - ecg = _find_channels(ch_names, 'ECG') - if emg == 'auto': - emg = _find_channels(ch_names, 'EMG') - - date, time = header_info['start_ts'].split() - date = date.split('-') - time = time.split(':') - sec, msec = time[2].split('.') - date = datetime.datetime(int(date[0]), int(date[1]), int(date[2]), - int(time[0]), int(time[1]), int(sec), int(msec)) - info = _empty_info(header_info['sample_freq']) - info['meas_date'] = (calendar.timegm(date.utctimetuple()), 0) - - if ch_type == 'eeg': + ch_names = header_info["elec_names"] + if eog == "auto": + eog = _find_channels(ch_names, "EOG") + if ecg == "auto": + ecg = _find_channels(ch_names, "ECG") + if emg == "auto": + emg = _find_channels(ch_names, "EMG") + + date, time = header_info["start_ts"].split() + date = date.split("-") + time = time.split(":") + sec, msec = time[2].split(".") + date = datetime.datetime( + int(date[0]), + int(date[1]), + int(date[2]), + int(time[0]), + int(time[1]), + int(sec), + int(msec), + ) + info = _empty_info(header_info["sample_freq"]) + info["meas_date"] = (calendar.timegm(date.utctimetuple()), 0) + + if ch_type == "eeg": ch_coil = FIFF.FIFFV_COIL_EEG ch_kind = FIFF.FIFFV_EEG_CH - elif ch_type == 'seeg': + elif ch_type == "seeg": ch_coil = FIFF.FIFFV_COIL_EEG ch_kind = FIFF.FIFFV_SEEG_CH else: - raise TypeError("Channel type not recognized. Available types are " - "'eeg' and 'seeg'.") - cals = np.repeat(header_info['conversion_factor'] * 1e-6, len(ch_names)) - info['chs'] = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, - misc) - info['highpass'] = 0. - info['lowpass'] = info['sfreq'] / 2.0 + raise TypeError( + "Channel type not recognized. Available types are 'eeg' and 'seeg'." + ) + cals = np.repeat(header_info["conversion_factor"] * 1e-6, len(ch_names)) + info["chs"] = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc) + info["highpass"] = 0.0 + info["lowpass"] = info["sfreq"] / 2.0 info._unlocked = False info._update_redundant() return info, header_info @@ -158,19 +172,30 @@ class RawNicolet(BaseRaw): mne.io.Raw : Documentation of attributes and methods. 
""" - def __init__(self, input_fname, ch_type, eog=(), - ecg=(), emg=(), misc=(), preload=False, - verbose=None): # noqa: D102 + def __init__( + self, + input_fname, + ch_type, + eog=(), + ecg=(), + emg=(), + misc=(), + preload=False, + verbose=None, + ): input_fname = path.abspath(input_fname) - info, header_info = _get_nicolet_info(input_fname, ch_type, eog, ecg, - emg, misc) - last_samps = [header_info['num_samples'] - 1] - super(RawNicolet, self).__init__( - info, preload, filenames=[input_fname], raw_extras=[header_info], - last_samps=last_samps, orig_format='int', - verbose=verbose) + info, header_info = _get_nicolet_info(input_fname, ch_type, eog, ecg, emg, misc) + last_samps = [header_info["num_samples"] - 1] + super().__init__( + info, + preload, + filenames=[input_fname], + raw_extras=[header_info], + last_samps=last_samps, + orig_format="int", + verbose=verbose, + ) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" - _read_segments_file( - self, data, idx, fi, start, stop, cals, mult, dtype=' -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from pathlib import Path +import pytest + from mne.io import read_raw_nicolet from mne.io.tests.test_raw import _test_raw_reader -import pytest - base_dir = Path(__file__).parent / "data" fname_data = base_dir / "test_nicolet_raw.data" fname_head = base_dir / "test_nicolet_raw.head" @@ -17,9 +16,17 @@ def test_data(): """Test reading raw nicolet files.""" - _test_raw_reader(read_raw_nicolet, input_fname=fname_data, ch_type='eeg', - ecg='auto', eog='auto', emg='auto', misc=['PHO']) - - with pytest.raises(ValueError, - match='File name should end with .data not ".head".'): - read_raw_nicolet(fname_head, 'eeg') + _test_raw_reader( + read_raw_nicolet, + input_fname=fname_data, + ch_type="eeg", + ecg="auto", + eog="auto", + emg="auto", + misc=["PHO"], + ) + + with pytest.raises( + ValueError, match='File name should end with .data not ".head".' + ): + read_raw_nicolet(fname_head, "eeg") diff --git a/mne/io/nihon/__init__.py b/mne/io/nihon/__init__.py index 09a735eb6bc..ae63480559e 100644 --- a/mne/io/nihon/__init__.py +++ b/mne/io/nihon/__init__.py @@ -1,7 +1,7 @@ """Nihon Kohden module for conversion to FIF.""" -# Author: Fede Raimondo -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .nihon import read_raw_nihon diff --git a/mne/io/nihon/nihon.py b/mne/io/nihon/nihon.py index ca287427df4..0c474a4b250 100644 --- a/mne/io/nihon/nihon.py +++ b/mne/io/nihon/nihon.py @@ -1,6 +1,6 @@ -# Authors: Federico Raimondo -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
from collections import OrderedDict from datetime import datetime, timezone @@ -8,11 +8,11 @@ import numpy as np -from ...utils import fill_doc, logger, verbose, warn, _check_fname -from ..base import BaseRaw -from ..meas_info import create_info +from ..._fiff.meas_info import create_info +from ..._fiff.utils import _mult_cal_one from ...annotations import Annotations -from ..utils import _mult_cal_one +from ...utils import _check_fname, fill_doc, logger, verbose, warn +from ..base import BaseRaw def _ensure_path(fname): @@ -23,7 +23,9 @@ def _ensure_path(fname): @fill_doc -def read_raw_nihon(fname, preload=False, verbose=None): +def read_raw_nihon( + fname, preload=False, *, encoding="utf-8", verbose=None +) -> "RawNihon": """Reader for an Nihon Kohden EEG file. Parameters @@ -32,6 +34,9 @@ def read_raw_nihon(fname, preload=False, verbose=None): Path to the Nihon Kohden data file (``.EEG``). preload : bool If True, all data are loaded at initialization. + %(encoding_nihon)s + + .. versionadded:: 1.11 %(verbose)s Returns @@ -44,95 +49,127 @@ def read_raw_nihon(fname, preload=False, verbose=None): -------- mne.io.Raw : Documentation of attributes and methods of RawNihon. """ - return RawNihon(fname, preload, verbose) + return RawNihon(fname, preload, encoding=encoding, verbose=verbose) _valid_headers = [ - 'EEG-1100A V01.00', - 'EEG-1100B V01.00', - 'EEG-1100C V01.00', - 'QI-403A V01.00', - 'QI-403A V02.00', - 'EEG-2100 V01.00', - 'EEG-2100 V02.00', - 'DAE-2100D V01.30', - 'DAE-2100D V02.00', - # 'EEG-1200A V01.00', # Not working for the moment. + "EEG-1100A V01.00", + "EEG-1100B V01.00", + "EEG-1100C V01.00", + "QI-403A V01.00", + "QI-403A V02.00", + "EEG-2100 V01.00", + "EEG-2100 V02.00", + "DAE-2100D V01.30", + "DAE-2100D V02.00", + "EEG-1200A V01.00", ] def _read_nihon_metadata(fname): metadata = {} fname = _ensure_path(fname) - pnt_fname = fname.with_suffix('.PNT') + pnt_fname = fname.with_suffix(".PNT") if not pnt_fname.exists(): - warn('No PNT file exists. Metadata will be blank') - return metadata - logger.info('Found PNT file, reading metadata.') - with open(pnt_fname, 'r') as fid: - version = np.fromfile(fid, '|S16', 1).astype('U16')[0] + pnt_fname = fname.with_suffix(".pnt") + if not pnt_fname.exists(): + warn("No PNT file exists. 
Metadata will be blank") + return metadata + logger.info("Found PNT file, reading metadata.") + with open(pnt_fname) as fid: + version = np.fromfile(fid, "|S16", 1).astype("U16")[0] if version not in _valid_headers: - raise ValueError(f'Not a valid Nihon Kohden PNT file ({version})') - metadata['version'] = version + raise ValueError(f"Not a valid Nihon Kohden PNT file ({version})") + metadata["version"] = version # Read timestamp fid.seek(0x40) - meas_str = np.fromfile(fid, '|S14', 1).astype('U14')[0] - meas_date = datetime.strptime(meas_str, '%Y%m%d%H%M%S') + meas_str = np.fromfile(fid, "|S14", 1).astype("U14")[0] + meas_date = datetime.strptime(meas_str, "%Y%m%d%H%M%S") meas_date = meas_date.replace(tzinfo=timezone.utc) - metadata['meas_date'] = meas_date + metadata["meas_date"] = meas_date return metadata _default_chan_labels = [ - 'FP1', 'FP2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'F7', 'F8', - 'T3', 'T4', 'T5', 'T6', 'FZ', 'CZ', 'PZ', 'E', 'PG1', 'PG2', 'A1', 'A2', - 'T1', 'T2' + "FP1", + "FP2", + "F3", + "F4", + "C3", + "C4", + "P3", + "P4", + "O1", + "O2", + "F7", + "F8", + "T3", + "T4", + "T5", + "T6", + "FZ", + "CZ", + "PZ", + "E", + "PG1", + "PG2", + "A1", + "A2", + "T1", + "T2", ] -_default_chan_labels += [f'X{i}' for i in range(1, 12)] -_default_chan_labels += [f'NA{i}' for i in range(1, 6)] -_default_chan_labels += [f'DC{i:02}' for i in range(1, 33)] -_default_chan_labels += ['BN1', 'BN2', 'Mark1', 'Mark2'] -_default_chan_labels += [f'NA{i}' for i in range(6, 28)] -_default_chan_labels += ['X12/BP1', 'X13/BP2', 'X14/BP3', 'X15/BP4'] -_default_chan_labels += [f'X{i}' for i in range(16, 166)] -_default_chan_labels += ['NA28', 'Z'] +_default_chan_labels += [f"X{i}" for i in range(1, 12)] +_default_chan_labels += [f"NA{i}" for i in range(1, 6)] +_default_chan_labels += [f"DC{i:02}" for i in range(1, 33)] +_default_chan_labels += ["BN1", "BN2", "Mark1", "Mark2"] +_default_chan_labels += [f"NA{i}" for i in range(6, 28)] +_default_chan_labels += ["X12/BP1", "X13/BP2", "X14/BP3", "X15/BP4"] +_default_chan_labels += [f"X{i}" for i in range(16, 166)] +_default_chan_labels += ["NA28", "Z"] -_encodings = ('utf-8', 'latin1') +_encodings = ("utf-8", "latin1") def _read_21e_file(fname): fname = _ensure_path(fname) - e_fname = fname.with_suffix('.21E') + e_fname = fname.with_suffix(".21E") _chan_labels = [x for x in _default_chan_labels] if e_fname.exists(): # Read the 21E file and update the labels accordingly. - logger.info('Found 21E file, reading channel names.') + logger.info("Found 21E file, reading channel names.") for enc in _encodings: try: - with open(e_fname, 'r', encoding=enc) as fid: + with open(e_fname, encoding=enc) as fid: keep_parsing = False for line in fid: - if line.startswith('['): - if 'ELECTRODE' in line or 'REFERENCE' in line: + if line.startswith("["): + if "ELECTRODE" in line or "REFERENCE" in line: keep_parsing = True else: keep_parsing = False elif keep_parsing is True: - idx, name = line.split('=') + idx, name = line.split("=") idx = int(idx) if idx >= len(_chan_labels): n = idx - len(_chan_labels) + 1 - _chan_labels.extend(['UNK'] * n) + _chan_labels.extend(["UNK"] * n) _chan_labels[idx] = name.strip() except UnicodeDecodeError: pass else: break else: - warn(f'Could not decode 21E file as one of {_encodings}; ' - f'Default channel names are chosen.') + warn( + f"Could not decode {e_fname} as one of {_encodings}; " + f"Default channel names are chosen." 
+ ) + else: + warn( + f"Could not find {e_fname} containing channel definitions; " + f"Default channel names are chosen." + ) return _chan_labels @@ -142,48 +179,47 @@ def _read_nihon_header(fname): fname = _ensure_path(fname) _chan_labels = _read_21e_file(fname) header = {} - logger.info(f'Reading header from {fname}') - with open(fname, 'r') as fid: - version = np.fromfile(fid, '|S16', 1).astype('U16')[0] + logger.info(f"Reading header from {fname}") + with open(fname) as fid: + version = np.fromfile(fid, "|S16", 1).astype("U16")[0] if version not in _valid_headers: - raise ValueError( - 'Not a valid Nihon Kohden EEG file ({})'.format(version)) + raise ValueError(f"Not a valid Nihon Kohden EEG file ({version})") fid.seek(0x0081) - control_block = np.fromfile(fid, '|S16', 1).astype('U16')[0] + control_block = np.fromfile(fid, "|S16", 1).astype("U16")[0] if control_block not in _valid_headers: - raise ValueError('Not a valid Nihon Kohden EEG file ' - '(control block {})'.format(version)) + raise ValueError( + f"Not a valid Nihon Kohden EEG file (control block {version})" + ) - fid.seek(0x17fe) + fid.seek(0x17FE) waveform_sign = np.fromfile(fid, np.uint8, 1)[0] if waveform_sign != 1: - raise ValueError('Not a valid Nihon Kohden EEG file ' - '(waveform block)') - header['version'] = version + raise ValueError("Not a valid Nihon Kohden EEG file (waveform block)") + header["version"] = version fid.seek(0x0091) n_ctlblocks = np.fromfile(fid, np.uint8, 1)[0] - header['n_ctlblocks'] = n_ctlblocks + header["n_ctlblocks"] = n_ctlblocks controlblocks = [] for i_ctl_block in range(n_ctlblocks): t_controlblock = {} fid.seek(0x0092 + i_ctl_block * 20) t_ctl_address = np.fromfile(fid, np.uint32, 1)[0] - t_controlblock['address'] = t_ctl_address + t_controlblock["address"] = t_ctl_address fid.seek(t_ctl_address + 17) n_datablocks = np.fromfile(fid, np.uint8, 1)[0] - t_controlblock['n_datablocks'] = n_datablocks - t_controlblock['datablocks'] = [] + t_controlblock["n_datablocks"] = n_datablocks + t_controlblock["datablocks"] = [] for i_data_block in range(n_datablocks): t_datablock = {} fid.seek(t_ctl_address + i_data_block * 20 + 18) t_data_address = np.fromfile(fid, np.uint32, 1)[0] - t_datablock['address'] = t_data_address + t_datablock["address"] = t_data_address fid.seek(t_data_address + 0x26) - t_n_channels = np.fromfile(fid, np.uint8, 1)[0] - t_datablock['n_channels'] = t_n_channels + t_n_channels = np.fromfile(fid, np.uint8, 1)[0].astype(np.int64) + t_datablock["n_channels"] = t_n_channels t_channels = [] for i_ch in range(t_n_channels): @@ -191,130 +227,171 @@ def _read_nihon_header(fname): t_idx = np.fromfile(fid, np.uint8, 1)[0] t_channels.append(_chan_labels[t_idx]) - t_datablock['channels'] = t_channels + t_datablock["channels"] = t_channels fid.seek(t_data_address + 0x1C) - t_record_duration = np.fromfile(fid, np.uint32, 1)[0] - t_datablock['duration'] = t_record_duration + t_record_duration = np.fromfile(fid, np.uint32, 1)[0].astype(np.int64) + t_datablock["duration"] = t_record_duration - fid.seek(t_data_address + 0x1a) + fid.seek(t_data_address + 0x1A) sfreq = np.fromfile(fid, np.uint16, 1)[0] & 0x3FFF - t_datablock['sfreq'] = sfreq + t_datablock["sfreq"] = sfreq.astype(np.int64) - t_datablock['n_samples'] = int(t_record_duration * sfreq / 10) - t_controlblock['datablocks'].append(t_datablock) + t_datablock["n_samples"] = np.int64(t_record_duration * sfreq // 10) + t_controlblock["datablocks"].append(t_datablock) controlblocks.append(t_controlblock) - header['controlblocks'] = 
controlblocks + header["controlblocks"] = controlblocks # Now check that every data block has the same channels and sfreq chans = [] sfreqs = [] nsamples = [] - for t_ctl in header['controlblocks']: - for t_dtb in t_ctl['datablocks']: - chans.append(t_dtb['channels']) - sfreqs.append(t_dtb['sfreq']) - nsamples.append(t_dtb['n_samples']) + for t_ctl in header["controlblocks"]: + for t_dtb in t_ctl["datablocks"]: + chans.append(t_dtb["channels"]) + sfreqs.append(t_dtb["sfreq"]) + nsamples.append(t_dtb["n_samples"]) for i_elem in range(1, len(chans)): if chans[0] != chans[i_elem]: - raise ValueError('Channel names in datablocks do not match') + raise ValueError("Channel names in datablocks do not match") if sfreqs[0] != sfreqs[i_elem]: - raise ValueError('Sample frequency in datablocks do not match') - header['ch_names'] = chans[0] - header['sfreq'] = sfreqs[0] - header['n_samples'] = np.sum(nsamples) + raise ValueError("Sample frequency in datablocks do not match") + header["ch_names"] = chans[0] + header["sfreq"] = sfreqs[0] + header["n_samples"] = np.sum(nsamples) # TODO: Support more than one controlblock and more than one datablock - if header['n_ctlblocks'] != 1: - raise NotImplementedError('I dont know how to read more than one ' - 'control block for this type of file :(') - if header['controlblocks'][0]['n_datablocks'] > 1: + if header["n_ctlblocks"] != 1: + raise NotImplementedError( + "I dont know how to read more than one " + "control block for this type of file :(" + ) + if header["controlblocks"][0]["n_datablocks"] > 1: # Multiple blocks, check that they all have the same kind of data - datablocks = header['controlblocks'][0]['datablocks'] + datablocks = header["controlblocks"][0]["datablocks"] block_0 = datablocks[0] for t_block in datablocks[1:]: - if block_0['n_channels'] != t_block['n_channels']: + if block_0["n_channels"] != t_block["n_channels"]: raise ValueError( - 'Cannot read NK file with different number of channels ' - 'in each datablock') - if block_0['channels'] != t_block['channels']: + "Cannot read NK file with different number of channels " + "in each datablock" + ) + if block_0["channels"] != t_block["channels"]: raise ValueError( - 'Cannot read NK file with different channels in each ' - 'datablock') - if block_0['sfreq'] != t_block['sfreq']: + "Cannot read NK file with different channels in each datablock" + ) + if block_0["sfreq"] != t_block["sfreq"]: raise ValueError( - 'Cannot read NK file with different sfreq in each ' - 'datablock') + "Cannot read NK file with different sfreq in each datablock" + ) + + return header, _chan_labels + + +def _read_event_log_block(fid, t_block, version): + fid.seek(0x92 + t_block * 20) + data = np.fromfile(fid, np.uint32, 1) + if data.size == 0 or data[0] == 0: + return + t_blk_address = data[0] - return header + fid.seek(t_blk_address + 0x1) + data = np.fromfile(fid, "|S16", 1).astype("U16") + if data.size == 0 or data[0] != version: + return + fid.seek(t_blk_address + 0x12) + data = np.fromfile(fid, np.uint8, 1) + if data.size == 0: + return + n_logs = data[0] -def _read_nihon_annotations(fname): + fid.seek(t_blk_address + 0x14) + return np.fromfile(fid, "|S45", n_logs) + + +def _parse_event_log(event_log): + t_desc = event_log[:20] + hour, minute, second = ( + int(event_log[20:22]), + int(event_log[22:24]), + int(event_log[24:26]), + ) + t_onset = hour * 3600 + minute * 60 + second + return t_desc, t_onset + + +def _parse_sub_event_log(sub_event_log): + t_sub_desc = sub_event_log[:20] + t_sub_onset = 
int(sub_event_log[24:30]) / 1e6 + return t_sub_desc, t_sub_onset + + +def _read_nihon_annotations(fname, encoding="utf-8"): fname = _ensure_path(fname) - log_fname = fname.with_suffix('.LOG') + log_fname = fname.with_suffix(".LOG") if not log_fname.exists(): - warn('No LOG file exists. Annotations will not be read') + warn("No LOG file exists. Annotations will not be read") return dict(onset=[], duration=[], description=[]) - logger.info('Found LOG file, reading events.') - with open(log_fname, 'r') as fid: - version = np.fromfile(fid, '|S16', 1).astype('U16')[0] + logger.info("Found LOG file, reading events.") + with open(log_fname) as fid: + version = np.fromfile(fid, "|S16", 1).astype("U16")[0] if version not in _valid_headers: - raise ValueError( - 'Not a valid Nihon Kohden LOG file ({})'.format(version)) + raise ValueError(f"Not a valid Nihon Kohden LOG file ({version})") fid.seek(0x91) n_logblocks = np.fromfile(fid, np.uint8, 1)[0] all_onsets = [] all_descriptions = [] + may_have_sub_blocks = n_logblocks <= 21 for t_block in range(n_logblocks): - fid.seek(0x92 + t_block * 20) - t_blk_address = np.fromfile(fid, np.uint32, 1)[0] - fid.seek(t_blk_address + 0x12) - n_logs = np.fromfile(fid, np.uint8, 1)[0] - fid.seek(t_blk_address + 0x14) - t_logs = np.fromfile(fid, '|S45', n_logs) - for t_log in t_logs: - for enc in _encodings: - try: - t_log = t_log.decode(enc) - except UnicodeDecodeError: - pass - else: - break - else: - warn(f'Could not decode log as one of {_encodings}') + t_logs = _read_event_log_block(fid, t_block, version) + t_sub_logs = None + if may_have_sub_blocks: + t_sub_logs = _read_event_log_block(fid, t_block + 22, version) + + for li, t_log in enumerate(t_logs): + t_desc, t_onset = _parse_event_log(t_log) + if t_sub_logs is not None and t_sub_logs.size == t_logs.size: + t_sub_desc, t_sub_onset = _parse_sub_event_log(t_sub_logs[li]) + t_desc += t_sub_desc + t_onset += t_sub_onset + + t_desc = t_desc.rstrip(b"\x00") + try: + t_desc = t_desc.decode(encoding) + except UnicodeDecodeError: + warn(f"Could not decode log as {encoding}") continue - t_desc = t_log[:20].strip('\x00') - t_onset = datetime.strptime(t_log[20:26], '%H%M%S') - t_onset = (t_onset.hour * 3600 + t_onset.minute * 60 + - t_onset.second) + all_onsets.append(t_onset) all_descriptions.append(t_desc) annots = dict( onset=all_onsets, duration=[0] * len(all_onsets), - description=all_descriptions) + description=all_descriptions, + ) return annots def _map_ch_to_type(ch_name): - ch_type_pattern = OrderedDict([ - ('stim', ('Mark',)), ('misc', ('DC', 'NA', 'Z', '$')), - ('bio', ('X',))]) + ch_type_pattern = OrderedDict( + [("stim", ("Mark",)), ("misc", ("DC", "NA", "Z", "$")), ("bio", ("X",))] + ) for key, kinds in ch_type_pattern.items(): if any(kind in ch_name for kind in kinds): return key - return 'eeg' + return "eeg" -def _map_ch_to_specs(ch_name): +def _map_ch_to_specs(ch_name, chan_labels_upper): unit_mult = 1e-3 phys_min = -12002.9 phys_max = 12002.56 dig_min = -32768 - if ch_name.upper() in _default_chan_labels: - idx = _default_chan_labels.index(ch_name.upper()) + if ch_name.upper() in chan_labels_upper: + idx = chan_labels_upper.index(ch_name.upper()) if (idx < 42 or idx > 73) and idx not in [76, 77]: unit_mult = 1e-6 phys_min = -3200 @@ -323,8 +400,14 @@ def _map_ch_to_specs(ch_name): cal = t_range / 65535 offset = phys_min - (dig_min * cal) - out = dict(unit=unit_mult, phys_min=phys_min, phys_max=phys_max, - dig_min=dig_min, cal=cal, offset=offset) + out = dict( + unit=unit_mult, + phys_min=phys_min, + 
phys_max=phys_max, + dig_min=dig_min, + cal=cal, + offset=offset, + ) return out @@ -338,6 +421,9 @@ class RawNihon(BaseRaw): Path to the Nihon Kohden data ``.eeg`` file. preload : bool If True, all data are loaded at initialization. + %(encoding_nihon)s + + .. versionadded:: 1.11 %(verbose)s See Also @@ -346,84 +432,89 @@ class RawNihon(BaseRaw): """ @verbose - def __init__(self, fname, preload=False, verbose=None): - fname = _check_fname(fname, 'read', True, 'fname') + def __init__(self, fname, preload=False, *, encoding="utf-8", verbose=None): + fname = _check_fname(fname, "read", True, "fname") data_name = fname.name - logger.info('Loading %s' % data_name) + logger.info(f"Loading {data_name}") - header = _read_nihon_header(fname) + # chan_labels are electrode codes defined in the .21E file. + # It is not the same as header["ch_names"]. + header, chan_labels = _read_nihon_header(fname) metadata = _read_nihon_metadata(fname) # n_chan = len(header['ch_names']) + 1 - sfreq = header['sfreq'] + sfreq = header["sfreq"] # data are multiplexed int16 - ch_names = header['ch_names'] + ch_names = header["ch_names"] ch_types = [_map_ch_to_type(x) for x in ch_names] info = create_info(ch_names, sfreq, ch_types) - n_samples = header['n_samples'] + n_samples = header["n_samples"] - if 'meas_date' in metadata: + if "meas_date" in metadata: with info._unlock(): - info['meas_date'] = metadata['meas_date'] - chs = {x: _map_ch_to_specs(x) for x in info['ch_names']} - - cal = np.array( - [chs[x]['cal'] for x in info['ch_names']], float)[:, np.newaxis] - offsets = np.array( - [chs[x]['offset'] for x in info['ch_names']], float)[:, np.newaxis] - gains = np.array( - [chs[x]['unit'] for x in info['ch_names']], float)[:, np.newaxis] - - raw_extras = dict( - cal=cal, offsets=offsets, gains=gains, header=header) - self._header = header - - for i_ch, ch_name in enumerate(info['ch_names']): - t_range = (chs[ch_name]['phys_max'] - chs[ch_name]['phys_min']) - info['chs'][i_ch]['range'] = t_range - info['chs'][i_ch]['cal'] = 1 / t_range - - super(RawNihon, self).__init__( - info, preload=preload, last_samps=(n_samples - 1,), - filenames=[fname.as_posix()], orig_format='short', - raw_extras=[raw_extras]) + info["meas_date"] = metadata["meas_date"] + + chan_labels_upper = [x.upper() for x in chan_labels] + chs = {x: _map_ch_to_specs(x, chan_labels_upper) for x in info["ch_names"]} + cal = np.array([chs[x]["cal"] for x in info["ch_names"]], float)[:, np.newaxis] + offsets = np.array([chs[x]["offset"] for x in info["ch_names"]], float)[ + :, np.newaxis + ] + gains = np.array([chs[x]["unit"] for x in info["ch_names"]], float)[ + :, np.newaxis + ] + + raw_extras = dict(cal=cal, offsets=offsets, gains=gains, header=header) + for i_ch, ch_name in enumerate(info["ch_names"]): + t_range = chs[ch_name]["phys_max"] - chs[ch_name]["phys_min"] + info["chs"][i_ch]["range"] = t_range + info["chs"][i_ch]["cal"] = 1 / t_range + + super().__init__( + info, + preload=preload, + last_samps=(n_samples - 1,), + filenames=[fname.as_posix()], + orig_format="short", + raw_extras=[raw_extras], + ) # Get annotations from LOG file - annots = _read_nihon_annotations(fname) + annots = _read_nihon_annotations(fname, encoding) # Annotate acquisition skips - controlblock = self._header['controlblocks'][0] + controlblock = header["controlblocks"][0] cur_sample = 0 - if controlblock['n_datablocks'] > 1: - for i_block in range(controlblock['n_datablocks'] - 1): - t_block = controlblock['datablocks'][i_block] - cur_sample = cur_sample + 
t_block['n_samples'] - cur_tpoint = (cur_sample - 0.5) / t_block['sfreq'] + if controlblock["n_datablocks"] > 1: + for i_block in range(controlblock["n_datablocks"] - 1): + t_block = controlblock["datablocks"][i_block] + cur_sample = cur_sample + t_block["n_samples"] + cur_tpoint = (cur_sample - 0.5) / t_block["sfreq"] # Add annotations as in append raw - annots['onset'].append(cur_tpoint) - annots['duration'].append(0.0) - annots['description'].append('BAD boundary') - annots['onset'].append(cur_tpoint) - annots['duration'].append(0.0) - annots['description'].append('EDGE boundary') - - annotations = Annotations(**annots, orig_time=info['meas_date']) + annots["onset"].append(cur_tpoint) + annots["duration"].append(0.0) + annots["description"].append("BAD boundary") + annots["onset"].append(cur_tpoint) + annots["duration"].append(0.0) + annots["description"].append("EDGE boundary") + + annotations = Annotations(**annots, orig_time=info["meas_date"]) self.set_annotations(annotations) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" # For now we assume one control block - header = self._raw_extras[fi]['header'] + header = self._raw_extras[fi]["header"] # Get the original cal, offsets and gains - cal = self._raw_extras[fi]['cal'] - offsets = self._raw_extras[fi]['offsets'] - gains = self._raw_extras[fi]['gains'] + cal = self._raw_extras[fi]["cal"] + offsets = self._raw_extras[fi]["offsets"] + gains = self._raw_extras[fi]["gains"] # get the right datablock - datablocks = header['controlblocks'][0]['datablocks'] - ends = np.cumsum([t['n_samples'] for t in datablocks]) + datablocks = header["controlblocks"][0]["datablocks"] + ends = np.cumsum([t["n_samples"] for t in datablocks]) start_block = np.where(start < ends)[0][0] stop_block = np.where(stop <= ends)[0][0] @@ -439,13 +530,18 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): new_stop = stop else: # Otherwise, stop on the last sample of the block - new_stop = t_block['n_samples'] + new_start + new_stop = t_block["n_samples"] + new_start samples_to_read = new_stop - new_start sample_stop = sample_start + samples_to_read self._read_segment_file( - data[:, sample_start:sample_stop], idx, fi, - new_start, new_stop, cals, mult + data[:, sample_start:sample_stop], + idx, + fi, + new_start, + new_stop, + cals, + mult, ) # Update variables for next loop @@ -454,9 +550,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): else: datablock = datablocks[start_block] - n_channels = datablock['n_channels'] + 1 - datastart = (datablock['address'] + 0x27 + - (datablock['n_channels'] * 10)) + n_channels = datablock["n_channels"] + 1 + datastart = datablock["address"] + 0x27 + (datablock["n_channels"] * 10) # Compute start offset based on the beginning of the block rel_start = start @@ -464,12 +559,12 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): rel_start = start - ends[start_block - 1] start_offset = datastart + rel_start * n_channels * 2 - with open(self._filenames[fi], 'rb') as fid: + with open(self.filenames[fi], "rb") as fid: to_read = (stop - start) * n_channels fid.seek(start_offset) - block_data = np.fromfile(fid, '<u2', to_read) diff --git a/mne/io/nihon/tests/test_nihon.py b/mne/io/nihon/tests/test_nihon.py --- a/mne/io/nihon/tests/test_nihon.py +++ b/mne/io/nihon/tests/test_nihon.py -# Authors: Federico Raimondo -# simplified BSD-3 license +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
import pytest -from numpy.testing import assert_array_almost_equal +from numpy.testing import assert_allclose -from mne.io import read_raw_nihon, read_raw_edf -from mne.io.tests.test_raw import _test_raw_reader from mne.datasets import testing -from mne.io.nihon.nihon import (_read_nihon_header, _read_nihon_metadata, - _read_nihon_annotations) +from mne.io import read_raw_edf, read_raw_nihon from mne.io.nihon import nihon +from mne.io.nihon.nihon import ( + _read_nihon_annotations, + _read_nihon_header, + _read_nihon_metadata, +) +from mne.io.tests.test_raw import _test_raw_reader data_path = testing.data_path(download=False) @@ -18,74 +21,107 @@ @testing.requires_testing_data def test_nihon_eeg(): """Test reading Nihon Kohden EEG files.""" - fname = data_path / 'NihonKohden' / 'MB0400FU.EEG' + fname = data_path / "NihonKohden" / "MB0400FU.EEG" raw = read_raw_nihon(fname.as_posix(), preload=True) - assert 'RawNihon' in repr(raw) + assert "RawNihon" in repr(raw) _test_raw_reader(read_raw_nihon, fname=fname, test_scaling=False) - fname_edf = data_path / 'NihonKohden' / 'MB0400FU.EDF' + fname_edf = data_path / "NihonKohden" / "MB0400FU.EDF" raw_edf = read_raw_edf(fname_edf, preload=True) + raw_edf.drop_channels(["Events/Markers"]) assert raw._data.shape == raw_edf._data.shape - assert raw.info['sfreq'] == raw.info['sfreq'] - # ch names and order are switched in the EDF - edf_ch_names = {x: x.split(' ')[1].replace('-Ref', '') - for x in raw_edf.ch_names} + assert raw.info["sfreq"] == raw_edf.info["sfreq"] + # a couple of ch names differ in the EDF + edf_ch_names = {"EEG Mark1": "$A2", "EEG Mark2": "$A1"} raw_edf.rename_channels(edf_ch_names) assert raw.ch_names == raw_edf.ch_names - for i, an1 in enumerate(raw.annotations): - # EDF has some weird annotations, which are not in the LOG file - an2 = raw_edf.annotations[i * 2 + 1] - assert an1['onset'] == an2['onset'] - assert an1['duration'] == an2['duration'] - # Also, it prepends 'Segment: ' to some annotations - t_desc = an2['description'].replace('Segment: ', '') - assert an1['description'] == t_desc + assert len(raw.annotations) == len(raw_edf.annotations) + for an1, an2 in zip(raw.annotations, raw_edf.annotations): + assert an1["onset"] == an2["onset"] + assert an1["duration"] == an2["duration"] + assert an1["description"] == an2["description"].rstrip() - assert_array_almost_equal(raw._data, raw_edf._data) + assert_allclose(raw.get_data(), raw_edf.get_data()) - with pytest.raises(ValueError, match='Not a valid Nihon Kohden EEG file'): + with pytest.raises(ValueError, match="Not a valid Nihon Kohden EEG file"): raw = read_raw_nihon(fname_edf, preload=True) - with pytest.raises(ValueError, match='Not a valid Nihon Kohden EEG file'): - raw = _read_nihon_header(fname_edf) + with pytest.raises(ValueError, match="Not a valid Nihon Kohden EEG file"): + header, _ = _read_nihon_header(fname_edf) - bad_fname = data_path / 'eximia' / 'text_eximia.nxe' + bad_fname = data_path / "eximia" / "text_eximia.nxe" - msg = 'No PNT file exists. Metadata will be blank' + msg = "No PNT file exists. Metadata will be blank" with pytest.warns(RuntimeWarning, match=msg): meta = _read_nihon_metadata(bad_fname) assert len(meta) == 0 - msg = 'No LOG file exists. Annotations will not be read' + msg = "No LOG file exists. 
Annotations will not be read" with pytest.warns(RuntimeWarning, match=msg): annot = _read_nihon_annotations(bad_fname) assert all(len(x) == 0 for x in annot.values()) # the nihon test file has $A1 and $A2 in it, which are not EEG - assert '$A1' in raw.ch_names + assert "$A1" in raw.ch_names # assert that channels with $ are 'misc' - picks = [ch for ch in raw.ch_names if ch.startswith('$')] + picks = [ch for ch in raw.ch_names if ch.startswith("$")] ch_types = raw.get_channel_types(picks=picks) - assert all(ch == 'misc' for ch in ch_types) + assert all(ch == "misc" for ch in ch_types) @testing.requires_testing_data def test_nihon_duplicate_channels(monkeypatch): """Test deduplication of channel names.""" - fname = data_path / 'NihonKohden' / 'MB0400FU.EEG' + fname = data_path / "NihonKohden" / "MB0400FU.EEG" def return_channel_duplicates(fname): ch_names = nihon._default_chan_labels ch_names[1] = ch_names[0] return ch_names - monkeypatch.setattr(nihon, '_read_21e_file', return_channel_duplicates) + monkeypatch.setattr(nihon, "_read_21e_file", return_channel_duplicates) - assert len(nihon._read_21e_file(fname)) > \ - len(set(nihon._read_21e_file(fname))) - msg = 'Channel names are not unique, found duplicates for: ' \ - '{\'FP1\'}. Applying running numbers for duplicates.' + assert len(nihon._read_21e_file(fname)) > len(set(nihon._read_21e_file(fname))) + msg = ( + "Channel names are not unique, found duplicates for: " + "{'FP1'}. Applying running numbers for duplicates." + ) with pytest.warns(RuntimeWarning, match=msg): read_raw_nihon(fname) + + +@testing.requires_testing_data +def test_nihon_calibration(): + """Test handling of calibration factor and range in Nihon Kohden EEG files.""" + fname = data_path / "NihonKohden" / "DA00100E.EEG" + raw = read_raw_nihon(fname, preload=True, encoding="cp936") + + Fp1_idx = raw.ch_names.index("Fp1") + M1_idx = raw.ch_names.index("M1") + M2_idx = raw.ch_names.index("M2") + + Fp1_info = raw.info["chs"][Fp1_idx] + M1_info = raw.info["chs"][M1_idx] + M2_info = raw.info["chs"][M2_idx] + + # M1, M2 are EEG channels, just like Fp1. + # So they should have the same calibration factor and physical range. + assert_allclose(M1_info["cal"], Fp1_info["cal"]) + assert_allclose(M2_info["cal"], Fp1_info["cal"]) + assert_allclose(M1_info["range"], Fp1_info["range"]) + assert_allclose(M2_info["range"], Fp1_info["range"]) + + fname_edf = data_path / "NihonKohden" / "DA00100E.EDF" + raw_edf = read_raw_edf(fname_edf, preload=True) + raw_edf.drop_channels(["Events/Markers"]) + # a couple of ch names differ in the EDF + edf_ch_names = {"EEG Mark1": "$M1", "EEG Mark2": "$M2"} + raw_edf.rename_channels(edf_ch_names) + + assert raw.ch_names == raw_edf.ch_names + assert raw._data.shape == raw_edf._data.shape + assert raw.info["sfreq"] == raw_edf.info["sfreq"] + + assert_allclose(raw.get_data(), raw_edf.get_data()) diff --git a/mne/io/nirx/__init__.py b/mne/io/nirx/__init__.py index 0a8ee5e709d..5946836cda1 100644 --- a/mne/io/nirx/__init__.py +++ b/mne/io/nirx/__init__.py @@ -1,7 +1,7 @@ """fNIRS module for conversion to FIF.""" -# Author: Robert Luke -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
from .nirx import read_raw_nirx diff --git a/mne/io/nirx/_localized_abbr.py b/mne/io/nirx/_localized_abbr.py index 4e42f7ddcff..37e05016717 100644 --- a/mne/io/nirx/_localized_abbr.py +++ b/mne/io/nirx/_localized_abbr.py @@ -1,7 +1,8 @@ """Localizations for meas_date extraction.""" -# Authors: Eric Larson -# + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. # This file was generated on 2021/01/31 on an Ubuntu system. # When getting "unsupported locale setting" on Ubuntu (e.g., with localepurge), @@ -40,21 +41,106 @@ print('}\n') """ +# TODO: this should really be outsourced to a dedicated module like arrow or babel _localized_abbr = { - 'en_US.utf8': { - "month": {'jan': 'jan', 'feb': 'feb', 'mar': 'mar', 'apr': 'apr', 'may': 'may', 'jun': 'jun', 'jul': 'jul', 'aug': 'aug', 'sep': 'sep', 'oct': 'oct', 'nov': 'nov', 'dec': 'dec', }, # noqa - "weekday": {'sat': 'sat', 'sun': 'sun', 'mon': 'mon', 'tue': 'tue', 'wed': 'wed', 'thu': 'thu', 'fri': 'fri', }, # noqa + "en_US.utf8": { + "month": { + "jan": "jan", + "feb": "feb", + "mar": "mar", + "apr": "apr", + "may": "may", + "jun": "jun", + "jul": "jul", + "aug": "aug", + "sep": "sep", + "oct": "oct", + "nov": "nov", + "dec": "dec", + }, # noqa + "weekday": { + "sat": "sat", + "sun": "sun", + "mon": "mon", + "tue": "tue", + "wed": "wed", + "thu": "thu", + "fri": "fri", + }, # noqa }, - 'de_DE': { - "month": {'jan': 'jan', 'feb': 'feb', 'mär': 'mar', 'apr': 'apr', 'mai': 'may', 'jun': 'jun', 'jul': 'jul', 'aug': 'aug', 'sep': 'sep', 'okt': 'oct', 'nov': 'nov', 'dez': 'dec', }, # noqa - "weekday": {'sa': 'sat', 'so': 'sun', 'mo': 'mon', 'di': 'tue', 'mi': 'wed', 'do': 'thu', 'fr': 'fri', }, # noqa + "de_DE": { + "month": { + "jan": "jan", + "feb": "feb", + "mär": "mar", + "apr": "apr", + "mai": "may", + "jun": "jun", + "jul": "jul", + "aug": "aug", + "sep": "sep", + "okt": "oct", + "nov": "nov", + "dez": "dec", + }, # noqa + "weekday": { + "sa": "sat", + "so": "sun", + "mo": "mon", + "di": "tue", + "mi": "wed", + "do": "thu", + "fr": "fri", + }, # noqa }, - 'fr_FR': { - "month": {'janv.': 'jan', 'févr.': 'feb', 'mars': 'mar', 'avril': 'apr', 'mai': 'may', 'juin': 'jun', 'juil.': 'jul', 'août': 'aug', 'sept.': 'sep', 'oct.': 'oct', 'nov.': 'nov', 'déc.': 'dec', }, # noqa - "weekday": {'sam.': 'sat', 'dim.': 'sun', 'lun.': 'mon', 'mar.': 'tue', 'mer.': 'wed', 'jeu.': 'thu', 'ven.': 'fri', }, # noqa + "fr_FR": { + "month": { + "janv.": "jan", + "févr.": "feb", + "mars": "mar", + "avril": "apr", + "mai": "may", + "juin": "jun", + "juil.": "jul", + "août": "aug", + "sept.": "sep", + "oct.": "oct", + "nov.": "nov", + "déc.": "dec", + }, # noqa + "weekday": { + "sam.": "sat", + "dim.": "sun", + "lun.": "mon", + "mar.": "tue", + "mer.": "wed", + "jeu.": "thu", + "ven.": "fri", + }, # noqa }, - 'it_IT': { - "month": {'gen': 'jan', 'feb': 'feb', 'mar': 'mar', 'apr': 'apr', 'mag': 'may', 'giu': 'jun', 'lug': 'jul', 'ago': 'aug', 'set': 'sep', 'ott': 'oct', 'nov': 'nov', 'dic': 'dec', }, # noqa - "weekday": {'sab': 'sat', 'dom': 'sun', 'lun': 'mon', 'mar': 'tue', 'mer': 'wed', 'gio': 'thu', 'ven': 'fri', }, # noqa + "it_IT": { + "month": { + "gen": "jan", + "feb": "feb", + "mar": "mar", + "apr": "apr", + "mag": "may", + "giu": "jun", + "lug": "jul", + "ago": "aug", + "set": "sep", + "ott": "oct", + "nov": "nov", + "dic": "dec", + }, # noqa + "weekday": { + "sab": "sat", + "dom": "sun", + "lun": "mon", + "mar": "tue", + "mer": "wed", + "gio": "thu", + "ven": "fri", + }, # noqa }, } diff --git 
a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py index 3704379b28a..5d9b79b57cc 100644 --- a/mne/io/nirx/nirx.py +++ b/mne/io/nirx/nirx.py @@ -1,30 +1,41 @@ -# Authors: Robert Luke -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from configparser import ConfigParser, RawConfigParser -import glob as glob -import re as re -import os.path as op import datetime as dt +import glob as glob import json +import os.path as op +import re as re +from configparser import ConfigParser, RawConfigParser import numpy as np +from scipy.io import loadmat -from ._localized_abbr import _localized_abbr -from ..base import BaseRaw -from ..utils import _mult_cal_one -from ..constants import FIFF -from ..meas_info import create_info, _format_dig_points -from ...annotations import Annotations +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _format_dig_points, create_info +from ..._fiff.utils import _mult_cal_one from ..._freesurfer import get_mni_fiducials -from ...transforms import apply_trans, _get_trans -from ...utils import (logger, verbose, fill_doc, warn, _check_fname, - _validate_type, _check_option, _mask_to_onsets_offsets) +from ...annotations import Annotations +from ...transforms import _get_trans, apply_trans +from ...utils import ( + _check_fname, + _check_option, + _mask_to_onsets_offsets, + _validate_type, + fill_doc, + logger, + verbose, + warn, +) +from ..base import BaseRaw +from ._localized_abbr import _localized_abbr @fill_doc -def read_raw_nirx(fname, saturated='annotate', preload=False, verbose=None): +def read_raw_nirx( + fname, saturated="annotate", *, preload=False, encoding="latin-1", verbose=None +) -> "RawNIRX": """Reader for a NIRX fNIRS recording. Parameters @@ -33,6 +44,7 @@ def read_raw_nirx(fname, saturated='annotate', preload=False, verbose=None): Path to the NIRX data folder or header file. %(saturated)s %(preload)s + %(encoding_nirx)s %(verbose)s Returns @@ -49,11 +61,13 @@ def read_raw_nirx(fname, saturated='annotate', preload=False, verbose=None): ----- %(nirx_notes)s """ - return RawNIRX(fname, saturated, preload, verbose) + return RawNIRX( + fname, saturated, preload=preload, encoding=encoding, verbose=verbose + ) def _open(fname): - return open(fname, 'r', encoding='latin-1') + return open(fname, encoding="latin-1") @fill_doc @@ -66,6 +80,7 @@ class RawNIRX(BaseRaw): Path to the NIRX data folder or header file. 
%(saturated)s %(preload)s + %(encoding_nirx)s %(verbose)s See Also @@ -78,113 +93,134 @@ class RawNIRX(BaseRaw): """ @verbose - def __init__(self, fname, saturated, preload=False, verbose=None): - from scipy.io import loadmat - logger.info('Loading %s' % fname) - _validate_type(fname, 'path-like', 'fname') - _validate_type(saturated, str, 'saturated') - _check_option('saturated', saturated, ('annotate', 'nan', 'ignore')) + def __init__(self, fname, saturated, *, preload=False, encoding=None, verbose=None): + logger.info(f"Loading {fname}") + _validate_type(fname, "path-like", "fname") + _validate_type(saturated, str, "saturated") + _check_option("saturated", saturated, ("annotate", "nan", "ignore")) fname = str(fname) - if fname.endswith('.hdr'): + if fname.endswith(".hdr"): fname = op.dirname(op.abspath(fname)) fname = str(_check_fname(fname, "read", True, "fname", need_dir=True)) - json_config = glob.glob('%s/*%s' % (fname, "config.json")) - if len(json_config): - is_aurora = True - else: - is_aurora = False + json_config = glob.glob(f"{fname}/*{'config.json'}") + is_aurora = len(json_config) if is_aurora: # NIRSport2 devices using Aurora software - keys = ('hdr', 'config.json', 'description.json', - 'wl1', 'wl2', 'probeInfo.mat', 'tri') + keys = ( + "hdr", + "config.json", + "description.json", + "wl1", + "wl2", + "probeInfo.mat", + "tri", + ) else: # NIRScout devices and NIRSport1 devices - keys = ('hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2', - 'config.txt', 'probeInfo.mat') - n_dat = len(glob.glob('%s/*%s' % (fname, 'dat'))) + keys = ( + "hdr", + "inf", + "set", + "tpl", + "wl1", + "wl2", + "config.txt", + "probeInfo.mat", + ) + n_dat = len(glob.glob(f"{fname}/*{'dat'}")) if n_dat != 1: - warn("A single dat file was expected in the specified path, " - f"but got {n_dat}. This may indicate that the file " - "structure has been modified since the measurement " - "was saved.") + warn( + "A single dat file was expected in the specified path, " + f"but got {n_dat}. This may indicate that the file " + "structure has been modified since the measurement " + "was saved." + ) # Check if required files exist and store names for later use files = dict() nan_mask = dict() for key in keys: - files[key] = glob.glob('%s/*%s' % (fname, key)) + files[key] = glob.glob(f"{fname}/*{key}") fidx = 0 if len(files[key]) != 1: - if key not in ('wl1', 'wl2'): - raise RuntimeError( - f'Need one {key} file, got {len(files[key])}') - noidx = np.where(['nosatflags_' in op.basename(x) - for x in files[key]])[0] + if key not in ("wl1", "wl2"): + raise RuntimeError(f"Need one {key} file, got {len(files[key])}") + noidx = np.where(["nosatflags_" in op.basename(x) for x in files[key]])[ + 0 + ] if len(noidx) != 1 or len(files[key]) != 2: raise RuntimeError( - f'Need one nosatflags and one standard {key} file, ' - f'got {len(files[key])}') + f"Need one nosatflags and one standard {key} file, " + f"got {len(files[key])}" + ) # Here two files have been found, one that is called # no sat flags. The nosatflag file has no NaNs in it. 
noidx = noidx[0] - if saturated == 'ignore': + if saturated == "ignore": # Ignore NaN and return values fidx = noidx - elif saturated == 'nan': + elif saturated == "nan": # Return NaN fidx = 0 if noidx == 1 else 1 else: - assert saturated == 'annotate' # guaranteed above + assert saturated == "annotate" # guaranteed above fidx = noidx nan_mask[key] = files[key][0 if noidx == 1 else 1] files[key] = files[key][fidx] # Read number of rows/samples of wavelength data - with _open(files['wl1']) as fid: - last_sample = fid.read().count('\n') - 1 + with _open(files["wl1"]) as fid: + last_sample = fid.read().count("\n") - 1 # Read header file # The header file isn't compliant with the configparser. So all the # text between comments must be removed before passing to parser - with _open(files['hdr']) as f: + with open(files["hdr"], encoding=encoding) as f: hdr_str_all = f.read() - hdr_str = re.sub('#.*?#', '', hdr_str_all, flags=re.DOTALL) + hdr_str = re.sub("#.*?#", "", hdr_str_all, flags=re.DOTALL) if is_aurora: - hdr_str = re.sub('(\\[DataStructure].*)', '', - hdr_str, flags=re.DOTALL) + hdr_str = re.sub("(\\[DataStructure].*)", "", hdr_str, flags=re.DOTALL) hdr = RawConfigParser() hdr.read_string(hdr_str) # Check that the file format version is supported if is_aurora: # We may need to ease this requirement back - if hdr['GeneralInfo']['Version'] not in ['2021.4.0-34-ge9fdbbc8', - '2021.9.0-5-g3eb32851', - '2021.9.0-6-g14ef4a71']: - warn("MNE has not been tested with Aurora version " - f"{hdr['GeneralInfo']['Version']}") + if hdr["GeneralInfo"]["Version"] not in [ + "2021.4.0-34-ge9fdbbc8", + "2021.9.0-5-g3eb32851", + "2021.9.0-6-g14ef4a71", + ]: + warn( + "MNE has not been tested with Aurora version " + f"{hdr['GeneralInfo']['Version']}" + ) else: - if hdr['GeneralInfo']['NIRStar'] not in ['"15.0"', '"15.2"', - '"15.3"']: - raise RuntimeError('MNE does not support this NIRStar version' - ' (%s)' % (hdr['GeneralInfo']['NIRStar'],)) - if "NIRScout" not in hdr['GeneralInfo']['Device'] \ - and "NIRSport" not in hdr['GeneralInfo']['Device']: - warn("Only import of data from NIRScout devices have been " - "thoroughly tested. You are using a %s device. " % - hdr['GeneralInfo']['Device']) + if hdr["GeneralInfo"]["NIRStar"] not in ['"15.0"', '"15.2"', '"15.3"']: + raise RuntimeError( + "MNE does not support this NIRStar version" + f" ({hdr['GeneralInfo']['NIRStar']})" + ) + if ( + "NIRScout" not in hdr["GeneralInfo"]["Device"] + and "NIRSport" not in hdr["GeneralInfo"]["Device"] + ): + warn( + "Only import of data from NIRScout devices have been " + f"thoroughly tested. You are using a {hdr['GeneralInfo']['Device']}" + " device." + ) # Parse required header fields # Extract measurement date and time if is_aurora: - datetime_str = hdr['GeneralInfo']['Date'] + datetime_str = hdr["GeneralInfo"]["Date"] else: - datetime_str = hdr['GeneralInfo']['Date'] + \ - hdr['GeneralInfo']['Time'] + datetime_str = hdr["GeneralInfo"]["Date"] + hdr["GeneralInfo"]["Time"] meas_date = None # Several formats have been observed so we try each in turn @@ -193,19 +229,21 @@ def __init__(self, fname, saturated, preload=False, verbose=None): # So far we are lucky in that all the formats below, if they # include %a (weekday abbr), always come first. Thus we can use # a .split(), replace, and rejoin. 
- loc_datetime_str = datetime_str.split(' ') - for key, val in translations['weekday'].items(): + loc_datetime_str = datetime_str.split(" ") + for key, val in translations["weekday"].items(): loc_datetime_str[0] = loc_datetime_str[0].replace(key, val) for ii in range(1, len(loc_datetime_str)): - for key, val in translations['month'].items(): - loc_datetime_str[ii] = \ - loc_datetime_str[ii].replace(key, val) - loc_datetime_str = ' '.join(loc_datetime_str) - logger.debug(f'Trying {loc} datetime: {loc_datetime_str}') - for dt_code in ['"%a, %b %d, %Y""%H:%M:%S.%f"', - '"%a %d %b %Y""%H:%M:%S.%f"', - '"%a, %d %b %Y""%H:%M:%S.%f"', - '%Y-%m-%d %H:%M:%S.%f']: + for key, val in translations["month"].items(): + loc_datetime_str[ii] = loc_datetime_str[ii].replace(key, val) + loc_datetime_str = " ".join(loc_datetime_str) + logger.debug(f"Trying {loc} datetime: {loc_datetime_str}") + for dt_code in [ + '"%a, %b %d, %Y""%H:%M:%S.%f"', + '"%a %d %b %Y""%H:%M:%S.%f"', + '"%a, %d %b %Y""%H:%M:%S.%f"', + "%Y-%m-%d %H:%M:%S.%f", + '"%Y年%m月%d日""%H:%M:%S.%f"', + ]: try: meas_date = dt.datetime.strptime(loc_datetime_str, dt_code) except ValueError: @@ -213,61 +251,71 @@ def __init__(self, fname, saturated, preload=False, verbose=None): else: meas_date = meas_date.replace(tzinfo=dt.timezone.utc) do_break = True - logger.debug( - f'Measurement date language {loc} detected: {dt_code}') + logger.debug(f"Measurement date language {loc} detected: {dt_code}") break if do_break: break if meas_date is None: - warn("Extraction of measurement date from NIRX file failed. " - "This can be caused by files saved in certain locales " - f"(currently only {list(_localized_abbr)} supported). " - "Please report this as a github issue. " - "The date is being set to January 1st, 2000, " - f"instead of {repr(datetime_str)}.") - meas_date = dt.datetime(2000, 1, 1, 0, 0, 0, - tzinfo=dt.timezone.utc) + warn( + "Extraction of measurement date from NIRX file failed. " + "This can be caused by files saved in certain locales " + f"(currently only {list(_localized_abbr)} supported). " + "Please report this as a github issue. " + "The date is being set to January 1st, 2000, " + f"instead of {repr(datetime_str)}." 
+ ) + meas_date = dt.datetime(2000, 1, 1, 0, 0, 0, tzinfo=dt.timezone.utc) # Extract frequencies of light used by machine if is_aurora: fnirs_wavelengths = [760, 850] else: - fnirs_wavelengths = [int(s) for s in - re.findall(r'(\d+)', - hdr['ImagingParameters'][ - 'Wavelengths'])] + fnirs_wavelengths = [ + int(s) + for s in re.findall(r"(\d+)", hdr["ImagingParameters"]["Wavelengths"]) + ] # Extract source-detectors if is_aurora: - sources = re.findall(r'(\d+)-\d+', hdr_str_all.split("\n")[-2]) - detectors = re.findall(r'\d+-(\d+)', hdr_str_all.split("\n")[-2]) + sources = re.findall(r"(\d+)-\d+", hdr_str_all.split("\n")[-2]) + detectors = re.findall(r"\d+-(\d+)", hdr_str_all.split("\n")[-2]) sources = [int(s) + 1 for s in sources] detectors = [int(d) + 1 for d in detectors] else: - sources = np.asarray([int(s) for s in - re.findall(r'(\d+)-\d+:\d+', - hdr['DataStructure'] - ['S-D-Key'])], int) - detectors = np.asarray([int(s) for s in - re.findall(r'\d+-(\d+):\d+', - hdr['DataStructure'] - ['S-D-Key'])], int) + sources = np.asarray( + [ + int(s) + for s in re.findall( + r"(\d+)-\d+:\d+", hdr["DataStructure"]["S-D-Key"] + ) + ], + int, + ) + detectors = np.asarray( + [ + int(s) + for s in re.findall( + r"\d+-(\d+):\d+", hdr["DataStructure"]["S-D-Key"] + ) + ], + int, + ) # Extract sampling rate if is_aurora: - samplingrate = float(hdr['GeneralInfo']['Sampling rate']) + samplingrate = float(hdr["GeneralInfo"]["Sampling rate"]) else: - samplingrate = float(hdr['ImagingParameters']['SamplingRate']) + samplingrate = float(hdr["ImagingParameters"]["SamplingRate"]) # Read participant information file if is_aurora: - with open(files['description.json']) as f: + with open(files["description.json"]) as f: inf = json.load(f) else: inf = ConfigParser(allow_no_value=True) - inf.read(files['inf']) - inf = inf._sections['Subject Demographics'] + inf.read(files["inf"]) + inf = inf._sections["Subject Demographics"] # Store subject information from inf file in mne format # Note: NIRX also records "Study Type", "Experiment History", @@ -279,29 +327,28 @@ def __init__(self, fname, saturated, preload=False, verbose=None): if is_aurora: names = inf["subject"].split() else: - names = inf['name'].replace('"', "").split() - subject_info['his_id'] = "_".join(names) + names = inf["name"].replace('"', "").split() + subject_info["his_id"] = "_".join(names) if len(names) > 0: - subject_info['first_name'] = \ - names[0].replace("\"", "") + subject_info["first_name"] = names[0].replace('"', "") if len(names) > 1: - subject_info['last_name'] = \ - names[-1].replace("\"", "") + subject_info["last_name"] = names[-1].replace('"', "") if len(names) > 2: - subject_info['middle_name'] = \ - names[-2].replace("\"", "") - subject_info['sex'] = inf['gender'].replace("\"", "") + subject_info["middle_name"] = names[-2].replace('"', "") + subject_info["sex"] = inf["gender"].replace('"', "") # Recode values - if subject_info['sex'] in {'M', 'Male', '1'}: - subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE - elif subject_info['sex'] in {'F', 'Female', '2'}: - subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE + if subject_info["sex"] in {"M", "Male", "1"}: + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_MALE + elif subject_info["sex"] in {"F", "Female", "2"}: + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_FEMALE else: - subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN - if inf['age'] != '': - subject_info['birthday'] = (meas_date.year - int(inf['age']), - meas_date.month, - meas_date.day) + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN + if 
inf["age"] != "": + subject_info["birthday"] = dt.date( + meas_date.year - int(inf["age"]), + meas_date.month, + meas_date.day, + ) # Read information about probe/montage/optodes # A word on terminology used here: @@ -310,30 +357,33 @@ def __init__(self, fname, saturated, preload=False, verbose=None): # Sources and detectors are both called optodes # Each source - detector pair produces a channel # Channels are defined as the midpoint between source and detector - mat_data = loadmat(files['probeInfo.mat']) - probes = mat_data['probeInfo']['probes'][0, 0] - requested_channels = probes['index_c'][0, 0] - src_locs = probes['coords_s3'][0, 0] / 100. - det_locs = probes['coords_d3'][0, 0] / 100. - ch_locs = probes['coords_c3'][0, 0] / 100. + mat_data = loadmat(files["probeInfo.mat"]) + probes = mat_data["probeInfo"]["probes"][0, 0] + requested_channels = probes["index_c"][0, 0] + src_locs = probes["coords_s3"][0, 0] / 100.0 + det_locs = probes["coords_d3"][0, 0] / 100.0 + ch_locs = probes["coords_c3"][0, 0] / 100.0 # These are all in MNI coordinates, so let's transform them to # the Neuromag head coordinate frame src_locs, det_locs, ch_locs, mri_head_t = _convert_fnirs_to_head( - 'fsaverage', 'mri', 'head', src_locs, det_locs, ch_locs) + "fsaverage", "mri", "head", src_locs, det_locs, ch_locs + ) # Set up digitization - dig = get_mni_fiducials('fsaverage', verbose=False) + dig = get_mni_fiducials("fsaverage", verbose=False) for fid in dig: - fid['r'] = apply_trans(mri_head_t, fid['r']) - fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD + fid["r"] = apply_trans(mri_head_t, fid["r"]) + fid["coord_frame"] = FIFF.FIFFV_COORD_HEAD for ii, ch_loc in enumerate(ch_locs, 1): - dig.append(dict( - kind=FIFF.FIFFV_POINT_EEG, # misnomer but probably okay - r=ch_loc, - ident=ii, - coord_frame=FIFF.FIFFV_COORD_HEAD, - )) + dig.append( + dict( + kind=FIFF.FIFFV_POINT_EEG, # misnomer but probably okay + r=ch_loc, + ident=ii, + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + ) dig = _format_dig_points(dig) del mri_head_t @@ -343,25 +393,25 @@ def __init__(self, fname, saturated, preload=False, verbose=None): # subset requested in the probe file req_ind = np.array([], int) for req_idx in range(requested_channels.shape[0]): - sd_idx = np.where((sources == requested_channels[req_idx][0]) & - (detectors == requested_channels[req_idx][1])) + sd_idx = np.where( + (sources == requested_channels[req_idx][0]) + & (detectors == requested_channels[req_idx][1]) + ) req_ind = np.concatenate((req_ind, sd_idx[0])) req_ind = req_ind.astype(int) snames = [f"S{sources[idx]}" for idx in req_ind] dnames = [f"_D{detectors[idx]}" for idx in req_ind] sdnames = [m + str(n) for m, n in zip(snames, dnames)] - sd1 = [s + ' ' + str(fnirs_wavelengths[0]) for s in sdnames] - sd2 = [s + ' ' + str(fnirs_wavelengths[1]) for s in sdnames] + sd1 = [s + " " + str(fnirs_wavelengths[0]) for s in sdnames] + sd2 = [s + " " + str(fnirs_wavelengths[1]) for s in sdnames] chnames = [val for pair in zip(sd1, sd2) for val in pair] # Create mne structure - info = create_info(chnames, - samplingrate, - ch_types='fnirs_cw_amplitude') + info = create_info(chnames, samplingrate, ch_types="fnirs_cw_amplitude") with info._unlock(): info.update(subject_info=subject_info, dig=dig) - info['meas_date'] = meas_date + info["meas_date"] = meas_date # Store channel, source, and detector locations # The channel location is stored in the first 3 entries of loc. 
@@ -378,11 +428,11 @@ def __init__(self, fname, saturated, preload=False, verbose=None): midpoint = (src_locs[src, :] + det_locs[det, :]) / 2 for ii in range(2): ch_idx3 = ch_idx2 * 2 + ii - info['chs'][ch_idx3]['loc'][3:6] = src_locs[src, :] - info['chs'][ch_idx3]['loc'][6:9] = det_locs[det, :] - info['chs'][ch_idx3]['loc'][:3] = midpoint - info['chs'][ch_idx3]['loc'][9] = fnirs_wavelengths[ii] - info['chs'][ch_idx3]['coord_frame'] = FIFF.FIFFV_COORD_HEAD + info["chs"][ch_idx3]["loc"][3:6] = src_locs[src, :] + info["chs"][ch_idx3]["loc"][6:9] = det_locs[det, :] + info["chs"][ch_idx3]["loc"][:3] = midpoint + info["chs"][ch_idx3]["loc"][9] = fnirs_wavelengths[ii] + info["chs"][ch_idx3]["coord_frame"] = FIFF.FIFFV_COORD_HEAD # Extract the start/stop numbers for samples in the CSV. In theory the # sample bounds should just be 10 * the number of channels, but some @@ -390,10 +440,10 @@ def __init__(self, fname, saturated, preload=False, verbose=None): # instead make a single pass over the entire file at the beginning so # that we know how to seek and read later. bounds = dict() - for key in ('wl1', 'wl2'): + for key in ("wl1", "wl2"): offset = 0 bounds[key] = [offset] - with open(files[key], 'rb') as fid: + with open(files[key], "rb") as fid: for line in fid: offset += len(line) bounds[key].append(offset) @@ -401,51 +451,60 @@ def __init__(self, fname, saturated, preload=False, verbose=None): # Extras required for reading data raw_extras = { - 'sd_index': req_ind, - 'files': files, - 'bounds': bounds, - 'nan_mask': nan_mask, + "sd_index": req_ind, + "files": files, + "bounds": bounds, + "nan_mask": nan_mask, } # Get our saturated mask annot_mask = None - for ki, key in enumerate(('wl1', 'wl2')): + for ki, key in enumerate(("wl1", "wl2")): if nan_mask.get(key, None) is None: continue - mask = np.isnan(_read_csv_rows_cols( - nan_mask[key], 0, last_sample + 1, req_ind, {0: 0, 1: None}).T) - if saturated == 'nan': + mask = np.isnan( + _read_csv_rows_cols( + nan_mask[key], 0, last_sample + 1, req_ind, {0: 0, 1: None} + ).T + ) + if saturated == "nan": nan_mask[key] = mask else: - assert saturated == 'annotate' + assert saturated == "annotate" if annot_mask is None: annot_mask = np.zeros( - (len(info['ch_names']) // 2, last_sample + 1), bool) + (len(info["ch_names"]) // 2, last_sample + 1), bool + ) annot_mask |= mask nan_mask[key] = None # shouldn't need again - super(RawNIRX, self).__init__( - info, preload, filenames=[fname], last_samps=[last_sample], - raw_extras=[raw_extras], verbose=verbose) + super().__init__( + info, + preload, + filenames=[fname], + last_samps=[last_sample], + raw_extras=[raw_extras], + verbose=verbose, + ) # make onset/duration/description onset, duration, description, ch_names = list(), list(), list(), list() if annot_mask is not None: for ci, mask in enumerate(annot_mask): on, dur = _mask_to_onsets_offsets(mask) - on = on / info['sfreq'] - dur = dur / info['sfreq'] + on = on / info["sfreq"] + dur = dur / info["sfreq"] dur -= on onset.extend(on) duration.extend(dur) - description.extend(['BAD_SATURATED'] * len(on)) - ch_names.extend([self.ch_names[2 * ci:2 * ci + 2]] * len(on)) + description.extend(["BAD_SATURATED"] * len(on)) + ch_names.extend([self.ch_names[2 * ci : 2 * ci + 2]] * len(on)) # Read triggers from event file if not is_aurora: - files['tri'] = files['hdr'][:-3] + 'evt' - if op.isfile(files['tri']): - with _open(files['tri']) as fid: - t = [re.findall(r'(\d+)', line) for line in fid] + files["tri"] = files["hdr"][:-3] + "evt" + if op.isfile(files["tri"]): + 
with _open(files["tri"]) as fid: + t = [re.findall(r"(\d+)", line) for line in fid] if is_aurora: tf_idx, desc_idx = _determine_tri_idxs(t[0]) for t_ in t: @@ -453,11 +512,11 @@ def __init__(self, fname, saturated, preload=False, verbose=None): trigger_frame = float(t_[tf_idx]) desc = float(t_[desc_idx]) else: - binary_value = ''.join(t_[1:])[::-1] + binary_value = "".join(t_[1:])[::-1] desc = float(int(binary_value, 2)) trigger_frame = float(t_[0]) onset.append(trigger_frame / samplingrate) - duration.append(1.) # No duration info stored in files + duration.append(1.0) # No duration info stored in files description.append(desc) ch_names.append(list()) annot = Annotations(onset, duration, description, ch_names=ch_names) @@ -469,15 +528,18 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): The NIRX machine records raw data as two different wavelengths. The returned data interleaves the wavelengths. """ - sd_index = self._raw_extras[fi]['sd_index'] + sd_index = self._raw_extras[fi]["sd_index"] wls = list() - for key in ('wl1', 'wl2'): + for key in ("wl1", "wl2"): d = _read_csv_rows_cols( - self._raw_extras[fi]['files'][key], - start, stop, sd_index, - self._raw_extras[fi]['bounds'][key]).T - nan_mask = self._raw_extras[fi]['nan_mask'].get(key, None) + self._raw_extras[fi]["files"][key], + start, + stop, + sd_index, + self._raw_extras[fi]["bounds"][key], + ).T + nan_mask = self._raw_extras[fi]["nan_mask"].get(key, None) if nan_mask is not None: d[nan_mask[:, start:stop]] = np.nan wls.append(d) @@ -492,14 +554,13 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): return data -def _read_csv_rows_cols(fname, start, stop, cols, bounds, - sep=' ', replace=None): - with open(fname, 'rb') as fid: +def _read_csv_rows_cols(fname, start, stop, cols, bounds, sep=" ", replace=None): + with open(fname, "rb") as fid: fid.seek(bounds[start]) args = list() if bounds[1] is not None: args.append(bounds[stop] - bounds[start]) - data = fid.read(*args).decode('latin-1') + data = fid.read(*args).decode("latin-1") if replace is not None: data = replace(data) x = np.fromstring(data, float, sep=sep) diff --git a/mne/io/nirx/tests/__init__.py b/mne/io/nirx/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/nirx/tests/__init__.py +++ b/mne/io/nirx/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/nirx/tests/test_nirx.py b/mne/io/nirx/tests/test_nirx.py index 79b348b1ab5..16d81c55e78 100644 --- a/mne/io/nirx/tests/test_nirx.py +++ b/mne/io/nirx/tests/test_nirx.py @@ -1,25 +1,27 @@ -# -*- coding: utf-8 -*- -# Authors: Robert Luke -# Eric Larson -# simplified BSD-3 license +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
-import shutil -import os import datetime as dt -import numpy as np +import os +import shutil +import numpy as np import pytest from numpy.testing import assert_allclose, assert_array_equal from mne import pick_types +from mne._fiff.constants import FIFF from mne.datasets.testing import data_path, requires_testing_data from mne.io import read_raw_nirx from mne.io.tests.test_raw import _test_raw_reader from mne.preprocessing import annotate_nan -from mne.transforms import apply_trans, _get_trans -from mne.preprocessing.nirs import source_detector_distances,\ - short_channels, _reorder_nirx -from mne.io.constants import FIFF +from mne.preprocessing.nirs import ( + _reorder_nirx, + short_channels, + source_detector_distances, +) +from mne.transforms import _get_trans, apply_trans testing_path = data_path(download=False) fname_nirx_15_0 = testing_path / "NIRx" / "nirscout" / "nirx_15_0_recording" @@ -27,9 +29,7 @@ fname_nirx_15_2_short = ( testing_path / "NIRx" / "nirscout" / "nirx_15_2_recording_w_short" ) -fname_nirx_15_3_short = ( - testing_path / "NIRx" / "nirscout" / "nirx_15_3_recording" -) +fname_nirx_15_3_short = testing_path / "NIRx" / "nirscout" / "nirx_15_3_recording" # This file has no saturated sections @@ -52,9 +52,7 @@ ) # NIRSport2 device using Aurora software -nirsport2 = ( - testing_path / "NIRx" / "nirsport_v2" / "aurora_recording _w_short_and_acc" -) +nirsport2 = testing_path / "NIRx" / "nirsport_v2" / "aurora_recording _w_short_and_acc" nirsport2_2021_9 = testing_path / "NIRx" / "nirsport_v2" / "aurora_2021_9" nirsport2_2021_9_6 = testing_path / "NIRx" / "nirsport_v2" / "aurora_2021_9_6" @@ -78,7 +76,7 @@ def test_nirsport_v2_matches_snirf(nirx_snirf): @requires_testing_data -@pytest.mark.filterwarnings('ignore:.*Extraction of measurement.*:') +@pytest.mark.filterwarnings("ignore:.*Extraction of measurement.*:") def test_nirsport_v2(): """Test NIRSport2 file.""" raw = read_raw_nirx(nirsport2, preload=True) @@ -88,90 +86,87 @@ def test_nirsport_v2(): # nirsite https://github.com/mne-tools/mne-testing-data/pull/86 # figure 3 allowed_distance_error = 0.005 - assert_allclose(source_detector_distances(raw.copy(). - pick("S1_D1 760").info), - [0.0304], atol=allowed_distance_error) - assert_allclose(source_detector_distances(raw.copy(). - pick("S2_D2 760").info), - [0.0400], atol=allowed_distance_error) + assert_allclose( + source_detector_distances(raw.copy().pick("S1_D1 760").info), + [0.0304], + atol=allowed_distance_error, + ) + assert_allclose( + source_detector_distances(raw.copy().pick("S2_D2 760").info), + [0.0400], + atol=allowed_distance_error, + ) # Test location of detectors # The locations of detectors can be seen in the first # figure on this page... 
# https://github.com/mne-tools/mne-testing-data/pull/86 allowed_dist_error = 0.0002 - locs = [ch['loc'][6:9] for ch in raw.info['chs']] - head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + locs = [ch["loc"][6:9] for ch in raw.info["chs"]] + head_mri_t, _ = _get_trans("fsaverage", "head", "mri") mni_locs = apply_trans(head_mri_t, locs) - assert raw.info['ch_names'][0][3:5] == 'D1' - assert_allclose( - mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) + assert raw.info["ch_names"][0][3:5] == "D1" + assert_allclose(mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) - assert raw.info['ch_names'][2][3:5] == 'D6' - assert_allclose( - mni_locs[2], [-0.0841, -0.0138, 0.0248], atol=allowed_dist_error) + assert raw.info["ch_names"][2][3:5] == "D6" + assert_allclose(mni_locs[2], [-0.0841, -0.0138, 0.0248], atol=allowed_dist_error) - assert raw.info['ch_names'][34][3:5] == 'D5' - assert_allclose( - mni_locs[34], [0.0845, -0.0451, -0.0123], atol=allowed_dist_error) + assert raw.info["ch_names"][34][3:5] == "D5" + assert_allclose(mni_locs[34], [0.0845, -0.0451, -0.0123], atol=allowed_dist_error) # Test location of sensors # The locations of sensors can be seen in the second # figure on this page... # https://github.com/mne-tools/mne-testing-data/pull/86 - locs = [ch['loc'][3:6] for ch in raw.info['chs']] - head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + locs = [ch["loc"][3:6] for ch in raw.info["chs"]] + head_mri_t, _ = _get_trans("fsaverage", "head", "mri") mni_locs = apply_trans(head_mri_t, locs) - assert raw.info['ch_names'][0][:2] == 'S1' - assert_allclose( - mni_locs[0], [-0.0848, -0.0162, -0.0163], atol=allowed_dist_error) + assert raw.info["ch_names"][0][:2] == "S1" + assert_allclose(mni_locs[0], [-0.0848, -0.0162, -0.0163], atol=allowed_dist_error) - assert raw.info['ch_names'][9][:2] == 'S2' - assert_allclose( - mni_locs[9], [-0.0, -0.1195, 0.0142], atol=allowed_dist_error) + assert raw.info["ch_names"][9][:2] == "S2" + assert_allclose(mni_locs[9], [-0.0, -0.1195, 0.0142], atol=allowed_dist_error) - assert raw.info['ch_names'][39][:2] == 'S8' - assert_allclose( - mni_locs[34], [0.0828, -0.046, 0.0285], atol=allowed_dist_error) + assert raw.info["ch_names"][39][:2] == "S8" + assert_allclose(mni_locs[34], [0.0828, -0.046, 0.0285], atol=allowed_dist_error) assert len(raw.annotations) == 3 - assert raw.annotations.description[0] == '1.0' - assert raw.annotations.description[2] == '6.0' + assert raw.annotations.description[0] == "1.0" + assert raw.annotations.description[2] == "6.0" # Lose tolerance as I am eyeballing the time differences on screen - assert_allclose( - np.diff(raw.annotations.onset), [2.3, 3.1], atol=0.1) + assert_allclose(np.diff(raw.annotations.onset), [2.3, 3.1], atol=0.1) mon = raw.get_montage() assert len(mon.dig) == 27 @requires_testing_data -@pytest.mark.filterwarnings('ignore:.*Extraction of measurement.*:') +@pytest.mark.filterwarnings("ignore:.*Extraction of measurement.*:") def test_nirsport_v1_wo_sat(): """Test NIRSport1 file with no saturation.""" raw = read_raw_nirx(nirsport1_wo_sat, preload=True) # Test data import assert raw._data.shape == (26, 164) - assert raw.info['sfreq'] == 10.416667 + assert raw.info["sfreq"] == 10.416667 # By default real data is returned assert np.sum(np.isnan(raw.get_data())) == 0 - raw = read_raw_nirx(nirsport1_wo_sat, preload=True, saturated='nan') + raw = read_raw_nirx(nirsport1_wo_sat, preload=True, saturated="nan") data = raw.get_data() assert data.shape == (26, 164) assert 
np.sum(np.isnan(data)) == 0 - raw = read_raw_nirx(nirsport1_wo_sat, saturated='annotate') + raw = read_raw_nirx(nirsport1_wo_sat, saturated="annotate") data = raw.get_data() assert data.shape == (26, 164) assert np.sum(np.isnan(data)) == 0 -@pytest.mark.filterwarnings('ignore:.*Extraction of measurement.*:') +@pytest.mark.filterwarnings("ignore:.*Extraction of measurement.*:") @requires_testing_data def test_nirsport_v1_w_sat(): """Test NIRSport1 file with NaNs but not in channel of interest.""" @@ -180,24 +175,24 @@ def test_nirsport_v1_w_sat(): # Test data import data = raw.get_data() assert data.shape == (26, 176) - assert raw.info['sfreq'] == 10.416667 + assert raw.info["sfreq"] == 10.416667 assert np.sum(np.isnan(data)) == 0 - raw = read_raw_nirx(nirsport1_w_sat, saturated='nan') + raw = read_raw_nirx(nirsport1_w_sat, saturated="nan") data = raw.get_data() assert data.shape == (26, 176) assert np.sum(np.isnan(data)) == 0 - raw = read_raw_nirx(nirsport1_w_sat, saturated='annotate') + raw = read_raw_nirx(nirsport1_w_sat, saturated="annotate") data = raw.get_data() assert data.shape == (26, 176) assert np.sum(np.isnan(data)) == 0 -@pytest.mark.filterwarnings('ignore:.*Extraction of measurement.*:') +@pytest.mark.filterwarnings("ignore:.*Extraction of measurement.*:") @requires_testing_data -@pytest.mark.parametrize('preload', (True, False)) -@pytest.mark.parametrize('meas_date', (None, "orig")) +@pytest.mark.parametrize("preload", (True, False)) +@pytest.mark.parametrize("meas_date", (None, "orig")) def test_nirsport_v1_w_bad_sat(preload, meas_date): """Test NIRSport1 file with NaNs.""" fname = nirsport1_w_fullsat @@ -206,12 +201,12 @@ def test_nirsport_v1_w_bad_sat(preload, meas_date): assert not np.isnan(data).any() assert len(raw.annotations) == 5 # annotated version and ignore should have same data but different annot - raw_ignore = read_raw_nirx(fname, saturated='ignore', preload=preload) + raw_ignore = read_raw_nirx(fname, saturated="ignore", preload=preload) assert_allclose(raw_ignore.get_data(), data) assert len(raw_ignore.annotations) == 2 - assert not any('NAN' in d for d in raw_ignore.annotations.description) + assert not any("NAN" in d for d in raw_ignore.annotations.description) # nan version should not have same data, but we can give it the same annot - raw_nan = read_raw_nirx(fname, saturated='nan', preload=preload) + raw_nan = read_raw_nirx(fname, saturated="nan", preload=preload) data_nan = raw_nan.get_data() assert np.isnan(data_nan).any() assert not np.allclose(raw_nan.get_data(), data) @@ -223,8 +218,8 @@ def test_nirsport_v1_w_bad_sat(preload, meas_date): nan_annots = annotate_nan(raw_nan) assert nan_annots.orig_time == raw_nan.info["meas_date"] raw_nan_annot.set_annotations(nan_annots) - use_mask = np.where(raw.annotations.description == 'BAD_SATURATED') - for key in ('onset', 'duration'): + use_mask = np.where(raw.annotations.description == "BAD_SATURATED") + for key in ("onset", "duration"): a = getattr(raw_nan_annot.annotations, key)[::2] # one ch in each b = getattr(raw.annotations, key)[use_mask] # two chs in each assert_allclose(a, b) @@ -238,13 +233,13 @@ def test_nirx_hdr_load(): # Test data import assert raw._data.shape == (26, 145) - assert raw.info['sfreq'] == 12.5 + assert raw.info["sfreq"] == 12.5 @requires_testing_data def test_nirx_missing_warn(): """Test reading NIRX files when missing data.""" - with pytest.raises(FileNotFoundError, match='does not exist'): + with pytest.raises(FileNotFoundError, match="does not exist"): 
read_raw_nirx(fname_nirx_15_2_short / "1", preload=True) @@ -252,21 +247,25 @@ def test_nirx_missing_warn(): def test_nirx_missing_evt(tmp_path): """Test reading NIRX files when missing data.""" shutil.copytree(fname_nirx_15_2_short, str(tmp_path) + "/data/") - os.rename(tmp_path / "data" / "NIRS-2019-08-23_001.evt", - tmp_path / "data" / "NIRS-2019-08-23_001.xxx") + os.rename( + tmp_path / "data" / "NIRS-2019-08-23_001.evt", + tmp_path / "data" / "NIRS-2019-08-23_001.xxx", + ) fname = tmp_path / "data" / "NIRS-2019-08-23_001.hdr" raw = read_raw_nirx(fname, preload=True) - assert raw.annotations.onset.shape == (0, ) + assert raw.annotations.onset.shape == (0,) @requires_testing_data def test_nirx_dat_warn(tmp_path): """Test reading NIRX files when missing data.""" shutil.copytree(fname_nirx_15_2_short, str(tmp_path) + "/data/") - os.rename(tmp_path / "data" / "NIRS-2019-08-23_001.dat", - tmp_path / "data" / "NIRS-2019-08-23_001.tmp") + os.rename( + tmp_path / "data" / "NIRS-2019-08-23_001.dat", + tmp_path / "data" / "NIRS-2019-08-23_001.tmp", + ) fname = tmp_path / "data" / "NIRS-2019-08-23_001.hdr" - with pytest.warns(RuntimeWarning, match='A single dat'): + with pytest.warns(RuntimeWarning, match="A single dat"): read_raw_nirx(fname, preload=True) @@ -277,36 +276,48 @@ def test_nirx_15_2_short(): # Test data import assert raw._data.shape == (26, 145) - assert raw.info['sfreq'] == 12.5 - assert raw.info['meas_date'] == dt.datetime(2019, 8, 23, 7, 37, 4, 540000, - tzinfo=dt.timezone.utc) + assert raw.info["sfreq"] == 12.5 + assert raw.info["meas_date"] == dt.datetime( + 2019, 8, 23, 7, 37, 4, 540000, tzinfo=dt.timezone.utc + ) # Test channel naming - assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D1 850", - "S1_D9 760", "S1_D9 850"] - assert raw.info['ch_names'][24:26] == ["S5_D13 760", "S5_D13 850"] + assert raw.info["ch_names"][:4] == [ + "S1_D1 760", + "S1_D1 850", + "S1_D9 760", + "S1_D9 850", + ] + assert raw.info["ch_names"][24:26] == ["S5_D13 760", "S5_D13 850"] # Test frequency encoding - assert raw.info['chs'][0]['loc'][9] == 760 - assert raw.info['chs'][1]['loc'][9] == 850 + assert raw.info["chs"][0]["loc"][9] == 760 + assert raw.info["chs"][1]["loc"][9] == 850 # Test info import - assert raw.info['subject_info'] == dict(sex=1, first_name="MNE", - middle_name="Test", - last_name="Recording", - birthday=(2014, 8, 23), - his_id="MNE_Test_Recording") + assert raw.info["subject_info"] == dict( + sex=1, + first_name="MNE", + middle_name="Test", + last_name="Recording", + birthday=dt.date(2014, 8, 23), + his_id="MNE_Test_Recording", + ) # Test distance between optodes matches values from # nirsite https://github.com/mne-tools/mne-testing-data/pull/51 # step 4 figure 2 allowed_distance_error = 0.0002 - assert_allclose(source_detector_distances(raw.copy(). - pick("S1_D1 760").info), - [0.0304], atol=allowed_distance_error) - assert_allclose(source_detector_distances(raw.copy(). 
- pick("S2_D10 760").info), - [0.0086], atol=allowed_distance_error) + assert_allclose( + source_detector_distances(raw.copy().pick("S1_D1 760").info), + [0.0304], + atol=allowed_distance_error, + ) + assert_allclose( + source_detector_distances(raw.copy().pick("S2_D10 760").info), + [0.0086], + atol=allowed_distance_error, + ) # Test which channels are short # These are the ones marked as red at @@ -319,7 +330,7 @@ def test_nirx_15_2_short(): assert_array_equal(is_short[:3:2], [True, True]) # Test trigger events - assert_array_equal(raw.annotations.description, ['3.0', '2.0', '1.0']) + assert_array_equal(raw.annotations.description, ["3.0", "2.0", "1.0"]) # Test location of detectors # The locations of detectors can be seen in the first @@ -331,37 +342,30 @@ def test_nirx_15_2_short(): # 3d locations should be specified in meters, so that's what's tested below # Detector locations are stored in the third three loc values allowed_dist_error = 0.0002 - locs = [ch['loc'][6:9] for ch in raw.info['chs']] - head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + locs = [ch["loc"][6:9] for ch in raw.info["chs"]] + head_mri_t, _ = _get_trans("fsaverage", "head", "mri") mni_locs = apply_trans(head_mri_t, locs) - assert raw.info['ch_names'][0][3:5] == 'D1' - assert_allclose( - mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) + assert raw.info["ch_names"][0][3:5] == "D1" + assert_allclose(mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) - assert raw.info['ch_names'][4][3:5] == 'D3' - assert_allclose( - mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error) + assert raw.info["ch_names"][4][3:5] == "D3" + assert_allclose(mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error) - assert raw.info['ch_names'][8][3:5] == 'D2' - assert_allclose( - mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error) + assert raw.info["ch_names"][8][3:5] == "D2" + assert_allclose(mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error) - assert raw.info['ch_names'][12][3:5] == 'D4' - assert_allclose( - mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error) + assert raw.info["ch_names"][12][3:5] == "D4" + assert_allclose(mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error) - assert raw.info['ch_names'][16][3:5] == 'D5' - assert_allclose( - mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error) + assert raw.info["ch_names"][16][3:5] == "D5" + assert_allclose(mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error) - assert raw.info['ch_names'][19][3:5] == 'D6' - assert_allclose( - mni_locs[19], [0.0352, 0.0283, 0.0780], atol=allowed_dist_error) + assert raw.info["ch_names"][19][3:5] == "D6" + assert_allclose(mni_locs[19], [0.0352, 0.0283, 0.0780], atol=allowed_dist_error) - assert raw.info['ch_names'][21][3:5] == 'D7' - assert_allclose( - mni_locs[21], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error) + assert raw.info["ch_names"][21][3:5] == "D7" + assert_allclose(mni_locs[21], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error) @requires_testing_data @@ -371,34 +375,42 @@ def test_nirx_15_3_short(): # Test data import assert raw._data.shape == (26, 220) - assert raw.info['sfreq'] == 12.5 + assert raw.info["sfreq"] == 12.5 # Test channel naming - assert raw.info['ch_names'][:4] == ["S1_D2 760", "S1_D2 850", - "S1_D9 760", "S1_D9 850"] - assert raw.info['ch_names'][24:26] == ["S5_D13 760", "S5_D13 850"] + assert raw.info["ch_names"][:4] == [ + "S1_D2 760", + "S1_D2 850", + "S1_D9 760", + "S1_D9 850", + ] 
+ assert raw.info["ch_names"][24:26] == ["S5_D13 760", "S5_D13 850"] # Test frequency encoding - assert raw.info['chs'][0]['loc'][9] == 760 - assert raw.info['chs'][1]['loc'][9] == 850 + assert raw.info["chs"][0]["loc"][9] == 760 + assert raw.info["chs"][1]["loc"][9] == 850 # Test info import - assert raw.info['subject_info'] == dict(birthday=(2020, 8, 18), - sex=0, - first_name="testMontage\\0A" - "TestMontage", - his_id="testMontage\\0A" - "TestMontage") + assert raw.info["subject_info"] == dict( + birthday=dt.date(2020, 8, 18), + sex=0, + first_name="testMontage\\0ATestMontage", + his_id="testMontage\\0ATestMontage", + ) # Test distance between optodes matches values from # https://github.com/mne-tools/mne-testing-data/pull/72 allowed_distance_error = 0.001 - assert_allclose(source_detector_distances(raw.copy(). - pick("S1_D2 760").info), - [0.0304], atol=allowed_distance_error) - assert_allclose(source_detector_distances(raw.copy(). - pick("S5_D13 760").info), - [0.0076], atol=allowed_distance_error) + assert_allclose( + source_detector_distances(raw.copy().pick("S1_D2 760").info), + [0.0304], + atol=allowed_distance_error, + ) + assert_allclose( + source_detector_distances(raw.copy().pick("S5_D13 760").info), + [0.0076], + atol=allowed_distance_error, + ) # Test which channels are short # These are the ones marked as red at @@ -411,7 +423,7 @@ def test_nirx_15_3_short(): assert_array_equal(is_short[:3:2], [True, True]) # Test trigger events - assert_array_equal(raw.annotations.description, ['4.0', '2.0', '1.0']) + assert_array_equal(raw.annotations.description, ["4.0", "2.0", "1.0"]) # Test location of detectors # The locations of detectors can be seen in the first @@ -419,70 +431,62 @@ def test_nirx_15_3_short(): # https://github.com/mne-tools/mne-testing-data/pull/72 # And have been manually copied below allowed_dist_error = 0.0002 - locs = [ch['loc'][6:9] for ch in raw.info['chs']] - head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + locs = [ch["loc"][6:9] for ch in raw.info["chs"]] + head_mri_t, _ = _get_trans("fsaverage", "head", "mri") mni_locs = apply_trans(head_mri_t, locs) - assert raw.info['ch_names'][0][3:5] == 'D2' - assert_allclose( - mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) + assert raw.info["ch_names"][0][3:5] == "D2" + assert_allclose(mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) - assert raw.info['ch_names'][4][3:5] == 'D1' - assert_allclose( - mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error) + assert raw.info["ch_names"][4][3:5] == "D1" + assert_allclose(mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error) - assert raw.info['ch_names'][8][3:5] == 'D3' - assert_allclose( - mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error) + assert raw.info["ch_names"][8][3:5] == "D3" + assert_allclose(mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error) - assert raw.info['ch_names'][12][3:5] == 'D4' - assert_allclose( - mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error) + assert raw.info["ch_names"][12][3:5] == "D4" + assert_allclose(mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error) - assert raw.info['ch_names'][16][3:5] == 'D5' - assert_allclose( - mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error) + assert raw.info["ch_names"][16][3:5] == "D5" + assert_allclose(mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error) - assert raw.info['ch_names'][19][3:5] == 'D6' - assert_allclose( - mni_locs[19], [0.0388, -0.0477, 
0.0932], atol=allowed_dist_error) + assert raw.info["ch_names"][19][3:5] == "D6" + assert_allclose(mni_locs[19], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error) - assert raw.info['ch_names'][21][3:5] == 'D7' - assert_allclose( - mni_locs[21], [-0.0394, -0.0483, 0.0928], atol=allowed_dist_error) + assert raw.info["ch_names"][21][3:5] == "D7" + assert_allclose(mni_locs[21], [-0.0394, -0.0483, 0.0928], atol=allowed_dist_error) @requires_testing_data def test_locale_encoding(tmp_path): """Test NIRx encoding.""" - fname = tmp_path / 'latin' + fname = tmp_path / "latin" shutil.copytree(fname_nirx_15_2, fname) hdr_fname = fname / "NIRS-2019-10-02_003.hdr" hdr = list() - with open(hdr_fname, 'rb') as fid: + with open(hdr_fname, "rb") as fid: hdr.extend(line for line in fid) # French hdr[2] = b'Date="jeu. 13 f\xe9vr. 2020"\r\n' - with open(hdr_fname, 'wb') as fid: + with open(hdr_fname, "wb") as fid: for line in hdr: fid.write(line) - read_raw_nirx(fname, verbose='debug') + read_raw_nirx(fname, verbose="debug") # German hdr[2] = b'Date="mi 13 dez 2020"\r\n' - with open(hdr_fname, 'wb') as fid: + with open(hdr_fname, "wb") as fid: for line in hdr: fid.write(line) - read_raw_nirx(fname, verbose='debug') + read_raw_nirx(fname, verbose="debug") # Italian hdr[2] = b'Date="ven 24 gen 2020"\r\n' hdr[3] = b'Time="10:57:41.454"\r\n' - with open(hdr_fname, 'wb') as fid: + with open(hdr_fname, "wb") as fid: for line in hdr: fid.write(line) - raw = read_raw_nirx(fname, verbose='debug') - want_dt = dt.datetime( - 2020, 1, 24, 10, 57, 41, 454000, tzinfo=dt.timezone.utc) - assert raw.info['meas_date'] == want_dt + raw = read_raw_nirx(fname, verbose="debug") + want_dt = dt.datetime(2020, 1, 24, 10, 57, 41, 454000, tzinfo=dt.timezone.utc) + assert raw.info["meas_date"] == want_dt @requires_testing_data @@ -492,43 +496,49 @@ def test_nirx_15_2(): # Test data import assert raw._data.shape == (64, 67) - assert raw.info['sfreq'] == 3.90625 - assert raw.info['meas_date'] == dt.datetime(2019, 10, 2, 9, 8, 47, 511000, - tzinfo=dt.timezone.utc) + assert raw.info["sfreq"] == 3.90625 + assert raw.info["meas_date"] == dt.datetime( + 2019, 10, 2, 9, 8, 47, 511000, tzinfo=dt.timezone.utc + ) # Test channel naming - assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D1 850", - "S1_D10 760", "S1_D10 850"] + assert raw.info["ch_names"][:4] == [ + "S1_D1 760", + "S1_D1 850", + "S1_D10 760", + "S1_D10 850", + ] # Test info import - assert raw.info['subject_info'] == dict(sex=1, first_name="TestRecording", - birthday=(1989, 10, 2), - his_id="TestRecording") + assert raw.info["subject_info"] == dict( + sex=1, + first_name="TestRecording", + birthday=dt.date(1989, 10, 2), + his_id="TestRecording", + ) # Test trigger events - assert_array_equal(raw.annotations.description, ['4.0', '6.0', '2.0']) + assert_array_equal(raw.annotations.description, ["4.0", "6.0", "2.0"]) print(raw.annotations.onset) # Test location of detectors allowed_dist_error = 0.0002 - locs = [ch['loc'][6:9] for ch in raw.info['chs']] - head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + locs = [ch["loc"][6:9] for ch in raw.info["chs"]] + head_mri_t, _ = _get_trans("fsaverage", "head", "mri") mni_locs = apply_trans(head_mri_t, locs) - assert raw.info['ch_names'][0][3:5] == 'D1' - assert_allclose( - mni_locs[0], [-0.0292, 0.0852, -0.0142], atol=allowed_dist_error) + assert raw.info["ch_names"][0][3:5] == "D1" + assert_allclose(mni_locs[0], [-0.0292, 0.0852, -0.0142], atol=allowed_dist_error) - assert raw.info['ch_names'][15][3:5] == 'D4' - assert_allclose( - 
mni_locs[15], [-0.0739, -0.0756, -0.0075], atol=allowed_dist_error) + assert raw.info["ch_names"][15][3:5] == "D4" + assert_allclose(mni_locs[15], [-0.0739, -0.0756, -0.0075], atol=allowed_dist_error) # Old name aliases for backward compat - assert 'fnirs_cw_amplitude' in raw - with pytest.raises(ValueError, match='Invalid value'): - 'fnirs_raw' in raw - assert 'fnirs_od' not in raw - picks = pick_types(raw.info, fnirs='fnirs_cw_amplitude') + assert "fnirs_cw_amplitude" in raw + with pytest.raises(ValueError, match="Invalid value"): + "fnirs_raw" in raw + assert "fnirs_od" not in raw + picks = pick_types(raw.info, fnirs="fnirs_cw_amplitude") assert len(picks) > 0 @@ -548,78 +558,248 @@ def test_nirx_15_0(): # Test data import assert raw._data.shape == (20, 92) - assert raw.info['sfreq'] == 6.25 - assert raw.info['meas_date'] == dt.datetime(2019, 10, 27, 13, 53, 34, - 209000, - tzinfo=dt.timezone.utc) + assert raw.info["sfreq"] == 6.25 + assert raw.info["meas_date"] == dt.datetime( + 2019, 10, 27, 13, 53, 34, 209000, tzinfo=dt.timezone.utc + ) # Test channel naming - assert raw.info['ch_names'][:12] == ["S1_D1 760", "S1_D1 850", - "S2_D2 760", "S2_D2 850", - "S3_D3 760", "S3_D3 850", - "S4_D4 760", "S4_D4 850", - "S5_D5 760", "S5_D5 850", - "S6_D6 760", "S6_D6 850"] + assert raw.info["ch_names"][:12] == [ + "S1_D1 760", + "S1_D1 850", + "S2_D2 760", + "S2_D2 850", + "S3_D3 760", + "S3_D3 850", + "S4_D4 760", + "S4_D4 850", + "S5_D5 760", + "S5_D5 850", + "S6_D6 760", + "S6_D6 850", + ] # Test info import - assert raw.info['subject_info'] == {'birthday': (2004, 10, 27), - 'first_name': 'NIRX', - 'last_name': 'Test', - 'sex': FIFF.FIFFV_SUBJ_SEX_UNKNOWN, - 'his_id': "NIRX_Test"} + assert raw.info["subject_info"] == { + "birthday": dt.date(2004, 10, 27), + "first_name": "NIRX", + "last_name": "Test", + "sex": FIFF.FIFFV_SUBJ_SEX_UNKNOWN, + "his_id": "NIRX_Test", + } # Test trigger events - assert_array_equal(raw.annotations.description, ['1.0', '2.0', '2.0']) + assert_array_equal(raw.annotations.description, ["1.0", "2.0", "2.0"]) # Test location of detectors allowed_dist_error = 0.0002 - locs = [ch['loc'][6:9] for ch in raw.info['chs']] - head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + locs = [ch["loc"][6:9] for ch in raw.info["chs"]] + head_mri_t, _ = _get_trans("fsaverage", "head", "mri") mni_locs = apply_trans(head_mri_t, locs) - assert raw.info['ch_names'][0][3:5] == 'D1' - assert_allclose( - mni_locs[0], [0.0287, -0.1143, -0.0332], atol=allowed_dist_error) + assert raw.info["ch_names"][0][3:5] == "D1" + assert_allclose(mni_locs[0], [0.0287, -0.1143, -0.0332], atol=allowed_dist_error) - assert raw.info['ch_names'][15][3:5] == 'D8' - assert_allclose( - mni_locs[15], [-0.0693, -0.0480, 0.0657], atol=allowed_dist_error) + assert raw.info["ch_names"][15][3:5] == "D8" + assert_allclose(mni_locs[15], [-0.0693, -0.0480, 0.0657], atol=allowed_dist_error) # Test distance between optodes matches values from allowed_distance_error = 0.0002 - assert_allclose(source_detector_distances(raw.copy(). - pick("S1_D1 760").info), - [0.0300], atol=allowed_distance_error) - assert_allclose(source_detector_distances(raw.copy(). 
- pick("S7_D7 760").info), - [0.0392], atol=allowed_distance_error) + assert_allclose( + source_detector_distances(raw.copy().pick("S1_D1 760").info), + [0.0300], + atol=allowed_distance_error, + ) + assert_allclose( + source_detector_distances(raw.copy().pick("S7_D7 760").info), + [0.0392], + atol=allowed_distance_error, + ) @requires_testing_data -@pytest.mark.parametrize('fname, boundary_decimal', ( - [fname_nirx_15_2_short, 1], - [fname_nirx_15_2, 0], - [fname_nirx_15_2, 0], - [nirsport2_2021_9, 0], -)) +@pytest.mark.parametrize( + "fname, boundary_decimal", + ( + [fname_nirx_15_2_short, 1], + [fname_nirx_15_2, 0], + [fname_nirx_15_2, 0], + [nirsport2_2021_9, 0], + ), +) def test_nirx_standard(fname, boundary_decimal): """Test standard operations.""" - _test_raw_reader(read_raw_nirx, fname=fname, - boundary_decimal=boundary_decimal) # low fs + _test_raw_reader( + read_raw_nirx, fname=fname, boundary_decimal=boundary_decimal + ) # low fs # Below are the native (on-disk) orders, which should be preserved @requires_testing_data -@pytest.mark.parametrize('fname, want_order', [ - (fname_nirx_15_0, ['S1_D1', 'S2_D2', 'S3_D3', 'S4_D4', 'S5_D5', 'S6_D6', 'S7_D7', 'S8_D8', 'S9_D9', 'S10_D10']), # noqa: E501 - (fname_nirx_15_2, ['S1_D1', 'S1_D10', 'S2_D1', 'S2_D2', 'S3_D2', 'S3_D3', 'S4_D3', 'S4_D4', 'S5_D4', 'S5_D5', 'S6_D5', 'S6_D6', 'S7_D6', 'S7_D7', 'S8_D7', 'S8_D8', 'S9_D8', 'S9_D9', 'S10_D9', 'S10_D10', 'S11_D11', 'S11_D12', 'S12_D12', 'S12_D13', 'S13_D13', 'S13_D14', 'S14_D14', 'S14_D15', 'S15_D15', 'S15_D16', 'S16_D11', 'S16_D16']), # noqa: E501 - (fname_nirx_15_2_short, ['S1_D1', 'S1_D9', 'S2_D3', 'S2_D10', 'S3_D2', 'S3_D11', 'S4_D4', 'S4_D12', 'S5_D5', 'S5_D6', 'S5_D7', 'S5_D8', 'S5_D13']), # noqa: E501 - (fname_nirx_15_3_short, ['S1_D2', 'S1_D9', 'S2_D1', 'S2_D10', 'S3_D3', 'S3_D11', 'S4_D4', 'S4_D12', 'S5_D5', 'S5_D6', 'S5_D7', 'S5_D8', 'S5_D13']), # noqa: E501 - (nirsport1_wo_sat, ['S1_D4', 'S1_D5', 'S1_D6', 'S2_D5', 'S2_D6', 'S3_D5', 'S4_D1', 'S4_D3', 'S4_D4', 'S5_D1', 'S5_D2', 'S6_D1', 'S6_D3']), # noqa: E501 - (nirsport2, ['S1_D1', 'S1_D6', 'S1_D9', 'S2_D2', 'S2_D10', 'S3_D5', 'S3_D7', 'S3_D11', 'S4_D8', 'S4_D12', 'S5_D3', 'S5_D13', 'S6_D4', 'S6_D14', 'S7_D1', 'S7_D6', 'S7_D15', 'S8_D5', 'S8_D7', 'S8_D16']), # noqa: E501 - (nirsport2_2021_9, ['S1_D1', 'S1_D3', 'S2_D1', 'S2_D2', 'S2_D4', 'S3_D2', 'S3_D5', 'S4_D1', 'S4_D3', 'S4_D4', 'S4_D6', 'S5_D2', 'S5_D4', 'S5_D5', 'S5_D7', 'S6_D3', 'S6_D6', 'S7_D4', 'S7_D6', 'S7_D7', 'S8_D5', 'S8_D7']), # noqa: E501 -]) +@pytest.mark.parametrize( + "fname, want_order", + [ + ( + fname_nirx_15_0, + [ + "S1_D1", + "S2_D2", + "S3_D3", + "S4_D4", + "S5_D5", + "S6_D6", + "S7_D7", + "S8_D8", + "S9_D9", + "S10_D10", + ], + ), # noqa: E501 + ( + fname_nirx_15_2, + [ + "S1_D1", + "S1_D10", + "S2_D1", + "S2_D2", + "S3_D2", + "S3_D3", + "S4_D3", + "S4_D4", + "S5_D4", + "S5_D5", + "S6_D5", + "S6_D6", + "S7_D6", + "S7_D7", + "S8_D7", + "S8_D8", + "S9_D8", + "S9_D9", + "S10_D9", + "S10_D10", + "S11_D11", + "S11_D12", + "S12_D12", + "S12_D13", + "S13_D13", + "S13_D14", + "S14_D14", + "S14_D15", + "S15_D15", + "S15_D16", + "S16_D11", + "S16_D16", + ], + ), # noqa: E501 + ( + fname_nirx_15_2_short, + [ + "S1_D1", + "S1_D9", + "S2_D3", + "S2_D10", + "S3_D2", + "S3_D11", + "S4_D4", + "S4_D12", + "S5_D5", + "S5_D6", + "S5_D7", + "S5_D8", + "S5_D13", + ], + ), # noqa: E501 + ( + fname_nirx_15_3_short, + [ + "S1_D2", + "S1_D9", + "S2_D1", + "S2_D10", + "S3_D3", + "S3_D11", + "S4_D4", + "S4_D12", + "S5_D5", + "S5_D6", + "S5_D7", + "S5_D8", + "S5_D13", + ], + ), # 
noqa: E501 + ( + nirsport1_wo_sat, + [ + "S1_D4", + "S1_D5", + "S1_D6", + "S2_D5", + "S2_D6", + "S3_D5", + "S4_D1", + "S4_D3", + "S4_D4", + "S5_D1", + "S5_D2", + "S6_D1", + "S6_D3", + ], + ), # noqa: E501 + ( + nirsport2, + [ + "S1_D1", + "S1_D6", + "S1_D9", + "S2_D2", + "S2_D10", + "S3_D5", + "S3_D7", + "S3_D11", + "S4_D8", + "S4_D12", + "S5_D3", + "S5_D13", + "S6_D4", + "S6_D14", + "S7_D1", + "S7_D6", + "S7_D15", + "S8_D5", + "S8_D7", + "S8_D16", + ], + ), # noqa: E501 + ( + nirsport2_2021_9, + [ + "S1_D1", + "S1_D3", + "S2_D1", + "S2_D2", + "S2_D4", + "S3_D2", + "S3_D5", + "S4_D1", + "S4_D3", + "S4_D4", + "S4_D6", + "S5_D2", + "S5_D4", + "S5_D5", + "S5_D7", + "S6_D3", + "S6_D6", + "S7_D4", + "S7_D6", + "S7_D7", + "S8_D5", + "S8_D7", + ], + ), # noqa: E501 + ], +) def test_channel_order(fname, want_order): """Test that logical channel order is preserved.""" raw = read_raw_nirx(fname) diff --git a/mne/io/nsx/__init__.py b/mne/io/nsx/__init__.py new file mode 100644 index 00000000000..cb2500e2235 --- /dev/null +++ b/mne/io/nsx/__init__.py @@ -0,0 +1,7 @@ +"""NSx module for reading Blackrock Microsystem files.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .nsx import read_raw_nsx diff --git a/mne/io/nsx/nsx.py b/mne/io/nsx/nsx.py new file mode 100644 index 00000000000..1fc8a6968ea --- /dev/null +++ b/mne/io/nsx/nsx.py @@ -0,0 +1,537 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +from datetime import datetime, timezone + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _file_size, _read_segments_file +from ...annotations import Annotations +from ...utils import _check_fname, fill_doc, logger, warn +from ..base import BaseRaw, _get_scaling + +CH_TYPE_MAPPING = { + "CC": "SEEG", +} + + +# See https://blackrockneurotech.com/wp-content/uploads/LB-0023-7.00_NEV_File_Format.pdf +DATA_BYTE_SIZE = 2 +ORIG_FORMAT = "short" + + +nsx_header_dict = { + "basic": [ + ("file_id", "S8"), # achFileType + # file specification split into major and minor version number + ("ver_major", "uint8"), + ("ver_minor", "uint8"), + # bytes of basic & extended header + ("bytes_in_headers", "uint32"), + # label of the sampling group (e.g., "1 kS/s" or "LFP low") + ("label", "S16"), + ("comment", "S256"), + ("period", "uint32"), + ("timestamp_resolution", "uint32"), + # time origin: 2byte uint16 values for ... 
+        ("year", "uint16"),
+        ("month", "uint16"),
+        ("weekday", "uint16"),
+        ("day", "uint16"),
+        ("hour", "uint16"),
+        ("minute", "uint16"),
+        ("second", "uint16"),
+        ("millisecond", "uint16"),
+        # number of channel_count matches number of extended headers
+        ("channel_count", "uint32"),
+    ],
+    "extended": [
+        ("type", "S2"),
+        ("electrode_id", "uint16"),
+        ("electrode_label", "S16"),
+        # used front-end amplifier bank (e.g., A, B, C, D)
+        ("physical_connector", "uint8"),
+        # used connector pin (e.g., 1-37 on bank A, B, C or D)
+        ("connector_pin", "uint8"),
+        # digital and analog value ranges of the signal
+        ("min_digital_val", "int16"),
+        ("max_digital_val", "int16"),
+        ("min_analog_val", "int16"),
+        ("max_analog_val", "int16"),
+        # units of the analog range values ("mV" or "uV")
+        ("units", "S16"),
+        # filter settings used to create nsx from source signal
+        ("hi_freq_corner", "uint32"),
+        ("hi_freq_order", "uint32"),
+        ("hi_freq_type", "uint16"),  # 0=None, 1=Butterworth
+        ("lo_freq_corner", "uint32"),
+        ("lo_freq_order", "uint32"),
+        ("lo_freq_type", "uint16"),
+    ],  # 0=None, 1=Butterworth,
+    "data>2.1<3": [
+        ("header", "uint8"),
+        ("timestamp", "uint32"),
+        ("nb_data_points", "uint32"),
+    ],
+    "data>=3": [
+        ("header", "uint8"),
+        ("timestamp", "uint64"),
+        ("nb_data_points", "uint32"),
+    ],
+}
+
+
+@fill_doc
+def read_raw_nsx(
+    input_fname, stim_channel=True, eog=None, misc=None, preload=False, *, verbose=None
+) -> "RawNSX":
+    """Reader function for NSx (Blackrock Microsystems) files.
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the NSx file.
+    stim_channel : ``'auto'`` | str | list of str | int | list of int
+        Defaults to ``'auto'``, which means that channels named ``'status'`` or
+        ``'trigger'`` (case insensitive) are set to STIM. If str (or list of
+        str), all channels matching the name(s) are set to STIM. If int (or
+        list of ints), channels corresponding to the indices are set to STIM.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated EOG
+        channels. Values should correspond to the electrodes in the file.
+        Default is None.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated MISC
+        channels. Values should correspond to the electrodes in the file.
+        Default is None.
+    %(preload)s
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawNSX
+        The raw instance.
+        See :class:`mne.io.Raw` for documentation of attributes and methods.
+
+    Notes
+    -----
+    NSx files with id (= NEURALSG), i.e., version 2.1, are currently not
+    supported.
+
+    If channels named 'status' or 'trigger' are present, they are considered as
+    STIM channels by default. Use :func:`mne.find_events` to parse events
+    encoded in such analog stim channels.
+    """
+    input_fname = _check_fname(
+        input_fname, overwrite="read", must_exist=True, name="input_fname"
+    )
+    if not input_fname.suffix.lower().startswith(".ns"):
+        raise NotImplementedError(
+            f"Only NSx files are supported, got {input_fname.suffix}."
+        )
+    return RawNSX(
+        input_fname, stim_channel, eog, misc, preload=preload, verbose=verbose
+    )
+
+
+@fill_doc
+class RawNSX(BaseRaw):
+    """Raw object from NSx file from Blackrock Microsystems.
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the NSx file.
+    stim_channel : ``'auto'`` | str | list of str | int | list of int
+        Defaults to ``'auto'``, which means that channels named ``'status'`` or
+        ``'trigger'`` (case insensitive) are set to STIM. If str (or list of
+        str), all channels matching the name(s) are set to STIM.
If int (or + list of ints), channels corresponding to the indices are set to STIM. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + %(preload)s + %(verbose)s + + Notes + ----- + NSx files with id (= NEURALSG), i.e., version 2.1 is currently not + supported. + + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. + """ + + def __init__( + self, + input_fname, + stim_channel="auto", + eog=None, + misc=None, + preload=False, + verbose=None, + ): + logger.info(f"Extracting NSX parameters from {input_fname}...") + input_fname = os.path.abspath(input_fname) + ( + info, + data_fname, + fmt, + n_samples, + orig_format, + raw_extras, + orig_units, + ) = _get_hdr_info(input_fname, stim_channel=stim_channel, eog=eog, misc=misc) + raw_extras["orig_format"] = orig_format + first_samps = (raw_extras["timestamp"][0],) + super().__init__( + info, + first_samps=first_samps, + last_samps=[first_samps[0] + n_samples - 1], + filenames=[data_fname], + orig_format=orig_format, + preload=preload, + verbose=verbose, + raw_extras=[raw_extras], + orig_units=orig_units, + ) + + # Add annotations for in-data skips + if len(self._raw_extras[0]["timestamp"]) > 1: + starts = ( + self._raw_extras[0]["timestamp"] + self._raw_extras[0]["nb_data_points"] + )[:-1] + 1 + stops = self._raw_extras[0]["timestamp"][1:] - 1 + durations = (stops - starts + 1) / self.info["sfreq"] + annot = Annotations( + onset=(starts / self.info["sfreq"]), + duration=durations, + description="BAD_ACQ_SKIP", + orig_time=self.info["meas_date"], + ) + self.set_annotations(annot) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + dtype = self._raw_extras[fi]["orig_format"] + first_samps = self._raw_extras[fi]["timestamp"] + recording_extents = self._raw_extras[fi]["nb_data_points"] + offsets = self._raw_extras[fi]["offset_to_data_block"] + for first_samp, recording_extent, offset in zip( + first_samps, recording_extents, offsets + ): + if start > first_samp + recording_extent or stop < first_samp: + # There is nothing to read in this chunk + continue + i_start = max(start, first_samp) + i_stop = min(stop, first_samp + recording_extent) + _read_segments_file( + self, + data[:, i_start - start : i_stop - start], + idx, + fi, + i_start - first_samp, + i_stop - first_samp, + cals, + mult, + dtype, + n_channels=None, + offset=offset, + trigger_ch=None, + ) + + +def _read_header(fname): + nsx_file_id = np.fromfile(fname, count=1, dtype=[("file_id", "S8")])[0][ + "file_id" + ].decode() + + if nsx_file_id in ["NEURALCD", "BRSMPGRP"]: + basic_header = _read_header_22_and_above(fname) + elif nsx_file_id == "NEURALSG": + raise NotImplementedError( + "NSx file id (= NEURALSG), i.e., file" + " version 2.1 is currently not supported." 
+ ) + else: + raise ValueError( + f"NSx file id (={nsx_file_id}) does not match" + " with supported file ids:" + " ('NEURALCD', 'BRSMPGRP')" + ) + + time_origin = datetime( + *[ + basic_header.pop(xx) + for xx in ( + "year", + "month", + "day", + "hour", + "minute", + "second", + "millisecond", + ) + ], + tzinfo=timezone.utc, + ) + basic_header["meas_date"] = time_origin + return basic_header + + +def _read_header_22_and_above(fname): + basic_header = {} + dtype0 = nsx_header_dict["basic"] + dtype1 = nsx_header_dict["extended"] + + nsx_file_header = np.fromfile(fname, count=1, dtype=dtype0)[0] + basic_header.update( + {name: nsx_file_header[name] for name in nsx_file_header.dtype.names} + ) + + offset_dtype0 = np.dtype(dtype0).itemsize + shape = nsx_file_header["channel_count"] + basic_header["extended"] = np.memmap( + fname, shape=shape, offset=offset_dtype0, dtype=dtype1, mode="r" + ) + + # The following values are stored in mHz + # See: + # https://blackrockneurotech.com/wp-content/uploads/LB-0023-7.00_NEV_File_Format.pdf + basic_header["highpass"] = basic_header["extended"]["hi_freq_corner"] + basic_header["lowpass"] = basic_header["extended"]["lo_freq_corner"] + for x in ["highpass", "lowpass"]: + basic_header[x] = basic_header[x] * 1e-3 + + ver_major, ver_minor = basic_header.pop("ver_major"), basic_header.pop("ver_minor") + basic_header["spec"] = f"{ver_major}.{ver_minor}" + + data_header = list() + index = 0 + offset = basic_header["bytes_in_headers"] + filesize = _file_size(fname) + if float(basic_header["spec"]) < 3.0: + dtype2 = nsx_header_dict["data>2.1<3"] + else: + dtype2 = nsx_header_dict["data>=3"] + while offset < filesize: + dh = np.memmap(fname, dtype=dtype2, shape=1, offset=offset, mode="r")[0] + data_header.append( + { + "header": dh["header"], + "timestamp": dh["timestamp"], + "nb_data_points": dh["nb_data_points"], + "offset_to_data_block": offset + dh.dtype.itemsize, + } + ) + # data size = number of data points * (data_bytes * number of channels) + # use of `int` avoids overflow problem + data_size = ( + int(dh["nb_data_points"]) + * int(basic_header["channel_count"]) + * DATA_BYTE_SIZE + ) + # define new offset (to possible next data block) + offset = data_header[index]["offset_to_data_block"] + data_size + index += 1 + + basic_header["data_header"] = data_header + return basic_header + + +def _get_hdr_info(fname, stim_channel=True, eog=None, misc=None): + """Read header information NSx file.""" + eog = eog if eog is not None else [] + misc = misc if misc is not None else [] + + nsx_info = _read_header(fname) + ch_names = list(nsx_info["extended"]["electrode_label"]) + ch_types = list(nsx_info["extended"]["type"]) + ch_units = list(nsx_info["extended"]["units"]) + ch_names, ch_types, ch_units = ( + list(map(bytes.decode, xx)) for xx in (ch_names, ch_types, ch_units) + ) + max_analog_val = nsx_info["extended"]["max_analog_val"].astype("double") + min_analog_val = nsx_info["extended"]["min_analog_val"].astype("double") + max_digital_val = nsx_info["extended"]["max_digital_val"].astype("double") + min_digital_val = nsx_info["extended"]["min_digital_val"].astype("double") + cals = (max_analog_val - min_analog_val) / (max_digital_val - min_digital_val) + + stim_channel_idxs, _ = _check_stim_channel(stim_channel, ch_names) + + nchan = int(nsx_info["channel_count"]) + logger.info("Setting channel info structure...") + chs = list() + pick_mask = np.ones(len(ch_names)) + + orig_units = {} + for idx, ch_name in enumerate(ch_names): + chan_info = {} + chan_info["logno"] = 
int(nsx_info["extended"]["electrode_id"][idx]) + chan_info["scanno"] = int(nsx_info["extended"]["electrode_id"][idx]) + chan_info["ch_name"] = ch_name + chan_info["unit_mul"] = FIFF.FIFF_UNITM_NONE + ch_unit = ch_units[idx] + chan_info["unit"] = FIFF.FIFF_UNIT_V + # chan_info["range"] = _unit_range_dict[ch_units[idx]] + chan_info["range"] = 1 / _get_scaling("eeg", ch_units[idx]) + chan_info["cal"] = cals[idx] + chan_info["coord_frame"] = FIFF.FIFFV_COORD_HEAD + chan_info["coil_type"] = FIFF.FIFFV_COIL_EEG + chan_info["kind"] = FIFF.FIFFV_SEEG_CH + # montage can't be stored in NSx so channel locs are unknown: + chan_info["loc"] = np.full(12, np.nan) + orig_units[ch_name] = ch_unit + + # if the NSx info contained channel type information + # set it now. They are always set to 'CC'. + # If not inferable, set it to 'SEEG' with a warning. + ch_type = ch_types[idx] + ch_const = getattr(FIFF, f"FIFFV_{CH_TYPE_MAPPING.get(ch_type, 'SEEG')}_CH") + chan_info["kind"] = ch_const + # if user passes in explicit mapping for eog, misc and stim + # channels set them here. + if ch_name in eog or idx in eog or idx - nchan in eog: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["kind"] = FIFF.FIFFV_EOG_CH + pick_mask[idx] = False + elif ch_name in misc or idx in misc or idx - nchan in misc: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["kind"] = FIFF.FIFFV_MISC_CH + pick_mask[idx] = False + elif idx in stim_channel_idxs: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["unit"] = FIFF.FIFF_UNIT_NONE + chan_info["kind"] = FIFF.FIFFV_STIM_CH + pick_mask[idx] = False + chan_info["ch_name"] = ch_name + ch_names[idx] = chan_info["ch_name"] + chs.append(chan_info) + + sfreq = nsx_info["timestamp_resolution"] / nsx_info["period"] + info = _empty_info(sfreq) + info["meas_date"] = nsx_info["meas_date"] + info["chs"] = chs + info["ch_names"] = ch_names + + highpass = nsx_info["highpass"][:128] + lowpass = nsx_info["lowpass"][:128] + _decode_online_filters(info, highpass, lowpass) + + # Some keys to be consistent with FIF measurement info + info["description"] = None + + info._unlocked = False + info._update_redundant() + + orig_format = ORIG_FORMAT + + raw_extras = { + key: [r[key] for r in nsx_info["data_header"]] + for key in nsx_info["data_header"][0] + } + for key in raw_extras: + raw_extras[key] = np.array(raw_extras[key], int) + good_data_packets = raw_extras.pop("header") == 1 + if not good_data_packets.any(): + raise RuntimeError("NSx file appears to be broken") + raw_extras = {key: raw_extras[key][good_data_packets] for key in raw_extras.keys()} + raw_extras["timestamp"] = raw_extras["timestamp"] // nsx_info["period"] + first_samp = raw_extras["timestamp"][0] + last_samp = raw_extras["timestamp"][-1] + raw_extras["nb_data_points"][-1] + n_samples = last_samp - first_samp + + return ( + info, + fname, + nsx_info["spec"], + n_samples, + orig_format, + raw_extras, + orig_units, + ) + + +def _decode_online_filters(info, highpass, lowpass): + """Decode low/high-pass filters that are applied online.""" + if np.all(highpass == highpass[0]): + if highpass[0] == "NaN": + # Placeholder for future use. Highpass set in _empty_info. + pass + else: + hp = float(highpass[0]) + info["highpass"] = hp + else: + info["highpass"] = float(np.max(highpass)) + warn( + "Channels contain different highpass filters. Highest filter " + "setting will be stored." + ) + + if np.all(lowpass == lowpass[0]): + if lowpass[0] in ("NaN", "0", "0.0"): + # Placeholder for future use. Lowpass set in _empty_info. 
+ pass + else: + info["lowpass"] = float(lowpass[0]) + else: + info["lowpass"] = float(np.min(lowpass)) + warn( + "Channels contain different lowpass filters. Lowest filter " + "setting will be stored." + ) + + +def _check_stim_channel(stim_channel, ch_names): + """Check that the stimulus channel exists in the current datafile.""" + DEFAULT_STIM_CH_NAMES = ["status", "trigger"] + + if stim_channel is None or stim_channel is False: + return [], [] + + if stim_channel is True: # convenient aliases + stim_channel = "auto" + + if isinstance(stim_channel, str): + if stim_channel == "auto": + if "auto" in ch_names: + warn( + RuntimeWarning, + "Using `stim_channel='auto'` when auto" + " also corresponds to a channel name is ambiguous." + " Please use `stim_channel=['auto']`.", + ) + else: + valid_stim_ch_names = DEFAULT_STIM_CH_NAMES + else: + valid_stim_ch_names = [stim_channel.lower()] + + elif isinstance(stim_channel, int): + valid_stim_ch_names = [ch_names[stim_channel].lower()] + + elif isinstance(stim_channel, list): + if all([isinstance(s, str) for s in stim_channel]): + valid_stim_ch_names = [s.lower() for s in stim_channel] + elif all([isinstance(s, int) for s in stim_channel]): + valid_stim_ch_names = [ch_names[s].lower() for s in stim_channel] + else: + raise ValueError("Invalid stim_channel") + else: + raise ValueError("Invalid stim_channel") + + ch_names_low = [ch.lower() for ch in ch_names] + found = list(set(valid_stim_ch_names) & set(ch_names_low)) + + stim_channel_idxs = [ch_names_low.index(f) for f in found] + names = [ch_names[idx] for idx in stim_channel_idxs] + return stim_channel_idxs, names diff --git a/mne/io/nsx/tests/test_nsx.py b/mne/io/nsx/tests/test_nsx.py new file mode 100644 index 00000000000..fa1f708adc5 --- /dev/null +++ b/mne/io/nsx/tests/test_nsx.py @@ -0,0 +1,282 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
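A minimal usage sketch of the reader introduced above (illustrative only: the file name is hypothetical, and the `mne.find_events` call only succeeds if the recording actually contains an analog 'status' or 'trigger' channel); the tests below exercise the same paths against the testing dataset:

import mne
from mne.io import read_raw_nsx

# Hypothetical Blackrock recording; any file with an .ns* suffix.
raw = read_raw_nsx("recording.ns3", stim_channel="auto", preload=True)

# Gaps between data packets are exposed as BAD_ACQ_SKIP annotations.
print(raw.annotations)

# Channels named 'status' or 'trigger' are typed as STIM by default, so
# events can be recovered with the usual MNE machinery (this assumes such
# a channel is present in the file).
events = mne.find_events(raw)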
+
+import os
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+from mne import make_fixed_length_epochs
+from mne._fiff.constants import FIFF
+from mne._fiff.meas_info import _empty_info
+from mne.datasets.testing import data_path, requires_testing_data
+from mne.io import read_raw_nsx
+from mne.io.nsx.nsx import _decode_online_filters, _read_header
+from mne.io.tests.test_raw import _test_raw_reader
+
+testing_path = data_path(download=False)
+nsx_21_fname = os.path.join(testing_path, "nsx", "test_NEURALSG_raw.ns3")
+nsx_22_fname = os.path.join(testing_path, "nsx", "test_NEURALCD_raw.ns3")
+nsx_31_fname = os.path.join(testing_path, "nsx", "test_BRSMPGRP_raw.ns3")
+nsx_test_fname = os.path.join(testing_path, "nsx", "Test_anonymized.ns3")
+edf_test_fname = os.path.join(testing_path, "EDF", "test_reduced.edf")
+
+
+def test_decode_online_filters():
+    """Tests for online low/high-pass filter decoding."""
+    info = _empty_info(100.0)
+    highpass = np.array([0.0, 0.1])
+    lowpass = np.array([50, 50])
+    with pytest.warns(RuntimeWarning, match="different highpass filters"):
+        _decode_online_filters(info, highpass, lowpass)
+    assert info["highpass"] == 0.1
+
+    info = _empty_info(100.0)
+    highpass = np.array([0.0, 0.0])
+    lowpass = np.array([40, 50])
+    with pytest.warns(RuntimeWarning, match="different lowpass filters"):
+        _decode_online_filters(info, highpass, lowpass)
+    assert info["lowpass"] == 40
+
+    info = _empty_info(100.0)
+    highpass = np.array(["NaN", "NaN"])
+    lowpass = np.array(["NaN", "NaN"])
+    _decode_online_filters(info, highpass, lowpass)
+    assert info["highpass"] == 0.0
+    assert info["lowpass"] == 50.0
+
+
+@requires_testing_data
+def test_filetype_checks():
+    """Tests for various error messages for wrong file types."""
+    with pytest.raises(ValueError, match="does not match"):
+        _read_header(edf_test_fname)
+    with pytest.raises(NotImplementedError, match="Only NSx files"):
+        read_raw_nsx(edf_test_fname)
+
+
+@requires_testing_data
+def test_nsx_ver_31():
+    """Primary tests for BRSMPGRP reader."""
+    raw = read_raw_nsx(nsx_31_fname)
+    assert getattr(raw, "_data", False) is False
+    assert raw.info["sfreq"] == 2000
+
+    # Check info object
+    assert raw.info["meas_date"].day == 31
+    assert raw.info["meas_date"].year == 2023
+    assert raw.info["meas_date"].month == 1
+    assert raw.info["chs"][0]["cal"] == 0.6103515625
+    assert raw.info["chs"][0]["range"] == 0.001
+
+    # Check raw_extras
+    for r in raw._raw_extras:
+        assert r["orig_format"] == raw.orig_format
+        assert r["orig_nchan"] == 128
+        assert len(r["timestamp"]) == len(r["nb_data_points"])
+        assert len(r["timestamp"]) == len(r["offset_to_data_block"])
+
+    # Check annotations
+    assert raw.annotations[0]["onset"] * raw.info["sfreq"] == 101
+    assert raw.annotations[0]["duration"] * raw.info["sfreq"] == 49
+
+    # Ignore following RuntimeWarning in mne/io/base.py in _write_raw_data
+    # "Acquisition skips detected but did not fit evenly into output"
+    # "buffer_size, will be written as zeroes."
+    with pytest.warns(RuntimeWarning, match="skips detected"):
+        raw = _test_raw_reader(
+            read_raw_nsx,
+            input_fname=nsx_31_fname,
+            eog=None,
+            misc=None,
+            test_scaling=False,
+            test_rank=False,
+        )
+    raw_data, _ = raw[:]
+    n_channels, n_times = raw_data.shape
+    assert n_times == 300
+    assert n_channels == 128
+
+    # Check data
+    # There are two contiguous data packets (samples 0--100 and
+    # samples 150--300.
Each data was generated as: + # ```data = np.ones((n_samples, ch_count)) + # data[n_samples // 2] = np.arange(ch_count) + 10 + # data[:, ch_count // 2] = np.arange(n_samples) + 100``` + orig_data = raw_data / (raw.info["chs"][0]["cal"] * raw.info["chs"][0]["range"]) + assert_allclose(sum(orig_data[:, 50] - 10 - np.arange(n_channels)), 76.0) + + assert_allclose(orig_data[n_channels // 2, :100] - 100, np.arange(100)) + assert_allclose(orig_data[n_channels // 2, 150:] - 100, np.arange(150)) + + data, times = raw.get_data(start=10, stop=20, return_times=True) + assert 128, 10 == data.shape + + data, times = raw.get_data(start=50, stop=110, return_times=True) + assert 128, 50 == data.shape + + data, times = raw.get_data(start=130, stop=170, return_times=True) + assert 128, 40 == data.shape + + data, times = raw.get_data(start=200, stop=350, return_times=True) + assert 128, 100 == data.shape + + data, times = raw.get_data(start=0, stop=300, return_times=True) + epochs = make_fixed_length_epochs(raw, duration=0.05, preload=False) + assert len(epochs.events) == 3 + epochs = make_fixed_length_epochs(raw, duration=0.05, preload=True) + assert len(epochs) == 2 + assert "BAD_ACQ_SKIP" in epochs.drop_log[1] + + +@requires_testing_data +def test_nsx_ver_22(): + """Primary tests for NEURALCD reader.""" + raw = read_raw_nsx( + nsx_22_fname, + ) + assert getattr(raw, "_data", False) is False + assert raw.info["sfreq"] == 2000 + + # Check info object + assert raw.info["meas_date"].day == 31 + assert raw.info["meas_date"].year == 2023 + assert raw.info["meas_date"].month == 1 + assert raw.info["chs"][0]["cal"] == 0.6103515625 + assert raw.info["chs"][0]["range"] == 0.001 + + # check raw_extras + for r in raw._raw_extras: + assert r["orig_format"] == raw.orig_format + assert r["orig_nchan"] == 128 + assert len(r["timestamp"]) == len(r["nb_data_points"]) + assert len(r["timestamp"]) == len(r["offset_to_data_block"]) + + # Check annotations + assert len(raw.annotations) == 0 + + raw = _test_raw_reader( + read_raw_nsx, + input_fname=nsx_22_fname, + eog=None, + misc=None, + test_scaling=False, # XXX this should be True + test_rank=False, + ) + raw_data, _ = raw[:] + n_channels, n_times = raw_data.shape + assert n_times == 100 + assert n_channels == 128 + + # Check data + # There is only one contiguous data packet, samples 0--100. 
Data + # was generated as: + # ```data = np.ones((n_samples, ch_count)) + # data[n_samples // 2] = np.arange(ch_count) + 10 + # data[:, ch_count // 2] = np.arange(n_samples) + 100``` + orig_data = raw_data / (raw.info["chs"][0]["cal"] * raw.info["chs"][0]["range"]) + assert_allclose(sum(orig_data[:, 50] - 10 - np.arange(n_channels)), 76.0) + + assert_allclose(orig_data[n_channels // 2, :100] - 100, np.arange(100)) + + data, _ = raw.get_data(start=10, stop=20, return_times=True) + assert 128, 10 == data.shape + + data, _ = raw.get_data(start=0, stop=300, return_times=True) + assert 128, 100 == data.shape + + epochs = make_fixed_length_epochs(raw, duration=0.05, preload=True, id=1) + assert len(epochs) == 1 + assert epochs.event_id["1"] == 1 + with pytest.raises(ValueError, match="No events produced"): + _ = make_fixed_length_epochs(raw, duration=0.5, preload=True) + + +@requires_testing_data +def test_stim_eog_misc_chs_in_nsx(): + """Test stim/misc/eog channel assignments.""" + raw = read_raw_nsx(nsx_22_fname, stim_channel="elec127", eog=["elec126"]) + assert raw.info["chs"][127]["kind"] == FIFF.FIFFV_STIM_CH + assert raw.info["chs"][126]["kind"] == FIFF.FIFFV_EOG_CH + raw = read_raw_nsx(nsx_22_fname, stim_channel=["elec127"], eog=["elec126"]) + assert raw.info["chs"][127]["kind"] == FIFF.FIFFV_STIM_CH + assert raw.info["chs"][126]["kind"] == FIFF.FIFFV_EOG_CH + raw = read_raw_nsx(nsx_22_fname, stim_channel=127, eog=["elec126"]) + assert raw.info["chs"][127]["kind"] == FIFF.FIFFV_STIM_CH + assert raw.info["chs"][126]["kind"] == FIFF.FIFFV_EOG_CH + raw = read_raw_nsx(nsx_22_fname, stim_channel=[127], eog=["elec126"]) + assert raw.info["chs"][127]["kind"] == FIFF.FIFFV_STIM_CH + assert raw.info["chs"][126]["kind"] == FIFF.FIFFV_EOG_CH + stims = [ch_info["kind"] == FIFF.FIFFV_STIM_CH for ch_info in raw.info["chs"]] + assert np.any(stims) + assert raw.info["chs"][126]["kind"] == FIFF.FIFFV_EOG_CH + with pytest.raises(ValueError, match="Invalid stim_channel"): + raw = read_raw_nsx(nsx_22_fname, stim_channel=["elec128", 129], eog=["elec126"]) + with pytest.raises(ValueError, match="Invalid stim_channel"): + raw = read_raw_nsx(nsx_22_fname, stim_channel=("elec128",), eog=["elec126"]) + + raw = read_raw_nsx(nsx_22_fname, stim_channel="elec127", misc=["elec126", "elec1"]) + assert raw.info["chs"][126]["kind"] == FIFF.FIFFV_MISC_CH + assert raw.info["chs"][1]["kind"] == FIFF.FIFFV_MISC_CH + + +@requires_testing_data +def test_nsx_ver_21(): + """Primary tests for NEURALSG reader.""" + with pytest.raises(NotImplementedError, match="(= NEURALSG)*not supported"): + read_raw_nsx(nsx_21_fname) + + +@requires_testing_data +def test_nsx(): + """Tests for NEURALCD reader using real anonymized data.""" + raw = read_raw_nsx( + nsx_test_fname, + ) + assert getattr(raw, "_data", False) is False + assert raw.info["sfreq"] == 2000 + + # Check info object + assert raw.info["meas_date"].day == 13 + assert raw.info["meas_date"].year == 2000 + assert raw.info["meas_date"].month == 6 + assert raw.info["lowpass"] == 1000 + assert raw.info["highpass"] == 0.3 + assert raw.info["chs"][0]["cal"] == 0.25 + assert raw.info["chs"][0]["range"] == 1e-6 + + # check raw_extras + for r in raw._raw_extras: + assert r["orig_format"] == raw.orig_format + assert r["orig_nchan"] == 5 + assert len(r["timestamp"]) == len(r["nb_data_points"]) + assert len(r["timestamp"]) == len(r["offset_to_data_block"]) + + # Check annotations + assert len(raw.annotations) == 0 + + raw = _test_raw_reader( + read_raw_nsx, + input_fname=nsx_test_fname, + 
eog=None, + misc=None, + ) + raw_data, times = raw[:] + n_channels, n_times = raw_data.shape + assert n_times == 100 + assert times.shape[0] == n_times + assert n_channels == 5 + # Check data + assert_allclose( + raw_data.mean(axis=-1), + np.array([-52.6375, 88.57, 70.5825, -22.055, -166.5]) * 1e-6, # uV + ) + assert raw.first_time == 3.8 + + epochs = make_fixed_length_epochs(raw, duration=0.05, preload=True, id=1) + assert len(epochs) == 1 + assert epochs.event_id["1"] == 1 + with pytest.raises(ValueError, match="No events produced"): + _ = make_fixed_length_epochs(raw, duration=0.5, preload=True) diff --git a/mne/io/open.py b/mne/io/open.py deleted file mode 100644 index 7680c1344b5..00000000000 --- a/mne/io/open.py +++ /dev/null @@ -1,325 +0,0 @@ -# -*- coding: utf-8 -*- -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# -# License: BSD-3-Clause - -import os.path as op -from io import BytesIO, SEEK_SET -from gzip import GzipFile - -import numpy as np - -from .tag import read_tag_info, read_tag, Tag, _call_dict_names -from .tree import make_dir_tree, dir_tree_find -from .constants import FIFF -from ..utils import logger, verbose, _file_like, warn - - -class _NoCloseRead(object): - """Create a wrapper that will not close when used as a context manager.""" - - def __init__(self, fid): - self.fid = fid - - def __enter__(self): - return self.fid - - def __exit__(self, type_, value, traceback): - return - - def close(self): - return - - def seek(self, offset, whence=SEEK_SET): - return self.fid.seek(offset, whence) - - def read(self, size=-1): - return self.fid.read(size) - - -def _fiff_get_fid(fname): - """Open a FIF file with no additional parsing.""" - if _file_like(fname): - fid = _NoCloseRead(fname) - fid.seek(0) - else: - fname = str(fname) - if op.splitext(fname)[1].lower() == '.gz': - logger.debug('Using gzip') - fid = GzipFile(fname, "rb") # Open in binary mode - else: - logger.debug('Using normal I/O') - fid = open(fname, "rb") # Open in binary mode - return fid - - -def _get_next_fname(fid, fname, tree): - """Get the next filename in split files.""" - nodes_list = dir_tree_find(tree, FIFF.FIFFB_REF) - next_fname = None - for nodes in nodes_list: - next_fname = None - for ent in nodes['directory']: - if ent.kind == FIFF.FIFF_REF_ROLE: - tag = read_tag(fid, ent.pos) - role = int(tag.data) - if role != FIFF.FIFFV_ROLE_NEXT_FILE: - next_fname = None - break - if ent.kind == FIFF.FIFF_REF_FILE_NAME: - tag = read_tag(fid, ent.pos) - next_fname = op.join(op.dirname(fname), tag.data) - if ent.kind == FIFF.FIFF_REF_FILE_NUM: - # Some files don't have the name, just the number. So - # we construct the name from the current name. - if next_fname is not None: - continue - next_num = read_tag(fid, ent.pos).data - path, base = op.split(fname) - idx = base.find('.') - idx2 = base.rfind('-') - num_str = base[idx2 + 1:idx] - if not num_str.isdigit(): - idx2 = -1 - - if idx2 < 0 and next_num == 1: - # this is the first file, which may not be numbered - next_fname = op.join( - path, '%s-%d.%s' % (base[:idx], next_num, - base[idx + 1:])) - continue - - next_fname = op.join(path, '%s-%d.%s' - % (base[:idx2], next_num, base[idx + 1:])) - if next_fname is not None: - break - return next_fname - - -@verbose -def fiff_open(fname, preload=False, verbose=None): - """Open a FIF file. - - Parameters - ---------- - fname : path-like | fid - Name of the fif file, or an opened file (will seek back to 0). - preload : bool - If True, all data from the file is read into a memory buffer. 
This - requires more memory, but can be faster for I/O operations that require - frequent seeks. - %(verbose)s - - Returns - ------- - fid : file - The file descriptor of the open file. - tree : fif tree - The tree is a complex structure filled with dictionaries, - lists and tags. - directory : list - A list of tags. - """ - fid = _fiff_get_fid(fname) - try: - return _fiff_open(fname, fid, preload) - except Exception: - fid.close() - raise - - -def _fiff_open(fname, fid, preload): - # do preloading of entire file - if preload: - # note that StringIO objects instantiated this way are read-only, - # but that's okay here since we are using mode "rb" anyway - with fid as fid_old: - fid = BytesIO(fid_old.read()) - - tag = read_tag_info(fid) - - # Check that this looks like a fif file - prefix = f'file {repr(fname)} does not' - if tag.kind != FIFF.FIFF_FILE_ID: - raise ValueError(f'{prefix} start with a file id tag') - - if tag.type != FIFF.FIFFT_ID_STRUCT: - raise ValueError(f'{prefix} start with a file id tag') - - if tag.size != 20: - raise ValueError(f'{prefix} start with a file id tag') - - tag = read_tag(fid) - - if tag.kind != FIFF.FIFF_DIR_POINTER: - raise ValueError(f'{prefix} have a directory pointer') - - # Read or create the directory tree - logger.debug(' Creating tag directory for %s...' % fname) - - dirpos = int(tag.data) - read_slow = True - if dirpos > 0: - dir_tag = read_tag(fid, dirpos) - if dir_tag is None: - warn(f'FIF tag directory missing at the end of the file, possibly ' - f'corrupted file: {fname}') - else: - directory = dir_tag.data - read_slow = False - if read_slow: - fid.seek(0, 0) - directory = list() - while tag.next >= 0: - pos = fid.tell() - tag = read_tag_info(fid) - if tag is None: - break # HACK : to fix file ending with empty tag... - else: - tag.pos = pos - directory.append(tag) - - tree, _ = make_dir_tree(fid, directory) - - logger.debug('[done]') - - # Back to the beginning - fid.seek(0) - - return fid, tree, directory - - -@verbose -def show_fiff(fname, indent=' ', read_limit=np.inf, max_str=30, - output=str, tag=None, verbose=None): - """Show FIFF information. - - This function is similar to mne_show_fiff. - - Parameters - ---------- - fname : path-like - Filename to evaluate. - indent : str - How to indent the lines. - read_limit : int - Max number of bytes of data to read from a tag. Can be np.inf - to always read all data (helps test read completion). - max_str : int - Max number of characters of string representation to print for - each tag's data. - output : type - Either str or list. str is a convenience output for printing. - tag : int | None - Provide information about this tag. If None (default), all information - is shown. - %(verbose)s - - Returns - ------- - contents : str - The contents of the file. 
- """ - if output not in [list, str]: - raise ValueError('output must be list or str') - if isinstance(tag, str): # command mne show_fiff passes string - tag = int(tag) - f, tree, directory = fiff_open(fname) - # This gets set to 0 (unknown) by fiff_open, but FIFFB_ROOT probably - # makes more sense for display - tree['block'] = FIFF.FIFFB_ROOT - with f as fid: - out = _show_tree(fid, tree, indent=indent, level=0, - read_limit=read_limit, max_str=max_str, tag_id=tag) - if output == str: - out = '\n'.join(out) - return out - - -def _find_type(value, fmts=['FIFF_'], exclude=['FIFF_UNIT']): - """Find matching values.""" - value = int(value) - vals = [k for k, v in FIFF.items() - if v == value and any(fmt in k for fmt in fmts) and - not any(exc in k for exc in exclude)] - if len(vals) == 0: - vals = ['???'] - return vals - - -def _show_tree(fid, tree, indent, level, read_limit, max_str, tag_id): - """Show FIFF tree.""" - from scipy import sparse - this_idt = indent * level - next_idt = indent * (level + 1) - # print block-level information - out = [this_idt + str(int(tree['block'])) + ' = ' + - '/'.join(_find_type(tree['block'], fmts=['FIFFB_']))] - tag_found = False - if tag_id is None or out[0].strip().startswith(str(tag_id)): - tag_found = True - - if tree['directory'] is not None: - kinds = [ent.kind for ent in tree['directory']] + [-1] - types = [ent.type for ent in tree['directory']] - sizes = [ent.size for ent in tree['directory']] - poss = [ent.pos for ent in tree['directory']] - counter = 0 - good = True - for k, kn, size, pos, type_ in zip(kinds[:-1], kinds[1:], sizes, poss, - types): - if not tag_found and k != tag_id: - continue - tag = Tag(k, size, 0, pos) - if read_limit is None or size <= read_limit: - try: - tag = read_tag(fid, pos) - except Exception: - good = False - - if kn == k: - # don't print if the next item is the same type (count 'em) - counter += 1 - else: - # find the tag type - this_type = _find_type(k, fmts=['FIFF_']) - # prepend a count if necessary - prepend = 'x' + str(counter + 1) + ': ' if counter > 0 else '' - postpend = '' - # print tag data nicely - if tag.data is not None: - postpend = ' = ' + str(tag.data)[:max_str] - if isinstance(tag.data, np.ndarray): - if tag.data.size > 1: - postpend += ' ... array size=' + str(tag.data.size) - elif isinstance(tag.data, dict): - postpend += ' ... dict len=' + str(len(tag.data)) - elif isinstance(tag.data, str): - postpend += ' ... str len=' + str(len(tag.data)) - elif isinstance(tag.data, (list, tuple)): - postpend += ' ... list len=' + str(len(tag.data)) - elif sparse.issparse(tag.data): - postpend += (' ... sparse (%s) shape=%s' - % (tag.data.getformat(), tag.data.shape)) - else: - postpend += ' ... type=' + str(type(tag.data)) - postpend = '>' * 20 + 'BAD' if not good else postpend - type_ = _call_dict_names.get(type_, '?%s?' 
% (type_,)) - out += [next_idt + prepend + str(k) + ' = ' + - '/'.join(this_type) + - ' (' + str(size) + 'b %s)' % type_ + - postpend] - out[-1] = out[-1].replace('\n', u'¶') - counter = 0 - good = True - if tag_id in kinds: - tag_found = True - if not tag_found: - out = [''] - level = -1 # removes extra indent - # deal with children - for branch in tree['children']: - out += _show_tree(fid, branch, indent, level + 1, read_limit, max_str, - tag_id) - return out diff --git a/mne/io/persyst/__init__.py b/mne/io/persyst/__init__.py index cef562f4af6..aac04211ff8 100644 --- a/mne/io/persyst/__init__.py +++ b/mne/io/persyst/__init__.py @@ -1,7 +1,7 @@ """Persyst module for conversion to FIF.""" -# Author: Adam Li -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from .persyst import read_raw_persyst diff --git a/mne/io/persyst/persyst.py b/mne/io/persyst/persyst.py index a9897d37e67..8de44286445 100644 --- a/mne/io/persyst/persyst.py +++ b/mne/io/persyst/persyst.py @@ -1,6 +1,7 @@ -# Authors: Adam Li -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. + import os import os.path as op from collections import OrderedDict @@ -8,16 +9,16 @@ import numpy as np -from ..base import BaseRaw -from ..constants import FIFF -from ..meas_info import create_info -from ..utils import _mult_cal_one +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ..._fiff.utils import _mult_cal_one from ...annotations import Annotations -from ...utils import logger, verbose, fill_doc, warn, _check_fname +from ...utils import _check_fname, fill_doc, logger, verbose, warn +from ..base import BaseRaw @fill_doc -def read_raw_persyst(fname, preload=False, verbose=None): +def read_raw_persyst(fname, preload=False, verbose=None) -> "RawPersyst": """Reader for a Persyst (.lay/.dat) recording. Parameters @@ -67,16 +68,17 @@ class RawPersyst(BaseRaw): @verbose def __init__(self, fname, preload=False, verbose=None): fname = str(_check_fname(fname, "read", True, "fname")) - logger.info('Loading %s' % fname) + logger.info(f"Loading {fname}") # make sure filename is the Lay file - if not fname.endswith('.lay'): - fname = fname + '.lay' + if not fname.endswith(".lay"): + fname = fname + ".lay" # get the current directory and Lay filename curr_path, lay_fname = op.dirname(fname), op.basename(fname) if not op.exists(fname): - raise FileNotFoundError(f'The path you specified, ' - f'"{lay_fname}",does not exist.') + raise FileNotFoundError( + f'The path you specified, "{lay_fname}",does not exist.' + ) # sections and subsections currently unused keys, data, sections = _read_lay_contents(fname) @@ -93,50 +95,51 @@ def __init__(self, fname, preload=False, verbose=None): # loop through each line in the lay file for key, val, section in zip(keys, data, sections): - if key == '': + if key == "": continue # Make sure key are lowercase for everything, but electrodes. # We also do not want to lower-case comments because those # are free-form text where casing may matter. 
- if key is not None and section not in ['channelmap', - 'comments']: + if key is not None and section not in ["channelmap", "comments"]: key = key.lower() # FileInfo - if section == 'fileinfo': + if section == "fileinfo": # extract the .dat file name - if key == 'file': + if key == "file": dat_fname = op.basename(val) dat_fpath = op.join(curr_path, op.basename(dat_fname)) # determine if .dat file exists where it should - error_msg = f'The data path you specified ' \ - f'does not exist for the lay path, ' \ - f'{lay_fname}. Make sure the dat file ' \ - f'is in the same directory as the lay ' \ - f'file, and the specified dat filename ' \ - f'matches.' + error_msg = ( + f"The data path you specified " + f"does not exist for the lay path, " + f"{lay_fname}. Make sure the dat file " + f"is in the same directory as the lay " + f"file, and the specified dat filename " + f"matches." + ) if not op.exists(dat_fpath): raise FileNotFoundError(error_msg) fileinfo_dict[key] = val # ChannelMap - elif section == 'channelmap': + elif section == "channelmap": # channel map has = for = channelmap_dict[key] = val # Patient (All optional) - elif section == 'patient': + elif section == "patient": patient_dict[key] = val # Comments (turned into mne.Annotations) - elif section == 'comments': + elif section == "comments": comments_dict[key] = comments_dict.get(key, list()) + [val] num_comments += 1 # get numerical metadata # datatype is either 7 for 32 bit, or 0 for 16 bit - datatype = fileinfo_dict.get('datatype') - cal = float(fileinfo_dict.get('calibration')) - n_chs = int(fileinfo_dict.get('waveformcount')) + datatype = fileinfo_dict.get("datatype") + cal = float(fileinfo_dict.get("calibration")) + n_chs = int(fileinfo_dict.get("waveformcount")) # Store subject information from lay file in mne format # Note: Persyst also records "Physician", "Technician", @@ -145,97 +148,103 @@ def __init__(self, fname, preload=False, verbose=None): subject_info = _get_subjectinfo(patient_dict) # set measurement date - testdate = patient_dict.get('testdate') + testdate = patient_dict.get("testdate") if testdate is not None: # TODO: Persyst may change its internal date schemas # without notice # These are the 3 "so far" possible datatime storage # formats in Persyst .lay - if '/' in testdate: - testdate = datetime.strptime(testdate, '%m/%d/%Y') - elif '-' in testdate: - testdate = datetime.strptime(testdate, '%d-%m-%Y') - elif '.' in testdate: - testdate = datetime.strptime(testdate, '%Y.%m.%d') + if "/" in testdate: + testdate = datetime.strptime(testdate, "%m/%d/%Y") + elif "-" in testdate: + testdate = datetime.strptime(testdate, "%d-%m-%Y") + elif "." in testdate: + testdate = datetime.strptime(testdate, "%Y.%m.%d") if not isinstance(testdate, datetime): - warn('Cannot read in the measurement date due ' - 'to incompatible format. Please set manually ' - 'for %s ' % lay_fname) + warn( + "Cannot read in the measurement date due " + "to incompatible format. 
Please set manually " + f"for {lay_fname} " + ) meas_date = None else: - testtime = datetime.strptime(patient_dict.get('testtime'), - '%H:%M:%S') + testtime = datetime.strptime(patient_dict.get("testtime"), "%H:%M:%S") meas_date = datetime( - year=testdate.year, month=testdate.month, - day=testdate.day, hour=testtime.hour, - minute=testtime.minute, second=testtime.second, - tzinfo=timezone.utc) + year=testdate.year, + month=testdate.month, + day=testdate.day, + hour=testtime.hour, + minute=testtime.minute, + second=testtime.second, + tzinfo=timezone.utc, + ) # Create mne structure ch_names = list(channelmap_dict.keys()) if n_chs != len(ch_names): - raise RuntimeError('Channels in lay file do not ' - 'match the number of channels ' - 'in the .dat file.') # noqa + raise RuntimeError( + "Channels in lay file do not " + "match the number of channels " + "in the .dat file." + ) # noqa # get rid of the "-Ref" in channel names - ch_names = [ch.upper().split('-REF')[0] for ch in ch_names] + ch_names = [ch.upper().split("-REF")[0] for ch in ch_names] # get the sampling rate and default channel types to EEG - sfreq = fileinfo_dict.get('samplingrate') - ch_types = 'eeg' + sfreq = fileinfo_dict.get("samplingrate") + ch_types = "eeg" info = create_info(ch_names, sfreq, ch_types=ch_types) info.update(subject_info=subject_info) with info._unlock(): for idx in range(n_chs): # calibration brings to uV then 1e-6 brings to V - info['chs'][idx]['cal'] = cal * 1.0e-6 - info['meas_date'] = meas_date + info["chs"][idx]["cal"] = cal * 1.0e-6 + info["meas_date"] = meas_date # determine number of samples in file # Note: We do not use the lay file to do this # because clips in time may be generated by Persyst that # DO NOT modify the "SampleTimes" section - with open(dat_fpath, 'rb') as f: + with open(dat_fpath, "rb") as f: # determine the precision if int(datatype) == 7: # 32 bit - dtype = np.dtype('i4') + dtype = np.dtype("i4") elif int(datatype) == 0: # 16 bit - dtype = np.dtype('i2') + dtype = np.dtype("i2") else: - raise RuntimeError(f'Unknown format: {datatype}') + raise RuntimeError(f"Unknown format: {datatype}") # allow offset to occur f.seek(0, os.SEEK_END) n_samples = f.tell() n_samples = n_samples // (dtype.itemsize * n_chs) - logger.debug(f'Loaded {n_samples} samples ' - f'for {n_chs} channels.') + logger.debug(f"Loaded {n_samples} samples for {n_chs} channels.") - raw_extras = { - 'dtype': dtype, - 'n_chs': n_chs, - 'n_samples': n_samples - } + raw_extras = {"dtype": dtype, "n_chs": n_chs, "n_samples": n_samples} # create Raw object - super(RawPersyst, self).__init__( - info, preload, filenames=[dat_fpath], + super().__init__( + info, + preload, + filenames=[dat_fpath], last_samps=[n_samples - 1], - raw_extras=[raw_extras], verbose=verbose) + raw_extras=[raw_extras], + verbose=verbose, + ) # set annotations based on the comments read in onset = np.zeros(num_comments, float) duration = np.zeros(num_comments, float) - description = [''] * num_comments + description = [""] * num_comments # loop through comments dictionary, which may contain # multiple events for the same "text" annotation t_idx = 0 for _description, event_tuples in comments_dict.items(): - for (_onset, _duration) in event_tuples: + for _onset, _duration in event_tuples: # extract the onset, duration, description to # create an Annotations object onset[t_idx] = _onset @@ -252,9 +261,9 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): binary files. 
In addition, it stores the calibration to convert data to uV in the lay file. """ - dtype = self._raw_extras[fi]['dtype'] - n_chs = self._raw_extras[fi]['n_chs'] - dat_fname = self._filenames[fi] + dtype = self._raw_extras[fi]["dtype"] + n_chs = self._raw_extras[fi]["n_chs"] + dat_fname = self.filenames[fi] # compute samples count based on start and stop time_length_samps = stop - start @@ -264,17 +273,16 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): count = time_length_samps * n_chs # seek the dat file - with open(dat_fname, 'rb') as dat_file_ID: + with open(dat_fname, "rb") as dat_file_ID: # allow offset to occur dat_file_ID.seek(n_chs * dtype.itemsize * start, 1) # read in the actual record starting at possibly offset - record = np.fromfile(dat_file_ID, dtype=dtype, - count=count) + record = np.fromfile(dat_file_ID, dtype=dtype, count=count) # chs * rows # cast as float32; more than enough precision - record = np.reshape(record, (n_chs, -1), 'F').astype(np.float32) + record = np.reshape(record, (n_chs, -1), order="F").astype(np.float32) # calibrate to convert to V and handle mult _mult_cal_one(data, record, idx, cals, mult) @@ -283,29 +291,30 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): def _get_subjectinfo(patient_dict): # attempt to parse out the birthdate, but if it doesn't # meet spec, then it will set to None - birthdate = patient_dict.get('birthdate') - if '/' in birthdate: + birthdate = patient_dict.get("birthdate") + if "/" in birthdate: try: - birthdate = datetime.strptime(birthdate, '%m/%d/%y') + birthdate = datetime.strptime(birthdate, "%m/%d/%y") except ValueError: birthdate = None - print('Unable to process birthdate of %s ' % birthdate) - elif '-' in birthdate: + print(f"Unable to process birthdate of {birthdate} ") + elif "-" in birthdate: try: - birthdate = datetime.strptime(birthdate, '%d-%m-%y') + birthdate = datetime.strptime(birthdate, "%d-%m-%y") except ValueError: birthdate = None - print('Unable to process birthdate of %s ' % birthdate) + print(f"Unable to process birthdate of {birthdate} ") subject_info = { - 'first_name': patient_dict.get('first'), - 'middle_name': patient_dict.get('middle'), - 'last_name': patient_dict.get('last'), - 'sex': patient_dict.get('sex'), - 'hand': patient_dict.get('hand'), - 'his_id': patient_dict.get('id'), - 'birthday': birthdate, + "first_name": patient_dict.get("first"), + "middle_name": patient_dict.get("middle"), + "last_name": patient_dict.get("last"), + "sex": patient_dict.get("sex"), + "hand": patient_dict.get("hand"), + "his_id": patient_dict.get("id"), + "birthday": birthdate, } + subject_info = {key: val for key, val in subject_info.items() if val is not None} # Recode sex values sex_dict = dict( @@ -314,8 +323,7 @@ def _get_subjectinfo(patient_dict): f=FIFF.FIFFV_SUBJ_SEX_FEMALE, female=FIFF.FIFFV_SUBJ_SEX_FEMALE, ) - subject_info['sex'] = sex_dict.get(subject_info['sex'], - FIFF.FIFFV_SUBJ_SEX_UNKNOWN) + subject_info["sex"] = sex_dict.get(subject_info["sex"], FIFF.FIFFV_SUBJ_SEX_UNKNOWN) # Recode hand values hand_dict = dict( @@ -329,9 +337,9 @@ def _get_subjectinfo(patient_dict): ) # no handedness is set when unknown try: - subject_info['hand'] = hand_dict[subject_info['hand']] + subject_info["hand"] = hand_dict[subject_info["hand"]] except KeyError: - subject_info.pop('hand') + subject_info.pop("hand") return subject_info @@ -343,8 +351,8 @@ def _read_lay_contents(fname): keys, data = [], [] # initialize all section to empty str - section = '' - with open(fname, 
'r') as fin: + section = "" + with open(fname) as fin: for line in fin: # break a line into a status, key and value status, key, val = _process_lay_line(line, section) @@ -420,19 +428,18 @@ def _process_lay_line(line, section): 4. variable type (unused) 5. free-form text describing the annotation """ - key = '' # default; only return value possibly not set + key = "" # default; only return value possibly not set line = line.strip() # remove leading and trailing spaces end_idx = len(line) - 1 # get the last index of the line # empty sequence evaluates to false if not line: status = 0 - key = '' - value = '' + key = "" + value = "" return status, key, value # section found - elif (line[0] == '[') and (line[end_idx] == ']') \ - and (end_idx + 1 >= 3): + elif (line[0] == "[") and (line[end_idx] == "]") and (end_idx + 1 >= 3): status = 1 value = line[1:end_idx].lower() # key found @@ -440,25 +447,28 @@ def _process_lay_line(line, section): # handle Comments section differently from all other sections # TODO: utilize state and var_type in code. # Currently not used - if section == 'comments': + if section == "comments": # Persyst Comments output 5 variables "," separated - time_sec, duration, state, var_type, text = line.split(',', 4) + time_sec, duration, state, var_type, text = line.split(",", 4) + del var_type, state status = 2 key = text value = (time_sec, duration) # all other sections else: - if '=' not in line: - raise RuntimeError('The line %s does not conform ' - 'to the standards. Please check the ' - '.lay file.' % line) # noqa - pos = line.index('=') + if "=" not in line: + raise RuntimeError( + f"The line {line} does not conform " + "to the standards. Please check the " + ".lay file." + ) # noqa + pos = line.index("=") status = 2 # the line now is composed of a # = key = line[0:pos] key.strip() - value = line[pos + 1:end_idx + 1] + value = line[pos + 1 : end_idx + 1] value.strip() return status, key, value diff --git a/mne/io/persyst/tests/__init__.py b/mne/io/persyst/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/persyst/tests/__init__.py +++ b/mne/io/persyst/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/persyst/tests/test_persyst.py b/mne/io/persyst/tests/test_persyst.py index c907fae42df..986a37b9dbb 100644 --- a/mne/io/persyst/tests/test_persyst.py +++ b/mne/io/persyst/tests/test_persyst.py @@ -1,14 +1,13 @@ -# -*- coding: utf-8 -*- -# Authors: Adam Li -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
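
Note on the lay-file parsing shown above: the `_process_lay_line` docstring describes the two line forms a Persyst ``.lay`` file can contain, ``key=value`` pairs in most sections and five comma-separated fields (onset, duration, state, variable type, free-form text) in the ``[Comments]`` section. The following is a minimal, self-contained sketch of that status/key/value triage, not the module's implementation; the helper name and return convention are illustrative only.

    def triage_lay_line(line, section):
        """Classify one .lay line: 0 = blank, 1 = section header, 2 = key/value."""
        line = line.strip()
        if not line:
            return 0, "", ""
        if line.startswith("[") and line.endswith("]") and len(line) >= 3:
            # new section header, e.g. "[FileInfo]" -> "fileinfo"
            return 1, "", line[1:-1].lower()
        if section == "comments":
            # Comments hold 5 comma-separated fields; the free-form text may
            # itself contain commas, so split at most 4 times.
            onset, duration, _state, _var_type, text = line.split(",", 4)
            return 2, text, (onset, duration)
        if "=" not in line:
            raise RuntimeError(f"The line {line} does not conform to the standards.")
        key, _, value = line.partition("=")
        return 2, key.strip(), value.strip()

    print(triage_lay_line("[FileInfo]", ""))                  # (1, '', 'fileinfo')
    print(triage_lay_line("SamplingRate=200", "fileinfo"))    # (2, 'SamplingRate', '200')
    print(triage_lay_line("2.0,0.0,0,0,seizure1,2", "comments"))

The last call keeps the comma inside the annotation text ("seizure1,2"), which is exactly the case exercised by ``test_persyst_annotations`` below.
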
import os import shutil +import numpy as np import pytest from numpy.testing import assert_array_equal -import numpy as np from mne.datasets.testing import data_path, requires_testing_data from mne.io import read_raw_persyst @@ -16,14 +15,10 @@ testing_path = data_path(download=False) fname_lay = ( - testing_path - / "Persyst" - / "sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay" + testing_path / "Persyst" / "sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay" ) fname_dat = ( - testing_path - / "Persyst" - / "sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.dat" + testing_path / "Persyst" / "sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.dat" ) @@ -33,7 +28,7 @@ def test_persyst_lay_load(): raw = read_raw_persyst(fname_lay, preload=False) # Test data import - assert raw.info['sfreq'] == 200 + assert raw.info["sfreq"] == 200 assert raw.preload is False # load raw data @@ -42,12 +37,11 @@ def test_persyst_lay_load(): assert raw.preload is True # defaults channels to EEG - raw = raw.pick_types(eeg=True) + raw = raw.pick("eeg") assert len(raw.ch_names) == 83 # no "-Ref" in channel names - assert all(['-ref' not in ch.lower() - for ch in raw.ch_names]) + assert all(["-ref" not in ch.lower() for ch in raw.ch_names]) # test with preload True raw = read_raw_persyst(fname_lay, preload=True) @@ -59,7 +53,7 @@ def test_persyst_raw(): raw = read_raw_persyst(fname_lay, preload=False) # defaults channels to EEG - raw = raw.pick_types(eeg=True) + raw = raw.pick("eeg") # get data data, times = raw.get_data(start=200, return_times=True) @@ -90,40 +84,40 @@ def test_persyst_dates(tmp_path): # reformat the lay file to have testdate with # "/" character - with open(fname_lay, "r") as fin: - with open(new_fname_lay, 'w') as fout: + with open(fname_lay) as fin: + with open(new_fname_lay, "w") as fout: # for each line in the input file for idx, line in enumerate(fin): - if line.startswith('TestDate'): - line = 'TestDate=01/23/2000\n' + if line.startswith("TestDate"): + line = "TestDate=01/23/2000\n" fout.write(line) # file should update correctly with datetime raw = read_raw_persyst(new_fname_lay) - assert raw.info['meas_date'].month == 1 - assert raw.info['meas_date'].day == 23 - assert raw.info['meas_date'].year == 2000 + assert raw.info["meas_date"].month == 1 + assert raw.info["meas_date"].day == 23 + assert raw.info["meas_date"].year == 2000 # reformat the lay file to have testdate with # "-" character os.remove(new_fname_lay) - with open(fname_lay, "r") as fin: - with open(new_fname_lay, 'w') as fout: + with open(fname_lay) as fin: + with open(new_fname_lay, "w") as fout: # for each line in the input file for idx, line in enumerate(fin): - if line.startswith('TestDate'): - line = 'TestDate=24-01-2000\n' + if line.startswith("TestDate"): + line = "TestDate=24-01-2000\n" fout.write(line) # file should update correctly with datetime raw = read_raw_persyst(new_fname_lay) - assert raw.info['meas_date'].month == 1 - assert raw.info['meas_date'].day == 24 - assert raw.info['meas_date'].year == 2000 + assert raw.info["meas_date"].month == 1 + assert raw.info["meas_date"].day == 24 + assert raw.info["meas_date"].year == 2000 @requires_testing_data def test_persyst_wrong_file(tmp_path): """Test reading Persyst files when passed in wrong file path.""" - with pytest.raises(FileNotFoundError, match='The path you'): + with pytest.raises(FileNotFoundError, match="The path you"): read_raw_persyst(fname_dat, preload=True) new_fname_lay = tmp_path / fname_lay.name @@ -131,10 +125,11 @@ def 
test_persyst_wrong_file(tmp_path): shutil.copy(fname_lay, new_fname_lay) # without a .dat file, reader should break - desired_err_msg = \ - 'The data path you specified does ' \ - 'not exist for the lay path, ' \ - 'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay' + desired_err_msg = ( + "The data path you specified does " + "not exist for the lay path, " + "sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay" + ) with pytest.raises(FileNotFoundError, match=desired_err_msg): read_raw_persyst(new_fname_lay, preload=True) @@ -155,10 +150,11 @@ def test_persyst_moved_file(tmp_path): # without a .dat file, reader should break # when the lay file was moved - desired_err_msg = \ - 'The data path you specified does ' \ - 'not exist for the lay path, ' \ - 'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay' + desired_err_msg = ( + "The data path you specified does " + "not exist for the lay path, " + "sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay" + ) with pytest.raises(FileNotFoundError, match=desired_err_msg): read_raw_persyst(new_fname_lay, preload=True) @@ -166,14 +162,14 @@ def test_persyst_moved_file(tmp_path): # to the full path, but it should still not work # as reader requires lay and dat file to be in # same directory - with open(fname_lay, "r") as fin: - with open(new_fname_lay, 'w') as fout: + with open(fname_lay) as fin: + with open(new_fname_lay, "w") as fout: # for each line in the input file for idx, line in enumerate(fin): - if line.startswith('File='): + if line.startswith("File="): # give it the full path to the old data - test_fpath = fname_dat.parent / line.split('=')[1] - line = f'File={test_fpath}\n' + test_fpath = fname_dat.parent / line.split("=")[1] + line = f"File={test_fpath}\n" fout.write(line) with pytest.raises(FileNotFoundError, match=desired_err_msg): read_raw_persyst(new_fname_lay, preload=True) @@ -204,11 +200,11 @@ def test_persyst_annotations(tmp_path): # get the annotations and make sure that repeated annotations # are in the dataset annotations = raw.annotations - assert np.count_nonzero(annotations.description == 'seizure') == 2 + assert np.count_nonzero(annotations.description == "seizure") == 2 # make sure annotation with a "," character is in there - assert 'seizure1,2' in annotations.description - assert 'CLip2' in annotations.description + assert "seizure1,2" in annotations.description + assert "CLip2" in annotations.description @requires_testing_data @@ -219,43 +215,41 @@ def test_persyst_errors(tmp_path): shutil.copy(fname_dat, new_fname_dat) # reformat the lay file - with open(fname_lay, "r") as fin: - with open(new_fname_lay, 'w') as fout: + with open(fname_lay) as fin: + with open(new_fname_lay, "w") as fout: # for each line in the input file for idx, line in enumerate(fin): if idx == 1: - line = line.replace('=', ',') + line = line.replace("=", ",") fout.write(line) # file should break - with pytest.raises(RuntimeError, match='The line'): + with pytest.raises(RuntimeError, match="The line"): read_raw_persyst(new_fname_lay) # reformat the lay file os.remove(new_fname_lay) - with open(fname_lay, "r") as fin: - with open(new_fname_lay, 'w') as fout: + with open(fname_lay) as fin: + with open(new_fname_lay, "w") as fout: # for each line in the input file for idx, line in enumerate(fin): - if line.startswith('WaveformCount'): - line = 'WaveformCount=1\n' + if line.startswith("WaveformCount"): + line = "WaveformCount=1\n" fout.write(line) # file should break - with pytest.raises(RuntimeError, match='Channels in lay ' - 'file do 
not'): + with pytest.raises(RuntimeError, match="Channels in lay file do not"): read_raw_persyst(new_fname_lay) # reformat the lay file to have testdate # improperly specified os.remove(new_fname_lay) - with open(fname_lay, "r") as fin: - with open(new_fname_lay, 'w') as fout: + with open(fname_lay) as fin: + with open(new_fname_lay, "w") as fout: # for each line in the input file for idx, line in enumerate(fin): - if line.startswith('TestDate'): - line = 'TestDate=Jan 23rd 2000\n' + if line.startswith("TestDate"): + line = "TestDate=Jan 23rd 2000\n" fout.write(line) # file should not read in meas date - with pytest.warns(RuntimeWarning, - match='Cannot read in the measurement date'): + with pytest.warns(RuntimeWarning, match="Cannot read in the measurement date"): raw = read_raw_persyst(new_fname_lay) - assert raw.info['meas_date'] is None + assert raw.info["meas_date"] is None diff --git a/mne/io/pick.py b/mne/io/pick.py index 87511710143..e78cfc85442 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -1,1187 +1,18 @@ -# -*- coding: utf-8 -*- -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Martin Luessi -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause - -from copy import deepcopy -import re - -import numpy as np - -from .constants import FIFF -from ..utils import (logger, verbose, _validate_type, fill_doc, _ensure_int, - _check_option, warn) - - -def get_channel_type_constants(include_defaults=False): - """Return all known channel types, and associated FIFF constants. - - Parameters - ---------- - include_defaults : bool - Whether to include default values for "unit" and "coil_type" for all - entries (see Notes). Defaults are generally based on values normally - present for a VectorView MEG system. Defaults to ``False``. - - Returns - ------- - channel_types : dict - The keys are channel type strings, and the values are dictionaries of - FIFF constants for "kind", and possibly "unit" and "coil_type". - - Notes - ----- - Values which might vary within a channel type across real data - recordings are excluded unless ``include_defaults=True``. For example, - "ref_meg" channels may have coil type - ``FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD``, ``FIFFV_COIL_VV_MAG_T3``, etc - (depending on the recording system), so no "coil_type" entry is given - for "ref_meg" unless ``include_defaults`` is requested. 
- """ - base = dict(grad=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T_M), - mag=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T), - ref_meg=dict(kind=FIFF.FIFFV_REF_MEG_CH), - eeg=dict(kind=FIFF.FIFFV_EEG_CH, - unit=FIFF.FIFF_UNIT_V, - coil_type=FIFF.FIFFV_COIL_EEG), - seeg=dict(kind=FIFF.FIFFV_SEEG_CH, - unit=FIFF.FIFF_UNIT_V, - coil_type=FIFF.FIFFV_COIL_EEG), - dbs=dict(kind=FIFF.FIFFV_DBS_CH, - unit=FIFF.FIFF_UNIT_V, - coil_type=FIFF.FIFFV_COIL_EEG), - ecog=dict(kind=FIFF.FIFFV_ECOG_CH, - unit=FIFF.FIFF_UNIT_V, - coil_type=FIFF.FIFFV_COIL_EEG), - eog=dict(kind=FIFF.FIFFV_EOG_CH, unit=FIFF.FIFF_UNIT_V), - emg=dict(kind=FIFF.FIFFV_EMG_CH, unit=FIFF.FIFF_UNIT_V), - ecg=dict(kind=FIFF.FIFFV_ECG_CH, unit=FIFF.FIFF_UNIT_V), - resp=dict(kind=FIFF.FIFFV_RESP_CH, unit=FIFF.FIFF_UNIT_V), - bio=dict(kind=FIFF.FIFFV_BIO_CH, unit=FIFF.FIFF_UNIT_V), - misc=dict(kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V), - stim=dict(kind=FIFF.FIFFV_STIM_CH), - exci=dict(kind=FIFF.FIFFV_EXCI_CH), - syst=dict(kind=FIFF.FIFFV_SYST_CH), - ias=dict(kind=FIFF.FIFFV_IAS_CH), - gof=dict(kind=FIFF.FIFFV_GOODNESS_FIT), - dipole=dict(kind=FIFF.FIFFV_DIPOLE_WAVE), - chpi=dict(kind=[FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, - FIFF.FIFFV_QUAT_2, FIFF.FIFFV_QUAT_3, - FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5, - FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, - FIFF.FIFFV_HPI_ERR, FIFF.FIFFV_HPI_MOV]), - fnirs_cw_amplitude=dict( - kind=FIFF.FIFFV_FNIRS_CH, - unit=FIFF.FIFF_UNIT_V, - coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE), - fnirs_fd_ac_amplitude=dict( - kind=FIFF.FIFFV_FNIRS_CH, - unit=FIFF.FIFF_UNIT_V, - coil_type=FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE), - fnirs_fd_phase=dict( - kind=FIFF.FIFFV_FNIRS_CH, - unit=FIFF.FIFF_UNIT_RAD, - coil_type=FIFF.FIFFV_COIL_FNIRS_FD_PHASE), - fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH, - coil_type=FIFF.FIFFV_COIL_FNIRS_OD), - hbo=dict(kind=FIFF.FIFFV_FNIRS_CH, - unit=FIFF.FIFF_UNIT_MOL, - coil_type=FIFF.FIFFV_COIL_FNIRS_HBO), - hbr=dict(kind=FIFF.FIFFV_FNIRS_CH, - unit=FIFF.FIFF_UNIT_MOL, - coil_type=FIFF.FIFFV_COIL_FNIRS_HBR), - csd=dict(kind=FIFF.FIFFV_EEG_CH, - unit=FIFF.FIFF_UNIT_V_M2, - coil_type=FIFF.FIFFV_COIL_EEG_CSD), - temperature=dict(kind=FIFF.FIFFV_TEMPERATURE_CH, - unit=FIFF.FIFF_UNIT_CEL), - gsr=dict(kind=FIFF.FIFFV_GALVANIC_CH, - unit=FIFF.FIFF_UNIT_S), - ) - if include_defaults: - coil_none = dict(coil_type=FIFF.FIFFV_COIL_NONE) - unit_none = dict(unit=FIFF.FIFF_UNIT_NONE) - defaults = dict( - grad=dict(coil_type=FIFF.FIFFV_COIL_VV_PLANAR_T1), - mag=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3), - ref_meg=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3, - unit=FIFF.FIFF_UNIT_T), - misc=dict(**coil_none, **unit_none), # NB: overwrites UNIT_V - stim=dict(unit=FIFF.FIFF_UNIT_V, **coil_none), - eog=coil_none, - ecg=coil_none, - emg=coil_none, - bio=coil_none, - fnirs_od=unit_none, - ) - for key, value in defaults.items(): - base[key].update(value) - return base - - -_first_rule = { - FIFF.FIFFV_MEG_CH: 'meg', - FIFF.FIFFV_REF_MEG_CH: 'ref_meg', - FIFF.FIFFV_EEG_CH: 'eeg', - FIFF.FIFFV_STIM_CH: 'stim', - FIFF.FIFFV_EOG_CH: 'eog', - FIFF.FIFFV_EMG_CH: 'emg', - FIFF.FIFFV_ECG_CH: 'ecg', - FIFF.FIFFV_RESP_CH: 'resp', - FIFF.FIFFV_MISC_CH: 'misc', - FIFF.FIFFV_EXCI_CH: 'exci', - FIFF.FIFFV_IAS_CH: 'ias', - FIFF.FIFFV_SYST_CH: 'syst', - FIFF.FIFFV_SEEG_CH: 'seeg', - FIFF.FIFFV_DBS_CH: 'dbs', - FIFF.FIFFV_BIO_CH: 'bio', - FIFF.FIFFV_QUAT_0: 'chpi', - FIFF.FIFFV_QUAT_1: 'chpi', - FIFF.FIFFV_QUAT_2: 'chpi', - FIFF.FIFFV_QUAT_3: 'chpi', - FIFF.FIFFV_QUAT_4: 'chpi', - FIFF.FIFFV_QUAT_5: 'chpi', - 
FIFF.FIFFV_QUAT_6: 'chpi', - FIFF.FIFFV_HPI_G: 'chpi', - FIFF.FIFFV_HPI_ERR: 'chpi', - FIFF.FIFFV_HPI_MOV: 'chpi', - FIFF.FIFFV_DIPOLE_WAVE: 'dipole', - FIFF.FIFFV_GOODNESS_FIT: 'gof', - FIFF.FIFFV_ECOG_CH: 'ecog', - FIFF.FIFFV_FNIRS_CH: 'fnirs', - FIFF.FIFFV_TEMPERATURE_CH: 'temperature', - FIFF.FIFFV_GALVANIC_CH: 'gsr', -} -# How to reduce our categories in channel_type (originally) -_second_rules = { - 'meg': ('unit', {FIFF.FIFF_UNIT_T_M: 'grad', - FIFF.FIFF_UNIT_T: 'mag'}), - 'fnirs': ('coil_type', {FIFF.FIFFV_COIL_FNIRS_HBO: 'hbo', - FIFF.FIFFV_COIL_FNIRS_HBR: 'hbr', - FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE: - 'fnirs_cw_amplitude', - FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE: - 'fnirs_fd_ac_amplitude', - FIFF.FIFFV_COIL_FNIRS_FD_PHASE: - 'fnirs_fd_phase', - FIFF.FIFFV_COIL_FNIRS_OD: 'fnirs_od', - }), - 'eeg': ('coil_type', {FIFF.FIFFV_COIL_EEG: 'eeg', - FIFF.FIFFV_COIL_EEG_BIPOLAR: 'eeg', - FIFF.FIFFV_COIL_NONE: 'eeg', # MNE-C backward compat - FIFF.FIFFV_COIL_EEG_CSD: 'csd', - }) -} - - -@fill_doc -def channel_type(info, idx): - """Get channel type. - - Parameters - ---------- - %(info_not_none)s - idx : int - Index of channel. - - Returns - ------- - type : str - Type of channel. Will be one of:: - - {'grad', 'mag', 'eeg', 'csd', 'stim', 'eog', 'emg', 'ecg', - 'ref_meg', 'resp', 'exci', 'ias', 'syst', 'misc', 'seeg', 'dbs', - 'bio', 'chpi', 'dipole', 'gof', 'ecog', 'hbo', 'hbr', - 'temperature', 'gsr'} - """ - # This is faster than the original _channel_type_old now in test_pick.py - # because it uses (at most!) two dict lookups plus one conditional - # to get the channel type string. - ch = info['chs'][idx] - try: - first_kind = _first_rule[ch['kind']] - except KeyError: - raise ValueError('Unknown channel type (%s) for channel "%s"' - % (ch['kind'], ch["ch_name"])) - if first_kind in _second_rules: - key, second_rule = _second_rules[first_kind] - first_kind = second_rule[ch[key]] - return first_kind - - -def pick_channels(ch_names, include, exclude=[], ordered=False): - """Pick channels by names. - - Returns the indices of ``ch_names`` in ``include`` but not in ``exclude``. - - Parameters - ---------- - ch_names : list of str - List of channels. - include : list of str - List of channels to include (if empty include all available). - - .. note:: This is to be treated as a set. The order of this list - is not used or maintained in ``sel``. - - exclude : list of str - List of channels to exclude (if empty do not exclude any channel). - Defaults to []. - ordered : bool - If true (default False), treat ``include`` as an ordered list - rather than a set, and any channels from ``include`` are missing - in ``ch_names`` an error will be raised. - - .. versionadded:: 0.18 - - Returns - ------- - sel : array of int - Indices of good channels. 
- - See Also - -------- - pick_channels_regexp, pick_types - """ - if len(np.unique(ch_names)) != len(ch_names): - raise RuntimeError('ch_names is not a unique list, picking is unsafe') - _check_excludes_includes(include) - _check_excludes_includes(exclude) - if not ordered: - if not isinstance(include, set): - include = set(include) - if not isinstance(exclude, set): - exclude = set(exclude) - sel = [] - for k, name in enumerate(ch_names): - if (len(include) == 0 or name in include) and name not in exclude: - sel.append(k) - else: - if not isinstance(include, list): - include = list(include) - if len(include) == 0: - include = list(ch_names) - if not isinstance(exclude, list): - exclude = list(exclude) - sel, missing = list(), list() - for name in include: - if name in ch_names: - if name not in exclude: - sel.append(ch_names.index(name)) - else: - missing.append(name) - if len(missing): - raise ValueError('Missing channels from ch_names required by ' - 'include:\n%s' % (missing,)) - return np.array(sel, int) - - -def pick_channels_regexp(ch_names, regexp): - """Pick channels using regular expression. - - Returns the indices of the good channels in ch_names. - - Parameters - ---------- - ch_names : list of str - List of channels. - - regexp : str - The regular expression. See python standard module for regular - expressions. - - Returns - ------- - sel : array of int - Indices of good channels. - - See Also - -------- - pick_channels - - Examples - -------- - >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1') - [0] - >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG *') - [0, 1, 2] - """ - r = re.compile(regexp) - return [k for k, name in enumerate(ch_names) if r.match(name)] - - -def _triage_meg_pick(ch, meg): - """Triage an MEG pick type.""" - if meg is True: - return True - elif ch['unit'] == FIFF.FIFF_UNIT_T_M: - if meg == 'grad': - return True - elif meg == 'planar1' and ch['ch_name'].endswith('2'): - return True - elif meg == 'planar2' and ch['ch_name'].endswith('3'): - return True - elif (meg == 'mag' and ch['unit'] == FIFF.FIFF_UNIT_T): - return True - return False - - -def _triage_fnirs_pick(ch, fnirs, warned): - """Triage an fNIRS pick type.""" - if fnirs is True: - return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO and 'hbo' in fnirs: - return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR and 'hbr' in fnirs: - return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE and \ - 'fnirs_cw_amplitude' in fnirs: - return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE and \ - 'fnirs_fd_ac_amplitude' in fnirs: - return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_PHASE and \ - 'fnirs_fd_phase' in fnirs: - return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and 'fnirs_od' in fnirs: - return True - return False - - -def _check_meg_type(meg, allow_auto=False): - """Ensure a valid meg type.""" - if isinstance(meg, str): - allowed_types = ['grad', 'mag', 'planar1', 'planar2'] - allowed_types += ['auto'] if allow_auto else [] - if meg not in allowed_types: - raise ValueError('meg value must be one of %s or bool, not %s' - % (allowed_types, meg)) - - -def _check_info_exclude(info, exclude): - _validate_type(info, "info") - info._check_consistency() - if exclude is None: - raise ValueError('exclude must be a list of strings or "bads"') - elif exclude == 'bads': - exclude = info.get('bads', []) - elif not isinstance(exclude, (list, tuple)): - raise ValueError('exclude must 
either be "bads" or a list of strings.' - ' If only one channel is to be excluded, use ' - '[ch_name] instead of passing ch_name.') - return exclude - - -@fill_doc -def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, - emg=False, ref_meg='auto', *, misc=False, resp=False, - chpi=False, exci=False, ias=False, syst=False, seeg=False, - dipole=False, gof=False, bio=False, ecog=False, fnirs=False, - csd=False, dbs=False, temperature=False, gsr=False, - include=(), exclude='bads', selection=None): - """Pick channels by type and names. - - Parameters - ---------- - %(info_not_none)s - %(pick_types_params)s - - Returns - ------- - sel : array of int - Indices of good channels. - """ - # NOTE: Changes to this function's signature should also be changed in - # PickChannelsMixin - _validate_type(meg, (bool, str), 'meg') - - exclude = _check_info_exclude(info, exclude) - nchan = info['nchan'] - pick = np.zeros(nchan, dtype=bool) - - _check_meg_type(ref_meg, allow_auto=True) - _check_meg_type(meg) - if isinstance(ref_meg, str) and ref_meg == 'auto': - ref_meg = ('comps' in info and info['comps'] is not None and - len(info['comps']) > 0 and meg is not False) - - for param in (eeg, stim, eog, ecg, emg, misc, resp, chpi, exci, - ias, syst, seeg, dipole, gof, bio, ecog, csd, dbs, - temperature, gsr): - if not isinstance(param, bool): - w = ('Parameters for all channel types (with the exception of ' - '"meg", "ref_meg" and "fnirs") must be of type bool, not {}.') - raise ValueError(w.format(type(param))) - - param_dict = dict(eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg, - misc=misc, resp=resp, chpi=chpi, exci=exci, - ias=ias, syst=syst, seeg=seeg, dbs=dbs, dipole=dipole, - gof=gof, bio=bio, ecog=ecog, csd=csd, - temperature=temperature, gsr=gsr) - # avoid triage if possible - if isinstance(meg, bool): - for key in ('grad', 'mag'): - param_dict[key] = meg - if isinstance(fnirs, bool): - for key in _FNIRS_CH_TYPES_SPLIT: - param_dict[key] = fnirs - warned = [False] - for k in range(nchan): - ch_type = channel_type(info, k) - try: - pick[k] = param_dict[ch_type] - except KeyError: # not so simple - assert ch_type in ( - 'grad', 'mag', 'ref_meg') + _FNIRS_CH_TYPES_SPLIT - if ch_type in ('grad', 'mag'): - pick[k] = _triage_meg_pick(info['chs'][k], meg) - elif ch_type == 'ref_meg': - pick[k] = _triage_meg_pick(info['chs'][k], ref_meg) - else: # ch_type in ('hbo', 'hbr') - pick[k] = _triage_fnirs_pick(info['chs'][k], fnirs, warned) - - # restrict channels to selection if provided - if selection is not None: - # the selection only restricts these types of channels - sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH, - FIFF.FIFFV_EEG_CH] - for k in np.where(pick)[0]: - if (info['chs'][k]['kind'] in sel_kind and - info['ch_names'][k] not in selection): - pick[k] = False - - myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]] - myinclude += include - - if len(myinclude) == 0: - sel = np.array([], int) - else: - sel = pick_channels(info['ch_names'], myinclude, exclude) - - return sel - - -@verbose -def pick_info(info, sel=(), copy=True, verbose=None): - """Restrict an info structure to a selection of channels. - - Parameters - ---------- - %(info_not_none)s - sel : list of int | None - Indices of channels to include. If None, all channels - are included. - copy : bool - If copy is False, info is modified inplace. - %(verbose)s - - Returns - ------- - res : dict - Info structure restricted to a selection of channels. 
- """ - # avoid circular imports - from .meas_info import _bad_chans_comp - - info._check_consistency() - info = info.copy() if copy else info - if sel is None: - return info - elif len(sel) == 0: - raise ValueError('No channels match the selection.') - n_unique = len(np.unique(np.arange(len(info['ch_names']))[sel])) - if n_unique != len(sel): - raise ValueError('Found %d / %d unique names, sel is not unique' - % (n_unique, len(sel))) - - # make sure required the compensation channels are present - if len(info.get('comps', [])) > 0: - ch_names = [info['ch_names'][idx] for idx in sel] - _, comps_missing = _bad_chans_comp(info, ch_names) - if len(comps_missing) > 0: - logger.info('Removing %d compensators from info because ' - 'not all compensation channels were picked.' - % (len(info['comps']),)) - with info._unlock(): - info['comps'] = [] - with info._unlock(): - info['chs'] = [info['chs'][k] for k in sel] - info._update_redundant() - info['bads'] = [ch for ch in info['bads'] if ch in info['ch_names']] - if 'comps' in info: - comps = deepcopy(info['comps']) - for c in comps: - row_idx = [k for k, n in enumerate(c['data']['row_names']) - if n in info['ch_names']] - row_names = [c['data']['row_names'][i] for i in row_idx] - rowcals = c['rowcals'][row_idx] - c['rowcals'] = rowcals - c['data']['nrow'] = len(row_names) - c['data']['row_names'] = row_names - c['data']['data'] = c['data']['data'][row_idx] - with info._unlock(): - info['comps'] = comps - if info.get('custom_ref_applied', False) and not _electrode_types(info): - with info._unlock(): - info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF - info._check_consistency() - - return info - - -def _has_kit_refs(info, picks): - """Determine if KIT ref channels are chosen. - - This is currently only used by make_forward_solution, which cannot - run when KIT reference channels are included. - """ - for p in picks: - if info['chs'][p]['coil_type'] == FIFF.FIFFV_COIL_KIT_REF_MAG: - return True - return False - - -def pick_channels_evoked(orig, include=[], exclude='bads'): - """Pick channels from evoked data. - - Parameters - ---------- - orig : Evoked object - One evoked dataset. - include : list of str, (optional) - List of channels to include (if empty, include all available). - exclude : list of str | str - List of channels to exclude. If empty do not exclude any (default). - If 'bads', exclude channels in orig.info['bads']. Defaults to 'bads'. - - Returns - ------- - res : instance of Evoked - Evoked data restricted to selected channels. If include and - exclude are empty it returns orig without copy. - """ - if len(include) == 0 and len(exclude) == 0: - return orig - - exclude = _check_excludes_includes(exclude, info=orig.info, - allow_bads=True) - sel = pick_channels(orig.info['ch_names'], include=include, - exclude=exclude) - - if len(sel) == 0: - raise ValueError('Warning : No channels match the selection.') - - res = deepcopy(orig) - # - # Modify the measurement info - # - res.info = pick_info(res.info, sel) - # - # Create the reduced data set - # - res.data = res.data[sel, :] - - return res - - -@verbose -def pick_channels_forward(orig, include=[], exclude=[], ordered=False, - copy=True, verbose=None): - """Pick channels from forward operator. - - Parameters - ---------- - orig : dict - A forward solution. - include : list of str - List of channels to include (if empty, include all available). - Defaults to []. - exclude : list of str | 'bads' - Channels to exclude (if empty, do not exclude any). Defaults to []. 
- If 'bads', then exclude bad channels in orig. - ordered : bool - If true (default False), treat ``include`` as an ordered list - rather than a set. - - .. versionadded:: 0.18 - copy : bool - If True (default), make a copy. - - .. versionadded:: 0.19 - %(verbose)s - - Returns - ------- - res : dict - Forward solution restricted to selected channels. If include and - exclude are empty it returns orig without copy. - """ - orig['info']._check_consistency() - if len(include) == 0 and len(exclude) == 0: - return orig.copy() if copy else orig - exclude = _check_excludes_includes(exclude, - info=orig['info'], allow_bads=True) - - # Allow for possibility of channel ordering in forward solution being - # different from that of the M/EEG file it is based on. - sel_sol = pick_channels(orig['sol']['row_names'], include=include, - exclude=exclude, ordered=ordered) - sel_info = pick_channels(orig['info']['ch_names'], include=include, - exclude=exclude, ordered=ordered) - - fwd = deepcopy(orig) if copy else orig - - # Check that forward solution and original data file agree on #channels - if len(sel_sol) != len(sel_info): - raise ValueError('Forward solution and functional data appear to ' - 'have different channel names, please check.') - - # Do we have something? - nuse = len(sel_sol) - if nuse == 0: - raise ValueError('Nothing remains after picking') - - logger.info(' %d out of %d channels remain after picking' - % (nuse, fwd['nchan'])) - - # Pick the correct rows of the forward operator using sel_sol - fwd['sol']['data'] = fwd['sol']['data'][sel_sol, :] - fwd['_orig_sol'] = fwd['_orig_sol'][sel_sol, :] - fwd['sol']['nrow'] = nuse - - ch_names = [fwd['sol']['row_names'][k] for k in sel_sol] - fwd['nchan'] = nuse - fwd['sol']['row_names'] = ch_names - - # Pick the appropriate channel names from the info-dict using sel_info - with fwd['info']._unlock(): - fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel_info] - fwd['info']._update_redundant() - fwd['info']['bads'] = [b for b in fwd['info']['bads'] if b in ch_names] - - if fwd['sol_grad'] is not None: - fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel_sol, :] - fwd['_orig_sol_grad'] = fwd['_orig_sol_grad'][sel_sol, :] - fwd['sol_grad']['nrow'] = nuse - fwd['sol_grad']['row_names'] = [fwd['sol_grad']['row_names'][k] - for k in sel_sol] - - return fwd - - -def pick_types_forward(orig, meg=False, eeg=False, ref_meg=True, seeg=False, - ecog=False, dbs=False, include=[], exclude=[]): - """Pick by channel type and names from a forward operator. - - Parameters - ---------- - orig : dict - A forward solution. - meg : bool | str - If True include MEG channels. If string it can be 'mag', 'grad', - 'planar1' or 'planar2' to select only magnetometers, all gradiometers, - or a specific type of gradiometer. - eeg : bool - If True include EEG channels. - ref_meg : bool - If True include CTF / 4D reference channels. - seeg : bool - If True include stereotactic EEG channels. - ecog : bool - If True include electrocorticography channels. - dbs : bool - If True include deep brain stimulation channels. - include : list of str - List of additional channels to include. If empty do not include any. - exclude : list of str | str - List of channels to exclude. If empty do not exclude any (default). - If 'bads', exclude channels in orig['info']['bads']. - - Returns - ------- - res : dict - Forward solution restricted to selected channel types. 
- """ - info = orig['info'] - sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, - ecog=ecog, dbs=dbs, include=include, exclude=exclude) - if len(sel) == 0: - raise ValueError('No valid channels found') - include_ch_names = [info['ch_names'][k] for k in sel] - - return pick_channels_forward(orig, include_ch_names) - - -@fill_doc -def channel_indices_by_type(info, picks=None): - """Get indices of channels by type. - - Parameters - ---------- - %(info_not_none)s - %(picks_all)s - - Returns - ------- - idx_by_type : dict - A dictionary that maps each channel type to a (possibly empty) list of - channel indices. - """ - idx_by_type = {key: list() for key in _PICK_TYPES_KEYS if - key not in ('meg', 'fnirs')} - idx_by_type.update(mag=list(), grad=list(), hbo=list(), hbr=list(), - fnirs_cw_amplitude=list(), fnirs_fd_ac_amplitude=list(), - fnirs_fd_phase=list(), fnirs_od=list()) - picks = _picks_to_idx(info, picks, - none='all', exclude=(), allow_empty=True) - for k in picks: - ch_type = channel_type(info, k) - for key in idx_by_type.keys(): - if ch_type == key: - idx_by_type[key].append(k) - return idx_by_type - - -def pick_channels_cov(orig, include=[], exclude='bads', ordered=False, - copy=True): - """Pick channels from covariance matrix. - - Parameters - ---------- - orig : Covariance - A covariance. - include : list of str, (optional) - List of channels to include (if empty, include all available). - exclude : list of str, (optional) | 'bads' - Channels to exclude (if empty, do not exclude any). Defaults to 'bads'. - ordered : bool - If True (default False), ensure that the order of the channels in the - modified instance matches the order of ``include``. - - .. versionadded:: 0.20.0 - copy : bool - If True (the default), return a copy of the covariance matrix with the - modified channels. If False, channels are modified in-place. - - .. versionadded:: 0.20.0 - - Returns - ------- - res : dict - Covariance solution restricted to selected channels. - """ - if copy: - orig = orig.copy() - # A little peculiarity of the cov objects is that these two fields - # should not be copied over when None. - if 'method' in orig and orig['method'] is None: - del orig['method'] - if 'loglik' in orig and orig['loglik'] is None: - del orig['loglik'] - - exclude = orig['bads'] if exclude == 'bads' else exclude - sel = pick_channels(orig['names'], include=include, exclude=exclude, - ordered=ordered) - data = orig['data'][sel][:, sel] if not orig['diag'] else orig['data'][sel] - names = [orig['names'][k] for k in sel] - bads = [name for name in orig['bads'] if name in orig['names']] - - orig['data'] = data - orig['names'] = names - orig['bads'] = bads - orig['dim'] = len(data) - - return orig - - -def _mag_grad_dependent(info): - """Determine of mag and grad should be dealt with jointly.""" - # right now just uses SSS, could be computed / checked from cov - # but probably overkill - return any(ph.get('max_info', {}).get('sss_info', {}).get('in_order', 0) - for ph in info.get('proc_history', [])) - - -@fill_doc -def _contains_ch_type(info, ch_type): - """Check whether a certain channel type is in an info object. - - Parameters - ---------- - %(info_not_none)s - ch_type : str - the channel type to be checked for - - Returns - ------- - has_ch_type : bool - Whether the channel type is present or not. 
- """ - _validate_type(ch_type, 'str', "ch_type") - - meg_extras = list(_MEG_CH_TYPES_SPLIT) - fnirs_extras = list(_FNIRS_CH_TYPES_SPLIT) - valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS - if key != 'meg'] + meg_extras + fnirs_extras) - _check_option('ch_type', ch_type, valid_channel_types) - if info is None: - raise ValueError('Cannot check for channels of type "%s" because info ' - 'is None' % (ch_type,)) - return any(ch_type == channel_type(info, ii) - for ii in range(info['nchan'])) - - -@fill_doc -def _picks_by_type(info, meg_combined=False, ref_meg=False, exclude='bads'): - """Get data channel indices as separate list of tuples. - - Parameters - ---------- - %(info_not_none)s - meg_combined : bool | 'auto' - Whether to return combined picks for grad and mag. - Can be 'auto' to choose based on Maxwell filtering status. - ref_meg : bool - If True include CTF / 4D reference channels - exclude : list of str | str - List of channels to exclude. If 'bads' (default), exclude channels - in info['bads']. - - Returns - ------- - picks_list : list of tuples - The list of tuples of picks and the type string. - """ - _validate_type(ref_meg, bool, 'ref_meg') - exclude = _check_info_exclude(info, exclude) - if meg_combined == 'auto': - meg_combined = _mag_grad_dependent(info) - picks_list = [] - picks_list = {ch_type: list() for ch_type in _DATA_CH_TYPES_SPLIT} - for k in range(info['nchan']): - if info['chs'][k]['ch_name'] not in exclude: - this_type = channel_type(info, k) - try: - picks_list[this_type].append(k) - except KeyError: - # This annoyance is due to differences in pick_types - # and channel_type behavior - if this_type == 'ref_meg': - ch = info['chs'][k] - if _triage_meg_pick(ch, ref_meg): - if ch['unit'] == FIFF.FIFF_UNIT_T: - picks_list['mag'].append(k) - elif ch['unit'] == FIFF.FIFF_UNIT_T_M: - picks_list['grad'].append(k) - else: - pass # not a data channel type - picks_list = [(ch_type, np.array(picks_list[ch_type], int)) - for ch_type in _DATA_CH_TYPES_SPLIT] - assert _DATA_CH_TYPES_SPLIT[:2] == ('mag', 'grad') - if meg_combined and len(picks_list[0][1]) and len(picks_list[1][1]): - picks_list.insert( - 0, ('meg', np.unique(np.concatenate([picks_list.pop(0)[1], - picks_list.pop(0)[1]]))) - ) - picks_list = [p for p in picks_list if len(p[1])] - return picks_list - - -def _check_excludes_includes(chs, info=None, allow_bads=False): - """Ensure that inputs to exclude/include are list-like or "bads". - - Parameters - ---------- - chs : any input, should be list, tuple, set, str - The channels passed to include or exclude. - allow_bads : bool - Allow the user to supply "bads" as a string for auto exclusion. - - Returns - ------- - chs : list - Channels to be excluded/excluded. If allow_bads, and chs=="bads", - this will be the bad channels found in 'info'. - """ - from .meas_info import Info - if not isinstance(chs, (list, tuple, set, np.ndarray)): - if allow_bads is True: - if not isinstance(info, Info): - raise ValueError('Supply an info object if allow_bads is true') - elif chs != 'bads': - raise ValueError('If chs is a string, it must be "bads"') - else: - chs = info['bads'] - else: - raise ValueError( - 'include/exclude must be list, tuple, ndarray, or "bads". 
' + - 'You provided type {}'.format(type(chs))) - return chs - - -_PICK_TYPES_DATA_DICT = dict( - meg=True, eeg=True, csd=True, stim=False, eog=False, ecg=False, emg=False, - misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, - seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True, - dbs=True, temperature=False, gsr=False) -_PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) -_MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') -_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') -_DATA_CH_TYPES_ORDER_DEFAULT = ( - 'mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'resp', 'emg', 'ref_meg', - 'misc', 'stim', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', 'ecog', - 'dbs', 'temperature', 'gsr', 'gof', 'dipole', -) + _FNIRS_CH_TYPES_SPLIT + ('whitened',) -# Valid data types, ordered for consistency, used in viz/evoked. -_VALID_CHANNEL_TYPES = ( - 'eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'resp', 'emg', 'dipole', 'gof', - 'bio', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT + ('misc', 'csd') -_DATA_CH_TYPES_SPLIT = ( - 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT -# Electrode types (e.g., can be average-referenced together or separately) -_ELECTRODE_CH_TYPES = ('eeg', 'ecog', 'seeg', 'dbs') - - -def _electrode_types(info, *, exclude='bads'): - return [ch_type for ch_type in _ELECTRODE_CH_TYPES - if len(pick_types(info, exclude=exclude, **{ch_type: True}))] - - -def _pick_data_channels(info, exclude='bads', with_ref_meg=True, - with_aux=False): - """Pick only data channels.""" - kwargs = _PICK_TYPES_DATA_DICT - if with_aux: - kwargs = kwargs.copy() - kwargs.update(eog=True, ecg=True, emg=True, bio=True) - return pick_types(info, ref_meg=with_ref_meg, exclude=exclude, **kwargs) - - -def _pick_data_or_ica(info, exclude=()): - """Pick only data or ICA channels.""" - if any(ch_name.startswith('ICA') for ch_name in info['ch_names']): - picks = pick_types(info, exclude=exclude, misc=True) - else: - picks = _pick_data_channels(info, exclude=exclude, with_ref_meg=True) - return picks - - -def _picks_to_idx(info, picks, none='data', exclude='bads', allow_empty=False, - with_ref_meg=True, return_kind=False, picks_on="channels"): - """Convert and check pick validity. - - Parameters - ---------- - picks_on : str - 'channels' (default) for error messages about selection of channels. - 'components' for error messages about selection of components. 
- """ - from .meas_info import Info - picked_ch_type_or_generic = False - # - # None -> all, data, or data_or_ica (ndarray of int) - # - if isinstance(info, Info): - n_chan = info['nchan'] - else: - info = _ensure_int(info, 'info', 'an int or Info') - n_chan = info - assert n_chan >= 0 - - orig_picks = picks - # We do some extra_repr gymnastics to avoid calling repr(orig_picks) too - # soon as it can be a performance bottleneck (repr on ndarray is slow) - extra_repr = '' - if picks is None: - if isinstance(info, int): # special wrapper for no real info - picks = np.arange(n_chan) - extra_repr = ', treated as range(%d)' % (n_chan,) - else: - picks = none # let _picks_str_to_idx handle it - extra_repr = 'None, treated as "%s"' % (none,) - - # - # slice - # - if isinstance(picks, slice): - picks = np.arange(n_chan)[picks] - - # - # -> ndarray of int (and make a copy) - # - picks = np.atleast_1d(picks) # this works even for picks == 'something' - picks = np.array([], dtype=int) if len(picks) == 0 else picks - if picks.ndim != 1: - raise ValueError('picks must be 1D, got %sD' % (picks.ndim,)) - if picks.dtype.char in ('S', 'U'): - picks = _picks_str_to_idx(info, picks, exclude, with_ref_meg, - return_kind, extra_repr, allow_empty, - orig_picks) - if return_kind: - picked_ch_type_or_generic = picks[1] - picks = picks[0] - if picks.dtype.kind not in ['i', 'u']: - extra_ch = " or list of str (names)" if picks_on == "channels" else "" - msg = ( - f"picks must be a list of int (indices){extra_ch}. " - f"The provided data type {picks.dtype} is invalid." - ) - raise TypeError(msg) - del extra_repr - picks = picks.astype(int) - - # - # ensure we have (optionally non-empty) ndarray of valid int - # - if len(picks) == 0 and not allow_empty: - raise ValueError('No appropriate %s found for the given picks ' - '(%r)' % (picks_on, orig_picks)) - if (picks < -n_chan).any(): - raise ValueError('All picks must be >= %d, got %r' - % (-n_chan, orig_picks)) - if (picks >= n_chan).any(): - raise ValueError('All picks must be < n_%s (%d), got %r' - % (picks_on, n_chan, orig_picks)) - picks %= n_chan # ensure positive - if return_kind: - return picks, picked_ch_type_or_generic - return picks - - -def _picks_str_to_idx(info, picks, exclude, with_ref_meg, return_kind, - extra_repr, allow_empty, orig_picks): - """Turn a list of str into ndarray of int.""" - # special case for _picks_to_idx w/no info: shouldn't really happen - if isinstance(info, int): - raise ValueError('picks as str can only be used when measurement ' - 'info is available') - - # - # first: check our special cases - # - - picks_generic = list() - if len(picks) == 1: - if picks[0] in ('all', 'data', 'data_or_ica'): - if picks[0] == 'all': - use_exclude = info['bads'] if exclude == 'bads' else exclude - picks_generic = pick_channels( - info['ch_names'], info['ch_names'], exclude=use_exclude) - elif picks[0] == 'data': - picks_generic = _pick_data_channels(info, exclude=exclude, - with_ref_meg=with_ref_meg) - elif picks[0] == 'data_or_ica': - picks_generic = _pick_data_or_ica(info, exclude=exclude) - if len(picks_generic) == 0 and orig_picks is None and \ - not allow_empty: - raise ValueError('picks (%s) yielded no channels, consider ' - 'passing picks explicitly' - % (repr(orig_picks) + extra_repr,)) - - # - # second: match all to channel names - # - - bad_names = [] - picks_name = list() - for pick in picks: - try: - picks_name.append(info['ch_names'].index(pick)) - except ValueError: - bad_names.append(pick) - - # - # third: match all to types - # - 
bad_type = None - picks_type = list() - kwargs = dict(meg=False) - meg, fnirs = set(), set() - for pick in picks: - if pick in _PICK_TYPES_KEYS: - kwargs[pick] = True - elif pick in _MEG_CH_TYPES_SPLIT: - meg |= {pick} - elif pick in _FNIRS_CH_TYPES_SPLIT: - fnirs |= {pick} - else: - bad_type = pick - break - else: - # triage MEG and FNIRS, which are complicated due to non-bool entries - extra_picks = set() - if len(meg) > 0 and not kwargs.get('meg', False): - # easiest just to iterate - for use_meg in meg: - extra_picks |= set(pick_types( - info, meg=use_meg, ref_meg=False, exclude=exclude)) - if len(fnirs) > 0 and not kwargs.get('fnirs', False): - if len(fnirs) == 1: - kwargs['fnirs'] = list(fnirs)[0] - else: - kwargs['fnirs'] = list(fnirs) - picks_type = pick_types(info, exclude=exclude, **kwargs) - if len(extra_picks) > 0: - picks_type = sorted(set(picks_type) | set(extra_picks)) - - # - # finally: ensure we have exactly one usable list - # - all_picks = (picks_generic, picks_name, picks_type) - any_found = [len(p) > 0 for p in all_picks] - if sum(any_found) == 0: - if not allow_empty: - raise ValueError( - 'picks (%s) could not be interpreted as ' - 'channel names (no channel "%s"), channel types (no ' - 'type "%s"), or a generic type (just "all" or "data")' - % (repr(orig_picks) + extra_repr, str(bad_names), bad_type)) - picks = np.array([], int) - elif sum(any_found) > 1: - raise RuntimeError('Some channel names are ambiguously equivalent to ' - 'channel types, cannot use string-based ' - 'picks for these') - else: - picks = np.array(all_picks[np.where(any_found)[0][0]]) - - picked_ch_type_or_generic = not len(picks_name) - if len(bad_names) > 0 and not picked_ch_type_or_generic: - warn(f'Channel(s) {bad_names} could not be picked, because ' - 'they are not present in the info instance.') - - if return_kind: - return picks, picked_ch_type_or_generic - return picks - - -def _pick_inst(inst, picks, exclude, copy=True): - """Return an instance with picked and excluded channels.""" - if copy is True: - inst = inst.copy() - picks = _picks_to_idx(inst.info, picks, exclude=[]) - pick_names = [inst.info['ch_names'][pick] for pick in picks] - inst.pick_channels(pick_names) - - if exclude == 'bads': - exclude = [ch for ch in inst.info['bads'] - if ch in inst.info['ch_names']] - if exclude is not None: - inst.drop_channels(exclude) - return inst - - -def _get_channel_types(info, picks=None, unique=False, only_data_chs=False): - """Get the data channel types in an info instance.""" - none = 'data' if only_data_chs else 'all' - picks = _picks_to_idx(info, picks, none, (), allow_empty=False) - ch_types = [channel_type(info, pick) for pick in picks] - if only_data_chs: - ch_types = [ch_type for ch_type in ch_types - if ch_type in _DATA_CH_TYPES_SPLIT] - if unique: - # set does not preserve order but dict does, so let's just use it - ch_types = list({k: k for k in ch_types}.keys()) - return ch_types +# Copyright the MNE-Python contributors. + + +from .._fiff.pick import ( + _DATA_CH_TYPES_ORDER_DEFAULT, + _DATA_CH_TYPES_SPLIT, + _picks_to_idx, +) + +__all__ = [ + # mne-bids, autoreject, mne-connectivity, mne-realtime, mne-nirs, mne-realtime + "_picks_to_idx", + # mne-qt-browser + "_DATA_CH_TYPES_ORDER_DEFAULT", + "_DATA_CH_TYPES_SPLIT", +] diff --git a/mne/io/proc_history.py b/mne/io/proc_history.py deleted file mode 100644 index 306b33ff13f..00000000000 --- a/mne/io/proc_history.py +++ /dev/null @@ -1,294 +0,0 @@ -# -*- coding: utf-8 -*- -# Authors: Denis A. 
Engemann -# Eric Larson -# License: Simplified BSD - -import numpy as np - -from .open import read_tag, fiff_open -from .tree import dir_tree_find -from .write import (start_block, end_block, write_int, write_float, - write_string, write_float_matrix, write_int_matrix, - write_float_sparse, write_id, write_name_list_sanitized, - _safe_name_list) -from .tag import find_tag -from .constants import FIFF -from ..fixes import _csc_matrix_cast -from ..utils import warn, _check_fname - -_proc_keys = ['parent_file_id', 'block_id', 'parent_block_id', - 'date', 'experimenter', 'creator'] -_proc_ids = [FIFF.FIFF_PARENT_FILE_ID, - FIFF.FIFF_BLOCK_ID, - FIFF.FIFF_PARENT_BLOCK_ID, - FIFF.FIFF_MEAS_DATE, - FIFF.FIFF_EXPERIMENTER, - FIFF.FIFF_CREATOR] -_proc_writers = [write_id, write_id, write_id, - write_int, write_string, write_string] -_proc_casters = [dict, dict, dict, np.array, str, str] - - -def _read_proc_history(fid, tree): - """Read processing history from fiff file. - - This function reads the SSS info, the CTC correction and the - calibaraions from the SSS processing logs inside af a raw file - (C.f. Maxfilter v2.2 manual (October 2010), page 21):: - - 104 = { 900 = proc. history - 104 = { 901 = proc. record - 103 = block ID - 204 = date - 212 = scientist - 113 = creator program - 104 = { 502 = SSS info - 264 = SSS task - 263 = SSS coord frame - 265 = SSS origin - 266 = SSS ins.order - 267 = SSS outs.order - 268 = SSS nr chnls - 269 = SSS components - 278 = SSS nfree - 243 = HPI g limit 0.98 - 244 = HPI dist limit 0.005 - 105 = } 502 = SSS info - 104 = { 504 = MaxST info - 264 = SSS task - 272 = SSST subspace correlation - 279 = SSST buffer length - 105 = } - 104 = { 501 = CTC correction - 103 = block ID - 204 = date - 113 = creator program - 800 = CTC matrix - 3417 = proj item chs - 105 = } 501 = CTC correction - 104 = { 503 = SSS finecalib. - 270 = SSS cal chnls - 271 = SSS cal coeff - 105 = } 503 = SSS finecalib. - 105 = } 901 = proc. record - 105 = } 900 = proc. 
history - """ - proc_history = dir_tree_find(tree, FIFF.FIFFB_PROCESSING_HISTORY) - out = list() - if len(proc_history) > 0: - proc_history = proc_history[0] - proc_records = dir_tree_find(proc_history, - FIFF.FIFFB_PROCESSING_RECORD) - for proc_record in proc_records: - record = dict() - for i_ent in range(proc_record['nent']): - kind = proc_record['directory'][i_ent].kind - pos = proc_record['directory'][i_ent].pos - for key, id_, cast in zip(_proc_keys, _proc_ids, - _proc_casters): - if kind == id_: - tag = read_tag(fid, pos) - record[key] = cast(tag.data) - break - else: - warn('Unknown processing history item %s' % kind) - record['max_info'] = _read_maxfilter_record(fid, proc_record) - iass = dir_tree_find(proc_record, FIFF.FIFFB_IAS) - if len(iass) > 0: - # XXX should eventually populate this - ss = [dict() for _ in range(len(iass))] - record['ias'] = ss - if len(record['max_info']) > 0: - out.append(record) - return out - - -def _write_proc_history(fid, info): - """Write processing history to file.""" - if len(info['proc_history']) > 0: - start_block(fid, FIFF.FIFFB_PROCESSING_HISTORY) - for record in info['proc_history']: - start_block(fid, FIFF.FIFFB_PROCESSING_RECORD) - for key, id_, writer in zip(_proc_keys, _proc_ids, _proc_writers): - if key in record: - writer(fid, id_, record[key]) - _write_maxfilter_record(fid, record['max_info']) - if 'ias' in record: - for _ in record['ias']: - start_block(fid, FIFF.FIFFB_IAS) - # XXX should eventually populate this - end_block(fid, FIFF.FIFFB_IAS) - end_block(fid, FIFF.FIFFB_PROCESSING_RECORD) - end_block(fid, FIFF.FIFFB_PROCESSING_HISTORY) - - -_sss_info_keys = ('job', 'frame', 'origin', 'in_order', - 'out_order', 'nchan', 'components', 'nfree', - 'hpi_g_limit', 'hpi_dist_limit') -_sss_info_ids = (FIFF.FIFF_SSS_JOB, - FIFF.FIFF_SSS_FRAME, - FIFF.FIFF_SSS_ORIGIN, - FIFF.FIFF_SSS_ORD_IN, - FIFF.FIFF_SSS_ORD_OUT, - FIFF.FIFF_SSS_NMAG, - FIFF.FIFF_SSS_COMPONENTS, - FIFF.FIFF_SSS_NFREE, - FIFF.FIFF_HPI_FIT_GOOD_LIMIT, - FIFF.FIFF_HPI_FIT_DIST_LIMIT) -_sss_info_writers = (write_int, write_int, write_float, write_int, - write_int, write_int, write_int, write_int, - write_float, write_float) -_sss_info_casters = (int, int, np.array, int, - int, int, np.array, int, - float, float) - -_max_st_keys = ('job', 'subspcorr', 'buflen') -_max_st_ids = (FIFF.FIFF_SSS_JOB, FIFF.FIFF_SSS_ST_CORR, - FIFF.FIFF_SSS_ST_LENGTH) -_max_st_writers = (write_int, write_float, write_float) -_max_st_casters = (int, float, float) - -_sss_ctc_keys = ('block_id', 'date', 'creator', 'decoupler') -_sss_ctc_ids = (FIFF.FIFF_BLOCK_ID, - FIFF.FIFF_MEAS_DATE, - FIFF.FIFF_CREATOR, - FIFF.FIFF_DECOUPLER_MATRIX) -_sss_ctc_writers = (write_id, write_int, write_string, write_float_sparse) -_sss_ctc_casters = (dict, np.array, str, _csc_matrix_cast) - -_sss_cal_keys = ('cal_chans', 'cal_corrs') -_sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS) -_sss_cal_writers = (write_int_matrix, write_float_matrix) -_sss_cal_casters = (np.array, np.array) - - -def _read_ctc(fname): - """Read cross-talk correction matrix.""" - fname = _check_fname(fname, overwrite='read', must_exist=True) - f, tree, _ = fiff_open(fname) - with f as fid: - sss_ctc = _read_maxfilter_record(fid, tree)['sss_ctc'] - bad_str = 'Invalid cross-talk FIF: %s' % fname - if len(sss_ctc) == 0: - raise ValueError(bad_str) - node = dir_tree_find(tree, FIFF.FIFFB_DATA_CORRECTION)[0] - comment = find_tag(fid, node, FIFF.FIFF_COMMENT).data - if comment != 'cross-talk compensation matrix': - raise ValueError(bad_str) 
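# [Editor's note, illustrative only -- not MNE code] The read helpers above rely
# on parallel key/id/caster tables that are zipped so every tag kind found in a
# block is cast and stored under a readable key. A self-contained sketch, with a
# stand-in tag type and literal ids taken from the block layout documented
# earlier (204 = date, 212 = scientist, 113 = creator program):
from collections import namedtuple

import numpy as np

DemoTag = namedtuple("DemoTag", ["kind", "data"])

demo_keys = ("date", "experimenter", "creator")
demo_ids = (204, 212, 113)
demo_casters = (np.array, str, str)


def demo_decode_record(tags):
    """Map raw tags to a dict via the zipped key/id/caster tables."""
    record = dict()
    for tag in tags:
        for key, id_, cast in zip(demo_keys, demo_ids, demo_casters):
            if tag.kind == id_:
                record[key] = cast(tag.data)
                break
    return record


print(demo_decode_record([DemoTag(204, [2010, 10, 1]),
                          DemoTag(113, "MaxFilter 2.2")]))
# -> {'date': array([2010, 10, 1]), 'creator': 'MaxFilter 2.2'}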
- sss_ctc['creator'] = find_tag(fid, node, FIFF.FIFF_CREATOR).data - sss_ctc['date'] = find_tag(fid, node, FIFF.FIFF_MEAS_DATE).data - return sss_ctc - - -def _read_maxfilter_record(fid, tree): - """Read maxfilter processing record from file.""" - sss_info_block = dir_tree_find(tree, FIFF.FIFFB_SSS_INFO) # 502 - sss_info = dict() - if len(sss_info_block) > 0: - sss_info_block = sss_info_block[0] - for i_ent in range(sss_info_block['nent']): - kind = sss_info_block['directory'][i_ent].kind - pos = sss_info_block['directory'][i_ent].pos - for key, id_, cast in zip(_sss_info_keys, _sss_info_ids, - _sss_info_casters): - if kind == id_: - tag = read_tag(fid, pos) - sss_info[key] = cast(tag.data) - break - - max_st_block = dir_tree_find(tree, FIFF.FIFFB_SSS_ST_INFO) # 504 - max_st = dict() - if len(max_st_block) > 0: - max_st_block = max_st_block[0] - for i_ent in range(max_st_block['nent']): - kind = max_st_block['directory'][i_ent].kind - pos = max_st_block['directory'][i_ent].pos - for key, id_, cast in zip(_max_st_keys, _max_st_ids, - _max_st_casters): - if kind == id_: - tag = read_tag(fid, pos) - max_st[key] = cast(tag.data) - break - - sss_ctc_block = dir_tree_find(tree, FIFF.FIFFB_CHANNEL_DECOUPLER) # 501 - sss_ctc = dict() - if len(sss_ctc_block) > 0: - sss_ctc_block = sss_ctc_block[0] - for i_ent in range(sss_ctc_block['nent']): - kind = sss_ctc_block['directory'][i_ent].kind - pos = sss_ctc_block['directory'][i_ent].pos - for key, id_, cast in zip(_sss_ctc_keys, _sss_ctc_ids, - _sss_ctc_casters): - if kind == id_: - tag = read_tag(fid, pos) - sss_ctc[key] = cast(tag.data) - break - else: - if kind == FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST: - tag = read_tag(fid, pos) - chs = _safe_name_list(tag.data, 'read', 'proj_items_chs') - # This list can null chars in the last entry, e.g.: - # [..., u'MEG2642', u'MEG2643', u'MEG2641\x00 ... 
\x00'] - chs[-1] = chs[-1].split('\x00')[0] - sss_ctc['proj_items_chs'] = chs - - sss_cal_block = dir_tree_find(tree, FIFF.FIFFB_SSS_CAL) # 503 - sss_cal = dict() - if len(sss_cal_block) > 0: - sss_cal_block = sss_cal_block[0] - for i_ent in range(sss_cal_block['nent']): - kind = sss_cal_block['directory'][i_ent].kind - pos = sss_cal_block['directory'][i_ent].pos - for key, id_, cast in zip(_sss_cal_keys, _sss_cal_ids, - _sss_cal_casters): - if kind == id_: - tag = read_tag(fid, pos) - sss_cal[key] = cast(tag.data) - break - - max_info = dict(sss_info=sss_info, sss_ctc=sss_ctc, - sss_cal=sss_cal, max_st=max_st) - return max_info - - -def _write_maxfilter_record(fid, record): - """Write maxfilter processing record to file.""" - sss_info = record['sss_info'] - if len(sss_info) > 0: - start_block(fid, FIFF.FIFFB_SSS_INFO) - for key, id_, writer in zip(_sss_info_keys, _sss_info_ids, - _sss_info_writers): - if key in sss_info: - writer(fid, id_, sss_info[key]) - end_block(fid, FIFF.FIFFB_SSS_INFO) - - max_st = record['max_st'] - if len(max_st) > 0: - start_block(fid, FIFF.FIFFB_SSS_ST_INFO) - for key, id_, writer in zip(_max_st_keys, _max_st_ids, - _max_st_writers): - if key in max_st: - writer(fid, id_, max_st[key]) - end_block(fid, FIFF.FIFFB_SSS_ST_INFO) - - sss_ctc = record['sss_ctc'] - if len(sss_ctc) > 0: # dict has entries - start_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER) - for key, id_, writer in zip(_sss_ctc_keys, _sss_ctc_ids, - _sss_ctc_writers): - if key in sss_ctc: - writer(fid, id_, sss_ctc[key]) - if 'proj_items_chs' in sss_ctc: - write_name_list_sanitized( - fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST, - sss_ctc['proj_items_chs'], 'proj_items_chs') - end_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER) - - sss_cal = record['sss_cal'] - if len(sss_cal) > 0: - start_block(fid, FIFF.FIFFB_SSS_CAL) - for key, id_, writer in zip(_sss_cal_keys, _sss_cal_ids, - _sss_cal_writers): - if key in sss_cal: - writer(fid, id_, sss_cal[key]) - end_block(fid, FIFF.FIFFB_SSS_CAL) diff --git a/mne/io/proj.py b/mne/io/proj.py deleted file mode 100644 index 12db504f372..00000000000 --- a/mne/io/proj.py +++ /dev/null @@ -1,1051 +0,0 @@ -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Denis Engemann -# Teon Brooks -# -# License: BSD-3-Clause - -from copy import deepcopy -from itertools import count -import re - -import numpy as np - -from .constants import FIFF -from .pick import pick_types, pick_info, _electrode_types, _ELECTRODE_CH_TYPES -from .tag import find_tag, _rename_list -from .tree import dir_tree_find -from .write import (write_int, write_float, write_string, write_float_matrix, - end_block, start_block, write_name_list_sanitized, - _safe_name_list) -from ..defaults import (_INTERPOLATION_DEFAULT, _BORDER_DEFAULT, - _EXTRAPOLATE_DEFAULT) -from ..utils import (logger, verbose, warn, fill_doc, _validate_type, - object_diff, _check_option) - - -class Projection(dict): - """Projection vector. - - A basic class to proj a meaningful print for projection vectors. - - .. warning:: This class is generally not meant to be instantiated - directly, use ``compute_proj_*`` functions instead. - - Parameters - ---------- - data : dict - The data dictionary. - desc : str - The projector description. - kind : int - The projector kind. - active : bool - Whether or not the projector has been applied. - explained_var : float | None - The explained variance (proportion). 
- """ - - def __init__(self, *, data, desc='', kind=FIFF.FIFFV_PROJ_ITEM_FIELD, - active=False, explained_var=None): - super().__init__(desc=desc, kind=kind, active=active, data=data, - explained_var=explained_var) - - def __repr__(self): # noqa: D105 - s = "%s" % self['desc'] - s += ", active : %s" % self['active'] - s += f", n_channels : {len(self['data']['col_names'])}" - if self['explained_var'] is not None: - s += f', exp. var : {self["explained_var"] * 100:0.2f}%' - return "" % s - - # speed up info copy by taking advantage of mutability - def __deepcopy__(self, memodict): - """Make a deepcopy.""" - cls = self.__class__ - result = cls.__new__(cls) - for k, v in self.items(): - if k == 'data': - v = v.copy() - v['data'] = v['data'].copy() - result[k] = v - else: - result[k] = v # kind, active, desc, explained_var immutable - return result - - def __eq__(self, other): - """Equality == method.""" - return True if len(object_diff(self, other)) == 0 else False - - def __ne__(self, other): - """Different != method.""" - return not self.__eq__(other) - - @fill_doc - def plot_topomap( - self, info, *, sensors=True, show_names=False, contours=6, - outlines='head', sphere=None, image_interp=_INTERPOLATION_DEFAULT, - extrapolate=_EXTRAPOLATE_DEFAULT, border=_BORDER_DEFAULT, res=64, - size=1, cmap=None, vlim=(None, None), cnorm=None, colorbar=False, - cbar_fmt='%3.1f', units=None, axes=None, show=True): - """Plot topographic maps of SSP projections. - - Parameters - ---------- - %(info_not_none)s Used to determine the layout. - %(sensors_topomap)s - %(show_names_topomap)s - - .. versionadded:: 1.2 - %(contours_topomap)s - %(outlines_topomap)s - %(sphere_topomap_auto)s - %(image_interp_topomap)s - %(extrapolate_topomap)s - - .. versionadded:: 1.2 - %(border_topomap)s - %(res_topomap)s - %(size_topomap)s - %(cmap_topomap)s - %(vlim_plot_topomap_proj)s - %(cnorm)s - - .. versionadded:: 1.2 - %(colorbar_topomap)s - %(cbar_fmt_topomap)s - - .. versionadded:: 1.2 - %(units_topomap)s - - .. versionadded:: 1.2 - %(axes_plot_projs_topomap)s - %(show)s - - Returns - ------- - fig : instance of Figure - Figure distributing one image per channel across sensor topography. - - Notes - ----- - .. versionadded:: 0.15.0 - """ # noqa: E501 - from ..viz.topomap import plot_projs_topomap - return plot_projs_topomap( - self, info, sensors=sensors, show_names=show_names, - contours=contours, outlines=outlines, sphere=sphere, - image_interp=image_interp, extrapolate=extrapolate, border=border, - res=res, size=size, cmap=cmap, vlim=vlim, cnorm=cnorm, - colorbar=colorbar, cbar_fmt=cbar_fmt, units=units, axes=axes, - show=show) - - -class ProjMixin(object): - """Mixin class for Raw, Evoked, Epochs. - - Notes - ----- - This mixin adds a proj attribute as a property to data containers. - It is True if at least one proj is present and all of them are active. - The projs might not be applied yet if data are not preloaded. In - this case it's the _projector attribute that does the job. - If a private _data attribute is present then the projs applied - to it are the ones marked as active. - - A proj parameter passed in constructor of raw or epochs calls - apply_proj and hence after the .proj attribute is True. - - As soon as you've applied the projs it will stay active in the - remaining pipeline. - - The suggested pipeline is proj=True in epochs (it's cheaper than for raw). - - When you use delayed SSP in Epochs, projs are applied when you call - get_data() method. 
They are not applied to the evoked._data unless you call - apply_proj(). The reason is that you want to reject with projs although - it's not stored in proj mode. - """ - - @property - def proj(self): - """Whether or not projections are active.""" - return (len(self.info['projs']) > 0 and - all(p['active'] for p in self.info['projs'])) - - @verbose - def add_proj(self, projs, remove_existing=False, verbose=None): - """Add SSP projection vectors. - - Parameters - ---------- - projs : list - List with projection vectors. - remove_existing : bool - Remove the projection vectors currently in the file. - %(verbose)s - - Returns - ------- - self : instance of Raw | Epochs | Evoked - The data container. - """ - if isinstance(projs, Projection): - projs = [projs] - - if (not isinstance(projs, list) and - not all(isinstance(p, Projection) for p in projs)): - raise ValueError('Only projs can be added. You supplied ' - 'something else.') - - # mark proj as inactive, as they have not been applied - projs = deactivate_proj(projs, copy=True) - if remove_existing: - # we cannot remove the proj if they are active - if any(p['active'] for p in self.info['projs']): - raise ValueError('Cannot remove projectors that have ' - 'already been applied') - with self.info._unlock(): - self.info['projs'] = projs - else: - self.info['projs'].extend(projs) - # We don't want to add projectors that are activated again. - with self.info._unlock(): - self.info['projs'] = _uniquify_projs(self.info['projs'], - check_active=False, - sort=False) - return self - - @verbose - def apply_proj(self, verbose=None): - """Apply the signal space projection (SSP) operators to the data. - - Parameters - ---------- - %(verbose)s - - Returns - ------- - self : instance of Raw | Epochs | Evoked - The instance. - - Notes - ----- - Once the projectors have been applied, they can no longer be - removed. It is usually not recommended to apply the projectors at - too early stages, as they are applied automatically later on - (e.g. when computing inverse solutions). - Hint: using the copy method individual projection vectors - can be tested without affecting the original data. - With evoked data, consider the following example:: - - projs_a = mne.read_proj('proj_a.fif') - projs_b = mne.read_proj('proj_b.fif') - # add the first, copy, apply and see ... - evoked.add_proj(a).copy().apply_proj().plot() - # add the second, copy, apply and see ... - evoked.add_proj(b).copy().apply_proj().plot() - # drop the first and see again - evoked.copy().del_proj(0).apply_proj().plot() - evoked.apply_proj() # finally keep both - """ - from ..epochs import BaseEpochs - from ..evoked import Evoked - from .base import BaseRaw - if self.info['projs'] is None or len(self.info['projs']) == 0: - logger.info('No projector specified for this dataset. ' - 'Please consider the method self.add_proj.') - return self - - # Exit delayed mode if you apply proj - if isinstance(self, BaseEpochs) and self._do_delayed_proj: - logger.info('Leaving delayed SSP mode.') - self._do_delayed_proj = False - - if all(p['active'] for p in self.info['projs']): - logger.info('Projections have already been applied. ' - 'Setting proj attribute to True.') - return self - - _projector, info = setup_proj(deepcopy(self.info), add_eeg_ref=False, - activate=True) - # let's not raise a RuntimeError here, otherwise interactive plotting - if _projector is None: # won't be fun. - logger.info('The projections don\'t apply to these data.' 
- ' Doing nothing.') - return self - self._projector, self.info = _projector, info - if isinstance(self, (BaseRaw, Evoked)): - if self.preload: - self._data = np.dot(self._projector, self._data) - else: # BaseEpochs - if self.preload: - for ii, e in enumerate(self._data): - self._data[ii] = self._project_epoch(e) - else: - self.load_data() # will automatically apply - logger.info('SSP projectors applied...') - return self - - def del_proj(self, idx='all'): - """Remove SSP projection vector. - - .. note:: The projection vector can only be removed if it is inactive - (has not been applied to the data). - - Parameters - ---------- - idx : int | list of int | str - Index of the projector to remove. Can also be "all" (default) - to remove all projectors. - - Returns - ------- - self : instance of Raw | Epochs | Evoked - The instance. - """ - if isinstance(idx, str) and idx == 'all': - idx = list(range(len(self.info['projs']))) - idx = np.atleast_1d(np.array(idx, int)).ravel() - - for ii in idx: - proj = self.info['projs'][ii] - if (proj['active'] and - set(self.info['ch_names']) & - set(proj['data']['col_names'])): - msg = (f'Cannot remove projector that has already been ' - f'applied, unless you first remove all channels it ' - f'applies to. The problematic projector is: {proj}') - raise ValueError(msg) - - keep = np.ones(len(self.info['projs'])) - keep[idx] = False # works with negative indexing and does checks - with self.info._unlock(): - self.info['projs'] = [p for p, k in zip(self.info['projs'], keep) - if k] - return self - - @fill_doc - def plot_projs_topomap( - self, ch_type=None, *, sensors=True, show_names=False, contours=6, - outlines='head', sphere=None, image_interp=_INTERPOLATION_DEFAULT, - extrapolate=_EXTRAPOLATE_DEFAULT, border=_BORDER_DEFAULT, res=64, - size=1, cmap=None, vlim=(None, None), cnorm=None, colorbar=False, - cbar_fmt='%3.1f', units=None, axes=None, show=True): - """Plot SSP vector. - - Parameters - ---------- - %(ch_type_topomap_proj)s - %(sensors_topomap)s - %(show_names_topomap)s - - .. versionadded:: 1.2 - %(contours_topomap)s - %(outlines_topomap)s - %(sphere_topomap_auto)s - %(image_interp_topomap)s - %(extrapolate_topomap)s - - .. versionadded:: 0.20 - %(border_topomap)s - %(res_topomap)s - %(size_topomap)s - Only applies when plotting multiple topomaps at a time. - %(cmap_topomap)s - %(vlim_plot_topomap_proj)s - %(cnorm)s - - .. versionadded:: 1.2 - %(colorbar_topomap)s - %(cbar_fmt_topomap)s - - .. versionadded:: 1.2 - %(units_topomap)s - - .. versionadded:: 1.2 - %(axes_plot_projs_topomap)s - %(show)s - - Returns - ------- - fig : instance of Figure - Figure distributing one image per channel across sensor topography. - """ - _projs = [deepcopy(_proj) for _proj in self.info['projs']] - if _projs is None or len(_projs) == 0: - raise ValueError('No projectors in Info; nothing to plot.') - if ch_type is not None: - # make sure the requested channel type(s) exist - _validate_type(ch_type, (str, list, tuple), 'ch_type') - if isinstance(ch_type, str): - ch_type = [ch_type] - bad_ch_types = [_type not in self for _type in ch_type] - if any(bad_ch_types): - raise ValueError(f'ch_type {ch_type[bad_ch_types]} not ' - f'present in {self.__class__.__name__}.') - # remove projs from unrequested channel types. 
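# [Editor's usage sketch, not part of the diffed module] The add_proj /
# apply_proj workflow implemented above, using only the public API. It assumes
# the MNE sample dataset is available locally (data_path() may download it).
import mne

demo_fname = (mne.datasets.sample.data_path() / "MEG" / "sample"
              / "sample_audvis_raw.fif")
demo_raw = mne.io.read_raw_fif(demo_fname, preload=True)

new_projs = mne.compute_proj_raw(demo_raw, n_grad=1, n_mag=1, n_eeg=0)
demo_raw.add_proj(new_projs)   # stored inactive, so the proj property stays False
print(demo_raw.proj)           # False
demo_raw.apply_proj()          # activates and applies every stored projector
print(demo_raw.proj)           # True; active projectors can no longer be removed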
This is a bit - # convoluted because Projection objects don't store channel types, - # only channel names - available_ch_types = np.array(self.get_channel_types()) - for _proj in _projs[::-1]: - idx = np.isin(self.ch_names, _proj['data']['col_names']) - proj_ch_type = np.unique(available_ch_types[idx]) - err_msg = 'Projector contains multiple channel types' - assert len(proj_ch_type) == 1, err_msg - if proj_ch_type[0] != ch_type: - _projs.remove(_proj) - if len(_projs) == 0: - raise ValueError('Nothing to plot (no projectors for channel ' - f'type {ch_type}).') - # now we have non-empty _projs list with correct channel type(s) - from ..viz.topomap import plot_projs_topomap - fig = plot_projs_topomap( - _projs, self.info, sensors=sensors, show_names=show_names, - contours=contours, outlines=outlines, sphere=sphere, - image_interp=image_interp, extrapolate=extrapolate, - border=border, res=res, size=size, cmap=cmap, vlim=vlim, - cnorm=cnorm, colorbar=colorbar, cbar_fmt=cbar_fmt, - units=units, axes=axes, show=show) - return fig - - def _reconstruct_proj(self, mode='accurate', origin='auto'): - from ..forward import _map_meg_or_eeg_channels - if len(self.info['projs']) == 0: - return self - self.apply_proj() - for kind in ('meg', 'eeg'): - kwargs = dict(meg=False) - kwargs[kind] = True - picks = pick_types(self.info, **kwargs) - if len(picks) == 0: - continue - info_from = pick_info(self.info, picks) - info_to = info_from.copy() - with info_to._unlock(): - info_to['projs'] = [] - if kind == 'eeg' and _has_eeg_average_ref_proj(info_from): - info_to['projs'] = [ - make_eeg_average_ref_proj(info_to, verbose=False)] - mapping = _map_meg_or_eeg_channels( - info_from, info_to, mode=mode, origin=origin) - self.data[..., picks, :] = np.matmul( - mapping, self.data[..., picks, :]) - return self - - -def _proj_equal(a, b, check_active=True): - """Test if two projectors are equal.""" - equal = ((a['active'] == b['active'] or not check_active) and - a['kind'] == b['kind'] and - a['desc'] == b['desc'] and - a['data']['col_names'] == b['data']['col_names'] and - a['data']['row_names'] == b['data']['row_names'] and - a['data']['ncol'] == b['data']['ncol'] and - a['data']['nrow'] == b['data']['nrow'] and - np.all(a['data']['data'] == b['data']['data'])) - return equal - - -@verbose -def _read_proj(fid, node, *, ch_names_mapping=None, verbose=None): - ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping - projs = list() - - # Locate the projection data - nodes = dir_tree_find(node, FIFF.FIFFB_PROJ) - if len(nodes) == 0: - return projs - - # This might exist but we won't use it: - # global_nchan = None - # tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN) - # if tag is not None: - # global_nchan = int(tag.data) - - items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM) - for item in items: - # Find all desired tags in one item - - # This probably also exists but used to be written incorrectly - # sometimes - # tag = find_tag(fid, item, FIFF.FIFF_NCHAN) - # if tag is not None: - # nchan = int(tag.data) - # else: - # nchan = global_nchan - - tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION) - if tag is not None: - desc = tag.data - else: - tag = find_tag(fid, item, FIFF.FIFF_NAME) - if tag is not None: - desc = tag.data - else: - raise ValueError('Projection item description missing') - - tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND) - if tag is not None: - kind = int(tag.data) - else: - raise ValueError('Projection item kind missing') - - tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC) - 
if tag is not None: - nvec = int(tag.data) - else: - raise ValueError('Number of projection vectors not specified') - - tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST) - if tag is not None: - names = _safe_name_list(tag.data, 'read', 'names') - else: - raise ValueError('Projection item channel list missing') - - tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS) - if tag is not None: - data = tag.data - else: - raise ValueError('Projection item data missing') - - tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE) - if tag is not None: - active = bool(tag.data) - else: - active = False - - tag = find_tag(fid, item, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR) - if tag is not None: - explained_var = float(tag.data) - else: - explained_var = None - - # handle the case when data is transposed for some reason - if data.shape[0] == len(names) and data.shape[1] == nvec: - data = data.T - - if data.shape[1] != len(names): - raise ValueError('Number of channel names does not match the ' - 'size of data matrix') - - # just always use this, we used to have bugs with writing the - # number correctly... - nchan = len(names) - names[:] = _rename_list(names, ch_names_mapping) - # Use exactly the same fields in data as in a named matrix - one = Projection(kind=kind, active=active, desc=desc, - data=dict(nrow=nvec, ncol=nchan, row_names=None, - col_names=names, data=data), - explained_var=explained_var) - - projs.append(one) - - if len(projs) > 0: - logger.info(' Read a total of %d projection items:' % len(projs)) - for proj in projs: - misc = 'active' if proj['active'] else ' idle' - logger.info(f' {proj["desc"]} ' - f'({proj["data"]["nrow"]} x ' - f'{len(proj["data"]["col_names"])}) {misc}') - - return projs - - -############################################################################### -# Write - -def _write_proj(fid, projs, *, ch_names_mapping=None): - """Write a projection operator to a file. - - Parameters - ---------- - fid : file - The file descriptor of the open file. - projs : dict - The projection operator. 
- """ - if len(projs) == 0: - return - - ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping - # validation - _validate_type(projs, (list, tuple), 'projs') - for pi, proj in enumerate(projs): - _validate_type(proj, Projection, f'projs[{pi}]') - - start_block(fid, FIFF.FIFFB_PROJ) - - for proj in projs: - start_block(fid, FIFF.FIFFB_PROJ_ITEM) - write_int(fid, FIFF.FIFF_NCHAN, len(proj['data']['col_names'])) - names = _rename_list(proj['data']['col_names'], ch_names_mapping) - write_name_list_sanitized( - fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST, names, 'col_names') - write_string(fid, FIFF.FIFF_NAME, proj['desc']) - write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind']) - if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD: - write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0) - - write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj['data']['nrow']) - write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj['active']) - write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS, - proj['data']['data']) - if proj['explained_var'] is not None: - write_float(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR, - proj['explained_var']) - end_block(fid, FIFF.FIFFB_PROJ_ITEM) - - end_block(fid, FIFF.FIFFB_PROJ) - - -############################################################################### -# Utils - -def _check_projs(projs, copy=True): - """Check that projs is a list of Projection.""" - if not isinstance(projs, (list, tuple)): - raise TypeError('projs must be a list or tuple, got %s' - % (type(projs),)) - for pi, p in enumerate(projs): - if not isinstance(p, Projection): - raise TypeError('All entries in projs list must be Projection ' - 'instances, but projs[%d] is type %s' - % (pi, type(p))) - return deepcopy(projs) if copy else projs - - -def make_projector(projs, ch_names, bads=(), include_active=True): - """Create an SSP operator from SSP projection vectors. - - Parameters - ---------- - projs : list - List of projection vectors. - ch_names : list of str - List of channels to include in the projection matrix. - bads : list of str - Some bad channels to exclude. If bad channels were marked - in the raw file when projs were calculated using mne-python, - they should not need to be included here as they will - have been automatically omitted from the projectors. - include_active : bool - Also include projectors that are already active. - - Returns - ------- - proj : array of shape [n_channels, n_channels] - The projection operator to apply to the data. - nproj : int - How many items in the projector. - U : array - The orthogonal basis of the projection vectors. - """ - return _make_projector(projs, ch_names, bads, include_active) - - -def _make_projector(projs, ch_names, bads=(), include_active=True, - inplace=False): - """Subselect projs based on ch_names and bads. - - Use inplace=True mode to modify ``projs`` inplace so that no - warning will be raised next time projectors are constructed with - the given inputs. If inplace=True, no meaningful data are returned. 
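# [Editor's note, numpy-only illustration -- not MNE code] What the routine
# documented above and implemented just below ultimately produces: the stacked
# projection vectors are orthonormalized with an SVD, nearly dependent vectors
# are dropped (relative singular value <= 1e-2), and the operator applied to
# the data is I - U @ U.T.
import numpy as np

demo_rng = np.random.default_rng(0)
n_chan, n_vec = 8, 2
vecs = demo_rng.standard_normal((n_chan, n_vec))   # projection vectors, stacked
U, S, _ = np.linalg.svd(vecs, full_matrices=False)
U = U[:, (S / S[0]) > 1e-2]                        # keep independent directions
proj_op = np.eye(n_chan) - U @ U.T                 # the SSP operator
demo_data = demo_rng.standard_normal((n_chan, 100))
print(np.allclose(U.T @ (proj_op @ demo_data), 0))  # True: components removed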
- """ - from scipy import linalg - nchan = len(ch_names) - if nchan == 0: - raise ValueError('No channel names specified') - - default_return = (np.eye(nchan, nchan), 0, np.empty((nchan, 0))) - - # Check trivial cases first - if projs is None: - return default_return - - nvec = 0 - nproj = 0 - for p in projs: - if not p['active'] or include_active: - nproj += 1 - nvec += p['data']['nrow'] - - if nproj == 0: - return default_return - - # Pick the appropriate entries - vecs = np.zeros((nchan, nvec)) - nvec = 0 - nonzero = 0 - bads = set(bads) - for k, p in enumerate(projs): - if not p['active'] or include_active: - if (len(p['data']['col_names']) != - len(np.unique(p['data']['col_names']))): - raise ValueError('Channel name list in projection item %d' - ' contains duplicate items' % k) - - # Get the two selection vectors to pick correct elements from - # the projection vectors omitting bad channels - sel = [] - vecsel = [] - p_set = set(p['data']['col_names']) # faster membership access - for c, name in enumerate(ch_names): - if name not in bads and name in p_set: - sel.append(c) - vecsel.append(p['data']['col_names'].index(name)) - - # If there is something to pick, pickit - nrow = p['data']['nrow'] - this_vecs = vecs[:, nvec:nvec + nrow] - if len(sel) > 0: - this_vecs[sel] = p['data']['data'][:, vecsel].T - - # Rescale for better detection of small singular values - for v in range(p['data']['nrow']): - psize = np.linalg.norm(this_vecs[:, v]) - if psize > 0: - orig_n = p['data']['data'].any(axis=0).sum() - # Average ref still works if channels are removed - # Use relative power to determine if we're in trouble. - # 10% loss is hopefully a reasonable threshold. - if psize < 0.9 and not inplace and \ - (p['kind'] != FIFF.FIFFV_PROJ_ITEM_EEG_AVREF or - len(vecsel) == 1): - warn( - f'Projection vector {repr(p["desc"])} has been ' - f'reduced to {100 * psize:0.2f}% of its ' - 'original magnitude by subselecting ' - f'{len(vecsel)}/{orig_n} of the original ' - 'channels. If the ignored channels were bad ' - 'during SSP computation, we recommend ' - 'recomputing proj (via compute_proj_raw ' - 'or related functions) with the bad channels ' - 'properly marked, because computing SSP with bad ' - 'channels present in the data but unmarked is ' - 'dangerous (it can bias the PCA used by SSP). ' - 'On the other hand, if you know that all channels ' - 'were good during SSP computation, you can safely ' - 'use info.normalize_proj() to suppress this ' - 'warning during projection.') - this_vecs[:, v] /= psize - nonzero += 1 - # If doing "inplace" mode, "fix" the projectors to only operate - # on this subset of channels. - if inplace: - p['data']['data'] = this_vecs[sel].T - p['data']['col_names'] = [p['data']['col_names'][ii] - for ii in vecsel] - p['data']['ncol'] = len(p['data']['col_names']) - nvec += p['data']['nrow'] - - # Check whether all of the vectors are exactly zero - if nonzero == 0 or inplace: - return default_return - - # Reorthogonalize the vectors - U, S, _ = linalg.svd(vecs[:, :nvec], full_matrices=False) - - # Throw away the linearly dependent guys - nproj = np.sum((S / S[0]) > 1e-2) - U = U[:, :nproj] - - # Here is the celebrated result - proj = np.eye(nchan, nchan) - np.dot(U, U.T) - if nproj >= nchan: # e.g., 3 channels and 3 projectors - raise RuntimeError('Application of %d projectors for %d channels ' - 'will yield no components.' % (nproj, nchan)) - - return proj, nproj, U - - -def _normalize_proj(info): - """Normalize proj after subselection to avoid warnings. 
- - This is really only useful for tests, and might not be needed - eventually if we change or improve our handling of projectors - with picks. - """ - # Here we do info.get b/c info can actually be a noise cov - _make_projector(info['projs'], info.get('ch_names', info.get('names')), - info['bads'], include_active=True, inplace=True) - - -@fill_doc -def make_projector_info(info, include_active=True): - """Make an SSP operator using the measurement info. - - Calls make_projector on good channels. - - Parameters - ---------- - %(info_not_none)s - include_active : bool - Also include projectors that are already active. - - Returns - ------- - proj : array of shape [n_channels, n_channels] - The projection operator to apply to the data. - nproj : int - How many items in the projector. - """ - proj, nproj, _ = make_projector(info['projs'], info['ch_names'], - info['bads'], include_active) - return proj, nproj - - -@verbose -def activate_proj(projs, copy=True, verbose=None): - """Set all projections to active. - - Useful before passing them to make_projector. - - Parameters - ---------- - projs : list - The projectors. - copy : bool - Modify projs in place or operate on a copy. - %(verbose)s - - Returns - ------- - projs : list - The projectors. - """ - if copy: - projs = deepcopy(projs) - - # Activate the projection items - for proj in projs: - proj['active'] = True - - logger.info('%d projection items activated' % len(projs)) - - return projs - - -@verbose -def deactivate_proj(projs, copy=True, verbose=None): - """Set all projections to inactive. - - Useful before saving raw data without projectors applied. - - Parameters - ---------- - projs : list - The projectors. - copy : bool - Modify projs in place or operate on a copy. - %(verbose)s - - Returns - ------- - projs : list - The projectors. - """ - if copy: - projs = deepcopy(projs) - - # Deactivate the projection items - for proj in projs: - proj['active'] = False - - logger.info('%d projection items deactivated' % len(projs)) - - return projs - - -# Keep in sync with doc below -_EEG_AVREF_PICK_DICT = {k: True for k in _ELECTRODE_CH_TYPES} - - -@verbose -def make_eeg_average_ref_proj(info, activate=True, *, ch_type='eeg', - verbose=None): - """Create an EEG average reference SSP projection vector. - - Parameters - ---------- - %(info_not_none)s - activate : bool - If True projections are activated. - ch_type : str - The channel type to use for reference projection. - Valid types are ``'eeg'``, ``'ecog'``, ``'seeg'`` and ``'dbs'``. - - .. versionadded:: 1.2 - %(verbose)s - - Returns - ------- - proj: instance of Projection - The SSP/PCA projector. - """ - if info.get('custom_ref_applied', False): - raise RuntimeError('A custom reference has been applied to the ' - 'data earlier. 
Please use the ' - 'mne.io.set_eeg_reference function to move from ' - 'one EEG reference to another.') - - _validate_type(ch_type, (list, tuple, str), 'ch_type') - singleton = False - if isinstance(ch_type, str): - ch_type = [ch_type] - singleton = True - for ci, this_ch_type in enumerate(ch_type): - _check_option('ch_type' + ('' if singleton else f'[{ci}]'), - this_ch_type, list(_EEG_AVREF_PICK_DICT)) - - ch_type_name = '/'.join(c.upper() for c in ch_type) - logger.info(f"Adding average {ch_type_name} reference projection.") - - ch_dict = {c: True for c in ch_type} - for c in ch_type: - one_picks = pick_types(info, exclude='bads', **{c: True}) - if len(one_picks) == 0: - raise ValueError(f'Cannot create {ch_type_name} average reference ' - f'projector (no {c.upper()} data found)') - del ch_type - ch_sel = pick_types(info, **ch_dict, exclude='bads') - ch_names = info['ch_names'] - ch_names = [ch_names[k] for k in ch_sel] - n_chs = len(ch_sel) - vec = np.ones((1, n_chs)) - vec /= np.sqrt(n_chs) - explained_var = None - proj_data = dict(col_names=ch_names, row_names=None, - data=vec, nrow=1, ncol=n_chs) - proj = Projection( - active=activate, data=proj_data, explained_var=explained_var, - desc=f'Average {ch_type_name} reference', - kind=FIFF.FIFFV_PROJ_ITEM_EEG_AVREF) - return proj - - -@verbose -def _has_eeg_average_ref_proj( - info, *, projs=None, check_active=False, ch_type=None, verbose=None): - """Determine if a list of projectors has an average EEG ref. - - Optionally, set check_active=True to additionally check if the CAR - has already been applied. - """ - from .meas_info import Info - _validate_type(info, Info, 'info') - projs = info.get('projs', []) if projs is None else projs - if ch_type is None: - pick_kwargs = _EEG_AVREF_PICK_DICT - else: - ch_type = [ch_type] if isinstance(ch_type, str) else ch_type - pick_kwargs = {ch_type: True for ch_type in ch_type} - ch_type = '/'.join(c.upper() for c in pick_kwargs) - want_names = [ - info['ch_names'][pick] for pick in pick_types( - info, exclude='bads', **pick_kwargs)] - if not want_names: - return False - found_names = list() - for proj in projs: - if (proj['kind'] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF or - re.match('^Average .* reference$', proj['desc'])): - if not check_active or proj['active']: - found_names.extend(proj['data']['col_names']) - # If some are missing we have a problem (keep order for the message, - # otherwise we could use set logic) - missing = [name for name in want_names if name not in found_names] - if missing: - if found_names: # found some but not all: warn - warn(f'Incomplete {ch_type} projector, ' - f'missing channel(s) {missing}') - return False - return True - - -def _needs_eeg_average_ref_proj(info): - """Determine if the EEG needs an averge EEG reference. - - This returns True if no custom reference has been applied and no average - reference projection is present in the list of projections. - """ - if info['custom_ref_applied']: - return False - if not _electrode_types(info): - return False - if _has_eeg_average_ref_proj(info): - return False - return True - - -@verbose -def setup_proj(info, add_eeg_ref=True, activate=True, *, eeg_ref_ch_type='eeg', - verbose=None): - """Set up projection for Raw and Epochs. - - Parameters - ---------- - %(info_not_none)s Warning: will be modified in-place. - add_eeg_ref : bool - If True, an EEG average reference will be added (unless one - already exists). - activate : bool - If True projections are activated. 
- eeg_ref_ch_type : str - The channel type to use for reference projection. - Valid types are 'eeg', 'ecog', 'seeg' and 'dbs'. - - .. versionadded:: 1.2 - %(verbose)s - - Returns - ------- - projector : array of shape [n_channels, n_channels] - The projection operator to apply to the data. - info : mne.Info - The modified measurement info. - """ - # Add EEG ref reference proj if necessary - if add_eeg_ref and _needs_eeg_average_ref_proj(info): - eeg_proj = make_eeg_average_ref_proj( - info, activate=activate, ch_type=eeg_ref_ch_type) - info['projs'].append(eeg_proj) - - # Create the projector - projector, nproj = make_projector_info(info) - if nproj == 0: - if verbose: - logger.info('The projection vectors do not apply to these ' - 'channels') - projector = None - else: - logger.info('Created an SSP operator (subspace dimension = %d)' - % nproj) - - # The projection items have been activated - if activate: - with info._unlock(): - info['projs'] = activate_proj(info['projs'], copy=False) - - return projector, info - - -def _uniquify_projs(projs, check_active=True, sort=True): - """Make unique projs.""" - final_projs = [] - for proj in projs: # flatten - if not any(_proj_equal(p, proj, check_active) for p in final_projs): - final_projs.append(proj) - - my_count = count(len(final_projs)) - - def sorter(x): - """Sort in a nice way.""" - digits = [s for s in x['desc'] if s.isdigit()] - if digits: - sort_idx = int(digits[-1]) - else: - sort_idx = next(my_count) - return (sort_idx, x['desc']) - - return sorted(final_projs, key=sorter) if sort else final_projs diff --git a/mne/io/reference.py b/mne/io/reference.py deleted file mode 100644 index f62c5637140..00000000000 --- a/mne/io/reference.py +++ /dev/null @@ -1,604 +0,0 @@ -# Authors: Marijn van Vliet -# Alexandre Gramfort -# Teon Brooks -# -# License: BSD-3-Clause - -import numpy as np - -from .constants import FIFF -from .meas_info import _check_ch_keys -from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj -from .proj import setup_proj -from .pick import (pick_types, pick_channels, pick_channels_forward, - _ELECTRODE_CH_TYPES) -from .base import BaseRaw -from ..evoked import Evoked -from ..epochs import BaseEpochs -from ..fixes import pinv -from ..utils import (logger, warn, verbose, _validate_type, _check_preload, - _check_option, fill_doc, _on_missing) -from ..defaults import DEFAULTS - - -def _copy_channel(inst, ch_name, new_ch_name): - """Add a copy of a channel specified by ch_name. - - Input data can be in the form of Raw, Epochs or Evoked. - - The instance object is modified inplace. - - Parameters - ---------- - inst : instance of Raw | Epochs | Evoked - Data containing the EEG channels - ch_name : str - Name of the channel to copy. - new_ch_name : str - Name given to the copy of the channel. - - Returns - ------- - inst : instance of Raw | Epochs | Evoked - The data with a copy of a given channel. 
- """ - new_inst = inst.copy().pick_channels([ch_name]) - new_inst.rename_channels({ch_name: new_ch_name}) - inst.add_channels([new_inst], force_update_info=True) - return inst - - -def _check_before_reference(inst, ref_from, ref_to, ch_type): - """Prepare instance for referencing.""" - # Check to see that data is preloaded - _check_preload(inst, "Applying a reference") - - ch_type = _get_ch_type(inst, ch_type) - ch_dict = {**{type_: True for type_ in ch_type}, - 'meg': False, 'ref_meg': False} - eeg_idx = pick_types(inst.info, **ch_dict) - - if ref_to is None: - ref_to = [inst.ch_names[i] for i in eeg_idx] - extra = 'EEG channels found' - else: - extra = 'channels supplied' - if len(ref_to) == 0: - raise ValueError('No %s to apply the reference to' % (extra,)) - - # After referencing, existing SSPs might not be valid anymore. - projs_to_remove = [] - for i, proj in enumerate(inst.info['projs']): - # Remove any average reference projections - if proj['desc'] == 'Average EEG reference' or \ - proj['kind'] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF: - logger.info('Removing existing average EEG reference ' - 'projection.') - # Don't remove the projection right away, but do this at the end of - # this loop. - projs_to_remove.append(i) - - # Inactive SSPs may block re-referencing - elif (not proj['active'] and - len([ch for ch in (ref_from + ref_to) - if ch in proj['data']['col_names']]) > 0): - - raise RuntimeError( - 'Inactive signal space projection (SSP) operators are ' - 'present that operate on sensors involved in the desired ' - 'referencing scheme. These projectors need to be applied ' - 'using the apply_proj() method function before the desired ' - 'reference can be set.' - ) - - for i in projs_to_remove: - del inst.info['projs'][i] - - # Need to call setup_proj after changing the projs: - inst._projector, _ = \ - setup_proj(inst.info, add_eeg_ref=False, activate=False) - - # If the reference touches EEG/ECoG/sEEG/DBS electrodes, note in the - # info that a non-CAR has been applied. - ref_to_channels = pick_channels(inst.ch_names, ref_to, ordered=True) - if len(np.intersect1d(ref_to_channels, eeg_idx)) > 0: - with inst.info._unlock(): - inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON - - return ref_to - - -def _apply_reference(inst, ref_from, ref_to=None, forward=None, - ch_type='auto'): - """Apply a custom EEG referencing scheme.""" - ref_to = _check_before_reference(inst, ref_from, ref_to, ch_type) - - # Compute reference - if len(ref_from) > 0: - # this is guaranteed below, but we should avoid the crazy pick_channels - # behavior that [] gives all. Also use ordered=True just to make sure - # that all supplied channels actually exist. - assert len(ref_to) > 0 - ref_names = ref_from - ref_from = pick_channels(inst.ch_names, ref_from, ordered=True) - ref_to = pick_channels(inst.ch_names, ref_to, ordered=True) - - data = inst._data - ref_data = data[..., ref_from, :].mean(-2, keepdims=True) - data[..., ref_to, :] -= ref_data - ref_data = ref_data[..., 0, :] - - # REST - if forward is not None: - # use ch_sel and the given forward - forward = pick_channels_forward(forward, ref_names, ordered=True) - # 1-3. Compute a forward (G) and avg-ref'ed data (done above) - G = forward['sol']['data'] - assert G.shape[0] == len(ref_names) - # 4. Compute the forward (G) and average-reference it (Ga): - Ga = G - np.mean(G, axis=0, keepdims=True) - # 5. Compute the Ga_inv by SVD - Ga_inv = pinv(Ga, rtol=1e-6) - # 6. Compute Ra = (G @ Ga_inv) in eq (8) from G and Ga_inv - Ra = G @ Ga_inv - # 7-8. 
Compute Vp = Ra @ Va; then Vpa=average(Vp) - Vpa = np.mean(Ra @ data[..., ref_from, :], axis=-2, keepdims=True) - data[..., ref_to, :] += Vpa - else: - ref_data = None - - return inst, ref_data - - -@fill_doc -def add_reference_channels(inst, ref_channels, copy=True): - """Add reference channels to data that consists of all zeros. - - Adds reference channels to data that were not included during recording. - This is useful when you need to re-reference your data to different - channels. These added channels will consist of all zeros. - - Parameters - ---------- - inst : instance of Raw | Epochs | Evoked - Instance of Raw or Epochs with EEG channels and reference channel(s). - %(ref_channels)s - copy : bool - Specifies whether the data will be copied (True) or modified in-place - (False). Defaults to True. - - Returns - ------- - inst : instance of Raw | Epochs | Evoked - Data with added EEG reference channels. - """ - # Check to see that data is preloaded - _check_preload(inst, 'add_reference_channels') - _validate_type(ref_channels, (list, tuple, str), 'ref_channels') - if isinstance(ref_channels, str): - ref_channels = [ref_channels] - for ch in ref_channels: - if ch in inst.info['ch_names']: - raise ValueError("Channel %s already specified in inst." % ch) - - # Once CAR is applied (active), don't allow adding channels - if _has_eeg_average_ref_proj(inst.info, check_active=True): - raise RuntimeError('Average reference already applied to data.') - - if copy: - inst = inst.copy() - - if isinstance(inst, (BaseRaw, Evoked)): - data = inst._data - refs = np.zeros((len(ref_channels), data.shape[1])) - data = np.vstack((data, refs)) - inst._data = data - elif isinstance(inst, BaseEpochs): - data = inst._data - x, y, z = data.shape - refs = np.zeros((x * len(ref_channels), z)) - data = np.vstack((data.reshape((x * y, z), order='F'), refs)) - data = data.reshape(x, y + len(ref_channels), z, order='F') - inst._data = data - else: - raise TypeError("inst should be Raw, Epochs, or Evoked instead of %s." - % type(inst)) - nchan = len(inst.info['ch_names']) - - # only do this if we actually have digitisation points - if inst.info.get('dig', None) is not None: - # "zeroth" EEG electrode dig points is reference - ref_dig_loc = [dl for dl in inst.info['dig'] if ( - dl['kind'] == FIFF.FIFFV_POINT_EEG and - dl['ident'] == 0)] - if len(ref_channels) > 1 or len(ref_dig_loc) != len(ref_channels): - ref_dig_array = np.full(12, np.nan) - warn('The locations of multiple reference channels are ignored.') - else: # n_ref_channels == 1 and a single ref digitization exists - ref_dig_array = np.concatenate((ref_dig_loc[0]['r'], - ref_dig_loc[0]['r'], np.zeros(6))) - # Replace the (possibly new) Ref location for each channel - for idx in pick_types(inst.info, meg=False, eeg=True, exclude=[]): - inst.info['chs'][idx]['loc'][3:6] = ref_dig_loc[0]['r'] - else: - # Ideally we'd fall back on getting the location from a montage, but - # locations for non-present channels aren't stored, so location is - # unknown. Users can call set_montage() again if needed. 
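# [Editor's usage sketch, not part of the diffed module] Adding back the
# recording reference with the public wrapper implemented here. "FCz" is a
# hypothetical reference name; the appended channel is all zeros until the data
# are re-referenced. Assumes the MNE sample dataset is available locally.
import mne

demo_fname = (mne.datasets.sample.data_path() / "MEG" / "sample"
              / "sample_audvis_raw.fif")
demo_raw = mne.io.read_raw_fif(demo_fname, preload=True)
demo_raw = mne.add_reference_channels(demo_raw, ["FCz"])
print("FCz" in demo_raw.ch_names)             # True
print(demo_raw.get_data(picks="FCz").any())   # False: flat reference channel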
- ref_dig_array = np.full(12, np.nan) - logger.info('Location for this channel is unknown; consider calling ' - 'set_montage() again if needed.') - - for ch in ref_channels: - chan_info = {'ch_name': ch, - 'coil_type': FIFF.FIFFV_COIL_EEG, - 'kind': FIFF.FIFFV_EEG_CH, - 'logno': nchan + 1, - 'scanno': nchan + 1, - 'cal': 1, - 'range': 1., - 'unit_mul': 0., - 'unit': FIFF.FIFF_UNIT_V, - 'coord_frame': FIFF.FIFFV_COORD_HEAD, - 'loc': ref_dig_array} - inst.info['chs'].append(chan_info) - inst.info._update_redundant() - range_ = np.arange(1, len(ref_channels) + 1) - if isinstance(inst, BaseRaw): - inst._cals = np.hstack((inst._cals, [1] * len(ref_channels))) - for pi, picks in enumerate(inst._read_picks): - inst._read_picks[pi] = np.concatenate( - [picks, np.max(picks) + range_]) - elif isinstance(inst, BaseEpochs): - picks = inst.picks - inst.picks = np.concatenate( - [picks, np.max(picks) + range_]) - inst.info._check_consistency() - set_eeg_reference(inst, ref_channels=ref_channels, copy=False, - verbose=False) - return inst - - -_ref_dict = { - FIFF.FIFFV_MNE_CUSTOM_REF_ON: 'on', - FIFF.FIFFV_MNE_CUSTOM_REF_OFF: 'off', - FIFF.FIFFV_MNE_CUSTOM_REF_CSD: 'CSD', -} - - -def _check_can_reref(inst): - _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "Instance") - current_custom = inst.info['custom_ref_applied'] - if current_custom not in (FIFF.FIFFV_MNE_CUSTOM_REF_ON, - FIFF.FIFFV_MNE_CUSTOM_REF_OFF): - raise RuntimeError('Cannot set new reference on data with custom ' - 'reference type %r' % (_ref_dict[current_custom],)) - - -@verbose -def set_eeg_reference(inst, ref_channels='average', copy=True, - projection=False, ch_type='auto', forward=None, - *, joint=False, verbose=None): - """Specify which reference to use for EEG data. - - Use this function to explicitly specify the desired reference for EEG. - This can be either an existing electrode or a new virtual channel. - This function will re-reference the data according to the desired - reference. - - Note that it is also possible to re-reference the signal using a - Laplacian (LAP) "reference-free" transformation using the - :func:`.compute_current_source_density` function. - - Parameters - ---------- - inst : instance of Raw | Epochs | Evoked - Instance of Raw or Epochs with EEG channels and reference channel(s). - %(ref_channels_set_eeg_reference)s - copy : bool - Specifies whether the data will be copied (True) or modified in-place - (False). Defaults to True. - %(projection_set_eeg_reference)s - %(ch_type_set_eeg_reference)s - %(forward_set_eeg_reference)s - %(joint_set_eeg_reference)s - %(verbose)s - - Returns - ------- - inst : instance of Raw | Epochs | Evoked - Data with EEG channels re-referenced. If ``ref_channels='average'`` and - ``projection=True`` a projection will be added instead of directly - re-referencing the data. - ref_data : array - Array of reference data subtracted from EEG channels. This will be - ``None`` if ``projection=True`` or ``ref_channels='REST'``. - %(set_eeg_reference_see_also_notes)s - """ - from ..forward import Forward - _check_can_reref(inst) - - ch_type = _get_ch_type(inst, ch_type) - - if projection: # average reference projector - if ref_channels != 'average': - raise ValueError('Setting projection=True is only supported for ' - 'ref_channels="average", got %r.' - % (ref_channels,)) - # We need verbose='error' here in case we add projs sequentially - if _has_eeg_average_ref_proj( - inst.info, ch_type=ch_type, verbose='error'): - warn('An average reference projection was already added. 
The data ' - 'has been left untouched.') - else: - # Creating an average reference may fail. In this case, make - # sure that the custom_ref_applied flag is left untouched. - custom_ref_applied = inst.info['custom_ref_applied'] - - try: - with inst.info._unlock(): - inst.info['custom_ref_applied'] = \ - FIFF.FIFFV_MNE_CUSTOM_REF_OFF - if joint: - inst.add_proj( - make_eeg_average_ref_proj( - inst.info, ch_type=ch_type, activate=False)) - else: - for this_ch_type in ch_type: - inst.add_proj( - make_eeg_average_ref_proj( - inst.info, ch_type=this_ch_type, - activate=False)) - except Exception: - with inst.info._unlock(): - inst.info['custom_ref_applied'] = custom_ref_applied - raise - # If the data has been preloaded, projections will no - # longer be automatically applied. - if inst.preload: - logger.info('Average reference projection was added, ' - 'but has not been applied yet. Use the ' - 'apply_proj method to apply it.') - return inst, None - del projection # not used anymore - - inst = inst.copy() if copy else inst - ch_dict = {**{type_: True for type_ in ch_type}, - 'meg': False, 'ref_meg': False} - ch_sel = [inst.ch_names[i] for i in pick_types(inst.info, **ch_dict)] - - if ref_channels == 'REST': - _validate_type(forward, Forward, 'forward when ref_channels="REST"') - else: - forward = None # signal to _apply_reference not to do REST - - if ref_channels in ('average', 'REST'): - logger.info(f'Applying {ref_channels} reference.') - ref_channels = ch_sel - - if ref_channels == []: - logger.info('EEG data marked as already having the desired reference.') - else: - logger.info( - 'Applying a custom ' - f"{tuple(DEFAULTS['titles'][type_] for type_ in ch_type)} " - 'reference.') - - return _apply_reference(inst, ref_channels, ch_sel, forward, - ch_type=ch_type) - - -def _get_ch_type(inst, ch_type): - _validate_type(ch_type, (str, list, tuple), 'ch_type') - valid_ch_types = ('auto',) + _ELECTRODE_CH_TYPES - if isinstance(ch_type, str): - _check_option('ch_type', ch_type, valid_ch_types) - if ch_type != 'auto': - ch_type = [ch_type] - elif isinstance(ch_type, (list, tuple)): - for type_ in ch_type: - _validate_type(type_, str, 'ch_type') - _check_option('ch_type', type_, valid_ch_types[1:]) - ch_type = list(ch_type) - - # if ch_type is 'auto', search through list to find first reasonable - # reference-able channel type. - if ch_type == 'auto': - for type_ in _ELECTRODE_CH_TYPES: - if type_ in inst: - ch_type = [type_] - logger.info('%s channel type selected for ' - 're-referencing' % DEFAULTS['titles'][type_]) - break - # if auto comes up empty, or the user specifies a bad ch_type. - else: - raise ValueError('No EEG, ECoG, sEEG or DBS channels found ' - 'to rereference.') - return ch_type - - -@verbose -def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None, - drop_refs=True, copy=True, on_bad="warn", - verbose=None): - """Re-reference selected channels using a bipolar referencing scheme. - - A bipolar reference takes the difference between two channels (the anode - minus the cathode) and adds it as a new virtual channel. The original - channels will be dropped by default. - - Multiple anodes and cathodes can be specified, in which case multiple - virtual channels will be created. The 1st cathode will be subtracted - from the 1st anode, the 2nd cathode from the 2nd anode, etc. - - By default, the virtual channels will be annotated with channel-info and - -location of the anodes and coil types will be set to EEG_BIPOLAR. 
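# [Editor's usage sketch, not part of the diffed module] The public
# re-referencing entry point whose implementation ends just above. With
# projection=True only an inactive average-reference projector is added; with
# explicit names their mean is subtracted directly. Channel names refer to the
# MNE sample dataset and stand in for, e.g., linked mastoids.
import mne

demo_fname = (mne.datasets.sample.data_path() / "MEG" / "sample"
              / "sample_audvis_raw.fif")
demo_raw = mne.io.read_raw_fif(demo_fname, preload=True)

# direct common average reference (returns a re-referenced copy plus ref_data)
raw_car, _ = mne.set_eeg_reference(demo_raw, "average")
# custom reference to the mean of two named channels
raw_custom, ref_data = mne.set_eeg_reference(demo_raw, ["EEG 010", "EEG 011"])
# projector variant: drop existing projs, then add an inactive CAR projector
raw_proj = demo_raw.copy().del_proj().set_eeg_reference("average", projection=True)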
- - Parameters - ---------- - inst : instance of Raw | Epochs | Evoked - Data containing the unreferenced channels. - anode : str | list of str - The name(s) of the channel(s) to use as anode in the bipolar reference. - cathode : str | list of str - The name(s) of the channel(s) to use as cathode in the bipolar - reference. - ch_name : str | list of str | None - The channel name(s) for the virtual channel(s) containing the resulting - signal. By default, bipolar channels are named after the anode and - cathode, but it is recommended to supply a more meaningful name. - ch_info : dict | list of dict | None - This parameter can be used to supply a dictionary (or a dictionary for - each bipolar channel) containing channel information to merge in, - overwriting the default values. Defaults to None. - drop_refs : bool - Whether to drop the anode/cathode channels from the instance. - copy : bool - Whether to operate on a copy of the data (True) or modify it in-place - (False). Defaults to True. - on_bad : str - If a bipolar channel is created from a bad anode or a bad cathode, mne - warns if on_bad="warns", raises ValueError if on_bad="raise", and does - nothing if on_bad="ignore". For "warn" and "ignore", the new bipolar - channel will be marked as bad. Defaults to on_bad="warns". - %(verbose)s - - Returns - ------- - inst : instance of Raw | Epochs | Evoked - Data with the specified channels re-referenced. - - See Also - -------- - set_eeg_reference : Convenience function for creating an EEG reference. - - Notes - ----- - 1. If the anodes contain any EEG channels, this function removes - any pre-existing average reference projections. - - 2. During source localization, the EEG signal should have an average - reference. - - 3. The data must be preloaded. - - .. versionadded:: 0.9.0 - """ - from .meas_info import create_info - from ..io import RawArray - from ..epochs import EpochsArray - from ..evoked import EvokedArray - - _check_can_reref(inst) - if not isinstance(anode, list): - anode = [anode] - - if not isinstance(cathode, list): - cathode = [cathode] - - if len(anode) != len(cathode): - raise ValueError('Number of anodes (got %d) must equal the number ' - 'of cathodes (got %d).' % (len(anode), len(cathode))) - - if ch_name is None: - ch_name = [f'{a}-{c}' for (a, c) in zip(anode, cathode)] - elif not isinstance(ch_name, list): - ch_name = [ch_name] - if len(ch_name) != len(anode): - raise ValueError('Number of channel names must equal the number of ' - 'anodes/cathodes (got %d).' % len(ch_name)) - - # Check for duplicate channel names (it is allowed to give the name of the - # anode or cathode channel, as they will be replaced). - for ch, a, c in zip(ch_name, anode, cathode): - if ch not in [a, c] and ch in inst.ch_names: - raise ValueError('There is already a channel named "%s", please ' - 'specify a different name for the bipolar ' - 'channel using the ch_name parameter.' % ch) - - if ch_info is None: - ch_info = [{} for _ in anode] - elif not isinstance(ch_info, list): - ch_info = [ch_info] - if len(ch_info) != len(anode): - raise ValueError('Number of channel info dictionaries must equal the ' - 'number of anodes/cathodes.') - - if copy: - inst = inst.copy() - - anode = _check_before_reference(inst, ref_from=cathode, - ref_to=anode, ch_type='auto') - - # Create bipolar reference channels by multiplying the data - # (channels x time) with a matrix (n_virtual_channels x channels) - # and add them to the instance. 
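# [Editor's note, numpy-only illustration -- not MNE code] The multiplier built
# just below, on toy data: each virtual channel gets +1 at its anode and -1 at
# its cathode, so ``multiplier @ data`` is the anode-minus-cathode signal.
import numpy as np

demo_names = ["EEG1", "EEG2", "EEG3", "EEG4"]
demo_anode, demo_cathode = ["EEG1", "EEG3"], ["EEG2", "EEG4"]
demo_mult = np.zeros((len(demo_anode), len(demo_names)))
for idx, (a, c) in enumerate(zip(demo_anode, demo_cathode)):
    demo_mult[idx, demo_names.index(a)] = 1
    demo_mult[idx, demo_names.index(c)] = -1
demo_data = np.random.default_rng(0).standard_normal((len(demo_names), 5))
demo_bipolar = demo_mult @ demo_data                 # shape (n_virtual, n_times)
assert np.allclose(demo_bipolar[0], demo_data[0] - demo_data[1])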
- multiplier = np.zeros((len(anode), len(inst.ch_names))) - for idx, (a, c) in enumerate(zip(anode, cathode)): - multiplier[idx, inst.ch_names.index(a)] = 1 - multiplier[idx, inst.ch_names.index(c)] = -1 - - ref_info = create_info(ch_names=ch_name, sfreq=inst.info['sfreq'], - ch_types=inst.get_channel_types(picks=anode)) - - # Update "chs" in Reference-Info. - for ch_idx, (an, info) in enumerate(zip(anode, ch_info)): - _check_ch_keys(info, ch_idx, name='ch_info', check_min=False) - an_idx = inst.ch_names.index(an) - # Copy everything from anode (except ch_name). - an_chs = {k: v for k, v in inst.info['chs'][an_idx].items() - if k != 'ch_name'} - ref_info['chs'][ch_idx].update(an_chs) - # Set coil-type to bipolar. - ref_info['chs'][ch_idx]['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR - # Update with info from ch_info-parameter. - ref_info['chs'][ch_idx].update(info) - - # Set other info-keys from original instance. - pick_info = {k: v for k, v in inst.info.items() if k not in - ['chs', 'ch_names', 'bads', 'nchan', 'sfreq']} - - with ref_info._unlock(): - ref_info.update(pick_info) - - # Rereferencing of data. - ref_data = multiplier @ inst._data - - if isinstance(inst, BaseRaw): - ref_inst = RawArray(ref_data, ref_info, first_samp=inst.first_samp, - copy=None) - elif isinstance(inst, BaseEpochs): - ref_inst = EpochsArray(ref_data, ref_info, events=inst.events, - tmin=inst.tmin, event_id=inst.event_id, - metadata=inst.metadata) - else: - ref_inst = EvokedArray(ref_data, ref_info, tmin=inst.tmin, - comment=inst.comment, nave=inst.nave, - kind='average') - - # Add referenced instance to original instance. - inst.add_channels([ref_inst], force_update_info=True) - - # Handle bad channels. - bad_bipolar_chs = [] - for ch_idx, (a, c) in enumerate(zip(anode, cathode)): - if a in inst.info['bads'] or c in inst.info['bads']: - bad_bipolar_chs.append(ch_name[ch_idx]) - - # Add warnings if bad channels are present. - if bad_bipolar_chs: - msg = f'Bipolar channels are based on bad channels: {bad_bipolar_chs}.' - _on_missing(on_bad, msg) - inst.info['bads'] += bad_bipolar_chs - - added_channels = ', '.join([name for name in ch_name]) - logger.info(f'Added the following bipolar channels:\n{added_channels}') - - for attr_name in ['picks', '_projector']: - setattr(inst, attr_name, None) - - # Drop remaining channels. - if drop_refs: - drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names)) - inst.drop_channels(drop_channels) - - return inst diff --git a/mne/io/snirf/__init__.py b/mne/io/snirf/__init__.py index ea3f11dc0a4..a50ff500a41 100644 --- a/mne/io/snirf/__init__.py +++ b/mne/io/snirf/__init__.py @@ -1,7 +1,7 @@ """SNIRF module for conversion to FIF.""" -# Author: Robert Luke -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. from ._snirf import read_raw_snirf diff --git a/mne/io/snirf/_snirf.py b/mne/io/snirf/_snirf.py index 93d024a4a75..55f3c54605c 100644 --- a/mne/io/snirf/_snirf.py +++ b/mne/io/snirf/_snirf.py @@ -1,26 +1,36 @@ -# Authors: Robert Luke -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
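[Editor's note: before the SNIRF hunks below, a short sketch of the on_bad behaviour implemented in the bipolar bad-channel handling above. The two-channel Raw and its names are hypothetical; the point is that a bipolar channel built from a bad anode or cathode is itself marked bad, while on_bad only controls whether a warning or error is emitted.]

import numpy as np
import mne

info = mne.create_info(["A1", "C1"], sfreq=100.0, ch_types="eeg")
raw = mne.io.RawArray(np.zeros((2, 100)), info)
raw.info["bads"] = ["C1"]  # bad cathode

# on_bad="ignore" raises no warning, but the derived channel is still marked
# bad; on_bad="warn" warns and on_bad="raise" raises instead.
raw_bip = mne.set_bipolar_reference(raw, anode="A1", cathode="C1", on_bad="ignore")
assert "A1-C1" in raw_bip.info["bads"]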
+import datetime import re + import numpy as np -import datetime -from ..base import BaseRaw -from ..meas_info import create_info, _format_dig_points -from ..utils import _mult_cal_one +from ..._fiff._digitization import _make_dig_points +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _format_dig_points, create_info +from ..._fiff.utils import _mult_cal_one +from ..._freesurfer import get_mni_fiducials from ...annotations import Annotations -from ...utils import (logger, verbose, fill_doc, warn, _check_fname, - _import_h5py) -from ..constants import FIFF -from .._digitization import _make_dig_points from ...transforms import _frame_to_str, apply_trans +from ...utils import ( + _check_fname, + _import_h5py, + _validate_type, + fill_doc, + logger, + verbose, + warn, +) +from ..base import BaseRaw from ..nirx.nirx import _convert_fnirs_to_head -from ..._freesurfer import get_mni_fiducials @fill_doc -def read_raw_snirf(fname, optode_frame="unknown", preload=False, verbose=None): +def read_raw_snirf( + fname, optode_frame="unknown", *, sfreq=None, preload=False, verbose=None +) -> "RawSNIRF": """Reader for a continuous wave SNIRF data. .. note:: This reader supports the .snirf file type only, @@ -39,6 +49,11 @@ def read_raw_snirf(fname, optode_frame="unknown", preload=False, verbose=None): in which case the positions are not modified. If a known coordinate frame is provided (head, meg, mri), then the positions are transformed in to the Neuromag head coordinate frame (head). + sfreq : float | None + The nominal sampling frequency at which the data were acquired. If ``None``, + will be estimated from the time data in the file. + + .. versionadded:: 1.10 %(preload)s %(verbose)s @@ -52,11 +67,11 @@ def read_raw_snirf(fname, optode_frame="unknown", preload=False, verbose=None): -------- mne.io.Raw : Documentation of attributes and methods of RawSNIRF. """ - return RawSNIRF(fname, optode_frame, preload, verbose) + return RawSNIRF(fname, optode_frame, sfreq=sfreq, preload=preload, verbose=verbose) def _open(fname): - return open(fname, 'r', encoding='latin-1') + return open(fname, encoding="latin-1") @fill_doc @@ -72,6 +87,11 @@ class RawSNIRF(BaseRaw): in which case the positions are not modified. If a known coordinate frame is provided (head, meg, mri), then the positions are transformed in to the Neuromag head coordinate frame (head). + sfreq : float | None + The nominal sampling frequency at which the data were acquired. If ``None``, + will be estimated from the time data in the file. + + .. versionadded:: 1.10 %(preload)s %(verbose)s @@ -81,194 +101,232 @@ class RawSNIRF(BaseRaw): """ @verbose - def __init__(self, fname, optode_frame="unknown", - preload=False, verbose=None): + def __init__( + self, fname, optode_frame="unknown", *, sfreq=None, preload=False, verbose=None + ): # Must be here due to circular import error from ...preprocessing.nirs import _validate_nirs_info + h5py = _import_h5py() fname = str(_check_fname(fname, "read", True, "fname")) - logger.info('Loading %s' % fname) - - with h5py.File(fname, 'r') as dat: - - if 'data2' in dat['nirs']: - warn("File contains multiple recordings. " - "MNE does not support this feature. " - "Only the first dataset will be processed.") + logger.info(f"Loading {fname}") + + with h5py.File(fname, "r") as dat: + if "data2" in dat["nirs"]: + warn( + "File contains multiple recordings. " + "MNE does not support this feature. " + "Only the first dataset will be processed." 
+ ) manufacturer = _get_metadata_str(dat, "ManufacturerName") if (optode_frame == "unknown") & (manufacturer == "Gowerlabs"): optode_frame = "head" - snirf_data_type = np.array(dat.get('nirs/data1/measurementList1' - '/dataType')).item() + snirf_data_type = np.array( + dat.get("nirs/data1/measurementList1/dataType") + ).item() if snirf_data_type not in [1, 99999]: # 1 = Continuous Wave # 99999 = Processed - raise RuntimeError('MNE only supports reading continuous' - ' wave amplitude and processed haemoglobin' - ' SNIRF files. Expected type' - ' code 1 or 99999 but received type ' - f'code {snirf_data_type}') + raise RuntimeError( + "MNE only supports reading continuous" + " wave amplitude and processed haemoglobin" + " SNIRF files. Expected type" + " code 1 or 99999 but received type " + f"code {snirf_data_type}" + ) - last_samps = dat.get('/nirs/data1/dataTimeSeries').shape[0] - 1 + last_samps = dat.get("/nirs/data1/dataTimeSeries").shape[0] - 1 - sampling_rate = _extract_sampling_rate(dat) + sampling_rate = _extract_sampling_rate(dat, sfreq) if sampling_rate == 0: warn("Unable to extract sample rate from SNIRF file.") # Extract wavelengths - fnirs_wavelengths = np.array(dat.get('nirs/probe/wavelengths')) + fnirs_wavelengths = np.array(dat.get("nirs/probe/wavelengths")) fnirs_wavelengths = [int(w) for w in fnirs_wavelengths] if len(fnirs_wavelengths) != 2: - raise RuntimeError(f'The data contains ' - f'{len(fnirs_wavelengths)}' - f' wavelengths: {fnirs_wavelengths}. ' - f'MNE only supports reading continuous' - ' wave amplitude SNIRF files ' - 'with two wavelengths.') + raise RuntimeError( + f"The data contains " + f"{len(fnirs_wavelengths)}" + f" wavelengths: {fnirs_wavelengths}. " + f"MNE only supports reading continuous" + " wave amplitude SNIRF files " + "with two wavelengths." + ) # Extract channels def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): - return [atoi(c) for c in re.split(r'(\d+)', text)] + return [atoi(c) for c in re.split(r"(\d+)", text)] - channels = np.array([name for name in dat['nirs']['data1'].keys()]) - channels_idx = np.array(['measurementList' in n for n in channels]) + channels = np.array([name for name in dat["nirs"]["data1"].keys()]) + channels_idx = np.array(["measurementList" in n for n in channels]) channels = channels[channels_idx] channels = sorted(channels, key=natural_keys) # Source and detector labels are optional fields. # Use S1, S2, S3, etc if not specified. - if 'sourceLabels_disabled' in dat['nirs/probe']: + if "sourceLabels_disabled" in dat["nirs/probe"]: # This is disabled as # MNE-Python does not currently support custom source names. # Instead, sources must be integer values. - sources = np.array(dat.get('nirs/probe/sourceLabels')) - sources = [s.decode('UTF-8') for s in sources] + sources = np.array(dat.get("nirs/probe/sourceLabels")) + sources = [s.decode("UTF-8") for s in sources] else: - sources = np.unique([_correct_shape(np.array(dat.get( - 'nirs/data1/' + c + '/sourceIndex')))[0] - for c in channels]) - sources = [f"S{int(s)}" for s in sources] + sources = np.unique( + [ + _correct_shape( + np.array(dat.get("nirs/data1/" + c + "/sourceIndex")) + )[0] + for c in channels + ] + ) + sources = {int(s): f"S{int(s)}" for s in sources} - if 'detectorLabels_disabled' in dat['nirs/probe']: + if "detectorLabels_disabled" in dat["nirs/probe"]: # This is disabled as # MNE-Python does not currently support custom detector names. # Instead, detector must be integer values. 
- detectors = np.array(dat.get('nirs/probe/detectorLabels')) - detectors = [d.decode('UTF-8') for d in detectors] + detectors = np.array(dat.get("nirs/probe/detectorLabels")) + detectors = [d.decode("UTF-8") for d in detectors] else: - detectors = np.unique([_correct_shape(np.array(dat.get( - 'nirs/data1/' + c + '/detectorIndex')))[0] - for c in channels]) - detectors = [f"D{int(d)}" for d in detectors] + detectors = np.unique( + [ + _correct_shape( + np.array(dat.get("nirs/data1/" + c + "/detectorIndex")) + )[0] + for c in channels + ] + ) + detectors = {int(d): f"D{int(d)}" for d in detectors} # Extract source and detector locations # 3D positions are optional in SNIRF, # but highly recommended in MNE. - if ('detectorPos3D' in dat['nirs/probe']) &\ - ('sourcePos3D' in dat['nirs/probe']): + if ("detectorPos3D" in dat["nirs/probe"]) & ( + "sourcePos3D" in dat["nirs/probe"] + ): # If 3D positions are available they are used even if 2D exists - detPos3D = np.array(dat.get('nirs/probe/detectorPos3D')) - srcPos3D = np.array(dat.get('nirs/probe/sourcePos3D')) - elif ('detectorPos2D' in dat['nirs/probe']) &\ - ('sourcePos2D' in dat['nirs/probe']): - warn('The data only contains 2D location information for the ' - 'optode positions. ' - 'It is highly recommended that data is used ' - 'which contains 3D location information for the ' - 'optode positions. With only 2D locations it can not be ' - 'guaranteed that MNE functions will behave correctly ' - 'and produce accurate results. If it is not possible to ' - 'include 3D positions in your data, please consider ' - 'using the set_montage() function.') - - detPos2D = np.array(dat.get('nirs/probe/detectorPos2D')) - srcPos2D = np.array(dat.get('nirs/probe/sourcePos2D')) + detPos3D = np.array(dat.get("nirs/probe/detectorPos3D")) + srcPos3D = np.array(dat.get("nirs/probe/sourcePos3D")) + elif ("detectorPos2D" in dat["nirs/probe"]) & ( + "sourcePos2D" in dat["nirs/probe"] + ): + warn( + "The data only contains 2D location information for the " + "optode positions. " + "It is highly recommended that data is used " + "which contains 3D location information for the " + "optode positions. With only 2D locations it can not be " + "guaranteed that MNE functions will behave correctly " + "and produce accurate results. If it is not possible to " + "include 3D positions in your data, please consider " + "using the set_montage() function." + ) + + detPos2D = np.array(dat.get("nirs/probe/detectorPos2D")) + srcPos2D = np.array(dat.get("nirs/probe/sourcePos2D")) # Set the third dimension to zero. See gh#9308 - detPos3D = np.append(detPos2D, - np.zeros((detPos2D.shape[0], 1)), axis=1) - srcPos3D = np.append(srcPos2D, - np.zeros((srcPos2D.shape[0], 1)), axis=1) + detPos3D = np.append(detPos2D, np.zeros((detPos2D.shape[0], 1)), axis=1) + srcPos3D = np.append(srcPos2D, np.zeros((srcPos2D.shape[0], 1)), axis=1) else: - raise RuntimeError('No optode location information is ' - 'provided. MNE requires at least 2D ' - 'location information') - - assert len(sources) == srcPos3D.shape[0] - assert len(detectors) == detPos3D.shape[0] + raise RuntimeError( + "No optode location information is " + "provided. 
MNE requires at least 2D " + "location information" + ) chnames = [] ch_types = [] for chan in channels: - src_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' + - chan + '/sourceIndex')))[0]) - det_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' + - chan + '/detectorIndex')))[0]) + src_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/sourceIndex")) + )[0] + ) + det_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/detectorIndex")) + )[0] + ) if snirf_data_type == 1: - wve_idx = int(_correct_shape(np.array( - dat.get('nirs/data1/' + chan + - '/wavelengthIndex')))[0]) - ch_name = sources[src_idx - 1] + '_' +\ - detectors[det_idx - 1] + ' ' +\ - str(fnirs_wavelengths[wve_idx - 1]) + wve_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/wavelengthIndex")) + )[0] + ) + ch_name = ( + sources[src_idx] + + "_" + + detectors[det_idx] + + " " + + str(fnirs_wavelengths[wve_idx - 1]) + ) chnames.append(ch_name) - ch_types.append('fnirs_cw_amplitude') + ch_types.append("fnirs_cw_amplitude") elif snirf_data_type == 99999: dt_id = _correct_shape( - np.array(dat.get('nirs/data1/' + chan + - '/dataTypeLabel')))[0].decode('UTF-8') + np.array(dat.get("nirs/data1/" + chan + "/dataTypeLabel")) + )[0].decode("UTF-8") # Convert between SNIRF processed names and MNE type names dt_id = dt_id.lower().replace("dod", "fnirs_od") - ch_name = sources[src_idx - 1] + '_' + \ - detectors[det_idx - 1] + ch_name = sources[src_idx] + "_" + detectors[det_idx] if dt_id == "fnirs_od": - wve_idx = int(_correct_shape(np.array( - dat.get('nirs/data1/' + chan + - '/wavelengthIndex')))[0]) - suffix = ' ' + str(fnirs_wavelengths[wve_idx - 1]) + wve_idx = int( + _correct_shape( + np.array( + dat.get("nirs/data1/" + chan + "/wavelengthIndex") + ) + )[0] + ) + suffix = " " + str(fnirs_wavelengths[wve_idx - 1]) else: - suffix = ' ' + dt_id.lower() + suffix = " " + dt_id.lower() ch_name = ch_name + suffix chnames.append(ch_name) ch_types.append(dt_id) # Create mne structure - info = create_info(chnames, - sampling_rate, - ch_types=ch_types) + info = create_info(chnames, sampling_rate, ch_types=ch_types) subject_info = {} - names = np.array(dat.get('nirs/metaDataTags/SubjectID')) - subject_info['first_name'] = \ - _correct_shape(names)[0].decode('UTF-8') + names = np.array(dat.get("nirs/metaDataTags/SubjectID")) + names = _correct_shape(names)[0].decode("UTF-8") + subject_info["his_id"] = names # Read non standard (but allowed) custom metadata tags - if 'lastName' in dat.get('nirs/metaDataTags/'): - ln = dat.get('/nirs/metaDataTags/lastName')[0].decode('UTF-8') - subject_info['last_name'] = ln - if 'middleName' in dat.get('nirs/metaDataTags/'): - m = dat.get('/nirs/metaDataTags/middleName')[0].decode('UTF-8') - subject_info['middle_name'] = m - if 'sex' in dat.get('nirs/metaDataTags/'): - s = dat.get('/nirs/metaDataTags/sex')[0].decode('UTF-8') - if s in {'M', 'Male', '1', 'm'}: - subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE - elif s in {'F', 'Female', '2', 'f'}: - subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE - elif s in {'0', 'u'}: - subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN + if "lastName" in dat.get("nirs/metaDataTags/"): + ln = dat.get("/nirs/metaDataTags/lastName")[0].decode("UTF-8") + subject_info["last_name"] = ln + if "middleName" in dat.get("nirs/metaDataTags/"): + m = dat.get("/nirs/metaDataTags/middleName")[0].decode("UTF-8") + subject_info["middle_name"] = m + if "firstName" in dat.get("nirs/metaDataTags/"): + fn = 
dat.get("/nirs/metaDataTags/firstName")[0].decode("UTF-8") + subject_info["first_name"] = fn + else: + # MNE < 1.7 used to not write the firstName tag, so pull it from names + subject_info["first_name"] = names.split("_")[0] + if "sex" in dat.get("nirs/metaDataTags/"): + s = dat.get("/nirs/metaDataTags/sex")[0].decode("UTF-8") + if s in {"M", "Male", "1", "m"}: + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_MALE + elif s in {"F", "Female", "2", "f"}: + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_FEMALE + elif s in {"0", "u"}: + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN # End non standard name reading # Update info info.update(subject_info=subject_info) @@ -283,140 +341,168 @@ def natural_keys(text): # These are all in MNI or MEG coordinates, so let's transform # them to the Neuromag head coordinate frame srcPos3D, detPos3D, _, head_t = _convert_fnirs_to_head( - 'fsaverage', optode_frame, 'head', srcPos3D, detPos3D, []) + "fsaverage", optode_frame, "head", srcPos3D, detPos3D, [] + ) else: head_t = np.eye(4) if optode_frame in ["head", "mri", "meg"]: # Then the transformation to head was performed above coord_frame = FIFF.FIFFV_COORD_HEAD - elif 'MNE_coordFrame' in dat.get('nirs/metaDataTags/'): - coord_frame = int(dat.get('/nirs/metaDataTags/MNE_coordFrame') - [0]) + elif "MNE_coordFrame" in dat.get("nirs/metaDataTags/"): + coord_frame = int(dat.get("/nirs/metaDataTags/MNE_coordFrame")[0]) else: coord_frame = FIFF.FIFFV_COORD_UNKNOWN for idx, chan in enumerate(channels): - src_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' + - chan + '/sourceIndex')))[0]) - det_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' + - chan + '/detectorIndex')))[0]) + src_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/sourceIndex")) + )[0] + ) + det_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/detectorIndex")) + )[0] + ) - info['chs'][idx]['loc'][3:6] = srcPos3D[src_idx - 1, :] - info['chs'][idx]['loc'][6:9] = detPos3D[det_idx - 1, :] + info["chs"][idx]["loc"][3:6] = srcPos3D[src_idx - 1, :] + info["chs"][idx]["loc"][6:9] = detPos3D[det_idx - 1, :] # Store channel as mid point - midpoint = (info['chs'][idx]['loc'][3:6] + - info['chs'][idx]['loc'][6:9]) / 2 - info['chs'][idx]['loc'][0:3] = midpoint - info['chs'][idx]['coord_frame'] = coord_frame - - if (snirf_data_type in [1]) or \ - ((snirf_data_type == 99999) and - (ch_types[idx] == "fnirs_od")): - wve_idx = int(_correct_shape(np.array(dat.get( - 'nirs/data1/' + chan + '/wavelengthIndex')))[0]) - info['chs'][idx]['loc'][9] = fnirs_wavelengths[wve_idx - 1] - - if 'landmarkPos3D' in dat.get('nirs/probe/'): - diglocs = np.array(dat.get('/nirs/probe/landmarkPos3D')) + midpoint = ( + info["chs"][idx]["loc"][3:6] + info["chs"][idx]["loc"][6:9] + ) / 2 + info["chs"][idx]["loc"][0:3] = midpoint + info["chs"][idx]["coord_frame"] = coord_frame + + if (snirf_data_type in [1]) or ( + (snirf_data_type == 99999) and (ch_types[idx] == "fnirs_od") + ): + wve_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/wavelengthIndex")) + )[0] + ) + info["chs"][idx]["loc"][9] = fnirs_wavelengths[wve_idx - 1] + + if "landmarkPos3D" in dat.get("nirs/probe/"): + diglocs = np.array(dat.get("/nirs/probe/landmarkPos3D")) diglocs /= length_scaling - digname = np.array(dat.get('/nirs/probe/landmarkLabels')) + digname = np.array(dat.get("/nirs/probe/landmarkLabels")) nasion, lpa, rpa, hpi = None, None, None, None extra_ps = dict() for idx, dign in enumerate(digname): dign = dign.lower() - if dign in 
[b'lpa', b'al']: + if dign in [b"lpa", b"al"]: lpa = diglocs[idx, :3] - elif dign in [b'nasion']: + elif dign in [b"nasion"]: nasion = diglocs[idx, :3] - elif dign in [b'rpa', b'ar']: + elif dign in [b"rpa", b"ar"]: rpa = diglocs[idx, :3] else: - extra_ps[f'EEG{len(extra_ps) + 1:03d}'] = \ - diglocs[idx, :3] + extra_ps[f"EEG{len(extra_ps) + 1:03d}"] = diglocs[idx, :3] add_missing_fiducials = ( - coord_frame == FIFF.FIFFV_COORD_HEAD and - lpa is None and rpa is None and nasion is None + coord_frame == FIFF.FIFFV_COORD_HEAD + and lpa is None + and rpa is None + and nasion is None ) dig = _make_dig_points( - nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, + nasion=nasion, + lpa=lpa, + rpa=rpa, + hpi=hpi, dig_ch_pos=extra_ps, coord_frame=_frame_to_str[coord_frame], - add_missing_fiducials=add_missing_fiducials) + add_missing_fiducials=add_missing_fiducials, + ) else: - ch_locs = [info['chs'][idx]['loc'][0:3] - for idx in range(len(channels))] + ch_locs = [info["chs"][idx]["loc"][0:3] for idx in range(len(channels))] # Set up digitization - dig = get_mni_fiducials('fsaverage', verbose=False) + dig = get_mni_fiducials("fsaverage", verbose=False) for fid in dig: - fid['r'] = apply_trans(head_t, fid['r']) - fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD + fid["r"] = apply_trans(head_t, fid["r"]) + fid["coord_frame"] = FIFF.FIFFV_COORD_HEAD for ii, ch_loc in enumerate(ch_locs, 1): - dig.append(dict( - kind=FIFF.FIFFV_POINT_EEG, # misnomer prob okay - r=ch_loc, - ident=ii, - coord_frame=FIFF.FIFFV_COORD_HEAD, - )) + dig.append( + dict( + kind=FIFF.FIFFV_POINT_EEG, # misnomer prob okay + r=ch_loc, + ident=ii, + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + ) dig = _format_dig_points(dig) del head_t with info._unlock(): - info['dig'] = dig - - str_date = _correct_shape(np.array((dat.get( - '/nirs/metaDataTags/MeasurementDate'))))[0].decode('UTF-8') - str_time = _correct_shape(np.array((dat.get( - '/nirs/metaDataTags/MeasurementTime'))))[0].decode('UTF-8') + info["dig"] = dig + + str_date = _correct_shape( + np.array(dat.get("/nirs/metaDataTags/MeasurementDate")) + )[0].decode("UTF-8") + str_time = _correct_shape( + np.array(dat.get("/nirs/metaDataTags/MeasurementTime")) + )[0].decode("UTF-8") str_datetime = str_date + str_time # Several formats have been observed so we try each in turn - for dt_code in ['%Y-%m-%d%H:%M:%SZ', - '%Y-%m-%d%H:%M:%S']: + for dt_code in [ + "%Y-%m-%d%H:%M:%SZ", + "%Y-%m-%d%H:%M:%S", + "%Y-%m-%d%H:%M:%S.%f", + "%Y-%m-%d%H:%M:%S.%f%z", + ]: try: - meas_date = datetime.datetime.strptime( - str_datetime, dt_code) + meas_date = datetime.datetime.strptime(str_datetime, dt_code) except ValueError: pass else: break else: - warn("Extraction of measurement date from SNIRF file failed. " - "The date is being set to January 1st, 2000, " - f"instead of {str_datetime}") + warn( + "Extraction of measurement date from SNIRF file failed. 
" + "The date is being set to January 1st, 2000, " + f"instead of {str_datetime}" + ) meas_date = datetime.datetime(2000, 1, 1, 0, 0, 0) meas_date = meas_date.replace(tzinfo=datetime.timezone.utc) with info._unlock(): - info['meas_date'] = meas_date + info["meas_date"] = meas_date - if 'DateOfBirth' in dat.get('nirs/metaDataTags/'): - str_birth = np.array((dat.get('/nirs/metaDataTags/' - 'DateOfBirth')))[0].decode() - birth_matched = re.fullmatch(r'(\d+)-(\d+)-(\d+)', str_birth) + if "DateOfBirth" in dat.get("nirs/metaDataTags/"): + str_birth = ( + np.array(dat.get("/nirs/metaDataTags/DateOfBirth")).item().decode() + ) + birth_matched = re.fullmatch(r"(\d+)-(\d+)-(\d+)", str_birth) if birth_matched is not None: - birthday = (int(birth_matched.groups()[0]), - int(birth_matched.groups()[1]), - int(birth_matched.groups()[2])) + birthday = datetime.date( + int(birth_matched.groups()[0]), + int(birth_matched.groups()[1]), + int(birth_matched.groups()[2]), + ) with info._unlock(): - info["subject_info"]['birthday'] = birthday + info["subject_info"]["birthday"] = birthday - super(RawSNIRF, self).__init__(info, preload, filenames=[fname], - last_samps=[last_samps], - verbose=verbose) + super().__init__( + info, + preload, + filenames=[fname], + last_samps=[last_samps], + verbose=verbose, + ) # Extract annotations # As described at https://github.com/fNIRS/snirf/ # blob/master/snirf_specification.md#nirsistimjdata annot = Annotations([], [], []) - for key in dat['nirs']: - if 'stim' in key: - data = np.atleast_2d(np.array( - dat.get('/nirs/' + key + '/data'))) - if data.size > 0: - desc = _correct_shape(np.array(dat.get( - '/nirs/' + key + '/name')))[0] - annot.append(data[:, 0], - data[:, 1], - desc.decode('UTF-8')) + for key in dat["nirs"]: + if "stim" in key: + data = np.atleast_2d(np.array(dat.get("/nirs/" + key + "/data"))) + if data.shape[1] >= 3: + desc = _correct_shape( + np.array(dat.get("/nirs/" + key + "/name")) + )[0] + annot.append(data[:, 0], data[:, 1], desc.decode("UTF-8")) self.set_annotations(annot, emit_warning=False) # Validate that the fNIRS info is correctly formatted @@ -426,8 +512,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file.""" import h5py - with h5py.File(self._filenames[0], 'r') as dat: - one = dat['/nirs/data1/dataTimeSeries'][start:stop].T + with h5py.File(self.filenames[0], "r") as dat: + one = dat["/nirs/data1/dataTimeSeries"][start:stop].T _mult_cal_one(data, one, idx, cals, mult) @@ -441,54 +527,78 @@ def _correct_shape(arr): def _get_timeunit_scaling(time_unit): """MNE expects time in seconds, return required scaling.""" - scalings = {'ms': 1000, 's': 1, 'unknown': 1} + scalings = {"ms": 1000, "s": 1, "unknown": 1} if time_unit in scalings: return scalings[time_unit] else: - raise RuntimeError(f'The time unit {time_unit} is not supported by ' - 'MNE. Please report this error as a GitHub ' - 'issue to inform the developers.') + raise RuntimeError( + f"The time unit {time_unit} is not supported by " + "MNE. Please report this error as a GitHub " + "issue to inform the developers." + ) def _get_lengthunit_scaling(length_unit): """MNE expects distance in m, return required scaling.""" - scalings = {'m': 1, 'cm': 100, 'mm': 1000} + scalings = {"m": 1, "cm": 100, "mm": 1000} if length_unit in scalings: return scalings[length_unit] else: - raise RuntimeError(f'The length unit {length_unit} is not supported ' - 'by MNE. 
Please report this error as a GitHub ' - 'issue to inform the developers.') + raise RuntimeError( + f"The length unit {length_unit} is not supported " + "by MNE. Please report this error as a GitHub " + "issue to inform the developers." + ) -def _extract_sampling_rate(dat): +def _extract_sampling_rate(dat, user_sfreq): """Extract the sample rate from the time field.""" - time_data = np.array(dat.get('nirs/data1/time')) - sampling_rate = 0 - if len(time_data) == 2: - # specified as onset, samplerate - sampling_rate = 1. / (time_data[1] - time_data[0]) - else: - # specified as time points - fs_diff = np.around(np.diff(time_data), decimals=4) - if len(np.unique(fs_diff)) == 1: - # Uniformly sampled data - sampling_rate = 1. / np.unique(fs_diff) - else: - warn("MNE does not currently support reading " - "SNIRF files with non-uniform sampled data.") + # This is a workaround to provide support for Artinis data. + # It allows for a 1% variation in the sampling times relative + # to the average sampling rate of the file. + MAXIMUM_ALLOWED_SAMPLING_JITTER_PERCENTAGE = 1.0 + _validate_type(user_sfreq, ("numeric", None), "sfreq") + time_data = np.array(dat.get("nirs/data1/time")) time_unit = _get_metadata_str(dat, "TimeUnit") - time_unit_scaling = _get_timeunit_scaling(time_unit) - sampling_rate *= time_unit_scaling - - return sampling_rate + time_unit_scaling = _get_timeunit_scaling(time_unit) # always 1 (s) or 1000 (ms) + if len(time_data) == 2: # special-cased in the snirf standard as (onset, period) + onset, period = time_data + file_sfreq = time_unit_scaling / period + else: + onset = time_data[0] + periods = np.diff(time_data) + sfreqs = time_unit_scaling / periods + file_sfreq = sfreqs.mean() # our best estimate, likely including some jitter + if user_sfreq is not None: + logger.info(f"Setting sampling frequency to user-supplied value: {user_sfreq}") + if not np.allclose(file_sfreq, user_sfreq, rtol=0.01, atol=0): + warn( + f"User-supplied sampling frequency ({user_sfreq} Hz) differs by " + f"{(user_sfreq - file_sfreq) / file_sfreq:.1%} from the frequency " + f"estimated from data in the file ({file_sfreq} Hz)." + ) + sfreq = user_sfreq or file_sfreq # user-passed value overrides value from file + # estimate jitter + if len(time_data) > 2: + ideal_times = onset + np.arange(len(time_data)) / sfreq + max_jitter = np.max(np.abs(time_data - ideal_times)) + percent_jitter = 100.0 * max_jitter / periods.mean() + msg = f"Found jitter of {percent_jitter:3f}% in sample times." + if percent_jitter > MAXIMUM_ALLOWED_SAMPLING_JITTER_PERCENTAGE: + warn( + f"{msg} Note that MNE-Python does not currently support SNIRF " + "files with non-uniformly-sampled data." + ) + else: + logger.info(msg) + return sfreq def _get_metadata_str(dat, field): - if field not in np.array(dat.get('nirs/metaDataTags')): + if field not in np.array(dat.get("nirs/metaDataTags")): return None - data = dat.get(f'/nirs/metaDataTags/{field}') + data = dat.get(f"/nirs/metaDataTags/{field}") data = _correct_shape(np.array(data)) - data = str(data[0], 'utf-8') + data = str(data[0], "utf-8") return data diff --git a/mne/io/snirf/tests/__init__.py b/mne/io/snirf/tests/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/snirf/tests/__init__.py +++ b/mne/io/snirf/tests/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
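[Editor's note: the rewritten _extract_sampling_rate above estimates the rate from the time vector and quantifies jitter against ideal uniformly spaced samples. This standalone sketch, not MNE code, mirrors that arithmetic on a toy time vector; the commented read_raw_snirf call uses a hypothetical path and shows how a user-supplied sfreq overrides the estimate.]

import numpy as np

time = np.array([0.0, 0.1, 0.2, 0.301, 0.4])  # seconds, with slight jitter
periods = np.diff(time)
sfreq = (1.0 / periods).mean()                # best estimate, as in the reader above
ideal = time[0] + np.arange(len(time)) / sfreq
percent_jitter = 100.0 * np.max(np.abs(time - ideal)) / periods.mean()
print(f"sfreq ~= {sfreq:.3f} Hz, worst jitter ~= {percent_jitter:.2f}%")

# Overriding the estimate from the file (hypothetical path):
# raw = mne.io.read_raw_snirf("recording.snirf", sfreq=10.0, preload=True)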
diff --git a/mne/io/snirf/tests/test_snirf.py b/mne/io/snirf/tests/test_snirf.py index fc4572f3e3f..73e3c775ed1 100644 --- a/mne/io/snirf/tests/test_snirf.py +++ b/mne/io/snirf/tests/test_snirf.py @@ -1,21 +1,28 @@ -# -*- coding: utf-8 -*- -# Authors: Robert Luke -# simplified BSD-3 license +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. -import numpy as np -from numpy.testing import assert_allclose, assert_almost_equal, assert_equal +import datetime import shutil +from contextlib import nullcontext + +import numpy as np import pytest +from numpy.testing import assert_allclose, assert_almost_equal, assert_equal +from mne._fiff.constants import FIFF from mne.datasets.testing import data_path, requires_testing_data -from mne.io import read_raw_snirf, read_raw_nirx +from mne.io import read_raw_nirx, read_raw_snirf from mne.io.tests.test_raw import _test_raw_reader -from mne.preprocessing.nirs import (optical_density, beer_lambert_law, - short_channels, source_detector_distances, - _reorder_nirx) -from mne.transforms import apply_trans, _get_trans -from mne.io.constants import FIFF - +from mne.preprocessing.nirs import ( + _reorder_nirx, + beer_lambert_law, + optical_density, + short_channels, + source_detector_distances, +) +from mne.transforms import _get_trans, apply_trans +from mne.utils import catch_logging testing_path = data_path(download=False) # SfNIRS files @@ -41,40 +48,17 @@ # NIRSport2 files nirx_nirsport2_103 = ( - testing_path - / "SNIRF" - / "NIRx" - / "NIRSport2" - / "1.0.3" - / "2021-04-23_005.snirf" + testing_path / "SNIRF" / "NIRx" / "NIRSport2" / "1.0.3" / "2021-04-23_005.snirf" ) nirx_nirsport2_103_2 = ( - testing_path - / "SNIRF" - / "NIRx" - / "NIRSport2" - / "1.0.3" - / "2021-05-05_001.snirf" + testing_path / "SNIRF" / "NIRx" / "NIRSport2" / "1.0.3" / "2021-05-05_001.snirf" ) snirf_nirsport2_20219 = ( - testing_path - / "SNIRF" - / "NIRx" - / "NIRSport2" - / "2021.9" - / "2021-10-01_002.snirf" + testing_path / "SNIRF" / "NIRx" / "NIRSport2" / "2021.9" / "2021-10-01_002.snirf" ) -nirx_nirsport2_20219 = testing_path / "NIRx" / "nirsport_v2" / "aurora_2021_9" # Kernel -kernel_hb = ( - testing_path - / "SNIRF" - / "Kernel" - / "Flow50" - / "Portal_2021_11" - / "hb.snirf" -) +kernel_hb = testing_path / "SNIRF" / "Kernel" / "Flow50" / "Portal_2021_11" / "hb.snirf" h5py = pytest.importorskip("h5py") # module-level @@ -86,43 +70,49 @@ def _get_loc(raw, ch_name): - return raw.copy().pick(ch_name).info['chs'][0]['loc'] + return raw.copy().pick(ch_name).info["chs"][0]["loc"] @requires_testing_data -@pytest.mark.filterwarnings('ignore:.*contains 2D location.*:') -@pytest.mark.filterwarnings('ignore:.*measurement date.*:') -@pytest.mark.parametrize('fname', ([sfnirs_homer_103_wShort, - nirx_nirsport2_103, - sfnirs_homer_103_153, - nirx_nirsport2_103, - nirx_nirsport2_103_2, - nirx_nirsport2_103_2, - kernel_hb, - lumo110 - ])) +@pytest.mark.filterwarnings("ignore:.*contains 2D location.*:") +@pytest.mark.filterwarnings("ignore:.*measurement date.*:") +@pytest.mark.parametrize( + "fname", + ( + [ + sfnirs_homer_103_wShort, + nirx_nirsport2_103, + sfnirs_homer_103_153, + nirx_nirsport2_103, + nirx_nirsport2_103_2, + nirx_nirsport2_103_2, + kernel_hb, + lumo110, + ] + ), +) def test_basic_reading_and_min_process(fname): """Test reading SNIRF files and minimum typical processing.""" raw = read_raw_snirf(fname, preload=True) # SNIRF data can contain several types, so only apply appropriate functions - if 'fnirs_cw_amplitude' in 
raw: + if "fnirs_cw_amplitude" in raw: raw = optical_density(raw) - if 'fnirs_od' in raw: + if "fnirs_od" in raw: raw = beer_lambert_law(raw, ppf=6) - assert 'hbo' in raw - assert 'hbr' in raw + assert "hbo" in raw + assert "hbr" in raw @requires_testing_data -@pytest.mark.filterwarnings('ignore:.*measurement date.*:') +@pytest.mark.filterwarnings("ignore:.*measurement date.*:") def test_snirf_gowerlabs(): """Test reading SNIRF files.""" raw = read_raw_snirf(lumo110, preload=True) assert raw._data.shape == (216, 274) - assert raw.info['dig'][0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD + assert raw.info["dig"][0]["coord_frame"] == FIFF.FIFFV_COORD_HEAD assert len(raw.ch_names) == 216 - assert_allclose(raw.info['sfreq'], 10.0) + assert_allclose(raw.info["sfreq"], 10.0) # we don't force them to be sorted according to a naive split assert raw.ch_names != sorted(raw.ch_names) # ... but this file does have a nice logical ordering already @@ -131,54 +121,87 @@ def test_snirf_gowerlabs(): raw.ch_names, # use a key which is (src triplet, freq, src, freq, det) key=lambda name: ( - (int(name.split()[0].split('_')[0][1:]) - 1) // 3, + (int(name.split()[0].split("_")[0][1:]) - 1) // 3, int(name.split()[1]), - int(name.split()[0].split('_')[0][1:]), - int(name.split()[0].split('_')[1][1:]) - )) + int(name.split()[0].split("_")[0][1:]), + int(name.split()[0].split("_")[1][1:]), + ), + ) @requires_testing_data def test_snirf_basic(): """Test reading SNIRF files.""" raw = read_raw_snirf(sfnirs_homer_103_wShort, preload=True) + assert raw.info["subject_info"]["his_id"] == "default" # Test data import assert raw._data.shape == (26, 145) - assert raw.info['sfreq'] == 12.5 + assert raw.info["sfreq"] == 12.5 # Test channel naming - assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D9 760", - "S2_D3 760", "S2_D10 760"] - assert raw.info['ch_names'][24:26] == ['S5_D8 850', 'S5_D13 850'] + assert raw.info["ch_names"][:4] == [ + "S1_D1 760", + "S1_D9 760", + "S2_D3 760", + "S2_D10 760", + ] + assert raw.info["ch_names"][24:26] == ["S5_D8 850", "S5_D13 850"] # Test frequency encoding - assert raw.info['chs'][0]['loc'][9] == 760 - assert raw.info['chs'][24]['loc'][9] == 850 + assert raw.info["chs"][0]["loc"][9] == 760 + assert raw.info["chs"][24]["loc"][9] == 850 # Test source locations - assert_allclose([-8.6765 * 1e-2, 0.0049 * 1e-2, -2.6167 * 1e-2], - _get_loc(raw, 'S1_D1 760')[3:6], rtol=0.02) - assert_allclose([7.9579 * 1e-2, -2.7571 * 1e-2, -2.2631 * 1e-2], - _get_loc(raw, 'S2_D3 760')[3:6], rtol=0.02) - assert_allclose([-2.1387 * 1e-2, -8.8874 * 1e-2, 3.8393 * 1e-2], - _get_loc(raw, 'S3_D2 760')[3:6], rtol=0.02) - assert_allclose([1.8602 * 1e-2, 9.7164 * 1e-2, 1.7539 * 1e-2], - _get_loc(raw, 'S4_D4 760')[3:6], rtol=0.02) - assert_allclose([-0.1108 * 1e-2, 0.7066 * 1e-2, 8.9883 * 1e-2], - _get_loc(raw, 'S5_D5 760')[3:6], rtol=0.02) + assert_allclose( + [-8.6765 * 1e-2, 0.0049 * 1e-2, -2.6167 * 1e-2], + _get_loc(raw, "S1_D1 760")[3:6], + rtol=0.02, + ) + assert_allclose( + [7.9579 * 1e-2, -2.7571 * 1e-2, -2.2631 * 1e-2], + _get_loc(raw, "S2_D3 760")[3:6], + rtol=0.02, + ) + assert_allclose( + [-2.1387 * 1e-2, -8.8874 * 1e-2, 3.8393 * 1e-2], + _get_loc(raw, "S3_D2 760")[3:6], + rtol=0.02, + ) + assert_allclose( + [1.8602 * 1e-2, 9.7164 * 1e-2, 1.7539 * 1e-2], + _get_loc(raw, "S4_D4 760")[3:6], + rtol=0.02, + ) + assert_allclose( + [-0.1108 * 1e-2, 0.7066 * 1e-2, 8.9883 * 1e-2], + _get_loc(raw, "S5_D5 760")[3:6], + rtol=0.02, + ) # Test detector locations - assert_allclose([-8.0409 * 1e-2, -2.9677 * 1e-2, 
-2.5415 * 1e-2], - _get_loc(raw, 'S1_D1 760')[6:9], rtol=0.02) - assert_allclose([-8.7329 * 1e-2, 0.7577 * 1e-2, -2.7980 * 1e-2], - _get_loc(raw, 'S1_D9 850')[6:9], rtol=0.02) - assert_allclose([9.2027 * 1e-2, 0.0161 * 1e-2, -2.8909 * 1e-2], - _get_loc(raw, 'S2_D3 850')[6:9], rtol=0.02) - assert_allclose([7.7548 * 1e-2, -3.5901 * 1e-2, -2.3179 * 1e-2], - _get_loc(raw, 'S2_D10 850')[6:9], rtol=0.02) + assert_allclose( + [-8.0409 * 1e-2, -2.9677 * 1e-2, -2.5415 * 1e-2], + _get_loc(raw, "S1_D1 760")[6:9], + rtol=0.02, + ) + assert_allclose( + [-8.7329 * 1e-2, 0.7577 * 1e-2, -2.7980 * 1e-2], + _get_loc(raw, "S1_D9 850")[6:9], + rtol=0.02, + ) + assert_allclose( + [9.2027 * 1e-2, 0.0161 * 1e-2, -2.8909 * 1e-2], + _get_loc(raw, "S2_D3 850")[6:9], + rtol=0.02, + ) + assert_allclose( + [7.7548 * 1e-2, -3.5901 * 1e-2, -2.3179 * 1e-2], + _get_loc(raw, "S2_D10 850")[6:9], + rtol=0.02, + ) - assert 'fnirs_cw_amplitude' in raw + assert "fnirs_cw_amplitude" in raw @requires_testing_data @@ -190,21 +213,25 @@ def test_snirf_against_nirx(): # Check annotations are the same assert_allclose(raw_homer.annotations.onset, raw_orig.annotations.onset) - assert_allclose([float(d) for d in raw_homer.annotations.description], - [float(d) for d in raw_orig.annotations.description]) + assert_allclose( + [float(d) for d in raw_homer.annotations.description], + [float(d) for d in raw_orig.annotations.description], + ) # Homer writes durations as 5s regardless of the true duration. # So we will not test that the nirx file stim durations equal # the homer file stim durations. # Check names are the same - assert raw_homer.info['ch_names'] == raw_orig.info['ch_names'] + assert raw_homer.info["ch_names"] == raw_orig.info["ch_names"] # Check frequencies are the same num_chans = len(raw_homer.ch_names) - new_chs = raw_homer.info['chs'] - ori_chs = raw_orig.info['chs'] - assert_allclose([new_chs[idx]['loc'][9] for idx in range(num_chans)], - [ori_chs[idx]['loc'][9] for idx in range(num_chans)]) + new_chs = raw_homer.info["chs"] + ori_chs = raw_orig.info["chs"] + assert_allclose( + [new_chs[idx]["loc"][9] for idx in range(num_chans)], + [ori_chs[idx]["loc"][9] for idx in range(num_chans)], + ) # Check data is the same assert_allclose(raw_homer.get_data(), raw_orig.get_data()) @@ -217,26 +244,27 @@ def test_snirf_nonstandard(tmp_path): fname = str(tmp_path) + "/mod.snirf" # Manually mark up the file to match MNE-NIRS custom tags with h5py.File(fname, "r+") as f: - f.create_dataset("nirs/metaDataTags/middleName", - data=['X'.encode('UTF-8')]) - f.create_dataset("nirs/metaDataTags/lastName", - data=['Y'.encode('UTF-8')]) - f.create_dataset("nirs/metaDataTags/sex", - data=['1'.encode('UTF-8')]) + f.create_dataset("nirs/metaDataTags/middleName", data=[b"X"]) + f.create_dataset("nirs/metaDataTags/lastName", data=[b"Y"]) + f.create_dataset("nirs/metaDataTags/sex", data=[b"1"]) + raw = read_raw_snirf(fname, preload=True) + assert raw.info["subject_info"]["first_name"] == "default" # pull from his_id + with h5py.File(fname, "r+") as f: + f.create_dataset("nirs/metaDataTags/firstName", data=[b"W"]) raw = read_raw_snirf(fname, preload=True) - assert raw.info["subject_info"]["middle_name"] == 'X' - assert raw.info["subject_info"]["last_name"] == 'Y' + assert raw.info["subject_info"]["first_name"] == "W" + assert raw.info["subject_info"]["middle_name"] == "X" + assert raw.info["subject_info"]["last_name"] == "Y" assert raw.info["subject_info"]["sex"] == 1 + assert raw.info["subject_info"]["his_id"] == "default" with h5py.File(fname, "r+") 
as f: - del f['nirs/metaDataTags/sex'] - f.create_dataset("nirs/metaDataTags/sex", - data=['2'.encode('UTF-8')]) + del f["nirs/metaDataTags/sex"] + f.create_dataset("nirs/metaDataTags/sex", data=[b"2"]) raw = read_raw_snirf(fname, preload=True) assert raw.info["subject_info"]["sex"] == 2 with h5py.File(fname, "r+") as f: - del f['nirs/metaDataTags/sex'] - f.create_dataset("nirs/metaDataTags/sex", - data=['0'.encode('UTF-8')]) + del f["nirs/metaDataTags/sex"] + f.create_dataset("nirs/metaDataTags/sex", data=[b"0"]) raw = read_raw_snirf(fname, preload=True) assert raw.info["subject_info"]["sex"] == 0 @@ -251,16 +279,20 @@ def test_snirf_nirsport2(): # Test data import assert raw._data.shape == (92, 84) - assert_almost_equal(raw.info['sfreq'], 7.6, decimal=1) + assert_almost_equal(raw.info["sfreq"], 7.6, decimal=1) # Test channel naming - assert raw.info['ch_names'][:4] == ['S1_D1 760', 'S1_D3 760', - 'S1_D9 760', 'S1_D16 760'] - assert raw.info['ch_names'][24:26] == ['S8_D15 760', 'S8_D20 760'] + assert raw.info["ch_names"][:4] == [ + "S1_D1 760", + "S1_D3 760", + "S1_D9 760", + "S1_D16 760", + ] + assert raw.info["ch_names"][24:26] == ["S8_D15 760", "S8_D20 760"] # Test frequency encoding - assert raw.info['chs'][0]['loc'][9] == 760 - assert raw.info['chs'][-1]['loc'][9] == 850 + assert raw.info["chs"][0]["loc"][9] == 760 + assert raw.info["chs"][-1]["loc"][9] == 850 assert sum(short_channels(raw.info)) == 16 @@ -268,38 +300,44 @@ def test_snirf_nirsport2(): @requires_testing_data def test_snirf_coordframe(): """Test reading SNIRF files.""" - raw = read_raw_snirf(nirx_nirsport2_103, optode_frame="head").\ - info['chs'][3]['coord_frame'] + raw = read_raw_snirf(nirx_nirsport2_103, optode_frame="head").info["chs"][3][ + "coord_frame" + ] assert raw == FIFF.FIFFV_COORD_HEAD - raw = read_raw_snirf(nirx_nirsport2_103, optode_frame="mri").\ - info['chs'][3]['coord_frame'] + raw = read_raw_snirf(nirx_nirsport2_103, optode_frame="mri").info["chs"][3][ + "coord_frame" + ] assert raw == FIFF.FIFFV_COORD_HEAD - raw = read_raw_snirf(nirx_nirsport2_103, optode_frame="unknown").\ - info['chs'][3]['coord_frame'] + raw = read_raw_snirf(nirx_nirsport2_103, optode_frame="unknown").info["chs"][3][ + "coord_frame" + ] assert raw == FIFF.FIFFV_COORD_UNKNOWN @requires_testing_data def test_snirf_nirsport2_w_positions(): """Test reading SNIRF files with known positions.""" - raw = read_raw_snirf(nirx_nirsport2_103_2, preload=True, - optode_frame="mri") + raw = read_raw_snirf(nirx_nirsport2_103_2, preload=True, optode_frame="mri") _reorder_nirx(raw) # Test data import assert raw._data.shape == (40, 128) - assert_almost_equal(raw.info['sfreq'], 10.2, decimal=1) + assert_almost_equal(raw.info["sfreq"], 10.2, decimal=1) # Test channel naming - assert raw.info['ch_names'][:4] == ['S1_D1 760', 'S1_D1 850', - 'S1_D6 760', 'S1_D6 850'] - assert raw.info['ch_names'][24:26] == ['S6_D4 760', 'S6_D4 850'] + assert raw.info["ch_names"][:4] == [ + "S1_D1 760", + "S1_D1 850", + "S1_D6 760", + "S1_D6 850", + ] + assert raw.info["ch_names"][24:26] == ["S6_D4 760", "S6_D4 850"] # Test frequency encoding - assert raw.info['chs'][0]['loc'][9] == 760 - assert raw.info['chs'][1]['loc'][9] == 850 + assert raw.info["chs"][0]["loc"][9] == 760 + assert raw.info["chs"][1]["loc"][9] == 850 assert sum(short_channels(raw.info)) == 16 @@ -307,54 +345,52 @@ def test_snirf_nirsport2_w_positions(): # nirsite https://github.com/mne-tools/mne-testing-data/pull/86 # figure 3 allowed_distance_error = 0.005 - 
assert_allclose(source_detector_distances(raw.copy(). - pick("S1_D1 760").info), - [0.0304], atol=allowed_distance_error) - assert_allclose(source_detector_distances(raw.copy(). - pick("S2_D2 760").info), - [0.0400], atol=allowed_distance_error) + assert_allclose( + source_detector_distances(raw.copy().pick("S1_D1 760").info), + [0.0304], + atol=allowed_distance_error, + ) + assert_allclose( + source_detector_distances(raw.copy().pick("S2_D2 760").info), + [0.0400], + atol=allowed_distance_error, + ) # Test location of detectors # The locations of detectors can be seen in the first # figure on this page... # https://github.com/mne-tools/mne-testing-data/pull/86 allowed_dist_error = 0.0002 - locs = [ch['loc'][6:9] for ch in raw.info['chs']] - head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + locs = [ch["loc"][6:9] for ch in raw.info["chs"]] + head_mri_t, _ = _get_trans("fsaverage", "head", "mri") mni_locs = apply_trans(head_mri_t, locs) - assert raw.info['ch_names'][0][3:5] == 'D1' - assert_allclose( - mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) + assert raw.info["ch_names"][0][3:5] == "D1" + assert_allclose(mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) - assert raw.info['ch_names'][2][3:5] == 'D6' - assert_allclose( - mni_locs[2], [-0.0841, -0.0138, 0.0248], atol=allowed_dist_error) + assert raw.info["ch_names"][2][3:5] == "D6" + assert_allclose(mni_locs[2], [-0.0841, -0.0138, 0.0248], atol=allowed_dist_error) - assert raw.info['ch_names'][34][3:5] == 'D5' - assert_allclose( - mni_locs[34], [0.0845, -0.0451, -0.0123], atol=allowed_dist_error) + assert raw.info["ch_names"][34][3:5] == "D5" + assert_allclose(mni_locs[34], [0.0845, -0.0451, -0.0123], atol=allowed_dist_error) # Test location of sensors # The locations of sensors can be seen in the second # figure on this page... 
# https://github.com/mne-tools/mne-testing-data/pull/86 allowed_dist_error = 0.0002 - locs = [ch['loc'][3:6] for ch in raw.info['chs']] - head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + locs = [ch["loc"][3:6] for ch in raw.info["chs"]] + head_mri_t, _ = _get_trans("fsaverage", "head", "mri") mni_locs = apply_trans(head_mri_t, locs) - assert raw.info['ch_names'][0][:2] == 'S1' - assert_allclose( - mni_locs[0], [-0.0848, -0.0162, -0.0163], atol=allowed_dist_error) + assert raw.info["ch_names"][0][:2] == "S1" + assert_allclose(mni_locs[0], [-0.0848, -0.0162, -0.0163], atol=allowed_dist_error) - assert raw.info['ch_names'][9][:2] == 'S2' - assert_allclose( - mni_locs[9], [-0.0, -0.1195, 0.0142], atol=allowed_dist_error) + assert raw.info["ch_names"][9][:2] == "S2" + assert_allclose(mni_locs[9], [-0.0, -0.1195, 0.0142], atol=allowed_dist_error) - assert raw.info['ch_names'][34][:2] == 'S8' - assert_allclose( - mni_locs[34], [0.0828, -0.046, 0.0285], atol=allowed_dist_error) + assert raw.info["ch_names"][34][:2] == "S8" + assert_allclose(mni_locs[34], [0.0828, -0.046, 0.0285], atol=allowed_dist_error) mon = raw.get_montage() assert len(mon.dig) == 27 @@ -367,14 +403,14 @@ def test_snirf_fieldtrip_od(): # Test data import assert raw._data.shape == (72, 500) - assert raw.copy().pick('fnirs')._data.shape == (72, 500) - assert raw.copy().pick('fnirs_od')._data.shape == (72, 500) - with pytest.raises(ValueError, match='not be interpreted as channel'): - raw.copy().pick('hbo') - with pytest.raises(ValueError, match='not be interpreted as channel'): - raw.copy().pick('hbr') + assert raw.copy().pick("fnirs")._data.shape == (72, 500) + assert raw.copy().pick("fnirs_od")._data.shape == (72, 500) + with pytest.raises(ValueError, match="not be interpreted as channel"): + raw.copy().pick("hbo") + with pytest.raises(ValueError, match="not be interpreted as channel"): + raw.copy().pick("hbr") - assert_allclose(raw.info['sfreq'], 50) + assert_allclose(raw.info["sfreq"], 50) @requires_testing_data @@ -384,10 +420,10 @@ def test_snirf_kernel_hb(): # Test data import assert raw._data.shape == (180 * 2, 14) - assert raw.copy().pick('hbo')._data.shape == (180, 14) - assert raw.copy().pick('hbr')._data.shape == (180, 14) + assert raw.copy().pick("hbo")._data.shape == (180, 14) + assert raw.copy().pick("hbr")._data.shape == (180, 14) - assert_allclose(raw.info['sfreq'], 8.257638) + assert_allclose(raw.info["sfreq"], 8.256495) bad_nans = np.isnan(raw.get_data()).any(axis=1) assert np.sum(bad_nans) == 20 @@ -400,25 +436,48 @@ def test_snirf_kernel_hb(): @requires_testing_data -@pytest.mark.parametrize('fname, boundary_decimal, test_scaling, test_rank', ( - [sfnirs_homer_103_wShort, 0, True, True], - [nirx_nirsport2_103, 0, True, False], # strange rank behavior - [nirx_nirsport2_103_2, 0, False, True], # weirdly small values - [snirf_nirsport2_20219, 0, True, True], -)) +@pytest.mark.parametrize( + "sfreq,context", + ( + [8.2, nullcontext()], # sfreq estimated from file is 8.256495 + [22, pytest.warns(RuntimeWarning, match="User-supplied sampling frequency")], + ), +) +def test_user_set_sfreq(sfreq, context): + """Test manually setting sfreq.""" + with context: + # both sfreqs are far enough from true rate to yield >1% jitter + with pytest.warns(RuntimeWarning, match=r"jitter of \d+\.\d*% in sample times"): + raw = read_raw_snirf(kernel_hb, preload=False, sfreq=sfreq) + assert raw.info["sfreq"] == sfreq + + +@requires_testing_data +@pytest.mark.parametrize( + "fname, boundary_decimal, test_scaling, test_rank", + 
( + [sfnirs_homer_103_wShort, 0, True, True], + [nirx_nirsport2_103, 0, True, False], # strange rank behavior + [nirx_nirsport2_103_2, 0, False, True], # weirdly small values + [snirf_nirsport2_20219, 0, True, True], + ), +) def test_snirf_standard(fname, boundary_decimal, test_scaling, test_rank): """Test standard operations.""" - _test_raw_reader(read_raw_snirf, fname=fname, - boundary_decimal=boundary_decimal, - test_scaling=test_scaling, - test_rank=test_rank) # low fs + _test_raw_reader( + read_raw_snirf, + fname=fname, + boundary_decimal=boundary_decimal, + test_scaling=test_scaling, + test_rank=test_rank, + ) # low fs @requires_testing_data def test_annotation_description_from_stim_groups(): """Test annotation descriptions parsed from stim group names.""" raw = read_raw_snirf(nirx_nirsport2_103_2, preload=True) - expected_descriptions = ['1', '2', '6'] + expected_descriptions = ["1", "2", "6"] assert_equal(expected_descriptions, raw.annotations.description) @@ -433,5 +492,85 @@ def test_annotation_duration_from_stim_groups(): # which represents duration, will be all 10s. # from snirf import Snirf # a = Snirf(snirf_nirsport2_20219, "r+"); print(a.nirs[0].stim[0].data) - expected_durations = np.full((10,), 10.) + expected_durations = np.full((10,), 10.0) assert_equal(expected_durations, raw.annotations.duration) + + +def test_birthday(tmp_path, monkeypatch): + """Test birthday parsing.""" + try: + snirf = pytest.importorskip("snirf") + except AttributeError as exc: + # Until https://github.com/BUNPC/pysnirf2/pull/43 is released + pytest.skip(f"snirf import error: {exc}") + fname = tmp_path / "test.snirf" + with snirf.Snirf(str(fname), "w") as a: + a.nirs.appendGroup() + a.nirs[0].data.appendGroup() + a.nirs[0].data[0].dataTimeSeries = np.zeros((2, 2)) + a.nirs[0].data[0].time = [0, 1] + for i in range(2): + a.nirs[0].data[0].measurementList.appendGroup() + a.nirs[0].data[0].measurementList[i].sourceIndex = 1 + a.nirs[0].data[0].measurementList[i].detectorIndex = 1 + a.nirs[0].data[0].measurementList[i].wavelengthIndex = 1 + a.nirs[0].data[0].measurementList[i].dataType = 99999 + a.nirs[0].data[0].measurementList[i].dataTypeIndex = 0 + a.nirs[0].data[0].measurementList[0].dataTypeLabel = "HbO" + a.nirs[0].data[0].measurementList[1].dataTypeLabel = "HbR" + a.nirs[0].metaDataTags.SubjectID = "0" + a.nirs[0].metaDataTags.MeasurementDate = "2000-01-01" + a.nirs[0].metaDataTags.MeasurementTime = "00:00:00" + a.nirs[0].metaDataTags.LengthUnit = "m" + a.nirs[0].metaDataTags.TimeUnit = "s" + a.nirs[0].metaDataTags.FrequencyUnit = "Hz" + a.nirs[0].metaDataTags.add("DateOfBirth", "1950-01-01") + a.nirs[0].probe.wavelengths = [0, 0] + a.nirs[0].probe.sourcePos3D = np.zeros((1, 3)) + a.nirs[0].probe.detectorPos3D = np.zeros((1, 3)) + # Until https://github.com/BUNPC/pysnirf2/pull/39 is released + monkeypatch.setattr(a._cfg.logger, "info", lambda *args, **kwargs: None) + a.save() + + raw = read_raw_snirf(fname) + assert raw.info["subject_info"]["birthday"] == datetime.date(1950, 1, 1) + # TODO: trigger some setting checkers that should maybe be in the reader (like + # those for subject_info) + raw.info.copy() + + +@requires_testing_data +def test_sample_rate_jitter(tmp_path): + """Test handling of jittered sample times.""" + from shutil import copy2 + + # Create a clean copy and ensure it loads without error + new_file = tmp_path / "snirf_nirsport2_2019.snirf" + copy2(snirf_nirsport2_20219, new_file) + read_raw_snirf(new_file) + + # Edit the file and add jitter within tolerance (0.99%) + with 
h5py.File(new_file, "r+") as f: + orig_time = np.array(f.get("nirs/data1/time")) + acceptable_time_jitter = orig_time.copy() + mean_period = np.mean(np.diff(orig_time)) + acceptable_time_jitter[-1] += 0.0099 * mean_period + del f["nirs/data1/time"] + f.flush() + f.create_dataset("nirs/data1/time", data=acceptable_time_jitter) + with catch_logging("info") as log: + read_raw_snirf(new_file) + lines = "\n".join(line for line in log.getvalue().splitlines() if "jitter" in line) + assert "Found jitter of 0.9" in lines + + # Add jitter of 1.02%, which is greater than allowed tolerance + with h5py.File(new_file, "r+") as f: + unacceptable_time_jitter = orig_time + unacceptable_time_jitter[-1] = unacceptable_time_jitter[-1] + ( + 0.0102 * mean_period + ) + del f["nirs/data1/time"] + f.flush() + f.create_dataset("nirs/data1/time", data=unacceptable_time_jitter) + with pytest.warns(RuntimeWarning, match="non-uniformly-sampled data"): + read_raw_snirf(new_file, verbose=True) diff --git a/mne/io/tag.py b/mne/io/tag.py deleted file mode 100644 index 69504a5e49a..00000000000 --- a/mne/io/tag.py +++ /dev/null @@ -1,517 +0,0 @@ -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# -# License: BSD-3-Clause - -from functools import partial -import struct - -import numpy as np - -from .constants import (FIFF, _dig_kind_named, _dig_cardinal_named, - _ch_kind_named, _ch_coil_type_named, _ch_unit_named, - _ch_unit_mul_named) -from ..utils.numerics import _julian_to_cal - - -############################################################################## -# HELPERS - -class Tag(object): - """Tag in FIF tree structure. - - Parameters - ---------- - kind : int - Kind of Tag. - type_ : int - Type of Tag. - size : int - Size in bytes. - int : next - Position of next Tag. - pos : int - Position of Tag is the original file. 
- """ - - def __init__(self, kind, type_, size, next, pos=None): # noqa: D102 - self.kind = int(kind) - self.type = int(type_) - self.size = int(size) - self.next = int(next) - self.pos = pos if pos is not None else next - self.pos = int(self.pos) - self.data = None - - def __repr__(self): # noqa: D105 - out = (" 0: - fid.seek(tag.next, 0) - return tag - - -def _frombuffer_rows(fid, tag_size, dtype=None, shape=None, rlims=None): - """Get a range of rows from a large tag.""" - if shape is not None: - item_size = np.dtype(dtype).itemsize - if not len(shape) == 2: - raise ValueError('Only implemented for 2D matrices') - want_shape = np.prod(shape) - have_shape = tag_size // item_size - if want_shape != have_shape: - raise ValueError('Wrong shape specified, requested %s have %s' - % (want_shape, have_shape)) - if not len(rlims) == 2: - raise ValueError('rlims must have two elements') - n_row_out = rlims[1] - rlims[0] - if n_row_out <= 0: - raise ValueError('rlims must yield at least one output') - row_size = item_size * shape[1] - # # of bytes to skip at the beginning, # to read, where to end - start_skip = int(rlims[0] * row_size) - read_size = int(n_row_out * row_size) - end_pos = int(fid.tell() + tag_size) - # Move the pointer ahead to the read point - fid.seek(start_skip, 1) - # Do the reading - out = np.frombuffer(fid.read(read_size), dtype=dtype) - # Move the pointer ahead to the end of the tag - fid.seek(end_pos) - else: - out = np.frombuffer(fid.read(tag_size), dtype=dtype) - return out - - -def _loc_to_coil_trans(loc): - """Convert loc vector to coil_trans.""" - assert loc.shape[-1] == 12 - coil_trans = np.zeros(loc.shape[:-1] + (4, 4)) - coil_trans[..., :3, 3] = loc[..., :3] - coil_trans[..., :3, :3] = np.reshape( - loc[..., 3:], loc.shape[:-1] + (3, 3)).swapaxes(-1, -2) - coil_trans[..., -1, -1] = 1. - return coil_trans - - -def _coil_trans_to_loc(coil_trans): - """Convert coil_trans to loc.""" - coil_trans = coil_trans.astype(np.float64) - return np.roll(coil_trans.T[:, :3], 1, 0).flatten() - - -def _loc_to_eeg_loc(loc): - """Convert a loc to an EEG loc.""" - if not np.isfinite(loc[:3]).all(): - raise RuntimeError('Missing EEG channel location') - if np.isfinite(loc[3:6]).all() and (loc[3:6]).any(): - return np.array([loc[0:3], loc[3:6]]).T - else: - return loc[0:3][:, np.newaxis].copy() - - -############################################################################## -# READING FUNCTIONS - -# None of these functions have docstring because it's more compact that way, -# and hopefully it's clear what they do by their names and variable values. -# See ``read_tag`` for variable descriptions. Return values are implied -# by the function names. 
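[Editor's note: the deleted helpers above encode MNE's 12-element loc convention, origin followed by the ex/ey/ez unit vectors, and its mapping to a 4x4 coil transform. A minimal NumPy sketch of that round trip, using toy values rather than a real channel:]

import numpy as np

loc = np.arange(12, dtype=float)         # toy values: [origin(3), ex(3), ey(3), ez(3)]

trans = np.eye(4)
trans[:3, 3] = loc[:3]                   # translation = channel origin
trans[:3, :3] = loc[3:].reshape(3, 3).T  # ex, ey, ez become the rotation columns

back = np.concatenate([trans[:3, 3], trans[:3, :3].T.ravel()])
assert np.allclose(back, loc)            # loc -> trans -> loc round trip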
- -_is_matrix = 4294901760 # ffff0000 -_matrix_coding_dense = 16384 # 4000 -_matrix_coding_CCS = 16400 # 4010 -_matrix_coding_RCS = 16416 # 4020 -_data_type = 65535 # ffff - - -def _read_tag_header(fid): - """Read only the header of a Tag.""" - s = fid.read(4 * 4) - if len(s) == 0: - return None - # struct.unpack faster than np.frombuffer, saves ~10% of time some places - return Tag(*struct.unpack('>iIii', s)) - - -_matrix_bit_dtype = { - FIFF.FIFFT_INT: (4, '>i4'), - FIFF.FIFFT_JULIAN: (4, '>i4'), - FIFF.FIFFT_FLOAT: (4, '>f4'), - FIFF.FIFFT_DOUBLE: (8, '>f8'), - FIFF.FIFFT_COMPLEX_FLOAT: (8, '>f4'), - FIFF.FIFFT_COMPLEX_DOUBLE: (16, '>f8'), -} - - -def _read_matrix(fid, tag, shape, rlims, matrix_coding): - """Read a matrix (dense or sparse) tag.""" - from scipy import sparse - matrix_coding = matrix_coding >> 16 - - # This should be easy to implement (see _frombuffer_rows) - # if we need it, but for now, it's not... - if shape is not None: - raise ValueError('Row reading not implemented for matrices ' - 'yet') - - # Matrices - if matrix_coding == _matrix_coding_dense: - # Find dimensions and return to the beginning of tag data - pos = fid.tell() - fid.seek(tag.size - 4, 1) - ndim = int(np.frombuffer(fid.read(4), dtype='>i4')) - fid.seek(-(ndim + 1) * 4, 1) - dims = np.frombuffer(fid.read(4 * ndim), dtype='>i4')[::-1] - # - # Back to where the data start - # - fid.seek(pos, 0) - - if ndim > 3: - raise Exception('Only 2 or 3-dimensional matrices are ' - 'supported at this time') - - matrix_type = _data_type & tag.type - try: - bit, dtype = _matrix_bit_dtype[matrix_type] - except KeyError: - raise RuntimeError('Cannot handle matrix of type %d yet' - % matrix_type) - data = fid.read(int(bit * dims.prod())) - data = np.frombuffer(data, dtype=dtype) - # Note: we need the non-conjugate transpose here - if matrix_type == FIFF.FIFFT_COMPLEX_FLOAT: - data = data.view('>c8') - elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE: - data = data.view('>c16') - data.shape = dims - elif matrix_coding in (_matrix_coding_CCS, _matrix_coding_RCS): - # Find dimensions and return to the beginning of tag data - pos = fid.tell() - fid.seek(tag.size - 4, 1) - ndim = int(np.frombuffer(fid.read(4), dtype='>i4')) - fid.seek(-(ndim + 2) * 4, 1) - dims = np.frombuffer(fid.read(4 * (ndim + 1)), dtype='>i4') - if ndim != 2: - raise Exception('Only two-dimensional matrices are ' - 'supported at this time') - - # Back to where the data start - fid.seek(pos, 0) - nnz = int(dims[0]) - nrow = int(dims[1]) - ncol = int(dims[2]) - data = np.frombuffer(fid.read(4 * nnz), dtype='>f4') - shape = (dims[1], dims[2]) - if matrix_coding == _matrix_coding_CCS: - # CCS - tmp_indices = fid.read(4 * nnz) - indices = np.frombuffer(tmp_indices, dtype='>i4') - tmp_ptr = fid.read(4 * (ncol + 1)) - indptr = np.frombuffer(tmp_ptr, dtype='>i4') - if indptr[-1] > len(indices) or np.any(indptr < 0): - # There was a bug in MNE-C that caused some data to be - # stored without byte swapping - indices = np.concatenate( - (np.frombuffer(tmp_indices[:4 * (nrow + 1)], dtype='>i4'), - np.frombuffer(tmp_indices[4 * (nrow + 1):], dtype=' len(indices) or np.any(indptr < 0): - # There was a bug in MNE-C that caused some data to be - # stored without byte swapping - indices = np.concatenate( - (np.frombuffer(tmp_indices[:4 * (ncol + 1)], dtype='>i4'), - np.frombuffer(tmp_indices[4 * (ncol + 1):], dtype='c8") - return d - - -def _read_complex_double(fid, tag, shape, rlims): - """Read complex double tag.""" - # data gets stored twice as large - if shape is not 
None: - shape = (shape[0], shape[1] * 2) - d = _frombuffer_rows(fid, tag.size, dtype=">f8", shape=shape, rlims=rlims) - d = d.view(">c16") - return d - - -def _read_id_struct(fid, tag, shape, rlims): - """Read ID struct tag.""" - return dict( - version=int(np.frombuffer(fid.read(4), dtype=">i4")), - machid=np.frombuffer(fid.read(8), dtype=">i4"), - secs=int(np.frombuffer(fid.read(4), dtype=">i4")), - usecs=int(np.frombuffer(fid.read(4), dtype=">i4"))) - - -def _read_dig_point_struct(fid, tag, shape, rlims): - """Read dig point struct tag.""" - kind = int(np.frombuffer(fid.read(4), dtype=">i4")) - kind = _dig_kind_named.get(kind, kind) - ident = int(np.frombuffer(fid.read(4), dtype=">i4")) - if kind == FIFF.FIFFV_POINT_CARDINAL: - ident = _dig_cardinal_named.get(ident, ident) - return dict( - kind=kind, ident=ident, - r=np.frombuffer(fid.read(12), dtype=">f4"), - coord_frame=FIFF.FIFFV_COORD_UNKNOWN) - - -def _read_coord_trans_struct(fid, tag, shape, rlims): - """Read coord trans struct tag.""" - from ..transforms import Transform - fro = int(np.frombuffer(fid.read(4), dtype=">i4")) - to = int(np.frombuffer(fid.read(4), dtype=">i4")) - rot = np.frombuffer(fid.read(36), dtype=">f4").reshape(3, 3) - move = np.frombuffer(fid.read(12), dtype=">f4") - trans = np.r_[np.c_[rot, move], - np.array([[0], [0], [0], [1]]).T] - data = Transform(fro, to, trans) - fid.seek(48, 1) # Skip over the inverse transformation - return data - - -_ch_coord_dict = { - FIFF.FIFFV_MEG_CH: FIFF.FIFFV_COORD_DEVICE, - FIFF.FIFFV_REF_MEG_CH: FIFF.FIFFV_COORD_DEVICE, - FIFF.FIFFV_EEG_CH: FIFF.FIFFV_COORD_HEAD, - FIFF.FIFFV_ECOG_CH: FIFF.FIFFV_COORD_HEAD, - FIFF.FIFFV_SEEG_CH: FIFF.FIFFV_COORD_HEAD, - FIFF.FIFFV_DBS_CH: FIFF.FIFFV_COORD_HEAD, - FIFF.FIFFV_FNIRS_CH: FIFF.FIFFV_COORD_HEAD -} - - -def _read_ch_info_struct(fid, tag, shape, rlims): - """Read channel info struct tag.""" - d = dict( - scanno=int(np.frombuffer(fid.read(4), dtype=">i4")), - logno=int(np.frombuffer(fid.read(4), dtype=">i4")), - kind=int(np.frombuffer(fid.read(4), dtype=">i4")), - range=float(np.frombuffer(fid.read(4), dtype=">f4")), - cal=float(np.frombuffer(fid.read(4), dtype=">f4")), - coil_type=int(np.frombuffer(fid.read(4), dtype=">i4")), - # deal with really old OSX Anaconda bug by casting to float64 - loc=np.frombuffer(fid.read(48), dtype=">f4").astype(np.float64), - # unit and exponent - unit=int(np.frombuffer(fid.read(4), dtype=">i4")), - unit_mul=int(np.frombuffer(fid.read(4), dtype=">i4")), - ) - # channel name - ch_name = np.frombuffer(fid.read(16), dtype=">c") - ch_name = ch_name[:np.argmax(ch_name == b'')].tobytes() - d['ch_name'] = ch_name.decode() - # coil coordinate system definition - _update_ch_info_named(d) - return d - - -def _update_ch_info_named(d): - d['coord_frame'] = _ch_coord_dict.get(d['kind'], FIFF.FIFFV_COORD_UNKNOWN) - d['kind'] = _ch_kind_named.get(d['kind'], d['kind']) - d['coil_type'] = _ch_coil_type_named.get(d['coil_type'], d['coil_type']) - d['unit'] = _ch_unit_named.get(d['unit'], d['unit']) - d['unit_mul'] = _ch_unit_mul_named.get(d['unit_mul'], d['unit_mul']) - - -def _read_old_pack(fid, tag, shape, rlims): - """Read old pack tag.""" - offset = float(np.frombuffer(fid.read(4), dtype=">f4")) - scale = float(np.frombuffer(fid.read(4), dtype=">f4")) - data = np.frombuffer(fid.read(tag.size - 8), dtype=">i2") - data = data * scale # to float64 - data += offset - return data - - -def _read_dir_entry_struct(fid, tag, shape, rlims): - """Read dir entry struct tag.""" - return [_read_tag_header(fid) for _ in 
range(tag.size // 16 - 1)] - - -def _read_julian(fid, tag, shape, rlims): - """Read julian tag.""" - return _julian_to_cal(int(np.frombuffer(fid.read(4), dtype=">i4"))) - - -# Read types call dict -_call_dict = { - FIFF.FIFFT_STRING: _read_string, - FIFF.FIFFT_COMPLEX_FLOAT: _read_complex_float, - FIFF.FIFFT_COMPLEX_DOUBLE: _read_complex_double, - FIFF.FIFFT_ID_STRUCT: _read_id_struct, - FIFF.FIFFT_DIG_POINT_STRUCT: _read_dig_point_struct, - FIFF.FIFFT_COORD_TRANS_STRUCT: _read_coord_trans_struct, - FIFF.FIFFT_CH_INFO_STRUCT: _read_ch_info_struct, - FIFF.FIFFT_OLD_PACK: _read_old_pack, - FIFF.FIFFT_DIR_ENTRY_STRUCT: _read_dir_entry_struct, - FIFF.FIFFT_JULIAN: _read_julian, -} -_call_dict_names = { - FIFF.FIFFT_STRING: 'str', - FIFF.FIFFT_COMPLEX_FLOAT: 'c8', - FIFF.FIFFT_COMPLEX_DOUBLE: 'c16', - FIFF.FIFFT_ID_STRUCT: 'ids', - FIFF.FIFFT_DIG_POINT_STRUCT: 'dps', - FIFF.FIFFT_COORD_TRANS_STRUCT: 'cts', - FIFF.FIFFT_CH_INFO_STRUCT: 'cis', - FIFF.FIFFT_OLD_PACK: 'op_', - FIFF.FIFFT_DIR_ENTRY_STRUCT: 'dir', - FIFF.FIFFT_JULIAN: 'jul', - FIFF.FIFFT_VOID: 'nul', # 0 -} - -# Append the simple types -_simple_dict = { - FIFF.FIFFT_BYTE: '>B', - FIFF.FIFFT_SHORT: '>i2', - FIFF.FIFFT_INT: '>i4', - FIFF.FIFFT_USHORT: '>u2', - FIFF.FIFFT_UINT: '>u4', - FIFF.FIFFT_FLOAT: '>f4', - FIFF.FIFFT_DOUBLE: '>f8', - FIFF.FIFFT_DAU_PACK16: '>i2', -} -for key, dtype in _simple_dict.items(): - _call_dict[key] = partial(_read_simple, dtype=dtype) - _call_dict_names[key] = dtype - - -def read_tag(fid, pos=None, shape=None, rlims=None): - """Read a Tag from a file at a given position. - - Parameters - ---------- - fid : file - The open FIF file descriptor. - pos : int - The position of the Tag in the file. - shape : tuple | None - If tuple, the shape of the stored matrix. Only to be used with - data stored as a vector (not implemented for matrices yet). - rlims : tuple | None - If tuple, the first (inclusive) and last (exclusive) rows to retrieve. - Note that data are assumed to be stored row-major in the file. Only to - be used with data stored as a vector (not implemented for matrices - yet). - - Returns - ------- - tag : Tag - The Tag read. - """ - if pos is not None: - fid.seek(pos, 0) - tag = _read_tag_header(fid) - if tag is None: - return tag - if tag.size > 0: - matrix_coding = _is_matrix & tag.type - if matrix_coding != 0: - tag.data = _read_matrix(fid, tag, shape, rlims, matrix_coding) - else: - # All other data types - try: - fun = _call_dict[tag.type] - except KeyError: - raise Exception('Unimplemented tag data type %s' % tag.type) - tag.data = fun(fid, tag, shape, rlims) - if tag.next != FIFF.FIFFV_NEXT_SEQ: - # f.seek(tag.next,0) - fid.seek(tag.next, 1) # XXX : fix? pb when tag.next < 0 - - return tag - - -def find_tag(fid, node, findkind): - """Find Tag in an open FIF file descriptor. - - Parameters - ---------- - fid : file-like - Open file. - node : dict - Node to search. - findkind : int - Tag kind to find. - - Returns - ------- - tag : instance of Tag - The first tag found. 
- """ - if node['directory'] is not None: - for subnode in node['directory']: - if subnode.kind == findkind: - return read_tag(fid, subnode.pos) - return None - - -def has_tag(node, kind): - """Check if the node contains a Tag of a given kind.""" - for d in node['directory']: - if d.kind == kind: - return True - return False - - -def _rename_list(bads, ch_names_mapping): - return [ch_names_mapping.get(bad, bad) for bad in bads] diff --git a/mne/io/tests/__init__.py b/mne/io/tests/__init__.py index aba6507665f..c45203dcc03 100644 --- a/mne/io/tests/__init__.py +++ b/mne/io/tests/__init__.py @@ -1,3 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + import os.path as op -data_dir = op.join(op.dirname(__file__), 'data') +data_dir = op.join(op.dirname(__file__), "data") diff --git a/mne/io/tests/data/__init__.py b/mne/io/tests/data/__init__.py index e69de29bb2d..04c673b4e4d 100644 --- a/mne/io/tests/data/__init__.py +++ b/mne/io/tests/data/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/io/tests/test_apply_function.py b/mne/io/tests/test_apply_function.py index 887ba6a8eb3..a6745d5e31d 100644 --- a/mne/io/tests/test_apply_function.py +++ b/mne/io/tests/test_apply_function.py @@ -1,13 +1,13 @@ -# Authors: Eric Larson -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. import numpy as np import pytest from mne import create_info from mne.io import RawArray -from mne.utils import logger, catch_logging +from mne.utils import catch_logging, logger def bad_1(x): @@ -27,7 +27,7 @@ def bad_3(x): def printer(x): """Print.""" - logger.info('exec') + logger.info("exec") return x @@ -37,23 +37,22 @@ def test_apply_function_verbose(): n_chan = 2 n_times = 3 ch_names = [str(ii) for ii in range(n_chan)] - raw = RawArray(np.zeros((n_chan, n_times)), - create_info(ch_names, 1., 'mag')) + raw = RawArray(np.zeros((n_chan, n_times)), create_info(ch_names, 1.0, "mag")) # test return types in both code paths (parallel / 1 job) - with pytest.raises(TypeError, match='Return value must be an ndarray'): + with pytest.raises(TypeError, match="Return value must be an ndarray"): raw.apply_function(bad_1) - with pytest.raises(ValueError, match='Return data must have shape'): + with pytest.raises(ValueError, match="Return data must have shape"): raw.apply_function(bad_2) - with pytest.raises(TypeError, match='Return value must be an ndarray'): + with pytest.raises(TypeError, match="Return value must be an ndarray"): raw.apply_function(bad_1, n_jobs=2) - with pytest.raises(ValueError, match='Return data must have shape'): + with pytest.raises(ValueError, match="Return data must have shape"): raw.apply_function(bad_2, n_jobs=2) # test return type when `channel_wise=False` raw.apply_function(printer, channel_wise=False) - with pytest.raises(TypeError, match='Return value must be an ndarray'): + with pytest.raises(TypeError, match="Return value must be an ndarray"): raw.apply_function(bad_1, channel_wise=False) - with pytest.raises(ValueError, match='Return data must have shape'): + with pytest.raises(ValueError, match="Return data must have shape"): raw.apply_function(bad_3, channel_wise=False) # check our arguments @@ -62,4 +61,33 @@ def test_apply_function_verbose(): assert len(sio.getvalue(close=False)) == 0 assert out is raw raw.apply_function(printer, verbose=True) - assert 
sio.getvalue().count('\n') == n_chan + assert sio.getvalue().count("\n") == n_chan + + +def test_apply_function_ch_access(): + """Test apply_function is able to access channel idx.""" + + def _bad_ch_idx(x, ch_idx): + assert x[0] == ch_idx + return x + + def _bad_ch_name(x, ch_name): + assert isinstance(ch_name, str) + assert x[0] == float(ch_name) + return x + + data = np.full((2, 10), np.arange(2).reshape(-1, 1)) + raw = RawArray(data, create_info(2, 1.0, "mag")) + + # test ch_idx access in both code paths (parallel / 1 job) + raw.apply_function(_bad_ch_idx) + raw.apply_function(_bad_ch_idx, n_jobs=2) + raw.apply_function(_bad_ch_name) + raw.apply_function(_bad_ch_name, n_jobs=2) + + # test input catches + with pytest.raises( + ValueError, + match="cannot access.*when channel_wise=False", + ): + raw.apply_function(_bad_ch_idx, channel_wise=False) diff --git a/mne/io/tests/test_compensator.py b/mne/io/tests/test_compensator.py deleted file mode 100644 index bb8b33bce32..00000000000 --- a/mne/io/tests/test_compensator.py +++ /dev/null @@ -1,110 +0,0 @@ -# Author: Alexandre Gramfort -# -# License: BSD-3-Clause - -from pathlib import Path - -import numpy as np -from numpy.testing import assert_allclose -import pytest - -from mne import Epochs, read_evokeds, pick_types -from mne.io.compensator import make_compensator, get_current_comp -from mne.io import read_raw_fif -from mne.utils import requires_mne, run_subprocess - -base_dir = Path(__file__).parent / "data" -ctf_comp_fname = base_dir / "test_ctf_comp_raw.fif" - - -def test_compensation_identity(): - """Test compensation identity.""" - raw = read_raw_fif(ctf_comp_fname) - assert get_current_comp(raw.info) == 3 - comp1 = make_compensator(raw.info, 3, 1, exclude_comp_chs=False) - assert comp1.shape == (340, 340) - comp2 = make_compensator(raw.info, 3, 1, exclude_comp_chs=True) - assert comp2.shape == (311, 340) - - # round-trip - desired = np.eye(340) - for from_ in range(3): - for to in range(3): - if from_ == to: - continue - comp1 = make_compensator(raw.info, from_, to) - comp2 = make_compensator(raw.info, to, from_) - # To get 1e-12 here (instead of 1e-6) we must use the linalg.inv - # method mentioned in compensator.py - assert_allclose(np.dot(comp1, comp2), desired, atol=1e-12) - assert_allclose(np.dot(comp2, comp1), desired, atol=1e-12) - - -@pytest.mark.parametrize('preload', (True, False)) -@pytest.mark.parametrize('pick', (False, True)) -def test_compensation_apply(tmp_path, preload, pick): - """Test applying compensation.""" - # make sure that changing the comp doesn't modify the original data - raw = read_raw_fif(ctf_comp_fname, preload=preload) - assert raw._comp is None - raw2 = raw.copy() - raw2.apply_gradient_compensation(2) - if pick: - raw2.pick([0] + list(range(2, len(raw.ch_names)))) - raw.pick([0] + list(range(2, len(raw.ch_names)))) - assert get_current_comp(raw2.info) == 2 - if preload: - assert raw2._comp is None - else: - assert raw2._comp.shape == (len(raw2.ch_names),) * 2 - fname = tmp_path / "ctf-raw.fif" - raw2.save(fname) - raw2 = read_raw_fif(fname) - assert raw2.compensation_grade == 2 - raw2.apply_gradient_compensation(3) - assert raw2.compensation_grade == 3 - data, _ = raw[:, :] - data2, _ = raw2[:, :] - # channels have norm ~1e-12 - assert_allclose(data, data2, rtol=1e-9, atol=1e-18) - for ch1, ch2 in zip(raw.info['chs'], raw2.info['chs']): - assert ch1['coil_type'] == ch2['coil_type'] - - -@requires_mne -def test_compensation_mne(tmp_path): - """Test comensation by comparing with MNE.""" - def 
make_evoked(fname, comp): - """Make evoked data.""" - raw = read_raw_fif(fname) - if comp is not None: - raw.apply_gradient_compensation(comp) - picks = pick_types(raw.info, meg=True, ref_meg=True) - events = np.array([[0, 0, 1]], dtype=np.int64) - evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks, - baseline=None).average() - return evoked - - def compensate_mne(fname, comp): - """Compensate using MNE-C.""" - tmp_fname = '%s-%d-ave.fif' % (fname.stem, comp) - cmd = ['mne_compensate_data', '--in', str(fname), - '--out', tmp_fname, '--grad', str(comp)] - run_subprocess(cmd) - return read_evokeds(tmp_fname)[0] - - # save evoked response with default compensation - fname_default = tmp_path / "ctf_default-ave.fif" - make_evoked(ctf_comp_fname, None).save(fname_default) - - for comp in [0, 1, 2, 3]: - evoked_py = make_evoked(ctf_comp_fname, comp) - evoked_c = compensate_mne(fname_default, comp) - picks_py = pick_types(evoked_py.info, meg=True, ref_meg=True) - picks_c = pick_types(evoked_c.info, meg=True, ref_meg=True) - assert_allclose(evoked_py.data[picks_py], evoked_c.data[picks_c], - rtol=1e-3, atol=1e-17) - chs_py = [evoked_py.info['chs'][ii] for ii in picks_py] - chs_c = [evoked_c.info['chs'][ii] for ii in picks_c] - for ch_py, ch_c in zip(chs_py, chs_c): - assert ch_py['coil_type'] == ch_c['coil_type'] diff --git a/mne/io/tests/test_constants.py b/mne/io/tests/test_constants.py deleted file mode 100644 index 1f0cd473992..00000000000 --- a/mne/io/tests/test_constants.py +++ /dev/null @@ -1,345 +0,0 @@ -# Author: Eric Larson -# -# License: BSD-3-Clause - -import os -import re -import shutil -import zipfile - -import numpy as np -import pooch -import pytest - -from mne.io.constants import (FIFF, FWD, _coord_frame_named, _ch_kind_named, - _ch_unit_named, _ch_unit_mul_named, - _ch_coil_type_named, _dig_kind_named, - _dig_cardinal_named) -from mne.forward._make_forward import _read_coil_defs -from mne.utils import requires_good_network - - -# https://github.com/mne-tools/fiff-constants/commits/master -REPO = 'mne-tools' -COMMIT = '6d9ca9ce7fb44c63d429c2986a953500743dfb22' - -# These are oddities that we won't address: -iod_dups = (355, 359) # these are in both MEGIN and MNE files -tag_dups = (3501,) # in both MEGIN and MNE files - -_dir_ignore_names = ('clear', 'copy', 'fromkeys', 'get', 'items', 'keys', - 'pop', 'popitem', 'setdefault', 'update', 'values', - 'has_key', 'iteritems', 'iterkeys', 'itervalues', # Py2 - 'viewitems', 'viewkeys', 'viewvalues', # Py2 - ) -_tag_ignore_names = ( -) # for fiff-constants pending updates -_ignore_incomplete_enums = ( # XXX eventually we could complete these - 'bem_surf_id', 'cardinal_point_cardiac', 'cond_model', 'coord', - 'dacq_system', 'diffusion_param', 'gantry_type', 'map_surf', - 'mne_lin_proj', 'mne_ori', 'mri_format', 'mri_pixel', 'proj_by', - 'tags', 'type', 'iod', 'volume_type', 'vol_type', -) -# not in coil_def.dat but in DictionaryTypes:enum(coil) -_missing_coil_def = ( - 0, # The location info contains no data - 1, # EEG electrode position in r0 - 3, # Old 24 channel system in HUT - 4, # The axial devices in the HUCS MCG system - 5, # Bipolar EEG electrode position - 6, # CSD-transformed EEG electrodes - 200, # Time-varying dipole definition - 300, # fNIRS oxyhemoglobin - 301, # fNIRS deoxyhemoglobin - 302, # fNIRS continuous wave - 303, # fNIRS optical density - 304, # fNIRS frequency domain AC amplitude - 305, # fNIRS frequency domain phase - 1000, # For testing the MCG software - 2001, # Generic axial gradiometer - 3011, # VV prototype 
wirewound planar sensor - 3014, # Vectorview SQ20950N planar gradiometer - 3021, # VV prototype wirewound magnetometer -) -# explicit aliases in constants.py -_aliases = dict( - FIFFV_COIL_MAGNES_R_MAG='FIFFV_COIL_MAGNES_REF_MAG', - FIFFV_COIL_MAGNES_R_GRAD='FIFFV_COIL_MAGNES_REF_GRAD', - FIFFV_COIL_MAGNES_R_GRAD_OFF='FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD', - FIFFV_COIL_FNIRS_RAW='FIFFV_COIL_FNIRS_CW_AMPLITUDE', - FIFFV_MNE_COORD_CTF_HEAD='FIFFV_MNE_COORD_4D_HEAD', - FIFFV_MNE_COORD_KIT_HEAD='FIFFV_MNE_COORD_4D_HEAD', - FIFFV_MNE_COORD_DIGITIZER='FIFFV_COORD_ISOTRAK', - FIFFV_MNE_COORD_SURFACE_RAS='FIFFV_COORD_MRI', - FIFFV_MNE_SENSOR_COV='FIFFV_MNE_NOISE_COV', - FIFFV_POINT_EEG='FIFFV_POINT_ECG', - FIFF_DESCRIPTION='FIFF_COMMENT', - FIFF_REF_PATH='FIFF_MRI_SOURCE_PATH', -) - - -@requires_good_network -def test_constants(tmp_path): - """Test compensation.""" - fname = 'fiff.zip' - dest = tmp_path / fname - pooch.retrieve( - url='https://codeload.github.com/' - f'{REPO}/fiff-constants/zip/{COMMIT}', - path=tmp_path, - fname=fname, - known_hash=None - ) - names = list() - with zipfile.ZipFile(dest, 'r') as ff: - for name in ff.namelist(): - if 'Dictionary' in name: - ff.extract(name, tmp_path) - names.append(os.path.basename(name)) - shutil.move(tmp_path / name, tmp_path / names[-1]) - names = sorted(names) - assert names == ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt', - 'DictionaryStructures.txt', - 'DictionaryTags.txt', 'DictionaryTags_MNE.txt', - 'DictionaryTypes.txt', 'DictionaryTypes_MNE.txt'] - # IOD (MEGIN and MNE) - fif = dict(iod=dict(), tags=dict(), types=dict(), defines=dict()) - con = dict(iod=dict(), tags=dict(), types=dict(), defines=dict()) - fiff_version = None - for name in ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt']: - with open(tmp_path / name, "rb") as fid: - for line in fid: - line = line.decode('latin1').strip() - if line.startswith('# Packing revision'): - assert fiff_version is None - fiff_version = line.split()[-1] - if (line.startswith('#') or line.startswith('alias') or - len(line) == 0): - continue - line = line.split('"') - assert len(line) in (1, 2, 3) - desc = '' if len(line) == 1 else line[1] - line = line[0].split() - assert len(line) in (2, 3) - if len(line) == 2: - kind, id_ = line - else: - kind, id_, tagged = line - assert tagged in ('tagged',) - id_ = int(id_) - if id_ not in iod_dups: - assert id_ not in fif['iod'] - fif['iod'][id_] = [kind, desc] - # Tags (MEGIN) - with open(tmp_path / "DictionaryTags.txt", "rb") as fid: - for line in fid: - line = line.decode('ISO-8859-1').strip() - if (line.startswith('#') or line.startswith('alias') or - line.startswith(':') or len(line) == 0): - continue - line = line.split('"') - assert len(line) in (1, 2, 3), line - desc = '' if len(line) == 1 else line[1] - line = line[0].split() - assert len(line) == 4, line - kind, id_, dtype, unit = line - id_ = int(id_) - val = [kind, dtype, unit] - assert id_ not in fif['tags'], (fif['tags'].get(id_), val) - fif['tags'][id_] = val - # Tags (MNE) - with open(tmp_path / "DictionaryTags_MNE.txt", "rb") as fid: - for li, line in enumerate(fid): - line = line.decode('ISO-8859-1').strip() - # ignore continuation lines (*) - if (line.startswith('#') or line.startswith('alias') or - line.startswith(':') or line.startswith('*') or - len(line) == 0): - continue - # weird syntax around line 80: - if line in ('/*', '"'): - continue - line = line.split('"') - assert len(line) in (1, 2, 3), line - if len(line) == 3 and len(line[2]) > 0: - l2 = line[2].strip() - assert 
l2.startswith('/*') and l2.endswith('*/'), l2 - desc = '' if len(line) == 1 else line[1] - line = line[0].split() - assert len(line) == 3, (li + 1, line) - kind, id_, dtype = line - unit = '-' - id_ = int(id_) - val = [kind, dtype, unit] - if id_ not in tag_dups: - assert id_ not in fif['tags'], (fif['tags'].get(id_), val) - fif['tags'][id_] = val - - # Types and enums - in_ = None - re_prim = re.compile(r'^primitive\((.*)\)\s*(\S*)\s*"(.*)"$') - re_enum = re.compile(r'^enum\((\S*)\)\s*".*"$') - re_enum_entry = re.compile(r'\s*(\S*)\s*(\S*)\s*"(.*)"$') - re_defi = re.compile(r'#define\s*(\S*)\s*(\S*)\s*"(.*)"$') - used_enums = list() - for extra in ('', '_MNE'): - with open(tmp_path / f"DictionaryTypes{extra}.txt", "rb") as fid: - for li, line in enumerate(fid): - line = line.decode('ISO-8859-1').strip() - if in_ is None: - p = re_prim.match(line) - e = re_enum.match(line) - d = re_defi.match(line) - if p is not None: - t, s, d = p.groups() - s = int(s) - assert s not in fif['types'] - fif['types'][s] = [t, d] - elif e is not None: - # entering an enum - this_enum = e.group(1) - if this_enum not in fif: - used_enums.append(this_enum) - fif[this_enum] = dict() - con[this_enum] = dict() - in_ = fif[this_enum] - elif d is not None: - t, s, d = d.groups() - s = int(s) - fif['defines'][t] = [s, d] - else: - assert not line.startswith('enum(') - else: # in an enum - if line == '{': - continue - elif line == '}': - in_ = None - continue - t, s, d = re_enum_entry.match(line).groups() - s = int(s) - if t != 'ecg' and s != 3: # ecg defined the same way - assert s not in in_ - in_[s] = [t, d] - - # - # Assertions - # - - # Version - mne_version = '%d.%d' % (FIFF.FIFFC_MAJOR_VERSION, - FIFF.FIFFC_MINOR_VERSION) - assert fiff_version == mne_version - unknowns = list() - - # Assert that all our constants are in the FIF def - assert 'FIFFV_SSS_JOB_NOTHING' in dir(FIFF) - for name in sorted(dir(FIFF)): - if name.startswith('_') or name in _dir_ignore_names: - continue - check = None - val = getattr(FIFF, name) - if name in fif['defines']: - assert fif['defines'][name][0] == val - elif name.startswith('FIFFC_'): - # Checked above - assert name in ('FIFFC_MAJOR_VERSION', 'FIFFC_MINOR_VERSION', - 'FIFFC_VERSION') - elif name.startswith('FIFFB_'): - check = 'iod' - elif name.startswith('FIFFT_'): - check = 'types' - elif name.startswith('FIFFV_'): - if name.startswith('FIFFV_MNE_') and name.endswith('_ORI'): - check = 'mne_ori' - elif name.startswith('FIFFV_MNE_') and name.endswith('_COV'): - check = 'covariance_type' - elif name.startswith('FIFFV_MNE_COORD'): - check = 'coord' # weird wrapper - elif name.endswith('_CH') or '_QUAT_' in name or name in \ - ('FIFFV_DIPOLE_WAVE', 'FIFFV_GOODNESS_FIT', - 'FIFFV_HPI_ERR', 'FIFFV_HPI_G', 'FIFFV_HPI_MOV'): - check = 'ch_type' - elif name.startswith('FIFFV_SUBJ_'): - check = name.split('_')[2].lower() - elif name in ('FIFFV_POINT_LPA', 'FIFFV_POINT_NASION', - 'FIFFV_POINT_RPA', 'FIFFV_POINT_INION'): - check = 'cardinal_point' - else: - for check in used_enums: - if name.startswith('FIFFV_' + check.upper()): - break - else: - if name not in _tag_ignore_names: - raise RuntimeError('Could not find %s' % (name,)) - assert check in used_enums, name - if 'SSS' in check: - raise RuntimeError - elif name.startswith('FIFF_UNIT'): # units and multipliers - check = name.split('_')[1].lower() - elif name.startswith('FIFF_'): - check = 'tags' - else: - unknowns.append((name, val)) - if check is not None and name not in _tag_ignore_names: - assert val in fif[check], '%s: %s, %s' % 
(check, val, name) - if val in con[check]: - msg = "%s='%s' ?" % (name, con[check][val]) - assert _aliases.get(name) == con[check][val], msg - else: - con[check][val] = name - unknowns = '\n\t'.join('%s (%s)' % u for u in unknowns) - assert len(unknowns) == 0, 'Unknown types\n\t%s' % unknowns - - # Assert that all the FIF defs are in our constants - assert set(fif.keys()) == set(con.keys()) - for key in sorted(set(fif.keys()) - {'defines'}): - this_fif, this_con = fif[key], con[key] - assert len(set(this_fif.keys())) == len(this_fif) - assert len(set(this_con.keys())) == len(this_con) - missing_from_con = sorted(set(this_con.keys()) - set(this_fif.keys())) - assert missing_from_con == [], key - if key not in _ignore_incomplete_enums: - missing_from_fif = sorted(set(this_fif.keys()) - - set(this_con.keys())) - assert missing_from_fif == [], key - - # Assert that `coil_def.dat` has accurate descriptions of all enum(coil) - coil_def = _read_coil_defs() - coil_desc = np.array([c['desc'] for c in coil_def]) - coil_def = np.array([(c['coil_type'], c['accuracy']) - for c in coil_def], int) - mask = (coil_def[:, 1] == FWD.COIL_ACCURACY_ACCURATE) - coil_def = coil_def[mask, 0] - coil_desc = coil_desc[mask] - bad_list = [] - for key in fif['coil']: - if key not in _missing_coil_def and key not in coil_def: - bad_list.append((' %s,' % key).ljust(10) + - ' # ' + fif['coil'][key][1]) - assert len(bad_list) == 0, \ - '\nIn fiff-constants, missing from coil_def:\n' + '\n'.join(bad_list) - # Assert that enum(coil) has all `coil_def.dat` entries - for key, desc in zip(coil_def, coil_desc): - if key not in fif['coil']: - bad_list.append((' %s,' % key).ljust(10) + ' # ' + desc) - assert len(bad_list) == 0, \ - 'In coil_def, missing from fiff-constants:\n' + '\n'.join(bad_list) - - -@pytest.mark.parametrize('dict_, match, extras', [ - ({**_dig_kind_named, **_dig_cardinal_named}, 'FIFFV_POINT_', ()), - (_ch_kind_named, '^FIFFV_.*_CH$', - (FIFF.FIFFV_DIPOLE_WAVE, FIFF.FIFFV_GOODNESS_FIT)), - (_coord_frame_named, 'FIFFV_COORD_', ()), - (_ch_unit_named, 'FIFF_UNIT_', ()), - (_ch_unit_mul_named, 'FIFF_UNITM_', ()), - (_ch_coil_type_named, 'FIFFV_COIL_', ()), -]) -def test_dict_completion(dict_, match, extras): - """Test readable dict completions.""" - regex = re.compile(match) - got = set(FIFF[key] for key in FIFF if regex.search(key) is not None) - for e in extras: - got.add(e) - want = set(dict_) - assert got == want, match diff --git a/mne/io/tests/test_meas_info.py b/mne/io/tests/test_meas_info.py deleted file mode 100644 index b4fc87783bb..00000000000 --- a/mne/io/tests/test_meas_info.py +++ /dev/null @@ -1,1084 +0,0 @@ -# -*- coding: utf-8 -*- -# # Authors: MNE Developers -# Stefan Appelhoff -# -# License: BSD-3-Clause - -import hashlib -import pickle -from datetime import datetime, timedelta, timezone, date -from pathlib import Path - -import pytest -import numpy as np -from numpy.testing import assert_array_equal, assert_allclose -from scipy import sparse -import string - -from mne import (Epochs, read_events, pick_info, pick_types, Annotations, - read_evokeds, make_forward_solution, make_sphere_model, - setup_volume_source_space, write_forward_solution, - read_forward_solution, write_cov, read_cov, read_epochs, - compute_covariance) -from mne.channels import (read_polhemus_fastscan, make_standard_montage, - equalize_channels) -from mne.event import make_fixed_length_events -from mne.datasets import testing -from mne.io import (read_fiducials, write_fiducials, _coil_trans_to_loc, - _loc_to_coil_trans, 
read_raw_fif, read_info, write_info, - meas_info, Projection, BaseRaw, read_raw_ctf, RawArray) -from mne.io.constants import FIFF -from mne.io.write import _generate_meas_id, DATE_NONE -from mne.io.meas_info import (Info, create_info, _merge_info, - _force_update_info, RAW_INFO_FIELDS, - _bad_chans_comp, _get_valid_units, - anonymize_info, _stamp_to_dt, _dt_to_stamp, - _add_timedelta_to_stamp, _read_extended_ch_info) -from mne.minimum_norm import (make_inverse_operator, write_inverse_operator, - read_inverse_operator, apply_inverse) -from mne.io._digitization import _write_dig_points, _make_dig_points, DigPoint -from mne.transforms import Transform -from mne.utils import catch_logging, assert_object_equal, _record_warnings - -fiducials_fname = ( - Path(__file__).parent.parent.parent - / "data" - / "fsaverage" - / "fsaverage-fiducials.fif" -) -base_dir = Path(__file__).parent / "data" -raw_fname = base_dir / "test_raw.fif" -chpi_fname = base_dir / "test_chpi_raw_sss.fif" -event_name = base_dir / "test-eve.fif" - -kit_data_dir = Path(__file__).parent.parent / "kit" / "tests" / "data" -hsp_fname = kit_data_dir / "test_hsp.txt" -elp_fname = kit_data_dir / "test_elp.txt" - -data_path = testing.data_path(download=False) -sss_path = data_path / "SSS" -sss_ctc_fname = sss_path / "test_move_anon_crossTalk_raw_sss.fif" -ctf_fname = data_path / "CTF" / "testdata_ctf.ds" -raw_invalid_bday_fname = data_path / "misc" / "sample_invalid_birthday_raw.fif" - - -@pytest.mark.parametrize('kwargs, want', [ - (dict(meg=False, eeg=True), [0]), - (dict(meg=False, fnirs=True), [5]), - (dict(meg=False, fnirs='hbo'), [5]), - (dict(meg=False, fnirs='hbr'), []), - (dict(meg=False, misc=True), [1]), - (dict(meg=True), [2, 3, 4]), - (dict(meg='grad'), [2, 3]), - (dict(meg='planar1'), [2]), - (dict(meg='planar2'), [3]), - (dict(meg='mag'), [4]), -]) -def test_create_info_grad(kwargs, want): - """Test create_info behavior with grad coils.""" - info = create_info(6, 256, ["eeg", "misc", "grad", "grad", "mag", "hbo"]) - # Put these in an order such that grads get named "2" and "3", since - # they get picked based first on coil_type then ch_name... 
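For reference, the picking behavior this parametrization encodes can be reproduced directly with the public API; the expected index arrays in the comments below are taken straight from the `want` column above:

```python
from mne import create_info, pick_types

info = create_info(6, 256, ["eeg", "misc", "grad", "grad", "mag", "hbo"])
print(pick_types(info, meg="grad"))              # [2 3], both planar gradiometers
print(pick_types(info, meg="mag"))               # [4]
print(pick_types(info, meg=False, fnirs="hbo"))  # [5]
print(pick_types(info, meg=False, eeg=True))     # [0]
```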
- assert [ch['ch_name'] for ch in info['chs'] - if ch['coil_type'] == FIFF.FIFFV_COIL_VV_PLANAR_T1] == ['2', '3'] - picks = pick_types(info, **kwargs) - assert_array_equal(picks, want) - - -def test_get_valid_units(): - """Test the valid units.""" - valid_units = _get_valid_units() - assert isinstance(valid_units, tuple) - assert all(isinstance(unit, str) for unit in valid_units) - assert "n/a" in valid_units - - -def test_coil_trans(): - """Test loc<->coil_trans functions.""" - rng = np.random.RandomState(0) - x = rng.randn(4, 4) - x[3] = [0, 0, 0, 1] - assert_allclose(_loc_to_coil_trans(_coil_trans_to_loc(x)), x) - x = rng.randn(12) - assert_allclose(_coil_trans_to_loc(_loc_to_coil_trans(x)), x) - - -def test_make_info(): - """Test some create_info properties.""" - n_ch = np.longlong(1) - info = create_info(n_ch, 1000., 'eeg') - assert set(info.keys()) == set(RAW_INFO_FIELDS) - - coil_types = {ch['coil_type'] for ch in info['chs']} - assert FIFF.FIFFV_COIL_EEG in coil_types - - pytest.raises(TypeError, create_info, ch_names='Test Ch', sfreq=1000) - pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=-1000) - pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=1000, - ch_types=['eeg', 'eeg']) - pytest.raises(TypeError, create_info, ch_names=[np.array([1])], - sfreq=1000) - pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000, - ch_types=np.array([1])) - pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000, - ch_types='awesome') - pytest.raises(TypeError, create_info, ['Test Ch'], sfreq=1000, - montage=np.array([1])) - m = make_standard_montage('biosemi32') - info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg') - info.set_montage(m) - ch_pos = [ch['loc'][:3] for ch in info['chs']] - ch_pos_mon = m._get_ch_pos() - ch_pos_mon = np.array( - [ch_pos_mon[ch_name] for ch_name in info['ch_names']]) - # transform to head - ch_pos_mon += (0., 0., 0.04014) - assert_allclose(ch_pos, ch_pos_mon, atol=1e-5) - - -def test_duplicate_name_correction(): - """Test duplicate channel names with running number.""" - # When running number is possible - info = create_info(['A', 'A', 'A'], 1000., verbose='error') - assert info['ch_names'] == ['A-0', 'A-1', 'A-2'] - - # When running number is not possible but alpha numeric is - info = create_info(['A', 'A', 'A-0'], 1000., verbose='error') - assert info['ch_names'] == ['A-a', 'A-1', 'A-0'] - - # When a single addition is not sufficient - with pytest.raises(ValueError, match='Adding a single alphanumeric'): - ch_n = ['A', 'A'] - # add all options for first duplicate channel (0) - ch_n.extend([f'{ch_n[0]}-{c}' for c in string.ascii_lowercase + '0']) - create_info(ch_n, 1000., verbose='error') - - -def test_fiducials_io(tmp_path): - """Test fiducials i/o.""" - pts, coord_frame = read_fiducials(fiducials_fname) - assert pts[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI - assert pts[0]['ident'] == FIFF.FIFFV_POINT_CARDINAL - - temp_fname = tmp_path / 'test.fif' - write_fiducials(temp_fname, pts, coord_frame) - pts_1, coord_frame_1 = read_fiducials(temp_fname) - assert coord_frame == coord_frame_1 - for pt, pt_1 in zip(pts, pts_1): - assert pt['kind'] == pt_1['kind'] - assert pt['ident'] == pt_1['ident'] - assert pt['coord_frame'] == pt_1['coord_frame'] - assert_array_equal(pt['r'], pt_1['r']) - assert isinstance(pt, DigPoint) - assert isinstance(pt_1, DigPoint) - - # test safeguards - pts[0]['coord_frame'] += 1 - with pytest.raises(ValueError, match='coord_frame entries that are incom'): - 
write_fiducials(temp_fname, pts, coord_frame, overwrite=True) - - -def test_info(): - """Test info object.""" - raw = read_raw_fif(raw_fname) - event_id, tmin, tmax = 1, -0.2, 0.5 - events = read_events(event_name) - event_id = int(events[0, 2]) - epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None) - - evoked = epochs.average() - - # Test subclassing was successful. - info = Info(a=7, b='aaaaa') - assert ('a' in info) - assert ('b' in info) - - # Test info attribute in API objects - for obj in [raw, epochs, evoked]: - assert (isinstance(obj.info, Info)) - rep = repr(obj.info) - assert '2002-12-03 19:01:10 UTC' in rep, rep - assert '146 items (3 Cardinal, 4 HPI, 61 EEG, 78 Extra)' in rep - dig_rep = repr(obj.info['dig'][0]) - assert 'LPA' in dig_rep, dig_rep - assert '(-71.4, 0.0, 0.0) mm' in dig_rep, dig_rep - assert 'head frame' in dig_rep, dig_rep - # Test our BunchConstNamed support - for func in (str, repr): - assert '4 (FIFFV_COORD_HEAD)' == \ - func(obj.info['dig'][0]['coord_frame']) - - # Test read-only fields - info = raw.info.copy() - nchan = len(info['chs']) - ch_names = [ch['ch_name'] for ch in info['chs']] - assert info['nchan'] == nchan - assert list(info['ch_names']) == ch_names - - # Deleting of regular fields should work - info['experimenter'] = 'bar' - del info['experimenter'] - - # Test updating of fields - del info['chs'][-1] - info._update_redundant() - assert info['nchan'] == nchan - 1 - assert list(info['ch_names']) == ch_names[:-1] - - info['chs'][0]['ch_name'] = 'foo' - info._update_redundant() - assert info['ch_names'][0] == 'foo' - - # Test casting to and from a dict - info_dict = dict(info) - info2 = Info(info_dict) - assert info == info2 - - -def test_read_write_info(tmp_path): - """Test IO of info.""" - info = read_info(raw_fname) - temp_file = tmp_path / 'info.fif' - # check for bug `#1198` - info['dev_head_t']['trans'] = np.eye(4) - t1 = info['dev_head_t']['trans'] - write_info(temp_file, info) - info2 = read_info(temp_file) - t2 = info2['dev_head_t']['trans'] - assert (len(info['chs']) == len(info2['chs'])) - assert_array_equal(t1, t2) - # proc_history (e.g., GH#1875) - creator = u'é' - info = read_info(chpi_fname) - info['proc_history'][0]['creator'] = creator - info['hpi_meas'][0]['creator'] = creator - info['subject_info']['his_id'] = creator - info['subject_info']['weight'] = 11.1 - info['subject_info']['height'] = 2.3 - - with info._unlock(): - if info['gantry_angle'] is None: # future testing data may include it - info['gantry_angle'] = 0. 
# Elekta supine position - gantry_angle = info['gantry_angle'] - - meas_id = info['meas_id'] - write_info(temp_file, info) - info = read_info(temp_file) - assert info['proc_history'][0]['creator'] == creator - assert info['hpi_meas'][0]['creator'] == creator - assert info['subject_info']['his_id'] == creator - assert info['gantry_angle'] == gantry_angle - assert info['subject_info']['height'] == 2.3 - assert info['subject_info']['weight'] == 11.1 - for key in ['secs', 'usecs', 'version']: - assert info['meas_id'][key] == meas_id[key] - assert_array_equal(info['meas_id']['machid'], meas_id['machid']) - - # Test that writing twice produces the same file - m1 = hashlib.md5() - with open(temp_file, 'rb') as fid: - m1.update(fid.read()) - m1 = m1.hexdigest() - temp_file_2 = tmp_path / 'info2.fif' - assert temp_file_2 != temp_file - write_info(temp_file_2, info) - m2 = hashlib.md5() - with open(str(temp_file_2), 'rb') as fid: - m2.update(fid.read()) - m2 = m2.hexdigest() - assert m1 == m2 - - info = read_info(raw_fname) - with info._unlock(): - info['meas_date'] = None - anonymize_info(info, verbose='error') - assert info['meas_date'] is None - tmp_fname_3 = tmp_path / 'info3.fif' - write_info(tmp_fname_3, info) - assert info['meas_date'] is None - info2 = read_info(tmp_fname_3) - assert info2['meas_date'] is None - - # Check that having a very old date in fine until you try to save it to fif - with info._unlock(check_after=True): - info['meas_date'] = datetime(1800, 1, 1, 0, 0, 0, tzinfo=timezone.utc) - fname = tmp_path / 'test.fif' - with pytest.raises(RuntimeError, match='must be between '): - write_info(fname, info) - - -def test_io_dig_points(tmp_path): - """Test Writing for dig files.""" - points = read_polhemus_fastscan(hsp_fname, on_header_missing='ignore') - - dest = tmp_path / 'test.txt' - dest_bad = tmp_path / 'test.mne' - with pytest.raises(ValueError, match='must be of shape'): - _write_dig_points(dest, points[:, :2]) - with pytest.raises(ValueError, match='extension'): - _write_dig_points(dest_bad, points) - _write_dig_points(dest, points) - points1 = read_polhemus_fastscan( - dest, unit='m', on_header_missing='ignore') - err = "Dig points diverged after writing and reading." - assert_array_equal(points, points1, err) - - points2 = np.array([[-106.93, 99.80], [99.80, 68.81]]) - np.savetxt(dest, points2, delimiter='\t', newline='\n') - with pytest.raises(ValueError, match='must be of shape'): - with pytest.warns(RuntimeWarning, match='FastSCAN header'): - read_polhemus_fastscan(dest, on_header_missing='warn') - - -def test_io_coord_frame(tmp_path): - """Test round trip for coordinate frame.""" - fname = tmp_path / 'test.fif' - for ch_type in ('eeg', 'seeg', 'ecog', 'dbs', 'hbo', 'hbr'): - info = create_info( - ch_names=['Test Ch'], sfreq=1000., ch_types=[ch_type]) - info['chs'][0]['loc'][:3] = [0.05, 0.01, -0.03] - write_info(fname, info) - info2 = read_info(fname) - assert info2['chs'][0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD - - -def test_make_dig_points(): - """Test application of Polhemus HSP to info.""" - extra_points = read_polhemus_fastscan( - hsp_fname, on_header_missing='ignore') - info = create_info(ch_names=['Test Ch'], sfreq=1000.) 
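Stepping back from the dig-point setup for a moment, the basic Info write/read round trip that the I/O tests above rely on looks like this with the public API (the file name is hypothetical, and `read_info`/`write_info` are imported from `mne.io` as in the pre-refactor layout these tests use):

```python
import mne
from mne.io import read_info, write_info

info = mne.create_info(["EEG 001"], sfreq=1000.0, ch_types="eeg")
info["chs"][0]["loc"][:3] = [0.05, 0.01, -0.03]  # sensor position in head coords (m)
write_info("example-info.fif", info)             # hypothetical output file
info2 = read_info("example-info.fif")
print(info2["chs"][0]["coord_frame"])            # FIFFV_COORD_HEAD (4), per test_io_coord_frame above
```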
- assert info['dig'] is None - - with info._unlock(): - info['dig'] = _make_dig_points(extra_points=extra_points) - assert (info['dig']) - assert_allclose(info['dig'][0]['r'], [-.10693, .09980, .06881]) - - elp_points = read_polhemus_fastscan(elp_fname, on_header_missing='ignore') - nasion, lpa, rpa = elp_points[:3] - info = create_info(ch_names=['Test Ch'], sfreq=1000.) - assert info['dig'] is None - - with info._unlock(): - info['dig'] = _make_dig_points(nasion, lpa, rpa, elp_points[3:], None) - assert (info['dig']) - idx = [d['ident'] for d in info['dig']].index(FIFF.FIFFV_POINT_NASION) - assert_allclose(info['dig'][idx]['r'], [.0013930, .0131613, -.0046967]) - pytest.raises(ValueError, _make_dig_points, nasion[:2]) - pytest.raises(ValueError, _make_dig_points, None, lpa[:2]) - pytest.raises(ValueError, _make_dig_points, None, None, rpa[:2]) - pytest.raises(ValueError, _make_dig_points, None, None, None, - elp_points[:, :2]) - pytest.raises(ValueError, _make_dig_points, None, None, None, None, - elp_points[:, :2]) - - -def test_redundant(): - """Test some of the redundant properties of info.""" - # Indexing - info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.) - assert info['ch_names'][0] == 'a' - assert info['ch_names'][1] == 'b' - assert info['ch_names'][2] == 'c' - - # Equality - assert info['ch_names'] == info['ch_names'] - assert info['ch_names'] == ['a', 'b', 'c'] - - # No channels in info - info = create_info(ch_names=[], sfreq=1000.) - assert info['ch_names'] == [] - - # List should be read-only - info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.) - - -def test_merge_info(): - """Test merging of multiple Info objects.""" - info_a = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.) - info_b = create_info(ch_names=['d', 'e', 'f'], sfreq=1000.) - info_merged = _merge_info([info_a, info_b]) - assert info_merged['nchan'], 6 - assert info_merged['ch_names'], ['a', 'b', 'c', 'd', 'e', 'f'] - pytest.raises(ValueError, _merge_info, [info_a, info_a]) - - # Testing for force updates before merging - info_c = create_info(ch_names=['g', 'h', 'i'], sfreq=500.) - # This will break because sfreq is not equal - pytest.raises(RuntimeError, _merge_info, [info_a, info_c]) - _force_update_info(info_a, info_c) - assert (info_c['sfreq'] == info_a['sfreq']) - assert (info_c['ch_names'][0] != info_a['ch_names'][0]) - # Make sure it works now - _merge_info([info_a, info_c]) - # Check that you must supply Info - pytest.raises(ValueError, _force_update_info, info_a, - dict([('sfreq', 1000.)])) - # KIT System-ID - info_a._unlocked = info_b._unlocked = True - info_a['kit_system_id'] = 50 - assert _merge_info((info_a, info_b))['kit_system_id'] == 50 - info_b['kit_system_id'] = 50 - assert _merge_info((info_a, info_b))['kit_system_id'] == 50 - info_b['kit_system_id'] = 60 - pytest.raises(ValueError, _merge_info, (info_a, info_b)) - - # hpi infos - info_d = create_info(ch_names=['d', 'e', 'f'], sfreq=1000.) 
- info_merged = _merge_info([info_a, info_d]) - assert not info_merged['hpi_meas'] - assert not info_merged['hpi_results'] - info_a['hpi_meas'] = [{'f1': 3, 'f2': 4}] - assert _merge_info([info_a, info_d])['hpi_meas'] == info_a['hpi_meas'] - info_d._unlocked = True - info_d['hpi_meas'] = [{'f1': 3, 'f2': 4}] - assert _merge_info([info_a, info_d])['hpi_meas'] == info_d['hpi_meas'] - # This will break because of inconsistency - info_d['hpi_meas'] = [{'f1': 3, 'f2': 5}] - pytest.raises(ValueError, _merge_info, [info_a, info_d]) - - info_0 = read_info(raw_fname) - info_0['bads'] = ['MEG 2443', 'EEG 053'] - assert len(info_0['chs']) == 376 - assert len(info_0['dig']) == 146 - info_1 = create_info(["STI YYY"], info_0['sfreq'], ['stim']) - assert info_1['bads'] == [] - info_out = _merge_info([info_0, info_1], force_update_to_first=True) - assert len(info_out['chs']) == 377 - assert len(info_out['bads']) == 2 - assert len(info_out['dig']) == 146 - assert len(info_0['chs']) == 376 - assert len(info_0['bads']) == 2 - assert len(info_0['dig']) == 146 - - -def test_check_consistency(): - """Test consistency check of Info objects.""" - info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.) - - # This should pass - info._check_consistency() - - # Info without any channels - info_empty = create_info(ch_names=[], sfreq=1000.) - info_empty._check_consistency() - - # Bad channels that are not in the info object - info2 = info.copy() - info2['bads'] = ['b', 'foo', 'bar'] - pytest.raises(RuntimeError, info2._check_consistency) - - # Bad data types - info2 = info.copy() - with info2._unlock(): - info2['sfreq'] = 'foo' - pytest.raises(ValueError, info2._check_consistency) - - info2 = info.copy() - with info2._unlock(): - info2['highpass'] = 'foo' - pytest.raises(ValueError, info2._check_consistency) - - info2 = info.copy() - with info2._unlock(): - info2['lowpass'] = 'foo' - pytest.raises(ValueError, info2._check_consistency) - - # Silent type conversion to float - info2 = info.copy() - with info2._unlock(check_after=True): - info2['sfreq'] = 1 - info2['highpass'] = 2 - info2['lowpass'] = 2 - assert (isinstance(info2['sfreq'], float)) - assert (isinstance(info2['highpass'], float)) - assert (isinstance(info2['lowpass'], float)) - - # Duplicate channel names - info2 = info.copy() - with info2._unlock(): - info2['chs'][2]['ch_name'] = 'b' - pytest.raises(RuntimeError, info2._check_consistency) - - # Duplicates appended with running numbers - with pytest.warns(RuntimeWarning, match='Channel names are not'): - info3 = create_info(ch_names=['a', 'b', 'b', 'c', 'b'], sfreq=1000.) 
- assert_array_equal(info3['ch_names'], ['a', 'b-0', 'b-1', 'c', 'b-2']) - - # a few bad ones - idx = 0 - ch = info['chs'][idx] - for key, bad, match in (('ch_name', 1., 'not a string'), - ('loc', np.zeros(15), '12 elements'), - ('cal', np.ones(1), 'float or int')): - info._check_consistency() # okay - old = ch[key] - ch[key] = bad - if key == 'ch_name': - info['ch_names'][idx] = bad - with pytest.raises(TypeError, match=match): - info._check_consistency() - ch[key] = old - if key == 'ch_name': - info['ch_names'][idx] = old - - # bad channel entries - info2 = info.copy() - info2['chs'][0]['foo'] = 'bar' - with pytest.raises(KeyError, match='key errantly present'): - info2._check_consistency() - info2 = info.copy() - del info2['chs'][0]['loc'] - with pytest.raises(KeyError, match='key missing'): - info2._check_consistency() - - -def _test_anonymize_info(base_info): - """Test that sensitive information can be anonymized.""" - pytest.raises(TypeError, anonymize_info, 'foo') - - default_anon_dos = datetime(2000, 1, 1, 0, 0, 0, tzinfo=timezone.utc) - default_str = "mne_anonymize" - default_subject_id = 0 - default_desc = ("Anonymized using a time shift" + - " to preserve age at acquisition") - - # Test no error for incomplete info - info = base_info.copy() - info.pop('file_id') - anonymize_info(info) - - # Fake some subject data - meas_date = datetime(2010, 1, 1, 0, 0, 0, tzinfo=timezone.utc) - with base_info._unlock(): - base_info['meas_date'] = meas_date - base_info['subject_info'] = dict(id=1, - his_id='foobar', - last_name='bar', - first_name='bar', - birthday=(1987, 4, 8), - sex=0, hand=1) - - # generate expected info... - # first expected result with no options. - # will move DOS from 2010/1/1 to 2000/1/1 which is 3653 days. - exp_info = base_info.copy() - exp_info._unlocked = True - exp_info['description'] = default_desc - exp_info['experimenter'] = default_str - exp_info['proj_name'] = default_str - exp_info['proj_id'] = np.array([0]) - exp_info['subject_info']['first_name'] = default_str - exp_info['subject_info']['last_name'] = default_str - exp_info['subject_info']['id'] = default_subject_id - exp_info['subject_info']['his_id'] = str(default_subject_id) - exp_info['subject_info']['sex'] = 0 - del exp_info['subject_info']['hand'] # there's no "unknown" setting - - # this bday is 3653 days different. the change in day is due to a - # different number of leap days between 1987 and 1977 than between - # 2010 and 2000. 
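The day-level shift described in the comment above is plain calendar arithmetic; a standalone check:

```python
from datetime import date, timedelta

# Default anonymization moves the acquisition date 2010-01-01 back to 2000-01-01:
delta = date(2010, 1, 1) - date(2000, 1, 1)
assert delta.days == 3653  # ten years plus three leap days (2000, 2004, 2008)

# The same 3653-day shift lands the birthday one day "early", because the span
# 1977-1987 contains only two leap days (1980, 1984):
assert date(1987, 4, 8) - timedelta(days=3653) == date(1977, 4, 7)
```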
- exp_info['subject_info']['birthday'] = (1977, 4, 7) - exp_info['meas_date'] = default_anon_dos - exp_info._unlocked = False - - # make copies - exp_info_3 = exp_info.copy() - - # adjust each expected outcome - delta_t = timedelta(days=3653) - for key in ('file_id', 'meas_id'): - value = exp_info.get(key) - if value is not None: - assert 'msecs' not in value - tmp = _add_timedelta_to_stamp( - (value['secs'], value['usecs']), -delta_t) - value['secs'] = tmp[0] - value['usecs'] = tmp[1] - value['machid'][:] = 0 - - # exp 2 tests the keep_his option - exp_info_2 = exp_info.copy() - with exp_info_2._unlock(): - exp_info_2['subject_info']['his_id'] = 'foobar' - exp_info_2['subject_info']['sex'] = 0 - exp_info_2['subject_info']['hand'] = 1 - - # exp 3 tests is a supplied daysback - delta_t_2 = timedelta(days=43) - with exp_info_3._unlock(): - exp_info_3['subject_info']['birthday'] = (1987, 2, 24) - exp_info_3['meas_date'] = meas_date - delta_t_2 - for key in ('file_id', 'meas_id'): - value = exp_info_3.get(key) - if value is not None: - assert 'msecs' not in value - tmp = _add_timedelta_to_stamp( - (value['secs'], value['usecs']), -delta_t_2) - value['secs'] = tmp[0] - value['usecs'] = tmp[1] - value['machid'][:] = 0 - - # exp 4 tests is a supplied daysback - delta_t_3 = timedelta(days=223 + 364 * 500) - - new_info = anonymize_info(base_info.copy()) - assert_object_equal(new_info, exp_info) - - new_info = anonymize_info(base_info.copy(), keep_his=True) - assert_object_equal(new_info, exp_info_2) - - new_info = anonymize_info(base_info.copy(), daysback=delta_t_2.days) - assert_object_equal(new_info, exp_info_3) - - with pytest.raises(RuntimeError, match='anonymize_info generated'): - anonymize_info(base_info.copy(), daysback=delta_t_3.days) - # assert_object_equal(new_info, exp_info_4) - - # test with meas_date = None - with base_info._unlock(): - base_info['meas_date'] = None - exp_info_3._unlocked = True - exp_info_3['meas_date'] = None - exp_info_3['file_id']['secs'] = DATE_NONE[0] - exp_info_3['file_id']['usecs'] = DATE_NONE[1] - exp_info_3['meas_id']['secs'] = DATE_NONE[0] - exp_info_3['meas_id']['usecs'] = DATE_NONE[1] - exp_info_3['subject_info'].pop('birthday', None) - exp_info_3._unlocked = False - - if base_info['meas_date'] is None: - with pytest.warns(RuntimeWarning, match='all information'): - new_info = anonymize_info(base_info.copy(), - daysback=delta_t_2.days) - else: - new_info = anonymize_info(base_info.copy(), daysback=delta_t_2.days) - assert_object_equal(new_info, exp_info_3) - - with _record_warnings(): # meas_date is None - new_info = anonymize_info(base_info.copy()) - assert_object_equal(new_info, exp_info_3) - - -@pytest.mark.parametrize('stamp, dt', [ - [(1346981585, 835782), (2012, 9, 7, 1, 33, 5, 835782)], - # test old dates for BIDS anonymization - [(-1533443343, 24382), (1921, 5, 29, 19, 30, 57, 24382)], - # gh-7116 - [(-908196946, 988669), (1941, 3, 22, 11, 4, 14, 988669)], -]) -def test_meas_date_convert(stamp, dt): - """Test conversions of meas_date to datetime objects.""" - meas_datetime = _stamp_to_dt(stamp) - stamp2 = _dt_to_stamp(meas_datetime) - assert stamp == stamp2 - assert meas_datetime == datetime(*dt, tzinfo=timezone.utc) - # smoke test for info __repr__ - info = create_info(1, 1000., 'eeg') - with info._unlock(): - info['meas_date'] = meas_datetime - assert str(dt[0]) in repr(info) - - -def test_anonymize(tmp_path): - """Test that sensitive information can be anonymized.""" - pytest.raises(TypeError, anonymize_info, 'foo') - - # Fake some subject data 
- raw = read_raw_fif(raw_fname) - raw.set_annotations(Annotations(onset=[0, 1], - duration=[1, 1], - description='dummy', - orig_time=None)) - first_samp = raw.first_samp - expected_onset = np.arange(2) + raw._first_time - assert raw.first_samp == first_samp - assert_allclose(raw.annotations.onset, expected_onset) - - # test mne.anonymize_info() - events = read_events(event_name) - epochs = Epochs(raw, events[:1], 2, 0., 0.1, baseline=None) - _test_anonymize_info(raw.info.copy()) - _test_anonymize_info(epochs.info.copy()) - - # test instance methods & I/O roundtrip - for inst, keep_his in zip((raw, epochs), (True, False)): - inst = inst.copy() - - subject_info = dict(his_id='Volunteer', sex=2, hand=1) - inst.info['subject_info'] = subject_info - inst.anonymize(keep_his=keep_his) - - si = inst.info['subject_info'] - if keep_his: - assert si == subject_info - else: - assert si['his_id'] == '0' - assert si['sex'] == 0 - assert 'hand' not in si - - # write to disk & read back - inst_type = 'raw' if isinstance(inst, BaseRaw) else 'epo' - fname = 'tmp_raw.fif' if inst_type == 'raw' else 'tmp_epo.fif' - out_path = tmp_path / fname - inst.save(out_path, overwrite=True) - if inst_type == 'raw': - read_raw_fif(out_path) - else: - read_epochs(out_path) - - # test that annotations are correctly zeroed - raw.anonymize() - assert raw.first_samp == first_samp - assert_allclose(raw.annotations.onset, expected_onset) - assert raw.annotations.orig_time == raw.info['meas_date'] - stamp = _dt_to_stamp(raw.info['meas_date']) - assert raw.annotations.orig_time == _stamp_to_dt(stamp) - - with raw.info._unlock(): - raw.info['meas_date'] = None - raw.anonymize(daysback=None) - with pytest.warns(RuntimeWarning, match='None'): - raw.anonymize(daysback=123) - assert raw.annotations.orig_time is None - assert raw.first_samp == first_samp - assert_allclose(raw.annotations.onset, expected_onset) - - -def test_anonymize_with_io(tmp_path): - """Test that IO does not break anonymization.""" - raw = read_raw_fif(raw_fname) - - temp_path = tmp_path / 'tmp_raw.fif' - raw.save(temp_path) - - raw2 = read_raw_fif(temp_path) - - daysback = (raw2.info['meas_date'].date() - date(1924, 1, 1)).days - raw2.anonymize(daysback=daysback) - - -@testing.requires_testing_data -def test_csr_csc(tmp_path): - """Test CSR and CSC.""" - info = read_info(sss_ctc_fname) - info = pick_info(info, pick_types(info, meg=True, exclude=[])) - sss_ctc = info['proc_history'][0]['max_info']['sss_ctc'] - ct = sss_ctc['decoupler'].copy() - # CSC - assert isinstance(ct, sparse.csc_matrix) - fname = tmp_path / 'test.fif' - write_info(fname, info) - info_read = read_info(fname) - ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler'] - assert isinstance(ct_read, sparse.csc_matrix) - assert_array_equal(ct_read.toarray(), ct.toarray()) - # Now CSR - csr = ct.tocsr() - assert isinstance(csr, sparse.csr_matrix) - assert_array_equal(csr.toarray(), ct.toarray()) - info['proc_history'][0]['max_info']['sss_ctc']['decoupler'] = csr - fname = tmp_path / 'test1.fif' - write_info(fname, info) - info_read = read_info(fname) - ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler'] - assert isinstance(ct_read, sparse.csc_matrix) # this gets cast to CSC - assert_array_equal(ct_read.toarray(), ct.toarray()) - - -@testing.requires_testing_data -def test_check_compensation_consistency(): - """Test check picks compensation.""" - raw = read_raw_ctf(ctf_fname, preload=False) - events = make_fixed_length_events(raw, 99999) - picks = 
pick_types(raw.info, meg=True, exclude=[], ref_meg=True) - pick_ch_names = [raw.info['ch_names'][idx] for idx in picks] - for (comp, expected_result) in zip([0, 1], [False, False]): - raw.apply_gradient_compensation(comp) - ret, missing = _bad_chans_comp(raw.info, pick_ch_names) - assert ret == expected_result - assert len(missing) == 0 - Epochs(raw, events, None, -0.2, 0.2, preload=False, picks=picks) - - picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=False) - pick_ch_names = [raw.info['ch_names'][idx] for idx in picks] - - for (comp, expected_result) in zip([0, 1], [False, True]): - raw.apply_gradient_compensation(comp) - ret, missing = _bad_chans_comp(raw.info, pick_ch_names) - assert ret == expected_result - assert len(missing) == 17 - with catch_logging() as log: - Epochs(raw, events, None, -0.2, 0.2, preload=False, - picks=picks, verbose=True) - assert 'Removing 5 compensators' in log.getvalue() - - -def test_field_round_trip(tmp_path): - """Test round-trip for new fields.""" - info = create_info(1, 1000., 'eeg') - with info._unlock(): - for key in ('file_id', 'meas_id'): - info[key] = _generate_meas_id() - info['device_info'] = dict( - type='a', model='b', serial='c', site='d') - info['helium_info'] = dict( - he_level_raw=1., helium_level=2., - orig_file_guid='e', meas_date=(1, 2)) - fname = tmp_path / 'temp-info.fif' - write_info(fname, info) - info_read = read_info(fname) - assert_object_equal(info, info_read) - - -def test_equalize_channels(): - """Test equalization of channels for instances of Info.""" - info1 = create_info(['CH1', 'CH2', 'CH3'], sfreq=1.) - info2 = create_info(['CH4', 'CH2', 'CH1'], sfreq=1.) - info1, info2 = equalize_channels([info1, info2]) - - assert info1.ch_names == ['CH1', 'CH2'] - assert info2.ch_names == ['CH1', 'CH2'] - - -def test_repr(): - """Test Info repr.""" - info = create_info(1, 1000, 'eeg') - assert '7 non-empty values' in repr(info) - - t = Transform('meg', 'head', np.ones((4, 4))) - info['dev_head_t'] = t - assert 'dev_head_t: MEG device -> head transform' in repr(info) - - -def test_repr_html(): - """Test Info HTML repr.""" - info = read_info(raw_fname) - assert 'Projections' in info._repr_html_() - with info._unlock(): - info['projs'] = [] - assert 'Projections' not in info._repr_html_() - info['bads'] = [] - assert 'None' in info._repr_html_() - info['bads'] = ['MEG 2443', 'EEG 053'] - assert 'MEG 2443' in info._repr_html_() - assert 'EEG 053' in info._repr_html_() - - html = info._repr_html_() - for ch in ['204 Gradiometers', '102 Magnetometers', '9 Stimulus', - '60 EEG', '1 EOG']: - assert ch in html - - -@testing.requires_testing_data -def test_invalid_subject_birthday(): - """Test handling of an invalid birthday in the raw file.""" - with pytest.warns(RuntimeWarning, match='No birthday will be set'): - raw = read_raw_fif(raw_invalid_bday_fname) - assert 'birthday' not in raw.info['subject_info'] - - -@pytest.mark.parametrize('fname', [ - pytest.param(ctf_fname, marks=testing._pytest_mark()), - raw_fname, -]) -def test_channel_name_limit(tmp_path, monkeypatch, fname): - """Test that our remapping works properly.""" - # - # raw - # - if fname.suffix == ".fif": - raw = read_raw_fif(fname) - raw.pick_channels(raw.ch_names[:3]) - ref_names = [] - data_names = raw.ch_names - else: - assert fname.suffix == ".ds" - raw = read_raw_ctf(fname) - ref_names = [raw.ch_names[pick] - for pick in pick_types(raw.info, meg=False, ref_meg=True)] - data_names = raw.ch_names[32:35] - proj = dict(data=np.ones((1, len(data_names))), - 
col_names=data_names[:2].copy(), row_names=None, nrow=1) - proj = Projection( - data=proj, active=False, desc='test', kind=0, explained_var=0.) - raw.add_proj(proj, remove_existing=True) - raw.info.normalize_proj() - raw.pick_channels(data_names + ref_names).crop(0, 2) - long_names = ['123456789abcdefg' + name for name in raw.ch_names] - fname = tmp_path / 'test-raw.fif' - with catch_logging() as log: - raw.save(fname) - log = log.getvalue() - assert 'truncated' not in log - rename = dict(zip(raw.ch_names, long_names)) - long_data_names = [rename[name] for name in data_names] - long_proj_names = long_data_names[:2] - raw.rename_channels(rename) - for comp in raw.info['comps']: - for key in ('row_names', 'col_names'): - for name in comp['data'][key]: - assert name in raw.ch_names - if raw.info['comps']: - assert raw.compensation_grade == 0 - raw.apply_gradient_compensation(3) - assert raw.compensation_grade == 3 - assert len(raw.info['projs']) == 1 - assert raw.info['projs'][0]['data']['col_names'] == long_proj_names - raw.info['bads'] = bads = long_data_names[2:3] - good_long_data_names = [ - name for name in long_data_names if name not in bads] - with catch_logging() as log: - raw.save(fname, overwrite=True, verbose=True) - log = log.getvalue() - assert 'truncated to 15' in log - for name in raw.ch_names: - assert len(name) > 15 - # first read the full waytmp_path - with catch_logging() as log: - raw_read = read_raw_fif(fname, verbose=True) - log = log.getvalue() - assert 'Reading extended channel information' in log - for ra in (raw, raw_read): - assert ra.ch_names == long_names - assert raw_read.info['projs'][0]['data']['col_names'] == long_proj_names - del raw_read - # next read as if no longer names could be read - monkeypatch.setattr( - meas_info, '_read_extended_ch_info', lambda x, y, z: None) - with catch_logging() as log: - raw_read = read_raw_fif(fname, verbose=True) - log = log.getvalue() - assert 'extended' not in log - if raw.info['comps']: - assert raw_read.compensation_grade == 3 - raw_read.apply_gradient_compensation(0) - assert raw_read.compensation_grade == 0 - monkeypatch.setattr( # restore - meas_info, '_read_extended_ch_info', _read_extended_ch_info) - short_proj_names = [ - f'{name[:13 - bool(len(ref_names))]}-{len(ref_names) + ni}' - for ni, name in enumerate(long_data_names[:2])] - assert raw_read.info['projs'][0]['data']['col_names'] == short_proj_names - # - # epochs - # - epochs = Epochs(raw, make_fixed_length_events(raw)) - fname = tmp_path / 'test-epo.fif' - epochs.save(fname) - epochs_read = read_epochs(fname) - for ep in (epochs, epochs_read): - assert ep.info['ch_names'] == long_names - assert ep.ch_names == long_names - del raw, epochs_read - # cov - epochs.info['bads'] = [] - cov = compute_covariance(epochs, verbose='error') - fname = tmp_path / 'test-cov.fif' - write_cov(fname, cov) - cov_read = read_cov(fname) - for co in (cov, cov_read): - assert co['names'] == long_data_names - assert co['bads'] == [] - del cov_read - - # - # evoked - # - evoked = epochs.average() - evoked.info['bads'] = bads - assert evoked.nave == 1 - fname = tmp_path / 'test-ave.fif' - evoked.save(fname) - evoked_read = read_evokeds(fname)[0] - for ev in (evoked, evoked_read): - assert ev.ch_names == long_names - assert ev.info['bads'] == bads - del evoked_read, epochs - - # - # forward - # - with _record_warnings(): # not enough points for CTF - sphere = make_sphere_model('auto', 'auto', evoked.info) - src = setup_volume_source_space( - pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 
0.]])) - fwd = make_forward_solution(evoked.info, None, src, sphere) - fname = tmp_path / 'temp-fwd.fif' - write_forward_solution(fname, fwd) - fwd_read = read_forward_solution(fname) - for fw in (fwd, fwd_read): - assert fw['sol']['row_names'] == long_data_names - assert fw['info']['ch_names'] == long_data_names - assert fw['info']['bads'] == bads - del fwd_read - - # - # inv - # - inv = make_inverse_operator(evoked.info, fwd, cov) - fname = tmp_path / 'test-inv.fif' - write_inverse_operator(fname, inv) - inv_read = read_inverse_operator(fname) - for iv in (inv, inv_read): - assert iv['info']['ch_names'] == good_long_data_names - apply_inverse(evoked, inv) # smoke test - - -@pytest.mark.parametrize('fname_info', (raw_fname, 'create_info')) -@pytest.mark.parametrize('unlocked', (True, False)) -def test_pickle(fname_info, unlocked): - """Test that Info can be (un)pickled.""" - if fname_info == 'create_info': - info = create_info(3, 1000., 'eeg') - else: - info = read_info(fname_info) - assert not info._unlocked - info._unlocked = unlocked - data = pickle.dumps(info) - info_un = pickle.loads(data) - assert isinstance(info_un, Info) - assert_object_equal(info, info_un) - assert info_un._unlocked == unlocked - - -def test_info_bad(): - """Test our info sanity checkers.""" - info = create_info(2, 1000., 'eeg') - info['description'] = 'foo' - info['experimenter'] = 'bar' - info['line_freq'] = 50. - info['bads'] = info['ch_names'][:1] - info['temp'] = ('whatever', 1.) - # After 0.24 these should be pytest.raises calls - check, klass = pytest.raises, RuntimeError - with check(klass, match=r"info\['temp'\]"): - info['bad_key'] = 1. - for (key, match) in ([ - ('sfreq', r'inst\.resample'), - ('chs', r'inst\.add_channels')]): - with check(klass, match=match): - info[key] = info[key] - with pytest.raises(ValueError, match='between meg<->head'): - info['dev_head_t'] = Transform('mri', 'head', np.eye(4)) - - -def test_get_montage(): - """Test ContainsMixin.get_montage().""" - ch_names = make_standard_montage('standard_1020').ch_names - sfreq = 512 - data = np.zeros((len(ch_names), sfreq * 2)) - raw = RawArray(data, create_info(ch_names, sfreq, 'eeg')) - raw.set_montage('standard_1020') - - assert len(raw.get_montage().ch_names) == len(ch_names) - raw.info['bads'] = [ch_names[0]] - assert len(raw.get_montage().ch_names) == len(ch_names) - - # test info - raw = RawArray(data, create_info(ch_names, sfreq, 'eeg')) - raw.set_montage('standard_1020') - - assert len(raw.info.get_montage().ch_names) == len(ch_names) - raw.info['bads'] = [ch_names[0]] - assert len(raw.info.get_montage().ch_names) == len(ch_names) diff --git a/mne/io/tests/test_pick.py b/mne/io/tests/test_pick.py deleted file mode 100644 index 5ae95424b19..00000000000 --- a/mne/io/tests/test_pick.py +++ /dev/null @@ -1,636 +0,0 @@ -from copy import deepcopy -from pathlib import Path - -import pytest -import numpy as np -from numpy.testing import assert_array_equal, assert_equal - -from mne import (pick_channels_regexp, pick_types, Epochs, - read_forward_solution, rename_channels, - pick_info, pick_channels, create_info, make_ad_hoc_cov) -from mne.io import (read_raw_fif, RawArray, read_raw_bti, read_raw_kit, - read_info) -from mne.channels import make_standard_montage -from mne.preprocessing import compute_current_source_density -from mne.io.pick import (channel_indices_by_type, channel_type, - pick_types_forward, _picks_by_type, _picks_to_idx, - _contains_ch_type, pick_channels_cov, - _get_channel_types, get_channel_type_constants, - 
_DATA_CH_TYPES_SPLIT) -from mne.io.constants import FIFF -from mne.datasets import testing -from mne.utils import catch_logging, assert_object_equal - -data_path = testing.data_path(download=False) -fname_meeg = ( - data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif" -) -fname_mc = data_path / "SSS" / "test_move_anon_movecomp_raw_sss.fif" - -io_dir = Path(__file__).parent.parent -ctf_fname = io_dir / "tests" / "data" / "test_ctf_raw.fif" -fif_fname = io_dir / "tests" / "data" / "test_raw.fif" - - -def _picks_by_type_old(info, meg_combined=False, ref_meg=False, - exclude='bads'): - """Use the old, slower _picks_by_type code.""" - picks_list = [] - has = [_contains_ch_type(info, k) for k in _DATA_CH_TYPES_SPLIT] - has = dict(zip(_DATA_CH_TYPES_SPLIT, has)) - if has['mag'] and (meg_combined is not True or not has['grad']): - picks_list.append( - ('mag', pick_types(info, meg='mag', eeg=False, stim=False, - ref_meg=ref_meg, exclude=exclude)) - ) - if has['grad'] and (meg_combined is not True or not has['mag']): - picks_list.append( - ('grad', pick_types(info, meg='grad', eeg=False, stim=False, - ref_meg=ref_meg, exclude=exclude)) - ) - if has['mag'] and has['grad'] and meg_combined is True: - picks_list.append( - ('meg', pick_types(info, meg=True, eeg=False, stim=False, - ref_meg=ref_meg, exclude=exclude)) - ) - for ch_type in _DATA_CH_TYPES_SPLIT: - if ch_type in ['grad', 'mag']: # exclude just MEG channels - continue - if has[ch_type]: - picks_list.append( - (ch_type, pick_types(info, meg=False, stim=False, - ref_meg=ref_meg, exclude=exclude, - **{ch_type: True})) - ) - return picks_list - - -def _channel_type_old(info, idx): - """Get channel type using old, slower scheme.""" - ch = info['chs'][idx] - - # iterate through all defined channel types until we find a match with ch - # go in order from most specific (most rules entries) to least specific - channel_types = sorted(get_channel_type_constants().items(), - key=lambda x: len(x[1]), reverse=True) - for t, rules in channel_types: - for key, vals in rules.items(): # all keys must match the values - if ch.get(key, None) not in np.array(vals): - break # not channel type t, go to next iteration - else: - return t - - raise ValueError(f'Unknown channel type for {ch["ch_name"]}') - - -def _assert_channel_types(info): - for k in range(info['nchan']): - a, b = channel_type(info, k), _channel_type_old(info, k) - assert a == b - - -def test_pick_refs(): - """Test picking of reference sensors.""" - infos = list() - # KIT - kit_dir = io_dir / "kit" / "tests" / "data" - sqd_path = kit_dir / "test.sqd" - mrk_path = kit_dir / "test_mrk.sqd" - elp_path = kit_dir / "test_elp.txt" - hsp_path = kit_dir / "test_hsp.txt" - raw_kit = read_raw_kit( - sqd_path, str(mrk_path), str(elp_path), str(hsp_path) - ) - infos.append(raw_kit.info) - # BTi - bti_dir = io_dir / "bti" / "tests" / "data" - bti_pdf = bti_dir / "test_pdf_linux" - bti_config = bti_dir / "test_config_linux" - bti_hs = bti_dir / "test_hs_linux" - raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False) - infos.append(raw_bti.info) - # CTF - fname_ctf_raw = io_dir / "tests" / "data" / "test_ctf_comp_raw.fif" - raw_ctf = read_raw_fif(fname_ctf_raw) - raw_ctf.apply_gradient_compensation(2) - for info in infos: - info['bads'] = [] - _assert_channel_types(info) - with pytest.raises(ValueError, match="'planar2'] or bool, not foo"): - pick_types(info, meg='foo') - with pytest.raises(ValueError, match="'planar2', 'auto'] or bool,"): - pick_types(info, ref_meg='foo') - 
picks_meg_ref = pick_types(info, meg=True, ref_meg=True) - picks_meg = pick_types(info, meg=True, ref_meg=False) - picks_ref = pick_types(info, meg=False, ref_meg=True) - assert_array_equal(picks_meg_ref, - np.sort(np.concatenate([picks_meg, picks_ref]))) - picks_grad = pick_types(info, meg='grad', ref_meg=False) - picks_ref_grad = pick_types(info, meg=False, ref_meg='grad') - picks_meg_ref_grad = pick_types(info, meg='grad', ref_meg='grad') - assert_array_equal(picks_meg_ref_grad, - np.sort(np.concatenate([picks_grad, - picks_ref_grad]))) - picks_mag = pick_types(info, meg='mag', ref_meg=False) - picks_ref_mag = pick_types(info, meg=False, ref_meg='mag') - picks_meg_ref_mag = pick_types(info, meg='mag', ref_meg='mag') - assert_array_equal(picks_meg_ref_mag, - np.sort(np.concatenate([picks_mag, - picks_ref_mag]))) - assert_array_equal(picks_meg, - np.sort(np.concatenate([picks_mag, picks_grad]))) - assert_array_equal(picks_ref, - np.sort(np.concatenate([picks_ref_mag, - picks_ref_grad]))) - assert_array_equal(picks_meg_ref, np.sort(np.concatenate( - [picks_grad, picks_mag, picks_ref_grad, picks_ref_mag]))) - - for pick in (picks_meg_ref, picks_meg, picks_ref, - picks_grad, picks_ref_grad, picks_meg_ref_grad, - picks_mag, picks_ref_mag, picks_meg_ref_mag): - if len(pick) > 0: - pick_info(info, pick) - - # test CTF expected failures directly - info = raw_ctf.info - info['bads'] = [] - picks_meg_ref = pick_types(info, meg=True, ref_meg=True) - picks_meg = pick_types(info, meg=True, ref_meg=False) - picks_ref = pick_types(info, meg=False, ref_meg=True) - picks_mag = pick_types(info, meg='mag', ref_meg=False) - picks_ref_mag = pick_types(info, meg=False, ref_meg='mag') - picks_meg_ref_mag = pick_types(info, meg='mag', ref_meg='mag') - for pick in (picks_meg_ref, picks_ref, picks_ref_mag, picks_meg_ref_mag): - if len(pick) > 0: - pick_info(info, pick) - - for pick in (picks_meg, picks_mag): - if len(pick) > 0: - with catch_logging() as log: - pick_info(info, pick, verbose=True) - assert ('Removing {} compensators'.format(len(info['comps'])) - in log.getvalue()) - picks_ref_grad = pick_types(info, meg=False, ref_meg='grad') - assert set(picks_ref_mag) == set(picks_ref) - assert len(picks_ref_grad) == 0 - all_meg = np.arange(3, 306) - assert_array_equal(np.concatenate([picks_ref, picks_meg]), all_meg) - assert_array_equal(picks_meg_ref_mag, all_meg) - - -def test_pick_channels_regexp(): - """Test pick with regular expression.""" - ch_names = ['MEG 2331', 'MEG 2332', 'MEG 2333'] - assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...1'), [0]) - assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...[2-3]'), [1, 2]) - assert_array_equal(pick_channels_regexp(ch_names, 'MEG *'), [0, 1, 2]) - - -def assert_indexing(info, picks_by_type, ref_meg=False, all_data=True): - """Assert our indexing functions work properly.""" - # First that our old and new channel typing functions are equivalent - _assert_channel_types(info) - # Next that channel_indices_by_type works - if not ref_meg: - idx = channel_indices_by_type(info) - for key in idx: - for p in picks_by_type: - if key == p[0]: - assert_array_equal(idx[key], p[1]) - break - else: - assert len(idx[key]) == 0 - # Finally, picks_by_type (if relevant) - if not all_data: - picks_by_type = [p for p in picks_by_type - if p[0] in _DATA_CH_TYPES_SPLIT] - picks_by_type = [(p[0], np.array(p[1], int)) for p in picks_by_type] - actual = _picks_by_type(info, ref_meg=ref_meg) - assert_object_equal(actual, picks_by_type) - if not ref_meg and idx['hbo']: # our 
old code had a bug - with pytest.raises(TypeError, match='unexpected keyword argument'): - _picks_by_type_old(info, ref_meg=ref_meg) - else: - old = _picks_by_type_old(info, ref_meg=ref_meg) - assert_object_equal(old, picks_by_type) - # test bads - info = info.copy() - info['bads'] = [info['chs'][picks_by_type[0][1][0]]['ch_name']] - picks_by_type = deepcopy(picks_by_type) - picks_by_type[0] = (picks_by_type[0][0], picks_by_type[0][1][1:]) - actual = _picks_by_type(info, ref_meg=ref_meg) - assert_object_equal(actual, picks_by_type) - - -def test_pick_seeg_ecog(): - """Test picking with sEEG and ECoG.""" - names = 'A1 A2 Fz O OTp1 OTp2 E1 OTp3 E2 E3'.split() - types = 'mag mag eeg eeg seeg seeg ecog seeg ecog ecog'.split() - info = create_info(names, 1024., types) - picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), - ('seeg', [4, 5, 7]), ('ecog', [6, 8, 9])] - assert_indexing(info, picks_by_type) - assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 7]) - for i, t in enumerate(types): - assert_equal(channel_type(info, i), types[i]) - raw = RawArray(np.zeros((len(names), 10)), info) - events = np.array([[1, 0, 0], [2, 0, 0]]) - epochs = Epochs(raw, events=events, event_id={'event': 0}, - tmin=-1e-5, tmax=1e-5, - baseline=(0, 0)) # only one sample - evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True)) - e_seeg = evoked.copy().pick_types(meg=False, seeg=True) - for lt, rt in zip(e_seeg.ch_names, [names[4], names[5], names[7]]): - assert lt == rt - # Deal with constant debacle - raw = read_raw_fif(io_dir / "tests" / "data" / "test_chpi_raw_sss.fif") - assert_equal(len(pick_types(raw.info, meg=False, seeg=True, ecog=True)), 0) - - -def test_pick_dbs(): - """Test picking with DBS.""" - # gh-8739 - names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split() - types = 'mag mag eeg eeg dbs dbs dbs'.split() - info = create_info(names, 1024., types) - picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), ('dbs', [4, 5, 6])] - assert_indexing(info, picks_by_type) - assert_array_equal(pick_types(info, meg=False, dbs=True), [4, 5, 6]) - for i, t in enumerate(types): - assert channel_type(info, i) == types[i] - raw = RawArray(np.zeros((len(names), 7)), info) - events = np.array([[1, 0, 0], [2, 0, 0]]) - epochs = Epochs(raw, events=events, event_id={'event': 0}, - tmin=-1e-5, tmax=1e-5, - baseline=(0, 0)) # only one sample - evoked = epochs.average(pick_types(epochs.info, meg=True, dbs=True)) - e_dbs = evoked.copy().pick_types(meg=False, dbs=True) - for lt, rt in zip(e_dbs.ch_names, [names[4], names[5], names[6]]): - assert lt == rt - raw = read_raw_fif(io_dir / "tests" / "data" / "test_chpi_raw_sss.fif") - assert len(pick_types(raw.info, meg=False, dbs=True)) == 0 - - -def test_pick_chpi(): - """Test picking cHPI.""" - # Make sure we don't mis-classify cHPI channels - info = read_info(io_dir / "tests" / "data" / "test_chpi_raw_sss.fif") - _assert_channel_types(info) - channel_types = _get_channel_types(info) - assert 'chpi' in channel_types - assert 'seeg' not in channel_types - assert 'ecog' not in channel_types - - -def test_pick_csd(): - """Test picking current source density channels.""" - # Make sure we don't mis-classify cHPI channels - names = ['MEG 2331', 'MEG 2332', 'MEG 2333', 'A1', 'A2', 'Fz'] - types = 'mag mag grad csd csd csd'.split() - info = create_info(names, 1024., types) - picks_by_type = [('mag', [0, 1]), ('grad', [2]), ('csd', [3, 4, 5])] - assert_indexing(info, picks_by_type, all_data=False) - - -def test_pick_bio(): - """Test picking BIO channels.""" - names = 'A1 A2 Fz 
O BIO1 BIO2 BIO3'.split() - types = 'mag mag eeg eeg bio bio bio'.split() - info = create_info(names, 1024., types) - picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), ('bio', [4, 5, 6])] - assert_indexing(info, picks_by_type, all_data=False) - - -def test_pick_fnirs(): - """Test picking fNIRS channels.""" - names = 'A1 A2 Fz O hbo1 hbo2 hbr1 fnirsRaw1 fnirsRaw2 fnirsOD1'.split() - types = 'mag mag eeg eeg hbo hbo hbr fnirs_cw_' \ - 'amplitude fnirs_cw_amplitude fnirs_od'.split() - info = create_info(names, 1024., types) - picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), - ('hbo', [4, 5]), ('hbr', [6]), - ('fnirs_cw_amplitude', [7, 8]), ('fnirs_od', [9])] - assert_indexing(info, picks_by_type) - - -def test_pick_ref(): - """Test picking ref_meg channels.""" - info = read_info(ctf_fname) - picks_by_type = [('stim', [0]), ('eog', [306, 307]), ('ecg', [308]), - ('misc', [1]), - ('mag', np.arange(31, 306)), - ('ref_meg', np.arange(2, 31))] - assert_indexing(info, picks_by_type, all_data=False) - picks_by_type.append(('mag', np.concatenate([picks_by_type.pop(-1)[1], - picks_by_type.pop(-1)[1]]))) - assert_indexing(info, picks_by_type, ref_meg=True, all_data=False) - - -def _check_fwd_n_chan_consistent(fwd, n_expected): - n_ok = len(fwd['info']['ch_names']) - n_sol = fwd['sol']['data'].shape[0] - assert_equal(n_expected, n_sol) - assert_equal(n_expected, n_ok) - - -@testing.requires_testing_data -def test_pick_forward_seeg_ecog(): - """Test picking forward with SEEG and ECoG.""" - fwd = read_forward_solution(fname_meeg) - counts = channel_indices_by_type(fwd['info']) - for key in counts.keys(): - counts[key] = len(counts[key]) - counts['meg'] = counts['mag'] + counts['grad'] - fwd_ = pick_types_forward(fwd, meg=True) - _check_fwd_n_chan_consistent(fwd_, counts['meg']) - fwd_ = pick_types_forward(fwd, meg=False, eeg=True) - _check_fwd_n_chan_consistent(fwd_, counts['eeg']) - # should raise exception related to emptiness - pytest.raises(ValueError, pick_types_forward, fwd, meg=False, seeg=True) - pytest.raises(ValueError, pick_types_forward, fwd, meg=False, ecog=True) - # change last chan from EEG to sEEG, second-to-last to ECoG - ecog_name = 'E1' - seeg_name = 'OTp1' - rename_channels(fwd['info'], {'EEG 059': ecog_name}) - rename_channels(fwd['info'], {'EEG 060': seeg_name}) - for ch in fwd['info']['chs']: - if ch['ch_name'] == seeg_name: - ch['kind'] = FIFF.FIFFV_SEEG_CH - ch['coil_type'] = FIFF.FIFFV_COIL_EEG - elif ch['ch_name'] == ecog_name: - ch['kind'] = FIFF.FIFFV_ECOG_CH - ch['coil_type'] = FIFF.FIFFV_COIL_EEG - fwd['sol']['row_names'][-1] = fwd['info']['chs'][-1]['ch_name'] - fwd['sol']['row_names'][-2] = fwd['info']['chs'][-2]['ch_name'] - counts['eeg'] -= 2 - counts['seeg'] += 1 - counts['ecog'] += 1 - # repick & check - fwd_seeg = pick_types_forward(fwd, meg=False, seeg=True) - assert_equal(fwd_seeg['sol']['row_names'], [seeg_name]) - assert_equal(fwd_seeg['info']['ch_names'], [seeg_name]) - # should work fine - fwd_ = pick_types_forward(fwd, meg=True) - _check_fwd_n_chan_consistent(fwd_, counts['meg']) - fwd_ = pick_types_forward(fwd, meg=False, eeg=True) - _check_fwd_n_chan_consistent(fwd_, counts['eeg']) - fwd_ = pick_types_forward(fwd, meg=False, seeg=True) - _check_fwd_n_chan_consistent(fwd_, counts['seeg']) - fwd_ = pick_types_forward(fwd, meg=False, ecog=True) - _check_fwd_n_chan_consistent(fwd_, counts['ecog']) - - -def test_picks_by_channels(): - """Test creating pick_lists.""" - rng = np.random.RandomState(909) - - test_data = rng.random_sample((4, 2000)) - ch_names = 
['MEG %03d' % i for i in [1, 2, 3, 4]] - ch_types = ['grad', 'mag', 'mag', 'eeg'] - sfreq = 250.0 - info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) - _assert_channel_types(info) - raw = RawArray(test_data, info) - - pick_list = _picks_by_type(raw.info) - assert_equal(len(pick_list), 3) - assert_equal(pick_list[0][0], 'mag') - pick_list2 = _picks_by_type(raw.info, meg_combined=False) - assert_equal(len(pick_list), len(pick_list2)) - assert_equal(pick_list2[0][0], 'mag') - - pick_list2 = _picks_by_type(raw.info, meg_combined=True) - assert_equal(len(pick_list), len(pick_list2) + 1) - assert_equal(pick_list2[0][0], 'meg') - - test_data = rng.random_sample((4, 2000)) - ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]] - ch_types = ['mag', 'mag', 'mag', 'mag'] - sfreq = 250.0 - info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) - raw = RawArray(test_data, info) - # This acts as a set, not an order - assert_array_equal(pick_channels(info['ch_names'], ['MEG 002', 'MEG 001']), - [0, 1]) - - # Make sure checks for list input work. - pytest.raises(ValueError, pick_channels, ch_names, 'MEG 001') - pytest.raises(ValueError, pick_channels, ch_names, ['MEG 001'], 'hi') - - pick_list = _picks_by_type(raw.info) - assert_equal(len(pick_list), 1) - assert_equal(pick_list[0][0], 'mag') - pick_list2 = _picks_by_type(raw.info, meg_combined=True) - assert_equal(len(pick_list), len(pick_list2)) - assert_equal(pick_list2[0][0], 'mag') - - # pick_types type check - with pytest.raises(ValueError, match='must be of type'): - raw.pick_types(eeg='string') - - # duplicate check - names = ['MEG 002', 'MEG 002'] - assert len(pick_channels(raw.info['ch_names'], names)) == 1 - assert len(raw.copy().pick_channels(names)[0][0]) == 1 - - -def test_clean_info_bads(): - """Test cleaning info['bads'] when bad_channels are excluded.""" - raw_file = io_dir / "tests" / "data" / "test_raw.fif" - raw = read_raw_fif(raw_file) - _assert_channel_types(raw.info) - - # select eeg channels - picks_eeg = pick_types(raw.info, meg=False, eeg=True) - - # select 3 eeg channels as bads - idx_eeg_bad_ch = picks_eeg[[1, 5, 14]] - eeg_bad_ch = [raw.info['ch_names'][k] for k in idx_eeg_bad_ch] - - # select meg channels - picks_meg = pick_types(raw.info, meg=True, eeg=False) - - # select randomly 3 meg channels as bads - idx_meg_bad_ch = picks_meg[[0, 15, 34]] - meg_bad_ch = [raw.info['ch_names'][k] for k in idx_meg_bad_ch] - - # simulate the bad channels - raw.info['bads'] = eeg_bad_ch + meg_bad_ch - - # simulate the call to pick_info excluding the bad eeg channels - info_eeg = pick_info(raw.info, picks_eeg) - - # simulate the call to pick_info excluding the bad meg channels - info_meg = pick_info(raw.info, picks_meg) - - assert_equal(info_eeg['bads'], eeg_bad_ch) - assert_equal(info_meg['bads'], meg_bad_ch) - - info = pick_info(raw.info, picks_meg) - info._check_consistency() - info['bads'] += ['EEG 053'] - pytest.raises(RuntimeError, info._check_consistency) - with pytest.raises(ValueError, match='unique'): - pick_info(raw.info, [0, 0]) - - -@testing.requires_testing_data -def test_picks_to_idx(): - """Test checking type integrity checks of picks.""" - info = create_info(12, 1000., 'eeg') - _assert_channel_types(info) - picks = np.arange(info['nchan']) - # Array and list - assert_array_equal(picks, _picks_to_idx(info, picks)) - assert_array_equal(picks, _picks_to_idx(info, list(picks))) - with pytest.raises(TypeError, match='data type float64 is invalid'): - _picks_to_idx(info, 1.) 
- # None - assert_array_equal(picks, _picks_to_idx(info, None)) - # Type indexing - assert_array_equal(picks, _picks_to_idx(info, 'eeg')) - assert_array_equal(picks, _picks_to_idx(info, ['eeg'])) - # Negative indexing - assert_array_equal([len(picks) - 1], _picks_to_idx(info, len(picks) - 1)) - assert_array_equal([len(picks) - 1], _picks_to_idx(info, -1)) - assert_array_equal([len(picks) - 1], _picks_to_idx(info, [-1])) - # Name indexing - assert_array_equal([2], _picks_to_idx(info, info['ch_names'][2])) - assert_array_equal(np.arange(5, 9), - _picks_to_idx(info, info['ch_names'][5:9])) - with pytest.raises(ValueError, match='must be >= '): - _picks_to_idx(info, -len(picks) - 1) - with pytest.raises(ValueError, match='must be < '): - _picks_to_idx(info, len(picks)) - with pytest.raises(ValueError, match='could not be interpreted'): - _picks_to_idx(info, ['a', 'b']) - with pytest.raises(ValueError, match='could not be interpreted'): - _picks_to_idx(info, 'b') - # bads behavior - info['bads'] = info['ch_names'][1:2] - picks_good = np.array([0] + list(range(2, 12))) - assert_array_equal(picks_good, _picks_to_idx(info, None)) - assert_array_equal(picks_good, _picks_to_idx(info, None, - exclude=info['bads'])) - assert_array_equal(picks, _picks_to_idx(info, None, exclude=())) - with pytest.raises(ValueError, match=' 1D, got'): - _picks_to_idx(info, [[1]]) - # MEG types - info = read_info(fname_mc) - meg_picks = np.arange(306) - mag_picks = np.arange(2, 306, 3) - grad_picks = np.setdiff1d(meg_picks, mag_picks) - assert_array_equal(meg_picks, _picks_to_idx(info, 'meg')) - assert_array_equal(meg_picks, _picks_to_idx(info, ('mag', 'grad'))) - assert_array_equal(mag_picks, _picks_to_idx(info, 'mag')) - assert_array_equal(grad_picks, _picks_to_idx(info, 'grad')) - - info = create_info(['eeg', 'foo'], 1000., 'eeg') - with pytest.raises(RuntimeError, match='equivalent to channel types'): - _picks_to_idx(info, 'eeg') - with pytest.raises(ValueError, match='same length'): - create_info(['a', 'b'], 1000., dict(hbo=['a'], hbr=['b'])) - info = create_info(['a', 'b'], 1000., ['hbo', 'hbr']) - assert_array_equal(np.arange(2), _picks_to_idx(info, 'fnirs')) - assert_array_equal([0], _picks_to_idx(info, 'hbo')) - assert_array_equal([1], _picks_to_idx(info, 'hbr')) - info = create_info(['a', 'b'], 1000., ['hbo', 'misc']) - assert_array_equal(np.arange(len(info['ch_names'])), - _picks_to_idx(info, 'all')) - assert_array_equal([0], _picks_to_idx(info, 'data')) - info = create_info(['a', 'b'], 1000., ['fnirs_cw_amplitude', 'fnirs_od']) - assert_array_equal(np.arange(2), _picks_to_idx(info, 'fnirs')) - assert_array_equal([0], _picks_to_idx(info, 'fnirs_cw_amplitude')) - assert_array_equal([1], _picks_to_idx(info, 'fnirs_od')) - info = create_info(['a', 'b'], 1000., ['fnirs_cw_amplitude', 'misc']) - assert_array_equal(np.arange(len(info['ch_names'])), - _picks_to_idx(info, 'all')) - assert_array_equal([0], _picks_to_idx(info, 'data')) - info = create_info(['a', 'b'], 1000., ['fnirs_od', 'misc']) - assert_array_equal(np.arange(len(info['ch_names'])), - _picks_to_idx(info, 'all')) - assert_array_equal([0], _picks_to_idx(info, 'data')) - - -def test_pick_channels_cov(): - """Test picking channels from a Covariance object.""" - info = create_info(['CH1', 'CH2', 'CH3'], 1., ch_types='eeg') - cov = make_ad_hoc_cov(info) - cov['data'] = np.array([1., 2., 3.]) - - cov_copy = pick_channels_cov(cov, ['CH2', 'CH1'], ordered=False, copy=True) - assert cov_copy.ch_names == ['CH1', 'CH2'] - assert_array_equal(cov_copy['data'], [1., 
2.]) - - # Test re-ordering channels - cov_copy = pick_channels_cov(cov, ['CH2', 'CH1'], ordered=True, copy=True) - assert cov_copy.ch_names == ['CH2', 'CH1'] - assert_array_equal(cov_copy['data'], [2., 1.]) - - # Test picking in-place - pick_channels_cov(cov, ['CH2', 'CH1'], copy=False) - assert cov.ch_names == ['CH1', 'CH2'] - assert_array_equal(cov['data'], [1., 2.]) - - # Test whether `method` and `loglik` are dropped when None - cov['method'] = None - cov['loglik'] = None - cov_copy = pick_channels_cov(cov, ['CH1', 'CH2'], copy=True) - assert 'method' not in cov_copy - assert 'loglik' not in cov_copy - - -def test_pick_types_meg(): - """Test pick_types(meg=True).""" - # info with MEG channels at indices 1, 2, and 4 - info1 = create_info(6, 256, ["eeg", "mag", "grad", "misc", "grad", "hbo"]) - - assert list(pick_types(info1, meg=True)) == [1, 2, 4] - assert list(pick_types(info1, meg=True, eeg=True)) == [0, 1, 2, 4] - - assert list(pick_types(info1, meg=True)) == [1, 2, 4] - assert not list(pick_types(info1, meg=False)) # empty - assert list(pick_types(info1, meg='planar1')) == [2] - assert not list(pick_types(info1, meg='planar2')) # empty - - # info without any MEG channels - info2 = create_info(6, 256, ["eeg", "eeg", "eog", "misc", "stim", "hbo"]) - - assert not list(pick_types(info2)) # empty - assert list(pick_types(info2, eeg=True)) == [0, 1] - - -def test_pick_types_csd(): - """Test pick_types(csd=True).""" - # info with laplacian/CSD channels at indices 1, 2 - names = ['F1', 'F2', 'C1', 'C2', 'A1', 'A2', 'misc1', 'CSD1'] - info1 = create_info(names, 256, ["eeg", "eeg", "eeg", "eeg", "mag", - "mag", 'misc', 'csd']) - raw = RawArray(np.zeros((8, 512)), info1) - raw.set_montage(make_standard_montage('standard_1020'), verbose='error') - raw_csd = compute_current_source_density(raw, verbose='error') - - assert_array_equal(pick_types(info1, csd=True), [7]) - - # pick from the raw object - assert raw_csd.copy().pick_types(csd=True).ch_names == [ - 'F1', 'F2', 'C1', 'C2', 'CSD1'] - - -@pytest.mark.parametrize('meg', [True, False, 'grad', 'mag']) -@pytest.mark.parametrize('eeg', [True, False]) -@pytest.mark.parametrize('ordered', [True, False]) -def test_get_channel_types_equiv(meg, eeg, ordered): - """Test equivalence of get_channel_types.""" - raw = read_raw_fif(fif_fname) - pick_types(raw.info, meg=meg, eeg=eeg) - picks = pick_types(raw.info, meg=meg, eeg=eeg) - if not ordered: - picks = np.random.RandomState(0).permutation(picks) - if not meg and not eeg: - with pytest.raises(ValueError, match='No appropriate channels'): - raw.get_channel_types(picks=picks) - return - types = np.array(raw.get_channel_types(picks=picks)) - types_iter = np.array([channel_type(raw.info, idx) for idx in picks]) - assert_array_equal(types, types_iter) diff --git a/mne/io/tests/test_proc_history.py b/mne/io/tests/test_proc_history.py deleted file mode 100644 index 964464522cf..00000000000 --- a/mne/io/tests/test_proc_history.py +++ /dev/null @@ -1,38 +0,0 @@ -# Authors: Denis A. Engemann -# Eric Larson -# License: Simplified BSD - -from pathlib import Path - -import numpy as np -from numpy.testing import assert_array_equal - -from mne.io import read_info -from mne.io.constants import FIFF - -base_dir = Path(__file__).parent / "data" -raw_fname = base_dir / "test_chpi_raw_sss.fif" - - -def test_maxfilter_io(): - """Test maxfilter io.""" - info = read_info(raw_fname) - mf = info['proc_history'][1]['max_info'] - - assert mf['sss_info']['frame'] == FIFF.FIFFV_COORD_HEAD - # based on manual 2.0, rev. 
5.0 page 23 - assert 5 <= mf['sss_info']['in_order'] <= 11 - assert mf['sss_info']['out_order'] <= 5 - assert mf['sss_info']['nchan'] > len(mf['sss_info']['components']) - - assert (info['ch_names'][:mf['sss_info']['nchan']] == - mf['sss_ctc']['proj_items_chs']) - assert (mf['sss_ctc']['decoupler'].shape == - (mf['sss_info']['nchan'], mf['sss_info']['nchan'])) - assert_array_equal( - np.unique(np.diag(mf['sss_ctc']['decoupler'].toarray())), - np.array([1.], dtype=np.float32)) - assert mf['sss_cal']['cal_corrs'].shape == (306, 14) - assert mf['sss_cal']['cal_chans'].shape == (306, 2) - vv_coils = [v for k, v in FIFF.items() if 'FIFFV_COIL_VV' in k] - assert all(k in vv_coils for k in set(mf['sss_cal']['cal_chans'][:, 1])) diff --git a/mne/io/tests/test_raw.py b/mne/io/tests/test_raw.py index 76329ce74ef..c03963c9854 100644 --- a/mne/io/tests/test_raw.py +++ b/mne/io/tests/test_raw.py @@ -1,74 +1,113 @@ -# -*- coding: utf-8 -*- """Generic tests that all raw classes should run.""" -# Authors: MNE Developers -# Stefan Appelhoff -# + +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from contextlib import redirect_stdout -from io import StringIO import math import os +import re +from contextlib import redirect_stdout +from io import StringIO from os import path as op from pathlib import Path -import re -import pytest import numpy as np -from numpy.testing import (assert_allclose, assert_array_almost_equal, - assert_array_equal, assert_array_less) +import pytest +from numpy.testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, +) import mne -from mne import concatenate_raws, create_info, Annotations, pick_types -from mne.datasets import testing -from mne.io import read_raw_fif, RawArray, BaseRaw, Info, _writing_info_hdf5 -from mne.io._digitization import _dig_kind_dict +from mne import Annotations, concatenate_raws, create_info, pick_types +from mne._fiff._digitization import DigPoint, _dig_kind_dict +from mne._fiff.constants import FIFF +from mne._fiff.meas_info import Info, _get_valid_units, _writing_info_hdf5 +from mne._fiff.pick import _ELECTRODE_CH_TYPES, _FNIRS_CH_TYPES_SPLIT +from mne._fiff.proj import Projection +from mne._fiff.utils import _mult_cal_one +from mne.io import BaseRaw, RawArray, read_raw_fif from mne.io.base import _get_scaling -from mne.io.pick import _ELECTRODE_CH_TYPES, _FNIRS_CH_TYPES_SPLIT -from mne.utils import (_TempDir, catch_logging, _raw_annot, _stamp_to_dt, - object_diff, check_version, requires_pandas, - _import_h5io_funcs) -from mne.io.meas_info import _get_valid_units -from mne.io._digitization import DigPoint -from mne.io.proj import Projection -from mne.io.utils import _mult_cal_one -from mne.io.constants import FIFF - -raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', - 'data', 'test_raw.fif') +from mne.transforms import Transform +from mne.utils import ( + _import_h5io_funcs, + _raw_annot, + _stamp_to_dt, + _TempDir, + catch_logging, + check_version, + object_diff, + sizeof_fmt, +) + +raw_fname = op.join( + op.dirname(__file__), "..", "..", "io", "tests", "data", "test_raw.fif" +) def assert_named_constants(info): """Assert that info['chs'] has named constants.""" # for now we just check one __tracebackhide__ = True - r = repr(info['chs'][0]) - for check in ('.*FIFFV_COORD_.*', '.*FIFFV_COIL_.*', '.*FIFF_UNIT_.*', - '.*FIFF_UNITM_.*',): + r = repr(info["chs"][0]) + for check in ( + ".*FIFFV_COORD_.*", + ".*FIFFV_COIL_.*", + 
".*FIFF_UNIT_.*", + ".*FIFF_UNITM_.*", + ): assert re.match(check, r, re.DOTALL) is not None, (check, r) +def assert_attributes(raw): + """Assert that the instance keeps all its extra attributes in _raw_extras.""" + __tracebackhide__ = True + assert isinstance(raw, BaseRaw) + base_attrs = set(dir(BaseRaw(create_info(1, 1000.0, "eeg"), last_samps=[1]))) + base_attrs = base_attrs.union( + [ + "_data", # in the case of preloaded data + "__slotnames__", # something about being decorated (?) + ] + ) + for attr in raw._extra_attributes: + assert attr not in base_attrs + base_attrs.add(attr) + got_attrs = set(dir(raw)) + extra = got_attrs.difference(base_attrs) + assert extra == set() + + def test_orig_units(): """Test the error handling for original units.""" # Should work fine - info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg') - BaseRaw(info, last_samps=[1], orig_units={'Cz': 'nV'}) + info = create_info(ch_names=["Cz"], sfreq=100, ch_types="eeg") + BaseRaw(info, last_samps=[1], orig_units={"Cz": "nV"}) # Should complain that channel Cz does not have a corresponding original # unit. - with pytest.raises(ValueError, match='has no associated original unit.'): - info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg') - BaseRaw(info, last_samps=[1], orig_units={'not_Cz': 'nV'}) + with pytest.raises(ValueError, match="has no associated original unit."): + info = create_info(ch_names=["Cz"], sfreq=100, ch_types="eeg") + BaseRaw(info, last_samps=[1], orig_units={"not_Cz": "nV"}) # Test that a non-dict orig_units argument raises a ValueError - with pytest.raises(ValueError, match='orig_units must be of type dict'): - info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg') + with pytest.raises(ValueError, match="orig_units must be of type dict"): + info = create_info(ch_names=["Cz"], sfreq=100, ch_types="eeg") BaseRaw(info, last_samps=[1], orig_units=True) -def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, - boundary_decimal=2, test_scaling=True, test_rank=True, - **kwargs): +def _test_raw_reader( + reader, + test_preloading=True, + test_kwargs=True, + boundary_decimal=2, + test_scaling=True, + test_rank=True, + **kwargs, +): """Test reading, writing and slicing of raw classes. 
Parameters @@ -95,86 +134,104 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, rng = np.random.RandomState(0) montage = None if "montage" in kwargs: - montage = kwargs['montage'] - del kwargs['montage'] + montage = kwargs["montage"] + del kwargs["montage"] if test_preloading: raw = reader(preload=True, **kwargs) rep = repr(raw) - assert rep.count('<') == 1 - assert rep.count('>') == 1 + assert rep.count("<") == 1 + assert rep.count(">") == 1 if montage is not None: raw.set_montage(montage) # don't assume the first is preloaded - buffer_fname = op.join(tempdir, 'buffer') + buffer_fname = op.join(tempdir, "buffer") picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10] picks = np.append(picks, len(raw.ch_names) - 1) # test trigger channel - bnd = min(int(round(raw.buffer_size_sec * - raw.info['sfreq'])), raw.n_times) - slices = [slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd), - slice(3, 300), slice(None), slice(1, bnd)] + bnd = min(int(round(raw.buffer_size_sec * raw.info["sfreq"])), raw.n_times) + slices = [ + slice(0, bnd), + slice(bnd - 1, bnd), + slice(3, bnd), + slice(3, 300), + slice(None), + slice(1, bnd), + ] if raw.n_times >= 2 * bnd: # at least two complete blocks - slices += [slice(bnd, 2 * bnd), slice(bnd, bnd + 1), - slice(0, bnd + 100)] - other_raws = [reader(preload=buffer_fname, **kwargs), - reader(preload=False, **kwargs)] + slices += [slice(bnd, 2 * bnd), slice(bnd, bnd + 1), slice(0, bnd + 100)] + other_raws = [ + reader(preload=buffer_fname, **kwargs), + reader(preload=False, **kwargs), + ] for sl_time in slices: data1, times1 = raw[picks, sl_time] for other_raw in other_raws: data2, times2 = other_raw[picks, sl_time] - assert_allclose( - data1, data2, err_msg='Data mismatch with preload') + assert_allclose(data1, data2, err_msg="Data mismatch with preload") assert_allclose(times1, times2) # test projection vs cals and data units other_raw = reader(preload=False, **kwargs) other_raw.del_proj() - eeg = meg = fnirs = False - if 'eeg' in raw: + eeg = meg = fnirs = seeg = eyetrack = False + if "eeg" in raw: eeg, atol = True, 1e-18 - elif 'grad' in raw: - meg, atol = 'grad', 1e-24 - elif 'mag' in raw: - meg, atol = 'mag', 1e-24 - elif 'hbo' in raw: - fnirs, atol = 'hbo', 1e-10 - elif 'hbr' in raw: - fnirs, atol = 'hbr', 1e-10 + elif "grad" in raw: + meg, atol = "grad", 1e-24 + elif "mag" in raw: + meg, atol = "mag", 1e-24 + elif "hbo" in raw: + fnirs, atol = "hbo", 1e-10 + elif "hbr" in raw: + fnirs, atol = "hbr", 1e-10 + elif "fnirs_cw_amplitude" in raw: + fnirs, atol = "fnirs_cw_amplitude", 1e-10 + elif "eyegaze" in raw: + eyetrack = "eyegaze", 1e-3 else: - assert 'fnirs_cw_amplitude' in raw, 'New channel type necessary?' - fnirs, atol = 'fnirs_cw_amplitude', 1e-10 + # e.g., https://github.com/mne-tools/mne-python/pull/11432/files + assert "seeg" in raw, "New channel type necessary? 
See gh-11432 for example" + seeg, atol = True, 1e-18 + picks = pick_types( - other_raw.info, meg=meg, eeg=eeg, fnirs=fnirs) + other_raw.info, + meg=meg, + eeg=eeg, + fnirs=fnirs, + seeg=seeg, + eyetrack=eyetrack, + ) col_names = [other_raw.ch_names[pick] for pick in picks] proj = np.ones((1, len(picks))) proj /= np.sqrt(proj.shape[1]) proj = Projection( - data=dict(data=proj, nrow=1, row_names=None, - col_names=col_names, ncol=len(picks)), - active=False) - assert len(other_raw.info['projs']) == 0 + data=dict( + data=proj, nrow=1, row_names=None, col_names=col_names, ncol=len(picks) + ), + active=False, + ) + assert len(other_raw.info["projs"]) == 0 other_raw.add_proj(proj) - assert len(other_raw.info['projs']) == 1 + assert len(other_raw.info["projs"]) == 1 # Orders of projector application, data loading, and reordering # equivalent: # 1. load->apply->get - data_load_apply_get = \ - other_raw.copy().load_data().apply_proj().get_data(picks) + data_load_apply_get = other_raw.copy().load_data().apply_proj().get_data(picks) # 2. apply->get (and don't allow apply->pick) apply = other_raw.copy().apply_proj() data_apply_get = apply.get_data(picks) data_apply_get_0 = apply.get_data(picks[0])[0] - with pytest.raises(RuntimeError, match='loaded'): + with pytest.raises(RuntimeError, match="loaded"): apply.copy().pick(picks[0]).get_data() # 3. apply->load->get data_apply_load_get = apply.copy().load_data().get_data(picks) - data_apply_load_get_0, data_apply_load_get_1 = \ + data_apply_load_get_0, data_apply_load_get_1 = ( apply.copy().load_data().pick(picks[:2]).get_data() + ) # 4. reorder->apply->load->get all_picks = np.arange(len(other_raw.ch_names)) - reord = np.concatenate(( - picks[1::2], - picks[0::2], - np.setdiff1d(all_picks, picks))) + reord = np.concatenate( + (picks[1::2], picks[0::2], np.setdiff1d(all_picks, picks)) + ) rev = np.argsort(reord) assert_array_equal(reord[rev], all_picks) assert_array_equal(rev[reord], all_picks) @@ -185,17 +242,22 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, reorder_apply = reorder.copy().apply_proj() assert reorder_apply.ch_names == reorder.ch_names assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]] - assert_allclose(reorder_apply.get_data([0]), apply.get_data(picks[1]), - atol=1e-18) - data_reorder_apply_load_get = \ - reorder_apply.load_data().get_data(rev[:len(picks)]) - data_reorder_apply_load_get_1 = \ + assert_allclose( + reorder_apply.get_data([0]), apply.get_data(picks[1]), atol=1e-18 + ) + data_reorder_apply_load_get = reorder_apply.load_data().get_data( + rev[: len(picks)] + ) + data_reorder_apply_load_get_1 = ( reorder_apply.copy().load_data().pick([0]).get_data()[0] + ) assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]] - assert (data_load_apply_get.shape == - data_apply_get.shape == - data_apply_load_get.shape == - data_reorder_apply_load_get.shape) + assert ( + data_load_apply_get.shape + == data_apply_get.shape + == data_apply_load_get.shape + == data_reorder_apply_load_get.shape + ) del apply # first check that our data are (probably) in the right units data = data_load_apply_get.copy() @@ -209,12 +271,12 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, else: atol = 1e-7 * np.median(data) # 1e-7 * MAD # ranks should all be reduced by 1 - if test_rank == 'less': + if test_rank == "less": cmp = np.less elif test_rank is False: cmp = None else: # anything else is like True or 'equal' - assert test_rank is True or test_rank == 'equal', test_rank + assert test_rank is True or 
test_rank == "equal", test_rank cmp = np.equal rank_load_apply_get = np.linalg.matrix_rank(data_load_apply_get) rank_apply_get = np.linalg.matrix_rank(data_apply_get) @@ -224,59 +286,59 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, assert cmp(rank_apply_get, len(col_names) - 1) assert cmp(rank_apply_load_get, len(col_names) - 1) # and they should all match - t_kw = dict( - atol=atol, err_msg='before != after, likely _mult_cal_one prob') + t_kw = dict(atol=atol, err_msg="before != after, likely _mult_cal_one prob") assert_allclose(data_apply_get[0], data_apply_get_0, **t_kw) - assert_allclose(data_apply_load_get_1, - data_reorder_apply_load_get_1, **t_kw) + assert_allclose(data_apply_load_get_1, data_reorder_apply_load_get_1, **t_kw) assert_allclose(data_load_apply_get[0], data_apply_load_get_0, **t_kw) assert_allclose(data_load_apply_get, data_apply_get, **t_kw) assert_allclose(data_load_apply_get, data_apply_load_get, **t_kw) - if 'eeg' in raw: + if "eeg" in raw: other_raw.del_proj() - direct = \ - other_raw.copy().load_data().set_eeg_reference().get_data() + direct = other_raw.copy().load_data().set_eeg_reference().get_data() other_raw.set_eeg_reference(projection=True) - assert len(other_raw.info['projs']) == 1 - this_proj = other_raw.info['projs'][0]['data'] - assert this_proj['col_names'] == col_names - assert this_proj['data'].shape == proj['data']['data'].shape - assert_allclose( - np.linalg.norm(proj['data']['data']), 1., atol=1e-6) - assert_allclose( - np.linalg.norm(this_proj['data']), 1., atol=1e-6) - assert_allclose(this_proj['data'], proj['data']['data']) + assert len(other_raw.info["projs"]) == 1 + this_proj = other_raw.info["projs"][0]["data"] + assert this_proj["col_names"] == col_names + assert this_proj["data"].shape == proj["data"]["data"].shape + assert_allclose(np.linalg.norm(proj["data"]["data"]), 1.0, atol=1e-6) + assert_allclose(np.linalg.norm(this_proj["data"]), 1.0, atol=1e-6) + assert_allclose(this_proj["data"], proj["data"]["data"]) proj = other_raw.apply_proj().get_data() assert_allclose(proj[picks], data_load_apply_get, atol=1e-10) - assert_allclose(proj, direct, atol=1e-10, err_msg=t_kw['err_msg']) + assert_allclose(proj, direct, atol=1e-10, err_msg=t_kw["err_msg"]) else: raw = reader(**kwargs) n_samp = len(raw.times) assert_named_constants(raw.info) + assert_attributes(raw) # smoke test for gh #9743 - ids = [id(ch['loc']) for ch in raw.info['chs']] + ids = [id(ch["loc"]) for ch in raw.info["chs"]] assert len(set(ids)) == len(ids) - full_data = raw._data + full_data = raw.get_data() assert raw.__class__.__name__ in repr(raw) # to test repr assert raw.info.__class__.__name__ in repr(raw.info) - assert isinstance(raw.info['dig'], (type(None), list)) - data_max = full_data.max() - data_min = full_data.min() + assert isinstance(raw.info["dig"], type(None) | list) + data_max = np.nanmax(full_data) + data_min = np.nanmin(full_data) # these limits could be relaxed if we actually find data with # huge values (in SI units) assert data_max < 1e5 assert data_min > -1e5 - if isinstance(raw.info['dig'], list): - for di, d in enumerate(raw.info['dig']): + if isinstance(raw.info["dig"], list): + for di, d in enumerate(raw.info["dig"]): assert isinstance(d, DigPoint), (di, d) # gh-5604 - meas_date = raw.info['meas_date'] + meas_date = raw.info["meas_date"] assert meas_date is None or meas_date >= _stamp_to_dt((0, 0)) + # gh-13112 + if "meg" not in raw: + assert raw.info["dev_head_t"] is None, "dev_head_t should be None if no MEG" + # test repr_html 
- assert 'Good channels' in raw.info._repr_html_() + assert "Channels" in raw._repr_html_() # test resetting raw if test_kwargs: @@ -285,49 +347,59 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, assert_array_equal(raw.times, raw2.times) # Test saving and reading - out_fname = op.join(tempdir, 'test_raw.fif') + out_fname = op.join(tempdir, "test_out_raw.fif") raw = concatenate_raws([raw]) - raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1) - + filenames = raw.save( + out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1 + ) + for filename in filenames: + assert filename.is_file() # Test saving with not correct extension - out_fname_h5 = op.join(tempdir, 'test_raw.h5') - with pytest.raises(IOError, match='raw must end with .fif or .fif.gz'): + out_fname_h5 = op.join(tempdir, "test_raw.h5") + with pytest.raises(OSError, match="raw must end with .fif or .fif.gz"): raw.save(out_fname_h5) - raw3 = read_raw_fif(out_fname) + raw3 = read_raw_fif(out_fname, allow_maxshield="yes") assert_named_constants(raw3.info) assert set(raw.info.keys()) == set(raw3.info.keys()) - assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6, - atol=1e-20) # atol is very small but > 0 + assert_allclose( + raw3[0:20][0], full_data[0:20], rtol=1e-6, atol=1e-20 + ) # atol is very small but > 0 assert_allclose(raw.times, raw3.times, atol=1e-6, rtol=1e-6) - assert not math.isnan(raw3.info['highpass']) - assert not math.isnan(raw3.info['lowpass']) - assert not math.isnan(raw.info['highpass']) - assert not math.isnan(raw.info['lowpass']) + assert not math.isnan(raw3.info["highpass"]) + assert not math.isnan(raw3.info["lowpass"]) + assert not math.isnan(raw.info["highpass"]) + assert not math.isnan(raw.info["lowpass"]) - assert raw3.info['kit_system_id'] == raw.info['kit_system_id'] + assert raw3.info["kit_system_id"] == raw.info["kit_system_id"] # Make sure concatenation works first_samp = raw.first_samp last_samp = raw.last_samp - concat_raw = concatenate_raws([raw.copy(), raw]) + concat_raw = concatenate_raws([raw.copy(), raw], verbose="debug") assert concat_raw.n_times == 2 * raw.n_times assert concat_raw.first_samp == first_samp assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1 - idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0] + idx = np.where(concat_raw.annotations.description == "BAD boundary")[0] + assert len(idx) == 1 + assert len(concat_raw.times) == 2 * n_samp expected_bad_boundary_onset = raw._last_time - assert_array_almost_equal(concat_raw.annotations.onset[idx], - expected_bad_boundary_onset, - decimal=boundary_decimal) + assert_array_almost_equal( + concat_raw.annotations.onset[idx], + [expected_bad_boundary_onset], + decimal=boundary_decimal, + err_msg="BAD boundary onset mismatch", + ) - if raw.info['meas_id'] is not None: - for key in ['secs', 'usecs', 'version']: - assert raw.info['meas_id'][key] == raw3.info['meas_id'][key] - assert_array_equal(raw.info['meas_id']['machid'], - raw3.info['meas_id']['machid']) + if raw.info["meas_id"] is not None: + for key in ["secs", "usecs", "version"]: + assert raw.info["meas_id"][key] == raw3.info["meas_id"][key] + assert_array_equal( + raw.info["meas_id"]["machid"], raw3.info["meas_id"]["machid"] + ) assert isinstance(raw.annotations, Annotations) @@ -351,14 +423,15 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, these_kwargs = kwargs.copy() these_kwargs.update(preload_kwarg) # don't use the same filename or it could create problems - if 
isinstance(these_kwargs.get('preload', None), str) and \ - op.isfile(these_kwargs['preload']): - these_kwargs['preload'] += '-1' + if isinstance(these_kwargs.get("preload", None), str) and op.isfile( + these_kwargs["preload"] + ): + these_kwargs["preload"] += "-1" whole_raw = reader(**these_kwargs) print(whole_raw) # __repr__ assert n_ch >= 2 - picks_1 = picks[:n_ch // 2] - picks_2 = picks[n_ch // 2:] + picks_1 = picks[: n_ch // 2] + picks_2 = picks[n_ch // 2 :] raw_1 = whole_raw.copy().pick(picks_1) raw_2 = whole_raw.copy().pick(picks_2) data, times = whole_raw[:] @@ -366,30 +439,34 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, data_2, times_2 = raw_2[:] assert_array_equal(times, times_1) assert_array_equal(data[picks_1], data_1) - assert_array_equal(times, times_2,) + assert_array_equal( + times, + times_2, + ) assert_array_equal(data[picks_2], data_2) # Make sure that writing info to h5 format # (all fields should be compatible) - if check_version('h5io'): + if check_version("h5io"): read_hdf5, write_hdf5 = _import_h5io_funcs() - fname_h5 = op.join(tempdir, 'info.h5') + fname_h5 = op.join(tempdir, "info.h5") with _writing_info_hdf5(raw.info): write_hdf5(fname_h5, raw.info) - new_info = Info(read_hdf5(fname_h5)) - assert object_diff(new_info, raw.info) == '' + new_info = Info(read_hdf5(fname_h5)) + assert object_diff(new_info, raw.info) == "" # Make sure that changing directory does not break anything if test_preloading: these_kwargs = kwargs.copy() key = None - for key in ('fname', - 'input_fname', # artemis123 - 'vhdr_fname', # BV - 'pdf_fname', # BTi - 'directory', # CTF - 'filename', # nedf - ): + for key in ( + "fname", + "input_fname", # artemis123 + "vhdr_fname", # BV + "pdf_fname", # BTi + "directory", # CTF + "filename", # nedf + ): try: fname = kwargs[key] except KeyError: @@ -402,7 +479,7 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, this_fname = fname[0] if isinstance(fname, list) else fname dirname = op.dirname(this_fname) these_kwargs[key] = op.basename(this_fname) - these_kwargs['preload'] = False + these_kwargs["preload"] = False orig_dir = os.getcwd() try: os.chdir(dirname) @@ -413,40 +490,42 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, # make sure that cropping works (with first_samp shift) if n_samp >= 50: # we crop to this number of samples below - for t_prop in (0., 0.5): + for t_prop in (0.0, 0.5): _test_raw_crop(reader, t_prop, kwargs) if test_preloading: use_kwargs = kwargs.copy() - use_kwargs['preload'] = True + use_kwargs["preload"] = True _test_raw_crop(reader, t_prop, use_kwargs) # make sure electrode-like sensor locations show up as dig points - eeg_dig = [d for d in (raw.info['dig'] or []) - if d['kind'] == _dig_kind_dict['eeg']] + eeg_dig = [d for d in (raw.info["dig"] or []) if d["kind"] == _dig_kind_dict["eeg"]] pick_kwargs = dict() - for t in _ELECTRODE_CH_TYPES + ('fnirs',): + for t in _ELECTRODE_CH_TYPES + ("fnirs",): pick_kwargs[t] = True dig_picks = pick_types(raw.info, exclude=(), **pick_kwargs) dig_types = _ELECTRODE_CH_TYPES + _FNIRS_CH_TYPES_SPLIT assert (len(dig_picks) > 0) == any(t in raw for t in dig_types) if len(dig_picks): - eeg_loc = np.array([ # eeg_loc a bit of a misnomer to match eeg_dig - raw.info['chs'][pick]['loc'][:3] for pick in dig_picks]) + eeg_loc = np.array( + [ # eeg_loc a bit of a misnomer to match eeg_dig + raw.info["chs"][pick]["loc"][:3] for pick in dig_picks + ] + ) eeg_loc = eeg_loc[np.isfinite(eeg_loc).all(axis=1)] if len(eeg_loc): - if 
'fnirs_cw_amplitude' in raw: + if "fnirs_cw_amplitude" in raw: assert 2 * len(eeg_dig) >= len(eeg_loc) else: assert len(eeg_dig) >= len(eeg_loc) # could have some excluded # make sure that dig points in head coords implies that fiducials are # present - if len(raw.info['dig'] or []) > 0: - card_pts = [d for d in raw.info['dig'] - if d['kind'] == _dig_kind_dict['cardinal']] - eeg_dig_head = [ - d for d in eeg_dig if d['coord_frame'] == FIFF.FIFFV_COORD_HEAD] + if len(raw.info["dig"] or []) > 0: + card_pts = [ + d for d in raw.info["dig"] if d["kind"] == _dig_kind_dict["cardinal"] + ] + eeg_dig_head = [d for d in eeg_dig if d["coord_frame"] == FIFF.FIFFV_COORD_HEAD] if len(eeg_dig_head): - assert len(card_pts) == 3, 'Cardinal points missing' + assert len(card_pts) == 3, "Cardinal points missing" if len(card_pts) == 3: # they should all be in head coords then assert len(eeg_dig_head) == len(eeg_dig) @@ -456,44 +535,51 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, def _test_raw_crop(reader, t_prop, kwargs): raw_1 = reader(**kwargs) n_samp = 50 # crop to this number of samples (per instance) - crop_t = n_samp / raw_1.info['sfreq'] + crop_t = n_samp / raw_1.info["sfreq"] t_start = t_prop * crop_t # also crop to some fraction into the first inst - extra = f' t_start={t_start}, preload={kwargs.get("preload", False)}' - stop = (n_samp - 1) / raw_1.info['sfreq'] + extra = f" t_start={t_start}, preload={kwargs.get('preload', False)}" + stop = (n_samp - 1) / raw_1.info["sfreq"] raw_1.crop(0, stop) assert len(raw_1.times) == 50 first_time = raw_1.first_time - atol = 0.5 / raw_1.info['sfreq'] + atol = 0.5 / raw_1.info["sfreq"] assert_allclose(raw_1.times[-1], stop, atol=atol) raw_2, raw_3 = raw_1.copy(), raw_1.copy() - t_tot = raw_1.times[-1] * 3 + 2. 
/ raw_1.info['sfreq'] + t_tot = raw_1.times[-1] * 3 + 2.0 / raw_1.info["sfreq"] raw_concat = concatenate_raws([raw_1, raw_2, raw_3]) - assert len(raw_concat._filenames) == 3 + assert len(raw_concat.filenames) == 3 assert_allclose(raw_concat.times[-1], t_tot) assert_allclose(raw_concat.first_time, first_time) # keep all instances, but crop to t_start at the beginning raw_concat.crop(t_start, None) - assert len(raw_concat._filenames) == 3 + assert len(raw_concat.filenames) == 3 assert_allclose(raw_concat.times[-1], t_tot - t_start, atol=atol) assert_allclose( - raw_concat.first_time, first_time + t_start, atol=atol, - err_msg=f'Base concat, {extra}') + raw_concat.first_time, + first_time + t_start, + atol=atol, + err_msg=f"Base concat, {extra}", + ) # drop the first instance raw_concat.crop(crop_t, None) - assert len(raw_concat._filenames) == 2 + assert len(raw_concat.filenames) == 2 + assert_allclose(raw_concat.times[-1], t_tot - t_start - crop_t, atol=atol) assert_allclose( - raw_concat.times[-1], t_tot - t_start - crop_t, atol=atol) - assert_allclose( - raw_concat.first_time, first_time + t_start + crop_t, - atol=atol, err_msg=f'Dropping one, {extra}') + raw_concat.first_time, + first_time + t_start + crop_t, + atol=atol, + err_msg=f"Dropping one, {extra}", + ) # drop the second instance, leaving just one raw_concat.crop(crop_t, None) - assert len(raw_concat._filenames) == 1 - assert_allclose( - raw_concat.times[-1], t_tot - t_start - 2 * crop_t, atol=atol) + assert len(raw_concat.filenames) == 1 + assert_allclose(raw_concat.times[-1], t_tot - t_start - 2 * crop_t, atol=atol) assert_allclose( - raw_concat.first_time, first_time + t_start + 2 * crop_t, - atol=atol, err_msg=f'Dropping two, {extra}') + raw_concat.first_time, + first_time + t_start + 2 * crop_t, + atol=atol, + err_msg=f"Dropping two, {extra}", + ) def _test_concat(reader, *args): @@ -514,8 +600,7 @@ def _test_concat(reader, *args): data = raw[:, :][0] for preloads in ((True, True), (True, False), (False, False)): for last_preload in (True, False): - t_crops = raw.times[np.argmin(np.abs(raw.times - 0.5)) + - [0, 1]] + t_crops = raw.times[np.argmin(np.abs(raw.times - 0.5)) + [0, 1]] raw1 = raw.copy().crop(0, t_crops[0]) if preloads[0]: raw1.load_data() @@ -528,7 +613,6 @@ def _test_concat(reader, *args): assert_allclose(data, raw1[:, :][0]) -@testing.requires_testing_data def test_time_as_index(): """Test indexing of raw times.""" raw = read_raw_fif(raw_fname) @@ -542,8 +626,8 @@ def test_time_as_index(): assert_array_equal(new_inds, np.arange(len(raw.times))) -@pytest.mark.parametrize('meas_date', [None, "orig"]) -@pytest.mark.parametrize('first_samp', [0, 10000]) +@pytest.mark.parametrize("meas_date", [None, "orig"]) +@pytest.mark.parametrize("first_samp", [0, 10000]) def test_crop_by_annotations(meas_date, first_samp): """Test crop by annotations of raw.""" raw = read_raw_fif(raw_fname) @@ -560,7 +644,8 @@ def test_crop_by_annotations(meas_date, first_samp): onset=onset, duration=[1, 0.5], description=["a", "b"], - orig_time=raw.info['meas_date']) + orig_time=raw.info["meas_date"], + ) raw.set_annotations(annot) raws = raw.crop_by_annotations() @@ -573,21 +658,23 @@ def test_crop_by_annotations(meas_date, first_samp): assert raws[1].annotations.description[0] == annot.description[1] -@pytest.mark.parametrize('offset, origin', [ - pytest.param(0, None, id='times in s. relative to first_samp (default)'), - pytest.param(0, 2.0, id='times in s. relative to first_samp'), - pytest.param(1, 1.0, id='times in s. 
relative to meas_date'), - pytest.param(2, 0.0, id='absolute times in s. relative to 0')]) +@pytest.mark.parametrize( + "offset, origin", + [ + pytest.param(0, None, id="times in s. relative to first_samp (default)"), + pytest.param(0, 2.0, id="times in s. relative to first_samp"), + pytest.param(1, 1.0, id="times in s. relative to meas_date"), + pytest.param(2, 0.0, id="absolute times in s. relative to 0"), + ], +) def test_time_as_index_ref(offset, origin): """Test indexing of raw times.""" - info = create_info(ch_names=10, sfreq=10.) + info = create_info(ch_names=10, sfreq=10.0) raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10) raw.set_meas_date(1) relative_times = raw.times - inds = raw.time_as_index(relative_times + offset, - use_rounding=True, - origin=origin) + inds = raw.time_as_index(relative_times + offset, use_rounding=True, origin=origin) assert_array_equal(inds, np.arange(raw.n_times)) @@ -611,7 +698,7 @@ def test_meas_date_orig_time(): # Raise error, it makes no sense to have an annotations object that we know # when was acquired and set it to a raw object that does not know when was # it acquired. - with pytest.raises(RuntimeError, match='Ambiguous operation'): + with pytest.raises(RuntimeError, match="Ambiguous operation"): _raw_annot(None, 1.5) # meas_time is None and orig_time is None: @@ -628,19 +715,22 @@ def test_get_data_reject(): ch_names = ["C3", "Cz", "C4"] info = create_info(ch_names, sfreq=fs) raw = RawArray(np.zeros((len(ch_names), 10 * fs)), info) - raw.set_annotations(Annotations(onset=[2, 4], duration=[3, 2], - description="bad")) + raw.set_annotations(Annotations(onset=[2, 4], duration=[3, 2], description="bad")) with catch_logging() as log: data = raw.get_data(reject_by_annotation="omit", verbose=True) - msg = ('Omitting 1024 of 2560 (40.00%) samples, retaining 1536' + - ' (60.00%) samples.') + msg = ( + "Omitting 1024 of 2560 (40.00%) samples, retaining 1536" + + " (60.00%) samples." + ) assert log.getvalue().strip() == msg assert data.shape == (len(ch_names), 1536) with catch_logging() as log: data = raw.get_data(reject_by_annotation="nan", verbose=True) - msg = ('Setting 1024 of 2560 (40.00%) samples to NaN, retaining 1536' + - ' (60.00%) samples.') + msg = ( + "Setting 1024 of 2560 (40.00%) samples to NaN, retaining 1536" + + " (60.00%) samples." + ) assert log.getvalue().strip() == msg assert data.shape == (len(ch_names), 2560) # shape doesn't change assert np.isnan(data).sum() == 3072 # but NaNs are introduced instead @@ -665,22 +755,25 @@ def test_5839(): # latency . 0 0 1 1 2 2 3 # . 
5 0 5 0 5 0 # - EXPECTED_ONSET = [1.5, 2., 2., 2.5] - EXPECTED_DURATION = [0.2, 0., 0., 0.2] - EXPECTED_DESCRIPTION = ['dummy', 'BAD boundary', 'EDGE boundary', 'dummy'] + EXPECTED_ONSET = [1.5, 2.0, 2.0, 2.5] + EXPECTED_DURATION = [0.2, 0.0, 0.0, 0.2] + EXPECTED_DESCRIPTION = ["dummy", "BAD boundary", "EDGE boundary", "dummy"] def raw_factory(meas_date): - raw = RawArray(data=np.empty((10, 10)), - info=create_info(ch_names=10, sfreq=10.), - first_samp=10) + raw = RawArray( + data=np.empty((10, 10)), + info=create_info(ch_names=10, sfreq=10.0), + first_samp=10, + ) raw.set_meas_date(meas_date) - raw.set_annotations(annotations=Annotations(onset=[.5], - duration=[.2], - description='dummy', - orig_time=None)) + raw.set_annotations( + annotations=Annotations( + onset=[0.5], duration=[0.2], description="dummy", orig_time=None + ) + ) return raw - raw_A, raw_B = [raw_factory((x, 0)) for x in [0, 2]] + raw_A, raw_B = (raw_factory((x, 0)) for x in [0, 2]) raw_A.append(raw_B) assert_array_equal(raw_A.annotations.onset, EXPECTED_ONSET) @@ -689,22 +782,51 @@ def raw_factory(meas_date): assert raw_A.annotations.orig_time == _stamp_to_dt((0, 0)) -def test_repr(): +def test_duration_property(): + """Test BaseRAW.duration property.""" + sfreq = 1000 + info = create_info(ch_names=["EEG 001"], sfreq=sfreq) + raw = BaseRaw(info, last_samps=[sfreq * 60 - 1]) + assert raw.duration == 60 + + +@pytest.mark.parametrize("sfreq", [1, 10, 100, 1000]) +@pytest.mark.parametrize( + "duration, expected", + [ + (0.1, "00:00:01"), + (1, "00:00:01"), + (59, "00:00:59"), + (59.1, "00:01:00"), + (60, "00:01:00"), + (60.1, "00:01:01"), + (61, "00:01:01"), + (61.1, "00:01:02"), + ], +) +def test_get_duration_string(sfreq, duration, expected): + """Test BaseRAW_get_duration_string() method.""" + info = create_info(ch_names=["EEG 001"], sfreq=sfreq) + raw = BaseRaw(info, last_samps=[sfreq * duration - 1]) + assert raw._get_duration_string() == expected + + +@pytest.mark.parametrize("sfreq", [1, 10, 100, 256, 1000]) +def test_repr(sfreq): """Test repr of Raw.""" - sfreq = 256 info = create_info(3, sfreq) - raw = RawArray(np.zeros((3, 10 * sfreq)), info) + sample_count = 10 * sfreq + raw = RawArray(np.zeros((3, sample_count)), info) r = repr(raw) - assert re.search('', - r) is not None, r + size_str = sizeof_fmt(raw._size) + assert r == f"" assert raw._repr_html_() # A class that sets channel data to np.arange, for testing _test_raw_reader class _RawArange(BaseRaw): - def __init__(self, preload=False, verbose=None): - info = create_info(list(str(x) for x in range(1, 9)), 1000., 'eeg') + info = create_info(list(str(x) for x in range(1, 9)), 1000.0, "eeg") super().__init__(info, preload, last_samps=(999,), verbose=verbose) assert len(self.times) == 1000 @@ -720,7 +842,7 @@ def _read_raw_arange(preload=False, verbose=None): def test_test_raw_reader(): """Test _test_raw_reader.""" - _test_raw_reader(_read_raw_arange, test_scaling=False, test_rank='less') + _test_raw_reader(_read_raw_arange, test_scaling=False, test_rank="less") @pytest.mark.slowtest @@ -736,53 +858,81 @@ def test_describe_print(): s = f.getvalue().strip().split("\n") assert len(s) == 378 # Can be 3.1, 3.3, etc. 
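# Illustrative sketch: the parametrization in test_get_duration_string above
# implies that BaseRaw._get_duration_string() rounds fractional seconds up
# before formatting as HH:MM:SS.  This is a standalone reimplementation of
# that arithmetic only; the helper name `format_duration` is hypothetical and
# not MNE's actual implementation.
import math


def format_duration(seconds: float) -> str:
    """Format a duration in seconds as HH:MM:SS, rounding partial seconds up."""
    total = int(math.ceil(seconds))
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours:02d}:{minutes:02d}:{secs:02d}"


assert format_duration(59.1) == "00:01:00"  # matches the expected table above
assert format_duration(61.1) == "00:01:02"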
- assert re.match( - r'', s[0]) is not None, s[0] - assert s[1] == " ch name type unit min Q1 median Q3 max" # noqa - assert s[2] == " 0 MEG 0113 GRAD fT/cm -221.80 -38.57 -9.64 19.29 414.67" # noqa - assert s[-1] == "375 EOG 061 EOG µV -231.41 271.28 277.16 285.66 334.69" # noqa + assert ( + re.match( + r"", + s[0], + ) + is not None + ), s[0] + assert ( + s[1] + == " ch name type unit min Q1 median Q3 max" # noqa: E501 + ) + assert ( + s[2] + == " 0 MEG 0113 GRAD fT/cm -221.80 -38.57 -9.64 19.29 414.67" # noqa: E501 + ) + assert ( + s[-1] + == "375 EOG 061 EOG µV -231.41 271.28 277.16 285.66 334.69" # noqa: E501 + ) -@requires_pandas @pytest.mark.slowtest def test_describe_df(): """Test returned data frame of describe method.""" + pytest.importorskip("pandas") fname = Path(__file__).parent / "data" / "test_raw.fif" raw = read_raw_fif(fname) df = raw.describe(data_frame=True) assert df.shape == (376, 8) - assert (df.columns.tolist() == ["name", "type", "unit", "min", "Q1", - "median", "Q3", "max"]) + assert df.columns.tolist() == [ + "name", + "type", + "unit", + "min", + "Q1", + "median", + "Q3", + "max", + ] assert df.index.name == "ch" - assert_allclose(df.iloc[0, 3:].astype(float), - np.array([-2.218017605790535e-11, - -3.857421923113974e-12, - -9.643554807784935e-13, - 1.928710961556987e-12, - 4.146728567347522e-11])) + assert_allclose( + df.iloc[0, 3:].astype(float), + np.array( + [ + -2.218017605790535e-11, + -3.857421923113974e-12, + -9.643554807784935e-13, + 1.928710961556987e-12, + 4.146728567347522e-11, + ] + ), + ) def test_get_data_units(): """Test the "units" argument of get_data method.""" # Test the unit conversion function - assert _get_scaling('eeg', 'uV') == 1e6 - assert _get_scaling('eeg', 'dV') == 1e1 - assert _get_scaling('eeg', 'pV') == 1e12 - assert _get_scaling('mag', 'fT') == 1e15 - assert _get_scaling('grad', 'T/m') == 1 - assert _get_scaling('grad', 'T/mm') == 1e-3 - assert _get_scaling('grad', 'fT/m') == 1e15 - assert _get_scaling('grad', 'fT/cm') == 1e13 - assert _get_scaling('csd', 'uV/cm²') == 1e2 + assert _get_scaling("eeg", "uV") == 1e6 + assert _get_scaling("eeg", "dV") == 1e1 + assert _get_scaling("eeg", "pV") == 1e12 + assert _get_scaling("mag", "fT") == 1e15 + assert _get_scaling("grad", "T/m") == 1 + assert _get_scaling("grad", "T/mm") == 1e-3 + assert _get_scaling("grad", "fT/m") == 1e15 + assert _get_scaling("grad", "fT/cm") == 1e13 + assert _get_scaling("csd", "uV/cm²") == 1e2 fname = Path(__file__).parent / "data" / "test_raw.fif" raw = read_raw_fif(fname) last = np.array([4.63803098e-05, 7.66563736e-05, 2.71933595e-04]) last_eeg = np.array([7.12207023e-05, 4.63803098e-05, 7.66563736e-05]) - last_grad = np.array([-3.85742192e-12, 9.64355481e-13, -1.06079103e-11]) + last_grad = np.array([-3.85742192e-12, 9.64355481e-13, -1.06079103e-11]) # None data_none = raw.get_data() @@ -790,62 +940,68 @@ def test_get_data_units(): assert_array_almost_equal(data_none[-3:, -1], last) # str: unit no conversion - data_str_noconv = raw.get_data(picks=['eeg'], units='V') + data_str_noconv = raw.get_data(picks=["eeg"], units="V") assert data_str_noconv.shape == (60, 14400) assert_array_almost_equal(data_str_noconv[-3:, -1], last_eeg) # str: simple unit - data_str_simple = raw.get_data(picks=['eeg'], units='uV') + data_str_simple = raw.get_data(picks=["eeg"], units="uV") assert data_str_simple.shape == (60, 14400) assert_array_almost_equal(data_str_simple[-3:, -1], last_eeg * 1e6) # str: fraction unit - data_str_fraction = raw.get_data(picks=['grad'], units='fT/cm') 
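# Illustrative sketch of the unit conversion exercised in test_get_data_units
# above: `units` may be a single string when one channel type is picked, or a
# dict mapping channel types to units.  Composite units combine numerator and
# denominator prefixes, e.g. "fT/cm" for gradiometers scales by 1e15 / 1e2.
# The file path below is hypothetical and assumed to exist.
import numpy as np

import mne

raw = mne.io.read_raw_fif("test_raw.fif", preload=True)
volts = raw.get_data(picks=["eeg"], units="V")        # factor 1, no conversion
microvolts = raw.get_data(picks=["eeg"], units="uV")  # scaled by 1e6
np.testing.assert_allclose(microvolts, volts * 1e6)
# per-type scaling; channel types not listed are returned unscaled
mixed = raw.get_data(units=dict(grad="fT/cm", mag="fT", eeg="uV"))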
+ data_str_fraction = raw.get_data(picks=["grad"], units="fT/cm") assert data_str_fraction.shape == (204, 14400) - assert_array_almost_equal(data_str_fraction[-3:, -1], - last_grad * (1e15 / 1e2)) + assert_array_almost_equal(data_str_fraction[-3:, -1], last_grad * (1e15 / 1e2)) # str: more than one channel type but one with unit - data_str_simplestim = raw.get_data(picks=['eeg', 'stim'], units='V') + data_str_simplestim = raw.get_data(picks=["eeg", "stim"], units="V") assert data_str_simplestim.shape == (69, 14400) assert_array_almost_equal(data_str_simplestim[-3:, -1], last_eeg) # str: too many channels - with pytest.raises(ValueError, match='more than one channel'): - raw.get_data(units='uV') + with pytest.raises(ValueError, match="more than one channel"): + raw.get_data(units="uV") # str: invalid unit - with pytest.raises(ValueError, match='is not a valid unit'): - raw.get_data(picks=['eeg'], units='fV/cm') + with pytest.raises(ValueError, match="is not a valid unit"): + raw.get_data(picks=["eeg"], units="fV/cm") # dict: combination of simple and fraction units - data_dict = raw.get_data(units=dict(grad='fT/cm', mag='fT', eeg='uV')) + data_dict = raw.get_data(units=dict(grad="fT/cm", mag="fT", eeg="uV")) assert data_dict.shape == (376, 14400) - assert_array_almost_equal(data_dict[0, -1], - -3.857421923113974e-12 * (1e15 / 1e2)) + assert_array_almost_equal(data_dict[0, -1], -3.857421923113974e-12 * (1e15 / 1e2)) assert_array_almost_equal(data_dict[2, -1], -2.1478272253525944e-13 * 1e15) assert_array_almost_equal(data_dict[-2, -1], 7.665637356879529e-05 * 1e6) # dict: channel type not in instance - data_dict_notin = raw.get_data(units=dict(hbo='uM')) + data_dict_notin = raw.get_data(units=dict(hbo="uM")) assert data_dict_notin.shape == (376, 14400) assert_array_almost_equal(data_dict_notin[-3:, -1], last) # dict: one invalid unit - with pytest.raises(ValueError, match='is not a valid unit'): - raw.get_data(units=dict(grad='fT/cV', mag='fT', eeg='uV')) + with pytest.raises(ValueError, match="is not a valid unit"): + raw.get_data(units=dict(grad="fT/cV", mag="fT", eeg="uV")) # dict: one invalid channel type - with pytest.raises(KeyError, match='is not a channel type'): - raw.get_data(units=dict(bad_type='fT/cV', mag='fT', eeg='uV')) + with pytest.raises(KeyError, match="is not a channel type"): + raw.get_data(units=dict(bad_type="fT/cV", mag="fT", eeg="uV")) # not the good type - with pytest.raises(TypeError, match='instance of None, str, or dict'): - raw.get_data(units=['fT/cm', 'fT', 'uV']) + with pytest.raises(TypeError, match="instance of None, str, or dict"): + raw.get_data(units=["fT/cm", "fT", "uV"]) def test_repr_dig_point(): """Test printing of DigPoint.""" - dp = DigPoint(r=np.arange(3), coord_frame=FIFF.FIFFV_COORD_HEAD, - kind=FIFF.FIFFV_POINT_EEG, ident=0) - assert 'mm' in repr(dp) - - dp = DigPoint(r=np.arange(3), coord_frame=FIFF.FIFFV_MNE_COORD_MRI_VOXEL, - kind=FIFF.FIFFV_POINT_CARDINAL, ident=0) - assert 'mm' not in repr(dp) - assert 'voxel' in repr(dp) + dp = DigPoint( + r=np.arange(3), + coord_frame=FIFF.FIFFV_COORD_HEAD, + kind=FIFF.FIFFV_POINT_EEG, + ident=0, + ) + assert "mm" in repr(dp) + + dp = DigPoint( + r=np.arange(3), + coord_frame=FIFF.FIFFV_MNE_COORD_MRI_VOXEL, + kind=FIFF.FIFFV_POINT_CARDINAL, + ident=0, + ) + assert "mm" not in repr(dp) + assert "voxel" in repr(dp) def test_get_data_tmin_tmax(): @@ -859,7 +1015,7 @@ def test_get_data_tmin_tmax(): d2 = raw.get_data(tmin=tmin, tmax=tmax) idxs = raw.time_as_index([tmin, tmax]) - assert_allclose(d1[:, 
idxs[0]:idxs[1]], d2) + assert_allclose(d1[:, idxs[0] : idxs[1]], d2) # specifying a too low tmin truncates to idx 0 d3 = raw.get_data(tmin=-5) @@ -874,14 +1030,57 @@ def test_get_data_tmin_tmax(): assert d5.shape[1] == 1 # validate inputs are properly raised - with pytest.raises(TypeError, match='start must be .* int'): + with pytest.raises(TypeError, match="start must be .* int"): raw.get_data(start=None) - with pytest.raises(TypeError, match='stop must be .* int'): + with pytest.raises(TypeError, match="stop must be .* int"): raw.get_data(stop=2.3) - with pytest.raises(TypeError, match='tmin must be .* float'): + with pytest.raises(TypeError, match="tmin must be .* float"): raw.get_data(tmin=[1, 2]) - with pytest.raises(TypeError, match='tmax must be .* float'): + with pytest.raises(TypeError, match="tmax must be .* float"): raw.get_data(tmax=[1, 2]) + + +def test_resamp_noop(): + """Tests resampling doesn't affect data if sfreq is identical.""" + raw = read_raw_fif(raw_fname) + data_before = raw.get_data() + data_after = raw.resample(sfreq=raw.info["sfreq"]).get_data() + assert_array_equal(data_before, data_after) + + +def test_concatenate_raw_dev_head_t(): + """Test concatenating raws with dev-head-t including nans.""" + data = np.random.randn(3, 10) + info = create_info(3, 1000.0, ["mag", "grad", "grad"]) + raw = RawArray(data, info) + raw.info["dev_head_t"] = Transform("meg", "head", np.eye(4)) + raw.info["dev_head_t"]["trans"][0, 0] = np.nan + raw2 = raw.copy() + concatenate_raws([raw, raw2]) + + +def test_last_samp(): + """Test that getting the last sample works.""" + raw = read_raw_fif(raw_fname).crop(0, 0.1).load_data() + last_data = raw._data[:, [-1]] + assert_array_equal(raw[:, -1][0], last_data) + + +def test_rescale(): + """Test rescaling channels.""" + raw = read_raw_fif(raw_fname, preload=True) # multiple channel types + + with pytest.raises(ValueError, match="If scalings is a scalar, all channels"): + raw.rescale(2) # need to use dict + + orig = raw.get_data(picks="eeg") + raw.rescale({"eeg": 2}) # need to use dict + assert_allclose(raw.get_data(picks="eeg"), orig * 2) + + raw.pick("mag") # only a single channel type "mag" + orig = raw.get_data() + raw.rescale(4) # a scalar works + assert_allclose(raw.get_data(), orig * 4) diff --git a/mne/io/tests/test_read_raw.py b/mne/io/tests/test_read_raw.py index 13c696f0f17..f74a715d9e7 100644 --- a/mne/io/tests/test_read_raw.py +++ b/mne/io/tests/test_read_raw.py @@ -1,8 +1,8 @@ """Test generic read_raw function.""" -# Authors: Clemens Brunner -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
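# Illustrative sketch of the rescale behaviour covered by test_rescale above:
# a dict maps channel types to factors, while a bare scalar is only accepted
# once a single channel type remains.  The file path is hypothetical and
# assumed to exist.
import mne

raw = mne.io.read_raw_fif("test_raw.fif", preload=True)  # multiple channel types
raw.rescale({"eeg": 2})  # only EEG channels are multiplied by 2
raw.pick("mag")          # keep a single channel type ...
raw.rescale(4)           # ... so a scalar factor is now allowed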
from pathlib import Path from shutil import copyfile @@ -11,59 +11,69 @@ from mne.datasets import testing from mne.io import read_raw -from mne.io._read_raw import split_name_ext, readers +from mne.io._read_raw import _get_readers, _get_supported, split_name_ext - -base = Path(__file__).parent.parent +base = Path(__file__).parents[1] test_base = Path(testing.data_path(download=False)) -@pytest.mark.parametrize('fname', ['x.xxx', 'x']) +@pytest.mark.parametrize("fname", ["x.xxx", "x"]) def test_read_raw_unsupported_single(fname): """Test handling of unsupported file types.""" - with pytest.raises(ValueError, match='Unsupported file type'): + with pytest.raises(ValueError, match="Unsupported file type"): read_raw(fname) -@pytest.mark.parametrize('fname', ['x.bin']) +@pytest.mark.parametrize("fname", ["x.bin"]) def test_read_raw_unsupported_multi(fname, tmp_path): """Test handling of supported file types but with bad data.""" fname = tmp_path / fname - fname.write_text('') - with pytest.raises(RuntimeError, match='Could not read.*using any'): + fname.write_text("") + with pytest.raises(RuntimeError, match="Could not read.*using any"): read_raw(fname) -@pytest.mark.parametrize('fname', ['x.vmrk', 'y.amrk']) +@pytest.mark.parametrize("fname", ["x.vmrk", "y.amrk"]) def test_read_raw_suggested(fname): """Test handling of unsupported file types with suggested alternatives.""" - with pytest.raises(ValueError, match='Try reading'): + with pytest.raises(ValueError, match="Try reading"): read_raw(fname) _testing_mark = testing._pytest_mark() -@pytest.mark.parametrize('fname', [ - base / 'tests/data/test_raw.fif', - base / 'tests/data/test_raw.fif.gz', - base / 'edf/tests/data/test.edf', - base / 'edf/tests/data/test.bdf', - base / 'brainvision/tests/data/test.vhdr', - base / 'kit/tests/data/test.sqd', - pytest.param(test_base / 'KIT' / 'data_berlin.con', marks=_testing_mark), - pytest.param( - test_base / 'ARTEMIS123' / - 'Artemis_Data_2017-04-14-10h-38m-59s_Phantom_1k_HPI_1s.bin', - marks=_testing_mark), - pytest.param( - test_base / 'FIL' / - 'sub-noise_ses-001_task-noise220622_run-001_meg.bin', - marks=( - _testing_mark, - pytest.mark.filterwarnings( - 'ignore:.*problems later!:RuntimeWarning'))), -]) +@pytest.mark.parametrize( + "fname", + [ + base / "tests/data/test_raw.fif", + base / "tests/data/test_raw.fif.gz", + base / "edf/tests/data/test.edf", + pytest.param( + base / "edf/tests/data/test.bdf", + marks=( + _testing_mark, + pytest.mark.filterwarnings("ignore:Channels contain different"), + ), + ), + base / "brainvision/tests/data/test.vhdr", + base / "kit/tests/data/test.sqd", + pytest.param(test_base / "KIT" / "data_berlin.con", marks=_testing_mark), + pytest.param( + test_base + / "ARTEMIS123" + / "Artemis_Data_2017-04-14-10h-38m-59s_Phantom_1k_HPI_1s.bin", + marks=_testing_mark, + ), + pytest.param( + test_base / "FIL" / "sub-noise_ses-001_task-noise220622_run-001_meg.bin", + marks=( + _testing_mark, + pytest.mark.filterwarnings("ignore:.*problems later!:RuntimeWarning"), + ), + ), + ], +) def test_read_raw_supported(fname): """Test supported file types.""" read_raw(fname) @@ -75,7 +85,7 @@ def test_read_raw_supported(fname): def test_split_name_ext(): """Test file name extension splitting.""" # test known extensions - for ext in readers: + for ext in _get_readers(): assert split_name_ext(f"test{ext}")[1] == ext # test unsupported extensions @@ -85,7 +95,64 @@ def test_split_name_ext(): def test_read_raw_multiple_dots(tmp_path): """Test if file names with multiple dots work correctly.""" 
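# Illustrative sketch of the generic dispatch exercised by the read_raw tests
# above: read_raw() chooses a format-specific reader from the file extension,
# raises ValueError for unknown extensions, and suggests an alternative for
# known-but-unreadable ones (e.g. the .vhdr of a BrainVision set instead of
# its .vmrk).  The .edf path is hypothetical and assumed to exist.
import mne

raw = mne.io.read_raw("recording.edf")  # dispatches to the EDF reader
try:
    mne.io.read_raw("recording.xxx")    # unknown extension
except ValueError as err:
    print(err)  # "Unsupported file type ..."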
- src = base / 'edf/tests/data/test.edf' + src = base / "edf/tests/data/test.edf" dst = tmp_path / "test.this.file.edf" copyfile(src, dst) read_raw(dst) + + +reader_excluded_from_read_raw = { + "read_raw_bti", + "read_raw_hitachi", + "read_raw_neuralynx", +} + + +def test_all_reader_documented(): + """Test that all the readers in the documentation are accepted by read_raw.""" + readers = _get_supported() + # flatten the dictionaries and retrieve the function names + functions = [foo.__name__ for value in readers.values() for foo in value.values()] + # read documentation .rst source file + doc_folder = Path(__file__).parents[3] / "doc" + if not doc_folder.exists(): + pytest.skip("Documentation folder not found.") + doc_file = doc_folder / "api" / "reading_raw_data.rst" + doc = doc_file.read_text("utf-8") + reader_lines = [ + line.strip() for line in doc.split("\n") if line.strip().startswith("read_raw_") + ] + reader_lines = [ + elt for elt in reader_lines if elt not in reader_excluded_from_read_raw + ] + missing_from_read_raw = set(reader_lines) - set(functions) + missing_from_doc = set(functions) - set(reader_lines) + if len(missing_from_doc) != 0 or len(missing_from_read_raw) != 0: + raise AssertionError( + "Functions missing from documentation:\n\t" + + "\n\t".join(missing_from_doc) + + "\n\nFunctions missing from read_raw:\n\t" + + "\n\t".join(missing_from_read_raw) + ) + if sorted(reader_lines) != list(reader_lines): + raise AssertionError( + "Functions in documentation are not sorted. Expected order:\n\t" + + "\n\t".join(sorted(reader_lines)) + ) + + +def test_all_reader_documented_in_docstring(): + """Test that all the readers are documented in read_raw docstring.""" + readers = _get_supported() + # flatten the dictionaries and retrieve the function names + functions = [foo.__name__ for value in readers.values() for foo in value.values()] + doc = read_raw.__doc__.split("Parameters")[0] + documented = [elt.strip().split("`")[0] for elt in doc.split("mne.io.")[1:]] + missing_from_docstring = set(functions) - set(documented) + if len(missing_from_docstring) != 0: + raise AssertionError( + "Functions missing from docstring:\n\t" + + "\n\t".join(missing_from_docstring) + ) + if sorted(documented) != documented: + raise AssertionError("Functions in docstring are not sorted.") diff --git a/mne/io/tests/test_reference.py b/mne/io/tests/test_reference.py deleted file mode 100644 index 8ab37fb5879..00000000000 --- a/mne/io/tests/test_reference.py +++ /dev/null @@ -1,725 +0,0 @@ -# Authors: Marijn van Vliet -# Alexandre Gramfort -# Teon Brooks -# -# License: BSD-3-Clause - -import itertools -from contextlib import nullcontext -from pathlib import Path - -import numpy as np -from numpy.testing import assert_array_equal, assert_allclose, assert_equal -import pytest - -from mne import (pick_channels, pick_types, Epochs, read_events, - set_eeg_reference, set_bipolar_reference, - add_reference_channels, create_info, make_sphere_model, - make_forward_solution, setup_volume_source_space, - pick_channels_forward, read_evokeds, - find_events) -from mne.epochs import BaseEpochs, make_fixed_length_epochs -from mne.io import RawArray, read_raw_fif -from mne.io.constants import FIFF -from mne.io.proj import _has_eeg_average_ref_proj, Projection -from mne.io.reference import _apply_reference -from mne.datasets import testing -from mne.utils import catch_logging, _record_warnings - -base_dir = Path(__file__).parent / "data" -raw_fname = base_dir / "test_raw.fif" -data_dir = testing.data_path(download=False) 
/ "MEG" / "sample" -fif_fname = data_dir / "sample_audvis_trunc_raw.fif" -eve_fname = data_dir / "sample_audvis_trunc_raw-eve.fif" -ave_fname = data_dir / "sample_audvis-ave.fif" - - -def _test_reference(raw, reref, ref_data, ref_from): - """Test whether a reference has been correctly applied.""" - # Separate EEG channels from other channel types - picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads') - picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True, - stim=True, exclude='bads') - - # Calculate indices of reference channesl - picks_ref = [raw.ch_names.index(ch) for ch in ref_from] - - # Get data - _data = raw._data - _reref = reref._data - - # Check that the ref has been properly computed - if ref_data is not None: - assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2)) - - # Get the raw EEG data and other channel data - raw_eeg_data = _data[..., picks_eeg, :] - raw_other_data = _data[..., picks_other, :] - - # Get the rereferenced EEG data - reref_eeg_data = _reref[..., picks_eeg, :] - reref_other_data = _reref[..., picks_other, :] - - # Check that non-EEG channels are untouched - assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15) - - # Undo rereferencing of EEG channels if possible - if ref_data is not None: - if isinstance(raw, BaseEpochs): - unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :] - else: - unref_eeg_data = reref_eeg_data + ref_data - assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15) - - -@testing.requires_testing_data -def test_apply_reference(): - """Test base function for rereferencing.""" - raw = read_raw_fif(fif_fname, preload=True) - - # Rereference raw data by creating a copy of original data - reref, ref_data = _apply_reference( - raw.copy(), ref_from=['EEG 001', 'EEG 002']) - assert reref.info['custom_ref_applied'] - _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002']) - - # The CAR reference projection should have been removed by the function - assert not _has_eeg_average_ref_proj(reref.info) - - # Test that data is modified in place when copy=False - reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002']) - assert raw is reref - - # Test that disabling the reference does not change anything - reref, ref_data = _apply_reference(raw.copy(), []) - assert_array_equal(raw._data, reref._data) - - # Test re-referencing Epochs object - raw = read_raw_fif(fif_fname, preload=False) - events = read_events(eve_fname) - picks_eeg = pick_types(raw.info, meg=False, eeg=True) - epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, - picks=picks_eeg, preload=True) - reref, ref_data = _apply_reference( - epochs.copy(), ref_from=['EEG 001', 'EEG 002']) - assert reref.info['custom_ref_applied'] - _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002']) - - # Test re-referencing Evoked object - evoked = epochs.average() - reref, ref_data = _apply_reference( - evoked.copy(), ref_from=['EEG 001', 'EEG 002']) - assert reref.info['custom_ref_applied'] - _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002']) - - # Referencing needs data to be preloaded - raw_np = read_raw_fif(fif_fname, preload=False) - pytest.raises(RuntimeError, _apply_reference, raw_np, ['EEG 001']) - - # Test having inactive SSP projections that deal with channels involved - # during re-referencing - raw = read_raw_fif(fif_fname, preload=True) - raw.add_proj( - Projection( - active=False, - data=dict( - col_names=['EEG 001', 'EEG 002'], - row_names=None, - data=np.array([[1, 1]]), - 
ncol=2, - nrow=1 - ), - desc='test', - kind=1, - ) - ) - # Projection concerns channels mentioned in projector - with pytest.raises(RuntimeError, match='Inactive signal space'): - _apply_reference(raw, ['EEG 001']) - - # Projection does not concern channels mentioned in projector, no error - _apply_reference(raw, ['EEG 003'], ['EEG 004']) - - # CSD cannot be rereferenced - with raw.info._unlock(): - raw.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_CSD - with pytest.raises(RuntimeError, match="Cannot set.* type 'CSD'"): - raw.set_eeg_reference() - - -@testing.requires_testing_data -def test_set_eeg_reference(): - """Test rereference eeg data.""" - raw = read_raw_fif(fif_fname, preload=True) - with raw.info._unlock(): - raw.info['projs'] = [] - - # Test setting an average reference projection - assert not _has_eeg_average_ref_proj(raw.info) - reref, ref_data = set_eeg_reference(raw, projection=True) - assert _has_eeg_average_ref_proj(reref.info) - assert not reref.info['projs'][0]['active'] - assert ref_data is None - reref.apply_proj() - eeg_chans = [raw.ch_names[ch] - for ch in pick_types(raw.info, meg=False, eeg=True)] - _test_reference(raw, reref, ref_data, - [ch for ch in eeg_chans if ch not in raw.info['bads']]) - - # Test setting an average reference when one was already present - with pytest.warns(RuntimeWarning, match='untouched'): - reref, ref_data = set_eeg_reference(raw, copy=False, projection=True) - assert ref_data is None - - # Test setting an average reference on non-preloaded data - raw_nopreload = read_raw_fif(fif_fname, preload=False) - with raw_nopreload.info._unlock(): - raw_nopreload.info['projs'] = [] - reref, ref_data = set_eeg_reference(raw_nopreload, projection=True) - assert _has_eeg_average_ref_proj(reref.info) - assert not reref.info['projs'][0]['active'] - - # Rereference raw data by creating a copy of original data - reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True) - assert reref.info['custom_ref_applied'] - _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002']) - - # Test that data is modified in place when copy=False - reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], - copy=False) - assert raw is reref - - # Test moving from custom to average reference - reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002']) - reref, _ = set_eeg_reference(reref, projection=True) - assert _has_eeg_average_ref_proj(reref.info) - assert not reref.info['custom_ref_applied'] - - # When creating an average reference fails, make sure the - # custom_ref_applied flag remains untouched. 
- reref = raw.copy() - with reref.info._unlock(): - reref.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON - reref.pick_types(meg=True, eeg=False) # Cause making average ref fail - # should have turned it off - assert reref.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_OFF - with pytest.raises(ValueError, match='found to rereference'): - set_eeg_reference(reref, projection=True) - - # Test moving from average to custom reference - reref, ref_data = set_eeg_reference(raw, projection=True) - reref, _ = set_eeg_reference(reref, ['EEG 001', 'EEG 002']) - assert not _has_eeg_average_ref_proj(reref.info) - assert len(reref.info['projs']) == 0 - assert reref.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_ON - - # Test that disabling the reference does not change the data - assert _has_eeg_average_ref_proj(raw.info) - reref, _ = set_eeg_reference(raw, []) - assert_array_equal(raw._data, reref._data) - assert not _has_eeg_average_ref_proj(reref.info) - - # make sure ref_channels=[] removes average reference projectors - assert _has_eeg_average_ref_proj(raw.info) - reref, _ = set_eeg_reference(raw, []) - assert not _has_eeg_average_ref_proj(reref.info) - - # Test that average reference gives identical results when calculated - # via SSP projection (projection=True) or directly (projection=False) - with raw.info._unlock(): - raw.info['projs'] = [] - reref_1, _ = set_eeg_reference(raw.copy(), projection=True) - reref_1.apply_proj() - reref_2, _ = set_eeg_reference(raw.copy(), projection=False) - assert_allclose(reref_1._data, reref_2._data, rtol=1e-6, atol=1e-15) - - # Test average reference without projection - reref, ref_data = set_eeg_reference(raw.copy(), ref_channels="average", - projection=False) - _test_reference(raw, reref, ref_data, eeg_chans) - - with pytest.raises(ValueError, match='supported for ref_channels="averag'): - set_eeg_reference(raw, [], True, True) - with pytest.raises(ValueError, match='supported for ref_channels="averag'): - set_eeg_reference(raw, ['EEG 001'], True, True) - - -@pytest.mark.parametrize('ch_type, msg', - [('auto', ('ECoG',)), - ('ecog', ('ECoG',)), - ('dbs', ('DBS',)), - (['ecog', 'dbs'], ('ECoG', 'DBS'))]) -@pytest.mark.parametrize('projection', [False, True]) -def test_set_eeg_reference_ch_type(ch_type, msg, projection): - """Test setting EEG reference for ECoG or DBS.""" - # gh-6454 - # gh-8739 added DBS - ch_names = ['ECOG01', 'ECOG02', 'DBS01', 'DBS02', 'MISC'] - rng = np.random.RandomState(0) - data = rng.randn(5, 1000) - raw = RawArray(data, create_info(ch_names, 1000., ['ecog'] * 2 - + ['dbs'] * 2 + ['misc'])) - - if ch_type == 'auto': - ref_ch = ch_names[:2] - else: - ref_ch = raw.copy().pick(picks=ch_type).ch_names - - with catch_logging() as log: - reref, ref_data = set_eeg_reference(raw.copy(), ch_type=ch_type, - projection=projection, - verbose=True) - - if not projection: - assert f"Applying a custom {msg}" in log.getvalue() - assert reref.info['custom_ref_applied'] # gh-7350 - _test_reference(raw, reref, ref_data, ref_ch) - match = "no EEG data found" if projection else "No channels supplied" - with pytest.raises(ValueError, match=match): - set_eeg_reference(raw, ch_type='eeg', projection=projection) - # gh-8739 - raw2 = RawArray(data, create_info(5, 1000., ['mag'] * 4 + ['misc'])) - with pytest.raises(ValueError, match='No EEG, ECoG, sEEG or DBS channels ' - 'found to rereference.'): - set_eeg_reference(raw2, ch_type='auto', projection=projection) - - -@testing.requires_testing_data -def test_set_eeg_reference_rest(): 
- """Test setting a REST reference.""" - raw = read_raw_fif(fif_fname).crop(0, 1).pick_types( - meg=False, eeg=True, exclude=()).load_data() - raw.info['bads'] = ['EEG 057'] # should be excluded - same = [raw.ch_names.index(raw.info['bads'][0])] - picks = np.setdiff1d(np.arange(len(raw.ch_names)), same) - trans = None - sphere = make_sphere_model('auto', 'auto', raw.info) - src = setup_volume_source_space(pos=20., sphere=sphere, exclude=30.) - assert src[0]['nuse'] == 223 # low but fast - fwd = make_forward_solution(raw.info, trans, src, sphere) - orig_data = raw.get_data() - avg_data = raw.copy().set_eeg_reference('average').get_data() - assert_array_equal(avg_data[same], orig_data[same]) # not processed - raw.set_eeg_reference('REST', forward=fwd) - rest_data = raw.get_data() - assert_array_equal(rest_data[same], orig_data[same]) - # should be more similar to an avg ref than nose ref - orig_corr = np.corrcoef(rest_data[picks].ravel(), - orig_data[picks].ravel())[0, 1] - avg_corr = np.corrcoef(rest_data[picks].ravel(), - avg_data[picks].ravel())[0, 1] - assert -0.6 < orig_corr < -0.5 - assert 0.1 < avg_corr < 0.2 - # and applying an avg ref after should work - avg_after = raw.set_eeg_reference('average').get_data() - assert_allclose(avg_after, avg_data, atol=1e-12) - with pytest.raises(TypeError, match='forward when ref_channels="REST"'): - raw.set_eeg_reference('REST') - fwd_bad = pick_channels_forward(fwd, raw.ch_names[:-1]) - with pytest.raises(ValueError, match='Missing channels'): - raw.set_eeg_reference('REST', forward=fwd_bad) - # compare to FieldTrip - evoked = read_evokeds(ave_fname, baseline=(None, 0))[0] - evoked.info['bads'] = [] - evoked.pick_types(meg=False, eeg=True, exclude=()) - assert len(evoked.ch_names) == 60 - # Data obtained from FieldTrip with something like (after evoked.save'ing - # then scipy.io.savemat'ing fwd['sol']['data']): - # dat = ft_read_data('ft-ave.fif'); - # load('leadfield.mat', 'G'); - # dat_ref = ft_preproc_rereference(dat, 'all', 'rest', true, G); - # sprintf('%g ', dat_ref(:, 171)); - want = np.array('-3.3265e-05 -3.2419e-05 -3.18758e-05 -3.24079e-05 -3.39801e-05 -3.40573e-05 -3.24163e-05 -3.26896e-05 -3.33814e-05 -3.54734e-05 -3.51289e-05 -3.53229e-05 -3.51532e-05 -3.53149e-05 -3.4505e-05 -3.03462e-05 -2.81848e-05 -3.08895e-05 -3.27158e-05 -3.4605e-05 -3.47728e-05 -3.2459e-05 -3.06552e-05 -2.53255e-05 -2.69671e-05 -2.83425e-05 -3.12836e-05 -3.30965e-05 -3.34099e-05 -3.32766e-05 -3.32256e-05 -3.36385e-05 -3.20796e-05 -2.7108e-05 -2.47054e-05 -2.49589e-05 -2.7382e-05 -3.09774e-05 -3.12003e-05 -3.1246e-05 -3.07572e-05 -2.64942e-05 -2.25505e-05 -2.67194e-05 -2.86e-05 -2.94903e-05 -2.96249e-05 -2.92653e-05 -2.86472e-05 -2.81016e-05 -2.69737e-05 -2.48076e-05 -3.00473e-05 -2.73404e-05 -2.60153e-05 -2.41608e-05 -2.61937e-05 -2.5539e-05 -2.47104e-05 -2.35194e-05'.split(' '), float) # noqa: E501 - norm = np.linalg.norm(want) - idx = np.argmin(np.abs(evoked.times - 0.083)) - assert idx == 170 - old = evoked.data[:, idx].ravel() - exp_var = 1 - np.linalg.norm(want - old) / norm - assert 0.006 < exp_var < 0.008 - evoked.set_eeg_reference('REST', forward=fwd) - exp_var_old = 1 - np.linalg.norm(evoked.data[:, idx] - old) / norm - assert 0.005 < exp_var_old <= 0.009 - exp_var = 1 - np.linalg.norm(evoked.data[:, idx] - want) / norm - assert 0.995 < exp_var <= 1 - - -@testing.requires_testing_data -@pytest.mark.parametrize('inst_type', ('raw', 'epochs', 'evoked')) -def test_set_bipolar_reference(inst_type): - """Test bipolar referencing.""" - raw = 
read_raw_fif(fif_fname, preload=True) - raw.apply_proj() - - if inst_type == 'raw': - inst = raw - del raw - elif inst_type in ['epochs', 'evoked']: - events = find_events(raw, stim_channel='STI 014') - epochs = Epochs(raw, events, tmin=-0.3, tmax=0.7, preload=True) - inst = epochs - if inst_type == 'evoked': - inst = epochs.average() - del epochs - - ch_info = {'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'} - with pytest.raises(KeyError, match='key errantly present'): - set_bipolar_reference(inst, 'EEG 001', 'EEG 002', 'bipolar', ch_info) - ch_info.pop('extra') - reref = set_bipolar_reference( - inst, 'EEG 001', 'EEG 002', 'bipolar', ch_info) - assert reref.info['custom_ref_applied'] - - # Compare result to a manual calculation - a = inst.copy().pick_channels(['EEG 001', 'EEG 002']) - a = a._data[..., 0, :] - a._data[..., 1, :] - b = reref.copy().pick_channels(['bipolar'])._data[..., 0, :] - assert_allclose(a, b) - - # Original channels should be replaced by a virtual one - assert 'EEG 001' not in reref.ch_names - assert 'EEG 002' not in reref.ch_names - assert 'bipolar' in reref.ch_names - - # Check channel information - bp_info = reref.info['chs'][reref.ch_names.index('bipolar')] - an_info = inst.info['chs'][inst.ch_names.index('EEG 001')] - for key in bp_info: - if key == 'coil_type': - assert bp_info[key] == FIFF.FIFFV_COIL_EEG_BIPOLAR, key - elif key == 'kind': - assert bp_info[key] == FIFF.FIFFV_EOG_CH, key - elif key != 'ch_name': - assert_equal(bp_info[key], an_info[key], err_msg=key) - - # Minimalist call - reref = set_bipolar_reference(inst, 'EEG 001', 'EEG 002') - assert 'EEG 001-EEG 002' in reref.ch_names - - # Minimalist call with twice the same anode - reref = set_bipolar_reference(inst, - ['EEG 001', 'EEG 001', 'EEG 002'], - ['EEG 002', 'EEG 003', 'EEG 003']) - assert 'EEG 001-EEG 002' in reref.ch_names - assert 'EEG 001-EEG 003' in reref.ch_names - - # Set multiple references at once - reref = set_bipolar_reference( - inst, - ['EEG 001', 'EEG 003'], - ['EEG 002', 'EEG 004'], - ['bipolar1', 'bipolar2'], - [{'kind': FIFF.FIFFV_EOG_CH}, - {'kind': FIFF.FIFFV_EOG_CH}], - ) - a = inst.copy().pick_channels(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004']) - a = np.concatenate( - [a._data[..., :1, :] - a._data[..., 1:2, :], - a._data[..., 2:3, :] - a._data[..., 3:4, :]], - axis=-2 - ) - b = reref.copy().pick_channels(['bipolar1', 'bipolar2'])._data - assert_allclose(a, b) - - # Test creating a bipolar reference that doesn't involve EEG channels: - # it should not set the custom_ref_applied flag - reref = set_bipolar_reference(inst, 'MEG 0111', 'MEG 0112', - ch_info={'kind': FIFF.FIFFV_MEG_CH}, - verbose='error') - assert not reref.info['custom_ref_applied'] - assert 'MEG 0111-MEG 0112' in reref.ch_names - - # Test a battery of invalid inputs - pytest.raises(ValueError, set_bipolar_reference, inst, - 'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar') - pytest.raises(ValueError, set_bipolar_reference, inst, - ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar') - pytest.raises(ValueError, set_bipolar_reference, inst, - 'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2']) - pytest.raises(ValueError, set_bipolar_reference, inst, - 'EEG 001', 'EEG 002', 'bipolar', - ch_info=[{'foo': 'bar'}, {'foo': 'bar'}]) - pytest.raises(ValueError, set_bipolar_reference, inst, - 'EEG 001', 'EEG 002', ch_name='EEG 003') - - # Test if bad anode/cathode raises error if on_bad="raise" - inst.info["bads"] = ["EEG 001"] - pytest.raises(ValueError, set_bipolar_reference, inst, - 'EEG 001', 'EEG 002', on_bad="raise") 
- inst.info["bads"] = ["EEG 002"] - pytest.raises(ValueError, set_bipolar_reference, inst, - 'EEG 001', 'EEG 002', on_bad="raise") - - # Test if bad anode/cathode raises warning if on_bad="warn" - inst.info["bads"] = ["EEG 001"] - pytest.warns(RuntimeWarning, set_bipolar_reference, inst, - 'EEG 001', 'EEG 002', on_bad="warn") - inst.info["bads"] = ["EEG 002"] - pytest.warns(RuntimeWarning, set_bipolar_reference, inst, - 'EEG 001', 'EEG 002', on_bad="warn") - - -def _check_channel_names(inst, ref_names): - """Check channel names.""" - if isinstance(ref_names, str): - ref_names = [ref_names] - - # Test that the names of the reference channels are present in `ch_names` - ref_idx = pick_channels(inst.info['ch_names'], ref_names) - assert len(ref_idx) == len(ref_names) - - # Test that the names of the reference channels are present in the `chs` - # list - inst.info._check_consistency() # Should raise no exceptions - - -@testing.requires_testing_data -def test_add_reference(): - """Test adding a reference.""" - raw = read_raw_fif(fif_fname, preload=True) - picks_eeg = pick_types(raw.info, meg=False, eeg=True) - # check if channel already exists - pytest.raises(ValueError, add_reference_channels, - raw, raw.info['ch_names'][0]) - # add reference channel to Raw - raw_ref = add_reference_channels(raw, 'Ref', copy=True) - assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1) - assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) - _check_channel_names(raw_ref, 'Ref') - - orig_nchan = raw.info['nchan'] - raw = add_reference_channels(raw, 'Ref', copy=False) - assert_array_equal(raw._data, raw_ref._data) - assert_equal(raw.info['nchan'], orig_nchan + 1) - _check_channel_names(raw, 'Ref') - - # for Neuromag fif's, the reference electrode location is placed in - # elements [3:6] of each "data" electrode location - assert_allclose(raw.info['chs'][-1]['loc'][:3], - raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6) - - ref_idx = raw.ch_names.index('Ref') - ref_data, _ = raw[ref_idx] - assert_array_equal(ref_data, 0) - - # add reference channel to Raw when no digitization points exist - raw = read_raw_fif(fif_fname).crop(0, 1).load_data() - picks_eeg = pick_types(raw.info, meg=False, eeg=True) - del raw.info['dig'] - - raw_ref = add_reference_channels(raw, 'Ref', copy=True) - - assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1) - assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) - _check_channel_names(raw_ref, 'Ref') - - orig_nchan = raw.info['nchan'] - raw = add_reference_channels(raw, 'Ref', copy=False) - assert_array_equal(raw._data, raw_ref._data) - assert_equal(raw.info['nchan'], orig_nchan + 1) - _check_channel_names(raw, 'Ref') - - # Test adding an existing channel as reference channel - pytest.raises(ValueError, add_reference_channels, raw, - raw.info['ch_names'][0]) - - # add two reference channels to Raw - raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True) - _check_channel_names(raw_ref, ['M1', 'M2']) - assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2) - assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :]) - assert_array_equal(raw_ref._data[-2:, :], 0) - - raw = add_reference_channels(raw, ['M1', 'M2'], copy=False) - _check_channel_names(raw, ['M1', 'M2']) - ref_idx = raw.ch_names.index('M1') - ref_idy = raw.ch_names.index('M2') - ref_data, _ = raw[[ref_idx, ref_idy]] - assert_array_equal(ref_data, 0) - - # add reference channel to epochs - raw = read_raw_fif(fif_fname, preload=True) - events = 
read_events(eve_fname) - picks_eeg = pick_types(raw.info, meg=False, eeg=True) - epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, - picks=picks_eeg, preload=True) - # default: proj=True, after which adding a Ref channel is prohibited - pytest.raises(RuntimeError, add_reference_channels, epochs, 'Ref') - - # create epochs in delayed mode, allowing removal of CAR when re-reffing - epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, - picks=picks_eeg, preload=True, proj='delayed') - epochs_ref = add_reference_channels(epochs, 'Ref', copy=True) - - assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1) - _check_channel_names(epochs_ref, 'Ref') - ref_idx = epochs_ref.ch_names.index('Ref') - ref_data = epochs_ref.get_data()[:, ref_idx, :] - assert_array_equal(ref_data, 0) - picks_eeg = pick_types(epochs.info, meg=False, eeg=True) - assert_array_equal(epochs.get_data()[:, picks_eeg, :], - epochs_ref.get_data()[:, picks_eeg, :]) - - # add two reference channels to epochs - raw = read_raw_fif(fif_fname, preload=True) - events = read_events(eve_fname) - picks_eeg = pick_types(raw.info, meg=False, eeg=True) - # create epochs in delayed mode, allowing removal of CAR when re-reffing - epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, - picks=picks_eeg, preload=True, proj='delayed') - with pytest.warns(RuntimeWarning, match='reference channels are ignored'): - epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True) - assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2) - _check_channel_names(epochs_ref, ['M1', 'M2']) - ref_idx = epochs_ref.ch_names.index('M1') - ref_idy = epochs_ref.ch_names.index('M2') - assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1') - assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2') - ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :] - assert_array_equal(ref_data, 0) - picks_eeg = pick_types(epochs.info, meg=False, eeg=True) - assert_array_equal(epochs.get_data()[:, picks_eeg, :], - epochs_ref.get_data()[:, picks_eeg, :]) - - # add reference channel to evoked - raw = read_raw_fif(fif_fname, preload=True) - events = read_events(eve_fname) - picks_eeg = pick_types(raw.info, meg=False, eeg=True) - # create epochs in delayed mode, allowing removal of CAR when re-reffing - epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, - picks=picks_eeg, preload=True, proj='delayed') - evoked = epochs.average() - evoked_ref = add_reference_channels(evoked, 'Ref', copy=True) - assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1) - _check_channel_names(evoked_ref, 'Ref') - ref_idx = evoked_ref.ch_names.index('Ref') - ref_data = evoked_ref.data[ref_idx, :] - assert_array_equal(ref_data, 0) - picks_eeg = pick_types(evoked.info, meg=False, eeg=True) - assert_array_equal(evoked.data[picks_eeg, :], - evoked_ref.data[picks_eeg, :]) - - # add two reference channels to evoked - raw = read_raw_fif(fif_fname, preload=True) - events = read_events(eve_fname) - picks_eeg = pick_types(raw.info, meg=False, eeg=True) - # create epochs in delayed mode, allowing removal of CAR when re-reffing - epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5, - picks=picks_eeg, preload=True, proj='delayed') - evoked = epochs.average() - with pytest.warns(RuntimeWarning, match='reference channels are ignored'): - evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True) - assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2) - 
_check_channel_names(evoked_ref, ['M1', 'M2']) - ref_idx = evoked_ref.ch_names.index('M1') - ref_idy = evoked_ref.ch_names.index('M2') - ref_data = evoked_ref.data[[ref_idx, ref_idy], :] - assert_array_equal(ref_data, 0) - picks_eeg = pick_types(evoked.info, meg=False, eeg=True) - assert_array_equal(evoked.data[picks_eeg, :], - evoked_ref.data[picks_eeg, :]) - - # Test invalid inputs - raw = read_raw_fif(fif_fname, preload=False) - with pytest.raises(RuntimeError, match='loaded'): - add_reference_channels(raw, ['Ref']) - raw.load_data() - with pytest.raises(ValueError, match='Channel.*already.*'): - add_reference_channels(raw, raw.ch_names[:1]) - with pytest.raises(TypeError, match='instance of'): - add_reference_channels(raw, 1) - - # gh-10878 - raw = read_raw_fif(raw_fname).crop(0, 1, include_tmax=False).load_data() - data = raw.copy().add_reference_channels(['REF']).pick_types(eeg=True) - data = data.get_data() - epochs = make_fixed_length_epochs(raw).load_data() - data_2 = epochs.copy().add_reference_channels(['REF']).pick_types(eeg=True) - data_2 = data_2.get_data()[0] - assert_allclose(data, data_2) - evoked = epochs.average() - data_3 = evoked.copy().add_reference_channels(['REF']).pick_types(eeg=True) - data_3 = data_3.get_data() - assert_allclose(data, data_3) - - -@pytest.mark.parametrize('n_ref', (1, 2)) -def test_add_reorder(n_ref): - """Test that a reference channel can be added and then data reordered.""" - # gh-8300 - raw = read_raw_fif(raw_fname).crop(0, 0.1).del_proj().pick('eeg') - assert len(raw.ch_names) == 60 - chs = ['EEG %03d' % (60 + ii) for ii in range(1, n_ref)] + ['EEG 000'] - with pytest.raises(RuntimeError, match='preload'): - with _record_warnings(): # ignore multiple warning - add_reference_channels(raw, chs, copy=False) - raw.load_data() - if n_ref == 1: - ctx = nullcontext() - else: - assert n_ref == 2 - ctx = pytest.warns(RuntimeWarning, match='locations of multiple') - with ctx: - add_reference_channels(raw, chs, copy=False) - data = raw.get_data() - assert_array_equal(data[-1], 0.) - assert raw.ch_names[-n_ref:] == chs - raw.reorder_channels(raw.ch_names[-1:] + raw.ch_names[:-1]) - assert raw.ch_names == ['EEG %03d' % ii for ii in range(60 + n_ref)] - data_new = raw.get_data() - data_new = np.concatenate([data_new[1:], data_new[:1]]) - assert_allclose(data, data_new) - - -def test_bipolar_combinations(): - """Test bipolar channel generation.""" - ch_names = ['CH' + str(ni + 1) for ni in range(10)] - info = create_info( - ch_names=ch_names, sfreq=1000., ch_types=['eeg'] * len(ch_names)) - raw_data = np.random.randn(len(ch_names), 1000) - raw = RawArray(raw_data, info) - - def _check_bipolar(raw_test, ch_a, ch_b): - picks = [raw_test.ch_names.index(ch_a + '-' + ch_b)] - get_data_res = raw_test.get_data(picks=picks)[0, :] - manual_a = raw_data[ch_names.index(ch_a), :] - manual_b = raw_data[ch_names.index(ch_b), :] - assert_array_equal(get_data_res, manual_a - manual_b) - - # test classic EOG/ECG bipolar reference (only two channels per pair). - raw_test = set_bipolar_reference(raw, ['CH2'], ['CH1'], copy=True) - _check_bipolar(raw_test, 'CH2', 'CH1') - - # test all combinations. - a_channels, b_channels = zip(*itertools.combinations(ch_names, 2)) - a_channels, b_channels = list(a_channels), list(b_channels) - raw_test = set_bipolar_reference(raw, a_channels, b_channels, copy=True) - for ch_a, ch_b in zip(a_channels, b_channels): - _check_bipolar(raw_test, ch_a, ch_b) - # check if reference channels have been dropped. 
- assert len(raw_test.ch_names) == len(a_channels) - - raw_test = set_bipolar_reference( - raw, a_channels, b_channels, drop_refs=False, copy=True) - # check if reference channels have been kept correctly. - assert len(raw_test.ch_names) == len(a_channels) + len(ch_names) - for idx, ch_label in enumerate(ch_names): - manual_ch = raw_data[np.newaxis, idx] - assert_array_equal(raw_test.get_data(ch_label), manual_ch) - - # test bipolars with a channel in both list (anode & cathode). - raw_test = set_bipolar_reference( - raw, ['CH2', 'CH1'], ['CH1', 'CH2'], copy=True) - _check_bipolar(raw_test, 'CH2', 'CH1') - _check_bipolar(raw_test, 'CH1', 'CH2') - - # test if bipolar channel is bad if anode is a bad channel - raw.info["bads"] = ["CH1"] - raw_test = set_bipolar_reference(raw, ['CH1'], ['CH2'], on_bad="ignore", - ch_name="bad_bipolar", copy=True) - assert raw_test.info["bads"] == ["bad_bipolar"] - - # test if bipolar channel is bad if cathode is a bad channel - raw.info["bads"] = ["CH2"] - raw_test = set_bipolar_reference(raw, ['CH1'], ['CH2'], on_bad="ignore", - ch_name="bad_bipolar", copy=True) - assert raw_test.info["bads"] == ["bad_bipolar"] diff --git a/mne/io/tests/test_show_fiff.py b/mne/io/tests/test_show_fiff.py deleted file mode 100644 index f25c6c04cac..00000000000 --- a/mne/io/tests/test_show_fiff.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -# Author: Eric Larson -# -# License: BSD-3-Clause - -from pathlib import Path - -from mne.io import show_fiff - -base_dir = Path(__file__).parent / "data" -fname_evoked = base_dir / "test-ave.fif" -fname_raw = base_dir / "test_raw.fif" -fname_c_annot = base_dir / "test_raw-annot.fif" - - -def test_show_fiff(): - """Test show_fiff.""" - # this is not exhaustive, but hopefully bugs will be found in use - info = show_fiff(fname_evoked) - assert 'BAD' not in info - keys = ['FIFF_EPOCH', 'FIFFB_HPI_COIL', 'FIFFB_PROJ_ITEM', - 'FIFFB_PROCESSED_DATA', 'FIFFB_EVOKED', 'FIFF_NAVE', - 'FIFF_EPOCH', 'COORD_TRANS'] - assert all(key in info for key in keys) - info = show_fiff(fname_raw, read_limit=1024) - assert 'BAD' not in info - info = show_fiff(fname_c_annot) - assert 'BAD' not in info - assert '>B' in info, info diff --git a/mne/io/tests/test_utils.py b/mne/io/tests/test_utils.py deleted file mode 100644 index 6bfe5ae933d..00000000000 --- a/mne/io/tests/test_utils.py +++ /dev/null @@ -1,19 +0,0 @@ -# -*- coding: utf-8 -*- -"""Run tests for the utilities.""" -# Author: Stefan Appelhoff -# -# License: BSD-3-Clause - -from mne.io.utils import _check_orig_units - - -def test_check_orig_units(): - """Test the checking of original units.""" - orig_units = dict(FC1='nV', Hfp3erz='n/a', Pz='uV', greekMu='μV', - microSign='µV') - orig_units = _check_orig_units(orig_units) - assert orig_units['FC1'] == 'nV' - assert orig_units['Hfp3erz'] == 'n/a' - assert orig_units['Pz'] == 'µV' - assert orig_units['greekMu'] == 'µV' - assert orig_units['microSign'] == 'µV' diff --git a/mne/io/tests/test_what.py b/mne/io/tests/test_what.py deleted file mode 100644 index 96f4cf0d42d..00000000000 --- a/mne/io/tests/test_what.py +++ /dev/null @@ -1,52 +0,0 @@ -# Authors: Eric Larson -# License: BSD - -import glob -from pathlib import Path - -import numpy as np -import pytest - -from mne import what, create_info -from mne.datasets import testing -from mne.io import RawArray -from mne.preprocessing import ICA -from mne.utils import requires_sklearn, _record_warnings - -data_path = testing.data_path(download=False) - - -@pytest.mark.slowtest -@requires_sklearn 
-@testing.requires_testing_data
-def test_what(tmp_path, verbose_debug):
-    """Test mne.what."""
-    # ICA
-    ica = ICA(max_iter=1)
-    raw = RawArray(np.random.RandomState(0).randn(3, 10),
-                   create_info(3, 1000., 'eeg'))
-    with _record_warnings():  # convergence sometimes
-        ica.fit(raw)
-    fname = tmp_path / "x-ica.fif"
-    ica.save(fname)
-    assert what(fname) == 'ica'
-    # test files
-    fnames = glob.glob(str(data_path / "MEG" / "sample" / "*.fif"))
-    fnames += glob.glob(
-        str(data_path / "subjects" / "sample" / "bem" / "*.fif")
-    )
-    fnames = sorted(fnames)
-    want_dict = dict(eve='events', ave='evoked', cov='cov', inv='inverse',
-                     fwd='forward', trans='transform', proj='proj',
-                     raw='raw', meg='raw', sol='bem solution',
-                     bem='bem surfaces', src='src', dense='bem surfaces',
-                     sparse='bem surfaces', head='bem surfaces',
-                     fiducials='fiducials')
-    for fname in fnames:
-        kind = Path(fname).stem.split("-")[-1]
-        if len(kind) > 5:
-            kind = kind.split('_')[-1]
-        this = what(fname)
-        assert this == want_dict[kind]
-    fname = data_path / "MEG" / "sample" / "sample_audvis-ave_xfit.dip"
-    assert what(fname) == 'unknown'
diff --git a/mne/io/tests/test_write.py b/mne/io/tests/test_write.py
deleted file mode 100644
index 2a67566c61f..00000000000
--- a/mne/io/tests/test_write.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Run tests for writing."""
-# Author: Eric Larson
-#
-# License: BSD-3-Clause
-
-import pytest
-
-from mne.io.constants import FIFF
-from mne.io.write import start_file, write_int
-
-
-def test_write_int(tmp_path):
-    """Test that write_int raises an error on bad values."""
-    with start_file(tmp_path / 'temp.fif') as fid:
-        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [2147483647])  # 2 ** 31 - 1
-        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [])  # 2 ** 31 - 1
-        with pytest.raises(TypeError, match=r'.*exceeds max.*EVENT_LIST\)'):
-            write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [2147483648])  # 2 ** 31
-        with pytest.raises(TypeError, match='Cannot safely write'):
-            write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [0.])  # float
diff --git a/mne/io/tree.py b/mne/io/tree.py
deleted file mode 100644
index 16293df4152..00000000000
--- a/mne/io/tree.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Authors: Alexandre Gramfort
-#          Matti Hämäläinen
-#
-# License: BSD-3-Clause
-
-import numpy as np
-
-from .constants import FIFF
-from .tag import Tag
-from .tag import read_tag
-from .write import write_id, start_block, end_block, _write
-from ..utils import logger, verbose
-
-
-def dir_tree_find(tree, kind):
-    """Find nodes of the given kind from a directory tree structure.
-
-    Parameters
-    ----------
-    tree : dict
-        Directory tree.
-    kind : int
-        Kind to find.
-
-    Returns
-    -------
-    nodes : list
-        List of matching nodes.
-    """
-    nodes = []
-
-    if isinstance(tree, list):
-        for t in tree:
-            nodes += dir_tree_find(t, kind)
-    else:
-        # Am I desirable myself?
- if tree['block'] == kind: - nodes.append(tree) - - # Search the subtrees - for child in tree['children']: - nodes += dir_tree_find(child, kind) - return nodes - - -@verbose -def make_dir_tree(fid, directory, start=0, indent=0, verbose=None): - """Create the directory tree structure.""" - FIFF_BLOCK_START = 104 - FIFF_BLOCK_END = 105 - FIFF_FILE_ID = 100 - FIFF_BLOCK_ID = 103 - FIFF_PARENT_BLOCK_ID = 110 - - if directory[start].kind == FIFF_BLOCK_START: - tag = read_tag(fid, directory[start].pos) - block = tag.data - else: - block = 0 - - logger.debug(' ' * indent + 'start { %d' % block) - - this = start - - tree = dict() - tree['block'] = block - tree['id'] = None - tree['parent_id'] = None - tree['nent'] = 0 - tree['nchild'] = 0 - tree['directory'] = directory[this] - tree['children'] = [] - - while this < len(directory): - if directory[this].kind == FIFF_BLOCK_START: - if this != start: - child, this = make_dir_tree(fid, directory, this, indent + 1) - tree['nchild'] += 1 - tree['children'].append(child) - elif directory[this].kind == FIFF_BLOCK_END: - tag = read_tag(fid, directory[start].pos) - if tag.data == block: - break - else: - tree['nent'] += 1 - if tree['nent'] == 1: - tree['directory'] = list() - tree['directory'].append(directory[this]) - - # Add the id information if available - if block == 0: - if directory[this].kind == FIFF_FILE_ID: - tag = read_tag(fid, directory[this].pos) - tree['id'] = tag.data - else: - if directory[this].kind == FIFF_BLOCK_ID: - tag = read_tag(fid, directory[this].pos) - tree['id'] = tag.data - elif directory[this].kind == FIFF_PARENT_BLOCK_ID: - tag = read_tag(fid, directory[this].pos) - tree['parent_id'] = tag.data - - this += 1 - - # Eliminate the empty directory - if tree['nent'] == 0: - tree['directory'] = None - - logger.debug(' ' * (indent + 1) + 'block = %d nent = %d nchild = %d' - % (tree['block'], tree['nent'], tree['nchild'])) - logger.debug(' ' * indent + 'end } %d' % block) - last = this - return tree, last - - -############################################################################### -# Writing - -def copy_tree(fidin, in_id, nodes, fidout): - """Copy directory subtrees from fidin to fidout.""" - if len(nodes) <= 0: - return - - if not isinstance(nodes, list): - nodes = [nodes] - - for node in nodes: - start_block(fidout, node['block']) - if node['id'] is not None: - if in_id is not None: - write_id(fidout, FIFF.FIFF_PARENT_FILE_ID, in_id) - - write_id(fidout, FIFF.FIFF_BLOCK_ID, in_id) - write_id(fidout, FIFF.FIFF_PARENT_BLOCK_ID, node['id']) - - if node['directory'] is not None: - for d in node['directory']: - # Do not copy these tags - if d.kind == FIFF.FIFF_BLOCK_ID or \ - d.kind == FIFF.FIFF_PARENT_BLOCK_ID or \ - d.kind == FIFF.FIFF_PARENT_FILE_ID: - continue - - # Read and write tags, pass data through transparently - fidin.seek(d.pos, 0) - tag = Tag(*np.fromfile(fidin, ('>i4,>I4,>i4,>i4'), 1)[0]) - tag.data = np.fromfile(fidin, '>B', tag.size) - _write(fidout, tag.data, tag.kind, 1, tag.type, '>B') - - for child in node['children']: - copy_tree(fidin, in_id, child, fidout) - - end_block(fidout, node['block']) diff --git a/mne/io/utils.py b/mne/io/utils.py deleted file mode 100644 index 8520490ef9e..00000000000 --- a/mne/io/utils.py +++ /dev/null @@ -1,317 +0,0 @@ -# -*- coding: utf-8 -*- -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Martin Luessi -# Denis Engemann -# Teon Brooks -# Marijn van Vliet -# Mainak Jas -# Stefan Appelhoff -# -# License: BSD-3-Clause - -import numpy as np -import os -import os.path as op - 
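dir_tree_find in the deleted mne/io/tree.py above is a plain depth-first search over nested dicts keyed by 'block' and 'children'. A self-contained sketch of the same traversal, using made-up block numbers rather than real FIFF constants:

def find_blocks(tree, kind):
    """Collect every node whose 'block' equals ``kind``, depth-first."""
    if isinstance(tree, list):
        return [node for t in tree for node in find_blocks(t, kind)]
    nodes = [tree] if tree['block'] == kind else []
    for child in tree['children']:
        nodes += find_blocks(child, kind)
    return nodes


# Hypothetical tree with block kinds 100/101/102 (placeholders, not FIFF values).
tree = {'block': 100, 'children': [
    {'block': 101, 'children': []},
    {'block': 102, 'children': [{'block': 101, 'children': []}]},
]}
assert len(find_blocks(tree, 101)) == 2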
-from .constants import FIFF -from .meas_info import _get_valid_units - - -def _check_orig_units(orig_units): - """Check original units from a raw file. - - Units that are close to a valid_unit but not equal can be remapped to fit - into the valid_units. All other units that are not valid will be replaced - with "n/a". - - Parameters - ---------- - orig_units : dict - Dictionary mapping channel names to their units as specified in - the header file. Example: {'FC1': 'nV'} - - Returns - ------- - orig_units_remapped : dict - Dictionary mapping channel names to their VALID units as specified in - the header file. Invalid units are now labeled "n/a". - Example: {'FC1': 'nV', 'Hfp3erz': 'n/a'} - """ - if orig_units is None: - return - valid_units = _get_valid_units() - valid_units_lowered = [unit.lower() for unit in valid_units] - orig_units_remapped = dict(orig_units) - for ch_name, unit in orig_units.items(): - - # Be lenient: we ignore case for now. - if unit.lower() in valid_units_lowered: - continue - - # Common "invalid units" can be remapped to their valid equivalent - remap_dict = dict() - remap_dict['uv'] = 'µV' - remap_dict['μv'] = 'µV' # greek letter mu vs micro sign. use micro - remap_dict['\x83\xeav'] = 'µV' # for shift-jis mu, use micro - if unit.lower() in remap_dict: - orig_units_remapped[ch_name] = remap_dict[unit.lower()] - continue - - # Some units cannot be saved, they are invalid: assign "n/a" - orig_units_remapped[ch_name] = 'n/a' - - return orig_units_remapped - - -def _find_channels(ch_names, ch_type='EOG'): - """Find EOG channel.""" - substrings = (ch_type,) - substrings = [s.upper() for s in substrings] - if ch_type == 'EOG': - substrings = ('EOG', 'EYE') - eog_idx = [idx for idx, ch in enumerate(ch_names) if - any(substring in ch.upper() for substring in substrings)] - return eog_idx - - -def _mult_cal_one(data_view, one, idx, cals, mult): - """Take a chunk of raw data, multiply by mult or cals, and store.""" - one = np.asarray(one, dtype=data_view.dtype) - assert data_view.shape[1] == one.shape[1], (data_view.shape[1], one.shape[1]) # noqa: E501 - if mult is not None: - mult.ndim == one.ndim == 2 - data_view[:] = mult @ one[idx] - else: - assert cals is not None - if isinstance(idx, slice): - data_view[:] = one[idx] - else: - # faster than doing one = one[idx] - np.take(one, idx, axis=0, out=data_view) - data_view *= cals - - -def _blk_read_lims(start, stop, buf_len): - """Deal with indexing in the middle of a data block. - - Parameters - ---------- - start : int - Starting index. - stop : int - Ending index (exclusive). - buf_len : int - Buffer size in samples. - - Returns - ------- - block_start_idx : int - The first block to start reading from. - r_lims : list - The read limits. - d_lims : list - The write limits. - - Notes - ----- - Consider this example:: - - >>> start, stop, buf_len = 2, 27, 10 - - +---------+---------+--------- - File structure: | buf0 | buf1 | buf2 | - +---------+---------+--------- - File time: 0 10 20 30 - +---------+---------+--------- - Requested time: 2 27 - - | | - blockstart blockstop - | | - start stop - - We need 27 - 2 = 25 samples (per channel) to store our data, and - we need to read from 3 buffers (30 samples) to get all of our data. - - On all reads but the first, the data we read starts at - the first sample of the buffer. On all reads but the last, - the data we read ends on the last sample of the buffer. 
- - We call ``this_data`` the variable that stores the current buffer's data, - and ``data`` the variable that stores the total output. - - On the first read, we need to do this:: - - >>> data[0:buf_len-2] = this_data[2:buf_len] # doctest: +SKIP - - On the second read, we need to do:: - - >>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len] # doctest: +SKIP - - On the final read, we need to do:: - - >>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3] # doctest: +SKIP - - This function encapsulates this logic to allow a loop over blocks, where - data is stored using the following limits:: - - >>> data[d_lims[ii, 0]:d_lims[ii, 1]] = this_data[r_lims[ii, 0]:r_lims[ii, 1]] # doctest: +SKIP - - """ # noqa: E501 - # this is used to deal with indexing in the middle of a sampling period - assert all(isinstance(x, int) for x in (start, stop, buf_len)) - block_start_idx = (start // buf_len) - block_start = block_start_idx * buf_len - last_used_samp = stop - 1 - block_stop = last_used_samp - last_used_samp % buf_len + buf_len - read_size = block_stop - block_start - n_blk = read_size // buf_len + (read_size % buf_len != 0) - start_offset = start - block_start - end_offset = block_stop - stop - d_lims = np.empty((n_blk, 2), int) - r_lims = np.empty((n_blk, 2), int) - for bi in range(n_blk): - # Triage start (sidx) and end (eidx) indices for - # data (d) and read (r) - if bi == 0: - d_sidx = 0 - r_sidx = start_offset - else: - d_sidx = bi * buf_len - start_offset - r_sidx = 0 - if bi == n_blk - 1: - d_eidx = stop - start - r_eidx = buf_len - end_offset - else: - d_eidx = (bi + 1) * buf_len - start_offset - r_eidx = buf_len - d_lims[bi] = [d_sidx, d_eidx] - r_lims[bi] = [r_sidx, r_eidx] - return block_start_idx, r_lims, d_lims - - -def _file_size(fname): - """Get the file size in bytes.""" - with open(fname, 'rb') as f: - f.seek(0, os.SEEK_END) - return f.tell() - - -def _read_segments_file(raw, data, idx, fi, start, stop, cals, mult, - dtype, n_channels=None, offset=0, trigger_ch=None): - """Read a chunk of raw data.""" - if n_channels is None: - n_channels = raw._raw_extras[fi]['orig_nchan'] - - n_bytes = np.dtype(dtype).itemsize - # data_offset and data_left count data samples (channels x time points), - # not bytes. 
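The deleted _blk_read_lims helper above turns a (start, stop) sample range into per-buffer read and write limits. As a quick standalone check of the worked example in its docstring (start=2, stop=27, buf_len=10): the limits it describes copy the requested span exactly. The arrays below are transcribed from that docstring example, not computed by MNE code:

import numpy as np

buf_len, start, stop = 10, 2, 27
# Write limits into the output array and read limits into each buffer,
# taken from the docstring example (data[0:8] = buf0[2:10], and so on).
d_lims = np.array([[0, 8], [8, 18], [18, 25]])
r_lims = np.array([[2, 10], [0, 10], [0, 7]])

whole = np.arange(30)                 # a fake "file" made of 3 buffers
buffers = whole.reshape(3, buf_len)
data = np.empty(stop - start, int)
for (d0, d1), (r0, r1), buf in zip(d_lims, r_lims, buffers):
    data[d0:d1] = buf[r0:r1]
assert np.array_equal(data, whole[start:stop])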
- data_offset = n_channels * start * n_bytes + offset - data_left = (stop - start) * n_channels - - # Read up to 100 MB of data at a time, block_size is in data samples - block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels - block_size = min(data_left, block_size) - with open(raw._filenames[fi], 'rb', buffering=0) as fid: - fid.seek(data_offset) - # extract data in chunks - for sample_start in np.arange(0, data_left, block_size) // n_channels: - count = min(block_size, data_left - sample_start * n_channels) - block = np.fromfile(fid, dtype, count) - if block.size != count: - raise RuntimeError('Incorrect number of samples (%s != %s), ' - 'please report this error to MNE-Python ' - 'developers' % (block.size, count)) - block = block.reshape(n_channels, -1, order='F') - n_samples = block.shape[1] # = count // n_channels - sample_stop = sample_start + n_samples - if trigger_ch is not None: - stim_ch = trigger_ch[start:stop][sample_start:sample_stop] - block = np.vstack((block, stim_ch)) - data_view = data[:, sample_start:sample_stop] - _mult_cal_one(data_view, block, idx, cals, mult) - - -def read_str(fid, count=1): - """Read string from a binary file in a python version compatible way.""" - dtype = np.dtype('>S%i' % count) - string = fid.read(dtype.itemsize) - data = np.frombuffer(string, dtype=dtype)[0] - bytestr = b''.join([data[0:data.index(b'\x00') if - b'\x00' in data else count]]) - - return str(bytestr.decode('ascii')) # Return native str type for Py2/3 - - -def _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc): - """Initialize info['chs'] for eeg channels.""" - chs = list() - for idx, ch_name in enumerate(ch_names): - if ch_name in eog or idx in eog: - coil_type = FIFF.FIFFV_COIL_NONE - kind = FIFF.FIFFV_EOG_CH - elif ch_name in ecg or idx in ecg: - coil_type = FIFF.FIFFV_COIL_NONE - kind = FIFF.FIFFV_ECG_CH - elif ch_name in emg or idx in emg: - coil_type = FIFF.FIFFV_COIL_NONE - kind = FIFF.FIFFV_EMG_CH - elif ch_name in misc or idx in misc: - coil_type = FIFF.FIFFV_COIL_NONE - kind = FIFF.FIFFV_MISC_CH - else: - coil_type = ch_coil - kind = ch_kind - - chan_info = {'cal': cals[idx], 'logno': idx + 1, 'scanno': idx + 1, - 'range': 1.0, 'unit_mul': FIFF.FIFF_UNITM_NONE, - 'ch_name': ch_name, 'unit': FIFF.FIFF_UNIT_V, - 'coord_frame': FIFF.FIFFV_COORD_HEAD, - 'coil_type': coil_type, 'kind': kind, 'loc': np.zeros(12)} - if coil_type == FIFF.FIFFV_COIL_EEG: - chan_info['loc'][:3] = np.nan - chs.append(chan_info) - return chs - - -def _synthesize_stim_channel(events, n_samples): - """Synthesize a stim channel from events read from an event file. - - Parameters - ---------- - events : array, shape (n_events, 3) - Each row representing an event. - n_samples : int - The number of samples. - - Returns - ------- - stim_channel : array, shape (n_samples,) - An array containing the whole recording's event marking. 
- """ - # select events overlapping buffer - events = events.copy() - events[events[:, 1] < 1, 1] = 1 - # create output buffer - stim_channel = np.zeros(n_samples, int) - for onset, duration, trigger in events: - stim_channel[onset:onset + duration] = trigger - return stim_channel - - -def _construct_bids_filename(base, ext, part_idx, validate=True): - """Construct a BIDS compatible filename for split files.""" - # insert index in filename - dirname = op.dirname(base) - base = op.basename(base) - deconstructed_base = base.split('_') - if len(deconstructed_base) < 2 and validate: - raise ValueError('Filename base must end with an underscore followed ' - f'by the modality (e.g., _eeg or _meg), got {base}') - suffix = deconstructed_base[-1] - base = '_'.join(deconstructed_base[:-1]) - use_fname = '{}_split-{:02}_{}{}'.format(base, part_idx, suffix, ext) - if dirname: - use_fname = op.join(dirname, use_fname) - return use_fname diff --git a/mne/io/what.py b/mne/io/what.py deleted file mode 100644 index 0d4f5d2297a..00000000000 --- a/mne/io/what.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- coding: utf-8 -*- -# Authors: Eric Larson -# -# License: BSD-3-Clause - -from collections import OrderedDict -from inspect import signature - -from ..utils import _check_fname, logger - - -def what(fname): - """Try to determine the type of the FIF file. - - Parameters - ---------- - fname : path-like - The filename. Should end in ``.fif`` or ``.fif.gz``. - - Returns - ------- - what : str | None - The type of the file. Will be 'unknown' if it could not be determined. - - Notes - ----- - .. versionadded:: 0.19 - """ - from .fiff import read_raw_fif - from ..epochs import read_epochs - from ..evoked import read_evokeds - from ..preprocessing import read_ica - from ..forward import read_forward_solution - from ..minimum_norm import read_inverse_operator - from ..source_space import read_source_spaces - from ..bem import read_bem_solution, read_bem_surfaces - from ..cov import read_cov - from ..transforms import read_trans - from ..event import read_events - from ..proj import read_proj - from .meas_info import read_fiducials - _check_fname(fname, overwrite='read', must_exist=True) - checks = OrderedDict() - checks['raw'] = read_raw_fif - checks['ica'] = read_ica - checks['epochs'] = read_epochs - checks['evoked'] = read_evokeds - checks['forward'] = read_forward_solution - checks['inverse'] = read_inverse_operator - checks['src'] = read_source_spaces - checks['bem solution'] = read_bem_solution - checks['bem surfaces'] = read_bem_surfaces - checks['cov'] = read_cov - checks['transform'] = read_trans - checks['events'] = read_events - checks['fiducials'] = read_fiducials - checks['proj'] = read_proj - for what, func in checks.items(): - args = signature(func).parameters - assert 'verbose' in args, func - kwargs = dict(verbose='error') - if 'preload' in args: - kwargs['preload'] = False - try: - func(fname, **kwargs) - except Exception as exp: - logger.debug('Not %s: %s' % (what, exp)) - else: - return what - return 'unknown' diff --git a/mne/io/write.py b/mne/io/write.py deleted file mode 100644 index edd240b8570..00000000000 --- a/mne/io/write.py +++ /dev/null @@ -1,507 +0,0 @@ -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# -# License: BSD-3-Clause - -from contextlib import contextmanager -from gzip import GzipFile -import os.path as op -import re -import time -import uuid - -import numpy as np - -from .constants import FIFF -from ..utils import logger, _file_like -from ..utils.numerics import _cal_to_julian 
- -# We choose a "magic" date to store (because meas_date is obligatory) -# to treat as meas_date=None. This one should be impossible for systems -# to write -- the second field is microseconds, so anything >= 1e6 -# should be moved into the first field (seconds). -DATE_NONE = (0, 2 ** 31 - 1) - - -def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype): - """Write data.""" - if isinstance(data, np.ndarray): - data_size *= data.size - - # XXX for string types the data size is used as - # computed in ``write_string``. - - fid.write(np.array(kind, dtype='>i4').tobytes()) - fid.write(np.array(FIFFT_TYPE, dtype='>i4').tobytes()) - fid.write(np.array(data_size, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) - fid.write(np.array(data, dtype=dtype).tobytes()) - - -def _get_split_size(split_size): - """Convert human-readable bytes to machine-readable bytes.""" - if isinstance(split_size, str): - exp = dict(MB=20, GB=30).get(split_size[-2:], None) - if exp is None: - raise ValueError('split_size has to end with either' - '"MB" or "GB"') - split_size = int(float(split_size[:-2]) * 2 ** exp) - - if split_size > 2147483648: - raise ValueError('split_size cannot be larger than 2GB') - return split_size - - -_NEXT_FILE_BUFFER = 1048576 # 2 ** 20 extra cushion for last post-data tags - - -def write_nop(fid, last=False): - """Write a FIFF_NOP.""" - fid.write(np.array(FIFF.FIFF_NOP, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFT_VOID, dtype='>i4').tobytes()) - fid.write(np.array(0, dtype='>i4').tobytes()) - next_ = FIFF.FIFFV_NEXT_NONE if last else FIFF.FIFFV_NEXT_SEQ - fid.write(np.array(next_, dtype='>i4').tobytes()) - - -INT32_MAX = 2147483647 - - -def write_int(fid, kind, data): - """Write a 32-bit integer tag to a fif file.""" - data_size = 4 - data = np.asarray(data) - if data.dtype.kind not in 'uib' and data.size > 0: - raise TypeError( - f'Cannot safely write data with dtype {data.dtype} as int') - max_val = data.max() if data.size > 0 else 0 - if max_val > INT32_MAX: - raise TypeError( - f'Value {max_val} exceeds maximum allowed ({INT32_MAX}) for ' - f'tag {kind}') - data = data.astype('>i4').T - _write(fid, data, kind, data_size, FIFF.FIFFT_INT, '>i4') - - -def write_double(fid, kind, data): - """Write a double-precision floating point tag to a fif file.""" - data_size = 8 - data = np.array(data, dtype='>f8').T - _write(fid, data, kind, data_size, FIFF.FIFFT_DOUBLE, '>f8') - - -def write_float(fid, kind, data): - """Write a single-precision floating point tag to a fif file.""" - data_size = 4 - data = np.array(data, dtype='>f4').T - _write(fid, data, kind, data_size, FIFF.FIFFT_FLOAT, '>f4') - - -def write_dau_pack16(fid, kind, data): - """Write a dau_pack16 tag to a fif file.""" - data_size = 2 - data = np.array(data, dtype='>i2').T - _write(fid, data, kind, data_size, FIFF.FIFFT_DAU_PACK16, '>i2') - - -def write_complex64(fid, kind, data): - """Write a 64 bit complex floating point tag to a fif file.""" - data_size = 8 - data = np.array(data, dtype='>c8').T - _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, '>c8') - - -def write_complex128(fid, kind, data): - """Write a 128 bit complex floating point tag to a fif file.""" - data_size = 16 - data = np.array(data, dtype='>c16').T - _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, '>c16') - - -def write_julian(fid, kind, data): - """Write a Julian-formatted date to a FIF file.""" - assert len(data) == 3 - data_size = 4 - jd = np.sum(_cal_to_julian(*data)) - data = 
np.array(jd, dtype='>i4') - _write(fid, data, kind, data_size, FIFF.FIFFT_JULIAN, '>i4') - - -def write_string(fid, kind, data): - """Write a string tag.""" - str_data = str(data).encode('latin1') - data_size = len(str_data) # therefore compute size here - my_dtype = '>a' # py2/3 compatible on writing -- don't ask me why - if data_size > 0: - _write(fid, str_data, kind, data_size, FIFF.FIFFT_STRING, my_dtype) - - -def write_name_list(fid, kind, data): - """Write a colon-separated list of names. - - Parameters - ---------- - data : list of strings - """ - write_string(fid, kind, ':'.join(data)) - - -def write_name_list_sanitized(fid, kind, lst, name): - """Write a sanitized, colon-separated list of names.""" - write_string(fid, kind, _safe_name_list(lst, 'write', name)) - - -def _safe_name_list(lst, operation, name): - if operation == 'write': - assert isinstance(lst, (list, tuple, np.ndarray)), type(lst) - if any('{COLON}' in val for val in lst): - raise ValueError( - f'The substring "{{COLON}}" in {name} not supported.') - return ':'.join(val.replace(':', '{COLON}') for val in lst) - else: - # take a sanitized string and return a list of strings - assert operation == 'read' - assert lst is None or isinstance(lst, str) - if not lst: # None or empty string - return [] - return [val.replace('{COLON}', ':') for val in lst.split(':')] - - -def write_float_matrix(fid, kind, mat): - """Write a single-precision floating-point matrix tag.""" - FIFFT_MATRIX = 1 << 30 - FIFFT_MATRIX_FLOAT = FIFF.FIFFT_FLOAT | FIFFT_MATRIX - - data_size = 4 * mat.size + 4 * (mat.ndim + 1) - - fid.write(np.array(kind, dtype='>i4').tobytes()) - fid.write(np.array(FIFFT_MATRIX_FLOAT, dtype='>i4').tobytes()) - fid.write(np.array(data_size, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) - fid.write(np.array(mat, dtype='>f4').tobytes()) - - dims = np.empty(mat.ndim + 1, dtype=np.int32) - dims[:mat.ndim] = mat.shape[::-1] - dims[-1] = mat.ndim - fid.write(np.array(dims, dtype='>i4').tobytes()) - check_fiff_length(fid) - - -def write_double_matrix(fid, kind, mat): - """Write a double-precision floating-point matrix tag.""" - FIFFT_MATRIX = 1 << 30 - FIFFT_MATRIX_DOUBLE = FIFF.FIFFT_DOUBLE | FIFFT_MATRIX - - data_size = 8 * mat.size + 4 * (mat.ndim + 1) - - fid.write(np.array(kind, dtype='>i4').tobytes()) - fid.write(np.array(FIFFT_MATRIX_DOUBLE, dtype='>i4').tobytes()) - fid.write(np.array(data_size, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) - fid.write(np.array(mat, dtype='>f8').tobytes()) - - dims = np.empty(mat.ndim + 1, dtype=np.int32) - dims[:mat.ndim] = mat.shape[::-1] - dims[-1] = mat.ndim - fid.write(np.array(dims, dtype='>i4').tobytes()) - check_fiff_length(fid) - - -def write_int_matrix(fid, kind, mat): - """Write integer 32 matrix tag.""" - FIFFT_MATRIX = 1 << 30 - FIFFT_MATRIX_INT = FIFF.FIFFT_INT | FIFFT_MATRIX - - data_size = 4 * mat.size + 4 * 3 - - fid.write(np.array(kind, dtype='>i4').tobytes()) - fid.write(np.array(FIFFT_MATRIX_INT, dtype='>i4').tobytes()) - fid.write(np.array(data_size, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) - fid.write(np.array(mat, dtype='>i4').tobytes()) - - dims = np.empty(3, dtype=np.int32) - dims[0] = mat.shape[1] - dims[1] = mat.shape[0] - dims[2] = 2 - fid.write(np.array(dims, dtype='>i4').tobytes()) - check_fiff_length(fid) - - -def write_complex_float_matrix(fid, kind, mat): - """Write complex 64 matrix tag.""" - FIFFT_MATRIX = 1 << 30 - 
FIFFT_MATRIX_COMPLEX_FLOAT = FIFF.FIFFT_COMPLEX_FLOAT | FIFFT_MATRIX - - data_size = 4 * 2 * mat.size + 4 * (mat.ndim + 1) - - fid.write(np.array(kind, dtype='>i4').tobytes()) - fid.write(np.array(FIFFT_MATRIX_COMPLEX_FLOAT, dtype='>i4').tobytes()) - fid.write(np.array(data_size, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) - fid.write(np.array(mat, dtype='>c8').tobytes()) - - dims = np.empty(mat.ndim + 1, dtype=np.int32) - dims[:mat.ndim] = mat.shape[::-1] - dims[-1] = mat.ndim - fid.write(np.array(dims, dtype='>i4').tobytes()) - check_fiff_length(fid) - - -def write_complex_double_matrix(fid, kind, mat): - """Write complex 128 matrix tag.""" - FIFFT_MATRIX = 1 << 30 - FIFFT_MATRIX_COMPLEX_DOUBLE = FIFF.FIFFT_COMPLEX_DOUBLE | FIFFT_MATRIX - - data_size = 8 * 2 * mat.size + 4 * (mat.ndim + 1) - - fid.write(np.array(kind, dtype='>i4').tobytes()) - fid.write(np.array(FIFFT_MATRIX_COMPLEX_DOUBLE, dtype='>i4').tobytes()) - fid.write(np.array(data_size, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) - fid.write(np.array(mat, dtype='>c16').tobytes()) - - dims = np.empty(mat.ndim + 1, dtype=np.int32) - dims[:mat.ndim] = mat.shape[::-1] - dims[-1] = mat.ndim - fid.write(np.array(dims, dtype='>i4').tobytes()) - check_fiff_length(fid) - - -def get_machid(): - """Get (mostly) unique machine ID. - - Returns - ------- - ids : array (length 2, int32) - The machine identifier used in MNE. - """ - mac = b'%012x' % uuid.getnode() # byte conversion for Py3 - mac = re.findall(b'..', mac) # split string - mac += [b'00', b'00'] # add two more fields - - # Convert to integer in reverse-order (for some reason) - from codecs import encode - mac = b''.join([encode(h, 'hex_codec') for h in mac[::-1]]) - ids = np.flipud(np.frombuffer(mac, np.int32, count=2)) - return ids - - -def get_new_file_id(): - """Create a new file ID tag.""" - secs, usecs = divmod(time.time(), 1.) - secs, usecs = int(secs), int(usecs * 1e6) - return {'machid': get_machid(), 'version': FIFF.FIFFC_VERSION, - 'secs': secs, 'usecs': usecs} - - -def write_id(fid, kind, id_=None): - """Write fiff id.""" - id_ = _generate_meas_id() if id_ is None else id_ - - data_size = 5 * 4 # The id comprises five integers - fid.write(np.array(kind, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFT_ID_STRUCT, dtype='>i4').tobytes()) - fid.write(np.array(data_size, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) - - # Collect the bits together for one write - arr = np.array([id_['version'], - id_['machid'][0], id_['machid'][1], - id_['secs'], id_['usecs']], dtype='>i4') - fid.write(arr.tobytes()) - - -def start_block(fid, kind): - """Write a FIFF_BLOCK_START tag.""" - write_int(fid, FIFF.FIFF_BLOCK_START, kind) - - -def end_block(fid, kind): - """Write a FIFF_BLOCK_END tag.""" - write_int(fid, FIFF.FIFF_BLOCK_END, kind) - - -def start_file(fname, id_=None): - """Open a fif file for writing and writes the compulsory header tags. - - Parameters - ---------- - fname : path-like | fid - The name of the file to open. It is recommended - that the name ends with .fif or .fif.gz. Can also be an - already opened file. - id_ : dict | None - ID to use for the FIFF_FILE_ID. 
- """ - if _file_like(fname): - logger.debug('Writing using %s I/O' % type(fname)) - fid = fname - fid.seek(0) - else: - fname = str(fname) - if op.splitext(fname)[1].lower() == '.gz': - logger.debug('Writing using gzip') - # defaults to compression level 9, which is barely smaller but much - # slower. 2 offers a good compromise. - fid = GzipFile(fname, "wb", compresslevel=2) - else: - logger.debug('Writing using normal I/O') - fid = open(fname, "wb") - # Write the compulsory items - write_id(fid, FIFF.FIFF_FILE_ID, id_) - write_int(fid, FIFF.FIFF_DIR_POINTER, -1) - write_int(fid, FIFF.FIFF_FREE_LIST, -1) - return fid - - -@contextmanager -def start_and_end_file(fname, id_=None): - """Start and (if successfully written) close the file.""" - with start_file(fname, id_=id_) as fid: - yield fid - end_file(fid) # we only hit this line if the yield does not err - - -def check_fiff_length(fid, close=True): - """Ensure our file hasn't grown too large to work properly.""" - if fid.tell() > 2147483648: # 2 ** 31, FIFF uses signed 32-bit locations - if close: - fid.close() - raise IOError('FIFF file exceeded 2GB limit, please split file, reduce' - ' split_size (if possible), or save to a different ' - 'format') - - -def end_file(fid): - """Write the closing tags to a fif file and closes the file.""" - write_nop(fid, last=True) - check_fiff_length(fid) - fid.close() - - -def write_coord_trans(fid, trans): - """Write a coordinate transformation structure.""" - data_size = 4 * 2 * 12 + 4 * 2 - fid.write(np.array(FIFF.FIFF_COORD_TRANS, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFT_COORD_TRANS_STRUCT, dtype='>i4').tobytes()) - fid.write(np.array(data_size, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) - fid.write(np.array(trans['from'], dtype='>i4').tobytes()) - fid.write(np.array(trans['to'], dtype='>i4').tobytes()) - - # The transform... 
- rot = trans['trans'][:3, :3] - move = trans['trans'][:3, 3] - fid.write(np.array(rot, dtype='>f4').tobytes()) - fid.write(np.array(move, dtype='>f4').tobytes()) - - # ...and its inverse - trans_inv = np.linalg.inv(trans['trans']) - rot = trans_inv[:3, :3] - move = trans_inv[:3, 3] - fid.write(np.array(rot, dtype='>f4').tobytes()) - fid.write(np.array(move, dtype='>f4').tobytes()) - - -def write_ch_info(fid, ch): - """Write a channel information record to a fif file.""" - data_size = 4 * 13 + 4 * 7 + 16 - - fid.write(np.array(FIFF.FIFF_CH_INFO, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFT_CH_INFO_STRUCT, dtype='>i4').tobytes()) - fid.write(np.array(data_size, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) - - # Start writing fiffChInfoRec - fid.write(np.array(ch['scanno'], dtype='>i4').tobytes()) - fid.write(np.array(ch['logno'], dtype='>i4').tobytes()) - fid.write(np.array(ch['kind'], dtype='>i4').tobytes()) - fid.write(np.array(ch['range'], dtype='>f4').tobytes()) - fid.write(np.array(ch['cal'], dtype='>f4').tobytes()) - fid.write(np.array(ch['coil_type'], dtype='>i4').tobytes()) - fid.write(np.array(ch['loc'], dtype='>f4').tobytes()) # writing 12 values - - # unit and unit multiplier - fid.write(np.array(ch['unit'], dtype='>i4').tobytes()) - fid.write(np.array(ch['unit_mul'], dtype='>i4').tobytes()) - - # Finally channel name - ch_name = ch['ch_name'][:15] - fid.write(np.array(ch_name, dtype='>c').tobytes()) - fid.write(b'\0' * (16 - len(ch_name))) - - -def write_dig_points(fid, dig, block=False, coord_frame=None): - """Write a set of digitizer data points into a fif file.""" - if dig is not None: - data_size = 5 * 4 - if block: - start_block(fid, FIFF.FIFFB_ISOTRAK) - if coord_frame is not None: - write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame) - for d in dig: - fid.write(np.array(FIFF.FIFF_DIG_POINT, '>i4').tobytes()) - fid.write(np.array(FIFF.FIFFT_DIG_POINT_STRUCT, '>i4').tobytes()) - fid.write(np.array(data_size, dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, '>i4').tobytes()) - # Start writing fiffDigPointRec - fid.write(np.array(d['kind'], '>i4').tobytes()) - fid.write(np.array(d['ident'], '>i4').tobytes()) - fid.write(np.array(d['r'][:3], '>f4').tobytes()) - if block: - end_block(fid, FIFF.FIFFB_ISOTRAK) - - -def write_float_sparse_rcs(fid, kind, mat): - """Write a single-precision sparse compressed row matrix tag.""" - return write_float_sparse(fid, kind, mat, fmt='csr') - - -def write_float_sparse_ccs(fid, kind, mat): - """Write a single-precision sparse compressed column matrix tag.""" - return write_float_sparse(fid, kind, mat, fmt='csc') - - -def write_float_sparse(fid, kind, mat, fmt='auto'): - """Write a single-precision floating-point sparse matrix tag.""" - from scipy import sparse - from .tag import _matrix_coding_CCS, _matrix_coding_RCS - if fmt == 'auto': - fmt = 'csr' if isinstance(mat, sparse.csr_matrix) else 'csc' - if fmt == 'csr': - need = sparse.csr_matrix - bits = _matrix_coding_RCS - else: - need = sparse.csc_matrix - bits = _matrix_coding_CCS - if not isinstance(mat, need): - raise TypeError('Must write %s, got %s' % (fmt.upper(), type(mat),)) - FIFFT_MATRIX = bits << 16 - FIFFT_MATRIX_FLOAT_RCS = FIFF.FIFFT_FLOAT | FIFFT_MATRIX - - nnzm = mat.nnz - nrow = mat.shape[0] - data_size = 4 * nnzm + 4 * nnzm + 4 * (nrow + 1) + 4 * 4 - - fid.write(np.array(kind, dtype='>i4').tobytes()) - fid.write(np.array(FIFFT_MATRIX_FLOAT_RCS, dtype='>i4').tobytes()) - fid.write(np.array(data_size, 
dtype='>i4').tobytes()) - fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes()) - - fid.write(np.array(mat.data, dtype='>f4').tobytes()) - fid.write(np.array(mat.indices, dtype='>i4').tobytes()) - fid.write(np.array(mat.indptr, dtype='>i4').tobytes()) - - dims = [nnzm, mat.shape[0], mat.shape[1], 2] - fid.write(np.array(dims, dtype='>i4').tobytes()) - check_fiff_length(fid) - - -def _generate_meas_id(): - """Generate a new meas_id dict.""" - id_ = dict() - id_['version'] = FIFF.FIFFC_VERSION - id_['machid'] = get_machid() - id_['secs'], id_['usecs'] = DATE_NONE - return id_ diff --git a/mne/label.py b/mne/label.py index 69e9af93f52..2d55de755c0 100644 --- a/mne/label.py +++ b/mne/label.py @@ -1,31 +1,53 @@ -# Authors: Alexandre Gramfort -# Martin Luessi -# Denis Engemann -# +# Authors: The MNE-Python contributors. # License: BSD-3-Clause +# Copyright the MNE-Python contributors. -from collections import defaultdict -from colorsys import hsv_to_rgb, rgb_to_hsv import copy as cp import os import os.path as op import re +from collections import defaultdict +from colorsys import hsv_to_rgb, rgb_to_hsv import numpy as np +from scipy import linalg, sparse +from .fixes import _safe_svd from .morph_map import read_morph_map from .parallel import parallel_func -from .source_estimate import (SourceEstimate, VolSourceEstimate, - _center_of_mass, extract_label_time_course, - spatial_src_adjacency) -from .source_space import (add_source_space_distances, SourceSpaces, - _ensure_src) +from .source_estimate import ( + SourceEstimate, + VolSourceEstimate, + _center_of_mass, + extract_label_time_course, + spatial_src_adjacency, +) +from .source_space._source_space import ( + SourceSpaces, + _ensure_src, + add_source_space_distances, +) from .stats.cluster_level import _find_clusters, _get_components -from .surface import (complete_surface_info, read_surface, fast_cross_3d, - _mesh_borders, mesh_edges, mesh_dist) -from .utils import (get_subjects_dir, _check_subject, logger, verbose, warn, - check_random_state, _validate_type, fill_doc, - _check_option, _check_fname) +from .surface import ( + _mesh_borders, + complete_surface_info, + fast_cross_3d, + mesh_dist, + mesh_edges, + read_surface, +) +from .utils import ( + _check_fname, + _check_option, + _check_subject, + _validate_type, + check_random_state, + fill_doc, + get_subjects_dir, + logger, + verbose, + warn, +) def _blend_colors(color_1, color_2): @@ -58,14 +80,14 @@ def _blend_colors(color_1, color_2): h_2, s_2, v_2 = rgb_to_hsv(r_2, g_2, b_2) hue_diff = abs(h_1 - h_2) if hue_diff < 0.5: - h = min(h_1, h_2) + hue_diff / 2. + h = min(h_1, h_2) + hue_diff / 2.0 else: - h = max(h_1, h_2) + (1. - hue_diff) / 2. - h %= 1. - s = (s_1 + s_2) / 2. - v = (v_1 + v_2) / 2. + h = max(h_1, h_2) + (1.0 - hue_diff) / 2.0 + h %= 1.0 + s = (s_1 + s_2) / 2.0 + v = (v_1 + v_2) / 2.0 r, g, b = hsv_to_rgb(h, s, v) - a = (a_1 + a_2) / 2. + a = (a_1 + a_2) / 2.0 color = (r, g, b, a) return color @@ -88,7 +110,7 @@ def _split_colors(color, n): """ r, g, b, a = color h, s, v = rgb_to_hsv(r, g, b) - gradient_range = np.sqrt(n / 10.) 
+ gradient_range = np.sqrt(n / 10.0) if v > 0.5: v_max = min(0.95, v + gradient_range / 2) v_min = max(0.05, v_max - gradient_range) @@ -98,11 +120,19 @@ def _split_colors(color, n): hsv_colors = ((h, s, v_) for v_ in np.linspace(v_min, v_max, n)) rgb_colors = (hsv_to_rgb(h_, s_, v_) for h_, s_, v_ in hsv_colors) - rgba_colors = ((r_, g_, b_, a,) for r_, g_, b_ in rgb_colors) + rgba_colors = ( + ( + r_, + g_, + b_, + a, + ) + for r_, g_, b_ in rgb_colors + ) return tuple(rgba_colors) -def _n_colors(n, bytes_=False, cmap='hsv'): +def _n_colors(n, bytes_=False, cmap="hsv"): """Produce a list of n unique RGBA color tuples based on a colormap. Parameters @@ -120,12 +150,12 @@ def _n_colors(n, bytes_=False, cmap='hsv'): colors : array, shape (n, 4) RGBA color values. """ - n_max = 2 ** 10 + n_max = 2**10 if n > n_max: - raise NotImplementedError("Can't produce more than %i unique " - "colors" % n_max) + raise NotImplementedError(f"Can't produce more than {n_max} unique colors.") from .viz.utils import _get_cmap + cm = _get_cmap(cmap) pos = np.linspace(0, 1, n, False) colors = cm(pos, bytes=bytes_) @@ -133,9 +163,10 @@ def _n_colors(n, bytes_=False, cmap='hsv'): # make sure colors are unique for ii, c in enumerate(colors): if np.any(np.all(colors[:ii] == c, 1)): - raise RuntimeError('Could not get %d unique colors from %s ' - 'colormap. Try using a different colormap.' - % (n, cmap)) + raise RuntimeError( + f"Could not get {n} unique colors from {cmap} " + "colormap. Try using a different colormap." + ) return colors @@ -195,18 +226,30 @@ class Label: """ @verbose - def __init__(self, vertices=(), pos=None, values=None, hemi=None, - comment="", name=None, filename=None, subject=None, - color=None, *, verbose=None): # noqa: D102 + def __init__( + self, + vertices=(), + pos=None, + values=None, + hemi=None, + comment="", + name=None, + filename=None, + subject=None, + color=None, + *, + verbose=None, + ): # check parameters if not isinstance(hemi, str): - raise ValueError('hemi must be a string, not %s' % type(hemi)) + raise ValueError(f"hemi must be a string, not {type(hemi)}") vertices = np.asarray(vertices, int) if np.any(np.diff(vertices.astype(int)) <= 0): - raise ValueError('Vertices must be ordered in increasing order.') + raise ValueError("Vertices must be ordered in increasing order.") if color is not None: from matplotlib.colors import colorConverter + color = colorConverter.to_rgba(color) if values is None: @@ -220,8 +263,9 @@ def __init__(self, vertices=(), pos=None, values=None, hemi=None, pos = np.asarray(pos) if not (len(vertices) == len(values) == len(pos)): - raise ValueError("vertices, values and pos need to have same " - "length (number of vertices)") + raise ValueError( + "vertices, values and pos need to have same length (number of vertices)" + ) # name if name is None and filename is not None: @@ -238,33 +282,35 @@ def __init__(self, vertices=(), pos=None, values=None, hemi=None, self.filename = filename def __setstate__(self, state): # noqa: D105 - self.vertices = state['vertices'] - self.pos = state['pos'] - self.values = state['values'] - self.hemi = state['hemi'] - self.comment = state['comment'] - self.subject = state.get('subject', None) - self.color = state.get('color', None) - self.name = state['name'] - self.filename = state['filename'] + self.vertices = state["vertices"] + self.pos = state["pos"] + self.values = state["values"] + self.hemi = state["hemi"] + self.comment = state["comment"] + self.subject = state.get("subject", None) + self.color = state.get("color", 
None) + self.name = state["name"] + self.filename = state["filename"] def __getstate__(self): # noqa: D105 - out = dict(vertices=self.vertices, - pos=self.pos, - values=self.values, - hemi=self.hemi, - comment=self.comment, - subject=self.subject, - color=self.color, - name=self.name, - filename=self.filename) + out = dict( + vertices=self.vertices, + pos=self.pos, + values=self.values, + hemi=self.hemi, + comment=self.comment, + subject=self.subject, + color=self.color, + name=self.name, + filename=self.filename, + ) return out def __repr__(self): # noqa: D105 - name = 'unknown, ' if self.subject is None else self.subject + ', ' + name = "unknown, " if self.subject is None else self.subject + ", " name += repr(self.name) if self.name is not None else "unnamed" n_vert = len(self) - return "